author      jhendersonHDF <jhenderson@hdfgroup.org>      2023-05-02 19:52:39 (GMT)
committer   Jordan Henderson <jhenderson@hdfgroup.org>   2023-05-03 18:26:57 (GMT)
commit      c75b4af1a2630ace445da1ec661191601583f79a (patch)
tree        a345b94dc09dd1ea1c38c1136a133b00939ba395
parent      75d64819b050bb30b2a2751d9ba55651f9a1af79 (diff)
download    hdf5-c75b4af1a2630ace445da1ec661191601583f79a.zip
            hdf5-c75b4af1a2630ace445da1ec661191601583f79a.tar.gz
            hdf5-c75b4af1a2630ace445da1ec661191601583f79a.tar.bz2
Add initial version of HDF5 API tests (#2877)
-rw-r--r--  .github/workflows/codespell.yml  2
-rw-r--r--  CMakeLists.txt  54
-rw-r--r--  hl/c++/test/Makefile.am  2
-rw-r--r--  test/API/CMake/CheckAsan.cmake  37
-rw-r--r--  test/API/CMake/CheckUbsan.cmake  37
-rw-r--r--  test/API/CMakeLists.txt  314
-rw-r--r--  test/API/H5_api_async_test.c  2730
-rw-r--r--  test/API/H5_api_async_test.h  29
-rw-r--r--  test/API/H5_api_attribute_test.c  11027
-rw-r--r--  test/API/H5_api_attribute_test.h  203
-rw-r--r--  test/API/H5_api_dataset_test.c  11683
-rw-r--r--  test/API/H5_api_dataset_test.h  331
-rw-r--r--  test/API/H5_api_datatype_test.c  2693
-rw-r--r--  test/API/H5_api_datatype_test.h  79
-rw-r--r--  test/API/H5_api_file_test.c  2564
-rw-r--r--  test/API/H5_api_file_test.h  85
-rw-r--r--  test/API/H5_api_group_test.c  2394
-rw-r--r--  test/API/H5_api_group_test.h  65
-rw-r--r--  test/API/H5_api_link_test.c  27072
-rw-r--r--  test/API/H5_api_link_test.h  437
-rw-r--r--  test/API/H5_api_misc_test.c  1060
-rw-r--r--  test/API/H5_api_misc_test.h  52
-rw-r--r--  test/API/H5_api_object_test.c  7172
-rw-r--r--  test/API/H5_api_object_test.h  191
-rw-r--r--  test/API/H5_api_test.c  227
-rw-r--r--  test/API/H5_api_test.h  73
-rw-r--r--  test/API/H5_api_test_config.h.in  66
-rw-r--r--  test/API/H5_api_test_util.c  819
-rw-r--r--  test/API/H5_api_test_util.h  24
-rw-r--r--  test/API/H5_api_tests_disabled.h  46
-rw-r--r--  test/API/README.md  86
-rw-r--r--  test/API/driver/CMakeLists.txt  17
-rw-r--r--  test/API/driver/h5_api_test_driver.cxx  910
-rw-r--r--  test/API/driver/h5_api_test_driver.hxx  93
-rw-r--r--  test/API/driver/kwsys/.clang-format  22
-rw-r--r--  test/API/driver/kwsys/.hooks-config  2
-rw-r--r--  test/API/driver/kwsys/Base64.c  225
-rw-r--r--  test/API/driver/kwsys/Base64.h.in  110
-rw-r--r--  test/API/driver/kwsys/CMakeLists.txt  1260
-rw-r--r--  test/API/driver/kwsys/CONTRIBUTING.rst  49
-rw-r--r--  test/API/driver/kwsys/CTestConfig.cmake  9
-rw-r--r--  test/API/driver/kwsys/CTestCustom.cmake.in  14
-rw-r--r--  test/API/driver/kwsys/CommandLineArguments.cxx  768
-rw-r--r--  test/API/driver/kwsys/CommandLineArguments.hxx.in  270
-rw-r--r--  test/API/driver/kwsys/Configure.h.in  89
-rw-r--r--  test/API/driver/kwsys/Configure.hxx.in  65
-rw-r--r--  test/API/driver/kwsys/ConsoleBuf.hxx.in  398
-rw-r--r--  test/API/driver/kwsys/Copyright.txt  38
-rw-r--r--  test/API/driver/kwsys/Directory.cxx  236
-rw-r--r--  test/API/driver/kwsys/Directory.hxx.in  72
-rw-r--r--  test/API/driver/kwsys/DynamicLoader.cxx  495
-rw-r--r--  test/API/driver/kwsys/DynamicLoader.hxx.in  106
-rw-r--r--  test/API/driver/kwsys/Encoding.h.in  69
-rw-r--r--  test/API/driver/kwsys/Encoding.hxx.in  80
-rw-r--r--  test/API/driver/kwsys/EncodingC.c  72
-rw-r--r--  test/API/driver/kwsys/EncodingCXX.cxx  288
-rw-r--r--  test/API/driver/kwsys/ExtraTest.cmake.in  1
-rw-r--r--  test/API/driver/kwsys/FStream.cxx  55
-rw-r--r--  test/API/driver/kwsys/FStream.hxx.in  278
-rw-r--r--  test/API/driver/kwsys/GitSetup/.gitattributes  6
-rw-r--r--  test/API/driver/kwsys/GitSetup/LICENSE  202
-rw-r--r--  test/API/driver/kwsys/GitSetup/NOTICE  5
-rw-r--r--  test/API/driver/kwsys/GitSetup/README  87
-rw-r--r--  test/API/driver/kwsys/GitSetup/config  4
-rw-r--r--  test/API/driver/kwsys/GitSetup/config.sample  32
-rw-r--r--  test/API/driver/kwsys/GitSetup/git-gerrit-push  74
-rw-r--r--  test/API/driver/kwsys/GitSetup/git-gitlab-push  177
-rw-r--r--  test/API/driver/kwsys/GitSetup/pre-commit  26
-rw-r--r--  test/API/driver/kwsys/GitSetup/setup-aliases  6
-rw-r--r--  test/API/driver/kwsys/GitSetup/setup-gerrit  147
-rw-r--r--  test/API/driver/kwsys/GitSetup/setup-gitlab  140
-rw-r--r--  test/API/driver/kwsys/GitSetup/setup-hooks  64
-rw-r--r--  test/API/driver/kwsys/GitSetup/setup-ssh  111
-rw-r--r--  test/API/driver/kwsys/GitSetup/setup-stage  82
-rw-r--r--  test/API/driver/kwsys/GitSetup/setup-upstream  104
-rw-r--r--  test/API/driver/kwsys/GitSetup/setup-user  39
-rw-r--r--  test/API/driver/kwsys/GitSetup/tips  55
-rw-r--r--  test/API/driver/kwsys/Glob.cxx  448
-rw-r--r--  test/API/driver/kwsys/Glob.hxx.in  134
-rw-r--r--  test/API/driver/kwsys/IOStream.cxx  255
-rw-r--r--  test/API/driver/kwsys/IOStream.hxx.in  126
-rw-r--r--  test/API/driver/kwsys/MD5.c  494
-rw-r--r--  test/API/driver/kwsys/MD5.h.in  97
-rw-r--r--  test/API/driver/kwsys/Process.h.in  544
-rw-r--r--  test/API/driver/kwsys/ProcessUNIX.c  2920
-rw-r--r--  test/API/driver/kwsys/ProcessWin32.c  2786
-rw-r--r--  test/API/driver/kwsys/README.rst  37
-rw-r--r--  test/API/driver/kwsys/RegularExpression.cxx  1218
-rw-r--r--  test/API/driver/kwsys/RegularExpression.hxx.in  562
-rw-r--r--  test/API/driver/kwsys/SetupForDevelopment.sh  20
-rw-r--r--  test/API/driver/kwsys/SharedForward.h.in  879
-rw-r--r--  test/API/driver/kwsys/String.c  100
-rw-r--r--  test/API/driver/kwsys/String.h.in  57
-rw-r--r--  test/API/driver/kwsys/String.hxx.in  65
-rw-r--r--  test/API/driver/kwsys/System.c  236
-rw-r--r--  test/API/driver/kwsys/System.h.in  60
-rw-r--r--  test/API/driver/kwsys/SystemInformation.cxx  5466
-rw-r--r--  test/API/driver/kwsys/SystemInformation.hxx.in  170
-rw-r--r--  test/API/driver/kwsys/SystemTools.cxx  4703
-rw-r--r--  test/API/driver/kwsys/SystemTools.hxx.in  981
-rw-r--r--  test/API/driver/kwsys/Terminal.c  414
-rw-r--r--  test/API/driver/kwsys/Terminal.h.in  170
-rw-r--r--  test/API/driver/kwsys/clang-format.bash  128
-rw-r--r--  test/API/driver/kwsys/hash_fun.hxx.in  166
-rw-r--r--  test/API/driver/kwsys/hash_map.hxx.in  423
-rw-r--r--  test/API/driver/kwsys/hash_set.hxx.in  392
-rw-r--r--  test/API/driver/kwsys/hashtable.hxx.in  995
-rw-r--r--  test/API/driver/kwsys/kwsysHeaderDump.pl  41
-rw-r--r--  test/API/driver/kwsys/kwsysPlatformTests.cmake  216
-rw-r--r--  test/API/driver/kwsys/kwsysPlatformTestsC.c  108
-rw-r--r--  test/API/driver/kwsys/kwsysPlatformTestsCXX.cxx  335
-rw-r--r--  test/API/driver/kwsys/kwsysPrivate.h  34
-rw-r--r--  test/API/driver/kwsys/testCommandLineArguments.cxx  209
-rw-r--r--  test/API/driver/kwsys/testCommandLineArguments1.cxx  93
-rw-r--r--  test/API/driver/kwsys/testConfigure.cxx  30
-rw-r--r--  test/API/driver/kwsys/testConsoleBuf.cxx  782
-rw-r--r--  test/API/driver/kwsys/testConsoleBuf.hxx  17
-rw-r--r--  test/API/driver/kwsys/testConsoleBufChild.cxx  55
-rw-r--r--  test/API/driver/kwsys/testDirectory.cxx  110
-rw-r--r--  test/API/driver/kwsys/testDynamicLoader.cxx  133
-rw-r--r--  test/API/driver/kwsys/testDynload.c  13
-rw-r--r--  test/API/driver/kwsys/testDynloadImpl.c  10
-rw-r--r--  test/API/driver/kwsys/testDynloadImpl.h  15
-rw-r--r--  test/API/driver/kwsys/testDynloadUse.c  15
-rw-r--r--  test/API/driver/kwsys/testEncode.c  67
-rw-r--r--  test/API/driver/kwsys/testEncoding.cxx  286
-rw-r--r--  test/API/driver/kwsys/testFStream.cxx  113
-rw-r--r--  test/API/driver/kwsys/testFail.c  24
-rw-r--r--  test/API/driver/kwsys/testHashSTL.cxx  64
-rw-r--r--  test/API/driver/kwsys/testProcess.c  728
-rw-r--r--  test/API/driver/kwsys/testSharedForward.c.in  27
-rw-r--r--  test/API/driver/kwsys/testSystemInformation.cxx  106
-rw-r--r--  test/API/driver/kwsys/testSystemTools.bin  bin  0 -> 766 bytes
-rw-r--r--  test/API/driver/kwsys/testSystemTools.cxx  1128
-rw-r--r--  test/API/driver/kwsys/testSystemTools.h.in  12
-rw-r--r--  test/API/driver/kwsys/testTerminal.c  22
-rw-r--r--  test/API/driver/kwsys/update-gitsetup.bash  20
-rw-r--r--  test/API/driver/kwsys/update-third-party.bash  169
-rw-r--r--  test/API/tarray.c  2250
-rw-r--r--  test/API/tattr.c  11929
-rw-r--r--  test/API/tchecksum.c  251
-rw-r--r--  test/API/tconfig.c  199
-rw-r--r--  test/API/tcoords.c  724
-rw-r--r--  test/API/testhdf5.c  729
-rw-r--r--  test/API/testhdf5.h  349
-rw-r--r--  test/API/tfile.c  8381
-rw-r--r--  test/API/tgenprop.c  2201
-rw-r--r--  test/API/th5o.c  1889
-rw-r--r--  test/API/th5s.c  3538
-rw-r--r--  test/API/tid.c  1413
-rw-r--r--  test/API/titerate.c  1263
-rw-r--r--  test/API/tmisc.c  6349
-rw-r--r--  test/API/trefer.c  3641
-rw-r--r--  test/API/tselect.c  16314
-rw-r--r--  test/API/ttime.c  231
-rw-r--r--  test/API/tunicode.c  867
-rw-r--r--  test/API/tvlstr.c  1013
-rw-r--r--  test/API/tvltypes.c  3268
-rw-r--r--  test/CMakeLists.txt  20
-rw-r--r--  test/h5test.c  7
-rw-r--r--  test/h5test.h  72
-rw-r--r--  test/vol.c  62
-rw-r--r--  testpar/API/CMakeLists.txt  279
-rw-r--r--  testpar/API/H5_api_async_test_parallel.c  3668
-rw-r--r--  testpar/API/H5_api_async_test_parallel.h  29
-rw-r--r--  testpar/API/H5_api_attribute_test_parallel.c  47
-rw-r--r--  testpar/API/H5_api_attribute_test_parallel.h  20
-rw-r--r--  testpar/API/H5_api_dataset_test_parallel.c  8149
-rw-r--r--  testpar/API/H5_api_dataset_test_parallel.h  20
-rw-r--r--  testpar/API/H5_api_datatype_test_parallel.c  47
-rw-r--r--  testpar/API/H5_api_datatype_test_parallel.h  20
-rw-r--r--  testpar/API/H5_api_file_test_parallel.c  367
-rw-r--r--  testpar/API/H5_api_file_test_parallel.h  20
-rw-r--r--  testpar/API/H5_api_group_test_parallel.c  47
-rw-r--r--  testpar/API/H5_api_group_test_parallel.h  20
-rw-r--r--  testpar/API/H5_api_link_test_parallel.c  47
-rw-r--r--  testpar/API/H5_api_link_test_parallel.h  20
-rw-r--r--  testpar/API/H5_api_misc_test_parallel.c  47
-rw-r--r--  testpar/API/H5_api_misc_test_parallel.h  20
-rw-r--r--  testpar/API/H5_api_object_test_parallel.c  47
-rw-r--r--  testpar/API/H5_api_object_test_parallel.h  20
-rw-r--r--  testpar/API/H5_api_test_parallel.c  338
-rw-r--r--  testpar/API/H5_api_test_parallel.h  188
-rw-r--r--  testpar/API/t_bigio.c  1942
-rw-r--r--  testpar/API/t_chunk_alloc.c  512
-rw-r--r--  testpar/API/t_coll_chunk.c  1417
-rw-r--r--  testpar/API/t_coll_md_read.c  654
-rw-r--r--  testpar/API/t_dset.c  4335
-rw-r--r--  testpar/API/t_file.c  1032
-rw-r--r--  testpar/API/t_file_image.c  371
-rw-r--r--  testpar/API/t_filter_read.c  564
-rw-r--r--  testpar/API/t_mdset.c  2814
-rw-r--r--  testpar/API/t_ph5basic.c  192
-rw-r--r--  testpar/API/t_prop.c  646
-rw-r--r--  testpar/API/t_pshutdown.c  150
-rw-r--r--  testpar/API/t_shapesame.c  4516
-rw-r--r--  testpar/API/t_span_tree.c  2622
-rw-r--r--  testpar/API/testphdf5.c  1007
-rw-r--r--  testpar/API/testphdf5.h  343
-rw-r--r--  testpar/CMakeLists.txt  20
200 files changed, 212642 insertions, 49 deletions
diff --git a/.github/workflows/codespell.yml b/.github/workflows/codespell.yml
index a4edb0f..ddf1038 100644
--- a/.github/workflows/codespell.yml
+++ b/.github/workflows/codespell.yml
@@ -11,5 +11,5 @@ jobs:
- uses: actions/checkout@v3
- uses: codespell-project/actions-codespell@master
with:
- skip: ./bin/trace,./hl/tools/h5watch/h5watch.c,./tools/test/h5jam/tellub.c,./config/sanitizer/LICENSE,./config/sanitizer/sanitizers.cmake,./tools/test/h5repack/testfiles/*.dat
+ skip: ./bin/trace,./hl/tools/h5watch/h5watch.c,./tools/test/h5jam/tellub.c,./config/sanitizer/LICENSE,./config/sanitizer/sanitizers.cmake,./tools/test/h5repack/testfiles/*.dat,./test/API/driver
ignore_words_list: isnt,inout,nd,parms,parm,ba,offsetP,ser,ois,had,fiter,fo,clude,refere,minnum,offsetp,creat,ans:,eiter,lastr,ans,isn't,ifset,sur,trun,dne,tthe,hda,filname,te,htmp,minnum,ake,gord,numer,ro,oce,msdos
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 9da39fa..62bbd00 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -208,20 +208,23 @@ set (HDF5_HL_F90_C_LIBSH_TARGET "${HDF5_HL_F90_C_LIB_CORENAME}-shared")
#-----------------------------------------------------------------------------
# Define some CMake variables for use later in the project
#-----------------------------------------------------------------------------
-set (HDF_CONFIG_DIR ${HDF5_SOURCE_DIR}/config)
-set (HDF_RESOURCES_DIR ${HDF5_SOURCE_DIR}/config/cmake)
-set (HDF5_SRC_DIR ${HDF5_SOURCE_DIR}/src)
-set (HDF5_TEST_SRC_DIR ${HDF5_SOURCE_DIR}/test)
-set (HDF5_CPP_SRC_DIR ${HDF5_SOURCE_DIR}/c++)
-set (HDF5_CPP_TST_DIR ${HDF5_SOURCE_DIR}/c++/test)
-set (HDF5_HL_SRC_DIR ${HDF5_SOURCE_DIR}/hl)
-set (HDF5_HL_CPP_SRC_DIR ${HDF5_SOURCE_DIR}/hl/c++)
-set (HDF5_HL_TOOLS_DIR ${HDF5_SOURCE_DIR}/hl/tools)
-set (HDF5_TOOLS_DIR ${HDF5_SOURCE_DIR}/tools)
-set (HDF5_TOOLS_SRC_DIR ${HDF5_SOURCE_DIR}/tools/src)
-set (HDF5_PERFORM_SRC_DIR ${HDF5_SOURCE_DIR}/tools/src/perform)
-set (HDF5_UTILS_DIR ${HDF5_SOURCE_DIR}/utils)
-set (HDF5_F90_SRC_DIR ${HDF5_SOURCE_DIR}/fortran)
+set (HDF_CONFIG_DIR ${HDF5_SOURCE_DIR}/config)
+set (HDF_RESOURCES_DIR ${HDF5_SOURCE_DIR}/config/cmake)
+set (HDF5_SRC_DIR ${HDF5_SOURCE_DIR}/src)
+set (HDF5_TEST_SRC_DIR ${HDF5_SOURCE_DIR}/test)
+set (HDF5_TEST_PAR_DIR ${HDF5_SOURCE_DIR}/testpar)
+set (HDF5_TEST_API_SRC_DIR ${HDF5_SOURCE_DIR}/test/API)
+set (HDF5_TEST_API_PAR_SRC_DIR ${HDF5_SOURCE_DIR}/testpar/API)
+set (HDF5_CPP_SRC_DIR ${HDF5_SOURCE_DIR}/c++)
+set (HDF5_CPP_TST_DIR ${HDF5_SOURCE_DIR}/c++/test)
+set (HDF5_HL_SRC_DIR ${HDF5_SOURCE_DIR}/hl)
+set (HDF5_HL_CPP_SRC_DIR ${HDF5_SOURCE_DIR}/hl/c++)
+set (HDF5_HL_TOOLS_DIR ${HDF5_SOURCE_DIR}/hl/tools)
+set (HDF5_TOOLS_DIR ${HDF5_SOURCE_DIR}/tools)
+set (HDF5_TOOLS_SRC_DIR ${HDF5_SOURCE_DIR}/tools/src)
+set (HDF5_PERFORM_SRC_DIR ${HDF5_SOURCE_DIR}/tools/src/perform)
+set (HDF5_UTILS_DIR ${HDF5_SOURCE_DIR}/utils)
+set (HDF5_F90_SRC_DIR ${HDF5_SOURCE_DIR}/fortran)
set (HDF5_JAVA_JNI_SRC_DIR ${HDF5_SOURCE_DIR}/java/src/jni)
set (HDF5_JAVA_HDF5_SRC_DIR ${HDF5_SOURCE_DIR}/java/src/hdf)
set (HDF5_JAVA_TEST_SRC_DIR ${HDF5_SOURCE_DIR}/java/test)
@@ -947,6 +950,25 @@ if (BUILD_TESTING)
math (EXPR CTEST_LONG_TIMEOUT "${DART_TESTING_TIMEOUT} * 2")
math (EXPR CTEST_VERY_LONG_TIMEOUT "${DART_TESTING_TIMEOUT} * 3")
+ option (HDF5_TEST_API "Execute HDF5 API tests" OFF)
+ mark_as_advanced (HDF5_TEST_API)
+ if (HDF5_TEST_API)
+ option (HDF5_TEST_API_INSTALL "Install HDF5 API tests" OFF)
+ mark_as_advanced (HDF5_TEST_API_INSTALL)
+
+ # Enable HDF5 Async API tests
+ option (HDF5_TEST_API_ENABLE_ASYNC "Enable HDF5 Async API tests" OFF)
+ mark_as_advanced (HDF5_TEST_API_ENABLE_ASYNC)
+
+ # Build and use HDF5 test driver program for API tests
+ option (HDF5_TEST_API_ENABLE_DRIVER "Enable HDF5 API test driver program" OFF)
+ mark_as_advanced (HDF5_TEST_API_ENABLE_DRIVER)
+ if (HDF5_TEST_API_ENABLE_DRIVER)
+ set (HDF5_TEST_API_SERVER "" CACHE STRING "Server executable for running API tests")
+ mark_as_advanced (HDF5_TEST_API_SERVER)
+ endif ()
+ endif ()
+
option (HDF5_TEST_VFD "Execute tests with different VFDs" OFF)
mark_as_advanced (HDF5_TEST_VFD)
if (HDF5_TEST_VFD)
@@ -1003,11 +1025,11 @@ if (BUILD_TESTING)
mark_as_advanced (HDF5_TEST_JAVA)
if (NOT HDF5_EXTERNALLY_CONFIGURED)
- if (EXISTS "${HDF5_SOURCE_DIR}/test" AND IS_DIRECTORY "${HDF5_SOURCE_DIR}/test")
+ if (EXISTS "${HDF5_TEST_SRC_DIR}" AND IS_DIRECTORY "${HDF5_TEST_SRC_DIR}")
add_subdirectory (test)
endif ()
if (H5_HAVE_PARALLEL)
- if (EXISTS "${HDF5_SOURCE_DIR}/testpar" AND IS_DIRECTORY "${HDF5_SOURCE_DIR}/testpar")
+ if (EXISTS "${HDF5_TEST_PAR_DIR}" AND IS_DIRECTORY "${HDF5_TEST_PAR_DIR}")
add_subdirectory (testpar)
endif ()
endif ()
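
[Illustrative note, not part of the patch] The CMakeLists.txt hunk above introduces the new API test options (HDF5_TEST_API, HDF5_TEST_API_INSTALL, HDF5_TEST_API_ENABLE_ASYNC, HDF5_TEST_API_ENABLE_DRIVER) plus the HDF5_TEST_API_SERVER cache string, all defaulting to OFF/empty. A minimal sketch of pre-seeding them from a CMake cache pre-load script (loaded with cmake -C); the server executable path below is only a placeholder, not something defined by this commit:

  # Hypothetical cache pre-load script; option names come from the hunk above,
  # the server path is a placeholder.
  set (BUILD_TESTING               ON                    CACHE BOOL   "" FORCE)
  set (HDF5_TEST_API               ON                    CACHE BOOL   "" FORCE)
  set (HDF5_TEST_API_ENABLE_ASYNC  ON                    CACHE BOOL   "" FORCE)
  set (HDF5_TEST_API_ENABLE_DRIVER ON                    CACHE BOOL   "" FORCE)
  set (HDF5_TEST_API_SERVER        "/path/to/server_exe" CACHE STRING "" FORCE)

Because option() does not overwrite an existing cache entry, values seeded this way take precedence over the OFF defaults declared in the hunk.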
diff --git a/hl/c++/test/Makefile.am b/hl/c++/test/Makefile.am
index 251d56a..73f1463 100644
--- a/hl/c++/test/Makefile.am
+++ b/hl/c++/test/Makefile.am
@@ -26,7 +26,7 @@ TEST_PROG=ptableTest
check_PROGRAMS=$(TEST_PROG)
# The tests depend on the hdf5, hdf5 C++, and hdf5_hl libraries
-LDADD=$(LIBH5CPP_HL) $(LIBH5_HL) $(LIBH5CPP) $(LIBHDF5)
+LDADD=$(LIBH5CPP_HL) $(LIBH5_HL) $(LIBH5TEST) $(LIBH5CPP) $(LIBHDF5)
ptableTest_SOURCES=ptableTest.cpp
diff --git a/test/API/CMake/CheckAsan.cmake b/test/API/CMake/CheckAsan.cmake
new file mode 100644
index 0000000..32f4b45
--- /dev/null
+++ b/test/API/CMake/CheckAsan.cmake
@@ -0,0 +1,37 @@
+set(ASAN_FLAG "-fsanitize=address")
+set(ASAN_C_FLAGS "-O1 -g ${ASAN_FLAG} -fsanitize-address-use-after-scope -fno-omit-frame-pointer -fno-optimize-sibling-calls")
+set(ASAN_CXX_FLAGS ${ASAN_C_FLAGS})
+
+get_property(ASAN_LANGUAGES GLOBAL PROPERTY ENABLED_LANGUAGES)
+foreach(lang ${ASAN_LANGUAGES})
+ set(ASAN_${lang}_LANG_ENABLED 1)
+endforeach()
+
+if(ASAN_C_LANG_ENABLED)
+ include(CheckCCompilerFlag)
+ set(CMAKE_REQUIRED_LINK_OPTIONS ${ASAN_FLAG})
+ check_c_compiler_flag(${ASAN_FLAG} ASAN_C_FLAG_SUPPORTED)
+ if(NOT ASAN_C_FLAG_SUPPORTED)
+ message(STATUS "Asan flags are not supported by the C compiler.")
+ else()
+ if(NOT CMAKE_C_FLAGS_ASAN)
+ set(CMAKE_C_FLAGS_ASAN ${ASAN_C_FLAGS} CACHE STRING "Flags used by the C compiler during ASAN builds." FORCE)
+ endif()
+ endif()
+ unset(CMAKE_REQUIRED_LINK_OPTIONS)
+endif()
+
+if(ASAN_CXX_LANG_ENABLED)
+ include(CheckCXXCompilerFlag)
+ set(CMAKE_REQUIRED_LINK_OPTIONS ${ASAN_FLAG})
+ check_cxx_compiler_flag(${ASAN_FLAG} ASAN_CXX_FLAG_SUPPORTED)
+ if(NOT ASAN_CXX_FLAG_SUPPORTED)
+ message(STATUS "Asan flags are not supported by the CXX compiler.")
+ else()
+ if(NOT CMAKE_CXX_FLAGS_ASAN)
+ set(CMAKE_CXX_FLAGS_ASAN ${ASAN_CXX_FLAGS} CACHE STRING "Flags used by the CXX compiler during ASAN builds." FORCE)
+ endif()
+ endif()
+ unset(CMAKE_REQUIRED_LINK_OPTIONS)
+endif()
+
diff --git a/test/API/CMake/CheckUbsan.cmake b/test/API/CMake/CheckUbsan.cmake
new file mode 100644
index 0000000..f2b9c2c
--- /dev/null
+++ b/test/API/CMake/CheckUbsan.cmake
@@ -0,0 +1,37 @@
+set(UBSAN_FLAG "-fsanitize=undefined")
+set(UBSAN_C_FLAGS "-O1 -g ${UBSAN_FLAG} -fno-omit-frame-pointer")
+set(UBSAN_CXX_FLAGS ${UBSAN_C_FLAGS})
+
+get_property(UBSAN_LANGUAGES GLOBAL PROPERTY ENABLED_LANGUAGES)
+foreach(lang ${UBSAN_LANGUAGES})
+ set(UBSAN_${lang}_LANG_ENABLED 1)
+endforeach()
+
+if(UBSAN_C_LANG_ENABLED)
+ include(CheckCCompilerFlag)
+ set(CMAKE_REQUIRED_LINK_OPTIONS ${UBSAN_FLAG})
+ check_c_compiler_flag(${UBSAN_FLAG} UBSAN_C_FLAG_SUPPORTED)
+ if(NOT UBSAN_C_FLAG_SUPPORTED)
+ message(STATUS "Ubsan flags are not supported by the C compiler.")
+ else()
+ if(NOT CMAKE_C_FLAGS_UBSAN)
+ set(CMAKE_C_FLAGS_UBSAN ${UBSAN_C_FLAGS} CACHE STRING "Flags used by the C compiler during UBSAN builds." FORCE)
+ endif()
+ endif()
+ unset(CMAKE_REQUIRED_LINK_OPTIONS)
+endif()
+
+if(UBSAN_CXX_LANG_ENABLED)
+ include(CheckCXXCompilerFlag)
+ set(CMAKE_REQUIRED_LINK_OPTIONS ${UBSAN_FLAG})
+ check_cxx_compiler_flag(${UBSAN_FLAG} UBSAN_CXX_FLAG_SUPPORTED)
+ if(NOT UBSAN_CXX_FLAG_SUPPORTED)
+ message(STATUS "Ubsan flags are not supported by the CXX compiler.")
+ else()
+ if(NOT CMAKE_CXX_FLAGS_UBSAN)
+ set(CMAKE_CXX_FLAGS_UBSAN ${UBSAN_CXX_FLAGS} CACHE STRING "Flags used by the CXX compiler during UBSAN builds." FORCE)
+ endif()
+ endif()
+ unset(CMAKE_REQUIRED_LINK_OPTIONS)
+endif()
+
diff --git a/test/API/CMakeLists.txt b/test/API/CMakeLists.txt
new file mode 100644
index 0000000..d189d67
--- /dev/null
+++ b/test/API/CMakeLists.txt
@@ -0,0 +1,314 @@
+# Copyright by The HDF Group.
+# All rights reserved.
+#
+# This file is part of HDF5. The full HDF5 copyright notice, including
+# terms governing use, modification, and redistribution, is contained in
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://www.hdfgroup.org/licenses.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
+#
+
+#------------------------------------------------------------------------------
+# Set module path
+#------------------------------------------------------------------------------
+set(HDF5_TEST_API_CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/CMake")
+set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} ${HDF5_TEST_API_CMAKE_MODULE_PATH})
+
+# TODO: probably not necessary
+#------------------------------------------------------------------------------
+# Setup CMake Environment
+#------------------------------------------------------------------------------
+if (WIN32)
+ message("The HDF5 API test suite is currently not supported on this platform." FATAL_ERROR)
+endif ()
+
+#------------------------------------------------------------------------------
+# Setup testing configuration file
+#------------------------------------------------------------------------------
+if (HDF5_TEST_PARALLEL)
+ set (HDF5_TEST_API_HAVE_PARALLEL 1)
+endif ()
+if (HDF5_TEST_API_ENABLE_ASYNC)
+ set (H5_API_TEST_HAVE_ASYNC 1)
+endif ()
+
+configure_file(
+ ${CMAKE_CURRENT_SOURCE_DIR}/H5_api_test_config.h.in
+ ${HDF5_TEST_BINARY_DIR}/H5_api_test_config.h
+)
+
+#------------------------------------------------------------------------------
+# Compile kwsys library and setup TestDriver
+#------------------------------------------------------------------------------
+if (HDF5_TEST_API_ENABLE_DRIVER)
+ add_subdirectory (driver)
+endif ()
+
+#------------------------------------------------------------------------------
+# Setup for API tests
+#------------------------------------------------------------------------------
+
+# Ported HDF5 tests
+set (HDF5_API_TESTS_EXTRA
+ testhdf5
+)
+
+# List of files generated by the HDF5 API tests which
+# should be cleaned up in case the test failed to remove
+# them
+set (HDF5_API_TESTS_FILES
+ H5_api_test.h5
+ H5_api_async_test.h5
+ H5_api_async_test_0.h5
+ H5_api_async_test_1.h5
+ H5_api_async_test_2.h5
+ H5_api_async_test_3.h5
+ H5_api_async_test_4.h5
+ test_file.h5
+ invalid_params_file.h5
+ excl_flag_file.h5
+ overlapping_file.h5
+ file_permission.h5
+ flush_file.h5
+ property_list_test_file1.h5
+ property_list_test_file2.h5
+ intent_test_file.h5
+ file_obj_count1.h5
+ file_obj_count2.h5
+ file_mount.h5
+ file_name_retrieval.h5
+ filespace_info.h5
+ test_file_id.h5
+ test_close_degree.h5
+ test_free_sections.h5
+ file_size.h5
+ file_info.h5
+ double_group_open.h5
+ ext_link_file.h5
+ ext_link_file_2.h5
+ ext_link_file_3.h5
+ ext_link_file_4.h5
+ ext_link_file_ping_pong_1.h5
+ ext_link_file_ping_pong_2.h5
+ ext_link_invalid_params_file.h5
+ object_copy_test_file.h5
+)
+
+#-----------------------------------------------------------------------------
+# Build the main API test executable
+#-----------------------------------------------------------------------------
+foreach (api_test ${HDF5_API_TESTS})
+ set (HDF5_API_TEST_SRCS
+ ${HDF5_API_TEST_SRCS}
+ ${CMAKE_CURRENT_SOURCE_DIR}/H5_api_${api_test}_test.c
+ )
+endforeach ()
+
+set (HDF5_API_TEST_SRCS
+ ${HDF5_API_TEST_SRCS}
+ ${CMAKE_CURRENT_SOURCE_DIR}/H5_api_test.c
+ ${HDF5_TEST_API_SRC_DIR}/H5_api_test_util.c
+)
+
+add_executable (h5_api_test ${HDF5_API_TEST_SRCS})
+target_include_directories (
+ h5_api_test
+ PRIVATE
+ "${HDF5_SRC_INCLUDE_DIRS}"
+ "${HDF5_TEST_SRC_DIR}"
+ "${HDF5_TEST_API_SRC_DIR}"
+ "${HDF5_SRC_BINARY_DIR}"
+ "${HDF5_TEST_BINARY_DIR}"
+)
+target_compile_options (
+ h5_api_test
+ PRIVATE
+ "${HDF5_CMAKE_C_FLAGS}"
+)
+target_compile_definitions (
+ h5_api_test
+ PRIVATE
+ $<$<CONFIG:Developer>:${HDF5_DEVELOPER_DEFS}>
+)
+if (NOT BUILD_SHARED_LIBS)
+ TARGET_C_PROPERTIES (h5_api_test STATIC)
+ target_link_libraries (
+ h5_api_test
+ PRIVATE
+ ${HDF5_TEST_LIB_TARGET}
+ )
+else ()
+ TARGET_C_PROPERTIES (h5_api_test SHARED)
+ target_link_libraries (
+ h5_api_test
+ PRIVATE
+ ${HDF5_TEST_LIBSH_TARGET}
+ )
+endif ()
+set_target_properties (
+ h5_api_test
+ PROPERTIES
+ FOLDER test/API
+)
+# Add Target to clang-format
+if (HDF5_ENABLE_FORMATTERS)
+ clang_format (HDF5_TEST_h5_api_test_FORMAT h5_api_test)
+endif ()
+
+#-----------------------------------------------------------------------------
+# Build the ported HDF5 test executables
+#-----------------------------------------------------------------------------
+foreach (api_test_extra ${HDF5_API_TESTS_EXTRA})
+ unset (HDF5_API_TEST_EXTRA_SRCS)
+
+ set (HDF5_API_TEST_EXTRA_SRCS
+ ${HDF5_API_TEST_EXTRA_SRCS}
+ ${CMAKE_CURRENT_SOURCE_DIR}/${api_test_extra}.c
+ )
+
+ if (${api_test_extra} STREQUAL "testhdf5")
+ set (HDF5_API_TEST_EXTRA_SRCS
+ ${HDF5_API_TEST_EXTRA_SRCS}
+ ${CMAKE_CURRENT_SOURCE_DIR}/tarray.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/tattr.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/tchecksum.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/tconfig.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/tcoords.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/tfile.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/tgenprop.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/th5o.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/th5s.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/tid.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/titerate.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/tmisc.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/trefer.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/tselect.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/ttime.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/tunicode.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/tvlstr.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/tvltypes.c
+ )
+ endif ()
+
+ add_executable (h5_api_test_${api_test_extra} ${HDF5_API_TEST_EXTRA_SRCS})
+ target_include_directories (
+ h5_api_test_${api_test_extra}
+ PRIVATE
+ "${HDF5_SRC_INCLUDE_DIRS}"
+ "${HDF5_TEST_SRC_DIR}"
+ "${HDF5_TEST_API_SRC_DIR}"
+ "${HDF5_SRC_BINARY_DIR}"
+ "${HDF5_TEST_BINARY_DIR}"
+ )
+ target_compile_options (
+ h5_api_test_${api_test_extra}
+ PRIVATE
+ "${HDF5_CMAKE_C_FLAGS}"
+ )
+ target_compile_definitions (
+ h5_api_test_${api_test_extra}
+ PRIVATE
+ $<$<CONFIG:Developer>:${HDF5_DEVELOPER_DEFS}>
+ )
+ if (NOT BUILD_SHARED_LIBS)
+ TARGET_C_PROPERTIES (h5_api_test_${api_test_extra} STATIC)
+ target_link_libraries (h5_api_test_${api_test_extra} PRIVATE ${HDF5_TEST_LIB_TARGET})
+ else ()
+ TARGET_C_PROPERTIES (h5_api_test_${api_test_extra} SHARED)
+ target_link_libraries (h5_api_test_${api_test_extra} PRIVATE ${HDF5_TEST_LIBSH_TARGET})
+ endif ()
+ set_target_properties (
+ h5_api_test_${api_test_extra}
+ PROPERTIES
+ FOLDER test/API
+ )
+ # Add Target to clang-format
+ if (HDF5_ENABLE_FORMATTERS)
+ clang_format (HDF5_TEST_h5_api_test_${api_test_extra}_FORMAT h5_api_test_${api_test_extra})
+ endif ()
+endforeach ()
+
+#-----------------------------------------------------------------------------
+# Add tests if HDF5 serial testing is enabled
+#-----------------------------------------------------------------------------
+if (HDF5_TEST_SERIAL)
+ if (HDF5_TEST_API_ENABLE_DRIVER)
+ if ("${HDF5_TEST_API_SERVER}" STREQUAL "")
+ message (FATAL_ERROR "Please set HDF5_TEST_API_SERVER to point to a server executable for the test driver program.")
+ endif ()
+
+ # Driver options
+ if (HDF5_TEST_API_SERVER_ALLOW_ERRORS)
+ set (HDF5_TEST_API_DRIVER_EXTRA_FLAGS --allow-server-errors)
+ endif ()
+ if (HDF5_TEST_API_CLIENT_HELPER)
+ set (HDF5_TEST_API_DRIVER_EXTRA_FLAGS ${HDF5_TEST_API_DRIVER_EXTRA_FLAGS}
+ --client-helper ${HDF5_TEST_API_CLIENT_HELPER}
+ )
+ endif ()
+ if (HDF5_TEST_API_CLIENT_INIT)
+ set (HDF5_TEST_API_DRIVER_EXTRA_FLAGS ${HDF5_TEST_API_DRIVER_EXTRA_FLAGS}
+ --client-init ${HDF5_TEST_API_CLIENT_INIT}
+ )
+ endif ()
+
+ set(last_api_test "")
+ foreach (api_test ${HDF5_API_TESTS})
+ add_test (
+ NAME "h5_api_test_${api_test}"
+ COMMAND $<TARGET_FILE:h5_api_test_driver>
+ --server ${HDF5_TEST_API_SERVER}
+ --client $<TARGET_FILE:h5_api_test> "${api_test}"
+ --serial
+ ${HDF5_TEST_API_DRIVER_EXTRA_FLAGS}
+ )
+
+ set_tests_properties("h5_api_test_${api_test}" PROPERTIES DEPENDS "${last_api_test}")
+
+ set(last_api_test "h5_api_test_${api_test}")
+ endforeach ()
+
+ foreach (hdf5_test ${HDF5_API_TESTS_EXTRA})
+ add_test (
+ NAME "h5_api_test_${hdf5_test}"
+ COMMAND $<TARGET_FILE:h5_api_test_driver>
+ --server ${HDF5_TEST_API_SERVER}
+ --client $<TARGET_FILE:h5_api_test_${hdf5_test}>
+ --serial
+ ${HDF5_TEST_API_DRIVER_EXTRA_FLAGS}
+ )
+ endforeach ()
+
+ # Hook external tests to same test suite
+ foreach (ext_api_test ${HDF5_API_EXT_SERIAL_TESTS})
+ add_test (
+ NAME "h5_api_ext_test_${ext_api_test}"
+ COMMAND $<TARGET_FILE:h5_api_test_driver>
+ --server ${HDF5_TEST_API_SERVER}
+ --client $<TARGET_FILE:${ext_api_test}>
+ --serial
+ ${HDF5_TEST_API_DRIVER_EXTRA_FLAGS}
+ )
+ endforeach ()
+ else ()
+ set(last_api_test "")
+ foreach (api_test ${HDF5_API_TESTS})
+ add_test (
+ NAME "h5_api_test_${api_test}"
+ COMMAND $<TARGET_FILE:h5_api_test> "${api_test}"
+ )
+
+ set_tests_properties("h5_api_test_${api_test}" PROPERTIES DEPENDS "${last_api_test}")
+
+ set(last_api_test "h5_api_test_${api_test}")
+ endforeach ()
+
+ foreach (hdf5_test ${HDF5_API_TESTS_EXTRA})
+ add_test (
+ NAME "h5_api_test_${hdf5_test}"
+ COMMAND $<TARGET_FILE:h5_api_test_${hdf5_test}>
+ )
+ endforeach ()
+ endif ()
+endif ()
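
[Illustrative note, not part of the patch] The "Hook external tests" loop at the end of the new test/API/CMakeLists.txt iterates over HDF5_API_EXT_SERIAL_TESTS, which is consumed but not defined in this file. A minimal sketch, with a hypothetical target name, of how an external consumer might populate that list before this directory is processed:

  # Hypothetical: register an out-of-tree API test executable with the suite;
  # "my_connector_api_test" is a placeholder target name, not from this commit.
  set (HDF5_API_EXT_SERIAL_TESTS ${HDF5_API_EXT_SERIAL_TESTS} my_connector_api_test)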
diff --git a/test/API/H5_api_async_test.c b/test/API/H5_api_async_test.c
new file mode 100644
index 0000000..b5208ba
--- /dev/null
+++ b/test/API/H5_api_async_test.c
@@ -0,0 +1,2730 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+#include "H5_api_async_test.h"
+
+#ifdef H5ESpublic_H
+
+static int test_one_dataset_io(void);
+static int test_multi_dataset_io(void);
+static int test_multi_file_dataset_io(void);
+static int test_multi_file_grp_dset_io(void);
+static int test_set_extent(void);
+static int test_attribute_exists(void);
+static int test_attribute_io(void);
+static int test_attribute_io_tconv(void);
+static int test_attribute_io_compound(void);
+static int test_group(void);
+static int test_link(void);
+static int test_ocopy_orefresh(void);
+static int test_file_reopen(void);
+
+/*
+ * The array of async tests to be performed.
+ */
+static int (*async_tests[])(void) = {
+ test_one_dataset_io,
+ test_multi_dataset_io,
+ test_multi_file_dataset_io,
+ test_multi_file_grp_dset_io,
+ test_set_extent,
+ test_attribute_exists,
+ test_attribute_io,
+ test_attribute_io_tconv,
+ test_attribute_io_compound,
+ test_group,
+ test_link,
+ test_ocopy_orefresh,
+ test_file_reopen,
+};
+
+/* Highest "printf" file created (starting at 0) */
+int max_printf_file = -1;
+
+/*
+ * Create file and dataset, write to dataset
+ */
+static int
+test_one_dataset_io(void)
+{
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t space_id = H5I_INVALID_HID;
+ hid_t es_id = H5I_INVALID_HID;
+ hsize_t dims[2] = {6, 10};
+ size_t num_in_progress;
+ hbool_t op_failed;
+ int wbuf[6][10];
+ int rbuf[6][10];
+ int i, j;
+
+ TESTING_MULTIPART("single dataset I/O");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_FLUSH_REFRESH)) {
+ SKIPPED();
+ HDprintf(
+ " API functions for basic file, dataset, or flush aren't supported with this connector\n");
+ return 0;
+ }
+
+ TESTING_2("test setup");
+
+ /* Create dataspace */
+ if ((space_id = H5Screate_simple(2, dims, NULL)) < 0)
+ TEST_ERROR;
+
+ /* Create event stack */
+ if ((es_id = H5EScreate()) < 0)
+ TEST_ERROR;
+
+ /* Create file asynchronously */
+ if ((file_id = H5Fcreate_async(ASYNC_API_TEST_FILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT, es_id)) < 0)
+ TEST_ERROR;
+
+ /* Create the dataset asynchronously */
+ if ((dset_id = H5Dcreate_async(file_id, "dset", H5T_NATIVE_INT, space_id, H5P_DEFAULT, H5P_DEFAULT,
+ H5P_DEFAULT, es_id)) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ BEGIN_MULTIPART
+ {
+ PART_BEGIN(single_dset_eswait)
+ {
+ TESTING_2("synchronization using H5ESwait()");
+
+ /* Initialize wbuf */
+ for (i = 0; i < 6; i++)
+ for (j = 0; j < 10; j++)
+ wbuf[i][j] = 10 * i + j;
+
+ /* Write the dataset asynchronously */
+ if (H5Dwrite_async(dset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, wbuf, es_id) < 0)
+ PART_TEST_ERROR(single_dset_eswait);
+
+ /* Wait for the event stack to complete */
+ if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0)
+ PART_TEST_ERROR(single_dset_eswait);
+ if (op_failed)
+ PART_TEST_ERROR(single_dset_eswait);
+
+ /* Read the dataset asynchronously */
+ if (H5Dread_async(dset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, rbuf, es_id) < 0)
+ PART_TEST_ERROR(single_dset_eswait);
+
+ /* Wait for the event stack to complete */
+ if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0)
+ PART_TEST_ERROR(single_dset_eswait);
+ if (op_failed)
+ PART_TEST_ERROR(single_dset_eswait);
+
+ /* Verify the read data */
+ for (i = 0; i < 6; i++)
+ for (j = 0; j < 10; j++)
+ if (wbuf[i][j] != rbuf[i][j]) {
+ H5_FAILED();
+ HDprintf(" data verification failed\n");
+ PART_ERROR(single_dset_eswait);
+ } /* end if */
+
+ PASSED();
+ }
+ PART_END(single_dset_eswait);
+
+ PART_BEGIN(single_dset_dclose)
+ {
+ TESTING_2("synchronization using H5Dclose()");
+
+ /* Update wbuf */
+ for (i = 0; i < 6; i++)
+ for (j = 0; j < 10; j++)
+ wbuf[i][j] += 6 * 10;
+
+ /* Write the dataset asynchronously */
+ if (H5Dwrite_async(dset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, wbuf, es_id) < 0)
+ PART_TEST_ERROR(single_dset_dclose);
+
+ /* Close the dataset synchronously */
+ if (H5Dclose(dset_id) < 0)
+ PART_TEST_ERROR(single_dset_dclose);
+
+ /* Re-open the dataset asynchronously */
+ if ((dset_id = H5Dopen_async(file_id, "dset", H5P_DEFAULT, es_id)) < 0)
+ PART_TEST_ERROR(single_dset_dclose);
+
+ /* Read the dataset asynchronously */
+ if (H5Dread_async(dset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, rbuf, es_id) < 0)
+ PART_TEST_ERROR(single_dset_dclose);
+
+ /* Close the dataset synchronously */
+ if (H5Dclose(dset_id) < 0)
+ PART_TEST_ERROR(single_dset_dclose);
+
+ /* Verify the read data */
+ for (i = 0; i < 6; i++)
+ for (j = 0; j < 10; j++)
+ if (wbuf[i][j] != rbuf[i][j]) {
+ H5_FAILED();
+ HDprintf(" data verification failed\n");
+ PART_ERROR(single_dset_dclose);
+ } /* end if */
+
+ /* Re-open the dataset asynchronously */
+ if ((dset_id = H5Dopen_async(file_id, "dset", H5P_DEFAULT, es_id)) < 0)
+ PART_TEST_ERROR(single_dset_dclose);
+
+ PASSED();
+ }
+ PART_END(single_dset_dclose);
+
+ PART_BEGIN(single_dset_dflush)
+ {
+ TESTING_2("synchronization using H5Oflush_async()");
+
+ /* Update wbuf */
+ for (i = 0; i < 6; i++)
+ for (j = 0; j < 10; j++)
+ wbuf[i][j] += 6 * 10;
+
+ /* Write the dataset asynchronously */
+ if (H5Dwrite_async(dset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, wbuf, es_id) < 0)
+ PART_TEST_ERROR(single_dset_dflush);
+
+ /* Flush the dataset asynchronously. This will effectively work as a
+ * barrier, guaranteeing the read takes place after the write. */
+ if (H5Oflush_async(dset_id, es_id) < 0)
+ PART_TEST_ERROR(single_dset_dflush);
+
+ /* Read the dataset asynchronously */
+ if (H5Dread_async(dset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, rbuf, es_id) < 0)
+ PART_TEST_ERROR(single_dset_dflush);
+
+ /* Wait for the event stack to complete */
+ if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0)
+ PART_TEST_ERROR(single_dset_dflush);
+ if (op_failed)
+ PART_TEST_ERROR(single_dset_dflush);
+
+ /* Verify the read data */
+ for (i = 0; i < 6; i++)
+ for (j = 0; j < 10; j++)
+ if (wbuf[i][j] != rbuf[i][j]) {
+ H5_FAILED();
+ HDprintf(" data verification failed\n");
+ PART_ERROR(single_dset_dflush);
+ } /* end if */
+
+ PASSED();
+ }
+ PART_END(single_dset_dflush);
+
+ PART_BEGIN(single_dset_fclose)
+ {
+ TESTING_2("synchronization using H5Fclose()");
+
+ /* Update wbuf */
+ for (i = 0; i < 6; i++)
+ for (j = 0; j < 10; j++)
+ wbuf[i][j] += 6 * 10;
+
+ /* Write the dataset asynchronously */
+ if (H5Dwrite_async(dset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, wbuf, es_id) < 0)
+ PART_TEST_ERROR(single_dset_fclose);
+
+ /* Close the dataset asynchronously */
+ if (H5Dclose_async(dset_id, es_id) < 0)
+ PART_TEST_ERROR(single_dset_fclose);
+
+ /* Close the file synchronously */
+ if (H5Fclose(file_id) < 0)
+ PART_TEST_ERROR(single_dset_fclose);
+
+ /* Reopen the file asynchronously. */
+ if ((file_id = H5Fopen_async(ASYNC_API_TEST_FILE, H5F_ACC_RDONLY, H5P_DEFAULT, es_id)) < 0)
+ PART_TEST_ERROR(single_dset_fclose);
+
+ /* Re-open the dataset asynchronously */
+ if ((dset_id = H5Dopen_async(file_id, "dset", H5P_DEFAULT, es_id)) < 0)
+ PART_TEST_ERROR(single_dset_fclose);
+
+ /* Read the dataset asynchronously */
+ if (H5Dread_async(dset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, rbuf, es_id) < 0)
+ PART_TEST_ERROR(single_dset_fclose);
+
+ /* Close the dataset asynchronously */
+ if (H5Dclose_async(dset_id, es_id) < 0)
+ PART_TEST_ERROR(single_dset_fclose);
+
+ /* Close the file synchronously */
+ if (H5Fclose(file_id) < 0)
+ PART_TEST_ERROR(single_dset_fclose);
+
+ /* Verify the read data */
+ for (i = 0; i < 6; i++)
+ for (j = 0; j < 10; j++)
+ if (wbuf[i][j] != rbuf[i][j]) {
+ H5_FAILED();
+ HDprintf(" data verification failed\n");
+ PART_ERROR(single_dset_fclose);
+ } /* end if */
+
+ PASSED();
+ }
+ PART_END(single_dset_fclose);
+ }
+ END_MULTIPART;
+
+ TESTING_2("test cleanup");
+
+ /* Wait for the event stack to complete */
+ if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0)
+ TEST_ERROR;
+ if (op_failed)
+ TEST_ERROR;
+
+ if (H5Sclose(space_id) < 0)
+ TEST_ERROR;
+ if (H5ESclose(es_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(space_id);
+ H5Dclose(dset_id);
+ H5Fclose(file_id);
+ H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed);
+ H5ESclose(es_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+} /* end test_one_dataset_io() */
+
+/*
+ * Create file and multiple datasets, write to them and read from them
+ */
+static int
+test_multi_dataset_io(void)
+{
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t dset_id[5] = {H5I_INVALID_HID, H5I_INVALID_HID, H5I_INVALID_HID, H5I_INVALID_HID, H5I_INVALID_HID};
+ hid_t space_id = H5I_INVALID_HID;
+ hid_t es_id = H5I_INVALID_HID;
+ hsize_t dims[2] = {6, 10};
+ size_t num_in_progress;
+ hbool_t op_failed;
+ char dset_name[32];
+ int wbuf[5][6][10];
+ int rbuf[5][6][10];
+ int i, j, k;
+
+ TESTING_MULTIPART("multi dataset I/O");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_FLUSH_REFRESH)) {
+ SKIPPED();
+ HDprintf(
+ " API functions for basic file, dataset, or flush aren't supported with this connector\n");
+ return 0;
+ }
+
+ TESTING_2("test setup");
+
+ /* Create dataspace */
+ if ((space_id = H5Screate_simple(2, dims, NULL)) < 0)
+ TEST_ERROR;
+
+ /* Create event stack */
+ if ((es_id = H5EScreate()) < 0)
+ TEST_ERROR;
+
+ /* Create file asynchronously */
+ if ((file_id = H5Fcreate_async(ASYNC_API_TEST_FILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT, es_id)) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ BEGIN_MULTIPART
+ {
+ PART_BEGIN(multi_dset_open)
+ {
+ TESTING_2("keeping datasets open");
+
+ /* Loop over datasets */
+ for (i = 0; i < 5; i++) {
+ /* Set dataset name */
+ sprintf(dset_name, "dset%d", i);
+
+ /* Create the dataset asynchronously */
+ if ((dset_id[i] = H5Dcreate_async(file_id, dset_name, H5T_NATIVE_INT, space_id, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT, es_id)) < 0)
+ PART_TEST_ERROR(multi_dset_open);
+
+ /* Initialize wbuf. Must use a new slice of wbuf for each dset
+ * since we can't overwrite the buffers until I/O is done. */
+ for (j = 0; j < 6; j++)
+ for (k = 0; k < 10; k++)
+ wbuf[i][j][k] = 6 * 10 * i + 10 * j + k;
+
+ /* Write the dataset asynchronously */
+ if (H5Dwrite_async(dset_id[i], H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, wbuf[i],
+ es_id) < 0)
+ PART_TEST_ERROR(multi_dset_open);
+ } /* end for */
+
+ /* Flush the file asynchronously. This will effectively work as a
+ * barrier, guaranteeing the read takes place after the write. */
+ if (H5Fflush_async(file_id, H5F_SCOPE_LOCAL, es_id) < 0)
+ PART_TEST_ERROR(multi_dset_open);
+
+ /* Loop over datasets */
+ for (i = 0; i < 5; i++) {
+ /* Read the dataset asynchronously */
+ if (H5Dread_async(dset_id[i], H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, rbuf[i], es_id) <
+ 0)
+ PART_TEST_ERROR(multi_dset_open);
+ } /* end for */
+
+ /* Wait for the event stack to complete */
+ if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0)
+ PART_TEST_ERROR(multi_dset_open);
+ if (op_failed)
+ PART_TEST_ERROR(multi_dset_open);
+ /*printf("\nwbuf:\n");
+ for(i = 0; i < 5; i++) {
+ for(j = 0; j < 6; j++) {
+ for(k = 0; k < 10; k++)
+ printf("%d ", wbuf[i][j][k]);
+ printf("\n");
+ }
+ printf("\n");
+ }
+ printf("\nrbuf:\n");
+ for(i = 0; i < 5; i++) {
+ for(j = 0; j < 6; j++) {
+ for(k = 0; k < 10; k++)
+ printf("%d ", rbuf[i][j][k]);
+ printf("\n");
+ }
+ printf("\n");
+ }*/
+ /* Verify the read data */
+ for (i = 0; i < 5; i++)
+ for (j = 0; j < 6; j++)
+ for (k = 0; k < 10; k++)
+ if (wbuf[i][j][k] != rbuf[i][j][k]) {
+ H5_FAILED();
+ HDprintf(" data verification failed\n");
+ PART_ERROR(multi_dset_open);
+ } /* end if */
+
+ /* Close the datasets */
+ for (i = 0; i < 5; i++)
+ if (H5Dclose(dset_id[i]) < 0)
+ PART_TEST_ERROR(multi_dset_open);
+
+ PASSED();
+ }
+ PART_END(multi_dset_open);
+
+ PART_BEGIN(multi_dset_close)
+ {
+ TESTING_2("closing datasets between I/O");
+
+ /* Loop over datasets */
+ for (i = 0; i < 5; i++) {
+ /* Set dataset name */
+ sprintf(dset_name, "dset%d", i);
+
+ /* Open the dataset asynchronously */
+ if ((dset_id[0] = H5Dopen_async(file_id, dset_name, H5P_DEFAULT, es_id)) < 0)
+ PART_TEST_ERROR(multi_dset_close);
+
+ /* Update wbuf */
+ for (j = 0; j < 6; j++)
+ for (k = 0; k < 10; k++)
+ wbuf[i][j][k] += 5 * 6 * 10;
+
+ /* Write the dataset asynchronously */
+ if (H5Dwrite_async(dset_id[0], H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, wbuf[i],
+ es_id) < 0)
+ PART_TEST_ERROR(multi_dset_close);
+
+ /* Close the dataset asynchronously */
+ if (H5Dclose_async(dset_id[0], es_id) < 0)
+ PART_TEST_ERROR(multi_dset_close);
+ } /* end for */
+
+ /* Flush the file asynchronously. This will effectively work as a
+ * barrier, guaranteeing the read takes place after the write. */
+ if (H5Fflush_async(file_id, H5F_SCOPE_LOCAL, es_id) < 0)
+ PART_TEST_ERROR(multi_dset_close);
+
+ /* Loop over datasets */
+ for (i = 0; i < 5; i++) {
+ /* Set dataset name */
+ sprintf(dset_name, "dset%d", i);
+
+ /* Open the dataset asynchronously */
+ if ((dset_id[0] = H5Dopen_async(file_id, dset_name, H5P_DEFAULT, es_id)) < 0)
+ PART_TEST_ERROR(multi_dset_close);
+
+ /* Read the dataset asynchronously */
+ if (H5Dread_async(dset_id[0], H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, rbuf[i], es_id) <
+ 0)
+ PART_TEST_ERROR(multi_dset_close);
+
+ /* Close the dataset asynchronously */
+ if (H5Dclose_async(dset_id[0], es_id) < 0)
+ PART_TEST_ERROR(multi_dset_close);
+ } /* end for */
+
+ /* Wait for the event stack to complete */
+ if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0)
+ PART_TEST_ERROR(multi_dset_close);
+ if (op_failed)
+ PART_TEST_ERROR(multi_dset_close);
+ /*printf("\nwbuf:\n");
+ for(i = 0; i < 5; i++) {
+ for(j = 0; j < 6; j++) {
+ for(k = 0; k < 10; k++)
+ printf("%d ", wbuf[i][j][k]);
+ printf("\n");
+ }
+ printf("\n");
+ }
+ printf("\nrbuf:\n");
+ for(i = 0; i < 5; i++) {
+ for(j = 0; j < 6; j++) {
+ for(k = 0; k < 10; k++)
+ printf("%d ", rbuf[i][j][k]);
+ printf("\n");
+ }
+ printf("\n");
+ }*/
+ /* Verify the read data */
+ for (i = 0; i < 5; i++)
+ for (j = 0; j < 6; j++)
+ for (k = 0; k < 10; k++)
+ if (wbuf[i][j][k] != rbuf[i][j][k]) {
+ H5_FAILED();
+ HDprintf(" data verification failed\n");
+ PART_ERROR(multi_dset_close);
+ } /* end if */
+
+ PASSED();
+ }
+ PART_END(multi_dset_close);
+ }
+ END_MULTIPART;
+
+ TESTING_2("test cleanup");
+
+ if (H5Fclose_async(file_id, es_id) < 0)
+ TEST_ERROR;
+
+ /* Wait for the event stack to complete */
+ if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0)
+ TEST_ERROR;
+ if (op_failed)
+ TEST_ERROR;
+
+ if (H5Sclose(space_id) < 0)
+ TEST_ERROR;
+ if (H5ESclose(es_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(space_id);
+ for (i = 0; i < 5; i++)
+ H5Dclose(dset_id[i]);
+ H5Fclose(file_id);
+ H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed);
+ H5ESclose(es_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+} /* end test_multi_dataset_io() */
+
+/*
+ * Create multiple files, each with a single dataset, write to them and read
+ * from them
+ */
+static int
+test_multi_file_dataset_io(void)
+{
+ hid_t file_id[5] = {H5I_INVALID_HID, H5I_INVALID_HID, H5I_INVALID_HID, H5I_INVALID_HID, H5I_INVALID_HID};
+ hid_t dset_id[5] = {H5I_INVALID_HID, H5I_INVALID_HID, H5I_INVALID_HID, H5I_INVALID_HID, H5I_INVALID_HID};
+ hid_t space_id = H5I_INVALID_HID;
+ hid_t es_id = H5I_INVALID_HID;
+ hsize_t dims[2] = {6, 10};
+ size_t num_in_progress;
+ hbool_t op_failed;
+ char file_name[32];
+ int wbuf[5][6][10];
+ int rbuf[5][6][10];
+ int i, j, k;
+
+ TESTING_MULTIPART("multi file dataset I/O");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_FLUSH_REFRESH)) {
+ SKIPPED();
+ HDprintf(
+ " API functions for basic file, dataset, or flush aren't supported with this connector\n");
+ return 0;
+ }
+
+ TESTING_2("test setup");
+
+ /* Create dataspace */
+ if ((space_id = H5Screate_simple(2, dims, NULL)) < 0)
+ TEST_ERROR;
+
+ /* Create event stack */
+ if ((es_id = H5EScreate()) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ BEGIN_MULTIPART
+ {
+ PART_BEGIN(multi_file_dset_open)
+ {
+ TESTING_2("keeping files and datasets open");
+
+ /* Loop over files */
+ for (i = 0; i < 5; i++) {
+ /* Set file name */
+ sprintf(file_name, ASYNC_API_TEST_FILE_PRINTF, i);
+
+ /* Create file asynchronously */
+ if ((file_id[i] =
+ H5Fcreate_async(file_name, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT, es_id)) < 0)
+ PART_TEST_ERROR(multi_file_dset_open);
+ if (i > max_printf_file)
+ max_printf_file = i;
+
+ /* Create the dataset asynchronously */
+ if ((dset_id[i] = H5Dcreate_async(file_id[i], "dset", H5T_NATIVE_INT, space_id, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT, es_id)) < 0)
+ PART_TEST_ERROR(multi_file_dset_open);
+
+ /* Initialize wbuf. Must use a new slice of wbuf for each dset
+ * since we can't overwrite the buffers until I/O is done. */
+ for (j = 0; j < 6; j++)
+ for (k = 0; k < 10; k++)
+ wbuf[i][j][k] = 6 * 10 * i + 10 * j + k;
+
+ /* Write the dataset asynchronously */
+ if (H5Dwrite_async(dset_id[i], H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, wbuf[i],
+ es_id) < 0)
+ PART_TEST_ERROR(multi_file_dset_open);
+ } /* end for */
+
+ /* Loop over files */
+ for (i = 0; i < 5; i++) {
+ /* Flush the dataset asynchronously. This will effectively work as a
+ * barrier, guaranteeing the read takes place after the write. */
+ if (H5Oflush_async(dset_id[i], es_id) < 0)
+ PART_TEST_ERROR(multi_file_dset_open);
+
+ /* Read the dataset asynchronously */
+ if (H5Dread_async(dset_id[i], H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, rbuf[i], es_id) <
+ 0)
+ PART_TEST_ERROR(multi_file_dset_open);
+ } /* end for */
+
+ /* Wait for the event stack to complete */
+ if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0)
+ PART_TEST_ERROR(multi_file_dset_open);
+ if (op_failed)
+ PART_TEST_ERROR(multi_file_dset_open);
+
+ /* Verify the read data */
+ for (i = 0; i < 5; i++)
+ for (j = 0; j < 6; j++)
+ for (k = 0; k < 10; k++)
+ if (wbuf[i][j][k] != rbuf[i][j][k]) {
+ H5_FAILED();
+ HDprintf(" data verification failed\n");
+ PART_ERROR(multi_file_dset_open);
+ } /* end if */
+
+ /* Close the datasets */
+ for (i = 0; i < 5; i++)
+ if (H5Dclose(dset_id[i]) < 0)
+ PART_TEST_ERROR(multi_file_dset_open);
+
+ PASSED();
+ }
+ PART_END(multi_file_dset_open);
+
+ PART_BEGIN(multi_file_dset_dclose)
+ {
+ TESTING_2("closing datasets between I/O");
+
+ /* Loop over files */
+ for (i = 0; i < 5; i++) {
+ /* Open the dataset asynchronously */
+ if ((dset_id[0] = H5Dopen_async(file_id[i], "dset", H5P_DEFAULT, es_id)) < 0)
+ PART_TEST_ERROR(multi_file_dset_dclose);
+
+ /* Update wbuf */
+ for (j = 0; j < 6; j++)
+ for (k = 0; k < 10; k++)
+ wbuf[i][j][k] += 5 * 6 * 10;
+
+ /* Write the dataset asynchronously */
+ if (H5Dwrite_async(dset_id[0], H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, wbuf[i],
+ es_id) < 0)
+ PART_TEST_ERROR(multi_file_dset_dclose);
+
+ /* Close the dataset asynchronously */
+ if (H5Dclose_async(dset_id[0], es_id) < 0)
+ PART_TEST_ERROR(multi_file_dset_dclose);
+ } /* end for */
+
+ /* Loop over files */
+ for (i = 0; i < 5; i++) {
+ /* Flush the file asynchronously. This will effectively work as a
+ * barrier, guaranteeing the read takes place after the write. */
+ if (H5Fflush_async(file_id[i], H5F_SCOPE_LOCAL, es_id) < 0)
+ PART_TEST_ERROR(multi_file_dset_open);
+
+ /* Open the dataset asynchronously */
+ if ((dset_id[0] = H5Dopen_async(file_id[i], "dset", H5P_DEFAULT, es_id)) < 0)
+ PART_TEST_ERROR(multi_file_dset_dclose);
+
+ /* Read the dataset asynchronously */
+ if (H5Dread_async(dset_id[0], H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, rbuf[i], es_id) <
+ 0)
+ PART_TEST_ERROR(multi_file_dset_dclose);
+
+ /* Close the dataset asynchronously */
+ if (H5Dclose_async(dset_id[0], es_id) < 0)
+ PART_TEST_ERROR(multi_file_dset_dclose);
+ } /* end for */
+
+ /* Wait for the event stack to complete */
+ if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0)
+ PART_TEST_ERROR(multi_file_dset_dclose);
+ if (op_failed)
+ PART_TEST_ERROR(multi_file_dset_dclose);
+
+ /* Verify the read data */
+ for (i = 0; i < 5; i++)
+ for (j = 0; j < 6; j++)
+ for (k = 0; k < 10; k++)
+ if (wbuf[i][j][k] != rbuf[i][j][k]) {
+ H5_FAILED();
+ HDprintf(" data verification failed\n");
+ PART_ERROR(multi_file_dset_dclose);
+ } /* end if */
+
+ /* Close the files */
+ for (i = 0; i < 5; i++)
+ if (H5Fclose(file_id[i]) < 0)
+ PART_TEST_ERROR(multi_file_dset_dclose);
+
+ PASSED();
+ }
+ PART_END(multi_file_dset_dclose);
+
+ PART_BEGIN(multi_file_dset_fclose)
+ {
+ TESTING_2("closing files between I/O");
+
+ /* Loop over files */
+ for (i = 0; i < 5; i++) {
+ /* Set file name */
+ sprintf(file_name, ASYNC_API_TEST_FILE_PRINTF, i);
+
+ /* Open the file asynchronously */
+ if ((file_id[0] = H5Fopen_async(file_name, H5F_ACC_RDWR, H5P_DEFAULT, es_id)) < 0)
+ PART_TEST_ERROR(multi_file_dset_fclose);
+
+ /* Open the dataset asynchronously */
+ if ((dset_id[0] = H5Dopen_async(file_id[0], "dset", H5P_DEFAULT, es_id)) < 0)
+ PART_TEST_ERROR(multi_file_dset_fclose);
+
+ /* Update wbuf */
+ for (j = 0; j < 6; j++)
+ for (k = 0; k < 10; k++)
+ wbuf[i][j][k] += 5 * 6 * 10;
+
+ /* Write the dataset asynchronously */
+ if (H5Dwrite_async(dset_id[0], H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, wbuf[i],
+ es_id) < 0)
+ PART_TEST_ERROR(multi_file_dset_fclose);
+
+ /* Close the dataset asynchronously */
+ if (H5Dclose_async(dset_id[0], es_id) < 0)
+ PART_TEST_ERROR(multi_file_dset_fclose);
+
+ /* Close the file asynchronously */
+ if (H5Fclose_async(file_id[0], es_id) < 0)
+ PART_TEST_ERROR(multi_file_dset_fclose);
+ } /* end for */
+
+ /* Wait for the event stack to complete */
+ if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0)
+ PART_TEST_ERROR(multi_file_dset_fclose);
+ if (op_failed)
+ PART_TEST_ERROR(multi_file_dset_fclose);
+
+ /* Loop over files */
+ for (i = 0; i < 5; i++) {
+ /* Set file name */
+ sprintf(file_name, ASYNC_API_TEST_FILE_PRINTF, i);
+
+ /* Open the file asynchronously */
+ if ((file_id[0] = H5Fopen_async(file_name, H5F_ACC_RDONLY, H5P_DEFAULT, es_id)) < 0)
+ PART_TEST_ERROR(multi_file_dset_fclose);
+
+ /* Open the dataset asynchronously */
+ if ((dset_id[0] = H5Dopen_async(file_id[0], "dset", H5P_DEFAULT, es_id)) < 0)
+ PART_TEST_ERROR(multi_file_dset_fclose);
+
+ /* Read the dataset asynchronously */
+ if (H5Dread_async(dset_id[0], H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, rbuf[i], es_id) <
+ 0)
+ PART_TEST_ERROR(multi_file_dset_fclose);
+
+ /* Close the dataset asynchronously */
+ if (H5Dclose_async(dset_id[0], es_id) < 0)
+ PART_TEST_ERROR(multi_file_dset_fclose);
+
+ /* Close the file asynchronously */
+ if (H5Fclose_async(file_id[0], es_id) < 0)
+ PART_TEST_ERROR(multi_file_dset_fclose);
+ } /* end for */
+
+ /* Wait for the event stack to complete */
+ if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0)
+ PART_TEST_ERROR(multi_file_dset_fclose);
+ if (op_failed)
+ PART_TEST_ERROR(multi_file_dset_fclose);
+
+ /* Verify the read data */
+ for (i = 0; i < 5; i++)
+ for (j = 0; j < 6; j++)
+ for (k = 0; k < 10; k++)
+ if (wbuf[i][j][k] != rbuf[i][j][k]) {
+ H5_FAILED();
+ HDprintf(" data verification failed\n");
+ PART_ERROR(multi_file_dset_fclose);
+ } /* end if */
+
+ PASSED();
+ }
+ PART_END(multi_file_dset_fclose);
+ }
+ END_MULTIPART;
+
+ TESTING_2("test cleanup");
+
+ /* Wait for the event stack to complete */
+ if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0)
+ TEST_ERROR;
+ if (op_failed)
+ TEST_ERROR;
+
+ if (H5Sclose(space_id) < 0)
+ TEST_ERROR;
+ if (H5ESclose(es_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(space_id);
+ for (i = 0; i < 5; i++) {
+ H5Dclose(dset_id[i]);
+ H5Fclose(file_id[i]);
+ } /* end for */
+ H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed);
+ H5ESclose(es_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+} /* end test_multi_file_dataset_io() */
+
+/*
+ * Create multiple files, each with a single group and dataset, write to them
+ * and read from them
+ */
+static int
+test_multi_file_grp_dset_io(void)
+{
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t grp_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t space_id = H5I_INVALID_HID;
+ hid_t es_id = H5I_INVALID_HID;
+ hsize_t dims[2] = {6, 10};
+ size_t num_in_progress;
+ hbool_t op_failed;
+ char file_name[32];
+ int wbuf[5][6][10];
+ int rbuf[5][6][10];
+ int i, j, k;
+
+ TESTING_MULTIPART("multi file dataset I/O with groups");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) {
+ SKIPPED();
+ HDprintf(
+ " API functions for basic file, group, or dataset aren't supported with this connector\n");
+ return 0;
+ }
+
+ TESTING_2("test setup");
+
+ /* Create dataspace */
+ if ((space_id = H5Screate_simple(2, dims, NULL)) < 0)
+ TEST_ERROR;
+
+ /* Create event stack */
+ if ((es_id = H5EScreate()) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ BEGIN_MULTIPART
+ {
+ PART_BEGIN(multi_file_grp_dset_no_kick)
+ {
+ TESTING_2("without intermediate calls to H5ESwait()");
+
+ /* Loop over files */
+ for (i = 0; i < 5; i++) {
+ /* Set file name */
+ sprintf(file_name, ASYNC_API_TEST_FILE_PRINTF, i);
+
+ /* Create file asynchronously */
+ if ((file_id = H5Fcreate_async(file_name, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT, es_id)) <
+ 0)
+ PART_TEST_ERROR(multi_file_grp_dset_no_kick);
+ if (i > max_printf_file)
+ max_printf_file = i;
+
+ /* Create the group asynchronously */
+ if ((grp_id = H5Gcreate_async(file_id, "grp", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT, es_id)) <
+ 0)
+ PART_TEST_ERROR(multi_file_grp_dset_no_kick);
+
+ /* Create the dataset asynchronously */
+ if ((dset_id = H5Dcreate_async(grp_id, "dset", H5T_NATIVE_INT, space_id, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT, es_id)) < 0)
+ PART_TEST_ERROR(multi_file_grp_dset_no_kick);
+
+ /* Initialize wbuf. Must use a new slice of wbuf for each dset
+ * since we can't overwrite the buffers until I/O is done. */
+ for (j = 0; j < 6; j++)
+ for (k = 0; k < 10; k++)
+ wbuf[i][j][k] = 6 * 10 * i + 10 * j + k;
+
+ /* Write the dataset asynchronously */
+ if (H5Dwrite_async(dset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, wbuf[i], es_id) <
+ 0)
+ PART_TEST_ERROR(multi_file_grp_dset_no_kick);
+
+ /* Close the dataset asynchronously */
+ if (H5Dclose_async(dset_id, es_id) < 0)
+ PART_TEST_ERROR(multi_file_grp_dset_no_kick);
+
+ /* Close the group asynchronously */
+ if (H5Gclose_async(grp_id, es_id) < 0)
+ PART_TEST_ERROR(multi_file_grp_dset_no_kick);
+
+ /* Close the file asynchronously */
+ if (H5Fclose_async(file_id, es_id) < 0)
+ PART_TEST_ERROR(multi_file_grp_dset_no_kick);
+ } /* end for */
+
+ /* Wait for the event stack to complete */
+ if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0)
+ PART_TEST_ERROR(multi_file_grp_dset_no_kick);
+ if (op_failed)
+ PART_TEST_ERROR(multi_file_grp_dset_no_kick);
+
+ /* Loop over files */
+ for (i = 0; i < 5; i++) {
+ /* Set file name */
+ sprintf(file_name, ASYNC_API_TEST_FILE_PRINTF, i);
+
+ /* Open the file asynchronously */
+ if ((file_id = H5Fopen_async(file_name, H5F_ACC_RDONLY, H5P_DEFAULT, es_id)) < 0)
+ PART_TEST_ERROR(multi_file_grp_dset_no_kick);
+
+ /* Open the group asynchronously */
+ if ((grp_id = H5Gopen_async(file_id, "grp", H5P_DEFAULT, es_id)) < 0)
+ PART_TEST_ERROR(multi_file_grp_dset_no_kick);
+
+ /* Open the dataset asynchronously */
+ if ((dset_id = H5Dopen_async(grp_id, "dset", H5P_DEFAULT, es_id)) < 0)
+ PART_TEST_ERROR(multi_file_grp_dset_no_kick);
+
+ /* Read the dataset asynchronously */
+ if (H5Dread_async(dset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, rbuf[i], es_id) < 0)
+ PART_TEST_ERROR(multi_file_grp_dset_no_kick);
+
+ /* Close the dataset asynchronously */
+ if (H5Dclose_async(dset_id, es_id) < 0)
+ PART_TEST_ERROR(multi_file_grp_dset_no_kick);
+
+ /* Close the group asynchronously */
+ if (H5Gclose_async(grp_id, es_id) < 0)
+ PART_TEST_ERROR(multi_file_grp_dset_no_kick);
+
+ /* Close the file asynchronously */
+ if (H5Fclose_async(file_id, es_id) < 0)
+ PART_TEST_ERROR(multi_file_grp_dset_no_kick);
+ } /* end for */
+
+ /* Wait for the event stack to complete */
+ if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0)
+ PART_TEST_ERROR(multi_file_grp_dset_no_kick);
+ if (op_failed)
+ PART_TEST_ERROR(multi_file_grp_dset_no_kick);
+
+ /* Verify the read data */
+ for (i = 0; i < 5; i++)
+ for (j = 0; j < 6; j++)
+ for (k = 0; k < 10; k++)
+ if (wbuf[i][j][k] != rbuf[i][j][k]) {
+ H5_FAILED();
+ HDprintf(" data verification failed\n");
+ PART_ERROR(multi_file_grp_dset_no_kick);
+ } /* end if */
+
+ PASSED();
+ }
+ PART_END(multi_file_grp_dset_no_kick);
+
+ PART_BEGIN(multi_file_grp_dset_kick)
+ {
+ TESTING_2("with intermediate calls to H5ESwait() (0 timeout)");
+
+ /* Loop over files */
+ for (i = 0; i < 5; i++) {
+ /* Set file name */
+ HDsnprintf(file_name, sizeof(file_name), ASYNC_API_TEST_FILE_PRINTF, i);
+
+ /* Create file asynchronously */
+ if ((file_id = H5Fcreate_async(file_name, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT, es_id)) <
+ 0)
+ PART_TEST_ERROR(multi_file_grp_dset_kick);
+ if (i > max_printf_file)
+ max_printf_file = i;
+
+ /* Create the group asynchronously */
+ if ((grp_id = H5Gcreate_async(file_id, "grp", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT, es_id)) <
+ 0)
+ PART_TEST_ERROR(multi_file_grp_dset_kick);
+
+ /* Create the dataset asynchronously */
+ if ((dset_id = H5Dcreate_async(grp_id, "dset", H5T_NATIVE_INT, space_id, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT, es_id)) < 0)
+ PART_TEST_ERROR(multi_file_grp_dset_kick);
+
+ /* Update wbuf with values distinct from those written in the previous part so stale data would be detected */
+ for (j = 0; j < 6; j++)
+ for (k = 0; k < 10; k++)
+ wbuf[i][j][k] += 5 * 6 * 10;
+
+ /* Write the dataset asynchronously */
+ if (H5Dwrite_async(dset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, wbuf[i], es_id) <
+ 0)
+ PART_TEST_ERROR(multi_file_grp_dset_kick);
+
+ /* Close the dataset asynchronously */
+ if (H5Dclose_async(dset_id, es_id) < 0)
+ PART_TEST_ERROR(multi_file_grp_dset_kick);
+
+ /* Close the group asynchronously */
+ if (H5Gclose_async(grp_id, es_id) < 0)
+ PART_TEST_ERROR(multi_file_grp_dset_kick);
+
+ /* Close the file asynchronously */
+ if (H5Fclose_async(file_id, es_id) < 0)
+ PART_TEST_ERROR(multi_file_grp_dset_kick);
+
+ /* Kick the event stack to make progress */
+ if (H5ESwait(es_id, 0, &num_in_progress, &op_failed) < 0)
+ PART_TEST_ERROR(multi_file_grp_dset_kick);
+ if (op_failed)
+ PART_TEST_ERROR(multi_file_grp_dset_kick);
+ } /* end for */
+
+ /* Wait for the event stack to complete */
+ if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0)
+ PART_TEST_ERROR(multi_file_grp_dset_kick);
+ if (op_failed)
+ PART_TEST_ERROR(multi_file_grp_dset_kick);
+
+ /* Loop over files */
+ for (i = 0; i < 5; i++) {
+ /* Set file name */
+ HDsnprintf(file_name, sizeof(file_name), ASYNC_API_TEST_FILE_PRINTF, i);
+
+ /* Open the file asynchronously */
+ if ((file_id = H5Fopen_async(file_name, H5F_ACC_RDONLY, H5P_DEFAULT, es_id)) < 0)
+ PART_TEST_ERROR(multi_file_grp_dset_kick);
+
+ /* Open the group asynchronously */
+ if ((grp_id = H5Gopen_async(file_id, "grp", H5P_DEFAULT, es_id)) < 0)
+ PART_TEST_ERROR(multi_file_grp_dset_kick);
+
+ /* Open the dataset asynchronously */
+ if ((dset_id = H5Dopen_async(grp_id, "dset", H5P_DEFAULT, es_id)) < 0)
+ PART_TEST_ERROR(multi_file_grp_dset_kick);
+
+ /* Read the dataset asynchronously */
+ if (H5Dread_async(dset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, rbuf[i], es_id) < 0)
+ PART_TEST_ERROR(multi_file_grp_dset_kick);
+
+ /* Close the dataset asynchronously */
+ if (H5Dclose_async(dset_id, es_id) < 0)
+ PART_TEST_ERROR(multi_file_grp_dset_kick);
+
+ /* Close the group asynchronously */
+ if (H5Gclose_async(grp_id, es_id) < 0)
+ PART_TEST_ERROR(multi_file_grp_dset_kick);
+
+ /* Close the file asynchronously */
+ if (H5Fclose_async(file_id, es_id) < 0)
+ PART_TEST_ERROR(multi_file_grp_dset_kick);
+
+ /* Kick the event stack to make progress */
+ if (H5ESwait(es_id, 0, &num_in_progress, &op_failed) < 0)
+ PART_TEST_ERROR(multi_file_grp_dset_kick);
+ if (op_failed)
+ PART_TEST_ERROR(multi_file_grp_dset_kick);
+ } /* end for */
+
+ /* Wait for the event stack to complete */
+ if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0)
+ PART_TEST_ERROR(multi_file_grp_dset_kick);
+ if (op_failed)
+ PART_TEST_ERROR(multi_file_grp_dset_kick);
+
+ /* Verify the read data */
+ for (i = 0; i < 5; i++)
+ for (j = 0; j < 6; j++)
+ for (k = 0; k < 10; k++)
+ if (wbuf[i][j][k] != rbuf[i][j][k]) {
+ H5_FAILED();
+ HDprintf(" data verification failed\n");
+ PART_ERROR(multi_file_grp_dset_kick);
+ } /* end if */
+
+ PASSED();
+ }
+ PART_END(multi_file_grp_dset_kick);
+ }
+ END_MULTIPART;
+
+ TESTING_2("test cleanup");
+
+ /* Wait for the event stack to complete */
+ if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0)
+ TEST_ERROR;
+ if (op_failed)
+ TEST_ERROR;
+
+ if (H5Sclose(space_id) < 0)
+ TEST_ERROR;
+ if (H5ESclose(es_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(space_id);
+ H5Gclose(grp_id);
+ H5Dclose(dset_id);
+ H5Fclose(file_id);
+ H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed);
+ H5ESclose(es_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+} /* end test_multi_file_grp_dset_io() */
+
+/*
+ * Create a file and a chunked, extendible dataset; extend the dataset one row at a
+ * time while writing each new row, then read back and verify the data and dataspaces
+ */
+static int
+test_set_extent(void)
+{
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
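+ /* One file dataspace per extent step, and a matching array for the dataspaces queried back from the dataset */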
+ hid_t fspace_id[6] = {H5I_INVALID_HID, H5I_INVALID_HID, H5I_INVALID_HID,
+ H5I_INVALID_HID, H5I_INVALID_HID, H5I_INVALID_HID};
+ hid_t fspace_out[6] = {H5I_INVALID_HID, H5I_INVALID_HID, H5I_INVALID_HID,
+ H5I_INVALID_HID, H5I_INVALID_HID, H5I_INVALID_HID};
+ hid_t mspace_id = H5I_INVALID_HID;
+ hid_t dcpl_id = H5I_INVALID_HID;
+ hid_t es_id = H5I_INVALID_HID;
+ hsize_t dims[2] = {1, 10};
+ hsize_t mdims[2] = {7, 10};
+ hsize_t cdims[2] = {2, 3};
+ hsize_t start[2] = {0, 0};
+ hsize_t count[2] = {1, 10};
+ size_t num_in_progress;
+ hbool_t op_failed;
+ htri_t tri_ret;
+ int wbuf[6][10];
+ int rbuf[6][10];
+ int i, j;
+
+ TESTING("H5Dset_extent() and H5Dget_space()");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_FLUSH_REFRESH) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file, dataset, dataset more, or flush aren't supported with "
+ "this connector\n");
+ return 0;
+ }
+
+ /* Create file dataspace */
+ if ((fspace_id[0] = H5Screate_simple(2, dims, mdims)) < 0)
+ TEST_ERROR;
+
+ /* Create memory dataspace */
+ if ((mspace_id = H5Screate_simple(1, &dims[1], NULL)) < 0)
+ TEST_ERROR;
+
+ /* Create DCPL */
+ if ((dcpl_id = H5Pcreate(H5P_DATASET_CREATE)) < 0)
+ TEST_ERROR;
+
+ /* Set chunking; a chunked layout is required since the dataset's maximum dimensions exceed its initial dimensions */
+ if (H5Pset_chunk(dcpl_id, 2, cdims) < 0)
+ TEST_ERROR;
+
+ /* Initialize wbuf */
+ for (i = 0; i < 6; i++)
+ for (j = 0; j < 10; j++)
+ wbuf[i][j] = 10 * i + j;
+
+ /* Create event stack */
+ if ((es_id = H5EScreate()) < 0)
+ TEST_ERROR;
+
+ /* Create file asynchronously */
+ if ((file_id = H5Fcreate_async(ASYNC_API_TEST_FILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT, es_id)) < 0)
+ TEST_ERROR;
+
+ /* Create the dataset asynchronously */
+ if ((dset_id = H5Dcreate_async(file_id, "dset", H5T_NATIVE_INT, fspace_id[0], H5P_DEFAULT, dcpl_id,
+ H5P_DEFAULT, es_id)) < 0)
+ TEST_ERROR;
+
+ /* Extend the dataset's first dimension from 1 to 6 rows, one row at a time */
+ for (i = 0; i < 6; i++) {
+ /* No need to extend on the first iteration */
+ if (i) {
+ /* Copy dataspace */
+ if ((fspace_id[i] = H5Scopy(fspace_id[i - 1])) < 0)
+ TEST_ERROR;
+
+ /* Extend dataspace */
+ dims[0] = (hsize_t)(i + 1);
+ if (H5Sset_extent_simple(fspace_id[i], 2, dims, mdims) < 0)
+ TEST_ERROR;
+
+ /* Extend dataset asynchronously */
+ if (H5Dset_extent_async(dset_id, dims, es_id) < 0)
+ TEST_ERROR;
+
+ /* Select hyperslab in file space to match new region */
+ start[0] = (hsize_t)i;
+ if (H5Sselect_hyperslab(fspace_id[i], H5S_SELECT_SET, start, NULL, count, NULL) < 0)
+ TEST_ERROR;
+ } /* end if */
+
+ /* Get the dataset's dataspace asynchronously; the returned ID is a "future" ID that resolves once the operation completes */
+ if ((fspace_out[i] = H5Dget_space_async(dset_id, es_id)) < 0)
+ TEST_ERROR;
+
+ /* Write the dataset slice asynchronously */
+ if (H5Dwrite_async(dset_id, H5T_NATIVE_INT, mspace_id, fspace_id[i], H5P_DEFAULT, wbuf[i], es_id) < 0)
+ TEST_ERROR;
+ } /* end for */
+
+ /* Flush the dataset asynchronously. This will effectively work as a
+ * barrier, guaranteeing the read takes place after the write. */
+ if (H5Oflush_async(dset_id, es_id) < 0)
+ TEST_ERROR;
+
+ /* Read the entire dataset asynchronously */
+ if (H5Dread_async(dset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, rbuf, es_id) < 0)
+ TEST_ERROR;
+
+ /* Verify extents are correct. We do not need to wait because of the
+ * "future id" capability. */
+ for (i = 0; i < 6; i++) {
+ if ((tri_ret = H5Sextent_equal(fspace_id[i], fspace_out[i])) < 0)
+ TEST_ERROR;
+ if (!tri_ret)
+ FAIL_PUTS_ERROR(" dataspaces are not equal\n");
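+ /* Keep fspace_id[0] open here; it is reused below for the final extension */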
+ if (i && H5Sclose(fspace_id[i]) < 0)
+ TEST_ERROR;
+ if (H5Sclose(fspace_out[i]) < 0)
+ TEST_ERROR;
+ } /* end for */
+
+ /* Wait for the event stack to complete */
+ if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0)
+ TEST_ERROR;
+ if (op_failed)
+ TEST_ERROR;
+
+ /* Verify the read data */
+ for (i = 0; i < 6; i++)
+ for (j = 0; j < 10; j++)
+ if (wbuf[i][j] != rbuf[i][j])
+ FAIL_PUTS_ERROR(" data verification failed\n");
+
+ /*
+ * Now try extending the dataset, closing it, reopening it, and getting the
+ * space.
+ */
+ /* Extend dataspace */
+ dims[0] = (hsize_t)7;
+ if (H5Sset_extent_simple(fspace_id[0], 2, dims, mdims) < 0)
+ TEST_ERROR;
+
+ /* Extend dataset asynchronously */
+ if (H5Dset_extent_async(dset_id, dims, es_id) < 0)
+ TEST_ERROR;
+
+ /* Close dataset asynchronously */
+ if (H5Dclose_async(dset_id, es_id) < 0)
+ TEST_ERROR;
+
+ /* Open dataset asynchronously */
+ if ((dset_id = H5Dopen_async(file_id, "dset", H5P_DEFAULT, es_id)) < 0)
+ TEST_ERROR;
+
+ /* Get dataset dataspace asynchronously */
+ if ((fspace_out[0] = H5Dget_space_async(dset_id, es_id)) < 0)
+ TEST_ERROR;
+
+ /* Verify the extents match */
+ if ((tri_ret = H5Sextent_equal(fspace_id[0], fspace_out[0])) < 0)
+ TEST_ERROR;
+ if (!tri_ret)
+ FAIL_PUTS_ERROR(" dataspaces are not equal\n");
+
+ /* Wait for the event stack to complete */
+ if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0)
+ TEST_ERROR;
+ if (op_failed)
+ TEST_ERROR;
+
+ /* Close */
+ if (H5Dclose(dset_id) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+ if (H5Sclose(mspace_id) < 0)
+ TEST_ERROR;
+ if (H5Sclose(fspace_id[0]) < 0)
+ TEST_ERROR;
+ if (H5Sclose(fspace_out[0]) < 0)
+ TEST_ERROR;
+ if (H5Pclose(dcpl_id) < 0)
+ TEST_ERROR;
+ if (H5ESclose(es_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(mspace_id);
+ for (i = 0; i < 6; i++) {
+ H5Sclose(fspace_id[i]);
+ H5Sclose(fspace_out[i]);
+ } /* end for */
+ H5Pclose(dcpl_id);
+ H5Dclose(dset_id);
+ H5Fclose(file_id);
+ H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed);
+ H5ESclose(es_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+} /* end test_set_extent() */
+
+/*
+ * Test H5Aexists()
+ */
+static int
+test_attribute_exists(void)
+{
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t attr_id = H5I_INVALID_HID;
+ hid_t space_id = H5I_INVALID_HID;
+ hid_t es_id = H5I_INVALID_HID;
+ hsize_t dims[2] = {6, 10};
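+ /* Existence results checked before and after the attribute is created */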
+ hbool_t exists1;
+ hbool_t exists2;
+ size_t num_in_progress;
+ hbool_t op_failed;
+
+ TESTING("H5Aexists()");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_FLUSH_REFRESH) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file, dataset, attribute, or flush aren't supported with "
+ "this connector\n");
+ return 0;
+ }
+
+ /* Create dataspace */
+ if ((space_id = H5Screate_simple(2, dims, NULL)) < 0)
+ TEST_ERROR;
+
+ /* Create event stack */
+ if ((es_id = H5EScreate()) < 0)
+ TEST_ERROR;
+
+ /* Asynchronously open the test file created by an earlier test in this set */
+ if ((file_id = H5Fopen_async(ASYNC_API_TEST_FILE, H5F_ACC_RDWR, H5P_DEFAULT, es_id)) < 0)
+ TEST_ERROR;
+
+ /* Create the dataset asynchronously */
+ if ((dset_id = H5Dcreate_async(file_id, "attr_exists_dset", H5T_NATIVE_INT, space_id, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT, es_id)) < 0)
+ TEST_ERROR;
+
+ /* Check if the attribute exists asynchronously */
+ if (H5Aexists_async(dset_id, "attr", &exists1, es_id) < 0)
+ TEST_ERROR;
+
+ /* Flush the dataset asynchronously. This will effectively work as a
+ * barrier, guaranteeing the create takes place after the existence check
+ */
+ if (H5Oflush_async(dset_id, es_id) < 0)
+ TEST_ERROR;
+
+ /* Create the attribute asynchronously */
+ if ((attr_id =
+ H5Acreate_async(dset_id, "attr", H5T_NATIVE_INT, space_id, H5P_DEFAULT, H5P_DEFAULT, es_id)) < 0)
+ TEST_ERROR;
+
+ /* Flush the dataset asynchronously. This will effectively work as a
+ * barrier, guaranteeing the existence check takes place after the create.
+ */
+ if (H5Oflush_async(dset_id, es_id) < 0)
+ TEST_ERROR;
+
+ /* Check if the attribute exists asynchronously */
+ if (H5Aexists_async(dset_id, "attr", &exists2, es_id) < 0)
+ TEST_ERROR;
+
+ /* Wait for the event stack to complete */
+ if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0)
+ TEST_ERROR;
+ if (op_failed)
+ TEST_ERROR;
+
+ /* Check if H5Aexists returned the correct values */
+ if (exists1)
+ FAIL_PUTS_ERROR(" H5Aexists returned TRUE for an attribute that should not exist");
+ if (!exists2)
+ FAIL_PUTS_ERROR(" H5Aexists returned FALSE for an attribute that should exist");
+
+ /* Close */
+ if (H5Aclose_async(attr_id, es_id) < 0)
+ TEST_ERROR;
+ if (H5Dclose_async(dset_id, es_id) < 0)
+ TEST_ERROR;
+ if (H5Fclose_async(file_id, es_id) < 0)
+ TEST_ERROR;
+ if (H5Sclose(space_id) < 0)
+ TEST_ERROR;
+
+ /* Wait for the event stack to complete */
+ if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0)
+ TEST_ERROR;
+ if (op_failed)
+ TEST_ERROR;
+
+ if (H5ESclose(es_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(space_id);
+ H5Aclose(attr_id);
+ H5Dclose(dset_id);
+ H5Fclose(file_id);
+ H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed);
+ H5ESclose(es_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+} /* end test_attribute_exists() */
+
+/*
+ * Open the test file, create a dataset and an attribute on it, then write to and read back the attribute
+ */
+static int
+test_attribute_io(void)
+{
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t attr_id = H5I_INVALID_HID;
+ hid_t space_id = H5I_INVALID_HID;
+ hid_t es_id = H5I_INVALID_HID;
+ hsize_t dims[2] = {6, 10};
+ size_t num_in_progress;
+ hbool_t op_failed;
+ int wbuf[6][10];
+ int rbuf[6][10];
+ int i, j;
+
+ TESTING("attribute I/O");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_FLUSH_REFRESH) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file, dataset, attribute, or flush aren't supported with "
+ "this connector\n");
+ return 0;
+ }
+
+ /* Create dataspace */
+ if ((space_id = H5Screate_simple(2, dims, NULL)) < 0)
+ TEST_ERROR;
+
+ /* Create event stack */
+ if ((es_id = H5EScreate()) < 0)
+ TEST_ERROR;
+
+ /* Open file asynchronously */
+ if ((file_id = H5Fopen_async(ASYNC_API_TEST_FILE, H5F_ACC_RDWR, H5P_DEFAULT, es_id)) < 0)
+ TEST_ERROR;
+
+ /* Create the dataset asynchronously; it is reused by the later attribute type-conversion tests */
+ if ((dset_id = H5Dcreate_async(file_id, "attr_dset", H5T_NATIVE_INT, space_id, H5P_DEFAULT, H5P_DEFAULT,
+ H5P_DEFAULT, es_id)) < 0)
+ TEST_ERROR;
+
+ /* Create the attribute asynchronously */
+ if ((attr_id =
+ H5Acreate_async(dset_id, "attr", H5T_NATIVE_INT, space_id, H5P_DEFAULT, H5P_DEFAULT, es_id)) < 0)
+ TEST_ERROR;
+
+ /* Initialize wbuf */
+ for (i = 0; i < 6; i++)
+ for (j = 0; j < 10; j++)
+ wbuf[i][j] = 10 * i + j;
+
+ /* Write the attribute asynchronously */
+ if (H5Awrite_async(attr_id, H5T_NATIVE_INT, wbuf, es_id) < 0)
+ TEST_ERROR;
+
+ /* Flush the dataset asynchronously. This will effectively work as a
+ * barrier, guaranteeing the read takes place after the write. */
+ if (H5Oflush_async(dset_id, es_id) < 0)
+ TEST_ERROR;
+
+ /* Read the attribute asynchronously */
+ if (H5Aread_async(attr_id, H5T_NATIVE_INT, rbuf, es_id) < 0)
+ TEST_ERROR;
+
+ /* Wait for the event stack to complete */
+ if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0)
+ TEST_ERROR;
+ if (op_failed)
+ TEST_ERROR;
+
+ /* Verify the read data */
+ for (i = 0; i < 6; i++)
+ for (j = 0; j < 10; j++)
+ if (wbuf[i][j] != rbuf[i][j])
+ FAIL_PUTS_ERROR(" data verification failed\n");
+
+ /* Close the attribute asynchronously */
+ if (H5Aclose_async(attr_id, es_id) < 0)
+ TEST_ERROR;
+
+ /* Open the attribute asynchronously */
+ if ((attr_id = H5Aopen_async(dset_id, "attr", H5P_DEFAULT, es_id)) < 0)
+ TEST_ERROR;
+
+ /* Read the attribute asynchronously */
+ if (H5Aread_async(attr_id, H5T_NATIVE_INT, rbuf, es_id) < 0)
+ TEST_ERROR;
+
+ /* Wait for the event stack to complete */
+ if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0)
+ TEST_ERROR;
+ if (op_failed)
+ TEST_ERROR;
+
+ /* Verify the read data */
+ for (i = 0; i < 6; i++)
+ for (j = 0; j < 10; j++)
+ if (wbuf[i][j] != rbuf[i][j])
+ FAIL_PUTS_ERROR(" data verification failed\n");
+
+ /* Close out of order to see if it trips things up */
+ if (H5Dclose_async(dset_id, es_id) < 0)
+ TEST_ERROR;
+ if (H5Aclose_async(attr_id, es_id) < 0)
+ TEST_ERROR;
+ if (H5Fclose_async(file_id, es_id) < 0)
+ TEST_ERROR;
+ if (H5Sclose(space_id) < 0)
+ TEST_ERROR;
+
+ /* Wait for the event stack to complete */
+ if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0)
+ TEST_ERROR;
+ if (op_failed)
+ TEST_ERROR;
+
+ if (H5ESclose(es_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(space_id);
+ H5Aclose(attr_id);
+ H5Dclose(dset_id);
+ H5Fclose(file_id);
+ H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed);
+ H5ESclose(es_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+} /* end test_attribute_io() */
+
+/*
+ * Open the test file, create an attribute (by name) on an existing dataset, and write to/read from it with type conversion
+ */
+static int
+test_attribute_io_tconv(void)
+{
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t attr_id = H5I_INVALID_HID;
+ hid_t space_id = H5I_INVALID_HID;
+ hid_t es_id = H5I_INVALID_HID;
+ hsize_t dims[2] = {6, 10};
+ size_t num_in_progress;
+ hbool_t op_failed;
+ int wbuf[6][10];
+ int rbuf[6][10];
+ int i, j;
+
+ TESTING("attribute I/O with type conversion");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_FLUSH_REFRESH) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC)) {
+ SKIPPED();
+ HDprintf(
+ " API functions for basic file, attribute, or flush aren't supported with this connector\n");
+ return 0;
+ }
+
+ /* Create dataspace */
+ if ((space_id = H5Screate_simple(2, dims, NULL)) < 0)
+ TEST_ERROR;
+
+ /* Create event stack */
+ if ((es_id = H5EScreate()) < 0)
+ TEST_ERROR;
+
+ /* Open file asynchronously */
+ if ((file_id = H5Fopen_async(ASYNC_API_TEST_FILE, H5F_ACC_RDWR, H5P_DEFAULT, es_id)) < 0)
+ TEST_ERROR;
+
+ /* Create the attribute asynchronously by name, with a 16-bit big-endian unsigned file datatype so I/O with H5T_NATIVE_INT requires type conversion */
+ if ((attr_id = H5Acreate_by_name_async(file_id, "attr_dset", "attr_tconv", H5T_STD_U16BE, space_id,
+ H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT, es_id)) < 0)
+ TEST_ERROR;
+
+ /* Initialize wbuf */
+ for (i = 0; i < 6; i++)
+ for (j = 0; j < 10; j++)
+ wbuf[i][j] = 10 * i + j;
+
+ /* Write the attribute asynchronously */
+ if (H5Awrite_async(attr_id, H5T_NATIVE_INT, wbuf, es_id) < 0)
+ TEST_ERROR;
+
+ /* Flush the dataset asynchronously. This will effectively work as a
+ * barrier, guaranteeing the read takes place after the write. */
+ if (H5Fflush_async(file_id, H5F_SCOPE_LOCAL, es_id) < 0)
+ TEST_ERROR;
+
+ /* Read the attribute asynchronously */
+ if (H5Aread_async(attr_id, H5T_NATIVE_INT, rbuf, es_id) < 0)
+ TEST_ERROR;
+
+ /* Wait for the event stack to complete */
+ if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0)
+ TEST_ERROR;
+ if (op_failed)
+ TEST_ERROR;
+
+ /* Verify the read data */
+ for (i = 0; i < 6; i++)
+ for (j = 0; j < 10; j++)
+ if (wbuf[i][j] != rbuf[i][j])
+ FAIL_PUTS_ERROR(" data verification failed\n");
+
+ /* Close the attribute asynchronously */
+ if (H5Aclose_async(attr_id, es_id) < 0)
+ TEST_ERROR;
+
+ /* Open the attribute asynchronously */
+ if ((attr_id =
+ H5Aopen_by_name_async(file_id, "attr_dset", "attr_tconv", H5P_DEFAULT, H5P_DEFAULT, es_id)) < 0)
+ TEST_ERROR;
+
+ /* Read the attribute asynchronously */
+ if (H5Aread_async(attr_id, H5T_NATIVE_INT, rbuf, es_id) < 0)
+ TEST_ERROR;
+
+ /* Wait for the event stack to complete */
+ if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0)
+ TEST_ERROR;
+ if (op_failed)
+ TEST_ERROR;
+
+ /* Verify the read data */
+ for (i = 0; i < 6; i++)
+ for (j = 0; j < 10; j++)
+ if (wbuf[i][j] != rbuf[i][j])
+ FAIL_PUTS_ERROR(" data verification failed\n");
+
+ /* Close */
+ if (H5Aclose_async(attr_id, es_id) < 0)
+ TEST_ERROR;
+ if (H5Fclose_async(file_id, es_id) < 0)
+ TEST_ERROR;
+ if (H5Sclose(space_id) < 0)
+ TEST_ERROR;
+
+ /* Wait for the event stack to complete */
+ if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0)
+ TEST_ERROR;
+ if (op_failed)
+ TEST_ERROR;
+
+ if (H5ESclose(es_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(space_id);
+ H5Aclose(attr_id);
+ H5Fclose(file_id);
+ H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed);
+ H5ESclose(es_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+} /* end test_attribute_io_tconv() */
+
+/*
+ * Open the test file, create an attribute (by name) with a compound file datatype on
+ * an existing dataset, and exercise full and per-field I/O with type conversion
+ */
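+/* In-memory element type for the compound-attribute I/O tests below */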
+typedef struct tattr_cmpd_t {
+ int a;
+ int b;
+} tattr_cmpd_t;
+
+static int
+test_attribute_io_compound(void)
+{
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t attr_id = H5I_INVALID_HID;
+ hid_t space_id = H5I_INVALID_HID;
+ hid_t mtype_id = H5I_INVALID_HID;
+ hid_t ftype_id = H5I_INVALID_HID;
+ hid_t mtypea_id = H5I_INVALID_HID;
+ hid_t mtypeb_id = H5I_INVALID_HID;
+ hid_t es_id = H5I_INVALID_HID;
+ hsize_t dims[2] = {6, 10};
+ size_t num_in_progress;
+ hbool_t op_failed;
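+ /* fbuf tracks the values expected to be in the file after each full or partial write */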
+ tattr_cmpd_t wbuf[6][10];
+ tattr_cmpd_t rbuf[6][10];
+ tattr_cmpd_t fbuf[6][10];
+ int i, j;
+
+ TESTING("attribute I/O with compound type conversion");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_FLUSH_REFRESH) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC)) {
+ SKIPPED();
+ HDprintf(
+ " API functions for basic file, attribute, or flush aren't supported with this connector\n");
+ return 0;
+ }
+
+ /* Create datatype */
+ if ((mtype_id = H5Tcreate(H5T_COMPOUND, sizeof(tattr_cmpd_t))) < 0)
+ TEST_ERROR;
+ if (H5Tinsert(mtype_id, "a_name", HOFFSET(tattr_cmpd_t, a), H5T_NATIVE_INT) < 0)
+ TEST_ERROR;
+ if (H5Tinsert(mtype_id, "b_name", HOFFSET(tattr_cmpd_t, b), H5T_NATIVE_INT) < 0)
+ TEST_ERROR;
+
+ if ((mtypea_id = H5Tcreate(H5T_COMPOUND, sizeof(tattr_cmpd_t))) < 0)
+ TEST_ERROR;
+ if (H5Tinsert(mtypea_id, "a_name", HOFFSET(tattr_cmpd_t, a), H5T_NATIVE_INT) < 0)
+ TEST_ERROR;
+
+ if ((mtypeb_id = H5Tcreate(H5T_COMPOUND, sizeof(tattr_cmpd_t))) < 0)
+ TEST_ERROR;
+ if (H5Tinsert(mtypeb_id, "b_name", HOFFSET(tattr_cmpd_t, b), H5T_NATIVE_INT) < 0)
+ TEST_ERROR;
+
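+ /* File datatype: a packed compound of a 2-byte big-endian unsigned field and an 8-byte
+ * little-endian signed field (10 bytes total), so I/O with the memory types requires conversion */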
+ if ((ftype_id = H5Tcreate(H5T_COMPOUND, 2 + 8)) < 0)
+ TEST_ERROR;
+ if (H5Tinsert(ftype_id, "a_name", 0, H5T_STD_U16BE) < 0)
+ TEST_ERROR;
+ if (H5Tinsert(ftype_id, "b_name", 2, H5T_STD_I64LE) < 0)
+ TEST_ERROR;
+
+ /* Create dataspace */
+ if ((space_id = H5Screate_simple(2, dims, NULL)) < 0)
+ TEST_ERROR;
+
+ /* Create event stack */
+ if ((es_id = H5EScreate()) < 0)
+ TEST_ERROR;
+
+ /* Open file asynchronously */
+ if ((file_id = H5Fopen_async(ASYNC_API_TEST_FILE, H5F_ACC_RDWR, H5P_DEFAULT, es_id)) < 0)
+ TEST_ERROR;
+
+ /* Create the attribute asynchronously by name */
+ if ((attr_id = H5Acreate_by_name_async(file_id, "attr_dset", "attr_cmpd", ftype_id, space_id, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT, es_id)) < 0)
+ TEST_ERROR;
+
+ /* Initialize wbuf */
+ for (i = 0; i < 6; i++)
+ for (j = 0; j < 10; j++) {
+ wbuf[i][j].a = 2 * (10 * i + j);
+ wbuf[i][j].b = 2 * (10 * i + j) + 1;
+ } /* end for */
+
+ /* Write the attribute asynchronously */
+ if (H5Awrite_async(attr_id, mtype_id, wbuf, es_id) < 0)
+ TEST_ERROR;
+
+ /* Update fbuf */
+ for (i = 0; i < 6; i++)
+ for (j = 0; j < 10; j++) {
+ fbuf[i][j].a = wbuf[i][j].a;
+ fbuf[i][j].b = wbuf[i][j].b;
+ } /* end for */
+
+ /* Flush the dataset asynchronously. This will effectively work as a
+ * barrier, guaranteeing the read takes place after the write. */
+ if (H5Fflush_async(file_id, H5F_SCOPE_LOCAL, es_id) < 0)
+ TEST_ERROR;
+
+ /* Read the attribute asynchronously */
+ if (H5Aread_async(attr_id, mtype_id, rbuf, es_id) < 0)
+ TEST_ERROR;
+
+ /* Wait for the event stack to complete */
+ if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0)
+ TEST_ERROR;
+ if (op_failed)
+ TEST_ERROR;
+
+ /* Verify the read data */
+ for (i = 0; i < 6; i++)
+ for (j = 0; j < 10; j++) {
+ if (rbuf[i][j].a != fbuf[i][j].a)
+ FAIL_PUTS_ERROR(" data verification failed\n");
+ if (rbuf[i][j].b != fbuf[i][j].b)
+ FAIL_PUTS_ERROR(" data verification failed\n");
+ } /* end for */
+
+ /* Clear the read buffer */
+ for (i = 0; i < 6; i++)
+ for (j = 0; j < 10; j++) {
+ rbuf[i][j].a = -2;
+ rbuf[i][j].b = -2;
+ } /* end for */
+
+ /* Read the attribute asynchronously (element a only) */
+ if (H5Aread_async(attr_id, mtypea_id, rbuf, es_id) < 0)
+ TEST_ERROR;
+
+ /* Wait for the event stack to complete */
+ if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0)
+ TEST_ERROR;
+ if (op_failed)
+ TEST_ERROR;
+
+ /* Verify the read data */
+ for (i = 0; i < 6; i++)
+ for (j = 0; j < 10; j++) {
+ if (rbuf[i][j].a != fbuf[i][j].a)
+ FAIL_PUTS_ERROR(" data verification failed\n");
+ if (rbuf[i][j].b != -2)
+ FAIL_PUTS_ERROR(" data verification failed\n");
+ } /* end for */
+
+ /* Clear the read buffer */
+ for (i = 0; i < 6; i++)
+ for (j = 0; j < 10; j++) {
+ rbuf[i][j].a = -2;
+ rbuf[i][j].b = -2;
+ } /* end for */
+
+ /* Read the attribute asynchronously (element b only) */
+ if (H5Aread_async(attr_id, mtypeb_id, rbuf, es_id) < 0)
+ TEST_ERROR;
+
+ /* Wait for the event stack to complete */
+ if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0)
+ TEST_ERROR;
+ if (op_failed)
+ TEST_ERROR;
+
+ /* Verify the read data */
+ for (i = 0; i < 6; i++)
+ for (j = 0; j < 10; j++) {
+ if (rbuf[i][j].a != -2)
+ FAIL_PUTS_ERROR(" data verification failed\n");
+ if (rbuf[i][j].b != fbuf[i][j].b)
+ FAIL_PUTS_ERROR(" data verification failed\n");
+ } /* end for */
+
+ /* Update wbuf */
+ for (i = 0; i < 6; i++)
+ for (j = 0; j < 10; j++) {
+ wbuf[i][j].a += 2 * 6 * 10;
+ wbuf[i][j].b += 2 * 6 * 10;
+ } /* end for */
+
+ /* Write the attribute asynchronously (element a only) */
+ if (H5Awrite_async(attr_id, mtypea_id, wbuf, es_id) < 0)
+ TEST_ERROR;
+
+ /* Update fbuf */
+ for (i = 0; i < 6; i++)
+ for (j = 0; j < 10; j++)
+ fbuf[i][j].a = wbuf[i][j].a;
+
+ /* Flush the dataset asynchronously. This will effectively work as a
+ * barrier, guaranteeing the read takes place after the write. */
+ if (H5Fflush_async(file_id, H5F_SCOPE_LOCAL, es_id) < 0)
+ TEST_ERROR;
+
+ /* Clear the read buffer */
+ for (i = 0; i < 6; i++)
+ for (j = 0; j < 10; j++) {
+ rbuf[i][j].a = -2;
+ rbuf[i][j].b = -2;
+ } /* end for */
+
+ /* Read the attribute asynchronously */
+ if (H5Aread_async(attr_id, mtype_id, rbuf, es_id) < 0)
+ TEST_ERROR;
+
+ /* Wait for the event stack to complete */
+ if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0)
+ TEST_ERROR;
+ if (op_failed)
+ TEST_ERROR;
+
+ /* Verify the read data */
+ for (i = 0; i < 6; i++)
+ for (j = 0; j < 10; j++) {
+ if (rbuf[i][j].a != fbuf[i][j].a)
+ FAIL_PUTS_ERROR(" data verification failed\n");
+ if (rbuf[i][j].b != fbuf[i][j].b)
+ FAIL_PUTS_ERROR(" data verification failed\n");
+ } /* end for */
+
+ /* Update wbuf */
+ for (i = 0; i < 6; i++)
+ for (j = 0; j < 10; j++) {
+ wbuf[i][j].a += 2 * 6 * 10;
+ wbuf[i][j].b += 2 * 6 * 10;
+ } /* end for */
+
+ /* Write the attribute asynchronously (element b only) */
+ if (H5Awrite_async(attr_id, mtypeb_id, wbuf, es_id) < 0)
+ TEST_ERROR;
+
+ /* Update fbuf */
+ for (i = 0; i < 6; i++)
+ for (j = 0; j < 10; j++)
+ fbuf[i][j].b = wbuf[i][j].b;
+
+ /* Flush the dataset asynchronously. This will effectively work as a
+ * barrier, guaranteeing the read takes place after the write. */
+ if (H5Fflush_async(file_id, H5F_SCOPE_LOCAL, es_id) < 0)
+ TEST_ERROR;
+
+ /* Clear the read buffer */
+ for (i = 0; i < 6; i++)
+ for (j = 0; j < 10; j++) {
+ rbuf[i][j].a = -2;
+ rbuf[i][j].b = -2;
+ } /* end for */
+
+ /* Read the attribute asynchronously */
+ if (H5Aread_async(attr_id, mtype_id, rbuf, es_id) < 0)
+ TEST_ERROR;
+
+ /* Wait for the event stack to complete */
+ if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0)
+ TEST_ERROR;
+ if (op_failed)
+ TEST_ERROR;
+
+ /* Verify the read data */
+ for (i = 0; i < 6; i++)
+ for (j = 0; j < 10; j++) {
+ if (rbuf[i][j].a != fbuf[i][j].a)
+ FAIL_PUTS_ERROR(" data verification failed\n");
+ if (rbuf[i][j].b != fbuf[i][j].b)
+ FAIL_PUTS_ERROR(" data verification failed\n");
+ } /* end for */
+
+ /* Close */
+ if (H5Aclose_async(attr_id, es_id) < 0)
+ TEST_ERROR;
+ if (H5Fclose_async(file_id, es_id) < 0)
+ TEST_ERROR;
+ if (H5Sclose(space_id) < 0)
+ TEST_ERROR;
+ if (H5Tclose(mtype_id) < 0)
+ TEST_ERROR;
+ if (H5Tclose(ftype_id) < 0)
+ TEST_ERROR;
+ if (H5Tclose(mtypea_id) < 0)
+ TEST_ERROR;
+ if (H5Tclose(mtypeb_id) < 0)
+ TEST_ERROR;
+
+ /* Wait for the event stack to complete */
+ if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0)
+ TEST_ERROR;
+ if (op_failed)
+ TEST_ERROR;
+
+ if (H5ESclose(es_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(space_id);
+ H5Tclose(mtype_id);
+ H5Tclose(ftype_id);
+ H5Tclose(mtypea_id);
+ H5Tclose(mtypeb_id);
+ H5Aclose(attr_id);
+ H5Fclose(file_id);
+ H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed);
+ H5ESclose(es_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+} /* end test_attribute_io_compound() */
+
+/*
+ * Test group interfaces
+ */
+static int
+test_group(void)
+{
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t parent_group_id = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t subgroup_id = H5I_INVALID_HID;
+ hid_t gcpl_id = H5I_INVALID_HID;
+ hid_t es_id = H5I_INVALID_HID;
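+ /* Group info results for H5Gget_info_async, H5Gget_info_by_idx_async, and H5Gget_info_by_name_async, respectively */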
+ H5G_info_t info1;
+ H5G_info_t info2;
+ H5G_info_t info3;
+ size_t num_in_progress;
+ hbool_t op_failed;
+
+ TESTING("group operations");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_MORE) || !(vol_cap_flags_g & H5VL_CAP_FLAG_FLUSH_REFRESH) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_CREATION_ORDER)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file, group, group more, flush, or creation order aren't "
+ "supported with this connector\n");
+ return 0;
+ }
+
+ /* Create GCPL */
+ if ((gcpl_id = H5Pcreate(H5P_GROUP_CREATE)) < 0)
+ TEST_ERROR;
+
+ /* Track creation order */
+ if (H5Pset_link_creation_order(gcpl_id, H5P_CRT_ORDER_TRACKED | H5P_CRT_ORDER_INDEXED) < 0)
+ TEST_ERROR;
+
+ /* Create event stack */
+ if ((es_id = H5EScreate()) < 0)
+ TEST_ERROR;
+
+ /* Open file asynchronously */
+ if ((file_id = H5Fopen_async(ASYNC_API_TEST_FILE, H5F_ACC_RDWR, H5P_DEFAULT, es_id)) < 0)
+ TEST_ERROR;
+
+ /* Create the parent group asynchronously */
+ if ((parent_group_id =
+ H5Gcreate_async(file_id, "group_parent", H5P_DEFAULT, gcpl_id, H5P_DEFAULT, es_id)) < 0)
+ TEST_ERROR;
+
+ /* Create 3 subgroups asynchronously, the first with no sub-subgroups, the
+ * second with 1, and the third with 2 */
+ if ((group_id =
+ H5Gcreate_async(parent_group_id, "group1", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT, es_id)) < 0)
+ TEST_ERROR;
+ if (H5Gclose_async(group_id, es_id) < 0)
+ TEST_ERROR;
+
+ if ((group_id =
+ H5Gcreate_async(parent_group_id, "group2", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT, es_id)) < 0)
+ TEST_ERROR;
+ if ((subgroup_id = H5Gcreate_async(group_id, "subgroup1", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT, es_id)) <
+ 0)
+ TEST_ERROR;
+ if (H5Gclose_async(subgroup_id, es_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose_async(group_id, es_id) < 0)
+ TEST_ERROR;
+
+ if ((group_id =
+ H5Gcreate_async(parent_group_id, "group3", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT, es_id)) < 0)
+ TEST_ERROR;
+ if ((subgroup_id = H5Gcreate_async(group_id, "subgroup1", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT, es_id)) <
+ 0)
+ TEST_ERROR;
+ if (H5Gclose_async(subgroup_id, es_id) < 0)
+ TEST_ERROR;
+ if ((subgroup_id = H5Gcreate_async(group_id, "subgroup2", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT, es_id)) <
+ 0)
+ TEST_ERROR;
+ if (H5Gclose_async(subgroup_id, es_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose_async(group_id, es_id) < 0)
+ TEST_ERROR;
+
+ /* Flush the file asynchronously. This will effectively work as a barrier,
+ * guaranteeing the read takes place after the write. */
+ if (H5Fflush_async(file_id, H5F_SCOPE_LOCAL, es_id) < 0)
+ TEST_ERROR;
+
+ /* Test H5Gget_info_async */
+ /* Open group1 asynchronously */
+ if ((group_id = H5Gopen_async(parent_group_id, "group1", H5P_DEFAULT, es_id)) < 0)
+ TEST_ERROR;
+
+ /* Get info */
+ if (H5Gget_info_async(group_id, &info1, es_id) < 0)
+ TEST_ERROR;
+
+ /* Test H5Gget_info_by_idx_async; creation-order index 1 is "group2" */
+ if (H5Gget_info_by_idx_async(parent_group_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, 1, &info2,
+ H5P_DEFAULT, es_id) < 0)
+ TEST_ERROR;
+
+ /* Test H5Gget_info_by_name_async */
+ if (H5Gget_info_by_name_async(parent_group_id, "group3", &info3, H5P_DEFAULT, es_id) < 0)
+ TEST_ERROR;
+
+ /* Wait for the event stack to complete */
+ if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0)
+ TEST_ERROR;
+ if (op_failed)
+ TEST_ERROR;
+
+ /* Verify group infos */
+ if (info1.nlinks != 0)
+ FAIL_PUTS_ERROR(" incorrect number of links");
+ if (info2.nlinks != 1)
+ FAIL_PUTS_ERROR(" incorrect number of links");
+ if (info3.nlinks != 2)
+ FAIL_PUTS_ERROR(" incorrect number of links");
+
+ /* Close */
+ if (H5Gclose_async(group_id, es_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose_async(parent_group_id, es_id) < 0)
+ TEST_ERROR;
+ if (H5Fclose_async(file_id, es_id) < 0)
+ TEST_ERROR;
+ if (H5Pclose(gcpl_id) < 0)
+ TEST_ERROR;
+
+ /* Wait for the event stack to complete */
+ if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0)
+ TEST_ERROR;
+ if (op_failed)
+ TEST_ERROR;
+
+ if (H5ESclose(es_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Gclose(subgroup_id);
+ H5Gclose(group_id);
+ H5Gclose(parent_group_id);
+ H5Fclose(file_id);
+ H5Pclose(gcpl_id);
+ H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed);
+ H5ESclose(es_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+} /* end test_group() */
+
+/*
+ * Test link interfaces
+ */
+static int
+test_link(void)
+{
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t parent_group_id = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t gcpl_id = H5I_INVALID_HID;
+ hid_t es_id = H5I_INVALID_HID;
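+ /* Link-existence results captured before any deletions, after the soft link is deleted, and after the hard link is deleted */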
+ hbool_t existsh1;
+ hbool_t existsh2;
+ hbool_t existsh3;
+ hbool_t existss1;
+ hbool_t existss2;
+ hbool_t existss3;
+ size_t num_in_progress;
+ hbool_t op_failed;
+
+ TESTING("link operations");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_LINK_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_HARD_LINKS) || !(vol_cap_flags_g & H5VL_CAP_FLAG_SOFT_LINKS) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_FLUSH_REFRESH) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_CREATION_ORDER)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file, link, hard link, soft link, flush, or creation order "
+ "aren't supported with this connector\n");
+ return 0;
+ }
+
+ /* Create GCPL */
+ if ((gcpl_id = H5Pcreate(H5P_GROUP_CREATE)) < 0)
+ TEST_ERROR;
+
+ /* Track creation order */
+ if (H5Pset_link_creation_order(gcpl_id, H5P_CRT_ORDER_TRACKED | H5P_CRT_ORDER_INDEXED) < 0)
+ TEST_ERROR;
+
+ /* Create event stack */
+ if ((es_id = H5EScreate()) < 0)
+ TEST_ERROR;
+
+ /* Open file asynchronously */
+ if ((file_id = H5Fopen_async(ASYNC_API_TEST_FILE, H5F_ACC_RDWR, H5P_DEFAULT, es_id)) < 0)
+ TEST_ERROR;
+
+ /* Create the parent group asynchronously */
+ if ((parent_group_id =
+ H5Gcreate_async(file_id, "link_parent", H5P_DEFAULT, gcpl_id, H5P_DEFAULT, es_id)) < 0)
+ TEST_ERROR;
+
+ /* Create subgroup asynchronously. */
+ if ((group_id = H5Gcreate_async(parent_group_id, "group", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT, es_id)) <
+ 0)
+ TEST_ERROR;
+ if (H5Gclose_async(group_id, es_id) < 0)
+ TEST_ERROR;
+
+ /* Flush the parent group asynchronously. This will effectively work as a
+ * barrier, guaranteeing the link to the subgroup is visible to later tasks.
+ */
+ if (H5Oflush_async(parent_group_id, es_id) < 0)
+ TEST_ERROR;
+
+ /* Create hard link asynchronously */
+ if (H5Lcreate_hard_async(parent_group_id, "group", parent_group_id, "hard_link", H5P_DEFAULT, H5P_DEFAULT,
+ es_id) < 0)
+ TEST_ERROR;
+
+ /* Flush the parent group asynchronously. This will effectively work as a
+ * barrier, guaranteeing the soft link create takes place after the hard
+ * link create. */
+ if (H5Oflush_async(parent_group_id, es_id) < 0)
+ TEST_ERROR;
+
+ /* Create soft link asynchronously */
+ if (H5Lcreate_soft_async("/link_parent/group", parent_group_id, "soft_link", H5P_DEFAULT, H5P_DEFAULT,
+ es_id) < 0)
+ TEST_ERROR;
+
+ /* Flush the parent group asynchronously. This will effectively work as a
+ * barrier, guaranteeing the read takes place after the writes. */
+ if (H5Oflush_async(parent_group_id, es_id) < 0)
+ TEST_ERROR;
+
+ /* Check if hard link exists */
+ if (H5Lexists_async(parent_group_id, "hard_link", &existsh1, H5P_DEFAULT, es_id) < 0)
+ TEST_ERROR;
+
+ /* Check if soft link exists */
+ if (H5Lexists_async(parent_group_id, "soft_link", &existss1, H5P_DEFAULT, es_id) < 0)
+ TEST_ERROR;
+
+ /* Flush the parent group asynchronously. This will effectively work as a
+ * barrier, guaranteeing the delete takes place after the reads. */
+ if (H5Oflush_async(parent_group_id, es_id) < 0)
+ TEST_ERROR;
+
+ /* Delete the soft link by creation-order index; the links were created in the order "group" (0), "hard_link" (1), "soft_link" (2) */
+ if (H5Ldelete_by_idx_async(parent_group_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, 2, H5P_DEFAULT, es_id) <
+ 0)
+ TEST_ERROR;
+
+ /* Flush the parent group asynchronously. This will effectively work as a
+ * barrier, guaranteeing the read takes place after the delete. */
+ if (H5Oflush_async(parent_group_id, es_id) < 0)
+ TEST_ERROR;
+
+ /* Check if hard link exists */
+ if (H5Lexists_async(parent_group_id, "hard_link", &existsh2, H5P_DEFAULT, es_id) < 0)
+ TEST_ERROR;
+
+ /* Check if soft link exists */
+ if (H5Lexists_async(parent_group_id, "soft_link", &existss2, H5P_DEFAULT, es_id) < 0)
+ TEST_ERROR;
+
+ /* Flush the parent group asynchronously. This will effectively work as a
+ * barrier, guaranteeing the delete takes place after the reads. */
+ if (H5Oflush_async(parent_group_id, es_id) < 0)
+ TEST_ERROR;
+
+ /* Delete hard link */
+ if (H5Ldelete_async(parent_group_id, "hard_link", H5P_DEFAULT, es_id) < 0)
+ TEST_ERROR;
+
+ /* Flush the parent group asynchronously. This will effectively work as a
+ * barrier, guaranteeing the read takes place after the delete. */
+ if (H5Oflush_async(parent_group_id, es_id) < 0)
+ TEST_ERROR;
+
+ /* Check if hard link exists */
+ if (H5Lexists_async(parent_group_id, "hard_link", &existsh3, H5P_DEFAULT, es_id) < 0)
+ TEST_ERROR;
+
+ /* Check if soft link exists */
+ if (H5Lexists_async(parent_group_id, "soft_link", &existss3, H5P_DEFAULT, es_id) < 0)
+ TEST_ERROR;
+
+ /* Wait for the event stack to complete */
+ if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0)
+ TEST_ERROR;
+ if (op_failed)
+ TEST_ERROR;
+
+ /* Check if existence returns were correct */
+ if (!existsh1)
+ FAIL_PUTS_ERROR(" link exists returned FALSE for link that should exist");
+ if (!existss1)
+ FAIL_PUTS_ERROR(" link exists returned FALSE for link that should exist");
+ if (!existsh2)
+ FAIL_PUTS_ERROR(" link exists returned FALSE for link that should exist");
+ if (existss2)
+ FAIL_PUTS_ERROR(" link exists returned TRUE for link that should not exist");
+ if (existsh3)
+ FAIL_PUTS_ERROR(" link exists returned TRUE for link that should not exist");
+ if (existss3)
+ FAIL_PUTS_ERROR(" link exists returned TRUE for link that should not exist");
+
+ /* Close */
+ if (H5Gclose_async(parent_group_id, es_id) < 0)
+ TEST_ERROR;
+ if (H5Fclose_async(file_id, es_id) < 0)
+ TEST_ERROR;
+ if (H5Pclose(gcpl_id) < 0)
+ TEST_ERROR;
+
+ /* Wait for the event stack to complete */
+ if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0)
+ TEST_ERROR;
+ if (op_failed)
+ TEST_ERROR;
+
+ if (H5ESclose(es_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Gclose(group_id);
+ H5Gclose(parent_group_id);
+ H5Fclose(file_id);
+ H5Pclose(gcpl_id);
+ H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed);
+ H5ESclose(es_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+} /* end test_link() */
+
+/*
+ * Test H5Ocopy() and H5Orefresh()
+ */
+static int
+test_ocopy_orefresh(void)
+{
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t parent_group_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t space_id = H5I_INVALID_HID;
+ hid_t es_id = H5I_INVALID_HID;
+ hsize_t dims[2] = {6, 10};
+ size_t num_in_progress;
+ hbool_t op_failed;
+
+ TESTING("H5Ocopy() and H5Orefresh()");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_OBJECT_MORE) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_FLUSH_REFRESH)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file, group, dataset, object more, flush, or refresh aren't "
+ "supported with this connector\n");
+ return 0;
+ }
+
+ /* Create dataspace */
+ if ((space_id = H5Screate_simple(2, dims, NULL)) < 0)
+ TEST_ERROR;
+
+ /* Create event stack */
+ if ((es_id = H5EScreate()) < 0)
+ TEST_ERROR;
+
+ /* Open file asynchronously */
+ if ((file_id = H5Fopen_async(ASYNC_API_TEST_FILE, H5F_ACC_RDWR, H5P_DEFAULT, es_id)) < 0)
+ TEST_ERROR;
+
+ /* Create the parent group asynchronously */
+ if ((parent_group_id =
+ H5Gcreate_async(file_id, "ocopy_parent", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT, es_id)) < 0)
+ TEST_ERROR;
+
+ /* Create dataset asynchronously. */
+ if ((dset_id = H5Dcreate_async(parent_group_id, "dset", H5T_NATIVE_INT, space_id, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT, es_id)) < 0)
+ TEST_ERROR;
+ if (H5Dclose_async(dset_id, es_id) < 0)
+ TEST_ERROR;
+
+ /* Flush the parent group asynchronously. This will effectively work as a
+ * barrier, guaranteeing the copy takes place after dataset create. */
+ if (H5Oflush_async(parent_group_id, es_id) < 0)
+ TEST_ERROR;
+
+ /* Copy dataset */
+ if (H5Ocopy_async(parent_group_id, "dset", parent_group_id, "copied_dset", H5P_DEFAULT, H5P_DEFAULT,
+ es_id) < 0)
+ TEST_ERROR;
+
+ /* Flush the parent group asynchronously. This will effectively work as a
+ * barrier, guaranteeing the dataset open takes place after the copy. */
+ if (H5Oflush_async(parent_group_id, es_id) < 0)
+ TEST_ERROR;
+
+ /* Open the copied dataset asynchronously */
+ if ((dset_id = H5Dopen_async(parent_group_id, "copied_dset", H5P_DEFAULT, es_id)) < 0)
+ TEST_ERROR;
+
+ /* Refresh the copied dataset; H5Orefresh is a synchronous call, so it does not go on the event stack */
+ if (H5Orefresh(dset_id) < 0)
+ TEST_ERROR;
+
+ /* Wait for the event stack to complete */
+ if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0)
+ TEST_ERROR;
+ if (op_failed)
+ TEST_ERROR;
+
+ /* Close */
+ if (H5Dclose_async(dset_id, es_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose_async(parent_group_id, es_id) < 0)
+ TEST_ERROR;
+ if (H5Fclose_async(file_id, es_id) < 0)
+ TEST_ERROR;
+
+ /* Wait for the event stack to complete */
+ if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0)
+ TEST_ERROR;
+ if (op_failed)
+ TEST_ERROR;
+
+ if (H5ESclose(es_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Dclose(dset_id);
+ H5Gclose(parent_group_id);
+ H5Fclose(file_id);
+ H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed);
+ H5ESclose(es_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+} /* end test_ocopy_orefresh() */
+
+/*
+ * Test H5Freopen()
+ */
+static int
+test_file_reopen(void)
+{
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t reopened_file_id = H5I_INVALID_HID;
+ hid_t es_id = H5I_INVALID_HID;
+ size_t num_in_progress;
+ hbool_t op_failed;
+
+ TESTING("H5Freopen()");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_MORE)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file or file more aren't supported with this connector\n");
+ return 0;
+ }
+
+ /* Create event stack */
+ if ((es_id = H5EScreate()) < 0)
+ TEST_ERROR;
+
+ /* Open file asynchronously */
+ if ((file_id = H5Fopen_async(ASYNC_API_TEST_FILE, H5F_ACC_RDWR, H5P_DEFAULT, es_id)) < 0)
+ TEST_ERROR;
+
+ /* Reopen file asynchronously */
+ if ((reopened_file_id = H5Freopen_async(file_id, es_id)) < 0)
+ TEST_ERROR;
+
+ /* Wait for the event stack to complete */
+ if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0)
+ TEST_ERROR;
+ if (op_failed)
+ TEST_ERROR;
+
+ /* Close */
+ if (H5Fclose_async(reopened_file_id, es_id) < 0)
+ TEST_ERROR;
+ if (H5Fclose_async(file_id, es_id) < 0)
+ TEST_ERROR;
+
+ /* Wait for the event stack to complete */
+ if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0)
+ TEST_ERROR;
+ if (op_failed)
+ TEST_ERROR;
+
+ if (H5ESclose(es_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Fclose(reopened_file_id);
+ H5Fclose(file_id);
+ H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed);
+ H5ESclose(es_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+} /* end test_file_reopen() */
+
+/*
+ * Cleanup temporary test files
+ */
+static void
+cleanup_files(void)
+{
+ char file_name[64];
+ int i;
+
+ H5Fdelete(ASYNC_API_TEST_FILE, H5P_DEFAULT);
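+ /* Delete the printf-formatted files created by the multi-file tests; max_printf_file records the highest index used */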
+ for (i = 0; i <= max_printf_file; i++) {
+ HDsnprintf(file_name, 64, ASYNC_API_TEST_FILE_PRINTF, i);
+ H5Fdelete(file_name, H5P_DEFAULT);
+ } /* end for */
+}
+
+int
+H5_api_async_test(void)
+{
+ size_t i;
+ int nerrors;
+
+ HDprintf("**********************************************\n");
+ HDprintf("* *\n");
+ HDprintf("* API Async Tests *\n");
+ HDprintf("* *\n");
+ HDprintf("**********************************************\n\n");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_ASYNC)) {
+ SKIPPED();
+ HDprintf(" Async APIs aren't supported with this connector\n");
+ return 0;
+ }
+
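+ /* Run each async test in order; later tests reuse files created by earlier ones */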
+ for (i = 0, nerrors = 0; i < ARRAY_LENGTH(async_tests); i++) {
+ nerrors += (*async_tests[i])() ? 1 : 0;
+ }
+
+ HDprintf("\n");
+
+ HDprintf("Cleaning up testing files\n");
+ cleanup_files();
+
+ return nerrors;
+}
+
+#else /* H5ESpublic_H */
+
+int
+H5_api_async_test(void)
+{
+ HDprintf("**********************************************\n");
+ HDprintf("* *\n");
+ HDprintf("* API Async Tests *\n");
+ HDprintf("* *\n");
+ HDprintf("**********************************************\n\n");
+
+ HDprintf("SKIPPED due to no async support in HDF5 library\n");
+
+ return 0;
+}
+
+#endif /* H5ESpublic_H */
diff --git a/test/API/H5_api_async_test.h b/test/API/H5_api_async_test.h
new file mode 100644
index 0000000..f6df48a
--- /dev/null
+++ b/test/API/H5_api_async_test.h
@@ -0,0 +1,29 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+#ifndef H5_API_ASYNC_TEST_H
+#define H5_API_ASYNC_TEST_H
+
+#include "H5_api_test.h"
+
+int H5_api_async_test(void);
+
+/************************************************
+ * *
+ * API async test defines *
+ * *
+ ************************************************/
+
+#define ASYNC_API_TEST_FILE "H5_api_async_test.h5"
+#define ASYNC_API_TEST_FILE_PRINTF "H5_api_async_test_%d.h5"
+
+#endif
diff --git a/test/API/H5_api_attribute_test.c b/test/API/H5_api_attribute_test.c
new file mode 100644
index 0000000..7f767a7
--- /dev/null
+++ b/test/API/H5_api_attribute_test.c
@@ -0,0 +1,11027 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+#include "H5_api_attribute_test.h"
+
+/*
+ * TODO: Additional tests to be written:
+ *
+ * - Test for creating a large attribute.
+ * - Test for checking that object's max. attr. creation
+ * order value gets reset when all attributes are removed.
+ */
+
+static int test_create_attribute_on_root(void);
+static int test_create_attribute_on_dataset(void);
+static int test_create_attribute_on_datatype(void);
+static int test_create_attribute_with_null_space(void);
+static int test_create_attribute_with_scalar_space(void);
+static int test_create_attribute_with_space_in_name(void);
+static int test_create_attribute_invalid_params(void);
+static int test_open_attribute(void);
+static int test_open_attribute_invalid_params(void);
+static int test_write_attribute(void);
+static int test_write_attribute_invalid_params(void);
+static int test_read_attribute(void);
+static int test_read_attribute_invalid_params(void);
+static int test_read_empty_attribute(void);
+static int test_close_attribute_invalid_id(void);
+static int test_get_attribute_space_and_type(void);
+static int test_get_attribute_space_and_type_invalid_params(void);
+static int test_attribute_property_lists(void);
+static int test_get_attribute_name(void);
+static int test_get_attribute_name_invalid_params(void);
+static int test_get_attribute_storage_size(void);
+static int test_get_attribute_info(void);
+static int test_get_attribute_info_invalid_params(void);
+static int test_rename_attribute(void);
+static int test_rename_attribute_invalid_params(void);
+static int test_attribute_iterate_group(void);
+static int test_attribute_iterate_dataset(void);
+static int test_attribute_iterate_datatype(void);
+static int test_attribute_iterate_index_saving(void);
+static int test_attribute_iterate_invalid_params(void);
+static int test_attribute_iterate_0_attributes(void);
+static int test_delete_attribute(void);
+static int test_delete_attribute_invalid_params(void);
+static int test_attribute_exists(void);
+static int test_attribute_exists_invalid_params(void);
+static int test_attribute_many(void);
+static int test_attribute_duplicate_id(void);
+static int test_get_number_attributes(void);
+static int test_attr_shared_dtype(void);
+
+static herr_t attr_iter_callback1(hid_t location_id, const char *attr_name, const H5A_info_t *ainfo,
+ void *op_data);
+static herr_t attr_iter_callback2(hid_t location_id, const char *attr_name, const H5A_info_t *ainfo,
+ void *op_data);
+
+/*
+ * The array of attribute tests to be performed.
+ */
+static int (*attribute_tests[])(void) = {test_create_attribute_on_root,
+ test_create_attribute_on_dataset,
+ test_create_attribute_on_datatype,
+ test_create_attribute_with_null_space,
+ test_create_attribute_with_scalar_space,
+ test_create_attribute_with_space_in_name,
+ test_create_attribute_invalid_params,
+ test_open_attribute,
+ test_open_attribute_invalid_params,
+ test_write_attribute,
+ test_write_attribute_invalid_params,
+ test_read_attribute,
+ test_read_attribute_invalid_params,
+ test_read_empty_attribute,
+ test_close_attribute_invalid_id,
+ test_get_attribute_space_and_type,
+ test_get_attribute_space_and_type_invalid_params,
+ test_attribute_property_lists,
+ test_get_attribute_name,
+ test_get_attribute_name_invalid_params,
+ test_get_attribute_storage_size,
+ test_get_attribute_info,
+ test_get_attribute_info_invalid_params,
+ test_rename_attribute,
+ test_rename_attribute_invalid_params,
+ test_attribute_iterate_group,
+ test_attribute_iterate_dataset,
+ test_attribute_iterate_datatype,
+ test_attribute_iterate_index_saving,
+ test_attribute_iterate_invalid_params,
+ test_attribute_iterate_0_attributes,
+ test_delete_attribute,
+ test_delete_attribute_invalid_params,
+ test_attribute_exists,
+ test_attribute_exists_invalid_params,
+ test_attribute_duplicate_id,
+ test_attribute_many,
+ test_get_number_attributes,
+ test_attr_shared_dtype};
+
+/*
+ * A test to check that an attribute can be created on
+ * the root group.
+ */
+static int
+test_create_attribute_on_root(void)
+{
+ htri_t attr_exists;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t attr_id = H5I_INVALID_HID, attr_id2 = H5I_INVALID_HID;
+ hid_t attr_dtype1 = H5I_INVALID_HID, attr_dtype2 = H5I_INVALID_HID;
+ hid_t space_id = H5I_INVALID_HID;
+
+ TESTING_MULTIPART("attribute creation on the root group");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file or attribute aren't supported with this connector\n");
+ return 0;
+ }
+
+ TESTING_2("test setup");
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((space_id = generate_random_dataspace(ATTRIBUTE_CREATE_ON_ROOT_SPACE_RANK, NULL, NULL, TRUE)) < 0)
+ TEST_ERROR;
+
+ if ((attr_dtype1 = generate_random_datatype(H5T_NO_CLASS, TRUE)) < 0)
+ TEST_ERROR;
+ if ((attr_dtype2 = generate_random_datatype(H5T_NO_CLASS, TRUE)) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ BEGIN_MULTIPART
+ {
+ PART_BEGIN(H5Acreate2)
+ {
+ TESTING_2("H5Acreate on the root group");
+
+ if ((attr_id = H5Acreate2(file_id, ATTRIBUTE_CREATE_ON_ROOT_ATTR_NAME, attr_dtype1, space_id,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create attribute '%s' using H5Acreate\n",
+ ATTRIBUTE_CREATE_ON_ROOT_ATTR_NAME);
+ PART_ERROR(H5Acreate2);
+ }
+
+ /* Verify the attribute has been created */
+ if ((attr_exists = H5Aexists(file_id, ATTRIBUTE_CREATE_ON_ROOT_ATTR_NAME)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if attribute '%s' exists\n",
+ ATTRIBUTE_CREATE_ON_ROOT_ATTR_NAME);
+ PART_ERROR(H5Acreate2);
+ }
+
+ if (!attr_exists) {
+ H5_FAILED();
+ HDprintf(" attribute '%s' did not exist\n", ATTRIBUTE_CREATE_ON_ROOT_ATTR_NAME);
+ PART_ERROR(H5Acreate2);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Acreate2);
+
+ PART_BEGIN(H5Acreate_by_name)
+ {
+ TESTING_2("H5Acreate_by_name on the root group");
+
+ if ((attr_id2 = H5Acreate_by_name(file_id, "/", ATTRIBUTE_CREATE_ON_ROOT_ATTR_NAME2, attr_dtype2,
+ space_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create attribute on root group using H5Acreate_by_name\n");
+ PART_ERROR(H5Acreate_by_name);
+ }
+
+ /* Verify the attribute has been created */
+ if ((attr_exists = H5Aexists(file_id, ATTRIBUTE_CREATE_ON_ROOT_ATTR_NAME2)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if attribute '%s' exists\n",
+ ATTRIBUTE_CREATE_ON_ROOT_ATTR_NAME2);
+ PART_ERROR(H5Acreate_by_name);
+ }
+
+ if (!attr_exists) {
+ H5_FAILED();
+ HDprintf(" attribute '%s' did not exist\n", ATTRIBUTE_CREATE_ON_ROOT_ATTR_NAME2);
+ PART_ERROR(H5Acreate_by_name);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Acreate_by_name);
+ }
+ END_MULTIPART;
+
+ TESTING_2("test cleanup");
+
+ if (H5Sclose(space_id) < 0)
+ TEST_ERROR;
+ if (H5Tclose(attr_dtype1) < 0)
+ TEST_ERROR;
+ if (H5Tclose(attr_dtype2) < 0)
+ TEST_ERROR;
+ if (H5Aclose(attr_id) < 0)
+ TEST_ERROR;
+ if (H5Aclose(attr_id2) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(space_id);
+ H5Tclose(attr_dtype1);
+ H5Tclose(attr_dtype2);
+ H5Aclose(attr_id);
+ H5Aclose(attr_id2);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to check that an attribute can be created on
+ * a dataset.
+ */
+static int
+test_create_attribute_on_dataset(void)
+{
+ htri_t attr_exists;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t attr_id = H5I_INVALID_HID, attr_id2 = H5I_INVALID_HID;
+ hid_t attr_dtype1 = H5I_INVALID_HID, attr_dtype2 = H5I_INVALID_HID;
+ hid_t dset_dtype = H5I_INVALID_HID;
+ hid_t dset_space_id = H5I_INVALID_HID;
+ hid_t attr_space_id = H5I_INVALID_HID;
+
+ TESTING_MULTIPART("attribute creation on a dataset");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file, group, dataset, or attribute aren't supported with this "
+ "connector\n");
+ return 0;
+ }
+
+ TESTING_2("test setup");
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, ATTRIBUTE_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", ATTRIBUTE_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, ATTRIBUTE_CREATE_ON_DATASET_GROUP_NAME, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container group '%s'\n", ATTRIBUTE_CREATE_ON_DATASET_GROUP_NAME);
+ goto error;
+ }
+
+ if ((dset_space_id =
+ generate_random_dataspace(ATTRIBUTE_CREATE_ON_DATASET_DSET_SPACE_RANK, NULL, NULL, FALSE)) < 0)
+ TEST_ERROR;
+ if ((attr_space_id =
+ generate_random_dataspace(ATTRIBUTE_CREATE_ON_DATASET_ATTR_SPACE_RANK, NULL, NULL, TRUE)) < 0)
+ TEST_ERROR;
+
+ if ((dset_dtype = generate_random_datatype(H5T_NO_CLASS, FALSE)) < 0)
+ TEST_ERROR;
+ if ((attr_dtype1 = generate_random_datatype(H5T_NO_CLASS, TRUE)) < 0)
+ TEST_ERROR;
+ if ((attr_dtype2 = generate_random_datatype(H5T_NO_CLASS, TRUE)) < 0)
+ TEST_ERROR;
+
+ if ((dset_id = H5Dcreate2(group_id, ATTRIBUTE_CREATE_ON_DATASET_DSET_NAME, dset_dtype, dset_space_id,
+ H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create dataset\n");
+ goto error;
+ }
+
+ PASSED();
+
+ BEGIN_MULTIPART
+ {
+ PART_BEGIN(H5Acreate_on_dataset)
+ {
+ TESTING_2("H5Acreate on a dataset");
+
+ if ((attr_id = H5Acreate2(dset_id, ATTRIBUTE_CREATE_ON_DATASET_ATTR_NAME, attr_dtype1,
+ attr_space_id, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create attribute\n");
+ PART_ERROR(H5Acreate_on_dataset);
+ }
+
+ /* Verify the attribute has been created */
+ if ((attr_exists = H5Aexists(dset_id, ATTRIBUTE_CREATE_ON_DATASET_ATTR_NAME)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if attribute '%s' exists\n",
+ ATTRIBUTE_CREATE_ON_DATASET_ATTR_NAME);
+ PART_ERROR(H5Acreate_on_dataset);
+ }
+
+ if (!attr_exists) {
+ H5_FAILED();
+ HDprintf(" attribute '%s' did not exist\n", ATTRIBUTE_CREATE_ON_DATASET_ATTR_NAME);
+ PART_ERROR(H5Acreate_on_dataset);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Acreate_on_dataset);
+
+ PART_BEGIN(H5Acreate_by_name_on_dataset)
+ {
+ TESTING_2("H5Acreate_by_name on a dataset");
+
+ if ((attr_id2 = H5Acreate_by_name(group_id, ATTRIBUTE_CREATE_ON_DATASET_DSET_NAME,
+ ATTRIBUTE_CREATE_ON_DATASET_ATTR_NAME2, attr_dtype2,
+ attr_space_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create attribute on dataset by name\n");
+ PART_ERROR(H5Acreate_by_name_on_dataset);
+ }
+
+ /* Verify the attribute has been created */
+ if ((attr_exists = H5Aexists(dset_id, ATTRIBUTE_CREATE_ON_DATASET_ATTR_NAME2)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if attribute '%s' exists\n",
+ ATTRIBUTE_CREATE_ON_DATASET_ATTR_NAME2);
+ PART_ERROR(H5Acreate_by_name_on_dataset);
+ }
+
+ if (!attr_exists) {
+ H5_FAILED();
+ HDprintf(" attribute '%s' did not exist\n", ATTRIBUTE_CREATE_ON_DATASET_ATTR_NAME2);
+ PART_ERROR(H5Acreate_by_name_on_dataset);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Acreate_by_name_on_dataset);
+ }
+ END_MULTIPART;
+
+ TESTING_2("test cleanup");
+
+ if (H5Sclose(dset_space_id) < 0)
+ TEST_ERROR;
+ if (H5Sclose(attr_space_id) < 0)
+ TEST_ERROR;
+ if (H5Tclose(dset_dtype) < 0)
+ TEST_ERROR;
+ if (H5Tclose(attr_dtype1) < 0)
+ TEST_ERROR;
+ if (H5Tclose(attr_dtype2) < 0)
+ TEST_ERROR;
+ if (H5Dclose(dset_id) < 0)
+ TEST_ERROR;
+ if (H5Aclose(attr_id) < 0)
+ TEST_ERROR;
+ if (H5Aclose(attr_id2) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(dset_space_id);
+ H5Sclose(attr_space_id);
+ H5Tclose(dset_dtype);
+ H5Tclose(attr_dtype1);
+ H5Tclose(attr_dtype2);
+ H5Dclose(dset_id);
+ H5Aclose(attr_id);
+ H5Aclose(attr_id2);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to check that an attribute can be created on
+ * a committed datatype.
+ */
+static int
+test_create_attribute_on_datatype(void)
+{
+ htri_t attr_exists;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t type_id = H5I_INVALID_HID;
+ hid_t attr_id = H5I_INVALID_HID, attr_id2 = H5I_INVALID_HID;
+ hid_t attr_dtype1 = H5I_INVALID_HID, attr_dtype2 = H5I_INVALID_HID;
+ hid_t space_id = H5I_INVALID_HID;
+
+ TESTING_MULTIPART("attribute creation on a committed datatype");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_STORED_DATATYPES) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file, group, stored datatype, or attribute aren't supported "
+ "with this connector\n");
+ return 0;
+ }
+
+ TESTING_2("test setup");
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, ATTRIBUTE_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", ATTRIBUTE_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, ATTRIBUTE_CREATE_ON_DATATYPE_GROUP_NAME, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container group '%s'\n", ATTRIBUTE_CREATE_ON_DATATYPE_GROUP_NAME);
+ goto error;
+ }
+
+ if ((type_id = generate_random_datatype(H5T_NO_CLASS, FALSE)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create datatype\n");
+ goto error;
+ }
+
+ if (H5Tcommit2(group_id, ATTRIBUTE_CREATE_ON_DATATYPE_DTYPE_NAME, type_id, H5P_DEFAULT, H5P_DEFAULT,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't commit datatype\n");
+ goto error;
+ }
+
+ if ((space_id = generate_random_dataspace(ATTRIBUTE_CREATE_ON_DATATYPE_SPACE_RANK, NULL, NULL, TRUE)) < 0)
+ TEST_ERROR;
+
+ if ((attr_dtype1 = generate_random_datatype(H5T_NO_CLASS, TRUE)) < 0)
+ TEST_ERROR;
+ if ((attr_dtype2 = generate_random_datatype(H5T_NO_CLASS, TRUE)) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ BEGIN_MULTIPART
+ {
+ PART_BEGIN(H5Acreate_on_datatype)
+ {
+ TESTING_2("H5Acreate on a committed datatype");
+
+ if ((attr_id = H5Acreate2(type_id, ATTRIBUTE_CREATE_ON_DATATYPE_ATTR_NAME, attr_dtype1, space_id,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create attribute on datatype using H5Acreate\n");
+ PART_ERROR(H5Acreate_on_datatype);
+ }
+
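+ /* Verify the attribute has been created */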
+ if ((attr_exists = H5Aexists(type_id, ATTRIBUTE_CREATE_ON_DATATYPE_ATTR_NAME)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if attribute exists\n");
+ PART_ERROR(H5Acreate_on_datatype);
+ }
+
+ if (!attr_exists) {
+ H5_FAILED();
+ HDprintf(" attribute did not exist\n");
+ PART_ERROR(H5Acreate_on_datatype);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Acreate_on_datatype);
+
+ PART_BEGIN(H5Acreate_by_name_on_datatype)
+ {
+ TESTING_2("H5Acreate_by_name on a committed datatype");
+
+ if ((attr_id2 = H5Acreate_by_name(group_id, ATTRIBUTE_CREATE_ON_DATATYPE_DTYPE_NAME,
+ ATTRIBUTE_CREATE_ON_DATATYPE_ATTR_NAME2, attr_dtype2, space_id,
+ H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create attribute on datatype using H5Acreate_by_name\n");
+ PART_ERROR(H5Acreate_by_name_on_datatype);
+ }
+
+ /* Verify the attribute has been created */
+ if ((attr_exists = H5Aexists(type_id, ATTRIBUTE_CREATE_ON_DATATYPE_ATTR_NAME2)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if attribute exists\n");
+ PART_ERROR(H5Acreate_by_name_on_datatype);
+ }
+
+ if (!attr_exists) {
+ H5_FAILED();
+ HDprintf(" attribute did not exist\n");
+ PART_ERROR(H5Acreate_by_name_on_datatype);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Acreate_by_name_on_datatype);
+ }
+ END_MULTIPART;
+
+ TESTING_2("test cleanup");
+
+ if (H5Sclose(space_id) < 0)
+ TEST_ERROR;
+ if (H5Tclose(attr_dtype1) < 0)
+ TEST_ERROR;
+ if (H5Tclose(attr_dtype2) < 0)
+ TEST_ERROR;
+ if (H5Aclose(attr_id) < 0)
+ TEST_ERROR;
+ if (H5Aclose(attr_id2) < 0)
+ TEST_ERROR;
+ if (H5Tclose(type_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(space_id);
+ H5Tclose(attr_dtype1);
+ H5Tclose(attr_dtype2);
+ H5Aclose(attr_id);
+ H5Aclose(attr_id2);
+ H5Tclose(type_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to check that creating an attribute with a
+ * NULL dataspace is not problematic.
+ */
+static int
+test_create_attribute_with_null_space(void)
+{
+ htri_t attr_exists;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t attr_id = H5I_INVALID_HID;
+ hid_t attr_dtype = H5I_INVALID_HID;
+ hid_t space_id = H5I_INVALID_HID;
+
+ TESTING("attribute creation with a NULL dataspace");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC)) {
+ SKIPPED();
+ HDprintf(
+ " API functions for basic file, group, or attribute aren't supported with this connector\n");
+ return 0;
+ }
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file\n");
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, ATTRIBUTE_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group\n");
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, ATTRIBUTE_CREATE_NULL_DATASPACE_TEST_SUBGROUP_NAME,
+ H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container subgroup\n");
+ goto error;
+ }
+
+ if ((space_id = H5Screate(H5S_NULL)) < 0)
+ TEST_ERROR;
+
+ if ((attr_dtype = generate_random_datatype(H5T_NO_CLASS, TRUE)) < 0)
+ TEST_ERROR;
+
+ if ((attr_id = H5Acreate2(group_id, ATTRIBUTE_CREATE_NULL_DATASPACE_TEST_ATTR_NAME, attr_dtype, space_id,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create attribute\n");
+ goto error;
+ }
+
+ /* Verify the attribute has been created */
+ if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_CREATE_NULL_DATASPACE_TEST_ATTR_NAME)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if attribute exists\n");
+ goto error;
+ }
+
+ if (!attr_exists) {
+ H5_FAILED();
+ HDprintf(" attribute did not exist\n");
+ goto error;
+ }
+
+ if (H5Aclose(attr_id) < 0)
+ TEST_ERROR;
+
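+ /* Re-open the attribute to verify that an attribute with a NULL dataspace can be opened again after being closed */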
+ if ((attr_id = H5Aopen(group_id, ATTRIBUTE_CREATE_NULL_DATASPACE_TEST_ATTR_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open attribute\n");
+ goto error;
+ }
+
+ if (H5Sclose(space_id) < 0)
+ TEST_ERROR;
+ if (H5Tclose(attr_dtype) < 0)
+ TEST_ERROR;
+ if (H5Aclose(attr_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(space_id);
+ H5Tclose(attr_dtype);
+ H5Aclose(attr_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to check that creating an attribute with a
+ * scalar dataspace is not problematic.
+ */
+static int
+test_create_attribute_with_scalar_space(void)
+{
+ htri_t attr_exists;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t attr_id = H5I_INVALID_HID;
+ hid_t attr_dtype = H5I_INVALID_HID;
+ hid_t space_id = H5I_INVALID_HID;
+
+ TESTING("attribute creation with a SCALAR dataspace");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC)) {
+ SKIPPED();
+ HDprintf(
+ " API functions for basic file, group, or attribute aren't supported with this connector\n");
+ return 0;
+ }
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file\n");
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, ATTRIBUTE_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group\n");
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, ATTRIBUTE_CREATE_SCALAR_DATASPACE_TEST_SUBGROUP_NAME,
+ H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container subgroup\n");
+ goto error;
+ }
+
+ if ((space_id = H5Screate(H5S_SCALAR)) < 0)
+ TEST_ERROR;
+
+ if ((attr_dtype = generate_random_datatype(H5T_NO_CLASS, TRUE)) < 0)
+ TEST_ERROR;
+
+ if ((attr_id = H5Acreate2(group_id, ATTRIBUTE_CREATE_SCALAR_DATASPACE_TEST_ATTR_NAME, attr_dtype,
+ space_id, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create attribute\n");
+ goto error;
+ }
+
+ /* Verify the attribute has been created */
+ if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_CREATE_SCALAR_DATASPACE_TEST_ATTR_NAME)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if attribute exists\n");
+ goto error;
+ }
+
+ if (!attr_exists) {
+ H5_FAILED();
+ HDprintf(" attribute did not exist\n");
+ goto error;
+ }
+
+ if (H5Aclose(attr_id) < 0)
+ TEST_ERROR;
+
+ if ((attr_id = H5Aopen(group_id, ATTRIBUTE_CREATE_SCALAR_DATASPACE_TEST_ATTR_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open attribute\n");
+ goto error;
+ }
+
+ if (H5Sclose(space_id) < 0)
+ TEST_ERROR;
+ if (H5Tclose(attr_dtype) < 0)
+ TEST_ERROR;
+ if (H5Aclose(attr_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(space_id);
+ H5Tclose(attr_dtype);
+ H5Aclose(attr_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to check that a space in an attribute's name
+ * is not problematic.
+ */
+static int
+test_create_attribute_with_space_in_name(void)
+{
+ htri_t attr_exists;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t attr_id = H5I_INVALID_HID;
+ hid_t attr_id2 = H5I_INVALID_HID;
+ hid_t attr_dtype = H5I_INVALID_HID;
+ hid_t space_id = H5I_INVALID_HID;
+
+ TESTING("attribute creation with a space in attribute's name");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC)) {
+ SKIPPED();
+ HDprintf(
+ " API functions for basic file, group, or attribute aren't supported with this connector\n");
+ return 0;
+ }
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file\n");
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, ATTRIBUTE_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group\n");
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, ATTRIBUTE_CREATE_WITH_SPACE_IN_NAME_GROUP_NAME, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container group '%s'\n",
+ ATTRIBUTE_CREATE_WITH_SPACE_IN_NAME_GROUP_NAME);
+ goto error;
+ }
+
+ if ((space_id =
+ generate_random_dataspace(ATTRIBUTE_CREATE_WITH_SPACE_IN_NAME_SPACE_RANK, NULL, NULL, TRUE)) < 0)
+ TEST_ERROR;
+
+ if ((attr_dtype = generate_random_datatype(H5T_NO_CLASS, TRUE)) < 0)
+ TEST_ERROR;
+
+ if ((attr_id = H5Acreate2(group_id, ATTRIBUTE_CREATE_WITH_SPACE_IN_NAME_ATTR_NAME, attr_dtype, space_id,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create attribute\n");
+ goto error;
+ }
+
+ /* Verify the attribute has been created */
+ if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_CREATE_WITH_SPACE_IN_NAME_ATTR_NAME)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if attribute exists\n");
+ goto error;
+ }
+
+ if (!attr_exists) {
+ H5_FAILED();
+ HDprintf(" attribute did not exist\n");
+ goto error;
+ }
+
+ if (H5Sclose(space_id) < 0)
+ TEST_ERROR;
+ if (H5Tclose(attr_dtype) < 0)
+ TEST_ERROR;
+ if (H5Aclose(attr_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(space_id);
+ H5Tclose(attr_dtype);
+ H5Aclose(attr_id);
+ H5Aclose(attr_id2);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to check that an attribute can't be created when
+ * H5Acreate(_by_name) is passed invalid parameters.
+ */
+static int
+test_create_attribute_invalid_params(void)
+{
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t attr_id = H5I_INVALID_HID;
+ hid_t attr_dtype = H5I_INVALID_HID;
+ hid_t space_id = H5I_INVALID_HID;
+
+ TESTING_MULTIPART("attribute creation with invalid parameters");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC)) {
+ SKIPPED();
+ HDprintf(
+ " API functions for basic file, group, or attribute aren't supported with this connector\n");
+ return 0;
+ }
+
+ TESTING_2("test setup");
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, ATTRIBUTE_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", ATTRIBUTE_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, ATTRIBUTE_CREATE_INVALID_PARAMS_GROUP_NAME, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container group\n");
+ goto error;
+ }
+
+ if ((space_id = generate_random_dataspace(ATTRIBUTE_CREATE_INVALID_PARAMS_SPACE_RANK, NULL, NULL, TRUE)) <
+ 0)
+ TEST_ERROR;
+ if ((attr_dtype = generate_random_datatype(H5T_NO_CLASS, TRUE)) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
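+ /* Each part below passes a single invalid argument (location ID, name, datatype,
+ * dataspace, or property list) to H5Acreate2/H5Acreate_by_name and verifies that
+ * the call fails instead of returning a valid attribute ID. */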
+ BEGIN_MULTIPART
+ {
+ PART_BEGIN(H5Acreate_invalid_loc_id)
+ {
+ TESTING_2("H5Acreate with invalid loc_id");
+
+ H5E_BEGIN_TRY
+ {
+ attr_id = H5Acreate2(H5I_INVALID_HID, ATTRIBUTE_CREATE_INVALID_PARAMS_ATTR_NAME, attr_dtype,
+ space_id, H5P_DEFAULT, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (attr_id >= 0) {
+ H5_FAILED();
+ HDprintf(" created attribute using H5Acreate with an invalid loc_id!\n");
+ H5Aclose(attr_id);
+ PART_ERROR(H5Acreate_invalid_loc_id);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Acreate_invalid_loc_id);
+
+ PART_BEGIN(H5Acreate_invalid_attr_name)
+ {
+ TESTING_2("H5Acreate with invalid attribute name");
+
+ H5E_BEGIN_TRY
+ {
+ attr_id = H5Acreate2(group_id, NULL, attr_dtype, space_id, H5P_DEFAULT, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (attr_id >= 0) {
+ H5_FAILED();
+ HDprintf(" created attribute using H5Acreate with a NULL name!\n");
+ H5Aclose(attr_id);
+ PART_ERROR(H5Acreate_invalid_attr_name);
+ }
+
+ H5E_BEGIN_TRY
+ {
+ attr_id = H5Acreate2(group_id, "", attr_dtype, space_id, H5P_DEFAULT, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (attr_id >= 0) {
+ H5_FAILED();
+ HDprintf(" created attribute using H5Acreate with an invalid name of ''!\n");
+ H5Aclose(attr_id);
+ PART_ERROR(H5Acreate_invalid_attr_name);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Acreate_invalid_attr_name);
+
+ PART_BEGIN(H5Acreate_invalid_datatype)
+ {
+ TESTING_2("H5Acreate with an invalid datatype");
+
+ H5E_BEGIN_TRY
+ {
+ attr_id = H5Acreate2(group_id, ATTRIBUTE_CREATE_INVALID_PARAMS_ATTR_NAME, H5I_INVALID_HID,
+ space_id, H5P_DEFAULT, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (attr_id >= 0) {
+ H5_FAILED();
+ HDprintf(" created attribute using H5Acreate with an invalid datatype!\n");
+ H5Aclose(attr_id);
+ PART_ERROR(H5Acreate_invalid_datatype);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Acreate_invalid_datatype);
+
+ PART_BEGIN(H5Acreate_invalid_dataspace)
+ {
+ TESTING_2("H5Acreate with an invalid dataspace");
+
+ H5E_BEGIN_TRY
+ {
+ attr_id = H5Acreate2(group_id, ATTRIBUTE_CREATE_INVALID_PARAMS_ATTR_NAME, attr_dtype,
+ H5I_INVALID_HID, H5P_DEFAULT, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (attr_id >= 0) {
+ H5_FAILED();
+ HDprintf(" created attribute using H5Acreate with an invalid dataspace!\n");
+ H5Aclose(attr_id);
+ PART_ERROR(H5Acreate_invalid_dataspace);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Acreate_invalid_dataspace);
+
+ PART_BEGIN(H5Acreate_invalid_acpl)
+ {
+ TESTING_2("H5Acreate with an invalid ACPL");
+
+ H5E_BEGIN_TRY
+ {
+ attr_id = H5Acreate2(group_id, ATTRIBUTE_CREATE_INVALID_PARAMS_ATTR_NAME, attr_dtype,
+ space_id, H5I_INVALID_HID, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (attr_id >= 0) {
+ H5_FAILED();
+ HDprintf(" created attribute using H5Acreate with an invalid ACPL!\n");
+ H5Aclose(attr_id);
+ PART_ERROR(H5Acreate_invalid_acpl);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Acreate_invalid_acpl);
+
+ PART_BEGIN(H5Acreate_invalid_aapl)
+ {
+ TESTING_2("H5Acreate with an invalid AAPL");
+#ifndef NO_INVALID_PROPERTY_LIST_TESTS
+ H5E_BEGIN_TRY
+ {
+ attr_id = H5Acreate2(group_id, ATTRIBUTE_CREATE_INVALID_PARAMS_ATTR_NAME, attr_dtype,
+ space_id, H5P_DEFAULT, H5I_INVALID_HID);
+ }
+ H5E_END_TRY;
+
+ if (attr_id >= 0) {
+ H5_FAILED();
+ HDprintf(" created attribute using H5Acreate with an invalid AAPL!\n");
+ H5Aclose(attr_id);
+ PART_ERROR(H5Acreate_invalid_aapl);
+ }
+
+ PASSED();
+#else
+ SKIPPED();
+ PART_EMPTY(H5Acreate_invalid_aapl);
+#endif
+ }
+ PART_END(H5Acreate_invalid_aapl);
+
+ PART_BEGIN(H5Acreate_by_name_invalid_loc_id)
+ {
+ TESTING_2("H5Acreate_by_name with an invalid loc_id");
+
+ H5E_BEGIN_TRY
+ {
+ attr_id = H5Acreate_by_name(H5I_INVALID_HID, ATTRIBUTE_CREATE_INVALID_PARAMS_GROUP_NAME,
+ ATTRIBUTE_CREATE_INVALID_PARAMS_ATTR_NAME, attr_dtype, space_id,
+ H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (attr_id >= 0) {
+ H5_FAILED();
+ HDprintf(" created attribute using H5Acreate_by_name with an invalid loc_id!\n");
+ H5Aclose(attr_id);
+ PART_ERROR(H5Acreate_by_name_invalid_loc_id);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Acreate_by_name_invalid_loc_id);
+
+ PART_BEGIN(H5Acreate_by_name_invalid_obj_name)
+ {
+ TESTING_2("H5Acreate_by_name with invalid object name");
+
+ H5E_BEGIN_TRY
+ {
+ attr_id = H5Acreate_by_name(group_id, NULL, ATTRIBUTE_CREATE_INVALID_PARAMS_ATTR_NAME,
+ attr_dtype, space_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (attr_id >= 0) {
+ H5_FAILED();
+ HDprintf(" created attribute using H5Acreate_by_name with a NULL object name!\n");
+ H5Aclose(attr_id);
+ PART_ERROR(H5Acreate_by_name_invalid_obj_name);
+ }
+
+ H5E_BEGIN_TRY
+ {
+ attr_id = H5Acreate_by_name(group_id, "", ATTRIBUTE_CREATE_INVALID_PARAMS_ATTR_NAME,
+ attr_dtype, space_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (attr_id >= 0) {
+ H5_FAILED();
+ HDprintf(
+ " created attribute using H5Acreate_by_name with an invalid object name of ''!\n");
+ H5Aclose(attr_id);
+ PART_ERROR(H5Acreate_by_name_invalid_obj_name);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Acreate_by_name_invalid_obj_name);
+
+ PART_BEGIN(H5Acreate_by_name_invalid_attr_name)
+ {
+ TESTING_2("H5Acreate_by_name with invalid attribute name");
+
+ H5E_BEGIN_TRY
+ {
+ attr_id = H5Acreate_by_name(container_group, ATTRIBUTE_CREATE_INVALID_PARAMS_GROUP_NAME, NULL,
+ attr_dtype, space_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (attr_id >= 0) {
+ H5_FAILED();
+ HDprintf(" created attribute using H5Acreate_by_name with a NULL attribute name!\n");
+ H5Aclose(attr_id);
+ PART_ERROR(H5Acreate_by_name_invalid_attr_name);
+ }
+
+ H5E_BEGIN_TRY
+ {
+ attr_id = H5Acreate_by_name(container_group, ATTRIBUTE_CREATE_INVALID_PARAMS_GROUP_NAME, "",
+ attr_dtype, space_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (attr_id >= 0) {
+ H5_FAILED();
+ HDprintf(
+ " created attribute using H5Acreate_by_name with an invalid attribute name of ''!\n");
+ H5Aclose(attr_id);
+ PART_ERROR(H5Acreate_by_name_invalid_attr_name);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Acreate_by_name_invalid_attr_name);
+
+ PART_BEGIN(H5Acreate_by_name_invalid_datatype)
+ {
+ TESTING_2("H5Acreate_by_name with invalid datatype");
+
+ H5E_BEGIN_TRY
+ {
+ attr_id = H5Acreate_by_name(container_group, ATTRIBUTE_CREATE_INVALID_PARAMS_GROUP_NAME,
+ ATTRIBUTE_CREATE_INVALID_PARAMS_ATTR_NAME, H5I_INVALID_HID,
+ space_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (attr_id >= 0) {
+ H5_FAILED();
+ HDprintf(" created attribute using H5Acreate_by_name with an invalid datatype!\n");
+ H5Aclose(attr_id);
+ PART_ERROR(H5Acreate_by_name_invalid_datatype);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Acreate_by_name_invalid_datatype);
+
+ PART_BEGIN(H5Acreate_by_name_invalid_dataspace)
+ {
+ TESTING_2("H5Acreate_by_name with invalid dataspace");
+
+ H5E_BEGIN_TRY
+ {
+ attr_id = H5Acreate_by_name(container_group, ATTRIBUTE_CREATE_INVALID_PARAMS_GROUP_NAME,
+ ATTRIBUTE_CREATE_INVALID_PARAMS_ATTR_NAME, attr_dtype,
+ H5I_INVALID_HID, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (attr_id >= 0) {
+ H5_FAILED();
+ HDprintf(" created attribute using H5Acreate_by_name with an invalid dataspace!\n");
+ H5Aclose(attr_id);
+ PART_ERROR(H5Acreate_by_name_invalid_dataspace);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Acreate_by_name_invalid_dataspace);
+
+ PART_BEGIN(H5Acreate_by_name_invalid_acpl)
+ {
+ TESTING_2("H5Acreate_by_name with invalid ACPL");
+
+ H5E_BEGIN_TRY
+ {
+ attr_id = H5Acreate_by_name(container_group, ATTRIBUTE_CREATE_INVALID_PARAMS_GROUP_NAME,
+ ATTRIBUTE_CREATE_INVALID_PARAMS_ATTR_NAME, attr_dtype, space_id,
+ H5I_INVALID_HID, H5P_DEFAULT, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (attr_id >= 0) {
+ H5_FAILED();
+ HDprintf(" created attribute using H5Acreate_by_name with an invalid ACPL!\n");
+ H5Aclose(attr_id);
+ PART_ERROR(H5Acreate_by_name_invalid_acpl);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Acreate_by_name_invalid_acpl);
+
+ PART_BEGIN(H5Acreate_by_name_invalid_aapl)
+ {
+ TESTING_2("H5Acreate_by_name with invalid AAPL");
+#ifndef NO_INVALID_PROPERTY_LIST_TESTS
+ H5E_BEGIN_TRY
+ {
+ attr_id = H5Acreate_by_name(container_group, ATTRIBUTE_CREATE_INVALID_PARAMS_GROUP_NAME,
+ ATTRIBUTE_CREATE_INVALID_PARAMS_ATTR_NAME, attr_dtype, space_id,
+ H5P_DEFAULT, H5I_INVALID_HID, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (attr_id >= 0) {
+ H5_FAILED();
+ HDprintf(" created attribute using H5Acreate_by_name with an invalid AAPL!\n");
+ H5Aclose(attr_id);
+ PART_ERROR(H5Acreate_by_name_invalid_aapl);
+ }
+
+ PASSED();
+#else
+ SKIPPED();
+ PART_EMPTY(H5Acreate_by_name_invalid_aapl);
+#endif
+ }
+ PART_END(H5Acreate_by_name_invalid_aapl);
+
+ PART_BEGIN(H5Acreate_by_name_invalid_lapl)
+ {
+ TESTING_2("H5Acreate_by_name with invalid LAPL");
+
+ H5E_BEGIN_TRY
+ {
+ attr_id = H5Acreate_by_name(container_group, ATTRIBUTE_CREATE_INVALID_PARAMS_GROUP_NAME,
+ ATTRIBUTE_CREATE_INVALID_PARAMS_ATTR_NAME, attr_dtype, space_id,
+ H5P_DEFAULT, H5P_DEFAULT, H5I_INVALID_HID);
+ }
+ H5E_END_TRY;
+
+ if (attr_id >= 0) {
+ H5_FAILED();
+ HDprintf(" created attribute using H5Acreate_by_name with an invalid LAPL!\n");
+ H5Aclose(attr_id);
+ PART_ERROR(H5Acreate_by_name_invalid_lapl);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Acreate_by_name_invalid_lapl);
+ }
+ END_MULTIPART;
+
+ TESTING_2("test cleanup");
+
+ if (H5Sclose(space_id) < 0)
+ TEST_ERROR;
+ if (H5Tclose(attr_dtype) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(space_id);
+ H5Tclose(attr_dtype);
+ H5Aclose(attr_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test for H5Aopen(_by_name/_by_idx).
+ */
+static int
+test_open_attribute(void)
+{
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t attr_id = H5I_INVALID_HID;
+ hid_t space_id = H5I_INVALID_HID;
+ hid_t attr_type = H5I_INVALID_HID;
+ hid_t gcpl_id = H5I_INVALID_HID;
+
+ TESTING_MULTIPART("attribute opening");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC)) {
+ SKIPPED();
+ HDprintf(
+ " API functions for basic file, group, or attribute aren't supported with this connector\n");
+ return 0;
+ }
+
+ TESTING_2("test setup");
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, ATTRIBUTE_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", ATTRIBUTE_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((gcpl_id = H5Pcreate(H5P_GROUP_CREATE)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create GCPL for attribute creation order tracking\n");
+ goto error;
+ }
+
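+ /* Track attribute creation order on the group so the H5Aopen_by_idx parts below can index by H5_INDEX_CRT_ORDER */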
+ if (H5Pset_attr_creation_order(gcpl_id, H5P_CRT_ORDER_TRACKED) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't set attribute creation order tracking\n");
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, ATTRIBUTE_OPEN_TEST_GROUP_NAME, H5P_DEFAULT, gcpl_id,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container group '%s'\n", ATTRIBUTE_OPEN_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((space_id = generate_random_dataspace(ATTRIBUTE_OPEN_TEST_SPACE_RANK, NULL, NULL, TRUE)) < 0)
+ TEST_ERROR;
+
+ if ((attr_type = generate_random_datatype(H5T_NO_CLASS, TRUE)) < 0)
+ TEST_ERROR;
+
+ /* Create several attributes */
+ if ((attr_id = H5Acreate2(group_id, ATTRIBUTE_OPEN_TEST_ATTR_NAME, attr_type, space_id, H5P_DEFAULT,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create attribute '%s'\n", ATTRIBUTE_OPEN_TEST_ATTR_NAME);
+ goto error;
+ }
+
+ if (H5Aclose(attr_id) < 0)
+ TEST_ERROR;
+
+ if ((attr_id = H5Acreate2(group_id, ATTRIBUTE_OPEN_TEST_ATTR_NAME2, attr_type, space_id, H5P_DEFAULT,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create attribute '%s'\n", ATTRIBUTE_OPEN_TEST_ATTR_NAME2);
+ goto error;
+ }
+
+ if (H5Aclose(attr_id) < 0)
+ TEST_ERROR;
+
+ if ((attr_id = H5Acreate2(group_id, ATTRIBUTE_OPEN_TEST_ATTR_NAME3, attr_type, space_id, H5P_DEFAULT,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create attribute '%s'\n", ATTRIBUTE_OPEN_TEST_ATTR_NAME3);
+ goto error;
+ }
+
+ if (H5Aclose(attr_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
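+ /* The parts below open the attributes with H5Aopen, H5Aopen_by_name, and H5Aopen_by_idx,
+ * the latter indexed by creation order and by name in both increasing and decreasing order */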
+ BEGIN_MULTIPART
+ {
+ PART_BEGIN(H5Aopen)
+ {
+ TESTING_2("H5Aopen");
+
+ if ((attr_id = H5Aopen(group_id, ATTRIBUTE_OPEN_TEST_ATTR_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open attribute '%s' using H5Aopen\n", ATTRIBUTE_OPEN_TEST_ATTR_NAME);
+ PART_ERROR(H5Aopen);
+ }
+
+ if (H5Aclose(attr_id) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't close attribute '%s'\n", ATTRIBUTE_OPEN_TEST_ATTR_NAME);
+ PART_ERROR(H5Aopen);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Aopen);
+
+ PART_BEGIN(H5Aopen_by_name)
+ {
+ TESTING_2("H5Aopen_by_name");
+
+ if ((attr_id = H5Aopen_by_name(container_group, ATTRIBUTE_OPEN_TEST_GROUP_NAME,
+ ATTRIBUTE_OPEN_TEST_ATTR_NAME, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open attribute '%s' using H5Aopen_by_name\n",
+ ATTRIBUTE_OPEN_TEST_ATTR_NAME);
+ PART_ERROR(H5Aopen_by_name);
+ }
+
+ if (H5Aclose(attr_id) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't close attribute '%s'\n", ATTRIBUTE_OPEN_TEST_ATTR_NAME);
+ PART_ERROR(H5Aopen_by_name);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Aopen_by_name);
+
+ PART_BEGIN(H5Aopen_by_idx_crt_order_increasing)
+ {
+ TESTING_2("H5Aopen_by_idx by creation order in increasing order");
+
+ if ((attr_id = H5Aopen_by_idx(container_group, ATTRIBUTE_OPEN_TEST_GROUP_NAME, H5_INDEX_CRT_ORDER,
+ H5_ITER_INC, 0, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open attribute '%s' at index %d using H5Aopen_by_idx by creation "
+ "order in increasing order\n",
+ ATTRIBUTE_OPEN_TEST_ATTR_NAME, 0);
+ PART_ERROR(H5Aopen_by_idx_crt_order_increasing);
+ }
+
+ if (H5Aclose(attr_id) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't close attribute '%s'\n", ATTRIBUTE_OPEN_TEST_ATTR_NAME);
+ PART_ERROR(H5Aopen_by_idx_crt_order_increasing);
+ }
+
+ if ((attr_id = H5Aopen_by_idx(container_group, ATTRIBUTE_OPEN_TEST_GROUP_NAME, H5_INDEX_CRT_ORDER,
+ H5_ITER_INC, 1, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open attribute '%s' at index %d using H5Aopen_by_idx by creation "
+ "order in increasing order\n",
+ ATTRIBUTE_OPEN_TEST_ATTR_NAME2, 1);
+ PART_ERROR(H5Aopen_by_idx_crt_order_increasing);
+ }
+
+ if (H5Aclose(attr_id) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't close attribute '%s'\n", ATTRIBUTE_OPEN_TEST_ATTR_NAME2);
+ PART_ERROR(H5Aopen_by_idx_crt_order_increasing);
+ }
+
+ if ((attr_id = H5Aopen_by_idx(container_group, ATTRIBUTE_OPEN_TEST_GROUP_NAME, H5_INDEX_CRT_ORDER,
+ H5_ITER_INC, 2, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open attribute '%s' at index %d using H5Aopen_by_idx by creation "
+ "order in increasing order\n",
+ ATTRIBUTE_OPEN_TEST_ATTR_NAME3, 2);
+ PART_ERROR(H5Aopen_by_idx_crt_order_increasing);
+ }
+
+ if (H5Aclose(attr_id) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't close attribute '%s'\n", ATTRIBUTE_OPEN_TEST_ATTR_NAME3);
+ PART_ERROR(H5Aopen_by_idx_crt_order_increasing);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Aopen_by_idx_crt_order_increasing);
+
+ PART_BEGIN(H5Aopen_by_idx_crt_order_decreasing)
+ {
+ TESTING_2("H5Aopen_by_idx by creation order in decreasing order");
+
+ if ((attr_id = H5Aopen_by_idx(container_group, ATTRIBUTE_OPEN_TEST_GROUP_NAME, H5_INDEX_CRT_ORDER,
+ H5_ITER_DEC, 2, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open attribute '%s' at index %d using H5Aopen_by_idx by creation "
+ "order in decreasing order\n",
+ ATTRIBUTE_OPEN_TEST_ATTR_NAME, 2);
+ PART_ERROR(H5Aopen_by_idx_crt_order_decreasing);
+ }
+
+ if (H5Aclose(attr_id) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't close attribute '%s'\n", ATTRIBUTE_OPEN_TEST_ATTR_NAME);
+ PART_ERROR(H5Aopen_by_idx_crt_order_decreasing);
+ }
+
+ if ((attr_id = H5Aopen_by_idx(container_group, ATTRIBUTE_OPEN_TEST_GROUP_NAME, H5_INDEX_CRT_ORDER,
+ H5_ITER_DEC, 1, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open attribute '%s' at index %d using H5Aopen_by_idx by creation "
+ "order in decreasing order\n",
+ ATTRIBUTE_OPEN_TEST_ATTR_NAME2, 1);
+ PART_ERROR(H5Aopen_by_idx_crt_order_decreasing);
+ }
+
+ if (H5Aclose(attr_id) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't close attribute '%s'\n", ATTRIBUTE_OPEN_TEST_ATTR_NAME2);
+ PART_ERROR(H5Aopen_by_idx_crt_order_decreasing);
+ }
+
+ if ((attr_id = H5Aopen_by_idx(container_group, ATTRIBUTE_OPEN_TEST_GROUP_NAME, H5_INDEX_CRT_ORDER,
+ H5_ITER_DEC, 0, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open attribute '%s' at index %d using H5Aopen_by_idx by creation "
+ "order in decreasing order\n",
+ ATTRIBUTE_OPEN_TEST_ATTR_NAME3, 0);
+ PART_ERROR(H5Aopen_by_idx_crt_order_decreasing);
+ }
+
+ if (H5Aclose(attr_id) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't close attribute '%s'\n", ATTRIBUTE_OPEN_TEST_ATTR_NAME3);
+ PART_ERROR(H5Aopen_by_idx_crt_order_decreasing);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Aopen_by_idx_crt_order_decreasing);
+
+ PART_BEGIN(H5Aopen_by_idx_name_order_increasing)
+ {
+ TESTING_2("H5Aopen_by_idx by alphabetical order in increasing order");
+
+ if ((attr_id = H5Aopen_by_idx(container_group, ATTRIBUTE_OPEN_TEST_GROUP_NAME, H5_INDEX_NAME,
+ H5_ITER_INC, 0, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open attribute '%s' at index %d using H5Aopen_by_idx by alphabetical "
+ "order in increasing order\n",
+ ATTRIBUTE_OPEN_TEST_ATTR_NAME, 0);
+ PART_ERROR(H5Aopen_by_idx_name_order_increasing);
+ }
+
+ if (H5Aclose(attr_id) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't close attribute '%s'\n", ATTRIBUTE_OPEN_TEST_ATTR_NAME);
+ PART_ERROR(H5Aopen_by_idx_name_order_increasing);
+ }
+
+ if ((attr_id = H5Aopen_by_idx(container_group, ATTRIBUTE_OPEN_TEST_GROUP_NAME, H5_INDEX_NAME,
+ H5_ITER_INC, 1, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open attribute '%s' at index %d using H5Aopen_by_idx by alphabetical "
+ "order in increasing order\n",
+ ATTRIBUTE_OPEN_TEST_ATTR_NAME2, 1);
+ PART_ERROR(H5Aopen_by_idx_name_order_increasing);
+ }
+
+ if (H5Aclose(attr_id) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't close attribute '%s'\n", ATTRIBUTE_OPEN_TEST_ATTR_NAME2);
+ PART_ERROR(H5Aopen_by_idx_name_order_increasing);
+ }
+
+ if ((attr_id = H5Aopen_by_idx(container_group, ATTRIBUTE_OPEN_TEST_GROUP_NAME, H5_INDEX_NAME,
+ H5_ITER_INC, 2, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open attribute '%s' at index %d using H5Aopen_by_idx by alphabetical "
+ "order in increasing order\n",
+ ATTRIBUTE_OPEN_TEST_ATTR_NAME3, 2);
+ PART_ERROR(H5Aopen_by_idx_name_order_increasing);
+ }
+
+ if (H5Aclose(attr_id) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't close attribute '%s'\n", ATTRIBUTE_OPEN_TEST_ATTR_NAME3);
+ PART_ERROR(H5Aopen_by_idx_name_order_increasing);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Aopen_by_idx_name_order_increasing);
+
+ PART_BEGIN(H5Aopen_by_idx_name_order_decreasing)
+ {
+ TESTING_2("H5Aopen_by_idx by alphabetical order in decreasing order");
+#ifndef NO_DECREASING_ALPHA_ITER_ORDER
+ if ((attr_id = H5Aopen_by_idx(container_group, ATTRIBUTE_OPEN_TEST_GROUP_NAME, H5_INDEX_NAME,
+ H5_ITER_DEC, 2, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open attribute '%s' at index %lld using H5Aopen_by_idx by "
+ "alphabetical order in decreasing order\n",
+ ATTRIBUTE_OPEN_TEST_ATTR_NAME, 2);
+ PART_ERROR(H5Aopen_by_idx_name_order_decreasing);
+ }
+
+ if (H5Aclose(attr_id) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't close attribute '%s'\n", ATTRIBUTE_OPEN_TEST_ATTR_NAME);
+ PART_ERROR(H5Aopen_by_idx_name_order_decreasing);
+ }
+
+ if ((attr_id = H5Aopen_by_idx(container_group, ATTRIBUTE_OPEN_TEST_GROUP_NAME, H5_INDEX_NAME,
+ H5_ITER_DEC, 1, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open attribute '%s' at index %lld using H5Aopen_by_idx by "
+ "alphabetical order in decreasing order\n",
+ ATTRIBUTE_OPEN_TEST_ATTR_NAME2, 1);
+ PART_ERROR(H5Aopen_by_idx_name_order_decreasing);
+ }
+
+ if (H5Aclose(attr_id) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't close attribute '%s'\n", ATTRIBUTE_OPEN_TEST_ATTR_NAME2);
+ PART_ERROR(H5Aopen_by_idx_name_order_decreasing);
+ }
+
+ if ((attr_id = H5Aopen_by_idx(container_group, ATTRIBUTE_OPEN_TEST_GROUP_NAME, H5_INDEX_NAME,
+ H5_ITER_DEC, 0, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open attribute '%s' at index %lld using H5Aopen_by_idx by "
+ "alphabetical order in decreasing order\n",
+ ATTRIBUTE_OPEN_TEST_ATTR_NAME3, 0);
+ PART_ERROR(H5Aopen_by_idx_name_order_decreasing);
+ }
+
+ if (H5Aclose(attr_id) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't close attribute '%s'\n", ATTRIBUTE_OPEN_TEST_ATTR_NAME3);
+ PART_ERROR(H5Aopen_by_idx_name_order_decreasing);
+ }
+
+ PASSED();
+#else
+ SKIPPED();
+ PART_EMPTY(H5Aopen_by_idx_name_order_decreasing);
+#endif
+ }
+ PART_END(H5Aopen_by_idx_name_order_decreasing);
+ }
+ END_MULTIPART;
+
+ TESTING_2("test cleanup");
+
+ if (H5Sclose(space_id) < 0)
+ TEST_ERROR;
+ if (H5Tclose(attr_type) < 0)
+ TEST_ERROR;
+ if (H5Pclose(gcpl_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(space_id);
+ H5Tclose(attr_type);
+ H5Aclose(attr_id);
+ H5Pclose(gcpl_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to check that an attribute can't be opened when
+ * H5Aopen(_by_name/_by_idx) is passed invalid parameters.
+ */
+static int
+test_open_attribute_invalid_params(void)
+{
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t attr_id = H5I_INVALID_HID;
+ hid_t space_id = H5I_INVALID_HID;
+ hid_t attr_type = H5I_INVALID_HID;
+
+ TESTING_MULTIPART("attribute opening with invalid parameters");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC)) {
+ SKIPPED();
+ HDprintf(
+ " API functions for basic file, group, or attribute aren't supported with this connector\n");
+ return 0;
+ }
+
+ TESTING_2("test setup");
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, ATTRIBUTE_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", ATTRIBUTE_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, ATTRIBUTE_OPEN_INVALID_PARAMS_TEST_GROUP_NAME, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container group '%s'\n", ATTRIBUTE_OPEN_INVALID_PARAMS_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((space_id =
+ generate_random_dataspace(ATTRIBUTE_OPEN_INVALID_PARAMS_TEST_SPACE_RANK, NULL, NULL, TRUE)) < 0)
+ TEST_ERROR;
+
+ if ((attr_type = generate_random_datatype(H5T_NO_CLASS, TRUE)) < 0)
+ TEST_ERROR;
+
+ if ((attr_id = H5Acreate2(group_id, ATTRIBUTE_OPEN_INVALID_PARAMS_TEST_ATTR_NAME, attr_type, space_id,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create attribute '%s'\n", ATTRIBUTE_OPEN_INVALID_PARAMS_TEST_ATTR_NAME);
+ goto error;
+ }
+
+ if (H5Aclose(attr_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ BEGIN_MULTIPART
+ {
+ PART_BEGIN(H5Aopen_invalid_loc_id)
+ {
+ TESTING_2("H5Aopen with an invalid loc_id");
+
+ H5E_BEGIN_TRY
+ {
+ attr_id = H5Aopen(H5I_INVALID_HID, ATTRIBUTE_OPEN_INVALID_PARAMS_TEST_ATTR_NAME, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (attr_id >= 0) {
+ H5_FAILED();
+ HDprintf(" opened attribute '%s' using H5Aopen with an invalid loc_id!\n",
+ ATTRIBUTE_OPEN_INVALID_PARAMS_TEST_ATTR_NAME);
+ H5Aclose(attr_id);
+ PART_ERROR(H5Aopen_invalid_loc_id);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Aopen_invalid_loc_id);
+
+ PART_BEGIN(H5Aopen_invalid_attr_name)
+ {
+ TESTING_2("H5Aopen with an invalid attribute name");
+
+ H5E_BEGIN_TRY
+ {
+ attr_id = H5Aopen(group_id, NULL, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (attr_id >= 0) {
+ H5_FAILED();
+ HDprintf(" opened attribute '%s' using H5Aopen with a NULL attribute name!\n",
+ ATTRIBUTE_OPEN_INVALID_PARAMS_TEST_ATTR_NAME);
+ H5Aclose(attr_id);
+ PART_ERROR(H5Aopen_invalid_attr_name);
+ }
+
+ H5E_BEGIN_TRY
+ {
+ attr_id = H5Aopen(group_id, "", H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (attr_id >= 0) {
+ H5_FAILED();
+ HDprintf(" opened attribute '%s' using H5Aopen with an invalid attribute name of ''!\n",
+ ATTRIBUTE_OPEN_INVALID_PARAMS_TEST_ATTR_NAME);
+ H5Aclose(attr_id);
+ PART_ERROR(H5Aopen_invalid_attr_name);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Aopen_invalid_attr_name);
+
+ PART_BEGIN(H5Aopen_invalid_aapl)
+ {
+ TESTING_2("H5Aopen with an invalid AAPL");
+
+ H5E_BEGIN_TRY
+ {
+ attr_id = H5Aopen(group_id, ATTRIBUTE_OPEN_INVALID_PARAMS_TEST_ATTR_NAME, H5I_INVALID_HID);
+ }
+ H5E_END_TRY;
+
+ if (attr_id >= 0) {
+ H5_FAILED();
+ HDprintf(" opened attribute '%s' using H5Aopen with an invalid AAPL!\n",
+ ATTRIBUTE_OPEN_INVALID_PARAMS_TEST_ATTR_NAME);
+ H5Aclose(attr_id);
+ PART_ERROR(H5Aopen_invalid_aapl);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Aopen_invalid_aapl);
+
+ PART_BEGIN(H5Aopen_by_name_invalid_loc_id)
+ {
+ TESTING_2("H5Aopen_by_name with an invalid loc_id");
+
+ H5E_BEGIN_TRY
+ {
+ attr_id =
+ H5Aopen_by_name(H5I_INVALID_HID, ATTRIBUTE_OPEN_INVALID_PARAMS_TEST_GROUP_NAME,
+ ATTRIBUTE_OPEN_INVALID_PARAMS_TEST_ATTR_NAME, H5P_DEFAULT, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (attr_id >= 0) {
+ H5_FAILED();
+ HDprintf(" opened attribute '%s' using H5Aopen_by_name with an invalid loc_id!\n",
+ ATTRIBUTE_OPEN_INVALID_PARAMS_TEST_ATTR_NAME);
+ H5Aclose(attr_id);
+ PART_ERROR(H5Aopen_by_name_invalid_loc_id);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Aopen_by_name_invalid_loc_id);
+
+ PART_BEGIN(H5Aopen_by_name_invalid_obj_name)
+ {
+ TESTING_2("H5Aopen_by_name with an invalid object name");
+
+ H5E_BEGIN_TRY
+ {
+ attr_id = H5Aopen_by_name(container_group, NULL, ATTRIBUTE_OPEN_INVALID_PARAMS_TEST_ATTR_NAME,
+ H5P_DEFAULT, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (attr_id >= 0) {
+ H5_FAILED();
+ HDprintf(" opened attribute '%s' using H5Aopen_by_name with a NULL object name!\n",
+ ATTRIBUTE_OPEN_INVALID_PARAMS_TEST_ATTR_NAME);
+ H5Aclose(attr_id);
+ PART_ERROR(H5Aopen_by_name_invalid_obj_name);
+ }
+
+ H5E_BEGIN_TRY
+ {
+ attr_id = H5Aopen_by_name(container_group, "", ATTRIBUTE_OPEN_INVALID_PARAMS_TEST_ATTR_NAME,
+ H5P_DEFAULT, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (attr_id >= 0) {
+ H5_FAILED();
+ HDprintf(
+ " opened attribute '%s' using H5Aopen_by_name with an invalid object name of ''!\n",
+ ATTRIBUTE_OPEN_INVALID_PARAMS_TEST_ATTR_NAME);
+ H5Aclose(attr_id);
+ PART_ERROR(H5Aopen_by_name_invalid_obj_name);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Aopen_by_name_invalid_obj_name);
+
+ PART_BEGIN(H5Aopen_by_name_invalid_attr_name)
+ {
+ TESTING_2("H5Aopen_by_name with an invalid attribute name");
+
+ H5E_BEGIN_TRY
+ {
+ attr_id = H5Aopen_by_name(container_group, ATTRIBUTE_OPEN_INVALID_PARAMS_TEST_GROUP_NAME,
+ NULL, H5P_DEFAULT, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (attr_id >= 0) {
+ H5_FAILED();
+ HDprintf(" opened attribute '%s' using H5Aopen_by_name with a NULL attribute name!\n",
+ ATTRIBUTE_OPEN_INVALID_PARAMS_TEST_ATTR_NAME);
+ H5Aclose(attr_id);
+ PART_ERROR(H5Aopen_by_name_invalid_attr_name);
+ }
+
+ H5E_BEGIN_TRY
+ {
+ attr_id = H5Aopen_by_name(container_group, ATTRIBUTE_OPEN_INVALID_PARAMS_TEST_GROUP_NAME, "",
+ H5P_DEFAULT, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (attr_id >= 0) {
+ H5_FAILED();
+ HDprintf(
+ " opened attribute '%s' using H5Aopen_by_name with an invalid attribute name of ''!\n",
+ ATTRIBUTE_OPEN_INVALID_PARAMS_TEST_ATTR_NAME);
+ H5Aclose(attr_id);
+ PART_ERROR(H5Aopen_by_name_invalid_attr_name);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Aopen_by_name_invalid_attr_name);
+
+ PART_BEGIN(H5Aopen_by_name_invalid_aapl)
+ {
+ TESTING_2("H5Aopen_by_name with an invalid AAPL");
+
+ H5E_BEGIN_TRY
+ {
+ attr_id = H5Aopen_by_name(container_group, ATTRIBUTE_OPEN_INVALID_PARAMS_TEST_GROUP_NAME,
+ ATTRIBUTE_OPEN_INVALID_PARAMS_TEST_ATTR_NAME, H5I_INVALID_HID,
+ H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (attr_id >= 0) {
+ H5_FAILED();
+ HDprintf(" opened attribute '%s' using H5Aopen_by_name with an invalid AAPL!\n",
+ ATTRIBUTE_OPEN_INVALID_PARAMS_TEST_ATTR_NAME);
+ H5Aclose(attr_id);
+ PART_ERROR(H5Aopen_by_name_invalid_aapl);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Aopen_by_name_invalid_aapl);
+
+ PART_BEGIN(H5Aopen_by_name_invalid_lapl)
+ {
+ TESTING_2("H5Aopen_by_name with an invalid LAPL");
+
+ H5E_BEGIN_TRY
+ {
+ attr_id = H5Aopen_by_name(container_group, ATTRIBUTE_OPEN_INVALID_PARAMS_TEST_GROUP_NAME,
+ ATTRIBUTE_OPEN_INVALID_PARAMS_TEST_ATTR_NAME, H5P_DEFAULT,
+ H5I_INVALID_HID);
+ }
+ H5E_END_TRY;
+
+ if (attr_id >= 0) {
+ H5_FAILED();
+ HDprintf(" opened attribute '%s' using H5Aopen_by_name with an invalid LAPL!\n",
+ ATTRIBUTE_OPEN_INVALID_PARAMS_TEST_ATTR_NAME);
+ H5Aclose(attr_id);
+ PART_ERROR(H5Aopen_by_name_invalid_lapl);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Aopen_by_name_invalid_lapl);
+
+ PART_BEGIN(H5Aopen_by_idx_invalid_loc_id)
+ {
+ TESTING_2("H5Aopen_by_idx with an invalid loc_id");
+
+ H5E_BEGIN_TRY
+ {
+ attr_id = H5Aopen_by_idx(H5I_INVALID_HID, ATTRIBUTE_OPEN_INVALID_PARAMS_TEST_GROUP_NAME,
+ H5_INDEX_NAME, H5_ITER_INC, 0, H5P_DEFAULT, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (attr_id >= 0) {
+ H5_FAILED();
+ HDprintf(" opened attribute '%s' using H5Aopen_by_idx with an invalid loc_id!\n",
+ ATTRIBUTE_OPEN_INVALID_PARAMS_TEST_ATTR_NAME);
+ H5Aclose(attr_id);
+ PART_ERROR(H5Aopen_by_idx_invalid_loc_id);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Aopen_by_idx_invalid_loc_id);
+
+ PART_BEGIN(H5Aopen_by_idx_invalid_obj_name)
+ {
+ TESTING_2("H5Aopen_by_idx with an invalid object name");
+
+ H5E_BEGIN_TRY
+ {
+ attr_id = H5Aopen_by_idx(container_group, NULL, H5_INDEX_NAME, H5_ITER_INC, 0, H5P_DEFAULT,
+ H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (attr_id >= 0) {
+ H5_FAILED();
+ HDprintf(" opened attribute '%s' using H5Aopen_by_idx with a NULL object name!\n",
+ ATTRIBUTE_OPEN_INVALID_PARAMS_TEST_ATTR_NAME);
+ H5Aclose(attr_id);
+ PART_ERROR(H5Aopen_by_idx_invalid_obj_name);
+ }
+
+ H5E_BEGIN_TRY
+ {
+ attr_id = H5Aopen_by_idx(container_group, "", H5_INDEX_NAME, H5_ITER_INC, 0, H5P_DEFAULT,
+ H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (attr_id >= 0) {
+ H5_FAILED();
+ HDprintf(
+ " opened attribute '%s' using H5Aopen_by_idx with an invalid object name of ''!\n",
+ ATTRIBUTE_OPEN_INVALID_PARAMS_TEST_ATTR_NAME);
+ H5Aclose(attr_id);
+ PART_ERROR(H5Aopen_by_idx_invalid_obj_name);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Aopen_by_idx_invalid_obj_name);
+
+ PART_BEGIN(H5Aopen_by_idx_invalid_index_type)
+ {
+ TESTING_2("H5Aopen_by_idx with an invalid index type");
+
+ H5E_BEGIN_TRY
+ {
+ attr_id = H5Aopen_by_idx(container_group, ATTRIBUTE_OPEN_INVALID_PARAMS_TEST_GROUP_NAME,
+ H5_INDEX_UNKNOWN, H5_ITER_INC, 0, H5P_DEFAULT, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (attr_id >= 0) {
+ H5_FAILED();
+ HDprintf(" opened attribute '%s' using H5Aopen_by_idx with invalid index type "
+ "H5_INDEX_UNKNOWN!\n",
+ ATTRIBUTE_OPEN_INVALID_PARAMS_TEST_ATTR_NAME);
+ H5Aclose(attr_id);
+ PART_ERROR(H5Aopen_by_idx_invalid_index_type);
+ }
+
+ H5E_BEGIN_TRY
+ {
+ attr_id = H5Aopen_by_idx(container_group, ATTRIBUTE_OPEN_INVALID_PARAMS_TEST_GROUP_NAME,
+ H5_INDEX_N, H5_ITER_INC, 0, H5P_DEFAULT, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (attr_id >= 0) {
+ H5_FAILED();
+ HDprintf(
+ " opened attribute '%s' using H5Aopen_by_idx with invalid index type H5_INDEX_N!\n",
+ ATTRIBUTE_OPEN_INVALID_PARAMS_TEST_ATTR_NAME);
+ H5Aclose(attr_id);
+ PART_ERROR(H5Aopen_by_idx_invalid_index_type);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Aopen_by_idx_invalid_index_type);
+
+ PART_BEGIN(H5Aopen_by_idx_invalid_iter_order)
+ {
+ TESTING_2("H5Aopen_by_idx with an invalid iteration order");
+
+ H5E_BEGIN_TRY
+ {
+ attr_id = H5Aopen_by_idx(container_group, ATTRIBUTE_OPEN_INVALID_PARAMS_TEST_GROUP_NAME,
+ H5_INDEX_NAME, H5_ITER_UNKNOWN, 0, H5P_DEFAULT, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (attr_id >= 0) {
+ H5_FAILED();
+ HDprintf(" opened attribute '%s' using H5Aopen_by_idx with invalid iteration order "
+ "H5_ITER_UNKNOWN!\n",
+ ATTRIBUTE_OPEN_INVALID_PARAMS_TEST_ATTR_NAME);
+ H5Aclose(attr_id);
+ PART_ERROR(H5Aopen_by_idx_invalid_iter_order);
+ }
+
+ H5E_BEGIN_TRY
+ {
+ attr_id = H5Aopen_by_idx(container_group, ATTRIBUTE_OPEN_INVALID_PARAMS_TEST_GROUP_NAME,
+ H5_INDEX_NAME, H5_ITER_N, 0, H5P_DEFAULT, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (attr_id >= 0) {
+ H5_FAILED();
+ HDprintf(" opened attribute '%s' using H5Aopen_by_idx with invalid iteration order "
+ "H5_ITER_N!\n",
+ ATTRIBUTE_OPEN_INVALID_PARAMS_TEST_ATTR_NAME);
+ H5Aclose(attr_id);
+ PART_ERROR(H5Aopen_by_idx_invalid_iter_order);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Aopen_by_idx_invalid_iter_order);
+
+ PART_BEGIN(H5Aopen_by_idx_invalid_aapl)
+ {
+ TESTING_2("H5Aopen_by_idx with an invalid AAPL");
+
+ H5E_BEGIN_TRY
+ {
+ attr_id = H5Aopen_by_idx(container_group, ATTRIBUTE_OPEN_INVALID_PARAMS_TEST_GROUP_NAME,
+ H5_INDEX_NAME, H5_ITER_INC, 0, H5I_INVALID_HID, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (attr_id >= 0) {
+ H5_FAILED();
+ HDprintf(" opened attribute '%s' using H5Aopen_by_idx with an invalid AAPL!\n",
+ ATTRIBUTE_OPEN_INVALID_PARAMS_TEST_ATTR_NAME);
+ H5Aclose(attr_id);
+ PART_ERROR(H5Aopen_by_idx_invalid_aapl);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Aopen_by_idx_invalid_aapl);
+
+ PART_BEGIN(H5Aopen_by_idx_invalid_lapl)
+ {
+ TESTING_2("H5Aopen_by_idx with an invalid LAPL");
+
+ H5E_BEGIN_TRY
+ {
+ attr_id = H5Aopen_by_idx(container_group, ATTRIBUTE_OPEN_INVALID_PARAMS_TEST_GROUP_NAME,
+ H5_INDEX_NAME, H5_ITER_INC, 0, H5P_DEFAULT, H5I_INVALID_HID);
+ }
+ H5E_END_TRY;
+
+ if (attr_id >= 0) {
+ H5_FAILED();
+ HDprintf(" opened attribute '%s' using H5Aopen_by_idx with an invalid LAPL!\n",
+ ATTRIBUTE_OPEN_INVALID_PARAMS_TEST_ATTR_NAME);
+ H5Aclose(attr_id);
+ PART_ERROR(H5Aopen_by_idx_invalid_lapl);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Aopen_by_idx_invalid_lapl);
+ }
+ END_MULTIPART;
+
+ TESTING_2("test cleanup");
+
+ if (H5Sclose(space_id) < 0)
+ TEST_ERROR;
+ if (H5Tclose(attr_type) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(space_id);
+ H5Tclose(attr_type);
+ H5Aclose(attr_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to check that a simple write to an attribute
+ * can be made.
+ */
+static int
+test_write_attribute(void)
+{
+ hsize_t dims[ATTRIBUTE_WRITE_TEST_SPACE_RANK];
+ size_t i, data_size;
+ htri_t attr_exists;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t attr_id = H5I_INVALID_HID;
+ hid_t space_id = H5I_INVALID_HID;
+ void *data = NULL;
+
+ TESTING("H5Awrite");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_FLUSH_REFRESH)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file, group, attribute, or file flush aren't supported with "
+ "this connector\n");
+ return 0;
+ }
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file\n");
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, ATTRIBUTE_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group\n");
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, ATTRIBUTE_WRITE_TEST_GROUP_NAME, H5P_DEFAULT, H5P_DEFAULT,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container group '%s'\n", ATTRIBUTE_WRITE_TEST_GROUP_NAME);
+ goto error;
+ }
+
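+ /* Create a randomly-sized dataspace for the attribute; 'dims' receives the chosen
+ * dimensions so the write buffer can be sized to match below
+ */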
+ if ((space_id = generate_random_dataspace(ATTRIBUTE_WRITE_TEST_SPACE_RANK, NULL, dims, TRUE)) < 0)
+ TEST_ERROR;
+
+ if ((attr_id = H5Acreate2(group_id, ATTRIBUTE_WRITE_TEST_ATTR_NAME, ATTRIBUTE_WRITE_TEST_ATTR_DTYPE,
+ space_id, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create attribute\n");
+ goto error;
+ }
+
+ /* Verify the attribute has been created */
+ if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_WRITE_TEST_ATTR_NAME)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if attribute exists\n");
+ goto error;
+ }
+
+ if (!attr_exists) {
+ H5_FAILED();
+ HDprintf(" attribute did not exist\n");
+ goto error;
+ }
+
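+ /* Size the write buffer as the number of dataspace elements times the size of the test datatype */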
+ for (i = 0, data_size = 1; i < ATTRIBUTE_WRITE_TEST_SPACE_RANK; i++)
+ data_size *= dims[i];
+ data_size *= ATTRIBUTE_WRITE_TEST_ATTR_DTYPE_SIZE;
+
+ if (NULL == (data = HDmalloc(data_size)))
+ TEST_ERROR;
+
+ for (i = 0; i < data_size / ATTRIBUTE_WRITE_TEST_ATTR_DTYPE_SIZE; i++)
+ ((int *)data)[i] = (int)i;
+
+ if (H5Awrite(attr_id, ATTRIBUTE_WRITE_TEST_ATTR_DTYPE, data) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't write to attribute\n");
+ goto error;
+ }
+
+ /* Make sure that the attribute can be flushed to the file */
+ if (H5Fflush(file_id, H5F_SCOPE_GLOBAL) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't flush the attribute\n");
+ goto error;
+ }
+
+ if (data) {
+ HDfree(data);
+ data = NULL;
+ }
+
+ if (H5Sclose(space_id) < 0)
+ TEST_ERROR;
+ if (H5Aclose(attr_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ if (data)
+ HDfree(data);
+ H5Sclose(space_id);
+ H5Aclose(attr_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to check that writing an attribute fails when
+ * H5Awrite is passed invalid parameters.
+ */
+static int
+test_write_attribute_invalid_params(void)
+{
+ hsize_t dims[ATTRIBUTE_WRITE_INVALID_PARAMS_TEST_SPACE_RANK];
+ size_t i, data_size;
+ htri_t attr_exists;
+ herr_t err_ret = -1;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t attr_id = H5I_INVALID_HID;
+ hid_t space_id = H5I_INVALID_HID;
+ void *data = NULL;
+
+ TESTING_MULTIPART("H5Awrite with invalid parameters");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC)) {
+ SKIPPED();
+ HDprintf(
+ " API functions for basic file, group, or attribute aren't supported with this connector\n");
+ return 0;
+ }
+
+ TESTING_2("test setup");
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, ATTRIBUTE_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", ATTRIBUTE_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, ATTRIBUTE_WRITE_INVALID_PARAMS_TEST_GROUP_NAME, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container group '%s'\n",
+ ATTRIBUTE_WRITE_INVALID_PARAMS_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((space_id =
+ generate_random_dataspace(ATTRIBUTE_WRITE_INVALID_PARAMS_TEST_SPACE_RANK, NULL, dims, TRUE)) < 0)
+ TEST_ERROR;
+
+ if ((attr_id = H5Acreate2(group_id, ATTRIBUTE_WRITE_INVALID_PARAMS_TEST_ATTR_NAME,
+ ATTRIBUTE_WRITE_INVALID_PARAMS_TEST_ATTR_DTYPE, space_id, H5P_DEFAULT,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create attribute\n");
+ goto error;
+ }
+
+ /* Verify the attribute has been created */
+ if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_WRITE_INVALID_PARAMS_TEST_ATTR_NAME)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if attribute exists\n");
+ goto error;
+ }
+
+ if (!attr_exists) {
+ H5_FAILED();
+ HDprintf(" attribute did not exist\n");
+ goto error;
+ }
+
+ for (i = 0, data_size = 1; i < ATTRIBUTE_WRITE_INVALID_PARAMS_TEST_SPACE_RANK; i++)
+ data_size *= dims[i];
+ data_size *= ATTRIBUTE_WRITE_INVALID_PARAMS_TEST_ATTR_DTYPE_SIZE;
+
+ if (NULL == (data = HDmalloc(data_size)))
+ TEST_ERROR;
+
+ for (i = 0; i < data_size / ATTRIBUTE_WRITE_INVALID_PARAMS_TEST_ATTR_DTYPE_SIZE; i++)
+ ((int *)data)[i] = (int)i;
+
+ PASSED();
+
+ BEGIN_MULTIPART
+ {
+ PART_BEGIN(H5Awrite_invalid_attr_id)
+ {
+ TESTING_2("H5Awrite with an invalid attr_id");
+
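+ /* Each call below is expected to fail; H5E_BEGIN_TRY/H5E_END_TRY suppresses the
+ * automatic error stack printing while the expected errors occur
+ */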
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Awrite(H5I_INVALID_HID, ATTRIBUTE_WRITE_INVALID_PARAMS_TEST_ATTR_DTYPE, data);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" wrote to attribute using an invalid attr_id!\n");
+ PART_ERROR(H5Awrite_invalid_attr_id);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Awrite_invalid_attr_id);
+
+ PART_BEGIN(H5Awrite_invalid_datatype)
+ {
+ TESTING_2("H5Awrite with an invalid datatype");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Awrite(attr_id, H5I_INVALID_HID, data);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" wrote to attribute using an invalid datatype!\n");
+ PART_ERROR(H5Awrite_invalid_datatype);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Awrite_invalid_datatype);
+
+ PART_BEGIN(H5Awrite_invalid_data_buf)
+ {
+ TESTING_2("H5Awrite with an invalid data buffer");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Awrite(attr_id, ATTRIBUTE_WRITE_INVALID_PARAMS_TEST_ATTR_DTYPE, NULL);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" wrote to attribute using an invalid data buffer!\n");
+ PART_ERROR(H5Awrite_invalid_data_buf);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Awrite_invalid_data_buf);
+ }
+ END_MULTIPART;
+
+ TESTING_2("test cleanup");
+
+ if (data) {
+ HDfree(data);
+ data = NULL;
+ }
+
+ if (H5Sclose(space_id) < 0)
+ TEST_ERROR;
+ if (H5Aclose(attr_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ if (data)
+ HDfree(data);
+ H5Sclose(space_id);
+ H5Aclose(attr_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to check that simple data can be read back
+ * and verified after it has been written to an
+ * attribute.
+ */
+static int
+test_read_attribute(void)
+{
+ hsize_t dims[ATTRIBUTE_READ_TEST_SPACE_RANK];
+ size_t i, data_size;
+ htri_t attr_exists;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t attr_id = H5I_INVALID_HID;
+ hid_t space_id = H5I_INVALID_HID;
+ void *data = NULL;
+ void *read_buf = NULL;
+
+ TESTING("H5Aread");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC)) {
+ SKIPPED();
+ HDprintf(
+ " API functions for basic file, group, or attribute aren't supported with this connector\n");
+ return 0;
+ }
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, ATTRIBUTE_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", ATTRIBUTE_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, ATTRIBUTE_READ_TEST_GROUP_NAME, H5P_DEFAULT, H5P_DEFAULT,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container group '%s'\n", ATTRIBUTE_READ_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((space_id = generate_random_dataspace(ATTRIBUTE_READ_TEST_SPACE_RANK, NULL, dims, TRUE)) < 0)
+ TEST_ERROR;
+
+ if ((attr_id = H5Acreate2(group_id, ATTRIBUTE_READ_TEST_ATTR_NAME, ATTRIBUTE_READ_TEST_ATTR_DTYPE,
+ space_id, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create attribute\n");
+ goto error;
+ }
+
+ /* Verify the attribute has been created */
+ if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_READ_TEST_ATTR_NAME)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if attribute exists\n");
+ goto error;
+ }
+
+ if (!attr_exists) {
+ H5_FAILED();
+ HDprintf(" attribute did not exist\n");
+ goto error;
+ }
+
+ for (i = 0, data_size = 1; i < ATTRIBUTE_READ_TEST_SPACE_RANK; i++)
+ data_size *= dims[i];
+ data_size *= ATTRIBUTE_READ_TEST_ATTR_DTYPE_SIZE;
+
+ if (NULL == (data = HDmalloc(data_size)))
+ TEST_ERROR;
+ if (NULL == (read_buf = HDcalloc(1, data_size)))
+ TEST_ERROR;
+
+ for (i = 0; i < data_size / ATTRIBUTE_READ_TEST_ATTR_DTYPE_SIZE; i++)
+ ((int *)data)[i] = (int)i;
+
+ if (H5Awrite(attr_id, ATTRIBUTE_READ_TEST_ATTR_DTYPE, data) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't write to attribute\n");
+ goto error;
+ }
+
+ if (data) {
+ HDfree(data);
+ data = NULL;
+ }
+
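+ /* Close and re-open the attribute so the data is read back through a freshly-opened handle */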
+ if (H5Aclose(attr_id) < 0)
+ TEST_ERROR;
+
+ if ((attr_id = H5Aopen(group_id, ATTRIBUTE_READ_TEST_ATTR_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open attribute\n");
+ goto error;
+ }
+
+ if (H5Aread(attr_id, ATTRIBUTE_READ_TEST_ATTR_DTYPE, read_buf) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't read from attribute\n");
+ goto error;
+ }
+
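+ /* Verify that the data read back matches the pattern that was written */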
+ for (i = 0; i < data_size / ATTRIBUTE_READ_TEST_ATTR_DTYPE_SIZE; i++) {
+ if (((int *)read_buf)[i] != (int)i) {
+ H5_FAILED();
+ HDprintf(" data verification failed\n");
+ goto error;
+ }
+ }
+
+ if (read_buf) {
+ HDfree(read_buf);
+ read_buf = NULL;
+ }
+
+ if (H5Sclose(space_id) < 0)
+ TEST_ERROR;
+ if (H5Aclose(attr_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ if (data)
+ HDfree(data);
+ if (read_buf)
+ HDfree(read_buf);
+ H5Sclose(space_id);
+ H5Aclose(attr_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to check that reading an attribute fails when
+ * H5Aread is passed invalid parameters.
+ */
+static int
+test_read_attribute_invalid_params(void)
+{
+ hsize_t dims[ATTRIBUTE_READ_INVALID_PARAMS_TEST_SPACE_RANK];
+ size_t i, data_size;
+ htri_t attr_exists;
+ herr_t err_ret = -1;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t attr_id = H5I_INVALID_HID;
+ hid_t space_id = H5I_INVALID_HID;
+ void *data = NULL;
+ void *read_buf = NULL;
+
+ TESTING_MULTIPART("H5Aread with invalid parameters");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC)) {
+ SKIPPED();
+ HDprintf(
+ " API functions for basic file, group, or attribute aren't supported with this connector\n");
+ return 0;
+ }
+
+ TESTING_2("test setup");
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, ATTRIBUTE_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", ATTRIBUTE_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, ATTRIBUTE_READ_INVALID_PARAMS_TEST_GROUP_NAME, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container group '%s'\n", ATTRIBUTE_READ_INVALID_PARAMS_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((space_id =
+ generate_random_dataspace(ATTRIBUTE_READ_INVALID_PARAMS_TEST_SPACE_RANK, NULL, dims, TRUE)) < 0)
+ TEST_ERROR;
+
+ if ((attr_id = H5Acreate2(group_id, ATTRIBUTE_READ_INVALID_PARAMS_TEST_ATTR_NAME,
+ ATTRIBUTE_READ_INVALID_PARAMS_TEST_ATTR_DTYPE, space_id, H5P_DEFAULT,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create attribute\n");
+ goto error;
+ }
+
+ /* Verify the attribute has been created */
+ if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_READ_INVALID_PARAMS_TEST_ATTR_NAME)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if attribute exists\n");
+ goto error;
+ }
+
+ if (!attr_exists) {
+ H5_FAILED();
+ HDprintf(" attribute did not exist\n");
+ goto error;
+ }
+
+ for (i = 0, data_size = 1; i < ATTRIBUTE_READ_INVALID_PARAMS_TEST_SPACE_RANK; i++)
+ data_size *= dims[i];
+ data_size *= ATTRIBUTE_READ_INVALID_PARAMS_TEST_ATTR_DTYPE_SIZE;
+
+ if (NULL == (data = HDmalloc(data_size)))
+ TEST_ERROR;
+ if (NULL == (read_buf = HDcalloc(1, data_size)))
+ TEST_ERROR;
+
+ for (i = 0; i < data_size / ATTRIBUTE_READ_INVALID_PARAMS_TEST_ATTR_DTYPE_SIZE; i++)
+ ((int *)data)[i] = (int)i;
+
+ if (H5Awrite(attr_id, ATTRIBUTE_READ_INVALID_PARAMS_TEST_ATTR_DTYPE, data) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't write to attribute\n");
+ goto error;
+ }
+
+ if (data) {
+ HDfree(data);
+ data = NULL;
+ }
+
+ if (H5Aclose(attr_id) < 0)
+ TEST_ERROR;
+
+ if ((attr_id = H5Aopen(group_id, ATTRIBUTE_READ_INVALID_PARAMS_TEST_ATTR_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open attribute\n");
+ goto error;
+ }
+
+ PASSED();
+
+ BEGIN_MULTIPART
+ {
+ PART_BEGIN(H5Aread_invalid_attr_id)
+ {
+ TESTING_2("H5Aread with an invalid attr_id");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Aread(H5I_INVALID_HID, ATTRIBUTE_READ_INVALID_PARAMS_TEST_ATTR_DTYPE, read_buf);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" read attribute with an invalid attr_id!\n");
+ PART_ERROR(H5Aread_invalid_attr_id);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Aread_invalid_attr_id);
+
+ PART_BEGIN(H5Aread_invalid_datatype)
+ {
+ TESTING_2("H5Aread with an invalid datatype");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Aread(attr_id, H5I_INVALID_HID, read_buf);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" read attribute with an invalid datatype!\n");
+ PART_ERROR(H5Aread_invalid_datatype);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Aread_invalid_datatype);
+
+ PART_BEGIN(H5Aread_invalid_read_buf)
+ {
+ TESTING_2("H5Aread with an invalid read buffer");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Aread(attr_id, ATTRIBUTE_READ_INVALID_PARAMS_TEST_ATTR_DTYPE, NULL);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" read attribute with an invalid read buffer!\n");
+ PART_ERROR(H5Aread_invalid_read_buf);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Aread_invalid_read_buf);
+ }
+ END_MULTIPART;
+
+ TESTING_2("test cleanup");
+
+ if (read_buf) {
+ HDfree(read_buf);
+ read_buf = NULL;
+ }
+
+ if (H5Sclose(space_id) < 0)
+ TEST_ERROR;
+ if (H5Aclose(attr_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ if (data)
+ HDfree(data);
+ if (read_buf)
+ HDfree(read_buf);
+ H5Sclose(space_id);
+ H5Aclose(attr_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to check that reading from an empty attribute
+ * (one that has not been written to) succeeds.
+ */
+static int
+test_read_empty_attribute(void)
+{
+ hsize_t dims[ATTRIBUTE_READ_EMPTY_SPACE_RANK];
+ size_t i, data_size;
+ htri_t attr_exists;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t attr_id = H5I_INVALID_HID;
+ hid_t space_id = H5I_INVALID_HID;
+ void *read_buf = NULL;
+
+ TESTING("reading an empty attribute");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC)) {
+ SKIPPED();
+ HDprintf(
+ " API functions for basic file, group, or attribute aren't supported with this connector\n");
+ return 0;
+ }
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, ATTRIBUTE_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", ATTRIBUTE_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, ATTRIBUTE_READ_EMPTY_ATTR_GROUP_NAME, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container group '%s'\n", ATTRIBUTE_READ_EMPTY_ATTR_GROUP_NAME);
+ goto error;
+ }
+
+ if ((space_id = generate_random_dataspace(ATTRIBUTE_READ_EMPTY_SPACE_RANK, NULL, dims, TRUE)) < 0)
+ TEST_ERROR;
+
+ if ((attr_id = H5Acreate2(group_id, ATTRIBUTE_READ_EMPTY_ATTR_NAME, ATTRIBUTE_READ_EMPTY_DTYPE, space_id,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create attribute\n");
+ goto error;
+ }
+
+ /* Verify the attribute has been created */
+ if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_READ_EMPTY_ATTR_NAME)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if attribute exists\n");
+ goto error;
+ }
+
+ if (!attr_exists) {
+ H5_FAILED();
+ HDprintf(" attribute did not exist\n");
+ goto error;
+ }
+
+ if (H5Aclose(attr_id) < 0)
+ TEST_ERROR;
+
+ if ((attr_id = H5Aopen(group_id, ATTRIBUTE_READ_EMPTY_ATTR_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open attribute\n");
+ goto error;
+ }
+
+ for (i = 0, data_size = 1; i < ATTRIBUTE_READ_EMPTY_SPACE_RANK; i++)
+ data_size *= dims[i];
+ data_size *= ATTRIBUTE_READ_EMPTY_DTYPE_SIZE;
+
+ if (NULL == (read_buf = HDcalloc(1, data_size)))
+ TEST_ERROR;
+
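+ /* Read from the never-written attribute; only the success of the call itself is checked */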
+ if (H5Aread(attr_id, ATTRIBUTE_READ_EMPTY_DTYPE, read_buf) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't read from attribute\n");
+ goto error;
+ }
+
+ if (read_buf) {
+ HDfree(read_buf);
+ read_buf = NULL;
+ }
+
+ if (H5Sclose(space_id) < 0)
+ TEST_ERROR;
+ if (H5Aclose(attr_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ if (read_buf)
+ HDfree(read_buf);
+ H5Sclose(space_id);
+ H5Aclose(attr_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to check that H5Aclose fails when it is passed
+ * an invalid attribute ID.
+ */
+static int
+test_close_attribute_invalid_id(void)
+{
+ herr_t err_ret = -1;
+ hid_t file_id = H5I_INVALID_HID;
+
+ TESTING("H5Aclose with an invalid attribute ID");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file or attribute aren't supported with this connector\n");
+ return 0;
+ }
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Aclose(H5I_INVALID_HID);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Aclose succeeded with an invalid attribute ID!\n");
+ goto error;
+ }
+
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to check that valid copies of an attribute's
+ * dataspace and datatype can be retrieved with
+ * H5Aget_space and H5Aget_type, respectively.
+ */
+static int
+test_get_attribute_space_and_type(void)
+{
+ hsize_t attr_dims[ATTRIBUTE_GET_SPACE_TYPE_TEST_SPACE_RANK];
+ size_t i;
+ htri_t attr_exists;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t attr_id = H5I_INVALID_HID;
+ hid_t attr_dtype = H5I_INVALID_HID;
+ hid_t attr_space_id = H5I_INVALID_HID;
+ hid_t tmp_type_id = H5I_INVALID_HID;
+ hid_t tmp_space_id = H5I_INVALID_HID;
+
+ TESTING_MULTIPART("retrieval of an attribute's dataspace and datatype");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_MORE)) {
+ SKIPPED();
+ HDprintf(
+ " API functions for basic file, group, or attribute aren't supported with this connector\n");
+ return 0;
+ }
+
+ TESTING_2("test setup");
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, ATTRIBUTE_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", ATTRIBUTE_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, ATTRIBUTE_GET_SPACE_TYPE_TEST_GROUP_NAME, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container group '%s'\n", ATTRIBUTE_GET_SPACE_TYPE_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((attr_space_id =
+ generate_random_dataspace(ATTRIBUTE_GET_SPACE_TYPE_TEST_SPACE_RANK, NULL, attr_dims, TRUE)) < 0)
+ TEST_ERROR;
+
+ if ((attr_dtype = generate_random_datatype(H5T_NO_CLASS, TRUE)) < 0)
+ TEST_ERROR;
+
+ if ((attr_id = H5Acreate2(group_id, ATTRIBUTE_GET_SPACE_TYPE_TEST_ATTR_NAME, attr_dtype, attr_space_id,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create attribute\n");
+ goto error;
+ }
+
+ /* Verify the attribute has been created */
+ if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_GET_SPACE_TYPE_TEST_ATTR_NAME)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if attribute exists\n");
+ goto error;
+ }
+
+ if (!attr_exists) {
+ H5_FAILED();
+ HDprintf(" attribute did not exist\n");
+ goto error;
+ }
+
+ PASSED();
+
+ BEGIN_MULTIPART
+ {
+ /* Retrieve the attribute's datatype and dataspace and verify them */
+ PART_BEGIN(H5Aget_type)
+ {
+ TESTING_2("H5Aget_type");
+
+ if ((tmp_type_id = H5Aget_type(attr_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't retrieve attribute's datatype\n");
+ PART_ERROR(H5Aget_type);
+ }
+
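+ /* Compare the returned datatype against the datatype the attribute was created with */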
+ {
+ htri_t types_equal = H5Tequal(tmp_type_id, attr_dtype);
+
+ if (types_equal < 0) {
+ H5_FAILED();
+ HDprintf(" datatype was invalid\n");
+ PART_ERROR(H5Aget_type);
+ }
+
+ if (!types_equal) {
+ H5_FAILED();
+ HDprintf(" attribute's datatype did not match\n");
+ PART_ERROR(H5Aget_type);
+ }
+ }
+
+ PASSED();
+ }
+ PART_END(H5Aget_type);
+
+ PART_BEGIN(H5Aget_space)
+ {
+ TESTING_2("H5Aget_space");
+
+ if ((tmp_space_id = H5Aget_space(attr_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't retrieve attribute's dataspace\n");
+ PART_ERROR(H5Aget_space);
+ }
+
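+ /* Compare the extent of the returned dataspace against the dimensions used at creation */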
+ {
+ hsize_t space_dims[ATTRIBUTE_GET_SPACE_TYPE_TEST_SPACE_RANK];
+
+ if (H5Sget_simple_extent_dims(tmp_space_id, space_dims, NULL) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't retrieve dimensions of dataspace\n");
+ PART_ERROR(H5Aget_space);
+ }
+
+ for (i = 0; i < ATTRIBUTE_GET_SPACE_TYPE_TEST_SPACE_RANK; i++)
+ if (space_dims[i] != attr_dims[i]) {
+ H5_FAILED();
+ HDprintf(" attribute's dataspace dims didn't match\n");
+ PART_ERROR(H5Aget_space);
+ }
+ }
+
+ PASSED();
+ }
+ PART_END(H5Aget_space);
+
+ /* Now close the attribute and verify that the datatype and dataspace can still be
+ * retrieved after re-opening the attribute instead of creating it
+ */
+ if (attr_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Aclose(attr_id);
+ }
+ H5E_END_TRY;
+ attr_id = H5I_INVALID_HID;
+ }
+ if (tmp_type_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Tclose(tmp_type_id);
+ }
+ H5E_END_TRY;
+ tmp_type_id = H5I_INVALID_HID;
+ }
+ if (tmp_space_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(tmp_space_id);
+ }
+ H5E_END_TRY;
+ tmp_space_id = H5I_INVALID_HID;
+ }
+
+ PART_BEGIN(H5Aget_type_reopened)
+ {
+ TESTING_2("H5Aget_type after re-opening an attribute");
+
+ if ((attr_id = H5Aopen(group_id, ATTRIBUTE_GET_SPACE_TYPE_TEST_ATTR_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open attribute '%s'\n", ATTRIBUTE_GET_SPACE_TYPE_TEST_ATTR_NAME);
+ PART_ERROR(H5Aget_type_reopened);
+ }
+
+ if ((tmp_type_id = H5Aget_type(attr_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't retrieve attribute's datatype\n");
+ PART_ERROR(H5Aget_type_reopened);
+ }
+
+ {
+ htri_t types_equal = H5Tequal(tmp_type_id, attr_dtype);
+
+ if (types_equal < 0) {
+ H5_FAILED();
+ HDprintf(" datatype was invalid\n");
+ PART_ERROR(H5Aget_type_reopened);
+ }
+
+ if (!types_equal) {
+ H5_FAILED();
+ HDprintf(" attribute's datatype did not match\n");
+ PART_ERROR(H5Aget_type_reopened);
+ }
+ }
+
+ if (attr_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Aclose(attr_id);
+ }
+ H5E_END_TRY;
+ attr_id = H5I_INVALID_HID;
+ }
+
+ PASSED();
+ }
+ PART_END(H5Aget_type_reopened);
+
+ PART_BEGIN(H5Aget_space_reopened)
+ {
+ TESTING_2("H5Aget_space after re-opening an attribute");
+
+ if ((attr_id = H5Aopen(group_id, ATTRIBUTE_GET_SPACE_TYPE_TEST_ATTR_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open attribute '%s'\n", ATTRIBUTE_GET_SPACE_TYPE_TEST_ATTR_NAME);
+ PART_ERROR(H5Aget_space_reopened);
+ }
+
+ if ((tmp_space_id = H5Aget_space(attr_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't retrieve attribute's dataspace\n");
+ PART_ERROR(H5Aget_space_reopened);
+ }
+
+ {
+ hsize_t space_dims[ATTRIBUTE_GET_SPACE_TYPE_TEST_SPACE_RANK];
+
+ if (H5Sget_simple_extent_dims(tmp_space_id, space_dims, NULL) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't retrieve dimensions of dataspace\n");
+ PART_ERROR(H5Aget_space_reopened);
+ }
+
+ for (i = 0; i < ATTRIBUTE_GET_SPACE_TYPE_TEST_SPACE_RANK; i++) {
+ if (space_dims[i] != attr_dims[i]) {
+ H5_FAILED();
+ HDprintf(" dataspace dims didn't match!\n");
+ PART_ERROR(H5Aget_space_reopened);
+ }
+ }
+ }
+
+ if (attr_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Aclose(attr_id);
+ }
+ H5E_END_TRY;
+ attr_id = H5I_INVALID_HID;
+ }
+
+ PASSED();
+ }
+ PART_END(H5Aget_space_reopened);
+ }
+ END_MULTIPART;
+
+ TESTING_2("test cleanup");
+
+ if (H5Sclose(tmp_space_id) < 0)
+ TEST_ERROR;
+ if (H5Sclose(attr_space_id) < 0)
+ TEST_ERROR;
+ if (H5Tclose(tmp_type_id) < 0)
+ TEST_ERROR;
+ if (H5Tclose(attr_dtype) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(tmp_space_id);
+ H5Sclose(attr_space_id);
+ H5Tclose(tmp_type_id);
+ H5Tclose(attr_dtype);
+ H5Aclose(attr_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to check that an attribute's dataspace and datatype
+ * can't be retrieved when H5Aget_space and H5Aget_type are passed
+ * invalid parameters, respectively.
+ */
+static int
+test_get_attribute_space_and_type_invalid_params(void)
+{
+ htri_t attr_exists;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t attr_id = H5I_INVALID_HID;
+ hid_t attr_dtype = H5I_INVALID_HID;
+ hid_t attr_space_id = H5I_INVALID_HID;
+ hid_t tmp_type_id = H5I_INVALID_HID;
+ hid_t tmp_space_id = H5I_INVALID_HID;
+
+ TESTING_MULTIPART("H5Aget_type/H5Aget_space with invalid parameters");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_MORE)) {
+ SKIPPED();
+ HDprintf(
+ " API functions for basic file, group, or attribute aren't supported with this connector\n");
+ return 0;
+ }
+
+ TESTING_2("test setup");
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, ATTRIBUTE_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", ATTRIBUTE_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, ATTRIBUTE_GET_SPACE_TYPE_INVALID_PARAMS_TEST_GROUP_NAME,
+ H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container group '%s'\n",
+ ATTRIBUTE_GET_SPACE_TYPE_INVALID_PARAMS_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((attr_space_id = generate_random_dataspace(ATTRIBUTE_GET_SPACE_TYPE_INVALID_PARAMS_TEST_SPACE_RANK,
+ NULL, NULL, TRUE)) < 0)
+ TEST_ERROR;
+
+ if ((attr_dtype = generate_random_datatype(H5T_NO_CLASS, TRUE)) < 0)
+ TEST_ERROR;
+
+ if ((attr_id = H5Acreate2(group_id, ATTRIBUTE_GET_SPACE_TYPE_INVALID_PARAMS_TEST_ATTR_NAME, attr_dtype,
+ attr_space_id, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create attribute\n");
+ goto error;
+ }
+
+ /* Verify the attribute has been created */
+ if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_GET_SPACE_TYPE_INVALID_PARAMS_TEST_ATTR_NAME)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if attribute exists\n");
+ goto error;
+ }
+
+ if (!attr_exists) {
+ H5_FAILED();
+ HDprintf(" attribute did not exist\n");
+ goto error;
+ }
+
+ PASSED();
+
+ BEGIN_MULTIPART
+ {
+ /* Attempt to retrieve the attribute's datatype and dataspace with invalid parameters and verify that the calls fail */
+ PART_BEGIN(H5Aget_type_invalid_attr_id)
+ {
+ TESTING_2("H5Aget_type with an invalid attr_id");
+
+ H5E_BEGIN_TRY
+ {
+ tmp_type_id = H5Aget_type(H5I_INVALID_HID);
+ }
+ H5E_END_TRY;
+
+ if (tmp_type_id >= 0) {
+ H5_FAILED();
+ HDprintf(" retrieved copy of attribute's datatype using an invalid attr_id!\n");
+ PART_ERROR(H5Aget_type_invalid_attr_id);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Aget_type_invalid_attr_id);
+
+ PART_BEGIN(H5Aget_space_invalid_attr_id)
+ {
+ TESTING_2("H5Aget_space with an invalid attr_id");
+
+ H5E_BEGIN_TRY
+ {
+ tmp_space_id = H5Aget_space(H5I_INVALID_HID);
+ }
+ H5E_END_TRY;
+
+ if (tmp_space_id >= 0) {
+ H5_FAILED();
+ HDprintf(" retrieved copy of attribute's dataspace using an invalid attr_id!\n");
+ PART_ERROR(H5Aget_space_invalid_attr_id);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Aget_space_invalid_attr_id);
+ }
+ END_MULTIPART;
+
+ TESTING_2("test cleanup");
+
+ if (H5Sclose(attr_space_id) < 0)
+ TEST_ERROR;
+ if (H5Tclose(attr_dtype) < 0)
+ TEST_ERROR;
+ if (H5Aclose(attr_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(tmp_space_id);
+ H5Sclose(attr_space_id);
+ H5Tclose(tmp_type_id);
+ H5Tclose(attr_dtype);
+ H5Aclose(attr_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to check that an ACPL used for attribute creation
+ * can be persisted and that a valid copy of that ACPL can
+ * be retrieved later with a call to H5Aget_create_plist.
+ */
+static int
+test_attribute_property_lists(void)
+{
+ H5T_cset_t encoding = H5T_CSET_UTF8;
+ htri_t attr_exists;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t attr_id1 = H5I_INVALID_HID, attr_id2 = H5I_INVALID_HID;
+ hid_t attr_dtype1 = H5I_INVALID_HID, attr_dtype2 = H5I_INVALID_HID;
+ hid_t acpl_id1 = H5I_INVALID_HID, acpl_id2 = H5I_INVALID_HID;
+ hid_t space_id = H5I_INVALID_HID;
+
+ TESTING_MULTIPART("attribute property list operations");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GET_PLIST)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file, group, attribute, or getting property list aren't "
+ "supported with this connector\n");
+ return 0;
+ }
+
+ TESTING_2("test setup");
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file\n");
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, ATTRIBUTE_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group\n");
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, ATTRIBUTE_PROPERTY_LIST_TEST_SUBGROUP_NAME, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container sub-group\n");
+ goto error;
+ }
+
+ if ((space_id = generate_random_dataspace(ATTRIBUTE_PROPERTY_LIST_TEST_SPACE_RANK, NULL, NULL, TRUE)) < 0)
+ TEST_ERROR;
+
+ if ((attr_dtype1 = generate_random_datatype(H5T_NO_CLASS, TRUE)) < 0)
+ TEST_ERROR;
+ if ((attr_dtype2 = generate_random_datatype(H5T_NO_CLASS, TRUE)) < 0)
+ TEST_ERROR;
+
+ if ((acpl_id1 = H5Pcreate(H5P_ATTRIBUTE_CREATE)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create ACPL\n");
+ goto error;
+ }
+
+ if (H5Pset_char_encoding(acpl_id1, encoding) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't set ACPL property value\n");
+ goto error;
+ }
+
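+ /* Create one attribute with the custom ACPL and one with H5P_DEFAULT so both
+ * cases can be checked when the ACPLs are retrieved later
+ */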
+ if ((attr_id1 = H5Acreate2(group_id, ATTRIBUTE_PROPERTY_LIST_TEST_ATTRIBUTE_NAME1, attr_dtype1, space_id,
+ acpl_id1, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create attribute\n");
+ goto error;
+ }
+
+ if ((attr_id2 = H5Acreate2(group_id, ATTRIBUTE_PROPERTY_LIST_TEST_ATTRIBUTE_NAME2, attr_dtype2, space_id,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create attribute\n");
+ goto error;
+ }
+
+ if (H5Pclose(acpl_id1) < 0)
+ TEST_ERROR;
+
+ /* Verify the attributes have been created */
+ if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_PROPERTY_LIST_TEST_ATTRIBUTE_NAME1)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if attribute exists\n");
+ goto error;
+ }
+
+ if (!attr_exists) {
+ H5_FAILED();
+ HDprintf(" attribute did not exist\n");
+ goto error;
+ }
+
+ if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_PROPERTY_LIST_TEST_ATTRIBUTE_NAME2)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if attribute exists\n");
+ goto error;
+ }
+
+ if (!attr_exists) {
+ H5_FAILED();
+ HDprintf(" attribute did not exist\n");
+ goto error;
+ }
+
+ PASSED();
+
+ BEGIN_MULTIPART
+ {
+ PART_BEGIN(H5Aget_create_plist)
+ {
+ TESTING_2("H5Aget_create_plist");
+
+ /* Try to retrieve copies of the two property lists, one which has the property set and one which
+ * does not */
+ if ((acpl_id1 = H5Aget_create_plist(attr_id1)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get property list\n");
+ PART_ERROR(H5Aget_create_plist);
+ }
+
+ if ((acpl_id2 = H5Aget_create_plist(attr_id2)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get property list\n");
+ PART_ERROR(H5Aget_create_plist);
+ }
+
+ /* Ensure that property list 1 has the property set and property list 2 does not */
+ encoding = H5T_CSET_ERROR;
+
+ if (H5Pget_char_encoding(acpl_id1, &encoding) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't retrieve ACPL property value\n");
+ PART_ERROR(H5Aget_create_plist);
+ }
+
+ if (H5T_CSET_UTF8 != encoding) {
+ H5_FAILED();
+ HDprintf(" ACPL property value was incorrect\n");
+ PART_ERROR(H5Aget_create_plist);
+ }
+
+ encoding = H5T_CSET_ERROR;
+
+ if (H5Pget_char_encoding(acpl_id2, &encoding) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't retrieve ACPL property value\n");
+ PART_ERROR(H5Aget_create_plist);
+ }
+
+ if (H5T_CSET_UTF8 == encoding) {
+ H5_FAILED();
+ HDprintf(" ACPL property value was set!\n");
+ PART_ERROR(H5Aget_create_plist);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Aget_create_plist);
+
+ /* Now close the property lists and attribute and see if we can still retrieve copies of
+ * the property lists upon opening (instead of creating) an attribute
+ */
+ if (acpl_id1 >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Pclose(acpl_id1);
+ }
+ H5E_END_TRY;
+ acpl_id1 = H5I_INVALID_HID;
+ }
+ if (acpl_id2 >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Pclose(acpl_id2);
+ }
+ H5E_END_TRY;
+ acpl_id2 = H5I_INVALID_HID;
+ }
+ if (attr_id1 >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Aclose(attr_id1);
+ }
+ H5E_END_TRY;
+ attr_id1 = H5I_INVALID_HID;
+ }
+ if (attr_id2 >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Aclose(attr_id2);
+ }
+ H5E_END_TRY;
+ attr_id2 = H5I_INVALID_HID;
+ }
+
+ PART_BEGIN(H5Aget_create_plist_reopened)
+ {
+ TESTING_2("H5Aget_create_plist after re-opening an attribute");
+
+ if ((attr_id1 = H5Aopen(group_id, ATTRIBUTE_PROPERTY_LIST_TEST_ATTRIBUTE_NAME1, H5P_DEFAULT)) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open attribute '%s'\n", ATTRIBUTE_PROPERTY_LIST_TEST_ATTRIBUTE_NAME1);
+ PART_ERROR(H5Aget_create_plist_reopened);
+ }
+
+ if ((attr_id2 = H5Aopen(group_id, ATTRIBUTE_PROPERTY_LIST_TEST_ATTRIBUTE_NAME2, H5P_DEFAULT)) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open attribute '%s'\n", ATTRIBUTE_PROPERTY_LIST_TEST_ATTRIBUTE_NAME2);
+ PART_ERROR(H5Aget_create_plist_reopened);
+ }
+
+ if ((acpl_id1 = H5Aget_create_plist(attr_id1)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get property list\n");
+ PART_ERROR(H5Aget_create_plist_reopened);
+ }
+
+ if ((acpl_id2 = H5Aget_create_plist(attr_id2)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get property list\n");
+ PART_ERROR(H5Aget_create_plist_reopened);
+ }
+
+ /* XXX: Check the value to be tested as above */
+ PASSED();
+ }
+ PART_END(H5Aget_create_plist_reopened);
+ }
+ END_MULTIPART;
+
+ TESTING_2("test cleanup");
+
+ if (H5Pclose(acpl_id1) < 0)
+ TEST_ERROR;
+ if (H5Pclose(acpl_id2) < 0)
+ TEST_ERROR;
+ if (H5Sclose(space_id) < 0)
+ TEST_ERROR;
+ if (H5Tclose(attr_dtype1) < 0)
+ TEST_ERROR;
+ if (H5Tclose(attr_dtype2) < 0)
+ TEST_ERROR;
+ if (H5Aclose(attr_id1) < 0)
+ TEST_ERROR;
+ if (H5Aclose(attr_id2) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Pclose(acpl_id1);
+ H5Pclose(acpl_id2);
+ H5Sclose(space_id);
+ H5Tclose(attr_dtype1);
+ H5Tclose(attr_dtype2);
+ H5Aclose(attr_id1);
+ H5Aclose(attr_id2);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to check that an attribute's name can be
+ * correctly retrieved with H5Aget_name and
+ * H5Aget_name_by_idx.
+ */
+static int
+test_get_attribute_name(void)
+{
+ ssize_t name_buf_size;
+ htri_t attr_exists;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t attr_id = H5I_INVALID_HID;
+ hid_t attr_dtype = H5I_INVALID_HID;
+ hid_t space_id = H5I_INVALID_HID;
+ hid_t gcpl_id = H5I_INVALID_HID;
+ char *name_buf = NULL;
+
+ TESTING_MULTIPART("retrieval of an attribute's name");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_MORE) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_CREATION_ORDER)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file, group, attribute, or creation order aren't supported "
+ "with this connector\n");
+ return 0;
+ }
+
+ TESTING_2("test setup");
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, ATTRIBUTE_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", ATTRIBUTE_TEST_GROUP_NAME);
+ goto error;
+ }
+
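+ /* Enable attribute creation order tracking on the group that will hold the attributes
+ * so the H5_INDEX_CRT_ORDER lookups below have an index to work with
+ */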
+ if ((gcpl_id = H5Pcreate(H5P_GROUP_CREATE)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create GCPL for attribute creation order tracking\n");
+ goto error;
+ }
+
+ if (H5Pset_attr_creation_order(gcpl_id, H5P_CRT_ORDER_TRACKED) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't set attribute creation order tracking\n");
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, ATTRIBUTE_GET_NAME_TEST_GROUP_NAME, H5P_DEFAULT, gcpl_id,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container group '%s'\n", ATTRIBUTE_GET_NAME_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((space_id = generate_random_dataspace(ATTRIBUTE_GET_NAME_TEST_SPACE_RANK, NULL, NULL, TRUE)) < 0)
+ TEST_ERROR;
+
+ if ((attr_dtype = generate_random_datatype(H5T_NO_CLASS, TRUE)) < 0)
+ TEST_ERROR;
+
+ /* Create several attributes */
+ if ((attr_id = H5Acreate2(group_id, ATTRIBUTE_GET_NAME_TEST_ATTRIBUTE_NAME, attr_dtype, space_id,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create attribute '%s'\n", ATTRIBUTE_GET_NAME_TEST_ATTRIBUTE_NAME);
+ goto error;
+ }
+
+ if (H5Aclose(attr_id) < 0)
+ TEST_ERROR;
+
+ if ((attr_id = H5Acreate2(group_id, ATTRIBUTE_GET_NAME_TEST_ATTRIBUTE_NAME2, attr_dtype, space_id,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create attribute '%s'\n", ATTRIBUTE_GET_NAME_TEST_ATTRIBUTE_NAME2);
+ goto error;
+ }
+
+ if (H5Aclose(attr_id) < 0)
+ TEST_ERROR;
+
+ if ((attr_id = H5Acreate2(group_id, ATTRIBUTE_GET_NAME_TEST_ATTRIBUTE_NAME3, attr_dtype, space_id,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create attribute '%s'\n", ATTRIBUTE_GET_NAME_TEST_ATTRIBUTE_NAME3);
+ goto error;
+ }
+
+ if (H5Aclose(attr_id) < 0)
+ TEST_ERROR;
+
+ /* Verify the attributes have been created */
+ if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_GET_NAME_TEST_ATTRIBUTE_NAME)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if attribute '%s' exists\n", ATTRIBUTE_GET_NAME_TEST_ATTRIBUTE_NAME);
+ goto error;
+ }
+
+ if (!attr_exists) {
+ H5_FAILED();
+ HDprintf(" attribute '%s' did not exist\n", ATTRIBUTE_GET_NAME_TEST_ATTRIBUTE_NAME);
+ goto error;
+ }
+
+ if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_GET_NAME_TEST_ATTRIBUTE_NAME2)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if attribute '%s' exists\n",
+ ATTRIBUTE_GET_NAME_TEST_ATTRIBUTE_NAME2);
+ goto error;
+ }
+
+ if (!attr_exists) {
+ H5_FAILED();
+ HDprintf(" attribute '%s' did not exist\n", ATTRIBUTE_GET_NAME_TEST_ATTRIBUTE_NAME2);
+ goto error;
+ }
+
+ if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_GET_NAME_TEST_ATTRIBUTE_NAME3)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if attribute '%s' exists\n",
+ ATTRIBUTE_GET_NAME_TEST_ATTRIBUTE_NAME3);
+ goto error;
+ }
+
+ if (!attr_exists) {
+ H5_FAILED();
+ HDprintf(" attribute '%s' did not exist\n", ATTRIBUTE_GET_NAME_TEST_ATTRIBUTE_NAME3);
+ goto error;
+ }
+
+ /* Allocate the name buffer */
+ name_buf_size = strlen(ATTRIBUTE_GET_NAME_TEST_ATTRIBUTE_NAME) + 2;
+ if (NULL == (name_buf = (char *)HDmalloc((size_t)name_buf_size))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for storing attribute's name\n");
+ goto error;
+ }
+
+ PASSED();
+
+ BEGIN_MULTIPART
+ {
+ PART_BEGIN(H5Aget_name)
+ {
+ TESTING_2("H5Aget_name");
+
+ if ((attr_id = H5Aopen(group_id, ATTRIBUTE_GET_NAME_TEST_ATTRIBUTE_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open attribute '%s'\n", ATTRIBUTE_GET_NAME_TEST_ATTRIBUTE_NAME);
+ PART_ERROR(H5Aget_name);
+ }
+
+ *name_buf = '\0';
+ if (H5Aget_name(attr_id, (size_t)name_buf_size, name_buf) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't retrieve attribute name\n");
+ PART_ERROR(H5Aget_name);
+ }
+
+ if (HDstrncmp(name_buf, ATTRIBUTE_GET_NAME_TEST_ATTRIBUTE_NAME,
+ strlen(ATTRIBUTE_GET_NAME_TEST_ATTRIBUTE_NAME) + 1)) {
+ H5_FAILED();
+ HDprintf(" retrieved attribute name '%s' didn't match '%s'\n", name_buf,
+ ATTRIBUTE_GET_NAME_TEST_ATTRIBUTE_NAME);
+ PART_ERROR(H5Aget_name);
+ }
+
+ if (H5Aclose(attr_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close attribute '%s'\n", ATTRIBUTE_GET_NAME_TEST_ATTRIBUTE_NAME);
+ PART_ERROR(H5Aget_name);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Aget_name);
+
+ H5E_BEGIN_TRY
+ {
+ H5Aclose(attr_id);
+ attr_id = H5I_INVALID_HID;
+ }
+ H5E_END_TRY;
+
+ PART_BEGIN(H5Aget_name_by_idx_crt_order_increasing)
+ {
+ TESTING_2("H5Aget_name_by_idx by creation order in increasing order");
+
+ *name_buf = '\0';
+ if (H5Aget_name_by_idx(container_group, ATTRIBUTE_GET_NAME_TEST_GROUP_NAME, H5_INDEX_CRT_ORDER,
+ H5_ITER_INC, 0, name_buf, (size_t)name_buf_size, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't retrieve name of attribute at index %d using H5Aget_name_by_index by "
+ "creation order in increasing order\n",
+ 0);
+ PART_ERROR(H5Aget_name_by_idx_crt_order_increasing);
+ }
+
+ if (HDstrncmp(name_buf, ATTRIBUTE_GET_NAME_TEST_ATTRIBUTE_NAME,
+ strlen(ATTRIBUTE_GET_NAME_TEST_ATTRIBUTE_NAME) + 1)) {
+ H5_FAILED();
+ HDprintf(" retrieved attribute name '%s' didn't match '%s'\n", name_buf,
+ ATTRIBUTE_GET_NAME_TEST_ATTRIBUTE_NAME);
+ PART_ERROR(H5Aget_name_by_idx_crt_order_increasing);
+ }
+
+ *name_buf = '\0';
+ if (H5Aget_name_by_idx(container_group, ATTRIBUTE_GET_NAME_TEST_GROUP_NAME, H5_INDEX_CRT_ORDER,
+ H5_ITER_INC, 1, name_buf, (size_t)name_buf_size, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't retrieve name of attribute at index %d using H5Aget_name_by_index by "
+ "creation order in increasing order\n",
+ 1);
+ PART_ERROR(H5Aget_name_by_idx_crt_order_increasing);
+ }
+
+ if (HDstrncmp(name_buf, ATTRIBUTE_GET_NAME_TEST_ATTRIBUTE_NAME2,
+ strlen(ATTRIBUTE_GET_NAME_TEST_ATTRIBUTE_NAME2) + 1)) {
+ H5_FAILED();
+ HDprintf(" retrieved attribute name '%s' didn't match '%s'\n", name_buf,
+ ATTRIBUTE_GET_NAME_TEST_ATTRIBUTE_NAME2);
+ PART_ERROR(H5Aget_name_by_idx_crt_order_increasing);
+ }
+
+ *name_buf = '\0';
+ if (H5Aget_name_by_idx(container_group, ATTRIBUTE_GET_NAME_TEST_GROUP_NAME, H5_INDEX_CRT_ORDER,
+ H5_ITER_INC, 2, name_buf, (size_t)name_buf_size, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't retrieve name of attribute at index %d using H5Aget_name_by_index by "
+ "creation order in increasing order\n",
+ 2);
+ PART_ERROR(H5Aget_name_by_idx_crt_order_increasing);
+ }
+
+ if (HDstrncmp(name_buf, ATTRIBUTE_GET_NAME_TEST_ATTRIBUTE_NAME3,
+ strlen(ATTRIBUTE_GET_NAME_TEST_ATTRIBUTE_NAME3) + 1)) {
+ H5_FAILED();
+ HDprintf(" retrieved attribute name '%s' didn't match '%s'\n", name_buf,
+ ATTRIBUTE_GET_NAME_TEST_ATTRIBUTE_NAME3);
+ PART_ERROR(H5Aget_name_by_idx_crt_order_increasing);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Aget_name_by_idx_crt_order_increasing);
+
+ PART_BEGIN(H5Aget_name_by_idx_crt_order_decreasing)
+ {
+ TESTING_2("H5Aget_name_by_idx by creation order in decreasing order");
+
+ *name_buf = '\0';
+ if (H5Aget_name_by_idx(container_group, ATTRIBUTE_GET_NAME_TEST_GROUP_NAME, H5_INDEX_CRT_ORDER,
+ H5_ITER_DEC, 2, name_buf, (size_t)name_buf_size, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't retrieve name of attribute at index %d using H5Aget_name_by_index by "
+ "creation order in decreasing order\n",
+ 2);
+ PART_ERROR(H5Aget_name_by_idx_crt_order_decreasing);
+ }
+
+ if (HDstrncmp(name_buf, ATTRIBUTE_GET_NAME_TEST_ATTRIBUTE_NAME,
+ strlen(ATTRIBUTE_GET_NAME_TEST_ATTRIBUTE_NAME) + 1)) {
+ H5_FAILED();
+ HDprintf(" retrieved attribute name '%s' didn't match '%s'\n", name_buf,
+ ATTRIBUTE_GET_NAME_TEST_ATTRIBUTE_NAME);
+ PART_ERROR(H5Aget_name_by_idx_crt_order_decreasing);
+ }
+
+ *name_buf = '\0';
+ if (H5Aget_name_by_idx(container_group, ATTRIBUTE_GET_NAME_TEST_GROUP_NAME, H5_INDEX_CRT_ORDER,
+ H5_ITER_DEC, 1, name_buf, (size_t)name_buf_size, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't retrieve name of attribute at index %d using H5Aget_name_by_index by "
+ "creation order in decreasing order\n",
+ 1);
+ PART_ERROR(H5Aget_name_by_idx_crt_order_decreasing);
+ }
+
+ if (HDstrncmp(name_buf, ATTRIBUTE_GET_NAME_TEST_ATTRIBUTE_NAME2,
+ strlen(ATTRIBUTE_GET_NAME_TEST_ATTRIBUTE_NAME2) + 1)) {
+ H5_FAILED();
+ HDprintf(" retrieved attribute name '%s' didn't match '%s'\n", name_buf,
+ ATTRIBUTE_GET_NAME_TEST_ATTRIBUTE_NAME2);
+ PART_ERROR(H5Aget_name_by_idx_crt_order_decreasing);
+ }
+
+ *name_buf = '\0';
+ if (H5Aget_name_by_idx(container_group, ATTRIBUTE_GET_NAME_TEST_GROUP_NAME, H5_INDEX_CRT_ORDER,
+ H5_ITER_DEC, 0, name_buf, (size_t)name_buf_size, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't retrieve name of attribute at index %d using H5Aget_name_by_index by "
+ "creation order in decreasing order\n",
+ 0);
+ PART_ERROR(H5Aget_name_by_idx_crt_order_decreasing);
+ }
+
+ if (HDstrncmp(name_buf, ATTRIBUTE_GET_NAME_TEST_ATTRIBUTE_NAME3,
+ strlen(ATTRIBUTE_GET_NAME_TEST_ATTRIBUTE_NAME3) + 1)) {
+ H5_FAILED();
+ HDprintf(" retrieved attribute name '%s' didn't match '%s'\n", name_buf,
+ ATTRIBUTE_GET_NAME_TEST_ATTRIBUTE_NAME3);
+ PART_ERROR(H5Aget_name_by_idx_crt_order_decreasing);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Aget_name_by_idx_crt_order_decreasing);
+
+ PART_BEGIN(H5Aget_name_by_idx_name_order_increasing)
+ {
+ TESTING_2("H5Aget_name_by_idx by alphabetical order in increasing order");
+
+ *name_buf = '\0';
+ if (H5Aget_name_by_idx(container_group, ATTRIBUTE_GET_NAME_TEST_GROUP_NAME, H5_INDEX_NAME,
+ H5_ITER_INC, 0, name_buf, (size_t)name_buf_size, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't retrieve name of attribute at index %d using H5Aget_name_by_index by "
+ "alphabetical order in increasing order\n",
+ 0);
+ PART_ERROR(H5Aget_name_by_idx_name_order_increasing);
+ }
+
+ if (HDstrncmp(name_buf, ATTRIBUTE_GET_NAME_TEST_ATTRIBUTE_NAME,
+ strlen(ATTRIBUTE_GET_NAME_TEST_ATTRIBUTE_NAME) + 1)) {
+ H5_FAILED();
+ HDprintf(" retrieved attribute name '%s' didn't match '%s'\n", name_buf,
+ ATTRIBUTE_GET_NAME_TEST_ATTRIBUTE_NAME);
+ PART_ERROR(H5Aget_name_by_idx_name_order_increasing);
+ }
+
+ *name_buf = '\0';
+ if (H5Aget_name_by_idx(container_group, ATTRIBUTE_GET_NAME_TEST_GROUP_NAME, H5_INDEX_NAME,
+ H5_ITER_INC, 1, name_buf, (size_t)name_buf_size, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't retrieve name of attribute at index %d using H5Aget_name_by_index by "
+ "alphabetical order in increasing order\n",
+ 1);
+ PART_ERROR(H5Aget_name_by_idx_name_order_increasing);
+ }
+
+ if (HDstrncmp(name_buf, ATTRIBUTE_GET_NAME_TEST_ATTRIBUTE_NAME2,
+ strlen(ATTRIBUTE_GET_NAME_TEST_ATTRIBUTE_NAME2) + 1)) {
+ H5_FAILED();
+ HDprintf(" retrieved attribute name '%s' didn't match '%s'\n", name_buf,
+ ATTRIBUTE_GET_NAME_TEST_ATTRIBUTE_NAME2);
+ PART_ERROR(H5Aget_name_by_idx_name_order_increasing);
+ }
+
+ *name_buf = '\0';
+ if (H5Aget_name_by_idx(container_group, ATTRIBUTE_GET_NAME_TEST_GROUP_NAME, H5_INDEX_NAME,
+ H5_ITER_INC, 2, name_buf, (size_t)name_buf_size, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't retrieve name of attribute at index %d using H5Aget_name_by_index by "
+ "alphabetical order in increasing order\n",
+ 2);
+ PART_ERROR(H5Aget_name_by_idx_name_order_increasing);
+ }
+
+ if (HDstrncmp(name_buf, ATTRIBUTE_GET_NAME_TEST_ATTRIBUTE_NAME3,
+ strlen(ATTRIBUTE_GET_NAME_TEST_ATTRIBUTE_NAME3) + 1)) {
+ H5_FAILED();
+ HDprintf(" retrieved attribute name '%s' didn't match '%s'\n", name_buf,
+ ATTRIBUTE_GET_NAME_TEST_ATTRIBUTE_NAME3);
+ PART_ERROR(H5Aget_name_by_idx_name_order_increasing);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Aget_name_by_idx_name_order_increasing);
+
+ PART_BEGIN(H5Aget_name_by_idx_name_order_decreasing)
+ {
+ TESTING_2("H5Aget_name_by_idx by alphabetical order in decreasing order");
+#ifndef NO_DECREASING_ALPHA_ITER_ORDER
+ *name_buf = '\0';
+ if (H5Aget_name_by_idx(container_group, ATTRIBUTE_GET_NAME_TEST_GROUP_NAME, H5_INDEX_NAME,
+ H5_ITER_DEC, 2, name_buf, (size_t)name_buf_size, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't retrieve name of attribute at index %lld using H5Aget_name_by_index "
+ "by alphabetical order in decreasing order\n",
+ 2);
+ PART_ERROR(H5Aget_name_by_idx_name_order_decreasing);
+ }
+
+ if (HDstrncmp(name_buf, ATTRIBUTE_GET_NAME_TEST_ATTRIBUTE_NAME,
+ strlen(ATTRIBUTE_GET_NAME_TEST_ATTRIBUTE_NAME) + 1)) {
+ H5_FAILED();
+ HDprintf(" retrieved attribute name '%s' didn't match '%s'\n", name_buf,
+ ATTRIBUTE_GET_NAME_TEST_ATTRIBUTE_NAME);
+ PART_ERROR(H5Aget_name_by_idx_name_order_decreasing);
+ }
+
+ *name_buf = '\0';
+ if (H5Aget_name_by_idx(container_group, ATTRIBUTE_GET_NAME_TEST_GROUP_NAME, H5_INDEX_NAME,
+ H5_ITER_DEC, 1, name_buf, (size_t)name_buf_size, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't retrieve name of attribute at index %lld using H5Aget_name_by_index "
+ "by alphabetical order in decreasing order\n",
+ 1);
+ PART_ERROR(H5Aget_name_by_idx_name_order_decreasing);
+ }
+
+ if (HDstrncmp(name_buf, ATTRIBUTE_GET_NAME_TEST_ATTRIBUTE_NAME2,
+ strlen(ATTRIBUTE_GET_NAME_TEST_ATTRIBUTE_NAME2) + 1)) {
+ H5_FAILED();
+ HDprintf(" retrieved attribute name '%s' didn't match '%s'\n", name_buf,
+ ATTRIBUTE_GET_NAME_TEST_ATTRIBUTE_NAME2);
+ PART_ERROR(H5Aget_name_by_idx_name_order_decreasing);
+ }
+
+ *name_buf = '\0';
+ if (H5Aget_name_by_idx(container_group, ATTRIBUTE_GET_NAME_TEST_GROUP_NAME, H5_INDEX_NAME,
+ H5_ITER_DEC, 0, name_buf, (size_t)name_buf_size, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't retrieve name of attribute at index %lld using H5Aget_name_by_index "
+ "by alphabetical order in decreasing order\n",
+ 0);
+ PART_ERROR(H5Aget_name_by_idx_name_order_decreasing);
+ }
+
+ if (HDstrncmp(name_buf, ATTRIBUTE_GET_NAME_TEST_ATTRIBUTE_NAME3,
+ strlen(ATTRIBUTE_GET_NAME_TEST_ATTRIBUTE_NAME3) + 1)) {
+ H5_FAILED();
+ HDprintf(" retrieved attribute name '%s' didn't match '%s'\n", name_buf,
+ ATTRIBUTE_GET_NAME_TEST_ATTRIBUTE_NAME3);
+ PART_ERROR(H5Aget_name_by_idx_name_order_decreasing);
+ }
+
+ PASSED();
+#else
+ SKIPPED();
+ PART_EMPTY(H5Aget_name_by_idx_name_order_decreasing);
+#endif
+ }
+ PART_END(H5Aget_name_by_idx_name_order_decreasing);
+ }
+ END_MULTIPART;
+
+ TESTING_2("test cleanup");
+
+ if (name_buf) {
+ HDfree(name_buf);
+ name_buf = NULL;
+ }
+
+ if (H5Sclose(space_id) < 0)
+ TEST_ERROR;
+ if (H5Tclose(attr_dtype) < 0)
+ TEST_ERROR;
+ if (H5Pclose(gcpl_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ if (name_buf)
+ HDfree(name_buf);
+ H5Sclose(space_id);
+ H5Tclose(attr_dtype);
+ H5Aclose(attr_id);
+ H5Pclose(gcpl_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to check that an attribute's name can't be
+ * retrieved when H5Aget_name(_by_idx) is passed invalid
+ * parameters.
+ */
+static int
+test_get_attribute_name_invalid_params(void)
+{
+ ssize_t name_buf_size;
+ htri_t attr_exists;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t attr_id = H5I_INVALID_HID;
+ hid_t attr_dtype = H5I_INVALID_HID;
+ hid_t space_id = H5I_INVALID_HID;
+ char *name_buf = NULL;
+
+ TESTING_MULTIPART("retrieval of an attribute's name with invalid parameters");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_MORE)) {
+ SKIPPED();
+ HDprintf(
+ " API functions for basic file, group, or attribute aren't supported with this connector\n");
+ return 0;
+ }
+
+ TESTING_2("test setup");
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, ATTRIBUTE_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", ATTRIBUTE_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, ATTRIBUTE_GET_NAME_INVALID_PARAMS_TEST_GROUP_NAME,
+ H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container group '%s'\n",
+ ATTRIBUTE_GET_NAME_INVALID_PARAMS_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((space_id = generate_random_dataspace(ATTRIBUTE_GET_NAME_INVALID_PARAMS_TEST_SPACE_RANK, NULL, NULL,
+ TRUE)) < 0)
+ TEST_ERROR;
+
+ if ((attr_dtype = generate_random_datatype(H5T_NO_CLASS, TRUE)) < 0)
+ TEST_ERROR;
+
+ if ((attr_id = H5Acreate2(group_id, ATTRIBUTE_GET_NAME_INVALID_PARAMS_TEST_ATTRIBUTE_NAME, attr_dtype,
+ space_id, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create attribute\n");
+ goto error;
+ }
+
+ /* Verify the attribute has been created */
+ if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_GET_NAME_INVALID_PARAMS_TEST_ATTRIBUTE_NAME)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if attribute exists\n");
+ goto error;
+ }
+
+ if (!attr_exists) {
+ H5_FAILED();
+ HDprintf(" attribute did not exist\n");
+ goto error;
+ }
+
+ /*
+ * Query the size of the attribute's name first, then allocate an actual
+ * buffer for the tests below.
+ */
+
+ if ((name_buf_size = H5Aget_name(attr_id, 0, NULL)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't retrieve name buf size\n");
+ goto error;
+ }
+
+ if (NULL == (name_buf = (char *)HDmalloc((size_t)name_buf_size + 1)))
+ TEST_ERROR;
+
+ PASSED();
+
+ BEGIN_MULTIPART
+ {
+ PART_BEGIN(H5Aget_name_invalid_attr_id)
+ {
+ TESTING_2("H5Aget_name with an invalid attr_id");
+
+ H5E_BEGIN_TRY
+ {
+ name_buf_size = H5Aget_name(H5I_INVALID_HID, (size_t)name_buf_size + 1, name_buf);
+ }
+ H5E_END_TRY;
+
+ if (name_buf_size >= 0) {
+ H5_FAILED();
+ HDprintf(" retrieved attribute name using H5Aget_name with an invalid attr_id!\n");
+ PART_ERROR(H5Aget_name_invalid_attr_id);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Aget_name_invalid_attr_id);
+
+ PART_BEGIN(H5Aget_name_invalid_name_buf)
+ {
+ TESTING_2("H5Aget_name with an invalid name buffer");
+
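+ /* Use a nonzero buffer size with a NULL buffer so the call is forced to fail */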
+ H5E_BEGIN_TRY
+ {
+ name_buf_size = 1;
+ name_buf_size = H5Aget_name(attr_id, (size_t)name_buf_size, NULL);
+ }
+ H5E_END_TRY;
+
+ if (name_buf_size >= 0) {
+ H5_FAILED();
+ HDprintf(" retrieved attribute name using H5Aget_name with an invalid name buffer!\n");
+ PART_ERROR(H5Aget_name_invalid_name_buf);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Aget_name_invalid_name_buf);
+
+ PART_BEGIN(H5Aget_name_by_idx_invalid_loc_id)
+ {
+ TESTING_2("H5Aget_name_by_idx with an invalid loc_id");
+
+ H5E_BEGIN_TRY
+ {
+ name_buf_size = H5Aget_name_by_idx(
+ H5I_INVALID_HID, ATTRIBUTE_GET_NAME_INVALID_PARAMS_TEST_GROUP_NAME, H5_INDEX_NAME,
+ H5_ITER_INC, 0, name_buf, (size_t)name_buf_size + 1, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (name_buf_size >= 0) {
+ H5_FAILED();
+ HDprintf(" retrieved attribute name using H5Aget_name_by_idx with an invalid loc_id!\n");
+ PART_ERROR(H5Aget_name_by_idx_invalid_loc_id);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Aget_name_by_idx_invalid_loc_id);
+
+ PART_BEGIN(H5Aget_name_by_idx_invalid_obj_name)
+ {
+ TESTING_2("H5Aget_name_by_idx with an invalid object name");
+
+ H5E_BEGIN_TRY
+ {
+ name_buf_size = H5Aget_name_by_idx(container_group, NULL, H5_INDEX_NAME, H5_ITER_INC, 0,
+ name_buf, (size_t)name_buf_size + 1, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (name_buf_size >= 0) {
+ H5_FAILED();
+ HDprintf(" retrieved attribute name using H5Aget_name_by_idx with a NULL object name!\n");
+ PART_ERROR(H5Aget_name_by_idx_invalid_obj_name);
+ }
+
+ H5E_BEGIN_TRY
+ {
+ name_buf_size = H5Aget_name_by_idx(container_group, "", H5_INDEX_NAME, H5_ITER_INC, 0,
+ name_buf, (size_t)name_buf_size + 1, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (name_buf_size >= 0) {
+ H5_FAILED();
+ HDprintf(" retrieved attribute name using H5Aget_name_by_idx with an invalid object name "
+ "of ''!\n");
+ PART_ERROR(H5Aget_name_by_idx_invalid_obj_name);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Aget_name_by_idx_invalid_obj_name);
+
+ PART_BEGIN(H5Aget_name_by_idx_invalid_index_type)
+ {
+ TESTING_2("H5Aget_name_by_idx with an invalid index type");
+
+ H5E_BEGIN_TRY
+ {
+ name_buf_size = H5Aget_name_by_idx(
+ container_group, ATTRIBUTE_GET_NAME_INVALID_PARAMS_TEST_GROUP_NAME, H5_INDEX_UNKNOWN,
+ H5_ITER_INC, 0, name_buf, (size_t)name_buf_size + 1, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (name_buf_size >= 0) {
+ H5_FAILED();
+ HDprintf(" retrieved attribute name using H5Aget_name_by_idx with invalid index type "
+ "H5_INDEX_UNKNOWN!\n");
+ PART_ERROR(H5Aget_name_by_idx_invalid_index_type);
+ }
+
+ H5E_BEGIN_TRY
+ {
+ name_buf_size = H5Aget_name_by_idx(
+ container_group, ATTRIBUTE_GET_NAME_INVALID_PARAMS_TEST_GROUP_NAME, H5_INDEX_N,
+ H5_ITER_INC, 0, name_buf, (size_t)name_buf_size + 1, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (name_buf_size >= 0) {
+ H5_FAILED();
+ HDprintf(" retrieved attribute name using H5Aget_name_by_idx with invalid index type "
+ "H5_INDEX_N!\n");
+ PART_ERROR(H5Aget_name_by_idx_invalid_index_type);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Aget_name_by_idx_invalid_index_type);
+
+ PART_BEGIN(H5Aget_name_by_idx_invalid_iter_order)
+ {
+ TESTING_2("H5Aget_name_by_idx with an invalid iteration order");
+
+ H5E_BEGIN_TRY
+ {
+ name_buf_size = H5Aget_name_by_idx(
+ container_group, ATTRIBUTE_GET_NAME_INVALID_PARAMS_TEST_GROUP_NAME, H5_INDEX_NAME,
+ H5_ITER_UNKNOWN, 0, name_buf, (size_t)name_buf_size + 1, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (name_buf_size >= 0) {
+ H5_FAILED();
+ HDprintf(" retrieved attribute name using H5Aget_name_by_idx with invalid iteration order "
+ "H5_ITER_UNKNOWN!\n");
+ PART_ERROR(H5Aget_name_by_idx_invalid_iter_order);
+ }
+
+ H5E_BEGIN_TRY
+ {
+ name_buf_size = H5Aget_name_by_idx(
+ container_group, ATTRIBUTE_GET_NAME_INVALID_PARAMS_TEST_GROUP_NAME, H5_INDEX_NAME,
+ H5_ITER_N, 0, name_buf, (size_t)name_buf_size + 1, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (name_buf_size >= 0) {
+ H5_FAILED();
+ HDprintf(" retrieved attribute name using H5Aget_name_by_idx with invalid iteration order "
+ "H5_ITER_N!\n");
+ PART_ERROR(H5Aget_name_by_idx_invalid_iter_order);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Aget_name_by_idx_invalid_iter_order);
+
+ PART_BEGIN(H5Aget_name_by_idx_invalid_name_buf)
+ {
+ TESTING_2("H5Aget_name_by_idx with an invalid name buffer");
+
+ H5E_BEGIN_TRY
+ {
+ name_buf_size = 1;
+ name_buf_size = H5Aget_name_by_idx(
+ container_group, ATTRIBUTE_GET_NAME_INVALID_PARAMS_TEST_GROUP_NAME, H5_INDEX_NAME,
+ H5_ITER_INC, 0, NULL, (size_t)name_buf_size, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (name_buf_size >= 0) {
+ H5_FAILED();
+ HDprintf(
+ " retrieved attribute name using H5Aget_name_by_idx with an invalid name buffer!\n");
+ PART_ERROR(H5Aget_name_by_idx_invalid_name_buf);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Aget_name_by_idx_invalid_name_buf);
+
+ PART_BEGIN(H5Aget_name_by_idx_invalid_lapl)
+ {
+ TESTING_2("H5Aget_name_by_idx with an invalid LAPL");
+
+ H5E_BEGIN_TRY
+ {
+ name_buf_size = H5Aget_name_by_idx(
+ container_group, ATTRIBUTE_GET_NAME_INVALID_PARAMS_TEST_GROUP_NAME, H5_INDEX_NAME,
+ H5_ITER_INC, 0, name_buf, (size_t)name_buf_size + 1, H5I_INVALID_HID);
+ }
+ H5E_END_TRY;
+
+ if (name_buf_size >= 0) {
+ H5_FAILED();
+ HDprintf(" retrieved attribute name using H5Aget_name_by_idx with an invalid LAPL!\n");
+ PART_ERROR(H5Aget_name_by_idx_invalid_lapl);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Aget_name_by_idx_invalid_lapl);
+ }
+ END_MULTIPART;
+
+ TESTING_2("test cleanup");
+
+ if (name_buf) {
+ HDfree(name_buf);
+ name_buf = NULL;
+ }
+
+ if (H5Sclose(space_id) < 0)
+ TEST_ERROR;
+ if (H5Tclose(attr_dtype) < 0)
+ TEST_ERROR;
+ if (H5Aclose(attr_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ if (name_buf)
+ HDfree(name_buf);
+ H5Sclose(space_id);
+ H5Tclose(attr_dtype);
+ H5Aclose(attr_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test for H5Aget_storage_size.
+ */
+static int
+test_get_attribute_storage_size(void)
+{
+ TESTING("H5Aget_storage_size");
+
+ SKIPPED();
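+
+ /*
+ * Illustrative sketch only (not executed while this test is skipped): a
+ * future implementation would likely create and write a small attribute,
+ * then check that H5Aget_storage_size reports a non-zero size, e.g.:
+ *
+ * hsize_t storage_size = H5Aget_storage_size(attr_id);
+ * if (storage_size == 0)
+ * TEST_ERROR;
+ *
+ * where attr_id is a hypothetical handle obtained from H5Acreate2 as in
+ * the other tests in this file.
+ */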
+
+ return 0;
+}
+
+/*
+ * A test to check the functionality of H5Aget_info(_by_idx).
+ */
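+/*
+ * Note: of the H5A_info_t fields (corder_valid, corder, cset and data_size),
+ * the parts below only check the creation order and character set, and the
+ * creation-order value is only validated when corder_valid indicates that it
+ * was actually tracked.
+ */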
+static int
+test_get_attribute_info(void)
+{
+ H5A_info_t attr_info;
+ htri_t attr_exists;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t attr_id = H5I_INVALID_HID;
+ hid_t attr_dtype = H5I_INVALID_HID;
+ hid_t space_id = H5I_INVALID_HID;
+ hid_t gcpl_id = H5I_INVALID_HID;
+
+ TESTING_MULTIPART("retrieval of attribute info");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_MORE) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_CREATION_ORDER)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file, group, attribute, or creation order aren't supported "
+ "with this connector\n");
+ return 0;
+ }
+
+ TESTING_2("test setup");
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file\n");
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, ATTRIBUTE_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group\n");
+ goto error;
+ }
+
+ if ((gcpl_id = H5Pcreate(H5P_GROUP_CREATE)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create GCPL for attribute creation order tracking\n");
+ goto error;
+ }
+
+ if (H5Pset_attr_creation_order(gcpl_id, H5P_CRT_ORDER_TRACKED) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't set attribute creation order tracking\n");
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, ATTRIBUTE_GET_INFO_TEST_GROUP_NAME, H5P_DEFAULT, gcpl_id,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container group '%s'\n", ATTRIBUTE_GET_INFO_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((space_id = generate_random_dataspace(ATTRIBUTE_GET_INFO_TEST_SPACE_RANK, NULL, NULL, TRUE)) < 0)
+ TEST_ERROR;
+
+ if ((attr_dtype = generate_random_datatype(H5T_NO_CLASS, TRUE)) < 0)
+ TEST_ERROR;
+
+ /* Create several attributes */
+ if ((attr_id = H5Acreate2(group_id, ATTRIBUTE_GET_INFO_TEST_ATTR_NAME, attr_dtype, space_id, H5P_DEFAULT,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create attribute '%s'\n", ATTRIBUTE_GET_INFO_TEST_ATTR_NAME);
+ goto error;
+ }
+
+ if (H5Aclose(attr_id) < 0)
+ TEST_ERROR;
+
+ if ((attr_id = H5Acreate2(group_id, ATTRIBUTE_GET_INFO_TEST_ATTR_NAME2, attr_dtype, space_id, H5P_DEFAULT,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create attribute '%s'\n", ATTRIBUTE_GET_INFO_TEST_ATTR_NAME2);
+ goto error;
+ }
+
+ if (H5Aclose(attr_id) < 0)
+ TEST_ERROR;
+
+ if ((attr_id = H5Acreate2(group_id, ATTRIBUTE_GET_INFO_TEST_ATTR_NAME3, attr_dtype, space_id, H5P_DEFAULT,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create attribute '%s'\n", ATTRIBUTE_GET_INFO_TEST_ATTR_NAME3);
+ goto error;
+ }
+
+ if (H5Aclose(attr_id) < 0)
+ TEST_ERROR;
+
+ /* Verify the attributes have been created */
+ if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_GET_INFO_TEST_ATTR_NAME)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if attribute '%s' exists\n", ATTRIBUTE_GET_INFO_TEST_ATTR_NAME);
+ goto error;
+ }
+
+ if (!attr_exists) {
+ H5_FAILED();
+ HDprintf(" attribute '%s' did not exist\n", ATTRIBUTE_GET_INFO_TEST_ATTR_NAME);
+ goto error;
+ }
+
+ if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_GET_INFO_TEST_ATTR_NAME2)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if attribute '%s' exists\n", ATTRIBUTE_GET_INFO_TEST_ATTR_NAME2);
+ goto error;
+ }
+
+ if (!attr_exists) {
+ H5_FAILED();
+ HDprintf(" attribute '%s' did not exist\n", ATTRIBUTE_GET_INFO_TEST_ATTR_NAME2);
+ goto error;
+ }
+
+ if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_GET_INFO_TEST_ATTR_NAME3)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if attribute '%s' exists\n", ATTRIBUTE_GET_INFO_TEST_ATTR_NAME3);
+ goto error;
+ }
+
+ if (!attr_exists) {
+ H5_FAILED();
+ HDprintf(" attribute '%s' did not exist\n", ATTRIBUTE_GET_INFO_TEST_ATTR_NAME3);
+ goto error;
+ }
+
+ PASSED();
+
+ BEGIN_MULTIPART
+ {
+ PART_BEGIN(H5Aget_info)
+ {
+ TESTING_2("H5Aget_info");
+
+ if ((attr_id = H5Aopen(group_id, ATTRIBUTE_GET_INFO_TEST_ATTR_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open attribute '%s'\n", ATTRIBUTE_GET_INFO_TEST_ATTR_NAME);
+ PART_ERROR(H5Aget_info);
+ }
+
+ HDmemset(&attr_info, 0, sizeof(attr_info));
+ if (H5Aget_info(attr_id, &attr_info) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get attribute info\n");
+ PART_ERROR(H5Aget_info);
+ }
+
+ if (attr_info.corder_valid && (attr_info.corder != 0)) {
+ H5_FAILED();
+ HDprintf(" attribute's creation order value '%lld' did not match expected value '%lld'\n",
+ (long long)attr_info.corder, (long long)0);
+ PART_ERROR(H5Aget_info);
+ }
+
+ /* Ensure that the cset field is at least set to a meaningful value */
+ if (attr_info.cset != H5T_CSET_ASCII && attr_info.cset != H5T_CSET_UTF8 &&
+ attr_info.cset != H5T_CSET_ERROR) {
+ H5_FAILED();
+ HDprintf(" attribute info's 'cset' field wasn't set to a meaningful value\n");
+ PART_ERROR(H5Aget_info);
+ }
+
+ if (H5Aclose(attr_id) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't close attribute '%s'\n", ATTRIBUTE_GET_INFO_TEST_ATTR_NAME);
+ PART_ERROR(H5Aget_info);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Aget_info);
+
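+ /* Close the attribute opened in the part above (if it is still open) so
+ * the following parts start from a clean state.
+ */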
+ H5E_BEGIN_TRY
+ {
+ H5Aclose(attr_id);
+ attr_id = H5I_INVALID_HID;
+ }
+ H5E_END_TRY;
+
+ PART_BEGIN(H5Aget_info_by_name)
+ {
+ TESTING_2("H5Aget_info_by_name");
+
+ HDmemset(&attr_info, 0, sizeof(attr_info));
+ if (H5Aget_info_by_name(group_id, ".", ATTRIBUTE_GET_INFO_TEST_ATTR_NAME, &attr_info,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get attribute info by name '%s'\n", ATTRIBUTE_GET_INFO_TEST_ATTR_NAME);
+ PART_ERROR(H5Aget_info_by_name);
+ }
+
+ if (attr_info.corder_valid && (attr_info.corder != 0)) {
+ H5_FAILED();
+ HDprintf(" attribute's creation order value '%lld' did not match expected value '%lld'\n",
+ (long long)attr_info.corder, (long long)0);
+ PART_ERROR(H5Aget_info_by_name);
+ }
+
+ /* Ensure that the cset field is at least set to a meaningful value */
+ if (attr_info.cset != H5T_CSET_ASCII && attr_info.cset != H5T_CSET_UTF8 &&
+ attr_info.cset != H5T_CSET_ERROR) {
+ H5_FAILED();
+ HDprintf(" attribute info's 'cset' field wasn't set to a meaningful value\n");
+ PART_ERROR(H5Aget_info_by_name);
+ }
+
+ HDmemset(&attr_info, 0, sizeof(attr_info));
+ if (H5Aget_info_by_name(group_id, ".", ATTRIBUTE_GET_INFO_TEST_ATTR_NAME2, &attr_info,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get attribute info by name '%s'\n",
+ ATTRIBUTE_GET_INFO_TEST_ATTR_NAME2);
+ PART_ERROR(H5Aget_info_by_name);
+ }
+
+ if (attr_info.corder_valid && (attr_info.corder != 1)) {
+ H5_FAILED();
+ HDprintf(" attribute's creation order value '%lld' did not match expected value '%lld'\n",
+ (long long)attr_info.corder, (long long)1);
+ PART_ERROR(H5Aget_info_by_name);
+ }
+
+ /* Ensure that the cset field is at least set to a meaningful value */
+ if (attr_info.cset != H5T_CSET_ASCII && attr_info.cset != H5T_CSET_UTF8 &&
+ attr_info.cset != H5T_CSET_ERROR) {
+ H5_FAILED();
+ HDprintf(" attribute info's 'cset' field wasn't set to a meaningful value\n");
+ PART_ERROR(H5Aget_info_by_name);
+ }
+
+ HDmemset(&attr_info, 0, sizeof(attr_info));
+ if (H5Aget_info_by_name(group_id, ".", ATTRIBUTE_GET_INFO_TEST_ATTR_NAME3, &attr_info,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get attribute info by name '%s'\n",
+ ATTRIBUTE_GET_INFO_TEST_ATTR_NAME3);
+ PART_ERROR(H5Aget_info_by_name);
+ }
+
+ if (attr_info.corder_valid && (attr_info.corder != 2)) {
+ H5_FAILED();
+ HDprintf(" attribute's creation order value '%lld' did not match expected value '%lld'\n",
+ (long long)attr_info.corder, (long long)2);
+ PART_ERROR(H5Aget_info_by_name);
+ }
+
+ /* Ensure that the cset field is at least set to a meaningful value */
+ if (attr_info.cset != H5T_CSET_ASCII && attr_info.cset != H5T_CSET_UTF8 &&
+ attr_info.cset != H5T_CSET_ERROR) {
+ H5_FAILED();
+ HDprintf(" attribute info's 'cset' field wasn't set to a meaningful value\n");
+ PART_ERROR(H5Aget_info_by_name);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Aget_info_by_name);
+
+ PART_BEGIN(H5Aget_info_by_idx_crt_order_increasing)
+ {
+ TESTING_2("H5Aget_info_by_idx by creation order in increasing order");
+
+ HDmemset(&attr_info, 0, sizeof(attr_info));
+ if (H5Aget_info_by_idx(group_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, 0, &attr_info,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get info for attribute at index %d using H5Aget_info_by_idx by "
+ "creation order in increasing order\n",
+ 0);
+ PART_ERROR(H5Aget_info_by_idx_crt_order_increasing);
+ }
+
+ if (attr_info.corder_valid && (attr_info.corder != 0)) {
+ H5_FAILED();
+ HDprintf(" attribute's creation order value '%lld' did not match expected value '%lld'\n",
+ (long long)attr_info.corder, (long long)0);
+ PART_ERROR(H5Aget_info_by_idx_crt_order_increasing);
+ }
+
+ /* Ensure that the cset field is at least set to a meaningful value */
+ if (attr_info.cset != H5T_CSET_ASCII && attr_info.cset != H5T_CSET_UTF8 &&
+ attr_info.cset != H5T_CSET_ERROR) {
+ H5_FAILED();
+ HDprintf(" attribute info's 'cset' field wasn't set to a meaningful value\n");
+ PART_ERROR(H5Aget_info_by_idx_crt_order_increasing);
+ }
+
+ HDmemset(&attr_info, 0, sizeof(attr_info));
+ if (H5Aget_info_by_idx(group_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, 1, &attr_info,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get info for attribute at index %d using H5Aget_info_by_idx by "
+ "creation order in increasing order\n",
+ 1);
+ PART_ERROR(H5Aget_info_by_idx_crt_order_increasing);
+ }
+
+ if (attr_info.corder_valid && (attr_info.corder != 1)) {
+ H5_FAILED();
+ HDprintf(" attribute's creation order value '%lld' did not match expected value '%lld'\n",
+ (long long)attr_info.corder, (long long)1);
+ PART_ERROR(H5Aget_info_by_idx_crt_order_increasing);
+ }
+
+ /* Ensure that the cset field is at least set to a meaningful value */
+ if (attr_info.cset != H5T_CSET_ASCII && attr_info.cset != H5T_CSET_UTF8 &&
+ attr_info.cset != H5T_CSET_ERROR) {
+ H5_FAILED();
+ HDprintf(" attribute info's 'cset' field wasn't set to a meaningful value\n");
+ PART_ERROR(H5Aget_info_by_idx_crt_order_increasing);
+ }
+
+ HDmemset(&attr_info, 0, sizeof(attr_info));
+ if (H5Aget_info_by_idx(group_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, 2, &attr_info,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get info for attribute at index %d using H5Aget_info_by_idx by "
+ "creation order in increasing order\n",
+ 2);
+ PART_ERROR(H5Aget_info_by_idx_crt_order_increasing);
+ }
+
+ if (attr_info.corder_valid && (attr_info.corder != 2)) {
+ H5_FAILED();
+ HDprintf(" attribute's creation order value '%lld' did not match expected value '%lld'\n",
+ (long long)attr_info.corder, (long long)2);
+ PART_ERROR(H5Aget_info_by_idx_crt_order_increasing);
+ }
+
+ /* Ensure that the cset field is at least set to a meaningful value */
+ if (attr_info.cset != H5T_CSET_ASCII && attr_info.cset != H5T_CSET_UTF8 &&
+ attr_info.cset != H5T_CSET_ERROR) {
+ H5_FAILED();
+ HDprintf(" attribute info's 'cset' field wasn't set to a meaningful value\n");
+ PART_ERROR(H5Aget_info_by_idx_crt_order_increasing);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Aget_info_by_idx_crt_order_increasing);
+
+ PART_BEGIN(H5Aget_info_by_idx_crt_order_decreasing)
+ {
+ TESTING_2("H5Aget_info_by_idx by creation order in decreasing order");
+
+ HDmemset(&attr_info, 0, sizeof(attr_info));
+ if (H5Aget_info_by_idx(group_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_DEC, 2, &attr_info,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get info for attribute at index %d using H5Aget_info_by_idx by "
+ "creation order in decreasing order\n",
+ 2);
+ PART_ERROR(H5Aget_info_by_idx_crt_order_decreasing);
+ }
+
+ if (attr_info.corder_valid && (attr_info.corder != 0)) {
+ H5_FAILED();
+ HDprintf(" attribute's creation order value '%lld' did not match expected value '%lld'\n",
+ (long long)attr_info.corder, (long long)0);
+ PART_ERROR(H5Aget_info_by_idx_crt_order_decreasing);
+ }
+
+ /* Ensure that the cset field is at least set to a meaningful value */
+ if (attr_info.cset != H5T_CSET_ASCII && attr_info.cset != H5T_CSET_UTF8 &&
+ attr_info.cset != H5T_CSET_ERROR) {
+ H5_FAILED();
+ HDprintf(" attribute info's 'cset' field wasn't set to a meaningful value\n");
+ PART_ERROR(H5Aget_info_by_idx_crt_order_decreasing);
+ }
+
+ HDmemset(&attr_info, 0, sizeof(attr_info));
+ if (H5Aget_info_by_idx(group_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_DEC, 1, &attr_info,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get info for attribute at index %d using H5Aget_info_by_idx by "
+ "creation order in decreasing order\n",
+ 1);
+ PART_ERROR(H5Aget_info_by_idx_crt_order_decreasing);
+ }
+
+ if (attr_info.corder_valid && (attr_info.corder != 1)) {
+ H5_FAILED();
+ HDprintf(" attribute's creation order value '%lld' did not match expected value '%lld'\n",
+ (long long)attr_info.corder, (long long)1);
+ PART_ERROR(H5Aget_info_by_idx_crt_order_decreasing);
+ }
+
+ /* Ensure that the cset field is at least set to a meaningful value */
+ if (attr_info.cset != H5T_CSET_ASCII && attr_info.cset != H5T_CSET_UTF8 &&
+ attr_info.cset != H5T_CSET_ERROR) {
+ H5_FAILED();
+ HDprintf(" attribute info's 'cset' field wasn't set to a meaningful value\n");
+ PART_ERROR(H5Aget_info_by_idx_crt_order_decreasing);
+ }
+
+ HDmemset(&attr_info, 0, sizeof(attr_info));
+ if (H5Aget_info_by_idx(group_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_DEC, 0, &attr_info,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get info for attribute at index %d using H5Aget_info_by_idx by "
+ "creation order in decreasing order\n",
+ 0);
+ PART_ERROR(H5Aget_info_by_idx_crt_order_decreasing);
+ }
+
+ if (attr_info.corder_valid && (attr_info.corder != 2)) {
+ H5_FAILED();
+ HDprintf(" attribute's creation order value '%lld' did not match expected value '%lld'\n",
+ (long long)attr_info.corder, (long long)2);
+ PART_ERROR(H5Aget_info_by_idx_crt_order_decreasing);
+ }
+
+ /* Ensure that the cset field is at least set to a meaningful value */
+ if (attr_info.cset != H5T_CSET_ASCII && attr_info.cset != H5T_CSET_UTF8 &&
+ attr_info.cset != H5T_CSET_ERROR) {
+ H5_FAILED();
+ HDprintf(" attribute info's 'cset' field wasn't set to a meaningful value\n");
+ PART_ERROR(H5Aget_info_by_idx_crt_order_decreasing);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Aget_info_by_idx_crt_order_decreasing);
+
+ PART_BEGIN(H5Aget_info_by_idx_name_order_increasing)
+ {
+ TESTING_2("H5Aget_info_by_idx by alphabetical order in increasing order");
+
+ HDmemset(&attr_info, 0, sizeof(attr_info));
+ if (H5Aget_info_by_idx(group_id, ".", H5_INDEX_NAME, H5_ITER_INC, 0, &attr_info, H5P_DEFAULT) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get info for attribute at index %d using H5Aget_info_by_idx by "
+ "alphabetical order in increasing order\n",
+ 0);
+ PART_ERROR(H5Aget_info_by_idx_name_order_increasing);
+ }
+
+ if (attr_info.corder_valid && (attr_info.corder != 0)) {
+ H5_FAILED();
+ HDprintf(" attribute's creation order value '%lld' did not match expected value '%lld'\n",
+ (long long)attr_info.corder, (long long)0);
+ PART_ERROR(H5Aget_info_by_idx_name_order_increasing);
+ }
+
+ /* Ensure that the cset field is at least set to a meaningful value */
+ if (attr_info.cset != H5T_CSET_ASCII && attr_info.cset != H5T_CSET_UTF8 &&
+ attr_info.cset != H5T_CSET_ERROR) {
+ H5_FAILED();
+ HDprintf(" attribute info's 'cset' field wasn't set to a meaningful value\n");
+ PART_ERROR(H5Aget_info_by_idx_name_order_increasing);
+ }
+
+ HDmemset(&attr_info, 0, sizeof(attr_info));
+ if (H5Aget_info_by_idx(group_id, ".", H5_INDEX_NAME, H5_ITER_INC, 1, &attr_info, H5P_DEFAULT) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get info for attribute at index %d using H5Aget_info_by_idx by "
+ "alphabetical order in increasing order\n",
+ 1);
+ PART_ERROR(H5Aget_info_by_idx_name_order_increasing);
+ }
+
+ if (attr_info.corder_valid && (attr_info.corder != 1)) {
+ H5_FAILED();
+ HDprintf(" attribute's creation order value '%lld' did not match expected value '%lld'\n",
+ (long long)attr_info.corder, (long long)1);
+ PART_ERROR(H5Aget_info_by_idx_name_order_increasing);
+ }
+
+ /* Ensure that the cset field is at least set to a meaningful value */
+ if (attr_info.cset != H5T_CSET_ASCII && attr_info.cset != H5T_CSET_UTF8 &&
+ attr_info.cset != H5T_CSET_ERROR) {
+ H5_FAILED();
+ HDprintf(" attribute info's 'cset' field wasn't set to a meaningful value\n");
+ PART_ERROR(H5Aget_info_by_idx_name_order_increasing);
+ }
+
+ HDmemset(&attr_info, 0, sizeof(attr_info));
+ if (H5Aget_info_by_idx(group_id, ".", H5_INDEX_NAME, H5_ITER_INC, 2, &attr_info, H5P_DEFAULT) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get info for attribute at index %d using H5Aget_info_by_idx by "
+ "alphabetical order in increasing order\n",
+ 2);
+ PART_ERROR(H5Aget_info_by_idx_name_order_increasing);
+ }
+
+ if (attr_info.corder_valid && (attr_info.corder != 2)) {
+ H5_FAILED();
+ HDprintf(" attribute's creation order value '%lld' did not match expected value '%lld'\n",
+ (long long)attr_info.corder, (long long)2);
+ PART_ERROR(H5Aget_info_by_idx_name_order_increasing);
+ }
+
+ /* Ensure that the cset field is at least set to a meaningful value */
+ if (attr_info.cset != H5T_CSET_ASCII && attr_info.cset != H5T_CSET_UTF8 &&
+ attr_info.cset != H5T_CSET_ERROR) {
+ H5_FAILED();
+ HDprintf(" attribute info's 'cset' field wasn't set to a meaningful value\n");
+ PART_ERROR(H5Aget_info_by_idx_name_order_increasing);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Aget_info_by_idx_name_order_increasing);
+
+ PART_BEGIN(H5Aget_info_by_idx_name_order_decreasing)
+ {
+ TESTING_2("H5Aget_info_by_idx by alphabetical order in decreasing order");
+#ifndef NO_DECREASING_ALPHA_ITER_ORDER
+ HDmemset(&attr_info, 0, sizeof(attr_info));
+ if (H5Aget_info_by_idx(group_id, ".", H5_INDEX_NAME, H5_ITER_DEC, 2, &attr_info, H5P_DEFAULT) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get info for attribute at index %d using H5Aget_info_by_idx by "
+ "alphabetical order in decreasing order\n",
+ 2);
+ PART_ERROR(H5Aget_info_by_idx_name_order_decreasing);
+ }
+
+ if (attr_info.corder_valid && (attr_info.corder != 0)) {
+ H5_FAILED();
+ HDprintf(" attribute's creation order value '%lld' did not match expected value '%lld'\n",
+ (long long)attr_info.corder, (long long)0);
+ PART_ERROR(H5Aget_info_by_idx_name_order_decreasing);
+ }
+
+ /* Ensure that the cset field is at least set to a meaningful value */
+ if (attr_info.cset != H5T_CSET_ASCII && attr_info.cset != H5T_CSET_UTF8 &&
+ attr_info.cset != H5T_CSET_ERROR) {
+ H5_FAILED();
+ HDprintf(" attribute info's 'cset' field wasn't set to a meaningful value\n");
+ PART_ERROR(H5Aget_info_by_idx_name_order_decreasing);
+ }
+
+ HDmemset(&attr_info, 0, sizeof(attr_info));
+ if (H5Aget_info_by_idx(group_id, ".", H5_INDEX_NAME, H5_ITER_DEC, 1, &attr_info, H5P_DEFAULT) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get info for attribute at index %d using H5Aget_info_by_idx by "
+ "alphabetical order in decreasing order\n",
+ 1);
+ PART_ERROR(H5Aget_info_by_idx_name_order_decreasing);
+ }
+
+ if (attr_info.corder_valid && (attr_info.corder != 1)) {
+ H5_FAILED();
+ HDprintf(" attribute's creation order value '%lld' did not match expected value '%lld'\n",
+ (long long)attr_info.corder, (long long)1);
+ PART_ERROR(H5Aget_info_by_idx_name_order_decreasing);
+ }
+
+ /* Ensure that the cset field is at least set to a meaningful value */
+ if (attr_info.cset != H5T_CSET_ASCII && attr_info.cset != H5T_CSET_UTF8 &&
+ attr_info.cset != H5T_CSET_ERROR) {
+ H5_FAILED();
+ HDprintf(" attribute info's 'cset' field wasn't set to a meaningful value\n");
+ PART_ERROR(H5Aget_info_by_idx_name_order_decreasing);
+ }
+
+ HDmemset(&attr_info, 0, sizeof(attr_info));
+ if (H5Aget_info_by_idx(group_id, ".", H5_INDEX_NAME, H5_ITER_DEC, 0, &attr_info, H5P_DEFAULT) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get info for attribute at index %d using H5Aget_info_by_idx by "
+ "alphabetical order in decreasing order\n",
+ 0);
+ PART_ERROR(H5Aget_info_by_idx_name_order_decreasing);
+ }
+
+ if (attr_info.corder_valid && (attr_info.corder != 2)) {
+ H5_FAILED();
+ HDprintf(" attribute's creation order value '%lld' did not match expected value '%lld'\n",
+ (long long)attr_info.corder, (long long)2);
+ PART_ERROR(H5Aget_info_by_idx_name_order_decreasing);
+ }
+
+ /* Ensure that the cset field is at least set to a meaningful value */
+ if (attr_info.cset != H5T_CSET_ASCII && attr_info.cset != H5T_CSET_UTF8 &&
+ attr_info.cset != H5T_CSET_ERROR) {
+ H5_FAILED();
+ HDprintf(" attribute info's 'cset' field wasn't set to a meaningful value\n");
+ PART_ERROR(H5Aget_info_by_idx_name_order_decreasing);
+ }
+
+ PASSED();
+#else
+ SKIPPED();
+ PART_EMPTY(H5Aget_info_by_idx_name_order_decreasing);
+#endif
+ }
+ PART_END(H5Aget_info_by_idx_name_order_decreasing);
+ }
+ END_MULTIPART;
+
+ TESTING_2("test cleanup");
+
+ if (H5Sclose(space_id) < 0)
+ TEST_ERROR;
+ if (H5Tclose(attr_dtype) < 0)
+ TEST_ERROR;
+ if (H5Pclose(gcpl_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(space_id);
+ H5Tclose(attr_dtype);
+ H5Aclose(attr_id);
+ H5Pclose(gcpl_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to check that H5Aget_info(_by_name/_by_idx)
+ * doesn't succeed when passed invalid parameters.
+ */
+static int
+test_get_attribute_info_invalid_params(void)
+{
+ H5A_info_t attr_info;
+ htri_t attr_exists;
+ herr_t err_ret = -1;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t attr_id = H5I_INVALID_HID;
+ hid_t attr_dtype = H5I_INVALID_HID;
+ hid_t space_id = H5I_INVALID_HID;
+
+ TESTING_MULTIPART("retrieval of attribute info with invalid parameters");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_MORE)) {
+ SKIPPED();
+ HDprintf(
+ " API functions for basic file, group, or attribute aren't supported with this connector\n");
+ return 0;
+ }
+
+ TESTING_2("test setup");
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, ATTRIBUTE_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", ATTRIBUTE_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, ATTRIBUTE_GET_INFO_INVALID_PARAMS_TEST_GROUP_NAME,
+ H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container group '%s'\n",
+ ATTRIBUTE_GET_INFO_INVALID_PARAMS_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((space_id = generate_random_dataspace(ATTRIBUTE_GET_INFO_INVALID_PARAMS_TEST_SPACE_RANK, NULL, NULL,
+ TRUE)) < 0)
+ TEST_ERROR;
+
+ if ((attr_dtype = generate_random_datatype(H5T_NO_CLASS, TRUE)) < 0)
+ TEST_ERROR;
+
+ if ((attr_id = H5Acreate2(group_id, ATTRIBUTE_GET_INFO_INVALID_PARAMS_TEST_ATTR_NAME, attr_dtype,
+ space_id, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create attribute\n");
+ goto error;
+ }
+
+ /* Verify the attribute has been created */
+ if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_GET_INFO_INVALID_PARAMS_TEST_ATTR_NAME)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if attribute exists\n");
+ goto error;
+ }
+
+ if (!attr_exists) {
+ H5_FAILED();
+ HDprintf(" attribute did not exist\n");
+ goto error;
+ }
+
+ PASSED();
+
+ BEGIN_MULTIPART
+ {
+ PART_BEGIN(H5Aget_info_invalid_attr_id)
+ {
+ TESTING_2("H5Aget_info with an invalid attr_id");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Aget_info(H5I_INVALID_HID, &attr_info);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" retrieved attribute info using H5Aget_info with an invalid attr_id!\n");
+ PART_ERROR(H5Aget_info_invalid_attr_id);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Aget_info_invalid_attr_id);
+
+ PART_BEGIN(H5Aget_info_invalid_attr_info_pointer)
+ {
+ TESTING_2("H5Aget_info with an invalid attribute info pointer");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Aget_info(attr_id, NULL);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" retrieved attribute info using H5Aget_info with an invalid attribute info pointer!\n");
+ PART_ERROR(H5Aget_info_invalid_attr_info_pointer);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Aget_info_invalid_attr_info_pointer);
+
+ PART_BEGIN(H5Aget_info_by_name_invalid_loc_id)
+ {
+ TESTING_2("H5Aget_info_by_name with an invalid loc_id");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Aget_info_by_name(H5I_INVALID_HID, ".",
+ ATTRIBUTE_GET_INFO_INVALID_PARAMS_TEST_ATTR_NAME, &attr_info,
+ H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" retrieved attribute info using H5Aget_info_by_name with an invalid loc_id!\n");
+ PART_ERROR(H5Aget_info_by_name_invalid_loc_id);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Aget_info_by_name_invalid_loc_id);
+
+ PART_BEGIN(H5Aget_info_by_name_invalid_obj_name)
+ {
+ TESTING_2("H5Aget_info_by_name with an invalid object name");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret =
+ H5Aget_info_by_name(group_id, NULL, ATTRIBUTE_GET_INFO_INVALID_PARAMS_TEST_ATTR_NAME,
+ &attr_info, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" retrieved attribute info using H5Aget_info_by_name with a NULL object name!\n");
+ PART_ERROR(H5Aget_info_by_name_invalid_obj_name);
+ }
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Aget_info_by_name(group_id, "", ATTRIBUTE_GET_INFO_INVALID_PARAMS_TEST_ATTR_NAME,
+ &attr_info, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" retrieved attribute info using H5Aget_info_by_name with an invalid object name "
+ "of ''!\n");
+ PART_ERROR(H5Aget_info_by_name_invalid_obj_name);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Aget_info_by_name_invalid_obj_name);
+
+ PART_BEGIN(H5Aget_info_by_name_invalid_attr_name)
+ {
+ TESTING_2("H5Aget_info_by_name with an invalid attribute name");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Aget_info_by_name(group_id, ".", NULL, &attr_info, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(
+ " retrieved attribute info using H5Aget_info_by_name with a NULL attribute name!\n");
+ PART_ERROR(H5Aget_info_by_name_invalid_attr_name);
+ }
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Aget_info_by_name(group_id, ".", "", &attr_info, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" retrieved attribute info using H5Aget_info_by_name with an invalid attribute "
+ "name of ''!\n");
+ PART_ERROR(H5Aget_info_by_name_invalid_attr_name);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Aget_info_by_name_invalid_attr_name);
+
+ PART_BEGIN(H5Aget_info_by_name_invalid_attr_info_pointer)
+ {
+ TESTING_2("H5Aget_info_by_name with an invalid attribute info pointer");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Aget_info_by_name(group_id, ".", ATTRIBUTE_GET_INFO_INVALID_PARAMS_TEST_ATTR_NAME,
+ NULL, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" retrieved attribute info using H5Aget_info_by_name with an invalid attribute "
+ "info pointer!\n");
+ PART_ERROR(H5Aget_info_by_name_invalid_attr_info_pointer);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Aget_info_by_name_invalid_attr_info_pointer);
+
+ PART_BEGIN(H5Aget_info_by_name_invalid_lapl)
+ {
+ TESTING_2("H5Aget_info_by_name with an invalid LAPL");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Aget_info_by_name(group_id, ".", ATTRIBUTE_GET_INFO_INVALID_PARAMS_TEST_ATTR_NAME,
+ &attr_info, H5I_INVALID_HID);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" retrieved attribute info using H5Aget_info_by_name with an invalid LAPL!\n");
+ PART_ERROR(H5Aget_info_by_name_invalid_lapl);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Aget_info_by_name_invalid_lapl);
+
+ PART_BEGIN(H5Aget_info_by_idx_invalid_loc_id)
+ {
+ TESTING_2("H5Aget_info_by_idx with an invalid loc_id");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Aget_info_by_idx(H5I_INVALID_HID, ".", H5_INDEX_NAME, H5_ITER_INC, 0, &attr_info,
+ H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" retrieved attribute info using H5Aget_info_by_idx with an invalid loc_id!\n");
+ PART_ERROR(H5Aget_info_by_idx_invalid_loc_id);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Aget_info_by_idx_invalid_loc_id);
+
+ PART_BEGIN(H5Aget_info_by_idx_invalid_obj_name)
+ {
+ TESTING_2("H5Aget_info_by_idx with an invalid object name");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Aget_info_by_idx(group_id, NULL, H5_INDEX_NAME, H5_ITER_INC, 0, &attr_info,
+ H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" retrieved attribute info using H5Aget_info_by_idx with a NULL object name!\n");
+ PART_ERROR(H5Aget_info_by_idx_invalid_obj_name);
+ }
+
+ H5E_BEGIN_TRY
+ {
+ err_ret =
+ H5Aget_info_by_idx(group_id, "", H5_INDEX_NAME, H5_ITER_INC, 0, &attr_info, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" retrieved attribute info using H5Aget_info_by_idx with an invalid object name "
+ "of ''!\n");
+ PART_ERROR(H5Aget_info_by_idx_invalid_obj_name);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Aget_info_by_idx_invalid_obj_name);
+
+ PART_BEGIN(H5Aget_info_by_idx_invalid_index_type)
+ {
+ TESTING_2("H5Aget_info_by_idx with an invalid index type");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Aget_info_by_idx(group_id, ".", H5_INDEX_UNKNOWN, H5_ITER_INC, 0, &attr_info,
+ H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" retrieved attribute info using H5Aget_info_by_idx with invalid index type "
+ "H5_INDEX_UNKNOWN!\n");
+ PART_ERROR(H5Aget_info_by_idx_invalid_index_type);
+ }
+
+ H5E_BEGIN_TRY
+ {
+ err_ret =
+ H5Aget_info_by_idx(group_id, ".", H5_INDEX_N, H5_ITER_INC, 0, &attr_info, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" retrieved attribute info using H5Aget_info_by_idx with invalid index type "
+ "H5_INDEX_N!\n");
+ PART_ERROR(H5Aget_info_by_idx_invalid_index_type);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Aget_info_by_idx_invalid_index_type);
+
+ PART_BEGIN(H5Aget_info_by_idx_invalid_iter_order)
+ {
+ TESTING_2("H5Aget_info_by_idx with an invalid iteration order");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Aget_info_by_idx(group_id, ".", H5_INDEX_NAME, H5_ITER_UNKNOWN, 0, &attr_info,
+ H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" retrieved attribute info using H5Aget_info_by_idx with invalid iteration order "
+ "H5_ITER_UNKNOWN!\n");
+ PART_ERROR(H5Aget_info_by_idx_invalid_iter_order);
+ }
+
+ H5E_BEGIN_TRY
+ {
+ err_ret =
+ H5Aget_info_by_idx(group_id, ".", H5_INDEX_NAME, H5_ITER_N, 0, &attr_info, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" retrieved attribute info using H5Aget_info_by_idx with invalid iteration order "
+ "H5_ITER_N!\n");
+ PART_ERROR(H5Aget_info_by_idx_invalid_iter_order);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Aget_info_by_idx_invalid_iter_order);
+
+ PART_BEGIN(H5Aget_info_by_idx_invalid_attr_info_pointer)
+ {
+ TESTING_2("H5Aget_info_by_idx with an invalid attribute info pointer");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Aget_info_by_idx(group_id, ".", H5_INDEX_NAME, H5_ITER_INC, 0, NULL, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" retrieved attribute info using H5Aget_info_by_idx with an invalid attribute "
+ "info pointer!\n");
+ PART_ERROR(H5Aget_info_by_idx_invalid_attr_info_pointer);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Aget_info_by_idx_invalid_attr_info_pointer);
+
+ PART_BEGIN(H5Aget_info_by_idx_invalid_lapl)
+ {
+ TESTING_2("H5Aget_info_by_idx with an invalid LAPL");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Aget_info_by_idx(group_id, ".", H5_INDEX_NAME, H5_ITER_INC, 0, &attr_info,
+ H5I_INVALID_HID);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" retrieved attribute info using H5Aget_info_by_idx with an invalid LAPL!\n");
+ PART_ERROR(H5Aget_info_by_idx_invalid_lapl);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Aget_info_by_idx_invalid_lapl);
+ }
+ END_MULTIPART;
+
+ TESTING_2("test cleanup");
+
+ if (H5Sclose(space_id) < 0)
+ TEST_ERROR;
+ if (H5Tclose(attr_dtype) < 0)
+ TEST_ERROR;
+ if (H5Aclose(attr_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(space_id);
+ H5Tclose(attr_dtype);
+ H5Aclose(attr_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to check that an attribute can be renamed
+ * with H5Arename and H5Arename_by_name.
+ */
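+/*
+ * Note: H5Arename renames an attribute attached to the object identified by
+ * its first argument, while H5Arename_by_name first resolves an object name
+ * relative to that location (here, the test subgroup under the container
+ * group) and then renames the attribute on the resolved object.
+ */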
+static int
+test_rename_attribute(void)
+{
+ htri_t attr_exists;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t attr_id = H5I_INVALID_HID, attr_id2 = H5I_INVALID_HID;
+ hid_t attr_dtype = H5I_INVALID_HID;
+ hid_t attr_space_id = H5I_INVALID_HID;
+
+ TESTING_MULTIPART("attribute renaming");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_MORE)) {
+ SKIPPED();
+ HDprintf(
+ " API functions for basic file, group, or attribute aren't supported with this connector\n");
+ return 0;
+ }
+
+ TESTING_2("test setup");
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, ATTRIBUTE_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", ATTRIBUTE_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, ATTRIBUTE_RENAME_TEST_GROUP_NAME, H5P_DEFAULT, H5P_DEFAULT,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container group '%s'\n", ATTRIBUTE_RENAME_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((attr_space_id = generate_random_dataspace(ATTRIBUTE_RENAME_TEST_SPACE_RANK, NULL, NULL, TRUE)) < 0)
+ TEST_ERROR;
+
+ if ((attr_dtype = generate_random_datatype(H5T_NO_CLASS, TRUE)) < 0)
+ TEST_ERROR;
+
+ if ((attr_id = H5Acreate2(group_id, ATTRIBUTE_RENAME_TEST_ATTR_NAME, attr_dtype, attr_space_id,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create attribute\n");
+ goto error;
+ }
+
+ if ((attr_id2 = H5Acreate2(group_id, ATTRIBUTE_RENAME_TEST_ATTR_NAME2, attr_dtype, attr_space_id,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create attribute\n");
+ goto error;
+ }
+
+ /* Verify the attributes have been created */
+ if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_RENAME_TEST_ATTR_NAME)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if attribute exists\n");
+ goto error;
+ }
+
+ if (!attr_exists) {
+ H5_FAILED();
+ HDprintf(" attribute did not exist\n");
+ goto error;
+ }
+
+ if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_RENAME_TEST_ATTR_NAME2)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if attribute exists\n");
+ goto error;
+ }
+
+ if (!attr_exists) {
+ H5_FAILED();
+ HDprintf(" attribute did not exist\n");
+ goto error;
+ }
+
+ PASSED();
+
+ BEGIN_MULTIPART
+ {
+ PART_BEGIN(H5Arename)
+ {
+ TESTING_2("H5Arename");
+
+ if (H5Arename(group_id, ATTRIBUTE_RENAME_TEST_ATTR_NAME, ATTRIBUTE_RENAME_TEST_NEW_NAME) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't rename attribute '%s' to '%s' using H5Arename\n",
+ ATTRIBUTE_RENAME_TEST_ATTR_NAME, ATTRIBUTE_RENAME_TEST_NEW_NAME);
+ PART_ERROR(H5Arename);
+ }
+
+ /* Verify the attribute has been renamed */
+ if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_RENAME_TEST_ATTR_NAME)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if attribute exists\n");
+ PART_ERROR(H5Arename);
+ }
+
+ if (attr_exists) {
+ H5_FAILED();
+ HDprintf(" attribute did not get renamed!\n");
+ PART_ERROR(H5Arename);
+ }
+
+ if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_RENAME_TEST_NEW_NAME)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if attribute exists\n");
+ PART_ERROR(H5Arename);
+ }
+
+ if (!attr_exists) {
+ H5_FAILED();
+ HDprintf(" attribute did not get renamed!\n");
+ PART_ERROR(H5Arename);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Arename);
+
+ PART_BEGIN(H5Arename_by_name)
+ {
+ TESTING_2("H5Arename_by_name");
+
+ if (H5Arename_by_name(container_group, ATTRIBUTE_RENAME_TEST_GROUP_NAME,
+ ATTRIBUTE_RENAME_TEST_ATTR_NAME2, ATTRIBUTE_RENAME_TEST_NEW_NAME2,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't rename attribute '%s' to '%s' using H5Arename_by_name\n",
+ ATTRIBUTE_RENAME_TEST_ATTR_NAME2, ATTRIBUTE_RENAME_TEST_NEW_NAME2);
+ PART_ERROR(H5Arename_by_name);
+ }
+
+ /* Verify the attribute has been renamed */
+ if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_RENAME_TEST_ATTR_NAME2)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if attribute exists\n");
+ PART_ERROR(H5Arename_by_name);
+ }
+
+ if (attr_exists) {
+ H5_FAILED();
+ HDprintf(" attribute did not get renamed!\n");
+ PART_ERROR(H5Arename_by_name);
+ }
+
+ if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_RENAME_TEST_NEW_NAME2)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if attribute exists\n");
+ PART_ERROR(H5Arename_by_name);
+ }
+
+ if (!attr_exists) {
+ H5_FAILED();
+ HDprintf(" attribute did not get renamed!\n");
+ PART_ERROR(H5Arename_by_name);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Arename_by_name);
+ }
+ END_MULTIPART;
+
+ TESTING_2("test cleanup");
+
+ if (H5Sclose(attr_space_id) < 0)
+ TEST_ERROR;
+ if (H5Tclose(attr_dtype) < 0)
+ TEST_ERROR;
+ if (H5Aclose(attr_id) < 0)
+ TEST_ERROR;
+ if (H5Aclose(attr_id2) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(attr_space_id);
+ H5Tclose(attr_dtype);
+ H5Aclose(attr_id);
+ H5Aclose(attr_id2);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to check that an attribute can't be renamed
+ * when H5Arename(_by_name) is passed invalid parameters.
+ */
+static int
+test_rename_attribute_invalid_params(void)
+{
+ htri_t attr_exists;
+ herr_t err_ret = -1;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t attr_id = H5I_INVALID_HID, attr_id2 = H5I_INVALID_HID;
+ hid_t attr_dtype = H5I_INVALID_HID;
+ hid_t attr_space_id = H5I_INVALID_HID;
+
+ TESTING_MULTIPART("attribute renaming with invalid parameters");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_MORE)) {
+ SKIPPED();
+ HDprintf(
+ " API functions for basic file, group, or attribute aren't supported with this connector\n");
+ return 0;
+ }
+
+ TESTING_2("test setup");
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, ATTRIBUTE_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", ATTRIBUTE_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, ATTRIBUTE_RENAME_INVALID_PARAMS_TEST_GROUP_NAME, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container group '%s'\n",
+ ATTRIBUTE_RENAME_INVALID_PARAMS_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((attr_space_id = generate_random_dataspace(ATTRIBUTE_RENAME_INVALID_PARAMS_TEST_SPACE_RANK, NULL,
+ NULL, TRUE)) < 0)
+ TEST_ERROR;
+
+ if ((attr_dtype = generate_random_datatype(H5T_NO_CLASS, TRUE)) < 0)
+ TEST_ERROR;
+
+ if ((attr_id = H5Acreate2(group_id, ATTRIBUTE_RENAME_INVALID_PARAMS_TEST_ATTR_NAME, attr_dtype,
+ attr_space_id, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create attribute\n");
+ goto error;
+ }
+
+ if ((attr_id2 = H5Acreate2(group_id, ATTRIBUTE_RENAME_INVALID_PARAMS_TEST_ATTR_NAME2, attr_dtype,
+ attr_space_id, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create attribute\n");
+ goto error;
+ }
+
+ /* Verify the attributes have been created */
+ if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_RENAME_INVALID_PARAMS_TEST_ATTR_NAME)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if attribute exists\n");
+ goto error;
+ }
+
+ if (!attr_exists) {
+ H5_FAILED();
+ HDprintf(" attribute did not exist\n");
+ goto error;
+ }
+
+ if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_RENAME_INVALID_PARAMS_TEST_ATTR_NAME2)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if attribute exists\n");
+ goto error;
+ }
+
+ if (!attr_exists) {
+ H5_FAILED();
+ HDprintf(" attribute did not exist\n");
+ goto error;
+ }
+
+ PASSED();
+
+ BEGIN_MULTIPART
+ {
+ PART_BEGIN(H5Arename_invalid_loc_id)
+ {
+ TESTING_2("H5Arename with an invalid loc_id");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Arename(H5I_INVALID_HID, ATTRIBUTE_RENAME_INVALID_PARAMS_TEST_ATTR_NAME,
+ ATTRIBUTE_RENAME_INVALID_PARAMS_TEST_NEW_NAME);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" renamed attribute using H5Arename with an invalid loc_id!\n");
+ PART_ERROR(H5Arename_invalid_loc_id);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Arename_invalid_loc_id);
+
+ PART_BEGIN(H5Arename_invalid_old_attr_name)
+ {
+ TESTING_2("H5Arename with an invalid old attribute name");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Arename(group_id, NULL, ATTRIBUTE_RENAME_INVALID_PARAMS_TEST_NEW_NAME);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" renamed attribute using H5Arename with a NULL old attribute name!\n");
+ PART_ERROR(H5Arename_invalid_old_attr_name);
+ }
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Arename(group_id, "", ATTRIBUTE_RENAME_INVALID_PARAMS_TEST_NEW_NAME);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" renamed attribute using H5Arename with an invalid old attribute name of ''!\n");
+ PART_ERROR(H5Arename_invalid_old_attr_name);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Arename_invalid_old_attr_name);
+
+ PART_BEGIN(H5Arename_invalid_new_attr_name)
+ {
+ TESTING_2("H5Arename with an invalid new attribute name");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Arename(group_id, ATTRIBUTE_RENAME_INVALID_PARAMS_TEST_ATTR_NAME, NULL);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" renamed attribute using H5Arename with a NULL new attribute name!\n");
+ PART_ERROR(H5Arename_invalid_new_attr_name);
+ }
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Arename(group_id, ATTRIBUTE_RENAME_INVALID_PARAMS_TEST_ATTR_NAME, "");
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" renamed attribute using H5Arename with an invalid new attribute name of ''!\n");
+ PART_ERROR(H5Arename_invalid_new_attr_name);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Arename_invalid_new_attr_name);
+
+ PART_BEGIN(H5Arename_by_name_invalid_loc_id)
+ {
+ TESTING_2("H5Arename_by_name with an invalid loc_id");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret =
+ H5Arename_by_name(H5I_INVALID_HID, ".", ATTRIBUTE_RENAME_INVALID_PARAMS_TEST_ATTR_NAME,
+ ATTRIBUTE_RENAME_INVALID_PARAMS_TEST_NEW_NAME, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" renamed attribute using H5Arename_by_name with an invalid loc_id!\n");
+ PART_ERROR(H5Arename_by_name_invalid_loc_id);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Arename_by_name_invalid_loc_id);
+
+ PART_BEGIN(H5Arename_by_name_invalid_obj_name)
+ {
+ TESTING_2("H5Arename_by_name with an invalid object name");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Arename_by_name(group_id, NULL, ATTRIBUTE_RENAME_INVALID_PARAMS_TEST_ATTR_NAME,
+ ATTRIBUTE_RENAME_INVALID_PARAMS_TEST_NEW_NAME, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" renamed attribute using H5Arename_by_name with a NULL object name!\n");
+ PART_ERROR(H5Arename_by_name_invalid_obj_name);
+ }
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Arename_by_name(group_id, "", ATTRIBUTE_RENAME_INVALID_PARAMS_TEST_ATTR_NAME,
+ ATTRIBUTE_RENAME_INVALID_PARAMS_TEST_NEW_NAME, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(
+ " renamed attribute using H5Arename_by_name with an invalid object name of ''!\n");
+ PART_ERROR(H5Arename_by_name_invalid_obj_name);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Arename_by_name_invalid_obj_name);
+
+ PART_BEGIN(H5Arename_by_name_invalid_old_attr_name)
+ {
+ TESTING_2("H5Arename_by_name with an invalid old attribute name");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Arename_by_name(group_id, ".", NULL,
+ ATTRIBUTE_RENAME_INVALID_PARAMS_TEST_NEW_NAME, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" renamed attribute using H5Arename_by_name with a NULL old attribute name!\n");
+ PART_ERROR(H5Arename_by_name_invalid_old_attr_name);
+ }
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Arename_by_name(group_id, ".", "", ATTRIBUTE_RENAME_INVALID_PARAMS_TEST_NEW_NAME,
+ H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" renamed attribute using H5Arename_by_name with an invalid old attribute name "
+ "of ''!\n");
+ PART_ERROR(H5Arename_by_name_invalid_old_attr_name);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Arename_by_name_invalid_old_attr_name);
+
+ PART_BEGIN(H5Arename_by_name_invalid_new_attr_name)
+ {
+ TESTING_2("H5Arename_by_name with an invalid new attribute name");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Arename_by_name(group_id, ".", ATTRIBUTE_RENAME_INVALID_PARAMS_TEST_ATTR_NAME,
+ NULL, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" renamed attribute using H5Arename_by_name with a NULL new attribute name!\n");
+ PART_ERROR(H5Arename_by_name_invalid_new_attr_name);
+ }
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Arename_by_name(group_id, ".", ATTRIBUTE_RENAME_INVALID_PARAMS_TEST_ATTR_NAME, "",
+ H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" renamed attribute using H5Arename_by_name with an invalid new attribute name "
+ "of ''!\n");
+ PART_ERROR(H5Arename_by_name_invalid_new_attr_name);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Arename_by_name_invalid_new_attr_name);
+
+ PART_BEGIN(H5Arename_by_name_invalid_lapl)
+ {
+ TESTING_2("H5Arename_by_name with an invalid LAPL");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Arename_by_name(group_id, ".", ATTRIBUTE_RENAME_INVALID_PARAMS_TEST_ATTR_NAME,
+ ATTRIBUTE_RENAME_INVALID_PARAMS_TEST_NEW_NAME, H5I_INVALID_HID);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" renamed attribute using H5Arename_by_name with an invalid LAPL!\n");
+ PART_ERROR(H5Arename_by_name_invalid_lapl);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Arename_by_name_invalid_lapl);
+ }
+ END_MULTIPART;
+
+ TESTING_2("test cleanup");
+
+ if (H5Sclose(attr_space_id) < 0)
+ TEST_ERROR;
+ if (H5Tclose(attr_dtype) < 0)
+ TEST_ERROR;
+ if (H5Aclose(attr_id) < 0)
+ TEST_ERROR;
+ if (H5Aclose(attr_id2) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(attr_space_id);
+ H5Tclose(attr_dtype);
+ H5Aclose(attr_id);
+ H5Aclose(attr_id2);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to check the functionality of attribute
+ * iteration using H5Aiterate(_by_name) on a group.
+ * Iteration is done in increasing and decreasing
+ * order of both attribute name and attribute
+ * creation order.
+ */
+static int
+test_attribute_iterate_group(void)
+{
+ size_t link_counter;
+ size_t i;
+ htri_t attr_exists;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t attr_id = H5I_INVALID_HID;
+ hid_t attr_dtype = H5I_INVALID_HID;
+ hid_t attr_space_id = H5I_INVALID_HID;
+ hid_t gcpl_id = H5I_INVALID_HID;
+
+ TESTING_MULTIPART("attribute iteration on a group");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_ITERATE) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_CREATION_ORDER)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file, group, attribute, iterate, or creation order aren't "
+ "supported with this connector\n");
+ return 0;
+ }
+
+ TESTING_2("test setup");
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, ATTRIBUTE_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group\n");
+ goto error;
+ }
+
+ if ((gcpl_id = H5Pcreate(H5P_GROUP_CREATE)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create GCPL for attribute creation order tracking\n");
+ goto error;
+ }
+
+ if (H5Pset_attr_creation_order(gcpl_id, H5P_CRT_ORDER_TRACKED) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't set attribute creation order tracking\n");
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, ATTRIBUTE_ITERATE_TEST_GRP_SUBGROUP_NAME, H5P_DEFAULT,
+ gcpl_id, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container subgroup '%s'\n", ATTRIBUTE_ITERATE_TEST_GRP_SUBGROUP_NAME);
+ goto error;
+ }
+
+ if ((attr_dtype = generate_random_datatype(H5T_NO_CLASS, TRUE)) < 0)
+ TEST_ERROR;
+ if ((attr_space_id =
+ generate_random_dataspace(ATTRIBUTE_ITERATE_TEST_ATTR_SPACE_RANK, NULL, NULL, TRUE)) < 0)
+ TEST_ERROR;
+
+ /* Create some attributes with a reverse-ordering naming scheme to test creation order */
+ for (i = 0; i < ATTRIBUTE_ITERATE_TEST_NUM_ATTRS; i++) {
+ char attr_name[ATTRIBUTE_ITERATE_TEST_ATTR_NAME_BUF_SIZE];
+
+ HDsnprintf(attr_name, ATTRIBUTE_ITERATE_TEST_ATTR_NAME_BUF_SIZE,
+ ATTRIBUTE_ITERATE_TEST_ATTR_NAME "%d", (int)(ATTRIBUTE_ITERATE_TEST_NUM_ATTRS - i - 1));
+
+ if ((attr_id = H5Acreate2(group_id, attr_name, attr_dtype, attr_space_id, H5P_DEFAULT, H5P_DEFAULT)) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create attribute '%s'\n", attr_name);
+ goto error;
+ }
+
+ /* Verify the attribute has been created */
+ if ((attr_exists = H5Aexists(group_id, attr_name)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if attribute '%s' exists\n", attr_name);
+ goto error;
+ }
+
+ if (!attr_exists) {
+ H5_FAILED();
+ HDprintf(" attribute '%s' did not exist\n", attr_name);
+ goto error;
+ }
+
+ if (H5Aclose(attr_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close attribute '%s'\n", attr_name);
+ goto error;
+ }
+ }
+
+ PASSED();
+
+ BEGIN_MULTIPART
+ {
+ /*
+ * NOTE: A counter is passed to the iteration callback to try to match up the
+ * expected attributes with a given step throughout all of the following
+ * iterations. Since the only information we can count on in the attribute
+ * iteration callback is the attribute's name, we need some other way of
+ * ensuring that the attributes are coming back in the correct order.
+ */
+
+ PART_BEGIN(H5Aiterate2_name_increasing)
+ {
+ TESTING_2("H5Aiterate by attribute name in increasing order");
+
+ link_counter = 0;
+
+ /* Test basic attribute iteration capability using both index types and both index orders */
+ if (H5Aiterate2(group_id, H5_INDEX_NAME, H5_ITER_INC, NULL, attr_iter_callback1, &link_counter) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" H5Aiterate2 by index type name in increasing order failed\n");
+ PART_ERROR(H5Aiterate2_name_increasing);
+ }
+
+ /* Make sure that the attribute iteration callback was actually called */
+ if (link_counter == 0) {
+ H5_FAILED();
+ HDprintf(" H5Aiterate sentinel value is unchanged; supplied callback function must not "
+ "have been called!\n");
+ PART_ERROR(H5Aiterate2_name_increasing);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Aiterate2_name_increasing);
+
+ PART_BEGIN(H5Aiterate2_name_decreasing)
+ {
+ TESTING_2("H5Aiterate by attribute name in decreasing order");
+#ifndef NO_DECREASING_ALPHA_ITER_ORDER
+ /* Reset the counter to the appropriate value for the next test */
+ link_counter = ATTRIBUTE_ITERATE_TEST_NUM_ATTRS;
+
+ if (H5Aiterate2(group_id, H5_INDEX_NAME, H5_ITER_DEC, NULL, attr_iter_callback1, &link_counter) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" H5Aiterate2 by index type name in decreasing order failed\n");
+ PART_ERROR(H5Aiterate2_name_decreasing);
+ }
+
+ /* Make sure that the attribute iteration callback was actually called */
+ if (link_counter == ATTRIBUTE_ITERATE_TEST_NUM_ATTRS) {
+ H5_FAILED();
+ HDprintf(" H5Aiterate sentinel value is unchanged; supplied callback function must not "
+ "have been called!\n");
+ PART_ERROR(H5Aiterate2_name_decreasing);
+ }
+
+ PASSED();
+#else
+ SKIPPED();
+ PART_EMPTY(H5Aiterate2_name_decreasing);
+#endif
+ }
+ PART_END(H5Aiterate2_name_decreasing);
+
+ PART_BEGIN(H5Aiterate2_creation_increasing)
+ {
+ TESTING_2("H5Aiterate by creation order in increasing order");
+
+ /* Reset the counter to the appropriate value for the next test */
+ link_counter = 2 * ATTRIBUTE_ITERATE_TEST_NUM_ATTRS;
+
+ if (H5Aiterate2(group_id, H5_INDEX_CRT_ORDER, H5_ITER_INC, NULL, attr_iter_callback1,
+ &link_counter) < 0) {
+ H5_FAILED();
+ HDprintf(" H5Aiterate2 by index type creation order in increasing order failed\n");
+ PART_ERROR(H5Aiterate2_creation_increasing);
+ }
+
+ /* Make sure that the attribute iteration callback was actually called */
+ if (link_counter == 2 * ATTRIBUTE_ITERATE_TEST_NUM_ATTRS) {
+ H5_FAILED();
+ HDprintf(" H5Aiterate sentinel value is unchanged; supplied callback function must not "
+ "have been called!\n");
+ PART_ERROR(H5Aiterate2_creation_increasing);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Aiterate2_creation_increasing);
+
+ PART_BEGIN(H5Aiterate2_creation_decreasing)
+ {
+ TESTING_2("H5Aiterate by creation order in decreasing order");
+
+ /* Reset the counter to the appropriate value for the next test */
+ link_counter = 3 * ATTRIBUTE_ITERATE_TEST_NUM_ATTRS;
+
+ if (H5Aiterate2(group_id, H5_INDEX_CRT_ORDER, H5_ITER_DEC, NULL, attr_iter_callback1,
+ &link_counter) < 0) {
+ H5_FAILED();
+ HDprintf(" H5Aiterate2 by index type creation order in decreasing order failed\n");
+ PART_ERROR(H5Aiterate2_creation_decreasing);
+ }
+
+ /* Make sure that the attribute iteration callback was actually called */
+ if (link_counter == 3 * ATTRIBUTE_ITERATE_TEST_NUM_ATTRS) {
+ H5_FAILED();
+ HDprintf(" H5Aiterate sentinel value is unchanged; supplied callback function must not "
+ "have been called!\n");
+ PART_ERROR(H5Aiterate2_creation_decreasing);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Aiterate2_creation_decreasing);
+
+ PART_BEGIN(H5Aiterate_by_name_name_increasing)
+ {
+ TESTING_2("H5Aiterate_by_name by attribute name in increasing order");
+
+ /* Reset the counter to the appropriate value for the next test */
+ link_counter = 0;
+
+ if (H5Aiterate_by_name(
+ file_id, "/" ATTRIBUTE_TEST_GROUP_NAME "/" ATTRIBUTE_ITERATE_TEST_GRP_SUBGROUP_NAME,
+ H5_INDEX_NAME, H5_ITER_INC, NULL, attr_iter_callback1, &link_counter, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" H5Aiterate_by_name by index type name in increasing order failed\n");
+ PART_ERROR(H5Aiterate_by_name_name_increasing);
+ }
+
+ /* Make sure that the attribute iteration callback was actually called */
+ if (link_counter == 0) {
+ H5_FAILED();
+ HDprintf(" H5Aiterate_by_name sentinel value is unchanged; supplied callback function "
+ "must not have been called!\n");
+ PART_ERROR(H5Aiterate_by_name_name_increasing);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Aiterate_by_name_name_increasing);
+
+ PART_BEGIN(H5Aiterate_by_name_name_decreasing)
+ {
+ TESTING_2("H5Aiterate_by_name by attribute name in decreasing order");
+#ifndef NO_DECREASING_ALPHA_ITER_ORDER
+ /* Reset the counter to the appropriate value for the next test */
+ link_counter = ATTRIBUTE_ITERATE_TEST_NUM_ATTRS;
+
+ if (H5Aiterate_by_name(
+ file_id, "/" ATTRIBUTE_TEST_GROUP_NAME "/" ATTRIBUTE_ITERATE_TEST_GRP_SUBGROUP_NAME,
+ H5_INDEX_NAME, H5_ITER_DEC, NULL, attr_iter_callback1, &link_counter, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" H5Aiterate_by_name by index type name in decreasing order failed\n");
+ PART_ERROR(H5Aiterate_by_name_name_decreasing);
+ }
+
+ /* Make sure that the attribute iteration callback was actually called */
+ if (link_counter == ATTRIBUTE_ITERATE_TEST_NUM_ATTRS) {
+ H5_FAILED();
+ HDprintf(" H5Aiterate_by_name sentinel value is unchanged; supplied callback function "
+ "must not have been called!\n");
+ PART_ERROR(H5Aiterate_by_name_name_decreasing);
+ }
+
+ PASSED();
+#else
+ SKIPPED();
+ PART_EMPTY(H5Aiterate_by_name_name_decreasing);
+#endif
+ }
+ PART_END(H5Aiterate_by_name_name_decreasing);
+
+ PART_BEGIN(H5Aiterate_by_name_creation_increasing)
+ {
+ TESTING_2("H5Aiterate_by_name by creation order in increasing order");
+
+ /* Reset the counter to the appropriate value for the next test */
+ link_counter = 2 * ATTRIBUTE_ITERATE_TEST_NUM_ATTRS;
+
+ if (H5Aiterate_by_name(file_id,
+ "/" ATTRIBUTE_TEST_GROUP_NAME "/" ATTRIBUTE_ITERATE_TEST_GRP_SUBGROUP_NAME,
+ H5_INDEX_CRT_ORDER, H5_ITER_INC, NULL, attr_iter_callback1, &link_counter,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" H5Aiterate_by_name by index type creation order in increasing order failed\n");
+ PART_ERROR(H5Aiterate_by_name_creation_increasing);
+ }
+
+ /* Make sure that the attribute iteration callback was actually called */
+ if (link_counter == 2 * ATTRIBUTE_ITERATE_TEST_NUM_ATTRS) {
+ H5_FAILED();
+ HDprintf(" H5Aiterate_by_name sentinel value is unchanged; supplied callback function "
+ "must not have been called!\n");
+ PART_ERROR(H5Aiterate_by_name_creation_increasing);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Aiterate_by_name_creation_increasing);
+
+ PART_BEGIN(H5Aiterate_by_name_creation_decreasing)
+ {
+ TESTING_2("H5Aiterate_by_name by creation order in decreasing order");
+
+ /* Reset the counter to the appropriate value for the next test */
+ link_counter = 3 * ATTRIBUTE_ITERATE_TEST_NUM_ATTRS;
+
+ if (H5Aiterate_by_name(file_id,
+ "/" ATTRIBUTE_TEST_GROUP_NAME "/" ATTRIBUTE_ITERATE_TEST_GRP_SUBGROUP_NAME,
+ H5_INDEX_CRT_ORDER, H5_ITER_DEC, NULL, attr_iter_callback1, &link_counter,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" H5Aiterate_by_name by index type creation order in decreasing order failed\n");
+ PART_ERROR(H5Aiterate_by_name_creation_decreasing);
+ }
+
+ /* Make sure that the attribute iteration callback was actually called */
+ if (link_counter == 3 * ATTRIBUTE_ITERATE_TEST_NUM_ATTRS) {
+ H5_FAILED();
+ HDprintf(" H5Aiterate_by_name sentinel value is unchanged; supplied callback function "
+ "must not have been called!\n");
+ PART_ERROR(H5Aiterate_by_name_creation_decreasing);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Aiterate_by_name_creation_decreasing);
+ }
+ END_MULTIPART;
+
+ TESTING_2("test cleanup");
+
+ if (H5Sclose(attr_space_id) < 0)
+ TEST_ERROR;
+ if (H5Tclose(attr_dtype) < 0)
+ TEST_ERROR;
+ if (H5Pclose(gcpl_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(attr_space_id);
+ H5Tclose(attr_dtype);
+ H5Aclose(attr_id);
+ H5Pclose(gcpl_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
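+
+/*
+ * For reference, attr_iter_callback1 (used by the attribute iteration tests in
+ * this file and defined elsewhere in it) checks each visited attribute name
+ * against the counter passed through op_data, as described in the NOTE above.
+ * The following is only a minimal, hypothetical sketch of such a counter-based
+ * operator -- the function name, the restriction to the name-increasing phase,
+ * and the assumption that the numeric name suffixes sort lexicographically are
+ * illustrative choices, not the actual implementation -- and it is
+ * deliberately kept out of the build.
+ */
+#if 0
+static herr_t
+example_attr_iter_counter_cb(hid_t location_id, const char *attr_name, const H5A_info_t *ainfo,
+ void *op_data)
+{
+ size_t *counter = (size_t *)op_data;
+ char expected_name[ATTRIBUTE_ITERATE_TEST_ATTR_NAME_BUF_SIZE];
+
+ (void)location_id;
+ (void)ainfo;
+
+ /* Name-increasing phase only: the counter starts at 0 and the attributes
+ * are expected to come back in alphabetical order of their names.
+ */
+ HDsnprintf(expected_name, ATTRIBUTE_ITERATE_TEST_ATTR_NAME_BUF_SIZE,
+ ATTRIBUTE_ITERATE_TEST_ATTR_NAME "%d", (int)(*counter));
+
+ if (HDstrcmp(attr_name, expected_name)) {
+ HDprintf(" attribute name '%s' didn't match expected name '%s'\n", attr_name, expected_name);
+ return -1; /* a negative return aborts the iteration with failure */
+ }
+
+ (*counter)++;
+
+ return 0; /* H5_ITER_CONT: continue to the next attribute */
+}
+#endif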
+
+/*
+ * A test to check the functionality of attribute
+ * iteration using H5Aiterate(_by_name) on a dataset.
+ * Iteration is done in increasing and decreasing
+ * order of both attribute name and attribute
+ * creation order.
+ */
+static int
+test_attribute_iterate_dataset(void)
+{
+ size_t link_counter;
+ size_t i;
+ htri_t attr_exists;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t attr_id = H5I_INVALID_HID;
+ hid_t dset_dtype = H5I_INVALID_HID;
+ hid_t attr_dtype = H5I_INVALID_HID;
+ hid_t dset_space_id = H5I_INVALID_HID;
+ hid_t attr_space_id = H5I_INVALID_HID;
+ hid_t dcpl_id = H5I_INVALID_HID;
+
+ TESTING_MULTIPART("attribute iteration on a dataset");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_ITERATE) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_CREATION_ORDER)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file, group, dataset, attribute, iterate, or creation order "
+ "aren't supported with this connector\n");
+ return 0;
+ }
+
+ TESTING_2("test setup");
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, ATTRIBUTE_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group\n");
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, ATTRIBUTE_ITERATE_TEST_DSET_SUBGROUP_NAME, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container subgroup '%s'\n", ATTRIBUTE_ITERATE_TEST_DSET_SUBGROUP_NAME);
+ goto error;
+ }
+
+ if ((dcpl_id = H5Pcreate(H5P_DATASET_CREATE)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create DCPL for attribute creation order tracking\n");
+ goto error;
+ }
+
+ if (H5Pset_attr_creation_order(dcpl_id, H5P_CRT_ORDER_TRACKED) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't set attribute creation order tracking\n");
+ goto error;
+ }
+
+ if ((dset_dtype = generate_random_datatype(H5T_NO_CLASS, FALSE)) < 0)
+ TEST_ERROR;
+ if ((attr_dtype = generate_random_datatype(H5T_NO_CLASS, TRUE)) < 0)
+ TEST_ERROR;
+
+ if ((dset_space_id =
+ generate_random_dataspace(ATTRIBUTE_ITERATE_TEST_DSET_SPACE_RANK, NULL, NULL, FALSE)) < 0)
+ TEST_ERROR;
+ if ((attr_space_id =
+ generate_random_dataspace(ATTRIBUTE_ITERATE_TEST_ATTR_SPACE_RANK, NULL, NULL, TRUE)) < 0)
+ TEST_ERROR;
+
+ if ((dset_id = H5Dcreate2(group_id, ATTRIBUTE_ITERATE_TEST_DSET_NAME, dset_dtype, dset_space_id,
+ H5P_DEFAULT, dcpl_id, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create dataset '%s'\n", ATTRIBUTE_ITERATE_TEST_DSET_NAME);
+ goto error;
+ }
+
+ /* Create some attributes with a reverse-ordering naming scheme to test creation order */
+ for (i = 0; i < ATTRIBUTE_ITERATE_TEST_NUM_ATTRS; i++) {
+ char attr_name[ATTRIBUTE_ITERATE_TEST_ATTR_NAME_BUF_SIZE];
+
+ HDsnprintf(attr_name, ATTRIBUTE_ITERATE_TEST_ATTR_NAME_BUF_SIZE,
+ ATTRIBUTE_ITERATE_TEST_ATTR_NAME "%d", (int)(ATTRIBUTE_ITERATE_TEST_NUM_ATTRS - i - 1));
+
+ if ((attr_id = H5Acreate2(dset_id, attr_name, attr_dtype, attr_space_id, H5P_DEFAULT, H5P_DEFAULT)) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create attribute '%s'\n", attr_name);
+ goto error;
+ }
+
+ /* Verify the attribute has been created */
+ if ((attr_exists = H5Aexists(dset_id, attr_name)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if attribute '%s' exists\n", attr_name);
+ goto error;
+ }
+
+ if (!attr_exists) {
+ H5_FAILED();
+ HDprintf(" attribute '%s' did not exist\n", attr_name);
+ goto error;
+ }
+
+ if (H5Aclose(attr_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close attribute '%s'\n", attr_name);
+ goto error;
+ }
+ }
+
+ PASSED();
+
+ BEGIN_MULTIPART
+ {
+ /*
+ * NOTE: A counter is passed to the iteration callback to try to match up the
+ * expected attributes with a given step throughout all of the following
+ * iterations. Since the only information we can count on in the attribute
+ * iteration callback is the attribute's name, we need some other way of
+ * ensuring that the attributes are coming back in the correct order.
+ */
+
+ PART_BEGIN(H5Aiterate2_name_increasing)
+ {
+ TESTING_2("H5Aiterate by attribute name in increasing order");
+
+ link_counter = 0;
+
+ /* Test basic attribute iteration capability using both index types and both index orders */
+ if (H5Aiterate2(dset_id, H5_INDEX_NAME, H5_ITER_INC, NULL, attr_iter_callback1, &link_counter) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" H5Aiterate2 by index type name in increasing order failed\n");
+ PART_ERROR(H5Aiterate2_name_increasing);
+ }
+
+ /* Make sure that the attribute iteration callback was actually called */
+ if (link_counter == 0) {
+ H5_FAILED();
+ HDprintf(" H5Aiterate sentinel value is unchanged; supplied callback function must not "
+ "have been called!\n");
+ PART_ERROR(H5Aiterate2_name_increasing);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Aiterate2_name_increasing);
+
+ PART_BEGIN(H5Aiterate2_name_decreasing)
+ {
+ TESTING_2("H5Aiterate by attribute name in decreasing order");
+#ifndef NO_DECREASING_ALPHA_ITER_ORDER
+ /* Reset the counter to the appropriate value for the next test */
+ link_counter = ATTRIBUTE_ITERATE_TEST_NUM_ATTRS;
+
+ if (H5Aiterate2(dset_id, H5_INDEX_NAME, H5_ITER_DEC, NULL, attr_iter_callback1, &link_counter) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" H5Aiterate2 by index type name in decreasing order failed\n");
+ PART_ERROR(H5Aiterate2_name_decreasing);
+ }
+
+ /* Make sure that the attribute iteration callback was actually called */
+ if (link_counter == ATTRIBUTE_ITERATE_TEST_NUM_ATTRS) {
+ H5_FAILED();
+ HDprintf(" H5Aiterate sentinel value is unchanged; supplied callback function must not "
+ "have been called!\n");
+ PART_ERROR(H5Aiterate2_name_decreasing);
+ }
+
+ PASSED();
+#else
+ SKIPPED();
+ PART_EMPTY(H5Aiterate2_name_decreasing);
+#endif
+ }
+ PART_END(H5Aiterate2_name_decreasing);
+
+ PART_BEGIN(H5Aiterate2_creation_increasing)
+ {
+ TESTING_2("H5Aiterate by creation order in increasing order");
+
+ /* Reset the counter to the appropriate value for the next test */
+ link_counter = 2 * ATTRIBUTE_ITERATE_TEST_NUM_ATTRS;
+
+ if (H5Aiterate2(dset_id, H5_INDEX_CRT_ORDER, H5_ITER_INC, NULL, attr_iter_callback1,
+ &link_counter) < 0) {
+ H5_FAILED();
+ HDprintf(" H5Aiterate2 by index type creation order in increasing order failed\n");
+ PART_ERROR(H5Aiterate2_creation_increasing);
+ }
+
+ /* Make sure that the attribute iteration callback was actually called */
+ if (link_counter == 2 * ATTRIBUTE_ITERATE_TEST_NUM_ATTRS) {
+ H5_FAILED();
+ HDprintf(" H5Aiterate sentinel value is unchanged; supplied callback function must not "
+ "have been called!\n");
+ PART_ERROR(H5Aiterate2_creation_increasing);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Aiterate2_creation_increasing);
+
+ PART_BEGIN(H5Aiterate2_creation_decreasing)
+ {
+ TESTING_2("H5Aiterate by creation order in decreasing order");
+
+ /* Reset the counter to the appropriate value for the next test */
+ link_counter = 3 * ATTRIBUTE_ITERATE_TEST_NUM_ATTRS;
+
+ if (H5Aiterate2(dset_id, H5_INDEX_CRT_ORDER, H5_ITER_DEC, NULL, attr_iter_callback1,
+ &link_counter) < 0) {
+ H5_FAILED();
+ HDprintf(" H5Aiterate2 by index type creation order in decreasing order failed\n");
+ PART_ERROR(H5Aiterate2_creation_decreasing);
+ }
+
+ /* Make sure that the attribute iteration callback was actually called */
+ if (link_counter == 3 * ATTRIBUTE_ITERATE_TEST_NUM_ATTRS) {
+ H5_FAILED();
+ HDprintf(" H5Aiterate sentinel value is unchanged; supplied callback function must not "
+ "have been called!\n");
+ PART_ERROR(H5Aiterate2_creation_decreasing);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Aiterate2_creation_decreasing);
+
+ PART_BEGIN(H5Aiterate_by_name_name_increasing)
+ {
+ TESTING_2("H5Aiterate_by_name by attribute name in increasing order");
+
+ /* Reset the counter to the appropriate value for the next test */
+ link_counter = 0;
+
+ if (H5Aiterate_by_name(file_id,
+ "/" ATTRIBUTE_TEST_GROUP_NAME "/" ATTRIBUTE_ITERATE_TEST_DSET_SUBGROUP_NAME
+ "/" ATTRIBUTE_ITERATE_TEST_DSET_NAME,
+ H5_INDEX_NAME, H5_ITER_INC, NULL, attr_iter_callback1, &link_counter,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" H5Aiterate_by_name by index type name in increasing order failed\n");
+ PART_ERROR(H5Aiterate_by_name_name_increasing);
+ }
+
+ /* Make sure that the attribute iteration callback was actually called */
+ if (link_counter == 0) {
+ H5_FAILED();
+ HDprintf(" H5Aiterate_by_name sentinel value is unchanged; supplied callback function "
+ "must not have been called!\n");
+ PART_ERROR(H5Aiterate_by_name_name_increasing);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Aiterate_by_name_name_increasing);
+
+ PART_BEGIN(H5Aiterate_by_name_name_decreasing)
+ {
+ TESTING_2("H5Aiterate_by_name by attribute name in decreasing order");
+#ifndef NO_DECREASING_ALPHA_ITER_ORDER
+ /* Reset the counter to the appropriate value for the next test */
+ link_counter = ATTRIBUTE_ITERATE_TEST_NUM_ATTRS;
+
+ if (H5Aiterate_by_name(file_id,
+ "/" ATTRIBUTE_TEST_GROUP_NAME "/" ATTRIBUTE_ITERATE_TEST_DSET_SUBGROUP_NAME
+ "/" ATTRIBUTE_ITERATE_TEST_DSET_NAME,
+ H5_INDEX_NAME, H5_ITER_DEC, NULL, attr_iter_callback1, &link_counter,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" H5Aiterate_by_name by index type name in decreasing order failed\n");
+ PART_ERROR(H5Aiterate_by_name_name_decreasing);
+ }
+
+ /* Make sure that the attribute iteration callback was actually called */
+ if (link_counter == ATTRIBUTE_ITERATE_TEST_NUM_ATTRS) {
+ H5_FAILED();
+ HDprintf(" H5Aiterate_by_name sentinel value is unchanged; supplied callback function "
+ "must not have been called!\n");
+ PART_ERROR(H5Aiterate_by_name_name_decreasing);
+ }
+
+ PASSED();
+#else
+ SKIPPED();
+ PART_EMPTY(H5Aiterate_by_name_name_decreasing);
+#endif
+ }
+ PART_END(H5Aiterate_by_name_name_decreasing);
+
+ PART_BEGIN(H5Aiterate_by_name_creation_increasing)
+ {
+ TESTING_2("H5Aiterate_by_name by creation order in increasing order");
+
+ /* Reset the counter to the appropriate value for the next test */
+ link_counter = 2 * ATTRIBUTE_ITERATE_TEST_NUM_ATTRS;
+
+ if (H5Aiterate_by_name(file_id,
+ "/" ATTRIBUTE_TEST_GROUP_NAME "/" ATTRIBUTE_ITERATE_TEST_DSET_SUBGROUP_NAME
+ "/" ATTRIBUTE_ITERATE_TEST_DSET_NAME,
+ H5_INDEX_CRT_ORDER, H5_ITER_INC, NULL, attr_iter_callback1, &link_counter,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" H5Aiterate_by_name by index type creation order in increasing order failed\n");
+ PART_ERROR(H5Aiterate_by_name_creation_increasing);
+ }
+
+ /* Make sure that the attribute iteration callback was actually called */
+ if (link_counter == 2 * ATTRIBUTE_ITERATE_TEST_NUM_ATTRS) {
+ H5_FAILED();
+ HDprintf(" H5Aiterate_by_name sentinel value is unchanged; supplied callback function "
+ "must not have been called!\n");
+ PART_ERROR(H5Aiterate_by_name_creation_increasing);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Aiterate_by_name_creation_increasing);
+
+ PART_BEGIN(H5Aiterate_by_name_creation_decreasing)
+ {
+ TESTING_2("H5Aiterate_by_name by creation order in decreasing order");
+
+ /* Reset the counter to the appropriate value for the next test */
+ link_counter = 3 * ATTRIBUTE_ITERATE_TEST_NUM_ATTRS;
+
+ if (H5Aiterate_by_name(file_id,
+ "/" ATTRIBUTE_TEST_GROUP_NAME "/" ATTRIBUTE_ITERATE_TEST_DSET_SUBGROUP_NAME
+ "/" ATTRIBUTE_ITERATE_TEST_DSET_NAME,
+ H5_INDEX_CRT_ORDER, H5_ITER_DEC, NULL, attr_iter_callback1, &link_counter,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" H5Aiterate_by_name by index type creation order in decreasing order failed\n");
+ PART_ERROR(H5Aiterate_by_name_creation_decreasing);
+ }
+
+ /* Make sure that the attribute iteration callback was actually called */
+ if (link_counter == 3 * ATTRIBUTE_ITERATE_TEST_NUM_ATTRS) {
+ H5_FAILED();
+ HDprintf(" H5Aiterate_by_name sentinel value is unchanged; supplied callback function "
+ "must not have been called!\n");
+ PART_ERROR(H5Aiterate_by_name_creation_decreasing);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Aiterate_by_name_creation_decreasing);
+ }
+ END_MULTIPART;
+
+ TESTING_2("test cleanup");
+
+ if (H5Pclose(dcpl_id) < 0)
+ TEST_ERROR;
+ if (H5Sclose(attr_space_id) < 0)
+ TEST_ERROR;
+ if (H5Sclose(dset_space_id) < 0)
+ TEST_ERROR;
+ if (H5Tclose(attr_dtype) < 0)
+ TEST_ERROR;
+ if (H5Tclose(dset_dtype) < 0)
+ TEST_ERROR;
+ if (H5Dclose(dset_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Pclose(dcpl_id);
+ H5Sclose(attr_space_id);
+ H5Sclose(dset_space_id);
+ H5Tclose(attr_dtype);
+ H5Tclose(dset_dtype);
+ H5Aclose(attr_id);
+ H5Dclose(dset_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to check the functionality of attribute
+ * iteration using H5Aiterate(_by_name) on a committed
+ * datatype. Iteration is done in increasing and
+ * decreasing order of both attribute name and attribute
+ * creation order.
+ */
+static int
+test_attribute_iterate_datatype(void)
+{
+ size_t link_counter;
+ size_t i;
+ htri_t attr_exists;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t type_id = H5I_INVALID_HID;
+ hid_t attr_id = H5I_INVALID_HID;
+ hid_t attr_dtype = H5I_INVALID_HID;
+ hid_t attr_space_id = H5I_INVALID_HID;
+ hid_t tcpl_id = H5I_INVALID_HID;
+
+ TESTING_MULTIPART("attribute iteration on a committed datatype");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_ITERATE) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_STORED_DATATYPES) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_CREATION_ORDER)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file, group, stored datatype, attribute, iterate, or creation "
+ "order aren't supported with this connector\n");
+ return 0;
+ }
+
+ TESTING_2("test setup");
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, ATTRIBUTE_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group\n");
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, ATTRIBUTE_ITERATE_TEST_DTYPE_SUBGROUP_NAME, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container subgroup '%s'\n", ATTRIBUTE_ITERATE_TEST_DTYPE_SUBGROUP_NAME);
+ goto error;
+ }
+
+ if ((tcpl_id = H5Pcreate(H5P_DATATYPE_CREATE)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create TCPL for attribute creation order tracking\n");
+ goto error;
+ }
+
+ if (H5Pset_attr_creation_order(tcpl_id, H5P_CRT_ORDER_TRACKED) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't set attribute creation order tracking\n");
+ goto error;
+ }
+
+ if ((type_id = generate_random_datatype(H5T_NO_CLASS, FALSE)) < 0)
+ TEST_ERROR;
+ if ((attr_dtype = generate_random_datatype(H5T_NO_CLASS, TRUE)) < 0)
+ TEST_ERROR;
+
+ if ((attr_space_id =
+ generate_random_dataspace(ATTRIBUTE_ITERATE_TEST_ATTR_SPACE_RANK, NULL, NULL, TRUE)) < 0)
+ TEST_ERROR;
+
+ if (H5Tcommit2(group_id, ATTRIBUTE_ITERATE_TEST_DTYPE_NAME, type_id, H5P_DEFAULT, tcpl_id, H5P_DEFAULT) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't commit datatype '%s'\n", ATTRIBUTE_ITERATE_TEST_DTYPE_NAME);
+ goto error;
+ }
+
+ /* Create some attributes with a reverse-ordering naming scheme to test creation order */
+ for (i = 0; i < ATTRIBUTE_ITERATE_TEST_NUM_ATTRS; i++) {
+ char attr_name[ATTRIBUTE_ITERATE_TEST_ATTR_NAME_BUF_SIZE];
+
+ HDsnprintf(attr_name, ATTRIBUTE_ITERATE_TEST_ATTR_NAME_BUF_SIZE,
+ ATTRIBUTE_ITERATE_TEST_ATTR_NAME "%d", (int)(ATTRIBUTE_ITERATE_TEST_NUM_ATTRS - i - 1));
+
+ if ((attr_id = H5Acreate2(type_id, attr_name, attr_dtype, attr_space_id, H5P_DEFAULT, H5P_DEFAULT)) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create attribute '%s'\n", attr_name);
+ goto error;
+ }
+
+ /* Verify the attribute has been created */
+ if ((attr_exists = H5Aexists(type_id, attr_name)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if attribute '%s' exists\n", attr_name);
+ goto error;
+ }
+
+ if (!attr_exists) {
+ H5_FAILED();
+ HDprintf(" attribute '%s' did not exist\n", attr_name);
+ goto error;
+ }
+
+ if (H5Aclose(attr_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close attribute '%s'\n", attr_name);
+ goto error;
+ }
+ }
+
+ PASSED();
+
+ BEGIN_MULTIPART
+ {
+ /*
+ * NOTE: A counter is passed to the iteration callback to try to match up the
+ * expected attributes with a given step throughout all of the following
+ * iterations. Since the only information we can count on in the attribute
+ * iteration callback is the attribute's name, we need some other way of
+ * ensuring that the attributes are coming back in the correct order.
+ */
+
+ PART_BEGIN(H5Aiterate2_name_increasing)
+ {
+ TESTING_2("H5Aiterate by attribute name in increasing order");
+
+ link_counter = 0;
+
+ /* Test basic attribute iteration capability using both index types and both index orders */
+ if (H5Aiterate2(type_id, H5_INDEX_NAME, H5_ITER_INC, NULL, attr_iter_callback1, &link_counter) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" H5Aiterate2 by index type name in increasing order failed\n");
+ PART_ERROR(H5Aiterate2_name_increasing);
+ }
+
+ /* Make sure that the attribute iteration callback was actually called */
+ if (link_counter == 0) {
+ H5_FAILED();
+ HDprintf(" H5Aiterate sentinel value is unchanged; supplied callback function must not "
+ "have been called!\n");
+ PART_ERROR(H5Aiterate2_name_increasing);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Aiterate2_name_increasing);
+
+ PART_BEGIN(H5Aiterate2_name_decreasing)
+ {
+ TESTING_2("H5Aiterate by attribute name in decreasing order");
+#ifndef NO_DECREASING_ALPHA_ITER_ORDER
+ /* Reset the counter to the appropriate value for the next test */
+ link_counter = ATTRIBUTE_ITERATE_TEST_NUM_ATTRS;
+
+ if (H5Aiterate2(type_id, H5_INDEX_NAME, H5_ITER_DEC, NULL, attr_iter_callback1, &link_counter) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" H5Aiterate2 by index type name in decreasing order failed\n");
+ PART_ERROR(H5Aiterate2_name_decreasing);
+ }
+
+ /* Make sure that the attribute iteration callback was actually called */
+ if (link_counter == ATTRIBUTE_ITERATE_TEST_NUM_ATTRS) {
+ H5_FAILED();
+ HDprintf(" H5Aiterate sentinel value is unchanged; supplied callback function must not "
+ "have been called!\n");
+ PART_ERROR(H5Aiterate2_name_decreasing);
+ }
+
+ PASSED();
+#else
+ SKIPPED();
+ PART_EMPTY(H5Aiterate2_name_decreasing);
+#endif
+ }
+ PART_END(H5Aiterate2_name_decreasing);
+
+ PART_BEGIN(H5Aiterate2_creation_increasing)
+ {
+ TESTING_2("H5Aiterate by creation order in increasing order");
+
+ /* Reset the counter to the appropriate value for the next test */
+ link_counter = 2 * ATTRIBUTE_ITERATE_TEST_NUM_ATTRS;
+
+ if (H5Aiterate2(type_id, H5_INDEX_CRT_ORDER, H5_ITER_INC, NULL, attr_iter_callback1,
+ &link_counter) < 0) {
+ H5_FAILED();
+ HDprintf(" H5Aiterate2 by index type creation order in increasing order failed\n");
+ PART_ERROR(H5Aiterate2_creation_increasing);
+ }
+
+ /* Make sure that the attribute iteration callback was actually called */
+ if (link_counter == 2 * ATTRIBUTE_ITERATE_TEST_NUM_ATTRS) {
+ H5_FAILED();
+ HDprintf(" H5Aiterate sentinel value is unchanged; supplied callback function must not "
+ "have been called!\n");
+ PART_ERROR(H5Aiterate2_creation_increasing);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Aiterate2_creation_increasing);
+
+ PART_BEGIN(H5Aiterate2_creation_decreasing)
+ {
+ TESTING_2("H5Aiterate by creation order in decreasing order");
+
+ /* Reset the counter to the appropriate value for the next test */
+ link_counter = 3 * ATTRIBUTE_ITERATE_TEST_NUM_ATTRS;
+
+ if (H5Aiterate2(type_id, H5_INDEX_CRT_ORDER, H5_ITER_DEC, NULL, attr_iter_callback1,
+ &link_counter) < 0) {
+ H5_FAILED();
+ HDprintf(" H5Aiterate2 by index type creation order in decreasing order failed\n");
+ PART_ERROR(H5Aiterate2_creation_decreasing);
+ }
+
+ /* Make sure that the attribute iteration callback was actually called */
+ if (link_counter == 3 * ATTRIBUTE_ITERATE_TEST_NUM_ATTRS) {
+ H5_FAILED();
+ HDprintf(" H5Aiterate sentinel value is unchanged; supplied callback function must not "
+ "have been called!\n");
+ PART_ERROR(H5Aiterate2_creation_decreasing);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Aiterate2_creation_decreasing);
+
+ PART_BEGIN(H5Aiterate_by_name_name_increasing)
+ {
+ TESTING_2("H5Aiterate_by_name by attribute name in increasing order");
+
+ /* Reset the counter to the appropriate value for the next test */
+ link_counter = 0;
+
+ if (H5Aiterate_by_name(
+ file_id,
+ "/" ATTRIBUTE_TEST_GROUP_NAME "/" ATTRIBUTE_ITERATE_TEST_DTYPE_SUBGROUP_NAME
+ "/" ATTRIBUTE_ITERATE_TEST_DTYPE_NAME,
+ H5_INDEX_NAME, H5_ITER_INC, NULL, attr_iter_callback1, &link_counter, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" H5Aiterate_by_name by index type name in increasing order failed\n");
+ PART_ERROR(H5Aiterate_by_name_name_increasing);
+ }
+
+ /* Make sure that the attribute iteration callback was actually called */
+ if (link_counter == 0) {
+ H5_FAILED();
+ HDprintf(" H5Aiterate_by_name sentinel value is unchanged; supplied callback function "
+ "must not have been called!\n");
+ PART_ERROR(H5Aiterate_by_name_name_increasing);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Aiterate_by_name_name_increasing);
+
+ PART_BEGIN(H5Aiterate_by_name_name_decreasing)
+ {
+ TESTING_2("H5Aiterate_by_name by attribute name in decreasing order");
+#ifndef NO_DECREASING_ALPHA_ITER_ORDER
+ /* Reset the counter to the appropriate value for the next test */
+ link_counter = ATTRIBUTE_ITERATE_TEST_NUM_ATTRS;
+
+ if (H5Aiterate_by_name(
+ file_id,
+ "/" ATTRIBUTE_TEST_GROUP_NAME "/" ATTRIBUTE_ITERATE_TEST_DTYPE_SUBGROUP_NAME
+ "/" ATTRIBUTE_ITERATE_TEST_DTYPE_NAME,
+ H5_INDEX_NAME, H5_ITER_DEC, NULL, attr_iter_callback1, &link_counter, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" H5Aiterate_by_name by index type name in decreasing order failed\n");
+ PART_ERROR(H5Aiterate_by_name_name_decreasing);
+ }
+
+ /* Make sure that the attribute iteration callback was actually called */
+ if (link_counter == ATTRIBUTE_ITERATE_TEST_NUM_ATTRS) {
+ H5_FAILED();
+ HDprintf(" H5Aiterate_by_name sentinel value is unchanged; supplied callback function "
+ "must not have been called!\n");
+ PART_ERROR(H5Aiterate_by_name_name_decreasing);
+ }
+
+ PASSED();
+#else
+ SKIPPED();
+ PART_EMPTY(H5Aiterate_by_name_name_decreasing);
+#endif
+ }
+ PART_END(H5Aiterate_by_name_name_decreasing);
+
+ PART_BEGIN(H5Aiterate_by_name_creation_increasing)
+ {
+ TESTING_2("H5Aiterate_by_name by creation order in increasing order");
+
+ /* Reset the counter to the appropriate value for the next test */
+ link_counter = 2 * ATTRIBUTE_ITERATE_TEST_NUM_ATTRS;
+
+ if (H5Aiterate_by_name(file_id,
+ "/" ATTRIBUTE_TEST_GROUP_NAME
+ "/" ATTRIBUTE_ITERATE_TEST_DTYPE_SUBGROUP_NAME
+ "/" ATTRIBUTE_ITERATE_TEST_DTYPE_NAME,
+ H5_INDEX_CRT_ORDER, H5_ITER_INC, NULL, attr_iter_callback1, &link_counter,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" H5Aiterate_by_name by index type creation order in increasing order failed\n");
+ PART_ERROR(H5Aiterate_by_name_creation_increasing);
+ }
+
+ /* Make sure that the attribute iteration callback was actually called */
+ if (link_counter == 2 * ATTRIBUTE_ITERATE_TEST_NUM_ATTRS) {
+ H5_FAILED();
+ HDprintf(" H5Aiterate_by_name sentinel value is unchanged; supplied callback function "
+ "must not have been called!\n");
+ PART_ERROR(H5Aiterate_by_name_creation_increasing);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Aiterate_by_name_creation_increasing);
+
+ PART_BEGIN(H5Aiterate_by_name_creation_decreasing)
+ {
+ TESTING_2("H5Aiterate_by_name by creation order in decreasing order");
+
+ /* Reset the counter to the appropriate value for the next test */
+ link_counter = 3 * ATTRIBUTE_ITERATE_TEST_NUM_ATTRS;
+
+ if (H5Aiterate_by_name(file_id,
+ "/" ATTRIBUTE_TEST_GROUP_NAME
+ "/" ATTRIBUTE_ITERATE_TEST_DTYPE_SUBGROUP_NAME
+ "/" ATTRIBUTE_ITERATE_TEST_DTYPE_NAME,
+ H5_INDEX_CRT_ORDER, H5_ITER_DEC, NULL, attr_iter_callback1, &link_counter,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" H5Aiterate_by_name by index type creation order in decreasing order failed\n");
+ PART_ERROR(H5Aiterate_by_name_creation_decreasing);
+ }
+
+ /* Make sure that the attribute iteration callback was actually called */
+ if (link_counter == 3 * ATTRIBUTE_ITERATE_TEST_NUM_ATTRS) {
+ H5_FAILED();
+ HDprintf(" H5Aiterate_by_name sentinel value is unchanged; supplied callback function "
+ "must not have been called!\n");
+ PART_ERROR(H5Aiterate_by_name_creation_decreasing);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Aiterate_by_name_creation_decreasing);
+ }
+ END_MULTIPART;
+
+ TESTING_2("test cleanup");
+
+ if (H5Pclose(tcpl_id) < 0)
+ TEST_ERROR;
+ if (H5Sclose(attr_space_id) < 0)
+ TEST_ERROR;
+ if (H5Tclose(attr_dtype) < 0)
+ TEST_ERROR;
+ if (H5Tclose(type_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Pclose(tcpl_id);
+ H5Sclose(attr_space_id);
+ H5Tclose(attr_dtype);
+ H5Tclose(type_id);
+ H5Aclose(attr_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to check the functionality of attribute
+ * iteration index saving using H5Aiterate(_by_name),
+ * in increasing and decreasing order of both
+ * attribute name and attribute creation order.
+ * This capability is not yet implemented here, so
+ * the test below is currently skipped.
+ */
+static int
+test_attribute_iterate_index_saving(void)
+{
+ TESTING("attribute iteration index saving capability");
+
+ SKIPPED();
+
+ return 1;
+}
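+
+/*
+ * For reference, the "index saving" capability that the skipped test above
+ * would exercise is based on the hsize_t *idx parameter of
+ * H5Aiterate2/H5Aiterate_by_name: when the operator returns a positive value,
+ * iteration stops early and the index of the next attribute to visit is
+ * written back through idx, so a later call can resume where the previous one
+ * left off. The following is only a minimal, hypothetical sketch of that
+ * resume pattern -- the function names and the one-attribute-per-call scheme
+ * are illustrative, not part of the test suite -- and it is deliberately kept
+ * out of the build.
+ */
+#if 0
+static herr_t
+example_attr_iter_stop_after_one(hid_t location_id, const char *attr_name, const H5A_info_t *ainfo,
+ void *op_data)
+{
+ (void)location_id;
+ (void)attr_name;
+ (void)ainfo;
+ (void)op_data;
+
+ return 1; /* positive return: stop iteration; the next index is saved in 'idx' */
+}
+
+static herr_t
+example_resumable_iteration(hid_t obj_id, hsize_t n_attrs)
+{
+ hsize_t idx = 0;
+ herr_t ret = 0;
+
+ /* Visit one attribute per call, resuming from the index saved by the
+ * previous call; n_attrs is the number of attributes on the object,
+ * assumed to be known by the caller.
+ */
+ while (idx < n_attrs) {
+ if ((ret = H5Aiterate2(obj_id, H5_INDEX_NAME, H5_ITER_INC, &idx,
+ example_attr_iter_stop_after_one, NULL)) <= 0)
+ break; /* 0 = nothing left to visit, negative = error */
+ }
+
+ return ret;
+}
+#endif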
+
+/*
+ * A test to check that an object's attributes can't
+ * be iterated over when H5Aiterate(_by_name) is
+ * passed invalid parameters.
+ */
+static int
+test_attribute_iterate_invalid_params(void)
+{
+ herr_t err_ret = -1;
+ htri_t attr_exists;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t attr_id = H5I_INVALID_HID, attr_id2 = H5I_INVALID_HID, attr_id3 = H5I_INVALID_HID,
+ attr_id4 = H5I_INVALID_HID;
+ hid_t attr_dtype = H5I_INVALID_HID;
+ hid_t attr_space_id = H5I_INVALID_HID;
+
+ TESTING_MULTIPART("attribute iteration with invalid parameters");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_ITERATE)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file, group, attribute, or iterate aren't supported with this "
+ "connector\n");
+ return 0;
+ }
+
+ TESTING_2("test setup");
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file\n");
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, ATTRIBUTE_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group\n");
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, ATTRIBUTE_ITERATE_INVALID_PARAMS_TEST_SUBGROUP_NAME,
+ H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container subgroup\n");
+ goto error;
+ }
+
+ if ((attr_dtype = generate_random_datatype(H5T_NO_CLASS, TRUE)) < 0)
+ TEST_ERROR;
+
+ if ((attr_space_id = generate_random_dataspace(ATTRIBUTE_ITERATE_INVALID_PARAMS_TEST_ATTR_SPACE_RANK,
+ NULL, NULL, TRUE)) < 0)
+ TEST_ERROR;
+
+ if ((attr_id = H5Acreate2(group_id, ATTRIBUTE_ITERATE_INVALID_PARAMS_TEST_ATTR_NAME, attr_dtype,
+ attr_space_id, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create attribute\n");
+ goto error;
+ }
+
+ if ((attr_id2 = H5Acreate2(group_id, ATTRIBUTE_ITERATE_INVALID_PARAMS_TEST_ATTR_NAME2, attr_dtype,
+ attr_space_id, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create attribute\n");
+ goto error;
+ }
+
+ if ((attr_id3 = H5Acreate2(group_id, ATTRIBUTE_ITERATE_INVALID_PARAMS_TEST_ATTR_NAME3, attr_dtype,
+ attr_space_id, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create attribute\n");
+ goto error;
+ }
+
+ if ((attr_id4 = H5Acreate2(group_id, ATTRIBUTE_ITERATE_INVALID_PARAMS_TEST_ATTR_NAME4, attr_dtype,
+ attr_space_id, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create attribute\n");
+ goto error;
+ }
+
+ /* Verify the attributes have been created */
+ if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_ITERATE_INVALID_PARAMS_TEST_ATTR_NAME)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if attribute exists\n");
+ goto error;
+ }
+
+ if (!attr_exists) {
+ H5_FAILED();
+ HDprintf(" attribute did not exist\n");
+ goto error;
+ }
+
+ if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_ITERATE_INVALID_PARAMS_TEST_ATTR_NAME2)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if attribute exists\n");
+ goto error;
+ }
+
+ if (!attr_exists) {
+ H5_FAILED();
+ HDprintf(" attribute did not exist\n");
+ goto error;
+ }
+
+ if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_ITERATE_INVALID_PARAMS_TEST_ATTR_NAME3)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if attribute exists\n");
+ goto error;
+ }
+
+ if (!attr_exists) {
+ H5_FAILED();
+ HDprintf(" attribute did not exist\n");
+ goto error;
+ }
+
+ if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_ITERATE_INVALID_PARAMS_TEST_ATTR_NAME4)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if attribute exists\n");
+ goto error;
+ }
+
+ if (!attr_exists) {
+ H5_FAILED();
+ HDprintf(" attribute did not exist\n");
+ goto error;
+ }
+
+ PASSED();
+
+ BEGIN_MULTIPART
+ {
+ PART_BEGIN(H5Aiterate_invalid_loc_id)
+ {
+ TESTING_2("H5Aiterate with an invalid loc_id");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret =
+ H5Aiterate2(H5I_INVALID_HID, H5_INDEX_NAME, H5_ITER_INC, NULL, attr_iter_callback2, NULL);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" iterated over attributes using H5Aiterate with an invalid loc_id!\n");
+ PART_ERROR(H5Aiterate_invalid_loc_id);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Aiterate_invalid_loc_id);
+
+ PART_BEGIN(H5Aiterate_invalid_index_type)
+ {
+ TESTING_2("H5Aiterate with an invalid index type");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret =
+ H5Aiterate2(group_id, H5_INDEX_UNKNOWN, H5_ITER_INC, NULL, attr_iter_callback2, NULL);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" iterated over attributes using H5Aiterate with invalid index type "
+ "H5_INDEX_UNKNOWN!\n");
+ PART_ERROR(H5Aiterate_invalid_index_type);
+ }
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Aiterate2(group_id, H5_INDEX_N, H5_ITER_INC, NULL, attr_iter_callback2, NULL);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(
+ " iterated over attributes using H5Aiterate with invalid index type H5_INDEX_N!\n");
+ PART_ERROR(H5Aiterate_invalid_index_type);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Aiterate_invalid_index_type);
+
+ PART_BEGIN(H5Aiterate_invalid_index_order)
+ {
+ TESTING_2("H5Aiterate with an invalid index ordering");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret =
+ H5Aiterate2(group_id, H5_INDEX_NAME, H5_ITER_UNKNOWN, NULL, attr_iter_callback2, NULL);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" iterated over attributes using H5Aiterate with invalid index ordering "
+ "H5_ITER_UNKNOWN!\n");
+ PART_ERROR(H5Aiterate_invalid_index_order);
+ }
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Aiterate2(group_id, H5_INDEX_NAME, H5_ITER_N, NULL, attr_iter_callback2, NULL);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(
+ " iterated over attributes using H5Aiterate with invalid index ordering H5_ITER_N!\n");
+ PART_ERROR(H5Aiterate_invalid_index_order);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Aiterate_invalid_index_order);
+
+ PART_BEGIN(H5Aiterate_by_name_invalid_loc_id)
+ {
+ TESTING_2("H5Aiterate_by_name with an invalid loc_id");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Aiterate_by_name(H5I_INVALID_HID, ".", H5_INDEX_NAME, H5_ITER_INC, NULL,
+ attr_iter_callback2, NULL, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" iterated over attributes using H5Aiterate_by_name with an invalid loc_id!\n");
+ PART_ERROR(H5Aiterate_by_name_invalid_loc_id);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Aiterate_by_name_invalid_loc_id);
+
+ PART_BEGIN(H5Aiterate_by_name_invalid_obj_name)
+ {
+ TESTING_2("H5Aiterate_by_name with an invalid object name");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Aiterate_by_name(group_id, NULL, H5_INDEX_NAME, H5_ITER_INC, NULL,
+ attr_iter_callback2, NULL, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" iterated over attributes using H5Aiterate_by_name with a NULL object name!\n");
+ PART_ERROR(H5Aiterate_by_name_invalid_obj_name);
+ }
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Aiterate_by_name(group_id, "", H5_INDEX_NAME, H5_ITER_INC, NULL,
+ attr_iter_callback2, NULL, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" iterated over attributes using H5Aiterate_by_name with an invalid object name "
+ "of ''!\n");
+ PART_ERROR(H5Aiterate_by_name_invalid_obj_name);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Aiterate_by_name_invalid_obj_name);
+
+ PART_BEGIN(H5Aiterate_by_name_invalid_index_type)
+ {
+ TESTING_2("H5Aiterate_by_name with an invalid index type");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Aiterate_by_name(group_id, ".", H5_INDEX_UNKNOWN, H5_ITER_INC, NULL,
+ attr_iter_callback2, NULL, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" iterated over attributes using H5Aiterate_by_name with invalid index type "
+ "H5_INDEX_UNKNOWN!\n");
+ PART_ERROR(H5Aiterate_by_name_invalid_index_type);
+ }
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Aiterate_by_name(group_id, ".", H5_INDEX_N, H5_ITER_INC, NULL,
+ attr_iter_callback2, NULL, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" iterated over attributes using H5Aiterate_by_name with invalid index type "
+ "H5_INDEX_N!\n");
+ PART_ERROR(H5Aiterate_by_name_invalid_index_type);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Aiterate_by_name_invalid_index_type);
+
+ PART_BEGIN(H5Aiterate_by_name_invalid_index_order)
+ {
+ TESTING_2("H5Aiterate_by_name with an invalid index ordering");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Aiterate_by_name(group_id, ".", H5_INDEX_NAME, H5_ITER_UNKNOWN, NULL,
+ attr_iter_callback2, NULL, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" iterated over attributes using H5Aiterate_by_name with invalid index ordering "
+ "H5_ITER_UNKNOWN!\n");
+ PART_ERROR(H5Aiterate_by_name_invalid_index_order);
+ }
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Aiterate_by_name(group_id, ".", H5_INDEX_NAME, H5_ITER_N, NULL,
+ attr_iter_callback2, NULL, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" iterated over attributes using H5Aiterate_by_name with invalid index ordering "
+ "H5_ITER_N!\n");
+ PART_ERROR(H5Aiterate_by_name_invalid_index_order);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Aiterate_by_name_invalid_index_order);
+
+ PART_BEGIN(H5Aiterate_by_name_invalid_lapl)
+ {
+ TESTING_2("H5Aiterate_by_name with an invalid LAPL");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Aiterate_by_name(group_id, ".", H5_INDEX_NAME, H5_ITER_INC, NULL,
+ attr_iter_callback2, NULL, H5I_INVALID_HID);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" iterated over attributes using H5Aiterate_by_name with an invalid LAPL!\n");
+ PART_ERROR(H5Aiterate_by_name_invalid_lapl);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Aiterate_by_name_invalid_lapl);
+ }
+ END_MULTIPART;
+
+ TESTING_2("test cleanup");
+
+ if (H5Sclose(attr_space_id) < 0)
+ TEST_ERROR;
+ if (H5Tclose(attr_dtype) < 0)
+ TEST_ERROR;
+ if (H5Aclose(attr_id) < 0)
+ TEST_ERROR;
+ if (H5Aclose(attr_id2) < 0)
+ TEST_ERROR;
+ if (H5Aclose(attr_id3) < 0)
+ TEST_ERROR;
+ if (H5Aclose(attr_id4) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(attr_space_id);
+ H5Tclose(attr_dtype);
+ H5Aclose(attr_id);
+ H5Aclose(attr_id2);
+ H5Aclose(attr_id3);
+ H5Aclose(attr_id4);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
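+
+/*
+ * For reference, attr_iter_callback2 (used by the invalid-parameter test above
+ * and the zero-attribute test below, and defined elsewhere in this file) is
+ * never expected to do real work: with invalid parameters it should not be
+ * invoked at all, and with zero attributes there is nothing to visit. A
+ * minimal, hypothetical no-op operator of the required H5A_operator2_t shape
+ * would look like the following (the name is illustrative, not the actual
+ * implementation); it is deliberately kept out of the build.
+ */
+#if 0
+static herr_t
+example_attr_iter_noop_cb(hid_t location_id, const char *attr_name, const H5A_info_t *ainfo, void *op_data)
+{
+ (void)location_id;
+ (void)attr_name;
+ (void)ainfo;
+ (void)op_data;
+
+ return 0; /* H5_ITER_CONT */
+}
+#endif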
+
+/*
+ * A test to check that attribute iteration performed
+ * on an object with no attributes attached to it is
+ * not problematic.
+ */
+static int
+test_attribute_iterate_0_attributes(void)
+{
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t dset_dtype = H5I_INVALID_HID;
+ hid_t dset_space_id = H5I_INVALID_HID;
+
+ TESTING_MULTIPART("attribute iteration on object with 0 attributes");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_ITERATE) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file, group, dataset, attribute, or iterate aren't supported "
+ "with this connector\n");
+ return 0;
+ }
+
+ TESTING_2("test setup");
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file\n");
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, ATTRIBUTE_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group\n");
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, ATTRIBUTE_ITERATE_TEST_0_ATTRIBUTES_SUBGROUP_NAME,
+ H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container subgroup\n");
+ goto error;
+ }
+
+ if ((dset_dtype = generate_random_datatype(H5T_NO_CLASS, FALSE)) < 0)
+ TEST_ERROR;
+
+ if ((dset_space_id = generate_random_dataspace(ATTRIBUTE_ITERATE_TEST_0_ATTRIBUTES_DSET_SPACE_RANK, NULL,
+ NULL, FALSE)) < 0)
+ TEST_ERROR;
+
+ if ((dset_id = H5Dcreate2(group_id, ATTRIBUTE_ITERATE_TEST_0_ATTRIBUTES_DSET_NAME, dset_dtype,
+ dset_space_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create dataset\n");
+ goto error;
+ }
+
+ PASSED();
+
+ BEGIN_MULTIPART
+ {
+ PART_BEGIN(H5Aiterate_0_attributes_native)
+ {
+ TESTING_2("H5Aiterate (native order)");
+
+ if (H5Aiterate2(dset_id, H5_INDEX_NAME, H5_ITER_NATIVE, NULL, attr_iter_callback2, NULL) < 0) {
+ H5_FAILED();
+ HDprintf(" H5Aiterate2 on object with 0 attributes failed\n");
+ PART_ERROR(H5Aiterate_0_attributes_native);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Aiterate_0_attributes_native);
+
+ PART_BEGIN(H5Aiterate_0_attributes_inc)
+ {
+ TESTING_2("H5Aiterate (increasing order)");
+
+ if (H5Aiterate2(dset_id, H5_INDEX_NAME, H5_ITER_INC, NULL, attr_iter_callback2, NULL) < 0) {
+ H5_FAILED();
+ HDprintf(" H5Aiterate2 on object with 0 attributes failed\n");
+ PART_ERROR(H5Aiterate_0_attributes_inc);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Aiterate_0_attributes_inc);
+
+ PART_BEGIN(H5Aiterate_0_attributes_dec)
+ {
+ TESTING_2("H5Aiterate (decreasing order)");
+#ifndef NO_DECREASING_ALPHA_ITER_ORDER
+ if (H5Aiterate2(dset_id, H5_INDEX_NAME, H5_ITER_DEC, NULL, attr_iter_callback2, NULL) < 0) {
+ H5_FAILED();
+ HDprintf(" H5Aiterate2 on object with 0 attributes failed\n");
+ PART_ERROR(H5Aiterate_0_attributes_dec);
+ }
+
+ PASSED();
+#else
+ SKIPPED();
+ PART_EMPTY(H5Aiterate_0_attributes_dec);
+#endif
+ }
+ PART_END(H5Aiterate_0_attributes_dec);
+
+ PART_BEGIN(H5Aiterate_by_name_0_attributes_native)
+ {
+ TESTING_2("H5Aiterate_by_name (native order)");
+
+ if (H5Aiterate_by_name(group_id, ATTRIBUTE_ITERATE_TEST_0_ATTRIBUTES_DSET_NAME, H5_INDEX_NAME,
+ H5_ITER_NATIVE, NULL, attr_iter_callback2, NULL, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" H5Aiterate_by_name on object with 0 attributes failed\n");
+ PART_ERROR(H5Aiterate_by_name_0_attributes_native);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Aiterate_by_name_0_attributes_native);
+
+ PART_BEGIN(H5Aiterate_by_name_0_attributes_inc)
+ {
+ TESTING_2("H5Aiterate_by_name (increasing order)");
+
+ if (H5Aiterate_by_name(group_id, ATTRIBUTE_ITERATE_TEST_0_ATTRIBUTES_DSET_NAME, H5_INDEX_NAME,
+ H5_ITER_INC, NULL, attr_iter_callback2, NULL, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" H5Aiterate_by_name on object with 0 attributes failed\n");
+ PART_ERROR(H5Aiterate_by_name_0_attributes_inc);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Aiterate_by_name_0_attributes_inc);
+
+ PART_BEGIN(H5Aiterate_by_name_0_attributes_dec)
+ {
+ TESTING_2("H5Aiterate_by_name (decreasing order)");
+#ifndef NO_DECREASING_ALPHA_ITER_ORDER
+ if (H5Aiterate_by_name(group_id, ATTRIBUTE_ITERATE_TEST_0_ATTRIBUTES_DSET_NAME, H5_INDEX_NAME,
+ H5_ITER_DEC, NULL, attr_iter_callback2, NULL, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" H5Aiterate_by_name on object with 0 attributes failed\n");
+ PART_ERROR(H5Aiterate_by_name_0_attributes_dec);
+ }
+
+ PASSED();
+#else
+ SKIPPED();
+ PART_EMPTY(H5Aiterate_by_name_0_attributes_dec);
+#endif
+ }
+ PART_END(H5Aiterate_by_name_0_attributes_dec);
+ }
+ END_MULTIPART;
+
+ TESTING_2("test cleanup");
+
+ if (H5Sclose(dset_space_id) < 0)
+ TEST_ERROR;
+ if (H5Tclose(dset_dtype) < 0)
+ TEST_ERROR;
+ if (H5Dclose(dset_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(dset_space_id);
+ H5Tclose(dset_dtype);
+ H5Dclose(dset_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to check that an attribute can be deleted
+ * using H5Adelete(_by_name/_by_idx).
+ */
+static int
+test_delete_attribute(void)
+{
+ htri_t attr_exists;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t attr_id = H5I_INVALID_HID;
+ hid_t attr_dtype = H5I_INVALID_HID;
+ hid_t space_id = H5I_INVALID_HID;
+ hid_t gcpl_id = H5I_INVALID_HID;
+
+ TESTING_MULTIPART("attribute deletion");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_CREATION_ORDER)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file, group, attribute, or creation order aren't supported "
+ "with this connector\n");
+ return 0;
+ }
+
+ TESTING_2("test setup");
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, ATTRIBUTE_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", ATTRIBUTE_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((gcpl_id = H5Pcreate(H5P_GROUP_CREATE)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create GCPL for attribute creation order tracking\n");
+ goto error;
+ }
+
+ if (H5Pset_attr_creation_order(gcpl_id, H5P_CRT_ORDER_TRACKED) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't set attribute creation order tracking\n");
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, ATTRIBUTE_DELETION_TEST_GROUP_NAME, H5P_DEFAULT, gcpl_id,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container group '%s'\n", ATTRIBUTE_DELETION_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((space_id = generate_random_dataspace(ATTRIBUTE_DELETION_TEST_SPACE_RANK, NULL, NULL, TRUE)) < 0)
+ TEST_ERROR;
+
+ if ((attr_dtype = generate_random_datatype(H5T_NO_CLASS, TRUE)) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ BEGIN_MULTIPART
+ {
+ PART_BEGIN(H5Adelete)
+ {
+ TESTING_2("H5Adelete");
+
+ /* Test H5Adelete */
+ if ((attr_id = H5Acreate2(group_id, ATTRIBUTE_DELETION_TEST_ATTR_NAME, attr_dtype, space_id,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create attribute '%s'\n", ATTRIBUTE_DELETION_TEST_ATTR_NAME);
+ PART_ERROR(H5Adelete);
+ }
+
+ /* Verify the attribute has been created */
+ if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_DELETION_TEST_ATTR_NAME)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if attribute '%s' exists\n",
+ ATTRIBUTE_DELETION_TEST_ATTR_NAME);
+ PART_ERROR(H5Adelete);
+ }
+
+ if (!attr_exists) {
+ H5_FAILED();
+ HDprintf(" attribute '%s' didn't exist before deletion\n",
+ ATTRIBUTE_DELETION_TEST_ATTR_NAME);
+ PART_ERROR(H5Adelete);
+ }
+
+ /* Delete the attribute */
+ if (H5Adelete(group_id, ATTRIBUTE_DELETION_TEST_ATTR_NAME) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to delete attribute '%s'\n", ATTRIBUTE_DELETION_TEST_ATTR_NAME);
+ PART_ERROR(H5Adelete);
+ }
+
+ /* Verify the attribute has been deleted */
+ if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_DELETION_TEST_ATTR_NAME)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if attribute '%s' exists\n",
+ ATTRIBUTE_DELETION_TEST_ATTR_NAME);
+ PART_ERROR(H5Adelete);
+ }
+
+ if (attr_exists) {
+ H5_FAILED();
+ HDprintf(" attribute '%s' exists after deletion!\n", ATTRIBUTE_DELETION_TEST_ATTR_NAME);
+ PART_ERROR(H5Adelete);
+ }
+
+ if (H5Aclose(attr_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close attribute '%s'\n", ATTRIBUTE_DELETION_TEST_ATTR_NAME);
+ PART_ERROR(H5Adelete);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Adelete);
+
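+ /* Close the attribute handle with errors suppressed, in case the part above failed before closing it */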
+ H5E_BEGIN_TRY
+ {
+ H5Aclose(attr_id);
+ attr_id = H5I_INVALID_HID;
+ }
+ H5E_END_TRY;
+
+ PART_BEGIN(H5Adelete_by_name)
+ {
+ TESTING_2("H5Adelete_by_name");
+
+ /* Test H5Adelete_by_name */
+ if ((attr_id = H5Acreate2(group_id, ATTRIBUTE_DELETION_TEST_ATTR_NAME, attr_dtype, space_id,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create attribute '%s'\n", ATTRIBUTE_DELETION_TEST_ATTR_NAME);
+ PART_ERROR(H5Adelete_by_name);
+ }
+
+ /* Verify the attribute has been created */
+ if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_DELETION_TEST_ATTR_NAME)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if attribute '%s' exists\n",
+ ATTRIBUTE_DELETION_TEST_ATTR_NAME);
+ PART_ERROR(H5Adelete_by_name);
+ }
+
+ if (!attr_exists) {
+ H5_FAILED();
+ HDprintf(" attribute '%s' didn't exist before deletion\n",
+ ATTRIBUTE_DELETION_TEST_ATTR_NAME);
+ PART_ERROR(H5Adelete_by_name);
+ }
+
+ /* Delete the attribute */
+ if (H5Adelete_by_name(container_group, ATTRIBUTE_DELETION_TEST_GROUP_NAME,
+ ATTRIBUTE_DELETION_TEST_ATTR_NAME, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to delete attribute '%s'\n", ATTRIBUTE_DELETION_TEST_ATTR_NAME);
+ PART_ERROR(H5Adelete_by_name);
+ }
+
+ /* Verify the attribute has been deleted */
+ if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_DELETION_TEST_ATTR_NAME)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if attribute '%s' exists\n",
+ ATTRIBUTE_DELETION_TEST_ATTR_NAME);
+ PART_ERROR(H5Adelete_by_name);
+ }
+
+ if (attr_exists) {
+ H5_FAILED();
+ HDprintf(" attribute '%s' exists after deletion!\n", ATTRIBUTE_DELETION_TEST_ATTR_NAME);
+ PART_ERROR(H5Adelete_by_name);
+ }
+
+ if (H5Aclose(attr_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close attribute '%s'\n", ATTRIBUTE_DELETION_TEST_ATTR_NAME);
+ PART_ERROR(H5Adelete_by_name);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Adelete_by_name);
+
+ H5E_BEGIN_TRY
+ {
+ H5Aclose(attr_id);
+ attr_id = H5I_INVALID_HID;
+ }
+ H5E_END_TRY;
+
+ PART_BEGIN(H5Adelete_by_idx_crt_order_increasing)
+ {
+ TESTING_2("H5Adelete_by_idx by creation order in increasing order");
+
+ /* Create several attributes */
+ if ((attr_id = H5Acreate2(group_id, ATTRIBUTE_DELETION_TEST_ATTR_NAME, attr_dtype, space_id,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create attribute '%s'\n", ATTRIBUTE_DELETION_TEST_ATTR_NAME);
+ PART_ERROR(H5Adelete_by_idx_crt_order_increasing);
+ }
+
+ if (H5Aclose(attr_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close attribute '%s'\n", ATTRIBUTE_DELETION_TEST_ATTR_NAME);
+ PART_ERROR(H5Adelete_by_idx_crt_order_increasing);
+ }
+
+ if ((attr_id = H5Acreate2(group_id, ATTRIBUTE_DELETION_TEST_ATTR_NAME2, attr_dtype, space_id,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create attribute '%s'\n", ATTRIBUTE_DELETION_TEST_ATTR_NAME2);
+ PART_ERROR(H5Adelete_by_idx_crt_order_increasing);
+ }
+
+ if (H5Aclose(attr_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close attribute '%s'\n", ATTRIBUTE_DELETION_TEST_ATTR_NAME2);
+ PART_ERROR(H5Adelete_by_idx_crt_order_increasing);
+ }
+
+ if ((attr_id = H5Acreate2(group_id, ATTRIBUTE_DELETION_TEST_ATTR_NAME3, attr_dtype, space_id,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create attribute '%s'\n", ATTRIBUTE_DELETION_TEST_ATTR_NAME3);
+ PART_ERROR(H5Adelete_by_idx_crt_order_increasing);
+ }
+
+ if (H5Aclose(attr_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close attribute '%s'\n", ATTRIBUTE_DELETION_TEST_ATTR_NAME3);
+ PART_ERROR(H5Adelete_by_idx_crt_order_increasing);
+ }
+
+ /* Verify the attributes have been created */
+ if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_DELETION_TEST_ATTR_NAME)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if attribute '%s' exists\n",
+ ATTRIBUTE_DELETION_TEST_ATTR_NAME);
+ PART_ERROR(H5Adelete_by_idx_crt_order_increasing);
+ }
+
+ if (!attr_exists) {
+ H5_FAILED();
+ HDprintf(" attribute '%s' didn't exist before deletion\n",
+ ATTRIBUTE_DELETION_TEST_ATTR_NAME);
+ PART_ERROR(H5Adelete_by_idx_crt_order_increasing);
+ }
+
+ if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_DELETION_TEST_ATTR_NAME2)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if attribute '%s' exists\n",
+ ATTRIBUTE_DELETION_TEST_ATTR_NAME2);
+ PART_ERROR(H5Adelete_by_idx_crt_order_increasing);
+ }
+
+ if (!attr_exists) {
+ H5_FAILED();
+ HDprintf(" attribute '%s' didn't exist before deletion\n",
+ ATTRIBUTE_DELETION_TEST_ATTR_NAME2);
+ PART_ERROR(H5Adelete_by_idx_crt_order_increasing);
+ }
+
+ if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_DELETION_TEST_ATTR_NAME3)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if attribute '%s' exists\n",
+ ATTRIBUTE_DELETION_TEST_ATTR_NAME3);
+ PART_ERROR(H5Adelete_by_idx_crt_order_increasing);
+ }
+
+ if (!attr_exists) {
+ H5_FAILED();
+ HDprintf(" attribute '%s' didn't exist before deletion\n",
+ ATTRIBUTE_DELETION_TEST_ATTR_NAME3);
+ PART_ERROR(H5Adelete_by_idx_crt_order_increasing);
+ }
+
+ /* Delete an attribute */
+ if (H5Adelete_by_idx(container_group, ATTRIBUTE_DELETION_TEST_GROUP_NAME, H5_INDEX_CRT_ORDER,
+ H5_ITER_INC, 0, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to delete attribute using H5Adelete_by_idx by creation order in "
+ "increasing order\n");
+ PART_ERROR(H5Adelete_by_idx_crt_order_increasing);
+ }
+
+ /* Ensure that the attribute is gone and others remain */
+ if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_DELETION_TEST_ATTR_NAME)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if attribute '%s' exists\n",
+ ATTRIBUTE_DELETION_TEST_ATTR_NAME);
+ PART_ERROR(H5Adelete_by_idx_crt_order_increasing);
+ }
+
+ if (attr_exists) {
+ H5_FAILED();
+ HDprintf(" attribute '%s' exists after deletion!\n", ATTRIBUTE_DELETION_TEST_ATTR_NAME);
+ PART_ERROR(H5Adelete_by_idx_crt_order_increasing);
+ }
+
+ if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_DELETION_TEST_ATTR_NAME2)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if attribute '%s' exists\n",
+ ATTRIBUTE_DELETION_TEST_ATTR_NAME2);
+ PART_ERROR(H5Adelete_by_idx_crt_order_increasing);
+ }
+
+ if (!attr_exists) {
+ H5_FAILED();
+ HDprintf(" attribute '%s' doesn't exist after deletion of a different attribute!\n",
+ ATTRIBUTE_DELETION_TEST_ATTR_NAME2);
+ PART_ERROR(H5Adelete_by_idx_crt_order_increasing);
+ }
+
+ if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_DELETION_TEST_ATTR_NAME3)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if attribute '%s' exists\n",
+ ATTRIBUTE_DELETION_TEST_ATTR_NAME3);
+ PART_ERROR(H5Adelete_by_idx_crt_order_increasing);
+ }
+
+ if (!attr_exists) {
+ H5_FAILED();
+ HDprintf(" attribute '%s' doesn't exist after deletion of a different attribute!\n",
+ ATTRIBUTE_DELETION_TEST_ATTR_NAME3);
+ PART_ERROR(H5Adelete_by_idx_crt_order_increasing);
+ }
+
+ /* Repeat until all attributes have been deleted */
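+ /* After the previous deletion, the remaining attributes shift down, so index 0 now refers to the next attribute in creation order */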
+ if (H5Adelete_by_idx(container_group, ATTRIBUTE_DELETION_TEST_GROUP_NAME, H5_INDEX_CRT_ORDER,
+ H5_ITER_INC, 0, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to delete attribute using H5Adelete_by_idx by creation order in "
+ "increasing order\n");
+ PART_ERROR(H5Adelete_by_idx_crt_order_increasing);
+ }
+
+ if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_DELETION_TEST_ATTR_NAME)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if attribute '%s' exists\n",
+ ATTRIBUTE_DELETION_TEST_ATTR_NAME);
+ PART_ERROR(H5Adelete_by_idx_crt_order_increasing);
+ }
+
+ if (attr_exists) {
+ H5_FAILED();
+ HDprintf(" attribute '%s' exists after deletion!\n", ATTRIBUTE_DELETION_TEST_ATTR_NAME);
+ PART_ERROR(H5Adelete_by_idx_crt_order_increasing);
+ }
+
+ if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_DELETION_TEST_ATTR_NAME2)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if attribute '%s' exists\n",
+ ATTRIBUTE_DELETION_TEST_ATTR_NAME2);
+ PART_ERROR(H5Adelete_by_idx_crt_order_increasing);
+ }
+
+ if (attr_exists) {
+ H5_FAILED();
+ HDprintf(" attribute '%s' exists after deletion!\n", ATTRIBUTE_DELETION_TEST_ATTR_NAME2);
+ PART_ERROR(H5Adelete_by_idx_crt_order_increasing);
+ }
+
+ if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_DELETION_TEST_ATTR_NAME3)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if attribute '%s' exists\n",
+ ATTRIBUTE_DELETION_TEST_ATTR_NAME3);
+ PART_ERROR(H5Adelete_by_idx_crt_order_increasing);
+ }
+
+ if (!attr_exists) {
+ H5_FAILED();
+ HDprintf(" attribute '%s' doesn't exist after deletion of a different attribute!\n",
+ ATTRIBUTE_DELETION_TEST_ATTR_NAME3);
+ PART_ERROR(H5Adelete_by_idx_crt_order_increasing);
+ }
+
+ if (H5Adelete_by_idx(container_group, ATTRIBUTE_DELETION_TEST_GROUP_NAME, H5_INDEX_CRT_ORDER,
+ H5_ITER_INC, 0, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to delete attribute using H5Adelete_by_idx by creation order in "
+ "increasing order\n");
+ PART_ERROR(H5Adelete_by_idx_crt_order_increasing);
+ }
+
+ if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_DELETION_TEST_ATTR_NAME)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if attribute '%s' exists\n",
+ ATTRIBUTE_DELETION_TEST_ATTR_NAME);
+ PART_ERROR(H5Adelete_by_idx_crt_order_increasing);
+ }
+
+ if (attr_exists) {
+ H5_FAILED();
+ HDprintf(" attribute '%s' exists after deletion!\n", ATTRIBUTE_DELETION_TEST_ATTR_NAME);
+ PART_ERROR(H5Adelete_by_idx_crt_order_increasing);
+ }
+
+ if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_DELETION_TEST_ATTR_NAME2)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if attribute '%s' exists\n",
+ ATTRIBUTE_DELETION_TEST_ATTR_NAME2);
+ PART_ERROR(H5Adelete_by_idx_crt_order_increasing);
+ }
+
+ if (attr_exists) {
+ H5_FAILED();
+ HDprintf(" attribute '%s' exists after deletion!\n", ATTRIBUTE_DELETION_TEST_ATTR_NAME2);
+ PART_ERROR(H5Adelete_by_idx_crt_order_increasing);
+ }
+
+ if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_DELETION_TEST_ATTR_NAME3)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if attribute '%s' exists\n",
+ ATTRIBUTE_DELETION_TEST_ATTR_NAME3);
+ PART_ERROR(H5Adelete_by_idx_crt_order_increasing);
+ }
+
+ if (attr_exists) {
+ H5_FAILED();
+ HDprintf(" attribute '%s' exists after deletion!\n", ATTRIBUTE_DELETION_TEST_ATTR_NAME3);
+ PART_ERROR(H5Adelete_by_idx_crt_order_increasing);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Adelete_by_idx_crt_order_increasing);
+
+ H5E_BEGIN_TRY
+ {
+ H5Aclose(attr_id);
+ attr_id = H5I_INVALID_HID;
+ }
+ H5E_END_TRY;
+
+ PART_BEGIN(H5Adelete_by_idx_crt_order_decreasing)
+ {
+ TESTING_2("H5Adelete_by_idx by creation order in decreasing order");
+
+ /* Create several attributes */
+ if ((attr_id = H5Acreate2(group_id, ATTRIBUTE_DELETION_TEST_ATTR_NAME, attr_dtype, space_id,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create attribute '%s'\n", ATTRIBUTE_DELETION_TEST_ATTR_NAME);
+ PART_ERROR(H5Adelete_by_idx_crt_order_decreasing);
+ }
+
+ if (H5Aclose(attr_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close attribute '%s'\n", ATTRIBUTE_DELETION_TEST_ATTR_NAME);
+ PART_ERROR(H5Adelete_by_idx_crt_order_decreasing);
+ }
+
+ if ((attr_id = H5Acreate2(group_id, ATTRIBUTE_DELETION_TEST_ATTR_NAME2, attr_dtype, space_id,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create attribute '%s'\n", ATTRIBUTE_DELETION_TEST_ATTR_NAME2);
+ PART_ERROR(H5Adelete_by_idx_crt_order_decreasing);
+ }
+
+ if (H5Aclose(attr_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close attribute '%s'\n", ATTRIBUTE_DELETION_TEST_ATTR_NAME2);
+ PART_ERROR(H5Adelete_by_idx_crt_order_decreasing);
+ }
+
+ if ((attr_id = H5Acreate2(group_id, ATTRIBUTE_DELETION_TEST_ATTR_NAME3, attr_dtype, space_id,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create attribute '%s'\n", ATTRIBUTE_DELETION_TEST_ATTR_NAME3);
+ PART_ERROR(H5Adelete_by_idx_crt_order_decreasing);
+ }
+
+ if (H5Aclose(attr_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close attribute '%s'\n", ATTRIBUTE_DELETION_TEST_ATTR_NAME3);
+ PART_ERROR(H5Adelete_by_idx_crt_order_decreasing);
+ }
+
+ /* Verify the attributes have been created */
+ if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_DELETION_TEST_ATTR_NAME)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if attribute '%s' exists\n",
+ ATTRIBUTE_DELETION_TEST_ATTR_NAME);
+ PART_ERROR(H5Adelete_by_idx_crt_order_decreasing);
+ }
+
+ if (!attr_exists) {
+ H5_FAILED();
+ HDprintf(" attribute '%s' didn't exist before deletion\n",
+ ATTRIBUTE_DELETION_TEST_ATTR_NAME);
+ PART_ERROR(H5Adelete_by_idx_crt_order_decreasing);
+ }
+
+ if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_DELETION_TEST_ATTR_NAME2)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if attribute '%s' exists\n",
+ ATTRIBUTE_DELETION_TEST_ATTR_NAME2);
+ PART_ERROR(H5Adelete_by_idx_crt_order_decreasing);
+ }
+
+ if (!attr_exists) {
+ H5_FAILED();
+ HDprintf(" attribute '%s' didn't exist before deletion\n",
+ ATTRIBUTE_DELETION_TEST_ATTR_NAME2);
+ PART_ERROR(H5Adelete_by_idx_crt_order_decreasing);
+ }
+
+ if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_DELETION_TEST_ATTR_NAME3)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if attribute '%s' exists\n",
+ ATTRIBUTE_DELETION_TEST_ATTR_NAME3);
+ PART_ERROR(H5Adelete_by_idx_crt_order_decreasing);
+ }
+
+ if (!attr_exists) {
+ H5_FAILED();
+ HDprintf(" attribute '%s' didn't exist before deletion\n",
+ ATTRIBUTE_DELETION_TEST_ATTR_NAME3);
+ PART_ERROR(H5Adelete_by_idx_crt_order_decreasing);
+ }
+
+ /* Delete an attribute */
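+ /* With three attributes present, index 2 in decreasing creation order refers to the first-created attribute */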
+ if (H5Adelete_by_idx(container_group, ATTRIBUTE_DELETION_TEST_GROUP_NAME, H5_INDEX_CRT_ORDER,
+ H5_ITER_DEC, 2, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to delete attribute using H5Adelete_by_idx by creation order in "
+ "decreasing order\n");
+ PART_ERROR(H5Adelete_by_idx_crt_order_decreasing);
+ }
+
+ /* Ensure that the attribute is gone and others remain */
+ if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_DELETION_TEST_ATTR_NAME)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if attribute '%s' exists\n",
+ ATTRIBUTE_DELETION_TEST_ATTR_NAME);
+ PART_ERROR(H5Adelete_by_idx_crt_order_decreasing);
+ }
+
+ if (attr_exists) {
+ H5_FAILED();
+ HDprintf(" attribute '%s' exists after deletion!\n", ATTRIBUTE_DELETION_TEST_ATTR_NAME);
+ PART_ERROR(H5Adelete_by_idx_crt_order_decreasing);
+ }
+
+ if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_DELETION_TEST_ATTR_NAME2)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if attribute '%s' exists\n",
+ ATTRIBUTE_DELETION_TEST_ATTR_NAME2);
+ PART_ERROR(H5Adelete_by_idx_crt_order_decreasing);
+ }
+
+ if (!attr_exists) {
+ H5_FAILED();
+ HDprintf(" attribute '%s' doesn't exist after deletion of a different attribute!\n",
+ ATTRIBUTE_DELETION_TEST_ATTR_NAME2);
+ PART_ERROR(H5Adelete_by_idx_crt_order_decreasing);
+ }
+
+ if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_DELETION_TEST_ATTR_NAME3)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if attribute '%s' exists\n",
+ ATTRIBUTE_DELETION_TEST_ATTR_NAME3);
+ PART_ERROR(H5Adelete_by_idx_crt_order_decreasing);
+ }
+
+ if (!attr_exists) {
+ H5_FAILED();
+ HDprintf(" attribute '%s' doesn't exist after deletion of a different attribute!\n",
+ ATTRIBUTE_DELETION_TEST_ATTR_NAME3);
+ PART_ERROR(H5Adelete_by_idx_crt_order_decreasing);
+ }
+
+ /* Repeat until all attributes have been deleted */
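+ /* Two attributes remain; in decreasing creation order, index 1 now refers to the older of the two */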
+ if (H5Adelete_by_idx(container_group, ATTRIBUTE_DELETION_TEST_GROUP_NAME, H5_INDEX_CRT_ORDER,
+ H5_ITER_DEC, 1, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to delete attribute using H5Adelete_by_idx by creation order in "
+ "decreasing order\n");
+ PART_ERROR(H5Adelete_by_idx_crt_order_decreasing);
+ }
+
+ if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_DELETION_TEST_ATTR_NAME)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if attribute '%s' exists\n",
+ ATTRIBUTE_DELETION_TEST_ATTR_NAME);
+ PART_ERROR(H5Adelete_by_idx_crt_order_decreasing);
+ }
+
+ if (attr_exists) {
+ H5_FAILED();
+ HDprintf(" attribute '%s' exists after deletion!\n", ATTRIBUTE_DELETION_TEST_ATTR_NAME);
+ PART_ERROR(H5Adelete_by_idx_crt_order_decreasing);
+ }
+
+ if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_DELETION_TEST_ATTR_NAME2)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if attribute '%s' exists\n",
+ ATTRIBUTE_DELETION_TEST_ATTR_NAME2);
+ PART_ERROR(H5Adelete_by_idx_crt_order_decreasing);
+ }
+
+ if (attr_exists) {
+ H5_FAILED();
+ HDprintf(" attribute '%s' exists after deletion!\n", ATTRIBUTE_DELETION_TEST_ATTR_NAME2);
+ PART_ERROR(H5Adelete_by_idx_crt_order_decreasing);
+ }
+
+ if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_DELETION_TEST_ATTR_NAME3)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if attribute '%s' exists\n",
+ ATTRIBUTE_DELETION_TEST_ATTR_NAME3);
+ PART_ERROR(H5Adelete_by_idx_crt_order_decreasing);
+ }
+
+ if (!attr_exists) {
+ H5_FAILED();
+ HDprintf(" attribute '%s' doesn't exist after deletion of a different attribute!\n",
+ ATTRIBUTE_DELETION_TEST_ATTR_NAME3);
+ PART_ERROR(H5Adelete_by_idx_crt_order_decreasing);
+ }
+
+ if (H5Adelete_by_idx(container_group, ATTRIBUTE_DELETION_TEST_GROUP_NAME, H5_INDEX_CRT_ORDER,
+ H5_ITER_DEC, 0, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to delete attribute using H5Adelete_by_idx by creation order in "
+ "decreasing order\n");
+ PART_ERROR(H5Adelete_by_idx_crt_order_decreasing);
+ }
+
+ if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_DELETION_TEST_ATTR_NAME)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if attribute '%s' exists\n",
+ ATTRIBUTE_DELETION_TEST_ATTR_NAME);
+ PART_ERROR(H5Adelete_by_idx_crt_order_decreasing);
+ }
+
+ if (attr_exists) {
+ H5_FAILED();
+ HDprintf(" attribute '%s' exists after deletion!\n", ATTRIBUTE_DELETION_TEST_ATTR_NAME);
+ PART_ERROR(H5Adelete_by_idx_crt_order_decreasing);
+ }
+
+ if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_DELETION_TEST_ATTR_NAME2)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if attribute '%s' exists\n",
+ ATTRIBUTE_DELETION_TEST_ATTR_NAME2);
+ PART_ERROR(H5Adelete_by_idx_crt_order_decreasing);
+ }
+
+ if (attr_exists) {
+ H5_FAILED();
+ HDprintf(" attribute '%s' exists after deletion!\n", ATTRIBUTE_DELETION_TEST_ATTR_NAME2);
+ PART_ERROR(H5Adelete_by_idx_crt_order_decreasing);
+ }
+
+ if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_DELETION_TEST_ATTR_NAME3)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if attribute '%s' exists\n",
+ ATTRIBUTE_DELETION_TEST_ATTR_NAME3);
+ PART_ERROR(H5Adelete_by_idx_crt_order_decreasing);
+ }
+
+ if (attr_exists) {
+ H5_FAILED();
+ HDprintf(" attribute '%s' exists after deletion!\n", ATTRIBUTE_DELETION_TEST_ATTR_NAME3);
+ PART_ERROR(H5Adelete_by_idx_crt_order_decreasing);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Adelete_by_idx_crt_order_decreasing);
+
+ H5E_BEGIN_TRY
+ {
+ H5Aclose(attr_id);
+ attr_id = H5I_INVALID_HID;
+ }
+ H5E_END_TRY;
+
+ PART_BEGIN(H5Adelete_by_idx_name_order_increasing)
+ {
+ TESTING_2("H5Adelete_by_idx by alphabetical order in increasing order");
+
+ /* Create several attributes */
+ if ((attr_id = H5Acreate2(group_id, ATTRIBUTE_DELETION_TEST_ATTR_NAME, attr_dtype, space_id,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create attribute '%s'\n", ATTRIBUTE_DELETION_TEST_ATTR_NAME);
+ PART_ERROR(H5Adelete_by_idx_name_order_increasing);
+ }
+
+ if (H5Aclose(attr_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close attribute '%s'\n", ATTRIBUTE_DELETION_TEST_ATTR_NAME);
+ PART_ERROR(H5Adelete_by_idx_name_order_increasing);
+ }
+
+ if ((attr_id = H5Acreate2(group_id, ATTRIBUTE_DELETION_TEST_ATTR_NAME2, attr_dtype, space_id,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create attribute '%s'\n", ATTRIBUTE_DELETION_TEST_ATTR_NAME2);
+ PART_ERROR(H5Adelete_by_idx_name_order_increasing);
+ }
+
+ if (H5Aclose(attr_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close attribute '%s'\n", ATTRIBUTE_DELETION_TEST_ATTR_NAME2);
+ PART_ERROR(H5Adelete_by_idx_name_order_increasing);
+ }
+
+ if ((attr_id = H5Acreate2(group_id, ATTRIBUTE_DELETION_TEST_ATTR_NAME3, attr_dtype, space_id,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create attribute '%s'\n", ATTRIBUTE_DELETION_TEST_ATTR_NAME3);
+ PART_ERROR(H5Adelete_by_idx_name_order_increasing);
+ }
+
+ if (H5Aclose(attr_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close attribute '%s'\n", ATTRIBUTE_DELETION_TEST_ATTR_NAME3);
+ PART_ERROR(H5Adelete_by_idx_name_order_increasing);
+ }
+
+ /* Verify the attributes have been created */
+ if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_DELETION_TEST_ATTR_NAME)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if attribute '%s' exists\n",
+ ATTRIBUTE_DELETION_TEST_ATTR_NAME);
+ PART_ERROR(H5Adelete_by_idx_name_order_increasing);
+ }
+
+ if (!attr_exists) {
+ H5_FAILED();
+ HDprintf(" attribute '%s' didn't exist before deletion\n",
+ ATTRIBUTE_DELETION_TEST_ATTR_NAME);
+ PART_ERROR(H5Adelete_by_idx_name_order_increasing);
+ }
+
+ if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_DELETION_TEST_ATTR_NAME2)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if attribute '%s' exists\n",
+ ATTRIBUTE_DELETION_TEST_ATTR_NAME2);
+ PART_ERROR(H5Adelete_by_idx_name_order_increasing);
+ }
+
+ if (!attr_exists) {
+ H5_FAILED();
+ HDprintf(" attribute '%s' didn't exist before deletion\n",
+ ATTRIBUTE_DELETION_TEST_ATTR_NAME2);
+ PART_ERROR(H5Adelete_by_idx_name_order_increasing);
+ }
+
+ if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_DELETION_TEST_ATTR_NAME3)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if attribute '%s' exists\n",
+ ATTRIBUTE_DELETION_TEST_ATTR_NAME3);
+ PART_ERROR(H5Adelete_by_idx_name_order_increasing);
+ }
+
+ if (!attr_exists) {
+ H5_FAILED();
+ HDprintf(" attribute '%s' didn't exist before deletion\n",
+ ATTRIBUTE_DELETION_TEST_ATTR_NAME3);
+ PART_ERROR(H5Adelete_by_idx_name_order_increasing);
+ }
+
+ /* Delete an attribute */
+ if (H5Adelete_by_idx(container_group, ATTRIBUTE_DELETION_TEST_GROUP_NAME, H5_INDEX_NAME,
+ H5_ITER_INC, 0, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to delete attribute using H5Adelete_by_idx by alphabetical order in "
+ "increasing order\n");
+ PART_ERROR(H5Adelete_by_idx_name_order_increasing);
+ }
+
+ /* Ensure that the attribute is gone and others remain */
+ if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_DELETION_TEST_ATTR_NAME)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if attribute '%s' exists\n",
+ ATTRIBUTE_DELETION_TEST_ATTR_NAME);
+ PART_ERROR(H5Adelete_by_idx_name_order_increasing);
+ }
+
+ if (attr_exists) {
+ H5_FAILED();
+ HDprintf(" attribute '%s' exists after deletion!\n", ATTRIBUTE_DELETION_TEST_ATTR_NAME);
+ PART_ERROR(H5Adelete_by_idx_name_order_increasing);
+ }
+
+ if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_DELETION_TEST_ATTR_NAME2)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if attribute '%s' exists\n",
+ ATTRIBUTE_DELETION_TEST_ATTR_NAME2);
+ PART_ERROR(H5Adelete_by_idx_name_order_increasing);
+ }
+
+ if (!attr_exists) {
+ H5_FAILED();
+ HDprintf(" attribute '%s' doesn't exist after deletion of a different attribute!\n",
+ ATTRIBUTE_DELETION_TEST_ATTR_NAME2);
+ PART_ERROR(H5Adelete_by_idx_name_order_increasing);
+ }
+
+ if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_DELETION_TEST_ATTR_NAME3)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if attribute '%s' exists\n",
+ ATTRIBUTE_DELETION_TEST_ATTR_NAME3);
+ PART_ERROR(H5Adelete_by_idx_name_order_increasing);
+ }
+
+ if (!attr_exists) {
+ H5_FAILED();
+ HDprintf(" attribute '%s' doesn't exist after deletion of a different attribute!\n",
+ ATTRIBUTE_DELETION_TEST_ATTR_NAME3);
+ PART_ERROR(H5Adelete_by_idx_name_order_increasing);
+ }
+
+ /* Repeat until all attributes have been deleted */
+ if (H5Adelete_by_idx(container_group, ATTRIBUTE_DELETION_TEST_GROUP_NAME, H5_INDEX_NAME,
+ H5_ITER_INC, 0, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to delete attribute using H5Adelete_by_idx by alphabetical order in "
+ "increasing order\n");
+ PART_ERROR(H5Adelete_by_idx_name_order_increasing);
+ }
+
+ if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_DELETION_TEST_ATTR_NAME)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if attribute '%s' exists\n",
+ ATTRIBUTE_DELETION_TEST_ATTR_NAME);
+ PART_ERROR(H5Adelete_by_idx_name_order_increasing);
+ }
+
+ if (attr_exists) {
+ H5_FAILED();
+ HDprintf(" attribute '%s' exists after deletion!\n", ATTRIBUTE_DELETION_TEST_ATTR_NAME);
+ PART_ERROR(H5Adelete_by_idx_name_order_increasing);
+ }
+
+ if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_DELETION_TEST_ATTR_NAME2)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if attribute '%s' exists\n",
+ ATTRIBUTE_DELETION_TEST_ATTR_NAME2);
+ PART_ERROR(H5Adelete_by_idx_name_order_increasing);
+ }
+
+ if (attr_exists) {
+ H5_FAILED();
+ HDprintf(" attribute '%s' exists after deletion!\n", ATTRIBUTE_DELETION_TEST_ATTR_NAME2);
+ PART_ERROR(H5Adelete_by_idx_name_order_increasing);
+ }
+
+ if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_DELETION_TEST_ATTR_NAME3)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if attribute '%s' exists\n",
+ ATTRIBUTE_DELETION_TEST_ATTR_NAME3);
+ PART_ERROR(H5Adelete_by_idx_name_order_increasing);
+ }
+
+ if (!attr_exists) {
+ H5_FAILED();
+ HDprintf(" attribute '%s' doesn't exist after deletion of a different attribute!\n",
+ ATTRIBUTE_DELETION_TEST_ATTR_NAME3);
+ PART_ERROR(H5Adelete_by_idx_name_order_increasing);
+ }
+
+ if (H5Adelete_by_idx(container_group, ATTRIBUTE_DELETION_TEST_GROUP_NAME, H5_INDEX_NAME,
+ H5_ITER_INC, 0, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to delete attribute using H5Adelete_by_idx by alphabetical order in "
+ "increasing order\n");
+ PART_ERROR(H5Adelete_by_idx_name_order_increasing);
+ }
+
+ if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_DELETION_TEST_ATTR_NAME)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if attribute '%s' exists\n",
+ ATTRIBUTE_DELETION_TEST_ATTR_NAME);
+ PART_ERROR(H5Adelete_by_idx_name_order_increasing);
+ }
+
+ if (attr_exists) {
+ H5_FAILED();
+ HDprintf(" attribute '%s' exists after deletion!\n", ATTRIBUTE_DELETION_TEST_ATTR_NAME);
+ PART_ERROR(H5Adelete_by_idx_name_order_increasing);
+ }
+
+ if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_DELETION_TEST_ATTR_NAME2)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if attribute '%s' exists\n",
+ ATTRIBUTE_DELETION_TEST_ATTR_NAME2);
+ PART_ERROR(H5Adelete_by_idx_name_order_increasing);
+ }
+
+ if (attr_exists) {
+ H5_FAILED();
+ HDprintf(" attribute '%s' exists after deletion!\n", ATTRIBUTE_DELETION_TEST_ATTR_NAME2);
+ PART_ERROR(H5Adelete_by_idx_name_order_increasing);
+ }
+
+ if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_DELETION_TEST_ATTR_NAME3)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if attribute '%s' exists\n",
+ ATTRIBUTE_DELETION_TEST_ATTR_NAME3);
+ PART_ERROR(H5Adelete_by_idx_name_order_increasing);
+ }
+
+ if (attr_exists) {
+ H5_FAILED();
+ HDprintf(" attribute '%s' exists after deletion!\n", ATTRIBUTE_DELETION_TEST_ATTR_NAME3);
+ PART_ERROR(H5Adelete_by_idx_name_order_increasing);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Adelete_by_idx_name_order_increasing);
+
+ H5E_BEGIN_TRY
+ {
+ H5Aclose(attr_id);
+ attr_id = H5I_INVALID_HID;
+ }
+ H5E_END_TRY;
+
+ PART_BEGIN(H5Adelete_by_idx_name_order_decreasing)
+ {
+ TESTING_2("H5Adelete_by_idx by alphabetical order in decreasing order");
+#ifndef NO_DECREASING_ALPHA_ITER_ORDER
+ /* Create several attributes */
+ if ((attr_id = H5Acreate2(group_id, ATTRIBUTE_DELETION_TEST_ATTR_NAME, attr_dtype, space_id,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create attribute '%s'\n", ATTRIBUTE_DELETION_TEST_ATTR_NAME);
+ PART_ERROR(H5Adelete_by_idx_name_order_decreasing);
+ }
+
+ if (H5Aclose(attr_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close attribute '%s'\n", ATTRIBUTE_DELETION_TEST_ATTR_NAME);
+ PART_ERROR(H5Adelete_by_idx_name_order_decreasing);
+ }
+
+ if ((attr_id = H5Acreate2(group_id, ATTRIBUTE_DELETION_TEST_ATTR_NAME2, attr_dtype, space_id,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create attribute '%s'\n", ATTRIBUTE_DELETION_TEST_ATTR_NAME2);
+ PART_ERROR(H5Adelete_by_idx_name_order_decreasing);
+ }
+
+ if (H5Aclose(attr_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close attribute '%s'\n", ATTRIBUTE_DELETION_TEST_ATTR_NAME2);
+ PART_ERROR(H5Adelete_by_idx_name_order_decreasing);
+ }
+
+ if ((attr_id = H5Acreate2(group_id, ATTRIBUTE_DELETION_TEST_ATTR_NAME3, attr_dtype, space_id,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create attribute '%s'\n", ATTRIBUTE_DELETION_TEST_ATTR_NAME3);
+ PART_ERROR(H5Adelete_by_idx_name_order_decreasing);
+ }
+
+ if (H5Aclose(attr_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close attribute '%s'\n", ATTRIBUTE_DELETION_TEST_ATTR_NAME3);
+ PART_ERROR(H5Adelete_by_idx_name_order_decreasing);
+ }
+
+ /* Verify the attributes have been created */
+ if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_DELETION_TEST_ATTR_NAME)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if attribute '%s' exists\n",
+ ATTRIBUTE_DELETION_TEST_ATTR_NAME);
+ PART_ERROR(H5Adelete_by_idx_name_order_decreasing);
+ }
+
+ if (!attr_exists) {
+ H5_FAILED();
+ HDprintf(" attribute '%s' didn't exist before deletion\n",
+ ATTRIBUTE_DELETION_TEST_ATTR_NAME);
+ PART_ERROR(H5Adelete_by_idx_name_order_decreasing);
+ }
+
+ if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_DELETION_TEST_ATTR_NAME2)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if attribute '%s' exists\n",
+ ATTRIBUTE_DELETION_TEST_ATTR_NAME2);
+ PART_ERROR(H5Adelete_by_idx_name_order_decreasing);
+ }
+
+ if (!attr_exists) {
+ H5_FAILED();
+ HDprintf(" attribute '%s' didn't exist before deletion\n",
+ ATTRIBUTE_DELETION_TEST_ATTR_NAME2);
+ PART_ERROR(H5Adelete_by_idx_name_order_decreasing);
+ }
+
+ if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_DELETION_TEST_ATTR_NAME3)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if attribute '%s' exists\n",
+ ATTRIBUTE_DELETION_TEST_ATTR_NAME3);
+ PART_ERROR(H5Adelete_by_idx_name_order_decreasing);
+ }
+
+ if (!attr_exists) {
+ H5_FAILED();
+ HDprintf(" attribute '%s' didn't exist before deletion\n",
+ ATTRIBUTE_DELETION_TEST_ATTR_NAME3);
+ PART_ERROR(H5Adelete_by_idx_name_order_decreasing);
+ }
+
+ /* Delete an attribute */
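+ /* Index 2 in decreasing name order should refer to the alphabetically first attribute name */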
+ if (H5Adelete_by_idx(container_group, ATTRIBUTE_DELETION_TEST_GROUP_NAME, H5_INDEX_NAME,
+ H5_ITER_DEC, 2, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to delete attribute using H5Adelete_by_idx by alphabetical order in "
+ "decreasing order\n");
+ PART_ERROR(H5Adelete_by_idx_name_order_decreasing);
+ }
+
+ /* Ensure that the attribute is gone and others remain */
+ if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_DELETION_TEST_ATTR_NAME)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if attribute '%s' exists\n",
+ ATTRIBUTE_DELETION_TEST_ATTR_NAME);
+ PART_ERROR(H5Adelete_by_idx_name_order_decreasing);
+ }
+
+ if (attr_exists) {
+ H5_FAILED();
+ HDprintf(" attribute '%s' exists after deletion!\n", ATTRIBUTE_DELETION_TEST_ATTR_NAME);
+ PART_ERROR(H5Adelete_by_idx_name_order_decreasing);
+ }
+
+ if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_DELETION_TEST_ATTR_NAME2)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if attribute '%s' exists\n",
+ ATTRIBUTE_DELETION_TEST_ATTR_NAME2);
+ PART_ERROR(H5Adelete_by_idx_name_order_decreasing);
+ }
+
+ if (!attr_exists) {
+ H5_FAILED();
+ HDprintf(" attribute '%s' doesn't exist after deletion of a different attribute!\n",
+ ATTRIBUTE_DELETION_TEST_ATTR_NAME2);
+ PART_ERROR(H5Adelete_by_idx_name_order_decreasing);
+ }
+
+ if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_DELETION_TEST_ATTR_NAME3)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if attribute '%s' exists\n",
+ ATTRIBUTE_DELETION_TEST_ATTR_NAME3);
+ PART_ERROR(H5Adelete_by_idx_name_order_decreasing);
+ }
+
+ if (!attr_exists) {
+ H5_FAILED();
+ HDprintf(" attribute '%s' doesn't exist after deletion of a different attribute!\n",
+ ATTRIBUTE_DELETION_TEST_ATTR_NAME3);
+ PART_ERROR(H5Adelete_by_idx_name_order_decreasing);
+ }
+
+ /* Repeat until all attributes have been deleted */
+ if (H5Adelete_by_idx(container_group, ATTRIBUTE_DELETION_TEST_GROUP_NAME, H5_INDEX_NAME,
+ H5_ITER_DEC, 1, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to delete attribute using H5Adelete_by_idx by alphabetical order in "
+ "decreasing order\n");
+ PART_ERROR(H5Adelete_by_idx_name_order_decreasing);
+ }
+
+ if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_DELETION_TEST_ATTR_NAME)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if attribute '%s' exists\n",
+ ATTRIBUTE_DELETION_TEST_ATTR_NAME);
+ PART_ERROR(H5Adelete_by_idx_name_order_decreasing);
+ }
+
+ if (attr_exists) {
+ H5_FAILED();
+ HDprintf(" attribute '%s' exists after deletion!\n", ATTRIBUTE_DELETION_TEST_ATTR_NAME);
+ PART_ERROR(H5Adelete_by_idx_name_order_decreasing);
+ }
+
+ if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_DELETION_TEST_ATTR_NAME2)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if attribute '%s' exists\n",
+ ATTRIBUTE_DELETION_TEST_ATTR_NAME2);
+ PART_ERROR(H5Adelete_by_idx_name_order_decreasing);
+ }
+
+ if (attr_exists) {
+ H5_FAILED();
+ HDprintf(" attribute '%s' exists after deletion!\n", ATTRIBUTE_DELETION_TEST_ATTR_NAME2);
+ PART_ERROR(H5Adelete_by_idx_name_order_decreasing);
+ }
+
+ if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_DELETION_TEST_ATTR_NAME3)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if attribute '%s' exists\n",
+ ATTRIBUTE_DELETION_TEST_ATTR_NAME3);
+ PART_ERROR(H5Adelete_by_idx_name_order_decreasing);
+ }
+
+ if (!attr_exists) {
+ H5_FAILED();
+ HDprintf(" attribute '%s' doesn't exist after deletion of a different attribute!\n",
+ ATTRIBUTE_DELETION_TEST_ATTR_NAME3);
+ PART_ERROR(H5Adelete_by_idx_name_order_decreasing);
+ }
+
+ if (H5Adelete_by_idx(container_group, ATTRIBUTE_DELETION_TEST_GROUP_NAME, H5_INDEX_NAME,
+ H5_ITER_DEC, 0, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to delete attribute using H5Adelete_by_idx by alphabetical order in "
+ "decreasing order\n");
+ PART_ERROR(H5Adelete_by_idx_name_order_decreasing);
+ }
+
+ if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_DELETION_TEST_ATTR_NAME)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if attribute '%s' exists\n",
+ ATTRIBUTE_DELETION_TEST_ATTR_NAME);
+ PART_ERROR(H5Adelete_by_idx_name_order_decreasing);
+ }
+
+ if (attr_exists) {
+ H5_FAILED();
+ HDprintf(" attribute '%s' exists after deletion!\n", ATTRIBUTE_DELETION_TEST_ATTR_NAME);
+ PART_ERROR(H5Adelete_by_idx_name_order_decreasing);
+ }
+
+ if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_DELETION_TEST_ATTR_NAME2)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if attribute '%s' exists\n",
+ ATTRIBUTE_DELETION_TEST_ATTR_NAME2);
+ PART_ERROR(H5Adelete_by_idx_name_order_decreasing);
+ }
+
+ if (attr_exists) {
+ H5_FAILED();
+ HDprintf(" attribute '%s' exists after deletion!\n", ATTRIBUTE_DELETION_TEST_ATTR_NAME2);
+ PART_ERROR(H5Adelete_by_idx_name_order_decreasing);
+ }
+
+ if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_DELETION_TEST_ATTR_NAME3)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if attribute '%s' exists\n",
+ ATTRIBUTE_DELETION_TEST_ATTR_NAME3);
+ PART_ERROR(H5Adelete_by_idx_name_order_decreasing);
+ }
+
+ if (attr_exists) {
+ H5_FAILED();
+ HDprintf(" attribute '%s' exists after deletion!\n", ATTRIBUTE_DELETION_TEST_ATTR_NAME3);
+ PART_ERROR(H5Adelete_by_idx_name_order_decreasing);
+ }
+
+ PASSED();
+#else
+ SKIPPED();
+ PART_EMPTY(H5Adelete_by_idx_name_order_decreasing);
+#endif
+ }
+ PART_END(H5Adelete_by_idx_name_order_decreasing);
+
+ H5E_BEGIN_TRY
+ {
+ H5Aclose(attr_id);
+ attr_id = H5I_INVALID_HID;
+ }
+ H5E_END_TRY;
+ }
+ END_MULTIPART;
+
+ TESTING_2("test cleanup");
+
+ if (H5Sclose(space_id) < 0)
+ TEST_ERROR;
+ if (H5Tclose(attr_dtype) < 0)
+ TEST_ERROR;
+ if (H5Pclose(gcpl_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(space_id);
+ H5Tclose(attr_dtype);
+ H5Aclose(attr_id);
+ H5Pclose(gcpl_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to check that an attribute can't be deleted
+ * when H5Adelete(_by_name/_by_idx) is passed invalid
+ * parameters.
+ */
+static int
+test_delete_attribute_invalid_params(void)
+{
+ herr_t err_ret = -1;
+ htri_t attr_exists;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t attr_id = H5I_INVALID_HID;
+ hid_t attr_dtype = H5I_INVALID_HID;
+ hid_t space_id = H5I_INVALID_HID;
+
+ TESTING_MULTIPART("attribute deletion with invalid parameters");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC)) {
+ SKIPPED();
+ HDprintf(
+ " API functions for basic file, group, or attribute aren't supported with this connector\n");
+ return 0;
+ }
+
+ TESTING_2("test setup");
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, ATTRIBUTE_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", ATTRIBUTE_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, ATTRIBUTE_DELETION_INVALID_PARAMS_TEST_GROUP_NAME,
+ H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container group '%s'\n",
+ ATTRIBUTE_DELETION_INVALID_PARAMS_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((space_id = generate_random_dataspace(ATTRIBUTE_DELETION_INVALID_PARAMS_TEST_SPACE_RANK, NULL, NULL,
+ TRUE)) < 0)
+ TEST_ERROR;
+
+ if ((attr_dtype = generate_random_datatype(H5T_NO_CLASS, TRUE)) < 0)
+ TEST_ERROR;
+
+ if ((attr_id = H5Acreate2(group_id, ATTRIBUTE_DELETION_INVALID_PARAMS_TEST_ATTR_NAME, attr_dtype,
+ space_id, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create attribute\n");
+ goto error;
+ }
+
+ /* Verify the attribute has been created */
+ if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_DELETION_INVALID_PARAMS_TEST_ATTR_NAME)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if attribute exists\n");
+ goto error;
+ }
+
+ if (!attr_exists) {
+ H5_FAILED();
+ HDprintf(" attribute didn't exists\n");
+ goto error;
+ }
+
+ PASSED();
+
+ BEGIN_MULTIPART
+ {
+ PART_BEGIN(H5Adelete_invalid_loc_id)
+ {
+ TESTING_2("H5Adelete with an invalid loc_id");
+
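+ /* Each H5Adelete* call in the parts below is expected to fail; H5E_BEGIN/END_TRY suppresses the error stack output */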
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Adelete(H5I_INVALID_HID, ATTRIBUTE_DELETION_INVALID_PARAMS_TEST_ATTR_NAME);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" deleted an attribute using H5Adelete with an invalid loc_id!\n");
+ PART_ERROR(H5Adelete_invalid_loc_id);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Adelete_invalid_loc_id);
+
+ PART_BEGIN(H5Adelete_invalid_attr_name)
+ {
+ TESTING_2("H5Adelete with an invalid attribute name");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Adelete(group_id, NULL);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" deleted an attribute using H5Adelete with a NULL attribute name!\n");
+ PART_ERROR(H5Adelete_invalid_attr_name);
+ }
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Adelete(group_id, "");
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" deleted an attribute using H5Adelete with an invalid attribute name of ''!\n");
+ PART_ERROR(H5Adelete_invalid_attr_name);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Adelete_invalid_attr_name);
+
+ PART_BEGIN(H5Adelete_by_name_invalid_loc_id)
+ {
+ TESTING_2("H5Adelete_by_name with an invalid loc_id");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Adelete_by_name(H5I_INVALID_HID, ".",
+ ATTRIBUTE_DELETION_INVALID_PARAMS_TEST_ATTR_NAME, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" deleted an attribute using H5Adelete_by_name with an invalid loc_id!\n");
+ PART_ERROR(H5Adelete_by_name_invalid_loc_id);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Adelete_by_name_invalid_loc_id);
+
+ PART_BEGIN(H5Adelete_by_name_invalid_obj_name)
+ {
+ TESTING_2("H5Adelete_by_name with an invalid object name");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Adelete_by_name(group_id, NULL, ATTRIBUTE_DELETION_INVALID_PARAMS_TEST_ATTR_NAME,
+ H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" deleted an attribute using H5Adelete_by_name with a NULL object name!\n");
+ PART_ERROR(H5Adelete_by_name_invalid_obj_name);
+ }
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Adelete_by_name(group_id, "", ATTRIBUTE_DELETION_INVALID_PARAMS_TEST_ATTR_NAME,
+ H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(
+ " deleted an attribute using H5Adelete_by_name with an invalid object name of ''!\n");
+ PART_ERROR(H5Adelete_by_name_invalid_obj_name);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Adelete_by_name_invalid_obj_name);
+
+ PART_BEGIN(H5Adelete_by_name_invalid_attr_name)
+ {
+ TESTING_2("H5Adelete_by_name with an invalid attribute name");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Adelete_by_name(group_id, ".", NULL, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" deleted an attribute using H5Adelete_by_name with a NULL attribute name!\n");
+ PART_ERROR(H5Adelete_by_name_invalid_attr_name);
+ }
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Adelete_by_name(group_id, ".", "", H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" deleted an attribute using H5Adelete_by_name with an invalid attribute name of "
+ "''!\n");
+ PART_ERROR(H5Adelete_by_name_invalid_attr_name);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Adelete_by_name_invalid_attr_name);
+
+ PART_BEGIN(H5Adelete_by_name_invalid_lapl)
+ {
+ TESTING_2("H5Adelete_by_name with an invalid LAPL");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Adelete_by_name(group_id, ".", ATTRIBUTE_DELETION_INVALID_PARAMS_TEST_ATTR_NAME,
+ H5I_INVALID_HID);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" deleted an attribute using H5Adelete_by_name with an invalid LAPL!\n");
+ PART_ERROR(H5Adelete_by_name_invalid_lapl);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Adelete_by_name_invalid_lapl);
+
+ PART_BEGIN(H5Adelete_by_idx_invalid_loc_id)
+ {
+ TESTING_2("H5Adelete_by_idx with an invalid loc_id");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Adelete_by_idx(H5I_INVALID_HID, ".", H5_INDEX_NAME, H5_ITER_INC, 0, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" deleted an attribute using H5Adelete_by_idx with an invalid loc_id!\n");
+ PART_ERROR(H5Adelete_by_idx_invalid_loc_id);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Adelete_by_idx_invalid_loc_id);
+
+ PART_BEGIN(H5Adelete_by_idx_invalid_obj_name)
+ {
+ TESTING_2("H5Adelete_by_idx with an invalid object name");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Adelete_by_idx(group_id, NULL, H5_INDEX_NAME, H5_ITER_INC, 0, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" deleted an attribute using H5Adelete_by_idx with a NULL object name!\n");
+ PART_ERROR(H5Adelete_by_idx_invalid_obj_name);
+ }
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Adelete_by_idx(group_id, "", H5_INDEX_NAME, H5_ITER_INC, 0, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(
+ " deleted an attribute using H5Adelete_by_idx with an invalid object name of ''!\n");
+ PART_ERROR(H5Adelete_by_idx_invalid_obj_name);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Adelete_by_idx_invalid_obj_name);
+
+ PART_BEGIN(H5Adelete_by_idx_invalid_index_type)
+ {
+ TESTING_2("H5Adelete_by_idx with an invalid index type");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Adelete_by_idx(group_id, ".", H5_INDEX_UNKNOWN, H5_ITER_INC, 0, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" deleted an attribute using H5Adelete_by_idx with invalid index type "
+ "H5_INDEX_UNKNOWN!\n");
+ PART_ERROR(H5Adelete_by_idx_invalid_index_type);
+ }
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Adelete_by_idx(group_id, ".", H5_INDEX_N, H5_ITER_INC, 0, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(
+ " deleted an attribute using H5Adelete_by_idx with invalid index type H5_INDEX_N!\n");
+ PART_ERROR(H5Adelete_by_idx_invalid_index_type);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Adelete_by_idx_invalid_index_type);
+
+ PART_BEGIN(H5Adelete_by_idx_invalid_index_order)
+ {
+ TESTING_2("H5Adelete_by_idx with an invalid index ordering");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Adelete_by_idx(group_id, ".", H5_INDEX_NAME, H5_ITER_UNKNOWN, 0, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" deleted an attribute using H5Adelete_by_idx with invalid index ordering "
+ "H5_ITER_UNKNOWN!\n");
+ PART_ERROR(H5Adelete_by_idx_invalid_index_order);
+ }
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Adelete_by_idx(group_id, ".", H5_INDEX_NAME, H5_ITER_N, 0, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" deleted an attribute using H5Adelete_by_idx with invalid index ordering "
+ "H5_ITER_N!\n");
+ PART_ERROR(H5Adelete_by_idx_invalid_index_order);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Adelete_by_idx_invalid_index_order);
+
+ PART_BEGIN(H5Adelete_by_idx_invalid_lapl)
+ {
+ TESTING_2("H5Adelete_by_idx with an invalid LAPL");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Adelete_by_idx(group_id, ".", H5_INDEX_NAME, H5_ITER_INC, 0, H5I_INVALID_HID);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" deleted an attribute using H5Adelete_by_idx with an invalid LAPL!\n");
+ PART_ERROR(H5Adelete_by_idx_invalid_lapl);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Adelete_by_idx_invalid_lapl);
+ }
+ END_MULTIPART;
+
+ TESTING_2("test cleanup");
+
+ if (H5Sclose(space_id) < 0)
+ TEST_ERROR;
+ if (H5Tclose(attr_dtype) < 0)
+ TEST_ERROR;
+ if (H5Aclose(attr_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(space_id);
+ H5Tclose(attr_dtype);
+ H5Aclose(attr_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test for H5Aexists and H5Aexists_by_name.
+ */
+static int
+test_attribute_exists(void)
+{
+ htri_t attr_exists;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t attr_id = H5I_INVALID_HID;
+ hid_t attr_dtype = H5I_INVALID_HID;
+ hid_t space_id = H5I_INVALID_HID;
+
+ TESTING_MULTIPART("attribute existence");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC)) {
+ SKIPPED();
+ HDprintf(
+ " API functions for basic file, group, or attribute aren't supported with this connector\n");
+ return 0;
+ }
+
+ TESTING_2("test setup");
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, ATTRIBUTE_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group\n");
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, ATTRIBUTE_EXISTS_TEST_GROUP_NAME, H5P_DEFAULT, H5P_DEFAULT,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container group '%s'\n", ATTRIBUTE_EXISTS_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((space_id = generate_random_dataspace(ATTRIBUTE_EXISTS_TEST_SPACE_RANK, NULL, NULL, TRUE)) < 0)
+ TEST_ERROR;
+
+ if ((attr_dtype = generate_random_datatype(H5T_NO_CLASS, TRUE)) < 0)
+ TEST_ERROR;
+
+ if ((attr_id = H5Acreate2(group_id, ATTRIBUTE_EXISTS_TEST_ATTR_NAME, attr_dtype, space_id, H5P_DEFAULT,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create attribute\n");
+ goto error;
+ }
+
+ PASSED();
+
+ BEGIN_MULTIPART
+ {
+ PART_BEGIN(H5Aexists)
+ {
+ TESTING_2("H5Aexists");
+
+ if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_EXISTS_TEST_ATTR_NAME)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if attribute exists\n");
+ PART_ERROR(H5Aexists);
+ }
+
+ if (!attr_exists) {
+ H5_FAILED();
+ HDprintf(" attribute '%s' did not exist\n", ATTRIBUTE_EXISTS_TEST_ATTR_NAME);
+ PART_ERROR(H5Aexists);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Aexists);
+
+ PART_BEGIN(H5Aexists_by_name)
+ {
+ TESTING_2("H5Aexists_by_name");
+
+ if ((attr_exists = H5Aexists_by_name(container_group, ATTRIBUTE_EXISTS_TEST_GROUP_NAME,
+ ATTRIBUTE_EXISTS_TEST_ATTR_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if attribute exists by name\n");
+ PART_ERROR(H5Aexists_by_name);
+ }
+
+ if (!attr_exists) {
+ H5_FAILED();
+ HDprintf(" attribute '%s' did not exist by name\n", ATTRIBUTE_EXISTS_TEST_ATTR_NAME);
+ PART_ERROR(H5Aexists_by_name);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Aexists_by_name);
+ }
+ END_MULTIPART;
+
+ TESTING_2("test cleanup");
+
+ if (H5Aclose(attr_id) < 0)
+ TEST_ERROR;
+ if (H5Sclose(space_id) < 0)
+ TEST_ERROR;
+ if (H5Tclose(attr_dtype) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(space_id);
+ H5Tclose(attr_dtype);
+ H5Aclose(attr_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to ensure that H5Aexists(_by_name) will fail when
+ * given invalid parameters.
+ */
+static int
+test_attribute_exists_invalid_params(void)
+{
+ herr_t err_ret = -1;
+ htri_t attr_exists;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t attr_id = H5I_INVALID_HID;
+ hid_t attr_dtype = H5I_INVALID_HID;
+ hid_t space_id = H5I_INVALID_HID;
+
+ TESTING_MULTIPART("attribute existence with invalid parameters");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC)) {
+ SKIPPED();
+ HDprintf(
+ " API functions for basic file, group, or attribute aren't supported with this connector\n");
+ return 0;
+ }
+
+ TESTING_2("test setup");
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, ATTRIBUTE_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", ATTRIBUTE_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, ATTRIBUTE_EXISTS_INVALID_PARAMS_TEST_GROUP_NAME, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container group '%s'\n",
+ ATTRIBUTE_EXISTS_INVALID_PARAMS_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((space_id = generate_random_dataspace(ATTRIBUTE_EXISTS_INVALID_PARAMS_TEST_SPACE_RANK, NULL, NULL,
+ TRUE)) < 0)
+ TEST_ERROR;
+
+ if ((attr_dtype = generate_random_datatype(H5T_NO_CLASS, TRUE)) < 0)
+ TEST_ERROR;
+
+ if ((attr_id = H5Acreate2(group_id, ATTRIBUTE_EXISTS_INVALID_PARAMS_TEST_ATTR_NAME, attr_dtype, space_id,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create attribute\n");
+ goto error;
+ }
+
+ /* Verify the attribute has been created */
+ if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_EXISTS_INVALID_PARAMS_TEST_ATTR_NAME)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if attribute exists\n");
+ goto error;
+ }
+
+ if (!attr_exists) {
+ H5_FAILED();
+ HDprintf(" attribute didn't exists\n");
+ goto error;
+ }
+
+ PASSED();
+
+ BEGIN_MULTIPART
+ {
+ PART_BEGIN(H5Aexists_invalid_loc_id)
+ {
+ TESTING_2("H5Aexists with an invalid loc_id");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Aexists(H5I_INVALID_HID, ATTRIBUTE_EXISTS_INVALID_PARAMS_TEST_ATTR_NAME);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Aexists with an invalid loc_id succeeded!\n");
+ PART_ERROR(H5Aexists_invalid_loc_id);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Aexists_invalid_loc_id);
+
+ PART_BEGIN(H5Aexists_invalid_attr_name)
+ {
+ TESTING_2("H5Aexists with invalid attribute name");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Aexists(group_id, NULL);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Aexists with a NULL attribute name succeeded!\n");
+ PART_ERROR(H5Aexists_invalid_attr_name);
+ }
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Aexists(group_id, "");
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Aexists with an invalid attribute name of '' succeeded!\n");
+ PART_ERROR(H5Aexists_invalid_attr_name);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Aexists_invalid_attr_name);
+
+ PART_BEGIN(H5Aexists_by_name_invalid_loc_id)
+ {
+ TESTING_2("H5Aexists_by_name with an invalid loc_id");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Aexists_by_name(H5I_INVALID_HID, ATTRIBUTE_EXISTS_INVALID_PARAMS_TEST_GROUP_NAME,
+ ATTRIBUTE_EXISTS_INVALID_PARAMS_TEST_ATTR_NAME, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Aexists_by_name with an invalid loc_id succeeded!\n");
+ PART_ERROR(H5Aexists_by_name_invalid_loc_id);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Aexists_by_name_invalid_loc_id);
+
+ PART_BEGIN(H5Aexists_by_name_invalid_obj_name)
+ {
+ TESTING_2("H5Aexists_by_name with invalid object name");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Aexists_by_name(file_id, NULL, ATTRIBUTE_EXISTS_INVALID_PARAMS_TEST_ATTR_NAME,
+ H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Aexists_by_name with a NULL object name succeeded!\n");
+ PART_ERROR(H5Aexists_by_name_invalid_obj_name);
+ }
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Aexists_by_name(file_id, "", ATTRIBUTE_EXISTS_INVALID_PARAMS_TEST_ATTR_NAME,
+ H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Aexists_by_name with an invalid object name of '' succeeded!\n");
+ PART_ERROR(H5Aexists_by_name_invalid_obj_name);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Aexists_by_name_invalid_obj_name);
+
+ PART_BEGIN(H5Aexists_by_name_invalid_attr_name)
+ {
+ TESTING_2("H5Aexists_by_name with invalid attribute name");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Aexists_by_name(file_id, ATTRIBUTE_EXISTS_INVALID_PARAMS_TEST_GROUP_NAME, NULL,
+ H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Aexists_by_name with a NULL attribute name succeeded!\n");
+ PART_ERROR(H5Aexists_by_name_invalid_attr_name);
+ }
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Aexists_by_name(file_id, ATTRIBUTE_EXISTS_INVALID_PARAMS_TEST_GROUP_NAME, "",
+ H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Aexists_by_name with an invalid attribute name of '' succeeded!\n");
+ PART_ERROR(H5Aexists_by_name_invalid_attr_name);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Aexists_by_name_invalid_attr_name);
+
+ PART_BEGIN(H5Aexists_by_name_invalid_lapl)
+ {
+ TESTING_2("H5Aexists_by_name with an invalid link access property list");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Aexists_by_name(file_id, ATTRIBUTE_EXISTS_INVALID_PARAMS_TEST_GROUP_NAME,
+ ATTRIBUTE_EXISTS_INVALID_PARAMS_TEST_ATTR_NAME, H5I_INVALID_HID);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Aexists_by_name with an invalid link access property list succeeded!\n");
+ PART_ERROR(H5Aexists_by_name_invalid_lapl);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Aexists_by_name_invalid_lapl);
+ }
+ END_MULTIPART;
+
+ TESTING_2("test cleanup");
+
+ if (H5Aclose(attr_id) < 0)
+ TEST_ERROR;
+ if (H5Sclose(space_id) < 0)
+ TEST_ERROR;
+ if (H5Tclose(attr_dtype) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(space_id);
+ H5Tclose(attr_dtype);
+ H5Aclose(attr_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to make sure that many attributes can be created
+ * within the file
+ */
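+/*
+ * For illustration only (derived from the loop below, not additional test
+ * code): with ATTRIBUTE_MANY_NUMB == 64 and the "many-%06u" format, the
+ * attributes are named "many-000000" through "many-000063", so the 32-byte
+ * ATTRIBUTE_MANY_NAME_BUF_SIZE buffer comfortably holds each name.
+ */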
+static int
+test_attribute_many(void)
+{
+ unsigned u;
+ htri_t attr_exists;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t attr_id = H5I_INVALID_HID;
+ hid_t attr_dtype = H5I_INVALID_HID;
+ hid_t space_id = H5I_INVALID_HID;
+ char attrname[ATTRIBUTE_MANY_NAME_BUF_SIZE]; /* Name of attribute */
+
+ TESTING("creating many attributes");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC)) {
+ SKIPPED();
+ HDprintf(
+ " API functions for basic file, group, or attribute aren't supported with this connector\n");
+ return 0;
+ }
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file\n");
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, ATTRIBUTE_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group\n");
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, ATTRIBUTE_MANY_GROUP_NAME, H5P_DEFAULT, H5P_DEFAULT,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create the group '%s'\n", ATTRIBUTE_MANY_GROUP_NAME);
+ goto error;
+ }
+
+ if ((space_id = generate_random_dataspace(ATTRIBUTE_MANY_SPACE_RANK, NULL, NULL, TRUE)) < 0)
+ TEST_ERROR;
+
+ if ((attr_dtype = generate_random_datatype(H5T_NO_CLASS, TRUE)) < 0)
+ TEST_ERROR;
+
+ /* Create many attributes */
+ for (u = 0; u < ATTRIBUTE_MANY_NUMB; u++) {
+        HDsnprintf(attrname, ATTRIBUTE_MANY_NAME_BUF_SIZE, "many-%06u", u);
+
+ if ((attr_id = H5Acreate2(group_id, attrname, attr_dtype, space_id, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create attribute\n");
+ goto error;
+ }
+
+ /* Verify the attribute has been created */
+ if ((attr_exists = H5Aexists(group_id, attrname)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if attribute exists\n");
+ goto error;
+ }
+
+ if (!attr_exists) {
+ H5_FAILED();
+ HDprintf(" attribute did not exist\n");
+ goto error;
+ }
+
+ if (H5Aclose(attr_id) < 0)
+ TEST_ERROR;
+ }
+
+ if (H5Sclose(space_id) < 0)
+ TEST_ERROR;
+ if (H5Tclose(attr_dtype) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(space_id);
+ H5Tclose(attr_dtype);
+ H5Aclose(attr_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to make sure an attribute can be opened for
+ * a second time
+ */
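+/*
+ * Note (summarizing the checks below): H5Aopen on an attribute that is
+ * already open returns a second, independent identifier, and both IDs
+ * must be released with separate H5Aclose calls.
+ */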
+static int
+test_attribute_duplicate_id(void)
+{
+ htri_t attr_exists;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t attr_id = H5I_INVALID_HID, attr_id2 = H5I_INVALID_HID;
+ hid_t attr_dtype = H5I_INVALID_HID;
+ hid_t space_id = H5I_INVALID_HID;
+
+ TESTING("duplicated IDs for an attribute");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC)) {
+ SKIPPED();
+ HDprintf(
+ " API functions for basic file, group, or attribute aren't supported with this connector\n");
+ return 0;
+ }
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file\n");
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, ATTRIBUTE_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group\n");
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, ATTRIBUTE_DUPLICATE_ID_GRP_NAME, H5P_DEFAULT, H5P_DEFAULT,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create the group '%s'\n", ATTRIBUTE_DUPLICATE_ID_GRP_NAME);
+ goto error;
+ }
+
+ if ((space_id = generate_random_dataspace(ATTRIBUTE_DUPLICATE_ID_SPACE_RANK, NULL, NULL, TRUE)) < 0)
+ TEST_ERROR;
+
+ if ((attr_dtype = generate_random_datatype(H5T_NO_CLASS, TRUE)) < 0)
+ TEST_ERROR;
+
+ if ((attr_id = H5Acreate2(group_id, ATTRIBUTE_DUPLICATE_ID_ATTR_NAME, attr_dtype, space_id, H5P_DEFAULT,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create attribute\n");
+ goto error;
+ }
+
+ /* Verify the attribute has been created */
+ if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_DUPLICATE_ID_ATTR_NAME)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if attribute exists\n");
+ goto error;
+ }
+
+ if (!attr_exists) {
+ H5_FAILED();
+ HDprintf(" attribute did not exist\n");
+ goto error;
+ }
+
+ /* Open the attribute just created and get a second ID */
+ if ((attr_id2 = H5Aopen(group_id, ATTRIBUTE_DUPLICATE_ID_ATTR_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+        HDprintf("    couldn't open the attribute a second time\n");
+ goto error;
+ }
+
+ if (H5Aclose(attr_id) < 0)
+ TEST_ERROR;
+ if (H5Aclose(attr_id2) < 0)
+ TEST_ERROR;
+ if (H5Sclose(space_id) < 0)
+ TEST_ERROR;
+ if (H5Tclose(attr_dtype) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(space_id);
+ H5Tclose(attr_dtype);
+ H5Aclose(attr_id);
+ H5Aclose(attr_id2);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to check that the number of attributes attached
+ * to an object (group, dataset, datatype) can be retrieved.
+ *
+ * XXX: Cover all of the cases and move to H5O tests.
+ */
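+/*
+ * In outline, each part below exercises the same pattern through a
+ * different routine (a simplified sketch, not additional test code):
+ *
+ *     H5O_info2_t info;
+ *
+ *     if (H5Oget_info3(group_id, &info, H5O_INFO_ALL) < 0)
+ *         ...error...
+ *
+ *     info.num_attrs then holds the attribute count.
+ *
+ * H5Oget_info_by_name3 and H5Oget_info_by_idx3 fill the same num_attrs
+ * field, addressing the object by name or by index instead of by ID.
+ */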
+static int
+test_get_number_attributes(void)
+{
+ H5O_info2_t obj_info;
+ htri_t attr_exists;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t attr_id = H5I_INVALID_HID;
+ hid_t attr_dtype = H5I_INVALID_HID;
+ hid_t space_id = H5I_INVALID_HID;
+
+ TESTING_MULTIPART("retrieval of the number of attributes on an object");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_OBJECT_MORE)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file, group, attribute, or object aren't supported with this "
+ "connector\n");
+ return 0;
+ }
+
+ TESTING_2("test setup");
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file\n");
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, ATTRIBUTE_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group\n");
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, ATTRIBUTE_GET_NUM_ATTRS_TEST_GRP_NAME, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create the group '%s'\n", ATTRIBUTE_GET_NUM_ATTRS_TEST_GRP_NAME);
+ goto error;
+ }
+
+ if ((space_id = generate_random_dataspace(ATTRIBUTE_GET_NUM_ATTRS_TEST_SPACE_RANK, NULL, NULL, TRUE)) < 0)
+ TEST_ERROR;
+
+ if ((attr_dtype = generate_random_datatype(H5T_NO_CLASS, TRUE)) < 0)
+ TEST_ERROR;
+
+ if ((attr_id = H5Acreate2(group_id, ATTRIBUTE_GET_NUM_ATTRS_TEST_ATTR_NAME, attr_dtype, space_id,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create attribute\n");
+ goto error;
+ }
+
+ /* Verify the attribute has been created */
+ if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_GET_NUM_ATTRS_TEST_ATTR_NAME)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if attribute exists\n");
+ goto error;
+ }
+
+ if (!attr_exists) {
+ H5_FAILED();
+ HDprintf(" attribute did not exist\n");
+ goto error;
+ }
+
+ PASSED();
+
+ BEGIN_MULTIPART
+ {
+ PART_BEGIN(H5Oget_info)
+ {
+ TESTING_2("H5Oget_info");
+
+ /* Now get the number of attributes from the group */
+ if (H5Oget_info3(group_id, &obj_info, H5O_INFO_ALL) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't retrieve group info using H5Oget_info3\n");
+ PART_ERROR(H5Oget_info);
+ }
+
+ if (obj_info.num_attrs != 1) {
+ H5_FAILED();
+ HDprintf(" invalid number of attributes received\n");
+ PART_ERROR(H5Oget_info);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Oget_info);
+
+ PART_BEGIN(H5Oget_info_by_name)
+ {
+ TESTING_2("H5Oget_info_by_name");
+
+ if (H5Oget_info_by_name3(container_group, ATTRIBUTE_GET_NUM_ATTRS_TEST_GRP_NAME, &obj_info,
+ H5O_INFO_ALL, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't retrieve group info using H5Oget_info_by_name3\n");
+ PART_ERROR(H5Oget_info_by_name);
+ }
+
+ if (obj_info.num_attrs != 1) {
+ H5_FAILED();
+ HDprintf(" invalid number of attributes received\n");
+ PART_ERROR(H5Oget_info_by_name);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Oget_info_by_name);
+
+ PART_BEGIN(H5Oget_info_by_idx)
+ {
+ TESTING_2("H5Oget_info_by_idx");
+
+ if (H5Oget_info_by_idx3(container_group, ".", H5_INDEX_NAME, H5_ITER_INC, 0, &obj_info,
+ H5O_INFO_ALL, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't retrieve group info using H5Oget_info_by_idx3\n");
+ PART_ERROR(H5Oget_info_by_idx);
+ }
+
+ if (obj_info.num_attrs != 1) {
+ H5_FAILED();
+ HDprintf(" invalid number of attributes received\n");
+ PART_ERROR(H5Oget_info_by_idx);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Oget_info_by_idx);
+ }
+ END_MULTIPART;
+
+ TESTING_2("test cleanup");
+
+ if (H5Sclose(space_id) < 0)
+ TEST_ERROR;
+ if (H5Tclose(attr_dtype) < 0)
+ TEST_ERROR;
+ if (H5Aclose(attr_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(space_id);
+ H5Tclose(attr_dtype);
+ H5Aclose(attr_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to check that the reference count of a named datatype used by
+ * an attribute and a dataset is correct.
+ *
+ * XXX: May move to H5O tests.
+ */
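+/*
+ * Expected reference-count progression exercised below: committing the
+ * datatype yields a count of 1, creating an attribute with it raises the
+ * count to 2, and creating a dataset with it raises the count to 3.
+ */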
+static int
+test_attr_shared_dtype(void)
+{
+#ifndef NO_SHARED_DATATYPES
+ H5O_info2_t obj_info;
+ htri_t attr_exists;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t attr_id = H5I_INVALID_HID;
+ hid_t attr_dtype = H5I_INVALID_HID;
+ hid_t space_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+#endif
+
+ TESTING("shared datatype for attributes");
+
+#ifndef NO_SHARED_DATATYPES
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_STORED_DATATYPES) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_OBJECT_MORE)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file, group, attribute, stored datatype, or object aren't "
+ "supported with this connector\n");
+ return 0;
+ }
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file\n");
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, ATTRIBUTE_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group\n");
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, ATTRIBUTE_SHARED_DTYPE_GROUP_NAME, H5P_DEFAULT, H5P_DEFAULT,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create the group '%s'\n", ATTRIBUTE_SHARED_DTYPE_GROUP_NAME);
+ goto error;
+ }
+
+ if ((space_id = generate_random_dataspace(ATTRIBUTE_SHARED_DTYPE_SPACE_RANK, NULL, NULL, TRUE)) < 0)
+ TEST_ERROR;
+
+ if ((attr_dtype = generate_random_datatype(H5T_NO_CLASS, TRUE)) < 0)
+ TEST_ERROR;
+
+ /* Commit datatype to file */
+ if (H5Tcommit2(group_id, ATTRIBUTE_SHARED_DTYPE_NAME, attr_dtype, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't commit datatype\n");
+ goto error;
+ }
+
+ if (H5Oget_info_by_name3(group_id, ATTRIBUTE_SHARED_DTYPE_NAME, &obj_info, H5O_INFO_ALL, H5P_DEFAULT) <
+ 0) {
+ H5_FAILED();
+        HDprintf("    couldn't retrieve committed datatype info using H5Oget_info_by_name3\n");
+ goto error;
+ }
+
+ if (obj_info.rc != 1) {
+ H5_FAILED();
+ HDprintf(" reference count of the named datatype is wrong: %u\n", obj_info.rc);
+ goto error;
+ }
+
+ if ((attr_id = H5Acreate2(group_id, ATTRIBUTE_SHARED_DTYPE_ATTR_NAME, attr_dtype, space_id, H5P_DEFAULT,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create attribute\n");
+ goto error;
+ }
+
+ /* Verify the attribute has been created */
+ if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_SHARED_DTYPE_ATTR_NAME)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if attribute exists\n");
+ goto error;
+ }
+
+ if (!attr_exists) {
+ H5_FAILED();
+ HDprintf(" attribute did not exist\n");
+ goto error;
+ }
+
+ if (H5Oget_info_by_name3(group_id, ATTRIBUTE_SHARED_DTYPE_NAME, &obj_info, H5O_INFO_ALL, H5P_DEFAULT) <
+ 0) {
+ H5_FAILED();
+        HDprintf("    couldn't retrieve committed datatype info using H5Oget_info_by_name3\n");
+ goto error;
+ }
+
+ if (obj_info.rc != 2) {
+ H5_FAILED();
+ HDprintf(" reference count of the named datatype is wrong: %u\n", obj_info.rc);
+ goto error;
+ }
+
+ if ((dset_id = H5Dcreate2(group_id, ATTRIBUTE_SHARED_DTYPE_DSET_NAME, attr_dtype, space_id, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create dataset\n");
+ goto error;
+ }
+
+ if (H5Oget_info_by_name3(group_id, ATTRIBUTE_SHARED_DTYPE_NAME, &obj_info, H5O_INFO_ALL, H5P_DEFAULT) <
+ 0) {
+ H5_FAILED();
+        HDprintf("    couldn't retrieve committed datatype info using H5Oget_info_by_name3\n");
+ goto error;
+ }
+
+ if (obj_info.rc != 3) {
+ H5_FAILED();
+ HDprintf(" reference count of the named datatype is wrong: %u\n", obj_info.rc);
+ goto error;
+ }
+
+    if (H5Aclose(attr_id) < 0)
+        TEST_ERROR;
+    if (H5Dclose(dset_id) < 0)
+        TEST_ERROR;
+    if (H5Sclose(space_id) < 0)
+        TEST_ERROR;
+    if (H5Tclose(attr_dtype) < 0)
+        TEST_ERROR;
+    if (H5Gclose(group_id) < 0)
+        TEST_ERROR;
+    if (H5Gclose(container_group) < 0)
+        TEST_ERROR;
+    if (H5Fclose(file_id) < 0)
+        TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+        H5Sclose(space_id);
+        H5Tclose(attr_dtype);
+        H5Aclose(attr_id);
+        H5Dclose(dset_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+#else
+ SKIPPED();
+ return 0;
+#endif
+}
+
+static herr_t
+attr_iter_callback1(hid_t location_id, const char *attr_name, const H5A_info_t *ainfo, void *op_data)
+{
+ size_t *i = (size_t *)op_data;
+ size_t counter_val = *((size_t *)op_data);
+ size_t test_iteration;
+ char expected_attr_name[ATTRIBUTE_ITERATE_TEST_ATTR_NAME_BUF_SIZE];
+ herr_t ret_val = H5_ITER_CONT;
+
+ UNUSED(location_id);
+ UNUSED(ainfo);
+
+ /*
+ * Four tests are run in the following order per attribute iteration API call:
+ *
+ * - iteration by attribute name in increasing order
+ * - iteration by attribute name in decreasing order
+ * - iteration by attribute creation order in increasing order
+ * - iteration by attribute creation order in decreasing order
+ *
+ * Based on how the test is written, this will mean that the attribute names
+ * will run in increasing order on the first and fourth tests and decreasing
+ * order on the second and third tests.
+ */
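+    /*
+     * Worked example (with ATTRIBUTE_ITERATE_TEST_NUM_ATTRS == 4): for
+     * counter_val == 5, test_iteration == 5 / 4 == 1, i.e. iteration by
+     * name in decreasing order, so the expected name index is
+     * 4 - (5 % 4) - 1 == 2 and the expected name is "iter_attr2".
+     */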
+ test_iteration = (counter_val / ATTRIBUTE_ITERATE_TEST_NUM_ATTRS);
+ if (test_iteration == 0 || test_iteration == 3) {
+ HDsnprintf(expected_attr_name, ATTRIBUTE_ITERATE_TEST_ATTR_NAME_BUF_SIZE,
+ ATTRIBUTE_ITERATE_TEST_ATTR_NAME "%d",
+ (int)(counter_val % ATTRIBUTE_ITERATE_TEST_NUM_ATTRS));
+ }
+ else {
+ HDsnprintf(
+ expected_attr_name, ATTRIBUTE_ITERATE_TEST_ATTR_NAME_BUF_SIZE,
+ ATTRIBUTE_ITERATE_TEST_ATTR_NAME "%d",
+ (int)(ATTRIBUTE_ITERATE_TEST_NUM_ATTRS - (counter_val % ATTRIBUTE_ITERATE_TEST_NUM_ATTRS) - 1));
+ }
+
+ if (HDstrncmp(attr_name, expected_attr_name, ATTRIBUTE_ITERATE_TEST_ATTR_NAME_BUF_SIZE)) {
+ HDprintf(" attribute name '%s' didn't match expected name '%s'\n", attr_name, expected_attr_name);
+ ret_val = H5_ITER_ERROR;
+ goto done;
+ }
+
+ /*
+ * If the attribute's creation order is marked as valid, make sure
+ * that it corresponds to what is expected based on the order that
+ * the attributes were created in.
+ */
+ if (ainfo->corder_valid) {
+ H5O_msg_crt_idx_t expected_crt_order;
+
+ /*
+ * As the attributes are created with a reverse-ordering naming
+ * scheme to test creation order, their creation order values will
+ * be listed in reverse ordering on the first and fourth tests and
+ * in normal ordering on the second and third tests.
+ */
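+            /*
+             * Continuing the example above (counter_val == 5 on the second
+             * test), the expected creation order index is 5 % 4 == 1.
+             */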
+ if (test_iteration == 0 || test_iteration == 3)
+ expected_crt_order = (H5O_msg_crt_idx_t)(ATTRIBUTE_ITERATE_TEST_NUM_ATTRS -
+ (counter_val % ATTRIBUTE_ITERATE_TEST_NUM_ATTRS) - 1);
+ else
+ expected_crt_order = (H5O_msg_crt_idx_t)(counter_val % ATTRIBUTE_ITERATE_TEST_NUM_ATTRS);
+
+ if (ainfo->corder != expected_crt_order) {
+ H5_FAILED();
+ HDprintf(" attribute's creation order value of %lld didn't match expected value of %lld\n",
+ (long long)ainfo->corder, (long long)expected_crt_order);
+ ret_val = H5_ITER_ERROR;
+ goto done;
+ }
+ }
+
+done:
+ (*i)++;
+
+ return ret_val;
+}
+
+static herr_t
+attr_iter_callback2(hid_t location_id, const char *attr_name, const H5A_info_t *ainfo, void *op_data)
+{
+ UNUSED(location_id);
+ UNUSED(attr_name);
+ UNUSED(ainfo);
+ UNUSED(op_data);
+
+ return 0;
+}
+
+int
+H5_api_attribute_test(void)
+{
+ size_t i;
+ int nerrors;
+
+ HDprintf("**********************************************\n");
+ HDprintf("* *\n");
+ HDprintf("* API Attribute Tests *\n");
+ HDprintf("* *\n");
+ HDprintf("**********************************************\n\n");
+
+ for (i = 0, nerrors = 0; i < ARRAY_LENGTH(attribute_tests); i++) {
+ nerrors += (*attribute_tests[i])() ? 1 : 0;
+ }
+
+ HDprintf("\n");
+
+ return nerrors;
+}
diff --git a/test/API/H5_api_attribute_test.h b/test/API/H5_api_attribute_test.h
new file mode 100644
index 0000000..7656263
--- /dev/null
+++ b/test/API/H5_api_attribute_test.h
@@ -0,0 +1,203 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+#ifndef H5_API_ATTRIBUTE_TEST_H
+#define H5_API_ATTRIBUTE_TEST_H
+
+#include "H5_api_test.h"
+
+int H5_api_attribute_test(void);
+
+/**************************************************
+ * *
+ * API Attribute test defines *
+ * *
+ **************************************************/
+
+#define ATTRIBUTE_CREATE_ON_ROOT_SPACE_RANK 1
+#define ATTRIBUTE_CREATE_ON_ROOT_ATTR_NAME "attr_on_root"
+#define ATTRIBUTE_CREATE_ON_ROOT_ATTR_NAME2 "attr_on_root2"
+
+#define ATTRIBUTE_CREATE_ON_DATASET_DSET_SPACE_RANK 2
+#define ATTRIBUTE_CREATE_ON_DATASET_ATTR_SPACE_RANK 1
+#define ATTRIBUTE_CREATE_ON_DATASET_GROUP_NAME "attr_on_dataset_test"
+#define ATTRIBUTE_CREATE_ON_DATASET_DSET_NAME "dataset_with_attr"
+#define ATTRIBUTE_CREATE_ON_DATASET_ATTR_NAME "attr_on_dataset"
+#define ATTRIBUTE_CREATE_ON_DATASET_ATTR_NAME2 "attr_on_dataset2"
+
+#define ATTRIBUTE_CREATE_ON_DATATYPE_SPACE_RANK 1
+#define ATTRIBUTE_CREATE_ON_DATATYPE_DTYPE_NAME "datatype_with_attr"
+#define ATTRIBUTE_CREATE_ON_DATATYPE_GROUP_NAME "attr_on_datatype_test"
+#define ATTRIBUTE_CREATE_ON_DATATYPE_ATTR_NAME "attr_on_datatype"
+#define ATTRIBUTE_CREATE_ON_DATATYPE_ATTR_NAME2 "attr_on_datatype2"
+
+#define ATTRIBUTE_CREATE_NULL_DATASPACE_TEST_SUBGROUP_NAME "attr_with_null_space_test"
+#define ATTRIBUTE_CREATE_NULL_DATASPACE_TEST_ATTR_NAME "attr_with_null_space"
+
+#define ATTRIBUTE_CREATE_SCALAR_DATASPACE_TEST_SUBGROUP_NAME "attr_with_scalar_space_test"
+#define ATTRIBUTE_CREATE_SCALAR_DATASPACE_TEST_ATTR_NAME "attr_with_scalar_space"
+
+#define ATTRIBUTE_CREATE_WITH_SPACE_IN_NAME_SPACE_RANK 1
+#define ATTRIBUTE_CREATE_WITH_SPACE_IN_NAME_GROUP_NAME "attr_with_space_in_name_test"
+#define ATTRIBUTE_CREATE_WITH_SPACE_IN_NAME_ATTR_NAME "attr with space in name"
+
+#define ATTRIBUTE_CREATE_INVALID_PARAMS_SPACE_RANK 1
+#define ATTRIBUTE_CREATE_INVALID_PARAMS_GROUP_NAME "attribute_create_invalid_params_test"
+#define ATTRIBUTE_CREATE_INVALID_PARAMS_ATTR_NAME "invalid_params_attr"
+
+#define ATTRIBUTE_OPEN_TEST_SPACE_RANK 1
+#define ATTRIBUTE_OPEN_TEST_GROUP_NAME "attribute_open_test"
+#define ATTRIBUTE_OPEN_TEST_ATTR_NAME "attribute_open_test_attr"
+#define ATTRIBUTE_OPEN_TEST_ATTR_NAME2 ATTRIBUTE_OPEN_TEST_ATTR_NAME "2"
+#define ATTRIBUTE_OPEN_TEST_ATTR_NAME3 ATTRIBUTE_OPEN_TEST_ATTR_NAME "3"
+
+#define ATTRIBUTE_OPEN_INVALID_PARAMS_TEST_GROUP_NAME "attribute_open_invalid_params_test"
+#define ATTRIBUTE_OPEN_INVALID_PARAMS_TEST_SPACE_RANK 1
+#define ATTRIBUTE_OPEN_INVALID_PARAMS_TEST_ATTR_NAME "attribute_open_invalid_params_attr"
+
+#define ATTRIBUTE_WRITE_TEST_ATTR_DTYPE_SIZE sizeof(int)
+#define ATTRIBUTE_WRITE_TEST_ATTR_DTYPE H5T_NATIVE_INT
+#define ATTRIBUTE_WRITE_TEST_SPACE_RANK 1
+#define ATTRIBUTE_WRITE_TEST_GROUP_NAME "attr_write_test"
+#define ATTRIBUTE_WRITE_TEST_ATTR_NAME "write_test_attr"
+
+#define ATTRIBUTE_WRITE_INVALID_PARAMS_TEST_ATTR_DTYPE_SIZE sizeof(int)
+#define ATTRIBUTE_WRITE_INVALID_PARAMS_TEST_ATTR_DTYPE H5T_NATIVE_INT
+#define ATTRIBUTE_WRITE_INVALID_PARAMS_TEST_SPACE_RANK 1
+#define ATTRIBUTE_WRITE_INVALID_PARAMS_TEST_GROUP_NAME "attr_write_invalid_params_test"
+#define ATTRIBUTE_WRITE_INVALID_PARAMS_TEST_ATTR_NAME "invalid_params_write_test_attr"
+
+#define ATTRIBUTE_READ_TEST_ATTR_DTYPE_SIZE sizeof(int)
+#define ATTRIBUTE_READ_TEST_ATTR_DTYPE H5T_NATIVE_INT
+#define ATTRIBUTE_READ_TEST_SPACE_RANK 1
+#define ATTRIBUTE_READ_TEST_GROUP_NAME "attr_read_test"
+#define ATTRIBUTE_READ_TEST_ATTR_NAME "read_test_attr"
+
+#define ATTRIBUTE_READ_INVALID_PARAMS_TEST_ATTR_DTYPE_SIZE sizeof(int)
+#define ATTRIBUTE_READ_INVALID_PARAMS_TEST_ATTR_DTYPE H5T_NATIVE_INT
+#define ATTRIBUTE_READ_INVALID_PARAMS_TEST_SPACE_RANK 1
+#define ATTRIBUTE_READ_INVALID_PARAMS_TEST_GROUP_NAME "attr_read_invalid_params_test"
+#define ATTRIBUTE_READ_INVALID_PARAMS_TEST_ATTR_NAME "invalid_params_read_test_attr"
+
+#define ATTRIBUTE_READ_EMPTY_SPACE_RANK 1
+#define ATTRIBUTE_READ_EMPTY_ATTR_GROUP_NAME "read_empty_attr_test"
+#define ATTRIBUTE_READ_EMPTY_ATTR_NAME "read_empty_attr"
+#define ATTRIBUTE_READ_EMPTY_DTYPE H5T_NATIVE_INT
+#define ATTRIBUTE_READ_EMPTY_DTYPE_SIZE sizeof(int)
+
+#define ATTRIBUTE_GET_SPACE_TYPE_TEST_SPACE_RANK 1
+#define ATTRIBUTE_GET_SPACE_TYPE_TEST_GROUP_NAME "get_attr_space_type_test"
+#define ATTRIBUTE_GET_SPACE_TYPE_TEST_ATTR_NAME "get_space_type_test_attr"
+
+#define ATTRIBUTE_GET_SPACE_TYPE_INVALID_PARAMS_TEST_SPACE_RANK 1
+#define ATTRIBUTE_GET_SPACE_TYPE_INVALID_PARAMS_TEST_GROUP_NAME "get_attr_space_type_invalid_params_test"
+#define ATTRIBUTE_GET_SPACE_TYPE_INVALID_PARAMS_TEST_ATTR_NAME "get_space_type_invalid_params_test_attr"
+
+#define ATTRIBUTE_PROPERTY_LIST_TEST_ATTRIBUTE_NAME1 "property_list_test_attribute1"
+#define ATTRIBUTE_PROPERTY_LIST_TEST_ATTRIBUTE_NAME2 "property_list_test_attribute2"
+#define ATTRIBUTE_PROPERTY_LIST_TEST_SUBGROUP_NAME "attribute_property_list_test_group"
+#define ATTRIBUTE_PROPERTY_LIST_TEST_SPACE_RANK 1
+
+#define ATTRIBUTE_GET_NAME_TEST_ATTRIBUTE_NAME "attr_name_retrieval_attr"
+#define ATTRIBUTE_GET_NAME_TEST_ATTRIBUTE_NAME2 ATTRIBUTE_GET_NAME_TEST_ATTRIBUTE_NAME "2"
+#define ATTRIBUTE_GET_NAME_TEST_ATTRIBUTE_NAME3 ATTRIBUTE_GET_NAME_TEST_ATTRIBUTE_NAME "3"
+#define ATTRIBUTE_GET_NAME_TEST_SPACE_RANK 1
+#define ATTRIBUTE_GET_NAME_TEST_GROUP_NAME "retrieve_attr_name_test"
+
+#define ATTRIBUTE_GET_NAME_INVALID_PARAMS_TEST_ATTRIBUTE_NAME "invalid_params_attr_name_retrieval_attr"
+#define ATTRIBUTE_GET_NAME_INVALID_PARAMS_TEST_SPACE_RANK 1
+#define ATTRIBUTE_GET_NAME_INVALID_PARAMS_TEST_GROUP_NAME "retrieve_attr_name_invalid_params_test"
+
+#define ATTRIBUTE_GET_INFO_TEST_SPACE_RANK 1
+#define ATTRIBUTE_GET_INFO_TEST_GROUP_NAME "attr_get_info_test"
+#define ATTRIBUTE_GET_INFO_TEST_ATTR_NAME "get_info_test_attr"
+#define ATTRIBUTE_GET_INFO_TEST_ATTR_NAME2 ATTRIBUTE_GET_INFO_TEST_ATTR_NAME "2"
+#define ATTRIBUTE_GET_INFO_TEST_ATTR_NAME3 ATTRIBUTE_GET_INFO_TEST_ATTR_NAME "3"
+
+#define ATTRIBUTE_GET_INFO_INVALID_PARAMS_TEST_SPACE_RANK 1
+#define ATTRIBUTE_GET_INFO_INVALID_PARAMS_TEST_GROUP_NAME "attr_get_info_invalid_params_test"
+#define ATTRIBUTE_GET_INFO_INVALID_PARAMS_TEST_ATTR_NAME "invalid_params_get_info_test_attr"
+
+#define ATTRIBUTE_RENAME_TEST_SPACE_RANK 1
+#define ATTRIBUTE_RENAME_TEST_GROUP_NAME "attr_rename_test"
+#define ATTRIBUTE_RENAME_TEST_ATTR_NAME "rename_test_attr"
+#define ATTRIBUTE_RENAME_TEST_ATTR_NAME2 "rename_test_attr2"
+#define ATTRIBUTE_RENAME_TEST_NEW_NAME "renamed_attr"
+#define ATTRIBUTE_RENAME_TEST_NEW_NAME2 "renamed_attr2"
+
+#define ATTRIBUTE_RENAME_INVALID_PARAMS_TEST_SPACE_RANK 1
+#define ATTRIBUTE_RENAME_INVALID_PARAMS_TEST_GROUP_NAME "attr_rename_invalid_params_test"
+#define ATTRIBUTE_RENAME_INVALID_PARAMS_TEST_ATTR_NAME "invalid_params_rename_test_attr"
+#define ATTRIBUTE_RENAME_INVALID_PARAMS_TEST_ATTR_NAME2 "invalid_params_rename_test_attr2"
+#define ATTRIBUTE_RENAME_INVALID_PARAMS_TEST_NEW_NAME "invalid_params_renamed_attr"
+#define ATTRIBUTE_RENAME_INVALID_PARAMS_TEST_NEW_NAME2 "invalid_params_renamed_attr2"
+
+#define ATTRIBUTE_ITERATE_TEST_ATTR_NAME_BUF_SIZE 256
+#define ATTRIBUTE_ITERATE_TEST_DSET_SPACE_RANK 2
+#define ATTRIBUTE_ITERATE_TEST_ATTR_SPACE_RANK 1
+#define ATTRIBUTE_ITERATE_TEST_GRP_SUBGROUP_NAME "attribute_iterate_group_test"
+#define ATTRIBUTE_ITERATE_TEST_DSET_SUBGROUP_NAME "attribute_iterate_dset_test"
+#define ATTRIBUTE_ITERATE_TEST_DTYPE_SUBGROUP_NAME "attribute_iterate_datatype_test"
+#define ATTRIBUTE_ITERATE_TEST_DSET_NAME "attribute_iterate_dset"
+#define ATTRIBUTE_ITERATE_TEST_DTYPE_NAME "attribute_iterate_dtype"
+#define ATTRIBUTE_ITERATE_TEST_ATTR_NAME "iter_attr"
+#define ATTRIBUTE_ITERATE_TEST_NUM_ATTRS 4
+
+#define ATTRIBUTE_ITERATE_TEST_0_ATTRIBUTES_DSET_SPACE_RANK 2
+#define ATTRIBUTE_ITERATE_TEST_0_ATTRIBUTES_SUBGROUP_NAME "attribute_iterate_test_0_attributes"
+#define ATTRIBUTE_ITERATE_TEST_0_ATTRIBUTES_DSET_NAME "attribute_iterate_dset"
+
+#define ATTRIBUTE_ITERATE_INVALID_PARAMS_TEST_ATTR_SPACE_RANK 1
+#define ATTRIBUTE_ITERATE_INVALID_PARAMS_TEST_SUBGROUP_NAME "attribute_iterate_invalid_params_test"
+#define ATTRIBUTE_ITERATE_INVALID_PARAMS_TEST_ATTR_NAME "invalid_params_iter_attr1"
+#define ATTRIBUTE_ITERATE_INVALID_PARAMS_TEST_ATTR_NAME2 "invalid_params_iter_attr2"
+#define ATTRIBUTE_ITERATE_INVALID_PARAMS_TEST_ATTR_NAME3 "invalid_params_iter_attr3"
+#define ATTRIBUTE_ITERATE_INVALID_PARAMS_TEST_ATTR_NAME4 "invalid_params_iter_attr4"
+
+#define ATTRIBUTE_DELETION_TEST_SPACE_RANK 1
+#define ATTRIBUTE_DELETION_TEST_GROUP_NAME "attr_deletion_test"
+#define ATTRIBUTE_DELETION_TEST_ATTR_NAME "attr_to_be_deleted"
+#define ATTRIBUTE_DELETION_TEST_ATTR_NAME2 ATTRIBUTE_DELETION_TEST_ATTR_NAME "2"
+#define ATTRIBUTE_DELETION_TEST_ATTR_NAME3 ATTRIBUTE_DELETION_TEST_ATTR_NAME "3"
+
+#define ATTRIBUTE_DELETION_INVALID_PARAMS_TEST_SPACE_RANK 1
+#define ATTRIBUTE_DELETION_INVALID_PARAMS_TEST_GROUP_NAME "attr_deletion_invalid_params_test"
+#define ATTRIBUTE_DELETION_INVALID_PARAMS_TEST_ATTR_NAME "invalid_params_attr_to_be_deleted"
+
+#define ATTRIBUTE_EXISTS_TEST_GROUP_NAME "attr_exists_test"
+#define ATTRIBUTE_EXISTS_TEST_SPACE_RANK 1
+#define ATTRIBUTE_EXISTS_TEST_ATTR_NAME "attr_exists"
+
+#define ATTRIBUTE_EXISTS_INVALID_PARAMS_TEST_SPACE_RANK 1
+#define ATTRIBUTE_EXISTS_INVALID_PARAMS_TEST_GROUP_NAME "attr_exists_invalid_params_test"
+#define ATTRIBUTE_EXISTS_INVALID_PARAMS_TEST_ATTR_NAME "invalid_params_attr_exists"
+
+#define ATTRIBUTE_MANY_GROUP_NAME "group_for_many_attributes"
+#define ATTRIBUTE_MANY_NAME_BUF_SIZE 32U
+#define ATTRIBUTE_MANY_NUMB 64U
+#define ATTRIBUTE_MANY_SPACE_RANK 1
+
+#define ATTRIBUTE_DUPLICATE_ID_GRP_NAME "attr_duplicate_open_test"
+#define ATTRIBUTE_DUPLICATE_ID_ATTR_NAME "attr_duplicated_id"
+#define ATTRIBUTE_DUPLICATE_ID_SPACE_RANK 1
+
+#define ATTRIBUTE_GET_NUM_ATTRS_TEST_GRP_NAME "get_num_attrs_test"
+#define ATTRIBUTE_GET_NUM_ATTRS_TEST_ATTR_NAME "get_num_attrs_test_attribute"
+#define ATTRIBUTE_GET_NUM_ATTRS_TEST_SPACE_RANK 1
+
+#define ATTRIBUTE_SHARED_DTYPE_NAME "Datatype"
+#define ATTRIBUTE_SHARED_DTYPE_GROUP_NAME "shared_dtype_group"
+#define ATTRIBUTE_SHARED_DTYPE_ATTR_NAME "shared_dtype_attr"
+#define ATTRIBUTE_SHARED_DTYPE_DSET_NAME "shared_dtype_dset"
+#define ATTRIBUTE_SHARED_DTYPE_SPACE_RANK 1
+
+#endif
diff --git a/test/API/H5_api_dataset_test.c b/test/API/H5_api_dataset_test.c
new file mode 100644
index 0000000..35a19f3
--- /dev/null
+++ b/test/API/H5_api_dataset_test.c
@@ -0,0 +1,11683 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+#include "H5_api_dataset_test.h"
+
+/*
+ * XXX: H5Dread_chunk/H5Dwrite_chunk, H5Dfill/scatter/gather
+ */
+
+static int test_create_dataset_under_root(void);
+static int test_create_dataset_under_existing_group(void);
+static int test_create_dataset_invalid_params(void);
+static int test_create_anonymous_dataset(void);
+static int test_create_anonymous_dataset_invalid_params(void);
+static int test_create_dataset_null_space(void);
+static int test_create_dataset_scalar_space(void);
+static int test_create_zero_dim_dset(void);
+static int test_create_dataset_random_shapes(void);
+static int test_create_dataset_predefined_types(void);
+static int test_create_dataset_string_types(void);
+static int test_create_dataset_compound_types(void);
+static int test_create_dataset_enum_types(void);
+static int test_create_dataset_array_types(void);
+static int test_create_dataset_creation_properties(void);
+static int test_create_many_dataset(void);
+static int test_open_dataset(void);
+static int test_open_dataset_invalid_params(void);
+static int test_close_dataset_invalid_params(void);
+static int test_get_dataset_space_and_type(void);
+static int test_get_dataset_space_and_type_invalid_params(void);
+static int test_get_dataset_space_status(void);
+static int test_get_dataset_space_status_invalid_params(void);
+static int test_dataset_property_lists(void);
+static int test_get_dataset_storage_size(void);
+static int test_get_dataset_storage_size_invalid_params(void);
+static int test_get_dataset_chunk_storage_size(void);
+static int test_get_dataset_chunk_storage_size_invalid_params(void);
+static int test_get_dataset_offset(void);
+static int test_get_dataset_offset_invalid_params(void);
+static int test_read_dataset_small_all(void);
+static int test_read_dataset_small_hyperslab(void);
+static int test_read_dataset_small_point_selection(void);
+static int test_dataset_io_point_selections(void);
+#ifndef NO_LARGE_TESTS
+static int test_read_dataset_large_all(void);
+static int test_read_dataset_large_hyperslab(void);
+static int test_read_dataset_large_point_selection(void);
+#endif
+static int test_read_dataset_invalid_params(void);
+static int test_write_dataset_small_all(void);
+static int test_write_dataset_small_hyperslab(void);
+static int test_write_dataset_small_point_selection(void);
+#ifndef NO_LARGE_TESTS
+static int test_write_dataset_large_all(void);
+static int test_write_dataset_large_hyperslab(void);
+static int test_write_dataset_large_point_selection(void);
+#endif
+static int test_write_dataset_data_verification(void);
+static int test_write_dataset_invalid_params(void);
+static int test_dataset_builtin_type_conversion(void);
+static int test_dataset_compound_partial_io(void);
+static int test_dataset_set_extent_chunked_unlimited(void);
+static int test_dataset_set_extent_chunked_fixed(void);
+static int test_dataset_set_extent_data(void);
+static int test_dataset_set_extent_double_handles(void);
+static int test_dataset_set_extent_invalid_params(void);
+static int test_flush_dataset(void);
+static int test_flush_dataset_invalid_params(void);
+static int test_refresh_dataset(void);
+static int test_refresh_dataset_invalid_params(void);
+
+/*
+ * Chunking tests
+ */
+static int test_create_single_chunk_dataset(void);
+static int test_write_single_chunk_dataset(void);
+static int test_create_multi_chunk_dataset(void);
+static int test_write_multi_chunk_dataset_same_shape_read(void);
+static int test_write_multi_chunk_dataset_diff_shape_read(void);
+static int test_overwrite_multi_chunk_dataset_same_shape_read(void);
+static int test_overwrite_multi_chunk_dataset_diff_shape_read(void);
+static int test_read_partial_chunk_all_selection(void);
+static int test_read_partial_chunk_hyperslab_selection(void);
+static int test_read_partial_chunk_point_selection(void);
+
+static int test_get_vlen_buf_size(void);
+
+/*
+ * The array of dataset tests to be performed.
+ */
+static int (*dataset_tests[])(void) = {
+ test_create_dataset_under_root,
+ test_create_dataset_under_existing_group,
+ test_create_dataset_invalid_params,
+ test_create_anonymous_dataset,
+ test_create_anonymous_dataset_invalid_params,
+ test_create_dataset_null_space,
+ test_create_dataset_scalar_space,
+ test_create_zero_dim_dset,
+ test_create_dataset_random_shapes,
+ test_create_dataset_predefined_types,
+ test_create_dataset_string_types,
+ test_create_dataset_compound_types,
+ test_create_dataset_enum_types,
+ test_create_dataset_array_types,
+ test_create_dataset_creation_properties,
+ test_create_many_dataset,
+ test_open_dataset,
+ test_open_dataset_invalid_params,
+ test_close_dataset_invalid_params,
+ test_get_dataset_space_and_type,
+ test_get_dataset_space_and_type_invalid_params,
+ test_get_dataset_space_status,
+ test_get_dataset_space_status_invalid_params,
+ test_dataset_property_lists,
+ test_get_dataset_storage_size,
+ test_get_dataset_storage_size_invalid_params,
+ test_get_dataset_chunk_storage_size,
+ test_get_dataset_chunk_storage_size_invalid_params,
+ test_get_dataset_offset,
+ test_get_dataset_offset_invalid_params,
+ test_read_dataset_small_all,
+ test_read_dataset_small_hyperslab,
+ test_read_dataset_small_point_selection,
+ test_dataset_io_point_selections,
+#ifndef NO_LARGE_TESTS
+ test_read_dataset_large_all,
+ test_read_dataset_large_hyperslab,
+ test_read_dataset_large_point_selection,
+#endif
+ test_read_dataset_invalid_params,
+ test_write_dataset_small_all,
+ test_write_dataset_small_hyperslab,
+ test_write_dataset_small_point_selection,
+#ifndef NO_LARGE_TESTS
+ test_write_dataset_large_all,
+ test_write_dataset_large_hyperslab,
+ test_write_dataset_large_point_selection,
+#endif
+ test_write_dataset_data_verification,
+ test_write_dataset_invalid_params,
+ test_dataset_builtin_type_conversion,
+ test_dataset_compound_partial_io,
+ test_dataset_set_extent_chunked_unlimited,
+ test_dataset_set_extent_chunked_fixed,
+ test_dataset_set_extent_data,
+ test_dataset_set_extent_double_handles,
+ test_dataset_set_extent_invalid_params,
+ test_flush_dataset,
+ test_flush_dataset_invalid_params,
+ test_refresh_dataset,
+ test_refresh_dataset_invalid_params,
+ test_create_single_chunk_dataset,
+ test_write_single_chunk_dataset,
+ test_create_multi_chunk_dataset,
+ test_write_multi_chunk_dataset_same_shape_read,
+ test_write_multi_chunk_dataset_diff_shape_read,
+ test_overwrite_multi_chunk_dataset_same_shape_read,
+ test_overwrite_multi_chunk_dataset_diff_shape_read,
+ test_read_partial_chunk_all_selection,
+ test_read_partial_chunk_hyperslab_selection,
+ test_read_partial_chunk_point_selection,
+ test_get_vlen_buf_size,
+};
+
+/*
+ * A test to check that a dataset can be
+ * created under the root group.
+ */
+static int
+test_create_dataset_under_root(void)
+{
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t dset_dtype = H5I_INVALID_HID;
+ hid_t fspace_id = H5I_INVALID_HID;
+
+ TESTING("dataset creation under root group");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file or dataset aren't supported with this connector\n");
+ return 0;
+ }
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((fspace_id = generate_random_dataspace(DATASET_CREATE_UNDER_ROOT_SPACE_RANK, NULL, NULL, FALSE)) < 0)
+ TEST_ERROR;
+
+ if ((dset_dtype = generate_random_datatype(H5T_NO_CLASS, FALSE)) < 0)
+ TEST_ERROR;
+
+    /* Create the dataset under the root group of the file */
+ if ((dset_id = H5Dcreate2(file_id, DATASET_CREATE_UNDER_ROOT_DSET_NAME, dset_dtype, fspace_id,
+ H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create dataset '%s'\n", DATASET_CREATE_UNDER_ROOT_DSET_NAME);
+ goto error;
+ }
+
+ if (H5Sclose(fspace_id) < 0)
+ TEST_ERROR;
+ if (H5Tclose(dset_dtype) < 0)
+ TEST_ERROR;
+ if (H5Dclose(dset_id) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(fspace_id);
+ H5Tclose(dset_dtype);
+ H5Dclose(dset_id);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to check that a dataset can be created
+ * under a group that is not the root group.
+ */
+static int
+test_create_dataset_under_existing_group(void)
+{
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t dset_dtype = H5I_INVALID_HID;
+ hid_t fspace_id = H5I_INVALID_HID;
+
+ TESTING("dataset creation under an existing group");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) {
+ SKIPPED();
+ HDprintf(
+ " API functions for basic file, group, or dataset aren't supported with this connector\n");
+ return 0;
+ }
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open group '%s'\n", DATASET_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, DATASET_CREATE_UNDER_EXISTING_GROUP_NAME, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container group '%s'\n", DATASET_CREATE_UNDER_EXISTING_GROUP_NAME);
+ goto error;
+ }
+
+ if ((fspace_id = generate_random_dataspace(DATASET_CREATE_UNDER_EXISTING_SPACE_RANK, NULL, NULL, FALSE)) <
+ 0)
+ TEST_ERROR;
+
+ if ((dset_dtype = generate_random_datatype(H5T_NO_CLASS, FALSE)) < 0)
+ TEST_ERROR;
+
+ if ((dset_id = H5Dcreate2(group_id, DATASET_CREATE_UNDER_EXISTING_DSET_NAME, dset_dtype, fspace_id,
+ H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create dataset '%s'\n", DATASET_CREATE_UNDER_EXISTING_DSET_NAME);
+ goto error;
+ }
+
+ if (H5Sclose(fspace_id) < 0)
+ TEST_ERROR;
+ if (H5Tclose(dset_dtype) < 0)
+ TEST_ERROR;
+ if (H5Dclose(dset_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(fspace_id);
+ H5Tclose(dset_dtype);
+ H5Dclose(dset_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to check that a dataset can't be created
+ * when H5Dcreate is passed invalid parameters.
+ */
+static int
+test_create_dataset_invalid_params(void)
+{
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t dset_dtype = H5I_INVALID_HID;
+ hid_t fspace_id = H5I_INVALID_HID;
+
+ TESTING_MULTIPART("H5Dcreate with invalid parameters");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) {
+ SKIPPED();
+ HDprintf(
+ " API functions for basic file, group, or dataset aren't supported with this connector\n");
+ return 0;
+ }
+
+ TESTING_2("test setup");
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open group '%s'\n", DATASET_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, DATASET_CREATE_INVALID_PARAMS_GROUP_NAME, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container group '%s'\n", DATASET_CREATE_INVALID_PARAMS_GROUP_NAME);
+ goto error;
+ }
+
+ if ((fspace_id = generate_random_dataspace(DATASET_CREATE_INVALID_PARAMS_SPACE_RANK, NULL, NULL, FALSE)) <
+ 0)
+ TEST_ERROR;
+
+ if ((dset_dtype = generate_random_datatype(H5T_NO_CLASS, FALSE)) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ BEGIN_MULTIPART
+ {
+ PART_BEGIN(H5Dcreate_invalid_loc_id)
+ {
+ TESTING_2("H5Dcreate with an invalid loc_id");
+
+ H5E_BEGIN_TRY
+ {
+ dset_id = H5Dcreate2(H5I_INVALID_HID, DATASET_CREATE_INVALID_PARAMS_DSET_NAME, dset_dtype,
+ fspace_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (dset_id >= 0) {
+ H5_FAILED();
+ HDprintf(" created dataset using H5Dcreate with an invalid loc_id!\n");
+ H5Dclose(dset_id);
+ PART_ERROR(H5Dcreate_invalid_loc_id);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Dcreate_invalid_loc_id);
+
+ PART_BEGIN(H5Dcreate_invalid_dataset_name)
+ {
+ TESTING_2("H5Dcreate with an invalid dataset name");
+
+ H5E_BEGIN_TRY
+ {
+ dset_id =
+ H5Dcreate2(group_id, NULL, dset_dtype, fspace_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (dset_id >= 0) {
+ H5_FAILED();
+ HDprintf(" created dataset using H5Dcreate with a NULL dataset name!\n");
+ H5Dclose(dset_id);
+ PART_ERROR(H5Dcreate_invalid_dataset_name);
+ }
+
+ H5E_BEGIN_TRY
+ {
+ dset_id =
+ H5Dcreate2(group_id, "", dset_dtype, fspace_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (dset_id >= 0) {
+ H5_FAILED();
+ HDprintf(" created dataset using H5Dcreate with an invalid dataset name of ''!\n");
+ H5Dclose(dset_id);
+ PART_ERROR(H5Dcreate_invalid_dataset_name);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Dcreate_invalid_dataset_name);
+
+ PART_BEGIN(H5Dcreate_invalid_datatype)
+ {
+ TESTING_2("H5Dcreate with an invalid datatype");
+
+ H5E_BEGIN_TRY
+ {
+ dset_id = H5Dcreate2(group_id, DATASET_CREATE_INVALID_PARAMS_DSET_NAME, H5I_INVALID_HID,
+ fspace_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (dset_id >= 0) {
+ H5_FAILED();
+ HDprintf(" created dataset using H5Dcreate with an invalid datatype!\n");
+ H5Dclose(dset_id);
+ PART_ERROR(H5Dcreate_invalid_datatype);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Dcreate_invalid_datatype);
+
+ PART_BEGIN(H5Dcreate_invalid_dataspace)
+ {
+ TESTING_2("H5Dcreate with an invalid dataspace");
+
+ H5E_BEGIN_TRY
+ {
+ dset_id = H5Dcreate2(group_id, DATASET_CREATE_INVALID_PARAMS_DSET_NAME, dset_dtype,
+ H5I_INVALID_HID, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (dset_id >= 0) {
+ H5_FAILED();
+ HDprintf(" created dataset using H5Dcreate with an invalid dataspace!\n");
+ H5Dclose(dset_id);
+ PART_ERROR(H5Dcreate_invalid_dataspace);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Dcreate_invalid_dataspace);
+
+ PART_BEGIN(H5Dcreate_invalid_lcpl)
+ {
+ TESTING_2("H5Dcreate with an invalid LCPL");
+
+ H5E_BEGIN_TRY
+ {
+ dset_id = H5Dcreate2(group_id, DATASET_CREATE_INVALID_PARAMS_DSET_NAME, dset_dtype, fspace_id,
+ H5I_INVALID_HID, H5P_DEFAULT, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (dset_id >= 0) {
+ H5_FAILED();
+ HDprintf(" created dataset using H5Dcreate with an invalid LCPL!\n");
+ H5Dclose(dset_id);
+ PART_ERROR(H5Dcreate_invalid_lcpl);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Dcreate_invalid_lcpl);
+
+ PART_BEGIN(H5Dcreate_invalid_dcpl)
+ {
+ TESTING_2("H5Dcreate with an invalid DCPL");
+
+ H5E_BEGIN_TRY
+ {
+ dset_id = H5Dcreate2(group_id, DATASET_CREATE_INVALID_PARAMS_DSET_NAME, dset_dtype, fspace_id,
+ H5P_DEFAULT, H5I_INVALID_HID, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (dset_id >= 0) {
+ H5_FAILED();
+ HDprintf(" created dataset using H5Dcreate with an invalid DCPL!\n");
+ H5Dclose(dset_id);
+ PART_ERROR(H5Dcreate_invalid_dcpl);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Dcreate_invalid_dcpl);
+
+ PART_BEGIN(H5Dcreate_invalid_dapl)
+ {
+ TESTING_2("H5Dcreate with an invalid DAPL");
+
+ H5E_BEGIN_TRY
+ {
+ dset_id = H5Dcreate2(group_id, DATASET_CREATE_INVALID_PARAMS_DSET_NAME, dset_dtype, fspace_id,
+ H5P_DEFAULT, H5P_DEFAULT, H5I_INVALID_HID);
+ }
+ H5E_END_TRY;
+
+ if (dset_id >= 0) {
+ H5_FAILED();
+ HDprintf(" created dataset using H5Dcreate with an invalid DAPL!\n");
+ H5Dclose(dset_id);
+ PART_ERROR(H5Dcreate_invalid_dapl);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Dcreate_invalid_dapl);
+ }
+ END_MULTIPART;
+
+ TESTING_2("test cleanup");
+
+ if (H5Sclose(fspace_id) < 0)
+ TEST_ERROR;
+ if (H5Tclose(dset_dtype) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(fspace_id);
+ H5Tclose(dset_dtype);
+ H5Dclose(dset_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to check that an anonymous dataset can be created.
+ */
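+/*
+ * Note: H5Dcreate_anon creates the dataset without linking it into the
+ * group structure, so it is reachable only through the returned identifier
+ * unless it is later linked into the file (e.g. with H5Olink).
+ */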
+static int
+test_create_anonymous_dataset(void)
+{
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t dset_dtype = H5I_INVALID_HID;
+ hid_t fspace_id = H5I_INVALID_HID;
+
+ TESTING("anonymous dataset creation");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) {
+ SKIPPED();
+ HDprintf(
+ " API functions for basic file, group, or dataset aren't supported with this connector\n");
+ return 0;
+ }
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, DATASET_CREATE_ANONYMOUS_GROUP_NAME, H5P_DEFAULT, H5P_DEFAULT,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container group '%s'\n", DATASET_CREATE_ANONYMOUS_GROUP_NAME);
+ goto error;
+ }
+
+ if ((fspace_id = generate_random_dataspace(DATASET_CREATE_ANONYMOUS_SPACE_RANK, NULL, NULL, FALSE)) < 0)
+ TEST_ERROR;
+
+ if ((dset_dtype = generate_random_datatype(H5T_NO_CLASS, FALSE)) < 0)
+ TEST_ERROR;
+
+ if ((dset_id = H5Dcreate_anon(group_id, dset_dtype, fspace_id, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create anonymous dataset\n");
+ goto error;
+ }
+
+ if (H5Sclose(fspace_id) < 0)
+ TEST_ERROR;
+ if (H5Tclose(dset_dtype) < 0)
+ TEST_ERROR;
+ if (H5Dclose(dset_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(fspace_id);
+ H5Tclose(dset_dtype);
+ H5Dclose(dset_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to check that an anonymous dataset can't
+ * be created when H5Dcreate_anon is passed invalid
+ * parameters.
+ */
+static int
+test_create_anonymous_dataset_invalid_params(void)
+{
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t dset_dtype = H5I_INVALID_HID;
+ hid_t fspace_id = H5I_INVALID_HID;
+
+ TESTING_MULTIPART("anonymous dataset creation with invalid parameters");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) {
+ SKIPPED();
+ HDprintf(
+ " API functions for basic file, group, or dataset aren't supported with this connector\n");
+ return 0;
+ }
+
+ TESTING_2("test setup");
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, DATASET_CREATE_ANONYMOUS_INVALID_PARAMS_GROUP_NAME,
+ H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container group '%s'\n",
+ DATASET_CREATE_ANONYMOUS_INVALID_PARAMS_GROUP_NAME);
+ goto error;
+ }
+
+ if ((fspace_id = generate_random_dataspace(DATASET_CREATE_ANONYMOUS_INVALID_PARAMS_SPACE_RANK, NULL, NULL,
+ FALSE)) < 0)
+ TEST_ERROR;
+
+ if ((dset_dtype = generate_random_datatype(H5T_NO_CLASS, FALSE)) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ BEGIN_MULTIPART
+ {
+ PART_BEGIN(H5Dcreate_anon_invalid_loc_id)
+ {
+ TESTING_2("H5Dcreate_anon with an invalid loc_id");
+
+ H5E_BEGIN_TRY
+ {
+ dset_id = H5Dcreate_anon(H5I_INVALID_HID, dset_dtype, fspace_id, H5P_DEFAULT, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (dset_id >= 0) {
+ H5_FAILED();
+ HDprintf(" created anonymous dataset using an invalid loc_id!\n");
+ H5Dclose(dset_id);
+ PART_ERROR(H5Dcreate_anon_invalid_loc_id);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Dcreate_anon_invalid_loc_id);
+
+ PART_BEGIN(H5Dcreate_anon_invalid_datatype)
+ {
+ TESTING_2("H5Dcreate_anon with an invalid dataset datatype");
+
+ H5E_BEGIN_TRY
+ {
+ dset_id = H5Dcreate_anon(group_id, H5I_INVALID_HID, fspace_id, H5P_DEFAULT, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (dset_id >= 0) {
+ H5_FAILED();
+ HDprintf(" created anonymous dataset using an invalid dataset datatype!\n");
+ H5Dclose(dset_id);
+ PART_ERROR(H5Dcreate_anon_invalid_datatype);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Dcreate_anon_invalid_datatype);
+
+ PART_BEGIN(H5Dcreate_anon_invalid_dataspace)
+ {
+ TESTING_2("H5Dcreate_anon with an invalid dataset dataspace");
+
+ H5E_BEGIN_TRY
+ {
+ dset_id = H5Dcreate_anon(group_id, dset_dtype, H5I_INVALID_HID, H5P_DEFAULT, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (dset_id >= 0) {
+ H5_FAILED();
+ HDprintf(" created anonymous dataset using an invalid dataset dataspace!\n");
+ H5Dclose(dset_id);
+ PART_ERROR(H5Dcreate_anon_invalid_dataspace);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Dcreate_anon_invalid_dataspace);
+
+ PART_BEGIN(H5Dcreate_anon_invalid_dcpl)
+ {
+ TESTING_2("H5Dcreate_anon with an invalid DCPL");
+
+ H5E_BEGIN_TRY
+ {
+ dset_id = H5Dcreate_anon(group_id, dset_dtype, fspace_id, H5I_INVALID_HID, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (dset_id >= 0) {
+ H5_FAILED();
+ HDprintf(" created anonymous dataset using an invalid DCPL!\n");
+ H5Dclose(dset_id);
+ PART_ERROR(H5Dcreate_anon_invalid_dcpl);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Dcreate_anon_invalid_dcpl);
+
+ PART_BEGIN(H5Dcreate_anon_invalid_dapl)
+ {
+ TESTING_2("H5Dcreate_anon with an invalid DAPL");
+
+ H5E_BEGIN_TRY
+ {
+ dset_id = H5Dcreate_anon(group_id, dset_dtype, fspace_id, H5P_DEFAULT, H5I_INVALID_HID);
+ }
+ H5E_END_TRY;
+
+ if (dset_id >= 0) {
+ H5_FAILED();
+ HDprintf(" created anonymous dataset using an invalid DAPL!\n");
+ H5Dclose(dset_id);
+ PART_ERROR(H5Dcreate_anon_invalid_dapl);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Dcreate_anon_invalid_dapl);
+ }
+ END_MULTIPART;
+
+ TESTING_2("test cleanup");
+
+ if (H5Sclose(fspace_id) < 0)
+ TEST_ERROR;
+ if (H5Tclose(dset_dtype) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(fspace_id);
+ H5Tclose(dset_dtype);
+ H5Dclose(dset_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to check that creating a dataset with a NULL
+ * dataspace is not problematic.
+ */
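+/*
+ * (The check below creates a dataset with an H5S_NULL dataspace, closes it,
+ * and then reopens it to confirm the dataset was created successfully.)
+ */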
+static int
+test_create_dataset_null_space(void)
+{
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t dset_dtype = H5I_INVALID_HID;
+ hid_t fspace_id = H5I_INVALID_HID;
+
+ TESTING("dataset creation with a NULL dataspace");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) {
+ SKIPPED();
+ HDprintf(
+ " API functions for basic file, group, or dataset aren't supported with this connector\n");
+ return 0;
+ }
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, DATASET_CREATE_NULL_DATASPACE_TEST_SUBGROUP_NAME, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container subgroup '%s'\n",
+ DATASET_CREATE_NULL_DATASPACE_TEST_SUBGROUP_NAME);
+ goto error;
+ }
+
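+    /* An H5S_NULL dataspace describes no elements at all, so the dataset below
+     * carries a datatype and attributes but no raw data.
+     */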
+ if ((fspace_id = H5Screate(H5S_NULL)) < 0)
+ TEST_ERROR;
+
+ if ((dset_dtype = generate_random_datatype(H5T_NO_CLASS, FALSE)) < 0)
+ TEST_ERROR;
+
+ if ((dset_id = H5Dcreate2(group_id, DATASET_CREATE_NULL_DATASPACE_TEST_DSET_NAME, dset_dtype, fspace_id,
+ H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create dataset '%s'\n", DATASET_CREATE_NULL_DATASPACE_TEST_DSET_NAME);
+ goto error;
+ }
+
+ if (H5Dclose(dset_id) < 0)
+ TEST_ERROR;
+
+ if ((dset_id = H5Dopen2(group_id, DATASET_CREATE_NULL_DATASPACE_TEST_DSET_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open dataset '%s'\n", DATASET_CREATE_NULL_DATASPACE_TEST_DSET_NAME);
+ goto error;
+ }
+
+ if (H5Sclose(fspace_id) < 0)
+ TEST_ERROR;
+ if (H5Tclose(dset_dtype) < 0)
+ TEST_ERROR;
+ if (H5Dclose(dset_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(fspace_id);
+ H5Tclose(dset_dtype);
+ H5Dclose(dset_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to check that creating a dataset with a scalar
+ * dataspace is not problematic.
+ */
+static int
+test_create_dataset_scalar_space(void)
+{
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t dset_dtype = H5I_INVALID_HID;
+ hid_t fspace_id = H5I_INVALID_HID;
+
+ TESTING("dataset creation with a SCALAR dataspace");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) {
+ SKIPPED();
+ HDprintf(
+ " API functions for basic file, group, or dataset aren't supported with this connector\n");
+ return 0;
+ }
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, DATASET_CREATE_SCALAR_DATASPACE_TEST_SUBGROUP_NAME,
+ H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container subgroup '%s'\n",
+ DATASET_CREATE_SCALAR_DATASPACE_TEST_SUBGROUP_NAME);
+ goto error;
+ }
+
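+    /* An H5S_SCALAR dataspace has rank 0 and holds exactly one element. */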
+ if ((fspace_id = H5Screate(H5S_SCALAR)) < 0)
+ TEST_ERROR;
+
+ if ((dset_dtype = generate_random_datatype(H5T_NO_CLASS, FALSE)) < 0)
+ TEST_ERROR;
+
+ if ((dset_id = H5Dcreate2(group_id, DATASET_CREATE_SCALAR_DATASPACE_TEST_DSET_NAME, dset_dtype, fspace_id,
+ H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create dataset '%s'\n", DATASET_CREATE_SCALAR_DATASPACE_TEST_DSET_NAME);
+ goto error;
+ }
+
+ if (H5Dclose(dset_id) < 0)
+ TEST_ERROR;
+
+ if ((dset_id = H5Dopen2(group_id, DATASET_CREATE_SCALAR_DATASPACE_TEST_DSET_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open dataset '%s'\n", DATASET_CREATE_SCALAR_DATASPACE_TEST_DSET_NAME);
+ goto error;
+ }
+
+ if (H5Sclose(fspace_id) < 0)
+ TEST_ERROR;
+ if (H5Tclose(dset_dtype) < 0)
+ TEST_ERROR;
+ if (H5Dclose(dset_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(fspace_id);
+ H5Tclose(dset_dtype);
+ H5Dclose(dset_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to check that creating a dataset with a dataspace
+ * which contains a 0-sized dimension is not problematic.
+ */
+static int
+test_create_zero_dim_dset(void)
+{
+ hsize_t dims[ZERO_DIM_DSET_TEST_SPACE_RANK] = {0};
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t fspace_id = H5I_INVALID_HID;
+ int data[1];
+
+ TESTING("creation of 0-sized dataset");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) {
+ SKIPPED();
+ HDprintf(
+ " API functions for basic file, group, or dataset aren't supported with this connector\n");
+ return 0;
+ }
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+    if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+        H5_FAILED();
+        HDprintf("    couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME);

+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, ZERO_DIM_DSET_TEST_GROUP_NAME, H5P_DEFAULT, H5P_DEFAULT,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container sub-group '%s'\n", ZERO_DIM_DSET_TEST_GROUP_NAME);
+ goto error;
+ }
+
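+    /* A simple dataspace with a 0-sized dimension is valid but contains no elements;
+     * the write and read below therefore use an empty ('none') selection so that no
+     * data is actually transferred.
+     */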
+    if ((fspace_id = H5Screate_simple(ZERO_DIM_DSET_TEST_SPACE_RANK, dims, NULL)) < 0)
+ TEST_ERROR;
+
+ if ((dset_id = H5Dcreate2(group_id, ZERO_DIM_DSET_TEST_DSET_NAME, H5T_NATIVE_INT, fspace_id, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to create 0-sized dataset\n");
+ goto error;
+ }
+
+ if (H5Sselect_none(fspace_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to set none selection in dataset's file dataspace\n");
+ goto error;
+ }
+
+ /* Attempt to write 0 elements to dataset */
+ if (H5Dwrite(dset_id, H5T_NATIVE_INT, fspace_id, fspace_id, H5P_DEFAULT, data) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to write 0 elements to 0-sized dataset\n");
+ goto error;
+ }
+
+ /* Attempt to read 0 elements from dataset */
+ if (H5Dread(dset_id, H5T_NATIVE_INT, fspace_id, fspace_id, H5P_DEFAULT, data) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to read 0 elements from 0-sized dataset\n");
+ goto error;
+ }
+
+ if (H5Sclose(fspace_id) < 0)
+ TEST_ERROR;
+ if (H5Dclose(dset_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(fspace_id);
+ H5Dclose(dset_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to check that a dataset can be created with
+ * a variety of different dataspace shapes.
+ */
+static int
+test_create_dataset_random_shapes(void)
+{
+ size_t i;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID, space_id = H5I_INVALID_HID;
+ hid_t dset_dtype = H5I_INVALID_HID;
+
+ TESTING("dataset creation with random dimension sizes");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) {
+ SKIPPED();
+ HDprintf(
+ " API functions for basic file, group, or dataset aren't supported with this connector\n");
+ return 0;
+ }
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group\n");
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, DATASET_SHAPE_TEST_SUBGROUP_NAME, H5P_DEFAULT, H5P_DEFAULT,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container sub-group\n");
+ goto error;
+ }
+
+ if ((dset_dtype = generate_random_datatype(H5T_NO_CLASS, FALSE)) < 0)
+ TEST_ERROR;
+
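+    /* Each iteration picks a random rank in the range [1, DATASET_SHAPE_TEST_MAX_DIMS]
+     * and lets generate_random_dataspace() choose the extents for that rank.
+     */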
+ for (i = 0; i < DATASET_SHAPE_TEST_NUM_ITERATIONS; i++) {
+ char name[100];
+ int ndims = rand() % DATASET_SHAPE_TEST_MAX_DIMS + 1;
+
+ if ((space_id = generate_random_dataspace(ndims, NULL, NULL, FALSE)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create dataspace\n");
+ goto error;
+ }
+
+ HDsprintf(name, "%s%zu", DATASET_SHAPE_TEST_DSET_BASE_NAME, i + 1);
+
+ if ((dset_id = H5Dcreate2(group_id, name, dset_dtype, space_id, H5P_DEFAULT, H5P_DEFAULT,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create dataset\n");
+ goto error;
+ }
+
+ if (H5Sclose(space_id) < 0)
+ TEST_ERROR;
+ if (H5Dclose(dset_id) < 0)
+ TEST_ERROR;
+ }
+
+ if (H5Tclose(dset_dtype) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(space_id);
+ H5Tclose(dset_dtype);
+ H5Dclose(dset_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to check that a dataset can be created using
+ * each of the predefined integer and floating-point
+ * datatypes.
+ */
+static int
+test_create_dataset_predefined_types(void)
+{
+ size_t i;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t fspace_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t predefined_type_test_table[] = {H5T_STD_U8LE, H5T_STD_U8BE, H5T_STD_I8LE, H5T_STD_I8BE,
+ H5T_STD_U16LE, H5T_STD_U16BE, H5T_STD_I16LE, H5T_STD_I16BE,
+ H5T_STD_U32LE, H5T_STD_U32BE, H5T_STD_I32LE, H5T_STD_I32BE,
+ H5T_STD_U64LE, H5T_STD_U64BE, H5T_STD_I64LE, H5T_STD_I64BE,
+ H5T_IEEE_F32LE, H5T_IEEE_F32BE, H5T_IEEE_F64LE, H5T_IEEE_F64BE};
+
+ TESTING("dataset creation with predefined datatypes");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) {
+ SKIPPED();
+ HDprintf(
+ " API functions for basic file, group, or dataset aren't supported with this connector\n");
+ return 0;
+ }
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, DATASET_PREDEFINED_TYPE_TEST_SUBGROUP_NAME, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create sub-container group '%s'\n",
+ DATASET_PREDEFINED_TYPE_TEST_SUBGROUP_NAME);
+ goto error;
+ }
+
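+    /* The table above covers the standard 8/16/32/64-bit signed and unsigned integer
+     * types and the IEEE 32/64-bit floating-point types in both byte orders.
+     */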
+ for (i = 0; i < ARRAY_LENGTH(predefined_type_test_table); i++) {
+ char name[100];
+
+ if ((fspace_id =
+ generate_random_dataspace(DATASET_PREDEFINED_TYPE_TEST_SPACE_RANK, NULL, NULL, FALSE)) < 0)
+ TEST_ERROR;
+
+ HDsprintf(name, "%s%zu", DATASET_PREDEFINED_TYPE_TEST_BASE_NAME, i);
+
+ if ((dset_id = H5Dcreate2(group_id, name, predefined_type_test_table[i], fspace_id, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create dataset '%s'\n", name);
+ goto error;
+ }
+
+ if (H5Sclose(fspace_id) < 0)
+ TEST_ERROR;
+ if (H5Dclose(dset_id) < 0)
+ TEST_ERROR;
+
+ if ((dset_id = H5Dopen2(group_id, name, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to open dataset '%s'\n", name);
+ goto error;
+ }
+
+ if (H5Dclose(dset_id) < 0)
+ TEST_ERROR;
+ }
+
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(fspace_id);
+ H5Dclose(dset_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to check that a dataset can be created using
+ * string datatypes.
+ */
+static int
+test_create_dataset_string_types(void)
+{
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t dset_id_fixed = H5I_INVALID_HID, dset_id_variable = H5I_INVALID_HID;
+ hid_t type_id_fixed = H5I_INVALID_HID, type_id_variable = H5I_INVALID_HID;
+ hid_t fspace_id = H5I_INVALID_HID;
+
+ TESTING_MULTIPART("dataset creation with string types");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) {
+ SKIPPED();
+ HDprintf(
+ " API functions for basic file, group, or dataset aren't supported with this connector\n");
+ return 0;
+ }
+
+ TESTING_2("test setup");
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, DATASET_STRING_TYPE_TEST_SUBGROUP_NAME, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container sub-group '%s'\n", DATASET_STRING_TYPE_TEST_SUBGROUP_NAME);
+ goto error;
+ }
+
+ if ((type_id_fixed = H5Tcreate(H5T_STRING, DATASET_STRING_TYPE_TEST_STRING_LENGTH)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create fixed-length string type\n");
+ goto error;
+ }
+
+ if ((type_id_variable = H5Tcreate(H5T_STRING, H5T_VARIABLE)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create variable-length string type\n");
+ goto error;
+ }
+
+ if ((fspace_id = generate_random_dataspace(DATASET_STRING_TYPE_TEST_SPACE_RANK, NULL, NULL, FALSE)) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ BEGIN_MULTIPART
+ {
+ PART_BEGIN(H5Dcreate_fixed_string_type)
+ {
+ TESTING_2("creation of fixed-size string dataset");
+
+ if ((dset_id_fixed = H5Dcreate2(group_id, DATASET_STRING_TYPE_TEST_DSET_NAME1, type_id_fixed,
+ fspace_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create fixed-length string dataset '%s'\n",
+ DATASET_STRING_TYPE_TEST_DSET_NAME1);
+ PART_ERROR(H5Dcreate_fixed_string_type);
+ }
+
+ if (dset_id_fixed >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Dclose(dset_id_fixed);
+ }
+ H5E_END_TRY;
+ dset_id_fixed = H5I_INVALID_HID;
+ }
+
+ if ((dset_id_fixed = H5Dopen2(group_id, DATASET_STRING_TYPE_TEST_DSET_NAME1, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to open dataset '%s'\n", DATASET_STRING_TYPE_TEST_DSET_NAME1);
+ PART_ERROR(H5Dcreate_fixed_string_type);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Dcreate_fixed_string_type);
+
+ PART_BEGIN(H5Dcreate_variable_string_type)
+ {
+ TESTING_2("creation of variable-length string dataset");
+
+ if ((dset_id_variable =
+ H5Dcreate2(group_id, DATASET_STRING_TYPE_TEST_DSET_NAME2, type_id_variable, fspace_id,
+ H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create variable-length string dataset '%s'\n",
+ DATASET_STRING_TYPE_TEST_DSET_NAME2);
+ PART_ERROR(H5Dcreate_variable_string_type);
+ }
+
+ if (dset_id_variable >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Dclose(dset_id_variable);
+ }
+ H5E_END_TRY;
+ dset_id_variable = H5I_INVALID_HID;
+ }
+
+ if ((dset_id_variable = H5Dopen2(group_id, DATASET_STRING_TYPE_TEST_DSET_NAME2, H5P_DEFAULT)) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" failed to open dataset '%s'\n", DATASET_STRING_TYPE_TEST_DSET_NAME2);
+ PART_ERROR(H5Dcreate_variable_string_type);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Dcreate_variable_string_type);
+ }
+ END_MULTIPART;
+
+ TESTING_2("test cleanup");
+
+ if (H5Tclose(type_id_fixed) < 0)
+ TEST_ERROR;
+ if (H5Tclose(type_id_variable) < 0)
+ TEST_ERROR;
+ if (H5Sclose(fspace_id) < 0)
+ TEST_ERROR;
+ if (H5Dclose(dset_id_fixed) < 0)
+ TEST_ERROR;
+ if (H5Dclose(dset_id_variable) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Tclose(type_id_fixed);
+ H5Tclose(type_id_variable);
+ H5Sclose(fspace_id);
+ H5Dclose(dset_id_fixed);
+ H5Dclose(dset_id_variable);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to check that a dataset can be created using
+ * a variety of compound datatypes.
+ */
+static int
+test_create_dataset_compound_types(void)
+{
+ size_t i, j;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t compound_type = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t fspace_id = H5I_INVALID_HID;
+ hid_t type_pool[DATASET_COMPOUND_TYPE_TEST_MAX_SUBTYPES];
+ int num_passes;
+
+ TESTING("dataset creation with compound datatypes");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) {
+ SKIPPED();
+ HDprintf(
+ " API functions for basic file, group, or dataset aren't supported with this connector\n");
+ return 0;
+ }
+
+    /*
+     * Pre-initialize all of the compound field IDs so that the error path
+     * never tries to close an uninitialized ID value, which memory checkers
+     * would likely flag.
+     */
+ for (j = 0; j < DATASET_COMPOUND_TYPE_TEST_MAX_SUBTYPES; j++)
+ type_pool[j] = H5I_INVALID_HID;
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, DATASET_COMPOUND_TYPE_TEST_SUBGROUP_NAME, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container sub-group '%s'\n", DATASET_COMPOUND_TYPE_TEST_SUBGROUP_NAME);
+ goto error;
+ }
+
+ if ((fspace_id = generate_random_dataspace(DATASET_COMPOUND_TYPE_TEST_DSET_RANK, NULL, NULL, FALSE)) < 0)
+ TEST_ERROR;
+
+ num_passes = (rand() % DATASET_COMPOUND_TYPE_TEST_MAX_PASSES) + 1;
+
+ for (i = 0; i < (size_t)num_passes; i++) {
+ size_t num_subtypes;
+ size_t compound_size = 0;
+ size_t next_offset = 0;
+ char dset_name[256];
+
+ /*
+ * Also pre-initialize all of the compound field IDs at the
+ * beginning of each loop so that we don't try to close an
+ * invalid ID.
+ */
+ for (j = 0; j < DATASET_COMPOUND_TYPE_TEST_MAX_SUBTYPES; j++)
+ type_pool[j] = H5I_INVALID_HID;
+
+ num_subtypes = (size_t)(rand() % DATASET_COMPOUND_TYPE_TEST_MAX_SUBTYPES) + 1;
+
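+        /* Create the compound type with a minimal 1-byte size; it is grown with
+         * H5Tset_size() before each H5Tinsert() so that the new member fits at
+         * its offset.
+         */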
+ if ((compound_type = H5Tcreate(H5T_COMPOUND, 1)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create compound datatype\n");
+ goto error;
+ }
+
+ /* Start adding subtypes to the compound type */
+ for (j = 0; j < num_subtypes; j++) {
+ size_t member_size;
+ char member_name[256];
+
+ HDsnprintf(member_name, 256, "member%zu", j);
+
+ if ((type_pool[j] = generate_random_datatype(H5T_NO_CLASS, FALSE)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create compound datatype member %zu\n", j);
+ goto error;
+ }
+
+ if (!(member_size = H5Tget_size(type_pool[j]))) {
+ H5_FAILED();
+ HDprintf(" couldn't get compound member %zu size\n", j);
+ goto error;
+ }
+
+ compound_size += member_size;
+
+ if (H5Tset_size(compound_type, compound_size) < 0)
+ TEST_ERROR;
+
+ if (H5Tinsert(compound_type, member_name, next_offset, type_pool[j]) < 0)
+ TEST_ERROR;
+
+ next_offset += member_size;
+ }
+
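+        /* H5Tpack() removes any internal padding so the members are stored contiguously. */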
+ if (H5Tpack(compound_type) < 0)
+ TEST_ERROR;
+
+ HDsnprintf(dset_name, sizeof(dset_name), "%s%zu", DATASET_COMPOUND_TYPE_TEST_DSET_NAME, i);
+
+ if ((dset_id = H5Dcreate2(group_id, dset_name, compound_type, fspace_id, H5P_DEFAULT, H5P_DEFAULT,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create dataset '%s'\n", dset_name);
+ goto error;
+ }
+
+ if (H5Dclose(dset_id) < 0)
+ TEST_ERROR;
+
+ if ((dset_id = H5Dopen2(group_id, dset_name, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to open dataset '%s'\n", dset_name);
+ goto error;
+ }
+
+ for (j = 0; j < num_subtypes; j++)
+ if (type_pool[j] >= 0 && H5Tclose(type_pool[j]) < 0)
+ TEST_ERROR;
+ if (H5Tclose(compound_type) < 0)
+ TEST_ERROR;
+ if (H5Dclose(dset_id) < 0)
+ TEST_ERROR;
+ }
+
+ if (H5Sclose(fspace_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ for (i = 0; i < DATASET_COMPOUND_TYPE_TEST_MAX_SUBTYPES; i++)
+ H5Tclose(type_pool[i]);
+ H5Tclose(compound_type);
+ H5Sclose(fspace_id);
+ H5Dclose(dset_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to check that a dataset can be created with
+ * enum datatypes.
+ */
+static int
+test_create_dataset_enum_types(void)
+{
+ size_t i;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t dset_id_native = H5I_INVALID_HID, dset_id_non_native = H5I_INVALID_HID;
+ hid_t fspace_id = H5I_INVALID_HID;
+ hid_t enum_native = H5I_INVALID_HID, enum_non_native = H5I_INVALID_HID;
+ const char *enum_type_test_table[] = {"RED", "GREEN", "BLUE", "BLACK", "WHITE",
+ "PURPLE", "ORANGE", "YELLOW", "BROWN"};
+
+ TESTING("dataset creation with enum types");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) {
+ SKIPPED();
+ HDprintf(
+ " API functions for basic file, group, or dataset aren't supported with this connector\n");
+ return 0;
+ }
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, DATASET_ENUM_TYPE_TEST_SUBGROUP_NAME, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container sub-group '%s'\n", DATASET_ENUM_TYPE_TEST_SUBGROUP_NAME);
+ goto error;
+ }
+
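+    /* Create one enum based on a native integer of sizeof(int) bytes and, below,
+     * one built with H5Tenum_create() on the fixed H5T_STD_U32LE base type.
+     */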
+ if ((enum_native = H5Tcreate(H5T_ENUM, sizeof(int))) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create native enum type\n");
+ goto error;
+ }
+
+ for (i = 0; i < ARRAY_LENGTH(enum_type_test_table); i++)
+ if (H5Tenum_insert(enum_native, enum_type_test_table[i], &i) < 0)
+ TEST_ERROR;
+
+ if ((enum_non_native = H5Tenum_create(H5T_STD_U32LE)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create non-native enum type\n");
+ goto error;
+ }
+
+ for (i = 0; i < DATASET_ENUM_TYPE_TEST_NUM_MEMBERS; i++) {
+ char val_name[15];
+
+ HDsprintf(val_name, "%s%zu", DATASET_ENUM_TYPE_TEST_VAL_BASE_NAME, i);
+
+ if (H5Tenum_insert(enum_non_native, val_name, &i) < 0)
+ TEST_ERROR;
+ }
+
+ if ((fspace_id = generate_random_dataspace(DATASET_ENUM_TYPE_TEST_SPACE_RANK, NULL, NULL, FALSE)) < 0)
+ TEST_ERROR;
+
+ if ((dset_id_native = H5Dcreate2(group_id, DATASET_ENUM_TYPE_TEST_DSET_NAME1, enum_native, fspace_id,
+ H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create native enum dataset '%s'\n", DATASET_ENUM_TYPE_TEST_DSET_NAME1);
+ goto error;
+ }
+
+ if ((dset_id_non_native = H5Dcreate2(group_id, DATASET_ENUM_TYPE_TEST_DSET_NAME2, enum_non_native,
+ fspace_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create non-native enum dataset '%s'\n", DATASET_ENUM_TYPE_TEST_DSET_NAME2);
+ goto error;
+ }
+
+ if (H5Dclose(dset_id_native) < 0)
+ TEST_ERROR;
+ if (H5Dclose(dset_id_non_native) < 0)
+ TEST_ERROR;
+
+ if ((dset_id_native = H5Dopen2(group_id, DATASET_ENUM_TYPE_TEST_DSET_NAME1, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to open dataset '%s'\n", DATASET_ENUM_TYPE_TEST_DSET_NAME1);
+ goto error;
+ }
+
+ if ((dset_id_non_native = H5Dopen2(group_id, DATASET_ENUM_TYPE_TEST_DSET_NAME2, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to open dataset '%s'\n", DATASET_ENUM_TYPE_TEST_DSET_NAME2);
+ goto error;
+ }
+
+ if (H5Tclose(enum_native) < 0)
+ TEST_ERROR;
+ if (H5Tclose(enum_non_native) < 0)
+ TEST_ERROR;
+ if (H5Sclose(fspace_id) < 0)
+ TEST_ERROR;
+ if (H5Dclose(dset_id_native) < 0)
+ TEST_ERROR;
+ if (H5Dclose(dset_id_non_native) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Tclose(enum_native);
+ H5Tclose(enum_non_native);
+ H5Sclose(fspace_id);
+ H5Dclose(dset_id_native);
+ H5Dclose(dset_id_non_native);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to check that a dataset can be created using
+ * array datatypes.
+ */
+static int
+test_create_dataset_array_types(void)
+{
+ hsize_t array_dims1[DATASET_ARRAY_TYPE_TEST_RANK1];
+ hsize_t array_dims2[DATASET_ARRAY_TYPE_TEST_RANK2];
+ hsize_t array_dims3[DATASET_ARRAY_TYPE_TEST_RANK3];
+ size_t i;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t dset_id1 = H5I_INVALID_HID, dset_id2 = H5I_INVALID_HID, dset_id3 = H5I_INVALID_HID;
+ hid_t fspace_id = H5I_INVALID_HID;
+ hid_t array_type_id1 = H5I_INVALID_HID, array_type_id2 = H5I_INVALID_HID,
+ array_type_id3 = H5I_INVALID_HID;
+ hid_t array_base_type_id1 = H5I_INVALID_HID, array_base_type_id2 = H5I_INVALID_HID,
+ array_base_type_id3 = H5I_INVALID_HID;
+ hid_t nested_type_id = H5I_INVALID_HID;
+
+ TESTING("dataset creation with array types");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) {
+ SKIPPED();
+ HDprintf(
+ " API functions for basic file, group, or dataset aren't supported with this connector\n");
+ return 0;
+ }
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, DATASET_ARRAY_TYPE_TEST_SUBGROUP_NAME, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container sub-group '%s'\n", DATASET_ARRAY_TYPE_TEST_SUBGROUP_NAME);
+ goto error;
+ }
+
+    /* Test creation of array types with some different base types */
+ for (i = 0; i < DATASET_ARRAY_TYPE_TEST_RANK1; i++)
+ array_dims1[i] = (hsize_t)(rand() % MAX_DIM_SIZE + 1);
+
+ if ((array_base_type_id1 = generate_random_datatype(H5T_ARRAY, FALSE)) < 0)
+ TEST_ERROR;
+
+ if ((array_type_id1 = H5Tarray_create2(array_base_type_id1, DATASET_ARRAY_TYPE_TEST_RANK1, array_dims1)) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create first array type\n");
+ goto error;
+ }
+
+ for (i = 0; i < DATASET_ARRAY_TYPE_TEST_RANK2; i++)
+ array_dims2[i] = (hsize_t)(rand() % MAX_DIM_SIZE + 1);
+
+ if ((array_base_type_id2 = generate_random_datatype(H5T_ARRAY, FALSE)) < 0)
+ TEST_ERROR;
+
+ if ((array_type_id2 = H5Tarray_create2(array_base_type_id2, DATASET_ARRAY_TYPE_TEST_RANK2, array_dims2)) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create second array type\n");
+ goto error;
+ }
+
+ /* Test nested arrays */
+ for (i = 0; i < DATASET_ARRAY_TYPE_TEST_RANK3; i++)
+ array_dims3[i] = (hsize_t)(rand() % MAX_DIM_SIZE + 1);
+
+ if ((array_base_type_id3 = generate_random_datatype(H5T_ARRAY, FALSE)) < 0)
+ TEST_ERROR;
+
+ if ((nested_type_id = H5Tarray_create2(array_base_type_id3, DATASET_ARRAY_TYPE_TEST_RANK3, array_dims3)) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create nested array base type\n");
+ goto error;
+ }
+
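+    /* The second H5Tarray_create2() call uses the first array type as its base
+     * type, producing an array-of-arrays datatype.
+     */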
+ if ((array_type_id3 = H5Tarray_create2(nested_type_id, DATASET_ARRAY_TYPE_TEST_RANK3, array_dims3)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create nested array type\n");
+ goto error;
+ }
+
+ if ((fspace_id = generate_random_dataspace(DATASET_ARRAY_TYPE_TEST_SPACE_RANK, NULL, NULL, FALSE)) < 0)
+ TEST_ERROR;
+
+ if ((dset_id1 = H5Dcreate2(group_id, DATASET_ARRAY_TYPE_TEST_DSET_NAME1, array_type_id1, fspace_id,
+ H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create array type dataset '%s'\n", DATASET_ARRAY_TYPE_TEST_DSET_NAME1);
+ goto error;
+ }
+
+ if ((dset_id2 = H5Dcreate2(group_id, DATASET_ARRAY_TYPE_TEST_DSET_NAME2, array_type_id2, fspace_id,
+ H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create array type dataset '%s'\n", DATASET_ARRAY_TYPE_TEST_DSET_NAME2);
+ goto error;
+ }
+
+ if ((dset_id3 = H5Dcreate2(group_id, DATASET_ARRAY_TYPE_TEST_DSET_NAME3, array_type_id3, fspace_id,
+ H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create nested array type dataset '%s'\n", DATASET_ARRAY_TYPE_TEST_DSET_NAME3);
+ goto error;
+ }
+
+ if (H5Dclose(dset_id1) < 0)
+ TEST_ERROR;
+ if (H5Dclose(dset_id2) < 0)
+ TEST_ERROR;
+ if (H5Dclose(dset_id3) < 0)
+ TEST_ERROR;
+
+ if ((dset_id1 = H5Dopen2(group_id, DATASET_ARRAY_TYPE_TEST_DSET_NAME1, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to open dataset '%s'\n", DATASET_ARRAY_TYPE_TEST_DSET_NAME1);
+ goto error;
+ }
+
+ if ((dset_id2 = H5Dopen2(group_id, DATASET_ARRAY_TYPE_TEST_DSET_NAME2, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to open dataset '%s'\n", DATASET_ARRAY_TYPE_TEST_DSET_NAME2);
+ goto error;
+ }
+
+ if ((dset_id3 = H5Dopen2(group_id, DATASET_ARRAY_TYPE_TEST_DSET_NAME3, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to open dataset '%s'\n", DATASET_ARRAY_TYPE_TEST_DSET_NAME3);
+ goto error;
+ }
+
+ if (H5Tclose(array_base_type_id1) < 0)
+ TEST_ERROR;
+ if (H5Tclose(array_base_type_id2) < 0)
+ TEST_ERROR;
+ if (H5Tclose(array_base_type_id3) < 0)
+ TEST_ERROR;
+ if (H5Tclose(nested_type_id) < 0)
+ TEST_ERROR;
+ if (H5Tclose(array_type_id1) < 0)
+ TEST_ERROR;
+ if (H5Tclose(array_type_id2) < 0)
+ TEST_ERROR;
+ if (H5Tclose(array_type_id3) < 0)
+ TEST_ERROR;
+ if (H5Sclose(fspace_id) < 0)
+ TEST_ERROR;
+ if (H5Dclose(dset_id1) < 0)
+ TEST_ERROR;
+ if (H5Dclose(dset_id2) < 0)
+ TEST_ERROR;
+ if (H5Dclose(dset_id3) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Tclose(array_base_type_id1);
+ H5Tclose(array_base_type_id2);
+ H5Tclose(array_base_type_id3);
+ H5Tclose(nested_type_id);
+ H5Tclose(array_type_id1);
+ H5Tclose(array_type_id2);
+ H5Tclose(array_type_id3);
+ H5Sclose(fspace_id);
+ H5Dclose(dset_id1);
+ H5Dclose(dset_id2);
+ H5Dclose(dset_id3);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to check the functionality of the different
+ * dataset creation properties.
+ */
+static int
+test_create_dataset_creation_properties(void)
+{
+ hsize_t dims[DATASET_CREATION_PROPERTIES_TEST_SHAPE_RANK];
+ hsize_t chunk_dims[DATASET_CREATION_PROPERTIES_TEST_SHAPE_RANK];
+ size_t i;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID, dcpl_id = H5I_INVALID_HID;
+ hid_t dset_dtype = H5I_INVALID_HID, compact_dtype = H5I_INVALID_HID;
+ hid_t fspace_id = H5I_INVALID_HID, compact_fspace_id = H5I_INVALID_HID;
+
+ TESTING_MULTIPART("dataset creation properties");
+
+ /* Make sure the connector supports the API functions being tested */
+    if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+        !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
+        !(vol_cap_flags_g & H5VL_CAP_FLAG_CREATION_ORDER) || !(vol_cap_flags_g & H5VL_CAP_FLAG_TRACK_TIMES) ||
+        !(vol_cap_flags_g & H5VL_CAP_FLAG_FILTERS)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file, group, dataset, creation order, track time, or filter "
+ "pipeline aren't supported with this connector\n");
+ return 0;
+ }
+
+ TESTING_2("test setup");
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, DATASET_CREATION_PROPERTIES_TEST_GROUP_NAME, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create group '%s'\n", DATASET_CREATION_PROPERTIES_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((fspace_id =
+ generate_random_dataspace(DATASET_CREATION_PROPERTIES_TEST_SHAPE_RANK, NULL, dims, FALSE)) < 0)
+ TEST_ERROR;
+ if ((compact_fspace_id =
+ generate_random_dataspace(DATASET_CREATION_PROPERTIES_TEST_SHAPE_RANK, NULL, NULL, TRUE)) < 0)
+ TEST_ERROR;
+
+ /* Set chunk dims to be size of dataset - for filters test */
+ for (i = 0; i < DATASET_CREATION_PROPERTIES_TEST_SHAPE_RANK; i++)
+ chunk_dims[i] = dims[i];
+
+ if ((dset_dtype = generate_random_datatype(H5T_NO_CLASS, FALSE)) < 0)
+ TEST_ERROR;
+ if ((compact_dtype = generate_random_datatype(H5T_NO_CLASS, TRUE)) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ BEGIN_MULTIPART
+ {
+ /* Test the alloc time property */
+ PART_BEGIN(DCPL_alloc_time_test)
+ {
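+            /* The allocation time property controls when file space for the raw data
+             * is allocated: DEFAULT (layout-dependent), EARLY (at creation time),
+             * INCR (incrementally, as data is written), or LATE (at first write).
+             */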
+ H5D_alloc_time_t alloc_times[] = {H5D_ALLOC_TIME_DEFAULT, H5D_ALLOC_TIME_EARLY,
+ H5D_ALLOC_TIME_INCR, H5D_ALLOC_TIME_LATE};
+
+ TESTING_2("dataset storage space allocation time property");
+
+ if ((dcpl_id = H5Pcreate(H5P_DATASET_CREATE)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create DCPL\n");
+ PART_ERROR(DCPL_alloc_time_test);
+ }
+
+ for (i = 0; i < ARRAY_LENGTH(alloc_times); i++) {
+ char name[100];
+
+ if (H5Pset_alloc_time(dcpl_id, alloc_times[i]) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't set alloc time property value\n");
+ PART_ERROR(DCPL_alloc_time_test);
+ }
+
+ HDsprintf(name, "%s%zu", DATASET_CREATION_PROPERTIES_TEST_ALLOC_TIMES_BASE_NAME, i);
+
+ if ((dset_id = H5Dcreate2(group_id, name, dset_dtype, fspace_id, H5P_DEFAULT, dcpl_id,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create dataset '%s'\n", name);
+ PART_ERROR(DCPL_alloc_time_test);
+ }
+
+ if (dset_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Dclose(dset_id);
+ }
+ H5E_END_TRY;
+ dset_id = H5I_INVALID_HID;
+ }
+
+ if ((dset_id = H5Dopen2(group_id, name, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open dataset '%s'\n", name);
+ PART_ERROR(DCPL_alloc_time_test);
+ }
+
+ if (dset_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Dclose(dset_id);
+ }
+ H5E_END_TRY;
+ dset_id = H5I_INVALID_HID;
+ }
+ }
+
+ if (dcpl_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Pclose(dcpl_id);
+ }
+ H5E_END_TRY;
+ dcpl_id = H5I_INVALID_HID;
+ }
+
+ PASSED();
+ }
+ PART_END(DCPL_alloc_time_test);
+
+ /* Test the attribute creation order property */
+ PART_BEGIN(DCPL_attr_crt_order_test)
+ {
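+            /* H5P_CRT_ORDER_TRACKED records the creation order of attributes; adding
+             * H5P_CRT_ORDER_INDEXED also builds an index over that order and requires
+             * tracking to be enabled.
+             */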
+ unsigned creation_orders[] = {H5P_CRT_ORDER_TRACKED,
+ H5P_CRT_ORDER_TRACKED | H5P_CRT_ORDER_INDEXED};
+
+ TESTING_2("attribute creation order property for DCPL");
+
+ if ((dcpl_id = H5Pcreate(H5P_DATASET_CREATE)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create DCPL\n");
+ PART_ERROR(DCPL_attr_crt_order_test);
+ }
+
+ for (i = 0; i < ARRAY_LENGTH(creation_orders); i++) {
+ char name[100];
+
+ if (H5Pset_attr_creation_order(dcpl_id, creation_orders[i]) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't set attribute creation order property\n");
+ PART_ERROR(DCPL_attr_crt_order_test);
+ }
+
+ HDsprintf(name, "%s%zu", DATASET_CREATION_PROPERTIES_TEST_CRT_ORDER_BASE_NAME, i);
+
+ if ((dset_id = H5Dcreate2(group_id, name, dset_dtype, fspace_id, H5P_DEFAULT, dcpl_id,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create dataset '%s'\n", name);
+ PART_ERROR(DCPL_attr_crt_order_test);
+ }
+
+ if (dset_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Dclose(dset_id);
+ }
+ H5E_END_TRY;
+ dset_id = H5I_INVALID_HID;
+ }
+
+ if ((dset_id = H5Dopen2(group_id, name, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open dataset '%s'\n", name);
+ PART_ERROR(DCPL_attr_crt_order_test);
+ }
+
+ if (dset_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Dclose(dset_id);
+ }
+ H5E_END_TRY;
+ dset_id = H5I_INVALID_HID;
+ }
+ }
+
+ if (dcpl_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Pclose(dcpl_id);
+ }
+ H5E_END_TRY;
+ dcpl_id = H5I_INVALID_HID;
+ }
+
+ PASSED();
+ }
+ PART_END(DCPL_attr_crt_order_test);
+
+ /* Test the attribute phase change property */
+ PART_BEGIN(DCPL_attr_phase_change_test)
+ {
+ TESTING_2("attribute phase change property for DCPL");
+
+ if ((dcpl_id = H5Pcreate(H5P_DATASET_CREATE)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create DCPL\n");
+ PART_ERROR(DCPL_attr_phase_change_test);
+ }
+
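+            /* max_compact is the most attributes stored compactly in the object header
+             * before switching to dense storage; min_dense is the fewest attributes kept
+             * in dense storage before switching back to compact.
+             */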
+ if (H5Pset_attr_phase_change(dcpl_id, DATASET_CREATION_PROPERTIES_TEST_MAX_COMPACT,
+ DATASET_CREATION_PROPERTIES_TEST_MIN_DENSE) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't set attribute phase change property\n");
+ PART_ERROR(DCPL_attr_phase_change_test);
+ }
+
+ if ((dset_id = H5Dcreate2(group_id, DATASET_CREATION_PROPERTIES_TEST_PHASE_CHANGE_DSET_NAME,
+ dset_dtype, fspace_id, H5P_DEFAULT, dcpl_id, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create dataset '%s'\n",
+ DATASET_CREATION_PROPERTIES_TEST_PHASE_CHANGE_DSET_NAME);
+ PART_ERROR(DCPL_attr_phase_change_test);
+ }
+
+ if (dset_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Dclose(dset_id);
+ }
+ H5E_END_TRY;
+ dset_id = H5I_INVALID_HID;
+ }
+
+ if ((dset_id = H5Dopen2(group_id, DATASET_CREATION_PROPERTIES_TEST_PHASE_CHANGE_DSET_NAME,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open dataset '%s'\n",
+ DATASET_CREATION_PROPERTIES_TEST_PHASE_CHANGE_DSET_NAME);
+ PART_ERROR(DCPL_attr_phase_change_test);
+ }
+
+ if (dset_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Dclose(dset_id);
+ }
+ H5E_END_TRY;
+ dset_id = H5I_INVALID_HID;
+ }
+ if (dcpl_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Pclose(dcpl_id);
+ }
+ H5E_END_TRY;
+ dcpl_id = H5I_INVALID_HID;
+ }
+
+ PASSED();
+ }
+ PART_END(DCPL_attr_phase_change_test);
+
+ /* Test the fill time property */
+ PART_BEGIN(DCPL_fill_time_property_test)
+ {
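+            /* The fill time property controls when fill values are written: IFSET
+             * (at allocation, only if a fill value was defined), ALLOC (always at
+             * allocation), or NEVER.
+             */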
+ H5D_fill_time_t fill_times[] = {H5D_FILL_TIME_IFSET, H5D_FILL_TIME_ALLOC, H5D_FILL_TIME_NEVER};
+
+ TESTING_2("dataset fill time property");
+
+ if ((dcpl_id = H5Pcreate(H5P_DATASET_CREATE)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create DCPL\n");
+ PART_ERROR(DCPL_fill_time_property_test);
+ }
+
+ for (i = 0; i < ARRAY_LENGTH(fill_times); i++) {
+ char name[100];
+
+ if (H5Pset_fill_time(dcpl_id, fill_times[i]) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't set dataset fill time property\n");
+ PART_ERROR(DCPL_fill_time_property_test);
+ }
+
+ HDsprintf(name, "%s%zu", DATASET_CREATION_PROPERTIES_TEST_FILL_TIMES_BASE_NAME, i);
+
+ if ((dset_id = H5Dcreate2(group_id, name, dset_dtype, fspace_id, H5P_DEFAULT, dcpl_id,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create dataset '%s'\n", name);
+ PART_ERROR(DCPL_fill_time_property_test);
+ }
+
+ if (dset_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Dclose(dset_id);
+ }
+ H5E_END_TRY;
+ dset_id = H5I_INVALID_HID;
+ }
+
+ if ((dset_id = H5Dopen2(group_id, name, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open dataset '%s'\n", name);
+ PART_ERROR(DCPL_fill_time_property_test);
+ }
+
+ if (dset_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Dclose(dset_id);
+ }
+ H5E_END_TRY;
+ dset_id = H5I_INVALID_HID;
+ }
+ }
+
+ if (dcpl_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Pclose(dcpl_id);
+ }
+ H5E_END_TRY;
+ dcpl_id = H5I_INVALID_HID;
+ }
+
+ PASSED();
+ }
+ PART_END(DCPL_fill_time_property_test);
+
+ /* TODO: Test the fill value property */
+
+ /* Test filters */
+ PART_BEGIN(DCPL_filters_test)
+ {
+ TESTING_2("dataset filters");
+
+ if ((dcpl_id = H5Pcreate(H5P_DATASET_CREATE)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create DCPL\n");
+ PART_ERROR(DCPL_filters_test);
+ }
+
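+            /* Filters can only be applied to chunked datasets, so chunking is enabled
+             * first; the chunk shape here matches the full dataset shape.
+             */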
+ if (H5Pset_chunk(dcpl_id, DATASET_CREATION_PROPERTIES_TEST_SHAPE_RANK, chunk_dims) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't set chunking on DCPL\n");
+ PART_ERROR(DCPL_filters_test);
+ }
+
+ /* Set all of the available filters on the DCPL */
+ if (H5Pset_deflate(dcpl_id, 7) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't set deflate filter on DCPL\n");
+ PART_ERROR(DCPL_filters_test);
+ }
+ if (H5Pset_shuffle(dcpl_id) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't set shuffle filter on DCPL\n");
+ PART_ERROR(DCPL_filters_test);
+ }
+ if (H5Pset_fletcher32(dcpl_id) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't set fletcher32 filter on DCPL\n");
+ PART_ERROR(DCPL_filters_test);
+ }
+ if (H5Pset_nbit(dcpl_id) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't set nbit filter on DCPL\n");
+ PART_ERROR(DCPL_filters_test);
+ }
+ if (H5Pset_scaleoffset(dcpl_id, H5Z_SO_FLOAT_ESCALE, 2) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't set scaleoffset filter on DCPL\n");
+ PART_ERROR(DCPL_filters_test);
+ }
+
+ /*
+ * Use a simple datatype, as not all filters support all datatypes.
+ */
+ if ((dset_id = H5Dcreate2(group_id, DATASET_CREATION_PROPERTIES_TEST_FILTERS_DSET_NAME,
+ H5T_NATIVE_INT, fspace_id, H5P_DEFAULT, dcpl_id, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create dataset '%s'\n",
+ DATASET_CREATION_PROPERTIES_TEST_FILTERS_DSET_NAME);
+ PART_ERROR(DCPL_filters_test);
+ }
+
+ if (dset_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Dclose(dset_id);
+ }
+ H5E_END_TRY;
+ dset_id = H5I_INVALID_HID;
+ }
+
+ if ((dset_id = H5Dopen2(group_id, DATASET_CREATION_PROPERTIES_TEST_FILTERS_DSET_NAME,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open dataset '%s'\n",
+ DATASET_CREATION_PROPERTIES_TEST_FILTERS_DSET_NAME);
+ PART_ERROR(DCPL_filters_test);
+ }
+
+ if (dset_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Dclose(dset_id);
+ }
+ H5E_END_TRY;
+ dset_id = H5I_INVALID_HID;
+ }
+
+ if (dcpl_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Pclose(dcpl_id);
+ }
+ H5E_END_TRY;
+ dcpl_id = H5I_INVALID_HID;
+ }
+
+ PASSED();
+ }
+ PART_END(DCPL_filters_test);
+
+ /* Test the dataset storage layout property */
+ PART_BEGIN(DCPL_storage_layout_test)
+ {
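+            /* Compact layout stores the raw data in the object header (limited to about
+             * 64 KiB), so it uses the smaller dataspace and datatype generated above;
+             * chunked layout additionally needs chunk dimensions to be set.
+             */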
+ H5D_layout_t layouts[] = {H5D_COMPACT, H5D_CONTIGUOUS, H5D_CHUNKED};
+
+ TESTING_2("dataset storage layouts");
+
+ if ((dcpl_id = H5Pcreate(H5P_DATASET_CREATE)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create DCPL\n");
+ PART_ERROR(DCPL_storage_layout_test);
+ }
+
+ for (i = 0; i < ARRAY_LENGTH(layouts); i++) {
+ char name[100];
+
+ if (H5Pset_layout(dcpl_id, layouts[i]) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't set storage layout property\n");
+ PART_ERROR(DCPL_storage_layout_test);
+ }
+
+ if (H5D_CHUNKED == layouts[i]) {
+ hsize_t local_chunk_dims[DATASET_CREATION_PROPERTIES_TEST_CHUNK_DIM_RANK];
+ size_t j;
+
+ for (j = 0; j < DATASET_CREATION_PROPERTIES_TEST_CHUNK_DIM_RANK; j++)
+ local_chunk_dims[j] = (hsize_t)(rand() % (int)dims[j] + 1);
+
+ if (H5Pset_chunk(dcpl_id, DATASET_CREATION_PROPERTIES_TEST_CHUNK_DIM_RANK,
+ local_chunk_dims) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't set chunk dimensionality\n");
+ PART_ERROR(DCPL_storage_layout_test);
+ }
+ }
+
+ HDsprintf(name, "%s%zu", DATASET_CREATION_PROPERTIES_TEST_LAYOUTS_BASE_NAME, i);
+
+ if ((dset_id =
+ H5Dcreate2(group_id, name, (H5D_COMPACT == layouts[i]) ? compact_dtype : dset_dtype,
+ (H5D_COMPACT == layouts[i]) ? compact_fspace_id : fspace_id, H5P_DEFAULT,
+ dcpl_id, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create dataset '%s'\n", name);
+ PART_ERROR(DCPL_storage_layout_test);
+ }
+
+ if (dset_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Dclose(dset_id);
+ }
+ H5E_END_TRY;
+ dset_id = H5I_INVALID_HID;
+ }
+
+ if ((dset_id = H5Dopen2(group_id, name, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open dataset '%s'\n", name);
+ PART_ERROR(DCPL_storage_layout_test);
+ }
+
+ if (dset_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Dclose(dset_id);
+ }
+ H5E_END_TRY;
+ dset_id = H5I_INVALID_HID;
+ }
+ }
+
+ if (dcpl_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Pclose(dcpl_id);
+ }
+ H5E_END_TRY;
+ dcpl_id = H5I_INVALID_HID;
+ }
+
+ PASSED();
+ }
+ PART_END(DCPL_storage_layout_test);
+
+ /* Test the "track object times" property */
+ PART_BEGIN(DCPL_track_obj_times_test)
+ {
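+            /* H5Pset_obj_track_times() controls whether access, modification, change,
+             * and birth times are recorded in the object header.
+             */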
+ TESTING_2("object time tracking property for DCPL");
+
+ if ((dcpl_id = H5Pcreate(H5P_DATASET_CREATE)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create DCPL\n");
+ PART_ERROR(DCPL_track_obj_times_test);
+ }
+
+ if (H5Pset_obj_track_times(dcpl_id, true) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't set object time tracking property\n");
+ PART_ERROR(DCPL_track_obj_times_test);
+ }
+
+ if ((dset_id = H5Dcreate2(group_id, DATASET_CREATION_PROPERTIES_TEST_TRACK_TIMES_YES_DSET_NAME,
+ dset_dtype, fspace_id, H5P_DEFAULT, dcpl_id, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create dataset '%s'\n",
+ DATASET_CREATION_PROPERTIES_TEST_TRACK_TIMES_YES_DSET_NAME);
+ PART_ERROR(DCPL_track_obj_times_test);
+ }
+
+ if (dset_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Dclose(dset_id);
+ }
+ H5E_END_TRY;
+ dset_id = H5I_INVALID_HID;
+ }
+
+ if ((dset_id = H5Dopen2(group_id, DATASET_CREATION_PROPERTIES_TEST_TRACK_TIMES_YES_DSET_NAME,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open dataset '%s'\n",
+ DATASET_CREATION_PROPERTIES_TEST_TRACK_TIMES_YES_DSET_NAME);
+ PART_ERROR(DCPL_track_obj_times_test);
+ }
+
+ if (dset_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Dclose(dset_id);
+ }
+ H5E_END_TRY;
+ dset_id = H5I_INVALID_HID;
+ }
+
+ if (H5Pset_obj_track_times(dcpl_id, false) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't set object time tracking property\n");
+ PART_ERROR(DCPL_track_obj_times_test);
+ }
+
+ if ((dset_id = H5Dcreate2(group_id, DATASET_CREATION_PROPERTIES_TEST_TRACK_TIMES_NO_DSET_NAME,
+ dset_dtype, fspace_id, H5P_DEFAULT, dcpl_id, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create dataset '%s'\n",
+ DATASET_CREATION_PROPERTIES_TEST_TRACK_TIMES_NO_DSET_NAME);
+ PART_ERROR(DCPL_track_obj_times_test);
+ }
+
+ if (dset_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Dclose(dset_id);
+ }
+ H5E_END_TRY;
+ dset_id = H5I_INVALID_HID;
+ }
+
+ if ((dset_id = H5Dopen2(group_id, DATASET_CREATION_PROPERTIES_TEST_TRACK_TIMES_NO_DSET_NAME,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open dataset '%s'\n",
+ DATASET_CREATION_PROPERTIES_TEST_TRACK_TIMES_NO_DSET_NAME);
+ PART_ERROR(DCPL_track_obj_times_test);
+ }
+
+ if (dset_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Dclose(dset_id);
+ }
+ H5E_END_TRY;
+ dset_id = H5I_INVALID_HID;
+ }
+ if (dcpl_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Pclose(dcpl_id);
+ }
+ H5E_END_TRY;
+ dcpl_id = H5I_INVALID_HID;
+ }
+
+ PASSED();
+ }
+ PART_END(DCPL_track_obj_times_test);
+ }
+ END_MULTIPART;
+
+ TESTING_2("test cleanup");
+
+ if (H5Sclose(compact_fspace_id) < 0)
+ TEST_ERROR;
+ if (H5Sclose(fspace_id) < 0)
+ TEST_ERROR;
+ if (H5Tclose(compact_dtype) < 0)
+ TEST_ERROR;
+ if (H5Tclose(dset_dtype) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(compact_fspace_id);
+ H5Sclose(fspace_id);
+ H5Tclose(compact_dtype);
+ H5Tclose(dset_dtype);
+ H5Dclose(dset_id);
+ H5Pclose(dcpl_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to create many small datasets (100,000)
+ */
+static int
+test_create_many_dataset(void)
+{
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t dataspace_id = H5I_INVALID_HID;
+ char dset_name[DSET_NAME_BUF_SIZE];
+ unsigned char data;
+ unsigned int i;
+
+ TESTING("creating many datasets");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) {
+ SKIPPED();
+ HDprintf(
+ " API functions for basic file, group, or dataset aren't supported with this connector\n");
+ return 0;
+ }
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open group '%s'\n", DATASET_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, DATASET_MANY_CREATE_GROUP_NAME, H5P_DEFAULT, H5P_DEFAULT,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container group '%s'\n", DATASET_MANY_CREATE_GROUP_NAME);
+ goto error;
+ }
+
+ if ((dataspace_id = H5Screate(H5S_SCALAR)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create scalar data space\n");
+ goto error;
+ }
+
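+    /* Create DATASET_NUMB scalar unsigned char datasets, writing each one's index
+     * modulo 256 as its single element, and print progress along the way.
+     */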
+ HDprintf("\n");
+ for (i = 0; i < DATASET_NUMB; i++) {
+ HDprintf("\r %u/%u", i + 1, DATASET_NUMB);
+        HDsnprintf(dset_name, DSET_NAME_BUF_SIZE, "dset_%02u", i);
+ data = i % 256;
+
+ if ((dset_id = H5Dcreate2(group_id, dset_name, H5T_NATIVE_UCHAR, dataspace_id, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create dataset '%s'\n", dset_name);
+ goto error;
+ }
+
+ if (H5Dwrite(dset_id, H5T_NATIVE_UCHAR, H5S_ALL, H5S_ALL, H5P_DEFAULT, &data) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't write to dataset '%s'\n", dset_name);
+ goto error;
+ }
+
+ if (H5Dclose(dset_id) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't close dataset '%s'\n", dset_name);
+ goto error;
+ }
+ }
+
+ if (H5Sclose(dataspace_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Dclose(dset_id);
+ H5Sclose(dataspace_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to check that re-opening a dataset with
+ * H5Dopen succeeds.
+ */
+static int
+test_open_dataset(void)
+{
+ TESTING("H5Dopen");
+
+ SKIPPED();
+
+ return 0;
+}
+
+/*
+ * A test to check that H5Dopen fails when it is
+ * passed invalid parameters.
+ */
+static int
+test_open_dataset_invalid_params(void)
+{
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t dset_dtype = H5I_INVALID_HID;
+ hid_t fspace_id = H5I_INVALID_HID;
+
+ TESTING_MULTIPART("H5Dopen with invalid parameters");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) {
+ SKIPPED();
+ HDprintf(
+ " API functions for basic file, group, or dataset aren't supported with this connector\n");
+ return 0;
+ }
+
+ TESTING_2("test setup");
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open group '%s'\n", DATASET_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, DATASET_OPEN_INVALID_PARAMS_GROUP_NAME, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container group '%s'\n", DATASET_OPEN_INVALID_PARAMS_GROUP_NAME);
+ goto error;
+ }
+
+ if ((fspace_id = generate_random_dataspace(DATASET_OPEN_INVALID_PARAMS_SPACE_RANK, NULL, NULL, FALSE)) <
+ 0)
+ TEST_ERROR;
+
+ if ((dset_dtype = generate_random_datatype(H5T_NO_CLASS, FALSE)) < 0)
+ TEST_ERROR;
+
+ if ((dset_id = H5Dcreate2(group_id, DATASET_OPEN_INVALID_PARAMS_DSET_NAME, dset_dtype, fspace_id,
+ H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create dataset '%s'\n", DATASET_OPEN_INVALID_PARAMS_DSET_NAME);
+ goto error;
+ }
+
+ if (H5Dclose(dset_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ BEGIN_MULTIPART
+ {
+ PART_BEGIN(H5Dopen_invalid_loc_id)
+ {
+ TESTING_2("H5Dopen with an invalid loc_id");
+
+ H5E_BEGIN_TRY
+ {
+ dset_id = H5Dopen2(H5I_INVALID_HID, DATASET_OPEN_INVALID_PARAMS_DSET_NAME, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (dset_id >= 0) {
+ H5_FAILED();
+ HDprintf(" opened dataset using H5Dopen2 with an invalid loc_id!\n");
+ H5Dclose(dset_id);
+ PART_ERROR(H5Dopen_invalid_loc_id);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Dopen_invalid_loc_id);
+
+ PART_BEGIN(H5Dopen_invalid_dataset_name)
+ {
+ TESTING_2("H5Dopen with an invalid dataset name");
+
+ H5E_BEGIN_TRY
+ {
+ dset_id = H5Dopen2(group_id, NULL, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (dset_id >= 0) {
+ H5_FAILED();
+ HDprintf(" opened dataset using H5Dopen2 with a NULL dataset name!\n");
+ H5Dclose(dset_id);
+ PART_ERROR(H5Dopen_invalid_dataset_name);
+ }
+
+ H5E_BEGIN_TRY
+ {
+ dset_id = H5Dopen2(group_id, "", H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (dset_id >= 0) {
+ H5_FAILED();
+ HDprintf(" opened dataset using H5Dopen2 with an invalid dataset name of ''!\n");
+ H5Dclose(dset_id);
+ PART_ERROR(H5Dopen_invalid_dataset_name);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Dopen_invalid_dataset_name);
+
+ PART_BEGIN(H5Dopen_invalid_dapl)
+ {
+ TESTING_2("H5Dopen with an invalid DAPL");
+
+ H5E_BEGIN_TRY
+ {
+ dset_id = H5Dopen2(group_id, DATASET_OPEN_INVALID_PARAMS_DSET_NAME, H5I_INVALID_HID);
+ }
+ H5E_END_TRY;
+
+ if (dset_id >= 0) {
+ H5_FAILED();
+ HDprintf(" opened dataset using H5Dopen2 with an invalid DAPL!\n");
+ H5Dclose(dset_id);
+ PART_ERROR(H5Dopen_invalid_dapl);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Dopen_invalid_dapl);
+ }
+ END_MULTIPART;
+
+ TESTING_2("test cleanup");
+
+ if (H5Sclose(fspace_id) < 0)
+ TEST_ERROR;
+ if (H5Tclose(dset_dtype) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(fspace_id);
+ H5Tclose(dset_dtype);
+ H5Dclose(dset_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to check that H5Dclose fails when it is
+ * passed an invalid dataset ID.
+ */
+static int
+test_close_dataset_invalid_params(void)
+{
+ herr_t err_ret = -1;
+ hid_t file_id = H5I_INVALID_HID;
+
+ TESTING("H5Dclose with an invalid dataset ID");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file or dataset aren't supported with this connector\n");
+ return 0;
+ }
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Dclose(H5I_INVALID_HID);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Dclose succeeded with an invalid dataset ID!\n");
+ goto error;
+ }
+
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to check that valid copies of a dataset's dataspace
+ * and datatype can be retrieved with H5Dget_space and
+ * H5Dget_type, respectively.
+ */
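+/*
+ * Note: H5Dget_type and H5Dget_space return copies of the dataset's datatype
+ * and dataspace, so the identifiers they return must be closed with H5Tclose
+ * and H5Sclose, as the cleanup code in this test does.
+ */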
+static int
+test_get_dataset_space_and_type(void)
+{
+ hsize_t dset_dims[DATASET_GET_SPACE_TYPE_TEST_SPACE_RANK];
+ size_t i;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t dset_dtype = H5I_INVALID_HID;
+ hid_t dset_space_id = H5I_INVALID_HID;
+ hid_t tmp_type_id = H5I_INVALID_HID;
+ hid_t tmp_space_id = H5I_INVALID_HID;
+
+ TESTING_MULTIPART("retrieval of a dataset's dataspace and datatype");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
+ SKIPPED();
+ HDprintf(
+ " API functions for basic file, group, or dataset aren't supported with this connector\n");
+ return 0;
+ }
+
+ TESTING_2("test setup");
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, DATASET_GET_SPACE_TYPE_TEST_GROUP_NAME, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container group '%s'\n", DATASET_GET_SPACE_TYPE_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((dset_space_id =
+ generate_random_dataspace(DATASET_GET_SPACE_TYPE_TEST_SPACE_RANK, NULL, dset_dims, FALSE)) < 0)
+ TEST_ERROR;
+
+ if ((dset_dtype = generate_random_datatype(H5T_NO_CLASS, FALSE)) < 0)
+ TEST_ERROR;
+
+ if ((dset_id = H5Dcreate2(group_id, DATASET_GET_SPACE_TYPE_TEST_DSET_NAME, dset_dtype, dset_space_id,
+ H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create dataset '%s'\n", DATASET_GET_SPACE_TYPE_TEST_DSET_NAME);
+ goto error;
+ }
+
+ PASSED();
+
+ BEGIN_MULTIPART
+ {
+ /* Retrieve the dataset's datatype and dataspace and verify them */
+ PART_BEGIN(H5Dget_type)
+ {
+ TESTING_2("H5Dget_type");
+
+ if ((tmp_type_id = H5Dget_type(dset_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't retrieve dataset's datatype\n");
+ PART_ERROR(H5Dget_type);
+ }
+
+ {
+ htri_t types_equal = H5Tequal(tmp_type_id, dset_dtype);
+
+ if (types_equal < 0) {
+ H5_FAILED();
+ HDprintf(" datatype was invalid\n");
+ PART_ERROR(H5Dget_type);
+ }
+
+ if (!types_equal) {
+ H5_FAILED();
+ HDprintf(" dataset's datatype did not match\n");
+ PART_ERROR(H5Dget_type);
+ }
+ }
+
+ PASSED();
+ }
+ PART_END(H5Dget_type);
+
+ PART_BEGIN(H5Dget_space)
+ {
+ TESTING_2("H5Dget_space");
+
+ if ((tmp_space_id = H5Dget_space(dset_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't retrieve dataset's dataspace\n");
+ PART_ERROR(H5Dget_space);
+ }
+
+ {
+ hsize_t space_dims[DATASET_GET_SPACE_TYPE_TEST_SPACE_RANK];
+
+ if (H5Sget_simple_extent_dims(tmp_space_id, space_dims, NULL) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't retrieve dataspace dimensions\n");
+ PART_ERROR(H5Dget_space);
+ }
+
+ for (i = 0; i < DATASET_GET_SPACE_TYPE_TEST_SPACE_RANK; i++)
+ if (space_dims[i] != dset_dims[i]) {
+ H5_FAILED();
+ HDprintf(" dataset's dataspace dims didn't match\n");
+ PART_ERROR(H5Dget_space);
+ }
+ }
+
+ PASSED();
+ }
+ PART_END(H5Dget_space);
+
+ /* Now close the dataset and verify that these calls still work after
+ * re-opening the dataset instead of creating it.
+ */
+ if (dset_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Dclose(dset_id);
+ }
+ H5E_END_TRY;
+ dset_id = H5I_INVALID_HID;
+ }
+ if (tmp_type_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Tclose(tmp_type_id);
+ }
+ H5E_END_TRY;
+ tmp_type_id = H5I_INVALID_HID;
+ }
+ if (tmp_space_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(tmp_space_id);
+ }
+ H5E_END_TRY;
+ tmp_space_id = H5I_INVALID_HID;
+ }
+
+ PART_BEGIN(H5Dget_type_reopened)
+ {
+ TESTING_2("H5Dget_type after re-opening a dataset");
+
+ if ((dset_id = H5Dopen2(group_id, DATASET_GET_SPACE_TYPE_TEST_DSET_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open dataset '%s'\n", DATASET_GET_SPACE_TYPE_TEST_DSET_NAME);
+ PART_ERROR(H5Dget_type_reopened);
+ }
+
+ if ((tmp_type_id = H5Dget_type(dset_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't retrieve dataset's datatype\n");
+ PART_ERROR(H5Dget_type_reopened);
+ }
+
+ {
+ htri_t types_equal = H5Tequal(tmp_type_id, dset_dtype);
+
+ if (types_equal < 0) {
+ H5_FAILED();
+ HDprintf(" datatype was invalid\n");
+ PART_ERROR(H5Dget_type_reopened);
+ }
+
+ if (!types_equal) {
+ H5_FAILED();
+ HDprintf(" dataset's datatype did not match\n");
+ PART_ERROR(H5Dget_type_reopened);
+ }
+ }
+
+ if (dset_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Dclose(dset_id);
+ }
+ H5E_END_TRY;
+ dset_id = H5I_INVALID_HID;
+ }
+
+ PASSED();
+ }
+ PART_END(H5Dget_type_reopened);
+
+ PART_BEGIN(H5Dget_space_reopened)
+ {
+ TESTING_2("H5Dget_space after re-opening a dataset");
+
+ if ((dset_id = H5Dopen2(group_id, DATASET_GET_SPACE_TYPE_TEST_DSET_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open dataset '%s'\n", DATASET_GET_SPACE_TYPE_TEST_DSET_NAME);
+ PART_ERROR(H5Dget_space_reopened);
+ }
+
+ if ((tmp_space_id = H5Dget_space(dset_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't retrieve dataset's dataspace\n");
+ PART_ERROR(H5Dget_space_reopened);
+ }
+
+ {
+ hsize_t space_dims[DATASET_GET_SPACE_TYPE_TEST_SPACE_RANK];
+
+ if (H5Sget_simple_extent_dims(tmp_space_id, space_dims, NULL) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't retrieve dataspace dimensions\n");
+ PART_ERROR(H5Dget_space_reopened);
+ }
+
+ for (i = 0; i < DATASET_GET_SPACE_TYPE_TEST_SPACE_RANK; i++) {
+ if (space_dims[i] != dset_dims[i]) {
+ H5_FAILED();
+ HDprintf(" dataset's dataspace dims didn't match!\n");
+ PART_ERROR(H5Dget_space_reopened);
+ }
+ }
+ }
+
+ if (dset_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Dclose(dset_id);
+ }
+ H5E_END_TRY;
+ dset_id = H5I_INVALID_HID;
+ }
+
+ PASSED();
+ }
+ PART_END(H5Dget_space_reopened);
+ }
+ END_MULTIPART;
+
+ TESTING_2("test cleanup");
+
+ if (H5Sclose(tmp_space_id) < 0)
+ TEST_ERROR;
+ if (H5Sclose(dset_space_id) < 0)
+ TEST_ERROR;
+ if (H5Tclose(tmp_type_id) < 0)
+ TEST_ERROR;
+ if (H5Tclose(dset_dtype) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(tmp_space_id);
+ H5Sclose(dset_space_id);
+ H5Tclose(tmp_type_id);
+ H5Tclose(dset_dtype);
+ H5Dclose(dset_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to check that a dataset's dataspace and datatype
+ * can't be retrieved when H5Dget_space and H5Dget_type are passed
+ * invalid parameters, respectively.
+ */
+static int
+test_get_dataset_space_and_type_invalid_params(void)
+{
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t dset_dtype = H5I_INVALID_HID;
+ hid_t dset_space_id = H5I_INVALID_HID;
+ hid_t tmp_type_id = H5I_INVALID_HID;
+ hid_t tmp_space_id = H5I_INVALID_HID;
+
+ TESTING_MULTIPART("H5Dget_type/H5Dget_space with invalid parameters");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
+ SKIPPED();
+ HDprintf(
+ " API functions for basic file, group, or dataset aren't supported with this connector\n");
+ return 0;
+ }
+
+ TESTING_2("test setup");
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, DATASET_GET_SPACE_TYPE_INVALID_PARAMS_TEST_GROUP_NAME,
+ H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container group '%s'\n",
+ DATASET_GET_SPACE_TYPE_INVALID_PARAMS_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((dset_space_id = generate_random_dataspace(DATASET_GET_SPACE_TYPE_INVALID_PARAMS_TEST_SPACE_RANK,
+ NULL, NULL, FALSE)) < 0)
+ TEST_ERROR;
+
+ if ((dset_dtype = generate_random_datatype(H5T_NO_CLASS, FALSE)) < 0)
+ TEST_ERROR;
+
+ if ((dset_id = H5Dcreate2(group_id, DATASET_GET_SPACE_TYPE_INVALID_PARAMS_TEST_DSET_NAME, dset_dtype,
+ dset_space_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create dataset '%s'\n", DATASET_GET_SPACE_TYPE_INVALID_PARAMS_TEST_DSET_NAME);
+ goto error;
+ }
+
+ PASSED();
+
+ BEGIN_MULTIPART
+ {
+ PART_BEGIN(H5Dget_type_invalid_dset_id)
+ {
+ TESTING_2("H5Dget_type with an invalid dset_id");
+
+ H5E_BEGIN_TRY
+ {
+ tmp_type_id = H5Dget_type(H5I_INVALID_HID);
+ }
+ H5E_END_TRY;
+
+ if (tmp_type_id >= 0) {
+ H5_FAILED();
+ HDprintf(" retrieved copy of dataset's datatype using an invalid dataset ID!\n");
+ PART_ERROR(H5Dget_type_invalid_dset_id);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Dget_type_invalid_dset_id);
+
+ PART_BEGIN(H5Dget_space_invalid_dset_id)
+ {
+ TESTING_2("H5Dget_space with an invalid dset_id");
+
+ H5E_BEGIN_TRY
+ {
+ tmp_space_id = H5Dget_space(H5I_INVALID_HID);
+ }
+ H5E_END_TRY;
+
+ if (tmp_space_id >= 0) {
+ H5_FAILED();
+ HDprintf(" retrieved copy of dataset's dataspace using an invalid dataset ID!\n");
+ PART_ERROR(H5Dget_space_invalid_dset_id);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Dget_space_invalid_dset_id);
+ }
+ END_MULTIPART;
+
+ TESTING_2("test cleanup");
+
+ if (H5Sclose(dset_space_id) < 0)
+ TEST_ERROR;
+ if (H5Tclose(dset_dtype) < 0)
+ TEST_ERROR;
+ if (H5Dclose(dset_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(tmp_space_id);
+ H5Sclose(dset_space_id);
+ H5Tclose(tmp_type_id);
+ H5Tclose(dset_dtype);
+ H5Dclose(dset_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test for H5Dget_space_status.
+ */
+static int
+test_get_dataset_space_status(void)
+{
+ TESTING("H5Dget_space_status");
+
+ SKIPPED();
+
+ return 0;
+}
+
+/*
+ * A test to check that a dataset's dataspace allocation
+ * status can't be retrieved with H5Dget_space_status when
+ * it is passed invalid parameters.
+ */
+static int
+test_get_dataset_space_status_invalid_params(void)
+{
+ TESTING("H5Dget_space_status with invalid parameters");
+
+ SKIPPED();
+
+ return 0;
+}
+
+/*
+ * A test to check that a DCPL used for dataset creation
+ * can be persisted and that a valid copy of that DCPL can
+ * be retrieved later with a call to H5Dget_create_plist.
+ * Also tests that a valid copy of a DAPL used for dataset
+ * access can be retrieved with a call to H5Dget_access_plist.
+ */
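+/*
+ * Note: H5Dget_create_plist and H5Dget_access_plist likewise return copies of
+ * the dataset's property lists, which must be released with H5Pclose. A minimal
+ * sketch of the DCPL round-trip exercised below, assuming an already-open
+ * dataset identifier dset whose dataspace rank is RANK:
+ *
+ *     hid_t   dcpl = H5Dget_create_plist(dset);
+ *     hsize_t cdims[RANK];
+ *     int     chunk_rank = H5Pget_chunk(dcpl, RANK, cdims);  (negative unless the layout is chunked)
+ *     H5Pclose(dcpl);
+ */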
+static int
+test_dataset_property_lists(void)
+{
+ const char *path_prefix = "/test_prefix";
+ hsize_t dims[DATASET_PROPERTY_LIST_TEST_SPACE_RANK];
+ hsize_t chunk_dims[DATASET_PROPERTY_LIST_TEST_SPACE_RANK];
+ size_t i;
+ herr_t err_ret = -1;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t dset_id1 = H5I_INVALID_HID, dset_id2 = H5I_INVALID_HID, dset_id3 = H5I_INVALID_HID,
+ dset_id4 = H5I_INVALID_HID;
+ hid_t dcpl_id1 = H5I_INVALID_HID, dcpl_id2 = H5I_INVALID_HID;
+ hid_t dapl_id1 = H5I_INVALID_HID, dapl_id2 = H5I_INVALID_HID;
+ hid_t dset_dtype1 = H5I_INVALID_HID, dset_dtype2 = H5I_INVALID_HID, dset_dtype3 = H5I_INVALID_HID,
+ dset_dtype4 = H5I_INVALID_HID;
+ hid_t space_id = H5I_INVALID_HID;
+ char *tmp_prefix = NULL;
+ char vol_name[5];
+
+ TESTING_MULTIPART("dataset property list operations");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GET_PLIST)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file, group, dataset, or get property list aren't supported "
+ "with this connector\n");
+ return 0;
+ }
+
+ TESTING_2("test setup");
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ /* The DAOS VOL connector can automatically select a chunked layout, so retrieve the
+ * connector name here and relax the chunk-dimension checks for DAOS below */
+ if (H5VLget_connector_name(file_id, vol_name, 5) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get VOL connector name\n");
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, DATASET_PROPERTY_LIST_TEST_SUBGROUP_NAME, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container sub-group '%s'\n", DATASET_PROPERTY_LIST_TEST_SUBGROUP_NAME);
+ goto error;
+ }
+
+ if ((space_id = generate_random_dataspace(DATASET_PROPERTY_LIST_TEST_SPACE_RANK, NULL, dims, FALSE)) < 0)
+ TEST_ERROR;
+
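+ /* Select a random chunk size in the range [1, dims[i]] for each dimension */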
+ for (i = 0; i < DATASET_PROPERTY_LIST_TEST_SPACE_RANK; i++)
+ chunk_dims[i] = (hsize_t)(rand() % (int)dims[i] + 1);
+
+ if ((dset_dtype1 = generate_random_datatype(H5T_NO_CLASS, FALSE)) < 0)
+ TEST_ERROR;
+ if ((dset_dtype2 = generate_random_datatype(H5T_NO_CLASS, FALSE)) < 0)
+ TEST_ERROR;
+ if ((dset_dtype3 = generate_random_datatype(H5T_NO_CLASS, FALSE)) < 0)
+ TEST_ERROR;
+ if ((dset_dtype4 = generate_random_datatype(H5T_NO_CLASS, FALSE)) < 0)
+ TEST_ERROR;
+
+ if ((dcpl_id1 = H5Pcreate(H5P_DATASET_CREATE)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create DCPL\n");
+ goto error;
+ }
+
+ if (H5Pset_chunk(dcpl_id1, DATASET_PROPERTY_LIST_TEST_SPACE_RANK, chunk_dims) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't set DCPL property\n");
+ goto error;
+ }
+
+ if ((dset_id1 = H5Dcreate2(group_id, DATASET_PROPERTY_LIST_TEST_DSET_NAME1, dset_dtype1, space_id,
+ H5P_DEFAULT, dcpl_id1, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create dataset '%s'\n", DATASET_PROPERTY_LIST_TEST_DSET_NAME1);
+ goto error;
+ }
+
+ if ((dset_id2 = H5Dcreate2(group_id, DATASET_PROPERTY_LIST_TEST_DSET_NAME2, dset_dtype2, space_id,
+ H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create dataset '%s'\n", DATASET_PROPERTY_LIST_TEST_DSET_NAME2);
+ goto error;
+ }
+
+ if (H5Pclose(dcpl_id1) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ BEGIN_MULTIPART
+ {
+ PART_BEGIN(H5Dget_create_plist)
+ {
+ TESTING_2("H5Dget_create_plist");
+
+ /* Try to retrieve copies of the two property lists, one of which has the property set and one
+ * which does not */
+ if ((dcpl_id1 = H5Dget_create_plist(dset_id1)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get property list\n");
+ PART_ERROR(H5Dget_create_plist);
+ }
+
+ if ((dcpl_id2 = H5Dget_create_plist(dset_id2)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get property list\n");
+ PART_ERROR(H5Dget_create_plist);
+ }
+
+ /* Ensure that property list 1 has the property set and property list 2 does not */
+ {
+ hsize_t tmp_chunk_dims[DATASET_PROPERTY_LIST_TEST_SPACE_RANK];
+
+ HDmemset(tmp_chunk_dims, 0, sizeof(tmp_chunk_dims));
+
+ if (H5Pget_chunk(dcpl_id1, DATASET_PROPERTY_LIST_TEST_SPACE_RANK, tmp_chunk_dims) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get DCPL property value\n");
+ PART_ERROR(H5Dget_create_plist);
+ }
+
+ for (i = 0; i < DATASET_PROPERTY_LIST_TEST_SPACE_RANK; i++)
+ if (tmp_chunk_dims[i] != chunk_dims[i]) {
+ H5_FAILED();
+ HDprintf(" DCPL property values were incorrect\n");
+ PART_ERROR(H5Dget_create_plist);
+ }
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Pget_chunk(dcpl_id2, DATASET_PROPERTY_LIST_TEST_SPACE_RANK, tmp_chunk_dims);
+ }
+ H5E_END_TRY;
+
+ /* DAOS VOL can auto chunk, so don't fail */
+ if (err_ret >= 0 && strcmp(vol_name, "daos") != 0) {
+ H5_FAILED();
+ HDprintf(" property list 2 shouldn't have had chunk dimensionality set (not a chunked "
+ "layout)\n");
+ PART_ERROR(H5Dget_create_plist);
+ }
+ }
+
+ PASSED();
+ }
+ PART_END(H5Dget_create_plist);
+
+ PART_BEGIN(H5Dget_access_plist)
+ {
+ TESTING_2("H5Dget_access_plist");
+
+ if ((dapl_id1 = H5Pcreate(H5P_DATASET_ACCESS)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create DAPL\n");
+ PART_ERROR(H5Dget_access_plist);
+ }
+
+ if (H5Pset_efile_prefix(dapl_id1, path_prefix) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't set DAPL property\n");
+ PART_ERROR(H5Dget_access_plist);
+ }
+
+ if ((dset_id3 = H5Dcreate2(group_id, DATASET_PROPERTY_LIST_TEST_DSET_NAME3, dset_dtype3, space_id,
+ H5P_DEFAULT, H5P_DEFAULT, dapl_id1)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create dataset\n");
+ PART_ERROR(H5Dget_access_plist);
+ }
+
+ if ((dset_id4 = H5Dcreate2(group_id, DATASET_PROPERTY_LIST_TEST_DSET_NAME4, dset_dtype4, space_id,
+ H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create dataset\n");
+ PART_ERROR(H5Dget_access_plist);
+ }
+
+ if (dapl_id1 >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Pclose(dapl_id1);
+ }
+ H5E_END_TRY;
+ dapl_id1 = H5I_INVALID_HID;
+ }
+
+ /* Try to retrieve copies of the two property lists, one of which has the property set and one
+ * which does not */
+ if ((dapl_id1 = H5Dget_access_plist(dset_id3)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get property list\n");
+ PART_ERROR(H5Dget_access_plist);
+ }
+
+ if ((dapl_id2 = H5Dget_access_plist(dset_id4)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get property list\n");
+ PART_ERROR(H5Dget_access_plist);
+ }
+
+ /* Ensure that property list 1 has the property set and property list 2 does not */
+ {
+ ssize_t buf_size = 0;
+
+ if ((buf_size = H5Pget_efile_prefix(dapl_id1, NULL, 0)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't retrieve size for property value buffer\n");
+ PART_ERROR(H5Dget_access_plist);
+ }
+
+ if (NULL == (tmp_prefix = (char *)HDcalloc(1, (size_t)buf_size + 1))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for property value\n");
+ PART_ERROR(H5Dget_access_plist);
+ }
+
+ if (H5Pget_efile_prefix(dapl_id1, tmp_prefix, (size_t)buf_size + 1) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't retrieve property list value\n");
+ PART_ERROR(H5Dget_access_plist);
+ }
+
+ if (HDstrncmp(tmp_prefix, path_prefix, (size_t)buf_size + 1)) {
+ H5_FAILED();
+ HDprintf(" DAPL values were incorrect!\n");
+ PART_ERROR(H5Dget_access_plist);
+ }
+
+ HDmemset(tmp_prefix, 0, (size_t)buf_size + 1);
+
+ if (H5Pget_efile_prefix(dapl_id2, tmp_prefix, (size_t)buf_size) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't retrieve property list value\n");
+ PART_ERROR(H5Dget_access_plist);
+ }
+
+ if (!HDstrncmp(tmp_prefix, path_prefix, (size_t)buf_size + 1)) {
+ H5_FAILED();
+ HDprintf(" DAPL property value was set!\n");
+ PART_ERROR(H5Dget_access_plist);
+ }
+ }
+
+ PASSED();
+ }
+ PART_END(H5Dget_access_plist);
+
+ /* Now close the property lists and datasets and see if we can still retrieve copies of
+ * the property lists upon opening (instead of creating) a dataset
+ */
+ if (dcpl_id1 >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Pclose(dcpl_id1);
+ }
+ H5E_END_TRY;
+ dcpl_id1 = H5I_INVALID_HID;
+ }
+ if (dcpl_id2 >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Pclose(dcpl_id2);
+ }
+ H5E_END_TRY;
+ dcpl_id2 = H5I_INVALID_HID;
+ }
+ if (dset_id1 >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Dclose(dset_id1);
+ }
+ H5E_END_TRY;
+ dset_id1 = H5I_INVALID_HID;
+ }
+ if (dset_id2 >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Dclose(dset_id2);
+ }
+ H5E_END_TRY;
+ dset_id2 = H5I_INVALID_HID;
+ }
+
+ PART_BEGIN(H5Dget_create_plist_reopened)
+ {
+ TESTING_2("H5Dget_create_plist after re-opening a dataset");
+
+ if ((dset_id1 = H5Dopen2(group_id, DATASET_PROPERTY_LIST_TEST_DSET_NAME1, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open dataset '%s'\n", DATASET_PROPERTY_LIST_TEST_DSET_NAME1);
+ PART_ERROR(H5Dget_create_plist_reopened);
+ }
+
+ if ((dset_id2 = H5Dopen2(group_id, DATASET_PROPERTY_LIST_TEST_DSET_NAME2, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open dataset '%s'\n", DATASET_PROPERTY_LIST_TEST_DSET_NAME2);
+ PART_ERROR(H5Dget_create_plist_reopened);
+ }
+
+ if ((dcpl_id1 = H5Dget_create_plist(dset_id1)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get property list\n");
+ PART_ERROR(H5Dget_create_plist_reopened);
+ }
+
+ if ((dcpl_id2 = H5Dget_create_plist(dset_id2)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get property list\n");
+ PART_ERROR(H5Dget_create_plist_reopened);
+ }
+
+ /* Ensure that property list 1 has the property set and property list 2 does not */
+ {
+ hsize_t tmp_chunk_dims[DATASET_PROPERTY_LIST_TEST_SPACE_RANK];
+
+ HDmemset(tmp_chunk_dims, 0, sizeof(tmp_chunk_dims));
+
+ if (H5Pget_chunk(dcpl_id1, DATASET_PROPERTY_LIST_TEST_SPACE_RANK, tmp_chunk_dims) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get DCPL property value\n");
+ PART_ERROR(H5Dget_create_plist_reopened);
+ }
+
+ for (i = 0; i < DATASET_PROPERTY_LIST_TEST_SPACE_RANK; i++)
+ if (tmp_chunk_dims[i] != chunk_dims[i]) {
+ H5_FAILED();
+ HDprintf(" DCPL property values were incorrect\n");
+ PART_ERROR(H5Dget_create_plist_reopened);
+ }
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Pget_chunk(dcpl_id2, DATASET_PROPERTY_LIST_TEST_SPACE_RANK, tmp_chunk_dims);
+ }
+ H5E_END_TRY;
+
+ /* DAOS VOL can auto chunk, so don't fail */
+ if (err_ret >= 0 && strcmp(vol_name, "daos") != 0) {
+ H5_FAILED();
+ HDprintf(" property list 2 shouldn't have had chunk dimensionality set (not a chunked "
+ "layout)\n");
+ PART_ERROR(H5Dget_create_plist_reopened);
+ }
+ }
+
+ PASSED();
+ }
+ PART_END(H5Dget_create_plist_reopened);
+ }
+ END_MULTIPART;
+
+ TESTING_2("test cleanup");
+
+ if (tmp_prefix) {
+ HDfree(tmp_prefix);
+ tmp_prefix = NULL;
+ }
+
+ if (H5Pclose(dcpl_id1) < 0)
+ TEST_ERROR;
+ if (H5Pclose(dcpl_id2) < 0)
+ TEST_ERROR;
+ if (H5Pclose(dapl_id1) < 0)
+ TEST_ERROR;
+ if (H5Pclose(dapl_id2) < 0)
+ TEST_ERROR;
+ if (H5Sclose(space_id) < 0)
+ TEST_ERROR;
+ if (H5Tclose(dset_dtype1) < 0)
+ TEST_ERROR;
+ if (H5Tclose(dset_dtype2) < 0)
+ TEST_ERROR;
+ if (H5Tclose(dset_dtype3) < 0)
+ TEST_ERROR;
+ if (H5Tclose(dset_dtype4) < 0)
+ TEST_ERROR;
+ if (H5Dclose(dset_id1) < 0)
+ TEST_ERROR;
+ if (H5Dclose(dset_id2) < 0)
+ TEST_ERROR;
+ if (H5Dclose(dset_id3) < 0)
+ TEST_ERROR;
+ if (H5Dclose(dset_id4) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ if (tmp_prefix)
+ HDfree(tmp_prefix);
+ H5Pclose(dcpl_id1);
+ H5Pclose(dcpl_id2);
+ H5Pclose(dapl_id1);
+ H5Pclose(dapl_id2);
+ H5Sclose(space_id);
+ H5Tclose(dset_dtype1);
+ H5Tclose(dset_dtype2);
+ H5Tclose(dset_dtype3);
+ H5Tclose(dset_dtype4);
+ H5Dclose(dset_id1);
+ H5Dclose(dset_id2);
+ H5Dclose(dset_id3);
+ H5Dclose(dset_id4);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test for H5Dget_storage_size.
+ */
+static int
+test_get_dataset_storage_size(void)
+{
+ TESTING("H5Dget_storage_size");
+
+ SKIPPED();
+
+ return 0;
+}
+
+/*
+ * A test to check that a dataset's storage size can't
+ * be retrieved when H5Dget_storage_size is passed
+ * invalid parameters.
+ */
+static int
+test_get_dataset_storage_size_invalid_params(void)
+{
+ TESTING("H5Dget_storage_size with invalid parameters");
+
+ SKIPPED();
+
+ return 0;
+}
+
+/*
+ * A test for H5Dget_chunk_storage_size.
+ */
+static int
+test_get_dataset_chunk_storage_size(void)
+{
+ TESTING("H5Dget_chunk_storage_size");
+
+ SKIPPED();
+
+ return 0;
+}
+
+/*
+ * A test to check that the size of an allocated chunk in
+ * a dataset can't be retrieved when H5Dget_chunk_storage_size
+ * is passed invalid parameters.
+ */
+static int
+test_get_dataset_chunk_storage_size_invalid_params(void)
+{
+ TESTING("H5Dget_chunk_storage_size with invalid parameters");
+
+ SKIPPED();
+
+ return 0;
+}
+
+/*
+ * A test for H5Dget_offset.
+ */
+static int
+test_get_dataset_offset(void)
+{
+ TESTING("H5Dget_offset");
+
+ SKIPPED();
+
+ return 0;
+}
+
+/*
+ * A test to check that a dataset's offset can't be
+ * retrieved when H5Dget_offset is passed invalid
+ * parameters.
+ */
+static int
+test_get_dataset_offset_invalid_params(void)
+{
+ TESTING("H5Dget_offset with invalid parameters");
+
+ SKIPPED();
+
+ return 0;
+}
+
+/*
+ * A test to check that a small amount of data can be
+ * read back from a dataset using an H5S_ALL selection.
+ */
+static int
+test_read_dataset_small_all(void)
+{
+ hsize_t dims[DATASET_SMALL_READ_TEST_ALL_DSET_SPACE_RANK] = {10, 5, 3};
+ size_t i, data_size;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t fspace_id = H5I_INVALID_HID;
+ void *read_buf = NULL;
+
+ TESTING("small read from dataset with H5S_ALL");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) {
+ SKIPPED();
+ HDprintf(
+ " API functions for basic file, group, or dataset aren't supported with this connector\n");
+ return 0;
+ }
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, DATASET_SMALL_READ_TEST_ALL_GROUP_NAME, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container sub-group '%s'\n", DATASET_SMALL_READ_TEST_ALL_GROUP_NAME);
+ goto error;
+ }
+
+ if ((fspace_id = H5Screate_simple(DATASET_SMALL_READ_TEST_ALL_DSET_SPACE_RANK, dims, NULL)) < 0)
+ TEST_ERROR;
+
+ if ((dset_id = H5Dcreate2(group_id, DATASET_SMALL_READ_TEST_ALL_DSET_NAME,
+ DATASET_SMALL_READ_TEST_ALL_DSET_DTYPE, fspace_id, H5P_DEFAULT, H5P_DEFAULT,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create dataset '%s'\n", DATASET_SMALL_READ_TEST_ALL_DSET_NAME);
+ goto error;
+ }
+
+ for (i = 0, data_size = 1; i < DATASET_SMALL_READ_TEST_ALL_DSET_SPACE_RANK; i++)
+ data_size *= dims[i];
+ data_size *= DATASET_SMALL_READ_TEST_ALL_DSET_DTYPESIZE;
+
+ if (NULL == (read_buf = HDmalloc(data_size)))
+ TEST_ERROR;
+
+ if (H5Dread(dset_id, DATASET_SMALL_READ_TEST_ALL_DSET_DTYPE, H5S_ALL, H5S_ALL, H5P_DEFAULT, read_buf) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't read from dataset '%s'\n", DATASET_SMALL_READ_TEST_ALL_DSET_NAME);
+ goto error;
+ }
+
+ if (read_buf) {
+ HDfree(read_buf);
+ read_buf = NULL;
+ }
+
+ if (H5Sclose(fspace_id) < 0)
+ TEST_ERROR;
+ if (H5Dclose(dset_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ if (read_buf)
+ HDfree(read_buf);
+ H5Sclose(fspace_id);
+ H5Dclose(dset_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to check that a small amount of data can be
+ * read back from a dataset using a hyperslab selection.
+ */
+static int
+test_read_dataset_small_hyperslab(void)
+{
+ hsize_t start[DATASET_SMALL_READ_TEST_HYPERSLAB_DSET_SPACE_RANK];
+ hsize_t stride[DATASET_SMALL_READ_TEST_HYPERSLAB_DSET_SPACE_RANK];
+ hsize_t count[DATASET_SMALL_READ_TEST_HYPERSLAB_DSET_SPACE_RANK];
+ hsize_t block[DATASET_SMALL_READ_TEST_HYPERSLAB_DSET_SPACE_RANK];
+ hsize_t dims[DATASET_SMALL_READ_TEST_HYPERSLAB_DSET_SPACE_RANK] = {10, 5, 3};
+ size_t i, data_size;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t mspace_id = H5I_INVALID_HID, fspace_id = H5I_INVALID_HID;
+ void *read_buf = NULL;
+
+ TESTING("small read from dataset with a hyperslab selection");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) {
+ SKIPPED();
+ HDprintf(
+ " API functions for basic file, group, or dataset aren't supported with this connector\n");
+ return 0;
+ }
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, DATASET_SMALL_READ_TEST_HYPERSLAB_GROUP_NAME, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container sub-group '%s'\n",
+ DATASET_SMALL_READ_TEST_HYPERSLAB_GROUP_NAME);
+ goto error;
+ }
+
+ if ((fspace_id = H5Screate_simple(DATASET_SMALL_READ_TEST_HYPERSLAB_DSET_SPACE_RANK, dims, NULL)) < 0)
+ TEST_ERROR;
+ if ((mspace_id = H5Screate_simple(DATASET_SMALL_READ_TEST_HYPERSLAB_DSET_SPACE_RANK - 1, dims, NULL)) < 0)
+ TEST_ERROR;
+
+ if ((dset_id = H5Dcreate2(group_id, DATASET_SMALL_READ_TEST_HYPERSLAB_DSET_NAME,
+ DATASET_SMALL_READ_TEST_HYPERSLAB_DSET_DTYPE, fspace_id, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create dataset '%s'\n", DATASET_SMALL_READ_TEST_HYPERSLAB_DSET_NAME);
+ goto error;
+ }
+
+ for (i = 0; i < DATASET_SMALL_READ_TEST_HYPERSLAB_DSET_SPACE_RANK; i++) {
+ start[i] = 0;
+ stride[i] = 1;
+ count[i] = dims[i];
+ block[i] = 1;
+ }
+
+ count[2] = 1;
+
+ if (H5Sselect_hyperslab(fspace_id, H5S_SELECT_SET, start, stride, count, block) < 0)
+ TEST_ERROR;
+
+ for (i = 0, data_size = 1; i < DATASET_SMALL_READ_TEST_HYPERSLAB_DSET_SPACE_RANK - 1; i++)
+ data_size *= dims[i];
+ data_size *= DATASET_SMALL_READ_TEST_HYPERSLAB_DSET_DTYPESIZE;
+
+ if (NULL == (read_buf = HDmalloc(data_size)))
+ TEST_ERROR;
+
+ if (H5Dread(dset_id, DATASET_SMALL_READ_TEST_HYPERSLAB_DSET_DTYPE, mspace_id, fspace_id, H5P_DEFAULT,
+ read_buf) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't read from dataset '%s'\n", DATASET_SMALL_READ_TEST_HYPERSLAB_DSET_NAME);
+ goto error;
+ }
+
+ if (read_buf) {
+ HDfree(read_buf);
+ read_buf = NULL;
+ }
+
+ if (H5Sclose(mspace_id) < 0)
+ TEST_ERROR;
+ if (H5Sclose(fspace_id) < 0)
+ TEST_ERROR;
+ if (H5Dclose(dset_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ if (read_buf)
+ HDfree(read_buf);
+ H5Sclose(mspace_id);
+ H5Sclose(fspace_id);
+ H5Dclose(dset_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to check that a small amount of data can be
+ * read back from a dataset using a point selection.
+ */
+static int
+test_read_dataset_small_point_selection(void)
+{
+ hsize_t points[DATASET_SMALL_READ_TEST_POINT_SELECTION_NUM_POINTS *
+ DATASET_SMALL_READ_TEST_POINT_SELECTION_DSET_SPACE_RANK];
+ hsize_t dims[DATASET_SMALL_READ_TEST_POINT_SELECTION_DSET_SPACE_RANK] = {10, 10, 10};
+ hsize_t mspace_dims[] = {DATASET_SMALL_READ_TEST_POINT_SELECTION_NUM_POINTS};
+ size_t i, data_size;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t fspace_id = H5I_INVALID_HID;
+ hid_t mspace_id = H5I_INVALID_HID;
+ void *data = NULL;
+
+ TESTING("small read from dataset with a point selection");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) {
+ SKIPPED();
+ HDprintf(
+ " API functions for basic file, group, or dataset aren't supported with this connector\n");
+ return 0;
+ }
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, DATASET_SMALL_READ_TEST_POINT_SELECTION_GROUP_NAME,
+ H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container sub-group '%s'\n",
+ DATASET_SMALL_READ_TEST_POINT_SELECTION_GROUP_NAME);
+ goto error;
+ }
+
+ if ((fspace_id = H5Screate_simple(DATASET_SMALL_READ_TEST_POINT_SELECTION_DSET_SPACE_RANK, dims, NULL)) <
+ 0)
+ TEST_ERROR;
+ if ((mspace_id = H5Screate_simple(1, mspace_dims, NULL)) < 0)
+ TEST_ERROR;
+
+ if ((dset_id = H5Dcreate2(group_id, DATASET_SMALL_READ_TEST_POINT_SELECTION_DSET_NAME,
+ DATASET_SMALL_READ_TEST_POINT_SELECTION_DSET_DTYPE, fspace_id, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create dataset '%s'\n", DATASET_SMALL_READ_TEST_POINT_SELECTION_DSET_NAME);
+ goto error;
+ }
+
+ data_size = DATASET_SMALL_READ_TEST_POINT_SELECTION_NUM_POINTS *
+ DATASET_SMALL_READ_TEST_POINT_SELECTION_DSET_DTYPESIZE;
+
+ if (NULL == (data = HDmalloc(data_size)))
+ TEST_ERROR;
+
+ for (i = 0; i < DATASET_SMALL_READ_TEST_POINT_SELECTION_NUM_POINTS; i++) {
+ size_t j;
+
+ for (j = 0; j < DATASET_SMALL_READ_TEST_POINT_SELECTION_DSET_SPACE_RANK; j++)
+ points[(i * DATASET_SMALL_READ_TEST_POINT_SELECTION_DSET_SPACE_RANK) + j] = i;
+ }
+
+ if (H5Sselect_elements(fspace_id, H5S_SELECT_SET, DATASET_SMALL_READ_TEST_POINT_SELECTION_NUM_POINTS,
+ points) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't select points\n");
+ goto error;
+ }
+
+ if (H5Dread(dset_id, DATASET_SMALL_READ_TEST_POINT_SELECTION_DSET_DTYPE, mspace_id, fspace_id,
+ H5P_DEFAULT, data) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't read from dataset '%s'\n", DATASET_SMALL_READ_TEST_POINT_SELECTION_DSET_NAME);
+ goto error;
+ }
+
+ if (data) {
+ HDfree(data);
+ data = NULL;
+ }
+
+ if (H5Sclose(mspace_id) < 0)
+ TEST_ERROR;
+ if (H5Sclose(fspace_id) < 0)
+ TEST_ERROR;
+ if (H5Dclose(dset_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ if (data)
+ HDfree(data);
+ H5Sclose(mspace_id);
+ H5Sclose(fspace_id);
+ H5Dclose(dset_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * Tests point selection I/O with different patterns
+ */
+#define DATASET_IO_POINT_DIM_0 6
+#define DATASET_IO_POINT_DIM_1 9
+#define DATASET_IO_POINT_CDIM_0 4
+#define DATASET_IO_POINT_CDIM_1 3
+#define DATASET_IO_POINT_NPOINTS 10
+#define DATASET_IO_POINT_GEN_POINTS(POINTS, I, J) \
+ { \
+ for ((I) = 0; (I) < DATASET_IO_POINT_NPOINTS; (I)++) \
+ do { \
+ (POINTS)[2 * (I)] = (hsize_t)(rand() % DATASET_IO_POINT_DIM_0); \
+ (POINTS)[2 * (I) + 1] = (hsize_t)(rand() % DATASET_IO_POINT_DIM_1); \
+ for ((J) = 0; ((J) < (I)) && (((POINTS)[2 * (I)] != (POINTS)[2 * (J)]) || \
+ ((POINTS)[2 * (I) + 1] != (POINTS)[2 * (J) + 1])); \
+ (J)++) \
+ ; \
+ } while ((J) < (I)); \
+ }
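+/*
+ * DATASET_IO_POINT_GEN_POINTS fills POINTS with DATASET_IO_POINT_NPOINTS unique
+ * random (row, column) coordinates: candidate point I is regenerated until the
+ * scan over the previously generated points 0..I-1 finds no duplicate (the
+ * do/while loop repeats whenever that scan stops early with J < I).
+ */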
+static int
+test_dataset_io_point_selections(void)
+{
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t mspace_id_full = H5I_INVALID_HID, mspace_id_all = H5I_INVALID_HID, fspace_id = H5I_INVALID_HID;
+ hid_t dcpl_id_chunk = H5I_INVALID_HID;
+ hsize_t dims[2] = {DATASET_IO_POINT_DIM_0, DATASET_IO_POINT_DIM_1};
+ hsize_t cdims[2] = {DATASET_IO_POINT_CDIM_0, DATASET_IO_POINT_CDIM_1};
+ hsize_t points[DATASET_IO_POINT_NPOINTS * 2];
+ hsize_t points2[DATASET_IO_POINT_NPOINTS * 2];
+ hsize_t npoints = DATASET_IO_POINT_NPOINTS;
+ hsize_t start[2] = {1, 2};
+ hsize_t stride[2] = {2, 5};
+ hsize_t count[2] = {2, 1};
+ hsize_t block[2] = {1, 5};
+ int buf_all[DATASET_IO_POINT_DIM_0][DATASET_IO_POINT_DIM_1];
+ int file_state[DATASET_IO_POINT_DIM_0][DATASET_IO_POINT_DIM_1];
+ int erbuf[DATASET_IO_POINT_DIM_0][DATASET_IO_POINT_DIM_1];
+ int buf_point[DATASET_IO_POINT_NPOINTS];
+ hbool_t do_chunk;
+ int i, j;
+
+ TESTING("point selection I/O with all selection in memory and points in file");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) {
+ SKIPPED();
+ HDprintf(
+ " API functions for basic file, group, or dataset aren't supported with this connector\n");
+ return 0;
+ }
+
+ /* Create dataspaces and DCPL */
+ if ((mspace_id_full = H5Screate_simple(2, dims, NULL)) < 0)
+ TEST_ERROR;
+ if ((mspace_id_all = H5Screate_simple(1, &npoints, NULL)) < 0)
+ TEST_ERROR;
+ if ((fspace_id = H5Screate_simple(2, dims, NULL)) < 0)
+ TEST_ERROR;
+ if ((dcpl_id_chunk = H5Pcreate(H5P_DATASET_CREATE)) < 0)
+ TEST_ERROR;
+
+ /* Enable chunking on chunk DCPL */
+ if (H5Pset_chunk(dcpl_id_chunk, 2, cdims) < 0)
+ TEST_ERROR;
+
+ /* Open file */
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0)
+ TEST_ERROR;
+
+ /* Open container group */
+ if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0)
+ TEST_ERROR;
+
+ /* Create group */
+ if ((group_id = H5Gcreate2(container_group, DATASET_IO_POINT_GROUP_NAME, H5P_DEFAULT, H5P_DEFAULT,
+ H5P_DEFAULT)) < 0)
+ TEST_ERROR;
+
+ /* Perform with and without chunking */
+ for (do_chunk = FALSE;; do_chunk = TRUE) {
+ if (do_chunk) {
+ TESTING("point selection I/O with all selection in memory and points in file with chunking");
+
+ /* Create chunked dataset */
+ if ((dset_id = H5Dcreate2(group_id, DATASET_IO_POINT_DSET_NAME_CHUNK, H5T_NATIVE_INT, fspace_id,
+ H5P_DEFAULT, dcpl_id_chunk, H5P_DEFAULT)) < 0)
+ TEST_ERROR;
+ } /* end if */
+ else
+ /* Create non-chunked dataset */
+ if ((dset_id = H5Dcreate2(group_id, DATASET_IO_POINT_DSET_NAME_NOCHUNK, H5T_NATIVE_INT, fspace_id,
+ H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0)
+ TEST_ERROR;
+
+ /* Fill write buffer */
+ for (i = 0; i < DATASET_IO_POINT_DIM_0; i++)
+ for (j = 0; j < DATASET_IO_POINT_DIM_1; j++)
+ buf_all[i][j] = rand();
+
+ /* Write data */
+ if (H5Dwrite(dset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf_all) < 0)
+ FAIL_PUTS_ERROR("Failed to write entire dataset");
+
+ /* Update file_state */
+ for (i = 0; i < DATASET_IO_POINT_DIM_0; i++)
+ for (j = 0; j < DATASET_IO_POINT_DIM_1; j++)
+ file_state[i][j] = buf_all[i][j];
+
+ /* Generate points to read */
+ DATASET_IO_POINT_GEN_POINTS(points, i, j);
+
+ /* Select points */
+ if (H5Sselect_elements(fspace_id, H5S_SELECT_SET, DATASET_IO_POINT_NPOINTS, points) < 0)
+ TEST_ERROR;
+
+ /* Wipe read buffer */
+ memset(buf_point, 0, sizeof(buf_point));
+
+ /* Read points to "all" memory buffer */
+ if (H5Dread(dset_id, H5T_NATIVE_INT, mspace_id_all, fspace_id, H5P_DEFAULT, buf_point) < 0)
+ FAIL_PUTS_ERROR("Failed to read points from dataset to all memory buffer");
+
+ /* Verify data */
+ for (i = 0; i < DATASET_IO_POINT_NPOINTS; i++)
+ if (buf_point[i] != file_state[points[2 * i]][points[2 * i + 1]])
+ FAIL_PUTS_ERROR("Incorrect data read from points to all memory buffer");
+
+ /* Generate points to write */
+ DATASET_IO_POINT_GEN_POINTS(points, i, j);
+
+ /* Select points */
+ if (H5Sselect_elements(fspace_id, H5S_SELECT_SET, DATASET_IO_POINT_NPOINTS, points) < 0)
+ TEST_ERROR;
+
+ /* Fill write buffer */
+ for (i = 0; i < DATASET_IO_POINT_NPOINTS; i++)
+ buf_point[i] = rand();
+
+ /* Write points from "all" memory buffer */
+ if (H5Dwrite(dset_id, H5T_NATIVE_INT, mspace_id_all, fspace_id, H5P_DEFAULT, buf_point) < 0)
+ FAIL_PUTS_ERROR("Failed to write points to dataset from all memory buffer");
+
+ /* Update file state */
+ for (i = 0; i < DATASET_IO_POINT_NPOINTS; i++)
+ file_state[points[2 * i]][points[2 * i + 1]] = buf_point[i];
+
+ /* Wipe read buffer */
+ memset(buf_all, 0, sizeof(buf_all));
+
+ /* Read entire dataset */
+ if (H5Dread(dset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf_all) < 0)
+ FAIL_PUTS_ERROR("Failed to read entire dataset");
+
+ /* Verify data */
+ for (i = 0; i < DATASET_IO_POINT_DIM_0; i++)
+ for (j = 0; j < DATASET_IO_POINT_DIM_1; j++)
+ if (buf_all[i][j] != file_state[i][j])
+ FAIL_PUTS_ERROR("Incorrect data found after writing from all memory buffer to points");
+
+ PASSED();
+
+ if (do_chunk)
+ TESTING("point selection I/O with points in memory and file (same shape) with chunking");
+ else
+ TESTING("point selection I/O with points in memory and file (same shape)");
+
+ /* Generate points to read */
+ DATASET_IO_POINT_GEN_POINTS(points, i, j);
+
+ /* Select points */
+ if (H5Sselect_elements(fspace_id, H5S_SELECT_SET, DATASET_IO_POINT_NPOINTS, points) < 0)
+ TEST_ERROR;
+
+ /* Wipe read buffer */
+ memset(buf_all, 0, sizeof(buf_all));
+
+ /* Generate expected read buffer */
+ memset(erbuf, 0, sizeof(erbuf));
+ for (i = 0; i < DATASET_IO_POINT_NPOINTS; i++)
+ erbuf[points[2 * i]][points[2 * i + 1]] = file_state[points[2 * i]][points[2 * i + 1]];
+
+ /* Read data points->points */
+ if (H5Dread(dset_id, H5T_NATIVE_INT, fspace_id, fspace_id, H5P_DEFAULT, buf_all) < 0)
+ FAIL_PUTS_ERROR("Failed to read points from dataset to points in memory buffer");
+
+ /* Verify data */
+ for (i = 0; i < DATASET_IO_POINT_DIM_0; i++)
+ for (j = 0; j < DATASET_IO_POINT_DIM_1; j++)
+ if (buf_all[i][j] != erbuf[i][j])
+ FAIL_PUTS_ERROR("Incorrect data found read from points in file to points in memory");
+
+ /* Generate points to write */
+ DATASET_IO_POINT_GEN_POINTS(points, i, j);
+
+ /* Select points */
+ if (H5Sselect_elements(fspace_id, H5S_SELECT_SET, DATASET_IO_POINT_NPOINTS, points) < 0)
+ TEST_ERROR;
+
+ /* Fill write buffer */
+ for (i = 0; i < DATASET_IO_POINT_DIM_0; i++)
+ for (j = 0; j < DATASET_IO_POINT_DIM_1; j++)
+ buf_all[i][j] = rand();
+
+ /* Write data points->points */
+ if (H5Dwrite(dset_id, H5T_NATIVE_INT, fspace_id, fspace_id, H5P_DEFAULT, buf_all) < 0)
+ FAIL_PUTS_ERROR("Failed to write from in memory to points in dataset");
+
+ /* Update file_state */
+ for (i = 0; i < DATASET_IO_POINT_NPOINTS; i++)
+ file_state[points[2 * i]][points[2 * i + 1]] = buf_all[points[2 * i]][points[2 * i + 1]];
+
+ /* Wipe read buffer */
+ memset(buf_all, 0, sizeof(buf_all));
+
+ /* Read entire dataset */
+ if (H5Dread(dset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf_all) < 0)
+ FAIL_PUTS_ERROR("Failed to read entire dataset");
+
+ /* Verify data */
+ for (i = 0; i < DATASET_IO_POINT_DIM_0; i++)
+ for (j = 0; j < DATASET_IO_POINT_DIM_1; j++)
+ if (buf_all[i][j] != file_state[i][j])
+ FAIL_PUTS_ERROR(
+ "Incorrect data found after writing from points in memory to points in dataset");
+
+ PASSED();
+
+ if (do_chunk)
+ TESTING("point selection I/O with points in memory and file (different shape) with chunking");
+ else
+ TESTING("point selection I/O with points in memory and file (different shape)");
+
+ /* Generate points to read */
+ DATASET_IO_POINT_GEN_POINTS(points, i, j);
+ DATASET_IO_POINT_GEN_POINTS(points2, i, j);
+
+ /* Select points */
+ if (H5Sselect_elements(fspace_id, H5S_SELECT_SET, DATASET_IO_POINT_NPOINTS, points) < 0)
+ TEST_ERROR;
+ if (H5Sselect_elements(mspace_id_full, H5S_SELECT_SET, DATASET_IO_POINT_NPOINTS, points2) < 0)
+ TEST_ERROR;
+
+ /* Wipe read buffer */
+ memset(buf_all, 0, sizeof(buf_all));
+
+ /* Generate expected read buffer */
+ memset(erbuf, 0, sizeof(erbuf));
+ for (i = 0; i < DATASET_IO_POINT_NPOINTS; i++)
+ erbuf[points2[2 * i]][points2[2 * i + 1]] = file_state[points[2 * i]][points[2 * i + 1]];
+
+ /* Read data points->points */
+ if (H5Dread(dset_id, H5T_NATIVE_INT, mspace_id_full, fspace_id, H5P_DEFAULT, buf_all) < 0)
+ FAIL_PUTS_ERROR("Failed to read points from dataset to points in memory buffer");
+
+ /* Verify data */
+ for (i = 0; i < DATASET_IO_POINT_DIM_0; i++)
+ for (j = 0; j < DATASET_IO_POINT_DIM_1; j++)
+ if (buf_all[i][j] != erbuf[i][j])
+ FAIL_PUTS_ERROR(
+ "Incorrect data found after reading from points in file to points in memory");
+
+ /* Generate points to write */
+ DATASET_IO_POINT_GEN_POINTS(points, i, j);
+ DATASET_IO_POINT_GEN_POINTS(points2, i, j);
+
+ /* Select points */
+ if (H5Sselect_elements(fspace_id, H5S_SELECT_SET, DATASET_IO_POINT_NPOINTS, points) < 0)
+ TEST_ERROR;
+ if (H5Sselect_elements(mspace_id_full, H5S_SELECT_SET, DATASET_IO_POINT_NPOINTS, points2) < 0)
+ TEST_ERROR;
+
+ /* Fill write buffer */
+ for (i = 0; i < DATASET_IO_POINT_DIM_0; i++)
+ for (j = 0; j < DATASET_IO_POINT_DIM_1; j++)
+ buf_all[i][j] = rand();
+
+ /* Write data points->points */
+ if (H5Dwrite(dset_id, H5T_NATIVE_INT, mspace_id_full, fspace_id, H5P_DEFAULT, buf_all) < 0)
+ FAIL_PUTS_ERROR("Failed to write from points in memory to points in dataset");
+
+ /* Update file_state */
+ for (i = 0; i < DATASET_IO_POINT_NPOINTS; i++)
+ file_state[points[2 * i]][points[2 * i + 1]] = buf_all[points2[2 * i]][points2[2 * i + 1]];
+
+ /* Wipe read buffer */
+ memset(buf_all, 0, sizeof(buf_all));
+
+ /* Read entire dataset */
+ if (H5Dread(dset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf_all) < 0)
+ FAIL_PUTS_ERROR("Failed to read entire dataset");
+
+ /* Verify data */
+ for (i = 0; i < DATASET_IO_POINT_DIM_0; i++)
+ for (j = 0; j < DATASET_IO_POINT_DIM_1; j++)
+ if (buf_all[i][j] != file_state[i][j])
+ FAIL_PUTS_ERROR(
+ "Incorrect data found after writing from points in memory to points in dataset");
+
+ PASSED();
+
+ if (do_chunk)
+ TESTING("point selection I/O with hyperslab in memory and points in file with chunking");
+ else
+ TESTING("point selection I/O with hyperslab in memory and points in file");
+
+ /* Generate points to read */
+ DATASET_IO_POINT_GEN_POINTS(points, i, j);
+
+ /* Select points */
+ if (H5Sselect_elements(fspace_id, H5S_SELECT_SET, DATASET_IO_POINT_NPOINTS, points) < 0)
+ TEST_ERROR;
+
+ /* Select hyperslab */
+ if (H5Sselect_hyperslab(mspace_id_full, H5S_SELECT_SET, start, stride, count, block) < 0)
+ TEST_ERROR;
+
+ /* Wipe read buffer */
+ memset(buf_all, 0, sizeof(buf_all));
+
+ /* Generate expected read buffer */
+ memset(erbuf, 0, sizeof(erbuf));
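+ /* Point i of the memory hyperslab maps to row start[0] + stride[0] * (i / block[1])
+ * and column start[1] + (i % block[1]); each selected block spans block[1] contiguous columns */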
+ for (i = 0; i < DATASET_IO_POINT_NPOINTS; i++)
+ erbuf[start[0] + (stride[0] * ((hsize_t)i / block[1]))][start[1] + ((hsize_t)i % block[1])] =
+ file_state[points[2 * i]][points[2 * i + 1]];
+
+ /* Read data points->hslab */
+ if (H5Dread(dset_id, H5T_NATIVE_INT, mspace_id_full, fspace_id, H5P_DEFAULT, buf_all) < 0)
+ FAIL_PUTS_ERROR("Failed to read points from dataset to hyperslab in memory buffer");
+
+ /* Verify data */
+ for (i = 0; i < DATASET_IO_POINT_DIM_0; i++)
+ for (j = 0; j < DATASET_IO_POINT_DIM_1; j++)
+ if (buf_all[i][j] != erbuf[i][j])
+ FAIL_PUTS_ERROR(
+ "Incorrect data found after reading from points in file to hyperslab in memory");
+
+ /* Generate points to write */
+ DATASET_IO_POINT_GEN_POINTS(points, i, j);
+
+ /* Select points */
+ if (H5Sselect_elements(fspace_id, H5S_SELECT_SET, DATASET_IO_POINT_NPOINTS, points) < 0)
+ TEST_ERROR;
+
+ /* Fill write buffer */
+ for (i = 0; i < DATASET_IO_POINT_DIM_0; i++)
+ for (j = 0; j < DATASET_IO_POINT_DIM_1; j++)
+ buf_all[i][j] = rand();
+
+ /* Write data hslab->points */
+ if (H5Dwrite(dset_id, H5T_NATIVE_INT, mspace_id_full, fspace_id, H5P_DEFAULT, buf_all) < 0)
+ FAIL_PUTS_ERROR("Failed to write from hyperslab in memory to points in dataset");
+
+ /* Update file_state */
+ for (i = 0; i < DATASET_IO_POINT_NPOINTS; i++)
+ file_state[points[2 * i]][points[2 * i + 1]] =
+ buf_all[start[0] + (stride[0] * ((hsize_t)i / block[1]))][start[1] + ((hsize_t)i % block[1])];
+
+ /* Wipe read buffer */
+ memset(buf_all, 0, sizeof(buf_all));
+
+ /* Read entire dataset */
+ if (H5Dread(dset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf_all) < 0)
+ FAIL_PUTS_ERROR("Failed to read entire dataset");
+
+ /* Verify data */
+ for (i = 0; i < DATASET_IO_POINT_DIM_0; i++)
+ for (j = 0; j < DATASET_IO_POINT_DIM_1; j++)
+ if (buf_all[i][j] != file_state[i][j])
+ FAIL_PUTS_ERROR(
+ "Incorrect data found after writing from hyperslab in memory to points in dataset");
+
+ PASSED();
+
+ if (do_chunk)
+ TESTING("point selection I/O with points in memory and hyperslab in file with chunking");
+ else
+ TESTING("point selection I/O with points in memory and hyperslab in file");
+
+ /* Generate points to read */
+ DATASET_IO_POINT_GEN_POINTS(points, i, j);
+
+ /* Select points */
+ if (H5Sselect_elements(mspace_id_full, H5S_SELECT_SET, DATASET_IO_POINT_NPOINTS, points) < 0)
+ TEST_ERROR;
+
+ /* Select hyperslab */
+ if (H5Sselect_hyperslab(fspace_id, H5S_SELECT_SET, start, stride, count, block) < 0)
+ TEST_ERROR;
+
+ /* Wipe read buffer */
+ memset(buf_all, 0, sizeof(buf_all));
+
+ /* Generate expected read buffer */
+ memset(erbuf, 0, sizeof(erbuf));
+ for (i = 0; i < DATASET_IO_POINT_NPOINTS; i++)
+ erbuf[points[2 * i]][points[2 * i + 1]] =
+ file_state[start[0] + (stride[0] * ((hsize_t)i / block[1]))]
+ [start[1] + ((hsize_t)i % block[1])];
+
+ /* Read data hslab->points */
+ if (H5Dread(dset_id, H5T_NATIVE_INT, mspace_id_full, fspace_id, H5P_DEFAULT, buf_all) < 0)
+ FAIL_PUTS_ERROR("Failed to read hyperslab from dataset to points in memory buffer");
+
+ /* Verify data */
+ for (i = 0; i < DATASET_IO_POINT_DIM_0; i++)
+ for (j = 0; j < DATASET_IO_POINT_DIM_1; j++)
+ if (buf_all[i][j] != erbuf[i][j])
+ FAIL_PUTS_ERROR(
+ "Incorrect data found after reading from hyperslab in file to points in memory");
+
+ /* Generate points to write */
+ DATASET_IO_POINT_GEN_POINTS(points, i, j);
+
+ /* Select points */
+ if (H5Sselect_elements(mspace_id_full, H5S_SELECT_SET, DATASET_IO_POINT_NPOINTS, points) < 0)
+ TEST_ERROR;
+
+ /* Fill write buffer */
+ for (i = 0; i < DATASET_IO_POINT_DIM_0; i++)
+ for (j = 0; j < DATASET_IO_POINT_DIM_1; j++)
+ buf_all[i][j] = rand();
+
+ /* Write data points->hslab */
+ if (H5Dwrite(dset_id, H5T_NATIVE_INT, mspace_id_full, fspace_id, H5P_DEFAULT, buf_all) < 0)
+ FAIL_PUTS_ERROR("Failed to write from points in memory to hyperslab in dataset");
+
+ /* Update file_state */
+ for (i = 0; i < DATASET_IO_POINT_NPOINTS; i++)
+ file_state[start[0] + (stride[0] * ((hsize_t)i / block[1]))][start[1] + ((hsize_t)i % block[1])] =
+ buf_all[points[2 * i]][points[2 * i + 1]];
+
+ /* Wipe read buffer */
+ memset(buf_all, 0, sizeof(buf_all));
+
+ /* Read entire dataset */
+ if (H5Dread(dset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf_all) < 0)
+ FAIL_PUTS_ERROR("Failed to read entire dataset");
+
+ /* Verify data */
+ for (i = 0; i < DATASET_IO_POINT_DIM_0; i++)
+ for (j = 0; j < DATASET_IO_POINT_DIM_1; j++)
+ if (buf_all[i][j] != file_state[i][j])
+ FAIL_PUTS_ERROR(
+ "Incorrect data found after writing from points in memory to hyperslab in dataset");
+
+ if (!do_chunk)
+ PASSED();
+
+ /* Close dataset */
+ if (H5Dclose(dset_id) < 0)
+ TEST_ERROR;
+
+ /* Exit after chunked run */
+ if (do_chunk)
+ break;
+ } /* end for */
+
+ /* Close */
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+ if (H5Pclose(dcpl_id_chunk) < 0)
+ TEST_ERROR;
+ if (H5Sclose(mspace_id_full) < 0)
+ TEST_ERROR;
+ if (H5Sclose(mspace_id_all) < 0)
+ TEST_ERROR;
+ if (H5Sclose(fspace_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(fspace_id);
+ H5Sclose(mspace_id_full);
+ H5Sclose(mspace_id_all);
+ H5Pclose(dcpl_id_chunk);
+ H5Dclose(dset_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+} /* end test_dataset_io_point_selections() */
+
+#ifndef NO_LARGE_TESTS
+/*
+ * A test to check that a large amount of data can be
+ * read back from a dataset using an H5S_ALL selection.
+ */
+static int
+test_read_dataset_large_all(void)
+{
+ hsize_t dims[DATASET_LARGE_READ_TEST_ALL_DSET_SPACE_RANK] = {600, 600, 600};
+ size_t i, data_size;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t fspace_id = H5I_INVALID_HID;
+ void *read_buf = NULL;
+
+ TESTING("large read from dataset with H5S_ALL");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) {
+ SKIPPED();
+ HDprintf(
+ " API functions for basic file, group, or dataset aren't supported with this connector\n");
+ return 0;
+ }
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, DATASET_LARGE_READ_TEST_ALL_GROUP_NAME, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container sub-group '%s'\n", DATASET_LARGE_READ_TEST_ALL_GROUP_NAME);
+ goto error;
+ }
+
+ if ((fspace_id = H5Screate_simple(DATASET_LARGE_READ_TEST_ALL_DSET_SPACE_RANK, dims, NULL)) < 0)
+ TEST_ERROR;
+
+ if ((dset_id = H5Dcreate2(group_id, DATASET_LARGE_READ_TEST_ALL_DSET_NAME,
+ DATASET_LARGE_READ_TEST_ALL_DSET_DTYPE, fspace_id, H5P_DEFAULT, H5P_DEFAULT,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create dataset '%s'\n", DATASET_LARGE_READ_TEST_ALL_DSET_NAME);
+ goto error;
+ }
+
+ for (i = 0, data_size = 1; i < DATASET_LARGE_READ_TEST_ALL_DSET_SPACE_RANK; i++)
+ data_size *= dims[i];
+ data_size *= DATASET_LARGE_READ_TEST_ALL_DSET_DTYPESIZE;
+
+ if (NULL == (read_buf = HDmalloc(data_size)))
+ TEST_ERROR;
+
+ if (H5Dread(dset_id, DATASET_LARGE_READ_TEST_ALL_DSET_DTYPE, H5S_ALL, H5S_ALL, H5P_DEFAULT, read_buf) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't read from dataset '%s'\n", DATASET_LARGE_READ_TEST_ALL_DSET_NAME);
+ goto error;
+ }
+
+ if (read_buf) {
+ HDfree(read_buf);
+ read_buf = NULL;
+ }
+
+ if (H5Sclose(fspace_id) < 0)
+ TEST_ERROR;
+ if (H5Dclose(dset_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ if (read_buf)
+ HDfree(read_buf);
+ H5Sclose(fspace_id);
+ H5Dclose(dset_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to check that a large amount of data can be
+ * read back from a dataset using a hyperslab selection.
+ */
+static int
+test_read_dataset_large_hyperslab(void)
+{
+ hsize_t start[DATASET_LARGE_READ_TEST_HYPERSLAB_DSET_SPACE_RANK];
+ hsize_t stride[DATASET_LARGE_READ_TEST_HYPERSLAB_DSET_SPACE_RANK];
+ hsize_t count[DATASET_LARGE_READ_TEST_HYPERSLAB_DSET_SPACE_RANK];
+ hsize_t block[DATASET_LARGE_READ_TEST_HYPERSLAB_DSET_SPACE_RANK];
+ hsize_t dims[DATASET_LARGE_READ_TEST_HYPERSLAB_DSET_SPACE_RANK] = {600, 600, 600};
+ size_t i, data_size;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t mspace_id = H5I_INVALID_HID, fspace_id = H5I_INVALID_HID;
+ void *read_buf = NULL;
+
+ TESTING("large read from dataset with a hyperslab selection");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) {
+ SKIPPED();
+ HDprintf(
+ " API functions for basic file, group, or dataset aren't supported with this connector\n");
+ return 0;
+ }
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, DATASET_LARGE_READ_TEST_HYPERSLAB_GROUP_NAME, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container sub-group '%s'\n",
+ DATASET_LARGE_READ_TEST_HYPERSLAB_GROUP_NAME);
+ goto error;
+ }
+
+ if ((fspace_id = H5Screate_simple(DATASET_LARGE_READ_TEST_HYPERSLAB_DSET_SPACE_RANK, dims, NULL)) < 0)
+ TEST_ERROR;
+ if ((mspace_id = H5Screate_simple(DATASET_LARGE_READ_TEST_HYPERSLAB_DSET_SPACE_RANK, dims, NULL)) < 0)
+ TEST_ERROR;
+
+ if ((dset_id = H5Dcreate2(group_id, DATASET_LARGE_READ_TEST_HYPERSLAB_DSET_NAME,
+ DATASET_LARGE_READ_TEST_HYPERSLAB_DSET_DTYPE, fspace_id, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create dataset '%s'\n", DATASET_LARGE_READ_TEST_HYPERSLAB_DSET_NAME);
+ goto error;
+ }
+
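+ /* Select the entire file dataspace as one hyperslab: start at the origin with unit stride and block, and a count equal to the full dimensions */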
+ for (i = 0; i < DATASET_LARGE_READ_TEST_HYPERSLAB_DSET_SPACE_RANK; i++) {
+ start[i] = 0;
+ stride[i] = 1;
+ count[i] = dims[i];
+ block[i] = 1;
+ }
+
+ if (H5Sselect_hyperslab(fspace_id, H5S_SELECT_SET, start, stride, count, block) < 0)
+ TEST_ERROR;
+
+ for (i = 0, data_size = 1; i < DATASET_LARGE_READ_TEST_HYPERSLAB_DSET_SPACE_RANK; i++)
+ data_size *= dims[i];
+ data_size *= DATASET_LARGE_READ_TEST_HYPERSLAB_DSET_DTYPESIZE;
+
+ if (NULL == (read_buf = HDmalloc(data_size)))
+ TEST_ERROR;
+
+ if (H5Dread(dset_id, DATASET_LARGE_READ_TEST_HYPERSLAB_DSET_DTYPE, mspace_id, fspace_id, H5P_DEFAULT,
+ read_buf) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't read from dataset '%s'\n", DATASET_LARGE_READ_TEST_HYPERSLAB_DSET_NAME);
+ goto error;
+ }
+
+ if (read_buf) {
+ HDfree(read_buf);
+ read_buf = NULL;
+ }
+
+ if (H5Sclose(mspace_id) < 0)
+ TEST_ERROR;
+ if (H5Sclose(fspace_id) < 0)
+ TEST_ERROR;
+ if (H5Dclose(dset_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ if (read_buf)
+ HDfree(read_buf);
+ H5Sclose(mspace_id);
+ H5Sclose(fspace_id);
+ H5Dclose(dset_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to check that a large amount of data can be
+ * read back from a dataset using a large point selection.
+ *
+ * XXX: Test takes up significant amounts of memory.
+ */
+static int
+test_read_dataset_large_point_selection(void)
+{
+ hsize_t *points = NULL;
+ hsize_t dims[DATASET_LARGE_READ_TEST_POINT_SELECTION_DSET_SPACE_RANK] = {225000000};
+ size_t i, data_size;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t fspace_id = H5I_INVALID_HID;
+ void *data = NULL;
+
+ TESTING("large read from dataset with a point selection");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) {
+ SKIPPED();
+ HDprintf(
+ " API functions for basic file, group, or dataset aren't supported with this connector\n");
+ return 0;
+ }
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, DATASET_LARGE_READ_TEST_POINT_SELECTION_GROUP_NAME,
+ H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container sub-group '%s'\n",
+ DATASET_LARGE_READ_TEST_POINT_SELECTION_GROUP_NAME);
+ goto error;
+ }
+
+ if ((fspace_id = H5Screate_simple(DATASET_LARGE_READ_TEST_POINT_SELECTION_DSET_SPACE_RANK, dims, NULL)) <
+ 0)
+ TEST_ERROR;
+
+ if ((dset_id = H5Dcreate2(group_id, DATASET_LARGE_READ_TEST_POINT_SELECTION_DSET_NAME,
+ DATASET_LARGE_READ_TEST_POINT_SELECTION_DSET_DTYPE, fspace_id, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create dataset '%s'\n", DATASET_LARGE_READ_TEST_POINT_SELECTION_DSET_NAME);
+ goto error;
+ }
+
+ for (i = 0, data_size = 1; i < DATASET_LARGE_READ_TEST_POINT_SELECTION_DSET_SPACE_RANK; i++)
+ data_size *= dims[i];
+ data_size *= DATASET_LARGE_READ_TEST_POINT_SELECTION_DSET_DTYPESIZE;
+
+ if (NULL == (data = HDmalloc(data_size)))
+ TEST_ERROR;
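+ /* Allocate one set of coordinates (one hsize_t per dimension) for every element in the dataset */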
+ if (NULL ==
+ (points = HDmalloc((data_size / DATASET_LARGE_READ_TEST_POINT_SELECTION_DSET_DTYPESIZE) *
+ ((DATASET_LARGE_READ_TEST_POINT_SELECTION_DSET_SPACE_RANK) * (sizeof(hsize_t))))))
+ TEST_ERROR;
+
+ /* Select the entire dataspace */
+ for (i = 0; i < data_size / DATASET_LARGE_READ_TEST_POINT_SELECTION_DSET_DTYPESIZE; i++) {
+ points[i] = i;
+ }
+
+ if (H5Sselect_elements(fspace_id, H5S_SELECT_SET,
+ data_size / DATASET_LARGE_READ_TEST_POINT_SELECTION_DSET_DTYPESIZE, points) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't select points\n");
+ goto error;
+ }
+
+ if (H5Dread(dset_id, DATASET_LARGE_READ_TEST_POINT_SELECTION_DSET_DTYPE, H5S_ALL, fspace_id, H5P_DEFAULT,
+ data) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't read from dataset '%s'\n", DATASET_LARGE_READ_TEST_POINT_SELECTION_DSET_NAME);
+ goto error;
+ }
+
+ if (data) {
+ HDfree(data);
+ data = NULL;
+ }
+
+ if (points) {
+ HDfree(points);
+ points = NULL;
+ }
+
+ if (H5Sclose(fspace_id) < 0)
+ TEST_ERROR;
+ if (H5Dclose(dset_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ if (data)
+ HDfree(data);
+ if (points)
+ HDfree(points);
+ H5Sclose(fspace_id);
+ H5Dclose(dset_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+#endif
+
+/*
+ * A test to check that data can't be read from a
+ * dataset when H5Dread is passed invalid parameters.
+ */
+static int
+test_read_dataset_invalid_params(void)
+{
+ hsize_t dims[DATASET_READ_INVALID_PARAMS_TEST_DSET_SPACE_RANK] = {10, 5, 3};
+ herr_t err_ret = -1;
+ size_t i, data_size;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t fspace_id = H5I_INVALID_HID;
+ void *read_buf = NULL;
+
+ TESTING_MULTIPART("H5Dread with invalid parameters");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) {
+ SKIPPED();
+ HDprintf(
+ " API functions for basic file, group, or dataset aren't supported with this connector\n");
+ return 0;
+ }
+
+ TESTING_2("test setup");
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, DATASET_READ_INVALID_PARAMS_TEST_GROUP_NAME, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container sub-group '%s'\n",
+ DATASET_READ_INVALID_PARAMS_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((fspace_id = H5Screate_simple(DATASET_READ_INVALID_PARAMS_TEST_DSET_SPACE_RANK, dims, NULL)) < 0)
+ TEST_ERROR;
+
+ if ((dset_id = H5Dcreate2(group_id, DATASET_READ_INVALID_PARAMS_TEST_DSET_NAME,
+ DATASET_READ_INVALID_PARAMS_TEST_DSET_DTYPE, fspace_id, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create dataset '%s'\n", DATASET_READ_INVALID_PARAMS_TEST_DSET_NAME);
+ goto error;
+ }
+
+ for (i = 0, data_size = 1; i < DATASET_READ_INVALID_PARAMS_TEST_DSET_SPACE_RANK; i++)
+ data_size *= dims[i];
+ data_size *= DATASET_READ_INVALID_PARAMS_TEST_DSET_DTYPESIZE;
+
+ if (NULL == (read_buf = HDmalloc(data_size)))
+ TEST_ERROR;
+
+ PASSED();
+
+ BEGIN_MULTIPART
+ {
+ PART_BEGIN(H5Dread_invalid_dset_id)
+ {
+ TESTING_2("H5Dread with an invalid dataset ID");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Dread(H5I_INVALID_HID, DATASET_READ_INVALID_PARAMS_TEST_DSET_DTYPE, H5S_ALL,
+ H5S_ALL, H5P_DEFAULT, read_buf);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" read from dataset using H5Dread with an invalid dataset ID!\n");
+ PART_ERROR(H5Dread_invalid_dset_id);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Dread_invalid_dset_id);
+
+ PART_BEGIN(H5Dread_invalid_datatype)
+ {
+ TESTING_2("H5Dread with an invalid memory datatype");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Dread(dset_id, H5I_INVALID_HID, H5S_ALL, H5S_ALL, H5P_DEFAULT, read_buf);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" read from dataset using H5Dread with an invalid memory datatype!\n");
+ PART_ERROR(H5Dread_invalid_datatype);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Dread_invalid_datatype);
+
+ PART_BEGIN(H5Dread_invalid_mem_dataspace)
+ {
+ TESTING_2("H5Dread with an invalid memory dataspace");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Dread(dset_id, DATASET_READ_INVALID_PARAMS_TEST_DSET_DTYPE, H5I_INVALID_HID,
+ H5S_ALL, H5P_DEFAULT, read_buf);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" read from dataset using H5Dread with an invalid memory dataspace!\n");
+ PART_ERROR(H5Dread_invalid_mem_dataspace);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Dread_invalid_mem_dataspace);
+
+ PART_BEGIN(H5Dread_invalid_file_dataspace)
+ {
+ TESTING_2("H5Dread with an invalid file dataspace");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Dread(dset_id, DATASET_READ_INVALID_PARAMS_TEST_DSET_DTYPE, H5S_ALL,
+ H5I_INVALID_HID, H5P_DEFAULT, read_buf);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" read from dataset using H5Dread with an invalid file dataspace!\n");
+ PART_ERROR(H5Dread_invalid_file_dataspace);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Dread_invalid_file_dataspace);
+
+ PART_BEGIN(H5Dread_invalid_dxpl)
+ {
+ TESTING_2("H5Dread with an invalid DXPL");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Dread(dset_id, DATASET_READ_INVALID_PARAMS_TEST_DSET_DTYPE, H5S_ALL, H5S_ALL,
+ H5I_INVALID_HID, read_buf);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" read from dataset using H5Dread with an invalid DXPL!\n");
+ PART_ERROR(H5Dread_invalid_dxpl);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Dread_invalid_dxpl);
+
+ PART_BEGIN(H5Dread_invalid_data_buf)
+ {
+ TESTING_2("H5Dread with an invalid data buffer");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Dread(dset_id, DATASET_READ_INVALID_PARAMS_TEST_DSET_DTYPE, H5S_ALL, H5S_ALL,
+ H5P_DEFAULT, NULL);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" read from dataset using H5Dread with an invalid data buffer!\n");
+ PART_ERROR(H5Dread_invalid_data_buf);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Dread_invalid_data_buf);
+ }
+ END_MULTIPART;
+
+ TESTING_2("test cleanup");
+
+ if (read_buf) {
+ HDfree(read_buf);
+ read_buf = NULL;
+ }
+
+ if (H5Sclose(fspace_id) < 0)
+ TEST_ERROR;
+ if (H5Dclose(dset_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ if (read_buf)
+ HDfree(read_buf);
+ H5Sclose(fspace_id);
+ H5Dclose(dset_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to check that a small write can be
+ * made to a dataset using an H5S_ALL selection.
+ */
+static int
+test_write_dataset_small_all(void)
+{
+ hssize_t space_npoints;
+ hsize_t dims[DATASET_SMALL_WRITE_TEST_ALL_DSET_SPACE_RANK] = {10, 5, 3};
+ size_t i;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t fspace_id = H5I_INVALID_HID;
+ void *data = NULL;
+
+ TESTING("small write to dataset with H5S_ALL");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file, group, basic or more dataset aren't supported with this "
+ "connector\n");
+ return 0;
+ }
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, DATASET_SMALL_WRITE_TEST_ALL_GROUP_NAME, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container sub-group '%s'\n", DATASET_SMALL_WRITE_TEST_ALL_GROUP_NAME);
+ goto error;
+ }
+
+ if ((fspace_id = H5Screate_simple(DATASET_SMALL_WRITE_TEST_ALL_DSET_SPACE_RANK, dims, NULL)) < 0)
+ TEST_ERROR;
+
+ if ((dset_id = H5Dcreate2(group_id, DATASET_SMALL_WRITE_TEST_ALL_DSET_NAME,
+ DATASET_SMALL_WRITE_TEST_ALL_DSET_DTYPE, fspace_id, H5P_DEFAULT, H5P_DEFAULT,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create dataset '%s'\n", DATASET_SMALL_WRITE_TEST_ALL_DSET_NAME);
+ goto error;
+ }
+
+ /* Close the dataset and dataspace, then re-open the dataset, to verify that writing to a re-opened dataset works correctly */
+ if (H5Dclose(dset_id) < 0)
+ TEST_ERROR;
+ if (H5Sclose(fspace_id) < 0)
+ TEST_ERROR;
+
+ if ((dset_id = H5Dopen2(group_id, DATASET_SMALL_WRITE_TEST_ALL_DSET_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open dataset '%s'\n", DATASET_SMALL_WRITE_TEST_ALL_DSET_NAME);
+ goto error;
+ }
+
+ if ((fspace_id = H5Dget_space(dset_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get dataset dataspace\n");
+ goto error;
+ }
+
+ if ((space_npoints = H5Sget_simple_extent_npoints(fspace_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get dataspace num points\n");
+ goto error;
+ }
+
+ if (NULL == (data = HDmalloc((hsize_t)space_npoints * DATASET_SMALL_WRITE_TEST_ALL_DSET_DTYPESIZE)))
+ TEST_ERROR;
+
+ for (i = 0; i < (hsize_t)space_npoints; i++)
+ ((int *)data)[i] = (int)i;
+
+ if (H5Dwrite(dset_id, DATASET_SMALL_WRITE_TEST_ALL_DSET_DTYPE, H5S_ALL, H5S_ALL, H5P_DEFAULT, data) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't write to dataset '%s'\n", DATASET_SMALL_WRITE_TEST_ALL_DSET_NAME);
+ goto error;
+ }
+
+ if (data) {
+ HDfree(data);
+ data = NULL;
+ }
+
+ if (H5Sclose(fspace_id) < 0)
+ TEST_ERROR;
+ if (H5Dclose(dset_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ if (data)
+ HDfree(data);
+ H5Sclose(fspace_id);
+ H5Dclose(dset_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to check that a small write can be made
+ * to a dataset using a hyperslab selection.
+ */
+static int
+test_write_dataset_small_hyperslab(void)
+{
+ hsize_t start[DATASET_SMALL_WRITE_TEST_HYPERSLAB_DSET_SPACE_RANK];
+ hsize_t stride[DATASET_SMALL_WRITE_TEST_HYPERSLAB_DSET_SPACE_RANK];
+ hsize_t count[DATASET_SMALL_WRITE_TEST_HYPERSLAB_DSET_SPACE_RANK];
+ hsize_t block[DATASET_SMALL_WRITE_TEST_HYPERSLAB_DSET_SPACE_RANK];
+ hsize_t dims[DATASET_SMALL_WRITE_TEST_HYPERSLAB_DSET_SPACE_RANK] = {10, 5, 3};
+ size_t i, data_size;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t mspace_id = H5I_INVALID_HID, fspace_id = H5I_INVALID_HID;
+ void *data = NULL;
+
+ TESTING("small write to dataset with a hyperslab selection");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) {
+ SKIPPED();
+ HDprintf(
+ " API functions for basic file, group, or dataset aren't supported with this connector\n");
+ return 0;
+ }
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, DATASET_SMALL_WRITE_TEST_HYPERSLAB_GROUP_NAME, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container sub-group '%s'\n",
+ DATASET_SMALL_WRITE_TEST_HYPERSLAB_GROUP_NAME);
+ goto error;
+ }
+
+ if ((fspace_id = H5Screate_simple(DATASET_SMALL_WRITE_TEST_HYPERSLAB_DSET_SPACE_RANK, dims, NULL)) < 0)
+ TEST_ERROR;
+ if ((mspace_id = H5Screate_simple(DATASET_SMALL_WRITE_TEST_HYPERSLAB_DSET_SPACE_RANK - 1, dims, NULL)) <
+ 0)
+ TEST_ERROR;
+
+ if ((dset_id = H5Dcreate2(group_id, DATASET_SMALL_WRITE_TEST_HYPERSLAB_DSET_NAME,
+ DATASET_SMALL_WRITE_TEST_HYPERSLAB_DSET_DTYPE, fspace_id, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create dataset '%s'\n", DATASET_SMALL_WRITE_TEST_HYPERSLAB_DSET_NAME);
+ goto error;
+ }
+
+ for (i = 0, data_size = 1; i < DATASET_SMALL_WRITE_TEST_HYPERSLAB_DSET_SPACE_RANK - 1; i++)
+ data_size *= dims[i];
+ data_size *= DATASET_SMALL_WRITE_TEST_HYPERSLAB_DSET_DTYPESIZE;
+
+ if (NULL == (data = HDmalloc(data_size)))
+ TEST_ERROR;
+
+ for (i = 0; i < data_size / DATASET_SMALL_WRITE_TEST_HYPERSLAB_DSET_DTYPESIZE; i++)
+ ((int *)data)[i] = (int)i;
+
+ for (i = 0; i < DATASET_SMALL_WRITE_TEST_HYPERSLAB_DSET_SPACE_RANK; i++) {
+ start[i] = 0;
+ stride[i] = 1;
+ count[i] = dims[i];
+ block[i] = 1;
+ }
+
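+ /* Restrict the file selection to a single slice along the last dimension so it matches the rank-2 memory dataspace */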
+ count[2] = 1;
+
+ if (H5Sselect_hyperslab(fspace_id, H5S_SELECT_SET, start, stride, count, block) < 0)
+ TEST_ERROR;
+
+ if (H5Dwrite(dset_id, DATASET_SMALL_WRITE_TEST_HYPERSLAB_DSET_DTYPE, mspace_id, fspace_id, H5P_DEFAULT,
+ data) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't write to dataset '%s'\n", DATASET_SMALL_WRITE_TEST_HYPERSLAB_DSET_NAME);
+ goto error;
+ }
+
+ if (data) {
+ HDfree(data);
+ data = NULL;
+ }
+
+ if (H5Sclose(mspace_id) < 0)
+ TEST_ERROR;
+ if (H5Sclose(fspace_id) < 0)
+ TEST_ERROR;
+ if (H5Dclose(dset_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ if (data)
+ HDfree(data);
+ H5Sclose(mspace_id);
+ H5Sclose(fspace_id);
+ H5Dclose(dset_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to check that a small write can be made
+ * to a dataset using a point selection.
+ */
+static int
+test_write_dataset_small_point_selection(void)
+{
+ hsize_t points[DATASET_SMALL_WRITE_TEST_POINT_SELECTION_NUM_POINTS *
+ DATASET_SMALL_WRITE_TEST_POINT_SELECTION_DSET_SPACE_RANK];
+ hsize_t dims[DATASET_SMALL_WRITE_TEST_POINT_SELECTION_DSET_SPACE_RANK] = {10, 10, 10};
+ hsize_t mdims[] = {DATASET_SMALL_WRITE_TEST_POINT_SELECTION_NUM_POINTS};
+ size_t i, data_size;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t fspace_id = H5I_INVALID_HID;
+ hid_t mspace_id = H5I_INVALID_HID;
+ void *data = NULL;
+
+ TESTING("small write to dataset with a point selection");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) {
+ SKIPPED();
+ HDprintf(
+ " API functions for basic file, group, or dataset aren't supported with this connector\n");
+ return 0;
+ }
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, DATASET_SMALL_WRITE_TEST_POINT_SELECTION_GROUP_NAME,
+ H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container sub-group '%s'\n",
+ DATASET_SMALL_WRITE_TEST_POINT_SELECTION_GROUP_NAME);
+ goto error;
+ }
+
+ if ((fspace_id = H5Screate_simple(DATASET_SMALL_WRITE_TEST_POINT_SELECTION_DSET_SPACE_RANK, dims, NULL)) <
+ 0)
+ TEST_ERROR;
+ if ((mspace_id = H5Screate_simple(1, mdims, NULL)) < 0)
+ TEST_ERROR;
+
+ if ((dset_id = H5Dcreate2(group_id, DATASET_SMALL_WRITE_TEST_POINT_SELECTION_DSET_NAME,
+ DATASET_SMALL_WRITE_TEST_POINT_SELECTION_DSET_DTYPE, fspace_id, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create dataset '%s'\n", DATASET_SMALL_WRITE_TEST_POINT_SELECTION_DSET_NAME);
+ goto error;
+ }
+
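+ /* One element of the dataset's datatype is written for each selected point */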
+ data_size = DATASET_SMALL_WRITE_TEST_POINT_SELECTION_NUM_POINTS *
+ DATASET_SMALL_WRITE_TEST_POINT_SELECTION_DSET_DTYPESIZE;
+
+ if (NULL == (data = HDmalloc(data_size)))
+ TEST_ERROR;
+
+ for (i = 0; i < data_size / DATASET_SMALL_WRITE_TEST_POINT_SELECTION_DSET_DTYPESIZE; i++)
+ ((int *)data)[i] = (int)i;
+
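+ /* Place the selected points along the main diagonal of the dataset: (0,0,0), (1,1,1), ... */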
+ for (i = 0; i < DATASET_SMALL_WRITE_TEST_POINT_SELECTION_NUM_POINTS; i++) {
+ size_t j;
+
+ for (j = 0; j < DATASET_SMALL_WRITE_TEST_POINT_SELECTION_DSET_SPACE_RANK; j++)
+ points[(i * DATASET_SMALL_WRITE_TEST_POINT_SELECTION_DSET_SPACE_RANK) + j] = i;
+ }
+
+ if (H5Sselect_elements(fspace_id, H5S_SELECT_SET, DATASET_SMALL_WRITE_TEST_POINT_SELECTION_NUM_POINTS,
+ points) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't select points\n");
+ goto error;
+ }
+
+ if (H5Dwrite(dset_id, DATASET_SMALL_WRITE_TEST_POINT_SELECTION_DSET_DTYPE, mspace_id, fspace_id,
+ H5P_DEFAULT, data) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't write to dataset '%s'\n", DATASET_SMALL_WRITE_TEST_POINT_SELECTION_DSET_NAME);
+ goto error;
+ }
+
+ if (data) {
+ HDfree(data);
+ data = NULL;
+ }
+
+ if (H5Sclose(mspace_id) < 0)
+ TEST_ERROR;
+ if (H5Sclose(fspace_id) < 0)
+ TEST_ERROR;
+ if (H5Dclose(dset_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ if (data)
+ HDfree(data);
+ H5Sclose(mspace_id);
+ H5Sclose(fspace_id);
+ H5Dclose(dset_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+#ifndef NO_LARGE_TESTS
+/*
+ * A test to check that a large write can be made
+ * to a dataset using an H5S_ALL selection.
+ */
+static int
+test_write_dataset_large_all(void)
+{
+ hssize_t space_npoints;
+ hsize_t dims[DATASET_LARGE_WRITE_TEST_ALL_DSET_SPACE_RANK] = {600, 600, 600};
+ size_t i, data_size;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t fspace_id = H5I_INVALID_HID;
+ void *data = NULL;
+
+ TESTING("large write to dataset with H5S_ALL");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file, group, basic or more dataset aren't supported with this "
+ "connector\n");
+ return 0;
+ }
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, DATASET_LARGE_WRITE_TEST_ALL_GROUP_NAME, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container sub-group '%s'\n", DATASET_LARGE_WRITE_TEST_ALL_GROUP_NAME);
+ goto error;
+ }
+
+ if ((fspace_id = H5Screate_simple(DATASET_LARGE_WRITE_TEST_ALL_DSET_SPACE_RANK, dims, NULL)) < 0)
+ TEST_ERROR;
+
+ if ((dset_id = H5Dcreate2(group_id, DATASET_LARGE_WRITE_TEST_ALL_DSET_NAME,
+ DATASET_LARGE_WRITE_TEST_ALL_DSET_DTYPE, fspace_id, H5P_DEFAULT, H5P_DEFAULT,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create dataset '%s'\n", DATASET_LARGE_WRITE_TEST_ALL_DSET_NAME);
+ goto error;
+ }
+
+ /* Close the dataset and dataspace, then re-open the dataset, to verify that retrieving its dataspace with H5Dget_space works correctly */
+ if (H5Dclose(dset_id) < 0)
+ TEST_ERROR;
+ if (H5Sclose(fspace_id) < 0)
+ TEST_ERROR;
+
+ if ((dset_id = H5Dopen2(group_id, DATASET_LARGE_WRITE_TEST_ALL_DSET_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open dataset '%s'\n", DATASET_LARGE_WRITE_TEST_ALL_DSET_NAME);
+ goto error;
+ }
+
+ if ((fspace_id = H5Dget_space(dset_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get dataset dataspace\n");
+ goto error;
+ }
+
+ if ((space_npoints = H5Sget_simple_extent_npoints(fspace_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get dataspace num points\n");
+ goto error;
+ }
+
+ if (NULL == (data = HDmalloc((hsize_t)space_npoints * DATASET_LARGE_WRITE_TEST_ALL_DSET_DTYPESIZE)))
+ TEST_ERROR;
+
+ for (i = 0; i < (hsize_t)space_npoints; i++)
+ ((int *)data)[i] = (int)i;
+
+ if (H5Dwrite(dset_id, DATASET_LARGE_WRITE_TEST_ALL_DSET_DTYPE, H5S_ALL, H5S_ALL, H5P_DEFAULT, data) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't write to dataset '%s'\n", DATASET_LARGE_WRITE_TEST_ALL_DSET_NAME);
+ goto error;
+ }
+
+ if (data) {
+ HDfree(data);
+ data = NULL;
+ }
+
+ if (H5Sclose(fspace_id) < 0)
+ TEST_ERROR;
+ if (H5Dclose(dset_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ if (data)
+ HDfree(data);
+ H5Sclose(fspace_id);
+ H5Dclose(dset_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to check that a large write can be made
+ * to a dataset using a hyperslab selection.
+ */
+static int
+test_write_dataset_large_hyperslab(void)
+{
+ hsize_t start[DATASET_LARGE_WRITE_TEST_HYPERSLAB_DSET_SPACE_RANK];
+ hsize_t stride[DATASET_LARGE_WRITE_TEST_HYPERSLAB_DSET_SPACE_RANK];
+ hsize_t count[DATASET_LARGE_WRITE_TEST_HYPERSLAB_DSET_SPACE_RANK];
+ hsize_t block[DATASET_LARGE_WRITE_TEST_HYPERSLAB_DSET_SPACE_RANK];
+ hsize_t dims[DATASET_LARGE_WRITE_TEST_HYPERSLAB_DSET_SPACE_RANK] = {600, 600, 600};
+ size_t i, data_size;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t mspace_id = H5I_INVALID_HID, fspace_id = H5I_INVALID_HID;
+ void *data = NULL;
+
+ TESTING("large write to dataset with a hyperslab selection");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) {
+ SKIPPED();
+ HDprintf(
+ " API functions for basic file, group, or dataset aren't supported with this connector\n");
+ return 0;
+ }
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, DATASET_LARGE_WRITE_TEST_HYPERSLAB_GROUP_NAME, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container sub-group '%s'\n",
+ DATASET_LARGE_WRITE_TEST_HYPERSLAB_GROUP_NAME);
+ goto error;
+ }
+
+ if ((fspace_id = H5Screate_simple(DATASET_LARGE_WRITE_TEST_HYPERSLAB_DSET_SPACE_RANK, dims, NULL)) < 0)
+ TEST_ERROR;
+ if ((mspace_id = H5Screate_simple(DATASET_LARGE_WRITE_TEST_HYPERSLAB_DSET_SPACE_RANK, dims, NULL)) < 0)
+ TEST_ERROR;
+
+ if ((dset_id = H5Dcreate2(group_id, DATASET_LARGE_WRITE_TEST_HYPERSLAB_DSET_NAME,
+ DATASET_LARGE_WRITE_TEST_HYPERSLAB_DSET_DTYPE, fspace_id, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create dataset '%s'\n", DATASET_LARGE_WRITE_TEST_HYPERSLAB_DSET_NAME);
+ goto error;
+ }
+
+ for (i = 0, data_size = 1; i < DATASET_LARGE_WRITE_TEST_HYPERSLAB_DSET_SPACE_RANK; i++)
+ data_size *= dims[i];
+ data_size *= DATASET_LARGE_WRITE_TEST_HYPERSLAB_DSET_DTYPESIZE;
+
+ if (NULL == (data = HDmalloc(data_size)))
+ TEST_ERROR;
+
+ for (i = 0; i < data_size / DATASET_LARGE_WRITE_TEST_HYPERSLAB_DSET_DTYPESIZE; i++)
+ ((int *)data)[i] = (int)i;
+
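+ /* Select the entire file dataspace as one hyperslab (unit stride and block, count equal to the full dimensions) */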
+ for (i = 0; i < DATASET_LARGE_WRITE_TEST_HYPERSLAB_DSET_SPACE_RANK; i++) {
+ start[i] = 0;
+ stride[i] = 1;
+ count[i] = dims[i];
+ block[i] = 1;
+ }
+
+ if (H5Sselect_hyperslab(fspace_id, H5S_SELECT_SET, start, stride, count, block) < 0)
+ TEST_ERROR;
+
+ if (H5Dwrite(dset_id, DATASET_LARGE_WRITE_TEST_HYPERSLAB_DSET_DTYPE, mspace_id, fspace_id, H5P_DEFAULT,
+ data) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't write to dataset '%s'\n", DATASET_LARGE_WRITE_TEST_HYPERSLAB_DSET_NAME);
+ goto error;
+ }
+
+ if (data) {
+ HDfree(data);
+ data = NULL;
+ }
+
+ if (H5Sclose(mspace_id) < 0)
+ TEST_ERROR;
+ if (H5Sclose(fspace_id) < 0)
+ TEST_ERROR;
+ if (H5Dclose(dset_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ if (data)
+ HDfree(data);
+ H5Sclose(mspace_id);
+ H5Sclose(fspace_id);
+ H5Dclose(dset_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to check that a large write can be made
+ * to a dataset using a point selection.
+ */
+static int
+test_write_dataset_large_point_selection(void)
+{
+ TESTING("large write to dataset with a point selection");
+
+ SKIPPED();
+
+ return 0;
+
+error:
+ return 1;
+}
+#endif
+
+/*
+ * A test to ensure that data is read back correctly from
+ * a dataset after it has been written.
+ */
+static int
+test_write_dataset_data_verification(void)
+{
+ hssize_t space_npoints;
+ hsize_t dims[DATASET_DATA_VERIFY_WRITE_TEST_DSET_SPACE_RANK] = {10, 10, 10};
+ hsize_t start[DATASET_DATA_VERIFY_WRITE_TEST_DSET_SPACE_RANK];
+ hsize_t stride[DATASET_DATA_VERIFY_WRITE_TEST_DSET_SPACE_RANK];
+ hsize_t count[DATASET_DATA_VERIFY_WRITE_TEST_DSET_SPACE_RANK];
+ hsize_t block[DATASET_DATA_VERIFY_WRITE_TEST_DSET_SPACE_RANK];
+ hsize_t
+ points[DATASET_DATA_VERIFY_WRITE_TEST_NUM_POINTS * DATASET_DATA_VERIFY_WRITE_TEST_DSET_SPACE_RANK];
+ size_t i, data_size;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t fspace_id = H5I_INVALID_HID;
+ hid_t mspace_id = H5I_INVALID_HID;
+ void *data = NULL;
+ void *write_buf = NULL;
+ void *read_buf = NULL;
+
+ TESTING_MULTIPART("verification of dataset data using H5Dwrite then H5Dread");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file, group, basic or more dataset aren't supported with this "
+ "connector\n");
+ return 0;
+ }
+
+ TESTING_2("test setup");
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, DATASET_DATA_VERIFY_WRITE_TEST_GROUP_NAME, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container sub-group '%s'\n", DATASET_DATA_VERIFY_WRITE_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((fspace_id = H5Screate_simple(DATASET_DATA_VERIFY_WRITE_TEST_DSET_SPACE_RANK, dims, NULL)) < 0)
+ TEST_ERROR;
+
+ if ((dset_id = H5Dcreate2(group_id, DATASET_DATA_VERIFY_WRITE_TEST_DSET_NAME,
+ DATASET_DATA_VERIFY_WRITE_TEST_DSET_DTYPE, fspace_id, H5P_DEFAULT, H5P_DEFAULT,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create dataset '%s'\n", DATASET_DATA_VERIFY_WRITE_TEST_DSET_NAME);
+ goto error;
+ }
+
+ for (i = 0, data_size = 1; i < DATASET_DATA_VERIFY_WRITE_TEST_DSET_SPACE_RANK; i++)
+ data_size *= dims[i];
+ data_size *= DATASET_DATA_VERIFY_WRITE_TEST_DSET_DTYPESIZE;
+
+ if (NULL == (data = HDmalloc(data_size)))
+ TEST_ERROR;
+
+ for (i = 0; i < data_size / DATASET_DATA_VERIFY_WRITE_TEST_DSET_DTYPESIZE; i++)
+ ((int *)data)[i] = (int)i;
+
+ PASSED();
+
+ BEGIN_MULTIPART
+ {
+ PART_BEGIN(H5Dwrite_all_read)
+ {
+ TESTING_2("H5Dwrite using H5S_ALL then H5Dread");
+
+ if (H5Dwrite(dset_id, DATASET_DATA_VERIFY_WRITE_TEST_DSET_DTYPE, H5S_ALL, H5S_ALL, H5P_DEFAULT,
+ data) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't write to dataset '%s'\n", DATASET_DATA_VERIFY_WRITE_TEST_DSET_NAME);
+ PART_ERROR(H5Dwrite_all_read);
+ }
+
+ if (data) {
+ HDfree(data);
+ data = NULL;
+ }
+
+ if (fspace_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(fspace_id);
+ }
+ H5E_END_TRY;
+ fspace_id = H5I_INVALID_HID;
+ }
+ if (dset_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Dclose(dset_id);
+ }
+ H5E_END_TRY;
+ dset_id = H5I_INVALID_HID;
+ }
+
+ if ((dset_id = H5Dopen2(group_id, DATASET_DATA_VERIFY_WRITE_TEST_DSET_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open dataset '%s'\n", DATASET_DATA_VERIFY_WRITE_TEST_DSET_NAME);
+ PART_ERROR(H5Dwrite_all_read);
+ }
+
+ if ((fspace_id = H5Dget_space(dset_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get dataset dataspace\n");
+ PART_ERROR(H5Dwrite_all_read);
+ }
+
+ if ((space_npoints = H5Sget_simple_extent_npoints(fspace_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get dataspace num points\n");
+ PART_ERROR(H5Dwrite_all_read);
+ }
+
+ if (NULL ==
+ (data = HDmalloc((hsize_t)space_npoints * DATASET_DATA_VERIFY_WRITE_TEST_DSET_DTYPESIZE))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for dataset read\n");
+ PART_ERROR(H5Dwrite_all_read);
+ }
+
+ if (H5Dread(dset_id, DATASET_DATA_VERIFY_WRITE_TEST_DSET_DTYPE, H5S_ALL, H5S_ALL, H5P_DEFAULT,
+ data) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't read from dataset '%s'\n", DATASET_DATA_VERIFY_WRITE_TEST_DSET_NAME);
+ PART_ERROR(H5Dwrite_all_read);
+ }
+
+ for (i = 0; i < (hsize_t)space_npoints; i++)
+ if (((int *)data)[i] != (int)i) {
+ H5_FAILED();
+ HDprintf(" H5S_ALL selection data verification failed\n");
+ PART_ERROR(H5Dwrite_all_read);
+ }
+
+ if (data) {
+ HDfree(data);
+ data = NULL;
+ }
+
+ PASSED();
+ }
+ PART_END(H5Dwrite_all_read);
+
+ PART_BEGIN(H5Dwrite_hyperslab_read)
+ {
+ TESTING_2("H5Dwrite using hyperslab selection then H5Dread");
+
+ data_size = dims[1] * 2 * DATASET_DATA_VERIFY_WRITE_TEST_DSET_DTYPESIZE;
+
+ if (NULL == (write_buf = HDmalloc(data_size))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for dataset write\n");
+ PART_ERROR(H5Dwrite_hyperslab_read);
+ }
+
+ for (i = 0; i < data_size / DATASET_DATA_VERIFY_WRITE_TEST_DSET_DTYPESIZE; i++)
+ ((int *)write_buf)[i] = 56;
+
+ for (i = 0, data_size = 1; i < DATASET_DATA_VERIFY_WRITE_TEST_DSET_SPACE_RANK; i++)
+ data_size *= dims[i];
+ data_size *= DATASET_DATA_VERIFY_WRITE_TEST_DSET_DTYPESIZE;
+
+ if (NULL == (data = HDmalloc(data_size))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for dataset data verification\n");
+ PART_ERROR(H5Dwrite_hyperslab_read);
+ }
+
+ if (H5Dread(dset_id, DATASET_DATA_VERIFY_WRITE_TEST_DSET_DTYPE, H5S_ALL, H5S_ALL, H5P_DEFAULT,
+ data) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't read from dataset '%s'\n", DATASET_DATA_VERIFY_WRITE_TEST_DSET_NAME);
+ PART_ERROR(H5Dwrite_hyperslab_read);
+ }
+
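+ /* Update the verification buffer to reflect the values that the hyperslab write below will place at elements (i, j, 0) of the first two rows */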
+ for (i = 0; i < 2; i++) {
+ size_t j;
+
+ for (j = 0; j < dims[1]; j++)
+ ((int *)data)[(i * dims[1] * dims[2]) + (j * dims[2])] = 56;
+ }
+
+ /* Write to first two rows of dataset */
+ start[0] = start[1] = start[2] = 0;
+ stride[0] = stride[1] = stride[2] = 1;
+ count[0] = 2;
+ count[1] = dims[1];
+ count[2] = 1;
+ block[0] = block[1] = block[2] = 1;
+
+ if (H5Sselect_hyperslab(fspace_id, H5S_SELECT_SET, start, stride, count, block) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't select hyperslab for dataset write\n");
+ PART_ERROR(H5Dwrite_hyperslab_read);
+ }
+
+ {
+ hsize_t mdims[] = {(hsize_t)2 * dims[1]};
+
+ if ((mspace_id = H5Screate_simple(1, mdims, NULL)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create memory dataspace\n");
+ PART_ERROR(H5Dwrite_hyperslab_read);
+ }
+ }
+
+ if (H5Dwrite(dset_id, DATASET_DATA_VERIFY_WRITE_TEST_DSET_DTYPE, mspace_id, fspace_id,
+ H5P_DEFAULT, write_buf) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't write to dataset '%s'\n", DATASET_DATA_VERIFY_WRITE_TEST_DSET_NAME);
+ PART_ERROR(H5Dwrite_hyperslab_read);
+ }
+
+ if (mspace_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(mspace_id);
+ }
+ H5E_END_TRY;
+ mspace_id = H5I_INVALID_HID;
+ }
+ if (fspace_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(fspace_id);
+ }
+ H5E_END_TRY;
+ fspace_id = H5I_INVALID_HID;
+ }
+ if (dset_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Dclose(dset_id);
+ }
+ H5E_END_TRY;
+ dset_id = H5I_INVALID_HID;
+ }
+
+ if ((dset_id = H5Dopen2(group_id, DATASET_DATA_VERIFY_WRITE_TEST_DSET_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open dataset '%s'\n", DATASET_DATA_VERIFY_WRITE_TEST_DSET_NAME);
+ PART_ERROR(H5Dwrite_hyperslab_read);
+ }
+
+ if ((fspace_id = H5Dget_space(dset_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get dataset dataspace\n");
+ PART_ERROR(H5Dwrite_hyperslab_read);
+ }
+
+ if ((space_npoints = H5Sget_simple_extent_npoints(fspace_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get dataspace num points\n");
+ PART_ERROR(H5Dwrite_hyperslab_read);
+ }
+
+ if (NULL == (read_buf = HDmalloc((hsize_t)space_npoints *
+ DATASET_DATA_VERIFY_WRITE_TEST_DSET_DTYPESIZE))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for dataset read\n");
+ PART_ERROR(H5Dwrite_hyperslab_read);
+ }
+
+ if (H5Dread(dset_id, DATASET_DATA_VERIFY_WRITE_TEST_DSET_DTYPE, H5S_ALL, H5S_ALL, H5P_DEFAULT,
+ read_buf) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't read from dataset '%s'\n", DATASET_DATA_VERIFY_WRITE_TEST_DSET_NAME);
+ PART_ERROR(H5Dwrite_hyperslab_read);
+ }
+
+ if (memcmp(data, read_buf, data_size)) {
+ H5_FAILED();
+ HDprintf(" hyperslab selection data verification failed\n");
+ PART_ERROR(H5Dwrite_hyperslab_read);
+ }
+
+ if (data) {
+ HDfree(data);
+ data = NULL;
+ }
+
+ if (write_buf) {
+ HDfree(write_buf);
+ write_buf = NULL;
+ }
+
+ if (read_buf) {
+ HDfree(read_buf);
+ read_buf = NULL;
+ }
+
+ PASSED();
+ }
+ PART_END(H5Dwrite_hyperslab_read);
+
+ PART_BEGIN(H5Dwrite_point_sel_read)
+ {
+ TESTING_2("H5Dwrite using point selection then H5Dread");
+
+ data_size =
+ DATASET_DATA_VERIFY_WRITE_TEST_NUM_POINTS * DATASET_DATA_VERIFY_WRITE_TEST_DSET_DTYPESIZE;
+
+ if (NULL == (write_buf = HDmalloc(data_size))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for dataset write\n");
+ PART_ERROR(H5Dwrite_point_sel_read);
+ }
+
+ for (i = 0; i < data_size / DATASET_DATA_VERIFY_WRITE_TEST_DSET_DTYPESIZE; i++)
+ ((int *)write_buf)[i] = 13;
+
+ for (i = 0, data_size = 1; i < DATASET_DATA_VERIFY_WRITE_TEST_DSET_SPACE_RANK; i++)
+ data_size *= dims[i];
+ data_size *= DATASET_DATA_VERIFY_WRITE_TEST_DSET_DTYPESIZE;
+
+ if (NULL == (data = HDmalloc(data_size))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for dataset data verification\n");
+ PART_ERROR(H5Dwrite_point_sel_read);
+ }
+
+ if (H5Dread(dset_id, DATASET_DATA_VERIFY_WRITE_TEST_DSET_DTYPE, H5S_ALL, H5S_ALL, H5P_DEFAULT,
+ data) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't read from dataset '%s'\n", DATASET_DATA_VERIFY_WRITE_TEST_DSET_NAME);
+ PART_ERROR(H5Dwrite_point_sel_read);
+ }
+
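+ /* Update the verification buffer: the point selection write below places the value 13 along the main diagonal */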
+ for (i = 0; i < dims[0]; i++) {
+ size_t j;
+
+ for (j = 0; j < dims[1]; j++) {
+ size_t k;
+
+ for (k = 0; k < dims[2]; k++) {
+ if (i == j && j == k)
+ ((int *)data)[(i * dims[1] * dims[2]) + (j * dims[2]) + k] = 13;
+ }
+ }
+ }
+
+ /* Select a series of 10 points in the dataset */
+ for (i = 0; i < DATASET_DATA_VERIFY_WRITE_TEST_NUM_POINTS; i++) {
+ size_t j;
+
+ for (j = 0; j < DATASET_DATA_VERIFY_WRITE_TEST_DSET_SPACE_RANK; j++)
+ points[(i * DATASET_DATA_VERIFY_WRITE_TEST_DSET_SPACE_RANK) + j] = i;
+ }
+
+ if (H5Sselect_elements(fspace_id, H5S_SELECT_SET, DATASET_DATA_VERIFY_WRITE_TEST_NUM_POINTS,
+ points) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't select elements in dataspace\n");
+ PART_ERROR(H5Dwrite_point_sel_read);
+ }
+
+ {
+ hsize_t mdims[] = {(hsize_t)DATASET_DATA_VERIFY_WRITE_TEST_NUM_POINTS};
+
+ if ((mspace_id = H5Screate_simple(1, mdims, NULL)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create memory dataspace\n");
+ PART_ERROR(H5Dwrite_point_sel_read);
+ }
+ }
+
+ if (H5Dwrite(dset_id, DATASET_DATA_VERIFY_WRITE_TEST_DSET_DTYPE, mspace_id, fspace_id,
+ H5P_DEFAULT, write_buf) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't write to dataset '%s'\n", DATASET_DATA_VERIFY_WRITE_TEST_DSET_NAME);
+ PART_ERROR(H5Dwrite_point_sel_read);
+ }
+
+ if (mspace_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(mspace_id);
+ }
+ H5E_END_TRY;
+ mspace_id = H5I_INVALID_HID;
+ }
+ if (fspace_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(fspace_id);
+ }
+ H5E_END_TRY;
+ fspace_id = H5I_INVALID_HID;
+ }
+ if (dset_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Dclose(dset_id);
+ }
+ H5E_END_TRY;
+ dset_id = H5I_INVALID_HID;
+ }
+
+ if ((dset_id = H5Dopen2(group_id, DATASET_DATA_VERIFY_WRITE_TEST_DSET_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open dataset '%s'\n", DATASET_DATA_VERIFY_WRITE_TEST_DSET_NAME);
+ PART_ERROR(H5Dwrite_point_sel_read);
+ }
+
+ if ((fspace_id = H5Dget_space(dset_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get dataset dataspace\n");
+ PART_ERROR(H5Dwrite_point_sel_read);
+ }
+
+ if ((space_npoints = H5Sget_simple_extent_npoints(fspace_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get dataspace num points\n");
+ PART_ERROR(H5Dwrite_point_sel_read);
+ }
+
+ if (NULL == (read_buf = HDmalloc((hsize_t)space_npoints *
+ DATASET_DATA_VERIFY_WRITE_TEST_DSET_DTYPESIZE))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for dataset read\n");
+ PART_ERROR(H5Dwrite_point_sel_read);
+ }
+
+ if (H5Dread(dset_id, DATASET_DATA_VERIFY_WRITE_TEST_DSET_DTYPE, H5S_ALL, H5S_ALL, H5P_DEFAULT,
+ read_buf) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't read from dataset '%s'\n", DATASET_DATA_VERIFY_WRITE_TEST_DSET_NAME);
+ PART_ERROR(H5Dwrite_point_sel_read);
+ }
+
+ if (memcmp(data, read_buf, data_size)) {
+ H5_FAILED();
+ HDprintf(" point selection data verification failed\n");
+ PART_ERROR(H5Dwrite_point_sel_read);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Dwrite_point_sel_read);
+ }
+ END_MULTIPART;
+
+ TESTING_2("test cleanup");
+
+ if (data) {
+ HDfree(data);
+ data = NULL;
+ }
+
+ if (write_buf) {
+ HDfree(write_buf);
+ write_buf = NULL;
+ }
+
+ if (read_buf) {
+ HDfree(read_buf);
+ read_buf = NULL;
+ }
+
+ if (H5Sclose(fspace_id) < 0)
+ TEST_ERROR;
+ if (H5Dclose(dset_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ if (data)
+ HDfree(data);
+ if (write_buf)
+ HDfree(write_buf);
+ if (read_buf)
+ HDfree(read_buf);
+ H5Sclose(mspace_id);
+ H5Sclose(fspace_id);
+ H5Dclose(dset_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to check that a dataset can't be written to
+ * when H5Dwrite is passed invalid parameters.
+ */
+static int
+test_write_dataset_invalid_params(void)
+{
+ hssize_t space_npoints;
+ hsize_t dims[DATASET_WRITE_INVALID_PARAMS_TEST_DSET_SPACE_RANK] = {10, 5, 3};
+ herr_t err_ret = -1;
+ size_t i;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t fspace_id = H5I_INVALID_HID;
+ void *data = NULL;
+
+ TESTING_MULTIPART("H5Dwrite with invalid parameters");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) {
+ SKIPPED();
+ HDprintf(
+ " API functions for basic file, group, or dataset aren't supported with this connector\n");
+ return 0;
+ }
+
+ TESTING_2("test setup");
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, DATASET_WRITE_INVALID_PARAMS_TEST_GROUP_NAME, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container sub-group '%s'\n",
+ DATASET_WRITE_INVALID_PARAMS_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((fspace_id = H5Screate_simple(DATASET_WRITE_INVALID_PARAMS_TEST_DSET_SPACE_RANK, dims, NULL)) < 0)
+ TEST_ERROR;
+
+ if ((dset_id = H5Dcreate2(group_id, DATASET_WRITE_INVALID_PARAMS_TEST_DSET_NAME,
+ DATASET_WRITE_INVALID_PARAMS_TEST_DSET_DTYPE, fspace_id, H5P_DEFAULT, H5P_DEFAULT,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create dataset '%s'\n", DATASET_WRITE_INVALID_PARAMS_TEST_DSET_NAME);
+ goto error;
+ }
+
+ if ((space_npoints = H5Sget_simple_extent_npoints(fspace_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get dataspace num points\n");
+ goto error;
+ }
+
+ if (NULL == (data = HDmalloc((hsize_t)space_npoints * DATASET_WRITE_INVALID_PARAMS_TEST_DSET_DTYPESIZE)))
+ TEST_ERROR;
+
+ for (i = 0; i < (hsize_t)space_npoints; i++)
+ ((int *)data)[i] = (int)i;
+
+ PASSED();
+
+ BEGIN_MULTIPART
+ {
+ PART_BEGIN(H5Dwrite_invalid_dset_id)
+ {
+ TESTING_2("H5Dwrite with an invalid dataset ID");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Dwrite(H5I_INVALID_HID, DATASET_WRITE_INVALID_PARAMS_TEST_DSET_DTYPE, H5S_ALL,
+ H5S_ALL, H5P_DEFAULT, data);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" wrote to dataset using H5Dwrite with an invalid dataset ID!\n");
+ PART_ERROR(H5Dwrite_invalid_dset_id);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Dwrite_invalid_dset_id);
+
+ PART_BEGIN(H5Dwrite_invalid_datatype)
+ {
+ TESTING_2("H5Dwrite with an invalid memory datatype");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Dwrite(dset_id, H5I_INVALID_HID, H5S_ALL, H5S_ALL, H5P_DEFAULT, data);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" wrote to dataset using H5Dwrite with an invalid memory datatype!\n");
+ PART_ERROR(H5Dwrite_invalid_datatype);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Dwrite_invalid_datatype);
+
+ PART_BEGIN(H5Dwrite_invalid_mem_dataspace)
+ {
+ TESTING_2("H5Dwrite with an invalid memory dataspace");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Dwrite(dset_id, DATASET_WRITE_INVALID_PARAMS_TEST_DSET_DTYPE, H5I_INVALID_HID,
+ H5S_ALL, H5P_DEFAULT, data);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" wrote to dataset using H5Dwrite with an invalid memory dataspace!\n");
+ PART_ERROR(H5Dwrite_invalid_mem_dataspace);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Dwrite_invalid_mem_dataspace);
+
+ PART_BEGIN(H5Dwrite_invalid_file_dataspace)
+ {
+ TESTING_2("H5Dwrite with an invalid file dataspace");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Dwrite(dset_id, DATASET_WRITE_INVALID_PARAMS_TEST_DSET_DTYPE, H5S_ALL,
+ H5I_INVALID_HID, H5P_DEFAULT, data);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" wrote to dataset using H5Dwrite with an invalid file dataspace!\n");
+ PART_ERROR(H5Dwrite_invalid_file_dataspace);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Dwrite_invalid_file_dataspace);
+
+ PART_BEGIN(H5Dwrite_invalid_dxpl)
+ {
+ TESTING_2("H5Dwrite with an invalid DXPL");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Dwrite(dset_id, DATASET_WRITE_INVALID_PARAMS_TEST_DSET_DTYPE, H5S_ALL, H5S_ALL,
+ H5I_INVALID_HID, data);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" wrote to dataset using H5Dwrite with an invalid DXPL!\n");
+ PART_ERROR(H5Dwrite_invalid_dxpl);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Dwrite_invalid_dxpl);
+
+ PART_BEGIN(H5Dwrite_invalid_data_buf)
+ {
+ TESTING_2("H5Dwrite with an invalid data buffer");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Dwrite(dset_id, DATASET_WRITE_INVALID_PARAMS_TEST_DSET_DTYPE, H5S_ALL, H5S_ALL,
+ H5P_DEFAULT, NULL);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" wrote to dataset using H5Dwrite with an invalid data buffer!\n");
+ PART_ERROR(H5Dwrite_invalid_data_buf);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Dwrite_invalid_data_buf);
+ }
+ END_MULTIPART;
+
+ TESTING_2("test cleanup");
+
+ if (data) {
+ HDfree(data);
+ data = NULL;
+ }
+
+ if (H5Sclose(fspace_id) < 0)
+ TEST_ERROR;
+ if (H5Dclose(dset_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ if (data)
+ HDfree(data);
+ H5Sclose(fspace_id);
+ H5Dclose(dset_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to ensure that data is read back correctly from a dataset after it has
+ * been written, using type conversion with builtin types.
+ */
+static int
+test_dataset_builtin_type_conversion(void)
+{
+ hssize_t space_npoints;
+ hsize_t dims[DATASET_DATA_BUILTIN_CONVERSION_TEST_DSET_SPACE_RANK] = {10, 10, 10};
+ hsize_t start[DATASET_DATA_BUILTIN_CONVERSION_TEST_DSET_SPACE_RANK];
+ hsize_t stride[DATASET_DATA_BUILTIN_CONVERSION_TEST_DSET_SPACE_RANK];
+ hsize_t count[DATASET_DATA_BUILTIN_CONVERSION_TEST_DSET_SPACE_RANK];
+ hsize_t block[DATASET_DATA_BUILTIN_CONVERSION_TEST_DSET_SPACE_RANK];
+ hsize_t points[DATASET_DATA_BUILTIN_CONVERSION_TEST_NUM_POINTS *
+ DATASET_DATA_BUILTIN_CONVERSION_TEST_DSET_SPACE_RANK];
+ size_t i, data_size;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t fspace_id = H5I_INVALID_HID;
+ hid_t mspace_id = H5I_INVALID_HID;
+ hid_t file_type_id = H5I_INVALID_HID;
+ H5T_order_t native_order;
+ void *data = NULL;
+ void *write_buf = NULL;
+ void *read_buf = NULL;
+
+ TESTING_MULTIPART(
+ "verification of dataset data using H5Dwrite then H5Dread with type conversion of builtin types");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file, group, basic or more dataset aren't supported with this "
+ "connector\n");
+ return 0;
+ }
+
+ TESTING_2("test setup");
+
+ if ((native_order = H5Tget_order(DATASET_DATA_BUILTIN_CONVERSION_TEST_MEM_DTYPE)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get native byte order\n");
+ goto error;
+ }
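+ /* Choose a file datatype with the opposite byte order from the native memory type so that I/O goes through type conversion */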
+ if (native_order == H5T_ORDER_LE)
+ file_type_id = H5T_STD_I32BE;
+ else
+ file_type_id = H5T_STD_I32LE;
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, DATASET_DATA_BUILTIN_CONVERSION_TEST_GROUP_NAME, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container sub-group '%s'\n", DATASET_DATA_VERIFY_WRITE_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((fspace_id = H5Screate_simple(DATASET_DATA_BUILTIN_CONVERSION_TEST_DSET_SPACE_RANK, dims, NULL)) < 0)
+ TEST_ERROR;
+
+ if ((dset_id = H5Dcreate2(group_id, DATASET_DATA_BUILTIN_CONVERSION_TEST_DSET_NAME, file_type_id,
+ fspace_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create dataset '%s'\n", DATASET_DATA_BUILTIN_CONVERSION_TEST_DSET_NAME);
+ goto error;
+ }
+
+ for (i = 0, data_size = 1; i < DATASET_DATA_BUILTIN_CONVERSION_TEST_DSET_SPACE_RANK; i++)
+ data_size *= dims[i];
+ data_size *= DATASET_DATA_BUILTIN_CONVERSION_TEST_MEM_DTYPESIZE;
+
+ if (NULL == (data = HDmalloc(data_size)))
+ TEST_ERROR;
+
+ for (i = 0; i < data_size / DATASET_DATA_BUILTIN_CONVERSION_TEST_MEM_DTYPESIZE; i++)
+ ((int *)data)[i] = (int)i;
+
+ PASSED();
+
+ BEGIN_MULTIPART
+ {
+ PART_BEGIN(H5Dwrite_all_read)
+ {
+ TESTING_2("H5Dwrite then H5Dread with H5S_ALL selection");
+
+ if (H5Dwrite(dset_id, DATASET_DATA_BUILTIN_CONVERSION_TEST_MEM_DTYPE, H5S_ALL, H5S_ALL,
+ H5P_DEFAULT, data) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't write to dataset '%s'\n",
+ DATASET_DATA_BUILTIN_CONVERSION_TEST_DSET_NAME);
+ PART_ERROR(H5Dwrite_all_read);
+ }
+
+ if (data) {
+ HDfree(data);
+ data = NULL;
+ }
+
+ if (fspace_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(fspace_id);
+ }
+ H5E_END_TRY;
+ fspace_id = H5I_INVALID_HID;
+ }
+ if (dset_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Dclose(dset_id);
+ }
+ H5E_END_TRY;
+ dset_id = H5I_INVALID_HID;
+ }
+
+ if ((dset_id = H5Dopen2(group_id, DATASET_DATA_BUILTIN_CONVERSION_TEST_DSET_NAME, H5P_DEFAULT)) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open dataset '%s'\n", DATASET_DATA_BUILTIN_CONVERSION_TEST_DSET_NAME);
+ PART_ERROR(H5Dwrite_all_read);
+ }
+
+ if ((fspace_id = H5Dget_space(dset_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get dataset dataspace\n");
+ PART_ERROR(H5Dwrite_all_read);
+ }
+
+ if ((space_npoints = H5Sget_simple_extent_npoints(fspace_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get dataspace num points\n");
+ PART_ERROR(H5Dwrite_all_read);
+ }
+
+ if (NULL == (data = HDmalloc((hsize_t)space_npoints *
+ DATASET_DATA_BUILTIN_CONVERSION_TEST_MEM_DTYPESIZE))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for dataset read\n");
+ PART_ERROR(H5Dwrite_all_read);
+ }
+
+ if (H5Dread(dset_id, DATASET_DATA_BUILTIN_CONVERSION_TEST_MEM_DTYPE, H5S_ALL, H5S_ALL,
+ H5P_DEFAULT, data) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't read from dataset '%s'\n",
+ DATASET_DATA_BUILTIN_CONVERSION_TEST_DSET_NAME);
+ PART_ERROR(H5Dwrite_all_read);
+ }
+
+ for (i = 0; i < (hsize_t)space_npoints; i++)
+ if (((int *)data)[i] != (int)i) {
+ H5_FAILED();
+ HDprintf(" H5S_ALL selection data verification failed\n");
+ PART_ERROR(H5Dwrite_all_read);
+ }
+
+ if (data) {
+ HDfree(data);
+ data = NULL;
+ }
+
+ PASSED();
+ }
+ PART_END(H5Dwrite_all_read);
+
+ PART_BEGIN(H5Dwrite_hyperslab_read)
+ {
+ TESTING_2("H5Dwrite using hyperslab selection then H5Dread");
+
+ data_size = dims[1] * 2 * DATASET_DATA_BUILTIN_CONVERSION_TEST_MEM_DTYPESIZE;
+
+ if (NULL == (write_buf = HDmalloc(data_size))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for dataset write\n");
+ PART_ERROR(H5Dwrite_hyperslab_read);
+ }
+
+ for (i = 0; i < data_size / DATASET_DATA_BUILTIN_CONVERSION_TEST_MEM_DTYPESIZE; i++)
+ ((int *)write_buf)[i] = 56;
+
+ for (i = 0, data_size = 1; i < DATASET_DATA_BUILTIN_CONVERSION_TEST_DSET_SPACE_RANK; i++)
+ data_size *= dims[i];
+ data_size *= DATASET_DATA_BUILTIN_CONVERSION_TEST_MEM_DTYPESIZE;
+
+ if (NULL == (data = HDmalloc(data_size))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for dataset data verification\n");
+ PART_ERROR(H5Dwrite_hyperslab_read);
+ }
+
+ if (H5Dread(dset_id, DATASET_DATA_BUILTIN_CONVERSION_TEST_MEM_DTYPE, H5S_ALL, H5S_ALL,
+ H5P_DEFAULT, data) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't read from dataset '%s'\n",
+ DATASET_DATA_BUILTIN_CONVERSION_TEST_DSET_NAME);
+ PART_ERROR(H5Dwrite_hyperslab_read);
+ }
+
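+ /* Update the expected data buffer to reflect the elements that the
+ * hyperslab write below will overwrite */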
+ for (i = 0; i < 2; i++) {
+ size_t j;
+
+ for (j = 0; j < dims[1]; j++)
+ ((int *)data)[(i * dims[1] * dims[2]) + (j * dims[2])] = 56;
+ }
+
+ /* Write to the first two rows of the dataset (elements (i, j, 0) for i < 2) */
+ start[0] = start[1] = start[2] = 0;
+ stride[0] = stride[1] = stride[2] = 1;
+ count[0] = 2;
+ count[1] = dims[1];
+ count[2] = 1;
+ block[0] = block[1] = block[2] = 1;
+
+ if (H5Sselect_hyperslab(fspace_id, H5S_SELECT_SET, start, stride, count, block) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't select hyperslab for dataset write\n");
+ PART_ERROR(H5Dwrite_hyperslab_read);
+ }
+
+ {
+ hsize_t mdims[] = {(hsize_t)2 * dims[1]};
+
+ if ((mspace_id = H5Screate_simple(1, mdims, NULL)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create memory dataspace\n");
+ PART_ERROR(H5Dwrite_hyperslab_read);
+ }
+ }
+
+ if (H5Dwrite(dset_id, DATASET_DATA_BUILTIN_CONVERSION_TEST_MEM_DTYPE, mspace_id, fspace_id,
+ H5P_DEFAULT, write_buf) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't write to dataset '%s'\n",
+ DATASET_DATA_BUILTIN_CONVERSION_TEST_DSET_NAME);
+ PART_ERROR(H5Dwrite_hyperslab_read);
+ }
+
+ if (mspace_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(mspace_id);
+ }
+ H5E_END_TRY;
+ mspace_id = H5I_INVALID_HID;
+ }
+ if (fspace_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(fspace_id);
+ }
+ H5E_END_TRY;
+ fspace_id = H5I_INVALID_HID;
+ }
+ if (dset_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Dclose(dset_id);
+ }
+ H5E_END_TRY;
+ dset_id = H5I_INVALID_HID;
+ }
+
+ if ((dset_id = H5Dopen2(group_id, DATASET_DATA_BUILTIN_CONVERSION_TEST_DSET_NAME, H5P_DEFAULT)) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open dataset '%s'\n", DATASET_DATA_BUILTIN_CONVERSION_TEST_DSET_NAME);
+ PART_ERROR(H5Dwrite_hyperslab_read);
+ }
+
+ if ((fspace_id = H5Dget_space(dset_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get dataset dataspace\n");
+ PART_ERROR(H5Dwrite_hyperslab_read);
+ }
+
+ if ((space_npoints = H5Sget_simple_extent_npoints(fspace_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get dataspace num points\n");
+ PART_ERROR(H5Dwrite_hyperslab_read);
+ }
+
+ if (NULL == (read_buf = HDmalloc((hsize_t)space_npoints *
+ DATASET_DATA_BUILTIN_CONVERSION_TEST_MEM_DTYPESIZE))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for dataset read\n");
+ PART_ERROR(H5Dwrite_hyperslab_read);
+ }
+
+ if (H5Dread(dset_id, DATASET_DATA_BUILTIN_CONVERSION_TEST_MEM_DTYPE, H5S_ALL, H5S_ALL,
+ H5P_DEFAULT, read_buf) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't read from dataset '%s'\n",
+ DATASET_DATA_BUILTIN_CONVERSION_TEST_DSET_NAME);
+ PART_ERROR(H5Dwrite_hyperslab_read);
+ }
+
+ if (memcmp(data, read_buf, data_size)) {
+ H5_FAILED();
+ HDprintf(" hyperslab selection data verification failed\n");
+ PART_ERROR(H5Dwrite_hyperslab_read);
+ }
+
+ if (data) {
+ HDfree(data);
+ data = NULL;
+ }
+
+ if (write_buf) {
+ HDfree(write_buf);
+ write_buf = NULL;
+ }
+
+ if (read_buf) {
+ HDfree(read_buf);
+ read_buf = NULL;
+ }
+
+ PASSED();
+ }
+ PART_END(H5Dwrite_hyperslab_read);
+
+ PART_BEGIN(H5Dwrite_point_sel_read)
+ {
+ TESTING_2("H5Dwrite using point selection then H5Dread");
+
+ data_size = DATASET_DATA_BUILTIN_CONVERSION_TEST_NUM_POINTS *
+ DATASET_DATA_BUILTIN_CONVERSION_TEST_MEM_DTYPESIZE;
+
+ if (NULL == (write_buf = HDmalloc(data_size))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for dataset write\n");
+ PART_ERROR(H5Dwrite_point_sel_read);
+ }
+
+ for (i = 0; i < data_size / DATASET_DATA_BUILTIN_CONVERSION_TEST_MEM_DTYPESIZE; i++)
+ ((int *)write_buf)[i] = 13;
+
+ for (i = 0, data_size = 1; i < DATASET_DATA_BUILTIN_CONVERSION_TEST_DSET_SPACE_RANK; i++)
+ data_size *= dims[i];
+ data_size *= DATASET_DATA_BUILTIN_CONVERSION_TEST_MEM_DTYPESIZE;
+
+ if (NULL == (data = HDmalloc(data_size))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for dataset data verification\n");
+ PART_ERROR(H5Dwrite_point_sel_read);
+ }
+
+ if (H5Dread(dset_id, DATASET_DATA_BUILTIN_CONVERSION_TEST_MEM_DTYPE, H5S_ALL, H5S_ALL,
+ H5P_DEFAULT, data) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't read from dataset '%s'\n",
+ DATASET_DATA_BUILTIN_CONVERSION_TEST_DSET_NAME);
+ PART_ERROR(H5Dwrite_point_sel_read);
+ }
+
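+ /* Update the expected data buffer to reflect the diagonal elements that
+ * the point selection write below will set to 13 */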
+ for (i = 0; i < dims[0]; i++) {
+ size_t j;
+
+ for (j = 0; j < dims[1]; j++) {
+ size_t k;
+
+ for (k = 0; k < dims[2]; k++) {
+ if (i == j && j == k)
+ ((int *)data)[(i * dims[1] * dims[2]) + (j * dims[2]) + k] = 13;
+ }
+ }
+ }
+
+ /* Select a series of points along the diagonal of the dataset */
+ for (i = 0; i < DATASET_DATA_BUILTIN_CONVERSION_TEST_NUM_POINTS; i++) {
+ size_t j;
+
+ for (j = 0; j < DATASET_DATA_BUILTIN_CONVERSION_TEST_DSET_SPACE_RANK; j++)
+ points[(i * DATASET_DATA_BUILTIN_CONVERSION_TEST_DSET_SPACE_RANK) + j] = i;
+ }
+
+ if (H5Sselect_elements(fspace_id, H5S_SELECT_SET, DATASET_DATA_BUILTIN_CONVERSION_TEST_NUM_POINTS,
+ points) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't select elements in dataspace\n");
+ PART_ERROR(H5Dwrite_point_sel_read);
+ }
+
+ {
+ hsize_t mdims[] = {(hsize_t)DATASET_DATA_BUILTIN_CONVERSION_TEST_NUM_POINTS};
+
+ if ((mspace_id = H5Screate_simple(1, mdims, NULL)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create memory dataspace\n");
+ PART_ERROR(H5Dwrite_point_sel_read);
+ }
+ }
+
+ if (H5Dwrite(dset_id, DATASET_DATA_BUILTIN_CONVERSION_TEST_MEM_DTYPE, mspace_id, fspace_id,
+ H5P_DEFAULT, write_buf) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't write to dataset '%s'\n",
+ DATASET_DATA_BUILTIN_CONVERSION_TEST_DSET_NAME);
+ PART_ERROR(H5Dwrite_point_sel_read);
+ }
+
+ if (mspace_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(mspace_id);
+ }
+ H5E_END_TRY;
+ mspace_id = H5I_INVALID_HID;
+ }
+ if (fspace_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(fspace_id);
+ }
+ H5E_END_TRY;
+ fspace_id = H5I_INVALID_HID;
+ }
+ if (dset_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Dclose(dset_id);
+ }
+ H5E_END_TRY;
+ dset_id = H5I_INVALID_HID;
+ }
+
+ if ((dset_id = H5Dopen2(group_id, DATASET_DATA_BUILTIN_CONVERSION_TEST_DSET_NAME, H5P_DEFAULT)) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open dataset '%s'\n", DATASET_DATA_BUILTIN_CONVERSION_TEST_DSET_NAME);
+ PART_ERROR(H5Dwrite_point_sel_read);
+ }
+
+ if ((fspace_id = H5Dget_space(dset_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get dataset dataspace\n");
+ PART_ERROR(H5Dwrite_point_sel_read);
+ }
+
+ if ((space_npoints = H5Sget_simple_extent_npoints(fspace_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get dataspace num points\n");
+ PART_ERROR(H5Dwrite_point_sel_read);
+ }
+
+ if (NULL == (read_buf = HDmalloc((hsize_t)space_npoints *
+ DATASET_DATA_BUILTIN_CONVERSION_TEST_MEM_DTYPESIZE))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for dataset read\n");
+ PART_ERROR(H5Dwrite_point_sel_read);
+ }
+
+ if (H5Dread(dset_id, DATASET_DATA_BUILTIN_CONVERSION_TEST_MEM_DTYPE, H5S_ALL, H5S_ALL,
+ H5P_DEFAULT, read_buf) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't read from dataset '%s'\n",
+ DATASET_DATA_BUILTIN_CONVERSION_TEST_DSET_NAME);
+ PART_ERROR(H5Dwrite_point_sel_read);
+ }
+
+ if (memcmp(data, read_buf, data_size)) {
+ H5_FAILED();
+ HDprintf(" point selection data verification failed\n");
+ PART_ERROR(H5Dwrite_point_sel_read);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Dwrite_point_sel_read);
+ }
+ END_MULTIPART;
+
+ TESTING_2("test cleanup");
+
+ if (data) {
+ HDfree(data);
+ data = NULL;
+ }
+
+ if (write_buf) {
+ HDfree(write_buf);
+ write_buf = NULL;
+ }
+
+ if (read_buf) {
+ HDfree(read_buf);
+ read_buf = NULL;
+ }
+
+ if (H5Sclose(fspace_id) < 0)
+ TEST_ERROR;
+ if (H5Dclose(dset_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ if (data)
+ HDfree(data);
+ if (write_buf)
+ HDfree(write_buf);
+ if (read_buf)
+ HDfree(read_buf);
+ H5Sclose(mspace_id);
+ H5Sclose(fspace_id);
+ H5Dclose(dset_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to ensure that data is read back correctly from a dataset after it has
+ * been written, using partial element I/O with compound types
+ */
+typedef struct dataset_compount_partial_io_t {
+ int a;
+ int b;
+} dataset_compount_partial_io_t;
+
+static int
+test_dataset_compound_partial_io(void)
+{
+ hsize_t dims[1] = {DATASET_COMPOUND_PARTIAL_IO_DSET_DIMS};
+ size_t i;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t space_id = H5I_INVALID_HID;
+ hid_t full_type_id = H5I_INVALID_HID;
+ hid_t a_type_id = H5I_INVALID_HID;
+ hid_t b_type_id = H5I_INVALID_HID;
+ dataset_compount_partial_io_t wbuf[DATASET_COMPOUND_PARTIAL_IO_DSET_DIMS];
+ dataset_compount_partial_io_t rbuf[DATASET_COMPOUND_PARTIAL_IO_DSET_DIMS];
+ dataset_compount_partial_io_t fbuf[DATASET_COMPOUND_PARTIAL_IO_DSET_DIMS];
+ dataset_compount_partial_io_t erbuf[DATASET_COMPOUND_PARTIAL_IO_DSET_DIMS];
+
+ TESTING_MULTIPART(
+ "verification of dataset data using H5Dwrite then H5Dread with partial element compound type I/O");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) {
+ SKIPPED();
+ HDprintf(
+ " API functions for basic file, group, or dataset aren't supported with this connector\n");
+ return 0;
+ }
+
+ TESTING_2("test setup");
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, DATASET_DATA_COMPOUND_PARTIAL_IO_TEST_GROUP_NAME, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container sub-group '%s'\n",
+ DATASET_DATA_COMPOUND_PARTIAL_IO_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((space_id = H5Screate_simple(1, dims, NULL)) < 0)
+ TEST_ERROR;
+
+ if ((full_type_id = H5Tcreate(H5T_COMPOUND, sizeof(dataset_compount_partial_io_t))) < 0)
+ TEST_ERROR;
+ if (H5Tinsert(full_type_id, "a", HOFFSET(dataset_compount_partial_io_t, a), H5T_NATIVE_INT) < 0)
+ TEST_ERROR;
+ if (H5Tinsert(full_type_id, "b", HOFFSET(dataset_compount_partial_io_t, b), H5T_NATIVE_INT) < 0)
+ TEST_ERROR;
+
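+ /* Create compound datatypes that each contain only a single member of
+ * the full struct; I/O through these types should only touch that member */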
+ if ((a_type_id = H5Tcreate(H5T_COMPOUND, sizeof(dataset_compount_partial_io_t))) < 0)
+ TEST_ERROR;
+ if (H5Tinsert(a_type_id, "a", HOFFSET(dataset_compount_partial_io_t, a), H5T_NATIVE_INT) < 0)
+ TEST_ERROR;
+
+ if ((b_type_id = H5Tcreate(H5T_COMPOUND, sizeof(dataset_compount_partial_io_t))) < 0)
+ TEST_ERROR;
+ if (H5Tinsert(b_type_id, "b", HOFFSET(dataset_compount_partial_io_t, b), H5T_NATIVE_INT) < 0)
+ TEST_ERROR;
+
+ if ((dset_id = H5Dcreate2(group_id, DATASET_DATA_COMPOUND_PARTIAL_IO_TEST_DSET_NAME, full_type_id,
+ space_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create dataset '%s'\n", DATASET_DATA_COMPOUND_PARTIAL_IO_TEST_DSET_NAME);
+ goto error;
+ }
+
+ PASSED();
+
+ BEGIN_MULTIPART
+ {
+ PART_BEGIN(write_full_read_full)
+ {
+ TESTING_2("H5Dwrite then H5Dread with all compound members");
+
+ /* Initialize wbuf */
+ for (i = 0; i < DATASET_COMPOUND_PARTIAL_IO_DSET_DIMS; i++) {
+ wbuf[i].a = (int)(2 * i);
+ wbuf[i].b = (int)(2 * i + 1);
+ }
+
+ /* Write data */
+ if (H5Dwrite(dset_id, full_type_id, H5S_ALL, H5S_ALL, H5P_DEFAULT, wbuf) < 0)
+ PART_TEST_ERROR(write_full_read_full);
+
+ /* Update fbuf to match file state */
+ for (i = 0; i < DATASET_COMPOUND_PARTIAL_IO_DSET_DIMS; i++) {
+ fbuf[i].a = wbuf[i].a;
+ fbuf[i].b = wbuf[i].b;
+ }
+
+ /* Initialize rbuf to -1 */
+ for (i = 0; i < DATASET_COMPOUND_PARTIAL_IO_DSET_DIMS; i++) {
+ rbuf[i].a = -1;
+ rbuf[i].b = -1;
+ }
+
+ /* Set erbuf (simply match file state since we're reading the whole
+ * thing) */
+ for (i = 0; i < DATASET_COMPOUND_PARTIAL_IO_DSET_DIMS; i++) {
+ erbuf[i].a = fbuf[i].a;
+ erbuf[i].b = fbuf[i].b;
+ }
+
+ /* Read data */
+ if (H5Dread(dset_id, full_type_id, H5S_ALL, H5S_ALL, H5P_DEFAULT, rbuf) < 0)
+ PART_TEST_ERROR(write_full_read_full);
+
+ /* Verify data */
+ for (i = 0; i < DATASET_COMPOUND_PARTIAL_IO_DSET_DIMS; i++) {
+ if (rbuf[i].a != erbuf[i].a)
+ PART_TEST_ERROR(write_full_read_full);
+ if (rbuf[i].b != erbuf[i].b)
+ PART_TEST_ERROR(write_full_read_full);
+ }
+
+ PASSED();
+ }
+ PART_END(write_full_read_full);
+
+ PART_BEGIN(read_a)
+ {
+ TESTING_2("H5Dread with compound member a");
+
+ /* Initialize rbuf to -1 */
+ for (i = 0; i < DATASET_COMPOUND_PARTIAL_IO_DSET_DIMS; i++) {
+ rbuf[i].a = -1;
+ rbuf[i].b = -1;
+ }
+
+ /* Set erbuf (element a comes from the file, element b is untouched)
+ */
+ for (i = 0; i < DATASET_COMPOUND_PARTIAL_IO_DSET_DIMS; i++) {
+ erbuf[i].a = fbuf[i].a;
+ erbuf[i].b = rbuf[i].b;
+ }
+
+ /* Read data */
+ if (H5Dread(dset_id, a_type_id, H5S_ALL, H5S_ALL, H5P_DEFAULT, rbuf) < 0)
+ PART_TEST_ERROR(read_a);
+
+ /* Verify data */
+ for (i = 0; i < DATASET_COMPOUND_PARTIAL_IO_DSET_DIMS; i++) {
+ if (rbuf[i].a != erbuf[i].a)
+ PART_TEST_ERROR(read_a);
+ if (rbuf[i].b != erbuf[i].b)
+ PART_TEST_ERROR(read_a);
+ }
+
+ PASSED();
+ }
+ PART_END(read_a);
+
+ PART_BEGIN(write_b_read_full)
+ {
+ TESTING_2("H5Dwrite with compound member b then H5Dread with all compound members");
+
+ /* Initialize wbuf */
+ for (i = 0; i < DATASET_COMPOUND_PARTIAL_IO_DSET_DIMS; i++) {
+ wbuf[i].a = (int)(2 * DATASET_COMPOUND_PARTIAL_IO_DSET_DIMS + 2 * i);
+ wbuf[i].b = (int)(2 * DATASET_COMPOUND_PARTIAL_IO_DSET_DIMS + 2 * i + 1);
+ }
+
+ /* Write data */
+ if (H5Dwrite(dset_id, b_type_id, H5S_ALL, H5S_ALL, H5P_DEFAULT, wbuf) < 0)
+ PART_TEST_ERROR(write_b_read_full);
+
+ /* Update fbuf to match file state - only element b was updated */
+ for (i = 0; i < DATASET_COMPOUND_PARTIAL_IO_DSET_DIMS; i++) {
+ fbuf[i].b = wbuf[i].b;
+ }
+
+ /* Initialize rbuf to -1 */
+ for (i = 0; i < DATASET_COMPOUND_PARTIAL_IO_DSET_DIMS; i++) {
+ rbuf[i].a = -1;
+ rbuf[i].b = -1;
+ }
+
+ /* Set erbuf (simply match file state since we're reading the whole
+ * thing) */
+ for (i = 0; i < DATASET_COMPOUND_PARTIAL_IO_DSET_DIMS; i++) {
+ erbuf[i].a = fbuf[i].a;
+ erbuf[i].b = fbuf[i].b;
+ }
+
+ /* Read data */
+ if (H5Dread(dset_id, full_type_id, H5S_ALL, H5S_ALL, H5P_DEFAULT, rbuf) < 0)
+ PART_TEST_ERROR(write_b_read_full);
+
+ /* Verify data */
+ for (i = 0; i < DATASET_COMPOUND_PARTIAL_IO_DSET_DIMS; i++) {
+ if (rbuf[i].a != erbuf[i].a)
+ PART_TEST_ERROR(write_b_read_full);
+ if (rbuf[i].b != erbuf[i].b)
+ PART_TEST_ERROR(write_b_read_full);
+ }
+
+ PASSED();
+ }
+ PART_END(write_b_read_full);
+ }
+ END_MULTIPART;
+
+ TESTING_2("test cleanup");
+
+ if (H5Sclose(space_id) < 0)
+ TEST_ERROR;
+ if (H5Dclose(dset_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+ if (H5Tclose(full_type_id) < 0)
+ TEST_ERROR;
+ if (H5Tclose(a_type_id) < 0)
+ TEST_ERROR;
+ if (H5Tclose(b_type_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(space_id);
+ H5Dclose(dset_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ H5Tclose(full_type_id);
+ H5Tclose(a_type_id);
+ H5Tclose(b_type_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to check that a chunked dataset's extent can be
+ * changed by using H5Dset_extent. This test uses unlimited
+ * dimensions for the dataset, so the dataset's dimensions
+ * may both shrink and grow.
+ */
+static int
+test_dataset_set_extent_chunked_unlimited(void)
+{
+ hsize_t dims[DATASET_SET_EXTENT_CHUNKED_UNLIMITED_TEST_SPACE_RANK];
+ hsize_t max_dims[DATASET_SET_EXTENT_CHUNKED_UNLIMITED_TEST_SPACE_RANK];
+ hsize_t chunk_dims[DATASET_SET_EXTENT_CHUNKED_UNLIMITED_TEST_SPACE_RANK];
+ hsize_t new_dims[DATASET_SET_EXTENT_CHUNKED_UNLIMITED_TEST_SPACE_RANK];
+ size_t i;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t dset_dtype = H5I_INVALID_HID;
+ hid_t dcpl_id = H5I_INVALID_HID;
+ hid_t fspace_id = H5I_INVALID_HID;
+
+ TESTING("H5Dset_extent on chunked dataset with unlimited dimensions");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file, group, basic or more dataset aren't supported with this "
+ "connector\n");
+ return 0;
+ }
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, DATASET_SET_EXTENT_CHUNKED_UNLIMITED_TEST_GROUP_NAME,
+ H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container sub-group '%s'\n",
+ DATASET_SET_EXTENT_CHUNKED_UNLIMITED_TEST_GROUP_NAME);
+ goto error;
+ }
+
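+ /* Use unlimited maximum dimensions and randomly-sized chunks so the
+ * dataset's extent can later both grow and shrink */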
+ for (i = 0; i < DATASET_SET_EXTENT_CHUNKED_UNLIMITED_TEST_SPACE_RANK; i++) {
+ max_dims[i] = H5S_UNLIMITED;
+ chunk_dims[i] = (hsize_t)(rand() % MAX_DIM_SIZE + 1);
+ }
+
+ if ((fspace_id = generate_random_dataspace(DATASET_SET_EXTENT_CHUNKED_UNLIMITED_TEST_SPACE_RANK, max_dims,
+ dims, FALSE)) < 0)
+ TEST_ERROR;
+
+ if ((dset_dtype = generate_random_datatype(H5T_NO_CLASS, FALSE)) < 0)
+ TEST_ERROR;
+
+ if ((dcpl_id = H5Pcreate(H5P_DATASET_CREATE)) < 0)
+ TEST_ERROR;
+
+ if (H5Pset_chunk(dcpl_id, DATASET_SET_EXTENT_CHUNKED_UNLIMITED_TEST_SPACE_RANK, chunk_dims) < 0) {
+ H5_FAILED();
+ HDprintf(" unable to set dataset chunk dimensionality\n");
+ goto error;
+ }
+
+ if ((dset_id = H5Dcreate2(group_id, DATASET_SET_EXTENT_CHUNKED_UNLIMITED_TEST_DSET_NAME, dset_dtype,
+ fspace_id, H5P_DEFAULT, dcpl_id, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create dataset '%s'\n", DATASET_SET_EXTENT_CHUNKED_UNLIMITED_TEST_DSET_NAME);
+ goto error;
+ }
+
+ for (i = 0; i < DATASET_SET_EXTENT_CHUNKED_UNLIMITED_TEST_NUM_PASSES; i++) {
+ size_t j;
+
+ for (j = 0; j < DATASET_SET_EXTENT_CHUNKED_UNLIMITED_TEST_SPACE_RANK; j++) {
+ /* Ensure that the new dimensionality doesn't match the old dimensionality. */
+ do {
+ new_dims[j] = (hsize_t)(rand() % MAX_DIM_SIZE + 1);
+ } while (new_dims[j] == dims[j]);
+ }
+
+ if (H5Dset_extent(dset_id, new_dims) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to set dataset extent\n");
+ goto error;
+ }
+
+ /* Retrieve the new dimensions of the dataset and ensure they
+ * are different from the original.
+ */
+ if (H5Sclose(fspace_id) < 0)
+ TEST_ERROR;
+
+ if ((fspace_id = H5Dget_space(dset_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve dataset's dataspace\n");
+ goto error;
+ }
+
+ if (H5Sget_simple_extent_dims(fspace_id, new_dims, NULL) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve dataset dimensionality\n");
+ goto error;
+ }
+
+ /*
+ * Make sure the dimensions have been changed.
+ */
+ for (j = 0; j < DATASET_SET_EXTENT_CHUNKED_UNLIMITED_TEST_SPACE_RANK; j++) {
+ if (dims[j] == new_dims[j]) {
+ H5_FAILED();
+ HDprintf(" dataset dimension %llu wasn't changed!\n", (unsigned long long)j);
+ goto error;
+ }
+ }
+
+ /*
+ * Remember the current dimensionality of the dataset before
+ * changing them again.
+ */
+ HDmemcpy(dims, new_dims, sizeof(new_dims));
+ }
+
+ /*
+ * Now close and re-open the dataset each pass to check the persistence
+ * of the changes to the dataset's dimensionality.
+ */
+ for (i = 0; i < DATASET_SET_EXTENT_CHUNKED_UNLIMITED_TEST_NUM_PASSES; i++) {
+ size_t j;
+
+ for (j = 0; j < DATASET_SET_EXTENT_CHUNKED_UNLIMITED_TEST_SPACE_RANK; j++) {
+ /* Ensure that the new dimensionality doesn't match the old dimensionality. */
+ do {
+ new_dims[j] = (hsize_t)(rand() % MAX_DIM_SIZE + 1);
+ } while (new_dims[j] == dims[j]);
+ }
+
+ if (H5Dset_extent(dset_id, new_dims) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to set dataset extent\n");
+ goto error;
+ }
+
+ /* Retrieve the new dimensions of the dataset and ensure they
+ * are different from the original.
+ */
+ if (H5Sclose(fspace_id) < 0)
+ TEST_ERROR;
+ if (H5Dclose(dset_id) < 0)
+ TEST_ERROR;
+
+ if ((dset_id = H5Dopen2(group_id, DATASET_SET_EXTENT_CHUNKED_UNLIMITED_TEST_DSET_NAME, H5P_DEFAULT)) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" failed to open dataset '%s'\n",
+ DATASET_SET_EXTENT_CHUNKED_UNLIMITED_TEST_DSET_NAME);
+ goto error;
+ }
+
+ if ((fspace_id = H5Dget_space(dset_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve dataset's dataspace\n");
+ goto error;
+ }
+
+ if (H5Sget_simple_extent_dims(fspace_id, new_dims, NULL) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve dataset dimensionality\n");
+ goto error;
+ }
+
+ /*
+ * Make sure the dimensions have been changed.
+ */
+ for (j = 0; j < DATASET_SET_EXTENT_CHUNKED_UNLIMITED_TEST_SPACE_RANK; j++) {
+ if (dims[j] == new_dims[j]) {
+ H5_FAILED();
+ HDprintf(" dataset dimension %llu wasn't changed!\n", (unsigned long long)j);
+ goto error;
+ }
+ }
+
+ /*
+ * Remember the current dimensionality of the dataset before
+ * changing them again.
+ */
+ HDmemcpy(dims, new_dims, sizeof(new_dims));
+ }
+
+ if (H5Pclose(dcpl_id) < 0)
+ TEST_ERROR;
+ if (H5Sclose(fspace_id) < 0)
+ TEST_ERROR;
+ if (H5Tclose(dset_dtype) < 0)
+ TEST_ERROR;
+ if (H5Dclose(dset_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Pclose(dcpl_id);
+ H5Sclose(fspace_id);
+ H5Tclose(dset_dtype);
+ H5Dclose(dset_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to check that a chunked dataset's extent can be
+ * changed by using H5Dset_extent. This test uses fixed-size
+ * dimensions for the dataset, so the dataset's dimensions
+ * may only shrink.
+ */
+static int
+test_dataset_set_extent_chunked_fixed(void)
+{
+ hsize_t dims[DATASET_SET_EXTENT_CHUNKED_FIXED_TEST_SPACE_RANK];
+ hsize_t dims2[DATASET_SET_EXTENT_CHUNKED_FIXED_TEST_SPACE_RANK];
+ hsize_t chunk_dims[DATASET_SET_EXTENT_CHUNKED_FIXED_TEST_SPACE_RANK];
+ hsize_t new_dims[DATASET_SET_EXTENT_CHUNKED_FIXED_TEST_SPACE_RANK];
+ size_t i;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID, dset_id2 = H5I_INVALID_HID;
+ hid_t dset_dtype = H5I_INVALID_HID;
+ hid_t dcpl_id = H5I_INVALID_HID;
+ hid_t fspace_id = H5I_INVALID_HID, fspace_id2 = H5I_INVALID_HID;
+
+ TESTING("H5Dset_extent on chunked dataset with fixed dimensions");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file, group, basic or more dataset aren't supported with this "
+ "connector\n");
+ return 0;
+ }
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, DATASET_SET_EXTENT_CHUNKED_FIXED_TEST_GROUP_NAME, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container sub-group '%s'\n",
+ DATASET_SET_EXTENT_CHUNKED_FIXED_TEST_GROUP_NAME);
+ goto error;
+ }
+
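+ /* Pick random dataset dimensions and chunk dimensions that are no larger
+ * than the dataset dimensions */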
+ for (i = 0; i < DATASET_SET_EXTENT_CHUNKED_FIXED_TEST_SPACE_RANK; i++) {
+ dims[i] = (hsize_t)(rand() % MAX_DIM_SIZE + 1);
+ dims2[i] = dims[i];
+ do {
+ chunk_dims[i] = (hsize_t)(rand() % MAX_DIM_SIZE + 1);
+ } while (chunk_dims[i] > dims[i]);
+ }
+
+ if ((fspace_id = H5Screate_simple(DATASET_SET_EXTENT_CHUNKED_FIXED_TEST_SPACE_RANK, dims, NULL)) < 0)
+ TEST_ERROR;
+ if ((fspace_id2 = H5Screate_simple(DATASET_SET_EXTENT_CHUNKED_FIXED_TEST_SPACE_RANK, dims2, NULL)) < 0)
+ TEST_ERROR;
+
+ if ((dset_dtype = generate_random_datatype(H5T_NO_CLASS, FALSE)) < 0)
+ TEST_ERROR;
+
+ if ((dcpl_id = H5Pcreate(H5P_DATASET_CREATE)) < 0)
+ TEST_ERROR;
+
+ if (H5Pset_chunk(dcpl_id, DATASET_SET_EXTENT_CHUNKED_FIXED_TEST_SPACE_RANK, chunk_dims) < 0) {
+ H5_FAILED();
+ HDprintf(" unable to set dataset chunk dimensionality\n");
+ goto error;
+ }
+
+ /*
+ * NOTE: Since shrinking the dimension size can quickly end in a situation
+ * where the dimensions are of size 1 and we can't shrink them further, we
+ * use two datasets here to ensure the second test can run at least once.
+ */
+ if ((dset_id = H5Dcreate2(group_id, DATASET_SET_EXTENT_CHUNKED_FIXED_TEST_DSET_NAME, dset_dtype,
+ fspace_id, H5P_DEFAULT, dcpl_id, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create dataset '%s'\n", DATASET_SET_EXTENT_CHUNKED_FIXED_TEST_DSET_NAME);
+ goto error;
+ }
+
+ if ((dset_id2 = H5Dcreate2(group_id, DATASET_SET_EXTENT_CHUNKED_FIXED_TEST_DSET_NAME2, dset_dtype,
+ fspace_id2, H5P_DEFAULT, dcpl_id, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create dataset '%s'\n", DATASET_SET_EXTENT_CHUNKED_FIXED_TEST_DSET_NAME2);
+ goto error;
+ }
+
+ for (i = 0; i < DATASET_SET_EXTENT_CHUNKED_FIXED_TEST_NUM_PASSES; i++) {
+ hbool_t skip_iterations = FALSE;
+ size_t j;
+
+ for (j = 0; j < DATASET_SET_EXTENT_CHUNKED_FIXED_TEST_SPACE_RANK; j++) {
+ /* Ensure that the new dimensionality is less than the old dimensionality. */
+ do {
+ if (dims[j] == 1) {
+ skip_iterations = TRUE;
+ break;
+ }
+ else
+ new_dims[j] = (hsize_t)(rand() % MAX_DIM_SIZE + 1);
+ } while (new_dims[j] >= dims[j]);
+ }
+
+ /*
+ * If we've shrunk one of the dimensions to size 1, skip the rest of
+ * the iterations.
+ */
+ if (skip_iterations)
+ break;
+
+ if (H5Dset_extent(dset_id, new_dims) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to set dataset extent\n");
+ goto error;
+ }
+
+ /* Retrieve the new dimensions of the dataset and ensure they
+ * are different from the original.
+ */
+ if (H5Sclose(fspace_id) < 0)
+ TEST_ERROR;
+
+ if ((fspace_id = H5Dget_space(dset_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve dataset's dataspace\n");
+ goto error;
+ }
+
+ if (H5Sget_simple_extent_dims(fspace_id, new_dims, NULL) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve dataset dimensionality\n");
+ goto error;
+ }
+
+ /*
+ * Make sure the dimensions have been changed.
+ */
+ for (j = 0; j < DATASET_SET_EXTENT_CHUNKED_FIXED_TEST_SPACE_RANK; j++) {
+ if (dims[j] == new_dims[j]) {
+ H5_FAILED();
+ HDprintf(" dataset dimension %llu wasn't changed!\n", (unsigned long long)j);
+ goto error;
+ }
+ }
+
+ /*
+ * Remember the current dimensionality of the dataset before
+ * changing them again.
+ */
+ HDmemcpy(dims, new_dims, sizeof(new_dims));
+ }
+
+ /*
+ * Now close and re-open the dataset each pass to check the persistence
+ * of the changes to the dataset's dimensionality.
+ */
+ for (i = 0; i < DATASET_SET_EXTENT_CHUNKED_FIXED_TEST_NUM_PASSES; i++) {
+ hbool_t skip_iterations = FALSE;
+ size_t j;
+
+ for (j = 0; j < DATASET_SET_EXTENT_CHUNKED_FIXED_TEST_SPACE_RANK; j++) {
+ /* Ensure that the new dimensionality is less than the old dimensionality. */
+ do {
+ if (dims2[j] == 1) {
+ skip_iterations = TRUE;
+ break;
+ }
+ else
+ new_dims[j] = (hsize_t)(rand() % MAX_DIM_SIZE + 1);
+ } while (new_dims[j] >= dims2[j]);
+ }
+
+ /*
+ * If we've shrunk one of the dimensions to size 1, skip the rest of
+ * the iterations.
+ */
+ if (skip_iterations)
+ break;
+
+ if (H5Dset_extent(dset_id2, new_dims) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to set dataset extent2\n");
+ goto error;
+ }
+
+ /* Retrieve the new dimensions of the dataset and ensure they
+ * are different from the original.
+ */
+ if (H5Sclose(fspace_id2) < 0)
+ TEST_ERROR;
+ if (H5Dclose(dset_id2) < 0)
+ TEST_ERROR;
+
+ if ((dset_id2 = H5Dopen2(group_id, DATASET_SET_EXTENT_CHUNKED_FIXED_TEST_DSET_NAME2, H5P_DEFAULT)) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" failed to open dataset '%s'\n", DATASET_SET_EXTENT_CHUNKED_FIXED_TEST_DSET_NAME2);
+ goto error;
+ }
+
+ if ((fspace_id2 = H5Dget_space(dset_id2)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve dataset's dataspace\n");
+ goto error;
+ }
+
+ if (H5Sget_simple_extent_dims(fspace_id2, new_dims, NULL) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve dataset dimensionality\n");
+ goto error;
+ }
+
+ /*
+ * Make sure the dimensions have been changed.
+ */
+ for (j = 0; j < DATASET_SET_EXTENT_CHUNKED_FIXED_TEST_SPACE_RANK; j++) {
+ if (dims2[j] == new_dims[j]) {
+ H5_FAILED();
+ HDprintf(" dataset dimension %llu wasn't changed!\n", (unsigned long long)j);
+ goto error;
+ }
+ }
+
+ /*
+ * Remember the current dimensionality of the dataset before
+ * changing them again.
+ */
+ HDmemcpy(dims2, new_dims, sizeof(new_dims));
+ }
+
+ if (H5Pclose(dcpl_id) < 0)
+ TEST_ERROR;
+ if (H5Sclose(fspace_id) < 0)
+ TEST_ERROR;
+ if (H5Sclose(fspace_id2) < 0)
+ TEST_ERROR;
+ if (H5Tclose(dset_dtype) < 0)
+ TEST_ERROR;
+ if (H5Dclose(dset_id) < 0)
+ TEST_ERROR;
+ if (H5Dclose(dset_id2) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Pclose(dcpl_id);
+ H5Sclose(fspace_id);
+ H5Sclose(fspace_id2);
+ H5Tclose(dset_dtype);
+ H5Dclose(dset_id);
+ H5Dclose(dset_id2);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to check the data is correct after expanding
+ * and shrinking the dataset with H5Dset_extent
+ */
+static int
+test_dataset_set_extent_data(void)
+{
+ hsize_t dims_origin[DATASET_SET_EXTENT_DATA_TEST_SPACE_RANK] = {DATASET_SET_EXTENT_DATA_TEST_SPACE_DIM,
+ DATASET_SET_EXTENT_DATA_TEST_SPACE_DIM};
+ hsize_t dims_expand[DATASET_SET_EXTENT_DATA_TEST_SPACE_RANK] = {
+ DATASET_SET_EXTENT_DATA_TEST_SPACE_DIM * 2 - 1, DATASET_SET_EXTENT_DATA_TEST_SPACE_DIM * 2 - 1};
+ hsize_t dims_shrink[DATASET_SET_EXTENT_DATA_TEST_SPACE_RANK] = {
+ DATASET_SET_EXTENT_DATA_TEST_SPACE_DIM / 2 + 1, DATASET_SET_EXTENT_DATA_TEST_SPACE_DIM / 2 + 1};
+ hsize_t dims_chunk[DATASET_SET_EXTENT_DATA_TEST_SPACE_RANK] = {DATASET_SET_EXTENT_DATA_TEST_SPACE_DIM,
+ DATASET_SET_EXTENT_DATA_TEST_SPACE_DIM};
+ hsize_t dims_max[DATASET_SET_EXTENT_DATA_TEST_SPACE_RANK] = {H5S_UNLIMITED, H5S_UNLIMITED};
+ hsize_t dims_out[DATASET_SET_EXTENT_DATA_TEST_SPACE_RANK];
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t dcpl_id = H5I_INVALID_HID;
+ hid_t fspace_id = H5I_INVALID_HID, dset_space_id = H5I_INVALID_HID;
+ int buf_origin[DATASET_SET_EXTENT_DATA_TEST_SPACE_DIM][DATASET_SET_EXTENT_DATA_TEST_SPACE_DIM];
+#ifndef NO_CLEAR_ON_SHRINK
+ int buf_expand2[DATASET_SET_EXTENT_DATA_TEST_SPACE_DIM][DATASET_SET_EXTENT_DATA_TEST_SPACE_DIM];
+#endif
+ int buf_expand[DATASET_SET_EXTENT_DATA_TEST_SPACE_DIM * 2 - 1]
+ [DATASET_SET_EXTENT_DATA_TEST_SPACE_DIM * 2 - 1];
+ int buf_shrink[DATASET_SET_EXTENT_DATA_TEST_SPACE_DIM / 2 + 1]
+ [DATASET_SET_EXTENT_DATA_TEST_SPACE_DIM / 2 + 1];
+ int i, j;
+
+ TESTING_MULTIPART("H5Dset_extent on data correctness");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file, group, basic or more dataset aren't supported with this "
+ "connector\n");
+ return 0;
+ }
+
+ TESTING_2("test setup");
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, DATASET_SET_EXTENT_DATA_TEST_GROUP_NAME, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container sub-group '%s'\n", DATASET_SET_EXTENT_DATA_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((fspace_id = H5Screate_simple(DATASET_SET_EXTENT_DATA_TEST_SPACE_RANK, dims_origin, dims_max)) < 0)
+ TEST_ERROR;
+
+ if ((dcpl_id = H5Pcreate(H5P_DATASET_CREATE)) < 0)
+ TEST_ERROR;
+
+ if (H5Pset_chunk(dcpl_id, DATASET_SET_EXTENT_DATA_TEST_SPACE_RANK, dims_chunk) < 0) {
+ H5_FAILED();
+ HDprintf(" unable to set dataset chunk dimensionality\n");
+ goto error;
+ }
+
+ if ((dset_id = H5Dcreate2(group_id, DATASET_SET_EXTENT_DATA_TEST_DSET_NAME, H5T_NATIVE_INT, fspace_id,
+ H5P_DEFAULT, dcpl_id, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create dataset '%s'\n", DATASET_SET_EXTENT_DATA_TEST_DSET_NAME);
+ goto error;
+ }
+
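+ /* Initialize the original data so that each element is the sum of its
+ * row and column indices */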
+ for (i = 0; i < DATASET_SET_EXTENT_DATA_TEST_SPACE_DIM; i++)
+ for (j = 0; j < DATASET_SET_EXTENT_DATA_TEST_SPACE_DIM; j++)
+ buf_origin[i][j] = i + j;
+
+ /* Write the original data
+ * X X X X X X X X
+ * X X X X X X X X
+ * X X X X X X X X
+ * X X X X X X X X
+ * X X X X X X X X
+ * X X X X X X X X
+ * X X X X X X X X
+ * X X X X X X X X
+ */
+ if (H5Dwrite(dset_id, H5T_NATIVE_INT, fspace_id, H5S_ALL, H5P_DEFAULT, buf_origin) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ BEGIN_MULTIPART
+ {
+ PART_BEGIN(H5Dset_extent_data_expand)
+ {
+ TESTING_2("H5Dset_extent for data expansion");
+
+ /* Expand the dataset. The extended space should be initialized with
+ * the default value (0)
+ * X X X X X X X X 0 0 0 0 0 0 0
+ * X X X X X X X X 0 0 0 0 0 0 0
+ * X X X X X X X X 0 0 0 0 0 0 0
+ * X X X X X X X X 0 0 0 0 0 0 0
+ * X X X X X X X X 0 0 0 0 0 0 0
+ * X X X X X X X X 0 0 0 0 0 0 0
+ * X X X X X X X X 0 0 0 0 0 0 0
+ * X X X X X X X X 0 0 0 0 0 0 0
+ * 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+ * 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+ * 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+ * 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+ * 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+ * 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+ * 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+ */
+ if (H5Dset_extent(dset_id, dims_expand) < 0)
+ PART_ERROR(H5Dset_extent_data_expand);
+
+ if (H5Dread(dset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf_expand) < 0)
+ PART_ERROR(H5Dset_extent_data_expand);
+
+ /* compare the expanded data */
+ for (i = 0; i < (int)dims_expand[0]; i++) {
+ for (j = 0; j < (int)dims_expand[1]; j++) {
+ if (i >= (int)dims_origin[0] || j >= (int)dims_origin[1]) {
+ if (buf_expand[i][j] != 0) {
+ H5_FAILED();
+ HDprintf(" buf_expand[%d][%d] = %d. It should be 0\n", i, j, buf_expand[i][j]);
+ PART_ERROR(H5Dset_extent_data_expand);
+ }
+ }
+ else {
+ if (buf_expand[i][j] != buf_origin[i][j]) {
+ H5_FAILED();
+ HDprintf(" buf_expand[%d][%d] = %d. It should be %d\n", i, j, buf_expand[i][j],
+ buf_origin[i][j]);
+ PART_ERROR(H5Dset_extent_data_expand);
+ }
+ }
+ }
+ }
+
+ PASSED();
+ }
+ PART_END(H5Dset_extent_data_expand);
+
+ PART_BEGIN(H5Dset_extent_data_shrink)
+ {
+ TESTING_2("H5Dset_extent for data shrinking");
+
+ /* Shrink the dataset.
+ * X X X X X
+ * X X X X X
+ * X X X X X
+ * X X X X X
+ * X X X X X
+ */
+ if (H5Dset_extent(dset_id, dims_shrink) < 0)
+ PART_ERROR(H5Dset_extent_data_shrink);
+
+ if (H5Dread(dset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf_shrink) < 0)
+ PART_ERROR(H5Dset_extent_data_shrink);
+
+ /* compare the shrunk data */
+ for (i = 0; i < (int)dims_shrink[0]; i++) {
+ for (j = 0; j < (int)dims_shrink[1]; j++) {
+ if (buf_shrink[i][j] != buf_origin[i][j]) {
+ H5_FAILED();
+ HDprintf(" buf_shrink[%d][%d] = %d. It should be %d\n", i, j, buf_shrink[i][j],
+ buf_origin[i][j]);
+ PART_ERROR(H5Dset_extent_data_shrink);
+ }
+ }
+ }
+
+ PASSED();
+ }
+ PART_END(H5Dset_extent_data_shrink);
+
+ PART_BEGIN(H5Dset_extent_data_expand_to_origin)
+ {
+ TESTING_2("H5Dset_extent for data back to the original size");
+#ifndef NO_CLEAR_ON_SHRINK
+ /* Expand the dataset back to the original size. The data should look like this:
+ * X X X X X 0 0 0
+ * X X X X X 0 0 0
+ * X X X X X 0 0 0
+ * X X X X X 0 0 0
+ * X X X X X 0 0 0
+ * 0 0 0 0 0 0 0 0
+ * 0 0 0 0 0 0 0 0
+ * 0 0 0 0 0 0 0 0
+ */
+ if (H5Dset_extent(dset_id, dims_origin) < 0)
+ PART_ERROR(H5Dset_extent_data_expand_to_origin);
+
+ if (H5Dread(dset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf_expand2) < 0)
+ PART_ERROR(H5Dset_extent_data_expand_to_origin);
+
+ /* compare the expanded data */
+ for (i = 0; i < (int)dims_origin[0]; i++) {
+ for (j = 0; j < (int)dims_origin[1]; j++) {
+ if (i >= (int)dims_shrink[0] || j >= (int)dims_shrink[1]) {
+ if (buf_expand2[i][j] != 0) {
+ H5_FAILED();
+ HDprintf(" buf_expand2[%d][%d] = %d. It should be 0\n", i, j,
+ buf_expand2[i][j]);
+ PART_ERROR(H5Dset_extent_data_expand_to_origin);
+ }
+ }
+ else {
+ if (buf_expand2[i][j] != buf_origin[i][j]) {
+ H5_FAILED();
+ HDprintf(" buf_expand2[%d][%d] = %d. It should be %d.\n", i, j,
+ buf_expand2[i][j], buf_origin[i][j]);
+ PART_ERROR(H5Dset_extent_data_expand_to_origin);
+ }
+ }
+ }
+ }
+
+ PASSED();
+#else
+ SKIPPED();
+ PART_EMPTY(H5Dset_extent_data_expand_to_origin);
+#endif
+ }
+ PART_END(H5Dset_extent_data_expand_to_origin);
+
+ PART_BEGIN(H5Dset_extent_data_shrink_to_zero)
+ {
+ TESTING_2("H5Dset_extent for data shrink to zero size");
+
+ /* Shrink the dimensions to 0 and verify it */
+ dims_shrink[0] = dims_shrink[1] = 0;
+
+ if (H5Dset_extent(dset_id, dims_shrink) < 0)
+ PART_ERROR(H5Dset_extent_data_shrink_to_zero);
+
+ /* get the space */
+ if ((dset_space_id = H5Dget_space(dset_id)) < 0)
+ PART_ERROR(H5Dset_extent_data_shrink_to_zero);
+
+ /* get dimensions */
+ if (H5Sget_simple_extent_dims(dset_space_id, dims_out, NULL) < 0)
+ PART_ERROR(H5Dset_extent_data_shrink_to_zero);
+
+ if (H5Sclose(dset_space_id) < 0)
+ PART_ERROR(H5Dset_extent_data_shrink_to_zero);
+
+ /* Verify the dimensions are 0 */
+ for (i = 0; i < DATASET_SET_EXTENT_DATA_TEST_SPACE_RANK; i++)
+ if (dims_out[i] != 0) {
+ H5_FAILED();
+ HDprintf(" dims_out[%d] = %llu. It should be 0.\n", i,
+ (long long unsigned int)dims_out[i]);
+ PART_ERROR(H5Dset_extent_data_shrink_to_zero);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Dset_extent_data_shrink_to_zero);
+
+ PART_BEGIN(H5Dset_extent_data_expand_to_origin_again)
+ {
+ TESTING_2("H5Dset_extent for data expansion back to the original again");
+#ifndef NO_CLEAR_ON_SHRINK
+ /* Expand the dataset back to the original size. The data should look like this:
+ * 0 0 0 0 0 0 0 0
+ * 0 0 0 0 0 0 0 0
+ * 0 0 0 0 0 0 0 0
+ * 0 0 0 0 0 0 0 0
+ * 0 0 0 0 0 0 0 0
+ * 0 0 0 0 0 0 0 0
+ * 0 0 0 0 0 0 0 0
+ * 0 0 0 0 0 0 0 0
+ */
+ if (H5Dset_extent(dset_id, dims_origin) < 0)
+ PART_ERROR(H5Dset_extent_data_expand_to_origin_again);
+
+ if (H5Dread(dset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf_expand2) < 0)
+ PART_ERROR(H5Dset_extent_data_expand_to_origin_again);
+
+ /* The data should be all zeros */
+ for (i = 0; i < (int)dims_origin[0]; i++) {
+ for (j = 0; j < (int)dims_origin[1]; j++) {
+ if (buf_expand2[i][j] != 0) {
+ H5_FAILED();
+ HDprintf(" buf_expand2[%d][%d] = %d. It should be 0.\n", i, j, buf_expand2[i][j]);
+ PART_ERROR(H5Dset_extent_data_expand_to_origin_again);
+ }
+ }
+ }
+
+ PASSED();
+#else
+ SKIPPED();
+ PART_EMPTY(H5Dset_extent_data_expand_to_origin_again);
+#endif
+ }
+ PART_END(H5Dset_extent_data_expand_to_origin_again);
+ }
+ END_MULTIPART;
+
+ TESTING_2("test cleanup");
+
+ if (H5Pclose(dcpl_id) < 0)
+ TEST_ERROR;
+ if (H5Sclose(fspace_id) < 0)
+ TEST_ERROR;
+ if (H5Dclose(dset_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Pclose(dcpl_id);
+ H5Sclose(fspace_id);
+ H5Sclose(dset_space_id);
+ H5Dclose(dset_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+} /* test_dataset_set_extent_data */
+
+/*
+ * If a dataset is opened twice and one of the handles is
+ * used to extend the dataset, then the other handle should
+ * return the new size when queried.
+ */
+static int
+test_dataset_set_extent_double_handles(void)
+{
+#ifndef NO_DOUBLE_OBJECT_OPENS
+ hsize_t dims_origin[DATASET_SET_EXTENT_DOUBLE_HANDLES_TEST_SPACE_RANK] = {
+ DATASET_SET_EXTENT_DOUBLE_HANDLES_TEST_SPACE_DIM, DATASET_SET_EXTENT_DOUBLE_HANDLES_TEST_SPACE_DIM};
+ hsize_t dims_expand[DATASET_SET_EXTENT_DOUBLE_HANDLES_TEST_SPACE_RANK] = {
+ DATASET_SET_EXTENT_DOUBLE_HANDLES_TEST_SPACE_DIM * 2,
+ DATASET_SET_EXTENT_DOUBLE_HANDLES_TEST_SPACE_DIM * 2};
+ hsize_t dims_chunk[DATASET_SET_EXTENT_DOUBLE_HANDLES_TEST_SPACE_RANK] = {
+ DATASET_SET_EXTENT_DOUBLE_HANDLES_TEST_SPACE_DIM / 2,
+ DATASET_SET_EXTENT_DOUBLE_HANDLES_TEST_SPACE_DIM / 2};
+ hsize_t dims_max[DATASET_SET_EXTENT_DOUBLE_HANDLES_TEST_SPACE_RANK] = {H5S_UNLIMITED, H5S_UNLIMITED};
+ hsize_t dims_out[DATASET_SET_EXTENT_DOUBLE_HANDLES_TEST_SPACE_RANK];
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID, dset_id2 = H5I_INVALID_HID;
+ hid_t dcpl_id = H5I_INVALID_HID;
+ hid_t fspace_id = H5I_INVALID_HID, dset_space_id = H5I_INVALID_HID;
+ int i;
+#endif
+
+ TESTING("H5Dset_extent on double dataset handles");
+
+#ifndef NO_DOUBLE_OBJECT_OPENS
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file, group, basic or more dataset aren't supported with this "
+ "connector\n");
+ return 0;
+ }
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, DATASET_SET_EXTENT_DOUBLE_HANDLES_TEST_GROUP_NAME,
+ H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container sub-group '%s'\n", DATASET_SET_EXTENT_DATA_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((fspace_id =
+ H5Screate_simple(DATASET_SET_EXTENT_DOUBLE_HANDLES_TEST_SPACE_RANK, dims_origin, dims_max)) < 0)
+ TEST_ERROR;
+
+ if ((dcpl_id = H5Pcreate(H5P_DATASET_CREATE)) < 0)
+ TEST_ERROR;
+
+ if (H5Pset_chunk(dcpl_id, DATASET_SET_EXTENT_DOUBLE_HANDLES_TEST_SPACE_RANK, dims_chunk) < 0) {
+ H5_FAILED();
+ HDprintf(" unable to set dataset chunk dimensionality\n");
+ goto error;
+ }
+
+ /* Create the dataset */
+ if ((dset_id = H5Dcreate2(group_id, DATASET_SET_EXTENT_DOUBLE_HANDLES_TEST_DSET_NAME, H5T_NATIVE_INT,
+ fspace_id, H5P_DEFAULT, dcpl_id, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create dataset '%s'\n", DATASET_SET_EXTENT_DOUBLE_HANDLES_TEST_DSET_NAME);
+ goto error;
+ }
+
+ /* Open the same dataset again */
+ if ((dset_id2 = H5Dopen2(group_id, DATASET_SET_EXTENT_DOUBLE_HANDLES_TEST_DSET_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open dataset '%s'\n", DATASET_SET_EXTENT_DOUBLE_HANDLES_TEST_DSET_NAME);
+ goto error;
+ }
+
+ /* Expand the dataset's dimensions with the first dataset handle */
+ if (H5Dset_extent(dset_id, dims_expand) < 0)
+ TEST_ERROR;
+
+ /* Get the data space with the second dataset handle */
+ if ((dset_space_id = H5Dget_space(dset_id2)) < 0)
+ TEST_ERROR;
+
+ /* Get the dimensions with the second dataset handle */
+ if (H5Sget_simple_extent_dims(dset_space_id, dims_out, NULL) < 0)
+ TEST_ERROR;
+
+ if (H5Sclose(dset_space_id) < 0)
+ TEST_ERROR;
+
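+ /* Verify that the second dataset handle reports the new, expanded dimensions */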
+ for (i = 0; i < DATASET_SET_EXTENT_DOUBLE_HANDLES_TEST_SPACE_RANK; i++)
+ if (dims_out[i] != dims_expand[i]) {
+ H5_FAILED();
+ HDprintf(" dims_out[%d] = %d. It should be %d.\n", i, dims_out[i], dims_expand[i]);
+ goto error;
+ }
+
+ if (H5Pclose(dcpl_id) < 0)
+ TEST_ERROR;
+ if (H5Sclose(fspace_id) < 0)
+ TEST_ERROR;
+ if (H5Dclose(dset_id) < 0)
+ TEST_ERROR;
+ if (H5Dclose(dset_id2) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Pclose(dcpl_id);
+ H5Sclose(fspace_id);
+ H5Sclose(dset_space_id);
+ H5Dclose(dset_id);
+ H5Dclose(dset_id2);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+#else
+ SKIPPED();
+ return 0;
+#endif
+} /* test_dataset_set_extent_double_handles */
+
+/*
+ * A test to check that a dataset's extent can't be
+ * changed when H5Dset_extent is passed invalid parameters.
+ */
+static int
+test_dataset_set_extent_invalid_params(void)
+{
+ hsize_t dims[DATASET_SET_EXTENT_INVALID_PARAMS_TEST_SPACE_RANK];
+ hsize_t chunk_dims[DATASET_SET_EXTENT_INVALID_PARAMS_TEST_SPACE_RANK];
+ hsize_t new_dims[DATASET_SET_EXTENT_INVALID_PARAMS_TEST_SPACE_RANK];
+ hsize_t compact_dims[DATASET_SET_EXTENT_INVALID_PARAMS_TEST_SPACE_RANK] = {3, 3};
+ size_t i;
+ herr_t err_ret = -1;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t chunked_dset_id = H5I_INVALID_HID, compact_dset_id = H5I_INVALID_HID,
+ contiguous_dset_id = H5I_INVALID_HID;
+ hid_t dset_dtype = H5I_INVALID_HID;
+ hid_t chunked_dcpl_id = H5I_INVALID_HID, compact_dcpl_id = H5I_INVALID_HID,
+ contiguous_dcpl_id = H5I_INVALID_HID;
+ hid_t fspace_id = H5I_INVALID_HID, compact_fspace_id = H5I_INVALID_HID;
+ char vol_name[5];
+
+ TESTING_MULTIPART("H5Dset_extent with invalid parameters");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file, group, basic or more dataset aren't supported with this "
+ "connector\n");
+ return 0;
+ }
+
+ TESTING_2("test setup");
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ /* For the DAOS VOL connector, this test is problematic since auto chunking can be selected, so skip it for now */
+ if (H5VLget_connector_name(file_id, vol_name, 5) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get VOL connector name\n");
+ goto error;
+ }
+ if (strcmp(vol_name, "daos") == 0) {
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+ SKIPPED();
+ return 0;
+ }
+
+ if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, DATASET_SET_EXTENT_INVALID_PARAMS_TEST_GROUP_NAME,
+ H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container sub-group '%s'\n",
+ DATASET_SET_EXTENT_INVALID_PARAMS_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((fspace_id = generate_random_dataspace(DATASET_SET_EXTENT_INVALID_PARAMS_TEST_SPACE_RANK, NULL, dims,
+ FALSE)) < 0)
+ TEST_ERROR;
+
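+ /* Choose new extents and chunk dimensions that are no larger than the
+ * original dataset dimensions */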
+ for (i = 0; i < DATASET_SET_EXTENT_INVALID_PARAMS_TEST_SPACE_RANK; i++) {
+ do {
+ new_dims[i] = (hsize_t)(rand() % MAX_DIM_SIZE + 1);
+ } while (new_dims[i] > dims[i]);
+ do {
+ chunk_dims[i] = (hsize_t)(rand() % MAX_DIM_SIZE + 1);
+ } while (chunk_dims[i] > dims[i]);
+ }
+
+ if ((dset_dtype = generate_random_datatype(H5T_NO_CLASS, FALSE)) < 0)
+ TEST_ERROR;
+
+ /* Create a compact dataset */
+ if ((compact_dcpl_id = H5Pcreate(H5P_DATASET_CREATE)) < 0)
+ TEST_ERROR;
+
+ if (H5Pset_layout(compact_dcpl_id, H5D_COMPACT) < 0)
+ TEST_ERROR;
+
+ /* Keep the dataspace small because the storage size of a compact dataset is limited to 64 KB */
+ if ((compact_fspace_id =
+ H5Screate_simple(DATASET_SET_EXTENT_INVALID_PARAMS_TEST_SPACE_RANK, compact_dims, NULL)) < 0)
+ TEST_ERROR;
+
+ if ((compact_dset_id =
+ H5Dcreate2(group_id, DATASET_SET_EXTENT_INVALID_LAYOUT_TEST_COMPACT_DSET_NAME, H5T_NATIVE_INT,
+ compact_fspace_id, H5P_DEFAULT, compact_dcpl_id, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create dataset '%s'\n",
+ DATASET_SET_EXTENT_INVALID_LAYOUT_TEST_COMPACT_DSET_NAME);
+ goto error;
+ }
+
+ /* Create a contiguous dataset */
+ if ((contiguous_dcpl_id = H5Pcreate(H5P_DATASET_CREATE)) < 0)
+ TEST_ERROR;
+
+ if (H5Pset_layout(contiguous_dcpl_id, H5D_CONTIGUOUS) < 0)
+ TEST_ERROR;
+
+ if ((contiguous_dset_id =
+ H5Dcreate2(group_id, DATASET_SET_EXTENT_INVALID_LAYOUT_TEST_CONTIGUOUS_DSET_NAME, dset_dtype,
+ fspace_id, H5P_DEFAULT, contiguous_dcpl_id, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create dataset '%s'\n",
+ DATASET_SET_EXTENT_INVALID_LAYOUT_TEST_CONTIGUOUS_DSET_NAME);
+ goto error;
+ }
+
+ /* Create a chunked dataset */
+ if ((chunked_dcpl_id = H5Pcreate(H5P_DATASET_CREATE)) < 0)
+ TEST_ERROR;
+
+ if (H5Pset_chunk(chunked_dcpl_id, DATASET_SET_EXTENT_INVALID_PARAMS_TEST_SPACE_RANK, chunk_dims) < 0) {
+ H5_FAILED();
+ HDprintf(" unable to set dataset chunk dimensionality\n");
+ goto error;
+ }
+
+ if ((chunked_dset_id = H5Dcreate2(group_id, DATASET_SET_EXTENT_INVALID_PARAMS_TEST_DSET_NAME, dset_dtype,
+ fspace_id, H5P_DEFAULT, chunked_dcpl_id, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create dataset '%s'\n", DATASET_SET_EXTENT_INVALID_PARAMS_TEST_DSET_NAME);
+ goto error;
+ }
+
+ PASSED();
+
+ BEGIN_MULTIPART
+ {
+ PART_BEGIN(H5Dset_extent_invalid_layout_compact)
+ {
+ TESTING_2("H5Dset_extent with an invalid dataset layout (compact)");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Dset_extent(compact_dset_id, new_dims);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" setting dataset extent succeeded with an invalid layout (compact)\n");
+ PART_ERROR(H5Dset_extent_invalid_layout_compact);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Dset_extent_invalid_layout_compact);
+
+ PART_BEGIN(H5Dset_extent_invalid_layout_contiguous)
+ {
+ TESTING_2("H5Dset_extent with an invalid dataset layout (contiguous)");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Dset_extent(contiguous_dset_id, new_dims);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" setting dataset extent succeeded with an invalid layout (contiguous)\n");
+ PART_ERROR(H5Dset_extent_invalid_layout_contiguous);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Dset_extent_invalid_layout_contiguous);
+
+ PART_BEGIN(H5Dset_extent_invalid_dset_id)
+ {
+ TESTING_2("H5Dset_extent with an invalid dataset ID");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Dset_extent(H5I_INVALID_HID, new_dims);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" setting dataset extent succeeded with an invalid dataset ID\n");
+ PART_ERROR(H5Dset_extent_invalid_dset_id);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Dset_extent_invalid_dset_id);
+
+ PART_BEGIN(H5Dset_extent_null_dim_pointer)
+ {
+ TESTING_2("H5Dset_extent with NULL dimension pointer");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Dset_extent(chunked_dset_id, NULL);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" setting dataset extent succeeded with a NULL dimension pointer\n");
+ PART_ERROR(H5Dset_extent_null_dim_pointer);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Dset_extent_null_dim_pointer);
+ }
+ END_MULTIPART;
+
+ TESTING_2("test cleanup");
+
+ if (H5Pclose(chunked_dcpl_id) < 0)
+ TEST_ERROR;
+ if (H5Pclose(compact_dcpl_id) < 0)
+ TEST_ERROR;
+ if (H5Pclose(contiguous_dcpl_id) < 0)
+ TEST_ERROR;
+ if (H5Sclose(fspace_id) < 0)
+ TEST_ERROR;
+ if (H5Sclose(compact_fspace_id) < 0)
+ TEST_ERROR;
+ if (H5Tclose(dset_dtype) < 0)
+ TEST_ERROR;
+ if (H5Dclose(chunked_dset_id) < 0)
+ TEST_ERROR;
+ if (H5Dclose(compact_dset_id) < 0)
+ TEST_ERROR;
+ if (H5Dclose(contiguous_dset_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Pclose(chunked_dcpl_id);
+ H5Pclose(compact_dcpl_id);
+ H5Pclose(contiguous_dcpl_id);
+ H5Sclose(fspace_id);
+ H5Sclose(compact_fspace_id);
+ H5Tclose(dset_dtype);
+ H5Dclose(chunked_dset_id);
+ H5Dclose(compact_dset_id);
+ H5Dclose(contiguous_dset_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+} /* test_dataset_set_extent_invalid_params */
+
+/*
+ * A test for H5Dflush.
+ */
+static int
+test_flush_dataset(void)
+{
+ TESTING("H5Dflush");
+
+ SKIPPED();
+
+ return 0;
+}
+
+/*
+ * A test to check that H5Dflush fails when it is
+ * passed invalid parameters.
+ */
+static int
+test_flush_dataset_invalid_params(void)
+{
+ TESTING("H5Dflush with invalid parameters");
+
+ SKIPPED();
+
+ return 0;
+}
+
+/*
+ * A test for H5Drefresh.
+ */
+static int
+test_refresh_dataset(void)
+{
+ TESTING("H5Drefresh");
+
+ SKIPPED();
+
+ return 0;
+}
+
+/*
+ * A test to check that H5Drefresh fails when it is
+ * passed invalid parameters.
+ */
+static int
+test_refresh_dataset_invalid_params(void)
+{
+ TESTING("H5Drefresh");
+
+ SKIPPED();
+
+ return 0;
+}
+
+/*
+ * A test to create a dataset composed of a single chunk.
+ */
+static int
+test_create_single_chunk_dataset(void)
+{
+ hsize_t dims[DATASET_SINGLE_CHUNK_TEST_SPACE_RANK];
+ hsize_t retrieved_chunk_dims[DATASET_SINGLE_CHUNK_TEST_SPACE_RANK];
+ size_t i;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t dset_dtype = H5I_INVALID_HID;
+ hid_t dcpl_id = H5I_INVALID_HID;
+ hid_t fspace_id = H5I_INVALID_HID;
+
+ TESTING("creation of dataset with single chunk");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GET_PLIST)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file, group, dataset, or get property list aren't supported "
+ "with this connector\n");
+ return 0;
+ }
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, DATASET_SINGLE_CHUNK_TEST_GROUP_NAME, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container sub-group '%s'\n", DATASET_SINGLE_CHUNK_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((fspace_id = generate_random_dataspace(DATASET_SINGLE_CHUNK_TEST_SPACE_RANK, NULL, dims, FALSE)) < 0)
+ TEST_ERROR;
+ if ((dset_dtype = generate_random_datatype(H5T_NO_CLASS, FALSE)) < 0)
+ TEST_ERROR;
+
+ if ((dcpl_id = H5Pcreate(H5P_DATASET_CREATE)) < 0)
+ TEST_ERROR;
+
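+    /*
+     * Use the full dataset dimensions as the chunk dimensions so that the
+     * dataset is laid out as a single chunk.
+     */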
+ if (H5Pset_chunk(dcpl_id, DATASET_SINGLE_CHUNK_TEST_SPACE_RANK, dims) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to set chunking on DCPL\n");
+ goto error;
+ }
+
+ if ((dset_id = H5Dcreate2(group_id, DATASET_SINGLE_CHUNK_TEST_DSET_NAME, dset_dtype, fspace_id,
+ H5P_DEFAULT, dcpl_id, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create dataset '%s'\n", DATASET_SINGLE_CHUNK_TEST_DSET_NAME);
+ goto error;
+ }
+
+ /*
+ * See if a copy of the DCPL reports the correct chunking.
+ */
+ if (H5Pclose(dcpl_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close DCPL\n");
+ goto error;
+ }
+
+ if ((dcpl_id = H5Dget_create_plist(dset_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve copy of DCPL\n");
+ goto error;
+ }
+
+ memset(retrieved_chunk_dims, 0, sizeof(retrieved_chunk_dims));
+ if (H5Pget_chunk(dcpl_id, DATASET_SINGLE_CHUNK_TEST_SPACE_RANK, retrieved_chunk_dims) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve chunking info\n");
+ goto error;
+ }
+
+ for (i = 0; i < DATASET_SINGLE_CHUNK_TEST_SPACE_RANK; i++) {
+ if (dims[i] != retrieved_chunk_dims[i]) {
+ H5_FAILED();
+            HDprintf("    chunk dimensions retrieved from DCPL didn't match the originally specified "
+                     "chunk dimensions\n");
+ goto error;
+ }
+ }
+
+ /*
+ * Now close the dataset and retrieve a copy
+ * of the DCPL after re-opening it.
+ */
+ if (H5Pclose(dcpl_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close DCPL\n");
+ goto error;
+ }
+
+ if (H5Dclose(dset_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close dataset\n");
+ goto error;
+ }
+
+ if ((dset_id = H5Dopen2(group_id, DATASET_SINGLE_CHUNK_TEST_DSET_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to re-open dataset\n");
+ goto error;
+ }
+
+ if ((dcpl_id = H5Dget_create_plist(dset_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve copy of DCPL\n");
+ goto error;
+ }
+
+ memset(retrieved_chunk_dims, 0, sizeof(retrieved_chunk_dims));
+ if (H5Pget_chunk(dcpl_id, DATASET_SINGLE_CHUNK_TEST_SPACE_RANK, retrieved_chunk_dims) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve chunking info\n");
+ goto error;
+ }
+
+ for (i = 0; i < DATASET_SINGLE_CHUNK_TEST_SPACE_RANK; i++) {
+ if (dims[i] != retrieved_chunk_dims[i]) {
+ H5_FAILED();
+            HDprintf("    chunk dimensions retrieved from DCPL didn't match the originally specified "
+                     "chunk dimensions\n");
+ goto error;
+ }
+ }
+
+ if (H5Pclose(dcpl_id) < 0)
+ TEST_ERROR;
+ if (H5Sclose(fspace_id) < 0)
+ TEST_ERROR;
+ if (H5Tclose(dset_dtype) < 0)
+ TEST_ERROR;
+ if (H5Dclose(dset_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Pclose(dcpl_id);
+ H5Sclose(fspace_id);
+ H5Tclose(dset_dtype);
+ H5Dclose(dset_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to check that a single-chunk dataset can be written
+ * and read correctly.
+ */
+static int
+test_write_single_chunk_dataset(void)
+{
+ hssize_t space_npoints;
+ hsize_t dims[DATASET_SINGLE_CHUNK_WRITE_TEST_DSET_SPACE_RANK];
+ hsize_t retrieved_chunk_dims[DATASET_SINGLE_CHUNK_WRITE_TEST_DSET_SPACE_RANK];
+ size_t i, data_size;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t dcpl_id = H5I_INVALID_HID;
+ hid_t fspace_id = H5I_INVALID_HID;
+ void *write_buf = NULL;
+ void *read_buf = NULL;
+
+ TESTING("write to dataset with single chunk");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_GET_PLIST)) {
+ SKIPPED();
+        HDprintf("    API functions for basic file, group, basic or more dataset, or get property list "
+                 "aren't supported with this connector\n");
+ return 0;
+ }
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, DATASET_SINGLE_CHUNK_WRITE_TEST_GROUP_NAME, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container sub-group '%s'\n",
+ DATASET_SINGLE_CHUNK_WRITE_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((fspace_id = generate_random_dataspace(DATASET_SINGLE_CHUNK_WRITE_TEST_DSET_SPACE_RANK, NULL, dims,
+ FALSE)) < 0)
+ TEST_ERROR;
+
+ if ((dcpl_id = H5Pcreate(H5P_DATASET_CREATE)) < 0)
+ TEST_ERROR;
+
+ if (H5Pset_chunk(dcpl_id, DATASET_SINGLE_CHUNK_WRITE_TEST_DSET_SPACE_RANK, dims) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to set chunking on DCPL\n");
+ goto error;
+ }
+
+ if ((dset_id = H5Dcreate2(group_id, DATASET_SINGLE_CHUNK_WRITE_TEST_DSET_NAME,
+ DATASET_SINGLE_CHUNK_WRITE_TEST_DSET_DTYPE, fspace_id, H5P_DEFAULT, dcpl_id,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create dataset '%s'\n", DATASET_SINGLE_CHUNK_WRITE_TEST_DSET_NAME);
+ goto error;
+ }
+
+ /*
+ * See if a copy of the DCPL reports the correct chunking.
+ */
+ if (H5Pclose(dcpl_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close DCPL\n");
+ goto error;
+ }
+
+ if ((dcpl_id = H5Dget_create_plist(dset_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve copy of DCPL\n");
+ goto error;
+ }
+
+ memset(retrieved_chunk_dims, 0, sizeof(retrieved_chunk_dims));
+ if (H5Pget_chunk(dcpl_id, DATASET_SINGLE_CHUNK_WRITE_TEST_DSET_SPACE_RANK, retrieved_chunk_dims) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve chunking info\n");
+ goto error;
+ }
+
+ for (i = 0; i < DATASET_SINGLE_CHUNK_WRITE_TEST_DSET_SPACE_RANK; i++) {
+ if (dims[i] != retrieved_chunk_dims[i]) {
+ H5_FAILED();
+            HDprintf("    chunk dimensions retrieved from DCPL didn't match the originally specified "
+                     "chunk dimensions\n");
+ goto error;
+ }
+ }
+
+ for (i = 0, data_size = 1; i < DATASET_SINGLE_CHUNK_WRITE_TEST_DSET_SPACE_RANK; i++)
+ data_size *= dims[i];
+ data_size *= DATASET_SINGLE_CHUNK_WRITE_TEST_DSET_DTYPESIZE;
+
+ if (NULL == (write_buf = HDmalloc(data_size)))
+ TEST_ERROR;
+
+ for (i = 0; i < data_size / DATASET_SINGLE_CHUNK_WRITE_TEST_DSET_DTYPESIZE; i++)
+ ((int *)write_buf)[i] = (int)i;
+
+ if (H5Dwrite(dset_id, DATASET_SINGLE_CHUNK_WRITE_TEST_DSET_DTYPE, H5S_ALL, H5S_ALL, H5P_DEFAULT,
+ write_buf) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't write to dataset '%s'\n", DATASET_SINGLE_CHUNK_WRITE_TEST_DSET_NAME);
+ goto error;
+ }
+
+ if (write_buf) {
+ HDfree(write_buf);
+ write_buf = NULL;
+ }
+
+ if (fspace_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(fspace_id);
+ }
+ H5E_END_TRY;
+ fspace_id = H5I_INVALID_HID;
+ }
+ if (dset_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Dclose(dset_id);
+ }
+ H5E_END_TRY;
+ dset_id = H5I_INVALID_HID;
+ }
+
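+    /*
+     * Re-open the dataset so that the read below goes through a freshly
+     * opened dataset handle.
+     */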
+ if ((dset_id = H5Dopen2(group_id, DATASET_SINGLE_CHUNK_WRITE_TEST_DSET_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open dataset '%s'\n", DATASET_SINGLE_CHUNK_WRITE_TEST_DSET_NAME);
+ goto error;
+ }
+
+ if ((fspace_id = H5Dget_space(dset_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get dataset dataspace\n");
+ goto error;
+ }
+
+ if ((space_npoints = H5Sget_simple_extent_npoints(fspace_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get dataspace num points\n");
+ goto error;
+ }
+
+ if (NULL ==
+ (read_buf = HDmalloc((hsize_t)space_npoints * DATASET_SINGLE_CHUNK_WRITE_TEST_DSET_DTYPESIZE))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for dataset read\n");
+ goto error;
+ }
+
+ if (H5Dread(dset_id, DATASET_SINGLE_CHUNK_WRITE_TEST_DSET_DTYPE, H5S_ALL, H5S_ALL, H5P_DEFAULT,
+ read_buf) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't read from dataset '%s'\n", DATASET_SINGLE_CHUNK_WRITE_TEST_DSET_NAME);
+ goto error;
+ }
+
+ for (i = 0; i < (hsize_t)space_npoints; i++)
+ if (((int *)read_buf)[i] != (int)i) {
+ H5_FAILED();
+ HDprintf(" data verification failed\n");
+ goto error;
+ }
+
+ if (read_buf) {
+ HDfree(read_buf);
+ read_buf = NULL;
+ }
+
+ if (H5Pclose(dcpl_id) < 0)
+ TEST_ERROR;
+ if (H5Sclose(fspace_id) < 0)
+ TEST_ERROR;
+ if (H5Dclose(dset_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ if (write_buf)
+ HDfree(write_buf);
+ if (read_buf)
+ HDfree(read_buf);
+ H5Pclose(dcpl_id);
+ H5Sclose(fspace_id);
+ H5Dclose(dset_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to create a dataset composed of multiple chunks.
+ */
+static int
+test_create_multi_chunk_dataset(void)
+{
+ hsize_t dims[DATASET_MULTI_CHUNK_TEST_SPACE_RANK] = {100, 100};
+ hsize_t chunk_dims[DATASET_MULTI_CHUNK_TEST_SPACE_RANK] = {10, 10};
+ hsize_t retrieved_chunk_dims[DATASET_MULTI_CHUNK_TEST_SPACE_RANK];
+ size_t i;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t dset_dtype = H5I_INVALID_HID;
+ hid_t dcpl_id = H5I_INVALID_HID;
+ hid_t fspace_id = H5I_INVALID_HID;
+
+ TESTING("creation of dataset with multiple chunks");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GET_PLIST)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file, group, dataset, or get property list aren't supported "
+ "with this connector\n");
+ return 0;
+ }
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, DATASET_MULTI_CHUNK_TEST_GROUP_NAME, H5P_DEFAULT, H5P_DEFAULT,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container sub-group '%s'\n", DATASET_MULTI_CHUNK_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((fspace_id = H5Screate_simple(DATASET_MULTI_CHUNK_TEST_SPACE_RANK, dims, NULL)) < 0)
+ TEST_ERROR;
+ if ((dset_dtype = generate_random_datatype(H5T_NO_CLASS, FALSE)) < 0)
+ TEST_ERROR;
+
+ if ((dcpl_id = H5Pcreate(H5P_DATASET_CREATE)) < 0)
+ TEST_ERROR;
+
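+    /*
+     * With 100 x 100 dataset dimensions and 10 x 10 chunk dimensions, the
+     * dataset is laid out as a 10 x 10 grid of 100 chunks.
+     */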
+ if (H5Pset_chunk(dcpl_id, DATASET_MULTI_CHUNK_TEST_SPACE_RANK, chunk_dims) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to set chunking on DCPL\n");
+ goto error;
+ }
+
+ if ((dset_id = H5Dcreate2(group_id, DATASET_MULTI_CHUNK_TEST_DSET_NAME, dset_dtype, fspace_id,
+ H5P_DEFAULT, dcpl_id, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create dataset '%s'\n", DATASET_MULTI_CHUNK_TEST_DSET_NAME);
+ goto error;
+ }
+
+ /*
+ * See if a copy of the DCPL reports the correct chunking.
+ */
+ if (H5Pclose(dcpl_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close DCPL\n");
+ goto error;
+ }
+
+ if ((dcpl_id = H5Dget_create_plist(dset_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve copy of DCPL\n");
+ goto error;
+ }
+
+ memset(retrieved_chunk_dims, 0, sizeof(retrieved_chunk_dims));
+ if (H5Pget_chunk(dcpl_id, DATASET_MULTI_CHUNK_TEST_SPACE_RANK, retrieved_chunk_dims) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve chunking info\n");
+ goto error;
+ }
+
+ for (i = 0; i < DATASET_MULTI_CHUNK_TEST_SPACE_RANK; i++) {
+ if (chunk_dims[i] != retrieved_chunk_dims[i]) {
+ H5_FAILED();
+            HDprintf("    chunk dimensions retrieved from DCPL didn't match the originally specified "
+                     "chunk dimensions\n");
+ goto error;
+ }
+ }
+
+ /*
+ * Now close the dataset and retrieve a copy
+ * of the DCPL after re-opening it.
+ */
+ if (H5Pclose(dcpl_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close DCPL\n");
+ goto error;
+ }
+
+ if (H5Dclose(dset_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close dataset\n");
+ goto error;
+ }
+
+ if ((dset_id = H5Dopen2(group_id, DATASET_MULTI_CHUNK_TEST_DSET_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to re-open dataset\n");
+ goto error;
+ }
+
+ if ((dcpl_id = H5Dget_create_plist(dset_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve copy of DCPL\n");
+ goto error;
+ }
+
+ memset(retrieved_chunk_dims, 0, sizeof(retrieved_chunk_dims));
+ if (H5Pget_chunk(dcpl_id, DATASET_MULTI_CHUNK_TEST_SPACE_RANK, retrieved_chunk_dims) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve chunking info\n");
+ goto error;
+ }
+
+ for (i = 0; i < DATASET_MULTI_CHUNK_TEST_SPACE_RANK; i++) {
+ if (chunk_dims[i] != retrieved_chunk_dims[i]) {
+ H5_FAILED();
+            HDprintf("    chunk dimensions retrieved from DCPL didn't match the originally specified "
+                     "chunk dimensions\n");
+ goto error;
+ }
+ }
+
+ if (H5Pclose(dcpl_id) < 0)
+ TEST_ERROR;
+ if (H5Sclose(fspace_id) < 0)
+ TEST_ERROR;
+ if (H5Tclose(dset_dtype) < 0)
+ TEST_ERROR;
+ if (H5Dclose(dset_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Pclose(dcpl_id);
+ H5Sclose(fspace_id);
+ H5Tclose(dset_dtype);
+ H5Dclose(dset_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to check that a dataset composed of multiple chunks
+ * can be written and read correctly. When reading back the
+ * chunks of the dataset, the file dataspace and memory dataspace
+ * used are the same shape.
+ */
+static int
+test_write_multi_chunk_dataset_same_shape_read(void)
+{
+ hsize_t dims[DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK] = {100, 100};
+ hsize_t chunk_dims[DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK] = {10, 10};
+ hsize_t retrieved_chunk_dims[DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK];
+ hsize_t start[DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK];
+ hsize_t count[DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK];
+ size_t i, data_size, chunk_size;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t dcpl_id = H5I_INVALID_HID;
+ hid_t fspace_id = H5I_INVALID_HID;
+ hid_t mspace_id = H5I_INVALID_HID;
+ void *write_buf = NULL;
+ int read_buf[10][10];
+
+ TESTING("write to dataset with multiple chunks using same shaped dataspaces");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_GET_PLIST)) {
+ SKIPPED();
+        HDprintf("    API functions for basic file, group, basic or more dataset, or get property list "
+                 "aren't supported with this connector\n");
+ return 0;
+ }
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_GROUP_NAME,
+ H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container sub-group '%s'\n",
+ DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((fspace_id = H5Screate_simple(DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK, dims,
+ NULL)) < 0)
+ TEST_ERROR;
+
+ if ((dcpl_id = H5Pcreate(H5P_DATASET_CREATE)) < 0)
+ TEST_ERROR;
+
+ if (H5Pset_chunk(dcpl_id, DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK, chunk_dims) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" failed to set chunking on DCPL\n");
+ goto error;
+ }
+
+ if ((dset_id = H5Dcreate2(group_id, DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_NAME,
+ DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_DTYPE, fspace_id,
+ H5P_DEFAULT, dcpl_id, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create dataset '%s'\n",
+ DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_NAME);
+ goto error;
+ }
+
+ /*
+ * See if a copy of the DCPL reports the correct chunking.
+ */
+ if (H5Pclose(dcpl_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close DCPL\n");
+ goto error;
+ }
+
+ if ((dcpl_id = H5Dget_create_plist(dset_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve copy of DCPL\n");
+ goto error;
+ }
+
+ memset(retrieved_chunk_dims, 0, sizeof(retrieved_chunk_dims));
+ if (H5Pget_chunk(dcpl_id, DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK,
+ retrieved_chunk_dims) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve chunking info\n");
+ goto error;
+ }
+
+ for (i = 0; i < DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK; i++) {
+ if (chunk_dims[i] != retrieved_chunk_dims[i]) {
+ H5_FAILED();
+            HDprintf("    chunk dimensions retrieved from DCPL didn't match the originally specified "
+                     "chunk dimensions\n");
+ goto error;
+ }
+ }
+
+ for (i = 0, chunk_size = 1; i < DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK; i++)
+ chunk_size *= chunk_dims[i];
+ chunk_size *= DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_DTYPESIZE;
+
+ for (i = 0, data_size = 1; i < DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK; i++)
+ data_size *= dims[i];
+ data_size *= DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_DTYPESIZE;
+
+ if (NULL == (write_buf = HDmalloc(data_size)))
+ TEST_ERROR;
+
+ /*
+ * Ensure that each underlying chunk contains the values
+ *
+ * chunk_index .. (chunk_nelemts - 1) + chunk_index.
+ *
+ * That is to say, for a chunk size of 10 x 10, chunk 0
+ * contains the values
+ *
+ * 0 .. 99
+ *
+ * while the next chunk contains the values
+ *
+ * 1 .. 100
+ *
+ * and so on.
+ */
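+    /*
+     * Concrete example for the 100 x 100 dataset with 10 x 10 chunks used
+     * here: the element at coordinates (12, 34) falls in chunk (1, 3),
+     * i.e. chunk index 13, at offset (2, 4) within that chunk, so the loop
+     * below assigns it the value 13 + (2 * 10) + 4 = 37.
+     */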
+ for (i = 0; i < data_size / DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_DTYPESIZE; i++) {
+ size_t j;
+ size_t base;
+ size_t tot_adjust;
+
+ /*
+ * Calculate a starting base value by taking the index value mod
+ * the size of a chunk in each dimension.
+ */
+ for (j = 0, base = i; j < DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK; j++)
+ if (chunk_dims[j] > 1 && base >= chunk_dims[j])
+ base %= chunk_dims[j];
+
+ /*
+ * Calculate the adjustment in each dimension.
+ */
+ for (j = 0, tot_adjust = 0; j < DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK; j++) {
+ if (j == (DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK - 1))
+ tot_adjust += (i % dims[j]) / chunk_dims[j];
+ else {
+ size_t k;
+ size_t n_faster_elemts;
+
+ /*
+ * Calculate the number of elements in faster dimensions.
+ */
+ for (k = j + 1, n_faster_elemts = 1;
+ k < DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK; k++)
+ n_faster_elemts *= dims[k];
+
+ tot_adjust += (((i / n_faster_elemts) / chunk_dims[j]) * (dims[j + 1] / chunk_dims[j + 1])) +
+ (((i / n_faster_elemts) % chunk_dims[j]) * chunk_dims[j + 1]);
+ }
+ }
+
+ ((int *)write_buf)[i] = (int)(base + tot_adjust);
+ }
+
+ /*
+ * Write every chunk in the dataset.
+ */
+ if (H5Dwrite(dset_id, DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_DTYPE, H5S_ALL, H5S_ALL,
+ H5P_DEFAULT, write_buf) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't write to dataset '%s'\n",
+ DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_NAME);
+ goto error;
+ }
+
+ if (write_buf) {
+ HDfree(write_buf);
+ write_buf = NULL;
+ }
+
+ if (fspace_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(fspace_id);
+ }
+ H5E_END_TRY;
+ fspace_id = H5I_INVALID_HID;
+ }
+ if (dset_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Dclose(dset_id);
+ }
+ H5E_END_TRY;
+ dset_id = H5I_INVALID_HID;
+ }
+
+ if ((dset_id =
+ H5Dopen2(group_id, DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open dataset '%s'\n",
+ DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_NAME);
+ goto error;
+ }
+
+ if ((fspace_id = H5Dget_space(dset_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get dataset dataspace\n");
+ goto error;
+ }
+
+ /*
+ * Create 2-dimensional memory dataspace for read buffer.
+ */
+ {
+ hsize_t mdims[] = {chunk_dims[0], chunk_dims[1]};
+
+ if ((mspace_id = H5Screate_simple(2, mdims, NULL)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to create memory dataspace\n");
+ goto error;
+ }
+ }
+
+ for (i = 0; i < DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK; i++) {
+ count[i] = chunk_dims[i];
+ }
+
+ /*
+ * Read every chunk in the dataset, checking the data for each one.
+ */
+ HDprintf("\n");
+ for (i = 0; i < data_size / chunk_size; i++) {
+ size_t j, k;
+
+ HDprintf("\r Reading chunk %zu", i);
+
+ for (j = 0; j < DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK; j++) {
+ if (dims[j] == chunk_dims[j])
+ start[j] = 0;
+ else if (j == (DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK - 1))
+ /* Fastest changing dimension */
+ start[j] = (i * chunk_dims[j]) % dims[j];
+ else
+ start[j] = ((i * chunk_dims[j + 1]) / dims[j + 1]) * (chunk_dims[j]);
+ }
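+        /*
+         * For the 100 x 100 dataset with 10 x 10 chunks used here, chunk i
+         * now has its upper-left corner at
+         * (start[0], start[1]) = ((i / 10) * 10, (i % 10) * 10).
+         */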
+
+ /*
+ * Adjust file dataspace selection for next chunk.
+ */
+ if (H5Sselect_hyperslab(fspace_id, H5S_SELECT_SET, start, NULL, count, NULL) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to set hyperslab selection\n");
+ goto error;
+ }
+
+ for (j = 0; j < chunk_dims[0]; j++)
+ for (k = 0; k < chunk_dims[1]; k++)
+ read_buf[j][k] = 0;
+
+ if (H5Dread(dset_id, DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_DTYPE, mspace_id, fspace_id,
+ H5P_DEFAULT, read_buf) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't read from dataset '%s'\n",
+ DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_NAME);
+ goto error;
+ }
+
+ for (j = 0; j < chunk_dims[0]; j++) {
+ for (k = 0; k < chunk_dims[1]; k++) {
+ if (read_buf[j][k] != (int)((j * chunk_dims[0]) + k + i)) {
+ H5_FAILED();
+ HDprintf(" data verification failed for chunk %lld\n", (long long)i);
+ goto error;
+ }
+ }
+ }
+ }
+
+ if (H5Pclose(dcpl_id) < 0)
+ TEST_ERROR;
+ if (H5Sclose(mspace_id) < 0)
+ TEST_ERROR;
+ if (H5Sclose(fspace_id) < 0)
+ TEST_ERROR;
+ if (H5Dclose(dset_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ if (write_buf)
+ HDfree(write_buf);
+ H5Pclose(dcpl_id);
+ H5Sclose(mspace_id);
+ H5Sclose(fspace_id);
+ H5Dclose(dset_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to check that a dataset composed of multiple chunks
+ * can be written and read correctly. When reading back the
+ * chunks of the dataset, the file dataspace and memory dataspace
+ * used are differently shaped.
+ */
+static int
+test_write_multi_chunk_dataset_diff_shape_read(void)
+{
+ hsize_t dims[DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK] = {100, 100};
+ hsize_t chunk_dims[DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK] = {10, 10};
+ hsize_t retrieved_chunk_dims[DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK];
+ hsize_t start[DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK];
+ hsize_t count[DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK];
+ size_t i, data_size, chunk_size;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t dcpl_id = H5I_INVALID_HID;
+ hid_t fspace_id = H5I_INVALID_HID;
+ hid_t mspace_id = H5I_INVALID_HID;
+ void *write_buf = NULL;
+ void *read_buf = NULL;
+
+ TESTING("write to dataset with multiple chunks using differently shaped dataspaces");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GET_PLIST)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file, group, dataset, or get property list aren't supported "
+ "with this connector\n");
+ return 0;
+ }
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_GROUP_NAME,
+ H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container sub-group '%s'\n",
+ DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((fspace_id = H5Screate_simple(DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK, dims,
+ NULL)) < 0)
+ TEST_ERROR;
+
+ if ((dcpl_id = H5Pcreate(H5P_DATASET_CREATE)) < 0)
+ TEST_ERROR;
+
+ if (H5Pset_chunk(dcpl_id, DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK, chunk_dims) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" failed to set chunking on DCPL\n");
+ goto error;
+ }
+
+ if ((dset_id = H5Dcreate2(group_id, DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_NAME,
+ DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_DTYPE, fspace_id,
+ H5P_DEFAULT, dcpl_id, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create dataset '%s'\n",
+ DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_NAME);
+ goto error;
+ }
+
+ /*
+ * See if a copy of the DCPL reports the correct chunking.
+ */
+ if (H5Pclose(dcpl_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close DCPL\n");
+ goto error;
+ }
+
+ if ((dcpl_id = H5Dget_create_plist(dset_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve copy of DCPL\n");
+ goto error;
+ }
+
+ memset(retrieved_chunk_dims, 0, sizeof(retrieved_chunk_dims));
+ if (H5Pget_chunk(dcpl_id, DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK,
+ retrieved_chunk_dims) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve chunking info\n");
+ goto error;
+ }
+
+ for (i = 0; i < DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK; i++) {
+ if (chunk_dims[i] != retrieved_chunk_dims[i]) {
+ H5_FAILED();
+            HDprintf("    chunk dimensions retrieved from DCPL didn't match the originally specified "
+                     "chunk dimensions\n");
+ goto error;
+ }
+ }
+
+ for (i = 0, chunk_size = 1; i < DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK; i++)
+ chunk_size *= chunk_dims[i];
+ chunk_size *= DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_DTYPESIZE;
+
+ for (i = 0, data_size = 1; i < DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK; i++)
+ data_size *= dims[i];
+ data_size *= DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_DTYPESIZE;
+
+ if (NULL == (write_buf = HDmalloc(data_size)))
+ TEST_ERROR;
+
+ /*
+ * Ensure that each underlying chunk contains the values
+ *
+ * chunk_index .. (chunk_nelemts - 1) + chunk_index.
+ *
+ * That is to say, for a chunk size of 10 x 10, chunk 0
+ * contains the values
+ *
+ * 0 .. 99
+ *
+ * while the next chunk contains the values
+ *
+ * 1 .. 100
+ *
+ * and so on.
+ */
+ for (i = 0; i < data_size / DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_DTYPESIZE; i++) {
+ size_t j;
+ size_t base;
+ size_t tot_adjust;
+
+ /*
+ * Calculate a starting base value by taking the index value mod
+ * the size of a chunk in each dimension.
+ */
+ for (j = 0, base = i; j < DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK; j++)
+ if (chunk_dims[j] > 1 && base >= chunk_dims[j])
+ base %= chunk_dims[j];
+
+ /*
+ * Calculate the adjustment in each dimension.
+ */
+ for (j = 0, tot_adjust = 0; j < DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK; j++) {
+ if (j == (DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK - 1))
+ tot_adjust += (i % dims[j]) / chunk_dims[j];
+ else {
+ size_t k;
+ size_t n_faster_elemts;
+
+ /*
+ * Calculate the number of elements in faster dimensions.
+ */
+ for (k = j + 1, n_faster_elemts = 1;
+ k < DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK; k++)
+ n_faster_elemts *= dims[k];
+
+ tot_adjust += (((i / n_faster_elemts) / chunk_dims[j]) * (dims[j + 1] / chunk_dims[j + 1])) +
+ (((i / n_faster_elemts) % chunk_dims[j]) * chunk_dims[j + 1]);
+ }
+ }
+
+ ((int *)write_buf)[i] = (int)(base + tot_adjust);
+ }
+
+ /*
+ * Write every chunk in the dataset.
+ */
+ if (H5Dwrite(dset_id, DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_DTYPE, H5S_ALL, H5S_ALL,
+ H5P_DEFAULT, write_buf) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't write to dataset '%s'\n",
+ DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_NAME);
+ goto error;
+ }
+
+ if (write_buf) {
+ HDfree(write_buf);
+ write_buf = NULL;
+ }
+
+ if (fspace_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(fspace_id);
+ }
+ H5E_END_TRY;
+ fspace_id = H5I_INVALID_HID;
+ }
+ if (dset_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Dclose(dset_id);
+ }
+ H5E_END_TRY;
+ dset_id = H5I_INVALID_HID;
+ }
+
+ if ((dset_id =
+ H5Dopen2(group_id, DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open dataset '%s'\n",
+ DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_NAME);
+ goto error;
+ }
+
+ if ((fspace_id = H5Dget_space(dset_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get dataset dataspace\n");
+ goto error;
+ }
+
+ /*
+ * Allocate single chunk-sized read buffer.
+ */
+ if (NULL == (read_buf = HDmalloc(chunk_size))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for dataset read\n");
+ goto error;
+ }
+
+ /*
+ * Create 1-dimensional memory dataspace for read buffer.
+ */
+ {
+ hsize_t mdims[] = {chunk_size / DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_DTYPESIZE};
+
+ if ((mspace_id = H5Screate_simple(1, mdims, NULL)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to create memory dataspace\n");
+ goto error;
+ }
+ }
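+    /*
+     * The 1-D memory dataspace selects the same number of elements (100) as
+     * each 10 x 10 hyperslab selected in the file dataspace below, so every
+     * chunk is gathered into the flat read buffer even though the two
+     * dataspaces are shaped differently.
+     */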
+
+ for (i = 0; i < DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK; i++) {
+ count[i] = chunk_dims[i];
+ }
+
+ /*
+ * Read every chunk in the dataset, checking the data for each one.
+ */
+ HDprintf("\n");
+ for (i = 0; i < data_size / chunk_size; i++) {
+ size_t j;
+
+ HDprintf("\r Reading chunk %zu", i);
+
+ for (j = 0; j < DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK; j++) {
+ if (dims[j] == chunk_dims[j])
+ start[j] = 0;
+ else if (j == (DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK - 1))
+ /* Fastest changing dimension */
+ start[j] = (i * chunk_dims[j]) % dims[j];
+ else
+ start[j] = ((i * chunk_dims[j + 1]) / dims[j + 1]) * (chunk_dims[j]);
+ }
+
+ /*
+ * Adjust file dataspace selection for next chunk.
+ */
+ if (H5Sselect_hyperslab(fspace_id, H5S_SELECT_SET, start, NULL, count, NULL) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to set hyperslab selection\n");
+ goto error;
+ }
+
+ memset(read_buf, 0, chunk_size);
+ if (H5Dread(dset_id, DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_DTYPE, mspace_id, fspace_id,
+ H5P_DEFAULT, read_buf) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't read from dataset '%s'\n",
+ DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_NAME);
+ goto error;
+ }
+
+ for (j = 0; j < (hsize_t)chunk_size / DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_DTYPESIZE;
+ j++)
+ if (((int *)read_buf)[j] != (int)(j + i)) {
+ H5_FAILED();
+ HDprintf(" data verification failed for chunk %lld\n", (long long)i);
+ goto error;
+ }
+ }
+
+ if (read_buf) {
+ HDfree(read_buf);
+ read_buf = NULL;
+ }
+
+ if (H5Pclose(dcpl_id) < 0)
+ TEST_ERROR;
+ if (H5Sclose(mspace_id) < 0)
+ TEST_ERROR;
+ if (H5Sclose(fspace_id) < 0)
+ TEST_ERROR;
+ if (H5Dclose(dset_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ if (write_buf)
+ HDfree(write_buf);
+ if (read_buf)
+ HDfree(read_buf);
+ H5Pclose(dcpl_id);
+ H5Sclose(mspace_id);
+ H5Sclose(fspace_id);
+ H5Dclose(dset_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to check that a dataset composed of multiple chunks
+ * can be written and read correctly several times in a row.
+ * When reading back the chunks of the dataset, the file
+ * dataspace and memory dataspace used are the same shape.
+ */
+static int
+test_overwrite_multi_chunk_dataset_same_shape_read(void)
+{
+ hsize_t dims[DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK] = {100, 100};
+ hsize_t chunk_dims[DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK] = {10, 10};
+ hsize_t retrieved_chunk_dims[DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK];
+ hsize_t start[DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK];
+ hsize_t count[DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK];
+ size_t i, data_size, chunk_size;
+ size_t niter;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t dcpl_id = H5I_INVALID_HID;
+ hid_t fspace_id = H5I_INVALID_HID;
+ hid_t mspace_id = H5I_INVALID_HID;
+ void *write_buf = NULL;
+ int read_buf[10][10];
+
+ TESTING("several overwrites to dataset with multiple chunks using same shaped dataspaces");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GET_PLIST)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file, group, dataset, or get property list aren't supported "
+ "with this connector\n");
+ return 0;
+ }
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_GROUP_NAME,
+ H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container sub-group '%s'\n",
+ DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((fspace_id = H5Screate_simple(DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK,
+ dims, NULL)) < 0)
+ TEST_ERROR;
+
+ if ((dcpl_id = H5Pcreate(H5P_DATASET_CREATE)) < 0)
+ TEST_ERROR;
+
+ if (H5Pset_chunk(dcpl_id, DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK,
+ chunk_dims) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to set chunking on DCPL\n");
+ goto error;
+ }
+
+ if ((dset_id = H5Dcreate2(group_id, DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_NAME,
+ DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_DTYPE, fspace_id,
+ H5P_DEFAULT, dcpl_id, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create dataset '%s'\n",
+ DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_NAME);
+ goto error;
+ }
+
+ /*
+ * See if a copy of the DCPL reports the correct chunking.
+ */
+ if (H5Pclose(dcpl_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close DCPL\n");
+ goto error;
+ }
+
+ if ((dcpl_id = H5Dget_create_plist(dset_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve copy of DCPL\n");
+ goto error;
+ }
+
+ memset(retrieved_chunk_dims, 0, sizeof(retrieved_chunk_dims));
+ if (H5Pget_chunk(dcpl_id, DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK,
+ retrieved_chunk_dims) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve chunking info\n");
+ goto error;
+ }
+
+ for (i = 0; i < DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK; i++) {
+ if (chunk_dims[i] != retrieved_chunk_dims[i]) {
+ H5_FAILED();
+            HDprintf("    chunk dimensions retrieved from DCPL didn't match the originally specified "
+                     "chunk dimensions\n");
+ goto error;
+ }
+ }
+
+ for (i = 0, chunk_size = 1; i < DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK; i++)
+ chunk_size *= chunk_dims[i];
+ chunk_size *= DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_DTYPESIZE;
+
+ for (i = 0, data_size = 1; i < DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK; i++)
+ data_size *= dims[i];
+ data_size *= DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_DTYPESIZE;
+
+ if (NULL == (write_buf = HDmalloc(data_size)))
+ TEST_ERROR;
+
+ /*
+ * Create 2-dimensional memory dataspace for read buffer.
+ */
+ {
+ hsize_t mdims[] = {chunk_dims[0], chunk_dims[1]};
+
+ if ((mspace_id = H5Screate_simple(2, mdims, NULL)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to create memory dataspace\n");
+ goto error;
+ }
+ }
+
+ for (i = 0; i < DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK; i++) {
+ count[i] = chunk_dims[i];
+ }
+
+ HDprintf("\n");
+ for (niter = 0; niter < DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_NITERS; niter++) {
+ memset(write_buf, 0, data_size);
+
+ /*
+ * Ensure that each underlying chunk contains the values
+ *
+ * chunk_index .. (chunk_nelemts - 1) + chunk_index.
+ *
+ * That is to say, for a chunk size of 10 x 10, chunk 0
+ * contains the values
+ *
+ * 0 .. 99
+ *
+ * while the next chunk contains the values
+ *
+ * 1 .. 100
+ *
+ * and so on. On each iteration, we add 1 to the previous
+ * values.
+ */
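+        /*
+         * On iteration 'niter', chunk n is therefore written with the values
+         * (n + niter) .. (99 + n + niter), which is what the read-back check
+         * below expects.
+         */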
+ for (i = 0; i < data_size / DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_DTYPESIZE; i++) {
+ size_t j;
+ size_t base;
+ size_t tot_adjust;
+
+ /*
+ * Calculate a starting base value by taking the index value mod
+ * the size of a chunk in each dimension.
+ */
+ for (j = 0, base = i; j < DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK; j++)
+ if (chunk_dims[j] > 1 && base >= chunk_dims[j])
+ base %= chunk_dims[j];
+
+ /*
+ * Calculate the adjustment in each dimension.
+ */
+ for (j = 0, tot_adjust = 0;
+ j < DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK; j++) {
+ if (j == (DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK - 1))
+ tot_adjust += (i % dims[j]) / chunk_dims[j];
+ else {
+ size_t k;
+ size_t n_faster_elemts;
+
+ /*
+ * Calculate the number of elements in faster dimensions.
+ */
+ for (k = j + 1, n_faster_elemts = 1;
+ k < DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK; k++)
+ n_faster_elemts *= dims[k];
+
+ tot_adjust +=
+ (((i / n_faster_elemts) / chunk_dims[j]) * (dims[j + 1] / chunk_dims[j + 1])) +
+ (((i / n_faster_elemts) % chunk_dims[j]) * chunk_dims[j + 1]);
+ }
+ }
+
+ ((int *)write_buf)[i] = (int)(base + tot_adjust + niter);
+ }
+
+ /*
+ * Write every chunk in the dataset.
+ */
+ if (H5Dwrite(dset_id, DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_DTYPE, H5S_ALL, H5S_ALL,
+ H5P_DEFAULT, write_buf) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't write to dataset '%s'\n",
+ DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_NAME);
+ goto error;
+ }
+
+ if (fspace_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(fspace_id);
+ }
+ H5E_END_TRY;
+ fspace_id = H5I_INVALID_HID;
+ }
+ if (dset_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Dclose(dset_id);
+ }
+ H5E_END_TRY;
+ dset_id = H5I_INVALID_HID;
+ }
+
+ if ((dset_id = H5Dopen2(group_id, DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_NAME,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open dataset '%s'\n",
+ DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_NAME);
+ goto error;
+ }
+
+ if ((fspace_id = H5Dget_space(dset_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get dataset dataspace\n");
+ goto error;
+ }
+
+ /*
+ * Read every chunk in the dataset, checking the data for each one.
+ */
+ for (i = 0; i < data_size / chunk_size; i++) {
+ size_t j, k;
+
+ HDprintf("\r Reading chunk %zu", i);
+
+ for (j = 0; j < DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK; j++) {
+ if (dims[j] == chunk_dims[j])
+ start[j] = 0;
+ else if (j == (DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK - 1))
+ /* Fastest changing dimension */
+ start[j] = (i * chunk_dims[j]) % dims[j];
+ else
+ start[j] = ((i * chunk_dims[j + 1]) / dims[j + 1]) * (chunk_dims[j]);
+ }
+
+ /*
+ * Adjust file dataspace selection for next chunk.
+ */
+ if (H5Sselect_hyperslab(fspace_id, H5S_SELECT_SET, start, NULL, count, NULL) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to set hyperslab selection\n");
+ goto error;
+ }
+
+ for (j = 0; j < chunk_dims[0]; j++)
+ for (k = 0; k < chunk_dims[1]; k++)
+ read_buf[j][k] = 0;
+
+ if (H5Dread(dset_id, DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_DTYPE, mspace_id,
+ fspace_id, H5P_DEFAULT, read_buf) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't read from dataset '%s'\n",
+ DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_NAME);
+ goto error;
+ }
+
+ for (j = 0; j < chunk_dims[0]; j++) {
+ for (k = 0; k < chunk_dims[1]; k++) {
+ if (read_buf[j][k] != (int)((j * chunk_dims[0]) + k + i + niter)) {
+ H5_FAILED();
+ HDprintf(" data verification failed for chunk %lld\n", (long long)i);
+ goto error;
+ }
+ }
+ }
+ }
+ }
+
+ if (write_buf) {
+ HDfree(write_buf);
+ write_buf = NULL;
+ }
+
+ if (H5Pclose(dcpl_id) < 0)
+ TEST_ERROR;
+ if (H5Sclose(mspace_id) < 0)
+ TEST_ERROR;
+ if (H5Sclose(fspace_id) < 0)
+ TEST_ERROR;
+ if (H5Dclose(dset_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ if (write_buf)
+ HDfree(write_buf);
+ H5Pclose(dcpl_id);
+ H5Sclose(mspace_id);
+ H5Sclose(fspace_id);
+ H5Dclose(dset_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to check that a dataset composed of multiple chunks
+ * can be written and read correctly several times in a row.
+ * When reading back the chunks of the dataset, the file
+ * dataspace and memory dataspace used are differently shaped.
+ */
+static int
+test_overwrite_multi_chunk_dataset_diff_shape_read(void)
+{
+ hsize_t dims[DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK] = {100, 100};
+ hsize_t chunk_dims[DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK] = {10, 10};
+ hsize_t retrieved_chunk_dims[DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK];
+ hsize_t start[DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK];
+ hsize_t count[DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK];
+ size_t i, data_size, chunk_size;
+ size_t niter;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t dcpl_id = H5I_INVALID_HID;
+ hid_t fspace_id = H5I_INVALID_HID;
+ hid_t mspace_id = H5I_INVALID_HID;
+ void *write_buf = NULL;
+ void *read_buf = NULL;
+
+ TESTING("several overwrites to dataset with multiple chunks using differently shaped dataspaces");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GET_PLIST)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file, group, dataset, or get property list aren't supported "
+ "with this connector\n");
+ return 0;
+ }
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_GROUP_NAME,
+ H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container sub-group '%s'\n",
+ DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((fspace_id = H5Screate_simple(DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK,
+ dims, NULL)) < 0)
+ TEST_ERROR;
+
+ if ((dcpl_id = H5Pcreate(H5P_DATASET_CREATE)) < 0)
+ TEST_ERROR;
+
+ if (H5Pset_chunk(dcpl_id, DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK,
+ chunk_dims) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to set chunking on DCPL\n");
+ goto error;
+ }
+
+ if ((dset_id = H5Dcreate2(group_id, DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_NAME,
+ DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_DTYPE, fspace_id,
+ H5P_DEFAULT, dcpl_id, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create dataset '%s'\n",
+ DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_NAME);
+ goto error;
+ }
+
+ /*
+ * See if a copy of the DCPL reports the correct chunking.
+ */
+ if (H5Pclose(dcpl_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close DCPL\n");
+ goto error;
+ }
+
+ if ((dcpl_id = H5Dget_create_plist(dset_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve copy of DCPL\n");
+ goto error;
+ }
+
+ memset(retrieved_chunk_dims, 0, sizeof(retrieved_chunk_dims));
+ if (H5Pget_chunk(dcpl_id, DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK,
+ retrieved_chunk_dims) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve chunking info\n");
+ goto error;
+ }
+
+ for (i = 0; i < DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK; i++) {
+ if (chunk_dims[i] != retrieved_chunk_dims[i]) {
+ H5_FAILED();
+            HDprintf("    chunk dimensions retrieved from DCPL didn't match the originally specified "
+                     "chunk dimensions\n");
+ goto error;
+ }
+ }
+
+ for (i = 0, chunk_size = 1; i < DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK; i++)
+ chunk_size *= chunk_dims[i];
+ chunk_size *= DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_DTYPESIZE;
+
+ for (i = 0, data_size = 1; i < DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK; i++)
+ data_size *= dims[i];
+ data_size *= DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_DTYPESIZE;
+
+ if (NULL == (write_buf = HDmalloc(data_size)))
+ TEST_ERROR;
+
+ /*
+ * Allocate single chunk-sized read buffer.
+ */
+ if (NULL == (read_buf = HDmalloc(chunk_size))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for dataset read\n");
+ goto error;
+ }
+
+ /*
+ * Create 1-dimensional memory dataspace for read buffer.
+ */
+ {
+ hsize_t mdims[] = {chunk_size / DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_DTYPESIZE};
+
+ if ((mspace_id = H5Screate_simple(1, mdims, NULL)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to create memory dataspace\n");
+ goto error;
+ }
+ }
+
+ for (i = 0; i < DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK; i++) {
+ count[i] = chunk_dims[i];
+ }
+
+ HDprintf("\n");
+ for (niter = 0; niter < DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_NITERS; niter++) {
+ memset(write_buf, 0, data_size);
+
+ /*
+ * Ensure that each underlying chunk contains the values
+ *
+ * chunk_index .. (chunk_nelemts - 1) + chunk_index.
+ *
+ * That is to say, for a chunk size of 10 x 10, chunk 0
+ * contains the values
+ *
+ * 0 .. 99
+ *
+ * while the next chunk contains the values
+ *
+ * 1 .. 100
+ *
+ * and so on. On each iteration, we add 1 to the previous
+ * values.
+ */
+ for (i = 0; i < data_size / DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_DTYPESIZE; i++) {
+ size_t j;
+ size_t base;
+ size_t tot_adjust;
+
+ /*
+ * Calculate a starting base value by taking the index value mod
+ * the size of a chunk in each dimension.
+ */
+ for (j = 0, base = i; j < DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK; j++)
+ if (chunk_dims[j] > 1 && base >= chunk_dims[j])
+ base %= chunk_dims[j];
+
+ /*
+ * Calculate the adjustment in each dimension.
+ */
+ for (j = 0, tot_adjust = 0;
+ j < DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK; j++) {
+ if (j == (DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK - 1))
+ tot_adjust += (i % dims[j]) / chunk_dims[j];
+ else {
+ size_t k;
+ size_t n_faster_elemts;
+
+ /*
+ * Calculate the number of elements in faster dimensions.
+ */
+ for (k = j + 1, n_faster_elemts = 1;
+ k < DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK; k++)
+ n_faster_elemts *= dims[k];
+
+ tot_adjust +=
+ (((i / n_faster_elemts) / chunk_dims[j]) * (dims[j + 1] / chunk_dims[j + 1])) +
+ (((i / n_faster_elemts) % chunk_dims[j]) * chunk_dims[j + 1]);
+ }
+ }
+
+ ((int *)write_buf)[i] = (int)(base + tot_adjust + niter);
+ }
+
+ /*
+ * Write every chunk in the dataset.
+ */
+ if (H5Dwrite(dset_id, DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_DTYPE, H5S_ALL, H5S_ALL,
+ H5P_DEFAULT, write_buf) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't write to dataset '%s'\n",
+ DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_NAME);
+ goto error;
+ }
+
+ if (fspace_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(fspace_id);
+ }
+ H5E_END_TRY;
+ fspace_id = H5I_INVALID_HID;
+ }
+ if (dset_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Dclose(dset_id);
+ }
+ H5E_END_TRY;
+ dset_id = H5I_INVALID_HID;
+ }
+
+ if ((dset_id = H5Dopen2(group_id, DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_NAME,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open dataset '%s'\n",
+ DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_NAME);
+ goto error;
+ }
+
+ if ((fspace_id = H5Dget_space(dset_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get dataset dataspace\n");
+ goto error;
+ }
+
+ /*
+ * Read every chunk in the dataset, checking the data for each one.
+ */
+ for (i = 0; i < data_size / chunk_size; i++) {
+ size_t j;
+
+ HDprintf("\r Reading chunk %zu", i);
+
+ for (j = 0; j < DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK; j++) {
+ if (dims[j] == chunk_dims[j])
+ start[j] = 0;
+ else if (j == (DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK - 1))
+ /* Fastest changing dimension */
+ start[j] = (i * chunk_dims[j]) % dims[j];
+ else
+ start[j] = ((i * chunk_dims[j + 1]) / dims[j + 1]) * (chunk_dims[j]);
+ }
+
+ /*
+ * Adjust file dataspace selection for next chunk.
+ */
+ if (H5Sselect_hyperslab(fspace_id, H5S_SELECT_SET, start, NULL, count, NULL) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to set hyperslab selection\n");
+ goto error;
+ }
+
+ memset(read_buf, 0, chunk_size);
+ if (H5Dread(dset_id, DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_DTYPE, mspace_id,
+ fspace_id, H5P_DEFAULT, read_buf) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't read from dataset '%s'\n",
+ DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_NAME);
+ goto error;
+ }
+
+ for (j = 0;
+ j < (hsize_t)chunk_size / DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_DTYPESIZE;
+ j++)
+ if (((int *)read_buf)[j] != (int)(j + i + niter)) {
+ H5_FAILED();
+ HDprintf(" data verification failed for chunk %lld\n", (long long)i);
+ goto error;
+ }
+ }
+ }
+
+ if (write_buf) {
+ HDfree(write_buf);
+ write_buf = NULL;
+ }
+
+ if (read_buf) {
+ HDfree(read_buf);
+ read_buf = NULL;
+ }
+
+ if (H5Pclose(dcpl_id) < 0)
+ TEST_ERROR;
+ if (H5Sclose(mspace_id) < 0)
+ TEST_ERROR;
+ if (H5Sclose(fspace_id) < 0)
+ TEST_ERROR;
+ if (H5Dclose(dset_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ if (write_buf)
+ HDfree(write_buf);
+ if (read_buf)
+ HDfree(read_buf);
+ H5Pclose(dcpl_id);
+ H5Sclose(mspace_id);
+ H5Sclose(fspace_id);
+ H5Dclose(dset_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to ensure that a partial chunk can be written and
+ * then read correctly when the selection used in a chunked
+ * dataset's file dataspace is H5S_ALL.
+ */
+#define FIXED_DIMSIZE 25
+#define FIXED_CHUNK_DIMSIZE 10
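+/*
+ * FIXED_DIMSIZE (25) is not a multiple of FIXED_CHUNK_DIMSIZE (10), so the
+ * chunks along the right and bottom edges of the 2-D dataset are partial
+ * chunks, which is what this test exercises.
+ */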
+static int
+test_read_partial_chunk_all_selection(void)
+{
+ DATASET_PARTIAL_CHUNK_READ_ALL_SEL_TEST_DSET_CTYPE write_buf[FIXED_DIMSIZE][FIXED_DIMSIZE];
+ DATASET_PARTIAL_CHUNK_READ_ALL_SEL_TEST_DSET_CTYPE read_buf[FIXED_DIMSIZE][FIXED_DIMSIZE];
+ hsize_t dims[DATASET_PARTIAL_CHUNK_READ_ALL_SEL_TEST_DSET_SPACE_RANK] = {FIXED_DIMSIZE, FIXED_DIMSIZE};
+ hsize_t chunk_dims[DATASET_PARTIAL_CHUNK_READ_ALL_SEL_TEST_DSET_SPACE_RANK] = {FIXED_CHUNK_DIMSIZE,
+ FIXED_CHUNK_DIMSIZE};
+ hsize_t retrieved_chunk_dims[DATASET_PARTIAL_CHUNK_READ_ALL_SEL_TEST_DSET_SPACE_RANK];
+ size_t i, j;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t dcpl_id = H5I_INVALID_HID;
+ hid_t fspace_id = H5I_INVALID_HID;
+
+ TESTING("reading a partial chunk using H5S_ALL for file dataspace");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GET_PLIST)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file, group, dataset, or get property list aren't supported "
+ "with this connector\n");
+ return 0;
+ }
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, DATASET_PARTIAL_CHUNK_READ_ALL_SEL_TEST_GROUP_NAME,
+ H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container sub-group '%s'\n",
+ DATASET_PARTIAL_CHUNK_READ_ALL_SEL_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((fspace_id = H5Screate_simple(DATASET_PARTIAL_CHUNK_READ_ALL_SEL_TEST_DSET_SPACE_RANK, dims, NULL)) <
+ 0)
+ TEST_ERROR;
+
+ if ((dcpl_id = H5Pcreate(H5P_DATASET_CREATE)) < 0)
+ TEST_ERROR;
+
+ if (H5Pset_chunk(dcpl_id, DATASET_PARTIAL_CHUNK_READ_ALL_SEL_TEST_DSET_SPACE_RANK, chunk_dims) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to set chunking on DCPL\n");
+ goto error;
+ }
+
+ if ((dset_id = H5Dcreate2(group_id, DATASET_PARTIAL_CHUNK_READ_ALL_SEL_TEST_DSET_NAME,
+ DATASET_PARTIAL_CHUNK_READ_ALL_SEL_TEST_DSET_DTYPE, fspace_id, H5P_DEFAULT,
+ dcpl_id, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create dataset '%s'\n", DATASET_PARTIAL_CHUNK_READ_ALL_SEL_TEST_DSET_NAME);
+ goto error;
+ }
+
+ /*
+ * See if a copy of the DCPL reports the correct chunking.
+ */
+ if (H5Pclose(dcpl_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close DCPL\n");
+ goto error;
+ }
+
+ if ((dcpl_id = H5Dget_create_plist(dset_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve copy of DCPL\n");
+ goto error;
+ }
+
+ HDmemset(retrieved_chunk_dims, 0, sizeof(retrieved_chunk_dims));
+ if (H5Pget_chunk(dcpl_id, DATASET_PARTIAL_CHUNK_READ_ALL_SEL_TEST_DSET_SPACE_RANK, retrieved_chunk_dims) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve chunking info\n");
+ goto error;
+ }
+
+ for (i = 0; i < DATASET_PARTIAL_CHUNK_READ_ALL_SEL_TEST_DSET_SPACE_RANK; i++) {
+ if (chunk_dims[i] != retrieved_chunk_dims[i]) {
+ H5_FAILED();
+ HDprintf(" chunk dimensionality retrieved from DCPL didn't match originally specified "
+ "dimensionality\n");
+ goto error;
+ }
+ }
+
+ for (i = 0; i < FIXED_DIMSIZE; i++)
+ for (j = 0; j < FIXED_DIMSIZE; j++)
+ write_buf[i][j] = (DATASET_PARTIAL_CHUNK_READ_ALL_SEL_TEST_DSET_CTYPE)((i * FIXED_DIMSIZE) + j);
+
+ for (i = 0; i < FIXED_DIMSIZE; i++)
+ for (j = 0; j < FIXED_DIMSIZE; j++)
+ read_buf[i][j] = -1;
+
+ if (H5Dwrite(dset_id, DATASET_PARTIAL_CHUNK_READ_ALL_SEL_TEST_DSET_DTYPE, H5S_ALL, H5S_ALL, H5P_DEFAULT,
+ write_buf) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to write to dataset\n");
+ goto error;
+ }
+
+ /*
+ * Close and re-open the dataset to ensure that the data is written.
+ */
+ if (H5Dclose(dset_id) < 0)
+ TEST_ERROR;
+ if ((dset_id = H5Dopen2(group_id, DATASET_PARTIAL_CHUNK_READ_ALL_SEL_TEST_DSET_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to re-open dataset\n");
+ goto error;
+ }
+
+ if (H5Dread(dset_id, DATASET_PARTIAL_CHUNK_READ_ALL_SEL_TEST_DSET_DTYPE, H5S_ALL, H5S_ALL, H5P_DEFAULT,
+ read_buf) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to read from dataset\n");
+ goto error;
+ }
+
+ for (i = 0; i < FIXED_DIMSIZE; i++)
+ for (j = 0; j < FIXED_DIMSIZE; j++)
+ if (read_buf[i][j] != (int)((i * FIXED_DIMSIZE) + j)) {
+ H5_FAILED();
+ HDprintf(
+ " data verification failed for read buffer element %lld: expected %lld but was %lld\n",
+ (long long)((i * FIXED_DIMSIZE) + j), (long long)((i * FIXED_DIMSIZE) + j),
+ (long long)read_buf[i][j]);
+ goto error;
+ }
+
+ if (H5Pclose(dcpl_id) < 0)
+ TEST_ERROR;
+ if (H5Sclose(fspace_id) < 0)
+ TEST_ERROR;
+ if (H5Dclose(dset_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Pclose(dcpl_id);
+ H5Sclose(fspace_id);
+ H5Dclose(dset_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+#undef FIXED_DIMSIZE
+#undef FIXED_CHUNK_DIMSIZE
+
+/*
+ * A test to ensure that a partial chunk can be written and
+ * then read correctly when the selection used in a chunked
+ * dataset's file dataspace is a hyperslab selection.
+ */
+#define FIXED_DIMSIZE 25
+#define FIXED_CHUNK_DIMSIZE 10
+#define FIXED_NCHUNKS 9 /* 3x3 grid of chunks for a 25x25 dataset with 10x10 chunks - adjust if the sizes above change */
+static int
+test_read_partial_chunk_hyperslab_selection(void)
+{
+ DATASET_PARTIAL_CHUNK_READ_HYPER_SEL_TEST_DSET_CTYPE write_buf[FIXED_CHUNK_DIMSIZE][FIXED_CHUNK_DIMSIZE];
+ DATASET_PARTIAL_CHUNK_READ_HYPER_SEL_TEST_DSET_CTYPE read_buf[FIXED_CHUNK_DIMSIZE][FIXED_CHUNK_DIMSIZE];
+ hsize_t dims[DATASET_PARTIAL_CHUNK_READ_HYPER_SEL_TEST_DSET_SPACE_RANK] = {FIXED_DIMSIZE, FIXED_DIMSIZE};
+ hsize_t chunk_dims[DATASET_PARTIAL_CHUNK_READ_HYPER_SEL_TEST_DSET_SPACE_RANK] = {FIXED_CHUNK_DIMSIZE,
+ FIXED_CHUNK_DIMSIZE};
+ hsize_t retrieved_chunk_dims[DATASET_PARTIAL_CHUNK_READ_HYPER_SEL_TEST_DSET_SPACE_RANK];
+ size_t i, j, k;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t dcpl_id = H5I_INVALID_HID;
+ hid_t mspace_id = H5I_INVALID_HID;
+ hid_t fspace_id = H5I_INVALID_HID;
+
+ TESTING("reading a partial chunk using a hyperslab selection in file dataspace");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GET_PLIST)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file, group, dataset, or get property list aren't supported "
+ "with this connector\n");
+ return 0;
+ }
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, DATASET_PARTIAL_CHUNK_READ_HYPER_SEL_TEST_GROUP_NAME,
+ H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container sub-group '%s'\n",
+ DATASET_PARTIAL_CHUNK_READ_HYPER_SEL_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((fspace_id =
+ H5Screate_simple(DATASET_PARTIAL_CHUNK_READ_HYPER_SEL_TEST_DSET_SPACE_RANK, dims, NULL)) < 0)
+ TEST_ERROR;
+
+ if ((dcpl_id = H5Pcreate(H5P_DATASET_CREATE)) < 0)
+ TEST_ERROR;
+
+ if (H5Pset_chunk(dcpl_id, DATASET_PARTIAL_CHUNK_READ_HYPER_SEL_TEST_DSET_SPACE_RANK, chunk_dims) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to set chunking on DCPL\n");
+ goto error;
+ }
+
+ if ((dset_id = H5Dcreate2(group_id, DATASET_PARTIAL_CHUNK_READ_HYPER_SEL_TEST_DSET_NAME,
+ DATASET_PARTIAL_CHUNK_READ_HYPER_SEL_TEST_DSET_DTYPE, fspace_id, H5P_DEFAULT,
+ dcpl_id, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create dataset '%s'\n", DATASET_PARTIAL_CHUNK_READ_HYPER_SEL_TEST_DSET_NAME);
+ goto error;
+ }
+
+ /*
+ * See if a copy of the DCPL reports the correct chunking.
+ */
+ if (H5Pclose(dcpl_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close DCPL\n");
+ goto error;
+ }
+
+ if ((dcpl_id = H5Dget_create_plist(dset_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve copy of DCPL\n");
+ goto error;
+ }
+
+ HDmemset(retrieved_chunk_dims, 0, sizeof(retrieved_chunk_dims));
+ if (H5Pget_chunk(dcpl_id, DATASET_PARTIAL_CHUNK_READ_HYPER_SEL_TEST_DSET_SPACE_RANK,
+ retrieved_chunk_dims) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve chunking info\n");
+ goto error;
+ }
+
+ for (i = 0; i < DATASET_PARTIAL_CHUNK_READ_HYPER_SEL_TEST_DSET_SPACE_RANK; i++) {
+ if (chunk_dims[i] != retrieved_chunk_dims[i]) {
+ H5_FAILED();
+ HDprintf(" chunk dimensionality retrieved from DCPL didn't match originally specified "
+ "dimensionality\n");
+ goto error;
+ }
+ }
+
+ for (i = 0; i < FIXED_CHUNK_DIMSIZE; i++)
+ for (j = 0; j < FIXED_CHUNK_DIMSIZE; j++)
+ write_buf[i][j] =
+ (DATASET_PARTIAL_CHUNK_READ_HYPER_SEL_TEST_DSET_CTYPE)((i * FIXED_CHUNK_DIMSIZE) + j);
+
+ for (i = 0; i < FIXED_CHUNK_DIMSIZE; i++)
+ for (j = 0; j < FIXED_CHUNK_DIMSIZE; j++)
+ read_buf[i][j] = -1;
+
+ /*
+ * Create chunk-sized memory dataspace for read buffer.
+ */
+ {
+ hsize_t mdims[DATASET_PARTIAL_CHUNK_READ_HYPER_SEL_TEST_DSET_SPACE_RANK] = {FIXED_CHUNK_DIMSIZE,
+ FIXED_CHUNK_DIMSIZE};
+
+ if ((mspace_id = H5Screate_simple(DATASET_PARTIAL_CHUNK_READ_HYPER_SEL_TEST_DSET_SPACE_RANK, mdims,
+ NULL)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to create memory dataspace\n");
+ goto error;
+ }
+ }
+
+ /*
+ * Write and read each chunk in the dataset.
+ */
+ for (i = 0; i < FIXED_NCHUNKS; i++) {
+ hsize_t start[DATASET_PARTIAL_CHUNK_READ_HYPER_SEL_TEST_DSET_SPACE_RANK];
+ hsize_t count[DATASET_PARTIAL_CHUNK_READ_HYPER_SEL_TEST_DSET_SPACE_RANK];
+ hbool_t on_partial_edge_chunk = FALSE;
+ size_t n_chunks_per_dim = (dims[1] / chunk_dims[1]) + (((dims[1] % chunk_dims[1]) > 0) ? 1 : 0);
+
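+ /*
+ * The 25x25 dataset with 10x10 chunks forms a 3x3 grid of chunks (FIXED_NCHUNKS == 9),
+ * iterated here in row-major order. A chunk in the last row (i / n_chunks_per_dim ==
+ * n_chunks_per_dim - 1) or last column ((i + 1) % n_chunks_per_dim == 0) only covers a
+ * 5-element remainder in that dimension and is treated as a partial edge chunk below.
+ */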
+ on_partial_edge_chunk =
+ (i > 0) && (((i + 1) % n_chunks_per_dim == 0) || (i / n_chunks_per_dim == n_chunks_per_dim - 1));
+
+ for (j = 0; j < DATASET_PARTIAL_CHUNK_READ_HYPER_SEL_TEST_DSET_SPACE_RANK; j++) {
+ if (j == 0)
+ start[j] = (i / n_chunks_per_dim) * chunk_dims[j];
+ else
+ start[j] = (i % n_chunks_per_dim) * chunk_dims[j];
+
+ if (on_partial_edge_chunk) {
+ /*
+ * Partial edge chunk
+ */
+ if (j == 0) {
+ if (i / n_chunks_per_dim == n_chunks_per_dim - 1)
+ /* This partial edge chunk spans the remainder of the first dimension */
+ count[j] = dims[j] - ((i / n_chunks_per_dim) * chunk_dims[j]);
+ else
+ /* This partial edge chunk spans the whole first dimension */
+ count[j] = chunk_dims[j];
+ }
+ else {
+ if (i % n_chunks_per_dim == n_chunks_per_dim - 1)
+ /* This partial edge chunk spans the remainder of the second dimension */
+ count[j] = dims[j] - ((i % n_chunks_per_dim) * chunk_dims[j]);
+ else
+ /* This partial edge chunk spans the whole second dimension */
+ count[j] = chunk_dims[j];
+ }
+ }
+ else
+ count[j] = chunk_dims[j];
+ }
+
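+ /*
+ * For a partial edge chunk, only a count[0] x count[1] sub-block of the
+ * chunk-sized memory buffer is transferred, so restrict the memory dataspace
+ * selection to match; full chunks use the entire memory dataspace.
+ */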
+ if (on_partial_edge_chunk) {
+ hsize_t m_start[DATASET_PARTIAL_CHUNK_READ_HYPER_SEL_TEST_DSET_SPACE_RANK] = {0, 0};
+
+ if (H5Sselect_hyperslab(mspace_id, H5S_SELECT_SET, m_start, NULL, count, NULL) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to select hyperslab in memory dataspace\n");
+ goto error;
+ }
+ }
+ else {
+ if (H5Sselect_all(mspace_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to select entire memory dataspace\n");
+ goto error;
+ }
+ }
+
+ if (H5Sselect_hyperslab(fspace_id, H5S_SELECT_SET, start, NULL, count, NULL) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to select hyperslab\n");
+ goto error;
+ }
+
+ HDprintf("\r Writing chunk %zu", i);
+
+ if (H5Dwrite(dset_id, DATASET_PARTIAL_CHUNK_READ_HYPER_SEL_TEST_DSET_DTYPE, mspace_id, fspace_id,
+ H5P_DEFAULT, write_buf) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to write to dataset\n");
+ goto error;
+ }
+
+ /*
+ * Close and re-open the dataset to ensure the data gets written.
+ */
+ if (H5Sclose(fspace_id) < 0)
+ TEST_ERROR;
+ if (H5Dclose(dset_id) < 0)
+ TEST_ERROR;
+ if ((dset_id = H5Dopen2(group_id, DATASET_PARTIAL_CHUNK_READ_HYPER_SEL_TEST_DSET_NAME, H5P_DEFAULT)) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" failed to re-open dataset\n");
+ goto error;
+ }
+
+ if ((fspace_id = H5Dget_space(dset_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve dataspace from dataset\n");
+ goto error;
+ }
+
+ if (H5Sselect_hyperslab(fspace_id, H5S_SELECT_SET, start, NULL, count, NULL) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to select hyperslab\n");
+ goto error;
+ }
+
+ HDprintf("\r Reading chunk %zu", i);
+
+ if (H5Dread(dset_id, DATASET_PARTIAL_CHUNK_READ_HYPER_SEL_TEST_DSET_DTYPE, mspace_id, fspace_id,
+ H5P_DEFAULT, read_buf) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to read from dataset\n");
+ goto error;
+ }
+
+ for (j = 0; j < FIXED_CHUNK_DIMSIZE; j++)
+ for (k = 0; k < FIXED_CHUNK_DIMSIZE; k++)
+ if (read_buf[j][k] != (int)((j * FIXED_CHUNK_DIMSIZE) + k)) {
+ H5_FAILED();
+ HDprintf(" data verification failed for read buffer element %lld: expected %lld but "
+ "was %lld\n",
+ (long long)((j * FIXED_CHUNK_DIMSIZE) + k),
+ (long long)((j * FIXED_CHUNK_DIMSIZE) + k), (long long)read_buf[j][k]);
+ goto error;
+ }
+ }
+
+ if (H5Pclose(dcpl_id) < 0)
+ TEST_ERROR;
+ if (H5Sclose(mspace_id) < 0)
+ TEST_ERROR;
+ if (H5Sclose(fspace_id) < 0)
+ TEST_ERROR;
+ if (H5Dclose(dset_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Pclose(dcpl_id);
+ H5Sclose(mspace_id);
+ H5Sclose(fspace_id);
+ H5Dclose(dset_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+#undef FIXED_DIMSIZE
+#undef FIXED_CHUNK_DIMSIZE
+#undef FIXED_NCHUNKS
+
+/*
+ * A test to ensure that a partial chunk can be written and
+ * then read correctly when the selection used in a chunked
+ * dataset's file dataspace is a point selection.
+ */
+#define FIXED_DIMSIZE 25
+#define FIXED_CHUNK_DIMSIZE 10
+static int
+test_read_partial_chunk_point_selection(void)
+{
+ TESTING("reading a partial chunk using a point selection in file dataspace");
+ SKIPPED();
+
+ return 1;
+}
+#undef FIXED_DIMSIZE
+#undef FIXED_CHUNK_DIMSIZE
+
+/*
+ * A test to verify that H5Dvlen_get_buf_size returns
+ * the correct size.
+ */
+static int
+test_get_vlen_buf_size(void)
+{
+ hvl_t wdata[DATASET_GET_VLEN_BUF_SIZE_DSET_SPACE_DIM]; /* Information to write */
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t dataset = H5I_INVALID_HID;
+ hid_t dspace_id = H5I_INVALID_HID;
+ hid_t dtype_id = H5I_INVALID_HID;
+ hbool_t freed_wdata = FALSE;
+ hsize_t dims1[] = {DATASET_GET_VLEN_BUF_SIZE_DSET_SPACE_DIM};
+ hsize_t size; /* Number of bytes which will be used */
+ unsigned i, j;
+
+ TESTING("H5Dvlen_get_buf_size");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file, group, dataset, or more aren't supported with this "
+ "connector\n");
+ return 0;
+ }
+
+ /* Allocate and initialize VL data to write */
+ for (i = 0; i < DATASET_GET_VLEN_BUF_SIZE_DSET_SPACE_DIM; i++) {
+ wdata[i].p = HDmalloc((i + 1) * sizeof(unsigned int));
+ wdata[i].len = i + 1;
+ for (j = 0; j < (i + 1); j++)
+ ((unsigned int *)wdata[i].p)[j] = i * 10 + j;
+ } /* end for */
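+ /*
+ * Each VL element i above points to a buffer of (i + 1) unsigned ints, so every
+ * element has a different length; H5Dvlen_get_buf_size should account for all
+ * of these buffers when queried below.
+ */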
+
+ /* Open the file */
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, DATASET_GET_VLEN_BUF_SIZE_GROUP_NAME, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container sub-group '%s'\n", DATASET_GET_VLEN_BUF_SIZE_GROUP_NAME);
+ goto error;
+ }
+
+ /* Create dataspace for dataset */
+ if ((dspace_id = H5Screate_simple(DATASET_GET_VLEN_BUF_SIZE_DSET_SPACE_RANK, dims1, NULL)) < 0)
+ TEST_ERROR;
+
+ /* Create a datatype to refer to */
+ if ((dtype_id = H5Tvlen_create(H5T_NATIVE_UINT)) < 0)
+ TEST_ERROR;
+
+ /* Create a dataset */
+ if ((dataset = H5Dcreate2(group_id, DATASET_GET_VLEN_BUF_SIZE_DSET_NAME, dtype_id, dspace_id, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0)
+ TEST_ERROR;
+
+ /* Write dataset to disk */
+ if (H5Dwrite(dataset, dtype_id, H5S_ALL, H5S_ALL, H5P_DEFAULT, wdata) < 0)
+ TEST_ERROR;
+
+ /* Make certain the correct amount of memory will be used */
+ if (H5Dvlen_get_buf_size(dataset, dtype_id, dspace_id, &size) < 0)
+ TEST_ERROR;
+
+ /* 10 elements allocated in total: 1 + 2 + 3 + 4 across the four array positions */
+ if (size !=
+ ((DATASET_GET_VLEN_BUF_SIZE_DSET_SPACE_DIM * (DATASET_GET_VLEN_BUF_SIZE_DSET_SPACE_DIM + 1)) / 2) *
+ sizeof(unsigned int)) {
+ H5_FAILED();
+ HDprintf(
+ " H5Dvlen_get_buf_size returned wrong size (%llu), compared to the correct size (%llu)\n",
+ (unsigned long long)size,
+ (unsigned long long)(((DATASET_GET_VLEN_BUF_SIZE_DSET_SPACE_DIM *
+ (DATASET_GET_VLEN_BUF_SIZE_DSET_SPACE_DIM + 1)) /
+ 2) *
+ sizeof(unsigned int)));
+ goto error;
+ }
+
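+ /*
+ * Reclaim the VL buffers allocated for wdata and record that they were freed so
+ * the error path doesn't attempt to reclaim them a second time.
+ */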
+ if (H5Treclaim(dtype_id, dspace_id, H5P_DEFAULT, wdata) < 0)
+ TEST_ERROR;
+ freed_wdata = TRUE;
+
+ if (H5Dclose(dataset) < 0)
+ TEST_ERROR;
+
+ if (H5Tclose(dtype_id) < 0)
+ TEST_ERROR;
+
+ if (H5Sclose(dspace_id) < 0)
+ TEST_ERROR;
+
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ if (!freed_wdata)
+ H5Treclaim(dtype_id, dspace_id, H5P_DEFAULT, wdata);
+ H5Sclose(dspace_id);
+ H5Tclose(dtype_id);
+ H5Dclose(dataset);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+} /* end test_get_vlen_buf_size() */
+
+int
+H5_api_dataset_test(void)
+{
+ size_t i;
+ int nerrors;
+
+ HDprintf("**********************************************\n");
+ HDprintf("* *\n");
+ HDprintf("* API Dataset Tests *\n");
+ HDprintf("* *\n");
+ HDprintf("**********************************************\n\n");
+
+ for (i = 0, nerrors = 0; i < ARRAY_LENGTH(dataset_tests); i++) {
+ nerrors += (*dataset_tests[i])() ? 1 : 0;
+ }
+
+ HDprintf("\n");
+
+ return nerrors;
+}
diff --git a/test/API/H5_api_dataset_test.h b/test/API/H5_api_dataset_test.h
new file mode 100644
index 0000000..5a50a06
--- /dev/null
+++ b/test/API/H5_api_dataset_test.h
@@ -0,0 +1,331 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+#ifndef H5_API_DATASET_TEST_H
+#define H5_API_DATASET_TEST_H
+
+#include "H5_api_test.h"
+
+int H5_api_dataset_test(void);
+
+/************************************************
+ * *
+ * API Dataset test defines *
+ * *
+ ************************************************/
+
+#define DATASET_CREATE_UNDER_ROOT_DSET_NAME "/dset_under_root"
+#define DATASET_CREATE_UNDER_ROOT_SPACE_RANK 2
+
+#define DATASET_CREATE_UNDER_EXISTING_SPACE_RANK 2
+#define DATASET_CREATE_UNDER_EXISTING_GROUP_NAME "dset_under_group_test"
+#define DATASET_CREATE_UNDER_EXISTING_DSET_NAME "nested_dset"
+
+#define DATASET_CREATE_INVALID_PARAMS_SPACE_RANK 2
+#define DATASET_CREATE_INVALID_PARAMS_GROUP_NAME "dset_create_invalid_params_test"
+#define DATASET_CREATE_INVALID_PARAMS_DSET_NAME "invalid_params_dset"
+
+#define DATASET_CREATE_ANONYMOUS_DATASET_NAME "anon_dset"
+#define DATASET_CREATE_ANONYMOUS_GROUP_NAME "anon_dset_creation_test"
+#define DATASET_CREATE_ANONYMOUS_SPACE_RANK 2
+
+#define DATASET_CREATE_ANONYMOUS_INVALID_PARAMS_DATASET_NAME "invalid_params_anon_dset"
+#define DATASET_CREATE_ANONYMOUS_INVALID_PARAMS_GROUP_NAME "anon_dset_creation_invalid_params_test"
+#define DATASET_CREATE_ANONYMOUS_INVALID_PARAMS_SPACE_RANK 2
+
+#define DATASET_CREATE_NULL_DATASPACE_TEST_SUBGROUP_NAME "dataset_with_null_space_test"
+#define DATASET_CREATE_NULL_DATASPACE_TEST_DSET_NAME "dataset_with_null_space"
+
+#define DATASET_CREATE_SCALAR_DATASPACE_TEST_SUBGROUP_NAME "dataset_with_scalar_space_test"
+#define DATASET_CREATE_SCALAR_DATASPACE_TEST_DSET_NAME "dataset_with_scalar_space"
+
+#define ZERO_DIM_DSET_TEST_GROUP_NAME "zero_dim_dset_test"
+#define ZERO_DIM_DSET_TEST_SPACE_RANK 1
+#define ZERO_DIM_DSET_TEST_DSET_NAME "zero_dim_dset"
+
+#define DATASET_MANY_CREATE_GROUP_NAME "group_for_many_datasets"
+#define DSET_NAME_BUF_SIZE 64u
+#define DATASET_NUMB 100u
+
+#define DATASET_SHAPE_TEST_DSET_BASE_NAME "dataset_shape_test"
+#define DATASET_SHAPE_TEST_SUBGROUP_NAME "dataset_shape_test"
+#define DATASET_SHAPE_TEST_NUM_ITERATIONS 5
+#define DATASET_SHAPE_TEST_MAX_DIMS 5
+
+#define DATASET_PREDEFINED_TYPE_TEST_SPACE_RANK 2
+#define DATASET_PREDEFINED_TYPE_TEST_BASE_NAME "predefined_type_dset"
+#define DATASET_PREDEFINED_TYPE_TEST_SUBGROUP_NAME "predefined_type_dataset_test"
+
+#define DATASET_STRING_TYPE_TEST_STRING_LENGTH 40
+#define DATASET_STRING_TYPE_TEST_SPACE_RANK 2
+#define DATASET_STRING_TYPE_TEST_DSET_NAME1 "fixed_length_string_dset"
+#define DATASET_STRING_TYPE_TEST_DSET_NAME2 "variable_length_string_dset"
+#define DATASET_STRING_TYPE_TEST_SUBGROUP_NAME "string_type_dataset_test"
+
+#define DATASET_COMPOUND_TYPE_TEST_SUBGROUP_NAME "compound_type_dataset_test"
+#define DATASET_COMPOUND_TYPE_TEST_DSET_NAME "compound_type_test"
+#define DATASET_COMPOUND_TYPE_TEST_MAX_SUBTYPES 5
+#define DATASET_COMPOUND_TYPE_TEST_MAX_PASSES 5
+#define DATASET_COMPOUND_TYPE_TEST_DSET_RANK 2
+
+#define DATASET_ENUM_TYPE_TEST_VAL_BASE_NAME "INDEX"
+#define DATASET_ENUM_TYPE_TEST_SUBGROUP_NAME "enum_type_dataset_test"
+#define DATASET_ENUM_TYPE_TEST_NUM_MEMBERS 16
+#define DATASET_ENUM_TYPE_TEST_SPACE_RANK 2
+#define DATASET_ENUM_TYPE_TEST_DSET_NAME1 "enum_native_dset"
+#define DATASET_ENUM_TYPE_TEST_DSET_NAME2 "enum_non_native_dset"
+
+#define DATASET_ARRAY_TYPE_TEST_SUBGROUP_NAME "array_type_dataset_test"
+#define DATASET_ARRAY_TYPE_TEST_DSET_NAME1 "array_type_test1"
+#define DATASET_ARRAY_TYPE_TEST_DSET_NAME2 "array_type_test2"
+#define DATASET_ARRAY_TYPE_TEST_DSET_NAME3 "array_type_test3"
+#define DATASET_ARRAY_TYPE_TEST_SPACE_RANK 2
+#define DATASET_ARRAY_TYPE_TEST_RANK1 2
+#define DATASET_ARRAY_TYPE_TEST_RANK2 2
+#define DATASET_ARRAY_TYPE_TEST_RANK3 2
+
+#define DATASET_CREATION_PROPERTIES_TEST_TRACK_TIMES_YES_DSET_NAME "track_times_true_test"
+#define DATASET_CREATION_PROPERTIES_TEST_TRACK_TIMES_NO_DSET_NAME "track_times_false_test"
+#define DATASET_CREATION_PROPERTIES_TEST_PHASE_CHANGE_DSET_NAME "attr_phase_change_test"
+#define DATASET_CREATION_PROPERTIES_TEST_ALLOC_TIMES_BASE_NAME "alloc_time_test"
+#define DATASET_CREATION_PROPERTIES_TEST_FILL_TIMES_BASE_NAME "fill_times_test"
+#define DATASET_CREATION_PROPERTIES_TEST_CRT_ORDER_BASE_NAME "creation_order_test"
+#define DATASET_CREATION_PROPERTIES_TEST_LAYOUTS_BASE_NAME "layout_test"
+#define DATASET_CREATION_PROPERTIES_TEST_FILTERS_DSET_NAME "filters_test"
+#define DATASET_CREATION_PROPERTIES_TEST_GROUP_NAME "creation_properties_test"
+#define DATASET_CREATION_PROPERTIES_TEST_CHUNK_DIM_RANK DATASET_CREATION_PROPERTIES_TEST_SHAPE_RANK
+#define DATASET_CREATION_PROPERTIES_TEST_MAX_COMPACT 12
+#define DATASET_CREATION_PROPERTIES_TEST_MIN_DENSE 8
+#define DATASET_CREATION_PROPERTIES_TEST_SHAPE_RANK 3
+
+#define DATASET_OPEN_INVALID_PARAMS_SPACE_RANK 2
+#define DATASET_OPEN_INVALID_PARAMS_GROUP_NAME "dataset_open_test"
+#define DATASET_OPEN_INVALID_PARAMS_DSET_NAME "open_test_dataset"
+
+#define DATASET_GET_SPACE_TYPE_TEST_SPACE_RANK 2
+#define DATASET_GET_SPACE_TYPE_TEST_GROUP_NAME "get_dset_space_type_test"
+#define DATASET_GET_SPACE_TYPE_TEST_DSET_NAME "get_space_type_test_dset"
+
+#define DATASET_GET_SPACE_TYPE_INVALID_PARAMS_TEST_SPACE_RANK 2
+#define DATASET_GET_SPACE_TYPE_INVALID_PARAMS_TEST_GROUP_NAME "get_dset_space_type_invalid_params_test"
+#define DATASET_GET_SPACE_TYPE_INVALID_PARAMS_TEST_DSET_NAME "get_space_type_test_invalid_params_dset"
+
+#define DATASET_PROPERTY_LIST_TEST_SUBGROUP_NAME "dataset_property_list_test_group"
+#define DATASET_PROPERTY_LIST_TEST_SPACE_RANK 2
+#define DATASET_PROPERTY_LIST_TEST_DSET_NAME1 "property_list_test_dataset1"
+#define DATASET_PROPERTY_LIST_TEST_DSET_NAME2 "property_list_test_dataset2"
+#define DATASET_PROPERTY_LIST_TEST_DSET_NAME3 "property_list_test_dataset3"
+#define DATASET_PROPERTY_LIST_TEST_DSET_NAME4 "property_list_test_dataset4"
+
+#define DATASET_SMALL_READ_TEST_ALL_DSET_SPACE_RANK 3
+#define DATASET_SMALL_READ_TEST_ALL_DSET_DTYPESIZE sizeof(int)
+#define DATASET_SMALL_READ_TEST_ALL_DSET_DTYPE H5T_NATIVE_INT
+#define DATASET_SMALL_READ_TEST_ALL_GROUP_NAME "dataset_small_read_all_test"
+#define DATASET_SMALL_READ_TEST_ALL_DSET_NAME "dataset_small_read_all_dset"
+
+#define DATASET_SMALL_READ_TEST_HYPERSLAB_DSET_SPACE_RANK 3
+#define DATASET_SMALL_READ_TEST_HYPERSLAB_DSET_DTYPESIZE sizeof(int)
+#define DATASET_SMALL_READ_TEST_HYPERSLAB_DSET_DTYPE H5T_NATIVE_INT
+#define DATASET_SMALL_READ_TEST_HYPERSLAB_GROUP_NAME "dataset_small_read_hyperslab_test"
+#define DATASET_SMALL_READ_TEST_HYPERSLAB_DSET_NAME "dataset_small_read_hyperslab_dset"
+
+#define DATASET_SMALL_READ_TEST_POINT_SELECTION_DSET_SPACE_RANK 3
+#define DATASET_SMALL_READ_TEST_POINT_SELECTION_DSET_DTYPESIZE sizeof(int)
+#define DATASET_SMALL_READ_TEST_POINT_SELECTION_DSET_DTYPE H5T_NATIVE_INT
+#define DATASET_SMALL_READ_TEST_POINT_SELECTION_NUM_POINTS 10
+#define DATASET_SMALL_READ_TEST_POINT_SELECTION_GROUP_NAME "dataset_small_read_point_selection_test"
+#define DATASET_SMALL_READ_TEST_POINT_SELECTION_DSET_NAME "dataset_small_read_point_selection_dset"
+
+#define DATASET_IO_POINT_GROUP_NAME "dataset_io_point_selection_test"
+#define DATASET_IO_POINT_DSET_NAME_NOCHUNK "dataset_io_point_selection_dset_nochunk"
+#define DATASET_IO_POINT_DSET_NAME_CHUNK "dataset_io_point_selection_dset_chunk"
+
+#ifndef NO_LARGE_TESTS
+#define DATASET_LARGE_READ_TEST_ALL_DSET_SPACE_RANK 3
+#define DATASET_LARGE_READ_TEST_ALL_DSET_DTYPESIZE sizeof(int)
+#define DATASET_LARGE_READ_TEST_ALL_DSET_DTYPE H5T_NATIVE_INT
+#define DATASET_LARGE_READ_TEST_ALL_GROUP_NAME "dataset_large_read_all_test"
+#define DATASET_LARGE_READ_TEST_ALL_DSET_NAME "dataset_large_read_all_dset"
+
+#define DATASET_LARGE_READ_TEST_HYPERSLAB_DSET_SPACE_RANK 3
+#define DATASET_LARGE_READ_TEST_HYPERSLAB_DSET_DTYPESIZE sizeof(int)
+#define DATASET_LARGE_READ_TEST_HYPERSLAB_DSET_DTYPE H5T_NATIVE_INT
+#define DATASET_LARGE_READ_TEST_HYPERSLAB_GROUP_NAME "dataset_large_read_hyperslab_test"
+#define DATASET_LARGE_READ_TEST_HYPERSLAB_DSET_NAME "dataset_large_read_hyperslab_dset"
+
+#define DATASET_LARGE_READ_TEST_POINT_SELECTION_DSET_SPACE_RANK 1
+#define DATASET_LARGE_READ_TEST_POINT_SELECTION_DSET_DTYPESIZE sizeof(int)
+#define DATASET_LARGE_READ_TEST_POINT_SELECTION_DSET_DTYPE H5T_NATIVE_INT
+#define DATASET_LARGE_READ_TEST_POINT_SELECTION_GROUP_NAME "dataset_large_read_point_selection_test"
+#define DATASET_LARGE_READ_TEST_POINT_SELECTION_DSET_NAME "dataset_large_read_point_selection_dset"
+#endif
+
+#define DATASET_READ_INVALID_PARAMS_TEST_DSET_SPACE_RANK 3
+#define DATASET_READ_INVALID_PARAMS_TEST_DSET_DTYPESIZE sizeof(int)
+#define DATASET_READ_INVALID_PARAMS_TEST_DSET_DTYPE H5T_NATIVE_INT
+#define DATASET_READ_INVALID_PARAMS_TEST_GROUP_NAME "dataset_read_invalid_params_test"
+#define DATASET_READ_INVALID_PARAMS_TEST_DSET_NAME "dataset_read_invalid_params_dset"
+
+#define DATASET_SMALL_WRITE_TEST_ALL_DSET_SPACE_RANK 3
+#define DATASET_SMALL_WRITE_TEST_ALL_DSET_DTYPESIZE sizeof(int)
+#define DATASET_SMALL_WRITE_TEST_ALL_DSET_DTYPE H5T_NATIVE_INT
+#define DATASET_SMALL_WRITE_TEST_ALL_GROUP_NAME "dataset_small_write_all_test"
+#define DATASET_SMALL_WRITE_TEST_ALL_DSET_NAME "dataset_small_write_all_dset"
+
+#define DATASET_SMALL_WRITE_TEST_HYPERSLAB_DSET_SPACE_RANK 3
+#define DATASET_SMALL_WRITE_TEST_HYPERSLAB_DSET_DTYPESIZE sizeof(int)
+#define DATASET_SMALL_WRITE_TEST_HYPERSLAB_DSET_DTYPE H5T_NATIVE_INT
+#define DATASET_SMALL_WRITE_TEST_HYPERSLAB_GROUP_NAME "dataset_small_write_hyperslab_test"
+#define DATASET_SMALL_WRITE_TEST_HYPERSLAB_DSET_NAME "dataset_small_write_hyperslab_dset"
+
+#define DATASET_SMALL_WRITE_TEST_POINT_SELECTION_DSET_SPACE_RANK 3
+#define DATASET_SMALL_WRITE_TEST_POINT_SELECTION_DSET_DTYPESIZE sizeof(int)
+#define DATASET_SMALL_WRITE_TEST_POINT_SELECTION_DSET_DTYPE H5T_NATIVE_INT
+#define DATASET_SMALL_WRITE_TEST_POINT_SELECTION_NUM_POINTS 10
+#define DATASET_SMALL_WRITE_TEST_POINT_SELECTION_GROUP_NAME "dataset_small_write_point_selection_test"
+#define DATASET_SMALL_WRITE_TEST_POINT_SELECTION_DSET_NAME "dataset_small_write_point_selection_dset"
+
+#ifndef NO_LARGE_TESTS
+#define DATASET_LARGE_WRITE_TEST_ALL_DSET_SPACE_RANK 3
+#define DATASET_LARGE_WRITE_TEST_ALL_DSET_DTYPESIZE sizeof(int)
+#define DATASET_LARGE_WRITE_TEST_ALL_DSET_DTYPE H5T_NATIVE_INT
+#define DATASET_LARGE_WRITE_TEST_ALL_GROUP_NAME "dataset_large_write_all_test"
+#define DATASET_LARGE_WRITE_TEST_ALL_DSET_NAME "dataset_large_write_all_dset"
+
+#define DATASET_LARGE_WRITE_TEST_HYPERSLAB_DSET_SPACE_RANK 3
+#define DATASET_LARGE_WRITE_TEST_HYPERSLAB_DSET_DTYPESIZE sizeof(int)
+#define DATASET_LARGE_WRITE_TEST_HYPERSLAB_DSET_DTYPE H5T_NATIVE_INT
+#define DATASET_LARGE_WRITE_TEST_HYPERSLAB_GROUP_NAME "dataset_large_write_hyperslab_test"
+#define DATASET_LARGE_WRITE_TEST_HYPERSLAB_DSET_NAME "dataset_large_write_hyperslab_dset"
+
+#define DATASET_LARGE_WRITE_TEST_POINT_SELECTION_DSET_SPACE_RANK 3
+#define DATASET_LARGE_WRITE_TEST_POINT_SELECTION_DSET_DTYPESIZE sizeof(int)
+#define DATASET_LARGE_WRITE_TEST_POINT_SELECTION_DSET_DTYPE H5T_NATIVE_INT
+#define DATASET_LARGE_WRITE_TEST_POINT_SELECTION_GROUP_NAME "dataset_large_write_point_selection_test"
+#define DATASET_LARGE_WRITE_TEST_POINT_SELECTION_DSET_NAME "dataset_large_write_point_selection_dset"
+#endif
+
+#define DATASET_DATA_VERIFY_WRITE_TEST_DSET_SPACE_RANK 3
+#define DATASET_DATA_VERIFY_WRITE_TEST_DSET_DTYPESIZE sizeof(int)
+#define DATASET_DATA_VERIFY_WRITE_TEST_DSET_DTYPE H5T_NATIVE_INT
+#define DATASET_DATA_VERIFY_WRITE_TEST_NUM_POINTS 10
+#define DATASET_DATA_VERIFY_WRITE_TEST_GROUP_NAME "dataset_data_write_verification_test"
+#define DATASET_DATA_VERIFY_WRITE_TEST_DSET_NAME "dataset_data_write_verification_dset"
+
+#define DATASET_WRITE_INVALID_PARAMS_TEST_DSET_SPACE_RANK 3
+#define DATASET_WRITE_INVALID_PARAMS_TEST_DSET_DTYPESIZE sizeof(int)
+#define DATASET_WRITE_INVALID_PARAMS_TEST_DSET_DTYPE H5T_NATIVE_INT
+#define DATASET_WRITE_INVALID_PARAMS_TEST_GROUP_NAME "dataset_write_invalid_params_test"
+#define DATASET_WRITE_INVALID_PARAMS_TEST_DSET_NAME "dataset_write_invalid_params_dset"
+
+#define DATASET_DATA_BUILTIN_CONVERSION_TEST_DSET_SPACE_RANK 3
+#define DATASET_DATA_BUILTIN_CONVERSION_TEST_MEM_DTYPESIZE sizeof(int)
+#define DATASET_DATA_BUILTIN_CONVERSION_TEST_MEM_DTYPE H5T_NATIVE_INT
+#define DATASET_DATA_BUILTIN_CONVERSION_TEST_NUM_POINTS 10
+#define DATASET_DATA_BUILTIN_CONVERSION_TEST_GROUP_NAME "dataset_builtin_conversion_verification_test"
+#define DATASET_DATA_BUILTIN_CONVERSION_TEST_DSET_NAME "dataset_builtin_conversion_verification_dset"
+
+#define DATASET_COMPOUND_PARTIAL_IO_DSET_DIMS 10
+#define DATASET_DATA_COMPOUND_PARTIAL_IO_TEST_GROUP_NAME "dataset_compound_partial_io_test"
+#define DATASET_DATA_COMPOUND_PARTIAL_IO_TEST_DSET_NAME "dataset_compound_partial_io_test"
+
+#define DATASET_SET_EXTENT_CHUNKED_UNLIMITED_TEST_SPACE_RANK 2
+#define DATASET_SET_EXTENT_CHUNKED_UNLIMITED_TEST_NUM_PASSES 3
+#define DATASET_SET_EXTENT_CHUNKED_UNLIMITED_TEST_GROUP_NAME "set_extent_chunked_unlimited_test"
+#define DATASET_SET_EXTENT_CHUNKED_UNLIMITED_TEST_DSET_NAME "set_extent_chunked_unlimited_test_dset"
+
+#define DATASET_SET_EXTENT_CHUNKED_FIXED_TEST_SPACE_RANK 2
+#define DATASET_SET_EXTENT_CHUNKED_FIXED_TEST_NUM_PASSES 3
+#define DATASET_SET_EXTENT_CHUNKED_FIXED_TEST_GROUP_NAME "set_extent_chunked_fixed_test"
+#define DATASET_SET_EXTENT_CHUNKED_FIXED_TEST_DSET_NAME "set_extent_chunked_fixed_test_dset"
+#define DATASET_SET_EXTENT_CHUNKED_FIXED_TEST_DSET_NAME2 "set_extent_chunked_fixed_test_dset2"
+
+#define DATASET_SET_EXTENT_DATA_TEST_SPACE_RANK 2
+#define DATASET_SET_EXTENT_DATA_TEST_GROUP_NAME "set_extent_chunked_data_test"
+#define DATASET_SET_EXTENT_DATA_TEST_DSET_NAME "set_extent_chunked_data_test_dset"
+#define DATASET_SET_EXTENT_DATA_TEST_SPACE_DIM 8
+
+#define DATASET_SET_EXTENT_DOUBLE_HANDLES_TEST_SPACE_RANK 2
+#define DATASET_SET_EXTENT_DOUBLE_HANDLES_TEST_GROUP_NAME "set_extent_chunked_double_handles_test"
+#define DATASET_SET_EXTENT_DOUBLE_HANDLES_TEST_DSET_NAME "set_extent_chunked_double_handles_test_dset"
+#define DATASET_SET_EXTENT_DOUBLE_HANDLES_TEST_SPACE_DIM 8
+
+#define DATASET_SET_EXTENT_INVALID_PARAMS_TEST_SPACE_RANK 2
+#define DATASET_SET_EXTENT_INVALID_PARAMS_TEST_GROUP_NAME "set_extent_invalid_params_test"
+#define DATASET_SET_EXTENT_INVALID_PARAMS_TEST_DSET_NAME "set_extent_invalid_params_test_dset"
+#define DATASET_SET_EXTENT_INVALID_LAYOUT_TEST_COMPACT_DSET_NAME "set_extent_invalid_layout_test_compact_dset"
+#define DATASET_SET_EXTENT_INVALID_LAYOUT_TEST_CONTIGUOUS_DSET_NAME \
+ "set_extent_invalid_layout_test_contiguous_dset"
+
+#define DATASET_SINGLE_CHUNK_TEST_SPACE_RANK 2
+#define DATASET_SINGLE_CHUNK_TEST_GROUP_NAME "single_chunk_dataset_test"
+#define DATASET_SINGLE_CHUNK_TEST_DSET_NAME "single_chunk_dataset"
+
+#define DATASET_SINGLE_CHUNK_WRITE_TEST_DSET_SPACE_RANK 2
+#define DATASET_SINGLE_CHUNK_WRITE_TEST_DSET_DTYPESIZE sizeof(int)
+#define DATASET_SINGLE_CHUNK_WRITE_TEST_DSET_DTYPE H5T_NATIVE_INT
+#define DATASET_SINGLE_CHUNK_WRITE_TEST_GROUP_NAME "single_chunk_dataset_write_test"
+#define DATASET_SINGLE_CHUNK_WRITE_TEST_DSET_NAME "single_chunk_dataset"
+
+#define DATASET_MULTI_CHUNK_TEST_SPACE_RANK 2
+#define DATASET_MULTI_CHUNK_TEST_GROUP_NAME "multi_chunk_dataset_test"
+#define DATASET_MULTI_CHUNK_TEST_DSET_NAME "multi_chunk_dataset"
+
+#define DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK 2
+#define DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_DTYPESIZE sizeof(int)
+#define DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_DTYPE H5T_NATIVE_INT
+#define DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_GROUP_NAME \
+ "multi_chunk_dataset_write_same_space_read_test"
+#define DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_NAME "multi_chunk_dataset"
+
+#define DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK 2
+#define DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_DTYPESIZE sizeof(int)
+#define DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_DTYPE H5T_NATIVE_INT
+#define DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_GROUP_NAME \
+ "multi_chunk_dataset_write_diff_space_read_test"
+#define DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_NAME "multi_chunk_dataset"
+
+#define DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK 2
+#define DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_DTYPESIZE sizeof(int)
+#define DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_DTYPE H5T_NATIVE_INT
+#define DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_GROUP_NAME \
+ "multi_chunk_dataset_same_space_overwrite_test"
+#define DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_NAME "multi_chunk_dataset"
+#define DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_NITERS 10
+
+#define DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK 2
+#define DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_DTYPESIZE sizeof(int)
+#define DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_DTYPE H5T_NATIVE_INT
+#define DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_GROUP_NAME \
+ "multi_chunk_dataset_diff_space_overwrite_test"
+#define DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_NAME "multi_chunk_dataset"
+#define DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_NITERS 10
+
+#define DATASET_PARTIAL_CHUNK_READ_ALL_SEL_TEST_DSET_SPACE_RANK 2
+#define DATASET_PARTIAL_CHUNK_READ_ALL_SEL_TEST_DSET_DTYPESIZE sizeof(int)
+#define DATASET_PARTIAL_CHUNK_READ_ALL_SEL_TEST_DSET_DTYPE H5T_NATIVE_INT
+#define DATASET_PARTIAL_CHUNK_READ_ALL_SEL_TEST_DSET_CTYPE int
+#define DATASET_PARTIAL_CHUNK_READ_ALL_SEL_TEST_GROUP_NAME "read_partial_chunk_all_sel_test"
+#define DATASET_PARTIAL_CHUNK_READ_ALL_SEL_TEST_DSET_NAME "read_partial_chunk_all_sel_dset"
+
+#define DATASET_PARTIAL_CHUNK_READ_HYPER_SEL_TEST_DSET_SPACE_RANK 2
+#define DATASET_PARTIAL_CHUNK_READ_HYPER_SEL_TEST_DSET_DTYPESIZE sizeof(int)
+#define DATASET_PARTIAL_CHUNK_READ_HYPER_SEL_TEST_DSET_DTYPE H5T_NATIVE_INT
+#define DATASET_PARTIAL_CHUNK_READ_HYPER_SEL_TEST_DSET_CTYPE int
+#define DATASET_PARTIAL_CHUNK_READ_HYPER_SEL_TEST_GROUP_NAME "read_partial_chunk_hyper_sel_test"
+#define DATASET_PARTIAL_CHUNK_READ_HYPER_SEL_TEST_DSET_NAME "read_partial_chunk_hyper_sel_dset"
+
+#define DATASET_GET_VLEN_BUF_SIZE_DSET_SPACE_RANK 1
+#define DATASET_GET_VLEN_BUF_SIZE_DSET_SPACE_DIM 4
+#define DATASET_GET_VLEN_BUF_SIZE_GROUP_NAME "get_vlen_buffer_size_group"
+#define DATASET_GET_VLEN_BUF_SIZE_DSET_NAME "get_vlen_buffer_size_dset"
+#endif /* H5_API_DATASET_TEST_H */
diff --git a/test/API/H5_api_datatype_test.c b/test/API/H5_api_datatype_test.c
new file mode 100644
index 0000000..9d53292
--- /dev/null
+++ b/test/API/H5_api_datatype_test.c
@@ -0,0 +1,2693 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+#include "H5_api_datatype_test.h"
+
+/*
+ * Defining PROBLEMATIC_TESTS compiles out tests that currently compromise internal HDF5 integrity.
+ */
+#define PROBLEMATIC_TESTS
+
+static int test_create_committed_datatype(void);
+static int test_create_committed_datatype_invalid_params(void);
+static int test_create_anonymous_committed_datatype(void);
+static int test_create_anonymous_committed_datatype_invalid_params(void);
+#ifndef PROBLEMATIC_TESTS
+static int test_create_committed_datatype_empty_types(void);
+#endif
+static int test_recommit_committed_type(void);
+static int test_open_committed_datatype(void);
+static int test_open_committed_datatype_invalid_params(void);
+static int test_reopen_committed_datatype_indirect(void);
+static int test_close_committed_datatype_invalid_id(void);
+static int test_datatype_property_lists(void);
+static int test_create_dataset_with_committed_type(void);
+static int test_create_attribute_with_committed_type(void);
+static int test_delete_committed_type(void);
+static int test_resurrect_datatype(void);
+static int test_flush_committed_datatype(void);
+static int test_flush_committed_datatype_invalid_params(void);
+static int test_refresh_committed_datatype(void);
+static int test_refresh_committed_datatype_invalid_params(void);
+#ifndef PROBLEMATIC_TESTS
+static int test_cant_commit_predefined(void);
+#endif
+static int test_cant_modify_committed_type(void);
+
+/*
+ * The array of datatype tests to be performed.
+ */
+static int (*datatype_tests[])(void) = {
+ test_create_committed_datatype,
+ test_create_committed_datatype_invalid_params,
+ test_create_anonymous_committed_datatype,
+ test_create_anonymous_committed_datatype_invalid_params,
+#ifndef PROBLEMATIC_TESTS
+ test_create_committed_datatype_empty_types,
+#endif
+ test_recommit_committed_type,
+ test_open_committed_datatype,
+ test_open_committed_datatype_invalid_params,
+ test_reopen_committed_datatype_indirect,
+ test_close_committed_datatype_invalid_id,
+ test_datatype_property_lists,
+ test_create_dataset_with_committed_type,
+ test_create_attribute_with_committed_type,
+ test_delete_committed_type,
+ test_resurrect_datatype,
+ test_flush_committed_datatype,
+ test_flush_committed_datatype_invalid_params,
+ test_refresh_committed_datatype,
+ test_refresh_committed_datatype_invalid_params,
+#ifndef PROBLEMATIC_TESTS
+ test_cant_commit_predefined,
+#endif
+ test_cant_modify_committed_type,
+};
+
+/*
+ * A test to check that a committed datatype can be created.
+ */
+static int
+test_create_committed_datatype(void)
+{
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t type_id = H5I_INVALID_HID;
+
+ TESTING("creation of a committed datatype");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & (H5VL_CAP_FLAG_FILE_BASIC)) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_STORED_DATATYPES)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file, group, or stored datatype aren't supported with this "
+ "connector\n");
+ return 0;
+ }
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, DATATYPE_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATATYPE_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, DATATYPE_CREATE_TEST_GROUP_NAME, H5P_DEFAULT, H5P_DEFAULT,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container sub-group '%s'\n", DATATYPE_CREATE_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((type_id = generate_random_datatype(H5T_NO_CLASS, FALSE)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create datatype to commit\n");
+ goto error;
+ }
+
+ if (H5Tcommit2(group_id, DATATYPE_CREATE_TEST_TYPE_NAME, type_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't commit datatype '%s'\n", DATATYPE_CREATE_TEST_TYPE_NAME);
+ goto error;
+ }
+
+ if (H5Tclose(type_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Tclose(type_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to check that a committed datatype can't be
+ * created when H5Tcommit2 is passed invalid parameters.
+ */
+static int
+test_create_committed_datatype_invalid_params(void)
+{
+ herr_t err_ret = -1;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t type_id = H5I_INVALID_HID;
+
+ TESTING_MULTIPART("H5Tcommit2 with invalid parameters");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & (H5VL_CAP_FLAG_FILE_BASIC)) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_STORED_DATATYPES)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file, group, or stored datatype aren't supported with this "
+ "connector\n");
+ return 0;
+ }
+
+ TESTING_2("test setup");
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, DATATYPE_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATATYPE_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, DATATYPE_CREATE_INVALID_PARAMS_TEST_GROUP_NAME, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container sub-group '%s'\n",
+ DATATYPE_CREATE_INVALID_PARAMS_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((type_id = generate_random_datatype(H5T_NO_CLASS, FALSE)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create datatype to commit\n");
+ goto error;
+ }
+
+ PASSED();
+
+ BEGIN_MULTIPART
+ {
+ PART_BEGIN(H5Tcommit2_invalid_loc_id)
+ {
+ TESTING_2("H5Tcommit2 with an invalid loc_id");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Tcommit2(H5I_INVALID_HID, DATATYPE_CREATE_INVALID_PARAMS_TEST_TYPE_NAME, type_id,
+ H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Tcommit2 succeeded with an invalid loc_id!\n");
+ PART_ERROR(H5Tcommit2_invalid_loc_id);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Tcommit2_invalid_loc_id);
+
+ PART_BEGIN(H5Tcommit2_invalid_type_name)
+ {
+ TESTING_2("H5Tcommit2 with an invalid datatype name");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Tcommit2(group_id, NULL, type_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Tcommit2 succeeded with an invalid datatype name!\n");
+ PART_ERROR(H5Tcommit2_invalid_type_name);
+ }
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Tcommit2(group_id, "", type_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Tcommit2 succeeded with an invalid datatype name!\n");
+ PART_ERROR(H5Tcommit2_invalid_type_name);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Tcommit2_invalid_type_name);
+
+ PART_BEGIN(H5Tcommit2_invalid_type_id)
+ {
+ TESTING_2("H5Tcommit2 with an invalid datatype ID");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Tcommit2(group_id, DATATYPE_CREATE_INVALID_PARAMS_TEST_TYPE_NAME, H5I_INVALID_HID,
+ H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Tcommit2 succeeded with an invalid datatype ID!\n");
+ PART_ERROR(H5Tcommit2_invalid_type_id);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Tcommit2_invalid_type_id);
+
+ PART_BEGIN(H5Tcommit2_invalid_lcpl)
+ {
+ TESTING_2("H5Tcommit2 with an invalid LCPL");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Tcommit2(group_id, DATATYPE_CREATE_INVALID_PARAMS_TEST_TYPE_NAME, type_id,
+ H5I_INVALID_HID, H5P_DEFAULT, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Tcommit2 succeeded with an invalid LCPL!\n");
+ PART_ERROR(H5Tcommit2_invalid_lcpl);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Tcommit2_invalid_lcpl);
+
+ PART_BEGIN(H5Tcommit2_invalid_tcpl)
+ {
+ TESTING_2("H5Tcommit2 with an invalid TCPL");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Tcommit2(group_id, DATATYPE_CREATE_INVALID_PARAMS_TEST_TYPE_NAME, type_id,
+ H5P_DEFAULT, H5I_INVALID_HID, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Tcommit2 succeeded with an invalid TCPL!\n");
+ PART_ERROR(H5Tcommit2_invalid_tcpl);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Tcommit2_invalid_tcpl);
+
+ PART_BEGIN(H5Tcommit2_invalid_tapl)
+ {
+ TESTING_2("H5Tcommit2 with an invalid TAPL");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Tcommit2(group_id, DATATYPE_CREATE_INVALID_PARAMS_TEST_TYPE_NAME, type_id,
+ H5P_DEFAULT, H5P_DEFAULT, H5I_INVALID_HID);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Tcommit2 succeeded with an invalid TAPL!\n");
+ PART_ERROR(H5Tcommit2_invalid_tapl);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Tcommit2_invalid_tapl);
+ }
+ END_MULTIPART;
+
+ TESTING_2("test cleanup");
+
+ if (H5Tclose(type_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Tclose(type_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to check that an anonymous committed datatype
+ * can be created with H5Tcommit_anon.
+ */
+static int
+test_create_anonymous_committed_datatype(void)
+{
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t type_id = H5I_INVALID_HID;
+
+ TESTING("creation of anonymous committed datatype");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & (H5VL_CAP_FLAG_FILE_BASIC)) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_STORED_DATATYPES)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file, group, or stored datatype aren't supported with this "
+ "connector\n");
+ return 0;
+ }
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, DATATYPE_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATATYPE_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, DATATYPE_CREATE_ANONYMOUS_GROUP_NAME, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container sub-group '%s'\n", DATATYPE_CREATE_ANONYMOUS_GROUP_NAME);
+ goto error;
+ }
+
+ if ((type_id = generate_random_datatype(H5T_NO_CLASS, FALSE)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create datatype\n");
+ goto error;
+ }
+
+ if (H5Tcommit_anon(group_id, type_id, H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't commit anonymous datatype\n");
+ goto error;
+ }
+
+ if (H5Tclose(type_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Tclose(type_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to check that a committed datatype can't be
+ * created when H5Tcommit_anon is passed invalid parameters.
+ */
+static int
+test_create_anonymous_committed_datatype_invalid_params(void)
+{
+ herr_t err_ret = -1;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t type_id = H5I_INVALID_HID;
+
+ TESTING_MULTIPART("H5Tcommit_anon with invalid parameters");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & (H5VL_CAP_FLAG_FILE_BASIC)) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_STORED_DATATYPES)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file, group, or stored datatype aren't supported with this "
+ "connector\n");
+ return 0;
+ }
+
+ TESTING_2("test setup");
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, DATATYPE_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATATYPE_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, DATATYPE_CREATE_ANONYMOUS_INVALID_PARAMS_GROUP_NAME,
+ H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container sub-group '%s'\n",
+ DATATYPE_CREATE_ANONYMOUS_INVALID_PARAMS_GROUP_NAME);
+ goto error;
+ }
+
+ if ((type_id = generate_random_datatype(H5T_NO_CLASS, FALSE)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create datatype\n");
+ goto error;
+ }
+
+ PASSED();
+
+ BEGIN_MULTIPART
+ {
+ PART_BEGIN(H5Tcommit_anon_invalid_loc_id)
+ {
+ TESTING_2("H5Tcommit_anon with an invalid loc_id");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Tcommit_anon(H5I_INVALID_HID, type_id, H5P_DEFAULT, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Tcommit_anon succeeded with an invalid loc_id!\n");
+ PART_ERROR(H5Tcommit_anon_invalid_loc_id);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Tcommit_anon_invalid_loc_id);
+
+ PART_BEGIN(H5Tcommit_anon_invalid_type_id)
+ {
+ TESTING_2("H5Tcommit_anon with an invalid datatype ID");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Tcommit_anon(group_id, H5I_INVALID_HID, H5P_DEFAULT, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Tcommit_anon succeeded with an invalid datatype ID!\n");
+ PART_ERROR(H5Tcommit_anon_invalid_type_id);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Tcommit_anon_invalid_type_id);
+
+ PART_BEGIN(H5Tcommit_anon_invalid_tcpl)
+ {
+ TESTING_2("H5Tcommit_anon with an invalid TCPL");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Tcommit_anon(group_id, type_id, H5I_INVALID_HID, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Tcommit_anon succeeded with an invalid TCPL!\n");
+ PART_ERROR(H5Tcommit_anon_invalid_tcpl);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Tcommit_anon_invalid_tcpl);
+
+ PART_BEGIN(H5Tcommit_anon_invalid_tapl)
+ {
+ TESTING_2("H5Tcommit_anon with an invalid TAPL");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Tcommit_anon(group_id, type_id, H5P_DEFAULT, H5I_INVALID_HID);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Tcommit_anon succeeded with an invalid TAPL!\n");
+ PART_ERROR(H5Tcommit_anon_invalid_tapl);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Tcommit_anon_invalid_tapl);
+ }
+ END_MULTIPART;
+
+ TESTING_2("test cleanup");
+
+ if (H5Tclose(type_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Tclose(type_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to check that committing a datatype fails with empty
+ * compound and enum datatypes.
+ */
+#ifndef PROBLEMATIC_TESTS
+static int
+test_create_committed_datatype_empty_types(void)
+{
+ herr_t err_ret = FAIL;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t type_id = H5I_INVALID_HID;
+
+ TESTING_MULTIPART("creation of committed datatype with empty types");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & (H5VL_CAP_FLAG_FILE_BASIC)) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_STORED_DATATYPES)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file, group, or stored datatype aren't supported with this "
+ "connector\n");
+ return 0;
+ }
+
+ TESTING_2("test setup");
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, DATATYPE_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATATYPE_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, DATATYPE_CREATE_EMPTY_TYPES_TEST_GROUP_NAME, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container sub-group '%s'\n",
+ DATATYPE_CREATE_EMPTY_TYPES_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ PASSED();
+
+ BEGIN_MULTIPART
+ {
+ PART_BEGIN(H5Tcommit_empty_compound_type)
+ {
+ TESTING_2("creation of committed datatype with empty compound type");
+
+ if ((type_id = H5Tcreate(H5T_COMPOUND, (size_t)32)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to create compound type\n");
+ PART_ERROR(H5Tcommit_empty_compound_type);
+ }
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Tcommit2(group_id, DATATYPE_CREATE_EMPTY_TYPES_TEST_CMPD_TYPE_NAME, type_id,
+ H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" committed empty compound datatype!\n");
+ PART_ERROR(H5Tcommit_empty_compound_type);
+ }
+
+ /* Add a field to the compound datatype */
+ if (H5Tinsert(type_id, "a", (size_t)0, H5T_NATIVE_INT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to insert field into compound datatype\n");
+ PART_ERROR(H5Tcommit_empty_compound_type);
+ }
+
+ /* Attempt to commit the now non-empty compound datatype */
+ if (H5Tcommit2(group_id, DATATYPE_CREATE_EMPTY_TYPES_TEST_CMPD_TYPE_NAME, type_id, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to commit non-empty compound datatype\n");
+ PART_ERROR(H5Tcommit_empty_compound_type);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Tcommit_empty_compound_type);
+
+ if (type_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Tclose(type_id);
+ }
+ H5E_END_TRY;
+ type_id = H5I_INVALID_HID;
+ }
+
+ PART_BEGIN(H5Tcommit_empty_enum_type)
+ {
+ int enum_val = 1;
+
+ TESTING_2("creation of committed datatype with empty enum type");
+
+ if ((type_id = H5Tenum_create(H5T_NATIVE_INT)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to create enum type\n");
+ PART_ERROR(H5Tcommit_empty_enum_type);
+ }
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Tcommit2(group_id, DATATYPE_CREATE_EMPTY_TYPES_TEST_ENUM_TYPE_NAME, type_id,
+ H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" committed empty enum datatype!\n");
+ PART_ERROR(H5Tcommit_empty_enum_type);
+ }
+
+ /* Add a field to the enum datatype */
+ if (H5Tenum_insert(type_id, "a", &enum_val) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to insert field into enum datatype\n");
+ PART_ERROR(H5Tcommit_empty_enum_type);
+ }
+
+ /* Attempt to commit the now non-empty enum datatype */
+ if (H5Tcommit2(group_id, DATATYPE_CREATE_EMPTY_TYPES_TEST_ENUM_TYPE_NAME, type_id, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to commit non-empty enum datatype\n");
+ PART_ERROR(H5Tcommit_empty_enum_type);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Tcommit_empty_enum_type);
+
+ if (type_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Tclose(type_id);
+ }
+ H5E_END_TRY;
+ type_id = H5I_INVALID_HID;
+ }
+ }
+ END_MULTIPART;
+
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Tclose(type_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+#endif
+
+/*
+ * A test to check that a committed datatype can't be re-committed.
+ */
+static int
+test_recommit_committed_type(void)
+{
+ htri_t is_committed = FALSE;
+ herr_t err_ret;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t type_id = H5I_INVALID_HID;
+
+ TESTING("inability to re-commit a committed datatype");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & (H5VL_CAP_FLAG_FILE_BASIC)) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_STORED_DATATYPES)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file, group, or stored datatype aren't supported with this "
+ "connector\n");
+ return 0;
+ }
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, DATATYPE_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATATYPE_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, RECOMMIT_COMMITTED_TYPE_TEST_GROUP_NAME, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container sub-group '%s'\n", RECOMMIT_COMMITTED_TYPE_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ /* Copy a predefined datatype and commit the copy */
+ if ((type_id = H5Tcopy(H5T_NATIVE_INT)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to copy predefined integer datatype\n");
+ goto error;
+ }
+
+ if (H5Tcommit2(group_id, "native_int", type_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to commit datatype\n");
+ goto error;
+ }
+
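+ /* Verify that H5Tcommitted() reports the datatype as committed */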
+ if ((is_committed = H5Tcommitted(type_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to determine if datatype is committed\n");
+ goto error;
+ }
+
+ if (!is_committed) {
+ H5_FAILED();
+ HDprintf(" H5Tcommitted() returned false!\n");
+ goto error;
+ }
+
+ /* We should not be able to re-commit a committed type */
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Tcommit2(group_id, "native_int", type_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" re-committed an already committed datatype!\n");
+ goto error;
+ }
+
+ if (H5Tclose(type_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Tclose(type_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to check that a committed datatype
+ * can be opened using H5Topen2.
+ */
+static int
+test_open_committed_datatype(void)
+{
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t type_id = H5I_INVALID_HID;
+
+ TESTING("H5Topen2");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & (H5VL_CAP_FLAG_FILE_BASIC)) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_STORED_DATATYPES)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file, group, or stored datatype aren't supported with this "
+ "connector\n");
+ return 0;
+ }
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, DATATYPE_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATATYPE_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, DATATYPE_OPEN_TEST_GROUP_NAME, H5P_DEFAULT, H5P_DEFAULT,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container sub-group '%s'\n", DATATYPE_OPEN_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((type_id = generate_random_datatype(H5T_NO_CLASS, FALSE)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create datatype to commit\n");
+ goto error;
+ }
+
+ if (H5Tcommit2(group_id, DATATYPE_OPEN_TEST_TYPE_NAME, type_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't commit datatype '%s'\n", DATATYPE_OPEN_TEST_TYPE_NAME);
+ goto error;
+ }
+
+ if (H5Tclose(type_id) < 0)
+ TEST_ERROR;
+
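+ /* Re-open the committed datatype by name with H5Topen2 */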
+ if ((type_id = H5Topen2(group_id, DATATYPE_OPEN_TEST_TYPE_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open committed datatype '%s'\n", DATATYPE_OPEN_TEST_TYPE_NAME);
+ goto error;
+ }
+
+ if (H5Tclose(type_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Tclose(type_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to check that a committed datatype can't
+ * be opened when H5Topen2 is passed invalid parameters.
+ */
+static int
+test_open_committed_datatype_invalid_params(void)
+{
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t type_id = H5I_INVALID_HID;
+
+ TESTING_MULTIPART("H5Topen2 with invalid parameters");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & (H5VL_CAP_FLAG_FILE_BASIC)) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_STORED_DATATYPES)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file, group, or stored datatype aren't supported with this "
+ "connector\n");
+ return 0;
+ }
+
+ TESTING_2("test setup");
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, DATATYPE_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATATYPE_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, DATATYPE_OPEN_INVALID_PARAMS_TEST_GROUP_NAME, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container sub-group '%s'\n",
+ DATATYPE_OPEN_INVALID_PARAMS_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((type_id = generate_random_datatype(H5T_NO_CLASS, FALSE)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create datatype to commit\n");
+ goto error;
+ }
+
+ if (H5Tcommit2(group_id, DATATYPE_OPEN_INVALID_PARAMS_TEST_TYPE_NAME, type_id, H5P_DEFAULT, H5P_DEFAULT,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't commit datatype '%s'\n", DATATYPE_OPEN_INVALID_PARAMS_TEST_TYPE_NAME);
+ goto error;
+ }
+
+ if (H5Tclose(type_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ BEGIN_MULTIPART
+ {
+ PART_BEGIN(H5Topen2_invalid_loc_id)
+ {
+ TESTING_2("H5Topen2 with an invalid location ID");
+
+ H5E_BEGIN_TRY
+ {
+ type_id = H5Topen2(H5I_INVALID_HID, DATATYPE_OPEN_INVALID_PARAMS_TEST_TYPE_NAME, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (type_id >= 0) {
+ H5_FAILED();
+ HDprintf(" opened committed datatype with an invalid location ID!\n");
+ H5Tclose(type_id);
+ PART_ERROR(H5Topen2_invalid_loc_id);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Topen2_invalid_loc_id);
+
+ PART_BEGIN(H5Topen2_invalid_type_name)
+ {
+ TESTING_2("H5Topen2 with an invalid datatype name");
+
+ H5E_BEGIN_TRY
+ {
+ type_id = H5Topen2(group_id, NULL, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (type_id >= 0) {
+ H5_FAILED();
+ HDprintf(" opened committed datatype with an invalid datatype name!\n");
+ H5Tclose(type_id);
+ PART_ERROR(H5Topen2_invalid_type_name);
+ }
+
+ H5E_BEGIN_TRY
+ {
+ type_id = H5Topen2(group_id, "", H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (type_id >= 0) {
+ H5_FAILED();
+ HDprintf(" opened committed datatype with an invalid datatype name!\n");
+ H5Tclose(type_id);
+ PART_ERROR(H5Topen2_invalid_type_name);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Topen2_invalid_type_name);
+
+ PART_BEGIN(H5Topen2_invalid_tapl)
+ {
+ TESTING_2("H5Topen2 with an invalid TAPL");
+
+ H5E_BEGIN_TRY
+ {
+ type_id = H5Topen2(group_id, DATATYPE_OPEN_INVALID_PARAMS_TEST_TYPE_NAME, H5I_INVALID_HID);
+ }
+ H5E_END_TRY;
+
+ if (type_id >= 0) {
+ H5_FAILED();
+ HDprintf(" opened committed datatype with an invalid TAPL!\n");
+ H5Tclose(type_id);
+ PART_ERROR(H5Topen2_invalid_tapl);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Topen2_invalid_tapl);
+ }
+ END_MULTIPART;
+
+ TESTING_2("test cleanup");
+
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Tclose(type_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to check that open named datatypes can be reopened indirectly
+ * through H5Dget_type without causing problems.
+ */
+static int
+test_reopen_committed_datatype_indirect(void)
+{
+ size_t dt_size = 0;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t type_id = H5I_INVALID_HID, reopened_type_id = H5I_INVALID_HID;
+ hid_t strtype = H5I_INVALID_HID;
+ hid_t space_id = H5I_INVALID_HID;
+
+ TESTING_MULTIPART("reopening open committed datatypes using H5Dget_type");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & (H5VL_CAP_FLAG_FILE_BASIC)) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_STORED_DATATYPES) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file, group, dataset, or stored datatype aren't supported with "
+ "this connector\n");
+ return 0;
+ }
+
+ TESTING_2("test setup");
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, DATATYPE_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATATYPE_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, DATATYPE_REOPEN_TEST_GROUP_NAME, H5P_DEFAULT, H5P_DEFAULT,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container sub-group '%s'\n", DATATYPE_REOPEN_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((space_id = generate_random_dataspace(DATATYPE_REOPEN_TEST_SPACE_RANK, NULL, NULL, FALSE)) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ BEGIN_MULTIPART
+ {
+ PART_BEGIN(reopen_compound_type)
+ {
+ TESTING_2("re-open of compound datatype");
+
+ if ((strtype = H5Tcopy(H5T_C_S1)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to copy C-string datatype\n");
+ PART_ERROR(reopen_compound_type);
+ }
+
+ if (H5Tset_size(strtype, H5T_VARIABLE) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to set string datatype's size to variable\n");
+ PART_ERROR(reopen_compound_type);
+ }
+
+ if ((type_id = H5Tcreate(H5T_COMPOUND, sizeof(char *))) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to create compound datatype\n");
+ PART_ERROR(reopen_compound_type);
+ }
+
+ if (H5Tinsert(type_id, "vlstr", (size_t)0, strtype) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to insert field into compound datatype\n");
+ PART_ERROR(reopen_compound_type);
+ }
+
+ if (H5Tclose(strtype) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close string datatype\n");
+ PART_ERROR(reopen_compound_type);
+ }
+
+ /* Get size of compound type */
+ if ((dt_size = H5Tget_size(type_id)) == 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve size of compound datatype\n");
+ PART_ERROR(reopen_compound_type);
+ }
+
+ /* Commit compound type and verify the size doesn't change */
+ if (H5Tcommit2(group_id, "cmpd_type", type_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to commit compound datatype\n");
+ PART_ERROR(reopen_compound_type);
+ }
+
+ if (dt_size != H5Tget_size(type_id)) {
+ H5_FAILED();
+ HDprintf(" committing datatype caused its size to change!\n");
+ PART_ERROR(reopen_compound_type);
+ }
+
+ /* Create dataset with compound type */
+ if ((dset_id = H5Dcreate2(group_id, "cmpd_dset", type_id, space_id, H5P_DEFAULT, H5P_DEFAULT,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to create dataset using committed datatype\n");
+ PART_ERROR(reopen_compound_type);
+ }
+
+ /* Indirectly reopen type and verify that the size doesn't change */
+ if ((reopened_type_id = H5Dget_type(dset_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to re-open committed datatype using H5Dget_type\n");
+ PART_ERROR(reopen_compound_type);
+ }
+
+ if (dt_size != H5Tget_size(reopened_type_id)) {
+ H5_FAILED();
+ HDprintf(" size of re-opened datatype didn't match size of original datatype\n");
+ PART_ERROR(reopen_compound_type);
+ }
+
+ PASSED();
+ }
+ PART_END(reopen_compound_type);
+
+ H5E_BEGIN_TRY
+ {
+ H5Tclose(strtype);
+ strtype = H5I_INVALID_HID;
+ H5Tclose(type_id);
+ type_id = H5I_INVALID_HID;
+ H5Tclose(reopened_type_id);
+ reopened_type_id = H5I_INVALID_HID;
+ H5Dclose(dset_id);
+ dset_id = H5I_INVALID_HID;
+ }
+ H5E_END_TRY;
+
+ PART_BEGIN(reopen_enum_type)
+ {
+ int enum_value;
+
+ TESTING_2("re-open of enum datatype");
+
+ if ((type_id = H5Tenum_create(H5T_NATIVE_INT)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to create enum datatype\n");
+ PART_ERROR(reopen_enum_type);
+ }
+
+ enum_value = 0;
+ if (H5Tenum_insert(type_id, "val1", &enum_value) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to insert value into enum datatype\n");
+ PART_ERROR(reopen_enum_type);
+ }
+
+ enum_value = 1;
+ if (H5Tenum_insert(type_id, "val2", &enum_value) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to insert value into enum datatype\n");
+ PART_ERROR(reopen_enum_type);
+ }
+
+ /* Get size of enum type */
+ if ((dt_size = H5Tget_size(type_id)) == 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve size of enum datatype\n");
+ PART_ERROR(reopen_enum_type);
+ }
+
+ /* Commit enum type and verify the size doesn't change */
+ if (H5Tcommit2(group_id, "enum_type", type_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to commit enum datatype\n");
+ PART_ERROR(reopen_enum_type);
+ }
+
+ if (dt_size != H5Tget_size(type_id)) {
+ H5_FAILED();
+ HDprintf(" committing datatype caused its size to change!\n");
+ PART_ERROR(reopen_enum_type);
+ }
+
+ /* Create dataset with enum type */
+ if ((dset_id = H5Dcreate2(group_id, "enum_dset", type_id, space_id, H5P_DEFAULT, H5P_DEFAULT,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to create dataset using committed datatype\n");
+ PART_ERROR(reopen_enum_type);
+ }
+
+ /* Indirectly reopen type and verify that the size doesn't change */
+ if ((reopened_type_id = H5Dget_type(dset_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to re-open committed datatype using H5Dget_type\n");
+ PART_ERROR(reopen_enum_type);
+ }
+
+ if (dt_size != H5Tget_size(reopened_type_id)) {
+ H5_FAILED();
+ HDprintf(" size of re-opened datatype didn't match size of original datatype\n");
+ PART_ERROR(reopen_enum_type);
+ }
+
+ PASSED();
+ }
+ PART_END(reopen_enum_type);
+
+ H5E_BEGIN_TRY
+ {
+ H5Tclose(type_id);
+ type_id = H5I_INVALID_HID;
+ H5Tclose(reopened_type_id);
+ reopened_type_id = H5I_INVALID_HID;
+ H5Dclose(dset_id);
+ dset_id = H5I_INVALID_HID;
+ }
+ H5E_END_TRY;
+
+ PART_BEGIN(reopen_vlen_type)
+ {
+ TESTING_2("reopen of a variable-length datatype");
+
+ if ((type_id = H5Tvlen_create(H5T_NATIVE_INT)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to create variable-length datatype\n");
+ PART_ERROR(reopen_vlen_type);
+ }
+
+ /* Get size of variable-length type */
+ if ((dt_size = H5Tget_size(type_id)) == 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve size of variable-length datatype\n");
+ PART_ERROR(reopen_vlen_type);
+ }
+
+ /* Commit variable-length type and verify the size doesn't change */
+ if (H5Tcommit2(group_id, "vlen_type", type_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to commit variable-length datatype\n");
+ PART_ERROR(reopen_vlen_type);
+ }
+
+ if (dt_size != H5Tget_size(type_id)) {
+ H5_FAILED();
+ HDprintf(" committing datatype caused its size to change!\n");
+ PART_ERROR(reopen_vlen_type);
+ }
+
+ /* Create dataset with variable-length type */
+ if ((dset_id = H5Dcreate2(group_id, "vlen_dset", type_id, space_id, H5P_DEFAULT, H5P_DEFAULT,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to create dataset using committed datatype\n");
+ PART_ERROR(reopen_vlen_type);
+ }
+
+ /* Indirectly reopen type and verify that the size doesn't change */
+ if ((reopened_type_id = H5Dget_type(dset_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to re-open committed datatype using H5Dget_type\n");
+ PART_ERROR(reopen_vlen_type);
+ }
+
+ if (dt_size != H5Tget_size(reopened_type_id)) {
+ H5_FAILED();
+ HDprintf(" size of re-opened datatype didn't match size of original datatype\n");
+ PART_ERROR(reopen_vlen_type);
+ }
+
+ PASSED();
+ }
+ PART_END(reopen_vlen_type);
+
+ H5E_BEGIN_TRY
+ {
+ H5Tclose(type_id);
+ type_id = H5I_INVALID_HID;
+ H5Tclose(reopened_type_id);
+ reopened_type_id = H5I_INVALID_HID;
+ H5Dclose(dset_id);
+ dset_id = H5I_INVALID_HID;
+ }
+ H5E_END_TRY;
+
+ PART_BEGIN(reopen_opaque_type)
+ {
+ const char *tag = "opaque_tag";
+
+ TESTING_2("reopen of an opaque datatype");
+
+ if ((type_id = H5Tcreate(H5T_OPAQUE, (size_t)13)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to create opaque datatype\n");
+ PART_ERROR(reopen_opaque_type);
+ }
+
+ if (H5Tset_tag(type_id, tag) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to set tag on opaque datatype\n");
+ PART_ERROR(reopen_opaque_type);
+ }
+
+ /* Get size of opaque type */
+ if ((dt_size = H5Tget_size(type_id)) == 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve size of opaque datatype\n");
+ PART_ERROR(reopen_opaque_type);
+ }
+
+ /* Commit opaque type and verify the size doesn't change */
+ if (H5Tcommit2(group_id, "opaque_type", type_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to commit opaque datatype\n");
+ PART_ERROR(reopen_opaque_type);
+ }
+
+ if (dt_size != H5Tget_size(type_id)) {
+ H5_FAILED();
+ HDprintf(" committing datatype caused its size to change!\n");
+ PART_ERROR(reopen_opaque_type);
+ }
+
+ /* Create dataset with opaque type */
+ if ((dset_id = H5Dcreate2(group_id, "opaque_dset", type_id, space_id, H5P_DEFAULT, H5P_DEFAULT,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to create dataset using committed datatype\n");
+ PART_ERROR(reopen_opaque_type);
+ }
+
+ /* Indirectly reopen type and verify that the size doesn't change */
+ if ((reopened_type_id = H5Dget_type(dset_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to re-open committed datatype using H5Dget_type\n");
+ PART_ERROR(reopen_opaque_type);
+ }
+
+ if (dt_size != H5Tget_size(reopened_type_id)) {
+ H5_FAILED();
+ HDprintf(" size of re-opened datatype didn't match size of original datatype\n");
+ PART_ERROR(reopen_opaque_type);
+ }
+
+ PASSED();
+ }
+ PART_END(reopen_opaque_type);
+
+ H5E_BEGIN_TRY
+ {
+ H5Tclose(type_id);
+ type_id = H5I_INVALID_HID;
+ H5Tclose(reopened_type_id);
+ reopened_type_id = H5I_INVALID_HID;
+ H5Dclose(dset_id);
+ dset_id = H5I_INVALID_HID;
+ }
+ H5E_END_TRY;
+
+ PART_BEGIN(reopen_array_type)
+ {
+ hsize_t array_dims[] = {2, 3};
+
+ TESTING_2("reopen of an array datatype");
+
+ if ((type_id = H5Tarray_create2(H5T_NATIVE_INT, 1, array_dims)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to create array datatype\n");
+ PART_ERROR(reopen_array_type);
+ }
+
+ /* Get size of array type */
+ if ((dt_size = H5Tget_size(type_id)) == 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve size of array datatype\n");
+ PART_ERROR(reopen_array_type);
+ }
+
+ /* Commit array type and verify the size doesn't change */
+ if (H5Tcommit2(group_id, "array_type", type_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to commit array datatype\n");
+ PART_ERROR(reopen_array_type);
+ }
+
+ if (dt_size != H5Tget_size(type_id)) {
+ H5_FAILED();
+ HDprintf(" committing datatype caused its size to change!\n");
+ PART_ERROR(reopen_array_type);
+ }
+
+ /* Create dataset with array type */
+ if ((dset_id = H5Dcreate2(group_id, "array_dset", type_id, space_id, H5P_DEFAULT, H5P_DEFAULT,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to create dataset using committed datatype\n");
+ PART_ERROR(reopen_array_type);
+ }
+
+ /* Indirectly reopen type and verify that the size doesn't change */
+ if ((reopened_type_id = H5Dget_type(dset_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to re-open committed datatype using H5Dget_type\n");
+ PART_ERROR(reopen_array_type);
+ }
+
+ if (dt_size != H5Tget_size(reopened_type_id)) {
+ H5_FAILED();
+ HDprintf(" size of re-opened datatype didn't match size of original datatype\n");
+ PART_ERROR(reopen_array_type);
+ }
+
+ PASSED();
+ }
+ PART_END(reopen_array_type);
+
+ H5E_BEGIN_TRY
+ {
+ H5Tclose(type_id);
+ type_id = H5I_INVALID_HID;
+ H5Tclose(reopened_type_id);
+ reopened_type_id = H5I_INVALID_HID;
+ H5Dclose(dset_id);
+ dset_id = H5I_INVALID_HID;
+ }
+ H5E_END_TRY;
+ }
+ END_MULTIPART;
+
+ TESTING_2("test cleanup");
+
+ if (H5Sclose(space_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(space_id);
+ H5Tclose(strtype);
+ H5Tclose(type_id);
+ H5Tclose(reopened_type_id);
+ H5Dclose(dset_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to check that H5Tclose fails when
+ * it is passed an invalid datatype ID.
+ */
+static int
+test_close_committed_datatype_invalid_id(void)
+{
+ herr_t err_ret = -1;
+ hid_t file_id = H5I_INVALID_HID;
+
+ TESTING("H5Tclose with an invalid committed datatype ID");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & (H5VL_CAP_FLAG_FILE_BASIC)) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_STORED_DATATYPES)) {
+ SKIPPED();
+ HDprintf(
+ " API functions for basic file or stored datatype aren't supported with this connector\n");
+ return 0;
+ }
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
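+ /* H5Tclose should fail when passed an invalid datatype ID */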
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Tclose(H5I_INVALID_HID);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Tclose succeeded with an invalid committed datatype ID!\n");
+ goto error;
+ }
+
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to check that a TCPL used for datatype creation
+ * can be persisted and that a valid copy of that TCPL can
+ * be retrieved later with a call to H5Tget_create_plist.
+ */
+static int
+test_datatype_property_lists(void)
+{
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t type_id1 = H5I_INVALID_HID, type_id2 = H5I_INVALID_HID;
+ hid_t tcpl_id1 = H5I_INVALID_HID, tcpl_id2 = H5I_INVALID_HID;
+
+ TESTING_MULTIPART("datatype property list operations");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & (H5VL_CAP_FLAG_FILE_BASIC)) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_STORED_DATATYPES) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GET_PLIST)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file, group, stored datatype, or getting property list aren't "
+ "supported with this connector\n");
+ return 0;
+ }
+
+ TESTING_2("test setup");
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, DATATYPE_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATATYPE_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, DATATYPE_PROPERTY_LIST_TEST_SUBGROUP_NAME, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container sub-group '%s'\n", DATATYPE_PROPERTY_LIST_TEST_SUBGROUP_NAME);
+ goto error;
+ }
+
+ if ((type_id1 = generate_random_datatype(H5T_NO_CLASS, FALSE)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create datatype\n");
+ goto error;
+ }
+
+ if ((type_id2 = generate_random_datatype(H5T_NO_CLASS, FALSE)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create datatype\n");
+ goto error;
+ }
+
+ if ((tcpl_id1 = H5Pcreate(H5P_DATATYPE_CREATE)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create TCPL\n");
+ goto error;
+ }
+
+ /* Currently no TCPL routines are defined */
+
+ if (H5Tcommit2(group_id, DATATYPE_PROPERTY_LIST_TEST_DATATYPE_NAME1, type_id1, H5P_DEFAULT, tcpl_id1,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't commit datatype '%s'\n", DATATYPE_PROPERTY_LIST_TEST_DATATYPE_NAME1);
+ goto error;
+ }
+
+ if (H5Tcommit2(group_id, DATATYPE_PROPERTY_LIST_TEST_DATATYPE_NAME2, type_id2, H5P_DEFAULT, H5P_DEFAULT,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't commit datatype '%s'\n", DATATYPE_PROPERTY_LIST_TEST_DATATYPE_NAME2);
+ goto error;
+ }
+
+ if (H5Pclose(tcpl_id1) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ BEGIN_MULTIPART
+ {
+ PART_BEGIN(H5Tget_create_plist)
+ {
+ TESTING_2("H5Tget_create_plist");
+
+ /* Try to retrieve copies of the two datatypes' creation property lists */
+ if ((tcpl_id1 = H5Tget_create_plist(type_id1)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get property list\n");
+ PART_ERROR(H5Tget_create_plist);
+ }
+
+ if ((tcpl_id2 = H5Tget_create_plist(type_id2)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get property list\n");
+ PART_ERROR(H5Tget_create_plist);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Tget_create_plist);
+
+ /* Now close the property lists and datatypes and see if we can still retrieve copies of
+ * the property lists upon opening (instead of creating) a datatype
+ */
+ if (tcpl_id1 >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Pclose(tcpl_id1);
+ }
+ H5E_END_TRY;
+ tcpl_id1 = H5I_INVALID_HID;
+ }
+ if (tcpl_id2 >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Pclose(tcpl_id2);
+ }
+ H5E_END_TRY;
+ tcpl_id2 = H5I_INVALID_HID;
+ }
+ if (type_id1 >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Tclose(type_id1);
+ }
+ H5E_END_TRY;
+ type_id1 = H5I_INVALID_HID;
+ }
+ if (type_id2 >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Tclose(type_id2);
+ }
+ H5E_END_TRY;
+ type_id2 = H5I_INVALID_HID;
+ }
+
+ PART_BEGIN(H5Tget_create_plist_reopened)
+ {
+ TESTING_2("H5Tget_create_plist after re-opening committed datatype");
+
+ if ((type_id1 = H5Topen2(group_id, DATATYPE_PROPERTY_LIST_TEST_DATATYPE_NAME1, H5P_DEFAULT)) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open datatype '%s'\n", DATATYPE_PROPERTY_LIST_TEST_DATATYPE_NAME1);
+ PART_ERROR(H5Tget_create_plist_reopened);
+ }
+
+ if ((type_id2 = H5Topen2(group_id, DATATYPE_PROPERTY_LIST_TEST_DATATYPE_NAME2, H5P_DEFAULT)) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open datatype '%s'\n", DATATYPE_PROPERTY_LIST_TEST_DATATYPE_NAME2);
+ PART_ERROR(H5Tget_create_plist_reopened);
+ }
+
+ if ((tcpl_id1 = H5Tget_create_plist(type_id1)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get property list\n");
+ PART_ERROR(H5Tget_create_plist_reopened);
+ }
+
+ if ((tcpl_id2 = H5Tget_create_plist(type_id2)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get property list\n");
+ PART_ERROR(H5Tget_create_plist_reopened);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Tget_create_plist_reopened);
+ }
+ END_MULTIPART;
+
+ TESTING_2("test cleanup");
+
+ if (H5Pclose(tcpl_id1) < 0)
+ TEST_ERROR;
+ if (H5Pclose(tcpl_id2) < 0)
+ TEST_ERROR;
+ if (H5Tclose(type_id1) < 0)
+ TEST_ERROR;
+ if (H5Tclose(type_id2) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Pclose(tcpl_id1);
+ H5Pclose(tcpl_id2);
+ H5Tclose(type_id1);
+ H5Tclose(type_id2);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to check that a dataset can be created using
+ * a committed datatype.
+ */
+static int
+test_create_dataset_with_committed_type(void)
+{
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t type_id = H5I_INVALID_HID;
+ hid_t fspace_id = H5I_INVALID_HID;
+
+ TESTING("dataset creation with a committed datatype");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & (H5VL_CAP_FLAG_FILE_BASIC)) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_STORED_DATATYPES) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file, group, dataset, or stored datatype aren't supported with "
+ "this connector\n");
+ return 0;
+ }
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, DATATYPE_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATATYPE_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, DATASET_CREATE_WITH_DATATYPE_TEST_GROUP_NAME, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container group '%s'\n", DATASET_CREATE_WITH_DATATYPE_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((type_id = generate_random_datatype(H5T_NO_CLASS, FALSE)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create datatype\n");
+ goto error;
+ }
+
+ if (H5Tcommit2(group_id, DATASET_CREATE_WITH_DATATYPE_TEST_TYPE_NAME, type_id, H5P_DEFAULT, H5P_DEFAULT,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't commit datatype '%s'\n", DATASET_CREATE_WITH_DATATYPE_TEST_TYPE_NAME);
+ goto error;
+ }
+
+ if (H5Tclose(type_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
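+ /* Re-open the file and the committed datatype to verify a dataset can be created with it */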
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, DATATYPE_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATATYPE_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id = H5Gopen2(container_group, DATASET_CREATE_WITH_DATATYPE_TEST_GROUP_NAME, H5P_DEFAULT)) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATASET_CREATE_WITH_DATATYPE_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((type_id = H5Topen2(group_id, DATASET_CREATE_WITH_DATATYPE_TEST_TYPE_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open committed datatype '%s'\n", DATASET_CREATE_WITH_DATATYPE_TEST_TYPE_NAME);
+ goto error;
+ }
+
+ if ((fspace_id = generate_random_dataspace(DATATYPE_CREATE_TEST_DATASET_DIMS, NULL, NULL, FALSE)) < 0)
+ TEST_ERROR;
+
+ if ((dset_id = H5Dcreate2(group_id, DATASET_CREATE_WITH_DATATYPE_TEST_DSET_NAME, type_id, fspace_id,
+ H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create dataset '%s' using committed datatype\n",
+ DATASET_CREATE_WITH_DATATYPE_TEST_DSET_NAME);
+ goto error;
+ }
+
+ if (H5Dclose(dset_id) < 0)
+ TEST_ERROR;
+
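+ /* Re-open the dataset to verify it was created correctly */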
+ if ((dset_id = H5Dopen2(group_id, DATASET_CREATE_WITH_DATATYPE_TEST_DSET_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to open dataset '%s'\n", DATASET_CREATE_WITH_DATATYPE_TEST_DSET_NAME);
+ goto error;
+ }
+
+ if (H5Tclose(type_id) < 0)
+ TEST_ERROR;
+ if (H5Sclose(fspace_id) < 0)
+ TEST_ERROR;
+ if (H5Dclose(dset_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Tclose(type_id);
+ H5Sclose(fspace_id);
+ H5Dclose(dset_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to check that an attribute can be created
+ * using a committed datatype.
+ */
+static int
+test_create_attribute_with_committed_type(void)
+{
+ htri_t attr_exists;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t attr_id = H5I_INVALID_HID;
+ hid_t type_id = H5I_INVALID_HID;
+ hid_t space_id = H5I_INVALID_HID;
+
+ TESTING("attribute creation with a committed datatype");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & (H5VL_CAP_FLAG_FILE_BASIC)) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_STORED_DATATYPES) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file, group, attribute, or stored datatype aren't supported "
+ "with this connector\n");
+ return 0;
+ }
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, DATATYPE_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATATYPE_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, ATTRIBUTE_CREATE_WITH_DATATYPE_TEST_GROUP_NAME, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container group '%s'\n",
+ ATTRIBUTE_CREATE_WITH_DATATYPE_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((type_id = generate_random_datatype(H5T_NO_CLASS, TRUE)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create datatype\n");
+ goto error;
+ }
+
+ if (H5Tcommit2(group_id, ATTRIBUTE_CREATE_WITH_DATATYPE_TEST_DTYPE_NAME, type_id, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't commit datatype '%s'\n", ATTRIBUTE_CREATE_WITH_DATATYPE_TEST_DTYPE_NAME);
+ goto error;
+ }
+
+ if (H5Tclose(type_id) < 0)
+ TEST_ERROR;
+
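+ /* Re-open the committed datatype before using it to create an attribute */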
+ if ((type_id = H5Topen2(group_id, ATTRIBUTE_CREATE_WITH_DATATYPE_TEST_DTYPE_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open committed datatype '%s'\n",
+ ATTRIBUTE_CREATE_WITH_DATATYPE_TEST_DTYPE_NAME);
+ goto error;
+ }
+
+ if ((space_id =
+ generate_random_dataspace(ATTRIBUTE_CREATE_WITH_DATATYPE_TEST_SPACE_RANK, NULL, NULL, TRUE)) < 0)
+ TEST_ERROR;
+
+ if ((attr_id = H5Acreate2(group_id, ATTRIBUTE_CREATE_WITH_DATATYPE_TEST_ATTR_NAME, type_id, space_id,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create attribute '%s'\n", ATTRIBUTE_CREATE_WITH_DATATYPE_TEST_ATTR_NAME);
+ goto error;
+ }
+
+ /* Verify the attribute has been created */
+ if ((attr_exists = H5Aexists(group_id, ATTRIBUTE_CREATE_WITH_DATATYPE_TEST_ATTR_NAME)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if attribute '%s' exists\n",
+ ATTRIBUTE_CREATE_WITH_DATATYPE_TEST_ATTR_NAME);
+ goto error;
+ }
+
+ if (!attr_exists) {
+ H5_FAILED();
+ HDprintf(" attribute did not exist\n");
+ goto error;
+ }
+
+ if (H5Aclose(attr_id) < 0)
+ TEST_ERROR;
+
+ if ((attr_id = H5Aopen(group_id, ATTRIBUTE_CREATE_WITH_DATATYPE_TEST_ATTR_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open attribute '%s'\n", ATTRIBUTE_CREATE_WITH_DATATYPE_TEST_ATTR_NAME);
+ goto error;
+ }
+
+ if (H5Tclose(type_id) < 0)
+ TEST_ERROR;
+ if (H5Sclose(space_id) < 0)
+ TEST_ERROR;
+ if (H5Aclose(attr_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Tclose(type_id);
+ H5Sclose(space_id);
+ H5Aclose(attr_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to check that a committed datatype can
+ * be deleted.
+ */
+static int
+test_delete_committed_type(void)
+{
+ htri_t type_exists;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t type_id = H5I_INVALID_HID;
+
+ TESTING("committed datatype deletion");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & (H5VL_CAP_FLAG_FILE_BASIC)) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_STORED_DATATYPES) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_LINK_BASIC)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file, group, attribute, or stored datatype aren't supported "
+ "with this connector\n");
+ return 0;
+ }
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, DATATYPE_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATATYPE_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, DATATYPE_DELETE_TEST_GROUP_NAME, H5P_DEFAULT, H5P_DEFAULT,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container group '%s'\n", DATATYPE_DELETE_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((type_id = generate_random_datatype(H5T_NO_CLASS, FALSE)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create datatype\n");
+ goto error;
+ }
+
+ if (H5Tcommit2(group_id, DATATYPE_DELETE_TEST_DTYPE_NAME, type_id, H5P_DEFAULT, H5P_DEFAULT,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't commit datatype '%s'\n", DATATYPE_DELETE_TEST_DTYPE_NAME);
+ goto error;
+ }
+
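+ /* Verify that the link to the committed datatype exists before deletion */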
+ if ((type_exists = H5Lexists(group_id, DATATYPE_DELETE_TEST_DTYPE_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if datatype '%s' exists\n", DATATYPE_DELETE_TEST_DTYPE_NAME);
+ goto error;
+ }
+
+ if (!type_exists) {
+ H5_FAILED();
+ HDprintf(" datatype didn't exist\n");
+ goto error;
+ }
+
+ if (H5Ldelete(group_id, DATATYPE_DELETE_TEST_DTYPE_NAME, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't delete datatype '%s'\n", DATATYPE_DELETE_TEST_DTYPE_NAME);
+ goto error;
+ }
+
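+ /* Verify that the link no longer exists after deletion */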
+ if ((type_exists = H5Lexists(group_id, DATATYPE_DELETE_TEST_DTYPE_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if datatype '%s' exists\n", DATATYPE_DELETE_TEST_DTYPE_NAME);
+ goto error;
+ }
+
+ if (type_exists) {
+ H5_FAILED();
+ HDprintf(" datatype exists\n");
+ goto error;
+ }
+
+ if (H5Tclose(type_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Tclose(type_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to check that a committed datatype can still be opened when
+ * the link to the datatype is deleted and then a new one is created.
+ */
+static int
+test_resurrect_datatype(void)
+{
+#ifndef NO_ID_PREVENTS_OBJ_DELETE
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t type_id = H5I_INVALID_HID;
+#endif /* NO_ID_PREVENTS_OBJ_DELETE */
+
+ TESTING("resurrecting datatype after deletion");
+
+#ifndef NO_ID_PREVENTS_OBJ_DELETE
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & (H5VL_CAP_FLAG_FILE_BASIC)) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_STORED_DATATYPES) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_LINK_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_HARD_LINKS)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file, group, link, hard link, or stored datatype aren't "
+ "supported with this connector\n");
+ return 0;
+ }
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, DATATYPE_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATATYPE_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, DATATYPE_RESURRECT_TEST_GROUP_NAME, H5P_DEFAULT, H5P_DEFAULT,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container sub-group '%s'\n", DATATYPE_RESURRECT_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ /* Create a named datatype in the file */
+ if ((type_id = H5Tcopy(H5T_NATIVE_INT)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to copy predefined integer type\n");
+ goto error;
+ }
+
+ if (H5Tcommit2(group_id, DATATYPE_RESURRECT_TEST_DTYPE_NAME, type_id, H5P_DEFAULT, H5P_DEFAULT,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to commit datatype\n");
+ goto error;
+ }
+
+ /* Unlink the datatype while it's open (will mark it for deletion when closed) */
+ if (H5Ldelete(group_id, DATATYPE_RESURRECT_TEST_DTYPE_NAME, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to delete datatype\n");
+ goto error;
+ }
+#ifndef NO_OBJECT_GET_NAME
+ /* Check that the unlinked datatype no longer has a name */
+ if (H5Iget_name(type_id, NULL, (size_t)0) != 0) {
+ H5_FAILED();
+ HDprintf(" unlinked datatype still had a name!\n");
+ goto error;
+ }
+#endif
+
+ /* Re-link the datatype to the group hierarchy (shouldn't get deleted now) */
+ if (H5Lcreate_hard(type_id, ".", group_id, DATATYPE_RESURRECT_TEST_DTYPE_NAME2, H5P_DEFAULT,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to create new link for deleted datatype\n");
+ goto error;
+ }
+
+ /* Close things */
+ if (H5Tclose(type_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ /* Re-open the file */
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, DATATYPE_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATATYPE_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id = H5Gopen2(container_group, DATATYPE_RESURRECT_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container sub-group '%s'\n", DATATYPE_RESURRECT_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ /* Attempt to open the datatype under the new name */
+ if ((type_id = H5Topen2(group_id, DATATYPE_RESURRECT_TEST_DTYPE_NAME2, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to open resurrected datatype\n");
+ goto error;
+ }
+
+ if (H5Tclose(type_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+#else /* NO_ID_PREVENTS_OBJ_DELETE */
+ SKIPPED();
+#endif /* NO_ID_PREVENTS_OBJ_DELETE */
+
+ return 0;
+
+#ifndef NO_ID_PREVENTS_OBJ_DELETE
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Tclose(type_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+#endif /* NO_ID_PREVENTS_OBJ_DELETE */
+}
+
+static int
+test_flush_committed_datatype(void)
+{
+ TESTING("H5Tflush");
+
+ SKIPPED();
+
+ return 0;
+}
+
+static int
+test_flush_committed_datatype_invalid_params(void)
+{
+ TESTING("H5Tflush with invalid parameters");
+
+ SKIPPED();
+
+ return 0;
+}
+
+static int
+test_refresh_committed_datatype(void)
+{
+ TESTING("H5Trefresh");
+
+ SKIPPED();
+
+ return 0;
+}
+
+static int
+test_refresh_committed_datatype_invalid_params(void)
+{
+ TESTING("H5Trefresh with invalid parameters");
+
+ SKIPPED();
+
+ return 0;
+}
+
+/*
+ * A test to check that predefined HDF5 datatypes can't be directly committed.
+ * An application should first copy the type with H5Tcopy and then commit the
+ * copied datatype.
+ */
+#ifndef PROBLEMATIC_TESTS
+static int
+test_cant_commit_predefined(void)
+{
+ herr_t err_ret;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+
+ TESTING("inability to commit predefined types directly");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & (H5VL_CAP_FLAG_FILE_BASIC)) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_STORED_DATATYPES)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file, group, or stored datatype aren't supported with this "
+ "connector\n");
+ return 0;
+ }
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, DATATYPE_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATATYPE_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, PREDEFINED_TYPE_COMMIT_TEST_GROUP_NAME, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container sub-group '%s'\n", PREDEFINED_TYPE_COMMIT_TEST_GROUP_NAME);
+ goto error;
+ }
+
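+ /* Attempting to commit a predefined datatype directly should fail */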
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Tcommit2(group_id, "committed_predefined_type", H5T_NATIVE_INT, H5P_DEFAULT, H5P_DEFAULT,
+ H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" committed a predefined datatype directly (without copying it)!\n");
+ goto error;
+ }
+
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+#endif
+
+/*
+ * A test to check that a datatype cannot be modified once it has been committed.
+ */
+static int
+test_cant_modify_committed_type(void)
+{
+ htri_t is_committed = FALSE;
+ herr_t err_ret;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t type_id = H5I_INVALID_HID;
+
+ TESTING("inability to modify a committed datatype");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & (H5VL_CAP_FLAG_FILE_BASIC)) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_STORED_DATATYPES)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file, group, or stored datatype aren't supported with this "
+ "connector\n");
+ return 0;
+ }
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, DATATYPE_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATATYPE_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, MODIFY_COMMITTED_TYPE_TEST_GROUP_NAME, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container sub-group '%s'\n", MODIFY_COMMITTED_TYPE_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ /* Copy a predefined datatype and commit the copy */
+ if ((type_id = H5Tcopy(H5T_NATIVE_INT)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to copy predefined integer datatype\n");
+ goto error;
+ }
+
+ if (H5Tcommit2(group_id, "native_int", type_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to commit datatype\n");
+ goto error;
+ }
+
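+ /* Verify that H5Tcommitted() reports the copied datatype as committed */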
+ if ((is_committed = H5Tcommitted(type_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to determine if datatype is committed\n");
+ goto error;
+ }
+
+ if (!is_committed) {
+ H5_FAILED();
+ HDprintf(" H5Tcommitted() returned false!\n");
+ goto error;
+ }
+
+ /* We should not be able to modify a type after it has been committed. */
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Tset_precision(type_id, (size_t)256);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" modified committed datatype!\n");
+ goto error;
+ }
+
+ if (H5Tclose(type_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Tclose(type_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+int
+H5_api_datatype_test(void)
+{
+ size_t i;
+ int nerrors;
+
+ HDprintf("**********************************************\n");
+ HDprintf("* *\n");
+ HDprintf("* API Datatype Tests *\n");
+ HDprintf("* *\n");
+ HDprintf("**********************************************\n\n");
+
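+ /* Run each datatype test, accumulating the number of failures */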
+ for (i = 0, nerrors = 0; i < ARRAY_LENGTH(datatype_tests); i++) {
+ nerrors += (*datatype_tests[i])() ? 1 : 0;
+ }
+
+ HDprintf("\n");
+
+ return nerrors;
+}
diff --git a/test/API/H5_api_datatype_test.h b/test/API/H5_api_datatype_test.h
new file mode 100644
index 0000000..753f9b2
--- /dev/null
+++ b/test/API/H5_api_datatype_test.h
@@ -0,0 +1,79 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+#ifndef H5_API_DATATYPE_TEST_H
+#define H5_API_DATATYPE_TEST_H
+
+#include "H5_api_test.h"
+
+int H5_api_datatype_test(void);
+
+/*************************************************
+ * *
+ * API Datatype test defines *
+ * *
+ *************************************************/
+
+#define DATATYPE_CREATE_TEST_DATASET_DIMS 2
+#define DATATYPE_CREATE_TEST_GROUP_NAME "committed_datatype_creation_test"
+#define DATATYPE_CREATE_TEST_TYPE_NAME "test_type"
+
+#define DATATYPE_CREATE_INVALID_PARAMS_TEST_SPACE_RANK 2
+#define DATATYPE_CREATE_INVALID_PARAMS_TEST_GROUP_NAME "committed_datatype_creation_invalid_params_test"
+#define DATATYPE_CREATE_INVALID_PARAMS_TEST_TYPE_NAME "committed_datatype_creation_invalid_params_datatype"
+
+#define DATATYPE_CREATE_ANONYMOUS_GROUP_NAME "anonymous_type_creation_test"
+#define DATATYPE_CREATE_ANONYMOUS_TYPE_NAME "anon_type"
+
+#define DATATYPE_CREATE_ANONYMOUS_INVALID_PARAMS_GROUP_NAME "anonymous_type_creation_invalid_params_test"
+
+#define DATATYPE_CREATE_EMPTY_TYPES_TEST_CMPD_TYPE_NAME "compound_type"
+#define DATATYPE_CREATE_EMPTY_TYPES_TEST_ENUM_TYPE_NAME "enum_type"
+#define DATATYPE_CREATE_EMPTY_TYPES_TEST_GROUP_NAME "committed_datatype_empty_types_test"
+
+#define RECOMMIT_COMMITTED_TYPE_TEST_GROUP_NAME "recommit_committed_type_test"
+
+#define DATATYPE_OPEN_TEST_GROUP_NAME "datatype_open_test"
+#define DATATYPE_OPEN_TEST_TYPE_NAME "open_test_datatype"
+
+#define DATATYPE_OPEN_INVALID_PARAMS_TEST_GROUP_NAME "datatype_open_invalid_params_test"
+#define DATATYPE_OPEN_INVALID_PARAMS_TEST_TYPE_NAME "open_invalid_params_test_datatype"
+
+#define DATATYPE_REOPEN_TEST_SPACE_RANK 2
+#define DATATYPE_REOPEN_TEST_GROUP_NAME "datatype_reopen_test"
+
+#define DATASET_CREATE_WITH_DATATYPE_TEST_DATASET_DIMS 2
+#define DATASET_CREATE_WITH_DATATYPE_TEST_GROUP_NAME "dataset_create_with_committed_type_test"
+#define DATASET_CREATE_WITH_DATATYPE_TEST_TYPE_NAME "committed_type_test_dtype1"
+#define DATASET_CREATE_WITH_DATATYPE_TEST_DSET_NAME "committed_type_test_dset"
+
+#define ATTRIBUTE_CREATE_WITH_DATATYPE_TEST_SPACE_RANK 2
+#define ATTRIBUTE_CREATE_WITH_DATATYPE_TEST_GROUP_NAME "attribute_create_with_committed_type_test"
+#define ATTRIBUTE_CREATE_WITH_DATATYPE_TEST_DTYPE_NAME "committed_type_test_dtype2"
+#define ATTRIBUTE_CREATE_WITH_DATATYPE_TEST_ATTR_NAME "committed_type_test_attr"
+
+#define DATATYPE_DELETE_TEST_GROUP_NAME "datatype_deletion_test"
+#define DATATYPE_DELETE_TEST_DTYPE_NAME "delete_test_dtype"
+
+#define DATATYPE_RESURRECT_TEST_GROUP_NAME "datatype_resurrection_test"
+#define DATATYPE_RESURRECT_TEST_DTYPE_NAME "delete_test_dtype"
+#define DATATYPE_RESURRECT_TEST_DTYPE_NAME2 "resurrected_dtype"
+
+#define DATATYPE_PROPERTY_LIST_TEST_SUBGROUP_NAME "datatype_property_list_test_group"
+#define DATATYPE_PROPERTY_LIST_TEST_DATATYPE_NAME1 "property_list_test_datatype1"
+#define DATATYPE_PROPERTY_LIST_TEST_DATATYPE_NAME2 "property_list_test_datatype2"
+
+#define PREDEFINED_TYPE_COMMIT_TEST_GROUP_NAME "predefined_type_commit_test"
+
+#define MODIFY_COMMITTED_TYPE_TEST_GROUP_NAME "modify_committed_type_test"
+
+#endif
diff --git a/test/API/H5_api_file_test.c b/test/API/H5_api_file_test.c
new file mode 100644
index 0000000..279e9e7
--- /dev/null
+++ b/test/API/H5_api_file_test.c
@@ -0,0 +1,2564 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+#include "H5_api_file_test.h"
+
+static int test_create_file(void);
+static int test_create_file_invalid_params(void);
+static int test_create_file_excl(void);
+static int test_open_file(void);
+static int test_open_file_invalid_params(void);
+static int test_open_nonexistent_file(void);
+static int test_file_open_overlap(void);
+static int test_file_permission(void);
+static int test_reopen_file(void);
+static int test_close_file_invalid_id(void);
+static int test_flush_file(void);
+static int test_file_is_accessible(void);
+static int test_file_property_lists(void);
+static int test_get_file_intent(void);
+static int test_get_file_obj_count(void);
+static int test_file_mounts(void);
+static int test_get_file_name(void);
+
+/*
+ * The array of file tests to be performed.
+ */
+static int (*file_tests[])(void) = {
+ test_create_file,
+ test_create_file_invalid_params,
+ test_create_file_excl,
+ test_open_file,
+ test_open_file_invalid_params,
+ test_open_nonexistent_file,
+ test_file_open_overlap,
+ test_file_permission,
+ test_reopen_file,
+ test_close_file_invalid_id,
+ test_flush_file,
+ test_file_is_accessible,
+ test_file_property_lists,
+ test_get_file_intent,
+ test_get_file_obj_count,
+ test_file_mounts,
+ test_get_file_name,
+};
+
+/*
+ * Tests that a file can be created.
+ */
+static int
+test_create_file(void)
+{
+ hid_t file_id = H5I_INVALID_HID;
+ char *prefixed_filename = NULL;
+
+ TESTING("H5Fcreate");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file aren't supported with this connector\n");
+ return 0;
+ }
+
+ if (prefix_filename(test_path_prefix, FILE_CREATE_TEST_FILENAME, &prefixed_filename) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't prefix filename\n");
+ goto error;
+ }
+
+ if ((file_id = H5Fcreate(prefixed_filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create file '%s'\n", prefixed_filename);
+ goto error;
+ }
+
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ HDfree(prefixed_filename);
+ prefixed_filename = NULL;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ HDfree(prefixed_filename);
+
+ return 1;
+}
+
+/*
+ * Tests that a file can't be created when H5Fcreate is passed
+ * invalid parameters.
+ */
+static int
+test_create_file_invalid_params(void)
+{
+ hid_t file_id = H5I_INVALID_HID;
+ char *prefixed_filename = NULL;
+
+ TESTING_MULTIPART("H5Fcreate with invalid parameters");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file aren't supported with this connector\n");
+ return 0;
+ }
+
+ if (prefix_filename(test_path_prefix, FILE_CREATE_INVALID_PARAMS_FILE_NAME, &prefixed_filename) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't prefix filename\n");
+ goto error;
+ }
+
+ BEGIN_MULTIPART
+ {
+ PART_BEGIN(H5Fcreate_invalid_name)
+ {
+ TESTING_2("H5Fcreate with invalid file name");
+
+ H5E_BEGIN_TRY
+ {
+ file_id = H5Fcreate(NULL, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (file_id >= 0) {
+ H5_FAILED();
+ HDprintf(" file was created with a NULL name!\n");
+ H5Fclose(file_id);
+ PART_ERROR(H5Fcreate_invalid_name);
+ }
+
+ H5E_BEGIN_TRY
+ {
+ file_id = H5Fcreate("", H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (file_id >= 0) {
+ H5_FAILED();
+ HDprintf(" file was created with an invalid name of ''!\n");
+ H5Fclose(file_id);
+ PART_ERROR(H5Fcreate_invalid_name);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Fcreate_invalid_name);
+
+ PART_BEGIN(H5Fcreate_invalid_flags)
+ {
+ TESTING_2("H5Fcreate with invalid flags");
+
+ H5E_BEGIN_TRY
+ {
+ file_id = H5Fcreate(prefixed_filename, H5F_ACC_RDWR, H5P_DEFAULT, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (file_id >= 0) {
+ H5_FAILED();
+ HDprintf(" file was created with invalid flag H5F_ACC_RDWR!\n");
+ H5Fclose(file_id);
+ PART_ERROR(H5Fcreate_invalid_flags);
+ }
+
+ H5E_BEGIN_TRY
+ {
+ file_id = H5Fcreate(prefixed_filename, H5F_ACC_CREAT, H5P_DEFAULT, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (file_id >= 0) {
+ H5_FAILED();
+ HDprintf(" file was created with invalid flag H5F_ACC_CREAT!\n");
+ H5Fclose(file_id);
+ PART_ERROR(H5Fcreate_invalid_flags);
+ }
+
+ H5E_BEGIN_TRY
+ {
+ file_id = H5Fcreate(prefixed_filename, H5F_ACC_SWMR_READ, H5P_DEFAULT, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (file_id >= 0) {
+ H5_FAILED();
+ HDprintf(" file was created with invalid flag H5F_ACC_SWMR_READ!\n");
+ H5Fclose(file_id);
+ PART_ERROR(H5Fcreate_invalid_flags);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Fcreate_invalid_flags);
+
+ PART_BEGIN(H5Fcreate_invalid_fcpl)
+ {
+ TESTING_2("H5Fcreate with invalid FCPL");
+
+ H5E_BEGIN_TRY
+ {
+ file_id = H5Fcreate(prefixed_filename, H5F_ACC_TRUNC, H5I_INVALID_HID, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (file_id >= 0) {
+ H5_FAILED();
+ HDprintf(" file was created with invalid FCPL!\n");
+ H5Fclose(file_id);
+ PART_ERROR(H5Fcreate_invalid_fcpl);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Fcreate_invalid_fcpl);
+ }
+ END_MULTIPART;
+
+ HDfree(prefixed_filename);
+ prefixed_filename = NULL;
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ /* Attempt to remove the file if it ended up being created. */
+ H5Fdelete(prefixed_filename, H5P_DEFAULT);
+
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ HDfree(prefixed_filename);
+
+ return 1;
+}
+
+/*
+ * Tests that file creation will fail when a file is created
+ * using the H5F_ACC_EXCL flag while the file already exists.
+ */
+static int
+test_create_file_excl(void)
+{
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t file_id2 = H5I_INVALID_HID;
+ char *prefixed_filename = NULL;
+
+ TESTING("H5Fcreate with H5F_ACC_EXCL/H5F_ACC_TRUNC flag");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file aren't supported with this connector\n");
+ return 0;
+ }
+
+ if (prefix_filename(test_path_prefix, FILE_CREATE_EXCL_FILE_NAME, &prefixed_filename) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't prefix filename\n");
+ goto error;
+ }
+
+ if ((file_id = H5Fcreate(prefixed_filename, H5F_ACC_EXCL, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create first file\n");
+ goto error;
+ }
+
+ /* Close the file */
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ /* Try again with H5F_ACC_EXCL. This should fail because the file already
+ * exists on disk from the previous steps.
+ */
+ H5E_BEGIN_TRY
+ {
+ file_id = H5Fcreate(prefixed_filename, H5F_ACC_EXCL, H5P_DEFAULT, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (file_id >= 0) {
+ H5_FAILED();
+ HDprintf(" created already existing file using H5F_ACC_EXCL flag!\n");
+ goto error;
+ }
+
+ /* Test creating with H5F_ACC_TRUNC. This will truncate the existing file on disk. */
+ if ((file_id = H5Fcreate(prefixed_filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't truncate the existing file\n");
+ goto error;
+ }
+
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ HDfree(prefixed_filename);
+ prefixed_filename = NULL;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Fclose(file_id);
+ H5Fclose(file_id2);
+ }
+ H5E_END_TRY;
+
+ HDfree(prefixed_filename);
+
+ return 1;
+}
+
+/*
+ * Tests that a file can be opened.
+ */
+static int
+test_open_file(void)
+{
+ hid_t file_id = H5I_INVALID_HID;
+
+ TESTING_MULTIPART("H5Fopen");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file aren't supported with this connector\n");
+ return 0;
+ }
+
+ BEGIN_MULTIPART
+ {
+ PART_BEGIN(H5Fopen_rdonly)
+ {
+ TESTING_2("H5Fopen in read-only mode");
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDONLY, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" unable to open file '%s' in read-only mode\n", H5_api_test_filename);
+ PART_ERROR(H5Fopen_rdonly);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Fopen_rdonly);
+
+ if (file_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+ file_id = H5I_INVALID_HID;
+ }
+
+ PART_BEGIN(H5Fopen_rdwrite)
+ {
+ TESTING_2("H5Fopen in read-write mode");
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" unable to open file '%s' in read-write mode\n", H5_api_test_filename);
+ PART_ERROR(H5Fopen_rdwrite);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Fopen_rdwrite);
+
+ if (file_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+ file_id = H5I_INVALID_HID;
+ }
+
+ /*
+ * XXX: SWMR open flags
+ */
+ }
+ END_MULTIPART;
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * Tests that a file can't be opened when H5Fopen is given
+ * invalid parameters.
+ */
+static int
+test_open_file_invalid_params(void)
+{
+ hid_t file_id = H5I_INVALID_HID;
+
+ TESTING_MULTIPART("H5Fopen with invalid parameters");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file aren't supported with this connector\n");
+ return 0;
+ }
+
+ BEGIN_MULTIPART
+ {
+ PART_BEGIN(H5Fopen_invalid_name)
+ {
+ TESTING_2("H5Fopen with invalid file name");
+
+ H5E_BEGIN_TRY
+ {
+ file_id = H5Fopen(NULL, H5F_ACC_RDWR, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (file_id >= 0) {
+ H5_FAILED();
+ HDprintf(" file was opened with a NULL name!\n");
+ H5Fclose(file_id);
+ PART_ERROR(H5Fopen_invalid_name);
+ }
+
+ H5E_BEGIN_TRY
+ {
+ file_id = H5Fopen("", H5F_ACC_RDWR, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (file_id >= 0) {
+ H5_FAILED();
+ HDprintf(" file was opened with an invalid name of ''!\n");
+ H5Fclose(file_id);
+ PART_ERROR(H5Fopen_invalid_name);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Fopen_invalid_name);
+
+ PART_BEGIN(H5Fopen_invalid_flags)
+ {
+ TESTING_2("H5Fopen with invalid flags");
+
+ H5E_BEGIN_TRY
+ {
+ file_id = H5Fopen(H5_api_test_filename, H5F_ACC_TRUNC, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (file_id >= 0) {
+ H5_FAILED();
+ HDprintf(" file was opened with invalid flag H5F_ACC_TRUNC!\n");
+ H5Fclose(file_id);
+ PART_ERROR(H5Fopen_invalid_flags);
+ }
+
+ H5E_BEGIN_TRY
+ {
+ file_id = H5Fopen(H5_api_test_filename, H5F_ACC_EXCL, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (file_id >= 0) {
+ H5_FAILED();
+ HDprintf(" file was opened with invalid flag H5F_ACC_EXCL!\n");
+ H5Fclose(file_id);
+ PART_ERROR(H5Fopen_invalid_flags);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Fopen_invalid_flags);
+ }
+ END_MULTIPART;
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to ensure that opening a file which doesn't exist will fail.
+ */
+static int
+test_open_nonexistent_file(void)
+{
+ hid_t file_id = H5I_INVALID_HID;
+ char *prefixed_filename = NULL;
+
+ TESTING("for invalid opening of a non-existent file");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file aren't supported with this connector\n");
+ return 0;
+ }
+
+ if (prefix_filename(test_path_prefix, NONEXISTENT_FILENAME, &prefixed_filename) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't prefix filename\n");
+ goto error;
+ }
+
+ /* XXX: Make sure to first delete the file so we know for sure it doesn't exist */
+
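+ /* Attempt to open the file; this should fail since it was never created */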
+ H5E_BEGIN_TRY
+ {
+ file_id = H5Fopen(prefixed_filename, H5F_ACC_RDWR, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (file_id >= 0) {
+ H5_FAILED();
+ HDprintf(" non-existent file was opened!\n");
+ goto error;
+ }
+
+ HDfree(prefixed_filename);
+ prefixed_filename = NULL;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ HDfree(prefixed_filename);
+
+ return 1;
+}
+
+/*
+ * Tests that a file opened with read-only access correctly rejects
+ * the creation of groups, datasets, attributes, and committed datatypes.
+ */
+static int
+test_file_permission(void)
+{
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t dspace_id = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t attr_id = H5I_INVALID_HID;
+ hid_t dtype_id = H5I_INVALID_HID;
+ char *prefixed_filename = NULL;
+ herr_t h5_ret = FAIL;
+
+ TESTING_MULTIPART("file permissions (invalid creation of objects in read-only file)");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & (H5VL_CAP_FLAG_FILE_BASIC)) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_STORED_DATATYPES)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file, group, dataset, attribute, stored datatype aren't "
+ "supported with this connector\n");
+ return 0;
+ }
+
+ TESTING_2("test setup");
+
+ if (prefix_filename(test_path_prefix, FILE_PERMISSION_TEST_FILENAME, &prefixed_filename) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't prefix filename\n");
+ goto error;
+ }
+
+ if ((file_id = H5Fcreate(prefixed_filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create file '%s'\n", prefixed_filename);
+ goto error;
+ }
+
+ if ((dspace_id = H5Screate(H5S_SCALAR)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create data space\n");
+ goto error;
+ }
+
+ if ((dset_id = H5Dcreate2(file_id, FILE_PERMISSION_TEST_DSET_NAME, H5T_STD_U32LE, dspace_id, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create data set: %s\n", FILE_PERMISSION_TEST_DSET_NAME);
+ goto error;
+ }
+
+ if (H5Dclose(dset_id) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ /* Open the file (with read-only permission) */
+ if ((file_id = H5Fopen(prefixed_filename, H5F_ACC_RDONLY, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file\n");
+ goto error;
+ }
+
+ PASSED();
+
+ BEGIN_MULTIPART
+ {
+ PART_BEGIN(H5Gcreate_rdonly_file)
+ {
+ TESTING_2("invalid creation of group in read-only file");
+
+ /* Create a group with the read-only file handle (should fail) */
+ H5E_BEGIN_TRY
+ {
+ group_id =
+ H5Gcreate2(file_id, FILE_PERMISSION_TEST_GRP_NAME, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (group_id >= 0) {
+ H5_FAILED();
+ HDprintf(" a group was created in a read-only file!\n");
+ PART_ERROR(H5Gcreate_rdonly_file);
+ }
+
+ H5E_BEGIN_TRY
+ {
+ group_id = H5Gcreate_anon(file_id, H5P_DEFAULT, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (group_id >= 0) {
+ H5_FAILED();
+ HDprintf(" a group was created in a read-only file!\n");
+ PART_ERROR(H5Gcreate_rdonly_file);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Gcreate_rdonly_file);
+
+ PART_BEGIN(H5Dcreate_rdonly_file)
+ {
+ TESTING_2("invalid creation of dataset in read-only file");
+
+ /* Create a dataset with the read-only file handle (should fail) */
+ H5E_BEGIN_TRY
+ {
+ dset_id = H5Dcreate2(file_id, FILE_PERMISSION_TEST_DSET2_NAME, H5T_STD_U32LE, dspace_id,
+ H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (dset_id >= 0) {
+ H5_FAILED();
+ HDprintf(" a dataset was created in a read-only file!\n");
+ PART_ERROR(H5Dcreate_rdonly_file);
+ }
+
+ H5E_BEGIN_TRY
+ {
+ dset_id = H5Dcreate_anon(file_id, H5T_STD_U32LE, dspace_id, H5P_DEFAULT, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (dset_id >= 0) {
+ H5_FAILED();
+ HDprintf(" a dataset was created in a read-only file!\n");
+ PART_ERROR(H5Dcreate_rdonly_file);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Dcreate_rdonly_file);
+
+ PART_BEGIN(H5Acreate_rdonly_file)
+ {
+ TESTING_2("invalid creation of attribute in read-only file");
+
+ /* Create an attribute with the read-only file handle (should fail) */
+ H5E_BEGIN_TRY
+ {
+ attr_id = H5Acreate2(file_id, FILE_PERMISSION_TEST_ATTR_NAME, H5T_NATIVE_INT, dspace_id,
+ H5P_DEFAULT, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (attr_id >= 0) {
+ H5_FAILED();
+ HDprintf(" an attribute was created in a read-only file!\n");
+ PART_ERROR(H5Acreate_rdonly_file);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Acreate_rdonly_file);
+
+ PART_BEGIN(H5Tcommit_rdonly_file)
+ {
+ TESTING_2("invalid creation of committed datatype in read-only file");
+
+ if ((dtype_id = H5Tcopy(H5T_NATIVE_INT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't copy a native datatype\n");
+ PART_ERROR(H5Tcommit_rdonly_file);
+ }
+
+ /* Commit a datatype with the read-only file handle (should fail) */
+ H5E_BEGIN_TRY
+ {
+ h5_ret = H5Tcommit2(file_id, FILE_PERMISSION_TEST_NAMED_DTYPE, dtype_id, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (h5_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" a named datatype was committed in a read-only file!\n");
+ PART_ERROR(H5Tcommit_rdonly_file);
+ }
+
+ H5E_BEGIN_TRY
+ {
+ h5_ret = H5Tcommit_anon(file_id, dtype_id, H5P_DEFAULT, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (h5_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" a named datatype was committed in a read-only file!\n");
+ PART_ERROR(H5Tcommit_rdonly_file);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Tcommit_rdonly_file);
+ }
+ END_MULTIPART;
+
+ TESTING_2("test cleanup");
+
+ if (H5Tclose(dtype_id) < 0)
+ TEST_ERROR;
+ if (H5Sclose(dspace_id) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ HDfree(prefixed_filename);
+ prefixed_filename = NULL;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(dspace_id);
+ H5Dclose(dset_id);
+ H5Aclose(attr_id);
+ H5Tclose(dtype_id);
+ H5Gclose(group_id);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ HDfree(prefixed_filename);
+
+ return 1;
+}
+
+/*
+ * A test to check that a file can be re-opened with H5Freopen.
+ */
+static int
+test_reopen_file(void)
+{
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t file_id2 = H5I_INVALID_HID;
+
+ TESTING("re-open of a file with H5Freopen");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file aren't supported with this connector\n");
+ return 0;
+ }
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file\n");
+ goto error;
+ }
+
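+ /* Re-open the file through the existing file ID; both IDs should refer to the same file */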
+ if ((file_id2 = H5Freopen(file_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't re-open file\n");
+ goto error;
+ }
+
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id2) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Fclose(file_id);
+ H5Fclose(file_id2);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to check that H5Fclose doesn't succeed for an
+ * invalid file ID.
+ */
+static int
+test_close_file_invalid_id(void)
+{
+ herr_t err_ret = -1;
+
+ TESTING("H5Fclose with an invalid ID");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file aren't supported with this connector\n");
+ return 0;
+ }
+
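+ /* Attempt to close an obviously invalid file ID; this should fail */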
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Fclose(H5I_INVALID_HID);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" closed an invalid file ID!\n");
+ goto error;
+ }
+
+ PASSED();
+
+ return 0;
+
+error:
+ return 1;
+}
+
+/*
+ * A test to check that a file can be flushed using H5Fflush.
+ */
+static int
+test_flush_file(void)
+{
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t dspace_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ char *prefixed_filename = NULL;
+ char dset_name[32];
+ unsigned u;
+
+ TESTING_MULTIPART("H5Fflush");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & (H5VL_CAP_FLAG_FILE_BASIC)) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_FLUSH_REFRESH)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file, dataset, or file flush aren't supported with this "
+ "connector\n");
+ return 0;
+ }
+
+ TESTING_2("test setup");
+
+ if (prefix_filename(test_path_prefix, FILE_FLUSH_TEST_FILENAME, &prefixed_filename) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't prefix filename\n");
+ goto error;
+ }
+
+ if ((file_id = H5Fcreate(prefixed_filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create file '%s'\n", prefixed_filename);
+ goto error;
+ }
+
+ /* Create multiple small datasets in file */
+ if ((dspace_id = H5Screate(H5S_SCALAR)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create data space\n");
+ goto error;
+ }
+
+ for (u = 0; u < 10; u++) {
+ HDsprintf(dset_name, "Dataset %u", u);
+
+ if ((dset_id = H5Dcreate2(file_id, dset_name, H5T_STD_U32LE, dspace_id, H5P_DEFAULT, H5P_DEFAULT,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create data set: %s\n", dset_name);
+ goto error;
+ }
+
+ if (H5Dclose(dset_id) < 0)
+ TEST_ERROR;
+ }
+
+ PASSED();
+
+ BEGIN_MULTIPART
+ {
+ PART_BEGIN(H5Fflush_local)
+ {
+ TESTING_2("file flushing at local scope");
+
+ if (H5Fflush(file_id, H5F_SCOPE_LOCAL) < 0) {
+ H5_FAILED();
+ HDprintf(" unable to flush file with scope H5F_SCOPE_LOCAL\n");
+ PART_ERROR(H5Fflush_local);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Fflush_local);
+
+ PART_BEGIN(H5Fflush_global)
+ {
+ TESTING_2("file flushing at global scope");
+
+ if (H5Fflush(file_id, H5F_SCOPE_GLOBAL) < 0) {
+ H5_FAILED();
+ HDprintf(" unable to flush file with scope H5F_SCOPE_GLOBAL\n");
+ PART_ERROR(H5Fflush_global);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Fflush_global);
+ }
+ END_MULTIPART;
+
+ TESTING_2("test cleanup");
+
+ if (H5Sclose(dspace_id) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ HDfree(prefixed_filename);
+ prefixed_filename = NULL;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(dspace_id);
+ H5Dclose(dset_id);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ HDfree(prefixed_filename);
+
+ return 1;
+}
+
+/*
+ * A test for H5Fis_accessible.
+ */
+static int
+test_file_is_accessible(void)
+{
+ const char *const fake_filename = "nonexistent_file.h5";
+ char *prefixed_filename = NULL;
+ htri_t is_accessible = FAIL;
+
+ TESTING_MULTIPART("H5Fis_accessible");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file aren't supported with this connector\n");
+ return 0;
+ }
+
+ if (prefix_filename(test_path_prefix, fake_filename, &prefixed_filename) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't prefix filename\n");
+ goto error;
+ }
+
+ BEGIN_MULTIPART
+ {
+ PART_BEGIN(H5Fis_accessible_valid_file)
+ {
+ TESTING_2("H5Fis_accessible on existing file");
+
+ if ((is_accessible = H5Fis_accessible(H5_api_test_filename, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if file '%s' is accessible with default FAPL\n",
+ H5_api_test_filename);
+ PART_ERROR(H5Fis_accessible_valid_file);
+ }
+
+ if (!is_accessible) {
+ H5_FAILED();
+ HDprintf(" file '%s' is not accessible with default FAPL\n", H5_api_test_filename);
+ PART_ERROR(H5Fis_accessible_valid_file);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Fis_accessible_valid_file);
+
+ is_accessible = -1;
+
+ PART_BEGIN(H5Fis_accessible_invalid_file)
+ {
+ TESTING_2("H5Fis_accessible on non-existing file");
+
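+ /* The call may fail or return false for a non-existent file; it must not report the file as accessible */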
+ H5E_BEGIN_TRY
+ {
+ is_accessible = H5Fis_accessible(prefixed_filename, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (is_accessible > 0) {
+ H5_FAILED();
+ HDprintf(" non-existent file '%s' was accessible with default FAPL: is_accessible=%d!\n",
+ prefixed_filename, is_accessible);
+ PART_ERROR(H5Fis_accessible_invalid_file);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Fis_accessible_invalid_file);
+ }
+ END_MULTIPART;
+
+ HDfree(prefixed_filename);
+ prefixed_filename = NULL;
+
+ return 0;
+
+error:
+ HDfree(prefixed_filename);
+
+ return 1;
+}
+
+/*
+ * A test to check that a FCPL used for file creation can
+ * be persisted and that a valid copy of that FCPL can be
+ * retrieved later with a call to H5Fget_create_plist. Also
+ * tests that a valid copy of a FAPL used for file access
+ * can be retrieved with a call to H5Fget_access_plist.
+ */
+static int
+test_file_property_lists(void)
+{
+ hsize_t prop_val = 0;
+ hid_t file_id1 = H5I_INVALID_HID;
+ hid_t file_id2 = H5I_INVALID_HID;
+ hid_t fcpl_id1 = H5I_INVALID_HID;
+ hid_t fcpl_id2 = H5I_INVALID_HID;
+ hid_t fapl_id1 = H5I_INVALID_HID;
+ hid_t fapl_id2 = H5I_INVALID_HID;
+ char *prefixed_filename1 = NULL;
+ char *prefixed_filename2 = NULL;
+
+ TESTING_MULTIPART("file property list operations");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_MORE) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_GET_PLIST)) {
+ SKIPPED();
+ HDprintf(" API functions for basic or more file or get property list aren't supported with this "
+ "connector\n");
+ return 0;
+ }
+
+ TESTING_2("test setup");
+
+ if (prefix_filename(test_path_prefix, FILE_PROPERTY_LIST_TEST_FNAME1, &prefixed_filename1) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't prefix filename\n");
+ goto error;
+ }
+ if (prefix_filename(test_path_prefix, FILE_PROPERTY_LIST_TEST_FNAME2, &prefixed_filename2) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't prefix filename\n");
+ goto error;
+ }
+
+ if ((fcpl_id1 = H5Pcreate(H5P_FILE_CREATE)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create FCPL\n");
+ goto error;
+ }
+
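+ /* Set a non-default userblock size so this FCPL can later be distinguished from a default FCPL */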
+ if (H5Pset_userblock(fcpl_id1, FILE_PROPERTY_LIST_TEST_FCPL_PROP_VAL) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to set test property on FCPL\n");
+ goto error;
+ }
+
+ if ((file_id1 = H5Fcreate(prefixed_filename1, H5F_ACC_TRUNC, fcpl_id1, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create file\n");
+ goto error;
+ }
+
+ if ((file_id2 = H5Fcreate(prefixed_filename2, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create file\n");
+ goto error;
+ }
+
+ if (H5Pclose(fcpl_id1) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ BEGIN_MULTIPART
+ {
+ PART_BEGIN(H5Fget_create_plist)
+ {
+ TESTING_2("H5Fget_create_plist");
+
+ /* Try to receive copies of the two property lists, one which has the property set and one which
+ * does not */
+ if ((fcpl_id1 = H5Fget_create_plist(file_id1)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get FCPL\n");
+ PART_ERROR(H5Fget_create_plist);
+ }
+
+ if ((fcpl_id2 = H5Fget_create_plist(file_id2)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get FCPL\n");
+ PART_ERROR(H5Fget_create_plist);
+ }
+
+ /* Ensure that property list 1 has the property set and property list 2 does not */
+ if (H5Pget_userblock(fcpl_id1, &prop_val) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve test property from FCPL\n");
+ PART_ERROR(H5Fget_create_plist);
+ }
+
+ if (prop_val != FILE_PROPERTY_LIST_TEST_FCPL_PROP_VAL) {
+ H5_FAILED();
+ HDprintf(" retrieved test property value '%llu' did not match expected value '%llu'\n",
+ (long long unsigned)prop_val,
+ (long long unsigned)FILE_PROPERTY_LIST_TEST_FCPL_PROP_VAL);
+ PART_ERROR(H5Fget_create_plist);
+ }
+
+ if (H5Pget_userblock(fcpl_id2, &prop_val) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve test property from FCPL\n");
+ PART_ERROR(H5Fget_create_plist);
+ }
+
+ if (prop_val == FILE_PROPERTY_LIST_TEST_FCPL_PROP_VAL) {
+ H5_FAILED();
+ HDprintf(" retrieved test property value '%llu' matched control value '%llu' when it "
+ "shouldn't have\n",
+ (long long unsigned)prop_val,
+ (long long unsigned)FILE_PROPERTY_LIST_TEST_FCPL_PROP_VAL);
+ PART_ERROR(H5Fget_create_plist);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Fget_create_plist);
+
+ PART_BEGIN(H5Fget_access_plist)
+ {
+ TESTING_2("H5Fget_access_plist");
+
+ /* Due to the nature of needing to supply a FAPL with the VOL connector having been set on it to
+ * the H5Fcreate() call, we cannot exactly test using H5P_DEFAULT as the FAPL for one of the
+ * create calls in this test. However, the use of H5Fget_access_plist() will still be used to
+ * check that the FAPL is correct after both creating and opening a file.
+ */
+ if ((fapl_id1 = H5Fget_access_plist(file_id1)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get FAPL\n");
+ PART_ERROR(H5Fget_access_plist);
+ }
+
+ if ((fapl_id2 = H5Fget_access_plist(file_id2)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get FAPL\n");
+ PART_ERROR(H5Fget_access_plist);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Fget_access_plist);
+
+ /* Now see if we can still retrieve copies of the property lists upon opening
+ * (instead of creating) a file. If they were reconstructed properly upon file
+ * open, the creation property lists should also have the same test values
+ * as set before.
+ */
+ if (fcpl_id1 >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Pclose(fcpl_id1);
+ }
+ H5E_END_TRY;
+ fcpl_id1 = H5I_INVALID_HID;
+ }
+ if (fcpl_id2 >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Pclose(fcpl_id2);
+ }
+ H5E_END_TRY;
+ fcpl_id2 = H5I_INVALID_HID;
+ }
+ if (fapl_id1 >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Pclose(fapl_id1);
+ }
+ H5E_END_TRY;
+ fapl_id1 = H5I_INVALID_HID;
+ }
+ if (fapl_id2 >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Pclose(fapl_id2);
+ }
+ H5E_END_TRY;
+ fapl_id2 = H5I_INVALID_HID;
+ }
+ if (file_id1 >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Fclose(file_id1);
+ }
+ H5E_END_TRY;
+ file_id1 = H5I_INVALID_HID;
+ }
+ if (file_id2 >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Fclose(file_id2);
+ }
+ H5E_END_TRY;
+ file_id2 = H5I_INVALID_HID;
+ }
+
+ PART_BEGIN(H5Fget_create_plist_reopened)
+ {
+ TESTING_2("H5Fget_create_plist after re-opening file");
+
+ if ((file_id1 = H5Fopen(prefixed_filename1, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file\n");
+ PART_ERROR(H5Fget_create_plist_reopened);
+ }
+
+ if ((file_id2 = H5Fopen(prefixed_filename2, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file\n");
+ PART_ERROR(H5Fget_create_plist_reopened);
+ }
+
+ if ((fcpl_id1 = H5Fget_create_plist(file_id1)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get FCPL\n");
+ PART_ERROR(H5Fget_create_plist_reopened);
+ }
+
+ if ((fcpl_id2 = H5Fget_create_plist(file_id2)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get FCPL\n");
+ PART_ERROR(H5Fget_create_plist_reopened);
+ }
+
+ /* Check the values of the test property */
+ if (H5Pget_userblock(fcpl_id1, &prop_val) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve test property from FCPL\n");
+ PART_ERROR(H5Fget_create_plist_reopened);
+ }
+
+ if (prop_val != FILE_PROPERTY_LIST_TEST_FCPL_PROP_VAL) {
+ H5_FAILED();
+ HDprintf(" retrieved test property value '%llu' did not match expected value '%llu'\n",
+ (long long unsigned)prop_val,
+ (long long unsigned)FILE_PROPERTY_LIST_TEST_FCPL_PROP_VAL);
+ PART_ERROR(H5Fget_create_plist_reopened);
+ }
+
+ if (H5Pget_userblock(fcpl_id2, &prop_val) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve test property from FCPL\n");
+ PART_ERROR(H5Fget_create_plist_reopened);
+ }
+
+ if (prop_val == FILE_PROPERTY_LIST_TEST_FCPL_PROP_VAL) {
+ H5_FAILED();
+ HDprintf(" retrieved test property value '%llu' matched control value '%llu' when it "
+ "shouldn't have\n",
+ (long long unsigned)prop_val,
+ (long long unsigned)FILE_PROPERTY_LIST_TEST_FCPL_PROP_VAL);
+ PART_ERROR(H5Fget_create_plist_reopened);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Fget_create_plist_reopened);
+ }
+ END_MULTIPART;
+
+ TESTING_2("test cleanup");
+
+ if (H5Pclose(fcpl_id1) < 0)
+ TEST_ERROR;
+ if (H5Pclose(fcpl_id2) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id1) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id2) < 0)
+ TEST_ERROR;
+
+ HDfree(prefixed_filename1);
+ prefixed_filename1 = NULL;
+ HDfree(prefixed_filename2);
+ prefixed_filename2 = NULL;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Pclose(fcpl_id1);
+ H5Pclose(fcpl_id2);
+ H5Pclose(fapl_id1);
+ H5Pclose(fapl_id2);
+ H5Fclose(file_id1);
+ H5Fclose(file_id2);
+ }
+ H5E_END_TRY;
+
+ HDfree(prefixed_filename1);
+ HDfree(prefixed_filename2);
+
+ return 1;
+}
+
+/*
+ * A test to check that the file intent flags can be retrieved.
+ */
+static int
+test_get_file_intent(void)
+{
+ unsigned file_intent;
+ hid_t file_id = H5I_INVALID_HID;
+ char *prefixed_filename = NULL;
+
+ TESTING_MULTIPART("retrieval of file intent with H5Fget_intent");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_MORE)) {
+ SKIPPED();
+ HDprintf(" API functions for basic or more file aren't supported with this connector\n");
+ return 0;
+ }
+
+ TESTING_2("test setup");
+
+ if (prefix_filename(test_path_prefix, FILE_INTENT_TEST_FILENAME, &prefixed_filename) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't prefix filename\n");
+ goto error;
+ }
+
+ /* Test that file intent retrieval works correctly for file create */
+ if ((file_id = H5Fcreate(prefixed_filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create file '%s'\n", prefixed_filename);
+ goto error;
+ }
+
+ PASSED();
+
+ BEGIN_MULTIPART
+ {
+ PART_BEGIN(H5Fget_intent_file_creation)
+ {
+ TESTING_2("H5Fget_intent on newly-created file");
+
+ if (H5Fget_intent(file_id, &file_intent) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve file intent\n");
+ PART_ERROR(H5Fget_intent_file_creation);
+ }
+
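+ /* Newly-created files should always have read-write intent */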
+ if (H5F_ACC_RDWR != file_intent) {
+ H5_FAILED();
+ HDprintf(" received incorrect file intent for file creation\n");
+ PART_ERROR(H5Fget_intent_file_creation);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Fget_intent_file_creation);
+
+ if (file_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+ file_id = H5I_INVALID_HID;
+ }
+
+ PART_BEGIN(H5Fget_intent_rdonly_file_open)
+ {
+ TESTING_2("H5Fget_intent for file opened read-only");
+
+ /* Test that file intent retrieval works correctly for file open */
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDONLY, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ PART_ERROR(H5Fget_intent_rdonly_file_open);
+ }
+
+ if (H5Fget_intent(file_id, &file_intent) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve file intent\n");
+ PART_ERROR(H5Fget_intent_rdonly_file_open);
+ }
+
+ if (H5F_ACC_RDONLY != file_intent) {
+ H5_FAILED();
+ HDprintf(" received incorrect file intent for read-only file open\n");
+ PART_ERROR(H5Fget_intent_rdonly_file_open);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Fget_intent_rdonly_file_open);
+
+ if (file_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+ file_id = H5I_INVALID_HID;
+ }
+
+ PART_BEGIN(H5Fget_intent_rdwrite_file_open)
+ {
+ TESTING_2("H5Fget_intent for file opened read-write");
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ PART_ERROR(H5Fget_intent_rdwrite_file_open);
+ }
+
+ if (H5Fget_intent(file_id, &file_intent) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve file intent\n");
+ PART_ERROR(H5Fget_intent_rdwrite_file_open);
+ }
+
+ if (H5F_ACC_RDWR != file_intent) {
+ H5_FAILED();
+ HDprintf(" received incorrect file intent\n");
+ PART_ERROR(H5Fget_intent_rdwrite_file_open);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Fget_intent_rdwrite_file_open);
+
+ if (file_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+ file_id = H5I_INVALID_HID;
+ }
+ }
+ END_MULTIPART;
+
+ HDfree(prefixed_filename);
+ prefixed_filename = NULL;
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ HDfree(prefixed_filename);
+
+ return 1;
+}
+
+/*
+ * A test to check that the number of open objects and IDs of objects in a file
+ * can be retrieved.
+ */
+static int
+test_get_file_obj_count(void)
+{
+ ssize_t obj_count;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t file_id2 = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t object_id = H5I_INVALID_HID;
+ hid_t named_dtype_id = H5I_INVALID_HID;
+ hid_t attr_id = H5I_INVALID_HID;
+ hid_t dspace_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ char *prefixed_filename1 = NULL;
+ char *prefixed_filename2 = NULL;
+
+ TESTING_MULTIPART("retrieval of open object number and IDs");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_MORE) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_STORED_DATATYPES) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC)) {
+ SKIPPED();
+ HDprintf(" API functions for basic or more file, basic dataset, group, datatype, or attribute "
+ "aren't supported with this connector\n");
+ return 0;
+ }
+
+ TESTING_2("test setup");
+
+ if (prefix_filename(test_path_prefix, GET_OBJ_COUNT_TEST_FILENAME1, &prefixed_filename1) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't prefix filename\n");
+ goto error;
+ }
+ if (prefix_filename(test_path_prefix, GET_OBJ_COUNT_TEST_FILENAME2, &prefixed_filename2) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't prefix filename\n");
+ goto error;
+ }
+
+ if ((file_id = H5Fcreate(prefixed_filename1, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create file '%s'\n", prefixed_filename1);
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(file_id, GET_OBJ_COUNT_TEST_GRP_NAME, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create group '%s'\n", GET_OBJ_COUNT_TEST_GRP_NAME);
+ goto error;
+ }
+
+ /* Create a second file while keeping the first file open */
+ if ((file_id2 = H5Fcreate(prefixed_filename2, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create file '%s'\n", prefixed_filename2);
+ goto error;
+ }
+
+ /* Create a named datatype */
+ if ((named_dtype_id = H5Tcopy(H5T_NATIVE_INT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't copy a native datatype\n");
+ goto error;
+ }
+
+ if (H5Tcommit2(file_id2, GET_OBJ_COUNT_TEST_NAMED_DTYPE, named_dtype_id, H5P_DEFAULT, H5P_DEFAULT,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't commit a named datatype\n");
+ goto error;
+ }
+
+ /* Create a dataspace for the attribute and dataset */
+ if ((dspace_id = H5Screate(H5S_SCALAR)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create data space for attribute\n");
+ goto error;
+ }
+
+ /* Create an attribute for the second file */
+ if ((attr_id = H5Acreate2(file_id2, GET_OBJ_COUNT_TEST_ATTR_NAME, H5T_NATIVE_INT, dspace_id, H5P_DEFAULT,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create the attribute '%s'\n", GET_OBJ_COUNT_TEST_ATTR_NAME);
+ goto error;
+ }
+
+ /* Create a dataset for the second file */
+ if ((dset_id = H5Dcreate2(file_id2, GET_OBJ_COUNT_TEST_DSET_NAME, H5T_NATIVE_INT, dspace_id, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create the dataset '%s'\n", GET_OBJ_COUNT_TEST_DSET_NAME);
+ goto error;
+ }
+
+ PASSED();
+
+ BEGIN_MULTIPART
+ {
+ PART_BEGIN(H5Fget_obj_count_files)
+ {
+ TESTING_2("H5Fget_obj_count for files");
+
+ /* Get the number of files currently opened */
+ if ((obj_count = H5Fget_obj_count((hid_t)H5F_OBJ_ALL, H5F_OBJ_FILE)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get the number of open files\n");
+ PART_ERROR(H5Fget_obj_count_files);
+ }
+
+ if (obj_count != 2) {
+ H5_FAILED();
+ HDprintf(" number of open files (%ld) did not match expected number (2)\n", obj_count);
+ PART_ERROR(H5Fget_obj_count_files);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Fget_obj_count_files);
+
+ PART_BEGIN(H5Fget_obj_count_grps_single_file)
+ {
+ TESTING_2("H5Fget_obj_count for groups in single file");
+
+ /* Get the number of groups */
+ if ((obj_count = H5Fget_obj_count(file_id, H5F_OBJ_GROUP)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't retrieve number of open groups\n");
+ PART_ERROR(H5Fget_obj_count_grps_single_file);
+ }
+
+ if (obj_count != 1) {
+ H5_FAILED();
+ HDprintf(" number of open groups (%ld) did not match expected number (1)\n", obj_count);
+ PART_ERROR(H5Fget_obj_count_grps_single_file);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Fget_obj_count_grps_single_file);
+
+ PART_BEGIN(H5Fget_obj_count_grps)
+ {
+ TESTING_2("H5Fget_obj_count for groups");
+
+ /* Get the number of groups in two opened files */
+ if ((obj_count = H5Fget_obj_count((hid_t)H5F_OBJ_ALL, H5F_OBJ_GROUP)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get the number of open groups\n");
+ PART_ERROR(H5Fget_obj_count_grps);
+ }
+
+ if (obj_count != 1) {
+ H5_FAILED();
+ HDprintf(" number of open groups (%ld) did not match expected number (1)\n", obj_count);
+ PART_ERROR(H5Fget_obj_count_grps);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Fget_obj_count_grps);
+
+ PART_BEGIN(H5Fget_obj_count_types)
+ {
+ TESTING_2("H5Fget_obj_count for datatypes");
+#ifndef WRONG_DATATYPE_OBJ_COUNT
+ /* Get the number of open named datatypes in the two opened files */
+ if ((obj_count = H5Fget_obj_count((hid_t)H5F_OBJ_ALL, H5F_OBJ_DATATYPE)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get the number of open named datatypes\n");
+ PART_ERROR(H5Fget_obj_count_types);
+ }
+
+ if (obj_count != 1) {
+ H5_FAILED();
+ HDprintf(" number of open named datatypes (%ld) did not match expected number (1)\n",
+ obj_count);
+ PART_ERROR(H5Fget_obj_count_types);
+ }
+
+ PASSED();
+#else
+ SKIPPED();
+ PART_EMPTY(H5Fget_obj_count_types);
+#endif
+ }
+ PART_END(H5Fget_obj_count_types);
+
+ PART_BEGIN(H5Fget_obj_count_attrs)
+ {
+ TESTING_2("H5Fget_obj_count for attributes");
+
+ /* Get the number of open attributes in the two opened files */
+ if ((obj_count = H5Fget_obj_count((hid_t)H5F_OBJ_ALL, H5F_OBJ_ATTR)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get the number of open attributes\n");
+ PART_ERROR(H5Fget_obj_count_attrs);
+ }
+
+ if (obj_count != 1) {
+ H5_FAILED();
+ HDprintf(" number of open attributes (%ld) did not match expected number (1)\n",
+ obj_count);
+ PART_ERROR(H5Fget_obj_count_attrs);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Fget_obj_count_attrs);
+
+ PART_BEGIN(H5Fget_obj_count_dsets)
+ {
+ TESTING_2("H5Fget_obj_count for datasets");
+
+ /* Get the number of open datasets in the two opened files */
+ if ((obj_count = H5Fget_obj_count((hid_t)H5F_OBJ_ALL, H5F_OBJ_DATASET)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get the number of open datasets\n");
+ PART_ERROR(H5Fget_obj_count_dsets);
+ }
+
+ if (obj_count != 1) {
+ H5_FAILED();
+ HDprintf(" number of open datasets (%ld) did not match expected number (1)\n", obj_count);
+ PART_ERROR(H5Fget_obj_count_dsets);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Fget_obj_count_dsets);
+
+ PART_BEGIN(H5Fget_obj_count_all_single_file)
+ {
+ TESTING_2("H5Fget_obj_count for all object types in single file");
+
+ /* Get the number of all open objects */
+ if ((obj_count = H5Fget_obj_count(file_id, H5F_OBJ_ALL)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't retrieve number of open objects\n");
+ PART_ERROR(H5Fget_obj_count_all_single_file);
+ }
+
+ /* One for the file and another for the group */
+ if (obj_count != 2) {
+ H5_FAILED();
+ HDprintf(" number of open objects (%ld) did not match expected number (2)\n", obj_count);
+ PART_ERROR(H5Fget_obj_count_all_single_file);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Fget_obj_count_all_single_file);
+
+ PART_BEGIN(H5Fget_obj_count_all)
+ {
+ TESTING_2("H5Fget_obj_count for all object types");
+#ifndef WRONG_DATATYPE_OBJ_COUNT
+ /* Get the number of all open objects */
+ if ((obj_count = H5Fget_obj_count(H5F_OBJ_ALL, H5F_OBJ_ALL)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't retrieve number of open objects\n");
+ PART_ERROR(H5Fget_obj_count_all);
+ }
+
+ if (obj_count != 6) {
+ H5_FAILED();
+ HDprintf(" number of open objects (%ld) did not match expected number (6)\n", obj_count);
+ PART_ERROR(H5Fget_obj_count_all);
+ }
+
+ PASSED();
+#else
+ SKIPPED();
+ PART_EMPTY(H5Fget_obj_count_all);
+#endif
+ }
+ PART_END(H5Fget_obj_count_all);
+
+ PART_BEGIN(H5Fget_obj_ids_singular_grp)
+ {
+ TESTING_2("H5Fget_obj_ids for a singular group");
+
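+ /* Retrieve the IDs of the open groups in the first file; only the group created during setup should be returned */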
+ if (H5Fget_obj_ids(file_id, H5F_OBJ_GROUP, (size_t)obj_count, &object_id) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get opened group IDs\n");
+ PART_ERROR(H5Fget_obj_ids_singular_grp);
+ }
+
+ if (object_id != group_id) {
+ H5_FAILED();
+ HDprintf(" opened object ID (%ld) did not match only currently open group ID (%ld)\n",
+ object_id, group_id);
+ PART_ERROR(H5Fget_obj_ids_singular_grp);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Fget_obj_ids_singular_grp);
+ }
+ END_MULTIPART;
+
+ TESTING_2("test cleanup");
+
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Sclose(dspace_id) < 0)
+ TEST_ERROR;
+ if (H5Aclose(attr_id) < 0)
+ TEST_ERROR;
+ if (H5Tclose(named_dtype_id) < 0)
+ TEST_ERROR;
+ if (H5Dclose(dset_id) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id2) < 0)
+ TEST_ERROR;
+
+ HDfree(prefixed_filename1);
+ prefixed_filename1 = NULL;
+ HDfree(prefixed_filename2);
+ prefixed_filename2 = NULL;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Gclose(group_id);
+ H5Tclose(named_dtype_id);
+ H5Sclose(dspace_id);
+ H5Aclose(attr_id);
+ H5Dclose(dset_id);
+ H5Fclose(file_id);
+ H5Fclose(file_id2);
+ }
+ H5E_END_TRY;
+
+ HDfree(prefixed_filename1);
+ HDfree(prefixed_filename2);
+
+ return 1;
+}
+
+/*
+ * A test to check that opening the same file multiple times
+ * with overlapping file handles works correctly.
+ */
+static int
+test_file_open_overlap(void)
+{
+#ifndef NO_DOUBLE_OBJECT_OPENS
+ ssize_t obj_count;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t file_id2 = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t dspace_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ char *prefixed_filename = NULL;
+#endif
+
+ TESTING("overlapping file opens");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_MORE) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC)) {
+ SKIPPED();
+ HDprintf(" API functions for basic or more file, dataset, or group aren't supported with this "
+ "connector\n");
+ return 0;
+ }
+
+#ifndef NO_DOUBLE_OBJECT_OPENS
+ if (prefix_filename(test_path_prefix, OVERLAPPING_FILENAME, &prefixed_filename) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't prefix filename\n");
+ goto error;
+ }
+
+ if ((file_id = H5Fcreate(prefixed_filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create file '%s'\n", prefixed_filename);
+ goto error;
+ }
+
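+ /* Open the same file a second time while the first file ID is still open */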
+ if ((file_id2 = H5Fopen(prefixed_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", prefixed_filename);
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(file_id, OVERLAPPING_OPEN_TEST_GRP_NAME, H5P_DEFAULT, H5P_DEFAULT,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create group '%s'\n", OVERLAPPING_OPEN_TEST_GRP_NAME);
+ goto error;
+ }
+
+ /* Create a dataspace for the dataset */
+ if ((dspace_id = H5Screate(H5S_SCALAR)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create data space for dataset\n");
+ goto error;
+ }
+
+ /* Create a dataset in the group of the first file */
+ if ((dset_id = H5Dcreate2(group_id, OVERLAPPING_OPEN_TEST_DSET_NAME, H5T_NATIVE_INT, dspace_id,
+ H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create the dataset '%s'\n", OVERLAPPING_OPEN_TEST_DSET_NAME);
+ goto error;
+ }
+
+ /* Get the number of objects opened in the first file: 3 == file + dataset + group */
+ if ((obj_count = H5Fget_obj_count(file_id, H5F_OBJ_LOCAL | H5F_OBJ_ALL)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't retrieve the number of objects opened in the file\n");
+ goto error;
+ }
+
+ if (obj_count != 3) {
+ H5_FAILED();
+ HDprintf(" number of objects opened in file (%ld) did not match expected number (3)\n", obj_count);
+ goto error;
+ }
+
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Dclose(dset_id) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ /* Create a dataset in the second file */
+ if ((dset_id = H5Dcreate2(file_id2, OVERLAPPING_OPEN_TEST_DSET_NAME, H5T_NATIVE_INT, dspace_id,
+ H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create the dataset '%s'\n", OVERLAPPING_OPEN_TEST_DSET_NAME);
+ goto error;
+ }
+
+ /* Get the number of objects opened through the second file handle: 2 == file + dataset */
+ if ((obj_count = H5Fget_obj_count(file_id2, H5F_OBJ_ALL)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't retrieve the number of objects opened in the file\n");
+ goto error;
+ }
+
+ if (obj_count != 2) {
+ H5_FAILED();
+ HDprintf(" number of objects opened in the file (%ld) did not match expected number (2)\n",
+ obj_count);
+ goto error;
+ }
+
+ if (H5Sclose(dspace_id) < 0)
+ TEST_ERROR;
+ if (H5Dclose(dset_id) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id2) < 0)
+ TEST_ERROR;
+
+ HDfree(prefixed_filename);
+ prefixed_filename = NULL;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Gclose(group_id);
+ H5Sclose(dspace_id);
+ H5Dclose(dset_id);
+ H5Fclose(file_id);
+ H5Fclose(file_id2);
+ }
+ H5E_END_TRY;
+
+ HDfree(prefixed_filename);
+
+ return 1;
+#else
+ SKIPPED();
+ return 0;
+#endif
+}
+
+/*
+ * A test to check that file mounting and unmounting works
+ * correctly.
+ */
+static int
+test_file_mounts(void)
+{
+#ifndef NO_FILE_MOUNTS
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t child_fid = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ char *prefixed_filename = NULL;
+#endif
+
+ TESTING("file mounting/unmounting");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & (H5VL_CAP_FLAG_FILE_BASIC)) || !(vol_cap_flags_g & H5VL_CAP_FLAG_MOUNT) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file, file mount, or basic group aren't supported with this "
+ "connector\n");
+ return 0;
+ }
+
+#ifndef NO_FILE_MOUNTS
+ if (prefix_filename(test_path_prefix, FILE_MOUNT_TEST_FILENAME, &prefixed_filename) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't prefix filename\n");
+ goto error;
+ }
+
+ if ((file_id = H5Fcreate(prefixed_filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create file '%s'\n", prefixed_filename);
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(file_id, FILE_MOUNT_TEST_GRP_NAME, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create group '%s'\n", FILE_MOUNT_TEST_GRP_NAME);
+ goto error;
+ }
+
+ if ((child_fid = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ /* Mount one file (child_fid) to the group of another file (file_id) */
+ if (H5Fmount(file_id, FILE_MOUNT_TEST_GRP_NAME, child_fid, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't mount file\n");
+ goto error;
+ }
+
+ if (H5Funmount(file_id, FILE_MOUNT_TEST_GRP_NAME) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't mount file\n");
+ goto error;
+ }
+
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+ if (H5Fclose(child_fid) < 0)
+ TEST_ERROR;
+
+ HDfree(prefixed_filename);
+ prefixed_filename = NULL;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Gclose(group_id);
+ H5Fclose(file_id);
+ H5Fclose(child_fid);
+ }
+ H5E_END_TRY;
+
+ HDfree(prefixed_filename);
+
+ return 1;
+#else
+ SKIPPED();
+ return 0;
+#endif
+}
+
+/*
+ * A test to ensure that a file's name can be retrieved.
+ */
+static int
+test_get_file_name(void)
+{
+ ssize_t file_name_buf_len = 0;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t dspace_id = H5I_INVALID_HID;
+ hid_t attr_id = H5I_INVALID_HID;
+ hid_t named_dtype_id = H5I_INVALID_HID;
+ char *prefixed_filename = NULL;
+ char *file_name_buf = NULL;
+
+ TESTING_MULTIPART("retrieval of file name");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_MORE) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_STORED_DATATYPES) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC)) {
+ SKIPPED();
+ HDprintf(" API functions for basic or more file, basic dataset, group, datatype, or attribute "
+ "aren't supported with this connector\n");
+ return 0;
+ }
+
+ TESTING_2("test setup");
+
+ if (prefix_filename(test_path_prefix, GET_FILE_NAME_TEST_FNAME, &prefixed_filename) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't prefix filename\n");
+ goto error;
+ }
+
+ if ((file_id = H5Fcreate(prefixed_filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create file '%s'\n", prefixed_filename);
+ goto error;
+ }
+
+ /* Retrieve the size of the file name */
+ if ((file_name_buf_len = H5Fget_name(file_id, NULL, 0)) < 0)
+ TEST_ERROR;
+
+ /* Allocate buffer for file name */
+ if (NULL == (file_name_buf = (char *)HDmalloc((size_t)file_name_buf_len + 1)))
+ TEST_ERROR;
+
+ PASSED();
+
+ BEGIN_MULTIPART
+ {
+ PART_BEGIN(H5Fget_name_file_id)
+ {
+ TESTING_2("H5Fget_name using file ID");
+
+ memset(file_name_buf, 0, (size_t)file_name_buf_len);
+
+ /* Retrieve the actual file name */
+ if (H5Fget_name(file_id, file_name_buf, (size_t)file_name_buf_len + 1) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get file name %s\n", prefixed_filename);
+ PART_ERROR(H5Fget_name_file_id);
+ }
+
+ if (HDstrncmp(file_name_buf, prefixed_filename, (size_t)file_name_buf_len)) {
+ H5_FAILED();
+ HDprintf(" file name '%s' didn't match expected name '%s'\n", file_name_buf,
+ prefixed_filename);
+ PART_ERROR(H5Fget_name_file_id);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Fget_name_file_id);
+
+ PART_BEGIN(H5Fget_name_grp_id)
+ {
+ TESTING_2("H5Fget_name using non-root group ID");
+
+ /* Attempt to retrieve the name of the file from an object that isn't the root group */
+ memset(file_name_buf, 0, (size_t)file_name_buf_len);
+
+ if ((group_id = H5Gcreate2(file_id, GET_FILE_NAME_TEST_GRP_NAME, H5P_DEFAULT, H5P_DEFAULT,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to create group '%s'\n", GET_FILE_NAME_TEST_GRP_NAME);
+ PART_ERROR(H5Fget_name_grp_id);
+ }
+
+ if (H5Fget_name(group_id, file_name_buf, (size_t)file_name_buf_len + 1) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get file name %s\n", prefixed_filename);
+ PART_ERROR(H5Fget_name_grp_id);
+ }
+
+ if (HDstrncmp(file_name_buf, prefixed_filename, (size_t)file_name_buf_len)) {
+ H5_FAILED();
+ HDprintf(" file name '%s' didn't match expected name '%s'\n", file_name_buf,
+ prefixed_filename);
+ PART_ERROR(H5Fget_name_grp_id);
+ }
+
+ if (group_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Gclose(group_id);
+ }
+ H5E_END_TRY;
+ group_id = H5I_INVALID_HID;
+ }
+
+ PASSED();
+ }
+ PART_END(H5Fget_name_grp_id);
+
+ PART_BEGIN(H5Fget_name_dset_id)
+ {
+ TESTING_2("H5Fget_name using dataset ID");
+
+ if ((dspace_id = H5Screate(H5S_SCALAR)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create dataspace\n");
+ PART_ERROR(H5Fget_name_dset_id);
+ }
+
+ /* Create a dataset in the file */
+ if ((dset_id = H5Dcreate2(file_id, GET_FILE_NAME_TEST_DSET_NAME, H5T_NATIVE_INT, dspace_id,
+ H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create the dataset '%s'\n", GET_FILE_NAME_TEST_DSET_NAME);
+ PART_ERROR(H5Fget_name_dset_id);
+ }
+
+ /* Get and verify file name from the dataset */
+ if (H5Fget_name(dset_id, file_name_buf, (size_t)file_name_buf_len + 1) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get file name %s\n", prefixed_filename);
+ PART_ERROR(H5Fget_name_dset_id);
+ }
+
+ if (HDstrncmp(file_name_buf, prefixed_filename, (size_t)file_name_buf_len)) {
+ H5_FAILED();
+ HDprintf(" file name '%s' didn't match expected name '%s'\n", file_name_buf,
+ prefixed_filename);
+ PART_ERROR(H5Fget_name_dset_id);
+ }
+
+ if (dspace_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(dspace_id);
+ }
+ H5E_END_TRY;
+ dspace_id = H5I_INVALID_HID;
+ }
+ if (dset_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Dclose(dset_id);
+ }
+ H5E_END_TRY;
+ dset_id = H5I_INVALID_HID;
+ }
+
+ PASSED();
+ }
+ PART_END(H5Fget_name_dset_id);
+
+ PART_BEGIN(H5Fget_name_attr_id)
+ {
+ TESTING_2("H5Fget_name using attribute ID");
+
+ if ((dspace_id = H5Screate(H5S_SCALAR)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create dataspace\n");
+ PART_ERROR(H5Fget_name_attr_id);
+ }
+
+ /* Create an attribute on the file */
+ if ((attr_id = H5Acreate2(file_id, GET_FILE_NAME_TEST_ATTR_NAME, H5T_NATIVE_INT, dspace_id,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create the attribute '%s'\n", GET_FILE_NAME_TEST_ATTR_NAME);
+ PART_ERROR(H5Fget_name_attr_id);
+ }
+
+ /* Get and verify file name from the attribute */
+ if (H5Fget_name(attr_id, file_name_buf, (size_t)file_name_buf_len + 1) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get file name %s\n", prefixed_filename);
+ PART_ERROR(H5Fget_name_attr_id);
+ }
+
+ if (HDstrncmp(file_name_buf, prefixed_filename, (size_t)file_name_buf_len)) {
+ H5_FAILED();
+ HDprintf(" file name '%s' didn't match expected name '%s'\n", file_name_buf,
+ prefixed_filename);
+ PART_ERROR(H5Fget_name_attr_id);
+ }
+
+ if (dspace_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(dspace_id);
+ }
+ H5E_END_TRY;
+ dspace_id = H5I_INVALID_HID;
+ }
+ if (attr_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Aclose(attr_id);
+ }
+ H5E_END_TRY;
+ attr_id = H5I_INVALID_HID;
+ }
+
+ PASSED();
+ }
+ PART_END(H5Fget_name_attr_id);
+
+ PART_BEGIN(H5Fget_name_dtype_id)
+ {
+ TESTING_2("H5Fget_name using committed datatype ID");
+
+ /* Create a named datatype */
+ if ((named_dtype_id = H5Tcopy(H5T_NATIVE_INT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't copy a native datatype\n");
+ PART_ERROR(H5Fget_name_dtype_id);
+ }
+
+ if (H5Tcommit2(file_id, GET_FILE_NAME_TEST_NAMED_DTYPE, named_dtype_id, H5P_DEFAULT, H5P_DEFAULT,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't commit a named datatype\n");
+ PART_ERROR(H5Fget_name_dtype_id);
+ }
+
+ /* Get and verify file name from the committed datatype */
+ if (H5Fget_name(named_dtype_id, file_name_buf, (size_t)file_name_buf_len + 1) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get file name %s\n", prefixed_filename);
+ PART_ERROR(H5Fget_name_dtype_id);
+ }
+
+ if (HDstrncmp(file_name_buf, prefixed_filename, (size_t)file_name_buf_len)) {
+ H5_FAILED();
+ HDprintf(" file name '%s' didn't match expected name '%s'\n", file_name_buf,
+ prefixed_filename);
+ PART_ERROR(H5Fget_name_dtype_id);
+ }
+
+ if (named_dtype_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Tclose(named_dtype_id);
+ }
+ H5E_END_TRY;
+ named_dtype_id = H5I_INVALID_HID;
+ }
+
+ PASSED();
+ }
+ PART_END(H5Fget_name_dtype_id);
+
+ PART_BEGIN(H5Fget_name_dspace_id)
+ {
+ ssize_t name_len = 0;
+
+ TESTING_2("invalid H5Fget_name using dataspace ID");
+
+ if ((dspace_id = H5Screate(H5S_SCALAR)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create dataspace\n");
+ PART_ERROR(H5Fget_name_dspace_id);
+ }
+
+ /* Try to get the file name from a dataspace. This is expected to fail
+ * because it's an illegal operation. */
+ H5E_BEGIN_TRY
+ {
+ name_len = H5Fget_name(dspace_id, file_name_buf, (size_t)file_name_buf_len + 1);
+ }
+ H5E_END_TRY;
+
+ if (name_len >= 0) {
+ H5_FAILED();
+ HDprintf(" retrieved file name using H5Fget_name on a dataspace ID!\n");
+ PART_ERROR(H5Fget_name_dspace_id);
+ }
+
+ if (dspace_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(dspace_id);
+ }
+ H5E_END_TRY;
+ dspace_id = H5I_INVALID_HID;
+ }
+
+ PASSED();
+ }
+ PART_END(H5Fget_name_dspace_id);
+ }
+ END_MULTIPART;
+
+ TESTING_2("test cleanup");
+
+ if (file_name_buf) {
+ HDfree(file_name_buf);
+ file_name_buf = NULL;
+ }
+
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ HDfree(prefixed_filename);
+ prefixed_filename = NULL;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ if (file_name_buf)
+ HDfree(file_name_buf);
+ H5Tclose(named_dtype_id);
+ H5Sclose(dspace_id);
+ H5Dclose(dset_id);
+ H5Aclose(attr_id);
+ H5Gclose(group_id);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ HDfree(prefixed_filename);
+
+ return 1;
+}
+
+/*
+ * Cleanup temporary test files
+ */
+static void
+cleanup_files(void)
+{
+ remove_test_file(test_path_prefix, FILE_CREATE_TEST_FILENAME);
+ remove_test_file(test_path_prefix, FILE_CREATE_EXCL_FILE_NAME);
+
+ /* The file below should not have been created, so there is nothing to remove */
+ /* remove_test_file(test_path_prefix, FILE_CREATE_INVALID_PARAMS_FILE_NAME); */
+
+#ifndef NO_DOUBLE_OBJECT_OPENS
+ remove_test_file(test_path_prefix, OVERLAPPING_FILENAME);
+#endif
+ remove_test_file(test_path_prefix, FILE_PERMISSION_TEST_FILENAME);
+ remove_test_file(test_path_prefix, FILE_FLUSH_TEST_FILENAME);
+ remove_test_file(test_path_prefix, FILE_PROPERTY_LIST_TEST_FNAME1);
+ remove_test_file(test_path_prefix, FILE_PROPERTY_LIST_TEST_FNAME2);
+ remove_test_file(test_path_prefix, FILE_INTENT_TEST_FILENAME);
+ remove_test_file(test_path_prefix, GET_OBJ_COUNT_TEST_FILENAME1);
+ remove_test_file(test_path_prefix, GET_OBJ_COUNT_TEST_FILENAME2);
+#ifndef NO_FILE_MOUNTS
+ remove_test_file(test_path_prefix, FILE_MOUNT_TEST_FILENAME);
+#endif
+ remove_test_file(test_path_prefix, GET_FILE_NAME_TEST_FNAME);
+}
+
+int
+H5_api_file_test(void)
+{
+ size_t i;
+ int nerrors;
+
+ HDprintf("**********************************************\n");
+ HDprintf("* *\n");
+ HDprintf("* API File Tests *\n");
+ HDprintf("* *\n");
+ HDprintf("**********************************************\n\n");
+
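+ /* Run each file API test in turn and count how many fail */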
+ for (i = 0, nerrors = 0; i < ARRAY_LENGTH(file_tests); i++) {
+ nerrors += (*file_tests[i])() ? 1 : 0;
+ }
+
+ HDprintf("\n");
+
+ HDprintf("Cleaning up testing files\n");
+ cleanup_files();
+
+ return nerrors;
+}
diff --git a/test/API/H5_api_file_test.h b/test/API/H5_api_file_test.h
new file mode 100644
index 0000000..948cb6a
--- /dev/null
+++ b/test/API/H5_api_file_test.h
@@ -0,0 +1,85 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+#ifndef H5_API_FILE_TEST_H
+#define H5_API_FILE_TEST_H
+
+#include "H5_api_test.h"
+
+int H5_api_file_test(void);
+
+/*********************************************
+ * *
+ * API File test defines *
+ * *
+ *********************************************/
+
+#define FILE_CREATE_TEST_FILENAME "test_file.h5"
+
+#define FILE_CREATE_INVALID_PARAMS_FILE_NAME "invalid_params_file.h5"
+
+#define FILE_CREATE_EXCL_FILE_NAME "excl_flag_file.h5"
+
+#define NONEXISTENT_FILENAME "nonexistent_file.h5"
+
+#define OVERLAPPING_FILENAME "overlapping_file.h5"
+#define OVERLAPPING_OPEN_TEST_GRP_NAME "group"
+#define OVERLAPPING_OPEN_TEST_DSET_NAME "dataset"
+
+#define FILE_PERMISSION_TEST_FILENAME "file_permission.h5"
+#define FILE_PERMISSION_TEST_GRP_NAME "group"
+#define FILE_PERMISSION_TEST_DSET_NAME "Dataset"
+#define FILE_PERMISSION_TEST_DSET2_NAME "Dataset2"
+#define FILE_PERMISSION_TEST_ATTR_NAME "attribute"
+#define FILE_PERMISSION_TEST_NAMED_DTYPE "named_dtype"
+
+#define FILE_FLUSH_TEST_FILENAME "flush_file.h5"
+
+#define FILE_PROPERTY_LIST_TEST_FCPL_PROP_VAL 65536
+#define FILE_PROPERTY_LIST_TEST_FNAME1 "property_list_test_file1.h5"
+#define FILE_PROPERTY_LIST_TEST_FNAME2 "property_list_test_file2.h5"
+
+#define FILE_INTENT_TEST_FILENAME "intent_test_file.h5"
+
+#define GET_OBJ_COUNT_TEST_FILENAME1 "file_obj_count1.h5"
+#define GET_OBJ_COUNT_TEST_FILENAME2 "file_obj_count2.h5"
+#define GET_OBJ_COUNT_TEST_GRP_NAME "/group"
+#define GET_OBJ_COUNT_TEST_DSET_NAME "Dataset"
+#define GET_OBJ_COUNT_TEST_ATTR_NAME "Attribute"
+#define GET_OBJ_COUNT_TEST_NAMED_DTYPE "named_dtype"
+
+#define FILE_MOUNT_TEST_FILENAME "file_mount.h5"
+#define FILE_MOUNT_TEST_GRP_NAME "group"
+
+#define GET_FILE_NAME_TEST_FNAME "file_name_retrieval.h5"
+#define GET_FILE_NAME_TEST_GRP_NAME "group"
+#define GET_FILE_NAME_TEST_DSET_NAME "dataset"
+#define GET_FILE_NAME_TEST_ATTR_NAME "attribute"
+#define GET_FILE_NAME_TEST_NAMED_DTYPE "datatype"
+
+#define FILESPACE_INFO_FILENAME "filespace_info.h5"
+#define FSP_SIZE512 (hsize_t)512
+
+#define FILE_GET_ID_TEST_FILENAME "test_file_id.h5"
+
+#define FILE_CLOSE_DEGREE_FILENAME "test_close_degree.h5"
+
+#define GET_FREE_SECTIONS_FILENAME "test_free_sections.h5"
+
+#define FILE_SIZE_FILENAME "file_size.h5"
+#define KB 1024U
+
+#define FILE_INFO_FILENAME "file_info.h5"
+
+#define DOUBLE_GROUP_OPEN_FILENAME "double_group_open.h5"
+
+#endif
diff --git a/test/API/H5_api_group_test.c b/test/API/H5_api_group_test.c
new file mode 100644
index 0000000..f652202
--- /dev/null
+++ b/test/API/H5_api_group_test.c
@@ -0,0 +1,2394 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+#include "H5_api_group_test.h"
+
+static int test_create_group_under_root(void);
+static int test_create_group_under_existing_group(void);
+static int test_create_many_groups(void);
+static int test_create_deep_groups(void);
+static int test_create_intermediate_group(void);
+static int test_create_group_invalid_params(void);
+static int test_create_anonymous_group(void);
+static int test_create_anonymous_group_invalid_params(void);
+static int test_open_nonexistent_group(void);
+static int test_open_group_invalid_params(void);
+static int test_close_group_invalid_id(void);
+static int test_group_property_lists(void);
+static int test_get_group_info(void);
+static int test_get_group_info_invalid_params(void);
+static int test_flush_group(void);
+static int test_flush_group_invalid_params(void);
+static int test_refresh_group(void);
+static int test_refresh_group_invalid_params(void);
+static int create_group_recursive(hid_t parent_gid, unsigned counter);
+
+/*
+ * The array of group tests to be performed.
+ */
+static int (*group_tests[])(void) = {
+ test_create_group_under_root,
+ test_create_group_under_existing_group,
+ test_create_many_groups,
+ test_create_deep_groups,
+ test_create_intermediate_group,
+ test_create_group_invalid_params,
+ test_create_anonymous_group,
+ test_create_anonymous_group_invalid_params,
+ test_open_nonexistent_group,
+ test_open_group_invalid_params,
+ test_close_group_invalid_id,
+ test_group_property_lists,
+ test_get_group_info,
+ test_get_group_info_invalid_params,
+ test_flush_group,
+ test_flush_group_invalid_params,
+ test_refresh_group,
+ test_refresh_group_invalid_params,
+};
+
+/*
+ * A test to check that a group can be created under the root group.
+ */
+static int
+test_create_group_under_root(void)
+{
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t parent_gid = H5I_INVALID_HID;
+
+ TESTING("creation of group under the root group");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & (H5VL_CAP_FLAG_FILE_BASIC)) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file or group aren't supported with this connector\n");
+ return 0;
+ }
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ /* Create the group under the root group of the file */
+ if ((parent_gid =
+ H5Gcreate2(file_id, GROUP_CREATE_UNDER_ROOT_GNAME, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create group '%s'\n", GROUP_CREATE_UNDER_ROOT_GNAME);
+ goto error;
+ }
+
+ if (H5Gclose(parent_gid) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Gclose(parent_gid);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to check that a group can be created under an existing
+ * group which is not the root group.
+ */
+static int
+test_create_group_under_existing_group(void)
+{
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t parent_group_id = H5I_INVALID_HID, child_group_id = H5I_INVALID_HID,
+ grandchild_group_id = H5I_INVALID_HID;
+
+ TESTING("creation of group under existing group using a relative path");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & (H5VL_CAP_FLAG_FILE_BASIC)) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file or group aren't supported with this connector\n");
+ return 0;
+ }
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file\n");
+ goto error;
+ }
+
+ /* Open the already-existing group (/group_tests) in the file as the parent */
+ if ((parent_group_id = H5Gopen2(file_id, GROUP_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open group\n");
+ goto error;
+ }
+
+ /* Create a new group (/group_tests/child_group) under the already-existing parent Group using a relative
+ * path */
+ if ((child_group_id = H5Gcreate2(parent_group_id, GROUP_CREATE_UNDER_GROUP_REL_GNAME, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create group using relative path: %s\n", GROUP_CREATE_UNDER_GROUP_REL_GNAME);
+ goto error;
+ }
+
+ /* Create a new group (child_group/grandchild_group) under the already-existing parent Group using an
+ * absolute path */
+ if ((grandchild_group_id = H5Gcreate2(parent_group_id, GROUP_CREATE_UNDER_GROUP_ABS_GNAME, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create group using absolute path: %s\n", GROUP_CREATE_UNDER_GROUP_ABS_GNAME);
+ goto error;
+ }
+
+ if (H5Gclose(grandchild_group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(child_group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(parent_group_id) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Gclose(grandchild_group_id);
+ H5Gclose(child_group_id);
+ H5Gclose(parent_group_id);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to create many (GROUP_NUMB_MANY) groups
+ */
+static int
+test_create_many_groups(void)
+{
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID;
+ hid_t parent_group_id = H5I_INVALID_HID, child_group_id = H5I_INVALID_HID;
+ char group_name[NAME_BUF_SIZE];
+ unsigned i;
+
+ TESTING("H5Gcreate many groups");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & (H5VL_CAP_FLAG_FILE_BASIC)) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file or group aren't supported with this connector\n");
+ return 0;
+ }
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, GROUP_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group\n");
+ goto error;
+ }
+
+ if ((parent_group_id = H5Gcreate2(container_group, MANY_GROUP_CREATIONS_GNAME, H5P_DEFAULT, H5P_DEFAULT,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create group '%s'\n", MANY_GROUP_CREATIONS_GNAME);
+ goto error;
+ }
+
+ /* Create multiple groups under the parent group */
+ HDprintf("\n");
+ for (i = 0; i < GROUP_NUMB_MANY; i++) {
+ HDprintf("\r %u/%u", i + 1, GROUP_NUMB_MANY);
+ HDsnprintf(group_name, NAME_BUF_SIZE, "group %02u", i);
+ if ((child_group_id =
+ H5Gcreate2(parent_group_id, group_name, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create group '%s'\n", group_name);
+ goto error;
+ }
+
+ if (H5Gclose(child_group_id) < 0)
+ TEST_ERROR;
+ }
+
+ if (H5Gclose(parent_group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Gclose(child_group_id);
+ H5Gclose(parent_group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to create nested groups down to a depth of GROUP_DEPTH.
+ */
+static int
+test_create_deep_groups(void)
+{
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+
+ TESTING("H5Gcreate groups of great depths");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & (H5VL_CAP_FLAG_FILE_BASIC)) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file or group aren't supported with this connector\n");
+ return 0;
+ }
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, GROUP_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group\n");
+ goto error;
+ }
+
+ /* Create the top-level group for this test under the container group */
+ if ((group_id = H5Gcreate2(container_group, DEEP_GROUP_CREATIONS_GNAME, H5P_DEFAULT, H5P_DEFAULT,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create group '%s'\n", DEEP_GROUP_CREATIONS_GNAME);
+ goto error;
+ }
+
+ HDprintf("\n");
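+ /* Recursively create a chain of nested groups, GROUP_DEPTH levels deep */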
+ if (create_group_recursive(group_id, 1) < 0)
+ TEST_ERROR;
+
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * Recursive helper that creates nested groups down to a depth of GROUP_DEPTH.
+ */
+static int
+create_group_recursive(hid_t parent_gid, unsigned counter)
+{
+ hid_t child_gid = H5I_INVALID_HID;
+ char gname[NAME_BUF_SIZE];
+
+ HDprintf("\r %u/%u", counter, GROUP_DEPTH);
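+ /* Name each nested group after its ordinal depth below the starting group
+ * (2nd_child_group, 3rd_child_group, 4th_child_group, ...) */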
+ if (counter == 1)
+ HDsnprintf(gname, NAME_BUF_SIZE, "2nd_child_group");
+ else if (counter == 2)
+ HDsnprintf(gname, NAME_BUF_SIZE, "3rd_child_group");
+ else
+ HDsnprintf(gname, NAME_BUF_SIZE, "%uth_child_group", counter + 1);
+ if ((child_gid = H5Gcreate2(parent_gid, gname, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create group '%s'\n", gname);
+ goto error;
+ }
+
+ if (counter < GROUP_DEPTH) {
+ if (create_group_recursive(child_gid, counter + 1) < 0)
+ TEST_ERROR;
+ }
+
+ if (H5Gclose(child_gid) < 0)
+ TEST_ERROR;
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Gclose(child_gid);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to create groups automatically using H5Pset_create_intermediate_group
+ */
+static int
+test_create_intermediate_group(void)
+{
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t crt_intmd_lcpl_id = H5I_INVALID_HID;
+
+ TESTING("H5Gcreate group with intermediate group creation");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & (H5VL_CAP_FLAG_FILE_BASIC)) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file or group aren't supported with this connector\n");
+ return 0;
+ }
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, GROUP_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group\n");
+ goto error;
+ }
+
+ /* Set up plist for creating intermediate groups */
+ if ((crt_intmd_lcpl_id = H5Pcreate(H5P_LINK_CREATE)) < 0)
+ TEST_ERROR;
+ if (H5Pset_create_intermediate_group(crt_intmd_lcpl_id, TRUE) < 0)
+ TEST_ERROR;
+
+ /* Create an intermediate group using a relative path */
+ if ((group_id = H5Gcreate2(container_group,
+ GROUP_CREATE_INTMD_REL_INTMD_NAME "/" GROUP_CREATE_INTMD_REL_END_NAME,
+ crt_intmd_lcpl_id, H5P_DEFAULT, H5P_DEFAULT)) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ group_id = H5I_INVALID_HID;
+
+ /* Verify both groups were created */
+ if ((group_id =
+ H5Gopen2(file_id, GROUP_TEST_GROUP_NAME "/" GROUP_CREATE_INTMD_REL_INTMD_NAME, H5P_DEFAULT)) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ group_id = H5I_INVALID_HID;
+ if ((group_id = H5Gopen2(file_id,
+ GROUP_TEST_GROUP_NAME "/" GROUP_CREATE_INTMD_REL_INTMD_NAME
+ "/" GROUP_CREATE_INTMD_REL_END_NAME,
+ H5P_DEFAULT)) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ group_id = H5I_INVALID_HID;
+
+ /* Create an intermediate group using an absolute path */
+ if ((group_id = H5Gcreate2(container_group,
+ "/" GROUP_TEST_GROUP_NAME "/" GROUP_CREATE_INTMD_ABS_INTMD_NAME
+ "/" GROUP_CREATE_INTMD_ABS_END_NAME,
+ crt_intmd_lcpl_id, H5P_DEFAULT, H5P_DEFAULT)) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ group_id = H5I_INVALID_HID;
+
+ /* Verify both groups were created */
+ if ((group_id =
+ H5Gopen2(file_id, GROUP_TEST_GROUP_NAME "/" GROUP_CREATE_INTMD_ABS_INTMD_NAME, H5P_DEFAULT)) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ group_id = H5I_INVALID_HID;
+ if ((group_id = H5Gopen2(file_id,
+ GROUP_TEST_GROUP_NAME "/" GROUP_CREATE_INTMD_ABS_INTMD_NAME
+ "/" GROUP_CREATE_INTMD_ABS_END_NAME,
+ H5P_DEFAULT)) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ group_id = H5I_INVALID_HID;
+
+ /* Create two intermediate groups using an absolute path */
+ if ((group_id = H5Gcreate2(container_group,
+ "/" GROUP_TEST_GROUP_NAME "/" GROUP_CREATE_INTMD_MULT_INTMD1_NAME
+ "/" GROUP_CREATE_INTMD_MULT_INTMD2_NAME "/" GROUP_CREATE_INTMD_MULT_END_NAME,
+ crt_intmd_lcpl_id, H5P_DEFAULT, H5P_DEFAULT)) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ group_id = H5I_INVALID_HID;
+
+ /* Verify all three groups were created */
+ if ((group_id = H5Gopen2(file_id, GROUP_TEST_GROUP_NAME "/" GROUP_CREATE_INTMD_MULT_INTMD1_NAME,
+ H5P_DEFAULT)) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ group_id = H5I_INVALID_HID;
+ if ((group_id = H5Gopen2(file_id,
+ GROUP_TEST_GROUP_NAME "/" GROUP_CREATE_INTMD_MULT_INTMD1_NAME
+ "/" GROUP_CREATE_INTMD_MULT_INTMD2_NAME,
+ H5P_DEFAULT)) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ group_id = H5I_INVALID_HID;
+ if ((group_id = H5Gopen2(file_id,
+ GROUP_TEST_GROUP_NAME "/" GROUP_CREATE_INTMD_MULT_INTMD1_NAME
+ "/" GROUP_CREATE_INTMD_MULT_INTMD2_NAME
+ "/" GROUP_CREATE_INTMD_MULT_END_NAME,
+ H5P_DEFAULT)) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ group_id = H5I_INVALID_HID;
+
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+ if (H5Pclose(crt_intmd_lcpl_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ H5Pclose(crt_intmd_lcpl_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to check that a group can't be created when H5Gcreate
+ * is passed invalid parameters.
+ */
+static int
+test_create_group_invalid_params(void)
+{
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+
+ TESTING_MULTIPART("H5Gcreate with invalid parameters");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & (H5VL_CAP_FLAG_FILE_BASIC)) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file or group aren't supported with this connector\n");
+ return 0;
+ }
+
+ TESTING_2("test setup");
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ PASSED();
+
+ BEGIN_MULTIPART
+ {
+ PART_BEGIN(H5Gcreate_invalid_loc_id)
+ {
+ TESTING_2("H5Gcreate with an invalid loc_id");
+
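+ /* The expected error from H5Gcreate2 is silenced by wrapping the call in
+ * H5E_BEGIN_TRY/H5E_END_TRY */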
+ H5E_BEGIN_TRY
+ {
+ group_id = H5Gcreate2(H5I_INVALID_HID, GROUP_CREATE_INVALID_PARAMS_GROUP_NAME, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (group_id >= 0) {
+ H5_FAILED();
+ HDprintf(" created group with invalid loc_id!\n");
+ H5Gclose(group_id);
+ PART_ERROR(H5Gcreate_invalid_loc_id);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Gcreate_invalid_loc_id);
+
+ PART_BEGIN(H5Gcreate_invalid_grp_name)
+ {
+ TESTING_2("H5Gcreate with an invalid group name");
+
+ H5E_BEGIN_TRY
+ {
+ group_id = H5Gcreate2(file_id, NULL, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (group_id >= 0) {
+ H5_FAILED();
+ HDprintf(" created group with a NULL name!\n");
+ H5Gclose(group_id);
+ PART_ERROR(H5Gcreate_invalid_grp_name);
+ }
+
+ H5E_BEGIN_TRY
+ {
+ group_id = H5Gcreate2(file_id, "", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (group_id >= 0) {
+ H5_FAILED();
+ HDprintf(" created group with an invalid group name of ''!\n");
+ H5Gclose(group_id);
+ PART_ERROR(H5Gcreate_invalid_grp_name);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Gcreate_invalid_grp_name);
+
+ PART_BEGIN(H5Gcreate_invalid_lcpl)
+ {
+ TESTING_2("H5Gcreate with an invalid LCPL");
+
+ H5E_BEGIN_TRY
+ {
+ group_id = H5Gcreate2(file_id, GROUP_CREATE_INVALID_PARAMS_GROUP_NAME, H5I_INVALID_HID,
+ H5P_DEFAULT, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (group_id >= 0) {
+ H5_FAILED();
+ HDprintf(" created group with invalid LCPL!\n");
+ H5Gclose(group_id);
+ PART_ERROR(H5Gcreate_invalid_lcpl);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Gcreate_invalid_lcpl);
+
+ PART_BEGIN(H5Gcreate_invalid_gcpl)
+ {
+ TESTING_2("H5Gcreate with an invalid GCPL");
+
+ H5E_BEGIN_TRY
+ {
+ group_id = H5Gcreate2(file_id, GROUP_CREATE_INVALID_PARAMS_GROUP_NAME, H5P_DEFAULT,
+ H5I_INVALID_HID, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (group_id >= 0) {
+ H5_FAILED();
+ HDprintf(" created group with invalid GCPL!\n");
+ H5Gclose(group_id);
+ PART_ERROR(H5Gcreate_invalid_gcpl);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Gcreate_invalid_gcpl);
+
+ PART_BEGIN(H5Gcreate_invalid_gapl)
+ {
+ TESTING_2("H5Gcreate with an invalid GAPL");
+
+ H5E_BEGIN_TRY
+ {
+ group_id = H5Gcreate2(file_id, GROUP_CREATE_INVALID_PARAMS_GROUP_NAME, H5P_DEFAULT,
+ H5P_DEFAULT, H5I_INVALID_HID);
+ }
+ H5E_END_TRY;
+
+ if (group_id >= 0) {
+ H5_FAILED();
+ HDprintf(" created group with invalid GAPL!\n");
+ H5Gclose(group_id);
+ PART_ERROR(H5Gcreate_invalid_gapl);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Gcreate_invalid_gapl);
+ }
+ END_MULTIPART;
+
+ TESTING_2("test cleanup");
+
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Gclose(group_id);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to check that an anonymous group can be created with
+ * H5Gcreate_anon.
+ */
+static int
+test_create_anonymous_group(void)
+{
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, new_group_id = H5I_INVALID_HID;
+
+ TESTING("creation of anonymous group");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & (H5VL_CAP_FLAG_FILE_BASIC)) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file or group aren't supported with this connector\n");
+ return 0;
+ }
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file\n");
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, GROUP_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open group\n");
+ goto error;
+ }
+
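+ /* Create an anonymous group; it is never linked into the file's group
+ * structure, so it disappears once its ID is closed */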
+ if ((new_group_id = H5Gcreate_anon(file_id, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create anonymous group\n");
+ goto error;
+ }
+
+ if (H5Gclose(new_group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Gclose(new_group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to check that an anonymous group can't be created
+ * when H5Gcreate_anon is passed invalid parameters.
+ */
+static int
+test_create_anonymous_group_invalid_params(void)
+{
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, new_group_id = H5I_INVALID_HID;
+
+ TESTING_MULTIPART("H5Gcreate_anon with invalid parameters");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & (H5VL_CAP_FLAG_FILE_BASIC)) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file or group aren't supported with this connector\n");
+ return 0;
+ }
+
+ TESTING_2("test setup");
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file\n");
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, GROUP_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open group\n");
+ goto error;
+ }
+
+ PASSED();
+
+ BEGIN_MULTIPART
+ {
+ PART_BEGIN(H5Gcreate_anon_invalid_loc_id)
+ {
+ TESTING_2("H5Gcreate_anon with an invalid loc_id");
+
+ H5E_BEGIN_TRY
+ {
+ new_group_id = H5Gcreate_anon(H5I_INVALID_HID, H5P_DEFAULT, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (new_group_id >= 0) {
+ H5_FAILED();
+ HDprintf(" created anonymous group with invalid loc_id!\n");
+ H5Gclose(new_group_id);
+ PART_ERROR(H5Gcreate_anon_invalid_loc_id);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Gcreate_anon_invalid_loc_id);
+
+ PART_BEGIN(H5Gcreate_anon_invalid_gcpl)
+ {
+ TESTING_2("H5Gcreate_anon with an invalid GCPL");
+
+ H5E_BEGIN_TRY
+ {
+ new_group_id = H5Gcreate_anon(container_group, H5I_INVALID_HID, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (new_group_id >= 0) {
+ H5_FAILED();
+ HDprintf(" created anonymous group with invalid GCPL!\n");
+ H5Gclose(new_group_id);
+ PART_ERROR(H5Gcreate_anon_invalid_gcpl);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Gcreate_anon_invalid_gcpl);
+
+ PART_BEGIN(H5Gcreate_anon_invalid_gapl)
+ {
+ TESTING_2("H5Gcreate_anon with an invalid GAPL");
+
+ H5E_BEGIN_TRY
+ {
+ new_group_id = H5Gcreate_anon(container_group, H5P_DEFAULT, H5I_INVALID_HID);
+ }
+ H5E_END_TRY;
+
+ if (new_group_id >= 0) {
+ H5_FAILED();
+ HDprintf(" created anonymous group with invalid GAPL!\n");
+ H5Gclose(new_group_id);
+ PART_ERROR(H5Gcreate_anon_invalid_gapl);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Gcreate_anon_invalid_gapl);
+ }
+ END_MULTIPART;
+
+ TESTING_2("test cleanup");
+
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Gclose(new_group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to check that a group which doesn't exist cannot
+ * be opened.
+ */
+static int
+test_open_nonexistent_group(void)
+{
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+
+ TESTING("for invalid opening of a nonexistent group");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & (H5VL_CAP_FLAG_FILE_BASIC)) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file or group aren't supported with this connector\n");
+ return 0;
+ }
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file\n");
+ goto error;
+ }
+
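+ /* Attempt to open a group that was never created; the open should fail */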
+ H5E_BEGIN_TRY
+ {
+ group_id = H5Gopen2(file_id, OPEN_NONEXISTENT_GROUP_TEST_GNAME, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (group_id >= 0) {
+ H5_FAILED();
+ HDprintf(" opened non-existent group!\n");
+ goto error;
+ }
+
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Gclose(group_id);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to check that a group can't be opened when H5Gopen
+ * is passed invalid parameters.
+ */
+static int
+test_open_group_invalid_params(void)
+{
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+
+ TESTING_MULTIPART("H5Gopen with invalid parameters");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & (H5VL_CAP_FLAG_FILE_BASIC)) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file or group aren't supported with this connector\n");
+ return 0;
+ }
+
+ TESTING_2("test setup");
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ PASSED();
+
+ BEGIN_MULTIPART
+ {
+ PART_BEGIN(H5Gopen_invalid_loc_id)
+ {
+ TESTING_2("H5Gopen with an invalid loc_id");
+
+ H5E_BEGIN_TRY
+ {
+ group_id = H5Gopen2(H5I_INVALID_HID, GROUP_TEST_GROUP_NAME, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (group_id >= 0) {
+ H5_FAILED();
+ HDprintf(" opened group using an invalid loc_id!\n");
+ H5Gclose(group_id);
+ PART_ERROR(H5Gopen_invalid_loc_id);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Gopen_invalid_loc_id);
+
+ PART_BEGIN(H5Gopen_invalid_grp_name)
+ {
+ TESTING_2("H5Gopen with an invalid group name");
+
+ H5E_BEGIN_TRY
+ {
+ group_id = H5Gopen2(file_id, NULL, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (group_id >= 0) {
+ H5_FAILED();
+ HDprintf(" opened group using a NULL name!\n");
+ H5Gclose(group_id);
+ PART_ERROR(H5Gopen_invalid_grp_name);
+ }
+
+ H5E_BEGIN_TRY
+ {
+ group_id = H5Gopen2(file_id, "", H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (group_id >= 0) {
+ H5_FAILED();
+ HDprintf(" opened group using an invalid name of ''!\n");
+ H5Gclose(group_id);
+ PART_ERROR(H5Gopen_invalid_grp_name);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Gopen_invalid_grp_name);
+
+ PART_BEGIN(H5Gopen_invalid_gapl)
+ {
+ TESTING_2("H5Gopen with an invalid GAPL");
+
+ H5E_BEGIN_TRY
+ {
+ group_id = H5Gopen2(file_id, GROUP_TEST_GROUP_NAME, H5I_INVALID_HID);
+ }
+ H5E_END_TRY;
+
+ if (group_id >= 0) {
+ H5_FAILED();
+ HDprintf(" opened group using an invalid GAPL!\n");
+ H5Gclose(group_id);
+ PART_ERROR(H5Gopen_invalid_gapl);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Gopen_invalid_gapl);
+ }
+ END_MULTIPART;
+
+ TESTING_2("test cleanup");
+
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Gclose(group_id);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to check that H5Gclose doesn't succeed for an
+ * invalid group ID.
+ */
+static int
+test_close_group_invalid_id(void)
+{
+ herr_t err_ret = -1;
+
+ TESTING("H5Gclose with an invalid group ID");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC)) {
+ SKIPPED();
+ HDprintf(" API functions for basic group aren't supported with this connector\n");
+ return 0;
+ }
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Gclose(H5I_INVALID_HID);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" close a group with an invalid ID!\n");
+ goto error;
+ }
+
+ PASSED();
+
+ return 0;
+
+error:
+ return 1;
+}
+
+/*
+ * A test to check that a GCPL used for group creation can
+ * be persisted and that a valid copy of that GCPL can be
+ * retrieved later with a call to H5Gget_create_plist.
+ */
+static int
+test_group_property_lists(void)
+{
+ unsigned dummy_prop_val = GROUP_PROPERTY_LIST_TEST_DUMMY_VAL;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID;
+ hid_t group_id1 = H5I_INVALID_HID, group_id2 = H5I_INVALID_HID;
+ hid_t gcpl_id1 = H5I_INVALID_HID, gcpl_id2 = H5I_INVALID_HID;
+
+ TESTING_MULTIPART("group property list operations");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & (H5VL_CAP_FLAG_FILE_BASIC)) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_GET_PLIST) || !(vol_cap_flags_g & H5VL_CAP_FLAG_CREATION_ORDER)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file, group, property list, creation order aren't supported "
+ "with this connector\n");
+ return 0;
+ }
+
+ TESTING_2("test setup");
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file\n");
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, GROUP_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group\n");
+ goto error;
+ }
+
+ if ((gcpl_id1 = H5Pcreate(H5P_GROUP_CREATE)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create GCPL\n");
+ goto error;
+ }
+
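+ /* Set the link creation order property to a non-default value so that its
+ * persistence can be verified through H5Gget_create_plist below */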
+ if (H5Pset_link_creation_order(gcpl_id1, dummy_prop_val) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't set property on GCPL\n");
+ goto error;
+ }
+
+ /* Create the group in the file */
+ if ((group_id1 = H5Gcreate2(container_group, GROUP_PROPERTY_LIST_TEST_GROUP_NAME1, H5P_DEFAULT, gcpl_id1,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create group\n");
+ goto error;
+ }
+
+ /* Create the second group using H5P_DEFAULT for the GCPL */
+ if ((group_id2 = H5Gcreate2(container_group, GROUP_PROPERTY_LIST_TEST_GROUP_NAME2, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create group\n");
+ goto error;
+ }
+
+ if (H5Pclose(gcpl_id1) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ BEGIN_MULTIPART
+ {
+ PART_BEGIN(H5Gget_create_plist)
+ {
+ TESTING_2("H5Gget_create_plist");
+
+ /* Try to retrieve copies of the two property lists, one which has the property set and one which
+ * does not */
+ if ((gcpl_id1 = H5Gget_create_plist(group_id1)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get GCPL\n");
+ PART_ERROR(H5Gget_create_plist);
+ }
+
+ if ((gcpl_id2 = H5Gget_create_plist(group_id2)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get GCPL\n");
+ PART_ERROR(H5Gget_create_plist);
+ }
+
+ /* Ensure that property list 1 has the property set and property list 2 does not */
+ dummy_prop_val = 0;
+
+ if (H5Pget_link_creation_order(gcpl_id1, &dummy_prop_val) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't retrieve GCPL property value\n");
+ PART_ERROR(H5Gget_create_plist);
+ }
+
+ if (dummy_prop_val != GROUP_PROPERTY_LIST_TEST_DUMMY_VAL) {
+ H5_FAILED();
+ HDprintf(" retrieved GCPL property value '%llu' did not match expected value '%llu'\n",
+ (unsigned long long)dummy_prop_val,
+ (unsigned long long)GROUP_PROPERTY_LIST_TEST_DUMMY_VAL);
+ PART_ERROR(H5Gget_create_plist);
+ }
+
+ dummy_prop_val = 0;
+
+ if (H5Pget_link_creation_order(gcpl_id2, &dummy_prop_val) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't retrieve GCPL property value\n");
+ PART_ERROR(H5Gget_create_plist);
+ }
+
+ if (dummy_prop_val == GROUP_PROPERTY_LIST_TEST_DUMMY_VAL) {
+ H5_FAILED();
+ HDprintf(" retrieved GCPL property value '%llu' matched control value '%llu' when it "
+ "shouldn't have\n",
+ (unsigned long long)dummy_prop_val,
+ (unsigned long long)GROUP_PROPERTY_LIST_TEST_DUMMY_VAL);
+ PART_ERROR(H5Gget_create_plist);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Gget_create_plist);
+
+ /* Now see if we can still retrieve copies of the property lists upon opening
+ * (instead of creating) a group. If they were reconstructed properly upon file
+ * open, the creation property lists should also have the same test values
+ * as set before.
+ */
+ if (gcpl_id1 >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Pclose(gcpl_id1);
+ }
+ H5E_END_TRY;
+ gcpl_id1 = H5I_INVALID_HID;
+ }
+ if (gcpl_id2 >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Pclose(gcpl_id2);
+ }
+ H5E_END_TRY;
+ gcpl_id2 = H5I_INVALID_HID;
+ }
+ if (group_id1 >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Gclose(group_id1);
+ }
+ H5E_END_TRY;
+ group_id1 = H5I_INVALID_HID;
+ }
+ if (group_id2 >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Gclose(group_id2);
+ }
+ H5E_END_TRY;
+ group_id2 = H5I_INVALID_HID;
+ }
+
+ PART_BEGIN(H5Gget_create_plist_reopened)
+ {
+ TESTING_2("H5Gget_create_plist after re-opening a group");
+
+ if ((group_id1 = H5Gopen2(container_group, GROUP_PROPERTY_LIST_TEST_GROUP_NAME1, H5P_DEFAULT)) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open group\n");
+ PART_ERROR(H5Gget_create_plist_reopened);
+ }
+
+ if ((group_id2 = H5Gopen2(container_group, GROUP_PROPERTY_LIST_TEST_GROUP_NAME2, H5P_DEFAULT)) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open group\n");
+ PART_ERROR(H5Gget_create_plist_reopened);
+ }
+
+ if ((gcpl_id1 = H5Gget_create_plist(group_id1)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get property list\n");
+ PART_ERROR(H5Gget_create_plist_reopened);
+ }
+
+ if ((gcpl_id2 = H5Gget_create_plist(group_id2)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get property list\n");
+ PART_ERROR(H5Gget_create_plist_reopened);
+ }
+
+ /* Re-check the property values */
+ dummy_prop_val = 0;
+
+ if (H5Pget_link_creation_order(gcpl_id1, &dummy_prop_val) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't retrieve GCPL property value\n");
+ PART_ERROR(H5Gget_create_plist_reopened);
+ }
+
+ if (dummy_prop_val != GROUP_PROPERTY_LIST_TEST_DUMMY_VAL) {
+ H5_FAILED();
+ HDprintf(" retrieved GCPL property value '%llu' did not match expected value '%llu'\n",
+ (unsigned long long)dummy_prop_val,
+ (unsigned long long)GROUP_PROPERTY_LIST_TEST_DUMMY_VAL);
+ PART_ERROR(H5Gget_create_plist_reopened);
+ }
+
+ dummy_prop_val = 0;
+
+ if (H5Pget_link_creation_order(gcpl_id2, &dummy_prop_val) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't retrieve GCPL property value\n");
+ PART_ERROR(H5Gget_create_plist_reopened);
+ }
+
+ if (dummy_prop_val == GROUP_PROPERTY_LIST_TEST_DUMMY_VAL) {
+ H5_FAILED();
+ HDprintf(" retrieved GCPL property value '%llu' matched control value '%llu' when it "
+ "shouldn't have\n",
+ (unsigned long long)dummy_prop_val,
+ (unsigned long long)GROUP_PROPERTY_LIST_TEST_DUMMY_VAL);
+ PART_ERROR(H5Gget_create_plist_reopened);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Gget_create_plist_reopened);
+ }
+ END_MULTIPART;
+
+ TESTING_2("test cleanup");
+
+ if (H5Pclose(gcpl_id1) < 0)
+ TEST_ERROR;
+ if (H5Pclose(gcpl_id2) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id1) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id2) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Pclose(gcpl_id1);
+ H5Pclose(gcpl_id2);
+ H5Gclose(group_id1);
+ H5Gclose(group_id2);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test for the functionality of H5Gget_info(_by_idx).
+ */
+static int
+test_get_group_info(void)
+{
+ H5G_info_t group_info;
+ unsigned i;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID;
+ hid_t parent_group_id = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t gcpl_id = H5I_INVALID_HID;
+ char group_name[NAME_BUF_SIZE];
+
+ TESTING_MULTIPART("retrieval of group info");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & (H5VL_CAP_FLAG_FILE_BASIC)) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_MORE) || !(vol_cap_flags_g & H5VL_CAP_FLAG_CREATION_ORDER)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file or group, creation order aren't supported with this "
+ "connector\n");
+ return 0;
+ }
+
+ TESTING_2("test setup");
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, GROUP_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group\n");
+ goto error;
+ }
+
+ if ((gcpl_id = H5Pcreate(H5P_GROUP_CREATE)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create a GCPL\n");
+ goto error;
+ }
+
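+ /* Track and index link creation order so that the H5Gget_info_by_idx calls
+ * below can use the H5_INDEX_CRT_ORDER index */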
+ if (H5Pset_link_creation_order(gcpl_id, H5P_CRT_ORDER_TRACKED | H5P_CRT_ORDER_INDEXED) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't enable link creation order tracking and indexing on GCPL\n");
+ goto error;
+ }
+
+ if ((parent_group_id = H5Gcreate2(container_group, GROUP_GET_INFO_TEST_GROUP_NAME, H5P_DEFAULT, gcpl_id,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create group '%s'\n", GROUP_GET_INFO_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ /* Create multiple groups under the parent group */
+ for (i = 0; i < GROUP_GET_INFO_TEST_GROUP_NUMB; i++) {
+ /* Create the groups with a reverse-ordering naming scheme to test creation order */
+ HDsnprintf(group_name, NAME_BUF_SIZE, "group %02u",
+ (unsigned)(GROUP_GET_INFO_TEST_GROUP_NUMB - i - 1));
+
+ if ((group_id = H5Gcreate2(parent_group_id, group_name, H5P_DEFAULT, gcpl_id, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create group '%s'\n", group_name);
+ goto error;
+ }
+
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ }
+
+ PASSED();
+
+ BEGIN_MULTIPART
+ {
+ PART_BEGIN(H5Gget_info)
+ {
+ TESTING_2("retrieval of group info with H5Gget_info");
+
+ memset(&group_info, 0, sizeof(group_info));
+
+ /* Retrieve information about the parent group */
+ if (H5Gget_info(parent_group_id, &group_info) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get group info\n");
+ PART_ERROR(H5Gget_info);
+ }
+
+ if (group_info.nlinks != GROUP_GET_INFO_TEST_GROUP_NUMB) {
+ H5_FAILED();
+ HDprintf(" group's number of links '%lu' doesn't match expected value '%u'\n",
+ group_info.nlinks, (unsigned int)GROUP_GET_INFO_TEST_GROUP_NUMB);
+ PART_ERROR(H5Gget_info);
+ }
+
+ /*
+ * For the purpose of this test, the max creation order should match
+ * the number of links in the group.
+ */
+ if (group_info.max_corder != GROUP_GET_INFO_TEST_GROUP_NUMB) {
+ H5_FAILED();
+ HDprintf(" group's max creation order '%lld' doesn't match expected value '%lld'\n",
+ (long long)group_info.max_corder, (long long)GROUP_GET_INFO_TEST_GROUP_NUMB);
+ PART_ERROR(H5Gget_info);
+ }
+
+ /* Ensure that the storage_type field is at least set to a meaningful value */
+ if (group_info.storage_type != H5G_STORAGE_TYPE_SYMBOL_TABLE &&
+ group_info.storage_type != H5G_STORAGE_TYPE_COMPACT &&
+ group_info.storage_type != H5G_STORAGE_TYPE_DENSE &&
+ group_info.storage_type != H5G_STORAGE_TYPE_UNKNOWN) {
+ H5_FAILED();
+ HDprintf(" group info's 'storage_type' field wasn't set to a meaningful value\n");
+ PART_ERROR(H5Gget_info);
+ }
+
+ /* Assume that mounted should be FALSE in this case */
+ if (group_info.mounted != FALSE) {
+ H5_FAILED();
+ HDprintf(" group info's 'mounted' field was TRUE when it should have been FALSE\n");
+ PART_ERROR(H5Gget_info);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Gget_info);
+
+ PART_BEGIN(H5Gget_info_by_name)
+ {
+ TESTING_2("retrieval of group info with H5Gget_info_by_name");
+
+ memset(&group_info, 0, sizeof(group_info));
+
+ /* Retrieve information about the parent group */
+ if (H5Gget_info_by_name(container_group, GROUP_GET_INFO_TEST_GROUP_NAME, &group_info,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get group info by name\n");
+ PART_ERROR(H5Gget_info_by_name);
+ }
+
+ if (group_info.nlinks != GROUP_GET_INFO_TEST_GROUP_NUMB) {
+ H5_FAILED();
+ HDprintf(" group's number of links '%lu' doesn't match expected value '%u'\n",
+ group_info.nlinks, (unsigned int)GROUP_GET_INFO_TEST_GROUP_NUMB);
+ PART_ERROR(H5Gget_info_by_name);
+ }
+
+ /*
+ * For the purpose of this test, the max creation order should match
+ * the number of links in the group.
+ */
+ if (group_info.max_corder != GROUP_GET_INFO_TEST_GROUP_NUMB) {
+ H5_FAILED();
+ HDprintf(" group's max creation order '%lld' doesn't match expected value '%lld'\n",
+ (long long)group_info.max_corder, (long long)GROUP_GET_INFO_TEST_GROUP_NUMB);
+ PART_ERROR(H5Gget_info_by_name);
+ }
+
+ /* Ensure that the storage_type field is at least set to a meaningful value */
+ if (group_info.storage_type != H5G_STORAGE_TYPE_SYMBOL_TABLE &&
+ group_info.storage_type != H5G_STORAGE_TYPE_COMPACT &&
+ group_info.storage_type != H5G_STORAGE_TYPE_DENSE &&
+ group_info.storage_type != H5G_STORAGE_TYPE_UNKNOWN) {
+ H5_FAILED();
+ HDprintf(" group info's 'storage_type' field wasn't set to a meaningful value\n");
+ PART_ERROR(H5Gget_info_by_name);
+ }
+
+ /* Assume that mounted should be FALSE in this case */
+ if (group_info.mounted != FALSE) {
+ H5_FAILED();
+ HDprintf(" group info's 'mounted' field was TRUE when it should have been FALSE\n");
+ PART_ERROR(H5Gget_info_by_name);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Gget_info_by_name);
+
+ PART_BEGIN(H5Gget_info_by_idx_crt_order_increasing)
+ {
+ TESTING_2("H5Gget_info_by_idx by creation order in increasing order");
+
+ for (i = 0; i < GROUP_GET_INFO_TEST_GROUP_NUMB; i++) {
+ memset(&group_info, 0, sizeof(group_info));
+
+ /* Retrieve information about each group under the parent group */
+ if (H5Gget_info_by_idx(container_group, GROUP_GET_INFO_TEST_GROUP_NAME, H5_INDEX_CRT_ORDER,
+ H5_ITER_INC, (hsize_t)i, &group_info, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get group info for group at index %u\n", i);
+ PART_ERROR(H5Gget_info_by_idx_crt_order_increasing);
+ }
+
+ if (group_info.nlinks != 0) {
+ H5_FAILED();
+ HDprintf(" group's number of links '%lu' doesn't match expected value '%d'\n",
+ group_info.nlinks, 0);
+ PART_ERROR(H5Gget_info_by_idx_crt_order_increasing);
+ }
+
+ if (group_info.max_corder != 0) {
+ H5_FAILED();
+ HDprintf(" group's max creation order '%lld' doesn't match expected value '%d'\n",
+ (long long)group_info.max_corder, 0);
+ PART_ERROR(H5Gget_info_by_idx_crt_order_increasing);
+ }
+
+ /* Ensure that the storage_type field is at least set to a meaningful value */
+ if (group_info.storage_type != H5G_STORAGE_TYPE_SYMBOL_TABLE &&
+ group_info.storage_type != H5G_STORAGE_TYPE_COMPACT &&
+ group_info.storage_type != H5G_STORAGE_TYPE_DENSE &&
+ group_info.storage_type != H5G_STORAGE_TYPE_UNKNOWN) {
+ H5_FAILED();
+ HDprintf(" group info's 'storage_type' field wasn't set to a meaningful value\n");
+ PART_ERROR(H5Gget_info_by_idx_crt_order_increasing);
+ }
+
+ /* Assume that mounted should be FALSE in this case */
+ if (group_info.mounted != FALSE) {
+ H5_FAILED();
+ HDprintf(" group info's 'mounted' field was TRUE when it should have been FALSE\n");
+ PART_ERROR(H5Gget_info_by_idx_crt_order_increasing);
+ }
+ }
+
+ PASSED();
+ }
+ PART_END(H5Gget_info_by_idx_crt_order_increasing);
+
+ PART_BEGIN(H5Gget_info_by_idx_crt_order_decreasing)
+ {
+ TESTING_2("H5Gget_info_by_idx by creation order in decreasing order");
+
+ for (i = 0; i < GROUP_GET_INFO_TEST_GROUP_NUMB; i++) {
+ memset(&group_info, 0, sizeof(group_info));
+
+ /* Retrieve information about each group under the parent group */
+ if (H5Gget_info_by_idx(container_group, GROUP_GET_INFO_TEST_GROUP_NAME, H5_INDEX_CRT_ORDER,
+ H5_ITER_DEC, (hsize_t)i, &group_info, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get group info for group at index %u\n", i);
+ PART_ERROR(H5Gget_info_by_idx_crt_order_decreasing);
+ }
+
+ if (group_info.nlinks != 0) {
+ H5_FAILED();
+ HDprintf(" group's number of links '%lu' doesn't match expected value '%d'\n",
+ group_info.nlinks, 0);
+ PART_ERROR(H5Gget_info_by_idx_crt_order_decreasing);
+ }
+
+ if (group_info.max_corder != 0) {
+ H5_FAILED();
+ HDprintf(" group's max creation order '%lld' doesn't match expected value '%d'\n",
+ (long long)group_info.max_corder, 0);
+ PART_ERROR(H5Gget_info_by_idx_crt_order_decreasing);
+ }
+
+ /* Ensure that the storage_type field is at least set to a meaningful value */
+ if (group_info.storage_type != H5G_STORAGE_TYPE_SYMBOL_TABLE &&
+ group_info.storage_type != H5G_STORAGE_TYPE_COMPACT &&
+ group_info.storage_type != H5G_STORAGE_TYPE_DENSE &&
+ group_info.storage_type != H5G_STORAGE_TYPE_UNKNOWN) {
+ H5_FAILED();
+ HDprintf(" group info's 'storage_type' field wasn't set to a meaningful value\n");
+ PART_ERROR(H5Gget_info_by_idx_crt_order_decreasing);
+ }
+
+ /* Assume that mounted should be FALSE in this case */
+ if (group_info.mounted != FALSE) {
+ H5_FAILED();
+ HDprintf(" group info's 'mounted' field was TRUE when it should have been FALSE\n");
+ PART_ERROR(H5Gget_info_by_idx_crt_order_decreasing);
+ }
+ }
+
+ PASSED();
+ }
+ PART_END(H5Gget_info_by_idx_crt_order_decreasing);
+
+ PART_BEGIN(H5Gget_info_by_idx_name_order_increasing)
+ {
+ TESTING_2("H5Gget_info_by_idx by alphabetical order in increasing order");
+
+ for (i = 0; i < GROUP_GET_INFO_TEST_GROUP_NUMB; i++) {
+ memset(&group_info, 0, sizeof(group_info));
+
+ /* Retrieve information about each group under the parent group */
+ if (H5Gget_info_by_idx(container_group, GROUP_GET_INFO_TEST_GROUP_NAME, H5_INDEX_NAME,
+ H5_ITER_INC, (hsize_t)i, &group_info, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get group info for group at index %u\n", i);
+ PART_ERROR(H5Gget_info_by_idx_name_order_increasing);
+ }
+
+ if (group_info.nlinks != 0) {
+ H5_FAILED();
+ HDprintf(" group's number of links '%lu' doesn't match expected value '%d'\n",
+ group_info.nlinks, 0);
+ PART_ERROR(H5Gget_info_by_idx_name_order_increasing);
+ }
+
+ if (group_info.max_corder != 0) {
+ H5_FAILED();
+ HDprintf(" group's max creation order '%lld' doesn't match expected value '%d'\n",
+ (long long)group_info.max_corder, 0);
+ PART_ERROR(H5Gget_info_by_idx_name_order_increasing);
+ }
+
+ /* Ensure that the storage_type field is at least set to a meaningful value */
+ if (group_info.storage_type != H5G_STORAGE_TYPE_SYMBOL_TABLE &&
+ group_info.storage_type != H5G_STORAGE_TYPE_COMPACT &&
+ group_info.storage_type != H5G_STORAGE_TYPE_DENSE &&
+ group_info.storage_type != H5G_STORAGE_TYPE_UNKNOWN) {
+ H5_FAILED();
+ HDprintf(" group info's 'storage_type' field wasn't set to a meaningful value\n");
+ PART_ERROR(H5Gget_info_by_idx_name_order_increasing);
+ }
+
+ /* Assume that mounted should be FALSE in this case */
+ if (group_info.mounted != FALSE) {
+ H5_FAILED();
+ HDprintf(" group info's 'mounted' field was TRUE when it should have been FALSE\n");
+ PART_ERROR(H5Gget_info_by_idx_name_order_increasing);
+ }
+ }
+
+ PASSED();
+ }
+ PART_END(H5Gget_info_by_idx_name_order_increasing);
+
+ PART_BEGIN(H5Gget_info_by_idx_name_order_decreasing)
+ {
+ TESTING_2("H5Gget_info_by_idx by alphabetical order in decreasing order");
+#ifndef NO_DECREASING_ALPHA_ITER_ORDER
+ for (i = 0; i < GROUP_GET_INFO_TEST_GROUP_NUMB; i++) {
+ memset(&group_info, 0, sizeof(group_info));
+
+ /* Retrieve information about each group under the parent group */
+ if (H5Gget_info_by_idx(container_group, GROUP_GET_INFO_TEST_GROUP_NAME, H5_INDEX_NAME,
+ H5_ITER_DEC, (hsize_t)i, &group_info, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get group info for group at index %u\n", i);
+ PART_ERROR(H5Gget_info_by_idx_name_order_decreasing);
+ }
+
+ if (group_info.nlinks != 0) {
+ H5_FAILED();
+ HDprintf(" group's number of links '%lld' doesn't match expected value '%lld'\n",
+ group_info.nlinks, 0);
+ PART_ERROR(H5Gget_info_by_idx_name_order_decreasing);
+ }
+
+ if (group_info.max_corder != 0) {
+ H5_FAILED();
+ HDprintf(" group's max creation order '%lld' doesn't match expected value '%lld'\n",
+ (long long)group_info.max_corder, 0);
+ PART_ERROR(H5Gget_info_by_idx_name_order_decreasing);
+ }
+
+ /* Ensure that the storage_type field is at least set to a meaningful value */
+ if (group_info.storage_type != H5G_STORAGE_TYPE_SYMBOL_TABLE &&
+ group_info.storage_type != H5G_STORAGE_TYPE_COMPACT &&
+ group_info.storage_type != H5G_STORAGE_TYPE_DENSE &&
+ group_info.storage_type != H5G_STORAGE_TYPE_UNKNOWN) {
+ H5_FAILED();
+ HDprintf(" group info's 'storage_type' field wasn't set to a meaningful value\n");
+ PART_ERROR(H5Gget_info_by_idx_name_order_decreasing);
+ }
+
+ /* Assume that mounted should be FALSE in this case */
+ if (group_info.mounted != FALSE) {
+ H5_FAILED();
+ HDprintf(" group info's 'mounted' field was TRUE when it should have been FALSE\n");
+ PART_ERROR(H5Gget_info_by_idx_name_order_decreasing);
+ }
+ }
+
+ PASSED();
+#else
+ SKIPPED();
+ PART_EMPTY(H5Gget_info_by_idx_name_order_decreasing);
+#endif
+ }
+ PART_END(H5Gget_info_by_idx_name_order_decreasing);
+ }
+ END_MULTIPART;
+
+ TESTING_2("test cleanup");
+
+ if (H5Pclose(gcpl_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(parent_group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Pclose(gcpl_id);
+ H5Gclose(parent_group_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to check that a group's info can't be retrieved when
+ * H5Gget_info(_by_name/_by_idx) is passed invalid parameters.
+ */
+static int
+test_get_group_info_invalid_params(void)
+{
+ H5G_info_t group_info;
+ herr_t err_ret = -1;
+ hid_t file_id = H5I_INVALID_HID;
+
+ TESTING_MULTIPART("retrieval of group info with invalid parameters");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & (H5VL_CAP_FLAG_FILE_BASIC)) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_MORE) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_CREATION_ORDER)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file, more group, creation order aren't supported with this "
+ "connector\n");
+ return 0;
+ }
+
+ TESTING_2("test setup");
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ PASSED();
+
+ BEGIN_MULTIPART
+ {
+ PART_BEGIN(H5Gget_info_invalid_loc_id)
+ {
+ TESTING_2("H5Gget_info with an invalid loc_id");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Gget_info(H5I_INVALID_HID, &group_info);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" retrieved info of group using H5Gget_info with an invalid loc_id!\n");
+ PART_ERROR(H5Gget_info_invalid_loc_id);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Gget_info_invalid_loc_id);
+
+ PART_BEGIN(H5Gget_info_invalid_grp_info_pointer)
+ {
+ TESTING_2("H5Gget_info with an invalid group info pointer");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Gget_info(file_id, NULL);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" retrieved info of group using H5Gget_info with invalid group info pointer!\n");
+ PART_ERROR(H5Gget_info_invalid_grp_info_pointer);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Gget_info_invalid_grp_info_pointer);
+
+ PART_BEGIN(H5Gget_info_by_name_invalid_loc_id)
+ {
+ TESTING_2("H5Gget_info_by_name with an invalid loc_id");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Gget_info_by_name(H5I_INVALID_HID, ".", &group_info, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" retrieved info of group using H5Gget_info_by_name with an invalid loc_id!\n");
+ PART_ERROR(H5Gget_info_by_name_invalid_loc_id);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Gget_info_by_name_invalid_loc_id);
+
+ PART_BEGIN(H5Gget_info_by_name_invalid_grp_name)
+ {
+ TESTING_2("H5Gget_info_by_name with an invalid group name");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Gget_info_by_name(file_id, NULL, &group_info, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" retrieved info of group using H5Gget_info_by_name with a NULL name!\n");
+ PART_ERROR(H5Gget_info_by_name_invalid_grp_name);
+ }
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Gget_info_by_name(file_id, "", &group_info, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(
+ " retrieved info of group using H5Gget_info_by_name with an invalid name of ''!\n");
+ PART_ERROR(H5Gget_info_by_name_invalid_grp_name);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Gget_info_by_name_invalid_grp_name);
+
+ PART_BEGIN(H5Gget_info_by_name_invalid_grp_info_pointer)
+ {
+ TESTING_2("H5Gget_info_by_name with an invalid group info pointer");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Gget_info_by_name(file_id, ".", NULL, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" retrieved info of group using H5Gget_info_by_name with an invalid group info "
+ "pointer!\n");
+ PART_ERROR(H5Gget_info_by_name_invalid_grp_info_pointer);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Gget_info_by_name_invalid_grp_info_pointer);
+
+ PART_BEGIN(H5Gget_info_by_name_invalid_lapl)
+ {
+ TESTING_2("H5Gget_info_by_name with an invalid LAPL");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Gget_info_by_name(file_id, ".", &group_info, H5I_INVALID_HID);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" retrieved info of group using H5Gget_info_by_name with an invalid LAPL!\n");
+ PART_ERROR(H5Gget_info_by_name_invalid_lapl);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Gget_info_by_name_invalid_lapl);
+
+ PART_BEGIN(H5Gget_info_by_idx_invalid_loc_id)
+ {
+ TESTING_2("H5Gget_info_by_idx with an invalid loc_id");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Gget_info_by_idx(H5I_INVALID_HID, ".", H5_INDEX_NAME, H5_ITER_INC, 0, &group_info,
+ H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" retrieved info of group using H5Gget_info_by_idx with an invalid loc_id!\n");
+ PART_ERROR(H5Gget_info_by_idx_invalid_loc_id);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Gget_info_by_idx_invalid_loc_id);
+
+ PART_BEGIN(H5Gget_info_by_idx_invalid_grp_name)
+ {
+ TESTING_2("H5Gget_info_by_idx with an invalid group name");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Gget_info_by_idx(file_id, NULL, H5_INDEX_NAME, H5_ITER_INC, 0, &group_info,
+ H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" retrieved info of group using H5Gget_info_by_idx with a NULL group name!\n");
+ PART_ERROR(H5Gget_info_by_idx_invalid_grp_name);
+ }
+
+ H5E_BEGIN_TRY
+ {
+ err_ret =
+ H5Gget_info_by_idx(file_id, "", H5_INDEX_NAME, H5_ITER_INC, 0, &group_info, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" retrieved info of group using H5Gget_info_by_idx with an invalid group name of "
+ "''!\n");
+ PART_ERROR(H5Gget_info_by_idx_invalid_grp_name);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Gget_info_by_idx_invalid_grp_name);
+
+ PART_BEGIN(H5Gget_info_by_idx_invalid_index_type)
+ {
+ TESTING_2("H5Gget_info_by_idx with an invalid index type");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Gget_info_by_idx(file_id, ".", H5_INDEX_UNKNOWN, H5_ITER_INC, 0, &group_info,
+ H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" retrieved info of group using H5Gget_info_by_idx with invalid index type "
+ "H5_INDEX_UNKNOWN!\n");
+ PART_ERROR(H5Gget_info_by_idx_invalid_index_type);
+ }
+
+ H5E_BEGIN_TRY
+ {
+ err_ret =
+ H5Gget_info_by_idx(file_id, ".", H5_INDEX_N, H5_ITER_INC, 0, &group_info, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" retrieved info of group using H5Gget_info_by_idx with invalid index type "
+ "H5_INDEX_N!\n");
+ PART_ERROR(H5Gget_info_by_idx_invalid_index_type);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Gget_info_by_idx_invalid_index_type);
+
+ PART_BEGIN(H5Gget_info_by_idx_invalid_iter_order)
+ {
+ TESTING_2("H5Gget_info_by_idx with an invalid iteration order");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Gget_info_by_idx(file_id, ".", H5_INDEX_NAME, H5_ITER_UNKNOWN, 0, &group_info,
+ H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" retrieved info of group using H5Gget_info_by_idx with invalid iteration order "
+ "H5_ITER_UNKNOWN!\n");
+ PART_ERROR(H5Gget_info_by_idx_invalid_iter_order);
+ }
+
+ H5E_BEGIN_TRY
+ {
+ err_ret =
+ H5Gget_info_by_idx(file_id, ".", H5_INDEX_NAME, H5_ITER_N, 0, &group_info, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" retrieved info of group using H5Gget_info_by_idx with invalid iteration order "
+ "H5_ITER_N!\n");
+ PART_ERROR(H5Gget_info_by_idx_invalid_iter_order);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Gget_info_by_idx_invalid_iter_order);
+
+ PART_BEGIN(H5Gget_info_by_idx_invalid_grp_info_pointer)
+ {
+ TESTING_2("H5Gget_info_by_idx with an invalid group info pointer");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Gget_info_by_idx(file_id, ".", H5_INDEX_NAME, H5_ITER_INC, 0, NULL, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" retrieved info of group using H5Gget_info_by_idx with an invalid group info "
+ "pointer!\n");
+ PART_ERROR(H5Gget_info_by_idx_invalid_grp_info_pointer);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Gget_info_by_idx_invalid_grp_info_pointer);
+
+ PART_BEGIN(H5Gget_info_by_idx_invalid_lapl)
+ {
+ TESTING_2("H5Gget_info_by_idx with an invalid LAPL");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Gget_info_by_idx(file_id, ".", H5_INDEX_NAME, H5_ITER_INC, 0, &group_info,
+ H5I_INVALID_HID);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" retrieved info of group using H5Gget_info_by_idx with an invalid LAPL!\n");
+ PART_ERROR(H5Gget_info_by_idx_invalid_lapl);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Gget_info_by_idx_invalid_lapl);
+ }
+ END_MULTIPART;
+
+ TESTING_2("test cleanup");
+
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test for H5Gflush.
+ */
+static int
+test_flush_group(void)
+{
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+
+ TESTING("H5Gflush");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & (H5VL_CAP_FLAG_FILE_BASIC)) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_FLUSH_REFRESH)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file, more group, creation order aren't supported with this "
+ "connector\n");
+ return 0;
+ }
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, GROUP_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group\n");
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, GROUP_FLUSH_GNAME, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create group '%s'\n", GROUP_FLUSH_GNAME);
+ goto error;
+ }
+
+ /* Flush the group */
+ if (H5Gflush(group_id) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't flush the group '%s'\n", GROUP_FLUSH_GNAME);
+ goto error;
+ }
+
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to check that H5Gflush fails when it
+ * is passed invalid parameters.
+ */
+static int
+test_flush_group_invalid_params(void)
+{
+ herr_t status;
+
+ TESTING("H5Gflush with invalid parameters");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FLUSH_REFRESH)) {
+ SKIPPED();
+ HDprintf(" API functions for group flush aren't supported with this connector\n");
+ return 0;
+ }
+
+ H5E_BEGIN_TRY
+ {
+ status = H5Gflush(H5I_INVALID_HID);
+ }
+ H5E_END_TRY;
+
+ if (status >= 0) {
+ H5_FAILED();
+ HDprintf(" flushed group with invalid ID!\n");
+ goto error;
+ }
+
+ PASSED();
+
+ return 0;
+
+error:
+ return 1;
+}
+
+/*
+ * A test for H5Grefresh.
+ */
+static int
+test_refresh_group(void)
+{
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+
+ TESTING("H5Grefresh");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & (H5VL_CAP_FLAG_FILE_BASIC)) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_FLUSH_REFRESH)) {
+ SKIPPED();
+ HDprintf(
+ " API functions for basic file, group, or refresh aren't supported with this connector\n");
+ return 0;
+ }
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, GROUP_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group\n");
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, GROUP_REFRESH_GNAME, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create group '%s'\n", GROUP_REFRESH_GNAME);
+ goto error;
+ }
+
+ /* Refresh the group */
+ if (H5Grefresh(group_id) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't refresh the group '%s'\n", GROUP_REFRESH_GNAME);
+ goto error;
+ }
+
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to check that H5Grefresh fails when it
+ * is passed invalid parameters.
+ */
+static int
+test_refresh_group_invalid_params(void)
+{
+ herr_t status;
+
+ TESTING("H5Grefresh with invalid parameters");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FLUSH_REFRESH)) {
+ SKIPPED();
+ HDprintf(" API functions for group refresh aren't supported with this connector\n");
+ return 0;
+ }
+
+ H5E_BEGIN_TRY
+ {
+ status = H5Grefresh(H5I_INVALID_HID);
+ }
+ H5E_END_TRY;
+
+ if (status >= 0) {
+ H5_FAILED();
+ HDprintf(" refreshed group with invalid ID!\n");
+ goto error;
+ }
+
+ PASSED();
+
+ return 0;
+
+error:
+ return 1;
+}
+
+int
+H5_api_group_test(void)
+{
+ size_t i;
+ int nerrors;
+
+ HDprintf("**********************************************\n");
+ HDprintf("* *\n");
+ HDprintf("* API Group Tests *\n");
+ HDprintf("* *\n");
+ HDprintf("**********************************************\n\n");
+
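+ /* Run each group test in turn; a test returns 0 on success and non-zero on
+ * failure, so nerrors counts how many of the tests failed. */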
+ for (i = 0, nerrors = 0; i < ARRAY_LENGTH(group_tests); i++) {
+ nerrors += (*group_tests[i])() ? 1 : 0;
+ }
+
+ HDprintf("\n");
+
+ return nerrors;
+}
diff --git a/test/API/H5_api_group_test.h b/test/API/H5_api_group_test.h
new file mode 100644
index 0000000..baf14c8
--- /dev/null
+++ b/test/API/H5_api_group_test.h
@@ -0,0 +1,65 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+#ifndef H5_API_GROUP_TEST_H
+#define H5_API_GROUP_TEST_H
+
+#include "H5_api_test.h"
+
+int H5_api_group_test(void);
+
+/**********************************************
+ * *
+ * API Group test defines *
+ * *
+ **********************************************/
+
+#define GROUP_CREATE_UNDER_ROOT_GNAME "/group_under_root"
+
+#define GROUP_CREATE_UNDER_GROUP_REL_GNAME "child_group"
+#define GROUP_CREATE_UNDER_GROUP_ABS_GNAME "child_group/grandchild_group"
+
+#define GROUP_CREATE_INVALID_PARAMS_GROUP_NAME "/invalid_params_group"
+
+#define GROUP_CREATE_ANONYMOUS_GROUP_NAME "anon_group"
+
+#define GROUP_CREATE_INTMD_REL_INTMD_NAME "rel_intmd"
+#define GROUP_CREATE_INTMD_REL_END_NAME "rel_end"
+#define GROUP_CREATE_INTMD_ABS_INTMD_NAME "abs_intmd"
+#define GROUP_CREATE_INTMD_ABS_END_NAME "abs_end"
+#define GROUP_CREATE_INTMD_MULT_INTMD1_NAME "mult_intmd1"
+#define GROUP_CREATE_INTMD_MULT_INTMD2_NAME "mult_intmd2"
+#define GROUP_CREATE_INTMD_MULT_END_NAME "mult_end"
+
+#define OPEN_NONEXISTENT_GROUP_TEST_GNAME "/nonexistent_group"
+
+#define GROUP_PROPERTY_LIST_TEST_GROUP_NAME1 "property_list_test_group1"
+#define GROUP_PROPERTY_LIST_TEST_GROUP_NAME2 "property_list_test_group2"
+#define GROUP_PROPERTY_LIST_TEST_DUMMY_VAL H5P_CRT_ORDER_TRACKED
+
+#define GROUP_GET_INFO_TEST_GROUP_NAME "group_info_test"
+#define GROUP_GET_INFO_TEST_GROUP_NUMB 16
+
+#define GROUP_FLUSH_GNAME "group_flush_test"
+
+#define GROUP_REFRESH_GNAME "group_refresh_test"
+
+#define NAME_BUF_SIZE 64
+#define GROUP_NUMB 16
+
+#define MANY_GROUP_CREATIONS_GNAME "home_for_many_groups"
+#define GROUP_NUMB_MANY 100u
+
+#define DEEP_GROUP_CREATIONS_GNAME "home_for_deep_groups"
+#define GROUP_DEPTH 100u
+
+#endif
diff --git a/test/API/H5_api_link_test.c b/test/API/H5_api_link_test.c
new file mode 100644
index 0000000..9a8c65a
--- /dev/null
+++ b/test/API/H5_api_link_test.c
@@ -0,0 +1,27072 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+#include "H5_api_link_test.h"
+
+/*
+ * TODO: add link tests for short-circuit success in operator callback
+ */
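+
+/*
+ * Illustrative sketch only (disabled, not part of the test list): the TODO
+ * above refers to "short-circuit success", where a link iteration operator
+ * callback returns a positive value so that H5Literate2/H5Lvisit2 stop early
+ * and return that value to the caller. The callback name and the meaning of
+ * op_data below are hypothetical and are shown only to clarify what such a
+ * test would exercise.
+ */
+#if 0 /* example only */
+static herr_t
+link_iter_short_circuit_cb(hid_t group_id, const char *name, const H5L_info2_t *info, void *op_data)
+{
+ const char *stop_at_name = (const char *)op_data; /* hypothetical op_data: link name to stop at */
+
+ (void)group_id;
+ (void)info;
+
+ /* Returning a positive value stops iteration early ("short-circuit success");
+ * returning 0 continues iteration and a negative value aborts with failure. */
+ if (!HDstrcmp(name, stop_at_name))
+ return 1;
+
+ return 0;
+}
+#endif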
+
+static int test_create_hard_link(void);
+static int test_create_hard_link_long_name(void);
+static int test_create_hard_link_many(void);
+static int test_create_hard_link_same_loc(void);
+static int test_create_hard_link_invalid_params(void);
+static int test_create_soft_link_existing_relative(void);
+static int test_create_soft_link_existing_absolute(void);
+static int test_create_soft_link_dangling_relative(void);
+static int test_create_soft_link_dangling_absolute(void);
+static int test_create_soft_link_long_name(void);
+static int test_create_soft_link_many(void);
+static int test_create_soft_link_invalid_params(void);
+static int test_create_external_link(void);
+static int test_create_external_link_dangling(void);
+static int test_create_external_link_multi(void);
+static int test_create_external_link_ping_pong(void);
+static int test_create_external_link_invalid_params(void);
+static int test_create_user_defined_link(void);
+static int test_create_user_defined_link_invalid_params(void);
+static int test_delete_link(void);
+static int test_delete_link_reset_grp_max_crt_order(void);
+static int test_delete_link_invalid_params(void);
+static int test_copy_link(void);
+static int test_copy_links_into_group_with_links(void);
+static int test_copy_link_across_files(void);
+static int test_copy_link_invalid_params(void);
+static int test_move_link(void);
+static int test_move_links_into_group_with_links(void);
+static int test_move_link_across_files(void);
+static int test_move_link_reset_grp_max_crt_order(void);
+static int test_move_link_invalid_params(void);
+static int test_get_link_val(void);
+static int test_get_link_val_invalid_params(void);
+static int test_get_link_info(void);
+static int test_get_link_info_invalid_params(void);
+static int test_get_link_name(void);
+static int test_get_link_name_invalid_params(void);
+static int test_link_iterate_hard_links(void);
+static int test_link_iterate_soft_links(void);
+static int test_link_iterate_external_links(void);
+static int test_link_iterate_ud_links(void);
+static int test_link_iterate_mixed_links(void);
+static int test_link_iterate_invalid_params(void);
+static int test_link_iterate_0_links(void);
+static int test_link_visit_hard_links_no_cycles(void);
+static int test_link_visit_soft_links_no_cycles(void);
+static int test_link_visit_external_links_no_cycles(void);
+static int test_link_visit_ud_links_no_cycles(void);
+static int test_link_visit_mixed_links_no_cycles(void);
+static int test_link_visit_hard_links_cycles(void);
+static int test_link_visit_soft_links_cycles(void);
+static int test_link_visit_external_links_cycles(void);
+static int test_link_visit_ud_links_cycles(void);
+static int test_link_visit_mixed_links_cycles(void);
+static int test_link_visit_invalid_params(void);
+static int test_link_visit_0_links(void);
+
+static herr_t link_iter_hard_links_cb(hid_t group_id, const char *name, const H5L_info2_t *info,
+ void *op_data);
+static herr_t link_iter_soft_links_cb(hid_t group_id, const char *name, const H5L_info2_t *info,
+ void *op_data);
+#ifndef NO_EXTERNAL_LINKS
+static herr_t link_iter_external_links_cb(hid_t group_id, const char *name, const H5L_info2_t *info,
+ void *op_data);
+#endif
+#ifndef NO_USER_DEFINED_LINKS
+static herr_t link_iter_ud_links_cb(hid_t group_id, const char *name, const H5L_info2_t *info, void *op_data);
+#endif
+#if !defined(NO_EXTERNAL_LINKS) && !defined(NO_USER_DEFINED_LINKS)
+static herr_t link_iter_mixed_links_cb(hid_t group_id, const char *name, const H5L_info2_t *info,
+ void *op_data);
+#endif
+static herr_t link_iter_invalid_params_cb(hid_t group_id, const char *name, const H5L_info2_t *info,
+ void *op_data);
+static herr_t link_iter_0_links_cb(hid_t group_id, const char *name, const H5L_info2_t *info, void *op_data);
+#if !defined(NO_EXTERNAL_LINKS) && !defined(NO_USER_DEFINED_LINKS)
+static herr_t link_iter_idx_saving_cb(hid_t group_id, const char *name, const H5L_info2_t *info,
+ void *op_data);
+#endif
+
+static herr_t link_visit_hard_links_no_cycles_cb(hid_t group_id, const char *name, const H5L_info2_t *info,
+ void *op_data);
+static herr_t link_visit_soft_links_no_cycles_cb(hid_t group_id, const char *name, const H5L_info2_t *info,
+ void *op_data);
+#ifndef NO_EXTERNAL_LINKS
+static herr_t link_visit_external_links_no_cycles_cb(hid_t group_id, const char *name,
+ const H5L_info2_t *info, void *op_data);
+#endif
+#ifndef NO_USER_DEFINED_LINKS
+static herr_t link_visit_ud_links_no_cycles_cb(hid_t group_id, const char *name, const H5L_info2_t *info,
+ void *op_data);
+#endif
+#if !defined(NO_EXTERNAL_LINKS) && !defined(NO_USER_DEFINED_LINKS)
+static herr_t link_visit_mixed_links_no_cycles_cb(hid_t group_id, const char *name, const H5L_info2_t *info,
+ void *op_data);
+#endif
+static herr_t link_visit_hard_links_cycles_cb(hid_t group_id, const char *name, const H5L_info2_t *info,
+ void *op_data);
+static herr_t link_visit_soft_links_cycles_cb(hid_t group_id, const char *name, const H5L_info2_t *info,
+ void *op_data);
+#ifndef NO_EXTERNAL_LINKS
+static herr_t link_visit_external_links_cycles_cb(hid_t group_id, const char *name, const H5L_info2_t *info,
+ void *op_data);
+#endif
+#ifndef NO_USER_DEFINED_LINKS
+static herr_t link_visit_ud_links_cycles_cb(hid_t group_id, const char *name, const H5L_info2_t *info,
+ void *op_data);
+#endif
+#if !defined(NO_EXTERNAL_LINKS) && !defined(NO_USER_DEFINED_LINKS)
+static herr_t link_visit_mixed_links_cycles_cb(hid_t group_id, const char *name, const H5L_info2_t *info,
+ void *op_data);
+#endif
+static herr_t link_visit_invalid_params_cb(hid_t group_id, const char *name, const H5L_info2_t *info,
+ void *op_data);
+static herr_t link_visit_0_links_cb(hid_t group_id, const char *name, const H5L_info2_t *info, void *op_data);
+
+/*
+ * The array of link tests to be performed.
+ */
+static int (*link_tests[])(void) = {
+ test_create_hard_link,
+ test_create_hard_link_long_name,
+ test_create_hard_link_many,
+ test_create_hard_link_same_loc,
+ test_create_hard_link_invalid_params,
+ test_create_soft_link_existing_relative,
+ test_create_soft_link_existing_absolute,
+ test_create_soft_link_dangling_relative,
+ test_create_soft_link_dangling_absolute,
+ test_create_soft_link_long_name,
+ test_create_soft_link_many,
+ test_create_soft_link_invalid_params,
+ test_create_external_link,
+ test_create_external_link_dangling,
+ test_create_external_link_multi,
+ test_create_external_link_ping_pong,
+ test_create_external_link_invalid_params,
+ test_create_user_defined_link,
+ test_create_user_defined_link_invalid_params,
+ test_delete_link,
+ test_delete_link_reset_grp_max_crt_order,
+ test_delete_link_invalid_params,
+ test_copy_link,
+ test_copy_links_into_group_with_links,
+ test_copy_link_across_files,
+ test_copy_link_invalid_params,
+ test_move_link,
+ test_move_links_into_group_with_links,
+ test_move_link_across_files,
+ test_move_link_reset_grp_max_crt_order,
+ test_move_link_invalid_params,
+ test_get_link_val,
+ test_get_link_val_invalid_params,
+ test_get_link_info,
+ test_get_link_info_invalid_params,
+ test_get_link_name,
+ test_get_link_name_invalid_params,
+ test_link_iterate_hard_links,
+ test_link_iterate_soft_links,
+ test_link_iterate_external_links,
+ test_link_iterate_ud_links,
+ test_link_iterate_mixed_links,
+ test_link_iterate_invalid_params,
+ test_link_iterate_0_links,
+ test_link_visit_hard_links_no_cycles,
+ test_link_visit_soft_links_no_cycles,
+ test_link_visit_external_links_no_cycles,
+ test_link_visit_ud_links_no_cycles,
+ test_link_visit_mixed_links_no_cycles,
+ test_link_visit_hard_links_cycles,
+ test_link_visit_soft_links_cycles,
+ test_link_visit_external_links_cycles,
+ test_link_visit_ud_links_cycles,
+ test_link_visit_mixed_links_cycles,
+ test_link_visit_invalid_params,
+ test_link_visit_0_links,
+};
+
+/*
+ * A test to check that a hard link can be created
+ * using H5Lcreate_hard.
+ */
+static int
+test_create_hard_link(void)
+{
+ htri_t link_exists;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+
+ TESTING("hard link creation");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_LINK_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_HARD_LINKS)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file or group, basic or hard link aren't supported with this "
+ "connector\n");
+ return 0;
+ }
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, LINK_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", LINK_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, HARD_LINK_TEST_GROUP_NAME, H5P_DEFAULT, H5P_DEFAULT,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container sub-group '%s'\n", HARD_LINK_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if (H5Lcreate_hard(file_id, "/", group_id, HARD_LINK_TEST_LINK_NAME, H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create hard link '%s'\n", HARD_LINK_TEST_LINK_NAME);
+ goto error;
+ }
+
+ /* Verify the link has been created */
+ if ((link_exists = H5Lexists(group_id, HARD_LINK_TEST_LINK_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if link '%s' exists\n", HARD_LINK_TEST_LINK_NAME);
+ goto error;
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" link did not exist\n");
+ goto error;
+ }
+
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to check that a hard link with a long name can be created
+ * using H5Lcreate_hard.
+ */
+static int
+test_create_hard_link_long_name(void)
+{
+ htri_t link_exists;
+ hid_t file_id = H5I_INVALID_HID;
+ char vol_name[5];
+ size_t name_len = MAX_NAME_LEN;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ char *objname = NULL; /* Name of object [Long] */
+ size_t u; /* Local index variable */
+
+ TESTING("hard link creation with a long name");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_LINK_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_HARD_LINKS)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file or group, basic or hard link aren't supported with this "
+ "connector\n");
+ return 0;
+ }
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, LINK_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", LINK_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, HARD_LINK_TEST_GROUP_LONG_NAME, H5P_DEFAULT, H5P_DEFAULT,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container sub-group '%s'\n", HARD_LINK_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if (H5VLget_connector_name(file_id, vol_name, 5) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get VOL connector name\n");
+ goto error;
+ }
+
+ /* for DAOS VOL, max link name supported is 99 (Lexical key) */
+ if (strcmp(vol_name, "daos") == 0)
+ name_len = 99;
+
+ /* Construct a very long link name */
+ if ((objname = (char *)HDmalloc((size_t)(name_len + 1))) == NULL)
+ TEST_ERROR;
+
+ for (u = 0; u < name_len; u++)
+ objname[u] = 'a';
+ objname[name_len] = '\0';
+
+ if (H5Lcreate_hard(file_id, "/", group_id, objname, H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create hard link with a long name\n");
+ goto error;
+ }
+
+ /* Verify the link has been created */
+ if ((link_exists = H5Lexists(group_id, objname, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if the link with a long name exists\n");
+ goto error;
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" link did not exist\n");
+ goto error;
+ }
+
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ /* Release memory */
+ if (objname)
+ HDfree(objname);
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ if (objname)
+ HDfree(objname);
+
+ return 1;
+}
+
+/*
+ * A test to check that many hard links can be created
+ * using H5Lcreate_hard.
+ */
+static int
+test_create_hard_link_many(void)
+{
+ htri_t link_exists;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID, group_id2 = H5I_INVALID_HID;
+#ifndef NO_OBJECT_GET_NAME
+ char objname[HARD_LINK_TEST_GROUP_MANY_NAME_BUF_SIZE]; /* Object name */
+#endif
+
+ TESTING("hard link creation of many links");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_HARD_LINKS)) {
+ SKIPPED();
+ HDprintf(
+ " API functions for basic file or group, or hard link aren't supported with this connector\n");
+ return 0;
+ }
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, LINK_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", LINK_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, HARD_LINK_TEST_GROUP_MANY_NAME, H5P_DEFAULT, H5P_DEFAULT,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container sub-group '%s'\n", HARD_LINK_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id2 = H5Gcreate2(group_id, HARD_LINK_TEST_GROUP_MANY_FINAL_NAME, H5P_DEFAULT, H5P_DEFAULT,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container sub-group '%s'\n", HARD_LINK_TEST_GROUP_MANY_FINAL_NAME);
+ goto error;
+ }
+
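+ /* Create a chain of hard links: 'hard1' points to the final group's object
+ * header, and each subsequent link is created through the previous one, so
+ * every link in the chain resolves to the same object. */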
+ if (H5Lcreate_hard(group_id, HARD_LINK_TEST_GROUP_MANY_FINAL_NAME, group_id, "hard1", H5P_DEFAULT,
+ H5P_DEFAULT) < 0)
+ TEST_ERROR;
+ if (H5Lcreate_hard(group_id, "hard1", group_id, "hard2", H5P_DEFAULT, H5P_DEFAULT) < 0)
+ TEST_ERROR;
+ if (H5Lcreate_hard(group_id, "hard2", group_id, "hard3", H5P_DEFAULT, H5P_DEFAULT) < 0)
+ TEST_ERROR;
+ if (H5Lcreate_hard(group_id, "hard3", group_id, "hard4", H5P_DEFAULT, H5P_DEFAULT) < 0)
+ TEST_ERROR;
+ if (H5Lcreate_hard(group_id, "hard4", group_id, "hard5", H5P_DEFAULT, H5P_DEFAULT) < 0)
+ TEST_ERROR;
+ if (H5Lcreate_hard(group_id, "hard5", group_id, "hard6", H5P_DEFAULT, H5P_DEFAULT) < 0)
+ TEST_ERROR;
+ if (H5Lcreate_hard(group_id, "hard6", group_id, "hard7", H5P_DEFAULT, H5P_DEFAULT) < 0)
+ TEST_ERROR;
+ if (H5Lcreate_hard(group_id, "hard7", group_id, "hard8", H5P_DEFAULT, H5P_DEFAULT) < 0)
+ TEST_ERROR;
+ if (H5Lcreate_hard(group_id, "hard8", group_id, "hard9", H5P_DEFAULT, H5P_DEFAULT) < 0)
+ TEST_ERROR;
+ if (H5Lcreate_hard(group_id, "hard9", group_id, "hard10", H5P_DEFAULT, H5P_DEFAULT) < 0)
+ TEST_ERROR;
+ if (H5Lcreate_hard(group_id, "hard10", group_id, "hard11", H5P_DEFAULT, H5P_DEFAULT) < 0)
+ TEST_ERROR;
+ if (H5Lcreate_hard(group_id, "hard11", group_id, "hard12", H5P_DEFAULT, H5P_DEFAULT) < 0)
+ TEST_ERROR;
+ if (H5Lcreate_hard(group_id, "hard12", group_id, "hard13", H5P_DEFAULT, H5P_DEFAULT) < 0)
+ TEST_ERROR;
+ if (H5Lcreate_hard(group_id, "hard13", group_id, "hard14", H5P_DEFAULT, H5P_DEFAULT) < 0)
+ TEST_ERROR;
+ if (H5Lcreate_hard(group_id, "hard14", group_id, "hard15", H5P_DEFAULT, H5P_DEFAULT) < 0)
+ TEST_ERROR;
+ if (H5Lcreate_hard(group_id, "hard15", group_id, "hard16", H5P_DEFAULT, H5P_DEFAULT) < 0)
+ TEST_ERROR;
+ if (H5Lcreate_hard(group_id, "hard16", group_id, "hard17", H5P_DEFAULT, H5P_DEFAULT) < 0)
+ TEST_ERROR;
+ if (H5Lcreate_hard(group_id, "hard17", group_id, "hard18", H5P_DEFAULT, H5P_DEFAULT) < 0)
+ TEST_ERROR;
+ if (H5Lcreate_hard(group_id, "hard18", group_id, "hard19", H5P_DEFAULT, H5P_DEFAULT) < 0)
+ TEST_ERROR;
+ if (H5Lcreate_hard(group_id, "hard19", group_id, "hard20", H5P_DEFAULT, H5P_DEFAULT) < 0)
+ TEST_ERROR;
+ if (H5Lcreate_hard(group_id, "hard20", group_id, "hard21", H5P_DEFAULT, H5P_DEFAULT) < 0)
+ TEST_ERROR;
+
+ /* Verify the link has been created */
+ if ((link_exists = H5Lexists(group_id, "hard21", H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if link 'hard21' exists\n");
+ goto error;
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" link 'hard21' did not exist\n");
+ goto error;
+ }
+
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id2) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ /* Reopen the file and group and verify the hard link */
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, LINK_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", LINK_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id = H5Gopen2(container_group, HARD_LINK_TEST_GROUP_MANY_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container sub-group '%s'\n", HARD_LINK_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ /* Open the object through last hard link */
+ if ((group_id2 = H5Gopen2(group_id, "hard21", H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open the group '%s' with the last hard link 'hard21'\n",
+ HARD_LINK_TEST_GROUP_MANY_FINAL_NAME);
+ goto error;
+ }
+#ifndef NO_OBJECT_GET_NAME
+ /* Check name */
+ if (H5Iget_name(group_id2, objname, (size_t)HARD_LINK_TEST_GROUP_MANY_NAME_BUF_SIZE) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get the name of the object '%s'\n", HARD_LINK_TEST_GROUP_MANY_FINAL_NAME);
+ goto error;
+ }
+
+ if (HDstrcmp(objname, "/" LINK_TEST_GROUP_NAME "/" HARD_LINK_TEST_GROUP_MANY_NAME "/hard21")) {
+ H5_FAILED();
+ HDprintf(" wrong name of the object '%s'\n", objname);
+ goto error;
+ }
+#endif
+
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id2) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Gclose(group_id);
+ H5Gclose(group_id2);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to check that behavior is correct when using
+ * the H5L_SAME_LOC macro for H5Lcreate_hard().
+ */
+static int
+test_create_hard_link_same_loc(void)
+{
+ htri_t link_exists;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+
+ TESTING_MULTIPART("hard link creation with H5L_SAME_LOC");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_LINK_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_HARD_LINKS)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file or group, basic or hard link aren't supported with this "
+ "connector\n");
+ return 0;
+ }
+
+ TESTING_2("test setup");
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, LINK_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", LINK_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, H5L_SAME_LOC_TEST_GROUP_NAME, H5P_DEFAULT, H5P_DEFAULT,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create group '%s'\n", H5L_SAME_LOC_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ PASSED();
+
+ BEGIN_MULTIPART
+ {
+ PART_BEGIN(H5L_SAME_LOC_first_param)
+ {
+ TESTING_2("usage of H5L_SAME_LOC for first parameter of H5Lcreate_hard");
+
+ /* Library functionality for this part of the test is broken */
+ if (H5Lcreate_hard(H5L_SAME_LOC, ".", group_id, H5L_SAME_LOC_TEST_LINK_NAME1, H5P_DEFAULT,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create first link '%s'\n", H5L_SAME_LOC_TEST_LINK_NAME1);
+ PART_ERROR(H5L_SAME_LOC_first_param);
+ }
+
+ /* Verify the link has been created */
+ if ((link_exists = H5Lexists(group_id, H5L_SAME_LOC_TEST_LINK_NAME1, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if link exists\n");
+ PART_ERROR(H5L_SAME_LOC_first_param);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" link did not exist\n");
+ PART_ERROR(H5L_SAME_LOC_first_param);
+ }
+
+ PASSED();
+ }
+ PART_END(H5L_SAME_LOC_first_param);
+
+ PART_BEGIN(H5L_SAME_LOC_third_param)
+ {
+ TESTING_2("usage of H5L_SAME_LOC for third parameter of H5Lcreate_hard");
+
+ if (H5Lcreate_hard(group_id, ".", H5L_SAME_LOC, H5L_SAME_LOC_TEST_LINK_NAME2, H5P_DEFAULT,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create second link '%s'\n", H5L_SAME_LOC_TEST_LINK_NAME2);
+ PART_ERROR(H5L_SAME_LOC_third_param);
+ }
+
+ /* Verify the link has been created */
+ if ((link_exists = H5Lexists(group_id, H5L_SAME_LOC_TEST_LINK_NAME2, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if link '%s' exists\n", H5L_SAME_LOC_TEST_LINK_NAME2);
+ PART_ERROR(H5L_SAME_LOC_third_param);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" link did not exist\n");
+ PART_ERROR(H5L_SAME_LOC_third_param);
+ }
+
+ PASSED();
+ }
+ PART_END(H5L_SAME_LOC_third_param);
+ }
+ END_MULTIPART;
+
+ TESTING_2("test cleanup");
+
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to check that a hard link can't be created when
+ * H5Lcreate_hard is passed invalid parameters.
+ */
+static int
+test_create_hard_link_invalid_params(void)
+{
+ herr_t err_ret = -1;
+ htri_t link_exists;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+#ifndef NO_PREVENT_HARD_LINKS_ACROSS_FILES
+ char ext_link_filename[H5_API_TEST_FILENAME_MAX_LENGTH];
+#endif
+ hid_t ext_file_id = H5I_INVALID_HID;
+
+ TESTING_MULTIPART("hard link creation with invalid parameters");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_LINK_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_HARD_LINKS)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file or group, basic or hard link aren't supported with this "
+ "connector\n");
+ return 0;
+ }
+
+ TESTING_2("test setup");
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, LINK_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", LINK_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, HARD_LINK_INVALID_PARAMS_TEST_GROUP_NAME, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container sub-group '%s'\n", HARD_LINK_INVALID_PARAMS_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ PASSED();
+
+ BEGIN_MULTIPART
+ {
+ PART_BEGIN(H5Lcreate_hard_invalid_cur_loc_id)
+ {
+ TESTING_2("H5Lcreate_hard with an invalid cur_loc_id");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Lcreate_hard(H5I_INVALID_HID, "/", group_id,
+ HARD_LINK_INVALID_PARAMS_TEST_LINK_NAME, H5P_DEFAULT, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" created hard link with an invalid cur_loc_id!\n");
+ PART_ERROR(H5Lcreate_hard_invalid_cur_loc_id);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lcreate_hard_invalid_cur_loc_id);
+
+ PART_BEGIN(H5Lcreate_hard_invalid_cur_name)
+ {
+ TESTING_2("H5Lcreate_hard with an invalid cur_name");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Lcreate_hard(file_id, NULL, group_id, HARD_LINK_INVALID_PARAMS_TEST_LINK_NAME,
+ H5P_DEFAULT, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" created hard link with a NULL cur_name!\n");
+ PART_ERROR(H5Lcreate_hard_invalid_cur_name);
+ }
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Lcreate_hard(file_id, "", group_id, HARD_LINK_INVALID_PARAMS_TEST_LINK_NAME,
+ H5P_DEFAULT, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" created hard link with an invalid cur_name of ''!\n");
+ PART_ERROR(H5Lcreate_hard_invalid_cur_name);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lcreate_hard_invalid_cur_name);
+
+ PART_BEGIN(H5Lcreate_hard_invalid_new_loc_id)
+ {
+ TESTING_2("H5Lcreate_hard with an invalid new_loc_id");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Lcreate_hard(file_id, "/", H5I_INVALID_HID,
+ HARD_LINK_INVALID_PARAMS_TEST_LINK_NAME, H5P_DEFAULT, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" created hard link with an invalid new_loc_id!\n");
+ PART_ERROR(H5Lcreate_hard_invalid_new_loc_id);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lcreate_hard_invalid_new_loc_id);
+
+ PART_BEGIN(H5Lcreate_hard_invalid_new_name)
+ {
+ TESTING_2("H5Lcreate_hard with an invalid new_name");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Lcreate_hard(file_id, "/", group_id, NULL, H5P_DEFAULT, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" created hard link with a NULL new_name!\n");
+ PART_ERROR(H5Lcreate_hard_invalid_new_name);
+ }
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Lcreate_hard(file_id, "/", group_id, "", H5P_DEFAULT, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" created hard link with an invalid new_name of ''!\n");
+ PART_ERROR(H5Lcreate_hard_invalid_new_name);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lcreate_hard_invalid_new_name);
+
+ PART_BEGIN(H5Lcreate_hard_invalid_lcpl)
+ {
+ TESTING_2("H5Lcreate_hard with an invalid LCPL");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Lcreate_hard(file_id, "/", group_id, HARD_LINK_INVALID_PARAMS_TEST_LINK_NAME,
+ H5I_INVALID_HID, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" created hard link with an invalid LCPL!\n");
+ PART_ERROR(H5Lcreate_hard_invalid_lcpl);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lcreate_hard_invalid_lcpl);
+
+ PART_BEGIN(H5Lcreate_hard_invalid_lapl)
+ {
+ TESTING_2("H5Lcreate_hard with an invalid LAPL");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Lcreate_hard(file_id, "/", group_id, HARD_LINK_INVALID_PARAMS_TEST_LINK_NAME,
+ H5P_DEFAULT, H5I_INVALID_HID);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" created hard link with an invalid LAPL!\n");
+ PART_ERROR(H5Lcreate_hard_invalid_lapl);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lcreate_hard_invalid_lapl);
+
+ PART_BEGIN(H5Lcreate_hard_invalid_same_loc)
+ {
+ TESTING_2("H5Lcreate_hard with the invalid same location");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Lcreate_hard(H5L_SAME_LOC, "/", H5L_SAME_LOC,
+ HARD_LINK_INVALID_PARAMS_TEST_LINK_NAME, H5P_DEFAULT, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" created hard link with the invalid same location!\n");
+ PART_ERROR(H5Lcreate_hard_invalid_same_loc);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lcreate_hard_invalid_same_loc);
+
+ PART_BEGIN(H5Lcreate_hard_across_files)
+ {
+ TESTING_2("H5Lcreate_hard across files");
+#ifndef NO_PREVENT_HARD_LINKS_ACROSS_FILES
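+ /* A hard link references an object's location within a single file, so
+ * creating one whose target and new location live in different files
+ * must fail. */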
+ HDsnprintf(ext_link_filename, H5_API_TEST_FILENAME_MAX_LENGTH, "%s",
+ EXTERNAL_LINK_TEST_FILE_NAME);
+
+ if ((ext_file_id = H5Fcreate(ext_link_filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create file '%s' for external link to reference\n", ext_link_filename);
+ PART_ERROR(H5Lcreate_hard_across_files);
+ }
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Lcreate_hard(file_id, "/", ext_file_id, HARD_LINK_INVALID_PARAMS_TEST_LINK_NAME,
+ H5P_DEFAULT, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" created hard link across files!\n");
+ PART_ERROR(H5Lcreate_hard_across_files);
+ }
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Lcreate_hard(ext_file_id, "/", group_id, HARD_LINK_INVALID_PARAMS_TEST_LINK_NAME,
+ H5P_DEFAULT, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" created hard link across files!\n");
+ PART_ERROR(H5Lcreate_hard_across_files);
+ }
+
+ PASSED();
+#else
+ SKIPPED();
+ PART_EMPTY(H5Lcreate_hard_across_files);
+#endif
+ }
+ PART_END(H5Lcreate_hard_across_files);
+
+ PART_BEGIN(H5Lcreate_hard_invalid_existence)
+ {
+ TESTING_2("invalid link existence after previous invalid H5Lcreate_hard calls");
+
+ /* Verify the link hasn't been created */
+ if ((link_exists = H5Lexists(group_id, HARD_LINK_INVALID_PARAMS_TEST_LINK_NAME, H5P_DEFAULT)) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if link '%s' exists\n",
+ HARD_LINK_INVALID_PARAMS_TEST_LINK_NAME);
+ PART_ERROR(H5Lcreate_hard_invalid_existence);
+ }
+
+ if (link_exists) {
+ H5_FAILED();
+ HDprintf(" link existed!\n");
+ PART_ERROR(H5Lcreate_hard_invalid_existence);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lcreate_hard_invalid_existence);
+ }
+ END_MULTIPART;
+
+ TESTING_2("test cleanup");
+#ifndef NO_PREVENT_HARD_LINKS_ACROSS_FILES
+ if (H5Fclose(ext_file_id) < 0)
+ TEST_ERROR;
+#endif
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(ext_file_id);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+} /* test_create_hard_link_invalid_params */
+
+/*
+ * A test to check that a soft link, which points to an
+ * existing object with a relative path, can be created.
+ */
+static int
+test_create_soft_link_existing_relative(void)
+{
+ htri_t link_exists;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t object_id = H5I_INVALID_HID;
+
+ TESTING("soft link creation to existing object by relative path");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_LINK_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_SOFT_LINKS)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file or group, basic or soft link aren't supported with this "
+ "connector\n");
+ return 0;
+ }
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, LINK_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", LINK_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, SOFT_LINK_EXISTING_RELATIVE_TEST_SUBGROUP_NAME, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container subgroup '%s'\n",
+ SOFT_LINK_EXISTING_RELATIVE_TEST_SUBGROUP_NAME);
+ goto error;
+ }
+
+ if ((object_id = H5Gcreate2(group_id, SOFT_LINK_EXISTING_RELATIVE_TEST_OBJECT_NAME, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to create object '%s' for soft link's target\n",
+ SOFT_LINK_EXISTING_RELATIVE_TEST_OBJECT_NAME);
+ goto error;
+ }
+
+ if (H5Gclose(object_id) < 0)
+ TEST_ERROR;
+
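+ /* Create a soft link whose value is a relative path; the path is resolved
+ * relative to the group that contains the link when the link is traversed. */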
+ if (H5Lcreate_soft(SOFT_LINK_EXISTING_RELATIVE_TEST_OBJECT_NAME, group_id,
+ SOFT_LINK_EXISTING_RELATIVE_TEST_LINK_NAME, H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create soft link '%s'\n", SOFT_LINK_EXISTING_RELATIVE_TEST_LINK_NAME);
+ goto error;
+ }
+
+ /* Verify the link has been created */
+ if ((link_exists = H5Lexists(group_id, SOFT_LINK_EXISTING_RELATIVE_TEST_LINK_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if link '%s' exists\n", SOFT_LINK_EXISTING_RELATIVE_TEST_LINK_NAME);
+ goto error;
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" link did not exist\n");
+ goto error;
+ }
+
+ if ((object_id = H5Gopen2(group_id, SOFT_LINK_EXISTING_RELATIVE_TEST_LINK_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open object '%s' through the soft link\n",
+ SOFT_LINK_EXISTING_RELATIVE_TEST_OBJECT_NAME);
+ goto error;
+ }
+
+ if (H5Gclose(object_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Gclose(object_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to check that a soft link, which points to an
+ * existing object using an absolute path, can be created.
+ */
+static int
+test_create_soft_link_existing_absolute(void)
+{
+ htri_t link_exists;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID, root_id = H5I_INVALID_HID;
+
+ TESTING("soft link creation to existing object by absolute path");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_LINK_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_SOFT_LINKS)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file or group, basic or soft link aren't supported with this "
+ "connector\n");
+ return 0;
+ }
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, LINK_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", LINK_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, SOFT_LINK_EXISTING_ABSOLUTE_TEST_SUBGROUP_NAME, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container subgroup '%s'\n",
+ SOFT_LINK_EXISTING_ABSOLUTE_TEST_SUBGROUP_NAME);
+ goto error;
+ }
+
+ if (H5Lcreate_soft("/", group_id, SOFT_LINK_EXISTING_ABSOLUTE_TEST_LINK_NAME, H5P_DEFAULT, H5P_DEFAULT) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create soft link '%s'\n", SOFT_LINK_EXISTING_ABSOLUTE_TEST_LINK_NAME);
+ goto error;
+ }
+
+ /* Verify the link has been created */
+ if ((link_exists = H5Lexists(group_id, SOFT_LINK_EXISTING_ABSOLUTE_TEST_LINK_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if link '%s' exists\n", SOFT_LINK_EXISTING_ABSOLUTE_TEST_LINK_NAME);
+ goto error;
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" link did not exist\n");
+ goto error;
+ }
+
+ if ((root_id = H5Gopen2(group_id, SOFT_LINK_EXISTING_ABSOLUTE_TEST_LINK_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open object pointed to by soft link '%s'\n",
+ SOFT_LINK_EXISTING_ABSOLUTE_TEST_LINK_NAME);
+ goto error;
+ }
+
+ if (H5Gclose(root_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Gclose(root_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to check that a soft link, which points to
+ * an object that doesn't exist by using a relative
+ * path, can be created.
+ */
+static int
+test_create_soft_link_dangling_relative(void)
+{
+ htri_t link_exists;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t object_id = H5I_INVALID_HID;
+
+ TESTING("dangling soft link creation to object by relative path");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_LINK_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_SOFT_LINKS)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file or group, basic or soft link aren't supported with this "
+ "connector\n");
+ return 0;
+ }
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, LINK_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", LINK_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, SOFT_LINK_DANGLING_RELATIVE_TEST_SUBGROUP_NAME, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container subgroup '%s'\n",
+ SOFT_LINK_DANGLING_RELATIVE_TEST_SUBGROUP_NAME);
+ goto error;
+ }
+
+ if (H5Lcreate_soft(SOFT_LINK_DANGLING_RELATIVE_TEST_OBJECT_NAME, group_id,
+ SOFT_LINK_DANGLING_RELATIVE_TEST_LINK_NAME, H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create soft link '%s'\n", SOFT_LINK_DANGLING_RELATIVE_TEST_LINK_NAME);
+ goto error;
+ }
+
+ /* Verify the link has been created */
+ if ((link_exists = H5Lexists(group_id, SOFT_LINK_DANGLING_RELATIVE_TEST_LINK_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if link '%s' exists\n", SOFT_LINK_DANGLING_RELATIVE_TEST_LINK_NAME);
+ goto error;
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" link did not exist\n");
+ goto error;
+ }
+
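+ /* The soft link is dangling, so trying to open its target should fail
+ * until the target object is actually created below. */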
+ H5E_BEGIN_TRY
+ {
+ object_id = H5Gopen2(group_id, SOFT_LINK_DANGLING_RELATIVE_TEST_LINK_NAME, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (object_id >= 0) {
+ H5_FAILED();
+ HDprintf(" opened target of dangling link '%s'!\n", SOFT_LINK_DANGLING_RELATIVE_TEST_LINK_NAME);
+ H5Gclose(object_id);
+ goto error;
+ }
+
+ if ((object_id = H5Gcreate2(group_id, SOFT_LINK_DANGLING_RELATIVE_TEST_OBJECT_NAME, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to create object '%s' for soft link's target\n",
+ SOFT_LINK_DANGLING_RELATIVE_TEST_OBJECT_NAME);
+ goto error;
+ }
+
+ if (H5Gclose(object_id) < 0)
+ TEST_ERROR;
+
+ if ((object_id = H5Gopen2(group_id, SOFT_LINK_DANGLING_RELATIVE_TEST_LINK_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to open object pointed to by soft link '%s'\n",
+ SOFT_LINK_DANGLING_RELATIVE_TEST_LINK_NAME);
+ goto error;
+ }
+
+ if (H5Gclose(object_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Gclose(object_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to check that a soft link, which points to an
+ * object that doesn't exist by using an absolute path,
+ * can be created.
+ */
+static int
+test_create_soft_link_dangling_absolute(void)
+{
+ htri_t link_exists;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t object_id = H5I_INVALID_HID;
+
+ TESTING("dangling soft link creation to object by absolute path");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_LINK_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_SOFT_LINKS)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file or group, basic or soft link aren't supported with this "
+ "connector\n");
+ return 0;
+ }
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, LINK_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", LINK_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, SOFT_LINK_DANGLING_ABSOLUTE_TEST_SUBGROUP_NAME, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container subgroup '%s'\n",
+ SOFT_LINK_DANGLING_ABSOLUTE_TEST_SUBGROUP_NAME);
+ goto error;
+ }
+
+ if (H5Lcreate_soft("/" LINK_TEST_GROUP_NAME "/" SOFT_LINK_DANGLING_ABSOLUTE_TEST_SUBGROUP_NAME
+ "/" SOFT_LINK_DANGLING_ABSOLUTE_TEST_OBJECT_NAME,
+ group_id, SOFT_LINK_DANGLING_ABSOLUTE_TEST_LINK_NAME, H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create soft link '%s'\n", SOFT_LINK_DANGLING_ABSOLUTE_TEST_LINK_NAME);
+ goto error;
+ }
+
+ /* Verify the link has been created */
+ if ((link_exists = H5Lexists(group_id, SOFT_LINK_DANGLING_ABSOLUTE_TEST_LINK_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if link '%s' exists\n", SOFT_LINK_DANGLING_ABSOLUTE_TEST_LINK_NAME);
+ goto error;
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" link did not exist\n");
+ goto error;
+ }
+
+ H5E_BEGIN_TRY
+ {
+ object_id = H5Gopen2(group_id, SOFT_LINK_DANGLING_ABSOLUTE_TEST_LINK_NAME, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (object_id >= 0) {
+ H5_FAILED();
+ HDprintf(" opened target of dangling link '%s'!\n", SOFT_LINK_DANGLING_ABSOLUTE_TEST_LINK_NAME);
+ H5Gclose(object_id);
+ goto error;
+ }
+
+ if ((object_id = H5Gcreate2(group_id, SOFT_LINK_DANGLING_ABSOLUTE_TEST_OBJECT_NAME, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to create object '%s' for soft link's target\n",
+ SOFT_LINK_DANGLING_ABSOLUTE_TEST_OBJECT_NAME);
+ goto error;
+ }
+
+ if (H5Gclose(object_id) < 0)
+ TEST_ERROR;
+
+ if ((object_id = H5Gopen2(group_id, SOFT_LINK_DANGLING_ABSOLUTE_TEST_LINK_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to open object pointed to by soft link '%s'\n",
+ SOFT_LINK_DANGLING_ABSOLUTE_TEST_LINK_NAME);
+ goto error;
+ }
+
+ if (H5Gclose(object_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Gclose(object_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to check that a soft link with a long name can be created
+ * using H5Lcreate_soft.
+ */
+static int
+test_create_soft_link_long_name(void)
+{
+ htri_t link_exists;
+ hid_t file_id = H5I_INVALID_HID;
+ char vol_name[5];
+ size_t name_len = MAX_NAME_LEN;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ char *objname = NULL; /* Name of object [Long] */
+ size_t u; /* Local index variable */
+
+ TESTING("soft link creation with a long name");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_LINK_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_SOFT_LINKS)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file or group, basic or soft link aren't supported with this "
+ "connector\n");
+ return 0;
+ }
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, LINK_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", LINK_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, SOFT_LINK_TEST_GROUP_LONG_NAME, H5P_DEFAULT, H5P_DEFAULT,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container sub-group '%s'\n", SOFT_LINK_TEST_GROUP_LONG_NAME);
+ goto error;
+ }
+
+ if (H5VLget_connector_name(file_id, vol_name, 5) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get VOL connector name\n");
+ goto error;
+ }
+
+ /* For the DAOS VOL connector, the maximum supported link name length is 99 (lexical key) */
+ if (strcmp(vol_name, "daos") == 0)
+ name_len = 99;
+
+ /* Construct a very long name for the soft link */
+ if ((objname = (char *)HDmalloc((size_t)(name_len + 1))) == NULL)
+ TEST_ERROR;
+
+ for (u = 0; u < name_len; u++)
+ objname[u] = 'b';
+ objname[name_len] = '\0';
+
+ if (H5Lcreate_soft(SOFT_LINK_TEST_LONG_OBJECT_NAME, group_id, objname, H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create soft link with a long name\n");
+ goto error;
+ }
+
+ /* Verify the link has been created */
+ if ((link_exists = H5Lexists(group_id, objname, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if the link with a long name exists\n");
+ goto error;
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" link did not exist\n");
+ goto error;
+ }
+
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ /* Release memory */
+ if (objname)
+ HDfree(objname);
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ if (objname)
+ HDfree(objname);
+
+ return 1;
+}
+
+/*
+ * A test to check that many soft links can be created
+ * using H5Lcreate_soft.
+ */
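+/*
+ * Illustrative sketch (not part of the test): soft links may point at other
+ * soft links, and link resolution follows the whole chain, as the test below
+ * does with sixteen links. Names here are hypothetical.
+ *
+ *     hid_t fid = H5Fopen("example.h5", H5F_ACC_RDWR, H5P_DEFAULT);
+ *     // "outer" -> "inner" -> "/real_group"
+ *     H5Lcreate_soft("/real_group", fid, "inner", H5P_DEFAULT, H5P_DEFAULT);
+ *     H5Lcreate_soft("inner", fid, "outer", H5P_DEFAULT, H5P_DEFAULT);
+ *     // Opening "outer" succeeds once "/real_group" actually exists.
+ *     hid_t gid = H5Gopen2(fid, "outer", H5P_DEFAULT);
+ *     H5Gclose(gid);
+ *     H5Fclose(fid);
+ */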
+static int
+test_create_soft_link_many(void)
+{
+#ifndef NO_SOFT_LINK_MANY_DANGLING
+ htri_t link_exists;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t object_id = H5I_INVALID_HID;
+#ifndef NO_OBJECT_GET_NAME
+ char objname[SOFT_LINK_TEST_GROUP_MANY_NAME_BUF_SIZE]; /* Object name */
+#endif
+#endif
+
+ TESTING("soft link creation of many links");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_LINK_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_SOFT_LINKS)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file or group, basic or soft link aren't supported with this "
+ "connector\n");
+ return 0;
+ }
+
+#ifndef NO_SOFT_LINK_MANY_DANGLING
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, LINK_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", LINK_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, SOFT_LINK_TEST_GROUP_MANY_NAME, H5P_DEFAULT, H5P_DEFAULT,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container sub-group '%s'\n", SOFT_LINK_TEST_GROUP_MANY_NAME);
+ goto error;
+ }
+
+ if (H5Lcreate_soft("/" LINK_TEST_GROUP_NAME "/" SOFT_LINK_TEST_GROUP_MANY_NAME
+ "/" SOFT_LINK_TEST_GROUP_MANY_FINAL_NAME,
+ group_id, "soft1", H5P_DEFAULT, H5P_DEFAULT) < 0)
+ TEST_ERROR;
+ if (H5Lcreate_soft("soft1", group_id, "soft2", H5P_DEFAULT, H5P_DEFAULT) < 0)
+ TEST_ERROR;
+ if (H5Lcreate_soft("soft2", group_id, "soft3", H5P_DEFAULT, H5P_DEFAULT) < 0)
+ TEST_ERROR;
+ if (H5Lcreate_soft("soft3", group_id, "soft4", H5P_DEFAULT, H5P_DEFAULT) < 0)
+ TEST_ERROR;
+ if (H5Lcreate_soft("soft4", group_id, "soft5", H5P_DEFAULT, H5P_DEFAULT) < 0)
+ TEST_ERROR;
+ if (H5Lcreate_soft("soft5", group_id, "soft6", H5P_DEFAULT, H5P_DEFAULT) < 0)
+ TEST_ERROR;
+ if (H5Lcreate_soft("soft6", group_id, "soft7", H5P_DEFAULT, H5P_DEFAULT) < 0)
+ TEST_ERROR;
+ if (H5Lcreate_soft("soft7", group_id, "soft8", H5P_DEFAULT, H5P_DEFAULT) < 0)
+ TEST_ERROR;
+ if (H5Lcreate_soft("soft8", group_id, "soft9", H5P_DEFAULT, H5P_DEFAULT) < 0)
+ TEST_ERROR;
+ if (H5Lcreate_soft("soft9", group_id, "soft10", H5P_DEFAULT, H5P_DEFAULT) < 0)
+ TEST_ERROR;
+ if (H5Lcreate_soft("soft10", group_id, "soft11", H5P_DEFAULT, H5P_DEFAULT) < 0)
+ TEST_ERROR;
+ if (H5Lcreate_soft("soft11", group_id, "soft12", H5P_DEFAULT, H5P_DEFAULT) < 0)
+ TEST_ERROR;
+ if (H5Lcreate_soft("soft12", group_id, "soft13", H5P_DEFAULT, H5P_DEFAULT) < 0)
+ TEST_ERROR;
+ if (H5Lcreate_soft("soft13", group_id, "soft14", H5P_DEFAULT, H5P_DEFAULT) < 0)
+ TEST_ERROR;
+ if (H5Lcreate_soft("soft14", group_id, "soft15", H5P_DEFAULT, H5P_DEFAULT) < 0)
+ TEST_ERROR;
+ if (H5Lcreate_soft("soft15", group_id, "soft16", H5P_DEFAULT, H5P_DEFAULT) < 0)
+ TEST_ERROR;
+
+ /* Verify the link has been created */
+ if ((link_exists = H5Lexists(group_id, "soft16", H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if link 'soft16' exists\n");
+ goto error;
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" link 'soft16' did not exist\n");
+ goto error;
+ }
+
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ /* Reopen the file and group and verify the soft link chain */
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, LINK_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", LINK_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id = H5Gopen2(container_group, SOFT_LINK_TEST_GROUP_MANY_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container sub-group '%s'\n", SOFT_LINK_TEST_GROUP_MANY_NAME);
+ goto error;
+ }
+
+ /*
+ * XXX: Try to open the object through the last soft link. It should fail because the target doesn't exist yet. If
+ * H5Oopen is available, use that.
+ */
+ H5E_BEGIN_TRY
+ {
+ object_id = H5Gopen2(group_id, "soft16", H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (object_id >= 0) {
+ H5_FAILED();
+ HDprintf(" opened target of dangling soft link '%s'!\n", SOFT_LINK_TEST_GROUP_MANY_NAME);
+ H5Gclose(object_id);
+ goto error;
+ }
+
+ if ((object_id = H5Gcreate2(group_id, SOFT_LINK_TEST_GROUP_MANY_FINAL_NAME, H5P_DEFAULT, H5P_DEFAULT,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to create object '%s' for soft link's target\n", SOFT_LINK_TEST_GROUP_MANY_NAME);
+ goto error;
+ }
+
+ if (H5Gclose(object_id) < 0)
+ TEST_ERROR;
+
+ /*
+ * XXX: Open the object through the last soft link. It should work this time. If H5Oopen is available, use
+ * that.
+ */
+ if ((object_id = H5Gopen2(group_id, "soft16", H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to open object pointed to by soft link '%s'\n", SOFT_LINK_TEST_GROUP_MANY_NAME);
+ goto error;
+ }
+#ifndef NO_OBJECT_GET_NAME
+ /* Check name */
+ if (H5Iget_name(object_id, objname, (size_t)SOFT_LINK_TEST_GROUP_MANY_NAME_BUF_SIZE) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get the name of the object 'soft16'\n");
+ goto error;
+ }
+
+ if (HDstrcmp(objname, "/" LINK_TEST_GROUP_NAME "/" SOFT_LINK_TEST_GROUP_MANY_NAME "/soft16")) {
+ H5_FAILED();
+ HDprintf(" wrong name of the object '%s'\n", objname);
+ goto error;
+ }
+#endif
+
+ if (H5Gclose(object_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Gclose(object_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+#else
+ SKIPPED();
+ return 0;
+#endif
+}
+
+/*
+ * A test to check that a soft link can't be created
+ * when H5Lcreate_soft is passed invalid parameters.
+ */
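+/*
+ * Illustrative sketch (not part of the test): the expected-failure pattern used
+ * throughout these negative tests. H5E_BEGIN_TRY/H5E_END_TRY silence the HDF5
+ * error stack while a call that is expected to fail is attempted, and the
+ * return value is then checked for failure rather than success. Here, gid is
+ * assumed to be an open group identifier.
+ *
+ *     herr_t ret = -1;
+ *     H5E_BEGIN_TRY
+ *     {
+ *         // A NULL link target is invalid, so this call should fail.
+ *         ret = H5Lcreate_soft(NULL, gid, "bad_link", H5P_DEFAULT, H5P_DEFAULT);
+ *     }
+ *     H5E_END_TRY;
+ *     if (ret >= 0)
+ *         TEST_ERROR; // the call unexpectedly succeeded
+ */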
+static int
+test_create_soft_link_invalid_params(void)
+{
+ herr_t err_ret = -1;
+ htri_t link_exists;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+
+ TESTING_MULTIPART("soft link creation with invalid parameters");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_SOFT_LINKS)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file, group, or link aren't supported with this connector\n");
+ return 0;
+ }
+
+ TESTING_2("test setup");
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, LINK_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", LINK_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, SOFT_LINK_INVALID_PARAMS_TEST_GROUP_NAME, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container sub-group '%s'\n", SOFT_LINK_INVALID_PARAMS_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ PASSED();
+
+ BEGIN_MULTIPART
+ {
+ PART_BEGIN(H5Lcreate_soft_invalid_link_target)
+ {
+ TESTING_2("H5Lcreate_soft with an invalid link target");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Lcreate_soft(NULL, group_id, SOFT_LINK_INVALID_PARAMS_TEST_LINK_NAME, H5P_DEFAULT,
+ H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" created soft link '%s' with an invalid link target!\n",
+ SOFT_LINK_INVALID_PARAMS_TEST_LINK_NAME);
+ PART_ERROR(H5Lcreate_soft_invalid_link_target);
+ }
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Lcreate_soft("", group_id, SOFT_LINK_INVALID_PARAMS_TEST_LINK_NAME, H5P_DEFAULT,
+ H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" created soft link '%s' with an invalid link target!\n",
+ SOFT_LINK_INVALID_PARAMS_TEST_LINK_NAME);
+ PART_ERROR(H5Lcreate_soft_invalid_link_target);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lcreate_soft_invalid_link_target);
+
+ PART_BEGIN(H5Lcreate_soft_invalid_link_loc_id)
+ {
+ TESTING_2("H5Lcreate_soft with an invalid link_loc_id");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Lcreate_soft("/", H5I_INVALID_HID, SOFT_LINK_INVALID_PARAMS_TEST_LINK_NAME,
+ H5P_DEFAULT, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" created soft link '%s' with an invalid link_loc_id!\n",
+ SOFT_LINK_INVALID_PARAMS_TEST_LINK_NAME);
+ PART_ERROR(H5Lcreate_soft_invalid_link_loc_id);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lcreate_soft_invalid_link_loc_id);
+
+ PART_BEGIN(H5Lcreate_soft_invalid_link_name)
+ {
+ TESTING_2("H5Lcreate_soft with an invalid link name");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Lcreate_soft("/", group_id, NULL, H5P_DEFAULT, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" created soft link '%s' with a NULL link name!\n",
+ SOFT_LINK_INVALID_PARAMS_TEST_LINK_NAME);
+ PART_ERROR(H5Lcreate_soft_invalid_link_name);
+ }
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Lcreate_soft("/", group_id, "", H5P_DEFAULT, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" created soft link '%s' with an invalid link name of ''!\n",
+ SOFT_LINK_INVALID_PARAMS_TEST_LINK_NAME);
+ PART_ERROR(H5Lcreate_soft_invalid_link_name);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lcreate_soft_invalid_link_name);
+
+ PART_BEGIN(H5Lcreate_soft_invalid_lcpl)
+ {
+ TESTING_2("H5Lcreate_soft with an invalid LCPL");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Lcreate_soft("/", group_id, SOFT_LINK_INVALID_PARAMS_TEST_LINK_NAME,
+ H5I_INVALID_HID, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" created soft link '%s' with an invalid LCPL!\n",
+ SOFT_LINK_INVALID_PARAMS_TEST_LINK_NAME);
+ PART_ERROR(H5Lcreate_soft_invalid_lcpl);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lcreate_soft_invalid_lcpl);
+
+ PART_BEGIN(H5Lcreate_soft_invalid_lapl)
+ {
+ TESTING_2("H5Lcreate_soft with an invalid LAPL");
+#ifndef NO_INVALID_PROPERTY_LIST_TESTS
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Lcreate_soft("/", group_id, SOFT_LINK_INVALID_PARAMS_TEST_LINK_NAME, H5P_DEFAULT,
+ H5I_INVALID_HID);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" created soft link '%s' with an invalid LAPL!\n",
+ SOFT_LINK_INVALID_PARAMS_TEST_LINK_NAME);
+ PART_ERROR(H5Lcreate_soft_invalid_lapl);
+ }
+
+ PASSED();
+#else
+ SKIPPED();
+ PART_EMPTY(H5Lcreate_soft_invalid_lapl);
+#endif
+ }
+ PART_END(H5Lcreate_soft_invalid_lapl);
+
+ PART_BEGIN(H5Lcreate_soft_invalid_existence)
+ {
+ TESTING_2("invalid link existence after previous invalid H5Lcreate_soft calls");
+
+ /* Verify the link hasn't been created */
+ if ((link_exists = H5Lexists(group_id, SOFT_LINK_INVALID_PARAMS_TEST_LINK_NAME, H5P_DEFAULT)) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if link '%s' exists\n",
+ SOFT_LINK_INVALID_PARAMS_TEST_LINK_NAME);
+ PART_ERROR(H5Lcreate_soft_invalid_existence);
+ }
+
+ if (link_exists) {
+ H5_FAILED();
+ HDprintf(" link '%s' existed!\n", SOFT_LINK_INVALID_PARAMS_TEST_LINK_NAME);
+ PART_ERROR(H5Lcreate_soft_invalid_existence);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lcreate_soft_invalid_existence);
+ }
+ END_MULTIPART;
+
+ TESTING_2("test cleanup");
+
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to check that an external link can be created
+ * using H5Lcreate_external.
+ */
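+/*
+ * Illustrative sketch (not part of the test): the basic H5Lcreate_external
+ * pattern exercised below. File and link names are hypothetical and
+ * "target.h5" is assumed to already exist.
+ *
+ *     hid_t fid = H5Fopen("source.h5", H5F_ACC_RDWR, H5P_DEFAULT);
+ *     // "ext_link" in source.h5 points at the root group ("/") of target.h5.
+ *     H5Lcreate_external("target.h5", "/", fid, "ext_link", H5P_DEFAULT, H5P_DEFAULT);
+ *     // Opening the link transparently opens the object in the other file.
+ *     hid_t gid = H5Gopen2(fid, "ext_link", H5P_DEFAULT);
+ *     H5Gclose(gid);
+ *     H5Fclose(fid);
+ */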
+static int
+test_create_external_link(void)
+{
+#ifndef NO_EXTERNAL_LINKS
+ htri_t link_exists;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t root_id = H5I_INVALID_HID;
+ char ext_link_filename[H5_API_TEST_FILENAME_MAX_LENGTH];
+#endif
+
+ TESTING("external link creation to existing object");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_LINK_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_EXTERNAL_LINKS)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file, group, basic link, or external link aren't supported "
+ "with this connector\n");
+ return 0;
+ }
+
+#ifndef NO_EXTERNAL_LINKS
+ HDsnprintf(ext_link_filename, H5_API_TEST_FILENAME_MAX_LENGTH, "%s", EXTERNAL_LINK_TEST_FILE_NAME);
+
+ if ((file_id = H5Fcreate(ext_link_filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create file '%s' for external link to reference\n", ext_link_filename);
+ goto error;
+ }
+
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, LINK_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", LINK_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, EXTERNAL_LINK_TEST_SUBGROUP_NAME, H5P_DEFAULT, H5P_DEFAULT,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container subgroup '%s'\n", EXTERNAL_LINK_TEST_SUBGROUP_NAME);
+ goto error;
+ }
+
+ if (H5Lcreate_external(ext_link_filename, "/", group_id, EXTERNAL_LINK_TEST_LINK_NAME, H5P_DEFAULT,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create external link '%s'\n", EXTERNAL_LINK_TEST_LINK_NAME);
+ goto error;
+ }
+
+ /* Verify the link has been created */
+ if ((link_exists = H5Lexists(group_id, EXTERNAL_LINK_TEST_LINK_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if link '%s' exists\n", EXTERNAL_LINK_TEST_LINK_NAME);
+ goto error;
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" link did not exist\n");
+ goto error;
+ }
+
+ if ((root_id = H5Gopen2(group_id, EXTERNAL_LINK_TEST_LINK_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open root group of other file using external link '%s'\n",
+ EXTERNAL_LINK_TEST_LINK_NAME);
+ goto error;
+ }
+
+ if (H5Gclose(root_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Gclose(root_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+#else
+ SKIPPED();
+ return 0;
+#endif
+}
+
+/*
+ * A test to check that an external link that points, via an
+ * absolute path, to an object that doesn't exist yet can
+ * still be created.
+ */
+static int
+test_create_external_link_dangling(void)
+{
+#ifndef NO_EXTERNAL_LINKS
+ htri_t link_exists;
+ hid_t file_id = H5I_INVALID_HID, ext_file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t object_id = H5I_INVALID_HID;
+ char ext_link_filename[H5_API_TEST_FILENAME_MAX_LENGTH];
+#endif
+
+ TESTING("dangling external link creation");
+
+#ifndef NO_EXTERNAL_LINKS
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_LINK_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_EXTERNAL_LINKS)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file, group, basic link, or external link aren't supported "
+ "with this connector\n");
+ return 0;
+ }
+
+ HDsnprintf(ext_link_filename, H5_API_TEST_FILENAME_MAX_LENGTH, "%s", EXTERNAL_LINK_TEST_FILE_NAME);
+
+ if ((ext_file_id = H5Fcreate(ext_link_filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create file '%s' for external link to reference\n", ext_link_filename);
+ goto error;
+ }
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, LINK_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", LINK_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, EXTERNAL_LINK_TEST_DANGLING_SUBGROUP_NAME, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container subgroup '%s'\n", EXTERNAL_LINK_TEST_DANGLING_SUBGROUP_NAME);
+ goto error;
+ }
+
+ if (H5Lcreate_external(ext_link_filename, "/" EXTERNAL_LINK_TEST_DANGLING_OBJECT_NAME, group_id,
+ EXTERNAL_LINK_TEST_DANGLING_LINK_NAME, H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create dangling external link '%s'\n", EXTERNAL_LINK_TEST_DANGLING_LINK_NAME);
+ goto error;
+ }
+
+ /* Verify the link has been created */
+ if ((link_exists = H5Lexists(group_id, EXTERNAL_LINK_TEST_DANGLING_LINK_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if link '%s' exists\n", EXTERNAL_LINK_TEST_DANGLING_LINK_NAME);
+ goto error;
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" link did not exist\n");
+ goto error;
+ }
+
+ H5E_BEGIN_TRY
+ {
+ object_id = H5Gopen2(group_id, EXTERNAL_LINK_TEST_DANGLING_LINK_NAME, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (object_id >= 0) {
+ H5_FAILED();
+ HDprintf(" opened non-existent object in other file using dangling external link '%s'!\n",
+ EXTERNAL_LINK_TEST_DANGLING_LINK_NAME);
+ H5Gclose(object_id);
+ goto error;
+ }
+
+ if ((object_id = H5Gcreate2(ext_file_id, EXTERNAL_LINK_TEST_DANGLING_OBJECT_NAME, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to create object '%s' for external link's target\n",
+ EXTERNAL_LINK_TEST_DANGLING_OBJECT_NAME);
+ goto error;
+ }
+
+ if (H5Gclose(object_id) < 0)
+ TEST_ERROR;
+
+ if ((object_id = H5Gopen2(group_id, EXTERNAL_LINK_TEST_DANGLING_LINK_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to open object pointed to by external link '%s'\n",
+ EXTERNAL_LINK_TEST_DANGLING_LINK_NAME);
+ goto error;
+ }
+
+ if (H5Gclose(object_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+ if (H5Fclose(ext_file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Gclose(object_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ H5Fclose(ext_file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+#else
+ SKIPPED();
+ return 0;
+#endif
+}
+
+/*
+ * A test to check that an external link to an object that
+ * crosses several files can be created using H5Lcreate_external.
+ */
+static int
+test_create_external_link_multi(void)
+{
+#ifndef NO_EXTERNAL_LINKS
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t group_id2 = H5I_INVALID_HID, group_id3 = H5I_INVALID_HID;
+ hid_t root_id = H5I_INVALID_HID;
+ char ext_link_filename1[H5_API_TEST_FILENAME_MAX_LENGTH];
+ char ext_link_filename2[H5_API_TEST_FILENAME_MAX_LENGTH];
+ char ext_link_filename3[H5_API_TEST_FILENAME_MAX_LENGTH];
+ char objname[EXTERNAL_LINK_TEST_MULTI_NAME_BUF_SIZE];
+#endif
+
+ TESTING_MULTIPART("external link creation to an object across several files");
+
+#ifndef NO_EXTERNAL_LINKS
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_EXTERNAL_LINKS)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file, group, or external link aren't supported with this "
+ "connector\n");
+ return 0;
+ }
+
+ BEGIN_MULTIPART
+ {
+ PART_BEGIN(H5Lcreate_external_first_file)
+ {
+ TESTING_2("Create the first external file to be pointed to");
+
+ HDsnprintf(ext_link_filename1, H5_API_TEST_FILENAME_MAX_LENGTH, "%s",
+ EXTERNAL_LINK_TEST_FILE_NAME);
+
+ if ((file_id = H5Fcreate(ext_link_filename1, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create file '%s' for external link to reference\n",
+ ext_link_filename1);
+ PART_ERROR(H5Lcreate_external_first_file);
+ }
+
+ /* Create object down a path */
+ if ((group_id = H5Gcreate2(file_id, "A", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create a group\n");
+ PART_ERROR(H5Lcreate_external_first_file);
+ }
+
+ if (H5Gclose(group_id) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't close a group\n");
+ PART_ERROR(H5Lcreate_external_first_file);
+ }
+
+ if ((group_id = H5Gcreate2(file_id, "A/B", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create a group\n");
+ PART_ERROR(H5Lcreate_external_first_file);
+ }
+
+ if (H5Gclose(group_id) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't close a group\n");
+ PART_ERROR(H5Lcreate_external_first_file);
+ }
+
+ if ((group_id = H5Gcreate2(file_id, "A/B/C", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create a group\n");
+ PART_ERROR(H5Lcreate_external_first_file);
+ }
+
+ if (H5Gclose(group_id) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't close a group\n");
+ PART_ERROR(H5Lcreate_external_first_file);
+ }
+
+ /* Close file */
+ if (H5Fclose(file_id) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't close a group\n");
+ PART_ERROR(H5Lcreate_external_first_file);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lcreate_external_first_file);
+
+ PART_BEGIN(H5Lcreate_external_second_file)
+ {
+ TESTING_2("Create the second external file to be pointed to");
+
+ HDsnprintf(ext_link_filename2, H5_API_TEST_FILENAME_MAX_LENGTH, "%s",
+ EXTERNAL_LINK_TEST_FILE_NAME2);
+
+ if ((file_id = H5Fcreate(ext_link_filename2, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create file '%s' for external link to reference\n",
+ ext_link_filename2);
+ PART_ERROR(H5Lcreate_external_second_file);
+ }
+
+ /* Create object down a path */
+ if ((group_id = H5Gcreate2(file_id, "D", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create a group\n");
+ PART_ERROR(H5Lcreate_external_second_file);
+ }
+
+ if (H5Gclose(group_id) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't close a group\n");
+ PART_ERROR(H5Lcreate_external_second_file);
+ }
+
+ if ((group_id = H5Gcreate2(file_id, "D/E", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create a group\n");
+ PART_ERROR(H5Lcreate_external_second_file);
+ }
+
+ /* Create external link to object in first file */
+ if (H5Lcreate_external(ext_link_filename1, "/A/B/C", group_id, "F", H5P_DEFAULT, H5P_DEFAULT) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create external link 'F'\n");
+ PART_ERROR(H5Lcreate_external_second_file);
+ }
+
+ if (H5Gclose(group_id) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't close a group\n");
+ PART_ERROR(H5Lcreate_external_second_file);
+ }
+
+ /* Close file */
+ if (H5Fclose(file_id) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't close a file\n");
+ PART_ERROR(H5Lcreate_external_second_file);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lcreate_external_second_file);
+
+ PART_BEGIN(H5Lcreate_external_third_file)
+ {
+ TESTING_2("Create the third external file to be pointed to");
+
+ HDsnprintf(ext_link_filename3, H5_API_TEST_FILENAME_MAX_LENGTH, "%s",
+ EXTERNAL_LINK_TEST_FILE_NAME3);
+
+ if ((file_id = H5Fcreate(ext_link_filename3, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create file '%s' for external link to reference\n",
+ ext_link_filename3);
+ PART_ERROR(H5Lcreate_external_third_file);
+ }
+
+ /* Create object down a path */
+ if ((group_id = H5Gcreate2(file_id, "G", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create a group\n");
+ PART_ERROR(H5Lcreate_external_third_file);
+ }
+
+ if (H5Gclose(group_id) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't close a group\n");
+ PART_ERROR(H5Lcreate_external_third_file);
+ }
+
+ if ((group_id = H5Gcreate2(file_id, "G/H", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create a group\n");
+ PART_ERROR(H5Lcreate_external_third_file);
+ }
+
+ /* Create external link to object in second file */
+ if (H5Lcreate_external(ext_link_filename2, "/D/E/F", group_id, "I", H5P_DEFAULT, H5P_DEFAULT) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create external link 'I'\n");
+ PART_ERROR(H5Lcreate_external_third_file);
+ }
+
+ if (H5Gclose(group_id) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't close a group\n");
+ PART_ERROR(H5Lcreate_external_third_file);
+ }
+
+ /* Close file */
+ if (H5Fclose(file_id) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't close a file\n");
+ PART_ERROR(H5Lcreate_external_third_file);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lcreate_external_third_file);
+
+ PART_BEGIN(H5Lcreate_external_final_file)
+ {
+ TESTING_2("Open the file and create the final external link");
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ PART_ERROR(H5Lcreate_external_final_file);
+ }
+
+ if ((container_group = H5Gopen2(file_id, LINK_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", LINK_TEST_GROUP_NAME);
+ PART_ERROR(H5Lcreate_external_final_file);
+ }
+
+ if ((group_id = H5Gcreate2(container_group, EXTERNAL_LINK_TEST_MULTI_NAME, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container subgroup '%s'\n", EXTERNAL_LINK_TEST_MULTI_NAME);
+ PART_ERROR(H5Lcreate_external_final_file);
+ }
+
+ if (H5Lcreate_external(ext_link_filename3, "/G/H/I", group_id, "ext_link", H5P_DEFAULT,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create external link 'ext_link'\n");
+ PART_ERROR(H5Lcreate_external_final_file);
+ }
+
+ if ((group_id2 = H5Gopen2(group_id, "ext_link", H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open the group that is the external link\n");
+ PART_ERROR(H5Lcreate_external_final_file);
+ }
+
+ /* Check name */
+ if (H5Iget_name(group_id2, objname, (size_t)EXTERNAL_LINK_TEST_MULTI_NAME_BUF_SIZE) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get the name of the object '%s'\n",
+ HARD_LINK_TEST_GROUP_MANY_FINAL_NAME);
+ PART_ERROR(H5Lcreate_external_final_file);
+ }
+
+ if (HDstrcmp(objname, "/A/B/C")) {
+ H5_FAILED();
+ HDprintf(" wrong name of the object '%s'\n", objname);
+ PART_ERROR(H5Lcreate_external_final_file);
+ }
+
+ /* Create an object in the external file */
+ if ((group_id3 = H5Gcreate2(group_id2, "new_group", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create a group 'new_group' in the external file\n");
+ PART_ERROR(H5Lcreate_external_final_file);
+ }
+
+ if (H5Gclose(group_id) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't close a group\n");
+ PART_ERROR(H5Lcreate_external_final_file);
+ }
+
+ if (H5Gclose(group_id2) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't close a group\n");
+ PART_ERROR(H5Lcreate_external_final_file);
+ }
+
+ if (H5Gclose(group_id3) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't close a group\n");
+ PART_ERROR(H5Lcreate_external_final_file);
+ }
+
+ if (H5Gclose(container_group) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't close a group\n");
+ PART_ERROR(H5Lcreate_external_final_file);
+ }
+
+ if (H5Fclose(file_id) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't close a file\n");
+ PART_ERROR(H5Lcreate_external_final_file);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lcreate_external_final_file);
+
+ PART_BEGIN(H5Lcreate_external_object_created)
+ {
+ TESTING_2("Check the group being created through the external link");
+
+ if ((file_id = H5Fopen(ext_link_filename1, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", ext_link_filename1);
+ PART_ERROR(H5Lcreate_external_object_created);
+ }
+
+ if ((group_id = H5Gopen2(file_id, "/A/B/C/new_group", H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open a group 'new_group' in the external file\n");
+ PART_ERROR(H5Lcreate_external_object_created);
+ }
+
+ /* Check name */
+ if (H5Iget_name(group_id, objname, (size_t)EXTERNAL_LINK_TEST_MULTI_NAME_BUF_SIZE) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get the name of the object '/A/B/C/new_group'\n");
+ PART_ERROR(H5Lcreate_external_object_created);
+ }
+
+ if (HDstrcmp(objname, "/A/B/C/new_group")) {
+ H5_FAILED();
+ HDprintf(" wrong name of the object '%s'\n", objname);
+ PART_ERROR(H5Lcreate_external_object_created);
+ }
+
+ if (H5Gclose(group_id) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't close the group\n");
+ PART_ERROR(H5Lcreate_external_object_created);
+ }
+
+ if (H5Fclose(file_id) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't close the file\n");
+ PART_ERROR(H5Lcreate_external_object_created);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lcreate_external_object_created);
+ }
+ END_MULTIPART;
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Gclose(root_id);
+ H5Gclose(group_id);
+ H5Gclose(group_id2);
+ H5Gclose(group_id3);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+#else
+ SKIPPED();
+ return 0;
+#endif
+}
+
+/*
+ * A test to build a file with external links to an object
+ * that bounce back and forth between two files several times:
+ *
+ * file1:/link1 -> file2: /link2
+ * file2:/link2 -> file1: /link3
+ * file1:/link3 -> file2: /link4
+ * file2:/link4 -> file1: /link5
+ * file1:/link5 -> file2: /link6
+ * file2:/link6 -> file1: /final
+ */
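+/*
+ * Illustrative sketch (not part of the test): once the chain above is built,
+ * resolving file1:/link1 walks all six external links and lands on
+ * file1:/final. The file names are the hypothetical file1/file2 from the
+ * diagram above.
+ *
+ *     hid_t fid = H5Fopen("file1", H5F_ACC_RDONLY, H5P_DEFAULT);
+ *     hid_t gid = H5Gopen2(fid, "link1", H5P_DEFAULT);
+ *     char name[64];
+ *     // The resolved identifier reports the final path, i.e. "/final".
+ *     H5Iget_name(gid, name, sizeof(name));
+ *     H5Gclose(gid);
+ *     H5Fclose(fid);
+ */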
+static int
+test_create_external_link_ping_pong(void)
+{
+#ifndef NO_EXTERNAL_LINKS
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t group_id2 = H5I_INVALID_HID;
+ char ext_link_filename1[H5_API_TEST_FILENAME_MAX_LENGTH];
+ char ext_link_filename2[H5_API_TEST_FILENAME_MAX_LENGTH];
+ char objname[EXTERNAL_LINK_TEST_MULTI_NAME_BUF_SIZE];
+#endif
+
+ TESTING_MULTIPART("external link creation to an object in ping pong style");
+
+#ifndef NO_EXTERNAL_LINKS
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_EXTERNAL_LINKS)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file, group, or external link aren't supported with this "
+ "connector\n");
+ return 0;
+ }
+
+ HDsnprintf(ext_link_filename1, H5_API_TEST_FILENAME_MAX_LENGTH, "%s", EXTERNAL_LINK_TEST_PING_PONG_NAME1);
+ HDsnprintf(ext_link_filename2, H5_API_TEST_FILENAME_MAX_LENGTH, "%s", EXTERNAL_LINK_TEST_PING_PONG_NAME2);
+
+ BEGIN_MULTIPART
+ {
+ PART_BEGIN(H5Lcreate_external_first_file)
+ {
+ TESTING_2("Create the first external file");
+
+ /* Create the first file */
+ if ((file_id = H5Fcreate(ext_link_filename1, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create file '%s' for external link to reference\n",
+ ext_link_filename1);
+ PART_ERROR(H5Lcreate_external_first_file);
+ }
+
+ /* Create external links for chain */
+ if (H5Lcreate_external(ext_link_filename2, "/link2", file_id, "link1", H5P_DEFAULT, H5P_DEFAULT) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create external link\n");
+ PART_ERROR(H5Lcreate_external_first_file);
+ }
+
+ if (H5Lcreate_external(ext_link_filename2, "/link4", file_id, "link3", H5P_DEFAULT, H5P_DEFAULT) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create external link\n");
+ PART_ERROR(H5Lcreate_external_first_file);
+ }
+
+ if (H5Lcreate_external(ext_link_filename2, "/link6", file_id, "link5", H5P_DEFAULT, H5P_DEFAULT) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create external link\n");
+ PART_ERROR(H5Lcreate_external_first_file);
+ }
+
+ /* Create final object */
+ if ((group_id = H5Gcreate2(file_id, "final", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create a group\n");
+ PART_ERROR(H5Lcreate_external_first_file);
+ }
+
+ if (H5Gclose(group_id) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't close a group\n");
+ PART_ERROR(H5Lcreate_external_first_file);
+ }
+
+ /* Close file */
+ if (H5Fclose(file_id) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't close a group\n");
+ PART_ERROR(H5Lcreate_external_first_file);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lcreate_external_first_file);
+
+ PART_BEGIN(H5Lcreate_external_second_file)
+ {
+ TESTING_2("Create the second external file");
+
+ /* Create the second file */
+ if ((file_id = H5Fcreate(ext_link_filename2, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create file '%s' for external link\n", ext_link_filename2);
+ PART_ERROR(H5Lcreate_external_second_file);
+ }
+
+ /* Create external links for chain */
+ if (H5Lcreate_external(ext_link_filename1, "/link3", file_id, "link2", H5P_DEFAULT, H5P_DEFAULT) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create external link\n");
+ PART_ERROR(H5Lcreate_external_second_file);
+ }
+
+ if (H5Lcreate_external(ext_link_filename1, "/link5", file_id, "link4", H5P_DEFAULT, H5P_DEFAULT) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create external link\n");
+ PART_ERROR(H5Lcreate_external_second_file);
+ }
+
+ if (H5Lcreate_external(ext_link_filename1, "/final", file_id, "link6", H5P_DEFAULT, H5P_DEFAULT) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create external link\n");
+ PART_ERROR(H5Lcreate_external_second_file);
+ }
+
+ /* Close file */
+ if (H5Fclose(file_id) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't close the file\n");
+ PART_ERROR(H5Lcreate_external_second_file);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lcreate_external_second_file);
+
+ PART_BEGIN(H5Lcreate_external_verify)
+ {
+ TESTING_2("Open the first file to verify the object being pointed to");
+
+ if ((file_id = H5Fopen(ext_link_filename1, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", ext_link_filename1);
+ PART_ERROR(H5Lcreate_external_verify);
+ }
+
+ /* Open object through external link */
+ if ((group_id = H5Gopen2(file_id, "link1", H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open the group that is the external link 'link1'\n");
+ PART_ERROR(H5Lcreate_external_verify);
+ }
+
+ /* Check the name of the object being pointed to */
+ if (H5Iget_name(group_id, objname, (size_t)EXTERNAL_LINK_TEST_PING_PONG_NAME_BUF_SIZE) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get the name of the object\n");
+ PART_ERROR(H5Lcreate_external_verify);
+ }
+
+ if (HDstrcmp(objname, "/final")) {
+ H5_FAILED();
+ HDprintf(" wrong name of the object '%s'\n", objname);
+ PART_ERROR(H5Lcreate_external_verify);
+ }
+
+ /* Create an object in the external file */
+ if ((group_id2 = H5Gcreate2(group_id, "new_group", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create a new group 'new_group'\n");
+ PART_ERROR(H5Lcreate_external_verify);
+ }
+
+ if (H5Gclose(group_id2) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't close a group\n");
+ PART_ERROR(H5Lcreate_external_verify);
+ }
+
+ if (H5Gclose(group_id) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't close a group\n");
+ PART_ERROR(H5Lcreate_external_verify);
+ }
+
+ /* Close file */
+ if (H5Fclose(file_id) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't close the file\n");
+ PART_ERROR(H5Lcreate_external_verify);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lcreate_external_verify);
+
+ PART_BEGIN(H5Lcreate_external_verify_again)
+ {
+ TESTING_2("Open the first file to verify the object being created");
+
+ if ((file_id = H5Fopen(ext_link_filename1, H5F_ACC_RDONLY, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", ext_link_filename1);
+ PART_ERROR(H5Lcreate_external_verify_again);
+ }
+
+ /* Open object through external link */
+ if ((group_id = H5Gopen2(file_id, "/final/new_group", H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open the group that is the external link\n");
+ PART_ERROR(H5Lcreate_external_verify_again);
+ }
+
+ /* Check the name of the object being pointed to */
+ if (H5Iget_name(group_id, objname, (size_t)EXTERNAL_LINK_TEST_PING_PONG_NAME_BUF_SIZE) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get the name of the object\n");
+ PART_ERROR(H5Lcreate_external_verify_again);
+ }
+
+ if (HDstrcmp(objname, "/final/new_group")) {
+ H5_FAILED();
+ HDprintf(" wrong name of the object '%s'\n", objname);
+ PART_ERROR(H5Lcreate_external_verify_again);
+ }
+
+ if (H5Gclose(group_id) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't close a group\n");
+ PART_ERROR(H5Lcreate_external_verify_again);
+ }
+
+ /* Close file */
+ if (H5Fclose(file_id) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't close the file\n");
+ PART_ERROR(H5Lcreate_external_verify_again);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lcreate_external_verify_again);
+ }
+ END_MULTIPART;
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Gclose(group_id);
+ H5Gclose(group_id2);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+#else
+ SKIPPED();
+ return 0;
+#endif
+}
+
+/*
+ * A test to check that an external link can't be created
+ * when H5Lcreate_external is passed invalid parameters.
+ */
+static int
+test_create_external_link_invalid_params(void)
+{
+ herr_t err_ret = -1;
+ htri_t link_exists;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ char ext_link_filename[H5_API_TEST_FILENAME_MAX_LENGTH];
+
+ TESTING_MULTIPART("H5Lcreate_external with invalid parameters");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_LINK_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_EXTERNAL_LINKS)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file, group, or basic link or external link aren't supported "
+ "with this connector\n");
+ return 0;
+ }
+
+ TESTING_2("test setup");
+
+ HDsnprintf(ext_link_filename, H5_API_TEST_FILENAME_MAX_LENGTH, "%s",
+ EXTERNAL_LINK_INVALID_PARAMS_TEST_FILE_NAME);
+
+ if ((file_id = H5Fcreate(ext_link_filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create file '%s' for external link to reference\n", ext_link_filename);
+ goto error;
+ }
+
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, LINK_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", LINK_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, EXTERNAL_LINK_INVALID_PARAMS_TEST_GROUP_NAME, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container subgroup '%s'\n",
+ EXTERNAL_LINK_INVALID_PARAMS_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ PASSED();
+
+ BEGIN_MULTIPART
+ {
+ PART_BEGIN(H5Lcreate_external_invalid_file_name)
+ {
+ TESTING_2("H5Lcreate_external with an invalid file name");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Lcreate_external(NULL, "/", group_id, EXTERNAL_LINK_INVALID_PARAMS_TEST_LINK_NAME,
+ H5P_DEFAULT, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" created external link '%s' using a NULL file name!\n",
+ EXTERNAL_LINK_INVALID_PARAMS_TEST_LINK_NAME);
+ PART_ERROR(H5Lcreate_external_invalid_file_name);
+ }
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Lcreate_external("", "/", group_id, EXTERNAL_LINK_INVALID_PARAMS_TEST_LINK_NAME,
+ H5P_DEFAULT, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" created external link '%s' using an invalid file name of ''!\n",
+ EXTERNAL_LINK_INVALID_PARAMS_TEST_LINK_NAME);
+ PART_ERROR(H5Lcreate_external_invalid_file_name);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lcreate_external_invalid_file_name);
+
+ PART_BEGIN(H5Lcreate_external_invalid_ext_obj_name)
+ {
+ TESTING_2("H5Lcreate_external with an invalid external object name");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret =
+ H5Lcreate_external(ext_link_filename, NULL, group_id,
+ EXTERNAL_LINK_INVALID_PARAMS_TEST_LINK_NAME, H5P_DEFAULT, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" created external link '%s' using a NULL external object name!\n",
+ EXTERNAL_LINK_INVALID_PARAMS_TEST_LINK_NAME);
+ PART_ERROR(H5Lcreate_external_invalid_ext_obj_name);
+ }
+
+ H5E_BEGIN_TRY
+ {
+ err_ret =
+ H5Lcreate_external(ext_link_filename, "", group_id,
+ EXTERNAL_LINK_INVALID_PARAMS_TEST_LINK_NAME, H5P_DEFAULT, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" created external link '%s' using an invalid external object name of ''!\n",
+ EXTERNAL_LINK_INVALID_PARAMS_TEST_LINK_NAME);
+ PART_ERROR(H5Lcreate_external_invalid_ext_obj_name);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lcreate_external_invalid_ext_obj_name);
+
+ PART_BEGIN(H5Lcreate_external_invalid_link_loc_id)
+ {
+ TESTING_2("H5Lcreate_external with an invalid link_loc_id");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret =
+ H5Lcreate_external(ext_link_filename, "/", H5I_INVALID_HID,
+ EXTERNAL_LINK_INVALID_PARAMS_TEST_LINK_NAME, H5P_DEFAULT, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" created external link '%s' using an invalid link_loc_id!\n",
+ EXTERNAL_LINK_INVALID_PARAMS_TEST_LINK_NAME);
+ PART_ERROR(H5Lcreate_external_invalid_link_loc_id);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lcreate_external_invalid_link_loc_id);
+
+ PART_BEGIN(H5Lcreate_external_invalid_link_name)
+ {
+ TESTING_2("H5Lcreate_external with an invalid link name");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret =
+ H5Lcreate_external(ext_link_filename, "/", group_id, NULL, H5P_DEFAULT, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" created external link '%s' using a NULL link_loc_id!\n",
+ EXTERNAL_LINK_INVALID_PARAMS_TEST_LINK_NAME);
+ PART_ERROR(H5Lcreate_external_invalid_link_name);
+ }
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Lcreate_external(ext_link_filename, "/", group_id, "", H5P_DEFAULT, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" created external link '%s' using an invalid link name of ''!\n",
+ EXTERNAL_LINK_INVALID_PARAMS_TEST_LINK_NAME);
+ PART_ERROR(H5Lcreate_external_invalid_link_name);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lcreate_external_invalid_link_name);
+
+ PART_BEGIN(H5Lcreate_external_invalid_lcpl)
+ {
+ TESTING_2("H5Lcreate_external with an invalid LCPL");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Lcreate_external(ext_link_filename, "/", group_id,
+ EXTERNAL_LINK_INVALID_PARAMS_TEST_LINK_NAME, H5I_INVALID_HID,
+ H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" created external link '%s' using an invalid LCPL!\n",
+ EXTERNAL_LINK_INVALID_PARAMS_TEST_LINK_NAME);
+ PART_ERROR(H5Lcreate_external_invalid_lcpl);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lcreate_external_invalid_lcpl);
+
+ PART_BEGIN(H5Lcreate_external_invalid_lapl)
+ {
+ TESTING_2("H5Lcreate_external with an invalid LAPL");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Lcreate_external(ext_link_filename, "/", group_id,
+ EXTERNAL_LINK_INVALID_PARAMS_TEST_LINK_NAME, H5P_DEFAULT,
+ H5I_INVALID_HID);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" created external link '%s' using an invalid LAPL!\n",
+ EXTERNAL_LINK_INVALID_PARAMS_TEST_LINK_NAME);
+ PART_ERROR(H5Lcreate_external_invalid_lapl);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lcreate_external_invalid_lapl);
+
+ PART_BEGIN(H5Lcreate_external_invalid_existence)
+ {
+ TESTING_2("invalid link existence after previous invalid H5Lcreate_external calls");
+
+ /* Verify the link hasn't been created */
+ if ((link_exists =
+ H5Lexists(group_id, EXTERNAL_LINK_INVALID_PARAMS_TEST_LINK_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if link '%s' exists\n",
+ EXTERNAL_LINK_INVALID_PARAMS_TEST_LINK_NAME);
+ PART_ERROR(H5Lcreate_external_invalid_existence);
+ }
+
+ if (link_exists) {
+ H5_FAILED();
+ HDprintf(" link '%s' existed!\n", EXTERNAL_LINK_INVALID_PARAMS_TEST_LINK_NAME);
+ PART_ERROR(H5Lcreate_external_invalid_existence);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lcreate_external_invalid_existence);
+ }
+ END_MULTIPART;
+
+ TESTING_2("test cleanup");
+
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to check that a user-defined link can be created.
+ */
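+/*
+ * Illustrative sketch (not part of the test): H5Lcreate_ud stores an opaque
+ * user-supplied buffer ("udata") with the link, tagged with a link class; the
+ * test below reuses the built-in H5L_TYPE_EXTERNAL class. The buffer contents
+ * and names here are hypothetical, and gid is an open group identifier.
+ *
+ *     const char udata[] = "udata";
+ *     H5Lcreate_ud(gid, "ud_link", H5L_TYPE_EXTERNAL, udata, sizeof(udata),
+ *                  H5P_DEFAULT, H5P_DEFAULT);
+ *     // The link can then be checked like any other link.
+ *     htri_t exists = H5Lexists(gid, "ud_link", H5P_DEFAULT);
+ */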
+static int
+test_create_user_defined_link(void)
+{
+#ifndef NO_USER_DEFINED_LINKS
+ ssize_t udata_size;
+ htri_t link_exists;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ char udata[UD_LINK_TEST_UDATA_MAX_SIZE];
+#endif
+
+ TESTING("user-defined link creation");
+
+#ifndef NO_USER_DEFINED_LINKS
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_LINK_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_UD_LINKS)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file, group, or user-defined link aren't supported with this "
+ "connector\n");
+ return 0;
+ }
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, LINK_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", LINK_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, UD_LINK_TEST_GROUP_NAME, H5P_DEFAULT, H5P_DEFAULT,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container subgroup '%s'\n", UD_LINK_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((udata_size = HDsnprintf(udata, UD_LINK_TEST_UDATA_MAX_SIZE, "udata")) < 0)
+ TEST_ERROR;
+
+ if (H5Lcreate_ud(group_id, UD_LINK_TEST_LINK_NAME, H5L_TYPE_EXTERNAL, udata, (size_t)udata_size,
+ H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create user-defined link '%s'\n", UD_LINK_TEST_LINK_NAME);
+ goto error;
+ }
+
+ /* Verify the link has been created */
+ if ((link_exists = H5Lexists(group_id, UD_LINK_TEST_LINK_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if link '%s' exists\n", UD_LINK_TEST_LINK_NAME);
+ goto error;
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" link '%s' didn't exist!\n", UD_LINK_TEST_LINK_NAME);
+ goto error;
+ }
+
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+#else
+ SKIPPED();
+ return 0;
+#endif
+}
+
+/*
+ * A test to check that H5Lcreate_ud fails when
+ * it is given invalid parameters.
+ */
+static int
+test_create_user_defined_link_invalid_params(void)
+{
+ ssize_t udata_size;
+ htri_t link_exists;
+ herr_t err_ret = -1;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ char udata[UD_LINK_INVALID_PARAMS_TEST_UDATA_MAX_SIZE];
+
+ TESTING_MULTIPART("H5Lcreate_ud with invalid parameters");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_UD_LINKS)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file, group, or link aren't supported with this connector\n");
+ return 0;
+ }
+
+ TESTING_2("test setup");
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, LINK_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", LINK_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, UD_LINK_INVALID_PARAMS_TEST_GROUP_NAME, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container subgroup '%s'\n", UD_LINK_INVALID_PARAMS_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((udata_size = HDsnprintf(udata, UD_LINK_INVALID_PARAMS_TEST_UDATA_MAX_SIZE, "udata")) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ BEGIN_MULTIPART
+ {
+ PART_BEGIN(H5Lcreate_ud_invalid_link_loc_id)
+ {
+ TESTING_2("H5Lcreate_ud with an invalid link location ID");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret =
+ H5Lcreate_ud(H5I_INVALID_HID, UD_LINK_INVALID_PARAMS_TEST_LINK_NAME, H5L_TYPE_EXTERNAL,
+ udata, (size_t)udata_size, H5P_DEFAULT, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" created user-defined link '%s' with an invalid link location ID!\n",
+ UD_LINK_INVALID_PARAMS_TEST_LINK_NAME);
+ PART_ERROR(H5Lcreate_ud_invalid_link_loc_id);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lcreate_ud_invalid_link_loc_id);
+
+ PART_BEGIN(H5Lcreate_ud_invalid_link_name)
+ {
+ TESTING_2("H5Lcreate_ud with an invalid link name");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Lcreate_ud(group_id, NULL, H5L_TYPE_EXTERNAL, udata, (size_t)udata_size,
+ H5P_DEFAULT, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" created user-defined link '%s' with a NULL link name!\n",
+ UD_LINK_INVALID_PARAMS_TEST_LINK_NAME);
+ PART_ERROR(H5Lcreate_ud_invalid_link_name);
+ }
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Lcreate_ud(group_id, "", H5L_TYPE_EXTERNAL, udata, (size_t)udata_size,
+ H5P_DEFAULT, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" created user-defined link '%s' with an invalid link name of ''!\n",
+ UD_LINK_INVALID_PARAMS_TEST_LINK_NAME);
+ PART_ERROR(H5Lcreate_ud_invalid_link_name);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lcreate_ud_invalid_link_name);
+
+ PART_BEGIN(H5Lcreate_ud_invalid_link_type)
+ {
+ TESTING_2("H5Lcreate_ud with an invalid link type");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Lcreate_ud(group_id, UD_LINK_INVALID_PARAMS_TEST_LINK_NAME, H5L_TYPE_HARD, udata,
+ (size_t)udata_size, H5P_DEFAULT, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" created user-defined link '%s' with an invalid link type!\n",
+ UD_LINK_INVALID_PARAMS_TEST_LINK_NAME);
+ PART_ERROR(H5Lcreate_ud_invalid_link_type);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lcreate_ud_invalid_link_type);
+
+ PART_BEGIN(H5Lcreate_ud_invalid_udata_pointer)
+ {
+ TESTING_2("H5Lcreate_ud with an invalid udata pointer");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Lcreate_ud(group_id, UD_LINK_INVALID_PARAMS_TEST_LINK_NAME, H5L_TYPE_EXTERNAL,
+ NULL, (size_t)udata_size, H5P_DEFAULT, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" created user-defined link '%s' with an invalid udata pointer!\n",
+ UD_LINK_INVALID_PARAMS_TEST_LINK_NAME);
+ PART_ERROR(H5Lcreate_ud_invalid_udata_pointer);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lcreate_ud_invalid_udata_pointer);
+
+ PART_BEGIN(H5Lcreate_ud_invalid_lcpl)
+ {
+ TESTING_2("H5Lcreate_ud with an invalid LCPL");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Lcreate_ud(group_id, UD_LINK_INVALID_PARAMS_TEST_LINK_NAME, H5L_TYPE_EXTERNAL,
+ udata, (size_t)udata_size, H5I_INVALID_HID, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" created user-defined link '%s' with an invalid LCPL!\n",
+ UD_LINK_INVALID_PARAMS_TEST_LINK_NAME);
+ PART_ERROR(H5Lcreate_ud_invalid_lcpl);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lcreate_ud_invalid_lcpl);
+
+ PART_BEGIN(H5Lcreate_ud_invalid_lapl)
+ {
+ TESTING_2("H5Lcreate_ud with an invalid LAPL");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Lcreate_ud(group_id, UD_LINK_INVALID_PARAMS_TEST_LINK_NAME, H5L_TYPE_EXTERNAL,
+ udata, (size_t)udata_size, H5P_DEFAULT, H5I_INVALID_HID);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" created user-defined link '%s' with an invalid LAPL!\n",
+ UD_LINK_INVALID_PARAMS_TEST_LINK_NAME);
+ PART_ERROR(H5Lcreate_ud_invalid_lapl);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lcreate_ud_invalid_lapl);
+
+ PART_BEGIN(H5Lcreate_ud_invalid_existence)
+ {
+ TESTING_2("invalid link existence after previous invalid H5Lcreate_ud calls");
+
+ /* Verify the link hasn't been created */
+ if ((link_exists = H5Lexists(group_id, UD_LINK_INVALID_PARAMS_TEST_LINK_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if link '%s' exists\n",
+ UD_LINK_INVALID_PARAMS_TEST_LINK_NAME);
+ PART_ERROR(H5Lcreate_ud_invalid_existence);
+ }
+
+ if (link_exists) {
+ H5_FAILED();
+ HDprintf(" link '%s' existed!\n", UD_LINK_INVALID_PARAMS_TEST_LINK_NAME);
+ PART_ERROR(H5Lcreate_ud_invalid_existence);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lcreate_ud_invalid_existence);
+ }
+ END_MULTIPART;
+
+ TESTING_2("test cleanup");
+
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to check that a link can be deleted
+ * using H5Ldelete and H5Ldelete_by_idx.
+ */
+static int
+test_delete_link(void)
+{
+ htri_t link_exists;
+ hid_t file_id = H5I_INVALID_HID, ext_file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t subgroup_id = H5I_INVALID_HID;
+ hid_t nested_grp_id = H5I_INVALID_HID;
+ hid_t gcpl_id = H5I_INVALID_HID;
+#ifndef NO_EXTERNAL_LINKS
+ char ext_link_filename[H5_API_TEST_FILENAME_MAX_LENGTH];
+#endif
+
+ TESTING_MULTIPART("link deletion");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_LINK_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_LINK_MORE) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_HARD_LINKS) || !(vol_cap_flags_g & H5VL_CAP_FLAG_EXTERNAL_LINKS) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_SOFT_LINKS) || !(vol_cap_flags_g & H5VL_CAP_FLAG_CREATION_ORDER)) {
+ SKIPPED();
+        HDprintf("    API functions for basic file, group, or link, or for hard, soft, or external "
+                 "links or link creation order tracking, aren't supported with this connector\n");
+ return 0;
+ }
+
+ TESTING_2("test setup");
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, LINK_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", LINK_TEST_GROUP_NAME);
+ goto error;
+ }
+
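+    /*
+     * H5Ldelete_by_idx with H5_INDEX_CRT_ORDER requires the containing group to track
+     * link creation order, so enable H5P_CRT_ORDER_TRACKED on a GCPL and use it for
+     * every subgroup created in this test.
+     */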
+ if ((gcpl_id = H5Pcreate(H5P_GROUP_CREATE)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create GCPL for link creation order tracking\n");
+ goto error;
+ }
+
+ if (H5Pset_link_creation_order(gcpl_id, H5P_CRT_ORDER_TRACKED) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't set link creation order tracking\n");
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, LINK_DELETE_TEST_SUBGROUP_NAME, H5P_DEFAULT, gcpl_id,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container subgroup '%s'\n", LINK_DELETE_TEST_SUBGROUP_NAME);
+ goto error;
+ }
+
+ PASSED();
+
+ BEGIN_MULTIPART
+ {
+ PART_BEGIN(H5Ldelete_hard)
+ {
+ TESTING_2("H5Ldelete on hard link");
+
+ if ((subgroup_id = H5Gcreate2(group_id, LINK_DELETE_TEST_SUBGROUP1_NAME, H5P_DEFAULT, gcpl_id,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create subgroup '%s'\n", LINK_DELETE_TEST_SUBGROUP1_NAME);
+ PART_ERROR(H5Ldelete_hard);
+ }
+
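+            /* Create a hard link whose target is "." relative to subgroup_id, i.e. a
+             * second link to the subgroup itself, so no new object is created. */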
+ if (H5Lcreate_hard(subgroup_id, ".", subgroup_id, LINK_DELETE_TEST_HARD_LINK_NAME, H5P_DEFAULT,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create first hard link '%s'\n", LINK_DELETE_TEST_HARD_LINK_NAME);
+ PART_ERROR(H5Ldelete_hard);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_HARD_LINK_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if first hard link '%s' exists\n",
+ LINK_DELETE_TEST_HARD_LINK_NAME);
+ PART_ERROR(H5Ldelete_hard);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" first hard link did not exist\n");
+ PART_ERROR(H5Ldelete_hard);
+ }
+
+ if (H5Ldelete(subgroup_id, LINK_DELETE_TEST_HARD_LINK_NAME, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't delete hard link '%s' using H5Ldelete\n",
+ LINK_DELETE_TEST_HARD_LINK_NAME);
+ PART_ERROR(H5Ldelete_hard);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_HARD_LINK_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if first hard link '%s' exists\n",
+ LINK_DELETE_TEST_HARD_LINK_NAME);
+ PART_ERROR(H5Ldelete_hard);
+ }
+
+ if (link_exists) {
+ H5_FAILED();
+ HDprintf(" first hard link exists!\n");
+ PART_ERROR(H5Ldelete_hard);
+ }
+
+ if (H5Gclose(subgroup_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close group '%s'\n", LINK_DELETE_TEST_SUBGROUP1_NAME);
+ PART_ERROR(H5Ldelete_hard);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Ldelete_hard);
+
+ H5E_BEGIN_TRY
+ {
+ H5Gclose(subgroup_id);
+ subgroup_id = H5I_INVALID_HID;
+ }
+ H5E_END_TRY;
+
+ PART_BEGIN(H5Ldelete_hard_indirect)
+ {
+ TESTING_2("H5Ldelete on nested hard link");
+
+ if ((subgroup_id = H5Gcreate2(group_id, LINK_DELETE_TEST_NESTED_SUBGROUP_NAME1, H5P_DEFAULT,
+ gcpl_id, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create subgroup '%s'\n", LINK_DELETE_TEST_NESTED_SUBGROUP_NAME1);
+ PART_ERROR(H5Ldelete_hard_indirect);
+ }
+
+ if ((nested_grp_id = H5Gcreate2(subgroup_id, LINK_DELETE_TEST_NESTED_GRP_NAME, H5P_DEFAULT,
+ gcpl_id, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create subgroup '%s'\n", LINK_DELETE_TEST_NESTED_GRP_NAME);
+ PART_ERROR(H5Ldelete_hard_indirect);
+ }
+
+ if (H5Lcreate_hard(nested_grp_id, ".", nested_grp_id, LINK_DELETE_TEST_HARD_LINK_NAME,
+ H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create first hard link '%s'\n", LINK_DELETE_TEST_HARD_LINK_NAME);
+ PART_ERROR(H5Ldelete_hard_indirect);
+ }
+
+ if ((link_exists = H5Lexists(nested_grp_id, LINK_DELETE_TEST_HARD_LINK_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if first hard link '%s' exists\n",
+ LINK_DELETE_TEST_HARD_LINK_NAME);
+ PART_ERROR(H5Ldelete_hard_indirect);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" first hard link did not exist\n");
+ PART_ERROR(H5Ldelete_hard_indirect);
+ }
+
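+            /* Delete the link through a path relative to the outer subgroup (the
+             * nested-link name presumably traverses the nested group) rather than from
+             * its immediate parent group, exercising path resolution in H5Ldelete. */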
+ if (H5Ldelete(subgroup_id, LINK_DELETE_TEST_NESTED_HARD_LINK_NAME, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't delete hard link '%s' using H5Ldelete\n",
+ LINK_DELETE_TEST_NESTED_HARD_LINK_NAME);
+ PART_ERROR(H5Ldelete_hard_indirect);
+ }
+
+ if ((link_exists = H5Lexists(nested_grp_id, LINK_DELETE_TEST_HARD_LINK_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if first hard link '%s' exists\n",
+ LINK_DELETE_TEST_HARD_LINK_NAME);
+ PART_ERROR(H5Ldelete_hard_indirect);
+ }
+
+ if (link_exists) {
+ H5_FAILED();
+ HDprintf(" first hard link exists!\n");
+ PART_ERROR(H5Ldelete_hard_indirect);
+ }
+
+ if (H5Gclose(nested_grp_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close group '%s'\n", LINK_DELETE_TEST_NESTED_GRP_NAME);
+ PART_ERROR(H5Ldelete_hard_indirect);
+ }
+
+ if (H5Gclose(subgroup_id) < 0) {
+ H5_FAILED();
+                HDprintf("    failed to close group '%s'\n", LINK_DELETE_TEST_NESTED_SUBGROUP_NAME1);
+ PART_ERROR(H5Ldelete_hard_indirect);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Ldelete_hard_indirect);
+
+ H5E_BEGIN_TRY
+ {
+ H5Gclose(nested_grp_id);
+ nested_grp_id = H5I_INVALID_HID;
+ H5Gclose(subgroup_id);
+ subgroup_id = H5I_INVALID_HID;
+ }
+ H5E_END_TRY;
+
+ PART_BEGIN(H5Ldelete_soft)
+ {
+ TESTING_2("H5Ldelete on soft link");
+
+ if ((subgroup_id = H5Gcreate2(group_id, LINK_DELETE_TEST_SUBGROUP2_NAME, H5P_DEFAULT, gcpl_id,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create subgroup '%s'\n", LINK_DELETE_TEST_SUBGROUP2_NAME);
+ PART_ERROR(H5Ldelete_soft);
+ }
+
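+            /* The soft link's value is the absolute path of this subgroup, so the link
+             * points back at the group that contains it. */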
+ if (H5Lcreate_soft("/" LINK_TEST_GROUP_NAME "/" LINK_DELETE_TEST_SUBGROUP_NAME
+ "/" LINK_DELETE_TEST_SUBGROUP2_NAME,
+ subgroup_id, LINK_DELETE_TEST_SOFT_LINK_NAME, H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create first soft link '%s'\n", LINK_DELETE_TEST_SOFT_LINK_NAME);
+ PART_ERROR(H5Ldelete_soft);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_SOFT_LINK_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if first soft link '%s' exists\n",
+ LINK_DELETE_TEST_SOFT_LINK_NAME);
+ PART_ERROR(H5Ldelete_soft);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" first soft link did not exist\n");
+ PART_ERROR(H5Ldelete_soft);
+ }
+
+ if (H5Ldelete(subgroup_id, LINK_DELETE_TEST_SOFT_LINK_NAME, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't delete soft link '%s' using H5Ldelete\n",
+ LINK_DELETE_TEST_SOFT_LINK_NAME);
+ PART_ERROR(H5Ldelete_soft);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_SOFT_LINK_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if first soft link '%s' exists\n",
+ LINK_DELETE_TEST_SOFT_LINK_NAME);
+ PART_ERROR(H5Ldelete_soft);
+ }
+
+ if (link_exists) {
+ H5_FAILED();
+ HDprintf(" first soft link exists!\n");
+ PART_ERROR(H5Ldelete_soft);
+ }
+
+ if (H5Gclose(subgroup_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close group '%s'\n", LINK_DELETE_TEST_SUBGROUP2_NAME);
+ PART_ERROR(H5Ldelete_soft);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Ldelete_soft);
+
+ H5E_BEGIN_TRY
+ {
+ H5Gclose(subgroup_id);
+ subgroup_id = H5I_INVALID_HID;
+ }
+ H5E_END_TRY;
+
+ PART_BEGIN(H5Ldelete_external)
+ {
+ TESTING_2("H5Ldelete on external link");
+#ifndef NO_EXTERNAL_LINKS
+ HDsnprintf(ext_link_filename, H5_API_TEST_FILENAME_MAX_LENGTH, "%s",
+ EXTERNAL_LINK_TEST_FILE_NAME);
+
+ if ((ext_file_id = H5Fcreate(ext_link_filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create file '%s' for external link to reference\n", ext_link_filename);
+ PART_ERROR(H5Ldelete_external);
+ }
+
+ if (H5Fclose(ext_file_id) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't close file '%s'\n", ext_link_filename);
+ PART_ERROR(H5Ldelete_external);
+ }
+
+ if ((subgroup_id = H5Gcreate2(group_id, LINK_DELETE_TEST_SUBGROUP3_NAME, H5P_DEFAULT, gcpl_id,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create subgroup '%s'\n", LINK_DELETE_TEST_SUBGROUP3_NAME);
+ PART_ERROR(H5Ldelete_external);
+ }
+
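+            /* An external link stores a target file name and an object path ("/" here,
+             * the external file's root group); the target is only resolved when the
+             * link is traversed. */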
+ if (H5Lcreate_external(ext_link_filename, "/", subgroup_id, LINK_DELETE_TEST_EXTERNAL_LINK_NAME,
+ H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create first external link '%s'\n",
+ LINK_DELETE_TEST_EXTERNAL_LINK_NAME);
+ PART_ERROR(H5Ldelete_external);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_EXTERNAL_LINK_NAME, H5P_DEFAULT)) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if first external link '%s' exists\n",
+ LINK_DELETE_TEST_EXTERNAL_LINK_NAME);
+ PART_ERROR(H5Ldelete_external);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" first external link did not exist\n");
+ PART_ERROR(H5Ldelete_external);
+ }
+
+ if (H5Ldelete(subgroup_id, LINK_DELETE_TEST_EXTERNAL_LINK_NAME, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't delete external link '%s' using H5Ldelete\n",
+ LINK_DELETE_TEST_EXTERNAL_LINK_NAME);
+ PART_ERROR(H5Ldelete_external);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_EXTERNAL_LINK_NAME, H5P_DEFAULT)) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if first external link '%s' exists\n",
+ LINK_DELETE_TEST_EXTERNAL_LINK_NAME);
+ PART_ERROR(H5Ldelete_external);
+ }
+
+ if (link_exists) {
+ H5_FAILED();
+ HDprintf(" first external link exists!\n");
+ PART_ERROR(H5Ldelete_external);
+ }
+
+ if (H5Gclose(subgroup_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close group '%s'\n", LINK_DELETE_TEST_SUBGROUP3_NAME);
+ PART_ERROR(H5Ldelete_external);
+ }
+
+ PASSED();
+#else
+ SKIPPED();
+ PART_EMPTY(H5Ldelete_external);
+#endif
+ }
+ PART_END(H5Ldelete_external);
+
+ H5E_BEGIN_TRY
+ {
+ H5Gclose(subgroup_id);
+ subgroup_id = H5I_INVALID_HID;
+ H5Fclose(ext_file_id);
+ ext_file_id = H5I_INVALID_HID;
+ }
+ H5E_END_TRY;
+
+ PART_BEGIN(H5Ldelete_ud)
+ {
+ TESTING_2("H5Ldelete on user-defined link");
+
+ /* TODO */
+
+ SKIPPED();
+ PART_EMPTY(H5Ldelete_ud);
+ }
+ PART_END(H5Ldelete_ud);
+
+ H5E_BEGIN_TRY
+ {
+ H5Gclose(subgroup_id);
+ subgroup_id = H5I_INVALID_HID;
+ }
+ H5E_END_TRY;
+
+ PART_BEGIN(H5Ldelete_by_idx_hard_crt_order_increasing)
+ {
+ TESTING_2("H5Ldelete_by_idx on hard link by creation order in increasing order");
+
+ if ((subgroup_id = H5Gcreate2(group_id, LINK_DELETE_TEST_SUBGROUP5_NAME, H5P_DEFAULT, gcpl_id,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create subgroup '%s'\n", LINK_DELETE_TEST_SUBGROUP5_NAME);
+ PART_ERROR(H5Ldelete_by_idx_hard_crt_order_increasing);
+ }
+
+ /* Create several hard links */
+ if (H5Lcreate_hard(subgroup_id, ".", subgroup_id, LINK_DELETE_TEST_HARD_LINK_NAME, H5P_DEFAULT,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create hard link '%s'\n", LINK_DELETE_TEST_HARD_LINK_NAME);
+ PART_ERROR(H5Ldelete_by_idx_hard_crt_order_increasing);
+ }
+
+ if (H5Lcreate_hard(subgroup_id, ".", subgroup_id, LINK_DELETE_TEST_HARD_LINK_NAME2, H5P_DEFAULT,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create hard link '%s'\n", LINK_DELETE_TEST_HARD_LINK_NAME2);
+ PART_ERROR(H5Ldelete_by_idx_hard_crt_order_increasing);
+ }
+
+ if (H5Lcreate_hard(subgroup_id, ".", subgroup_id, LINK_DELETE_TEST_HARD_LINK_NAME3, H5P_DEFAULT,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create hard link '%s'\n", LINK_DELETE_TEST_HARD_LINK_NAME3);
+ PART_ERROR(H5Ldelete_by_idx_hard_crt_order_increasing);
+ }
+
+ /* Verify the links have been created */
+ if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_HARD_LINK_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if hard link '%s' exists\n",
+ LINK_DELETE_TEST_HARD_LINK_NAME);
+ PART_ERROR(H5Ldelete_by_idx_hard_crt_order_increasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" hard link '%s' did not exist before deletion\n",
+ LINK_DELETE_TEST_HARD_LINK_NAME);
+ PART_ERROR(H5Ldelete_by_idx_hard_crt_order_increasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_HARD_LINK_NAME2, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if hard link '%s' exists\n",
+ LINK_DELETE_TEST_HARD_LINK_NAME2);
+ PART_ERROR(H5Ldelete_by_idx_hard_crt_order_increasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" hard link '%s' did not exist before deletion\n",
+ LINK_DELETE_TEST_HARD_LINK_NAME2);
+ PART_ERROR(H5Ldelete_by_idx_hard_crt_order_increasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_HARD_LINK_NAME3, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if hard link '%s' exists\n",
+ LINK_DELETE_TEST_HARD_LINK_NAME3);
+ PART_ERROR(H5Ldelete_by_idx_hard_crt_order_increasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" hard link '%s' did not exist before deletion\n",
+ LINK_DELETE_TEST_HARD_LINK_NAME3);
+ PART_ERROR(H5Ldelete_by_idx_hard_crt_order_increasing);
+ }
+
+ /* Delete a link */
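+            /* With H5_INDEX_CRT_ORDER and H5_ITER_INC, index 0 refers to the link that
+             * was created first. */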
+ if (H5Ldelete_by_idx(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, 0, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't delete hard link '%s' using H5Ldelete_by_idx by creation order in "
+ "increasing order\n",
+ LINK_DELETE_TEST_HARD_LINK_NAME);
+ PART_ERROR(H5Ldelete_by_idx_hard_crt_order_increasing);
+ }
+
+ /* Ensure that the link is gone and others remain */
+ if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_HARD_LINK_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if hard link '%s' exists\n",
+ LINK_DELETE_TEST_HARD_LINK_NAME);
+ PART_ERROR(H5Ldelete_by_idx_hard_crt_order_increasing);
+ }
+
+ if (link_exists) {
+ H5_FAILED();
+ HDprintf(" hard link '%s' exists after deletion!\n", LINK_DELETE_TEST_HARD_LINK_NAME);
+ PART_ERROR(H5Ldelete_by_idx_hard_crt_order_increasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_HARD_LINK_NAME2, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if hard link '%s' exists\n",
+ LINK_DELETE_TEST_HARD_LINK_NAME2);
+ PART_ERROR(H5Ldelete_by_idx_hard_crt_order_increasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" hard link '%s' did not exist after deletion of a different link!\n",
+ LINK_DELETE_TEST_HARD_LINK_NAME2);
+ PART_ERROR(H5Ldelete_by_idx_hard_crt_order_increasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_HARD_LINK_NAME3, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if hard link '%s' exists\n",
+ LINK_DELETE_TEST_HARD_LINK_NAME3);
+ PART_ERROR(H5Ldelete_by_idx_hard_crt_order_increasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" hard link '%s' did not exist after deletion of a different link!\n",
+ LINK_DELETE_TEST_HARD_LINK_NAME3);
+ PART_ERROR(H5Ldelete_by_idx_hard_crt_order_increasing);
+ }
+
+ /* Repeat until all links have been deleted */
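+            /* Each deletion shifts the remaining links down, so index 0 again selects
+             * the first remaining link in creation order. */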
+ if (H5Ldelete_by_idx(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, 0, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't delete hard link '%s' using H5Ldelete_by_idx by creation order in "
+ "increasing order\n",
+ LINK_DELETE_TEST_HARD_LINK_NAME2);
+ PART_ERROR(H5Ldelete_by_idx_hard_crt_order_increasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_HARD_LINK_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if hard link '%s' exists\n",
+ LINK_DELETE_TEST_HARD_LINK_NAME);
+ PART_ERROR(H5Ldelete_by_idx_hard_crt_order_increasing);
+ }
+
+ if (link_exists) {
+ H5_FAILED();
+ HDprintf(" hard link '%s' exists after deletion!\n", LINK_DELETE_TEST_HARD_LINK_NAME);
+ PART_ERROR(H5Ldelete_by_idx_hard_crt_order_increasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_HARD_LINK_NAME2, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if hard link '%s' exists\n",
+ LINK_DELETE_TEST_HARD_LINK_NAME2);
+ PART_ERROR(H5Ldelete_by_idx_hard_crt_order_increasing);
+ }
+
+ if (link_exists) {
+ H5_FAILED();
+ HDprintf(" hard link '%s' exists after deletion!\n", LINK_DELETE_TEST_HARD_LINK_NAME2);
+ PART_ERROR(H5Ldelete_by_idx_hard_crt_order_increasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_HARD_LINK_NAME3, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if hard link '%s' exists\n",
+ LINK_DELETE_TEST_HARD_LINK_NAME3);
+ PART_ERROR(H5Ldelete_by_idx_hard_crt_order_increasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" hard link '%s' did not exist after deletion of a different link!\n",
+ LINK_DELETE_TEST_HARD_LINK_NAME3);
+ PART_ERROR(H5Ldelete_by_idx_hard_crt_order_increasing);
+ }
+
+ if (H5Ldelete_by_idx(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, 0, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't delete hard link '%s' using H5Ldelete_by_idx by creation order in "
+ "increasing order\n",
+ LINK_DELETE_TEST_HARD_LINK_NAME3);
+ PART_ERROR(H5Ldelete_by_idx_hard_crt_order_increasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_HARD_LINK_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if hard link '%s' exists\n",
+ LINK_DELETE_TEST_HARD_LINK_NAME);
+ PART_ERROR(H5Ldelete_by_idx_hard_crt_order_increasing);
+ }
+
+ if (link_exists) {
+ H5_FAILED();
+ HDprintf(" hard link '%s' exists after deletion!\n", LINK_DELETE_TEST_HARD_LINK_NAME);
+ PART_ERROR(H5Ldelete_by_idx_hard_crt_order_increasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_HARD_LINK_NAME2, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if hard link '%s' exists\n",
+ LINK_DELETE_TEST_HARD_LINK_NAME2);
+ PART_ERROR(H5Ldelete_by_idx_hard_crt_order_increasing);
+ }
+
+ if (link_exists) {
+ H5_FAILED();
+ HDprintf(" hard link '%s' exists after deletion!\n", LINK_DELETE_TEST_HARD_LINK_NAME2);
+ PART_ERROR(H5Ldelete_by_idx_hard_crt_order_increasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_HARD_LINK_NAME3, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if hard link '%s' exists\n",
+ LINK_DELETE_TEST_HARD_LINK_NAME3);
+ PART_ERROR(H5Ldelete_by_idx_hard_crt_order_increasing);
+ }
+
+ if (link_exists) {
+ H5_FAILED();
+ HDprintf(" hard link '%s' exists after deletion!\n", LINK_DELETE_TEST_HARD_LINK_NAME3);
+ PART_ERROR(H5Ldelete_by_idx_hard_crt_order_increasing);
+ }
+
+ if (H5Gclose(subgroup_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close group '%s'\n", LINK_DELETE_TEST_SUBGROUP5_NAME);
+ PART_ERROR(H5Ldelete_by_idx_hard_crt_order_increasing);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Ldelete_by_idx_hard_crt_order_increasing);
+
+ H5E_BEGIN_TRY
+ {
+ H5Gclose(subgroup_id);
+ subgroup_id = H5I_INVALID_HID;
+ }
+ H5E_END_TRY;
+
+ PART_BEGIN(H5Ldelete_by_idx_hard_crt_order_decreasing)
+ {
+ TESTING_2("H5Ldelete_by_idx on hard link by creation order in decreasing order");
+
+ if ((subgroup_id = H5Gcreate2(group_id, LINK_DELETE_TEST_SUBGROUP6_NAME, H5P_DEFAULT, gcpl_id,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create subgroup '%s'\n", LINK_DELETE_TEST_SUBGROUP6_NAME);
+ PART_ERROR(H5Ldelete_by_idx_hard_crt_order_decreasing);
+ }
+
+ /* Create several hard links */
+ if (H5Lcreate_hard(subgroup_id, ".", subgroup_id, LINK_DELETE_TEST_HARD_LINK_NAME, H5P_DEFAULT,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create hard link '%s'\n", LINK_DELETE_TEST_HARD_LINK_NAME);
+ PART_ERROR(H5Ldelete_by_idx_hard_crt_order_decreasing);
+ }
+
+ if (H5Lcreate_hard(subgroup_id, ".", subgroup_id, LINK_DELETE_TEST_HARD_LINK_NAME2, H5P_DEFAULT,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create hard link '%s'\n", LINK_DELETE_TEST_HARD_LINK_NAME2);
+ PART_ERROR(H5Ldelete_by_idx_hard_crt_order_decreasing);
+ }
+
+ if (H5Lcreate_hard(subgroup_id, ".", subgroup_id, LINK_DELETE_TEST_HARD_LINK_NAME3, H5P_DEFAULT,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create hard link '%s'\n", LINK_DELETE_TEST_HARD_LINK_NAME3);
+ PART_ERROR(H5Ldelete_by_idx_hard_crt_order_decreasing);
+ }
+
+ /* Verify the links have been created */
+ if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_HARD_LINK_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if hard link '%s' exists\n",
+ LINK_DELETE_TEST_HARD_LINK_NAME);
+ PART_ERROR(H5Ldelete_by_idx_hard_crt_order_decreasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" hard link '%s' did not exist before deletion\n",
+ LINK_DELETE_TEST_HARD_LINK_NAME);
+ PART_ERROR(H5Ldelete_by_idx_hard_crt_order_decreasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_HARD_LINK_NAME2, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if hard link '%s' exists\n",
+ LINK_DELETE_TEST_HARD_LINK_NAME2);
+ PART_ERROR(H5Ldelete_by_idx_hard_crt_order_decreasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" hard link '%s' did not exist before deletion\n",
+ LINK_DELETE_TEST_HARD_LINK_NAME2);
+ PART_ERROR(H5Ldelete_by_idx_hard_crt_order_decreasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_HARD_LINK_NAME3, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if hard link '%s' exists\n",
+ LINK_DELETE_TEST_HARD_LINK_NAME3);
+ PART_ERROR(H5Ldelete_by_idx_hard_crt_order_decreasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" hard link '%s' did not exist before deletion\n",
+ LINK_DELETE_TEST_HARD_LINK_NAME3);
+ PART_ERROR(H5Ldelete_by_idx_hard_crt_order_decreasing);
+ }
+
+ /* Delete a link */
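+            /* With H5_ITER_DEC, index 0 is the most recently created link, so index 2
+             * (of the three links) selects the link that was created first. */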
+ if (H5Ldelete_by_idx(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_DEC, 2, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't delete hard link '%s' using H5Ldelete_by_idx by creation order in "
+ "decreasing order\n",
+ LINK_DELETE_TEST_HARD_LINK_NAME);
+ PART_ERROR(H5Ldelete_by_idx_hard_crt_order_decreasing);
+ }
+
+ /* Ensure that the link is gone and others remain */
+ if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_HARD_LINK_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if hard link '%s' exists\n",
+ LINK_DELETE_TEST_HARD_LINK_NAME);
+ PART_ERROR(H5Ldelete_by_idx_hard_crt_order_decreasing);
+ }
+
+ if (link_exists) {
+ H5_FAILED();
+ HDprintf(" hard link '%s' exists after deletion!\n", LINK_DELETE_TEST_HARD_LINK_NAME);
+ PART_ERROR(H5Ldelete_by_idx_hard_crt_order_decreasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_HARD_LINK_NAME2, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if hard link '%s' exists\n",
+ LINK_DELETE_TEST_HARD_LINK_NAME2);
+ PART_ERROR(H5Ldelete_by_idx_hard_crt_order_decreasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" hard link '%s' did not exist after deletion of a different link!\n",
+ LINK_DELETE_TEST_HARD_LINK_NAME2);
+ PART_ERROR(H5Ldelete_by_idx_hard_crt_order_decreasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_HARD_LINK_NAME3, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if hard link '%s' exists\n",
+ LINK_DELETE_TEST_HARD_LINK_NAME3);
+ PART_ERROR(H5Ldelete_by_idx_hard_crt_order_decreasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" hard link '%s' did not exist after deletion of a different link!\n",
+ LINK_DELETE_TEST_HARD_LINK_NAME3);
+ PART_ERROR(H5Ldelete_by_idx_hard_crt_order_decreasing);
+ }
+
+ /* Repeat until all links have been deleted */
+ if (H5Ldelete_by_idx(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_DEC, 1, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't delete hard link '%s' using H5Ldelete_by_idx by creation order in "
+ "decreasing order\n",
+ LINK_DELETE_TEST_HARD_LINK_NAME2);
+ PART_ERROR(H5Ldelete_by_idx_hard_crt_order_decreasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_HARD_LINK_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if hard link '%s' exists\n",
+ LINK_DELETE_TEST_HARD_LINK_NAME);
+ PART_ERROR(H5Ldelete_by_idx_hard_crt_order_decreasing);
+ }
+
+ if (link_exists) {
+ H5_FAILED();
+ HDprintf(" hard link '%s' exists after deletion!\n", LINK_DELETE_TEST_HARD_LINK_NAME);
+ PART_ERROR(H5Ldelete_by_idx_hard_crt_order_decreasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_HARD_LINK_NAME2, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if hard link '%s' exists\n",
+ LINK_DELETE_TEST_HARD_LINK_NAME2);
+ PART_ERROR(H5Ldelete_by_idx_hard_crt_order_decreasing);
+ }
+
+ if (link_exists) {
+ H5_FAILED();
+ HDprintf(" hard link '%s' exists after deletion!\n", LINK_DELETE_TEST_HARD_LINK_NAME2);
+ PART_ERROR(H5Ldelete_by_idx_hard_crt_order_decreasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_HARD_LINK_NAME3, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if hard link '%s' exists\n",
+ LINK_DELETE_TEST_HARD_LINK_NAME3);
+ PART_ERROR(H5Ldelete_by_idx_hard_crt_order_decreasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" hard link '%s' did not exist after deletion of a different link!\n",
+ LINK_DELETE_TEST_HARD_LINK_NAME3);
+ PART_ERROR(H5Ldelete_by_idx_hard_crt_order_decreasing);
+ }
+
+ if (H5Ldelete_by_idx(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_DEC, 0, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't delete hard link '%s' using H5Ldelete_by_idx by creation order in "
+ "decreasing order\n",
+ LINK_DELETE_TEST_HARD_LINK_NAME3);
+ PART_ERROR(H5Ldelete_by_idx_hard_crt_order_decreasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_HARD_LINK_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if hard link '%s' exists\n",
+ LINK_DELETE_TEST_HARD_LINK_NAME);
+ PART_ERROR(H5Ldelete_by_idx_hard_crt_order_decreasing);
+ }
+
+ if (link_exists) {
+ H5_FAILED();
+ HDprintf(" hard link '%s' exists after deletion!\n", LINK_DELETE_TEST_HARD_LINK_NAME);
+ PART_ERROR(H5Ldelete_by_idx_hard_crt_order_decreasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_HARD_LINK_NAME2, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if hard link '%s' exists\n",
+ LINK_DELETE_TEST_HARD_LINK_NAME2);
+ PART_ERROR(H5Ldelete_by_idx_hard_crt_order_decreasing);
+ }
+
+ if (link_exists) {
+ H5_FAILED();
+ HDprintf(" hard link '%s' exists after deletion!\n", LINK_DELETE_TEST_HARD_LINK_NAME2);
+ PART_ERROR(H5Ldelete_by_idx_hard_crt_order_decreasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_HARD_LINK_NAME3, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if hard link '%s' exists\n",
+ LINK_DELETE_TEST_HARD_LINK_NAME3);
+ PART_ERROR(H5Ldelete_by_idx_hard_crt_order_decreasing);
+ }
+
+ if (link_exists) {
+ H5_FAILED();
+ HDprintf(" hard link '%s' exists after deletion!\n", LINK_DELETE_TEST_HARD_LINK_NAME3);
+ PART_ERROR(H5Ldelete_by_idx_hard_crt_order_decreasing);
+ }
+
+ if (H5Gclose(subgroup_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close group '%s'\n", LINK_DELETE_TEST_SUBGROUP6_NAME);
+ PART_ERROR(H5Ldelete_by_idx_hard_crt_order_decreasing);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Ldelete_by_idx_hard_crt_order_decreasing);
+
+ H5E_BEGIN_TRY
+ {
+ H5Gclose(subgroup_id);
+ subgroup_id = H5I_INVALID_HID;
+ }
+ H5E_END_TRY;
+
+ PART_BEGIN(H5Ldelete_by_idx_hard_name_order_increasing)
+ {
+ TESTING_2("H5Ldelete_by_idx on hard link by alphabetical order in increasing order");
+
+ if ((subgroup_id = H5Gcreate2(group_id, LINK_DELETE_TEST_SUBGROUP7_NAME, H5P_DEFAULT, gcpl_id,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create subgroup '%s'\n", LINK_DELETE_TEST_SUBGROUP7_NAME);
+ PART_ERROR(H5Ldelete_by_idx_hard_name_order_increasing);
+ }
+
+ /* Create several hard links */
+ if (H5Lcreate_hard(subgroup_id, ".", subgroup_id, LINK_DELETE_TEST_HARD_LINK_NAME, H5P_DEFAULT,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create hard link '%s'\n", LINK_DELETE_TEST_HARD_LINK_NAME);
+ PART_ERROR(H5Ldelete_by_idx_hard_name_order_increasing);
+ }
+
+ if (H5Lcreate_hard(subgroup_id, ".", subgroup_id, LINK_DELETE_TEST_HARD_LINK_NAME2, H5P_DEFAULT,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create hard link '%s'\n", LINK_DELETE_TEST_HARD_LINK_NAME2);
+ PART_ERROR(H5Ldelete_by_idx_hard_name_order_increasing);
+ }
+
+ if (H5Lcreate_hard(subgroup_id, ".", subgroup_id, LINK_DELETE_TEST_HARD_LINK_NAME3, H5P_DEFAULT,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create hard link '%s'\n", LINK_DELETE_TEST_HARD_LINK_NAME3);
+ PART_ERROR(H5Ldelete_by_idx_hard_name_order_increasing);
+ }
+
+ /* Verify the links have been created */
+ if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_HARD_LINK_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if hard link '%s' exists\n",
+ LINK_DELETE_TEST_HARD_LINK_NAME);
+ PART_ERROR(H5Ldelete_by_idx_hard_name_order_increasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" hard link '%s' did not exist before deletion\n",
+ LINK_DELETE_TEST_HARD_LINK_NAME);
+ PART_ERROR(H5Ldelete_by_idx_hard_name_order_increasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_HARD_LINK_NAME2, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if hard link '%s' exists\n",
+ LINK_DELETE_TEST_HARD_LINK_NAME2);
+ PART_ERROR(H5Ldelete_by_idx_hard_name_order_increasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" hard link '%s' did not exist before deletion\n",
+ LINK_DELETE_TEST_HARD_LINK_NAME2);
+ PART_ERROR(H5Ldelete_by_idx_hard_name_order_increasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_HARD_LINK_NAME3, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if hard link '%s' exists\n",
+ LINK_DELETE_TEST_HARD_LINK_NAME3);
+ PART_ERROR(H5Ldelete_by_idx_hard_name_order_increasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" hard link '%s' did not exist before deletion\n",
+ LINK_DELETE_TEST_HARD_LINK_NAME3);
+ PART_ERROR(H5Ldelete_by_idx_hard_name_order_increasing);
+ }
+
+ /* Delete a link */
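+            /* With H5_INDEX_NAME and H5_ITER_INC, index 0 selects the lexicographically
+             * smallest remaining link name. */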
+ if (H5Ldelete_by_idx(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_INC, 0, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't delete hard link '%s' using H5Ldelete_by_idx by alphabetical order in "
+ "increasing order\n",
+ LINK_DELETE_TEST_HARD_LINK_NAME);
+ PART_ERROR(H5Ldelete_by_idx_hard_name_order_increasing);
+ }
+
+ /* Ensure that the link is gone and others remain */
+ if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_HARD_LINK_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if hard link '%s' exists\n",
+ LINK_DELETE_TEST_HARD_LINK_NAME);
+ PART_ERROR(H5Ldelete_by_idx_hard_name_order_increasing);
+ }
+
+ if (link_exists) {
+ H5_FAILED();
+ HDprintf(" hard link '%s' exists after deletion!\n", LINK_DELETE_TEST_HARD_LINK_NAME);
+ PART_ERROR(H5Ldelete_by_idx_hard_name_order_increasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_HARD_LINK_NAME2, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if hard link '%s' exists\n",
+ LINK_DELETE_TEST_HARD_LINK_NAME2);
+ PART_ERROR(H5Ldelete_by_idx_hard_name_order_increasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" hard link '%s' did not exist after deletion of a different link!\n",
+ LINK_DELETE_TEST_HARD_LINK_NAME2);
+ PART_ERROR(H5Ldelete_by_idx_hard_name_order_increasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_HARD_LINK_NAME3, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if hard link '%s' exists\n",
+ LINK_DELETE_TEST_HARD_LINK_NAME3);
+ PART_ERROR(H5Ldelete_by_idx_hard_name_order_increasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" hard link '%s' did not exist after deletion of a different link!\n",
+ LINK_DELETE_TEST_HARD_LINK_NAME3);
+ PART_ERROR(H5Ldelete_by_idx_hard_name_order_increasing);
+ }
+
+ /* Repeat until all links have been deleted */
+ if (H5Ldelete_by_idx(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_INC, 0, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't delete hard link '%s' using H5Ldelete_by_idx by alphabetical order in "
+ "increasing order\n",
+ LINK_DELETE_TEST_HARD_LINK_NAME2);
+ PART_ERROR(H5Ldelete_by_idx_hard_name_order_increasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_HARD_LINK_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if hard link '%s' exists\n",
+ LINK_DELETE_TEST_HARD_LINK_NAME);
+ PART_ERROR(H5Ldelete_by_idx_hard_name_order_increasing);
+ }
+
+ if (link_exists) {
+ H5_FAILED();
+ HDprintf(" hard link '%s' exists after deletion!\n", LINK_DELETE_TEST_HARD_LINK_NAME);
+ PART_ERROR(H5Ldelete_by_idx_hard_name_order_increasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_HARD_LINK_NAME2, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if hard link '%s' exists\n",
+ LINK_DELETE_TEST_HARD_LINK_NAME2);
+ PART_ERROR(H5Ldelete_by_idx_hard_name_order_increasing);
+ }
+
+ if (link_exists) {
+ H5_FAILED();
+ HDprintf(" hard link '%s' exists after deletion!\n", LINK_DELETE_TEST_HARD_LINK_NAME2);
+ PART_ERROR(H5Ldelete_by_idx_hard_name_order_increasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_HARD_LINK_NAME3, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if hard link '%s' exists\n",
+ LINK_DELETE_TEST_HARD_LINK_NAME3);
+ PART_ERROR(H5Ldelete_by_idx_hard_name_order_increasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" hard link '%s' did not exist after deletion of a different link!\n",
+ LINK_DELETE_TEST_HARD_LINK_NAME3);
+ PART_ERROR(H5Ldelete_by_idx_hard_name_order_increasing);
+ }
+
+ if (H5Ldelete_by_idx(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_INC, 0, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't delete hard link '%s' using H5Ldelete_by_idx by alphabetical order in "
+ "increasing order\n",
+ LINK_DELETE_TEST_HARD_LINK_NAME3);
+ PART_ERROR(H5Ldelete_by_idx_hard_name_order_increasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_HARD_LINK_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if hard link '%s' exists\n",
+ LINK_DELETE_TEST_HARD_LINK_NAME);
+ PART_ERROR(H5Ldelete_by_idx_hard_name_order_increasing);
+ }
+
+ if (link_exists) {
+ H5_FAILED();
+ HDprintf(" hard link '%s' exists after deletion!\n", LINK_DELETE_TEST_HARD_LINK_NAME);
+ PART_ERROR(H5Ldelete_by_idx_hard_name_order_increasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_HARD_LINK_NAME2, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if hard link '%s' exists\n",
+ LINK_DELETE_TEST_HARD_LINK_NAME2);
+ PART_ERROR(H5Ldelete_by_idx_hard_name_order_increasing);
+ }
+
+ if (link_exists) {
+ H5_FAILED();
+ HDprintf(" hard link '%s' exists after deletion!\n", LINK_DELETE_TEST_HARD_LINK_NAME2);
+ PART_ERROR(H5Ldelete_by_idx_hard_name_order_increasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_HARD_LINK_NAME3, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if hard link '%s' exists\n",
+ LINK_DELETE_TEST_HARD_LINK_NAME3);
+ PART_ERROR(H5Ldelete_by_idx_hard_name_order_increasing);
+ }
+
+ if (link_exists) {
+ H5_FAILED();
+ HDprintf(" hard link '%s' exists after deletion!\n", LINK_DELETE_TEST_HARD_LINK_NAME3);
+ PART_ERROR(H5Ldelete_by_idx_hard_name_order_increasing);
+ }
+
+ if (H5Gclose(subgroup_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close group '%s'\n", LINK_DELETE_TEST_SUBGROUP7_NAME);
+ PART_ERROR(H5Ldelete_by_idx_hard_name_order_increasing);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Ldelete_by_idx_hard_name_order_increasing);
+
+ H5E_BEGIN_TRY
+ {
+ H5Gclose(subgroup_id);
+ subgroup_id = H5I_INVALID_HID;
+ }
+ H5E_END_TRY;
+
+ PART_BEGIN(H5Ldelete_by_idx_hard_name_order_decreasing)
+ {
+ TESTING_2("H5Ldelete_by_idx on hard link by alphabetical order in decreasing order");
+#ifndef NO_DECREASING_ALPHA_ITER_ORDER
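+            /* NO_DECREASING_ALPHA_ITER_ORDER is presumably defined for connectors that
+             * cannot iterate links by name in decreasing order; in that case this part
+             * is skipped via the #else branch below. */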
+ if ((subgroup_id = H5Gcreate2(group_id, LINK_DELETE_TEST_SUBGROUP8_NAME, H5P_DEFAULT, gcpl_id,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create subgroup '%s'\n", LINK_DELETE_TEST_SUBGROUP8_NAME);
+ PART_ERROR(H5Ldelete_by_idx_hard_name_order_decreasing);
+ }
+
+ /* Create several hard links */
+ if (H5Lcreate_hard(subgroup_id, ".", subgroup_id, LINK_DELETE_TEST_HARD_LINK_NAME, H5P_DEFAULT,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create hard link '%s'\n", LINK_DELETE_TEST_HARD_LINK_NAME);
+ PART_ERROR(H5Ldelete_by_idx_hard_name_order_decreasing);
+ }
+
+ if (H5Lcreate_hard(subgroup_id, ".", subgroup_id, LINK_DELETE_TEST_HARD_LINK_NAME2, H5P_DEFAULT,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create hard link '%s'\n", LINK_DELETE_TEST_HARD_LINK_NAME2);
+ PART_ERROR(H5Ldelete_by_idx_hard_name_order_decreasing);
+ }
+
+ if (H5Lcreate_hard(subgroup_id, ".", subgroup_id, LINK_DELETE_TEST_HARD_LINK_NAME3, H5P_DEFAULT,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create hard link '%s'\n", LINK_DELETE_TEST_HARD_LINK_NAME3);
+ PART_ERROR(H5Ldelete_by_idx_hard_name_order_decreasing);
+ }
+
+ /* Verify the links have been created */
+ if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_HARD_LINK_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if hard link '%s' exists\n",
+ LINK_DELETE_TEST_HARD_LINK_NAME);
+ PART_ERROR(H5Ldelete_by_idx_hard_name_order_decreasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" hard link '%s' did not exist before deletion\n",
+ LINK_DELETE_TEST_HARD_LINK_NAME);
+ PART_ERROR(H5Ldelete_by_idx_hard_name_order_decreasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_HARD_LINK_NAME2, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if hard link '%s' exists\n",
+ LINK_DELETE_TEST_HARD_LINK_NAME2);
+ PART_ERROR(H5Ldelete_by_idx_hard_name_order_decreasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" hard link '%s' did not exist before deletion\n",
+ LINK_DELETE_TEST_HARD_LINK_NAME2);
+ PART_ERROR(H5Ldelete_by_idx_hard_name_order_decreasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_HARD_LINK_NAME3, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if hard link '%s' exists\n",
+ LINK_DELETE_TEST_HARD_LINK_NAME3);
+ PART_ERROR(H5Ldelete_by_idx_hard_name_order_decreasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" hard link '%s' did not exist before deletion\n",
+ LINK_DELETE_TEST_HARD_LINK_NAME3);
+ PART_ERROR(H5Ldelete_by_idx_hard_name_order_decreasing);
+ }
+
+ /* Delete a link */
+ if (H5Ldelete_by_idx(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_DEC, 2, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't delete hard link '%s' using H5Ldelete_by_idx by alphabetical order in "
+ "decreasing order\n",
+ LINK_DELETE_TEST_HARD_LINK_NAME);
+ PART_ERROR(H5Ldelete_by_idx_hard_name_order_decreasing);
+ }
+
+ /* Ensure that the link is gone and others remain */
+ if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_HARD_LINK_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if hard link '%s' exists\n",
+ LINK_DELETE_TEST_HARD_LINK_NAME);
+ PART_ERROR(H5Ldelete_by_idx_hard_name_order_decreasing);
+ }
+
+ if (link_exists) {
+ H5_FAILED();
+ HDprintf(" hard link '%s' exists after deletion!\n", LINK_DELETE_TEST_HARD_LINK_NAME);
+ PART_ERROR(H5Ldelete_by_idx_hard_name_order_decreasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_HARD_LINK_NAME2, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if hard link '%s' exists\n",
+ LINK_DELETE_TEST_HARD_LINK_NAME2);
+ PART_ERROR(H5Ldelete_by_idx_hard_name_order_decreasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" hard link '%s' did not exist after deletion of a different link!\n",
+ LINK_DELETE_TEST_HARD_LINK_NAME2);
+ PART_ERROR(H5Ldelete_by_idx_hard_name_order_decreasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_HARD_LINK_NAME3, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if hard link '%s' exists\n",
+ LINK_DELETE_TEST_HARD_LINK_NAME3);
+ PART_ERROR(H5Ldelete_by_idx_hard_name_order_decreasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" hard link '%s' did not exist after deletion of a different link!\n",
+ LINK_DELETE_TEST_HARD_LINK_NAME3);
+ PART_ERROR(H5Ldelete_by_idx_hard_name_order_decreasing);
+ }
+
+ /* Repeat until all links have been deleted */
+ if (H5Ldelete_by_idx(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_DEC, 1, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't delete hard link '%s' using H5Ldelete_by_idx by alphabetical order in "
+ "decreasing order\n",
+ LINK_DELETE_TEST_HARD_LINK_NAME2);
+ PART_ERROR(H5Ldelete_by_idx_hard_name_order_decreasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_HARD_LINK_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if hard link '%s' exists\n",
+ LINK_DELETE_TEST_HARD_LINK_NAME);
+ PART_ERROR(H5Ldelete_by_idx_hard_name_order_decreasing);
+ }
+
+ if (link_exists) {
+ H5_FAILED();
+ HDprintf(" hard link '%s' exists after deletion!\n", LINK_DELETE_TEST_HARD_LINK_NAME);
+ PART_ERROR(H5Ldelete_by_idx_hard_name_order_decreasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_HARD_LINK_NAME2, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if hard link '%s' exists\n",
+ LINK_DELETE_TEST_HARD_LINK_NAME2);
+ PART_ERROR(H5Ldelete_by_idx_hard_name_order_decreasing);
+ }
+
+ if (link_exists) {
+ H5_FAILED();
+ HDprintf(" hard link '%s' exists after deletion!\n", LINK_DELETE_TEST_HARD_LINK_NAME2);
+ PART_ERROR(H5Ldelete_by_idx_hard_name_order_decreasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_HARD_LINK_NAME3, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if hard link '%s' exists\n",
+ LINK_DELETE_TEST_HARD_LINK_NAME3);
+ PART_ERROR(H5Ldelete_by_idx_hard_name_order_decreasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" hard link '%s' did not exist after deletion of a different link!\n",
+ LINK_DELETE_TEST_HARD_LINK_NAME3);
+ PART_ERROR(H5Ldelete_by_idx_hard_name_order_decreasing);
+ }
+
+ if (H5Ldelete_by_idx(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_DEC, 0, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't delete hard link '%s' using H5Ldelete_by_idx by alphabetical order in "
+ "decreasing order\n",
+ LINK_DELETE_TEST_HARD_LINK_NAME3);
+ PART_ERROR(H5Ldelete_by_idx_hard_name_order_decreasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_HARD_LINK_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if hard link '%s' exists\n",
+ LINK_DELETE_TEST_HARD_LINK_NAME);
+ PART_ERROR(H5Ldelete_by_idx_hard_name_order_decreasing);
+ }
+
+ if (link_exists) {
+ H5_FAILED();
+ HDprintf(" hard link '%s' exists after deletion!\n", LINK_DELETE_TEST_HARD_LINK_NAME);
+ PART_ERROR(H5Ldelete_by_idx_hard_name_order_decreasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_HARD_LINK_NAME2, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if hard link '%s' exists\n",
+ LINK_DELETE_TEST_HARD_LINK_NAME2);
+ PART_ERROR(H5Ldelete_by_idx_hard_name_order_decreasing);
+ }
+
+ if (link_exists) {
+ H5_FAILED();
+ HDprintf(" hard link '%s' exists after deletion!\n", LINK_DELETE_TEST_HARD_LINK_NAME2);
+ PART_ERROR(H5Ldelete_by_idx_hard_name_order_decreasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_HARD_LINK_NAME3, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if hard link '%s' exists\n",
+ LINK_DELETE_TEST_HARD_LINK_NAME3);
+ PART_ERROR(H5Ldelete_by_idx_hard_name_order_decreasing);
+ }
+
+ if (link_exists) {
+ H5_FAILED();
+ HDprintf(" hard link '%s' exists after deletion!\n", LINK_DELETE_TEST_HARD_LINK_NAME3);
+ PART_ERROR(H5Ldelete_by_idx_hard_name_order_decreasing);
+ }
+
+ if (H5Gclose(subgroup_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close group '%s'\n", LINK_DELETE_TEST_SUBGROUP8_NAME);
+ PART_ERROR(H5Ldelete_by_idx_hard_name_order_decreasing);
+ }
+
+ PASSED();
+#else
+ SKIPPED();
+ PART_EMPTY(H5Ldelete_by_idx_hard_name_order_decreasing);
+#endif
+ }
+ PART_END(H5Ldelete_by_idx_hard_name_order_decreasing);
+
+ H5E_BEGIN_TRY
+ {
+ H5Gclose(subgroup_id);
+ subgroup_id = H5I_INVALID_HID;
+ }
+ H5E_END_TRY;
+
+ PART_BEGIN(H5Ldelete_by_idx_soft_crt_order_increasing)
+ {
+ TESTING_2("H5Ldelete_by_idx on soft link by creation order in increasing order");
+
+ if ((subgroup_id = H5Gcreate2(group_id, LINK_DELETE_TEST_SUBGROUP9_NAME, H5P_DEFAULT, gcpl_id,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create subgroup '%s'\n", LINK_DELETE_TEST_SUBGROUP9_NAME);
+ PART_ERROR(H5Ldelete_by_idx_soft_crt_order_increasing);
+ }
+
+ /* Create several soft links */
+ if (H5Lcreate_soft("/" LINK_TEST_GROUP_NAME "/" LINK_DELETE_TEST_SUBGROUP_NAME
+ "/" LINK_DELETE_TEST_SUBGROUP9_NAME,
+ subgroup_id, LINK_DELETE_TEST_SOFT_LINK_NAME, H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create soft link '%s'\n", LINK_DELETE_TEST_SOFT_LINK_NAME);
+ PART_ERROR(H5Ldelete_by_idx_soft_crt_order_increasing);
+ }
+
+ if (H5Lcreate_soft("/" LINK_TEST_GROUP_NAME "/" LINK_DELETE_TEST_SUBGROUP_NAME
+ "/" LINK_DELETE_TEST_SUBGROUP9_NAME,
+ subgroup_id, LINK_DELETE_TEST_SOFT_LINK_NAME2, H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create soft link '%s'\n", LINK_DELETE_TEST_SOFT_LINK_NAME2);
+ PART_ERROR(H5Ldelete_by_idx_soft_crt_order_increasing);
+ }
+
+ if (H5Lcreate_soft("/" LINK_TEST_GROUP_NAME "/" LINK_DELETE_TEST_SUBGROUP_NAME
+ "/" LINK_DELETE_TEST_SUBGROUP9_NAME,
+ subgroup_id, LINK_DELETE_TEST_SOFT_LINK_NAME3, H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create soft link '%s'\n", LINK_DELETE_TEST_SOFT_LINK_NAME3);
+ PART_ERROR(H5Ldelete_by_idx_soft_crt_order_increasing);
+ }
+
+ /* Verify the links have been created */
+ if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_SOFT_LINK_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if soft link '%s' exists\n",
+ LINK_DELETE_TEST_SOFT_LINK_NAME);
+ PART_ERROR(H5Ldelete_by_idx_soft_crt_order_increasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" soft link '%s' did not exist before deletion\n",
+ LINK_DELETE_TEST_SOFT_LINK_NAME);
+ PART_ERROR(H5Ldelete_by_idx_soft_crt_order_increasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_SOFT_LINK_NAME2, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if soft link '%s' exists\n",
+ LINK_DELETE_TEST_SOFT_LINK_NAME2);
+ PART_ERROR(H5Ldelete_by_idx_soft_crt_order_increasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" soft link '%s' did not exist before deletion\n",
+ LINK_DELETE_TEST_SOFT_LINK_NAME2);
+ PART_ERROR(H5Ldelete_by_idx_soft_crt_order_increasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_SOFT_LINK_NAME3, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if soft link '%s' exists\n",
+ LINK_DELETE_TEST_SOFT_LINK_NAME3);
+ PART_ERROR(H5Ldelete_by_idx_soft_crt_order_increasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" soft link '%s' did not exist before deletion\n",
+ LINK_DELETE_TEST_SOFT_LINK_NAME3);
+ PART_ERROR(H5Ldelete_by_idx_soft_crt_order_increasing);
+ }
+
+ /* Delete a link */
+ if (H5Ldelete_by_idx(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, 0, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't delete soft link '%s' using H5Ldelete_by_idx by creation order in "
+ "increasing order\n",
+ LINK_DELETE_TEST_SOFT_LINK_NAME);
+ PART_ERROR(H5Ldelete_by_idx_soft_crt_order_increasing);
+ }
+
+ /* Ensure that the link is gone and others remain */
+ if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_SOFT_LINK_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if soft link '%s' exists\n",
+ LINK_DELETE_TEST_SOFT_LINK_NAME);
+ PART_ERROR(H5Ldelete_by_idx_soft_crt_order_increasing);
+ }
+
+ if (link_exists) {
+ H5_FAILED();
+ HDprintf(" soft link '%s' exists after deletion!\n", LINK_DELETE_TEST_SOFT_LINK_NAME);
+ PART_ERROR(H5Ldelete_by_idx_soft_crt_order_increasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_SOFT_LINK_NAME2, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if soft link '%s' exists\n",
+ LINK_DELETE_TEST_SOFT_LINK_NAME2);
+ PART_ERROR(H5Ldelete_by_idx_soft_crt_order_increasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" soft link '%s' did not exist after deletion of a different link!\n",
+ LINK_DELETE_TEST_SOFT_LINK_NAME2);
+ PART_ERROR(H5Ldelete_by_idx_soft_crt_order_increasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_SOFT_LINK_NAME3, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if soft link '%s' exists\n",
+ LINK_DELETE_TEST_SOFT_LINK_NAME3);
+ PART_ERROR(H5Ldelete_by_idx_soft_crt_order_increasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" soft link '%s' did not exist after deletion of a different link!\n",
+ LINK_DELETE_TEST_SOFT_LINK_NAME3);
+ PART_ERROR(H5Ldelete_by_idx_soft_crt_order_increasing);
+ }
+
+ /* Repeat until all links have been deleted */
+ if (H5Ldelete_by_idx(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, 0, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't delete soft link '%s' using H5Ldelete_by_idx by creation order in "
+ "increasing order\n",
+ LINK_DELETE_TEST_SOFT_LINK_NAME2);
+ PART_ERROR(H5Ldelete_by_idx_soft_crt_order_increasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_SOFT_LINK_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if soft link '%s' exists\n",
+ LINK_DELETE_TEST_SOFT_LINK_NAME);
+ PART_ERROR(H5Ldelete_by_idx_soft_crt_order_increasing);
+ }
+
+ if (link_exists) {
+ H5_FAILED();
+ HDprintf(" soft link '%s' exists after deletion!\n", LINK_DELETE_TEST_SOFT_LINK_NAME);
+ PART_ERROR(H5Ldelete_by_idx_soft_crt_order_increasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_SOFT_LINK_NAME2, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if soft link '%s' exists\n",
+ LINK_DELETE_TEST_SOFT_LINK_NAME2);
+ PART_ERROR(H5Ldelete_by_idx_soft_crt_order_increasing);
+ }
+
+ if (link_exists) {
+ H5_FAILED();
+ HDprintf(" soft link '%s' exists after deletion!\n", LINK_DELETE_TEST_SOFT_LINK_NAME2);
+ PART_ERROR(H5Ldelete_by_idx_soft_crt_order_increasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_SOFT_LINK_NAME3, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if soft link '%s' exists\n",
+ LINK_DELETE_TEST_SOFT_LINK_NAME3);
+ PART_ERROR(H5Ldelete_by_idx_soft_crt_order_increasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" soft link '%s' did not exist after deletion of a different link!\n",
+ LINK_DELETE_TEST_SOFT_LINK_NAME3);
+ PART_ERROR(H5Ldelete_by_idx_soft_crt_order_increasing);
+ }
+
+ if (H5Ldelete_by_idx(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, 0, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't delete soft link '%s' using H5Ldelete_by_idx by creation order in "
+ "increasing order\n",
+ LINK_DELETE_TEST_SOFT_LINK_NAME3);
+ PART_ERROR(H5Ldelete_by_idx_soft_crt_order_increasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_SOFT_LINK_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if soft link '%s' exists\n",
+ LINK_DELETE_TEST_SOFT_LINK_NAME);
+ PART_ERROR(H5Ldelete_by_idx_soft_crt_order_increasing);
+ }
+
+ if (link_exists) {
+ H5_FAILED();
+ HDprintf(" soft link '%s' exists after deletion!\n", LINK_DELETE_TEST_SOFT_LINK_NAME);
+ PART_ERROR(H5Ldelete_by_idx_soft_crt_order_increasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_SOFT_LINK_NAME2, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if soft link '%s' exists\n",
+ LINK_DELETE_TEST_SOFT_LINK_NAME2);
+ PART_ERROR(H5Ldelete_by_idx_soft_crt_order_increasing);
+ }
+
+ if (link_exists) {
+ H5_FAILED();
+ HDprintf(" soft link '%s' exists after deletion!\n", LINK_DELETE_TEST_SOFT_LINK_NAME2);
+ PART_ERROR(H5Ldelete_by_idx_soft_crt_order_increasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_SOFT_LINK_NAME3, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if soft link '%s' exists\n",
+ LINK_DELETE_TEST_SOFT_LINK_NAME3);
+ PART_ERROR(H5Ldelete_by_idx_soft_crt_order_increasing);
+ }
+
+ if (link_exists) {
+ H5_FAILED();
+ HDprintf(" soft link '%s' exists after deletion!\n", LINK_DELETE_TEST_SOFT_LINK_NAME3);
+ PART_ERROR(H5Ldelete_by_idx_soft_crt_order_increasing);
+ }
+
+ if (H5Gclose(subgroup_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close group '%s'\n", LINK_DELETE_TEST_SUBGROUP9_NAME);
+ PART_ERROR(H5Ldelete_by_idx_soft_crt_order_increasing);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Ldelete_by_idx_soft_crt_order_increasing);
+
+ H5E_BEGIN_TRY
+ {
+ H5Gclose(subgroup_id);
+ subgroup_id = H5I_INVALID_HID;
+ }
+ H5E_END_TRY;
+
+ PART_BEGIN(H5Ldelete_by_idx_soft_crt_order_decreasing)
+ {
+ TESTING_2("H5Ldelete_by_idx on soft link by creation order in decreasing order");
+
+ if ((subgroup_id = H5Gcreate2(group_id, LINK_DELETE_TEST_SUBGROUP10_NAME, H5P_DEFAULT, gcpl_id,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create subgroup '%s'\n", LINK_DELETE_TEST_SUBGROUP10_NAME);
+ PART_ERROR(H5Ldelete_by_idx_soft_crt_order_decreasing);
+ }
+
+ /* Create several soft links */
+ if (H5Lcreate_soft("/" LINK_TEST_GROUP_NAME "/" LINK_DELETE_TEST_SUBGROUP_NAME
+ "/" LINK_DELETE_TEST_SUBGROUP10_NAME,
+ subgroup_id, LINK_DELETE_TEST_SOFT_LINK_NAME, H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create soft link '%s'\n", LINK_DELETE_TEST_SOFT_LINK_NAME);
+ PART_ERROR(H5Ldelete_by_idx_soft_crt_order_decreasing);
+ }
+
+ if (H5Lcreate_soft("/" LINK_TEST_GROUP_NAME "/" LINK_DELETE_TEST_SUBGROUP_NAME
+ "/" LINK_DELETE_TEST_SUBGROUP10_NAME,
+ subgroup_id, LINK_DELETE_TEST_SOFT_LINK_NAME2, H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create soft link '%s'\n", LINK_DELETE_TEST_SOFT_LINK_NAME2);
+ PART_ERROR(H5Ldelete_by_idx_soft_crt_order_decreasing);
+ }
+
+ if (H5Lcreate_soft("/" LINK_TEST_GROUP_NAME "/" LINK_DELETE_TEST_SUBGROUP_NAME
+ "/" LINK_DELETE_TEST_SUBGROUP10_NAME,
+ subgroup_id, LINK_DELETE_TEST_SOFT_LINK_NAME3, H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create soft link '%s'\n", LINK_DELETE_TEST_SOFT_LINK_NAME3);
+ PART_ERROR(H5Ldelete_by_idx_soft_crt_order_decreasing);
+ }
+
+ /* Verify the links have been created */
+ if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_SOFT_LINK_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if soft link '%s' exists\n",
+ LINK_DELETE_TEST_SOFT_LINK_NAME);
+ PART_ERROR(H5Ldelete_by_idx_soft_crt_order_decreasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" soft link '%s' did not exist before deletion\n",
+ LINK_DELETE_TEST_SOFT_LINK_NAME);
+ PART_ERROR(H5Ldelete_by_idx_soft_crt_order_decreasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_SOFT_LINK_NAME2, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if soft link '%s' exists\n",
+ LINK_DELETE_TEST_SOFT_LINK_NAME2);
+ PART_ERROR(H5Ldelete_by_idx_soft_crt_order_decreasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" soft link '%s' did not exist before deletion\n",
+ LINK_DELETE_TEST_SOFT_LINK_NAME2);
+ PART_ERROR(H5Ldelete_by_idx_soft_crt_order_decreasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_SOFT_LINK_NAME3, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if soft link '%s' exists\n",
+ LINK_DELETE_TEST_SOFT_LINK_NAME3);
+ PART_ERROR(H5Ldelete_by_idx_soft_crt_order_decreasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" soft link '%s' did not exist before deletion\n",
+ LINK_DELETE_TEST_SOFT_LINK_NAME3);
+ PART_ERROR(H5Ldelete_by_idx_soft_crt_order_decreasing);
+ }
+
+ /* Delete a link */
+ if (H5Ldelete_by_idx(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_DEC, 2, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't delete soft link '%s' using H5Ldelete_by_idx by creation order in "
+ "decreasing order\n",
+ LINK_DELETE_TEST_SOFT_LINK_NAME);
+ PART_ERROR(H5Ldelete_by_idx_soft_crt_order_decreasing);
+ }
+
+ /* Ensure that the link is gone and others remain */
+ if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_SOFT_LINK_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if soft link '%s' exists\n",
+ LINK_DELETE_TEST_SOFT_LINK_NAME);
+ PART_ERROR(H5Ldelete_by_idx_soft_crt_order_decreasing);
+ }
+
+ if (link_exists) {
+ H5_FAILED();
+ HDprintf(" soft link '%s' exists after deletion!\n", LINK_DELETE_TEST_SOFT_LINK_NAME);
+ PART_ERROR(H5Ldelete_by_idx_soft_crt_order_decreasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_SOFT_LINK_NAME2, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if soft link '%s' exists\n",
+ LINK_DELETE_TEST_SOFT_LINK_NAME2);
+ PART_ERROR(H5Ldelete_by_idx_soft_crt_order_decreasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" soft link '%s' did not exist after deletion of a different link!\n",
+ LINK_DELETE_TEST_SOFT_LINK_NAME2);
+ PART_ERROR(H5Ldelete_by_idx_soft_crt_order_decreasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_SOFT_LINK_NAME3, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if soft link '%s' exists\n",
+ LINK_DELETE_TEST_SOFT_LINK_NAME3);
+ PART_ERROR(H5Ldelete_by_idx_soft_crt_order_decreasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" soft link '%s' did not exist after deletion of a different link!\n",
+ LINK_DELETE_TEST_SOFT_LINK_NAME3);
+ PART_ERROR(H5Ldelete_by_idx_soft_crt_order_decreasing);
+ }
+
+ /* Repeat until all links have been deleted */
+ if (H5Ldelete_by_idx(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_DEC, 1, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't delete soft link '%s' using H5Ldelete_by_idx by creation order in "
+ "decreasing order\n",
+ LINK_DELETE_TEST_SOFT_LINK_NAME2);
+ PART_ERROR(H5Ldelete_by_idx_soft_crt_order_decreasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_SOFT_LINK_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if soft link '%s' exists\n",
+ LINK_DELETE_TEST_SOFT_LINK_NAME);
+ PART_ERROR(H5Ldelete_by_idx_soft_crt_order_decreasing);
+ }
+
+ if (link_exists) {
+ H5_FAILED();
+ HDprintf(" soft link '%s' exists after deletion!\n", LINK_DELETE_TEST_SOFT_LINK_NAME);
+ PART_ERROR(H5Ldelete_by_idx_soft_crt_order_decreasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_SOFT_LINK_NAME2, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if soft link '%s' exists\n",
+ LINK_DELETE_TEST_SOFT_LINK_NAME2);
+ PART_ERROR(H5Ldelete_by_idx_soft_crt_order_decreasing);
+ }
+
+ if (link_exists) {
+ H5_FAILED();
+ HDprintf(" soft link '%s' exists after deletion!\n", LINK_DELETE_TEST_SOFT_LINK_NAME2);
+ PART_ERROR(H5Ldelete_by_idx_soft_crt_order_decreasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_SOFT_LINK_NAME3, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if soft link '%s' exists\n",
+ LINK_DELETE_TEST_SOFT_LINK_NAME3);
+ PART_ERROR(H5Ldelete_by_idx_soft_crt_order_decreasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" soft link '%s' did not exist after deletion of a different link!\n",
+ LINK_DELETE_TEST_SOFT_LINK_NAME3);
+ PART_ERROR(H5Ldelete_by_idx_soft_crt_order_decreasing);
+ }
+
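+            /* Delete the last remaining soft link */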
+ if (H5Ldelete_by_idx(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_DEC, 0, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't delete soft link '%s' using H5Ldelete_by_idx by creation order in "
+ "decreasing order\n",
+ LINK_DELETE_TEST_SOFT_LINK_NAME3);
+ PART_ERROR(H5Ldelete_by_idx_soft_crt_order_decreasing);
+ }
+
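+            /* Verify that all links have been deleted */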
+ if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_SOFT_LINK_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if soft link '%s' exists\n",
+ LINK_DELETE_TEST_SOFT_LINK_NAME);
+ PART_ERROR(H5Ldelete_by_idx_soft_crt_order_decreasing);
+ }
+
+ if (link_exists) {
+ H5_FAILED();
+ HDprintf(" soft link '%s' exists after deletion!\n", LINK_DELETE_TEST_SOFT_LINK_NAME);
+ PART_ERROR(H5Ldelete_by_idx_soft_crt_order_decreasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_SOFT_LINK_NAME2, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if soft link '%s' exists\n",
+ LINK_DELETE_TEST_SOFT_LINK_NAME2);
+ PART_ERROR(H5Ldelete_by_idx_soft_crt_order_decreasing);
+ }
+
+ if (link_exists) {
+ H5_FAILED();
+ HDprintf(" soft link '%s' exists after deletion!\n", LINK_DELETE_TEST_SOFT_LINK_NAME2);
+ PART_ERROR(H5Ldelete_by_idx_soft_crt_order_decreasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_SOFT_LINK_NAME3, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if soft link '%s' exists\n",
+ LINK_DELETE_TEST_SOFT_LINK_NAME3);
+ PART_ERROR(H5Ldelete_by_idx_soft_crt_order_decreasing);
+ }
+
+ if (link_exists) {
+ H5_FAILED();
+ HDprintf(" soft link '%s' exists after deletion!\n", LINK_DELETE_TEST_SOFT_LINK_NAME3);
+ PART_ERROR(H5Ldelete_by_idx_soft_crt_order_decreasing);
+ }
+
+ if (H5Gclose(subgroup_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close group '%s'\n", LINK_DELETE_TEST_SUBGROUP10_NAME);
+ PART_ERROR(H5Ldelete_by_idx_soft_crt_order_decreasing);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Ldelete_by_idx_soft_crt_order_decreasing);
+
+ H5E_BEGIN_TRY
+ {
+ H5Gclose(subgroup_id);
+ subgroup_id = H5I_INVALID_HID;
+ }
+ H5E_END_TRY;
+
+ PART_BEGIN(H5Ldelete_by_idx_soft_name_order_increasing)
+ {
+ TESTING_2("H5Ldelete_by_idx on soft link by alphabetical order in increasing order");
+
+ if ((subgroup_id = H5Gcreate2(group_id, LINK_DELETE_TEST_SUBGROUP11_NAME, H5P_DEFAULT, gcpl_id,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create subgroup '%s'\n", LINK_DELETE_TEST_SUBGROUP11_NAME);
+ PART_ERROR(H5Ldelete_by_idx_soft_name_order_increasing);
+ }
+
+ /* Create several soft links */
+ if (H5Lcreate_soft("/" LINK_TEST_GROUP_NAME "/" LINK_DELETE_TEST_SUBGROUP_NAME
+ "/" LINK_DELETE_TEST_SUBGROUP11_NAME,
+ subgroup_id, LINK_DELETE_TEST_SOFT_LINK_NAME, H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create soft link '%s'\n", LINK_DELETE_TEST_SOFT_LINK_NAME);
+ PART_ERROR(H5Ldelete_by_idx_soft_name_order_increasing);
+ }
+
+ if (H5Lcreate_soft("/" LINK_TEST_GROUP_NAME "/" LINK_DELETE_TEST_SUBGROUP_NAME
+ "/" LINK_DELETE_TEST_SUBGROUP11_NAME,
+ subgroup_id, LINK_DELETE_TEST_SOFT_LINK_NAME2, H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create soft link '%s'\n", LINK_DELETE_TEST_SOFT_LINK_NAME2);
+ PART_ERROR(H5Ldelete_by_idx_soft_name_order_increasing);
+ }
+
+ if (H5Lcreate_soft("/" LINK_TEST_GROUP_NAME "/" LINK_DELETE_TEST_SUBGROUP_NAME
+ "/" LINK_DELETE_TEST_SUBGROUP11_NAME,
+ subgroup_id, LINK_DELETE_TEST_SOFT_LINK_NAME3, H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create soft link '%s'\n", LINK_DELETE_TEST_SOFT_LINK_NAME3);
+ PART_ERROR(H5Ldelete_by_idx_soft_name_order_increasing);
+ }
+
+ /* Verify the links have been created */
+ if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_SOFT_LINK_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if soft link '%s' exists\n",
+ LINK_DELETE_TEST_SOFT_LINK_NAME);
+ PART_ERROR(H5Ldelete_by_idx_soft_name_order_increasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" soft link '%s' did not exist before deletion\n",
+ LINK_DELETE_TEST_SOFT_LINK_NAME);
+ PART_ERROR(H5Ldelete_by_idx_soft_name_order_increasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_SOFT_LINK_NAME2, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if soft link '%s' exists\n",
+ LINK_DELETE_TEST_SOFT_LINK_NAME2);
+ PART_ERROR(H5Ldelete_by_idx_soft_name_order_increasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" soft link '%s' did not exist before deletion\n",
+ LINK_DELETE_TEST_SOFT_LINK_NAME2);
+ PART_ERROR(H5Ldelete_by_idx_soft_name_order_increasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_SOFT_LINK_NAME3, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if soft link '%s' exists\n",
+ LINK_DELETE_TEST_SOFT_LINK_NAME3);
+ PART_ERROR(H5Ldelete_by_idx_soft_name_order_increasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" soft link '%s' did not exist before deletion\n",
+ LINK_DELETE_TEST_SOFT_LINK_NAME3);
+ PART_ERROR(H5Ldelete_by_idx_soft_name_order_increasing);
+ }
+
+ /* Delete a link */
+ if (H5Ldelete_by_idx(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_INC, 0, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't delete soft link '%s' using H5Ldelete_by_idx by alphabetical order in "
+ "increasing order\n",
+ LINK_DELETE_TEST_SOFT_LINK_NAME);
+ PART_ERROR(H5Ldelete_by_idx_soft_name_order_increasing);
+ }
+
+ /* Ensure that the link is gone and others remain */
+ if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_SOFT_LINK_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if soft link '%s' exists\n",
+ LINK_DELETE_TEST_SOFT_LINK_NAME);
+ PART_ERROR(H5Ldelete_by_idx_soft_name_order_increasing);
+ }
+
+ if (link_exists) {
+ H5_FAILED();
+ HDprintf(" soft link '%s' exists after deletion!\n", LINK_DELETE_TEST_SOFT_LINK_NAME);
+ PART_ERROR(H5Ldelete_by_idx_soft_name_order_increasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_SOFT_LINK_NAME2, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if soft link '%s' exists\n",
+ LINK_DELETE_TEST_SOFT_LINK_NAME2);
+ PART_ERROR(H5Ldelete_by_idx_soft_name_order_increasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" soft link '%s' did not exist after deletion of a different link!\n",
+ LINK_DELETE_TEST_SOFT_LINK_NAME2);
+ PART_ERROR(H5Ldelete_by_idx_soft_name_order_increasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_SOFT_LINK_NAME3, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if soft link '%s' exists\n",
+ LINK_DELETE_TEST_SOFT_LINK_NAME3);
+ PART_ERROR(H5Ldelete_by_idx_soft_name_order_increasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" soft link '%s' did not exist after deletion of a different link!\n",
+ LINK_DELETE_TEST_SOFT_LINK_NAME3);
+ PART_ERROR(H5Ldelete_by_idx_soft_name_order_increasing);
+ }
+
+ /* Repeat until all links have been deleted */
+ if (H5Ldelete_by_idx(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_INC, 0, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't delete soft link '%s' using H5Ldelete_by_idx by alphabetical order in "
+ "increasing order\n",
+ LINK_DELETE_TEST_SOFT_LINK_NAME2);
+ PART_ERROR(H5Ldelete_by_idx_soft_name_order_increasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_SOFT_LINK_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if soft link '%s' exists\n",
+ LINK_DELETE_TEST_SOFT_LINK_NAME);
+ PART_ERROR(H5Ldelete_by_idx_soft_name_order_increasing);
+ }
+
+ if (link_exists) {
+ H5_FAILED();
+ HDprintf(" soft link '%s' exists after deletion!\n", LINK_DELETE_TEST_SOFT_LINK_NAME);
+ PART_ERROR(H5Ldelete_by_idx_soft_name_order_increasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_SOFT_LINK_NAME2, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if soft link '%s' exists\n",
+ LINK_DELETE_TEST_SOFT_LINK_NAME2);
+ PART_ERROR(H5Ldelete_by_idx_soft_name_order_increasing);
+ }
+
+ if (link_exists) {
+ H5_FAILED();
+ HDprintf(" soft link '%s' exists after deletion!\n", LINK_DELETE_TEST_SOFT_LINK_NAME2);
+ PART_ERROR(H5Ldelete_by_idx_soft_name_order_increasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_SOFT_LINK_NAME3, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if soft link '%s' exists\n",
+ LINK_DELETE_TEST_SOFT_LINK_NAME3);
+ PART_ERROR(H5Ldelete_by_idx_soft_name_order_increasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" soft link '%s' did not exist after deletion of a different link!\n",
+ LINK_DELETE_TEST_SOFT_LINK_NAME3);
+ PART_ERROR(H5Ldelete_by_idx_soft_name_order_increasing);
+ }
+
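+            /* Delete the last remaining soft link */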
+ if (H5Ldelete_by_idx(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_INC, 0, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't delete soft link '%s' using H5Ldelete_by_idx by alphabetical order in "
+ "increasing order\n",
+ LINK_DELETE_TEST_SOFT_LINK_NAME3);
+ PART_ERROR(H5Ldelete_by_idx_soft_name_order_increasing);
+ }
+
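+            /* Verify that all links have been deleted */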
+ if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_SOFT_LINK_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if soft link '%s' exists\n",
+ LINK_DELETE_TEST_SOFT_LINK_NAME);
+ PART_ERROR(H5Ldelete_by_idx_soft_name_order_increasing);
+ }
+
+ if (link_exists) {
+ H5_FAILED();
+ HDprintf(" soft link '%s' exists after deletion!\n", LINK_DELETE_TEST_SOFT_LINK_NAME);
+ PART_ERROR(H5Ldelete_by_idx_soft_name_order_increasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_SOFT_LINK_NAME2, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if soft link '%s' exists\n",
+ LINK_DELETE_TEST_SOFT_LINK_NAME2);
+ PART_ERROR(H5Ldelete_by_idx_soft_name_order_increasing);
+ }
+
+ if (link_exists) {
+ H5_FAILED();
+ HDprintf(" soft link '%s' exists after deletion!\n", LINK_DELETE_TEST_SOFT_LINK_NAME2);
+ PART_ERROR(H5Ldelete_by_idx_soft_name_order_increasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_SOFT_LINK_NAME3, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if soft link '%s' exists\n",
+ LINK_DELETE_TEST_SOFT_LINK_NAME3);
+ PART_ERROR(H5Ldelete_by_idx_soft_name_order_increasing);
+ }
+
+ if (link_exists) {
+ H5_FAILED();
+ HDprintf(" soft link '%s' exists after deletion!\n", LINK_DELETE_TEST_SOFT_LINK_NAME3);
+ PART_ERROR(H5Ldelete_by_idx_soft_name_order_increasing);
+ }
+
+ if (H5Gclose(subgroup_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close group '%s'\n", LINK_DELETE_TEST_SUBGROUP11_NAME);
+ PART_ERROR(H5Ldelete_by_idx_soft_name_order_increasing);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Ldelete_by_idx_soft_name_order_increasing);
+
+ H5E_BEGIN_TRY
+ {
+ H5Gclose(subgroup_id);
+ subgroup_id = H5I_INVALID_HID;
+ }
+ H5E_END_TRY;
+
+ PART_BEGIN(H5Ldelete_by_idx_soft_name_order_decreasing)
+ {
+ TESTING_2("H5Ldelete_by_idx on soft link by alphabetical order in decreasing order");
+#ifndef NO_DECREASING_ALPHA_ITER_ORDER
+ if ((subgroup_id = H5Gcreate2(group_id, LINK_DELETE_TEST_SUBGROUP12_NAME, H5P_DEFAULT, gcpl_id,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create subgroup '%s'\n", LINK_DELETE_TEST_SUBGROUP12_NAME);
+ PART_ERROR(H5Ldelete_by_idx_soft_name_order_decreasing);
+ }
+
+ /* Create several soft links */
+ if (H5Lcreate_soft("/" LINK_TEST_GROUP_NAME "/" LINK_DELETE_TEST_SUBGROUP_NAME
+ "/" LINK_DELETE_TEST_SUBGROUP12_NAME,
+ subgroup_id, LINK_DELETE_TEST_SOFT_LINK_NAME, H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create soft link '%s'\n", LINK_DELETE_TEST_SOFT_LINK_NAME);
+ PART_ERROR(H5Ldelete_by_idx_soft_name_order_decreasing);
+ }
+
+ if (H5Lcreate_soft("/" LINK_TEST_GROUP_NAME "/" LINK_DELETE_TEST_SUBGROUP_NAME
+ "/" LINK_DELETE_TEST_SUBGROUP12_NAME,
+ subgroup_id, LINK_DELETE_TEST_SOFT_LINK_NAME2, H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create soft link '%s'\n", LINK_DELETE_TEST_SOFT_LINK_NAME2);
+ PART_ERROR(H5Ldelete_by_idx_soft_name_order_decreasing);
+ }
+
+ if (H5Lcreate_soft("/" LINK_TEST_GROUP_NAME "/" LINK_DELETE_TEST_SUBGROUP_NAME
+ "/" LINK_DELETE_TEST_SUBGROUP12_NAME,
+ subgroup_id, LINK_DELETE_TEST_SOFT_LINK_NAME3, H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create soft link '%s'\n", LINK_DELETE_TEST_SOFT_LINK_NAME3);
+ PART_ERROR(H5Ldelete_by_idx_soft_name_order_decreasing);
+ }
+
+ /* Verify the links have been created */
+ if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_SOFT_LINK_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if soft link '%s' exists\n",
+ LINK_DELETE_TEST_SOFT_LINK_NAME);
+ PART_ERROR(H5Ldelete_by_idx_soft_name_order_decreasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" soft link '%s' did not exist before deletion\n",
+ LINK_DELETE_TEST_SOFT_LINK_NAME);
+ PART_ERROR(H5Ldelete_by_idx_soft_name_order_decreasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_SOFT_LINK_NAME2, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if soft link '%s' exists\n",
+ LINK_DELETE_TEST_SOFT_LINK_NAME2);
+ PART_ERROR(H5Ldelete_by_idx_soft_name_order_decreasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" soft link '%s' did not exist before deletion\n",
+ LINK_DELETE_TEST_SOFT_LINK_NAME2);
+ PART_ERROR(H5Ldelete_by_idx_soft_name_order_decreasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_SOFT_LINK_NAME3, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if soft link '%s' exists\n",
+ LINK_DELETE_TEST_SOFT_LINK_NAME3);
+ PART_ERROR(H5Ldelete_by_idx_soft_name_order_decreasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" soft link '%s' did not exist before deletion\n",
+ LINK_DELETE_TEST_SOFT_LINK_NAME3);
+ PART_ERROR(H5Ldelete_by_idx_soft_name_order_decreasing);
+ }
+
+ /* Delete a link */
+ if (H5Ldelete_by_idx(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_DEC, 2, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't delete soft link '%s' using H5Ldelete_by_idx by alphabetical order in "
+ "decreasing order\n",
+ LINK_DELETE_TEST_SOFT_LINK_NAME);
+ PART_ERROR(H5Ldelete_by_idx_soft_name_order_decreasing);
+ }
+
+ /* Ensure that the link is gone and others remain */
+ if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_SOFT_LINK_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if soft link '%s' exists\n",
+ LINK_DELETE_TEST_SOFT_LINK_NAME);
+ PART_ERROR(H5Ldelete_by_idx_soft_name_order_decreasing);
+ }
+
+ if (link_exists) {
+ H5_FAILED();
+ HDprintf(" soft link '%s' exists after deletion!\n", LINK_DELETE_TEST_SOFT_LINK_NAME);
+ PART_ERROR(H5Ldelete_by_idx_soft_name_order_decreasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_SOFT_LINK_NAME2, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if soft link '%s' exists\n",
+ LINK_DELETE_TEST_SOFT_LINK_NAME2);
+ PART_ERROR(H5Ldelete_by_idx_soft_name_order_decreasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" soft link '%s' did not exist after deletion of a different link!\n",
+ LINK_DELETE_TEST_SOFT_LINK_NAME2);
+ PART_ERROR(H5Ldelete_by_idx_soft_name_order_decreasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_SOFT_LINK_NAME3, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if soft link '%s' exists\n",
+ LINK_DELETE_TEST_SOFT_LINK_NAME3);
+ PART_ERROR(H5Ldelete_by_idx_soft_name_order_decreasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" soft link '%s' did not exist after deletion of a different link!\n",
+ LINK_DELETE_TEST_SOFT_LINK_NAME3);
+ PART_ERROR(H5Ldelete_by_idx_soft_name_order_decreasing);
+ }
+
+ /* Repeat until all links have been deleted */
+ if (H5Ldelete_by_idx(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_DEC, 1, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't delete soft link '%s' using H5Ldelete_by_idx by alphabetical order in "
+ "decreasing order\n",
+ LINK_DELETE_TEST_SOFT_LINK_NAME2);
+ PART_ERROR(H5Ldelete_by_idx_soft_name_order_decreasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_SOFT_LINK_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if soft link '%s' exists\n",
+ LINK_DELETE_TEST_SOFT_LINK_NAME);
+ PART_ERROR(H5Ldelete_by_idx_soft_name_order_decreasing);
+ }
+
+ if (link_exists) {
+ H5_FAILED();
+ HDprintf(" soft link '%s' exists after deletion!\n", LINK_DELETE_TEST_SOFT_LINK_NAME);
+ PART_ERROR(H5Ldelete_by_idx_soft_name_order_decreasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_SOFT_LINK_NAME2, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if soft link '%s' exists\n",
+ LINK_DELETE_TEST_SOFT_LINK_NAME2);
+ PART_ERROR(H5Ldelete_by_idx_soft_name_order_decreasing);
+ }
+
+ if (link_exists) {
+ H5_FAILED();
+ HDprintf(" soft link '%s' exists after deletion!\n", LINK_DELETE_TEST_SOFT_LINK_NAME2);
+ PART_ERROR(H5Ldelete_by_idx_soft_name_order_decreasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_SOFT_LINK_NAME3, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if soft link '%s' exists\n",
+ LINK_DELETE_TEST_SOFT_LINK_NAME3);
+ PART_ERROR(H5Ldelete_by_idx_soft_name_order_decreasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" soft link '%s' did not exist after deletion of a different link!\n",
+ LINK_DELETE_TEST_SOFT_LINK_NAME3);
+ PART_ERROR(H5Ldelete_by_idx_soft_name_order_decreasing);
+ }
+
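+            /* Delete the last remaining soft link */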
+ if (H5Ldelete_by_idx(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_DEC, 0, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't delete soft link '%s' using H5Ldelete_by_idx by alphabetical order in "
+ "decreasing order\n",
+ LINK_DELETE_TEST_SOFT_LINK_NAME3);
+ PART_ERROR(H5Ldelete_by_idx_soft_name_order_decreasing);
+ }
+
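+            /* Verify that all links have been deleted */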
+ if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_SOFT_LINK_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if soft link '%s' exists\n",
+ LINK_DELETE_TEST_SOFT_LINK_NAME);
+ PART_ERROR(H5Ldelete_by_idx_soft_name_order_decreasing);
+ }
+
+ if (link_exists) {
+ H5_FAILED();
+ HDprintf(" soft link '%s' exists after deletion!\n", LINK_DELETE_TEST_SOFT_LINK_NAME);
+ PART_ERROR(H5Ldelete_by_idx_soft_name_order_decreasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_SOFT_LINK_NAME2, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if soft link '%s' exists\n",
+ LINK_DELETE_TEST_SOFT_LINK_NAME2);
+ PART_ERROR(H5Ldelete_by_idx_soft_name_order_decreasing);
+ }
+
+ if (link_exists) {
+ H5_FAILED();
+ HDprintf(" soft link '%s' exists after deletion!\n", LINK_DELETE_TEST_SOFT_LINK_NAME2);
+ PART_ERROR(H5Ldelete_by_idx_soft_name_order_decreasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_SOFT_LINK_NAME3, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if soft link '%s' exists\n",
+ LINK_DELETE_TEST_SOFT_LINK_NAME3);
+ PART_ERROR(H5Ldelete_by_idx_soft_name_order_decreasing);
+ }
+
+ if (link_exists) {
+ H5_FAILED();
+ HDprintf(" soft link '%s' exists after deletion!\n", LINK_DELETE_TEST_SOFT_LINK_NAME3);
+ PART_ERROR(H5Ldelete_by_idx_soft_name_order_decreasing);
+ }
+
+ if (H5Gclose(subgroup_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close group '%s'\n", LINK_DELETE_TEST_SUBGROUP12_NAME);
+ PART_ERROR(H5Ldelete_by_idx_soft_name_order_decreasing);
+ }
+
+ PASSED();
+#else
+ SKIPPED();
+ PART_EMPTY(H5Ldelete_by_idx_soft_name_order_decreasing);
+#endif
+ }
+ PART_END(H5Ldelete_by_idx_soft_name_order_decreasing);
+
+ H5E_BEGIN_TRY
+ {
+ H5Gclose(subgroup_id);
+ subgroup_id = H5I_INVALID_HID;
+ }
+ H5E_END_TRY;
+
+ PART_BEGIN(H5Ldelete_by_idx_external_crt_order_increasing)
+ {
+ TESTING_2("H5Ldelete_by_idx on external link by creation order in increasing order");
+#ifndef NO_EXTERNAL_LINKS
+ /* Create file for external link to reference */
+ HDsnprintf(ext_link_filename, H5_API_TEST_FILENAME_MAX_LENGTH, "%s",
+ EXTERNAL_LINK_TEST_FILE_NAME);
+
+ if ((ext_file_id = H5Fcreate(ext_link_filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create file '%s' for external link to reference\n", ext_link_filename);
+ PART_ERROR(H5Ldelete_by_idx_external_crt_order_increasing);
+ }
+
+ if (H5Fclose(ext_file_id) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't close file '%s'\n", ext_link_filename);
+ PART_ERROR(H5Ldelete_by_idx_external_crt_order_increasing);
+ }
+
+ if ((subgroup_id = H5Gcreate2(group_id, LINK_DELETE_TEST_SUBGROUP13_NAME, H5P_DEFAULT, gcpl_id,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create subgroup '%s'\n", LINK_DELETE_TEST_SUBGROUP13_NAME);
+ PART_ERROR(H5Ldelete_by_idx_external_crt_order_increasing);
+ }
+
+ /* Create several external links */
+ if (H5Lcreate_external(ext_link_filename, "/", subgroup_id, LINK_DELETE_TEST_EXTERNAL_LINK_NAME,
+ H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create external link '%s'\n", LINK_DELETE_TEST_EXTERNAL_LINK_NAME);
+ PART_ERROR(H5Ldelete_by_idx_external_crt_order_increasing);
+ }
+
+ if (H5Lcreate_external(ext_link_filename, "/", subgroup_id, LINK_DELETE_TEST_EXTERNAL_LINK_NAME2,
+ H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create external link '%s'\n", LINK_DELETE_TEST_EXTERNAL_LINK_NAME2);
+ PART_ERROR(H5Ldelete_by_idx_external_crt_order_increasing);
+ }
+
+ if (H5Lcreate_external(ext_link_filename, "/", subgroup_id, LINK_DELETE_TEST_EXTERNAL_LINK_NAME3,
+ H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create external link '%s'\n", LINK_DELETE_TEST_EXTERNAL_LINK_NAME3);
+ PART_ERROR(H5Ldelete_by_idx_external_crt_order_increasing);
+ }
+
+ /* Verify the links have been created */
+ if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_EXTERNAL_LINK_NAME, H5P_DEFAULT)) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if external link '%s' exists\n",
+ LINK_DELETE_TEST_EXTERNAL_LINK_NAME);
+ PART_ERROR(H5Ldelete_by_idx_external_crt_order_increasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" external link '%s' did not exist before deletion\n",
+ LINK_DELETE_TEST_EXTERNAL_LINK_NAME);
+ PART_ERROR(H5Ldelete_by_idx_external_crt_order_increasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_EXTERNAL_LINK_NAME2, H5P_DEFAULT)) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if external link '%s' exists\n",
+ LINK_DELETE_TEST_EXTERNAL_LINK_NAME2);
+ PART_ERROR(H5Ldelete_by_idx_external_crt_order_increasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" external link '%s' did not exist before deletion\n",
+ LINK_DELETE_TEST_EXTERNAL_LINK_NAME2);
+ PART_ERROR(H5Ldelete_by_idx_external_crt_order_increasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_EXTERNAL_LINK_NAME3, H5P_DEFAULT)) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if external link '%s' exists\n",
+ LINK_DELETE_TEST_EXTERNAL_LINK_NAME3);
+ PART_ERROR(H5Ldelete_by_idx_external_crt_order_increasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" external link '%s' did not exist before deletion\n",
+ LINK_DELETE_TEST_EXTERNAL_LINK_NAME3);
+ PART_ERROR(H5Ldelete_by_idx_external_crt_order_increasing);
+ }
+
+ /* Delete a link */
+ if (H5Ldelete_by_idx(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, 0, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't delete external link '%s' using H5Ldelete_by_idx by creation order in "
+ "increasing order\n",
+ LINK_DELETE_TEST_EXTERNAL_LINK_NAME);
+ PART_ERROR(H5Ldelete_by_idx_external_crt_order_increasing);
+ }
+
+ /* Ensure that the link is gone and others remain */
+ if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_EXTERNAL_LINK_NAME, H5P_DEFAULT)) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if external link '%s' exists\n",
+ LINK_DELETE_TEST_EXTERNAL_LINK_NAME);
+ PART_ERROR(H5Ldelete_by_idx_external_crt_order_increasing);
+ }
+
+ if (link_exists) {
+ H5_FAILED();
+ HDprintf(" external link '%s' exists after deletion!\n",
+ LINK_DELETE_TEST_EXTERNAL_LINK_NAME);
+ PART_ERROR(H5Ldelete_by_idx_external_crt_order_increasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_EXTERNAL_LINK_NAME2, H5P_DEFAULT)) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if external link '%s' exists\n",
+ LINK_DELETE_TEST_EXTERNAL_LINK_NAME2);
+ PART_ERROR(H5Ldelete_by_idx_external_crt_order_increasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" external link '%s' did not exist after deletion of a different link!\n",
+ LINK_DELETE_TEST_EXTERNAL_LINK_NAME2);
+ PART_ERROR(H5Ldelete_by_idx_external_crt_order_increasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_EXTERNAL_LINK_NAME3, H5P_DEFAULT)) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if external link '%s' exists\n",
+ LINK_DELETE_TEST_EXTERNAL_LINK_NAME3);
+ PART_ERROR(H5Ldelete_by_idx_external_crt_order_increasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" external link '%s' did not exist after deletion of a different link!\n",
+ LINK_DELETE_TEST_EXTERNAL_LINK_NAME3);
+ PART_ERROR(H5Ldelete_by_idx_external_crt_order_increasing);
+ }
+
+ /* Repeat until all links have been deleted */
+ if (H5Ldelete_by_idx(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, 0, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't delete external link '%s' using H5Ldelete_by_idx by creation order in "
+ "increasing order\n",
+ LINK_DELETE_TEST_EXTERNAL_LINK_NAME2);
+ PART_ERROR(H5Ldelete_by_idx_external_crt_order_increasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_EXTERNAL_LINK_NAME, H5P_DEFAULT)) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if external link '%s' exists\n",
+ LINK_DELETE_TEST_EXTERNAL_LINK_NAME);
+ PART_ERROR(H5Ldelete_by_idx_external_crt_order_increasing);
+ }
+
+ if (link_exists) {
+ H5_FAILED();
+ HDprintf(" external link '%s' exists after deletion!\n",
+ LINK_DELETE_TEST_EXTERNAL_LINK_NAME);
+ PART_ERROR(H5Ldelete_by_idx_external_crt_order_increasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_EXTERNAL_LINK_NAME2, H5P_DEFAULT)) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if external link '%s' exists\n",
+ LINK_DELETE_TEST_EXTERNAL_LINK_NAME2);
+ PART_ERROR(H5Ldelete_by_idx_external_crt_order_increasing);
+ }
+
+ if (link_exists) {
+ H5_FAILED();
+ HDprintf(" external link '%s' exists after deletion!\n",
+ LINK_DELETE_TEST_EXTERNAL_LINK_NAME2);
+ PART_ERROR(H5Ldelete_by_idx_external_crt_order_increasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_EXTERNAL_LINK_NAME3, H5P_DEFAULT)) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if external link '%s' exists\n",
+ LINK_DELETE_TEST_EXTERNAL_LINK_NAME3);
+ PART_ERROR(H5Ldelete_by_idx_external_crt_order_increasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" external link '%s' did not exist after deletion of a different link!\n",
+ LINK_DELETE_TEST_EXTERNAL_LINK_NAME3);
+ PART_ERROR(H5Ldelete_by_idx_external_crt_order_increasing);
+ }
+
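+            /* Delete the last remaining external link */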
+ if (H5Ldelete_by_idx(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, 0, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't delete external link '%s' using H5Ldelete_by_idx by creation order in "
+ "increasing order\n",
+ LINK_DELETE_TEST_EXTERNAL_LINK_NAME3);
+ PART_ERROR(H5Ldelete_by_idx_external_crt_order_increasing);
+ }
+
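+            /* Verify that all links have been deleted */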
+ if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_EXTERNAL_LINK_NAME, H5P_DEFAULT)) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if external link '%s' exists\n",
+ LINK_DELETE_TEST_EXTERNAL_LINK_NAME);
+ PART_ERROR(H5Ldelete_by_idx_external_crt_order_increasing);
+ }
+
+ if (link_exists) {
+ H5_FAILED();
+ HDprintf(" external link '%s' exists after deletion!\n",
+ LINK_DELETE_TEST_EXTERNAL_LINK_NAME);
+ PART_ERROR(H5Ldelete_by_idx_external_crt_order_increasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_EXTERNAL_LINK_NAME2, H5P_DEFAULT)) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if external link '%s' exists\n",
+ LINK_DELETE_TEST_EXTERNAL_LINK_NAME2);
+ PART_ERROR(H5Ldelete_by_idx_external_crt_order_increasing);
+ }
+
+ if (link_exists) {
+ H5_FAILED();
+ HDprintf(" external link '%s' exists after deletion!\n",
+ LINK_DELETE_TEST_EXTERNAL_LINK_NAME2);
+ PART_ERROR(H5Ldelete_by_idx_external_crt_order_increasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_EXTERNAL_LINK_NAME3, H5P_DEFAULT)) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if external link '%s' exists\n",
+ LINK_DELETE_TEST_EXTERNAL_LINK_NAME3);
+ PART_ERROR(H5Ldelete_by_idx_external_crt_order_increasing);
+ }
+
+ if (link_exists) {
+ H5_FAILED();
+ HDprintf(" external link '%s' exists after deletion!\n",
+ LINK_DELETE_TEST_EXTERNAL_LINK_NAME3);
+ PART_ERROR(H5Ldelete_by_idx_external_crt_order_increasing);
+ }
+
+ if (H5Gclose(subgroup_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close group '%s'\n", LINK_DELETE_TEST_SUBGROUP13_NAME);
+ PART_ERROR(H5Ldelete_by_idx_external_crt_order_increasing);
+ }
+
+ PASSED();
+#else
+ SKIPPED();
+ PART_EMPTY(H5Ldelete_by_idx_external_crt_order_increasing);
+#endif
+ }
+ PART_END(H5Ldelete_by_idx_external_crt_order_increasing);
+
+ H5E_BEGIN_TRY
+ {
+ H5Gclose(subgroup_id);
+ subgroup_id = H5I_INVALID_HID;
+ H5Fclose(ext_file_id);
+ ext_file_id = H5I_INVALID_HID;
+ }
+ H5E_END_TRY;
+
+ PART_BEGIN(H5Ldelete_by_idx_external_crt_order_decreasing)
+ {
+ TESTING_2("H5Ldelete_by_idx on external link by creation order in decreasing order");
+#ifndef NO_EXTERNAL_LINKS
+ /* Create file for external link to reference */
+ HDsnprintf(ext_link_filename, H5_API_TEST_FILENAME_MAX_LENGTH, "%s",
+ EXTERNAL_LINK_TEST_FILE_NAME);
+
+ if ((ext_file_id = H5Fcreate(ext_link_filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create file '%s' for external link to reference\n", ext_link_filename);
+ PART_ERROR(H5Ldelete_by_idx_external_crt_order_decreasing);
+ }
+
+ if (H5Fclose(ext_file_id) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't close file '%s'\n", ext_link_filename);
+ PART_ERROR(H5Ldelete_by_idx_external_crt_order_decreasing);
+ }
+
+ if ((subgroup_id = H5Gcreate2(group_id, LINK_DELETE_TEST_SUBGROUP14_NAME, H5P_DEFAULT, gcpl_id,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create subgroup '%s'\n", LINK_DELETE_TEST_SUBGROUP14_NAME);
+ PART_ERROR(H5Ldelete_by_idx_external_crt_order_decreasing);
+ }
+
+ /* Create several external links */
+ if (H5Lcreate_external(ext_link_filename, "/", subgroup_id, LINK_DELETE_TEST_EXTERNAL_LINK_NAME,
+ H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create external link '%s'\n", LINK_DELETE_TEST_EXTERNAL_LINK_NAME);
+ PART_ERROR(H5Ldelete_by_idx_external_crt_order_decreasing);
+ }
+
+ if (H5Lcreate_external(ext_link_filename, "/", subgroup_id, LINK_DELETE_TEST_EXTERNAL_LINK_NAME2,
+ H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create external link '%s'\n", LINK_DELETE_TEST_EXTERNAL_LINK_NAME2);
+ PART_ERROR(H5Ldelete_by_idx_external_crt_order_decreasing);
+ }
+
+ if (H5Lcreate_external(ext_link_filename, "/", subgroup_id, LINK_DELETE_TEST_EXTERNAL_LINK_NAME3,
+ H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create external link '%s'\n", LINK_DELETE_TEST_EXTERNAL_LINK_NAME3);
+ PART_ERROR(H5Ldelete_by_idx_external_crt_order_decreasing);
+ }
+
+ /* Verify the links have been created */
+ if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_EXTERNAL_LINK_NAME, H5P_DEFAULT)) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if external link '%s' exists\n",
+ LINK_DELETE_TEST_EXTERNAL_LINK_NAME);
+ PART_ERROR(H5Ldelete_by_idx_external_crt_order_decreasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" external link '%s' did not exist before deletion\n",
+ LINK_DELETE_TEST_EXTERNAL_LINK_NAME);
+ PART_ERROR(H5Ldelete_by_idx_external_crt_order_decreasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_EXTERNAL_LINK_NAME2, H5P_DEFAULT)) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if external link '%s' exists\n",
+ LINK_DELETE_TEST_EXTERNAL_LINK_NAME2);
+ PART_ERROR(H5Ldelete_by_idx_external_crt_order_decreasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" external link '%s' did not exist before deletion\n",
+ LINK_DELETE_TEST_EXTERNAL_LINK_NAME2);
+ PART_ERROR(H5Ldelete_by_idx_external_crt_order_decreasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_EXTERNAL_LINK_NAME3, H5P_DEFAULT)) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if external link '%s' exists\n",
+ LINK_DELETE_TEST_EXTERNAL_LINK_NAME3);
+ PART_ERROR(H5Ldelete_by_idx_external_crt_order_decreasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" external link '%s' did not exist before deletion\n",
+ LINK_DELETE_TEST_EXTERNAL_LINK_NAME3);
+ PART_ERROR(H5Ldelete_by_idx_external_crt_order_decreasing);
+ }
+
+ /* Delete a link */
+ if (H5Ldelete_by_idx(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_DEC, 2, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't delete external link '%s' using H5Ldelete_by_idx by creation order in "
+ "decreasing order\n",
+ LINK_DELETE_TEST_EXTERNAL_LINK_NAME);
+ PART_ERROR(H5Ldelete_by_idx_external_crt_order_decreasing);
+ }
+
+ /* Ensure that the link is gone and others remain */
+ if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_EXTERNAL_LINK_NAME, H5P_DEFAULT)) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if external link '%s' exists\n",
+ LINK_DELETE_TEST_EXTERNAL_LINK_NAME);
+ PART_ERROR(H5Ldelete_by_idx_external_crt_order_decreasing);
+ }
+
+ if (link_exists) {
+ H5_FAILED();
+ HDprintf(" external link '%s' exists after deletion!\n",
+ LINK_DELETE_TEST_EXTERNAL_LINK_NAME);
+ PART_ERROR(H5Ldelete_by_idx_external_crt_order_decreasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_EXTERNAL_LINK_NAME2, H5P_DEFAULT)) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if external link '%s' exists\n",
+ LINK_DELETE_TEST_EXTERNAL_LINK_NAME2);
+ PART_ERROR(H5Ldelete_by_idx_external_crt_order_decreasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" external link '%s' did not exist after deletion of a different link!\n",
+ LINK_DELETE_TEST_EXTERNAL_LINK_NAME2);
+ PART_ERROR(H5Ldelete_by_idx_external_crt_order_decreasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_EXTERNAL_LINK_NAME3, H5P_DEFAULT)) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if external link '%s' exists\n",
+ LINK_DELETE_TEST_EXTERNAL_LINK_NAME3);
+ PART_ERROR(H5Ldelete_by_idx_external_crt_order_decreasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" external link '%s' did not exist after deletion of a different link!\n",
+ LINK_DELETE_TEST_EXTERNAL_LINK_NAME3);
+ PART_ERROR(H5Ldelete_by_idx_external_crt_order_decreasing);
+ }
+
+ /* Repeat until all links have been deleted */
+ if (H5Ldelete_by_idx(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_DEC, 1, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't delete external link '%s' using H5Ldelete_by_idx by creation order in "
+ "decreasing order\n",
+ LINK_DELETE_TEST_EXTERNAL_LINK_NAME2);
+ PART_ERROR(H5Ldelete_by_idx_external_crt_order_decreasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_EXTERNAL_LINK_NAME, H5P_DEFAULT)) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if external link '%s' exists\n",
+ LINK_DELETE_TEST_EXTERNAL_LINK_NAME);
+ PART_ERROR(H5Ldelete_by_idx_external_crt_order_decreasing);
+ }
+
+ if (link_exists) {
+ H5_FAILED();
+ HDprintf(" external link '%s' exists after deletion!\n",
+ LINK_DELETE_TEST_EXTERNAL_LINK_NAME);
+ PART_ERROR(H5Ldelete_by_idx_external_crt_order_decreasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_EXTERNAL_LINK_NAME2, H5P_DEFAULT)) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if external link '%s' exists\n",
+ LINK_DELETE_TEST_EXTERNAL_LINK_NAME2);
+ PART_ERROR(H5Ldelete_by_idx_external_crt_order_decreasing);
+ }
+
+ if (link_exists) {
+ H5_FAILED();
+ HDprintf(" external link '%s' exists after deletion!\n",
+ LINK_DELETE_TEST_EXTERNAL_LINK_NAME2);
+ PART_ERROR(H5Ldelete_by_idx_external_crt_order_decreasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_EXTERNAL_LINK_NAME3, H5P_DEFAULT)) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if external link '%s' exists\n",
+ LINK_DELETE_TEST_EXTERNAL_LINK_NAME3);
+ PART_ERROR(H5Ldelete_by_idx_external_crt_order_decreasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" external link '%s' did not exist after deletion of a different link!\n",
+ LINK_DELETE_TEST_EXTERNAL_LINK_NAME3);
+ PART_ERROR(H5Ldelete_by_idx_external_crt_order_decreasing);
+ }
+
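+            /* Delete the last remaining external link */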
+ if (H5Ldelete_by_idx(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_DEC, 0, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't delete external link '%s' using H5Ldelete_by_idx by creation order in "
+ "decreasing order\n",
+ LINK_DELETE_TEST_EXTERNAL_LINK_NAME3);
+ PART_ERROR(H5Ldelete_by_idx_external_crt_order_decreasing);
+ }
+
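+            /* Verify that all links have been deleted */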
+ if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_EXTERNAL_LINK_NAME, H5P_DEFAULT)) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if external link '%s' exists\n",
+ LINK_DELETE_TEST_EXTERNAL_LINK_NAME);
+ PART_ERROR(H5Ldelete_by_idx_external_crt_order_decreasing);
+ }
+
+ if (link_exists) {
+ H5_FAILED();
+ HDprintf(" external link '%s' exists after deletion!\n",
+ LINK_DELETE_TEST_EXTERNAL_LINK_NAME);
+ PART_ERROR(H5Ldelete_by_idx_external_crt_order_decreasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_EXTERNAL_LINK_NAME2, H5P_DEFAULT)) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if external link '%s' exists\n",
+ LINK_DELETE_TEST_EXTERNAL_LINK_NAME2);
+ PART_ERROR(H5Ldelete_by_idx_external_crt_order_decreasing);
+ }
+
+ if (link_exists) {
+ H5_FAILED();
+ HDprintf(" external link '%s' exists after deletion!\n",
+ LINK_DELETE_TEST_EXTERNAL_LINK_NAME2);
+ PART_ERROR(H5Ldelete_by_idx_external_crt_order_decreasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_EXTERNAL_LINK_NAME3, H5P_DEFAULT)) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if external link '%s' exists\n",
+ LINK_DELETE_TEST_EXTERNAL_LINK_NAME3);
+ PART_ERROR(H5Ldelete_by_idx_external_crt_order_decreasing);
+ }
+
+ if (link_exists) {
+ H5_FAILED();
+ HDprintf(" external link '%s' exists after deletion!\n",
+ LINK_DELETE_TEST_EXTERNAL_LINK_NAME3);
+ PART_ERROR(H5Ldelete_by_idx_external_crt_order_decreasing);
+ }
+
+ if (H5Gclose(subgroup_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close group '%s'\n", LINK_DELETE_TEST_SUBGROUP14_NAME);
+ PART_ERROR(H5Ldelete_by_idx_external_crt_order_decreasing);
+ }
+
+ PASSED();
+#else
+ SKIPPED();
+ PART_EMPTY(H5Ldelete_by_idx_external_crt_order_decreasing);
+#endif
+ }
+ PART_END(H5Ldelete_by_idx_external_crt_order_decreasing);
+
+ H5E_BEGIN_TRY
+ {
+ H5Gclose(subgroup_id);
+ subgroup_id = H5I_INVALID_HID;
+ H5Fclose(ext_file_id);
+ ext_file_id = H5I_INVALID_HID;
+ }
+ H5E_END_TRY;
+
+ PART_BEGIN(H5Ldelete_by_idx_external_name_order_increasing)
+ {
+ TESTING_2("H5Ldelete_by_idx on external link by alphabetical order in increasing order");
+#ifndef NO_EXTERNAL_LINKS
+ /* Create file for external link to reference */
+ HDsnprintf(ext_link_filename, H5_API_TEST_FILENAME_MAX_LENGTH, "%s",
+ EXTERNAL_LINK_TEST_FILE_NAME);
+
+ if ((ext_file_id = H5Fcreate(ext_link_filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create file '%s' for external link to reference\n", ext_link_filename);
+ PART_ERROR(H5Ldelete_by_idx_external_name_order_increasing);
+ }
+
+ if (H5Fclose(ext_file_id) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't close file '%s'\n", ext_link_filename);
+ PART_ERROR(H5Ldelete_by_idx_external_name_order_increasing);
+ }
+
+ if ((subgroup_id = H5Gcreate2(group_id, LINK_DELETE_TEST_SUBGROUP15_NAME, H5P_DEFAULT, gcpl_id,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create subgroup '%s'\n", LINK_DELETE_TEST_SUBGROUP15_NAME);
+ PART_ERROR(H5Ldelete_by_idx_external_name_order_increasing);
+ }
+
+ /* Create several external links */
+ if (H5Lcreate_external(ext_link_filename, "/", subgroup_id, LINK_DELETE_TEST_EXTERNAL_LINK_NAME,
+ H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create external link '%s'\n", LINK_DELETE_TEST_EXTERNAL_LINK_NAME);
+ PART_ERROR(H5Ldelete_by_idx_external_name_order_increasing);
+ }
+
+ if (H5Lcreate_external(ext_link_filename, "/", subgroup_id, LINK_DELETE_TEST_EXTERNAL_LINK_NAME2,
+ H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create external link '%s'\n", LINK_DELETE_TEST_EXTERNAL_LINK_NAME2);
+ PART_ERROR(H5Ldelete_by_idx_external_name_order_increasing);
+ }
+
+ if (H5Lcreate_external(ext_link_filename, "/", subgroup_id, LINK_DELETE_TEST_EXTERNAL_LINK_NAME3,
+ H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create external link '%s'\n", LINK_DELETE_TEST_EXTERNAL_LINK_NAME3);
+ PART_ERROR(H5Ldelete_by_idx_external_name_order_increasing);
+ }
+
+ /* Verify the links have been created */
+ if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_EXTERNAL_LINK_NAME, H5P_DEFAULT)) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if external link '%s' exists\n",
+ LINK_DELETE_TEST_EXTERNAL_LINK_NAME);
+ PART_ERROR(H5Ldelete_by_idx_external_name_order_increasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" external link '%s' did not exist before deletion\n",
+ LINK_DELETE_TEST_EXTERNAL_LINK_NAME);
+ PART_ERROR(H5Ldelete_by_idx_external_name_order_increasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_EXTERNAL_LINK_NAME2, H5P_DEFAULT)) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if external link '%s' exists\n",
+ LINK_DELETE_TEST_EXTERNAL_LINK_NAME2);
+ PART_ERROR(H5Ldelete_by_idx_external_name_order_increasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" external link '%s' did not exist before deletion\n",
+ LINK_DELETE_TEST_EXTERNAL_LINK_NAME2);
+ PART_ERROR(H5Ldelete_by_idx_external_name_order_increasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_EXTERNAL_LINK_NAME3, H5P_DEFAULT)) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if external link '%s' exists\n",
+ LINK_DELETE_TEST_EXTERNAL_LINK_NAME3);
+ PART_ERROR(H5Ldelete_by_idx_external_name_order_increasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" external link '%s' did not exist before deletion\n",
+ LINK_DELETE_TEST_EXTERNAL_LINK_NAME3);
+ PART_ERROR(H5Ldelete_by_idx_external_name_order_increasing);
+ }
+
+ /* Delete a link */
+ if (H5Ldelete_by_idx(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_INC, 0, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't delete external link '%s' using H5Ldelete_by_idx by alphabetical "
+ "order in increasing order\n",
+ LINK_DELETE_TEST_EXTERNAL_LINK_NAME);
+ PART_ERROR(H5Ldelete_by_idx_external_name_order_increasing);
+ }
+
+ /* Ensure that the link is gone and others remain */
+ if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_EXTERNAL_LINK_NAME, H5P_DEFAULT)) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if external link '%s' exists\n",
+ LINK_DELETE_TEST_EXTERNAL_LINK_NAME);
+ PART_ERROR(H5Ldelete_by_idx_external_name_order_increasing);
+ }
+
+ if (link_exists) {
+ H5_FAILED();
+ HDprintf(" external link '%s' exists after deletion!\n",
+ LINK_DELETE_TEST_EXTERNAL_LINK_NAME);
+ PART_ERROR(H5Ldelete_by_idx_external_name_order_increasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_EXTERNAL_LINK_NAME2, H5P_DEFAULT)) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if external link '%s' exists\n",
+ LINK_DELETE_TEST_EXTERNAL_LINK_NAME2);
+ PART_ERROR(H5Ldelete_by_idx_external_name_order_increasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" external link '%s' did not exist after deletion of a different link!\n",
+ LINK_DELETE_TEST_EXTERNAL_LINK_NAME2);
+ PART_ERROR(H5Ldelete_by_idx_external_name_order_increasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_EXTERNAL_LINK_NAME3, H5P_DEFAULT)) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if external link '%s' exists\n",
+ LINK_DELETE_TEST_EXTERNAL_LINK_NAME3);
+ PART_ERROR(H5Ldelete_by_idx_external_name_order_increasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" external link '%s' did not exist after deletion of a different link!\n",
+ LINK_DELETE_TEST_EXTERNAL_LINK_NAME3);
+ PART_ERROR(H5Ldelete_by_idx_external_name_order_increasing);
+ }
+
+ /* Repeat until all links have been deleted */
+ if (H5Ldelete_by_idx(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_INC, 0, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't delete external link '%s' using H5Ldelete_by_idx by alphabetical "
+ "order in increasing order\n",
+ LINK_DELETE_TEST_EXTERNAL_LINK_NAME2);
+ PART_ERROR(H5Ldelete_by_idx_external_name_order_increasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_EXTERNAL_LINK_NAME, H5P_DEFAULT)) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if external link '%s' exists\n",
+ LINK_DELETE_TEST_EXTERNAL_LINK_NAME);
+ PART_ERROR(H5Ldelete_by_idx_external_name_order_increasing);
+ }
+
+ if (link_exists) {
+ H5_FAILED();
+ HDprintf(" external link '%s' exists after deletion!\n",
+ LINK_DELETE_TEST_EXTERNAL_LINK_NAME);
+ PART_ERROR(H5Ldelete_by_idx_external_name_order_increasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_EXTERNAL_LINK_NAME2, H5P_DEFAULT)) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if external link '%s' exists\n",
+ LINK_DELETE_TEST_EXTERNAL_LINK_NAME2);
+ PART_ERROR(H5Ldelete_by_idx_external_name_order_increasing);
+ }
+
+ if (link_exists) {
+ H5_FAILED();
+ HDprintf(" external link '%s' exists after deletion!\n",
+ LINK_DELETE_TEST_EXTERNAL_LINK_NAME2);
+ PART_ERROR(H5Ldelete_by_idx_external_name_order_increasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_EXTERNAL_LINK_NAME3, H5P_DEFAULT)) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if external link '%s' exists\n",
+ LINK_DELETE_TEST_EXTERNAL_LINK_NAME3);
+ PART_ERROR(H5Ldelete_by_idx_external_name_order_increasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" external link '%s' did not exist after deletion of a different link!\n",
+ LINK_DELETE_TEST_EXTERNAL_LINK_NAME3);
+ PART_ERROR(H5Ldelete_by_idx_external_name_order_increasing);
+ }
+
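+            /* Delete the last remaining external link */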
+ if (H5Ldelete_by_idx(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_INC, 0, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't delete external link '%s' using H5Ldelete_by_idx by alphabetical "
+ "order in increasing order\n",
+ LINK_DELETE_TEST_EXTERNAL_LINK_NAME3);
+ PART_ERROR(H5Ldelete_by_idx_external_name_order_increasing);
+ }
+
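+            /* Verify that all links have been deleted */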
+ if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_EXTERNAL_LINK_NAME, H5P_DEFAULT)) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if external link '%s' exists\n",
+ LINK_DELETE_TEST_EXTERNAL_LINK_NAME);
+ PART_ERROR(H5Ldelete_by_idx_external_name_order_increasing);
+ }
+
+ if (link_exists) {
+ H5_FAILED();
+ HDprintf(" external link '%s' exists after deletion!\n",
+ LINK_DELETE_TEST_EXTERNAL_LINK_NAME);
+ PART_ERROR(H5Ldelete_by_idx_external_name_order_increasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_EXTERNAL_LINK_NAME2, H5P_DEFAULT)) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if external link '%s' exists\n",
+ LINK_DELETE_TEST_EXTERNAL_LINK_NAME2);
+ PART_ERROR(H5Ldelete_by_idx_external_name_order_increasing);
+ }
+
+ if (link_exists) {
+ H5_FAILED();
+ HDprintf(" external link '%s' exists after deletion!\n",
+ LINK_DELETE_TEST_EXTERNAL_LINK_NAME2);
+ PART_ERROR(H5Ldelete_by_idx_external_name_order_increasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_EXTERNAL_LINK_NAME3, H5P_DEFAULT)) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if external link '%s' exists\n",
+ LINK_DELETE_TEST_EXTERNAL_LINK_NAME3);
+ PART_ERROR(H5Ldelete_by_idx_external_name_order_increasing);
+ }
+
+ if (link_exists) {
+ H5_FAILED();
+ HDprintf(" external link '%s' exists after deletion!\n",
+ LINK_DELETE_TEST_EXTERNAL_LINK_NAME3);
+ PART_ERROR(H5Ldelete_by_idx_external_name_order_increasing);
+ }
+
+ if (H5Gclose(subgroup_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close group '%s'\n", LINK_DELETE_TEST_SUBGROUP15_NAME);
+ PART_ERROR(H5Ldelete_by_idx_external_name_order_increasing);
+ }
+
+ PASSED();
+#else
+ SKIPPED();
+ PART_EMPTY(H5Ldelete_by_idx_external_name_order_increasing);
+#endif
+ }
+ PART_END(H5Ldelete_by_idx_external_name_order_increasing);
+
+ H5E_BEGIN_TRY
+ {
+ H5Gclose(subgroup_id);
+ subgroup_id = H5I_INVALID_HID;
+ H5Fclose(ext_file_id);
+ ext_file_id = H5I_INVALID_HID;
+ }
+ H5E_END_TRY;
+
+ PART_BEGIN(H5Ldelete_by_idx_external_name_order_decreasing)
+ {
+ TESTING_2("H5Ldelete_by_idx on external link by alphabetical order in decreasing order");
+#ifndef NO_DECREASING_ALPHA_ITER_ORDER
+ /* Create file for external link to reference */
+ HDsnprintf(ext_link_filename, H5_API_TEST_FILENAME_MAX_LENGTH, "%s",
+ EXTERNAL_LINK_TEST_FILE_NAME);
+
+ if ((ext_file_id = H5Fcreate(ext_link_filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create file '%s' for external link to reference\n", ext_link_filename);
+ PART_ERROR(H5Ldelete_by_idx_external_name_order_decreasing);
+ }
+
+ if (H5Fclose(ext_file_id) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't close file '%s'\n", ext_link_filename);
+ PART_ERROR(H5Ldelete_by_idx_external_name_order_decreasing);
+ }
+
+ if ((subgroup_id = H5Gcreate2(group_id, LINK_DELETE_TEST_SUBGROUP16_NAME, H5P_DEFAULT, gcpl_id,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create subgroup '%s'\n", LINK_DELETE_TEST_SUBGROUP16_NAME);
+ PART_ERROR(H5Ldelete_by_idx_external_name_order_decreasing);
+ }
+
+ /* Create several external links */
+ if (H5Lcreate_external(ext_link_filename, "/", subgroup_id, LINK_DELETE_TEST_EXTERNAL_LINK_NAME,
+ H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create external link '%s'\n", LINK_DELETE_TEST_EXTERNAL_LINK_NAME);
+ PART_ERROR(H5Ldelete_by_idx_external_name_order_decreasing);
+ }
+
+ if (H5Lcreate_external(ext_link_filename, "/", subgroup_id, LINK_DELETE_TEST_EXTERNAL_LINK_NAME2,
+ H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create external link '%s'\n", LINK_DELETE_TEST_EXTERNAL_LINK_NAME2);
+ PART_ERROR(H5Ldelete_by_idx_external_name_order_decreasing);
+ }
+
+ if (H5Lcreate_external(ext_link_filename, "/", subgroup_id, LINK_DELETE_TEST_EXTERNAL_LINK_NAME3,
+ H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create external link '%s'\n", LINK_DELETE_TEST_EXTERNAL_LINK_NAME3);
+ PART_ERROR(H5Ldelete_by_idx_external_name_order_decreasing);
+ }
+
+ /* Verify the links have been created */
+ if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_EXTERNAL_LINK_NAME, H5P_DEFAULT)) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if external link '%s' exists\n",
+ LINK_DELETE_TEST_EXTERNAL_LINK_NAME);
+ PART_ERROR(H5Ldelete_by_idx_external_name_order_decreasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" external link '%s' did not exist before deletion\n",
+ LINK_DELETE_TEST_EXTERNAL_LINK_NAME);
+ PART_ERROR(H5Ldelete_by_idx_external_name_order_decreasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_EXTERNAL_LINK_NAME2, H5P_DEFAULT)) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if external link '%s' exists\n",
+ LINK_DELETE_TEST_EXTERNAL_LINK_NAME2);
+ PART_ERROR(H5Ldelete_by_idx_external_name_order_decreasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" external link '%s' did not exist before deletion\n",
+ LINK_DELETE_TEST_EXTERNAL_LINK_NAME2);
+ PART_ERROR(H5Ldelete_by_idx_external_name_order_decreasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_EXTERNAL_LINK_NAME3, H5P_DEFAULT)) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if external link '%s' exists\n",
+ LINK_DELETE_TEST_EXTERNAL_LINK_NAME3);
+ PART_ERROR(H5Ldelete_by_idx_external_name_order_decreasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" external link '%s' did not exist before deletion\n",
+ LINK_DELETE_TEST_EXTERNAL_LINK_NAME3);
+ PART_ERROR(H5Ldelete_by_idx_external_name_order_decreasing);
+ }
+
+            /* Delete a link; with H5_ITER_DEC on the name index, index 2 refers to
+             * the lexicographically smallest of the three link names */
+ if (H5Ldelete_by_idx(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_DEC, 2, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't delete external link '%s' using H5Ldelete_by_idx by alphabetical "
+ "order in decreasing order\n",
+ LINK_DELETE_TEST_EXTERNAL_LINK_NAME);
+ PART_ERROR(H5Ldelete_by_idx_external_name_order_decreasing);
+ }
+
+ /* Ensure that the link is gone and others remain */
+ if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_EXTERNAL_LINK_NAME, H5P_DEFAULT)) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if external link '%s' exists\n",
+ LINK_DELETE_TEST_EXTERNAL_LINK_NAME);
+ PART_ERROR(H5Ldelete_by_idx_external_name_order_decreasing);
+ }
+
+ if (link_exists) {
+ H5_FAILED();
+ HDprintf(" external link '%s' exists after deletion!\n",
+ LINK_DELETE_TEST_EXTERNAL_LINK_NAME);
+ PART_ERROR(H5Ldelete_by_idx_external_name_order_decreasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_EXTERNAL_LINK_NAME2, H5P_DEFAULT)) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if external link '%s' exists\n",
+ LINK_DELETE_TEST_EXTERNAL_LINK_NAME2);
+ PART_ERROR(H5Ldelete_by_idx_external_name_order_decreasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" external link '%s' did not exist after deletion of a different link!\n",
+ LINK_DELETE_TEST_EXTERNAL_LINK_NAME2);
+ PART_ERROR(H5Ldelete_by_idx_external_name_order_decreasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_EXTERNAL_LINK_NAME3, H5P_DEFAULT)) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if external link '%s' exists\n",
+ LINK_DELETE_TEST_EXTERNAL_LINK_NAME3);
+ PART_ERROR(H5Ldelete_by_idx_external_name_order_decreasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" external link '%s' did not exist after deletion of a different link!\n",
+ LINK_DELETE_TEST_EXTERNAL_LINK_NAME3);
+ PART_ERROR(H5Ldelete_by_idx_external_name_order_decreasing);
+ }
+
+ /* Repeat until all links have been deleted */
+ if (H5Ldelete_by_idx(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_DEC, 1, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't delete external link '%s' using H5Ldelete_by_idx by alphabetical "
+ "order in decreasing order\n",
+ LINK_DELETE_TEST_EXTERNAL_LINK_NAME2);
+ PART_ERROR(H5Ldelete_by_idx_external_name_order_decreasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_EXTERNAL_LINK_NAME, H5P_DEFAULT)) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if external link '%s' exists\n",
+ LINK_DELETE_TEST_EXTERNAL_LINK_NAME);
+ PART_ERROR(H5Ldelete_by_idx_external_name_order_decreasing);
+ }
+
+ if (link_exists) {
+ H5_FAILED();
+ HDprintf(" external link '%s' exists after deletion!\n",
+ LINK_DELETE_TEST_EXTERNAL_LINK_NAME);
+ PART_ERROR(H5Ldelete_by_idx_external_name_order_decreasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_EXTERNAL_LINK_NAME2, H5P_DEFAULT)) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if external link '%s' exists\n",
+ LINK_DELETE_TEST_EXTERNAL_LINK_NAME2);
+ PART_ERROR(H5Ldelete_by_idx_external_name_order_decreasing);
+ }
+
+ if (link_exists) {
+ H5_FAILED();
+ HDprintf(" external link '%s' exists after deletion!\n",
+ LINK_DELETE_TEST_EXTERNAL_LINK_NAME2);
+ PART_ERROR(H5Ldelete_by_idx_external_name_order_decreasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_EXTERNAL_LINK_NAME3, H5P_DEFAULT)) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if external link '%s' exists\n",
+ LINK_DELETE_TEST_EXTERNAL_LINK_NAME3);
+ PART_ERROR(H5Ldelete_by_idx_external_name_order_decreasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" external link '%s' did not exist after deletion of a different link!\n",
+ LINK_DELETE_TEST_EXTERNAL_LINK_NAME3);
+ PART_ERROR(H5Ldelete_by_idx_external_name_order_decreasing);
+ }
+
+ if (H5Ldelete_by_idx(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_DEC, 0, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't delete external link '%s' using H5Ldelete_by_idx by alphabetical "
+ "order in decreasing order\n",
+ LINK_DELETE_TEST_EXTERNAL_LINK_NAME3);
+ PART_ERROR(H5Ldelete_by_idx_external_name_order_decreasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_EXTERNAL_LINK_NAME, H5P_DEFAULT)) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if external link '%s' exists\n",
+ LINK_DELETE_TEST_EXTERNAL_LINK_NAME);
+ PART_ERROR(H5Ldelete_by_idx_external_name_order_decreasing);
+ }
+
+ if (link_exists) {
+ H5_FAILED();
+ HDprintf(" external link '%s' exists after deletion!\n",
+ LINK_DELETE_TEST_EXTERNAL_LINK_NAME);
+ PART_ERROR(H5Ldelete_by_idx_external_name_order_decreasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_EXTERNAL_LINK_NAME2, H5P_DEFAULT)) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if external link '%s' exists\n",
+ LINK_DELETE_TEST_EXTERNAL_LINK_NAME2);
+ PART_ERROR(H5Ldelete_by_idx_external_name_order_decreasing);
+ }
+
+ if (link_exists) {
+ H5_FAILED();
+ HDprintf(" external link '%s' exists after deletion!\n",
+ LINK_DELETE_TEST_EXTERNAL_LINK_NAME2);
+ PART_ERROR(H5Ldelete_by_idx_external_name_order_decreasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, LINK_DELETE_TEST_EXTERNAL_LINK_NAME3, H5P_DEFAULT)) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if external link '%s' exists\n",
+ LINK_DELETE_TEST_EXTERNAL_LINK_NAME3);
+ PART_ERROR(H5Ldelete_by_idx_external_name_order_decreasing);
+ }
+
+ if (link_exists) {
+ H5_FAILED();
+ HDprintf(" external link '%s' exists after deletion!\n",
+ LINK_DELETE_TEST_EXTERNAL_LINK_NAME3);
+ PART_ERROR(H5Ldelete_by_idx_external_name_order_decreasing);
+ }
+
+ if (H5Gclose(subgroup_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close group '%s'\n", LINK_DELETE_TEST_SUBGROUP16_NAME);
+ PART_ERROR(H5Ldelete_by_idx_external_name_order_decreasing);
+ }
+
+ PASSED();
+#else
+ SKIPPED();
+ PART_EMPTY(H5Ldelete_by_idx_external_name_order_decreasing);
+#endif
+ }
+ PART_END(H5Ldelete_by_idx_external_name_order_decreasing);
+
+ H5E_BEGIN_TRY
+ {
+ H5Gclose(subgroup_id);
+ subgroup_id = H5I_INVALID_HID;
+ H5Fclose(ext_file_id);
+ ext_file_id = H5I_INVALID_HID;
+ }
+ H5E_END_TRY;
+
+ PART_BEGIN(H5Ldelete_by_idx_ud_crt_order_increasing)
+ {
+ TESTING_2("H5Ldelete_by_idx on user-defined link by creation order in increasing order");
+
+ /* TODO */
+
+ SKIPPED();
+ PART_EMPTY(H5Ldelete_by_idx_ud_crt_order_increasing);
+ }
+ PART_END(H5Ldelete_by_idx_ud_crt_order_increasing);
+
+ H5E_BEGIN_TRY
+ {
+ H5Gclose(subgroup_id);
+ subgroup_id = H5I_INVALID_HID;
+ }
+ H5E_END_TRY;
+
+ PART_BEGIN(H5Ldelete_by_idx_ud_crt_order_decreasing)
+ {
+ TESTING_2("H5Ldelete_by_idx on user-defined link by creation order in decreasing order");
+
+ /* TODO */
+
+ SKIPPED();
+ PART_EMPTY(H5Ldelete_by_idx_ud_crt_order_decreasing);
+ }
+ PART_END(H5Ldelete_by_idx_ud_crt_order_decreasing);
+
+ H5E_BEGIN_TRY
+ {
+ H5Gclose(subgroup_id);
+ subgroup_id = H5I_INVALID_HID;
+ }
+ H5E_END_TRY;
+
+ PART_BEGIN(H5Ldelete_by_idx_ud_name_order_increasing)
+ {
+ TESTING_2("H5Ldelete_by_idx on user-defined link by alphabetical order in increasing order");
+
+ /* TODO */
+
+ SKIPPED();
+ PART_EMPTY(H5Ldelete_by_idx_ud_name_order_increasing);
+ }
+ PART_END(H5Ldelete_by_idx_ud_name_order_increasing);
+
+ H5E_BEGIN_TRY
+ {
+ H5Gclose(subgroup_id);
+ subgroup_id = H5I_INVALID_HID;
+ }
+ H5E_END_TRY;
+
+ PART_BEGIN(H5Ldelete_by_idx_ud_name_order_decreasing)
+ {
+ TESTING_2("H5Ldelete_by_idx on user-defined link by alphabetical order in decreasing order");
+
+ /* TODO */
+
+ SKIPPED();
+ PART_EMPTY(H5Ldelete_by_idx_ud_name_order_decreasing);
+ }
+ PART_END(H5Ldelete_by_idx_ud_name_order_decreasing);
+
+ H5E_BEGIN_TRY
+ {
+ H5Gclose(subgroup_id);
+ subgroup_id = H5I_INVALID_HID;
+ }
+ H5E_END_TRY;
+ }
+ END_MULTIPART;
+
+ TESTING_2("test cleanup");
+
+ if (H5Pclose(gcpl_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Pclose(gcpl_id);
+ H5Gclose(subgroup_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(ext_file_id);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to check that a group's always-increasing
+ * maximum link creation order value gets reset once
+ * all the links have been deleted from the group.
+ */
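+/*
+ * Illustrative sketch of the behavior exercised below (not executed by this
+ * test; the group handle and link names are placeholders). For a group whose
+ * GCPL has link creation order tracking enabled:
+ *
+ *     H5G_info_t info;
+ *
+ *     H5Lcreate_hard(grp_id, ".", grp_id, "a", H5P_DEFAULT, H5P_DEFAULT);
+ *     H5Lcreate_hard(grp_id, ".", grp_id, "b", H5P_DEFAULT, H5P_DEFAULT);
+ *     H5Gget_info(grp_id, &info);      // info.max_corder == 2
+ *
+ *     H5Ldelete(grp_id, "a", H5P_DEFAULT);
+ *     H5Ldelete(grp_id, "b", H5P_DEFAULT);
+ *     H5Gget_info(grp_id, &info);      // info.max_corder expected to reset to 0
+ */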
+static int
+test_delete_link_reset_grp_max_crt_order(void)
+{
+#ifndef NO_MAX_LINK_CRT_ORDER_RESET
+ H5G_info_t grp_info;
+ size_t i;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t subgroup_id = H5I_INVALID_HID;
+ hid_t gcpl_id = H5I_INVALID_HID;
+ char link_name[LINK_DELETE_RESET_MAX_CRT_ORDER_TEST_BUF_SIZE];
+#endif
+
+ TESTING_MULTIPART("H5Ldelete of all links in group resets group's maximum link creation order value");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_LINK_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_MORE)) {
+ SKIPPED();
+        HDprintf("    API functions for basic file, basic or more group, or basic link aren't supported "
+                 "with this connector\n");
+ return 0;
+ }
+
+#ifndef NO_MAX_LINK_CRT_ORDER_RESET
+ TESTING_2("test setup");
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, LINK_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", LINK_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((gcpl_id = H5Pcreate(H5P_GROUP_CREATE)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create GCPL for link creation order tracking\n");
+ goto error;
+ }
+
+ if (H5Pset_link_creation_order(gcpl_id, H5P_CRT_ORDER_TRACKED) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't set link creation order tracking\n");
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, LINK_DELETE_RESET_MAX_CRT_ORDER_TEST_SUBGROUP_NAME,
+ H5P_DEFAULT, gcpl_id, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container sub-group '%s'\n",
+ LINK_DELETE_RESET_MAX_CRT_ORDER_TEST_SUBGROUP_NAME);
+ goto error;
+ }
+
+ PASSED();
+
+ BEGIN_MULTIPART
+ {
+ PART_BEGIN(H5Ldelete_links_bottom_up)
+ {
+ TESTING_2("H5Ldelete from least-recently created link to most-recently created link");
+
+ if ((subgroup_id = H5Gcreate2(group_id, LINK_DELETE_RESET_MAX_CRT_ORDER_TEST_SUBGROUP1_NAME,
+ H5P_DEFAULT, gcpl_id, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create subgroup '%s'\n",
+ LINK_DELETE_RESET_MAX_CRT_ORDER_TEST_SUBGROUP1_NAME);
+ PART_ERROR(H5Ldelete_links_bottom_up);
+ }
+
+ /* Create several links inside the group */
+ for (i = 0; i < LINK_DELETE_RESET_MAX_CRT_ORDER_TEST_NUM_LINKS; i++) {
+                HDsnprintf(link_name, LINK_DELETE_RESET_MAX_CRT_ORDER_TEST_BUF_SIZE, "link%d", (int)i);
+
+ if (H5Lcreate_hard(subgroup_id, ".", subgroup_id, link_name, H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create hard link '%s'\n", link_name);
+ PART_ERROR(H5Ldelete_links_bottom_up);
+ }
+ }
+
+ /* Delete the links, checking the group's maximum creation order value each time */
+ for (i = 0; i < LINK_DELETE_RESET_MAX_CRT_ORDER_TEST_NUM_LINKS; i++) {
+ memset(&grp_info, 0, sizeof(grp_info));
+
+ if (H5Gget_info(subgroup_id, &grp_info) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve group's info\n");
+ PART_ERROR(H5Ldelete_links_bottom_up);
+ }
+
+ if (grp_info.max_corder != LINK_DELETE_RESET_MAX_CRT_ORDER_TEST_NUM_LINKS) {
+ H5_FAILED();
+ HDprintf(" group's maximum creation order value got adjusted to %lld during link "
+ "deletion; value should have remained at %lld\n",
+                             (long long)grp_info.max_corder,
+                             (long long)LINK_DELETE_RESET_MAX_CRT_ORDER_TEST_NUM_LINKS);
+ PART_ERROR(H5Ldelete_links_bottom_up);
+ }
+
+                HDsnprintf(link_name, LINK_DELETE_RESET_MAX_CRT_ORDER_TEST_BUF_SIZE, "link%d", (int)i);
+
+ if (H5Ldelete(subgroup_id, link_name, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to delete link '%s'\n", link_name);
+ PART_ERROR(H5Ldelete_links_bottom_up);
+ }
+ }
+
+ /* Ensure the group's maximum creation order value has now reset to 0 after all the links are gone
+ */
+ memset(&grp_info, 0, sizeof(grp_info));
+
+ if (H5Gget_info(subgroup_id, &grp_info) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve group's info\n");
+ PART_ERROR(H5Ldelete_links_bottom_up);
+ }
+
+ if (grp_info.max_corder != 0) {
+ H5_FAILED();
+ HDprintf(" group's maximum creation order value didn't reset to 0 after deleting all "
+ "links from group; value is still %lld\n",
+ (long long)grp_info.max_corder);
+ PART_ERROR(H5Ldelete_links_bottom_up);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Ldelete_links_bottom_up);
+
+ H5E_BEGIN_TRY
+ {
+ H5Gclose(subgroup_id);
+ subgroup_id = H5I_INVALID_HID;
+ }
+ H5E_END_TRY;
+
+ PART_BEGIN(H5Ldelete_links_top_down)
+ {
+ TESTING_2("H5Ldelete from most-recently created link to least-recently created link");
+
+ if ((subgroup_id = H5Gcreate2(group_id, LINK_DELETE_RESET_MAX_CRT_ORDER_TEST_SUBGROUP2_NAME,
+ H5P_DEFAULT, gcpl_id, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create subgroup '%s'\n",
+ LINK_DELETE_RESET_MAX_CRT_ORDER_TEST_SUBGROUP2_NAME);
+ PART_ERROR(H5Ldelete_links_top_down);
+ }
+
+ /* Create several links inside the group */
+ for (i = 0; i < LINK_DELETE_RESET_MAX_CRT_ORDER_TEST_NUM_LINKS; i++) {
+                HDsnprintf(link_name, LINK_DELETE_RESET_MAX_CRT_ORDER_TEST_BUF_SIZE, "link%d", (int)i);
+
+ if (H5Lcreate_hard(subgroup_id, ".", subgroup_id, link_name, H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create hard link '%s'\n", link_name);
+ PART_ERROR(H5Ldelete_links_top_down);
+ }
+ }
+
+ /* Delete the links, checking the group's maximum creation order value each time */
+ for (i = 0; i < LINK_DELETE_RESET_MAX_CRT_ORDER_TEST_NUM_LINKS; i++) {
+ memset(&grp_info, 0, sizeof(grp_info));
+
+ if (H5Gget_info(subgroup_id, &grp_info) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve group's info\n");
+ PART_ERROR(H5Ldelete_links_top_down);
+ }
+
+ if (grp_info.max_corder != LINK_DELETE_RESET_MAX_CRT_ORDER_TEST_NUM_LINKS) {
+ H5_FAILED();
+ HDprintf(" group's maximum creation order value got adjusted to %lld during link "
+ "deletion; value should have remained at %lld\n",
+                             (long long)grp_info.max_corder,
+                             (long long)LINK_DELETE_RESET_MAX_CRT_ORDER_TEST_NUM_LINKS);
+ PART_ERROR(H5Ldelete_links_top_down);
+ }
+
+                HDsnprintf(link_name, LINK_DELETE_RESET_MAX_CRT_ORDER_TEST_BUF_SIZE, "link%d",
+                           (int)(LINK_DELETE_RESET_MAX_CRT_ORDER_TEST_NUM_LINKS - i - 1));
+
+ if (H5Ldelete(subgroup_id, link_name, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to delete link '%s'\n", link_name);
+ PART_ERROR(H5Ldelete_links_top_down);
+ }
+ }
+
+ /* Ensure the group's maximum creation order value has now reset to 0 after all the links are gone
+ */
+ memset(&grp_info, 0, sizeof(grp_info));
+
+ if (H5Gget_info(subgroup_id, &grp_info) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve group's info\n");
+ PART_ERROR(H5Ldelete_links_top_down);
+ }
+
+ if (grp_info.max_corder != 0) {
+ H5_FAILED();
+ HDprintf(" group's maximum creation order value didn't reset to 0 after deleting all "
+ "links from group; value is still %lld\n",
+ (long long)grp_info.max_corder);
+ PART_ERROR(H5Ldelete_links_top_down);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Ldelete_links_top_down);
+
+ H5E_BEGIN_TRY
+ {
+ H5Gclose(subgroup_id);
+ subgroup_id = H5I_INVALID_HID;
+ }
+ H5E_END_TRY;
+ }
+ END_MULTIPART;
+
+ TESTING_2("test cleanup");
+
+ if (H5Pclose(gcpl_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Pclose(gcpl_id);
+ H5Gclose(subgroup_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+#else
+ SKIPPED();
+ return 0;
+#endif
+}
+
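+/*
+ * A test to check that H5Ldelete and H5Ldelete_by_idx
+ * fail when they are given invalid parameters.
+ */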
+static int
+test_delete_link_invalid_params(void)
+{
+ htri_t link_exists;
+ herr_t err_ret = -1;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+
+ TESTING_MULTIPART("H5Ldelete with invalid parameters");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_LINK_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_BY_IDX) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_HARD_LINKS)) {
+ SKIPPED();
+        HDprintf("    API functions for basic file, group, or link, hard link, or by-index link operations "
+                 "aren't supported with this connector\n");
+ return 0;
+ }
+
+ TESTING_2("test setup");
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, LINK_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", LINK_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, LINK_DELETE_INVALID_PARAMS_TEST_GROUP_NAME, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container subgroup '%s'\n", LINK_DELETE_INVALID_PARAMS_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if (H5Lcreate_hard(group_id, ".", group_id, LINK_DELETE_INVALID_PARAMS_TEST_HARD_LINK_NAME, H5P_DEFAULT,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create hard link '%s'\n", LINK_DELETE_INVALID_PARAMS_TEST_HARD_LINK_NAME);
+ goto error;
+ }
+
+    /* Verify the link has been created */
+ if ((link_exists = H5Lexists(group_id, LINK_DELETE_INVALID_PARAMS_TEST_HARD_LINK_NAME, H5P_DEFAULT)) <
+ 0) {
+ H5_FAILED();
+        HDprintf("    couldn't determine if hard link '%s' exists\n",
+ LINK_DELETE_INVALID_PARAMS_TEST_HARD_LINK_NAME);
+ goto error;
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+        HDprintf("    hard link did not exist\n");
+ goto error;
+ }
+
+ PASSED();
+
+ BEGIN_MULTIPART
+ {
+ PART_BEGIN(H5Ldelete_invalid_loc_id)
+ {
+ TESTING_2("H5Ldelete with an invalid location ID");
+
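+            /* The following call is expected to fail; wrap it so the HDF5 error stack isn't printed */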
+ H5E_BEGIN_TRY
+ {
+ err_ret =
+ H5Ldelete(H5I_INVALID_HID, LINK_DELETE_INVALID_PARAMS_TEST_HARD_LINK_NAME, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Ldelete succeeded with an invalid location ID!\n");
+ PART_ERROR(H5Ldelete_invalid_loc_id);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Ldelete_invalid_loc_id);
+
+ PART_BEGIN(H5Ldelete_invalid_link_name)
+ {
+ TESTING_2("H5Ldelete with an invalid link name");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Ldelete(group_id, NULL, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Ldelete succeeded with a NULL link name!\n");
+ PART_ERROR(H5Ldelete_invalid_link_name);
+ }
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Ldelete(group_id, "", H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Ldelete succeeded with an invalid link name of ''!\n");
+ PART_ERROR(H5Ldelete_invalid_link_name);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Ldelete_invalid_link_name);
+
+ PART_BEGIN(H5Ldelete_invalid_lapl)
+ {
+ TESTING_2("H5Ldelete with an invalid LAPL");
+#ifndef NO_INVALID_PROPERTY_LIST_TESTS
+ H5E_BEGIN_TRY
+ {
+ err_ret =
+ H5Ldelete(group_id, LINK_DELETE_INVALID_PARAMS_TEST_HARD_LINK_NAME, H5I_INVALID_HID);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Ldelete succeeded with an invalid LAPL!\n");
+ PART_ERROR(H5Ldelete_invalid_lapl);
+ }
+
+ PASSED();
+#else
+ SKIPPED();
+ PART_EMPTY(H5Ldelete_invalid_lapl);
+#endif
+ }
+ PART_END(H5Ldelete_invalid_lapl);
+
+ PART_BEGIN(H5Ldelete_by_idx_invalid_loc_id)
+ {
+ TESTING_2("H5Ldelete_by_idx with an invalid location ID");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Ldelete_by_idx(H5I_INVALID_HID, ".", H5_INDEX_NAME, H5_ITER_INC, 0, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Ldelete_by_idx succeeded with an invalid location ID!\n");
+ PART_ERROR(H5Ldelete_by_idx_invalid_loc_id);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Ldelete_by_idx_invalid_loc_id);
+
+ PART_BEGIN(H5Ldelete_by_idx_invalid_grp_name)
+ {
+ TESTING_2("H5Ldelete_by_idx with an invalid group name");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Ldelete_by_idx(group_id, NULL, H5_INDEX_NAME, H5_ITER_INC, 0, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Ldelete_by_idx succeeded with a NULL group name!\n");
+ PART_ERROR(H5Ldelete_by_idx_invalid_grp_name);
+ }
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Ldelete_by_idx(group_id, "", H5_INDEX_NAME, H5_ITER_INC, 0, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Ldelete_by_idx succeeded with an invalid group name of ''!\n");
+ PART_ERROR(H5Ldelete_by_idx_invalid_grp_name);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Ldelete_by_idx_invalid_grp_name);
+
+ PART_BEGIN(H5Ldelete_by_idx_invalid_index_type)
+ {
+ TESTING_2("H5Ldelete_by_idx with an invalid index type");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Ldelete_by_idx(group_id, ".", H5_INDEX_UNKNOWN, H5_ITER_INC, 0, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Ldelete_by_idx succeeded with invalid index type H5_INDEX_UNKNOWN!\n");
+ PART_ERROR(H5Ldelete_by_idx_invalid_index_type);
+ }
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Ldelete_by_idx(group_id, ".", H5_INDEX_N, H5_ITER_INC, 0, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Ldelete_by_idx succeeded with invalid index type H5_INDEX_N!\n");
+ PART_ERROR(H5Ldelete_by_idx_invalid_index_type);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Ldelete_by_idx_invalid_index_type);
+
+ PART_BEGIN(H5Ldelete_by_idx_invalid_index_order)
+ {
+ TESTING_2("H5Ldelete_by_idx with an invalid iteration ordering");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Ldelete_by_idx(group_id, ".", H5_INDEX_NAME, H5_ITER_UNKNOWN, 0, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Ldelete_by_idx succeeded with invalid iteration ordering H5_ITER_UNKNOWN!\n");
+ PART_ERROR(H5Ldelete_by_idx_invalid_index_order);
+ }
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Ldelete_by_idx(group_id, ".", H5_INDEX_NAME, H5_ITER_N, 0, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Ldelete_by_idx succeeded with invalid iteration ordering H5_ITER_N!\n");
+ PART_ERROR(H5Ldelete_by_idx_invalid_index_order);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Ldelete_by_idx_invalid_index_order);
+
+ PART_BEGIN(H5Ldelete_by_idx_invalid_lapl)
+ {
+ TESTING_2("H5Ldelete_by_idx with an invalid LAPL");
+#ifndef NO_INVALID_PROPERTY_LIST_TESTS
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Ldelete_by_idx(group_id, ".", H5_INDEX_NAME, H5_ITER_INC, 0, H5I_INVALID_HID);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Ldelete_by_idx succeeded with an invalid LAPL!\n");
+ PART_ERROR(H5Ldelete_by_idx_invalid_lapl);
+ }
+
+ PASSED();
+#else
+ SKIPPED();
+ PART_EMPTY(H5Ldelete_by_idx_invalid_lapl);
+#endif
+ }
+ PART_END(H5Ldelete_by_idx_invalid_lapl);
+
+ PART_BEGIN(H5Ldelete_by_idx_link_existence)
+ {
+ TESTING_2("valid link existence after previous invalid H5Ldelete(_by_idx) calls");
+
+ /* Verify that the link hasn't been deleted */
+ if ((link_exists =
+ H5Lexists(group_id, LINK_DELETE_INVALID_PARAMS_TEST_HARD_LINK_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if hard link '%s' exists\n",
+ LINK_DELETE_INVALID_PARAMS_TEST_HARD_LINK_NAME);
+ PART_ERROR(H5Ldelete_by_idx_link_existence);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" hard link didn't exist!\n");
+ PART_ERROR(H5Ldelete_by_idx_link_existence);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Ldelete_by_idx_link_existence);
+ }
+ END_MULTIPART;
+
+ TESTING_2("test cleanup");
+
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to check that a link can be copied using H5Lcopy.
+ */
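+/*
+ * Minimal usage sketch of H5Lcopy (illustrative only; the group handles and
+ * link names are placeholders). Copying a link leaves the original in place:
+ *
+ *     H5Lcopy(src_grp_id, "link", dst_grp_id, "link_copy", H5P_DEFAULT, H5P_DEFAULT);
+ *
+ *     // Afterwards, both H5Lexists(src_grp_id, "link", H5P_DEFAULT) and
+ *     // H5Lexists(dst_grp_id, "link_copy", H5P_DEFAULT) return > 0.
+ */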
+static int
+test_copy_link(void)
+{
+ htri_t link_exists;
+ hid_t file_id = H5I_INVALID_HID, ext_file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t src_grp_id = H5I_INVALID_HID, dst_grp_id = H5I_INVALID_HID;
+#ifndef NO_EXTERNAL_LINKS
+ char ext_link_filename[H5_API_TEST_FILENAME_MAX_LENGTH];
+#endif
+
+ TESTING_MULTIPART("link copying");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_LINK_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_LINK_MORE) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_HARD_LINKS) || !(vol_cap_flags_g & H5VL_CAP_FLAG_EXTERNAL_LINKS) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_SOFT_LINKS)) {
+ SKIPPED();
+        HDprintf("    API functions for basic file, group, basic or more link, or hard, soft, or external "
+                 "links aren't supported with this connector\n");
+ return 0;
+ }
+
+ TESTING_2("test setup");
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+        HDprintf("    couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, LINK_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+        HDprintf("    couldn't open container group '%s'\n", LINK_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, COPY_LINK_TEST_SUBGROUP_NAME, H5P_DEFAULT, H5P_DEFAULT,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create group '%s'\n", COPY_LINK_TEST_SUBGROUP_NAME);
+ goto error;
+ }
+
+ if ((src_grp_id = H5Gcreate2(group_id, COPY_LINK_TEST_SRC_GROUP_NAME, H5P_DEFAULT, H5P_DEFAULT,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create group '%s'\n", COPY_LINK_TEST_SRC_GROUP_NAME);
+ goto error;
+ }
+
+ if ((dst_grp_id = H5Gcreate2(group_id, COPY_LINK_TEST_DST_GROUP_NAME, H5P_DEFAULT, H5P_DEFAULT,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create group '%s'\n", COPY_LINK_TEST_DST_GROUP_NAME);
+ goto error;
+ }
+
+ PASSED();
+
+ BEGIN_MULTIPART
+ {
+ PART_BEGIN(H5Lcopy_hard_no_check)
+ {
+ TESTING_2("H5Lcopy on hard link (copied link's properties not checked)");
+
+ /* Try to copy a hard link */
+ if (H5Lcreate_hard(group_id, ".", src_grp_id, COPY_LINK_TEST_HARD_LINK_NAME, H5P_DEFAULT,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create hard link '%s'\n", COPY_LINK_TEST_HARD_LINK_NAME);
+ PART_ERROR(H5Lcopy_hard_no_check);
+ }
+
+ /* Verify the link has been created */
+ if ((link_exists = H5Lexists(src_grp_id, COPY_LINK_TEST_HARD_LINK_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if hard link '%s' exists\n", COPY_LINK_TEST_HARD_LINK_NAME);
+ PART_ERROR(H5Lcopy_hard_no_check);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" hard link did not exist\n");
+ PART_ERROR(H5Lcopy_hard_no_check);
+ }
+
+ /* Verify the link doesn't currently exist in the target group */
+ if ((link_exists = H5Lexists(dst_grp_id, COPY_LINK_TEST_HARD_LINK_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if hard link '%s' exists\n", COPY_LINK_TEST_HARD_LINK_NAME);
+ PART_ERROR(H5Lcopy_hard_no_check);
+ }
+
+ if (link_exists) {
+ H5_FAILED();
+ HDprintf(" hard link existed in target group before copy!\n");
+ PART_ERROR(H5Lcopy_hard_no_check);
+ }
+
+ /* Copy the link */
+ if (H5Lcopy(src_grp_id, COPY_LINK_TEST_HARD_LINK_NAME, dst_grp_id,
+ COPY_LINK_TEST_HARD_LINK_COPY_NAME, H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to copy hard link '%s'\n", COPY_LINK_TEST_HARD_LINK_NAME);
+ PART_ERROR(H5Lcopy_hard_no_check);
+ }
+
+ /* Verify the link has been copied and still exists in the source group */
+ if ((link_exists = H5Lexists(dst_grp_id, COPY_LINK_TEST_HARD_LINK_COPY_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if hard link copy '%s' exists\n",
+ COPY_LINK_TEST_HARD_LINK_COPY_NAME);
+ PART_ERROR(H5Lcopy_hard_no_check);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" hard link copy did not exist\n");
+ PART_ERROR(H5Lcopy_hard_no_check);
+ }
+
+ if ((link_exists = H5Lexists(src_grp_id, COPY_LINK_TEST_HARD_LINK_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if original hard link '%s' exists\n",
+ COPY_LINK_TEST_HARD_LINK_NAME);
+ PART_ERROR(H5Lcopy_hard_no_check);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" original hard link did not exist\n");
+ PART_ERROR(H5Lcopy_hard_no_check);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lcopy_hard_no_check);
+
+ PART_BEGIN(H5Lcopy_hard_check)
+ {
+ H5L_info2_t orig_info, new_info;
+ int cmp_value;
+
+ TESTING_2("H5Lcopy on hard link (copied link's properties checked)");
+
+ /* Try to copy a hard link */
+ if (H5Lcreate_hard(group_id, ".", src_grp_id, COPY_LINK_TEST_HARD_LINK_NAME2, H5P_DEFAULT,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create hard link '%s'\n", COPY_LINK_TEST_HARD_LINK_NAME2);
+ PART_ERROR(H5Lcopy_hard_check);
+ }
+
+ /* Verify the link has been created */
+ if ((link_exists = H5Lexists(src_grp_id, COPY_LINK_TEST_HARD_LINK_NAME2, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if hard link '%s' exists\n", COPY_LINK_TEST_HARD_LINK_NAME2);
+ PART_ERROR(H5Lcopy_hard_check);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" hard link did not exist\n");
+ PART_ERROR(H5Lcopy_hard_check);
+ }
+
+ /* Retrieve the link's info */
+ if (H5Lget_info2(src_grp_id, COPY_LINK_TEST_HARD_LINK_NAME2, &orig_info, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't retrieve info for link '%s'\n", COPY_LINK_TEST_HARD_LINK_NAME2);
+ PART_ERROR(H5Lcopy_hard_check);
+ }
+
+ /* Verify the link doesn't currently exist in the target group */
+ if ((link_exists = H5Lexists(dst_grp_id, COPY_LINK_TEST_HARD_LINK_NAME2, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if hard link '%s' exists\n", COPY_LINK_TEST_HARD_LINK_NAME2);
+ PART_ERROR(H5Lcopy_hard_check);
+ }
+
+ if (link_exists) {
+ H5_FAILED();
+ HDprintf(" hard link existed in target group before copy!\n");
+ PART_ERROR(H5Lcopy_hard_check);
+ }
+
+ /* Copy the link */
+ if (H5Lcopy(src_grp_id, COPY_LINK_TEST_HARD_LINK_NAME2, dst_grp_id,
+ COPY_LINK_TEST_HARD_LINK_COPY_NAME2, H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to copy hard link '%s'\n", COPY_LINK_TEST_HARD_LINK_NAME2);
+ PART_ERROR(H5Lcopy_hard_check);
+ }
+
+ /* Verify the link has been copied and still exists in the source group */
+ if ((link_exists = H5Lexists(dst_grp_id, COPY_LINK_TEST_HARD_LINK_COPY_NAME2, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if hard link copy '%s' exists\n",
+ COPY_LINK_TEST_HARD_LINK_COPY_NAME2);
+ PART_ERROR(H5Lcopy_hard_check);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" hard link copy did not exist\n");
+ PART_ERROR(H5Lcopy_hard_check);
+ }
+
+ if ((link_exists = H5Lexists(src_grp_id, COPY_LINK_TEST_HARD_LINK_NAME2, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if original hard link '%s' exists\n",
+ COPY_LINK_TEST_HARD_LINK_NAME2);
+ PART_ERROR(H5Lcopy_hard_check);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" original hard link did not exist\n");
+ PART_ERROR(H5Lcopy_hard_check);
+ }
+
+ /* Retrieve the new link's info */
+ if (H5Lget_info2(dst_grp_id, COPY_LINK_TEST_HARD_LINK_COPY_NAME2, &new_info, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't retrieve info for link '%s'\n", COPY_LINK_TEST_HARD_LINK_COPY_NAME2);
+ PART_ERROR(H5Lcopy_hard_check);
+ }
+
+ if (new_info.type != orig_info.type) {
+ H5_FAILED();
+ HDprintf(" copied link's link type doesn't match original link's type\n");
+ PART_ERROR(H5Lcopy_hard_check);
+ }
+
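+            /* Compare the object tokens; both hard links should resolve to the
+             * same target object, so the tokens are expected to compare equal.
+             */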
+ if (H5Otoken_cmp(dst_grp_id, &new_info.u.token, &orig_info.u.token, &cmp_value) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to compare link target tokens\n");
+ PART_ERROR(H5Lcopy_hard_check);
+ }
+
+ if (cmp_value != 0) {
+ H5_FAILED();
+ HDprintf(" copied hard link's object token doesn't match original link's object token\n");
+ PART_ERROR(H5Lcopy_hard_check);
+ }
+
+ if (new_info.corder_valid != orig_info.corder_valid) {
+ H5_FAILED();
+ HDprintf(" copied link's 'corder_valid' field doesn't match original link's "
+ "'corder_valid' field\n");
+ PART_ERROR(H5Lcopy_hard_check);
+ }
+
+ if (new_info.corder_valid && orig_info.corder_valid && (new_info.corder != orig_info.corder)) {
+ H5_FAILED();
+ HDprintf(" copied link's creation order value %" PRId64
+ " doesn't match original link's creation order value %" PRId64 "\n",
+ new_info.corder, orig_info.corder);
+ PART_ERROR(H5Lcopy_hard_check);
+ }
+
+ if (new_info.cset != orig_info.cset) {
+ H5_FAILED();
+ HDprintf(" copied link's character set doesn't match original link's character set\n");
+ PART_ERROR(H5Lcopy_hard_check);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lcopy_hard_check);
+
+ PART_BEGIN(H5Lcopy_hard_same_loc)
+ {
+ TESTING_2("H5Lcopy on hard link using H5L_SAME_LOC");
+
+ /* Try to copy a hard link */
+ if (H5Lcreate_hard(group_id, ".", src_grp_id, COPY_LINK_TEST_HARD_LINK_NAME3, H5P_DEFAULT,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create hard link '%s'\n", COPY_LINK_TEST_HARD_LINK_NAME3);
+ PART_ERROR(H5Lcopy_hard_same_loc);
+ }
+
+ /* Verify the link has been created */
+ if ((link_exists = H5Lexists(src_grp_id, COPY_LINK_TEST_HARD_LINK_NAME3, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if hard link '%s' exists\n", COPY_LINK_TEST_HARD_LINK_NAME3);
+ PART_ERROR(H5Lcopy_hard_same_loc);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" hard link did not exist\n");
+ PART_ERROR(H5Lcopy_hard_same_loc);
+ }
+
+ /* Verify the links don't currently exist in the target group */
+ if ((link_exists = H5Lexists(src_grp_id, COPY_LINK_TEST_HARD_LINK_SAME_LOC_NAME, H5P_DEFAULT)) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if hard link '%s' exists\n",
+ COPY_LINK_TEST_HARD_LINK_SAME_LOC_NAME);
+ PART_ERROR(H5Lcopy_hard_same_loc);
+ }
+
+ if (link_exists) {
+ H5_FAILED();
+ HDprintf(" hard link existed in target group before copy!\n");
+ PART_ERROR(H5Lcopy_hard_same_loc);
+ }
+
+ if ((link_exists = H5Lexists(src_grp_id, COPY_LINK_TEST_HARD_LINK_SAME_LOC_NAME2, H5P_DEFAULT)) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if hard link '%s' exists\n",
+ COPY_LINK_TEST_HARD_LINK_SAME_LOC_NAME2);
+ PART_ERROR(H5Lcopy_hard_same_loc);
+ }
+
+ if (link_exists) {
+ H5_FAILED();
+ HDprintf(" hard link existed in target group before copy!\n");
+ PART_ERROR(H5Lcopy_hard_same_loc);
+ }
+
+ /* Copy the link using H5L_SAME_LOC as the first parameter to H5Lcopy */
+ if (H5Lcopy(H5L_SAME_LOC, COPY_LINK_TEST_HARD_LINK_NAME3, src_grp_id,
+ COPY_LINK_TEST_HARD_LINK_SAME_LOC_NAME, H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(
+ " failed to copy hard link '%s' using H5L_SAME_LOC as first parameter to H5Lcopy\n",
+ COPY_LINK_TEST_HARD_LINK_NAME3);
+ PART_ERROR(H5Lcopy_hard_same_loc);
+ }
+
+ /* Copy the link using H5L_SAME_LOC as the third parameter to H5Lcopy */
+ if (H5Lcopy(src_grp_id, COPY_LINK_TEST_HARD_LINK_NAME3, H5L_SAME_LOC,
+ COPY_LINK_TEST_HARD_LINK_SAME_LOC_NAME2, H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(
+ " failed to copy hard link '%s' using H5L_SAME_LOC as third parameter to H5Lcopy\n",
+ COPY_LINK_TEST_HARD_LINK_NAME3);
+ PART_ERROR(H5Lcopy_hard_same_loc);
+ }
+
+            /* Verify the links have been copied and the original still exists in the source group */
+ if ((link_exists = H5Lexists(src_grp_id, COPY_LINK_TEST_HARD_LINK_SAME_LOC_NAME, H5P_DEFAULT)) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if hard link copy '%s' exists\n",
+ COPY_LINK_TEST_HARD_LINK_SAME_LOC_NAME);
+ PART_ERROR(H5Lcopy_hard_same_loc);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" hard link copy did not exist\n");
+ PART_ERROR(H5Lcopy_hard_same_loc);
+ }
+
+ if ((link_exists = H5Lexists(src_grp_id, COPY_LINK_TEST_HARD_LINK_SAME_LOC_NAME2, H5P_DEFAULT)) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if hard link copy '%s' exists\n",
+ COPY_LINK_TEST_HARD_LINK_SAME_LOC_NAME2);
+ PART_ERROR(H5Lcopy_hard_same_loc);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" hard link copy did not exist\n");
+ PART_ERROR(H5Lcopy_hard_same_loc);
+ }
+
+ if ((link_exists = H5Lexists(src_grp_id, COPY_LINK_TEST_HARD_LINK_NAME3, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if original hard link '%s' exists\n",
+ COPY_LINK_TEST_HARD_LINK_NAME3);
+ PART_ERROR(H5Lcopy_hard_same_loc);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" original hard link did not exist\n");
+ PART_ERROR(H5Lcopy_hard_same_loc);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lcopy_hard_same_loc);
+
+ PART_BEGIN(H5Lcopy_soft_no_check)
+ {
+ TESTING_2("H5Lcopy on soft link (copied link's properties not checked)");
+
+ /* Try to copy a soft link */
+ if (H5Lcreate_soft(COPY_LINK_TEST_SOFT_LINK_TARGET_PATH, src_grp_id,
+ COPY_LINK_TEST_SOFT_LINK_NAME, H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create soft link '%s'\n", COPY_LINK_TEST_SOFT_LINK_NAME);
+ PART_ERROR(H5Lcopy_soft_no_check);
+ }
+
+ /* Verify the link has been created */
+ if ((link_exists = H5Lexists(src_grp_id, COPY_LINK_TEST_SOFT_LINK_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if soft link '%s' exists\n", COPY_LINK_TEST_SOFT_LINK_NAME);
+ PART_ERROR(H5Lcopy_soft_no_check);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" soft link did not exist\n");
+ PART_ERROR(H5Lcopy_soft_no_check);
+ }
+
+ /* Verify the link doesn't currently exist in the target group */
+ if ((link_exists = H5Lexists(dst_grp_id, COPY_LINK_TEST_SOFT_LINK_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if soft link '%s' exists\n", COPY_LINK_TEST_SOFT_LINK_NAME);
+ PART_ERROR(H5Lcopy_soft_no_check);
+ }
+
+ if (link_exists) {
+ H5_FAILED();
+ HDprintf(" soft link existed in target group before copy!\n");
+ PART_ERROR(H5Lcopy_soft_no_check);
+ }
+
+ /* Copy the link */
+ if (H5Lcopy(src_grp_id, COPY_LINK_TEST_SOFT_LINK_NAME, dst_grp_id,
+ COPY_LINK_TEST_SOFT_LINK_COPY_NAME, H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to copy soft link '%s'\n", COPY_LINK_TEST_SOFT_LINK_NAME);
+ PART_ERROR(H5Lcopy_soft_no_check);
+ }
+
+ /* Verify the link has been copied and still exists in the source group */
+ if ((link_exists = H5Lexists(dst_grp_id, COPY_LINK_TEST_SOFT_LINK_COPY_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if soft link '%s' copy exists\n",
+ COPY_LINK_TEST_SOFT_LINK_COPY_NAME);
+ PART_ERROR(H5Lcopy_soft_no_check);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" soft link copy did not exist\n");
+ PART_ERROR(H5Lcopy_soft_no_check);
+ }
+
+ if ((link_exists = H5Lexists(src_grp_id, COPY_LINK_TEST_SOFT_LINK_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if original soft link '%s' exists\n",
+ COPY_LINK_TEST_SOFT_LINK_NAME);
+ PART_ERROR(H5Lcopy_soft_no_check);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" original soft link did not exist\n");
+ PART_ERROR(H5Lcopy_soft_no_check);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lcopy_soft_no_check);
+
+ PART_BEGIN(H5Lcopy_soft_check)
+ {
+ H5L_info2_t orig_info, new_info;
+ char orig_link_val[COPY_LINK_TEST_LINK_VAL_BUF_SIZE];
+ char new_link_val[COPY_LINK_TEST_LINK_VAL_BUF_SIZE];
+
+ TESTING_2("H5Lcopy on soft link (copied link's properties checked)");
+
+ /* Try to copy a soft link */
+ if (H5Lcreate_soft(COPY_LINK_TEST_SOFT_LINK_TARGET_PATH, src_grp_id,
+ COPY_LINK_TEST_SOFT_LINK_NAME2, H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create soft link '%s'\n", COPY_LINK_TEST_SOFT_LINK_NAME2);
+ PART_ERROR(H5Lcopy_soft_check);
+ }
+
+ /* Verify the link has been created */
+ if ((link_exists = H5Lexists(src_grp_id, COPY_LINK_TEST_SOFT_LINK_NAME2, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if soft link '%s' exists\n", COPY_LINK_TEST_SOFT_LINK_NAME2);
+ PART_ERROR(H5Lcopy_soft_check);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" soft link did not exist\n");
+ PART_ERROR(H5Lcopy_soft_check);
+ }
+
+ /* Retrieve the link's info */
+ if (H5Lget_info2(src_grp_id, COPY_LINK_TEST_SOFT_LINK_NAME2, &orig_info, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't retrieve info for link '%s'\n", COPY_LINK_TEST_SOFT_LINK_NAME2);
+ PART_ERROR(H5Lcopy_soft_check);
+ }
+
+ /* Retrieve the link's value */
+ if (H5Lget_val(src_grp_id, COPY_LINK_TEST_SOFT_LINK_NAME2, orig_link_val,
+ COPY_LINK_TEST_LINK_VAL_BUF_SIZE, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't retrieve value for soft link '%s'\n", COPY_LINK_TEST_SOFT_LINK_NAME2);
+ PART_ERROR(H5Lcopy_soft_check);
+ }
+
+ /* Verify the link doesn't currently exist in the target group */
+ if ((link_exists = H5Lexists(dst_grp_id, COPY_LINK_TEST_SOFT_LINK_NAME2, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if soft link '%s' exists\n", COPY_LINK_TEST_SOFT_LINK_NAME2);
+ PART_ERROR(H5Lcopy_soft_check);
+ }
+
+ if (link_exists) {
+ H5_FAILED();
+ HDprintf(" soft link existed in target group before copy!\n");
+ PART_ERROR(H5Lcopy_soft_check);
+ }
+
+ /* Copy the link */
+ if (H5Lcopy(src_grp_id, COPY_LINK_TEST_SOFT_LINK_NAME2, dst_grp_id,
+ COPY_LINK_TEST_SOFT_LINK_COPY_NAME2, H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to copy soft link '%s'\n", COPY_LINK_TEST_SOFT_LINK_NAME2);
+ PART_ERROR(H5Lcopy_soft_check);
+ }
+
+ /* Verify the link has been copied and still exists in the source group */
+ if ((link_exists = H5Lexists(dst_grp_id, COPY_LINK_TEST_SOFT_LINK_COPY_NAME2, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if soft link '%s' copy exists\n",
+ COPY_LINK_TEST_SOFT_LINK_COPY_NAME2);
+ PART_ERROR(H5Lcopy_soft_check);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" soft link copy did not exist\n");
+ PART_ERROR(H5Lcopy_soft_check);
+ }
+
+ if ((link_exists = H5Lexists(src_grp_id, COPY_LINK_TEST_SOFT_LINK_NAME2, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if original soft link '%s' exists\n",
+ COPY_LINK_TEST_SOFT_LINK_NAME2);
+ PART_ERROR(H5Lcopy_soft_check);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" original soft link did not exist\n");
+ PART_ERROR(H5Lcopy_soft_check);
+ }
+
+ /* Retrieve the new link's info */
+ if (H5Lget_info2(dst_grp_id, COPY_LINK_TEST_SOFT_LINK_COPY_NAME2, &new_info, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't retrieve info for link '%s'\n", COPY_LINK_TEST_SOFT_LINK_COPY_NAME2);
+ PART_ERROR(H5Lcopy_soft_check);
+ }
+
+ if (new_info.type != orig_info.type) {
+ H5_FAILED();
+ HDprintf(" copied link's link type doesn't match original link's type\n");
+ PART_ERROR(H5Lcopy_soft_check);
+ }
+
+ if (new_info.u.val_size != orig_info.u.val_size) {
+ H5_FAILED();
+ HDprintf(" copied soft link's value size of %llu doesn't match original link's value size "
+ "of %llu\n",
+ (unsigned long long)new_info.u.val_size, (unsigned long long)orig_info.u.val_size);
+ PART_ERROR(H5Lcopy_soft_check);
+ }
+
+ if (new_info.corder_valid != orig_info.corder_valid) {
+ H5_FAILED();
+ HDprintf(" copied link's 'corder_valid' field doesn't match original link's "
+ "'corder_valid' field\n");
+ PART_ERROR(H5Lcopy_soft_check);
+ }
+
+ if (new_info.corder_valid && orig_info.corder_valid && (new_info.corder != orig_info.corder)) {
+ H5_FAILED();
+ HDprintf(" copied link's creation order value %" PRId64
+ " doesn't match original link's creation order value %" PRId64 "\n",
+ new_info.corder, orig_info.corder);
+ PART_ERROR(H5Lcopy_soft_check);
+ }
+
+ if (new_info.cset != orig_info.cset) {
+ H5_FAILED();
+ HDprintf(" copied link's character set doesn't match original link's character set\n");
+ PART_ERROR(H5Lcopy_soft_check);
+ }
+
+ /* Check the soft link's value */
+ if (H5Lget_val(dst_grp_id, COPY_LINK_TEST_SOFT_LINK_COPY_NAME2, new_link_val,
+ COPY_LINK_TEST_LINK_VAL_BUF_SIZE, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't retrieve value for soft link '%s'\n",
+ COPY_LINK_TEST_SOFT_LINK_COPY_NAME2);
+ PART_ERROR(H5Lcopy_soft_check);
+ }
+
+ if (HDstrncmp(orig_link_val, new_link_val, COPY_LINK_TEST_LINK_VAL_BUF_SIZE)) {
+ H5_FAILED();
+ HDprintf(" copied soft link's value '%s' doesn't match original link's value '%s'\n",
+ new_link_val, orig_link_val);
+ PART_ERROR(H5Lcopy_soft_check);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lcopy_soft_check);
+
+ PART_BEGIN(H5Lcopy_soft_same_loc)
+ {
+ TESTING_2("H5Lcopy on soft link using H5L_SAME_LOC");
+
+ /* Try to copy a soft link */
+ if (H5Lcreate_soft(COPY_LINK_TEST_SOFT_LINK_TARGET_PATH, src_grp_id,
+ COPY_LINK_TEST_SOFT_LINK_NAME3, H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create soft link '%s'\n", COPY_LINK_TEST_SOFT_LINK_NAME3);
+ PART_ERROR(H5Lcopy_soft_same_loc);
+ }
+
+ /* Verify the link has been created */
+ if ((link_exists = H5Lexists(src_grp_id, COPY_LINK_TEST_SOFT_LINK_NAME3, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if soft link '%s' exists\n", COPY_LINK_TEST_SOFT_LINK_NAME3);
+ PART_ERROR(H5Lcopy_soft_same_loc);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" soft link did not exist\n");
+ PART_ERROR(H5Lcopy_soft_same_loc);
+ }
+
+ /* Verify the links don't currently exist in the target group */
+ if ((link_exists = H5Lexists(src_grp_id, COPY_LINK_TEST_SOFT_LINK_SAME_LOC_NAME, H5P_DEFAULT)) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if soft link '%s' exists\n",
+ COPY_LINK_TEST_SOFT_LINK_SAME_LOC_NAME);
+ PART_ERROR(H5Lcopy_soft_same_loc);
+ }
+
+ if (link_exists) {
+ H5_FAILED();
+ HDprintf(" soft link existed in target group before copy!\n");
+ PART_ERROR(H5Lcopy_soft_same_loc);
+ }
+
+ if ((link_exists = H5Lexists(src_grp_id, COPY_LINK_TEST_SOFT_LINK_SAME_LOC_NAME2, H5P_DEFAULT)) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if soft link '%s' exists\n",
+ COPY_LINK_TEST_SOFT_LINK_SAME_LOC_NAME2);
+ PART_ERROR(H5Lcopy_soft_same_loc);
+ }
+
+ if (link_exists) {
+ H5_FAILED();
+ HDprintf(" soft link existed in target group before copy!\n");
+ PART_ERROR(H5Lcopy_soft_same_loc);
+ }
+
+ /* Copy the link using H5L_SAME_LOC as the first parameter to H5Lcopy */
+ if (H5Lcopy(H5L_SAME_LOC, COPY_LINK_TEST_SOFT_LINK_NAME3, src_grp_id,
+ COPY_LINK_TEST_SOFT_LINK_SAME_LOC_NAME, H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(
+ " failed to copy soft link '%s' using H5L_SAME_LOC as first parameter to H5Lcopy\n",
+ COPY_LINK_TEST_SOFT_LINK_NAME3);
+ PART_ERROR(H5Lcopy_soft_same_loc);
+ }
+
+ /* Copy the link using H5L_SAME_LOC as the third parameter to H5Lcopy */
+ if (H5Lcopy(src_grp_id, COPY_LINK_TEST_SOFT_LINK_NAME3, H5L_SAME_LOC,
+ COPY_LINK_TEST_SOFT_LINK_SAME_LOC_NAME2, H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(
+ " failed to copy soft link '%s' using H5L_SAME_LOC as third parameter to H5Lcopy\n",
+ COPY_LINK_TEST_SOFT_LINK_NAME3);
+ PART_ERROR(H5Lcopy_soft_same_loc);
+ }
+
+ /* Verify the links have been copied and the original still exists in the source group */
+ if ((link_exists = H5Lexists(src_grp_id, COPY_LINK_TEST_SOFT_LINK_SAME_LOC_NAME, H5P_DEFAULT)) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if soft link '%s' copy exists\n",
+ COPY_LINK_TEST_SOFT_LINK_SAME_LOC_NAME);
+ PART_ERROR(H5Lcopy_soft_same_loc);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" soft link copy did not exist\n");
+ PART_ERROR(H5Lcopy_soft_same_loc);
+ }
+
+ if ((link_exists = H5Lexists(src_grp_id, COPY_LINK_TEST_SOFT_LINK_SAME_LOC_NAME2, H5P_DEFAULT)) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if soft link '%s' copy exists\n",
+ COPY_LINK_TEST_SOFT_LINK_SAME_LOC_NAME2);
+ PART_ERROR(H5Lcopy_soft_same_loc);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" soft link copy did not exist\n");
+ PART_ERROR(H5Lcopy_soft_same_loc);
+ }
+
+ if ((link_exists = H5Lexists(src_grp_id, COPY_LINK_TEST_SOFT_LINK_NAME3, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if original soft link '%s' exists\n",
+ COPY_LINK_TEST_SOFT_LINK_NAME3);
+ PART_ERROR(H5Lcopy_soft_same_loc);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" original soft link did not exist\n");
+ PART_ERROR(H5Lcopy_soft_same_loc);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lcopy_soft_same_loc);
+
+ PART_BEGIN(H5Lcopy_external_no_check)
+ {
+ TESTING_2("H5Lcopy on external link (copied link's properties not checked)");
+#ifndef NO_EXTERNAL_LINKS
+ HDsnprintf(ext_link_filename, H5_API_TEST_FILENAME_MAX_LENGTH, "%s",
+ EXTERNAL_LINK_TEST_FILE_NAME);
+
+ if ((ext_file_id = H5Fcreate(ext_link_filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create file '%s' for external link to reference\n", ext_link_filename);
+ PART_ERROR(H5Lcopy_external_no_check);
+ }
+
+ if (H5Fclose(ext_file_id) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't close file '%s'\n", ext_link_filename);
+ PART_ERROR(H5Lcopy_external_no_check);
+ }
+
+ /* Try to copy an external link */
+ if (H5Lcreate_external(ext_link_filename, "/", src_grp_id, COPY_LINK_TEST_EXTERNAL_LINK_NAME,
+ H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create external link '%s'\n", COPY_LINK_TEST_EXTERNAL_LINK_NAME);
+ PART_ERROR(H5Lcopy_external_no_check);
+ }
+
+ /* Verify the link has been created */
+ if ((link_exists = H5Lexists(src_grp_id, COPY_LINK_TEST_EXTERNAL_LINK_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if external link '%s' exists\n",
+ COPY_LINK_TEST_EXTERNAL_LINK_NAME);
+ PART_ERROR(H5Lcopy_external_no_check);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" external link did not exist\n");
+ PART_ERROR(H5Lcopy_external_no_check);
+ }
+
+ /* Verify the link doesn't currently exist in the target group */
+ if ((link_exists = H5Lexists(dst_grp_id, COPY_LINK_TEST_EXTERNAL_LINK_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if external link '%s' exists\n",
+ COPY_LINK_TEST_EXTERNAL_LINK_NAME);
+ PART_ERROR(H5Lcopy_external_no_check);
+ }
+
+ if (link_exists) {
+ H5_FAILED();
+ HDprintf(" external link existed in target group before copy!\n");
+ PART_ERROR(H5Lcopy_external_no_check);
+ }
+
+ /* Copy the link */
+ if (H5Lcopy(src_grp_id, COPY_LINK_TEST_EXTERNAL_LINK_NAME, dst_grp_id,
+ COPY_LINK_TEST_EXTERNAL_LINK_COPY_NAME, H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to copy external link '%s'\n", COPY_LINK_TEST_EXTERNAL_LINK_NAME);
+ PART_ERROR(H5Lcopy_external_no_check);
+ }
+
+ /* Verify the link has been copied and still exists in the source group */
+ if ((link_exists = H5Lexists(dst_grp_id, COPY_LINK_TEST_EXTERNAL_LINK_COPY_NAME, H5P_DEFAULT)) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if external link copy '%s' exists\n",
+ COPY_LINK_TEST_EXTERNAL_LINK_COPY_NAME);
+ PART_ERROR(H5Lcopy_external_no_check);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" external link copy did not exist\n");
+ PART_ERROR(H5Lcopy_external_no_check);
+ }
+
+ if ((link_exists = H5Lexists(src_grp_id, COPY_LINK_TEST_EXTERNAL_LINK_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if original external link '%s' exists\n",
+ COPY_LINK_TEST_EXTERNAL_LINK_NAME);
+ PART_ERROR(H5Lcopy_external_no_check);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" original external link did not exist\n");
+ PART_ERROR(H5Lcopy_external_no_check);
+ }
+
+ PASSED();
+#else
+ SKIPPED();
+ PART_EMPTY(H5Lcopy_external_no_check);
+#endif
+ }
+ PART_END(H5Lcopy_external_no_check);
+
+ H5E_BEGIN_TRY
+ {
+ H5Fclose(ext_file_id);
+ ext_file_id = H5I_INVALID_HID;
+ }
+ H5E_END_TRY;
+
+ PART_BEGIN(H5Lcopy_external_check)
+ {
+#ifndef NO_EXTERNAL_LINKS
+ H5L_info2_t orig_info, new_info;
+ const char *orig_filename, *new_filename;
+ const char *orig_objname, *new_objname;
+ unsigned unpack_flags = 0;
+ char orig_link_val[COPY_LINK_TEST_LINK_VAL_BUF_SIZE];
+ char new_link_val[COPY_LINK_TEST_LINK_VAL_BUF_SIZE];
+#endif
+
+ TESTING_2("H5Lcopy on external link (copied link's properties checked)");
+#ifndef NO_EXTERNAL_LINKS
+ HDsnprintf(ext_link_filename, H5_API_TEST_FILENAME_MAX_LENGTH, "%s",
+ EXTERNAL_LINK_TEST_FILE_NAME);
+
+ if ((ext_file_id = H5Fcreate(ext_link_filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create file '%s' for external link to reference\n", ext_link_filename);
+ PART_ERROR(H5Lcopy_external_check);
+ }
+
+ if (H5Fclose(ext_file_id) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't close file '%s'\n", ext_link_filename);
+ PART_ERROR(H5Lcopy_external_check);
+ }
+
+ /* Try to copy an external link */
+ if (H5Lcreate_external(ext_link_filename, "/", src_grp_id, COPY_LINK_TEST_EXTERNAL_LINK_NAME2,
+ H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create external link '%s'\n", COPY_LINK_TEST_EXTERNAL_LINK_NAME2);
+ PART_ERROR(H5Lcopy_external_check);
+ }
+
+ /* Verify the link has been created */
+ if ((link_exists = H5Lexists(src_grp_id, COPY_LINK_TEST_EXTERNAL_LINK_NAME2, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if external link '%s' exists\n",
+ COPY_LINK_TEST_EXTERNAL_LINK_NAME2);
+ PART_ERROR(H5Lcopy_external_check);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" external link did not exist\n");
+ PART_ERROR(H5Lcopy_external_check);
+ }
+
+ /* Retrieve the link's info */
+ if (H5Lget_info2(src_grp_id, COPY_LINK_TEST_EXTERNAL_LINK_NAME2, &orig_info, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't retrieve info for link '%s'\n", COPY_LINK_TEST_EXTERNAL_LINK_NAME2);
+ PART_ERROR(H5Lcopy_external_check);
+ }
+
+ /* Retrieve the link's value */
+ if (H5Lget_val(src_grp_id, COPY_LINK_TEST_EXTERNAL_LINK_NAME2, orig_link_val,
+ COPY_LINK_TEST_LINK_VAL_BUF_SIZE, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't retrieve value for external link '%s'\n",
+ COPY_LINK_TEST_EXTERNAL_LINK_NAME2);
+ PART_ERROR(H5Lcopy_external_check);
+ }
+
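+            /* Decode the external link's packed value buffer into its flags,
+             * target file name and target object path components.
+             */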
+ if (H5Lunpack_elink_val(orig_link_val, orig_info.u.val_size, &unpack_flags, &orig_filename,
+ &orig_objname) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't unpack original external link's value buffer\n");
+ PART_ERROR(H5Lcopy_external_check);
+ }
+
+ /* Verify the link doesn't currently exist in the target group */
+ if ((link_exists = H5Lexists(dst_grp_id, COPY_LINK_TEST_EXTERNAL_LINK_NAME2, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if external link '%s' exists\n",
+ COPY_LINK_TEST_EXTERNAL_LINK_NAME2);
+ PART_ERROR(H5Lcopy_external_check);
+ }
+
+ if (link_exists) {
+ H5_FAILED();
+ HDprintf(" external link existed in target group before copy!\n");
+ PART_ERROR(H5Lcopy_external_check);
+ }
+
+ /* Copy the link */
+ if (H5Lcopy(src_grp_id, COPY_LINK_TEST_EXTERNAL_LINK_NAME2, dst_grp_id,
+ COPY_LINK_TEST_EXTERNAL_LINK_COPY_NAME2, H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to copy external link '%s'\n", COPY_LINK_TEST_EXTERNAL_LINK_NAME2);
+ PART_ERROR(H5Lcopy_external_check);
+ }
+
+ /* Verify the link has been copied and still exists in the source group */
+ if ((link_exists = H5Lexists(dst_grp_id, COPY_LINK_TEST_EXTERNAL_LINK_COPY_NAME2, H5P_DEFAULT)) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if external link copy '%s' exists\n",
+ COPY_LINK_TEST_EXTERNAL_LINK_COPY_NAME2);
+ PART_ERROR(H5Lcopy_external_check);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" external link copy did not exist\n");
+ PART_ERROR(H5Lcopy_external_check);
+ }
+
+ if ((link_exists = H5Lexists(src_grp_id, COPY_LINK_TEST_EXTERNAL_LINK_NAME2, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if original external link '%s' exists\n",
+ COPY_LINK_TEST_EXTERNAL_LINK_NAME2);
+ PART_ERROR(H5Lcopy_external_check);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" original external link did not exist\n");
+ PART_ERROR(H5Lcopy_external_check);
+ }
+
+ /* Retrieve the new link's info */
+ if (H5Lget_info2(dst_grp_id, COPY_LINK_TEST_EXTERNAL_LINK_COPY_NAME2, &new_info, H5P_DEFAULT) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't retrieve info for link '%s'\n",
+ COPY_LINK_TEST_EXTERNAL_LINK_COPY_NAME2);
+ PART_ERROR(H5Lcopy_external_check);
+ }
+
+ if (new_info.type != orig_info.type) {
+ H5_FAILED();
+ HDprintf(" copied link's link type doesn't match original link's type\n");
+ PART_ERROR(H5Lcopy_external_check);
+ }
+
+ if (new_info.u.val_size != orig_info.u.val_size) {
+ H5_FAILED();
+ HDprintf(" copied external link's value size of %llu doesn't match original link's value "
+ "size of %llu\n",
+ (unsigned long long)new_info.u.val_size, (unsigned long long)orig_info.u.val_size);
+ PART_ERROR(H5Lcopy_external_check);
+ }
+
+ if (new_info.corder_valid != orig_info.corder_valid) {
+ H5_FAILED();
+ HDprintf(" copied link's 'corder_valid' field doesn't match original link's "
+ "'corder_valid' field\n");
+ PART_ERROR(H5Lcopy_external_check);
+ }
+
+ if (new_info.corder_valid && orig_info.corder_valid && (new_info.corder != orig_info.corder)) {
+ H5_FAILED();
+ HDprintf(" copied link's creation order value %lld doesn't match original link's creation "
+ "order value %lld\n",
+ new_info.corder, orig_info.corder);
+ PART_ERROR(H5Lcopy_external_check);
+ }
+
+ if (new_info.cset != orig_info.cset) {
+ H5_FAILED();
+ HDprintf(" copied link's character set doesn't match original link's character set\n");
+ PART_ERROR(H5Lcopy_external_check);
+ }
+
+ /* Check the external link's value */
+ if (H5Lget_val(dst_grp_id, COPY_LINK_TEST_EXTERNAL_LINK_COPY_NAME2, new_link_val,
+ COPY_LINK_TEST_LINK_VAL_BUF_SIZE, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't retrieve value for external link '%s'\n",
+ COPY_LINK_TEST_EXTERNAL_LINK_COPY_NAME2);
+ PART_ERROR(H5Lcopy_external_check);
+ }
+
+ if (H5Lunpack_elink_val(new_link_val, new_info.u.val_size, &unpack_flags, &new_filename,
+ &new_objname) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't unpack copied external link's value buffer\n");
+ PART_ERROR(H5Lcopy_external_check);
+ }
+
+            if (HDstrncmp(new_filename, orig_filename, HDstrlen(orig_filename)) != 0) {
+ H5_FAILED();
+ HDprintf(" copied external link's filename '%s' doesn't match original external link's "
+ "filename '%s'\n",
+ new_filename, orig_filename);
+ PART_ERROR(H5Lcopy_external_check);
+ }
+
+            if (HDstrncmp(new_objname, orig_objname, HDstrlen(orig_objname)) != 0) {
+ H5_FAILED();
+ HDprintf(" copied external link's object name '%s' doesn't match original external link's "
+ "object name '%s'\n",
+ new_objname, orig_objname);
+ PART_ERROR(H5Lcopy_external_check);
+ }
+
+ PASSED();
+#else
+ SKIPPED();
+ PART_EMPTY(H5Lcopy_external_check);
+#endif
+ }
+ PART_END(H5Lcopy_external_check);
+
+ H5E_BEGIN_TRY
+ {
+ H5Fclose(ext_file_id);
+ ext_file_id = H5I_INVALID_HID;
+ }
+ H5E_END_TRY;
+
+ PART_BEGIN(H5Lcopy_external_same_loc)
+ {
+ TESTING_2("H5Lcopy on external link using H5L_SAME_LOC");
+#ifndef NO_EXTERNAL_LINKS
+ HDsnprintf(ext_link_filename, H5_API_TEST_FILENAME_MAX_LENGTH, "%s",
+ EXTERNAL_LINK_TEST_FILE_NAME);
+
+ if ((ext_file_id = H5Fcreate(ext_link_filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create file '%s' for external link to reference\n", ext_link_filename);
+ PART_ERROR(H5Lcopy_external_same_loc);
+ }
+
+ if (H5Fclose(ext_file_id) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't close file '%s'\n", ext_link_filename);
+ PART_ERROR(H5Lcopy_external_same_loc);
+ }
+
+ /* Try to copy an external link */
+ if (H5Lcreate_external(ext_link_filename, "/", src_grp_id, COPY_LINK_TEST_EXTERNAL_LINK_NAME3,
+ H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create external link '%s'\n", COPY_LINK_TEST_EXTERNAL_LINK_NAME3);
+ PART_ERROR(H5Lcopy_external_same_loc);
+ }
+
+ /* Verify the link has been created */
+ if ((link_exists = H5Lexists(src_grp_id, COPY_LINK_TEST_EXTERNAL_LINK_NAME3, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if external link '%s' exists\n",
+ COPY_LINK_TEST_EXTERNAL_LINK_NAME3);
+ PART_ERROR(H5Lcopy_external_same_loc);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" external link did not exist\n");
+ PART_ERROR(H5Lcopy_external_same_loc);
+ }
+
+ /* Verify the links don't currently exist in the target group */
+ if ((link_exists =
+ H5Lexists(src_grp_id, COPY_LINK_TEST_EXTERNAL_LINK_SAME_LOC_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if external link '%s' exists\n",
+ COPY_LINK_TEST_EXTERNAL_LINK_SAME_LOC_NAME);
+ PART_ERROR(H5Lcopy_external_same_loc);
+ }
+
+ if (link_exists) {
+ H5_FAILED();
+ HDprintf(" external link existed in target group before copy!\n");
+ PART_ERROR(H5Lcopy_external_same_loc);
+ }
+
+ if ((link_exists =
+ H5Lexists(src_grp_id, COPY_LINK_TEST_EXTERNAL_LINK_SAME_LOC_NAME2, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if external link '%s' exists\n",
+ COPY_LINK_TEST_EXTERNAL_LINK_SAME_LOC_NAME2);
+ PART_ERROR(H5Lcopy_external_same_loc);
+ }
+
+ if (link_exists) {
+ H5_FAILED();
+ HDprintf(" external link existed in target group before copy!\n");
+ PART_ERROR(H5Lcopy_external_same_loc);
+ }
+
+ /* Copy the link using H5L_SAME_LOC as the first parameter to H5Lcopy */
+ if (H5Lcopy(H5L_SAME_LOC, COPY_LINK_TEST_EXTERNAL_LINK_NAME3, src_grp_id,
+ COPY_LINK_TEST_EXTERNAL_LINK_SAME_LOC_NAME, H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to copy external link '%s' using H5L_SAME_LOC as first parameter to "
+ "H5Lcopy\n",
+ COPY_LINK_TEST_EXTERNAL_LINK_NAME3);
+ PART_ERROR(H5Lcopy_external_same_loc);
+ }
+
+ /* Copy the link using H5L_SAME_LOC as the third parameter to H5Lcopy */
+ if (H5Lcopy(src_grp_id, COPY_LINK_TEST_EXTERNAL_LINK_NAME3, H5L_SAME_LOC,
+ COPY_LINK_TEST_EXTERNAL_LINK_SAME_LOC_NAME2, H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to copy external link '%s' using H5L_SAME_LOC as third parameter to "
+ "H5Lcopy\n",
+ COPY_LINK_TEST_EXTERNAL_LINK_NAME3);
+ PART_ERROR(H5Lcopy_external_same_loc);
+ }
+
+ /* Verify the links have been copied and the original still exists in the source group */
+ if ((link_exists =
+ H5Lexists(src_grp_id, COPY_LINK_TEST_EXTERNAL_LINK_SAME_LOC_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if external link copy '%s' exists\n",
+ COPY_LINK_TEST_EXTERNAL_LINK_SAME_LOC_NAME);
+ PART_ERROR(H5Lcopy_external_same_loc);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" external link copy did not exist\n");
+ PART_ERROR(H5Lcopy_external_same_loc);
+ }
+
+ if ((link_exists =
+ H5Lexists(src_grp_id, COPY_LINK_TEST_EXTERNAL_LINK_SAME_LOC_NAME2, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if external link copy '%s' exists\n",
+ COPY_LINK_TEST_EXTERNAL_LINK_SAME_LOC_NAME2);
+ PART_ERROR(H5Lcopy_external_same_loc);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" external link copy did not exist\n");
+ PART_ERROR(H5Lcopy_external_same_loc);
+ }
+
+ if ((link_exists = H5Lexists(src_grp_id, COPY_LINK_TEST_EXTERNAL_LINK_NAME3, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if original external link '%s' exists\n",
+ COPY_LINK_TEST_EXTERNAL_LINK_NAME3);
+ PART_ERROR(H5Lcopy_external_same_loc);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" original external link did not exist\n");
+ PART_ERROR(H5Lcopy_external_same_loc);
+ }
+
+ PASSED();
+#else
+ SKIPPED();
+ PART_EMPTY(H5Lcopy_external_same_loc);
+#endif
+ }
+ PART_END(H5Lcopy_external_same_loc);
+
+ H5E_BEGIN_TRY
+ {
+ H5Fclose(ext_file_id);
+ ext_file_id = H5I_INVALID_HID;
+ }
+ H5E_END_TRY;
+
+ PART_BEGIN(H5Lcopy_ud_no_check)
+ {
+ TESTING_2("H5Lcopy on user-defined link (copied link's properties not checked)");
+
+ /* TODO */
+
+ SKIPPED();
+ PART_EMPTY(H5Lcopy_ud_no_check);
+ }
+ PART_END(H5Lcopy_ud_no_check);
+
+ PART_BEGIN(H5Lcopy_ud_check)
+ {
+ TESTING_2("H5Lcopy on user-defined link (copied link's properties checked)");
+
+ /* TODO */
+
+ SKIPPED();
+ PART_EMPTY(H5Lcopy_ud_check);
+ }
+ PART_END(H5Lcopy_ud_check);
+
+ PART_BEGIN(H5Lcopy_ud_same_loc)
+ {
+ TESTING_2("H5Lcopy on user-defined link using H5L_SAME_LOC");
+
+ /* TODO */
+
+ SKIPPED();
+ PART_EMPTY(H5Lcopy_ud_same_loc);
+ }
+ PART_END(H5Lcopy_ud_same_loc);
+ }
+ END_MULTIPART;
+
+ TESTING_2("test cleanup");
+
+ if (H5Gclose(dst_grp_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(src_grp_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Gclose(dst_grp_id);
+ H5Gclose(src_grp_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(ext_file_id);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to check that using H5Lcopy to copy links into a
+ * group which already contains links will cause the new links
+ * to be assigned creation order values starting at the target
+ * group's current maximum link creation order value and counting
+ * upwards. This ensures that H5Lcopy can never leave a group
+ * with two links that share the same creation order value.
+ */
+static int
+test_copy_links_into_group_with_links(void)
+{
+ TESTING("H5Lcopy adjusting creation order values for copied links");
+
+ SKIPPED();
+
+ return 1;
+}
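+
+/*
+ * Illustrative sketch only (not registered in the test list and excluded
+ * from the build): one possible shape for the check described above, under
+ * the assumption that 'src_grp_id' holds a link named 'link_name' and that
+ * 'dst_grp_id' tracks link creation order and already contains links. The
+ * parameter and link names here are placeholders, not part of the test suite.
+ */
+#if 0
+static int
+copied_link_creation_order_sketch(hid_t src_grp_id, const char *link_name, hid_t dst_grp_id)
+{
+    H5G_info_t  grp_info;
+    H5L_info2_t link_info;
+
+    /* Record the destination group's maximum creation order value before the copy */
+    if (H5Gget_info(dst_grp_id, &grp_info) < 0)
+        return -1;
+
+    /* Copy the link into the group that already contains links */
+    if (H5Lcopy(src_grp_id, link_name, dst_grp_id, link_name, H5P_DEFAULT, H5P_DEFAULT) < 0)
+        return -1;
+
+    /* The copied link's creation order value should not fall below the previous maximum */
+    if (H5Lget_info2(dst_grp_id, link_name, &link_info, H5P_DEFAULT) < 0)
+        return -1;
+    if (link_info.corder_valid && (link_info.corder < grp_info.max_corder))
+        return -1;
+
+    return 0;
+}
+#endif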
+
+/*
+ * A test to check the behavior of copying a link across files.
+ * This should fail for hard links but succeed for soft and
+ * external links (and user-defined links of those types).
+ *
+ * TODO: Ideally, tests should be written to verify that the
+ * copied links retain the properties of the original
+ * links.
+ */
+static int
+test_copy_link_across_files(void)
+{
+ TESTING("link copying across files");
+
+ /* TODO */
+
+ SKIPPED();
+
+ return 0;
+}
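+
+/*
+ * Illustrative sketch only (not registered in the test list and excluded
+ * from the build): the cross-file behavior described above, under the
+ * assumption that 'src_file_id' and 'dst_file_id' are two open files. The
+ * link names used here are placeholders. Copying a hard link across files
+ * is expected to fail, while copying a soft link is expected to succeed.
+ */
+#if 0
+static int
+copy_link_across_files_sketch(hid_t src_file_id, hid_t dst_file_id)
+{
+    herr_t ret = -1;
+
+    /* A hard link can only reference an object in its own file, so the copy should fail */
+    if (H5Lcreate_hard(src_file_id, "/", src_file_id, "hard_link", H5P_DEFAULT, H5P_DEFAULT) < 0)
+        return -1;
+    H5E_BEGIN_TRY
+    {
+        ret = H5Lcopy(src_file_id, "hard_link", dst_file_id, "hard_link", H5P_DEFAULT, H5P_DEFAULT);
+    }
+    H5E_END_TRY;
+    if (ret >= 0)
+        return -1;
+
+    /* A soft link only stores a path string, so copying it across files should succeed */
+    if (H5Lcreate_soft("/target", src_file_id, "soft_link", H5P_DEFAULT, H5P_DEFAULT) < 0)
+        return -1;
+    if (H5Lcopy(src_file_id, "soft_link", dst_file_id, "soft_link", H5P_DEFAULT, H5P_DEFAULT) < 0)
+        return -1;
+
+    return 0;
+}
+#endif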
+
+/*
+ * A test to check that a link can't be copied
+ * when H5Lcopy is passed invalid parameters.
+ */
+static int
+test_copy_link_invalid_params(void)
+{
+ herr_t err_ret = -1;
+ htri_t link_exists;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t src_grp_id = H5I_INVALID_HID, dst_grp_id = H5I_INVALID_HID;
+ char ext_link_filename[H5_API_TEST_FILENAME_MAX_LENGTH];
+ hid_t ext_file_id = H5I_INVALID_HID;
+
+ TESTING_MULTIPART("H5Lcopy with invalid parameters");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_LINK_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_LINK_MORE) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_HARD_LINKS)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file, group, or basic and more link aren't supported with this "
+ "connector\n");
+ return 0;
+ }
+
+ TESTING_2("test setup");
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, LINK_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", LINK_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, COPY_LINK_INVALID_PARAMS_TEST_SUBGROUP_NAME, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create group '%s'\n", COPY_LINK_INVALID_PARAMS_TEST_SUBGROUP_NAME);
+ goto error;
+ }
+
+ if ((src_grp_id = H5Gcreate2(group_id, COPY_LINK_INVALID_PARAMS_TEST_SRC_GROUP_NAME, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create group '%s'\n", COPY_LINK_INVALID_PARAMS_TEST_SRC_GROUP_NAME);
+ goto error;
+ }
+
+ if ((dst_grp_id = H5Gcreate2(group_id, COPY_LINK_INVALID_PARAMS_TEST_DST_GROUP_NAME, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create group '%s'\n", COPY_LINK_INVALID_PARAMS_TEST_DST_GROUP_NAME);
+ goto error;
+ }
+
+ if (H5Lcreate_hard(group_id, ".", src_grp_id, COPY_LINK_INVALID_PARAMS_TEST_HARD_LINK_NAME, H5P_DEFAULT,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create hard link '%s'\n", COPY_LINK_INVALID_PARAMS_TEST_HARD_LINK_NAME);
+ goto error;
+ }
+
+ /* Verify the link has been created */
+ if ((link_exists = H5Lexists(src_grp_id, COPY_LINK_INVALID_PARAMS_TEST_HARD_LINK_NAME, H5P_DEFAULT)) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if hard link '%s' exists\n",
+ COPY_LINK_INVALID_PARAMS_TEST_HARD_LINK_NAME);
+ goto error;
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" hard link did not exist\n");
+ goto error;
+ }
+
+ PASSED();
+
+ BEGIN_MULTIPART
+ {
+ PART_BEGIN(H5Lcopy_invalid_src_loc_id)
+ {
+ TESTING_2("H5Lcopy with an invalid source location ID");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret =
+ H5Lcopy(H5I_INVALID_HID, COPY_LINK_INVALID_PARAMS_TEST_HARD_LINK_NAME, dst_grp_id,
+ COPY_LINK_INVALID_PARAMS_TEST_HARD_LINK_COPY_NAME, H5P_DEFAULT, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Lcopy succeeded with an invalid source location ID\n");
+ PART_ERROR(H5Lcopy_invalid_src_loc_id);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lcopy_invalid_src_loc_id);
+
+ PART_BEGIN(H5Lcopy_invalid_src_name)
+ {
+ TESTING_2("H5Lcopy with an invalid source name");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret =
+ H5Lcopy(src_grp_id, NULL, dst_grp_id, COPY_LINK_INVALID_PARAMS_TEST_HARD_LINK_COPY_NAME,
+ H5P_DEFAULT, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Lcopy succeeded with a NULL source name\n");
+ PART_ERROR(H5Lcopy_invalid_src_name);
+ }
+
+ H5E_BEGIN_TRY
+ {
+ err_ret =
+ H5Lcopy(src_grp_id, "", dst_grp_id, COPY_LINK_INVALID_PARAMS_TEST_HARD_LINK_COPY_NAME,
+ H5P_DEFAULT, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Lcopy succeeded with an invalid source name of ''\n");
+ PART_ERROR(H5Lcopy_invalid_src_name);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lcopy_invalid_src_name);
+
+ PART_BEGIN(H5Lcopy_invalid_dst_loc_id)
+ {
+ TESTING_2("H5Lcopy with an invalid destination location ID");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret =
+ H5Lcopy(src_grp_id, COPY_LINK_INVALID_PARAMS_TEST_HARD_LINK_NAME, H5I_INVALID_HID,
+ COPY_LINK_INVALID_PARAMS_TEST_HARD_LINK_COPY_NAME, H5P_DEFAULT, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Lcopy succeeded with an invalid destination location ID\n");
+ PART_ERROR(H5Lcopy_invalid_dst_loc_id);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lcopy_invalid_dst_loc_id);
+
+ PART_BEGIN(H5Lcopy_invalid_dst_name)
+ {
+ TESTING_2("H5Lcopy with an invalid destination name");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Lcopy(src_grp_id, COPY_LINK_INVALID_PARAMS_TEST_HARD_LINK_NAME, dst_grp_id, NULL,
+ H5P_DEFAULT, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Lcopy succeeded with a NULL destination name\n");
+ PART_ERROR(H5Lcopy_invalid_dst_name);
+ }
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Lcopy(src_grp_id, COPY_LINK_INVALID_PARAMS_TEST_HARD_LINK_NAME, dst_grp_id, "",
+ H5P_DEFAULT, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Lcopy succeeded with an invalid destination name of ''\n");
+ PART_ERROR(H5Lcopy_invalid_dst_name);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lcopy_invalid_dst_name);
+
+ PART_BEGIN(H5Lcopy_invalid_lcpl)
+ {
+ TESTING_2("H5Lcopy with an invalid LCPL");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret =
+ H5Lcopy(src_grp_id, COPY_LINK_INVALID_PARAMS_TEST_HARD_LINK_NAME, dst_grp_id,
+ COPY_LINK_INVALID_PARAMS_TEST_HARD_LINK_COPY_NAME, H5I_INVALID_HID, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Lcopy succeeded with an invalid LCPL\n");
+ PART_ERROR(H5Lcopy_invalid_lcpl);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lcopy_invalid_lcpl);
+
+ PART_BEGIN(H5Lcopy_invalid_lapl)
+ {
+ TESTING_2("H5Lcopy with an invalid LAPL");
+#ifndef NO_INVALID_PROPERTY_LIST_TESTS
+ H5E_BEGIN_TRY
+ {
+ err_ret =
+ H5Lcopy(src_grp_id, COPY_LINK_INVALID_PARAMS_TEST_HARD_LINK_NAME, dst_grp_id,
+ COPY_LINK_INVALID_PARAMS_TEST_HARD_LINK_COPY_NAME, H5P_DEFAULT, H5I_INVALID_HID);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Lcopy succeeded with an invalid LAPL\n");
+ PART_ERROR(H5Lcopy_invalid_lapl);
+ }
+
+ PASSED();
+#else
+ SKIPPED();
+ PART_EMPTY(H5Lcopy_invalid_lapl);
+#endif
+ }
+ PART_END(H5Lcopy_invalid_lapl);
+
+ PART_BEGIN(H5Lcopy_invalid_same_location)
+ {
+ TESTING_2("H5Lcopy with an invalid same location");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret =
+ H5Lcopy(H5L_SAME_LOC, COPY_LINK_INVALID_PARAMS_TEST_HARD_LINK_NAME, H5L_SAME_LOC,
+ COPY_LINK_INVALID_PARAMS_TEST_HARD_LINK_COPY_NAME, H5P_DEFAULT, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Lcopy succeeded with an invalid same location\n");
+ PART_ERROR(H5Lcopy_invalid_same_location);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lcopy_invalid_same_location);
+
+ PART_BEGIN(H5Lcopy_invalid_across_files)
+ {
+ TESTING_2("H5Lcopy invalid across files");
+
+ HDsnprintf(ext_link_filename, H5_API_TEST_FILENAME_MAX_LENGTH, "%s",
+ EXTERNAL_LINK_TEST_FILE_NAME);
+
+ if ((ext_file_id = H5Fcreate(ext_link_filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create file '%s' for external link to reference\n", ext_link_filename);
+ PART_ERROR(H5Lcopy_invalid_across_files);
+ }
+
+ H5E_BEGIN_TRY
+ {
+ err_ret =
+ H5Lcopy(src_grp_id, COPY_LINK_INVALID_PARAMS_TEST_HARD_LINK_NAME, ext_file_id,
+ COPY_LINK_INVALID_PARAMS_TEST_HARD_LINK_COPY_NAME, H5P_DEFAULT, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Lcopy succeeded in copying a hard link across files!\n");
+ PART_ERROR(H5Lcopy_invalid_across_files);
+ }
+
+ if (H5Fclose(ext_file_id) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't close file '%s'\n", ext_link_filename);
+ PART_ERROR(H5Lcopy_invalid_across_files);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lcopy_invalid_across_files);
+ }
+ END_MULTIPART;
+
+ TESTING_2("test cleanup");
+
+ if (H5Gclose(dst_grp_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(src_grp_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Gclose(dst_grp_id);
+ H5Gclose(src_grp_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(ext_file_id);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to check that a link can be moved with H5Lmove.
+ */
+static int
+test_move_link(void)
+{
+ htri_t link_exists;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t src_grp_id = H5I_INVALID_HID, dst_grp_id = H5I_INVALID_HID;
+ char ext_link_filename[H5_API_TEST_FILENAME_MAX_LENGTH];
+ hid_t ext_file_id = H5I_INVALID_HID;
+
+ TESTING_MULTIPART("link moving");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_LINK_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_LINK_MORE) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_HARD_LINKS) || !(vol_cap_flags_g & H5VL_CAP_FLAG_EXTERNAL_LINKS) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_SOFT_LINKS)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file, group, or link, hard, soft, or external link aren't "
+ "supported with this connector\n");
+ return 0;
+ }
+
+ TESTING_2("test setup");
+
+ HDsnprintf(ext_link_filename, H5_API_TEST_FILENAME_MAX_LENGTH, "%s", EXTERNAL_LINK_TEST_FILE_NAME);
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, LINK_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", LINK_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, MOVE_LINK_TEST_SUBGROUP_NAME, H5P_DEFAULT, H5P_DEFAULT,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create group '%s'\n", MOVE_LINK_TEST_SUBGROUP_NAME);
+ goto error;
+ }
+
+ if ((src_grp_id = H5Gcreate2(group_id, MOVE_LINK_TEST_SRC_GROUP_NAME, H5P_DEFAULT, H5P_DEFAULT,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create group '%s'\n", MOVE_LINK_TEST_SRC_GROUP_NAME);
+ goto error;
+ }
+
+ if ((dst_grp_id = H5Gcreate2(group_id, MOVE_LINK_TEST_DST_GROUP_NAME, H5P_DEFAULT, H5P_DEFAULT,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create group '%s'\n", MOVE_LINK_TEST_DST_GROUP_NAME);
+ goto error;
+ }
+
+ PASSED();
+
+ BEGIN_MULTIPART
+ {
+ PART_BEGIN(H5Lmove_hard_no_check)
+ {
+ TESTING_2("H5Lmove on hard link (moved link's properties not checked)");
+
+ /* Try to move a hard link */
+ if (H5Lcreate_hard(group_id, ".", src_grp_id, MOVE_LINK_TEST_HARD_LINK_NAME, H5P_DEFAULT,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create hard link '%s'\n", MOVE_LINK_TEST_HARD_LINK_NAME);
+ PART_ERROR(H5Lmove_hard_no_check);
+ }
+
+ /* Verify the link has been created */
+ if ((link_exists = H5Lexists(src_grp_id, MOVE_LINK_TEST_HARD_LINK_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if hard link '%s' exists\n", MOVE_LINK_TEST_HARD_LINK_NAME);
+ PART_ERROR(H5Lmove_hard_no_check);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" hard link did not exist\n");
+ PART_ERROR(H5Lmove_hard_no_check);
+ }
+
+ /* Verify the link doesn't currently exist in the target group */
+ if ((link_exists = H5Lexists(dst_grp_id, MOVE_LINK_TEST_HARD_LINK_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if hard link '%s' exists\n", MOVE_LINK_TEST_HARD_LINK_NAME);
+ PART_ERROR(H5Lmove_hard_no_check);
+ }
+
+ if (link_exists) {
+ H5_FAILED();
+ HDprintf(" hard link existed in target group before move!\n");
+ PART_ERROR(H5Lmove_hard_no_check);
+ }
+
+ /* Move the link */
+ if (H5Lmove(src_grp_id, MOVE_LINK_TEST_HARD_LINK_NAME, dst_grp_id, MOVE_LINK_TEST_HARD_LINK_NAME,
+ H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to move link '%s'\n", MOVE_LINK_TEST_HARD_LINK_NAME);
+ PART_ERROR(H5Lmove_hard_no_check);
+ }
+
+ /* Verify the link has been moved */
+ if ((link_exists = H5Lexists(dst_grp_id, MOVE_LINK_TEST_HARD_LINK_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if hard link '%s' exists\n", MOVE_LINK_TEST_HARD_LINK_NAME);
+ PART_ERROR(H5Lmove_hard_no_check);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" hard link did not exist\n");
+ PART_ERROR(H5Lmove_hard_no_check);
+ }
+
+ /* Verify the old link is gone */
+ if ((link_exists = H5Lexists(src_grp_id, MOVE_LINK_TEST_HARD_LINK_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if old hard link '%s' exists\n",
+ MOVE_LINK_TEST_HARD_LINK_NAME);
+ PART_ERROR(H5Lmove_hard_no_check);
+ }
+
+ if (link_exists) {
+ H5_FAILED();
+ HDprintf(" old hard link exists\n");
+ PART_ERROR(H5Lmove_hard_no_check);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lmove_hard_no_check);
+
+ PART_BEGIN(H5Lmove_hard_check)
+ {
+ H5L_info2_t orig_info, new_info;
+ int cmp_value;
+
+ TESTING_2("H5Lmove on hard link (moved link's properties checked)");
+
+ /* Try to move a hard link */
+ if (H5Lcreate_hard(group_id, ".", src_grp_id, MOVE_LINK_TEST_HARD_LINK_NAME2, H5P_DEFAULT,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create hard link '%s'\n", MOVE_LINK_TEST_HARD_LINK_NAME2);
+ PART_ERROR(H5Lmove_hard_check);
+ }
+
+ /* Verify the link has been created */
+ if ((link_exists = H5Lexists(src_grp_id, MOVE_LINK_TEST_HARD_LINK_NAME2, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if hard link '%s' exists\n", MOVE_LINK_TEST_HARD_LINK_NAME2);
+ PART_ERROR(H5Lmove_hard_check);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" hard link did not exist\n");
+ PART_ERROR(H5Lmove_hard_check);
+ }
+
+ /* Retrieve the link's info */
+ if (H5Lget_info2(src_grp_id, MOVE_LINK_TEST_HARD_LINK_NAME2, &orig_info, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't retrieve info for link '%s'\n", MOVE_LINK_TEST_HARD_LINK_NAME2);
+ PART_ERROR(H5Lmove_hard_check);
+ }
+
+ /* Verify the link doesn't currently exist in the target group */
+ if ((link_exists = H5Lexists(dst_grp_id, MOVE_LINK_TEST_HARD_LINK_NAME2, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if hard link '%s' exists\n", MOVE_LINK_TEST_HARD_LINK_NAME2);
+ PART_ERROR(H5Lmove_hard_check);
+ }
+
+ if (link_exists) {
+ H5_FAILED();
+ HDprintf(" hard link existed in target group before move!\n");
+ PART_ERROR(H5Lmove_hard_check);
+ }
+
+ /* Move the link */
+ if (H5Lmove(src_grp_id, MOVE_LINK_TEST_HARD_LINK_NAME2, dst_grp_id,
+ MOVE_LINK_TEST_HARD_LINK_NAME2, H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to move link '%s'\n", MOVE_LINK_TEST_HARD_LINK_NAME2);
+ PART_ERROR(H5Lmove_hard_check);
+ }
+
+ /* Verify the link has been moved */
+ if ((link_exists = H5Lexists(dst_grp_id, MOVE_LINK_TEST_HARD_LINK_NAME2, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if hard link '%s' exists\n", MOVE_LINK_TEST_HARD_LINK_NAME2);
+ PART_ERROR(H5Lmove_hard_check);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" hard link did not exist\n");
+ PART_ERROR(H5Lmove_hard_check);
+ }
+
+ /* Verify the old link is gone */
+ if ((link_exists = H5Lexists(src_grp_id, MOVE_LINK_TEST_HARD_LINK_NAME2, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if old hard link '%s' exists\n",
+ MOVE_LINK_TEST_HARD_LINK_NAME2);
+ PART_ERROR(H5Lmove_hard_check);
+ }
+
+ if (link_exists) {
+ H5_FAILED();
+ HDprintf(" old hard link exists\n");
+ PART_ERROR(H5Lmove_hard_check);
+ }
+
+ /* Retrieve the moved link's info */
+ if (H5Lget_info2(dst_grp_id, MOVE_LINK_TEST_HARD_LINK_NAME2, &new_info, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't retrieve info for link '%s'\n", MOVE_LINK_TEST_HARD_LINK_NAME2);
+ PART_ERROR(H5Lmove_hard_check);
+ }
+
+ if (new_info.type != orig_info.type) {
+ H5_FAILED();
+ HDprintf(" moved link's link type doesn't match original link's type\n");
+ PART_ERROR(H5Lmove_hard_check);
+ }
+
+ if (H5Otoken_cmp(dst_grp_id, &new_info.u.token, &orig_info.u.token, &cmp_value) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to compare link target tokens\n");
+ PART_ERROR(H5Lmove_hard_check);
+ }
+
+ if (cmp_value != 0) {
+ H5_FAILED();
+ HDprintf(" moved hard link's object token doesn't match original link's object token\n");
+ PART_ERROR(H5Lmove_hard_check);
+ }
+
+ if (new_info.corder_valid != orig_info.corder_valid) {
+ H5_FAILED();
+ HDprintf(" moved link's 'corder_valid' field doesn't match original link's 'corder_valid' "
+ "field\n");
+ PART_ERROR(H5Lmove_hard_check);
+ }
+
+ if (new_info.corder_valid && orig_info.corder_valid && (new_info.corder != orig_info.corder)) {
+ H5_FAILED();
+ HDprintf(" moved link's creation order value %" PRId64
+ " doesn't match original link's creation order value %" PRId64 "\n",
+ new_info.corder, orig_info.corder);
+ PART_ERROR(H5Lmove_hard_check);
+ }
+
+ if (new_info.cset != orig_info.cset) {
+ H5_FAILED();
+ HDprintf(" moved link's character set doesn't match original link's character set\n");
+ PART_ERROR(H5Lmove_hard_check);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lmove_hard_check);
+
+ PART_BEGIN(H5Lmove_hard_same_loc)
+ {
+ TESTING_2("H5Lmove on hard link using H5L_SAME_LOC");
+
+ /* Try to move a hard link */
+ if (H5Lcreate_hard(group_id, ".", src_grp_id, MOVE_LINK_TEST_HARD_LINK_NAME3, H5P_DEFAULT,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create hard link '%s'\n", MOVE_LINK_TEST_HARD_LINK_NAME3);
+ PART_ERROR(H5Lmove_hard_same_loc);
+ }
+
+ /* Verify the link has been created */
+ if ((link_exists = H5Lexists(src_grp_id, MOVE_LINK_TEST_HARD_LINK_NAME3, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if hard link '%s' exists\n", MOVE_LINK_TEST_HARD_LINK_NAME3);
+ PART_ERROR(H5Lmove_hard_same_loc);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" hard link did not exist\n");
+ PART_ERROR(H5Lmove_hard_same_loc);
+ }
+
+ /* Verify the link doesn't currently exist in the target group */
+ if ((link_exists = H5Lexists(src_grp_id, MOVE_LINK_TEST_HARD_LINK_SAME_LOC_NAME, H5P_DEFAULT)) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if hard link '%s' exists\n",
+ MOVE_LINK_TEST_HARD_LINK_SAME_LOC_NAME);
+ PART_ERROR(H5Lmove_hard_same_loc);
+ }
+
+ if (link_exists) {
+ H5_FAILED();
+ HDprintf(" hard link existed in target group before move!\n");
+ PART_ERROR(H5Lmove_hard_same_loc);
+ }
+
+ /* Rename the link using H5L_SAME_LOC as the first parameter to H5Lmove */
+ if (H5Lmove(H5L_SAME_LOC, MOVE_LINK_TEST_HARD_LINK_NAME3, src_grp_id,
+ MOVE_LINK_TEST_HARD_LINK_SAME_LOC_NAME, H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to move link '%s' using H5L_SAME_LOC as first parameter to H5Lmove\n",
+ MOVE_LINK_TEST_HARD_LINK_NAME3);
+ PART_ERROR(H5Lmove_hard_same_loc);
+ }
+
+ /* Ensure the link has been renamed */
+ if ((link_exists = H5Lexists(src_grp_id, MOVE_LINK_TEST_HARD_LINK_NAME3, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if hard link '%s' exists\n", MOVE_LINK_TEST_HARD_LINK_NAME3);
+ PART_ERROR(H5Lmove_hard_same_loc);
+ }
+
+ if (link_exists) {
+ H5_FAILED();
+ HDprintf(" original hard link existed in target group after move!\n");
+ PART_ERROR(H5Lmove_hard_same_loc);
+ }
+
+ if ((link_exists = H5Lexists(src_grp_id, MOVE_LINK_TEST_HARD_LINK_SAME_LOC_NAME, H5P_DEFAULT)) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if hard link '%s' exists\n",
+ MOVE_LINK_TEST_HARD_LINK_SAME_LOC_NAME);
+ PART_ERROR(H5Lmove_hard_same_loc);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" hard link did not exist after move!\n");
+ PART_ERROR(H5Lmove_hard_same_loc);
+ }
+
+ /* Rename the link back using H5L_SAME_LOC as the third parameter to H5Lmove */
+ if (H5Lmove(src_grp_id, MOVE_LINK_TEST_HARD_LINK_SAME_LOC_NAME, H5L_SAME_LOC,
+ MOVE_LINK_TEST_HARD_LINK_NAME3, H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to move link '%s' using H5L_SAME_LOC as third parameter to H5Lmove\n",
+ MOVE_LINK_TEST_HARD_LINK_SAME_LOC_NAME);
+ PART_ERROR(H5Lmove_hard_same_loc);
+ }
+
+ /* Verify the link has been renamed back */
+ if ((link_exists = H5Lexists(src_grp_id, MOVE_LINK_TEST_HARD_LINK_NAME3, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if hard link '%s' exists\n", MOVE_LINK_TEST_HARD_LINK_NAME3);
+ PART_ERROR(H5Lmove_hard_same_loc);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" original hard link did not exist after moving the link back!\n");
+ PART_ERROR(H5Lmove_hard_same_loc);
+ }
+
+ if ((link_exists = H5Lexists(src_grp_id, MOVE_LINK_TEST_HARD_LINK_SAME_LOC_NAME, H5P_DEFAULT)) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if old hard link '%s' exists\n",
+ MOVE_LINK_TEST_HARD_LINK_SAME_LOC_NAME);
+ PART_ERROR(H5Lmove_hard_same_loc);
+ }
+
+ if (link_exists) {
+ H5_FAILED();
+ HDprintf(" renamed hard link exists after moving the link back!\n");
+ PART_ERROR(H5Lmove_hard_same_loc);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lmove_hard_same_loc);
+
+ PART_BEGIN(H5Lmove_hard_rename)
+ {
+ TESTING_2("H5Lmove to rename hard link without moving it");
+
+ /* Try to rename a hard link */
+ if (H5Lcreate_hard(group_id, ".", src_grp_id, MOVE_LINK_TEST_HARD_LINK_NAME4, H5P_DEFAULT,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create hard link '%s'\n", MOVE_LINK_TEST_HARD_LINK_NAME4);
+ PART_ERROR(H5Lmove_hard_rename);
+ }
+
+ /* Verify the link has been created */
+ if ((link_exists = H5Lexists(src_grp_id, MOVE_LINK_TEST_HARD_LINK_NAME4, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if hard link '%s' exists\n", MOVE_LINK_TEST_HARD_LINK_NAME4);
+ PART_ERROR(H5Lmove_hard_rename);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" hard link did not exist\n");
+ PART_ERROR(H5Lmove_hard_rename);
+ }
+
+ /* Verify the renamed link doesn't currently exist in the source group */
+ if ((link_exists = H5Lexists(src_grp_id, MOVE_LINK_TEST_HARD_LINK_NEW_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if renamed hard link '%s' exists\n",
+ MOVE_LINK_TEST_HARD_LINK_NEW_NAME);
+ PART_ERROR(H5Lmove_hard_rename);
+ }
+
+ if (link_exists) {
+ H5_FAILED();
+ HDprintf(" renamed hard link existed in source group before move!\n");
+ PART_ERROR(H5Lmove_hard_rename);
+ }
+
+ /* Rename the link */
+ if (H5Lmove(src_grp_id, MOVE_LINK_TEST_HARD_LINK_NAME4, src_grp_id,
+ MOVE_LINK_TEST_HARD_LINK_NEW_NAME, H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to rename link '%s'\n", MOVE_LINK_TEST_HARD_LINK_NAME4);
+ PART_ERROR(H5Lmove_hard_rename);
+ }
+
+ /* Verify the link has been renamed */
+ if ((link_exists = H5Lexists(src_grp_id, MOVE_LINK_TEST_HARD_LINK_NEW_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if renamed hard link '%s' exists\n",
+ MOVE_LINK_TEST_HARD_LINK_NEW_NAME);
+ PART_ERROR(H5Lmove_hard_rename);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" renamed hard link did not exist\n");
+ PART_ERROR(H5Lmove_hard_rename);
+ }
+
+ /* Verify the old link is gone */
+ if ((link_exists = H5Lexists(src_grp_id, MOVE_LINK_TEST_HARD_LINK_NAME4, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if old hard link '%s' exists\n",
+ MOVE_LINK_TEST_HARD_LINK_NAME4);
+ PART_ERROR(H5Lmove_hard_rename);
+ }
+
+ if (link_exists) {
+ H5_FAILED();
+ HDprintf(" old hard link exists\n");
+ PART_ERROR(H5Lmove_hard_rename);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lmove_hard_rename);
+
+ PART_BEGIN(H5Lmove_soft_no_check)
+ {
+ TESTING_2("H5Lmove on soft link (moved link's properties not checked)");
+
+ /* Try to move a soft link */
+ if (H5Lcreate_soft(MOVE_LINK_TEST_SOFT_LINK_TARGET_PATH, src_grp_id,
+ MOVE_LINK_TEST_SOFT_LINK_NAME, H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create soft link '%s'\n", MOVE_LINK_TEST_SOFT_LINK_NAME);
+ PART_ERROR(H5Lmove_soft_no_check);
+ }
+
+ /* Verify the link has been created */
+ if ((link_exists = H5Lexists(src_grp_id, MOVE_LINK_TEST_SOFT_LINK_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if soft link '%s' exists\n", MOVE_LINK_TEST_SOFT_LINK_NAME);
+ PART_ERROR(H5Lmove_soft_no_check);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" soft link did not exist\n");
+ PART_ERROR(H5Lmove_soft_no_check);
+ }
+
+ /* Verify the link doesn't currently exist in the target group */
+ if ((link_exists = H5Lexists(dst_grp_id, MOVE_LINK_TEST_SOFT_LINK_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if soft link '%s' exists\n", MOVE_LINK_TEST_SOFT_LINK_NAME);
+ PART_ERROR(H5Lmove_soft_no_check);
+ }
+
+ if (link_exists) {
+ H5_FAILED();
+ HDprintf(" soft link existed in target group before move!\n");
+ PART_ERROR(H5Lmove_soft_no_check);
+ }
+
+ /* Move the link */
+ if (H5Lmove(src_grp_id, MOVE_LINK_TEST_SOFT_LINK_NAME, dst_grp_id, MOVE_LINK_TEST_SOFT_LINK_NAME,
+ H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to move link '%s'\n", MOVE_LINK_TEST_SOFT_LINK_NAME);
+ PART_ERROR(H5Lmove_soft_no_check);
+ }
+
+ /* Verify the link has been moved */
+ if ((link_exists = H5Lexists(dst_grp_id, MOVE_LINK_TEST_SOFT_LINK_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if soft link '%s' exists\n", MOVE_LINK_TEST_SOFT_LINK_NAME);
+ PART_ERROR(H5Lmove_soft_no_check);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" soft link did not exist\n");
+ PART_ERROR(H5Lmove_soft_no_check);
+ }
+
+ /* Verify the old link is gone */
+ if ((link_exists = H5Lexists(src_grp_id, MOVE_LINK_TEST_SOFT_LINK_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if old soft link '%s' exists\n",
+ MOVE_LINK_TEST_SOFT_LINK_NAME);
+ PART_ERROR(H5Lmove_soft_no_check);
+ }
+
+ if (link_exists) {
+ H5_FAILED();
+ HDprintf(" old soft link exists\n");
+ PART_ERROR(H5Lmove_soft_no_check);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lmove_soft_no_check);
+
+ PART_BEGIN(H5Lmove_soft_check)
+ {
+ H5L_info2_t orig_info, new_info;
+ char orig_link_val[MOVE_LINK_TEST_LINK_VAL_BUF_SIZE];
+ char new_link_val[MOVE_LINK_TEST_LINK_VAL_BUF_SIZE];
+
+ TESTING_2("H5Lmove on soft link (moved link's properties checked)");
+
+ /* Try to move a soft link */
+ if (H5Lcreate_soft(MOVE_LINK_TEST_SOFT_LINK_TARGET_PATH, src_grp_id,
+ MOVE_LINK_TEST_SOFT_LINK_NAME2, H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create soft link '%s'\n", MOVE_LINK_TEST_SOFT_LINK_NAME2);
+ PART_ERROR(H5Lmove_soft_check);
+ }
+
+ /* Verify the link has been created */
+ if ((link_exists = H5Lexists(src_grp_id, MOVE_LINK_TEST_SOFT_LINK_NAME2, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if soft link '%s' exists\n", MOVE_LINK_TEST_SOFT_LINK_NAME2);
+ PART_ERROR(H5Lmove_soft_check);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" soft link did not exist\n");
+ PART_ERROR(H5Lmove_soft_check);
+ }
+
+ /* Retrieve the link's info */
+ if (H5Lget_info2(src_grp_id, MOVE_LINK_TEST_SOFT_LINK_NAME2, &orig_info, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't retrieve info for link '%s'\n", MOVE_LINK_TEST_SOFT_LINK_NAME2);
+ PART_ERROR(H5Lmove_soft_check);
+ }
+
+ /* Retrieve the link's value */
+ if (H5Lget_val(src_grp_id, MOVE_LINK_TEST_SOFT_LINK_NAME2, orig_link_val,
+ MOVE_LINK_TEST_LINK_VAL_BUF_SIZE, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't retrieve value for soft link '%s'\n", MOVE_LINK_TEST_SOFT_LINK_NAME2);
+ PART_ERROR(H5Lmove_soft_check);
+ }
+
+ /* Verify the link doesn't currently exist in the target group */
+ if ((link_exists = H5Lexists(dst_grp_id, MOVE_LINK_TEST_SOFT_LINK_NAME2, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if soft link '%s' exists\n", MOVE_LINK_TEST_SOFT_LINK_NAME2);
+ PART_ERROR(H5Lmove_soft_check);
+ }
+
+ if (link_exists) {
+ H5_FAILED();
+ HDprintf(" soft link existed in target group before move!\n");
+ PART_ERROR(H5Lmove_soft_check);
+ }
+
+ /* Move the link */
+ if (H5Lmove(src_grp_id, MOVE_LINK_TEST_SOFT_LINK_NAME2, dst_grp_id,
+ MOVE_LINK_TEST_SOFT_LINK_NAME2, H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to move link '%s'\n", MOVE_LINK_TEST_SOFT_LINK_NAME2);
+ PART_ERROR(H5Lmove_soft_check);
+ }
+
+ /* Verify the link has been moved */
+ if ((link_exists = H5Lexists(dst_grp_id, MOVE_LINK_TEST_SOFT_LINK_NAME2, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if soft link '%s' exists\n", MOVE_LINK_TEST_SOFT_LINK_NAME2);
+ PART_ERROR(H5Lmove_soft_check);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" soft link did not exist\n");
+ PART_ERROR(H5Lmove_soft_check);
+ }
+
+ /* Verify the old link is gone */
+ if ((link_exists = H5Lexists(src_grp_id, MOVE_LINK_TEST_SOFT_LINK_NAME2, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if old soft link '%s' exists\n",
+ MOVE_LINK_TEST_SOFT_LINK_NAME2);
+ PART_ERROR(H5Lmove_soft_check);
+ }
+
+ if (link_exists) {
+ H5_FAILED();
+ HDprintf(" old soft link exists\n");
+ PART_ERROR(H5Lmove_soft_check);
+ }
+
+ /* Retrieve the moved link's info */
+ if (H5Lget_info2(dst_grp_id, MOVE_LINK_TEST_SOFT_LINK_NAME2, &new_info, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't retrieve info for link '%s'\n", MOVE_LINK_TEST_SOFT_LINK_NAME2);
+ PART_ERROR(H5Lmove_soft_check);
+ }
+
+ if (new_info.type != orig_info.type) {
+ H5_FAILED();
+ HDprintf(" moved link's link type doesn't match original link's type\n");
+ PART_ERROR(H5Lmove_soft_check);
+ }
+
+ if (new_info.u.val_size != orig_info.u.val_size) {
+ H5_FAILED();
+ HDprintf(" moved soft link's value size of %llu doesn't match original link's value size "
+ "of %llu\n",
+ (unsigned long long)new_info.u.val_size, (unsigned long long)orig_info.u.val_size);
+ PART_ERROR(H5Lmove_soft_check);
+ }
+
+ if (new_info.corder_valid != orig_info.corder_valid) {
+ H5_FAILED();
+ HDprintf(" moved link's 'corder_valid' field doesn't match original link's 'corder_valid' "
+ "field\n");
+ PART_ERROR(H5Lmove_soft_check);
+ }
+
+ if (new_info.corder_valid && orig_info.corder_valid && (new_info.corder != orig_info.corder)) {
+ H5_FAILED();
+ HDprintf(" moved link's creation order value %" PRId64
+ " doesn't match original link's creation order value %" PRId64 "\n",
+ new_info.corder, orig_info.corder);
+ PART_ERROR(H5Lmove_soft_check);
+ }
+
+ if (new_info.cset != orig_info.cset) {
+ H5_FAILED();
+ HDprintf(" moved link's character set doesn't match original link's character set\n");
+ PART_ERROR(H5Lmove_soft_check);
+ }
+
+ /* Check the soft link's value */
+ if (H5Lget_val(dst_grp_id, MOVE_LINK_TEST_SOFT_LINK_NAME2, new_link_val,
+ MOVE_LINK_TEST_LINK_VAL_BUF_SIZE, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't retrieve value for soft link '%s'\n", MOVE_LINK_TEST_SOFT_LINK_NAME2);
+ PART_ERROR(H5Lmove_soft_check);
+ }
+
+ if (HDstrncmp(orig_link_val, new_link_val, MOVE_LINK_TEST_LINK_VAL_BUF_SIZE)) {
+ H5_FAILED();
+ HDprintf(" moved soft link's value '%s' doesn't match original link's value '%s'\n",
+ new_link_val, orig_link_val);
+ PART_ERROR(H5Lmove_soft_check);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lmove_soft_check);
+
+ PART_BEGIN(H5Lmove_soft_same_loc)
+ {
+ TESTING_2("H5Lmove on soft link using H5L_SAME_LOC");
+
+ /* Try to move a soft link */
+ if (H5Lcreate_soft(MOVE_LINK_TEST_SOFT_LINK_TARGET_PATH, src_grp_id,
+ MOVE_LINK_TEST_SOFT_LINK_NAME3, H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create soft link '%s'\n", MOVE_LINK_TEST_SOFT_LINK_NAME3);
+ PART_ERROR(H5Lmove_soft_same_loc);
+ }
+
+ /* Verify the link has been created */
+ if ((link_exists = H5Lexists(src_grp_id, MOVE_LINK_TEST_SOFT_LINK_NAME3, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if soft link '%s' exists\n", MOVE_LINK_TEST_SOFT_LINK_NAME3);
+ PART_ERROR(H5Lmove_soft_same_loc);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" soft link did not exist\n");
+ PART_ERROR(H5Lmove_soft_same_loc);
+ }
+
+ /* Verify the link doesn't currently exist in the target group */
+ if ((link_exists = H5Lexists(src_grp_id, MOVE_LINK_TEST_SOFT_LINK_SAME_LOC_NAME, H5P_DEFAULT)) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if soft link '%s' exists\n",
+ MOVE_LINK_TEST_SOFT_LINK_SAME_LOC_NAME);
+ PART_ERROR(H5Lmove_soft_same_loc);
+ }
+
+ if (link_exists) {
+ H5_FAILED();
+ HDprintf(" soft link existed in target group before move!\n");
+ PART_ERROR(H5Lmove_soft_same_loc);
+ }
+
+ /* Rename the link using H5L_SAME_LOC as the first parameter to H5Lmove */
+ if (H5Lmove(H5L_SAME_LOC, MOVE_LINK_TEST_SOFT_LINK_NAME3, src_grp_id,
+ MOVE_LINK_TEST_SOFT_LINK_SAME_LOC_NAME, H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to move link '%s' using H5L_SAME_LOC as first parameter to H5Lmove\n",
+ MOVE_LINK_TEST_SOFT_LINK_NAME3);
+ PART_ERROR(H5Lmove_soft_same_loc);
+ }
+
+ /* Ensure the link has been renamed */
+ if ((link_exists = H5Lexists(src_grp_id, MOVE_LINK_TEST_SOFT_LINK_NAME3, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if soft link '%s' exists\n", MOVE_LINK_TEST_SOFT_LINK_NAME3);
+ PART_ERROR(H5Lmove_soft_same_loc);
+ }
+
+ if (link_exists) {
+ H5_FAILED();
+ HDprintf(" original soft link existed in target group after move!\n");
+ PART_ERROR(H5Lmove_soft_same_loc);
+ }
+
+ if ((link_exists = H5Lexists(src_grp_id, MOVE_LINK_TEST_SOFT_LINK_SAME_LOC_NAME, H5P_DEFAULT)) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if soft link '%s' exists\n",
+ MOVE_LINK_TEST_SOFT_LINK_SAME_LOC_NAME);
+ PART_ERROR(H5Lmove_soft_same_loc);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" soft link did not exist after move!\n");
+ PART_ERROR(H5Lmove_soft_same_loc);
+ }
+
+ /* Rename the link back using H5L_SAME_LOC as the third parameter to H5Lmove */
+ if (H5Lmove(src_grp_id, MOVE_LINK_TEST_SOFT_LINK_SAME_LOC_NAME, H5L_SAME_LOC,
+ MOVE_LINK_TEST_SOFT_LINK_NAME3, H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to move link '%s' using H5L_SAME_LOC as third parameter to H5Lmove\n",
+ MOVE_LINK_TEST_SOFT_LINK_SAME_LOC_NAME);
+ PART_ERROR(H5Lmove_soft_same_loc);
+ }
+
+ /* Verify the link has been renamed back */
+ if ((link_exists = H5Lexists(src_grp_id, MOVE_LINK_TEST_SOFT_LINK_NAME3, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if soft link '%s' exists\n", MOVE_LINK_TEST_SOFT_LINK_NAME3);
+ PART_ERROR(H5Lmove_soft_same_loc);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" original hard link did not exist after moving the link back!\n");
+ PART_ERROR(H5Lmove_soft_same_loc);
+ }
+
+ if ((link_exists = H5Lexists(src_grp_id, MOVE_LINK_TEST_SOFT_LINK_SAME_LOC_NAME, H5P_DEFAULT)) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if old soft link '%s' exists\n",
+ MOVE_LINK_TEST_SOFT_LINK_SAME_LOC_NAME);
+ PART_ERROR(H5Lmove_soft_same_loc);
+ }
+
+ if (link_exists) {
+ H5_FAILED();
+ HDprintf(" renamed soft link exists after moving the link back!\n");
+ PART_ERROR(H5Lmove_soft_same_loc);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lmove_soft_same_loc);
+
+ PART_BEGIN(H5Lmove_soft_rename)
+ {
+ TESTING_2("H5Lmove to rename soft link without moving it");
+
+ /* Try to rename a soft link */
+ if (H5Lcreate_soft(MOVE_LINK_TEST_SOFT_LINK_TARGET_PATH, src_grp_id,
+ MOVE_LINK_TEST_SOFT_LINK_NAME4, H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create soft link '%s'\n", MOVE_LINK_TEST_SOFT_LINK_NAME4);
+ PART_ERROR(H5Lmove_soft_rename);
+ }
+
+ /* Verify the link has been created */
+ if ((link_exists = H5Lexists(src_grp_id, MOVE_LINK_TEST_SOFT_LINK_NAME4, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if soft link '%s' exists\n", MOVE_LINK_TEST_SOFT_LINK_NAME4);
+ PART_ERROR(H5Lmove_soft_rename);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" soft link did not exist\n");
+ PART_ERROR(H5Lmove_soft_rename);
+ }
+
+ /* Verify the renamed link doesn't currently exist in the source group */
+ if ((link_exists = H5Lexists(src_grp_id, MOVE_LINK_TEST_SOFT_LINK_NEW_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if renamed soft link '%s' exists\n",
+ MOVE_LINK_TEST_SOFT_LINK_NEW_NAME);
+ PART_ERROR(H5Lmove_soft_rename);
+ }
+
+ if (link_exists) {
+ H5_FAILED();
+ HDprintf(" renamed soft link existed in source group before move!\n");
+ PART_ERROR(H5Lmove_soft_rename);
+ }
+
+ /* Rename the link */
+ if (H5Lmove(src_grp_id, MOVE_LINK_TEST_SOFT_LINK_NAME4, src_grp_id,
+ MOVE_LINK_TEST_SOFT_LINK_NEW_NAME, H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to rename link '%s'\n", MOVE_LINK_TEST_SOFT_LINK_NAME4);
+ PART_ERROR(H5Lmove_soft_rename);
+ }
+
+ /* Verify the link has been renamed */
+ if ((link_exists = H5Lexists(src_grp_id, MOVE_LINK_TEST_SOFT_LINK_NEW_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if renamed soft link '%s' exists\n",
+ MOVE_LINK_TEST_SOFT_LINK_NEW_NAME);
+ PART_ERROR(H5Lmove_soft_rename);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" renamed soft link did not exist\n");
+ PART_ERROR(H5Lmove_soft_rename);
+ }
+
+ /* Verify the old link is gone */
+ if ((link_exists = H5Lexists(src_grp_id, MOVE_LINK_TEST_SOFT_LINK_NAME4, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if old soft link '%s' exists\n",
+ MOVE_LINK_TEST_SOFT_LINK_NAME4);
+ PART_ERROR(H5Lmove_soft_rename);
+ }
+
+ if (link_exists) {
+ H5_FAILED();
+ HDprintf(" old soft link exists\n");
+ PART_ERROR(H5Lmove_soft_rename);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lmove_soft_rename);
+
+ PART_BEGIN(H5Lmove_external_no_check)
+ {
+ TESTING_2("H5Lmove on external link (moved link's properties not checked)");
+#ifndef NO_EXTERNAL_LINKS
+ HDsnprintf(ext_link_filename, H5_API_TEST_FILENAME_MAX_LENGTH, "%s",
+ EXTERNAL_LINK_TEST_FILE_NAME);
+
+ if ((ext_file_id = H5Fcreate(ext_link_filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create file '%s' for external link to reference\n", ext_link_filename);
+ PART_ERROR(H5Lmove_external_no_check);
+ }
+
+ if (H5Fclose(ext_file_id) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't close file '%s'\n", ext_link_filename);
+ PART_ERROR(H5Lmove_external_no_check);
+ }
+
+ /* Try to move an external link */
+ if (H5Lcreate_external(ext_link_filename, "/", src_grp_id, MOVE_LINK_TEST_EXTERN_LINK_NAME,
+ H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create external link '%s'\n", MOVE_LINK_TEST_EXTERN_LINK_NAME);
+ PART_ERROR(H5Lmove_external_no_check);
+ }
+
+ /* Verify the link has been created */
+ if ((link_exists = H5Lexists(src_grp_id, MOVE_LINK_TEST_EXTERN_LINK_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if external link '%s' exists\n",
+ MOVE_LINK_TEST_EXTERN_LINK_NAME);
+ PART_ERROR(H5Lmove_external_no_check);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" external link did not exist\n");
+ PART_ERROR(H5Lmove_external_no_check);
+ }
+
+ /* Verify the link doesn't currently exist in the target group */
+ if ((link_exists = H5Lexists(dst_grp_id, MOVE_LINK_TEST_EXTERN_LINK_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if external link '%s' exists\n",
+ MOVE_LINK_TEST_EXTERN_LINK_NAME);
+ PART_ERROR(H5Lmove_external_no_check);
+ }
+
+ if (link_exists) {
+ H5_FAILED();
+ HDprintf(" external link existed in target group before move!\n");
+ PART_ERROR(H5Lmove_external_no_check);
+ }
+
+ /* Move the link */
+ if (H5Lmove(src_grp_id, MOVE_LINK_TEST_EXTERN_LINK_NAME, dst_grp_id,
+ MOVE_LINK_TEST_EXTERN_LINK_NAME, H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to move link '%s'\n", MOVE_LINK_TEST_EXTERN_LINK_NAME);
+ PART_ERROR(H5Lmove_external_no_check);
+ }
+
+ /* Verify the link has been moved */
+ if ((link_exists = H5Lexists(dst_grp_id, MOVE_LINK_TEST_EXTERN_LINK_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if external link '%s' exists\n",
+ MOVE_LINK_TEST_EXTERN_LINK_NAME);
+ PART_ERROR(H5Lmove_external_no_check);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" external link did not exist\n");
+ PART_ERROR(H5Lmove_external_no_check);
+ }
+
+ /* Verify the old link is gone */
+ if ((link_exists = H5Lexists(src_grp_id, MOVE_LINK_TEST_EXTERN_LINK_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if old external link '%s' exists\n",
+ MOVE_LINK_TEST_EXTERN_LINK_NAME);
+ PART_ERROR(H5Lmove_external_no_check);
+ }
+
+ if (link_exists) {
+ H5_FAILED();
+ HDprintf(" old external link exists\n");
+ PART_ERROR(H5Lmove_external_no_check);
+ }
+
+ PASSED();
+#else
+ SKIPPED();
+ PART_EMPTY(H5Lmove_external_no_check);
+#endif
+ }
+ PART_END(H5Lmove_external_no_check);
+
+ H5E_BEGIN_TRY
+ {
+ H5Fclose(ext_file_id);
+ ext_file_id = H5I_INVALID_HID;
+ }
+ H5E_END_TRY;
+
+ PART_BEGIN(H5Lmove_external_check)
+ {
+#ifndef NO_EXTERNAL_LINKS
+ H5L_info2_t orig_info, new_info;
+ const char *orig_filename, *new_filename;
+ const char *orig_objname, *new_objname;
+ unsigned unpack_flags = 0;
+ char orig_link_val[MOVE_LINK_TEST_LINK_VAL_BUF_SIZE];
+ char new_link_val[MOVE_LINK_TEST_LINK_VAL_BUF_SIZE];
+#endif
+
+ TESTING_2("H5Lmove on external link (moved link's properties checked)");
+#ifndef NO_EXTERNAL_LINKS
+ HDsnprintf(ext_link_filename, H5_API_TEST_FILENAME_MAX_LENGTH, "%s",
+ EXTERNAL_LINK_TEST_FILE_NAME);
+
+ if ((ext_file_id = H5Fcreate(ext_link_filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create file '%s' for external link to reference\n", ext_link_filename);
+ PART_ERROR(H5Lmove_external_check);
+ }
+
+ if (H5Fclose(ext_file_id) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't close file '%s'\n", ext_link_filename);
+ PART_ERROR(H5Lmove_external_check);
+ }
+
+ /* Try to move an external link */
+ if (H5Lcreate_external(ext_link_filename, "/", src_grp_id, MOVE_LINK_TEST_EXTERN_LINK_NAME2,
+ H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create external link '%s'\n", MOVE_LINK_TEST_EXTERN_LINK_NAME2);
+ PART_ERROR(H5Lmove_external_check);
+ }
+
+ /* Verify the link has been created */
+ if ((link_exists = H5Lexists(src_grp_id, MOVE_LINK_TEST_EXTERN_LINK_NAME2, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if external link '%s' exists\n",
+ MOVE_LINK_TEST_EXTERN_LINK_NAME2);
+ PART_ERROR(H5Lmove_external_check);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" external link did not exist\n");
+ PART_ERROR(H5Lmove_external_check);
+ }
+
+ /* Retrieve the link's info */
+ if (H5Lget_info2(src_grp_id, MOVE_LINK_TEST_EXTERN_LINK_NAME2, &orig_info, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't retrieve info for link '%s'\n", MOVE_LINK_TEST_EXTERN_LINK_NAME2);
+ PART_ERROR(H5Lmove_external_check);
+ }
+
+ /* Retrieve the link's value */
+ if (H5Lget_val(src_grp_id, MOVE_LINK_TEST_EXTERN_LINK_NAME2, orig_link_val,
+ MOVE_LINK_TEST_LINK_VAL_BUF_SIZE, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't retrieve value for external link '%s'\n",
+ MOVE_LINK_TEST_EXTERN_LINK_NAME2);
+ PART_ERROR(H5Lmove_external_check);
+ }
+
+ if (H5Lunpack_elink_val(orig_link_val, orig_info.u.val_size, &unpack_flags, &orig_filename,
+ &orig_objname) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't unpack original external link's value buffer\n");
+ PART_ERROR(H5Lmove_external_check);
+ }
+
+ /* Verify the link doesn't currently exist in the target group */
+ if ((link_exists = H5Lexists(dst_grp_id, MOVE_LINK_TEST_EXTERN_LINK_NAME2, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if external link '%s' exists\n",
+ MOVE_LINK_TEST_EXTERN_LINK_NAME2);
+ PART_ERROR(H5Lmove_external_check);
+ }
+
+ if (link_exists) {
+ H5_FAILED();
+ HDprintf(" external link existed in target group before move!\n");
+ PART_ERROR(H5Lmove_external_check);
+ }
+
+ /* Move the link */
+ if (H5Lmove(src_grp_id, MOVE_LINK_TEST_EXTERN_LINK_NAME2, dst_grp_id,
+ MOVE_LINK_TEST_EXTERN_LINK_NAME2, H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to move link '%s'\n", MOVE_LINK_TEST_EXTERN_LINK_NAME2);
+ PART_ERROR(H5Lmove_external_check);
+ }
+
+ /* Verify the link has been moved */
+ if ((link_exists = H5Lexists(dst_grp_id, MOVE_LINK_TEST_EXTERN_LINK_NAME2, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if external link '%s' exists\n",
+ MOVE_LINK_TEST_EXTERN_LINK_NAME2);
+ PART_ERROR(H5Lmove_external_check);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" external link did not exist\n");
+ PART_ERROR(H5Lmove_external_check);
+ }
+
+ /* Verify the old link is gone */
+ if ((link_exists = H5Lexists(src_grp_id, MOVE_LINK_TEST_EXTERN_LINK_NAME2, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if old external link '%s' exists\n",
+ MOVE_LINK_TEST_EXTERN_LINK_NAME2);
+ PART_ERROR(H5Lmove_external_check);
+ }
+
+ if (link_exists) {
+ H5_FAILED();
+ HDprintf(" old external link exists\n");
+ PART_ERROR(H5Lmove_external_check);
+ }
+
+ /* Retrieve the moved link's info */
+ if (H5Lget_info2(dst_grp_id, MOVE_LINK_TEST_EXTERN_LINK_NAME2, &new_info, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't retrieve info for link '%s'\n", MOVE_LINK_TEST_EXTERN_LINK_NAME2);
+ PART_ERROR(H5Lmove_external_check);
+ }
+
+ if (new_info.type != orig_info.type) {
+ H5_FAILED();
+ HDprintf(" moved link's link type doesn't match original link's type\n");
+ PART_ERROR(H5Lmove_external_check);
+ }
+
+ if (new_info.u.val_size != orig_info.u.val_size) {
+ H5_FAILED();
+ HDprintf(" moved external link's value size of %llu doesn't match original link's value "
+ "size of %llu\n",
+ (unsigned long long)new_info.u.val_size, (unsigned long long)orig_info.u.val_size);
+ PART_ERROR(H5Lmove_external_check);
+ }
+
+ if (new_info.corder_valid != orig_info.corder_valid) {
+ H5_FAILED();
+ HDprintf(" moved link's 'corder_valid' field doesn't match original link's 'corder_valid' "
+ "field\n");
+ PART_ERROR(H5Lmove_external_check);
+ }
+
+ if (new_info.corder_valid && orig_info.corder_valid && (new_info.corder != orig_info.corder)) {
+ H5_FAILED();
+ HDprintf(" moved link's creation order value %lld doesn't match original link's creation "
+ "order value %lld\n",
+                         (long long)new_info.corder, (long long)orig_info.corder);
+ PART_ERROR(H5Lmove_external_check);
+ }
+
+ if (new_info.cset != orig_info.cset) {
+ H5_FAILED();
+ HDprintf(" moved link's character set doesn't match original link's character set\n");
+ PART_ERROR(H5Lmove_external_check);
+ }
+
+ /* Check the external link's value */
+ if (H5Lget_val(dst_grp_id, MOVE_LINK_TEST_EXTERN_LINK_NAME2, new_link_val,
+ MOVE_LINK_TEST_LINK_VAL_BUF_SIZE, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't retrieve value for external link '%s'\n",
+ MOVE_LINK_TEST_EXTERN_LINK_NAME2);
+ PART_ERROR(H5Lmove_external_check);
+ }
+
+ if (H5Lunpack_elink_val(new_link_val, new_info.u.val_size, &unpack_flags, &new_filename,
+ &new_objname) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't unpack moved external link's value buffer\n");
+ PART_ERROR(H5Lmove_external_check);
+ }
+
+            if (HDstrncmp(new_filename, orig_filename, strlen(orig_filename) + 1) != 0) {
+ H5_FAILED();
+ HDprintf(" moved external link's filename '%s' doesn't match original external link's "
+ "filename '%s'\n",
+ new_filename, orig_filename);
+ PART_ERROR(H5Lmove_external_check);
+ }
+
+            if (HDstrncmp(new_objname, orig_objname, strlen(orig_objname) + 1) != 0) {
+ H5_FAILED();
+ HDprintf(" moved external link's object name '%s' doesn't match original external link's "
+ "object name '%s'\n",
+ new_objname, orig_objname);
+ PART_ERROR(H5Lmove_external_check);
+ }
+
+ PASSED();
+#else
+ SKIPPED();
+ PART_EMPTY(H5Lmove_external_check);
+#endif
+ }
+ PART_END(H5Lmove_external_check);
+
+ H5E_BEGIN_TRY
+ {
+ H5Fclose(ext_file_id);
+ ext_file_id = H5I_INVALID_HID;
+ }
+ H5E_END_TRY;
+
+ PART_BEGIN(H5Lmove_external_same_loc)
+ {
+ TESTING_2("H5Lmove on external link using H5L_SAME_LOC");
+#ifndef NO_EXTERNAL_LINKS
+ HDsnprintf(ext_link_filename, H5_API_TEST_FILENAME_MAX_LENGTH, "%s",
+ EXTERNAL_LINK_TEST_FILE_NAME);
+
+ if ((ext_file_id = H5Fcreate(ext_link_filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create file '%s' for external link to reference\n", ext_link_filename);
+ PART_ERROR(H5Lmove_external_same_loc);
+ }
+
+ if (H5Fclose(ext_file_id) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't close file '%s'\n", ext_link_filename);
+ PART_ERROR(H5Lmove_external_same_loc);
+ }
+
+ /* Try to move an external link */
+ if (H5Lcreate_external(ext_link_filename, "/", src_grp_id, MOVE_LINK_TEST_EXTERN_LINK_NAME3,
+ H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create external link '%s'\n", MOVE_LINK_TEST_EXTERN_LINK_NAME3);
+ PART_ERROR(H5Lmove_external_same_loc);
+ }
+
+ /* Verify the link has been created */
+ if ((link_exists = H5Lexists(src_grp_id, MOVE_LINK_TEST_EXTERN_LINK_NAME3, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if external link '%s' exists\n",
+ MOVE_LINK_TEST_EXTERN_LINK_NAME3);
+ PART_ERROR(H5Lmove_external_same_loc);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" external link did not exist\n");
+ PART_ERROR(H5Lmove_external_same_loc);
+ }
+
+ /* Verify the link doesn't currently exist in the target group */
+ if ((link_exists = H5Lexists(src_grp_id, MOVE_LINK_TEST_EXTERN_LINK_SAME_LOC_NAME, H5P_DEFAULT)) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if external link '%s' exists\n",
+ MOVE_LINK_TEST_EXTERN_LINK_SAME_LOC_NAME);
+ PART_ERROR(H5Lmove_external_same_loc);
+ }
+
+ if (link_exists) {
+ H5_FAILED();
+ HDprintf(" external link existed in target group before move!\n");
+ PART_ERROR(H5Lmove_external_same_loc);
+ }
+
+ /* Rename the link using H5L_SAME_LOC as the first parameter to H5Lmove */
+ if (H5Lmove(H5L_SAME_LOC, MOVE_LINK_TEST_EXTERN_LINK_NAME3, src_grp_id,
+ MOVE_LINK_TEST_EXTERN_LINK_SAME_LOC_NAME, H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to move link '%s'\n", MOVE_LINK_TEST_EXTERN_LINK_NAME3);
+ PART_ERROR(H5Lmove_external_same_loc);
+ }
+
+ /* Ensure the link has been renamed */
+ if ((link_exists = H5Lexists(src_grp_id, MOVE_LINK_TEST_EXTERN_LINK_NAME3, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if external link '%s' exists\n",
+ MOVE_LINK_TEST_EXTERN_LINK_NAME3);
+ PART_ERROR(H5Lmove_external_same_loc);
+ }
+
+ if (link_exists) {
+ H5_FAILED();
+                HDprintf("    original external link still existed in source group after rename!\n");
+ PART_ERROR(H5Lmove_external_same_loc);
+ }
+
+ if ((link_exists = H5Lexists(src_grp_id, MOVE_LINK_TEST_EXTERN_LINK_SAME_LOC_NAME, H5P_DEFAULT)) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if external link '%s' exists\n",
+ MOVE_LINK_TEST_EXTERN_LINK_SAME_LOC_NAME);
+ PART_ERROR(H5Lmove_external_same_loc);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" external link did not exist after move!\n");
+ PART_ERROR(H5Lmove_external_same_loc);
+ }
+
+ /* Rename the link back using H5L_SAME_LOC as the third parameter to H5Lmove */
+ if (H5Lmove(src_grp_id, MOVE_LINK_TEST_EXTERN_LINK_SAME_LOC_NAME, H5L_SAME_LOC,
+ MOVE_LINK_TEST_EXTERN_LINK_NAME3, H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to move link '%s'\n", MOVE_LINK_TEST_EXTERN_LINK_SAME_LOC_NAME);
+ PART_ERROR(H5Lmove_external_same_loc);
+ }
+
+ /* Verify the link has been renamed back */
+ if ((link_exists = H5Lexists(src_grp_id, MOVE_LINK_TEST_EXTERN_LINK_NAME3, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if external link '%s' exists\n",
+ MOVE_LINK_TEST_EXTERN_LINK_NAME3);
+ PART_ERROR(H5Lmove_external_same_loc);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" original external link did not exist after moving the link back!\n");
+ PART_ERROR(H5Lmove_external_same_loc);
+ }
+
+ if ((link_exists = H5Lexists(src_grp_id, MOVE_LINK_TEST_EXTERN_LINK_SAME_LOC_NAME, H5P_DEFAULT)) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if old external link '%s' exists\n",
+ MOVE_LINK_TEST_EXTERN_LINK_SAME_LOC_NAME);
+ PART_ERROR(H5Lmove_external_same_loc);
+ }
+
+ if (link_exists) {
+ H5_FAILED();
+ HDprintf(" renamed external link exists after moving the link back!\n");
+ PART_ERROR(H5Lmove_external_same_loc);
+ }
+
+ PASSED();
+#else
+ SKIPPED();
+ PART_EMPTY(H5Lmove_external_same_loc);
+#endif
+ }
+ PART_END(H5Lmove_external_same_loc);
+
+ H5E_BEGIN_TRY
+ {
+ H5Fclose(ext_file_id);
+ ext_file_id = H5I_INVALID_HID;
+ }
+ H5E_END_TRY;
+
+ PART_BEGIN(H5Lmove_external_rename)
+ {
+ TESTING_2("H5Lmove to rename external link without moving it");
+#ifndef NO_EXTERNAL_LINKS
+ HDsnprintf(ext_link_filename, H5_API_TEST_FILENAME_MAX_LENGTH, "%s",
+ EXTERNAL_LINK_TEST_FILE_NAME);
+
+ if ((ext_file_id = H5Fcreate(ext_link_filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create file '%s' for external link to reference\n", ext_link_filename);
+ PART_ERROR(H5Lmove_external_rename);
+ }
+
+ if (H5Fclose(ext_file_id) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't close file '%s'\n", ext_link_filename);
+ PART_ERROR(H5Lmove_external_rename);
+ }
+
+ /* Try to move an external link */
+ if (H5Lcreate_external(ext_link_filename, "/", src_grp_id, MOVE_LINK_TEST_EXTERN_LINK_NAME4,
+ H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create external link '%s'\n", MOVE_LINK_TEST_EXTERN_LINK_NAME4);
+ PART_ERROR(H5Lmove_external_rename);
+ }
+
+ /* Verify the link has been created */
+ if ((link_exists = H5Lexists(src_grp_id, MOVE_LINK_TEST_EXTERN_LINK_NAME4, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if external link '%s' exists\n",
+ MOVE_LINK_TEST_EXTERN_LINK_NAME4);
+ PART_ERROR(H5Lmove_external_rename);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" external link did not exist\n");
+ PART_ERROR(H5Lmove_external_rename);
+ }
+
+ /* Verify the renamed link doesn't currently exist in the source group */
+            if ((link_exists = H5Lexists(src_grp_id, MOVE_LINK_TEST_EXTERN_LINK_NEW_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if renamed external link '%s' exists\n",
+ MOVE_LINK_TEST_EXTERN_LINK_NEW_NAME);
+ PART_ERROR(H5Lmove_external_rename);
+ }
+
+ if (link_exists) {
+ H5_FAILED();
+ HDprintf(" renamed external link existed in source group before move!\n");
+ PART_ERROR(H5Lmove_external_rename);
+ }
+
+ /* Rename the link */
+ if (H5Lmove(src_grp_id, MOVE_LINK_TEST_EXTERN_LINK_NAME4, src_grp_id,
+ MOVE_LINK_TEST_EXTERN_LINK_NEW_NAME, H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to rename link '%s'\n", MOVE_LINK_TEST_EXTERN_LINK_NAME4);
+ PART_ERROR(H5Lmove_external_rename);
+ }
+
+ /* Verify the link has been renamed */
+ if ((link_exists = H5Lexists(src_grp_id, MOVE_LINK_TEST_EXTERN_LINK_NEW_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if renamed external link '%s' exists\n",
+ MOVE_LINK_TEST_EXTERN_LINK_NEW_NAME);
+ PART_ERROR(H5Lmove_external_rename);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" renamed external link did not exist\n");
+ PART_ERROR(H5Lmove_external_rename);
+ }
+
+ /* Verify the old link is gone */
+ if ((link_exists = H5Lexists(src_grp_id, MOVE_LINK_TEST_EXTERN_LINK_NAME4, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if old external link '%s' exists\n",
+ MOVE_LINK_TEST_EXTERN_LINK_NAME4);
+ PART_ERROR(H5Lmove_external_rename);
+ }
+
+ if (link_exists) {
+ H5_FAILED();
+ HDprintf(" old external link exists\n");
+ PART_ERROR(H5Lmove_external_rename);
+ }
+
+ PASSED();
+#else
+ SKIPPED();
+ PART_EMPTY(H5Lmove_external_rename);
+#endif
+ }
+ PART_END(H5Lmove_external_rename);
+
+ H5E_BEGIN_TRY
+ {
+ H5Fclose(ext_file_id);
+ ext_file_id = H5I_INVALID_HID;
+ }
+ H5E_END_TRY;
+
+ PART_BEGIN(H5Lmove_ud_no_check)
+ {
+ TESTING_2("H5Lmove on user-defined link (moved link's properties not checked)");
+
+ /* TODO */
+
+ SKIPPED();
+ PART_EMPTY(H5Lmove_ud_no_check);
+ }
+ PART_END(H5Lmove_ud_no_check);
+
+ PART_BEGIN(H5Lmove_ud_check)
+ {
+ TESTING_2("H5Lmove on user-defined link (moved link's properties checked)");
+
+ /* TODO */
+
+ SKIPPED();
+ PART_EMPTY(H5Lmove_ud_check);
+ }
+ PART_END(H5Lmove_ud_check);
+
+ PART_BEGIN(H5Lmove_ud_same_loc)
+ {
+ TESTING_2("H5Lmove on user-defined link using H5L_SAME_LOC");
+
+ /* TODO */
+
+ SKIPPED();
+ PART_EMPTY(H5Lmove_ud_same_loc);
+ }
+ PART_END(H5Lmove_ud_same_loc);
+
+ PART_BEGIN(H5Lmove_ud_rename)
+ {
+ TESTING_2("H5Lmove to rename user-defined link without moving it");
+
+ /* TODO */
+
+ SKIPPED();
+ PART_EMPTY(H5Lmove_ud_rename);
+ }
+ PART_END(H5Lmove_ud_rename);
+ }
+ END_MULTIPART;
+
+ TESTING_2("test cleanup");
+
+ if (H5Gclose(dst_grp_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(src_grp_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Gclose(dst_grp_id);
+ H5Gclose(src_grp_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ H5Fclose(ext_file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to check that using H5Lmove to move links into a
+ * group which already contains links causes the moved links
+ * to be assigned creation order values starting at the target
+ * group's current maximum link creation order value and
+ * increasing from there. This verifies that H5Lmove can never
+ * leave a group with two links that share the same creation
+ * order value.
+ */
+static int
+test_move_links_into_group_with_links(void)
+{
+ H5L_info2_t link_info;
+ size_t i;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t src_grp_id = H5I_INVALID_HID, dst_grp_id = H5I_INVALID_HID;
+ hid_t gcpl_id = H5I_INVALID_HID;
+ char link_name[MOVE_LINK_INTO_GRP_WITH_LINKS_TEST_BUF_SIZE];
+
+ TESTING("H5Lmove adjusting creation order values for moved links");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_LINK_MORE) || !(vol_cap_flags_g & H5VL_CAP_FLAG_HARD_LINKS) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_CREATION_ORDER)) {
+ SKIPPED();
+        HDprintf("    API functions for basic file, group, more or hard link, or creation order aren't "
+ "supported with this connector\n");
+ return 0;
+ }
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, LINK_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", LINK_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, MOVE_LINK_INTO_GRP_WITH_LINKS_TEST_SUBGROUP_NAME, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create group '%s'\n", MOVE_LINK_INTO_GRP_WITH_LINKS_TEST_SUBGROUP_NAME);
+ goto error;
+ }
+
+ if ((gcpl_id = H5Pcreate(H5P_GROUP_CREATE)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create GCPL for link creation order tracking\n");
+ goto error;
+ }
+
+ if (H5Pset_link_creation_order(gcpl_id, H5P_CRT_ORDER_TRACKED) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't set link creation order tracking\n");
+ goto error;
+ }
+
+ if ((src_grp_id = H5Gcreate2(group_id, MOVE_LINK_INTO_GRP_WITH_LINKS_TEST_SRC_GRP_NAME, H5P_DEFAULT,
+ gcpl_id, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create group '%s'\n", MOVE_LINK_INTO_GRP_WITH_LINKS_TEST_SRC_GRP_NAME);
+ goto error;
+ }
+
+ if ((dst_grp_id = H5Gcreate2(group_id, MOVE_LINK_INTO_GRP_WITH_LINKS_TEST_DST_GRP_NAME, H5P_DEFAULT,
+ gcpl_id, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create group '%s'\n", MOVE_LINK_INTO_GRP_WITH_LINKS_TEST_DST_GRP_NAME);
+ goto error;
+ }
+
+ /* Create several links in the source group */
+ for (i = 0; i < MOVE_LINK_INTO_GRP_WITH_LINKS_TEST_NUM_LINKS; i++) {
+ snprintf(link_name, MOVE_LINK_INTO_GRP_WITH_LINKS_TEST_BUF_SIZE, "link_to_move%d", (int)i);
+
+ if (H5Lcreate_hard(src_grp_id, ".", src_grp_id, link_name, H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create link '%s' in source group\n", link_name);
+ goto error;
+ }
+
+ /* Check the current creation order value for each link */
+ memset(&link_info, 0, sizeof(link_info));
+ if (H5Lget_info2(src_grp_id, link_name, &link_info, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve info for link '%s'\n", link_name);
+ goto error;
+ }
+
+ if (!link_info.corder_valid) {
+ H5_FAILED();
+ HDprintf(" creation order value for newly-created link '%s' was marked as not valid!\n",
+ link_name);
+ goto error;
+ }
+
+ if (link_info.corder != (int64_t)i) {
+ H5_FAILED();
+ HDprintf(" creation order value %lld for link '%s' did not match expected value %lld\n",
+ (long long)link_info.corder, link_name, (long long)i);
+ goto error;
+ }
+ }
+
+ /* Create several links in the destination group */
+ for (i = 0; i < MOVE_LINK_INTO_GRP_WITH_LINKS_TEST_NUM_LINKS; i++) {
+ snprintf(link_name, MOVE_LINK_INTO_GRP_WITH_LINKS_TEST_BUF_SIZE, "link%d", (int)i);
+
+ if (H5Lcreate_hard(dst_grp_id, ".", dst_grp_id, link_name, H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create link '%s' in destination group\n", link_name);
+ goto error;
+ }
+ }
+
+ /* Move all the links from the source group into the destination group */
+ for (i = 0; i < MOVE_LINK_INTO_GRP_WITH_LINKS_TEST_NUM_LINKS; i++) {
+ snprintf(link_name, MOVE_LINK_INTO_GRP_WITH_LINKS_TEST_BUF_SIZE, "link_to_move%d", (int)i);
+
+ if (H5Lmove(src_grp_id, link_name, dst_grp_id, link_name, H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to move link '%s' from source group to destination group\n", link_name);
+ goto error;
+ }
+
+ /* Check that the creation order value for each moved link has been adjusted */
+ memset(&link_info, 0, sizeof(link_info));
+ if (H5Lget_info2(dst_grp_id, link_name, &link_info, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve info for link '%s'\n", link_name);
+ goto error;
+ }
+
+ if (!link_info.corder_valid) {
+ H5_FAILED();
+ HDprintf(" creation order value for moved link '%s' was marked as not valid!\n", link_name);
+ goto error;
+ }
+
+ if (link_info.corder != (int64_t)(i + MOVE_LINK_INTO_GRP_WITH_LINKS_TEST_NUM_LINKS)) {
+ H5_FAILED();
+ HDprintf(" creation order value for moved link '%s' was not adjusted after move! It should "
+ "have been %lld but was %lld\n",
+ link_name, (long long)(i + MOVE_LINK_INTO_GRP_WITH_LINKS_TEST_NUM_LINKS),
+ (long long)link_info.corder);
+ goto error;
+ }
+ }
+
+ if (H5Pclose(gcpl_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(dst_grp_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(src_grp_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Pclose(gcpl_id);
+ H5Gclose(dst_grp_id);
+ H5Gclose(src_grp_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to check the behavior of moving a link across files.
+ * This should fail for hard links but succeed for soft and
+ * external links (and user-defined links of those types).
+ *
+ * TODO: Ideally, tests should be written to verify that the
+ * moved links retain their original properties.
+ */
+static int
+test_move_link_across_files(void)
+{
+ TESTING("link moving across files");
+
+ /* TODO */
+
+ SKIPPED();
+
+ return 0;
+}
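+
+/*
+ * A minimal sketch (not compiled as part of the test suite) of what the soft-link
+ * portion of the above test might look like once implemented, assuming the
+ * cross-file behavior described above (moving a soft link between files succeeds).
+ * The file names, link names, and target path used here are hypothetical, and all
+ * error checking is omitted for brevity:
+ *
+ *     hid_t file_a = H5Fcreate("move_across_files_src.h5", H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ *     hid_t file_b = H5Fcreate("move_across_files_dst.h5", H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ *
+ *     H5Lcreate_soft("/some/target/path", file_a, "soft_link_to_move", H5P_DEFAULT, H5P_DEFAULT);
+ *     H5Lmove(file_a, "soft_link_to_move", file_b, "soft_link_to_move", H5P_DEFAULT, H5P_DEFAULT);
+ *
+ *     // The original link should be gone and the moved link should exist in the
+ *     // destination file, with its value (the target path) unchanged.
+ *     if (H5Lexists(file_a, "soft_link_to_move", H5P_DEFAULT) > 0 ||
+ *         H5Lexists(file_b, "soft_link_to_move", H5P_DEFAULT) <= 0)
+ *         TEST_ERROR;
+ *
+ *     H5Fclose(file_b);
+ *     H5Fclose(file_a);
+ */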
+
+/*
+ * A test to check that a group's always-increasing
+ * maximum link creation order value gets reset once
+ * all the links have been moved out of the group.
+ */
+static int
+test_move_link_reset_grp_max_crt_order(void)
+{
+#ifndef NO_MAX_LINK_CRT_ORDER_RESET
+ H5G_info_t grp_info;
+ size_t i;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t src_grp_id = H5I_INVALID_HID, dst_grp_id = H5I_INVALID_HID;
+ hid_t gcpl_id = H5I_INVALID_HID;
+ char link_name[MOVE_LINK_RESET_MAX_CRT_ORDER_TEST_BUF_SIZE];
+#endif
+
+ TESTING("H5Lmove of all links out of group resets group's maximum link creation order value");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_LINK_MORE) || !(vol_cap_flags_g & H5VL_CAP_FLAG_HARD_LINKS) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_MORE) || !(vol_cap_flags_g & H5VL_CAP_FLAG_CREATION_ORDER)) {
+ SKIPPED();
+        HDprintf("    API functions for basic file, basic or more group, more or hard link, or creation "
+                 "order aren't supported with this connector\n");
+ return 0;
+ }
+
+#ifndef NO_MAX_LINK_CRT_ORDER_RESET
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, LINK_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", LINK_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((gcpl_id = H5Pcreate(H5P_GROUP_CREATE)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create GCPL for link creation order tracking\n");
+ goto error;
+ }
+
+ if (H5Pset_link_creation_order(gcpl_id, H5P_CRT_ORDER_TRACKED) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't set link creation order tracking\n");
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, MOVE_LINK_RESET_MAX_CRT_ORDER_TEST_SUBGROUP_NAME, H5P_DEFAULT,
+ gcpl_id, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container sub-group '%s'\n",
+ MOVE_LINK_RESET_MAX_CRT_ORDER_TEST_SUBGROUP_NAME);
+ goto error;
+ }
+
+ if ((src_grp_id = H5Gcreate2(group_id, MOVE_LINK_RESET_MAX_CRT_ORDER_TEST_SRC_GRP_NAME, H5P_DEFAULT,
+ gcpl_id, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create group '%s'\n", MOVE_LINK_RESET_MAX_CRT_ORDER_TEST_SRC_GRP_NAME);
+ goto error;
+ }
+
+ if ((dst_grp_id = H5Gcreate2(group_id, MOVE_LINK_RESET_MAX_CRT_ORDER_TEST_DST_GRP_NAME, H5P_DEFAULT,
+ gcpl_id, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create group '%s'\n", MOVE_LINK_RESET_MAX_CRT_ORDER_TEST_DST_GRP_NAME);
+ goto error;
+ }
+
+ /* Create several links inside the source group */
+ for (i = 0; i < MOVE_LINK_RESET_MAX_CRT_ORDER_TEST_NUM_LINKS; i++) {
+ snprintf(link_name, MOVE_LINK_RESET_MAX_CRT_ORDER_TEST_BUF_SIZE, "link%d", (int)i);
+
+ if (H5Lcreate_hard(src_grp_id, ".", src_grp_id, link_name, H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create hard link '%s' in source group\n", link_name);
+ goto error;
+ }
+ }
+
+ /*
+ * Move links out of the source group and into the destination group, checking the
+ * source group's maximum creation order value each time.
+ */
+ for (i = 0; i < MOVE_LINK_RESET_MAX_CRT_ORDER_TEST_NUM_LINKS; i++) {
+ memset(&grp_info, 0, sizeof(grp_info));
+
+ if (H5Gget_info(src_grp_id, &grp_info) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve source group's info\n");
+ goto error;
+ }
+
+ if (grp_info.max_corder != MOVE_LINK_RESET_MAX_CRT_ORDER_TEST_NUM_LINKS) {
+ H5_FAILED();
+ HDprintf(" source group's maximum creation order value got adjusted to %lld during link "
+ "moving; value should have remained at %lld\n",
+                     (long long)grp_info.max_corder, (long long)MOVE_LINK_RESET_MAX_CRT_ORDER_TEST_NUM_LINKS);
+ goto error;
+ }
+
+ snprintf(link_name, MOVE_LINK_RESET_MAX_CRT_ORDER_TEST_BUF_SIZE, "link%d", (int)i);
+
+ if (H5Lmove(src_grp_id, link_name, dst_grp_id, link_name, H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to move link '%s' to destination group\n", link_name);
+ goto error;
+ }
+ }
+
+ /*
+ * Ensure the source group's maximum creation order value has now
+ * reset to 0 after all the links have been moved out of it.
+ */
+ memset(&grp_info, 0, sizeof(grp_info));
+
+ if (H5Gget_info(src_grp_id, &grp_info) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve source group's info\n");
+ goto error;
+ }
+
+ if (grp_info.max_corder != 0) {
+ H5_FAILED();
+ HDprintf(" source group's maximum creation order value didn't reset to 0 after moving all links "
+ "out of it; value is still %lld\n",
+ (long long)grp_info.max_corder);
+ goto error;
+ }
+
+ /* For good measure, check that destination group's max. creation order value is as expected */
+ memset(&grp_info, 0, sizeof(grp_info));
+
+ if (H5Gget_info(dst_grp_id, &grp_info) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve destination group's info\n");
+ goto error;
+ }
+
+ if (grp_info.max_corder != MOVE_LINK_RESET_MAX_CRT_ORDER_TEST_NUM_LINKS) {
+ H5_FAILED();
+ HDprintf(" destination group's maximum creation order value of %lld didn't match expected value "
+ "of %lld after moving all links into it\n",
+                 (long long)grp_info.max_corder, (long long)MOVE_LINK_RESET_MAX_CRT_ORDER_TEST_NUM_LINKS);
+ goto error;
+ }
+
+ if (H5Pclose(gcpl_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(dst_grp_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(src_grp_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Pclose(gcpl_id);
+ H5Gclose(dst_grp_id);
+ H5Gclose(src_grp_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+#else
+ SKIPPED();
+ return 0;
+#endif
+}
+
+/*
+ * A test to check that H5Lmove fails when it is given
+ * invalid parameters.
+ */
+static int
+test_move_link_invalid_params(void)
+{
+ htri_t link_exists;
+ herr_t err_ret = -1;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t src_grp_id = H5I_INVALID_HID, dst_grp_id = H5I_INVALID_HID;
+ char ext_link_filename[H5_API_TEST_FILENAME_MAX_LENGTH];
+ hid_t ext_file_id = H5I_INVALID_HID;
+
+ TESTING_MULTIPART("H5Lmove with invalid parameters");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_LINK_MORE) || !(vol_cap_flags_g & H5VL_CAP_FLAG_HARD_LINKS)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file, group, more or hard link aren't supported with this "
+ "connector\n");
+ return 0;
+ }
+
+ TESTING_2("test setup");
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, LINK_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", LINK_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, MOVE_LINK_INVALID_PARAMS_TEST_SUBGROUP_NAME, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create group '%s'\n", MOVE_LINK_INVALID_PARAMS_TEST_SUBGROUP_NAME);
+ goto error;
+ }
+
+ if ((src_grp_id = H5Gcreate2(group_id, MOVE_LINK_INVALID_PARAMS_TEST_SRC_GROUP_NAME, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create group '%s'\n", MOVE_LINK_INVALID_PARAMS_TEST_SRC_GROUP_NAME);
+ goto error;
+ }
+
+ if ((dst_grp_id = H5Gcreate2(group_id, MOVE_LINK_INVALID_PARAMS_TEST_DST_GROUP_NAME, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create group '%s'\n", MOVE_LINK_INVALID_PARAMS_TEST_DST_GROUP_NAME);
+ goto error;
+ }
+
+ if (H5Lcreate_hard(group_id, ".", src_grp_id, MOVE_LINK_INVALID_PARAMS_TEST_HARD_LINK_NAME, H5P_DEFAULT,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create hard link '%s'\n", MOVE_LINK_INVALID_PARAMS_TEST_HARD_LINK_NAME);
+ goto error;
+ }
+
+ /* Verify the link has been created */
+ if ((link_exists = H5Lexists(src_grp_id, MOVE_LINK_INVALID_PARAMS_TEST_HARD_LINK_NAME, H5P_DEFAULT)) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if hard link '%s' exists\n",
+ MOVE_LINK_INVALID_PARAMS_TEST_HARD_LINK_NAME);
+ goto error;
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" hard link did not exist\n");
+ goto error;
+ }
+
+ /* Verify the link doesn't currently exist in the target group */
+ if ((link_exists = H5Lexists(dst_grp_id, MOVE_LINK_INVALID_PARAMS_TEST_HARD_LINK_NAME, H5P_DEFAULT)) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if hard link '%s' exists\n",
+ MOVE_LINK_INVALID_PARAMS_TEST_HARD_LINK_NAME);
+ goto error;
+ }
+
+ if (link_exists) {
+ H5_FAILED();
+ HDprintf(" hard link existed in target group before move!\n");
+ goto error;
+ }
+
+ PASSED();
+
+ BEGIN_MULTIPART
+ {
+ PART_BEGIN(H5Lmove_invalid_src_loc_id)
+ {
+ TESTING_2("H5Lmove with an invalid source location ID");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Lmove(H5I_INVALID_HID, MOVE_LINK_INVALID_PARAMS_TEST_HARD_LINK_NAME, dst_grp_id,
+ MOVE_LINK_INVALID_PARAMS_TEST_HARD_LINK_NAME, H5P_DEFAULT, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Lmove succeeded with an invalid source location ID!\n");
+ PART_ERROR(H5Lmove_invalid_src_loc_id);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lmove_invalid_src_loc_id);
+
+ PART_BEGIN(H5Lmove_invalid_src_name)
+ {
+ TESTING_2("H5Lmove with an invalid source name");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Lmove(src_grp_id, NULL, dst_grp_id, MOVE_LINK_INVALID_PARAMS_TEST_HARD_LINK_NAME,
+ H5P_DEFAULT, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Lmove succeeded with a NULL source name!\n");
+ PART_ERROR(H5Lmove_invalid_src_name);
+ }
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Lmove(src_grp_id, "", dst_grp_id, MOVE_LINK_INVALID_PARAMS_TEST_HARD_LINK_NAME,
+ H5P_DEFAULT, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Lmove succeeded with an invalid source name of ''!\n");
+ PART_ERROR(H5Lmove_invalid_src_name);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lmove_invalid_src_name);
+
+ PART_BEGIN(H5Lmove_invalid_dst_loc_id)
+ {
+ TESTING_2("H5Lmove with an invalid destination location ID");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Lmove(src_grp_id, MOVE_LINK_INVALID_PARAMS_TEST_HARD_LINK_NAME, H5I_INVALID_HID,
+ MOVE_LINK_INVALID_PARAMS_TEST_HARD_LINK_NAME, H5P_DEFAULT, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Lmove succeeded with an invalid destination location ID!\n");
+ PART_ERROR(H5Lmove_invalid_dst_loc_id);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lmove_invalid_dst_loc_id);
+
+ PART_BEGIN(H5Lmove_invalid_dst_name)
+ {
+ TESTING_2("H5Lmove with an invalid destination name");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Lmove(src_grp_id, MOVE_LINK_INVALID_PARAMS_TEST_HARD_LINK_NAME, dst_grp_id, NULL,
+ H5P_DEFAULT, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Lmove succeeded with a NULL destination name!\n");
+ PART_ERROR(H5Lmove_invalid_dst_name);
+ }
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Lmove(src_grp_id, MOVE_LINK_INVALID_PARAMS_TEST_HARD_LINK_NAME, dst_grp_id, "",
+ H5P_DEFAULT, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Lmove succeeded with an invalid destination name of ''!\n");
+ PART_ERROR(H5Lmove_invalid_dst_name);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lmove_invalid_dst_name);
+
+ PART_BEGIN(H5Lmove_invalid_lcpl)
+ {
+ TESTING_2("H5Lmove with an invalid LCPL");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Lmove(src_grp_id, MOVE_LINK_INVALID_PARAMS_TEST_HARD_LINK_NAME, dst_grp_id,
+ MOVE_LINK_INVALID_PARAMS_TEST_HARD_LINK_NAME, H5I_INVALID_HID, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Lmove succeeded with an invalid LCPL!\n");
+ PART_ERROR(H5Lmove_invalid_lcpl);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lmove_invalid_lcpl);
+
+ PART_BEGIN(H5Lmove_invalid_lapl)
+ {
+ TESTING_2("H5Lmove with an invalid LAPL");
+#ifndef NO_INVALID_PROPERTY_LIST_TESTS
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Lmove(src_grp_id, MOVE_LINK_INVALID_PARAMS_TEST_HARD_LINK_NAME, dst_grp_id,
+ MOVE_LINK_INVALID_PARAMS_TEST_HARD_LINK_NAME, H5P_DEFAULT, H5I_INVALID_HID);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Lmove succeeded with an invalid LAPL!\n");
+ PART_ERROR(H5Lmove_invalid_lapl);
+ }
+
+ PASSED();
+#else
+ SKIPPED();
+ PART_EMPTY(H5Lmove_invalid_lapl);
+#endif
+ }
+ PART_END(H5Lmove_invalid_lapl);
+
+ PART_BEGIN(H5Lmove_existence)
+ {
+ TESTING_2("valid link existence in original group after previous invalid H5Lmove calls");
+
+ /* Verify the link hasn't been moved */
+ if ((link_exists =
+ H5Lexists(src_grp_id, MOVE_LINK_INVALID_PARAMS_TEST_HARD_LINK_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if hard link '%s' exists\n",
+ MOVE_LINK_INVALID_PARAMS_TEST_HARD_LINK_NAME);
+ PART_ERROR(H5Lmove_existence);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" hard link didn't exist in source group after invalid move!\n");
+ PART_ERROR(H5Lmove_existence);
+ }
+
+ if ((link_exists =
+ H5Lexists(dst_grp_id, MOVE_LINK_INVALID_PARAMS_TEST_HARD_LINK_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if hard link '%s' exists\n",
+ MOVE_LINK_INVALID_PARAMS_TEST_HARD_LINK_NAME);
+ PART_ERROR(H5Lmove_existence);
+ }
+
+ if (link_exists) {
+ H5_FAILED();
+ HDprintf(" hard link existed in target group after invalid move!\n");
+ PART_ERROR(H5Lmove_existence);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lmove_existence);
+
+ PART_BEGIN(H5Lmove_same_location)
+ {
+ TESTING_2("H5Lmove with an invalid same location");
+
+            /* Attempt to move a link within the file while passing H5L_SAME_LOC for both the
+             * source and destination locations; this should fail. */
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Lmove(H5L_SAME_LOC, MOVE_LINK_INVALID_PARAMS_TEST_HARD_LINK_NAME, H5L_SAME_LOC,
+ MOVE_LINK_INVALID_PARAMS_TEST_HARD_LINK_NAME, H5P_DEFAULT, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Lmove succeeded with an invalid same location!\n");
+ PART_ERROR(H5Lmove_same_location);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lmove_same_location);
+
+ PART_BEGIN(H5Lmove_across_files)
+ {
+ TESTING_2("H5Lmove into another file");
+
+ HDsnprintf(ext_link_filename, H5_API_TEST_FILENAME_MAX_LENGTH, "%s",
+ EXTERNAL_LINK_TEST_FILE_NAME);
+
+ if ((ext_file_id = H5Fcreate(ext_link_filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create file '%s' for external link to reference\n", ext_link_filename);
+ PART_ERROR(H5Lmove_across_files);
+ }
+
+            /* Attempt to move a hard link across files; this should fail. */
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Lmove(src_grp_id, MOVE_LINK_INVALID_PARAMS_TEST_HARD_LINK_NAME, ext_file_id,
+ MOVE_LINK_INVALID_PARAMS_TEST_HARD_LINK_NAME, H5P_DEFAULT, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Lmove succeeded in moving a hard link across files!\n");
+ PART_ERROR(H5Lmove_across_files);
+ }
+
+ /* Ensure that original link still exists */
+ if ((link_exists =
+ H5Lexists(src_grp_id, MOVE_LINK_INVALID_PARAMS_TEST_HARD_LINK_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if original link '%s' exists after invalid link move\n",
+ MOVE_LINK_INVALID_PARAMS_TEST_HARD_LINK_NAME);
+ PART_ERROR(H5Lmove_across_files);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" original link '%s' didn't exist after failed move!\n",
+ MOVE_LINK_INVALID_PARAMS_TEST_HARD_LINK_NAME);
+ PART_ERROR(H5Lmove_across_files);
+ }
+
+ if (H5Fclose(ext_file_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close a file!\n");
+ PART_ERROR(H5Lmove_across_files);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lmove_across_files);
+ }
+ END_MULTIPART;
+
+ TESTING_2("test cleanup");
+
+ if (H5Gclose(dst_grp_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(src_grp_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Gclose(dst_grp_id);
+ H5Gclose(src_grp_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(ext_file_id);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to check that a soft or external link's value can
+ * be retrieved by using H5Lget_val and H5Lget_val_by_idx.
+ */
+static int
+test_get_link_val(void)
+{
+ H5L_info2_t link_info;
+#ifndef NO_EXTERNAL_LINKS
+ const char *ext_link_filepath;
+ const char *ext_link_val;
+ unsigned ext_link_flags;
+#endif
+ htri_t link_exists;
+ size_t link_val_size;
+ char link_val_buf[GET_LINK_VAL_TEST_LINK_VAL_BUF_SIZE];
+ hid_t file_id = H5I_INVALID_HID, ext_file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t subgroup_id = H5I_INVALID_HID;
+ hid_t gcpl_id = H5I_INVALID_HID;
+#ifndef NO_EXTERNAL_LINKS
+ char ext_link_filename[H5_API_TEST_FILENAME_MAX_LENGTH];
+#endif
+
+ TESTING_MULTIPART("link value retrieval");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_LINK_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_LINK_MORE) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_CREATION_ORDER) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_EXTERNAL_LINKS) || !(vol_cap_flags_g & H5VL_CAP_FLAG_SOFT_LINKS)) {
+ SKIPPED();
+        HDprintf("    API functions for basic file, group, basic, more, soft, or external link, or creation "
+ "order aren't supported with this connector\n");
+ return 0;
+ }
+
+ TESTING_2("test setup");
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, LINK_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", LINK_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((gcpl_id = H5Pcreate(H5P_GROUP_CREATE)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create GCPL for link creation order tracking\n");
+ goto error;
+ }
+
+ if (H5Pset_link_creation_order(gcpl_id, H5P_CRT_ORDER_TRACKED) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't set link creation order tracking\n");
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, GET_LINK_VAL_TEST_SUBGROUP_NAME, H5P_DEFAULT, gcpl_id,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container subgroup '%s'\n", GET_LINK_VAL_TEST_SUBGROUP_NAME);
+ goto error;
+ }
+
+ PASSED();
+
+ BEGIN_MULTIPART
+ {
+ PART_BEGIN(H5Lget_val_soft)
+ {
+ const char *link_target = "/" LINK_TEST_GROUP_NAME "/" GET_LINK_VAL_TEST_SUBGROUP_NAME;
+
+ TESTING_2("H5Lget_val on soft link");
+
+ HDmemset(&link_info, 0, sizeof(link_info));
+
+ if ((subgroup_id = H5Gcreate2(group_id, GET_LINK_VAL_TEST_SUBGROUP1_NAME, H5P_DEFAULT, gcpl_id,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create subgroup '%s'\n", GET_LINK_VAL_TEST_SUBGROUP1_NAME);
+ PART_ERROR(H5Lget_val_soft);
+ }
+
+ if (H5Lcreate_soft(link_target, subgroup_id, GET_LINK_VAL_TEST_SOFT_LINK_NAME, H5P_DEFAULT,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create soft link '%s'\n", GET_LINK_VAL_TEST_SOFT_LINK_NAME);
+ PART_ERROR(H5Lget_val_soft);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, GET_LINK_VAL_TEST_SOFT_LINK_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if link '%s' exists\n", GET_LINK_VAL_TEST_SOFT_LINK_NAME);
+ PART_ERROR(H5Lget_val_soft);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" link '%s' did not exist\n", GET_LINK_VAL_TEST_SOFT_LINK_NAME);
+ PART_ERROR(H5Lget_val_soft);
+ }
+
+ if (H5Lget_info2(subgroup_id, GET_LINK_VAL_TEST_SOFT_LINK_NAME, &link_info, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve soft link's info\n");
+ PART_ERROR(H5Lget_val_soft);
+ }
+
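+            /* A soft link's value is the NUL-terminated path of its target, so the
+             * reported value size should include the terminating byte. */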
+ link_val_size = strlen(link_target) + 1;
+ if (link_info.u.val_size != link_val_size) {
+ H5_FAILED();
+ HDprintf(" link value size %zu did not match expected size of %zu\n", link_info.u.val_size,
+ link_val_size);
+                PART_ERROR(H5Lget_val_soft);
+ }
+
+ if (H5Lget_val(subgroup_id, GET_LINK_VAL_TEST_SOFT_LINK_NAME, link_val_buf,
+ GET_LINK_VAL_TEST_LINK_VAL_BUF_SIZE, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get soft link value\n");
+ PART_ERROR(H5Lget_val_soft);
+ }
+
+ if (HDstrncmp(link_val_buf, link_target, link_val_size)) {
+ H5_FAILED();
+ HDprintf(" soft link value did not match\n");
+ PART_ERROR(H5Lget_val_soft);
+ }
+
+ if (H5Gclose(subgroup_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close group '%s'\n", GET_LINK_VAL_TEST_SUBGROUP1_NAME);
+ PART_ERROR(H5Lget_val_soft);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lget_val_soft);
+
+ H5E_BEGIN_TRY
+ {
+ H5Gclose(subgroup_id);
+ subgroup_id = H5I_INVALID_HID;
+ }
+ H5E_END_TRY;
+
+ PART_BEGIN(H5Lget_val_external)
+ {
+#ifndef NO_EXTERNAL_LINKS
+ const char *ext_obj_name = "/";
+#endif
+
+ TESTING_2("H5Lget_val on external link");
+#ifndef NO_EXTERNAL_LINKS
+ HDmemset(&link_info, 0, sizeof(link_info));
+
+ HDsnprintf(ext_link_filename, H5_API_TEST_FILENAME_MAX_LENGTH, "%s",
+ EXTERNAL_LINK_TEST_FILE_NAME);
+
+ if ((ext_file_id = H5Fcreate(ext_link_filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create file '%s' for external link to reference\n", ext_link_filename);
+ PART_ERROR(H5Lget_val_external);
+ }
+
+ if (H5Fclose(ext_file_id) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't close file '%s'\n", ext_link_filename);
+ PART_ERROR(H5Lget_val_external);
+ }
+
+ if ((subgroup_id = H5Gcreate2(group_id, GET_LINK_VAL_TEST_SUBGROUP2_NAME, H5P_DEFAULT, gcpl_id,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create subgroup '%s'\n", GET_LINK_VAL_TEST_SUBGROUP2_NAME);
+ PART_ERROR(H5Lget_val_external);
+ }
+
+ if (H5Lcreate_external(ext_link_filename, ext_obj_name, subgroup_id,
+ GET_LINK_VAL_TEST_EXT_LINK_NAME, H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create external link '%s'\n", GET_LINK_VAL_TEST_EXT_LINK_NAME);
+ PART_ERROR(H5Lget_val_external);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, GET_LINK_VAL_TEST_EXT_LINK_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if external link '%s' exists\n",
+ GET_LINK_VAL_TEST_EXT_LINK_NAME);
+ PART_ERROR(H5Lget_val_external);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" external link '%s' did not exist\n", GET_LINK_VAL_TEST_EXT_LINK_NAME);
+ PART_ERROR(H5Lget_val_external);
+ }
+
+ if (H5Lget_info2(subgroup_id, GET_LINK_VAL_TEST_EXT_LINK_NAME, &link_info, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve external link's info\n");
+ PART_ERROR(H5Lget_val_external);
+ }
+
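+            /* An external link's value consists of a 1-byte version/flags field followed
+             * by the NUL-terminated target file name and the NUL-terminated target object
+             * path, which is what the expected size below accounts for. */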
+ link_val_size = 1 + strlen(ext_link_filename) + 1 + strlen(ext_obj_name) + 1;
+ if (link_info.u.val_size != link_val_size) {
+ H5_FAILED();
+                HDprintf("    link value size %zu did not match expected size of %zu\n",
+ link_info.u.val_size, link_val_size);
+ PART_ERROR(H5Lget_val_external);
+ }
+
+ if (H5Lget_val(subgroup_id, GET_LINK_VAL_TEST_EXT_LINK_NAME, link_val_buf,
+ GET_LINK_VAL_TEST_LINK_VAL_BUF_SIZE, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get external link value\n");
+ PART_ERROR(H5Lget_val_external);
+ }
+
+ if (H5Lunpack_elink_val(link_val_buf, link_info.u.val_size, &ext_link_flags, &ext_link_filepath,
+ &ext_link_val) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't unpack external link value buffer\n");
+ PART_ERROR(H5Lget_val_external);
+ }
+
+ if (HDstrncmp(ext_link_filepath, ext_link_filename, strlen(ext_link_filename) + 1)) {
+ H5_FAILED();
+ HDprintf(" external link target file '%s' did not match expected '%s'\n",
+ ext_link_filepath, ext_link_filename);
+ PART_ERROR(H5Lget_val_external);
+ }
+
+ if (HDstrncmp(ext_link_val, ext_obj_name, strlen(ext_obj_name) + 1)) {
+ H5_FAILED();
+ HDprintf(" external link value '%s' did not match expected '%s'\n", ext_link_val,
+ ext_obj_name);
+ PART_ERROR(H5Lget_val_external);
+ }
+
+ if (H5Gclose(subgroup_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close group '%s'\n", GET_LINK_VAL_TEST_SUBGROUP2_NAME);
+ PART_ERROR(H5Lget_val_external);
+ }
+
+ PASSED();
+#else
+ SKIPPED();
+ PART_EMPTY(H5Lget_val_external);
+#endif
+ }
+ PART_END(H5Lget_val_external);
+
+ H5E_BEGIN_TRY
+ {
+ H5Gclose(subgroup_id);
+ subgroup_id = H5I_INVALID_HID;
+ H5Fclose(ext_file_id);
+ ext_file_id = H5I_INVALID_HID;
+ }
+ H5E_END_TRY;
+
+ PART_BEGIN(H5Lget_val_ud)
+ {
+ TESTING_2("H5Lget_val on user-defined link");
+
+ /* TODO */
+
+ SKIPPED();
+ PART_EMPTY(H5Lget_val_ud);
+ }
+ PART_END(H5Lget_val_ud);
+
+ H5E_BEGIN_TRY
+ {
+ H5Gclose(subgroup_id);
+ subgroup_id = H5I_INVALID_HID;
+ }
+ H5E_END_TRY;
+
+ PART_BEGIN(H5Lget_val_by_idx_soft_crt_order_increasing)
+ {
+ const char *link_target_a = "/" LINK_TEST_GROUP_NAME "/" GET_LINK_VAL_TEST_SUBGROUP_NAME
+ "/" GET_LINK_VAL_TEST_SUBGROUP4_NAME "A";
+ const char *link_target_b = "/" LINK_TEST_GROUP_NAME "/" GET_LINK_VAL_TEST_SUBGROUP_NAME
+ "/" GET_LINK_VAL_TEST_SUBGROUP4_NAME "B";
+ const char *link_target_c = "/" LINK_TEST_GROUP_NAME "/" GET_LINK_VAL_TEST_SUBGROUP_NAME
+ "/" GET_LINK_VAL_TEST_SUBGROUP4_NAME "C";
+
+ TESTING_2("H5Lget_val_by_idx on soft link by creation order in increasing order");
+
+ if ((subgroup_id = H5Gcreate2(group_id, GET_LINK_VAL_TEST_SUBGROUP4_NAME, H5P_DEFAULT, gcpl_id,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create subgroup '%s'\n", GET_LINK_VAL_TEST_SUBGROUP4_NAME);
+ PART_ERROR(H5Lget_val_by_idx_soft_crt_order_increasing);
+ }
+
+ /* Create several soft links */
+ if (H5Lcreate_soft(link_target_a, subgroup_id, GET_LINK_VAL_TEST_SOFT_LINK_NAME, H5P_DEFAULT,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create soft link '%s'\n", GET_LINK_VAL_TEST_SOFT_LINK_NAME);
+ PART_ERROR(H5Lget_val_by_idx_soft_crt_order_increasing);
+ }
+
+ if (H5Lcreate_soft(link_target_b, subgroup_id, GET_LINK_VAL_TEST_SOFT_LINK_NAME2, H5P_DEFAULT,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create soft link '%s'\n", GET_LINK_VAL_TEST_SOFT_LINK_NAME2);
+ PART_ERROR(H5Lget_val_by_idx_soft_crt_order_increasing);
+ }
+
+ if (H5Lcreate_soft(link_target_c, subgroup_id, GET_LINK_VAL_TEST_SOFT_LINK_NAME3, H5P_DEFAULT,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create soft link '%s'\n", GET_LINK_VAL_TEST_SOFT_LINK_NAME3);
+ PART_ERROR(H5Lget_val_by_idx_soft_crt_order_increasing);
+ }
+
+ /* Verify the links exist */
+ if ((link_exists = H5Lexists(subgroup_id, GET_LINK_VAL_TEST_SOFT_LINK_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if link '%s' exists\n", GET_LINK_VAL_TEST_SOFT_LINK_NAME);
+ PART_ERROR(H5Lget_val_by_idx_soft_crt_order_increasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" link '%s' did not exist\n", GET_LINK_VAL_TEST_SOFT_LINK_NAME);
+ PART_ERROR(H5Lget_val_by_idx_soft_crt_order_increasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, GET_LINK_VAL_TEST_SOFT_LINK_NAME2, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if link '%s' exists\n", GET_LINK_VAL_TEST_SOFT_LINK_NAME2);
+ PART_ERROR(H5Lget_val_by_idx_soft_crt_order_increasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" link '%s' did not exist\n", GET_LINK_VAL_TEST_SOFT_LINK_NAME2);
+ PART_ERROR(H5Lget_val_by_idx_soft_crt_order_increasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, GET_LINK_VAL_TEST_SOFT_LINK_NAME3, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if link '%s' exists\n", GET_LINK_VAL_TEST_SOFT_LINK_NAME3);
+ PART_ERROR(H5Lget_val_by_idx_soft_crt_order_increasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" link '%s' did not exist\n", GET_LINK_VAL_TEST_SOFT_LINK_NAME3);
+ PART_ERROR(H5Lget_val_by_idx_soft_crt_order_increasing);
+ }
+
+ /* Retrieve the info and value of each link in turn */
+ HDmemset(&link_info, 0, sizeof(link_info));
+ if (H5Lget_info_by_idx2(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, 0, &link_info,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve soft link's info at index %d\n", 0);
+ PART_ERROR(H5Lget_val_by_idx_soft_crt_order_increasing);
+ }
+
+ link_val_size = strlen(link_target_a) + 1;
+ if (link_info.u.val_size != link_val_size) {
+ H5_FAILED();
+ HDprintf(" link value size %zu for link at index %d did not match expected size of %zu\n",
+ link_info.u.val_size, 0, link_val_size);
+ PART_ERROR(H5Lget_val_by_idx_soft_crt_order_increasing);
+ }
+
+ HDmemset(link_val_buf, 0, GET_LINK_VAL_TEST_LINK_VAL_BUF_SIZE);
+ if (H5Lget_val_by_idx(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, 0, link_val_buf,
+ GET_LINK_VAL_TEST_LINK_VAL_BUF_SIZE, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get soft link value at index %d\n", 0);
+ PART_ERROR(H5Lget_val_by_idx_soft_crt_order_increasing);
+ }
+
+ if (HDstrncmp(link_val_buf, link_target_a, strlen(link_target_a) + 1)) {
+ H5_FAILED();
+ HDprintf(" link value '%s' for link at index %d did not match expected value '%s'\n",
+ link_val_buf, 0, link_target_a);
+ PART_ERROR(H5Lget_val_by_idx_soft_crt_order_increasing);
+ }
+
+ HDmemset(&link_info, 0, sizeof(link_info));
+ if (H5Lget_info_by_idx2(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, 1, &link_info,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve soft link's info at index %d\n", 1);
+ PART_ERROR(H5Lget_val_by_idx_soft_crt_order_increasing);
+ }
+
+ link_val_size = strlen(link_target_b) + 1;
+ if (link_info.u.val_size != link_val_size) {
+ H5_FAILED();
+ HDprintf(" link value size %zu for link at index %d did not match expected size of %zu\n",
+ link_info.u.val_size, 1, link_val_size);
+ PART_ERROR(H5Lget_val_by_idx_soft_crt_order_increasing);
+ }
+
+ HDmemset(link_val_buf, 0, GET_LINK_VAL_TEST_LINK_VAL_BUF_SIZE);
+ if (H5Lget_val_by_idx(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, 1, link_val_buf,
+ GET_LINK_VAL_TEST_LINK_VAL_BUF_SIZE, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get soft link value at index %d\n", 1);
+ PART_ERROR(H5Lget_val_by_idx_soft_crt_order_increasing);
+ }
+
+ if (HDstrncmp(link_val_buf, link_target_b, strlen(link_target_b) + 1)) {
+ H5_FAILED();
+ HDprintf(" link value '%s' for link at index %d did not match expected value '%s'\n",
+ link_val_buf, 1, link_target_b);
+ PART_ERROR(H5Lget_val_by_idx_soft_crt_order_increasing);
+ }
+
+ HDmemset(&link_info, 0, sizeof(link_info));
+ if (H5Lget_info_by_idx2(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, 2, &link_info,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve soft link's info at index %d\n", 2);
+ PART_ERROR(H5Lget_val_by_idx_soft_crt_order_increasing);
+ }
+
+ link_val_size = strlen(link_target_c) + 1;
+ if (link_info.u.val_size != link_val_size) {
+ H5_FAILED();
+ HDprintf(" link value size %zu for link at index %d did not match expected size of %zu\n",
+ link_info.u.val_size, 2, link_val_size);
+ PART_ERROR(H5Lget_val_by_idx_soft_crt_order_increasing);
+ }
+
+ HDmemset(link_val_buf, 0, GET_LINK_VAL_TEST_LINK_VAL_BUF_SIZE);
+ if (H5Lget_val_by_idx(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, 2, link_val_buf,
+ GET_LINK_VAL_TEST_LINK_VAL_BUF_SIZE, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get soft link value at index %d\n", 2);
+ PART_ERROR(H5Lget_val_by_idx_soft_crt_order_increasing);
+ }
+
+ if (HDstrncmp(link_val_buf, link_target_c, strlen(link_target_c) + 1)) {
+ H5_FAILED();
+ HDprintf(" link value '%s' for link at index %d did not match expected value '%s'\n",
+ link_val_buf, 2, link_target_c);
+ PART_ERROR(H5Lget_val_by_idx_soft_crt_order_increasing);
+ }
+
+ if (H5Gclose(subgroup_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close group '%s'\n", GET_LINK_VAL_TEST_SUBGROUP4_NAME);
+ PART_ERROR(H5Lget_val_by_idx_soft_crt_order_increasing);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lget_val_by_idx_soft_crt_order_increasing);
+
+ H5E_BEGIN_TRY
+ {
+ H5Gclose(subgroup_id);
+ subgroup_id = H5I_INVALID_HID;
+ }
+ H5E_END_TRY;
+
+ PART_BEGIN(H5Lget_val_by_idx_soft_crt_order_decreasing)
+ {
+ const char *link_target_a = "/" LINK_TEST_GROUP_NAME "/" GET_LINK_VAL_TEST_SUBGROUP_NAME
+ "/" GET_LINK_VAL_TEST_SUBGROUP5_NAME "A";
+ const char *link_target_b = "/" LINK_TEST_GROUP_NAME "/" GET_LINK_VAL_TEST_SUBGROUP_NAME
+ "/" GET_LINK_VAL_TEST_SUBGROUP5_NAME "B";
+ const char *link_target_c = "/" LINK_TEST_GROUP_NAME "/" GET_LINK_VAL_TEST_SUBGROUP_NAME
+ "/" GET_LINK_VAL_TEST_SUBGROUP5_NAME "C";
+
+ TESTING_2("H5Lget_val_by_idx on soft link by creation order in decreasing order");
+
+ if ((subgroup_id = H5Gcreate2(group_id, GET_LINK_VAL_TEST_SUBGROUP5_NAME, H5P_DEFAULT, gcpl_id,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create subgroup '%s'\n", GET_LINK_VAL_TEST_SUBGROUP5_NAME);
+ PART_ERROR(H5Lget_val_by_idx_soft_crt_order_decreasing);
+ }
+
+ /* Create several soft links */
+ if (H5Lcreate_soft(link_target_a, subgroup_id, GET_LINK_VAL_TEST_SOFT_LINK_NAME, H5P_DEFAULT,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create soft link '%s'\n", GET_LINK_VAL_TEST_SOFT_LINK_NAME);
+ PART_ERROR(H5Lget_val_by_idx_soft_crt_order_decreasing);
+ }
+
+ if (H5Lcreate_soft(link_target_b, subgroup_id, GET_LINK_VAL_TEST_SOFT_LINK_NAME2, H5P_DEFAULT,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create soft link '%s'\n", GET_LINK_VAL_TEST_SOFT_LINK_NAME2);
+ PART_ERROR(H5Lget_val_by_idx_soft_crt_order_decreasing);
+ }
+
+ if (H5Lcreate_soft(link_target_c, subgroup_id, GET_LINK_VAL_TEST_SOFT_LINK_NAME3, H5P_DEFAULT,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create soft link '%s'\n", GET_LINK_VAL_TEST_SOFT_LINK_NAME3);
+ PART_ERROR(H5Lget_val_by_idx_soft_crt_order_decreasing);
+ }
+
+ /* Verify the links exist */
+ if ((link_exists = H5Lexists(subgroup_id, GET_LINK_VAL_TEST_SOFT_LINK_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if link '%s' exists\n", GET_LINK_VAL_TEST_SOFT_LINK_NAME);
+ PART_ERROR(H5Lget_val_by_idx_soft_crt_order_decreasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" link '%s' did not exist\n", GET_LINK_VAL_TEST_SOFT_LINK_NAME);
+ PART_ERROR(H5Lget_val_by_idx_soft_crt_order_decreasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, GET_LINK_VAL_TEST_SOFT_LINK_NAME2, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if link '%s' exists\n", GET_LINK_VAL_TEST_SOFT_LINK_NAME2);
+ PART_ERROR(H5Lget_val_by_idx_soft_crt_order_decreasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" link '%s' did not exist\n", GET_LINK_VAL_TEST_SOFT_LINK_NAME2);
+ PART_ERROR(H5Lget_val_by_idx_soft_crt_order_decreasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, GET_LINK_VAL_TEST_SOFT_LINK_NAME3, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if link '%s' exists\n", GET_LINK_VAL_TEST_SOFT_LINK_NAME3);
+ PART_ERROR(H5Lget_val_by_idx_soft_crt_order_decreasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" link '%s' did not exist\n", GET_LINK_VAL_TEST_SOFT_LINK_NAME3);
+ PART_ERROR(H5Lget_val_by_idx_soft_crt_order_decreasing);
+ }
+
+ /* Retrieve the info and value of each link in turn */
+ HDmemset(&link_info, 0, sizeof(link_info));
+ if (H5Lget_info_by_idx2(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_DEC, 2, &link_info,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve soft link's info at index %d\n", 2);
+ PART_ERROR(H5Lget_val_by_idx_soft_crt_order_decreasing);
+ }
+
+ link_val_size = strlen(link_target_a) + 1;
+ if (link_info.u.val_size != link_val_size) {
+ H5_FAILED();
+ HDprintf(" link value size %zu for link at index %d did not match expected size of %zu\n",
+ link_info.u.val_size, 2, link_val_size);
+ PART_ERROR(H5Lget_val_by_idx_soft_crt_order_decreasing);
+ }
+
+ HDmemset(link_val_buf, 0, GET_LINK_VAL_TEST_LINK_VAL_BUF_SIZE);
+ if (H5Lget_val_by_idx(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_DEC, 2, link_val_buf,
+ GET_LINK_VAL_TEST_LINK_VAL_BUF_SIZE, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get soft link value at index %d\n", 2);
+ PART_ERROR(H5Lget_val_by_idx_soft_crt_order_decreasing);
+ }
+
+ if (HDstrncmp(link_val_buf, link_target_a, strlen(link_target_a) + 1)) {
+ H5_FAILED();
+ HDprintf(" link value '%s' for link at index %d did not match expected value '%s'\n",
+ link_val_buf, 2, link_target_a);
+ PART_ERROR(H5Lget_val_by_idx_soft_crt_order_decreasing);
+ }
+
+ HDmemset(&link_info, 0, sizeof(link_info));
+ if (H5Lget_info_by_idx2(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_DEC, 1, &link_info,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve soft link's info at index %d\n", 1);
+ PART_ERROR(H5Lget_val_by_idx_soft_crt_order_decreasing);
+ }
+
+ link_val_size = strlen(link_target_b) + 1;
+ if (link_info.u.val_size != link_val_size) {
+ H5_FAILED();
+ HDprintf(" link value size %zu for link at index %d did not match expected size of %zu\n",
+ link_info.u.val_size, 1, link_val_size);
+ PART_ERROR(H5Lget_val_by_idx_soft_crt_order_decreasing);
+ }
+
+ HDmemset(link_val_buf, 0, GET_LINK_VAL_TEST_LINK_VAL_BUF_SIZE);
+ if (H5Lget_val_by_idx(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_DEC, 1, link_val_buf,
+ GET_LINK_VAL_TEST_LINK_VAL_BUF_SIZE, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get soft link value at index %d\n", 1);
+ PART_ERROR(H5Lget_val_by_idx_soft_crt_order_decreasing);
+ }
+
+ if (HDstrncmp(link_val_buf, link_target_b, strlen(link_target_b) + 1)) {
+ H5_FAILED();
+ HDprintf(" link value '%s' for link at index %d did not match expected value '%s'\n",
+ link_val_buf, 1, link_target_b);
+ PART_ERROR(H5Lget_val_by_idx_soft_crt_order_decreasing);
+ }
+
+ HDmemset(&link_info, 0, sizeof(link_info));
+ if (H5Lget_info_by_idx2(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_DEC, 0, &link_info,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve soft link's info at index %d\n", 0);
+ PART_ERROR(H5Lget_val_by_idx_soft_crt_order_decreasing);
+ }
+
+ link_val_size = strlen(link_target_c) + 1;
+ if (link_info.u.val_size != link_val_size) {
+ H5_FAILED();
+ HDprintf(" link value size %zu for link at index %d did not match expected size of %zu\n",
+ link_info.u.val_size, 0, link_val_size);
+ PART_ERROR(H5Lget_val_by_idx_soft_crt_order_decreasing);
+ }
+
+ HDmemset(link_val_buf, 0, GET_LINK_VAL_TEST_LINK_VAL_BUF_SIZE);
+ if (H5Lget_val_by_idx(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_DEC, 0, link_val_buf,
+ GET_LINK_VAL_TEST_LINK_VAL_BUF_SIZE, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get soft link value at index %d\n", 0);
+ PART_ERROR(H5Lget_val_by_idx_soft_crt_order_decreasing);
+ }
+
+ if (HDstrncmp(link_val_buf, link_target_c, strlen(link_target_c) + 1)) {
+ H5_FAILED();
+ HDprintf(" link value '%s' for link at index %d did not match expected value '%s'\n",
+ link_val_buf, 0, link_target_c);
+ PART_ERROR(H5Lget_val_by_idx_soft_crt_order_decreasing);
+ }
+
+ if (H5Gclose(subgroup_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close group '%s'\n", GET_LINK_VAL_TEST_SUBGROUP5_NAME);
+ PART_ERROR(H5Lget_val_by_idx_soft_crt_order_decreasing);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lget_val_by_idx_soft_crt_order_decreasing);
+
+ H5E_BEGIN_TRY
+ {
+ H5Gclose(subgroup_id);
+ subgroup_id = H5I_INVALID_HID;
+ }
+ H5E_END_TRY;
+
+ PART_BEGIN(H5Lget_val_by_idx_soft_name_order_increasing)
+ {
+ const char *link_target_a = "/" LINK_TEST_GROUP_NAME "/" GET_LINK_VAL_TEST_SUBGROUP_NAME
+ "/" GET_LINK_VAL_TEST_SUBGROUP6_NAME "A";
+ const char *link_target_b = "/" LINK_TEST_GROUP_NAME "/" GET_LINK_VAL_TEST_SUBGROUP_NAME
+ "/" GET_LINK_VAL_TEST_SUBGROUP6_NAME "B";
+ const char *link_target_c = "/" LINK_TEST_GROUP_NAME "/" GET_LINK_VAL_TEST_SUBGROUP_NAME
+ "/" GET_LINK_VAL_TEST_SUBGROUP6_NAME "C";
+
+ TESTING_2("H5Lget_val_by_idx on soft link by alphabetical order in increasing order");
+
+ if ((subgroup_id = H5Gcreate2(group_id, GET_LINK_VAL_TEST_SUBGROUP6_NAME, H5P_DEFAULT, gcpl_id,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create subgroup '%s'\n", GET_LINK_VAL_TEST_SUBGROUP6_NAME);
+ PART_ERROR(H5Lget_val_by_idx_soft_name_order_increasing);
+ }
+
+ /* Create several soft links */
+ if (H5Lcreate_soft(link_target_a, subgroup_id, GET_LINK_VAL_TEST_SOFT_LINK_NAME, H5P_DEFAULT,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create soft link '%s'\n", GET_LINK_VAL_TEST_SOFT_LINK_NAME);
+ PART_ERROR(H5Lget_val_by_idx_soft_name_order_increasing);
+ }
+
+ if (H5Lcreate_soft(link_target_b, subgroup_id, GET_LINK_VAL_TEST_SOFT_LINK_NAME2, H5P_DEFAULT,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create soft link '%s'\n", GET_LINK_VAL_TEST_SOFT_LINK_NAME2);
+ PART_ERROR(H5Lget_val_by_idx_soft_name_order_increasing);
+ }
+
+ if (H5Lcreate_soft(link_target_c, subgroup_id, GET_LINK_VAL_TEST_SOFT_LINK_NAME3, H5P_DEFAULT,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create soft link '%s'\n", GET_LINK_VAL_TEST_SOFT_LINK_NAME3);
+ PART_ERROR(H5Lget_val_by_idx_soft_name_order_increasing);
+ }
+
+ /* Verify the links exist */
+ if ((link_exists = H5Lexists(subgroup_id, GET_LINK_VAL_TEST_SOFT_LINK_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if link '%s' exists\n", GET_LINK_VAL_TEST_SOFT_LINK_NAME);
+ PART_ERROR(H5Lget_val_by_idx_soft_name_order_increasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" link '%s' did not exist\n", GET_LINK_VAL_TEST_SOFT_LINK_NAME);
+ PART_ERROR(H5Lget_val_by_idx_soft_name_order_increasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, GET_LINK_VAL_TEST_SOFT_LINK_NAME2, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if link '%s' exists\n", GET_LINK_VAL_TEST_SOFT_LINK_NAME2);
+ PART_ERROR(H5Lget_val_by_idx_soft_name_order_increasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" link '%s' did not exist\n", GET_LINK_VAL_TEST_SOFT_LINK_NAME2);
+ PART_ERROR(H5Lget_val_by_idx_soft_name_order_increasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, GET_LINK_VAL_TEST_SOFT_LINK_NAME3, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if link '%s' exists\n", GET_LINK_VAL_TEST_SOFT_LINK_NAME3);
+ PART_ERROR(H5Lget_val_by_idx_soft_name_order_increasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" link '%s' did not exist\n", GET_LINK_VAL_TEST_SOFT_LINK_NAME3);
+ PART_ERROR(H5Lget_val_by_idx_soft_name_order_increasing);
+ }
+
+ /* Retrieve the info and value of each link in turn */
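+            /* With H5_ITER_INC on the name index, index 0 refers to the link whose
+             * name sorts first (pointing to link_target_a) and index 2 to the one
+             * that sorts last (link_target_c).
+             */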
+ HDmemset(&link_info, 0, sizeof(link_info));
+ if (H5Lget_info_by_idx2(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_INC, 0, &link_info,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve soft link's info at index %d\n", 0);
+ PART_ERROR(H5Lget_val_by_idx_soft_name_order_increasing);
+ }
+
+ link_val_size = strlen(link_target_a) + 1;
+ if (link_info.u.val_size != link_val_size) {
+ H5_FAILED();
+ HDprintf(" link value size %zu for link at index %d did not match expected size of %zu\n",
+ link_info.u.val_size, 0, link_val_size);
+ PART_ERROR(H5Lget_val_by_idx_soft_name_order_increasing);
+ }
+
+ HDmemset(link_val_buf, 0, GET_LINK_VAL_TEST_LINK_VAL_BUF_SIZE);
+ if (H5Lget_val_by_idx(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_INC, 0, link_val_buf,
+ GET_LINK_VAL_TEST_LINK_VAL_BUF_SIZE, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get soft link value at index %d\n", 0);
+ PART_ERROR(H5Lget_val_by_idx_soft_name_order_increasing);
+ }
+
+ if (HDstrncmp(link_val_buf, link_target_a, strlen(link_target_a) + 1)) {
+ H5_FAILED();
+ HDprintf(" link value '%s' for link at index %d did not match expected value '%s'\n",
+ link_val_buf, 0, link_target_a);
+ PART_ERROR(H5Lget_val_by_idx_soft_name_order_increasing);
+ }
+
+ HDmemset(&link_info, 0, sizeof(link_info));
+ if (H5Lget_info_by_idx2(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_INC, 1, &link_info,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve soft link's info at index %d\n", 1);
+ PART_ERROR(H5Lget_val_by_idx_soft_name_order_increasing);
+ }
+
+ link_val_size = strlen(link_target_b) + 1;
+ if (link_info.u.val_size != link_val_size) {
+ H5_FAILED();
+ HDprintf(" link value size %zu for link at index %d did not match expected size of %zu\n",
+ link_info.u.val_size, 1, link_val_size);
+ PART_ERROR(H5Lget_val_by_idx_soft_name_order_increasing);
+ }
+
+ HDmemset(link_val_buf, 0, GET_LINK_VAL_TEST_LINK_VAL_BUF_SIZE);
+ if (H5Lget_val_by_idx(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_INC, 1, link_val_buf,
+ GET_LINK_VAL_TEST_LINK_VAL_BUF_SIZE, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get soft link value at index %d\n", 1);
+ PART_ERROR(H5Lget_val_by_idx_soft_name_order_increasing);
+ }
+
+ if (HDstrncmp(link_val_buf, link_target_b, strlen(link_target_b) + 1)) {
+ H5_FAILED();
+ HDprintf(" link value '%s' for link at index %d did not match expected value '%s'\n",
+ link_val_buf, 1, link_target_b);
+ PART_ERROR(H5Lget_val_by_idx_soft_name_order_increasing);
+ }
+
+ HDmemset(&link_info, 0, sizeof(link_info));
+ if (H5Lget_info_by_idx2(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_INC, 2, &link_info,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve soft link's info at index %d\n", 2);
+ PART_ERROR(H5Lget_val_by_idx_soft_name_order_increasing);
+ }
+
+ link_val_size = strlen(link_target_c) + 1;
+ if (link_info.u.val_size != link_val_size) {
+ H5_FAILED();
+ HDprintf(" link value size %zu for link at index %d did not match expected size of %zu\n",
+ link_info.u.val_size, 2, link_val_size);
+ PART_ERROR(H5Lget_val_by_idx_soft_name_order_increasing);
+ }
+
+ HDmemset(link_val_buf, 0, GET_LINK_VAL_TEST_LINK_VAL_BUF_SIZE);
+ if (H5Lget_val_by_idx(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_INC, 2, link_val_buf,
+ GET_LINK_VAL_TEST_LINK_VAL_BUF_SIZE, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get soft link value at index %d\n", 2);
+ PART_ERROR(H5Lget_val_by_idx_soft_name_order_increasing);
+ }
+
+ if (HDstrncmp(link_val_buf, link_target_c, strlen(link_target_c) + 1)) {
+ H5_FAILED();
+ HDprintf(" link value '%s' for link at index %d did not match expected value '%s'\n",
+ link_val_buf, 2, link_target_c);
+ PART_ERROR(H5Lget_val_by_idx_soft_name_order_increasing);
+ }
+
+ if (H5Gclose(subgroup_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close group '%s'\n", GET_LINK_VAL_TEST_SUBGROUP6_NAME);
+ PART_ERROR(H5Lget_val_by_idx_soft_name_order_increasing);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lget_val_by_idx_soft_name_order_increasing);
+
+ H5E_BEGIN_TRY
+ {
+ H5Gclose(subgroup_id);
+ subgroup_id = H5I_INVALID_HID;
+ }
+ H5E_END_TRY;
+
+ PART_BEGIN(H5Lget_val_by_idx_soft_name_order_decreasing)
+ {
+#ifndef NO_DECREASING_ALPHA_ITER_ORDER
+ const char *link_target_a = "/" LINK_TEST_GROUP_NAME "/" GET_LINK_VAL_TEST_SUBGROUP_NAME
+ "/" GET_LINK_VAL_TEST_SUBGROUP7_NAME "A";
+ const char *link_target_b = "/" LINK_TEST_GROUP_NAME "/" GET_LINK_VAL_TEST_SUBGROUP_NAME
+ "/" GET_LINK_VAL_TEST_SUBGROUP7_NAME "B";
+ const char *link_target_c = "/" LINK_TEST_GROUP_NAME "/" GET_LINK_VAL_TEST_SUBGROUP_NAME
+ "/" GET_LINK_VAL_TEST_SUBGROUP7_NAME "C";
+#endif
+
+ TESTING_2("H5Lget_val_by_idx on soft link by alphabetical order in decreasing order");
+#ifndef NO_DECREASING_ALPHA_ITER_ORDER
+ if ((subgroup_id = H5Gcreate2(group_id, GET_LINK_VAL_TEST_SUBGROUP7_NAME, H5P_DEFAULT, gcpl_id,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create subgroup '%s'\n", GET_LINK_VAL_TEST_SUBGROUP7_NAME);
+ PART_ERROR(H5Lget_val_by_idx_soft_name_order_decreasing);
+ }
+
+ /* Create several soft links */
+ if (H5Lcreate_soft(link_target_a, subgroup_id, GET_LINK_VAL_TEST_SOFT_LINK_NAME, H5P_DEFAULT,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create soft link '%s'\n", GET_LINK_VAL_TEST_SOFT_LINK_NAME);
+ PART_ERROR(H5Lget_val_by_idx_soft_name_order_decreasing);
+ }
+
+ if (H5Lcreate_soft(link_target_b, subgroup_id, GET_LINK_VAL_TEST_SOFT_LINK_NAME2, H5P_DEFAULT,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create soft link '%s'\n", GET_LINK_VAL_TEST_SOFT_LINK_NAME2);
+ PART_ERROR(H5Lget_val_by_idx_soft_name_order_decreasing);
+ }
+
+ if (H5Lcreate_soft(link_target_c, subgroup_id, GET_LINK_VAL_TEST_SOFT_LINK_NAME3, H5P_DEFAULT,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create soft link '%s'\n", GET_LINK_VAL_TEST_SOFT_LINK_NAME3);
+ PART_ERROR(H5Lget_val_by_idx_soft_name_order_decreasing);
+ }
+
+ /* Verify the links exist */
+ if ((link_exists = H5Lexists(subgroup_id, GET_LINK_VAL_TEST_SOFT_LINK_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if link '%s' exists\n", GET_LINK_VAL_TEST_SOFT_LINK_NAME);
+ PART_ERROR(H5Lget_val_by_idx_soft_name_order_decreasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" link '%s' did not exist\n", GET_LINK_VAL_TEST_SOFT_LINK_NAME);
+ PART_ERROR(H5Lget_val_by_idx_soft_name_order_decreasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, GET_LINK_VAL_TEST_SOFT_LINK_NAME2, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if link '%s' exists\n", GET_LINK_VAL_TEST_SOFT_LINK_NAME2);
+ PART_ERROR(H5Lget_val_by_idx_soft_name_order_decreasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" link '%s' did not exist\n", GET_LINK_VAL_TEST_SOFT_LINK_NAME2);
+ PART_ERROR(H5Lget_val_by_idx_soft_name_order_decreasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, GET_LINK_VAL_TEST_SOFT_LINK_NAME3, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if link '%s' exists\n", GET_LINK_VAL_TEST_SOFT_LINK_NAME3);
+ PART_ERROR(H5Lget_val_by_idx_soft_name_order_decreasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" link '%s' did not exist\n", GET_LINK_VAL_TEST_SOFT_LINK_NAME3);
+ PART_ERROR(H5Lget_val_by_idx_soft_name_order_decreasing);
+ }
+
+ /* Retrieve the info and value of each link in turn */
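+            /* With H5_ITER_DEC on the name index, the order is reversed: index 2
+             * refers to the link whose name sorts first (pointing to link_target_a)
+             * and index 0 to the one that sorts last (link_target_c).
+             */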
+ HDmemset(&link_info, 0, sizeof(link_info));
+ if (H5Lget_info_by_idx2(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_DEC, 2, &link_info,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve soft link's info at index %lld\n", 2);
+ PART_ERROR(H5Lget_val_by_idx_soft_name_order_decreasing);
+ }
+
+ link_val_size = strlen(link_target_a) + 1;
+ if (link_info.u.val_size != link_val_size) {
+ H5_FAILED();
+                HDprintf("    link value size %zu for link at index %d did not match expected size of %zu\n",
+                         link_info.u.val_size, 2, link_val_size);
+ PART_ERROR(H5Lget_val_by_idx_soft_name_order_decreasing);
+ }
+
+ HDmemset(link_val_buf, 0, GET_LINK_VAL_TEST_LINK_VAL_BUF_SIZE);
+ if (H5Lget_val_by_idx(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_DEC, 2, link_val_buf,
+ GET_LINK_VAL_TEST_LINK_VAL_BUF_SIZE, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get soft link value at index %lld\n", 2);
+ PART_ERROR(H5Lget_val_by_idx_soft_name_order_decreasing);
+ }
+
+ if (HDstrncmp(link_val_buf, link_target_a, strlen(link_target_a) + 1)) {
+ H5_FAILED();
+ HDprintf(" link value '%s' for link at index %lld did not match expected value '%s'\n",
+ link_val_buf, 2, link_target_a);
+ PART_ERROR(H5Lget_val_by_idx_soft_name_order_decreasing);
+ }
+
+ HDmemset(&link_info, 0, sizeof(link_info));
+ if (H5Lget_info_by_idx2(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_DEC, 1, &link_info,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve soft link's info at index %lld\n", 1);
+ PART_ERROR(H5Lget_val_by_idx_soft_name_order_decreasing);
+ }
+
+ link_val_size = strlen(link_target_b) + 1;
+ if (link_info.u.val_size != link_val_size) {
+ H5_FAILED();
+                HDprintf("    link value size %zu for link at index %d did not match expected size of %zu\n",
+                         link_info.u.val_size, 1, link_val_size);
+ PART_ERROR(H5Lget_val_by_idx_soft_name_order_decreasing);
+ }
+
+ HDmemset(link_val_buf, 0, GET_LINK_VAL_TEST_LINK_VAL_BUF_SIZE);
+ if (H5Lget_val_by_idx(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_DEC, 1, link_val_buf,
+ GET_LINK_VAL_TEST_LINK_VAL_BUF_SIZE, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get soft link value at index %lld\n", 1);
+ PART_ERROR(H5Lget_val_by_idx_soft_name_order_decreasing);
+ }
+
+ if (HDstrncmp(link_val_buf, link_target_b, strlen(link_target_b) + 1)) {
+ H5_FAILED();
+ HDprintf(" link value '%s' for link at index %lld did not match expected value '%s'\n",
+ link_val_buf, 1, link_target_b);
+ PART_ERROR(H5Lget_val_by_idx_soft_name_order_decreasing);
+ }
+
+ HDmemset(&link_info, 0, sizeof(link_info));
+ if (H5Lget_info_by_idx2(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_DEC, 0, &link_info,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve soft link's info at index %lld\n", 0);
+ PART_ERROR(H5Lget_val_by_idx_soft_name_order_decreasing);
+ }
+
+ link_val_size = strlen(link_target_c) + 1;
+ if (link_info.u.val_size != link_val_size) {
+ H5_FAILED();
+                HDprintf("    link value size %zu for link at index %d did not match expected size of %zu\n",
+                         link_info.u.val_size, 0, link_val_size);
+ PART_ERROR(H5Lget_val_by_idx_soft_name_order_decreasing);
+ }
+
+ HDmemset(link_val_buf, 0, GET_LINK_VAL_TEST_LINK_VAL_BUF_SIZE);
+ if (H5Lget_val_by_idx(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_DEC, 0, link_val_buf,
+ GET_LINK_VAL_TEST_LINK_VAL_BUF_SIZE, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get soft link value at index %lld\n", 0);
+ PART_ERROR(H5Lget_val_by_idx_soft_name_order_decreasing);
+ }
+
+ if (HDstrncmp(link_val_buf, link_target_c, strlen(link_target_c) + 1)) {
+ H5_FAILED();
+ HDprintf(" link value '%s' for link at index %lld did not match expected value '%s'\n",
+ link_val_buf, 0, link_target_c);
+ PART_ERROR(H5Lget_val_by_idx_soft_name_order_decreasing);
+ }
+
+ if (H5Gclose(subgroup_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close group '%s'\n", GET_LINK_VAL_TEST_SUBGROUP7_NAME);
+ PART_ERROR(H5Lget_val_by_idx_soft_name_order_decreasing);
+ }
+
+ PASSED();
+#else
+ SKIPPED();
+ PART_EMPTY(H5Lget_val_by_idx_soft_name_order_decreasing);
+#endif
+ }
+ PART_END(H5Lget_val_by_idx_soft_name_order_decreasing);
+
+ H5E_BEGIN_TRY
+ {
+ H5Gclose(subgroup_id);
+ subgroup_id = H5I_INVALID_HID;
+ }
+ H5E_END_TRY;
+
+ PART_BEGIN(H5Lget_val_by_idx_external_crt_order_increasing)
+ {
+#ifndef NO_EXTERNAL_LINKS
+ const char *ext_obj_name_a = "/A";
+ const char *ext_obj_name_b = "/B";
+ const char *ext_obj_name_c = "/C";
+#endif
+
+ TESTING_2("H5Lget_val_by_idx on external link by creation order in increasing order");
+#ifndef NO_EXTERNAL_LINKS
+ HDsnprintf(ext_link_filename, H5_API_TEST_FILENAME_MAX_LENGTH, "%s",
+ EXTERNAL_LINK_TEST_FILE_NAME);
+
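+            /* Create the file that the external links will point to; it only needs
+             * to exist on disk, so it is closed again right away.
+             */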
+ if ((ext_file_id = H5Fcreate(ext_link_filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create file '%s' for external link to reference\n", ext_link_filename);
+ PART_ERROR(H5Lget_val_by_idx_external_crt_order_increasing);
+ }
+
+ if (H5Fclose(ext_file_id) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't close file '%s'\n", ext_link_filename);
+ PART_ERROR(H5Lget_val_by_idx_external_crt_order_increasing);
+ }
+
+ if ((subgroup_id = H5Gcreate2(group_id, GET_LINK_VAL_TEST_SUBGROUP8_NAME, H5P_DEFAULT, gcpl_id,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create subgroup '%s'\n", GET_LINK_VAL_TEST_SUBGROUP8_NAME);
+ PART_ERROR(H5Lget_val_by_idx_external_crt_order_increasing);
+ }
+
+ /* Create several external links */
+ if (H5Lcreate_external(ext_link_filename, ext_obj_name_a, subgroup_id,
+ GET_LINK_VAL_TEST_EXT_LINK_NAME, H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create external link '%s'\n", GET_LINK_VAL_TEST_EXT_LINK_NAME);
+ PART_ERROR(H5Lget_val_by_idx_external_crt_order_increasing);
+ }
+
+ if (H5Lcreate_external(ext_link_filename, ext_obj_name_b, subgroup_id,
+ GET_LINK_VAL_TEST_EXT_LINK_NAME2, H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create external link '%s'\n", GET_LINK_VAL_TEST_EXT_LINK_NAME2);
+ PART_ERROR(H5Lget_val_by_idx_external_crt_order_increasing);
+ }
+
+ if (H5Lcreate_external(ext_link_filename, ext_obj_name_c, subgroup_id,
+ GET_LINK_VAL_TEST_EXT_LINK_NAME3, H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create external link '%s'\n", GET_LINK_VAL_TEST_EXT_LINK_NAME3);
+ PART_ERROR(H5Lget_val_by_idx_external_crt_order_increasing);
+ }
+
+ /* Verify the links exist */
+ if ((link_exists = H5Lexists(subgroup_id, GET_LINK_VAL_TEST_EXT_LINK_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if link '%s' exists\n", GET_LINK_VAL_TEST_EXT_LINK_NAME);
+ PART_ERROR(H5Lget_val_by_idx_external_crt_order_increasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" link '%s' did not exist\n", GET_LINK_VAL_TEST_EXT_LINK_NAME);
+ PART_ERROR(H5Lget_val_by_idx_external_crt_order_increasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, GET_LINK_VAL_TEST_EXT_LINK_NAME2, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if link '%s' exists\n", GET_LINK_VAL_TEST_EXT_LINK_NAME2);
+ PART_ERROR(H5Lget_val_by_idx_external_crt_order_increasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" link '%s' did not exist\n", GET_LINK_VAL_TEST_EXT_LINK_NAME2);
+ PART_ERROR(H5Lget_val_by_idx_external_crt_order_increasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, GET_LINK_VAL_TEST_EXT_LINK_NAME3, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if link '%s' exists\n", GET_LINK_VAL_TEST_EXT_LINK_NAME3);
+ PART_ERROR(H5Lget_val_by_idx_external_crt_order_increasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" link '%s' did not exist\n", GET_LINK_VAL_TEST_EXT_LINK_NAME3);
+ PART_ERROR(H5Lget_val_by_idx_external_crt_order_increasing);
+ }
+
+ /* Retrieve the info and value of each link in turn */
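+            /* With H5_ITER_INC on the creation-order index, index 0 refers to the
+             * first link created (pointing to ext_obj_name_a) and index 2 to the
+             * last (ext_obj_name_c).
+             */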
+ HDmemset(&link_info, 0, sizeof(link_info));
+ if (H5Lget_info_by_idx2(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, 0, &link_info,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve external link's info at index %lld\n", 0);
+ PART_ERROR(H5Lget_val_by_idx_external_crt_order_increasing);
+ }
+
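+            /* An external link's value is encoded as a one-byte version/flags field
+             * followed by the NUL-terminated target file name and the NUL-terminated
+             * object path, which is what the size computed below accounts for.
+             */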
+ link_val_size = 1 + strlen(ext_link_filename) + 1 + strlen(ext_obj_name_a) + 1;
+ if (link_info.u.val_size != link_val_size) {
+ H5_FAILED();
+                HDprintf("    link value size %zu for link at index %d did not match expected size of %zu\n",
+                         link_info.u.val_size, 0, link_val_size);
+ PART_ERROR(H5Lget_val_by_idx_external_crt_order_increasing);
+ }
+
+ HDmemset(link_val_buf, 0, GET_LINK_VAL_TEST_LINK_VAL_BUF_SIZE);
+ if (H5Lget_val_by_idx(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, 0, link_val_buf,
+ GET_LINK_VAL_TEST_LINK_VAL_BUF_SIZE, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get external link value at index %lld\n", 0);
+ PART_ERROR(H5Lget_val_by_idx_external_crt_order_increasing);
+ }
+
+ if (H5Lunpack_elink_val(link_val_buf, link_info.u.val_size, &ext_link_flags, &ext_link_filepath,
+ &ext_link_val) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't unpack external link value buffer\n");
+ PART_ERROR(H5Lget_val_by_idx_external_crt_order_increasing);
+ }
+
+ if (HDstrncmp(ext_link_filepath, ext_link_filename, strlen(ext_link_filename) + 1)) {
+ H5_FAILED();
+ HDprintf(" external link target file '%s' did not match expected '%s'\n",
+ ext_link_filepath, ext_link_filename);
+ PART_ERROR(H5Lget_val_by_idx_external_crt_order_increasing);
+ }
+
+ if (HDstrncmp(ext_link_val, ext_obj_name_a, strlen(ext_obj_name_a) + 1)) {
+ H5_FAILED();
+ HDprintf(" external link value '%s' did not match expected '%s'\n", ext_link_val,
+ ext_obj_name_a);
+ PART_ERROR(H5Lget_val_by_idx_external_crt_order_increasing);
+ }
+
+ HDmemset(&link_info, 0, sizeof(link_info));
+ if (H5Lget_info_by_idx2(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, 1, &link_info,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve external link's info at index %lld\n", 1);
+ PART_ERROR(H5Lget_val_by_idx_external_crt_order_increasing);
+ }
+
+ link_val_size = 1 + strlen(ext_link_filename) + 1 + strlen(ext_obj_name_b) + 1;
+ if (link_info.u.val_size != link_val_size) {
+ H5_FAILED();
+                HDprintf("    link value size %zu for link at index %d did not match expected size of %zu\n",
+                         link_info.u.val_size, 1, link_val_size);
+ PART_ERROR(H5Lget_val_by_idx_external_crt_order_increasing);
+ }
+
+ HDmemset(link_val_buf, 0, GET_LINK_VAL_TEST_LINK_VAL_BUF_SIZE);
+ if (H5Lget_val_by_idx(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, 1, link_val_buf,
+ GET_LINK_VAL_TEST_LINK_VAL_BUF_SIZE, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get external link value at index %lld\n", 1);
+ PART_ERROR(H5Lget_val_by_idx_external_crt_order_increasing);
+ }
+
+ if (H5Lunpack_elink_val(link_val_buf, link_info.u.val_size, &ext_link_flags, &ext_link_filepath,
+ &ext_link_val) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't unpack external link value buffer\n");
+ PART_ERROR(H5Lget_val_by_idx_external_crt_order_increasing);
+ }
+
+ if (HDstrncmp(ext_link_filepath, ext_link_filename, strlen(ext_link_filename) + 1)) {
+ H5_FAILED();
+ HDprintf(" external link target file '%s' did not match expected '%s'\n",
+ ext_link_filepath, ext_link_filename);
+ PART_ERROR(H5Lget_val_by_idx_external_crt_order_increasing);
+ }
+
+ if (HDstrncmp(ext_link_val, ext_obj_name_b, strlen(ext_obj_name_b) + 1)) {
+ H5_FAILED();
+ HDprintf(" external link value '%s' did not match expected '%s'\n", ext_link_val,
+ ext_obj_name_b);
+ PART_ERROR(H5Lget_val_by_idx_external_crt_order_increasing);
+ }
+
+ HDmemset(&link_info, 0, sizeof(link_info));
+ if (H5Lget_info_by_idx2(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, 2, &link_info,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve external link's info at index %lld\n", 2);
+ PART_ERROR(H5Lget_val_by_idx_external_crt_order_increasing);
+ }
+
+ link_val_size = 1 + strlen(ext_link_filename) + 1 + strlen(ext_obj_name_c) + 1;
+ if (link_info.u.val_size != link_val_size) {
+ H5_FAILED();
+                HDprintf("    link value size %zu for link at index %d did not match expected size of %zu\n",
+                         link_info.u.val_size, 2, link_val_size);
+ PART_ERROR(H5Lget_val_by_idx_external_crt_order_increasing);
+ }
+
+ HDmemset(link_val_buf, 0, GET_LINK_VAL_TEST_LINK_VAL_BUF_SIZE);
+ if (H5Lget_val_by_idx(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, 2, link_val_buf,
+ GET_LINK_VAL_TEST_LINK_VAL_BUF_SIZE, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get external link value at index %lld\n", 2);
+ PART_ERROR(H5Lget_val_by_idx_external_crt_order_increasing);
+ }
+
+ if (H5Lunpack_elink_val(link_val_buf, link_info.u.val_size, &ext_link_flags, &ext_link_filepath,
+ &ext_link_val) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't unpack external link value buffer\n");
+ PART_ERROR(H5Lget_val_by_idx_external_crt_order_increasing);
+ }
+
+ if (HDstrncmp(ext_link_filepath, ext_link_filename, strlen(ext_link_filename) + 1)) {
+ H5_FAILED();
+ HDprintf(" external link target file '%s' did not match expected '%s'\n",
+ ext_link_filepath, ext_link_filename);
+ PART_ERROR(H5Lget_val_by_idx_external_crt_order_increasing);
+ }
+
+ if (HDstrncmp(ext_link_val, ext_obj_name_c, strlen(ext_obj_name_c) + 1)) {
+ H5_FAILED();
+ HDprintf(" external link value '%s' did not match expected '%s'\n", ext_link_val,
+ ext_obj_name_c);
+ PART_ERROR(H5Lget_val_by_idx_external_crt_order_increasing);
+ }
+
+ if (H5Gclose(subgroup_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close group '%s'\n", GET_LINK_VAL_TEST_SUBGROUP8_NAME);
+ PART_ERROR(H5Lget_val_by_idx_external_crt_order_increasing);
+ }
+
+ PASSED();
+#else
+ SKIPPED();
+ PART_EMPTY(H5Lget_val_by_idx_external_crt_order_increasing);
+#endif
+ }
+ PART_END(H5Lget_val_by_idx_external_crt_order_increasing);
+
+ H5E_BEGIN_TRY
+ {
+ H5Gclose(subgroup_id);
+ subgroup_id = H5I_INVALID_HID;
+ H5Fclose(ext_file_id);
+ ext_file_id = H5I_INVALID_HID;
+ }
+ H5E_END_TRY;
+
+ PART_BEGIN(H5Lget_val_by_idx_external_crt_order_decreasing)
+ {
+#ifndef NO_EXTERNAL_LINKS
+ const char *ext_obj_name_a = "/A";
+ const char *ext_obj_name_b = "/B";
+ const char *ext_obj_name_c = "/C";
+#endif
+
+ TESTING_2("H5Lget_val_by_idx on external link by creation order in decreasing order");
+#ifndef NO_EXTERNAL_LINKS
+ HDsnprintf(ext_link_filename, H5_API_TEST_FILENAME_MAX_LENGTH, "%s",
+ EXTERNAL_LINK_TEST_FILE_NAME);
+
+ if ((ext_file_id = H5Fcreate(ext_link_filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create file '%s' for external link to reference\n", ext_link_filename);
+ PART_ERROR(H5Lget_val_by_idx_external_crt_order_decreasing);
+ }
+
+ if (H5Fclose(ext_file_id) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't close file '%s'\n", ext_link_filename);
+ PART_ERROR(H5Lget_val_by_idx_external_crt_order_decreasing);
+ }
+
+ if ((subgroup_id = H5Gcreate2(group_id, GET_LINK_VAL_TEST_SUBGROUP9_NAME, H5P_DEFAULT, gcpl_id,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create subgroup '%s'\n", GET_LINK_VAL_TEST_SUBGROUP9_NAME);
+ PART_ERROR(H5Lget_val_by_idx_external_crt_order_decreasing);
+ }
+
+ /* Create several external links */
+ if (H5Lcreate_external(ext_link_filename, ext_obj_name_a, subgroup_id,
+ GET_LINK_VAL_TEST_EXT_LINK_NAME, H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create external link '%s'\n", GET_LINK_VAL_TEST_EXT_LINK_NAME);
+ PART_ERROR(H5Lget_val_by_idx_external_crt_order_decreasing);
+ }
+
+ if (H5Lcreate_external(ext_link_filename, ext_obj_name_b, subgroup_id,
+ GET_LINK_VAL_TEST_EXT_LINK_NAME2, H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create external link '%s'\n", GET_LINK_VAL_TEST_EXT_LINK_NAME2);
+ PART_ERROR(H5Lget_val_by_idx_external_crt_order_decreasing);
+ }
+
+ if (H5Lcreate_external(ext_link_filename, ext_obj_name_c, subgroup_id,
+ GET_LINK_VAL_TEST_EXT_LINK_NAME3, H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create external link '%s'\n", GET_LINK_VAL_TEST_EXT_LINK_NAME3);
+ PART_ERROR(H5Lget_val_by_idx_external_crt_order_decreasing);
+ }
+
+ /* Verify the links exist */
+ if ((link_exists = H5Lexists(subgroup_id, GET_LINK_VAL_TEST_EXT_LINK_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if link '%s' exists\n", GET_LINK_VAL_TEST_EXT_LINK_NAME);
+ PART_ERROR(H5Lget_val_by_idx_external_crt_order_decreasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" link '%s' did not exist\n", GET_LINK_VAL_TEST_EXT_LINK_NAME);
+ PART_ERROR(H5Lget_val_by_idx_external_crt_order_decreasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, GET_LINK_VAL_TEST_EXT_LINK_NAME2, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if link '%s' exists\n", GET_LINK_VAL_TEST_EXT_LINK_NAME2);
+ PART_ERROR(H5Lget_val_by_idx_external_crt_order_decreasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" link '%s' did not exist\n", GET_LINK_VAL_TEST_EXT_LINK_NAME2);
+ PART_ERROR(H5Lget_val_by_idx_external_crt_order_decreasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, GET_LINK_VAL_TEST_EXT_LINK_NAME3, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if link '%s' exists\n", GET_LINK_VAL_TEST_EXT_LINK_NAME3);
+ PART_ERROR(H5Lget_val_by_idx_external_crt_order_decreasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" link '%s' did not exist\n", GET_LINK_VAL_TEST_EXT_LINK_NAME3);
+ PART_ERROR(H5Lget_val_by_idx_external_crt_order_decreasing);
+ }
+
+ /* Retrieve the info and value of each link in turn */
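+            /* With H5_ITER_DEC on the creation-order index, index 2 refers to the
+             * first link created (pointing to ext_obj_name_a) and index 0 to the
+             * last (ext_obj_name_c).
+             */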
+ HDmemset(&link_info, 0, sizeof(link_info));
+ if (H5Lget_info_by_idx2(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_DEC, 2, &link_info,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve external link's info at index %lld\n", 2);
+ PART_ERROR(H5Lget_val_by_idx_external_crt_order_decreasing);
+ }
+
+ link_val_size = 1 + strlen(ext_link_filename) + 1 + strlen(ext_obj_name_a) + 1;
+ if (link_info.u.val_size != link_val_size) {
+ H5_FAILED();
+                HDprintf("    link value size %zu for link at index %d did not match expected size of %zu\n",
+                         link_info.u.val_size, 2, link_val_size);
+ PART_ERROR(H5Lget_val_by_idx_external_crt_order_decreasing);
+ }
+
+ HDmemset(link_val_buf, 0, GET_LINK_VAL_TEST_LINK_VAL_BUF_SIZE);
+ if (H5Lget_val_by_idx(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_DEC, 2, link_val_buf,
+ GET_LINK_VAL_TEST_LINK_VAL_BUF_SIZE, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get external link value at index %lld\n", 2);
+ PART_ERROR(H5Lget_val_by_idx_external_crt_order_decreasing);
+ }
+
+ if (H5Lunpack_elink_val(link_val_buf, link_info.u.val_size, &ext_link_flags, &ext_link_filepath,
+ &ext_link_val) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't unpack external link value buffer\n");
+ PART_ERROR(H5Lget_val_by_idx_external_crt_order_decreasing);
+ }
+
+ if (HDstrncmp(ext_link_filepath, ext_link_filename, strlen(ext_link_filename) + 1)) {
+ H5_FAILED();
+ HDprintf(" external link target file '%s' did not match expected '%s'\n",
+ ext_link_filepath, ext_link_filename);
+ PART_ERROR(H5Lget_val_by_idx_external_crt_order_decreasing);
+ }
+
+ if (HDstrncmp(ext_link_val, ext_obj_name_a, strlen(ext_obj_name_a) + 1)) {
+ H5_FAILED();
+ HDprintf(" external link value '%s' did not match expected '%s'\n", ext_link_val,
+ ext_obj_name_a);
+ PART_ERROR(H5Lget_val_by_idx_external_crt_order_decreasing);
+ }
+
+ HDmemset(&link_info, 0, sizeof(link_info));
+ if (H5Lget_info_by_idx2(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_DEC, 1, &link_info,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve external link's info at index %lld\n", 1);
+ PART_ERROR(H5Lget_val_by_idx_external_crt_order_decreasing);
+ }
+
+ link_val_size = 1 + strlen(ext_link_filename) + 1 + strlen(ext_obj_name_b) + 1;
+ if (link_info.u.val_size != link_val_size) {
+ H5_FAILED();
+                HDprintf("    link value size %zu for link at index %d did not match expected size of %zu\n",
+                         link_info.u.val_size, 1, link_val_size);
+ PART_ERROR(H5Lget_val_by_idx_external_crt_order_decreasing);
+ }
+
+ HDmemset(link_val_buf, 0, GET_LINK_VAL_TEST_LINK_VAL_BUF_SIZE);
+ if (H5Lget_val_by_idx(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_DEC, 1, link_val_buf,
+ GET_LINK_VAL_TEST_LINK_VAL_BUF_SIZE, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get external link value at index %lld\n", 1);
+ PART_ERROR(H5Lget_val_by_idx_external_crt_order_decreasing);
+ }
+
+ if (H5Lunpack_elink_val(link_val_buf, link_info.u.val_size, &ext_link_flags, &ext_link_filepath,
+ &ext_link_val) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't unpack external link value buffer\n");
+ PART_ERROR(H5Lget_val_by_idx_external_crt_order_decreasing);
+ }
+
+ if (HDstrncmp(ext_link_filepath, ext_link_filename, strlen(ext_link_filename) + 1)) {
+ H5_FAILED();
+ HDprintf(" external link target file '%s' did not match expected '%s'\n",
+ ext_link_filepath, ext_link_filename);
+ PART_ERROR(H5Lget_val_by_idx_external_crt_order_decreasing);
+ }
+
+ if (HDstrncmp(ext_link_val, ext_obj_name_b, strlen(ext_obj_name_b) + 1)) {
+ H5_FAILED();
+ HDprintf(" external link value '%s' did not match expected '%s'\n", ext_link_val,
+ ext_obj_name_b);
+ PART_ERROR(H5Lget_val_by_idx_external_crt_order_decreasing);
+ }
+
+ HDmemset(&link_info, 0, sizeof(link_info));
+ if (H5Lget_info_by_idx2(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_DEC, 0, &link_info,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve external link's info at index %lld\n", 0);
+ PART_ERROR(H5Lget_val_by_idx_external_crt_order_decreasing);
+ }
+
+ link_val_size = 1 + strlen(ext_link_filename) + 1 + strlen(ext_obj_name_c) + 1;
+ if (link_info.u.val_size != link_val_size) {
+ H5_FAILED();
+                HDprintf("    link value size %zu for link at index %d did not match expected size of %zu\n",
+                         link_info.u.val_size, 0, link_val_size);
+ PART_ERROR(H5Lget_val_by_idx_external_crt_order_decreasing);
+ }
+
+ HDmemset(link_val_buf, 0, GET_LINK_VAL_TEST_LINK_VAL_BUF_SIZE);
+ if (H5Lget_val_by_idx(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_DEC, 0, link_val_buf,
+ GET_LINK_VAL_TEST_LINK_VAL_BUF_SIZE, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get external link value at index %lld\n", 0);
+ PART_ERROR(H5Lget_val_by_idx_external_crt_order_decreasing);
+ }
+
+ if (H5Lunpack_elink_val(link_val_buf, link_info.u.val_size, &ext_link_flags, &ext_link_filepath,
+ &ext_link_val) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't unpack external link value buffer\n");
+ PART_ERROR(H5Lget_val_by_idx_external_crt_order_decreasing);
+ }
+
+ if (HDstrncmp(ext_link_filepath, ext_link_filename, strlen(ext_link_filename) + 1)) {
+ H5_FAILED();
+ HDprintf(" external link target file '%s' did not match expected '%s'\n",
+ ext_link_filepath, ext_link_filename);
+ PART_ERROR(H5Lget_val_by_idx_external_crt_order_decreasing);
+ }
+
+ if (HDstrncmp(ext_link_val, ext_obj_name_c, strlen(ext_obj_name_c) + 1)) {
+ H5_FAILED();
+ HDprintf(" external link value '%s' did not match expected '%s'\n", ext_link_val,
+ ext_obj_name_c);
+ PART_ERROR(H5Lget_val_by_idx_external_crt_order_decreasing);
+ }
+
+ if (H5Gclose(subgroup_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close group '%s'\n", GET_LINK_VAL_TEST_SUBGROUP9_NAME);
+ PART_ERROR(H5Lget_val_by_idx_external_crt_order_decreasing);
+ }
+
+ PASSED();
+#else
+ SKIPPED();
+ PART_EMPTY(H5Lget_val_by_idx_external_crt_order_decreasing);
+#endif
+ }
+ PART_END(H5Lget_val_by_idx_external_crt_order_decreasing);
+
+ H5E_BEGIN_TRY
+ {
+ H5Gclose(subgroup_id);
+ subgroup_id = H5I_INVALID_HID;
+ H5Fclose(ext_file_id);
+ ext_file_id = H5I_INVALID_HID;
+ }
+ H5E_END_TRY;
+
+ PART_BEGIN(H5Lget_val_by_idx_external_name_order_increasing)
+ {
+#ifndef NO_EXTERNAL_LINKS
+ const char *ext_obj_name_a = "/A";
+ const char *ext_obj_name_b = "/B";
+ const char *ext_obj_name_c = "/C";
+#endif
+
+ TESTING_2("H5Lget_val_by_idx on external link by alphabetical order in increasing order");
+#ifndef NO_EXTERNAL_LINKS
+ HDsnprintf(ext_link_filename, H5_API_TEST_FILENAME_MAX_LENGTH, "%s",
+ EXTERNAL_LINK_TEST_FILE_NAME);
+
+ if ((ext_file_id = H5Fcreate(ext_link_filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create file '%s' for external link to reference\n", ext_link_filename);
+ PART_ERROR(H5Lget_val_by_idx_external_name_order_increasing);
+ }
+
+ if (H5Fclose(ext_file_id) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't close file '%s'\n", ext_link_filename);
+ PART_ERROR(H5Lget_val_by_idx_external_name_order_increasing);
+ }
+
+ if ((subgroup_id = H5Gcreate2(group_id, GET_LINK_VAL_TEST_SUBGROUP10_NAME, H5P_DEFAULT, gcpl_id,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create subgroup '%s'\n", GET_LINK_VAL_TEST_SUBGROUP10_NAME);
+ PART_ERROR(H5Lget_val_by_idx_external_name_order_increasing);
+ }
+
+ /* Create several external links */
+ if (H5Lcreate_external(ext_link_filename, ext_obj_name_a, subgroup_id,
+ GET_LINK_VAL_TEST_EXT_LINK_NAME, H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create external link '%s'\n", GET_LINK_VAL_TEST_EXT_LINK_NAME);
+ PART_ERROR(H5Lget_val_by_idx_external_name_order_increasing);
+ }
+
+ if (H5Lcreate_external(ext_link_filename, ext_obj_name_b, subgroup_id,
+ GET_LINK_VAL_TEST_EXT_LINK_NAME2, H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create external link '%s'\n", GET_LINK_VAL_TEST_EXT_LINK_NAME2);
+ PART_ERROR(H5Lget_val_by_idx_external_name_order_increasing);
+ }
+
+ if (H5Lcreate_external(ext_link_filename, ext_obj_name_c, subgroup_id,
+ GET_LINK_VAL_TEST_EXT_LINK_NAME3, H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create external link '%s'\n", GET_LINK_VAL_TEST_EXT_LINK_NAME3);
+ PART_ERROR(H5Lget_val_by_idx_external_name_order_increasing);
+ }
+
+ /* Verify the links exist */
+ if ((link_exists = H5Lexists(subgroup_id, GET_LINK_VAL_TEST_EXT_LINK_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if link '%s' exists\n", GET_LINK_VAL_TEST_EXT_LINK_NAME);
+ PART_ERROR(H5Lget_val_by_idx_external_name_order_increasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" link '%s' did not exist\n", GET_LINK_VAL_TEST_EXT_LINK_NAME);
+ PART_ERROR(H5Lget_val_by_idx_external_name_order_increasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, GET_LINK_VAL_TEST_EXT_LINK_NAME2, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if link '%s' exists\n", GET_LINK_VAL_TEST_EXT_LINK_NAME2);
+ PART_ERROR(H5Lget_val_by_idx_external_name_order_increasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" link '%s' did not exist\n", GET_LINK_VAL_TEST_EXT_LINK_NAME2);
+ PART_ERROR(H5Lget_val_by_idx_external_name_order_increasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, GET_LINK_VAL_TEST_EXT_LINK_NAME3, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if link '%s' exists\n", GET_LINK_VAL_TEST_EXT_LINK_NAME3);
+ PART_ERROR(H5Lget_val_by_idx_external_name_order_increasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" link '%s' did not exist\n", GET_LINK_VAL_TEST_EXT_LINK_NAME3);
+ PART_ERROR(H5Lget_val_by_idx_external_name_order_increasing);
+ }
+
+ /* Retrieve the info and value of each link in turn */
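+            /* With H5_ITER_INC on the name index, index 0 refers to the link whose
+             * name sorts first (pointing to ext_obj_name_a) and index 2 to the one
+             * that sorts last (ext_obj_name_c).
+             */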
+ HDmemset(&link_info, 0, sizeof(link_info));
+ if (H5Lget_info_by_idx2(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_INC, 0, &link_info,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve external link's info at index %lld\n", 0);
+ PART_ERROR(H5Lget_val_by_idx_external_name_order_increasing);
+ }
+
+ link_val_size = 1 + strlen(ext_link_filename) + 1 + strlen(ext_obj_name_a) + 1;
+ if (link_info.u.val_size != link_val_size) {
+ H5_FAILED();
+                HDprintf("    link value size %zu for link at index %d did not match expected size of %zu\n",
+                         link_info.u.val_size, 0, link_val_size);
+ PART_ERROR(H5Lget_val_by_idx_external_name_order_increasing);
+ }
+
+ HDmemset(link_val_buf, 0, GET_LINK_VAL_TEST_LINK_VAL_BUF_SIZE);
+ if (H5Lget_val_by_idx(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_INC, 0, link_val_buf,
+ GET_LINK_VAL_TEST_LINK_VAL_BUF_SIZE, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get external link value at index %lld\n", 0);
+ PART_ERROR(H5Lget_val_by_idx_external_name_order_increasing);
+ }
+
+ if (H5Lunpack_elink_val(link_val_buf, link_info.u.val_size, &ext_link_flags, &ext_link_filepath,
+ &ext_link_val) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't unpack external link value buffer\n");
+ PART_ERROR(H5Lget_val_by_idx_external_name_order_increasing);
+ }
+
+ if (HDstrncmp(ext_link_filepath, ext_link_filename, strlen(ext_link_filename) + 1)) {
+ H5_FAILED();
+ HDprintf(" external link target file '%s' did not match expected '%s'\n",
+ ext_link_filepath, ext_link_filename);
+ PART_ERROR(H5Lget_val_by_idx_external_name_order_increasing);
+ }
+
+ if (HDstrncmp(ext_link_val, ext_obj_name_a, strlen(ext_obj_name_a) + 1)) {
+ H5_FAILED();
+ HDprintf(" external link value '%s' did not match expected '%s'\n", ext_link_val,
+ ext_obj_name_a);
+ PART_ERROR(H5Lget_val_by_idx_external_name_order_increasing);
+ }
+
+ HDmemset(&link_info, 0, sizeof(link_info));
+ if (H5Lget_info_by_idx2(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_INC, 1, &link_info,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve external link's info at index %lld\n", 1);
+ PART_ERROR(H5Lget_val_by_idx_external_name_order_increasing);
+ }
+
+ link_val_size = 1 + strlen(ext_link_filename) + 1 + strlen(ext_obj_name_b) + 1;
+ if (link_info.u.val_size != link_val_size) {
+ H5_FAILED();
+                HDprintf("    link value size %zu for link at index %d did not match expected size of %zu\n",
+                         link_info.u.val_size, 1, link_val_size);
+ PART_ERROR(H5Lget_val_by_idx_external_name_order_increasing);
+ }
+
+ HDmemset(link_val_buf, 0, GET_LINK_VAL_TEST_LINK_VAL_BUF_SIZE);
+ if (H5Lget_val_by_idx(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_INC, 1, link_val_buf,
+ GET_LINK_VAL_TEST_LINK_VAL_BUF_SIZE, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get external link value at index %lld\n", 1);
+ PART_ERROR(H5Lget_val_by_idx_external_name_order_increasing);
+ }
+
+ if (H5Lunpack_elink_val(link_val_buf, link_info.u.val_size, &ext_link_flags, &ext_link_filepath,
+ &ext_link_val) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't unpack external link value buffer\n");
+ PART_ERROR(H5Lget_val_by_idx_external_name_order_increasing);
+ }
+
+ if (HDstrncmp(ext_link_filepath, ext_link_filename, strlen(ext_link_filename) + 1)) {
+ H5_FAILED();
+ HDprintf(" external link target file '%s' did not match expected '%s'\n",
+ ext_link_filepath, ext_link_filename);
+ PART_ERROR(H5Lget_val_by_idx_external_name_order_increasing);
+ }
+
+ if (HDstrncmp(ext_link_val, ext_obj_name_b, strlen(ext_obj_name_b) + 1)) {
+ H5_FAILED();
+ HDprintf(" external link value '%s' did not match expected '%s'\n", ext_link_val,
+ ext_obj_name_b);
+ PART_ERROR(H5Lget_val_by_idx_external_name_order_increasing);
+ }
+
+ HDmemset(&link_info, 0, sizeof(link_info));
+ if (H5Lget_info_by_idx2(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_INC, 2, &link_info,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve external link's info at index %lld\n", 2);
+ PART_ERROR(H5Lget_val_by_idx_external_name_order_increasing);
+ }
+
+ link_val_size = 1 + strlen(ext_link_filename) + 1 + strlen(ext_obj_name_c) + 1;
+ if (link_info.u.val_size != link_val_size) {
+ H5_FAILED();
+                HDprintf("    link value size %zu for link at index %d did not match expected size of %zu\n",
+                         link_info.u.val_size, 2, link_val_size);
+ PART_ERROR(H5Lget_val_by_idx_external_name_order_increasing);
+ }
+
+ HDmemset(link_val_buf, 0, GET_LINK_VAL_TEST_LINK_VAL_BUF_SIZE);
+ if (H5Lget_val_by_idx(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_INC, 2, link_val_buf,
+ GET_LINK_VAL_TEST_LINK_VAL_BUF_SIZE, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get external link value at index %lld\n", 2);
+ PART_ERROR(H5Lget_val_by_idx_external_name_order_increasing);
+ }
+
+ if (H5Lunpack_elink_val(link_val_buf, link_info.u.val_size, &ext_link_flags, &ext_link_filepath,
+ &ext_link_val) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't unpack external link value buffer\n");
+ PART_ERROR(H5Lget_val_by_idx_external_name_order_increasing);
+ }
+
+ if (HDstrncmp(ext_link_filepath, ext_link_filename, strlen(ext_link_filename) + 1)) {
+ H5_FAILED();
+ HDprintf(" external link target file '%s' did not match expected '%s'\n",
+ ext_link_filepath, ext_link_filename);
+ PART_ERROR(H5Lget_val_by_idx_external_name_order_increasing);
+ }
+
+ if (HDstrncmp(ext_link_val, ext_obj_name_c, strlen(ext_obj_name_c) + 1)) {
+ H5_FAILED();
+ HDprintf(" external link value '%s' did not match expected '%s'\n", ext_link_val,
+ ext_obj_name_c);
+ PART_ERROR(H5Lget_val_by_idx_external_name_order_increasing);
+ }
+
+ if (H5Gclose(subgroup_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close group '%s'\n", GET_LINK_VAL_TEST_SUBGROUP10_NAME);
+ PART_ERROR(H5Lget_val_by_idx_external_name_order_increasing);
+ }
+
+ PASSED();
+#else
+ SKIPPED();
+ PART_EMPTY(H5Lget_val_by_idx_external_name_order_increasing);
+#endif
+ }
+ PART_END(H5Lget_val_by_idx_external_name_order_increasing);
+
+ H5E_BEGIN_TRY
+ {
+ H5Gclose(subgroup_id);
+ subgroup_id = H5I_INVALID_HID;
+ H5Fclose(ext_file_id);
+ ext_file_id = H5I_INVALID_HID;
+ }
+ H5E_END_TRY;
+
+ PART_BEGIN(H5Lget_val_by_idx_external_name_order_decreasing)
+ {
+#ifndef NO_DECREASING_ALPHA_ITER_ORDER
+ const char *ext_obj_name_a = "/A";
+ const char *ext_obj_name_b = "/B";
+ const char *ext_obj_name_c = "/C";
+#endif
+
+ TESTING_2("H5Lget_val_by_idx on external link by alphabetical order in decreasing order");
+#ifndef NO_DECREASING_ALPHA_ITER_ORDER
+ HDsnprintf(ext_link_filename, H5_API_TEST_FILENAME_MAX_LENGTH, "%s",
+ EXTERNAL_LINK_TEST_FILE_NAME);
+
+ if ((ext_file_id = H5Fcreate(ext_link_filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create file '%s' for external link to reference\n", ext_link_filename);
+ PART_ERROR(H5Lget_val_by_idx_external_name_order_decreasing);
+ }
+
+ if (H5Fclose(ext_file_id) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't close file '%s'\n", ext_link_filename);
+ PART_ERROR(H5Lget_val_by_idx_external_name_order_decreasing);
+ }
+
+ if ((subgroup_id = H5Gcreate2(group_id, GET_LINK_VAL_TEST_SUBGROUP11_NAME, H5P_DEFAULT, gcpl_id,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create subgroup '%s'\n", GET_LINK_VAL_TEST_SUBGROUP11_NAME);
+ PART_ERROR(H5Lget_val_by_idx_external_name_order_decreasing);
+ }
+
+ /* Create several external links */
+ if (H5Lcreate_external(ext_link_filename, ext_obj_name_a, subgroup_id,
+ GET_LINK_VAL_TEST_EXT_LINK_NAME, H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create external link '%s'\n", GET_LINK_VAL_TEST_EXT_LINK_NAME);
+ PART_ERROR(H5Lget_val_by_idx_external_name_order_decreasing);
+ }
+
+ if (H5Lcreate_external(ext_link_filename, ext_obj_name_b, subgroup_id,
+ GET_LINK_VAL_TEST_EXT_LINK_NAME2, H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create external link '%s'\n", GET_LINK_VAL_TEST_EXT_LINK_NAME2);
+ PART_ERROR(H5Lget_val_by_idx_external_name_order_decreasing);
+ }
+
+ if (H5Lcreate_external(ext_link_filename, ext_obj_name_c, subgroup_id,
+ GET_LINK_VAL_TEST_EXT_LINK_NAME3, H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create external link '%s'\n", GET_LINK_VAL_TEST_EXT_LINK_NAME3);
+ PART_ERROR(H5Lget_val_by_idx_external_name_order_decreasing);
+ }
+
+ /* Verify the links exist */
+ if ((link_exists = H5Lexists(subgroup_id, GET_LINK_VAL_TEST_EXT_LINK_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if link '%s' exists\n", GET_LINK_VAL_TEST_EXT_LINK_NAME);
+ PART_ERROR(H5Lget_val_by_idx_external_name_order_decreasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" link '%s' did not exist\n", GET_LINK_VAL_TEST_EXT_LINK_NAME);
+ PART_ERROR(H5Lget_val_by_idx_external_name_order_decreasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, GET_LINK_VAL_TEST_EXT_LINK_NAME2, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if link '%s' exists\n", GET_LINK_VAL_TEST_EXT_LINK_NAME2);
+ PART_ERROR(H5Lget_val_by_idx_external_name_order_decreasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" link '%s' did not exist\n", GET_LINK_VAL_TEST_EXT_LINK_NAME2);
+ PART_ERROR(H5Lget_val_by_idx_external_name_order_decreasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, GET_LINK_VAL_TEST_EXT_LINK_NAME3, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if link '%s' exists\n", GET_LINK_VAL_TEST_EXT_LINK_NAME3);
+ PART_ERROR(H5Lget_val_by_idx_external_name_order_decreasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" link '%s' did not exist\n", GET_LINK_VAL_TEST_EXT_LINK_NAME3);
+ PART_ERROR(H5Lget_val_by_idx_external_name_order_decreasing);
+ }
+
+ /* Retrieve the info and value of each link in turn */
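+            /* With H5_ITER_DEC on the name index, index 2 refers to the link whose
+             * name sorts first (pointing to ext_obj_name_a) and index 0 to the one
+             * that sorts last (ext_obj_name_c).
+             */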
+ HDmemset(&link_info, 0, sizeof(link_info));
+ if (H5Lget_info_by_idx2(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_DEC, 2, &link_info,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve external link's info at index %lld\n", 2);
+ PART_ERROR(H5Lget_val_by_idx_external_name_order_decreasing);
+ }
+
+ link_val_size = 1 + strlen(ext_link_filename) + 1 + strlen(ext_obj_name_a) + 1;
+ if (link_info.u.val_size != link_val_size) {
+ H5_FAILED();
+                HDprintf("    link value size %zu for link at index %d did not match expected size of %zu\n",
+                         link_info.u.val_size, 2, link_val_size);
+ PART_ERROR(H5Lget_val_by_idx_external_name_order_decreasing);
+ }
+
+ HDmemset(link_val_buf, 0, GET_LINK_VAL_TEST_LINK_VAL_BUF_SIZE);
+ if (H5Lget_val_by_idx(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_DEC, 2, link_val_buf,
+ GET_LINK_VAL_TEST_LINK_VAL_BUF_SIZE, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get external link value at index %lld\n", 2);
+ PART_ERROR(H5Lget_val_by_idx_external_name_order_decreasing);
+ }
+
+ if (H5Lunpack_elink_val(link_val_buf, link_info.u.val_size, &ext_link_flags, &ext_link_filepath,
+ &ext_link_val) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't unpack external link value buffer\n");
+ PART_ERROR(H5Lget_val_by_idx_external_name_order_decreasing);
+ }
+
+ if (HDstrncmp(ext_link_filepath, ext_link_filename, strlen(ext_link_filename) + 1)) {
+ H5_FAILED();
+ HDprintf(" external link target file '%s' did not match expected '%s'\n",
+ ext_link_filepath, ext_link_filename);
+ PART_ERROR(H5Lget_val_by_idx_external_name_order_decreasing);
+ }
+
+ if (HDstrncmp(ext_link_val, ext_obj_name_a, strlen(ext_obj_name_a) + 1)) {
+ H5_FAILED();
+ HDprintf(" external link value '%s' did not match expected '%s'\n", ext_link_val,
+ ext_obj_name_a);
+ PART_ERROR(H5Lget_val_by_idx_external_name_order_decreasing);
+ }
+
+ HDmemset(&link_info, 0, sizeof(link_info));
+ if (H5Lget_info_by_idx2(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_DEC, 1, &link_info,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve external link's info at index %lld\n", 1);
+ PART_ERROR(H5Lget_val_by_idx_external_name_order_decreasing);
+ }
+
+ link_val_size = 1 + strlen(ext_link_filename) + 1 + strlen(ext_obj_name_b) + 1;
+ if (link_info.u.val_size != link_val_size) {
+ H5_FAILED();
+                HDprintf("    link value size %zu for link at index %d did not match expected size of %zu\n",
+                         link_info.u.val_size, 1, link_val_size);
+ PART_ERROR(H5Lget_val_by_idx_external_name_order_decreasing);
+ }
+
+ HDmemset(link_val_buf, 0, GET_LINK_VAL_TEST_LINK_VAL_BUF_SIZE);
+ if (H5Lget_val_by_idx(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_DEC, 1, link_val_buf,
+ GET_LINK_VAL_TEST_LINK_VAL_BUF_SIZE, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get external link value at index %lld\n", 1);
+ PART_ERROR(H5Lget_val_by_idx_external_name_order_decreasing);
+ }
+
+ if (H5Lunpack_elink_val(link_val_buf, link_info.u.val_size, &ext_link_flags, &ext_link_filepath,
+ &ext_link_val) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't unpack external link value buffer\n");
+ PART_ERROR(H5Lget_val_by_idx_external_name_order_decreasing);
+ }
+
+ if (HDstrncmp(ext_link_filepath, ext_link_filename, strlen(ext_link_filename) + 1)) {
+ H5_FAILED();
+ HDprintf(" external link target file '%s' did not match expected '%s'\n",
+ ext_link_filepath, ext_link_filename);
+ PART_ERROR(H5Lget_val_by_idx_external_name_order_decreasing);
+ }
+
+ if (HDstrncmp(ext_link_val, ext_obj_name_b, strlen(ext_obj_name_b) + 1)) {
+ H5_FAILED();
+ HDprintf(" external link value '%s' did not match expected '%s'\n", ext_link_val,
+ ext_obj_name_b);
+ PART_ERROR(H5Lget_val_by_idx_external_name_order_decreasing);
+ }
+
+ HDmemset(&link_info, 0, sizeof(link_info));
+ if (H5Lget_info_by_idx2(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_DEC, 0, &link_info,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve external link's info at index %lld\n", 0);
+ PART_ERROR(H5Lget_val_by_idx_external_name_order_decreasing);
+ }
+
+ link_val_size = 1 + strlen(ext_link_filename) + 1 + strlen(ext_obj_name_c) + 1;
+ if (link_info.u.val_size != link_val_size) {
+ H5_FAILED();
+                HDprintf("    link value size %zu for link at index %d did not match expected size of %zu\n",
+                         link_info.u.val_size, 0, link_val_size);
+ PART_ERROR(H5Lget_val_by_idx_external_name_order_decreasing);
+ }
+
+ HDmemset(link_val_buf, 0, GET_LINK_VAL_TEST_LINK_VAL_BUF_SIZE);
+ if (H5Lget_val_by_idx(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_DEC, 0, link_val_buf,
+ GET_LINK_VAL_TEST_LINK_VAL_BUF_SIZE, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get external link value at index %lld\n", 0);
+ PART_ERROR(H5Lget_val_by_idx_external_name_order_decreasing);
+ }
+
+ if (H5Lunpack_elink_val(link_val_buf, link_info.u.val_size, &ext_link_flags, &ext_link_filepath,
+ &ext_link_val) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't unpack external link value buffer\n");
+ PART_ERROR(H5Lget_val_by_idx_external_name_order_decreasing);
+ }
+
+ if (HDstrncmp(ext_link_filepath, ext_link_filename, strlen(ext_link_filename) + 1)) {
+ H5_FAILED();
+ HDprintf(" external link target file '%s' did not match expected '%s'\n",
+ ext_link_filepath, ext_link_filename);
+ PART_ERROR(H5Lget_val_by_idx_external_name_order_decreasing);
+ }
+
+ if (HDstrncmp(ext_link_val, ext_obj_name_c, strlen(ext_obj_name_c) + 1)) {
+ H5_FAILED();
+ HDprintf(" external link value '%s' did not match expected '%s'\n", ext_link_val,
+ ext_obj_name_c);
+ PART_ERROR(H5Lget_val_by_idx_external_name_order_decreasing);
+ }
+
+ if (H5Gclose(subgroup_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close group '%s'\n", GET_LINK_VAL_TEST_SUBGROUP11_NAME);
+ PART_ERROR(H5Lget_val_by_idx_external_name_order_decreasing);
+ }
+
+ PASSED();
+#else
+ SKIPPED();
+ PART_EMPTY(H5Lget_val_by_idx_external_name_order_decreasing);
+#endif
+ }
+ PART_END(H5Lget_val_by_idx_external_name_order_decreasing);
+
+ H5E_BEGIN_TRY
+ {
+ H5Gclose(subgroup_id);
+ subgroup_id = H5I_INVALID_HID;
+ H5Fclose(ext_file_id);
+ ext_file_id = H5I_INVALID_HID;
+ }
+ H5E_END_TRY;
+
+ PART_BEGIN(H5Lget_val_by_idx_ud_crt_order_increasing)
+ {
+ TESTING_2("H5Lget_val_by_idx on user-defined link by creation order in increasing order");
+
+ SKIPPED();
+ PART_EMPTY(H5Lget_val_by_idx_ud_crt_order_increasing);
+ }
+ PART_END(H5Lget_val_by_idx_ud_crt_order_increasing);
+
+ H5E_BEGIN_TRY
+ {
+ H5Gclose(subgroup_id);
+ subgroup_id = H5I_INVALID_HID;
+ }
+ H5E_END_TRY;
+
+ PART_BEGIN(H5Lget_val_by_idx_ud_crt_order_decreasing)
+ {
+ TESTING_2("H5Lget_val_by_idx on user-defined link by creation order in decreasing order");
+
+ SKIPPED();
+ PART_EMPTY(H5Lget_val_by_idx_ud_crt_order_decreasing);
+ }
+ PART_END(H5Lget_val_by_idx_ud_crt_order_decreasing);
+
+ H5E_BEGIN_TRY
+ {
+ H5Gclose(subgroup_id);
+ subgroup_id = H5I_INVALID_HID;
+ }
+ H5E_END_TRY;
+
+ PART_BEGIN(H5Lget_val_by_idx_ud_name_order_increasing)
+ {
+ TESTING_2("H5Lget_val_by_idx on user-defined link by alphabetical order in increasing order");
+
+ SKIPPED();
+ PART_EMPTY(H5Lget_val_by_idx_ud_name_order_increasing);
+ }
+ PART_END(H5Lget_val_by_idx_ud_name_order_increasing);
+
+ H5E_BEGIN_TRY
+ {
+ H5Gclose(subgroup_id);
+ subgroup_id = H5I_INVALID_HID;
+ }
+ H5E_END_TRY;
+
+ PART_BEGIN(H5Lget_val_by_idx_ud_name_order_decreasing)
+ {
+ TESTING_2("H5Lget_val_by_idx on user-defined link by alphabetical order in decreasing order");
+
+ SKIPPED();
+ PART_EMPTY(H5Lget_val_by_idx_ud_name_order_decreasing);
+ }
+ PART_END(H5Lget_val_by_idx_ud_name_order_decreasing);
+
+ H5E_BEGIN_TRY
+ {
+ H5Gclose(subgroup_id);
+ subgroup_id = H5I_INVALID_HID;
+ }
+ H5E_END_TRY;
+ }
+ END_MULTIPART;
+
+ TESTING_2("test cleanup");
+
+ if (H5Pclose(gcpl_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Pclose(gcpl_id);
+ H5Gclose(subgroup_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(ext_file_id);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to check that a soft or external link's value can't be
+ * retrieved when H5Lget_val(_by_idx) is passed invalid parameters.
+ */
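+/*
+ * For reference, a minimal sketch of the call pattern these negative tests
+ * deviate from (the link name is illustrative, not one used by this test):
+ * query the value size with H5Lget_info2, then retrieve the value with
+ * H5Lget_val.
+ *
+ *     H5L_info2_t info;
+ *     char       *buf;
+ *
+ *     if (H5Lget_info2(group_id, "soft_link", &info, H5P_DEFAULT) < 0)
+ *         ...handle error...
+ *     if (NULL == (buf = HDmalloc(info.u.val_size)))
+ *         ...handle error...
+ *     if (H5Lget_val(group_id, "soft_link", buf, info.u.val_size, H5P_DEFAULT) < 0)
+ *         ...handle error...
+ *     HDfree(buf);
+ */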
+static int
+test_get_link_val_invalid_params(void)
+{
+ H5L_info2_t link_info;
+ htri_t link_exists;
+ herr_t err_ret = -1;
+ size_t link_val_buf_size = 0;
+ char *link_val_buf = NULL;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t gcpl_id = H5I_INVALID_HID;
+
+ TESTING_MULTIPART("link value retrieval with invalid parameters");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_LINK_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_LINK_MORE) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_CREATION_ORDER) || !(vol_cap_flags_g & H5VL_CAP_FLAG_SOFT_LINKS)) {
+ SKIPPED();
+        HDprintf("    API functions for basic file, group, basic link, more link, soft link, or creation "
+                 "order aren't supported with this connector\n");
+ return 0;
+ }
+
+ TESTING_2("test setup");
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, LINK_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", LINK_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((gcpl_id = H5Pcreate(H5P_GROUP_CREATE)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create a GCPL\n");
+ goto error;
+ }
+
+ if (H5Pset_link_creation_order(gcpl_id, H5P_CRT_ORDER_TRACKED | H5P_CRT_ORDER_INDEXED) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't enable link creation order tracking and indexing on GCPL\n");
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, GET_LINK_VAL_INVALID_PARAMS_TEST_GROUP_NAME, H5P_DEFAULT,
+ gcpl_id, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container subgroup '%s'\n",
+ GET_LINK_VAL_INVALID_PARAMS_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if (H5Lcreate_soft("/" LINK_TEST_GROUP_NAME "/" GET_LINK_VAL_INVALID_PARAMS_TEST_GROUP_NAME, group_id,
+ GET_LINK_VAL_INVALID_PARAMS_TEST_SOFT_LINK_NAME, H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create soft link '%s'\n", GET_LINK_VAL_INVALID_PARAMS_TEST_SOFT_LINK_NAME);
+ goto error;
+ }
+
+ /* Verify the links have been created */
+ if ((link_exists = H5Lexists(group_id, GET_LINK_VAL_INVALID_PARAMS_TEST_SOFT_LINK_NAME, H5P_DEFAULT)) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if link '%s' exists\n",
+ GET_LINK_VAL_INVALID_PARAMS_TEST_SOFT_LINK_NAME);
+ goto error;
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" link did not exist\n");
+ goto error;
+ }
+
+ link_val_buf_size = 100;
+ if (NULL == (link_val_buf = (char *)HDmalloc(link_val_buf_size))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for storing link value\n");
+ goto error;
+ }
+
+ PASSED();
+
+ BEGIN_MULTIPART
+ {
+ PART_BEGIN(H5Lget_val_invalid_loc_id)
+ {
+ TESTING_2("H5Lget_val with an invalid location ID");
+
+ HDmemset(&link_info, 0, sizeof(link_info));
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Lget_val(H5I_INVALID_HID, GET_LINK_VAL_INVALID_PARAMS_TEST_SOFT_LINK_NAME,
+ link_val_buf, link_val_buf_size, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Lget_val succeeded with an invalid location ID\n");
+ PART_ERROR(H5Lget_val_invalid_loc_id);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lget_val_invalid_loc_id);
+
+ PART_BEGIN(H5Lget_val_invalid_link_name)
+ {
+ TESTING_2("H5Lget_val with an invalid link name");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Lget_val(group_id, NULL, link_val_buf, link_val_buf_size, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Lget_val succeeded with a NULL link name\n");
+ PART_ERROR(H5Lget_val_invalid_link_name);
+ }
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Lget_val(group_id, "", link_val_buf, link_val_buf_size, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Lget_val succeeded with an invalid link name of ''\n");
+ PART_ERROR(H5Lget_val_invalid_link_name);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lget_val_invalid_link_name);
+
+ PART_BEGIN(H5Lget_val_invalid_lapl)
+ {
+ TESTING_2("H5Lget_val with an invalid LAPL");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Lget_val(group_id, GET_LINK_VAL_INVALID_PARAMS_TEST_SOFT_LINK_NAME, link_val_buf,
+ link_val_buf_size, H5I_INVALID_HID);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Lget_val succeeded with an invalid LAPL\n");
+ PART_ERROR(H5Lget_val_invalid_lapl);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lget_val_invalid_lapl);
+
+ PART_BEGIN(H5Lget_val_by_idx_invalid_loc_id)
+ {
+ TESTING_2("H5Lget_val_by_idx with an invalid location ID");
+
+ HDmemset(&link_info, 0, sizeof(link_info));
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Lget_val_by_idx(H5I_INVALID_HID, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, 0,
+ link_val_buf, link_val_buf_size, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Lget_val_by_idx succeeded with an invalid location ID!\n");
+ PART_ERROR(H5Lget_val_by_idx_invalid_loc_id);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lget_val_by_idx_invalid_loc_id);
+
+ PART_BEGIN(H5Lget_val_by_idx_invalid_grp_name)
+ {
+ TESTING_2("H5Lget_val_by_idx with an invalid group name");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Lget_val_by_idx(group_id, NULL, H5_INDEX_CRT_ORDER, H5_ITER_INC, 0, link_val_buf,
+ link_val_buf_size, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Lget_val_by_idx succeeded with a NULL group name!\n");
+ PART_ERROR(H5Lget_val_by_idx_invalid_grp_name);
+ }
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Lget_val_by_idx(group_id, "", H5_INDEX_CRT_ORDER, H5_ITER_INC, 0, link_val_buf,
+ link_val_buf_size, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Lget_val_by_idx succeeded with an invalid group name of ''!\n");
+ PART_ERROR(H5Lget_val_by_idx_invalid_grp_name);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lget_val_by_idx_invalid_grp_name);
+
+ PART_BEGIN(H5Lget_val_by_idx_invalid_index_type)
+ {
+ TESTING_2("H5Lget_val_by_idx with an invalid index type");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Lget_val_by_idx(group_id, ".", H5_INDEX_UNKNOWN, H5_ITER_INC, 0, link_val_buf,
+ link_val_buf_size, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Lget_val_by_idx succeeded with invalid index type H5_INDEX_UNKNOWN!\n");
+ PART_ERROR(H5Lget_val_by_idx_invalid_index_type);
+ }
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Lget_val_by_idx(group_id, ".", H5_INDEX_N, H5_ITER_INC, 0, link_val_buf,
+ link_val_buf_size, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Lget_val_by_idx succeeded with invalid index type H5_INDEX_N!\n");
+ PART_ERROR(H5Lget_val_by_idx_invalid_index_type);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lget_val_by_idx_invalid_index_type);
+
+ PART_BEGIN(H5Lget_val_by_idx_invalid_iter_order)
+ {
+ TESTING_2("H5Lget_val_by_idx with an invalid iteration ordering");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Lget_val_by_idx(group_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_UNKNOWN, 0,
+ link_val_buf, link_val_buf_size, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(
+ " H5Lget_val_by_idx succeeded with invalid iteration ordering H5_ITER_UNKNOWN!\n");
+ PART_ERROR(H5Lget_val_by_idx_invalid_iter_order);
+ }
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Lget_val_by_idx(group_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_N, 0, link_val_buf,
+ link_val_buf_size, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Lget_val_by_idx succeeded with invalid iteration ordering H5_ITER_N!\n");
+ PART_ERROR(H5Lget_val_by_idx_invalid_iter_order);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lget_val_by_idx_invalid_iter_order);
+
+ PART_BEGIN(H5Lget_val_by_idx_invalid_lapl)
+ {
+ TESTING_2("H5Lget_val_by_idx with an invalid LAPL");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Lget_val_by_idx(group_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, 0, link_val_buf,
+ link_val_buf_size, H5I_INVALID_HID);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Lget_val_by_idx succeeded with an invalid LAPL!\n");
+ PART_ERROR(H5Lget_val_by_idx_invalid_lapl);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lget_val_by_idx_invalid_lapl);
+ }
+ END_MULTIPART;
+
+ TESTING_2("test cleanup");
+
+ if (link_val_buf) {
+ HDfree(link_val_buf);
+ link_val_buf = NULL;
+ }
+
+ if (H5Pclose(gcpl_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ if (link_val_buf)
+ HDfree(link_val_buf);
+ H5Pclose(gcpl_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to check the functionality of H5Lget_info2 and
+ * H5Lget_info_by_idx2.
+ */
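+/*
+ * For reference, a minimal sketch of the H5L_info2_t fields the parts below
+ * verify (the link name is illustrative):
+ *
+ *     H5L_info2_t info;
+ *
+ *     if (H5Lget_info2(group_id, "some_link", &info, H5P_DEFAULT) < 0)
+ *         ...handle error...
+ *
+ *     info.type         - H5L_TYPE_HARD, H5L_TYPE_SOFT, H5L_TYPE_EXTERNAL, or user-defined
+ *     info.corder_valid - whether info.corder holds a valid creation order value
+ *     info.corder       - the link's creation order, when tracking is enabled
+ *     info.u.val_size   - the value size for soft/external/user-defined links
+ */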
+static int
+test_get_link_info(void)
+{
+ H5L_info2_t link_info;
+ htri_t link_exists;
+ size_t link_val_size;
+ hid_t file_id = H5I_INVALID_HID, ext_file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t subgroup_id = H5I_INVALID_HID;
+ hid_t gcpl_id = H5I_INVALID_HID;
+#ifndef NO_EXTERNAL_LINKS
+ char *ext_objname;
+ char ext_link_filename[H5_API_TEST_FILENAME_MAX_LENGTH];
+#endif
+
+ TESTING_MULTIPART("link info retrieval");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_LINK_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_LINK_MORE) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_HARD_LINKS) || !(vol_cap_flags_g & H5VL_CAP_FLAG_EXTERNAL_LINKS) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_SOFT_LINKS) || !(vol_cap_flags_g & H5VL_CAP_FLAG_CREATION_ORDER)) {
+ SKIPPED();
+        HDprintf("    API functions for basic file, group, basic link, more link, soft link, hard link, "
+                 "external link, or creation order aren't supported with this connector\n");
+ return 0;
+ }
+
+ TESTING_2("test setup");
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, LINK_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", LINK_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((gcpl_id = H5Pcreate(H5P_GROUP_CREATE)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create GCPL for link creation order tracking\n");
+ goto error;
+ }
+
+ if (H5Pset_link_creation_order(gcpl_id, H5P_CRT_ORDER_TRACKED) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't set link creation order tracking\n");
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, GET_LINK_INFO_TEST_GROUP_NAME, H5P_DEFAULT, gcpl_id,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container subgroup '%s'\n", GET_LINK_INFO_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ PASSED();
+
+ BEGIN_MULTIPART
+ {
+ PART_BEGIN(H5Lget_info_hard)
+ {
+ TESTING_2("H5Lget_info2 on hard link");
+
+ HDmemset(&link_info, 0, sizeof(link_info));
+
+ if ((subgroup_id = H5Gcreate2(group_id, GET_LINK_INFO_TEST_SUBGROUP1_NAME, H5P_DEFAULT, gcpl_id,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create subgroup '%s'\n", GET_LINK_INFO_TEST_SUBGROUP1_NAME);
+ PART_ERROR(H5Lget_info_hard);
+ }
+
+ if (H5Lcreate_hard(subgroup_id, ".", subgroup_id, GET_LINK_INFO_TEST_HARD_LINK_NAME, H5P_DEFAULT,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create hard link '%s'\n", GET_LINK_INFO_TEST_HARD_LINK_NAME);
+ PART_ERROR(H5Lget_info_hard);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, GET_LINK_INFO_TEST_HARD_LINK_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if hard link '%s' exists\n",
+ GET_LINK_INFO_TEST_HARD_LINK_NAME);
+ PART_ERROR(H5Lget_info_hard);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" hard link did not exist\n");
+ PART_ERROR(H5Lget_info_hard);
+ }
+
+ if (H5Lget_info2(subgroup_id, GET_LINK_INFO_TEST_HARD_LINK_NAME, &link_info, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get hard link info\n");
+ PART_ERROR(H5Lget_info_hard);
+ }
+
+ if (link_info.type != H5L_TYPE_HARD) {
+ H5_FAILED();
+ HDprintf(" incorrect link type returned\n");
+ PART_ERROR(H5Lget_info_hard);
+ }
+
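+            /* Only compare the creation order value when the connector reports it as valid */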
+ if (link_info.corder_valid && (link_info.corder != 0)) {
+ H5_FAILED();
+ HDprintf(" link's creation order value '%lld' did not match expected value '%lld'\n",
+ (long long)link_info.corder, (long long)0);
+ PART_ERROR(H5Lget_info_hard);
+ }
+
+ if (H5Gclose(subgroup_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close group '%s'\n", GET_LINK_INFO_TEST_SUBGROUP1_NAME);
+ PART_ERROR(H5Lget_info_hard);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lget_info_hard);
+
+ H5E_BEGIN_TRY
+ {
+ H5Gclose(subgroup_id);
+ subgroup_id = H5I_INVALID_HID;
+ }
+ H5E_END_TRY;
+
+ PART_BEGIN(H5Lget_info_soft)
+ {
+ TESTING_2("H5Lget_info2 on soft link");
+
+ HDmemset(&link_info, 0, sizeof(link_info));
+
+ if ((subgroup_id = H5Gcreate2(group_id, GET_LINK_INFO_TEST_SUBGROUP2_NAME, H5P_DEFAULT, gcpl_id,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create subgroup '%s'\n", GET_LINK_INFO_TEST_SUBGROUP2_NAME);
+ PART_ERROR(H5Lget_info_soft);
+ }
+
+ if (H5Lcreate_soft("/" LINK_TEST_GROUP_NAME "/" GET_LINK_INFO_TEST_GROUP_NAME
+ "/" GET_LINK_INFO_TEST_SUBGROUP2_NAME,
+ subgroup_id, GET_LINK_INFO_TEST_SOFT_LINK_NAME, H5P_DEFAULT,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create soft link '%s'\n", GET_LINK_INFO_TEST_SOFT_LINK_NAME);
+ PART_ERROR(H5Lget_info_soft);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, GET_LINK_INFO_TEST_SOFT_LINK_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if soft link '%s' exists\n",
+ GET_LINK_INFO_TEST_SOFT_LINK_NAME);
+ PART_ERROR(H5Lget_info_soft);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" soft link did not exist\n");
+ PART_ERROR(H5Lget_info_soft);
+ }
+
+ if (H5Lget_info2(subgroup_id, GET_LINK_INFO_TEST_SOFT_LINK_NAME, &link_info, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get soft link info\n");
+ PART_ERROR(H5Lget_info_soft);
+ }
+
+ if (link_info.type != H5L_TYPE_SOFT) {
+ H5_FAILED();
+ HDprintf(" incorrect link type returned\n");
+ PART_ERROR(H5Lget_info_soft);
+ }
+
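+            /* A soft link's value is its NUL-terminated target path, hence the "+ 1" below */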
+ link_val_size = strlen("/" LINK_TEST_GROUP_NAME "/" GET_LINK_INFO_TEST_GROUP_NAME
+ "/" GET_LINK_INFO_TEST_SUBGROUP2_NAME) +
+ 1;
+ if (link_info.u.val_size != link_val_size) {
+ H5_FAILED();
+ HDprintf(" link's value size '%zu' did not match expected value '%zu'\n",
+ link_info.u.val_size, link_val_size);
+ PART_ERROR(H5Lget_info_soft);
+ }
+
+ if (link_info.corder_valid && (link_info.corder != 0)) {
+ H5_FAILED();
+ HDprintf(" link's creation order value '%lld' did not match expected value '%lld'\n",
+ (long long)link_info.corder, (long long)0);
+ PART_ERROR(H5Lget_info_soft);
+ }
+
+ if (H5Gclose(subgroup_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close group '%s'\n", GET_LINK_INFO_TEST_SUBGROUP2_NAME);
+ PART_ERROR(H5Lget_info_soft);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lget_info_soft);
+
+ H5E_BEGIN_TRY
+ {
+ H5Gclose(subgroup_id);
+ subgroup_id = H5I_INVALID_HID;
+ }
+ H5E_END_TRY;
+
+ PART_BEGIN(H5Lget_info_external)
+ {
+ TESTING_2("H5Lget_info2 on external link");
+#ifndef NO_EXTERNAL_LINKS
+ HDmemset(&link_info, 0, sizeof(link_info));
+
+ HDsnprintf(ext_link_filename, H5_API_TEST_FILENAME_MAX_LENGTH, "%s",
+ EXTERNAL_LINK_TEST_FILE_NAME);
+
+ if ((ext_file_id = H5Fcreate(ext_link_filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create file '%s' for external link to reference\n", ext_link_filename);
+ PART_ERROR(H5Lget_info_external);
+ }
+
+ if (H5Fclose(ext_file_id) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't close file '%s'\n", ext_link_filename);
+ PART_ERROR(H5Lget_info_external);
+ }
+
+ if ((subgroup_id = H5Gcreate2(group_id, GET_LINK_INFO_TEST_SUBGROUP3_NAME, H5P_DEFAULT, gcpl_id,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create subgroup '%s'\n", GET_LINK_INFO_TEST_SUBGROUP3_NAME);
+ PART_ERROR(H5Lget_info_external);
+ }
+
+ ext_objname = "/";
+ if (H5Lcreate_external(ext_link_filename, ext_objname, subgroup_id,
+ GET_LINK_INFO_TEST_EXT_LINK_NAME, H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create external link '%s'\n", GET_LINK_INFO_TEST_EXT_LINK_NAME);
+ PART_ERROR(H5Lget_info_external);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, GET_LINK_INFO_TEST_EXT_LINK_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if external link '%s' exists\n",
+ GET_LINK_INFO_TEST_EXT_LINK_NAME);
+ PART_ERROR(H5Lget_info_external);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" external link did not exist\n");
+ PART_ERROR(H5Lget_info_external);
+ }
+
+ if (H5Lget_info2(subgroup_id, GET_LINK_INFO_TEST_EXT_LINK_NAME, &link_info, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get external link info\n");
+ PART_ERROR(H5Lget_info_external);
+ }
+
+ if (link_info.type != H5L_TYPE_EXTERNAL) {
+ H5_FAILED();
+ HDprintf(" incorrect link type returned\n");
+ PART_ERROR(H5Lget_info_external);
+ }
+
+ link_val_size = 1 + strlen(ext_link_filename) + 1 + strlen(ext_objname) + 1;
+ if (link_info.u.val_size != link_val_size) {
+ H5_FAILED();
+                HDprintf("    link's value size '%zu' did not match expected value '%zu'\n",
+                         link_info.u.val_size, link_val_size);
+ PART_ERROR(H5Lget_info_external);
+ }
+
+ if (link_info.corder_valid && (link_info.corder != 0)) {
+ H5_FAILED();
+ HDprintf(" link's creation order value '%lld' did not match expected value '%lld'\n",
+ (long long)link_info.corder, (long long)0);
+ PART_ERROR(H5Lget_info_external);
+ }
+
+ if (H5Gclose(subgroup_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close group '%s'\n", GET_LINK_INFO_TEST_SUBGROUP3_NAME);
+ PART_ERROR(H5Lget_info_external);
+ }
+
+ PASSED();
+#else
+ SKIPPED();
+ PART_EMPTY(H5Lget_info_external);
+#endif
+ }
+ PART_END(H5Lget_info_external);
+
+ H5E_BEGIN_TRY
+ {
+ H5Gclose(subgroup_id);
+ subgroup_id = H5I_INVALID_HID;
+ H5Fclose(ext_file_id);
+ ext_file_id = H5I_INVALID_HID;
+ }
+ H5E_END_TRY;
+
+ PART_BEGIN(H5Lget_info_ud)
+ {
+ TESTING_2("H5Lget_info2 on user-defined link");
+
+ /* TODO */
+
+ SKIPPED();
+ PART_EMPTY(H5Lget_info_ud);
+ }
+ PART_END(H5Lget_info_ud);
+
+ H5E_BEGIN_TRY
+ {
+ H5Gclose(subgroup_id);
+ subgroup_id = H5I_INVALID_HID;
+ }
+ H5E_END_TRY;
+
+ PART_BEGIN(H5Lget_info_by_idx_hard_crt_order_increasing)
+ {
+ TESTING_2("H5Lget_info_by_idx2 on hard link by creation order in increasing order");
+
+ if ((subgroup_id = H5Gcreate2(group_id, GET_LINK_INFO_TEST_SUBGROUP5_NAME, H5P_DEFAULT, gcpl_id,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create subgroup '%s'\n", GET_LINK_INFO_TEST_SUBGROUP5_NAME);
+ PART_ERROR(H5Lget_info_by_idx_hard_crt_order_increasing);
+ }
+
+ /* Create several hard links */
+ if (H5Lcreate_hard(subgroup_id, ".", subgroup_id, GET_LINK_INFO_TEST_HARD_LINK_NAME, H5P_DEFAULT,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create hard link '%s'\n", GET_LINK_INFO_TEST_HARD_LINK_NAME);
+ PART_ERROR(H5Lget_info_by_idx_hard_crt_order_increasing);
+ }
+
+ if (H5Lcreate_hard(subgroup_id, ".", subgroup_id, GET_LINK_INFO_TEST_HARD_LINK_NAME2, H5P_DEFAULT,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create hard link '%s'\n", GET_LINK_INFO_TEST_HARD_LINK_NAME2);
+ PART_ERROR(H5Lget_info_by_idx_hard_crt_order_increasing);
+ }
+
+ if (H5Lcreate_hard(subgroup_id, ".", subgroup_id, GET_LINK_INFO_TEST_HARD_LINK_NAME3, H5P_DEFAULT,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create hard link '%s'\n", GET_LINK_INFO_TEST_HARD_LINK_NAME3);
+ PART_ERROR(H5Lget_info_by_idx_hard_crt_order_increasing);
+ }
+
+ /* Verify the links have been created */
+ if ((link_exists = H5Lexists(subgroup_id, GET_LINK_INFO_TEST_HARD_LINK_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if hard link '%s' exists\n",
+ GET_LINK_INFO_TEST_HARD_LINK_NAME);
+ PART_ERROR(H5Lget_info_by_idx_hard_crt_order_increasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+                HDprintf("    hard link '%s' did not exist\n",
+ GET_LINK_INFO_TEST_HARD_LINK_NAME);
+ PART_ERROR(H5Lget_info_by_idx_hard_crt_order_increasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, GET_LINK_INFO_TEST_HARD_LINK_NAME2, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if hard link '%s' exists\n",
+ GET_LINK_INFO_TEST_HARD_LINK_NAME2);
+ PART_ERROR(H5Lget_info_by_idx_hard_crt_order_increasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+                HDprintf("    hard link '%s' did not exist\n",
+ GET_LINK_INFO_TEST_HARD_LINK_NAME2);
+ PART_ERROR(H5Lget_info_by_idx_hard_crt_order_increasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, GET_LINK_INFO_TEST_HARD_LINK_NAME3, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if hard link '%s' exists\n",
+ GET_LINK_INFO_TEST_HARD_LINK_NAME3);
+ PART_ERROR(H5Lget_info_by_idx_hard_crt_order_increasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+                HDprintf("    hard link '%s' did not exist\n",
+ GET_LINK_INFO_TEST_HARD_LINK_NAME3);
+ PART_ERROR(H5Lget_info_by_idx_hard_crt_order_increasing);
+ }
+
+ /* Retrieve info of links in turn */
+ HDmemset(&link_info, 0, sizeof(link_info));
+ if (H5Lget_info_by_idx2(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, 0, &link_info,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get hard link info for index %d\n", 0);
+ PART_ERROR(H5Lget_info_by_idx_hard_crt_order_increasing);
+ }
+
+ if (link_info.type != H5L_TYPE_HARD) {
+ H5_FAILED();
+ HDprintf(" incorrect link type returned\n");
+ PART_ERROR(H5Lget_info_by_idx_hard_crt_order_increasing);
+ }
+
+ if (link_info.corder_valid && (link_info.corder != 0)) {
+ H5_FAILED();
+ HDprintf(" link's creation order value '%lld' did not match expected value '%lld'\n",
+ (long long)link_info.corder, (long long)0);
+ PART_ERROR(H5Lget_info_by_idx_hard_crt_order_increasing);
+ }
+
+ HDmemset(&link_info, 0, sizeof(link_info));
+ if (H5Lget_info_by_idx2(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, 1, &link_info,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get hard link info for index %d\n", 1);
+ PART_ERROR(H5Lget_info_by_idx_hard_crt_order_increasing);
+ }
+
+ if (link_info.type != H5L_TYPE_HARD) {
+ H5_FAILED();
+ HDprintf(" incorrect link type returned\n");
+ PART_ERROR(H5Lget_info_by_idx_hard_crt_order_increasing);
+ }
+
+ if (link_info.corder_valid && (link_info.corder != 1)) {
+ H5_FAILED();
+ HDprintf(" link's creation order value '%lld' did not match expected value '%lld'\n",
+ (long long)link_info.corder, (long long)1);
+ PART_ERROR(H5Lget_info_by_idx_hard_crt_order_increasing);
+ }
+
+ HDmemset(&link_info, 0, sizeof(link_info));
+ if (H5Lget_info_by_idx2(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, 2, &link_info,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get hard link info for index %d\n", 2);
+ PART_ERROR(H5Lget_info_by_idx_hard_crt_order_increasing);
+ }
+
+ if (link_info.type != H5L_TYPE_HARD) {
+ H5_FAILED();
+ HDprintf(" incorrect link type returned\n");
+ PART_ERROR(H5Lget_info_by_idx_hard_crt_order_increasing);
+ }
+
+ if (link_info.corder_valid && (link_info.corder != 2)) {
+ H5_FAILED();
+ HDprintf(" link's creation order value '%lld' did not match expected value '%lld'\n",
+ (long long)link_info.corder, (long long)2);
+ PART_ERROR(H5Lget_info_by_idx_hard_crt_order_increasing);
+ }
+
+ if (H5Gclose(subgroup_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close group '%s'\n", GET_LINK_INFO_TEST_SUBGROUP5_NAME);
+ PART_ERROR(H5Lget_info_by_idx_hard_crt_order_increasing);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lget_info_by_idx_hard_crt_order_increasing);
+
+ H5E_BEGIN_TRY
+ {
+ H5Gclose(subgroup_id);
+ subgroup_id = H5I_INVALID_HID;
+ }
+ H5E_END_TRY;
+
+ PART_BEGIN(H5Lget_info_by_idx_hard_crt_order_decreasing)
+ {
+ TESTING_2("H5Lget_info_by_idx2 on hard link by creation order in decreasing order");
+
+ if ((subgroup_id = H5Gcreate2(group_id, GET_LINK_INFO_TEST_SUBGROUP6_NAME, H5P_DEFAULT, gcpl_id,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create subgroup '%s'\n", GET_LINK_INFO_TEST_SUBGROUP6_NAME);
+ PART_ERROR(H5Lget_info_by_idx_hard_crt_order_decreasing);
+ }
+
+ /* Create several hard links */
+ if (H5Lcreate_hard(subgroup_id, ".", subgroup_id, GET_LINK_INFO_TEST_HARD_LINK_NAME, H5P_DEFAULT,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create hard link '%s'\n", GET_LINK_INFO_TEST_HARD_LINK_NAME);
+ PART_ERROR(H5Lget_info_by_idx_hard_crt_order_decreasing);
+ }
+
+ if (H5Lcreate_hard(subgroup_id, ".", subgroup_id, GET_LINK_INFO_TEST_HARD_LINK_NAME2, H5P_DEFAULT,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create hard link '%s'\n", GET_LINK_INFO_TEST_HARD_LINK_NAME2);
+ PART_ERROR(H5Lget_info_by_idx_hard_crt_order_decreasing);
+ }
+
+ if (H5Lcreate_hard(subgroup_id, ".", subgroup_id, GET_LINK_INFO_TEST_HARD_LINK_NAME3, H5P_DEFAULT,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create hard link '%s'\n", GET_LINK_INFO_TEST_HARD_LINK_NAME3);
+ PART_ERROR(H5Lget_info_by_idx_hard_crt_order_decreasing);
+ }
+
+ /* Verify the links have been created */
+ if ((link_exists = H5Lexists(subgroup_id, GET_LINK_INFO_TEST_HARD_LINK_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if hard link '%s' exists\n",
+ GET_LINK_INFO_TEST_HARD_LINK_NAME);
+ PART_ERROR(H5Lget_info_by_idx_hard_crt_order_decreasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+                HDprintf("    hard link '%s' did not exist\n",
+ GET_LINK_INFO_TEST_HARD_LINK_NAME);
+ PART_ERROR(H5Lget_info_by_idx_hard_crt_order_decreasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, GET_LINK_INFO_TEST_HARD_LINK_NAME2, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if hard link '%s' exists\n",
+ GET_LINK_INFO_TEST_HARD_LINK_NAME2);
+ PART_ERROR(H5Lget_info_by_idx_hard_crt_order_decreasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+                HDprintf("    hard link '%s' did not exist\n",
+ GET_LINK_INFO_TEST_HARD_LINK_NAME2);
+ PART_ERROR(H5Lget_info_by_idx_hard_crt_order_decreasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, GET_LINK_INFO_TEST_HARD_LINK_NAME3, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if hard link '%s' exists\n",
+ GET_LINK_INFO_TEST_HARD_LINK_NAME3);
+ PART_ERROR(H5Lget_info_by_idx_hard_crt_order_decreasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+                HDprintf("    hard link '%s' did not exist\n",
+ GET_LINK_INFO_TEST_HARD_LINK_NAME3);
+ PART_ERROR(H5Lget_info_by_idx_hard_crt_order_decreasing);
+ }
+
+ /* Retrieve info of links in turn */
+ HDmemset(&link_info, 0, sizeof(link_info));
+ if (H5Lget_info_by_idx2(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_DEC, 2, &link_info,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get hard link info for index %d\n", 2);
+ PART_ERROR(H5Lget_info_by_idx_hard_crt_order_decreasing);
+ }
+
+ if (link_info.type != H5L_TYPE_HARD) {
+ H5_FAILED();
+ HDprintf(" incorrect link type returned\n");
+ PART_ERROR(H5Lget_info_by_idx_hard_crt_order_decreasing);
+ }
+
+ if (link_info.corder_valid && (link_info.corder != 0)) {
+ H5_FAILED();
+ HDprintf(" link's creation order value '%lld' did not match expected value '%lld'\n",
+ (long long)link_info.corder, (long long)0);
+ PART_ERROR(H5Lget_info_by_idx_hard_crt_order_decreasing);
+ }
+
+ HDmemset(&link_info, 0, sizeof(link_info));
+ if (H5Lget_info_by_idx2(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_DEC, 1, &link_info,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get hard link info for index %d\n", 1);
+ PART_ERROR(H5Lget_info_by_idx_hard_crt_order_decreasing);
+ }
+
+ if (link_info.type != H5L_TYPE_HARD) {
+ H5_FAILED();
+ HDprintf(" incorrect link type returned\n");
+ PART_ERROR(H5Lget_info_by_idx_hard_crt_order_decreasing);
+ }
+
+ if (link_info.corder_valid && (link_info.corder != 1)) {
+ H5_FAILED();
+ HDprintf(" link's creation order value '%lld' did not match expected value '%lld'\n",
+ (long long)link_info.corder, (long long)1);
+ PART_ERROR(H5Lget_info_by_idx_hard_crt_order_decreasing);
+ }
+
+ HDmemset(&link_info, 0, sizeof(link_info));
+ if (H5Lget_info_by_idx2(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_DEC, 0, &link_info,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get hard link info for index %d\n", 0);
+ PART_ERROR(H5Lget_info_by_idx_hard_crt_order_decreasing);
+ }
+
+ if (link_info.type != H5L_TYPE_HARD) {
+ H5_FAILED();
+ HDprintf(" incorrect link type returned\n");
+ PART_ERROR(H5Lget_info_by_idx_hard_crt_order_decreasing);
+ }
+
+ if (link_info.corder_valid && (link_info.corder != 2)) {
+ H5_FAILED();
+ HDprintf(" link's creation order value '%lld' did not match expected value '%lld'\n",
+ (long long)link_info.corder, (long long)2);
+ PART_ERROR(H5Lget_info_by_idx_hard_crt_order_decreasing);
+ }
+
+ if (H5Gclose(subgroup_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close group '%s'\n", GET_LINK_INFO_TEST_SUBGROUP6_NAME);
+ PART_ERROR(H5Lget_info_by_idx_hard_crt_order_decreasing);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lget_info_by_idx_hard_crt_order_decreasing);
+
+ H5E_BEGIN_TRY
+ {
+ H5Gclose(subgroup_id);
+ subgroup_id = H5I_INVALID_HID;
+ }
+ H5E_END_TRY;
+
+ PART_BEGIN(H5Lget_info_by_idx_hard_name_order_increasing)
+ {
+ TESTING_2("H5Lget_info_by_idx2 on hard link by alphabetical order in increasing order");
+
+ if ((subgroup_id = H5Gcreate2(group_id, GET_LINK_INFO_TEST_SUBGROUP7_NAME, H5P_DEFAULT, gcpl_id,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create subgroup '%s'\n", GET_LINK_INFO_TEST_SUBGROUP7_NAME);
+ PART_ERROR(H5Lget_info_by_idx_hard_name_order_increasing);
+ }
+
+ /* Create several hard links */
+ if (H5Lcreate_hard(subgroup_id, ".", subgroup_id, GET_LINK_INFO_TEST_HARD_LINK_NAME, H5P_DEFAULT,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create hard link '%s'\n", GET_LINK_INFO_TEST_HARD_LINK_NAME);
+ PART_ERROR(H5Lget_info_by_idx_hard_name_order_increasing);
+ }
+
+ if (H5Lcreate_hard(subgroup_id, ".", subgroup_id, GET_LINK_INFO_TEST_HARD_LINK_NAME2, H5P_DEFAULT,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create hard link '%s'\n", GET_LINK_INFO_TEST_HARD_LINK_NAME2);
+ PART_ERROR(H5Lget_info_by_idx_hard_name_order_increasing);
+ }
+
+ if (H5Lcreate_hard(subgroup_id, ".", subgroup_id, GET_LINK_INFO_TEST_HARD_LINK_NAME3, H5P_DEFAULT,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create hard link '%s'\n", GET_LINK_INFO_TEST_HARD_LINK_NAME3);
+ PART_ERROR(H5Lget_info_by_idx_hard_name_order_increasing);
+ }
+
+ /* Verify the links have been created */
+ if ((link_exists = H5Lexists(subgroup_id, GET_LINK_INFO_TEST_HARD_LINK_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if hard link '%s' exists\n",
+ GET_LINK_INFO_TEST_HARD_LINK_NAME);
+ PART_ERROR(H5Lget_info_by_idx_hard_name_order_increasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+                HDprintf("    hard link '%s' did not exist\n",
+ GET_LINK_INFO_TEST_HARD_LINK_NAME);
+ PART_ERROR(H5Lget_info_by_idx_hard_name_order_increasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, GET_LINK_INFO_TEST_HARD_LINK_NAME2, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if hard link '%s' exists\n",
+ GET_LINK_INFO_TEST_HARD_LINK_NAME2);
+ PART_ERROR(H5Lget_info_by_idx_hard_name_order_increasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+                HDprintf("    hard link '%s' did not exist\n",
+ GET_LINK_INFO_TEST_HARD_LINK_NAME2);
+ PART_ERROR(H5Lget_info_by_idx_hard_name_order_increasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, GET_LINK_INFO_TEST_HARD_LINK_NAME3, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if hard link '%s' exists\n",
+ GET_LINK_INFO_TEST_HARD_LINK_NAME3);
+ PART_ERROR(H5Lget_info_by_idx_hard_name_order_increasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+                HDprintf("    hard link '%s' did not exist\n",
+ GET_LINK_INFO_TEST_HARD_LINK_NAME3);
+ PART_ERROR(H5Lget_info_by_idx_hard_name_order_increasing);
+ }
+
+ /* Retrieve info of links in turn */
+ HDmemset(&link_info, 0, sizeof(link_info));
+ if (H5Lget_info_by_idx2(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_INC, 0, &link_info,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get hard link info for index %d\n", 0);
+ PART_ERROR(H5Lget_info_by_idx_hard_name_order_increasing);
+ }
+
+ if (link_info.type != H5L_TYPE_HARD) {
+ H5_FAILED();
+ HDprintf(" incorrect link type returned\n");
+ PART_ERROR(H5Lget_info_by_idx_hard_name_order_increasing);
+ }
+
+ if (link_info.corder_valid && (link_info.corder != 0)) {
+ H5_FAILED();
+ HDprintf(" link's creation order value '%lld' did not match expected value '%lld'\n",
+ (long long)link_info.corder, (long long)0);
+ PART_ERROR(H5Lget_info_by_idx_hard_name_order_increasing);
+ }
+
+ HDmemset(&link_info, 0, sizeof(link_info));
+ if (H5Lget_info_by_idx2(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_INC, 1, &link_info,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get hard link info for index %d\n", 1);
+ PART_ERROR(H5Lget_info_by_idx_hard_name_order_increasing);
+ }
+
+ if (link_info.type != H5L_TYPE_HARD) {
+ H5_FAILED();
+ HDprintf(" incorrect link type returned\n");
+ PART_ERROR(H5Lget_info_by_idx_hard_name_order_increasing);
+ }
+
+ if (link_info.corder_valid && (link_info.corder != 1)) {
+ H5_FAILED();
+ HDprintf(" link's creation order value '%lld' did not match expected value '%lld'\n",
+ (long long)link_info.corder, (long long)1);
+ PART_ERROR(H5Lget_info_by_idx_hard_name_order_increasing);
+ }
+
+ HDmemset(&link_info, 0, sizeof(link_info));
+ if (H5Lget_info_by_idx2(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_INC, 2, &link_info,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get hard link info for index %d\n", 2);
+ PART_ERROR(H5Lget_info_by_idx_hard_name_order_increasing);
+ }
+
+ if (link_info.type != H5L_TYPE_HARD) {
+ H5_FAILED();
+ HDprintf(" incorrect link type returned\n");
+ PART_ERROR(H5Lget_info_by_idx_hard_name_order_increasing);
+ }
+
+ if (link_info.corder_valid && (link_info.corder != 2)) {
+ H5_FAILED();
+ HDprintf(" link's creation order value '%lld' did not match expected value '%lld'\n",
+ (long long)link_info.corder, (long long)2);
+ PART_ERROR(H5Lget_info_by_idx_hard_name_order_increasing);
+ }
+
+ if (H5Gclose(subgroup_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close group '%s'\n", GET_LINK_INFO_TEST_SUBGROUP7_NAME);
+ PART_ERROR(H5Lget_info_by_idx_hard_name_order_increasing);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lget_info_by_idx_hard_name_order_increasing);
+
+ H5E_BEGIN_TRY
+ {
+ H5Gclose(subgroup_id);
+ subgroup_id = H5I_INVALID_HID;
+ }
+ H5E_END_TRY;
+
+ PART_BEGIN(H5Lget_info_by_idx_hard_name_order_decreasing)
+ {
+ TESTING_2("H5Lget_info_by_idx2 on hard link by alphabetical order in decreasing order");
+#ifndef NO_DECREASING_ALPHA_ITER_ORDER
+ if ((subgroup_id = H5Gcreate2(group_id, GET_LINK_INFO_TEST_SUBGROUP8_NAME, H5P_DEFAULT, gcpl_id,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create subgroup '%s'\n", GET_LINK_INFO_TEST_SUBGROUP8_NAME);
+ PART_ERROR(H5Lget_info_by_idx_hard_name_order_decreasing);
+ }
+
+ /* Create several hard links */
+ if (H5Lcreate_hard(subgroup_id, ".", subgroup_id, GET_LINK_INFO_TEST_HARD_LINK_NAME, H5P_DEFAULT,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create hard link '%s'\n", GET_LINK_INFO_TEST_HARD_LINK_NAME);
+ PART_ERROR(H5Lget_info_by_idx_hard_name_order_decreasing);
+ }
+
+ if (H5Lcreate_hard(subgroup_id, ".", subgroup_id, GET_LINK_INFO_TEST_HARD_LINK_NAME2, H5P_DEFAULT,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create hard link '%s'\n", GET_LINK_INFO_TEST_HARD_LINK_NAME2);
+ PART_ERROR(H5Lget_info_by_idx_hard_name_order_decreasing);
+ }
+
+ if (H5Lcreate_hard(subgroup_id, ".", subgroup_id, GET_LINK_INFO_TEST_HARD_LINK_NAME3, H5P_DEFAULT,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create hard link '%s'\n", GET_LINK_INFO_TEST_HARD_LINK_NAME3);
+ PART_ERROR(H5Lget_info_by_idx_hard_name_order_decreasing);
+ }
+
+ /* Verify the links have been created */
+ if ((link_exists = H5Lexists(subgroup_id, GET_LINK_INFO_TEST_HARD_LINK_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if hard link '%s' exists\n",
+ GET_LINK_INFO_TEST_HARD_LINK_NAME);
+ PART_ERROR(H5Lget_info_by_idx_hard_name_order_decreasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+                HDprintf("    hard link '%s' did not exist\n",
+ GET_LINK_INFO_TEST_HARD_LINK_NAME);
+ PART_ERROR(H5Lget_info_by_idx_hard_name_order_decreasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, GET_LINK_INFO_TEST_HARD_LINK_NAME2, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if hard link '%s' exists\n",
+ GET_LINK_INFO_TEST_HARD_LINK_NAME2);
+ PART_ERROR(H5Lget_info_by_idx_hard_name_order_decreasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+                HDprintf("    hard link '%s' did not exist\n",
+ GET_LINK_INFO_TEST_HARD_LINK_NAME2);
+ PART_ERROR(H5Lget_info_by_idx_hard_name_order_decreasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, GET_LINK_INFO_TEST_HARD_LINK_NAME3, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if hard link '%s' exists\n",
+ GET_LINK_INFO_TEST_HARD_LINK_NAME3);
+ PART_ERROR(H5Lget_info_by_idx_hard_name_order_decreasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+                HDprintf("    hard link '%s' did not exist\n",
+ GET_LINK_INFO_TEST_HARD_LINK_NAME3);
+ PART_ERROR(H5Lget_info_by_idx_hard_name_order_decreasing);
+ }
+
+ /* Retrieve info of links in turn */
+ HDmemset(&link_info, 0, sizeof(link_info));
+ if (H5Lget_info_by_idx2(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_DEC, 2, &link_info,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+                HDprintf("    couldn't get hard link info for index %d\n", 2);
+ PART_ERROR(H5Lget_info_by_idx_hard_name_order_decreasing);
+ }
+
+ if (link_info.type != H5L_TYPE_HARD) {
+ H5_FAILED();
+ HDprintf(" incorrect link type returned\n");
+ PART_ERROR(H5Lget_info_by_idx_hard_name_order_decreasing);
+ }
+
+ if (link_info.corder_valid && (link_info.corder != 0)) {
+ H5_FAILED();
+ HDprintf(" link's creation order value '%lld' did not match expected value '%lld'\n",
+ (long long)link_info.corder, (long long)0);
+ PART_ERROR(H5Lget_info_by_idx_hard_name_order_decreasing);
+ }
+
+ HDmemset(&link_info, 0, sizeof(link_info));
+ if (H5Lget_info_by_idx2(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_DEC, 1, &link_info,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+                HDprintf("    couldn't get hard link info for index %d\n", 1);
+ PART_ERROR(H5Lget_info_by_idx_hard_name_order_decreasing);
+ }
+
+ if (link_info.type != H5L_TYPE_HARD) {
+ H5_FAILED();
+ HDprintf(" incorrect link type returned\n");
+ PART_ERROR(H5Lget_info_by_idx_hard_name_order_decreasing);
+ }
+
+ if (link_info.corder_valid && (link_info.corder != 1)) {
+ H5_FAILED();
+ HDprintf(" link's creation order value '%lld' did not match expected value '%lld'\n",
+ (long long)link_info.corder, (long long)1);
+ PART_ERROR(H5Lget_info_by_idx_hard_name_order_decreasing);
+ }
+
+ HDmemset(&link_info, 0, sizeof(link_info));
+ if (H5Lget_info_by_idx2(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_DEC, 0, &link_info,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+                HDprintf("    couldn't get hard link info for index %d\n", 0);
+ PART_ERROR(H5Lget_info_by_idx_hard_name_order_decreasing);
+ }
+
+ if (link_info.type != H5L_TYPE_HARD) {
+ H5_FAILED();
+ HDprintf(" incorrect link type returned\n");
+ PART_ERROR(H5Lget_info_by_idx_hard_name_order_decreasing);
+ }
+
+ if (link_info.corder_valid && (link_info.corder != 2)) {
+ H5_FAILED();
+ HDprintf(" link's creation order value '%lld' did not match expected value '%lld'\n",
+ (long long)link_info.corder, (long long)2);
+ PART_ERROR(H5Lget_info_by_idx_hard_name_order_decreasing);
+ }
+
+ if (H5Gclose(subgroup_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close group '%s'\n", GET_LINK_INFO_TEST_SUBGROUP8_NAME);
+ PART_ERROR(H5Lget_info_by_idx_hard_name_order_decreasing);
+ }
+
+ PASSED();
+#else
+ SKIPPED();
+ PART_EMPTY(H5Lget_info_by_idx_hard_name_order_decreasing);
+#endif
+ }
+ PART_END(H5Lget_info_by_idx_hard_name_order_decreasing);
+
+ H5E_BEGIN_TRY
+ {
+ H5Gclose(subgroup_id);
+ subgroup_id = H5I_INVALID_HID;
+ }
+ H5E_END_TRY;
+
+ PART_BEGIN(H5Lget_info_by_idx_soft_crt_order_increasing)
+ {
+ TESTING_2("H5Lget_info_by_idx2 on soft link by creation order in increasing order");
+
+ if ((subgroup_id = H5Gcreate2(group_id, GET_LINK_INFO_TEST_SUBGROUP9_NAME, H5P_DEFAULT, gcpl_id,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create subgroup '%s'\n", GET_LINK_INFO_TEST_SUBGROUP9_NAME);
+ PART_ERROR(H5Lget_info_by_idx_soft_crt_order_increasing);
+ }
+
+ /* Create several soft links */
+ if (H5Lcreate_soft("/" LINK_TEST_GROUP_NAME "/" GET_LINK_INFO_TEST_GROUP_NAME
+ "/" GET_LINK_INFO_TEST_SUBGROUP9_NAME,
+ subgroup_id, GET_LINK_INFO_TEST_SOFT_LINK_NAME, H5P_DEFAULT,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create soft link '%s'\n", GET_LINK_INFO_TEST_SOFT_LINK_NAME);
+ PART_ERROR(H5Lget_info_by_idx_soft_crt_order_increasing);
+ }
+
+ if (H5Lcreate_soft("/" LINK_TEST_GROUP_NAME "/" GET_LINK_INFO_TEST_GROUP_NAME
+ "/" GET_LINK_INFO_TEST_SUBGROUP9_NAME,
+ subgroup_id, GET_LINK_INFO_TEST_SOFT_LINK_NAME2, H5P_DEFAULT,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create soft link '%s'\n", GET_LINK_INFO_TEST_SOFT_LINK_NAME2);
+ PART_ERROR(H5Lget_info_by_idx_soft_crt_order_increasing);
+ }
+
+ if (H5Lcreate_soft("/" LINK_TEST_GROUP_NAME "/" GET_LINK_INFO_TEST_GROUP_NAME
+ "/" GET_LINK_INFO_TEST_SUBGROUP9_NAME,
+ subgroup_id, GET_LINK_INFO_TEST_SOFT_LINK_NAME3, H5P_DEFAULT,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create soft link '%s'\n", GET_LINK_INFO_TEST_SOFT_LINK_NAME3);
+ PART_ERROR(H5Lget_info_by_idx_soft_crt_order_increasing);
+ }
+
+ /* Verify the links have been created */
+ if ((link_exists = H5Lexists(subgroup_id, GET_LINK_INFO_TEST_SOFT_LINK_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if soft link '%s' exists\n",
+ GET_LINK_INFO_TEST_SOFT_LINK_NAME);
+ PART_ERROR(H5Lget_info_by_idx_soft_crt_order_increasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+                HDprintf("    soft link '%s' did not exist\n",
+ GET_LINK_INFO_TEST_SOFT_LINK_NAME);
+ PART_ERROR(H5Lget_info_by_idx_soft_crt_order_increasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, GET_LINK_INFO_TEST_SOFT_LINK_NAME2, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if soft link '%s' exists\n",
+ GET_LINK_INFO_TEST_SOFT_LINK_NAME2);
+ PART_ERROR(H5Lget_info_by_idx_soft_crt_order_increasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+                HDprintf("    soft link '%s' did not exist\n",
+ GET_LINK_INFO_TEST_SOFT_LINK_NAME2);
+ PART_ERROR(H5Lget_info_by_idx_soft_crt_order_increasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, GET_LINK_INFO_TEST_SOFT_LINK_NAME3, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if soft link '%s' exists\n",
+ GET_LINK_INFO_TEST_SOFT_LINK_NAME3);
+ PART_ERROR(H5Lget_info_by_idx_soft_crt_order_increasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+                HDprintf("    soft link '%s' did not exist\n",
+ GET_LINK_INFO_TEST_SOFT_LINK_NAME3);
+ PART_ERROR(H5Lget_info_by_idx_soft_crt_order_increasing);
+ }
+
+ /* Retrieve info of links in turn */
+ HDmemset(&link_info, 0, sizeof(link_info));
+ if (H5Lget_info_by_idx2(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, 0, &link_info,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get soft link info for index %d\n", 0);
+ PART_ERROR(H5Lget_info_by_idx_soft_crt_order_increasing);
+ }
+
+ if (link_info.type != H5L_TYPE_SOFT) {
+ H5_FAILED();
+ HDprintf(" incorrect link type returned\n");
+ PART_ERROR(H5Lget_info_by_idx_soft_crt_order_increasing);
+ }
+
+ link_val_size = strlen("/" LINK_TEST_GROUP_NAME "/" GET_LINK_INFO_TEST_GROUP_NAME
+ "/" GET_LINK_INFO_TEST_SUBGROUP9_NAME) +
+ 1;
+ if (link_info.u.val_size != link_val_size) {
+ H5_FAILED();
+ HDprintf(" link's value size '%zu' did not match expected value '%zu'\n",
+ link_info.u.val_size, link_val_size);
+ PART_ERROR(H5Lget_info_by_idx_soft_crt_order_increasing);
+ }
+
+ if (link_info.corder_valid && (link_info.corder != 0)) {
+ H5_FAILED();
+ HDprintf(" link's creation order value '%lld' did not match expected value '%lld'\n",
+ (long long)link_info.corder, (long long)0);
+ PART_ERROR(H5Lget_info_by_idx_soft_crt_order_increasing);
+ }
+
+ HDmemset(&link_info, 0, sizeof(link_info));
+ if (H5Lget_info_by_idx2(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, 1, &link_info,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get soft link info for index %d\n", 1);
+ PART_ERROR(H5Lget_info_by_idx_soft_crt_order_increasing);
+ }
+
+ if (link_info.type != H5L_TYPE_SOFT) {
+ H5_FAILED();
+ HDprintf(" incorrect link type returned\n");
+ PART_ERROR(H5Lget_info_by_idx_soft_crt_order_increasing);
+ }
+
+ link_val_size = strlen("/" LINK_TEST_GROUP_NAME "/" GET_LINK_INFO_TEST_GROUP_NAME
+ "/" GET_LINK_INFO_TEST_SUBGROUP9_NAME) +
+ 1;
+ if (link_info.u.val_size != link_val_size) {
+ H5_FAILED();
+ HDprintf(" link's value size '%zu' did not match expected value '%zu'\n",
+ link_info.u.val_size, link_val_size);
+ PART_ERROR(H5Lget_info_by_idx_soft_crt_order_increasing);
+ }
+
+ if (link_info.corder_valid && (link_info.corder != 1)) {
+ H5_FAILED();
+ HDprintf(" link's creation order value '%lld' did not match expected value '%lld'\n",
+ (long long)link_info.corder, (long long)1);
+ PART_ERROR(H5Lget_info_by_idx_soft_crt_order_increasing);
+ }
+
+ HDmemset(&link_info, 0, sizeof(link_info));
+ if (H5Lget_info_by_idx2(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, 2, &link_info,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get soft link info for index %d\n", 2);
+ PART_ERROR(H5Lget_info_by_idx_soft_crt_order_increasing);
+ }
+
+ if (link_info.type != H5L_TYPE_SOFT) {
+ H5_FAILED();
+ HDprintf(" incorrect link type returned\n");
+ PART_ERROR(H5Lget_info_by_idx_soft_crt_order_increasing);
+ }
+
+ link_val_size = strlen("/" LINK_TEST_GROUP_NAME "/" GET_LINK_INFO_TEST_GROUP_NAME
+ "/" GET_LINK_INFO_TEST_SUBGROUP9_NAME) +
+ 1;
+ if (link_info.u.val_size != link_val_size) {
+ H5_FAILED();
+ HDprintf(" link's value size '%zu' did not match expected value '%zu'\n",
+ link_info.u.val_size, link_val_size);
+ PART_ERROR(H5Lget_info_by_idx_soft_crt_order_increasing);
+ }
+
+ if (link_info.corder_valid && (link_info.corder != 2)) {
+ H5_FAILED();
+ HDprintf(" link's creation order value '%lld' did not match expected value '%lld'\n",
+ (long long)link_info.corder, (long long)2);
+ PART_ERROR(H5Lget_info_by_idx_soft_crt_order_increasing);
+ }
+
+ if (H5Gclose(subgroup_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close group '%s'\n", GET_LINK_INFO_TEST_SUBGROUP9_NAME);
+ PART_ERROR(H5Lget_info_by_idx_soft_crt_order_increasing);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lget_info_by_idx_soft_crt_order_increasing);
+
+ H5E_BEGIN_TRY
+ {
+ H5Gclose(subgroup_id);
+ subgroup_id = H5I_INVALID_HID;
+ }
+ H5E_END_TRY;
+
+ PART_BEGIN(H5Lget_info_by_idx_soft_crt_order_decreasing)
+ {
+ TESTING_2("H5Lget_info_by_idx2 on soft link by creation order in decreasing order");
+
+ if ((subgroup_id = H5Gcreate2(group_id, GET_LINK_INFO_TEST_SUBGROUP10_NAME, H5P_DEFAULT, gcpl_id,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create subgroup '%s'\n", GET_LINK_INFO_TEST_SUBGROUP10_NAME);
+ PART_ERROR(H5Lget_info_by_idx_soft_crt_order_decreasing);
+ }
+
+ /* Create several soft links */
+ if (H5Lcreate_soft("/" LINK_TEST_GROUP_NAME "/" GET_LINK_INFO_TEST_GROUP_NAME
+ "/" GET_LINK_INFO_TEST_SUBGROUP10_NAME,
+ subgroup_id, GET_LINK_INFO_TEST_SOFT_LINK_NAME, H5P_DEFAULT,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create soft link '%s'\n", GET_LINK_INFO_TEST_SOFT_LINK_NAME);
+ PART_ERROR(H5Lget_info_by_idx_soft_crt_order_decreasing);
+ }
+
+ if (H5Lcreate_soft("/" LINK_TEST_GROUP_NAME "/" GET_LINK_INFO_TEST_GROUP_NAME
+ "/" GET_LINK_INFO_TEST_SUBGROUP10_NAME,
+ subgroup_id, GET_LINK_INFO_TEST_SOFT_LINK_NAME2, H5P_DEFAULT,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create soft link '%s'\n", GET_LINK_INFO_TEST_SOFT_LINK_NAME2);
+ PART_ERROR(H5Lget_info_by_idx_soft_crt_order_decreasing);
+ }
+
+ if (H5Lcreate_soft("/" LINK_TEST_GROUP_NAME "/" GET_LINK_INFO_TEST_GROUP_NAME
+ "/" GET_LINK_INFO_TEST_SUBGROUP10_NAME,
+ subgroup_id, GET_LINK_INFO_TEST_SOFT_LINK_NAME3, H5P_DEFAULT,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create soft link '%s'\n", GET_LINK_INFO_TEST_SOFT_LINK_NAME3);
+ PART_ERROR(H5Lget_info_by_idx_soft_crt_order_decreasing);
+ }
+
+ /* Verify the links have been created */
+ if ((link_exists = H5Lexists(subgroup_id, GET_LINK_INFO_TEST_SOFT_LINK_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if soft link '%s' exists\n",
+ GET_LINK_INFO_TEST_SOFT_LINK_NAME);
+ PART_ERROR(H5Lget_info_by_idx_soft_crt_order_decreasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+                HDprintf("    soft link '%s' did not exist\n",
+ GET_LINK_INFO_TEST_SOFT_LINK_NAME);
+ PART_ERROR(H5Lget_info_by_idx_soft_crt_order_decreasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, GET_LINK_INFO_TEST_SOFT_LINK_NAME2, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if soft link '%s' exists\n",
+ GET_LINK_INFO_TEST_SOFT_LINK_NAME2);
+ PART_ERROR(H5Lget_info_by_idx_soft_crt_order_decreasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+                HDprintf("    soft link '%s' did not exist\n",
+ GET_LINK_INFO_TEST_SOFT_LINK_NAME2);
+ PART_ERROR(H5Lget_info_by_idx_soft_crt_order_decreasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, GET_LINK_INFO_TEST_SOFT_LINK_NAME3, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if soft link '%s' exists\n",
+ GET_LINK_INFO_TEST_SOFT_LINK_NAME3);
+ PART_ERROR(H5Lget_info_by_idx_soft_crt_order_decreasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+                HDprintf("    soft link '%s' did not exist\n",
+ GET_LINK_INFO_TEST_SOFT_LINK_NAME3);
+ PART_ERROR(H5Lget_info_by_idx_soft_crt_order_decreasing);
+ }
+
+ /* Retrieve info of links in turn */
+ HDmemset(&link_info, 0, sizeof(link_info));
+ if (H5Lget_info_by_idx2(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_DEC, 2, &link_info,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get soft link info for index %d\n", 2);
+ PART_ERROR(H5Lget_info_by_idx_soft_crt_order_decreasing);
+ }
+
+ if (link_info.type != H5L_TYPE_SOFT) {
+ H5_FAILED();
+ HDprintf(" incorrect link type returned\n");
+ PART_ERROR(H5Lget_info_by_idx_soft_crt_order_decreasing);
+ }
+
+ link_val_size = strlen("/" LINK_TEST_GROUP_NAME "/" GET_LINK_INFO_TEST_GROUP_NAME
+ "/" GET_LINK_INFO_TEST_SUBGROUP10_NAME) +
+ 1;
+ if (link_info.u.val_size != link_val_size) {
+ H5_FAILED();
+ HDprintf(" link's value size '%zu' did not match expected value '%zu'\n",
+ link_info.u.val_size, link_val_size);
+ PART_ERROR(H5Lget_info_by_idx_soft_crt_order_decreasing);
+ }
+
+ if (link_info.corder_valid && (link_info.corder != 0)) {
+ H5_FAILED();
+ HDprintf(" link's creation order value '%lld' did not match expected value '%lld'\n",
+ (long long)link_info.corder, (long long)0);
+ PART_ERROR(H5Lget_info_by_idx_soft_crt_order_decreasing);
+ }
+
+ HDmemset(&link_info, 0, sizeof(link_info));
+ if (H5Lget_info_by_idx2(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_DEC, 1, &link_info,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get soft link info for index %d\n", 1);
+ PART_ERROR(H5Lget_info_by_idx_soft_crt_order_decreasing);
+ }
+
+ if (link_info.type != H5L_TYPE_SOFT) {
+ H5_FAILED();
+ HDprintf(" incorrect link type returned\n");
+ PART_ERROR(H5Lget_info_by_idx_soft_crt_order_decreasing);
+ }
+
+ link_val_size = strlen("/" LINK_TEST_GROUP_NAME "/" GET_LINK_INFO_TEST_GROUP_NAME
+ "/" GET_LINK_INFO_TEST_SUBGROUP10_NAME) +
+ 1;
+ if (link_info.u.val_size != link_val_size) {
+ H5_FAILED();
+ HDprintf(" link's value size '%zu' did not match expected value '%zu'\n",
+ link_info.u.val_size, link_val_size);
+ PART_ERROR(H5Lget_info_by_idx_soft_crt_order_decreasing);
+ }
+
+ if (link_info.corder_valid && (link_info.corder != 1)) {
+ H5_FAILED();
+ HDprintf(" link's creation order value '%lld' did not match expected value '%lld'\n",
+ (long long)link_info.corder, (long long)1);
+ PART_ERROR(H5Lget_info_by_idx_soft_crt_order_decreasing);
+ }
+
+ HDmemset(&link_info, 0, sizeof(link_info));
+ if (H5Lget_info_by_idx2(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_DEC, 0, &link_info,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get soft link info for index %d\n", 0);
+ PART_ERROR(H5Lget_info_by_idx_soft_crt_order_decreasing);
+ }
+
+ if (link_info.type != H5L_TYPE_SOFT) {
+ H5_FAILED();
+ HDprintf(" incorrect link type returned\n");
+ PART_ERROR(H5Lget_info_by_idx_soft_crt_order_decreasing);
+ }
+
+ link_val_size = strlen("/" LINK_TEST_GROUP_NAME "/" GET_LINK_INFO_TEST_GROUP_NAME
+ "/" GET_LINK_INFO_TEST_SUBGROUP10_NAME) +
+ 1;
+ if (link_info.u.val_size != link_val_size) {
+ H5_FAILED();
+ HDprintf(" link's value size '%zu' did not match expected value '%zu'\n",
+ link_info.u.val_size, link_val_size);
+ PART_ERROR(H5Lget_info_by_idx_soft_crt_order_decreasing);
+ }
+
+ if (link_info.corder_valid && (link_info.corder != 2)) {
+ H5_FAILED();
+ HDprintf(" link's creation order value '%lld' did not match expected value '%lld'\n",
+ (long long)link_info.corder, (long long)2);
+ PART_ERROR(H5Lget_info_by_idx_soft_crt_order_decreasing);
+ }
+
+ if (H5Gclose(subgroup_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close group '%s'\n", GET_LINK_INFO_TEST_SUBGROUP10_NAME);
+ PART_ERROR(H5Lget_info_by_idx_soft_crt_order_decreasing);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lget_info_by_idx_soft_crt_order_decreasing);
+
+ H5E_BEGIN_TRY
+ {
+ H5Gclose(subgroup_id);
+ subgroup_id = H5I_INVALID_HID;
+ }
+ H5E_END_TRY;
+
+ PART_BEGIN(H5Lget_info_by_idx_soft_name_order_increasing)
+ {
+ TESTING_2("H5Lget_info_by_idx2 on soft link by alphabetical order in increasing order");
+
+ if ((subgroup_id = H5Gcreate2(group_id, GET_LINK_INFO_TEST_SUBGROUP11_NAME, H5P_DEFAULT, gcpl_id,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create subgroup '%s'\n", GET_LINK_INFO_TEST_SUBGROUP11_NAME);
+ PART_ERROR(H5Lget_info_by_idx_soft_name_order_increasing);
+ }
+
+ /* Create several soft links */
+ if (H5Lcreate_soft("/" LINK_TEST_GROUP_NAME "/" GET_LINK_INFO_TEST_GROUP_NAME
+ "/" GET_LINK_INFO_TEST_SUBGROUP11_NAME,
+ subgroup_id, GET_LINK_INFO_TEST_SOFT_LINK_NAME, H5P_DEFAULT,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create soft link '%s'\n", GET_LINK_INFO_TEST_SOFT_LINK_NAME);
+ PART_ERROR(H5Lget_info_by_idx_soft_name_order_increasing);
+ }
+
+ if (H5Lcreate_soft("/" LINK_TEST_GROUP_NAME "/" GET_LINK_INFO_TEST_GROUP_NAME
+ "/" GET_LINK_INFO_TEST_SUBGROUP11_NAME,
+ subgroup_id, GET_LINK_INFO_TEST_SOFT_LINK_NAME2, H5P_DEFAULT,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create soft link '%s'\n", GET_LINK_INFO_TEST_SOFT_LINK_NAME2);
+ PART_ERROR(H5Lget_info_by_idx_soft_name_order_increasing);
+ }
+
+ if (H5Lcreate_soft("/" LINK_TEST_GROUP_NAME "/" GET_LINK_INFO_TEST_GROUP_NAME
+ "/" GET_LINK_INFO_TEST_SUBGROUP11_NAME,
+ subgroup_id, GET_LINK_INFO_TEST_SOFT_LINK_NAME3, H5P_DEFAULT,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create soft link '%s'\n", GET_LINK_INFO_TEST_SOFT_LINK_NAME3);
+ PART_ERROR(H5Lget_info_by_idx_soft_name_order_increasing);
+ }
+
+ /* Verify the links have been created */
+ if ((link_exists = H5Lexists(subgroup_id, GET_LINK_INFO_TEST_SOFT_LINK_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if soft link '%s' exists\n",
+ GET_LINK_INFO_TEST_SOFT_LINK_NAME);
+ PART_ERROR(H5Lget_info_by_idx_soft_name_order_increasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+                HDprintf("    soft link '%s' did not exist before info retrieval\n",
+ GET_LINK_INFO_TEST_SOFT_LINK_NAME);
+ PART_ERROR(H5Lget_info_by_idx_soft_name_order_increasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, GET_LINK_INFO_TEST_SOFT_LINK_NAME2, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if soft link '%s' exists\n",
+ GET_LINK_INFO_TEST_SOFT_LINK_NAME2);
+ PART_ERROR(H5Lget_info_by_idx_soft_name_order_increasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+                HDprintf("    soft link '%s' did not exist before info retrieval\n",
+ GET_LINK_INFO_TEST_SOFT_LINK_NAME2);
+ PART_ERROR(H5Lget_info_by_idx_soft_name_order_increasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, GET_LINK_INFO_TEST_SOFT_LINK_NAME3, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if soft link '%s' exists\n",
+ GET_LINK_INFO_TEST_SOFT_LINK_NAME3);
+ PART_ERROR(H5Lget_info_by_idx_soft_name_order_increasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+                HDprintf("    soft link '%s' did not exist before info retrieval\n",
+ GET_LINK_INFO_TEST_SOFT_LINK_NAME3);
+ PART_ERROR(H5Lget_info_by_idx_soft_name_order_increasing);
+ }
+
+ /* Retrieve info of links in turn */
+ HDmemset(&link_info, 0, sizeof(link_info));
+ if (H5Lget_info_by_idx2(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_INC, 0, &link_info,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get soft link info for index %d\n", 0);
+ PART_ERROR(H5Lget_info_by_idx_soft_name_order_increasing);
+ }
+
+ if (link_info.type != H5L_TYPE_SOFT) {
+ H5_FAILED();
+ HDprintf(" incorrect link type returned\n");
+ PART_ERROR(H5Lget_info_by_idx_soft_name_order_increasing);
+ }
+
+ link_val_size = strlen("/" LINK_TEST_GROUP_NAME "/" GET_LINK_INFO_TEST_GROUP_NAME
+ "/" GET_LINK_INFO_TEST_SUBGROUP11_NAME) +
+ 1;
+ if (link_info.u.val_size != link_val_size) {
+ H5_FAILED();
+ HDprintf(" link's value size '%zu' did not match expected value '%zu'\n",
+ link_info.u.val_size, link_val_size);
+ PART_ERROR(H5Lget_info_by_idx_soft_name_order_increasing);
+ }
+
+ if (link_info.corder_valid && (link_info.corder != 0)) {
+ H5_FAILED();
+ HDprintf(" link's creation order value '%lld' did not match expected value '%lld'\n",
+ (long long)link_info.corder, (long long)0);
+ PART_ERROR(H5Lget_info_by_idx_soft_name_order_increasing);
+ }
+
+ HDmemset(&link_info, 0, sizeof(link_info));
+ if (H5Lget_info_by_idx2(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_INC, 1, &link_info,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get soft link info for index %d\n", 1);
+ PART_ERROR(H5Lget_info_by_idx_soft_name_order_increasing);
+ }
+
+ if (link_info.type != H5L_TYPE_SOFT) {
+ H5_FAILED();
+ HDprintf(" incorrect link type returned\n");
+ PART_ERROR(H5Lget_info_by_idx_soft_name_order_increasing);
+ }
+
+ link_val_size = strlen("/" LINK_TEST_GROUP_NAME "/" GET_LINK_INFO_TEST_GROUP_NAME
+ "/" GET_LINK_INFO_TEST_SUBGROUP11_NAME) +
+ 1;
+ if (link_info.u.val_size != link_val_size) {
+ H5_FAILED();
+ HDprintf(" link's value size '%zu' did not match expected value '%zu'\n",
+ link_info.u.val_size, link_val_size);
+ PART_ERROR(H5Lget_info_by_idx_soft_name_order_increasing);
+ }
+
+ if (link_info.corder_valid && (link_info.corder != 1)) {
+ H5_FAILED();
+ HDprintf(" link's creation order value '%lld' did not match expected value '%lld'\n",
+ (long long)link_info.corder, (long long)1);
+ PART_ERROR(H5Lget_info_by_idx_soft_name_order_increasing);
+ }
+
+ HDmemset(&link_info, 0, sizeof(link_info));
+ if (H5Lget_info_by_idx2(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_INC, 2, &link_info,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get soft link info for index %d\n", 2);
+ PART_ERROR(H5Lget_info_by_idx_soft_name_order_increasing);
+ }
+
+ if (link_info.type != H5L_TYPE_SOFT) {
+ H5_FAILED();
+ HDprintf(" incorrect link type returned\n");
+ PART_ERROR(H5Lget_info_by_idx_soft_name_order_increasing);
+ }
+
+ link_val_size = strlen("/" LINK_TEST_GROUP_NAME "/" GET_LINK_INFO_TEST_GROUP_NAME
+ "/" GET_LINK_INFO_TEST_SUBGROUP11_NAME) +
+ 1;
+ if (link_info.u.val_size != link_val_size) {
+ H5_FAILED();
+ HDprintf(" link's value size '%zu' did not match expected value '%zu'\n",
+ link_info.u.val_size, link_val_size);
+ PART_ERROR(H5Lget_info_by_idx_soft_name_order_increasing);
+ }
+
+ if (link_info.corder_valid && (link_info.corder != 2)) {
+ H5_FAILED();
+ HDprintf(" link's creation order value '%lld' did not match expected value '%lld'\n",
+ (long long)link_info.corder, (long long)2);
+ PART_ERROR(H5Lget_info_by_idx_soft_name_order_increasing);
+ }
+
+ if (H5Gclose(subgroup_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close group '%s'\n", GET_LINK_INFO_TEST_SUBGROUP11_NAME);
+ PART_ERROR(H5Lget_info_by_idx_soft_name_order_increasing);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lget_info_by_idx_soft_name_order_increasing);
+
+ H5E_BEGIN_TRY
+ {
+ H5Gclose(subgroup_id);
+ subgroup_id = H5I_INVALID_HID;
+ }
+ H5E_END_TRY;
+
+ PART_BEGIN(H5Lget_info_by_idx_soft_name_order_decreasing)
+ {
+ TESTING_2("H5Lget_info_by_idx2 on soft link by alphabetical order in decreasing order");
+#ifndef NO_DECREASING_ALPHA_ITER_ORDER
+ if ((subgroup_id = H5Gcreate2(group_id, GET_LINK_INFO_TEST_SUBGROUP12_NAME, H5P_DEFAULT, gcpl_id,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create subgroup '%s'\n", GET_LINK_INFO_TEST_SUBGROUP12_NAME);
+ PART_ERROR(H5Lget_info_by_idx_soft_name_order_decreasing);
+ }
+
+ /* Create several soft links */
+ if (H5Lcreate_soft("/" LINK_TEST_GROUP_NAME "/" GET_LINK_INFO_TEST_GROUP_NAME
+ "/" GET_LINK_INFO_TEST_SUBGROUP12_NAME,
+ subgroup_id, GET_LINK_INFO_TEST_SOFT_LINK_NAME, H5P_DEFAULT,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create soft link '%s'\n", GET_LINK_INFO_TEST_SOFT_LINK_NAME);
+ PART_ERROR(H5Lget_info_by_idx_soft_name_order_decreasing);
+ }
+
+ if (H5Lcreate_soft("/" LINK_TEST_GROUP_NAME "/" GET_LINK_INFO_TEST_GROUP_NAME
+ "/" GET_LINK_INFO_TEST_SUBGROUP12_NAME,
+ subgroup_id, GET_LINK_INFO_TEST_SOFT_LINK_NAME2, H5P_DEFAULT,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create soft link '%s'\n", GET_LINK_INFO_TEST_SOFT_LINK_NAME2);
+ PART_ERROR(H5Lget_info_by_idx_soft_name_order_decreasing);
+ }
+
+ if (H5Lcreate_soft("/" LINK_TEST_GROUP_NAME "/" GET_LINK_INFO_TEST_GROUP_NAME
+ "/" GET_LINK_INFO_TEST_SUBGROUP12_NAME,
+ subgroup_id, GET_LINK_INFO_TEST_SOFT_LINK_NAME3, H5P_DEFAULT,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create soft link '%s'\n", GET_LINK_INFO_TEST_SOFT_LINK_NAME3);
+ PART_ERROR(H5Lget_info_by_idx_soft_name_order_decreasing);
+ }
+
+ /* Verify the links have been created */
+ if ((link_exists = H5Lexists(subgroup_id, GET_LINK_INFO_TEST_SOFT_LINK_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if soft link '%s' exists\n",
+ GET_LINK_INFO_TEST_SOFT_LINK_NAME);
+ PART_ERROR(H5Lget_info_by_idx_soft_name_order_decreasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+                HDprintf("    soft link '%s' did not exist before info retrieval\n",
+ GET_LINK_INFO_TEST_SOFT_LINK_NAME);
+ PART_ERROR(H5Lget_info_by_idx_soft_name_order_decreasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, GET_LINK_INFO_TEST_SOFT_LINK_NAME2, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if soft link '%s' exists\n",
+ GET_LINK_INFO_TEST_SOFT_LINK_NAME2);
+ PART_ERROR(H5Lget_info_by_idx_soft_name_order_decreasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+                HDprintf("    soft link '%s' did not exist before info retrieval\n",
+ GET_LINK_INFO_TEST_SOFT_LINK_NAME2);
+ PART_ERROR(H5Lget_info_by_idx_soft_name_order_decreasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, GET_LINK_INFO_TEST_SOFT_LINK_NAME3, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if soft link '%s' exists\n",
+ GET_LINK_INFO_TEST_SOFT_LINK_NAME3);
+ PART_ERROR(H5Lget_info_by_idx_soft_name_order_decreasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+                HDprintf("    soft link '%s' did not exist before info retrieval\n",
+ GET_LINK_INFO_TEST_SOFT_LINK_NAME3);
+ PART_ERROR(H5Lget_info_by_idx_soft_name_order_decreasing);
+ }
+
+ /* Retrieve info of links in turn */
+ HDmemset(&link_info, 0, sizeof(link_info));
+ if (H5Lget_info_by_idx2(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_DEC, 2, &link_info,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+                HDprintf("    couldn't get soft link info for index %d\n", 2);
+ PART_ERROR(H5Lget_info_by_idx_soft_name_order_decreasing);
+ }
+
+ if (link_info.type != H5L_TYPE_SOFT) {
+ H5_FAILED();
+ HDprintf(" incorrect link type returned\n");
+ PART_ERROR(H5Lget_info_by_idx_soft_name_order_decreasing);
+ }
+
+ link_val_size = strlen("/" LINK_TEST_GROUP_NAME "/" GET_LINK_INFO_TEST_GROUP_NAME
+ "/" GET_LINK_INFO_TEST_SUBGROUP12_NAME) +
+ 1;
+ if (link_info.u.val_size != link_val_size) {
+ H5_FAILED();
+                HDprintf("    link's value size '%zu' did not match expected value '%zu'\n",
+                         link_info.u.val_size, link_val_size);
+ PART_ERROR(H5Lget_info_by_idx_soft_name_order_decreasing);
+ }
+
+ if (link_info.corder_valid && (link_info.corder != 0)) {
+ H5_FAILED();
+ HDprintf(" link's creation order value '%lld' did not match expected value '%lld'\n",
+ (long long)link_info.corder, (long long)0);
+ PART_ERROR(H5Lget_info_by_idx_soft_name_order_decreasing);
+ }
+
+ HDmemset(&link_info, 0, sizeof(link_info));
+ if (H5Lget_info_by_idx2(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_DEC, 1, &link_info,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+                HDprintf("    couldn't get soft link info for index %d\n", 1);
+ PART_ERROR(H5Lget_info_by_idx_soft_name_order_decreasing);
+ }
+
+ if (link_info.type != H5L_TYPE_SOFT) {
+ H5_FAILED();
+ HDprintf(" incorrect link type returned\n");
+ PART_ERROR(H5Lget_info_by_idx_soft_name_order_decreasing);
+ }
+
+ link_val_size = strlen("/" LINK_TEST_GROUP_NAME "/" GET_LINK_INFO_TEST_GROUP_NAME
+ "/" GET_LINK_INFO_TEST_SUBGROUP12_NAME) +
+ 1;
+ if (link_info.u.val_size != link_val_size) {
+ H5_FAILED();
+                HDprintf("    link's value size '%zu' did not match expected value '%zu'\n",
+                         link_info.u.val_size, link_val_size);
+ PART_ERROR(H5Lget_info_by_idx_soft_name_order_decreasing);
+ }
+
+ if (link_info.corder_valid && (link_info.corder != 1)) {
+ H5_FAILED();
+ HDprintf(" link's creation order value '%lld' did not match expected value '%lld'\n",
+ (long long)link_info.corder, (long long)1);
+ PART_ERROR(H5Lget_info_by_idx_soft_name_order_decreasing);
+ }
+
+ HDmemset(&link_info, 0, sizeof(link_info));
+ if (H5Lget_info_by_idx2(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_DEC, 0, &link_info,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+                HDprintf("    couldn't get soft link info for index %d\n", 0);
+ PART_ERROR(H5Lget_info_by_idx_soft_name_order_decreasing);
+ }
+
+ if (link_info.type != H5L_TYPE_SOFT) {
+ H5_FAILED();
+ HDprintf(" incorrect link type returned\n");
+ PART_ERROR(H5Lget_info_by_idx_soft_name_order_decreasing);
+ }
+
+ link_val_size = strlen("/" LINK_TEST_GROUP_NAME "/" GET_LINK_INFO_TEST_GROUP_NAME
+ "/" GET_LINK_INFO_TEST_SUBGROUP12_NAME) +
+ 1;
+ if (link_info.u.val_size != link_val_size) {
+ H5_FAILED();
+                HDprintf("    link's value size '%zu' did not match expected value '%zu'\n",
+                         link_info.u.val_size, link_val_size);
+ PART_ERROR(H5Lget_info_by_idx_soft_name_order_decreasing);
+ }
+
+ if (link_info.corder_valid && (link_info.corder != 2)) {
+ H5_FAILED();
+ HDprintf(" link's creation order value '%lld' did not match expected value '%lld'\n",
+ (long long)link_info.corder, (long long)2);
+ PART_ERROR(H5Lget_info_by_idx_soft_name_order_decreasing);
+ }
+
+ if (H5Gclose(subgroup_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close group '%s'\n", GET_LINK_INFO_TEST_SUBGROUP12_NAME);
+ PART_ERROR(H5Lget_info_by_idx_soft_name_order_decreasing);
+ }
+
+ PASSED();
+#else
+ SKIPPED();
+ PART_EMPTY(H5Lget_info_by_idx_soft_name_order_decreasing);
+#endif
+ }
+ PART_END(H5Lget_info_by_idx_soft_name_order_decreasing);
+
+ H5E_BEGIN_TRY
+ {
+ H5Gclose(subgroup_id);
+ subgroup_id = H5I_INVALID_HID;
+ }
+ H5E_END_TRY;
+
+ PART_BEGIN(H5Lget_info_by_idx_external_crt_order_increasing)
+ {
+ TESTING_2("H5Lget_info_by_idx2 on external link by creation order in increasing order");
+#ifndef NO_EXTERNAL_LINKS
+ HDsnprintf(ext_link_filename, H5_API_TEST_FILENAME_MAX_LENGTH, "%s",
+ EXTERNAL_LINK_TEST_FILE_NAME);
+
+ if ((ext_file_id = H5Fcreate(ext_link_filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create file '%s' for external link to reference\n", ext_link_filename);
+ PART_ERROR(H5Lget_info_by_idx_external_crt_order_increasing);
+ }
+
+ if (H5Fclose(ext_file_id) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't close file '%s'\n", ext_link_filename);
+ PART_ERROR(H5Lget_info_by_idx_external_crt_order_increasing);
+ }
+
+ if ((subgroup_id = H5Gcreate2(group_id, GET_LINK_INFO_TEST_SUBGROUP13_NAME, H5P_DEFAULT, gcpl_id,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create subgroup '%s'\n", GET_LINK_INFO_TEST_SUBGROUP13_NAME);
+ PART_ERROR(H5Lget_info_by_idx_external_crt_order_increasing);
+ }
+
+ /* Create several external links */
+ ext_objname = "/";
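+            /* Each external link targets the root group ("/") of the external file
+             * created above. */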
+ if (H5Lcreate_external(ext_link_filename, ext_objname, subgroup_id,
+ GET_LINK_INFO_TEST_EXT_LINK_NAME, H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create external link '%s'\n", GET_LINK_INFO_TEST_EXT_LINK_NAME);
+ PART_ERROR(H5Lget_info_by_idx_external_crt_order_increasing);
+ }
+
+ if (H5Lcreate_external(ext_link_filename, ext_objname, subgroup_id,
+ GET_LINK_INFO_TEST_EXT_LINK_NAME2, H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create external link '%s'\n", GET_LINK_INFO_TEST_EXT_LINK_NAME2);
+ PART_ERROR(H5Lget_info_by_idx_external_crt_order_increasing);
+ }
+
+ if (H5Lcreate_external(ext_link_filename, ext_objname, subgroup_id,
+ GET_LINK_INFO_TEST_EXT_LINK_NAME3, H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create external link '%s'\n", GET_LINK_INFO_TEST_EXT_LINK_NAME3);
+ PART_ERROR(H5Lget_info_by_idx_external_crt_order_increasing);
+ }
+
+ /* Verify the links have been created */
+ if ((link_exists = H5Lexists(subgroup_id, GET_LINK_INFO_TEST_EXT_LINK_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if external link '%s' exists\n",
+ GET_LINK_INFO_TEST_EXT_LINK_NAME);
+ PART_ERROR(H5Lget_info_by_idx_external_crt_order_increasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+                HDprintf("    external link '%s' did not exist before info retrieval\n",
+ GET_LINK_INFO_TEST_EXT_LINK_NAME);
+ PART_ERROR(H5Lget_info_by_idx_external_crt_order_increasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, GET_LINK_INFO_TEST_EXT_LINK_NAME2, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if external link '%s' exists\n",
+ GET_LINK_INFO_TEST_EXT_LINK_NAME2);
+ PART_ERROR(H5Lget_info_by_idx_external_crt_order_increasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+                HDprintf("    external link '%s' did not exist before info retrieval\n",
+ GET_LINK_INFO_TEST_EXT_LINK_NAME2);
+ PART_ERROR(H5Lget_info_by_idx_external_crt_order_increasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, GET_LINK_INFO_TEST_EXT_LINK_NAME3, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if external link '%s' exists\n",
+ GET_LINK_INFO_TEST_EXT_LINK_NAME3);
+ PART_ERROR(H5Lget_info_by_idx_external_crt_order_increasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+                HDprintf("    external link '%s' did not exist before info retrieval\n",
+ GET_LINK_INFO_TEST_EXT_LINK_NAME3);
+ PART_ERROR(H5Lget_info_by_idx_external_crt_order_increasing);
+ }
+
+ /* Retrieve info of links in turn */
+ HDmemset(&link_info, 0, sizeof(link_info));
+ if (H5Lget_info_by_idx2(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, 0, &link_info,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+                HDprintf("    couldn't get external link info for index %d\n", 0);
+ PART_ERROR(H5Lget_info_by_idx_external_crt_order_increasing);
+ }
+
+ if (link_info.type != H5L_TYPE_EXTERNAL) {
+ H5_FAILED();
+ HDprintf(" incorrect link type returned\n");
+ PART_ERROR(H5Lget_info_by_idx_external_crt_order_increasing);
+ }
+
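+            /* An external link's value is expected to consist of a version/flags byte
+             * followed by the NUL-terminated target file name and the NUL-terminated
+             * object path; the size computed below accounts for each of those parts. */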
+ link_val_size = 1 + strlen(ext_link_filename) + 1 + strlen(ext_objname) + 1;
+ if (link_info.u.val_size != link_val_size) {
+ H5_FAILED();
+                HDprintf("    link's value size '%zu' did not match expected value '%zu'\n",
+                         link_info.u.val_size, link_val_size);
+ PART_ERROR(H5Lget_info_by_idx_external_crt_order_increasing);
+ }
+
+ if (link_info.corder_valid && (link_info.corder != 0)) {
+ H5_FAILED();
+ HDprintf(" link's creation order value '%lld' did not match expected value '%lld'\n",
+ (long long)link_info.corder, (long long)0);
+ PART_ERROR(H5Lget_info_by_idx_external_crt_order_increasing);
+ }
+
+ HDmemset(&link_info, 0, sizeof(link_info));
+ if (H5Lget_info_by_idx2(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, 1, &link_info,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+                HDprintf("    couldn't get external link info for index %d\n", 1);
+ PART_ERROR(H5Lget_info_by_idx_external_crt_order_increasing);
+ }
+
+ if (link_info.type != H5L_TYPE_EXTERNAL) {
+ H5_FAILED();
+ HDprintf(" incorrect link type returned\n");
+ PART_ERROR(H5Lget_info_by_idx_external_crt_order_increasing);
+ }
+
+ link_val_size = 1 + strlen(ext_link_filename) + 1 + strlen(ext_objname) + 1;
+ if (link_info.u.val_size != link_val_size) {
+ H5_FAILED();
+                HDprintf("    link's value size '%zu' did not match expected value '%zu'\n",
+                         link_info.u.val_size, link_val_size);
+ PART_ERROR(H5Lget_info_by_idx_external_crt_order_increasing);
+ }
+
+ if (link_info.corder_valid && (link_info.corder != 1)) {
+ H5_FAILED();
+ HDprintf(" link's creation order value '%lld' did not match expected value '%lld'\n",
+ (long long)link_info.corder, (long long)1);
+ PART_ERROR(H5Lget_info_by_idx_external_crt_order_increasing);
+ }
+
+ HDmemset(&link_info, 0, sizeof(link_info));
+ if (H5Lget_info_by_idx2(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, 2, &link_info,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+                HDprintf("    couldn't get external link info for index %d\n", 2);
+ PART_ERROR(H5Lget_info_by_idx_external_crt_order_increasing);
+ }
+
+ if (link_info.type != H5L_TYPE_EXTERNAL) {
+ H5_FAILED();
+ HDprintf(" incorrect link type returned\n");
+ PART_ERROR(H5Lget_info_by_idx_external_crt_order_increasing);
+ }
+
+ link_val_size = 1 + strlen(ext_link_filename) + 1 + strlen(ext_objname) + 1;
+ if (link_info.u.val_size != link_val_size) {
+ H5_FAILED();
+                HDprintf("    link's value size '%zu' did not match expected value '%zu'\n",
+                         link_info.u.val_size, link_val_size);
+ PART_ERROR(H5Lget_info_by_idx_external_crt_order_increasing);
+ }
+
+ if (link_info.corder_valid && (link_info.corder != 2)) {
+ H5_FAILED();
+ HDprintf(" link's creation order value '%lld' did not match expected value '%lld'\n",
+ (long long)link_info.corder, (long long)2);
+ PART_ERROR(H5Lget_info_by_idx_external_crt_order_increasing);
+ }
+
+ if (H5Gclose(subgroup_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close group '%s'\n", GET_LINK_INFO_TEST_SUBGROUP13_NAME);
+ PART_ERROR(H5Lget_info_by_idx_external_crt_order_increasing);
+ }
+
+ PASSED();
+#else
+ SKIPPED();
+ PART_EMPTY(H5Lget_info_by_idx_external_crt_order_increasing);
+#endif
+ }
+ PART_END(H5Lget_info_by_idx_external_crt_order_increasing);
+
+ H5E_BEGIN_TRY
+ {
+ H5Gclose(subgroup_id);
+ subgroup_id = H5I_INVALID_HID;
+ H5Fclose(ext_file_id);
+ ext_file_id = H5I_INVALID_HID;
+ }
+ H5E_END_TRY;
+
+ PART_BEGIN(H5Lget_info_by_idx_external_crt_order_decreasing)
+ {
+ TESTING_2("H5Lget_info_by_idx2 on external link by creation order in decreasing order");
+#ifndef NO_EXTERNAL_LINKS
+ HDsnprintf(ext_link_filename, H5_API_TEST_FILENAME_MAX_LENGTH, "%s",
+ EXTERNAL_LINK_TEST_FILE_NAME);
+
+ if ((ext_file_id = H5Fcreate(ext_link_filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create file '%s' for external link to reference\n", ext_link_filename);
+ PART_ERROR(H5Lget_info_by_idx_external_crt_order_decreasing);
+ }
+
+ if (H5Fclose(ext_file_id) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't close file '%s'\n", ext_link_filename);
+ PART_ERROR(H5Lget_info_by_idx_external_crt_order_decreasing);
+ }
+
+ if ((subgroup_id = H5Gcreate2(group_id, GET_LINK_INFO_TEST_SUBGROUP14_NAME, H5P_DEFAULT, gcpl_id,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create subgroup '%s'\n", GET_LINK_INFO_TEST_SUBGROUP14_NAME);
+ PART_ERROR(H5Lget_info_by_idx_external_crt_order_decreasing);
+ }
+
+ /* Create several external links */
+ ext_objname = "/";
+ if (H5Lcreate_external(ext_link_filename, ext_objname, subgroup_id,
+ GET_LINK_INFO_TEST_EXT_LINK_NAME, H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create external link '%s'\n", GET_LINK_INFO_TEST_EXT_LINK_NAME);
+ PART_ERROR(H5Lget_info_by_idx_external_crt_order_decreasing);
+ }
+
+ if (H5Lcreate_external(ext_link_filename, ext_objname, subgroup_id,
+ GET_LINK_INFO_TEST_EXT_LINK_NAME2, H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create external link '%s'\n", GET_LINK_INFO_TEST_EXT_LINK_NAME2);
+ PART_ERROR(H5Lget_info_by_idx_external_crt_order_decreasing);
+ }
+
+ if (H5Lcreate_external(ext_link_filename, ext_objname, subgroup_id,
+ GET_LINK_INFO_TEST_EXT_LINK_NAME3, H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create external link '%s'\n", GET_LINK_INFO_TEST_EXT_LINK_NAME3);
+ PART_ERROR(H5Lget_info_by_idx_external_crt_order_decreasing);
+ }
+
+ /* Verify the links have been created */
+ if ((link_exists = H5Lexists(subgroup_id, GET_LINK_INFO_TEST_EXT_LINK_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if external link '%s' exists\n",
+ GET_LINK_INFO_TEST_EXT_LINK_NAME);
+ PART_ERROR(H5Lget_info_by_idx_external_crt_order_decreasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+                HDprintf("    external link '%s' did not exist before info retrieval\n",
+ GET_LINK_INFO_TEST_EXT_LINK_NAME);
+ PART_ERROR(H5Lget_info_by_idx_external_crt_order_decreasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, GET_LINK_INFO_TEST_EXT_LINK_NAME2, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if external link '%s' exists\n",
+ GET_LINK_INFO_TEST_EXT_LINK_NAME2);
+ PART_ERROR(H5Lget_info_by_idx_external_crt_order_decreasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+                HDprintf("    external link '%s' did not exist before info retrieval\n",
+ GET_LINK_INFO_TEST_EXT_LINK_NAME2);
+ PART_ERROR(H5Lget_info_by_idx_external_crt_order_decreasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, GET_LINK_INFO_TEST_EXT_LINK_NAME3, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if external link '%s' exists\n",
+ GET_LINK_INFO_TEST_EXT_LINK_NAME3);
+ PART_ERROR(H5Lget_info_by_idx_external_crt_order_decreasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+                HDprintf("    external link '%s' did not exist before info retrieval\n",
+ GET_LINK_INFO_TEST_EXT_LINK_NAME3);
+ PART_ERROR(H5Lget_info_by_idx_external_crt_order_decreasing);
+ }
+
+ /* Retrieve info of links in turn */
+ HDmemset(&link_info, 0, sizeof(link_info));
+ if (H5Lget_info_by_idx2(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_DEC, 2, &link_info,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+                HDprintf("    couldn't get external link info for index %d\n", 2);
+ PART_ERROR(H5Lget_info_by_idx_external_crt_order_decreasing);
+ }
+
+ if (link_info.type != H5L_TYPE_EXTERNAL) {
+ H5_FAILED();
+ HDprintf(" incorrect link type returned\n");
+ PART_ERROR(H5Lget_info_by_idx_external_crt_order_decreasing);
+ }
+
+ link_val_size = 1 + strlen(ext_link_filename) + 1 + strlen(ext_objname) + 1;
+ if (link_info.u.val_size != link_val_size) {
+ H5_FAILED();
+                HDprintf("    link's value size '%zu' did not match expected value '%zu'\n",
+                         link_info.u.val_size, link_val_size);
+ PART_ERROR(H5Lget_info_by_idx_external_crt_order_decreasing);
+ }
+
+ if (link_info.corder_valid && (link_info.corder != 0)) {
+ H5_FAILED();
+ HDprintf(" link's creation order value '%lld' did not match expected value '%lld'\n",
+ (long long)link_info.corder, (long long)0);
+ PART_ERROR(H5Lget_info_by_idx_external_crt_order_decreasing);
+ }
+
+ HDmemset(&link_info, 0, sizeof(link_info));
+ if (H5Lget_info_by_idx2(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_DEC, 1, &link_info,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+                HDprintf("    couldn't get external link info for index %d\n", 1);
+ PART_ERROR(H5Lget_info_by_idx_external_crt_order_decreasing);
+ }
+
+ if (link_info.type != H5L_TYPE_EXTERNAL) {
+ H5_FAILED();
+ HDprintf(" incorrect link type returned\n");
+ PART_ERROR(H5Lget_info_by_idx_external_crt_order_decreasing);
+ }
+
+ link_val_size = 1 + strlen(ext_link_filename) + 1 + strlen(ext_objname) + 1;
+ if (link_info.u.val_size != link_val_size) {
+ H5_FAILED();
+                HDprintf("    link's value size '%zu' did not match expected value '%zu'\n",
+                         link_info.u.val_size, link_val_size);
+ PART_ERROR(H5Lget_info_by_idx_external_crt_order_decreasing);
+ }
+
+ if (link_info.corder_valid && (link_info.corder != 1)) {
+ H5_FAILED();
+ HDprintf(" link's creation order value '%lld' did not match expected value '%lld'\n",
+ (long long)link_info.corder, (long long)1);
+ PART_ERROR(H5Lget_info_by_idx_external_crt_order_decreasing);
+ }
+
+ HDmemset(&link_info, 0, sizeof(link_info));
+ if (H5Lget_info_by_idx2(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_DEC, 0, &link_info,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+                HDprintf("    couldn't get external link info for index %d\n", 0);
+ PART_ERROR(H5Lget_info_by_idx_external_crt_order_decreasing);
+ }
+
+ if (link_info.type != H5L_TYPE_EXTERNAL) {
+ H5_FAILED();
+ HDprintf(" incorrect link type returned\n");
+ PART_ERROR(H5Lget_info_by_idx_external_crt_order_decreasing);
+ }
+
+ link_val_size = 1 + strlen(ext_link_filename) + 1 + strlen(ext_objname) + 1;
+ if (link_info.u.val_size != link_val_size) {
+ H5_FAILED();
+                HDprintf("    link's value size '%zu' did not match expected value '%zu'\n",
+                         link_info.u.val_size, link_val_size);
+ PART_ERROR(H5Lget_info_by_idx_external_crt_order_decreasing);
+ }
+
+ if (link_info.corder_valid && (link_info.corder != 2)) {
+ H5_FAILED();
+ HDprintf(" link's creation order value '%lld' did not match expected value '%lld'\n",
+ (long long)link_info.corder, (long long)2);
+ PART_ERROR(H5Lget_info_by_idx_external_crt_order_decreasing);
+ }
+
+ if (H5Gclose(subgroup_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close group '%s'\n", GET_LINK_INFO_TEST_SUBGROUP14_NAME);
+ PART_ERROR(H5Lget_info_by_idx_external_crt_order_decreasing);
+ }
+
+ PASSED();
+#else
+ SKIPPED();
+ PART_EMPTY(H5Lget_info_by_idx_external_crt_order_decreasing);
+#endif
+ }
+ PART_END(H5Lget_info_by_idx_external_crt_order_decreasing);
+
+ H5E_BEGIN_TRY
+ {
+ H5Gclose(subgroup_id);
+ subgroup_id = H5I_INVALID_HID;
+ H5Fclose(ext_file_id);
+ ext_file_id = H5I_INVALID_HID;
+ }
+ H5E_END_TRY;
+
+ PART_BEGIN(H5Lget_info_by_idx_external_name_order_increasing)
+ {
+ TESTING_2("H5Lget_info_by_idx2 on external link by alphabetical order in increasing order");
+#ifndef NO_EXTERNAL_LINKS
+ HDsnprintf(ext_link_filename, H5_API_TEST_FILENAME_MAX_LENGTH, "%s",
+ EXTERNAL_LINK_TEST_FILE_NAME);
+
+ if ((ext_file_id = H5Fcreate(ext_link_filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create file '%s' for external link to reference\n", ext_link_filename);
+ PART_ERROR(H5Lget_info_by_idx_external_name_order_increasing);
+ }
+
+ if (H5Fclose(ext_file_id) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't close file '%s'\n", ext_link_filename);
+ PART_ERROR(H5Lget_info_by_idx_external_name_order_increasing);
+ }
+
+ if ((subgroup_id = H5Gcreate2(group_id, GET_LINK_INFO_TEST_SUBGROUP15_NAME, H5P_DEFAULT, gcpl_id,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create subgroup '%s'\n", GET_LINK_INFO_TEST_SUBGROUP15_NAME);
+ PART_ERROR(H5Lget_info_by_idx_external_name_order_increasing);
+ }
+
+ /* Create several external links */
+ ext_objname = "/";
+ if (H5Lcreate_external(ext_link_filename, ext_objname, subgroup_id,
+ GET_LINK_INFO_TEST_EXT_LINK_NAME, H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create external link '%s'\n", GET_LINK_INFO_TEST_EXT_LINK_NAME);
+ PART_ERROR(H5Lget_info_by_idx_external_name_order_increasing);
+ }
+
+ if (H5Lcreate_external(ext_link_filename, ext_objname, subgroup_id,
+ GET_LINK_INFO_TEST_EXT_LINK_NAME2, H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create external link '%s'\n", GET_LINK_INFO_TEST_EXT_LINK_NAME2);
+ PART_ERROR(H5Lget_info_by_idx_external_name_order_increasing);
+ }
+
+ if (H5Lcreate_external(ext_link_filename, ext_objname, subgroup_id,
+ GET_LINK_INFO_TEST_EXT_LINK_NAME3, H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create external link '%s'\n", GET_LINK_INFO_TEST_EXT_LINK_NAME3);
+ PART_ERROR(H5Lget_info_by_idx_external_name_order_increasing);
+ }
+
+ /* Verify the links have been created */
+ if ((link_exists = H5Lexists(subgroup_id, GET_LINK_INFO_TEST_EXT_LINK_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if external link '%s' exists\n",
+ GET_LINK_INFO_TEST_EXT_LINK_NAME);
+ PART_ERROR(H5Lget_info_by_idx_external_name_order_increasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+                HDprintf("    external link '%s' did not exist before info retrieval\n",
+ GET_LINK_INFO_TEST_EXT_LINK_NAME);
+ PART_ERROR(H5Lget_info_by_idx_external_name_order_increasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, GET_LINK_INFO_TEST_EXT_LINK_NAME2, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if external link '%s' exists\n",
+ GET_LINK_INFO_TEST_EXT_LINK_NAME2);
+ PART_ERROR(H5Lget_info_by_idx_external_name_order_increasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+                HDprintf("    external link '%s' did not exist before info retrieval\n",
+ GET_LINK_INFO_TEST_EXT_LINK_NAME2);
+ PART_ERROR(H5Lget_info_by_idx_external_name_order_increasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, GET_LINK_INFO_TEST_EXT_LINK_NAME3, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if external link '%s' exists\n",
+ GET_LINK_INFO_TEST_EXT_LINK_NAME3);
+ PART_ERROR(H5Lget_info_by_idx_external_name_order_increasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+                HDprintf("    external link '%s' did not exist before info retrieval\n",
+ GET_LINK_INFO_TEST_EXT_LINK_NAME3);
+ PART_ERROR(H5Lget_info_by_idx_external_name_order_increasing);
+ }
+
+ /* Retrieve info of links in turn */
+ HDmemset(&link_info, 0, sizeof(link_info));
+ if (H5Lget_info_by_idx2(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_INC, 0, &link_info,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+                HDprintf("    couldn't get external link info for index %d\n", 0);
+ PART_ERROR(H5Lget_info_by_idx_external_name_order_increasing);
+ }
+
+ if (link_info.type != H5L_TYPE_EXTERNAL) {
+ H5_FAILED();
+ HDprintf(" incorrect link type returned\n");
+ PART_ERROR(H5Lget_info_by_idx_external_name_order_increasing);
+ }
+
+ link_val_size = 1 + strlen(ext_link_filename) + 1 + strlen(ext_objname) + 1;
+ if (link_info.u.val_size != link_val_size) {
+ H5_FAILED();
+                HDprintf("    link's value size '%zu' did not match expected value '%zu'\n",
+                         link_info.u.val_size, link_val_size);
+ PART_ERROR(H5Lget_info_by_idx_external_name_order_increasing);
+ }
+
+ if (link_info.corder_valid && (link_info.corder != 0)) {
+ H5_FAILED();
+ HDprintf(" link's creation order value '%lld' did not match expected value '%lld'\n",
+ (long long)link_info.corder, (long long)0);
+ PART_ERROR(H5Lget_info_by_idx_external_name_order_increasing);
+ }
+
+ HDmemset(&link_info, 0, sizeof(link_info));
+ if (H5Lget_info_by_idx2(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_INC, 1, &link_info,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+                HDprintf("    couldn't get external link info for index %d\n", 1);
+ PART_ERROR(H5Lget_info_by_idx_external_name_order_increasing);
+ }
+
+ if (link_info.type != H5L_TYPE_EXTERNAL) {
+ H5_FAILED();
+ HDprintf(" incorrect link type returned\n");
+ PART_ERROR(H5Lget_info_by_idx_external_name_order_increasing);
+ }
+
+ link_val_size = 1 + strlen(ext_link_filename) + 1 + strlen(ext_objname) + 1;
+ if (link_info.u.val_size != link_val_size) {
+ H5_FAILED();
+                HDprintf("    link's value size '%zu' did not match expected value '%zu'\n",
+                         link_info.u.val_size, link_val_size);
+ PART_ERROR(H5Lget_info_by_idx_external_name_order_increasing);
+ }
+
+ if (link_info.corder_valid && (link_info.corder != 1)) {
+ H5_FAILED();
+ HDprintf(" link's creation order value '%lld' did not match expected value '%lld'\n",
+ (long long)link_info.corder, (long long)1);
+ PART_ERROR(H5Lget_info_by_idx_external_name_order_increasing);
+ }
+
+ HDmemset(&link_info, 0, sizeof(link_info));
+ if (H5Lget_info_by_idx2(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_INC, 2, &link_info,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+                HDprintf("    couldn't get external link info for index %d\n", 2);
+ PART_ERROR(H5Lget_info_by_idx_external_name_order_increasing);
+ }
+
+ if (link_info.type != H5L_TYPE_EXTERNAL) {
+ H5_FAILED();
+ HDprintf(" incorrect link type returned\n");
+ PART_ERROR(H5Lget_info_by_idx_external_name_order_increasing);
+ }
+
+ link_val_size = 1 + strlen(ext_link_filename) + 1 + strlen(ext_objname) + 1;
+ if (link_info.u.val_size != link_val_size) {
+ H5_FAILED();
+                HDprintf("    link's value size '%zu' did not match expected value '%zu'\n",
+                         link_info.u.val_size, link_val_size);
+ PART_ERROR(H5Lget_info_by_idx_external_name_order_increasing);
+ }
+
+ if (link_info.corder_valid && (link_info.corder != 2)) {
+ H5_FAILED();
+ HDprintf(" link's creation order value '%lld' did not match expected value '%lld'\n",
+ (long long)link_info.corder, (long long)2);
+ PART_ERROR(H5Lget_info_by_idx_external_name_order_increasing);
+ }
+
+ if (H5Gclose(subgroup_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close group '%s'\n", GET_LINK_INFO_TEST_SUBGROUP15_NAME);
+ PART_ERROR(H5Lget_info_by_idx_external_name_order_increasing);
+ }
+
+ PASSED();
+#else
+ SKIPPED();
+ PART_EMPTY(H5Lget_info_by_idx_external_name_order_increasing);
+#endif
+ }
+ PART_END(H5Lget_info_by_idx_external_name_order_increasing);
+
+ H5E_BEGIN_TRY
+ {
+ H5Gclose(subgroup_id);
+ subgroup_id = H5I_INVALID_HID;
+ H5Fclose(ext_file_id);
+ ext_file_id = H5I_INVALID_HID;
+ }
+ H5E_END_TRY;
+
+ PART_BEGIN(H5Lget_info_by_idx_external_name_order_decreasing)
+ {
+ TESTING_2("H5Lget_info_by_idx2 on external link by alphabetical order in decreasing order");
+#ifndef NO_DECREASING_ALPHA_ITER_ORDER
+ HDsnprintf(ext_link_filename, H5_API_TEST_FILENAME_MAX_LENGTH, "%s",
+ EXTERNAL_LINK_TEST_FILE_NAME);
+
+ if ((ext_file_id = H5Fcreate(ext_link_filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create file '%s' for external link to reference\n", ext_link_filename);
+ PART_ERROR(H5Lget_info_by_idx_external_name_order_decreasing);
+ }
+
+ if (H5Fclose(ext_file_id) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't close file '%s'\n", ext_link_filename);
+ PART_ERROR(H5Lget_info_by_idx_external_name_order_decreasing);
+ }
+
+ if ((subgroup_id = H5Gcreate2(group_id, GET_LINK_INFO_TEST_SUBGROUP16_NAME, H5P_DEFAULT, gcpl_id,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create subgroup '%s'\n", GET_LINK_INFO_TEST_SUBGROUP16_NAME);
+ PART_ERROR(H5Lget_info_by_idx_external_name_order_decreasing);
+ }
+
+ /* Create several external links */
+ ext_objname = "/";
+ if (H5Lcreate_external(ext_link_filename, ext_objname, subgroup_id,
+ GET_LINK_INFO_TEST_EXT_LINK_NAME, H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create external link '%s'\n", GET_LINK_INFO_TEST_EXT_LINK_NAME);
+ PART_ERROR(H5Lget_info_by_idx_external_name_order_decreasing);
+ }
+
+ if (H5Lcreate_external(ext_link_filename, ext_objname, subgroup_id,
+ GET_LINK_INFO_TEST_EXT_LINK_NAME2, H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create external link '%s'\n", GET_LINK_INFO_TEST_EXT_LINK_NAME2);
+ PART_ERROR(H5Lget_info_by_idx_external_name_order_decreasing);
+ }
+
+ if (H5Lcreate_external(ext_link_filename, ext_objname, subgroup_id,
+ GET_LINK_INFO_TEST_EXT_LINK_NAME3, H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create external link '%s'\n", GET_LINK_INFO_TEST_EXT_LINK_NAME3);
+ PART_ERROR(H5Lget_info_by_idx_external_name_order_decreasing);
+ }
+
+ /* Verify the links have been created */
+ if ((link_exists = H5Lexists(subgroup_id, GET_LINK_INFO_TEST_EXT_LINK_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if external link '%s' exists\n",
+ GET_LINK_INFO_TEST_EXT_LINK_NAME);
+ PART_ERROR(H5Lget_info_by_idx_external_name_order_decreasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+                HDprintf("    external link '%s' did not exist before info retrieval\n",
+ GET_LINK_INFO_TEST_EXT_LINK_NAME);
+ PART_ERROR(H5Lget_info_by_idx_external_name_order_decreasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, GET_LINK_INFO_TEST_EXT_LINK_NAME2, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if external link '%s' exists\n",
+ GET_LINK_INFO_TEST_EXT_LINK_NAME2);
+ PART_ERROR(H5Lget_info_by_idx_external_name_order_decreasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+                HDprintf("    external link '%s' did not exist before info retrieval\n",
+ GET_LINK_INFO_TEST_EXT_LINK_NAME2);
+ PART_ERROR(H5Lget_info_by_idx_external_name_order_decreasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, GET_LINK_INFO_TEST_EXT_LINK_NAME3, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if external link '%s' exists\n",
+ GET_LINK_INFO_TEST_EXT_LINK_NAME3);
+ PART_ERROR(H5Lget_info_by_idx_external_name_order_decreasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+                HDprintf("    external link '%s' did not exist before info retrieval\n",
+ GET_LINK_INFO_TEST_EXT_LINK_NAME3);
+ PART_ERROR(H5Lget_info_by_idx_external_name_order_decreasing);
+ }
+
+ /* Retrieve info of links in turn */
+ HDmemset(&link_info, 0, sizeof(link_info));
+ if (H5Lget_info_by_idx2(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_DEC, 2, &link_info,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+                HDprintf("    couldn't get external link info for index %d\n", 2);
+ PART_ERROR(H5Lget_info_by_idx_external_name_order_decreasing);
+ }
+
+ if (link_info.type != H5L_TYPE_EXTERNAL) {
+ H5_FAILED();
+ HDprintf(" incorrect link type returned\n");
+ PART_ERROR(H5Lget_info_by_idx_external_name_order_decreasing);
+ }
+
+ link_val_size = 1 + strlen(ext_link_filename) + 1 + strlen(ext_objname) + 1;
+ if (link_info.u.val_size != link_val_size) {
+ H5_FAILED();
+                HDprintf("    link's value size '%zu' did not match expected value '%zu'\n",
+                         link_info.u.val_size, link_val_size);
+ PART_ERROR(H5Lget_info_by_idx_external_name_order_decreasing);
+ }
+
+ if (link_info.corder_valid && (link_info.corder != 0)) {
+ H5_FAILED();
+ HDprintf(" link's creation order value '%lld' did not match expected value '%lld'\n",
+ (long long)link_info.corder, (long long)0);
+ PART_ERROR(H5Lget_info_by_idx_external_name_order_decreasing);
+ }
+
+ HDmemset(&link_info, 0, sizeof(link_info));
+ if (H5Lget_info_by_idx2(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_DEC, 1, &link_info,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+                HDprintf("    couldn't get external link info for index %d\n", 1);
+ PART_ERROR(H5Lget_info_by_idx_external_name_order_decreasing);
+ }
+
+ if (link_info.type != H5L_TYPE_EXTERNAL) {
+ H5_FAILED();
+ HDprintf(" incorrect link type returned\n");
+ PART_ERROR(H5Lget_info_by_idx_external_name_order_decreasing);
+ }
+
+ link_val_size = 1 + strlen(ext_link_filename) + 1 + strlen(ext_objname) + 1;
+ if (link_info.u.val_size != link_val_size) {
+ H5_FAILED();
+                HDprintf("    link's value size '%zu' did not match expected value '%zu'\n",
+                         link_info.u.val_size, link_val_size);
+ PART_ERROR(H5Lget_info_by_idx_external_name_order_decreasing);
+ }
+
+ if (link_info.corder_valid && (link_info.corder != 1)) {
+ H5_FAILED();
+ HDprintf(" link's creation order value '%lld' did not match expected value '%lld'\n",
+ (long long)link_info.corder, (long long)1);
+ PART_ERROR(H5Lget_info_by_idx_external_name_order_decreasing);
+ }
+
+ HDmemset(&link_info, 0, sizeof(link_info));
+ if (H5Lget_info_by_idx2(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_DEC, 0, &link_info,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+                HDprintf("    couldn't get external link info for index %d\n", 0);
+ PART_ERROR(H5Lget_info_by_idx_external_name_order_decreasing);
+ }
+
+ if (link_info.type != H5L_TYPE_EXTERNAL) {
+ H5_FAILED();
+ HDprintf(" incorrect link type returned\n");
+ PART_ERROR(H5Lget_info_by_idx_external_name_order_decreasing);
+ }
+
+ link_val_size = 1 + strlen(ext_link_filename) + 1 + strlen(ext_objname) + 1;
+ if (link_info.u.val_size != link_val_size) {
+ H5_FAILED();
+                HDprintf("    link's value size '%zu' did not match expected value '%zu'\n",
+                         link_info.u.val_size, link_val_size);
+ PART_ERROR(H5Lget_info_by_idx_external_name_order_decreasing);
+ }
+
+ if (link_info.corder_valid && (link_info.corder != 2)) {
+ H5_FAILED();
+ HDprintf(" link's creation order value '%lld' did not match expected value '%lld'\n",
+ (long long)link_info.corder, (long long)2);
+ PART_ERROR(H5Lget_info_by_idx_external_name_order_decreasing);
+ }
+
+ if (H5Gclose(subgroup_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close group '%s'\n", GET_LINK_INFO_TEST_SUBGROUP16_NAME);
+ PART_ERROR(H5Lget_info_by_idx_external_name_order_decreasing);
+ }
+
+ PASSED();
+#else
+ SKIPPED();
+ PART_EMPTY(H5Lget_info_by_idx_external_name_order_decreasing);
+#endif
+ }
+ PART_END(H5Lget_info_by_idx_external_name_order_decreasing);
+
+ H5E_BEGIN_TRY
+ {
+ H5Gclose(subgroup_id);
+ subgroup_id = H5I_INVALID_HID;
+ H5Fclose(ext_file_id);
+ ext_file_id = H5I_INVALID_HID;
+ }
+ H5E_END_TRY;
+
+ PART_BEGIN(H5Lget_info_by_idx_ud_crt_order_increasing)
+ {
+ TESTING_2("H5Lget_info_by_idx2 on user-defined link by creation order in increasing order");
+
+ SKIPPED();
+ PART_EMPTY(H5Lget_info_by_idx_ud_crt_order_increasing);
+ }
+ PART_END(H5Lget_info_by_idx_ud_crt_order_increasing);
+
+ H5E_BEGIN_TRY
+ {
+ H5Gclose(subgroup_id);
+ subgroup_id = H5I_INVALID_HID;
+ }
+ H5E_END_TRY;
+
+ PART_BEGIN(H5Lget_info_by_idx_ud_crt_order_decreasing)
+ {
+ TESTING_2("H5Lget_info_by_idx2 on user-defined link by creation order in decreasing order");
+
+ SKIPPED();
+ PART_EMPTY(H5Lget_info_by_idx_ud_crt_order_decreasing);
+ }
+ PART_END(H5Lget_info_by_idx_ud_crt_order_decreasing);
+
+ H5E_BEGIN_TRY
+ {
+ H5Gclose(subgroup_id);
+ subgroup_id = H5I_INVALID_HID;
+ }
+ H5E_END_TRY;
+
+ PART_BEGIN(H5Lget_info_by_idx_ud_name_order_increasing)
+ {
+ TESTING_2("H5Lget_info_by_idx2 on user-defined link by alphabetical order in increasing order");
+
+ SKIPPED();
+ PART_EMPTY(H5Lget_info_by_idx_ud_name_order_increasing);
+ }
+ PART_END(H5Lget_info_by_idx_ud_name_order_increasing);
+
+ H5E_BEGIN_TRY
+ {
+ H5Gclose(subgroup_id);
+ subgroup_id = H5I_INVALID_HID;
+ }
+ H5E_END_TRY;
+
+ PART_BEGIN(H5Lget_info_by_idx_ud_name_order_decreasing)
+ {
+ TESTING_2("H5Lget_info_by_idx2 on user-defined link by alphabetical order in decreasing order");
+
+ SKIPPED();
+ PART_EMPTY(H5Lget_info_by_idx_ud_name_order_decreasing);
+ }
+ PART_END(H5Lget_info_by_idx_ud_name_order_decreasing);
+
+ H5E_BEGIN_TRY
+ {
+ H5Gclose(subgroup_id);
+ subgroup_id = H5I_INVALID_HID;
+ }
+ H5E_END_TRY;
+ }
+ END_MULTIPART;
+
+ TESTING_2("test cleanup");
+
+ if (H5Pclose(gcpl_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Pclose(gcpl_id);
+ H5Gclose(subgroup_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(ext_file_id);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to check that a link's info can't be retrieved
+ * when H5Lget_info(_by_idx)2 is passed invalid parameters.
+ */
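+/*
+ * Each of the calls below is wrapped in H5E_BEGIN_TRY/H5E_END_TRY so that automatic
+ * error printing is suppressed, and each call is expected to return a negative value.
+ */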
+static int
+test_get_link_info_invalid_params(void)
+{
+ H5L_info2_t link_info;
+ herr_t err_ret = -1;
+ htri_t link_exists;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t gcpl_id = H5I_INVALID_HID;
+
+ TESTING_MULTIPART("link info retrieval with invalid parameters");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_LINK_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_LINK_MORE) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_HARD_LINKS) || !(vol_cap_flags_g & H5VL_CAP_FLAG_CREATION_ORDER)) {
+ SKIPPED();
+        HDprintf("    API functions for basic file, group, basic link, more link, hard link, or "
+                 "creation order aren't supported with this connector\n");
+ return 0;
+ }
+
+ TESTING_2("test setup");
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, LINK_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", LINK_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((gcpl_id = H5Pcreate(H5P_GROUP_CREATE)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create a GCPL\n");
+ goto error;
+ }
+
+ if (H5Pset_link_creation_order(gcpl_id, H5P_CRT_ORDER_TRACKED | H5P_CRT_ORDER_INDEXED) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't enable link creation order tracking and indexing on GCPL\n");
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, GET_LINK_INFO_INVALID_PARAMS_TEST_GROUP_NAME, H5P_DEFAULT,
+ gcpl_id, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container subgroup '%s'\n",
+ GET_LINK_INFO_INVALID_PARAMS_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if (H5Lcreate_hard(group_id, ".", group_id, GET_LINK_INFO_INVALID_PARAMS_TEST_HARD_LINK_NAME, H5P_DEFAULT,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create hard link '%s'\n", GET_LINK_INFO_INVALID_PARAMS_TEST_HARD_LINK_NAME);
+ goto error;
+ }
+
+ /* Verify the link has been created */
+ if ((link_exists = H5Lexists(group_id, GET_LINK_INFO_INVALID_PARAMS_TEST_HARD_LINK_NAME, H5P_DEFAULT)) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if hard link '%s' exists\n",
+ GET_LINK_INFO_INVALID_PARAMS_TEST_HARD_LINK_NAME);
+ goto error;
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" hard link did not exist\n");
+ goto error;
+ }
+
+ PASSED();
+
+ BEGIN_MULTIPART
+ {
+ PART_BEGIN(H5Lget_info_invalid_loc_id)
+ {
+ TESTING_2("H5Lget_info2 with an invalid location ID");
+
+ HDmemset(&link_info, 0, sizeof(link_info));
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Lget_info2(H5I_INVALID_HID, GET_LINK_INFO_INVALID_PARAMS_TEST_HARD_LINK_NAME,
+ &link_info, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Lget_info2 succeeded with an invalid location ID!\n");
+ PART_ERROR(H5Lget_info_invalid_loc_id);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lget_info_invalid_loc_id);
+
+ PART_BEGIN(H5Lget_info_invalid_link_name)
+ {
+ TESTING_2("H5Lget_info2 with an invalid link name");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Lget_info2(group_id, NULL, &link_info, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Lget_info2 succeeded with a NULL link name!\n");
+ PART_ERROR(H5Lget_info_invalid_link_name);
+ }
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Lget_info2(group_id, "", &link_info, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Lget_info2 succeeded with an invalid link name of ''!\n");
+ PART_ERROR(H5Lget_info_invalid_link_name);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lget_info_invalid_link_name);
+
+ PART_BEGIN(H5Lget_info_invalid_lapl)
+ {
+ TESTING_2("H5Lget_info2 with an invalid LAPL");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Lget_info2(group_id, GET_LINK_INFO_INVALID_PARAMS_TEST_HARD_LINK_NAME, &link_info,
+ H5I_INVALID_HID);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Lget_info2 succeeded with an invalid LAPL!\n");
+ PART_ERROR(H5Lget_info_invalid_lapl);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lget_info_invalid_lapl);
+
+ HDmemset(&link_info, 0, sizeof(link_info));
+
+ PART_BEGIN(H5Lget_info_by_idx_invalid_loc_id)
+ {
+ TESTING_2("H5Lget_info_by_idx2 with an invalid location ID");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Lget_info_by_idx2(H5I_INVALID_HID, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, 0,
+ &link_info, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Lget_info_by_idx2 succeeded with an invalid location ID!\n");
+ PART_ERROR(H5Lget_info_by_idx_invalid_loc_id);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lget_info_by_idx_invalid_loc_id);
+
+ PART_BEGIN(H5Lget_info_by_idx_invalid_grp_name)
+ {
+ TESTING_2("H5Lget_info_by_idx2 with an invalid group name");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Lget_info_by_idx2(group_id, NULL, H5_INDEX_CRT_ORDER, H5_ITER_INC, 0, &link_info,
+ H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Lget_info_by_idx2 succeeded with a NULL group name!\n");
+ PART_ERROR(H5Lget_info_by_idx_invalid_grp_name);
+ }
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Lget_info_by_idx2(group_id, "", H5_INDEX_CRT_ORDER, H5_ITER_INC, 0, &link_info,
+ H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Lget_info_by_idx2 succeeded with an invalid group name of ''!\n");
+ PART_ERROR(H5Lget_info_by_idx_invalid_grp_name);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lget_info_by_idx_invalid_grp_name);
+
+ PART_BEGIN(H5Lget_info_by_idx_invalid_index_type)
+ {
+ TESTING_2("H5Lget_info_by_idx2 with an invalid index type");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Lget_info_by_idx2(group_id, ".", H5_INDEX_UNKNOWN, H5_ITER_INC, 0, &link_info,
+ H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Lget_info_by_idx2 succeeded with invalid index type H5_INDEX_UNKNOWN!\n");
+ PART_ERROR(H5Lget_info_by_idx_invalid_index_type);
+ }
+
+ H5E_BEGIN_TRY
+ {
+ err_ret =
+ H5Lget_info_by_idx2(group_id, ".", H5_INDEX_N, H5_ITER_INC, 0, &link_info, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Lget_info_by_idx2 succeeded with invalid index type H5_INDEX_N!\n");
+ PART_ERROR(H5Lget_info_by_idx_invalid_index_type);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lget_info_by_idx_invalid_index_type);
+
+ PART_BEGIN(H5Lget_info_by_idx_invalid_iter_order)
+ {
+ TESTING_2("H5Lget_info_by_idx2 with an invalid iteration ordering");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Lget_info_by_idx2(group_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_UNKNOWN, 0,
+ &link_info, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(
+ " H5Lget_info_by_idx2 succeeded with invalid iteration ordering H5_ITER_UNKNOWN!\n");
+ PART_ERROR(H5Lget_info_by_idx_invalid_iter_order);
+ }
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Lget_info_by_idx2(group_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_N, 0, &link_info,
+ H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Lget_info_by_idx2 succeeded with invalid iteration ordering H5_ITER_N!\n");
+ PART_ERROR(H5Lget_info_by_idx_invalid_iter_order);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lget_info_by_idx_invalid_iter_order);
+
+ PART_BEGIN(H5Lget_info_by_idx_invalid_lapl)
+ {
+ TESTING_2("H5Lget_info_by_idx2 with an invalid LAPL");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Lget_info_by_idx2(group_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, 0, &link_info,
+ H5I_INVALID_HID);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Lget_info_by_idx2 succeeded with an invalid LAPL!\n");
+ PART_ERROR(H5Lget_info_by_idx_invalid_lapl);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lget_info_by_idx_invalid_lapl);
+ }
+ END_MULTIPART;
+
+ TESTING_2("test cleanup");
+
+ if (H5Pclose(gcpl_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Pclose(gcpl_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to check that a link's name can be correctly
+ * retrieved by using H5Lget_name_by_idx.
+ */
+static int
+test_get_link_name(void)
+{
+ ssize_t link_name_buf_size = 0;
+ htri_t link_exists;
+ hid_t file_id = H5I_INVALID_HID, ext_file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t subgroup_id = H5I_INVALID_HID;
+ hid_t gcpl_id = H5I_INVALID_HID;
+ char link_name_buf[GET_LINK_NAME_TEST_BUF_SIZE];
+#ifndef NO_EXTERNAL_LINKS
+ char ext_link_filename[H5_API_TEST_FILENAME_MAX_LENGTH];
+#endif
+
+ TESTING_MULTIPART("link name retrieval");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_LINK_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_LINK_MORE) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_HARD_LINKS) || !(vol_cap_flags_g & H5VL_CAP_FLAG_EXTERNAL_LINKS) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_SOFT_LINKS) || !(vol_cap_flags_g & H5VL_CAP_FLAG_CREATION_ORDER)) {
+ SKIPPED();
+        HDprintf("    API functions for basic file, group, basic link, more link, soft link, hard link, "
+                 "external link, or creation order aren't supported with this connector\n");
+ return 0;
+ }
+
+ TESTING_2("test setup");
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, LINK_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", LINK_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((gcpl_id = H5Pcreate(H5P_GROUP_CREATE)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create GCPL for link creation order tracking\n");
+ goto error;
+ }
+
+ if (H5Pset_link_creation_order(gcpl_id, H5P_CRT_ORDER_TRACKED) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't set link creation order tracking\n");
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, GET_LINK_NAME_TEST_GROUP_NAME, H5P_DEFAULT, gcpl_id,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container subgroup '%s'\n", GET_LINK_NAME_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ PASSED();
+
+ BEGIN_MULTIPART
+ {
+ PART_BEGIN(H5Lget_name_by_idx_hard_crt_order_increasing)
+ {
+ TESTING_2("H5Lget_name_by_idx on hard link by creation order in increasing order");
+
+ /* Create group to hold some links */
+ if ((subgroup_id = H5Gcreate2(group_id, GET_LINK_NAME_TEST_HARD_SUBGROUP_NAME, H5P_DEFAULT,
+ gcpl_id, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container subgroup '%s'\n",
+ GET_LINK_NAME_TEST_HARD_SUBGROUP_NAME);
+ PART_ERROR(H5Lget_name_by_idx_hard_crt_order_increasing);
+ }
+
+ /* Create several hard links in reverse order to test creation order */
+ if (H5Lcreate_hard(subgroup_id, ".", subgroup_id, GET_LINK_NAME_TEST_HARD_LINK_NAME3, H5P_DEFAULT,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to create hard link '%s'\n", GET_LINK_NAME_TEST_HARD_LINK_NAME3);
+ PART_ERROR(H5Lget_name_by_idx_hard_crt_order_increasing);
+ }
+
+ if (H5Lcreate_hard(subgroup_id, ".", subgroup_id, GET_LINK_NAME_TEST_HARD_LINK_NAME2, H5P_DEFAULT,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to create hard link '%s'\n", GET_LINK_NAME_TEST_HARD_LINK_NAME2);
+ PART_ERROR(H5Lget_name_by_idx_hard_crt_order_increasing);
+ }
+
+ if (H5Lcreate_hard(subgroup_id, ".", subgroup_id, GET_LINK_NAME_TEST_HARD_LINK_NAME, H5P_DEFAULT,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to create hard link '%s'\n", GET_LINK_NAME_TEST_HARD_LINK_NAME);
+ PART_ERROR(H5Lget_name_by_idx_hard_crt_order_increasing);
+ }
+
+ /* Verify the links have been created */
+ if ((link_exists = H5Lexists(subgroup_id, GET_LINK_NAME_TEST_HARD_LINK_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if hard link '%s' exists\n",
+ GET_LINK_NAME_TEST_HARD_LINK_NAME);
+ PART_ERROR(H5Lget_name_by_idx_hard_crt_order_increasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" hard link '%s' did not exist before name retrieval\n",
+ GET_LINK_NAME_TEST_HARD_LINK_NAME);
+ PART_ERROR(H5Lget_name_by_idx_hard_crt_order_increasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, GET_LINK_NAME_TEST_HARD_LINK_NAME2, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if hard link '%s' exists\n",
+ GET_LINK_NAME_TEST_HARD_LINK_NAME2);
+ PART_ERROR(H5Lget_name_by_idx_hard_crt_order_increasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" hard link '%s' did not exist before name retrieval\n",
+ GET_LINK_NAME_TEST_HARD_LINK_NAME2);
+ PART_ERROR(H5Lget_name_by_idx_hard_crt_order_increasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, GET_LINK_NAME_TEST_HARD_LINK_NAME3, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if hard link '%s' exists\n",
+ GET_LINK_NAME_TEST_HARD_LINK_NAME3);
+ PART_ERROR(H5Lget_name_by_idx_hard_crt_order_increasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" hard link '%s' did not exist before name retrieval\n",
+ GET_LINK_NAME_TEST_HARD_LINK_NAME3);
+ PART_ERROR(H5Lget_name_by_idx_hard_crt_order_increasing);
+ }
+
+ /* Retrieve link names */
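+ /* Each name is retrieved with two calls to H5Lget_name_by_idx: the first
+ * passes a NULL buffer and returns the length of the link name (not
+ * counting the NUL terminator), the second passes a buffer of length + 1
+ * to read the name itself. The same pattern is used throughout this test.
+ */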
+ if ((link_name_buf_size = H5Lget_name_by_idx(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, 0,
+ NULL, 0, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve link name size\n");
+ PART_ERROR(H5Lget_name_by_idx_hard_crt_order_increasing);
+ }
+
+ if (H5Lget_name_by_idx(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, 0, link_name_buf,
+ (size_t)link_name_buf_size + 1, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve link name\n");
+ PART_ERROR(H5Lget_name_by_idx_hard_crt_order_increasing);
+ }
+
+ if (HDstrncmp(link_name_buf, GET_LINK_NAME_TEST_HARD_LINK_NAME3,
+ strlen(GET_LINK_NAME_TEST_HARD_LINK_NAME3) + 1)) {
+ H5_FAILED();
+ HDprintf(" link name '%s' did not match expected name '%s'\n", link_name_buf,
+ GET_LINK_NAME_TEST_HARD_LINK_NAME3);
+ PART_ERROR(H5Lget_name_by_idx_hard_crt_order_increasing);
+ }
+
+ if ((link_name_buf_size = H5Lget_name_by_idx(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, 1,
+ NULL, 0, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve link name size\n");
+ PART_ERROR(H5Lget_name_by_idx_hard_crt_order_increasing);
+ }
+
+ if (H5Lget_name_by_idx(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, 1, link_name_buf,
+ (size_t)link_name_buf_size + 1, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve link name\n");
+ PART_ERROR(H5Lget_name_by_idx_hard_crt_order_increasing);
+ }
+
+ if (HDstrncmp(link_name_buf, GET_LINK_NAME_TEST_HARD_LINK_NAME2,
+ strlen(GET_LINK_NAME_TEST_HARD_LINK_NAME2) + 1)) {
+ H5_FAILED();
+ HDprintf(" link name '%s' did not match expected name '%s'\n", link_name_buf,
+ GET_LINK_NAME_TEST_HARD_LINK_NAME2);
+ PART_ERROR(H5Lget_name_by_idx_hard_crt_order_increasing);
+ }
+
+ if ((link_name_buf_size = H5Lget_name_by_idx(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, 2,
+ NULL, 0, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve link name size\n");
+ PART_ERROR(H5Lget_name_by_idx_hard_crt_order_increasing);
+ }
+
+ if (H5Lget_name_by_idx(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, 2, link_name_buf,
+ (size_t)link_name_buf_size + 1, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve link name\n");
+ PART_ERROR(H5Lget_name_by_idx_hard_crt_order_increasing);
+ }
+
+ if (HDstrncmp(link_name_buf, GET_LINK_NAME_TEST_HARD_LINK_NAME,
+ strlen(GET_LINK_NAME_TEST_HARD_LINK_NAME) + 1)) {
+ H5_FAILED();
+ HDprintf(" link name '%s' did not match expected name '%s'\n", link_name_buf,
+ GET_LINK_NAME_TEST_HARD_LINK_NAME);
+ PART_ERROR(H5Lget_name_by_idx_hard_crt_order_increasing);
+ }
+
+ if (H5Gclose(subgroup_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close group\n");
+ PART_ERROR(H5Lget_name_by_idx_hard_crt_order_increasing);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lget_name_by_idx_hard_crt_order_increasing);
+
+ H5E_BEGIN_TRY
+ {
+ H5Gclose(subgroup_id);
+ subgroup_id = H5I_INVALID_HID;
+ }
+ H5E_END_TRY;
+
+ PART_BEGIN(H5Lget_name_by_idx_hard_crt_order_decreasing)
+ {
+ TESTING_2("H5Lget_name_by_idx on hard link by creation order in decreasing order");
+
+ /* Create group to hold some links */
+ if ((subgroup_id = H5Gcreate2(group_id, GET_LINK_NAME_TEST_HARD_SUBGROUP_NAME2, H5P_DEFAULT,
+ gcpl_id, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container subgroup '%s'\n",
+ GET_LINK_NAME_TEST_HARD_SUBGROUP_NAME2);
+ PART_ERROR(H5Lget_name_by_idx_hard_crt_order_decreasing);
+ }
+
+ /* Create several hard links in reverse order to test creation order */
+ if (H5Lcreate_hard(subgroup_id, ".", subgroup_id, GET_LINK_NAME_TEST_HARD_LINK_NAME3, H5P_DEFAULT,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to create hard link '%s'\n", GET_LINK_NAME_TEST_HARD_LINK_NAME3);
+ PART_ERROR(H5Lget_name_by_idx_hard_crt_order_decreasing);
+ }
+
+ if (H5Lcreate_hard(subgroup_id, ".", subgroup_id, GET_LINK_NAME_TEST_HARD_LINK_NAME2, H5P_DEFAULT,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to create hard link '%s'\n", GET_LINK_NAME_TEST_HARD_LINK_NAME2);
+ PART_ERROR(H5Lget_name_by_idx_hard_crt_order_decreasing);
+ }
+
+ if (H5Lcreate_hard(subgroup_id, ".", subgroup_id, GET_LINK_NAME_TEST_HARD_LINK_NAME, H5P_DEFAULT,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to create hard link '%s'\n", GET_LINK_NAME_TEST_HARD_LINK_NAME);
+ PART_ERROR(H5Lget_name_by_idx_hard_crt_order_decreasing);
+ }
+
+ /* Verify the links have been created */
+ if ((link_exists = H5Lexists(subgroup_id, GET_LINK_NAME_TEST_HARD_LINK_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if hard link '%s' exists\n",
+ GET_LINK_NAME_TEST_HARD_LINK_NAME);
+ PART_ERROR(H5Lget_name_by_idx_hard_crt_order_decreasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" hard link '%s' did not exist before name retrieval\n",
+ GET_LINK_NAME_TEST_HARD_LINK_NAME);
+ PART_ERROR(H5Lget_name_by_idx_hard_crt_order_decreasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, GET_LINK_NAME_TEST_HARD_LINK_NAME2, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if hard link '%s' exists\n",
+ GET_LINK_NAME_TEST_HARD_LINK_NAME2);
+ PART_ERROR(H5Lget_name_by_idx_hard_crt_order_decreasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" hard link '%s' did not exist before name retrieval\n",
+ GET_LINK_NAME_TEST_HARD_LINK_NAME2);
+ PART_ERROR(H5Lget_name_by_idx_hard_crt_order_decreasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, GET_LINK_NAME_TEST_HARD_LINK_NAME3, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if hard link '%s' exists\n",
+ GET_LINK_NAME_TEST_HARD_LINK_NAME3);
+ PART_ERROR(H5Lget_name_by_idx_hard_crt_order_decreasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" hard link '%s' did not exist before name retrieval\n",
+ GET_LINK_NAME_TEST_HARD_LINK_NAME3);
+ PART_ERROR(H5Lget_name_by_idx_hard_crt_order_decreasing);
+ }
+
+ /* Retrieve link names */
+ if ((link_name_buf_size = H5Lget_name_by_idx(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_DEC, 0,
+ NULL, 0, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve link name size\n");
+ PART_ERROR(H5Lget_name_by_idx_hard_crt_order_decreasing);
+ }
+
+ if (H5Lget_name_by_idx(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_DEC, 0, link_name_buf,
+ (size_t)link_name_buf_size + 1, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve link name\n");
+ PART_ERROR(H5Lget_name_by_idx_hard_crt_order_decreasing);
+ }
+
+ if (HDstrncmp(link_name_buf, GET_LINK_NAME_TEST_HARD_LINK_NAME,
+ strlen(GET_LINK_NAME_TEST_HARD_LINK_NAME) + 1)) {
+ H5_FAILED();
+ HDprintf(" link name '%s' did not match expected name '%s'\n", link_name_buf,
+ GET_LINK_NAME_TEST_HARD_LINK_NAME);
+ PART_ERROR(H5Lget_name_by_idx_hard_crt_order_decreasing);
+ }
+
+ if ((link_name_buf_size = H5Lget_name_by_idx(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_DEC, 1,
+ NULL, 0, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve link name size\n");
+ PART_ERROR(H5Lget_name_by_idx_hard_crt_order_decreasing);
+ }
+
+ if (H5Lget_name_by_idx(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_DEC, 1, link_name_buf,
+ (size_t)link_name_buf_size + 1, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve link name\n");
+ PART_ERROR(H5Lget_name_by_idx_hard_crt_order_decreasing);
+ }
+
+ if (HDstrncmp(link_name_buf, GET_LINK_NAME_TEST_HARD_LINK_NAME2,
+ strlen(GET_LINK_NAME_TEST_HARD_LINK_NAME2) + 1)) {
+ H5_FAILED();
+ HDprintf(" link name '%s' did not match expected name '%s'\n", link_name_buf,
+ GET_LINK_NAME_TEST_HARD_LINK_NAME2);
+ PART_ERROR(H5Lget_name_by_idx_hard_crt_order_decreasing);
+ }
+
+ if ((link_name_buf_size = H5Lget_name_by_idx(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_DEC, 2,
+ NULL, 0, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve link name size\n");
+ PART_ERROR(H5Lget_name_by_idx_hard_crt_order_decreasing);
+ }
+
+ if (H5Lget_name_by_idx(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_DEC, 2, link_name_buf,
+ (size_t)link_name_buf_size + 1, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve link name\n");
+ PART_ERROR(H5Lget_name_by_idx_hard_crt_order_decreasing);
+ }
+
+ if (HDstrncmp(link_name_buf, GET_LINK_NAME_TEST_HARD_LINK_NAME3,
+ strlen(GET_LINK_NAME_TEST_HARD_LINK_NAME3) + 1)) {
+ H5_FAILED();
+ HDprintf(" link name '%s' did not match expected name '%s'\n", link_name_buf,
+ GET_LINK_NAME_TEST_HARD_LINK_NAME3);
+ PART_ERROR(H5Lget_name_by_idx_hard_crt_order_decreasing);
+ }
+
+ if (H5Gclose(subgroup_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close group\n");
+ PART_ERROR(H5Lget_name_by_idx_hard_crt_order_decreasing);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lget_name_by_idx_hard_crt_order_decreasing);
+
+ H5E_BEGIN_TRY
+ {
+ H5Gclose(subgroup_id);
+ subgroup_id = H5I_INVALID_HID;
+ }
+ H5E_END_TRY;
+
+ PART_BEGIN(H5Lget_name_by_idx_hard_name_order_increasing)
+ {
+ TESTING_2("H5Lget_name_by_idx on hard link by alphabetical order in increasing order");
+
+ /* Create group to hold some links */
+ if ((subgroup_id = H5Gcreate2(group_id, GET_LINK_NAME_TEST_HARD_SUBGROUP_NAME3, H5P_DEFAULT,
+ gcpl_id, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container subgroup '%s'\n",
+ GET_LINK_NAME_TEST_HARD_SUBGROUP_NAME3);
+ PART_ERROR(H5Lget_name_by_idx_hard_name_order_increasing);
+ }
+
+ /* Create several hard links */
+ if (H5Lcreate_hard(subgroup_id, ".", subgroup_id, GET_LINK_NAME_TEST_HARD_LINK_NAME, H5P_DEFAULT,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to create hard link '%s'\n", GET_LINK_NAME_TEST_HARD_LINK_NAME);
+ PART_ERROR(H5Lget_name_by_idx_hard_name_order_increasing);
+ }
+
+ if (H5Lcreate_hard(subgroup_id, ".", subgroup_id, GET_LINK_NAME_TEST_HARD_LINK_NAME2, H5P_DEFAULT,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to create hard link '%s'\n", GET_LINK_NAME_TEST_HARD_LINK_NAME2);
+ PART_ERROR(H5Lget_name_by_idx_hard_name_order_increasing);
+ }
+
+ if (H5Lcreate_hard(subgroup_id, ".", subgroup_id, GET_LINK_NAME_TEST_HARD_LINK_NAME3, H5P_DEFAULT,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to create hard link '%s'\n", GET_LINK_NAME_TEST_HARD_LINK_NAME3);
+ PART_ERROR(H5Lget_name_by_idx_hard_name_order_increasing);
+ }
+
+ /* Verify the links have been created */
+ if ((link_exists = H5Lexists(subgroup_id, GET_LINK_NAME_TEST_HARD_LINK_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if hard link '%s' exists\n",
+ GET_LINK_NAME_TEST_HARD_LINK_NAME);
+ PART_ERROR(H5Lget_name_by_idx_hard_name_order_increasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" hard link '%s' did not exist before name retrieval\n",
+ GET_LINK_NAME_TEST_HARD_LINK_NAME);
+ PART_ERROR(H5Lget_name_by_idx_hard_name_order_increasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, GET_LINK_NAME_TEST_HARD_LINK_NAME2, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if hard link '%s' exists\n",
+ GET_LINK_NAME_TEST_HARD_LINK_NAME2);
+ PART_ERROR(H5Lget_name_by_idx_hard_name_order_increasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" hard link '%s' did not exist before name retrieval\n",
+ GET_LINK_NAME_TEST_HARD_LINK_NAME2);
+ PART_ERROR(H5Lget_name_by_idx_hard_name_order_increasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, GET_LINK_NAME_TEST_HARD_LINK_NAME3, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if hard link '%s' exists\n",
+ GET_LINK_NAME_TEST_HARD_LINK_NAME3);
+ PART_ERROR(H5Lget_name_by_idx_hard_name_order_increasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" hard link '%s' did not exist before name retrieval\n",
+ GET_LINK_NAME_TEST_HARD_LINK_NAME3);
+ PART_ERROR(H5Lget_name_by_idx_hard_name_order_increasing);
+ }
+
+ /* Retrieve link names */
+ if ((link_name_buf_size = H5Lget_name_by_idx(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_INC, 0,
+ NULL, 0, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve link name size\n");
+ PART_ERROR(H5Lget_name_by_idx_hard_name_order_increasing);
+ }
+
+ if (H5Lget_name_by_idx(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_INC, 0, link_name_buf,
+ (size_t)link_name_buf_size + 1, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve link name\n");
+ PART_ERROR(H5Lget_name_by_idx_hard_name_order_increasing);
+ }
+
+ if (HDstrncmp(link_name_buf, GET_LINK_NAME_TEST_HARD_LINK_NAME,
+ strlen(GET_LINK_NAME_TEST_HARD_LINK_NAME) + 1)) {
+ H5_FAILED();
+ HDprintf(" link name '%s' did not match expected name '%s'\n", link_name_buf,
+ GET_LINK_NAME_TEST_HARD_LINK_NAME);
+ PART_ERROR(H5Lget_name_by_idx_hard_name_order_increasing);
+ }
+
+ if ((link_name_buf_size = H5Lget_name_by_idx(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_INC, 1,
+ NULL, 0, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve link name size\n");
+ PART_ERROR(H5Lget_name_by_idx_hard_name_order_increasing);
+ }
+
+ if (H5Lget_name_by_idx(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_INC, 1, link_name_buf,
+ (size_t)link_name_buf_size + 1, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve link name\n");
+ PART_ERROR(H5Lget_name_by_idx_hard_name_order_increasing);
+ }
+
+ if (HDstrncmp(link_name_buf, GET_LINK_NAME_TEST_HARD_LINK_NAME2,
+ strlen(GET_LINK_NAME_TEST_HARD_LINK_NAME2) + 1)) {
+ H5_FAILED();
+ HDprintf(" link name '%s' did not match expected name '%s'\n", link_name_buf,
+ GET_LINK_NAME_TEST_HARD_LINK_NAME2);
+ PART_ERROR(H5Lget_name_by_idx_hard_name_order_increasing);
+ }
+
+ if ((link_name_buf_size = H5Lget_name_by_idx(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_INC, 2,
+ NULL, 0, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve link name size\n");
+ PART_ERROR(H5Lget_name_by_idx_hard_name_order_increasing);
+ }
+
+ if (H5Lget_name_by_idx(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_INC, 2, link_name_buf,
+ (size_t)link_name_buf_size + 1, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve link name\n");
+ PART_ERROR(H5Lget_name_by_idx_hard_name_order_increasing);
+ }
+
+ if (HDstrncmp(link_name_buf, GET_LINK_NAME_TEST_HARD_LINK_NAME3,
+ strlen(GET_LINK_NAME_TEST_HARD_LINK_NAME3) + 1)) {
+ H5_FAILED();
+ HDprintf(" link name '%s' did not match expected name '%s'\n", link_name_buf,
+ GET_LINK_NAME_TEST_HARD_LINK_NAME3);
+ PART_ERROR(H5Lget_name_by_idx_hard_name_order_increasing);
+ }
+
+ if (H5Gclose(subgroup_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close group\n");
+ PART_ERROR(H5Lget_name_by_idx_hard_name_order_increasing);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lget_name_by_idx_hard_name_order_increasing);
+
+ H5E_BEGIN_TRY
+ {
+ H5Gclose(subgroup_id);
+ subgroup_id = H5I_INVALID_HID;
+ }
+ H5E_END_TRY;
+
+ PART_BEGIN(H5Lget_name_by_idx_hard_name_order_decreasing)
+ {
+ TESTING_2("H5Lget_name_by_idx on hard link by alphabetical order in decreasing order");
+#ifndef NO_DECREASING_ALPHA_ITER_ORDER
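+ /* Skipped for connectors that cannot iterate links by name in decreasing order */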
+ /* Create group to hold some links */
+ if ((subgroup_id = H5Gcreate2(group_id, GET_LINK_NAME_TEST_HARD_SUBGROUP_NAME4, H5P_DEFAULT,
+ gcpl_id, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container subgroup '%s'\n",
+ GET_LINK_NAME_TEST_HARD_SUBGROUP_NAME4);
+ PART_ERROR(H5Lget_name_by_idx_hard_name_order_decreasing);
+ }
+
+ /* Create several hard links */
+ if (H5Lcreate_hard(subgroup_id, ".", subgroup_id, GET_LINK_NAME_TEST_HARD_LINK_NAME, H5P_DEFAULT,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to create hard link '%s'\n", GET_LINK_NAME_TEST_HARD_LINK_NAME);
+ PART_ERROR(H5Lget_name_by_idx_hard_name_order_decreasing);
+ }
+
+ if (H5Lcreate_hard(subgroup_id, ".", subgroup_id, GET_LINK_NAME_TEST_HARD_LINK_NAME2, H5P_DEFAULT,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to create hard link '%s'\n", GET_LINK_NAME_TEST_HARD_LINK_NAME2);
+ PART_ERROR(H5Lget_name_by_idx_hard_name_order_decreasing);
+ }
+
+ if (H5Lcreate_hard(subgroup_id, ".", subgroup_id, GET_LINK_NAME_TEST_HARD_LINK_NAME3, H5P_DEFAULT,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to create hard link '%s'\n", GET_LINK_NAME_TEST_HARD_LINK_NAME3);
+ PART_ERROR(H5Lget_name_by_idx_hard_name_order_decreasing);
+ }
+
+ /* Verify the links have been created */
+ if ((link_exists = H5Lexists(subgroup_id, GET_LINK_NAME_TEST_HARD_LINK_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if hard link '%s' exists\n",
+ GET_LINK_NAME_TEST_HARD_LINK_NAME);
+ PART_ERROR(H5Lget_name_by_idx_hard_name_order_decreasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" hard link '%s' did not exist before name retrieval\n",
+ GET_LINK_NAME_TEST_HARD_LINK_NAME);
+ PART_ERROR(H5Lget_name_by_idx_hard_name_order_decreasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, GET_LINK_NAME_TEST_HARD_LINK_NAME2, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if hard link '%s' exists\n",
+ GET_LINK_NAME_TEST_HARD_LINK_NAME2);
+ PART_ERROR(H5Lget_name_by_idx_hard_name_order_decreasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" hard link '%s' did not exist before name retrieval\n",
+ GET_LINK_NAME_TEST_HARD_LINK_NAME2);
+ PART_ERROR(H5Lget_name_by_idx_hard_name_order_decreasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, GET_LINK_NAME_TEST_HARD_LINK_NAME3, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if hard link '%s' exists\n",
+ GET_LINK_NAME_TEST_HARD_LINK_NAME3);
+ PART_ERROR(H5Lget_name_by_idx_hard_name_order_decreasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" hard link '%s' did not exist before name retrieval\n",
+ GET_LINK_NAME_TEST_HARD_LINK_NAME3);
+ PART_ERROR(H5Lget_name_by_idx_hard_name_order_decreasing);
+ }
+
+ /* Retrieve link names */
+ if ((link_name_buf_size = H5Lget_name_by_idx(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_DEC, 0,
+ NULL, 0, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve link name size\n");
+ PART_ERROR(H5Lget_name_by_idx_hard_name_order_decreasing);
+ }
+
+ if (H5Lget_name_by_idx(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_DEC, 0, link_name_buf,
+ (size_t)link_name_buf_size + 1, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve link name\n");
+ PART_ERROR(H5Lget_name_by_idx_hard_name_order_decreasing);
+ }
+
+ if (HDstrncmp(link_name_buf, GET_LINK_NAME_TEST_HARD_LINK_NAME3,
+ strlen(GET_LINK_NAME_TEST_HARD_LINK_NAME3) + 1)) {
+ H5_FAILED();
+ HDprintf(" link name '%s' did not match expected name '%s'\n", link_name_buf,
+ GET_LINK_NAME_TEST_HARD_LINK_NAME3);
+ PART_ERROR(H5Lget_name_by_idx_hard_name_order_decreasing);
+ }
+
+ if ((link_name_buf_size = H5Lget_name_by_idx(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_DEC, 1,
+ NULL, 0, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve link name size\n");
+ PART_ERROR(H5Lget_name_by_idx_hard_name_order_decreasing);
+ }
+
+ if (H5Lget_name_by_idx(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_DEC, 1, link_name_buf,
+ (size_t)link_name_buf_size + 1, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve link name\n");
+ PART_ERROR(H5Lget_name_by_idx_hard_name_order_decreasing);
+ }
+
+ if (HDstrncmp(link_name_buf, GET_LINK_NAME_TEST_HARD_LINK_NAME2,
+ strlen(GET_LINK_NAME_TEST_HARD_LINK_NAME2) + 1)) {
+ H5_FAILED();
+ HDprintf(" link name '%s' did not match expected name '%s'\n", link_name_buf,
+ GET_LINK_NAME_TEST_HARD_LINK_NAME2);
+ PART_ERROR(H5Lget_name_by_idx_hard_name_order_decreasing);
+ }
+
+ if ((link_name_buf_size = H5Lget_name_by_idx(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_DEC, 2,
+ NULL, 0, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve link name size\n");
+ PART_ERROR(H5Lget_name_by_idx_hard_name_order_decreasing);
+ }
+
+ if (H5Lget_name_by_idx(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_DEC, 2, link_name_buf,
+ (size_t)link_name_buf_size + 1, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve link name\n");
+ PART_ERROR(H5Lget_name_by_idx_hard_name_order_decreasing);
+ }
+
+ if (HDstrncmp(link_name_buf, GET_LINK_NAME_TEST_HARD_LINK_NAME,
+ strlen(GET_LINK_NAME_TEST_HARD_LINK_NAME) + 1)) {
+ H5_FAILED();
+ HDprintf(" link name '%s' did not match expected name '%s'\n", link_name_buf,
+ GET_LINK_NAME_TEST_HARD_LINK_NAME);
+ PART_ERROR(H5Lget_name_by_idx_hard_name_order_decreasing);
+ }
+
+ if (H5Gclose(subgroup_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close group\n");
+ PART_ERROR(H5Lget_name_by_idx_hard_name_order_decreasing);
+ }
+
+ PASSED();
+#else
+ SKIPPED();
+ PART_EMPTY(H5Lget_name_by_idx_hard_name_order_decreasing);
+#endif
+ }
+ PART_END(H5Lget_name_by_idx_hard_name_order_decreasing);
+
+ H5E_BEGIN_TRY
+ {
+ H5Gclose(subgroup_id);
+ subgroup_id = H5I_INVALID_HID;
+ }
+ H5E_END_TRY;
+
+ PART_BEGIN(H5Lget_name_by_idx_soft_crt_order_increasing)
+ {
+ TESTING_2("H5Lget_name_by_idx on soft link by creation order in increasing order");
+
+ /* Create group to hold some links */
+ if ((subgroup_id = H5Gcreate2(group_id, GET_LINK_NAME_TEST_SOFT_SUBGROUP_NAME, H5P_DEFAULT,
+ gcpl_id, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container subgroup '%s'\n",
+ GET_LINK_NAME_TEST_SOFT_SUBGROUP_NAME);
+ PART_ERROR(H5Lget_name_by_idx_soft_crt_order_increasing);
+ }
+
+ /* Create several soft links in reverse order to test creation order */
+ if (H5Lcreate_soft("/" LINK_TEST_GROUP_NAME "/" GET_LINK_NAME_TEST_GROUP_NAME, subgroup_id,
+ GET_LINK_NAME_TEST_SOFT_LINK_NAME3, H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to create soft link '%s'\n", GET_LINK_NAME_TEST_SOFT_LINK_NAME3);
+ PART_ERROR(H5Lget_name_by_idx_soft_crt_order_increasing);
+ }
+
+ if (H5Lcreate_soft("/" LINK_TEST_GROUP_NAME "/" GET_LINK_NAME_TEST_GROUP_NAME, subgroup_id,
+ GET_LINK_NAME_TEST_SOFT_LINK_NAME2, H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to create soft link '%s'\n", GET_LINK_NAME_TEST_SOFT_LINK_NAME2);
+ PART_ERROR(H5Lget_name_by_idx_soft_crt_order_increasing);
+ }
+
+ if (H5Lcreate_soft("/" LINK_TEST_GROUP_NAME "/" GET_LINK_NAME_TEST_GROUP_NAME, subgroup_id,
+ GET_LINK_NAME_TEST_SOFT_LINK_NAME, H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to create soft link '%s'\n", GET_LINK_NAME_TEST_SOFT_LINK_NAME);
+ PART_ERROR(H5Lget_name_by_idx_soft_crt_order_increasing);
+ }
+
+ /* Verify the links have been created */
+ if ((link_exists = H5Lexists(subgroup_id, GET_LINK_NAME_TEST_SOFT_LINK_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if soft link '%s' exists\n",
+ GET_LINK_NAME_TEST_SOFT_LINK_NAME);
+ PART_ERROR(H5Lget_name_by_idx_soft_crt_order_increasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" soft link '%s' did not exist before name retrieval\n",
+ GET_LINK_NAME_TEST_SOFT_LINK_NAME);
+ PART_ERROR(H5Lget_name_by_idx_soft_crt_order_increasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, GET_LINK_NAME_TEST_SOFT_LINK_NAME2, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if soft link '%s' exists\n",
+ GET_LINK_NAME_TEST_SOFT_LINK_NAME2);
+ PART_ERROR(H5Lget_name_by_idx_soft_crt_order_increasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" soft link '%s' did not exist before name retrieval\n",
+ GET_LINK_NAME_TEST_SOFT_LINK_NAME2);
+ PART_ERROR(H5Lget_name_by_idx_soft_crt_order_increasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, GET_LINK_NAME_TEST_SOFT_LINK_NAME3, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if soft link '%s' exists\n",
+ GET_LINK_NAME_TEST_SOFT_LINK_NAME3);
+ PART_ERROR(H5Lget_name_by_idx_soft_crt_order_increasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" soft link '%s' did not exist before name retrieval\n",
+ GET_LINK_NAME_TEST_SOFT_LINK_NAME3);
+ PART_ERROR(H5Lget_name_by_idx_soft_crt_order_increasing);
+ }
+
+ /* Retrieve link names */
+ if ((link_name_buf_size = H5Lget_name_by_idx(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, 0,
+ NULL, 0, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve link name size\n");
+ PART_ERROR(H5Lget_name_by_idx_soft_crt_order_increasing);
+ }
+
+ if (H5Lget_name_by_idx(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, 0, link_name_buf,
+ (size_t)link_name_buf_size + 1, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve link name\n");
+ PART_ERROR(H5Lget_name_by_idx_soft_crt_order_increasing);
+ }
+
+ if (HDstrncmp(link_name_buf, GET_LINK_NAME_TEST_SOFT_LINK_NAME3,
+ strlen(GET_LINK_NAME_TEST_SOFT_LINK_NAME3) + 1)) {
+ H5_FAILED();
+ HDprintf(" link name '%s' did not match expected name '%s'\n", link_name_buf,
+ GET_LINK_NAME_TEST_SOFT_LINK_NAME3);
+ PART_ERROR(H5Lget_name_by_idx_soft_crt_order_increasing);
+ }
+
+ if ((link_name_buf_size = H5Lget_name_by_idx(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, 1,
+ NULL, 0, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve link name size\n");
+ PART_ERROR(H5Lget_name_by_idx_soft_crt_order_increasing);
+ }
+
+ if (H5Lget_name_by_idx(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, 1, link_name_buf,
+ (size_t)link_name_buf_size + 1, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve link name\n");
+ PART_ERROR(H5Lget_name_by_idx_soft_crt_order_increasing);
+ }
+
+ if (HDstrncmp(link_name_buf, GET_LINK_NAME_TEST_SOFT_LINK_NAME2,
+ strlen(GET_LINK_NAME_TEST_SOFT_LINK_NAME2) + 1)) {
+ H5_FAILED();
+ HDprintf(" link name '%s' did not match expected name '%s'\n", link_name_buf,
+ GET_LINK_NAME_TEST_SOFT_LINK_NAME2);
+ PART_ERROR(H5Lget_name_by_idx_soft_crt_order_increasing);
+ }
+
+ if ((link_name_buf_size = H5Lget_name_by_idx(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, 2,
+ NULL, 0, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve link name size\n");
+ PART_ERROR(H5Lget_name_by_idx_soft_crt_order_increasing);
+ }
+
+ if (H5Lget_name_by_idx(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, 2, link_name_buf,
+ (size_t)link_name_buf_size + 1, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve link name\n");
+ PART_ERROR(H5Lget_name_by_idx_soft_crt_order_increasing);
+ }
+
+ if (HDstrncmp(link_name_buf, GET_LINK_NAME_TEST_SOFT_LINK_NAME,
+ strlen(GET_LINK_NAME_TEST_SOFT_LINK_NAME) + 1)) {
+ H5_FAILED();
+ HDprintf(" link name '%s' did not match expected name '%s'\n", link_name_buf,
+ GET_LINK_NAME_TEST_SOFT_LINK_NAME);
+ PART_ERROR(H5Lget_name_by_idx_soft_crt_order_increasing);
+ }
+
+ if (H5Gclose(subgroup_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close group\n");
+ PART_ERROR(H5Lget_name_by_idx_soft_crt_order_increasing);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lget_name_by_idx_soft_crt_order_increasing);
+
+ H5E_BEGIN_TRY
+ {
+ H5Gclose(subgroup_id);
+ subgroup_id = H5I_INVALID_HID;
+ }
+ H5E_END_TRY;
+
+ PART_BEGIN(H5Lget_name_by_idx_soft_crt_order_decreasing)
+ {
+ TESTING_2("H5Lget_name_by_idx on soft link by creation order in decreasing order");
+
+ /* Create group to hold some links */
+ if ((subgroup_id = H5Gcreate2(group_id, GET_LINK_NAME_TEST_SOFT_SUBGROUP_NAME2, H5P_DEFAULT,
+ gcpl_id, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container subgroup '%s'\n",
+ GET_LINK_NAME_TEST_SOFT_SUBGROUP_NAME2);
+ PART_ERROR(H5Lget_name_by_idx_soft_crt_order_decreasing);
+ }
+
+ /* Create several soft links in reverse order to test creation order */
+ if (H5Lcreate_soft("/" LINK_TEST_GROUP_NAME "/" GET_LINK_NAME_TEST_GROUP_NAME, subgroup_id,
+ GET_LINK_NAME_TEST_SOFT_LINK_NAME3, H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to create soft link '%s'\n", GET_LINK_NAME_TEST_SOFT_LINK_NAME3);
+ PART_ERROR(H5Lget_name_by_idx_soft_crt_order_decreasing);
+ }
+
+ if (H5Lcreate_soft("/" LINK_TEST_GROUP_NAME "/" GET_LINK_NAME_TEST_GROUP_NAME, subgroup_id,
+ GET_LINK_NAME_TEST_SOFT_LINK_NAME2, H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to create soft link '%s'\n", GET_LINK_NAME_TEST_SOFT_LINK_NAME2);
+ PART_ERROR(H5Lget_name_by_idx_soft_crt_order_decreasing);
+ }
+
+ if (H5Lcreate_soft("/" LINK_TEST_GROUP_NAME "/" GET_LINK_NAME_TEST_GROUP_NAME, subgroup_id,
+ GET_LINK_NAME_TEST_SOFT_LINK_NAME, H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to create soft link '%s'\n", GET_LINK_NAME_TEST_SOFT_LINK_NAME);
+ PART_ERROR(H5Lget_name_by_idx_soft_crt_order_decreasing);
+ }
+
+ /* Verify the links have been created */
+ if ((link_exists = H5Lexists(subgroup_id, GET_LINK_NAME_TEST_SOFT_LINK_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if soft link '%s' exists\n",
+ GET_LINK_NAME_TEST_SOFT_LINK_NAME);
+ PART_ERROR(H5Lget_name_by_idx_soft_crt_order_decreasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" soft link '%s' did not exist before name retrieval\n",
+ GET_LINK_NAME_TEST_SOFT_LINK_NAME);
+ PART_ERROR(H5Lget_name_by_idx_soft_crt_order_decreasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, GET_LINK_NAME_TEST_SOFT_LINK_NAME2, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if soft link '%s' exists\n",
+ GET_LINK_NAME_TEST_SOFT_LINK_NAME2);
+ PART_ERROR(H5Lget_name_by_idx_soft_crt_order_decreasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" soft link '%s' did not exist before name retrieval\n",
+ GET_LINK_NAME_TEST_SOFT_LINK_NAME2);
+ PART_ERROR(H5Lget_name_by_idx_soft_crt_order_decreasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, GET_LINK_NAME_TEST_SOFT_LINK_NAME3, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if soft link '%s' exists\n",
+ GET_LINK_NAME_TEST_SOFT_LINK_NAME3);
+ PART_ERROR(H5Lget_name_by_idx_soft_crt_order_decreasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" soft link '%s' did not exist before name retrieval\n",
+ GET_LINK_NAME_TEST_SOFT_LINK_NAME3);
+ PART_ERROR(H5Lget_name_by_idx_soft_crt_order_decreasing);
+ }
+
+ /* Retrieve link names */
+ if ((link_name_buf_size = H5Lget_name_by_idx(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_DEC, 0,
+ NULL, 0, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve link name size\n");
+ PART_ERROR(H5Lget_name_by_idx_soft_crt_order_decreasing);
+ }
+
+ if (H5Lget_name_by_idx(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_DEC, 0, link_name_buf,
+ (size_t)link_name_buf_size + 1, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve link name\n");
+ PART_ERROR(H5Lget_name_by_idx_soft_crt_order_decreasing);
+ }
+
+ if (HDstrncmp(link_name_buf, GET_LINK_NAME_TEST_SOFT_LINK_NAME,
+ strlen(GET_LINK_NAME_TEST_SOFT_LINK_NAME) + 1)) {
+ H5_FAILED();
+ HDprintf(" link name '%s' did not match expected name '%s'\n", link_name_buf,
+ GET_LINK_NAME_TEST_SOFT_LINK_NAME);
+ PART_ERROR(H5Lget_name_by_idx_soft_crt_order_decreasing);
+ }
+
+ if ((link_name_buf_size = H5Lget_name_by_idx(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_DEC, 1,
+ NULL, 0, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve link name size\n");
+ PART_ERROR(H5Lget_name_by_idx_soft_crt_order_decreasing);
+ }
+
+ if (H5Lget_name_by_idx(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_DEC, 1, link_name_buf,
+ (size_t)link_name_buf_size + 1, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve link name\n");
+ PART_ERROR(H5Lget_name_by_idx_soft_crt_order_decreasing);
+ }
+
+ if (HDstrncmp(link_name_buf, GET_LINK_NAME_TEST_SOFT_LINK_NAME2,
+ strlen(GET_LINK_NAME_TEST_SOFT_LINK_NAME2) + 1)) {
+ H5_FAILED();
+ HDprintf(" link name '%s' did not match expected name '%s'\n", link_name_buf,
+ GET_LINK_NAME_TEST_SOFT_LINK_NAME2);
+ PART_ERROR(H5Lget_name_by_idx_soft_crt_order_decreasing);
+ }
+
+ if ((link_name_buf_size = H5Lget_name_by_idx(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_DEC, 2,
+ NULL, 0, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve link name size\n");
+ PART_ERROR(H5Lget_name_by_idx_soft_crt_order_decreasing);
+ }
+
+ if (H5Lget_name_by_idx(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_DEC, 2, link_name_buf,
+ (size_t)link_name_buf_size + 1, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve link name\n");
+ PART_ERROR(H5Lget_name_by_idx_soft_crt_order_decreasing);
+ }
+
+ if (HDstrncmp(link_name_buf, GET_LINK_NAME_TEST_SOFT_LINK_NAME3,
+ strlen(GET_LINK_NAME_TEST_SOFT_LINK_NAME3) + 1)) {
+ H5_FAILED();
+ HDprintf(" link name '%s' did not match expected name '%s'\n", link_name_buf,
+ GET_LINK_NAME_TEST_SOFT_LINK_NAME3);
+ PART_ERROR(H5Lget_name_by_idx_soft_crt_order_decreasing);
+ }
+
+ if (H5Gclose(subgroup_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close group\n");
+ PART_ERROR(H5Lget_name_by_idx_soft_crt_order_decreasing);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lget_name_by_idx_soft_crt_order_decreasing);
+
+ H5E_BEGIN_TRY
+ {
+ H5Gclose(subgroup_id);
+ subgroup_id = H5I_INVALID_HID;
+ }
+ H5E_END_TRY;
+
+ PART_BEGIN(H5Lget_name_by_idx_soft_name_order_increasing)
+ {
+ TESTING_2("H5Lget_name_by_idx on soft link by alphabetical order in increasing order");
+
+ /* Create group to hold some links */
+ if ((subgroup_id = H5Gcreate2(group_id, GET_LINK_NAME_TEST_SOFT_SUBGROUP_NAME3, H5P_DEFAULT,
+ gcpl_id, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container subgroup '%s'\n",
+ GET_LINK_NAME_TEST_SOFT_SUBGROUP_NAME3);
+ PART_ERROR(H5Lget_name_by_idx_soft_name_order_increasing);
+ }
+
+ /* Create several soft links */
+ if (H5Lcreate_soft("/" LINK_TEST_GROUP_NAME "/" GET_LINK_NAME_TEST_GROUP_NAME, subgroup_id,
+ GET_LINK_NAME_TEST_SOFT_LINK_NAME, H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to create soft link '%s'\n", GET_LINK_NAME_TEST_SOFT_LINK_NAME);
+ PART_ERROR(H5Lget_name_by_idx_soft_name_order_increasing);
+ }
+
+ if (H5Lcreate_soft("/" LINK_TEST_GROUP_NAME "/" GET_LINK_NAME_TEST_GROUP_NAME, subgroup_id,
+ GET_LINK_NAME_TEST_SOFT_LINK_NAME2, H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to create soft link '%s'\n", GET_LINK_NAME_TEST_SOFT_LINK_NAME2);
+ PART_ERROR(H5Lget_name_by_idx_soft_name_order_increasing);
+ }
+
+ if (H5Lcreate_soft("/" LINK_TEST_GROUP_NAME "/" GET_LINK_NAME_TEST_GROUP_NAME, subgroup_id,
+ GET_LINK_NAME_TEST_SOFT_LINK_NAME3, H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to create soft link '%s'\n", GET_LINK_NAME_TEST_SOFT_LINK_NAME3);
+ PART_ERROR(H5Lget_name_by_idx_soft_name_order_increasing);
+ }
+
+ /* Verify the links have been created */
+ if ((link_exists = H5Lexists(subgroup_id, GET_LINK_NAME_TEST_SOFT_LINK_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if soft link '%s' exists\n",
+ GET_LINK_NAME_TEST_SOFT_LINK_NAME);
+ PART_ERROR(H5Lget_name_by_idx_soft_name_order_increasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" soft link '%s' did not exist before name retrieval\n",
+ GET_LINK_NAME_TEST_SOFT_LINK_NAME);
+ PART_ERROR(H5Lget_name_by_idx_soft_name_order_increasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, GET_LINK_NAME_TEST_SOFT_LINK_NAME2, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if soft link '%s' exists\n",
+ GET_LINK_NAME_TEST_SOFT_LINK_NAME2);
+ PART_ERROR(H5Lget_name_by_idx_soft_name_order_increasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" soft link '%s' did not exist before name retrieval\n",
+ GET_LINK_NAME_TEST_SOFT_LINK_NAME2);
+ PART_ERROR(H5Lget_name_by_idx_soft_name_order_increasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, GET_LINK_NAME_TEST_SOFT_LINK_NAME3, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if soft link '%s' exists\n",
+ GET_LINK_NAME_TEST_SOFT_LINK_NAME3);
+ PART_ERROR(H5Lget_name_by_idx_soft_name_order_increasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" soft link '%s' did not exist before name retrieval\n",
+ GET_LINK_NAME_TEST_SOFT_LINK_NAME3);
+ PART_ERROR(H5Lget_name_by_idx_soft_name_order_increasing);
+ }
+
+ /* Retrieve link names */
+ if ((link_name_buf_size = H5Lget_name_by_idx(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_INC, 0,
+ NULL, 0, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve link name size\n");
+ PART_ERROR(H5Lget_name_by_idx_soft_name_order_increasing);
+ }
+
+ if (H5Lget_name_by_idx(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_INC, 0, link_name_buf,
+ (size_t)link_name_buf_size + 1, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve link name\n");
+ PART_ERROR(H5Lget_name_by_idx_soft_name_order_increasing);
+ }
+
+ if (HDstrncmp(link_name_buf, GET_LINK_NAME_TEST_SOFT_LINK_NAME,
+ strlen(GET_LINK_NAME_TEST_SOFT_LINK_NAME) + 1)) {
+ H5_FAILED();
+ HDprintf(" link name '%s' did not match expected name '%s'\n", link_name_buf,
+ GET_LINK_NAME_TEST_SOFT_LINK_NAME);
+ PART_ERROR(H5Lget_name_by_idx_soft_name_order_increasing);
+ }
+
+ if ((link_name_buf_size = H5Lget_name_by_idx(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_INC, 1,
+ NULL, 0, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve link name size\n");
+ PART_ERROR(H5Lget_name_by_idx_soft_name_order_increasing);
+ }
+
+ if (H5Lget_name_by_idx(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_INC, 1, link_name_buf,
+ (size_t)link_name_buf_size + 1, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve link name\n");
+ PART_ERROR(H5Lget_name_by_idx_soft_name_order_increasing);
+ }
+
+ if (HDstrncmp(link_name_buf, GET_LINK_NAME_TEST_SOFT_LINK_NAME2,
+ strlen(GET_LINK_NAME_TEST_SOFT_LINK_NAME2) + 1)) {
+ H5_FAILED();
+ HDprintf(" link name '%s' did not match expected name '%s'\n", link_name_buf,
+ GET_LINK_NAME_TEST_SOFT_LINK_NAME2);
+ PART_ERROR(H5Lget_name_by_idx_soft_name_order_increasing);
+ }
+
+ if ((link_name_buf_size = H5Lget_name_by_idx(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_INC, 2,
+ NULL, 0, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve link name size\n");
+ PART_ERROR(H5Lget_name_by_idx_soft_name_order_increasing);
+ }
+
+ if (H5Lget_name_by_idx(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_INC, 2, link_name_buf,
+ (size_t)link_name_buf_size + 1, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve link name\n");
+ PART_ERROR(H5Lget_name_by_idx_soft_name_order_increasing);
+ }
+
+ if (HDstrncmp(link_name_buf, GET_LINK_NAME_TEST_SOFT_LINK_NAME3,
+ strlen(GET_LINK_NAME_TEST_SOFT_LINK_NAME3) + 1)) {
+ H5_FAILED();
+ HDprintf(" link name '%s' did not match expected name '%s'\n", link_name_buf,
+ GET_LINK_NAME_TEST_SOFT_LINK_NAME3);
+ PART_ERROR(H5Lget_name_by_idx_soft_name_order_increasing);
+ }
+
+ if (H5Gclose(subgroup_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close group\n");
+ PART_ERROR(H5Lget_name_by_idx_soft_name_order_increasing);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lget_name_by_idx_soft_name_order_increasing);
+
+ H5E_BEGIN_TRY
+ {
+ H5Gclose(subgroup_id);
+ subgroup_id = H5I_INVALID_HID;
+ }
+ H5E_END_TRY;
+
+ PART_BEGIN(H5Lget_name_by_idx_soft_name_order_decreasing)
+ {
+ TESTING_2("H5Lget_name_by_idx on soft link by alphabetical order in decreasing order");
+#ifndef NO_DECREASING_ALPHA_ITER_ORDER
+ /* Create group to hold some links */
+ if ((subgroup_id = H5Gcreate2(group_id, GET_LINK_NAME_TEST_SOFT_SUBGROUP_NAME4, H5P_DEFAULT,
+ gcpl_id, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container subgroup '%s'\n",
+ GET_LINK_NAME_TEST_SOFT_SUBGROUP_NAME4);
+ PART_ERROR(H5Lget_name_by_idx_soft_name_order_decreasing);
+ }
+
+ /* Create several soft links */
+ if (H5Lcreate_soft("/" LINK_TEST_GROUP_NAME "/" GET_LINK_NAME_TEST_GROUP_NAME, subgroup_id,
+ GET_LINK_NAME_TEST_SOFT_LINK_NAME, H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to create soft link '%s'\n", GET_LINK_NAME_TEST_SOFT_LINK_NAME);
+ PART_ERROR(H5Lget_name_by_idx_soft_name_order_decreasing);
+ }
+
+ if (H5Lcreate_soft("/" LINK_TEST_GROUP_NAME "/" GET_LINK_NAME_TEST_GROUP_NAME, subgroup_id,
+ GET_LINK_NAME_TEST_SOFT_LINK_NAME2, H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to create soft link '%s'\n", GET_LINK_NAME_TEST_SOFT_LINK_NAME2);
+ PART_ERROR(H5Lget_name_by_idx_soft_name_order_decreasing);
+ }
+
+ if (H5Lcreate_soft("/" LINK_TEST_GROUP_NAME "/" GET_LINK_NAME_TEST_GROUP_NAME, subgroup_id,
+ GET_LINK_NAME_TEST_SOFT_LINK_NAME3, H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to create soft link '%s'\n", GET_LINK_NAME_TEST_SOFT_LINK_NAME3);
+ PART_ERROR(H5Lget_name_by_idx_soft_name_order_decreasing);
+ }
+
+ /* Verify the links have been created */
+ if ((link_exists = H5Lexists(subgroup_id, GET_LINK_NAME_TEST_SOFT_LINK_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if soft link '%s' exists\n",
+ GET_LINK_NAME_TEST_SOFT_LINK_NAME);
+ PART_ERROR(H5Lget_name_by_idx_soft_name_order_decreasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" soft link '%s' did not exist before name retrieval\n",
+ GET_LINK_NAME_TEST_SOFT_LINK_NAME);
+ PART_ERROR(H5Lget_name_by_idx_soft_name_order_decreasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, GET_LINK_NAME_TEST_SOFT_LINK_NAME2, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if soft link '%s' exists\n",
+ GET_LINK_NAME_TEST_SOFT_LINK_NAME2);
+ PART_ERROR(H5Lget_name_by_idx_soft_name_order_decreasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" soft link '%s' did not exist before name retrieval\n",
+ GET_LINK_NAME_TEST_SOFT_LINK_NAME2);
+ PART_ERROR(H5Lget_name_by_idx_soft_name_order_decreasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, GET_LINK_NAME_TEST_SOFT_LINK_NAME3, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if soft link '%s' exists\n",
+ GET_LINK_NAME_TEST_SOFT_LINK_NAME3);
+ PART_ERROR(H5Lget_name_by_idx_soft_name_order_decreasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" soft link '%s' did not exist before name retrieval\n",
+ GET_LINK_NAME_TEST_SOFT_LINK_NAME3);
+ PART_ERROR(H5Lget_name_by_idx_soft_name_order_decreasing);
+ }
+
+ /* Retrieve link names */
+ if ((link_name_buf_size = H5Lget_name_by_idx(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_DEC, 0,
+ NULL, 0, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve link name size\n");
+ PART_ERROR(H5Lget_name_by_idx_soft_name_order_decreasing);
+ }
+
+ if (H5Lget_name_by_idx(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_DEC, 0, link_name_buf,
+ (size_t)link_name_buf_size + 1, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve link name\n");
+ PART_ERROR(H5Lget_name_by_idx_soft_name_order_decreasing);
+ }
+
+ if (HDstrncmp(link_name_buf, GET_LINK_NAME_TEST_SOFT_LINK_NAME3,
+ strlen(GET_LINK_NAME_TEST_SOFT_LINK_NAME3) + 1)) {
+ H5_FAILED();
+ HDprintf(" link name '%s' did not match expected name '%s'\n", link_name_buf,
+ GET_LINK_NAME_TEST_SOFT_LINK_NAME3);
+ PART_ERROR(H5Lget_name_by_idx_soft_name_order_decreasing);
+ }
+
+ if ((link_name_buf_size = H5Lget_name_by_idx(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_DEC, 1,
+ NULL, 0, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve link name size\n");
+ PART_ERROR(H5Lget_name_by_idx_soft_name_order_decreasing);
+ }
+
+ if (H5Lget_name_by_idx(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_DEC, 1, link_name_buf,
+ (size_t)link_name_buf_size + 1, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve link name\n");
+ PART_ERROR(H5Lget_name_by_idx_soft_name_order_decreasing);
+ }
+
+ if (HDstrncmp(link_name_buf, GET_LINK_NAME_TEST_SOFT_LINK_NAME2,
+ strlen(GET_LINK_NAME_TEST_SOFT_LINK_NAME2) + 1)) {
+ H5_FAILED();
+ HDprintf(" link name '%s' did not match expected name '%s'\n", link_name_buf,
+ GET_LINK_NAME_TEST_SOFT_LINK_NAME2);
+ PART_ERROR(H5Lget_name_by_idx_soft_name_order_decreasing);
+ }
+
+ if ((link_name_buf_size = H5Lget_name_by_idx(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_DEC, 2,
+ NULL, 0, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve link name size\n");
+ PART_ERROR(H5Lget_name_by_idx_soft_name_order_decreasing);
+ }
+
+ if (H5Lget_name_by_idx(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_DEC, 2, link_name_buf,
+ (size_t)link_name_buf_size + 1, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve link name\n");
+ PART_ERROR(H5Lget_name_by_idx_soft_name_order_decreasing);
+ }
+
+ if (HDstrncmp(link_name_buf, GET_LINK_NAME_TEST_SOFT_LINK_NAME,
+ strlen(GET_LINK_NAME_TEST_SOFT_LINK_NAME) + 1)) {
+ H5_FAILED();
+ HDprintf(" link name '%s' did not match expected name '%s'\n", link_name_buf,
+ GET_LINK_NAME_TEST_SOFT_LINK_NAME);
+ PART_ERROR(H5Lget_name_by_idx_soft_name_order_decreasing);
+ }
+
+ if (H5Gclose(subgroup_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close group\n");
+ PART_ERROR(H5Lget_name_by_idx_soft_name_order_decreasing);
+ }
+
+ PASSED();
+#else
+ SKIPPED();
+ PART_EMPTY(H5Lget_name_by_idx_soft_name_order_decreasing);
+#endif
+ }
+ PART_END(H5Lget_name_by_idx_soft_name_order_decreasing);
+
+ H5E_BEGIN_TRY
+ {
+ H5Gclose(subgroup_id);
+ subgroup_id = H5I_INVALID_HID;
+ }
+ H5E_END_TRY;
+
+ PART_BEGIN(H5Lget_name_by_idx_external_crt_order_increasing)
+ {
+ TESTING_2("H5Lget_name_by_idx on external link by creation order in increasing order");
+#ifndef NO_EXTERNAL_LINKS
+ /* Create file for external link to reference */
+ HDsnprintf(ext_link_filename, H5_API_TEST_FILENAME_MAX_LENGTH, "%s",
+ EXTERNAL_LINK_TEST_FILE_NAME);
+
+ if ((ext_file_id = H5Fcreate(ext_link_filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create file '%s' for external link to reference\n", ext_link_filename);
+ PART_ERROR(H5Lget_name_by_idx_external_crt_order_increasing);
+ }
+
+ if (H5Fclose(ext_file_id) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't close file '%s'\n", ext_link_filename);
+ PART_ERROR(H5Lget_name_by_idx_external_crt_order_increasing);
+ }
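+
+ /* External links are not resolved at creation time, so the target file
+ * above is simply created and closed; it only needs to exist if a link
+ * is later traversed.
+ */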
+
+ /* Create group to hold some links */
+ if ((subgroup_id = H5Gcreate2(group_id, GET_LINK_NAME_TEST_EXTERNAL_SUBGROUP_NAME, H5P_DEFAULT,
+ gcpl_id, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container subgroup '%s'\n",
+ GET_LINK_NAME_TEST_EXTERNAL_SUBGROUP_NAME);
+ PART_ERROR(H5Lget_name_by_idx_external_crt_order_increasing);
+ }
+
+ /* Create several external links in reverse order to test creation order */
+ if (H5Lcreate_external(ext_link_filename, "/", subgroup_id,
+ GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME3, H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to create external link '%s'\n", GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME3);
+ PART_ERROR(H5Lget_name_by_idx_external_crt_order_increasing);
+ }
+
+ if (H5Lcreate_external(ext_link_filename, "/", subgroup_id,
+ GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME2, H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to create external link '%s'\n", GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME2);
+ PART_ERROR(H5Lget_name_by_idx_external_crt_order_increasing);
+ }
+
+ if (H5Lcreate_external(ext_link_filename, "/", subgroup_id, GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME,
+ H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to create external link '%s'\n", GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME);
+ PART_ERROR(H5Lget_name_by_idx_external_crt_order_increasing);
+ }
+
+ /* Verify the links have been created */
+ if ((link_exists = H5Lexists(subgroup_id, GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME, H5P_DEFAULT)) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if external link '%s' exists\n",
+ GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME);
+ PART_ERROR(H5Lget_name_by_idx_external_crt_order_increasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" external link '%s' did not exist before name retrieval\n",
+ GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME);
+ PART_ERROR(H5Lget_name_by_idx_external_crt_order_increasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME2, H5P_DEFAULT)) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if external link '%s' exists\n",
+ GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME2);
+ PART_ERROR(H5Lget_name_by_idx_external_crt_order_increasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" external link '%s' did not exist before name retrieval\n",
+ GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME2);
+ PART_ERROR(H5Lget_name_by_idx_external_crt_order_increasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME3, H5P_DEFAULT)) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if external link '%s' exists\n",
+ GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME3);
+ PART_ERROR(H5Lget_name_by_idx_external_crt_order_increasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" external link '%s' did not exist before name retrieval\n",
+ GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME3);
+ PART_ERROR(H5Lget_name_by_idx_external_crt_order_increasing);
+ }
+
+ /* Retrieve link names */
+ if ((link_name_buf_size = H5Lget_name_by_idx(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, 0,
+ NULL, 0, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve link name size\n");
+ PART_ERROR(H5Lget_name_by_idx_external_crt_order_increasing);
+ }
+
+ if (H5Lget_name_by_idx(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, 0, link_name_buf,
+ (size_t)link_name_buf_size + 1, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve link name\n");
+ PART_ERROR(H5Lget_name_by_idx_external_crt_order_increasing);
+ }
+
+ if (HDstrncmp(link_name_buf, GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME3,
+ strlen(GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME3) + 1)) {
+ H5_FAILED();
+ HDprintf(" link name '%s' did not match expected name '%s'\n", link_name_buf,
+ GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME3);
+ PART_ERROR(H5Lget_name_by_idx_external_crt_order_increasing);
+ }
+
+ if ((link_name_buf_size = H5Lget_name_by_idx(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, 1,
+ NULL, 0, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve link name size\n");
+ PART_ERROR(H5Lget_name_by_idx_external_crt_order_increasing);
+ }
+
+ if (H5Lget_name_by_idx(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, 1, link_name_buf,
+ (size_t)link_name_buf_size + 1, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve link name\n");
+ PART_ERROR(H5Lget_name_by_idx_external_crt_order_increasing);
+ }
+
+ if (HDstrncmp(link_name_buf, GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME2,
+ strlen(GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME2) + 1)) {
+ H5_FAILED();
+ HDprintf(" link name '%s' did not match expected name '%s'\n", link_name_buf,
+ GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME2);
+ PART_ERROR(H5Lget_name_by_idx_external_crt_order_increasing);
+ }
+
+ if ((link_name_buf_size = H5Lget_name_by_idx(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, 2,
+ NULL, 0, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve link name size\n");
+ PART_ERROR(H5Lget_name_by_idx_external_crt_order_increasing);
+ }
+
+ if (H5Lget_name_by_idx(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, 2, link_name_buf,
+ (size_t)link_name_buf_size + 1, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve link name\n");
+ PART_ERROR(H5Lget_name_by_idx_external_crt_order_increasing);
+ }
+
+ if (HDstrncmp(link_name_buf, GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME,
+ strlen(GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME) + 1)) {
+ H5_FAILED();
+ HDprintf(" link name '%s' did not match expected name '%s'\n", link_name_buf,
+ GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME);
+ PART_ERROR(H5Lget_name_by_idx_external_crt_order_increasing);
+ }
+
+ if (H5Gclose(subgroup_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close group\n");
+ PART_ERROR(H5Lget_name_by_idx_external_crt_order_increasing);
+ }
+
+ PASSED();
+#else
+ SKIPPED();
+ PART_EMPTY(H5Lget_name_by_idx_external_crt_order_increasing);
+#endif
+ }
+ PART_END(H5Lget_name_by_idx_external_crt_order_increasing);
+
+ H5E_BEGIN_TRY
+ {
+ H5Gclose(subgroup_id);
+ subgroup_id = H5I_INVALID_HID;
+ H5Fclose(ext_file_id);
+ ext_file_id = H5I_INVALID_HID;
+ }
+ H5E_END_TRY;
+
+ PART_BEGIN(H5Lget_name_by_idx_external_crt_order_decreasing)
+ {
+ TESTING_2("H5Lget_name_by_idx on external link by creation order in decreasing order");
+#ifndef NO_EXTERNAL_LINKS
+ /* Create file for external link to reference */
+ HDsnprintf(ext_link_filename, H5_API_TEST_FILENAME_MAX_LENGTH, "%s",
+ EXTERNAL_LINK_TEST_FILE_NAME);
+
+ if ((ext_file_id = H5Fcreate(ext_link_filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create file '%s' for external link to reference\n", ext_link_filename);
+ PART_ERROR(H5Lget_name_by_idx_external_crt_order_decreasing);
+ }
+
+ if (H5Fclose(ext_file_id) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't close file '%s'\n", ext_link_filename);
+ PART_ERROR(H5Lget_name_by_idx_external_crt_order_decreasing);
+ }
+
+ /* Create group to hold some links */
+ if ((subgroup_id = H5Gcreate2(group_id, GET_LINK_NAME_TEST_EXTERNAL_SUBGROUP_NAME2, H5P_DEFAULT,
+ gcpl_id, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container subgroup '%s'\n",
+ GET_LINK_NAME_TEST_EXTERNAL_SUBGROUP_NAME2);
+ PART_ERROR(H5Lget_name_by_idx_external_crt_order_decreasing);
+ }
+
+ /* Create several external links in reverse order to test creation order */
+ if (H5Lcreate_external(ext_link_filename, "/", subgroup_id,
+ GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME3, H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to create external link '%s'\n", GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME3);
+ PART_ERROR(H5Lget_name_by_idx_external_crt_order_decreasing);
+ }
+
+ if (H5Lcreate_external(ext_link_filename, "/", subgroup_id,
+ GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME2, H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to create external link '%s'\n", GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME2);
+ PART_ERROR(H5Lget_name_by_idx_external_crt_order_decreasing);
+ }
+
+ if (H5Lcreate_external(ext_link_filename, "/", subgroup_id, GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME,
+ H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to create external link '%s'\n", GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME);
+ PART_ERROR(H5Lget_name_by_idx_external_crt_order_decreasing);
+ }
+
+ /* Verify the links have been created */
+ if ((link_exists = H5Lexists(subgroup_id, GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME, H5P_DEFAULT)) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if external link '%s' exists\n",
+ GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME);
+ PART_ERROR(H5Lget_name_by_idx_external_crt_order_decreasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" external link '%s' did not exist before name retrieval\n",
+ GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME);
+ PART_ERROR(H5Lget_name_by_idx_external_crt_order_decreasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME2, H5P_DEFAULT)) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if external link '%s' exists\n",
+ GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME2);
+ PART_ERROR(H5Lget_name_by_idx_external_crt_order_decreasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" external link '%s' did not exist before name retrieval\n",
+ GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME2);
+ PART_ERROR(H5Lget_name_by_idx_external_crt_order_decreasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME3, H5P_DEFAULT)) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if external link '%s' exists\n",
+ GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME3);
+ PART_ERROR(H5Lget_name_by_idx_external_crt_order_decreasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" external link '%s' did not exist before name retrieval\n",
+ GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME3);
+ PART_ERROR(H5Lget_name_by_idx_external_crt_order_decreasing);
+ }
+
+ /* Retrieve link names */
+ if ((link_name_buf_size = H5Lget_name_by_idx(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_DEC, 0,
+ NULL, 0, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve link name size\n");
+ PART_ERROR(H5Lget_name_by_idx_external_crt_order_decreasing);
+ }
+
+ if (H5Lget_name_by_idx(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_DEC, 0, link_name_buf,
+ (size_t)link_name_buf_size + 1, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve link name\n");
+ PART_ERROR(H5Lget_name_by_idx_external_crt_order_decreasing);
+ }
+
+ if (HDstrncmp(link_name_buf, GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME,
+ strlen(GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME) + 1)) {
+ H5_FAILED();
+ HDprintf(" link name '%s' did not match expected name '%s'\n", link_name_buf,
+ GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME);
+ PART_ERROR(H5Lget_name_by_idx_external_crt_order_decreasing);
+ }
+
+ if ((link_name_buf_size = H5Lget_name_by_idx(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_DEC, 1,
+ NULL, 0, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve link name size\n");
+ PART_ERROR(H5Lget_name_by_idx_external_crt_order_decreasing);
+ }
+
+ if (H5Lget_name_by_idx(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_DEC, 1, link_name_buf,
+ (size_t)link_name_buf_size + 1, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve link name\n");
+ PART_ERROR(H5Lget_name_by_idx_external_crt_order_decreasing);
+ }
+
+ if (HDstrncmp(link_name_buf, GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME2,
+ strlen(GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME2) + 1)) {
+ H5_FAILED();
+ HDprintf(" link name '%s' did not match expected name '%s'\n", link_name_buf,
+ GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME2);
+ PART_ERROR(H5Lget_name_by_idx_external_crt_order_decreasing);
+ }
+
+ if ((link_name_buf_size = H5Lget_name_by_idx(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_DEC, 2,
+ NULL, 0, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve link name size\n");
+ PART_ERROR(H5Lget_name_by_idx_external_crt_order_decreasing);
+ }
+
+ if (H5Lget_name_by_idx(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_DEC, 2, link_name_buf,
+ (size_t)link_name_buf_size + 1, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve link name\n");
+ PART_ERROR(H5Lget_name_by_idx_external_crt_order_decreasing);
+ }
+
+ if (HDstrncmp(link_name_buf, GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME3,
+ strlen(GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME3) + 1)) {
+ H5_FAILED();
+ HDprintf(" link name '%s' did not match expected name '%s'\n", link_name_buf,
+ GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME3);
+ PART_ERROR(H5Lget_name_by_idx_external_crt_order_decreasing);
+ }
+
+ if (H5Gclose(subgroup_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close group\n");
+ PART_ERROR(H5Lget_name_by_idx_external_crt_order_decreasing);
+ }
+
+ PASSED();
+#else
+ SKIPPED();
+ PART_EMPTY(H5Lget_name_by_idx_external_crt_order_decreasing);
+#endif
+ }
+ PART_END(H5Lget_name_by_idx_external_crt_order_decreasing);
+
+ H5E_BEGIN_TRY
+ {
+ H5Gclose(subgroup_id);
+ subgroup_id = H5I_INVALID_HID;
+ H5Fclose(ext_file_id);
+ ext_file_id = H5I_INVALID_HID;
+ }
+ H5E_END_TRY;
+
+ PART_BEGIN(H5Lget_name_by_idx_external_name_order_increasing)
+ {
+ TESTING_2("H5Lget_name_by_idx on external link by alphabetical order in increasing order");
+#ifndef NO_EXTERNAL_LINKS
+ /* Create file for external link to reference */
+ HDsnprintf(ext_link_filename, H5_API_TEST_FILENAME_MAX_LENGTH, "%s",
+ EXTERNAL_LINK_TEST_FILE_NAME);
+
+ if ((ext_file_id = H5Fcreate(ext_link_filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create file '%s' for external link to reference\n", ext_link_filename);
+ PART_ERROR(H5Lget_name_by_idx_external_name_order_increasing);
+ }
+
+ if (H5Fclose(ext_file_id) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't close file '%s'\n", ext_link_filename);
+ PART_ERROR(H5Lget_name_by_idx_external_name_order_increasing);
+ }
+
+ /* Create group to hold some links */
+ if ((subgroup_id = H5Gcreate2(group_id, GET_LINK_NAME_TEST_EXTERNAL_SUBGROUP_NAME3, H5P_DEFAULT,
+ gcpl_id, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container subgroup '%s'\n",
+ GET_LINK_NAME_TEST_EXTERNAL_SUBGROUP_NAME3);
+ PART_ERROR(H5Lget_name_by_idx_external_name_order_increasing);
+ }
+
+ /* Create several external links */
+ if (H5Lcreate_external(ext_link_filename, "/", subgroup_id, GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME,
+ H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to create external link '%s'\n", GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME);
+ PART_ERROR(H5Lget_name_by_idx_external_name_order_increasing);
+ }
+
+ if (H5Lcreate_external(ext_link_filename, "/", subgroup_id,
+ GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME2, H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to create external link '%s'\n", GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME2);
+ PART_ERROR(H5Lget_name_by_idx_external_name_order_increasing);
+ }
+
+ if (H5Lcreate_external(ext_link_filename, "/", subgroup_id,
+ GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME3, H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to create external link '%s'\n", GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME3);
+ PART_ERROR(H5Lget_name_by_idx_external_name_order_increasing);
+ }
+
+ /* Verify the links have been created */
+ if ((link_exists = H5Lexists(subgroup_id, GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME, H5P_DEFAULT)) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if external link '%s' exists\n",
+ GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME);
+ PART_ERROR(H5Lget_name_by_idx_external_name_order_increasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" external link '%s' did not exist before name retrieval\n",
+ GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME);
+ PART_ERROR(H5Lget_name_by_idx_external_name_order_increasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME2, H5P_DEFAULT)) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if external link '%s' exists\n",
+ GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME2);
+ PART_ERROR(H5Lget_name_by_idx_external_name_order_increasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" external link '%s' did not exist before name retrieval\n",
+ GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME2);
+ PART_ERROR(H5Lget_name_by_idx_external_name_order_increasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME3, H5P_DEFAULT)) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if external link '%s' exists\n",
+ GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME3);
+ PART_ERROR(H5Lget_name_by_idx_external_name_order_increasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" external link '%s' did not exist before name retrieval\n",
+ GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME3);
+ PART_ERROR(H5Lget_name_by_idx_external_name_order_increasing);
+ }
+
+ /* Retrieve link names */
+ if ((link_name_buf_size = H5Lget_name_by_idx(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_INC, 0,
+ NULL, 0, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve link name size\n");
+ PART_ERROR(H5Lget_name_by_idx_external_name_order_increasing);
+ }
+
+ if (H5Lget_name_by_idx(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_INC, 0, link_name_buf,
+ (size_t)link_name_buf_size + 1, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve link name\n");
+ PART_ERROR(H5Lget_name_by_idx_external_name_order_increasing);
+ }
+
+ if (HDstrncmp(link_name_buf, GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME,
+ strlen(GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME) + 1)) {
+ H5_FAILED();
+ HDprintf(" link name '%s' did not match expected name '%s'\n", link_name_buf,
+ GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME);
+ PART_ERROR(H5Lget_name_by_idx_external_name_order_increasing);
+ }
+
+ if ((link_name_buf_size = H5Lget_name_by_idx(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_INC, 1,
+ NULL, 0, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve link name size\n");
+ PART_ERROR(H5Lget_name_by_idx_external_name_order_increasing);
+ }
+
+ if (H5Lget_name_by_idx(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_INC, 1, link_name_buf,
+ (size_t)link_name_buf_size + 1, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve link name\n");
+ PART_ERROR(H5Lget_name_by_idx_external_name_order_increasing);
+ }
+
+ if (HDstrncmp(link_name_buf, GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME2,
+ strlen(GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME2) + 1)) {
+ H5_FAILED();
+ HDprintf(" link name '%s' did not match expected name '%s'\n", link_name_buf,
+ GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME2);
+ PART_ERROR(H5Lget_name_by_idx_external_name_order_increasing);
+ }
+
+ if ((link_name_buf_size = H5Lget_name_by_idx(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_INC, 2,
+ NULL, 0, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve link name size\n");
+ PART_ERROR(H5Lget_name_by_idx_external_name_order_increasing);
+ }
+
+ if (H5Lget_name_by_idx(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_INC, 2, link_name_buf,
+ (size_t)link_name_buf_size + 1, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve link name\n");
+ PART_ERROR(H5Lget_name_by_idx_external_name_order_increasing);
+ }
+
+ if (HDstrncmp(link_name_buf, GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME3,
+ strlen(GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME3) + 1)) {
+ H5_FAILED();
+ HDprintf(" link name '%s' did not match expected name '%s'\n", link_name_buf,
+ GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME3);
+ PART_ERROR(H5Lget_name_by_idx_external_name_order_increasing);
+ }
+
+ if (H5Gclose(subgroup_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close group\n");
+ PART_ERROR(H5Lget_name_by_idx_external_name_order_increasing);
+ }
+
+ PASSED();
+#else
+ SKIPPED();
+ PART_EMPTY(H5Lget_name_by_idx_external_name_order_increasing);
+#endif
+ }
+ PART_END(H5Lget_name_by_idx_external_name_order_increasing);
+
+ H5E_BEGIN_TRY
+ {
+ H5Gclose(subgroup_id);
+ subgroup_id = H5I_INVALID_HID;
+ H5Fclose(ext_file_id);
+ ext_file_id = H5I_INVALID_HID;
+ }
+ H5E_END_TRY;
+
+ PART_BEGIN(H5Lget_name_by_idx_external_name_order_decreasing)
+ {
+ TESTING_2("H5Lget_name_by_idx on external link by alphabetical order in decreasing order");
+#if !defined(NO_EXTERNAL_LINKS) && !defined(NO_DECREASING_ALPHA_ITER_ORDER)
+ /* Create file for external link to reference */
+ HDsnprintf(ext_link_filename, H5_API_TEST_FILENAME_MAX_LENGTH, "%s",
+ EXTERNAL_LINK_TEST_FILE_NAME);
+
+ if ((ext_file_id = H5Fcreate(ext_link_filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create file '%s' for external link to reference\n", ext_link_filename);
+ PART_ERROR(H5Lget_name_by_idx_external_name_order_decreasing);
+ }
+
+ if (H5Fclose(ext_file_id) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't close file '%s'\n", ext_link_filename);
+ PART_ERROR(H5Lget_name_by_idx_external_name_order_decreasing);
+ }
+
+ /* Create group to hold some links */
+ if ((subgroup_id = H5Gcreate2(group_id, GET_LINK_NAME_TEST_EXTERNAL_SUBGROUP_NAME4, H5P_DEFAULT,
+ gcpl_id, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container subgroup '%s'\n",
+ GET_LINK_NAME_TEST_EXTERNAL_SUBGROUP_NAME4);
+ PART_ERROR(H5Lget_name_by_idx_external_name_order_decreasing);
+ }
+
+ /* Create several external links */
+ if (H5Lcreate_external(ext_link_filename, "/", subgroup_id, GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME,
+ H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to create external link '%s'\n", GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME);
+ PART_ERROR(H5Lget_name_by_idx_external_name_order_decreasing);
+ }
+
+ if (H5Lcreate_external(ext_link_filename, "/", subgroup_id,
+ GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME2, H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to create external link '%s'\n", GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME2);
+ PART_ERROR(H5Lget_name_by_idx_external_name_order_decreasing);
+ }
+
+ if (H5Lcreate_external(ext_link_filename, "/", subgroup_id,
+ GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME3, H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to create external link '%s'\n", GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME3);
+ PART_ERROR(H5Lget_name_by_idx_external_name_order_decreasing);
+ }
+
+ /* Verify the links have been created */
+ if ((link_exists = H5Lexists(subgroup_id, GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME, H5P_DEFAULT)) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if external link '%s' exists\n",
+ GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME);
+ PART_ERROR(H5Lget_name_by_idx_external_name_order_decreasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" external link '%s' did not exist before name retrieval\n",
+ GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME);
+ PART_ERROR(H5Lget_name_by_idx_external_name_order_decreasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME2, H5P_DEFAULT)) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if external link '%s' exists\n",
+ GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME2);
+ PART_ERROR(H5Lget_name_by_idx_external_name_order_decreasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" external link '%s' did not exist before name retrieval\n",
+ GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME2);
+ PART_ERROR(H5Lget_name_by_idx_external_name_order_decreasing);
+ }
+
+ if ((link_exists = H5Lexists(subgroup_id, GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME3, H5P_DEFAULT)) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if external link '%s' exists\n",
+ GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME3);
+ PART_ERROR(H5Lget_name_by_idx_external_name_order_decreasing);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" external link '%s' did not exist before name retrieval\n",
+ GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME3);
+ PART_ERROR(H5Lget_name_by_idx_external_name_order_decreasing);
+ }
+
+ /* Retrieve link names */
+ if ((link_name_buf_size = H5Lget_name_by_idx(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_DEC, 0,
+ NULL, 0, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve link name size\n");
+ PART_ERROR(H5Lget_name_by_idx_external_name_order_decreasing);
+ }
+
+ if (H5Lget_name_by_idx(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_DEC, 0, link_name_buf,
+ (size_t)link_name_buf_size + 1, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve link name\n");
+ PART_ERROR(H5Lget_name_by_idx_external_name_order_decreasing);
+ }
+
+ if (HDstrncmp(link_name_buf, GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME3,
+ strlen(GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME3) + 1)) {
+ H5_FAILED();
+ HDprintf(" link name '%s' did not match expected name '%s'\n", link_name_buf,
+ GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME3);
+ PART_ERROR(H5Lget_name_by_idx_external_name_order_decreasing);
+ }
+
+ if ((link_name_buf_size = H5Lget_name_by_idx(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_DEC, 1,
+ NULL, 0, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve link name size\n");
+ PART_ERROR(H5Lget_name_by_idx_external_name_order_decreasing);
+ }
+
+ if (H5Lget_name_by_idx(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_DEC, 1, link_name_buf,
+ (size_t)link_name_buf_size + 1, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve link name\n");
+ PART_ERROR(H5Lget_name_by_idx_external_name_order_decreasing);
+ }
+
+ if (HDstrncmp(link_name_buf, GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME2,
+ strlen(GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME2) + 1)) {
+ H5_FAILED();
+ HDprintf(" link name '%s' did not match expected name '%s'\n", link_name_buf,
+ GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME2);
+ PART_ERROR(H5Lget_name_by_idx_external_name_order_decreasing);
+ }
+
+ if ((link_name_buf_size = H5Lget_name_by_idx(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_DEC, 2,
+ NULL, 0, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve link name size\n");
+ PART_ERROR(H5Lget_name_by_idx_external_name_order_decreasing);
+ }
+
+ if (H5Lget_name_by_idx(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_DEC, 2, link_name_buf,
+ (size_t)link_name_buf_size + 1, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve link name\n");
+ PART_ERROR(H5Lget_name_by_idx_external_name_order_decreasing);
+ }
+
+ if (HDstrncmp(link_name_buf, GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME,
+ strlen(GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME) + 1)) {
+ H5_FAILED();
+ HDprintf(" link name '%s' did not match expected name '%s'\n", link_name_buf,
+ GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME);
+ PART_ERROR(H5Lget_name_by_idx_external_name_order_decreasing);
+ }
+
+ if (H5Gclose(subgroup_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close group\n");
+ PART_ERROR(H5Lget_name_by_idx_external_name_order_decreasing);
+ }
+
+ PASSED();
+#else
+ SKIPPED();
+ PART_EMPTY(H5Lget_name_by_idx_external_name_order_decreasing);
+#endif
+ }
+ PART_END(H5Lget_name_by_idx_external_name_order_decreasing);
+
+ H5E_BEGIN_TRY
+ {
+ H5Gclose(subgroup_id);
+ subgroup_id = H5I_INVALID_HID;
+ H5Fclose(ext_file_id);
+ ext_file_id = H5I_INVALID_HID;
+ }
+ H5E_END_TRY;
+
+ PART_BEGIN(H5Lget_name_by_idx_ud_crt_order_increasing)
+ {
+ TESTING_2("H5Lget_name_by_idx on user-defined link by creation order in increasing order");
+
+ /* TODO */
+
+ SKIPPED();
+ PART_EMPTY(H5Lget_name_by_idx_ud_crt_order_increasing);
+ }
+ PART_END(H5Lget_name_by_idx_ud_crt_order_increasing);
+
+ H5E_BEGIN_TRY
+ {
+ H5Gclose(subgroup_id);
+ subgroup_id = H5I_INVALID_HID;
+ }
+ H5E_END_TRY;
+
+ PART_BEGIN(H5Lget_name_by_idx_ud_crt_order_decreasing)
+ {
+ TESTING_2("H5Lget_name_by_idx on user-defined link by creation order in decreasing order");
+
+ /* TODO */
+
+ SKIPPED();
+ PART_EMPTY(H5Lget_name_by_idx_ud_crt_order_decreasing);
+ }
+ PART_END(H5Lget_name_by_idx_ud_crt_order_decreasing);
+
+ H5E_BEGIN_TRY
+ {
+ H5Gclose(subgroup_id);
+ subgroup_id = H5I_INVALID_HID;
+ }
+ H5E_END_TRY;
+
+ PART_BEGIN(H5Lget_name_by_idx_ud_name_order_increasing)
+ {
+ TESTING_2("H5Lget_name_by_idx on user-defined link by alphabetical order in increasing order");
+
+ /* TODO */
+
+ SKIPPED();
+ PART_EMPTY(H5Lget_name_by_idx_ud_name_order_increasing);
+ }
+ PART_END(H5Lget_name_by_idx_ud_name_order_increasing);
+
+ H5E_BEGIN_TRY
+ {
+ H5Gclose(subgroup_id);
+ subgroup_id = H5I_INVALID_HID;
+ }
+ H5E_END_TRY;
+
+ PART_BEGIN(H5Lget_name_by_idx_ud_name_order_decreasing)
+ {
+ TESTING_2("H5Lget_name_by_idx on user-defined link by alphabetical order in decreasing order");
+
+ /* TODO */
+
+ SKIPPED();
+ PART_EMPTY(H5Lget_name_by_idx_ud_name_order_decreasing);
+ }
+ PART_END(H5Lget_name_by_idx_ud_name_order_decreasing);
+
+ H5E_BEGIN_TRY
+ {
+ H5Gclose(subgroup_id);
+ subgroup_id = H5I_INVALID_HID;
+ }
+ H5E_END_TRY;
+ }
+ END_MULTIPART;
+
+ TESTING_2("test cleanup");
+
+ if (H5Pclose(gcpl_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Pclose(gcpl_id);
+ H5Gclose(subgroup_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(ext_file_id);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to check that a link's name can't be retrieved
+ * when H5Lget_name_by_idx is passed invalid parameters.
+ */
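+/*
+ * Illustrative sketch (not part of the test code): these tests use
+ * H5Lget_name_by_idx with a two-call pattern, first passing a NULL buffer to
+ * query the name length and then retrieving the name into a buffer sized one
+ * byte larger for the NUL terminator. Error checks are omitted and `grp' and
+ * `buf' are hypothetical names:
+ *
+ * ssize_t name_len = H5Lget_name_by_idx(grp, ".", H5_INDEX_NAME, H5_ITER_INC,
+ * 0, NULL, 0, H5P_DEFAULT);
+ * char *buf = (char *)HDmalloc((size_t)name_len + 1);
+ * H5Lget_name_by_idx(grp, ".", H5_INDEX_NAME, H5_ITER_INC, 0, buf,
+ * (size_t)name_len + 1, H5P_DEFAULT);
+ * HDfree(buf);
+ */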
+static int
+test_get_link_name_invalid_params(void)
+{
+ ssize_t ret;
+ htri_t link_exists;
+ size_t link_name_buf_size = 0;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ char *link_name_buf = NULL;
+
+ TESTING_MULTIPART("link name retrieval with invalid parameters");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_LINK_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_LINK_MORE) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_HARD_LINKS) || !(vol_cap_flags_g & H5VL_CAP_FLAG_EXTERNAL_LINKS) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_SOFT_LINKS) || !(vol_cap_flags_g & H5VL_CAP_FLAG_CREATION_ORDER)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file, group, basic, more, soft, hard, external link, or "
+ "creation order aren't supported with this connector\n");
+ return 0;
+ }
+
+ TESTING_2("test setup");
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, LINK_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", LINK_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, GET_LINK_NAME_INVALID_PARAMS_TEST_GROUP_NAME, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container subgroup '%s'\n",
+ GET_LINK_NAME_INVALID_PARAMS_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if (H5Lcreate_hard(group_id, ".", group_id, GET_LINK_NAME_INVALID_PARAMS_TEST_HARD_LINK_NAME, H5P_DEFAULT,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to create hard link '%s'\n", GET_LINK_NAME_INVALID_PARAMS_TEST_HARD_LINK_NAME);
+ goto error;
+ }
+
+ /* Verify the link has been created */
+ if ((link_exists = H5Lexists(group_id, GET_LINK_NAME_INVALID_PARAMS_TEST_HARD_LINK_NAME, H5P_DEFAULT)) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if link exists\n");
+ goto error;
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" link did not exist\n");
+ goto error;
+ }
+
+ if ((ret = H5Lget_name_by_idx(group_id, ".", H5_INDEX_NAME, H5_ITER_INC, 0, NULL, link_name_buf_size,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve link name size\n");
+ goto error;
+ }
+
+ link_name_buf_size = (size_t)ret;
+ if (NULL == (link_name_buf = (char *)HDmalloc(link_name_buf_size + 1)))
+ TEST_ERROR;
+
+ PASSED();
+
+ BEGIN_MULTIPART
+ {
+ PART_BEGIN(H5Lget_name_by_idx_invalid_loc_id)
+ {
+ TESTING_2("H5Lget_name_by_idx with an invalid location ID");
+
+ H5E_BEGIN_TRY
+ {
+ ret = H5Lget_name_by_idx(H5I_INVALID_HID, ".", H5_INDEX_NAME, H5_ITER_INC, 0, link_name_buf,
+ link_name_buf_size + 1, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (ret >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Lget_name_by_idx succeeded with an invalid location ID!\n");
+ PART_ERROR(H5Lget_name_by_idx_invalid_loc_id);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lget_name_by_idx_invalid_loc_id);
+
+ PART_BEGIN(H5Lget_name_by_idx_invalid_grp_name)
+ {
+ TESTING_2("H5Lget_name_by_idx with an invalid group name");
+
+ H5E_BEGIN_TRY
+ {
+ ret = H5Lget_name_by_idx(group_id, NULL, H5_INDEX_NAME, H5_ITER_INC, 0, link_name_buf,
+ link_name_buf_size + 1, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (ret >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Lget_name_by_idx succeeded with a NULL group name!\n");
+ PART_ERROR(H5Lget_name_by_idx_invalid_grp_name);
+ }
+
+ H5E_BEGIN_TRY
+ {
+ ret = H5Lget_name_by_idx(group_id, "", H5_INDEX_NAME, H5_ITER_INC, 0, link_name_buf,
+ link_name_buf_size + 1, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (ret >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Lget_name_by_idx succeeded with an invalid group name of ''!\n");
+ PART_ERROR(H5Lget_name_by_idx_invalid_grp_name);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lget_name_by_idx_invalid_grp_name);
+
+ PART_BEGIN(H5Lget_name_by_idx_invalid_index_type)
+ {
+ TESTING_2("H5Lget_name_by_idx with an invalid index type");
+
+ H5E_BEGIN_TRY
+ {
+ ret = H5Lget_name_by_idx(group_id, ".", H5_INDEX_UNKNOWN, H5_ITER_INC, 0, link_name_buf,
+ link_name_buf_size + 1, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (ret >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Lget_name_by_idx succeeded with invalid index type H5_INDEX_UNKNOWN!\n");
+ PART_ERROR(H5Lget_name_by_idx_invalid_index_type);
+ }
+
+ H5E_BEGIN_TRY
+ {
+ ret = H5Lget_name_by_idx(group_id, ".", H5_INDEX_N, H5_ITER_INC, 0, link_name_buf,
+ link_name_buf_size + 1, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (ret >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Lget_name_by_idx succeeded with invalid index type H5_INDEX_N!\n");
+ PART_ERROR(H5Lget_name_by_idx_invalid_index_type);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lget_name_by_idx_invalid_index_type);
+
+ PART_BEGIN(H5Lget_name_by_idx_invalid_iter_order)
+ {
+ TESTING_2("H5Lget_name_by_idx with an invalid iteration ordering");
+
+ H5E_BEGIN_TRY
+ {
+ ret = H5Lget_name_by_idx(group_id, ".", H5_INDEX_NAME, H5_ITER_UNKNOWN, 0, link_name_buf,
+ link_name_buf_size + 1, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (ret >= 0) {
+ H5_FAILED();
+ HDprintf(
+ " H5Lget_name_by_idx succeeded with invalid iteration ordering H5_ITER_UNKNOWN!\n");
+ PART_ERROR(H5Lget_name_by_idx_invalid_iter_order);
+ }
+
+ H5E_BEGIN_TRY
+ {
+ ret = H5Lget_name_by_idx(group_id, ".", H5_INDEX_NAME, H5_ITER_N, 0, link_name_buf,
+ link_name_buf_size + 1, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (ret >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Lget_name_by_idx succeeded with invalid iteration ordering H5_ITER_N!\n");
+ PART_ERROR(H5Lget_name_by_idx_invalid_iter_order);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lget_name_by_idx_invalid_iter_order);
+
+ PART_BEGIN(H5Lget_name_by_idx_invalid_lapl)
+ {
+ TESTING_2("H5Lget_name_by_idx with an invalid LAPL");
+
+ H5E_BEGIN_TRY
+ {
+ ret = H5Lget_name_by_idx(group_id, ".", H5_INDEX_NAME, H5_ITER_INC, 0, link_name_buf,
+ link_name_buf_size + 1, H5I_INVALID_HID);
+ }
+ H5E_END_TRY;
+
+ if (ret >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Lget_name_by_idx succeeded with an invalid LAPL!\n");
+ PART_ERROR(H5Lget_name_by_idx_invalid_lapl);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lget_name_by_idx_invalid_lapl);
+ }
+ END_MULTIPART;
+
+ TESTING_2("test cleanup");
+
+ if (link_name_buf) {
+ HDfree(link_name_buf);
+ link_name_buf = NULL;
+ }
+
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ if (link_name_buf)
+ HDfree(link_name_buf);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to check the functionality of link
+ * iteration using H5Literate(_by_name)2 with
+ * only hard links. Iteration is done in
+ * increasing and decreasing order of both link
+ * name and link creation order.
+ */
+static int
+test_link_iterate_hard_links(void)
+{
+ size_t i;
+ htri_t link_exists;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t gcpl_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t dset_dtype = H5I_INVALID_HID;
+ hid_t dset_dspace = H5I_INVALID_HID;
+
+ TESTING_MULTIPART("link iteration (only hard links)");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_LINK_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_ITERATE) || !(vol_cap_flags_g & H5VL_CAP_FLAG_CREATION_ORDER)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file, group, dataset, link, or iterate aren't supported with "
+ "this connector\n");
+ return 0;
+ }
+
+ TESTING_2("test setup");
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, LINK_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", LINK_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((gcpl_id = H5Pcreate(H5P_GROUP_CREATE)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create GCPL for link creation order tracking\n");
+ goto error;
+ }
+
+ if (H5Pset_link_creation_order(gcpl_id, H5P_CRT_ORDER_TRACKED) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't set link creation order tracking\n");
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, LINK_ITER_HARD_LINKS_TEST_SUBGROUP_NAME, H5P_DEFAULT, gcpl_id,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container subgroup '%s'\n", LINK_ITER_HARD_LINKS_TEST_SUBGROUP_NAME);
+ goto error;
+ }
+
+ if ((dset_dtype = generate_random_datatype(H5T_NO_CLASS, FALSE)) < 0)
+ TEST_ERROR;
+
+ if ((dset_dspace =
+ generate_random_dataspace(LINK_ITER_HARD_LINKS_TEST_DSET_SPACE_RANK, NULL, NULL, FALSE)) < 0)
+ TEST_ERROR;
+
+ for (i = 0; i < LINK_ITER_HARD_LINKS_TEST_NUM_LINKS; i++) {
+ char dset_name[LINK_ITER_HARD_LINKS_TEST_BUF_SIZE];
+
+ /* Create the datasets with a reverse-ordering naming scheme to test creation order later */
+ HDsnprintf(dset_name, LINK_ITER_HARD_LINKS_TEST_BUF_SIZE, LINK_ITER_HARD_LINKS_TEST_LINK_NAME "%d",
+ (int)(LINK_ITER_HARD_LINKS_TEST_NUM_LINKS - i - 1));
+
+ if ((dset_id = H5Dcreate2(group_id, dset_name, dset_dtype, dset_dspace, H5P_DEFAULT, H5P_DEFAULT,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create hard link '%s'\n", dset_name);
+ goto error;
+ }
+
+ /* Verify the link has been created */
+ if ((link_exists = H5Lexists(group_id, dset_name, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if link '%s' exists\n", dset_name);
+ goto error;
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" link '%s' did not exist\n", dset_name);
+ goto error;
+ }
+
+ if (H5Dclose(dset_id) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't close dataset '%s'\n", dset_name);
+ goto error;
+ }
+ }
+
+ PASSED();
+
+ BEGIN_MULTIPART
+ {
+ /*
+ * NOTE: A counter is passed to the iteration callback so that the link
+ * visited at each step of the following iterations can be checked against
+ * the link expected for that step. This verifies that the links are
+ * returned in the correct order.
+ */
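+
+ /*
+ * Illustrative sketch of the general shape of such a callback; the names
+ * below are hypothetical. The real link_iter_hard_links_cb used in this
+ * test is defined elsewhere in this file and additionally checks each
+ * visited link's name against the name expected for the current counter
+ * value:
+ *
+ * static herr_t
+ * example_link_iter_cb(hid_t group, const char *name,
+ * const H5L_info2_t *info, void *op_data)
+ * {
+ * size_t *counter = (size_t *)op_data;
+ * (*counter)++;
+ * return H5_ITER_CONT;
+ * }
+ */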
+
+ PART_BEGIN(H5Literate_link_name_increasing)
+ {
+ TESTING_2("H5Literate2 by link name in increasing order");
+
+ i = 0;
+
+ /* Test basic link iteration capability using both index types and both index orders */
+ if (H5Literate2(group_id, H5_INDEX_NAME, H5_ITER_INC, NULL, link_iter_hard_links_cb, &i) < 0) {
+ H5_FAILED();
+ HDprintf(" H5Literate2 by index type name in increasing order failed\n");
+ PART_ERROR(H5Literate_link_name_increasing);
+ }
+
+ if (i != LINK_ITER_HARD_LINKS_TEST_NUM_LINKS) {
+ H5_FAILED();
+ HDprintf(" some links were not iterated over!\n");
+ PART_ERROR(H5Literate_link_name_increasing);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Literate_link_name_increasing);
+
+ PART_BEGIN(H5Literate_link_name_decreasing)
+ {
+ TESTING_2("H5Literate2 by link name in decreasing order");
+#ifndef NO_DECREASING_ALPHA_ITER_ORDER
+ /* Reset the counter to the appropriate value for the next test */
+ i = LINK_ITER_HARD_LINKS_TEST_NUM_LINKS;
+
+ if (H5Literate2(group_id, H5_INDEX_NAME, H5_ITER_DEC, NULL, link_iter_hard_links_cb, &i) < 0) {
+ H5_FAILED();
+ HDprintf(" H5Literate2 by index type name in decreasing order failed\n");
+ PART_ERROR(H5Literate_link_name_decreasing);
+ }
+
+ if (i != 2 * LINK_ITER_HARD_LINKS_TEST_NUM_LINKS) {
+ H5_FAILED();
+ HDprintf(" some links were not iterated over!\n");
+ PART_ERROR(H5Literate_link_name_decreasing);
+ }
+
+ PASSED();
+#else
+ SKIPPED();
+ PART_EMPTY(H5Literate_link_name_decreasing);
+#endif
+ }
+ PART_END(H5Literate_link_name_decreasing);
+
+ PART_BEGIN(H5Literate_link_creation_increasing)
+ {
+ TESTING_2("H5Literate2 by creation order in increasing order");
+
+ /* Reset the counter to the appropriate value for the next test */
+ i = 2 * LINK_ITER_HARD_LINKS_TEST_NUM_LINKS;
+
+ if (H5Literate2(group_id, H5_INDEX_CRT_ORDER, H5_ITER_INC, NULL, link_iter_hard_links_cb, &i) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" H5Literate2 by index type creation order in increasing order failed\n");
+ PART_ERROR(H5Literate_link_creation_increasing);
+ }
+
+ if (i != 3 * LINK_ITER_HARD_LINKS_TEST_NUM_LINKS) {
+ H5_FAILED();
+ HDprintf(" some links were not iterated over!\n");
+ PART_ERROR(H5Literate_link_creation_increasing);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Literate_link_creation_increasing);
+
+ PART_BEGIN(H5Literate_link_creation_decreasing)
+ {
+ TESTING_2("H5Literate2 by creation order in decreasing order");
+
+ /* Reset the counter to the appropriate value for the next test */
+ i = 3 * LINK_ITER_HARD_LINKS_TEST_NUM_LINKS;
+
+ if (H5Literate2(group_id, H5_INDEX_CRT_ORDER, H5_ITER_DEC, NULL, link_iter_hard_links_cb, &i) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" H5Literate2 by index type creation order in decreasing order failed\n");
+ PART_ERROR(H5Literate_link_creation_decreasing);
+ }
+
+ if (i != 4 * LINK_ITER_HARD_LINKS_TEST_NUM_LINKS) {
+ H5_FAILED();
+ HDprintf(" some links were not iterated over!\n");
+ PART_ERROR(H5Literate_link_creation_decreasing);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Literate_link_creation_decreasing);
+
+ PART_BEGIN(H5Literate_by_name_link_name_increasing)
+ {
+ TESTING_2("H5Literate_by_name2 by link name in increasing order");
+
+ /* Reset the counter to the appropriate value for the next test */
+ i = 0;
+
+ if (H5Literate_by_name2(
+ file_id, "/" LINK_TEST_GROUP_NAME "/" LINK_ITER_HARD_LINKS_TEST_SUBGROUP_NAME,
+ H5_INDEX_NAME, H5_ITER_INC, NULL, link_iter_hard_links_cb, &i, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" H5Literate_by_name2 by index type name in increasing order failed\n");
+ PART_ERROR(H5Literate_by_name_link_name_increasing);
+ }
+
+ if (i != LINK_ITER_HARD_LINKS_TEST_NUM_LINKS) {
+ H5_FAILED();
+ HDprintf(" some links were not iterated over!\n");
+ PART_ERROR(H5Literate_by_name_link_name_increasing);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Literate_by_name_link_name_increasing);
+
+ PART_BEGIN(H5Literate_by_name_link_name_decreasing)
+ {
+ TESTING_2("H5Literate_by_name2 by link name in decreasing order");
+#ifndef NO_DECREASING_ALPHA_ITER_ORDER
+ /* Reset the counter to the appropriate value for the next test */
+ i = LINK_ITER_HARD_LINKS_TEST_NUM_LINKS;
+
+ if (H5Literate_by_name2(
+ file_id, "/" LINK_TEST_GROUP_NAME "/" LINK_ITER_HARD_LINKS_TEST_SUBGROUP_NAME,
+ H5_INDEX_NAME, H5_ITER_DEC, NULL, link_iter_hard_links_cb, &i, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" H5Literate_by_name2 by index type name in decreasing order failed\n");
+ PART_ERROR(H5Literate_by_name_link_name_decreasing);
+ }
+
+ if (i != 2 * LINK_ITER_HARD_LINKS_TEST_NUM_LINKS) {
+ H5_FAILED();
+ HDprintf(" some links were not iterated over!\n");
+ PART_ERROR(H5Literate_by_name_link_name_decreasing);
+ }
+
+ PASSED();
+#else
+ SKIPPED();
+ PART_EMPTY(H5Literate_by_name_link_name_decreasing);
+#endif
+ }
+ PART_END(H5Literate_by_name_link_name_decreasing);
+
+ PART_BEGIN(H5Literate_by_name_creation_increasing)
+ {
+ TESTING_2("H5Literate_by_name2 by creation order in increasing order");
+
+ /* Reset the counter to the appropriate value for the next test */
+ i = 2 * LINK_ITER_HARD_LINKS_TEST_NUM_LINKS;
+
+ if (H5Literate_by_name2(
+ file_id, "/" LINK_TEST_GROUP_NAME "/" LINK_ITER_HARD_LINKS_TEST_SUBGROUP_NAME,
+ H5_INDEX_CRT_ORDER, H5_ITER_INC, NULL, link_iter_hard_links_cb, &i, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" H5Literate_by_name2 by index type creation order in increasing order failed\n");
+ PART_ERROR(H5Literate_by_name_creation_increasing);
+ }
+
+ if (i != 3 * LINK_ITER_HARD_LINKS_TEST_NUM_LINKS) {
+ H5_FAILED();
+ HDprintf(" some links were not iterated over!\n");
+ PART_ERROR(H5Literate_by_name_creation_increasing);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Literate_by_name_creation_increasing);
+
+ PART_BEGIN(H5Literate_by_name_creation_decreasing)
+ {
+ TESTING_2("H5Literate_by_name2 by creation order in decreasing order");
+
+ /* Reset the counter to the appropriate value for the next test */
+ i = 3 * LINK_ITER_HARD_LINKS_TEST_NUM_LINKS;
+
+ if (H5Literate_by_name2(
+ file_id, "/" LINK_TEST_GROUP_NAME "/" LINK_ITER_HARD_LINKS_TEST_SUBGROUP_NAME,
+ H5_INDEX_CRT_ORDER, H5_ITER_DEC, NULL, link_iter_hard_links_cb, &i, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" H5Literate_by_name2 by index type creation order in decreasing order failed\n");
+ PART_ERROR(H5Literate_by_name_creation_decreasing);
+ }
+
+ if (i != 4 * LINK_ITER_HARD_LINKS_TEST_NUM_LINKS) {
+ H5_FAILED();
+ HDprintf(" some links were not iterated over!\n");
+ PART_ERROR(H5Literate_by_name_creation_decreasing);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Literate_by_name_creation_decreasing);
+ }
+ END_MULTIPART;
+
+ TESTING_2("test cleanup");
+
+ if (H5Sclose(dset_dspace) < 0)
+ TEST_ERROR;
+ if (H5Tclose(dset_dtype) < 0)
+ TEST_ERROR;
+ if (H5Pclose(gcpl_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(dset_dspace);
+ H5Tclose(dset_dtype);
+ H5Dclose(dset_id);
+ H5Pclose(gcpl_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to check the functionality of link
+ * iteration using H5Literate(_by_name)2 with
+ * only soft links. Iteration is done in
+ * increasing and decreasing order of both link
+ * name and link creation order.
+ */
+static int
+test_link_iterate_soft_links(void)
+{
+ size_t i;
+ htri_t link_exists;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t gcpl_id = H5I_INVALID_HID;
+
+ TESTING_MULTIPART("link iteration (only soft links)");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_LINK_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_SOFT_LINKS) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_ITERATE) || !(vol_cap_flags_g & H5VL_CAP_FLAG_CREATION_ORDER)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file, group, link, or iterate aren't supported with this "
+ "connector\n");
+ return 0;
+ }
+
+ TESTING_2("test setup");
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, LINK_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", LINK_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((gcpl_id = H5Pcreate(H5P_GROUP_CREATE)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create GCPL for link creation order tracking\n");
+ goto error;
+ }
+
+ if (H5Pset_link_creation_order(gcpl_id, H5P_CRT_ORDER_TRACKED) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't set link creation order tracking\n");
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, LINK_ITER_SOFT_LINKS_TEST_SUBGROUP_NAME, H5P_DEFAULT, gcpl_id,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container subgroup '%s'\n", LINK_ITER_SOFT_LINKS_TEST_SUBGROUP_NAME);
+ goto error;
+ }
+
+ for (i = 0; i < LINK_ITER_SOFT_LINKS_TEST_NUM_LINKS; i++) {
+ char link_name[LINK_ITER_SOFT_LINKS_TEST_BUF_SIZE];
+ char link_target[LINK_ITER_SOFT_LINKS_TEST_BUF_SIZE];
+
+ /* Create the links with a reverse-ordering naming scheme to test creation order later */
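+ /* Note: a soft link only stores its target path as a string, so the "target%d"
+ * objects referenced below never need to exist */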
+ HDsnprintf(link_name, LINK_ITER_SOFT_LINKS_TEST_BUF_SIZE, LINK_ITER_SOFT_LINKS_TEST_LINK_NAME "%d",
+ (int)(LINK_ITER_SOFT_LINKS_TEST_NUM_LINKS - i - 1));
+
+ HDsnprintf(link_target, LINK_ITER_SOFT_LINKS_TEST_BUF_SIZE, "target%d",
+ (int)(LINK_ITER_SOFT_LINKS_TEST_NUM_LINKS - i - 1));
+
+ if (H5Lcreate_soft(link_target, group_id, link_name, H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create soft link '%s'\n", link_name);
+ goto error;
+ }
+
+ /* Verify the link has been created */
+ if ((link_exists = H5Lexists(group_id, link_name, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if link '%s' exists\n", link_name);
+ goto error;
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" link '%s' did not exist\n", link_name);
+ goto error;
+ }
+ }
+
+ PASSED();
+
+ BEGIN_MULTIPART
+ {
+ /*
+ * NOTE: A counter is passed to the iteration callback so that the link
+ * visited at each step of the following iterations can be checked against
+ * the link expected for that step. This verifies that the links are
+ * returned in the correct order.
+ */
+
+ PART_BEGIN(H5Literate_link_name_increasing)
+ {
+ TESTING_2("H5Literate2 by link name in increasing order");
+
+ i = 0;
+
+ /* Test basic link iteration capability using both index types and both index orders */
+ if (H5Literate2(group_id, H5_INDEX_NAME, H5_ITER_INC, NULL, link_iter_soft_links_cb, &i) < 0) {
+ H5_FAILED();
+ HDprintf(" H5Literate2 by index type name in increasing order failed\n");
+ PART_ERROR(H5Literate_link_name_increasing);
+ }
+
+ if (i != LINK_ITER_SOFT_LINKS_TEST_NUM_LINKS) {
+ H5_FAILED();
+ HDprintf(" some links were not iterated over!\n");
+ PART_ERROR(H5Literate_link_name_increasing);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Literate_link_name_increasing);
+
+ PART_BEGIN(H5Literate_link_name_decreasing)
+ {
+ TESTING_2("H5Literate2 by link name in decreasing order");
+#ifndef NO_DECREASING_ALPHA_ITER_ORDER
+ /* Reset the counter to the appropriate value for the next test */
+ i = LINK_ITER_SOFT_LINKS_TEST_NUM_LINKS;
+
+ if (H5Literate2(group_id, H5_INDEX_NAME, H5_ITER_DEC, NULL, link_iter_soft_links_cb, &i) < 0) {
+ H5_FAILED();
+ HDprintf(" H5Literate2 by index type name in decreasing order failed\n");
+ PART_ERROR(H5Literate_link_name_decreasing);
+ }
+
+ if (i != 2 * LINK_ITER_SOFT_LINKS_TEST_NUM_LINKS) {
+ H5_FAILED();
+ HDprintf(" some links were not iterated over!\n");
+ PART_ERROR(H5Literate_link_name_decreasing);
+ }
+
+ PASSED();
+#else
+ SKIPPED();
+ PART_EMPTY(H5Literate_link_name_decreasing);
+#endif
+ }
+ PART_END(H5Literate_link_name_decreasing);
+
+ PART_BEGIN(H5Literate_link_creation_increasing)
+ {
+ TESTING_2("H5Literate2 by creation order in increasing order");
+
+ /* Reset the counter to the appropriate value for the next test */
+ i = 2 * LINK_ITER_SOFT_LINKS_TEST_NUM_LINKS;
+
+ if (H5Literate2(group_id, H5_INDEX_CRT_ORDER, H5_ITER_INC, NULL, link_iter_soft_links_cb, &i) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" H5Literate2 by index type creation order in increasing order failed\n");
+ PART_ERROR(H5Literate_link_creation_increasing);
+ }
+
+ if (i != 3 * LINK_ITER_SOFT_LINKS_TEST_NUM_LINKS) {
+ H5_FAILED();
+ HDprintf(" some links were not iterated over!\n");
+ PART_ERROR(H5Literate_link_creation_increasing);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Literate_link_creation_increasing);
+
+ PART_BEGIN(H5Literate_link_creation_decreasing)
+ {
+ TESTING_2("H5Literate2 by creation order in decreasing order");
+
+ /* Reset the counter to the appropriate value for the next test */
+ i = 3 * LINK_ITER_SOFT_LINKS_TEST_NUM_LINKS;
+
+ if (H5Literate2(group_id, H5_INDEX_CRT_ORDER, H5_ITER_DEC, NULL, link_iter_soft_links_cb, &i) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" H5Literate2 by index type creation order in decreasing order failed\n");
+ PART_ERROR(H5Literate_link_creation_decreasing);
+ }
+
+ if (i != 4 * LINK_ITER_SOFT_LINKS_TEST_NUM_LINKS) {
+ H5_FAILED();
+ HDprintf(" some links were not iterated over!\n");
+ PART_ERROR(H5Literate_link_creation_decreasing);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Literate_link_creation_decreasing);
+
+ PART_BEGIN(H5Literate_by_name_link_name_increasing)
+ {
+ TESTING_2("H5Literate_by_name2 by link name in increasing order");
+
+ /* Reset the counter to the appropriate value for the next test */
+ i = 0;
+
+ if (H5Literate_by_name2(
+ file_id, "/" LINK_TEST_GROUP_NAME "/" LINK_ITER_SOFT_LINKS_TEST_SUBGROUP_NAME,
+ H5_INDEX_NAME, H5_ITER_INC, NULL, link_iter_soft_links_cb, &i, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" H5Literate_by_name2 by index type name in increasing order failed\n");
+ PART_ERROR(H5Literate_by_name_link_name_increasing);
+ }
+
+ if (i != LINK_ITER_SOFT_LINKS_TEST_NUM_LINKS) {
+ H5_FAILED();
+ HDprintf(" some links were not iterated over!\n");
+ PART_ERROR(H5Literate_by_name_link_name_increasing);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Literate_by_name_link_name_increasing);
+
+ PART_BEGIN(H5Literate_by_name_link_name_decreasing)
+ {
+ TESTING_2("H5Literate_by_name2 by link name in decreasing order");
+#ifndef NO_DECREASING_ALPHA_ITER_ORDER
+ /* Reset the counter to the appropriate value for the next test */
+ i = LINK_ITER_SOFT_LINKS_TEST_NUM_LINKS;
+
+ if (H5Literate_by_name2(
+ file_id, "/" LINK_TEST_GROUP_NAME "/" LINK_ITER_SOFT_LINKS_TEST_SUBGROUP_NAME,
+ H5_INDEX_NAME, H5_ITER_DEC, NULL, link_iter_soft_links_cb, &i, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" H5Literate_by_name2 by index type name in decreasing order failed\n");
+ PART_ERROR(H5Literate_by_name_link_name_decreasing);
+ }
+
+ if (i != 2 * LINK_ITER_SOFT_LINKS_TEST_NUM_LINKS) {
+ H5_FAILED();
+ HDprintf(" some links were not iterated over!\n");
+ PART_ERROR(H5Literate_by_name_link_name_decreasing);
+ }
+
+ PASSED();
+#else
+ SKIPPED();
+ PART_EMPTY(H5Literate_by_name_link_name_decreasing);
+#endif
+ }
+ PART_END(H5Literate_by_name_link_name_decreasing);
+
+ PART_BEGIN(H5Literate_by_name_creation_increasing)
+ {
+ TESTING_2("H5Literate_by_name2 by creation order in increasing order");
+
+ /* Reset the counter to the appropriate value for the next test */
+ i = 2 * LINK_ITER_SOFT_LINKS_TEST_NUM_LINKS;
+
+ if (H5Literate_by_name2(
+ file_id, "/" LINK_TEST_GROUP_NAME "/" LINK_ITER_SOFT_LINKS_TEST_SUBGROUP_NAME,
+ H5_INDEX_CRT_ORDER, H5_ITER_INC, NULL, link_iter_soft_links_cb, &i, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" H5Literate_by_name2 by index type creation order in increasing order failed\n");
+ PART_ERROR(H5Literate_by_name_creation_increasing);
+ }
+
+ if (i != 3 * LINK_ITER_SOFT_LINKS_TEST_NUM_LINKS) {
+ H5_FAILED();
+ HDprintf(" some links were not iterated over!\n");
+ PART_ERROR(H5Literate_by_name_creation_increasing);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Literate_by_name_creation_increasing);
+
+ PART_BEGIN(H5Literate_by_name_creation_decreasing)
+ {
+ TESTING_2("H5Literate_by_name2 by creation order in decreasing order");
+
+ /* Reset the counter to the appropriate value for the next test */
+ i = 3 * LINK_ITER_SOFT_LINKS_TEST_NUM_LINKS;
+
+ if (H5Literate_by_name2(
+ file_id, "/" LINK_TEST_GROUP_NAME "/" LINK_ITER_SOFT_LINKS_TEST_SUBGROUP_NAME,
+ H5_INDEX_CRT_ORDER, H5_ITER_DEC, NULL, link_iter_soft_links_cb, &i, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" H5Literate_by_name2 by index type creation order in decreasing order failed\n");
+ PART_ERROR(H5Literate_by_name_creation_decreasing);
+ }
+
+ if (i != 4 * LINK_ITER_SOFT_LINKS_TEST_NUM_LINKS) {
+ H5_FAILED();
+ HDprintf(" some links were not iterated over!\n");
+ PART_ERROR(H5Literate_by_name_creation_decreasing);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Literate_by_name_creation_decreasing);
+ }
+ END_MULTIPART;
+
+ TESTING_2("test cleanup");
+
+ if (H5Pclose(gcpl_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Pclose(gcpl_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to check the functionality of link
+ * iteration using H5Literate(_by_name)2 with
+ * only external links. Iteration is done in
+ * increasing and decreasing order of both link
+ * name and link creation order.
+ */
+static int
+test_link_iterate_external_links(void)
+{
+#ifndef NO_EXTERNAL_LINKS
+ size_t i;
+ htri_t link_exists;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t gcpl_id = H5I_INVALID_HID;
+ char ext_link_filename[H5_API_TEST_FILENAME_MAX_LENGTH];
+#endif
+
+ TESTING_MULTIPART("link iteration (only external links)");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_LINK_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_EXTERNAL_LINKS) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_ITERATE) || !(vol_cap_flags_g & H5VL_CAP_FLAG_CREATION_ORDER)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file, group, link, or iterate aren't supported with this "
+ "connector\n");
+ return 0;
+ }
+
+#ifndef NO_EXTERNAL_LINKS
+ TESTING_2("test setup");
+
+ HDsnprintf(ext_link_filename, H5_API_TEST_FILENAME_MAX_LENGTH, "%s", EXTERNAL_LINK_TEST_FILE_NAME);
+
+ if ((file_id = H5Fcreate(ext_link_filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create file '%s' for external link to reference\n", ext_link_filename);
+ goto error;
+ }
+
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, LINK_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", LINK_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((gcpl_id = H5Pcreate(H5P_GROUP_CREATE)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create GCPL for link creation order tracking\n");
+ goto error;
+ }
+
+ if (H5Pset_link_creation_order(gcpl_id, H5P_CRT_ORDER_TRACKED) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't set link creation order tracking\n");
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, LINK_ITER_EXT_LINKS_TEST_SUBGROUP_NAME, H5P_DEFAULT, gcpl_id,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container subgroup '%s'\n", LINK_ITER_EXT_LINKS_TEST_SUBGROUP_NAME);
+ goto error;
+ }
+
+ for (i = 0; i < LINK_ITER_EXT_LINKS_TEST_NUM_LINKS; i++) {
+ char link_name[LINK_ITER_EXT_LINKS_TEST_BUF_SIZE];
+
+ /* Create the links with a reverse-ordering naming scheme to test creation order later */
+ HDsnprintf(link_name, LINK_ITER_EXT_LINKS_TEST_BUF_SIZE, LINK_ITER_EXT_LINKS_TEST_LINK_NAME "%d",
+ (int)(LINK_ITER_EXT_LINKS_TEST_NUM_LINKS - i - 1));
+
+ if (H5Lcreate_external(ext_link_filename, "/", group_id, link_name, H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create external link '%s'\n", link_name);
+ goto error;
+ }
+
+ /* Verify the link has been created */
+ if ((link_exists = H5Lexists(group_id, link_name, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if link '%s' exists\n", link_name);
+ goto error;
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" link '%s' did not exist\n", link_name);
+ goto error;
+ }
+ }
+
+ PASSED();
+
+ BEGIN_MULTIPART
+ {
+ /*
+ * NOTE: A counter is passed to the iteration callback so that the link
+ * visited at each step of the following iterations can be checked against
+ * the link expected for that step. This verifies that the links are
+ * returned in the correct order.
+ */
+
+ PART_BEGIN(H5Literate_link_name_increasing)
+ {
+ TESTING_2("H5Literate2 by link name in increasing order");
+
+ i = 0;
+
+ /* Test basic link iteration capability using both index types and both index orders */
+ if (H5Literate2(group_id, H5_INDEX_NAME, H5_ITER_INC, NULL, link_iter_external_links_cb, &i) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" H5Literate2 by index type name in increasing order failed\n");
+ PART_ERROR(H5Literate_link_name_increasing);
+ }
+
+ if (i != LINK_ITER_EXT_LINKS_TEST_NUM_LINKS) {
+ H5_FAILED();
+ HDprintf(" some links were not iterated over!\n");
+ PART_ERROR(H5Literate_link_name_increasing);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Literate_link_name_increasing);
+
+ PART_BEGIN(H5Literate_link_name_decreasing)
+ {
+ TESTING_2("H5Literate2 by link name in decreasing order");
+#ifndef NO_DECREASING_ALPHA_ITER_ORDER
+ /* Reset the counter to the appropriate value for the next test */
+ i = LINK_ITER_EXT_LINKS_TEST_NUM_LINKS;
+
+ if (H5Literate2(group_id, H5_INDEX_NAME, H5_ITER_DEC, NULL, link_iter_external_links_cb, &i) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" H5Literate2 by index type name in decreasing order failed\n");
+ PART_ERROR(H5Literate_link_name_decreasing);
+ }
+
+ if (i != 2 * LINK_ITER_EXT_LINKS_TEST_NUM_LINKS) {
+ H5_FAILED();
+ HDprintf(" some links were not iterated over!\n");
+ PART_ERROR(H5Literate_link_name_decreasing);
+ }
+
+ PASSED();
+#else
+ SKIPPED();
+ PART_EMPTY(H5Literate_link_name_decreasing);
+#endif
+ }
+ PART_END(H5Literate_link_name_decreasing);
+
+ PART_BEGIN(H5Literate_link_creation_increasing)
+ {
+ TESTING_2("H5Literate2 by creation order in increasing order");
+
+ /* Reset the counter to the appropriate value for the next test */
+ i = 2 * LINK_ITER_EXT_LINKS_TEST_NUM_LINKS;
+
+ if (H5Literate2(group_id, H5_INDEX_CRT_ORDER, H5_ITER_INC, NULL, link_iter_external_links_cb,
+ &i) < 0) {
+ H5_FAILED();
+ HDprintf(" H5Literate2 by index type creation order in increasing order failed\n");
+ PART_ERROR(H5Literate_link_creation_increasing);
+ }
+
+ if (i != 3 * LINK_ITER_EXT_LINKS_TEST_NUM_LINKS) {
+ H5_FAILED();
+ HDprintf(" some links were not iterated over!\n");
+ PART_ERROR(H5Literate_link_creation_increasing);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Literate_link_creation_increasing);
+
+ PART_BEGIN(H5Literate_link_creation_decreasing)
+ {
+ TESTING_2("H5Literate2 by creation order in decreasing order");
+
+ /* Reset the counter to the appropriate value for the next test */
+ i = 3 * LINK_ITER_EXT_LINKS_TEST_NUM_LINKS;
+
+ if (H5Literate2(group_id, H5_INDEX_CRT_ORDER, H5_ITER_DEC, NULL, link_iter_external_links_cb,
+ &i) < 0) {
+ H5_FAILED();
+ HDprintf(" H5Literate2 by index type creation order in decreasing order failed\n");
+ PART_ERROR(H5Literate_link_creation_decreasing);
+ }
+
+ if (i != 4 * LINK_ITER_EXT_LINKS_TEST_NUM_LINKS) {
+ H5_FAILED();
+ HDprintf(" some links were not iterated over!\n");
+ PART_ERROR(H5Literate_link_creation_decreasing);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Literate_link_creation_decreasing);
+
+ PART_BEGIN(H5Literate_by_name_link_name_increasing)
+ {
+ TESTING_2("H5Literate_by_name2 by link name in increasing order");
+
+ /* Reset the counter to the appropriate value for the next test */
+ i = 0;
+
+ if (H5Literate_by_name2(
+ file_id, "/" LINK_TEST_GROUP_NAME "/" LINK_ITER_EXT_LINKS_TEST_SUBGROUP_NAME,
+ H5_INDEX_NAME, H5_ITER_INC, NULL, link_iter_external_links_cb, &i, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" H5Literate_by_name2 by index type name in increasing order failed\n");
+ PART_ERROR(H5Literate_by_name_link_name_increasing);
+ }
+
+ if (i != LINK_ITER_EXT_LINKS_TEST_NUM_LINKS) {
+ H5_FAILED();
+ HDprintf(" some links were not iterated over!\n");
+ PART_ERROR(H5Literate_by_name_link_name_increasing);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Literate_by_name_link_name_increasing);
+
+ PART_BEGIN(H5Literate_by_name_link_name_decreasing)
+ {
+ TESTING_2("H5Literate_by_name2 by link name in decreasing order");
+#ifndef NO_DECREASING_ALPHA_ITER_ORDER
+ /* Reset the counter to the appropriate value for the next test */
+ i = LINK_ITER_EXT_LINKS_TEST_NUM_LINKS;
+
+ if (H5Literate_by_name2(
+ file_id, "/" LINK_TEST_GROUP_NAME "/" LINK_ITER_EXT_LINKS_TEST_SUBGROUP_NAME,
+ H5_INDEX_NAME, H5_ITER_DEC, NULL, link_iter_external_links_cb, &i, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" H5Literate_by_name2 by index type name in decreasing order failed\n");
+ PART_ERROR(H5Literate_by_name_link_name_decreasing);
+ }
+
+ if (i != 2 * LINK_ITER_EXT_LINKS_TEST_NUM_LINKS) {
+ H5_FAILED();
+ HDprintf(" some links were not iterated over!\n");
+ PART_ERROR(H5Literate_by_name_link_name_decreasing);
+ }
+
+ PASSED();
+#else
+ SKIPPED();
+ PART_EMPTY(H5Literate_by_name_link_name_decreasing);
+#endif
+ }
+ PART_END(H5Literate_by_name_link_name_decreasing);
+
+ PART_BEGIN(H5Literate_by_name_creation_increasing)
+ {
+ TESTING_2("H5Literate_by_name2 by creation order in increasing order");
+
+ /* Reset the counter to the appropriate value for the next test */
+ i = 2 * LINK_ITER_EXT_LINKS_TEST_NUM_LINKS;
+
+ if (H5Literate_by_name2(file_id,
+ "/" LINK_TEST_GROUP_NAME "/" LINK_ITER_EXT_LINKS_TEST_SUBGROUP_NAME,
+ H5_INDEX_CRT_ORDER, H5_ITER_INC, NULL, link_iter_external_links_cb, &i,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" H5Literate_by_name2 by index type creation order in increasing order failed\n");
+ PART_ERROR(H5Literate_by_name_creation_increasing);
+ }
+
+ if (i != 3 * LINK_ITER_EXT_LINKS_TEST_NUM_LINKS) {
+ H5_FAILED();
+ HDprintf(" some links were not iterated over!\n");
+ PART_ERROR(H5Literate_by_name_creation_increasing);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Literate_by_name_creation_increasing);
+
+ PART_BEGIN(H5Literate_by_name_creation_decreasing)
+ {
+ TESTING_2("H5Literate_by_name2 by creation order in decreasing order");
+
+ /* Reset the counter to the appropriate value for the next test */
+ i = 3 * LINK_ITER_EXT_LINKS_TEST_NUM_LINKS;
+
+ if (H5Literate_by_name2(file_id,
+ "/" LINK_TEST_GROUP_NAME "/" LINK_ITER_EXT_LINKS_TEST_SUBGROUP_NAME,
+ H5_INDEX_CRT_ORDER, H5_ITER_DEC, NULL, link_iter_external_links_cb, &i,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" H5Literate_by_name2 by index type creation order in decreasing order failed\n");
+ PART_ERROR(H5Literate_by_name_creation_decreasing);
+ }
+
+ if (i != 4 * LINK_ITER_EXT_LINKS_TEST_NUM_LINKS) {
+ H5_FAILED();
+ HDprintf(" some links were not iterated over!\n");
+ PART_ERROR(H5Literate_by_name_creation_decreasing);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Literate_by_name_creation_decreasing);
+ }
+ END_MULTIPART;
+
+ TESTING_2("test cleanup");
+
+ if (H5Pclose(gcpl_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Pclose(gcpl_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+#else
+ SKIPPED();
+ return 0;
+#endif
+}
+
+/*
+ * A test to check the functionality of link
+ * iteration using H5Literate(_by_name)2 with
+ * only user-defined links. Iteration is done
+ * in increasing and decreasing order of both
+ * link name and link creation order.
+ *
+ * TODO refactor test so that creation order tests
+ * actually test the order that objects were created in.
+ */
+static int
+test_link_iterate_ud_links(void)
+{
+ TESTING("link iteration (only user-defined links)");
+
+ SKIPPED();
+
+ return 1;
+}
+
+/*
+ * A test to check the functionality of link
+ * iteration using H5Literate(_by_name)2 with
+ * mixed link types. Iteration is done in
+ * increasing and decreasing order of both link
+ * name and link creation order.
+ *
+ * TODO refactor test so that creation order tests
+ * actually test the order that objects were created in.
+ *
+ * TODO add UD links
+ *
+ * TODO refactor link saving portion into its own test
+ */
+static int
+test_link_iterate_mixed_links(void)
+{
+#if !defined(NO_EXTERNAL_LINKS) && !defined(NO_USER_DEFINED_LINKS)
+ hsize_t saved_idx;
+ size_t i;
+ htri_t link_exists;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t gcpl_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t dset_dtype = H5I_INVALID_HID;
+ hid_t dset_dspace = H5I_INVALID_HID;
+ int halted;
+ char ext_link_filename[H5_API_TEST_FILENAME_MAX_LENGTH];
+#endif
+
+ TESTING_MULTIPART("link iteration (mixed link types)");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_LINK_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_EXTERNAL_LINKS) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_SOFT_LINKS) || !(vol_cap_flags_g & H5VL_CAP_FLAG_ITERATE) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_CREATION_ORDER)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file, group, link, soft or external link, iterate, or creation "
+ "order aren't supported with this connector\n");
+ return 0;
+ }
+
+#if !defined(NO_EXTERNAL_LINKS) && !defined(NO_USER_DEFINED_LINKS)
+ TESTING_2("test setup");
+
+ HDsnprintf(ext_link_filename, H5_API_TEST_FILENAME_MAX_LENGTH, "%s", EXTERNAL_LINK_TEST_FILE_NAME);
+
+ if ((file_id = H5Fcreate(ext_link_filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create file '%s' for external link to reference\n", ext_link_filename);
+ goto error;
+ }
+
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, LINK_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", LINK_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((gcpl_id = H5Pcreate(H5P_GROUP_CREATE)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create GCPL for link creation order tracking\n");
+ goto error;
+ }
+
+ if (H5Pset_link_creation_order(gcpl_id, H5P_CRT_ORDER_TRACKED) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't set link creation order tracking\n");
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, LINK_ITER_MIXED_LINKS_TEST_SUBGROUP_NAME, H5P_DEFAULT,
+ gcpl_id, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container subgroup '%s'\n", LINK_ITER_MIXED_LINKS_TEST_SUBGROUP_NAME);
+ goto error;
+ }
+
+ if ((dset_dtype = generate_random_datatype(H5T_NO_CLASS, FALSE)) < 0)
+ TEST_ERROR;
+
+ if ((dset_dspace =
+ generate_random_dataspace(LINK_ITER_MIXED_LINKS_TEST_DSET_SPACE_RANK, NULL, NULL, FALSE)) < 0)
+ TEST_ERROR;
+
+ if ((dset_id = H5Dcreate2(group_id, LINK_ITER_MIXED_LINKS_TEST_HARD_LINK_NAME, dset_dtype, dset_dspace,
+ H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create hard link '%s'\n", LINK_ITER_MIXED_LINKS_TEST_HARD_LINK_NAME);
+ goto error;
+ }
+
+ if (H5Lcreate_soft("/" LINK_TEST_GROUP_NAME "/" LINK_ITER_MIXED_LINKS_TEST_SUBGROUP_NAME
+ "/" LINK_ITER_MIXED_LINKS_TEST_HARD_LINK_NAME,
+ group_id, LINK_ITER_MIXED_LINKS_TEST_SOFT_LINK_NAME, H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create soft link '%s'\n", LINK_ITER_MIXED_LINKS_TEST_SOFT_LINK_NAME);
+ goto error;
+ }
+
+ if (H5Lcreate_external(ext_link_filename, "/", group_id, LINK_ITER_MIXED_LINKS_TEST_EXT_LINK_NAME,
+ H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create external link '%s'\n", LINK_ITER_MIXED_LINKS_TEST_EXT_LINK_NAME);
+ goto error;
+ }
+
+ /* Verify the links have been created */
+ if ((link_exists = H5Lexists(group_id, LINK_ITER_MIXED_LINKS_TEST_HARD_LINK_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if link '%s' exists\n", LINK_ITER_MIXED_LINKS_TEST_HARD_LINK_NAME);
+ goto error;
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" first link did not exist\n");
+ goto error;
+ }
+
+ if ((link_exists = H5Lexists(group_id, LINK_ITER_MIXED_LINKS_TEST_SOFT_LINK_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if link '%s' exists\n", LINK_ITER_MIXED_LINKS_TEST_SOFT_LINK_NAME);
+ goto error;
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" second link did not exist\n");
+ goto error;
+ }
+
+ if ((link_exists = H5Lexists(group_id, LINK_ITER_MIXED_LINKS_TEST_EXT_LINK_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if link '%s' exists\n", LINK_ITER_MIXED_LINKS_TEST_EXT_LINK_NAME);
+ goto error;
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" third link did not exist\n");
+ goto error;
+ }
+
+ PASSED();
+
+ BEGIN_MULTIPART
+ {
+ /*
+ * NOTE: A counter is passed to the iteration callback to match each expected
+ * link with a given step throughout all of the following iterations. This
+ * checks that the links are indeed returned in the correct order.
+ */
+
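+ /*
+ * For reference, a rough sketch of a counter-based H5L_iterate2_t callback
+ * (hypothetical; the real callbacks used below, e.g. link_iter_mixed_links_cb,
+ * are defined elsewhere in this file and also verify the expected link name
+ * and type at each step before advancing the counter):
+ *
+ *     static herr_t
+ *     example_link_iter_cb(hid_t group, const char *name, const H5L_info2_t *info, void *op_data)
+ *     {
+ *         size_t *counter = (size_t *)op_data;
+ *
+ *         (*counter)++;        // one step per link visited
+ *         return H5_ITER_CONT; // zero return value: keep iterating
+ *     }
+ */
+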
+ PART_BEGIN(H5Literate_link_name_increasing)
+ {
+ TESTING_2("H5Literate2 by link name in increasing order");
+
+ i = 0;
+
+ /* Test basic link iteration capability using both index types and both index orders */
+ if (H5Literate2(group_id, H5_INDEX_NAME, H5_ITER_INC, NULL, link_iter_mixed_links_cb, &i) < 0) {
+ H5_FAILED();
+ HDprintf(" H5Literate2 by index type name in increasing order failed\n");
+ PART_ERROR(H5Literate_link_name_increasing);
+ }
+
+ if (i != LINK_ITER_MIXED_LINKS_TEST_NUM_LINKS) {
+ H5_FAILED();
+ HDprintf(" some links were not iterated over!\n");
+ PART_ERROR(H5Literate_link_name_increasing);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Literate_link_name_increasing);
+
+ PART_BEGIN(H5Literate_link_name_decreasing)
+ {
+ TESTING_2("H5Literate2 by link name in decreasing order");
+#ifndef NO_DECREASING_ALPHA_ITER_ORDER
+ /* Reset the counter to the appropriate value for the next test */
+ i = LINK_ITER_MIXED_LINKS_TEST_NUM_LINKS;
+
+ if (H5Literate2(group_id, H5_INDEX_NAME, H5_ITER_DEC, NULL, link_iter_mixed_links_cb, &i) < 0) {
+ H5_FAILED();
+ HDprintf(" H5Literate2 by index type name in decreasing order failed\n");
+ PART_ERROR(H5Literate_link_name_decreasing);
+ }
+
+ if (i != 2 * LINK_ITER_MIXED_LINKS_TEST_NUM_LINKS) {
+ H5_FAILED();
+ HDprintf(" some links were not iterated over!\n");
+ PART_ERROR(H5Literate_link_name_decreasing);
+ }
+
+ PASSED();
+#else
+ SKIPPED();
+ PART_EMPTY(H5Literate_link_name_decreasing);
+#endif
+ }
+ PART_END(H5Literate_link_name_decreasing);
+
+ PART_BEGIN(H5Literate_link_creation_increasing)
+ {
+ TESTING_2("H5Literate2 by creation order in increasing order");
+
+ /* Reset the counter to the appropriate value for the next test */
+ i = 2 * LINK_ITER_MIXED_LINKS_TEST_NUM_LINKS;
+
+ if (H5Literate2(group_id, H5_INDEX_CRT_ORDER, H5_ITER_INC, NULL, link_iter_mixed_links_cb, &i) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" H5Literate2 by index type creation order in increasing order failed\n");
+ PART_ERROR(H5Literate_link_creation_increasing);
+ }
+
+ if (i != 3 * LINK_ITER_MIXED_LINKS_TEST_NUM_LINKS) {
+ H5_FAILED();
+ HDprintf(" some links were not iterated over!\n");
+ PART_ERROR(H5Literate_link_creation_increasing);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Literate_link_creation_increasing);
+
+ PART_BEGIN(H5Literate_link_creation_decreasing)
+ {
+ TESTING_2("H5Literate2 by creation order in decreasing order");
+
+ /* Reset the counter to the appropriate value for the next test */
+ i = 3 * LINK_ITER_MIXED_LINKS_TEST_NUM_LINKS;
+
+ if (H5Literate2(group_id, H5_INDEX_CRT_ORDER, H5_ITER_DEC, NULL, link_iter_mixed_links_cb, &i) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" H5Literate2 by index type creation order in decreasing order failed\n");
+ PART_ERROR(H5Literate_link_creation_decreasing);
+ }
+
+ if (i != 4 * LINK_ITER_MIXED_LINKS_TEST_NUM_LINKS) {
+ H5_FAILED();
+ HDprintf(" some links were not iterated over!\n");
+ PART_ERROR(H5Literate_link_creation_decreasing);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Literate_link_creation_decreasing);
+
+ PART_BEGIN(H5Literate_by_name_link_name_increasing)
+ {
+ TESTING_2("H5Literate_by_name2 by link name in increasing order");
+
+ i = 0;
+
+ if (H5Literate_by_name2(
+ file_id, "/" LINK_TEST_GROUP_NAME "/" LINK_ITER_MIXED_LINKS_TEST_SUBGROUP_NAME,
+ H5_INDEX_NAME, H5_ITER_INC, NULL, link_iter_mixed_links_cb, &i, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" H5Literate_by_name2 by index type name in increasing order failed\n");
+ PART_ERROR(H5Literate_by_name_link_name_increasing);
+ }
+
+ if (i != LINK_ITER_MIXED_LINKS_TEST_NUM_LINKS) {
+ H5_FAILED();
+ HDprintf(" some links were not iterated over!\n");
+ PART_ERROR(H5Literate_by_name_link_name_increasing);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Literate_by_name_link_name_increasing);
+
+ PART_BEGIN(H5Literate_by_name_link_name_decreasing)
+ {
+ TESTING_2("H5Literate_by_name2 by link name in decreasing order");
+#ifndef NO_DECREASING_ALPHA_ITER_ORDER
+ /* Reset the counter to the appropriate value for the next test */
+ i = LINK_ITER_MIXED_LINKS_TEST_NUM_LINKS;
+
+ if (H5Literate_by_name2(
+ file_id, "/" LINK_TEST_GROUP_NAME "/" LINK_ITER_MIXED_LINKS_TEST_SUBGROUP_NAME,
+ H5_INDEX_NAME, H5_ITER_DEC, NULL, link_iter_mixed_links_cb, &i, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" H5Literate_by_name2 by index type name in decreasing order failed\n");
+ PART_ERROR(H5Literate_by_name_link_name_decreasing);
+ }
+
+ if (i != 2 * LINK_ITER_MIXED_LINKS_TEST_NUM_LINKS) {
+ H5_FAILED();
+ HDprintf(" some links were not iterated over!\n");
+ PART_ERROR(H5Literate_by_name_link_name_decreasing);
+ }
+
+ PASSED();
+#else
+ SKIPPED();
+ PART_EMPTY(H5Literate_by_name_link_name_decreasing);
+#endif
+ }
+ PART_END(H5Literate_by_name_link_name_decreasing);
+
+ PART_BEGIN(H5Literate_by_name_creation_increasing)
+ {
+ TESTING_2("H5Literate_by_name2 by creation order in increasing order");
+
+ /* Reset the counter to the appropriate value for the next test */
+ i = 2 * LINK_ITER_MIXED_LINKS_TEST_NUM_LINKS;
+
+ if (H5Literate_by_name2(
+ file_id, "/" LINK_TEST_GROUP_NAME "/" LINK_ITER_MIXED_LINKS_TEST_SUBGROUP_NAME,
+ H5_INDEX_CRT_ORDER, H5_ITER_INC, NULL, link_iter_mixed_links_cb, &i, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" H5Literate_by_name2 by index type creation order in increasing order failed\n");
+ PART_ERROR(H5Literate_by_name_creation_increasing);
+ }
+
+ if (i != 3 * LINK_ITER_MIXED_LINKS_TEST_NUM_LINKS) {
+ H5_FAILED();
+ HDprintf(" some links were not iterated over!\n");
+ PART_ERROR(H5Literate_by_name_creation_increasing);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Literate_by_name_creation_increasing);
+
+ PART_BEGIN(H5Literate_by_name_creation_decreasing)
+ {
+ TESTING_2("H5Literate_by_name2 by creation order in decreasing order");
+
+ /* Reset the counter to the appropriate value for the next test */
+ i = 3 * LINK_ITER_MIXED_LINKS_TEST_NUM_LINKS;
+
+ if (H5Literate_by_name2(
+ file_id, "/" LINK_TEST_GROUP_NAME "/" LINK_ITER_MIXED_LINKS_TEST_SUBGROUP_NAME,
+ H5_INDEX_CRT_ORDER, H5_ITER_DEC, NULL, link_iter_mixed_links_cb, &i, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" H5Literate_by_name2 by index type creation order in decreasing order failed\n");
+ PART_ERROR(H5Literate_by_name_creation_decreasing);
+ }
+
+ if (i != 4 * LINK_ITER_MIXED_LINKS_TEST_NUM_LINKS) {
+ H5_FAILED();
+ HDprintf(" some links were not iterated over!\n");
+ PART_ERROR(H5Literate_by_name_creation_decreasing);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Literate_by_name_creation_decreasing);
+
+ PART_BEGIN(H5Literate_index_saving_increasing)
+ {
+ TESTING_2("H5Literate2 index-saving capabilities in increasing order");
+
+ /* Test the H5Literate2 index-saving capabilities */
+ saved_idx = 0;
+ halted = 0;
+
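+ /*
+ * Rough sketch (hypothetical) of how link_iter_idx_saving_cb is expected to
+ * interact with H5Literate2 here: returning a positive value from the callback
+ * stops iteration early, H5Literate2 writes the position it stopped at back
+ * into 'saved_idx', and the follow-up call below that passes the same pointer
+ * resumes from that point instead of starting over.
+ *
+ *     if (should_stop_here) {   // hypothetical condition, e.g. a particular link name
+ *         *halted = 1;
+ *         return H5_ITER_STOP;  // positive: short-circuit; index is saved
+ *     }
+ *     return H5_ITER_CONT;      // zero: keep iterating
+ */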
+ if (H5Literate2(group_id, H5_INDEX_CRT_ORDER, H5_ITER_INC, &saved_idx, link_iter_idx_saving_cb,
+ &halted) < 0) {
+ H5_FAILED();
+ HDprintf(" H5Literate2 index-saving capability test failed\n");
+ PART_ERROR(H5Literate_index_saving_increasing);
+ }
+
+ if (saved_idx != 2) {
+ H5_FAILED();
+ HDprintf(" saved index after iteration was wrong\n");
+ PART_ERROR(H5Literate_index_saving_increasing);
+ }
+
+ if (H5Literate2(group_id, H5_INDEX_CRT_ORDER, H5_ITER_INC, &saved_idx, link_iter_idx_saving_cb,
+ &halted) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't finish iterating when beginning from saved index\n");
+ PART_ERROR(H5Literate_index_saving_increasing);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Literate_index_saving_increasing);
+
+ PART_BEGIN(H5Literate_index_saving_decreasing)
+ {
+ TESTING_2("H5Literate2 index-saving capabilities in decreasing order");
+
+ saved_idx = LINK_ITER_MIXED_LINKS_TEST_NUM_LINKS - 1;
+ halted = 0;
+
+ if (H5Literate2(group_id, H5_INDEX_CRT_ORDER, H5_ITER_DEC, &saved_idx, link_iter_idx_saving_cb,
+ &halted) < 0) {
+ H5_FAILED();
+ HDprintf(" H5Literate2 index-saving capability test failed\n");
+ PART_ERROR(H5Literate_index_saving_decreasing);
+ }
+
+ if (saved_idx != 2) {
+ H5_FAILED();
+ HDprintf(" saved index after iteration was wrong\n");
+ PART_ERROR(H5Literate_index_saving_decreasing);
+ }
+
+ if (H5Literate2(group_id, H5_INDEX_CRT_ORDER, H5_ITER_DEC, &saved_idx, link_iter_idx_saving_cb,
+ &halted) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't finish iterating when beginning from saved index\n");
+ PART_ERROR(H5Literate_index_saving_decreasing);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Literate_index_saving_decreasing);
+ }
+ END_MULTIPART;
+
+ TESTING_2("test cleanup");
+
+ if (H5Sclose(dset_dspace) < 0)
+ TEST_ERROR;
+ if (H5Tclose(dset_dtype) < 0)
+ TEST_ERROR;
+ if (H5Dclose(dset_id) < 0)
+ TEST_ERROR;
+ if (H5Pclose(gcpl_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(dset_dspace);
+ H5Tclose(dset_dtype);
+ H5Dclose(dset_id);
+ H5Pclose(gcpl_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+#else
+ SKIPPED();
+ return 0;
+#endif
+}
+
+/*
+ * A test to check that H5Literate(_by_name)2 fails
+ * when given invalid parameters.
+ */
+static int
+test_link_iterate_invalid_params(void)
+{
+ herr_t err_ret = -1;
+ size_t i;
+ htri_t link_exists;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t gcpl_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t dset_dtype = H5I_INVALID_HID;
+ hid_t dset_dspace = H5I_INVALID_HID;
+ char ext_link_filename[H5_API_TEST_FILENAME_MAX_LENGTH];
+
+ TESTING_MULTIPART("link iteration with invalid parameters");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_LINK_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_EXTERNAL_LINKS) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_SOFT_LINKS) || !(vol_cap_flags_g & H5VL_CAP_FLAG_ITERATE) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_CREATION_ORDER)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file, group, dataset, link, soft or external link, iterate, or "
+ "creation order aren't supported with this connector\n");
+ return 0;
+ }
+
+ TESTING_2("test setup");
+
+ HDsnprintf(ext_link_filename, H5_API_TEST_FILENAME_MAX_LENGTH, "%s", EXTERNAL_LINK_TEST_FILE_NAME);
+
+ if ((file_id = H5Fcreate(ext_link_filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create file '%s' for external link to reference\n", ext_link_filename);
+ goto error;
+ }
+
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, LINK_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", LINK_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((gcpl_id = H5Pcreate(H5P_GROUP_CREATE)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create GCPL for link creation order tracking\n");
+ goto error;
+ }
+
+ if (H5Pset_link_creation_order(gcpl_id, H5P_CRT_ORDER_TRACKED) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't set link creation order tracking\n");
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, LINK_ITER_INVALID_PARAMS_TEST_SUBGROUP_NAME, H5P_DEFAULT,
+ gcpl_id, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container subgroup '%s'\n",
+ LINK_ITER_INVALID_PARAMS_TEST_SUBGROUP_NAME);
+ goto error;
+ }
+
+ if ((dset_dtype = generate_random_datatype(H5T_NO_CLASS, FALSE)) < 0)
+ TEST_ERROR;
+
+ if ((dset_dspace =
+ generate_random_dataspace(LINK_ITER_INVALID_PARAMS_TEST_DSET_SPACE_RANK, NULL, NULL, FALSE)) < 0)
+ TEST_ERROR;
+
+ if ((dset_id = H5Dcreate2(group_id, LINK_ITER_INVALID_PARAMS_TEST_HARD_LINK_NAME, dset_dtype, dset_dspace,
+ H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create hard link '%s'\n", LINK_ITER_INVALID_PARAMS_TEST_HARD_LINK_NAME);
+ goto error;
+ }
+
+ if (H5Lcreate_soft("/" LINK_TEST_GROUP_NAME "/" LINK_ITER_INVALID_PARAMS_TEST_SUBGROUP_NAME
+ "/" LINK_ITER_INVALID_PARAMS_TEST_HARD_LINK_NAME,
+ group_id, LINK_ITER_INVALID_PARAMS_TEST_SOFT_LINK_NAME, H5P_DEFAULT,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create soft link '%s'\n", LINK_ITER_INVALID_PARAMS_TEST_SOFT_LINK_NAME);
+ goto error;
+ }
+#ifndef NO_EXTERNAL_LINKS
+ if (H5Lcreate_external(ext_link_filename, "/", group_id, LINK_ITER_INVALID_PARAMS_TEST_EXT_LINK_NAME,
+ H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create external link '%s'\n", LINK_ITER_INVALID_PARAMS_TEST_EXT_LINK_NAME);
+ goto error;
+ }
+#endif
+ /* Verify the links have been created */
+ if ((link_exists = H5Lexists(group_id, LINK_ITER_INVALID_PARAMS_TEST_HARD_LINK_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if link '%s' exists\n",
+ LINK_ITER_INVALID_PARAMS_TEST_HARD_LINK_NAME);
+ goto error;
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" first link did not exist\n");
+ goto error;
+ }
+
+ if ((link_exists = H5Lexists(group_id, LINK_ITER_INVALID_PARAMS_TEST_SOFT_LINK_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if link '%s' exists\n",
+ LINK_ITER_INVALID_PARAMS_TEST_SOFT_LINK_NAME);
+ goto error;
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" second link did not exist\n");
+ goto error;
+ }
+#ifndef NO_EXTERNAL_LINKS
+ if ((link_exists = H5Lexists(group_id, LINK_ITER_INVALID_PARAMS_TEST_EXT_LINK_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if link '%s' exists\n", LINK_ITER_INVALID_PARAMS_TEST_EXT_LINK_NAME);
+ goto error;
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" third link did not exist\n");
+ goto error;
+ }
+#endif
+
+ PASSED();
+
+ BEGIN_MULTIPART
+ {
+ i = 0;
+
+ PART_BEGIN(H5Literate_invalid_grp_id)
+ {
+ TESTING_2("H5Literate2 with an invalid group ID");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Literate2(H5I_INVALID_HID, H5_INDEX_NAME, H5_ITER_INC, NULL,
+ link_iter_invalid_params_cb, NULL);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Literate2 succeeded with an invalid group ID!\n");
+ PART_ERROR(H5Literate_invalid_grp_id);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Literate_invalid_grp_id);
+
+ PART_BEGIN(H5Literate_invalid_index_type)
+ {
+ TESTING_2("H5Literate2 with an invalid index type");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Literate2(group_id, H5_INDEX_UNKNOWN, H5_ITER_INC, NULL,
+ link_iter_invalid_params_cb, NULL);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Literate2 succeeded with invalid index type H5_INDEX_UNKNOWN!\n");
+ PART_ERROR(H5Literate_invalid_index_type);
+ }
+
+ H5E_BEGIN_TRY
+ {
+ err_ret =
+ H5Literate2(group_id, H5_INDEX_N, H5_ITER_INC, NULL, link_iter_invalid_params_cb, NULL);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Literate2 succeeded with invalid index type H5_INDEX_N!\n");
+ PART_ERROR(H5Literate_invalid_index_type);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Literate_invalid_index_type);
+
+ PART_BEGIN(H5Literate_invalid_iter_order)
+ {
+ TESTING_2("H5Literate2 with an invalid iteration ordering");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Literate2(group_id, H5_INDEX_NAME, H5_ITER_UNKNOWN, NULL,
+ link_iter_invalid_params_cb, NULL);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Literate2 succeeded with invalid iteration ordering H5_ITER_UNKNOWN!\n");
+ PART_ERROR(H5Literate_invalid_iter_order);
+ }
+
+ H5E_BEGIN_TRY
+ {
+ err_ret =
+ H5Literate2(group_id, H5_INDEX_NAME, H5_ITER_N, NULL, link_iter_invalid_params_cb, NULL);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Literate2 succeeded with invalid iteration ordering H5_ITER_N!\n");
+ PART_ERROR(H5Literate_invalid_iter_order);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Literate_invalid_iter_order);
+
+ PART_BEGIN(H5Literate_by_name_invalid_loc_id)
+ {
+ TESTING_2("H5Literate_by_name2 with an invalid location ID");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Literate_by_name2(
+ H5I_INVALID_HID, "/" LINK_TEST_GROUP_NAME "/" LINK_ITER_INVALID_PARAMS_TEST_SUBGROUP_NAME,
+ H5_INDEX_NAME, H5_ITER_INC, NULL, link_iter_invalid_params_cb, NULL, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Literate_by_name2 succeeded with an invalid location ID!\n");
+ PART_ERROR(H5Literate_by_name_invalid_loc_id);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Literate_by_name_invalid_loc_id);
+
+ PART_BEGIN(H5Literate_by_name_invalid_grp_name)
+ {
+ TESTING_2("H5Literate_by_name2 with an invalid group name");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Literate_by_name2(file_id, NULL, H5_INDEX_NAME, H5_ITER_INC, NULL,
+ link_iter_invalid_params_cb, NULL, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Literate_by_name2 succeeded with a NULL group name!\n");
+ PART_ERROR(H5Literate_by_name_invalid_grp_name);
+ }
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Literate_by_name2(file_id, "", H5_INDEX_NAME, H5_ITER_INC, NULL,
+ link_iter_invalid_params_cb, NULL, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Literate_by_name2 succeeded with an invalid group name of ''!\n");
+ PART_ERROR(H5Literate_by_name_invalid_grp_name);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Literate_by_name_invalid_grp_name);
+
+ PART_BEGIN(H5Literate_by_name_invalid_index_type)
+ {
+ TESTING_2("H5Literate_by_name2 with an invalid index type");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Literate_by_name2(
+ file_id, "/" LINK_TEST_GROUP_NAME "/" LINK_ITER_INVALID_PARAMS_TEST_SUBGROUP_NAME,
+ H5_INDEX_UNKNOWN, H5_ITER_INC, NULL, link_iter_invalid_params_cb, NULL, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Literate_by_name2 succeeded with invalid index type H5_INDEX_UNKNOWN!\n");
+ PART_ERROR(H5Literate_by_name_invalid_index_type);
+ }
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Literate_by_name2(
+ file_id, "/" LINK_TEST_GROUP_NAME "/" LINK_ITER_INVALID_PARAMS_TEST_SUBGROUP_NAME,
+ H5_INDEX_N, H5_ITER_INC, NULL, link_iter_invalid_params_cb, NULL, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Literate_by_name2 succeeded with invalid index type H5_INDEX_N!\n");
+ PART_ERROR(H5Literate_by_name_invalid_index_type);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Literate_by_name_invalid_index_type);
+
+ PART_BEGIN(H5Literate_by_name_invalid_iter_order)
+ {
+ TESTING_2("H5Literate_by_name2 with an invalid iteration ordering");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Literate_by_name2(
+ file_id, "/" LINK_TEST_GROUP_NAME "/" LINK_ITER_INVALID_PARAMS_TEST_SUBGROUP_NAME,
+ H5_INDEX_NAME, H5_ITER_UNKNOWN, NULL, link_iter_invalid_params_cb, NULL, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(
+ " H5Literate_by_name2 succeeded with invalid iteration ordering H5_ITER_UNKNOWN!\n");
+ PART_ERROR(H5Literate_by_name_invalid_iter_order);
+ }
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Literate_by_name2(
+ file_id, "/" LINK_TEST_GROUP_NAME "/" LINK_ITER_INVALID_PARAMS_TEST_SUBGROUP_NAME,
+ H5_INDEX_NAME, H5_ITER_N, NULL, link_iter_invalid_params_cb, &i, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Literate_by_name2 succeeded with invalid iteration ordering H5_ITER_N!\n");
+ PART_ERROR(H5Literate_by_name_invalid_iter_order);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Literate_by_name_invalid_iter_order);
+
+ PART_BEGIN(H5Literate_by_name_invalid_lapl)
+ {
+ TESTING_2("H5Literate_by_name2 with an invalid LAPL");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Literate_by_name2(
+ file_id, "/" LINK_TEST_GROUP_NAME "/" LINK_ITER_INVALID_PARAMS_TEST_SUBGROUP_NAME,
+ H5_INDEX_NAME, H5_ITER_INC, NULL, link_iter_invalid_params_cb, NULL, H5I_INVALID_HID);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Literate_by_name2 succeeded with an invalid LAPL!\n");
+ PART_ERROR(H5Literate_by_name_invalid_lapl);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Literate_by_name_invalid_lapl);
+ }
+ END_MULTIPART;
+
+ TESTING_2("test cleanup");
+
+ if (H5Sclose(dset_dspace) < 0)
+ TEST_ERROR;
+ if (H5Tclose(dset_dtype) < 0)
+ TEST_ERROR;
+ if (H5Dclose(dset_id) < 0)
+ TEST_ERROR;
+ if (H5Pclose(gcpl_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(dset_dspace);
+ H5Tclose(dset_dtype);
+ H5Dclose(dset_id);
+ H5Pclose(gcpl_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to check that link iteration performed on a
+ * group containing no links completes without error.
+ */
+static int
+test_link_iterate_0_links(void)
+{
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t gcpl_id = H5I_INVALID_HID;
+
+ TESTING_MULTIPART("link iteration on group with 0 links");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_ITERATE) || !(vol_cap_flags_g & H5VL_CAP_FLAG_CREATION_ORDER)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file, group, link iterate, or creation order aren't supported "
+ "with this connector\n");
+ return 0;
+ }
+
+ TESTING_2("test setup");
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, LINK_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", LINK_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((gcpl_id = H5Pcreate(H5P_GROUP_CREATE)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create GCPL for link creation order tracking\n");
+ goto error;
+ }
+
+ if (H5Pset_link_creation_order(gcpl_id, H5P_CRT_ORDER_TRACKED) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't set link creation order tracking\n");
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, LINK_ITER_0_LINKS_TEST_SUBGROUP_NAME, H5P_DEFAULT, gcpl_id,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container subgroup '%s'\n", LINK_ITER_0_LINKS_TEST_SUBGROUP_NAME);
+ goto error;
+ }
+
+ PASSED();
+
+ BEGIN_MULTIPART
+ {
+ PART_BEGIN(H5Literate_0_links_name_increasing)
+ {
+ TESTING_2("H5Literate2 by link name in increasing order");
+
+ /* Test basic link iteration capability using both index types and both index orders */
+ if (H5Literate2(group_id, H5_INDEX_NAME, H5_ITER_INC, NULL, link_iter_0_links_cb, NULL) < 0) {
+ H5_FAILED();
+ HDprintf(" H5Literate2 by index type name in increasing order failed\n");
+ PART_ERROR(H5Literate_0_links_name_increasing);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Literate_0_links_name_increasing);
+
+ PART_BEGIN(H5Literate_0_links_name_decreasing)
+ {
+ TESTING_2("H5Literate2 by link name in decreasing order");
+#ifndef NO_DECREASING_ALPHA_ITER_ORDER
+ if (H5Literate2(group_id, H5_INDEX_NAME, H5_ITER_DEC, NULL, link_iter_0_links_cb, NULL) < 0) {
+ H5_FAILED();
+ HDprintf(" H5Literate2 by index type name in decreasing order failed\n");
+ PART_ERROR(H5Literate_0_links_name_decreasing);
+ }
+
+ PASSED();
+#else
+ SKIPPED();
+ PART_EMPTY(H5Literate_0_links_name_decreasing);
+#endif
+ }
+ PART_END(H5Literate_0_links_name_decreasing);
+
+ PART_BEGIN(H5Literate_0_links_creation_increasing)
+ {
+ TESTING_2("H5Literate2 by creation order in increasing order");
+
+ if (H5Literate2(group_id, H5_INDEX_CRT_ORDER, H5_ITER_INC, NULL, link_iter_0_links_cb, NULL) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" H5Literate2 by index type creation order in increasing order failed\n");
+ PART_ERROR(H5Literate_0_links_creation_increasing);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Literate_0_links_creation_increasing);
+
+ PART_BEGIN(H5Literate_0_links_creation_decreasing)
+ {
+ TESTING_2("H5Literate2 by creation order in decreasing order");
+
+ if (H5Literate2(group_id, H5_INDEX_CRT_ORDER, H5_ITER_DEC, NULL, link_iter_0_links_cb, NULL) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" H5Literate2 by index type creation order in decreasing order failed\n");
+ PART_ERROR(H5Literate_0_links_creation_decreasing);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Literate_0_links_creation_decreasing);
+
+ PART_BEGIN(H5Literate_by_name_0_links_name_increasing)
+ {
+ TESTING_2("H5Literate_by_name2 by link name in increasing order");
+
+ if (H5Literate_by_name2(
+ file_id, "/" LINK_TEST_GROUP_NAME "/" LINK_ITER_0_LINKS_TEST_SUBGROUP_NAME, H5_INDEX_NAME,
+ H5_ITER_INC, NULL, link_iter_0_links_cb, NULL, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" H5Literate_by_name2 by index type name in increasing order failed\n");
+ PART_ERROR(H5Literate_by_name_0_links_name_increasing);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Literate_by_name_0_links_name_increasing);
+
+ PART_BEGIN(H5Literate_by_name_0_links_name_decreasing)
+ {
+ TESTING_2("H5Literate_by_name2 by link name in decreasing order");
+#ifndef NO_DECREASING_ALPHA_ITER_ORDER
+ if (H5Literate_by_name2(
+ file_id, "/" LINK_TEST_GROUP_NAME "/" LINK_ITER_0_LINKS_TEST_SUBGROUP_NAME, H5_INDEX_NAME,
+ H5_ITER_DEC, NULL, link_iter_0_links_cb, NULL, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" H5Literate_by_name2 by index type name in decreasing order failed\n");
+ PART_ERROR(H5Literate_by_name_0_links_name_decreasing);
+ }
+
+ PASSED();
+#else
+ SKIPPED();
+ PART_EMPTY(H5Literate_by_name_0_links_name_decreasing);
+#endif
+ }
+ PART_END(H5Literate_by_name_0_links_name_decreasing);
+
+ PART_BEGIN(H5Literate_by_name_0_links_creation_increasing)
+ {
+ TESTING_2("H5Literate_by_name2 by creation order in increasing order");
+
+ if (H5Literate_by_name2(
+ file_id, "/" LINK_TEST_GROUP_NAME "/" LINK_ITER_0_LINKS_TEST_SUBGROUP_NAME,
+ H5_INDEX_CRT_ORDER, H5_ITER_INC, NULL, link_iter_0_links_cb, NULL, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" H5Literate_by_name2 by index type creation order in increasing order failed\n");
+ PART_ERROR(H5Literate_by_name_0_links_creation_increasing);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Literate_by_name_0_links_creation_increasing);
+
+ PART_BEGIN(H5Literate_by_name_0_links_creation_decreasing)
+ {
+ TESTING_2("H5Literate_by_name2 by creation order in decreasing order");
+
+ if (H5Literate_by_name2(
+ file_id, "/" LINK_TEST_GROUP_NAME "/" LINK_ITER_0_LINKS_TEST_SUBGROUP_NAME,
+ H5_INDEX_CRT_ORDER, H5_ITER_DEC, NULL, link_iter_0_links_cb, NULL, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" H5Literate_by_name2 by index type creation order in decreasing order failed\n");
+ PART_ERROR(H5Literate_by_name_0_links_creation_decreasing);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Literate_by_name_0_links_creation_decreasing);
+ }
+ END_MULTIPART;
+
+ TESTING_2("test cleanup");
+
+ if (H5Pclose(gcpl_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Pclose(gcpl_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to check the functionality of recursive
+ * link iteration using H5Lvisit(_by_name)2 with
+ * only hard links and where there are no cyclic
+ * links. Iteration is done in increasing and
+ * decreasing order of both link name and link
+ * creation order.
+ */
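+/*
+ * Note: unlike H5Literate2, which only walks the links directly inside the
+ * given group, H5Lvisit2 also descends into subgroups; the 'name' passed to
+ * the visit callback is relative to the group the visit started from
+ * (illustrative names only, e.g. "grp0" for a subgroup and "grp0/dset0"
+ * for a link inside it).
+ */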
+static int
+test_link_visit_hard_links_no_cycles(void)
+{
+ size_t i;
+ htri_t link_exists;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t subgroup_id = H5I_INVALID_HID;
+ hid_t gcpl_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t dset_dtype = H5I_INVALID_HID;
+ hid_t dset_dspace = H5I_INVALID_HID;
+
+ TESTING_MULTIPART("link visiting without cycles (only hard links)");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_ITERATE) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_CREATION_ORDER)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file, group, link iterate, or creation order aren't supported "
+ "with this connector\n");
+ return 0;
+ }
+
+ TESTING_2("test setup");
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, LINK_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", LINK_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((gcpl_id = H5Pcreate(H5P_GROUP_CREATE)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create a GCPL\n");
+ goto error;
+ }
+
+ if (H5Pset_link_creation_order(gcpl_id, H5P_CRT_ORDER_TRACKED | H5P_CRT_ORDER_INDEXED) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't enable link creation order tracking and indexing on GCPL\n");
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, LINK_VISIT_HARD_LINKS_NO_CYCLE_TEST_SUBGROUP_NAME,
+ H5P_DEFAULT, gcpl_id, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container subgroup '%s'\n",
+ LINK_VISIT_HARD_LINKS_NO_CYCLE_TEST_SUBGROUP_NAME);
+ goto error;
+ }
+
+ if ((dset_dtype = generate_random_datatype(H5T_NO_CLASS, FALSE)) < 0)
+ TEST_ERROR;
+
+ if ((dset_dspace = generate_random_dataspace(LINK_VISIT_HARD_LINKS_NO_CYCLE_TEST_DSET_SPACE_RANK, NULL,
+ NULL, FALSE)) < 0)
+ TEST_ERROR;
+
+ for (i = 0; i < LINK_VISIT_HARD_LINKS_NO_CYCLE_TEST_NUM_SUBGROUPS; i++) {
+ size_t j;
+ char grp_name[LINK_VISIT_HARD_LINKS_NO_CYCLE_TEST_BUF_SIZE];
+
+ /* Create the groups with a reverse-ordering naming scheme to test creation order later */
+ HDsnprintf(grp_name, LINK_VISIT_HARD_LINKS_NO_CYCLE_TEST_BUF_SIZE,
+ LINK_VISIT_HARD_LINKS_NO_CYCLE_TEST_NESTED_GRP_NAME "%d",
+ (int)(LINK_VISIT_HARD_LINKS_NO_CYCLE_TEST_NUM_SUBGROUPS - i - 1));
+
+ if ((subgroup_id = H5Gcreate2(group_id, grp_name, H5P_DEFAULT, gcpl_id, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create subgroup '%s'\n", grp_name);
+ goto error;
+ }
+
+ for (j = 0; j < LINK_VISIT_HARD_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_GROUP; j++) {
+ char dset_name[LINK_VISIT_HARD_LINKS_NO_CYCLE_TEST_BUF_SIZE];
+
+ /* Create the datasets with a reverse-ordering naming scheme to test creation order later */
+ HDsnprintf(dset_name, LINK_VISIT_HARD_LINKS_NO_CYCLE_TEST_BUF_SIZE,
+ LINK_VISIT_HARD_LINKS_NO_CYCLE_TEST_LINK_NAME "%d",
+ (int)(LINK_VISIT_HARD_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_GROUP - j - 1));
+
+ if ((dset_id = H5Dcreate2(subgroup_id, dset_name, dset_dtype, dset_dspace, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create hard link '%s'\n", dset_name);
+ goto error;
+ }
+
+ /* Verify the link has been created */
+ if ((link_exists = H5Lexists(subgroup_id, dset_name, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if link '%s' exists\n", dset_name);
+ goto error;
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" link '%s' did not exist\n", dset_name);
+ goto error;
+ }
+
+ if (H5Dclose(dset_id) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't close dataset '%s'\n", dset_name);
+ goto error;
+ }
+ }
+
+ if (H5Gclose(subgroup_id) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't close subgroup '%s'\n", grp_name);
+ goto error;
+ }
+ }
+
+ PASSED();
+
+ BEGIN_MULTIPART
+ {
+ /*
+ * NOTE: A counter is passed to the iteration callback to match each expected
+ * link with a given step throughout all of the following iterations. This
+ * checks that the links are indeed returned in the correct order.
+ */
+
+ PART_BEGIN(H5Lvisit_no_cycles_link_name_increasing)
+ {
+ TESTING_2("H5Lvisit2 by link name in increasing order");
+
+ i = 0;
+
+ if (H5Lvisit2(group_id, H5_INDEX_NAME, H5_ITER_INC, link_visit_hard_links_no_cycles_cb, &i) < 0) {
+ H5_FAILED();
+ HDprintf(" H5Lvisit2 by index type name in increasing order failed\n");
+ PART_ERROR(H5Lvisit_no_cycles_link_name_increasing);
+ }
+
+ if (i != LINK_VISIT_HARD_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_TEST) {
+ H5_FAILED();
+ HDprintf(" some links were not visited!\n");
+ PART_ERROR(H5Lvisit_no_cycles_link_name_increasing);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lvisit_no_cycles_link_name_increasing);
+
+ PART_BEGIN(H5Lvisit_no_cycles_link_name_decreasing)
+ {
+ TESTING_2("H5Lvisit2 by link name in decreasing order");
+#ifndef NO_DECREASING_ALPHA_ITER_ORDER
+ /* Reset the counter to the appropriate value for the next test */
+ i = LINK_VISIT_HARD_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_TEST;
+
+ if (H5Lvisit2(group_id, H5_INDEX_NAME, H5_ITER_DEC, link_visit_hard_links_no_cycles_cb, &i) < 0) {
+ H5_FAILED();
+ HDprintf(" H5Lvisit2 by index type name in decreasing order failed\n");
+ PART_ERROR(H5Lvisit_no_cycles_link_name_decreasing);
+ }
+
+ if (i != 2 * LINK_VISIT_HARD_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_TEST) {
+ H5_FAILED();
+ HDprintf(" some links were not visited!\n");
+ PART_ERROR(H5Lvisit_no_cycles_link_name_decreasing);
+ }
+
+ PASSED();
+#else
+ SKIPPED();
+ PART_EMPTY(H5Lvisit_no_cycles_link_name_decreasing);
+#endif
+ }
+ PART_END(H5Lvisit_no_cycles_link_name_decreasing);
+
+ PART_BEGIN(H5Lvisit_no_cycles_link_creation_increasing)
+ {
+ TESTING_2("H5Lvisit2 by creation order in increasing order");
+
+ /* Reset the counter to the appropriate value for the next test */
+ i = 2 * LINK_VISIT_HARD_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_TEST;
+
+ if (H5Lvisit2(group_id, H5_INDEX_CRT_ORDER, H5_ITER_INC, link_visit_hard_links_no_cycles_cb, &i) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" H5Lvisit2 by index type creation order in increasing order failed\n");
+ PART_ERROR(H5Lvisit_no_cycles_link_creation_increasing);
+ }
+
+ if (i != 3 * LINK_VISIT_HARD_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_TEST) {
+ H5_FAILED();
+ HDprintf(" some links were not visited!\n");
+ PART_ERROR(H5Lvisit_no_cycles_link_creation_increasing);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lvisit_no_cycles_link_creation_increasing);
+
+ PART_BEGIN(H5Lvisit_no_cycles_link_creation_decreasing)
+ {
+ TESTING_2("H5Lvisit2 by creation order in decreasing order");
+
+ /* Reset the counter to the appropriate value for the next test */
+ i = 3 * LINK_VISIT_HARD_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_TEST;
+
+ if (H5Lvisit2(group_id, H5_INDEX_CRT_ORDER, H5_ITER_DEC, link_visit_hard_links_no_cycles_cb, &i) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" H5Lvisit2 by index type creation order in decreasing order failed\n");
+ PART_ERROR(H5Lvisit_no_cycles_link_creation_decreasing);
+ }
+
+ if (i != 4 * LINK_VISIT_HARD_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_TEST) {
+ H5_FAILED();
+ HDprintf(" some links were not visited!\n");
+ PART_ERROR(H5Lvisit_no_cycles_link_creation_decreasing);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lvisit_no_cycles_link_creation_decreasing);
+
+ PART_BEGIN(H5Lvisit_by_name_no_cycles_link_name_increasing)
+ {
+ TESTING_2("H5Lvisit_by_name2 by link name in increasing order");
+
+ /* Reset the counter to the appropriate value for the next test */
+ i = 0;
+
+ if (H5Lvisit_by_name2(
+ file_id, "/" LINK_TEST_GROUP_NAME "/" LINK_VISIT_HARD_LINKS_NO_CYCLE_TEST_SUBGROUP_NAME,
+ H5_INDEX_NAME, H5_ITER_INC, link_visit_hard_links_no_cycles_cb, &i, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" H5Lvisit_by_name2 by index type name in increasing order failed\n");
+ PART_ERROR(H5Lvisit_by_name_no_cycles_link_name_increasing);
+ }
+
+ if (i != LINK_VISIT_HARD_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_TEST) {
+ H5_FAILED();
+ HDprintf(" some links were not visited!\n");
+ PART_ERROR(H5Lvisit_by_name_no_cycles_link_name_increasing);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lvisit_by_name_no_cycles_link_name_increasing);
+
+ PART_BEGIN(H5Lvisit_by_name_no_cycles_link_name_decreasing)
+ {
+ TESTING_2("H5Lvisit_by_name2 by link name in decreasing order");
+#ifndef NO_DECREASING_ALPHA_ITER_ORDER
+ /* Reset the counter to the appropriate value for the next test */
+ i = LINK_VISIT_HARD_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_TEST;
+
+ if (H5Lvisit_by_name2(
+ file_id, "/" LINK_TEST_GROUP_NAME "/" LINK_VISIT_HARD_LINKS_NO_CYCLE_TEST_SUBGROUP_NAME,
+ H5_INDEX_NAME, H5_ITER_DEC, link_visit_hard_links_no_cycles_cb, &i, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" H5Lvisit_by_name2 by index type name in decreasing order failed\n");
+ PART_ERROR(H5Lvisit_by_name_no_cycles_link_name_decreasing);
+ }
+
+ if (i != 2 * LINK_VISIT_HARD_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_TEST) {
+ H5_FAILED();
+ HDprintf(" some links were not visited!\n");
+ PART_ERROR(H5Lvisit_by_name_no_cycles_link_name_decreasing);
+ }
+
+ PASSED();
+#else
+ SKIPPED();
+ PART_EMPTY(H5Lvisit_by_name_no_cycles_link_name_decreasing);
+#endif
+ }
+ PART_END(H5Lvisit_by_name_no_cycles_link_name_decreasing);
+
+ PART_BEGIN(H5Lvisit_by_name_no_cycles_link_creation_increasing)
+ {
+ TESTING_2("H5Lvisit_by_name2 by creation order in increasing order");
+
+ /* Reset the counter to the appropriate value for the next test */
+ i = 2 * LINK_VISIT_HARD_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_TEST;
+
+ if (H5Lvisit_by_name2(
+ file_id, "/" LINK_TEST_GROUP_NAME "/" LINK_VISIT_HARD_LINKS_NO_CYCLE_TEST_SUBGROUP_NAME,
+ H5_INDEX_CRT_ORDER, H5_ITER_INC, link_visit_hard_links_no_cycles_cb, &i,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" H5Lvisit_by_name2 by index type creation order in increasing order failed\n");
+ PART_ERROR(H5Lvisit_by_name_no_cycles_link_creation_increasing);
+ }
+
+ if (i != 3 * LINK_VISIT_HARD_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_TEST) {
+ H5_FAILED();
+ HDprintf(" some links were not visited!\n");
+ PART_ERROR(H5Lvisit_by_name_no_cycles_link_creation_increasing);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lvisit_by_name_no_cycles_link_creation_increasing);
+
+ PART_BEGIN(H5Lvisit_by_name_no_cycles_link_creation_decreasing)
+ {
+ TESTING_2("H5Lvisit_by_name2 by creation order in decreasing order");
+
+ /* Reset the counter to the appropriate value for the next test */
+ i = 3 * LINK_VISIT_HARD_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_TEST;
+
+ if (H5Lvisit_by_name2(
+ file_id, "/" LINK_TEST_GROUP_NAME "/" LINK_VISIT_HARD_LINKS_NO_CYCLE_TEST_SUBGROUP_NAME,
+ H5_INDEX_CRT_ORDER, H5_ITER_DEC, link_visit_hard_links_no_cycles_cb, &i,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" H5Lvisit_by_name2 by index type creation order in decreasing order failed\n");
+ PART_ERROR(H5Lvisit_by_name_no_cycles_link_creation_decreasing);
+ }
+
+ if (i != 4 * LINK_VISIT_HARD_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_TEST) {
+ H5_FAILED();
+ HDprintf(" some links were not visited!\n");
+ PART_ERROR(H5Lvisit_by_name_no_cycles_link_creation_decreasing);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lvisit_by_name_no_cycles_link_creation_decreasing);
+ }
+ END_MULTIPART;
+
+ TESTING_2("test cleanup");
+
+ if (H5Sclose(dset_dspace) < 0)
+ TEST_ERROR;
+ if (H5Tclose(dset_dtype) < 0)
+ TEST_ERROR;
+ if (H5Pclose(gcpl_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(dset_dspace);
+ H5Tclose(dset_dtype);
+ H5Dclose(dset_id);
+ H5Pclose(gcpl_id);
+ H5Gclose(subgroup_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to check the functionality of recursive
+ * link iteration using H5Lvisit(_by_name)2 with
+ * only soft links and where there are no cyclic
+ * links. Iteration is done in increasing and
+ * decreasing order of both link name and link
+ * creation order.
+ */
+static int
+test_link_visit_soft_links_no_cycles(void)
+{
+ size_t i;
+ htri_t link_exists;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t subgroup_id = H5I_INVALID_HID;
+ hid_t gcpl_id = H5I_INVALID_HID;
+
+ TESTING_MULTIPART("link visiting without cycles (only soft links)");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_LINK_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_SOFT_LINKS) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_ITERATE) || !(vol_cap_flags_g & H5VL_CAP_FLAG_CREATION_ORDER)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file, group, link, soft link, iterate, or creation order "
+ "aren't supported with this connector\n");
+ return 0;
+ }
+
+ TESTING_2("test setup");
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, LINK_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", LINK_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((gcpl_id = H5Pcreate(H5P_GROUP_CREATE)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create a GCPL\n");
+ goto error;
+ }
+
+ if (H5Pset_link_creation_order(gcpl_id, H5P_CRT_ORDER_TRACKED | H5P_CRT_ORDER_INDEXED) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't enable link creation order tracking and indexing on GCPL\n");
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, LINK_VISIT_SOFT_LINKS_NO_CYCLE_TEST_SUBGROUP_NAME,
+ H5P_DEFAULT, gcpl_id, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container subgroup '%s'\n",
+ LINK_VISIT_SOFT_LINKS_NO_CYCLE_TEST_SUBGROUP_NAME);
+ goto error;
+ }
+
+ for (i = 0; i < LINK_VISIT_SOFT_LINKS_NO_CYCLE_TEST_NUM_SUBGROUPS; i++) {
+ size_t j;
+ char grp_name[LINK_VISIT_SOFT_LINKS_NO_CYCLE_TEST_BUF_SIZE];
+
+ /* Create the groups with a reverse-ordering naming scheme to test creation order later */
+ HDsnprintf(grp_name, LINK_VISIT_SOFT_LINKS_NO_CYCLE_TEST_BUF_SIZE,
+ LINK_VISIT_SOFT_LINKS_NO_CYCLE_TEST_NESTED_GRP_NAME "%d",
+ (int)(LINK_VISIT_SOFT_LINKS_NO_CYCLE_TEST_NUM_SUBGROUPS - i - 1));
+
+ if ((subgroup_id = H5Gcreate2(group_id, grp_name, H5P_DEFAULT, gcpl_id, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create subgroup '%s'\n", grp_name);
+ goto error;
+ }
+
+ for (j = 0; j < LINK_VISIT_SOFT_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_GROUP; j++) {
+ char link_name[LINK_VISIT_SOFT_LINKS_NO_CYCLE_TEST_BUF_SIZE];
+ char link_target[LINK_VISIT_SOFT_LINKS_NO_CYCLE_TEST_BUF_SIZE];
+
+ /* Create the links with a reverse-ordering naming scheme to test creation order later */
+ HDsnprintf(link_name, LINK_VISIT_SOFT_LINKS_NO_CYCLE_TEST_BUF_SIZE,
+ LINK_VISIT_SOFT_LINKS_NO_CYCLE_TEST_LINK_NAME "%d",
+ (int)(LINK_VISIT_SOFT_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_GROUP - j - 1));
+
+ HDsnprintf(link_target, LINK_VISIT_SOFT_LINKS_NO_CYCLE_TEST_BUF_SIZE, "target%d",
+ (int)(LINK_VISIT_SOFT_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_GROUP - j - 1));
+
+ if (H5Lcreate_soft(link_target, subgroup_id, link_name, H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create soft link '%s'\n", link_name);
+ goto error;
+ }
+
+ /* Verify the link has been created */
+ if ((link_exists = H5Lexists(subgroup_id, link_name, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if link '%s' exists\n", link_name);
+ goto error;
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" link '%s' did not exist\n", link_name);
+ goto error;
+ }
+ }
+
+ if (H5Gclose(subgroup_id) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't close subgroup '%s'\n", grp_name);
+ goto error;
+ }
+ }
+
+ PASSED();
+
+ BEGIN_MULTIPART
+ {
+ /*
+ * NOTE: A counter is passed to the iteration callback to match each expected
+ * link with a given step throughout all of the following iterations. This
+ * checks that the links are indeed returned in the correct order.
+ */
+
+ PART_BEGIN(H5Lvisit_no_cycles_link_name_increasing)
+ {
+ TESTING_2("H5Lvisit2 by link name in increasing order");
+
+ i = 0;
+
+ if (H5Lvisit2(group_id, H5_INDEX_NAME, H5_ITER_INC, link_visit_soft_links_no_cycles_cb, &i) < 0) {
+ H5_FAILED();
+ HDprintf(" H5Lvisit2 by index type name in increasing order failed\n");
+ PART_ERROR(H5Lvisit_no_cycles_link_name_increasing);
+ }
+
+ if (i != LINK_VISIT_SOFT_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_TEST) {
+ H5_FAILED();
+ HDprintf(" some links were not visited!\n");
+ PART_ERROR(H5Lvisit_no_cycles_link_name_increasing);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lvisit_no_cycles_link_name_increasing);
+
+ PART_BEGIN(H5Lvisit_no_cycles_link_name_decreasing)
+ {
+ TESTING_2("H5Lvisit2 by link name in decreasing order");
+#ifndef NO_DECREASING_ALPHA_ITER_ORDER
+ /* Reset the counter to the appropriate value for the next test */
+ i = LINK_VISIT_SOFT_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_TEST;
+
+ if (H5Lvisit2(group_id, H5_INDEX_NAME, H5_ITER_DEC, link_visit_soft_links_no_cycles_cb, &i) < 0) {
+ H5_FAILED();
+ HDprintf(" H5Lvisit2 by index type name in decreasing order failed\n");
+ PART_ERROR(H5Lvisit_no_cycles_link_name_decreasing);
+ }
+
+ if (i != 2 * LINK_VISIT_SOFT_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_TEST) {
+ H5_FAILED();
+ HDprintf(" some links were not visited!\n");
+ PART_ERROR(H5Lvisit_no_cycles_link_name_decreasing);
+ }
+
+ PASSED();
+#else
+ SKIPPED();
+ PART_EMPTY(H5Lvisit_no_cycles_link_name_decreasing);
+#endif
+ }
+ PART_END(H5Lvisit_no_cycles_link_name_decreasing);
+
+ PART_BEGIN(H5Lvisit_no_cycles_link_creation_increasing)
+ {
+ TESTING_2("H5Lvisit2 by creation order in increasing order");
+
+ /* Reset the counter to the appropriate value for the next test */
+ i = 2 * LINK_VISIT_SOFT_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_TEST;
+
+ if (H5Lvisit2(group_id, H5_INDEX_CRT_ORDER, H5_ITER_INC, link_visit_soft_links_no_cycles_cb, &i) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" H5Lvisit2 by index type creation order in increasing order failed\n");
+ PART_ERROR(H5Lvisit_no_cycles_link_creation_increasing);
+ }
+
+ if (i != 3 * LINK_VISIT_SOFT_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_TEST) {
+ H5_FAILED();
+ HDprintf(" some links were not visited!\n");
+ PART_ERROR(H5Lvisit_no_cycles_link_creation_increasing);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lvisit_no_cycles_link_creation_increasing);
+
+ PART_BEGIN(H5Lvisit_no_cycles_link_creation_decreasing)
+ {
+ TESTING_2("H5Lvisit2 by creation order in decreasing order");
+
+ /* Reset the counter to the appropriate value for the next test */
+ i = 3 * LINK_VISIT_SOFT_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_TEST;
+
+ if (H5Lvisit2(group_id, H5_INDEX_CRT_ORDER, H5_ITER_DEC, link_visit_soft_links_no_cycles_cb, &i) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" H5Lvisit2 by index type creation order in decreasing order failed\n");
+ PART_ERROR(H5Lvisit_no_cycles_link_creation_decreasing);
+ }
+
+ if (i != 4 * LINK_VISIT_SOFT_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_TEST) {
+ H5_FAILED();
+ HDprintf(" some links were not visited!\n");
+ PART_ERROR(H5Lvisit_no_cycles_link_creation_decreasing);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lvisit_no_cycles_link_creation_decreasing);
+
+ PART_BEGIN(H5Lvisit_by_name_no_cycles_link_name_increasing)
+ {
+ TESTING_2("H5Lvisit_by_name2 by link name in increasing order");
+
+ i = 0;
+
+ if (H5Lvisit_by_name2(
+ file_id, "/" LINK_TEST_GROUP_NAME "/" LINK_VISIT_SOFT_LINKS_NO_CYCLE_TEST_SUBGROUP_NAME,
+ H5_INDEX_NAME, H5_ITER_INC, link_visit_soft_links_no_cycles_cb, &i, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" H5Lvisit_by_name2 by index type name in increasing order failed\n");
+ PART_ERROR(H5Lvisit_by_name_no_cycles_link_name_increasing);
+ }
+
+ if (i != LINK_VISIT_SOFT_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_TEST) {
+ H5_FAILED();
+ HDprintf(" some links were not visited!\n");
+ PART_ERROR(H5Lvisit_by_name_no_cycles_link_name_increasing);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lvisit_by_name_no_cycles_link_name_increasing);
+
+ PART_BEGIN(H5Lvisit_by_name_no_cycles_link_name_decreasing)
+ {
+ TESTING_2("H5Lvisit_by_name2 by link name in decreasing order");
+#ifndef NO_DECREASING_ALPHA_ITER_ORDER
+ /* Reset the counter to the appropriate value for the next test */
+ i = LINK_VISIT_SOFT_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_TEST;
+
+ if (H5Lvisit_by_name2(
+ file_id, "/" LINK_TEST_GROUP_NAME "/" LINK_VISIT_SOFT_LINKS_NO_CYCLE_TEST_SUBGROUP_NAME,
+ H5_INDEX_NAME, H5_ITER_DEC, link_visit_soft_links_no_cycles_cb, &i, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" H5Lvisit_by_name2 by index type name in decreasing order failed\n");
+ PART_ERROR(H5Lvisit_by_name_no_cycles_link_name_decreasing);
+ }
+
+ if (i != 2 * LINK_VISIT_SOFT_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_TEST) {
+ H5_FAILED();
+ HDprintf(" some links were not visited!\n");
+ PART_ERROR(H5Lvisit_by_name_no_cycles_link_name_decreasing);
+ }
+
+ PASSED();
+#else
+ SKIPPED();
+ PART_EMPTY(H5Lvisit_by_name_no_cycles_link_name_decreasing);
+#endif
+ }
+ PART_END(H5Lvisit_by_name_no_cycles_link_name_decreasing);
+
+ PART_BEGIN(H5Lvisit_by_name_no_cycles_link_creation_increasing)
+ {
+ TESTING_2("H5Lvisit_by_name2 by creation order in increasing order");
+
+ /* Reset the counter to the appropriate value for the next test */
+ i = 2 * LINK_VISIT_SOFT_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_TEST;
+
+ if (H5Lvisit_by_name2(
+ file_id, "/" LINK_TEST_GROUP_NAME "/" LINK_VISIT_SOFT_LINKS_NO_CYCLE_TEST_SUBGROUP_NAME,
+ H5_INDEX_CRT_ORDER, H5_ITER_INC, link_visit_soft_links_no_cycles_cb, &i,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" H5Lvisit_by_name2 by index type creation order in increasing order failed\n");
+ PART_ERROR(H5Lvisit_by_name_no_cycles_link_creation_increasing);
+ }
+
+ if (i != 3 * LINK_VISIT_SOFT_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_TEST) {
+ H5_FAILED();
+ HDprintf(" some links were not visited!\n");
+ PART_ERROR(H5Lvisit_by_name_no_cycles_link_creation_increasing);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lvisit_by_name_no_cycles_link_creation_increasing);
+
+ PART_BEGIN(H5Lvisit_by_name_no_cycles_link_creation_decreasing)
+ {
+ TESTING_2("H5Lvisit_by_name2 by creation order in decreasing order");
+
+ /* Reset the counter to the appropriate value for the next test */
+ i = 3 * LINK_VISIT_SOFT_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_TEST;
+
+ if (H5Lvisit_by_name2(
+ file_id, "/" LINK_TEST_GROUP_NAME "/" LINK_VISIT_SOFT_LINKS_NO_CYCLE_TEST_SUBGROUP_NAME,
+ H5_INDEX_CRT_ORDER, H5_ITER_DEC, link_visit_soft_links_no_cycles_cb, &i,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" H5Lvisit_by_name2 by index type creation order in decreasing order failed\n");
+ PART_ERROR(H5Lvisit_by_name_no_cycles_link_creation_decreasing);
+ }
+
+ if (i != 4 * LINK_VISIT_SOFT_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_TEST) {
+ H5_FAILED();
+ HDprintf(" some links were not visited!\n");
+ PART_ERROR(H5Lvisit_by_name_no_cycles_link_creation_decreasing);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lvisit_by_name_no_cycles_link_creation_decreasing);
+ }
+ END_MULTIPART;
+
+ TESTING_2("test cleanup");
+
+ if (H5Pclose(gcpl_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Pclose(gcpl_id);
+ H5Gclose(subgroup_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
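+/*
+ * Editor's note: the link_visit_*_cb callbacks used by these tests are
+ * defined elsewhere in this file and are not shown in this hunk. As a rough,
+ * hypothetical sketch (the function and variable names below are invented
+ * for illustration), a counting callback for H5Lvisit2 / H5Lvisit_by_name2
+ * follows the H5L_iterate2_t signature and advances the shared counter
+ * passed in through op_data; the real callbacks additionally validate the
+ * visited link's name and type against the current counter value.
+ */
+#if 0 /* illustrative sketch only -- not compiled as part of the test suite */
+static herr_t
+example_link_visit_counting_cb(hid_t group_id, const char *name, const H5L_info2_t *info, void *op_data)
+{
+    size_t *counter = (size_t *)op_data;
+
+    (void)group_id;
+    (void)name;
+    (void)info;
+
+    /* Each visited link bumps the counter; after the traversal the caller
+     * compares the final value against the expected multiple of the
+     * per-test link count.
+     */
+    (*counter)++;
+
+    return H5_ITER_CONT;
+}
+#endif
+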
+/*
+ * A test to check the functionality of recursive
+ * link iteration using H5Lvisit(_by_name)2 with
+ * only external links and where there are no cyclic
+ * links. Iteration is done in increasing and
+ * decreasing order of both link name and link
+ * creation order.
+ */
+static int
+test_link_visit_external_links_no_cycles(void)
+{
+#ifndef NO_EXTERNAL_LINKS
+ size_t i;
+ htri_t link_exists;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t subgroup_id = H5I_INVALID_HID;
+ hid_t gcpl_id = H5I_INVALID_HID;
+ char ext_link_filename[H5_API_TEST_FILENAME_MAX_LENGTH];
+#endif
+
+ TESTING_MULTIPART("link visiting without cycles (only external links)");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_LINK_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_EXTERNAL_LINKS) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_ITERATE) || !(vol_cap_flags_g & H5VL_CAP_FLAG_CREATION_ORDER)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file, group, link, external link, iterate, or creation order "
+ "aren't supported with this connector\n");
+ return 0;
+ }
+
+#ifndef NO_EXTERNAL_LINKS
+ TESTING_2("test setup");
+
+ HDsnprintf(ext_link_filename, H5_API_TEST_FILENAME_MAX_LENGTH, "%s", EXTERNAL_LINK_TEST_FILE_NAME);
+
+ if ((file_id = H5Fcreate(ext_link_filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create file '%s' for external link to reference\n", ext_link_filename);
+ goto error;
+ }
+
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, LINK_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", LINK_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((gcpl_id = H5Pcreate(H5P_GROUP_CREATE)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create a GCPL\n");
+ goto error;
+ }
+
+ if (H5Pset_link_creation_order(gcpl_id, H5P_CRT_ORDER_TRACKED | H5P_CRT_ORDER_INDEXED) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't enable link creation order tracking and indexing on GCPL\n");
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, LINK_VISIT_EXT_LINKS_NO_CYCLE_TEST_SUBGROUP_NAME, H5P_DEFAULT,
+ gcpl_id, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container subgroup '%s'\n",
+ LINK_VISIT_EXT_LINKS_NO_CYCLE_TEST_SUBGROUP_NAME);
+ goto error;
+ }
+
+ for (i = 0; i < LINK_VISIT_EXT_LINKS_NO_CYCLE_TEST_NUM_SUBGROUPS; i++) {
+ size_t j;
+ char grp_name[LINK_VISIT_EXT_LINKS_NO_CYCLE_TEST_BUF_SIZE];
+
+ /* Create the groups with a reverse-ordering naming scheme to test creation order later */
+ HDsnprintf(grp_name, LINK_VISIT_EXT_LINKS_NO_CYCLE_TEST_BUF_SIZE,
+ LINK_VISIT_EXT_LINKS_NO_CYCLE_TEST_NESTED_GRP_NAME "%d",
+ (int)(LINK_VISIT_EXT_LINKS_NO_CYCLE_TEST_NUM_SUBGROUPS - i - 1));
+
+ if ((subgroup_id = H5Gcreate2(group_id, grp_name, H5P_DEFAULT, gcpl_id, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create subgroup '%s'\n", grp_name);
+ goto error;
+ }
+
+ for (j = 0; j < LINK_VISIT_EXT_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_GROUP; j++) {
+ char link_name[LINK_VISIT_EXT_LINKS_NO_CYCLE_TEST_BUF_SIZE];
+
+ /* Create the links with a reverse-ordering naming scheme to test creation order later */
+ HDsnprintf(link_name, LINK_VISIT_EXT_LINKS_NO_CYCLE_TEST_BUF_SIZE,
+ LINK_VISIT_EXT_LINKS_NO_CYCLE_TEST_LINK_NAME "%d",
+ (int)(LINK_VISIT_EXT_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_GROUP - j - 1));
+
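+            /* Each external link targets the root group of the separate file
+             * created during setup, so none of the links in this test point
+             * back into the file being traversed.
+             */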
+ if (H5Lcreate_external(ext_link_filename, "/", subgroup_id, link_name, H5P_DEFAULT, H5P_DEFAULT) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create external link '%s'\n", link_name);
+ goto error;
+ }
+
+ /* Verify the link has been created */
+ if ((link_exists = H5Lexists(subgroup_id, link_name, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if link '%s' exists\n", link_name);
+ goto error;
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" link '%s' did not exist\n", link_name);
+ goto error;
+ }
+ }
+
+ if (H5Gclose(subgroup_id) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't close subgroup '%s'\n", grp_name);
+ goto error;
+ }
+ }
+
+ PASSED();
+
+ BEGIN_MULTIPART
+ {
+        /*
+         * NOTE: A counter is passed to the iteration callback so that each
+         * visited link can be matched against the link expected at that step
+         * of the following iterations. This checks that the links are indeed
+         * returned in the correct order.
+         */
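+
+        /* Concretely, with N = LINK_VISIT_EXT_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_TEST,
+         * the counter is seeded with 0, N, 2 * N and 3 * N for the four
+         * H5Lvisit2 parts below (and again for the four H5Lvisit_by_name2
+         * parts), and each part then checks that it has advanced to the
+         * next multiple of N.
+         */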
+
+ PART_BEGIN(H5Lvisit_no_cycles_link_name_increasing)
+ {
+ TESTING_2("H5Lvisit2 by link name in increasing order");
+
+ i = 0;
+
+ if (H5Lvisit2(group_id, H5_INDEX_NAME, H5_ITER_INC, link_visit_external_links_no_cycles_cb, &i) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" H5Lvisit2 by index type name in increasing order failed\n");
+ PART_ERROR(H5Lvisit_no_cycles_link_name_increasing);
+ }
+
+ if (i != LINK_VISIT_EXT_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_TEST) {
+ H5_FAILED();
+ HDprintf(" some links were not visited!\n");
+ PART_ERROR(H5Lvisit_no_cycles_link_name_increasing);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lvisit_no_cycles_link_name_increasing);
+
+ PART_BEGIN(H5Lvisit_no_cycles_link_name_decreasing)
+ {
+ TESTING_2("H5Lvisit2 by link name in decreasing order");
+#ifndef NO_DECREASING_ALPHA_ITER_ORDER
+ /* Reset the counter to the appropriate value for the next test */
+ i = LINK_VISIT_EXT_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_TEST;
+
+ if (H5Lvisit2(group_id, H5_INDEX_NAME, H5_ITER_DEC, link_visit_external_links_no_cycles_cb, &i) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" H5Lvisit2 by index type name in decreasing order failed\n");
+ PART_ERROR(H5Lvisit_no_cycles_link_name_decreasing);
+ }
+
+ if (i != 2 * LINK_VISIT_EXT_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_TEST) {
+ H5_FAILED();
+ HDprintf(" some links were not visited!\n");
+ PART_ERROR(H5Lvisit_no_cycles_link_name_decreasing);
+ }
+
+ PASSED();
+#else
+ SKIPPED();
+ PART_EMPTY(H5Lvisit_no_cycles_link_name_decreasing);
+#endif
+ }
+ PART_END(H5Lvisit_no_cycles_link_name_decreasing);
+
+ PART_BEGIN(H5Lvisit_no_cycles_link_creation_increasing)
+ {
+ TESTING_2("H5Lvisit2 by creation order in increasing order");
+
+ /* Reset the counter to the appropriate value for the next test */
+ i = 2 * LINK_VISIT_EXT_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_TEST;
+
+ if (H5Lvisit2(group_id, H5_INDEX_CRT_ORDER, H5_ITER_INC, link_visit_external_links_no_cycles_cb,
+ &i) < 0) {
+ H5_FAILED();
+ HDprintf(" H5Lvisit2 by index type creation order in increasing order failed\n");
+ PART_ERROR(H5Lvisit_no_cycles_link_creation_increasing);
+ }
+
+ if (i != 3 * LINK_VISIT_EXT_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_TEST) {
+ H5_FAILED();
+ HDprintf(" some links were not visited!\n");
+ PART_ERROR(H5Lvisit_no_cycles_link_creation_increasing);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lvisit_no_cycles_link_creation_increasing);
+
+ PART_BEGIN(H5Lvisit_no_cycles_link_creation_decreasing)
+ {
+ TESTING_2("H5Lvisit2 by creation order in decreasing order");
+
+ /* Reset the counter to the appropriate value for the next test */
+ i = 3 * LINK_VISIT_EXT_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_TEST;
+
+ if (H5Lvisit2(group_id, H5_INDEX_CRT_ORDER, H5_ITER_DEC, link_visit_external_links_no_cycles_cb,
+ &i) < 0) {
+ H5_FAILED();
+ HDprintf(" H5Lvisit2 by index type creation order in decreasing order failed\n");
+ PART_ERROR(H5Lvisit_no_cycles_link_creation_decreasing);
+ }
+
+ if (i != 4 * LINK_VISIT_EXT_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_TEST) {
+ H5_FAILED();
+ HDprintf(" some links were not visited!\n");
+ PART_ERROR(H5Lvisit_no_cycles_link_creation_decreasing);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lvisit_no_cycles_link_creation_decreasing);
+
+ PART_BEGIN(H5Lvisit_by_name_no_cycles_link_name_increasing)
+ {
+ TESTING_2("H5Lvisit_by_name2 by link name in increasing order");
+
+ /* Reset the counter to the appropriate value for the next test */
+ i = 0;
+
+ if (H5Lvisit_by_name2(
+ file_id, "/" LINK_TEST_GROUP_NAME "/" LINK_VISIT_EXT_LINKS_NO_CYCLE_TEST_SUBGROUP_NAME,
+ H5_INDEX_NAME, H5_ITER_INC, link_visit_external_links_no_cycles_cb, &i,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" H5Lvisit_by_name2 by index type name in increasing order failed\n");
+ PART_ERROR(H5Lvisit_by_name_no_cycles_link_name_increasing);
+ }
+
+ if (i != LINK_VISIT_EXT_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_TEST) {
+ H5_FAILED();
+ HDprintf(" some links were not visited!\n");
+ PART_ERROR(H5Lvisit_by_name_no_cycles_link_name_increasing);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lvisit_by_name_no_cycles_link_name_increasing);
+
+ PART_BEGIN(H5Lvisit_by_name_no_cycles_link_name_decreasing)
+ {
+ TESTING_2("H5Lvisit_by_name2 by link name in decreasing order");
+#ifndef NO_DECREASING_ALPHA_ITER_ORDER
+ /* Reset the counter to the appropriate value for the next test */
+ i = LINK_VISIT_EXT_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_TEST;
+
+ if (H5Lvisit_by_name2(
+ file_id, "/" LINK_TEST_GROUP_NAME "/" LINK_VISIT_EXT_LINKS_NO_CYCLE_TEST_SUBGROUP_NAME,
+ H5_INDEX_NAME, H5_ITER_DEC, link_visit_external_links_no_cycles_cb, &i,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" H5Lvisit_by_name2 by index type name in decreasing order failed\n");
+ PART_ERROR(H5Lvisit_by_name_no_cycles_link_name_decreasing);
+ }
+
+ if (i != 2 * LINK_VISIT_EXT_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_TEST) {
+ H5_FAILED();
+ HDprintf(" some links were not visited!\n");
+ PART_ERROR(H5Lvisit_by_name_no_cycles_link_name_decreasing);
+ }
+
+ PASSED();
+#else
+ SKIPPED();
+ PART_EMPTY(H5Lvisit_by_name_no_cycles_link_name_decreasing);
+#endif
+ }
+ PART_END(H5Lvisit_by_name_no_cycles_link_name_decreasing);
+
+ PART_BEGIN(H5Lvisit_by_name_no_cycles_link_creation_increasing)
+ {
+ TESTING_2("H5Lvisit_by_name2 by creation order in increasing order");
+
+ /* Reset the counter to the appropriate value for the next test */
+ i = 2 * LINK_VISIT_EXT_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_TEST;
+
+ if (H5Lvisit_by_name2(
+ file_id, "/" LINK_TEST_GROUP_NAME "/" LINK_VISIT_EXT_LINKS_NO_CYCLE_TEST_SUBGROUP_NAME,
+ H5_INDEX_CRT_ORDER, H5_ITER_INC, link_visit_external_links_no_cycles_cb, &i,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" H5Lvisit_by_name2 by index type creation order in increasing order failed\n");
+ PART_ERROR(H5Lvisit_by_name_no_cycles_link_creation_increasing);
+ }
+
+ if (i != 3 * LINK_VISIT_EXT_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_TEST) {
+ H5_FAILED();
+ HDprintf(" some links were not visited!\n");
+ PART_ERROR(H5Lvisit_by_name_no_cycles_link_creation_increasing);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lvisit_by_name_no_cycles_link_creation_increasing);
+
+ PART_BEGIN(H5Lvisit_by_name_no_cycles_link_creation_decreasing)
+ {
+ TESTING_2("H5Lvisit_by_name2 by creation order in decreasing order");
+
+ /* Reset the counter to the appropriate value for the next test */
+ i = 3 * LINK_VISIT_EXT_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_TEST;
+
+ if (H5Lvisit_by_name2(
+ file_id, "/" LINK_TEST_GROUP_NAME "/" LINK_VISIT_EXT_LINKS_NO_CYCLE_TEST_SUBGROUP_NAME,
+ H5_INDEX_CRT_ORDER, H5_ITER_DEC, link_visit_external_links_no_cycles_cb, &i,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" H5Lvisit_by_name2 by index type creation order in decreasing order failed\n");
+ PART_ERROR(H5Lvisit_by_name_no_cycles_link_creation_decreasing);
+ }
+
+ if (i != 4 * LINK_VISIT_EXT_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_TEST) {
+ H5_FAILED();
+ HDprintf(" some links were not visited!\n");
+ PART_ERROR(H5Lvisit_by_name_no_cycles_link_creation_decreasing);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lvisit_by_name_no_cycles_link_creation_decreasing);
+ }
+ END_MULTIPART;
+
+ TESTING_2("test cleanup");
+
+ if (H5Pclose(gcpl_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Pclose(gcpl_id);
+ H5Gclose(subgroup_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+#else
+ SKIPPED();
+ return 0;
+#endif
+}
+
+/*
+ * A test to check the functionality of recursive
+ * link iteration using H5Lvisit(_by_name)2 with
+ * only user-defined links and where there are no
+ * cyclic links. Iteration is done in increasing
+ * and decreasing order of both link name and link
+ * creation order.
+ *
+ * TODO refactor test so that creation order tests
+ * actually test the order that objects were created in.
+ */
+static int
+test_link_visit_ud_links_no_cycles(void)
+{
+ TESTING("link visiting without cycles (only user-defined links)");
+
+ SKIPPED();
+
+ return 1;
+}
+
+/*
+ * A test to check the functionality of recursive
+ * link iteration using H5Lvisit(_by_name)2 with
+ * mixed link types and where there are no cyclic
+ * links. Iteration is done in increasing and
+ * decreasing order of both link name and link
+ * creation order.
+ *
+ * TODO refactor test so that creation order tests
+ * actually test the order that objects were created in.
+ *
+ * TODO add UD links
+ *
+ * TODO refactor test to create a macro-defined number of subgroups
+ */
+static int
+test_link_visit_mixed_links_no_cycles(void)
+{
+#if !defined(NO_EXTERNAL_LINKS) && !defined(NO_USER_DEFINED_LINKS)
+ size_t i;
+ htri_t link_exists;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t subgroup1 = H5I_INVALID_HID, subgroup2 = H5I_INVALID_HID;
+ hid_t gcpl_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t dset_dtype = H5I_INVALID_HID;
+ hid_t fspace_id = H5I_INVALID_HID;
+ char ext_link_filename[H5_API_TEST_FILENAME_MAX_LENGTH];
+#endif
+
+ TESTING_MULTIPART("link visiting without cycles (mixed link types)");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_LINK_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_EXTERNAL_LINKS) || !(vol_cap_flags_g & H5VL_CAP_FLAG_SOFT_LINKS) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_HARD_LINKS) || !(vol_cap_flags_g & H5VL_CAP_FLAG_ITERATE) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_CREATION_ORDER)) {
+ SKIPPED();
+        HDprintf("    API functions for basic file, group, dataset, link, hard, soft, external link, iterate, "
+                 "or creation order aren't supported with this connector\n");
+ return 0;
+ }
+
+#if !defined(NO_EXTERNAL_LINKS) && !defined(NO_USER_DEFINED_LINKS)
+ TESTING_2("test setup");
+
+ HDsnprintf(ext_link_filename, H5_API_TEST_FILENAME_MAX_LENGTH, "%s", EXTERNAL_LINK_TEST_FILE_NAME);
+
+ if ((file_id = H5Fcreate(ext_link_filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create file '%s' for external link to reference\n", ext_link_filename);
+ goto error;
+ }
+
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, LINK_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", LINK_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((gcpl_id = H5Pcreate(H5P_GROUP_CREATE)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create a GCPL\n");
+ goto error;
+ }
+
+ if (H5Pset_link_creation_order(gcpl_id, H5P_CRT_ORDER_TRACKED | H5P_CRT_ORDER_INDEXED) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't enable link creation order tracking and indexing on GCPL\n");
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_SUBGROUP_NAME,
+ H5P_DEFAULT, gcpl_id, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container subgroup '%s'\n",
+ LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_SUBGROUP_NAME);
+ goto error;
+ }
+
+ if ((subgroup1 = H5Gcreate2(group_id, LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_SUBGROUP_NAME2, H5P_DEFAULT,
+ gcpl_id, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create first subgroup '%s'\n",
+ LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_SUBGROUP_NAME2);
+ goto error;
+ }
+
+ if ((subgroup2 = H5Gcreate2(group_id, LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_SUBGROUP_NAME3, H5P_DEFAULT,
+ gcpl_id, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create second subgroup '%s'\n",
+ LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_SUBGROUP_NAME3);
+ goto error;
+ }
+
+ if ((dset_dtype = generate_random_datatype(H5T_NO_CLASS, FALSE)) < 0)
+ TEST_ERROR;
+
+ if ((fspace_id = generate_random_dataspace(LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_DSET_SPACE_RANK, NULL,
+ NULL, FALSE)) < 0)
+ TEST_ERROR;
+
+ if ((dset_id = H5Dcreate2(subgroup1, LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_DSET_NAME, dset_dtype,
+ fspace_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+        HDprintf("    couldn't create first dataset '%s'\n", LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_DSET_NAME);
+        goto error;
+    }
+
+ if (H5Dclose(dset_id) < 0)
+ TEST_ERROR;
+
+ if ((dset_id = H5Dcreate2(subgroup2, LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_DSET_NAME2, dset_dtype,
+ fspace_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+        HDprintf("    couldn't create second dataset '%s'\n", LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_DSET_NAME2);
+        goto error;
+    }
+
+ if (H5Dclose(dset_id) < 0)
+ TEST_ERROR;
+
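+    /* Create the four links visited by this test: a hard link and a soft
+     * link to the first dataset in the first subgroup, plus an external link
+     * (to the root group of the file created above) and a hard link to the
+     * second dataset in the second subgroup.
+     */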
+ if (H5Lcreate_hard(subgroup1, LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_DSET_NAME, subgroup1,
+ LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_LINK_NAME1, H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create first hard link '%s'\n",
+ LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_LINK_NAME1);
+ goto error;
+ }
+
+ if (H5Lcreate_soft(LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_DSET_NAME, subgroup1,
+ LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_LINK_NAME2, H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create soft link '%s'\n", LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_LINK_NAME2);
+ goto error;
+ }
+
+ if (H5Lcreate_external(ext_link_filename, "/", subgroup2, LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_LINK_NAME3,
+ H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create external link '%s'\n", LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_LINK_NAME3);
+ goto error;
+ }
+
+ if (H5Lcreate_hard(subgroup2, LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_DSET_NAME2, subgroup2,
+ LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_LINK_NAME4, H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create second hard link '%s'\n",
+ LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_LINK_NAME4);
+ goto error;
+ }
+
+ /* Verify the links have been created */
+ if ((link_exists = H5Lexists(subgroup1, LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_LINK_NAME1, H5P_DEFAULT)) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if first link '%s' exists\n",
+ LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_LINK_NAME1);
+ goto error;
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" link 1 did not exist\n");
+ goto error;
+ }
+
+ if ((link_exists = H5Lexists(subgroup1, LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_LINK_NAME2, H5P_DEFAULT)) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if second link '%s' exists\n",
+ LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_LINK_NAME2);
+ goto error;
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" link 2 did not exist\n");
+ goto error;
+ }
+
+ if ((link_exists = H5Lexists(subgroup2, LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_LINK_NAME3, H5P_DEFAULT)) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if third link '%s' exists\n",
+ LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_LINK_NAME3);
+ goto error;
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" link 3 did not exist\n");
+ goto error;
+ }
+
+ if ((link_exists = H5Lexists(subgroup2, LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_LINK_NAME4, H5P_DEFAULT)) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if fourth link '%s' exists\n",
+ LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_LINK_NAME4);
+ goto error;
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" link 4 did not exist\n");
+ goto error;
+ }
+
+ PASSED();
+
+ BEGIN_MULTIPART
+ {
+        /*
+         * NOTE: A counter is passed to the iteration callback so that each
+         * visited link can be matched against the link expected at that step
+         * of the following iterations. This checks that the links are indeed
+         * returned in the correct order.
+         */
+
+ PART_BEGIN(H5Lvisit_no_cycles_link_name_increasing)
+ {
+ TESTING_2("H5Lvisit2 by link name in increasing order");
+
+ i = 0;
+
+ if (H5Lvisit2(group_id, H5_INDEX_NAME, H5_ITER_INC, link_visit_mixed_links_no_cycles_cb, &i) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" H5Lvisit2 by index type name in increasing order failed\n");
+ PART_ERROR(H5Lvisit_no_cycles_link_name_increasing);
+ }
+
+ if (i != LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_NUM_LINKS) {
+ H5_FAILED();
+ HDprintf(" some links were not visited!\n");
+ PART_ERROR(H5Lvisit_no_cycles_link_name_increasing);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lvisit_no_cycles_link_name_increasing);
+
+ PART_BEGIN(H5Lvisit_no_cycles_link_name_decreasing)
+ {
+ TESTING_2("H5Lvisit2 by link name in decreasing order");
+#ifndef NO_DECREASING_ALPHA_ITER_ORDER
+ /* Reset the counter to the appropriate value for the next test */
+ i = LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_NUM_LINKS;
+
+ if (H5Lvisit2(group_id, H5_INDEX_NAME, H5_ITER_DEC, link_visit_mixed_links_no_cycles_cb, &i) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" H5Lvisit2 by index type name in decreasing order failed\n");
+ PART_ERROR(H5Lvisit_no_cycles_link_name_decreasing);
+ }
+
+ if (i != 2 * LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_NUM_LINKS) {
+ H5_FAILED();
+ HDprintf(" some links were not visited!\n");
+ PART_ERROR(H5Lvisit_no_cycles_link_name_decreasing);
+ }
+
+ PASSED();
+#else
+ SKIPPED();
+ PART_EMPTY(H5Lvisit_no_cycles_link_name_decreasing);
+#endif
+ }
+ PART_END(H5Lvisit_no_cycles_link_name_decreasing);
+
+ PART_BEGIN(H5Lvisit_no_cycles_link_creation_increasing)
+ {
+ TESTING_2("H5Lvisit2 by creation order in increasing order");
+
+ /* Reset the counter to the appropriate value for the next test */
+ i = 2 * LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_NUM_LINKS;
+
+ if (H5Lvisit2(group_id, H5_INDEX_CRT_ORDER, H5_ITER_INC, link_visit_mixed_links_no_cycles_cb,
+ &i) < 0) {
+ H5_FAILED();
+ HDprintf(" H5Lvisit2 by index type creation order in increasing order failed\n");
+ PART_ERROR(H5Lvisit_no_cycles_link_creation_increasing);
+ }
+
+ if (i != 3 * LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_NUM_LINKS) {
+ H5_FAILED();
+ HDprintf(" some links were not visited!\n");
+ PART_ERROR(H5Lvisit_no_cycles_link_creation_increasing);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lvisit_no_cycles_link_creation_increasing);
+
+ PART_BEGIN(H5Lvisit_no_cycles_link_creation_decreasing)
+ {
+ TESTING_2("H5Lvisit2 by creation order in decreasing order");
+
+ /* Reset the counter to the appropriate value for the next test */
+ i = 3 * LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_NUM_LINKS;
+
+ if (H5Lvisit2(group_id, H5_INDEX_CRT_ORDER, H5_ITER_DEC, link_visit_mixed_links_no_cycles_cb,
+ &i) < 0) {
+ H5_FAILED();
+ HDprintf(" H5Lvisit2 by index type creation order in decreasing order failed\n");
+ PART_ERROR(H5Lvisit_no_cycles_link_creation_decreasing);
+ }
+
+ if (i != 4 * LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_NUM_LINKS) {
+ H5_FAILED();
+ HDprintf(" some links were not visited!\n");
+ PART_ERROR(H5Lvisit_no_cycles_link_creation_decreasing);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lvisit_no_cycles_link_creation_decreasing);
+
+ PART_BEGIN(H5Lvisit_by_name_no_cycles_link_name_increasing)
+ {
+ TESTING_2("H5Lvisit_by_name2 by link name in increasing order");
+
+ i = 0;
+
+ if (H5Lvisit_by_name2(
+ file_id, "/" LINK_TEST_GROUP_NAME "/" LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_SUBGROUP_NAME,
+ H5_INDEX_NAME, H5_ITER_INC, link_visit_mixed_links_no_cycles_cb, &i, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" H5Lvisit_by_name2 by index type name in increasing order failed\n");
+ PART_ERROR(H5Lvisit_by_name_no_cycles_link_name_increasing);
+ }
+
+ if (i != LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_NUM_LINKS) {
+ H5_FAILED();
+ HDprintf(" some links were not visited!\n");
+ PART_ERROR(H5Lvisit_by_name_no_cycles_link_name_increasing);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lvisit_by_name_no_cycles_link_name_increasing);
+
+ PART_BEGIN(H5Lvisit_by_name_no_cycles_link_name_decreasing)
+ {
+ TESTING_2("H5Lvisit_by_name2 by link name in decreasing order");
+#ifndef NO_DECREASING_ALPHA_ITER_ORDER
+ /* Reset the counter to the appropriate value for the next test */
+ i = LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_NUM_LINKS;
+
+ if (H5Lvisit_by_name2(
+ file_id, "/" LINK_TEST_GROUP_NAME "/" LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_SUBGROUP_NAME,
+ H5_INDEX_NAME, H5_ITER_DEC, link_visit_mixed_links_no_cycles_cb, &i, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" H5Lvisit_by_name2 by index type name in decreasing order failed\n");
+ PART_ERROR(H5Lvisit_by_name_no_cycles_link_name_decreasing);
+ }
+
+ if (i != 2 * LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_NUM_LINKS) {
+ H5_FAILED();
+ HDprintf(" some links were not visited!\n");
+ PART_ERROR(H5Lvisit_by_name_no_cycles_link_name_decreasing);
+ }
+
+ PASSED();
+#else
+ SKIPPED();
+ PART_EMPTY(H5Lvisit_by_name_no_cycles_link_name_decreasing);
+#endif
+ }
+ PART_END(H5Lvisit_by_name_no_cycles_link_name_decreasing);
+
+ PART_BEGIN(H5Lvisit_by_name_no_cycles_link_creation_increasing)
+ {
+ TESTING_2("H5Lvisit_by_name2 by creation order in increasing order");
+
+ /* Reset the counter to the appropriate value for the next test */
+ i = 2 * LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_NUM_LINKS;
+
+ if (H5Lvisit_by_name2(
+ file_id, "/" LINK_TEST_GROUP_NAME "/" LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_SUBGROUP_NAME,
+ H5_INDEX_CRT_ORDER, H5_ITER_INC, link_visit_mixed_links_no_cycles_cb, &i,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" H5Lvisit_by_name2 by index type creation order in increasing order failed\n");
+ PART_ERROR(H5Lvisit_by_name_no_cycles_link_creation_increasing);
+ }
+
+ if (i != 3 * LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_NUM_LINKS) {
+ H5_FAILED();
+ HDprintf(" some links were not visited!\n");
+ PART_ERROR(H5Lvisit_by_name_no_cycles_link_creation_increasing);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lvisit_by_name_no_cycles_link_creation_increasing);
+
+ PART_BEGIN(H5Lvisit_by_name_no_cycles_link_creation_decreasing)
+ {
+ TESTING_2("H5Lvisit_by_name2 by creation order in decreasing order");
+
+ /* Reset the counter to the appropriate value for the next test */
+ i = 3 * LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_NUM_LINKS;
+
+ if (H5Lvisit_by_name2(
+ file_id, "/" LINK_TEST_GROUP_NAME "/" LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_SUBGROUP_NAME,
+ H5_INDEX_CRT_ORDER, H5_ITER_DEC, link_visit_mixed_links_no_cycles_cb, &i,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" H5Lvisit_by_name2 by index type creation order in decreasing order failed\n");
+ PART_ERROR(H5Lvisit_by_name_no_cycles_link_creation_decreasing);
+ }
+
+ if (i != 4 * LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_NUM_LINKS) {
+ H5_FAILED();
+ HDprintf(" some links were not visited!\n");
+ PART_ERROR(H5Lvisit_by_name_no_cycles_link_creation_decreasing);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lvisit_by_name_no_cycles_link_creation_decreasing);
+ }
+ END_MULTIPART;
+
+ TESTING_2("test cleanup");
+
+ if (H5Sclose(fspace_id) < 0)
+ TEST_ERROR;
+ if (H5Tclose(dset_dtype) < 0)
+ TEST_ERROR;
+ if (H5Pclose(gcpl_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(subgroup1) < 0)
+ TEST_ERROR;
+ if (H5Gclose(subgroup2) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(fspace_id);
+ H5Tclose(dset_dtype);
+ H5Dclose(dset_id);
+ H5Pclose(gcpl_id);
+ H5Gclose(subgroup1);
+ H5Gclose(subgroup2);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+#else
+ SKIPPED();
+ return 0;
+#endif
+}
+
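+/*
+ * Editor's note: the cycle tests below create links that refer back to their
+ * own parent groups. The fixed counter checks in these tests are written
+ * under the assumption that the recursive visit still reports each link
+ * exactly once rather than looping forever through the cycles.
+ */
+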
+/*
+ * A test to check the functionality of recursive
+ * link iteration using H5Lvisit(_by_name)2 with
+ * only hard links and where there are cyclic links.
+ * Iteration is done in increasing and decreasing
+ * order of both link name and link creation order.
+ */
+static int
+test_link_visit_hard_links_cycles(void)
+{
+ size_t i;
+ htri_t link_exists;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t subgroup_id = H5I_INVALID_HID;
+ hid_t gcpl_id = H5I_INVALID_HID;
+
+ TESTING_MULTIPART("link visiting with cycles (only hard links)");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_LINK_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_HARD_LINKS) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_ITERATE) || !(vol_cap_flags_g & H5VL_CAP_FLAG_CREATION_ORDER)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file, group, link, hard link, iterate, or creation order "
+ "aren't supported with this connector\n");
+ return 0;
+ }
+
+ TESTING_2("test setup");
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, LINK_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", LINK_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((gcpl_id = H5Pcreate(H5P_GROUP_CREATE)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create a GCPL\n");
+ goto error;
+ }
+
+ if (H5Pset_link_creation_order(gcpl_id, H5P_CRT_ORDER_TRACKED | H5P_CRT_ORDER_INDEXED) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't enable link creation order tracking and indexing on GCPL\n");
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, LINK_VISIT_HARD_LINKS_CYCLE_TEST_SUBGROUP_NAME, H5P_DEFAULT,
+ gcpl_id, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container subgroup '%s'\n",
+ LINK_VISIT_HARD_LINKS_CYCLE_TEST_SUBGROUP_NAME);
+ goto error;
+ }
+
+ for (i = 0; i < LINK_VISIT_HARD_LINKS_CYCLE_TEST_NUM_SUBGROUPS; i++) {
+ size_t j;
+ char grp_name[LINK_VISIT_HARD_LINKS_CYCLE_TEST_BUF_SIZE];
+
+ /* Create the groups with a reverse-ordering naming scheme to test creation order later */
+ HDsnprintf(grp_name, LINK_VISIT_HARD_LINKS_CYCLE_TEST_BUF_SIZE,
+ LINK_VISIT_HARD_LINKS_CYCLE_TEST_NESTED_GRP_NAME "%d",
+ (int)(LINK_VISIT_HARD_LINKS_CYCLE_TEST_NUM_SUBGROUPS - i - 1));
+
+ if ((subgroup_id = H5Gcreate2(group_id, grp_name, H5P_DEFAULT, gcpl_id, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create subgroup '%s'\n", grp_name);
+ goto error;
+ }
+
+ for (j = 0; j < LINK_VISIT_HARD_LINKS_CYCLE_TEST_NUM_LINKS_PER_GROUP; j++) {
+ char link_name[LINK_VISIT_HARD_LINKS_CYCLE_TEST_BUF_SIZE];
+
+ /* Create the links with a reverse-ordering naming scheme to test creation order later */
+ HDsnprintf(link_name, LINK_VISIT_HARD_LINKS_CYCLE_TEST_BUF_SIZE,
+ LINK_VISIT_HARD_LINKS_CYCLE_TEST_LINK_NAME "%d",
+ (int)(LINK_VISIT_HARD_LINKS_CYCLE_TEST_NUM_LINKS_PER_GROUP - j - 1));
+
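+            /* Each hard link targets "." -- the subgroup that contains the
+             * link itself -- so every link introduces a cycle back to its own
+             * parent group.
+             */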
+ if (H5Lcreate_hard(subgroup_id, ".", subgroup_id, link_name, H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create hard link '%s'\n", link_name);
+ goto error;
+ }
+
+ /* Verify the link has been created */
+ if ((link_exists = H5Lexists(subgroup_id, link_name, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if link '%s' exists\n", link_name);
+ goto error;
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" link '%s' did not exist\n", link_name);
+ goto error;
+ }
+ }
+
+ if (H5Gclose(subgroup_id) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't close subgroup '%s'\n", grp_name);
+ goto error;
+ }
+ }
+
+ PASSED();
+
+ BEGIN_MULTIPART
+ {
+        /*
+         * NOTE: A counter is passed to the iteration callback so that each
+         * visited link can be matched against the link expected at that step
+         * of the following iterations. This checks that the links are indeed
+         * returned in the correct order.
+         */
+
+ PART_BEGIN(H5Lvisit_cycles_link_name_increasing)
+ {
+ TESTING_2("H5Lvisit2 by link name in increasing order");
+
+ i = 0;
+
+ if (H5Lvisit2(group_id, H5_INDEX_NAME, H5_ITER_INC, link_visit_hard_links_cycles_cb, &i) < 0) {
+ H5_FAILED();
+ HDprintf(" H5Lvisit2 by index type name in increasing order failed\n");
+ PART_ERROR(H5Lvisit_cycles_link_name_increasing);
+ }
+
+ if (i != LINK_VISIT_HARD_LINKS_CYCLE_TEST_NUM_LINKS_PER_TEST) {
+ H5_FAILED();
+ HDprintf(" some links were not visited!\n");
+ PART_ERROR(H5Lvisit_cycles_link_name_increasing);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lvisit_cycles_link_name_increasing);
+
+ PART_BEGIN(H5Lvisit_cycles_link_name_decreasing)
+ {
+ TESTING_2("H5Lvisit2 by link name in decreasing order");
+#ifndef NO_DECREASING_ALPHA_ITER_ORDER
+ /* Reset the counter to the appropriate value for the next test */
+ i = LINK_VISIT_HARD_LINKS_CYCLE_TEST_NUM_LINKS_PER_TEST;
+
+ if (H5Lvisit2(group_id, H5_INDEX_NAME, H5_ITER_DEC, link_visit_hard_links_cycles_cb, &i) < 0) {
+ H5_FAILED();
+ HDprintf(" H5Lvisit2 by index type name in decreasing order failed\n");
+ PART_ERROR(H5Lvisit_cycles_link_name_decreasing);
+ }
+
+ if (i != 2 * LINK_VISIT_HARD_LINKS_CYCLE_TEST_NUM_LINKS_PER_TEST) {
+ H5_FAILED();
+ HDprintf(" some links were not visited!\n");
+ PART_ERROR(H5Lvisit_cycles_link_name_decreasing);
+ }
+
+ PASSED();
+#else
+ SKIPPED();
+ PART_EMPTY(H5Lvisit_cycles_link_name_decreasing);
+#endif
+ }
+ PART_END(H5Lvisit_cycles_link_name_decreasing);
+
+ PART_BEGIN(H5Lvisit_cycles_link_creation_increasing)
+ {
+ TESTING_2("H5Lvisit2 by creation order in increasing order");
+
+ /* Reset the counter to the appropriate value for the next test */
+ i = 2 * LINK_VISIT_HARD_LINKS_CYCLE_TEST_NUM_LINKS_PER_TEST;
+
+ if (H5Lvisit2(group_id, H5_INDEX_CRT_ORDER, H5_ITER_INC, link_visit_hard_links_cycles_cb, &i) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" H5Lvisit2 by index type creation order in increasing order failed\n");
+ PART_ERROR(H5Lvisit_cycles_link_creation_increasing);
+ }
+
+ if (i != 3 * LINK_VISIT_HARD_LINKS_CYCLE_TEST_NUM_LINKS_PER_TEST) {
+ H5_FAILED();
+ HDprintf(" some links were not visited!\n");
+ PART_ERROR(H5Lvisit_cycles_link_creation_increasing);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lvisit_cycles_link_creation_increasing);
+
+ PART_BEGIN(H5Lvisit_cycles_link_creation_decreasing)
+ {
+ TESTING_2("H5Lvisit2 by creation order in decreasing order");
+
+ /* Reset the counter to the appropriate value for the next test */
+ i = 3 * LINK_VISIT_HARD_LINKS_CYCLE_TEST_NUM_LINKS_PER_TEST;
+
+ if (H5Lvisit2(group_id, H5_INDEX_CRT_ORDER, H5_ITER_DEC, link_visit_hard_links_cycles_cb, &i) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" H5Lvisit2 by index type creation order in decreasing order failed\n");
+ PART_ERROR(H5Lvisit_cycles_link_creation_decreasing);
+ }
+
+ if (i != 4 * LINK_VISIT_HARD_LINKS_CYCLE_TEST_NUM_LINKS_PER_TEST) {
+ H5_FAILED();
+ HDprintf(" some links were not visited!\n");
+ PART_ERROR(H5Lvisit_cycles_link_creation_decreasing);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lvisit_cycles_link_creation_decreasing);
+
+ PART_BEGIN(H5Lvisit_by_name_cycles_link_name_increasing)
+ {
+ TESTING_2("H5Lvisit_by_name2 by link name in increasing order");
+
+ i = 0;
+
+ if (H5Lvisit_by_name2(
+ file_id, "/" LINK_TEST_GROUP_NAME "/" LINK_VISIT_HARD_LINKS_CYCLE_TEST_SUBGROUP_NAME,
+ H5_INDEX_NAME, H5_ITER_INC, link_visit_hard_links_cycles_cb, &i, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" H5Lvisit_by_name2 by index type name in increasing order failed\n");
+ PART_ERROR(H5Lvisit_by_name_cycles_link_name_increasing);
+ }
+
+ if (i != LINK_VISIT_HARD_LINKS_CYCLE_TEST_NUM_LINKS_PER_TEST) {
+ H5_FAILED();
+ HDprintf(" some links were not visited!\n");
+ PART_ERROR(H5Lvisit_by_name_cycles_link_name_increasing);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lvisit_by_name_cycles_link_name_increasing);
+
+ PART_BEGIN(H5Lvisit_by_name_cycles_link_name_decreasing)
+ {
+ TESTING_2("H5Lvisit_by_name2 by link name in decreasing order");
+#ifndef NO_DECREASING_ALPHA_ITER_ORDER
+ /* Reset the counter to the appropriate value for the next test */
+ i = LINK_VISIT_HARD_LINKS_CYCLE_TEST_NUM_LINKS_PER_TEST;
+
+ if (H5Lvisit_by_name2(
+ file_id, "/" LINK_TEST_GROUP_NAME "/" LINK_VISIT_HARD_LINKS_CYCLE_TEST_SUBGROUP_NAME,
+ H5_INDEX_NAME, H5_ITER_DEC, link_visit_hard_links_cycles_cb, &i, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" H5Lvisit_by_name2 by index type name in decreasing order failed\n");
+ PART_ERROR(H5Lvisit_by_name_cycles_link_name_decreasing);
+ }
+
+ if (i != 2 * LINK_VISIT_HARD_LINKS_CYCLE_TEST_NUM_LINKS_PER_TEST) {
+ H5_FAILED();
+ HDprintf(" some links were not visited!\n");
+ PART_ERROR(H5Lvisit_by_name_cycles_link_name_decreasing);
+ }
+
+ PASSED();
+#else
+ SKIPPED();
+ PART_EMPTY(H5Lvisit_by_name_cycles_link_name_decreasing);
+#endif
+ }
+ PART_END(H5Lvisit_by_name_cycles_link_name_decreasing);
+
+ PART_BEGIN(H5Lvisit_by_name_cycles_link_creation_increasing)
+ {
+ TESTING_2("H5Lvisit_by_name2 by creation order in increasing order");
+
+ /* Reset the counter to the appropriate value for the next test */
+ i = 2 * LINK_VISIT_HARD_LINKS_CYCLE_TEST_NUM_LINKS_PER_TEST;
+
+ if (H5Lvisit_by_name2(
+ file_id, "/" LINK_TEST_GROUP_NAME "/" LINK_VISIT_HARD_LINKS_CYCLE_TEST_SUBGROUP_NAME,
+ H5_INDEX_CRT_ORDER, H5_ITER_INC, link_visit_hard_links_cycles_cb, &i, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" H5Lvisit_by_name2 by index type creation order in increasing order failed\n");
+ PART_ERROR(H5Lvisit_by_name_cycles_link_creation_increasing);
+ }
+
+ if (i != 3 * LINK_VISIT_HARD_LINKS_CYCLE_TEST_NUM_LINKS_PER_TEST) {
+ H5_FAILED();
+ HDprintf(" some links were not visited!\n");
+ PART_ERROR(H5Lvisit_by_name_cycles_link_creation_increasing);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lvisit_by_name_cycles_link_creation_increasing);
+
+ PART_BEGIN(H5Lvisit_by_name_cycles_link_creation_decreasing)
+ {
+ TESTING_2("H5Lvisit_by_name2 by creation order in decreasing order");
+
+ /* Reset the counter to the appropriate value for the next test */
+ i = 3 * LINK_VISIT_HARD_LINKS_CYCLE_TEST_NUM_LINKS_PER_TEST;
+
+ if (H5Lvisit_by_name2(
+ file_id, "/" LINK_TEST_GROUP_NAME "/" LINK_VISIT_HARD_LINKS_CYCLE_TEST_SUBGROUP_NAME,
+ H5_INDEX_CRT_ORDER, H5_ITER_DEC, link_visit_hard_links_cycles_cb, &i, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" H5Lvisit_by_name2 by index type creation order in decreasing order failed\n");
+ PART_ERROR(H5Lvisit_by_name_cycles_link_creation_decreasing);
+ }
+
+ if (i != 4 * LINK_VISIT_HARD_LINKS_CYCLE_TEST_NUM_LINKS_PER_TEST) {
+ H5_FAILED();
+ HDprintf(" some links were not visited!\n");
+ PART_ERROR(H5Lvisit_by_name_cycles_link_creation_decreasing);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lvisit_by_name_cycles_link_creation_decreasing);
+ }
+ END_MULTIPART;
+
+ TESTING_2("test cleanup");
+
+ if (H5Pclose(gcpl_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Pclose(gcpl_id);
+ H5Gclose(subgroup_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to check the functionality of recursive
+ * link iteration using H5Lvisit(_by_name)2 with
+ * only soft links and where there are cyclic links.
+ * Iteration is done in increasing and decreasing
+ * order of both link name and link creation order.
+ */
+static int
+test_link_visit_soft_links_cycles(void)
+{
+ size_t i;
+ htri_t link_exists;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t subgroup_id = H5I_INVALID_HID;
+ hid_t gcpl_id = H5I_INVALID_HID;
+
+ TESTING_MULTIPART("link visiting with cycles (only soft links)");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_LINK_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_SOFT_LINKS) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_ITERATE) || !(vol_cap_flags_g & H5VL_CAP_FLAG_CREATION_ORDER)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file, group, link, soft link, iterate, or creation order "
+ "aren't supported with this connector\n");
+ return 0;
+ }
+
+ TESTING_2("test setup");
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, LINK_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", LINK_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((gcpl_id = H5Pcreate(H5P_GROUP_CREATE)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create a GCPL\n");
+ goto error;
+ }
+
+ if (H5Pset_link_creation_order(gcpl_id, H5P_CRT_ORDER_TRACKED | H5P_CRT_ORDER_INDEXED) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't enable link creation order tracking and indexing on GCPL\n");
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, LINK_VISIT_SOFT_LINKS_CYCLE_TEST_SUBGROUP_NAME, H5P_DEFAULT,
+ gcpl_id, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container subgroup '%s'\n",
+ LINK_VISIT_SOFT_LINKS_CYCLE_TEST_SUBGROUP_NAME);
+ goto error;
+ }
+
+ for (i = 0; i < LINK_VISIT_SOFT_LINKS_CYCLE_TEST_NUM_SUBGROUPS; i++) {
+ size_t j;
+ char grp_name[LINK_VISIT_SOFT_LINKS_CYCLE_TEST_BUF_SIZE];
+
+ /* Create the groups with a reverse-ordering naming scheme to test creation order later */
+ HDsnprintf(grp_name, LINK_VISIT_SOFT_LINKS_CYCLE_TEST_BUF_SIZE,
+ LINK_VISIT_SOFT_LINKS_CYCLE_TEST_NESTED_GRP_NAME "%d",
+ (int)(LINK_VISIT_SOFT_LINKS_CYCLE_TEST_NUM_SUBGROUPS - i - 1));
+
+ if ((subgroup_id = H5Gcreate2(group_id, grp_name, H5P_DEFAULT, gcpl_id, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create subgroup '%s'\n", grp_name);
+ goto error;
+ }
+
+ for (j = 0; j < LINK_VISIT_SOFT_LINKS_CYCLE_TEST_NUM_LINKS_PER_GROUP; j++) {
+ char link_name[LINK_VISIT_SOFT_LINKS_CYCLE_TEST_BUF_SIZE];
+ char link_target[2 * LINK_VISIT_SOFT_LINKS_CYCLE_TEST_BUF_SIZE];
+
+ /* Create the links with a reverse-ordering naming scheme to test creation order later */
+ HDsnprintf(link_name, LINK_VISIT_SOFT_LINKS_CYCLE_TEST_BUF_SIZE,
+ LINK_VISIT_SOFT_LINKS_CYCLE_TEST_LINK_NAME "%d",
+ (int)(LINK_VISIT_SOFT_LINKS_CYCLE_TEST_NUM_LINKS_PER_GROUP - j - 1));
+
+ HDsnprintf(link_target, 2 * LINK_VISIT_SOFT_LINKS_CYCLE_TEST_BUF_SIZE,
+ "/" LINK_TEST_GROUP_NAME "/" LINK_VISIT_SOFT_LINKS_CYCLE_TEST_SUBGROUP_NAME "/%s",
+ grp_name);
+
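+            /* The soft link target is the absolute path of the subgroup that
+             * holds the link itself, so each soft link forms a cycle by path.
+             */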
+ if (H5Lcreate_soft(link_target, subgroup_id, link_name, H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create soft link '%s'\n", link_name);
+ goto error;
+ }
+
+ /* Verify the link has been created */
+ if ((link_exists = H5Lexists(subgroup_id, link_name, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if link '%s' exists\n", link_name);
+ goto error;
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" link '%s' did not exist\n", link_name);
+ goto error;
+ }
+ }
+
+ if (H5Gclose(subgroup_id) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't close subgroup '%s'\n", grp_name);
+ goto error;
+ }
+ }
+
+ PASSED();
+
+ BEGIN_MULTIPART
+ {
+        /*
+         * NOTE: A counter is passed to the iteration callback so that each
+         * visited link can be matched against the link expected at that step
+         * of the following iterations. This checks that the links are indeed
+         * returned in the correct order.
+         */
+
+ PART_BEGIN(H5Lvisit_cycles_link_name_increasing)
+ {
+ TESTING_2("H5Lvisit2 by link name in increasing order");
+
+ i = 0;
+
+ if (H5Lvisit2(group_id, H5_INDEX_NAME, H5_ITER_INC, link_visit_soft_links_cycles_cb, &i) < 0) {
+ H5_FAILED();
+ HDprintf(" H5Lvisit2 by index type name in increasing order failed\n");
+ PART_ERROR(H5Lvisit_cycles_link_name_increasing);
+ }
+
+ if (i != LINK_VISIT_SOFT_LINKS_CYCLE_TEST_NUM_LINKS_PER_TEST) {
+ H5_FAILED();
+ HDprintf(" some links were not visited!\n");
+ PART_ERROR(H5Lvisit_cycles_link_name_increasing);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lvisit_cycles_link_name_increasing);
+
+ PART_BEGIN(H5Lvisit_cycles_link_name_decreasing)
+ {
+ TESTING_2("H5Lvisit2 by link name in decreasing order");
+#ifndef NO_DECREASING_ALPHA_ITER_ORDER
+ /* Reset the counter to the appropriate value for the next test */
+ i = LINK_VISIT_SOFT_LINKS_CYCLE_TEST_NUM_LINKS_PER_TEST;
+
+ if (H5Lvisit2(group_id, H5_INDEX_NAME, H5_ITER_DEC, link_visit_soft_links_cycles_cb, &i) < 0) {
+ H5_FAILED();
+ HDprintf(" H5Lvisit2 by index type name in decreasing order failed\n");
+ PART_ERROR(H5Lvisit_cycles_link_name_decreasing);
+ }
+
+ if (i != 2 * LINK_VISIT_SOFT_LINKS_CYCLE_TEST_NUM_LINKS_PER_TEST) {
+ H5_FAILED();
+ HDprintf(" some links were not visited!\n");
+ PART_ERROR(H5Lvisit_cycles_link_name_decreasing);
+ }
+
+ PASSED();
+#else
+ SKIPPED();
+ PART_EMPTY(H5Lvisit_cycles_link_name_decreasing);
+#endif
+ }
+ PART_END(H5Lvisit_cycles_link_name_decreasing);
+
+ PART_BEGIN(H5Lvisit_cycles_link_creation_increasing)
+ {
+ TESTING_2("H5Lvisit2 by creation order in increasing order");
+
+ /* Reset the counter to the appropriate value for the next test */
+ i = 2 * LINK_VISIT_SOFT_LINKS_CYCLE_TEST_NUM_LINKS_PER_TEST;
+
+ if (H5Lvisit2(group_id, H5_INDEX_CRT_ORDER, H5_ITER_INC, link_visit_soft_links_cycles_cb, &i) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" H5Lvisit2 by index type creation order in increasing order failed\n");
+ PART_ERROR(H5Lvisit_cycles_link_creation_increasing);
+ }
+
+ if (i != 3 * LINK_VISIT_SOFT_LINKS_CYCLE_TEST_NUM_LINKS_PER_TEST) {
+ H5_FAILED();
+ HDprintf(" some links were not visited!\n");
+ PART_ERROR(H5Lvisit_cycles_link_creation_increasing);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lvisit_cycles_link_creation_increasing);
+
+ PART_BEGIN(H5Lvisit_cycles_link_creation_decreasing)
+ {
+ TESTING_2("H5Lvisit2 by creation order in decreasing order");
+
+ /* Reset the counter to the appropriate value for the next test */
+ i = 3 * LINK_VISIT_SOFT_LINKS_CYCLE_TEST_NUM_LINKS_PER_TEST;
+
+ if (H5Lvisit2(group_id, H5_INDEX_CRT_ORDER, H5_ITER_DEC, link_visit_soft_links_cycles_cb, &i) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" H5Lvisit2 by index type creation order in decreasing order failed\n");
+ PART_ERROR(H5Lvisit_cycles_link_creation_decreasing);
+ }
+
+ if (i != 4 * LINK_VISIT_SOFT_LINKS_CYCLE_TEST_NUM_LINKS_PER_TEST) {
+ H5_FAILED();
+ HDprintf(" some links were not visited!\n");
+ PART_ERROR(H5Lvisit_cycles_link_creation_decreasing);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lvisit_cycles_link_creation_decreasing);
+
+ PART_BEGIN(H5Lvisit_by_name_cycles_link_name_increasing)
+ {
+ TESTING_2("H5Lvisit_by_name2 by link name in increasing order");
+
+ /* Reset the counter to the appropriate value for the next test */
+ i = 0;
+
+ if (H5Lvisit_by_name2(
+ file_id, "/" LINK_TEST_GROUP_NAME "/" LINK_VISIT_SOFT_LINKS_CYCLE_TEST_SUBGROUP_NAME,
+ H5_INDEX_NAME, H5_ITER_INC, link_visit_soft_links_cycles_cb, &i, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" H5Lvisit_by_name2 by index type name in increasing order failed\n");
+ PART_ERROR(H5Lvisit_by_name_cycles_link_name_increasing);
+ }
+
+ if (i != LINK_VISIT_SOFT_LINKS_CYCLE_TEST_NUM_LINKS_PER_TEST) {
+ H5_FAILED();
+ HDprintf(" some links were not visited!\n");
+ PART_ERROR(H5Lvisit_by_name_cycles_link_name_increasing);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lvisit_by_name_cycles_link_name_increasing);
+
+ PART_BEGIN(H5Lvisit_by_name_cycles_link_name_decreasing)
+ {
+ TESTING_2("H5Lvisit_by_name2 by link name in decreasing order");
+#ifndef NO_DECREASING_ALPHA_ITER_ORDER
+ /* Reset the counter to the appropriate value for the next test */
+ i = LINK_VISIT_SOFT_LINKS_CYCLE_TEST_NUM_LINKS_PER_TEST;
+
+ if (H5Lvisit_by_name2(
+ file_id, "/" LINK_TEST_GROUP_NAME "/" LINK_VISIT_SOFT_LINKS_CYCLE_TEST_SUBGROUP_NAME,
+ H5_INDEX_NAME, H5_ITER_DEC, link_visit_soft_links_cycles_cb, &i, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" H5Lvisit_by_name2 by index type name in decreasing order failed\n");
+ PART_ERROR(H5Lvisit_by_name_cycles_link_name_decreasing);
+ }
+
+ if (i != 2 * LINK_VISIT_SOFT_LINKS_CYCLE_TEST_NUM_LINKS_PER_TEST) {
+ H5_FAILED();
+ HDprintf(" some links were not visited!\n");
+ PART_ERROR(H5Lvisit_by_name_cycles_link_name_decreasing);
+ }
+
+ PASSED();
+#else
+ SKIPPED();
+ PART_EMPTY(H5Lvisit_by_name_cycles_link_name_decreasing);
+#endif
+ }
+ PART_END(H5Lvisit_by_name_cycles_link_name_decreasing);
+
+ PART_BEGIN(H5Lvisit_by_name_cycles_link_creation_increasing)
+ {
+ TESTING_2("H5Lvisit_by_name2 by creation order in increasing order");
+
+ /* Reset the counter to the appropriate value for the next test */
+ i = 2 * LINK_VISIT_SOFT_LINKS_CYCLE_TEST_NUM_LINKS_PER_TEST;
+
+ if (H5Lvisit_by_name2(
+ file_id, "/" LINK_TEST_GROUP_NAME "/" LINK_VISIT_SOFT_LINKS_CYCLE_TEST_SUBGROUP_NAME,
+ H5_INDEX_CRT_ORDER, H5_ITER_INC, link_visit_soft_links_cycles_cb, &i, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" H5Lvisit_by_name2 by index type creation order in increasing order failed\n");
+ PART_ERROR(H5Lvisit_by_name_cycles_link_creation_increasing);
+ }
+
+ if (i != 3 * LINK_VISIT_SOFT_LINKS_CYCLE_TEST_NUM_LINKS_PER_TEST) {
+ H5_FAILED();
+ HDprintf(" some links were not visited!\n");
+ PART_ERROR(H5Lvisit_by_name_cycles_link_creation_increasing);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lvisit_by_name_cycles_link_creation_increasing);
+
+ PART_BEGIN(H5Lvisit_by_name_cycles_link_creation_decreasing)
+ {
+ TESTING_2("H5Lvisit_by_name2 by creation order in decreasing order");
+
+ /* Reset the counter to the appropriate value for the next test */
+ i = 3 * LINK_VISIT_SOFT_LINKS_CYCLE_TEST_NUM_LINKS_PER_TEST;
+
+ if (H5Lvisit_by_name2(
+ file_id, "/" LINK_TEST_GROUP_NAME "/" LINK_VISIT_SOFT_LINKS_CYCLE_TEST_SUBGROUP_NAME,
+ H5_INDEX_CRT_ORDER, H5_ITER_DEC, link_visit_soft_links_cycles_cb, &i, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" H5Lvisit_by_name2 by index type creation order in decreasing order failed\n");
+ PART_ERROR(H5Lvisit_by_name_cycles_link_creation_decreasing);
+ }
+
+ if (i != 4 * LINK_VISIT_SOFT_LINKS_CYCLE_TEST_NUM_LINKS_PER_TEST) {
+ H5_FAILED();
+ HDprintf(" some links were not visited!\n");
+ PART_ERROR(H5Lvisit_by_name_cycles_link_creation_decreasing);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lvisit_by_name_cycles_link_creation_decreasing);
+ }
+ END_MULTIPART;
+
+ TESTING_2("test cleanup");
+
+ if (H5Pclose(gcpl_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Pclose(gcpl_id);
+ H5Gclose(subgroup_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to check the functionality of recursive
+ * link iteration using H5Lvisit(_by_name)2 with
+ * only external links and where there are cyclic
+ * links. Iteration is done in increasing and
+ * decreasing order of both link name and link
+ * creation order.
+ */
+static int
+test_link_visit_external_links_cycles(void)
+{
+#ifndef NO_EXTERNAL_LINKS
+ size_t i;
+ htri_t link_exists;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t subgroup_id = H5I_INVALID_HID;
+ hid_t gcpl_id = H5I_INVALID_HID;
+#endif
+
+ TESTING_MULTIPART("link visiting with cycles (only external links)");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_LINK_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_EXTERNAL_LINKS) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_ITERATE) || !(vol_cap_flags_g & H5VL_CAP_FLAG_CREATION_ORDER)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file, group, link, external link, iterate, or creation order "
+ "aren't supported with this connector\n");
+ return 0;
+ }
+
+#ifndef NO_EXTERNAL_LINKS
+ TESTING_2("test setup");
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, LINK_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", LINK_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((gcpl_id = H5Pcreate(H5P_GROUP_CREATE)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create a GCPL\n");
+ goto error;
+ }
+
+ if (H5Pset_link_creation_order(gcpl_id, H5P_CRT_ORDER_TRACKED | H5P_CRT_ORDER_INDEXED) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't enable link creation order tracking and indexing on GCPL\n");
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, LINK_VISIT_EXT_LINKS_CYCLE_TEST_SUBGROUP_NAME, H5P_DEFAULT,
+ gcpl_id, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container subgroup '%s'\n",
+ LINK_VISIT_EXT_LINKS_CYCLE_TEST_SUBGROUP_NAME);
+ goto error;
+ }
+
+ for (i = 0; i < LINK_VISIT_EXT_LINKS_CYCLE_TEST_NUM_SUBGROUPS; i++) {
+ size_t j;
+ char grp_name[LINK_VISIT_EXT_LINKS_CYCLE_TEST_BUF_SIZE];
+
+ /* Create the groups with a reverse-ordering naming scheme to test creation order later */
+ HDsnprintf(grp_name, LINK_VISIT_EXT_LINKS_CYCLE_TEST_BUF_SIZE,
+ LINK_VISIT_EXT_LINKS_CYCLE_TEST_NESTED_GRP_NAME "%d",
+ (int)(LINK_VISIT_EXT_LINKS_CYCLE_TEST_NUM_SUBGROUPS - i - 1));
+
+ if ((subgroup_id = H5Gcreate2(group_id, grp_name, H5P_DEFAULT, gcpl_id, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create subgroup '%s'\n", grp_name);
+ goto error;
+ }
+
+ for (j = 0; j < LINK_VISIT_EXT_LINKS_CYCLE_TEST_NUM_LINKS_PER_GROUP; j++) {
+ char link_name[LINK_VISIT_EXT_LINKS_CYCLE_TEST_BUF_SIZE];
+ char link_target_obj[2 * LINK_VISIT_EXT_LINKS_CYCLE_TEST_BUF_SIZE];
+
+ /* Create the links with a reverse-ordering naming scheme to test creation order later */
+ HDsnprintf(link_name, LINK_VISIT_EXT_LINKS_CYCLE_TEST_BUF_SIZE,
+ LINK_VISIT_EXT_LINKS_CYCLE_TEST_LINK_NAME "%d",
+ (int)(LINK_VISIT_EXT_LINKS_CYCLE_TEST_NUM_LINKS_PER_GROUP - j - 1));
+
+ HDsnprintf(link_target_obj, 2 * LINK_VISIT_EXT_LINKS_CYCLE_TEST_BUF_SIZE,
+ "/" LINK_TEST_GROUP_NAME "/" LINK_VISIT_EXT_LINKS_CYCLE_TEST_SUBGROUP_NAME "/%s",
+ grp_name);
+
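+            /*
+             * Each external link targets its own parent group in this same file, so
+             * recursively following the link leads straight back into a cycle.
+             */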
+ if (H5Lcreate_external(H5_api_test_filename, link_target_obj, subgroup_id, link_name, H5P_DEFAULT,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create external link '%s'\n", link_name);
+ goto error;
+ }
+
+ /* Verify the link has been created */
+ if ((link_exists = H5Lexists(subgroup_id, link_name, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if link '%s' exists\n", link_name);
+ goto error;
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" link '%s' did not exist\n", link_name);
+ goto error;
+ }
+ }
+
+ if (H5Gclose(subgroup_id) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't close subgroup '%s'\n", grp_name);
+ goto error;
+ }
+ }
+
+ PASSED();
+
+ BEGIN_MULTIPART
+ {
+        /*
+         * NOTE: A counter is passed to the iteration callback so that the expected
+         *       link for each step of the following iterations can be matched up.
+         *       This checks that the links are indeed being returned in the
+         *       correct order.
+         */
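+        /*
+         * Concretely: because the counter is reset to a multiple of
+         * LINK_VISIT_EXT_LINKS_CYCLE_TEST_NUM_LINKS_PER_TEST before every pass below,
+         * the callback can recover which ordering is being exercised from
+         * (counter / N) and the step within that pass from (counter % N), where N is
+         * that per-pass link count.
+         */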
+
+ PART_BEGIN(H5Lvisit_cycles_link_name_increasing)
+ {
+ TESTING_2("H5Lvisit2 by link name in increasing order");
+
+ i = 0;
+
+ if (H5Lvisit2(group_id, H5_INDEX_NAME, H5_ITER_INC, link_visit_external_links_cycles_cb, &i) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" H5Lvisit2 by index type name in increasing order failed\n");
+ PART_ERROR(H5Lvisit_cycles_link_name_increasing);
+ }
+
+ if (i != LINK_VISIT_EXT_LINKS_CYCLE_TEST_NUM_LINKS_PER_TEST) {
+ H5_FAILED();
+ HDprintf(" some links were not visited!\n");
+ PART_ERROR(H5Lvisit_cycles_link_name_increasing);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lvisit_cycles_link_name_increasing);
+
+ PART_BEGIN(H5Lvisit_cycles_link_name_decreasing)
+ {
+ TESTING_2("H5Lvisit2 by link name in decreasing order");
+#ifndef NO_DECREASING_ALPHA_ITER_ORDER
+ /* Reset the counter to the appropriate value for the next test */
+ i = LINK_VISIT_EXT_LINKS_CYCLE_TEST_NUM_LINKS_PER_TEST;
+
+ if (H5Lvisit2(group_id, H5_INDEX_NAME, H5_ITER_DEC, link_visit_external_links_cycles_cb, &i) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" H5Lvisit2 by index type name in decreasing order failed\n");
+ PART_ERROR(H5Lvisit_cycles_link_name_decreasing);
+ }
+
+ if (i != 2 * LINK_VISIT_EXT_LINKS_CYCLE_TEST_NUM_LINKS_PER_TEST) {
+ H5_FAILED();
+ HDprintf(" some links were not visited!\n");
+ PART_ERROR(H5Lvisit_cycles_link_name_decreasing);
+ }
+
+ PASSED();
+#else
+ SKIPPED();
+ PART_EMPTY(H5Lvisit_cycles_link_name_decreasing);
+#endif
+ }
+ PART_END(H5Lvisit_cycles_link_name_decreasing);
+
+ PART_BEGIN(H5Lvisit_cycles_link_creation_increasing)
+ {
+ TESTING_2("H5Lvisit2 by creation order in increasing order");
+
+ /* Reset the counter to the appropriate value for the next test */
+ i = 2 * LINK_VISIT_EXT_LINKS_CYCLE_TEST_NUM_LINKS_PER_TEST;
+
+ if (H5Lvisit2(group_id, H5_INDEX_CRT_ORDER, H5_ITER_INC, link_visit_external_links_cycles_cb,
+ &i) < 0) {
+ H5_FAILED();
+ HDprintf(" H5Lvisit2 by index type creation order in increasing order failed\n");
+ PART_ERROR(H5Lvisit_cycles_link_creation_increasing);
+ }
+
+ if (i != 3 * LINK_VISIT_EXT_LINKS_CYCLE_TEST_NUM_LINKS_PER_TEST) {
+ H5_FAILED();
+ HDprintf(" some links were not visited!\n");
+ PART_ERROR(H5Lvisit_cycles_link_creation_increasing);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lvisit_cycles_link_creation_increasing);
+
+ PART_BEGIN(H5Lvisit_cycles_link_creation_decreasing)
+ {
+ TESTING_2("H5Lvisit2 by creation order in decreasing order");
+
+ /* Reset the counter to the appropriate value for the next test */
+ i = 3 * LINK_VISIT_EXT_LINKS_CYCLE_TEST_NUM_LINKS_PER_TEST;
+
+ if (H5Lvisit2(group_id, H5_INDEX_CRT_ORDER, H5_ITER_DEC, link_visit_external_links_cycles_cb,
+ &i) < 0) {
+ H5_FAILED();
+ HDprintf(" H5Lvisit2 by index type creation order in decreasing order failed\n");
+ PART_ERROR(H5Lvisit_cycles_link_creation_decreasing);
+ }
+
+ if (i != 4 * LINK_VISIT_EXT_LINKS_CYCLE_TEST_NUM_LINKS_PER_TEST) {
+ H5_FAILED();
+ HDprintf(" some links were not visited!\n");
+ PART_ERROR(H5Lvisit_cycles_link_creation_decreasing);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lvisit_cycles_link_creation_decreasing);
+
+ PART_BEGIN(H5Lvisit_by_name_cycles_link_name_increasing)
+ {
+ TESTING_2("H5Lvisit_by_name2 by link name in increasing order");
+
+ /* Reset the counter to the appropriate value for the next test */
+ i = 0;
+
+ if (H5Lvisit_by_name2(
+ file_id, "/" LINK_TEST_GROUP_NAME "/" LINK_VISIT_EXT_LINKS_CYCLE_TEST_SUBGROUP_NAME,
+ H5_INDEX_NAME, H5_ITER_INC, link_visit_external_links_cycles_cb, &i, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" H5Lvisit_by_name2 by index type name in increasing order failed\n");
+ PART_ERROR(H5Lvisit_by_name_cycles_link_name_increasing);
+ }
+
+ if (i != LINK_VISIT_EXT_LINKS_CYCLE_TEST_NUM_LINKS_PER_TEST) {
+ H5_FAILED();
+ HDprintf(" some links were not visited!\n");
+ PART_ERROR(H5Lvisit_by_name_cycles_link_name_increasing);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lvisit_by_name_cycles_link_name_increasing);
+
+ PART_BEGIN(H5Lvisit_by_name_cycles_link_name_decreasing)
+ {
+ TESTING_2("H5Lvisit_by_name2 by link name in decreasing order");
+#ifndef NO_DECREASING_ALPHA_ITER_ORDER
+ /* Reset the counter to the appropriate value for the next test */
+ i = LINK_VISIT_EXT_LINKS_CYCLE_TEST_NUM_LINKS_PER_TEST;
+
+ if (H5Lvisit_by_name2(
+ file_id, "/" LINK_TEST_GROUP_NAME "/" LINK_VISIT_EXT_LINKS_CYCLE_TEST_SUBGROUP_NAME,
+ H5_INDEX_NAME, H5_ITER_DEC, link_visit_external_links_cycles_cb, &i, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" H5Lvisit_by_name2 by index type name in decreasing order failed\n");
+ PART_ERROR(H5Lvisit_by_name_cycles_link_name_decreasing);
+ }
+
+ if (i != 2 * LINK_VISIT_EXT_LINKS_CYCLE_TEST_NUM_LINKS_PER_TEST) {
+ H5_FAILED();
+ HDprintf(" some links were not visited!\n");
+ PART_ERROR(H5Lvisit_by_name_cycles_link_name_decreasing);
+ }
+
+ PASSED();
+#else
+ SKIPPED();
+ PART_EMPTY(H5Lvisit_by_name_cycles_link_name_decreasing);
+#endif
+ }
+ PART_END(H5Lvisit_by_name_cycles_link_name_decreasing);
+
+ PART_BEGIN(H5Lvisit_by_name_cycles_link_creation_increasing)
+ {
+ TESTING_2("H5Lvisit_by_name2 by creation order in increasing order");
+
+ /* Reset the counter to the appropriate value for the next test */
+ i = 2 * LINK_VISIT_EXT_LINKS_CYCLE_TEST_NUM_LINKS_PER_TEST;
+
+ if (H5Lvisit_by_name2(file_id,
+ "/" LINK_TEST_GROUP_NAME "/" LINK_VISIT_EXT_LINKS_CYCLE_TEST_SUBGROUP_NAME,
+ H5_INDEX_CRT_ORDER, H5_ITER_INC, link_visit_external_links_cycles_cb, &i,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" H5Lvisit_by_name2 by index type creation order in increasing order failed\n");
+ PART_ERROR(H5Lvisit_by_name_cycles_link_creation_increasing);
+ }
+
+ if (i != 3 * LINK_VISIT_EXT_LINKS_CYCLE_TEST_NUM_LINKS_PER_TEST) {
+ H5_FAILED();
+ HDprintf(" some links were not visited!\n");
+ PART_ERROR(H5Lvisit_by_name_cycles_link_creation_increasing);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lvisit_by_name_cycles_link_creation_increasing);
+
+ PART_BEGIN(H5Lvisit_by_name_cycles_link_creation_decreasing)
+ {
+ TESTING_2("H5Lvisit_by_name2 by creation order in decreasing order");
+
+ /* Reset the counter to the appropriate value for the next test */
+ i = 3 * LINK_VISIT_EXT_LINKS_CYCLE_TEST_NUM_LINKS_PER_TEST;
+
+ if (H5Lvisit_by_name2(file_id,
+ "/" LINK_TEST_GROUP_NAME "/" LINK_VISIT_EXT_LINKS_CYCLE_TEST_SUBGROUP_NAME,
+ H5_INDEX_CRT_ORDER, H5_ITER_DEC, link_visit_external_links_cycles_cb, &i,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" H5Lvisit_by_name2 by index type creation order in decreasing order failed\n");
+ PART_ERROR(H5Lvisit_by_name_cycles_link_creation_decreasing);
+ }
+
+ if (i != 4 * LINK_VISIT_EXT_LINKS_CYCLE_TEST_NUM_LINKS_PER_TEST) {
+ H5_FAILED();
+ HDprintf(" some links were not visited!\n");
+ PART_ERROR(H5Lvisit_by_name_cycles_link_creation_decreasing);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lvisit_by_name_cycles_link_creation_decreasing);
+ }
+ END_MULTIPART;
+
+ TESTING_2("test cleanup");
+
+ if (H5Pclose(gcpl_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Pclose(gcpl_id);
+ H5Gclose(subgroup_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+#else
+ SKIPPED();
+ return 0;
+#endif
+}
+
+/*
+ * A test to check the functionality of recursive
+ * link iteration using H5Lvisit(_by_name)2 with
+ * only user-defined links and where there are
+ * cyclic links. Iteration is done in increasing
+ * and decreasing order of both link name and link
+ * creation order.
+ *
+ * TODO refactor test so that creation order tests
+ * actually test the order that objects were created in.
+ */
+static int
+test_link_visit_ud_links_cycles(void)
+{
+ TESTING("link visiting with cycles (only user-defined links)");
+
+ SKIPPED();
+
+ return 1;
+}
+
+/*
+ * A test to check the functionality of recursive
+ * link iteration using H5Lvisit(_by_name)2 with
+ * mixed link types and where there are cyclic links.
+ * Iteration is done in increasing and decreasing
+ * order of both link name and link creation order.
+ *
+ * TODO refactor test so that creation order tests
+ * actually test the order that objects were created in.
+ */
+static int
+test_link_visit_mixed_links_cycles(void)
+{
+#if !defined(NO_EXTERNAL_LINKS) && !defined(NO_USER_DEFINED_LINKS)
+ htri_t link_exists;
+ size_t i;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t subgroup1 = H5I_INVALID_HID, subgroup2 = H5I_INVALID_HID;
+ hid_t gcpl_id = H5I_INVALID_HID;
+ char ext_link_filename[H5_API_TEST_FILENAME_MAX_LENGTH];
+#endif
+
+ TESTING_MULTIPART("link visiting with cycles (mixed link types)");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_LINK_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_EXTERNAL_LINKS) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_HARD_LINKS) || !(vol_cap_flags_g & H5VL_CAP_FLAG_SOFT_LINKS) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_ITERATE) || !(vol_cap_flags_g & H5VL_CAP_FLAG_CREATION_ORDER)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file, group, link, hard, soft, external link, iterate, or "
+ "creation order aren't supported with this connector\n");
+ return 0;
+ }
+
+#if !defined(NO_EXTERNAL_LINKS) && !defined(NO_USER_DEFINED_LINKS)
+ TESTING_2("test setup");
+
+ HDsnprintf(ext_link_filename, H5_API_TEST_FILENAME_MAX_LENGTH, "%s", EXTERNAL_LINK_TEST_FILE_NAME);
+
+ if ((file_id = H5Fcreate(ext_link_filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create file '%s' for external link to reference\n", ext_link_filename);
+ goto error;
+ }
+
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, LINK_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", LINK_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((gcpl_id = H5Pcreate(H5P_GROUP_CREATE)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create a GCPL\n");
+ goto error;
+ }
+
+ if (H5Pset_link_creation_order(gcpl_id, H5P_CRT_ORDER_TRACKED | H5P_CRT_ORDER_INDEXED) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't enable link creation order tracking and indexing on GCPL\n");
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, LINK_VISIT_MIXED_LINKS_CYCLE_TEST_SUBGROUP_NAME, H5P_DEFAULT,
+ gcpl_id, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container subgroup '%s'\n",
+ LINK_VISIT_MIXED_LINKS_CYCLE_TEST_SUBGROUP_NAME);
+ goto error;
+ }
+
+ if ((subgroup1 = H5Gcreate2(group_id, LINK_VISIT_MIXED_LINKS_CYCLE_TEST_SUBGROUP_NAME2, H5P_DEFAULT,
+ gcpl_id, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create first subgroup '%s'\n",
+ LINK_VISIT_MIXED_LINKS_CYCLE_TEST_SUBGROUP_NAME2);
+ goto error;
+ }
+
+ if ((subgroup2 = H5Gcreate2(group_id, LINK_VISIT_MIXED_LINKS_CYCLE_TEST_SUBGROUP_NAME3, H5P_DEFAULT,
+ gcpl_id, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create second subgroup '%s'\n",
+ LINK_VISIT_MIXED_LINKS_CYCLE_TEST_SUBGROUP_NAME3);
+ goto error;
+ }
+
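+    /*
+     * Each of the following links points back at the group that contains it (or, for
+     * the external link, at the root group of the external file), forming the cycles
+     * that H5Lvisit2 is expected to handle without recursing indefinitely.
+     */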
+ if (H5Lcreate_hard(group_id, LINK_VISIT_MIXED_LINKS_CYCLE_TEST_SUBGROUP_NAME2, subgroup1,
+ LINK_VISIT_MIXED_LINKS_CYCLE_TEST_LINK_NAME1, H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create first hard link '%s'\n", LINK_VISIT_MIXED_LINKS_CYCLE_TEST_LINK_NAME1);
+ goto error;
+ }
+
+ if (H5Lcreate_soft("/" LINK_TEST_GROUP_NAME "/" LINK_VISIT_MIXED_LINKS_CYCLE_TEST_SUBGROUP_NAME
+ "/" LINK_VISIT_MIXED_LINKS_CYCLE_TEST_SUBGROUP_NAME2,
+ subgroup1, LINK_VISIT_MIXED_LINKS_CYCLE_TEST_LINK_NAME2, H5P_DEFAULT,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create soft link '%s'\n", LINK_VISIT_MIXED_LINKS_CYCLE_TEST_LINK_NAME2);
+ goto error;
+ }
+
+ if (H5Lcreate_external(ext_link_filename, "/", subgroup2, LINK_VISIT_MIXED_LINKS_CYCLE_TEST_LINK_NAME3,
+ H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create external link '%s'\n", LINK_VISIT_MIXED_LINKS_CYCLE_TEST_LINK_NAME3);
+ goto error;
+ }
+
+ if (H5Lcreate_hard(group_id, LINK_VISIT_MIXED_LINKS_CYCLE_TEST_SUBGROUP_NAME3, subgroup2,
+ LINK_VISIT_MIXED_LINKS_CYCLE_TEST_LINK_NAME4, H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create second hard link '%s'\n", LINK_VISIT_MIXED_LINKS_CYCLE_TEST_LINK_NAME4);
+ goto error;
+ }
+
+ /* Verify the links have been created */
+ if ((link_exists = H5Lexists(subgroup1, LINK_VISIT_MIXED_LINKS_CYCLE_TEST_LINK_NAME1, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if first link '%s' exists\n",
+ LINK_VISIT_MIXED_LINKS_CYCLE_TEST_LINK_NAME1);
+ goto error;
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" first link '%s' did not exist\n", LINK_VISIT_MIXED_LINKS_CYCLE_TEST_LINK_NAME1);
+ goto error;
+ }
+
+ if ((link_exists = H5Lexists(subgroup1, LINK_VISIT_MIXED_LINKS_CYCLE_TEST_LINK_NAME2, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if second link '%s' exists\n",
+ LINK_VISIT_MIXED_LINKS_CYCLE_TEST_LINK_NAME2);
+ goto error;
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" second link '%s' did not exist\n", LINK_VISIT_MIXED_LINKS_CYCLE_TEST_LINK_NAME2);
+ goto error;
+ }
+
+ if ((link_exists = H5Lexists(subgroup2, LINK_VISIT_MIXED_LINKS_CYCLE_TEST_LINK_NAME3, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if third link '%s' exists\n",
+ LINK_VISIT_MIXED_LINKS_CYCLE_TEST_LINK_NAME3);
+ goto error;
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" third link '%s' did not exist\n", LINK_VISIT_MIXED_LINKS_CYCLE_TEST_LINK_NAME3);
+ goto error;
+ }
+
+ if ((link_exists = H5Lexists(subgroup2, LINK_VISIT_MIXED_LINKS_CYCLE_TEST_LINK_NAME4, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if fourth link '%s' exists\n",
+ LINK_VISIT_MIXED_LINKS_CYCLE_TEST_LINK_NAME4);
+ goto error;
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" fourth link '%s' did not exist\n", LINK_VISIT_MIXED_LINKS_CYCLE_TEST_LINK_NAME4);
+ goto error;
+ }
+
+ PASSED();
+
+ BEGIN_MULTIPART
+ {
+        /*
+         * NOTE: A counter is passed to the iteration callback so that the expected
+         *       link for each step of the following iterations can be matched up.
+         *       This checks that the links are indeed being returned in the
+         *       correct order.
+         */
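+        /*
+         * As above, the counter is reset to a multiple of
+         * LINK_VISIT_MIXED_LINKS_CYCLE_TEST_NUM_LINKS before each pass, so the
+         * callback can tell both which ordering is in effect and which link it
+         * should expect next.
+         */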
+
+ PART_BEGIN(H5Lvisit_cycles_link_name_increasing)
+ {
+ TESTING_2("H5Lvisit2 by link name in increasing order");
+
+ i = 0;
+
+ if (H5Lvisit2(group_id, H5_INDEX_NAME, H5_ITER_INC, link_visit_mixed_links_cycles_cb, &i) < 0) {
+ H5_FAILED();
+ HDprintf(" H5Lvisit2 by index type name in increasing order failed\n");
+ PART_ERROR(H5Lvisit_cycles_link_name_increasing);
+ }
+
+ if (i != LINK_VISIT_MIXED_LINKS_CYCLE_TEST_NUM_LINKS) {
+ H5_FAILED();
+ HDprintf(" some links were not visited!\n");
+ PART_ERROR(H5Lvisit_cycles_link_name_increasing);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lvisit_cycles_link_name_increasing);
+
+ PART_BEGIN(H5Lvisit_cycles_link_name_decreasing)
+ {
+ TESTING_2("H5Lvisit2 by link name in decreasing order");
+#ifndef NO_DECREASING_ALPHA_ITER_ORDER
+ /* Reset the counter to the appropriate value for the next test */
+ i = LINK_VISIT_MIXED_LINKS_CYCLE_TEST_NUM_LINKS;
+
+ if (H5Lvisit2(group_id, H5_INDEX_NAME, H5_ITER_DEC, link_visit_mixed_links_cycles_cb, &i) < 0) {
+ H5_FAILED();
+ HDprintf(" H5Lvisit2 by index type name in decreasing order failed\n");
+ PART_ERROR(H5Lvisit_cycles_link_name_decreasing);
+ }
+
+ if (i != 2 * LINK_VISIT_MIXED_LINKS_CYCLE_TEST_NUM_LINKS) {
+ H5_FAILED();
+ HDprintf(" some links were not visited!\n");
+ PART_ERROR(H5Lvisit_cycles_link_name_decreasing);
+ }
+
+ PASSED();
+#else
+ SKIPPED();
+ PART_EMPTY(H5Lvisit_cycles_link_name_decreasing);
+#endif
+ }
+ PART_END(H5Lvisit_cycles_link_name_decreasing);
+
+ PART_BEGIN(H5Lvisit_cycles_link_creation_increasing)
+ {
+ TESTING_2("H5Lvisit2 by creation order in increasing order");
+
+ /* Reset the counter to the appropriate value for the next test */
+ i = 2 * LINK_VISIT_MIXED_LINKS_CYCLE_TEST_NUM_LINKS;
+
+ if (H5Lvisit2(group_id, H5_INDEX_CRT_ORDER, H5_ITER_INC, link_visit_mixed_links_cycles_cb, &i) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" H5Lvisit2 by index type creation order in increasing order failed\n");
+ PART_ERROR(H5Lvisit_cycles_link_creation_increasing);
+ }
+
+ if (i != 3 * LINK_VISIT_MIXED_LINKS_CYCLE_TEST_NUM_LINKS) {
+ H5_FAILED();
+ HDprintf(" some links were not visited!\n");
+ PART_ERROR(H5Lvisit_cycles_link_creation_increasing);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lvisit_cycles_link_creation_increasing);
+
+ PART_BEGIN(H5Lvisit_cycles_link_creation_decreasing)
+ {
+ TESTING_2("H5Lvisit2 by creation order in decreasing order");
+
+ /* Reset the counter to the appropriate value for the next test */
+ i = 3 * LINK_VISIT_MIXED_LINKS_CYCLE_TEST_NUM_LINKS;
+
+ if (H5Lvisit2(group_id, H5_INDEX_CRT_ORDER, H5_ITER_DEC, link_visit_mixed_links_cycles_cb, &i) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" H5Lvisit2 by index type creation order in decreasing order failed\n");
+ PART_ERROR(H5Lvisit_cycles_link_creation_decreasing);
+ }
+
+ if (i != 4 * LINK_VISIT_MIXED_LINKS_CYCLE_TEST_NUM_LINKS) {
+ H5_FAILED();
+ HDprintf(" some links were not visited!\n");
+ PART_ERROR(H5Lvisit_cycles_link_creation_decreasing);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lvisit_cycles_link_creation_decreasing);
+
+ PART_BEGIN(H5Lvisit_by_name_cycles_link_name_increasing)
+ {
+ TESTING_2("H5Lvisit_by_name2 by link name in increasing order");
+
+ i = 0;
+
+ if (H5Lvisit_by_name2(
+ file_id, "/" LINK_TEST_GROUP_NAME "/" LINK_VISIT_MIXED_LINKS_CYCLE_TEST_SUBGROUP_NAME,
+ H5_INDEX_NAME, H5_ITER_INC, link_visit_mixed_links_cycles_cb, &i, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" H5Lvisit_by_name2 by index type name in increasing order failed\n");
+ PART_ERROR(H5Lvisit_by_name_cycles_link_name_increasing);
+ }
+
+ if (i != LINK_VISIT_MIXED_LINKS_CYCLE_TEST_NUM_LINKS) {
+ H5_FAILED();
+ HDprintf(" some links were not visited!\n");
+ PART_ERROR(H5Lvisit_by_name_cycles_link_name_increasing);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lvisit_by_name_cycles_link_name_increasing);
+
+ PART_BEGIN(H5Lvisit_by_name_cycles_link_name_decreasing)
+ {
+ TESTING_2("H5Lvisit_by_name2 by link name in decreasing order");
+#ifndef NO_DECREASING_ALPHA_ITER_ORDER
+ /* Reset the counter to the appropriate value for the next test */
+ i = LINK_VISIT_MIXED_LINKS_CYCLE_TEST_NUM_LINKS;
+
+ if (H5Lvisit_by_name2(
+ file_id, "/" LINK_TEST_GROUP_NAME "/" LINK_VISIT_MIXED_LINKS_CYCLE_TEST_SUBGROUP_NAME,
+ H5_INDEX_NAME, H5_ITER_DEC, link_visit_mixed_links_cycles_cb, &i, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" H5Lvisit_by_name2 by index type name in decreasing order failed\n");
+ PART_ERROR(H5Lvisit_by_name_cycles_link_name_decreasing);
+ }
+
+ if (i != 2 * LINK_VISIT_MIXED_LINKS_CYCLE_TEST_NUM_LINKS) {
+ H5_FAILED();
+ HDprintf(" some links were not visited!\n");
+ PART_ERROR(H5Lvisit_by_name_cycles_link_name_decreasing);
+ }
+
+ PASSED();
+#else
+ SKIPPED();
+ PART_EMPTY(H5Lvisit_by_name_cycles_link_name_decreasing);
+#endif
+ }
+ PART_END(H5Lvisit_by_name_cycles_link_name_decreasing);
+
+ PART_BEGIN(H5Lvisit_by_name_cycles_link_creation_increasing)
+ {
+ TESTING_2("H5Lvisit_by_name2 by creation order in increasing order");
+
+ /* Reset the counter to the appropriate value for the next test */
+ i = 2 * LINK_VISIT_MIXED_LINKS_CYCLE_TEST_NUM_LINKS;
+
+ if (H5Lvisit_by_name2(
+ file_id, "/" LINK_TEST_GROUP_NAME "/" LINK_VISIT_MIXED_LINKS_CYCLE_TEST_SUBGROUP_NAME,
+ H5_INDEX_CRT_ORDER, H5_ITER_INC, link_visit_mixed_links_cycles_cb, &i, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" H5Lvisit_by_name2 by index type creation order in increasing order failed\n");
+ PART_ERROR(H5Lvisit_by_name_cycles_link_creation_increasing);
+ }
+
+ if (i != 3 * LINK_VISIT_MIXED_LINKS_CYCLE_TEST_NUM_LINKS) {
+ H5_FAILED();
+ HDprintf(" some links were not visited!\n");
+ PART_ERROR(H5Lvisit_by_name_cycles_link_creation_increasing);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lvisit_by_name_cycles_link_creation_increasing);
+
+ PART_BEGIN(H5Lvisit_by_name_cycles_link_creation_decreasing)
+ {
+ TESTING_2("H5Lvisit_by_name2 by creation order in decreasing order");
+
+ /* Reset the counter to the appropriate value for the next test */
+ i = 3 * LINK_VISIT_MIXED_LINKS_CYCLE_TEST_NUM_LINKS;
+
+ if (H5Lvisit_by_name2(
+ file_id, "/" LINK_TEST_GROUP_NAME "/" LINK_VISIT_MIXED_LINKS_CYCLE_TEST_SUBGROUP_NAME,
+ H5_INDEX_CRT_ORDER, H5_ITER_DEC, link_visit_mixed_links_cycles_cb, &i, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" H5Lvisit_by_name2 by index type creation order in decreasing order failed\n");
+ PART_ERROR(H5Lvisit_by_name_cycles_link_creation_decreasing);
+ }
+
+ if (i != 4 * LINK_VISIT_MIXED_LINKS_CYCLE_TEST_NUM_LINKS) {
+ H5_FAILED();
+ HDprintf(" some links were not visited!\n");
+ PART_ERROR(H5Lvisit_by_name_cycles_link_creation_decreasing);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lvisit_by_name_cycles_link_creation_decreasing);
+ }
+ END_MULTIPART;
+
+ TESTING_2("test cleanup");
+
+ if (H5Pclose(gcpl_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(subgroup1) < 0)
+ TEST_ERROR;
+ if (H5Gclose(subgroup2) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Pclose(gcpl_id);
+ H5Gclose(subgroup1);
+ H5Gclose(subgroup2);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+#else
+ SKIPPED();
+ return 0;
+#endif
+}
+
+/*
+ * A test to check that H5Lvisit(_by_name)2 fails when
+ * it is given invalid parameters.
+ */
+static int
+test_link_visit_invalid_params(void)
+{
+ herr_t err_ret = -1;
+ htri_t link_exists;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t subgroup1 = H5I_INVALID_HID, subgroup2 = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t dset_dtype = H5I_INVALID_HID;
+ hid_t fspace_id = H5I_INVALID_HID;
+ char ext_link_filename[H5_API_TEST_FILENAME_MAX_LENGTH];
+
+ TESTING_MULTIPART("link visiting with invalid parameters");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_LINK_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_EXTERNAL_LINKS) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_ITERATE) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_CREATION_ORDER)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file, group, dataset, link, external link, iterate, or "
+ "creation order aren't supported with this connector\n");
+ return 0;
+ }
+
+ TESTING_2("test setup");
+
+ HDsnprintf(ext_link_filename, H5_API_TEST_FILENAME_MAX_LENGTH, "%s", EXTERNAL_LINK_TEST_FILE_NAME);
+
+ if ((file_id = H5Fcreate(ext_link_filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create file '%s' for external link to reference\n", ext_link_filename);
+ goto error;
+ }
+
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, LINK_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", LINK_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, LINK_VISIT_INVALID_PARAMS_TEST_SUBGROUP_NAME, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container subgroup '%s'\n",
+ LINK_VISIT_INVALID_PARAMS_TEST_SUBGROUP_NAME);
+ goto error;
+ }
+
+ if ((subgroup1 = H5Gcreate2(group_id, LINK_VISIT_INVALID_PARAMS_TEST_SUBGROUP_NAME2, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create first subgroup '%s'\n", LINK_VISIT_INVALID_PARAMS_TEST_SUBGROUP_NAME2);
+ goto error;
+ }
+
+ if ((subgroup2 = H5Gcreate2(group_id, LINK_VISIT_INVALID_PARAMS_TEST_SUBGROUP_NAME3, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create second subgroup '%s'\n", LINK_VISIT_INVALID_PARAMS_TEST_SUBGROUP_NAME3);
+ goto error;
+ }
+
+ if ((dset_dtype = generate_random_datatype(H5T_NO_CLASS, FALSE)) < 0)
+ TEST_ERROR;
+
+ if ((fspace_id = generate_random_dataspace(LINK_VISIT_INVALID_PARAMS_TEST_DSET_SPACE_RANK, NULL, NULL,
+ FALSE)) < 0)
+ TEST_ERROR;
+
+ if ((dset_id = H5Dcreate2(subgroup1, LINK_VISIT_INVALID_PARAMS_TEST_DSET_NAME, dset_dtype, fspace_id,
+ H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+        H5_FAILED();
+        HDprintf("    couldn't create first dataset '%s'\n", LINK_VISIT_INVALID_PARAMS_TEST_DSET_NAME);
+        goto error;
+    }
+
+ if (H5Dclose(dset_id) < 0)
+ TEST_ERROR;
+
+ if ((dset_id = H5Dcreate2(subgroup2, LINK_VISIT_INVALID_PARAMS_TEST_DSET_NAME, dset_dtype, fspace_id,
+ H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+        H5_FAILED();
+        HDprintf("    couldn't create second dataset '%s'\n", LINK_VISIT_INVALID_PARAMS_TEST_DSET_NAME);
+        goto error;
+    }
+
+ if (H5Dclose(dset_id) < 0)
+ TEST_ERROR;
+
+ if (H5Lcreate_hard(subgroup1, LINK_VISIT_INVALID_PARAMS_TEST_DSET_NAME, subgroup1,
+ LINK_VISIT_INVALID_PARAMS_TEST_LINK_NAME1, H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create first hard link '%s'\n", LINK_VISIT_INVALID_PARAMS_TEST_LINK_NAME1);
+ goto error;
+ }
+
+ if (H5Lcreate_soft(LINK_VISIT_INVALID_PARAMS_TEST_DSET_NAME, subgroup1,
+ LINK_VISIT_INVALID_PARAMS_TEST_LINK_NAME2, H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create soft link '%s'\n", LINK_VISIT_INVALID_PARAMS_TEST_LINK_NAME2);
+ goto error;
+ }
+#ifndef NO_EXTERNAL_LINKS
+ if (H5Lcreate_external(ext_link_filename, "/", subgroup2, LINK_VISIT_INVALID_PARAMS_TEST_LINK_NAME3,
+ H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create external link '%s'\n", LINK_VISIT_INVALID_PARAMS_TEST_LINK_NAME3);
+ goto error;
+ }
+#endif
+ if (H5Lcreate_hard(subgroup2, LINK_VISIT_INVALID_PARAMS_TEST_DSET_NAME, subgroup2,
+ LINK_VISIT_INVALID_PARAMS_TEST_LINK_NAME4, H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create second hard link '%s'\n", LINK_VISIT_INVALID_PARAMS_TEST_LINK_NAME4);
+ goto error;
+ }
+
+ /* Verify the links have been created */
+ if ((link_exists = H5Lexists(subgroup1, LINK_VISIT_INVALID_PARAMS_TEST_LINK_NAME1, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if first link '%s' exists\n",
+ LINK_VISIT_INVALID_PARAMS_TEST_LINK_NAME1);
+ goto error;
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" link 1 did not exist\n");
+ goto error;
+ }
+
+ if ((link_exists = H5Lexists(subgroup1, LINK_VISIT_INVALID_PARAMS_TEST_LINK_NAME2, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if second link '%s' exists\n",
+ LINK_VISIT_INVALID_PARAMS_TEST_LINK_NAME2);
+ goto error;
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" link 2 did not exist\n");
+ goto error;
+ }
+#ifndef NO_EXTERNAL_LINKS
+ if ((link_exists = H5Lexists(subgroup2, LINK_VISIT_INVALID_PARAMS_TEST_LINK_NAME3, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if third link '%s' exists\n",
+ LINK_VISIT_INVALID_PARAMS_TEST_LINK_NAME3);
+ goto error;
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" link 3 did not exist\n");
+ goto error;
+ }
+#endif
+ if ((link_exists = H5Lexists(subgroup2, LINK_VISIT_INVALID_PARAMS_TEST_LINK_NAME4, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if fourth link '%s' exists\n",
+ LINK_VISIT_INVALID_PARAMS_TEST_LINK_NAME4);
+ goto error;
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" link 4 did not exist\n");
+ goto error;
+ }
+
+ PASSED();
+
+ BEGIN_MULTIPART
+ {
+ PART_BEGIN(H5Lvisit_invalid_grp_id)
+ {
+ TESTING_2("H5Lvisit2 with an invalid group ID");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Lvisit2(H5I_INVALID_HID, H5_INDEX_NAME, H5_ITER_INC, link_visit_invalid_params_cb,
+ NULL);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Lvisit2 succeeded with an invalid group ID!\n");
+ PART_ERROR(H5Lvisit_invalid_grp_id);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lvisit_invalid_grp_id);
+
+ PART_BEGIN(H5Lvisit_invalid_index_type)
+ {
+ TESTING_2("H5Lvisit2 with an invalid index type");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret =
+ H5Lvisit2(group_id, H5_INDEX_UNKNOWN, H5_ITER_INC, link_visit_invalid_params_cb, NULL);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Lvisit2 succeeded with invalid index type H5_INDEX_UNKNOWN!\n");
+ PART_ERROR(H5Lvisit_invalid_index_type);
+ }
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Lvisit2(group_id, H5_INDEX_N, H5_ITER_INC, link_visit_invalid_params_cb, NULL);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Lvisit2 succeeded with invalid index type H5_INDEX_N!\n");
+ PART_ERROR(H5Lvisit_invalid_index_type);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lvisit_invalid_index_type);
+
+ PART_BEGIN(H5Lvisit_invalid_iter_order)
+ {
+ TESTING_2("H5Lvisit2 with an invalid iteration ordering");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret =
+ H5Lvisit2(group_id, H5_INDEX_NAME, H5_ITER_UNKNOWN, link_visit_invalid_params_cb, NULL);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Lvisit2 succeeded with invalid iteration ordering H5_ITER_UNKNOWN!\n");
+ PART_ERROR(H5Lvisit_invalid_iter_order);
+ }
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Lvisit2(group_id, H5_INDEX_NAME, H5_ITER_N, link_visit_invalid_params_cb, NULL);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Lvisit2 succeeded with invalid iteration ordering H5_ITER_N!\n");
+ PART_ERROR(H5Lvisit_invalid_iter_order);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lvisit_invalid_iter_order);
+
+ PART_BEGIN(H5Lvisit_by_name_invalid_loc_id)
+ {
+ TESTING_2("H5Lvisit_by_name2 with an invalid location ID");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Lvisit_by_name2(
+ H5I_INVALID_HID,
+ "/" LINK_TEST_GROUP_NAME "/" LINK_VISIT_INVALID_PARAMS_TEST_SUBGROUP_NAME, H5_INDEX_NAME,
+ H5_ITER_INC, link_visit_invalid_params_cb, NULL, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Lvisit_by_name2 succeeded with an invalid location ID!\n");
+ PART_ERROR(H5Lvisit_by_name_invalid_loc_id);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lvisit_by_name_invalid_loc_id);
+
+ PART_BEGIN(H5Lvisit_by_name_invalid_grp_name)
+ {
+ TESTING_2("H5Lvisit_by_name2 with an invalid group name");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Lvisit_by_name2(file_id, NULL, H5_INDEX_NAME, H5_ITER_INC,
+ link_visit_invalid_params_cb, NULL, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Lvisit_by_name2 succeeded with a NULL group name!\n");
+ PART_ERROR(H5Lvisit_by_name_invalid_grp_name);
+ }
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Lvisit_by_name2(file_id, "", H5_INDEX_NAME, H5_ITER_INC,
+ link_visit_invalid_params_cb, NULL, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Lvisit_by_name2 succeeded with an invalid group name of ''!\n");
+ PART_ERROR(H5Lvisit_by_name_invalid_grp_name);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lvisit_by_name_invalid_grp_name);
+
+ PART_BEGIN(H5Lvisit_by_name_invalid_index_type)
+ {
+ TESTING_2("H5Lvisit_by_name2 with an invalid index type");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Lvisit_by_name2(
+ file_id, "/" LINK_TEST_GROUP_NAME "/" LINK_VISIT_INVALID_PARAMS_TEST_SUBGROUP_NAME,
+ H5_INDEX_UNKNOWN, H5_ITER_INC, link_visit_invalid_params_cb, NULL, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Lvisit_by_name2 succeeded with invalid index type H5_INDEX_UNKNOWN!\n");
+ PART_ERROR(H5Lvisit_by_name_invalid_index_type);
+ }
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Lvisit_by_name2(
+ file_id, "/" LINK_TEST_GROUP_NAME "/" LINK_VISIT_INVALID_PARAMS_TEST_SUBGROUP_NAME,
+ H5_INDEX_N, H5_ITER_INC, link_visit_invalid_params_cb, NULL, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Lvisit_by_name2 succeeded with invalid index type H5_INDEX_N!\n");
+ PART_ERROR(H5Lvisit_by_name_invalid_index_type);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lvisit_by_name_invalid_index_type);
+
+ PART_BEGIN(H5Lvisit_by_name_invalid_iter_order)
+ {
+ TESTING_2("H5Lvisit_by_name2 with an invalid iteration ordering");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Lvisit_by_name2(
+ file_id, "/" LINK_TEST_GROUP_NAME "/" LINK_VISIT_INVALID_PARAMS_TEST_SUBGROUP_NAME,
+ H5_INDEX_NAME, H5_ITER_UNKNOWN, link_visit_invalid_params_cb, NULL, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(
+ " H5Lvisit_by_name2 succeeded with invalid iteration ordering H5_ITER_UNKNOWN!\n");
+ PART_ERROR(H5Lvisit_by_name_invalid_iter_order);
+ }
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Lvisit_by_name2(
+ file_id, "/" LINK_TEST_GROUP_NAME "/" LINK_VISIT_INVALID_PARAMS_TEST_SUBGROUP_NAME,
+ H5_INDEX_NAME, H5_ITER_N, link_visit_invalid_params_cb, NULL, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Lvisit_by_name2 succeeded with invalid iteration ordering H5_ITER_N!\n");
+ PART_ERROR(H5Lvisit_by_name_invalid_iter_order);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lvisit_by_name_invalid_iter_order);
+
+ PART_BEGIN(H5Lvisit_by_name_invalid_lapl)
+ {
+ TESTING_2("H5Lvisit_by_name2 with an invalid LAPL");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Lvisit_by_name2(
+ file_id, "/" LINK_TEST_GROUP_NAME "/" LINK_VISIT_INVALID_PARAMS_TEST_SUBGROUP_NAME,
+ H5_INDEX_NAME, H5_ITER_INC, link_visit_invalid_params_cb, NULL, H5I_INVALID_HID);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Lvisit_by_name2 succeeded with an invalid LAPL!\n");
+ PART_ERROR(H5Lvisit_by_name_invalid_lapl);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lvisit_by_name_invalid_lapl);
+ }
+ END_MULTIPART;
+
+ TESTING_2("test cleanup");
+
+ if (H5Sclose(fspace_id) < 0)
+ TEST_ERROR;
+ if (H5Tclose(dset_dtype) < 0)
+ TEST_ERROR;
+ if (H5Gclose(subgroup1) < 0)
+ TEST_ERROR;
+ if (H5Gclose(subgroup2) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(fspace_id);
+ H5Tclose(dset_dtype);
+ H5Dclose(dset_id);
+ H5Gclose(subgroup1);
+ H5Gclose(subgroup2);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to check that recursive link iteration
+ * performed on a group with no links in it is
+ * not problematic.
+ */
+static int
+test_link_visit_0_links(void)
+{
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t gcpl_id = H5I_INVALID_HID;
+
+ TESTING_MULTIPART("link visiting on group with subgroups containing 0 links");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_ITERATE) || !(vol_cap_flags_g & H5VL_CAP_FLAG_CREATION_ORDER)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file, group, link iterate, or creation order aren't supported "
+ "with this connector\n");
+ return 0;
+ }
+
+ TESTING_2("test setup");
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, LINK_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", LINK_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((gcpl_id = H5Pcreate(H5P_GROUP_CREATE)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create a GCPL\n");
+ goto error;
+ }
+
+ if (H5Pset_link_creation_order(gcpl_id, H5P_CRT_ORDER_TRACKED | H5P_CRT_ORDER_INDEXED) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't enable link creation order tracking and indexing on GCPL\n");
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, LINK_VISIT_0_LINKS_TEST_SUBGROUP_NAME, H5P_DEFAULT, gcpl_id,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container subgroup '%s'\n", LINK_VISIT_0_LINKS_TEST_SUBGROUP_NAME);
+ goto error;
+ }
+
+ PASSED();
+
+ BEGIN_MULTIPART
+ {
+ PART_BEGIN(H5Lvisit_0_links_name_increasing)
+ {
+ TESTING_2("H5Lvisit2 by link name in increasing order");
+
+ if (H5Lvisit2(group_id, H5_INDEX_NAME, H5_ITER_INC, link_visit_0_links_cb, NULL) < 0) {
+ H5_FAILED();
+ HDprintf(" H5Lvisit2 by index type name in increasing order failed\n");
+ PART_ERROR(H5Lvisit_0_links_name_increasing);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lvisit_0_links_name_increasing);
+
+ PART_BEGIN(H5Lvisit_0_links_name_decreasing)
+ {
+ TESTING_2("H5Lvisit2 by link name in decreasing order");
+#ifndef NO_DECREASING_ALPHA_ITER_ORDER
+ if (H5Lvisit2(group_id, H5_INDEX_NAME, H5_ITER_DEC, link_visit_0_links_cb, NULL) < 0) {
+ H5_FAILED();
+ HDprintf(" H5Lvisit2 by index type name in decreasing order failed\n");
+ PART_ERROR(H5Lvisit_0_links_name_decreasing);
+ }
+
+ PASSED();
+#else
+ SKIPPED();
+ PART_EMPTY(H5Lvisit_0_links_name_decreasing);
+#endif
+ }
+ PART_END(H5Lvisit_0_links_name_decreasing);
+
+ PART_BEGIN(H5Lvisit_0_links_creation_increasing)
+ {
+ TESTING_2("H5Lvisit2 by creation order in increasing order");
+
+ if (H5Lvisit2(group_id, H5_INDEX_CRT_ORDER, H5_ITER_INC, link_visit_0_links_cb, NULL) < 0) {
+ H5_FAILED();
+ HDprintf(" H5Lvisit2 by index type creation order in increasing order failed\n");
+ PART_ERROR(H5Lvisit_0_links_creation_increasing);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lvisit_0_links_creation_increasing);
+
+ PART_BEGIN(H5Lvisit_0_links_creation_decreasing)
+ {
+ TESTING_2("H5Lvisit2 by creation order in decreasing order");
+
+ if (H5Lvisit2(group_id, H5_INDEX_CRT_ORDER, H5_ITER_DEC, link_visit_0_links_cb, NULL) < 0) {
+ H5_FAILED();
+ HDprintf(" H5Lvisit2 by index type creation order in decreasing order failed\n");
+ PART_ERROR(H5Lvisit_0_links_creation_decreasing);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lvisit_0_links_creation_decreasing);
+
+ PART_BEGIN(H5Lvisit_by_name_0_links_name_increasing)
+ {
+ TESTING_2("H5Lvisit_by_name2 by link name in increasing order");
+
+ if (H5Lvisit_by_name2(file_id, "/" LINK_TEST_GROUP_NAME "/" LINK_VISIT_0_LINKS_TEST_SUBGROUP_NAME,
+ H5_INDEX_NAME, H5_ITER_INC, link_visit_0_links_cb, NULL, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" H5Lvisit_by_name2 by index type name in increasing order failed\n");
+ PART_ERROR(H5Lvisit_by_name_0_links_name_increasing);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lvisit_by_name_0_links_name_increasing);
+
+ PART_BEGIN(H5Lvisit_by_name_0_links_name_decreasing)
+ {
+ TESTING_2("H5Lvisit_by_name2 by link name in decreasing order");
+#ifndef NO_DECREASING_ALPHA_ITER_ORDER
+ if (H5Lvisit_by_name2(file_id, "/" LINK_TEST_GROUP_NAME "/" LINK_VISIT_0_LINKS_TEST_SUBGROUP_NAME,
+ H5_INDEX_NAME, H5_ITER_DEC, link_visit_0_links_cb, NULL, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" H5Lvisit_by_name2 by index type name in decreasing order failed\n");
+ PART_ERROR(H5Lvisit_by_name_0_links_name_decreasing);
+ }
+
+ PASSED();
+#else
+ SKIPPED();
+ PART_EMPTY(H5Lvisit_by_name_0_links_name_decreasing);
+#endif
+ }
+ PART_END(H5Lvisit_by_name_0_links_name_decreasing);
+
+ PART_BEGIN(H5Lvisit_by_name_0_links_creation_increasing)
+ {
+ TESTING_2("H5Lvisit_by_name2 by creation order in increasing order");
+
+ if (H5Lvisit_by_name2(file_id, "/" LINK_TEST_GROUP_NAME "/" LINK_VISIT_0_LINKS_TEST_SUBGROUP_NAME,
+ H5_INDEX_CRT_ORDER, H5_ITER_INC, link_visit_0_links_cb, NULL,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" H5Lvisit_by_name2 by index type creation order in increasing order failed\n");
+ PART_ERROR(H5Lvisit_by_name_0_links_creation_increasing);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lvisit_by_name_0_links_creation_increasing);
+
+ PART_BEGIN(H5Lvisit_by_name_0_links_creation_decreasing)
+ {
+ TESTING_2("H5Lvisit_by_name2 by creation order in decreasing order");
+
+ if (H5Lvisit_by_name2(file_id, "/" LINK_TEST_GROUP_NAME "/" LINK_VISIT_0_LINKS_TEST_SUBGROUP_NAME,
+ H5_INDEX_CRT_ORDER, H5_ITER_DEC, link_visit_0_links_cb, NULL,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" H5Lvisit_by_name2 by index type creation order in decreasing order failed\n");
+ PART_ERROR(H5Lvisit_by_name_0_links_creation_decreasing);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Lvisit_by_name_0_links_creation_decreasing);
+ }
+ END_MULTIPART;
+
+ TESTING_2("test cleanup");
+
+ if (H5Pclose(gcpl_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Pclose(gcpl_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * Link iteration callback for the hard links test which iterates
+ * through all of the links in the test group and checks to make sure
+ * their names and link classes match what is expected.
+ */
+static herr_t
+link_iter_hard_links_cb(hid_t group_id, const char *name, const H5L_info2_t *info, void *op_data)
+{
+ size_t *i = (size_t *)op_data;
+ size_t counter_val = *((size_t *)op_data);
+ size_t test_iteration;
+ char expected_link_name[LINK_ITER_HARD_LINKS_TEST_BUF_SIZE];
+ herr_t ret_val = H5_ITER_CONT;
+
+ UNUSED(group_id);
+
+ if (H5L_TYPE_HARD != info->type) {
+ ret_val = H5_ITER_ERROR;
+ HDprintf(" link type for link '%s' was not H5L_TYPE_HARD!\n", name);
+ goto done;
+ }
+
+ /*
+ * Four tests are run in the following order per link iteration API call:
+ *
+ * - iteration by link name in increasing order
+ * - iteration by link name in decreasing order
+ * - iteration by link creation order in increasing order
+ * - iteration by link creation order in decreasing order
+ *
+ * Based on how the test is written, this will mean that the dataset names
+ * will run in increasing order on the first and fourth tests and decreasing
+ * order on the second and third tests.
+ */
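+    /*
+     * As a purely illustrative example, if LINK_ITER_HARD_LINKS_TEST_NUM_LINKS were
+     * 10, then a counter value of 13 would fall in the second pass (decreasing name
+     * order) and the expected link index would be 10 - (13 % 10) - 1 = 6.
+     */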
+ test_iteration = (counter_val / LINK_ITER_HARD_LINKS_TEST_NUM_LINKS);
+ if (test_iteration == 0 || test_iteration == 3) {
+ HDsnprintf(expected_link_name, LINK_ITER_HARD_LINKS_TEST_BUF_SIZE,
+ LINK_ITER_HARD_LINKS_TEST_LINK_NAME "%d",
+ (int)(counter_val % LINK_ITER_HARD_LINKS_TEST_NUM_LINKS));
+ }
+ else {
+ HDsnprintf(expected_link_name, LINK_ITER_HARD_LINKS_TEST_BUF_SIZE,
+ LINK_ITER_HARD_LINKS_TEST_LINK_NAME "%d",
+ (int)(LINK_ITER_HARD_LINKS_TEST_NUM_LINKS -
+ (counter_val % LINK_ITER_HARD_LINKS_TEST_NUM_LINKS) - 1));
+ }
+
+ if (HDstrncmp(name, expected_link_name, LINK_ITER_HARD_LINKS_TEST_BUF_SIZE)) {
+ HDprintf(" link name '%s' didn't match expected name '%s'\n", name, expected_link_name);
+ ret_val = H5_ITER_ERROR;
+ goto done;
+ }
+
+done:
+ (*i)++;
+
+ return ret_val;
+}
+
+/*
+ * Link iteration callback for the soft links test which iterates
+ * through all of the links in the test group and checks to make sure
+ * their names and link classes match what is expected.
+ */
+static herr_t
+link_iter_soft_links_cb(hid_t group_id, const char *name, const H5L_info2_t *info, void *op_data)
+{
+ size_t *i = (size_t *)op_data;
+ size_t counter_val = *((size_t *)op_data);
+ size_t test_iteration;
+ char expected_link_name[LINK_ITER_SOFT_LINKS_TEST_BUF_SIZE];
+ herr_t ret_val = H5_ITER_CONT;
+
+ UNUSED(group_id);
+
+ if (H5L_TYPE_SOFT != info->type) {
+ ret_val = H5_ITER_ERROR;
+ HDprintf(" link type for link '%s' was not H5L_TYPE_SOFT!\n", name);
+ goto done;
+ }
+
+ /*
+ * Four tests are run in the following order per link iteration API call:
+ *
+ * - iteration by link name in increasing order
+ * - iteration by link name in decreasing order
+ * - iteration by link creation order in increasing order
+ * - iteration by link creation order in decreasing order
+ *
+ * Based on how the test is written, this will mean that the link names
+ * will run in increasing order on the first and fourth tests and decreasing
+ * order on the second and third tests.
+ */
+ test_iteration = (counter_val / LINK_ITER_SOFT_LINKS_TEST_NUM_LINKS);
+ if (test_iteration == 0 || test_iteration == 3) {
+ HDsnprintf(expected_link_name, LINK_ITER_SOFT_LINKS_TEST_BUF_SIZE,
+ LINK_ITER_SOFT_LINKS_TEST_LINK_NAME "%d",
+ (int)(counter_val % LINK_ITER_SOFT_LINKS_TEST_NUM_LINKS));
+ }
+ else {
+ HDsnprintf(expected_link_name, LINK_ITER_SOFT_LINKS_TEST_BUF_SIZE,
+ LINK_ITER_SOFT_LINKS_TEST_LINK_NAME "%d",
+ (int)(LINK_ITER_SOFT_LINKS_TEST_NUM_LINKS -
+ (counter_val % LINK_ITER_SOFT_LINKS_TEST_NUM_LINKS) - 1));
+ }
+
+ if (HDstrncmp(name, expected_link_name, LINK_ITER_SOFT_LINKS_TEST_BUF_SIZE)) {
+ HDprintf(" link name '%s' didn't match expected name '%s'\n", name, expected_link_name);
+ ret_val = H5_ITER_ERROR;
+ goto done;
+ }
+
+done:
+ (*i)++;
+
+ return ret_val;
+}
+
+/*
+ * Link iteration callback for the external links test which iterates
+ * through all of the links in the test group and checks to make sure
+ * their names and link classes match what is expected.
+ */
+#ifndef NO_EXTERNAL_LINKS
+static herr_t
+link_iter_external_links_cb(hid_t group_id, const char *name, const H5L_info2_t *info, void *op_data)
+{
+ size_t *i = (size_t *)op_data;
+ size_t counter_val = *((size_t *)op_data);
+ size_t test_iteration;
+ char expected_link_name[LINK_ITER_EXT_LINKS_TEST_BUF_SIZE];
+ herr_t ret_val = H5_ITER_CONT;
+
+ UNUSED(group_id);
+
+ if (H5L_TYPE_EXTERNAL != info->type) {
+ ret_val = H5_ITER_ERROR;
+ HDprintf(" link type for link '%s' was not H5L_TYPE_EXTERNAL!\n", name);
+ goto done;
+ }
+
+ /*
+ * Four tests are run in the following order per link iteration API call:
+ *
+ * - iteration by link name in increasing order
+ * - iteration by link name in decreasing order
+ * - iteration by link creation order in increasing order
+ * - iteration by link creation order in decreasing order
+ *
+ * Based on how the test is written, this will mean that the link names
+ * will run in increasing order on the first and fourth tests and decreasing
+ * order on the second and third tests.
+ */
+ test_iteration = (counter_val / LINK_ITER_EXT_LINKS_TEST_NUM_LINKS);
+ if (test_iteration == 0 || test_iteration == 3) {
+ HDsnprintf(expected_link_name, LINK_ITER_EXT_LINKS_TEST_BUF_SIZE,
+ LINK_ITER_EXT_LINKS_TEST_LINK_NAME "%d",
+ (int)(counter_val % LINK_ITER_EXT_LINKS_TEST_NUM_LINKS));
+ }
+ else {
+ HDsnprintf(expected_link_name, LINK_ITER_EXT_LINKS_TEST_BUF_SIZE,
+ LINK_ITER_EXT_LINKS_TEST_LINK_NAME "%d",
+ (int)(LINK_ITER_EXT_LINKS_TEST_NUM_LINKS -
+ (counter_val % LINK_ITER_EXT_LINKS_TEST_NUM_LINKS) - 1));
+ }
+
+ if (HDstrncmp(name, expected_link_name, LINK_ITER_EXT_LINKS_TEST_BUF_SIZE)) {
+ HDprintf(" link name '%s' didn't match expected name '%s'\n", name, expected_link_name);
+ ret_val = H5_ITER_ERROR;
+ goto done;
+ }
+
+done:
+ (*i)++;
+
+ return ret_val;
+}
+#endif
+
+#ifndef NO_USER_DEFINED_LINKS
+static herr_t link_iter_ud_links_cb(hid_t group_id, const char *name, const H5L_info2_t *info, void *op_data);
+#endif
+
+/*
+ * Link iteration callback for the mixed link types test which iterates
+ * through all of the links in the test group and checks to make sure
+ * their names and link classes match what is expected.
+ */
+#if !defined(NO_EXTERNAL_LINKS) && !defined(NO_USER_DEFINED_LINKS)
+static herr_t
+link_iter_mixed_links_cb(hid_t group_id, const char *name, const H5L_info2_t *info, void *op_data)
+{
+ size_t *i = (size_t *)op_data;
+ size_t counter_val = *((size_t *)op_data);
+ herr_t ret_val = 0;
+
+ UNUSED(group_id);
+
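+    /*
+     * The hard-coded counter values below encode where each of the three links is
+     * expected to appear across the four passes, assuming the usual ordering used
+     * throughout this file (name increasing, name decreasing, creation order
+     * increasing, creation order decreasing): with three visits per pass, offsets
+     * 0-2 belong to the first pass, 3-5 to the second, 6-8 to the third and 9-11
+     * to the fourth.
+     */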
+ if (!HDstrncmp(name, LINK_ITER_MIXED_LINKS_TEST_HARD_LINK_NAME,
+ strlen(LINK_ITER_MIXED_LINKS_TEST_HARD_LINK_NAME) + 1) &&
+ (counter_val == 1 || counter_val == 4 || counter_val == 6 || counter_val == 11)) {
+ if (H5L_TYPE_HARD != info->type) {
+ ret_val = -1;
+ HDprintf(" link type for link '%s' was not H5L_TYPE_HARD!\n", name);
+ }
+
+ goto done;
+ }
+ else if (!HDstrncmp(name, LINK_ITER_MIXED_LINKS_TEST_SOFT_LINK_NAME,
+ strlen(LINK_ITER_MIXED_LINKS_TEST_SOFT_LINK_NAME) + 1) &&
+ (counter_val == 2 || counter_val == 3 || counter_val == 7 || counter_val == 10)) {
+ if (H5L_TYPE_SOFT != info->type) {
+ ret_val = -1;
+ HDprintf(" link type for link '%s' was not H5L_TYPE_SOFT!\n", name);
+ }
+
+ goto done;
+ }
+ else if (!HDstrncmp(name, LINK_ITER_MIXED_LINKS_TEST_EXT_LINK_NAME,
+ strlen(LINK_ITER_MIXED_LINKS_TEST_EXT_LINK_NAME) + 1) &&
+ (counter_val == 0 || counter_val == 5 || counter_val == 8 || counter_val == 9)) {
+ if (H5L_TYPE_EXTERNAL != info->type) {
+ ret_val = -1;
+ HDprintf(" link type for link '%s' was not H5L_TYPE_EXTERNAL!\n", name);
+ }
+
+ goto done;
+ }
+
+ HDprintf(" link name '%s' didn't match known names or came in an incorrect order\n", name);
+
+ ret_val = -1;
+
+done:
+ (*i)++;
+
+ return ret_val;
+}
+#endif
+
+/*
+ * Link iteration callback for the H5Literate(_by_name)2 invalid
+ * parameters test which simply does nothing.
+ */
+static herr_t
+link_iter_invalid_params_cb(hid_t group_id, const char *name, const H5L_info2_t *info, void *op_data)
+{
+ UNUSED(group_id);
+ UNUSED(name);
+ UNUSED(info);
+ UNUSED(op_data);
+
+ return 0;
+}
+
+/*
+ * Link iteration callback for the 0 links iteration test which
+ * simply does nothing.
+ */
+static herr_t
+link_iter_0_links_cb(hid_t group_id, const char *name, const H5L_info2_t *info, void *op_data)
+{
+ UNUSED(group_id);
+ UNUSED(name);
+ UNUSED(info);
+ UNUSED(op_data);
+
+ return 0;
+}
+
+/*
+ * Link iteration callback to test that the index-saving behavior of H5Literate2
+ * works correctly.
+ */
+#if !defined(NO_EXTERNAL_LINKS) && !defined(NO_USER_DEFINED_LINKS)
+static herr_t
+link_iter_idx_saving_cb(hid_t group_id, const char *name, const H5L_info2_t *info, void *op_data)
+{
+ int *broken = (int *)op_data;
+
+ UNUSED(group_id);
+
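+    /*
+     * Returning a positive value from an H5L_iterate2_t callback stops the
+     * iteration, so breaking out the first time the soft link is seen lets the
+     * calling test resume H5Literate2 from the index saved at that point.
+     */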
+ if (broken && !*broken &&
+ !HDstrncmp(name, LINK_ITER_MIXED_LINKS_TEST_SOFT_LINK_NAME,
+ strlen(LINK_ITER_MIXED_LINKS_TEST_SOFT_LINK_NAME) + 1)) {
+ return (*broken = 1);
+ }
+
+ if (!HDstrncmp(name, LINK_ITER_MIXED_LINKS_TEST_HARD_LINK_NAME,
+ strlen(LINK_ITER_MIXED_LINKS_TEST_HARD_LINK_NAME) + 1)) {
+ if (H5L_TYPE_HARD != info->type) {
+ H5_FAILED();
+ HDprintf(" link type did not match\n");
+ goto error;
+ }
+ }
+ else if (!HDstrncmp(name, LINK_ITER_MIXED_LINKS_TEST_SOFT_LINK_NAME,
+ strlen(LINK_ITER_MIXED_LINKS_TEST_SOFT_LINK_NAME) + 1)) {
+ if (H5L_TYPE_SOFT != info->type) {
+ H5_FAILED();
+ HDprintf(" link type did not match\n");
+ goto error;
+ }
+ }
+ else if (!HDstrncmp(name, LINK_ITER_MIXED_LINKS_TEST_EXT_LINK_NAME,
+ strlen(LINK_ITER_MIXED_LINKS_TEST_EXT_LINK_NAME) + 1)) {
+ if (H5L_TYPE_EXTERNAL != info->type) {
+ H5_FAILED();
+ HDprintf(" link type did not match\n");
+ goto error;
+ }
+ }
+ else {
+ H5_FAILED();
+ HDprintf(" link name didn't match known names\n");
+ goto error;
+ }
+
+ return 0;
+
+error:
+ return -1;
+}
+#endif
+
+/*
+ * Link visiting callback for the hard links + no cycles test which
+ * iterates recursively through all of the links in the test group and
+ * checks to make sure their names and link classes match what is expected.
+ */
+static herr_t
+link_visit_hard_links_no_cycles_cb(hid_t group_id, const char *name, const H5L_info2_t *info, void *op_data)
+{
+ hbool_t is_subgroup_link;
+ size_t *i = (size_t *)op_data;
+ size_t counter_val = *((size_t *)op_data);
+ size_t test_iteration;
+ size_t subgroup_number;
+ size_t link_idx_val;
+ char expected_link_name[LINK_VISIT_HARD_LINKS_NO_CYCLE_TEST_BUF_SIZE];
+ herr_t ret_val = H5_ITER_CONT;
+
+ UNUSED(group_id);
+
+ if (H5L_TYPE_HARD != info->type) {
+ ret_val = H5_ITER_ERROR;
+ HDprintf(" link type for link '%s' was not H5L_TYPE_HARD!\n", name);
+ goto done;
+ }
+
+ /*
+ * Four tests are run in the following order per link visiting API call:
+ *
+ * - visitation by link name in increasing order
+ * - visitation by link name in decreasing order
+ * - visitation by link creation order in increasing order
+ * - visitation by link creation order in decreasing order
+ *
+ * Based on how the test is written, this will mean that the dataset and group
+ * names will run in increasing order on the first and fourth tests and decreasing
+ * order on the second and third tests.
+ */
+ test_iteration = counter_val / LINK_VISIT_HARD_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_TEST;
+
+ /* Determine which subgroup is currently being processed */
+ subgroup_number =
+ /* Take the current counter value modulo the total number of links per test iteration (datasets +
+ subgroups) */
+ (counter_val % LINK_VISIT_HARD_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_TEST)
+ /* and divide it by the number of links per subgroup + 1 to get the subgroup's index number. */
+ / (LINK_VISIT_HARD_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_GROUP + 1);
+
+ /* Determine whether the current link points to the current subgroup itself */
+ is_subgroup_link = (counter_val % (LINK_VISIT_HARD_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_GROUP + 1) == 0);
+ if (!is_subgroup_link) {
+ /* Determine the index number of this link within its containing subgroup */
+ link_idx_val =
+ /* Take the current counter value modulo the total number of links per test iteration (datasets +
+ subgroups) */
+ (counter_val % LINK_VISIT_HARD_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_TEST)
+ /* and take it modulo the number of links per subgroup + 1, finally subtracting 1 to get the
+ link's index number. */
+ % (LINK_VISIT_HARD_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_GROUP + 1) -
+ 1;
+ }
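+    /*
+     * For illustration, if each subgroup contained 3 links, a pass would visit 4
+     * entries per subgroup (the subgroup link itself plus its 3 links); an in-pass
+     * offset of 5 would then decode to subgroup_number = 5 / 4 = 1 and
+     * link_idx_val = (5 % 4) - 1 = 0, i.e. the first link inside the second subgroup.
+     */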
+
+ if (test_iteration == 0 || test_iteration == 3) {
+ if (is_subgroup_link) {
+ HDsnprintf(expected_link_name, LINK_VISIT_HARD_LINKS_NO_CYCLE_TEST_BUF_SIZE,
+ LINK_VISIT_HARD_LINKS_NO_CYCLE_TEST_NESTED_GRP_NAME "%d", (int)subgroup_number);
+ }
+ else {
+ HDsnprintf(expected_link_name, LINK_VISIT_HARD_LINKS_NO_CYCLE_TEST_BUF_SIZE,
+ LINK_VISIT_HARD_LINKS_NO_CYCLE_TEST_NESTED_GRP_NAME
+ "%d"
+ "/" LINK_VISIT_HARD_LINKS_NO_CYCLE_TEST_LINK_NAME "%d",
+ (int)subgroup_number, (int)link_idx_val);
+ }
+ }
+ else {
+ if (is_subgroup_link) {
+ HDsnprintf(expected_link_name, LINK_VISIT_HARD_LINKS_NO_CYCLE_TEST_BUF_SIZE,
+ LINK_VISIT_HARD_LINKS_NO_CYCLE_TEST_NESTED_GRP_NAME "%d",
+ (int)(LINK_VISIT_HARD_LINKS_NO_CYCLE_TEST_NUM_SUBGROUPS - subgroup_number - 1));
+ }
+ else {
+ HDsnprintf(expected_link_name, LINK_VISIT_HARD_LINKS_NO_CYCLE_TEST_BUF_SIZE,
+ LINK_VISIT_HARD_LINKS_NO_CYCLE_TEST_NESTED_GRP_NAME
+ "%d"
+ "/" LINK_VISIT_HARD_LINKS_NO_CYCLE_TEST_LINK_NAME "%d",
+ (int)(LINK_VISIT_HARD_LINKS_NO_CYCLE_TEST_NUM_SUBGROUPS - subgroup_number - 1),
+ (int)(LINK_VISIT_HARD_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_GROUP - link_idx_val - 1));
+ }
+ }
+
+ if (HDstrncmp(name, expected_link_name, LINK_VISIT_HARD_LINKS_NO_CYCLE_TEST_BUF_SIZE)) {
+ HDprintf(" link name '%s' didn't match expected name '%s'\n", name, expected_link_name);
+ ret_val = H5_ITER_ERROR;
+ goto done;
+ }
+
+done:
+ (*i)++;
+
+ return ret_val;
+}
+
+/*
+ * Link visiting callback for the soft links + no cycles test which
+ * iterates recursively through all of the links in the test group and
+ * checks to make sure their names and link classes match what is expected.
+ */
+static herr_t
+link_visit_soft_links_no_cycles_cb(hid_t group_id, const char *name, const H5L_info2_t *info, void *op_data)
+{
+ hbool_t is_subgroup_link;
+ size_t *i = (size_t *)op_data;
+ size_t counter_val = *((size_t *)op_data);
+ size_t test_iteration;
+ size_t subgroup_number;
+ size_t link_idx_val;
+ char expected_link_name[LINK_VISIT_SOFT_LINKS_NO_CYCLE_TEST_BUF_SIZE];
+ herr_t ret_val = H5_ITER_CONT;
+
+ UNUSED(group_id);
+ UNUSED(op_data);
+
+ /* Determine whether the current link points to the current subgroup itself */
+ is_subgroup_link = (counter_val % (LINK_VISIT_SOFT_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_GROUP + 1) == 0);
+
+ if (is_subgroup_link) {
+ if (H5L_TYPE_HARD != info->type) {
+ ret_val = H5_ITER_ERROR;
+ HDprintf(" link type for link '%s' was not H5L_TYPE_HARD!\n", name);
+ goto done;
+ }
+ }
+ else {
+ if (H5L_TYPE_SOFT != info->type) {
+ ret_val = H5_ITER_ERROR;
+ HDprintf(" link type for link '%s' was not H5L_TYPE_SOFT!\n", name);
+ goto done;
+ }
+ }
+
+ /*
+ * Four tests are run in the following order per link visiting API call:
+ *
+ * - visitation by link name in increasing order
+ * - visitation by link name in decreasing order
+ * - visitation by link creation order in increasing order
+ * - visitation by link creation order in decreasing order
+ *
+ * Based on how the test is written, this will mean that the link names will
+ * run in increasing order on the first and fourth tests and decreasing
+ * order on the second and third tests.
+ */
+ test_iteration = counter_val / LINK_VISIT_SOFT_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_TEST;
+
+ /* Determine which subgroup is currently being processed */
+ subgroup_number =
+ /* Take the current counter value modulo the total number of links per test iteration (links +
+ subgroups) */
+ (counter_val % LINK_VISIT_SOFT_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_TEST)
+ /* and divide it by the number of links per subgroup + 1 to get the subgroup's index number. */
+ / (LINK_VISIT_SOFT_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_GROUP + 1);
+
+ if (!is_subgroup_link) {
+ /* Determine the index number of this link within its containing subgroup */
+ link_idx_val =
+ /* Take the current counter value modulo the total number of links per test iteration (links +
+ subgroups) */
+ (counter_val % LINK_VISIT_SOFT_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_TEST)
+ /* and take it modulo the number of links per subgroup + 1, finally subtracting 1 to get the
+ link's index number. */
+ % (LINK_VISIT_SOFT_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_GROUP + 1) -
+ 1;
+ }
+
+ if (test_iteration == 0 || test_iteration == 3) {
+ if (is_subgroup_link) {
+ HDsnprintf(expected_link_name, LINK_VISIT_SOFT_LINKS_NO_CYCLE_TEST_BUF_SIZE,
+ LINK_VISIT_SOFT_LINKS_NO_CYCLE_TEST_NESTED_GRP_NAME "%d", (int)subgroup_number);
+ }
+ else {
+ HDsnprintf(expected_link_name, LINK_VISIT_SOFT_LINKS_NO_CYCLE_TEST_BUF_SIZE,
+ LINK_VISIT_SOFT_LINKS_NO_CYCLE_TEST_NESTED_GRP_NAME
+ "%d"
+ "/" LINK_VISIT_SOFT_LINKS_NO_CYCLE_TEST_LINK_NAME "%d",
+ (int)subgroup_number, (int)link_idx_val);
+ }
+ }
+ else {
+ if (is_subgroup_link) {
+ HDsnprintf(expected_link_name, LINK_VISIT_SOFT_LINKS_NO_CYCLE_TEST_BUF_SIZE,
+ LINK_VISIT_SOFT_LINKS_NO_CYCLE_TEST_NESTED_GRP_NAME "%d",
+ (int)(LINK_VISIT_SOFT_LINKS_NO_CYCLE_TEST_NUM_SUBGROUPS - subgroup_number - 1));
+ }
+ else {
+ HDsnprintf(expected_link_name, LINK_VISIT_SOFT_LINKS_NO_CYCLE_TEST_BUF_SIZE,
+ LINK_VISIT_SOFT_LINKS_NO_CYCLE_TEST_NESTED_GRP_NAME
+ "%d"
+ "/" LINK_VISIT_SOFT_LINKS_NO_CYCLE_TEST_LINK_NAME "%d",
+ (int)(LINK_VISIT_SOFT_LINKS_NO_CYCLE_TEST_NUM_SUBGROUPS - subgroup_number - 1),
+ (int)(LINK_VISIT_SOFT_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_GROUP - link_idx_val - 1));
+ }
+ }
+
+ if (HDstrncmp(name, expected_link_name, LINK_VISIT_SOFT_LINKS_NO_CYCLE_TEST_BUF_SIZE)) {
+ HDprintf(" link name '%s' didn't match expected name '%s'\n", name, expected_link_name);
+ ret_val = H5_ITER_ERROR;
+ goto done;
+ }
+
+done:
+ (*i)++;
+
+ return ret_val;
+}
+
+/*
+ * Link visiting callback for the external links + no cycles test which
+ * iterates recursively through all of the links in the test group and
+ * checks to make sure their names and link classes match what is expected.
+ */
+#ifndef NO_EXTERNAL_LINKS
+static herr_t
+link_visit_external_links_no_cycles_cb(hid_t group_id, const char *name, const H5L_info2_t *info,
+ void *op_data)
+{
+ hbool_t is_subgroup_link;
+ size_t *i = (size_t *)op_data;
+ size_t counter_val = *((size_t *)op_data);
+ size_t test_iteration;
+ size_t subgroup_number;
+ size_t link_idx_val;
+ char expected_link_name[LINK_VISIT_EXT_LINKS_NO_CYCLE_TEST_BUF_SIZE];
+ herr_t ret_val = H5_ITER_CONT;
+
+ UNUSED(group_id);
+ UNUSED(op_data);
+
+ /* Determine whether the current link points to the current subgroup itself */
+ is_subgroup_link = (counter_val % (LINK_VISIT_EXT_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_GROUP + 1) == 0);
+
+ if (is_subgroup_link) {
+ if (H5L_TYPE_HARD != info->type) {
+ ret_val = H5_ITER_ERROR;
+ HDprintf(" link type for link '%s' was not H5L_TYPE_HARD!\n", name);
+ goto done;
+ }
+ }
+ else {
+ if (H5L_TYPE_EXTERNAL != info->type) {
+ ret_val = H5_ITER_ERROR;
+ HDprintf(" link type for link '%s' was not H5L_TYPE_EXTERNAL!\n", name);
+ goto done;
+ }
+ }
+
+ /*
+ * Four tests are run in the following order per link visiting API call:
+ *
+ * - visitation by link name in increasing order
+ * - visitation by link name in decreasing order
+ * - visitation by link creation order in increasing order
+ * - visitation by link creation order in decreasing order
+ *
+ * Based on how the test is written, this will mean that the link names will
+ * run in increasing order on the first and fourth tests and decreasing
+ * order on the second and third tests.
+ */
+ test_iteration = counter_val / LINK_VISIT_EXT_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_TEST;
+
+ /* Determine which subgroup is currently being processed */
+ subgroup_number =
+ /* Take the current counter value modulo the total number of links per test iteration (links +
+ subgroups) */
+ (counter_val % LINK_VISIT_EXT_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_TEST)
+ /* and divide it by the number of links per subgroup + 1 to get the subgroup's index number. */
+ / (LINK_VISIT_EXT_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_GROUP + 1);
+
+ if (!is_subgroup_link) {
+ /* Determine the index number of this link within its containing subgroup */
+ link_idx_val =
+ /* Take the current counter value modulo the total number of links per test iteration (links +
+ subgroups) */
+ (counter_val % LINK_VISIT_EXT_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_TEST)
+ /* and take it modulo the number of links per subgroup + 1, finally subtracting 1 to get the
+ link's index number. */
+ % (LINK_VISIT_EXT_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_GROUP + 1) -
+ 1;
+ }
+
+ if (test_iteration == 0 || test_iteration == 3) {
+ if (is_subgroup_link) {
+ HDsnprintf(expected_link_name, LINK_VISIT_EXT_LINKS_NO_CYCLE_TEST_BUF_SIZE,
+ LINK_VISIT_EXT_LINKS_NO_CYCLE_TEST_NESTED_GRP_NAME "%d", (int)subgroup_number);
+ }
+ else {
+ HDsnprintf(expected_link_name, LINK_VISIT_EXT_LINKS_NO_CYCLE_TEST_BUF_SIZE,
+ LINK_VISIT_EXT_LINKS_NO_CYCLE_TEST_NESTED_GRP_NAME
+ "%d"
+ "/" LINK_VISIT_EXT_LINKS_NO_CYCLE_TEST_LINK_NAME "%d",
+ (int)subgroup_number, (int)link_idx_val);
+ }
+ }
+ else {
+ if (is_subgroup_link) {
+ HDsnprintf(expected_link_name, LINK_VISIT_EXT_LINKS_NO_CYCLE_TEST_BUF_SIZE,
+ LINK_VISIT_EXT_LINKS_NO_CYCLE_TEST_NESTED_GRP_NAME "%d",
+ (int)(LINK_VISIT_EXT_LINKS_NO_CYCLE_TEST_NUM_SUBGROUPS - subgroup_number - 1));
+ }
+ else {
+ HDsnprintf(expected_link_name, LINK_VISIT_EXT_LINKS_NO_CYCLE_TEST_BUF_SIZE,
+ LINK_VISIT_EXT_LINKS_NO_CYCLE_TEST_NESTED_GRP_NAME
+ "%d"
+ "/" LINK_VISIT_EXT_LINKS_NO_CYCLE_TEST_LINK_NAME "%d",
+ (int)(LINK_VISIT_EXT_LINKS_NO_CYCLE_TEST_NUM_SUBGROUPS - subgroup_number - 1),
+ (int)(LINK_VISIT_EXT_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_GROUP - link_idx_val - 1));
+ }
+ }
+
+ if (HDstrncmp(name, expected_link_name, LINK_VISIT_EXT_LINKS_NO_CYCLE_TEST_BUF_SIZE)) {
+ HDprintf(" link name '%s' didn't match expected name '%s'\n", name, expected_link_name);
+ ret_val = H5_ITER_ERROR;
+ goto done;
+ }
+
+done:
+ (*i)++;
+
+ return ret_val;
+}
+#endif
+#ifndef NO_USER_DEFINED_LINKS
+static herr_t link_visit_ud_links_no_cycles_cb(hid_t group_id, const char *name, const H5L_info2_t *info,
+ void *op_data);
+#endif
+/*
+ * Link visiting callback for the mixed link types + no cycles test which
+ * iterates recursively through all of the links in the test group and
+ * checks to make sure their names and link classes match what is expected.
+ */
+#if !defined(NO_EXTERNAL_LINKS) && !defined(NO_USER_DEFINED_LINKS)
+static herr_t
+link_visit_mixed_links_no_cycles_cb(hid_t group_id, const char *name, const H5L_info2_t *info, void *op_data)
+{
+ size_t *i = (size_t *)op_data;
+ size_t counter_val = *((size_t *)op_data);
+ herr_t ret_val = 0;
+
+ UNUSED(group_id);
+ UNUSED(op_data);
+
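+ /*
+ * As with the other link visiting callbacks, four passes are expected per
+ * visiting API call: by link name in increasing then decreasing order,
+ * then by creation order in increasing then decreasing order. Each pass
+ * visits the same 8 links, so the accepted counter values below are the
+ * link's position within the ranges 0-7, 8-15, 16-23 and 24-31.
+ */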
+ if (!HDstrncmp(name,
+ LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_SUBGROUP_NAME2
+ "/" LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_LINK_NAME1,
+ strlen(LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_SUBGROUP_NAME2
+ "/" LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_LINK_NAME1) +
+ 1) &&
+ (counter_val == 2 || counter_val == 14 || counter_val == 18 || counter_val == 30)) {
+ if (H5L_TYPE_HARD != info->type) {
+ ret_val = -1;
+ HDprintf(" link type for link '%s' was not H5L_TYPE_HARD!\n",
+ LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_SUBGROUP_NAME2
+ "/" LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_LINK_NAME1);
+ }
+
+ goto done;
+ }
+ else if (!HDstrncmp(name,
+ LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_SUBGROUP_NAME2
+ "/" LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_LINK_NAME2,
+ strlen(LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_SUBGROUP_NAME2
+ "/" LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_LINK_NAME2) +
+ 1) &&
+ (counter_val == 3 || counter_val == 13 || counter_val == 19 || counter_val == 29)) {
+ if (H5L_TYPE_SOFT != info->type) {
+ ret_val = -1;
+ HDprintf(" link type for link '%s' was not H5L_TYPE_SOFT!\n",
+ LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_SUBGROUP_NAME2
+ "/" LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_LINK_NAME2);
+ }
+
+ goto done;
+ }
+ else if (!HDstrncmp(name,
+ LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_SUBGROUP_NAME3
+ "/" LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_LINK_NAME3,
+ strlen(LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_SUBGROUP_NAME3
+ "/" LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_LINK_NAME3) +
+ 1) &&
+ (counter_val == 6 || counter_val == 10 || counter_val == 22 || counter_val == 26)) {
+ if (H5L_TYPE_EXTERNAL != info->type) {
+ ret_val = -1;
+ HDprintf(" link type for link '%s' was not H5L_TYPE_EXTERNAL!\n",
+ LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_SUBGROUP_NAME3
+ "/" LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_LINK_NAME3);
+ }
+
+ goto done;
+ }
+ else if (!HDstrncmp(name,
+ LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_SUBGROUP_NAME3
+ "/" LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_LINK_NAME4,
+ strlen(LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_SUBGROUP_NAME3
+ "/" LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_LINK_NAME4) +
+ 1) &&
+ (counter_val == 7 || counter_val == 9 || counter_val == 23 || counter_val == 25)) {
+ if (H5L_TYPE_HARD != info->type) {
+ ret_val = -1;
+ HDprintf(" link type for link '%s' was not H5L_TYPE_HARD!\n",
+ LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_SUBGROUP_NAME3
+ "/" LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_LINK_NAME4);
+ }
+
+ goto done;
+ }
+ else if (!HDstrncmp(name,
+ LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_SUBGROUP_NAME2
+ "/" LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_DSET_NAME,
+ strlen(LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_SUBGROUP_NAME2
+ "/" LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_DSET_NAME) +
+ 1) &&
+ (counter_val == 1 || counter_val == 15 || counter_val == 17 || counter_val == 31)) {
+ if (H5L_TYPE_HARD != info->type) {
+ ret_val = -1;
+ HDprintf(" link type for link '%s' was not H5L_TYPE_HARD!\n",
+ LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_SUBGROUP_NAME2
+ "/" LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_DSET_NAME);
+ }
+
+ goto done;
+ }
+ else if (!HDstrncmp(name,
+ LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_SUBGROUP_NAME3
+ "/" LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_DSET_NAME2,
+ strlen(LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_SUBGROUP_NAME3
+ "/" LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_DSET_NAME2) +
+ 1) &&
+ (counter_val == 5 || counter_val == 11 || counter_val == 21 || counter_val == 27)) {
+ if (H5L_TYPE_HARD != info->type) {
+ ret_val = -1;
+ HDprintf(" link type for link '%s' was not H5L_TYPE_HARD!\n",
+ LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_SUBGROUP_NAME3
+ "/" LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_DSET_NAME2);
+ }
+
+ goto done;
+ }
+ else if (!HDstrncmp(name, LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_SUBGROUP_NAME2,
+ strlen(LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_SUBGROUP_NAME2) + 1) &&
+ (counter_val == 0 || counter_val == 12 || counter_val == 16 || counter_val == 28)) {
+ if (H5L_TYPE_HARD != info->type) {
+ ret_val = -1;
+ HDprintf(" link type for link '%s' was not H5L_TYPE_HARD!\n",
+ LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_SUBGROUP_NAME2);
+ }
+
+ goto done;
+ }
+ else if (!HDstrncmp(name, LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_SUBGROUP_NAME3,
+ strlen(LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_SUBGROUP_NAME3) + 1) &&
+ (counter_val == 4 || counter_val == 8 || counter_val == 20 || counter_val == 24)) {
+ if (H5L_TYPE_HARD != info->type) {
+ ret_val = -1;
+ HDprintf(" link type for link '%s' was not H5L_TYPE_HARD!\n",
+ LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_SUBGROUP_NAME3);
+ }
+
+ goto done;
+ }
+
+ HDprintf(" link name '%s' didn't match known names or came in an incorrect order\n", name);
+
+ ret_val = -1;
+
+done:
+ (*i)++;
+
+ return ret_val;
+}
+#endif
+
+/*
+ * Link visiting callback for the hard links + cycles test which
+ * iterates recursively through all of the links in the test group and
+ * checks to make sure their names and link classes match what is expected.
+ */
+static herr_t
+link_visit_hard_links_cycles_cb(hid_t group_id, const char *name, const H5L_info2_t *info, void *op_data)
+{
+ hbool_t is_subgroup_link;
+ size_t *i = (size_t *)op_data;
+ size_t counter_val = *((size_t *)op_data);
+ size_t test_iteration;
+ size_t subgroup_number;
+ size_t link_idx_val;
+ char expected_link_name[LINK_VISIT_HARD_LINKS_CYCLE_TEST_BUF_SIZE];
+ herr_t ret_val = H5_ITER_CONT;
+
+ UNUSED(group_id);
+ UNUSED(op_data);
+
+ if (H5L_TYPE_HARD != info->type) {
+ ret_val = H5_ITER_ERROR;
+ HDprintf(" link type for link '%s' was not H5L_TYPE_HARD!\n", name);
+ goto done;
+ }
+
+ /*
+ * Four tests are run in the following order per link visiting API call:
+ *
+ * - visitation by link name in increasing order
+ * - visitation by link name in decreasing order
+ * - visitation by link creation order in increasing order
+ * - visitation by link creation order in decreasing order
+ *
+ * Based on how the test is written, this will mean that the link and group
+ * names will run in increasing order on the first and fourth tests and decreasing
+ * order on the second and third tests.
+ */
+ test_iteration = counter_val / LINK_VISIT_HARD_LINKS_CYCLE_TEST_NUM_LINKS_PER_TEST;
+
+ /* Determine which subgroup is currently being processed */
+ subgroup_number =
+ /* Take the current counter value modulo the total number of links per test iteration (links +
+ subgroups) */
+ (counter_val % LINK_VISIT_HARD_LINKS_CYCLE_TEST_NUM_LINKS_PER_TEST)
+ /* and divide it by the number of links per subgroup + 1 to get the subgroup's index number. */
+ / (LINK_VISIT_HARD_LINKS_CYCLE_TEST_NUM_LINKS_PER_GROUP + 1);
+
+ /* Determine whether the current link points to the current subgroup itself */
+ is_subgroup_link = (counter_val % (LINK_VISIT_HARD_LINKS_CYCLE_TEST_NUM_LINKS_PER_GROUP + 1) == 0);
+ if (!is_subgroup_link) {
+ /* Determine the index number of this link within its containing subgroup */
+ link_idx_val =
+ /* Take the current counter value modulo the total number of links per test iteration (links +
+ subgroups) */
+ (counter_val % LINK_VISIT_HARD_LINKS_CYCLE_TEST_NUM_LINKS_PER_TEST)
+ /* and take it modulo the number of links per subgroup + 1, finally subtracting 1 to get the
+ link's index number. */
+ % (LINK_VISIT_HARD_LINKS_CYCLE_TEST_NUM_LINKS_PER_GROUP + 1) -
+ 1;
+ }
+
+ if (test_iteration == 0 || test_iteration == 3) {
+ if (is_subgroup_link) {
+ HDsnprintf(expected_link_name, LINK_VISIT_HARD_LINKS_CYCLE_TEST_BUF_SIZE,
+ LINK_VISIT_HARD_LINKS_CYCLE_TEST_NESTED_GRP_NAME "%d", (int)subgroup_number);
+ }
+ else {
+ HDsnprintf(expected_link_name, LINK_VISIT_HARD_LINKS_CYCLE_TEST_BUF_SIZE,
+ LINK_VISIT_HARD_LINKS_CYCLE_TEST_NESTED_GRP_NAME
+ "%d"
+ "/" LINK_VISIT_HARD_LINKS_CYCLE_TEST_LINK_NAME "%d",
+ (int)subgroup_number, (int)link_idx_val);
+ }
+ }
+ else {
+ if (is_subgroup_link) {
+ HDsnprintf(expected_link_name, LINK_VISIT_HARD_LINKS_CYCLE_TEST_BUF_SIZE,
+ LINK_VISIT_HARD_LINKS_CYCLE_TEST_NESTED_GRP_NAME "%d",
+ (int)(LINK_VISIT_HARD_LINKS_CYCLE_TEST_NUM_SUBGROUPS - subgroup_number - 1));
+ }
+ else {
+ HDsnprintf(expected_link_name, LINK_VISIT_HARD_LINKS_CYCLE_TEST_BUF_SIZE,
+ LINK_VISIT_HARD_LINKS_CYCLE_TEST_NESTED_GRP_NAME
+ "%d"
+ "/" LINK_VISIT_HARD_LINKS_CYCLE_TEST_LINK_NAME "%d",
+ (int)(LINK_VISIT_HARD_LINKS_CYCLE_TEST_NUM_SUBGROUPS - subgroup_number - 1),
+ (int)(LINK_VISIT_HARD_LINKS_CYCLE_TEST_NUM_LINKS_PER_GROUP - link_idx_val - 1));
+ }
+ }
+
+ if (HDstrncmp(name, expected_link_name, LINK_VISIT_HARD_LINKS_CYCLE_TEST_BUF_SIZE)) {
+ HDprintf(" link name '%s' didn't match expected name '%s'\n", name, expected_link_name);
+ ret_val = H5_ITER_ERROR;
+ goto done;
+ }
+
+done:
+ (*i)++;
+
+ return ret_val;
+}
+
+/*
+ * Link visiting callback for the soft links + cycles test which
+ * iterates recursively through all of the links in the test group and
+ * checks to make sure their names and link classes match what is expected.
+ */
+static herr_t
+link_visit_soft_links_cycles_cb(hid_t group_id, const char *name, const H5L_info2_t *info, void *op_data)
+{
+ hbool_t is_subgroup_link;
+ size_t *i = (size_t *)op_data;
+ size_t counter_val = *((size_t *)op_data);
+ size_t test_iteration;
+ size_t subgroup_number;
+ size_t link_idx_val;
+ char expected_link_name[LINK_VISIT_SOFT_LINKS_CYCLE_TEST_BUF_SIZE];
+ herr_t ret_val = H5_ITER_CONT;
+
+ UNUSED(group_id);
+ UNUSED(op_data);
+
+ /* Determine whether the current link points to the current subgroup itself */
+ is_subgroup_link = (counter_val % (LINK_VISIT_SOFT_LINKS_CYCLE_TEST_NUM_LINKS_PER_GROUP + 1) == 0);
+
+ if (is_subgroup_link) {
+ if (H5L_TYPE_HARD != info->type) {
+ ret_val = H5_ITER_ERROR;
+ HDprintf(" link type for link '%s' was not H5L_TYPE_HARD!\n", name);
+ goto done;
+ }
+ }
+ else {
+ if (H5L_TYPE_SOFT != info->type) {
+ ret_val = H5_ITER_ERROR;
+ HDprintf(" link type for link '%s' was not H5L_TYPE_SOFT!\n", name);
+ goto done;
+ }
+ }
+
+ /*
+ * Four tests are run in the following order per link visiting API call:
+ *
+ * - visitation by link name in increasing order
+ * - visitation by link name in decreasing order
+ * - visitation by link creation order in increasing order
+ * - visitation by link creation order in decreasing order
+ *
+ * Based on how the test is written, this will mean that the link and group
+ * names will run in increasing order on the first and fourth tests and decreasing
+ * order on the second and third tests.
+ */
+ test_iteration = counter_val / LINK_VISIT_SOFT_LINKS_CYCLE_TEST_NUM_LINKS_PER_TEST;
+
+ /* Determine which subgroup is currently being processed */
+ subgroup_number =
+ /* Take the current counter value modulo the total number of links per test iteration (links +
+ subgroups) */
+ (counter_val % LINK_VISIT_SOFT_LINKS_CYCLE_TEST_NUM_LINKS_PER_TEST)
+ /* and divide it by the number of links per subgroup + 1 to get the subgroup's index number. */
+ / (LINK_VISIT_SOFT_LINKS_CYCLE_TEST_NUM_LINKS_PER_GROUP + 1);
+
+ if (!is_subgroup_link) {
+ /* Determine the index number of this link within its containing subgroup */
+ link_idx_val =
+ /* Take the current counter value modulo the total number of links per test iteration (links +
+ subgroups) */
+ (counter_val % LINK_VISIT_SOFT_LINKS_CYCLE_TEST_NUM_LINKS_PER_TEST)
+ /* and take it modulo the number of links per subgroup + 1, finally subtracting 1 to get the
+ link's index number. */
+ % (LINK_VISIT_SOFT_LINKS_CYCLE_TEST_NUM_LINKS_PER_GROUP + 1) -
+ 1;
+ }
+
+ if (test_iteration == 0 || test_iteration == 3) {
+ if (is_subgroup_link) {
+ HDsnprintf(expected_link_name, LINK_VISIT_SOFT_LINKS_CYCLE_TEST_BUF_SIZE,
+ LINK_VISIT_SOFT_LINKS_CYCLE_TEST_NESTED_GRP_NAME "%d", (int)subgroup_number);
+ }
+ else {
+ HDsnprintf(expected_link_name, LINK_VISIT_SOFT_LINKS_CYCLE_TEST_BUF_SIZE,
+ LINK_VISIT_SOFT_LINKS_CYCLE_TEST_NESTED_GRP_NAME
+ "%d"
+ "/" LINK_VISIT_SOFT_LINKS_CYCLE_TEST_LINK_NAME "%d",
+ (int)subgroup_number, (int)link_idx_val);
+ }
+ }
+ else {
+ if (is_subgroup_link) {
+ HDsnprintf(expected_link_name, LINK_VISIT_SOFT_LINKS_CYCLE_TEST_BUF_SIZE,
+ LINK_VISIT_SOFT_LINKS_CYCLE_TEST_NESTED_GRP_NAME "%d",
+ (int)(LINK_VISIT_SOFT_LINKS_CYCLE_TEST_NUM_SUBGROUPS - subgroup_number - 1));
+ }
+ else {
+ HDsnprintf(expected_link_name, LINK_VISIT_SOFT_LINKS_CYCLE_TEST_BUF_SIZE,
+ LINK_VISIT_SOFT_LINKS_CYCLE_TEST_NESTED_GRP_NAME
+ "%d"
+ "/" LINK_VISIT_SOFT_LINKS_CYCLE_TEST_LINK_NAME "%d",
+ (int)(LINK_VISIT_SOFT_LINKS_CYCLE_TEST_NUM_SUBGROUPS - subgroup_number - 1),
+ (int)(LINK_VISIT_SOFT_LINKS_CYCLE_TEST_NUM_LINKS_PER_GROUP - link_idx_val - 1));
+ }
+ }
+
+ if (HDstrncmp(name, expected_link_name, LINK_VISIT_SOFT_LINKS_CYCLE_TEST_BUF_SIZE)) {
+ HDprintf(" link name '%s' didn't match expected name '%s'\n", name, expected_link_name);
+ ret_val = H5_ITER_ERROR;
+ goto done;
+ }
+
+done:
+ (*i)++;
+
+ return ret_val;
+}
+
+/*
+ * Link visiting callback for the external links + cycles test which
+ * iterates recursively through all of the links in the test group and
+ * checks to make sure their names and link classes match what is expected.
+ */
+#ifndef NO_EXTERNAL_LINKS
+static herr_t
+link_visit_external_links_cycles_cb(hid_t group_id, const char *name, const H5L_info2_t *info, void *op_data)
+{
+ hbool_t is_subgroup_link;
+ size_t *i = (size_t *)op_data;
+ size_t counter_val = *((size_t *)op_data);
+ size_t test_iteration;
+ size_t subgroup_number;
+ size_t link_idx_val;
+ char expected_link_name[LINK_VISIT_EXT_LINKS_CYCLE_TEST_BUF_SIZE];
+ herr_t ret_val = H5_ITER_CONT;
+
+ UNUSED(group_id);
+ UNUSED(op_data);
+
+ /* Determine whether the current link points to the current subgroup itself */
+ is_subgroup_link = (counter_val % (LINK_VISIT_EXT_LINKS_CYCLE_TEST_NUM_LINKS_PER_GROUP + 1) == 0);
+
+ if (is_subgroup_link) {
+ if (H5L_TYPE_HARD != info->type) {
+ ret_val = H5_ITER_ERROR;
+ HDprintf(" link type for link '%s' was not H5L_TYPE_HARD!\n", name);
+ goto done;
+ }
+ }
+ else {
+ if (H5L_TYPE_EXTERNAL != info->type) {
+ ret_val = H5_ITER_ERROR;
+ HDprintf(" link type for link '%s' was not H5L_TYPE_EXTERNAL!\n", name);
+ goto done;
+ }
+ }
+
+ /*
+ * Four tests are run in the following order per link visiting API call:
+ *
+ * - visitation by link name in increasing order
+ * - visitation by link name in decreasing order
+ * - visitation by link creation order in increasing order
+ * - visitation by link creation order in decreasing order
+ *
+ * Based on how the test is written, this will mean that the link and group
+ * names will run in increasing order on the first and fourth tests and decreasing
+ * order on the second and third tests.
+ */
+ test_iteration = counter_val / LINK_VISIT_EXT_LINKS_CYCLE_TEST_NUM_LINKS_PER_TEST;
+
+ /* Determine which subgroup is currently being processed */
+ subgroup_number =
+ /* Take the current counter value modulo the total number of links per test iteration (links +
+ subgroups) */
+ (counter_val % LINK_VISIT_EXT_LINKS_CYCLE_TEST_NUM_LINKS_PER_TEST)
+ /* and divide it by the number of links per subgroup + 1 to get the subgroup's index number. */
+ / (LINK_VISIT_EXT_LINKS_CYCLE_TEST_NUM_LINKS_PER_GROUP + 1);
+
+ if (!is_subgroup_link) {
+ /* Determine the index number of this link within its containing subgroup */
+ link_idx_val =
+ /* Take the current counter value modulo the total number of links per test iteration (links +
+ subgroups) */
+ (counter_val % LINK_VISIT_EXT_LINKS_CYCLE_TEST_NUM_LINKS_PER_TEST)
+ /* and take it modulo the number of links per subgroup + 1, finally subtracting 1 to get the
+ link's index number. */
+ % (LINK_VISIT_EXT_LINKS_CYCLE_TEST_NUM_LINKS_PER_GROUP + 1) -
+ 1;
+ }
+
+ if (test_iteration == 0 || test_iteration == 3) {
+ if (is_subgroup_link) {
+ HDsnprintf(expected_link_name, LINK_VISIT_EXT_LINKS_CYCLE_TEST_BUF_SIZE,
+ LINK_VISIT_EXT_LINKS_CYCLE_TEST_NESTED_GRP_NAME "%d", (int)subgroup_number);
+ }
+ else {
+ HDsnprintf(expected_link_name, LINK_VISIT_EXT_LINKS_CYCLE_TEST_BUF_SIZE,
+ LINK_VISIT_EXT_LINKS_CYCLE_TEST_NESTED_GRP_NAME
+ "%d"
+ "/" LINK_VISIT_EXT_LINKS_CYCLE_TEST_LINK_NAME "%d",
+ (int)subgroup_number, (int)link_idx_val);
+ }
+ }
+ else {
+ if (is_subgroup_link) {
+ HDsnprintf(expected_link_name, LINK_VISIT_EXT_LINKS_CYCLE_TEST_BUF_SIZE,
+ LINK_VISIT_EXT_LINKS_CYCLE_TEST_NESTED_GRP_NAME "%d",
+ (int)(LINK_VISIT_EXT_LINKS_CYCLE_TEST_NUM_SUBGROUPS - subgroup_number - 1));
+ }
+ else {
+ HDsnprintf(expected_link_name, LINK_VISIT_EXT_LINKS_CYCLE_TEST_BUF_SIZE,
+ LINK_VISIT_EXT_LINKS_CYCLE_TEST_NESTED_GRP_NAME
+ "%d"
+ "/" LINK_VISIT_EXT_LINKS_CYCLE_TEST_LINK_NAME "%d",
+ (int)(LINK_VISIT_EXT_LINKS_CYCLE_TEST_NUM_SUBGROUPS - subgroup_number - 1),
+ (int)(LINK_VISIT_EXT_LINKS_CYCLE_TEST_NUM_LINKS_PER_GROUP - link_idx_val - 1));
+ }
+ }
+
+ if (HDstrncmp(name, expected_link_name, LINK_VISIT_EXT_LINKS_CYCLE_TEST_BUF_SIZE)) {
+ HDprintf(" link name '%s' didn't match expected name '%s'\n", name, expected_link_name);
+ ret_val = H5_ITER_ERROR;
+ goto done;
+ }
+
+done:
+ (*i)++;
+
+ return ret_val;
+}
+#endif
+#ifndef NO_USER_DEFINED_LINKS
+static herr_t link_visit_ud_links_cycles_cb(hid_t group_id, const char *name, const H5L_info2_t *info,
+ void *op_data);
+#endif
+/*
+ * Link visiting callback for the mixed link types + cycles test which
+ * iterates recursively through all of the links in the test group and
+ * checks to make sure their names and link classes match what is expected.
+ */
+#if !defined(NO_EXTERNAL_LINKS) && !defined(NO_USER_DEFINED_LINKS)
+static herr_t
+link_visit_mixed_links_cycles_cb(hid_t group_id, const char *name, const H5L_info2_t *info, void *op_data)
+{
+ size_t *i = (size_t *)op_data;
+ size_t counter_val = *((size_t *)op_data);
+ herr_t ret_val = 0;
+
+ UNUSED(group_id);
+ UNUSED(op_data);
+
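+ /*
+ * Same structure as the no-cycle mixed-link callback above: four passes,
+ * each visiting the same 6 links, so the accepted counter values are the
+ * link's position within the ranges 0-5, 6-11, 12-17 and 18-23.
+ */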
+ if (!HDstrncmp(name,
+ LINK_VISIT_MIXED_LINKS_CYCLE_TEST_SUBGROUP_NAME2
+ "/" LINK_VISIT_MIXED_LINKS_CYCLE_TEST_LINK_NAME1,
+ strlen(LINK_VISIT_MIXED_LINKS_CYCLE_TEST_SUBGROUP_NAME2
+ "/" LINK_VISIT_MIXED_LINKS_CYCLE_TEST_LINK_NAME1) +
+ 1) &&
+ (counter_val == 1 || counter_val == 11 || counter_val == 13 || counter_val == 23)) {
+ if (H5L_TYPE_HARD != info->type) {
+ ret_val = -1;
+ HDprintf(" link type for link '%s' was not H5L_TYPE_HARD!\n",
+ LINK_VISIT_MIXED_LINKS_CYCLE_TEST_SUBGROUP_NAME2
+ "/" LINK_VISIT_MIXED_LINKS_CYCLE_TEST_LINK_NAME1);
+ }
+
+ goto done;
+ }
+ else if (!HDstrncmp(name,
+ LINK_VISIT_MIXED_LINKS_CYCLE_TEST_SUBGROUP_NAME2
+ "/" LINK_VISIT_MIXED_LINKS_CYCLE_TEST_LINK_NAME2,
+ strlen(LINK_VISIT_MIXED_LINKS_CYCLE_TEST_SUBGROUP_NAME2
+ "/" LINK_VISIT_MIXED_LINKS_CYCLE_TEST_LINK_NAME2) +
+ 1) &&
+ (counter_val == 2 || counter_val == 10 || counter_val == 14 || counter_val == 22)) {
+ if (H5L_TYPE_SOFT != info->type) {
+ ret_val = -1;
+ HDprintf(" link type for link '%s' was not H5L_TYPE_SOFT!\n",
+ LINK_VISIT_MIXED_LINKS_CYCLE_TEST_SUBGROUP_NAME2
+ "/" LINK_VISIT_MIXED_LINKS_CYCLE_TEST_LINK_NAME2);
+ }
+
+ goto done;
+ }
+ else if (!HDstrncmp(name,
+ LINK_VISIT_MIXED_LINKS_CYCLE_TEST_SUBGROUP_NAME3
+ "/" LINK_VISIT_MIXED_LINKS_CYCLE_TEST_LINK_NAME3,
+ strlen(LINK_VISIT_MIXED_LINKS_CYCLE_TEST_SUBGROUP_NAME3
+ "/" LINK_VISIT_MIXED_LINKS_CYCLE_TEST_LINK_NAME3) +
+ 1) &&
+ (counter_val == 4 || counter_val == 8 || counter_val == 16 || counter_val == 20)) {
+ if (H5L_TYPE_EXTERNAL != info->type) {
+ ret_val = -1;
+ HDprintf(" link type for link '%s' was not H5L_TYPE_EXTERNAL!\n",
+ LINK_VISIT_MIXED_LINKS_CYCLE_TEST_SUBGROUP_NAME3
+ "/" LINK_VISIT_MIXED_LINKS_CYCLE_TEST_LINK_NAME3);
+ }
+
+ goto done;
+ }
+ else if (!HDstrncmp(name,
+ LINK_VISIT_MIXED_LINKS_CYCLE_TEST_SUBGROUP_NAME3
+ "/" LINK_VISIT_MIXED_LINKS_CYCLE_TEST_LINK_NAME4,
+ strlen(LINK_VISIT_MIXED_LINKS_CYCLE_TEST_SUBGROUP_NAME3
+ "/" LINK_VISIT_MIXED_LINKS_CYCLE_TEST_LINK_NAME4) +
+ 1) &&
+ (counter_val == 5 || counter_val == 7 || counter_val == 17 || counter_val == 19)) {
+ if (H5L_TYPE_HARD != info->type) {
+ ret_val = -1;
+ HDprintf(" link type for link '%s' was not H5L_TYPE_HARD!\n",
+ LINK_VISIT_MIXED_LINKS_CYCLE_TEST_SUBGROUP_NAME3
+ "/" LINK_VISIT_MIXED_LINKS_CYCLE_TEST_LINK_NAME4);
+ }
+
+ goto done;
+ }
+ else if (!HDstrncmp(name, LINK_VISIT_MIXED_LINKS_CYCLE_TEST_SUBGROUP_NAME2,
+ strlen(LINK_VISIT_MIXED_LINKS_CYCLE_TEST_SUBGROUP_NAME2) + 1) &&
+ (counter_val == 0 || counter_val == 9 || counter_val == 12 || counter_val == 21)) {
+ if (H5L_TYPE_HARD != info->type) {
+ ret_val = -1;
+ HDprintf(" link type for link '%s' was not H5L_TYPE_HARD!\n",
+ LINK_VISIT_MIXED_LINKS_CYCLE_TEST_SUBGROUP_NAME2);
+ }
+
+ goto done;
+ }
+ else if (!HDstrncmp(name, LINK_VISIT_MIXED_LINKS_CYCLE_TEST_SUBGROUP_NAME3,
+ strlen(LINK_VISIT_MIXED_LINKS_CYCLE_TEST_SUBGROUP_NAME3) + 1) &&
+ (counter_val == 3 || counter_val == 6 || counter_val == 15 || counter_val == 18)) {
+ if (H5L_TYPE_HARD != info->type) {
+ ret_val = -1;
+ HDprintf(" link type for link '%s' was not H5L_TYPE_HARD!\n",
+ LINK_VISIT_MIXED_LINKS_CYCLE_TEST_SUBGROUP_NAME3);
+ }
+
+ goto done;
+ }
+
+ HDprintf(" link name '%s' didn't match known names or came in an incorrect order\n", name);
+
+ ret_val = -1;
+
+done:
+ (*i)++;
+
+ return ret_val;
+}
+#endif
+
+/*
+ * Link visiting callback for the H5Lvisit(_by_name)2 invalid
+ * parameters test which simply does nothing.
+ */
+static herr_t
+link_visit_invalid_params_cb(hid_t group_id, const char *name, const H5L_info2_t *info, void *op_data)
+{
+ UNUSED(group_id);
+ UNUSED(name);
+ UNUSED(info);
+ UNUSED(op_data);
+
+ return 0;
+}
+
+/*
+ * Link visiting callback for the 0-links visitation test, which
+ * simply does nothing.
+ */
+static herr_t
+link_visit_0_links_cb(hid_t group_id, const char *name, const H5L_info2_t *info, void *op_data)
+{
+ UNUSED(group_id);
+ UNUSED(name);
+ UNUSED(info);
+ UNUSED(op_data);
+
+ return 0;
+}
+
+/*
+ * Clean up temporary test files
+ */
+static void
+cleanup_files(void)
+{
+ H5Fdelete(EXTERNAL_LINK_TEST_FILE_NAME, H5P_DEFAULT);
+ H5Fdelete(EXTERNAL_LINK_INVALID_PARAMS_TEST_FILE_NAME, H5P_DEFAULT);
+}
+
+int
+H5_api_link_test(void)
+{
+ size_t i;
+ int nerrors;
+
+ HDprintf("**********************************************\n");
+ HDprintf("* *\n");
+ HDprintf("* API Link Tests *\n");
+ HDprintf("* *\n");
+ HDprintf("**********************************************\n\n");
+
+ for (i = 0, nerrors = 0; i < ARRAY_LENGTH(link_tests); i++) {
+ nerrors += (*link_tests[i])() ? 1 : 0;
+ }
+
+ HDprintf("\n");
+
+ HDprintf("Cleaning up testing files\n");
+ cleanup_files();
+
+ return nerrors;
+}
diff --git a/test/API/H5_api_link_test.h b/test/API/H5_api_link_test.h
new file mode 100644
index 0000000..e161517
--- /dev/null
+++ b/test/API/H5_api_link_test.h
@@ -0,0 +1,437 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+#ifndef H5_API_LINK_TEST_H
+#define H5_API_LINK_TEST_H
+
+#include "H5_api_test.h"
+
+int H5_api_link_test(void);
+
+/*********************************************
+ * *
+ * API Link test defines *
+ * *
+ *********************************************/
+
+#define HARD_LINK_TEST_GROUP_NAME "hard_link_creation_test"
+#define HARD_LINK_TEST_LINK_NAME "hard_link"
+
+#define HARD_LINK_TEST_GROUP_LONG_NAME "hard_link_long_name"
+#define MAX_NAME_LEN ((64 * 1024) + 1024)
+
+#define HARD_LINK_TEST_GROUP_MANY_NAME "hard_link_many_name"
+#define HARD_LINK_TEST_GROUP_MANY_FINAL_NAME "hard_link_final"
+#define HARD_LINK_TEST_GROUP_MANY_NAME_BUF_SIZE 1024
+
+#define H5L_SAME_LOC_TEST_GROUP_NAME "h5l_same_loc_test_group"
+#define H5L_SAME_LOC_TEST_LINK_NAME1 "h5l_same_loc_test_link1"
+#define H5L_SAME_LOC_TEST_LINK_NAME2 "h5l_same_loc_test_link2"
+
+#define HARD_LINK_INVALID_PARAMS_TEST_GROUP_NAME "hard_link_creation_invalid_params_test"
+#define HARD_LINK_INVALID_PARAMS_TEST_LINK_NAME "hard_link"
+
+#define SOFT_LINK_EXISTING_RELATIVE_TEST_SUBGROUP_NAME "soft_link_to_existing_relative_path_test"
+#define SOFT_LINK_EXISTING_RELATIVE_TEST_OBJECT_NAME "group"
+#define SOFT_LINK_EXISTING_RELATIVE_TEST_LINK_NAME "soft_link_to_existing_relative_path"
+
+#define SOFT_LINK_EXISTING_ABSOLUTE_TEST_SUBGROUP_NAME "soft_link_to_existing_absolute_path_test"
+#define SOFT_LINK_EXISTING_ABSOLUTE_TEST_LINK_NAME "soft_link_to_existing_absolute_path"
+
+#define SOFT_LINK_DANGLING_RELATIVE_TEST_SUBGROUP_NAME "soft_link_dangling_relative_path_test"
+#define SOFT_LINK_DANGLING_RELATIVE_TEST_OBJECT_NAME "group"
+#define SOFT_LINK_DANGLING_RELATIVE_TEST_LINK_NAME "soft_link_dangling_relative_path"
+
+#define SOFT_LINK_DANGLING_ABSOLUTE_TEST_SUBGROUP_NAME "soft_link_dangling_absolute_path_test"
+#define SOFT_LINK_DANGLING_ABSOLUTE_TEST_OBJECT_NAME "group"
+#define SOFT_LINK_DANGLING_ABSOLUTE_TEST_LINK_NAME "soft_link_dangling_absolute_path"
+
+#define SOFT_LINK_TEST_GROUP_LONG_NAME "soft_link_long_name"
+#define SOFT_LINK_TEST_LONG_OBJECT_NAME "soft_link_object_name"
+
+#define SOFT_LINK_TEST_GROUP_MANY_NAME "soft_link_many_name"
+#define SOFT_LINK_TEST_GROUP_MANY_FINAL_NAME "soft_link_final"
+#define SOFT_LINK_TEST_GROUP_MANY_NAME_BUF_SIZE 1024
+
+#define SOFT_LINK_INVALID_PARAMS_TEST_GROUP_NAME "soft_link_creation_invalid_params_test"
+#define SOFT_LINK_INVALID_PARAMS_TEST_LINK_NAME "soft_link_to_root"
+
+#define EXTERNAL_LINK_TEST_SUBGROUP_NAME "external_link_test"
+#define EXTERNAL_LINK_TEST_FILE_NAME "ext_link_file.h5"
+#define EXTERNAL_LINK_TEST_LINK_NAME "ext_link"
+
+#define EXTERNAL_LINK_TEST_DANGLING_SUBGROUP_NAME "external_link_dangling_test"
+#define EXTERNAL_LINK_TEST_DANGLING_LINK_NAME "dangling_ext_link"
+#define EXTERNAL_LINK_TEST_DANGLING_OBJECT_NAME "external_group"
+
+#define EXTERNAL_LINK_TEST_MULTI_NAME "external_link_multi_test"
+#define EXTERNAL_LINK_TEST_MULTI_NAME_BUF_SIZE 1024
+#define EXTERNAL_LINK_TEST_FILE_NAME2 "ext_link_file_2.h5"
+#define EXTERNAL_LINK_TEST_FILE_NAME3 "ext_link_file_3.h5"
+#define EXTERNAL_LINK_TEST_FILE_NAME4 "ext_link_file_4.h5"
+
+#define EXTERNAL_LINK_TEST_PING_PONG_NAME1 "ext_link_file_ping_pong_1.h5"
+#define EXTERNAL_LINK_TEST_PING_PONG_NAME2 "ext_link_file_ping_pong_2.h5"
+#define EXTERNAL_LINK_TEST_PING_PONG_NAME_BUF_SIZE 1024
+
+#define EXTERNAL_LINK_INVALID_PARAMS_TEST_GROUP_NAME "external_link_creation_invalid_params_test"
+#define EXTERNAL_LINK_INVALID_PARAMS_TEST_FILE_NAME "ext_link_invalid_params_file.h5"
+#define EXTERNAL_LINK_INVALID_PARAMS_TEST_LINK_NAME "external_link"
+
+#define UD_LINK_TEST_UDATA_MAX_SIZE 256
+#define UD_LINK_TEST_GROUP_NAME "ud_link_creation_test"
+#define UD_LINK_TEST_LINK_NAME "ud_link"
+
+#define UD_LINK_INVALID_PARAMS_TEST_UDATA_MAX_SIZE 256
+#define UD_LINK_INVALID_PARAMS_TEST_GROUP_NAME "ud_link_creation_invalid_params_test"
+#define UD_LINK_INVALID_PARAMS_TEST_LINK_NAME "ud_link"
+
+#define LINK_DELETE_TEST_NESTED_GRP_NAME "nested_grp"
+#define LINK_DELETE_TEST_HARD_LINK_NAME "hard_link"
+#define LINK_DELETE_TEST_NESTED_HARD_LINK_NAME \
+ LINK_DELETE_TEST_NESTED_GRP_NAME "/" LINK_DELETE_TEST_HARD_LINK_NAME
+#define LINK_DELETE_TEST_HARD_LINK_NAME2 LINK_DELETE_TEST_HARD_LINK_NAME "2"
+#define LINK_DELETE_TEST_HARD_LINK_NAME3 LINK_DELETE_TEST_HARD_LINK_NAME "3"
+#define LINK_DELETE_TEST_SOFT_LINK_NAME "soft_link"
+#define LINK_DELETE_TEST_SOFT_LINK_NAME2 LINK_DELETE_TEST_SOFT_LINK_NAME "2"
+#define LINK_DELETE_TEST_SOFT_LINK_NAME3 LINK_DELETE_TEST_SOFT_LINK_NAME "3"
+#define LINK_DELETE_TEST_EXTERNAL_LINK_NAME "external_link"
+#define LINK_DELETE_TEST_EXTERNAL_LINK_NAME2 LINK_DELETE_TEST_EXTERNAL_LINK_NAME "2"
+#define LINK_DELETE_TEST_EXTERNAL_LINK_NAME3 LINK_DELETE_TEST_EXTERNAL_LINK_NAME "3"
+#define LINK_DELETE_TEST_SUBGROUP_NAME "link_delete_test"
+#define LINK_DELETE_TEST_SUBGROUP1_NAME "H5Ldelete_hard_link"
+#define LINK_DELETE_TEST_NESTED_SUBGROUP_NAME1 "H5Ldelete_nested_hard_link"
+#define LINK_DELETE_TEST_SUBGROUP2_NAME "H5Ldelete_soft_link"
+#define LINK_DELETE_TEST_SUBGROUP3_NAME "H5Ldelete_external_link"
+#define LINK_DELETE_TEST_SUBGROUP4_NAME "H5Ldelete_ud_link"
+#define LINK_DELETE_TEST_SUBGROUP5_NAME "H5Ldelete_by_idx_hard_link_crt_order_increasing"
+#define LINK_DELETE_TEST_SUBGROUP6_NAME "H5Ldelete_by_idx_hard_link_crt_order_decreasing"
+#define LINK_DELETE_TEST_SUBGROUP7_NAME "H5Ldelete_by_idx_hard_link_name_order_increasing"
+#define LINK_DELETE_TEST_SUBGROUP8_NAME "H5Ldelete_by_idx_hard_link_name_order_decreasing"
+#define LINK_DELETE_TEST_SUBGROUP9_NAME "H5Ldelete_by_idx_soft_link_crt_order_increasing"
+#define LINK_DELETE_TEST_SUBGROUP10_NAME "H5Ldelete_by_idx_soft_link_crt_order_decreasing"
+#define LINK_DELETE_TEST_SUBGROUP11_NAME "H5Ldelete_by_idx_soft_link_name_order_increasing"
+#define LINK_DELETE_TEST_SUBGROUP12_NAME "H5Ldelete_by_idx_soft_link_name_order_decreasing"
+#define LINK_DELETE_TEST_SUBGROUP13_NAME "H5Ldelete_by_idx_external_link_crt_order_increasing"
+#define LINK_DELETE_TEST_SUBGROUP14_NAME "H5Ldelete_by_idx_external_link_crt_order_decreasing"
+#define LINK_DELETE_TEST_SUBGROUP15_NAME "H5Ldelete_by_idx_external_link_name_order_increasing"
+#define LINK_DELETE_TEST_SUBGROUP16_NAME "H5Ldelete_by_idx_external_link_name_order_decreasing"
+#define LINK_DELETE_TEST_SUBGROUP17_NAME "H5Ldelete_by_idx_ud_link_crt_order_increasing"
+#define LINK_DELETE_TEST_SUBGROUP18_NAME "H5Ldelete_by_idx_ud_link_crt_order_decreasing"
+#define LINK_DELETE_TEST_SUBGROUP19_NAME "H5Ldelete_by_idx_ud_link_name_order_increasing"
+#define LINK_DELETE_TEST_SUBGROUP20_NAME "H5Ldelete_by_idx_ud_link_name_order_decreasing"
+
+#define LINK_DELETE_RESET_MAX_CRT_ORDER_TEST_SUBGROUP_NAME "H5Ldelete_reset_grp_max_crt_order_test"
+#define LINK_DELETE_RESET_MAX_CRT_ORDER_TEST_SUBGROUP1_NAME "H5Ldelete_bottom_up"
+#define LINK_DELETE_RESET_MAX_CRT_ORDER_TEST_SUBGROUP2_NAME "H5Ldelete_top_down"
+#define LINK_DELETE_RESET_MAX_CRT_ORDER_TEST_NUM_LINKS 5
+#define LINK_DELETE_RESET_MAX_CRT_ORDER_TEST_BUF_SIZE 1024
+
+#define LINK_DELETE_INVALID_PARAMS_TEST_HARD_LINK_NAME "hard_link"
+#define LINK_DELETE_INVALID_PARAMS_TEST_GROUP_NAME "link_deletion_invalid_params_test"
+
+#define COPY_LINK_TEST_LINK_VAL_BUF_SIZE 1024
+#define COPY_LINK_TEST_EXTERNAL_LINK_NAME "external_link"
+#define COPY_LINK_TEST_EXTERNAL_LINK_NAME2 COPY_LINK_TEST_EXTERNAL_LINK_NAME "2"
+#define COPY_LINK_TEST_EXTERNAL_LINK_NAME3 COPY_LINK_TEST_EXTERNAL_LINK_NAME "3"
+#define COPY_LINK_TEST_EXTERNAL_LINK_COPY_NAME "external_link_copy"
+#define COPY_LINK_TEST_EXTERNAL_LINK_COPY_NAME2 COPY_LINK_TEST_EXTERNAL_LINK_COPY_NAME "2"
+#define COPY_LINK_TEST_EXTERNAL_LINK_SAME_LOC_NAME "external_link_same_loc"
+#define COPY_LINK_TEST_EXTERNAL_LINK_SAME_LOC_NAME2 COPY_LINK_TEST_EXTERNAL_LINK_SAME_LOC_NAME "2"
+#define COPY_LINK_TEST_HARD_LINK_NAME "hard_link"
+#define COPY_LINK_TEST_HARD_LINK_NAME2 COPY_LINK_TEST_HARD_LINK_NAME "2"
+#define COPY_LINK_TEST_HARD_LINK_NAME3 COPY_LINK_TEST_HARD_LINK_NAME "3"
+#define COPY_LINK_TEST_HARD_LINK_COPY_NAME "hard_link_copy"
+#define COPY_LINK_TEST_HARD_LINK_COPY_NAME2 COPY_LINK_TEST_HARD_LINK_COPY_NAME "2"
+#define COPY_LINK_TEST_HARD_LINK_SAME_LOC_NAME "hard_link_same_loc"
+#define COPY_LINK_TEST_HARD_LINK_SAME_LOC_NAME2 COPY_LINK_TEST_HARD_LINK_SAME_LOC_NAME "2"
+#define COPY_LINK_TEST_SOFT_LINK_TARGET_PATH "/" LINK_TEST_GROUP_NAME "/" COPY_LINK_TEST_SUBGROUP_NAME
+#define COPY_LINK_TEST_SOFT_LINK_NAME "soft_link"
+#define COPY_LINK_TEST_SOFT_LINK_NAME2 COPY_LINK_TEST_SOFT_LINK_NAME "2"
+#define COPY_LINK_TEST_SOFT_LINK_NAME3 COPY_LINK_TEST_SOFT_LINK_NAME "3"
+#define COPY_LINK_TEST_SOFT_LINK_COPY_NAME "soft_link_copy"
+#define COPY_LINK_TEST_SOFT_LINK_COPY_NAME2 COPY_LINK_TEST_SOFT_LINK_COPY_NAME "2"
+#define COPY_LINK_TEST_SOFT_LINK_SAME_LOC_NAME "soft_link_same_loc"
+#define COPY_LINK_TEST_SOFT_LINK_SAME_LOC_NAME2 COPY_LINK_TEST_SOFT_LINK_SAME_LOC_NAME "2"
+#define COPY_LINK_TEST_SRC_GROUP_NAME "src_group"
+#define COPY_LINK_TEST_DST_GROUP_NAME "dst_group"
+#define COPY_LINK_TEST_SUBGROUP_NAME "link_copy_test"
+
+#define COPY_LINK_INVALID_PARAMS_TEST_HARD_LINK_COPY_NAME "hard_link_copy"
+#define COPY_LINK_INVALID_PARAMS_TEST_HARD_LINK_NAME "hard_link"
+#define COPY_LINK_INVALID_PARAMS_TEST_HARD_LINK_NEW_NAME "hard_link_new"
+#define COPY_LINK_INVALID_PARAMS_TEST_SRC_GROUP_NAME "src_group"
+#define COPY_LINK_INVALID_PARAMS_TEST_DST_GROUP_NAME "dst_group"
+#define COPY_LINK_INVALID_PARAMS_TEST_SUBGROUP_NAME "link_copy_invalid_params_test"
+
+#define MOVE_LINK_TEST_LINK_VAL_BUF_SIZE 1024
+#define MOVE_LINK_TEST_EXTERN_LINK_NAME "extern_link"
+#define MOVE_LINK_TEST_EXTERN_LINK_NAME2 MOVE_LINK_TEST_EXTERN_LINK_NAME "2"
+#define MOVE_LINK_TEST_EXTERN_LINK_NAME3 MOVE_LINK_TEST_EXTERN_LINK_NAME "3"
+#define MOVE_LINK_TEST_EXTERN_LINK_NAME4 MOVE_LINK_TEST_EXTERN_LINK_NAME "4"
+#define MOVE_LINK_TEST_EXTERN_LINK_NEW_NAME "extern_link_renamed"
+#define MOVE_LINK_TEST_EXTERN_LINK_SAME_LOC_NAME "extern_link_same_loc"
+#define MOVE_LINK_TEST_HARD_LINK_NAME "hard_link"
+#define MOVE_LINK_TEST_HARD_LINK_NAME2 MOVE_LINK_TEST_HARD_LINK_NAME "2"
+#define MOVE_LINK_TEST_HARD_LINK_NAME3 MOVE_LINK_TEST_HARD_LINK_NAME "3"
+#define MOVE_LINK_TEST_HARD_LINK_NAME4 MOVE_LINK_TEST_HARD_LINK_NAME "4"
+#define MOVE_LINK_TEST_HARD_LINK_NEW_NAME "hard_link_renamed"
+#define MOVE_LINK_TEST_HARD_LINK_SAME_LOC_NAME "hard_link_same_loc"
+#define MOVE_LINK_TEST_SOFT_LINK_TARGET_PATH "/" LINK_TEST_GROUP_NAME "/" MOVE_LINK_TEST_SUBGROUP_NAME
+#define MOVE_LINK_TEST_SOFT_LINK_NAME "soft_link"
+#define MOVE_LINK_TEST_SOFT_LINK_NAME2 MOVE_LINK_TEST_SOFT_LINK_NAME "2"
+#define MOVE_LINK_TEST_SOFT_LINK_NAME3 MOVE_LINK_TEST_SOFT_LINK_NAME "3"
+#define MOVE_LINK_TEST_SOFT_LINK_NAME4 MOVE_LINK_TEST_SOFT_LINK_NAME "4"
+#define MOVE_LINK_TEST_SOFT_LINK_NEW_NAME "soft_link_renamed"
+#define MOVE_LINK_TEST_SOFT_LINK_SAME_LOC_NAME "soft_link_same_loc"
+#define MOVE_LINK_TEST_SRC_GROUP_NAME "src_group"
+#define MOVE_LINK_TEST_DST_GROUP_NAME "dst_group"
+#define MOVE_LINK_TEST_SUBGROUP_NAME "link_move_test"
+
+#define MOVE_LINK_INTO_GRP_WITH_LINKS_TEST_SUBGROUP_NAME "link_move_into_group_with_links_test"
+#define MOVE_LINK_INTO_GRP_WITH_LINKS_TEST_SRC_GRP_NAME "source_group"
+#define MOVE_LINK_INTO_GRP_WITH_LINKS_TEST_DST_GRP_NAME "dest_group"
+#define MOVE_LINK_INTO_GRP_WITH_LINKS_TEST_NUM_LINKS 5
+#define MOVE_LINK_INTO_GRP_WITH_LINKS_TEST_BUF_SIZE 1024
+
+#define MOVE_LINK_RESET_MAX_CRT_ORDER_TEST_SUBGROUP_NAME "H5Lmove_reset_grp_max_crt_order_test"
+#define MOVE_LINK_RESET_MAX_CRT_ORDER_TEST_SRC_GRP_NAME "source_group"
+#define MOVE_LINK_RESET_MAX_CRT_ORDER_TEST_DST_GRP_NAME "dest_group"
+#define MOVE_LINK_RESET_MAX_CRT_ORDER_TEST_NUM_LINKS 5
+#define MOVE_LINK_RESET_MAX_CRT_ORDER_TEST_BUF_SIZE 1024
+
+#define MOVE_LINK_INVALID_PARAMS_TEST_HARD_LINK_NAME "hard_link"
+#define MOVE_LINK_INVALID_PARAMS_TEST_SRC_GROUP_NAME "src_grp"
+#define MOVE_LINK_INVALID_PARAMS_TEST_DST_GROUP_NAME "dst_grp"
+#define MOVE_LINK_INVALID_PARAMS_TEST_SUBGROUP_NAME "link_move_invalid_params_test"
+
+#define GET_LINK_VAL_TEST_LINK_VAL_BUF_SIZE 1024
+#define GET_LINK_VAL_TEST_SUBGROUP_NAME "get_link_val_test"
+#define GET_LINK_VAL_TEST_SOFT_LINK_NAME "soft_link"
+#define GET_LINK_VAL_TEST_SOFT_LINK_NAME2 GET_LINK_VAL_TEST_SOFT_LINK_NAME "2"
+#define GET_LINK_VAL_TEST_SOFT_LINK_NAME3 GET_LINK_VAL_TEST_SOFT_LINK_NAME "3"
+#define GET_LINK_VAL_TEST_EXT_LINK_NAME "ext_link"
+#define GET_LINK_VAL_TEST_EXT_LINK_NAME2 GET_LINK_VAL_TEST_EXT_LINK_NAME "2"
+#define GET_LINK_VAL_TEST_EXT_LINK_NAME3 GET_LINK_VAL_TEST_EXT_LINK_NAME "3"
+#define GET_LINK_VAL_TEST_SUBGROUP1_NAME "H5Lget_val_soft_link"
+#define GET_LINK_VAL_TEST_SUBGROUP2_NAME "H5Lget_val_external_link"
+#define GET_LINK_VAL_TEST_SUBGROUP3_NAME "H5Lget_val_ud_link"
+#define GET_LINK_VAL_TEST_SUBGROUP4_NAME "H5Lget_val_by_idx_soft_link_crt_order_increasing"
+#define GET_LINK_VAL_TEST_SUBGROUP5_NAME "H5Lget_val_by_idx_soft_link_crt_order_decreasing"
+#define GET_LINK_VAL_TEST_SUBGROUP6_NAME "H5Lget_val_by_idx_soft_link_name_order_increasing"
+#define GET_LINK_VAL_TEST_SUBGROUP7_NAME "H5Lget_val_by_idx_soft_link_name_order_decreasing"
+#define GET_LINK_VAL_TEST_SUBGROUP8_NAME "H5Lget_val_by_idx_external_link_crt_order_increasing"
+#define GET_LINK_VAL_TEST_SUBGROUP9_NAME "H5Lget_val_by_idx_external_link_crt_order_decreasing"
+#define GET_LINK_VAL_TEST_SUBGROUP10_NAME "H5Lget_val_by_idx_external_link_name_order_increasing"
+#define GET_LINK_VAL_TEST_SUBGROUP11_NAME "H5Lget_val_by_idx_external_link_name_order_decreasing"
+#define GET_LINK_VAL_TEST_SUBGROUP12_NAME "H5Lget_val_by_idx_ud_link_crt_order_increasing"
+#define GET_LINK_VAL_TEST_SUBGROUP13_NAME "H5Lget_val_by_idx_ud_link_crt_order_decreasing"
+#define GET_LINK_VAL_TEST_SUBGROUP14_NAME "H5Lget_val_by_idx_ud_link_name_order_increasing"
+#define GET_LINK_VAL_TEST_SUBGROUP15_NAME "H5Lget_val_by_idx_ud_link_name_order_decreasing"
+
+#define GET_LINK_VAL_INVALID_PARAMS_TEST_SOFT_LINK_NAME "soft_link"
+#define GET_LINK_VAL_INVALID_PARAMS_TEST_GROUP_NAME "get_link_val_invalid_params_test"
+
+#define GET_LINK_INFO_TEST_HARD_LINK_NAME "hard_link"
+#define GET_LINK_INFO_TEST_HARD_LINK_NAME2 GET_LINK_INFO_TEST_HARD_LINK_NAME "2"
+#define GET_LINK_INFO_TEST_HARD_LINK_NAME3 GET_LINK_INFO_TEST_HARD_LINK_NAME "3"
+#define GET_LINK_INFO_TEST_SOFT_LINK_NAME "soft_link"
+#define GET_LINK_INFO_TEST_SOFT_LINK_NAME2 GET_LINK_INFO_TEST_SOFT_LINK_NAME "2"
+#define GET_LINK_INFO_TEST_SOFT_LINK_NAME3 GET_LINK_INFO_TEST_SOFT_LINK_NAME "3"
+#define GET_LINK_INFO_TEST_EXT_LINK_NAME "ext_link"
+#define GET_LINK_INFO_TEST_EXT_LINK_NAME2 GET_LINK_INFO_TEST_EXT_LINK_NAME "2"
+#define GET_LINK_INFO_TEST_EXT_LINK_NAME3 GET_LINK_INFO_TEST_EXT_LINK_NAME "3"
+#define GET_LINK_INFO_TEST_GROUP_NAME "get_link_info_test"
+#define GET_LINK_INFO_TEST_SUBGROUP1_NAME "H5Lget_info_hard_link"
+#define GET_LINK_INFO_TEST_SUBGROUP2_NAME "H5Lget_info_soft_link"
+#define GET_LINK_INFO_TEST_SUBGROUP3_NAME "H5Lget_info_external_link"
+#define GET_LINK_INFO_TEST_SUBGROUP4_NAME "H5Lget_info_ud_link"
+#define GET_LINK_INFO_TEST_SUBGROUP5_NAME "H5Lget_info_by_idx_hard_link_crt_order_increasing"
+#define GET_LINK_INFO_TEST_SUBGROUP6_NAME "H5Lget_info_by_idx_hard_link_crt_order_decreasing"
+#define GET_LINK_INFO_TEST_SUBGROUP7_NAME "H5Lget_info_by_idx_hard_link_name_order_increasing"
+#define GET_LINK_INFO_TEST_SUBGROUP8_NAME "H5Lget_info_by_idx_hard_link_name_order_decreasing"
+#define GET_LINK_INFO_TEST_SUBGROUP9_NAME "H5Lget_info_by_idx_soft_link_crt_order_increasing"
+#define GET_LINK_INFO_TEST_SUBGROUP10_NAME "H5Lget_info_by_idx_soft_link_crt_order_decreasing"
+#define GET_LINK_INFO_TEST_SUBGROUP11_NAME "H5Lget_info_by_idx_soft_link_name_order_increasing"
+#define GET_LINK_INFO_TEST_SUBGROUP12_NAME "H5Lget_info_by_idx_soft_link_name_order_decreasing"
+#define GET_LINK_INFO_TEST_SUBGROUP13_NAME "H5Lget_info_by_idx_external_link_crt_order_increasing"
+#define GET_LINK_INFO_TEST_SUBGROUP14_NAME "H5Lget_info_by_idx_external_link_crt_order_decreasing"
+#define GET_LINK_INFO_TEST_SUBGROUP15_NAME "H5Lget_info_by_idx_external_link_name_order_increasing"
+#define GET_LINK_INFO_TEST_SUBGROUP16_NAME "H5Lget_info_by_idx_external_link_name_order_decreasing"
+#define GET_LINK_INFO_TEST_SUBGROUP17_NAME "H5Lget_info_by_idx_ud_link_crt_order_increasing"
+#define GET_LINK_INFO_TEST_SUBGROUP18_NAME "H5Lget_info_by_idx_ud_link_crt_order_decreasing"
+#define GET_LINK_INFO_TEST_SUBGROUP19_NAME "H5Lget_info_by_idx_ud_link_name_order_increasing"
+#define GET_LINK_INFO_TEST_SUBGROUP20_NAME "H5Lget_info_by_idx_ud_link_name_order_decreasing"
+
+#define GET_LINK_INFO_INVALID_PARAMS_TEST_HARD_LINK_NAME "hard_link"
+#define GET_LINK_INFO_INVALID_PARAMS_TEST_GROUP_NAME "get_link_info_invalid_params_test"
+
+#define GET_LINK_NAME_TEST_EXTERNAL_SUBGROUP_NAME "get_external_link_name_crt_order_increasing"
+#define GET_LINK_NAME_TEST_EXTERNAL_SUBGROUP_NAME2 "get_external_link_name_crt_order_decreasing"
+#define GET_LINK_NAME_TEST_EXTERNAL_SUBGROUP_NAME3 "get_external_link_name_alpha_order_increasing"
+#define GET_LINK_NAME_TEST_EXTERNAL_SUBGROUP_NAME4 "get_external_link_name_alpha_order_decreasing"
+#define GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME "external_link"
+#define GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME2 GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME "2"
+#define GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME3 GET_LINK_NAME_TEST_EXTERNAL_LINK_NAME "3"
+#define GET_LINK_NAME_TEST_HARD_SUBGROUP_NAME "get_hard_link_name_crt_order_increasing"
+#define GET_LINK_NAME_TEST_HARD_SUBGROUP_NAME2 "get_hard_link_name_crt_order_decreasing"
+#define GET_LINK_NAME_TEST_HARD_SUBGROUP_NAME3 "get_hard_link_name_alpha_order_increasing"
+#define GET_LINK_NAME_TEST_HARD_SUBGROUP_NAME4 "get_hard_link_name_alpha_order_decreasing"
+#define GET_LINK_NAME_TEST_HARD_LINK_NAME "hard_link"
+#define GET_LINK_NAME_TEST_HARD_LINK_NAME2 GET_LINK_NAME_TEST_HARD_LINK_NAME "2"
+#define GET_LINK_NAME_TEST_HARD_LINK_NAME3 GET_LINK_NAME_TEST_HARD_LINK_NAME "3"
+#define GET_LINK_NAME_TEST_SOFT_SUBGROUP_NAME "get_soft_link_name_crt_order_increasing"
+#define GET_LINK_NAME_TEST_SOFT_SUBGROUP_NAME2 "get_soft_link_name_crt_order_decreasing"
+#define GET_LINK_NAME_TEST_SOFT_SUBGROUP_NAME3 "get_soft_link_name_alpha_order_increasing"
+#define GET_LINK_NAME_TEST_SOFT_SUBGROUP_NAME4 "get_soft_link_name_alpha_order_decreasing"
+#define GET_LINK_NAME_TEST_SOFT_LINK_NAME "soft_link"
+#define GET_LINK_NAME_TEST_SOFT_LINK_NAME2 GET_LINK_NAME_TEST_SOFT_LINK_NAME "2"
+#define GET_LINK_NAME_TEST_SOFT_LINK_NAME3 GET_LINK_NAME_TEST_SOFT_LINK_NAME "3"
+#define GET_LINK_NAME_TEST_GROUP_NAME "get_link_name_test"
+#define GET_LINK_NAME_TEST_BUF_SIZE 256
+
+#define GET_LINK_NAME_INVALID_PARAMS_TEST_HARD_LINK_NAME "test_link1"
+#define GET_LINK_NAME_INVALID_PARAMS_TEST_GROUP_NAME "get_link_name_invalid_params_test"
+
+#define LINK_ITER_HARD_LINKS_TEST_DSET_SPACE_RANK 2
+#define LINK_ITER_HARD_LINKS_TEST_SUBGROUP_NAME "link_iter_hard_links_test"
+#define LINK_ITER_HARD_LINKS_TEST_LINK_NAME "hard_link"
+#define LINK_ITER_HARD_LINKS_TEST_NUM_LINKS 10
+#define LINK_ITER_HARD_LINKS_TEST_BUF_SIZE 64
+
+#define LINK_ITER_SOFT_LINKS_TEST_SUBGROUP_NAME "link_iter_soft_links_test"
+#define LINK_ITER_SOFT_LINKS_TEST_LINK_NAME "soft_link"
+#define LINK_ITER_SOFT_LINKS_TEST_NUM_LINKS 10
+#define LINK_ITER_SOFT_LINKS_TEST_BUF_SIZE 64
+
+#define LINK_ITER_EXT_LINKS_TEST_SUBGROUP_NAME "link_iter_ext_links_test"
+#define LINK_ITER_EXT_LINKS_TEST_LINK_NAME "external_link"
+#define LINK_ITER_EXT_LINKS_TEST_NUM_LINKS 10
+#define LINK_ITER_EXT_LINKS_TEST_BUF_SIZE 64
+
+#define LINK_ITER_MIXED_LINKS_TEST_DSET_SPACE_RANK 2
+#define LINK_ITER_MIXED_LINKS_TEST_HARD_LINK_NAME "hard_link1"
+#define LINK_ITER_MIXED_LINKS_TEST_SOFT_LINK_NAME "soft_link1"
+#define LINK_ITER_MIXED_LINKS_TEST_EXT_LINK_NAME "ext_link1"
+#define LINK_ITER_MIXED_LINKS_TEST_SUBGROUP_NAME "link_iter_mixed_links_test"
+#define LINK_ITER_MIXED_LINKS_TEST_NUM_LINKS 3
+
+#define LINK_ITER_INVALID_PARAMS_TEST_DSET_SPACE_RANK 2
+#define LINK_ITER_INVALID_PARAMS_TEST_HARD_LINK_NAME "hard_link1"
+#define LINK_ITER_INVALID_PARAMS_TEST_SOFT_LINK_NAME "soft_link1"
+#define LINK_ITER_INVALID_PARAMS_TEST_EXT_LINK_NAME "ext_link1"
+#define LINK_ITER_INVALID_PARAMS_TEST_SUBGROUP_NAME "link_iter_invalid_params_test"
+
+#define LINK_ITER_0_LINKS_TEST_SUBGROUP_NAME "link_iter_0_links_test"
+
+#define LINK_VISIT_HARD_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_TEST \
+ ((LINK_VISIT_HARD_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_GROUP * \
+ LINK_VISIT_HARD_LINKS_NO_CYCLE_TEST_NUM_SUBGROUPS) + \
+ LINK_VISIT_HARD_LINKS_NO_CYCLE_TEST_NUM_SUBGROUPS)
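+/* With the values below (10 links per group across 5 subgroups, plus the 5
+ * subgroup links themselves), this evaluates to 55 links visited per pass. */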
+#define LINK_VISIT_HARD_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_GROUP 10
+#define LINK_VISIT_HARD_LINKS_NO_CYCLE_TEST_DSET_SPACE_RANK 2
+#define LINK_VISIT_HARD_LINKS_NO_CYCLE_TEST_NUM_SUBGROUPS 5
+#define LINK_VISIT_HARD_LINKS_NO_CYCLE_TEST_NESTED_GRP_NAME "subgroup"
+#define LINK_VISIT_HARD_LINKS_NO_CYCLE_TEST_SUBGROUP_NAME "link_visit_hard_links_no_cycle_test"
+#define LINK_VISIT_HARD_LINKS_NO_CYCLE_TEST_LINK_NAME "hard_link"
+#define LINK_VISIT_HARD_LINKS_NO_CYCLE_TEST_BUF_SIZE 256
+
+#define LINK_VISIT_SOFT_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_TEST \
+ ((LINK_VISIT_SOFT_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_GROUP * \
+ LINK_VISIT_SOFT_LINKS_NO_CYCLE_TEST_NUM_SUBGROUPS) + \
+ LINK_VISIT_SOFT_LINKS_NO_CYCLE_TEST_NUM_SUBGROUPS)
+#define LINK_VISIT_SOFT_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_GROUP 10
+#define LINK_VISIT_SOFT_LINKS_NO_CYCLE_TEST_NUM_SUBGROUPS 5
+#define LINK_VISIT_SOFT_LINKS_NO_CYCLE_TEST_NESTED_GRP_NAME "subgroup"
+#define LINK_VISIT_SOFT_LINKS_NO_CYCLE_TEST_SUBGROUP_NAME "link_visit_soft_links_no_cycle_test"
+#define LINK_VISIT_SOFT_LINKS_NO_CYCLE_TEST_LINK_NAME "soft_link"
+#define LINK_VISIT_SOFT_LINKS_NO_CYCLE_TEST_BUF_SIZE 256
+
+#define LINK_VISIT_EXT_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_TEST \
+ ((LINK_VISIT_EXT_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_GROUP * \
+ LINK_VISIT_EXT_LINKS_NO_CYCLE_TEST_NUM_SUBGROUPS) + \
+ LINK_VISIT_EXT_LINKS_NO_CYCLE_TEST_NUM_SUBGROUPS)
+#define LINK_VISIT_EXT_LINKS_NO_CYCLE_TEST_NUM_LINKS_PER_GROUP 10
+#define LINK_VISIT_EXT_LINKS_NO_CYCLE_TEST_NUM_SUBGROUPS 5
+#define LINK_VISIT_EXT_LINKS_NO_CYCLE_TEST_NESTED_GRP_NAME "subgroup"
+#define LINK_VISIT_EXT_LINKS_NO_CYCLE_TEST_SUBGROUP_NAME "link_visit_ext_links_no_cycle_test"
+#define LINK_VISIT_EXT_LINKS_NO_CYCLE_TEST_LINK_NAME "external_link"
+#define LINK_VISIT_EXT_LINKS_NO_CYCLE_TEST_BUF_SIZE 256
+
+#define LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_DSET_SPACE_RANK 2
+#define LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_DSET_NAME "dset"
+#define LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_DSET_NAME2 "dset2"
+#define LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_SUBGROUP_NAME "link_visit_mixed_links_no_cycle_test"
+#define LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_SUBGROUP_NAME2 "link_visit_subgroup1"
+#define LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_SUBGROUP_NAME3 "link_visit_subgroup2"
+#define LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_LINK_NAME1 "hard_link1"
+#define LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_LINK_NAME2 "soft_link1"
+#define LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_LINK_NAME3 "ext_link1"
+#define LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_LINK_NAME4 "hard_link2"
+#define LINK_VISIT_MIXED_LINKS_NO_CYCLE_TEST_NUM_LINKS 8
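+/* Each visitation pass encounters 8 links: the 2 subgroup links, the 2 dataset
+ * links, and the 4 remaining hard/soft/external links checked in the callback. */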
+
+#define LINK_VISIT_HARD_LINKS_CYCLE_TEST_NUM_LINKS_PER_TEST \
+ ((LINK_VISIT_HARD_LINKS_CYCLE_TEST_NUM_LINKS_PER_GROUP * \
+ LINK_VISIT_HARD_LINKS_CYCLE_TEST_NUM_SUBGROUPS) + \
+ LINK_VISIT_HARD_LINKS_CYCLE_TEST_NUM_SUBGROUPS)
+#define LINK_VISIT_HARD_LINKS_CYCLE_TEST_NUM_LINKS_PER_GROUP 10
+#define LINK_VISIT_HARD_LINKS_CYCLE_TEST_NUM_SUBGROUPS 5
+#define LINK_VISIT_HARD_LINKS_CYCLE_TEST_NESTED_GRP_NAME "subgroup"
+#define LINK_VISIT_HARD_LINKS_CYCLE_TEST_SUBGROUP_NAME "link_visit_hard_links_cycle_test"
+#define LINK_VISIT_HARD_LINKS_CYCLE_TEST_LINK_NAME "hard_link"
+#define LINK_VISIT_HARD_LINKS_CYCLE_TEST_BUF_SIZE 256
+
+#define LINK_VISIT_SOFT_LINKS_CYCLE_TEST_NUM_LINKS_PER_TEST \
+ ((LINK_VISIT_SOFT_LINKS_CYCLE_TEST_NUM_LINKS_PER_GROUP * \
+ LINK_VISIT_SOFT_LINKS_CYCLE_TEST_NUM_SUBGROUPS) + \
+ LINK_VISIT_SOFT_LINKS_CYCLE_TEST_NUM_SUBGROUPS)
+#define LINK_VISIT_SOFT_LINKS_CYCLE_TEST_NUM_LINKS_PER_GROUP 10
+#define LINK_VISIT_SOFT_LINKS_CYCLE_TEST_NUM_SUBGROUPS 5
+#define LINK_VISIT_SOFT_LINKS_CYCLE_TEST_NESTED_GRP_NAME "subgroup"
+#define LINK_VISIT_SOFT_LINKS_CYCLE_TEST_SUBGROUP_NAME "link_visit_soft_links_cycle_test"
+#define LINK_VISIT_SOFT_LINKS_CYCLE_TEST_LINK_NAME "soft_link"
+#define LINK_VISIT_SOFT_LINKS_CYCLE_TEST_BUF_SIZE 256
+
+#define LINK_VISIT_EXT_LINKS_CYCLE_TEST_NUM_LINKS_PER_TEST \
+ ((LINK_VISIT_EXT_LINKS_CYCLE_TEST_NUM_LINKS_PER_GROUP * LINK_VISIT_EXT_LINKS_CYCLE_TEST_NUM_SUBGROUPS) + \
+ LINK_VISIT_EXT_LINKS_CYCLE_TEST_NUM_SUBGROUPS)
+#define LINK_VISIT_EXT_LINKS_CYCLE_TEST_NUM_LINKS_PER_GROUP 10
+#define LINK_VISIT_EXT_LINKS_CYCLE_TEST_NUM_SUBGROUPS 5
+#define LINK_VISIT_EXT_LINKS_CYCLE_TEST_NESTED_GRP_NAME "subgroup"
+#define LINK_VISIT_EXT_LINKS_CYCLE_TEST_SUBGROUP_NAME "link_visit_ext_links_cycle_test"
+#define LINK_VISIT_EXT_LINKS_CYCLE_TEST_LINK_NAME "external_link"
+#define LINK_VISIT_EXT_LINKS_CYCLE_TEST_BUF_SIZE 256
+
+#define LINK_VISIT_MIXED_LINKS_CYCLE_TEST_SUBGROUP_NAME "link_visit_mixed_links_cycle_test"
+#define LINK_VISIT_MIXED_LINKS_CYCLE_TEST_SUBGROUP_NAME2 "link_visit_subgroup1"
+#define LINK_VISIT_MIXED_LINKS_CYCLE_TEST_SUBGROUP_NAME3 "link_visit_subgroup2"
+#define LINK_VISIT_MIXED_LINKS_CYCLE_TEST_LINK_NAME1 "hard_link1"
+#define LINK_VISIT_MIXED_LINKS_CYCLE_TEST_LINK_NAME2 "soft_link1"
+#define LINK_VISIT_MIXED_LINKS_CYCLE_TEST_LINK_NAME3 "ext_link1"
+#define LINK_VISIT_MIXED_LINKS_CYCLE_TEST_LINK_NAME4 "hard_link2"
+#define LINK_VISIT_MIXED_LINKS_CYCLE_TEST_NUM_LINKS 6
+
+#define LINK_VISIT_INVALID_PARAMS_TEST_DSET_SPACE_RANK 2
+#define LINK_VISIT_INVALID_PARAMS_TEST_DSET_NAME "dset"
+#define LINK_VISIT_INVALID_PARAMS_TEST_SUBGROUP_NAME "link_visit_invalid_params_test"
+#define LINK_VISIT_INVALID_PARAMS_TEST_SUBGROUP_NAME2 "link_visit_subgroup1"
+#define LINK_VISIT_INVALID_PARAMS_TEST_SUBGROUP_NAME3 "link_visit_subgroup2"
+#define LINK_VISIT_INVALID_PARAMS_TEST_LINK_NAME1 "hard_link1"
+#define LINK_VISIT_INVALID_PARAMS_TEST_LINK_NAME2 "soft_link1"
+#define LINK_VISIT_INVALID_PARAMS_TEST_LINK_NAME3 "ext_link1"
+#define LINK_VISIT_INVALID_PARAMS_TEST_LINK_NAME4 "hard_link2"
+
+#define LINK_VISIT_0_LINKS_TEST_SUBGROUP_NAME "link_visit_0_links_test"
+#define LINK_VISIT_0_LINKS_TEST_SUBGROUP_NAME2 "link_visit_0_links_test_subgroup1"
+#define LINK_VISIT_0_LINKS_TEST_SUBGROUP_NAME3 "link_visit_0_links_test_subgroup2"
+
+#endif
diff --git a/test/API/H5_api_misc_test.c b/test/API/H5_api_misc_test.c
new file mode 100644
index 0000000..256550b
--- /dev/null
+++ b/test/API/H5_api_misc_test.c
@@ -0,0 +1,1060 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+#include "H5_api_misc_test.h"
+
+static int test_open_link_without_leading_slash(void);
+static int test_object_creation_by_absolute_path(void);
+static int test_absolute_vs_relative_path(void);
+static int test_dot_for_object_name(void);
+static int test_symbols_in_compound_field_name(void);
+static int test_double_init_term(void);
+
+/*
+ * The array of miscellaneous tests to be performed.
+ */
+static int (*misc_tests[])(void) = {
+ test_open_link_without_leading_slash, test_object_creation_by_absolute_path,
+ test_absolute_vs_relative_path, test_dot_for_object_name,
+ test_symbols_in_compound_field_name, test_double_init_term,
+};
+
+static int
+test_open_link_without_leading_slash(void)
+{
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t dset_dtype = H5I_INVALID_HID;
+ hid_t space_id = H5I_INVALID_HID;
+
+ TESTING("opening a link without a leading slash");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) {
+ SKIPPED();
+ HDprintf(
+ " API functions for basic file, group, or dataset aren't supported with this connector\n");
+ return 0;
+ }
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file\n");
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, MISCELLANEOUS_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group\n");
+ goto error;
+ }
+
+ if ((space_id = generate_random_dataspace(OPEN_LINK_WITHOUT_SLASH_DSET_SPACE_RANK, NULL, NULL, FALSE)) <
+ 0)
+ TEST_ERROR;
+
+ if ((dset_dtype = generate_random_datatype(H5T_NO_CLASS, FALSE)) < 0)
+ TEST_ERROR;
+
+ if ((dset_id = H5Dcreate2(container_group, OPEN_LINK_WITHOUT_SLASH_DSET_NAME, dset_dtype, space_id,
+ H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create dataset\n");
+ goto error;
+ }
+
+ if (H5Sclose(space_id) < 0)
+ TEST_ERROR;
+ if (H5Dclose(dset_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file\n");
+ goto error;
+ }
+
+ if ((group_id = H5Gopen2(file_id, "/", H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open root group\n");
+ goto error;
+ }
+
+ if ((dset_id = H5Dopen2(group_id, MISCELLANEOUS_TEST_GROUP_NAME "/" OPEN_LINK_WITHOUT_SLASH_DSET_NAME,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open dataset\n");
+ goto error;
+ }
+
+ if (H5Tclose(dset_dtype) < 0)
+ TEST_ERROR;
+ if (H5Dclose(dset_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(space_id);
+ H5Tclose(dset_dtype);
+ H5Dclose(dset_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+static int
+test_object_creation_by_absolute_path(void)
+{
+ htri_t link_exists;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID, sub_group_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t fspace_id = H5I_INVALID_HID;
+ hid_t dtype_id = H5I_INVALID_HID;
+ hid_t dset_dtype = H5I_INVALID_HID;
+
+ TESTING_MULTIPART("object creation by absolute path");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_LINK_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_STORED_DATATYPES)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file, group, dataset, link, or stored datatype aren't "
+ "supported with this connector\n");
+ return 0;
+ }
+
+ TESTING_2("test setup");
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file\n");
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, MISCELLANEOUS_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group\n");
+ goto error;
+ }
+
+ /* Start by creating a group to hold all the objects for this test */
+ if ((group_id = H5Gcreate2(container_group, OBJECT_CREATE_BY_ABSOLUTE_PATH_TEST_CONTAINER_GROUP_NAME,
+ H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container group\n");
+ goto error;
+ }
+
+ if ((link_exists = H5Lexists(file_id,
+ "/" MISCELLANEOUS_TEST_GROUP_NAME
+ "/" OBJECT_CREATE_BY_ABSOLUTE_PATH_TEST_CONTAINER_GROUP_NAME,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if link exists\n");
+ goto error;
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" container group didn't exist at the correct location\n");
+ goto error;
+ }
+
+ PASSED();
+
+ BEGIN_MULTIPART
+ {
+ PART_BEGIN(H5Gcreate_using_absolute_path)
+ {
+ TESTING_2("creation of group using absolute pathname");
+
+ /* Try to create a group under the container group by using an absolute pathname */
+ if ((sub_group_id = H5Gcreate2(file_id,
+ "/" MISCELLANEOUS_TEST_GROUP_NAME
+ "/" OBJECT_CREATE_BY_ABSOLUTE_PATH_TEST_CONTAINER_GROUP_NAME
+ "/" OBJECT_CREATE_BY_ABSOLUTE_PATH_TEST_SUBGROUP_NAME,
+ H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create subgroup by absolute pathname\n");
+ PART_ERROR(H5Gcreate_using_absolute_path);
+ }
+
+ if ((link_exists = H5Lexists(file_id,
+ "/" MISCELLANEOUS_TEST_GROUP_NAME
+ "/" OBJECT_CREATE_BY_ABSOLUTE_PATH_TEST_CONTAINER_GROUP_NAME
+ "/" OBJECT_CREATE_BY_ABSOLUTE_PATH_TEST_SUBGROUP_NAME,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if link exists\n");
+ PART_ERROR(H5Gcreate_using_absolute_path);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" subgroup didn't exist at the correct location\n");
+ PART_ERROR(H5Gcreate_using_absolute_path);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Gcreate_using_absolute_path);
+
+ PART_BEGIN(H5Dcreate_using_absolute_path)
+ {
+ TESTING_2("creation of dataset using absolute pathname");
+
+ /* Try to create a dataset nested at the end of this group chain by using an absolute pathname */
+ if ((fspace_id = generate_random_dataspace(OBJECT_CREATE_BY_ABSOLUTE_PATH_TEST_DSET_SPACE_RANK,
+ NULL, NULL, FALSE)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to generate dataspace\n");
+ PART_ERROR(H5Dcreate_using_absolute_path);
+ }
+
+ if ((dset_dtype = generate_random_datatype(H5T_NO_CLASS, FALSE)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to generate datatype\n");
+ PART_ERROR(H5Dcreate_using_absolute_path);
+ }
+
+ if ((dset_id = H5Dcreate2(file_id,
+ "/" MISCELLANEOUS_TEST_GROUP_NAME
+ "/" OBJECT_CREATE_BY_ABSOLUTE_PATH_TEST_CONTAINER_GROUP_NAME
+ "/" OBJECT_CREATE_BY_ABSOLUTE_PATH_TEST_SUBGROUP_NAME
+ "/" OBJECT_CREATE_BY_ABSOLUTE_PATH_TEST_DSET_NAME,
+ dset_dtype, fspace_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create dataset\n");
+ PART_ERROR(H5Dcreate_using_absolute_path);
+ }
+
+ if ((link_exists = H5Lexists(file_id,
+ "/" MISCELLANEOUS_TEST_GROUP_NAME
+ "/" OBJECT_CREATE_BY_ABSOLUTE_PATH_TEST_CONTAINER_GROUP_NAME
+ "/" OBJECT_CREATE_BY_ABSOLUTE_PATH_TEST_SUBGROUP_NAME
+ "/" OBJECT_CREATE_BY_ABSOLUTE_PATH_TEST_DSET_NAME,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if link exists\n");
+ PART_ERROR(H5Dcreate_using_absolute_path);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" dataset didn't exist at the correct location\n");
+ PART_ERROR(H5Dcreate_using_absolute_path);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Dcreate_using_absolute_path);
+
+ PART_BEGIN(H5Tcommit_using_absolute_path)
+ {
+ TESTING_2("creation of committed datatype using absolute pathname");
+
+ /* Try to create a committed datatype in the same fashion as the preceding dataset */
+ if ((dtype_id = generate_random_datatype(H5T_NO_CLASS, FALSE)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create datatype\n");
+ PART_ERROR(H5Tcommit_using_absolute_path);
+ }
+
+ if (H5Tcommit2(file_id,
+ "/" MISCELLANEOUS_TEST_GROUP_NAME
+ "/" OBJECT_CREATE_BY_ABSOLUTE_PATH_TEST_CONTAINER_GROUP_NAME
+ "/" OBJECT_CREATE_BY_ABSOLUTE_PATH_TEST_SUBGROUP_NAME
+ "/" OBJECT_CREATE_BY_ABSOLUTE_PATH_TEST_DTYPE_NAME,
+ dtype_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't commit datatype\n");
+ PART_ERROR(H5Tcommit_using_absolute_path);
+ }
+
+ if ((link_exists = H5Lexists(file_id,
+ "/" MISCELLANEOUS_TEST_GROUP_NAME
+ "/" OBJECT_CREATE_BY_ABSOLUTE_PATH_TEST_CONTAINER_GROUP_NAME
+ "/" OBJECT_CREATE_BY_ABSOLUTE_PATH_TEST_SUBGROUP_NAME
+ "/" OBJECT_CREATE_BY_ABSOLUTE_PATH_TEST_DTYPE_NAME,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if link exists\n");
+ PART_ERROR(H5Tcommit_using_absolute_path);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+ HDprintf(" datatype didn't exist at the correct location\n");
+ PART_ERROR(H5Tcommit_using_absolute_path);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Tcommit_using_absolute_path);
+ }
+ END_MULTIPART;
+
+ TESTING_2("test cleanup");
+
+ if (H5Sclose(fspace_id) < 0)
+ TEST_ERROR;
+ if (H5Tclose(dset_dtype) < 0)
+ TEST_ERROR;
+ if (H5Dclose(dset_id) < 0)
+ TEST_ERROR;
+ if (H5Tclose(dtype_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(sub_group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(fspace_id);
+ H5Tclose(dset_dtype);
+ H5Dclose(dset_id);
+ H5Tclose(dtype_id);
+ H5Gclose(sub_group_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/* XXX: Add testing for groups */
+static int
+test_absolute_vs_relative_path(void)
+{
+ htri_t link_exists;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t dset_id1 = H5I_INVALID_HID, dset_id2 = H5I_INVALID_HID, dset_id3 = H5I_INVALID_HID,
+ dset_id4 = H5I_INVALID_HID, dset_id5 = H5I_INVALID_HID, dset_id6 = H5I_INVALID_HID;
+ hid_t dset_dtype1 = H5I_INVALID_HID, dset_dtype2 = H5I_INVALID_HID, dset_dtype3 = H5I_INVALID_HID,
+ dset_dtype4 = H5I_INVALID_HID, dset_dtype5 = H5I_INVALID_HID, dset_dtype6 = H5I_INVALID_HID;
+ hid_t fspace_id = H5I_INVALID_HID;
+
+ TESTING_MULTIPART("absolute vs. relative pathnames");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_LINK_BASIC)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file, group, dataset, or link aren't supported with this "
+ "connector\n");
+ return 0;
+ }
+
+ TESTING_2("test setup");
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file\n");
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, MISCELLANEOUS_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group\n");
+ goto error;
+ }
+
+ /* Start by creating a group to be used during some of the dataset creation operations */
+ if ((group_id = H5Gcreate2(container_group, ABSOLUTE_VS_RELATIVE_PATH_TEST_CONTAINER_GROUP_NAME,
+ H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container group\n");
+ goto error;
+ }
+
+ if ((fspace_id = generate_random_dataspace(ABSOLUTE_VS_RELATIVE_PATH_TEST_DSET_SPACE_RANK, NULL, NULL,
+ FALSE)) < 0)
+ TEST_ERROR;
+
+ if ((dset_dtype1 = generate_random_datatype(H5T_NO_CLASS, FALSE)) < 0)
+ TEST_ERROR;
+ if ((dset_dtype2 = generate_random_datatype(H5T_NO_CLASS, FALSE)) < 0)
+ TEST_ERROR;
+ if ((dset_dtype3 = generate_random_datatype(H5T_NO_CLASS, FALSE)) < 0)
+ TEST_ERROR;
+ if ((dset_dtype4 = generate_random_datatype(H5T_NO_CLASS, FALSE)) < 0)
+ TEST_ERROR;
+ if ((dset_dtype5 = generate_random_datatype(H5T_NO_CLASS, FALSE)) < 0)
+ TEST_ERROR;
+ if ((dset_dtype6 = generate_random_datatype(H5T_NO_CLASS, FALSE)) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ BEGIN_MULTIPART
+ {
+ PART_BEGIN(H5Dcreate_absolute_from_root)
+ {
+ TESTING_2("dataset creation by absolute path from root group");
+
+ /* Create a dataset by absolute path in the form "/group/dataset" starting from the root group */
+ if ((dset_id1 = H5Dcreate2(file_id,
+ "/" MISCELLANEOUS_TEST_GROUP_NAME
+ "/" ABSOLUTE_VS_RELATIVE_PATH_TEST_CONTAINER_GROUP_NAME
+ "/" ABSOLUTE_VS_RELATIVE_PATH_TEST_DSET1_NAME,
+ dset_dtype1, fspace_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create dataset by absolute path from root\n");
+ PART_ERROR(H5Dcreate_absolute_from_root);
+ }
+
+ if ((link_exists = H5Lexists(file_id,
+ "/" MISCELLANEOUS_TEST_GROUP_NAME
+ "/" ABSOLUTE_VS_RELATIVE_PATH_TEST_CONTAINER_GROUP_NAME
+ "/" ABSOLUTE_VS_RELATIVE_PATH_TEST_DSET1_NAME,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if link exists\n");
+ PART_ERROR(H5Dcreate_absolute_from_root);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+                HDprintf("    dataset didn't exist at the correct location\n");
+ PART_ERROR(H5Dcreate_absolute_from_root);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Dcreate_absolute_from_root);
+
+ PART_BEGIN(H5Dcreate_absolute_from_nonroot)
+ {
+ TESTING_2("dataset creation by absolute path from non-root group");
+
+ /* Create a dataset by absolute path in the form "/group/dataset" starting from the container
+ * group */
+ if ((dset_id4 = H5Dcreate2(container_group,
+ "/" MISCELLANEOUS_TEST_GROUP_NAME
+ "/" ABSOLUTE_VS_RELATIVE_PATH_TEST_CONTAINER_GROUP_NAME
+ "/" ABSOLUTE_VS_RELATIVE_PATH_TEST_DSET4_NAME,
+ dset_dtype4, fspace_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create dataset by absolute path from container group\n");
+ PART_ERROR(H5Dcreate_absolute_from_nonroot);
+ }
+
+ if ((link_exists = H5Lexists(file_id,
+ "/" MISCELLANEOUS_TEST_GROUP_NAME
+ "/" ABSOLUTE_VS_RELATIVE_PATH_TEST_CONTAINER_GROUP_NAME
+ "/" ABSOLUTE_VS_RELATIVE_PATH_TEST_DSET4_NAME,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if link exists\n");
+ PART_ERROR(H5Dcreate_absolute_from_nonroot);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+                HDprintf("    dataset didn't exist at the correct location\n");
+ PART_ERROR(H5Dcreate_absolute_from_nonroot);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Dcreate_absolute_from_nonroot);
+
+ PART_BEGIN(H5Dcreate_relative_from_root)
+ {
+ TESTING_2("dataset creation by relative path from root group");
+
+ /* TODO: */
+
+ SKIPPED();
+ PART_EMPTY(H5Dcreate_relative_from_root);
+ }
+ PART_END(H5Dcreate_relative_from_root);
+
+ PART_BEGIN(H5Dcreate_relative_from_nonroot)
+ {
+ TESTING_2("dataset creation by relative path from non-root group");
+
+ /* Create a dataset by relative path in the form "dataset" starting from the test container group
+ */
+ if ((dset_id5 = H5Dcreate2(group_id, ABSOLUTE_VS_RELATIVE_PATH_TEST_DSET5_NAME, dset_dtype5,
+ fspace_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create dataset by relative path from container group\n");
+ PART_ERROR(H5Dcreate_relative_from_nonroot);
+ }
+
+ /* Create a dataset by relative path in the form "group/dataset" starting from the top-level
+ * container group */
+ if ((dset_id2 = H5Dcreate2(container_group,
+ ABSOLUTE_VS_RELATIVE_PATH_TEST_CONTAINER_GROUP_NAME
+ "/" ABSOLUTE_VS_RELATIVE_PATH_TEST_DSET2_NAME,
+ dset_dtype2, fspace_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create dataset by relative path from container group\n");
+ PART_ERROR(H5Dcreate_relative_from_nonroot);
+ }
+
+ if ((link_exists = H5Lexists(file_id,
+ "/" MISCELLANEOUS_TEST_GROUP_NAME
+ "/" ABSOLUTE_VS_RELATIVE_PATH_TEST_CONTAINER_GROUP_NAME
+ "/" ABSOLUTE_VS_RELATIVE_PATH_TEST_DSET2_NAME,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if link exists\n");
+ PART_ERROR(H5Dcreate_relative_from_nonroot);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+                HDprintf("    dataset didn't exist at the correct location\n");
+ PART_ERROR(H5Dcreate_relative_from_nonroot);
+ }
+
+ if ((link_exists = H5Lexists(file_id,
+ "/" MISCELLANEOUS_TEST_GROUP_NAME
+ "/" ABSOLUTE_VS_RELATIVE_PATH_TEST_CONTAINER_GROUP_NAME
+ "/" ABSOLUTE_VS_RELATIVE_PATH_TEST_DSET5_NAME,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if link exists\n");
+ PART_ERROR(H5Dcreate_relative_from_nonroot);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+                HDprintf("    dataset didn't exist at the correct location\n");
+ PART_ERROR(H5Dcreate_relative_from_nonroot);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Dcreate_relative_from_nonroot);
+
+ PART_BEGIN(H5Dcreate_relative_leading_dot_root)
+ {
+ TESTING_2("dataset creation by path with leading '.' from root group");
+
+ /* Create a dataset by relative path in the form "./group/dataset" starting from the root group */
+ if ((dset_id3 = H5Dcreate2(file_id,
+ "./" MISCELLANEOUS_TEST_GROUP_NAME
+ "/" ABSOLUTE_VS_RELATIVE_PATH_TEST_CONTAINER_GROUP_NAME
+ "/" ABSOLUTE_VS_RELATIVE_PATH_TEST_DSET3_NAME,
+ dset_dtype3, fspace_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create dataset by relative path from root with leading '.'\n");
+ PART_ERROR(H5Dcreate_relative_leading_dot_root);
+ }
+
+ if ((link_exists = H5Lexists(file_id,
+ "/" MISCELLANEOUS_TEST_GROUP_NAME
+ "/" ABSOLUTE_VS_RELATIVE_PATH_TEST_CONTAINER_GROUP_NAME
+ "/" ABSOLUTE_VS_RELATIVE_PATH_TEST_DSET3_NAME,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if link exists\n");
+ PART_ERROR(H5Dcreate_relative_leading_dot_root);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+                HDprintf("    dataset didn't exist at the correct location\n");
+ PART_ERROR(H5Dcreate_relative_leading_dot_root);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Dcreate_relative_leading_dot_root);
+
+ PART_BEGIN(H5Dcreate_relative_leading_dot_nonroot)
+ {
+ TESTING_2("dataset creation by path with leading '.' from non-root group");
+
+ /* Create a dataset by relative path in the form "./dataset" starting from the container group */
+ if ((dset_id6 = H5Dcreate2(group_id, "./" ABSOLUTE_VS_RELATIVE_PATH_TEST_DSET6_NAME, dset_dtype6,
+ fspace_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(
+ " couldn't create dataset by relative path from container group with leading '.'\n");
+ PART_ERROR(H5Dcreate_relative_leading_dot_nonroot);
+ }
+
+ if ((link_exists = H5Lexists(file_id,
+ "/" MISCELLANEOUS_TEST_GROUP_NAME
+ "/" ABSOLUTE_VS_RELATIVE_PATH_TEST_CONTAINER_GROUP_NAME
+ "/" ABSOLUTE_VS_RELATIVE_PATH_TEST_DSET6_NAME,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if link exists\n");
+ PART_ERROR(H5Dcreate_relative_leading_dot_nonroot);
+ }
+
+ if (!link_exists) {
+ H5_FAILED();
+                HDprintf("    dataset didn't exist at the correct location\n");
+ PART_ERROR(H5Dcreate_relative_leading_dot_nonroot);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Dcreate_relative_leading_dot_nonroot);
+ }
+ END_MULTIPART;
+
+ TESTING_2("test cleanup");
+
+ if (H5Sclose(fspace_id) < 0)
+ TEST_ERROR;
+ if (H5Tclose(dset_dtype1) < 0)
+ TEST_ERROR;
+ if (H5Tclose(dset_dtype2) < 0)
+ TEST_ERROR;
+ if (H5Tclose(dset_dtype3) < 0)
+ TEST_ERROR;
+ if (H5Tclose(dset_dtype4) < 0)
+ TEST_ERROR;
+ if (H5Tclose(dset_dtype5) < 0)
+ TEST_ERROR;
+ if (H5Tclose(dset_dtype6) < 0)
+ TEST_ERROR;
+ if (H5Dclose(dset_id1) < 0)
+ TEST_ERROR;
+ if (H5Dclose(dset_id2) < 0)
+ TEST_ERROR;
+ if (H5Dclose(dset_id3) < 0)
+ TEST_ERROR;
+ if (H5Dclose(dset_id4) < 0)
+ TEST_ERROR;
+ if (H5Dclose(dset_id5) < 0)
+ TEST_ERROR;
+ if (H5Dclose(dset_id6) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(fspace_id);
+ H5Tclose(dset_dtype1);
+ H5Tclose(dset_dtype2);
+ H5Tclose(dset_dtype3);
+ H5Tclose(dset_dtype4);
+ H5Tclose(dset_dtype5);
+ H5Tclose(dset_dtype6);
+ H5Dclose(dset_id1);
+ H5Dclose(dset_id2);
+ H5Dclose(dset_id3);
+ H5Dclose(dset_id4);
+ H5Dclose(dset_id5);
+ H5Dclose(dset_id6);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to check creating/opening objects with "." as the name
+ */
+static int
+test_dot_for_object_name(void)
+{
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, subgroup_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID, dspace_id = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t dtype_id = H5I_INVALID_HID;
+ hid_t attr_id = H5I_INVALID_HID;
+ herr_t ret = -1;
+
+ TESTING_MULTIPART("creating objects with \".\" as the name");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_STORED_DATATYPES)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file, group, dataset, or stored datatype aren't supported with "
+ "this connector\n");
+ return 0;
+ }
+
+ TESTING_2("test setup");
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, MISCELLANEOUS_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", MISCELLANEOUS_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((subgroup_id = H5Gcreate2(container_group, DOT_AS_OBJECT_NAME_TEST_SUBGROUP_NAME, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container subgroup '%s'\n", DOT_AS_OBJECT_NAME_TEST_SUBGROUP_NAME);
+ goto error;
+ }
+
+ if ((dspace_id = H5Screate(H5S_SCALAR)) < 0) {
+ H5_FAILED();
+        HDprintf("    couldn't create dataspace\n");
+ goto error;
+ }
+
+ PASSED();
+
+ BEGIN_MULTIPART
+ {
+ PART_BEGIN(H5Gcreate_dot_as_name)
+ {
+ TESTING_2("invalid creation of group with '.' as name");
+
+            /* Create a group with "." as the name. It should fail. */
+ H5E_BEGIN_TRY
+ {
+ group_id = H5Gcreate2(subgroup_id, ".", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (group_id >= 0) {
+ H5_FAILED();
+ HDprintf(" a group was created with '.' as the name!\n");
+ PART_ERROR(H5Gcreate_dot_as_name);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Gcreate_dot_as_name);
+
+ PART_BEGIN(H5Dcreate_dot_as_name)
+ {
+ TESTING_2("invalid creation of dataset with '.' as name");
+
+            /* Create a dataset with "." as the name. It should fail. */
+ H5E_BEGIN_TRY
+ {
+ dset_id = H5Dcreate2(subgroup_id, ".", H5T_NATIVE_INT, dspace_id, H5P_DEFAULT, H5P_DEFAULT,
+ H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (dset_id >= 0) {
+ H5_FAILED();
+ HDprintf(" a dataset was created with '.' as the name!\n");
+ PART_ERROR(H5Dcreate_dot_as_name);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Dcreate_dot_as_name);
+
+ PART_BEGIN(H5Tcommit_dot_as_name)
+ {
+ TESTING_2("invalid creation of committed datatype with '.' as name");
+
+ if ((dtype_id = H5Tcopy(H5T_NATIVE_INT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't copy a native datatype\n");
+ PART_ERROR(H5Tcommit_dot_as_name);
+ }
+
+ /* Commit a datatype with "." as the name. It should fail. */
+ H5E_BEGIN_TRY
+ {
+ ret = H5Tcommit2(subgroup_id, ".", dtype_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (ret >= 0) {
+ H5_FAILED();
+ HDprintf(" a named datatype was committed with '.' as the name!\n");
+ PART_ERROR(H5Tcommit_dot_as_name);
+ }
+
+ if (H5Tclose(dtype_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close datatype\n");
+ PART_ERROR(H5Tcommit_dot_as_name);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Tcommit_dot_as_name);
+ }
+ END_MULTIPART;
+
+ TESTING_2("test cleanup");
+
+ if (H5Sclose(dspace_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(subgroup_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(dspace_id);
+ H5Aclose(attr_id);
+ H5Dclose(dset_id);
+ H5Tclose(dtype_id);
+ H5Gclose(group_id);
+ H5Gclose(subgroup_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to check that the initialization and termination
+ * functions of a VOL connector can be called multiple times
+ * in a row.
+ *
+ * TODO: Not sure if this test can be done from public APIs
+ * at the moment.
+ */
+static int
+test_double_init_term(void)
+{
+ TESTING("double init/term correctness");
+
+ SKIPPED();
+
+ return 0;
+
+#if 0
+error:
+ return 1;
+#endif
+}
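+/*
+ * A rough sketch of the intent of the test above, in case it ever becomes
+ * expressible through public API calls alone (hypothetical; back-to-back
+ * H5open()/H5close() pairs are not guaranteed to reach a connector's
+ * initialize/terminate callbacks):
+ *
+ *     if (H5open() < 0 || H5open() < 0)
+ *         TEST_ERROR;
+ *     if (H5close() < 0 || H5close() < 0)
+ *         TEST_ERROR;
+ */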
+
+static int
+test_symbols_in_compound_field_name(void)
+{
+ size_t i;
+ size_t total_type_size;
+ size_t next_offset;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t compound_type = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t fspace_id = H5I_INVALID_HID;
+ hid_t type_pool[COMPOUND_WITH_SYMBOLS_IN_MEMBER_NAMES_TEST_NUM_SUBTYPES];
+ char member_names[COMPOUND_WITH_SYMBOLS_IN_MEMBER_NAMES_TEST_NUM_SUBTYPES][256];
+
+ TESTING("usage of '{', '}' and '\\\"' symbols in compound field name");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) {
+ SKIPPED();
+ HDprintf(
+ " API functions for basic file, group, or dataset aren't supported with this connector\n");
+ return 0;
+ }
+
+ for (i = 0; i < COMPOUND_WITH_SYMBOLS_IN_MEMBER_NAMES_TEST_NUM_SUBTYPES; i++)
+ type_pool[i] = H5I_INVALID_HID;
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file\n");
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, MISCELLANEOUS_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group\n");
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, COMPOUND_WITH_SYMBOLS_IN_MEMBER_NAMES_TEST_SUBGROUP_NAME,
+ H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container sub-group\n");
+ goto error;
+ }
+
+ for (i = 0, total_type_size = 0; i < COMPOUND_WITH_SYMBOLS_IN_MEMBER_NAMES_TEST_NUM_SUBTYPES; i++) {
+ type_pool[i] = generate_random_datatype(H5T_NO_CLASS, FALSE);
+ total_type_size += H5Tget_size(type_pool[i]);
+ }
+
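+    /* Note: the extra backslashes below are C escape sequences; for example, the
+     * format string "\\\"member4" compiles to the characters \"member4, which
+     * HDsnprintf copies verbatim into member_names[4], so the inserted member
+     * names themselves contain '\' and '"' characters.
+     */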
+ HDsnprintf(member_names[0], 256, "{{{ member0");
+ HDsnprintf(member_names[1], 256, "member1 }}}");
+ HDsnprintf(member_names[2], 256, "{{{ member2 }}");
+ HDsnprintf(member_names[3], 256, "{{ member3 }}}");
+ HDsnprintf(member_names[4], 256, "\\\"member4");
+ HDsnprintf(member_names[5], 256, "member5\\\"");
+ HDsnprintf(member_names[6], 256, "mem\\\"ber6");
+ HDsnprintf(member_names[7], 256, "{{ member7\\\" }");
+ HDsnprintf(member_names[8], 256, "{{ member8\\\\");
+
+ if ((compound_type = H5Tcreate(H5T_COMPOUND, total_type_size)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create compound datatype\n");
+ goto error;
+ }
+
+ for (i = 0, next_offset = 0; i < COMPOUND_WITH_SYMBOLS_IN_MEMBER_NAMES_TEST_NUM_SUBTYPES; i++) {
+ if (H5Tinsert(compound_type, member_names[i], next_offset, type_pool[i]) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't insert compound member %zu\n", i);
+ goto error;
+ }
+
+ next_offset += H5Tget_size(type_pool[i]);
+ }
+
+ if (H5Tpack(compound_type) < 0)
+ TEST_ERROR;
+
+ if ((fspace_id = generate_random_dataspace(COMPOUND_WITH_SYMBOLS_IN_MEMBER_NAMES_TEST_DSET_RANK, NULL,
+ NULL, FALSE)) < 0)
+ TEST_ERROR;
+
+ if ((dset_id = H5Dcreate2(group_id, COMPOUND_WITH_SYMBOLS_IN_MEMBER_NAMES_TEST_DSET_NAME, compound_type,
+ fspace_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create dataset\n");
+ goto error;
+ }
+
+ if (H5Dclose(dset_id) < 0)
+ TEST_ERROR;
+
+ if ((dset_id = H5Dopen2(group_id, COMPOUND_WITH_SYMBOLS_IN_MEMBER_NAMES_TEST_DSET_NAME, H5P_DEFAULT)) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" failed to open dataset\n");
+ goto error;
+ }
+
+ for (i = 0; i < COMPOUND_WITH_SYMBOLS_IN_MEMBER_NAMES_TEST_NUM_SUBTYPES; i++)
+ if (type_pool[i] >= 0 && H5Tclose(type_pool[i]) < 0)
+ TEST_ERROR;
+
+ if (H5Sclose(fspace_id) < 0)
+ TEST_ERROR;
+ if (H5Tclose(compound_type) < 0)
+ TEST_ERROR;
+ if (H5Dclose(dset_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ for (i = 0; i < COMPOUND_WITH_SYMBOLS_IN_MEMBER_NAMES_TEST_NUM_SUBTYPES; i++)
+ H5Tclose(type_pool[i]);
+ H5Sclose(fspace_id);
+ H5Tclose(compound_type);
+ H5Dclose(dset_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+int
+H5_api_misc_test(void)
+{
+ size_t i;
+ int nerrors;
+
+ HDprintf("**********************************************\n");
+ HDprintf("* *\n");
+ HDprintf("* API Miscellaneous Tests *\n");
+ HDprintf("* *\n");
+ HDprintf("**********************************************\n\n");
+
+ for (i = 0, nerrors = 0; i < ARRAY_LENGTH(misc_tests); i++) {
+ nerrors += (*misc_tests[i])() ? 1 : 0;
+ }
+
+ HDprintf("\n");
+
+ return nerrors;
+}
diff --git a/test/API/H5_api_misc_test.h b/test/API/H5_api_misc_test.h
new file mode 100644
index 0000000..8729db7
--- /dev/null
+++ b/test/API/H5_api_misc_test.h
@@ -0,0 +1,52 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+#ifndef H5_API_MISC_TEST_H
+#define H5_API_MISC_TEST_H
+
+#include "H5_api_test.h"
+
+int H5_api_misc_test(void);
+
+/******************************************************
+ * *
+ * API Miscellaneous test defines *
+ * *
+ ******************************************************/
+
+#define OPEN_LINK_WITHOUT_SLASH_DSET_SPACE_RANK 2
+#define OPEN_LINK_WITHOUT_SLASH_DSET_NAME "link_without_slash_test_dset"
+
+#define OBJECT_CREATE_BY_ABSOLUTE_PATH_TEST_CONTAINER_GROUP_NAME "absolute_path_test_container_group"
+#define OBJECT_CREATE_BY_ABSOLUTE_PATH_TEST_SUBGROUP_NAME "absolute_path_test_subgroup"
+#define OBJECT_CREATE_BY_ABSOLUTE_PATH_TEST_DTYPE_NAME "absolute_path_test_dtype"
+#define OBJECT_CREATE_BY_ABSOLUTE_PATH_TEST_DSET_NAME "absolute_path_test_dset"
+#define OBJECT_CREATE_BY_ABSOLUTE_PATH_TEST_DSET_SPACE_RANK 3
+
+#define ABSOLUTE_VS_RELATIVE_PATH_TEST_CONTAINER_GROUP_NAME "absolute_vs_relative_test_container_group"
+#define ABSOLUTE_VS_RELATIVE_PATH_TEST_DSET1_NAME "absolute_vs_relative_test_dset1"
+#define ABSOLUTE_VS_RELATIVE_PATH_TEST_DSET2_NAME "absolute_vs_relative_test_dset2"
+#define ABSOLUTE_VS_RELATIVE_PATH_TEST_DSET3_NAME "absolute_vs_relative_test_dset3"
+#define ABSOLUTE_VS_RELATIVE_PATH_TEST_DSET4_NAME "absolute_vs_relative_test_dset4"
+#define ABSOLUTE_VS_RELATIVE_PATH_TEST_DSET5_NAME "absolute_vs_relative_test_dset5"
+#define ABSOLUTE_VS_RELATIVE_PATH_TEST_DSET6_NAME "absolute_vs_relative_test_dset6"
+#define ABSOLUTE_VS_RELATIVE_PATH_TEST_DSET_SPACE_RANK 3
+
+#define DOT_AS_OBJECT_NAME_TEST_SUBGROUP_NAME "dot_as_object_name_test"
+
+#define COMPOUND_WITH_SYMBOLS_IN_MEMBER_NAMES_TEST_SUBGROUP_NAME \
+ "compound_type_with_symbols_in_member_names_test"
+#define COMPOUND_WITH_SYMBOLS_IN_MEMBER_NAMES_TEST_NUM_SUBTYPES 9
+#define COMPOUND_WITH_SYMBOLS_IN_MEMBER_NAMES_TEST_DSET_RANK 2
+#define COMPOUND_WITH_SYMBOLS_IN_MEMBER_NAMES_TEST_DSET_NAME "dset"
+
+#endif
diff --git a/test/API/H5_api_object_test.c b/test/API/H5_api_object_test.c
new file mode 100644
index 0000000..e054356
--- /dev/null
+++ b/test/API/H5_api_object_test.c
@@ -0,0 +1,7172 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+#include "H5_api_object_test.h"
+
+static int test_open_object(void);
+static int test_open_object_invalid_params(void);
+static int test_object_exists(void);
+static int test_object_exists_invalid_params(void);
+static int test_get_object_info(void);
+static int test_get_object_info_invalid_params(void);
+static int test_link_object(void);
+static int test_link_object_invalid_params(void);
+static int test_incr_decr_object_refcount(void);
+static int test_incr_decr_object_refcount_invalid_params(void);
+static int test_object_copy_basic(void);
+static int test_object_copy_already_existing(void);
+static int test_object_copy_shallow_group_copy(void);
+static int test_object_copy_no_attributes(void);
+static int test_object_copy_by_soft_link(void);
+static int test_object_copy_group_with_soft_links(void);
+static int test_object_copy_between_files(void);
+static int test_object_copy_invalid_params(void);
+static int test_object_comments(void);
+static int test_object_comments_invalid_params(void);
+static int test_object_visit(void);
+static int test_object_visit_soft_link(void);
+static int test_object_visit_invalid_params(void);
+static int test_close_object(void);
+static int test_close_object_invalid_params(void);
+static int test_close_invalid_objects(void);
+static int test_flush_object(void);
+static int test_flush_object_invalid_params(void);
+static int test_refresh_object(void);
+static int test_refresh_object_invalid_params(void);
+
+static herr_t object_copy_attribute_iter_callback(hid_t location_id, const char *attr_name,
+ const H5A_info_t *ainfo, void *op_data);
+static herr_t object_copy_soft_link_non_expand_callback(hid_t group, const char *name,
+ const H5L_info2_t *info, void *op_data);
+static herr_t object_copy_soft_link_expand_callback(hid_t group, const char *name, const H5L_info2_t *info,
+ void *op_data);
+static herr_t object_visit_callback(hid_t o_id, const char *name, const H5O_info2_t *object_info,
+ void *op_data);
+static herr_t object_visit_dset_callback(hid_t o_id, const char *name, const H5O_info2_t *object_info,
+ void *op_data);
+static herr_t object_visit_dtype_callback(hid_t o_id, const char *name, const H5O_info2_t *object_info,
+ void *op_data);
+static herr_t object_visit_soft_link_callback(hid_t o_id, const char *name, const H5O_info2_t *object_info,
+ void *op_data);
+static herr_t object_visit_noop_callback(hid_t o_id, const char *name, const H5O_info2_t *object_info,
+ void *op_data);
+
+/*
+ * The array of object tests to be performed.
+ */
+static int (*object_tests[])(void) = {
+ test_open_object,
+ test_open_object_invalid_params,
+ test_object_exists,
+ test_object_exists_invalid_params,
+ test_get_object_info,
+ test_get_object_info_invalid_params,
+ test_link_object,
+ test_link_object_invalid_params,
+ test_incr_decr_object_refcount,
+ test_incr_decr_object_refcount_invalid_params,
+ test_object_copy_basic,
+ test_object_copy_already_existing,
+ test_object_copy_shallow_group_copy,
+ test_object_copy_no_attributes,
+ test_object_copy_by_soft_link,
+ test_object_copy_group_with_soft_links,
+ test_object_copy_between_files,
+ test_object_copy_invalid_params,
+ test_object_comments,
+ test_object_comments_invalid_params,
+ test_object_visit,
+ test_object_visit_soft_link,
+ test_object_visit_invalid_params,
+ test_close_object,
+ test_close_object_invalid_params,
+ test_close_invalid_objects,
+ test_flush_object,
+ test_flush_object_invalid_params,
+ test_refresh_object,
+ test_refresh_object_invalid_params,
+};
+
+/*
+ * A test to check that various objects (group, dataset, datatype)
+ * can be opened by using H5Oopen and H5Oopen_by_idx.
+ *
+ * XXX: create separate objects for each test part.
+ *
+ * XXX: Add more open by idx tests
+ *
+ * XXX: test opening through dangling and resolving soft links.
+ */
+static int
+test_open_object(void)
+{
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t group_id2 = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t dset_dtype = H5I_INVALID_HID;
+ hid_t type_id = H5I_INVALID_HID;
+ hid_t fspace_id = H5I_INVALID_HID;
+
+ TESTING_MULTIPART("object opening");
+
+ TESTING_2("test setup");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & (H5VL_CAP_FLAG_FILE_BASIC)) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_OBJECT_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_STORED_DATATYPES)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file, group, object, dataset, or stored datatype aren't "
+ "supported with this connector\n");
+ return 0;
+ }
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, OBJECT_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", OBJECT_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, OBJECT_OPEN_TEST_GROUP_NAME, H5P_DEFAULT, H5P_DEFAULT,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container sub-group '%s'\n", OBJECT_OPEN_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((fspace_id = generate_random_dataspace(OBJECT_OPEN_TEST_SPACE_RANK, NULL, NULL, FALSE)) < 0)
+ TEST_ERROR;
+
+ if ((dset_dtype = generate_random_datatype(H5T_NO_CLASS, FALSE)) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ BEGIN_MULTIPART
+ {
+ PART_BEGIN(H5Oopen_group)
+ {
+ TESTING_2("H5Oopen on a group");
+
+ if ((group_id2 = H5Gcreate2(group_id, OBJECT_OPEN_TEST_GRP_NAME, H5P_DEFAULT, H5P_DEFAULT,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create group '%s'\n", OBJECT_OPEN_TEST_GRP_NAME);
+ PART_ERROR(H5Oopen_group);
+ }
+
+ H5E_BEGIN_TRY
+ {
+ H5Gclose(group_id2);
+ }
+ H5E_END_TRY;
+
+ if ((group_id2 = H5Oopen(group_id, OBJECT_OPEN_TEST_GRP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open group '%s' with H5Oopen\n", OBJECT_OPEN_TEST_GRP_NAME);
+ PART_ERROR(H5Oopen_group);
+ }
+
+ if (H5Iget_type(group_id2) != H5I_GROUP) {
+ H5_FAILED();
+ HDprintf(" ID is not a group\n");
+ PART_ERROR(H5Oopen_group);
+ }
+
+ if (H5Gclose(group_id2) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't close group opened with H5Oopen\n");
+ PART_ERROR(H5Oopen_group);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Oopen_group);
+
+ PART_BEGIN(H5Oopen_dset)
+ {
+ TESTING_2("H5Oopen on a dataset");
+
+ if ((dset_id = H5Dcreate2(group_id, OBJECT_OPEN_TEST_DSET_NAME, dset_dtype, fspace_id,
+ H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create dataset '%s'\n", OBJECT_OPEN_TEST_DSET_NAME);
+ PART_ERROR(H5Oopen_dset);
+ }
+
+ H5E_BEGIN_TRY
+ {
+ H5Dclose(dset_id);
+ }
+ H5E_END_TRY;
+
+ if ((dset_id = H5Oopen(group_id, OBJECT_OPEN_TEST_DSET_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open dataset '%s' with H5Oopen\n", OBJECT_OPEN_TEST_DSET_NAME);
+ PART_ERROR(H5Oopen_dset);
+ }
+
+ if (H5Iget_type(dset_id) != H5I_DATASET) {
+ H5_FAILED();
+ HDprintf(" ID is not a dataset\n");
+ PART_ERROR(H5Oopen_dset);
+ }
+
+ if (H5Dclose(dset_id) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't close dataset opened with H5Oopen\n");
+ PART_ERROR(H5Oopen_dset);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Oopen_dset);
+
+ PART_BEGIN(H5Oopen_dtype)
+ {
+ TESTING_2("H5Oopen on a committed datatype");
+
+ if ((type_id = generate_random_datatype(H5T_NO_CLASS, FALSE)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create datatype '%s'\n", OBJECT_OPEN_TEST_TYPE_NAME);
+ PART_ERROR(H5Oopen_dtype);
+ }
+
+ if (H5Tcommit2(group_id, OBJECT_OPEN_TEST_TYPE_NAME, type_id, H5P_DEFAULT, H5P_DEFAULT,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't commit datatype '%s'\n", OBJECT_OPEN_TEST_TYPE_NAME);
+ PART_ERROR(H5Oopen_dtype);
+ }
+
+ H5E_BEGIN_TRY
+ {
+ H5Tclose(type_id);
+ }
+ H5E_END_TRY;
+
+ if ((type_id = H5Oopen(group_id, OBJECT_OPEN_TEST_TYPE_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open datatype '%s' with H5Oopen\n", OBJECT_OPEN_TEST_TYPE_NAME);
+ PART_ERROR(H5Oopen_dtype);
+ }
+
+ if (H5Iget_type(type_id) != H5I_DATATYPE) {
+ H5_FAILED();
+                HDprintf("    ID is not a datatype\n");
+ PART_ERROR(H5Oopen_dtype);
+ }
+
+ if (H5Tclose(type_id) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't close committed datatype opened with H5Oopen\n");
+ PART_ERROR(H5Oopen_dtype);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Oopen_dtype);
+
+ if (group_id2 >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Gclose(group_id2);
+ }
+ H5E_END_TRY;
+ group_id2 = H5I_INVALID_HID;
+ }
+ if (dset_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Dclose(dset_id);
+ }
+ H5E_END_TRY;
+ dset_id = H5I_INVALID_HID;
+ }
+ if (type_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Tclose(type_id);
+ }
+ H5E_END_TRY;
+ type_id = H5I_INVALID_HID;
+ }
+
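+        /* The index values used below assume that, under H5_INDEX_NAME with
+         * H5_ITER_INC ordering, the three objects created above sort by link
+         * name as: dataset (index 0), group (index 1), committed datatype
+         * (index 2).
+         */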
+ PART_BEGIN(H5Oopen_by_idx_group)
+ {
+ TESTING_2("H5Oopen_by_idx on a group");
+
+ if ((group_id2 = H5Oopen_by_idx(container_group, OBJECT_OPEN_TEST_GROUP_NAME, H5_INDEX_NAME,
+ H5_ITER_INC, 1, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open group '%s' with H5Oopen_by_idx\n", OBJECT_OPEN_TEST_GRP_NAME);
+ PART_ERROR(H5Oopen_by_idx_group);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Oopen_by_idx_group);
+
+ PART_BEGIN(H5Oopen_by_idx_dset)
+ {
+ TESTING_2("H5Oopen_by_idx on a dataset");
+
+ if ((dset_id = H5Oopen_by_idx(container_group, OBJECT_OPEN_TEST_GROUP_NAME, H5_INDEX_NAME,
+ H5_ITER_INC, 0, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open dataset '%s' with H5Oopen_by_idx\n", OBJECT_OPEN_TEST_DSET_NAME);
+ PART_ERROR(H5Oopen_by_idx_dset);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Oopen_by_idx_dset);
+
+ PART_BEGIN(H5Oopen_by_idx_dtype)
+ {
+ TESTING_2("H5Oopen_by_idx on a committed datatype");
+
+ if ((type_id = H5Oopen_by_idx(container_group, OBJECT_OPEN_TEST_GROUP_NAME, H5_INDEX_NAME,
+ H5_ITER_INC, 2, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open committed datatype '%s' with H5Oopen_by_idx\n",
+ OBJECT_OPEN_TEST_TYPE_NAME);
+ PART_ERROR(H5Oopen_by_idx_dtype);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Oopen_by_idx_dtype);
+
+ if (group_id2 >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Gclose(group_id2);
+ }
+ H5E_END_TRY;
+ group_id2 = H5I_INVALID_HID;
+ }
+ if (dset_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Dclose(dset_id);
+ }
+ H5E_END_TRY;
+ dset_id = H5I_INVALID_HID;
+ }
+ if (type_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Tclose(type_id);
+ }
+ H5E_END_TRY;
+ type_id = H5I_INVALID_HID;
+ }
+ }
+ END_MULTIPART;
+
+ TESTING_2("test cleanup");
+
+ if (H5Sclose(fspace_id) < 0)
+ TEST_ERROR;
+ if (H5Tclose(dset_dtype) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(fspace_id);
+ H5Tclose(dset_dtype);
+ H5Tclose(type_id);
+ H5Dclose(dset_id);
+ H5Gclose(group_id2);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to check that various objects (group, dataset, datatype)
+ * can't be opened when H5Oopen, H5Oopen_by_idx and H5Oopen_by_token
+ * are passed invalid parameters.
+ */
+static int
+test_open_object_invalid_params(void)
+{
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t group_id2 = H5I_INVALID_HID;
+ hid_t gcpl_id = H5I_INVALID_HID;
+
+ TESTING_MULTIPART("object opening with invalid parameters");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & (H5VL_CAP_FLAG_FILE_BASIC)) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_OBJECT_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_CREATION_ORDER)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file, group, object, or creation order aren't supported with "
+ "this connector\n");
+ return 0;
+ }
+
+ TESTING_2("test setup");
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, OBJECT_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", OBJECT_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((gcpl_id = H5Pcreate(H5P_GROUP_CREATE)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create a GCPL\n");
+ goto error;
+ }
+
+ if (H5Pset_link_creation_order(gcpl_id, H5P_CRT_ORDER_TRACKED | H5P_CRT_ORDER_INDEXED) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't enable link creation order tracking and indexing on GCPL\n");
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, OBJECT_OPEN_INVALID_PARAMS_TEST_GROUP_NAME, H5P_DEFAULT,
+ gcpl_id, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container sub-group '%s'\n",
+ OBJECT_OPEN_INVALID_PARAMS_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id2 = H5Gcreate2(group_id, OBJECT_OPEN_INVALID_PARAMS_TEST_GRP_NAME, H5P_DEFAULT, H5P_DEFAULT,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create group '%s'\n", OBJECT_OPEN_INVALID_PARAMS_TEST_GRP_NAME);
+ goto error;
+ }
+
+ if (H5Gclose(group_id2) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ BEGIN_MULTIPART
+ {
+ PART_BEGIN(H5Oopen_invalid_loc_id)
+ {
+ TESTING_2("H5Oopen with an invalid location ID");
+
+ H5E_BEGIN_TRY
+ {
+ group_id2 = H5Oopen(H5I_INVALID_HID, OBJECT_OPEN_INVALID_PARAMS_TEST_GRP_NAME, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (group_id2 >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Oopen succeeded with an invalid location ID!\n");
+ H5Gclose(group_id2);
+ PART_ERROR(H5Oopen_invalid_loc_id);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Oopen_invalid_loc_id);
+
+ PART_BEGIN(H5Oopen_invalid_obj_name)
+ {
+ TESTING_2("H5Oopen with an invalid object name");
+
+ H5E_BEGIN_TRY
+ {
+ group_id2 = H5Oopen(group_id, NULL, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (group_id2 >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Oopen succeeded with a NULL object name!\n");
+ H5Gclose(group_id2);
+ PART_ERROR(H5Oopen_invalid_obj_name);
+ }
+
+ H5E_BEGIN_TRY
+ {
+ group_id2 = H5Oopen(group_id, "", H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (group_id2 >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Oopen succeeded with an invalid object name of ''!\n");
+ H5Gclose(group_id2);
+ PART_ERROR(H5Oopen_invalid_obj_name);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Oopen_invalid_obj_name);
+
+ PART_BEGIN(H5Oopen_invalid_lapl)
+ {
+ TESTING_2("H5Oopen with an invalid LAPL");
+
+ H5E_BEGIN_TRY
+ {
+ group_id2 = H5Oopen(group_id, OBJECT_OPEN_INVALID_PARAMS_TEST_GRP_NAME, H5I_INVALID_HID);
+ }
+ H5E_END_TRY;
+
+ if (group_id2 >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Oopen succeeded with an invalid LAPL!\n");
+ H5Gclose(group_id2);
+ PART_ERROR(H5Oopen_invalid_lapl);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Oopen_invalid_lapl);
+
+ PART_BEGIN(H5Oopen_by_idx_invalid_loc_id)
+ {
+ TESTING_2("H5Oopen_by_idx with an invalid location ID");
+
+ H5E_BEGIN_TRY
+ {
+ group_id2 = H5Oopen_by_idx(H5I_INVALID_HID, OBJECT_OPEN_INVALID_PARAMS_TEST_GROUP_NAME,
+ H5_INDEX_NAME, H5_ITER_INC, 0, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (group_id2 >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Oopen_by_idx succeeded with an invalid location ID!\n");
+ H5Gclose(group_id2);
+ PART_ERROR(H5Oopen_by_idx_invalid_loc_id);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Oopen_by_idx_invalid_loc_id);
+
+ PART_BEGIN(H5Oopen_by_idx_invalid_grp_name)
+ {
+ TESTING_2("H5Oopen_by_idx with an invalid group name");
+
+ H5E_BEGIN_TRY
+ {
+ group_id2 = H5Oopen_by_idx(container_group, NULL, H5_INDEX_NAME, H5_ITER_INC, 0, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (group_id2 >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Oopen_by_idx succeeded with a NULL group name!\n");
+ H5Gclose(group_id2);
+ PART_ERROR(H5Oopen_by_idx_invalid_grp_name);
+ }
+
+ H5E_BEGIN_TRY
+ {
+ group_id2 = H5Oopen_by_idx(container_group, "", H5_INDEX_NAME, H5_ITER_INC, 0, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (group_id2 >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Oopen_by_idx succeeded with an invalid group name of ''!\n");
+ H5Gclose(group_id2);
+ PART_ERROR(H5Oopen_by_idx_invalid_grp_name);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Oopen_by_idx_invalid_grp_name);
+
+ PART_BEGIN(H5Oopen_by_idx_invalid_index_type)
+ {
+ TESTING_2("H5Oopen_by_idx with an invalid index type");
+
+ H5E_BEGIN_TRY
+ {
+ group_id2 = H5Oopen_by_idx(container_group, OBJECT_OPEN_INVALID_PARAMS_TEST_GROUP_NAME,
+ H5_INDEX_UNKNOWN, H5_ITER_INC, 0, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (group_id2 >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Oopen_by_idx succeeded with invalid index type H5_INDEX_UNKNOWN!\n");
+ H5Gclose(group_id2);
+ PART_ERROR(H5Oopen_by_idx_invalid_index_type);
+ }
+
+ H5E_BEGIN_TRY
+ {
+ group_id2 = H5Oopen_by_idx(container_group, OBJECT_OPEN_INVALID_PARAMS_TEST_GROUP_NAME,
+ H5_INDEX_N, H5_ITER_INC, 0, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (group_id2 >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Oopen_by_idx succeeded with invalid index type H5_INDEX_N!\n");
+ H5Gclose(group_id2);
+ PART_ERROR(H5Oopen_by_idx_invalid_index_type);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Oopen_by_idx_invalid_index_type);
+
+ PART_BEGIN(H5Oopen_by_idx_invalid_iter_order)
+ {
+ TESTING_2("H5Oopen_by_idx with an invalid iteration order");
+
+ H5E_BEGIN_TRY
+ {
+ group_id2 = H5Oopen_by_idx(container_group, OBJECT_OPEN_INVALID_PARAMS_TEST_GROUP_NAME,
+ H5_INDEX_NAME, H5_ITER_UNKNOWN, 0, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (group_id2 >= 0) {
+ H5_FAILED();
+ HDprintf(
+ " H5Oopen_by_idx succeeded with an invalid iteration ordering H5_ITER_UNKNOWN!\n");
+ H5Gclose(group_id2);
+ PART_ERROR(H5Oopen_by_idx_invalid_iter_order);
+ }
+
+ H5E_BEGIN_TRY
+ {
+ group_id2 = H5Oopen_by_idx(container_group, OBJECT_OPEN_INVALID_PARAMS_TEST_GROUP_NAME,
+ H5_INDEX_NAME, H5_ITER_N, 0, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (group_id2 >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Oopen_by_idx succeeded with an invalid iteration ordering H5_ITER_N!\n");
+ H5Gclose(group_id2);
+ PART_ERROR(H5Oopen_by_idx_invalid_iter_order);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Oopen_by_idx_invalid_iter_order);
+
+ PART_BEGIN(H5Oopen_by_idx_invalid_lapl)
+ {
+ TESTING_2("H5Oopen_by_idx with an invalid LAPL");
+
+ H5E_BEGIN_TRY
+ {
+ group_id2 = H5Oopen_by_idx(container_group, OBJECT_OPEN_INVALID_PARAMS_TEST_GROUP_NAME,
+ H5_INDEX_NAME, H5_ITER_INC, 0, H5I_INVALID_HID);
+ }
+ H5E_END_TRY;
+
+ if (group_id2 >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Oopen_by_idx succeeded with an invalid LAPL!\n");
+ H5Gclose(group_id2);
+ PART_ERROR(H5Oopen_by_idx_invalid_lapl);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Oopen_by_idx_invalid_lapl);
+
+ PART_BEGIN(H5Oopen_by_token_invalid_loc_id)
+ {
+ TESTING_2("H5Oopen_by_token with an invalid location ID");
+
+ H5E_BEGIN_TRY
+ {
+ group_id2 = H5Oopen_by_token(H5I_INVALID_HID, H5O_TOKEN_UNDEF);
+ }
+ H5E_END_TRY;
+
+ if (group_id2 >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Oopen_by_token succeeded with an invalid location ID!\n");
+ H5Gclose(group_id2);
+ PART_ERROR(H5Oopen_by_token_invalid_loc_id);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Oopen_by_token_invalid_loc_id);
+
+ PART_BEGIN(H5Oopen_by_token_invalid_token)
+ {
+ TESTING_2("H5Oopen_by_token with an invalid token");
+
+ H5E_BEGIN_TRY
+ {
+ group_id2 = H5Oopen_by_token(file_id, H5O_TOKEN_UNDEF);
+ }
+ H5E_END_TRY;
+
+ if (group_id2 >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Oopen_by_token succeeded with an invalid token!\n");
+ H5Gclose(group_id2);
+ PART_ERROR(H5Oopen_by_token_invalid_token);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Oopen_by_token_invalid_token);
+ }
+ END_MULTIPART;
+
+ TESTING_2("test cleanup");
+
+ if (H5Pclose(gcpl_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Pclose(gcpl_id);
+ H5Gclose(group_id2);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test for H5Oexists_by_name.
+ */
+static int
+test_object_exists(void)
+{
+ htri_t object_exists;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t group_id2 = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t dtype_id = H5I_INVALID_HID;
+ hid_t fspace_id = H5I_INVALID_HID;
+ hid_t dset_dtype = H5I_INVALID_HID;
+
+ TESTING_MULTIPART("object existence");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & (H5VL_CAP_FLAG_FILE_BASIC)) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_OBJECT_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_STORED_DATATYPES) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_SOFT_LINKS)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file, group, object, dataset, stored datatype or soft link "
+ "aren't supported with this connector\n");
+ return 0;
+ }
+
+ TESTING_2("test setup");
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, OBJECT_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", OBJECT_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, OBJECT_EXISTS_TEST_SUBGROUP_NAME, H5P_DEFAULT, H5P_DEFAULT,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container subgroup '%s'\n", OBJECT_EXISTS_TEST_SUBGROUP_NAME);
+ goto error;
+ }
+
+ if ((fspace_id = generate_random_dataspace(OBJECT_EXISTS_TEST_DSET_SPACE_RANK, NULL, NULL, FALSE)) < 0)
+ TEST_ERROR;
+
+ if ((dset_dtype = generate_random_datatype(H5T_NO_CLASS, FALSE)) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ /*
+ * NOTE: H5Oexists_by_name for hard links should always succeed and report
+ * that the object exists. For a soft link, the call itself should still
+ * succeed even when the link doesn't resolve; in that case it simply reports
+ * that the target object doesn't exist (see the dangling soft link part below).
+ */
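+ /*
+ * A minimal sketch of how the return value is interpreted ("loc_id" and
+ * "link_name" are placeholders; this mirrors the checks in the parts below):
+ *
+ *     htri_t exists = H5Oexists_by_name(loc_id, link_name, H5P_DEFAULT);
+ *
+ *     if (exists < 0)
+ *         ... the call itself failed ...
+ *     else if (exists)
+ *         ... the link resolves to an object ...
+ *     else
+ *         ... the link exists but doesn't resolve (e.g. a dangling soft link) ...
+ */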
+ BEGIN_MULTIPART
+ {
+ PART_BEGIN(H5Oexists_by_name_group)
+ {
+ TESTING_2("H5Oexists_by_name on a group");
+
+ if ((group_id2 = H5Gcreate2(group_id, OBJECT_EXISTS_TEST_GRP_NAME, H5P_DEFAULT, H5P_DEFAULT,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create group '%s'\n", OBJECT_EXISTS_TEST_GRP_NAME);
+ PART_ERROR(H5Oexists_by_name_group);
+ }
+
+ if ((object_exists = H5Oexists_by_name(group_id, OBJECT_EXISTS_TEST_GRP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if object '%s' exists\n", OBJECT_EXISTS_TEST_GRP_NAME);
+ PART_ERROR(H5Oexists_by_name_group);
+ }
+
+ if (!object_exists) {
+ H5_FAILED();
+ HDprintf(" object '%s' didn't exist!\n", OBJECT_EXISTS_TEST_GRP_NAME);
+ PART_ERROR(H5Oexists_by_name_group);
+ }
+
+ if (H5Gclose(group_id2) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't close group\n");
+ PART_ERROR(H5Oexists_by_name_group);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Oexists_by_name_group);
+
+ PART_BEGIN(H5Oexists_by_name_dset)
+ {
+ TESTING_2("H5Oexists_by_name on a dataset");
+
+ if ((dset_id = H5Dcreate2(group_id, OBJECT_EXISTS_TEST_DSET_NAME, dset_dtype, fspace_id,
+ H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create dataset '%s'\n", OBJECT_EXISTS_TEST_DSET_NAME);
+ PART_ERROR(H5Oexists_by_name_dset);
+ }
+
+ if ((object_exists = H5Oexists_by_name(group_id, OBJECT_EXISTS_TEST_DSET_NAME, H5P_DEFAULT)) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if object '%s' exists\n", OBJECT_EXISTS_TEST_DSET_NAME);
+ PART_ERROR(H5Oexists_by_name_dset);
+ }
+
+ if (!object_exists) {
+ H5_FAILED();
+ HDprintf(" object '%s' didn't exist!\n", OBJECT_EXISTS_TEST_DSET_NAME);
+ PART_ERROR(H5Oexists_by_name_dset);
+ }
+
+ if (H5Dclose(dset_id) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't close dataset\n");
+ PART_ERROR(H5Oexists_by_name_dset);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Oexists_by_name_dset);
+
+ PART_BEGIN(H5Oexists_by_name_dtype)
+ {
+ TESTING_2("H5Oexists_by_name on a committed datatype");
+
+ if ((dtype_id = generate_random_datatype(H5T_NO_CLASS, FALSE)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create datatype '%s'\n", OBJECT_EXISTS_TEST_TYPE_NAME);
+ PART_ERROR(H5Oexists_by_name_dtype);
+ }
+
+ if (H5Tcommit2(group_id, OBJECT_EXISTS_TEST_TYPE_NAME, dtype_id, H5P_DEFAULT, H5P_DEFAULT,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't commit datatype '%s'\n", OBJECT_EXISTS_TEST_TYPE_NAME);
+ PART_ERROR(H5Oexists_by_name_dtype);
+ }
+
+ if ((object_exists = H5Oexists_by_name(group_id, OBJECT_EXISTS_TEST_TYPE_NAME, H5P_DEFAULT)) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if object '%s' exists\n", OBJECT_EXISTS_TEST_TYPE_NAME);
+ PART_ERROR(H5Oexists_by_name_dtype);
+ }
+
+ if (!object_exists) {
+ H5_FAILED();
+ HDprintf(" object '%s' didn't exist!\n", OBJECT_EXISTS_TEST_TYPE_NAME);
+ PART_ERROR(H5Oexists_by_name_dtype);
+ }
+
+ if (H5Tclose(dtype_id) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't close datatype\n");
+ PART_ERROR(H5Oexists_by_name_dtype);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Oexists_by_name_dtype);
+
+ PART_BEGIN(H5Oexists_by_name_soft_link)
+ {
+ TESTING_2("H5Oexists_by_name for a soft link");
+
+ if (H5Lcreate_soft("/" OBJECT_TEST_GROUP_NAME "/" OBJECT_EXISTS_TEST_SUBGROUP_NAME, group_id,
+ OBJECT_EXISTS_TEST_SOFT_LINK_NAME, H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create soft link '%s'\n", OBJECT_EXISTS_TEST_SOFT_LINK_NAME);
+ PART_ERROR(H5Oexists_by_name_soft_link);
+ }
+
+ if ((object_exists =
+ H5Oexists_by_name(group_id, OBJECT_EXISTS_TEST_SOFT_LINK_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if object '%s' exists\n", OBJECT_EXISTS_TEST_SOFT_LINK_NAME);
+ PART_ERROR(H5Oexists_by_name_soft_link);
+ }
+
+ if (!object_exists) {
+ H5_FAILED();
+ HDprintf(" object '%s' didn't exist!\n", OBJECT_EXISTS_TEST_SOFT_LINK_NAME);
+ PART_ERROR(H5Oexists_by_name_soft_link);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Oexists_by_name_soft_link);
+
+ PART_BEGIN(H5Oexists_by_name_dangling_soft_link)
+ {
+ TESTING_2("H5Oexists_by_name for a dangling soft link");
+
+ if (H5Lcreate_soft(
+ "/" OBJECT_TEST_GROUP_NAME "/" OBJECT_EXISTS_TEST_SUBGROUP_NAME "/non_existent_object",
+ group_id, OBJECT_EXISTS_TEST_DANGLING_LINK_NAME, H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create soft link '%s'\n", OBJECT_EXISTS_TEST_DANGLING_LINK_NAME);
+ PART_ERROR(H5Oexists_by_name_dangling_soft_link);
+ }
+
+ if ((object_exists =
+ H5Oexists_by_name(group_id, OBJECT_EXISTS_TEST_DANGLING_LINK_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if object '%s' exists\n",
+ "/" OBJECT_TEST_GROUP_NAME "/" OBJECT_EXISTS_TEST_SUBGROUP_NAME
+ "/non_existent_object");
+ PART_ERROR(H5Oexists_by_name_dangling_soft_link);
+ }
+
+ if (object_exists) {
+ H5_FAILED();
+ HDprintf(" object pointed to by dangling soft link should not have existed!\n");
+ PART_ERROR(H5Oexists_by_name_dangling_soft_link);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Oexists_by_name_dangling_soft_link);
+ }
+ END_MULTIPART;
+
+ TESTING_2("test cleanup");
+
+ if (H5Sclose(fspace_id) < 0)
+ TEST_ERROR;
+ if (H5Tclose(dset_dtype) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(fspace_id);
+ H5Tclose(dset_dtype);
+ H5Tclose(dtype_id);
+ H5Dclose(dset_id);
+ H5Gclose(group_id2);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to check that H5Oexists_by_name fails
+ * when it is passed invalid parameters.
+ */
+static int
+test_object_exists_invalid_params(void)
+{
+ htri_t object_exists;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t group_id2 = H5I_INVALID_HID;
+
+ TESTING_MULTIPART("object existence with invalid parameters");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & (H5VL_CAP_FLAG_FILE_BASIC)) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_OBJECT_BASIC)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file, group, or object aren't supported with this connector\n");
+ return 0;
+ }
+
+ TESTING_2("test setup");
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, OBJECT_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", OBJECT_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, OBJECT_EXISTS_INVALID_PARAMS_TEST_SUBGROUP_NAME, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container subgroup '%s'\n",
+ OBJECT_EXISTS_INVALID_PARAMS_TEST_SUBGROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id2 = H5Gcreate2(group_id, OBJECT_EXISTS_INVALID_PARAMS_TEST_GRP_NAME, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create group '%s'\n", OBJECT_EXISTS_INVALID_PARAMS_TEST_GRP_NAME);
+ goto error;
+ }
+
+ if (H5Gclose(group_id2) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ BEGIN_MULTIPART
+ {
+ PART_BEGIN(H5Oexists_by_name_invalid_loc_id)
+ {
+ TESTING_2("H5Oexists_by_name with an invalid location ID");
+
+ H5E_BEGIN_TRY
+ {
+ object_exists = H5Oexists_by_name(H5I_INVALID_HID, OBJECT_EXISTS_INVALID_PARAMS_TEST_GRP_NAME,
+ H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (object_exists >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Oexists_by_name succeeded with an invalid location ID!\n");
+ PART_ERROR(H5Oexists_by_name_invalid_loc_id);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Oexists_by_name_invalid_loc_id);
+
+ PART_BEGIN(H5Oexists_by_name_invalid_obj_name)
+ {
+ TESTING_2("H5Oexists_by_name with an invalid object name");
+
+ H5E_BEGIN_TRY
+ {
+ object_exists = H5Oexists_by_name(group_id, NULL, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (object_exists >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Oexists_by_name succeeded with a NULL object name!\n");
+ PART_ERROR(H5Oexists_by_name_invalid_obj_name);
+ }
+
+ H5E_BEGIN_TRY
+ {
+ object_exists = H5Oexists_by_name(group_id, "", H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (object_exists >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Oexists_by_name succeeded with an invalid object name of ''!\n");
+ PART_ERROR(H5Oexists_by_name_invalid_obj_name);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Oexists_by_name_invalid_obj_name);
+
+ PART_BEGIN(H5Oexists_by_name_invalid_lapl)
+ {
+ TESTING_2("H5Oexists_by_name with an invalid LAPL");
+
+ H5E_BEGIN_TRY
+ {
+ object_exists =
+ H5Oexists_by_name(group_id, OBJECT_EXISTS_INVALID_PARAMS_TEST_GRP_NAME, H5I_INVALID_HID);
+ }
+ H5E_END_TRY;
+
+ if (object_exists >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Oexists_by_name succeeded with an invalid LAPL!\n");
+ PART_ERROR(H5Oexists_by_name_invalid_lapl);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Oexists_by_name_invalid_lapl);
+ }
+ END_MULTIPART;
+
+ TESTING_2("test cleanup");
+
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Gclose(group_id2);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test for H5Oget_info(_by_name/_by_idx).
+ */
+static int
+test_get_object_info(void)
+{
+ TESTING("object info retrieval");
+
+ SKIPPED();
+
+ return 0;
+}
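+
+/*
+ * The test above is currently a stub. A rough sketch, assuming "loc_id" and
+ * "grp_name" are placeholders, of the three retrieval variants the stub is
+ * meant to cover:
+ *
+ *     H5O_info2_t oinfo;
+ *
+ *     if (H5Oget_info3(loc_id, &oinfo, H5O_INFO_BASIC) < 0)
+ *         TEST_ERROR;
+ *     if (H5Oget_info_by_name3(loc_id, grp_name, &oinfo, H5O_INFO_BASIC, H5P_DEFAULT) < 0)
+ *         TEST_ERROR;
+ *     if (H5Oget_info_by_idx3(loc_id, grp_name, H5_INDEX_NAME, H5_ITER_INC, 0, &oinfo,
+ *                             H5O_INFO_BASIC, H5P_DEFAULT) < 0)
+ *         TEST_ERROR;
+ */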
+
+/*
+ * A test to check that an object's info can't be retrieved
+ * when H5Oget_info(_by_name/_by_idx) are passed invalid
+ * parameters.
+ */
+static int
+test_get_object_info_invalid_params(void)
+{
+ TESTING("object info retrieval with invalid parameters");
+
+ SKIPPED();
+
+ return 0;
+}
+
+/*
+ * A test for H5Olink.
+ */
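+/*
+ * Rough sketch of the pattern exercised below (not executed; "loc_id" and
+ * "linked_group" are placeholders): an object is first created anonymously,
+ * then given a name in the file with H5Olink.
+ *
+ *     hid_t anon_group_id;
+ *
+ *     if ((anon_group_id = H5Gcreate_anon(loc_id, H5P_DEFAULT, H5P_DEFAULT)) < 0)
+ *         TEST_ERROR;
+ *     if (H5Olink(anon_group_id, loc_id, "linked_group", H5P_DEFAULT, H5P_DEFAULT) < 0)
+ *         TEST_ERROR;
+ *     if (H5Gclose(anon_group_id) < 0)
+ *         TEST_ERROR;
+ */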
+static int
+test_link_object(void)
+{
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t group_id2 = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t dset_dtype = H5I_INVALID_HID;
+ hid_t type_id = H5I_INVALID_HID;
+ hid_t fspace_id = H5I_INVALID_HID;
+
+ TESTING_MULTIPART("object linking");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & (H5VL_CAP_FLAG_FILE_BASIC)) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_OBJECT_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_STORED_DATATYPES)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file, group, object, dataset, or stored datatype aren't "
+ "supported with this connector\n");
+ return 0;
+ }
+
+ TESTING_2("test setup");
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, OBJECT_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", OBJECT_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, OBJECT_LINK_TEST_GROUP_NAME, H5P_DEFAULT, H5P_DEFAULT,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container sub-group '%s'\n", OBJECT_LINK_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((fspace_id = generate_random_dataspace(OBJECT_LINK_TEST_SPACE_RANK, NULL, NULL, FALSE)) < 0)
+ TEST_ERROR;
+
+ if ((dset_dtype = generate_random_datatype(H5T_NO_CLASS, FALSE)) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ BEGIN_MULTIPART
+ {
+ PART_BEGIN(H5Olink_group)
+ {
+ TESTING_2("H5Olink an anonymous group");
+
+ if ((group_id2 = H5Gcreate_anon(group_id, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create an anonymous group\n");
+ PART_ERROR(H5Olink_group);
+ }
+
+ if (H5Olink(group_id2, group_id, OBJECT_LINK_TEST_GROUP_NAME2, H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't link the anonymous group\n");
+ PART_ERROR(H5Olink_group);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Olink_group);
+
+ PART_BEGIN(H5Olink_dataset)
+ {
+ TESTING_2("H5Olink an anonymous dataset");
+
+ if ((dset_id = H5Dcreate_anon(group_id, dset_dtype, fspace_id, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create an anonymous dataset\n");
+ PART_ERROR(H5Olink_dataset);
+ }
+
+ if (H5Olink(dset_id, group_id, OBJECT_LINK_TEST_DSET_NAME, H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't link the anonymous dataset\n");
+ PART_ERROR(H5Olink_dataset);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Olink_dataset);
+
+ PART_BEGIN(H5Olink_datatype)
+ {
+ TESTING_2("H5Olink an anonymous datatype");
+
+ if (H5Tcommit_anon(group_id, dset_dtype, H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create an anonymous datatype\n");
+ PART_ERROR(H5Olink_datatype);
+ }
+
+ if (H5Olink(dset_dtype, group_id, OBJECT_LINK_TEST_DTYPE_NAME, H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't link the anonymous datatype\n");
+ PART_ERROR(H5Olink_datatype);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Olink_datatype);
+ }
+ END_MULTIPART;
+
+ TESTING_2("test cleanup");
+
+ if (H5Sclose(fspace_id) < 0)
+ TEST_ERROR;
+ if (H5Tclose(dset_dtype) < 0)
+ TEST_ERROR;
+ if (H5Dclose(dset_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id2) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(fspace_id);
+ H5Tclose(dset_dtype);
+ H5Tclose(type_id);
+ H5Dclose(dset_id);
+ H5Gclose(group_id2);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to check that an object can't be linked into
+ * the file structure when H5Olink is passed invalid
+ * parameters.
+ */
+static int
+test_link_object_invalid_params(void)
+{
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t group_id2 = H5I_INVALID_HID;
+ herr_t status;
+
+ TESTING_MULTIPART("object linking with invalid parameters");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & (H5VL_CAP_FLAG_FILE_BASIC)) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_OBJECT_BASIC)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file, group, or object aren't supported with this connector\n");
+ return 0;
+ }
+
+ TESTING_2("test setup");
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, OBJECT_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", OBJECT_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, OBJECT_LINK_INVALID_PARAMS_TEST_GROUP_NAME, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container sub-group '%s'\n", OBJECT_LINK_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id2 = H5Gcreate_anon(group_id, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create an anonymous group\n");
+ goto error;
+ }
+
+ PASSED();
+
+ BEGIN_MULTIPART
+ {
+ PART_BEGIN(H5Olink_invalid_object_id)
+ {
+ TESTING_2("H5Olink with an invalid object ID");
+
+ H5E_BEGIN_TRY
+ {
+ status = H5Olink(H5I_INVALID_HID, group_id, OBJECT_LINK_TEST_GROUP_NAME2, H5P_DEFAULT,
+ H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (status >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Olink succeeded with an invalid object ID!\n");
+ PART_ERROR(H5Olink_invalid_object_id);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Olink_invalid_object_id);
+
+ PART_BEGIN(H5Olink_invalid_location)
+ {
+ TESTING_2("H5Olink with an invalid location ID");
+
+ H5E_BEGIN_TRY
+ {
+ status = H5Olink(group_id2, H5I_INVALID_HID, OBJECT_LINK_TEST_GROUP_NAME2, H5P_DEFAULT,
+ H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (status >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Olink succeeded with an invalid location ID!\n");
+ PART_ERROR(H5Olink_invalid_location);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Olink_invalid_location);
+
+ PART_BEGIN(H5Olink_invalid_name)
+ {
+ TESTING_2("H5Olink with an invalid name");
+
+ H5E_BEGIN_TRY
+ {
+ status = H5Olink(group_id2, group_id, NULL, H5P_DEFAULT, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (status >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Olink succeeded with NULL as the object name!\n");
+ PART_ERROR(H5Olink_invalid_name);
+ }
+
+ H5E_BEGIN_TRY
+ {
+ status = H5Olink(group_id2, group_id, "", H5P_DEFAULT, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (status >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Olink succeeded with an invalid object name of ''!\n");
+ PART_ERROR(H5Olink_invalid_name);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Olink_invalid_name);
+
+ PART_BEGIN(H5Olink_invalid_lcpl)
+ {
+ TESTING_2("H5Olink with an invalid LCPL");
+
+ H5E_BEGIN_TRY
+ {
+ status =
+ H5Olink(group_id2, group_id, OBJECT_LINK_TEST_GROUP_NAME2, H5I_INVALID_HID, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (status >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Olink succeeded with an invalid LCPL!\n");
+ PART_ERROR(H5Olink_invalid_lcpl);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Olink_invalid_lcpl);
+
+ PART_BEGIN(H5Olink_invalid_lapl)
+ {
+ TESTING_2("H5Olink with an invalid LAPL");
+#ifndef NO_INVALID_PROPERTY_LIST_TESTS
+ H5E_BEGIN_TRY
+ {
+ status =
+ H5Olink(group_id2, group_id, OBJECT_LINK_TEST_GROUP_NAME2, H5P_DEFAULT, H5I_INVALID_HID);
+ }
+ H5E_END_TRY;
+
+ if (status >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Olink succeeded with an invalid LAPL!\n");
+ PART_ERROR(H5Olink_invalid_lapl);
+ }
+
+ PASSED();
+#else
+ SKIPPED();
+ PART_EMPTY(H5Olink_invalid_lapl);
+#endif
+ }
+ PART_END(H5Olink_invalid_lapl);
+ }
+ END_MULTIPART;
+
+ TESTING_2("test cleanup");
+
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id2) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Gclose(group_id2);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test for H5Oincr_refcount/H5Odecr_refcount.
+ */
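+/*
+ * Rough sketch of the pattern exercised below (not executed; "obj_id", "loc_id"
+ * and "obj_name" are placeholders): each H5Oincr_refcount should be balanced by
+ * an H5Odecr_refcount, otherwise the file's link accounting no longer matches
+ * the links that actually exist.
+ *
+ *     H5O_info2_t oinfo;
+ *
+ *     if (H5Oincr_refcount(obj_id) < 0)
+ *         TEST_ERROR;
+ *     if (H5Oget_info_by_name3(loc_id, obj_name, &oinfo, H5O_INFO_BASIC, H5P_DEFAULT) < 0)
+ *         TEST_ERROR;
+ *     (oinfo.rc is now 2 for an object reached by a single hard link)
+ *     if (H5Odecr_refcount(obj_id) < 0)
+ *         TEST_ERROR;
+ */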
+static int
+test_incr_decr_object_refcount(void)
+{
+ H5O_info2_t oinfo; /* Object info struct */
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t group_id2 = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t fspace_id = H5I_INVALID_HID;
+ hid_t dset_dtype = H5I_INVALID_HID;
+
+ TESTING_MULTIPART("increment/decrement the reference count of object");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & (H5VL_CAP_FLAG_FILE_BASIC)) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_OBJECT_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_OBJECT_MORE) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_STORED_DATATYPES)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file, group, dataset, stored datatype, basic or more object "
+ "aren't supported with this connector\n");
+ return 0;
+ }
+
+ TESTING_2("test setup");
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, OBJECT_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", OBJECT_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, OBJECT_REF_COUNT_TEST_SUBGROUP_NAME, H5P_DEFAULT, H5P_DEFAULT,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container subgroup '%s'\n", OBJECT_REF_COUNT_TEST_SUBGROUP_NAME);
+ goto error;
+ }
+
+ if ((fspace_id = generate_random_dataspace(OBJECT_REF_COUNT_TEST_DSET_SPACE_RANK, NULL, NULL, FALSE)) < 0)
+ TEST_ERROR;
+
+ if ((dset_dtype = generate_random_datatype(H5T_NO_CLASS, FALSE)) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ BEGIN_MULTIPART
+ {
+ PART_BEGIN(H5Oincr_decr_refcount_group)
+ {
+ TESTING_2("H5Oincr_refcount/H5Odecr_refcount on a group");
+
+ if ((group_id2 = H5Gcreate2(group_id, OBJECT_REF_COUNT_TEST_GRP_NAME, H5P_DEFAULT, H5P_DEFAULT,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create group '%s'\n", OBJECT_REF_COUNT_TEST_GRP_NAME);
+ PART_ERROR(H5Oincr_decr_refcount_group);
+ }
+
+ /* Increment the reference count */
+ if (H5Oincr_refcount(group_id2) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't increment reference count for the group '%s' \n",
+ OBJECT_REF_COUNT_TEST_GRP_NAME);
+ PART_ERROR(H5Oincr_decr_refcount_group);
+ }
+
+ /* Verify that reference count is 2 now */
+ if (H5Oget_info_by_name3(group_id, OBJECT_REF_COUNT_TEST_GRP_NAME, &oinfo, H5O_INFO_BASIC,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get reference count for the group '%s' \n",
+ OBJECT_REF_COUNT_TEST_GRP_NAME);
+ PART_ERROR(H5Oincr_decr_refcount_group);
+ }
+
+ if (oinfo.rc != 2) {
+ H5_FAILED();
+ HDprintf(" the reference count for the group '%s' isn't 2: %d\n",
+ OBJECT_REF_COUNT_TEST_GRP_NAME, oinfo.rc);
+ PART_ERROR(H5Oincr_decr_refcount_group);
+ }
+
+ /* Decrement the reference count */
+ if (H5Odecr_refcount(group_id2) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't decrement reference count for the group '%s' \n",
+ OBJECT_REF_COUNT_TEST_GRP_NAME);
+ PART_ERROR(H5Oincr_decr_refcount_group);
+ }
+
+ /* Verify that reference count is 1 now */
+ if (H5Oget_info_by_name3(group_id, OBJECT_REF_COUNT_TEST_GRP_NAME, &oinfo, H5O_INFO_BASIC,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get reference count for the group '%s' \n",
+ OBJECT_REF_COUNT_TEST_GRP_NAME);
+ PART_ERROR(H5Oincr_decr_refcount_group);
+ }
+
+ if (oinfo.rc != 1) {
+ H5_FAILED();
+ HDprintf(" the reference count for the group '%s' isn't 1: %d\n",
+ OBJECT_REF_COUNT_TEST_GRP_NAME, oinfo.rc);
+ PART_ERROR(H5Oincr_decr_refcount_group);
+ }
+
+ if (H5Gclose(group_id2) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't close group\n");
+ PART_ERROR(H5Oincr_decr_refcount_group);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Oincr_decr_refcount_group);
+
+ PART_BEGIN(H5Oincr_decr_refcount_dset)
+ {
+ TESTING_2("H5Oincr_refcount/H5Odecr_refcount on a dataset");
+
+ if ((dset_id = H5Dcreate2(group_id, OBJECT_REF_COUNT_TEST_DSET_NAME, dset_dtype, fspace_id,
+ H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create dataset '%s'\n", OBJECT_REF_COUNT_TEST_DSET_NAME);
+ PART_ERROR(H5Oincr_decr_refcount_dset);
+ }
+
+ /* Increment the reference count */
+ if (H5Oincr_refcount(dset_id) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't increment reference count for the dataset '%s' \n",
+ OBJECT_REF_COUNT_TEST_DSET_NAME);
+ PART_ERROR(H5Oincr_decr_refcount_dset);
+ }
+
+ /* Verify that reference count is 2 now */
+ if (H5Oget_info_by_name3(group_id, OBJECT_REF_COUNT_TEST_DSET_NAME, &oinfo, H5O_INFO_BASIC,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get reference count for the dataset '%s' \n",
+ OBJECT_REF_COUNT_TEST_DSET_NAME);
+ PART_ERROR(H5Oincr_decr_refcount_dset);
+ }
+
+ if (oinfo.rc != 2) {
+ H5_FAILED();
+ HDprintf(" the reference count for the dataset '%s' isn't 2: %d\n",
+ OBJECT_REF_COUNT_TEST_DSET_NAME, oinfo.rc);
+ PART_ERROR(H5Oincr_decr_refcount_dset);
+ }
+
+ /* Decrement the reference count */
+ if (H5Odecr_refcount(dset_id) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't decrement reference count for the dataset '%s' \n",
+ OBJECT_REF_COUNT_TEST_DSET_NAME);
+ PART_ERROR(H5Oincr_decr_refcount_dset);
+ }
+
+ /* Verify that reference count is 1 now */
+ if (H5Oget_info_by_name3(group_id, OBJECT_REF_COUNT_TEST_DSET_NAME, &oinfo, H5O_INFO_BASIC,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get reference count for the dataset '%s' \n",
+ OBJECT_REF_COUNT_TEST_DSET_NAME);
+ PART_ERROR(H5Oincr_decr_refcount_dset);
+ }
+
+ if (oinfo.rc != 1) {
+ H5_FAILED();
+ HDprintf(" the reference count for the dataset '%s' isn't 1: %d\n",
+ OBJECT_REF_COUNT_TEST_DSET_NAME, oinfo.rc);
+ PART_ERROR(H5Oincr_decr_refcount_dset);
+ }
+
+ if (H5Dclose(dset_id) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't close dataset\n");
+ PART_ERROR(H5Oincr_decr_refcount_dset);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Oincr_decr_refcount_dset);
+
+ PART_BEGIN(H5Oincr_decr_refcount_dtype)
+ {
+ TESTING_2("H5Oincr_refcount/H5Odecr_refcount on a committed datatype");
+
+ if (H5Tcommit2(group_id, OBJECT_REF_COUNT_TEST_TYPE_NAME, dset_dtype, H5P_DEFAULT, H5P_DEFAULT,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't commit datatype '%s'\n", OBJECT_REF_COUNT_TEST_TYPE_NAME);
+ PART_ERROR(H5Oincr_decr_refcount_dtype);
+ }
+
+ /* Increment the reference count */
+ if (H5Oincr_refcount(dset_dtype) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't increment reference count for the datatype '%s' \n",
+ OBJECT_REF_COUNT_TEST_TYPE_NAME);
+ PART_ERROR(H5Oincr_decr_refcount_dtype);
+ }
+
+ /* Verify that reference count is 2 now */
+ if (H5Oget_info_by_name3(group_id, OBJECT_REF_COUNT_TEST_TYPE_NAME, &oinfo, H5O_INFO_BASIC,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get reference count for the datatype '%s' \n",
+ OBJECT_REF_COUNT_TEST_TYPE_NAME);
+ PART_ERROR(H5Oincr_decr_refcount_dtype);
+ }
+
+ if (oinfo.rc != 2) {
+ H5_FAILED();
+ HDprintf(" the reference count for the datatype '%s' isn't 2: %d\n",
+ OBJECT_REF_COUNT_TEST_TYPE_NAME, oinfo.rc);
+ PART_ERROR(H5Oincr_decr_refcount_dtype);
+ }
+
+ /* Decrement the reference count */
+ if (H5Odecr_refcount(dset_dtype) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't decrement reference count for the datatype '%s' \n",
+ OBJECT_REF_COUNT_TEST_TYPE_NAME);
+ PART_ERROR(H5Oincr_decr_refcount_dtype);
+ }
+
+ /* Verify that reference count is 1 now */
+ if (H5Oget_info_by_name3(group_id, OBJECT_REF_COUNT_TEST_TYPE_NAME, &oinfo, H5O_INFO_BASIC,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get reference count for the datatype '%s' \n",
+ OBJECT_REF_COUNT_TEST_TYPE_NAME);
+ PART_ERROR(H5Oincr_decr_refcount_dtype);
+ }
+
+ if (oinfo.rc != 1) {
+ H5_FAILED();
+ HDprintf(" the reference count for the datatype '%s' isn't 1: %d\n",
+ OBJECT_REF_COUNT_TEST_TYPE_NAME, oinfo.rc);
+ PART_ERROR(H5Oincr_decr_refcount_dtype);
+ }
+
+ if (H5Tclose(dset_dtype) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't close datatype\n");
+ PART_ERROR(H5Oincr_decr_refcount_dtype);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Oincr_decr_refcount_dtype);
+ }
+ END_MULTIPART;
+
+ TESTING_2("test cleanup");
+
+ if (H5Sclose(fspace_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(fspace_id);
+ H5Tclose(dset_dtype);
+ H5Dclose(dset_id);
+ H5Gclose(group_id2);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+} /* test_incr_decr_object_refcount */
+
+/*
+ * A test to check that H5Oincr_refcount/H5Odecr_refcount
+ * fail when passed invalid parameters.
+ */
+static int
+test_incr_decr_object_refcount_invalid_params(void)
+{
+ herr_t status;
+
+ TESTING_MULTIPART("object reference count incr./decr. with an invalid parameter");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_OBJECT_MORE)) {
+ SKIPPED();
+ HDprintf(" API functions for more object aren't supported with this connector\n");
+ return 0;
+ }
+
+ BEGIN_MULTIPART
+ {
+ PART_BEGIN(H5Oincr_refcount_invalid_param)
+ {
+ TESTING_2("H5Oincr_refcount with invalid object ID");
+
+ H5E_BEGIN_TRY
+ {
+ status = H5Oincr_refcount(H5I_INVALID_HID);
+ }
+ H5E_END_TRY;
+
+ if (status >= 0) {
+ H5_FAILED();
+ HDprintf(" incremented the reference count for an invalid object ID\n");
+ PART_ERROR(H5Oincr_refcount_invalid_param);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Oincr_refcount_invalid_param);
+
+ PART_BEGIN(H5Odecr_refcount_invalid_param)
+ {
+ TESTING_2("H5Odecr_refcount with invalid object ID");
+
+ H5E_BEGIN_TRY
+ {
+ status = H5Odecr_refcount(H5I_INVALID_HID);
+ }
+ H5E_END_TRY;
+
+ if (status >= 0) {
+ H5_FAILED();
+ HDprintf(" decremented the reference count for an invalid object ID\n");
+ PART_ERROR(H5Odecr_refcount_invalid_param);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Odecr_refcount_invalid_param);
+ }
+ END_MULTIPART;
+
+ return 0;
+
+error:
+ return 1;
+}
+
+/*
+ * Basic tests for H5Ocopy.
+ */
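+/*
+ * Rough sketch of a default (deep) copy as exercised below ("loc_id" and the
+ * object names are placeholders): with default property lists, H5Ocopy copies
+ * the object together with its attributes and any nested members.
+ *
+ *     if (H5Ocopy(loc_id, "source_object", loc_id, "copied_object",
+ *                 H5P_DEFAULT, H5P_DEFAULT) < 0)
+ *         TEST_ERROR;
+ */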
+static int
+test_object_copy_basic(void)
+{
+ H5O_info2_t object_info;
+ H5G_info_t group_info;
+ htri_t object_link_exists;
+ size_t i;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t group_id2 = H5I_INVALID_HID;
+ hid_t tmp_group_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t tmp_dset_id = H5I_INVALID_HID;
+ hid_t dtype_id = H5I_INVALID_HID;
+ hid_t tmp_dtype_id = H5I_INVALID_HID;
+ hid_t tmp_attr_id = H5I_INVALID_HID;
+ hid_t dset_dtype = H5I_INVALID_HID;
+ hid_t attr_space_id = H5I_INVALID_HID;
+ hid_t space_id = H5I_INVALID_HID;
+
+ TESTING_MULTIPART("basic object copying");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & (H5VL_CAP_FLAG_FILE_BASIC)) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_OBJECT_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_OBJECT_MORE) || !(vol_cap_flags_g & H5VL_CAP_FLAG_LINK_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_ITERATE) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_STORED_DATATYPES)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file, group, object, link, dataset, attribute, iterate, or "
+ "stored datatype aren't supported with this connector\n");
+ return 0;
+ }
+
+ TESTING_2("test setup");
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, OBJECT_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", OBJECT_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, OBJECT_COPY_BASIC_TEST_SUBGROUP_NAME, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container subgroup '%s'\n", OBJECT_COPY_BASIC_TEST_SUBGROUP_NAME);
+ goto error;
+ }
+
+ if ((space_id = generate_random_dataspace(OBJECT_COPY_BASIC_TEST_SPACE_RANK, NULL, NULL, FALSE)) < 0)
+ TEST_ERROR;
+ if ((attr_space_id = generate_random_dataspace(OBJECT_COPY_BASIC_TEST_SPACE_RANK, NULL, NULL, TRUE)) < 0)
+ TEST_ERROR;
+
+ if ((dset_dtype = generate_random_datatype(H5T_NO_CLASS, FALSE)) < 0)
+ TEST_ERROR;
+
+ /* Create the test group object, along with its nested members and the attributes attached to it. */
+ if ((group_id2 = H5Gcreate2(group_id, OBJECT_COPY_BASIC_TEST_GROUP_NAME, H5P_DEFAULT, H5P_DEFAULT,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create group '%s'\n", OBJECT_COPY_BASIC_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ for (i = 0; i < (size_t)OBJECT_COPY_BASIC_TEST_NUM_NESTED_OBJS; i++) {
+ char grp_name[OBJECT_COPY_BASIC_TEST_BUF_SIZE];
+
+ snprintf(grp_name, OBJECT_COPY_BASIC_TEST_BUF_SIZE, "grp%d", (int)i);
+
+ if ((tmp_group_id = H5Gcreate2(group_id2, grp_name, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create group '%s' under group '%s'\n", grp_name,
+ OBJECT_COPY_BASIC_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ /* Create a further nested group under the last group added */
+ if (i == (OBJECT_COPY_BASIC_TEST_NUM_NESTED_OBJS - 1)) {
+ if (H5Gclose(H5Gcreate2(tmp_group_id, OBJECT_COPY_BASIC_TEST_DEEP_NESTED_GROUP_NAME, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create nested group '%s' under group '%s'\n",
+ OBJECT_COPY_BASIC_TEST_DEEP_NESTED_GROUP_NAME, grp_name);
+ goto error;
+ }
+ }
+
+ if (H5Gclose(tmp_group_id) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't close group '%s'\n", grp_name);
+ goto error;
+ }
+ }
+
+ for (i = 0; i < (size_t)OBJECT_COPY_BASIC_TEST_NUM_ATTRS; i++) {
+ char attr_name[OBJECT_COPY_BASIC_TEST_BUF_SIZE];
+
+ snprintf(attr_name, OBJECT_COPY_BASIC_TEST_BUF_SIZE, "attr%d", (int)i);
+
+ if ((tmp_attr_id = H5Acreate2(group_id2, attr_name, H5T_NATIVE_INT, attr_space_id, H5P_DEFAULT,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create attribute '%s' on group '%s'\n", attr_name,
+ OBJECT_COPY_BASIC_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if (H5Aclose(tmp_attr_id) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't close attribute '%s'\n", attr_name);
+ goto error;
+ }
+ }
+
+ /* Create the test dataset object, along with the attributes attached to it. */
+ if ((dset_id = H5Dcreate2(group_id, OBJECT_COPY_BASIC_TEST_DSET_NAME, dset_dtype, space_id, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create dataset '%s'\n", OBJECT_COPY_BASIC_TEST_DSET_NAME);
+ goto error;
+ }
+
+ for (i = 0; i < (size_t)OBJECT_COPY_BASIC_TEST_NUM_ATTRS; i++) {
+ char attr_name[OBJECT_COPY_BASIC_TEST_BUF_SIZE];
+
+ snprintf(attr_name, OBJECT_COPY_BASIC_TEST_BUF_SIZE, "attr%d", (int)i);
+
+ if ((tmp_attr_id = H5Acreate2(dset_id, attr_name, H5T_NATIVE_INT, attr_space_id, H5P_DEFAULT,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create attribute '%s' on dataset '%s'\n", attr_name,
+ OBJECT_COPY_BASIC_TEST_DSET_NAME);
+ goto error;
+ }
+
+ if (H5Aclose(tmp_attr_id) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't close attribute '%s'\n", attr_name);
+ goto error;
+ }
+ }
+
+ /* Create the test committed datatype object, along with the attributes attached to it. */
+ if ((dtype_id = generate_random_datatype(H5T_NO_CLASS, FALSE)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create datatype\n");
+ goto error;
+ }
+
+ if (H5Tcommit2(group_id, OBJECT_COPY_BASIC_TEST_DTYPE_NAME, dtype_id, H5P_DEFAULT, H5P_DEFAULT,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't commit datatype '%s'\n", OBJECT_COPY_BASIC_TEST_DTYPE_NAME);
+ goto error;
+ }
+
+ for (i = 0; i < (size_t)OBJECT_COPY_BASIC_TEST_NUM_ATTRS; i++) {
+ char attr_name[OBJECT_COPY_BASIC_TEST_BUF_SIZE];
+
+ snprintf(attr_name, OBJECT_COPY_BASIC_TEST_BUF_SIZE, "attr%d", (int)i);
+
+ if ((tmp_attr_id = H5Acreate2(dtype_id, attr_name, H5T_NATIVE_INT, attr_space_id, H5P_DEFAULT,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create attribute '%s' on committed datatype '%s'\n", attr_name,
+ OBJECT_COPY_BASIC_TEST_DTYPE_NAME);
+ goto error;
+ }
+
+ if (H5Aclose(tmp_attr_id) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't close attribute '%s'\n", attr_name);
+ goto error;
+ }
+ }
+
+ PASSED();
+
+ BEGIN_MULTIPART
+ {
+ PART_BEGIN(H5Ocopy_group)
+ {
+ TESTING_2("H5Ocopy on a group (default copy options)");
+
+ if (H5Ocopy(group_id, OBJECT_COPY_BASIC_TEST_GROUP_NAME, group_id,
+ OBJECT_COPY_BASIC_TEST_NEW_GROUP_NAME, H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to copy group '%s' to '%s'\n", OBJECT_COPY_BASIC_TEST_GROUP_NAME,
+ OBJECT_COPY_BASIC_TEST_NEW_GROUP_NAME);
+ PART_ERROR(H5Ocopy_group);
+ }
+
+ if ((object_link_exists =
+ H5Lexists(group_id, OBJECT_COPY_BASIC_TEST_NEW_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if link '%s' to copied group exists\n",
+ OBJECT_COPY_BASIC_TEST_NEW_GROUP_NAME);
+ PART_ERROR(H5Ocopy_group);
+ }
+
+ if (!object_link_exists) {
+ H5_FAILED();
+ HDprintf(" link '%s' to copied group didn't exist!\n",
+ OBJECT_COPY_BASIC_TEST_NEW_GROUP_NAME);
+ PART_ERROR(H5Ocopy_group);
+ }
+
+ /* Ensure that the new group has all the members of the copied group, and all its attributes */
+ if ((tmp_group_id = H5Gopen2(group_id, OBJECT_COPY_BASIC_TEST_NEW_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to open group copy '%s'\n", OBJECT_COPY_BASIC_TEST_NEW_GROUP_NAME);
+ PART_ERROR(H5Ocopy_group);
+ }
+
+ memset(&group_info, 0, sizeof(group_info));
+
+ /*
+ * Set link count to zero in case the connector doesn't support
+ * retrieval of group info.
+ */
+ group_info.nlinks = 0;
+
+ if (H5Gget_info(tmp_group_id, &group_info) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve group info\n");
+ PART_ERROR(H5Ocopy_group);
+ }
+
+ if (group_info.nlinks != OBJECT_COPY_BASIC_TEST_NUM_NESTED_OBJS) {
+ H5_FAILED();
+ HDprintf(" copied group contained %d members instead of %d members after a deep copy!\n",
+ (int)group_info.nlinks, OBJECT_COPY_BASIC_TEST_NUM_NESTED_OBJS);
+ PART_ERROR(H5Ocopy_group);
+ }
+
+ memset(&object_info, 0, sizeof(object_info));
+
+ /*
+ * Set attribute count to zero in case the connector doesn't
+ * support retrieval of object info.
+ */
+ object_info.num_attrs = 0;
+
+ if (H5Oget_info3(tmp_group_id, &object_info, H5O_INFO_ALL) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve object info\n");
+ PART_ERROR(H5Ocopy_group);
+ }
+
+ if (object_info.num_attrs == 0) {
+ H5_FAILED();
+ HDprintf(" copied group didn't contain any attributes after copy operation!\n");
+ PART_ERROR(H5Ocopy_group);
+ }
+
+ /* Check the attribute names, types, etc. */
+ i = 0;
+ if (H5Aiterate2(tmp_group_id, H5_INDEX_NAME, H5_ITER_INC, NULL,
+ object_copy_attribute_iter_callback, &i) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to iterate over copied group's attributes\n");
+ PART_ERROR(H5Ocopy_group);
+ }
+
+ if (i != OBJECT_COPY_BASIC_TEST_NUM_ATTRS) {
+ H5_FAILED();
+ HDprintf(
+ " number of attributes on copied group (%llu) didn't match expected number (%llu)!\n",
+ (unsigned long long)i, (unsigned long long)OBJECT_COPY_BASIC_TEST_NUM_ATTRS);
+ PART_ERROR(H5Ocopy_group);
+ }
+
+ if (H5Gclose(tmp_group_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close group copy\n");
+ PART_ERROR(H5Ocopy_group);
+ }
+
+ /*
+ * Ensure that the last immediate member of the copied group
+ * contains its single member after the deep copy.
+ */
+ {
+ char grp_name[OBJECT_COPY_BASIC_TEST_BUF_SIZE];
+
+ snprintf(grp_name, OBJECT_COPY_BASIC_TEST_BUF_SIZE,
+ OBJECT_COPY_BASIC_TEST_NEW_GROUP_NAME "/grp%d",
+ OBJECT_COPY_BASIC_TEST_NUM_NESTED_OBJS - 1);
+
+ if ((tmp_group_id = H5Gopen2(group_id, grp_name, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to open group '%s'\n",
+ OBJECT_COPY_BASIC_TEST_DEEP_NESTED_GROUP_NAME);
+ PART_ERROR(H5Ocopy_group);
+ }
+
+ memset(&group_info, 0, sizeof(group_info));
+
+ /*
+ * Set link count to zero in case the connector doesn't support
+ * retrieval of group info.
+ */
+ group_info.nlinks = 0;
+
+ if (H5Gget_info(tmp_group_id, &group_info) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve group info\n");
+ PART_ERROR(H5Ocopy_group);
+ }
+
+ if (group_info.nlinks != 1) {
+ H5_FAILED();
+ HDprintf(" copied group's immediate members didn't contain nested members after a "
+ "deep copy!\n");
+ PART_ERROR(H5Ocopy_group);
+ }
+
+ if (H5Gclose(tmp_group_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close group '%s'\n",
+ OBJECT_COPY_BASIC_TEST_DEEP_NESTED_GROUP_NAME);
+ PART_ERROR(H5Ocopy_group);
+ }
+ }
+
+ PASSED();
+ }
+ PART_END(H5Ocopy_group);
+
+ if (tmp_group_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Gclose(tmp_group_id);
+ }
+ H5E_END_TRY;
+ tmp_group_id = H5I_INVALID_HID;
+ }
+
+ PART_BEGIN(H5Ocopy_dset)
+ {
+ TESTING_2("H5Ocopy on a dataset (default copy options)");
+
+ if (H5Ocopy(group_id, OBJECT_COPY_BASIC_TEST_DSET_NAME, group_id,
+ OBJECT_COPY_BASIC_TEST_NEW_DSET_NAME, H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to copy dataset '%s' to '%s'\n", OBJECT_COPY_BASIC_TEST_DSET_NAME,
+ OBJECT_COPY_BASIC_TEST_NEW_DSET_NAME);
+ PART_ERROR(H5Ocopy_dset);
+ }
+
+ if ((object_link_exists =
+ H5Lexists(group_id, OBJECT_COPY_BASIC_TEST_NEW_DSET_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if link '%s' to copied dataset exists\n",
+ OBJECT_COPY_BASIC_TEST_NEW_DSET_NAME);
+ PART_ERROR(H5Ocopy_dset);
+ }
+
+ if (!object_link_exists) {
+ H5_FAILED();
+ HDprintf(" link '%s' to copied dataset didn't exist!\n",
+ OBJECT_COPY_BASIC_TEST_NEW_DSET_NAME);
+ PART_ERROR(H5Ocopy_dset);
+ }
+
+ /* Ensure that the new dataset has all of the attributes of the copied dataset */
+ if ((tmp_dset_id = H5Dopen2(group_id, OBJECT_COPY_BASIC_TEST_NEW_DSET_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to open dataset copy '%s'\n", OBJECT_COPY_BASIC_TEST_NEW_DSET_NAME);
+ PART_ERROR(H5Ocopy_dset);
+ }
+
+ memset(&object_info, 0, sizeof(object_info));
+
+ /*
+ * Set attribute count to zero in case the connector doesn't
+ * support retrieval of object info.
+ */
+ object_info.num_attrs = 0;
+
+ if (H5Oget_info3(tmp_dset_id, &object_info, H5O_INFO_ALL) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve object info\n");
+ PART_ERROR(H5Ocopy_dset);
+ }
+
+ if (object_info.num_attrs == 0) {
+ H5_FAILED();
+ HDprintf(" copied dataset didn't contain any attributes after copy operation!\n");
+ PART_ERROR(H5Ocopy_dset);
+ }
+
+ /* Check the attribute names, types, etc. */
+ i = 0;
+ if (H5Aiterate2(tmp_dset_id, H5_INDEX_NAME, H5_ITER_INC, NULL,
+ object_copy_attribute_iter_callback, &i) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to iterate over copied dataset's attributes\n");
+ PART_ERROR(H5Ocopy_dset);
+ }
+
+ if (i != OBJECT_COPY_BASIC_TEST_NUM_ATTRS) {
+ H5_FAILED();
+ HDprintf(" number of attributes on copied dataset (%llu) didn't match expected number "
+ "(%llu)!\n",
+ (unsigned long long)i, (unsigned long long)OBJECT_COPY_BASIC_TEST_NUM_ATTRS);
+ PART_ERROR(H5Ocopy_dset);
+ }
+
+ if (H5Dclose(tmp_dset_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close dataset copy\n");
+ PART_ERROR(H5Ocopy_dset);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Ocopy_dset);
+
+ if (tmp_dset_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Dclose(tmp_dset_id);
+ }
+ H5E_END_TRY;
+ tmp_dset_id = H5I_INVALID_HID;
+ }
+
+ PART_BEGIN(H5Ocopy_dtype)
+ {
+ TESTING_2("H5Ocopy on a committed datatype (default copy options)");
+
+ if (H5Ocopy(group_id, OBJECT_COPY_BASIC_TEST_DTYPE_NAME, group_id,
+ OBJECT_COPY_BASIC_TEST_NEW_DTYPE_NAME, H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to copy datatype '%s' to '%s'\n", OBJECT_COPY_BASIC_TEST_DTYPE_NAME,
+ OBJECT_COPY_BASIC_TEST_NEW_DTYPE_NAME);
+ PART_ERROR(H5Ocopy_dtype);
+ }
+
+ if ((object_link_exists =
+ H5Lexists(group_id, OBJECT_COPY_BASIC_TEST_NEW_DTYPE_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if link '%s' to copied datatype exists\n",
+ OBJECT_COPY_BASIC_TEST_NEW_DTYPE_NAME);
+ PART_ERROR(H5Ocopy_dtype);
+ }
+
+ if (!object_link_exists) {
+ H5_FAILED();
+ HDprintf(" link '%s' to copied datatype didn't exist!\n",
+ OBJECT_COPY_BASIC_TEST_NEW_DTYPE_NAME);
+ PART_ERROR(H5Ocopy_dtype);
+ }
+
+ /* Ensure that the new committed datatype has all the attributes of the copied datatype */
+ if ((tmp_dtype_id = H5Topen2(group_id, OBJECT_COPY_BASIC_TEST_NEW_DTYPE_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to open datatype copy '%s'\n", OBJECT_COPY_BASIC_TEST_NEW_DTYPE_NAME);
+ PART_ERROR(H5Ocopy_dtype);
+ }
+
+ memset(&object_info, 0, sizeof(object_info));
+
+ /*
+ * Set attribute count to zero in case the connector doesn't
+ * support retrieval of object info.
+ */
+ object_info.num_attrs = 0;
+
+ if (H5Oget_info3(tmp_dtype_id, &object_info, H5O_INFO_ALL) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve object info\n");
+ PART_ERROR(H5Ocopy_dtype);
+ }
+
+ if (object_info.num_attrs == 0) {
+ H5_FAILED();
+ HDprintf(
+ " copied committed datatype didn't contain any attributes after copy operation!\n");
+ PART_ERROR(H5Ocopy_dtype);
+ }
+
+ /* Check the attribute names, types, etc. */
+ i = 0;
+ if (H5Aiterate2(tmp_dtype_id, H5_INDEX_NAME, H5_ITER_INC, NULL,
+ object_copy_attribute_iter_callback, &i) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to iterate over copied datatype's attributes\n");
+ PART_ERROR(H5Ocopy_dtype);
+ }
+
+ if (i != OBJECT_COPY_BASIC_TEST_NUM_ATTRS) {
+ H5_FAILED();
+ HDprintf(" number of attributes on copied datatype (%llu) didn't match expected number "
+ "(%llu)!\n",
+ (unsigned long long)i, (unsigned long long)OBJECT_COPY_BASIC_TEST_NUM_ATTRS);
+ PART_ERROR(H5Ocopy_dtype);
+ }
+
+ if (H5Tclose(tmp_dtype_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close datatype copy\n");
+ PART_ERROR(H5Ocopy_dtype);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Ocopy_dtype);
+
+ if (tmp_dtype_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Tclose(tmp_dtype_id);
+ }
+ H5E_END_TRY;
+ tmp_dtype_id = H5I_INVALID_HID;
+ }
+ }
+ END_MULTIPART;
+
+ TESTING_2("test cleanup");
+
+ if (H5Sclose(attr_space_id) < 0)
+ TEST_ERROR;
+ if (H5Sclose(space_id) < 0)
+ TEST_ERROR;
+ if (H5Tclose(dset_dtype) < 0)
+ TEST_ERROR;
+ if (H5Tclose(dtype_id) < 0)
+ TEST_ERROR;
+ if (H5Dclose(dset_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id2) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(attr_space_id);
+ H5Sclose(space_id);
+ H5Aclose(tmp_attr_id);
+ H5Tclose(dset_dtype);
+ H5Tclose(tmp_dtype_id);
+ H5Tclose(dtype_id);
+ H5Dclose(tmp_dset_id);
+ H5Dclose(dset_id);
+ H5Gclose(tmp_group_id);
+ H5Gclose(group_id2);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * Tests to ensure that H5Ocopy fails when attempting to copy
+ * an object to a destination where the object already exists.
+ */
+static int
+test_object_copy_already_existing(void)
+{
+ herr_t err_ret;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t group_id2 = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t dtype_id = H5I_INVALID_HID;
+ hid_t dset_dtype = H5I_INVALID_HID;
+ hid_t space_id = H5I_INVALID_HID;
+
+ TESTING_MULTIPART("object copying to location where objects already exist");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & (H5VL_CAP_FLAG_FILE_BASIC)) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_OBJECT_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_OBJECT_MORE) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_STORED_DATATYPES)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file, group, object, dataset, or stored datatype aren't "
+ "supported with this connector\n");
+ return 0;
+ }
+
+ TESTING_2("test setup");
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, OBJECT_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", OBJECT_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, OBJECT_COPY_ALREADY_EXISTING_TEST_SUBGROUP_NAME, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container subgroup '%s'\n",
+ OBJECT_COPY_ALREADY_EXISTING_TEST_SUBGROUP_NAME);
+ goto error;
+ }
+
+ if ((space_id =
+ generate_random_dataspace(OBJECT_COPY_ALREADY_EXISTING_TEST_SPACE_RANK, NULL, NULL, FALSE)) < 0)
+ TEST_ERROR;
+ if ((dset_dtype = generate_random_datatype(H5T_NO_CLASS, FALSE)) < 0)
+ TEST_ERROR;
+
+ /* Create the test group object */
+ if ((group_id2 = H5Gcreate2(group_id, OBJECT_COPY_ALREADY_EXISTING_TEST_GROUP_NAME, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create group '%s'\n", OBJECT_COPY_ALREADY_EXISTING_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ /* Create the test dataset object */
+ if ((dset_id = H5Dcreate2(group_id, OBJECT_COPY_ALREADY_EXISTING_TEST_DSET_NAME, dset_dtype, space_id,
+ H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create dataset '%s'\n", OBJECT_COPY_ALREADY_EXISTING_TEST_DSET_NAME);
+ goto error;
+ }
+
+ /* Create the test committed datatype object */
+ if ((dtype_id = generate_random_datatype(H5T_NO_CLASS, FALSE)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create datatype\n");
+ goto error;
+ }
+
+ if (H5Tcommit2(group_id, OBJECT_COPY_ALREADY_EXISTING_TEST_DTYPE_NAME, dtype_id, H5P_DEFAULT, H5P_DEFAULT,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't commit datatype '%s'\n", OBJECT_COPY_ALREADY_EXISTING_TEST_DTYPE_NAME);
+ goto error;
+ }
+
+ PASSED();
+
+ BEGIN_MULTIPART
+ {
+ PART_BEGIN(H5Ocopy_already_existing_group)
+ {
+ TESTING_2("H5Ocopy group to location where group already exists");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Ocopy(group_id, OBJECT_COPY_ALREADY_EXISTING_TEST_GROUP_NAME, group_id,
+ OBJECT_COPY_ALREADY_EXISTING_TEST_GROUP_NAME, H5P_DEFAULT, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" group copy succeeded in location where group already exists!\n");
+ PART_ERROR(H5Ocopy_already_existing_group);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Ocopy_already_existing_group);
+
+ PART_BEGIN(H5Ocopy_already_existing_dset)
+ {
+ TESTING_2("H5Ocopy dataset to location where dataset already exists");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Ocopy(group_id, OBJECT_COPY_ALREADY_EXISTING_TEST_DSET_NAME, group_id,
+ OBJECT_COPY_ALREADY_EXISTING_TEST_DSET_NAME, H5P_DEFAULT, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" dataset copy succeeded in location where dataset already exists!\n");
+ PART_ERROR(H5Ocopy_already_existing_dset);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Ocopy_already_existing_dset);
+
+ PART_BEGIN(H5Ocopy_already_existing_dtype)
+ {
+ TESTING_2("H5Ocopy committed datatype to location where committed datatype already exists");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Ocopy(group_id, OBJECT_COPY_ALREADY_EXISTING_TEST_DTYPE_NAME, group_id,
+ OBJECT_COPY_ALREADY_EXISTING_TEST_DTYPE_NAME, H5P_DEFAULT, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" committed datatype copy succeeded in location where committed datatype already "
+ "exists!\n");
+ PART_ERROR(H5Ocopy_already_existing_dtype);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Ocopy_already_existing_dtype);
+ }
+ END_MULTIPART;
+
+ TESTING_2("test cleanup");
+
+ if (H5Sclose(space_id) < 0)
+ TEST_ERROR;
+ if (H5Tclose(dset_dtype) < 0)
+ TEST_ERROR;
+ if (H5Tclose(dtype_id) < 0)
+ TEST_ERROR;
+ if (H5Dclose(dset_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id2) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(space_id);
+ H5Tclose(dset_dtype);
+ H5Tclose(dtype_id);
+ H5Dclose(dset_id);
+ H5Gclose(group_id2);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to exercise the H5O_COPY_SHALLOW_HIERARCHY_FLAG flag
+ * for H5Ocopy.
+ */
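+/*
+ * Rough sketch of the property-list setup used below ("loc_id" and the group
+ * names are placeholders): the copy behavior is selected through an object-copy
+ * property list rather than through H5Ocopy's own arguments.
+ *
+ *     hid_t ocpypl_id;
+ *
+ *     if ((ocpypl_id = H5Pcreate(H5P_OBJECT_COPY)) < 0)
+ *         TEST_ERROR;
+ *     if (H5Pset_copy_object(ocpypl_id, H5O_COPY_SHALLOW_HIERARCHY_FLAG) < 0)
+ *         TEST_ERROR;
+ *     if (H5Ocopy(loc_id, "source_group", loc_id, "shallow_copy", ocpypl_id, H5P_DEFAULT) < 0)
+ *         TEST_ERROR;
+ *     if (H5Pclose(ocpypl_id) < 0)
+ *         TEST_ERROR;
+ */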
+static int
+test_object_copy_shallow_group_copy(void)
+{
+ H5G_info_t group_info;
+ htri_t object_link_exists;
+ size_t i;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t group_id2 = H5I_INVALID_HID;
+ hid_t tmp_group_id = H5I_INVALID_HID;
+ hid_t ocpypl_id = H5I_INVALID_HID;
+
+ TESTING("object copying with H5O_COPY_SHALLOW_HIERARCHY_FLAG flag");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & (H5VL_CAP_FLAG_FILE_BASIC)) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_MORE) || !(vol_cap_flags_g & H5VL_CAP_FLAG_OBJECT_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_LINK_BASIC)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file, group, object, or link aren't supported with this "
+ "connector\n");
+ return 0;
+ }
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, OBJECT_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", OBJECT_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, OBJECT_COPY_SHALLOW_TEST_SUBGROUP_NAME, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container subgroup '%s'\n", OBJECT_COPY_SHALLOW_TEST_SUBGROUP_NAME);
+ goto error;
+ }
+
+ /* Create the test group object, along with its nested members. */
+ if ((group_id2 = H5Gcreate2(group_id, OBJECT_COPY_SHALLOW_TEST_GROUP_NAME, H5P_DEFAULT, H5P_DEFAULT,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create group '%s'\n", OBJECT_COPY_SHALLOW_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ for (i = 0; i < (size_t)OBJECT_COPY_SHALLOW_TEST_NUM_NESTED_OBJS; i++) {
+ char grp_name[OBJECT_COPY_SHALLOW_TEST_BUF_SIZE];
+
+ snprintf(grp_name, OBJECT_COPY_SHALLOW_TEST_BUF_SIZE, "grp%d", (int)i);
+
+ if ((tmp_group_id = H5Gcreate2(group_id2, grp_name, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create group '%s' under group '%s'\n", grp_name,
+ OBJECT_COPY_SHALLOW_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ /* Create a further nested group under the last group added */
+ if (i == (OBJECT_COPY_SHALLOW_TEST_NUM_NESTED_OBJS - 1)) {
+ if (H5Gclose(H5Gcreate2(tmp_group_id, OBJECT_COPY_SHALLOW_TEST_DEEP_NESTED_GROUP_NAME,
+ H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create nested group '%s' under group '%s'\n",
+ OBJECT_COPY_SHALLOW_TEST_DEEP_NESTED_GROUP_NAME, grp_name);
+ goto error;
+ }
+ }
+
+ if (H5Gclose(tmp_group_id) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't close group '%s'\n", grp_name);
+ goto error;
+ }
+ }
+
+ if ((ocpypl_id = H5Pcreate(H5P_OBJECT_COPY)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create OCopyPL\n");
+ goto error;
+ }
+
+ if (H5Pset_copy_object(ocpypl_id, H5O_COPY_SHALLOW_HIERARCHY_FLAG) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't set object copying options\n");
+ goto error;
+ }
+
+ if (H5Ocopy(group_id, OBJECT_COPY_SHALLOW_TEST_GROUP_NAME, group_id,
+ OBJECT_COPY_SHALLOW_TEST_NEW_GROUP_NAME, ocpypl_id, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to copy group '%s' to '%s'\n", OBJECT_COPY_SHALLOW_TEST_GROUP_NAME,
+ OBJECT_COPY_SHALLOW_TEST_NEW_GROUP_NAME);
+ goto error;
+ }
+
+ if ((object_link_exists = H5Lexists(group_id, OBJECT_COPY_SHALLOW_TEST_NEW_GROUP_NAME, H5P_DEFAULT)) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if link '%s' to copied group exists\n",
+ OBJECT_COPY_SHALLOW_TEST_NEW_GROUP_NAME);
+ goto error;
+ }
+
+ if (!object_link_exists) {
+ H5_FAILED();
+ HDprintf(" link '%s' to copied group didn't exist!\n", OBJECT_COPY_SHALLOW_TEST_NEW_GROUP_NAME);
+ goto error;
+ }
+
+ /*
+ * Ensure that the new group has only the immediate members of the copied group.
+ */
+ if ((tmp_group_id = H5Gopen2(group_id, OBJECT_COPY_SHALLOW_TEST_NEW_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to open group copy '%s'\n", OBJECT_COPY_SHALLOW_TEST_NEW_GROUP_NAME);
+ goto error;
+ }
+
+ memset(&group_info, 0, sizeof(group_info));
+
+ /*
+ * Set link count to zero in case the connector doesn't support
+ * retrieval of group info.
+ */
+ group_info.nlinks = 0;
+
+ if (H5Gget_info(tmp_group_id, &group_info) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve group info\n");
+ goto error;
+ }
+
+ if (group_info.nlinks != OBJECT_COPY_SHALLOW_TEST_NUM_NESTED_OBJS) {
+ H5_FAILED();
+ HDprintf(" copied group contained %d members instead of %d members after a shallow copy!\n",
+ (int)group_info.nlinks, OBJECT_COPY_SHALLOW_TEST_NUM_NESTED_OBJS);
+ goto error;
+ }
+
+ if (H5Gclose(tmp_group_id) < 0)
+ TEST_ERROR;
+
+ /*
+ * Ensure that the last immediate member of the copied group doesn't
+ * contain any members after the shallow copy.
+ */
+ {
+ char grp_name[OBJECT_COPY_SHALLOW_TEST_BUF_SIZE];
+
+ snprintf(grp_name, OBJECT_COPY_SHALLOW_TEST_BUF_SIZE,
+ OBJECT_COPY_SHALLOW_TEST_NEW_GROUP_NAME "/grp%d",
+ OBJECT_COPY_SHALLOW_TEST_NUM_NESTED_OBJS - 1);
+
+ if ((tmp_group_id = H5Gopen2(group_id, grp_name, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to open group '%s'\n", grp_name);
+ goto error;
+ }
+
+ memset(&group_info, 0, sizeof(group_info));
+
+ /*
+ * Set link count to non-zero in case the connector doesn't support
+ * retrieval of group info.
+ */
+ group_info.nlinks = 1;
+
+ if (H5Gget_info(tmp_group_id, &group_info) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve group info\n");
+ goto error;
+ }
+
+ if (group_info.nlinks != 0) {
+ H5_FAILED();
+            HDprintf("    copied group's last immediate member contained nested members after a shallow copy!\n");
+ goto error;
+ }
+
+ if (H5Gclose(tmp_group_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close group '%s'\n", grp_name);
+ goto error;
+ }
+ }
+
+ if (H5Pclose(ocpypl_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id2) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Pclose(ocpypl_id);
+ H5Gclose(tmp_group_id);
+ H5Gclose(group_id2);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * Tests to exercise the H5O_COPY_WITHOUT_ATTR_FLAG flag
+ * of H5Ocopy.
+ */
+static int
+test_object_copy_no_attributes(void)
+{
+ H5O_info2_t object_info;
+ htri_t object_link_exists;
+ size_t i;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t group_id2 = H5I_INVALID_HID;
+ hid_t tmp_group_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t tmp_dset_id = H5I_INVALID_HID;
+ hid_t dset_dtype = H5I_INVALID_HID;
+ hid_t dtype_id = H5I_INVALID_HID;
+ hid_t tmp_dtype_id = H5I_INVALID_HID;
+ hid_t attr_id = H5I_INVALID_HID;
+ hid_t attr_space_id = H5I_INVALID_HID;
+ hid_t space_id = H5I_INVALID_HID;
+ hid_t ocpypl_id = H5I_INVALID_HID;
+
+ TESTING_MULTIPART("object copying with H5O_COPY_WITHOUT_ATTR_FLAG flag");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & (H5VL_CAP_FLAG_FILE_BASIC)) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_OBJECT_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_OBJECT_MORE) || !(vol_cap_flags_g & H5VL_CAP_FLAG_LINK_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_STORED_DATATYPES)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file, group, object, link, dataset, attribute, or stored "
+ "datatype aren't supported with this connector\n");
+ return 0;
+ }
+
+ TESTING_2("test setup");
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, OBJECT_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", OBJECT_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, OBJECT_COPY_NO_ATTRS_TEST_SUBGROUP_NAME, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container subgroup '%s'\n", OBJECT_COPY_NO_ATTRS_TEST_SUBGROUP_NAME);
+ goto error;
+ }
+
+ if ((space_id = generate_random_dataspace(OBJECT_COPY_NO_ATTRS_TEST_SPACE_RANK, NULL, NULL, FALSE)) < 0)
+ TEST_ERROR;
+ if ((attr_space_id = generate_random_dataspace(OBJECT_COPY_NO_ATTRS_TEST_SPACE_RANK, NULL, NULL, TRUE)) <
+ 0)
+ TEST_ERROR;
+
+ if ((dset_dtype = generate_random_datatype(H5T_NO_CLASS, FALSE)) < 0)
+ TEST_ERROR;
+
+ /* Create the test group object, along with the attributes attached to it. */
+ if ((group_id2 = H5Gcreate2(group_id, OBJECT_COPY_NO_ATTRS_TEST_GROUP_NAME, H5P_DEFAULT, H5P_DEFAULT,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create group '%s'\n", OBJECT_COPY_NO_ATTRS_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ for (i = 0; i < (size_t)OBJECT_COPY_NO_ATTRS_TEST_NUM_ATTRS; i++) {
+ char attr_name[OBJECT_COPY_NO_ATTRS_TEST_BUF_SIZE];
+
+ snprintf(attr_name, OBJECT_COPY_NO_ATTRS_TEST_BUF_SIZE, "attr%d", (int)i);
+
+ if ((attr_id = H5Acreate2(group_id2, attr_name, H5T_NATIVE_INT, attr_space_id, H5P_DEFAULT,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create attribute '%s' on group '%s'\n", attr_name,
+ OBJECT_COPY_NO_ATTRS_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if (H5Aclose(attr_id) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't close attribute '%s'\n", attr_name);
+ goto error;
+ }
+ }
+
+ /* Create the test dataset object, along with the attributes attached to it. */
+ if ((dset_id = H5Dcreate2(group_id, OBJECT_COPY_NO_ATTRS_TEST_DSET_NAME, dset_dtype, space_id,
+ H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create dataset '%s'\n", OBJECT_COPY_NO_ATTRS_TEST_DSET_NAME);
+ goto error;
+ }
+
+ for (i = 0; i < (size_t)OBJECT_COPY_NO_ATTRS_TEST_NUM_ATTRS; i++) {
+ char attr_name[OBJECT_COPY_NO_ATTRS_TEST_BUF_SIZE];
+
+ snprintf(attr_name, OBJECT_COPY_NO_ATTRS_TEST_BUF_SIZE, "attr%d", (int)i);
+
+ if ((attr_id = H5Acreate2(dset_id, attr_name, H5T_NATIVE_INT, attr_space_id, H5P_DEFAULT,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create attribute '%s' on dataset '%s'\n", attr_name,
+ OBJECT_COPY_NO_ATTRS_TEST_DSET_NAME);
+ goto error;
+ }
+
+ if (H5Aclose(attr_id) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't close attribute '%s'\n", attr_name);
+ goto error;
+ }
+ }
+
+ /* Create the test committed datatype object, along with the attributes attached to it. */
+ if ((dtype_id = generate_random_datatype(H5T_NO_CLASS, FALSE)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create datatype\n");
+ goto error;
+ }
+
+ if (H5Tcommit2(group_id, OBJECT_COPY_NO_ATTRS_TEST_DTYPE_NAME, dtype_id, H5P_DEFAULT, H5P_DEFAULT,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't commit datatype '%s'\n", OBJECT_COPY_NO_ATTRS_TEST_DTYPE_NAME);
+ goto error;
+ }
+
+ for (i = 0; i < (size_t)OBJECT_COPY_NO_ATTRS_TEST_NUM_ATTRS; i++) {
+ char attr_name[OBJECT_COPY_NO_ATTRS_TEST_BUF_SIZE];
+
+ snprintf(attr_name, OBJECT_COPY_NO_ATTRS_TEST_BUF_SIZE, "attr%d", (int)i);
+
+ if ((attr_id = H5Acreate2(dtype_id, attr_name, H5T_NATIVE_INT, attr_space_id, H5P_DEFAULT,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create attribute '%s' on committed datatype '%s'\n", attr_name,
+ OBJECT_COPY_NO_ATTRS_TEST_DTYPE_NAME);
+ goto error;
+ }
+
+ if (H5Aclose(attr_id) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't close attribute '%s'\n", attr_name);
+ goto error;
+ }
+ }
+
+ PASSED();
+
+ BEGIN_MULTIPART
+ {
+ PART_BEGIN(H5Ocopy_group_no_attributes)
+ {
+ TESTING_2("H5Ocopy on a group (without attributes)");
+
+ if ((ocpypl_id = H5Pcreate(H5P_OBJECT_COPY)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create OCopyPL\n");
+ PART_ERROR(H5Ocopy_group_no_attributes);
+ }
+
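+            /*
+             * H5O_COPY_WITHOUT_ATTR_FLAG copies the object itself but skips
+             * copying any attributes attached to it.
+             */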
+ if (H5Pset_copy_object(ocpypl_id, H5O_COPY_WITHOUT_ATTR_FLAG) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't set object copying options\n");
+ PART_ERROR(H5Ocopy_group_no_attributes);
+ }
+
+ if (H5Ocopy(group_id, OBJECT_COPY_NO_ATTRS_TEST_GROUP_NAME, group_id,
+ OBJECT_COPY_NO_ATTRS_TEST_NEW_GROUP_NAME, ocpypl_id, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to copy group '%s' to '%s'\n", OBJECT_COPY_NO_ATTRS_TEST_GROUP_NAME,
+ OBJECT_COPY_NO_ATTRS_TEST_NEW_GROUP_NAME);
+ PART_ERROR(H5Ocopy_group_no_attributes);
+ }
+
+ if ((object_link_exists =
+ H5Lexists(group_id, OBJECT_COPY_NO_ATTRS_TEST_NEW_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if link '%s' to copied group exists\n",
+ OBJECT_COPY_NO_ATTRS_TEST_NEW_GROUP_NAME);
+ PART_ERROR(H5Ocopy_group_no_attributes);
+ }
+
+ if (!object_link_exists) {
+ H5_FAILED();
+ HDprintf(" link '%s' to copied group didn't exist!\n",
+ OBJECT_COPY_NO_ATTRS_TEST_NEW_GROUP_NAME);
+ PART_ERROR(H5Ocopy_group_no_attributes);
+ }
+
+ /* Ensure that the new group has no attributes */
+ if ((tmp_group_id = H5Gopen2(group_id, OBJECT_COPY_NO_ATTRS_TEST_NEW_GROUP_NAME, H5P_DEFAULT)) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" failed to open group copy '%s'\n", OBJECT_COPY_NO_ATTRS_TEST_NEW_GROUP_NAME);
+ PART_ERROR(H5Ocopy_group_no_attributes);
+ }
+
+ memset(&object_info, 0, sizeof(object_info));
+
+ /*
+ * Set attribute count to non-zero in case the connector doesn't
+ * support retrieval of object info.
+ */
+ object_info.num_attrs = 1;
+
+ if (H5Oget_info3(tmp_group_id, &object_info, H5O_INFO_ALL) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve object info\n");
+ PART_ERROR(H5Ocopy_group_no_attributes);
+ }
+
+ if (object_info.num_attrs != 0) {
+ H5_FAILED();
+ HDprintf(" copied group contained attributes after a non-attribute copy!\n");
+ PART_ERROR(H5Ocopy_group_no_attributes);
+ }
+
+ if (H5Pclose(ocpypl_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close OCopyPL\n");
+ PART_ERROR(H5Ocopy_group_no_attributes);
+ }
+
+ if (H5Gclose(tmp_group_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close group copy\n");
+ PART_ERROR(H5Ocopy_group_no_attributes);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Ocopy_group_no_attributes);
+
+ if (ocpypl_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Pclose(ocpypl_id);
+ }
+ H5E_END_TRY;
+ ocpypl_id = H5I_INVALID_HID;
+ }
+ if (tmp_group_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Gclose(tmp_group_id);
+ }
+ H5E_END_TRY;
+ tmp_group_id = H5I_INVALID_HID;
+ }
+
+ PART_BEGIN(H5Ocopy_dset_no_attributes)
+ {
+ TESTING_2("H5Ocopy on a dataset (without attributes)");
+
+ if ((ocpypl_id = H5Pcreate(H5P_OBJECT_COPY)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create OCopyPL\n");
+ PART_ERROR(H5Ocopy_dset_no_attributes);
+ }
+
+ if (H5Pset_copy_object(ocpypl_id, H5O_COPY_WITHOUT_ATTR_FLAG) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't set object copying options\n");
+ PART_ERROR(H5Ocopy_dset_no_attributes);
+ }
+
+ if (H5Ocopy(group_id, OBJECT_COPY_NO_ATTRS_TEST_DSET_NAME, group_id,
+ OBJECT_COPY_NO_ATTRS_TEST_NEW_DSET_NAME, ocpypl_id, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to copy dataset '%s' to '%s'\n", OBJECT_COPY_NO_ATTRS_TEST_DSET_NAME,
+ OBJECT_COPY_NO_ATTRS_TEST_NEW_DSET_NAME);
+ PART_ERROR(H5Ocopy_dset_no_attributes);
+ }
+
+ if ((object_link_exists =
+ H5Lexists(group_id, OBJECT_COPY_NO_ATTRS_TEST_NEW_DSET_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if link '%s' to copied dataset exists\n",
+ OBJECT_COPY_NO_ATTRS_TEST_NEW_DSET_NAME);
+ PART_ERROR(H5Ocopy_dset_no_attributes);
+ }
+
+ if (!object_link_exists) {
+ H5_FAILED();
+ HDprintf(" link '%s' to copied dataset didn't exist!\n",
+ OBJECT_COPY_NO_ATTRS_TEST_NEW_DSET_NAME);
+ PART_ERROR(H5Ocopy_dset_no_attributes);
+ }
+
+ /* Ensure that the new dataset doesn't have any attributes */
+ if ((tmp_dset_id = H5Dopen2(group_id, OBJECT_COPY_NO_ATTRS_TEST_NEW_DSET_NAME, H5P_DEFAULT)) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" failed to open dataset copy '%s'\n", OBJECT_COPY_NO_ATTRS_TEST_NEW_DSET_NAME);
+ PART_ERROR(H5Ocopy_dset_no_attributes);
+ }
+
+ memset(&object_info, 0, sizeof(object_info));
+
+ /*
+ * Set attribute count to non-zero in case the connector doesn't
+ * support retrieval of object info.
+ */
+ object_info.num_attrs = 1;
+
+ if (H5Oget_info3(tmp_dset_id, &object_info, H5O_INFO_ALL) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve object info\n");
+ PART_ERROR(H5Ocopy_dset_no_attributes);
+ }
+
+ if (object_info.num_attrs != 0) {
+ H5_FAILED();
+ HDprintf(" copied dataset contained attributes after a non-attribute copy!\n");
+ PART_ERROR(H5Ocopy_dset_no_attributes);
+ }
+
+ if (H5Pclose(ocpypl_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close OCopyPL\n");
+ PART_ERROR(H5Ocopy_dset_no_attributes);
+ }
+
+ if (H5Dclose(tmp_dset_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close dataset copy\n");
+ PART_ERROR(H5Ocopy_dset_no_attributes);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Ocopy_dset_no_attributes);
+
+ if (ocpypl_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Pclose(ocpypl_id);
+ }
+ H5E_END_TRY;
+ ocpypl_id = H5I_INVALID_HID;
+ }
+ if (tmp_dset_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Dclose(tmp_dset_id);
+ }
+ H5E_END_TRY;
+ tmp_dset_id = H5I_INVALID_HID;
+ }
+
+ PART_BEGIN(H5Ocopy_dtype_no_attributes)
+ {
+ TESTING_2("H5Ocopy on a committed datatype (without attributes)");
+
+ if ((ocpypl_id = H5Pcreate(H5P_OBJECT_COPY)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create OCopyPL\n");
+ PART_ERROR(H5Ocopy_dtype_no_attributes);
+ }
+
+ if (H5Pset_copy_object(ocpypl_id, H5O_COPY_WITHOUT_ATTR_FLAG) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't set object copying options\n");
+ PART_ERROR(H5Ocopy_dtype_no_attributes);
+ }
+
+ if (H5Ocopy(group_id, OBJECT_COPY_NO_ATTRS_TEST_DTYPE_NAME, group_id,
+ OBJECT_COPY_NO_ATTRS_TEST_NEW_DTYPE_NAME, ocpypl_id, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to copy datatype '%s' to '%s'\n", OBJECT_COPY_NO_ATTRS_TEST_DTYPE_NAME,
+ OBJECT_COPY_NO_ATTRS_TEST_NEW_DTYPE_NAME);
+ PART_ERROR(H5Ocopy_dtype_no_attributes);
+ }
+
+ if ((object_link_exists =
+ H5Lexists(group_id, OBJECT_COPY_NO_ATTRS_TEST_NEW_DTYPE_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if link '%s' to copied datatype exists\n",
+ OBJECT_COPY_NO_ATTRS_TEST_NEW_DTYPE_NAME);
+ PART_ERROR(H5Ocopy_dtype_no_attributes);
+ }
+
+ if (!object_link_exists) {
+ H5_FAILED();
+ HDprintf(" link '%s' to copied datatype didn't exist!\n",
+ OBJECT_COPY_NO_ATTRS_TEST_NEW_DTYPE_NAME);
+ PART_ERROR(H5Ocopy_dtype_no_attributes);
+ }
+
+ /* Ensure that the new committed datatype doesn't have any attributes */
+ if ((tmp_dtype_id = H5Topen2(group_id, OBJECT_COPY_NO_ATTRS_TEST_NEW_DTYPE_NAME, H5P_DEFAULT)) <
+ 0) {
+ H5_FAILED();
+                HDprintf("    failed to open committed datatype copy '%s'\n",
+                         OBJECT_COPY_NO_ATTRS_TEST_NEW_DTYPE_NAME);
+ PART_ERROR(H5Ocopy_dtype_no_attributes);
+ }
+
+ memset(&object_info, 0, sizeof(object_info));
+
+ /*
+ * Set attribute count to non-zero in case the connector doesn't
+ * support retrieval of object info.
+ */
+ object_info.num_attrs = 1;
+
+ if (H5Oget_info3(tmp_dtype_id, &object_info, H5O_INFO_ALL) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve object info\n");
+ PART_ERROR(H5Ocopy_dtype_no_attributes);
+ }
+
+ if (object_info.num_attrs != 0) {
+ H5_FAILED();
+ HDprintf(" copied committed datatype contained attributes after a non-attribute copy!\n");
+ PART_ERROR(H5Ocopy_dtype_no_attributes);
+ }
+
+ if (H5Pclose(ocpypl_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close OCopyPL\n");
+ PART_ERROR(H5Ocopy_dtype_no_attributes);
+ }
+
+ if (H5Tclose(tmp_dtype_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close datatype copy\n");
+ PART_ERROR(H5Ocopy_dtype_no_attributes);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Ocopy_dtype_no_attributes);
+
+ if (ocpypl_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Pclose(ocpypl_id);
+ }
+ H5E_END_TRY;
+ ocpypl_id = H5I_INVALID_HID;
+ }
+ if (tmp_dtype_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Tclose(tmp_dtype_id);
+ }
+ H5E_END_TRY;
+ tmp_dtype_id = H5I_INVALID_HID;
+ }
+ }
+ END_MULTIPART;
+
+ TESTING_2("test cleanup");
+
+ if (H5Sclose(attr_space_id) < 0)
+ TEST_ERROR;
+ if (H5Sclose(space_id) < 0)
+ TEST_ERROR;
+ if (H5Tclose(dset_dtype) < 0)
+ TEST_ERROR;
+ if (H5Tclose(dtype_id) < 0)
+ TEST_ERROR;
+ if (H5Dclose(dset_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id2) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Pclose(ocpypl_id);
+ H5Sclose(attr_space_id);
+ H5Sclose(space_id);
+ H5Aclose(attr_id);
+ H5Tclose(dset_dtype);
+ H5Tclose(tmp_dtype_id);
+ H5Tclose(dtype_id);
+ H5Dclose(tmp_dset_id);
+ H5Dclose(dset_id);
+ H5Gclose(tmp_group_id);
+ H5Gclose(group_id2);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * Tests to exercise the behavior of H5Ocopy when the source
+ * object specified is a soft link or dangling soft link.
+ */
+static int
+test_object_copy_by_soft_link(void)
+{
+ H5O_info2_t object_info;
+ H5G_info_t group_info;
+ H5L_info2_t link_info;
+ htri_t object_link_exists;
+ size_t i;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t group_id2 = H5I_INVALID_HID;
+ hid_t tmp_group_id = H5I_INVALID_HID;
+ hid_t attr_id = H5I_INVALID_HID;
+ hid_t attr_space_id = H5I_INVALID_HID;
+
+ TESTING_MULTIPART("object copying through use of soft links");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & (H5VL_CAP_FLAG_FILE_BASIC)) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_OBJECT_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_OBJECT_MORE) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_LINK_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_LINK_MORE) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_ITERATE) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_SOFT_LINKS)) {
+ SKIPPED();
+        HDprintf("    API functions for basic file, group, object, link, attribute, iterate, or "
+                 "soft link aren't supported with this connector\n");
+ return 0;
+ }
+
+ TESTING_2("test setup");
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, OBJECT_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", OBJECT_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, OBJECT_COPY_SOFT_LINK_TEST_SUBGROUP_NAME, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container subgroup '%s'\n", OBJECT_COPY_SOFT_LINK_TEST_SUBGROUP_NAME);
+ goto error;
+ }
+
+ if ((attr_space_id = generate_random_dataspace(OBJECT_COPY_SOFT_LINK_TEST_SPACE_RANK, NULL, NULL, TRUE)) <
+ 0)
+ TEST_ERROR;
+
+ /* Create the test group object, along with its nested members and the attributes attached to it. */
+ if ((group_id2 = H5Gcreate2(group_id, OBJECT_COPY_SOFT_LINK_TEST_GROUP_NAME, H5P_DEFAULT, H5P_DEFAULT,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create group '%s'\n", OBJECT_COPY_SOFT_LINK_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ for (i = 0; i < (size_t)OBJECT_COPY_SOFT_LINK_TEST_NUM_NESTED_OBJS; i++) {
+ char grp_name[OBJECT_COPY_SOFT_LINK_TEST_BUF_SIZE];
+
+ snprintf(grp_name, OBJECT_COPY_SOFT_LINK_TEST_BUF_SIZE, "grp%d", (int)i);
+
+ if ((tmp_group_id = H5Gcreate2(group_id2, grp_name, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create group '%s' under group '%s'\n", grp_name,
+ OBJECT_COPY_SOFT_LINK_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ /* Create a further nested group under the last group added */
+ if (i == (OBJECT_COPY_SOFT_LINK_TEST_NUM_NESTED_OBJS - 1)) {
+ if (H5Gclose(H5Gcreate2(tmp_group_id, OBJECT_COPY_SOFT_LINK_TEST_DEEP_NESTED_GROUP_NAME,
+ H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create nested group '%s' under group '%s'\n",
+ OBJECT_COPY_SOFT_LINK_TEST_DEEP_NESTED_GROUP_NAME, grp_name);
+ goto error;
+ }
+ }
+
+ if (H5Gclose(tmp_group_id) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't close group '%s'\n", grp_name);
+ goto error;
+ }
+ }
+
+ for (i = 0; i < (size_t)OBJECT_COPY_SOFT_LINK_TEST_NUM_ATTRS; i++) {
+ char attr_name[OBJECT_COPY_SOFT_LINK_TEST_BUF_SIZE];
+
+ snprintf(attr_name, OBJECT_COPY_SOFT_LINK_TEST_BUF_SIZE, "attr%d", (int)i);
+
+ if ((attr_id = H5Acreate2(group_id2, attr_name, H5T_NATIVE_INT, attr_space_id, H5P_DEFAULT,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create attribute '%s' on group '%s'\n", attr_name,
+ OBJECT_COPY_SOFT_LINK_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if (H5Aclose(attr_id) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't close attribute '%s'\n", attr_name);
+ goto error;
+ }
+ }
+
+ PASSED();
+
+ BEGIN_MULTIPART
+ {
+ PART_BEGIN(H5Ocopy_through_soft_link)
+ {
+ TESTING_2("H5Ocopy through use of a soft link");
+
+ if (H5Lcreate_soft("/" OBJECT_TEST_GROUP_NAME "/" OBJECT_COPY_SOFT_LINK_TEST_SUBGROUP_NAME
+ "/" OBJECT_COPY_SOFT_LINK_TEST_GROUP_NAME,
+ group_id, OBJECT_COPY_SOFT_LINK_TEST_SOFT_LINK_NAME, H5P_DEFAULT,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to create soft link '%s' to group for copying\n",
+ OBJECT_COPY_SOFT_LINK_TEST_SOFT_LINK_NAME);
+ PART_ERROR(H5Ocopy_through_soft_link);
+ }
+
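+            /*
+             * Copying by the soft link's name should resolve the link and copy
+             * its target group, so the copied object is linked with a hard link.
+             */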
+ if (H5Ocopy(group_id, OBJECT_COPY_SOFT_LINK_TEST_SOFT_LINK_NAME, group_id,
+ OBJECT_COPY_SOFT_LINK_TEST_NEW_GROUP_NAME, H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to copy group '%s' to '%s'\n", OBJECT_COPY_SOFT_LINK_TEST_GROUP_NAME,
+ OBJECT_COPY_SOFT_LINK_TEST_NEW_GROUP_NAME);
+ PART_ERROR(H5Ocopy_through_soft_link);
+ }
+
+ if ((object_link_exists =
+ H5Lexists(group_id, OBJECT_COPY_SOFT_LINK_TEST_NEW_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if link '%s' to copied group exists\n",
+ OBJECT_COPY_SOFT_LINK_TEST_NEW_GROUP_NAME);
+ PART_ERROR(H5Ocopy_through_soft_link);
+ }
+
+ if (!object_link_exists) {
+ H5_FAILED();
+ HDprintf(" link '%s' to copied group didn't exist!\n",
+ OBJECT_COPY_SOFT_LINK_TEST_NEW_GROUP_NAME);
+ PART_ERROR(H5Ocopy_through_soft_link);
+ }
+
+ /* Make sure the new object is an actual group and not another soft link */
+ memset(&link_info, 0, sizeof(link_info));
+ if (H5Lget_info2(group_id, OBJECT_COPY_SOFT_LINK_TEST_NEW_GROUP_NAME, &link_info, H5P_DEFAULT) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve info for link '%s'\n",
+ OBJECT_COPY_SOFT_LINK_TEST_NEW_GROUP_NAME);
+ PART_ERROR(H5Ocopy_through_soft_link);
+ }
+
+ if (link_info.type != H5L_TYPE_HARD) {
+ H5_FAILED();
+ HDprintf(
+ " after group copy through soft link, group's new link type wasn't H5L_TYPE_HARD!\n");
+ PART_ERROR(H5Ocopy_through_soft_link);
+ }
+
+ /*
+ * Ensure that the new group doesn't have any attributes and only the
+ * immediate members of the copied group.
+ */
+ if ((tmp_group_id = H5Gopen2(group_id, OBJECT_COPY_SOFT_LINK_TEST_NEW_GROUP_NAME, H5P_DEFAULT)) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" failed to open group copy '%s'\n", OBJECT_COPY_SOFT_LINK_TEST_NEW_GROUP_NAME);
+ PART_ERROR(H5Ocopy_through_soft_link);
+ }
+
+ memset(&group_info, 0, sizeof(group_info));
+
+ /*
+ * Set link count to zero in case the connector doesn't support
+ * retrieval of group info.
+ */
+ group_info.nlinks = 0;
+
+ if (H5Gget_info(tmp_group_id, &group_info) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve group info\n");
+ PART_ERROR(H5Ocopy_through_soft_link);
+ }
+
+ if (group_info.nlinks != OBJECT_COPY_SOFT_LINK_TEST_NUM_NESTED_OBJS) {
+ H5_FAILED();
+ HDprintf(
+                    "    copied group contained %d members instead of %d members after copy!\n",
+ (int)group_info.nlinks, OBJECT_COPY_SOFT_LINK_TEST_NUM_NESTED_OBJS);
+ PART_ERROR(H5Ocopy_through_soft_link);
+ }
+
+ memset(&object_info, 0, sizeof(object_info));
+
+ /*
+ * Set attribute count to zero in case the connector doesn't
+ * support retrieval of object info.
+ */
+ object_info.num_attrs = 0;
+
+ if (H5Oget_info3(tmp_group_id, &object_info, H5O_INFO_ALL) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve object info\n");
+ PART_ERROR(H5Ocopy_through_soft_link);
+ }
+
+ if (object_info.num_attrs == 0) {
+ H5_FAILED();
+ HDprintf(" copied group didn't contain any attributes after copy operation!\n");
+ PART_ERROR(H5Ocopy_through_soft_link);
+ }
+
+ /* Check the attribute names, types, etc. */
+ i = 0;
+ if (H5Aiterate2(tmp_group_id, H5_INDEX_NAME, H5_ITER_INC, NULL,
+ object_copy_attribute_iter_callback, &i) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to iterate over copied group's attributes\n");
+ PART_ERROR(H5Ocopy_through_soft_link);
+ }
+
+ if (i != OBJECT_COPY_SOFT_LINK_TEST_NUM_ATTRS) {
+ H5_FAILED();
+ HDprintf(
+ " number of attributes on copied group (%llu) didn't match expected number (%llu)!\n",
+ (unsigned long long)i, (unsigned long long)OBJECT_COPY_SOFT_LINK_TEST_NUM_ATTRS);
+ PART_ERROR(H5Ocopy_through_soft_link);
+ }
+
+ if (H5Gclose(tmp_group_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close group copy\n");
+ PART_ERROR(H5Ocopy_through_soft_link);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Ocopy_through_soft_link);
+
+ if (tmp_group_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Gclose(tmp_group_id);
+ }
+ H5E_END_TRY;
+ tmp_group_id = H5I_INVALID_HID;
+ }
+
+ PART_BEGIN(H5Ocopy_through_dangling_soft_link)
+ {
+ herr_t err_ret;
+
+ TESTING_2("H5Ocopy through use of a dangling soft link");
+
+ if (H5Lcreate_soft("/" OBJECT_TEST_GROUP_NAME "/" OBJECT_COPY_SOFT_LINK_TEST_SUBGROUP_NAME
+ "/nonexistent_object",
+ group_id, OBJECT_COPY_SOFT_LINK_TEST_DANGLING_LINK_NAME, H5P_DEFAULT,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to create dangling soft link '%s'\n",
+ OBJECT_COPY_SOFT_LINK_TEST_DANGLING_LINK_NAME);
+ PART_ERROR(H5Ocopy_through_dangling_soft_link);
+ }
+
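+            /* The dangling link's target doesn't exist, so the copy is expected to fail */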
+ H5E_BEGIN_TRY
+ {
+ err_ret =
+ H5Ocopy(group_id, OBJECT_COPY_SOFT_LINK_TEST_DANGLING_LINK_NAME, group_id,
+ OBJECT_COPY_SOFT_LINK_TEST_DANGLING_LINK_NAME "2", H5P_DEFAULT, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" copied non-existent object through use of a dangling soft link!\n");
+ PART_ERROR(H5Ocopy_through_dangling_soft_link);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Ocopy_through_dangling_soft_link);
+ }
+ END_MULTIPART;
+
+ TESTING_2("test cleanup");
+
+ if (H5Sclose(attr_space_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id2) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(attr_space_id);
+ H5Aclose(attr_id);
+ H5Gclose(tmp_group_id);
+ H5Gclose(group_id2);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * Tests for copying groups that contain soft links with
+ * H5Ocopy. Also tested is the H5O_COPY_EXPAND_SOFT_LINK_FLAG
+ * flag.
+ */
+static int
+test_object_copy_group_with_soft_links(void)
+{
+ H5G_info_t group_info;
+ htri_t object_link_exists;
+ size_t i;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t group_id2 = H5I_INVALID_HID;
+ hid_t tmp_group_id = H5I_INVALID_HID;
+ hid_t ocpypl_id = H5I_INVALID_HID;
+
+ TESTING_MULTIPART("group copying when group contains soft links");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & (H5VL_CAP_FLAG_FILE_BASIC)) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_OBJECT_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_OBJECT_MORE) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_LINK_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_LINK_MORE) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_ITERATE) || !(vol_cap_flags_g & H5VL_CAP_FLAG_SOFT_LINKS)) {
+ SKIPPED();
+        HDprintf("    API functions for basic file, group, object, link, iterate, or soft link aren't "
+                 "supported with this connector\n");
+ return 0;
+ }
+
+ TESTING_2("test setup");
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, OBJECT_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", OBJECT_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, OBJECT_COPY_GROUP_WITH_SOFT_LINKS_TEST_SUBGROUP_NAME,
+ H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container subgroup '%s'\n",
+ OBJECT_COPY_GROUP_WITH_SOFT_LINKS_TEST_SUBGROUP_NAME);
+ goto error;
+ }
+
+ /* Create the test group object. */
+ if ((group_id2 = H5Gcreate2(group_id, OBJECT_COPY_GROUP_WITH_SOFT_LINKS_TEST_GROUP_NAME, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create group '%s'\n", OBJECT_COPY_GROUP_WITH_SOFT_LINKS_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ /* Create several groups at the root level and add soft links pointing to them inside
+ * the test group object.
+ */
+ for (i = 0; i < (size_t)OBJECT_COPY_GROUP_WITH_SOFT_LINKS_TEST_NUM_NESTED_OBJS; i++) {
+ char grp_name[OBJECT_COPY_GROUP_WITH_SOFT_LINKS_TEST_BUF_SIZE];
+ char lnk_name[OBJECT_COPY_GROUP_WITH_SOFT_LINKS_TEST_BUF_SIZE];
+ char lnk_target[2 * OBJECT_COPY_GROUP_WITH_SOFT_LINKS_TEST_BUF_SIZE];
+
+ snprintf(grp_name, OBJECT_COPY_GROUP_WITH_SOFT_LINKS_TEST_BUF_SIZE, "grp%d", (int)i);
+ snprintf(lnk_name, OBJECT_COPY_GROUP_WITH_SOFT_LINKS_TEST_BUF_SIZE, "link%d", (int)i);
+ snprintf(lnk_target, 2 * OBJECT_COPY_GROUP_WITH_SOFT_LINKS_TEST_BUF_SIZE,
+ "/" OBJECT_TEST_GROUP_NAME "/" OBJECT_COPY_GROUP_WITH_SOFT_LINKS_TEST_SUBGROUP_NAME "/%s",
+ grp_name);
+
+ if ((tmp_group_id = H5Gcreate2(group_id, grp_name, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create group '%s' under group '%s'\n", grp_name,
+ OBJECT_COPY_GROUP_WITH_SOFT_LINKS_TEST_SUBGROUP_NAME);
+ goto error;
+ }
+
+ if (H5Lcreate_soft(lnk_target, group_id2, lnk_name, H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to create soft link '%s'\n", lnk_name);
+ goto error;
+ }
+
+ if (H5Gclose(tmp_group_id) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't close group '%s'\n", grp_name);
+ goto error;
+ }
+ }
+
+ PASSED();
+
+ BEGIN_MULTIPART
+ {
+ PART_BEGIN(H5Ocopy_dont_expand_soft_links)
+ {
+ TESTING_2("H5Ocopy on group with soft links (soft links not expanded)");
+
+ if (H5Ocopy(group_id, OBJECT_COPY_GROUP_WITH_SOFT_LINKS_TEST_GROUP_NAME, group_id,
+ OBJECT_COPY_GROUP_WITH_SOFT_LINKS_TEST_NON_EXPAND_GROUP_NAME, H5P_DEFAULT,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to copy group '%s' to '%s'\n",
+ OBJECT_COPY_GROUP_WITH_SOFT_LINKS_TEST_GROUP_NAME,
+ OBJECT_COPY_GROUP_WITH_SOFT_LINKS_TEST_NON_EXPAND_GROUP_NAME);
+ PART_ERROR(H5Ocopy_dont_expand_soft_links);
+ }
+
+ if ((object_link_exists =
+ H5Lexists(group_id, OBJECT_COPY_GROUP_WITH_SOFT_LINKS_TEST_NON_EXPAND_GROUP_NAME,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if link '%s' to copied group exists\n",
+ OBJECT_COPY_GROUP_WITH_SOFT_LINKS_TEST_NON_EXPAND_GROUP_NAME);
+ PART_ERROR(H5Ocopy_dont_expand_soft_links);
+ }
+
+ if (!object_link_exists) {
+ H5_FAILED();
+ HDprintf(" link '%s' to copied group didn't exist!\n",
+ OBJECT_COPY_GROUP_WITH_SOFT_LINKS_TEST_NON_EXPAND_GROUP_NAME);
+ PART_ERROR(H5Ocopy_dont_expand_soft_links);
+ }
+
+ /* Ensure that the number of links is the same */
+ if ((tmp_group_id =
+ H5Gopen2(group_id, OBJECT_COPY_GROUP_WITH_SOFT_LINKS_TEST_NON_EXPAND_GROUP_NAME,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to open group copy '%s'\n",
+ OBJECT_COPY_GROUP_WITH_SOFT_LINKS_TEST_NON_EXPAND_GROUP_NAME);
+ PART_ERROR(H5Ocopy_dont_expand_soft_links);
+ }
+
+ memset(&group_info, 0, sizeof(group_info));
+
+ /*
+ * Set link count to zero in case the connector doesn't support
+ * retrieval of group info.
+ */
+ group_info.nlinks = 0;
+
+ if (H5Gget_info(tmp_group_id, &group_info) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve group info\n");
+ PART_ERROR(H5Ocopy_dont_expand_soft_links);
+ }
+
+ if (group_info.nlinks != OBJECT_COPY_GROUP_WITH_SOFT_LINKS_TEST_NUM_NESTED_OBJS) {
+ H5_FAILED();
+ HDprintf(" copied group contained %d members instead of %d members after copy!\n",
+ (int)group_info.nlinks, OBJECT_COPY_GROUP_WITH_SOFT_LINKS_TEST_NUM_NESTED_OBJS);
+ PART_ERROR(H5Ocopy_dont_expand_soft_links);
+ }
+
+ /*
+ * Iterate over the links in the copied group and ensure that they're all
+ * still soft links with their original values.
+ */
+ i = 0;
+ if (H5Literate2(tmp_group_id, H5_INDEX_NAME, H5_ITER_INC, NULL,
+ object_copy_soft_link_non_expand_callback, &i) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to iterate over links in group '%s'\n",
+ OBJECT_COPY_GROUP_WITH_SOFT_LINKS_TEST_NON_EXPAND_GROUP_NAME);
+ PART_ERROR(H5Ocopy_dont_expand_soft_links);
+ }
+
+ if (i != OBJECT_COPY_GROUP_WITH_SOFT_LINKS_TEST_NUM_NESTED_OBJS) {
+ H5_FAILED();
+ HDprintf(" number of links in copied group (%llu) didn't match expected number (%llu)!\n",
+ (unsigned long long)i,
+ (unsigned long long)OBJECT_COPY_GROUP_WITH_SOFT_LINKS_TEST_NUM_NESTED_OBJS);
+ PART_ERROR(H5Ocopy_dont_expand_soft_links);
+ }
+
+ if (H5Gclose(tmp_group_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close group copy\n");
+ PART_ERROR(H5Ocopy_dont_expand_soft_links);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Ocopy_dont_expand_soft_links);
+
+ if (tmp_group_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Gclose(tmp_group_id);
+ }
+ H5E_END_TRY;
+ tmp_group_id = H5I_INVALID_HID;
+ }
+
+ PART_BEGIN(H5Ocopy_expand_soft_links)
+ {
+ TESTING_2("H5Ocopy on group with soft links (soft links expanded)");
+
+ if ((ocpypl_id = H5Pcreate(H5P_OBJECT_COPY)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create OCopyPL\n");
+ PART_ERROR(H5Ocopy_expand_soft_links);
+ }
+
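+            /*
+             * H5O_COPY_EXPAND_SOFT_LINK_FLAG causes soft links in the source group
+             * to be expanded, so the copied group contains hard links to new copies
+             * of the link targets rather than soft links.
+             */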
+ if (H5Pset_copy_object(ocpypl_id, H5O_COPY_EXPAND_SOFT_LINK_FLAG) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't set object copying options\n");
+ PART_ERROR(H5Ocopy_expand_soft_links);
+ }
+
+ if (H5Ocopy(group_id, OBJECT_COPY_GROUP_WITH_SOFT_LINKS_TEST_GROUP_NAME, group_id,
+ OBJECT_COPY_GROUP_WITH_SOFT_LINKS_TEST_EXPAND_GROUP_NAME, ocpypl_id,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to copy group '%s' to '%s'\n",
+ OBJECT_COPY_GROUP_WITH_SOFT_LINKS_TEST_GROUP_NAME,
+ OBJECT_COPY_GROUP_WITH_SOFT_LINKS_TEST_EXPAND_GROUP_NAME);
+ PART_ERROR(H5Ocopy_expand_soft_links);
+ }
+
+ if ((object_link_exists = H5Lexists(
+ group_id, OBJECT_COPY_GROUP_WITH_SOFT_LINKS_TEST_EXPAND_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if link '%s' to copied group exists\n",
+ OBJECT_COPY_GROUP_WITH_SOFT_LINKS_TEST_EXPAND_GROUP_NAME);
+ PART_ERROR(H5Ocopy_expand_soft_links);
+ }
+
+ if (!object_link_exists) {
+ H5_FAILED();
+ HDprintf(" link '%s' to copied group didn't exist!\n",
+ OBJECT_COPY_GROUP_WITH_SOFT_LINKS_TEST_EXPAND_GROUP_NAME);
+ PART_ERROR(H5Ocopy_expand_soft_links);
+ }
+
+ /* Ensure that the number of links is the same */
+ if ((tmp_group_id = H5Gopen2(group_id, OBJECT_COPY_GROUP_WITH_SOFT_LINKS_TEST_EXPAND_GROUP_NAME,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to open group copy '%s'\n",
+ OBJECT_COPY_GROUP_WITH_SOFT_LINKS_TEST_EXPAND_GROUP_NAME);
+ PART_ERROR(H5Ocopy_expand_soft_links);
+ }
+
+ memset(&group_info, 0, sizeof(group_info));
+
+ /*
+ * Set link count to zero in case the connector doesn't support
+ * retrieval of group info.
+ */
+ group_info.nlinks = 0;
+
+ if (H5Gget_info(tmp_group_id, &group_info) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve group info\n");
+ PART_ERROR(H5Ocopy_expand_soft_links);
+ }
+
+ if (group_info.nlinks != OBJECT_COPY_GROUP_WITH_SOFT_LINKS_TEST_NUM_NESTED_OBJS) {
+ H5_FAILED();
+ HDprintf(" copied group contained %d members instead of %d members after copy!\n",
+ (int)group_info.nlinks, OBJECT_COPY_GROUP_WITH_SOFT_LINKS_TEST_NUM_NESTED_OBJS);
+ PART_ERROR(H5Ocopy_expand_soft_links);
+ }
+
+ /*
+ * Iterate over the links in the copied group and ensure that they've all
+ * been expanded into hard links corresponding to the top-level groups
+ * created.
+ */
+ i = 0;
+ if (H5Literate2(tmp_group_id, H5_INDEX_NAME, H5_ITER_INC, NULL,
+ object_copy_soft_link_expand_callback, &i) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to iterate over links in group '%s'\n",
+ OBJECT_COPY_GROUP_WITH_SOFT_LINKS_TEST_EXPAND_GROUP_NAME);
+ PART_ERROR(H5Ocopy_expand_soft_links);
+ }
+
+ if (i != OBJECT_COPY_GROUP_WITH_SOFT_LINKS_TEST_NUM_NESTED_OBJS) {
+ H5_FAILED();
+ HDprintf(" number of links in copied group (%llu) didn't match expected number (%llu)!\n",
+ (unsigned long long)i,
+ (unsigned long long)OBJECT_COPY_GROUP_WITH_SOFT_LINKS_TEST_NUM_NESTED_OBJS);
+ PART_ERROR(H5Ocopy_expand_soft_links);
+ }
+
+ if (H5Pclose(ocpypl_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close OCopyPL\n");
+ PART_ERROR(H5Ocopy_expand_soft_links);
+ }
+
+ if (H5Gclose(tmp_group_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close group copy\n");
+ PART_ERROR(H5Ocopy_expand_soft_links);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Ocopy_expand_soft_links);
+
+ if (ocpypl_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+                H5Pclose(ocpypl_id);
+ }
+ H5E_END_TRY;
+ ocpypl_id = H5I_INVALID_HID;
+ }
+ if (tmp_group_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Gclose(tmp_group_id);
+ }
+ H5E_END_TRY;
+ tmp_group_id = H5I_INVALID_HID;
+ }
+ }
+ END_MULTIPART;
+
+ TESTING_2("test cleanup");
+
+ if (H5Gclose(group_id2) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Pclose(ocpypl_id);
+ H5Gclose(tmp_group_id);
+ H5Gclose(group_id2);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * Tests for copying objects between two different files using
+ * H5Ocopy.
+ */
+static int
+test_object_copy_between_files(void)
+{
+ H5O_info2_t object_info;
+ H5G_info_t group_info;
+ htri_t object_link_exists;
+ size_t i;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t file_id2 = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t group_id2 = H5I_INVALID_HID;
+ hid_t tmp_group_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t tmp_dset_id = H5I_INVALID_HID;
+ hid_t dset_dtype = H5I_INVALID_HID;
+ hid_t dtype_id = H5I_INVALID_HID;
+ hid_t tmp_dtype_id = H5I_INVALID_HID;
+ hid_t attr_id = H5I_INVALID_HID;
+ hid_t attr_space_id = H5I_INVALID_HID;
+ hid_t space_id = H5I_INVALID_HID;
+ hid_t ocpypl_id = H5I_INVALID_HID;
+
+ TESTING_MULTIPART("object copying between files");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & (H5VL_CAP_FLAG_FILE_BASIC)) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_OBJECT_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_OBJECT_MORE) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_LINK_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_LINK_MORE) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_MORE) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_STORED_DATATYPES)) {
+ SKIPPED();
+        HDprintf("    API functions for basic file, group, object, link, dataset, attribute, or stored "
+                 "datatype aren't supported with this connector\n");
+ return 0;
+ }
+
+ TESTING_2("test setup");
+
+ /*
+ * Create the second file for the between file copying tests.
+ */
+ if ((file_id2 = H5Fcreate(OBJECT_COPY_BETWEEN_FILES_TEST_FILE_NAME, H5F_ACC_TRUNC, H5P_DEFAULT,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+        HDprintf("    couldn't create file '%s'\n", OBJECT_COPY_BETWEEN_FILES_TEST_FILE_NAME);
+ goto error;
+ }
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, OBJECT_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", OBJECT_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, OBJECT_COPY_BETWEEN_FILES_TEST_SUBGROUP_NAME, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container subgroup '%s'\n",
+ OBJECT_COPY_BETWEEN_FILES_TEST_SUBGROUP_NAME);
+ goto error;
+ }
+
+ if ((space_id = generate_random_dataspace(OBJECT_COPY_BETWEEN_FILES_TEST_SPACE_RANK, NULL, NULL, FALSE)) <
+ 0)
+ TEST_ERROR;
+ if ((attr_space_id =
+ generate_random_dataspace(OBJECT_COPY_BETWEEN_FILES_TEST_SPACE_RANK, NULL, NULL, TRUE)) < 0)
+ TEST_ERROR;
+
+ if ((dset_dtype = generate_random_datatype(H5T_NO_CLASS, FALSE)) < 0)
+ TEST_ERROR;
+
+ /* Create the test group object, along with its nested members and the attributes attached to it. */
+ if ((group_id2 = H5Gcreate2(group_id, OBJECT_COPY_BETWEEN_FILES_TEST_GROUP_NAME, H5P_DEFAULT, H5P_DEFAULT,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create group '%s'\n", OBJECT_COPY_BETWEEN_FILES_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ for (i = 0; i < (size_t)OBJECT_COPY_BETWEEN_FILES_TEST_NUM_NESTED_OBJS; i++) {
+ char grp_name[OBJECT_COPY_BETWEEN_FILES_TEST_BUF_SIZE];
+
+ snprintf(grp_name, OBJECT_COPY_BETWEEN_FILES_TEST_BUF_SIZE, "grp%d", (int)i);
+
+ if ((tmp_group_id = H5Gcreate2(group_id2, grp_name, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create group '%s' under group '%s'\n", grp_name,
+ OBJECT_COPY_BETWEEN_FILES_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ /* Create a further nested group under the last group added */
+ if (i == (OBJECT_COPY_BETWEEN_FILES_TEST_NUM_NESTED_OBJS - 1)) {
+ if (H5Gclose(H5Gcreate2(tmp_group_id, OBJECT_COPY_BETWEEN_FILES_TEST_DEEP_NESTED_GROUP_NAME,
+ H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create nested group '%s' under group '%s'\n",
+ OBJECT_COPY_BETWEEN_FILES_TEST_DEEP_NESTED_GROUP_NAME, grp_name);
+ goto error;
+ }
+ }
+
+ if (H5Gclose(tmp_group_id) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't close group '%s'\n", grp_name);
+ goto error;
+ }
+ }
+
+ for (i = 0; i < (size_t)OBJECT_COPY_BETWEEN_FILES_TEST_NUM_ATTRS; i++) {
+ char attr_name[OBJECT_COPY_BETWEEN_FILES_TEST_BUF_SIZE];
+
+ snprintf(attr_name, OBJECT_COPY_BETWEEN_FILES_TEST_BUF_SIZE, "attr%d", (int)i);
+
+ if ((attr_id = H5Acreate2(group_id2, attr_name, H5T_NATIVE_INT, attr_space_id, H5P_DEFAULT,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create attribute '%s' on group '%s'\n", attr_name,
+ OBJECT_COPY_BETWEEN_FILES_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if (H5Aclose(attr_id) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't close attribute '%s'\n", attr_name);
+ goto error;
+ }
+ }
+
+ /* Create the test dataset object, along with the attributes attached to it. */
+ if ((dset_id = H5Dcreate2(group_id, OBJECT_COPY_BETWEEN_FILES_TEST_DSET_NAME, dset_dtype, space_id,
+ H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create dataset '%s'\n", OBJECT_COPY_BETWEEN_FILES_TEST_DSET_NAME);
+ goto error;
+ }
+
+ for (i = 0; i < (size_t)OBJECT_COPY_BETWEEN_FILES_TEST_NUM_ATTRS; i++) {
+ char attr_name[OBJECT_COPY_BETWEEN_FILES_TEST_BUF_SIZE];
+
+ snprintf(attr_name, OBJECT_COPY_BETWEEN_FILES_TEST_BUF_SIZE, "attr%d", (int)i);
+
+ if ((attr_id = H5Acreate2(dset_id, attr_name, H5T_NATIVE_INT, attr_space_id, H5P_DEFAULT,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create attribute '%s' on dataset '%s'\n", attr_name,
+ OBJECT_COPY_BETWEEN_FILES_TEST_DSET_NAME);
+ goto error;
+ }
+
+ if (H5Aclose(attr_id) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't close attribute '%s'\n", attr_name);
+ goto error;
+ }
+ }
+
+ /* Create the test committed datatype object, along with the attributes attached to it. */
+ if ((dtype_id = generate_random_datatype(H5T_NO_CLASS, FALSE)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create datatype\n");
+ goto error;
+ }
+
+ if (H5Tcommit2(group_id, OBJECT_COPY_BETWEEN_FILES_TEST_DTYPE_NAME, dtype_id, H5P_DEFAULT, H5P_DEFAULT,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't commit datatype '%s'\n", OBJECT_COPY_BETWEEN_FILES_TEST_DTYPE_NAME);
+ goto error;
+ }
+
+ for (i = 0; i < (size_t)OBJECT_COPY_BETWEEN_FILES_TEST_NUM_ATTRS; i++) {
+ char attr_name[OBJECT_COPY_BETWEEN_FILES_TEST_BUF_SIZE];
+
+ snprintf(attr_name, OBJECT_COPY_BETWEEN_FILES_TEST_BUF_SIZE, "attr%d", (int)i);
+
+ if ((attr_id = H5Acreate2(dtype_id, attr_name, H5T_NATIVE_INT, attr_space_id, H5P_DEFAULT,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create attribute '%s' on committed datatype '%s'\n", attr_name,
+ OBJECT_COPY_BETWEEN_FILES_TEST_DTYPE_NAME);
+ goto error;
+ }
+
+ if (H5Aclose(attr_id) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't close attribute '%s'\n", attr_name);
+ goto error;
+ }
+ }
+
+ PASSED();
+
+ BEGIN_MULTIPART
+ {
+ PART_BEGIN(H5Ocopy_group_between_files)
+ {
+ TESTING_2("H5Ocopy on group between different files");
+
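+            /*
+             * With default copy properties, H5Ocopy performs a deep copy, so the
+             * group's nested members and attributes should all appear in the
+             * destination file.
+             */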
+ if (H5Ocopy(group_id, OBJECT_COPY_BETWEEN_FILES_TEST_GROUP_NAME, file_id2,
+ OBJECT_COPY_BETWEEN_FILES_TEST_NEW_GROUP_NAME, H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to copy group '%s' to second file '%s'\n",
+ OBJECT_COPY_BETWEEN_FILES_TEST_GROUP_NAME,
+ OBJECT_COPY_BETWEEN_FILES_TEST_NEW_GROUP_NAME);
+ PART_ERROR(H5Ocopy_group_between_files);
+ }
+
+ if ((object_link_exists =
+ H5Lexists(file_id2, OBJECT_COPY_BETWEEN_FILES_TEST_NEW_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if link '%s' to copied group exists\n",
+ OBJECT_COPY_BETWEEN_FILES_TEST_NEW_GROUP_NAME);
+ PART_ERROR(H5Ocopy_group_between_files);
+ }
+
+ if (!object_link_exists) {
+ H5_FAILED();
+ HDprintf(" link '%s' to copied group in second file '%s' didn't exist!\n",
+ OBJECT_COPY_BETWEEN_FILES_TEST_NEW_GROUP_NAME,
+ OBJECT_COPY_BETWEEN_FILES_TEST_FILE_NAME);
+ PART_ERROR(H5Ocopy_group_between_files);
+ }
+
+ /* Ensure that the new group has all the members of the copied group, and all its attributes */
+ if ((tmp_group_id =
+ H5Gopen2(file_id2, OBJECT_COPY_BETWEEN_FILES_TEST_NEW_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to open group copy '%s'\n",
+ OBJECT_COPY_BETWEEN_FILES_TEST_NEW_GROUP_NAME);
+ PART_ERROR(H5Ocopy_group_between_files);
+ }
+
+ memset(&group_info, 0, sizeof(group_info));
+
+ /*
+ * Set link count to zero in case the connector doesn't support
+ * retrieval of group info.
+ */
+ group_info.nlinks = 0;
+
+ if (H5Gget_info(tmp_group_id, &group_info) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve group info\n");
+ PART_ERROR(H5Ocopy_group_between_files);
+ }
+
+ if (group_info.nlinks != OBJECT_COPY_BETWEEN_FILES_TEST_NUM_NESTED_OBJS) {
+ H5_FAILED();
+ HDprintf(" copied group contained %d members instead of %d members after a deep copy!\n",
+ (int)group_info.nlinks, OBJECT_COPY_BETWEEN_FILES_TEST_NUM_NESTED_OBJS);
+ PART_ERROR(H5Ocopy_group_between_files);
+ }
+
+ memset(&object_info, 0, sizeof(object_info));
+
+ /*
+ * Set attribute count to zero in case the connector doesn't
+ * support retrieval of object info.
+ */
+ object_info.num_attrs = 0;
+
+ if (H5Oget_info3(tmp_group_id, &object_info, H5O_INFO_ALL) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve object info\n");
+ PART_ERROR(H5Ocopy_group_between_files);
+ }
+
+ if (object_info.num_attrs == 0) {
+ H5_FAILED();
+ HDprintf(" copied group didn't contain any attributes after copy operation!\n");
+ PART_ERROR(H5Ocopy_group_between_files);
+ }
+
+ /* Check the attribute names, types, etc. */
+ i = 0;
+ if (H5Aiterate2(tmp_group_id, H5_INDEX_NAME, H5_ITER_INC, NULL,
+ object_copy_attribute_iter_callback, &i) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to iterate over copied group's attributes\n");
+ PART_ERROR(H5Ocopy_group_between_files);
+ }
+
+ if (i != OBJECT_COPY_BETWEEN_FILES_TEST_NUM_ATTRS) {
+ H5_FAILED();
+ HDprintf(
+ " number of attributes on copied group (%llu) didn't match expected number (%llu)!\n",
+ (unsigned long long)i, (unsigned long long)OBJECT_COPY_BETWEEN_FILES_TEST_NUM_ATTRS);
+ PART_ERROR(H5Ocopy_group_between_files);
+ }
+
+ if (H5Gclose(tmp_group_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close group copy\n");
+ PART_ERROR(H5Ocopy_group_between_files);
+ }
+
+ /*
+ * Ensure that the last immediate member of the copied group
+ * contains its single member after the deep copy.
+ */
+ {
+ char grp_name[OBJECT_COPY_BETWEEN_FILES_TEST_BUF_SIZE];
+
+ snprintf(grp_name, OBJECT_COPY_BETWEEN_FILES_TEST_BUF_SIZE,
+ "/" OBJECT_COPY_BETWEEN_FILES_TEST_NEW_GROUP_NAME "/grp%d",
+ OBJECT_COPY_BETWEEN_FILES_TEST_NUM_NESTED_OBJS - 1);
+
+ if ((tmp_group_id = H5Gopen2(file_id2, grp_name, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to open group '%s'\n", grp_name);
+ PART_ERROR(H5Ocopy_group_between_files);
+ }
+
+ memset(&group_info, 0, sizeof(group_info));
+
+ /*
+ * Set link count to zero in case the connector doesn't support
+ * retrieval of group info.
+ */
+ group_info.nlinks = 0;
+
+ if (H5Gget_info(tmp_group_id, &group_info) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve group info\n");
+ PART_ERROR(H5Ocopy_group_between_files);
+ }
+
+ if (group_info.nlinks != 1) {
+ H5_FAILED();
+                    HDprintf("    copied group's last immediate member didn't contain its nested member after a "
+                             "deep copy!\n");
+ PART_ERROR(H5Ocopy_group_between_files);
+ }
+
+ if (H5Gclose(tmp_group_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close group '%s'\n", grp_name);
+ PART_ERROR(H5Ocopy_group_between_files);
+ }
+ }
+
+ PASSED();
+ }
+ PART_END(H5Ocopy_group_between_files);
+
+ if (tmp_group_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Gclose(tmp_group_id);
+ }
+ H5E_END_TRY;
+ tmp_group_id = H5I_INVALID_HID;
+ }
+
+ PART_BEGIN(H5Ocopy_dset_between_files)
+ {
+ TESTING_2("H5Ocopy on dataset between different files");
+
+ if (H5Ocopy(group_id, OBJECT_COPY_BETWEEN_FILES_TEST_DSET_NAME, file_id2,
+ OBJECT_COPY_BETWEEN_FILES_TEST_NEW_DSET_NAME, H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to copy dataset '%s' to second file '%s'\n",
+ OBJECT_COPY_BETWEEN_FILES_TEST_DSET_NAME,
+ OBJECT_COPY_BETWEEN_FILES_TEST_NEW_DSET_NAME);
+ PART_ERROR(H5Ocopy_dset_between_files);
+ }
+
+ if ((object_link_exists =
+ H5Lexists(file_id2, OBJECT_COPY_BETWEEN_FILES_TEST_NEW_DSET_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if link '%s' to copied dataset exists\n",
+ OBJECT_COPY_BETWEEN_FILES_TEST_NEW_DSET_NAME);
+ PART_ERROR(H5Ocopy_dset_between_files);
+ }
+
+ if (!object_link_exists) {
+ H5_FAILED();
+ HDprintf(" link '%s' to copied dataset in second file '%s' didn't exist!\n",
+ OBJECT_COPY_BETWEEN_FILES_TEST_NEW_DSET_NAME,
+ OBJECT_COPY_BETWEEN_FILES_TEST_FILE_NAME);
+ PART_ERROR(H5Ocopy_dset_between_files);
+ }
+
+ /* Ensure that the new dataset has all the attributes of the copied dataset */
+ if ((tmp_dset_id =
+ H5Dopen2(file_id2, OBJECT_COPY_BETWEEN_FILES_TEST_NEW_DSET_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to open dataset copy '%s'\n",
+ OBJECT_COPY_BETWEEN_FILES_TEST_NEW_DSET_NAME);
+ PART_ERROR(H5Ocopy_dset_between_files);
+ }
+
+ memset(&object_info, 0, sizeof(object_info));
+
+ /*
+ * Set attribute count to zero in case the connector doesn't
+ * support retrieval of object info.
+ */
+ object_info.num_attrs = 0;
+
+ if (H5Oget_info3(tmp_dset_id, &object_info, H5O_INFO_ALL) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve object info\n");
+ PART_ERROR(H5Ocopy_dset_between_files);
+ }
+
+ if (object_info.num_attrs == 0) {
+ H5_FAILED();
+ HDprintf(" copied dataset didn't contain any attributes after copy operation!\n");
+ PART_ERROR(H5Ocopy_dset_between_files);
+ }
+
+ /* Check the attribute names, types, etc. */
+ i = 0;
+ if (H5Aiterate2(tmp_dset_id, H5_INDEX_NAME, H5_ITER_INC, NULL,
+ object_copy_attribute_iter_callback, &i) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to iterate over copied dataset's attributes\n");
+ PART_ERROR(H5Ocopy_dset_between_files);
+ }
+
+ if (i != OBJECT_COPY_BETWEEN_FILES_TEST_NUM_ATTRS) {
+ H5_FAILED();
+ HDprintf(" number of attributes on copied dataset (%llu) didn't match expected number "
+ "(%llu)!\n",
+ (unsigned long long)i, (unsigned long long)OBJECT_COPY_BETWEEN_FILES_TEST_NUM_ATTRS);
+ PART_ERROR(H5Ocopy_dset_between_files);
+ }
+
+ if (H5Dclose(tmp_dset_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close dataset copy\n");
+ PART_ERROR(H5Ocopy_dset_between_files);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Ocopy_dset_between_files);
+
+ if (tmp_dset_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Dclose(tmp_dset_id);
+ }
+ H5E_END_TRY;
+ tmp_dset_id = H5I_INVALID_HID;
+ }
+
+ PART_BEGIN(H5Ocopy_dtype_between_files)
+ {
+ TESTING_2("H5Ocopy on committed datatype between different files");
+
+ if (H5Ocopy(group_id, OBJECT_COPY_BETWEEN_FILES_TEST_DTYPE_NAME, file_id2,
+ OBJECT_COPY_BETWEEN_FILES_TEST_NEW_DTYPE_NAME, H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to copy committed datatype '%s' to second file '%s'\n",
+ OBJECT_COPY_BETWEEN_FILES_TEST_DTYPE_NAME,
+ OBJECT_COPY_BETWEEN_FILES_TEST_NEW_DTYPE_NAME);
+ PART_ERROR(H5Ocopy_dtype_between_files);
+ }
+
+ if ((object_link_exists =
+ H5Lexists(file_id2, OBJECT_COPY_BETWEEN_FILES_TEST_NEW_DTYPE_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if link '%s' to copied committed datatype exists\n",
+ OBJECT_COPY_BETWEEN_FILES_TEST_NEW_DTYPE_NAME);
+ PART_ERROR(H5Ocopy_dtype_between_files);
+ }
+
+ if (!object_link_exists) {
+ H5_FAILED();
+ HDprintf(" link '%s' to copied committed datatype in second file '%s' didn't exist!\n",
+ OBJECT_COPY_BETWEEN_FILES_TEST_NEW_DTYPE_NAME,
+ OBJECT_COPY_BETWEEN_FILES_TEST_FILE_NAME);
+ PART_ERROR(H5Ocopy_dtype_between_files);
+ }
+
+ /* Ensure that the new committed datatype has all the attributes of the copied committed datatype
+ */
+ if ((tmp_dtype_id =
+ H5Topen2(file_id2, OBJECT_COPY_BETWEEN_FILES_TEST_NEW_DTYPE_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to open committed datatype copy '%s'\n",
+ OBJECT_COPY_BETWEEN_FILES_TEST_NEW_DTYPE_NAME);
+ PART_ERROR(H5Ocopy_dtype_between_files);
+ }
+
+ memset(&object_info, 0, sizeof(object_info));
+
+ /*
+ * Set attribute count to zero in case the connector doesn't
+ * support retrieval of object info.
+ */
+ object_info.num_attrs = 0;
+
+ if (H5Oget_info3(tmp_dtype_id, &object_info, H5O_INFO_ALL) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve object info\n");
+ PART_ERROR(H5Ocopy_dtype_between_files);
+ }
+
+ if (object_info.num_attrs == 0) {
+ H5_FAILED();
+ HDprintf(
+ " copied committed datatype didn't contain any attributes after copy operation!\n");
+ PART_ERROR(H5Ocopy_dtype_between_files);
+ }
+
+ /* Check the attribute names, types, etc. */
+ i = 0;
+ if (H5Aiterate2(tmp_dtype_id, H5_INDEX_NAME, H5_ITER_INC, NULL,
+ object_copy_attribute_iter_callback, &i) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to iterate over copied datatype's attributes\n");
+ PART_ERROR(H5Ocopy_dtype_between_files);
+ }
+
+ if (i != OBJECT_COPY_BETWEEN_FILES_TEST_NUM_ATTRS) {
+ H5_FAILED();
+ HDprintf(" number of attributes on copied datatype (%llu) didn't match expected number "
+ "(%llu)!\n",
+ (unsigned long long)i, (unsigned long long)OBJECT_COPY_BETWEEN_FILES_TEST_NUM_ATTRS);
+ PART_ERROR(H5Ocopy_dtype_between_files);
+ }
+
+ if (H5Tclose(tmp_dtype_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close committed datatype copy\n");
+ PART_ERROR(H5Ocopy_dtype_between_files);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Ocopy_dtype_between_files);
+
+ if (tmp_dtype_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Tclose(tmp_dtype_id);
+ }
+ H5E_END_TRY;
+ tmp_dtype_id = H5I_INVALID_HID;
+ }
+ }
+ END_MULTIPART;
+
+ TESTING_2("test cleanup");
+
+ if (H5Sclose(attr_space_id) < 0)
+ TEST_ERROR;
+ if (H5Sclose(space_id) < 0)
+ TEST_ERROR;
+ if (H5Tclose(dset_dtype) < 0)
+ TEST_ERROR;
+ if (H5Tclose(dtype_id) < 0)
+ TEST_ERROR;
+ if (H5Dclose(dset_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id2) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id2) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Pclose(ocpypl_id);
+ H5Sclose(attr_space_id);
+ H5Sclose(space_id);
+ H5Aclose(attr_id);
+ H5Tclose(dset_dtype);
+ H5Tclose(tmp_dtype_id);
+ H5Tclose(dtype_id);
+ H5Dclose(tmp_dset_id);
+ H5Dclose(dset_id);
+ H5Gclose(tmp_group_id);
+ H5Gclose(group_id2);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id2);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to check that H5Ocopy fails when it
+ * is passed invalid parameters.
+ */
+static int
+test_object_copy_invalid_params(void)
+{
+ herr_t err_ret = -1;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t group_id2 = H5I_INVALID_HID;
+
+ TESTING_MULTIPART("object copying with invalid parameters");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & (H5VL_CAP_FLAG_FILE_BASIC)) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_OBJECT_MORE)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file, group, or object aren't supported with this connector\n");
+ return 0;
+ }
+
+ TESTING_2("test setup");
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, OBJECT_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", OBJECT_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, OBJECT_COPY_INVALID_PARAMS_TEST_SUBGROUP_NAME, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container subgroup '%s'\n",
+ OBJECT_COPY_INVALID_PARAMS_TEST_SUBGROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id2 = H5Gcreate2(group_id, OBJECT_COPY_INVALID_PARAMS_TEST_GROUP_NAME, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create group '%s'\n", OBJECT_COPY_INVALID_PARAMS_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ PASSED();
+
+ BEGIN_MULTIPART
+ {
+ PART_BEGIN(H5Ocopy_invalid_src_loc_id)
+ {
+ TESTING_2("H5Ocopy with an invalid source location ID");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Ocopy(H5I_INVALID_HID, OBJECT_COPY_INVALID_PARAMS_TEST_GROUP_NAME, group_id,
+ OBJECT_COPY_INVALID_PARAMS_TEST_GROUP_NAME2, H5P_DEFAULT, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Ocopy succeeded with an invalid source location ID!\n");
+ PART_ERROR(H5Ocopy_invalid_src_loc_id);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Ocopy_invalid_src_loc_id);
+
+ PART_BEGIN(H5Ocopy_invalid_src_obj_name)
+ {
+ TESTING_2("H5Ocopy with an invalid source object name");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Ocopy(group_id, NULL, group_id, OBJECT_COPY_INVALID_PARAMS_TEST_GROUP_NAME2,
+ H5P_DEFAULT, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Ocopy succeeded with a NULL source object name!\n");
+ PART_ERROR(H5Ocopy_invalid_src_obj_name);
+ }
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Ocopy(group_id, "", group_id, OBJECT_COPY_INVALID_PARAMS_TEST_GROUP_NAME2,
+ H5P_DEFAULT, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Ocopy succeeded with an invalid source object name of ''!\n");
+ PART_ERROR(H5Ocopy_invalid_src_obj_name);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Ocopy_invalid_src_obj_name);
+
+ PART_BEGIN(H5Ocopy_invalid_dst_loc_id)
+ {
+ TESTING_2("H5Ocopy with an invalid destination location ID");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Ocopy(group_id, OBJECT_COPY_INVALID_PARAMS_TEST_GROUP_NAME, H5I_INVALID_HID,
+ OBJECT_COPY_INVALID_PARAMS_TEST_GROUP_NAME2, H5P_DEFAULT, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Ocopy succeeded with an invalid destination location ID!\n");
+ PART_ERROR(H5Ocopy_invalid_dst_loc_id);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Ocopy_invalid_dst_loc_id);
+
+ PART_BEGIN(H5Ocopy_invalid_dst_obj_name)
+ {
+ TESTING_2("H5Ocopy with an invalid destination object name");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Ocopy(group_id, OBJECT_COPY_INVALID_PARAMS_TEST_GROUP_NAME, group_id, NULL,
+ H5P_DEFAULT, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Ocopy succeeded with a NULL destination object name!\n");
+ PART_ERROR(H5Ocopy_invalid_dst_obj_name);
+ }
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Ocopy(group_id, OBJECT_COPY_INVALID_PARAMS_TEST_GROUP_NAME, group_id, "",
+ H5P_DEFAULT, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Ocopy succeeded with an invalid destination object name of ''!\n");
+ PART_ERROR(H5Ocopy_invalid_dst_obj_name);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Ocopy_invalid_dst_obj_name);
+
+ PART_BEGIN(H5Ocopy_invalid_ocpypl)
+ {
+ TESTING_2("H5Ocopy with an invalid OcpyPL");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Ocopy(group_id, OBJECT_COPY_INVALID_PARAMS_TEST_GROUP_NAME, group_id,
+ OBJECT_COPY_INVALID_PARAMS_TEST_GROUP_NAME2, H5I_INVALID_HID, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Ocopy succeeded with an invalid OcpyPL!\n");
+ PART_ERROR(H5Ocopy_invalid_ocpypl);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Ocopy_invalid_ocpypl);
+
+ PART_BEGIN(H5Ocopy_invalid_lcpl)
+ {
+ TESTING_2("H5Ocopy with an invalid LCPL");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Ocopy(group_id, OBJECT_COPY_INVALID_PARAMS_TEST_GROUP_NAME, group_id,
+ OBJECT_COPY_INVALID_PARAMS_TEST_GROUP_NAME2, H5P_DEFAULT, H5I_INVALID_HID);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Ocopy succeeded with an invalid LCPL!\n");
+ PART_ERROR(H5Ocopy_invalid_lcpl);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Ocopy_invalid_lcpl);
+ }
+ END_MULTIPART;
+
+ TESTING_2("test cleanup");
+
+ if (H5Gclose(group_id2) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Gclose(group_id2);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test for H5Oset_comment(_by_name)/H5Oget_comment(_by_name).
+ */
+static int
+test_object_comments(void)
+{
+ TESTING("object comments");
+
+ SKIPPED();
+
+ return 0;
+}
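+
+/*
+ * NOTE: The comment test above is currently skipped. A minimal sketch of a
+ * set/get round trip it might exercise once implemented is kept below as a
+ * comment only; the object ID and comment text are hypothetical:
+ *
+ *     char comment_buf[64];
+ *
+ *     if (H5Oset_comment(group_id, "test comment") < 0)
+ *         TEST_ERROR;
+ *     if (H5Oget_comment(group_id, comment_buf, sizeof(comment_buf)) < 0)
+ *         TEST_ERROR;
+ *     if (HDstrcmp(comment_buf, "test comment"))
+ *         TEST_ERROR;
+ *
+ * The _by_name variants would follow the same pattern, taking a location ID,
+ * an object name, and an LAPL instead of the object ID.
+ */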
+
+/*
+ * A test to check that H5Oset_comment(_by_name)/H5Oget_comment(_by_name)
+ * fail when passed invalid parameters.
+ */
+static int
+test_object_comments_invalid_params(void)
+{
+ TESTING("object comment ");
+
+ SKIPPED();
+
+ return 0;
+}
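+
+/*
+ * NOTE: The invalid-parameter comment test above is also skipped. Following
+ * the pattern of the other invalid-parameter tests in this file, an
+ * implementation would likely wrap calls such as the following illustrative
+ * sketch in H5E_BEGIN_TRY/H5E_END_TRY and fail if the call succeeds:
+ *
+ *     H5E_BEGIN_TRY
+ *     {
+ *         err_ret = H5Oset_comment(H5I_INVALID_HID, "comment");
+ *     }
+ *     H5E_END_TRY;
+ *
+ *     if (err_ret >= 0)
+ *         TEST_ERROR;
+ *
+ * with similar checks for NULL/empty object names passed to the _by_name
+ * variants.
+ */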
+
+/*
+ * A test for H5Ovisit(_by_name).
+ *
+ * XXX: Should have test for checking nested object's names/paths.
+ */
+static int
+test_object_visit(void)
+{
+ size_t i;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t group_id2 = H5I_INVALID_HID;
+ hid_t gcpl_id = H5I_INVALID_HID;
+ hid_t type_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t dset_dtype = H5I_INVALID_HID;
+ hid_t fspace_id = H5I_INVALID_HID;
+
+ TESTING_MULTIPART("object visiting");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & (H5VL_CAP_FLAG_FILE_BASIC)) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_OBJECT_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_STORED_DATATYPES) || !(vol_cap_flags_g & H5VL_CAP_FLAG_ITERATE) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_CREATION_ORDER) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file, group, object, dataset, attribute, stored datatype, "
+ "iterate, or creation order aren't supported with this connector\n");
+ return 0;
+ }
+
+ TESTING_2("test setup");
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, OBJECT_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", OBJECT_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((gcpl_id = H5Pcreate(H5P_GROUP_CREATE)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create a GCPL\n");
+ goto error;
+ }
+
+ if (H5Pset_link_creation_order(gcpl_id, H5P_CRT_ORDER_TRACKED | H5P_CRT_ORDER_INDEXED) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't enable link creation order tracking and indexing on GCPL\n");
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, OBJECT_VISIT_TEST_SUBGROUP_NAME, H5P_DEFAULT, gcpl_id,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container sub-group '%s'\n", OBJECT_VISIT_TEST_SUBGROUP_NAME);
+ goto error;
+ }
+
+ if ((fspace_id = generate_random_dataspace(OBJECT_VISIT_TEST_SPACE_RANK, NULL, NULL, FALSE)) < 0)
+ TEST_ERROR;
+
+ if ((dset_dtype = generate_random_datatype(H5T_NO_CLASS, FALSE)) < 0)
+ TEST_ERROR;
+
+ if ((type_id = generate_random_datatype(H5T_NO_CLASS, FALSE)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create datatype '%s'\n", OBJECT_VISIT_TEST_TYPE_NAME);
+ goto error;
+ }
+
+ if ((group_id2 = H5Gcreate2(group_id, OBJECT_VISIT_TEST_GROUP_NAME, H5P_DEFAULT, gcpl_id, H5P_DEFAULT)) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create group '%s'\n", OBJECT_VISIT_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((dset_id = H5Dcreate2(group_id, OBJECT_VISIT_TEST_DSET_NAME, dset_dtype, fspace_id, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create dataset '%s'\n", OBJECT_VISIT_TEST_DSET_NAME);
+ goto error;
+ }
+
+ if (H5Tcommit2(group_id, OBJECT_VISIT_TEST_TYPE_NAME, type_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't commit datatype '%s'\n", OBJECT_VISIT_TEST_TYPE_NAME);
+ goto error;
+ }
+
+ PASSED();
+
+ BEGIN_MULTIPART
+ {
+ /*
+ * NOTE: A counter is passed to the iteration callback to try to match up
+ * the expected objects with a given step throughout all of the following
+ * iterations. This is to try and check that the objects are indeed being
+ * returned in the correct order.
+ */
+
+ PART_BEGIN(H5Ovisit_obj_name_increasing)
+ {
+ TESTING_2("H5Ovisit by object name in increasing order");
+
+ i = 0;
+
+ if (H5Ovisit3(group_id, H5_INDEX_NAME, H5_ITER_INC, object_visit_callback, &i, H5O_INFO_ALL) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" H5Ovisit by object name in increasing order failed\n");
+ PART_ERROR(H5Ovisit_obj_name_increasing);
+ }
+
+ if (i != OBJECT_VISIT_TEST_NUM_OBJS_VISITED) {
+ H5_FAILED();
+ HDprintf(" some objects were not visited!\n");
+ PART_ERROR(H5Ovisit_obj_name_increasing);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Ovisit_obj_name_increasing);
+
+ PART_BEGIN(H5Ovisit_obj_name_decreasing)
+ {
+ TESTING_2("H5Ovisit by object name in decreasing order");
+#ifndef NO_DECREASING_ALPHA_ITER_ORDER
+ /* Reset the counter to the appropriate value for the next test */
+ i = OBJECT_VISIT_TEST_NUM_OBJS_VISITED;
+
+ if (H5Ovisit3(group_id, H5_INDEX_NAME, H5_ITER_DEC, object_visit_callback, &i, H5O_INFO_ALL) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" H5Ovisit by object name in decreasing order failed\n");
+ PART_ERROR(H5Ovisit_obj_name_decreasing);
+ }
+
+ if (i != 2 * OBJECT_VISIT_TEST_NUM_OBJS_VISITED) {
+ H5_FAILED();
+ HDprintf(" some objects were not visited!\n");
+ PART_ERROR(H5Ovisit_obj_name_decreasing);
+ }
+
+ PASSED();
+#else
+ SKIPPED();
+ PART_EMPTY(H5Ovisit_obj_name_decreasing);
+#endif
+ }
+ PART_END(H5Ovisit_obj_name_decreasing);
+
+ PART_BEGIN(H5Ovisit_create_order_increasing)
+ {
+ TESTING_2("H5Ovisit by creation order in increasing order");
+
+ /* Reset the counter to the appropriate value for the next test */
+ i = 2 * OBJECT_VISIT_TEST_NUM_OBJS_VISITED;
+
+ if (H5Ovisit3(group_id, H5_INDEX_CRT_ORDER, H5_ITER_INC, object_visit_callback, &i,
+ H5O_INFO_ALL) < 0) {
+ H5_FAILED();
+ HDprintf(" H5Ovisit by creation order in increasing order failed\n");
+ PART_ERROR(H5Ovisit_create_order_increasing);
+ }
+
+ if (i != 3 * OBJECT_VISIT_TEST_NUM_OBJS_VISITED) {
+ H5_FAILED();
+ HDprintf(" some objects were not visited!\n");
+ PART_ERROR(H5Ovisit_create_order_increasing);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Ovisit_create_order_increasing);
+
+ PART_BEGIN(H5Ovisit_create_order_decreasing)
+ {
+ TESTING_2("H5Ovisit by creation order in decreasing order");
+
+ /* Reset the counter to the appropriate value for the next test */
+ i = 3 * OBJECT_VISIT_TEST_NUM_OBJS_VISITED;
+
+ if (H5Ovisit3(group_id, H5_INDEX_CRT_ORDER, H5_ITER_DEC, object_visit_callback, &i,
+ H5O_INFO_ALL) < 0) {
+ H5_FAILED();
+ HDprintf(" H5Ovisit by creation order in decreasing order failed\n");
+ PART_ERROR(H5Ovisit_create_order_decreasing);
+ }
+
+ if (i != 4 * OBJECT_VISIT_TEST_NUM_OBJS_VISITED) {
+ H5_FAILED();
+ HDprintf(" some objects were not visited!\n");
+ PART_ERROR(H5Ovisit_create_order_decreasing);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Ovisit_create_order_decreasing);
+
+ PART_BEGIN(H5Ovisit_file)
+ {
+ TESTING_2("H5Ovisit on a file ID");
+
+ /*
+ * XXX: Implement a test for H5Ovisit on a file ID.
+ */
+
+ SKIPPED();
+ PART_EMPTY(H5Ovisit_file);
+ }
+ PART_END(H5Ovisit_file);
+
+ PART_BEGIN(H5Ovisit_dset)
+ {
+ TESTING_2("H5Ovisit on a dataset ID");
+
+ if (H5Ovisit3(dset_id, H5_INDEX_NAME, H5_ITER_INC, object_visit_dset_callback, NULL,
+ H5O_INFO_ALL) < 0) {
+ H5_FAILED();
+ HDprintf(" H5Ovisit failed\n");
+ PART_ERROR(H5Ovisit_dset);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Ovisit_dset);
+
+ PART_BEGIN(H5Ovisit_dtype)
+ {
+ TESTING_2("H5Ovisit on a committed datatype ID");
+
+ if (H5Ovisit3(type_id, H5_INDEX_NAME, H5_ITER_INC, object_visit_dtype_callback, NULL,
+ H5O_INFO_ALL) < 0) {
+ H5_FAILED();
+ HDprintf(" H5Ovisit failed\n");
+ PART_ERROR(H5Ovisit_dtype);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Ovisit_dtype);
+
+ PART_BEGIN(H5Ovisit_by_name_obj_name_increasing)
+ {
+ TESTING_2("H5Ovisit_by_name by object name in increasing order");
+
+ /* Reset the counter to the appropriate value for the next test */
+ i = 0;
+
+ /* First, test visiting using "." for the object name */
+ if (H5Ovisit_by_name3(group_id, ".", H5_INDEX_NAME, H5_ITER_INC, object_visit_callback, &i,
+ H5O_INFO_ALL, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" H5Ovisit_by_name by object name in increasing order failed\n");
+ PART_ERROR(H5Ovisit_by_name_obj_name_increasing);
+ }
+
+ if (i != OBJECT_VISIT_TEST_NUM_OBJS_VISITED) {
+ H5_FAILED();
+ HDprintf(" some objects were not visited!\n");
+ PART_ERROR(H5Ovisit_by_name_obj_name_increasing);
+ }
+
+ /* Reset the special counter and repeat the test using an indirect object name. */
+ i = 0;
+
+ if (H5Ovisit_by_name3(container_group, OBJECT_VISIT_TEST_SUBGROUP_NAME, H5_INDEX_NAME,
+ H5_ITER_INC, object_visit_callback, &i, H5O_INFO_ALL, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" H5Ovisit_by_name by object name in increasing order failed\n");
+ PART_ERROR(H5Ovisit_by_name_obj_name_increasing);
+ }
+
+ if (i != OBJECT_VISIT_TEST_NUM_OBJS_VISITED) {
+ H5_FAILED();
+ HDprintf(" some objects were not visited!\n");
+ PART_ERROR(H5Ovisit_by_name_obj_name_increasing);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Ovisit_by_name_obj_name_increasing);
+
+ PART_BEGIN(H5Ovisit_by_name_obj_name_decreasing)
+ {
+ TESTING_2("H5Ovisit_by_name by object name in decreasing order");
+#ifndef NO_DECREASING_ALPHA_ITER_ORDER
+ /* Reset the counter to the appropriate value for the next test */
+ i = OBJECT_VISIT_TEST_NUM_OBJS_VISITED;
+
+ /* First, test visiting using "." for the object name */
+ if (H5Ovisit_by_name3(group_id, ".", H5_INDEX_NAME, H5_ITER_DEC, object_visit_callback, &i,
+ H5O_INFO_ALL, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" H5Ovisit_by_name by object name in decreasing order failed\n");
+ PART_ERROR(H5Ovisit_by_name_obj_name_decreasing);
+ }
+
+ if (i != 2 * OBJECT_VISIT_TEST_NUM_OBJS_VISITED) {
+ H5_FAILED();
+ HDprintf(" some objects were not visited!\n");
+ PART_ERROR(H5Ovisit_by_name_obj_name_decreasing);
+ }
+
+ /* Reset the special counter and repeat the test using an indirect object name. */
+ i = OBJECT_VISIT_TEST_NUM_OBJS_VISITED;
+
+ if (H5Ovisit_by_name3(container_group, OBJECT_VISIT_TEST_SUBGROUP_NAME, H5_INDEX_NAME,
+ H5_ITER_DEC, object_visit_callback, &i, H5O_INFO_ALL, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" H5Ovisit_by_name by object name in decreasing order failed\n");
+ PART_ERROR(H5Ovisit_by_name_obj_name_decreasing);
+ }
+
+ if (i != 2 * OBJECT_VISIT_TEST_NUM_OBJS_VISITED) {
+ H5_FAILED();
+ HDprintf(" some objects were not visited!\n");
+ PART_ERROR(H5Ovisit_by_name_obj_name_decreasing);
+ }
+
+ PASSED();
+#else
+ SKIPPED();
+ PART_EMPTY(H5Ovisit_by_name_obj_name_decreasing);
+#endif
+ }
+ PART_END(H5Ovisit_by_name_obj_name_decreasing);
+
+ PART_BEGIN(H5Ovisit_by_name_create_order_increasing)
+ {
+ TESTING_2("H5Ovisit_by_name by creation order in increasing order");
+
+ /* Reset the counter to the appropriate value for the next test */
+ i = 2 * OBJECT_VISIT_TEST_NUM_OBJS_VISITED;
+
+ /* First, test visiting using "." for the object name */
+ if (H5Ovisit_by_name3(group_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, object_visit_callback, &i,
+ H5O_INFO_ALL, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" H5Ovisit_by_name by creation order in increasing order failed\n");
+ PART_ERROR(H5Ovisit_by_name_create_order_increasing);
+ }
+
+ if (i != 3 * OBJECT_VISIT_TEST_NUM_OBJS_VISITED) {
+ H5_FAILED();
+ HDprintf(" some objects were not visited!\n");
+ PART_ERROR(H5Ovisit_by_name_create_order_increasing);
+ }
+
+ /* Reset the special counter and repeat the test using an indirect object name. */
+ i = 2 * OBJECT_VISIT_TEST_NUM_OBJS_VISITED;
+
+ if (H5Ovisit_by_name3(container_group, OBJECT_VISIT_TEST_SUBGROUP_NAME, H5_INDEX_CRT_ORDER,
+ H5_ITER_INC, object_visit_callback, &i, H5O_INFO_ALL, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" H5Ovisit_by_name by creation order in increasing order failed\n");
+ PART_ERROR(H5Ovisit_by_name_create_order_increasing);
+ }
+
+ if (i != 3 * OBJECT_VISIT_TEST_NUM_OBJS_VISITED) {
+ H5_FAILED();
+ HDprintf(" some objects were not visited!\n");
+ PART_ERROR(H5Ovisit_by_name_create_order_increasing);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Ovisit_by_name_create_order_increasing);
+
+ PART_BEGIN(H5Ovisit_by_name_create_order_decreasing)
+ {
+ TESTING_2("H5Ovisit_by_name by creation order in decreasing order");
+
+ /* Reset the counter to the appropriate value for the next test */
+ i = 3 * OBJECT_VISIT_TEST_NUM_OBJS_VISITED;
+
+ /* First, test visiting using "." for the object name */
+ if (H5Ovisit_by_name3(group_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_DEC, object_visit_callback, &i,
+ H5O_INFO_ALL, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" H5Ovisit_by_name by creation order in decreasing order failed\n");
+ PART_ERROR(H5Ovisit_by_name_create_order_decreasing);
+ }
+
+ if (i != 4 * OBJECT_VISIT_TEST_NUM_OBJS_VISITED) {
+ H5_FAILED();
+ HDprintf(" some objects were not visited!\n");
+ PART_ERROR(H5Ovisit_by_name_create_order_decreasing);
+ }
+
+ /* Reset the special counter and repeat the test using an indirect object name. */
+ i = 3 * OBJECT_VISIT_TEST_NUM_OBJS_VISITED;
+
+ if (H5Ovisit_by_name3(container_group, OBJECT_VISIT_TEST_SUBGROUP_NAME, H5_INDEX_CRT_ORDER,
+ H5_ITER_DEC, object_visit_callback, &i, H5O_INFO_ALL, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" H5Ovisit_by_name by creation order in decreasing order failed\n");
+ PART_ERROR(H5Ovisit_by_name_create_order_decreasing);
+ }
+
+ if (i != 4 * OBJECT_VISIT_TEST_NUM_OBJS_VISITED) {
+ H5_FAILED();
+ HDprintf(" some objects were not visited!\n");
+ PART_ERROR(H5Ovisit_by_name_create_order_decreasing);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Ovisit_by_name_create_order_decreasing);
+
+ PART_BEGIN(H5Ovisit_by_name_file)
+ {
+ TESTING_2("H5Ovisit_by_name on a file ID");
+
+ /*
+ * XXX: Implement a test for H5Ovisit_by_name on a file ID.
+ */
+
+ SKIPPED();
+ PART_EMPTY(H5Ovisit_by_name_file);
+ }
+ PART_END(H5Ovisit_by_name_file);
+
+ PART_BEGIN(H5Ovisit_by_name_dset)
+ {
+ TESTING_2("H5Ovisit_by_name on a dataset ID");
+
+ if (H5Ovisit_by_name3(group_id, OBJECT_VISIT_TEST_DSET_NAME, H5_INDEX_NAME, H5_ITER_INC,
+ object_visit_dset_callback, NULL, H5O_INFO_ALL, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" H5Ovisit_by_name failed\n");
+ PART_ERROR(H5Ovisit_by_name_dset);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Ovisit_by_name_dset);
+
+ PART_BEGIN(H5Ovisit_by_name_dtype)
+ {
+ TESTING_2("H5Ovisit_by_name on a committed datatype ID");
+
+ if (H5Ovisit_by_name3(group_id, OBJECT_VISIT_TEST_TYPE_NAME, H5_INDEX_NAME, H5_ITER_INC,
+ object_visit_dtype_callback, NULL, H5O_INFO_ALL, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" H5Ovisit_by_name failed\n");
+ PART_ERROR(H5Ovisit_by_name_dtype);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Ovisit_by_name_dtype);
+ }
+ END_MULTIPART;
+
+ TESTING_2("test cleanup");
+
+ if (H5Sclose(fspace_id) < 0)
+ TEST_ERROR;
+ if (H5Tclose(dset_dtype) < 0)
+ TEST_ERROR;
+ if (H5Tclose(type_id) < 0)
+ TEST_ERROR;
+ if (H5Dclose(dset_id) < 0)
+ TEST_ERROR;
+ if (H5Pclose(gcpl_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id2) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(fspace_id);
+ H5Tclose(dset_dtype);
+ H5Tclose(type_id);
+ H5Dclose(dset_id);
+ H5Pclose(gcpl_id);
+ H5Gclose(group_id2);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test for H5Ovisit(_by_name) on soft links. Since
+ * H5Ovisit(_by_name) ignores soft links, this test is
+ * meant to verify that behavior by placing objects and
+ * the soft links pointing to those objects in separate
+ * groups. Visiting is done only on the group containing
+ * the links to ensure that the objects in the other group
+ * do not get visited.
+ */
+static int
+test_object_visit_soft_link(void)
+{
+ size_t i;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t subgroup_id = H5I_INVALID_HID, subgroup_id2 = H5I_INVALID_HID;
+ hid_t linked_group_id = H5I_INVALID_HID;
+ hid_t gcpl_id = H5I_INVALID_HID;
+
+ TESTING_MULTIPART("object visiting with soft links");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & (H5VL_CAP_FLAG_FILE_BASIC)) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_OBJECT_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_ITERATE) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_CREATION_ORDER) || !(vol_cap_flags_g & H5VL_CAP_FLAG_SOFT_LINKS)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file, group, object, soft link, iterate, or creation order "
+ "aren't supported with this connector\n");
+ return 0;
+ }
+
+ TESTING_2("test setup");
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, OBJECT_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", OBJECT_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((gcpl_id = H5Pcreate(H5P_GROUP_CREATE)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create a GCPL\n");
+ goto error;
+ }
+
+ if (H5Pset_link_creation_order(gcpl_id, H5P_CRT_ORDER_TRACKED | H5P_CRT_ORDER_INDEXED) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't enable link creation order tracking and indexing on GCPL\n");
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, OBJECT_VISIT_SOFT_LINK_TEST_SUBGROUP_NAME, H5P_DEFAULT,
+ gcpl_id, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container sub-group '%s'\n", OBJECT_VISIT_SOFT_LINK_TEST_SUBGROUP_NAME);
+ goto error;
+ }
+
+ /* Create group to hold soft links */
+ if ((subgroup_id = H5Gcreate2(group_id, OBJECT_VISIT_SOFT_LINK_TEST_GROUP_NAME1, H5P_DEFAULT, gcpl_id,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create group '%s'\n", OBJECT_VISIT_SOFT_LINK_TEST_GROUP_NAME1);
+ goto error;
+ }
+
+ /* Create group to hold objects pointed to by soft links */
+ if ((subgroup_id2 = H5Gcreate2(group_id, OBJECT_VISIT_SOFT_LINK_TEST_GROUP_NAME2, H5P_DEFAULT, gcpl_id,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create group '%s'\n", OBJECT_VISIT_SOFT_LINK_TEST_GROUP_NAME2);
+ goto error;
+ }
+
+ /* Create objects under subgroup 2 */
+ if ((linked_group_id = H5Gcreate2(subgroup_id2, OBJECT_VISIT_SOFT_LINK_TEST_OBJ_NAME1, H5P_DEFAULT,
+ gcpl_id, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create group '%s'\n", OBJECT_VISIT_SOFT_LINK_TEST_OBJ_NAME1);
+ goto error;
+ }
+
+ if (H5Gclose(linked_group_id) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't close group '%s'\n", OBJECT_VISIT_SOFT_LINK_TEST_OBJ_NAME1);
+ goto error;
+ }
+
+ if ((linked_group_id = H5Gcreate2(subgroup_id2, OBJECT_VISIT_SOFT_LINK_TEST_OBJ_NAME2, H5P_DEFAULT,
+ gcpl_id, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create group '%s'\n", OBJECT_VISIT_SOFT_LINK_TEST_OBJ_NAME2);
+ goto error;
+ }
+
+ if (H5Gclose(linked_group_id) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't close group '%s'\n", OBJECT_VISIT_SOFT_LINK_TEST_OBJ_NAME2);
+ goto error;
+ }
+
+ if ((linked_group_id = H5Gcreate2(subgroup_id2, OBJECT_VISIT_SOFT_LINK_TEST_OBJ_NAME3, H5P_DEFAULT,
+ gcpl_id, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create group '%s'\n", OBJECT_VISIT_SOFT_LINK_TEST_OBJ_NAME3);
+ goto error;
+ }
+
+ if (H5Gclose(linked_group_id) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't close group '%s'\n", OBJECT_VISIT_SOFT_LINK_TEST_OBJ_NAME3);
+ goto error;
+ }
+
+ if (H5Gclose(subgroup_id2) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't close group '%s'\n", OBJECT_VISIT_SOFT_LINK_TEST_GROUP_NAME2);
+ goto error;
+ }
+
+ /* Create soft links under subgroup 1 to point to the previously-created objects */
+ if (H5Lcreate_soft("/" OBJECT_TEST_GROUP_NAME "/" OBJECT_VISIT_SOFT_LINK_TEST_SUBGROUP_NAME
+ "/" OBJECT_VISIT_SOFT_LINK_TEST_GROUP_NAME2 "/" OBJECT_VISIT_SOFT_LINK_TEST_OBJ_NAME1,
+ subgroup_id, OBJECT_VISIT_SOFT_LINK_TEST_LINK_NAME1, H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create soft link '%s'\n", OBJECT_VISIT_SOFT_LINK_TEST_LINK_NAME1);
+ goto error;
+ }
+
+ if (H5Lcreate_soft("/" OBJECT_TEST_GROUP_NAME "/" OBJECT_VISIT_SOFT_LINK_TEST_SUBGROUP_NAME
+ "/" OBJECT_VISIT_SOFT_LINK_TEST_GROUP_NAME2 "/" OBJECT_VISIT_SOFT_LINK_TEST_OBJ_NAME2,
+ subgroup_id, OBJECT_VISIT_SOFT_LINK_TEST_LINK_NAME2, H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create soft link '%s'\n", OBJECT_VISIT_SOFT_LINK_TEST_LINK_NAME2);
+ goto error;
+ }
+
+ if (H5Lcreate_soft("/" OBJECT_TEST_GROUP_NAME "/" OBJECT_VISIT_SOFT_LINK_TEST_SUBGROUP_NAME
+ "/" OBJECT_VISIT_SOFT_LINK_TEST_GROUP_NAME2 "/" OBJECT_VISIT_SOFT_LINK_TEST_OBJ_NAME3,
+ subgroup_id, OBJECT_VISIT_SOFT_LINK_TEST_LINK_NAME3, H5P_DEFAULT, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create soft link '%s'\n", OBJECT_VISIT_SOFT_LINK_TEST_LINK_NAME3);
+ goto error;
+ }
+
+ PASSED();
+
+ BEGIN_MULTIPART
+ {
+ /*
+ * NOTE: A counter is passed to the iteration callback to try to match up
+ * the expected objects with a given step throughout all of the following
+ * iterations. This is to try and check that the objects are indeed being
+ * returned in the correct order.
+ */
+
+ PART_BEGIN(H5Ovisit_obj_name_increasing)
+ {
+ TESTING_2("H5Ovisit by object name in increasing order");
+
+ i = 0;
+
+ if (H5Ovisit3(subgroup_id, H5_INDEX_NAME, H5_ITER_INC, object_visit_soft_link_callback, &i,
+ H5O_INFO_ALL) < 0) {
+ H5_FAILED();
+ HDprintf(" H5Ovisit by object name in increasing order failed\n");
+ PART_ERROR(H5Ovisit_obj_name_increasing);
+ }
+
+ if (i != OBJECT_VISIT_SOFT_LINK_TEST_NUM_OBJS_VISITED) {
+ H5_FAILED();
+ HDprintf(" some objects were not visited!\n");
+ PART_ERROR(H5Ovisit_obj_name_increasing);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Ovisit_obj_name_increasing);
+
+ PART_BEGIN(H5Ovisit_obj_name_decreasing)
+ {
+ TESTING_2("H5Ovisit by object name in decreasing order");
+#ifndef NO_DECREASING_ALPHA_ITER_ORDER
+ /* Reset the counter to the appropriate value for the next test */
+ i = OBJECT_VISIT_SOFT_LINK_TEST_NUM_OBJS_VISITED;
+
+ if (H5Ovisit3(subgroup_id, H5_INDEX_NAME, H5_ITER_DEC, object_visit_soft_link_callback, &i,
+ H5O_INFO_ALL) < 0) {
+ H5_FAILED();
+ HDprintf(" H5Ovisit by object name in decreasing order failed\n");
+ PART_ERROR(H5Ovisit_obj_name_decreasing);
+ }
+
+ if (i != 2 * OBJECT_VISIT_SOFT_LINK_TEST_NUM_OBJS_VISITED) {
+ H5_FAILED();
+ HDprintf(" some objects were not visited!\n");
+ PART_ERROR(H5Ovisit_obj_name_decreasing);
+ }
+
+ PASSED();
+#else
+ SKIPPED();
+ PART_EMPTY(H5Ovisit_obj_name_decreasing);
+#endif
+ }
+ PART_END(H5Ovisit_obj_name_decreasing);
+
+ PART_BEGIN(H5Ovisit_create_order_increasing)
+ {
+ TESTING_2("H5Ovisit by creation order in increasing order");
+
+ /* Reset the counter to the appropriate value for the next test */
+ i = 2 * OBJECT_VISIT_SOFT_LINK_TEST_NUM_OBJS_VISITED;
+
+ if (H5Ovisit3(subgroup_id, H5_INDEX_CRT_ORDER, H5_ITER_INC, object_visit_soft_link_callback, &i,
+ H5O_INFO_ALL) < 0) {
+ H5_FAILED();
+ HDprintf(" H5Ovisit by creation order in increasing order failed\n");
+ PART_ERROR(H5Ovisit_create_order_increasing);
+ }
+
+ if (i != 3 * OBJECT_VISIT_SOFT_LINK_TEST_NUM_OBJS_VISITED) {
+ H5_FAILED();
+ HDprintf(" some objects were not visited!\n");
+ PART_ERROR(H5Ovisit_create_order_increasing);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Ovisit_create_order_increasing);
+
+ PART_BEGIN(H5Ovisit_create_order_decreasing)
+ {
+ TESTING_2("H5Ovisit by creation order in decreasing order");
+
+ /* Reset the counter to the appropriate value for the next test */
+ i = 3 * OBJECT_VISIT_SOFT_LINK_TEST_NUM_OBJS_VISITED;
+
+ if (H5Ovisit3(subgroup_id, H5_INDEX_CRT_ORDER, H5_ITER_DEC, object_visit_soft_link_callback, &i,
+ H5O_INFO_ALL) < 0) {
+ H5_FAILED();
+ HDprintf(" H5Ovisit by creation order in decreasing order failed\n");
+ PART_ERROR(H5Ovisit_create_order_decreasing);
+ }
+
+ if (i != 4 * OBJECT_VISIT_SOFT_LINK_TEST_NUM_OBJS_VISITED) {
+ H5_FAILED();
+ HDprintf(" some objects were not visited!\n");
+ PART_ERROR(H5Ovisit_create_order_decreasing);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Ovisit_create_order_decreasing);
+
+ PART_BEGIN(H5Ovisit_by_name_obj_name_increasing)
+ {
+ TESTING_2("H5Ovisit_by_name by object name in increasing order");
+
+ /* Reset the counter to the appropriate value for the next test */
+ i = 0;
+
+ /* First, test visiting using "." for the object name */
+ if (H5Ovisit_by_name3(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_INC,
+ object_visit_soft_link_callback, &i, H5O_INFO_ALL, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" H5Ovisit_by_name by object name in increasing order failed\n");
+ PART_ERROR(H5Ovisit_by_name_obj_name_increasing);
+ }
+
+ if (i != OBJECT_VISIT_SOFT_LINK_TEST_NUM_OBJS_VISITED) {
+ H5_FAILED();
+ HDprintf(" some objects were not visited!\n");
+ PART_ERROR(H5Ovisit_by_name_obj_name_increasing);
+ }
+
+ /* Reset the special counter and repeat the test using an indirect object name. */
+ i = 0;
+
+ /* Repeat the test using an indirect object name */
+ if (H5Ovisit_by_name3(group_id, OBJECT_VISIT_SOFT_LINK_TEST_GROUP_NAME1, H5_INDEX_NAME,
+ H5_ITER_INC, object_visit_soft_link_callback, &i, H5O_INFO_ALL,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" H5Ovisit_by_name by object name in increasing order failed\n");
+ PART_ERROR(H5Ovisit_by_name_obj_name_increasing);
+ }
+
+ if (i != OBJECT_VISIT_SOFT_LINK_TEST_NUM_OBJS_VISITED) {
+ H5_FAILED();
+ HDprintf(" some objects were not visited!\n");
+ PART_ERROR(H5Ovisit_by_name_obj_name_increasing);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Ovisit_by_name_obj_name_increasing);
+
+ PART_BEGIN(H5Ovisit_by_name_obj_name_decreasing)
+ {
+ TESTING_2("H5Ovisit_by_name by object name in decreasing order");
+#ifndef NO_DECREASING_ALPHA_ITER_ORDER
+ /* Reset the counter to the appropriate value for the next test */
+ i = OBJECT_VISIT_SOFT_LINK_TEST_NUM_OBJS_VISITED;
+
+ /* First, test visiting using "." for the object name */
+ if (H5Ovisit_by_name3(subgroup_id, ".", H5_INDEX_NAME, H5_ITER_DEC,
+ object_visit_soft_link_callback, &i, H5O_INFO_ALL, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" H5Ovisit_by_name by object name in decreasing order failed\n");
+ PART_ERROR(H5Ovisit_by_name_obj_name_decreasing);
+ }
+
+ if (i != 2 * OBJECT_VISIT_SOFT_LINK_TEST_NUM_OBJS_VISITED) {
+ H5_FAILED();
+ HDprintf(" some objects were not visited!\n");
+ PART_ERROR(H5Ovisit_by_name_obj_name_decreasing);
+ }
+
+ /* Reset the special counter and repeat the test using an indirect object name. */
+ i = OBJECT_VISIT_SOFT_LINK_TEST_NUM_OBJS_VISITED;
+
+ /* Repeat the test using an indirect object name */
+ if (H5Ovisit_by_name3(group_id, OBJECT_VISIT_SOFT_LINK_TEST_GROUP_NAME1, H5_INDEX_NAME,
+ H5_ITER_DEC, object_visit_soft_link_callback, &i, H5O_INFO_ALL,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" H5Ovisit_by_name by object name in decreasing order failed\n");
+ PART_ERROR(H5Ovisit_by_name_obj_name_decreasing);
+ }
+
+ if (i != 2 * OBJECT_VISIT_SOFT_LINK_TEST_NUM_OBJS_VISITED) {
+ H5_FAILED();
+ HDprintf(" some objects were not visited!\n");
+ PART_ERROR(H5Ovisit_by_name_obj_name_decreasing);
+ }
+
+ PASSED();
+#else
+ SKIPPED();
+ PART_EMPTY(H5Ovisit_by_name_obj_name_decreasing);
+#endif
+ }
+ PART_END(H5Ovisit_by_name_obj_name_decreasing);
+
+ PART_BEGIN(H5Ovisit_by_name_create_order_increasing)
+ {
+ TESTING_2("H5Ovisit_by_name by creation order in increasing order");
+
+ /* Reset the counter to the appropriate value for the next test */
+ i = 2 * OBJECT_VISIT_SOFT_LINK_TEST_NUM_OBJS_VISITED;
+
+ /* First, test visiting using "." for the object name */
+ if (H5Ovisit_by_name3(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC,
+ object_visit_soft_link_callback, &i, H5O_INFO_ALL, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" H5Ovisit_by_name by creation order in increasing order failed\n");
+ PART_ERROR(H5Ovisit_by_name_create_order_increasing);
+ }
+
+ if (i != 3 * OBJECT_VISIT_SOFT_LINK_TEST_NUM_OBJS_VISITED) {
+ H5_FAILED();
+ HDprintf(" some objects were not visited!\n");
+ PART_ERROR(H5Ovisit_by_name_create_order_increasing);
+ }
+
+ /* Reset the special counter and repeat the test using an indirect object name. */
+ i = 2 * OBJECT_VISIT_SOFT_LINK_TEST_NUM_OBJS_VISITED;
+
+ /* Repeat the test using an indirect object name */
+ if (H5Ovisit_by_name3(group_id, OBJECT_VISIT_SOFT_LINK_TEST_GROUP_NAME1, H5_INDEX_CRT_ORDER,
+ H5_ITER_INC, object_visit_soft_link_callback, &i, H5O_INFO_ALL,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" H5Ovisit_by_name by creation order in increasing order failed\n");
+ PART_ERROR(H5Ovisit_by_name_create_order_increasing);
+ }
+
+ if (i != 3 * OBJECT_VISIT_SOFT_LINK_TEST_NUM_OBJS_VISITED) {
+ H5_FAILED();
+ HDprintf(" some objects were not visited!\n");
+ PART_ERROR(H5Ovisit_by_name_create_order_increasing);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Ovisit_by_name_create_order_increasing);
+
+ PART_BEGIN(H5Ovisit_by_name_create_order_decreasing)
+ {
+ TESTING_2("H5Ovisit_by_name by creation order in decreasing order");
+
+ /* Reset the counter to the appropriate value for the next test */
+ i = 3 * OBJECT_VISIT_SOFT_LINK_TEST_NUM_OBJS_VISITED;
+
+ /* First, test visiting using "." for the object name */
+ if (H5Ovisit_by_name3(subgroup_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_DEC,
+ object_visit_soft_link_callback, &i, H5O_INFO_ALL, H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" H5Ovisit_by_name by creation order in decreasing order failed\n");
+ PART_ERROR(H5Ovisit_by_name_create_order_decreasing);
+ }
+
+ if (i != 4 * OBJECT_VISIT_SOFT_LINK_TEST_NUM_OBJS_VISITED) {
+ H5_FAILED();
+ HDprintf(" some objects were not visited!\n");
+ PART_ERROR(H5Ovisit_by_name_create_order_decreasing);
+ }
+
+ /* Reset the special counter and repeat the test using an indirect object name. */
+ i = 3 * OBJECT_VISIT_SOFT_LINK_TEST_NUM_OBJS_VISITED;
+
+ /* Repeat the test using an indirect object name */
+ if (H5Ovisit_by_name3(group_id, OBJECT_VISIT_SOFT_LINK_TEST_GROUP_NAME1, H5_INDEX_CRT_ORDER,
+ H5_ITER_DEC, object_visit_soft_link_callback, &i, H5O_INFO_ALL,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" H5Ovisit_by_name by creation order in decreasing order failed\n");
+ PART_ERROR(H5Ovisit_by_name_create_order_decreasing);
+ }
+
+ if (i != 4 * OBJECT_VISIT_SOFT_LINK_TEST_NUM_OBJS_VISITED) {
+ H5_FAILED();
+ HDprintf(" some objects were not visited!\n");
+ PART_ERROR(H5Ovisit_by_name_create_order_decreasing);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Ovisit_by_name_create_order_decreasing);
+ }
+ END_MULTIPART;
+
+ TESTING_2("test cleanup");
+
+ if (H5Pclose(gcpl_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(subgroup_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Pclose(gcpl_id);
+ H5Gclose(linked_group_id);
+ H5Gclose(subgroup_id);
+ H5Gclose(subgroup_id2);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to check that H5Ovisit(_by_name) fails when
+ * it is passed invalid parameters.
+ */
+static int
+test_object_visit_invalid_params(void)
+{
+ herr_t err_ret = -1;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t group_id2 = H5I_INVALID_HID;
+
+ TESTING_MULTIPART("object visiting with invalid parameters");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_ITERATE)) {
+ SKIPPED();
+ HDprintf(
+ " API functions for basic file, group, or iterate aren't supported with this connector\n");
+ return 0;
+ }
+
+ TESTING_2("test setup");
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, OBJECT_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", OBJECT_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, OBJECT_VISIT_INVALID_PARAMS_TEST_SUBGROUP_NAME, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container sub-group '%s'\n",
+ OBJECT_VISIT_INVALID_PARAMS_TEST_SUBGROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id2 = H5Gcreate2(group_id, OBJECT_VISIT_INVALID_PARAMS_TEST_GROUP_NAME, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create group '%s'\n", OBJECT_VISIT_INVALID_PARAMS_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ PASSED();
+
+ BEGIN_MULTIPART
+ {
+ PART_BEGIN(H5Ovisit_invalid_obj_id)
+ {
+ TESTING_2("H5Ovisit with an invalid object ID");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Ovisit3(H5I_INVALID_HID, H5_INDEX_NAME, H5_ITER_INC, object_visit_noop_callback,
+ NULL, H5O_INFO_ALL);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Ovisit succeeded with an invalid object ID!\n");
+ PART_ERROR(H5Ovisit_invalid_obj_id);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Ovisit_invalid_obj_id);
+
+ PART_BEGIN(H5Ovisit_invalid_index_type)
+ {
+ TESTING_2("H5Ovisit with an invalid index type");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Ovisit3(group_id, H5_INDEX_UNKNOWN, H5_ITER_INC, object_visit_noop_callback, NULL,
+ H5O_INFO_ALL);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Ovisit succeeded with invalid index type H5_INDEX_UNKNOWN!\n");
+ PART_ERROR(H5Ovisit_invalid_index_type);
+ }
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Ovisit3(group_id, H5_INDEX_N, H5_ITER_INC, object_visit_noop_callback, NULL,
+ H5O_INFO_ALL);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Ovisit succeeded with invalid index type H5_INDEX_N!\n");
+ PART_ERROR(H5Ovisit_invalid_index_type);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Ovisit_invalid_index_type);
+
+ PART_BEGIN(H5Ovisit_invalid_iter_order)
+ {
+ TESTING_2("H5Ovisit with an invalid iteration ordering");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Ovisit3(group_id, H5_INDEX_NAME, H5_ITER_UNKNOWN, object_visit_noop_callback,
+ NULL, H5O_INFO_ALL);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Ovisit succeeded with invalid iteration ordering H5_ITER_UNKNOWN!\n");
+ PART_ERROR(H5Ovisit_invalid_iter_order);
+ }
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Ovisit3(group_id, H5_INDEX_NAME, H5_ITER_N, object_visit_noop_callback, NULL,
+ H5O_INFO_ALL);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Ovisit succeeded with invalid iteration ordering H5_ITER_N!\n");
+ PART_ERROR(H5Ovisit_invalid_iter_order);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Ovisit_invalid_iter_order);
+
+ PART_BEGIN(H5Ovisit_by_name_invalid_loc_id)
+ {
+ TESTING_2("H5Ovisit_by_name with an invalid location ID");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Ovisit_by_name3(H5I_INVALID_HID, ".", H5_INDEX_NAME, H5_ITER_N,
+ object_visit_noop_callback, NULL, H5O_INFO_ALL, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Ovisit_by_name succeeded with an invalid location ID!\n");
+ PART_ERROR(H5Ovisit_by_name_invalid_loc_id);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Ovisit_by_name_invalid_loc_id);
+
+ PART_BEGIN(H5Ovisit_by_name_invalid_obj_name)
+ {
+ TESTING_2("H5Ovisit_by_name with an invalid object name");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Ovisit_by_name3(group_id, NULL, H5_INDEX_NAME, H5_ITER_N,
+ object_visit_noop_callback, NULL, H5O_INFO_ALL, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Ovisit_by_name succeeded with a NULL object name!\n");
+ PART_ERROR(H5Ovisit_by_name_invalid_obj_name);
+ }
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Ovisit_by_name3(group_id, "", H5_INDEX_NAME, H5_ITER_N,
+ object_visit_noop_callback, NULL, H5O_INFO_ALL, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Ovisit_by_name succeeded with an invalid object name of ''!\n");
+ PART_ERROR(H5Ovisit_by_name_invalid_obj_name);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Ovisit_by_name_invalid_obj_name);
+
+ PART_BEGIN(H5Ovisit_by_name_invalid_index_type)
+ {
+ TESTING_2("H5Ovisit_by_name with an invalid index type");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Ovisit_by_name3(group_id, ".", H5_INDEX_UNKNOWN, H5_ITER_N,
+ object_visit_noop_callback, NULL, H5O_INFO_ALL, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Ovisit_by_name succeeded with invalid index type H5_INDEX_UNKNOWN!\n");
+ PART_ERROR(H5Ovisit_by_name_invalid_index_type);
+ }
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Ovisit_by_name3(group_id, ".", H5_INDEX_N, H5_ITER_N, object_visit_noop_callback,
+ NULL, H5O_INFO_ALL, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Ovisit_by_name succeeded with invalid index type H5_INDEX_N!\n");
+ PART_ERROR(H5Ovisit_by_name_invalid_index_type);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Ovisit_by_name_invalid_index_type);
+
+ PART_BEGIN(H5Ovisit_by_name_invalid_iter_order)
+ {
+ TESTING_2("H5Ovisit_by_name with an invalid iteration ordering");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Ovisit_by_name3(group_id, ".", H5_INDEX_NAME, H5_ITER_UNKNOWN,
+ object_visit_noop_callback, NULL, H5O_INFO_ALL, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Ovisit_by_name succeeded with invalid iteration ordering H5_ITER_UNKNOWN!\n");
+ PART_ERROR(H5Ovisit_by_name_invalid_iter_order);
+ }
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Ovisit_by_name3(group_id, ".", H5_INDEX_NAME, H5_ITER_N,
+ object_visit_noop_callback, NULL, H5O_INFO_ALL, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Ovisit_by_name succeeded with invalid iteration ordering H5_ITER_N!\n");
+ PART_ERROR(H5Ovisit_by_name_invalid_iter_order);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Ovisit_by_name_invalid_iter_order);
+
+ PART_BEGIN(H5Ovisit_by_name_invalid_lapl)
+ {
+ TESTING_2("H5Ovisit_by_name with an invalid LAPL");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Ovisit_by_name3(group_id, ".", H5_INDEX_NAME, H5_ITER_INC,
+ object_visit_noop_callback, NULL, H5O_INFO_ALL, H5I_INVALID_HID);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Ovisit_by_name succeeded with an invalid LAPL!\n");
+ PART_ERROR(H5Ovisit_by_name_invalid_lapl);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Ovisit_by_name_invalid_lapl);
+ }
+ END_MULTIPART;
+
+ TESTING_2("test cleanup");
+
+ if (H5Gclose(group_id2) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Gclose(group_id2);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test for H5Oclose.
+ */
+static int
+test_close_object(void)
+{
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t group_id2 = H5I_INVALID_HID;
+ hid_t dtype_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t dset_dtype = H5I_INVALID_HID;
+ hid_t fspace_id = H5I_INVALID_HID;
+
+ TESTING_MULTIPART("H5Oclose");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & (H5VL_CAP_FLAG_FILE_BASIC)) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_OBJECT_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_STORED_DATATYPES)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file, group, object, dataset, attribute, or stored datatype "
+ "aren't supported with this connector\n");
+ return 0;
+ }
+
+ TESTING_2("test setup");
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, OBJECT_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", OBJECT_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, OBJECT_CLOSE_TEST_GROUP_NAME, H5P_DEFAULT, H5P_DEFAULT,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container subgroup '%s'\n", OBJECT_CLOSE_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((fspace_id = generate_random_dataspace(OBJECT_CLOSE_TEST_SPACE_RANK, NULL, NULL, FALSE)) < 0)
+ TEST_ERROR;
+
+ if ((dset_dtype = generate_random_datatype(H5T_NO_CLASS, FALSE)) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ BEGIN_MULTIPART
+ {
+ PART_BEGIN(H5Oclose_group)
+ {
+ TESTING_2("H5Oclose on a group");
+
+ if ((group_id2 = H5Gcreate2(group_id, OBJECT_CLOSE_TEST_GRP_NAME, H5P_DEFAULT, H5P_DEFAULT,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create group '%s'\n", OBJECT_CLOSE_TEST_GRP_NAME);
+ PART_ERROR(H5Oclose_group);
+ }
+
+ H5E_BEGIN_TRY
+ {
+ H5Gclose(group_id2);
+ }
+ H5E_END_TRY;
+
+ if ((group_id2 = H5Oopen(group_id, OBJECT_CLOSE_TEST_GRP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open group '%s' with H5Oopen\n", OBJECT_CLOSE_TEST_GRP_NAME);
+ PART_ERROR(H5Oclose_group);
+ }
+
+ if (H5Oclose(group_id2) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't close group '%s' with H5Oclose\n", OBJECT_CLOSE_TEST_GRP_NAME);
+ PART_ERROR(H5Oclose_group);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Oclose_group);
+
+ PART_BEGIN(H5Oclose_dset)
+ {
+ TESTING_2("H5Oclose on a dataset");
+
+ if ((dset_id = H5Dcreate2(group_id, OBJECT_CLOSE_TEST_DSET_NAME, dset_dtype, fspace_id,
+ H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create dataset '%s'\n", OBJECT_CLOSE_TEST_DSET_NAME);
+ PART_ERROR(H5Oclose_dset);
+ }
+
+ H5E_BEGIN_TRY
+ {
+ H5Dclose(dset_id);
+ }
+ H5E_END_TRY;
+
+ if ((dset_id = H5Oopen(group_id, OBJECT_CLOSE_TEST_DSET_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open dataset '%s' with H5Oopen\n", OBJECT_CLOSE_TEST_DSET_NAME);
+ PART_ERROR(H5Oclose_dset);
+ }
+
+ if (H5Oclose(dset_id) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't close dataset '%s' with H5Oclose\n", OBJECT_CLOSE_TEST_DSET_NAME);
+ PART_ERROR(H5Oclose_dset);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Oclose_dset);
+
+ PART_BEGIN(H5Oclose_dtype)
+ {
+ TESTING_2("H5Oclose on a committed datatype");
+
+ if ((dtype_id = generate_random_datatype(H5T_NO_CLASS, FALSE)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create datatype '%s'\n", OBJECT_CLOSE_TEST_TYPE_NAME);
+ PART_ERROR(H5Oclose_dtype);
+ }
+
+ if (H5Tcommit2(group_id, OBJECT_CLOSE_TEST_TYPE_NAME, dtype_id, H5P_DEFAULT, H5P_DEFAULT,
+ H5P_DEFAULT) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't commit datatype '%s'\n", OBJECT_CLOSE_TEST_TYPE_NAME);
+ PART_ERROR(H5Oclose_dtype);
+ }
+
+ H5E_BEGIN_TRY
+ {
+ H5Tclose(dtype_id);
+ }
+ H5E_END_TRY;
+
+ if ((dtype_id = H5Oopen(group_id, OBJECT_CLOSE_TEST_TYPE_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open datatype '%s' with H5Oopen\n", OBJECT_CLOSE_TEST_TYPE_NAME);
+ PART_ERROR(H5Oclose_dtype);
+ }
+
+ if (H5Oclose(dtype_id) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't close datatype '%s' with H5Oclose\n", OBJECT_CLOSE_TEST_TYPE_NAME);
+ PART_ERROR(H5Oclose_dtype);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Oclose_dtype);
+ }
+ END_MULTIPART;
+
+ TESTING_2("test cleanup");
+
+ if (H5Sclose(fspace_id) < 0)
+ TEST_ERROR;
+ if (H5Tclose(dset_dtype) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(fspace_id);
+ H5Tclose(dset_dtype);
+ H5Tclose(dtype_id);
+ H5Dclose(dset_id);
+ H5Gclose(group_id2);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to check that H5Oclose fails when it
+ * is passed invalid parameters.
+ */
+static int
+test_close_object_invalid_params(void)
+{
+ herr_t err_ret = -1;
+ hid_t file_id = H5I_INVALID_HID;
+
+ TESTING("H5Oclose with an invalid object ID");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & (H5VL_CAP_FLAG_FILE_BASIC)) || !(vol_cap_flags_g & H5VL_CAP_FLAG_OBJECT_BASIC)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file or object aren't supported with this connector\n");
+ return 0;
+ }
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Oclose(H5I_INVALID_HID);
+ }
+ H5E_END_TRY;
+
+ if (err_ret >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Oclose succeeded with an invalid object ID!\n");
+ goto error;
+ }
+
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to check that various objects (file, dataspace, property list,
+ * and attribute) can't be closed with H5Oclose.
+ */
+static int
+test_close_invalid_objects(void)
+{
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t attr_dtype = H5I_INVALID_HID;
+ hid_t attr_space_id = H5I_INVALID_HID;
+ hid_t fapl_id = H5I_INVALID_HID;
+ hid_t attr_id = H5I_INVALID_HID;
+ herr_t status;
+
+ TESTING_MULTIPART("H5Oclose invalid objects");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & (H5VL_CAP_FLAG_FILE_BASIC)) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_OBJECT_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC)) {
+ SKIPPED();
+ HDprintf("    API functions for basic file, group, object, or attribute "
+ "aren't supported with this connector\n");
+ return 0;
+ }
+
+ TESTING_2("test setup");
+
+ if ((fapl_id = H5Pcreate(H5P_FILE_ACCESS)) < 0)
+ TEST_ERROR;
+
+ if ((file_id = H5Fopen(H5_api_test_filename, H5F_ACC_RDWR, fapl_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, OBJECT_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", OBJECT_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, OBJECT_CLOSE_INVALID_TEST_GROUP_NAME, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container sub-group '%s'\n", OBJECT_OPEN_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((attr_space_id = generate_random_dataspace(OBJECT_CLOSE_INVALID_TEST_SPACE_RANK, NULL, NULL, TRUE)) <
+ 0)
+ TEST_ERROR;
+
+ if ((attr_dtype = generate_random_datatype(H5T_NO_CLASS, TRUE)) < 0)
+ TEST_ERROR;
+
+ if ((attr_id = H5Acreate2(group_id, OBJECT_CLOSE_INVALID_TEST_ATTRIBUTE_NAME, attr_dtype, attr_space_id,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ BEGIN_MULTIPART
+ {
+ PART_BEGIN(H5Oclose_file)
+ {
+ TESTING_2("H5Oclose with an invalid object - file");
+
+ H5E_BEGIN_TRY
+ {
+ status = H5Oclose(file_id);
+ }
+ H5E_END_TRY;
+
+ if (status >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Oclose succeeded with an invalid object (file)!\n");
+ PART_ERROR(H5Oclose_file);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Oclose_file);
+
+ PART_BEGIN(H5Oclose_plist)
+ {
+ TESTING_2("H5Oclose with an invalid object - property list");
+
+ H5E_BEGIN_TRY
+ {
+ status = H5Oclose(fapl_id);
+ }
+ H5E_END_TRY;
+
+ if (status >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Oclose succeeded with an invalid object (property list)!\n");
+ PART_ERROR(H5Oclose_plist);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Oclose_plist);
+
+ PART_BEGIN(H5Oclose_dspace)
+ {
+ TESTING_2("H5Oclose with an invalid object - data space");
+
+ H5E_BEGIN_TRY
+ {
+ status = H5Oclose(attr_space_id);
+ }
+ H5E_END_TRY;
+
+ if (status >= 0) {
+ H5_FAILED();
+ HDprintf("    H5Oclose succeeded with an invalid object (dataspace)!\n");
+ PART_ERROR(H5Oclose_dspace);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Oclose_dspace);
+
+ PART_BEGIN(H5Oclose_attribute)
+ {
+ TESTING_2("H5Oclose with an invalid object - attribute");
+
+ H5E_BEGIN_TRY
+ {
+ status = H5Oclose(attr_id);
+ }
+ H5E_END_TRY;
+
+ if (status >= 0) {
+ H5_FAILED();
+ HDprintf(" H5Oclose succeeded with an invalid object (attribute)!\n");
+ PART_ERROR(H5Oclose_attribute);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Oclose_attribute);
+ }
+ END_MULTIPART;
+
+ TESTING_2("test cleanup");
+
+ if (H5Tclose(attr_dtype) < 0)
+ TEST_ERROR;
+ if (H5Aclose(attr_id) < 0)
+ TEST_ERROR;
+ if (H5Sclose(attr_space_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+ if (H5Pclose(fapl_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Tclose(attr_dtype);
+ H5Sclose(attr_space_id);
+ H5Aclose(attr_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Fclose(file_id);
+ H5Pclose(fapl_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+} /* test_close_invalid_objects */
+
+/*
+ * A test for H5Oflush.
+ */
+static int
+test_flush_object(void)
+{
+ TESTING("H5Oflush");
+
+ SKIPPED();
+
+ return 0;
+}
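+
+/*
+ * NOTE: The H5Oflush test above is currently skipped. The call under test is
+ * simply (object ID hypothetical):
+ *
+ *     if (H5Oflush(group_id) < 0)
+ *         TEST_ERROR;
+ *
+ * A full test might flush each object type (group, dataset, committed
+ * datatype) and then verify, for example through a second open of the file,
+ * that the flushed metadata reached the file.
+ */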
+
+/*
+ * A test to check that H5Oflush fails when
+ * it is passed invalid parameters.
+ */
+static int
+test_flush_object_invalid_params(void)
+{
+ TESTING("H5Oflush with invalid parameters");
+
+ SKIPPED();
+
+ return 0;
+}
+
+/*
+ * A test for H5Orefresh.
+ */
+static int
+test_refresh_object(void)
+{
+ TESTING("H5Orefresh");
+
+ SKIPPED();
+
+ return 0;
+}
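+
+/*
+ * NOTE: The H5Orefresh test above is likewise skipped. The call under test is
+ * (object ID hypothetical):
+ *
+ *     if (H5Orefresh(dset_id) < 0)
+ *         TEST_ERROR;
+ *
+ * which evicts the object's cached metadata and reloads it from the file, so
+ * a full test might pair it with an H5Oflush performed through another handle
+ * to the same object.
+ */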
+
+/*
+ * A test to check that H5Orefresh fails when
+ * it is passed invalid parameters.
+ */
+static int
+test_refresh_object_invalid_params(void)
+{
+ TESTING("H5Orefresh with invalid parameters");
+
+ SKIPPED();
+
+ return 0;
+}
+
+/*
+ * H5Ocopy test callback to check that an object's attributes got copied
+ * over successfully to the new object.
+ */
+static herr_t
+object_copy_attribute_iter_callback(hid_t location_id, const char *attr_name, const H5A_info_t *ainfo,
+ void *op_data)
+{
+ size_t *counter = (size_t *)op_data;
+ htri_t types_equal;
+ char expected_name[256];
+ hid_t attr_id = H5I_INVALID_HID;
+ hid_t attr_type = H5I_INVALID_HID;
+ herr_t ret_value = H5_ITER_CONT;
+
+ UNUSED(ainfo);
+
+ snprintf(expected_name, 256, "attr%d", (int)(*counter));
+
+ if (HDstrncmp(attr_name, expected_name, 256)) {
+ HDprintf(" attribute name '%s' did not match expected name '%s'\n", attr_name, expected_name);
+ ret_value = H5_ITER_ERROR;
+ goto done;
+ }
+
+ if ((attr_id = H5Aopen(location_id, attr_name, H5P_DEFAULT)) < 0) {
+ HDprintf(" failed to open attribute '%s'\n", attr_name);
+ ret_value = H5_ITER_ERROR;
+ goto done;
+ }
+
+ if ((attr_type = H5Aget_type(attr_id)) < 0) {
+ HDprintf(" failed to retrieve attribute's datatype\n");
+ ret_value = H5_ITER_ERROR;
+ goto done;
+ }
+
+ if ((types_equal = H5Tequal(attr_type, H5T_NATIVE_INT)) < 0) {
+ HDprintf(" failed to determine if attribute's datatype matched what is expected\n");
+ ret_value = H5_ITER_ERROR;
+ goto done;
+ }
+
+ if (!types_equal) {
+ HDprintf(" attribute datatype did not match expected H5T_NATIVE_INT\n");
+ ret_value = H5_ITER_ERROR;
+ goto done;
+ }
+
+done:
+ if (attr_type >= 0)
+ H5Tclose(attr_type);
+ if (attr_id >= 0)
+ H5Aclose(attr_id);
+
+ (*counter)++;
+
+ return ret_value;
+}
+
+/*
+ * H5Ocopy callback to check that a copied group's soft links
+ * have not been expanded when the default copy options are
+ * used.
+ */
+static herr_t
+object_copy_soft_link_non_expand_callback(hid_t group, const char *name, const H5L_info2_t *info,
+ void *op_data)
+{
+ size_t *counter = (size_t *)op_data;
+ void *link_val_buf = NULL;
+ char expected_link_val[OBJECT_COPY_GROUP_WITH_SOFT_LINKS_TEST_BUF_SIZE];
+ herr_t ret_value = H5_ITER_CONT;
+
+ /* Make sure the link type is soft */
+ if (H5L_TYPE_SOFT != info->type) {
+ HDprintf(" link type was not H5L_TYPE_SOFT; link must have been expanded!\n");
+ ret_value = H5_ITER_ERROR;
+ goto done;
+ }
+
+ if (NULL == (link_val_buf = calloc(1, info->u.val_size))) {
+ HDprintf(" failed to allocate buffer for link value\n");
+ ret_value = H5_ITER_ERROR;
+ goto done;
+ }
+
+ /* Retrieve the link's value */
+ if (H5Lget_val(group, name, link_val_buf, info->u.val_size, H5P_DEFAULT) < 0) {
+ HDprintf(" failed to retrieve value of link '%s'\n", name);
+ ret_value = H5_ITER_ERROR;
+ goto done;
+ }
+
+ /* Make sure link's value matches what is expected */
+ snprintf(expected_link_val, OBJECT_COPY_GROUP_WITH_SOFT_LINKS_TEST_BUF_SIZE,
+ "/" OBJECT_TEST_GROUP_NAME "/" OBJECT_COPY_GROUP_WITH_SOFT_LINKS_TEST_SUBGROUP_NAME "/grp%d",
+ (int)(*counter));
+
+ if (strncmp(link_val_buf, expected_link_val, OBJECT_COPY_GROUP_WITH_SOFT_LINKS_TEST_BUF_SIZE)) {
+ HDprintf(" value '%s' for link '%s' did not match expected value '%s'\n", (char *)link_val_buf,
+ name, expected_link_val);
+ ret_value = H5_ITER_ERROR;
+ goto done;
+ }
+
+done:
+ if (link_val_buf)
+ free(link_val_buf);
+
+ (*counter)++;
+
+ return ret_value;
+}
+
+/*
+ * H5Ocopy callback to check that a copied group's soft links
+ * have been expanded when the H5O_COPY_EXPAND_SOFT_LINK_FLAG
+ * flag is specified.
+ */
+static herr_t
+object_copy_soft_link_expand_callback(hid_t group, const char *name, const H5L_info2_t *info, void *op_data)
+{
+ size_t *counter = (size_t *)op_data;
+ char expected_link_name[OBJECT_COPY_GROUP_WITH_SOFT_LINKS_TEST_BUF_SIZE];
+ herr_t ret_value = H5_ITER_CONT;
+
+ UNUSED(group);
+
+ /* Make sure the link type is hard */
+ if (H5L_TYPE_HARD != info->type) {
+ HDprintf(" link type was not H5L_TYPE_HARD; link must not have been expanded!\n");
+ ret_value = H5_ITER_ERROR;
+ goto done;
+ }
+
+ /* Ensure that the link's name still follows the 'link1', 'link2', etc. pattern */
+ snprintf(expected_link_name, OBJECT_COPY_GROUP_WITH_SOFT_LINKS_TEST_BUF_SIZE, "link%d", (int)(*counter));
+
+ if (strncmp(name, expected_link_name, OBJECT_COPY_GROUP_WITH_SOFT_LINKS_TEST_BUF_SIZE)) {
+ HDprintf(" link name '%s' did not match expected name '%s'\n", name, expected_link_name);
+ ret_value = H5_ITER_ERROR;
+ goto done;
+ }
+
+done:
+ (*counter)++;
+
+ return ret_value;
+}
+
+/*
+ * H5Ovisit callback to simply iterate recursively through all of the objects in a
+ * group and check to make sure their names match what is expected.
+ */
+static herr_t
+object_visit_callback(hid_t o_id, const char *name, const H5O_info2_t *object_info, void *op_data)
+{
+ size_t *i = (size_t *)op_data;
+ size_t counter_val = *((size_t *)op_data);
+ herr_t ret_val = 0;
+
+ UNUSED(o_id);
+
+ if (!HDstrncmp(name, ".", strlen(".") + 1) &&
+ (counter_val == 0 || counter_val == 4 || counter_val == 8 || counter_val == 12)) {
+ if (H5O_TYPE_GROUP == object_info->type)
+ goto done;
+ else
+ HDprintf(" type for object '%s' was not H5O_TYPE_GROUP\n", name);
+ }
+ else if (!HDstrncmp(name, OBJECT_VISIT_TEST_GROUP_NAME, strlen(OBJECT_VISIT_TEST_GROUP_NAME) + 1) &&
+ (counter_val == 2 || counter_val == 6 || counter_val == 9 || counter_val == 15)) {
+ if (H5O_TYPE_GROUP == object_info->type)
+ goto done;
+ else
+ HDprintf(" type for object '%s' was not H5O_TYPE_GROUP\n", name);
+ }
+ else if (!HDstrncmp(name, OBJECT_VISIT_TEST_DSET_NAME, strlen(OBJECT_VISIT_TEST_DSET_NAME) + 1) &&
+ (counter_val == 1 || counter_val == 7 || counter_val == 10 || counter_val == 14)) {
+ if (H5O_TYPE_DATASET == object_info->type)
+ goto done;
+ else
+ HDprintf(" type for object '%s' was not H5O_TYPE_DATASET\n", name);
+ }
+ else if (!HDstrncmp(name, OBJECT_VISIT_TEST_TYPE_NAME, strlen(OBJECT_VISIT_TEST_TYPE_NAME) + 1) &&
+ (counter_val == 3 || counter_val == 5 || counter_val == 11 || counter_val == 13)) {
+ if (H5O_TYPE_NAMED_DATATYPE == object_info->type)
+ goto done;
+ else
+ HDprintf(" type for object '%s' was not H5O_TYPE_NAMED_DATATYPE\n", name);
+ }
+ else
+ HDprintf(" object '%s' didn't match known names or came in an incorrect order\n", name);
+
+ ret_val = -1;
+
+done:
+ (*i)++;
+
+ return ret_val;
+}
+
+/*
+ * H5Ovisit callback for visiting a singular dataset.
+ */
+static herr_t
+object_visit_dset_callback(hid_t o_id, const char *name, const H5O_info2_t *object_info, void *op_data)
+{
+ herr_t ret_val = 0;
+
+ UNUSED(o_id);
+ UNUSED(op_data);
+
+ if (HDstrncmp(name, ".", strlen(".") + 1)) {
+ HDprintf(" object '%s' didn't match known names\n", name);
+ return -1;
+ }
+
+ if (H5O_TYPE_DATASET != object_info->type) {
+ HDprintf(" object type was not H5O_TYPE_DATASET\n");
+ return -1;
+ }
+
+ return ret_val;
+}
+
+/*
+ * H5Ovisit callback for visiting a singular committed datatype.
+ */
+static herr_t
+object_visit_dtype_callback(hid_t o_id, const char *name, const H5O_info2_t *object_info, void *op_data)
+{
+ herr_t ret_val = 0;
+
+ UNUSED(o_id);
+ UNUSED(op_data);
+
+ if (HDstrncmp(name, ".", strlen(".") + 1)) {
+ HDprintf(" object '%s' didn't match known names\n", name);
+ return -1;
+ }
+
+ if (H5O_TYPE_NAMED_DATATYPE != object_info->type) {
+ HDprintf(" object type was not H5O_TYPE_NAMED_DATATYPE\n");
+ return -1;
+ }
+
+ return ret_val;
+}
+
+/*
+ * H5Ovisit callback for testing ignoring of
+ * soft links during object visiting.
+ */
+static herr_t
+object_visit_soft_link_callback(hid_t o_id, const char *name, const H5O_info2_t *object_info, void *op_data)
+{
+ size_t *i = (size_t *)op_data;
+ size_t counter_val = *((size_t *)op_data);
+ herr_t ret_val = 0;
+
+ UNUSED(o_id);
+
+ if (!HDstrncmp(name, ".", strlen(".") + 1) && (counter_val <= 5)) {
+ if (H5O_TYPE_GROUP == object_info->type)
+ goto done;
+ else
+ HDprintf(" type for object '%s' was not H5O_TYPE_GROUP\n", name);
+ }
+ else
+ HDprintf(" object '%s' didn't match known names or came in an incorrect order\n", name);
+
+ ret_val = -1;
+
+done:
+ (*i)++;
+
+ return ret_val;
+}
+
+/*
+ * H5Ovisit callback to simply iterate through all of the objects in a given
+ * group.
+ */
+static herr_t
+object_visit_noop_callback(hid_t o_id, const char *name, const H5O_info2_t *object_info, void *op_data)
+{
+ UNUSED(o_id);
+ UNUSED(name);
+ UNUSED(object_info);
+ UNUSED(op_data);
+
+ return 0;
+}
+
+/*
+ * Cleanup temporary test files
+ */
+static void
+cleanup_files(void)
+{
+ H5Fdelete(OBJECT_COPY_BETWEEN_FILES_TEST_FILE_NAME, H5P_DEFAULT);
+}
+
+int
+H5_api_object_test(void)
+{
+ size_t i;
+ int nerrors;
+
+ HDprintf("**********************************************\n");
+ HDprintf("* *\n");
+ HDprintf("* API Object Tests *\n");
+ HDprintf("* *\n");
+ HDprintf("**********************************************\n\n");
+
+ for (i = 0, nerrors = 0; i < ARRAY_LENGTH(object_tests); i++) {
+ nerrors += (*object_tests[i])() ? 1 : 0;
+ }
+
+ HDprintf("\n");
+
+ HDprintf("Cleaning up testing files\n");
+ cleanup_files();
+
+ return nerrors;
+}
diff --git a/test/API/H5_api_object_test.h b/test/API/H5_api_object_test.h
new file mode 100644
index 0000000..5470843
--- /dev/null
+++ b/test/API/H5_api_object_test.h
@@ -0,0 +1,191 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+#ifndef H5_API_OBJECT_TEST_H
+#define H5_API_OBJECT_TEST_H
+
+#include "H5_api_test.h"
+
+int H5_api_object_test(void);
+
+/***********************************************
+ * *
+ * API Object test defines *
+ * *
+ ***********************************************/
+
+#define OBJECT_OPEN_TEST_SPACE_RANK 2
+#define OBJECT_OPEN_TEST_GROUP_NAME "object_open_test"
+#define OBJECT_OPEN_TEST_GRP_NAME "object_open_test_group"
+#define OBJECT_OPEN_TEST_DSET_NAME "object_open_test_dset"
+#define OBJECT_OPEN_TEST_TYPE_NAME "object_open_test_type"
+
+#define OBJECT_OPEN_INVALID_PARAMS_TEST_GROUP_NAME "object_open_invalid_params_test"
+#define OBJECT_OPEN_INVALID_PARAMS_TEST_GRP_NAME "object_open_invalid_params_test_group"
+
+#define OBJECT_CLOSE_INVALID_TEST_GROUP_NAME "object_close_invalid_params_test"
+#define OBJECT_CLOSE_INVALID_TEST_ATTRIBUTE_NAME "object_close_invalid_test_attribute"
+#define OBJECT_CLOSE_INVALID_TEST_SPACE_RANK 2
+
+#define OBJECT_EXISTS_TEST_DSET_SPACE_RANK 2
+#define OBJECT_EXISTS_TEST_SUBGROUP_NAME "object_exists_test"
+#define OBJECT_EXISTS_TEST_DANGLING_LINK_NAME "object_exists_test_dangling_soft_link"
+#define OBJECT_EXISTS_TEST_SOFT_LINK_NAME "object_exists_test_soft_link"
+#define OBJECT_EXISTS_TEST_GRP_NAME "object_exists_test_group"
+#define OBJECT_EXISTS_TEST_TYPE_NAME "object_exists_test_type"
+#define OBJECT_EXISTS_TEST_DSET_NAME "object_exists_test_dset"
+
+#define OBJECT_EXISTS_INVALID_PARAMS_TEST_SUBGROUP_NAME "object_exists_invalid_params_test"
+#define OBJECT_EXISTS_INVALID_PARAMS_TEST_GRP_NAME "object_exists_invalid_params_test_group"
+
+#define OBJECT_COPY_BASIC_TEST_DEEP_NESTED_GROUP_NAME "deep_nested_group"
+#define OBJECT_COPY_BASIC_TEST_NUM_NESTED_OBJS 3
+#define OBJECT_COPY_BASIC_TEST_NEW_GROUP_NAME "copied_group"
+#define OBJECT_COPY_BASIC_TEST_NEW_DSET_NAME "copied_dset"
+#define OBJECT_COPY_BASIC_TEST_NEW_DTYPE_NAME "copied_dtype"
+#define OBJECT_COPY_BASIC_TEST_SUBGROUP_NAME "object_copy_basic_test"
+#define OBJECT_COPY_BASIC_TEST_GROUP_NAME "group_to_copy"
+#define OBJECT_COPY_BASIC_TEST_DSET_NAME "dset_to_copy"
+#define OBJECT_COPY_BASIC_TEST_DTYPE_NAME "dtype_to_copy"
+#define OBJECT_COPY_BASIC_TEST_SPACE_RANK 2
+#define OBJECT_COPY_BASIC_TEST_NUM_ATTRS 3
+#define OBJECT_COPY_BASIC_TEST_BUF_SIZE 256
+
+#define OBJECT_COPY_ALREADY_EXISTING_TEST_SUBGROUP_NAME "object_copy_existing_objects_test"
+#define OBJECT_COPY_ALREADY_EXISTING_TEST_GROUP_NAME "group_to_copy"
+#define OBJECT_COPY_ALREADY_EXISTING_TEST_DSET_NAME "dset_to_copy"
+#define OBJECT_COPY_ALREADY_EXISTING_TEST_DTYPE_NAME "dtype_to_copy"
+#define OBJECT_COPY_ALREADY_EXISTING_TEST_SPACE_RANK 2
+
+#define OBJECT_COPY_SHALLOW_TEST_DEEP_NESTED_GROUP_NAME "deep_nested_group"
+#define OBJECT_COPY_SHALLOW_TEST_NUM_NESTED_OBJS 3
+#define OBJECT_COPY_SHALLOW_TEST_SUBGROUP_NAME "object_copy_shallow_group_copy_test"
+#define OBJECT_COPY_SHALLOW_TEST_NEW_GROUP_NAME "copied_group"
+#define OBJECT_COPY_SHALLOW_TEST_GROUP_NAME "group_to_copy"
+#define OBJECT_COPY_SHALLOW_TEST_BUF_SIZE 256
+
+#define OBJECT_COPY_NO_ATTRS_TEST_SUBGROUP_NAME "object_copy_no_attributes_test"
+#define OBJECT_COPY_NO_ATTRS_TEST_NEW_GROUP_NAME "copied_group"
+#define OBJECT_COPY_NO_ATTRS_TEST_NEW_DSET_NAME "copied_dset"
+#define OBJECT_COPY_NO_ATTRS_TEST_NEW_DTYPE_NAME "copied_dtype"
+#define OBJECT_COPY_NO_ATTRS_TEST_GROUP_NAME "group_to_copy"
+#define OBJECT_COPY_NO_ATTRS_TEST_DSET_NAME "dset_to_copy"
+#define OBJECT_COPY_NO_ATTRS_TEST_DTYPE_NAME "dtype_to_copy"
+#define OBJECT_COPY_NO_ATTRS_TEST_SPACE_RANK 2
+#define OBJECT_COPY_NO_ATTRS_TEST_NUM_ATTRS 3
+#define OBJECT_COPY_NO_ATTRS_TEST_BUF_SIZE 256
+
+#define OBJECT_COPY_SOFT_LINK_TEST_DEEP_NESTED_GROUP_NAME "deep_nested_group"
+#define OBJECT_COPY_SOFT_LINK_TEST_DANGLING_LINK_NAME "dangling_link"
+#define OBJECT_COPY_SOFT_LINK_TEST_NUM_NESTED_OBJS 3
+#define OBJECT_COPY_SOFT_LINK_TEST_SUBGROUP_NAME "object_copy_soft_link_test"
+#define OBJECT_COPY_SOFT_LINK_TEST_SOFT_LINK_NAME "soft_link_to_group_to_copy"
+#define OBJECT_COPY_SOFT_LINK_TEST_NEW_GROUP_NAME "copied_group"
+#define OBJECT_COPY_SOFT_LINK_TEST_GROUP_NAME "group_to_copy"
+#define OBJECT_COPY_SOFT_LINK_TEST_SPACE_RANK 2
+#define OBJECT_COPY_SOFT_LINK_TEST_NUM_ATTRS 3
+#define OBJECT_COPY_SOFT_LINK_TEST_BUF_SIZE 256
+
+#define OBJECT_COPY_GROUP_WITH_SOFT_LINKS_TEST_EXPAND_DANGLING_GROUP_NAME "expanded_dangling_soft_links_group"
+#define OBJECT_COPY_GROUP_WITH_SOFT_LINKS_TEST_NON_EXPAND_GROUP_NAME "non_expanded_soft_links_group"
+#define OBJECT_COPY_GROUP_WITH_SOFT_LINKS_TEST_EXPAND_GROUP_NAME "expanded_soft_links_group"
+#define OBJECT_COPY_GROUP_WITH_SOFT_LINKS_TEST_NUM_NESTED_OBJS 3
+#define OBJECT_COPY_GROUP_WITH_SOFT_LINKS_TEST_SUBGROUP_NAME "object_copy_group_with_soft_links_test"
+#define OBJECT_COPY_GROUP_WITH_SOFT_LINKS_TEST_GROUP_NAME "group_to_copy"
+#define OBJECT_COPY_GROUP_WITH_SOFT_LINKS_TEST_BUF_SIZE 256
+
+#define OBJECT_COPY_BETWEEN_FILES_TEST_DEEP_NESTED_GROUP_NAME "deep_nested_group"
+#define OBJECT_COPY_BETWEEN_FILES_TEST_NUM_NESTED_OBJS 3
+#define OBJECT_COPY_BETWEEN_FILES_TEST_SUBGROUP_NAME "object_copy_between_files_test"
+#define OBJECT_COPY_BETWEEN_FILES_TEST_NEW_GROUP_NAME "copied_group"
+#define OBJECT_COPY_BETWEEN_FILES_TEST_NEW_DSET_NAME "copied_dset"
+#define OBJECT_COPY_BETWEEN_FILES_TEST_NEW_DTYPE_NAME "copied_dtype"
+#define OBJECT_COPY_BETWEEN_FILES_TEST_FILE_NAME "object_copy_test_file.h5"
+#define OBJECT_COPY_BETWEEN_FILES_TEST_GROUP_NAME "group_to_copy"
+#define OBJECT_COPY_BETWEEN_FILES_TEST_DSET_NAME "dset_to_copy"
+#define OBJECT_COPY_BETWEEN_FILES_TEST_DTYPE_NAME "dtype_to_copy"
+#define OBJECT_COPY_BETWEEN_FILES_TEST_SPACE_RANK 2
+#define OBJECT_COPY_BETWEEN_FILES_TEST_NUM_ATTRS 3
+#define OBJECT_COPY_BETWEEN_FILES_TEST_BUF_SIZE 256
+
+#define OBJECT_COPY_INVALID_PARAMS_TEST_SUBGROUP_NAME "object_copy_invalid_params_test"
+#define OBJECT_COPY_INVALID_PARAMS_TEST_GROUP_NAME "object_copy_invalid_params_group"
+#define OBJECT_COPY_INVALID_PARAMS_TEST_GROUP_NAME2 "object_copy_invalid_params_group_copy"
+
+#define OBJECT_VISIT_TEST_NUM_OBJS_VISITED 4
+#define OBJECT_VISIT_TEST_SUBGROUP_NAME "object_visit_test"
+#define OBJECT_VISIT_TEST_SPACE_RANK 2
+#define OBJECT_VISIT_TEST_GROUP_NAME "object_visit_test_group"
+#define OBJECT_VISIT_TEST_DSET_NAME "object_visit_test_dset"
+#define OBJECT_VISIT_TEST_TYPE_NAME "object_visit_test_type"
+
+#define OBJECT_VISIT_SOFT_LINK_TEST_NUM_OBJS_VISITED 1
+#define OBJECT_VISIT_SOFT_LINK_TEST_SUBGROUP_NAME "object_visit_soft_link"
+#define OBJECT_VISIT_SOFT_LINK_TEST_GROUP_NAME1 "links_group"
+#define OBJECT_VISIT_SOFT_LINK_TEST_GROUP_NAME2 "objects_group"
+#define OBJECT_VISIT_SOFT_LINK_TEST_LINK_NAME1 "soft_link1"
+#define OBJECT_VISIT_SOFT_LINK_TEST_LINK_NAME2 "soft_link2"
+#define OBJECT_VISIT_SOFT_LINK_TEST_LINK_NAME3 "soft_link3"
+#define OBJECT_VISIT_SOFT_LINK_TEST_OBJ_NAME1 "group1"
+#define OBJECT_VISIT_SOFT_LINK_TEST_OBJ_NAME2 "group2"
+#define OBJECT_VISIT_SOFT_LINK_TEST_OBJ_NAME3 "group3"
+
+#define OBJECT_VISIT_DANGLING_LINK_TEST_SUBGROUP_NAME "object_visit_dangling_link_test"
+#define OBJECT_VISIT_DANGLING_LINK_TEST_LINK_NAME1 "dangling_link1"
+#define OBJECT_VISIT_DANGLING_LINK_TEST_LINK_NAME2 "dangling_link2"
+#define OBJECT_VISIT_DANGLING_LINK_TEST_LINK_NAME3 "dangling_link3"
+
+#define OBJECT_VISIT_INVALID_PARAMS_TEST_SUBGROUP_NAME "object_visit_invalid_params_test"
+#define OBJECT_VISIT_INVALID_PARAMS_TEST_GROUP_NAME "object_visit_invalid_params_group"
+
+#define OBJECT_CLOSE_TEST_SPACE_RANK 2
+#define OBJECT_CLOSE_TEST_GROUP_NAME "object_close_test"
+#define OBJECT_CLOSE_TEST_GRP_NAME "object_close_test_group"
+#define OBJECT_CLOSE_TEST_DSET_NAME "object_close_test_dset"
+#define OBJECT_CLOSE_TEST_TYPE_NAME "object_close_test_type"
+
+#define OBJECT_LINK_TEST_GROUP_NAME "object_link_test_group"
+#define OBJECT_LINK_TEST_GROUP_NAME2 "object_link_test_group_link"
+#define OBJECT_LINK_TEST_DSET_NAME "object_link_test_dataset"
+#define OBJECT_LINK_TEST_DTYPE_NAME "object_link_test_datatype"
+#define OBJECT_LINK_TEST_SPACE_RANK 2
+
+#define OBJECT_LINK_INVALID_PARAMS_TEST_GROUP_NAME "object_link_invalid_params_test_group"
+
+#define OBJ_REF_GET_TYPE_TEST_SUBGROUP_NAME "obj_ref_get_obj_type_test"
+#define OBJ_REF_GET_TYPE_TEST_DSET_NAME "ref_dset"
+#define OBJ_REF_GET_TYPE_TEST_TYPE_NAME "ref_dtype"
+#define OBJ_REF_GET_TYPE_TEST_SPACE_RANK 2
+
+#define OBJ_REF_DATASET_WRITE_TEST_SUBGROUP_NAME "obj_ref_write_test"
+#define OBJ_REF_DATASET_WRITE_TEST_REF_DSET_NAME "ref_dset"
+#define OBJ_REF_DATASET_WRITE_TEST_REF_TYPE_NAME "ref_dtype"
+#define OBJ_REF_DATASET_WRITE_TEST_SPACE_RANK 1
+#define OBJ_REF_DATASET_WRITE_TEST_DSET_NAME "obj_ref_dset"
+
+#define OBJ_REF_DATASET_READ_TEST_SUBGROUP_NAME "obj_ref_read_test"
+#define OBJ_REF_DATASET_READ_TEST_REF_DSET_NAME "ref_dset"
+#define OBJ_REF_DATASET_READ_TEST_REF_TYPE_NAME "ref_dtype"
+#define OBJ_REF_DATASET_READ_TEST_SPACE_RANK 1
+#define OBJ_REF_DATASET_READ_TEST_DSET_NAME "obj_ref_dset"
+
+#define OBJ_REF_DATASET_EMPTY_WRITE_TEST_SUBGROUP_NAME "obj_ref_empty_write_test"
+#define OBJ_REF_DATASET_EMPTY_WRITE_TEST_SPACE_RANK 1
+#define OBJ_REF_DATASET_EMPTY_WRITE_TEST_DSET_NAME "obj_ref_dset"
+
+#define OBJECT_REF_COUNT_TEST_SUBGROUP_NAME "ref_count_test"
+#define OBJECT_REF_COUNT_TEST_GRP_NAME "ref_count_test_group"
+#define OBJECT_REF_COUNT_TEST_DSET_NAME "ref_count_dset"
+#define OBJECT_REF_COUNT_TEST_TYPE_NAME "ref_count_dtype"
+#define OBJECT_REF_COUNT_TEST_DSET_SPACE_RANK 2
+
+#endif
diff --git a/test/API/H5_api_test.c b/test/API/H5_api_test.c
new file mode 100644
index 0000000..6d61b75
--- /dev/null
+++ b/test/API/H5_api_test.c
@@ -0,0 +1,227 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/*
+ * A test suite which only makes public HDF5 API calls and which is meant
+ * to test the native VOL connector or a specified HDF5 VOL connector (or
+ * set of connectors stacked with each other). This test suite must assume
+ * that a VOL connector might implement only the File interface. Therefore,
+ * the suite should check that a particular piece of functionality is supported
+ * by the VOL connector before actually testing it. If the functionality is
+ * not supported, the test should simply be skipped, perhaps with a note as
+ * to why the test was skipped, if possible.
+ *
+ * If the VOL connector being used supports the creation of groups, this
+ * test suite will attempt to organize the output of these various tests
+ * into groups based on their respective HDF5 interface.
+ */
+
+#include "H5_api_test.h"
+
+#include "H5_api_attribute_test.h"
+#include "H5_api_dataset_test.h"
+#include "H5_api_datatype_test.h"
+#include "H5_api_file_test.h"
+#include "H5_api_group_test.h"
+#include "H5_api_link_test.h"
+#include "H5_api_misc_test.h"
+#include "H5_api_object_test.h"
+#include "H5_api_test_util.h"
+#ifdef H5_API_TEST_HAVE_ASYNC
+#include "H5_api_async_test.h"
+#endif
+
+char H5_api_test_filename[H5_API_TEST_FILENAME_MAX_LENGTH];
+
+const char *test_path_prefix;
+
+/* X-macro to define the following for each test:
+ * - enum type
+ * - name
+ * - test function
+ * - enabled by default
+ */
+#ifdef H5_API_TEST_HAVE_ASYNC
+#define H5_API_TESTS \
+ X(H5_API_TEST_NULL, "", NULL, 0) \
+ X(H5_API_TEST_FILE, "file", H5_api_file_test, 1) \
+ X(H5_API_TEST_GROUP, "group", H5_api_group_test, 1) \
+ X(H5_API_TEST_DATASET, "dataset", H5_api_dataset_test, 1) \
+ X(H5_API_TEST_DATATYPE, "datatype", H5_api_datatype_test, 1) \
+ X(H5_API_TEST_ATTRIBUTE, "attribute", H5_api_attribute_test, 1) \
+ X(H5_API_TEST_LINK, "link", H5_api_link_test, 1) \
+ X(H5_API_TEST_OBJECT, "object", H5_api_object_test, 1) \
+ X(H5_API_TEST_MISC, "misc", H5_api_misc_test, 1) \
+ X(H5_API_TEST_ASYNC, "async", H5_api_async_test, 1) \
+ X(H5_API_TEST_MAX, "", NULL, 0)
+#else
+#define H5_API_TESTS \
+ X(H5_API_TEST_NULL, "", NULL, 0) \
+ X(H5_API_TEST_FILE, "file", H5_api_file_test, 1) \
+ X(H5_API_TEST_GROUP, "group", H5_api_group_test, 1) \
+ X(H5_API_TEST_DATASET, "dataset", H5_api_dataset_test, 1) \
+ X(H5_API_TEST_DATATYPE, "datatype", H5_api_datatype_test, 1) \
+ X(H5_API_TEST_ATTRIBUTE, "attribute", H5_api_attribute_test, 1) \
+ X(H5_API_TEST_LINK, "link", H5_api_link_test, 1) \
+ X(H5_API_TEST_OBJECT, "object", H5_api_object_test, 1) \
+ X(H5_API_TEST_MISC, "misc", H5_api_misc_test, 1) \
+ X(H5_API_TEST_MAX, "", NULL, 0)
+#endif
+
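+/* Expand the X-macro above into the test type enum and into parallel tables
+ * of test names, test functions and enabled-by-default flags
+ */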
+#define X(a, b, c, d) a,
+enum H5_api_test_type { H5_API_TESTS };
+#undef X
+#define X(a, b, c, d) b,
+static const char *const H5_api_test_name[] = {H5_API_TESTS};
+#undef X
+#define X(a, b, c, d) c,
+static int (*H5_api_test_func[])(void) = {H5_API_TESTS};
+#undef X
+#define X(a, b, c, d) d,
+static int H5_api_test_enabled[] = {H5_API_TESTS};
+#undef X
+
+static enum H5_api_test_type
+H5_api_test_name_to_type(const char *test_name)
+{
+ enum H5_api_test_type i = 0;
+
+ while (strcmp(H5_api_test_name[i], test_name) && i != H5_API_TEST_MAX)
+ i++;
+
+ return ((i == H5_API_TEST_MAX) ? H5_API_TEST_NULL : i);
+}
+
+static void
+H5_api_test_run(void)
+{
+ enum H5_api_test_type i;
+
+ for (i = H5_API_TEST_FILE; i < H5_API_TEST_MAX; i++)
+ if (H5_api_test_enabled[i])
+ (void)H5_api_test_func[i]();
+}
+
+/******************************************************************************/
+
+int
+main(int argc, char **argv)
+{
+ const char *vol_connector_name;
+ unsigned seed;
+ hid_t fapl_id = H5I_INVALID_HID;
+ hbool_t err_occurred = FALSE;
+
+ /* Simple argument checking; TODO: improve this later */
+ if (argc > 1) {
+ enum H5_api_test_type i = H5_api_test_name_to_type(argv[1]);
+ if (i != H5_API_TEST_NULL) {
+ /* Run only specific API test */
+ memset(H5_api_test_enabled, 0, sizeof(H5_api_test_enabled));
+ H5_api_test_enabled[i] = 1;
+ }
+ }
+
+#ifdef H5_HAVE_PARALLEL
+ /* If HDF5 was built with parallel enabled, go ahead and call MPI_Init before
+ * running these tests. Even though these are meant to be serial tests, they will
+ * likely be run using mpirun (or similar) and we cannot necessarily expect HDF5 or
+ * an HDF5 VOL connector to call MPI_Init.
+ */
+ MPI_Init(&argc, &argv);
+#endif
+
+ /* h5_reset(); */
+
+ n_tests_run_g = 0;
+ n_tests_passed_g = 0;
+ n_tests_failed_g = 0;
+ n_tests_skipped_g = 0;
+
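+ /* Seed the random number generator used by test helpers such as
+ * generate_random_datatype() and generate_random_dataspace()
+ */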
+ seed = (unsigned)HDtime(NULL);
+ srand(seed);
+
+ if (NULL == (test_path_prefix = HDgetenv(HDF5_API_TEST_PATH_PREFIX)))
+ test_path_prefix = "";
+
+ HDsnprintf(H5_api_test_filename, H5_API_TEST_FILENAME_MAX_LENGTH, "%s%s", test_path_prefix,
+ TEST_FILE_NAME);
+
+ if (NULL == (vol_connector_name = HDgetenv(HDF5_VOL_CONNECTOR))) {
+ HDprintf("No VOL connector selected; using native VOL connector\n");
+ vol_connector_name = "native";
+ }
+
+ HDprintf("Running API tests with VOL connector '%s'\n\n", vol_connector_name);
+ HDprintf("Test parameters:\n");
+ HDprintf(" - Test file name: '%s'\n", H5_api_test_filename);
+ HDprintf(" - Test seed: %u\n", seed);
+ HDprintf("\n\n");
+
+ /* Retrieve the VOL cap flags - work around an HDF5
+ * library issue by creating a FAPL
+ */
+ if ((fapl_id = H5Pcreate(H5P_FILE_ACCESS)) < 0) {
+ HDfprintf(stderr, "Unable to create FAPL\n");
+ err_occurred = TRUE;
+ goto done;
+ }
+
+ vol_cap_flags_g = H5VL_CAP_FLAG_NONE;
+ if (H5Pget_vol_cap_flags(fapl_id, &vol_cap_flags_g) < 0) {
+ HDfprintf(stderr, "Unable to retrieve VOL connector capability flags\n");
+ err_occurred = TRUE;
+ goto done;
+ }
+
+ /*
+ * Create the file that will be used for all of the tests,
+ * except for those which test file creation.
+ */
+ if (create_test_container(H5_api_test_filename, vol_cap_flags_g) < 0) {
+ HDfprintf(stderr, "Unable to create testing container file '%s'\n", H5_api_test_filename);
+ err_occurred = TRUE;
+ goto done;
+ }
+
+ /* Run all the tests that are enabled */
+ H5_api_test_run();
+
+ HDprintf("Cleaning up testing files\n");
+ H5Fdelete(H5_api_test_filename, fapl_id);
+
+ if (n_tests_run_g > 0) {
+ HDprintf("%zu/%zu (%.2f%%) API tests passed with VOL connector '%s'\n", n_tests_passed_g,
+ n_tests_run_g, ((double)n_tests_passed_g / (double)n_tests_run_g * 100.0),
+ vol_connector_name);
+ HDprintf("%zu/%zu (%.2f%%) API tests did not pass with VOL connector '%s'\n", n_tests_failed_g,
+ n_tests_run_g, ((double)n_tests_failed_g / (double)n_tests_run_g * 100.0),
+ vol_connector_name);
+ HDprintf("%zu/%zu (%.2f%%) API tests were skipped with VOL connector '%s'\n", n_tests_skipped_g,
+ n_tests_run_g, ((double)n_tests_skipped_g / (double)n_tests_run_g * 100.0),
+ vol_connector_name);
+ }
+
+done:
+ if (fapl_id >= 0 && H5Pclose(fapl_id) < 0) {
+ HDfprintf(stderr, "Unable to close FAPL\n");
+ err_occurred = TRUE;
+ }
+
+ H5close();
+
+#ifdef H5_HAVE_PARALLEL
+ MPI_Finalize();
+#endif
+
+ HDexit(((err_occurred || n_tests_failed_g > 0) ? EXIT_FAILURE : EXIT_SUCCESS));
+}
diff --git a/test/API/H5_api_test.h b/test/API/H5_api_test.h
new file mode 100644
index 0000000..296d296
--- /dev/null
+++ b/test/API/H5_api_test.h
@@ -0,0 +1,73 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+#ifndef H5_API_TEST_H
+#define H5_API_TEST_H
+
+#include <hdf5.h>
+#include <H5private.h>
+
+#include "h5test.h"
+
+#include "H5_api_test_config.h"
+#include "H5_api_test_util.h"
+#include "H5_api_tests_disabled.h"
+
+/* Define H5VL_VERSION if not already defined */
+#ifndef H5VL_VERSION
+#define H5VL_VERSION 0
+#endif
+
+/* Define macro to wait forever depending on version */
+#if H5VL_VERSION >= 2
+#define H5_API_TEST_WAIT_FOREVER H5ES_WAIT_FOREVER
+#else
+#define H5_API_TEST_WAIT_FOREVER UINT64_MAX
+#endif
+
+/******************************************************************************/
+
+/* The name of the file that all of the tests will operate on */
+#define TEST_FILE_NAME "H5_api_test.h5"
+extern char H5_api_test_filename[];
+
+extern const char *test_path_prefix;
+
+/*
+ * Environment variable specifying a prefix string to add to
+ * filenames generated by the API tests
+ */
+#define HDF5_API_TEST_PATH_PREFIX "HDF5_API_TEST_PATH_PREFIX"
+
+/* The names of a set of container groups which hold objects
+ * created by each of the different types of tests.
+ */
+#define GROUP_TEST_GROUP_NAME "group_tests"
+#define ATTRIBUTE_TEST_GROUP_NAME "attribute_tests"
+#define DATASET_TEST_GROUP_NAME "dataset_tests"
+#define DATATYPE_TEST_GROUP_NAME "datatype_tests"
+#define LINK_TEST_GROUP_NAME "link_tests"
+#define OBJECT_TEST_GROUP_NAME "object_tests"
+#define MISCELLANEOUS_TEST_GROUP_NAME "miscellaneous_tests"
+
+#define ARRAY_LENGTH(array) (sizeof(array) / sizeof(array[0]))
+
+#define UNUSED(o) (void)(o);
+
+#define H5_API_TEST_FILENAME_MAX_LENGTH 1024
+
+/* The maximum size of a dimension in an HDF5 dataspace as allowed
+ * for this testing suite so as not to try to create too large
+ * of a dataspace/datatype. */
+#define MAX_DIM_SIZE 16
+
+#endif
diff --git a/test/API/H5_api_test_config.h.in b/test/API/H5_api_test_config.h.in
new file mode 100644
index 0000000..c1833fa
--- /dev/null
+++ b/test/API/H5_api_test_config.h.in
@@ -0,0 +1,66 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+#ifndef H5_API_TEST_CONFIG_H
+#define H5_API_TEST_CONFIG_H
+
+#include "hdf5.h"
+
+#cmakedefine H5_API_TEST_HAVE_ASYNC
+
+#ifdef H5_HAVE_PARALLEL
+#cmakedefine MPIEXEC_EXECUTABLE "@MPIEXEC_EXECUTABLE@"
+#cmakedefine MPIEXEC "@MPIEXEC@" /* For compatibility */
+#ifndef MPIEXEC_EXECUTABLE
+# define MPIEXEC_EXECUTABLE MPIEXEC
+#endif
+#cmakedefine MPIEXEC_NUMPROC_FLAG "@MPIEXEC_NUMPROC_FLAG@"
+#cmakedefine MPIEXEC_PREFLAGS "@MPIEXEC_PREFLAGS@"
+#cmakedefine MPIEXEC_POSTFLAGS "@MPIEXEC_POSTFLAGS@"
+/* Server-specific flags if different */
+#cmakedefine MPIEXEC_SERVER_PREFLAGS "@MPIEXEC_SERVER_PREFLAGS@"
+#cmakedefine MPIEXEC_SERVER_POSTFLAGS "@MPIEXEC_SERVER_POSTFLAGS@"
+#cmakedefine MPIEXEC_MAX_NUMPROCS @MPIEXEC_MAX_NUMPROCS@
+#endif /* H5_HAVE_PARALLEL */
+
+#cmakedefine DART_TESTING_TIMEOUT @DART_TESTING_TIMEOUT@
+#ifndef DART_TESTING_TIMEOUT
+# define DART_TESTING_TIMEOUT 1500
+#endif
+
+#cmakedefine H5_API_TEST_ENV_VARS "@H5_API_TEST_ENV_VARS@"
+
+#cmakedefine H5_API_TEST_INIT_COMMAND "@H5_API_TEST_INIT_COMMAND@"
+
+#cmakedefine H5_API_TEST_SERVER_START_MSG "@H5_API_TEST_SERVER_START_MSG@"
+#ifndef H5_API_TEST_SERVER_START_MSG
+# define H5_API_TEST_SERVER_START_MSG "Waiting"
+#endif
+#cmakedefine H5_API_TEST_SERVER_EXIT_COMMAND "@H5_API_TEST_SERVER_EXIT_COMMAND@"
+
+#cmakedefine H5_API_TEST_CLIENT_HELPER_START_MSG "@H5_API_TEST_CLIENT_HELPER_START_MSG@"
+#ifndef H5_API_TEST_CLIENT_HELPER_START_MSG
+# define H5_API_TEST_CLIENT_HELPER_START_MSG "Waiting"
+#endif
+#cmakedefine H5_API_TEST_CLIENT_HELPER_EXIT_COMMAND "@H5_API_TEST_CLIENT_HELPER_EXIT_COMMAND@"
+
+#cmakedefine H5_API_TEST_CLIENT_INIT_TOKEN_REGEX "@H5_API_TEST_CLIENT_INIT_TOKEN_REGEX@"
+#ifndef H5_API_TEST_CLIENT_INIT_TOKEN_REGEX
+# define H5_API_TEST_CLIENT_INIT_TOKEN_REGEX "^token"
+#endif
+#cmakedefine H5_API_TEST_CLIENT_INIT_TOKEN_VAR "@H5_API_TEST_CLIENT_INIT_TOKEN_VAR@"
+#ifndef H5_API_TEST_CLIENT_INIT_TOKEN_VAR
+# define H5_API_TEST_CLIENT_INIT_TOKEN_VAR "TOKEN"
+#endif
+
+
+#endif /* H5_API_TEST_CONFIG_H */
diff --git a/test/API/H5_api_test_util.c b/test/API/H5_api_test_util.c
new file mode 100644
index 0000000..7fec2b6
--- /dev/null
+++ b/test/API/H5_api_test_util.c
@@ -0,0 +1,819 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+#include "H5_api_test.h"
+#include "H5_api_test_util.h"
+
+/*
+ * The maximum allowable size of a generated datatype.
+ *
+ * NOTE: HDF5 currently has limits on the maximum size of
+ * a datatype of an object, as this information is stored
+ * in the object header. In order to provide maximum
+ * compatibility between the native VOL connector and others
+ * for this test suite, we limit the size of a datatype here.
+ * This value should be adjusted as future HDF5 development
+ * allows.
+ */
+#define GENERATED_DATATYPE_MAX_SIZE 65536
+
+/*
+ * The maximum size of a datatype for compact objects that
+ * must fit within the size of a native HDF5 object header message.
+ * This is typically used for attributes and compact datasets.
+ */
+#define COMPACT_DATATYPE_MAX_SIZE 1024
+
+/* The maximum level of recursion that the generate_random_datatype()
+ * function should go down to, before being forced to choose a base type
+ * in order to not cause a stack overflow.
+ */
+#define TYPE_GEN_RECURSION_MAX_DEPTH 3
+
+/* The maximum number of members allowed in an HDF5 compound type, as
+ * generated by the generate_random_datatype() function, for ease of
+ * development.
+ */
+#define COMPOUND_TYPE_MAX_MEMBERS 4
+
+/* The maximum number and size of the dimensions of an HDF5 array
+ * datatype, as generated by the generate_random_datatype() function.
+ */
+#define ARRAY_TYPE_MAX_DIMS 4
+
+/* The maximum number of members and the maximum size of those
+ * members' names for an HDF5 enum type, as generated by the
+ * generate_random_datatype() function.
+ */
+#define ENUM_TYPE_MAX_MEMBER_NAME_LENGTH 256
+#define ENUM_TYPE_MAX_MEMBERS 16
+
+/* The maximum size of an HDF5 string datatype, as created by the
+ * generate_random_datatype() function.
+ */
+#define STRING_TYPE_MAX_SIZE 1024
+
+/*
+ * The maximum dimensionality and dimension size of a dataspace
+ * generated for an attribute or compact dataset.
+ */
+#define COMPACT_SPACE_MAX_DIM_SIZE 4
+#define COMPACT_SPACE_MAX_DIMS 3
+
+/*
+ * Helper function to generate a random HDF5 datatype in order to thoroughly
+ * test support for datatypes. The parent_class parameter is to support
+ * recursive generation of datatypes. In most cases, this function should be
+ * called with H5T_NO_CLASS for the parent_class parameter.
+ */
+/*
+ * XXX: limit size of datatype generated
+ */
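+/*
+ * For example (a hypothetical sketch), a caller wanting a non-compact datatype
+ * with no particular parent class might do:
+ *
+ * hid_t dtype_id = generate_random_datatype(H5T_NO_CLASS, FALSE);
+ */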
+hid_t
+generate_random_datatype(H5T_class_t parent_class, hbool_t is_compact)
+{
+ static int depth = 0;
+ hsize_t *array_dims = NULL;
+ size_t i;
+ hid_t compound_members[COMPOUND_TYPE_MAX_MEMBERS];
+ hid_t datatype = H5I_INVALID_HID;
+
+ depth++;
+
+ for (i = 0; i < COMPOUND_TYPE_MAX_MEMBERS; i++)
+ compound_members[i] = H5I_INVALID_HID;
+
+ switch (rand() % H5T_NCLASSES) {
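+ /* Each case below is preceded by a goto label so that the 'reroll'
+ * logic at the end of this function can jump directly to the handler
+ * for a newly-chosen datatype class
+ */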
+case_integer:
+ case H5T_INTEGER: {
+ switch (rand() % 16) {
+ case 0:
+ if ((datatype = H5Tcopy(H5T_STD_I8BE)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't copy predefined integer type\n");
+ goto done;
+ }
+
+ break;
+
+ case 1:
+ if ((datatype = H5Tcopy(H5T_STD_I8LE)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't copy predefined integer type\n");
+ goto done;
+ }
+
+ break;
+
+ case 2:
+ if ((datatype = H5Tcopy(H5T_STD_I16BE)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't copy predefined integer type\n");
+ goto done;
+ }
+
+ break;
+
+ case 3:
+ if ((datatype = H5Tcopy(H5T_STD_I16LE)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't copy predefined integer type\n");
+ goto done;
+ }
+
+ break;
+
+ case 4:
+ if ((datatype = H5Tcopy(H5T_STD_I32BE)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't copy predefined integer type\n");
+ goto done;
+ }
+
+ break;
+
+ case 5:
+ if ((datatype = H5Tcopy(H5T_STD_I32LE)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't copy predefined integer type\n");
+ goto done;
+ }
+
+ break;
+
+ case 6:
+ if ((datatype = H5Tcopy(H5T_STD_I64BE)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't copy predefined integer type\n");
+ goto done;
+ }
+
+ break;
+
+ case 7:
+ if ((datatype = H5Tcopy(H5T_STD_I64LE)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't copy predefined integer type\n");
+ goto done;
+ }
+
+ break;
+
+ case 8:
+ if ((datatype = H5Tcopy(H5T_STD_U8BE)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't copy predefined integer type\n");
+ goto done;
+ }
+
+ break;
+
+ case 9:
+ if ((datatype = H5Tcopy(H5T_STD_U8LE)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't copy predefined integer type\n");
+ goto done;
+ }
+
+ break;
+
+ case 10:
+ if ((datatype = H5Tcopy(H5T_STD_U16BE)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't copy predefined integer type\n");
+ goto done;
+ }
+
+ break;
+
+ case 11:
+ if ((datatype = H5Tcopy(H5T_STD_U16LE)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't copy predefined integer type\n");
+ goto done;
+ }
+
+ break;
+
+ case 12:
+ if ((datatype = H5Tcopy(H5T_STD_U32BE)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't copy predefined integer type\n");
+ goto done;
+ }
+
+ break;
+
+ case 13:
+ if ((datatype = H5Tcopy(H5T_STD_U32LE)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't copy predefined integer type\n");
+ goto done;
+ }
+
+ break;
+
+ case 14:
+ if ((datatype = H5Tcopy(H5T_STD_U64BE)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't copy predefined integer type\n");
+ goto done;
+ }
+
+ break;
+
+ case 15:
+ if ((datatype = H5Tcopy(H5T_STD_U64LE)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't copy predefined integer type\n");
+ goto done;
+ }
+
+ break;
+
+ default:
+ H5_FAILED();
+ HDprintf(" invalid value for predefined integer type; should not happen\n");
+ goto done;
+ }
+
+ break;
+ }
+
+case_float:
+ case H5T_FLOAT: {
+ switch (rand() % 4) {
+ case 0:
+ if ((datatype = H5Tcopy(H5T_IEEE_F32BE)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't copy predefined floating-point type\n");
+ goto done;
+ }
+
+ break;
+
+ case 1:
+ if ((datatype = H5Tcopy(H5T_IEEE_F32LE)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't copy predefined floating-point type\n");
+ goto done;
+ }
+
+ break;
+
+ case 2:
+ if ((datatype = H5Tcopy(H5T_IEEE_F64BE)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't copy predefined floating-point type\n");
+ goto done;
+ }
+
+ break;
+
+ case 3:
+ if ((datatype = H5Tcopy(H5T_IEEE_F64LE)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't copy predefined floating-point type\n");
+ goto done;
+ }
+
+ break;
+
+ default:
+ H5_FAILED();
+ HDprintf(" invalid value for floating point type; should not happen\n");
+ goto done;
+ }
+
+ break;
+ }
+
+case_time:
+ case H5T_TIME: {
+ /* Time datatype is unsupported, try again */
+ goto reroll;
+ break;
+ }
+
+case_string:
+ case H5T_STRING: {
+ /* Note: currently, only H5T_CSET_ASCII is supported for the character set.
+ * For string padding, only H5T_STR_NULLTERM is supported for variable-length
+ * strings and only H5T_STR_NULLPAD is supported for fixed-length strings.
+ * These restrictions may change in the future.
+ */
+ if (0 == (rand() % 2)) {
+ if ((datatype = H5Tcreate(H5T_STRING, (size_t)(rand() % STRING_TYPE_MAX_SIZE) + 1)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create fixed-length string datatype\n");
+ goto done;
+ }
+
+ if (H5Tset_strpad(datatype, H5T_STR_NULLPAD) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't set H5T_STR_NULLPAD for fixed-length string type\n");
+ goto done;
+ }
+ }
+ else {
+ /*
+ * Currently, all VL datatypes are disabled.
+ */
+ goto reroll;
+
+#if 0
+ if ((datatype = H5Tcreate(H5T_STRING, H5T_VARIABLE)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create variable-length string datatype\n");
+ goto done;
+ }
+
+ if (H5Tset_strpad(datatype, H5T_STR_NULLTERM) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't set H5T_STR_NULLTERM for variable-length string type\n");
+ goto done;
+ }
+#endif
+ }
+
+ if (H5Tset_cset(datatype, H5T_CSET_ASCII) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't set string datatype character set\n");
+ goto done;
+ }
+
+ break;
+ }
+
+case_bitfield:
+ case H5T_BITFIELD: {
+ /* Bitfield datatype is unsupported, try again */
+ goto reroll;
+ break;
+ }
+
+case_opaque:
+ case H5T_OPAQUE: {
+ /* Opaque datatype is unsupported, try again */
+ goto reroll;
+ break;
+ }
+
+case_compound:
+ case H5T_COMPOUND: {
+ size_t num_members;
+ size_t next_offset = 0;
+ size_t compound_size = 0;
+
+ /* Currently only allows arrays of integer, float or string. Pick another type if we
+ * are creating an array of something other than these. Also don't allow recursion
+ * to go too deep. Pick another type that doesn't recursively call this function. */
+ if (H5T_ARRAY == parent_class || depth > TYPE_GEN_RECURSION_MAX_DEPTH)
+ goto reroll;
+
+ if ((datatype = H5Tcreate(H5T_COMPOUND, 1)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create compound datatype\n");
+ goto done;
+ }
+
+ num_members = (size_t)(rand() % COMPOUND_TYPE_MAX_MEMBERS + 1);
+
+ for (i = 0; i < num_members; i++) {
+ size_t member_size;
+ char member_name[256];
+
+ HDsnprintf(member_name, 256, "compound_member%zu", i);
+
+ if ((compound_members[i] = generate_random_datatype(H5T_NO_CLASS, is_compact)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create compound datatype member %zu\n", i);
+ goto done;
+ }
+
+ if (!(member_size = H5Tget_size(compound_members[i]))) {
+ H5_FAILED();
+ HDprintf(" couldn't get compound member %zu size\n", i);
+ goto done;
+ }
+
+ compound_size += member_size;
+
+ if (H5Tset_size(datatype, compound_size) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't set size for compound datatype\n");
+ goto done;
+ }
+
+ if (H5Tinsert(datatype, member_name, next_offset, compound_members[i]) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't insert compound datatype member %zu\n", i);
+ goto done;
+ }
+
+ next_offset += member_size;
+ }
+
+ break;
+ }
+
+case_reference:
+ case H5T_REFERENCE: {
+ /* Temporarily disable generation of reference datatypes */
+ goto reroll;
+
+ /* Currently only allows arrays of integer, float or string. Pick another type if we
+ * are creating an array of something other than these. */
+ if (H5T_ARRAY == parent_class)
+ goto reroll;
+
+ if (0 == (rand() % 2)) {
+ if ((datatype = H5Tcopy(H5T_STD_REF_OBJ)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't copy object reference datatype\n");
+ goto done;
+ }
+ }
+ else {
+ /* Region references are currently unsupported */
+ goto reroll;
+
+ if ((datatype = H5Tcopy(H5T_STD_REF_DSETREG)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't copy region reference datatype\n");
+ goto done;
+ }
+ }
+
+ break;
+ }
+
+case_enum:
+ case H5T_ENUM: {
+ /* Currently doesn't support ARRAY of ENUM, so try another type
+ * if this happens. */
+ if (H5T_ARRAY == parent_class)
+ goto reroll;
+
+ if ((datatype = H5Tenum_create(H5T_NATIVE_INT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create enum datatype\n");
+ goto done;
+ }
+
+ for (i = 0; i < (size_t)(rand() % ENUM_TYPE_MAX_MEMBERS + 1); i++) {
+ char name[ENUM_TYPE_MAX_MEMBER_NAME_LENGTH];
+ int value = rand();
+
+ HDsnprintf(name, ENUM_TYPE_MAX_MEMBER_NAME_LENGTH, "enum_val%zu", i);
+
+ if (H5Tenum_insert(datatype, name, &value) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't insert member into enum datatype\n");
+ goto done;
+ }
+ }
+
+ break;
+ }
+
+case_vlen:
+ case H5T_VLEN: {
+ /* Variable-length datatypes are unsupported, try again */
+ goto reroll;
+ break;
+ }
+
+case_array:
+ case H5T_ARRAY: {
+ unsigned ndims;
+ hid_t base_datatype = H5I_INVALID_HID;
+
+ /* Currently doesn't support ARRAY of ARRAY, so try another type
+ * if this happens. Also check for too much recursion. */
+ if (H5T_ARRAY == parent_class || depth > TYPE_GEN_RECURSION_MAX_DEPTH)
+ goto reroll;
+
+ ndims = (unsigned)(rand() % ARRAY_TYPE_MAX_DIMS + 1);
+
+ if (NULL == (array_dims = (hsize_t *)HDmalloc(ndims * sizeof(*array_dims))))
+ goto done;
+
+ for (i = 0; i < ndims; i++)
+ array_dims[i] = (hsize_t)(rand() % MAX_DIM_SIZE + 1);
+
+ if ((base_datatype = generate_random_datatype(H5T_ARRAY, is_compact)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create array base datatype\n");
+ goto done;
+ }
+
+ if ((datatype = H5Tarray_create2(base_datatype, ndims, array_dims)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create array datatype\n");
+ goto done;
+ }
+
+ break;
+ }
+
+ default:
+ H5_FAILED();
+ HDprintf(" invalid datatype class\n");
+ break;
+ } /* end switch */
+
+done:
+ if (depth > 0)
+ depth--;
+
+ if (datatype < 0) {
+ for (i = 0; i < COMPOUND_TYPE_MAX_MEMBERS; i++) {
+ if (compound_members[i] > 0 && H5Tclose(compound_members[i]) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't close compound member %zu\n", i);
+ }
+ }
+ }
+
+ if (array_dims) {
+ HDfree(array_dims);
+ array_dims = NULL;
+ }
+
+ if (is_compact && (depth == 0)) {
+ size_t type_size;
+
+ /*
+ * Check to make sure that the generated datatype does
+ * not exceed the maximum compact datatype size if a
+ * compact datatype was requested.
+ */
+ if (0 == (type_size = H5Tget_size(datatype))) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve datatype's size\n");
+ H5Tclose(datatype);
+ datatype = H5I_INVALID_HID;
+ }
+ else {
+ if (type_size > COMPACT_DATATYPE_MAX_SIZE) {
+ /*
+ * Generate a new datatype.
+ */
+ H5Tclose(datatype);
+ datatype = H5I_INVALID_HID;
+ goto reroll;
+ }
+ }
+ }
+
+ return datatype;
+
+reroll:
+ if (depth > 0)
+ depth--;
+
+ /*
+ * The datatype generation resulted in a datatype that is currently invalid
+ * for these tests, try again.
+ */
+ switch (rand() % H5T_NCLASSES) {
+ case H5T_INTEGER:
+ goto case_integer;
+ case H5T_FLOAT:
+ goto case_float;
+ case H5T_TIME:
+ goto case_time;
+ case H5T_STRING:
+ goto case_string;
+ case H5T_BITFIELD:
+ goto case_bitfield;
+ case H5T_OPAQUE:
+ goto case_opaque;
+ case H5T_COMPOUND:
+ goto case_compound;
+ case H5T_REFERENCE:
+ goto case_reference;
+ case H5T_ENUM:
+ goto case_enum;
+ case H5T_VLEN:
+ goto case_vlen;
+ case H5T_ARRAY:
+ goto case_array;
+ default:
+ H5_FAILED();
+ HDprintf(" invalid value for goto\n");
+ break;
+ }
+
+ return H5I_INVALID_HID;
+}
+
+/*
+ * Helper function to generate a random HDF5 dataspace in order to thoroughly
+ * test support for dataspaces.
+ */
+hid_t
+generate_random_dataspace(int rank, const hsize_t *max_dims, hsize_t *dims_out, hbool_t is_compact)
+{
+ hsize_t dataspace_dims[H5S_MAX_RANK];
+ size_t i;
+ hid_t dataspace_id = H5I_INVALID_HID;
+
+ if (rank < 0)
+ TEST_ERROR;
+ if (is_compact && (rank > COMPACT_SPACE_MAX_DIMS)) {
+ HDprintf(" current rank of compact dataspace (%lld) exceeds maximum dimensionality (%lld)\n",
+ (long long)rank, (long long)COMPACT_SPACE_MAX_DIMS);
+ TEST_ERROR;
+ }
+
+ /*
+ * XXX: if max_dims is specified, make sure that the dimensions generated
+ * are not larger than this.
+ */
+ for (i = 0; i < (size_t)rank; i++) {
+ if (is_compact)
+ dataspace_dims[i] = (hsize_t)(rand() % COMPACT_SPACE_MAX_DIM_SIZE + 1);
+ else
+ dataspace_dims[i] = (hsize_t)(rand() % MAX_DIM_SIZE + 1);
+
+ if (dims_out)
+ dims_out[i] = dataspace_dims[i];
+ }
+
+ if ((dataspace_id = H5Screate_simple(rank, dataspace_dims, max_dims)) < 0)
+ TEST_ERROR;
+
+ return dataspace_id;
+
+error:
+ return H5I_INVALID_HID;
+}
+
+int
+create_test_container(char *filename, uint64_t vol_cap_flags)
+{
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+
+ if (!(vol_cap_flags & H5VL_CAP_FLAG_FILE_BASIC)) {
+ HDprintf(" VOL connector doesn't support file creation\n");
+ goto error;
+ }
+
+ if ((file_id = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ HDprintf(" couldn't create testing container file '%s'\n", filename);
+ goto error;
+ }
+
+ if (vol_cap_flags & H5VL_CAP_FLAG_GROUP_BASIC) {
+ /* Create container groups for each of the test interfaces
+ * (group, attribute, dataset, etc.).
+ */
+ if ((group_id = H5Gcreate2(file_id, GROUP_TEST_GROUP_NAME, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) >=
+ 0) {
+ HDprintf(" created container group for Group tests\n");
+ H5Gclose(group_id);
+ }
+
+ if ((group_id = H5Gcreate2(file_id, ATTRIBUTE_TEST_GROUP_NAME, H5P_DEFAULT, H5P_DEFAULT,
+ H5P_DEFAULT)) >= 0) {
+ HDprintf(" created container group for Attribute tests\n");
+ H5Gclose(group_id);
+ }
+
+ if ((group_id =
+ H5Gcreate2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) >= 0) {
+ HDprintf(" created container group for Dataset tests\n");
+ H5Gclose(group_id);
+ }
+
+ if ((group_id =
+ H5Gcreate2(file_id, DATATYPE_TEST_GROUP_NAME, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) >= 0) {
+ HDprintf(" created container group for Datatype tests\n");
+ H5Gclose(group_id);
+ }
+
+ if ((group_id = H5Gcreate2(file_id, LINK_TEST_GROUP_NAME, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) >=
+ 0) {
+ HDprintf(" created container group for Link tests\n");
+ H5Gclose(group_id);
+ }
+
+ if ((group_id = H5Gcreate2(file_id, OBJECT_TEST_GROUP_NAME, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) >=
+ 0) {
+ HDprintf(" created container group for Object tests\n");
+ H5Gclose(group_id);
+ }
+
+ if ((group_id = H5Gcreate2(file_id, MISCELLANEOUS_TEST_GROUP_NAME, H5P_DEFAULT, H5P_DEFAULT,
+ H5P_DEFAULT)) >= 0) {
+ HDprintf(" created container group for Miscellaneous tests\n");
+ H5Gclose(group_id);
+ }
+ }
+
+ if (H5Fclose(file_id) < 0) {
+ HDprintf(" failed to close testing container\n");
+ goto error;
+ }
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Gclose(group_id);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return -1;
+}
+
+/*
+ * Add a prefix to the given filename. The caller
+ * is responsible for freeing the returned filename
+ * pointer with HDfree().
+ */
+herr_t
+prefix_filename(const char *prefix, const char *filename, char **filename_out)
+{
+ char *out_buf = NULL;
+ herr_t ret_value = SUCCEED;
+
+ if (!prefix) {
+ HDprintf(" invalid file prefix\n");
+ ret_value = FAIL;
+ goto done;
+ }
+ if (!filename || (*filename == '\0')) {
+ HDprintf(" invalid filename\n");
+ ret_value = FAIL;
+ goto done;
+ }
+ if (!filename_out) {
+ HDprintf(" invalid filename_out buffer\n");
+ ret_value = FAIL;
+ goto done;
+ }
+
+ if (NULL == (out_buf = HDmalloc(H5_API_TEST_FILENAME_MAX_LENGTH))) {
+ HDprintf(" couldn't allocated filename buffer\n");
+ ret_value = FAIL;
+ goto done;
+ }
+
+ HDsnprintf(out_buf, H5_API_TEST_FILENAME_MAX_LENGTH, "%s%s", prefix, filename);
+
+ *filename_out = out_buf;
+
+done:
+ return ret_value;
+}
+
+/*
+ * Calls H5Fdelete on the given filename. If a prefix string
+ * is given, adds that prefix string to the filename before
+ * calling H5Fdelete
+ */
+herr_t
+remove_test_file(const char *prefix, const char *filename)
+{
+ const char *test_file;
+ char *prefixed_filename = NULL;
+ herr_t ret_value = SUCCEED;
+
+ if (prefix) {
+ if (prefix_filename(prefix, filename, &prefixed_filename) < 0) {
+ HDprintf(" couldn't prefix filename\n");
+ ret_value = FAIL;
+ goto done;
+ }
+
+ test_file = prefixed_filename;
+ }
+ else
+ test_file = filename;
+
+ if (H5Fdelete(test_file, H5P_DEFAULT) < 0) {
+ HDprintf(" couldn't remove file '%s'\n", test_file);
+ ret_value = FAIL;
+ goto done;
+ }
+
+done:
+ HDfree(prefixed_filename);
+
+ return ret_value;
+}
diff --git a/test/API/H5_api_test_util.h b/test/API/H5_api_test_util.h
new file mode 100644
index 0000000..86b0e3e
--- /dev/null
+++ b/test/API/H5_api_test_util.h
@@ -0,0 +1,24 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+#ifndef H5_API_TEST_UTIL_H_
+#define H5_API_TEST_UTIL_H_
+
+#include "hdf5.h"
+
+hid_t generate_random_datatype(H5T_class_t parent_class, hbool_t is_compact);
+hid_t generate_random_dataspace(int rank, const hsize_t *max_dims, hsize_t *dims_out, hbool_t is_compact);
+int create_test_container(char *filename, uint64_t vol_cap_flags);
+herr_t prefix_filename(const char *prefix, const char *filename, char **filename_out);
+herr_t remove_test_file(const char *prefix, const char *filename);
+
+#endif /* H5_API_TEST_UTIL_H_ */
diff --git a/test/API/H5_api_tests_disabled.h b/test/API/H5_api_tests_disabled.h
new file mode 100644
index 0000000..672d2d9
--- /dev/null
+++ b/test/API/H5_api_tests_disabled.h
@@ -0,0 +1,46 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+#ifndef H5_API_TESTS_DISABLED_H
+#define H5_API_TESTS_DISABLED_H
+
+#include "H5_api_test_config.h"
+
+/* Contains #defines to temporarily disable API tests based
+ * on problematic or unsupported functionality */
+
+#define NO_LARGE_TESTS
+#define NO_ATTR_FILL_VALUE_SUPPORT
+#define NO_DECREASING_ALPHA_ITER_ORDER
+#define NO_USER_DEFINED_LINKS
+#define NO_EXTERNAL_LINKS
+#define NO_ITERATION_RESTART
+#define NO_FILE_MOUNTS
+#define NO_CLEAR_ON_SHRINK
+#define NO_DOUBLE_OBJECT_OPENS
+#define NO_OBJECT_GET_NAME
+#define WRONG_DATATYPE_OBJ_COUNT
+#define NO_SHARED_DATATYPES
+#define NO_INVALID_PROPERTY_LIST_TESTS
+#define NO_MAX_LINK_CRT_ORDER_RESET
+#define NO_PREVENT_HARD_LINKS_ACROSS_FILES
+#define NO_SOFT_LINK_MANY_DANGLING
+#define NO_ID_PREVENTS_OBJ_DELETE
+#define NO_WRITE_SAME_ELEMENT_TWICE
+#define NO_PREVENT_CREATE_SAME_ATTRIBUTE_TWICE
+#define NO_DELETE_NONEXISTENT_ATTRIBUTE
+#define NO_TRUNCATE_OPEN_FILE
+#define NO_CHECK_SELECTION_BOUNDS
+#define NO_VALIDATE_DATASPACE
+#define NO_REFERENCE_TO_DELETED
+
+#endif /* H5_API_TESTS_DISABLED_H */
diff --git a/test/API/README.md b/test/API/README.md
new file mode 100644
index 0000000..d57472d
--- /dev/null
+++ b/test/API/README.md
@@ -0,0 +1,86 @@
+# HDF5 API Tests
+
+This directory contains several test applications that exercise [HDF5](https://github.com/HDFGroup/hdf5)'s
+public API and serve as regression tests for HDF5 [VOL Connectors](https://portal.hdfgroup.org/display/HDF5/Virtual+Object+Layer).
+
+## Build Process and Options
+
+These HDF5 API tests are enabled and built by default, but can be disabled if desired.
+The following build options are available to influence how the API tests get built:
+
+### CMake
+
+To set an option, it should be prepended with `-D` when passed to the `cmake` command.
+For example,
+
+ cmake -DHDF5_TEST_API=OFF ..
+
+`HDF5_TEST_API` (Default: `ON`) - Determines whether the API tests will be built.
+
+`HDF5_TEST_API_INSTALL` (Default: `ON`) - Determines whether the API tests should be installed
+on the system.
+
+`HDF5_TEST_API_ENABLE_ASYNC` (Default: `OFF`) - Determines whether tests for HDF5's asynchronous
+I/O capabilities should be enabled. Note that the "native" HDF5 VOL connector doesn't support
+this functionality, so these tests are directed towards VOL connectors that do.
+
+`HDF5_TEST_ENABLE_DRIVER` (Default: `OFF`) - Determines whether the API test driver program should
+be built. This driver program is useful when a VOL connector relies upon a server executable
+(as well as possible additional executables) in order to function. The driver program can be
+supplied with a server executable and will attempt to launch it before running the API tests.
+
+`HDF5_TEST_API_SERVER` (Default: empty string) - If `HDF5_TEST_ENABLE_DRIVER` is set to `ON`, this
+option should be edited to point to the server executable that the driver program should attempt
+to launch before running the API tests.
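+
+For example, the driver program could be enabled and pointed at a hypothetical server executable at
+`/path/to/vol_server` with:
+
+ cmake -DHDF5_TEST_ENABLE_DRIVER=ON -DHDF5_TEST_API_SERVER="/path/to/vol_server" ..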
+
+### Autotools
+
+Currently unsupported
+
+### Usage
+
+These API tests currently only support usage with HDF5 VOL connectors that can be loaded dynamically
+as a plugin. For information on how to build a VOL connector in this manner, refer to section 2.3 of
+the [HDF5 VOL Connector Author Guide](https://portal.hdfgroup.org/display/HDF5/HDF5+VOL+Connector+Authors+Guide?preview=/53610813/59903039/vol_connector_author_guide.pdf).
+
+TODO: section on building VOL connectors alongside HDF5 for use with tests
+
+These API tests can also be used to test an HDF5 VOL connector that is external to the library.
+For convenience, the `HDF5_TEST_API_INSTALL` option can be used to install these tests on the
+system where other HDF5 executables (such as `h5dump`) are installed.
+
+To run these tests with your VOL connector, set the following two environment variables:
+
+`HDF5_VOL_CONNECTOR` - This environment variable should be set to the name chosen for the VOL connector
+to be used. For example, HDF5's DAOS VOL connector uses the name "[daos](https://github.com/HDFGroup/vol-daos/blob/v1.2.0/src/daos_vol.h#L30)" and would therefore set:
+
+ HDF5_VOL_CONNECTOR=daos
+
+`HDF5_PLUGIN_PATH` - This environment variable should be set to the directory that contains the built
+library for the VOL connector to be used.
+
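+For example, when testing with a dynamically loaded VOL connector from a shell, the setup might
+look similar to the following (the plugin path and test executable name are only illustrative):
+
+    export HDF5_VOL_CONNECTOR=daos
+    export HDF5_PLUGIN_PATH=/path/to/built/vol/connector
+    ./h5_api_test
+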
+Once these are set, the HDF5 API tests will attempt to automatically load the specified VOL connector
+and use it when running tests. If HDF5 is unable to locate or load the VOL connector specified, it
+will fall back to running the tests with the native HDF5 VOL connector, and an error similar to the
+following will appear in the test output:
+
+ HDF5-DIAG: Error detected in HDF5 (1.13.0) MPI-process 0:
+ #000: /home/user/git/hdf5/src/H5.c line 1010 in H5open(): library initialization failed
+ major: Function entry/exit
+ minor: Unable to initialize object
+ #001: /home/user/git/hdf5/src/H5.c line 277 in H5_init_library(): unable to initialize vol interface
+ major: Function entry/exit
+ minor: Unable to initialize object
+ #002: /home/user/git/hdf5/src/H5VLint.c line 199 in H5VL_init_phase2(): unable to set default VOL connector
+ major: Virtual Object Layer
+ minor: Can't set value
+ #003: /home/user/git/hdf5/src/H5VLint.c line 429 in H5VL__set_def_conn(): can't register connector
+ major: Virtual Object Layer
+ minor: Unable to register new ID
+ #004: /home/user/git/hdf5/src/H5VLint.c line 1321 in H5VL__register_connector_by_name(): unable to load VOL connector
+ major: Virtual Object Layer
+ minor: Unable to initialize object
+
+### Help and Support
+
+For help with building or using the HDF5 API tests, please contact the [HDF Help Desk](https://portal.hdfgroup.org/display/support/The+HDF+Help+Desk).
diff --git a/test/API/driver/CMakeLists.txt b/test/API/driver/CMakeLists.txt
new file mode 100644
index 0000000..2210068
--- /dev/null
+++ b/test/API/driver/CMakeLists.txt
@@ -0,0 +1,17 @@
+cmake_minimum_required(VERSION 2.8.12.2 FATAL_ERROR)
+project(H5_API_TEST_DRIVER CXX)
+
+include(CheckAsan)
+include(CheckUbsan)
+
+set(CMAKE_CXX_STANDARD 11)
+
+set(KWSYS_NAMESPACE h5_api_test_sys)
+set(KWSYS_USE_SystemTools 1)
+set(KWSYS_USE_Process 1)
+set(KWSYS_USE_RegularExpression 1)
+add_subdirectory(kwsys)
+include_directories(${CMAKE_CURRENT_BINARY_DIR}/kwsys)
+
+add_executable(h5_api_test_driver h5_api_test_driver.cxx)
+target_link_libraries(h5_api_test_driver h5_api_test_sys)
diff --git a/test/API/driver/h5_api_test_driver.cxx b/test/API/driver/h5_api_test_driver.cxx
new file mode 100644
index 0000000..b5d9821
--- /dev/null
+++ b/test/API/driver/h5_api_test_driver.cxx
@@ -0,0 +1,910 @@
+#include "h5_api_test_driver.hxx"
+
+#include "H5_api_test_config.h"
+
+#include <cstdio>
+#include <sstream>
+#include <iostream>
+#include <cstring>
+#include <cstdlib>
+
+#if !defined(_WIN32) || defined(__CYGWIN__)
+# include <unistd.h>
+# include <sys/wait.h>
+#endif
+
+#include <h5_api_test_sys/RegularExpression.hxx>
+#include <h5_api_test_sys/SystemTools.hxx>
+
+using std::vector;
+using std::string;
+using std::cerr;
+
+// The main function; this class is only intended to be used by this program
+int
+main(int argc, char *argv[])
+{
+ H5APITestDriver d;
+ return d.Main(argc, argv);
+}
+
+//----------------------------------------------------------------------------
+H5APITestDriver::H5APITestDriver()
+{
+ this->ClientArgStart = 0;
+ this->ClientArgCount = 0;
+ this->ClientHelperArgStart = 0;
+ this->ClientHelperArgCount = 0;
+ this->ClientInitArgStart = 0;
+ this->ClientInitArgCount = 0;
+ this->ServerArgStart = 0;
+ this->ServerArgCount = 0;
+ this->AllowErrorInOutput = false;
+    // Try to make sure that this times out before the DART/CTest timeout so it can kill all the processes
+ this->TimeOut = DART_TESTING_TIMEOUT - 10.0;
+ this->ServerExitTimeOut = 2; /* 2 seconds timeout for server to exit */
+ this->ClientHelper = false;
+ this->ClientInit = false;
+ this->TestServer = false;
+ this->TestSerial = false;
+ this->IgnoreServerResult = false;
+}
+
+//----------------------------------------------------------------------------
+H5APITestDriver::~H5APITestDriver()
+{
+}
+
+//----------------------------------------------------------------------------
+void
+H5APITestDriver::SeparateArguments(const char *str, vector<string> &flags)
+{
+ string arg = str;
+ string::size_type pos1 = 0;
+ string::size_type pos2 = arg.find_first_of(" ;");
+ if (pos2 == arg.npos) {
+ flags.push_back(str);
+ return;
+ }
+ while (pos2 != arg.npos) {
+ flags.push_back(arg.substr(pos1, pos2 - pos1));
+ pos1 = pos2 + 1;
+ pos2 = arg.find_first_of(" ;", pos1 + 1);
+ }
+ flags.push_back(arg.substr(pos1, pos2 - pos1));
+}
+
+//----------------------------------------------------------------------------
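+// Pull in options configured at CMake time: client environment variables and,
+// when an MPI launcher is configured, the mpiexec executable, its
+// number-of-processes flag, and the pre-/post-flags used for the client and
+// server command lines.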
+void
+H5APITestDriver::CollectConfiguredOptions()
+{
+ if (this->TimeOut < 0)
+ this->TimeOut = 1500;
+
+#ifdef H5_API_TEST_ENV_VARS
+ this->SeparateArguments(H5_API_TEST_ENV_VARS, this->ClientEnvVars);
+#endif
+
+ // now find all the mpi information if mpi run is set
+#ifdef MPIEXEC_EXECUTABLE
+ this->MPIRun = MPIEXEC_EXECUTABLE;
+#else
+ return;
+#endif
+ int maxNumProc = 1;
+
+# ifdef MPIEXEC_MAX_NUMPROCS
+ if (!this->TestSerial)
+ maxNumProc = MPIEXEC_MAX_NUMPROCS;
+# endif
+# ifdef MPIEXEC_NUMPROC_FLAG
+ this->MPINumProcessFlag = MPIEXEC_NUMPROC_FLAG;
+# endif
+# ifdef MPIEXEC_PREFLAGS
+ this->SeparateArguments(MPIEXEC_PREFLAGS, this->MPIClientPreFlags);
+# endif
+# ifdef MPIEXEC_POSTFLAGS
+ this->SeparateArguments(MPIEXEC_POSTFLAGS, this->MPIClientPostFlags);
+# endif
+# ifdef MPIEXEC_SERVER_PREFLAGS
+ this->SeparateArguments(MPIEXEC_SERVER_PREFLAGS, this->MPIServerPreFlags);
+# else
+ this->MPIServerPreFlags = this->MPIClientPreFlags;
+# endif
+# ifdef MPIEXEC_SERVER_POSTFLAGS
+ this->SeparateArguments(MPIEXEC_SERVER_POSTFLAGS, this->MPIServerPostFlags);
+# else
+ this->MPIServerPostFlags = this->MPIClientPostFlags;
+# endif
+ std::stringstream ss;
+ ss << maxNumProc;
+ this->MPIServerNumProcessFlag = "1";
+ this->MPIClientNumProcessFlag = ss.str();
+}
+
+//----------------------------------------------------------------------------
+/// This adds the per-configuration subdirectory (CMAKE_INTDIR) to the executable path on Windows.
+static string
+FixExecutablePath(const string &path)
+{
+#ifdef CMAKE_INTDIR
+ string parent_dir =
+ h5_api_test_sys::SystemTools::GetFilenamePath(path.c_str());
+
+ string filename =
+ h5_api_test_sys::SystemTools::GetFilenameName(path);
+
+ if (!h5_api_test_sys::SystemTools::StringEndsWith(parent_dir.c_str(), CMAKE_INTDIR)) {
+ parent_dir += "/" CMAKE_INTDIR;
+ }
+ return parent_dir + "/" + filename;
+#endif
+
+ return path;
+}
+
+//----------------------------------------------------------------------------
+int
+H5APITestDriver::ProcessCommandLine(int argc, char *argv[])
+{
+ int *ArgCountP = NULL;
+ int i;
+ for (i = 1; i < argc; ++i) {
+ if (strcmp(argv[i], "--client") == 0) {
+ this->ClientExecutable = ::FixExecutablePath(argv[i + 1]);
+ ++i; /* Skip executable */
+ this->ClientArgStart = i + 1;
+ this->ClientArgCount = this->ClientArgStart;
+ ArgCountP = &this->ClientArgCount;
+ continue;
+ }
+ if (strcmp(argv[i], "--client-helper") == 0) {
+ std::cerr << "Client Helper" << std::endl;
+ this->ClientHelper = true;
+ this->ClientHelperExecutable = ::FixExecutablePath(argv[i + 1]);
+ ++i; /* Skip executable */
+ this->ClientHelperArgStart = i + 1;
+ this->ClientHelperArgCount = this->ClientHelperArgStart;
+ ArgCountP = &this->ClientHelperArgCount;
+ continue;
+ }
+ if (strcmp(argv[i], "--client-init") == 0) {
+ std::cerr << "Client Init" << std::endl;
+ this->ClientInit = true;
+ this->ClientInitExecutable = ::FixExecutablePath(argv[i + 1]);
+ ++i; /* Skip executable */
+ this->ClientInitArgStart = i + 1;
+ this->ClientInitArgCount = this->ClientInitArgStart;
+ ArgCountP = &this->ClientInitArgCount;
+ continue;
+ }
+ if (strcmp(argv[i], "--server") == 0) {
+ std::cerr << "Test Server" << std::endl;
+ this->TestServer = true;
+ this->ServerExecutable = ::FixExecutablePath(argv[i + 1]);
+ ++i; /* Skip executable */
+ this->ServerArgStart = i + 1;
+ this->ServerArgCount = this->ServerArgStart;
+ ArgCountP = &this->ServerArgCount;
+ continue;
+ }
+ if (strcmp(argv[i], "--timeout") == 0) {
+ this->TimeOut = atoi(argv[i + 1]);
+ std::cerr << "The timeout was set to " << this->TimeOut << std::endl;
+ ArgCountP = NULL;
+ continue;
+ }
+ if (strncmp(argv[i], "--allow-errors", strlen("--allow-errors")) == 0) {
+ this->AllowErrorInOutput = true;
+ std::cerr << "The allow errors in output flag was set to " <<
+ this->AllowErrorInOutput << std::endl;
+ ArgCountP = NULL;
+ continue;
+ }
+ if (strncmp(argv[i], "--allow-server-errors", strlen("--allow-server-errors")) == 0) {
+ this->IgnoreServerResult = true;
+ std::cerr << "The allow server errors in output flag was set to " <<
+ this->IgnoreServerResult << std::endl;
+ ArgCountP = NULL;
+ continue;
+ }
+ if (strcmp(argv[i], "--serial") == 0) {
+ this->TestSerial = true;
+ std::cerr << "This is a serial test" << std::endl;
+ ArgCountP = NULL;
+ continue;
+ }
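+        // Arguments that do not match a recognized flag are counted toward the
+        // argument list of the most recently specified executable.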
+ if (ArgCountP)
+ (*ArgCountP)++;
+ }
+
+ return 1;
+}
+
+//----------------------------------------------------------------------------
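+// Build the full command line for one process: optional client environment
+// variables, an optional MPI launcher prefix with pre-flags, the executable
+// itself, the post-flags, and any remaining arguments passed through for the test.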
+void
+H5APITestDriver::CreateCommandLine(vector<const char*> &commandLine,
+ const char *cmd, int isServer, int isHelper, const char *numProc, int argStart,
+ int argCount, char *argv[])
+{
+ if (!isServer && this->ClientEnvVars.size()) {
+ for (unsigned int i = 0; i < this->ClientEnvVars.size(); ++i)
+ commandLine.push_back(this->ClientEnvVars[i].c_str());
+#ifdef H5_API_TEST_CLIENT_INIT_TOKEN_VAR
+ if (this->ClientTokenVar.size())
+ commandLine.push_back(this->ClientTokenVar.c_str());
+#endif
+ }
+
+ if (!isHelper && this->MPIRun.size()) {
+ commandLine.push_back(this->MPIRun.c_str());
+ commandLine.push_back(this->MPINumProcessFlag.c_str());
+ commandLine.push_back(numProc);
+
+ if (isServer)
+ for (unsigned int i = 0; i < this->MPIServerPreFlags.size(); ++i)
+ commandLine.push_back(this->MPIServerPreFlags[i].c_str());
+ else
+ for (unsigned int i = 0; i < this->MPIClientPreFlags.size(); ++i)
+ commandLine.push_back(this->MPIClientPreFlags[i].c_str());
+ }
+
+ commandLine.push_back(cmd);
+
+ if (isServer)
+ for (unsigned int i = 0; i < this->MPIServerPostFlags.size(); ++i)
+ commandLine.push_back(MPIServerPostFlags[i].c_str());
+ else
+ for (unsigned int i = 0; i < this->MPIClientPostFlags.size(); ++i)
+ commandLine.push_back(MPIClientPostFlags[i].c_str());
+
+ // remaining flags for the test
+ for (int ii = argStart; ii < argCount; ++ii) {
+ commandLine.push_back(argv[ii]);
+ }
+
+ commandLine.push_back(0);
+}
+
+//----------------------------------------------------------------------------
+int
+H5APITestDriver::StartServer(h5_api_test_sysProcess *server, const char *name,
+ vector<char> &out, vector<char> &err)
+{
+ if (!server)
+ return 1;
+
+ cerr << "H5APITestDriver: starting process " << name << "\n";
+ h5_api_test_sysProcess_SetTimeout(server, this->TimeOut);
+ h5_api_test_sysProcess_Execute(server);
+ int foundWaiting = 0;
+ string output;
+ while (!foundWaiting) {
+ int pipe = this->WaitForAndPrintLine(name, server, output, 100.0, out,
+ err, H5_API_TEST_SERVER_START_MSG, &foundWaiting);
+ if (pipe == h5_api_test_sysProcess_Pipe_None
+ || pipe == h5_api_test_sysProcess_Pipe_Timeout) {
+ break;
+ }
+ }
+ if (foundWaiting) {
+ cerr << "H5APITestDriver: " << name << " successfully started.\n";
+ return 1;
+ } else {
+ cerr << "H5APITestDriver: " << name << " never started.\n";
+ h5_api_test_sysProcess_Kill(server);
+ return 0;
+ }
+}
+
+//----------------------------------------------------------------------------
+int
+H5APITestDriver::StartClientHelper(h5_api_test_sysProcess *client,
+ const char *name, vector<char> &out, vector<char> &err)
+{
+ if (!client)
+ return 1;
+
+ cerr << "H5APITestDriver: starting process " << name << "\n";
+ h5_api_test_sysProcess_SetTimeout(client, this->TimeOut);
+ h5_api_test_sysProcess_Execute(client);
+ int foundWaiting = 0;
+ string output;
+ while (!foundWaiting) {
+ int pipe = this->WaitForAndPrintLine(name, client, output, 100.0, out,
+ err, H5_API_TEST_CLIENT_HELPER_START_MSG, &foundWaiting);
+ if (pipe == h5_api_test_sysProcess_Pipe_None
+ || pipe == h5_api_test_sysProcess_Pipe_Timeout) {
+ break;
+ }
+ }
+ if (foundWaiting) {
+ cerr << "H5APITestDriver: " << name << " successfully started.\n";
+ return 1;
+ } else {
+ cerr << "H5APITestDriver: " << name << " never started.\n";
+ h5_api_test_sysProcess_Kill(client);
+ return 0;
+ }
+}
+
+//----------------------------------------------------------------------------
+int
+H5APITestDriver::StartClientInit(h5_api_test_sysProcess *client,
+ const char *name, vector<char> &out, vector<char> &err)
+{
+ if (!client)
+ return 1;
+
+ cerr << "H5APITestDriver: starting process " << name << "\n";
+ h5_api_test_sysProcess_SetTimeout(client, this->TimeOut);
+ h5_api_test_sysProcess_Execute(client);
+ int foundToken = 0;
+ string output, token;
+ while (!foundToken) {
+ int pipe = this->WaitForAndPrintLine(name, client, output, 100.0, out,
+ err, NULL, NULL);
+ if (pipe == h5_api_test_sysProcess_Pipe_None
+ || pipe == h5_api_test_sysProcess_Pipe_Timeout) {
+ break;
+ }
+ if (this->OutputStringHasToken(name, H5_API_TEST_CLIENT_INIT_TOKEN_REGEX, output, token)) {
+ foundToken = 1;
+ this->ClientTokenVar = std::string(H5_API_TEST_CLIENT_INIT_TOKEN_VAR)
+ + std::string("=") + std::string(token);
+ break;
+ }
+ }
+
+ if (foundToken) {
+ cerr << "H5APITestDriver: " << name << " token: " << token << " was found.\n";
+ return 1;
+ } else {
+ cerr << "H5APITestDriver: " << name << " token was not found.\n";
+ return 0;
+ }
+}
+
+//----------------------------------------------------------------------------
+int
+H5APITestDriver::StartClient(h5_api_test_sysProcess *client, const char *name)
+{
+ if (!client)
+ return 1;
+
+ cerr << "H5APITestDriver: starting process " << name << "\n";
+ h5_api_test_sysProcess_SetTimeout(client, this->TimeOut);
+ h5_api_test_sysProcess_Execute(client);
+ if (h5_api_test_sysProcess_GetState(client)
+ == h5_api_test_sysProcess_State_Executing) {
+ cerr << "H5APITestDriver: " << name << " successfully started.\n";
+ return 1;
+ } else {
+ this->ReportStatus(client, name);
+ h5_api_test_sysProcess_Kill(client);
+ return 0;
+ }
+}
+
+//----------------------------------------------------------------------------
+void
+H5APITestDriver::Stop(h5_api_test_sysProcess *p, const char *name)
+{
+ if (p) {
+ cerr << "H5APITestDriver: killing process " << name << "\n";
+ h5_api_test_sysProcess_Kill(p);
+ h5_api_test_sysProcess_WaitForExit(p, 0);
+ }
+}
+
+//----------------------------------------------------------------------------
+int
+H5APITestDriver::OutputStringHasError(const char *pname, string &output)
+{
+ const char* possibleMPIErrors[] = {"error", "Error", "Missing:",
+ "core dumped", "process in local group is dead", "Segmentation fault",
+ "erroneous", "ERROR:", "Error:",
+ "mpirun can *only* be used with MPI programs", "due to signal",
+ "failure", "abnormal termination", "failed", "FAILED", "Failed", 0};
+
+ const char* nonErrors[] = {
+ "Memcheck, a memory error detector", //valgrind
+ 0};
+
+ if (this->AllowErrorInOutput)
+ return 0;
+
+ vector<string> lines;
+ vector<string>::iterator it;
+ h5_api_test_sys::SystemTools::Split(output.c_str(), lines);
+
+ int i, j;
+
+ for (it = lines.begin(); it != lines.end(); ++it) {
+ for (i = 0; possibleMPIErrors[i]; ++i) {
+ if (it->find(possibleMPIErrors[i]) != it->npos) {
+ int found = 1;
+ for (j = 0; nonErrors[j]; ++j) {
+ if (it->find(nonErrors[j]) != it->npos) {
+ found = 0;
+ cerr << "Non error \"" << it->c_str()
+ << "\" suppressed " << std::endl;
+ }
+ }
+ if (found) {
+ cerr
+ << "H5APITestDriver: ***** Test will fail, because the string: \""
+ << possibleMPIErrors[i]
+ << "\"\nH5APITestDriver: ***** was found in the following output from the "
+ << pname << ":\n\"" << it->c_str() << "\"\n";
+ return 1;
+ }
+ }
+ }
+ }
+ return 0;
+}
+
+//----------------------------------------------------------------------------
+int
+H5APITestDriver::OutputStringHasToken(const char *pname, const char *regex,
+ string &output, string &token)
+{
+ vector<string> lines;
+ vector<string>::iterator it;
+ h5_api_test_sys::SystemTools::Split(output.c_str(), lines);
+ h5_api_test_sys::RegularExpression re(regex);
+
+ for (it = lines.begin(); it != lines.end(); ++it) {
+ if (re.find(*it)) {
+ token = re.match(1);
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+//----------------------------------------------------------------------------
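+// Convenience macros: H5_API_CLEAN_PROCESSES frees all process managers, and
+// H5_API_EXECUTE_CMD runs a semicolon-separated list of shell commands.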
+#define H5_API_CLEAN_PROCESSES do { \
+ h5_api_test_sysProcess_Delete(client); \
+ h5_api_test_sysProcess_Delete(client_helper); \
+ h5_api_test_sysProcess_Delete(client_init); \
+ h5_api_test_sysProcess_Delete(server); \
+} while (0)
+
+#define H5_API_EXECUTE_CMD(cmd) do { \
+ if (strlen(cmd) > 0) { \
+ std::vector<std::string> commands = \
+ h5_api_test_sys::SystemTools::SplitString(cmd, ';'); \
+ for (unsigned int cc = 0; cc < commands.size(); cc++) { \
+ std::string command = commands[cc]; \
+ if (command.size() > 0) { \
+ std::cout << command.c_str() << std::endl; \
+ system(command.c_str()); \
+ } \
+ } \
+ } \
+} while (0)
+
+//----------------------------------------------------------------------------
+int
+H5APITestDriver::Main(int argc, char* argv[])
+{
+#ifdef H5_API_TEST_INIT_COMMAND
+ // run user-specified commands before initialization.
+ // For example: "killall -9 rsh test;"
+ H5_API_EXECUTE_CMD(H5_API_TEST_INIT_COMMAND);
+#endif
+
+ if (!this->ProcessCommandLine(argc, argv))
+ return 1;
+ this->CollectConfiguredOptions();
+
+ // mpi code
+ // Allocate process managers.
+ h5_api_test_sysProcess *server = 0;
+ h5_api_test_sysProcess *client = 0;
+ h5_api_test_sysProcess *client_helper = 0;
+ h5_api_test_sysProcess *client_init = 0;
+
+ if (this->TestServer) {
+ server = h5_api_test_sysProcess_New();
+ if (!server) {
+ H5_API_CLEAN_PROCESSES;
+ cerr << "H5APITestDriver: Cannot allocate h5_api_test_sysProcess to "
+ "run the server.\n";
+ return 1;
+ }
+ }
+ if (this->ClientHelper) {
+ client_helper = h5_api_test_sysProcess_New();
+ if (!client_helper) {
+            H5_API_CLEAN_PROCESSES;
+ cerr << "H5APITestDriver: Cannot allocate h5_api_test_sysProcess to "
+ "run the client helper.\n";
+ return 1;
+ }
+ }
+ if (this->ClientInit) {
+ client_init = h5_api_test_sysProcess_New();
+ if (!client_init) {
+ H5_API_CLEAN_PROCESSES;
+ cerr << "H5APITestDriver: Cannot allocate h5_api_test_sysProcess to "
+ "run the client init.\n";
+ return 1;
+ }
+ }
+ client = h5_api_test_sysProcess_New();
+ if (!client) {
+ H5_API_CLEAN_PROCESSES;
+ cerr << "H5APITestDriver: Cannot allocate h5_api_test_sysProcess to "
+ "run the client.\n";
+ return 1;
+ }
+
+ vector<char> ClientStdOut;
+ vector<char> ClientStdErr;
+ vector<char> ClientHelperStdOut;
+ vector<char> ClientHelperStdErr;
+ vector<char> ClientInitStdOut;
+ vector<char> ClientInitStdErr;
+ vector<char> ServerStdOut;
+ vector<char> ServerStdErr;
+
+ vector<const char *> serverCommand;
+ if (server) {
+ const char* serverExe = this->ServerExecutable.c_str();
+
+ this->CreateCommandLine(serverCommand, serverExe, 1, 0,
+ this->MPIServerNumProcessFlag.c_str(), this->ServerArgStart,
+ this->ServerArgCount, argv);
+ this->ReportCommand(&serverCommand[0], "server");
+ h5_api_test_sysProcess_SetCommand(server, &serverCommand[0]);
+ h5_api_test_sysProcess_SetWorkingDirectory(server,
+ this->GetDirectory(serverExe).c_str());
+ }
+
+ vector<const char *> clientHelperCommand;
+ if (client_helper) {
+ // Construct the client helper process command line.
+ const char *clientHelperExe = this->ClientHelperExecutable.c_str();
+ this->CreateCommandLine(clientHelperCommand, clientHelperExe, 0, 1,
+ "1", this->ClientHelperArgStart,
+ this->ClientHelperArgCount, argv);
+ this->ReportCommand(&clientHelperCommand[0], "client_helper");
+ h5_api_test_sysProcess_SetCommand(client_helper, &clientHelperCommand[0]);
+ h5_api_test_sysProcess_SetWorkingDirectory(client_helper,
+ this->GetDirectory(clientHelperExe).c_str());
+ }
+
+ vector<const char *> clientInitCommand;
+ if (client_init) {
+        // Construct the client init process command line.
+ const char *clientInitExe = this->ClientInitExecutable.c_str();
+ this->CreateCommandLine(clientInitCommand, clientInitExe, 0, 1,
+ "1", this->ClientInitArgStart, this->ClientInitArgCount, argv);
+ this->ReportCommand(&clientInitCommand[0], "client_init");
+ h5_api_test_sysProcess_SetCommand(client_init, &clientInitCommand[0]);
+ h5_api_test_sysProcess_SetWorkingDirectory(client_init,
+ this->GetDirectory(clientInitExe).c_str());
+ }
+
+ // Start the server if there is one
+ if (!this->StartServer(server, "server", ServerStdOut, ServerStdErr)) {
+ cerr << "H5APITestDriver: Server never started.\n";
+ H5_API_CLEAN_PROCESSES;
+ return -1;
+ }
+
+ // Start the client helper here if there is one
+ if (!this->StartClientHelper(client_helper, "client_helper",
+ ClientHelperStdOut, ClientHelperStdErr)) {
+ cerr << "H5APITestDriver: Client Helper never started.\n";
+ this->Stop(server, "server");
+#ifdef H5_API_TEST_SERVER_EXIT_COMMAND
+ H5_API_EXECUTE_CMD(H5_API_TEST_SERVER_EXIT_COMMAND);
+#endif
+ H5_API_CLEAN_PROCESSES;
+ return -1;
+ }
+
+ // Start the client init here if there is one
+ if (!this->StartClientInit(client_init, "client_init",
+ ClientInitStdOut, ClientInitStdErr)) {
+ cerr << "H5APITestDriver: Client Init never started.\n";
+ this->Stop(server, "server");
+#ifdef H5_API_TEST_SERVER_EXIT_COMMAND
+ H5_API_EXECUTE_CMD(H5_API_TEST_SERVER_EXIT_COMMAND);
+#endif
+ this->Stop(client_helper, "client_helper");
+#ifdef H5_API_TEST_CLIENT_HELPER_EXIT_COMMAND
+ H5_API_EXECUTE_CMD(H5_API_TEST_CLIENT_HELPER_EXIT_COMMAND);
+#endif
+ H5_API_CLEAN_PROCESSES;
+ return -1;
+ }
+
+ // Construct the client process command line.
+ vector<const char *> clientCommand;
+ const char *clientExe = this->ClientExecutable.c_str();
+ this->CreateCommandLine(clientCommand, clientExe, 0, 0,
+ this->MPIClientNumProcessFlag.c_str(), this->ClientArgStart,
+ this->ClientArgCount, argv);
+ this->ReportCommand(&clientCommand[0], "client");
+ h5_api_test_sysProcess_SetCommand(client, &clientCommand[0]);
+ h5_api_test_sysProcess_SetWorkingDirectory(client,
+ this->GetDirectory(clientExe).c_str());
+
+ // Now run the client
+ if (!this->StartClient(client, "client")) {
+ this->Stop(server, "server");
+ this->Stop(client_helper, "client_helper");
+ this->Stop(client_init, "client_init");
+ H5_API_CLEAN_PROCESSES;
+ return -1;
+ }
+
+ // Report the output of the processes.
+ int clientPipe = 1;
+
+ string output;
+ int mpiError = 0;
+ while (clientPipe) {
+ clientPipe = this->WaitForAndPrintLine("client", client, output, 0.1,
+ ClientStdOut, ClientStdErr, NULL, NULL);
+ if (!mpiError && this->OutputStringHasError("client", output)) {
+ mpiError = 1;
+ }
+        // If the client has died, wait for output from the server process for
+        // this->ServerExitTimeOut seconds, then kill the server if needed.
+ double timeout = (clientPipe) ? 0 : this->ServerExitTimeOut;
+ output = "";
+ this->WaitForAndPrintLine("server", server, output, timeout,
+ ServerStdOut, ServerStdErr, NULL, NULL);
+ if (!mpiError && this->OutputStringHasError("server", output)) {
+ mpiError = 1;
+ }
+ output = "";
+ }
+
+ // Wait for the client and server to exit.
+ h5_api_test_sysProcess_WaitForExit(client, 0);
+
+    // Once the client has finished, the server must finish quickly. If it does
+    // not, it is usually a sign that the client crashed/exited before it
+    // attempted to connect to the server.
+ if (server) {
+#ifdef H5_API_TEST_SERVER_EXIT_COMMAND
+ H5_API_EXECUTE_CMD(H5_API_TEST_SERVER_EXIT_COMMAND);
+#endif
+ h5_api_test_sysProcess_WaitForExit(server, &this->ServerExitTimeOut);
+ }
+
+ if (client_helper) {
+#ifdef H5_API_TEST_CLIENT_HELPER_EXIT_COMMAND
+ H5_API_EXECUTE_CMD(H5_API_TEST_CLIENT_HELPER_EXIT_COMMAND);
+#endif
+ h5_api_test_sysProcess_WaitForExit(client_helper, 0);
+ }
+
+ // Get the results.
+ int clientResult = this->ReportStatus(client, "client");
+ int serverResult = 0;
+ if (server) {
+ serverResult = this->ReportStatus(server, "server");
+ h5_api_test_sysProcess_Kill(server);
+ }
+
+ // Free process managers.
+ H5_API_CLEAN_PROCESSES;
+
+ // Report the server return code if it is nonzero. Otherwise report
+ // the client return code.
+ if (serverResult && !this->IgnoreServerResult)
+ return serverResult;
+
+ if (mpiError) {
+ cerr
+            << "H5APITestDriver: Error string found in output, H5APITestDriver returning "
+ << mpiError << "\n";
+ return mpiError;
+ }
+
+ // if server is fine return the client result
+ return clientResult;
+}
+
+//----------------------------------------------------------------------------
+void
+H5APITestDriver::ReportCommand(const char * const *command, const char *name)
+{
+ cerr << "H5APITestDriver: " << name << " command is:\n";
+ for (const char * const *c = command; *c; ++c)
+ cerr << " \"" << *c << "\"";
+ cerr << "\n";
+}
+
+//----------------------------------------------------------------------------
+int
+H5APITestDriver::ReportStatus(h5_api_test_sysProcess *process, const char *name)
+{
+ int result = 1;
+ switch (h5_api_test_sysProcess_GetState(process)) {
+ case h5_api_test_sysProcess_State_Starting: {
+ cerr << "H5APITestDriver: Never started " << name << " process.\n";
+ }
+ break;
+ case h5_api_test_sysProcess_State_Error: {
+ cerr << "H5APITestDriver: Error executing " << name << " process: "
+ << h5_api_test_sysProcess_GetErrorString(process) << "\n";
+ }
+ break;
+ case h5_api_test_sysProcess_State_Exception: {
+ cerr << "H5APITestDriver: " << name
+ << " process exited with an exception: ";
+ switch (h5_api_test_sysProcess_GetExitException(process)) {
+ case h5_api_test_sysProcess_Exception_None: {
+ cerr << "None";
+ }
+ break;
+ case h5_api_test_sysProcess_Exception_Fault: {
+ cerr << "Segmentation fault";
+ }
+ break;
+ case h5_api_test_sysProcess_Exception_Illegal: {
+ cerr << "Illegal instruction";
+ }
+ break;
+ case h5_api_test_sysProcess_Exception_Interrupt: {
+ cerr << "Interrupted by user";
+ }
+ break;
+ case h5_api_test_sysProcess_Exception_Numerical: {
+ cerr << "Numerical exception";
+ }
+ break;
+ case h5_api_test_sysProcess_Exception_Other: {
+ cerr << "Unknown";
+ }
+ break;
+ }
+ cerr << "\n";
+ }
+ break;
+ case h5_api_test_sysProcess_State_Executing: {
+ cerr << "H5APITestDriver: Never terminated " << name
+ << " process.\n";
+ }
+ break;
+ case h5_api_test_sysProcess_State_Exited: {
+ result = h5_api_test_sysProcess_GetExitValue(process);
+ cerr << "H5APITestDriver: " << name << " process exited with code "
+ << result << "\n";
+ }
+ break;
+ case h5_api_test_sysProcess_State_Expired: {
+ cerr << "H5APITestDriver: killed " << name
+ << " process due to timeout.\n";
+ }
+ break;
+ case h5_api_test_sysProcess_State_Killed: {
+ cerr << "H5APITestDriver: killed " << name << " process.\n";
+ }
+ break;
+ }
+ return result;
+}
+
+//----------------------------------------------------------------------------
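+// Scan the buffered stdout/stderr data for the next complete line, waiting for
+// more process output when necessary. Returns the pipe that produced the line,
+// or Pipe_None/Pipe_Timeout when no further data is available.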
+int
+H5APITestDriver::WaitForLine(h5_api_test_sysProcess *process, string &line,
+ double timeout, vector<char> &out, vector<char> &err)
+{
+ line = "";
+ vector<char>::iterator outiter = out.begin();
+ vector<char>::iterator erriter = err.begin();
+ while (1) {
+ // Check for a newline in stdout.
+ for (; outiter != out.end(); ++outiter) {
+ if ((*outiter == '\r') && ((outiter + 1) == out.end())) {
+ break;
+ } else if (*outiter == '\n' || *outiter == '\0') {
+ int length = outiter - out.begin();
+ if (length > 1 && *(outiter - 1) == '\r')
+ --length;
+ if (length > 0)
+ line.append(&out[0], length);
+ out.erase(out.begin(), outiter + 1);
+ return h5_api_test_sysProcess_Pipe_STDOUT;
+ }
+ }
+
+ // Check for a newline in stderr.
+ for (; erriter != err.end(); ++erriter) {
+ if ((*erriter == '\r') && ((erriter + 1) == err.end())) {
+ break;
+ } else if (*erriter == '\n' || *erriter == '\0') {
+ int length = erriter - err.begin();
+ if (length > 1 && *(erriter - 1) == '\r')
+ --length;
+ if (length > 0)
+ line.append(&err[0], length);
+ err.erase(err.begin(), erriter + 1);
+ return h5_api_test_sysProcess_Pipe_STDERR;
+ }
+ }
+
+ // No newlines found. Wait for more data from the process.
+ int length;
+ char *data;
+ int pipe = h5_api_test_sysProcess_WaitForData(process, &data, &length,
+ &timeout);
+ if (pipe == h5_api_test_sysProcess_Pipe_Timeout) {
+ // Timeout has been exceeded.
+ return pipe;
+ } else if (pipe == h5_api_test_sysProcess_Pipe_STDOUT) {
+ // Append to the stdout buffer.
+ vector<char>::size_type size = out.size();
+ out.insert(out.end(), data, data + length);
+ outiter = out.begin() + size;
+ } else if (pipe == h5_api_test_sysProcess_Pipe_STDERR) {
+ // Append to the stderr buffer.
+ vector<char>::size_type size = err.size();
+ err.insert(err.end(), data, data + length);
+ erriter = err.begin() + size;
+ } else if (pipe == h5_api_test_sysProcess_Pipe_None) {
+ // Both stdout and stderr pipes have broken. Return leftover data.
+ if (!out.empty()) {
+ line.append(&out[0], outiter - out.begin());
+ out.erase(out.begin(), out.end());
+ return h5_api_test_sysProcess_Pipe_STDOUT;
+ } else if (!err.empty()) {
+ line.append(&err[0], erriter - err.begin());
+ err.erase(err.begin(), err.end());
+ return h5_api_test_sysProcess_Pipe_STDERR;
+ } else {
+ return h5_api_test_sysProcess_Pipe_None;
+ }
+ }
+ }
+}
+
+//----------------------------------------------------------------------------
+void
+H5APITestDriver::PrintLine(const char *pname, const char *line)
+{
+ // if the name changed then the line is output from a different process
+ if (this->CurrentPrintLineName != pname) {
+ cerr << "-------------- " << pname << " output --------------\n";
+ // save the current pname
+ this->CurrentPrintLineName = pname;
+ }
+ cerr << line << "\n";
+ cerr.flush();
+}
+
+//----------------------------------------------------------------------------
+int
+H5APITestDriver::WaitForAndPrintLine(const char *pname,
+ h5_api_test_sysProcess *process, string &line, double timeout,
+ vector<char> &out, vector<char> &err, const char *waitMsg,
+ int *foundWaiting)
+{
+ int pipe = this->WaitForLine(process, line, timeout, out, err);
+ if (pipe == h5_api_test_sysProcess_Pipe_STDOUT
+ || pipe == h5_api_test_sysProcess_Pipe_STDERR) {
+ this->PrintLine(pname, line.c_str());
+ if (foundWaiting && (line.find(waitMsg) != line.npos))
+ *foundWaiting = 1;
+ }
+ return pipe;
+}
+
+//----------------------------------------------------------------------------
+string
+H5APITestDriver::GetDirectory(string location)
+{
+ return h5_api_test_sys::SystemTools::GetParentDirectory(location.c_str());
+}
diff --git a/test/API/driver/h5_api_test_driver.hxx b/test/API/driver/h5_api_test_driver.hxx
new file mode 100644
index 0000000..b8e05e7
--- /dev/null
+++ b/test/API/driver/h5_api_test_driver.hxx
@@ -0,0 +1,93 @@
+#ifndef H5_API_TEST_DRIVER_H
+#define H5_API_TEST_DRIVER_H
+
+#include <string>
+#include <vector>
+
+#include <h5_api_test_sys/Process.h>
+
+class H5APITestDriver {
+public:
+ int Main(int argc, char *argv[]);
+ H5APITestDriver();
+ ~H5APITestDriver();
+
+protected:
+ void SeparateArguments(const char* str, std::vector<std::string> &flags);
+
+ void ReportCommand(const char * const *command, const char *name);
+ int ReportStatus(h5_api_test_sysProcess *process, const char *name);
+ int ProcessCommandLine(int argc, char *argv[]);
+ void CollectConfiguredOptions();
+ void CreateCommandLine(std::vector<const char *> &commandLine,
+ const char *cmd, int isServer, int isHelper, const char *numProc,
+ int argStart = 0, int argCount = 0, char *argv[] = 0);
+
+ int StartServer(h5_api_test_sysProcess *server, const char *name,
+ std::vector<char> &out, std::vector<char> &err);
+ int StartClientHelper(h5_api_test_sysProcess *client, const char *name,
+ std::vector<char> &out, std::vector<char> &err);
+ int StartClientInit(h5_api_test_sysProcess *client, const char *name,
+ std::vector<char> &out, std::vector<char> &err);
+ int StartClient(h5_api_test_sysProcess *client, const char *name);
+ void Stop(h5_api_test_sysProcess *p, const char *name);
+ int OutputStringHasError(const char *pname, std::string &output);
+ int OutputStringHasToken(const char *pname, const char *regex,
+ std::string &output, std::string &token);
+
+ int WaitForLine(h5_api_test_sysProcess *process, std::string &line,
+ double timeout, std::vector<char> &out, std::vector<char> &err);
+ void PrintLine(const char *pname, const char *line);
+ int WaitForAndPrintLine(const char *pname, h5_api_test_sysProcess *process,
+ std::string &line, double timeout, std::vector<char> &out,
+ std::vector<char> &err, const char *waitMsg, int *foundWaiting);
+
+ std::string GetDirectory(std::string location);
+
+private:
+ std::string ClientExecutable; // fullpath to client executable
+ std::string ClientHelperExecutable; // fullpath to client helper executable
+ std::string ClientInitExecutable; // fullpath to client init executable
+ std::string ServerExecutable; // fullpath to server executable
+ std::string MPIRun; // fullpath to mpirun executable
+
+    // These specify the pre-flags and post-flags that can be set at configure
+    // time via MPIEXEC_PREFLAGS / MPIEXEC_POSTFLAGS (and their server variants)
+ // std::vector<std::string> MPIPreNumProcFlags;
+ std::vector<std::string> ClientEnvVars;
+ std::vector<std::string> MPIClientPreFlags;
+ std::vector<std::string> MPIClientPostFlags;
+ std::vector<std::string> MPIServerPreFlags;
+ std::vector<std::string> MPIServerPostFlags;
+
+    // Specify the number-of-processes flag; this can be set at configure time
+    // via MPIEXEC_NUMPROC_FLAG. The process counts are then split into
+    // MPIServerNumProcessFlag and MPIClientNumProcessFlag.
+ std::string MPINumProcessFlag;
+ std::string MPIServerNumProcessFlag;
+ std::string MPIClientNumProcessFlag;
+
+ std::string ClientTokenVar; // use token to launch client if requested
+
+ std::string CurrentPrintLineName;
+
+ double TimeOut;
+ double ServerExitTimeOut; // time to wait for servers to finish.
+ bool ClientHelper;
+ bool ClientInit;
+ bool TestServer;
+
+ int ClientArgStart;
+ int ClientArgCount;
+ int ClientHelperArgStart;
+ int ClientHelperArgCount;
+ int ClientInitArgStart;
+ int ClientInitArgCount;
+ int ServerArgStart;
+ int ServerArgCount;
+ bool AllowErrorInOutput;
+ bool TestSerial;
+ bool IgnoreServerResult;
+};
+
+#endif //H5_API_TEST_DRIVER_H
diff --git a/test/API/driver/kwsys/.clang-format b/test/API/driver/kwsys/.clang-format
new file mode 100644
index 0000000..588b790
--- /dev/null
+++ b/test/API/driver/kwsys/.clang-format
@@ -0,0 +1,22 @@
+---
+# This configuration requires clang-format version 6.0 exactly.
+BasedOnStyle: Mozilla
+AlignOperands: false
+AllowShortFunctionsOnASingleLine: InlineOnly
+AlwaysBreakAfterDefinitionReturnType: None
+AlwaysBreakAfterReturnType: None
+BinPackArguments: true
+BinPackParameters: true
+BraceWrapping:
+ AfterClass: true
+ AfterEnum: true
+ AfterFunction: true
+ AfterStruct: true
+ AfterUnion: true
+BreakBeforeBraces: Custom
+ColumnLimit: 79
+IndentPPDirectives: AfterHash
+SortUsingDeclarations: false
+SpaceAfterTemplateKeyword: true
+Standard: Cpp03
+...
diff --git a/test/API/driver/kwsys/.hooks-config b/test/API/driver/kwsys/.hooks-config
new file mode 100644
index 0000000..739cdd2
--- /dev/null
+++ b/test/API/driver/kwsys/.hooks-config
@@ -0,0 +1,2 @@
+[hooks "chain"]
+ pre-commit = GitSetup/pre-commit
diff --git a/test/API/driver/kwsys/Base64.c b/test/API/driver/kwsys/Base64.c
new file mode 100644
index 0000000..bf876f2
--- /dev/null
+++ b/test/API/driver/kwsys/Base64.c
@@ -0,0 +1,225 @@
+/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying
+ file Copyright.txt or https://cmake.org/licensing#kwsys for details. */
+#include "kwsysPrivate.h"
+#include KWSYS_HEADER(Base64.h)
+
+/* Work-around CMake dependency scanning limitation. This must
+ duplicate the above list of headers. */
+#if 0
+# include "Base64.h.in"
+#endif
+
+static const unsigned char kwsysBase64EncodeTable[65] =
+ "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
+ "abcdefghijklmnopqrstuvwxyz"
+ "0123456789+/";
+
+static const unsigned char kwsysBase64DecodeTable[256] = {
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0x3E, 0xFF, 0xFF, 0xFF, 0x3F, 0x34, 0x35, 0x36, 0x37,
+ 0x38, 0x39, 0x3A, 0x3B, 0x3C, 0x3D, 0xFF, 0xFF, 0xFF, 0x00, 0xFF, 0xFF, 0xFF,
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C,
+ 0x0D, 0x0E, 0x0F, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F, 0x20,
+ 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x2B, 0x2C, 0x2D,
+ 0x2E, 0x2F, 0x30, 0x31, 0x32, 0x33, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ /*------------------------------------*/
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF
+};
+
+static unsigned char kwsysBase64EncodeChar(int c)
+{
+ return kwsysBase64EncodeTable[(unsigned char)c];
+}
+
+static unsigned char kwsysBase64DecodeChar(unsigned char c)
+{
+ return kwsysBase64DecodeTable[c];
+}
+
+/* Encode 3 bytes into a 4 byte string. */
+void kwsysBase64_Encode3(const unsigned char* src, unsigned char* dest)
+{
+ dest[0] = kwsysBase64EncodeChar((src[0] >> 2) & 0x3F);
+ dest[1] =
+ kwsysBase64EncodeChar(((src[0] << 4) & 0x30) | ((src[1] >> 4) & 0x0F));
+ dest[2] =
+ kwsysBase64EncodeChar(((src[1] << 2) & 0x3C) | ((src[2] >> 6) & 0x03));
+ dest[3] = kwsysBase64EncodeChar(src[2] & 0x3F);
+}
+
+/* Encode 2 bytes into a 4 byte string. */
+void kwsysBase64_Encode2(const unsigned char* src, unsigned char* dest)
+{
+ dest[0] = kwsysBase64EncodeChar((src[0] >> 2) & 0x3F);
+ dest[1] =
+ kwsysBase64EncodeChar(((src[0] << 4) & 0x30) | ((src[1] >> 4) & 0x0F));
+ dest[2] = kwsysBase64EncodeChar(((src[1] << 2) & 0x3C));
+ dest[3] = '=';
+}
+
+/* Encode 1 byte into a 4 byte string. */
+void kwsysBase64_Encode1(const unsigned char* src, unsigned char* dest)
+{
+ dest[0] = kwsysBase64EncodeChar((src[0] >> 2) & 0x3F);
+ dest[1] = kwsysBase64EncodeChar(((src[0] << 4) & 0x30));
+ dest[2] = '=';
+ dest[3] = '=';
+}
+
+/* Encode 'length' bytes from the input buffer and store the
+ encoded stream into the output buffer. Return the length of the encoded
+ buffer (output). Note that the output buffer must be allocated by the caller
+   (length * 1.5 should be a safe estimate). If 'mark_end' is true then an
+ extra set of 4 bytes is added to the end of the stream if the input is a
+ multiple of 3 bytes. These bytes are invalid chars and therefore they will
+ stop the decoder thus enabling the caller to decode a stream without
+ actually knowing how much data to expect (if the input is not a multiple of
+ 3 bytes then the extra padding needed to complete the encode 4 bytes will
+ stop the decoding anyway). */
+size_t kwsysBase64_Encode(const unsigned char* input, size_t length,
+ unsigned char* output, int mark_end)
+{
+ const unsigned char* ptr = input;
+ const unsigned char* end = input + length;
+ unsigned char* optr = output;
+
+ /* Encode complete triplet */
+
+ while ((end - ptr) >= 3) {
+ kwsysBase64_Encode3(ptr, optr);
+ ptr += 3;
+ optr += 4;
+ }
+
+  /* Encode a 2-byte ending into 3 bytes and 1 pad byte. */
+
+ if (end - ptr == 2) {
+ kwsysBase64_Encode2(ptr, optr);
+ optr += 4;
+ }
+
+ /* Encodes a 1-byte ending into 2 bytes and 2 pad bytes */
+
+ else if (end - ptr == 1) {
+ kwsysBase64_Encode1(ptr, optr);
+ optr += 4;
+ }
+
+ /* Do we need to mark the end */
+
+ else if (mark_end) {
+ optr[0] = optr[1] = optr[2] = optr[3] = '=';
+ optr += 4;
+ }
+
+ return (size_t)(optr - output);
+}
+
+/* Decode 4 bytes into a 3 byte string. */
+int kwsysBase64_Decode3(const unsigned char* src, unsigned char* dest)
+{
+ unsigned char d0, d1, d2, d3;
+
+ d0 = kwsysBase64DecodeChar(src[0]);
+ d1 = kwsysBase64DecodeChar(src[1]);
+ d2 = kwsysBase64DecodeChar(src[2]);
+ d3 = kwsysBase64DecodeChar(src[3]);
+
+ /* Make sure all characters were valid */
+
+ if (d0 == 0xFF || d1 == 0xFF || d2 == 0xFF || d3 == 0xFF) {
+ return 0;
+ }
+
+ /* Decode the 3 bytes */
+
+ dest[0] = (unsigned char)(((d0 << 2) & 0xFC) | ((d1 >> 4) & 0x03));
+ dest[1] = (unsigned char)(((d1 << 4) & 0xF0) | ((d2 >> 2) & 0x0F));
+ dest[2] = (unsigned char)(((d2 << 6) & 0xC0) | ((d3 >> 0) & 0x3F));
+
+ /* Return the number of bytes actually decoded */
+
+ if (src[2] == '=') {
+ return 1;
+ }
+ if (src[3] == '=') {
+ return 2;
+ }
+ return 3;
+}
+
+/* Decode bytes from the input buffer and store the decoded stream
+ into the output buffer until 'length' bytes have been decoded. Return the
+ real length of the decoded stream (which should be equal to 'length'). Note
+ that the output buffer must be allocated by the caller. If
+ 'max_input_length' is not null, then it specifies the number of encoded
+ bytes that should be at most read from the input buffer. In that case the
+ 'length' parameter is ignored. This enables the caller to decode a stream
+ without actually knowing how much decoded data to expect (of course, the
+ buffer must be large enough). */
+size_t kwsysBase64_Decode(const unsigned char* input, size_t length,
+ unsigned char* output, size_t max_input_length)
+{
+ const unsigned char* ptr = input;
+ unsigned char* optr = output;
+
+ /* Decode complete triplet */
+
+ if (max_input_length) {
+ const unsigned char* end = input + max_input_length;
+ while (ptr < end) {
+ int len = kwsysBase64_Decode3(ptr, optr);
+ optr += len;
+ if (len < 3) {
+ return (size_t)(optr - output);
+ }
+ ptr += 4;
+ }
+ } else {
+ unsigned char* oend = output + length;
+ while ((oend - optr) >= 3) {
+ int len = kwsysBase64_Decode3(ptr, optr);
+ optr += len;
+ if (len < 3) {
+ return (size_t)(optr - output);
+ }
+ ptr += 4;
+ }
+
+ /* Decode the last triplet */
+
+ if (oend - optr == 2) {
+ unsigned char temp[3];
+ int len = kwsysBase64_Decode3(ptr, temp);
+ if (len >= 2) {
+ optr[0] = temp[0];
+ optr[1] = temp[1];
+ optr += 2;
+ } else if (len > 0) {
+ optr[0] = temp[0];
+ optr += 1;
+ }
+ } else if (oend - optr == 1) {
+ unsigned char temp[3];
+ int len = kwsysBase64_Decode3(ptr, temp);
+ if (len > 0) {
+ optr[0] = temp[0];
+ optr += 1;
+ }
+ }
+ }
+
+ return (size_t)(optr - output);
+}
diff --git a/test/API/driver/kwsys/Base64.h.in b/test/API/driver/kwsys/Base64.h.in
new file mode 100644
index 0000000..729f972
--- /dev/null
+++ b/test/API/driver/kwsys/Base64.h.in
@@ -0,0 +1,110 @@
+/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying
+ file Copyright.txt or https://cmake.org/licensing#kwsys for details. */
+#ifndef @KWSYS_NAMESPACE@_Base64_h
+#define @KWSYS_NAMESPACE@_Base64_h
+
+#include <@KWSYS_NAMESPACE@/Configure.h>
+
+#include <stddef.h> /* size_t */
+
+/* Redefine all public interface symbol names to be in the proper
+ namespace. These macros are used internally to kwsys only, and are
+ not visible to user code. Use kwsysHeaderDump.pl to reproduce
+ these macros after making changes to the interface. */
+#if !defined(KWSYS_NAMESPACE)
+# define kwsys_ns(x) @KWSYS_NAMESPACE@##x
+# define kwsysEXPORT @KWSYS_NAMESPACE@_EXPORT
+#endif
+#if !@KWSYS_NAMESPACE@_NAME_IS_KWSYS
+# define kwsysBase64 kwsys_ns(Base64)
+# define kwsysBase64_Decode kwsys_ns(Base64_Decode)
+# define kwsysBase64_Decode3 kwsys_ns(Base64_Decode3)
+# define kwsysBase64_Encode kwsys_ns(Base64_Encode)
+# define kwsysBase64_Encode1 kwsys_ns(Base64_Encode1)
+# define kwsysBase64_Encode2 kwsys_ns(Base64_Encode2)
+# define kwsysBase64_Encode3 kwsys_ns(Base64_Encode3)
+#endif
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+/**
+ * Encode 3 bytes into a 4 byte string.
+ */
+kwsysEXPORT void kwsysBase64_Encode3(const unsigned char* src,
+ unsigned char* dest);
+
+/**
+ * Encode 2 bytes into a 4 byte string.
+ */
+kwsysEXPORT void kwsysBase64_Encode2(const unsigned char* src,
+ unsigned char* dest);
+
+/**
+ * Encode 1 byte into a 4 byte string.
+ */
+kwsysEXPORT void kwsysBase64_Encode1(const unsigned char* src,
+ unsigned char* dest);
+
+/**
+ * Encode 'length' bytes from the input buffer and store the encoded
+ * stream into the output buffer. Return the length of the encoded
+ * buffer (output). Note that the output buffer must be allocated by
+ * the caller (length * 1.5 should be a safe estimate). If 'mark_end'
+ * is true then an extra set of 4 bytes is added to the end of the
+ * stream if the input is a multiple of 3 bytes. These bytes are
+ * invalid chars and therefore they will stop the decoder thus
+ * enabling the caller to decode a stream without actually knowing how
+ * much data to expect (if the input is not a multiple of 3 bytes then
+ * the extra padding needed to complete the encode 4 bytes will stop
+ * the decoding anyway).
+ */
+kwsysEXPORT size_t kwsysBase64_Encode(const unsigned char* input,
+ size_t length, unsigned char* output,
+ int mark_end);
+
+/**
+ * Decode 4 bytes into a 3 byte string. Returns the number of bytes
+ * actually decoded.
+ */
+kwsysEXPORT int kwsysBase64_Decode3(const unsigned char* src,
+ unsigned char* dest);
+
+/**
+ * Decode bytes from the input buffer and store the decoded stream
+ * into the output buffer until 'length' bytes have been decoded.
+ * Return the real length of the decoded stream (which should be equal
+ * to 'length'). Note that the output buffer must be allocated by the
+ * caller. If 'max_input_length' is not null, then it specifies the
+ * number of encoded bytes that should be at most read from the input
+ * buffer. In that case the 'length' parameter is ignored. This
+ * enables the caller to decode a stream without actually knowing how
+ * much decoded data to expect (of course, the buffer must be large
+ * enough).
+ */
+kwsysEXPORT size_t kwsysBase64_Decode(const unsigned char* input,
+ size_t length, unsigned char* output,
+ size_t max_input_length);
+
+#if defined(__cplusplus)
+} /* extern "C" */
+#endif
+
+/* If we are building a kwsys .c or .cxx file, let it use these macros.
+ Otherwise, undefine them to keep the namespace clean. */
+#if !defined(KWSYS_NAMESPACE)
+# undef kwsys_ns
+# undef kwsysEXPORT
+# if !@KWSYS_NAMESPACE@_NAME_IS_KWSYS
+# undef kwsysBase64
+# undef kwsysBase64_Decode
+# undef kwsysBase64_Decode3
+# undef kwsysBase64_Encode
+# undef kwsysBase64_Encode1
+# undef kwsysBase64_Encode2
+# undef kwsysBase64_Encode3
+# endif
+#endif
+
+#endif
diff --git a/test/API/driver/kwsys/CMakeLists.txt b/test/API/driver/kwsys/CMakeLists.txt
new file mode 100644
index 0000000..09bcdb9
--- /dev/null
+++ b/test/API/driver/kwsys/CMakeLists.txt
@@ -0,0 +1,1260 @@
+# Distributed under the OSI-approved BSD 3-Clause License. See accompanying
+# file Copyright.txt or https://cmake.org/licensing#kwsys for details.
+
+# The Kitware System Library is intended to be included in other
+# projects. It is completely configurable in that the library's
+# namespace can be configured and the components that are included can
+# be selected individually.
+
+# Typical usage is to import the kwsys directory tree into a
+# subdirectory under a parent project and enable the classes that will
+# be used. All classes are disabled by default. The CMake listfile
+# above this one configures the library as follows:
+#
+# SET(KWSYS_NAMESPACE foosys)
+# SET(KWSYS_USE_Directory 1) # Enable Directory class.
+# SUBDIRS(kwsys)
+#
+# Optional settings are as follows:
+#
+# KWSYS_HEADER_ROOT = The directory into which to generate the kwsys headers.
+# A directory called "${KWSYS_NAMESPACE}" will be
+# created under this root directory to hold the files.
+# KWSYS_SPLIT_OBJECTS_FROM_INTERFACE
+# = Instead of creating a single ${KWSYS_NAMESPACE} library
+# target, create three separate targets:
+# ${KWSYS_NAMESPACE}
+# - An INTERFACE library only containing usage
+# requirements.
+# ${KWSYS_NAMESPACE}_objects
+# - An OBJECT library for the built kwsys objects.
+# Note: This is omitted from the install rules
+# ${KWSYS_NAMESPACE}_private
+# - An INTERFACE library combining both that is
+# appropriate for use with PRIVATE linking in
+# target_link_libraries. Because of how interface
+# properties propagate, this target is not suitable
+# for use with PUBLIC or INTERFACE linking.
+# KWSYS_ALIAS_TARGET = The name of an alias target to create to the actual target.
+#
+# Example:
+#
+# SET(KWSYS_HEADER_ROOT ${PROJECT_BINARY_DIR})
+# INCLUDE_DIRECTORIES(${PROJECT_BINARY_DIR})
+#
+# KWSYS_CXX_STANDARD = A value for CMAKE_CXX_STANDARD within KWSys.
+# Set to empty string to use no default value.
+# KWSYS_CXX_COMPILE_FEATURES = target_compile_features arguments for KWSys.
+#
+# Optional settings to setup install rules are as follows:
+#
+# KWSYS_INSTALL_BIN_DIR = The installation target directories into
+# KWSYS_INSTALL_LIB_DIR which the libraries and headers from
+# KWSYS_INSTALL_INCLUDE_DIR kwsys should be installed by a "make install".
+# The values should be specified relative to
+# the installation prefix and NOT start with '/'.
+# KWSYS_INSTALL_DOC_DIR = The installation target directory for documentation
+# such as copyright information.
+#
+# KWSYS_INSTALL_COMPONENT_NAME_RUNTIME = Name of runtime and development
+# KWSYS_INSTALL_COMPONENT_NAME_DEVELOPMENT installation components.
+# If not given the install rules
+# will not be in any component.
+#
+# KWSYS_INSTALL_EXPORT_NAME = The EXPORT option value for install(TARGETS) calls.
+#
+# Example:
+#
+# SET(KWSYS_INSTALL_BIN_DIR bin)
+# SET(KWSYS_INSTALL_LIB_DIR lib)
+# SET(KWSYS_INSTALL_INCLUDE_DIR include)
+# SET(KWSYS_INSTALL_COMPONENT_NAME_RUNTIME Runtime)
+# SET(KWSYS_INSTALL_COMPONENT_NAME_DEVELOPMENT Development)
+
+# Once configured, kwsys should be used as follows from C or C++ code:
+#
+# #include <foosys/Directory.hxx>
+# ...
+# foosys::Directory directory;
+#
+
+# NOTE: This library is intended for internal use by Kitware-driven
+# projects. In order to keep it simple no attempt will be made to
+# maintain backward compatibility when changes are made to KWSys.
+# When an incompatible change is made Kitware's projects that use
+# KWSys will be fixed, but no notification will necessarily be sent to
+# any outside mailing list and no documentation of the change will be
+# written.
+
+CMAKE_MINIMUM_REQUIRED(VERSION 3.1 FATAL_ERROR)
+FOREACH(p
+ CMP0056 # CMake 3.2, Honor link flags in try_compile() source-file signature.
+ CMP0063 # CMake 3.3, Honor visibility properties for all target types.
+ CMP0067 # CMake 3.8, Honor language standard in try_compile source-file signature.
+ CMP0069 # CMake 3.9, INTERPROCEDURAL_OPTIMIZATION is enforced when enabled.
+ )
+ IF(POLICY ${p})
+ CMAKE_POLICY(SET ${p} NEW)
+ ENDIF()
+ENDFOREACH()
+
+#-----------------------------------------------------------------------------
+# If a namespace is not specified, use "kwsys" and enable testing.
+# This should be the case only when kwsys is not included inside
+# another project and is being tested.
+IF(NOT KWSYS_NAMESPACE)
+ SET(KWSYS_NAMESPACE "kwsys")
+ SET(KWSYS_STANDALONE 1)
+ENDIF()
+
+#-----------------------------------------------------------------------------
+# The project name is that of the specified namespace.
+PROJECT(${KWSYS_NAMESPACE})
+
+# Tell CMake how to follow dependencies of sources in this directory.
+SET_PROPERTY(DIRECTORY
+ PROPERTY IMPLICIT_DEPENDS_INCLUDE_TRANSFORM
+ "KWSYS_HEADER(%)=<${KWSYS_NAMESPACE}/%>"
+ )
+
+if(KWSYS_CXX_STANDARD)
+ set(CMAKE_CXX_STANDARD "${KWSYS_CXX_STANDARD}")
+elseif(NOT DEFINED CMAKE_CXX_STANDARD AND NOT DEFINED KWSYS_CXX_STANDARD)
+ if(CMAKE_CXX_COMPILER_ID STREQUAL "Clang"
+ AND CMAKE_CXX_SIMULATE_ID STREQUAL "MSVC"
+ AND CMAKE_CXX_COMPILER_FRONTEND_VARIANT STREQUAL "GNU"
+ )
+ set(CMAKE_CXX_STANDARD 14)
+ else()
+ set(CMAKE_CXX_STANDARD 11)
+ endif()
+endif()
+
+# Select library components.
+IF(KWSYS_STANDALONE OR CMake_SOURCE_DIR)
+ SET(KWSYS_ENABLE_C 1)
+ # Enable all components.
+ SET(KWSYS_USE_Base64 1)
+ SET(KWSYS_USE_Directory 1)
+ SET(KWSYS_USE_DynamicLoader 1)
+ SET(KWSYS_USE_Encoding 1)
+ SET(KWSYS_USE_Glob 1)
+ SET(KWSYS_USE_MD5 1)
+ SET(KWSYS_USE_Process 1)
+ SET(KWSYS_USE_RegularExpression 1)
+ SET(KWSYS_USE_System 1)
+ SET(KWSYS_USE_SystemTools 1)
+ SET(KWSYS_USE_CommandLineArguments 1)
+ SET(KWSYS_USE_Terminal 1)
+ SET(KWSYS_USE_IOStream 1)
+ SET(KWSYS_USE_FStream 1)
+ SET(KWSYS_USE_String 1)
+ SET(KWSYS_USE_SystemInformation 1)
+ SET(KWSYS_USE_ConsoleBuf 1)
+ENDIF()
+
+# Enforce component dependencies.
+IF(KWSYS_USE_SystemTools)
+ SET(KWSYS_USE_Directory 1)
+ SET(KWSYS_USE_FStream 1)
+ SET(KWSYS_USE_Encoding 1)
+ENDIF()
+IF(KWSYS_USE_Glob)
+ SET(KWSYS_USE_Directory 1)
+ SET(KWSYS_USE_SystemTools 1)
+ SET(KWSYS_USE_RegularExpression 1)
+ SET(KWSYS_USE_FStream 1)
+ SET(KWSYS_USE_Encoding 1)
+ENDIF()
+IF(KWSYS_USE_Process)
+ SET(KWSYS_USE_System 1)
+ SET(KWSYS_USE_Encoding 1)
+ENDIF()
+IF(KWSYS_USE_SystemInformation)
+ SET(KWSYS_USE_Process 1)
+ENDIF()
+IF(KWSYS_USE_System)
+ SET(KWSYS_USE_Encoding 1)
+ENDIF()
+IF(KWSYS_USE_Directory)
+ SET(KWSYS_USE_Encoding 1)
+ENDIF()
+IF(KWSYS_USE_DynamicLoader)
+ SET(KWSYS_USE_Encoding 1)
+ENDIF()
+IF(KWSYS_USE_FStream)
+ SET(KWSYS_USE_Encoding 1)
+ENDIF()
+IF(KWSYS_USE_ConsoleBuf)
+ SET(KWSYS_USE_Encoding 1)
+ENDIF()
+
+# Specify default 8 bit encoding for Windows
+IF(NOT KWSYS_ENCODING_DEFAULT_CODEPAGE)
+ SET(KWSYS_ENCODING_DEFAULT_CODEPAGE CP_ACP)
+ENDIF()
+
+# Enable testing if building standalone.
+IF(KWSYS_STANDALONE)
+ INCLUDE(Dart)
+ MARK_AS_ADVANCED(BUILD_TESTING DART_ROOT TCL_TCLSH)
+ IF(BUILD_TESTING)
+ ENABLE_TESTING()
+ ENDIF()
+ENDIF()
+
+# Choose default shared/static build if not specified.
+IF(NOT DEFINED KWSYS_BUILD_SHARED)
+ SET(KWSYS_BUILD_SHARED ${BUILD_SHARED_LIBS})
+ENDIF()
+
+# Include helper macros.
+INCLUDE(${CMAKE_CURRENT_SOURCE_DIR}/kwsysPlatformTests.cmake)
+INCLUDE(CheckTypeSize)
+
+# Do full dependency headers.
+INCLUDE_REGULAR_EXPRESSION("^.*$")
+
+# Use new KWSYS_INSTALL_*_DIR variable names to control installation.
+# Take defaults from the old names. Note that there was no old name
+# for the bin dir, so we take the old lib dir name so DLLs will be
+# installed in a compatible way for old code.
+IF(NOT KWSYS_INSTALL_INCLUDE_DIR)
+ STRING(REGEX REPLACE "^/" "" KWSYS_INSTALL_INCLUDE_DIR
+ "${KWSYS_HEADER_INSTALL_DIR}")
+ENDIF()
+IF(NOT KWSYS_INSTALL_LIB_DIR)
+ STRING(REGEX REPLACE "^/" "" KWSYS_INSTALL_LIB_DIR
+ "${KWSYS_LIBRARY_INSTALL_DIR}")
+ENDIF()
+IF(NOT KWSYS_INSTALL_BIN_DIR)
+ STRING(REGEX REPLACE "^/" "" KWSYS_INSTALL_BIN_DIR
+ "${KWSYS_LIBRARY_INSTALL_DIR}")
+ENDIF()
+
+# Setup header install rules.
+SET(KWSYS_INSTALL_INCLUDE_OPTIONS)
+IF(KWSYS_INSTALL_COMPONENT_NAME_DEVELOPMENT)
+ SET(KWSYS_INSTALL_INCLUDE_OPTIONS ${KWSYS_INSTALL_INCLUDE_OPTIONS}
+ COMPONENT ${KWSYS_INSTALL_COMPONENT_NAME_DEVELOPMENT}
+ )
+ENDIF()
+
+# Setup library install rules.
+SET(KWSYS_INSTALL_LIBRARY_RULE)
+SET(KWSYS_INSTALL_NAMELINK_RULE)
+IF(KWSYS_INSTALL_LIB_DIR)
+ IF(KWSYS_INSTALL_EXPORT_NAME)
+ LIST(APPEND KWSYS_INSTALL_LIBRARY_RULE EXPORT ${KWSYS_INSTALL_EXPORT_NAME})
+ ENDIF()
+ # Install the shared library to the lib directory.
+ SET(KWSYS_INSTALL_LIBRARY_RULE ${KWSYS_INSTALL_LIBRARY_RULE}
+ LIBRARY DESTINATION ${KWSYS_INSTALL_LIB_DIR} NAMELINK_SKIP
+ )
+ # Assign the shared library to the runtime component.
+ IF(KWSYS_INSTALL_COMPONENT_NAME_RUNTIME)
+ SET(KWSYS_INSTALL_LIBRARY_RULE ${KWSYS_INSTALL_LIBRARY_RULE}
+ COMPONENT ${KWSYS_INSTALL_COMPONENT_NAME_RUNTIME}
+ )
+ ENDIF()
+ IF(KWSYS_BUILD_SHARED)
+ SET(KWSYS_INSTALL_NAMELINK_RULE ${KWSYS_INSTALL_NAMELINK_RULE}
+ LIBRARY DESTINATION ${KWSYS_INSTALL_LIB_DIR} NAMELINK_ONLY
+ )
+ # Assign the namelink to the development component.
+ IF(KWSYS_INSTALL_COMPONENT_NAME_DEVELOPMENT)
+ SET(KWSYS_INSTALL_NAMELINK_RULE ${KWSYS_INSTALL_NAMELINK_RULE}
+ COMPONENT ${KWSYS_INSTALL_COMPONENT_NAME_DEVELOPMENT}
+ )
+ ENDIF()
+ ENDIF()
+
+ # Install the archive to the lib directory.
+ SET(KWSYS_INSTALL_LIBRARY_RULE ${KWSYS_INSTALL_LIBRARY_RULE}
+ ARCHIVE DESTINATION ${KWSYS_INSTALL_LIB_DIR}
+ )
+ # Assign the archive to the development component.
+ IF(KWSYS_INSTALL_COMPONENT_NAME_DEVELOPMENT)
+ SET(KWSYS_INSTALL_LIBRARY_RULE ${KWSYS_INSTALL_LIBRARY_RULE}
+ COMPONENT ${KWSYS_INSTALL_COMPONENT_NAME_DEVELOPMENT}
+ )
+ ENDIF()
+ENDIF()
+IF(KWSYS_INSTALL_BIN_DIR)
+ # Install the runtime library to the bin directory.
+ SET(KWSYS_INSTALL_LIBRARY_RULE ${KWSYS_INSTALL_LIBRARY_RULE}
+ RUNTIME DESTINATION ${KWSYS_INSTALL_BIN_DIR}
+ )
+ # Assign the runtime library to the runtime component.
+ IF(KWSYS_INSTALL_COMPONENT_NAME_RUNTIME)
+ SET(KWSYS_INSTALL_LIBRARY_RULE ${KWSYS_INSTALL_LIBRARY_RULE}
+ COMPONENT ${KWSYS_INSTALL_COMPONENT_NAME_RUNTIME}
+ )
+ ENDIF()
+ENDIF()
+
+# Do not support the old KWSYS_*_INSTALL_DIR variable names.
+SET(KWSYS_HEADER_INSTALL_DIR)
+SET(KWSYS_LIBRARY_INSTALL_DIR)
+
+# Generated source files will need this header.
+STRING(COMPARE EQUAL "${PROJECT_SOURCE_DIR}" "${PROJECT_BINARY_DIR}"
+ KWSYS_IN_SOURCE_BUILD)
+IF(NOT KWSYS_IN_SOURCE_BUILD)
+ CONFIGURE_FILE(${PROJECT_SOURCE_DIR}/kwsysPrivate.h
+ ${PROJECT_BINARY_DIR}/kwsysPrivate.h COPYONLY IMMEDIATE)
+ENDIF()
+
+# Select plugin module file name convention.
+IF(NOT KWSYS_DynamicLoader_PREFIX)
+ SET(KWSYS_DynamicLoader_PREFIX ${CMAKE_SHARED_MODULE_PREFIX})
+ENDIF()
+IF(NOT KWSYS_DynamicLoader_SUFFIX)
+ SET(KWSYS_DynamicLoader_SUFFIX ${CMAKE_SHARED_MODULE_SUFFIX})
+ENDIF()
+
+#-----------------------------------------------------------------------------
+# We require ANSI support from the C compiler. Add any needed flags.
+IF(CMAKE_ANSI_CFLAGS)
+ SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${CMAKE_ANSI_CFLAGS}")
+ENDIF()
+
+#-----------------------------------------------------------------------------
+# Adjust compiler flags for some platforms.
+IF(NOT CMAKE_COMPILER_IS_GNUCXX)
+ IF(CMAKE_SYSTEM MATCHES "OSF1-V.*")
+ STRING(REGEX MATCH "-timplicit_local"
+ KWSYS_CXX_FLAGS_HAVE_IMPLICIT_LOCAL "${CMAKE_CXX_FLAGS}")
+ STRING(REGEX MATCH "-no_implicit_include"
+ KWSYS_CXX_FLAGS_HAVE_NO_IMPLICIT_INCLUDE "${CMAKE_CXX_FLAGS}")
+ IF(NOT KWSYS_CXX_FLAGS_HAVE_IMPLICIT_LOCAL)
+ SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -timplicit_local")
+ ENDIF()
+ IF(NOT KWSYS_CXX_FLAGS_HAVE_NO_IMPLICIT_INCLUDE)
+ SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -no_implicit_include")
+ ENDIF()
+ ENDIF()
+ IF(CMAKE_SYSTEM MATCHES "HP-UX")
+ SET(KWSYS_PLATFORM_CXX_TEST_EXTRA_FLAGS "+p")
+ IF(CMAKE_CXX_COMPILER_ID MATCHES "HP")
+ # it is known that version 3.85 fails and 6.25 works without these flags
+ IF(CMAKE_CXX_COMPILER_VERSION VERSION_LESS 4)
+ # use new C++ library and improved template support
+ SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -AA +hpxstd98")
+ ENDIF()
+ ENDIF()
+ ENDIF()
+ENDIF()
+IF(KWSYS_STANDALONE)
+ IF(CMAKE_CXX_COMPILER_ID STREQUAL SunPro)
+ IF(NOT CMAKE_CXX_COMPILER_VERSION VERSION_LESS 5.13)
+ SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++03")
+ ELSE()
+ SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -library=stlport4")
+ ENDIF()
+ ENDIF()
+ENDIF()
+
+#-----------------------------------------------------------------------------
+# Configure the standard library header wrappers based on the compiler's
+# capabilities and the parent project's request. Enforce 0/1 as the only
+# possible values for configuration into Configure.hxx.
+
+# Check existence and uniqueness of long long and __int64.
+KWSYS_PLATFORM_CXX_TEST(KWSYS_CXX_HAS_LONG_LONG
+ "Checking whether C++ compiler has 'long long'" DIRECT)
+KWSYS_PLATFORM_CXX_TEST(KWSYS_CXX_HAS___INT64
+ "Checking whether C++ compiler has '__int64'" DIRECT)
+IF(KWSYS_CXX_HAS___INT64)
+ KWSYS_PLATFORM_CXX_TEST(KWSYS_CXX_SAME_LONG_AND___INT64
+ "Checking whether long and __int64 are the same type" DIRECT)
+ IF(KWSYS_CXX_HAS_LONG_LONG)
+ KWSYS_PLATFORM_CXX_TEST(KWSYS_CXX_SAME_LONG_LONG_AND___INT64
+ "Checking whether long long and __int64 are the same type" DIRECT)
+ ENDIF()
+ENDIF()
+
+# Enable the "long long" type if it is available. It is standard in
+# C99 and C++03 but not in earlier standards.
+IF(KWSYS_CXX_HAS_LONG_LONG)
+ SET(KWSYS_USE_LONG_LONG 1)
+ELSE()
+ SET(KWSYS_USE_LONG_LONG 0)
+ENDIF()
+
+# Enable the "__int64" type if it is available and unique. It is not
+# standard.
+SET(KWSYS_USE___INT64 0)
+IF(KWSYS_CXX_HAS___INT64)
+ IF(NOT KWSYS_CXX_SAME_LONG_AND___INT64)
+ IF(NOT KWSYS_CXX_SAME_LONG_LONG_AND___INT64)
+ SET(KWSYS_USE___INT64 1)
+ ENDIF()
+ ENDIF()
+ENDIF()
+
+IF(KWSYS_USE_Encoding)
+  # Check whether std::wstring is available.
+ KWSYS_PLATFORM_CXX_TEST(KWSYS_STL_HAS_WSTRING
+ "Checking whether wstring is available" DIRECT)
+ENDIF()
+
+IF(KWSYS_USE_IOStream)
+ # Determine whether iostreams support long long.
+ IF(KWSYS_CXX_HAS_LONG_LONG)
+ KWSYS_PLATFORM_CXX_TEST(KWSYS_IOS_HAS_ISTREAM_LONG_LONG
+ "Checking if istream supports long long" DIRECT)
+ KWSYS_PLATFORM_CXX_TEST(KWSYS_IOS_HAS_OSTREAM_LONG_LONG
+ "Checking if ostream supports long long" DIRECT)
+ ELSE()
+ SET(KWSYS_IOS_HAS_ISTREAM_LONG_LONG 0)
+ SET(KWSYS_IOS_HAS_OSTREAM_LONG_LONG 0)
+ ENDIF()
+ IF(KWSYS_CXX_HAS___INT64)
+ KWSYS_PLATFORM_CXX_TEST(KWSYS_IOS_HAS_ISTREAM___INT64
+ "Checking if istream supports __int64" DIRECT)
+ KWSYS_PLATFORM_CXX_TEST(KWSYS_IOS_HAS_OSTREAM___INT64
+ "Checking if ostream supports __int64" DIRECT)
+ ELSE()
+ SET(KWSYS_IOS_HAS_ISTREAM___INT64 0)
+ SET(KWSYS_IOS_HAS_OSTREAM___INT64 0)
+ ENDIF()
+ENDIF()
+
+IF(KWSYS_NAMESPACE MATCHES "^kwsys$")
+ SET(KWSYS_NAME_IS_KWSYS 1)
+ELSE()
+ SET(KWSYS_NAME_IS_KWSYS 0)
+ENDIF()
+
+IF(KWSYS_BUILD_SHARED)
+ SET(KWSYS_BUILD_SHARED 1)
+ SET(KWSYS_LIBRARY_TYPE SHARED)
+ELSE()
+ SET(KWSYS_BUILD_SHARED 0)
+ SET(KWSYS_LIBRARY_TYPE STATIC)
+ENDIF()
+
+if(NOT DEFINED KWSYS_BUILD_PIC)
+ set(KWSYS_BUILD_PIC 0)
+endif()
+
+#-----------------------------------------------------------------------------
+# Configure some implementation details.
+
+KWSYS_PLATFORM_C_TEST(KWSYS_C_HAS_PTRDIFF_T
+ "Checking whether C compiler has ptrdiff_t in stddef.h" DIRECT)
+KWSYS_PLATFORM_C_TEST(KWSYS_C_HAS_SSIZE_T
+ "Checking whether C compiler has ssize_t in unistd.h" DIRECT)
+IF(KWSYS_USE_Process)
+ KWSYS_PLATFORM_C_TEST(KWSYS_C_HAS_CLOCK_GETTIME_MONOTONIC
+ "Checking whether C compiler has clock_gettime" DIRECT)
+ENDIF()
+
+SET_SOURCE_FILES_PROPERTIES(ProcessUNIX.c System.c PROPERTIES
+ COMPILE_FLAGS "-DKWSYS_C_HAS_PTRDIFF_T=${KWSYS_C_HAS_PTRDIFF_T} -DKWSYS_C_HAS_SSIZE_T=${KWSYS_C_HAS_SSIZE_T} -DKWSYS_C_HAS_CLOCK_GETTIME_MONOTONIC=${KWSYS_C_HAS_CLOCK_GETTIME_MONOTONIC}"
+ )
+
+IF(DEFINED KWSYS_PROCESS_USE_SELECT)
+ GET_PROPERTY(ProcessUNIX_FLAGS SOURCE ProcessUNIX.c PROPERTY COMPILE_FLAGS)
+ SET_PROPERTY(SOURCE ProcessUNIX.c PROPERTY COMPILE_FLAGS "${ProcessUNIX_FLAGS} -DKWSYSPE_USE_SELECT=${KWSYSPE_USE_SELECT}")
+ENDIF()
+
+IF(KWSYS_USE_DynamicLoader)
+ GET_PROPERTY(KWSYS_SUPPORTS_SHARED_LIBS GLOBAL PROPERTY TARGET_SUPPORTS_SHARED_LIBS)
+ IF(KWSYS_SUPPORTS_SHARED_LIBS)
+ SET(KWSYS_SUPPORTS_SHARED_LIBS 1)
+ ELSE()
+ SET(KWSYS_SUPPORTS_SHARED_LIBS 0)
+ ENDIF()
+ SET_PROPERTY(SOURCE DynamicLoader.cxx APPEND PROPERTY COMPILE_DEFINITIONS
+ KWSYS_SUPPORTS_SHARED_LIBS=${KWSYS_SUPPORTS_SHARED_LIBS})
+ENDIF()
+
+IF(KWSYS_USE_SystemTools)
+ if (NOT DEFINED KWSYS_SYSTEMTOOLS_USE_TRANSLATION_MAP)
+ set(KWSYS_SYSTEMTOOLS_USE_TRANSLATION_MAP 1)
+ endif ()
+ if (KWSYS_SYSTEMTOOLS_USE_TRANSLATION_MAP)
+ set(KWSYS_SYSTEMTOOLS_USE_TRANSLATION_MAP 1)
+ else ()
+ set(KWSYS_SYSTEMTOOLS_USE_TRANSLATION_MAP 0)
+ endif ()
+ KWSYS_PLATFORM_CXX_TEST(KWSYS_CXX_HAS_SETENV
+ "Checking whether CXX compiler has setenv" DIRECT)
+ KWSYS_PLATFORM_CXX_TEST(KWSYS_CXX_HAS_UNSETENV
+ "Checking whether CXX compiler has unsetenv" DIRECT)
+ KWSYS_PLATFORM_CXX_TEST(KWSYS_CXX_HAS_ENVIRON_IN_STDLIB_H
+ "Checking whether CXX compiler has environ in stdlib.h" DIRECT)
+ KWSYS_PLATFORM_CXX_TEST(KWSYS_CXX_HAS_UTIMES
+ "Checking whether CXX compiler has utimes" DIRECT)
+ KWSYS_PLATFORM_CXX_TEST(KWSYS_CXX_HAS_UTIMENSAT
+ "Checking whether CXX compiler has utimensat" DIRECT)
+ KWSYS_PLATFORM_CXX_TEST(KWSYS_CXX_STAT_HAS_ST_MTIM
+ "Checking whether CXX compiler struct stat has st_mtim member" DIRECT)
+ KWSYS_PLATFORM_CXX_TEST(KWSYS_CXX_STAT_HAS_ST_MTIMESPEC
+ "Checking whether CXX compiler struct stat has st_mtimespec member" DIRECT)
+ SET_PROPERTY(SOURCE SystemTools.cxx APPEND PROPERTY COMPILE_DEFINITIONS
+ KWSYS_CXX_HAS_SETENV=${KWSYS_CXX_HAS_SETENV}
+ KWSYS_CXX_HAS_UNSETENV=${KWSYS_CXX_HAS_UNSETENV}
+ KWSYS_CXX_HAS_ENVIRON_IN_STDLIB_H=${KWSYS_CXX_HAS_ENVIRON_IN_STDLIB_H}
+ KWSYS_CXX_HAS_UTIMES=${KWSYS_CXX_HAS_UTIMES}
+ KWSYS_CXX_HAS_UTIMENSAT=${KWSYS_CXX_HAS_UTIMENSAT}
+ KWSYS_CXX_STAT_HAS_ST_MTIM=${KWSYS_CXX_STAT_HAS_ST_MTIM}
+ KWSYS_CXX_STAT_HAS_ST_MTIMESPEC=${KWSYS_CXX_STAT_HAS_ST_MTIMESPEC}
+ )
+ IF(NOT WIN32)
+ IF(KWSYS_STANDALONE)
+ OPTION(KWSYS_SYSTEMTOOLS_SUPPORT_WINDOWS_SLASHES "If true, Windows paths will be supported on Unix as well" ON)
+ ENDIF()
+ IF(KWSYS_SYSTEMTOOLS_SUPPORT_WINDOWS_SLASHES)
+ SET_PROPERTY(SOURCE SystemTools.cxx testSystemTools.cxx APPEND PROPERTY COMPILE_DEFINITIONS
+ KWSYS_SYSTEMTOOLS_SUPPORT_WINDOWS_SLASHES
+ )
+ ENDIF()
+ ENDIF()
+
+ # Disable getpwnam for static linux builds since it depends on shared glibc
+ GET_PROPERTY(SHARED_LIBS_SUPPORTED GLOBAL PROPERTY TARGET_SUPPORTS_SHARED_LIBS)
+ IF(CMAKE_SYSTEM_NAME MATCHES "Linux" AND NOT SHARED_LIBS_SUPPORTED)
+ SET_PROPERTY(SOURCE SystemTools.cxx APPEND PROPERTY COMPILE_DEFINITIONS
+ HAVE_GETPWNAM=0
+ )
+ ENDIF()
+ENDIF()
+
+IF(KWSYS_USE_SystemInformation)
+ SET_PROPERTY(SOURCE SystemInformation.cxx APPEND PROPERTY
+ COMPILE_DEFINITIONS SIZEOF_VOID_P=${CMAKE_SIZEOF_VOID_P})
+ IF(NOT CYGWIN)
+ INCLUDE(CheckIncludeFiles)
+ CHECK_INCLUDE_FILES("sys/types.h;ifaddrs.h" KWSYS_SYS_HAS_IFADDRS_H)
+ IF(KWSYS_SYS_HAS_IFADDRS_H)
+ SET_PROPERTY(SOURCE SystemInformation.cxx APPEND PROPERTY
+ COMPILE_DEFINITIONS KWSYS_SYS_HAS_IFADDRS_H=1)
+ ENDIF()
+ ENDIF()
+ IF(WIN32)
+ INCLUDE(CheckSymbolExists)
+ SET(CMAKE_REQUIRED_LIBRARIES Psapi)
+ CHECK_SYMBOL_EXISTS(GetProcessMemoryInfo "windows.h;psapi.h" KWSYS_SYS_HAS_PSAPI)
+ UNSET(CMAKE_REQUIRED_LIBRARIES)
+ IF(KWSYS_SYS_HAS_PSAPI)
+ SET_PROPERTY(SOURCE SystemInformation.cxx APPEND PROPERTY
+ COMPILE_DEFINITIONS KWSYS_SYS_HAS_PSAPI=1)
+ IF(MSVC70 OR MSVC71)
+ # Suppress LNK4089: all references to 'PSAPI.DLL' discarded by /OPT:REF
+ SET(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} /IGNORE:4089")
+ ENDIF()
+ ENDIF()
+ ENDIF()
+ IF(CMAKE_SYSTEM MATCHES "HP-UX")
+ CHECK_INCLUDE_FILES("sys/mpctl.h" KWSYS_SYS_HAS_MPCTL_H)
+ IF(KWSYS_SYS_HAS_MPCTL_H)
+ SET_PROPERTY(SOURCE SystemInformation.cxx APPEND PROPERTY
+ COMPILE_DEFINITIONS KWSYS_SYS_HAS_MPCTL_H=1)
+ ENDIF()
+ ENDIF()
+ IF(CMAKE_SYSTEM MATCHES "BSD")
+ CHECK_INCLUDE_FILES("machine/cpu.h" KWSYS_SYS_HAS_MACHINE_CPU_H)
+ IF(KWSYS_SYS_HAS_MACHINE_CPU_H)
+ SET_PROPERTY(SOURCE SystemInformation.cxx APPEND PROPERTY
+ COMPILE_DEFINITIONS KWSYS_SYS_HAS_MACHINE_CPU_H=1)
+ ENDIF()
+ ENDIF()
+ KWSYS_PLATFORM_CXX_TEST(KWSYS_CXX_HAS_RLIMIT64
+ "Checking whether CXX compiler has rlimit64" DIRECT)
+ SET(KWSYS_PLATFORM_CXX_TEST_DEFINES)
+ IF(KWSYS_CXX_HAS_RLIMIT64)
+ SET_PROPERTY(SOURCE SystemInformation.cxx APPEND PROPERTY
+ COMPILE_DEFINITIONS KWSYS_CXX_HAS_RLIMIT64=1)
+ ENDIF()
+ KWSYS_PLATFORM_CXX_TEST(KWSYS_CXX_HAS_ATOL
+ "Checking whether CXX compiler has atol" DIRECT)
+ IF(KWSYS_CXX_HAS_ATOL)
+ SET_PROPERTY(SOURCE SystemInformation.cxx APPEND PROPERTY
+ COMPILE_DEFINITIONS KWSYS_CXX_HAS_ATOL=1)
+ ENDIF()
+ KWSYS_PLATFORM_CXX_TEST(KWSYS_CXX_HAS_ATOLL
+ "Checking whether CXX compiler has atoll" DIRECT)
+ IF(KWSYS_CXX_HAS_ATOLL)
+ SET_PROPERTY(SOURCE SystemInformation.cxx APPEND PROPERTY
+ COMPILE_DEFINITIONS KWSYS_CXX_HAS_ATOLL=1)
+ ENDIF()
+ KWSYS_PLATFORM_CXX_TEST(KWSYS_CXX_HAS__ATOI64
+ "Checking whether CXX compiler has _atoi64" DIRECT)
+ IF(KWSYS_CXX_HAS__ATOI64)
+ SET_PROPERTY(SOURCE SystemInformation.cxx APPEND PROPERTY
+ COMPILE_DEFINITIONS KWSYS_CXX_HAS__ATOI64=1)
+ ENDIF()
+ IF(UNIX)
+ INCLUDE(CheckIncludeFileCXX)
+ # check for simple stack trace
+ # usually it's in libc but on FreeBSD
+ # it's in libexecinfo
+ FIND_LIBRARY(EXECINFO_LIB "execinfo")
+ MARK_AS_ADVANCED(EXECINFO_LIB)
+ IF (NOT EXECINFO_LIB)
+ SET(EXECINFO_LIB "")
+ ENDIF()
+ CHECK_INCLUDE_FILE_CXX("execinfo.h" KWSYS_CXX_HAS_EXECINFOH)
+ IF (KWSYS_CXX_HAS_EXECINFOH)
+      # we have the backtrace header; check whether it
+      # can be used with this compiler
+ SET(KWSYS_PLATFORM_CXX_TEST_LINK_LIBRARIES ${EXECINFO_LIB})
+ KWSYS_PLATFORM_CXX_TEST(KWSYS_CXX_HAS_BACKTRACE
+ "Checking whether backtrace works with this C++ compiler" DIRECT)
+ SET(KWSYS_PLATFORM_CXX_TEST_LINK_LIBRARIES)
+ IF (KWSYS_CXX_HAS_BACKTRACE)
+ # backtrace is supported by this system and compiler.
+ # now check for the more advanced capabilities.
+ SET_PROPERTY(SOURCE SystemInformation.cxx APPEND PROPERTY
+ COMPILE_DEFINITIONS KWSYS_SYSTEMINFORMATION_HAS_BACKTRACE=1)
+ # check for symbol lookup using dladdr
+ CHECK_INCLUDE_FILE_CXX("dlfcn.h" KWSYS_CXX_HAS_DLFCNH)
+ IF (KWSYS_CXX_HAS_DLFCNH)
+ # we have symbol lookup libraries and headers
+ # check if they can be used with this compiler
+ SET(KWSYS_PLATFORM_CXX_TEST_LINK_LIBRARIES ${CMAKE_DL_LIBS})
+ KWSYS_PLATFORM_CXX_TEST(KWSYS_CXX_HAS_DLADDR
+ "Checking whether dladdr works with this C++ compiler" DIRECT)
+ SET(KWSYS_PLATFORM_CXX_TEST_LINK_LIBRARIES)
+ IF (KWSYS_CXX_HAS_DLADDR)
+ # symbol lookup is supported by this system
+ # and compiler.
+ SET_PROPERTY(SOURCE SystemInformation.cxx APPEND PROPERTY
+ COMPILE_DEFINITIONS KWSYS_SYSTEMINFORMATION_HAS_SYMBOL_LOOKUP=1)
+ ENDIF()
+ ENDIF()
+ # c++ demangling support
+ # check for cxxabi headers
+ CHECK_INCLUDE_FILE_CXX("cxxabi.h" KWSYS_CXX_HAS_CXXABIH)
+ IF (KWSYS_CXX_HAS_CXXABIH)
+ # check if cxxabi can be used with this
+ # system and compiler.
+ KWSYS_PLATFORM_CXX_TEST(KWSYS_CXX_HAS_CXXABI
+ "Checking whether cxxabi works with this C++ compiler" DIRECT)
+ IF (KWSYS_CXX_HAS_CXXABI)
+ # c++ demangle using cxxabi is supported with
+ # this system and compiler
+ SET_PROPERTY(SOURCE SystemInformation.cxx APPEND PROPERTY
+ COMPILE_DEFINITIONS KWSYS_SYSTEMINFORMATION_HAS_CPP_DEMANGLE=1)
+ ENDIF()
+ ENDIF()
+ # basic backtrace works better with release build
+ # don't bother with advanced features for release
+ SET_PROPERTY(SOURCE SystemInformation.cxx APPEND PROPERTY
+ COMPILE_DEFINITIONS_DEBUG KWSYS_SYSTEMINFORMATION_HAS_DEBUG_BUILD=1)
+ SET_PROPERTY(SOURCE SystemInformation.cxx APPEND PROPERTY
+ COMPILE_DEFINITIONS_RELWITHDEBINFO KWSYS_SYSTEMINFORMATION_HAS_DEBUG_BUILD=1)
+ ENDIF()
+ ENDIF()
+ ENDIF()
+ IF(BORLAND)
+ KWSYS_PLATFORM_CXX_TEST(KWSYS_CXX_HAS_BORLAND_ASM
+ "Checking whether Borland CXX compiler supports assembler instructions" DIRECT)
+ IF(KWSYS_CXX_HAS_BORLAND_ASM)
+ SET_PROPERTY(SOURCE SystemInformation.cxx APPEND PROPERTY
+ COMPILE_DEFINITIONS KWSYS_CXX_HAS_BORLAND_ASM=1)
+ KWSYS_PLATFORM_CXX_TEST(KWSYS_CXX_HAS_BORLAND_ASM_CPUID
+ "Checking whether Borland CXX compiler supports CPUID assembler instruction" DIRECT)
+ IF(KWSYS_CXX_HAS_BORLAND_ASM_CPUID)
+ SET_PROPERTY(SOURCE SystemInformation.cxx APPEND PROPERTY
+ COMPILE_DEFINITIONS KWSYS_CXX_HAS_BORLAND_ASM_CPUID=1)
+ ENDIF()
+ ENDIF()
+ ENDIF()
+ IF(KWSYS_USE___INT64)
+ SET_PROPERTY(SOURCE SystemInformation.cxx testSystemInformation.cxx APPEND PROPERTY
+ COMPILE_DEFINITIONS KWSYS_USE___INT64=1)
+ ENDIF()
+ IF(KWSYS_USE_LONG_LONG)
+ SET_PROPERTY(SOURCE SystemInformation.cxx testSystemInformation.cxx APPEND PROPERTY
+ COMPILE_DEFINITIONS KWSYS_USE_LONG_LONG=1)
+ ENDIF()
+ IF(KWSYS_IOS_HAS_OSTREAM_LONG_LONG)
+ SET_PROPERTY(SOURCE SystemInformation.cxx testSystemInformation.cxx APPEND PROPERTY
+ COMPILE_DEFINITIONS KWSYS_IOS_HAS_OSTREAM_LONG_LONG=1)
+ ENDIF()
+ IF(KWSYS_IOS_HAS_OSTREAM___INT64)
+ SET_PROPERTY(SOURCE SystemInformation.cxx testSystemInformation.cxx APPEND PROPERTY
+ COMPILE_DEFINITIONS KWSYS_IOS_HAS_OSTREAM___INT64=1)
+ ENDIF()
+ IF(KWSYS_BUILD_SHARED)
+ SET_PROPERTY(SOURCE SystemInformation.cxx APPEND PROPERTY
+ COMPILE_DEFINITIONS KWSYS_BUILD_SHARED=1)
+ ENDIF()
+
+ IF(UNIX AND NOT CYGWIN)
+ KWSYS_PLATFORM_CXX_TEST(KWSYS_CXX_HAS_GETLOADAVG
+ "Checking whether CXX compiler has getloadavg" DIRECT)
+ IF(KWSYS_CXX_HAS_GETLOADAVG)
+ SET_PROPERTY(SOURCE SystemInformation.cxx APPEND PROPERTY
+ COMPILE_DEFINITIONS KWSYS_CXX_HAS_GETLOADAVG=1)
+ ENDIF()
+ ENDIF()
+ENDIF()
+
+IF(KWSYS_USE_FStream)
+ KWSYS_PLATFORM_CXX_TEST(KWSYS_CXX_HAS_EXT_STDIO_FILEBUF_H
+ "Checking whether <ext/stdio_filebuf.h> is available" DIRECT)
+ENDIF()
+
+#-----------------------------------------------------------------------------
+# Choose a directory for the generated headers.
+IF(NOT KWSYS_HEADER_ROOT)
+ SET(KWSYS_HEADER_ROOT "${PROJECT_BINARY_DIR}")
+ENDIF()
+SET(KWSYS_HEADER_DIR "${KWSYS_HEADER_ROOT}/${KWSYS_NAMESPACE}")
+INCLUDE_DIRECTORIES(${KWSYS_HEADER_ROOT})
+
+#-----------------------------------------------------------------------------
+IF(KWSYS_INSTALL_DOC_DIR)
+ # Assign the license to the runtime component since it must be
+ # distributed with binary forms of this software.
+ IF(KWSYS_INSTALL_COMPONENT_NAME_RUNTIME)
+ SET(KWSYS_INSTALL_LICENSE_OPTIONS ${KWSYS_INSTALL_LICENSE_OPTIONS}
+ COMPONENT ${KWSYS_INSTALL_COMPONENT_NAME_RUNTIME}
+ )
+ ENDIF()
+
+ # Install the license under the documentation directory.
+ INSTALL(FILES ${CMAKE_CURRENT_SOURCE_DIR}/Copyright.txt
+ DESTINATION ${KWSYS_INSTALL_DOC_DIR}/${KWSYS_NAMESPACE}
+ ${KWSYS_INSTALL_LICENSE_OPTIONS})
+ENDIF()
+
+#-----------------------------------------------------------------------------
+# Build a list of classes and headers we need to implement the
+# selected components. Initialize with required components.
+SET(KWSYS_CLASSES)
+SET(KWSYS_H_FILES Configure SharedForward)
+SET(KWSYS_HXX_FILES Configure String)
+
+IF(NOT CMake_SOURCE_DIR)
+ SET(KWSYS_HXX_FILES ${KWSYS_HXX_FILES}
+ hashtable hash_fun hash_map hash_set
+ )
+ENDIF()
+
+# Add selected C++ classes.
+SET(cppclasses
+ Directory DynamicLoader Encoding Glob RegularExpression SystemTools
+ CommandLineArguments IOStream FStream SystemInformation ConsoleBuf
+ )
+FOREACH(cpp ${cppclasses})
+ IF(KWSYS_USE_${cpp})
+ # Use the corresponding class.
+ SET(KWSYS_CLASSES ${KWSYS_CLASSES} ${cpp})
+
+ # Load component-specific CMake code.
+ IF(EXISTS ${PROJECT_SOURCE_DIR}/kwsys${cpp}.cmake)
+ INCLUDE(${PROJECT_SOURCE_DIR}/kwsys${cpp}.cmake)
+ ENDIF()
+ ENDIF()
+ENDFOREACH()
+
+# Add selected C components.
+FOREACH(c
+ Process Base64 Encoding MD5 Terminal System String
+ )
+ IF(KWSYS_USE_${c})
+ # Use the corresponding header file.
+ SET(KWSYS_H_FILES ${KWSYS_H_FILES} ${c})
+
+ # Load component-specific CMake code.
+ IF(EXISTS ${PROJECT_SOURCE_DIR}/kwsys${c}.cmake)
+ INCLUDE(${PROJECT_SOURCE_DIR}/kwsys${c}.cmake)
+ ENDIF()
+ ENDIF()
+ENDFOREACH()
+
+#-----------------------------------------------------------------------------
+# Build a list of sources for the library based on components that are
+# included.
+SET(KWSYS_C_SRCS)
+SET(KWSYS_CXX_SRCS)
+
+# Add the proper sources for this platform's Process implementation.
+IF(KWSYS_USE_Process)
+ IF(NOT UNIX)
+ # Use the Windows implementation.
+ SET(KWSYS_C_SRCS ${KWSYS_C_SRCS} ProcessWin32.c)
+ ELSE()
+ # Use the UNIX implementation.
+ SET(KWSYS_C_SRCS ${KWSYS_C_SRCS} ProcessUNIX.c)
+ ENDIF()
+ENDIF()
+
+# Add selected C sources.
+FOREACH(c Base64 Encoding MD5 Terminal System String)
+ IF(KWSYS_USE_${c})
+ IF(EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/${c}C.c)
+ LIST(APPEND KWSYS_C_SRCS ${c}C.c)
+ ELSE()
+ LIST(APPEND KWSYS_C_SRCS ${c}.c)
+ ENDIF()
+ ENDIF()
+ENDFOREACH()
+
+# Configure headers of C++ classes and construct the list of sources.
+FOREACH(c ${KWSYS_CLASSES})
+ # Add this source to the list of source files for the library.
+ IF(EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/${c}CXX.cxx)
+ LIST(APPEND KWSYS_CXX_SRCS ${c}CXX.cxx)
+ ELSEIF(EXISTS ${CMAKE_CURRENT_SOURCE_DIR}/${c}.cxx)
+ LIST(APPEND KWSYS_CXX_SRCS ${c}.cxx)
+ ENDIF()
+
+ # Configure the header for this class.
+ CONFIGURE_FILE(${PROJECT_SOURCE_DIR}/${c}.hxx.in ${KWSYS_HEADER_DIR}/${c}.hxx
+ @ONLY IMMEDIATE)
+ SET(KWSYS_CXX_SRCS ${KWSYS_CXX_SRCS} ${KWSYS_HEADER_DIR}/${c}.hxx)
+
+ # Create an install target for the header.
+ IF(KWSYS_INSTALL_INCLUDE_DIR)
+ INSTALL(FILES ${KWSYS_HEADER_DIR}/${c}.hxx
+ DESTINATION ${KWSYS_INSTALL_INCLUDE_DIR}/${KWSYS_NAMESPACE}
+ ${KWSYS_INSTALL_INCLUDE_OPTIONS})
+ ENDIF()
+ENDFOREACH()
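+
+# For reference, a consumer would then include the configured headers through
+# the chosen namespace, e.g. (assuming KWSYS_NAMESPACE is "kwsys"):
+#
+#   #include <kwsys/SystemTools.hxx>
+#   std::string cwd = kwsys::SystemTools::GetCurrentWorkingDirectory();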
+
+# Configure C headers.
+FOREACH(h ${KWSYS_H_FILES})
+ # Configure the header into the given directory.
+ CONFIGURE_FILE(${PROJECT_SOURCE_DIR}/${h}.h.in ${KWSYS_HEADER_DIR}/${h}.h
+ @ONLY IMMEDIATE)
+ SET(KWSYS_C_SRCS ${KWSYS_C_SRCS} ${KWSYS_HEADER_DIR}/${h}.h)
+
+ # Create an install target for the header.
+ IF(KWSYS_INSTALL_INCLUDE_DIR)
+ INSTALL(FILES ${KWSYS_HEADER_DIR}/${h}.h
+ DESTINATION ${KWSYS_INSTALL_INCLUDE_DIR}/${KWSYS_NAMESPACE}
+ ${KWSYS_INSTALL_INCLUDE_OPTIONS})
+ ENDIF()
+ENDFOREACH()
+
+# Configure other C++ headers.
+FOREACH(h ${KWSYS_HXX_FILES})
+ # Configure the header into the given directory.
+ CONFIGURE_FILE(${PROJECT_SOURCE_DIR}/${h}.hxx.in ${KWSYS_HEADER_DIR}/${h}.hxx
+ @ONLY IMMEDIATE)
+ SET(KWSYS_CXX_SRCS ${KWSYS_CXX_SRCS} ${KWSYS_HEADER_DIR}/${h}.hxx)
+
+ # Create an install target for the header.
+ IF(KWSYS_INSTALL_INCLUDE_DIR)
+ INSTALL(FILES ${KWSYS_HEADER_DIR}/${h}.hxx
+ DESTINATION ${KWSYS_INSTALL_INCLUDE_DIR}/${KWSYS_NAMESPACE}
+ ${KWSYS_INSTALL_INCLUDE_OPTIONS})
+ ENDIF()
+ENDFOREACH()
+
+#-----------------------------------------------------------------------------
+# Add the library with the configured name and list of sources.
+IF(KWSYS_C_SRCS OR KWSYS_CXX_SRCS)
+ IF(KWSYS_SPLIT_OBJECTS_FROM_INTERFACE)
+ SET(KWSYS_TARGET_INTERFACE ${KWSYS_NAMESPACE})
+ SET(KWSYS_TARGET_OBJECT ${KWSYS_NAMESPACE}_objects)
+ SET(KWSYS_TARGET_LINK ${KWSYS_NAMESPACE}_private)
+ SET(KWSYS_TARGET_INSTALL ${KWSYS_TARGET_INTERFACE} ${KWSYS_TARGET_LINK})
+ SET(KWSYS_LINK_DEPENDENCY INTERFACE)
+ ADD_LIBRARY(${KWSYS_TARGET_OBJECT} OBJECT
+ ${KWSYS_C_SRCS} ${KWSYS_CXX_SRCS})
+ IF(KWSYS_BUILD_SHARED OR KWSYS_BUILD_PIC)
+ SET_PROPERTY(TARGET ${KWSYS_TARGET_OBJECT} PROPERTY
+ POSITION_INDEPENDENT_CODE TRUE)
+ ENDIF()
+ ADD_LIBRARY(${KWSYS_TARGET_INTERFACE} INTERFACE)
+ ADD_LIBRARY(${KWSYS_TARGET_LINK} INTERFACE)
+ TARGET_LINK_LIBRARIES(${KWSYS_TARGET_LINK} INTERFACE
+ ${KWSYS_TARGET_INTERFACE})
+ TARGET_SOURCES(${KWSYS_TARGET_LINK} INTERFACE
+ $<TARGET_OBJECTS:${KWSYS_TARGET_OBJECT}>)
+ target_compile_features(${KWSYS_TARGET_OBJECT} PRIVATE ${KWSYS_CXX_COMPILE_FEATURES})
+ target_compile_features(${KWSYS_TARGET_INTERFACE} INTERFACE ${KWSYS_CXX_COMPILE_FEATURES})
+ ELSE()
+ SET(KWSYS_TARGET_INTERFACE ${KWSYS_NAMESPACE})
+ SET(KWSYS_TARGET_OBJECT ${KWSYS_NAMESPACE})
+ SET(KWSYS_TARGET_LINK ${KWSYS_NAMESPACE})
+ set(KWSYS_TARGET_INSTALL ${KWSYS_TARGET_LINK})
+ SET(KWSYS_LINK_DEPENDENCY PUBLIC)
+ ADD_LIBRARY(${KWSYS_TARGET_INTERFACE} ${KWSYS_LIBRARY_TYPE}
+ ${KWSYS_C_SRCS} ${KWSYS_CXX_SRCS})
+ target_compile_features(${KWSYS_TARGET_INTERFACE} PUBLIC ${KWSYS_CXX_COMPILE_FEATURES})
+ ENDIF()
+ if (KWSYS_ALIAS_TARGET)
+ add_library(${KWSYS_ALIAS_TARGET} ALIAS ${KWSYS_TARGET_INTERFACE})
+ endif ()
+ SET_TARGET_PROPERTIES(${KWSYS_TARGET_OBJECT} PROPERTIES
+ C_CLANG_TIDY ""
+ CXX_CLANG_TIDY ""
+ C_INCLUDE_WHAT_YOU_USE ""
+ CXX_INCLUDE_WHAT_YOU_USE ""
+ LABELS "${KWSYS_LABELS_LIB}")
+ IF(KWSYS_USE_DynamicLoader)
+ IF(UNIX)
+ TARGET_LINK_LIBRARIES(${KWSYS_TARGET_INTERFACE} ${KWSYS_LINK_DEPENDENCY}
+ ${CMAKE_DL_LIBS})
+ ENDIF()
+ ENDIF()
+
+ IF(KWSYS_USE_SystemInformation)
+ IF(WIN32)
+ TARGET_LINK_LIBRARIES(${KWSYS_TARGET_INTERFACE} ${KWSYS_LINK_DEPENDENCY} ws2_32)
+ # link in dbghelp.dll for symbol lookup if MSVC 1800 or later
+ # Note that the dbghelp runtime is part of MS Windows OS
+ IF(MSVC_VERSION AND NOT MSVC_VERSION VERSION_LESS 1800)
+ TARGET_LINK_LIBRARIES(${KWSYS_TARGET_INTERFACE} ${KWSYS_LINK_DEPENDENCY} dbghelp)
+ ENDIF()
+ IF(KWSYS_SYS_HAS_PSAPI)
+ TARGET_LINK_LIBRARIES(${KWSYS_TARGET_INTERFACE} ${KWSYS_LINK_DEPENDENCY}
+ Psapi)
+ ENDIF()
+ ELSEIF(UNIX)
+ IF (EXECINFO_LIB AND KWSYS_CXX_HAS_BACKTRACE)
+ # backtrace on FreeBSD is not in libc
+ TARGET_LINK_LIBRARIES(${KWSYS_TARGET_INTERFACE} ${KWSYS_LINK_DEPENDENCY}
+ ${EXECINFO_LIB})
+ ENDIF()
+ IF (KWSYS_CXX_HAS_DLADDR)
+ # for symbol lookup using dladdr
+ TARGET_LINK_LIBRARIES(${KWSYS_TARGET_INTERFACE} ${KWSYS_LINK_DEPENDENCY}
+ ${CMAKE_DL_LIBS})
+ ENDIF()
+ IF (CMAKE_SYSTEM_NAME STREQUAL "SunOS")
+ TARGET_LINK_LIBRARIES(${KWSYS_TARGET_INTERFACE} ${KWSYS_LINK_DEPENDENCY}
+ socket)
+ ENDIF()
+ ENDIF()
+ ENDIF()
+
+ # Apply user-defined target properties to the library.
+ IF(KWSYS_PROPERTIES_CXX)
+ SET_TARGET_PROPERTIES(${KWSYS_TARGET_INTERFACE} PROPERTIES
+ ${KWSYS_PROPERTIES_CXX})
+ ENDIF()
+
+ # Set up include usage requirement
+ IF(COMMAND TARGET_INCLUDE_DIRECTORIES)
+ TARGET_INCLUDE_DIRECTORIES(${KWSYS_TARGET_INTERFACE} INTERFACE
+ $<BUILD_INTERFACE:${KWSYS_HEADER_ROOT}>)
+ IF(KWSYS_INSTALL_INCLUDE_DIR)
+ TARGET_INCLUDE_DIRECTORIES(${KWSYS_TARGET_INTERFACE} INTERFACE
+ $<INSTALL_INTERFACE:${KWSYS_INSTALL_INCLUDE_DIR}>)
+ ENDIF()
+ ENDIF()
+
+ # Create an install target for the library.
+ IF(KWSYS_INSTALL_LIBRARY_RULE)
+ INSTALL(TARGETS ${KWSYS_TARGET_INSTALL} ${KWSYS_INSTALL_LIBRARY_RULE})
+ ENDIF()
+ IF(KWSYS_INSTALL_NAMELINK_RULE)
+ INSTALL(TARGETS ${KWSYS_TARGET_INSTALL} ${KWSYS_INSTALL_NAMELINK_RULE})
+ ENDIF()
+ENDIF()
+
+# Add a C-only library if requested.
+IF(KWSYS_ENABLE_C AND KWSYS_C_SRCS)
+ IF(KWSYS_SPLIT_OBJECTS_FROM_INTERFACE)
+ SET(KWSYS_TARGET_C_INTERFACE ${KWSYS_NAMESPACE}_c)
+ SET(KWSYS_TARGET_C_OBJECT ${KWSYS_NAMESPACE}_c_objects)
+ SET(KWSYS_TARGET_C_LINK ${KWSYS_NAMESPACE}_c_private)
+ SET(KWSYS_TARGET_C_INSTALL
+ ${KWSYS_TARGET_C_INTERFACE} ${KWSYS_TARGET_C_LINK})
+ SET(KWSYS_LINK_DEPENDENCY INTERFACE)
+ ADD_LIBRARY(${KWSYS_TARGET_C_OBJECT} OBJECT ${KWSYS_C_SRCS})
+ IF(KWSYS_BUILD_SHARED OR KWSYS_BUILD_PIC)
+ SET_PROPERTY(TARGET ${KWSYS_TARGET_C_OBJECT} PROPERTY
+ POSITION_INDEPENDENT_CODE TRUE)
+ ENDIF()
+ ADD_LIBRARY(${KWSYS_TARGET_C_INTERFACE} INTERFACE)
+ ADD_LIBRARY(${KWSYS_TARGET_C_LINK} INTERFACE)
+ TARGET_LINK_LIBRARIES(${KWSYS_TARGET_C_LINK} INTERFACE
+ ${KWSYS_TARGET_C_INTERFACE})
+ TARGET_SOURCES(${KWSYS_TARGET_C_LINK} INTERFACE
+ $<TARGET_OBJECTS:${KWSYS_TARGET_C_OBJECT}>)
+ ELSE()
+ SET(KWSYS_TARGET_C_INTERFACE ${KWSYS_NAMESPACE}_c)
+ SET(KWSYS_TARGET_C_OBJECT ${KWSYS_NAMESPACE}_c)
+ SET(KWSYS_TARGET_C_LINK ${KWSYS_NAMESPACE}_c)
+ SET(KWSYS_TARGET_C_INSTALL ${KWSYS_TARGET_C_LINK})
+ SET(KWSYS_LINK_DEPENDENCY PUBLIC)
+ ADD_LIBRARY(${KWSYS_TARGET_C_INTERFACE} ${KWSYS_LIBRARY_TYPE}
+ ${KWSYS_C_SRCS})
+ ENDIF()
+ SET_TARGET_PROPERTIES(${KWSYS_TARGET_C_OBJECT} PROPERTIES
+ LABELS "${KWSYS_LABELS_LIB}")
+
+ # Apply user-defined target properties to the library.
+ IF(KWSYS_PROPERTIES_C)
+ SET_TARGET_PROPERTIES(${KWSYS_TARGET_C_INTERFACE} PROPERTIES
+ ${KWSYS_PROPERTIES_C})
+ ENDIF()
+
+ # Set up include usage requirement
+ IF(COMMAND TARGET_INCLUDE_DIRECTORIES)
+ TARGET_INCLUDE_DIRECTORIES(${KWSYS_TARGET_C_INTERFACE} INTERFACE
+ $<BUILD_INTERFACE:${KWSYS_HEADER_ROOT}>)
+ IF(KWSYS_INSTALL_INCLUDE_DIR)
+ TARGET_INCLUDE_DIRECTORIES(${KWSYS_TARGET_C_INTERFACE} INTERFACE
+ $<INSTALL_INTERFACE:${KWSYS_INSTALL_INCLUDE_DIR}>)
+ ENDIF()
+ ENDIF()
+
+ # Create an install target for the library.
+ IF(KWSYS_INSTALL_LIBRARY_RULE)
+ INSTALL(TARGETS ${KWSYS_TARGET_C_INSTALL})
+ ENDIF()
+ENDIF()
+
+# For building kwsys itself, we use a macro defined on the command
+# line to configure the namespace in the C and C++ source files.
+ADD_DEFINITIONS("-DKWSYS_NAMESPACE=${KWSYS_NAMESPACE}")
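+# (The sources wrap their code in this macro, e.g. "namespace KWSYS_NAMESPACE
+# { ... }" in the C++ files, so the same sources can be built under different
+# namespaces by different host projects.)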
+
+# Disable deprecation warnings for standard C functions.
+IF(MSVC OR (WIN32 AND (CMAKE_C_COMPILER_ID STREQUAL "Intel" OR
+ (CMAKE_C_COMPILER_ID STREQUAL "Clang" AND CMAKE_CXX_SIMULATE_ID STREQUAL "MSVC"))))
+ ADD_DEFINITIONS(
+ -D_CRT_NONSTDC_NO_DEPRECATE
+ -D_CRT_SECURE_NO_DEPRECATE
+ -D_CRT_SECURE_NO_WARNINGS
+ -D_SCL_SECURE_NO_DEPRECATE
+ )
+ENDIF()
+
+IF(WIN32)
+  # Help enforce the use of the wide-character Windows APIs.
+ ADD_DEFINITIONS(-DUNICODE -D_UNICODE)
+ENDIF()
+
+IF(KWSYS_USE_String)
+ # Activate code in "String.c". See the comment in the source.
+ SET_SOURCE_FILES_PROPERTIES(String.c PROPERTIES
+ COMPILE_FLAGS "-DKWSYS_STRING_C")
+ENDIF()
+
+IF(KWSYS_USE_Encoding)
+  # Set the default 8-bit encoding in "EncodingC.c" and "EncodingCXX.cxx".
+ SET_PROPERTY(SOURCE EncodingC.c EncodingCXX.cxx APPEND PROPERTY COMPILE_DEFINITIONS
+ KWSYS_ENCODING_DEFAULT_CODEPAGE=${KWSYS_ENCODING_DEFAULT_CODEPAGE})
+ENDIF()
+
+#-----------------------------------------------------------------------------
+# Setup testing if not being built as part of another project.
+IF(KWSYS_STANDALONE OR CMake_SOURCE_DIR)
+ IF(BUILD_TESTING)
+ # Compute the location of executables.
+ SET(EXEC_DIR "${CMAKE_CURRENT_BINARY_DIR}")
+ IF(EXECUTABLE_OUTPUT_PATH)
+ SET(EXEC_DIR "${EXECUTABLE_OUTPUT_PATH}")
+ ENDIF()
+
+ # C tests
+ SET(KWSYS_C_TESTS
+ testEncode.c
+ testTerminal.c
+ )
+ IF(KWSYS_STANDALONE)
+ SET(KWSYS_C_TESTS ${KWSYS_C_TESTS} testFail.c)
+ ENDIF()
+ CREATE_TEST_SOURCELIST(
+ KWSYS_C_TEST_SRCS ${KWSYS_NAMESPACE}TestsC.c
+ ${KWSYS_C_TESTS}
+ )
+ ADD_EXECUTABLE(${KWSYS_NAMESPACE}TestsC ${KWSYS_C_TEST_SRCS})
+ SET_PROPERTY(TARGET ${KWSYS_NAMESPACE}TestsC PROPERTY LABELS ${KWSYS_LABELS_EXE})
+ TARGET_LINK_LIBRARIES(${KWSYS_NAMESPACE}TestsC ${KWSYS_TARGET_C_LINK})
+ FOREACH(testfile ${KWSYS_C_TESTS})
+ get_filename_component(test "${testfile}" NAME_WE)
+ ADD_TEST(kwsys.${test} ${EXEC_DIR}/${KWSYS_NAMESPACE}TestsC ${test} ${KWSYS_TEST_ARGS_${test}})
+ SET_PROPERTY(TEST kwsys.${test} PROPERTY LABELS ${KWSYS_LABELS_TEST})
+ ENDFOREACH()
+
+ # C++ tests
+ IF(NOT WATCOM AND NOT CMake_SOURCE_DIR)
+ SET(KWSYS_CXX_TESTS
+ testHashSTL.cxx
+ )
+ ENDIF()
+ SET(KWSYS_CXX_TESTS ${KWSYS_CXX_TESTS}
+ testConfigure.cxx
+ testSystemTools.cxx
+ testCommandLineArguments.cxx
+ testCommandLineArguments1.cxx
+ testDirectory.cxx
+ )
+ IF(KWSYS_STL_HAS_WSTRING)
+ SET(KWSYS_CXX_TESTS ${KWSYS_CXX_TESTS}
+ testEncoding.cxx
+ )
+ ENDIF()
+ IF(KWSYS_USE_FStream)
+ SET(KWSYS_CXX_TESTS ${KWSYS_CXX_TESTS}
+ testFStream.cxx
+ )
+ ENDIF()
+ IF(KWSYS_USE_ConsoleBuf)
+ ADD_EXECUTABLE(testConsoleBufChild testConsoleBufChild.cxx)
+ SET_PROPERTY(TARGET testConsoleBufChild PROPERTY C_CLANG_TIDY "")
+ SET_PROPERTY(TARGET testConsoleBufChild PROPERTY CXX_CLANG_TIDY "")
+ SET_PROPERTY(TARGET testConsoleBufChild PROPERTY C_INCLUDE_WHAT_YOU_USE "")
+ SET_PROPERTY(TARGET testConsoleBufChild PROPERTY CXX_INCLUDE_WHAT_YOU_USE "")
+ SET_PROPERTY(TARGET testConsoleBufChild PROPERTY LABELS ${KWSYS_LABELS_EXE})
+ TARGET_LINK_LIBRARIES(testConsoleBufChild ${KWSYS_TARGET_LINK})
+ SET(KWSYS_CXX_TESTS ${KWSYS_CXX_TESTS}
+ testConsoleBuf.cxx
+ )
+ IF(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC" AND
+ CMAKE_CXX_COMPILER_VERSION VERSION_GREATER "19.0.23506")
+ set_property(SOURCE testConsoleBuf.cxx testConsoleBufChild.cxx PROPERTY COMPILE_FLAGS /utf-8)
+ ENDIF()
+ SET_PROPERTY(SOURCE testConsoleBuf.cxx APPEND PROPERTY COMPILE_DEFINITIONS
+ KWSYS_ENCODING_DEFAULT_CODEPAGE=${KWSYS_ENCODING_DEFAULT_CODEPAGE})
+ ENDIF()
+ IF(KWSYS_USE_SystemInformation)
+ SET(KWSYS_CXX_TESTS ${KWSYS_CXX_TESTS} testSystemInformation.cxx)
+ ENDIF()
+ IF(KWSYS_USE_DynamicLoader)
+ SET(KWSYS_CXX_TESTS ${KWSYS_CXX_TESTS} testDynamicLoader.cxx)
+ # If kwsys contains the DynamicLoader, need extra library
+ ADD_LIBRARY(${KWSYS_NAMESPACE}TestDynload MODULE testDynload.c)
+ SET_PROPERTY(TARGET ${KWSYS_NAMESPACE}TestDynload PROPERTY LABELS ${KWSYS_LABELS_LIB})
+ ADD_DEPENDENCIES(${KWSYS_NAMESPACE}TestDynload ${KWSYS_TARGET_INTERFACE})
+
+ if (WIN32)
+      # Extra Windows-only test libraries: a module that depends on a separate implementation DLL.
+ add_library(${KWSYS_NAMESPACE}TestDynloadImpl SHARED testDynloadImpl.c)
+ set_property(TARGET ${KWSYS_NAMESPACE}TestDynloadImpl PROPERTY LABELS ${KWSYS_LABELS_LIB})
+ set_property(TARGET ${KWSYS_NAMESPACE}TestDynloadImpl PROPERTY DEFINE_SYMBOL BUILDING_TestDynloadImpl)
+ set_property(TARGET ${KWSYS_NAMESPACE}TestDynloadImpl PROPERTY RUNTIME_OUTPUT_DIRECTORY "${CMAKE_CURRENT_BINARY_DIR}/dynloaddir")
+ add_dependencies(${KWSYS_NAMESPACE}TestDynloadImpl ${KWSYS_TARGET_INTERFACE})
+ add_library(${KWSYS_NAMESPACE}TestDynloadUse MODULE testDynloadUse.c)
+ set_property(TARGET ${KWSYS_NAMESPACE}TestDynloadUse PROPERTY LABELS ${KWSYS_LABELS_LIB})
+ set_property(TARGET ${KWSYS_NAMESPACE}TestDynloadUse PROPERTY LIBRARY_OUTPUT_DIRECTORY "${CMAKE_CURRENT_BINARY_DIR}/dynloaddir")
+ add_dependencies(${KWSYS_NAMESPACE}TestDynloadUse ${KWSYS_TARGET_INTERFACE})
+ target_link_libraries(${KWSYS_NAMESPACE}TestDynloadUse PRIVATE ${KWSYS_NAMESPACE}TestDynloadImpl)
+ endif ()
+ ENDIF()
+ CREATE_TEST_SOURCELIST(
+ KWSYS_CXX_TEST_SRCS ${KWSYS_NAMESPACE}TestsCxx.cxx
+ ${KWSYS_CXX_TESTS}
+ )
+ ADD_EXECUTABLE(${KWSYS_NAMESPACE}TestsCxx ${KWSYS_CXX_TEST_SRCS})
+ SET_PROPERTY(TARGET ${KWSYS_NAMESPACE}TestsCxx PROPERTY C_CLANG_TIDY "")
+ SET_PROPERTY(TARGET ${KWSYS_NAMESPACE}TestsCxx PROPERTY CXX_CLANG_TIDY "")
+ SET_PROPERTY(TARGET ${KWSYS_NAMESPACE}TestsCxx PROPERTY C_INCLUDE_WHAT_YOU_USE "")
+ SET_PROPERTY(TARGET ${KWSYS_NAMESPACE}TestsCxx PROPERTY CXX_INCLUDE_WHAT_YOU_USE "")
+ SET_PROPERTY(TARGET ${KWSYS_NAMESPACE}TestsCxx PROPERTY LABELS ${KWSYS_LABELS_EXE})
+ TARGET_LINK_LIBRARIES(${KWSYS_NAMESPACE}TestsCxx ${KWSYS_TARGET_LINK})
+
+ SET(TEST_SYSTEMTOOLS_SOURCE_DIR "${CMAKE_CURRENT_SOURCE_DIR}")
+ SET(TEST_SYSTEMTOOLS_BINARY_DIR "${CMAKE_CURRENT_BINARY_DIR}")
+ CONFIGURE_FILE(
+ ${PROJECT_SOURCE_DIR}/testSystemTools.h.in
+ ${PROJECT_BINARY_DIR}/testSystemTools.h)
+ INCLUDE_DIRECTORIES(${PROJECT_BINARY_DIR})
+
+ IF(CTEST_TEST_KWSYS)
+ CONFIGURE_FILE("${CMAKE_CURRENT_SOURCE_DIR}/ExtraTest.cmake.in"
+ "${CMAKE_CURRENT_BINARY_DIR}/ExtraTest.cmake")
+ SET_DIRECTORY_PROPERTIES(PROPERTIES TEST_INCLUDE_FILE "${CMAKE_CURRENT_BINARY_DIR}/ExtraTest.cmake")
+ ENDIF()
+
+ SET(KWSYS_TEST_ARGS_testCommandLineArguments
+ --another-bool-variable
+ --long3=opt
+ --set-bool-arg1
+ -SSS ken brad bill andy
+ --some-bool-variable=true
+ --some-double-variable12.5
+ --some-int-variable 14
+ "--some-string-variable=test string with space"
+ --some-multi-argument 5 1 8 3 7 1 3 9 7 1
+ -N 12.5 -SS=andy -N 1.31 -N 22
+ -SS=bill -BBtrue -SS=brad
+ -BBtrue
+ -BBfalse
+ -SS=ken
+ -A
+ -C=test
+ --long2 hello
+ )
+ SET(KWSYS_TEST_ARGS_testCommandLineArguments1
+ --ignored
+ -n 24
+ --second-ignored
+ "-m=test value"
+ third-ignored
+ -p
+ some junk at the end
+ )
+ FOREACH(testfile ${KWSYS_CXX_TESTS})
+ get_filename_component(test "${testfile}" NAME_WE)
+ ADD_TEST(kwsys.${test} ${EXEC_DIR}/${KWSYS_NAMESPACE}TestsCxx ${test} ${KWSYS_TEST_ARGS_${test}})
+ SET_PROPERTY(TEST kwsys.${test} PROPERTY LABELS ${KWSYS_LABELS_TEST})
+ ENDFOREACH()
+
+ # Process tests.
+ ADD_EXECUTABLE(${KWSYS_NAMESPACE}TestProcess testProcess.c)
+ SET_PROPERTY(TARGET ${KWSYS_NAMESPACE}TestProcess PROPERTY LABELS ${KWSYS_LABELS_EXE})
+ TARGET_LINK_LIBRARIES(${KWSYS_NAMESPACE}TestProcess ${KWSYS_TARGET_C_LINK})
+ IF(NOT CYGWIN)
+ SET(KWSYS_TEST_PROCESS_7 7)
+ ENDIF()
+ FOREACH(n 1 2 3 4 5 6 ${KWSYS_TEST_PROCESS_7} 9 10)
+ ADD_TEST(kwsys.testProcess-${n} ${EXEC_DIR}/${KWSYS_NAMESPACE}TestProcess ${n})
+ SET_PROPERTY(TEST kwsys.testProcess-${n} PROPERTY LABELS ${KWSYS_LABELS_TEST})
+ SET_TESTS_PROPERTIES(kwsys.testProcess-${n} PROPERTIES TIMEOUT 120)
+ ENDFOREACH()
+
+ SET(testProcess_COMPILE_FLAGS "")
+ # Some Apple compilers produce bad optimizations in this source.
+ IF(APPLE AND CMAKE_C_COMPILER_ID MATCHES "^(GNU|LLVM)$")
+ SET(testProcess_COMPILE_FLAGS "${testProcess_COMPILE_FLAGS} -O0")
+ ELSEIF(CMAKE_C_COMPILER_ID STREQUAL "XL")
+ # Tell IBM XL not to warn about our test infinite loop
+ IF(CMAKE_SYSTEM MATCHES "Linux.*ppc64le"
+ AND CMAKE_C_COMPILER_VERSION VERSION_LESS "16.1.0"
+ AND NOT CMAKE_C_COMPILER_VERSION VERSION_LESS "13.1.1")
+ # v13.1.[1-6] on Linux ppc64le is clang based and does not accept
+ # the -qsuppress option, so just suppress all warnings.
+ SET(testProcess_COMPILE_FLAGS "${testProcess_COMPILE_FLAGS} -w")
+ ELSE()
+ SET(testProcess_COMPILE_FLAGS "${testProcess_COMPILE_FLAGS} -qsuppress=1500-010")
+ ENDIF()
+ ENDIF()
+ IF(CMAKE_C_FLAGS MATCHES "-fsanitize=")
+ SET(testProcess_COMPILE_FLAGS "${testProcess_COMPILE_FLAGS} -DCRASH_USING_ABORT")
+ ENDIF()
+ SET_PROPERTY(SOURCE testProcess.c PROPERTY COMPILE_FLAGS "${testProcess_COMPILE_FLAGS}")
+
+ # Test SharedForward
+ CONFIGURE_FILE(${PROJECT_SOURCE_DIR}/testSharedForward.c.in
+ ${PROJECT_BINARY_DIR}/testSharedForward.c @ONLY IMMEDIATE)
+ ADD_EXECUTABLE(${KWSYS_NAMESPACE}TestSharedForward
+ ${PROJECT_BINARY_DIR}/testSharedForward.c)
+ SET_PROPERTY(TARGET ${KWSYS_NAMESPACE}TestSharedForward PROPERTY LABELS ${KWSYS_LABELS_EXE})
+ ADD_DEPENDENCIES(${KWSYS_NAMESPACE}TestSharedForward ${KWSYS_TARGET_C_LINK})
+ ADD_TEST(kwsys.testSharedForward ${EXEC_DIR}/${KWSYS_NAMESPACE}TestSharedForward 1)
+ SET_PROPERTY(TEST kwsys.testSharedForward PROPERTY LABELS ${KWSYS_LABELS_TEST})
+
+ # Configure some test properties.
+ IF(KWSYS_STANDALONE)
+    # We expect this test to fail.
+ SET_TESTS_PROPERTIES(kwsys.testFail PROPERTIES WILL_FAIL ON)
+ GET_TEST_PROPERTY(kwsys.testFail WILL_FAIL wfv)
+ SET_TESTS_PROPERTIES(kwsys.testFail PROPERTIES MEASUREMENT "Some Key=Some Value")
+ MESSAGE(STATUS "GET_TEST_PROPERTY returned: ${wfv}")
+ ENDIF()
+
+ # Set up ctest custom configuration file.
+ CONFIGURE_FILE(${PROJECT_SOURCE_DIR}/CTestCustom.cmake.in
+ ${PROJECT_BINARY_DIR}/CTestCustom.cmake @ONLY)
+
+ # Suppress known consistent failures on buggy systems.
+ IF(KWSYS_TEST_BOGUS_FAILURES)
+ SET_TESTS_PROPERTIES(${KWSYS_TEST_BOGUS_FAILURES} PROPERTIES WILL_FAIL ON)
+ ENDIF()
+
+ ENDIF()
+ENDIF()
diff --git a/test/API/driver/kwsys/CONTRIBUTING.rst b/test/API/driver/kwsys/CONTRIBUTING.rst
new file mode 100644
index 0000000..32e7b83
--- /dev/null
+++ b/test/API/driver/kwsys/CONTRIBUTING.rst
@@ -0,0 +1,49 @@
+Contributing to KWSys
+*********************
+
+Patches
+=======
+
+KWSys is kept in its own Git repository and shared by several projects
+via copies in their source trees. Changes to KWSys should not be made
+directly in a host project, except perhaps in maintenance branches.
+
+KWSys uses `Kitware's GitLab Instance`_ to manage development and code review.
+To contribute patches:
+
+#. Fork the upstream `KWSys Repository`_ into a personal account.
+#. Base all new work on the upstream ``master`` branch.
+#. Run ``./SetupForDevelopment.sh`` in new local work trees.
+#. Create commits making incremental, distinct, logically complete changes.
+#. Push a topic branch to a personal repository fork on GitLab.
+#. Create a GitLab Merge Request targeting the upstream ``master`` branch.
+
+Once changes are reviewed, tested, and integrated into upstream KWSys, the
+copies of KWSys within dependent projects can be updated to pick up the changes.
+
+.. _`Kitware's GitLab Instance`: https://gitlab.kitware.com
+.. _`KWSys Repository`: https://gitlab.kitware.com/utils/kwsys
+
+Code Style
+==========
+
+We use `clang-format`_ version **6.0** to define our style for C++ code in
+the KWSys source tree. See the `.clang-format`_ configuration file for
+our style settings. Use the `clang-format.bash`_ script to format source
+code. It automatically runs ``clang-format`` on the set of source files
+for which we enforce style. The script also has options to format only
+a subset of files, such as those that are locally modified.
+
+.. _`clang-format`: http://clang.llvm.org/docs/ClangFormat.html
+.. _`.clang-format`: .clang-format
+.. _`clang-format.bash`: clang-format.bash
+
+License
+=======
+
+We do not require any formal copyright assignment or contributor license
+agreement. Any contributions intentionally sent upstream are presumed
+to be offered under terms of the OSI-approved BSD 3-clause License.
+See `Copyright.txt`_ for details.
+
+.. _`Copyright.txt`: Copyright.txt
diff --git a/test/API/driver/kwsys/CTestConfig.cmake b/test/API/driver/kwsys/CTestConfig.cmake
new file mode 100644
index 0000000..1339ffc
--- /dev/null
+++ b/test/API/driver/kwsys/CTestConfig.cmake
@@ -0,0 +1,9 @@
+# Distributed under the OSI-approved BSD 3-Clause License. See accompanying
+# file Copyright.txt or https://cmake.org/licensing#kwsys for details.
+
+set(CTEST_PROJECT_NAME "KWSys")
+set(CTEST_NIGHTLY_START_TIME "21:00:00 EDT")
+set(CTEST_DROP_METHOD "http")
+set(CTEST_DROP_SITE "open.cdash.org")
+set(CTEST_DROP_LOCATION "/submit.php?project=KWSys")
+set(CTEST_DROP_SITE_CDASH TRUE)
diff --git a/test/API/driver/kwsys/CTestCustom.cmake.in b/test/API/driver/kwsys/CTestCustom.cmake.in
new file mode 100644
index 0000000..760221b
--- /dev/null
+++ b/test/API/driver/kwsys/CTestCustom.cmake.in
@@ -0,0 +1,14 @@
+# kwsys.testProcess-10 involves sending SIGINT to a child process, which then
+# exits abnormally via a call to _exit(). (On Windows, a call to ExitProcess).
+# Naturally, this results in plenty of memory being "leaked" by this child
+# process - the memory check results are not meaningful in this case.
+#
+# kwsys.testProcess-9 also tests sending SIGINT to a child process. However,
+# normal operation of that test involves the child process timing out, and the
+# host process kills (SIGKILL) it as a result. Since it was SIGKILL'ed, the
+# resulting memory leaks are not logged by valgrind anyway. Therefore, we
+# don't have to exclude it.
+
+list(APPEND CTEST_CUSTOM_MEMCHECK_IGNORE
+ kwsys.testProcess-10
+ )
diff --git a/test/API/driver/kwsys/CommandLineArguments.cxx b/test/API/driver/kwsys/CommandLineArguments.cxx
new file mode 100644
index 0000000..3fd1955
--- /dev/null
+++ b/test/API/driver/kwsys/CommandLineArguments.cxx
@@ -0,0 +1,768 @@
+/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying
+ file Copyright.txt or https://cmake.org/licensing#kwsys for details. */
+#include "kwsysPrivate.h"
+#include KWSYS_HEADER(CommandLineArguments.hxx)
+
+#include KWSYS_HEADER(Configure.hxx)
+#include KWSYS_HEADER(String.hxx)
+
+// Work-around CMake dependency scanning limitation. This must
+// duplicate the above list of headers.
+#if 0
+# include "CommandLineArguments.hxx.in"
+# include "Configure.hxx.in"
+# include "String.hxx.in"
+#endif
+
+#include <iostream>
+#include <map>
+#include <set>
+#include <sstream>
+#include <vector>
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#ifdef _MSC_VER
+# pragma warning(disable : 4786)
+#endif
+
+#if defined(__sgi) && !defined(__GNUC__)
+# pragma set woff 1375 /* base class destructor not virtual */
+#endif
+
+#if 0
+# define CommandLineArguments_DEBUG(x) \
+ std::cout << __LINE__ << " CLA: " << x << std::endl
+#else
+# define CommandLineArguments_DEBUG(x)
+#endif
+
+namespace KWSYS_NAMESPACE {
+
+struct CommandLineArgumentsCallbackStructure
+{
+ const char* Argument;
+ int ArgumentType;
+ CommandLineArguments::CallbackType Callback;
+ void* CallData;
+ void* Variable;
+ int VariableType;
+ const char* Help;
+};
+
+class CommandLineArgumentsVectorOfStrings : public std::vector<kwsys::String>
+{
+};
+class CommandLineArgumentsSetOfStrings : public std::set<kwsys::String>
+{
+};
+class CommandLineArgumentsMapOfStrucs
+ : public std::map<kwsys::String, CommandLineArgumentsCallbackStructure>
+{
+};
+
+class CommandLineArgumentsInternal
+{
+public:
+ CommandLineArgumentsInternal()
+ : UnknownArgumentCallback{ nullptr }
+ , ClientData{ nullptr }
+ , LastArgument{ 0 }
+ {
+ }
+
+ typedef CommandLineArgumentsVectorOfStrings VectorOfStrings;
+ typedef CommandLineArgumentsMapOfStrucs CallbacksMap;
+ typedef kwsys::String String;
+ typedef CommandLineArgumentsSetOfStrings SetOfStrings;
+
+ VectorOfStrings Argv;
+ String Argv0;
+ CallbacksMap Callbacks;
+
+ CommandLineArguments::ErrorCallbackType UnknownArgumentCallback;
+ void* ClientData;
+
+ VectorOfStrings::size_type LastArgument;
+
+ VectorOfStrings UnusedArguments;
+};
+
+CommandLineArguments::CommandLineArguments()
+{
+ this->Internals = new CommandLineArguments::Internal;
+ this->Help = "";
+ this->LineLength = 80;
+ this->StoreUnusedArgumentsFlag = false;
+}
+
+CommandLineArguments::~CommandLineArguments()
+{
+ delete this->Internals;
+}
+
+void CommandLineArguments::Initialize(int argc, const char* const argv[])
+{
+ int cc;
+
+ this->Initialize();
+ this->Internals->Argv0 = argv[0];
+ for (cc = 1; cc < argc; cc++) {
+ this->ProcessArgument(argv[cc]);
+ }
+}
+
+void CommandLineArguments::Initialize(int argc, char* argv[])
+{
+ this->Initialize(argc, static_cast<const char* const*>(argv));
+}
+
+void CommandLineArguments::Initialize()
+{
+ this->Internals->Argv.clear();
+ this->Internals->LastArgument = 0;
+}
+
+void CommandLineArguments::ProcessArgument(const char* arg)
+{
+ this->Internals->Argv.push_back(arg);
+}
+
+bool CommandLineArguments::GetMatchedArguments(
+ std::vector<std::string>* matches, const std::string& arg)
+{
+ matches->clear();
+ CommandLineArguments::Internal::CallbacksMap::iterator it;
+
+ // Does the argument match to any we know about?
+ for (it = this->Internals->Callbacks.begin();
+ it != this->Internals->Callbacks.end(); it++) {
+ const CommandLineArguments::Internal::String& parg = it->first;
+ CommandLineArgumentsCallbackStructure* cs = &it->second;
+ if (cs->ArgumentType == CommandLineArguments::NO_ARGUMENT ||
+ cs->ArgumentType == CommandLineArguments::SPACE_ARGUMENT) {
+ if (arg == parg) {
+ matches->push_back(parg);
+ }
+ } else if (arg.find(parg) == 0) {
+ matches->push_back(parg);
+ }
+ }
+ return !matches->empty();
+}
+
+int CommandLineArguments::Parse()
+{
+ std::vector<std::string>::size_type cc;
+ std::vector<std::string> matches;
+ if (this->StoreUnusedArgumentsFlag) {
+ this->Internals->UnusedArguments.clear();
+ }
+ for (cc = 0; cc < this->Internals->Argv.size(); cc++) {
+ const std::string& arg = this->Internals->Argv[cc];
+ CommandLineArguments_DEBUG("Process argument: " << arg);
+ this->Internals->LastArgument = cc;
+ if (this->GetMatchedArguments(&matches, arg)) {
+      // Ok, we found one or more arguments that match what the user specified.
+ // Let's find the longest one.
+ CommandLineArguments::Internal::VectorOfStrings::size_type kk;
+ CommandLineArguments::Internal::VectorOfStrings::size_type maxidx = 0;
+ CommandLineArguments::Internal::String::size_type maxlen = 0;
+ for (kk = 0; kk < matches.size(); kk++) {
+ if (matches[kk].size() > maxlen) {
+ maxlen = matches[kk].size();
+ maxidx = kk;
+ }
+ }
+ // So, the longest one is probably the right one. Now see if it has any
+ // additional value
+ CommandLineArgumentsCallbackStructure* cs =
+ &this->Internals->Callbacks[matches[maxidx]];
+ const std::string& sarg = matches[maxidx];
+ if (cs->Argument != sarg) {
+ abort();
+ }
+ switch (cs->ArgumentType) {
+ case NO_ARGUMENT:
+ // No value
+ if (!this->PopulateVariable(cs, nullptr)) {
+ return 0;
+ }
+ break;
+ case SPACE_ARGUMENT:
+ if (cc == this->Internals->Argv.size() - 1) {
+ this->Internals->LastArgument--;
+ return 0;
+ }
+ CommandLineArguments_DEBUG("This is a space argument: "
+ << arg << " value: "
+ << this->Internals->Argv[cc + 1]);
+ // Value is the next argument
+ if (!this->PopulateVariable(cs,
+ this->Internals->Argv[cc + 1].c_str())) {
+ return 0;
+ }
+ cc++;
+ break;
+ case EQUAL_ARGUMENT:
+ if (arg.size() == sarg.size() || arg.at(sarg.size()) != '=') {
+ this->Internals->LastArgument--;
+ return 0;
+ }
+        // Value is everything following the '=' sign
+ if (!this->PopulateVariable(cs, arg.c_str() + sarg.size() + 1)) {
+ return 0;
+ }
+ break;
+ case CONCAT_ARGUMENT:
+ // Value is whatever follows the argument
+ if (!this->PopulateVariable(cs, arg.c_str() + sarg.size())) {
+ return 0;
+ }
+ break;
+ case MULTI_ARGUMENT:
+ // Suck in all the rest of the arguments
+ CommandLineArguments_DEBUG("This is a multi argument: " << arg);
+ for (cc++; cc < this->Internals->Argv.size(); ++cc) {
+ const std::string& marg = this->Internals->Argv[cc];
+ CommandLineArguments_DEBUG(
+ " check multi argument value: " << marg);
+ if (this->GetMatchedArguments(&matches, marg)) {
+ CommandLineArguments_DEBUG("End of multi argument "
+ << arg << " with value: " << marg);
+ break;
+ }
+ CommandLineArguments_DEBUG(
+ " populate multi argument value: " << marg);
+ if (!this->PopulateVariable(cs, marg.c_str())) {
+ return 0;
+ }
+ }
+ if (cc != this->Internals->Argv.size()) {
+ CommandLineArguments_DEBUG("Again End of multi argument " << arg);
+ cc--;
+ continue;
+ }
+ break;
+ default:
+ std::cerr << "Got unknown argument type: \"" << cs->ArgumentType
+ << "\"" << std::endl;
+ this->Internals->LastArgument--;
+ return 0;
+ }
+ } else {
+ // Handle unknown arguments
+ if (this->Internals->UnknownArgumentCallback) {
+ if (!this->Internals->UnknownArgumentCallback(
+ arg.c_str(), this->Internals->ClientData)) {
+ this->Internals->LastArgument--;
+ return 0;
+ }
+ return 1;
+ } else if (this->StoreUnusedArgumentsFlag) {
+ CommandLineArguments_DEBUG("Store unused argument " << arg);
+ this->Internals->UnusedArguments.push_back(arg);
+ } else {
+ std::cerr << "Got unknown argument: \"" << arg << "\"" << std::endl;
+ this->Internals->LastArgument--;
+ return 0;
+ }
+ }
+ }
+ return 1;
+}
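+
+/* A minimal usage sketch for the argument types handled above (hypothetical
+   caller code, shown here only as a comment; it is not compiled into kwsys):
+
+     kwsys::CommandLineArguments args;
+     args.Initialize(argc, argv);
+     int level = 0;
+     std::string name;
+     args.AddArgument("--level", kwsys::CommandLineArguments::SPACE_ARGUMENT,
+                      &level, "Verbosity level, value in the next argument");
+     args.AddArgument("--name", kwsys::CommandLineArguments::EQUAL_ARGUMENT,
+                      &name, "Name given as --name=value");
+     if (!args.Parse()) {
+       std::cerr << "Error parsing arguments" << std::endl;
+     }
+*/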
+
+void CommandLineArguments::GetRemainingArguments(int* argc, char*** argv)
+{
+ CommandLineArguments::Internal::VectorOfStrings::size_type size =
+ this->Internals->Argv.size() - this->Internals->LastArgument + 1;
+ CommandLineArguments::Internal::VectorOfStrings::size_type cc;
+
+ // Copy Argv0 as the first argument
+ char** args = new char*[size];
+ args[0] = new char[this->Internals->Argv0.size() + 1];
+ strcpy(args[0], this->Internals->Argv0.c_str());
+ int cnt = 1;
+
+ // Copy everything after the LastArgument, since that was not parsed.
+ for (cc = this->Internals->LastArgument + 1;
+ cc < this->Internals->Argv.size(); cc++) {
+ args[cnt] = new char[this->Internals->Argv[cc].size() + 1];
+ strcpy(args[cnt], this->Internals->Argv[cc].c_str());
+ cnt++;
+ }
+ *argc = cnt;
+ *argv = args;
+}
+
+void CommandLineArguments::GetUnusedArguments(int* argc, char*** argv)
+{
+ CommandLineArguments::Internal::VectorOfStrings::size_type size =
+ this->Internals->UnusedArguments.size() + 1;
+ CommandLineArguments::Internal::VectorOfStrings::size_type cc;
+
+ // Copy Argv0 as the first argument
+ char** args = new char*[size];
+ args[0] = new char[this->Internals->Argv0.size() + 1];
+ strcpy(args[0], this->Internals->Argv0.c_str());
+ int cnt = 1;
+
+  // Copy the unused arguments collected during parsing.
+ for (cc = 0; cc < this->Internals->UnusedArguments.size(); cc++) {
+ kwsys::String& str = this->Internals->UnusedArguments[cc];
+ args[cnt] = new char[str.size() + 1];
+ strcpy(args[cnt], str.c_str());
+ cnt++;
+ }
+ *argc = cnt;
+ *argv = args;
+}
+
+void CommandLineArguments::DeleteRemainingArguments(int argc, char*** argv)
+{
+ int cc;
+ for (cc = 0; cc < argc; ++cc) {
+ delete[](*argv)[cc];
+ }
+ delete[] * argv;
+}
+
+void CommandLineArguments::AddCallback(const char* argument,
+ ArgumentTypeEnum type,
+ CallbackType callback, void* call_data,
+ const char* help)
+{
+ CommandLineArgumentsCallbackStructure s;
+ s.Argument = argument;
+ s.ArgumentType = type;
+ s.Callback = callback;
+ s.CallData = call_data;
+ s.VariableType = CommandLineArguments::NO_VARIABLE_TYPE;
+ s.Variable = nullptr;
+ s.Help = help;
+
+ this->Internals->Callbacks[argument] = s;
+ this->GenerateHelp();
+}
+
+void CommandLineArguments::AddArgument(const char* argument,
+ ArgumentTypeEnum type,
+ VariableTypeEnum vtype, void* variable,
+ const char* help)
+{
+ CommandLineArgumentsCallbackStructure s;
+ s.Argument = argument;
+ s.ArgumentType = type;
+ s.Callback = nullptr;
+ s.CallData = nullptr;
+ s.VariableType = vtype;
+ s.Variable = variable;
+ s.Help = help;
+
+ this->Internals->Callbacks[argument] = s;
+ this->GenerateHelp();
+}
+
+#define CommandLineArgumentsAddArgumentMacro(type, ctype) \
+ void CommandLineArguments::AddArgument(const char* argument, \
+ ArgumentTypeEnum type, \
+ ctype* variable, const char* help) \
+ { \
+ this->AddArgument(argument, type, CommandLineArguments::type##_TYPE, \
+ variable, help); \
+ }
+
+/* clang-format off */
+CommandLineArgumentsAddArgumentMacro(BOOL, bool)
+CommandLineArgumentsAddArgumentMacro(INT, int)
+CommandLineArgumentsAddArgumentMacro(DOUBLE, double)
+CommandLineArgumentsAddArgumentMacro(STRING, char*)
+CommandLineArgumentsAddArgumentMacro(STL_STRING, std::string)
+
+CommandLineArgumentsAddArgumentMacro(VECTOR_BOOL, std::vector<bool>)
+CommandLineArgumentsAddArgumentMacro(VECTOR_INT, std::vector<int>)
+CommandLineArgumentsAddArgumentMacro(VECTOR_DOUBLE, std::vector<double>)
+CommandLineArgumentsAddArgumentMacro(VECTOR_STRING, std::vector<char*>)
+CommandLineArgumentsAddArgumentMacro(VECTOR_STL_STRING,
+ std::vector<std::string>)
+#ifdef HELP_CLANG_FORMAT
+;
+#endif
+/* clang-format on */
+
+#define CommandLineArgumentsAddBooleanArgumentMacro(type, ctype) \
+ void CommandLineArguments::AddBooleanArgument( \
+ const char* argument, ctype* variable, const char* help) \
+ { \
+ this->AddArgument(argument, CommandLineArguments::NO_ARGUMENT, \
+ CommandLineArguments::type##_TYPE, variable, help); \
+ }
+
+/* clang-format off */
+CommandLineArgumentsAddBooleanArgumentMacro(BOOL, bool)
+CommandLineArgumentsAddBooleanArgumentMacro(INT, int)
+CommandLineArgumentsAddBooleanArgumentMacro(DOUBLE, double)
+CommandLineArgumentsAddBooleanArgumentMacro(STRING, char*)
+CommandLineArgumentsAddBooleanArgumentMacro(STL_STRING, std::string)
+#ifdef HELP_CLANG_FORMAT
+;
+#endif
+/* clang-format on */
+
+void CommandLineArguments::SetClientData(void* client_data)
+{
+ this->Internals->ClientData = client_data;
+}
+
+void CommandLineArguments::SetUnknownArgumentCallback(
+ CommandLineArguments::ErrorCallbackType callback)
+{
+ this->Internals->UnknownArgumentCallback = callback;
+}
+
+const char* CommandLineArguments::GetHelp(const char* arg)
+{
+ CommandLineArguments::Internal::CallbacksMap::iterator it =
+ this->Internals->Callbacks.find(arg);
+ if (it == this->Internals->Callbacks.end()) {
+ return nullptr;
+ }
+
+  // An entry may be an alias whose Help field names another argument; follow
+  // that chain until we reach the entry that holds the real help text.
+ CommandLineArgumentsCallbackStructure* cs = &(it->second);
+ for (;;) {
+ CommandLineArguments::Internal::CallbacksMap::iterator hit =
+ this->Internals->Callbacks.find(cs->Help);
+ if (hit == this->Internals->Callbacks.end()) {
+ break;
+ }
+ cs = &(hit->second);
+ }
+ return cs->Help;
+}
+
+void CommandLineArguments::SetLineLength(unsigned int ll)
+{
+ if (ll < 9 || ll > 1000) {
+ return;
+ }
+ this->LineLength = ll;
+ this->GenerateHelp();
+}
+
+const char* CommandLineArguments::GetArgv0()
+{
+ return this->Internals->Argv0.c_str();
+}
+
+unsigned int CommandLineArguments::GetLastArgument()
+{
+ return static_cast<unsigned int>(this->Internals->LastArgument + 1);
+}
+
+void CommandLineArguments::GenerateHelp()
+{
+ std::ostringstream str;
+
+  // Collapse arguments that share the same help text into a single map entry
+  // so that aliases are documented together.
+ CommandLineArguments::Internal::CallbacksMap::iterator it;
+ typedef std::map<CommandLineArguments::Internal::String,
+ CommandLineArguments::Internal::SetOfStrings>
+ MapArgs;
+ MapArgs mp;
+ MapArgs::iterator mpit, smpit;
+ for (it = this->Internals->Callbacks.begin();
+ it != this->Internals->Callbacks.end(); it++) {
+ CommandLineArgumentsCallbackStructure* cs = &(it->second);
+ mpit = mp.find(cs->Help);
+ if (mpit != mp.end()) {
+ mpit->second.insert(it->first);
+ mp[it->first].insert(it->first);
+ } else {
+ mp[it->first].insert(it->first);
+ }
+ }
+ for (it = this->Internals->Callbacks.begin();
+ it != this->Internals->Callbacks.end(); it++) {
+ CommandLineArgumentsCallbackStructure* cs = &(it->second);
+ mpit = mp.find(cs->Help);
+ if (mpit != mp.end()) {
+ mpit->second.insert(it->first);
+ smpit = mp.find(it->first);
+ CommandLineArguments::Internal::SetOfStrings::iterator sit;
+ for (sit = smpit->second.begin(); sit != smpit->second.end(); sit++) {
+ mpit->second.insert(*sit);
+ }
+ mp.erase(smpit);
+ } else {
+ mp[it->first].insert(it->first);
+ }
+ }
+
+ // Find the length of the longest string
+ CommandLineArguments::Internal::String::size_type maxlen = 0;
+ for (mpit = mp.begin(); mpit != mp.end(); mpit++) {
+ CommandLineArguments::Internal::SetOfStrings::iterator sit;
+ for (sit = mpit->second.begin(); sit != mpit->second.end(); sit++) {
+ CommandLineArguments::Internal::String::size_type clen = sit->size();
+ switch (this->Internals->Callbacks[*sit].ArgumentType) {
+ case CommandLineArguments::NO_ARGUMENT:
+ clen += 0;
+ break;
+ case CommandLineArguments::CONCAT_ARGUMENT:
+ clen += 3;
+ break;
+ case CommandLineArguments::SPACE_ARGUMENT:
+ clen += 4;
+ break;
+ case CommandLineArguments::EQUAL_ARGUMENT:
+ clen += 4;
+ break;
+ }
+ if (clen > maxlen) {
+ maxlen = clen;
+ }
+ }
+ }
+
+ CommandLineArguments::Internal::String::size_type maxstrlen = maxlen;
+ maxlen += 4; // For the space before and after the option
+
+ // Print help for each option
+ for (mpit = mp.begin(); mpit != mp.end(); mpit++) {
+ CommandLineArguments::Internal::SetOfStrings::iterator sit;
+ for (sit = mpit->second.begin(); sit != mpit->second.end(); sit++) {
+ str << std::endl;
+ std::string argument = *sit;
+ switch (this->Internals->Callbacks[*sit].ArgumentType) {
+ case CommandLineArguments::NO_ARGUMENT:
+ break;
+ case CommandLineArguments::CONCAT_ARGUMENT:
+ argument += "opt";
+ break;
+ case CommandLineArguments::SPACE_ARGUMENT:
+ argument += " opt";
+ break;
+ case CommandLineArguments::EQUAL_ARGUMENT:
+ argument += "=opt";
+ break;
+ case CommandLineArguments::MULTI_ARGUMENT:
+ argument += " opt opt ...";
+ break;
+ }
+ str << " " << argument.substr(0, maxstrlen) << " ";
+ }
+ const char* ptr = this->Internals->Callbacks[mpit->first].Help;
+ size_t len = strlen(ptr);
+ int cnt = 0;
+ while (len > 0) {
+      // If the argument's help text is longer than the line length, split it
+      // at the previous space (or tab) and continue on the next line
+ CommandLineArguments::Internal::String::size_type cc;
+ for (cc = 0; ptr[cc]; cc++) {
+ if (*ptr == ' ' || *ptr == '\t') {
+ ptr++;
+ len--;
+ }
+ }
+ if (cnt > 0) {
+ for (cc = 0; cc < maxlen; cc++) {
+ str << " ";
+ }
+ }
+ CommandLineArguments::Internal::String::size_type skip = len;
+ if (skip > this->LineLength - maxlen) {
+ skip = this->LineLength - maxlen;
+ for (cc = skip - 1; cc > 0; cc--) {
+ if (ptr[cc] == ' ' || ptr[cc] == '\t') {
+ break;
+ }
+ }
+ if (cc != 0) {
+ skip = cc;
+ }
+ }
+ str.write(ptr, static_cast<std::streamsize>(skip));
+ str << std::endl;
+ ptr += skip;
+ len -= skip;
+ cnt++;
+ }
+ }
+ /*
+  // This can help when debugging the help string
+ str << endl;
+ unsigned int cc;
+ for ( cc = 0; cc < this->LineLength; cc ++ )
+ {
+ str << cc % 10;
+ }
+ str << endl;
+ */
+ this->Help = str.str();
+}
+
+void CommandLineArguments::PopulateVariable(bool* variable,
+ const std::string& value)
+{
+ if (value == "1" || value == "ON" || value == "on" || value == "On" ||
+ value == "TRUE" || value == "true" || value == "True" ||
+ value == "yes" || value == "Yes" || value == "YES") {
+ *variable = true;
+ } else {
+ *variable = false;
+ }
+}
+
+void CommandLineArguments::PopulateVariable(int* variable,
+ const std::string& value)
+{
+ char* res = nullptr;
+ *variable = static_cast<int>(strtol(value.c_str(), &res, 10));
+ // if ( res && *res )
+ // {
+ // Can handle non-int
+ // }
+}
+
+void CommandLineArguments::PopulateVariable(double* variable,
+ const std::string& value)
+{
+ char* res = nullptr;
+ *variable = strtod(value.c_str(), &res);
+ // if ( res && *res )
+ // {
+ // Can handle non-double
+ // }
+}
+
+void CommandLineArguments::PopulateVariable(char** variable,
+ const std::string& value)
+{
+ delete[] * variable;
+ *variable = new char[value.size() + 1];
+ strcpy(*variable, value.c_str());
+}
+
+void CommandLineArguments::PopulateVariable(std::string* variable,
+ const std::string& value)
+{
+ *variable = value;
+}
+
+void CommandLineArguments::PopulateVariable(std::vector<bool>* variable,
+ const std::string& value)
+{
+ bool val = false;
+ if (value == "1" || value == "ON" || value == "on" || value == "On" ||
+ value == "TRUE" || value == "true" || value == "True" ||
+ value == "yes" || value == "Yes" || value == "YES") {
+ val = true;
+ }
+ variable->push_back(val);
+}
+
+void CommandLineArguments::PopulateVariable(std::vector<int>* variable,
+ const std::string& value)
+{
+ char* res = nullptr;
+ variable->push_back(static_cast<int>(strtol(value.c_str(), &res, 10)));
+ // if ( res && *res )
+ // {
+ // Can handle non-int
+ // }
+}
+
+void CommandLineArguments::PopulateVariable(std::vector<double>* variable,
+ const std::string& value)
+{
+ char* res = nullptr;
+ variable->push_back(strtod(value.c_str(), &res));
+ // if ( res && *res )
+ // {
+  //   Can handle non-double
+ // }
+}
+
+void CommandLineArguments::PopulateVariable(std::vector<char*>* variable,
+ const std::string& value)
+{
+ char* var = new char[value.size() + 1];
+ strcpy(var, value.c_str());
+ variable->push_back(var);
+}
+
+void CommandLineArguments::PopulateVariable(std::vector<std::string>* variable,
+ const std::string& value)
+{
+ variable->push_back(value);
+}
+
+bool CommandLineArguments::PopulateVariable(
+ CommandLineArgumentsCallbackStructure* cs, const char* value)
+{
+ // Call the callback
+ if (cs->Callback) {
+ if (!cs->Callback(cs->Argument, value, cs->CallData)) {
+ this->Internals->LastArgument--;
+ return 0;
+ }
+ }
+ CommandLineArguments_DEBUG("Set argument: " << cs->Argument << " to "
+ << value);
+ if (cs->Variable) {
+ std::string var = "1";
+ if (value) {
+ var = value;
+ }
+ switch (cs->VariableType) {
+ case CommandLineArguments::INT_TYPE:
+ this->PopulateVariable(static_cast<int*>(cs->Variable), var);
+ break;
+ case CommandLineArguments::DOUBLE_TYPE:
+ this->PopulateVariable(static_cast<double*>(cs->Variable), var);
+ break;
+ case CommandLineArguments::STRING_TYPE:
+ this->PopulateVariable(static_cast<char**>(cs->Variable), var);
+ break;
+ case CommandLineArguments::STL_STRING_TYPE:
+ this->PopulateVariable(static_cast<std::string*>(cs->Variable), var);
+ break;
+ case CommandLineArguments::BOOL_TYPE:
+ this->PopulateVariable(static_cast<bool*>(cs->Variable), var);
+ break;
+ case CommandLineArguments::VECTOR_BOOL_TYPE:
+ this->PopulateVariable(static_cast<std::vector<bool>*>(cs->Variable),
+ var);
+ break;
+ case CommandLineArguments::VECTOR_INT_TYPE:
+ this->PopulateVariable(static_cast<std::vector<int>*>(cs->Variable),
+ var);
+ break;
+ case CommandLineArguments::VECTOR_DOUBLE_TYPE:
+ this->PopulateVariable(static_cast<std::vector<double>*>(cs->Variable),
+ var);
+ break;
+ case CommandLineArguments::VECTOR_STRING_TYPE:
+ this->PopulateVariable(static_cast<std::vector<char*>*>(cs->Variable),
+ var);
+ break;
+ case CommandLineArguments::VECTOR_STL_STRING_TYPE:
+ this->PopulateVariable(
+ static_cast<std::vector<std::string>*>(cs->Variable), var);
+ break;
+ default:
+ std::cerr << "Got unknown variable type: \"" << cs->VariableType
+ << "\"" << std::endl;
+ this->Internals->LastArgument--;
+ return 0;
+ }
+ }
+ return 1;
+}
+
+} // namespace KWSYS_NAMESPACE
diff --git a/test/API/driver/kwsys/CommandLineArguments.hxx.in b/test/API/driver/kwsys/CommandLineArguments.hxx.in
new file mode 100644
index 0000000..7db9015
--- /dev/null
+++ b/test/API/driver/kwsys/CommandLineArguments.hxx.in
@@ -0,0 +1,270 @@
+/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying
+ file Copyright.txt or https://cmake.org/licensing#kwsys for details. */
+#ifndef @KWSYS_NAMESPACE@_CommandLineArguments_hxx
+#define @KWSYS_NAMESPACE@_CommandLineArguments_hxx
+
+#include <@KWSYS_NAMESPACE@/Configure.h>
+#include <@KWSYS_NAMESPACE@/Configure.hxx>
+
+#include <string>
+#include <vector>
+
+namespace @KWSYS_NAMESPACE@ {
+
+class CommandLineArgumentsInternal;
+struct CommandLineArgumentsCallbackStructure;
+
+/** \class CommandLineArguments
+ * \brief Command line arguments processing code.
+ *
+ * Find specified arguments with optional values and either execute registered
+ * callbacks or set the given variables.
+ *
+ * The two interfaces it supports are callback based and variable based. For
+ * the callback interface, you register a callback for a particular argument
+ * using the AddCallback method. When that argument is passed, the callback is
+ * called with the argument, its value, and the call data. For boolean
+ * (NO_ARGUMENT) arguments, the value is "1". If the callback returns 0,
+ * argument parsing stops with an error.
+ *
+ * For the variable interface, you associate a variable with each argument.
+ * When the argument is specified, the variable is set to the specified value
+ * cast to the appropriate type. For boolean (NO_ARGUMENT), the value is "1".
+ *
+ * Both interfaces can be used at the same time.
+ *
+ * Possible argument types are:
+ * NO_ARGUMENT - The argument takes no value : --A
+ * CONCAT_ARGUMENT - The argument takes value after no space : --Aval
+ * SPACE_ARGUMENT - The argument takes value after space : --A val
+ * EQUAL_ARGUMENT - The argument takes value after equal : --A=val
+ * MULTI_ARGUMENT - The argument takes values after space : --A val1 val2
+ * val3 ...
+ *
+ * Example use:
+ *
+ * kwsys::CommandLineArguments arg;
+ * arg.Initialize(argc, argv);
+ * typedef kwsys::CommandLineArguments argT;
+ * arg.AddArgument("--something", argT::EQUAL_ARGUMENT, &some_variable,
+ * "This is help string for --something");
+ * if ( !arg.Parse() )
+ * {
+ * std::cerr << "Problem parsing arguments" << std::endl;
+ * res = 1;
+ * }
+ *
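+ * A callback-interface sketch continuing the example above (MyCallback and
+ * the --verbose option are illustrative names, not part of the library):
+ *
+ *   static int MyCallback(const char* argument, const char* value,
+ *                         void* call_data)
+ *   {
+ *     (void)call_data;
+ *     std::cout << argument << " = " << (value ? value : "1") << std::endl;
+ *     return 1; // returning 0 stops argument parsing with an error
+ *   }
+ *
+ *   arg.AddCallback("--verbose", argT::NO_ARGUMENT, MyCallback, nullptr,
+ *                   "Enable verbose output");
+ *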
+ */
+
+class @KWSYS_NAMESPACE@_EXPORT CommandLineArguments
+{
+public:
+ CommandLineArguments();
+ ~CommandLineArguments();
+
+ CommandLineArguments(const CommandLineArguments&) = delete;
+ CommandLineArguments& operator=(const CommandLineArguments&) = delete;
+
+ /**
+ * Various argument types.
+ */
+ enum ArgumentTypeEnum
+ {
+ NO_ARGUMENT,
+ CONCAT_ARGUMENT,
+ SPACE_ARGUMENT,
+ EQUAL_ARGUMENT,
+ MULTI_ARGUMENT
+ };
+
+ /**
+ * Various variable types. When using the variable interface, this specifies
+ * what type the variable is.
+ */
+ enum VariableTypeEnum
+ {
+ NO_VARIABLE_TYPE = 0, // The variable is not specified
+ INT_TYPE, // The variable is integer (int)
+ BOOL_TYPE, // The variable is boolean (bool)
+ DOUBLE_TYPE, // The variable is float (double)
+ STRING_TYPE, // The variable is string (char*)
+    STL_STRING_TYPE,        // The variable is a string (std::string)
+    VECTOR_INT_TYPE,        // The variable is a vector of integers (int)
+    VECTOR_BOOL_TYPE,       // The variable is a vector of booleans (bool)
+    VECTOR_DOUBLE_TYPE,     // The variable is a vector of floats (double)
+    VECTOR_STRING_TYPE,     // The variable is a vector of strings (char*)
+    VECTOR_STL_STRING_TYPE, // The variable is a vector of strings (std::string)
+ LAST_VARIABLE_TYPE
+ };
+
+ /**
+ * Prototypes for callbacks for callback interface.
+ */
+ typedef int (*CallbackType)(const char* argument, const char* value,
+ void* call_data);
+ typedef int (*ErrorCallbackType)(const char* argument, void* client_data);
+
+ /**
+ * Initialize internal data structures. This should be called before parsing.
+ */
+ void Initialize(int argc, const char* const argv[]);
+ void Initialize(int argc, char* argv[]);
+
+ /**
+   * Initialize internal data structures and pass arguments one by one. This is
+   * a convenience method for use from scripting languages where argc and argv
+   * are not available.
+ */
+ void Initialize();
+ void ProcessArgument(const char* arg);
+
+ /**
+ * This method will parse arguments and call appropriate methods.
+ */
+ int Parse();
+
+ /**
+   * This method will add a callback for a specific argument. The arguments to
+   * it are the argument, the argument type, the callback method, and the call
+   * data. The help argument specifies the help string used with this option.
+   * The callback and call_data can be skipped.
+ */
+ void AddCallback(const char* argument, ArgumentTypeEnum type,
+ CallbackType callback, void* call_data, const char* help);
+
+ /**
+   * Add a handler for an argument that will set the variable to the specified
+   * value. If the argument is specified, its value is cast to the appropriate
+   * type.
+ */
+ void AddArgument(const char* argument, ArgumentTypeEnum type, bool* variable,
+ const char* help);
+ void AddArgument(const char* argument, ArgumentTypeEnum type, int* variable,
+ const char* help);
+ void AddArgument(const char* argument, ArgumentTypeEnum type,
+ double* variable, const char* help);
+ void AddArgument(const char* argument, ArgumentTypeEnum type,
+ char** variable, const char* help);
+ void AddArgument(const char* argument, ArgumentTypeEnum type,
+ std::string* variable, const char* help);
+
+ /**
+   * Add a handler for an argument that will set the variable to the specified
+   * value. If the argument is specified, its value is cast to the appropriate
+   * type. These overloads handle multi-argument values.
+ */
+ void AddArgument(const char* argument, ArgumentTypeEnum type,
+ std::vector<bool>* variable, const char* help);
+ void AddArgument(const char* argument, ArgumentTypeEnum type,
+ std::vector<int>* variable, const char* help);
+ void AddArgument(const char* argument, ArgumentTypeEnum type,
+ std::vector<double>* variable, const char* help);
+ void AddArgument(const char* argument, ArgumentTypeEnum type,
+ std::vector<char*>* variable, const char* help);
+ void AddArgument(const char* argument, ArgumentTypeEnum type,
+ std::vector<std::string>* variable, const char* help);
+
+ /**
+   * Add a handler for a boolean argument. The argument does not take a value;
+   * if it is specified, the variable is set to true/1, otherwise it is
+   * false/0.
+ */
+ void AddBooleanArgument(const char* argument, bool* variable,
+ const char* help);
+ void AddBooleanArgument(const char* argument, int* variable,
+ const char* help);
+ void AddBooleanArgument(const char* argument, double* variable,
+ const char* help);
+ void AddBooleanArgument(const char* argument, char** variable,
+ const char* help);
+ void AddBooleanArgument(const char* argument, std::string* variable,
+ const char* help);
+
+ /**
+ * Set the callbacks for error handling.
+ */
+ void SetClientData(void* client_data);
+ void SetUnknownArgumentCallback(ErrorCallbackType callback);
+
+ /**
+ * Get remaining arguments. It allocates space for argv, so you have to call
+ * delete[] on it.
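+   *
+   * A minimal sketch (assumes a parsed CommandLineArguments instance "arg";
+   * pairs the two calls declared below):
+   *
+   *   int newArgc = 0;
+   *   char** newArgv = nullptr;
+   *   arg.GetRemainingArguments(&newArgc, &newArgv);
+   *   // ... use newArgc / newArgv ...
+   *   arg.DeleteRemainingArguments(newArgc, &newArgv);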
+ */
+ void GetRemainingArguments(int* argc, char*** argv);
+ void DeleteRemainingArguments(int argc, char*** argv);
+
+ /**
+   * If StoreUnusedArguments is set to true, then all unknown arguments will be
+   * stored and the user can access the modified argc and argv with the known
+   * arguments removed.
+ */
+ void StoreUnusedArguments(bool val) { this->StoreUnusedArgumentsFlag = val; }
+ void GetUnusedArguments(int* argc, char*** argv);
+
+ /**
+   * Return a string containing the help. If an argument is specified, return
+   * only the help for that argument.
+ */
+ const char* GetHelp() { return this->Help.c_str(); }
+ const char* GetHelp(const char* arg);
+
+ /**
+ * Get / Set the help line length. This length is used when generating the
+ * help page. Default length is 80.
+ */
+ void SetLineLength(unsigned int);
+ unsigned int GetLineLength();
+
+ /**
+ * Get the executable name (argv0). This is only available when using
+ * Initialize with argc/argv.
+ */
+ const char* GetArgv0();
+
+ /**
+   * Get the index of the last argument parsed. This is the last argument that
+   * was parsed successfully in the original argc/argv list.
+ */
+ unsigned int GetLastArgument();
+
+protected:
+ void GenerateHelp();
+
+  //! Internal method that registers a variable with an argument
+ void AddArgument(const char* argument, ArgumentTypeEnum type,
+ VariableTypeEnum vtype, void* variable, const char* help);
+
+ bool GetMatchedArguments(std::vector<std::string>* matches,
+ const std::string& arg);
+
+ //! Populate individual variables
+ bool PopulateVariable(CommandLineArgumentsCallbackStructure* cs,
+ const char* value);
+
+ //! Populate individual variables of type ...
+ void PopulateVariable(bool* variable, const std::string& value);
+ void PopulateVariable(int* variable, const std::string& value);
+ void PopulateVariable(double* variable, const std::string& value);
+ void PopulateVariable(char** variable, const std::string& value);
+ void PopulateVariable(std::string* variable, const std::string& value);
+ void PopulateVariable(std::vector<bool>* variable, const std::string& value);
+ void PopulateVariable(std::vector<int>* variable, const std::string& value);
+ void PopulateVariable(std::vector<double>* variable,
+ const std::string& value);
+ void PopulateVariable(std::vector<char*>* variable,
+ const std::string& value);
+ void PopulateVariable(std::vector<std::string>* variable,
+ const std::string& value);
+
+ typedef CommandLineArgumentsInternal Internal;
+ Internal* Internals;
+ std::string Help;
+
+ unsigned int LineLength;
+
+ bool StoreUnusedArgumentsFlag;
+};
+
+} // namespace @KWSYS_NAMESPACE@
+
+#endif
diff --git a/test/API/driver/kwsys/Configure.h.in b/test/API/driver/kwsys/Configure.h.in
new file mode 100644
index 0000000..5323c57
--- /dev/null
+++ b/test/API/driver/kwsys/Configure.h.in
@@ -0,0 +1,89 @@
+/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying
+ file Copyright.txt or https://cmake.org/licensing#kwsys for details. */
+#ifndef @KWSYS_NAMESPACE@_Configure_h
+#define @KWSYS_NAMESPACE@_Configure_h
+
+/* If we are building a kwsys .c or .cxx file, let it use the kwsys
+ namespace. When not building a kwsys source file these macros are
+ temporarily defined inside the headers that use them. */
+#if defined(KWSYS_NAMESPACE)
+# define kwsys_ns(x) @KWSYS_NAMESPACE@##x
+# define kwsysEXPORT @KWSYS_NAMESPACE@_EXPORT
+#endif
+
+/* Disable some warnings inside kwsys source files. */
+#if defined(KWSYS_NAMESPACE)
+# if defined(__BORLANDC__)
+# pragma warn - 8027 /* function not inlined. */
+# endif
+# if defined(__INTEL_COMPILER)
+# pragma warning(disable : 1572) /* floating-point equality test */
+# endif
+# if defined(__sgi) && !defined(__GNUC__)
+# pragma set woff 3970 /* pointer to int conversion */
+# pragma set woff 3968 /* 64 bit conversion */
+# endif
+#endif
+
+/* Whether kwsys namespace is "kwsys". */
+#define @KWSYS_NAMESPACE@_NAME_IS_KWSYS @KWSYS_NAME_IS_KWSYS@
+
+/* Setup the export macro. */
+#if @KWSYS_BUILD_SHARED@
+# if defined(_WIN32) || defined(__CYGWIN__)
+# if defined(@KWSYS_NAMESPACE@_EXPORTS)
+# define @KWSYS_NAMESPACE@_EXPORT __declspec(dllexport)
+# else
+# define @KWSYS_NAMESPACE@_EXPORT __declspec(dllimport)
+# endif
+# elif __GNUC__ >= 4
+# define @KWSYS_NAMESPACE@_EXPORT __attribute__((visibility("default")))
+# else
+# define @KWSYS_NAMESPACE@_EXPORT
+# endif
+#else
+# define @KWSYS_NAMESPACE@_EXPORT
+#endif
+
+/* Enable warnings that are off by default but are useful. */
+#if !defined(@KWSYS_NAMESPACE@_NO_WARNING_ENABLE)
+# if defined(_MSC_VER)
+# pragma warning(default : 4263) /* no override, call convention differs \
+ */
+# endif
+#endif
+
+/* Disable warnings that are on by default but occur in valid code. */
+#if !defined(@KWSYS_NAMESPACE@_NO_WARNING_DISABLE)
+# if defined(_MSC_VER)
+# pragma warning(disable : 4097) /* typedef is synonym for class */
+# pragma warning(disable : 4127) /* conditional expression is constant */
+# pragma warning(disable : 4244) /* possible loss in conversion */
+# pragma warning(disable : 4251) /* missing DLL-interface */
+# pragma warning(disable : 4305) /* truncation from type1 to type2 */
+# pragma warning(disable : 4309) /* truncation of constant value */
+# pragma warning(disable : 4514) /* unreferenced inline function */
+# pragma warning(disable : 4706) /* assignment in conditional expression \
+ */
+# pragma warning(disable : 4710) /* function not inlined */
+# pragma warning(disable : 4786) /* identifier truncated in debug info */
+# endif
+# if defined(__BORLANDC__) && !defined(__cplusplus)
+/* Code has no effect; raised by winnt.h in C (not C++) when ignoring an
+ unused parameter using "(param)" syntax (i.e. no cast to void). */
+# pragma warn - 8019
+# endif
+#endif
+
+/* MSVC 6.0 in release mode will warn about code it produces with its
+ optimizer. Disable the warnings specifically for this
+ configuration. Real warnings will be revealed by a debug build or
+ by other compilers. */
+#if !defined(@KWSYS_NAMESPACE@_NO_WARNING_DISABLE_BOGUS)
+# if defined(_MSC_VER) && (_MSC_VER < 1300) && defined(NDEBUG)
+# pragma warning(disable : 4701) /* Variable may be used uninitialized. */
+# pragma warning(disable : 4702) /* Unreachable code. */
+# endif
+#endif
+
+#endif
diff --git a/test/API/driver/kwsys/Configure.hxx.in b/test/API/driver/kwsys/Configure.hxx.in
new file mode 100644
index 0000000..29a2dd1
--- /dev/null
+++ b/test/API/driver/kwsys/Configure.hxx.in
@@ -0,0 +1,65 @@
+/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying
+ file Copyright.txt or https://cmake.org/licensing#kwsys for details. */
+#ifndef @KWSYS_NAMESPACE@_Configure_hxx
+#define @KWSYS_NAMESPACE@_Configure_hxx
+
+/* Include C configuration. */
+#include <@KWSYS_NAMESPACE@/Configure.h>
+
+/* Whether wstring is available. */
+#define @KWSYS_NAMESPACE@_STL_HAS_WSTRING @KWSYS_STL_HAS_WSTRING@
+/* Whether <ext/stdio_filebuf.h> is available. */
+#define @KWSYS_NAMESPACE@_CXX_HAS_EXT_STDIO_FILEBUF_H \
+ @KWSYS_CXX_HAS_EXT_STDIO_FILEBUF_H@
+/* Whether the translation map is available or not. */
+#define @KWSYS_NAMESPACE@_SYSTEMTOOLS_USE_TRANSLATION_MAP \
+ @KWSYS_SYSTEMTOOLS_USE_TRANSLATION_MAP@
+
+#if defined(__SUNPRO_CC) && __SUNPRO_CC > 0x5130 && defined(__has_attribute)
+# define @KWSYS_NAMESPACE@__has_cpp_attribute(x) __has_attribute(x)
+#elif defined(__has_cpp_attribute)
+# define @KWSYS_NAMESPACE@__has_cpp_attribute(x) __has_cpp_attribute(x)
+#else
+# define @KWSYS_NAMESPACE@__has_cpp_attribute(x) 0
+#endif
+
+#if __cplusplus >= 201103L
+# define @KWSYS_NAMESPACE@_NULLPTR nullptr
+#else
+# define @KWSYS_NAMESPACE@_NULLPTR 0
+#endif
+
+#ifndef @KWSYS_NAMESPACE@_FALLTHROUGH
+# if __cplusplus >= 201703L && \
+ @KWSYS_NAMESPACE@__has_cpp_attribute(fallthrough)
+# define @KWSYS_NAMESPACE@_FALLTHROUGH [[fallthrough]]
+# elif __cplusplus >= 201103L && \
+ @KWSYS_NAMESPACE@__has_cpp_attribute(gnu::fallthrough)
+# define @KWSYS_NAMESPACE@_FALLTHROUGH [[gnu::fallthrough]]
+# elif __cplusplus >= 201103L && \
+ @KWSYS_NAMESPACE@__has_cpp_attribute(clang::fallthrough)
+# define @KWSYS_NAMESPACE@_FALLTHROUGH [[clang::fallthrough]]
+# endif
+#endif
+#ifndef @KWSYS_NAMESPACE@_FALLTHROUGH
+# define @KWSYS_NAMESPACE@_FALLTHROUGH static_cast<void>(0)
+#endif
+
+#undef @KWSYS_NAMESPACE@__has_cpp_attribute
+
+/* If building a C++ file in kwsys itself, give the source file
+ access to the macros without a configured namespace. */
+#if defined(KWSYS_NAMESPACE)
+# if !@KWSYS_NAMESPACE@_NAME_IS_KWSYS
+# define kwsys @KWSYS_NAMESPACE@
+# endif
+# define KWSYS_NAME_IS_KWSYS @KWSYS_NAMESPACE@_NAME_IS_KWSYS
+# define KWSYS_STL_HAS_WSTRING @KWSYS_NAMESPACE@_STL_HAS_WSTRING
+# define KWSYS_CXX_HAS_EXT_STDIO_FILEBUF_H \
+ @KWSYS_NAMESPACE@_CXX_HAS_EXT_STDIO_FILEBUF_H
+# define KWSYS_FALLTHROUGH @KWSYS_NAMESPACE@_FALLTHROUGH
+# define KWSYS_SYSTEMTOOLS_USE_TRANSLATION_MAP \
+ @KWSYS_NAMESPACE@_SYSTEMTOOLS_USE_TRANSLATION_MAP
+#endif
+
+#endif
diff --git a/test/API/driver/kwsys/ConsoleBuf.hxx.in b/test/API/driver/kwsys/ConsoleBuf.hxx.in
new file mode 100644
index 0000000..49dbdf7
--- /dev/null
+++ b/test/API/driver/kwsys/ConsoleBuf.hxx.in
@@ -0,0 +1,398 @@
+/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying
+ file Copyright.txt or https://cmake.org/licensing#kwsys for details. */
+#ifndef @KWSYS_NAMESPACE@_ConsoleBuf_hxx
+#define @KWSYS_NAMESPACE@_ConsoleBuf_hxx
+
+#include <@KWSYS_NAMESPACE@/Configure.hxx>
+
+#include <@KWSYS_NAMESPACE@/Encoding.hxx>
+
+#include <cstring>
+#include <iostream>
+#include <sstream>
+#include <stdexcept>
+#include <streambuf>
+#include <string>
+
+#if defined(_WIN32)
+# include <windows.h>
+# if __cplusplus >= 201103L
+# include <system_error>
+# endif
+#endif
+
+namespace @KWSYS_NAMESPACE@ {
+#if defined(_WIN32)
+
+template <class CharT, class Traits = std::char_traits<CharT> >
+class BasicConsoleBuf : public std::basic_streambuf<CharT, Traits>
+{
+public:
+ typedef typename Traits::int_type int_type;
+ typedef typename Traits::char_type char_type;
+
+ class Manager
+ {
+ public:
+ Manager(std::basic_ios<CharT, Traits>& ios, const bool err = false)
+ : m_consolebuf(0)
+ {
+ m_ios = &ios;
+ try {
+ m_consolebuf = new BasicConsoleBuf<CharT, Traits>(err);
+ m_streambuf = m_ios->rdbuf(m_consolebuf);
+ } catch (const std::runtime_error& ex) {
+ std::cerr << "Failed to create ConsoleBuf!" << std::endl
+ << ex.what() << std::endl;
+ };
+ }
+
+ BasicConsoleBuf<CharT, Traits>* GetConsoleBuf() { return m_consolebuf; }
+
+ void SetUTF8Pipes()
+ {
+ if (m_consolebuf) {
+ m_consolebuf->input_pipe_codepage = CP_UTF8;
+ m_consolebuf->output_pipe_codepage = CP_UTF8;
+ m_consolebuf->activateCodepageChange();
+ }
+ }
+
+ ~Manager()
+ {
+ if (m_consolebuf) {
+ delete m_consolebuf;
+ m_ios->rdbuf(m_streambuf);
+ }
+ }
+
+ private:
+ std::basic_ios<CharT, Traits>* m_ios;
+ std::basic_streambuf<CharT, Traits>* m_streambuf;
+ BasicConsoleBuf<CharT, Traits>* m_consolebuf;
+ };
+
+ BasicConsoleBuf(const bool err = false)
+ : flush_on_newline(true)
+ , input_pipe_codepage(0)
+ , output_pipe_codepage(0)
+ , input_file_codepage(CP_UTF8)
+ , output_file_codepage(CP_UTF8)
+ , m_consolesCodepage(0)
+ {
+ m_hInput = ::GetStdHandle(STD_INPUT_HANDLE);
+ checkHandle(true, "STD_INPUT_HANDLE");
+ if (!setActiveInputCodepage()) {
+ throw std::runtime_error("setActiveInputCodepage failed!");
+ }
+ m_hOutput = err ? ::GetStdHandle(STD_ERROR_HANDLE)
+ : ::GetStdHandle(STD_OUTPUT_HANDLE);
+ checkHandle(false, err ? "STD_ERROR_HANDLE" : "STD_OUTPUT_HANDLE");
+ if (!setActiveOutputCodepage()) {
+ throw std::runtime_error("setActiveOutputCodepage failed!");
+ }
+ _setg();
+ _setp();
+ }
+
+ ~BasicConsoleBuf() throw() { sync(); }
+
+ bool activateCodepageChange()
+ {
+ return setActiveInputCodepage() && setActiveOutputCodepage();
+ }
+
+protected:
+ virtual int sync()
+ {
+ bool success = true;
+ if (m_hInput && m_isConsoleInput &&
+ ::FlushConsoleInputBuffer(m_hInput) == 0) {
+ success = false;
+ }
+ if (m_hOutput && !m_obuffer.empty()) {
+ const std::wstring wbuffer = getBuffer(m_obuffer);
+ if (m_isConsoleOutput) {
+ DWORD charsWritten;
+ success =
+ ::WriteConsoleW(m_hOutput, wbuffer.c_str(), (DWORD)wbuffer.size(),
+ &charsWritten, nullptr) == 0
+ ? false
+ : true;
+ } else {
+ DWORD bytesWritten;
+ std::string buffer;
+ success = encodeOutputBuffer(wbuffer, buffer);
+ if (success) {
+ success =
+ ::WriteFile(m_hOutput, buffer.c_str(), (DWORD)buffer.size(),
+ &bytesWritten, nullptr) == 0
+ ? false
+ : true;
+ }
+ }
+ }
+ m_ibuffer.clear();
+ m_obuffer.clear();
+ _setg();
+ _setp();
+ return success ? 0 : -1;
+ }
+
+ virtual int_type underflow()
+ {
+ if (this->gptr() >= this->egptr()) {
+ if (!m_hInput) {
+ _setg(true);
+ return Traits::eof();
+ }
+ if (m_isConsoleInput) {
+        // ReadConsole doesn't report whether more input is available, so
+        // don't support reading more characters than this
+ wchar_t wbuffer[8192];
+ DWORD charsRead;
+ if (ReadConsoleW(m_hInput, wbuffer,
+ (sizeof(wbuffer) / sizeof(wbuffer[0])), &charsRead,
+ nullptr) == 0 ||
+ charsRead == 0) {
+ _setg(true);
+ return Traits::eof();
+ }
+ setBuffer(std::wstring(wbuffer, charsRead), m_ibuffer);
+ } else {
+ std::wstring wbuffer;
+ std::string strbuffer;
+ DWORD bytesRead;
+ LARGE_INTEGER size;
+ if (GetFileSizeEx(m_hInput, &size) == 0) {
+ _setg(true);
+ return Traits::eof();
+ }
+ char* buffer = new char[size.LowPart];
+ while (ReadFile(m_hInput, buffer, size.LowPart, &bytesRead, nullptr) ==
+ 0) {
+ if (GetLastError() == ERROR_MORE_DATA) {
+ strbuffer += std::string(buffer, bytesRead);
+ continue;
+ }
+ _setg(true);
+ delete[] buffer;
+ return Traits::eof();
+ }
+ if (bytesRead > 0) {
+ strbuffer += std::string(buffer, bytesRead);
+ }
+ delete[] buffer;
+ if (!decodeInputBuffer(strbuffer, wbuffer)) {
+ _setg(true);
+ return Traits::eof();
+ }
+ setBuffer(wbuffer, m_ibuffer);
+ }
+ _setg();
+ }
+ return Traits::to_int_type(*this->gptr());
+ }
+
+ virtual int_type overflow(int_type ch = Traits::eof())
+ {
+ if (!Traits::eq_int_type(ch, Traits::eof())) {
+ char_type chr = Traits::to_char_type(ch);
+ m_obuffer += chr;
+ if ((flush_on_newline && Traits::eq(chr, '\n')) ||
+ Traits::eq_int_type(ch, 0x00)) {
+ sync();
+ }
+ return ch;
+ }
+ sync();
+ return Traits::eof();
+ }
+
+public:
+ bool flush_on_newline;
+ UINT input_pipe_codepage;
+ UINT output_pipe_codepage;
+ UINT input_file_codepage;
+ UINT output_file_codepage;
+
+private:
+ HANDLE m_hInput;
+ HANDLE m_hOutput;
+ std::basic_string<char_type> m_ibuffer;
+ std::basic_string<char_type> m_obuffer;
+ bool m_isConsoleInput;
+ bool m_isConsoleOutput;
+ UINT m_activeInputCodepage;
+ UINT m_activeOutputCodepage;
+ UINT m_consolesCodepage;
+ void checkHandle(bool input, std::string handleName)
+ {
+ if ((input && m_hInput == INVALID_HANDLE_VALUE) ||
+ (!input && m_hOutput == INVALID_HANDLE_VALUE)) {
+ std::string errmsg =
+ "GetStdHandle(" + handleName + ") returned INVALID_HANDLE_VALUE";
+# if __cplusplus >= 201103L
+ throw std::system_error(::GetLastError(), std::system_category(),
+ errmsg);
+# else
+ throw std::runtime_error(errmsg);
+# endif
+ }
+ }
+ UINT getConsolesCodepage()
+ {
+ if (!m_consolesCodepage) {
+ m_consolesCodepage = GetConsoleCP();
+ if (!m_consolesCodepage) {
+ m_consolesCodepage = GetACP();
+ }
+ }
+ return m_consolesCodepage;
+ }
+ bool setActiveInputCodepage()
+ {
+ m_isConsoleInput = false;
+ switch (GetFileType(m_hInput)) {
+ case FILE_TYPE_DISK:
+ m_activeInputCodepage = input_file_codepage;
+ break;
+ case FILE_TYPE_CHAR:
+ // Check for actual console.
+ DWORD consoleMode;
+ m_isConsoleInput =
+ GetConsoleMode(m_hInput, &consoleMode) == 0 ? false : true;
+ if (m_isConsoleInput) {
+ break;
+ }
+ @KWSYS_NAMESPACE@_FALLTHROUGH;
+ case FILE_TYPE_PIPE:
+ m_activeInputCodepage = input_pipe_codepage;
+ break;
+ default:
+ return false;
+ }
+ if (!m_isConsoleInput && m_activeInputCodepage == 0) {
+ m_activeInputCodepage = getConsolesCodepage();
+ }
+ return true;
+ }
+ bool setActiveOutputCodepage()
+ {
+ m_isConsoleOutput = false;
+ switch (GetFileType(m_hOutput)) {
+ case FILE_TYPE_DISK:
+ m_activeOutputCodepage = output_file_codepage;
+ break;
+ case FILE_TYPE_CHAR:
+ // Check for actual console.
+ DWORD consoleMode;
+ m_isConsoleOutput =
+ GetConsoleMode(m_hOutput, &consoleMode) == 0 ? false : true;
+ if (m_isConsoleOutput) {
+ break;
+ }
+ @KWSYS_NAMESPACE@_FALLTHROUGH;
+ case FILE_TYPE_PIPE:
+ m_activeOutputCodepage = output_pipe_codepage;
+ break;
+ default:
+ return false;
+ }
+ if (!m_isConsoleOutput && m_activeOutputCodepage == 0) {
+ m_activeOutputCodepage = getConsolesCodepage();
+ }
+ return true;
+ }
+ void _setg(bool empty = false)
+ {
+ if (!empty) {
+ this->setg((char_type*)m_ibuffer.data(), (char_type*)m_ibuffer.data(),
+ (char_type*)m_ibuffer.data() + m_ibuffer.size());
+ } else {
+ this->setg((char_type*)m_ibuffer.data(),
+ (char_type*)m_ibuffer.data() + m_ibuffer.size(),
+ (char_type*)m_ibuffer.data() + m_ibuffer.size());
+ }
+ }
+ void _setp()
+ {
+ this->setp((char_type*)m_obuffer.data(),
+ (char_type*)m_obuffer.data() + m_obuffer.size());
+ }
+ bool encodeOutputBuffer(const std::wstring wbuffer, std::string& buffer)
+ {
+ if (wbuffer.size() == 0) {
+ buffer = std::string();
+ return true;
+ }
+ const int length =
+ WideCharToMultiByte(m_activeOutputCodepage, 0, wbuffer.c_str(),
+ (int)wbuffer.size(), nullptr, 0, nullptr, nullptr);
+ char* buf = new char[length];
+ const bool success =
+ WideCharToMultiByte(m_activeOutputCodepage, 0, wbuffer.c_str(),
+ (int)wbuffer.size(), buf, length, nullptr,
+ nullptr) > 0
+ ? true
+ : false;
+ buffer = std::string(buf, length);
+ delete[] buf;
+ return success;
+ }
+ bool decodeInputBuffer(const std::string buffer, std::wstring& wbuffer)
+ {
+ size_t length = buffer.length();
+ if (length == 0) {
+ wbuffer = std::wstring();
+ return true;
+ }
+ int actualCodepage = m_activeInputCodepage;
+ const char BOM_UTF8[] = { char(0xEF), char(0xBB), char(0xBF) };
+ const char* data = buffer.data();
+ const size_t BOMsize = sizeof(BOM_UTF8);
+ if (length >= BOMsize && std::memcmp(data, BOM_UTF8, BOMsize) == 0) {
+ // PowerShell uses UTF-8 with BOM for pipes
+ actualCodepage = CP_UTF8;
+ data += BOMsize;
+ length -= BOMsize;
+ }
+ const size_t wlength = static_cast<size_t>(MultiByteToWideChar(
+ actualCodepage, 0, data, static_cast<int>(length), nullptr, 0));
+ wchar_t* wbuf = new wchar_t[wlength];
+ const bool success =
+ MultiByteToWideChar(actualCodepage, 0, data, static_cast<int>(length),
+ wbuf, static_cast<int>(wlength)) > 0
+ ? true
+ : false;
+ wbuffer = std::wstring(wbuf, wlength);
+ delete[] wbuf;
+ return success;
+ }
+ std::wstring getBuffer(const std::basic_string<char> buffer)
+ {
+ return Encoding::ToWide(buffer);
+ }
+ std::wstring getBuffer(const std::basic_string<wchar_t> buffer)
+ {
+ return buffer;
+ }
+ void setBuffer(const std::wstring wbuffer, std::basic_string<char>& target)
+ {
+ target = Encoding::ToNarrow(wbuffer);
+ }
+ void setBuffer(const std::wstring wbuffer,
+ std::basic_string<wchar_t>& target)
+ {
+ target = wbuffer;
+ }
+
+}; // BasicConsoleBuf class
+
+typedef BasicConsoleBuf<char> ConsoleBuf;
+typedef BasicConsoleBuf<wchar_t> WConsoleBuf;
+
+#endif
+} // KWSYS_NAMESPACE
+
+#endif
diff --git a/test/API/driver/kwsys/Copyright.txt b/test/API/driver/kwsys/Copyright.txt
new file mode 100644
index 0000000..33d7fb4
--- /dev/null
+++ b/test/API/driver/kwsys/Copyright.txt
@@ -0,0 +1,38 @@
+KWSys - Kitware System Library
+Copyright 2000-2016 Kitware, Inc. and Contributors
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+
+* Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+* Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+* Neither the name of Kitware, Inc. nor the names of Contributors
+ may be used to endorse or promote products derived from this
+ software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+------------------------------------------------------------------------------
+
+The following individuals and institutions are among the Contributors:
+
+* Insight Software Consortium <insightsoftwareconsortium.org>
+
+See version control history for details of individual contributions.
diff --git a/test/API/driver/kwsys/Directory.cxx b/test/API/driver/kwsys/Directory.cxx
new file mode 100644
index 0000000..e379182
--- /dev/null
+++ b/test/API/driver/kwsys/Directory.cxx
@@ -0,0 +1,236 @@
+/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying
+ file Copyright.txt or https://cmake.org/licensing#kwsys for details. */
+#include "kwsysPrivate.h"
+#include KWSYS_HEADER(Directory.hxx)
+
+#include KWSYS_HEADER(Configure.hxx)
+
+#include KWSYS_HEADER(Encoding.hxx)
+
+// Work-around CMake dependency scanning limitation. This must
+// duplicate the above list of headers.
+#if 0
+# include "Configure.hxx.in"
+# include "Directory.hxx.in"
+# include "Encoding.hxx.in"
+#endif
+
+#include <string>
+#include <vector>
+
+namespace KWSYS_NAMESPACE {
+
+class DirectoryInternals
+{
+public:
+ // Array of Files
+ std::vector<std::string> Files;
+
+ // Path to Open'ed directory
+ std::string Path;
+};
+
+Directory::Directory()
+{
+ this->Internal = new DirectoryInternals;
+}
+
+Directory::~Directory()
+{
+ delete this->Internal;
+}
+
+unsigned long Directory::GetNumberOfFiles() const
+{
+ return static_cast<unsigned long>(this->Internal->Files.size());
+}
+
+const char* Directory::GetFile(unsigned long dindex) const
+{
+ if (dindex >= this->Internal->Files.size()) {
+ return nullptr;
+ }
+ return this->Internal->Files[dindex].c_str();
+}
+
+const char* Directory::GetPath() const
+{
+ return this->Internal->Path.c_str();
+}
+
+void Directory::Clear()
+{
+ this->Internal->Path.resize(0);
+ this->Internal->Files.clear();
+}
+
+} // namespace KWSYS_NAMESPACE
+
+// First Windows platforms
+
+#if defined(_WIN32) && !defined(__CYGWIN__)
+# include <windows.h>
+
+# include <ctype.h>
+# include <fcntl.h>
+# include <io.h>
+# include <stdio.h>
+# include <stdlib.h>
+# include <string.h>
+# include <sys/stat.h>
+# include <sys/types.h>
+
+// Wide function names can vary depending on compiler:
+# ifdef __BORLANDC__
+# define _wfindfirst_func __wfindfirst
+# define _wfindnext_func __wfindnext
+# else
+# define _wfindfirst_func _wfindfirst
+# define _wfindnext_func _wfindnext
+# endif
+
+namespace KWSYS_NAMESPACE {
+
+bool Directory::Load(const std::string& name)
+{
+ this->Clear();
+# if (defined(_MSC_VER) && _MSC_VER < 1300) || defined(__BORLANDC__)
+ // Older Visual C++ and Embarcadero compilers.
+ long srchHandle;
+# else // Newer Visual C++
+ intptr_t srchHandle;
+# endif
+ char* buf;
+ size_t n = name.size();
+ if (name.back() == '/' || name.back() == '\\') {
+ buf = new char[n + 1 + 1];
+ sprintf(buf, "%s*", name.c_str());
+ } else {
+ // Make sure the slashes in the wildcard suffix are consistent with the
+ // rest of the path
+ buf = new char[n + 2 + 1];
+ if (name.find('\\') != std::string::npos) {
+ sprintf(buf, "%s\\*", name.c_str());
+ } else {
+ sprintf(buf, "%s/*", name.c_str());
+ }
+ }
+ struct _wfinddata_t data; // data of current file
+
+ // Now put them into the file array
+ srchHandle = _wfindfirst_func(
+ (wchar_t*)Encoding::ToWindowsExtendedPath(buf).c_str(), &data);
+ delete[] buf;
+
+ if (srchHandle == -1) {
+ return 0;
+ }
+
+ // Loop through names
+ do {
+ this->Internal->Files.push_back(Encoding::ToNarrow(data.name));
+ } while (_wfindnext_func(srchHandle, &data) != -1);
+ this->Internal->Path = name;
+ return _findclose(srchHandle) != -1;
+}
+
+unsigned long Directory::GetNumberOfFilesInDirectory(const std::string& name)
+{
+# if (defined(_MSC_VER) && _MSC_VER < 1300) || defined(__BORLANDC__)
+ // Older Visual C++ and Embarcadero compilers.
+ long srchHandle;
+# else // Newer Visual C++
+ intptr_t srchHandle;
+# endif
+ char* buf;
+ size_t n = name.size();
+ if (name.back() == '/') {
+ buf = new char[n + 1 + 1];
+ sprintf(buf, "%s*", name.c_str());
+ } else {
+ buf = new char[n + 2 + 1];
+ sprintf(buf, "%s/*", name.c_str());
+ }
+ struct _wfinddata_t data; // data of current file
+
+ // Now put them into the file array
+ srchHandle =
+ _wfindfirst_func((wchar_t*)Encoding::ToWide(buf).c_str(), &data);
+ delete[] buf;
+
+ if (srchHandle == -1) {
+ return 0;
+ }
+
+ // Loop through names
+ unsigned long count = 0;
+ do {
+ count++;
+ } while (_wfindnext_func(srchHandle, &data) != -1);
+ _findclose(srchHandle);
+ return count;
+}
+
+} // namespace KWSYS_NAMESPACE
+
+#else
+
+// Now the POSIX style directory access
+
+# include <sys/types.h>
+
+# include <dirent.h>
+
+// PGI with glibc has trouble with dirent and large file support:
+// http://www.pgroup.com/userforum/viewtopic.php?
+// p=1992&sid=f16167f51964f1a68fe5041b8eb213b6
+// Work around the problem by mapping dirent the same way as readdir.
+# if defined(__PGI) && defined(__GLIBC__)
+# define kwsys_dirent_readdir dirent
+# define kwsys_dirent_readdir64 dirent64
+# define kwsys_dirent kwsys_dirent_lookup(readdir)
+# define kwsys_dirent_lookup(x) kwsys_dirent_lookup_delay(x)
+# define kwsys_dirent_lookup_delay(x) kwsys_dirent_##x
+# else
+# define kwsys_dirent dirent
+# endif
+
+namespace KWSYS_NAMESPACE {
+
+bool Directory::Load(const std::string& name)
+{
+ this->Clear();
+
+ DIR* dir = opendir(name.c_str());
+
+ if (!dir) {
+ return 0;
+ }
+
+ for (kwsys_dirent* d = readdir(dir); d; d = readdir(dir)) {
+ this->Internal->Files.push_back(d->d_name);
+ }
+ this->Internal->Path = name;
+ closedir(dir);
+ return 1;
+}
+
+unsigned long Directory::GetNumberOfFilesInDirectory(const std::string& name)
+{
+ DIR* dir = opendir(name.c_str());
+
+ if (!dir) {
+ return 0;
+ }
+
+ unsigned long count = 0;
+ for (kwsys_dirent* d = readdir(dir); d; d = readdir(dir)) {
+ count++;
+ }
+ closedir(dir);
+ return count;
+}
+
+} // namespace KWSYS_NAMESPACE
+
+#endif
diff --git a/test/API/driver/kwsys/Directory.hxx.in b/test/API/driver/kwsys/Directory.hxx.in
new file mode 100644
index 0000000..ad8c51b
--- /dev/null
+++ b/test/API/driver/kwsys/Directory.hxx.in
@@ -0,0 +1,72 @@
+/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying
+ file Copyright.txt or https://cmake.org/licensing#kwsys for details. */
+#ifndef @KWSYS_NAMESPACE@_Directory_hxx
+#define @KWSYS_NAMESPACE@_Directory_hxx
+
+#include <@KWSYS_NAMESPACE@/Configure.h>
+
+#include <string>
+
+namespace @KWSYS_NAMESPACE@ {
+
+class DirectoryInternals;
+
+/** \class Directory
+ * \brief Portable directory/filename traversal.
+ *
+ * Directory provides a portable way of finding the names of the files
+ * in a system directory.
+ *
+ * Directory currently works with Windows and Unix operating systems.
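+ *
+ * A minimal usage sketch (the "/tmp" path is illustrative):
+ *
+ *   kwsys::Directory dir;
+ *   if (dir.Load("/tmp")) {
+ *     for (unsigned long i = 0; i < dir.GetNumberOfFiles(); ++i) {
+ *       std::cout << dir.GetFile(i) << std::endl;
+ *     }
+ *   }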
+ */
+class @KWSYS_NAMESPACE@_EXPORT Directory
+{
+public:
+ Directory();
+ ~Directory();
+
+ /**
+   * Load the specified directory and the names of the files in that
+   * directory. 0 is returned if the directory cannot be opened, 1 if it
+   * is opened.
+ */
+ bool Load(const std::string&);
+
+ /**
+ * Return the number of files in the current directory.
+ */
+ unsigned long GetNumberOfFiles() const;
+
+ /**
+ * Return the number of files in the specified directory.
+ * A higher performance static method.
+ */
+ static unsigned long GetNumberOfFilesInDirectory(const std::string&);
+
+ /**
+   * Return the file at the given index; indexing is 0-based.
+ */
+ const char* GetFile(unsigned long) const;
+
+ /**
+ * Return the path to Open'ed directory
+ */
+ const char* GetPath() const;
+
+ /**
+   * Clear the internal structure. Used internally at the beginning of
+   * Load(...) to clear the cache.
+ */
+ void Clear();
+
+private:
+ // Private implementation details.
+ DirectoryInternals* Internal;
+
+ Directory(const Directory&); // Not implemented.
+ void operator=(const Directory&); // Not implemented.
+}; // End Class: Directory
+
+} // namespace @KWSYS_NAMESPACE@
+
+#endif
diff --git a/test/API/driver/kwsys/DynamicLoader.cxx b/test/API/driver/kwsys/DynamicLoader.cxx
new file mode 100644
index 0000000..a4b8641
--- /dev/null
+++ b/test/API/driver/kwsys/DynamicLoader.cxx
@@ -0,0 +1,495 @@
+/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying
+ file Copyright.txt or https://cmake.org/licensing#kwsys for details. */
+#if defined(_WIN32)
+# define NOMINMAX // hide min,max to not conflict with <limits>
+#endif
+
+#include "kwsysPrivate.h"
+#include KWSYS_HEADER(DynamicLoader.hxx)
+
+#include KWSYS_HEADER(Configure.hxx)
+#include KWSYS_HEADER(Encoding.hxx)
+
+// Work-around CMake dependency scanning limitation. This must
+// duplicate the above list of headers.
+#if 0
+# include "Configure.hxx.in"
+# include "DynamicLoader.hxx.in"
+#endif
+
+// This file actually contains several different implementations:
+// * NOOP for environments without dynamic libs
+// * HP machines which use shl_load
+// * Mac OS X 10.2.x and earlier which uses NSLinkModule
+// * Windows which uses LoadLibrary
+// * BeOS / Haiku
+// * FreeMiNT for Atari
+// * Default implementation for *NIX systems (including Mac OS X 10.3 and
+// later) which use dlopen
+//
+// Each part of the ifdef contains a complete implementation for
+// the static methods of DynamicLoader.
+
+#define CHECK_OPEN_FLAGS(var, supported, ret) \
+ do { \
+ /* Check for unknown flags. */ \
+ if ((var & AllOpenFlags) != var) { \
+ return ret; \
+ } \
+ \
+ /* Check for unsupported flags. */ \
+ if ((var & (supported)) != var) { \
+ return ret; \
+ } \
+ } while (0)
+
+namespace KWSYS_NAMESPACE {
+
+DynamicLoader::LibraryHandle DynamicLoader::OpenLibrary(
+ const std::string& libname)
+{
+ return DynamicLoader::OpenLibrary(libname, 0);
+}
+}
+
+#if !KWSYS_SUPPORTS_SHARED_LIBS
+// Implementation for environments without dynamic libs
+# include <string.h> // for strerror()
+
+namespace KWSYS_NAMESPACE {
+
+DynamicLoader::LibraryHandle DynamicLoader::OpenLibrary(
+ const std::string& libname, int flags)
+{
+ return 0;
+}
+
+int DynamicLoader::CloseLibrary(DynamicLoader::LibraryHandle lib)
+{
+ if (!lib) {
+ return 0;
+ }
+
+ return 1;
+}
+
+DynamicLoader::SymbolPointer DynamicLoader::GetSymbolAddress(
+ DynamicLoader::LibraryHandle lib, const std::string& sym)
+{
+ return 0;
+}
+
+const char* DynamicLoader::LastError()
+{
+ return "General error";
+}
+
+} // namespace KWSYS_NAMESPACE
+
+#elif defined(__hpux)
+// Implementation for HPUX machines
+# include <dl.h>
+# include <errno.h>
+
+namespace KWSYS_NAMESPACE {
+
+DynamicLoader::LibraryHandle DynamicLoader::OpenLibrary(
+ const std::string& libname, int flags)
+{
+ CHECK_OPEN_FLAGS(flags, 0, 0);
+
+ return shl_load(libname.c_str(), BIND_DEFERRED | DYNAMIC_PATH, 0L);
+}
+
+int DynamicLoader::CloseLibrary(DynamicLoader::LibraryHandle lib)
+{
+ if (!lib) {
+ return 0;
+ }
+ return !shl_unload(lib);
+}
+
+DynamicLoader::SymbolPointer DynamicLoader::GetSymbolAddress(
+ DynamicLoader::LibraryHandle lib, const std::string& sym)
+{
+ void* addr;
+ int status;
+
+ /* TYPE_PROCEDURE Look for a function or procedure. (This used to be default)
+ * TYPE_DATA Look for a symbol in the data segment (for example,
+ * variables).
+ * TYPE_UNDEFINED Look for any symbol.
+ */
+ status = shl_findsym(&lib, sym.c_str(), TYPE_UNDEFINED, &addr);
+ void* result = (status < 0) ? (void*)0 : addr;
+
+ // Hack to cast pointer-to-data to pointer-to-function.
+ return *reinterpret_cast<DynamicLoader::SymbolPointer*>(&result);
+}
+
+const char* DynamicLoader::LastError()
+{
+ // TODO: Need implementation with errno/strerror
+ /* If successful, shl_findsym returns an integer (int) value zero. If
+ * shl_findsym cannot find sym, it returns -1 and sets errno to zero.
+ * If any other errors occur, shl_findsym returns -1 and sets errno to one
+ * of these values (defined in <errno.h>):
+ * ENOEXEC
+ * A format error was detected in the specified library.
+ * ENOSYM
+ * A symbol on which sym depends could not be found.
+ * EINVAL
+ * The specified handle is invalid.
+ */
+
+ if (errno == ENOEXEC || errno == ENOSYM || errno == EINVAL) {
+ return strerror(errno);
+ }
+ // else
+ return 0;
+}
+
+} // namespace KWSYS_NAMESPACE
+
+#elif defined(__APPLE__) && (MAC_OS_X_VERSION_MAX_ALLOWED < 1030)
+// Implementation for Mac OS X 10.2.x and earlier
+# include <mach-o/dyld.h>
+# include <string.h> // for strlen
+
+namespace KWSYS_NAMESPACE {
+
+DynamicLoader::LibraryHandle DynamicLoader::OpenLibrary(
+ const std::string& libname, int flags)
+{
+ CHECK_OPEN_FLAGS(flags, 0, 0);
+
+ NSObjectFileImageReturnCode rc;
+ NSObjectFileImage image = 0;
+
+ rc = NSCreateObjectFileImageFromFile(libname.c_str(), &image);
+ // rc == NSObjectFileImageInappropriateFile when trying to load a dylib file
+ if (rc != NSObjectFileImageSuccess) {
+ return 0;
+ }
+ NSModule handle = NSLinkModule(image, libname.c_str(),
+ NSLINKMODULE_OPTION_BINDNOW |
+ NSLINKMODULE_OPTION_RETURN_ON_ERROR);
+ NSDestroyObjectFileImage(image);
+ return handle;
+}
+
+int DynamicLoader::CloseLibrary(DynamicLoader::LibraryHandle lib)
+{
+ // NSUNLINKMODULE_OPTION_KEEP_MEMORY_MAPPED
+ // With this option the memory for the module is not deallocated
+ // allowing pointers into the module to still be valid.
+  // You should use this option instead if your code experiences some of the
+  // problems reported against Panther 10.3.9 (fixed in Tiger 10.4.2 and up)
+ bool success = NSUnLinkModule(lib, NSUNLINKMODULE_OPTION_NONE);
+ return success;
+}
+
+DynamicLoader::SymbolPointer DynamicLoader::GetSymbolAddress(
+ DynamicLoader::LibraryHandle lib, const std::string& sym)
+{
+ void* result = 0;
+ // Need to prepend symbols with '_' on Apple-gcc compilers
+ std::string rsym = '_' + sym;
+
+ NSSymbol symbol = NSLookupSymbolInModule(lib, rsym.c_str());
+ if (symbol) {
+ result = NSAddressOfSymbol(symbol);
+ }
+
+ // Hack to cast pointer-to-data to pointer-to-function.
+ return *reinterpret_cast<DynamicLoader::SymbolPointer*>(&result);
+}
+
+const char* DynamicLoader::LastError()
+{
+ return 0;
+}
+
+} // namespace KWSYS_NAMESPACE
+
+#elif defined(_WIN32) && !defined(__CYGWIN__)
+// Implementation for Windows win32 code but not cygwin
+# include <windows.h>
+
+# include <stdio.h>
+
+namespace KWSYS_NAMESPACE {
+
+DynamicLoader::LibraryHandle DynamicLoader::OpenLibrary(
+ const std::string& libname, int flags)
+{
+ CHECK_OPEN_FLAGS(flags, SearchBesideLibrary, nullptr);
+
+ DWORD llFlags = 0;
+ if (flags & SearchBesideLibrary) {
+ llFlags |= LOAD_WITH_ALTERED_SEARCH_PATH;
+ }
+
+ return LoadLibraryExW(Encoding::ToWindowsExtendedPath(libname).c_str(),
+ nullptr, llFlags);
+}
+
+int DynamicLoader::CloseLibrary(DynamicLoader::LibraryHandle lib)
+{
+ return (int)FreeLibrary(lib);
+}
+
+DynamicLoader::SymbolPointer DynamicLoader::GetSymbolAddress(
+ DynamicLoader::LibraryHandle lib, const std::string& sym)
+{
+ // TODO: The calling convention affects the name of the symbol. We
+ // should have a tool to help get the symbol with the desired
+ // calling convention. Currently we assume cdecl.
+ //
+ // Borland:
+ // __cdecl = "_func" (default)
+ // __fastcall = "@_func"
+ // __stdcall = "func"
+ //
+ // Watcom:
+ // __cdecl = "_func"
+ // __fastcall = "@_func@X"
+ // __stdcall = "_func@X"
+ // __watcall = "func_" (default)
+ //
+ // MSVC:
+ // __cdecl = "func" (default)
+ // __fastcall = "@_func@X"
+ // __stdcall = "_func@X"
+ //
+ // Note that the "@X" part of the name above is the total size (in
+ // bytes) of the arguments on the stack.
+ void* result;
+# if defined(__BORLANDC__) || defined(__WATCOMC__)
+ // Need to prepend symbols with '_'
+ std::string ssym = '_' + sym;
+ const char* rsym = ssym.c_str();
+# else
+ const char* rsym = sym.c_str();
+# endif
+ result = (void*)GetProcAddress(lib, rsym);
+// Hack to cast pointer-to-data to pointer-to-function.
+# ifdef __WATCOMC__
+ return *(DynamicLoader::SymbolPointer*)(&result);
+# else
+ return *reinterpret_cast<DynamicLoader::SymbolPointer*>(&result);
+# endif
+}
+
+# define DYNLOAD_ERROR_BUFFER_SIZE 1024
+
+const char* DynamicLoader::LastError()
+{
+ wchar_t lpMsgBuf[DYNLOAD_ERROR_BUFFER_SIZE + 1];
+
+ DWORD error = GetLastError();
+ DWORD length = FormatMessageW(
+ FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS, nullptr, error,
+ MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), // Default language
+ lpMsgBuf, DYNLOAD_ERROR_BUFFER_SIZE, nullptr);
+
+ static char str[DYNLOAD_ERROR_BUFFER_SIZE + 1];
+
+ if (length < 1) {
+ /* FormatMessage failed. Use a default message. */
+ _snprintf(str, DYNLOAD_ERROR_BUFFER_SIZE,
+ "DynamicLoader encountered error 0x%X. "
+ "FormatMessage failed with error 0x%X",
+ error, GetLastError());
+ return str;
+ }
+
+ if (!WideCharToMultiByte(CP_UTF8, 0, lpMsgBuf, -1, str,
+ DYNLOAD_ERROR_BUFFER_SIZE, nullptr, nullptr)) {
+ /* WideCharToMultiByte failed. Use a default message. */
+ _snprintf(str, DYNLOAD_ERROR_BUFFER_SIZE,
+ "DynamicLoader encountered error 0x%X. "
+ "WideCharToMultiByte failed with error 0x%X",
+ error, GetLastError());
+ }
+
+ return str;
+}
+
+} // namespace KWSYS_NAMESPACE
+
+#elif defined(__BEOS__)
+// Implementation for BeOS / Haiku
+# include <string.h> // for strerror()
+
+# include <be/kernel/image.h>
+# include <be/support/Errors.h>
+
+namespace KWSYS_NAMESPACE {
+
+static image_id last_dynamic_err = B_OK;
+
+DynamicLoader::LibraryHandle DynamicLoader::OpenLibrary(
+ const std::string& libname, int flags)
+{
+ CHECK_OPEN_FLAGS(flags, 0, 0);
+
+ // image_id's are integers, errors are negative. Add one just in case we
+ // get a valid image_id of zero (is that even possible?).
+ image_id rc = load_add_on(libname.c_str());
+ if (rc < 0) {
+ last_dynamic_err = rc;
+ return 0;
+ }
+
+ return rc + 1;
+}
+
+int DynamicLoader::CloseLibrary(DynamicLoader::LibraryHandle lib)
+{
+ if (!lib) {
+ last_dynamic_err = B_BAD_VALUE;
+ return 0;
+ } else {
+    // unload_add_on() returns B_OK on success and an error code on failure.
+ status_t rc = unload_add_on(lib - 1);
+ if (rc != B_OK) {
+ last_dynamic_err = rc;
+ return 0;
+ }
+ }
+
+ return 1;
+}
+
+DynamicLoader::SymbolPointer DynamicLoader::GetSymbolAddress(
+ DynamicLoader::LibraryHandle lib, const std::string& sym)
+{
+ // Hack to cast pointer-to-data to pointer-to-function.
+ union
+ {
+ void* pvoid;
+ DynamicLoader::SymbolPointer psym;
+ } result;
+
+ result.psym = nullptr;
+
+ if (!lib) {
+ last_dynamic_err = B_BAD_VALUE;
+ } else {
+ // !!! FIXME: BeOS can do function-only lookups...does this ever
+ // !!! FIXME: actually _want_ a data symbol lookup, or was this union
+ // !!! FIXME: a leftover of dlsym()? (s/ANY/TEXT for functions only).
+ status_t rc =
+ get_image_symbol(lib - 1, sym.c_str(), B_SYMBOL_TYPE_ANY, &result.pvoid);
+ if (rc != B_OK) {
+ last_dynamic_err = rc;
+ result.psym = nullptr;
+ }
+ }
+ return result.psym;
+}
+
+const char* DynamicLoader::LastError()
+{
+ const char* retval = strerror(last_dynamic_err);
+ last_dynamic_err = B_OK;
+ return retval;
+}
+
+} // namespace KWSYS_NAMESPACE
+
+#elif defined(__MINT__)
+// Implementation for FreeMiNT on Atari
+# define _GNU_SOURCE /* for program_invocation_name */
+# include <dld.h>
+# include <errno.h>
+# include <malloc.h>
+# include <string.h>
+
+namespace KWSYS_NAMESPACE {
+
+DynamicLoader::LibraryHandle DynamicLoader::OpenLibrary(
+ const std::string& libname, int flags)
+{
+ CHECK_OPEN_FLAGS(flags, 0, nullptr);
+
+ char* name = (char*)calloc(1, libname.size() + 1);
+ dld_init(program_invocation_name);
+ strncpy(name, libname.c_str(), libname.size());
+ dld_link(libname.c_str());
+ return (void*)name;
+}
+
+int DynamicLoader::CloseLibrary(DynamicLoader::LibraryHandle lib)
+{
+ dld_unlink_by_file((char*)lib, 0);
+ free(lib);
+ return 0;
+}
+
+DynamicLoader::SymbolPointer DynamicLoader::GetSymbolAddress(
+ DynamicLoader::LibraryHandle lib, const std::string& sym)
+{
+ // Hack to cast pointer-to-data to pointer-to-function.
+ union
+ {
+ void* pvoid;
+ DynamicLoader::SymbolPointer psym;
+ } result;
+ result.pvoid = dld_get_symbol(sym.c_str());
+ return result.psym;
+}
+
+const char* DynamicLoader::LastError()
+{
+ return dld_strerror(dld_errno);
+}
+
+} // namespace KWSYS_NAMESPACE
+
+#else
+// Default implementation for *NIX systems (including Mac OS X 10.3 and
+// later) which use dlopen
+# include <dlfcn.h>
+
+namespace KWSYS_NAMESPACE {
+
+DynamicLoader::LibraryHandle DynamicLoader::OpenLibrary(
+ const std::string& libname, int flags)
+{
+ CHECK_OPEN_FLAGS(flags, 0, nullptr);
+
+ return dlopen(libname.c_str(), RTLD_LAZY);
+}
+
+int DynamicLoader::CloseLibrary(DynamicLoader::LibraryHandle lib)
+{
+ if (lib) {
+ // The function dlclose() returns 0 on success, and non-zero on error.
+ return !dlclose(lib);
+ }
+ // else
+ return 0;
+}
+
+DynamicLoader::SymbolPointer DynamicLoader::GetSymbolAddress(
+ DynamicLoader::LibraryHandle lib, const std::string& sym)
+{
+ // Hack to cast pointer-to-data to pointer-to-function.
+ union
+ {
+ void* pvoid;
+ DynamicLoader::SymbolPointer psym;
+ } result;
+ result.pvoid = dlsym(lib, sym.c_str());
+ return result.psym;
+}
+
+const char* DynamicLoader::LastError()
+{
+ return dlerror();
+}
+
+} // namespace KWSYS_NAMESPACE
+#endif
diff --git a/test/API/driver/kwsys/DynamicLoader.hxx.in b/test/API/driver/kwsys/DynamicLoader.hxx.in
new file mode 100644
index 0000000..539c742
--- /dev/null
+++ b/test/API/driver/kwsys/DynamicLoader.hxx.in
@@ -0,0 +1,106 @@
+/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying
+ file Copyright.txt or https://cmake.org/licensing#kwsys for details. */
+#ifndef @KWSYS_NAMESPACE@_DynamicLoader_hxx
+#define @KWSYS_NAMESPACE@_DynamicLoader_hxx
+
+#include <@KWSYS_NAMESPACE@/Configure.hxx>
+
+#include <string>
+
+#if defined(__hpux)
+# include <dl.h>
+#elif defined(_WIN32) && !defined(__CYGWIN__)
+# include <windows.h>
+#elif defined(__APPLE__)
+# include <AvailabilityMacros.h>
+# if MAC_OS_X_VERSION_MAX_ALLOWED < 1030
+# include <mach-o/dyld.h>
+# endif
+#elif defined(__BEOS__)
+# include <be/kernel/image.h>
+#endif
+
+namespace @KWSYS_NAMESPACE@ {
+/** \class DynamicLoader
+ * \brief Portable loading of dynamic libraries or dll's.
+ *
+ * DynamicLoader provides a portable interface to loading dynamic
+ * libraries or dll's into a process.
+ *
+ * DynamicLoader currently works with Windows, Apple, HP-UX and Unix (POSIX)
+ * operating systems.
+ *
+ * \warning dlopen on *nix systems works the following way:
+ * if the filename contains a slash ("/"), it is interpreted as a (relative
+ * or absolute) pathname; otherwise, the dynamic linker searches for the
+ * library (see ld.so(8) for further details). This distinction does not
+ * exist on Win32, so ideally you should pass a full path to guarantee a
+ * consistent way of dealing with dynamic loading of shared libraries.
+ *
+ * \warning the Cygwin implementation does not use the Win32 HMODULE; an
+ * extra condition is used so that the correct (POSIX) declaration is included.
+ */
+
+class @KWSYS_NAMESPACE@_EXPORT DynamicLoader
+{
+public:
+// Ugly stuff for library handles
+// They are different on several different OS's
+#if defined(__hpux)
+ typedef shl_t LibraryHandle;
+#elif defined(_WIN32) && !defined(__CYGWIN__)
+ typedef HMODULE LibraryHandle;
+#elif defined(__APPLE__)
+# if MAC_OS_X_VERSION_MAX_ALLOWED < 1030
+ typedef NSModule LibraryHandle;
+# else
+ typedef void* LibraryHandle;
+# endif
+#elif defined(__BEOS__)
+ typedef image_id LibraryHandle;
+#else // POSIX
+ typedef void* LibraryHandle;
+#endif
+
+ // Return type from DynamicLoader::GetSymbolAddress.
+ typedef void (*SymbolPointer)();
+
+ enum OpenFlags
+ {
+ // Search for dependent libraries beside the library being loaded.
+ //
+ // This is currently only supported on Windows.
+ SearchBesideLibrary = 0x00000001,
+
+ AllOpenFlags = SearchBesideLibrary
+ };
+
+ /** Load a dynamic library into the current process.
+ * The returned LibraryHandle can be used to access the symbols in the
+ * library. The optional second argument is a set of flags to use when
+ * opening the library. If unrecognized or unsupported flags are specified,
+ * the library is not opened. */
+ static LibraryHandle OpenLibrary(const std::string&);
+ static LibraryHandle OpenLibrary(const std::string&, int);
+
+ /** Attempt to detach a dynamic library from the
+ * process. A nonzero (true) value is returned if it is successful. */
+ static int CloseLibrary(LibraryHandle);
+
+ /** Find the address of the symbol in the given library. */
+ static SymbolPointer GetSymbolAddress(LibraryHandle, const std::string&);
+
+ /** Return the default module prefix for the current platform. */
+ static const char* LibPrefix() { return "@KWSYS_DynamicLoader_PREFIX@"; }
+
+ /** Return the default module suffix for the current platform. */
+ static const char* LibExtension() { return "@KWSYS_DynamicLoader_SUFFIX@"; }
+
+ /** Return the last error produced from a call made on this class. */
+ static const char* LastError();
+}; // End Class: DynamicLoader
+
+} // namespace @KWSYS_NAMESPACE@
+
+#endif
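
For orientation, here is a minimal usage sketch of the DynamicLoader interface declared above. It assumes the library is configured with the default "kwsys" namespace; the library name "libexample.so" and the symbol "example_init" are hypothetical placeholders.

    #include <kwsys/DynamicLoader.hxx>
    #include <iostream>

    int main()
    {
      typedef kwsys::DynamicLoader DL;
      // Open the library; an empty handle indicates failure.
      DL::LibraryHandle lib = DL::OpenLibrary("libexample.so");
      if (!lib) {
        std::cerr << "open failed: " << DL::LastError() << std::endl;
        return 1;
      }
      // GetSymbolAddress returns a generic void(*)(); cast it to the real
      // signature of the exported function before calling it.
      DL::SymbolPointer sym = DL::GetSymbolAddress(lib, "example_init");
      if (sym) {
        reinterpret_cast<void (*)(void)>(sym)();
      }
      DL::CloseLibrary(lib);
      return 0;
    }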
diff --git a/test/API/driver/kwsys/Encoding.h.in b/test/API/driver/kwsys/Encoding.h.in
new file mode 100644
index 0000000..86a2669
--- /dev/null
+++ b/test/API/driver/kwsys/Encoding.h.in
@@ -0,0 +1,69 @@
+/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying
+ file Copyright.txt or https://cmake.org/licensing#kwsys for details. */
+#ifndef @KWSYS_NAMESPACE@_Encoding_h
+#define @KWSYS_NAMESPACE@_Encoding_h
+
+#include <@KWSYS_NAMESPACE@/Configure.h>
+
+#include <wchar.h>
+
+/* Redefine all public interface symbol names to be in the proper
+ namespace. These macros are used internally to kwsys only, and are
+ not visible to user code. Use kwsysHeaderDump.pl to reproduce
+ these macros after making changes to the interface. */
+#if !defined(KWSYS_NAMESPACE)
+# define kwsys_ns(x) @KWSYS_NAMESPACE@##x
+# define kwsysEXPORT @KWSYS_NAMESPACE@_EXPORT
+#endif
+#if !@KWSYS_NAMESPACE@_NAME_IS_KWSYS
+# define kwsysEncoding kwsys_ns(Encoding)
+# define kwsysEncoding_mbstowcs kwsys_ns(Encoding_mbstowcs)
+# define kwsysEncoding_DupToWide kwsys_ns(Encoding_DupToWide)
+# define kwsysEncoding_wcstombs kwsys_ns(Encoding_wcstombs)
+# define kwsysEncoding_DupToNarrow kwsys_ns(Encoding_DupToNarrow)
+#endif
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+/* Convert a narrow string to a wide string.
+ On Windows, UTF-8 is assumed, and on other platforms,
+ the current locale is assumed.
+ */
+kwsysEXPORT size_t kwsysEncoding_mbstowcs(wchar_t* dest, const char* src,
+ size_t n);
+
+/* Convert a narrow string to a newly malloc()'d wide string.
+ The caller must free() the result.
+ This can return NULL if the conversion fails. */
+kwsysEXPORT wchar_t* kwsysEncoding_DupToWide(const char* src);
+
+/* Convert a wide string to a narrow string.
+ On Windows, UTF-8 is assumed, and on other platforms,
+ the current locale is assumed. */
+kwsysEXPORT size_t kwsysEncoding_wcstombs(char* dest, const wchar_t* src,
+ size_t n);
+
+/* Convert a wide string to a newly malloc()'d narrow string.
+ The caller must free() the result.
+ This can return NULL if the conversion fails. */
+kwsysEXPORT char* kwsysEncoding_DupToNarrow(const wchar_t* str);
+
+#if defined(__cplusplus)
+} /* extern "C" */
+#endif
+
+/* If we are building a kwsys .c or .cxx file, let it use these macros.
+ Otherwise, undefine them to keep the namespace clean. */
+#if !defined(KWSYS_NAMESPACE)
+# undef kwsys_ns
+# undef kwsysEXPORT
+# if !defined(KWSYS_NAMESPACE) && !@KWSYS_NAMESPACE@_NAME_IS_KWSYS
+# undef kwsysEncoding
+# undef kwsysEncoding_mbstowcs
+# undef kwsysEncoding_DupToWide
+# undef kwsysEncoding_wcstombs
+# undef kwsysEncoding_DupToNarrow
+# endif
+#endif
+
+#endif
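
A minimal sketch of using the C encoding interface above, assuming the default "kwsys" namespace so the symbols keep their kwsysEncoding_* names; the input text is arbitrary. The Dup* functions return malloc()'d buffers that the caller must free().

    #include <kwsys/Encoding.h>
    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
      /* Narrow -> wide, then back again as a round trip. */
      wchar_t* wide = kwsysEncoding_DupToWide("some text");
      if (wide) {
        char* narrow = kwsysEncoding_DupToNarrow(wide);
        if (narrow) {
          printf("%s\n", narrow);
          free(narrow);
        }
        free(wide);
      }
      return 0;
    }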
diff --git a/test/API/driver/kwsys/Encoding.hxx.in b/test/API/driver/kwsys/Encoding.hxx.in
new file mode 100644
index 0000000..75a2d4d
--- /dev/null
+++ b/test/API/driver/kwsys/Encoding.hxx.in
@@ -0,0 +1,80 @@
+/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying
+ file Copyright.txt or https://cmake.org/licensing#kwsys for details. */
+#ifndef @KWSYS_NAMESPACE@_Encoding_hxx
+#define @KWSYS_NAMESPACE@_Encoding_hxx
+
+#include <@KWSYS_NAMESPACE@/Configure.hxx>
+
+#include <string>
+#include <vector>
+
+namespace @KWSYS_NAMESPACE@ {
+class @KWSYS_NAMESPACE@_EXPORT Encoding
+{
+public:
+ // Container class for argc/argv.
+ class @KWSYS_NAMESPACE@_EXPORT CommandLineArguments
+ {
+ public:
+ // On Windows, get the program command line arguments
+ // in this Encoding module's 8-bit encoding.
+ // On other platforms the given argc/argv is used; to be
+ // consistent, it should be the argc/argv from main().
+ static CommandLineArguments Main(int argc, char const* const* argv);
+
+ // Construct CommandLineArguments with the given
+ // argc/argv. It is assumed that the strings are already
+ // in the encoding used by this module.
+ CommandLineArguments(int argc, char const* const* argv);
+
+ // Construct CommandLineArguments with the given
+ // argc and wide argv. This is useful if wmain() is used.
+ CommandLineArguments(int argc, wchar_t const* const* argv);
+ ~CommandLineArguments();
+ CommandLineArguments(const CommandLineArguments&);
+ CommandLineArguments& operator=(const CommandLineArguments&);
+
+ int argc() const;
+ char const* const* argv() const;
+
+ protected:
+ std::vector<char*> argv_;
+ };
+
+ /**
+ * Convert between char and wchar_t
+ */
+
+#if @KWSYS_NAMESPACE@_STL_HAS_WSTRING
+
+ // Convert a narrow string to a wide string.
+ // On Windows, UTF-8 is assumed, and on other platforms,
+ // the current locale is assumed.
+ static std::wstring ToWide(const std::string& str);
+ static std::wstring ToWide(const char* str);
+
+ // Convert a wide string to a narrow string.
+ // On Windows, UTF-8 is assumed, and on other platforms,
+ // the current locale is assumed.
+ static std::string ToNarrow(const std::wstring& str);
+ static std::string ToNarrow(const wchar_t* str);
+
+# if defined(_WIN32)
+ /**
+ * Convert the path to an extended length path to avoid MAX_PATH length
+ * limitations on Windows. If the input is a local path the result will be
+ * prefixed with \\?\; if the input is instead a network path, the result
+ * will be prefixed with \\?\UNC\. All output will also be converted to
+ * absolute paths with Windows-style backslashes.
+ **/
+ static std::wstring ToWindowsExtendedPath(std::string const&);
+ static std::wstring ToWindowsExtendedPath(const char* source);
+ static std::wstring ToWindowsExtendedPath(std::wstring const& wsource);
+# endif
+
+#endif // @KWSYS_NAMESPACE@_STL_HAS_WSTRING
+
+}; // class Encoding
+} // namespace @KWSYS_NAMESPACE@
+
+#endif
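
A minimal sketch of the C++ interface above, assuming the default "kwsys" namespace and a build where std::wstring support is enabled.

    #include <kwsys/Encoding.hxx>
    #include <iostream>
    #include <string>

    int main(int argc, char* argv[])
    {
      // On Windows this re-reads the real command line in the module's 8-bit
      // encoding; on other platforms it simply wraps the argc/argv passed in.
      kwsys::Encoding::CommandLineArguments args =
        kwsys::Encoding::CommandLineArguments::Main(argc, argv);
      char const* const* av = args.argv();
      for (int i = 0; i < args.argc(); ++i) {
        std::cout << av[i] << "\n";
      }

      // Narrow <-> wide conversion helpers.
      std::wstring w = kwsys::Encoding::ToWide("example");
      std::string n = kwsys::Encoding::ToNarrow(w);
      std::cout << n << std::endl;
      return 0;
    }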
diff --git a/test/API/driver/kwsys/EncodingC.c b/test/API/driver/kwsys/EncodingC.c
new file mode 100644
index 0000000..e12236a
--- /dev/null
+++ b/test/API/driver/kwsys/EncodingC.c
@@ -0,0 +1,72 @@
+/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying
+ file Copyright.txt or https://cmake.org/licensing#kwsys for details. */
+#include "kwsysPrivate.h"
+#include KWSYS_HEADER(Encoding.h)
+
+/* Work-around CMake dependency scanning limitation. This must
+ duplicate the above list of headers. */
+#if 0
+# include "Encoding.h.in"
+#endif
+
+#include <stdlib.h>
+
+#ifdef _WIN32
+# include <windows.h>
+#endif
+
+size_t kwsysEncoding_mbstowcs(wchar_t* dest, const char* str, size_t n)
+{
+ if (str == 0) {
+ return (size_t)-1;
+ }
+#ifdef _WIN32
+ return MultiByteToWideChar(KWSYS_ENCODING_DEFAULT_CODEPAGE, 0, str, -1, dest,
+ (int)n) -
+ 1;
+#else
+ return mbstowcs(dest, str, n);
+#endif
+}
+
+wchar_t* kwsysEncoding_DupToWide(const char* str)
+{
+ wchar_t* ret = NULL;
+ size_t length = kwsysEncoding_mbstowcs(NULL, str, 0) + 1;
+ if (length > 0) {
+ ret = (wchar_t*)malloc((length) * sizeof(wchar_t));
+ if (ret) {
+ ret[0] = 0;
+ kwsysEncoding_mbstowcs(ret, str, length);
+ }
+ }
+ return ret;
+}
+
+size_t kwsysEncoding_wcstombs(char* dest, const wchar_t* str, size_t n)
+{
+ if (str == 0) {
+ return (size_t)-1;
+ }
+#ifdef _WIN32
+ return WideCharToMultiByte(KWSYS_ENCODING_DEFAULT_CODEPAGE, 0, str, -1, dest,
+ (int)n, NULL, NULL) -
+ 1;
+#else
+ return wcstombs(dest, str, n);
+#endif
+}
+
+char* kwsysEncoding_DupToNarrow(const wchar_t* str)
+{
+ char* ret = NULL;
+ size_t length = kwsysEncoding_wcstombs(0, str, 0) + 1;
+ if (length > 0) {
+ ret = (char*)malloc(length);
+ if (ret) {
+ ret[0] = 0;
+ kwsysEncoding_wcstombs(ret, str, length);
+ }
+ }
+ return ret;
+}
diff --git a/test/API/driver/kwsys/EncodingCXX.cxx b/test/API/driver/kwsys/EncodingCXX.cxx
new file mode 100644
index 0000000..5cad934
--- /dev/null
+++ b/test/API/driver/kwsys/EncodingCXX.cxx
@@ -0,0 +1,288 @@
+/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying
+ file Copyright.txt or https://cmake.org/licensing#kwsys for details. */
+#ifdef __osf__
+# define _OSF_SOURCE
+# define _POSIX_C_SOURCE 199506L
+# define _XOPEN_SOURCE_EXTENDED
+#endif
+
+#include "kwsysPrivate.h"
+#include KWSYS_HEADER(Encoding.hxx)
+#include KWSYS_HEADER(Encoding.h)
+
+// Work-around CMake dependency scanning limitation. This must
+// duplicate the above list of headers.
+#if 0
+# include "Encoding.h.in"
+# include "Encoding.hxx.in"
+#endif
+
+#include <stdlib.h>
+#include <string.h>
+#include <vector>
+
+#ifdef _MSC_VER
+# pragma warning(disable : 4786)
+#endif
+
+// Windows API.
+#if defined(_WIN32)
+# include <windows.h>
+
+# include <ctype.h>
+# include <shellapi.h>
+#endif
+
+namespace KWSYS_NAMESPACE {
+
+Encoding::CommandLineArguments Encoding::CommandLineArguments::Main(
+ int argc, char const* const* argv)
+{
+#ifdef _WIN32
+ (void)argc;
+ (void)argv;
+
+ int ac;
+ LPWSTR* w_av = CommandLineToArgvW(GetCommandLineW(), &ac);
+
+ std::vector<std::string> av1(ac);
+ std::vector<char const*> av2(ac);
+ for (int i = 0; i < ac; i++) {
+ av1[i] = ToNarrow(w_av[i]);
+ av2[i] = av1[i].c_str();
+ }
+ LocalFree(w_av);
+ return CommandLineArguments(ac, &av2[0]);
+#else
+ return CommandLineArguments(argc, argv);
+#endif
+}
+
+Encoding::CommandLineArguments::CommandLineArguments(int ac,
+ char const* const* av)
+{
+ this->argv_.resize(ac + 1);
+ for (int i = 0; i < ac; i++) {
+ this->argv_[i] = strdup(av[i]);
+ }
+ this->argv_[ac] = nullptr;
+}
+
+Encoding::CommandLineArguments::CommandLineArguments(int ac,
+ wchar_t const* const* av)
+{
+ this->argv_.resize(ac + 1);
+ for (int i = 0; i < ac; i++) {
+ this->argv_[i] = kwsysEncoding_DupToNarrow(av[i]);
+ }
+ this->argv_[ac] = nullptr;
+}
+
+Encoding::CommandLineArguments::~CommandLineArguments()
+{
+ for (size_t i = 0; i < this->argv_.size(); i++) {
+ free(argv_[i]);
+ }
+}
+
+Encoding::CommandLineArguments::CommandLineArguments(
+ const CommandLineArguments& other)
+{
+ this->argv_.resize(other.argv_.size());
+ for (size_t i = 0; i < this->argv_.size(); i++) {
+ this->argv_[i] = other.argv_[i] ? strdup(other.argv_[i]) : nullptr;
+ }
+}
+
+Encoding::CommandLineArguments& Encoding::CommandLineArguments::operator=(
+ const CommandLineArguments& other)
+{
+ if (this != &other) {
+ size_t i;
+ for (i = 0; i < this->argv_.size(); i++) {
+ free(this->argv_[i]);
+ }
+
+ this->argv_.resize(other.argv_.size());
+ for (i = 0; i < this->argv_.size(); i++) {
+ this->argv_[i] = other.argv_[i] ? strdup(other.argv_[i]) : nullptr;
+ }
+ }
+
+ return *this;
+}
+
+int Encoding::CommandLineArguments::argc() const
+{
+ return static_cast<int>(this->argv_.size() - 1);
+}
+
+char const* const* Encoding::CommandLineArguments::argv() const
+{
+ return &this->argv_[0];
+}
+
+#if KWSYS_STL_HAS_WSTRING
+
+std::wstring Encoding::ToWide(const std::string& str)
+{
+ std::wstring wstr;
+# if defined(_WIN32)
+ const int wlength =
+ MultiByteToWideChar(KWSYS_ENCODING_DEFAULT_CODEPAGE, 0, str.data(),
+ int(str.size()), nullptr, 0);
+ if (wlength > 0) {
+ wchar_t* wdata = new wchar_t[wlength];
+ int r = MultiByteToWideChar(KWSYS_ENCODING_DEFAULT_CODEPAGE, 0, str.data(),
+ int(str.size()), wdata, wlength);
+ if (r > 0) {
+ wstr = std::wstring(wdata, wlength);
+ }
+ delete[] wdata;
+ }
+# else
+ size_t pos = 0;
+ size_t nullPos = 0;
+ do {
+ if (pos < str.size() && str.at(pos) != '\0') {
+ wstr += ToWide(str.c_str() + pos);
+ }
+ nullPos = str.find('\0', pos);
+ if (nullPos != std::string::npos) {
+ pos = nullPos + 1;
+ wstr += wchar_t('\0');
+ }
+ } while (nullPos != std::string::npos);
+# endif
+ return wstr;
+}
+
+std::string Encoding::ToNarrow(const std::wstring& str)
+{
+ std::string nstr;
+# if defined(_WIN32)
+ int length =
+ WideCharToMultiByte(KWSYS_ENCODING_DEFAULT_CODEPAGE, 0, str.c_str(),
+ int(str.size()), nullptr, 0, nullptr, nullptr);
+ if (length > 0) {
+ char* data = new char[length];
+ int r =
+ WideCharToMultiByte(KWSYS_ENCODING_DEFAULT_CODEPAGE, 0, str.c_str(),
+ int(str.size()), data, length, nullptr, nullptr);
+ if (r > 0) {
+ nstr = std::string(data, length);
+ }
+ delete[] data;
+ }
+# else
+ size_t pos = 0;
+ size_t nullPos = 0;
+ do {
+ if (pos < str.size() && str.at(pos) != '\0') {
+ nstr += ToNarrow(str.c_str() + pos);
+ }
+ nullPos = str.find(wchar_t('\0'), pos);
+ if (nullPos != std::string::npos) {
+ pos = nullPos + 1;
+ nstr += '\0';
+ }
+ } while (nullPos != std::string::npos);
+# endif
+ return nstr;
+}
+
+std::wstring Encoding::ToWide(const char* cstr)
+{
+ std::wstring wstr;
+ size_t length = kwsysEncoding_mbstowcs(nullptr, cstr, 0) + 1;
+ if (length > 0) {
+ std::vector<wchar_t> wchars(length);
+ if (kwsysEncoding_mbstowcs(&wchars[0], cstr, length) > 0) {
+ wstr = &wchars[0];
+ }
+ }
+ return wstr;
+}
+
+std::string Encoding::ToNarrow(const wchar_t* wcstr)
+{
+ std::string str;
+ size_t length = kwsysEncoding_wcstombs(nullptr, wcstr, 0) + 1;
+ if (length > 0) {
+ std::vector<char> chars(length);
+ if (kwsysEncoding_wcstombs(&chars[0], wcstr, length) > 0) {
+ str = &chars[0];
+ }
+ }
+ return str;
+}
+
+# if defined(_WIN32)
+// Convert a path to a Windows extended-length (\\?\ or \\?\UNC\) path
+std::wstring Encoding::ToWindowsExtendedPath(std::string const& source)
+{
+ return ToWindowsExtendedPath(ToWide(source));
+}
+
+// Convert a path to a Windows extended-length (\\?\ or \\?\UNC\) path
+std::wstring Encoding::ToWindowsExtendedPath(const char* source)
+{
+ return ToWindowsExtendedPath(ToWide(source));
+}
+
+// Convert a path to a Windows extended-length (\\?\ or \\?\UNC\) path
+std::wstring Encoding::ToWindowsExtendedPath(std::wstring const& wsource)
+{
+ // Resolve any relative paths
+ DWORD wfull_len;
+
+ /* The +3 is a workaround for a bug in some versions of GetFullPathNameW that
+ * won't return a large enough buffer size if the input is too small */
+ wfull_len = GetFullPathNameW(wsource.c_str(), 0, nullptr, nullptr) + 3;
+ std::vector<wchar_t> wfull(wfull_len);
+ GetFullPathNameW(wsource.c_str(), wfull_len, &wfull[0], nullptr);
+
+ /* This should get the correct size without any extra padding from the
+ * previous size workaround. */
+ wfull_len = static_cast<DWORD>(wcslen(&wfull[0]));
+
+ if (wfull_len >= 2 && isalpha(wfull[0]) &&
+ wfull[1] == L':') { /* C:\Foo\bar\FooBar.txt */
+ return L"\\\\?\\" + std::wstring(&wfull[0]);
+ } else if (wfull_len >= 2 && wfull[0] == L'\\' &&
+ wfull[1] == L'\\') { /* Starts with \\ */
+ if (wfull_len >= 4 && wfull[2] == L'?' &&
+ wfull[3] == L'\\') { /* Starts with \\?\ */
+ if (wfull_len >= 8 && wfull[4] == L'U' && wfull[5] == L'N' &&
+ wfull[6] == L'C' &&
+ wfull[7] == L'\\') { /* \\?\UNC\Foo\bar\FooBar.txt */
+ return std::wstring(&wfull[0]);
+ } else if (wfull_len >= 6 && isalpha(wfull[4]) &&
+ wfull[5] == L':') { /* \\?\C:\Foo\bar\FooBar.txt */
+ return std::wstring(&wfull[0]);
+ } else if (wfull_len >= 5) { /* \\?\Foo\bar\FooBar.txt */
+ return L"\\\\?\\UNC\\" + std::wstring(&wfull[4]);
+ }
+ } else if (wfull_len >= 4 && wfull[2] == L'.' &&
+ wfull[3] == L'\\') { /* Starts with \\.\ a device name */
+ if (wfull_len >= 6 && isalpha(wfull[4]) &&
+ wfull[5] == L':') { /* \\.\C:\Foo\bar\FooBar.txt */
+ return L"\\\\?\\" + std::wstring(&wfull[4]);
+ } else if (wfull_len >=
+ 5) { /* \\.\Foo\bar\ Device name is left unchanged */
+ return std::wstring(&wfull[0]);
+ }
+ } else if (wfull_len >= 3) { /* \\Foo\bar\FooBar.txt */
+ return L"\\\\?\\UNC\\" + std::wstring(&wfull[2]);
+ }
+ }
+
+ // If this case has been reached, then the path is invalid. Leave it
+ // unchanged
+ return wsource;
+}
+# endif
+
+#endif // KWSYS_STL_HAS_WSTRING
+
+} // namespace KWSYS_NAMESPACE
diff --git a/test/API/driver/kwsys/ExtraTest.cmake.in b/test/API/driver/kwsys/ExtraTest.cmake.in
new file mode 100644
index 0000000..e8c0a1c
--- /dev/null
+++ b/test/API/driver/kwsys/ExtraTest.cmake.in
@@ -0,0 +1 @@
+MESSAGE("*** This message is generated by message inside a file that is included in DartTestfile.txt ***")
diff --git a/test/API/driver/kwsys/FStream.cxx b/test/API/driver/kwsys/FStream.cxx
new file mode 100644
index 0000000..5e4133a
--- /dev/null
+++ b/test/API/driver/kwsys/FStream.cxx
@@ -0,0 +1,55 @@
+/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying
+ file Copyright.txt or https://cmake.org/licensing#kwsys for details. */
+#include "kwsysPrivate.h"
+#include KWSYS_HEADER(FStream.hxx)
+
+// Work-around CMake dependency scanning limitation. This must
+// duplicate the above list of headers.
+#if 0
+# include "FStream.hxx.in"
+#endif
+
+namespace KWSYS_NAMESPACE {
+namespace FStream {
+
+BOM ReadBOM(std::istream& in)
+{
+ if (!in.good()) {
+ return BOM_None;
+ }
+ unsigned long orig = in.tellg();
+ unsigned char bom[4];
+ in.read(reinterpret_cast<char*>(bom), 2);
+ if (!in.good()) {
+ in.clear();
+ in.seekg(orig);
+ return BOM_None;
+ }
+ if (bom[0] == 0xEF && bom[1] == 0xBB) {
+ in.read(reinterpret_cast<char*>(bom + 2), 1);
+ if (in.good() && bom[2] == 0xBF) {
+ return BOM_UTF8;
+ }
+ } else if (bom[0] == 0xFE && bom[1] == 0xFF) {
+ return BOM_UTF16BE;
+ } else if (bom[0] == 0x00 && bom[1] == 0x00) {
+ in.read(reinterpret_cast<char*>(bom + 2), 2);
+ if (in.good() && bom[2] == 0xFE && bom[3] == 0xFF) {
+ return BOM_UTF32BE;
+ }
+ } else if (bom[0] == 0xFF && bom[1] == 0xFE) {
+ unsigned long p = in.tellg();
+ in.read(reinterpret_cast<char*>(bom + 2), 2);
+ if (in.good() && bom[2] == 0x00 && bom[3] == 0x00) {
+ return BOM_UTF32LE;
+ }
+ in.seekg(p);
+ return BOM_UTF16LE;
+ }
+ in.clear();
+ in.seekg(orig);
+ return BOM_None;
+}
+
+} // FStream namespace
+} // KWSYS_NAMESPACE
diff --git a/test/API/driver/kwsys/FStream.hxx.in b/test/API/driver/kwsys/FStream.hxx.in
new file mode 100644
index 0000000..d79bbdf
--- /dev/null
+++ b/test/API/driver/kwsys/FStream.hxx.in
@@ -0,0 +1,278 @@
+/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying
+ file Copyright.txt or https://cmake.org/licensing#kwsys for details. */
+#ifndef @KWSYS_NAMESPACE@_FStream_hxx
+#define @KWSYS_NAMESPACE@_FStream_hxx
+
+#include <@KWSYS_NAMESPACE@/Configure.hxx>
+
+#include <@KWSYS_NAMESPACE@/Encoding.hxx>
+
+#include <fstream>
+#if defined(_WIN32)
+# if !defined(_MSC_VER) && @KWSYS_NAMESPACE@_CXX_HAS_EXT_STDIO_FILEBUF_H
+# include <ext/stdio_filebuf.h>
+# endif
+#endif
+
+namespace @KWSYS_NAMESPACE@ {
+#if defined(_WIN32) && \
+ (defined(_MSC_VER) || @KWSYS_NAMESPACE@_CXX_HAS_EXT_STDIO_FILEBUF_H)
+# if defined(_NOEXCEPT)
+# define @KWSYS_NAMESPACE@_FStream_NOEXCEPT _NOEXCEPT
+# else
+# define @KWSYS_NAMESPACE@_FStream_NOEXCEPT
+# endif
+
+# if defined(_MSC_VER)
+
+template <typename CharType, typename Traits>
+class basic_filebuf : public std::basic_filebuf<CharType, Traits>
+{
+# if _MSC_VER >= 1400
+public:
+ typedef std::basic_filebuf<CharType, Traits> my_base_type;
+ basic_filebuf* open(char const* s, std::ios_base::openmode mode)
+ {
+ const std::wstring wstr = Encoding::ToWindowsExtendedPath(s);
+ return static_cast<basic_filebuf*>(my_base_type::open(wstr.c_str(), mode));
+ }
+# endif
+};
+
+# else
+
+inline std::wstring getcmode(const std::ios_base::openmode mode)
+{
+ std::wstring cmode;
+ bool plus = false;
+ if (mode & std::ios_base::app) {
+ cmode += L"a";
+ plus = mode & std::ios_base::in ? true : false;
+ } else if (mode & std::ios_base::trunc ||
+ (mode & std::ios_base::out && (mode & std::ios_base::in) == 0)) {
+ cmode += L"w";
+ plus = mode & std::ios_base::in ? true : false;
+ } else {
+ cmode += L"r";
+ plus = mode & std::ios_base::out ? true : false;
+ }
+ if (plus) {
+ cmode += L"+";
+ }
+ if (mode & std::ios_base::binary) {
+ cmode += L"b";
+ } else {
+ cmode += L"t";
+ }
+ return cmode;
+}
+
+# endif
+
+template <typename CharType, typename Traits = std::char_traits<CharType> >
+class basic_efilebuf
+{
+public:
+# if defined(_MSC_VER)
+ typedef basic_filebuf<CharType, Traits> internal_buffer_type;
+# else
+ typedef __gnu_cxx::stdio_filebuf<CharType, Traits> internal_buffer_type;
+# endif
+
+ basic_efilebuf()
+ : file_(0)
+ {
+ buf_ = 0;
+ }
+
+ bool _open(char const* file_name, std::ios_base::openmode mode)
+ {
+ if (is_open() || file_) {
+ return false;
+ }
+# if defined(_MSC_VER)
+ const bool success = buf_->open(file_name, mode) != 0;
+# else
+ const std::wstring wstr = Encoding::ToWindowsExtendedPath(file_name);
+ bool success = false;
+ std::wstring cmode = getcmode(mode);
+ file_ = _wfopen(wstr.c_str(), cmode.c_str());
+ if (file_) {
+ if (buf_) {
+ delete buf_;
+ }
+ buf_ = new internal_buffer_type(file_, mode);
+ success = true;
+ }
+# endif
+ return success;
+ }
+
+ bool is_open()
+ {
+ if (!buf_) {
+ return false;
+ }
+ return buf_->is_open();
+ }
+
+ bool is_open() const
+ {
+ if (!buf_) {
+ return false;
+ }
+ return buf_->is_open();
+ }
+
+ bool _close()
+ {
+ bool success = false;
+ if (buf_) {
+ success = buf_->close() != 0;
+# if !defined(_MSC_VER)
+ if (file_) {
+ success = fclose(file_) == 0 ? success : false;
+ file_ = 0;
+ }
+# endif
+ }
+ return success;
+ }
+
+ static void _set_state(bool success, std::basic_ios<CharType, Traits>* ios,
+ basic_efilebuf* efilebuf)
+ {
+# if !defined(_MSC_VER)
+ ios->rdbuf(efilebuf->buf_);
+# else
+ static_cast<void>(efilebuf);
+# endif
+ if (!success) {
+ ios->setstate(std::ios_base::failbit);
+ } else {
+ ios->clear();
+ }
+ }
+
+ ~basic_efilebuf()
+ {
+ if (buf_) {
+ delete buf_;
+ }
+ }
+
+protected:
+ internal_buffer_type* buf_;
+ FILE* file_;
+};
+
+template <typename CharType, typename Traits = std::char_traits<CharType> >
+class basic_ifstream
+ : public std::basic_istream<CharType, Traits>
+ , public basic_efilebuf<CharType, Traits>
+{
+public:
+ typedef typename basic_efilebuf<CharType, Traits>::internal_buffer_type
+ internal_buffer_type;
+ typedef std::basic_istream<CharType, Traits> internal_stream_type;
+
+ basic_ifstream()
+ : internal_stream_type(new internal_buffer_type())
+ {
+ this->buf_ =
+ static_cast<internal_buffer_type*>(internal_stream_type::rdbuf());
+ }
+ explicit basic_ifstream(char const* file_name,
+ std::ios_base::openmode mode = std::ios_base::in)
+ : internal_stream_type(new internal_buffer_type())
+ {
+ this->buf_ =
+ static_cast<internal_buffer_type*>(internal_stream_type::rdbuf());
+ open(file_name, mode);
+ }
+
+ void open(char const* file_name,
+ std::ios_base::openmode mode = std::ios_base::in)
+ {
+ mode = mode | std::ios_base::in;
+ this->_set_state(this->_open(file_name, mode), this, this);
+ }
+
+ void close() { this->_set_state(this->_close(), this, this); }
+
+ using basic_efilebuf<CharType, Traits>::is_open;
+
+ internal_buffer_type* rdbuf() const { return this->buf_; }
+
+ ~basic_ifstream() @KWSYS_NAMESPACE@_FStream_NOEXCEPT { close(); }
+};
+
+template <typename CharType, typename Traits = std::char_traits<CharType> >
+class basic_ofstream
+ : public std::basic_ostream<CharType, Traits>
+ , public basic_efilebuf<CharType, Traits>
+{
+ using basic_efilebuf<CharType, Traits>::is_open;
+
+public:
+ typedef typename basic_efilebuf<CharType, Traits>::internal_buffer_type
+ internal_buffer_type;
+ typedef std::basic_ostream<CharType, Traits> internal_stream_type;
+
+ basic_ofstream()
+ : internal_stream_type(new internal_buffer_type())
+ {
+ this->buf_ =
+ static_cast<internal_buffer_type*>(internal_stream_type::rdbuf());
+ }
+ explicit basic_ofstream(char const* file_name,
+ std::ios_base::openmode mode = std::ios_base::out)
+ : internal_stream_type(new internal_buffer_type())
+ {
+ this->buf_ =
+ static_cast<internal_buffer_type*>(internal_stream_type::rdbuf());
+ open(file_name, mode);
+ }
+ void open(char const* file_name,
+ std::ios_base::openmode mode = std::ios_base::out)
+ {
+ mode = mode | std::ios_base::out;
+ this->_set_state(this->_open(file_name, mode), this, this);
+ }
+
+ void close() { this->_set_state(this->_close(), this, this); }
+
+ internal_buffer_type* rdbuf() const { return this->buf_; }
+
+ ~basic_ofstream() @KWSYS_NAMESPACE@_FStream_NOEXCEPT { close(); }
+};
+
+typedef basic_ifstream<char> ifstream;
+typedef basic_ofstream<char> ofstream;
+
+# undef @KWSYS_NAMESPACE@_FStream_NOEXCEPT
+#else
+using std::ofstream;
+using std::ifstream;
+#endif
+
+namespace FStream {
+enum BOM
+{
+ BOM_None,
+ BOM_UTF8,
+ BOM_UTF16BE,
+ BOM_UTF16LE,
+ BOM_UTF32BE,
+ BOM_UTF32LE
+};
+
+// Read a BOM, if one exists.
+// If a BOM exists, the stream is advanced to after the BOM.
+// This function requires a stream that supports absolute seeking
+// (relative seeking is not required).
+@KWSYS_NAMESPACE@_EXPORT BOM ReadBOM(std::istream& in);
+}
+}
+
+#endif
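
A minimal sketch of the stream wrappers above, assuming the default "kwsys" namespace; "input.txt" is a placeholder path. On Windows, kwsys::ifstream interprets the file name in the Encoding module's 8-bit encoding, and ReadBOM consumes a leading byte-order mark if one is present.

    #include <kwsys/FStream.hxx>
    #include <iostream>
    #include <string>

    int main()
    {
      kwsys::ifstream in("input.txt");
      if (!in) {
        return 1;
      }
      // Skip a UTF-8 BOM if present; other BOMs indicate an encoding this
      // simple example does not handle.
      kwsys::FStream::BOM bom = kwsys::FStream::ReadBOM(in);
      if (bom != kwsys::FStream::BOM_None && bom != kwsys::FStream::BOM_UTF8) {
        std::cerr << "unsupported encoding" << std::endl;
        return 1;
      }
      std::string line;
      while (std::getline(in, line)) {
        std::cout << line << "\n";
      }
      return 0;
    }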
diff --git a/test/API/driver/kwsys/GitSetup/.gitattributes b/test/API/driver/kwsys/GitSetup/.gitattributes
new file mode 100644
index 0000000..e96d1f8
--- /dev/null
+++ b/test/API/driver/kwsys/GitSetup/.gitattributes
@@ -0,0 +1,6 @@
+.git* export-ignore
+
+config* eol=lf whitespace=indent-with-non-tab
+git-* eol=lf whitespace=indent-with-non-tab
+tips eol=lf whitespace=indent-with-non-tab
+setup-* eol=lf whitespace=indent-with-non-tab
diff --git a/test/API/driver/kwsys/GitSetup/LICENSE b/test/API/driver/kwsys/GitSetup/LICENSE
new file mode 100644
index 0000000..d645695
--- /dev/null
+++ b/test/API/driver/kwsys/GitSetup/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/test/API/driver/kwsys/GitSetup/NOTICE b/test/API/driver/kwsys/GitSetup/NOTICE
new file mode 100644
index 0000000..0d32c02
--- /dev/null
+++ b/test/API/driver/kwsys/GitSetup/NOTICE
@@ -0,0 +1,5 @@
+Kitware Local Git Setup Scripts
+Copyright 2010-2012 Kitware, Inc.
+
+This product includes software developed at Kitware, Inc.
+(http://www.kitware.com/).
diff --git a/test/API/driver/kwsys/GitSetup/README b/test/API/driver/kwsys/GitSetup/README
new file mode 100644
index 0000000..2f9f1ec
--- /dev/null
+++ b/test/API/driver/kwsys/GitSetup/README
@@ -0,0 +1,87 @@
+Kitware Local Git Setup Scripts
+
+
+Introduction
+------------
+
+This is a collection of local Git development setup scripts meant for
+inclusion in project source trees to aid their development workflow.
+Project-specific information needed by the scripts may be configured
+in a "config" file added next to them in the project.
+
+
+Import
+------
+
+A project may import these scripts into their source tree by
+initializing a subtree merge. Bring up a Git prompt and set the
+current working directory inside a clone of the target project.
+Fetch the "setup" branch from the GitSetup repository:
+
+ $ git fetch ../GitSetup setup:setup
+
+Prepare to merge the branch but place the content in a subdirectory.
+Any prefix (with trailing '/') may be chosen so long as it is used
+consistently within a project through the rest of these instructions:
+
+ $ git merge -s ours --no-commit setup
+ $ git read-tree -u --prefix=Utilities/GitSetup/ setup
+
+Commit the merge with an informative message:
+
+ $ git commit
+ ------------------------------------------------------------------------
+ Merge branch 'setup'
+
+ Add Utilities/GitSetup/ directory using subtree merge from
+ the general GitSetup repository "setup" branch.
+ ------------------------------------------------------------------------
+
+Optionally add to the project ".gitattributes" file the line
+
+ /Utilities/GitSetup export-ignore
+
+to exclude the GitSetup directory from inclusion by "git archive"
+since it does not make sense in source tarballs.
+
+
+Configuration
+-------------
+
+Read the "Project configuration instructions" comment in each script.
+Add a "config" file next to the scripts with desired configuration
+(optionally copy and modify "config.sample"). For example, to
+configure the "setup-hooks" script:
+
+ $ git config -f Utilities/GitSetup/config hooks.url "$url"
+
+where "$url" is the project repository publishing the "hooks" branch.
+When finished, add and commit the configuration file:
+
+ $ git add Utilities/GitSetup/config
+ $ git commit
+
+
+Update
+------
+
+A project may update these scripts from the GitSetup repository.
+Bring up a Git prompt and set the current working directory inside a
+clone of the target project. Fetch the "setup" branch from the
+GitSetup repository:
+
+ $ git fetch ../GitSetup setup:setup
+
+Merge the "setup" branch into the subtree:
+
+ $ git merge -X subtree=Utilities/GitSetup setup
+
+where "Utilities/GitSetup" is the same prefix used during the import
+setup, but without a trailing '/'.
+
+
+License
+-------
+
+Distributed under the Apache License 2.0.
+See LICENSE and NOTICE for details.
diff --git a/test/API/driver/kwsys/GitSetup/config b/test/API/driver/kwsys/GitSetup/config
new file mode 100644
index 0000000..cba4c14
--- /dev/null
+++ b/test/API/driver/kwsys/GitSetup/config
@@ -0,0 +1,4 @@
+[hooks]
+ url = https://gitlab.kitware.com/utils/gitsetup.git
+[upstream]
+ url = https://gitlab.kitware.com/utils/kwsys.git
diff --git a/test/API/driver/kwsys/GitSetup/config.sample b/test/API/driver/kwsys/GitSetup/config.sample
new file mode 100644
index 0000000..eeb468b
--- /dev/null
+++ b/test/API/driver/kwsys/GitSetup/config.sample
@@ -0,0 +1,32 @@
+# Kitware Local Git Setup Scripts - Sample Project Configuration
+#
+# Copy to "config" and edit as necessary.
+
+[hooks]
+ url = http://public.kitware.com/GitSetup.git
+ #branch = hooks
+
+[ssh]
+ host = public.kitware.com
+ key = id_git_public
+ request-url = https://www.kitware.com/Admin/SendPassword.cgi
+
+[stage]
+ #url = git://public.kitware.com/stage/Project.git
+ #pushurl = git@public.kitware.com:stage/Project.git
+
+[gerrit]
+ #project = Project
+ site = http://review.source.kitware.com
+ # pushurl placeholder "$username" is literal
+ pushurl = $username@review.source.kitware.com:Project
+
+[upstream]
+ url = git://public.kitware.com/Project.git
+
+[gitlab]
+ host = gitlab.kitware.com
+ group-path = group
+ group-name = Group
+ project-path = project
+ project-name = Project
diff --git a/test/API/driver/kwsys/GitSetup/git-gerrit-push b/test/API/driver/kwsys/GitSetup/git-gerrit-push
new file mode 100644
index 0000000..b46f753
--- /dev/null
+++ b/test/API/driver/kwsys/GitSetup/git-gerrit-push
@@ -0,0 +1,74 @@
+#!/usr/bin/env bash
+#=============================================================================
+# Copyright 2010-2015 Kitware, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#=============================================================================
+
+USAGE="[<remote>] [--no-topic] [--dry-run] [--]"
+OPTIONS_SPEC=
+SUBDIRECTORY_OK=Yes
+. "$(git --exec-path)/git-sh-setup"
+
+#-----------------------------------------------------------------------------
+
+remote=''
+refspecs=''
+no_topic=''
+dry_run=''
+
+# Parse the command line options.
+while test $# != 0; do
+ case "$1" in
+ --no-topic) no_topic=1 ;;
+ --dry-run) dry_run=--dry-run ;;
+ --) shift; break ;;
+ -*) usage ;;
+ *) test -z "$remote" || usage ; remote="$1" ;;
+ esac
+ shift
+done
+test $# = 0 || usage
+
+# Default remote.
+test -n "$remote" || remote="gerrit"
+
+if test -z "$no_topic"; then
+ # Identify and validate the topic branch name.
+ head="$(git symbolic-ref HEAD)" && topic="${head#refs/heads/}" || topic=''
+ if test -z "$topic" -o "$topic" = "master"; then
+ die 'Please name your topic:
+ git checkout -b descriptive-name'
+ fi
+ # The topic branch will be pushed by name.
+ refspecs="HEAD:refs/for/master/$topic $refspecs"
+fi
+
+# Fetch the current upstream master branch head.
+# This helps computation of a minimal pack to push.
+echo "Fetching $remote master"
+fetch_out=$(git fetch "$remote" master 2>&1) || die "$fetch_out"
+
+# Exit early if we have nothing to push.
+if test -z "$refspecs"; then
+ echo 'Nothing to push!'
+ exit 0
+fi
+
+# Push. Save output and exit code.
+echo "Pushing to $remote"
+push_stdout=$(git push --porcelain $dry_run "$remote" $refspecs); push_exit=$?
+echo "$push_stdout"
+
+# Reproduce the push exit code.
+exit $push_exit
diff --git a/test/API/driver/kwsys/GitSetup/git-gitlab-push b/test/API/driver/kwsys/GitSetup/git-gitlab-push
new file mode 100644
index 0000000..768f853
--- /dev/null
+++ b/test/API/driver/kwsys/GitSetup/git-gitlab-push
@@ -0,0 +1,177 @@
+#!/usr/bin/env bash
+#=============================================================================
+# Copyright 2010-2015 Kitware, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#=============================================================================
+
+USAGE='[<remote>] [<options>...] [--]
+
+OPTIONS
+
+--dry-run
+ Show what would be pushed without actually updating the destination
+
+-f,--force
+ Force-push the topic HEAD to rewrite the destination branch
+
+--no-default
+ Do not push the default branch (e.g. master)
+
+--no-topic
+ Do not push the topic HEAD.
+'
+OPTIONS_SPEC=
+SUBDIRECTORY_OK=Yes
+. "$(git --exec-path)/git-sh-setup"
+
+egrep-q() {
+ egrep "$@" >/dev/null 2>/dev/null
+}
+
+# Load the project configuration.
+gitlab_upstream='' &&
+gitlab_configured='' &&
+config="${BASH_SOURCE%/*}/config" &&
+protocol=$(git config -f "$config" --get gitlab.protocol ||
+ echo "https") &&
+host=$(git config -f "$config" --get gitlab.host) &&
+site=$(git config -f "$config" --get gitlab.site ||
+ echo "$protocol://$host") &&
+group_path=$(git config -f "$config" --get gitlab.group-path) &&
+project_path=$(git config -f "$config" --get gitlab.project-path) &&
+gitlab_upstream="$site/$group_path/$project_path.git" &&
+gitlab_pushurl=$(git config --get remote.gitlab.pushurl ||
+ git config --get remote.gitlab.url) &&
+gitlab_configured=1
+
+#-----------------------------------------------------------------------------
+
+remote=''
+refspecs=''
+force=''
+lease=false
+lease_flag=''
+no_topic=''
+no_default=''
+dry_run=''
+
+# Parse the command line options.
+while test $# != 0; do
+ case "$1" in
+ -f|--force) force='+'; lease=true ;;
+ --no-topic) no_topic=1 ;;
+ --dry-run) dry_run=--dry-run ;;
+ --no-default) no_default=1 ;;
+ --) shift; break ;;
+ -*) usage ;;
+ *) test -z "$remote" || usage ; remote="$1" ;;
+ esac
+ shift
+done
+test $# = 0 || usage
+
+# Default remote.
+test -n "$remote" || remote="gitlab"
+
+if test -z "$no_topic"; then
+ # Identify and validate the topic branch name.
+ head="$(git symbolic-ref HEAD)" && topic="${head#refs/heads/}" || topic=''
+ if test -z "$topic" -o "$topic" = "master"; then
+ die 'Please name your topic:
+ git checkout -b descriptive-name'
+ fi
+
+ if $lease; then
+ have_ref=false
+ remoteref="refs/remotes/$remote/$topic"
+ if git rev-parse --verify -q "$remoteref"; then
+ have_ref=true
+ else
+ die "It seems that a local ref for the branch is
+missing; forcing a push is dangerous and may overwrite
+previous work. Fetch from the $remote remote first or
+push without '-f' or '--force'."
+ fi
+
+ have_lease_flag=false
+ if git push -h | egrep-q -e '--force-with-lease'; then
+ have_lease_flag=true
+ fi
+
+ if $have_lease_flag && $have_ref; then
+ # Set the lease flag.
+ lease_flag="--force-with-lease=$topic:$remoteref"
+ # Clear the force string.
+ force=''
+ fi
+ fi
+
+ # The topic branch will be pushed by name.
+ refspecs="${force}HEAD:refs/heads/$topic $refspecs"
+fi
+
+# Fetch the current remote master branch head.
+# This helps computation of a minimal pack to push.
+echo "Fetching $remote master"
+fetch_out=$(git fetch "$remote" master 2>&1) || die "$fetch_out"
+gitlab_head=$(git rev-parse FETCH_HEAD) || exit
+
+# Fetch the current upstream master branch head.
+if origin_fetchurl=$(git config --get remote.origin.url) &&
+ test "$origin_fetchurl" = "$gitlab_upstream"; then
+ upstream_remote='origin'
+else
+ upstream_remote="$gitlab_upstream"
+fi
+echo "Fetching $upstream_remote master"
+fetch_out=$(git fetch "$upstream_remote" master 2>&1) || die "$fetch_out"
+upstream_head=$(git rev-parse FETCH_HEAD) || exit
+
+# Add a refspec to keep the remote master up to date if possible.
+if test -z "$no_default" &&
+ base=$(git merge-base "$gitlab_head" "$upstream_head") &&
+ test "$base" = "$gitlab_head"; then
+ refspecs="$upstream_head:refs/heads/master $refspecs"
+fi
+
+# Exit early if we have nothing to push.
+if test -z "$refspecs"; then
+ echo 'Nothing to push!'
+ exit 0
+fi
+
+# Push. Save output and exit code.
+echo "Pushing to $remote"
+push_config='-c advice.pushUpdateRejected=false'
+push_stdout=$(git $push_config push $lease_flag --porcelain $dry_run "$remote" $refspecs); push_exit=$?
+echo "$push_stdout"
+
+if test "$push_exit" -ne 0 && test -z "$force"; then
+ # Advise the user to fetch if needed.
+ if echo "$push_stdout" | egrep-q 'stale info'; then
+ echo "
+You have pushed to your branch from another machine; you may be overwriting
+commits unintentionally. Fetch from the $remote remote and check that you are
+not pushing an outdated branch."
+ fi
+
+ # Advise the user to force-push if needed.
+ if echo "$push_stdout" | egrep-q 'non-fast-forward'; then
+ echo '
+Add "-f" or "--force" to push a rewritten topic.'
+ fi
+fi
+
+# Reproduce the push exit code.
+exit $push_exit
diff --git a/test/API/driver/kwsys/GitSetup/pre-commit b/test/API/driver/kwsys/GitSetup/pre-commit
new file mode 100644
index 0000000..1f1d3f5
--- /dev/null
+++ b/test/API/driver/kwsys/GitSetup/pre-commit
@@ -0,0 +1,26 @@
+#!/usr/bin/env bash
+
+egrep-q() {
+ egrep "$@" >/dev/null 2>/dev/null
+}
+
+die() {
+ echo 'pre-commit hook failure' 1>&2
+ echo '-----------------------' 1>&2
+ echo '' 1>&2
+ echo "$@" 1>&2
+ exit 1
+}
+
+#-----------------------------------------------------------------------------
+
+# Check that developer setup is up-to-date.
+lastSetupForDevelopment=$(git config --get hooks.SetupForDevelopment || echo 0)
+eval $(grep '^SetupForDevelopment_VERSION=' "${BASH_SOURCE%/*}/../SetupForDevelopment.sh")
+test -n "$SetupForDevelopment_VERSION" || SetupForDevelopment_VERSION=0
+if test $lastSetupForDevelopment -lt $SetupForDevelopment_VERSION; then
+ die 'Developer setup in this work tree is out of date. Please re-run
+
+ ./SetupForDevelopment.sh
+'
+fi
diff --git a/test/API/driver/kwsys/GitSetup/setup-aliases b/test/API/driver/kwsys/GitSetup/setup-aliases
new file mode 100644
index 0000000..98810ad
--- /dev/null
+++ b/test/API/driver/kwsys/GitSetup/setup-aliases
@@ -0,0 +1,6 @@
+#!/usr/bin/env bash
+echo "Adding 'git prepush' alias" &&
+git config alias.prepush 'log --graph --stat origin/master..' &&
+gerrit_disabled="KWSys no longer uses Gerrit. Please use GitLab." &&
+git config alias.gerrit-push '!sh -c "echo '"${gerrit_disabled}"'"' &&
+true
diff --git a/test/API/driver/kwsys/GitSetup/setup-gerrit b/test/API/driver/kwsys/GitSetup/setup-gerrit
new file mode 100644
index 0000000..6d46e3c
--- /dev/null
+++ b/test/API/driver/kwsys/GitSetup/setup-gerrit
@@ -0,0 +1,147 @@
+#!/usr/bin/env bash
+#=============================================================================
+# Copyright 2010-2012 Kitware, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#=============================================================================
+
+# Run this script to set up the local Git repository to push to
+# a Gerrit Code Review instance for this project.
+
+# Project configuration instructions:
+#
+# - Run a Gerrit Code Review server
+#
+# - Populate adjacent "config" file with:
+# gerrit.site = Top Gerrit URL (not project-specific)
+# gerrit.project = Name of project in Gerrit
+# gerrit.pushurl = Review site push URL with "$username" placeholder
+# gerrit.remote = Gerrit remote name, if not "gerrit"
+# gerrit.url = Gerrit project URL, if not "$site/p/$project"
+# optionally with "$username" placeholder
+
+die() {
+ echo 1>&2 "$@" ; exit 1
+}
+
+# Make sure we are inside the repository.
+cd "${BASH_SOURCE%/*}" &&
+
+# Load the project configuration.
+site=$(git config -f config --get gerrit.site) &&
+project=$(git config -f config --get gerrit.project) &&
+remote=$(git config -f config --get gerrit.remote ||
+ echo "gerrit") &&
+fetchurl_=$(git config -f config --get gerrit.url ||
+ echo "$site/p/$project") &&
+pushurl_=$(git config -f config --get gerrit.pushurl ||
+ git config -f config --get gerrit.url) ||
+die 'This project is not configured to use Gerrit.'
+
+# Get current gerrit push URL.
+pushurl=$(git config --get remote."$remote".pushurl ||
+ git config --get remote."$remote".url || echo '') &&
+
+# Tell user about current configuration.
+if test -n "$pushurl"; then
+ echo 'Remote "'"$remote"'" is currently configured to push to
+
+ '"$pushurl"'
+' &&
+ read -ep 'Reconfigure Gerrit? [y/N]: ' ans &&
+ if [ "$ans" == "y" ] || [ "$ans" == "Y" ]; then
+ setup=1
+ else
+ setup=''
+ fi
+else
+ echo 'Remote "'"$remote"'" is not yet configured.
+
+'"$project"' changes must be pushed to our Gerrit Code Review site:
+
+ '"$site/p/$project"'
+
+Register a Gerrit account and select a username (used below).
+You will need an OpenID:
+
+ http://openid.net/get-an-openid/
+' &&
+ read -ep 'Configure Gerrit? [Y/n]: ' ans &&
+ if [ "$ans" == "n" ] || [ "$ans" == "N" ]; then
+ exit 0
+ else
+ setup=1
+ fi
+fi &&
+
+# Perform setup if necessary.
+if test -n "$setup"; then
+ echo 'Sign-in to Gerrit to get/set your username at
+
+ '"$site"'/#/settings
+
+Add your SSH public keys at
+
+ '"$site"'/#/settings/ssh-keys
+' &&
+ read -ep "Gerrit username? [$USER]: " gu &&
+ if test -z "$gu"; then
+ gu="$USER"
+ fi &&
+ fetchurl="${fetchurl_/\$username/$gu}" &&
+ if test -z "$pushurl"; then
+ git remote add "$remote" "$fetchurl"
+ else
+ git config remote."$remote".url "$fetchurl"
+ fi &&
+ pushurl="${pushurl_/\$username/$gu}" &&
+ if test "$pushurl" != "$fetchurl"; then
+ git config remote."$remote".pushurl "$pushurl"
+ fi &&
+ echo 'Remote "'"$remote"'" is now configured to push to
+
+ '"$pushurl"'
+'
+fi &&
+
+# Optionally test Gerrit access.
+if test -n "$pushurl"; then
+ read -ep 'Test access to Gerrit (SSH)? [y/N]: ' ans &&
+ if [ "$ans" == "y" ] || [ "$ans" == "Y" ]; then
+ echo -n 'Testing Gerrit access by SSH...'
+ if git ls-remote --heads "$pushurl" >/dev/null; then
+ echo 'passed.'
+ else
+ echo 'failed.' &&
+ die 'Could not access Gerrit. Add your SSH public keys at
+
+ '"$site"'/#/settings/ssh-keys
+'
+ fi
+ fi
+fi &&
+
+# Set up GerritId hook.
+hook=$(git config --get hooks.GerritId || echo '') &&
+if test -z "$hook"; then
+ echo '
+Enabling GerritId hook to add a "Change-Id" footer to commit
+messages for interaction with Gerrit. Run
+
+ git config hooks.GerritId false
+
+to disable this feature (but you will be on your own).' &&
+ git config hooks.GerritId true
+else
+ echo 'GerritId hook already configured to "'"$hook"'".'
+fi
diff --git a/test/API/driver/kwsys/GitSetup/setup-gitlab b/test/API/driver/kwsys/GitSetup/setup-gitlab
new file mode 100644
index 0000000..9c7574d
--- /dev/null
+++ b/test/API/driver/kwsys/GitSetup/setup-gitlab
@@ -0,0 +1,140 @@
+#!/usr/bin/env bash
+#=============================================================================
+# Copyright 2010-2015 Kitware, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#=============================================================================
+
+# Run this script to set up the local Git repository to push to
+# a personal fork for this project in GitLab.
+
+# Project configuration instructions:
+#
+# - Run a GitLab server
+#
+# - Populate adjacent "config" file with:
+# gitlab.protocol = Top GitLab protocol, if not 'https'
+# gitlab.host = Top GitLab fully qualified host name
+# gitlab.site = Top GitLab URL, if not "<protocol>://<host>"
+# gitlab.group-name = Name of group containing project in GitLab
+# gitlab.group-path = Path of group containing project in GitLab
+# gitlab.project-name = Name of project within GitLab group
+# gitlab.project-path = Path of project within GitLab group
+# gitlab.url = GitLab push URL with "$username" placeholder,
+# if not "<site>/$username/<project-path>.git"
+# gitlab.pushurl = GitLab push URL with "$username" placeholder,
+# if not "git@<host>:$username/<project-path>.git"
+# gitlab.remote = GitLab remote name, if not "gitlab"
+
+die() {
+ echo 1>&2 "$@" ; exit 1
+}
+
+# Make sure we are inside the repository.
+cd "${BASH_SOURCE%/*}" &&
+
+# Load the project configuration.
+protocol=$(git config -f config --get gitlab.protocol ||
+ echo "https") &&
+host=$(git config -f config --get gitlab.host) &&
+site=$(git config -f config --get gitlab.site ||
+ echo "$protocol://$host") &&
+group_path=$(git config -f config --get gitlab.group-path) &&
+group_name=$(git config -f config --get gitlab.group-name) &&
+project_name=$(git config -f config --get gitlab.project-name) &&
+project_path=$(git config -f config --get gitlab.project-path) &&
+pushurl_=$(git config -f config --get gitlab.pushurl ||
+ echo "git@$host:\$username/$project_path.git") &&
+remote=$(git config -f config --get gitlab.remote ||
+ echo "gitlab") &&
+fetchurl_=$(git config -f config --get gitlab.url ||
+ echo "$site/\$username/$project_path.git") ||
+die 'This project is not configured to use GitLab.'
+
+# Get current gitlab push URL.
+pushurl=$(git config --get remote."$remote".pushurl ||
+ git config --get remote."$remote".url || echo '') &&
+
+# Tell user about current configuration.
+if test -n "$pushurl"; then
+ echo 'Remote "'"$remote"'" is currently configured to push to
+
+ '"$pushurl"'
+' &&
+ read -ep 'Reconfigure GitLab? [y/N]: ' ans &&
+ if [ "$ans" == "y" ] || [ "$ans" == "Y" ]; then
+ setup=1
+ else
+ setup=''
+ fi
+else
+ echo 'Remote "'"$remote"'" is not yet configured.
+' &&
+ read -ep 'Configure GitLab to contribute to '"$project_name"'? [Y/n]: ' ans &&
+ if [ "$ans" == "n" ] || [ "$ans" == "N" ]; then
+ exit 0
+ else
+ setup=1
+ fi
+fi &&
+
+setup_instructions='Add your SSH public keys at
+
+ '"$site"'/profile/keys
+
+Then visit the main repository at:
+
+ '"$site/$group_path/$project_path"'
+
+and use the Fork button in the upper right.
+'
+
+# Perform setup if necessary.
+if test -n "$setup"; then
+ echo 'Sign-in to GitLab to get/set your username at
+
+ '"$site/profile/account"'
+
+'"$setup_instructions" &&
+ read -ep "GitLab username? [$USER]: " gu &&
+ if test -z "$gu"; then
+ gu="$USER"
+ fi &&
+ fetchurl="${fetchurl_/\$username/$gu}" &&
+ if test -z "$pushurl"; then
+ git remote add "$remote" "$fetchurl"
+ else
+ git config remote."$remote".url "$fetchurl"
+ fi &&
+ pushurl="${pushurl_/\$username/$gu}" &&
+ git config remote."$remote".pushurl "$pushurl" &&
+ echo 'Remote "'"$remote"'" is now configured to push to
+
+ '"$pushurl"'
+'
+fi &&
+
+# Optionally test GitLab access.
+if test -n "$pushurl"; then
+ read -ep 'Test access to GitLab (SSH)? [y/N]: ' ans &&
+ if [ "$ans" == "y" ] || [ "$ans" == "Y" ]; then
+ echo -n 'Testing GitLab access by SSH...'
+ if git ls-remote --heads "$pushurl" >/dev/null; then
+ echo 'passed.'
+ else
+ echo 'failed.' &&
+ die 'Could not access your GitLab fork of this project.
+'"$setup_instructions"
+ fi
+ fi
+fi
diff --git a/test/API/driver/kwsys/GitSetup/setup-hooks b/test/API/driver/kwsys/GitSetup/setup-hooks
new file mode 100644
index 0000000..ca07712
--- /dev/null
+++ b/test/API/driver/kwsys/GitSetup/setup-hooks
@@ -0,0 +1,64 @@
+#!/usr/bin/env bash
+#=============================================================================
+# Copyright 2010-2012 Kitware, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#=============================================================================
+
+# Run this script to set up local Git hooks for this project.
+
+# Project configuration instructions:
+#
+# - Publish a "hooks" branch in the project repository such that
+# clones will have "refs/remotes/origin/hooks".
+#
+# - Populate adjacent "config" file with:
+# hooks.url = Repository URL publishing "hooks" branch
+# hooks.branch = Repository branch instead of "hooks"
+
+egrep-q() {
+ egrep "$@" >/dev/null 2>/dev/null
+}
+
+die() {
+ echo 1>&2 "$@" ; exit 1
+}
+
+# Make sure we are inside the repository.
+cd "${BASH_SOURCE%/*}" &&
+
+# Select a hooks branch.
+if url=$(git config --get hooks.url); then
+ # Fetch hooks from locally configured repository.
+ branch=$(git config hooks.branch || echo hooks)
+elif git for-each-ref refs/remotes/origin/hooks 2>/dev/null |
+ egrep-q 'refs/remotes/origin/hooks$'; then
+ # Use hooks cloned from origin.
+ url=.. && branch=remotes/origin/hooks
+elif url=$(git config -f config --get hooks.url); then
+ # Fetch hooks from project-configured repository.
+ branch=$(git config -f config hooks.branch || echo hooks)
+else
+ die 'This project is not configured to install local hooks.'
+fi &&
+
+# Populate ".git/hooks".
+echo 'Setting up git hooks...' &&
+git_dir=$(git rev-parse --git-dir) &&
+mkdir -p "$git_dir/hooks" &&
+cd "$git_dir/hooks" &&
+if ! test -e .git; then
+ git init -q || die 'Could not run git init for hooks.'
+fi &&
+git fetch -q "$url" "$branch" &&
+git reset -q --hard FETCH_HEAD || die 'Failed to install hooks'
diff --git a/test/API/driver/kwsys/GitSetup/setup-ssh b/test/API/driver/kwsys/GitSetup/setup-ssh
new file mode 100644
index 0000000..8920a5b
--- /dev/null
+++ b/test/API/driver/kwsys/GitSetup/setup-ssh
@@ -0,0 +1,111 @@
+#!/usr/bin/env bash
+#=============================================================================
+# Copyright 2010-2012 Kitware, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#=============================================================================
+
+# Run this script to set up ssh push access to the repository host.
+
+# Project configuration instructions:
+#
+# - Populate adjacent "config" file with:
+# ssh.host = Repository host name
+# ssh.user = Username on host, if not "git"
+# ssh.key = Local ssh key name
+# ssh.request-url = Web page URL to request ssh access
+
+egrep-q() {
+ egrep "$@" >/dev/null 2>/dev/null
+}
+
+die() {
+ echo 1>&2 "$@" ; exit 1
+}
+
+# Make sure we are inside the repository.
+cd "${BASH_SOURCE%/*}" &&
+
+# Load the project configuration.
+host=$(git config -f config --get ssh.host) &&
+user=$(git config -f config --get ssh.user || echo git) &&
+key=$(git config -f config --get ssh.key) &&
+request_url=$(git config -f config --get ssh.request-url) ||
+die 'This project is not configured for ssh push access.'
+
+# Check for existing configuration.
+if test -r ~/.ssh/config &&
+ egrep-q 'Host[= ]'"${host//\./\\.}" ~/.ssh/config; then
+ echo 'Host "'"$host"'" is already in ~/.ssh/config' &&
+ setup= &&
+ question='Test'
+else
+ echo 'Host "'"$host"'" not found in ~/.ssh/config' &&
+ setup=1 &&
+ question='Setup and test'
+fi &&
+
+# Ask the user whether to make changes.
+echo '' &&
+read -ep "${question} push access by ssh to $user@$host? [y/N]: " access &&
+if test "$access" != "y" -a "$access" != "Y"; then
+ exit 0
+fi &&
+
+# Setup host configuration if necessary.
+if test -n "$setup"; then
+ if ! test -d ~/.ssh; then
+ mkdir -p ~/.ssh &&
+ chmod 700 ~/.ssh
+ fi &&
+ if ! test -f ~/.ssh/config; then
+ touch ~/.ssh/config &&
+ chmod 600 ~/.ssh/config
+ fi &&
+ ssh_config='Host='"$host"'
+ IdentityFile ~/.ssh/'"$key" &&
+ echo "Adding to ~/.ssh/config:
+
+$ssh_config
+" &&
+ echo "$ssh_config" >> ~/.ssh/config &&
+ if ! test -e ~/.ssh/"$key"; then
+ if test -f ~/.ssh/id_rsa; then
+ # Take care of the common case.
+ ln -s id_rsa ~/.ssh/"$key"
+ echo '
+Assuming ~/.ssh/id_rsa is the private key corresponding to the public key for
+
+ '"$user@$host"'
+
+If this is incorrect, place the private key at "~/.ssh/'"$key"'".'
+ else
+ echo '
+Place the private key corresponding to the public key registered for
+
+ '"$user@$host"'
+
+at "~/.ssh/'"$key"'".'
+ fi
+ read -e -n 1 -p 'Press any key to continue...'
+ fi
+fi || exit 1
+
+# Test access configuration.
+echo 'Testing ssh push access to "'"$user@$host"'"...' &&
+if ! ssh "$user@$host" info; then
+ die 'No ssh push access to "'"$user@$host"'". You may need to request access at
+
+ '"$request_url"'
+'
+fi
diff --git a/test/API/driver/kwsys/GitSetup/setup-stage b/test/API/driver/kwsys/GitSetup/setup-stage
new file mode 100644
index 0000000..ce6ec45
--- /dev/null
+++ b/test/API/driver/kwsys/GitSetup/setup-stage
@@ -0,0 +1,82 @@
+#!/usr/bin/env bash
+#=============================================================================
+# Copyright 2010-2012 Kitware, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#=============================================================================
+
+# Run this script to set up the topic stage for pushing changes.
+
+# Project configuration instructions:
+#
+# - Run a Topic Stage repository next to the main project repository.
+#
+# - Populate adjacent "config" file with:
+# stage.url = Topic Stage repository URL
+# stage.pushurl = Topic Stage push URL if not "$url"
+
+egrep-q() {
+ egrep "$@" >/dev/null 2>/dev/null
+}
+
+die() {
+ echo 1>&2 "$@" ; exit 1
+}
+
+# Make sure we are inside the repository.
+cd "${BASH_SOURCE%/*}" &&
+
+# Load the project configuration.
+fetchurl_=$(git config -f config --get stage.url) &&
+pushurl_=$(git config -f config --get stage.pushurl || echo "$fetchurl_") &&
+remote=$(git config -f config --get stage.remote || echo 'stage') ||
+die 'This project is not configured to use a topic stage.'
+
+# Get current stage push URL.
+pushurl=$(git config --get remote."$remote".pushurl ||
+ git config --get remote."$remote".url || echo '') &&
+
+# Tell user about current configuration.
+if test -n "$pushurl"; then
+ echo 'Remote "'"$remote"'" is currently configured to push to
+
+ '"$pushurl"'
+' &&
+ read -ep 'Reconfigure Topic Stage? [y/N]: ' ans &&
+ if [ "$ans" == "y" ] || [ "$ans" == "Y" ]; then
+ setup=1
+ else
+ setup=''
+ fi
+else
+ setup=1
+fi
+
+# Perform setup if necessary.
+if test -n "$setup"; then
+ echo 'Setting up the topic stage...' &&
+ fetchurl="${fetchurl_}" &&
+ if test -z "$pushurl"; then
+ git remote add "$remote" "$fetchurl"
+ else
+ git config remote."$remote".url "$fetchurl"
+ fi &&
+ pushurl="${pushurl_}" &&
+ if test "$pushurl" != "$fetchurl"; then
+ git config remote."$remote".pushurl "$pushurl"
+ fi &&
+ echo 'Remote "'"$remote"'" is now configured to push to
+
+ '"$pushurl"'
+'
+fi || die 'Could not configure the topic stage remote.'
diff --git a/test/API/driver/kwsys/GitSetup/setup-upstream b/test/API/driver/kwsys/GitSetup/setup-upstream
new file mode 100644
index 0000000..92ce1da
--- /dev/null
+++ b/test/API/driver/kwsys/GitSetup/setup-upstream
@@ -0,0 +1,104 @@
+#!/usr/bin/env bash
+#=============================================================================
+# Copyright 2010-2015 Kitware, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#=============================================================================
+
+# Run this script to set up the local Git repository to use the
+# preferred upstream repository URLs.
+
+# Project configuration instructions:
+#
+# - Populate adjacent "config" file with:
+# upstream.url = Preferred fetch url for upstream remote
+# upstream.remote = Preferred name for upstream remote, if not "origin"
+
+die() {
+ echo 1>&2 "$@" ; exit 1
+}
+
+# Make sure we are inside the repository.
+cd "${BASH_SOURCE%/*}" &&
+
+# Load the project configuration.
+url=$(git config -f config --get upstream.url) &&
+remote=$(git config -f config --get upstream.remote ||
+ echo 'origin') ||
+die 'This project is not configured to use a preferred upstream repository.'
+
+# Get current upstream URLs.
+fetchurl=$(git config --get remote."$remote".url || echo '') &&
+pushurl=$(git config --get remote."$remote".pushurl || echo '') &&
+
+if test "$fetchurl" = "$url"; then
+ echo 'Remote "'"$remote"'" already uses recommended upstream repository.'
+ exit 0
+fi
+
+upstream_recommend='
+We recommend configuring the "'"$remote"'" remote to fetch from upstream at
+
+ '"$url"'
+'
+
+# Tell user about current configuration.
+if test -n "$fetchurl"; then
+ echo 'Remote "'"$remote"'" is currently configured to fetch from
+
+ '"$fetchurl"'
+' &&
+ if test -n "$pushurl"; then
+ echo 'and push to
+
+ '"$pushurl"
+ fi &&
+ echo "$upstream_recommend" &&
+ if test -n "$pushurl"; then
+ echo 'and to never push to it directly.
+'
+ fi &&
+
+ read -ep 'Reconfigure "'"$remote"'" remote as recommended? [y/N]: ' ans &&
+ if [ "$ans" == "y" ] || [ "$ans" == "Y" ]; then
+ setup=1
+ else
+ setup=''
+ fi
+else
+ echo 'Remote "'"$remote"'" is not yet configured.' &&
+ echo "$upstream_recommend" &&
+ read -ep 'Configure "'"$remote"'" remote as recommended? [Y/n]: ' ans &&
+ if [ "$ans" == "n" ] || [ "$ans" == "N" ]; then
+ exit 0
+ else
+ setup=1
+ fi
+fi &&
+
+# Perform setup if necessary.
+if test -n "$setup"; then
+ if test -z "$fetchurl"; then
+ git remote add "$remote" "$url"
+ else
+ git config remote."$remote".url "$url" &&
+ if old=$(git config --get remote."$remote".pushurl); then
+ git config --unset remote."$remote".pushurl ||
+ echo 'Warning: failed to unset remote.'"$remote"'.pushurl'
+ fi
+ fi &&
+ echo 'Remote "'"$remote"'" is now configured to fetch from
+
+ '"$url"'
+'
+fi
diff --git a/test/API/driver/kwsys/GitSetup/setup-user b/test/API/driver/kwsys/GitSetup/setup-user
new file mode 100644
index 0000000..1af439c
--- /dev/null
+++ b/test/API/driver/kwsys/GitSetup/setup-user
@@ -0,0 +1,39 @@
+#!/usr/bin/env bash
+#=============================================================================
+# Copyright 2010-2012 Kitware, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#=============================================================================
+
+# Run this script to configure Git user info in this repository.
+
+# Project configuration instructions: NONE
+
+for (( ; ; )); do
+ user_name=$(git config user.name || echo '') &&
+ user_email=$(git config user.email || echo '') &&
+ if test -n "$user_name" -a -n "$user_email"; then
+ echo 'Your commits will record as Author:
+
+ '"$user_name <$user_email>"'
+' &&
+ read -ep 'Is the author name and email address above correct? [Y/n] ' correct &&
+ if test "$correct" != "n" -a "$correct" != "N"; then
+ break
+ fi
+ fi &&
+ read -ep 'Enter your full name e.g. "John Doe": ' name &&
+ read -ep 'Enter your email address e.g. "john@gmail.com": ' email &&
+ git config user.name "$name" &&
+ git config user.email "$email"
+done
diff --git a/test/API/driver/kwsys/GitSetup/tips b/test/API/driver/kwsys/GitSetup/tips
new file mode 100644
index 0000000..784e1ed
--- /dev/null
+++ b/test/API/driver/kwsys/GitSetup/tips
@@ -0,0 +1,55 @@
+#!/usr/bin/env bash
+#=============================================================================
+# Copyright 2010-2012 Kitware, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#=============================================================================
+
+# This script makes optional suggestions for working with Git.
+
+# Project configuration instructions: NONE
+
+egrep-q() {
+ egrep "$@" >/dev/null 2>/dev/null
+}
+
+# Suggest color configuration.
+if test -z "$(git config --get color.ui)"; then
+ echo '
+One may enable color output from Git commands with
+
+ git config --global color.ui auto
+'
+fi
+
+# Suggest bash completion.
+if ! bash -i -c 'echo $PS1' | egrep-q '__git_ps1'; then
+ echo '
+A dynamic, informative Git shell prompt can be obtained by sourcing
+the git bash-completion script in your "~/.bashrc". Set the PS1
+environment variable as suggested in the comments at the top of the
+bash-completion script. You may need to install the bash-completion
+package from your distribution to obtain it.
+'
+fi
+
+# Suggest merge tool.
+if test -z "$(git config --get merge.tool)"; then
+ echo '
+One may configure Git to load a merge tool with
+
+ git config merge.tool <toolname>
+
+See "git help mergetool" for more information.
+'
+fi
diff --git a/test/API/driver/kwsys/Glob.cxx b/test/API/driver/kwsys/Glob.cxx
new file mode 100644
index 0000000..34bb0d0
--- /dev/null
+++ b/test/API/driver/kwsys/Glob.cxx
@@ -0,0 +1,448 @@
+/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying
+ file Copyright.txt or https://cmake.org/licensing#kwsys for details. */
+#include "kwsysPrivate.h"
+#include KWSYS_HEADER(Glob.hxx)
+
+#include KWSYS_HEADER(Configure.hxx)
+
+#include KWSYS_HEADER(RegularExpression.hxx)
+#include KWSYS_HEADER(SystemTools.hxx)
+#include KWSYS_HEADER(Directory.hxx)
+
+// Work-around CMake dependency scanning limitation. This must
+// duplicate the above list of headers.
+#if 0
+# include "Configure.hxx.in"
+# include "Directory.hxx.in"
+# include "Glob.hxx.in"
+# include "RegularExpression.hxx.in"
+# include "SystemTools.hxx.in"
+#endif
+
+#include <algorithm>
+#include <string>
+#include <vector>
+
+#include <ctype.h>
+#include <stdio.h>
+#include <string.h>
+namespace KWSYS_NAMESPACE {
+#if defined(_WIN32) || defined(__APPLE__) || defined(__CYGWIN__)
+// On Windows and Apple, no difference between lower and upper case
+# define KWSYS_GLOB_CASE_INDEPENDENT
+#endif
+
+#if defined(_WIN32) || defined(__CYGWIN__)
+// Handle network paths
+# define KWSYS_GLOB_SUPPORT_NETWORK_PATHS
+#endif
+
+class GlobInternals
+{
+public:
+ std::vector<std::string> Files;
+ std::vector<kwsys::RegularExpression> Expressions;
+};
+
+Glob::Glob()
+{
+ this->Internals = new GlobInternals;
+ this->Recurse = false;
+ this->Relative = "";
+
+ this->RecurseThroughSymlinks = true;
+ // RecurseThroughSymlinks is true by default for backwards compatibility,
+ // not because it's a good idea...
+ this->FollowedSymlinkCount = 0;
+
+ // Keep separate variables for directory listing for back compatibility
+ this->ListDirs = true;
+ this->RecurseListDirs = false;
+}
+
+Glob::~Glob()
+{
+ delete this->Internals;
+}
+
+std::vector<std::string>& Glob::GetFiles()
+{
+ return this->Internals->Files;
+}
+
+std::string Glob::PatternToRegex(const std::string& pattern,
+ bool require_whole_string, bool preserve_case)
+{
+ // Incrementally build the regular expression from the pattern.
+ std::string regex = require_whole_string ? "^" : "";
+ std::string::const_iterator pattern_first = pattern.begin();
+ std::string::const_iterator pattern_last = pattern.end();
+ for (std::string::const_iterator i = pattern_first; i != pattern_last; ++i) {
+ int c = *i;
+ if (c == '*') {
+ // A '*' (not between brackets) matches any string.
+ // We modify this to not match slashes since the original glob
+ // pattern documentation was meant for matching file name
+ // components separated by slashes.
+ regex += "[^/]*";
+ } else if (c == '?') {
+ // A '?' (not between brackets) matches any single character.
+ // We modify this to not match slashes since the original glob
+ // pattern documentation was meant for matching file name
+ // components separated by slashes.
+ regex += "[^/]";
+ } else if (c == '[') {
+ // Parse out the bracket expression. It begins just after the
+ // opening character.
+ std::string::const_iterator bracket_first = i + 1;
+ std::string::const_iterator bracket_last = bracket_first;
+
+ // The first character may be complementation '!' or '^'.
+ if (bracket_last != pattern_last &&
+ (*bracket_last == '!' || *bracket_last == '^')) {
+ ++bracket_last;
+ }
+
+ // If the next character is a ']' it is included in the brackets
+ // because the bracket string may not be empty.
+ if (bracket_last != pattern_last && *bracket_last == ']') {
+ ++bracket_last;
+ }
+
+ // Search for the closing ']'.
+ while (bracket_last != pattern_last && *bracket_last != ']') {
+ ++bracket_last;
+ }
+
+ // Check whether we have a complete bracket string.
+ if (bracket_last == pattern_last) {
+ // The bracket string did not end, so it was opened simply by
+ // a '[' that is supposed to be matched literally.
+ regex += "\\[";
+ } else {
+ // Convert the bracket string to its regex equivalent.
+ std::string::const_iterator k = bracket_first;
+
+ // Open the regex block.
+ regex += "[";
+
+ // A regex range complement uses '^' instead of '!'.
+ if (k != bracket_last && *k == '!') {
+ regex += "^";
+ ++k;
+ }
+
+ // Convert the remaining characters.
+ for (; k != bracket_last; ++k) {
+ // Backslashes must be escaped.
+ if (*k == '\\') {
+ regex += "\\";
+ }
+
+ // Store this character.
+ regex += *k;
+ }
+
+ // Close the regex block.
+ regex += "]";
+
+ // Jump to the end of the bracket string.
+ i = bracket_last;
+ }
+ } else {
+ // A single character matches itself.
+ int ch = c;
+ if (!(('a' <= ch && ch <= 'z') || ('A' <= ch && ch <= 'Z') ||
+ ('0' <= ch && ch <= '9'))) {
+ // Escape the non-alphanumeric character.
+ regex += "\\";
+ }
+#if defined(KWSYS_GLOB_CASE_INDEPENDENT)
+ else {
+ // On case-insensitive systems file names are converted to lower
+ // case before matching.
+ if (!preserve_case) {
+ ch = tolower(ch);
+ }
+ }
+#endif
+ (void)preserve_case;
+ // Store the character.
+ regex.append(1, static_cast<char>(ch));
+ }
+ }
+
+ if (require_whole_string) {
+ regex += "$";
+ }
+ return regex;
+}
+
+bool Glob::RecurseDirectory(std::string::size_type start,
+ const std::string& dir, GlobMessages* messages)
+{
+ kwsys::Directory d;
+ if (!d.Load(dir)) {
+ return true;
+ }
+ unsigned long cc;
+ std::string realname;
+ std::string fname;
+ for (cc = 0; cc < d.GetNumberOfFiles(); cc++) {
+ fname = d.GetFile(cc);
+ if (fname == "." || fname == "..") {
+ continue;
+ }
+
+ if (start == 0) {
+ realname = dir + fname;
+ } else {
+ realname = dir + "/" + fname;
+ }
+
+#if defined(KWSYS_GLOB_CASE_INDEPENDENT)
+ // On Windows and Apple, no difference between lower and upper case
+ fname = kwsys::SystemTools::LowerCase(fname);
+#endif
+
+ bool isDir = kwsys::SystemTools::FileIsDirectory(realname);
+ bool isSymLink = kwsys::SystemTools::FileIsSymlink(realname);
+
+ if (isDir && (!isSymLink || this->RecurseThroughSymlinks)) {
+ if (isSymLink) {
+ ++this->FollowedSymlinkCount;
+ std::string realPathErrorMessage;
+ std::string canonicalPath(
+ SystemTools::GetRealPath(dir, &realPathErrorMessage));
+
+ if (!realPathErrorMessage.empty()) {
+ if (messages) {
+ messages->push_back(
+ Message(Glob::error,
+ "Canonical path generation from path '" + dir +
+ "' failed! Reason: '" + realPathErrorMessage + "'"));
+ }
+ return false;
+ }
+
+ if (std::find(this->VisitedSymlinks.begin(),
+ this->VisitedSymlinks.end(),
+ canonicalPath) == this->VisitedSymlinks.end()) {
+ if (this->RecurseListDirs) {
+ // symlinks are treated as directories
+ this->AddFile(this->Internals->Files, realname);
+ }
+
+ this->VisitedSymlinks.push_back(canonicalPath);
+ if (!this->RecurseDirectory(start + 1, realname, messages)) {
+ this->VisitedSymlinks.pop_back();
+
+ return false;
+ }
+ this->VisitedSymlinks.pop_back();
+ }
+ // else we have already visited this symlink - prevent cyclic recursion
+ else if (messages) {
+ std::string message;
+ for (std::vector<std::string>::const_iterator pathIt =
+ std::find(this->VisitedSymlinks.begin(),
+ this->VisitedSymlinks.end(), canonicalPath);
+ pathIt != this->VisitedSymlinks.end(); ++pathIt) {
+ message += *pathIt + "\n";
+ }
+ message += canonicalPath + "/" + fname;
+ messages->push_back(Message(Glob::cyclicRecursion, message));
+ }
+ } else {
+ if (this->RecurseListDirs) {
+ this->AddFile(this->Internals->Files, realname);
+ }
+ if (!this->RecurseDirectory(start + 1, realname, messages)) {
+ return false;
+ }
+ }
+ } else {
+ if (!this->Internals->Expressions.empty() &&
+ this->Internals->Expressions.back().find(fname)) {
+ this->AddFile(this->Internals->Files, realname);
+ }
+ }
+ }
+
+ return true;
+}
+
+void Glob::ProcessDirectory(std::string::size_type start,
+ const std::string& dir, GlobMessages* messages)
+{
+ // std::cout << "ProcessDirectory: " << dir << std::endl;
+ bool last = (start == this->Internals->Expressions.size() - 1);
+ if (last && this->Recurse) {
+ this->RecurseDirectory(start, dir, messages);
+ return;
+ }
+
+ if (start >= this->Internals->Expressions.size()) {
+ return;
+ }
+
+ kwsys::Directory d;
+ if (!d.Load(dir)) {
+ return;
+ }
+ unsigned long cc;
+ std::string realname;
+ std::string fname;
+ for (cc = 0; cc < d.GetNumberOfFiles(); cc++) {
+ fname = d.GetFile(cc);
+ if (fname == "." || fname == "..") {
+ continue;
+ }
+
+ if (start == 0) {
+ realname = dir + fname;
+ } else {
+ realname = dir + "/" + fname;
+ }
+
+#if defined(KWSYS_GLOB_CASE_INDEPENDENT)
+ // On case-insensitive file systems convert to lower case for matching.
+ fname = kwsys::SystemTools::LowerCase(fname);
+#endif
+
+ // std::cout << "Look at file: " << fname << std::endl;
+ // std::cout << "Match: "
+ // << this->Internals->TextExpressions[start].c_str() << std::endl;
+ // std::cout << "Real name: " << realname << std::endl;
+
+ if ((!last && !kwsys::SystemTools::FileIsDirectory(realname)) ||
+ (!this->ListDirs && last &&
+ kwsys::SystemTools::FileIsDirectory(realname))) {
+ continue;
+ }
+
+ if (this->Internals->Expressions[start].find(fname)) {
+ if (last) {
+ this->AddFile(this->Internals->Files, realname);
+ } else {
+ this->ProcessDirectory(start + 1, realname, messages);
+ }
+ }
+ }
+}
+
+bool Glob::FindFiles(const std::string& inexpr, GlobMessages* messages)
+{
+ std::string cexpr;
+ std::string::size_type cc;
+ std::string expr = inexpr;
+
+ this->Internals->Expressions.clear();
+ this->Internals->Files.clear();
+
+ if (!kwsys::SystemTools::FileIsFullPath(expr)) {
+ expr = kwsys::SystemTools::GetCurrentWorkingDirectory();
+ expr += "/" + inexpr;
+ }
+ std::string fexpr = expr;
+
+ std::string::size_type skip = 0;
+ std::string::size_type last_slash = 0;
+ for (cc = 0; cc < expr.size(); cc++) {
+ if (cc > 0 && expr[cc] == '/' && expr[cc - 1] != '\\') {
+ last_slash = cc;
+ }
+ if (cc > 0 && (expr[cc] == '[' || expr[cc] == '?' || expr[cc] == '*') &&
+ expr[cc - 1] != '\\') {
+ break;
+ }
+ }
+ if (last_slash > 0) {
+ // std::cout << "I can skip: " << fexpr.substr(0, last_slash)
+ // << std::endl;
+ skip = last_slash;
+ }
+ if (skip == 0) {
+#if defined(KWSYS_GLOB_SUPPORT_NETWORK_PATHS)
+ // Handle network paths
+ if (expr[0] == '/' && expr[1] == '/') {
+ int cnt = 0;
+ for (cc = 2; cc < expr.size(); cc++) {
+ if (expr[cc] == '/') {
+ cnt++;
+ if (cnt == 2) {
+ break;
+ }
+ }
+ }
+ skip = int(cc + 1);
+ } else
+#endif
+ // Handle drive letters on Windows
+ if (expr[1] == ':' && expr[0] != '/') {
+ skip = 2;
+ }
+ }
+
+ if (skip > 0) {
+ expr = expr.substr(skip);
+ }
+
+ cexpr = "";
+ for (cc = 0; cc < expr.size(); cc++) {
+ int ch = expr[cc];
+ if (ch == '/') {
+ if (!cexpr.empty()) {
+ this->AddExpression(cexpr);
+ }
+ cexpr = "";
+ } else {
+ cexpr.append(1, static_cast<char>(ch));
+ }
+ }
+ if (!cexpr.empty()) {
+ this->AddExpression(cexpr);
+ }
+
+ // Handle network paths
+ if (skip > 0) {
+ this->ProcessDirectory(0, fexpr.substr(0, skip) + "/", messages);
+ } else {
+ this->ProcessDirectory(0, "/", messages);
+ }
+ return true;
+}
+
+void Glob::AddExpression(const std::string& expr)
+{
+ this->Internals->Expressions.push_back(
+ kwsys::RegularExpression(this->PatternToRegex(expr)));
+}
+
+void Glob::SetRelative(const char* dir)
+{
+ if (!dir) {
+ this->Relative = "";
+ return;
+ }
+ this->Relative = dir;
+}
+
+const char* Glob::GetRelative()
+{
+ if (this->Relative.empty()) {
+ return nullptr;
+ }
+ return this->Relative.c_str();
+}
+
+void Glob::AddFile(std::vector<std::string>& files, const std::string& file)
+{
+ if (!this->Relative.empty()) {
+ files.push_back(kwsys::SystemTools::RelativePath(this->Relative, file));
+ } else {
+ files.push_back(file);
+ }
+}
+
+} // namespace KWSYS_NAMESPACE
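The pattern-to-regex rules implemented by PatternToRegex above reduce to a small table: '*' becomes "[^/]*", '?' becomes "[^/]", a bracket expression is passed through with a leading '!' mapped to '^', and any other non-alphanumeric character is backslash-escaped (with optional lower-casing on case-insensitive systems). A minimal standalone sketch of just the non-bracket cases, using a hypothetical helper name rather than the class above:

// pattern_to_regex_sketch.cxx -- simplified restatement of the rules in
// Glob::PatternToRegex; bracket expressions and case folding are omitted.
#include <cctype>
#include <iostream>
#include <string>

static std::string PatternToRegexSketch(const std::string& pattern)
{
  std::string regex = "^";
  for (char c : pattern) {
    if (c == '*') {
      regex += "[^/]*"; // '*' matches anything except a path separator
    } else if (c == '?') {
      regex += "[^/]"; // '?' matches any single non-slash character
    } else {
      if (!std::isalnum(static_cast<unsigned char>(c))) {
        regex += '\\'; // escape regex meta-characters such as '.'
      }
      regex += c;
    }
  }
  return regex + "$"; // anchor so the pattern must match the whole component
}

int main()
{
  // Prints "^[^/]*\.cxx$": the translation of the glob pattern "*.cxx".
  std::cout << PatternToRegexSketch("*.cxx") << std::endl;
  return 0;
}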
diff --git a/test/API/driver/kwsys/Glob.hxx.in b/test/API/driver/kwsys/Glob.hxx.in
new file mode 100644
index 0000000..170766f
--- /dev/null
+++ b/test/API/driver/kwsys/Glob.hxx.in
@@ -0,0 +1,134 @@
+/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying
+ file Copyright.txt or https://cmake.org/licensing#kwsys for details. */
+#ifndef @KWSYS_NAMESPACE@_Glob_hxx
+#define @KWSYS_NAMESPACE@_Glob_hxx
+
+#include <@KWSYS_NAMESPACE@/Configure.h>
+#include <@KWSYS_NAMESPACE@/Configure.hxx>
+
+#include <string>
+#include <vector>
+
+namespace @KWSYS_NAMESPACE@ {
+
+class GlobInternals;
+
+/** \class Glob
+ * \brief Portable globbing searches.
+ *
+ * Globbing expressions are much simpler than regular
+ * expressions. This class will search for files using
+ * globbing expressions.
+ *
+ * Finds all files that match a given globbing expression.
+ */
+class @KWSYS_NAMESPACE@_EXPORT Glob
+{
+public:
+ enum MessageType
+ {
+ error,
+ cyclicRecursion
+ };
+
+ struct Message
+ {
+ MessageType type;
+ std::string content;
+
+ Message(MessageType t, const std::string& c)
+ : type(t)
+ , content(c)
+ {
+ }
+ ~Message() = default;
+ Message(const Message& msg) = default;
+ Message& operator=(Message const& msg) = default;
+ };
+
+ typedef std::vector<Message> GlobMessages;
+ typedef std::vector<Message>::iterator GlobMessagesIterator;
+
+public:
+ Glob();
+ ~Glob();
+
+ //! Find all files that match the pattern.
+ bool FindFiles(const std::string& inexpr, GlobMessages* messages = nullptr);
+
+ //! Return the list of files that matched.
+ std::vector<std::string>& GetFiles();
+
+ //! Set recurse to true to match subdirectories.
+ void RecurseOn() { this->SetRecurse(true); }
+ void RecurseOff() { this->SetRecurse(false); }
+ void SetRecurse(bool i) { this->Recurse = i; }
+ bool GetRecurse() { return this->Recurse; }
+
+ //! Set recurse through symlinks to true if recursion should traverse the
+ // linked-to directories
+ void RecurseThroughSymlinksOn() { this->SetRecurseThroughSymlinks(true); }
+ void RecurseThroughSymlinksOff() { this->SetRecurseThroughSymlinks(false); }
+ void SetRecurseThroughSymlinks(bool i) { this->RecurseThroughSymlinks = i; }
+ bool GetRecurseThroughSymlinks() { return this->RecurseThroughSymlinks; }
+
+ //! Get the number of symlinks followed through recursion
+ unsigned int GetFollowedSymlinkCount() { return this->FollowedSymlinkCount; }
+
+ //! Set relative to true to only show relative path to files.
+ void SetRelative(const char* dir);
+ const char* GetRelative();
+
+ /** Convert the given globbing pattern to a regular expression.
+ There is no way to quote meta-characters. The
+ require_whole_string argument specifies whether the regex is
+ automatically surrounded by "^" and "$" to match the whole
+ string. This is on by default because patterns always match
+ whole strings, but may be disabled to support concatenating
+ expressions more easily (regex1|regex2|etc). */
+ static std::string PatternToRegex(const std::string& pattern,
+ bool require_whole_string = true,
+ bool preserve_case = false);
+
+ /** Getters and setters for enabling and disabling directory
+ listing in recursive and non recursive globbing mode.
+ If listing is enabled in recursive mode it also lists
+ directory symbolic links even if follow symlinks is enabled. */
+ void SetListDirs(bool list) { this->ListDirs = list; }
+ bool GetListDirs() const { return this->ListDirs; }
+ void SetRecurseListDirs(bool list) { this->RecurseListDirs = list; }
+ bool GetRecurseListDirs() const { return this->RecurseListDirs; }
+
+protected:
+ //! Process directory
+ void ProcessDirectory(std::string::size_type start, const std::string& dir,
+ GlobMessages* messages);
+
+  //! Process last directory, but only when the recurse flag is on. That is
+ // effectively like saying: /path/to/file/**/file
+ bool RecurseDirectory(std::string::size_type start, const std::string& dir,
+ GlobMessages* messages);
+
+ //! Add regular expression
+ void AddExpression(const std::string& expr);
+
+ //! Add a file to the list
+ void AddFile(std::vector<std::string>& files, const std::string& file);
+
+ GlobInternals* Internals;
+ bool Recurse;
+ std::string Relative;
+ bool RecurseThroughSymlinks;
+ unsigned int FollowedSymlinkCount;
+ std::vector<std::string> VisitedSymlinks;
+ bool ListDirs;
+ bool RecurseListDirs;
+
+private:
+ Glob(const Glob&); // Not implemented.
+ void operator=(const Glob&); // Not implemented.
+};
+
+} // namespace @KWSYS_NAMESPACE@
+
+#endif
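Typical use of this class is to construct a Glob, optionally enable recursion, call FindFiles() with a path pattern, and read the matches back with GetFiles(). A short sketch, assuming the library has been built with the namespace configured as "kwsys" and installed so that the header is reachable as <kwsys/Glob.hxx> (both depend on the @KWSYS_NAMESPACE@ configuration):

#include <kwsys/Glob.hxx>

#include <iostream>
#include <string>

int main()
{
  kwsys::Glob glob;
  glob.RecurseOn(); // also search subdirectories for the last pattern component
  if (glob.FindFiles("/tmp/*.log")) {
    for (std::string const& file : glob.GetFiles()) {
      std::cout << file << std::endl;
    }
  }
  return 0;
}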
diff --git a/test/API/driver/kwsys/IOStream.cxx b/test/API/driver/kwsys/IOStream.cxx
new file mode 100644
index 0000000..e21f87d
--- /dev/null
+++ b/test/API/driver/kwsys/IOStream.cxx
@@ -0,0 +1,255 @@
+/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying
+ file Copyright.txt or https://cmake.org/licensing#kwsys for details. */
+#include "kwsysPrivate.h"
+#include KWSYS_HEADER(Configure.hxx)
+
+// Include the streams library.
+#include <iostream>
+#include KWSYS_HEADER(IOStream.hxx)
+
+// Work-around CMake dependency scanning limitation. This must
+// duplicate the above list of headers.
+#if 0
+# include "Configure.hxx.in"
+# include "IOStream.hxx.in"
+#endif
+
+// Implement the rest of this file only if it is needed.
+#if KWSYS_IOS_NEED_OPERATORS_LL
+
+# include <stdio.h> // sscanf, sprintf
+# include <string.h> // memchr
+
+# if defined(_MAX_INT_DIG)
+# define KWSYS_IOS_INT64_MAX_DIG _MAX_INT_DIG
+# else
+# define KWSYS_IOS_INT64_MAX_DIG 32
+# endif
+
+namespace KWSYS_NAMESPACE {
+
+// Scan an input stream for an integer value.
+static int IOStreamScanStream(std::istream& is, char* buffer)
+{
+ // Prepare to write to buffer.
+ char* out = buffer;
+ char* end = buffer + KWSYS_IOS_INT64_MAX_DIG - 1;
+
+ // Look for leading sign.
+ if (is.peek() == '+') {
+ *out++ = '+';
+ is.ignore();
+ } else if (is.peek() == '-') {
+ *out++ = '-';
+ is.ignore();
+ }
+
+ // Determine the base. If not specified in the stream, try to
+ // detect it from the input. A leading 0x means hex, and a leading
+ // 0 alone means octal.
+ int base = 0;
+ int flags = is.flags() & std::ios_base::basefield;
+ if (flags == std::ios_base::oct) {
+ base = 8;
+ } else if (flags == std::ios_base::dec) {
+ base = 10;
+ } else if (flags == std::ios_base::hex) {
+ base = 16;
+ }
+ bool foundDigit = false;
+ bool foundNonZero = false;
+ if (is.peek() == '0') {
+ foundDigit = true;
+ is.ignore();
+ if ((is.peek() == 'x' || is.peek() == 'X') && (base == 0 || base == 16)) {
+ base = 16;
+ foundDigit = false;
+ is.ignore();
+ } else if (base == 0) {
+ base = 8;
+ }
+ }
+
+ // Determine the range of digits allowed for this number.
+ const char* digits = "0123456789abcdefABCDEF";
+ int maxDigitIndex = 10;
+ if (base == 8) {
+ maxDigitIndex = 8;
+ } else if (base == 16) {
+ maxDigitIndex = 10 + 6 + 6;
+ }
+
+ // Scan until an invalid digit is found.
+ for (; is.peek() != EOF; is.ignore()) {
+ if (memchr(digits, *out = (char)is.peek(), maxDigitIndex) != 0) {
+ if ((foundNonZero || *out != '0') && out < end) {
+ ++out;
+ foundNonZero = true;
+ }
+ foundDigit = true;
+ } else {
+ break;
+ }
+ }
+
+ // Correct the buffer contents for degenerate cases.
+ if (foundDigit && !foundNonZero) {
+ *out++ = '0';
+ } else if (!foundDigit) {
+ out = buffer;
+ }
+
+ // Terminate the string in the buffer.
+ *out = '\0';
+
+ return base;
+}
+
+// Read an integer value from an input stream.
+template <class T>
+std::istream& IOStreamScanTemplate(std::istream& is, T& value, char type)
+{
+ int state = std::ios_base::goodbit;
+
+ // Skip leading whitespace.
+ std::istream::sentry okay(is);
+
+ if (okay) {
+ try {
+ // Copy the string to a buffer and construct the format string.
+ char buffer[KWSYS_IOS_INT64_MAX_DIG];
+# if defined(_MSC_VER)
+ char format[] = "%I64_";
+ const int typeIndex = 4;
+# else
+ char format[] = "%ll_";
+ const int typeIndex = 3;
+# endif
+ switch (IOStreamScanStream(is, buffer)) {
+ case 8:
+ format[typeIndex] = 'o';
+ break;
+ case 0: // Default to decimal if not told otherwise.
+ case 10:
+ format[typeIndex] = type;
+ break;
+ case 16:
+ format[typeIndex] = 'x';
+ break;
+ };
+
+ // Use sscanf to parse the number from the buffer.
+ T result;
+ int success = (sscanf(buffer, format, &result) == 1) ? 1 : 0;
+
+ // Set flags for resulting state.
+ if (is.peek() == EOF) {
+ state |= std::ios_base::eofbit;
+ }
+ if (!success) {
+ state |= std::ios_base::failbit;
+ } else {
+ value = result;
+ }
+ } catch (...) {
+ state |= std::ios_base::badbit;
+ }
+ }
+
+ is.setstate(std::ios_base::iostate(state));
+ return is;
+}
+
+// Print an integer value to an output stream.
+template <class T>
+std::ostream& IOStreamPrintTemplate(std::ostream& os, T value, char type)
+{
+ std::ostream::sentry okay(os);
+ if (okay) {
+ try {
+ // Construct the format string.
+ char format[8];
+ char* f = format;
+ *f++ = '%';
+ if (os.flags() & std::ios_base::showpos) {
+ *f++ = '+';
+ }
+ if (os.flags() & std::ios_base::showbase) {
+ *f++ = '#';
+ }
+# if defined(_MSC_VER)
+ *f++ = 'I';
+ *f++ = '6';
+ *f++ = '4';
+# else
+ *f++ = 'l';
+ *f++ = 'l';
+# endif
+ long bflags = os.flags() & std::ios_base::basefield;
+ if (bflags == std::ios_base::oct) {
+ *f++ = 'o';
+ } else if (bflags != std::ios_base::hex) {
+ *f++ = type;
+ } else if (os.flags() & std::ios_base::uppercase) {
+ *f++ = 'X';
+ } else {
+ *f++ = 'x';
+ }
+ *f = '\0';
+
+ // Use sprintf to print to a buffer and then write the
+ // buffer to the stream.
+ char buffer[2 * KWSYS_IOS_INT64_MAX_DIG];
+ sprintf(buffer, format, value);
+ os << buffer;
+ } catch (...) {
+ os.clear(os.rdstate() | std::ios_base::badbit);
+ }
+ }
+ return os;
+}
+
+# if !KWSYS_IOS_HAS_ISTREAM_LONG_LONG
+// Implement input stream operator for IOStreamSLL.
+std::istream& IOStreamScan(std::istream& is, IOStreamSLL& value)
+{
+ return IOStreamScanTemplate(is, value, 'd');
+}
+
+// Implement input stream operator for IOStreamULL.
+std::istream& IOStreamScan(std::istream& is, IOStreamULL& value)
+{
+ return IOStreamScanTemplate(is, value, 'u');
+}
+# endif
+
+# if !KWSYS_IOS_HAS_OSTREAM_LONG_LONG
+// Implement output stream operator for IOStreamSLL.
+std::ostream& IOStreamPrint(std::ostream& os, IOStreamSLL value)
+{
+ return IOStreamPrintTemplate(os, value, 'd');
+}
+
+// Implement output stream operator for IOStreamULL.
+std::ostream& IOStreamPrint(std::ostream& os, IOStreamULL value)
+{
+ return IOStreamPrintTemplate(os, value, 'u');
+}
+# endif
+
+} // namespace KWSYS_NAMESPACE
+
+#else
+
+namespace KWSYS_NAMESPACE {
+
+// Create one public symbol in this object file to avoid warnings from
+// archivers.
+void IOStreamSymbolToAvoidWarning();
+void IOStreamSymbolToAvoidWarning()
+{
+}
+
+} // namespace KWSYS_NAMESPACE
+
+#endif // KWSYS_IOS_NEED_OPERATORS_LL
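When these operators are needed, the print path above amounts to building a printf-style conversion from the stream flags ("%lld" by default, with '+', '#', 'o', 'x', or 'X' added as the flags dictate, and "%I64d" on older MSVC) and handing it to sprintf. A standalone illustration of the equivalent conversions on the non-MSVC path:

#include <cstdio>

int main()
{
  long long value = 123456789012345LL;
  std::printf("%lld\n", value);  /* basefield == dec (or unset): plain decimal */
  std::printf("%#llx\n", value); /* hex with showbase: '#' and 'x' are appended */
  std::printf("%llo\n", value);  /* basefield == oct: octal conversion */
  return 0;
}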
diff --git a/test/API/driver/kwsys/IOStream.hxx.in b/test/API/driver/kwsys/IOStream.hxx.in
new file mode 100644
index 0000000..db8a23e
--- /dev/null
+++ b/test/API/driver/kwsys/IOStream.hxx.in
@@ -0,0 +1,126 @@
+/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying
+ file Copyright.txt or https://cmake.org/licensing#kwsys for details. */
+#ifndef @KWSYS_NAMESPACE@_IOStream_hxx
+#define @KWSYS_NAMESPACE@_IOStream_hxx
+
+#include <iosfwd>
+
+/* Define these macros temporarily to keep the code readable. */
+#if !defined(KWSYS_NAMESPACE) && !@KWSYS_NAMESPACE@_NAME_IS_KWSYS
+# define kwsysEXPORT @KWSYS_NAMESPACE@_EXPORT
+#endif
+
+/* Whether istream supports long long. */
+#define @KWSYS_NAMESPACE@_IOS_HAS_ISTREAM_LONG_LONG \
+ @KWSYS_IOS_HAS_ISTREAM_LONG_LONG@
+
+/* Whether ostream supports long long. */
+#define @KWSYS_NAMESPACE@_IOS_HAS_OSTREAM_LONG_LONG \
+ @KWSYS_IOS_HAS_OSTREAM_LONG_LONG@
+
+/* Determine whether we need to define the streaming operators for
+ long long or __int64. */
+#if @KWSYS_USE_LONG_LONG@
+# if !@KWSYS_NAMESPACE@_IOS_HAS_ISTREAM_LONG_LONG || \
+ !@KWSYS_NAMESPACE@_IOS_HAS_OSTREAM_LONG_LONG
+# define @KWSYS_NAMESPACE@_IOS_NEED_OPERATORS_LL 1
+namespace @KWSYS_NAMESPACE@ {
+typedef long long IOStreamSLL;
+typedef unsigned long long IOStreamULL;
+}
+# endif
+#elif defined(_MSC_VER) && _MSC_VER < 1300
+# define @KWSYS_NAMESPACE@_IOS_NEED_OPERATORS_LL 1
+namespace @KWSYS_NAMESPACE@ {
+typedef __int64 IOStreamSLL;
+typedef unsigned __int64 IOStreamULL;
+}
+#endif
+#if !defined(@KWSYS_NAMESPACE@_IOS_NEED_OPERATORS_LL)
+# define @KWSYS_NAMESPACE@_IOS_NEED_OPERATORS_LL 0
+#endif
+
+#if @KWSYS_NAMESPACE@_IOS_NEED_OPERATORS_LL
+# if !@KWSYS_NAMESPACE@_IOS_HAS_ISTREAM_LONG_LONG
+
+/* Input stream operator implementation functions. */
+namespace @KWSYS_NAMESPACE@ {
+kwsysEXPORT std::istream& IOStreamScan(std::istream&, IOStreamSLL&);
+kwsysEXPORT std::istream& IOStreamScan(std::istream&, IOStreamULL&);
+}
+
+/* Provide input stream operator for long long. */
+# if !defined(@KWSYS_NAMESPACE@_IOS_NO_ISTREAM_LONG_LONG) && \
+ !defined(KWSYS_IOS_ISTREAM_LONG_LONG_DEFINED)
+# define KWSYS_IOS_ISTREAM_LONG_LONG_DEFINED
+# define @KWSYS_NAMESPACE@_IOS_ISTREAM_LONG_LONG_DEFINED
+inline std::istream& operator>>(std::istream& is,
+ @KWSYS_NAMESPACE@::IOStreamSLL& value)
+{
+ return @KWSYS_NAMESPACE@::IOStreamScan(is, value);
+}
+# endif
+
+/* Provide input stream operator for unsigned long long. */
+# if !defined(@KWSYS_NAMESPACE@_IOS_NO_ISTREAM_UNSIGNED_LONG_LONG) && \
+ !defined(KWSYS_IOS_ISTREAM_UNSIGNED_LONG_LONG_DEFINED)
+# define KWSYS_IOS_ISTREAM_UNSIGNED_LONG_LONG_DEFINED
+# define @KWSYS_NAMESPACE@_IOS_ISTREAM_UNSIGNED_LONG_LONG_DEFINED
+inline std::istream& operator>>(std::istream& is,
+ @KWSYS_NAMESPACE@::IOStreamULL& value)
+{
+ return @KWSYS_NAMESPACE@::IOStreamScan(is, value);
+}
+# endif
+# endif /* !@KWSYS_NAMESPACE@_IOS_HAS_ISTREAM_LONG_LONG */
+
+# if !@KWSYS_NAMESPACE@_IOS_HAS_OSTREAM_LONG_LONG
+
+/* Output stream operator implementation functions. */
+namespace @KWSYS_NAMESPACE@ {
+kwsysEXPORT std::ostream& IOStreamPrint(std::ostream&, IOStreamSLL);
+kwsysEXPORT std::ostream& IOStreamPrint(std::ostream&, IOStreamULL);
+}
+
+/* Provide output stream operator for long long. */
+# if !defined(@KWSYS_NAMESPACE@_IOS_NO_OSTREAM_LONG_LONG) && \
+ !defined(KWSYS_IOS_OSTREAM_LONG_LONG_DEFINED)
+# define KWSYS_IOS_OSTREAM_LONG_LONG_DEFINED
+# define @KWSYS_NAMESPACE@_IOS_OSTREAM_LONG_LONG_DEFINED
+inline std::ostream& operator<<(std::ostream& os,
+ @KWSYS_NAMESPACE@::IOStreamSLL value)
+{
+ return @KWSYS_NAMESPACE@::IOStreamPrint(os, value);
+}
+# endif
+
+/* Provide output stream operator for unsigned long long. */
+# if !defined(@KWSYS_NAMESPACE@_IOS_NO_OSTREAM_UNSIGNED_LONG_LONG) && \
+ !defined(KWSYS_IOS_OSTREAM_UNSIGNED_LONG_LONG_DEFINED)
+# define KWSYS_IOS_OSTREAM_UNSIGNED_LONG_LONG_DEFINED
+# define @KWSYS_NAMESPACE@_IOS_OSTREAM_UNSIGNED_LONG_LONG_DEFINED
+inline std::ostream& operator<<(std::ostream& os,
+ @KWSYS_NAMESPACE@::IOStreamULL value)
+{
+ return @KWSYS_NAMESPACE@::IOStreamPrint(os, value);
+}
+# endif
+# endif /* !@KWSYS_NAMESPACE@_IOS_HAS_OSTREAM_LONG_LONG */
+#endif /* @KWSYS_NAMESPACE@_IOS_NEED_OPERATORS_LL */
+
+/* Undefine temporary macros. */
+#if !defined(KWSYS_NAMESPACE) && !@KWSYS_NAMESPACE@_NAME_IS_KWSYS
+# undef kwsysEXPORT
+#endif
+
+/* If building a C++ file in kwsys itself, give the source file
+ access to the macros without a configured namespace. */
+#if defined(KWSYS_NAMESPACE)
+# define KWSYS_IOS_HAS_ISTREAM_LONG_LONG \
+ @KWSYS_NAMESPACE@_IOS_HAS_ISTREAM_LONG_LONG
+# define KWSYS_IOS_HAS_OSTREAM_LONG_LONG \
+ @KWSYS_NAMESPACE@_IOS_HAS_OSTREAM_LONG_LONG
+# define KWSYS_IOS_NEED_OPERATORS_LL @KWSYS_NAMESPACE@_IOS_NEED_OPERATORS_LL
+#endif
+
+#endif
diff --git a/test/API/driver/kwsys/MD5.c b/test/API/driver/kwsys/MD5.c
new file mode 100644
index 0000000..97cf9ba
--- /dev/null
+++ b/test/API/driver/kwsys/MD5.c
@@ -0,0 +1,494 @@
+/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying
+ file Copyright.txt or https://cmake.org/licensing#kwsys for details. */
+#include "kwsysPrivate.h"
+#include KWSYS_HEADER(MD5.h)
+
+/* Work-around CMake dependency scanning limitation. This must
+ duplicate the above list of headers. */
+#if 0
+# include "MD5.h.in"
+#endif
+
+#include <stddef.h> /* size_t */
+#include <stdlib.h> /* malloc, free */
+#include <string.h> /* memcpy, strlen */
+
+/* This MD5 implementation has been taken from a third party. Slight
+ modifications to the arrangement of the code have been made to put
+ it in a single source file instead of a separate header and
+ implementation file. */
+
+#if defined(__clang__) && !defined(__INTEL_COMPILER)
+# pragma clang diagnostic push
+# pragma clang diagnostic ignored "-Wcast-align"
+#endif
+
+/*
+ Copyright (C) 1999, 2000, 2002 Aladdin Enterprises. All rights reserved.
+
+ This software is provided 'as-is', without any express or implied
+ warranty. In no event will the authors be held liable for any damages
+ arising from the use of this software.
+
+ Permission is granted to anyone to use this software for any purpose,
+ including commercial applications, and to alter it and redistribute it
+ freely, subject to the following restrictions:
+
+ 1. The origin of this software must not be misrepresented; you must not
+ claim that you wrote the original software. If you use this software
+ in a product, an acknowledgment in the product documentation would be
+ appreciated but is not required.
+ 2. Altered source versions must be plainly marked as such, and must not be
+ misrepresented as being the original software.
+ 3. This notice may not be removed or altered from any source distribution.
+
+ L. Peter Deutsch
+ ghost@aladdin.com
+
+ */
+/*
+ Independent implementation of MD5 (RFC 1321).
+
+ This code implements the MD5 Algorithm defined in RFC 1321, whose
+ text is available at
+ http://www.ietf.org/rfc/rfc1321.txt
+ The code is derived from the text of the RFC, including the test suite
+ (section A.5) but excluding the rest of Appendix A. It does not include
+ any code or documentation that is identified in the RFC as being
+ copyrighted.
+
+ The original and principal author of md5.c is L. Peter Deutsch
+ <ghost@aladdin.com>. Other authors are noted in the change history
+ that follows (in reverse chronological order):
+
+ 2002-04-13 lpd Clarified derivation from RFC 1321; now handles byte order
+ either statically or dynamically; added missing #include <string.h>
+ in library.
+ 2002-03-11 lpd Corrected argument list for main(), and added int return
+ type, in test program and T value program.
+ 2002-02-21 lpd Added missing #include <stdio.h> in test program.
+ 2000-07-03 lpd Patched to eliminate warnings about "constant is
+ unsigned in ANSI C, signed in traditional"; made test program
+ self-checking.
+ 1999-11-04 lpd Edited comments slightly for automatic TOC extraction.
+ 1999-10-18 lpd Fixed typo in header comment (ansi2knr rather than md5).
+ 1999-05-03 lpd Original version.
+ */
+
+/*
+ * This package supports both compile-time and run-time determination of CPU
+ * byte order. If ARCH_IS_BIG_ENDIAN is defined as 0, the code will be
+ * compiled to run only on little-endian CPUs; if ARCH_IS_BIG_ENDIAN is
+ * defined as non-zero, the code will be compiled to run only on big-endian
+ * CPUs; if ARCH_IS_BIG_ENDIAN is not defined, the code will be compiled to
+ * run on either big- or little-endian CPUs, but will run slightly less
+ * efficiently on either one than if ARCH_IS_BIG_ENDIAN is defined.
+ */
+
+typedef unsigned char md5_byte_t; /* 8-bit byte */
+typedef unsigned int md5_word_t; /* 32-bit word */
+
+/* Define the state of the MD5 Algorithm. */
+typedef struct md5_state_s
+{
+ md5_word_t count[2]; /* message length in bits, lsw first */
+ md5_word_t abcd[4]; /* digest buffer */
+ md5_byte_t buf[64]; /* accumulate block */
+} md5_state_t;
+
+#undef BYTE_ORDER /* 1 = big-endian, -1 = little-endian, 0 = unknown */
+#ifdef ARCH_IS_BIG_ENDIAN
+# define BYTE_ORDER (ARCH_IS_BIG_ENDIAN ? 1 : -1)
+#else
+# define BYTE_ORDER 0
+#endif
+
+#define T_MASK ((md5_word_t)~0)
+#define T1 /* 0xd76aa478 */ (T_MASK ^ 0x28955b87)
+#define T2 /* 0xe8c7b756 */ (T_MASK ^ 0x173848a9)
+#define T3 0x242070db
+#define T4 /* 0xc1bdceee */ (T_MASK ^ 0x3e423111)
+#define T5 /* 0xf57c0faf */ (T_MASK ^ 0x0a83f050)
+#define T6 0x4787c62a
+#define T7 /* 0xa8304613 */ (T_MASK ^ 0x57cfb9ec)
+#define T8 /* 0xfd469501 */ (T_MASK ^ 0x02b96afe)
+#define T9 0x698098d8
+#define T10 /* 0x8b44f7af */ (T_MASK ^ 0x74bb0850)
+#define T11 /* 0xffff5bb1 */ (T_MASK ^ 0x0000a44e)
+#define T12 /* 0x895cd7be */ (T_MASK ^ 0x76a32841)
+#define T13 0x6b901122
+#define T14 /* 0xfd987193 */ (T_MASK ^ 0x02678e6c)
+#define T15 /* 0xa679438e */ (T_MASK ^ 0x5986bc71)
+#define T16 0x49b40821
+#define T17 /* 0xf61e2562 */ (T_MASK ^ 0x09e1da9d)
+#define T18 /* 0xc040b340 */ (T_MASK ^ 0x3fbf4cbf)
+#define T19 0x265e5a51
+#define T20 /* 0xe9b6c7aa */ (T_MASK ^ 0x16493855)
+#define T21 /* 0xd62f105d */ (T_MASK ^ 0x29d0efa2)
+#define T22 0x02441453
+#define T23 /* 0xd8a1e681 */ (T_MASK ^ 0x275e197e)
+#define T24 /* 0xe7d3fbc8 */ (T_MASK ^ 0x182c0437)
+#define T25 0x21e1cde6
+#define T26 /* 0xc33707d6 */ (T_MASK ^ 0x3cc8f829)
+#define T27 /* 0xf4d50d87 */ (T_MASK ^ 0x0b2af278)
+#define T28 0x455a14ed
+#define T29 /* 0xa9e3e905 */ (T_MASK ^ 0x561c16fa)
+#define T30 /* 0xfcefa3f8 */ (T_MASK ^ 0x03105c07)
+#define T31 0x676f02d9
+#define T32 /* 0x8d2a4c8a */ (T_MASK ^ 0x72d5b375)
+#define T33 /* 0xfffa3942 */ (T_MASK ^ 0x0005c6bd)
+#define T34 /* 0x8771f681 */ (T_MASK ^ 0x788e097e)
+#define T35 0x6d9d6122
+#define T36 /* 0xfde5380c */ (T_MASK ^ 0x021ac7f3)
+#define T37 /* 0xa4beea44 */ (T_MASK ^ 0x5b4115bb)
+#define T38 0x4bdecfa9
+#define T39 /* 0xf6bb4b60 */ (T_MASK ^ 0x0944b49f)
+#define T40 /* 0xbebfbc70 */ (T_MASK ^ 0x4140438f)
+#define T41 0x289b7ec6
+#define T42 /* 0xeaa127fa */ (T_MASK ^ 0x155ed805)
+#define T43 /* 0xd4ef3085 */ (T_MASK ^ 0x2b10cf7a)
+#define T44 0x04881d05
+#define T45 /* 0xd9d4d039 */ (T_MASK ^ 0x262b2fc6)
+#define T46 /* 0xe6db99e5 */ (T_MASK ^ 0x1924661a)
+#define T47 0x1fa27cf8
+#define T48 /* 0xc4ac5665 */ (T_MASK ^ 0x3b53a99a)
+#define T49 /* 0xf4292244 */ (T_MASK ^ 0x0bd6ddbb)
+#define T50 0x432aff97
+#define T51 /* 0xab9423a7 */ (T_MASK ^ 0x546bdc58)
+#define T52 /* 0xfc93a039 */ (T_MASK ^ 0x036c5fc6)
+#define T53 0x655b59c3
+#define T54 /* 0x8f0ccc92 */ (T_MASK ^ 0x70f3336d)
+#define T55 /* 0xffeff47d */ (T_MASK ^ 0x00100b82)
+#define T56 /* 0x85845dd1 */ (T_MASK ^ 0x7a7ba22e)
+#define T57 0x6fa87e4f
+#define T58 /* 0xfe2ce6e0 */ (T_MASK ^ 0x01d3191f)
+#define T59 /* 0xa3014314 */ (T_MASK ^ 0x5cfebceb)
+#define T60 0x4e0811a1
+#define T61 /* 0xf7537e82 */ (T_MASK ^ 0x08ac817d)
+#define T62 /* 0xbd3af235 */ (T_MASK ^ 0x42c50dca)
+#define T63 0x2ad7d2bb
+#define T64 /* 0xeb86d391 */ (T_MASK ^ 0x14792c6e)
+
+static void md5_process(md5_state_t* pms, const md5_byte_t* data /*[64]*/)
+{
+ md5_word_t a = pms->abcd[0], b = pms->abcd[1], c = pms->abcd[2],
+ d = pms->abcd[3];
+ md5_word_t t;
+#if BYTE_ORDER > 0
+ /* Define storage only for big-endian CPUs. */
+ md5_word_t X[16];
+#else
+ /* Define storage for little-endian or both types of CPUs. */
+ md5_word_t xbuf[16];
+ const md5_word_t* X;
+#endif
+
+ {
+#if BYTE_ORDER == 0
+ /*
+ * Determine dynamically whether this is a big-endian or
+ * little-endian machine, since we can use a more efficient
+ * algorithm on the latter.
+ */
+ static const int w = 1;
+
+ if (*((const md5_byte_t*)&w)) /* dynamic little-endian */
+#endif
+#if BYTE_ORDER <= 0 /* little-endian */
+ {
+ /*
+ * On little-endian machines, we can process properly aligned
+ * data without copying it.
+ */
+ if (!((data - (const md5_byte_t*)0) & 3)) {
+ /* data are properly aligned */
+ X = (const md5_word_t*)data;
+ } else {
+ /* not aligned */
+ memcpy(xbuf, data, 64);
+ X = xbuf;
+ }
+ }
+#endif
+#if BYTE_ORDER == 0
+ else /* dynamic big-endian */
+#endif
+#if BYTE_ORDER >= 0 /* big-endian */
+ {
+ /*
+ * On big-endian machines, we must arrange the bytes in the
+ * right order.
+ */
+ const md5_byte_t* xp = data;
+ int i;
+
+# if BYTE_ORDER == 0
+ X = xbuf; /* (dynamic only) */
+# else
+# define xbuf X /* (static only) */
+# endif
+ for (i = 0; i < 16; ++i, xp += 4)
+ xbuf[i] =
+ (md5_word_t)(xp[0] + (xp[1] << 8) + (xp[2] << 16) + (xp[3] << 24));
+ }
+#endif
+ }
+
+#define ROTATE_LEFT(x, n) (((x) << (n)) | ((x) >> (32 - (n))))
+
+/* Round 1. */
+/* Let [abcd k s i] denote the operation
+ a = b + ((a + F(b,c,d) + X[k] + T[i]) <<< s). */
+#define F(x, y, z) (((x) & (y)) | (~(x) & (z)))
+#define SET(a, b, c, d, k, s, Ti) \
+ t = a + F(b, c, d) + X[k] + (Ti); \
+ a = ROTATE_LEFT(t, s) + b
+ /* Do the following 16 operations. */
+ SET(a, b, c, d, 0, 7, T1);
+ SET(d, a, b, c, 1, 12, T2);
+ SET(c, d, a, b, 2, 17, T3);
+ SET(b, c, d, a, 3, 22, T4);
+ SET(a, b, c, d, 4, 7, T5);
+ SET(d, a, b, c, 5, 12, T6);
+ SET(c, d, a, b, 6, 17, T7);
+ SET(b, c, d, a, 7, 22, T8);
+ SET(a, b, c, d, 8, 7, T9);
+ SET(d, a, b, c, 9, 12, T10);
+ SET(c, d, a, b, 10, 17, T11);
+ SET(b, c, d, a, 11, 22, T12);
+ SET(a, b, c, d, 12, 7, T13);
+ SET(d, a, b, c, 13, 12, T14);
+ SET(c, d, a, b, 14, 17, T15);
+ SET(b, c, d, a, 15, 22, T16);
+#undef SET
+
+/* Round 2. */
+/* Let [abcd k s i] denote the operation
+ a = b + ((a + G(b,c,d) + X[k] + T[i]) <<< s). */
+#define G(x, y, z) (((x) & (z)) | ((y) & ~(z)))
+#define SET(a, b, c, d, k, s, Ti) \
+ t = a + G(b, c, d) + X[k] + (Ti); \
+ a = ROTATE_LEFT(t, s) + b
+ /* Do the following 16 operations. */
+ SET(a, b, c, d, 1, 5, T17);
+ SET(d, a, b, c, 6, 9, T18);
+ SET(c, d, a, b, 11, 14, T19);
+ SET(b, c, d, a, 0, 20, T20);
+ SET(a, b, c, d, 5, 5, T21);
+ SET(d, a, b, c, 10, 9, T22);
+ SET(c, d, a, b, 15, 14, T23);
+ SET(b, c, d, a, 4, 20, T24);
+ SET(a, b, c, d, 9, 5, T25);
+ SET(d, a, b, c, 14, 9, T26);
+ SET(c, d, a, b, 3, 14, T27);
+ SET(b, c, d, a, 8, 20, T28);
+ SET(a, b, c, d, 13, 5, T29);
+ SET(d, a, b, c, 2, 9, T30);
+ SET(c, d, a, b, 7, 14, T31);
+ SET(b, c, d, a, 12, 20, T32);
+#undef SET
+
+/* Round 3. */
+/* Let [abcd k s i] denote the operation
+ a = b + ((a + H(b,c,d) + X[k] + T[i]) <<< s). */
+#define H(x, y, z) ((x) ^ (y) ^ (z))
+#define SET(a, b, c, d, k, s, Ti) \
+ t = a + H(b, c, d) + X[k] + (Ti); \
+ a = ROTATE_LEFT(t, s) + b
+ /* Do the following 16 operations. */
+ SET(a, b, c, d, 5, 4, T33);
+ SET(d, a, b, c, 8, 11, T34);
+ SET(c, d, a, b, 11, 16, T35);
+ SET(b, c, d, a, 14, 23, T36);
+ SET(a, b, c, d, 1, 4, T37);
+ SET(d, a, b, c, 4, 11, T38);
+ SET(c, d, a, b, 7, 16, T39);
+ SET(b, c, d, a, 10, 23, T40);
+ SET(a, b, c, d, 13, 4, T41);
+ SET(d, a, b, c, 0, 11, T42);
+ SET(c, d, a, b, 3, 16, T43);
+ SET(b, c, d, a, 6, 23, T44);
+ SET(a, b, c, d, 9, 4, T45);
+ SET(d, a, b, c, 12, 11, T46);
+ SET(c, d, a, b, 15, 16, T47);
+ SET(b, c, d, a, 2, 23, T48);
+#undef SET
+
+/* Round 4. */
+/* Let [abcd k s i] denote the operation
+ a = b + ((a + I(b,c,d) + X[k] + T[i]) <<< s). */
+#define I(x, y, z) ((y) ^ ((x) | ~(z)))
+#define SET(a, b, c, d, k, s, Ti) \
+ t = a + I(b, c, d) + X[k] + (Ti); \
+ a = ROTATE_LEFT(t, s) + b
+ /* Do the following 16 operations. */
+ SET(a, b, c, d, 0, 6, T49);
+ SET(d, a, b, c, 7, 10, T50);
+ SET(c, d, a, b, 14, 15, T51);
+ SET(b, c, d, a, 5, 21, T52);
+ SET(a, b, c, d, 12, 6, T53);
+ SET(d, a, b, c, 3, 10, T54);
+ SET(c, d, a, b, 10, 15, T55);
+ SET(b, c, d, a, 1, 21, T56);
+ SET(a, b, c, d, 8, 6, T57);
+ SET(d, a, b, c, 15, 10, T58);
+ SET(c, d, a, b, 6, 15, T59);
+ SET(b, c, d, a, 13, 21, T60);
+ SET(a, b, c, d, 4, 6, T61);
+ SET(d, a, b, c, 11, 10, T62);
+ SET(c, d, a, b, 2, 15, T63);
+ SET(b, c, d, a, 9, 21, T64);
+#undef SET
+
+ /* Then perform the following additions. (That is, increment each
+ of the four registers by the value it had before this block
+ was started.) */
+ pms->abcd[0] += a;
+ pms->abcd[1] += b;
+ pms->abcd[2] += c;
+ pms->abcd[3] += d;
+}
+
+/* Initialize the algorithm. */
+static void md5_init(md5_state_t* pms)
+{
+ pms->count[0] = pms->count[1] = 0;
+ pms->abcd[0] = 0x67452301;
+ pms->abcd[1] = /*0xefcdab89*/ T_MASK ^ 0x10325476;
+ pms->abcd[2] = /*0x98badcfe*/ T_MASK ^ 0x67452301;
+ pms->abcd[3] = 0x10325476;
+}
+
+/* Append a string to the message. */
+static void md5_append(md5_state_t* pms, const md5_byte_t* data, size_t nbytes)
+{
+ const md5_byte_t* p = data;
+ size_t left = nbytes;
+ size_t offset = (pms->count[0] >> 3) & 63;
+ md5_word_t nbits = (md5_word_t)(nbytes << 3);
+
+ if (nbytes == 0)
+ return;
+
+ /* Update the message length. */
+ pms->count[1] += (md5_word_t)(nbytes >> 29);
+ pms->count[0] += nbits;
+ if (pms->count[0] < nbits)
+ pms->count[1]++;
+
+ /* Process an initial partial block. */
+ if (offset) {
+ size_t copy = (offset + nbytes > 64 ? 64 - offset : nbytes);
+
+ memcpy(pms->buf + offset, p, copy);
+ if (offset + copy < 64)
+ return;
+ p += copy;
+ left -= copy;
+ md5_process(pms, pms->buf);
+ }
+
+ /* Process full blocks. */
+ for (; left >= 64; p += 64, left -= 64)
+ md5_process(pms, p);
+
+ /* Process a final partial block. */
+ if (left)
+ memcpy(pms->buf, p, left);
+}
+
+/* Finish the message and return the digest. */
+static void md5_finish(md5_state_t* pms, md5_byte_t digest[16])
+{
+ static const md5_byte_t pad[64] = { 0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
+ md5_byte_t data[8];
+ int i;
+
+ /* Save the length before padding. */
+ for (i = 0; i < 8; ++i)
+ data[i] = (md5_byte_t)(pms->count[i >> 2] >> ((i & 3) << 3));
+ /* Pad to 56 bytes mod 64. */
+ md5_append(pms, pad, ((55 - (pms->count[0] >> 3)) & 63) + 1);
+ /* Append the length. */
+ md5_append(pms, data, 8);
+ for (i = 0; i < 16; ++i)
+ digest[i] = (md5_byte_t)(pms->abcd[i >> 2] >> ((i & 3) << 3));
+}
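/* (Editor's note: illustrative worked example, not part of the upstream
   KWSys sources or of this patch.)  For a 3-byte message such as "abc",
   count[0] is 24 bits, so md5_finish appends ((55 - 3) & 63) + 1 = 53 pad
   bytes (0x80 followed by 52 zeros) to reach 56 bytes, then the 8-byte
   little-endian bit count 0x18, completing exactly one 64-byte block. */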
+
+#if defined(__clang__) && !defined(__INTEL_COMPILER)
+# pragma clang diagnostic pop
+#endif
+
+/* Wrap up the MD5 state in our opaque structure. */
+struct kwsysMD5_s
+{
+ md5_state_t md5_state;
+};
+
+kwsysMD5* kwsysMD5_New(void)
+{
+ /* Allocate an MD5 state structure. */
+ kwsysMD5* md5 = (kwsysMD5*)malloc(sizeof(kwsysMD5));
+ if (!md5) {
+ return 0;
+ }
+ return md5;
+}
+
+void kwsysMD5_Delete(kwsysMD5* md5)
+{
+ /* Make sure we have an instance. */
+ if (!md5) {
+ return;
+ }
+
+ /* Free memory. */
+ free(md5);
+}
+
+void kwsysMD5_Initialize(kwsysMD5* md5)
+{
+ md5_init(&md5->md5_state);
+}
+
+void kwsysMD5_Append(kwsysMD5* md5, unsigned char const* data, int length)
+{
+ size_t dlen;
+ if (length < 0) {
+ dlen = strlen((char const*)data);
+ } else {
+ dlen = (size_t)length;
+ }
+ md5_append(&md5->md5_state, (md5_byte_t const*)data, dlen);
+}
+
+void kwsysMD5_Finalize(kwsysMD5* md5, unsigned char digest[16])
+{
+ md5_finish(&md5->md5_state, (md5_byte_t*)digest);
+}
+
+void kwsysMD5_FinalizeHex(kwsysMD5* md5, char buffer[32])
+{
+ unsigned char digest[16];
+ kwsysMD5_Finalize(md5, digest);
+ kwsysMD5_DigestToHex(digest, buffer);
+}
+
+void kwsysMD5_DigestToHex(unsigned char const digest[16], char buffer[32])
+{
+ /* Map from 4-bit index to hexadecimal representation. */
+ static char const hex[16] = { '0', '1', '2', '3', '4', '5', '6', '7',
+ '8', '9', 'a', 'b', 'c', 'd', 'e', 'f' };
+
+ /* Map each 4-bit block separately. */
+ char* out = buffer;
+ int i;
+ for (i = 0; i < 16; ++i) {
+ *out++ = hex[digest[i] >> 4];
+ *out++ = hex[digest[i] & 0xF];
+ }
+}
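/* (Editor's note: the following is an illustrative sketch added for this
   review; it is not part of the upstream KWSys sources or of this patch.
   It shows how the static helpers above fit together, checked against the
   RFC 1321 test vector MD5("abc") = 900150983cd24fb0d6963f7d28e17f72.
   Because md5_init/md5_append/md5_finish are static, such a check would
   have to live in this translation unit.) */
#if 0
static int md5_self_check(void)
{
  static const md5_byte_t expect[16] = { 0x90, 0x01, 0x50, 0x98, 0x3c, 0xd2,
                                         0x4f, 0xb0, 0xd6, 0x96, 0x3f, 0x7d,
                                         0x28, 0xe1, 0x7f, 0x72 };
  md5_state_t state;
  md5_byte_t digest[16];
  md5_init(&state);
  md5_append(&state, (const md5_byte_t*)"abc", 3);
  md5_finish(&state, digest);
  return memcmp(digest, expect, 16) == 0; /* 1 on success */
}
#endif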
diff --git a/test/API/driver/kwsys/MD5.h.in b/test/API/driver/kwsys/MD5.h.in
new file mode 100644
index 0000000..7646f12
--- /dev/null
+++ b/test/API/driver/kwsys/MD5.h.in
@@ -0,0 +1,97 @@
+/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying
+ file Copyright.txt or https://cmake.org/licensing#kwsys for details. */
+#ifndef @KWSYS_NAMESPACE@_MD5_h
+#define @KWSYS_NAMESPACE@_MD5_h
+
+#include <@KWSYS_NAMESPACE@/Configure.h>
+
+/* Redefine all public interface symbol names to be in the proper
+ namespace. These macros are used internally to kwsys only, and are
+ not visible to user code. Use kwsysHeaderDump.pl to reproduce
+ these macros after making changes to the interface. */
+#if !defined(KWSYS_NAMESPACE)
+# define kwsys_ns(x) @KWSYS_NAMESPACE@##x
+# define kwsysEXPORT @KWSYS_NAMESPACE@_EXPORT
+#endif
+#if !@KWSYS_NAMESPACE@_NAME_IS_KWSYS
+# define kwsysMD5 kwsys_ns(MD5)
+# define kwsysMD5_s kwsys_ns(MD5_s)
+# define kwsysMD5_New kwsys_ns(MD5_New)
+# define kwsysMD5_Delete kwsys_ns(MD5_Delete)
+# define kwsysMD5_Initialize kwsys_ns(MD5_Initialize)
+# define kwsysMD5_Append kwsys_ns(MD5_Append)
+# define kwsysMD5_Finalize kwsys_ns(MD5_Finalize)
+# define kwsysMD5_FinalizeHex kwsys_ns(MD5_FinalizeHex)
+# define kwsysMD5_DigestToHex kwsys_ns(MD5_DigestToHex)
+#endif
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+/**
+ * MD5 state data structure.
+ */
+typedef struct kwsysMD5_s kwsysMD5;
+
+/**
+ * Create a new MD5 instance. The returned instance is not initialized.
+ */
+kwsysEXPORT kwsysMD5* kwsysMD5_New(void);
+
+/**
+ * Delete an old MD5 instance.
+ */
+kwsysEXPORT void kwsysMD5_Delete(kwsysMD5* md5);
+
+/**
+ * Initialize a new MD5 digest.
+ */
+kwsysEXPORT void kwsysMD5_Initialize(kwsysMD5* md5);
+
+/**
+ * Append data to an MD5 digest. If the given length is negative,
+ * data will be read up to but not including a terminating null.
+ */
+kwsysEXPORT void kwsysMD5_Append(kwsysMD5* md5, unsigned char const* data,
+ int length);
+
+/**
+ * Finalize an MD5 digest and get the 16-byte hash value.
+ */
+kwsysEXPORT void kwsysMD5_Finalize(kwsysMD5* md5, unsigned char digest[16]);
+
+/**
+ * Finalize an MD5 digest and get the 32-character hexadecimal representation.
+ */
+kwsysEXPORT void kwsysMD5_FinalizeHex(kwsysMD5* md5, char buffer[32]);
+
+/**
+ * Convert a 16-byte MD5 digest value to its 32-character hexadecimal representation.
+ */
+kwsysEXPORT void kwsysMD5_DigestToHex(unsigned char const digest[16],
+ char buffer[32]);
+
+#if defined(__cplusplus)
+} /* extern "C" */
+#endif
+
+/* If we are building a kwsys .c or .cxx file, let it use these macros.
+ Otherwise, undefine them to keep the namespace clean. */
+#if !defined(KWSYS_NAMESPACE)
+# undef kwsys_ns
+# undef kwsysEXPORT
+# if !@KWSYS_NAMESPACE@_NAME_IS_KWSYS
+# undef kwsysMD5
+# undef kwsysMD5_s
+# undef kwsysMD5_New
+# undef kwsysMD5_Delete
+# undef kwsysMD5_Initialize
+# undef kwsysMD5_Append
+# undef kwsysMD5_Finalize
+# undef kwsysMD5_FinalizeHex
+# undef kwsysMD5_DigestToHex
+# endif
+#endif
+
+#endif
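/* (Editor's note: the following usage sketch is added for this review and
   is not part of the upstream KWSys sources or of this patch.  It assumes
   the library was configured with @KWSYS_NAMESPACE@ left as plain "kwsys";
   with a different namespace the header path and symbol prefix change
   accordingly.) */
#if 0
#include <kwsys/MD5.h>
#include <stdio.h>

int main(void)
{
  char hex[33];
  kwsysMD5* md5 = kwsysMD5_New();
  kwsysMD5_Initialize(md5);
  /* Negative length: read the data up to its terminating null. */
  kwsysMD5_Append(md5, (unsigned char const*)"abc", -1);
  kwsysMD5_FinalizeHex(md5, hex); /* fills 32 hex chars, no terminator */
  hex[32] = '\0';
  kwsysMD5_Delete(md5);
  printf("%s\n", hex); /* prints 900150983cd24fb0d6963f7d28e17f72 */
  return 0;
}
#endif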
diff --git a/test/API/driver/kwsys/Process.h.in b/test/API/driver/kwsys/Process.h.in
new file mode 100644
index 0000000..73ea9db
--- /dev/null
+++ b/test/API/driver/kwsys/Process.h.in
@@ -0,0 +1,544 @@
+/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying
+ file Copyright.txt or https://cmake.org/licensing#kwsys for details. */
+#ifndef @KWSYS_NAMESPACE@_Process_h
+#define @KWSYS_NAMESPACE@_Process_h
+
+#include <@KWSYS_NAMESPACE@/Configure.h>
+
+/* Redefine all public interface symbol names to be in the proper
+ namespace. These macros are used internally to kwsys only, and are
+ not visible to user code. Use kwsysHeaderDump.pl to reproduce
+ these macros after making changes to the interface. */
+#if !defined(KWSYS_NAMESPACE)
+# define kwsys_ns(x) @KWSYS_NAMESPACE@##x
+# define kwsysEXPORT @KWSYS_NAMESPACE@_EXPORT
+#endif
+#if !@KWSYS_NAMESPACE@_NAME_IS_KWSYS
+# define kwsysProcess kwsys_ns(Process)
+# define kwsysProcess_s kwsys_ns(Process_s)
+# define kwsysProcess_New kwsys_ns(Process_New)
+# define kwsysProcess_Delete kwsys_ns(Process_Delete)
+# define kwsysProcess_SetCommand kwsys_ns(Process_SetCommand)
+# define kwsysProcess_AddCommand kwsys_ns(Process_AddCommand)
+# define kwsysProcess_SetTimeout kwsys_ns(Process_SetTimeout)
+# define kwsysProcess_SetWorkingDirectory \
+ kwsys_ns(Process_SetWorkingDirectory)
+# define kwsysProcess_SetPipeFile kwsys_ns(Process_SetPipeFile)
+# define kwsysProcess_SetPipeNative kwsys_ns(Process_SetPipeNative)
+# define kwsysProcess_SetPipeShared kwsys_ns(Process_SetPipeShared)
+# define kwsysProcess_Option_Detach kwsys_ns(Process_Option_Detach)
+# define kwsysProcess_Option_HideWindow kwsys_ns(Process_Option_HideWindow)
+# define kwsysProcess_Option_MergeOutput kwsys_ns(Process_Option_MergeOutput)
+# define kwsysProcess_Option_Verbatim kwsys_ns(Process_Option_Verbatim)
+# define kwsysProcess_Option_CreateProcessGroup \
+ kwsys_ns(Process_Option_CreateProcessGroup)
+# define kwsysProcess_GetOption kwsys_ns(Process_GetOption)
+# define kwsysProcess_SetOption kwsys_ns(Process_SetOption)
+# define kwsysProcess_Option_e kwsys_ns(Process_Option_e)
+# define kwsysProcess_State_Starting kwsys_ns(Process_State_Starting)
+# define kwsysProcess_State_Error kwsys_ns(Process_State_Error)
+# define kwsysProcess_State_Exception kwsys_ns(Process_State_Exception)
+# define kwsysProcess_State_Executing kwsys_ns(Process_State_Executing)
+# define kwsysProcess_State_Exited kwsys_ns(Process_State_Exited)
+# define kwsysProcess_State_Expired kwsys_ns(Process_State_Expired)
+# define kwsysProcess_State_Killed kwsys_ns(Process_State_Killed)
+# define kwsysProcess_State_Disowned kwsys_ns(Process_State_Disowned)
+# define kwsysProcess_State_e kwsys_ns(Process_State_e)
+# define kwsysProcess_Exception_None kwsys_ns(Process_Exception_None)
+# define kwsysProcess_Exception_Fault kwsys_ns(Process_Exception_Fault)
+# define kwsysProcess_Exception_Illegal kwsys_ns(Process_Exception_Illegal)
+# define kwsysProcess_Exception_Interrupt \
+ kwsys_ns(Process_Exception_Interrupt)
+# define kwsysProcess_Exception_Numerical \
+ kwsys_ns(Process_Exception_Numerical)
+# define kwsysProcess_Exception_Other kwsys_ns(Process_Exception_Other)
+# define kwsysProcess_Exception_e kwsys_ns(Process_Exception_e)
+# define kwsysProcess_GetState kwsys_ns(Process_GetState)
+# define kwsysProcess_GetExitException kwsys_ns(Process_GetExitException)
+# define kwsysProcess_GetExitCode kwsys_ns(Process_GetExitCode)
+# define kwsysProcess_GetExitValue kwsys_ns(Process_GetExitValue)
+# define kwsysProcess_GetErrorString kwsys_ns(Process_GetErrorString)
+# define kwsysProcess_GetExceptionString kwsys_ns(Process_GetExceptionString)
+# define kwsysProcess_GetStateByIndex kwsys_ns(Process_GetStateByIndex)
+# define kwsysProcess_GetExitExceptionByIndex \
+ kwsys_ns(Process_GetExitExceptionByIndex)
+# define kwsysProcess_GetExitCodeByIndex kwsys_ns(Process_GetExitCodeByIndex)
+# define kwsysProcess_GetExitValueByIndex \
+ kwsys_ns(Process_GetExitValueByIndex)
+# define kwsysProcess_GetExceptionStringByIndex \
+ kwsys_ns(Process_GetExceptionStringByIndex)
+# define kwsysProcess_GetExitCodeByIndex kwsys_ns(Process_GetExitCodeByIndex)
+# define kwsysProcess_Execute kwsys_ns(Process_Execute)
+# define kwsysProcess_Disown kwsys_ns(Process_Disown)
+# define kwsysProcess_WaitForData kwsys_ns(Process_WaitForData)
+# define kwsysProcess_Pipes_e kwsys_ns(Process_Pipes_e)
+# define kwsysProcess_Pipe_None kwsys_ns(Process_Pipe_None)
+# define kwsysProcess_Pipe_STDIN kwsys_ns(Process_Pipe_STDIN)
+# define kwsysProcess_Pipe_STDOUT kwsys_ns(Process_Pipe_STDOUT)
+# define kwsysProcess_Pipe_STDERR kwsys_ns(Process_Pipe_STDERR)
+# define kwsysProcess_Pipe_Timeout kwsys_ns(Process_Pipe_Timeout)
+# define kwsysProcess_Pipe_Handle kwsys_ns(Process_Pipe_Handle)
+# define kwsysProcess_WaitForExit kwsys_ns(Process_WaitForExit)
+# define kwsysProcess_Interrupt kwsys_ns(Process_Interrupt)
+# define kwsysProcess_Kill kwsys_ns(Process_Kill)
+# define kwsysProcess_KillPID kwsys_ns(Process_KillPID)
+# define kwsysProcess_ResetStartTime kwsys_ns(Process_ResetStartTime)
+#endif
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+/**
+ * Process control data structure.
+ */
+typedef struct kwsysProcess_s kwsysProcess;
+
+/* Platform-specific pipe handle type. */
+#if defined(_WIN32) && !defined(__CYGWIN__)
+typedef void* kwsysProcess_Pipe_Handle;
+#else
+typedef int kwsysProcess_Pipe_Handle;
+#endif
+
+/**
+ * Create a new Process instance.
+ */
+kwsysEXPORT kwsysProcess* kwsysProcess_New(void);
+
+/**
+ * Delete an existing Process instance. If the instance is currently
+ * executing a process, this blocks until the process terminates.
+ */
+kwsysEXPORT void kwsysProcess_Delete(kwsysProcess* cp);
+
+/**
+ * Set the command line to be executed. Argument is an array of
+ * pointers to the command and each argument. The array must end with
+ * a NULL pointer. Any previous command lines are removed. Returns
+ * 1 for success and 0 otherwise.
+ */
+kwsysEXPORT int kwsysProcess_SetCommand(kwsysProcess* cp,
+ char const* const* command);
+
+/**
+ * Add a command line to be executed. Argument is an array of
+ * pointers to the command and each argument. The array must end with
+ * a NULL pointer. If this is not the first command added, its
+ * standard input will be connected to the standard output of the
+ * previous command. Returns 1 for success and 0 otherwise.
+ */
+kwsysEXPORT int kwsysProcess_AddCommand(kwsysProcess* cp,
+ char const* const* command);
+
+/**
+ * Set the timeout in seconds for the child process. The timeout
+ * period begins when the child is executed. If the child has not
+ * terminated when the timeout expires, it will be killed. A
+ * non-positive (<= 0) value will disable the timeout.
+ */
+kwsysEXPORT void kwsysProcess_SetTimeout(kwsysProcess* cp, double timeout);
+
+/**
+ * Set the working directory for the child process. The working
+ * directory can be absolute or relative to the current directory.
+ * Returns 1 for success and 0 for failure.
+ */
+kwsysEXPORT int kwsysProcess_SetWorkingDirectory(kwsysProcess* cp,
+ const char* dir);
+
+/**
+ * Set the name of a file to be attached to the given pipe. Returns 1
+ * for success and 0 for failure.
+ */
+kwsysEXPORT int kwsysProcess_SetPipeFile(kwsysProcess* cp, int pipe,
+ const char* file);
+
+/**
+ * Set whether the given pipe in the child is shared with the parent
+ * process. The default is no for Pipe_STDOUT and Pipe_STDERR and yes
+ * for Pipe_STDIN.
+ */
+kwsysEXPORT void kwsysProcess_SetPipeShared(kwsysProcess* cp, int pipe,
+ int shared);
+
+/**
+ * Specify a platform-specific native pipe for use as one of the child
+ * interface pipes. The native pipe is specified by an array of two
+ * descriptors or handles. The first entry in the array (index 0)
+ * should be the read end of the pipe. The second entry in the array
+ * (index 1) should be the write end of the pipe. If a null pointer
+ * is given the option will be disabled.
+ *
+ * For Pipe_STDIN the native pipe is connected to the first child in
+ * the pipeline as its stdin. After the children are created the
+ * write end of the pipe will be closed in the child process and the
+ * read end will be closed in the parent process.
+ *
+ * For Pipe_STDOUT and Pipe_STDERR the pipe is connected to the last
+ * child as its stdout or stderr. After the children are created the
+ * write end of the pipe will be closed in the parent process and the
+ * read end will be closed in the child process.
+ */
+kwsysEXPORT void kwsysProcess_SetPipeNative(kwsysProcess* cp, int pipe,
+ kwsysProcess_Pipe_Handle p[2]);
+
+/**
+ * Get/Set a possibly platform-specific option. Possible options are:
+ *
+ * kwsysProcess_Option_Detach = Whether to detach the process.
+ * 0 = No (default)
+ * 1 = Yes
+ *
+ * kwsysProcess_Option_HideWindow = Whether to hide window on Windows.
+ * 0 = No (default)
+ * 1 = Yes
+ *
+ * kwsysProcess_Option_MergeOutput = Whether to merge stdout/stderr.
+ * No content will be returned as stderr.
+ * Any actual stderr will be on stdout.
+ * 0 = No (default)
+ * 1 = Yes
+ *
+ * kwsysProcess_Option_Verbatim = Whether SetCommand and AddCommand
+ * should treat the first argument
+ * as a verbatim command line
+ * and ignore the rest of the arguments.
+ * 0 = No (default)
+ * 1 = Yes
+ *
+ * kwsysProcess_Option_CreateProcessGroup = Whether to place the process in a
+ * new process group. This is
+ * useful if you want to send Ctrl+C
+ * to the process. On UNIX, also
+ * places the process in a new
+ * session.
+ * 0 = No (default)
+ * 1 = Yes
+ */
+kwsysEXPORT int kwsysProcess_GetOption(kwsysProcess* cp, int optionId);
+kwsysEXPORT void kwsysProcess_SetOption(kwsysProcess* cp, int optionId,
+ int value);
+enum kwsysProcess_Option_e
+{
+ kwsysProcess_Option_HideWindow,
+ kwsysProcess_Option_Detach,
+ kwsysProcess_Option_MergeOutput,
+ kwsysProcess_Option_Verbatim,
+ kwsysProcess_Option_CreateProcessGroup
+};
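/* (Editor's note: illustrative sketch added for this review, not part of
   the upstream KWSys sources or of this patch.  The "some-daemon" command
   name is hypothetical and error checking is omitted.)  Options must be
   set before Execute; a detached child can then be released with Disown
   so that Delete does not wait for it. */
#if 0
static void example_detach(void)
{
  const char* cmd[] = { "some-daemon", 0 };
  kwsysProcess* cp = kwsysProcess_New();
  kwsysProcess_SetCommand(cp, cmd);
  kwsysProcess_SetOption(cp, kwsysProcess_Option_Detach, 1);
  kwsysProcess_Execute(cp);
  kwsysProcess_Disown(cp); /* stop managing the child; it keeps running */
  kwsysProcess_Delete(cp);
}
#endif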
+
+/**
+ * Get the current state of the Process instance. Possible states are:
+ *
+ * kwsysProcess_State_Starting = Execute has not yet been called.
+ * kwsysProcess_State_Error = Error administrating the child process.
+ * kwsysProcess_State_Exception = Child process exited abnormally.
+ * kwsysProcess_State_Executing = Child process is currently running.
+ * kwsysProcess_State_Exited = Child process exited normally.
+ * kwsysProcess_State_Expired = Child process's timeout expired.
+ * kwsysProcess_State_Killed = Child process terminated by Kill method.
+ * kwsysProcess_State_Disowned = Child is no longer managed by this object.
+ */
+kwsysEXPORT int kwsysProcess_GetState(kwsysProcess* cp);
+enum kwsysProcess_State_e
+{
+ kwsysProcess_State_Starting,
+ kwsysProcess_State_Error,
+ kwsysProcess_State_Exception,
+ kwsysProcess_State_Executing,
+ kwsysProcess_State_Exited,
+ kwsysProcess_State_Expired,
+ kwsysProcess_State_Killed,
+ kwsysProcess_State_Disowned
+};
+
+/**
+ * When GetState returns "Exception", this method returns a
+ * platform-independent description of the exceptional behavior that
+ * caused the child to terminate abnormally. Possible exceptions are:
+ *
+ * kwsysProcess_Exception_None = No exceptional behavior occurred.
+ * kwsysProcess_Exception_Fault = Child crashed with a memory fault.
+ * kwsysProcess_Exception_Illegal = Child crashed with an illegal
+ * instruction.
+ * kwsysProcess_Exception_Interrupt = Child was interrupted by user
+ * (Ctrl-C/Break).
+ * kwsysProcess_Exception_Numerical = Child crashed with a numerical
+ * exception.
+ * kwsysProcess_Exception_Other = Child terminated for another reason.
+ */
+kwsysEXPORT int kwsysProcess_GetExitException(kwsysProcess* cp);
+enum kwsysProcess_Exception_e
+{
+ kwsysProcess_Exception_None,
+ kwsysProcess_Exception_Fault,
+ kwsysProcess_Exception_Illegal,
+ kwsysProcess_Exception_Interrupt,
+ kwsysProcess_Exception_Numerical,
+ kwsysProcess_Exception_Other
+};
+
+/**
+ * When GetState returns "Exited" or "Exception", this method returns
+ * the platform-specific raw exit code of the process. UNIX platforms
+ * should use WIFEXITED/WEXITSTATUS and WIFSIGNALED/WTERMSIG to access
+ * this value. Windows users should compare the value to the various
+ * EXCEPTION_* values.
+ *
+ * If GetState returns "Exited", use GetExitValue to get the
+ * platform-independent child return value.
+ */
+kwsysEXPORT int kwsysProcess_GetExitCode(kwsysProcess* cp);
+
+/**
+ * When GetState returns "Exited", this method returns the child's
+ * platform-independent exit code (such as the value returned by the
+ * child's main).
+ */
+kwsysEXPORT int kwsysProcess_GetExitValue(kwsysProcess* cp);
+
+/**
+ * When GetState returns "Error", this method returns a string
+ * describing the problem. Otherwise, it returns NULL.
+ */
+kwsysEXPORT const char* kwsysProcess_GetErrorString(kwsysProcess* cp);
+
+/**
+ * When GetState returns "Exception", this method returns a string
+ * describing the problem. Otherwise, it returns NULL.
+ */
+kwsysEXPORT const char* kwsysProcess_GetExceptionString(kwsysProcess* cp);
+
+/**
+ * Get the state of the process at the given index in the pipeline.
+ * Possible states are:
+ *
+ * kwsysProcess_StateByIndex_Starting = Execute has not yet been called.
+ * kwsysProcess_StateByIndex_Exception = Child process exited abnormally.
+ * kwsysProcess_StateByIndex_Exited = Child process exited normally.
+ * kwsysProcess_StateByIndex_Error = Error getting the child return code.
+ */
+kwsysEXPORT int kwsysProcess_GetStateByIndex(kwsysProcess* cp, int idx);
+enum kwsysProcess_StateByIndex_e
+{
+ kwsysProcess_StateByIndex_Starting = kwsysProcess_State_Starting,
+ kwsysProcess_StateByIndex_Exception = kwsysProcess_State_Exception,
+ kwsysProcess_StateByIndex_Exited = kwsysProcess_State_Exited,
+ kwsysProcess_StateByIndex_Error = kwsysProcess_State_Error
+};
+
+/**
+ * When GetStateByIndex returns "Exception" for the given index, this
+ * method returns a platform-independent description of the exceptional
+ * behavior that caused that child to terminate abnormally. Possible
+ * exceptions are:
+ *
+ * kwsysProcess_Exception_None = No exceptional behavior occurred.
+ * kwsysProcess_Exception_Fault = Child crashed with a memory fault.
+ * kwsysProcess_Exception_Illegal = Child crashed with an illegal
+ * instruction.
+ * kwsysProcess_Exception_Interrupt = Child was interrupted by user
+ * (Ctrl-C/Break).
+ * kwsysProcess_Exception_Numerical = Child crashed with a numerical
+ * exception.
+ * kwsysProcess_Exception_Other = Child terminated for another reason.
+ */
+kwsysEXPORT int kwsysProcess_GetExitExceptionByIndex(kwsysProcess* cp,
+ int idx);
+
+/**
+ * When GetStateByIndex returns "Exited" or "Exception" for the given
+ * index, this method returns the platform-specific raw exit code of that
+ * process. UNIX platforms
+ * should use WIFEXITED/WEXITSTATUS and WIFSIGNALED/WTERMSIG to access
+ * this value. Windows users should compare the value to the various
+ * EXCEPTION_* values.
+ *
+ * If GetStateByIndex returns "Exited", use GetExitValueByIndex to get
+ * the platform-independent child return value.
+ */
+kwsysEXPORT int kwsysProcess_GetExitCodeByIndex(kwsysProcess* cp, int idx);
+
+/**
+ * When GetStateByIndex returns "Exited" for the given index, this
+ * method returns that child's
+ * platform-independent exit code (such as the value returned by the
+ * child's main).
+ */
+kwsysEXPORT int kwsysProcess_GetExitValueByIndex(kwsysProcess* cp, int idx);
+
+/**
+ * When GetStateByIndex returns "Exception" for the given index, this
+ * method returns a string
+ * describing the problem. Otherwise, it returns NULL.
+ */
+kwsysEXPORT const char* kwsysProcess_GetExceptionStringByIndex(
+ kwsysProcess* cp, int idx);
+
+/**
+ * Start executing the child process.
+ */
+kwsysEXPORT void kwsysProcess_Execute(kwsysProcess* cp);
+
+/**
+ * Stop management of a detached child process. This closes any pipes
+ * being read. If the child was not created with the
+ * kwsysProcess_Option_Detach option, this method does nothing. This
+ * is because disowning a non-detached process will cause the child
+ * exit signal to be left unhandled until this process exits.
+ */
+kwsysEXPORT void kwsysProcess_Disown(kwsysProcess* cp);
+
+/**
+ * Block until data are available on a pipe, a timeout expires, or the
+ * child process terminates. Arguments are as follows:
+ *
+ * data = If data are read, the pointer to which this points is
+ * set to point to the data.
+ * length = If data are read, the integer to which this points is
+ * set to the length of the data read.
+ * timeout = Specifies the maximum time this call may block. Upon
+ * return after reading data, the time elapsed is subtracted
+ * from the timeout value. If this timeout expires, the
+ * value is set to 0. A NULL pointer passed for this argument
+ * indicates no timeout for the call. A negative or zero
+ * value passed for this argument may be used for polling
+ * and will always return immediately.
+ *
+ * Return value will be one of:
+ *
+ * Pipe_None = No more data will be available from the child process,
+ * ( == 0) or no process has been executed. WaitForExit should
+ * be called to wait for the process to terminate.
+ * Pipe_STDOUT = Data have been read from the child's stdout pipe.
+ * Pipe_STDERR = Data have been read from the child's stderr pipe.
+ * Pipe_Timeout = No data available within timeout specified for the
+ * call. Time elapsed has been subtracted from timeout
+ * argument.
+ */
+kwsysEXPORT int kwsysProcess_WaitForData(kwsysProcess* cp, char** data,
+ int* length, double* timeout);
+enum kwsysProcess_Pipes_e
+{
+ kwsysProcess_Pipe_None,
+ kwsysProcess_Pipe_STDIN,
+ kwsysProcess_Pipe_STDOUT,
+ kwsysProcess_Pipe_STDERR,
+ kwsysProcess_Pipe_Timeout = 255
+};
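/* (Editor's note: illustrative sketch added for this review, not part of
   the upstream KWSys sources or of this patch; it needs <stdio.h> and
   omits error checking.)  The canonical read loop for the documentation
   above: call WaitForData until it returns Pipe_None, forwarding stdout
   and stderr data and stopping early if the timeout expires. */
#if 0
static void example_drain(kwsysProcess* cp)
{
  char* data;
  int length;
  double timeout = 10.0; /* seconds; pass NULL for no timeout */
  int pipe;
  while ((pipe = kwsysProcess_WaitForData(cp, &data, &length, &timeout)) !=
         kwsysProcess_Pipe_None) {
    if (pipe == kwsysProcess_Pipe_Timeout) {
      break; /* remaining timeout reached zero */
    }
    fwrite(data, 1, (size_t)length, stdout); /* STDOUT or STDERR data */
  }
}
#endif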
+
+/**
+ * Block until the child process terminates or the given timeout
+ * expires. If no process is running, returns immediately. The
+ * argument is:
+ *
+ * timeout = Specifies the maximum time this call may block. Upon
+ * returning due to child termination, the elapsed time
+ * is subtracted from the given value. A NULL pointer
+ * passed for this argument indicates no timeout for the
+ * call.
+ *
+ * Return value will be one of:
+ *
+ * 0 = Child did not terminate within timeout specified for
+ * the call. Time elapsed has been subtracted from timeout
+ * argument.
+ * 1 = Child has terminated or was not running.
+ */
+kwsysEXPORT int kwsysProcess_WaitForExit(kwsysProcess* cp, double* timeout);
+
+/**
+ * Interrupt the process group for the child process that is currently
+ * running by sending it the appropriate operating-system-specific signal.
+ * The caller should call WaitForExit after this returns to wait for the
+ * child to terminate.
+ *
+ * WARNING: If you didn't specify kwsysProcess_Option_CreateProcessGroup,
+ * you will interrupt your own process group.
+ */
+kwsysEXPORT void kwsysProcess_Interrupt(kwsysProcess* cp);
+
+/**
+ * Forcefully terminate the child process that is currently running.
+ * The caller should call WaitForExit after this returns to wait for
+ * the child to terminate.
+ */
+kwsysEXPORT void kwsysProcess_Kill(kwsysProcess* cp);
+
+/**
+ * Same as kwsysProcess_Kill, but uses the given process ID to locate the
+ * process to terminate.
+ * @see kwsysProcess_Kill(kwsysProcess* cp)
+ */
+kwsysEXPORT void kwsysProcess_KillPID(unsigned long);
+
+/**
+ * Reset the start time of the child process to the current time.
+ */
+kwsysEXPORT void kwsysProcess_ResetStartTime(kwsysProcess* cp);
+
+#if defined(__cplusplus)
+} /* extern "C" */
+#endif
+
+/* If we are building a kwsys .c or .cxx file, let it use these macros.
+ Otherwise, undefine them to keep the namespace clean. */
+#if !defined(KWSYS_NAMESPACE)
+# undef kwsys_ns
+# undef kwsysEXPORT
+# if !@KWSYS_NAMESPACE@_NAME_IS_KWSYS
+# undef kwsysProcess
+# undef kwsysProcess_s
+# undef kwsysProcess_New
+# undef kwsysProcess_Delete
+# undef kwsysProcess_SetCommand
+# undef kwsysProcess_AddCommand
+# undef kwsysProcess_SetTimeout
+# undef kwsysProcess_SetWorkingDirectory
+# undef kwsysProcess_SetPipeFile
+# undef kwsysProcess_SetPipeNative
+# undef kwsysProcess_SetPipeShared
+# undef kwsysProcess_Option_Detach
+# undef kwsysProcess_Option_HideWindow
+# undef kwsysProcess_Option_MergeOutput
+# undef kwsysProcess_Option_Verbatim
+# undef kwsysProcess_Option_CreateProcessGroup
+# undef kwsysProcess_GetOption
+# undef kwsysProcess_SetOption
+# undef kwsysProcess_Option_e
+# undef kwsysProcess_State_Starting
+# undef kwsysProcess_State_Error
+# undef kwsysProcess_State_Exception
+# undef kwsysProcess_State_Executing
+# undef kwsysProcess_State_Exited
+# undef kwsysProcess_State_Expired
+# undef kwsysProcess_State_Killed
+# undef kwsysProcess_State_Disowned
+# undef kwsysProcess_GetState
+# undef kwsysProcess_State_e
+# undef kwsysProcess_Exception_None
+# undef kwsysProcess_Exception_Fault
+# undef kwsysProcess_Exception_Illegal
+# undef kwsysProcess_Exception_Interrupt
+# undef kwsysProcess_Exception_Numerical
+# undef kwsysProcess_Exception_Other
+# undef kwsysProcess_GetExitException
+# undef kwsysProcess_Exception_e
+# undef kwsysProcess_GetExitCode
+# undef kwsysProcess_GetExitValue
+# undef kwsysProcess_GetErrorString
+# undef kwsysProcess_GetExceptionString
+# undef kwsysProcess_Execute
+# undef kwsysProcess_Disown
+# undef kwsysProcess_WaitForData
+# undef kwsysProcess_Pipes_e
+# undef kwsysProcess_Pipe_None
+# undef kwsysProcess_Pipe_STDIN
+# undef kwsysProcess_Pipe_STDOUT
+# undef kwsysProcess_Pipe_STDERR
+# undef kwsysProcess_Pipe_Timeout
+# undef kwsysProcess_Pipe_Handle
+# undef kwsysProcess_WaitForExit
+# undef kwsysProcess_Interrupt
+# undef kwsysProcess_Kill
+# undef kwsysProcess_ResetStartTime
+# endif
+#endif
+
+#endif
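/* (Editor's note: the following end-to-end sketch is added for this review
   and is not part of the upstream KWSys sources or of this patch.  It
   assumes a plain "kwsys" namespace; the "ls" command is only an example,
   and error checking is minimal.) */
#if 0
#include <kwsys/Process.h>
#include <stdio.h>

int main(void)
{
  const char* cmd[] = { "ls", "-l", 0 };
  char* data;
  int length;
  kwsysProcess* cp = kwsysProcess_New();
  if (!cp) {
    return 1;
  }
  kwsysProcess_SetCommand(cp, cmd);
  kwsysProcess_SetTimeout(cp, 30.0); /* kill the child after 30 seconds */
  kwsysProcess_Execute(cp);
  /* NULL timeout: block until the child closes its output pipes. */
  while (kwsysProcess_WaitForData(cp, &data, &length, 0) !=
         kwsysProcess_Pipe_None) {
    fwrite(data, 1, (size_t)length, stdout);
  }
  kwsysProcess_WaitForExit(cp, 0);
  if (kwsysProcess_GetState(cp) == kwsysProcess_State_Exited) {
    printf("exit value: %d\n", kwsysProcess_GetExitValue(cp));
  }
  kwsysProcess_Delete(cp);
  return 0;
}
#endif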
diff --git a/test/API/driver/kwsys/ProcessUNIX.c b/test/API/driver/kwsys/ProcessUNIX.c
new file mode 100644
index 0000000..100eddc
--- /dev/null
+++ b/test/API/driver/kwsys/ProcessUNIX.c
@@ -0,0 +1,2920 @@
+/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying
+ file Copyright.txt or https://cmake.org/licensing#kwsys for details. */
+#include "kwsysPrivate.h"
+#include KWSYS_HEADER(Process.h)
+#include KWSYS_HEADER(System.h)
+
+/* Work-around CMake dependency scanning limitation. This must
+ duplicate the above list of headers. */
+#if 0
+# include "Process.h.in"
+# include "System.h.in"
+#endif
+
+/*
+
+Implementation for UNIX
+
+On UNIX, a child process is forked to exec the program. Three output
+pipes are read by the parent process using a select call to block
+until data are ready. Two of the pipes are stdout and stderr for the
+child. The third is a special pipe populated by a signal handler to
+indicate that a child has terminated. This is used in conjunction
+with the timeout on the select call to implement a timeout for the
+program even when it closes stdout and stderr, while avoiding races.
+
+*/
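/* (Editor's note: illustrative sketch added for this review, not part of
   the upstream KWSys sources or of this patch; all names are hypothetical
   and signal/pipe setup is omitted.)  The third pipe described above is
   the classic self-pipe pattern: a SIGCHLD handler writes one byte to a
   pipe whose read end is watched by the same select() call that watches
   the child's stdout and stderr, so the parent wakes promptly when a
   child exits even if the child left its output descriptors open. */
#if 0
static int sigchld_pipe[2]; /* created with pipe() during setup */

static void on_sigchld(int sig)
{
  char c = 1;
  (void)sig;
  (void)write(sigchld_pipe[1], &c, 1); /* write() is async-signal-safe */
}

static void example_wait_for_output_or_exit(int child_stdout)
{
  fd_set readset;
  int maxfd =
    child_stdout > sigchld_pipe[0] ? child_stdout : sigchld_pipe[0];
  FD_ZERO(&readset);
  FD_SET(child_stdout, &readset);
  FD_SET(sigchld_pipe[0], &readset);
  if (select(maxfd + 1, &readset, 0, 0, 0) > 0) {
    if (FD_ISSET(sigchld_pipe[0], &readset)) {
      /* A child terminated: drain the pipe and reap with waitpid(). */
    }
    if (FD_ISSET(child_stdout, &readset)) {
      /* Output is available: read() it without blocking. */
    }
  }
}
#endif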
+
+/*
+
+TODO:
+
+We cannot create the pipeline of processes in suspended states. How
+do we clean up processes that have already started when one fails to
+load? Right now we are just killing them, which is probably not the
+right thing to do.
+
+*/
+
+#if defined(__CYGWIN__)
+/* Increase the file descriptor limit for select() before including
+ related system headers. (Default: 64) */
+# define FD_SETSIZE 16384
+#endif
+
+#include <assert.h> /* assert */
+#include <ctype.h> /* isspace */
+#include <dirent.h> /* DIR, dirent */
+#include <errno.h> /* errno */
+#include <fcntl.h> /* fcntl */
+#include <signal.h> /* sigaction */
+#include <stddef.h> /* ptrdiff_t */
+#include <stdio.h> /* snprintf */
+#include <stdlib.h> /* malloc, free */
+#include <string.h> /* strdup, strerror, memset */
+#include <sys/stat.h> /* open mode */
+#include <sys/time.h> /* struct timeval */
+#include <sys/types.h> /* pid_t, fd_set */
+#include <sys/wait.h> /* waitpid */
+#include <time.h> /* gettimeofday */
+#include <unistd.h> /* pipe, close, fork, execvp, select, _exit */
+
+#if defined(__VMS)
+# define KWSYSPE_VMS_NONBLOCK , O_NONBLOCK
+#else
+# define KWSYSPE_VMS_NONBLOCK
+#endif
+
+#if defined(KWSYS_C_HAS_PTRDIFF_T) && KWSYS_C_HAS_PTRDIFF_T
+typedef ptrdiff_t kwsysProcess_ptrdiff_t;
+#else
+typedef int kwsysProcess_ptrdiff_t;
+#endif
+
+#if defined(KWSYS_C_HAS_SSIZE_T) && KWSYS_C_HAS_SSIZE_T
+typedef ssize_t kwsysProcess_ssize_t;
+#else
+typedef int kwsysProcess_ssize_t;
+#endif
+
+#if defined(__BEOS__) && !defined(__ZETA__)
+/* BeOS 5 doesn't have usleep(), but it has snooze(), which is identical. */
+# include <be/kernel/OS.h>
+static inline void kwsysProcess_usleep(unsigned int usec)
+{
+  snooze(usec);
+}
+#else
+# define kwsysProcess_usleep usleep
+#endif
+
+/*
+ * BeOS's select() works like WinSock: it's for networking only, and
+ * doesn't work with Unix file handles; socket and file handles are
+ * different namespaces (the same descriptor means different things in
+ * each context!)
+ *
+ * So on Unix-like systems where select() is flaky, we'll set the
+ * pipes' file handles to be non-blocking and just poll them directly
+ * without select().
+ */
+#if !defined(__BEOS__) && !defined(__VMS) && !defined(__MINT__) && \
+ !defined(KWSYSPE_USE_SELECT)
+# define KWSYSPE_USE_SELECT 1
+#endif
+
+/* Some platforms do not have siginfo on their signal handlers. */
+#if defined(SA_SIGINFO) && !defined(__BEOS__)
+# define KWSYSPE_USE_SIGINFO 1
+#endif
+
+/* The number of pipes for the child's output. The standard stdout
+ and stderr pipes are the first two. One more pipe is used to
+ detect when the child process has terminated. The third pipe is
+ not given to the child process, so it cannot close it until it
+ terminates. */
+#define KWSYSPE_PIPE_COUNT 3
+#define KWSYSPE_PIPE_STDOUT 0
+#define KWSYSPE_PIPE_STDERR 1
+#define KWSYSPE_PIPE_SIGNAL 2
+
+/* The maximum amount to read from a pipe at a time. */
+#define KWSYSPE_PIPE_BUFFER_SIZE 1024
+
+/* Keep track of times using a signed representation. Switch to the
+ native (possibly unsigned) representation only when calling native
+ functions. */
+typedef struct timeval kwsysProcessTimeNative;
+typedef struct kwsysProcessTime_s kwsysProcessTime;
+struct kwsysProcessTime_s
+{
+ long tv_sec;
+ long tv_usec;
+};
+
+typedef struct kwsysProcessCreateInformation_s
+{
+ int StdIn;
+ int StdOut;
+ int StdErr;
+ int ErrorPipe[2];
+} kwsysProcessCreateInformation;
+
+static void kwsysProcessVolatileFree(volatile void* p);
+static int kwsysProcessInitialize(kwsysProcess* cp);
+static void kwsysProcessCleanup(kwsysProcess* cp, int error);
+static void kwsysProcessCleanupDescriptor(int* pfd);
+static void kwsysProcessClosePipes(kwsysProcess* cp);
+static int kwsysProcessSetNonBlocking(int fd);
+static int kwsysProcessCreate(kwsysProcess* cp, int prIndex,
+ kwsysProcessCreateInformation* si);
+static void kwsysProcessDestroy(kwsysProcess* cp);
+static int kwsysProcessSetupOutputPipeFile(int* p, const char* name);
+static int kwsysProcessSetupOutputPipeNative(int* p, int des[2]);
+static int kwsysProcessGetTimeoutTime(kwsysProcess* cp, double* userTimeout,
+ kwsysProcessTime* timeoutTime);
+static int kwsysProcessGetTimeoutLeft(kwsysProcessTime* timeoutTime,
+ double* userTimeout,
+ kwsysProcessTimeNative* timeoutLength,
+ int zeroIsExpired);
+static kwsysProcessTime kwsysProcessTimeGetCurrent(void);
+static double kwsysProcessTimeToDouble(kwsysProcessTime t);
+static kwsysProcessTime kwsysProcessTimeFromDouble(double d);
+static int kwsysProcessTimeLess(kwsysProcessTime in1, kwsysProcessTime in2);
+static kwsysProcessTime kwsysProcessTimeAdd(kwsysProcessTime in1,
+ kwsysProcessTime in2);
+static kwsysProcessTime kwsysProcessTimeSubtract(kwsysProcessTime in1,
+ kwsysProcessTime in2);
+static void kwsysProcessSetExitExceptionByIndex(kwsysProcess* cp, int sig,
+ int idx);
+static void kwsysProcessChildErrorExit(int errorPipe);
+static void kwsysProcessRestoreDefaultSignalHandlers(void);
+static pid_t kwsysProcessFork(kwsysProcess* cp,
+ kwsysProcessCreateInformation* si);
+static void kwsysProcessKill(pid_t process_id);
+#if defined(__VMS)
+static int kwsysProcessSetVMSFeature(const char* name, int value);
+#endif
+static int kwsysProcessesAdd(kwsysProcess* cp);
+static void kwsysProcessesRemove(kwsysProcess* cp);
+#if KWSYSPE_USE_SIGINFO
+static void kwsysProcessesSignalHandler(int signum, siginfo_t* info,
+ void* ucontext);
+#else
+static void kwsysProcessesSignalHandler(int signum);
+#endif
+
+/* A structure containing results data for each process. */
+typedef struct kwsysProcessResults_s kwsysProcessResults;
+struct kwsysProcessResults_s
+{
+ /* The status of the child process. */
+ int State;
+
+ /* The exceptional behavior that terminated the process, if any. */
+ int ExitException;
+
+ /* The process exit code. */
+ int ExitCode;
+
+ /* The process return code, if any. */
+ int ExitValue;
+
+ /* Description for the ExitException. */
+ char ExitExceptionString[KWSYSPE_PIPE_BUFFER_SIZE + 1];
+};
+
+/* Structure containing data used to implement the child's execution. */
+struct kwsysProcess_s
+{
+ /* The command lines to execute. */
+ char*** Commands;
+ volatile int NumberOfCommands;
+
+ /* Descriptors for the read ends of the child's output pipes and
+ the signal pipe. */
+ int PipeReadEnds[KWSYSPE_PIPE_COUNT];
+
+ /* Descriptors for the child's ends of the pipes.
+ Used temporarily during process creation. */
+ int PipeChildStd[3];
+
+ /* Write descriptor for child termination signal pipe. */
+ int SignalPipe;
+
+ /* Buffer for pipe data. */
+ char PipeBuffer[KWSYSPE_PIPE_BUFFER_SIZE];
+
+ /* Process IDs returned by the calls to fork. Everything is volatile
+ because the signal handler accesses them. You must be very careful
+ when reaping PIDs or modifying this array to avoid race conditions. */
+ volatile pid_t* volatile ForkPIDs;
+
+ /* Flag for whether the children were terminated by a failed select. */
+ int SelectError;
+
+ /* The timeout length. */
+ double Timeout;
+
+ /* The working directory for the process. */
+ char* WorkingDirectory;
+
+ /* Whether to create the child as a detached process. */
+ int OptionDetach;
+
+ /* Whether the child was created as a detached process. */
+ int Detached;
+
+ /* Whether to treat command lines as verbatim. */
+ int Verbatim;
+
+ /* Whether to merge stdout/stderr of the child. */
+ int MergeOutput;
+
+ /* Whether to create the process in a new process group. */
+ volatile sig_atomic_t CreateProcessGroup;
+
+ /* Time at which the child started. Negative for no timeout. */
+ kwsysProcessTime StartTime;
+
+ /* Time at which the child will timeout. Negative for no timeout. */
+ kwsysProcessTime TimeoutTime;
+
+ /* Flag for whether the timeout expired. */
+ int TimeoutExpired;
+
+ /* The number of pipes left open during execution. */
+ int PipesLeft;
+
+#if KWSYSPE_USE_SELECT
+ /* File descriptor set for call to select. */
+ fd_set PipeSet;
+#endif
+
+ /* The number of children still executing. */
+ int CommandsLeft;
+
+ /* The status of the process structure. Must be atomic because
+ the signal handler checks this to avoid a race. */
+ volatile sig_atomic_t State;
+
+ /* Whether the process was killed. */
+ volatile sig_atomic_t Killed;
+
+ /* Buffer for error message in case of failure. */
+ char ErrorMessage[KWSYSPE_PIPE_BUFFER_SIZE + 1];
+
+ /* process results. */
+ kwsysProcessResults* ProcessResults;
+
+ /* The exit codes of each child process in the pipeline. */
+ int* CommandExitCodes;
+
+ /* Name of files to which stdin and stdout pipes are attached. */
+ char* PipeFileSTDIN;
+ char* PipeFileSTDOUT;
+ char* PipeFileSTDERR;
+
+ /* Whether each pipe is shared with the parent process. */
+ int PipeSharedSTDIN;
+ int PipeSharedSTDOUT;
+ int PipeSharedSTDERR;
+
+ /* Native pipes provided by the user. */
+ int PipeNativeSTDIN[2];
+ int PipeNativeSTDOUT[2];
+ int PipeNativeSTDERR[2];
+
+ /* The real working directory of this process. */
+ int RealWorkingDirectoryLength;
+ char* RealWorkingDirectory;
+};
+
+kwsysProcess* kwsysProcess_New(void)
+{
+ /* Allocate a process control structure. */
+ kwsysProcess* cp = (kwsysProcess*)malloc(sizeof(kwsysProcess));
+ if (!cp) {
+ return 0;
+ }
+ memset(cp, 0, sizeof(kwsysProcess));
+
+ /* Share stdin with the parent process by default. */
+ cp->PipeSharedSTDIN = 1;
+
+ /* No native pipes by default. */
+ cp->PipeNativeSTDIN[0] = -1;
+ cp->PipeNativeSTDIN[1] = -1;
+ cp->PipeNativeSTDOUT[0] = -1;
+ cp->PipeNativeSTDOUT[1] = -1;
+ cp->PipeNativeSTDERR[0] = -1;
+ cp->PipeNativeSTDERR[1] = -1;
+
+ /* Set initial status. */
+ cp->State = kwsysProcess_State_Starting;
+
+ return cp;
+}
+
+void kwsysProcess_Delete(kwsysProcess* cp)
+{
+ /* Make sure we have an instance. */
+ if (!cp) {
+ return;
+ }
+
+ /* If the process is executing, wait for it to finish. */
+ if (cp->State == kwsysProcess_State_Executing) {
+ if (cp->Detached) {
+ kwsysProcess_Disown(cp);
+ } else {
+ kwsysProcess_WaitForExit(cp, 0);
+ }
+ }
+
+ /* Free memory. */
+ kwsysProcess_SetCommand(cp, 0);
+ kwsysProcess_SetWorkingDirectory(cp, 0);
+ kwsysProcess_SetPipeFile(cp, kwsysProcess_Pipe_STDIN, 0);
+ kwsysProcess_SetPipeFile(cp, kwsysProcess_Pipe_STDOUT, 0);
+ kwsysProcess_SetPipeFile(cp, kwsysProcess_Pipe_STDERR, 0);
+ free(cp->CommandExitCodes);
+ free(cp->ProcessResults);
+ free(cp);
+}
+
+int kwsysProcess_SetCommand(kwsysProcess* cp, char const* const* command)
+{
+ int i;
+ if (!cp) {
+ return 0;
+ }
+ for (i = 0; i < cp->NumberOfCommands; ++i) {
+ char** c = cp->Commands[i];
+ while (*c) {
+ free(*c++);
+ }
+ free(cp->Commands[i]);
+ }
+ cp->NumberOfCommands = 0;
+ if (cp->Commands) {
+ free(cp->Commands);
+ cp->Commands = 0;
+ }
+ if (command) {
+ return kwsysProcess_AddCommand(cp, command);
+ }
+ return 1;
+}
+
+int kwsysProcess_AddCommand(kwsysProcess* cp, char const* const* command)
+{
+ int newNumberOfCommands;
+ char*** newCommands;
+
+ /* Make sure we have a command to add. */
+ if (!cp || !command || !*command) {
+ return 0;
+ }
+
+ /* Allocate a new array for command pointers. */
+ newNumberOfCommands = cp->NumberOfCommands + 1;
+ if (!(newCommands =
+ (char***)malloc(sizeof(char**) * (size_t)(newNumberOfCommands)))) {
+ /* Out of memory. */
+ return 0;
+ }
+
+ /* Copy any existing commands into the new array. */
+ {
+ int i;
+ for (i = 0; i < cp->NumberOfCommands; ++i) {
+ newCommands[i] = cp->Commands[i];
+ }
+ }
+
+ /* Add the new command. */
+ if (cp->Verbatim) {
+ /* In order to run the given command line verbatim we need to
+ parse it. */
+ newCommands[cp->NumberOfCommands] =
+ kwsysSystem_Parse_CommandForUnix(*command, 0);
+ if (!newCommands[cp->NumberOfCommands] ||
+ !newCommands[cp->NumberOfCommands][0]) {
+ /* Out of memory or no command parsed. */
+ free(newCommands);
+ return 0;
+ }
+ } else {
+ /* Copy each argument string individually. */
+ char const* const* c = command;
+ kwsysProcess_ptrdiff_t n = 0;
+ kwsysProcess_ptrdiff_t i = 0;
+ while (*c++)
+ ;
+ n = c - command - 1;
+ newCommands[cp->NumberOfCommands] =
+ (char**)malloc((size_t)(n + 1) * sizeof(char*));
+ if (!newCommands[cp->NumberOfCommands]) {
+ /* Out of memory. */
+ free(newCommands);
+ return 0;
+ }
+ for (i = 0; i < n; ++i) {
+ assert(command[i]); /* Quiet Clang scan-build. */
+ newCommands[cp->NumberOfCommands][i] = strdup(command[i]);
+ if (!newCommands[cp->NumberOfCommands][i]) {
+ break;
+ }
+ }
+ if (i < n) {
+ /* Out of memory. */
+ for (; i > 0; --i) {
+ free(newCommands[cp->NumberOfCommands][i - 1]);
+ }
+ free(newCommands);
+ return 0;
+ }
+ newCommands[cp->NumberOfCommands][n] = 0;
+ }
+
+ /* Successfully allocated new command array. Free the old array. */
+ free(cp->Commands);
+ cp->Commands = newCommands;
+ cp->NumberOfCommands = newNumberOfCommands;
+
+ return 1;
+}
+
+void kwsysProcess_SetTimeout(kwsysProcess* cp, double timeout)
+{
+ if (!cp) {
+ return;
+ }
+ cp->Timeout = timeout;
+ if (cp->Timeout < 0) {
+ cp->Timeout = 0;
+ }
+ /* Force recomputation of TimeoutTime. */
+ cp->TimeoutTime.tv_sec = -1;
+}
+
+int kwsysProcess_SetWorkingDirectory(kwsysProcess* cp, const char* dir)
+{
+ if (!cp) {
+ return 0;
+ }
+ if (cp->WorkingDirectory == dir) {
+ return 1;
+ }
+ if (cp->WorkingDirectory && dir && strcmp(cp->WorkingDirectory, dir) == 0) {
+ return 1;
+ }
+ if (cp->WorkingDirectory) {
+ free(cp->WorkingDirectory);
+ cp->WorkingDirectory = 0;
+ }
+ if (dir) {
+ cp->WorkingDirectory = strdup(dir);
+ if (!cp->WorkingDirectory) {
+ return 0;
+ }
+ }
+ return 1;
+}
+
+int kwsysProcess_SetPipeFile(kwsysProcess* cp, int prPipe, const char* file)
+{
+ char** pfile;
+ if (!cp) {
+ return 0;
+ }
+ switch (prPipe) {
+ case kwsysProcess_Pipe_STDIN:
+ pfile = &cp->PipeFileSTDIN;
+ break;
+ case kwsysProcess_Pipe_STDOUT:
+ pfile = &cp->PipeFileSTDOUT;
+ break;
+ case kwsysProcess_Pipe_STDERR:
+ pfile = &cp->PipeFileSTDERR;
+ break;
+ default:
+ return 0;
+ }
+ if (*pfile) {
+ free(*pfile);
+ *pfile = 0;
+ }
+ if (file) {
+ *pfile = strdup(file);
+ if (!*pfile) {
+ return 0;
+ }
+ }
+
+ /* If we are redirecting the pipe, do not share it or use a native
+ pipe. */
+ if (*pfile) {
+ kwsysProcess_SetPipeNative(cp, prPipe, 0);
+ kwsysProcess_SetPipeShared(cp, prPipe, 0);
+ }
+ return 1;
+}
+
+void kwsysProcess_SetPipeShared(kwsysProcess* cp, int prPipe, int shared)
+{
+ if (!cp) {
+ return;
+ }
+
+ switch (prPipe) {
+ case kwsysProcess_Pipe_STDIN:
+ cp->PipeSharedSTDIN = shared ? 1 : 0;
+ break;
+ case kwsysProcess_Pipe_STDOUT:
+ cp->PipeSharedSTDOUT = shared ? 1 : 0;
+ break;
+ case kwsysProcess_Pipe_STDERR:
+ cp->PipeSharedSTDERR = shared ? 1 : 0;
+ break;
+ default:
+ return;
+ }
+
+ /* If we are sharing the pipe, do not redirect it to a file or use a
+ native pipe. */
+ if (shared) {
+ kwsysProcess_SetPipeFile(cp, prPipe, 0);
+ kwsysProcess_SetPipeNative(cp, prPipe, 0);
+ }
+}
+
+void kwsysProcess_SetPipeNative(kwsysProcess* cp, int prPipe, int p[2])
+{
+ int* pPipeNative = 0;
+
+ if (!cp) {
+ return;
+ }
+
+ switch (prPipe) {
+ case kwsysProcess_Pipe_STDIN:
+ pPipeNative = cp->PipeNativeSTDIN;
+ break;
+ case kwsysProcess_Pipe_STDOUT:
+ pPipeNative = cp->PipeNativeSTDOUT;
+ break;
+ case kwsysProcess_Pipe_STDERR:
+ pPipeNative = cp->PipeNativeSTDERR;
+ break;
+ default:
+ return;
+ }
+
+ /* Copy the native pipe descriptors provided. */
+ if (p) {
+ pPipeNative[0] = p[0];
+ pPipeNative[1] = p[1];
+ } else {
+ pPipeNative[0] = -1;
+ pPipeNative[1] = -1;
+ }
+
+ /* If we are using a native pipe, do not share it or redirect it to
+ a file. */
+ if (p) {
+ kwsysProcess_SetPipeFile(cp, prPipe, 0);
+ kwsysProcess_SetPipeShared(cp, prPipe, 0);
+ }
+}
+
+int kwsysProcess_GetOption(kwsysProcess* cp, int optionId)
+{
+ if (!cp) {
+ return 0;
+ }
+
+ switch (optionId) {
+ case kwsysProcess_Option_Detach:
+ return cp->OptionDetach;
+ case kwsysProcess_Option_MergeOutput:
+ return cp->MergeOutput;
+ case kwsysProcess_Option_Verbatim:
+ return cp->Verbatim;
+ case kwsysProcess_Option_CreateProcessGroup:
+ return cp->CreateProcessGroup;
+ default:
+ return 0;
+ }
+}
+
+void kwsysProcess_SetOption(kwsysProcess* cp, int optionId, int value)
+{
+ if (!cp) {
+ return;
+ }
+
+ switch (optionId) {
+ case kwsysProcess_Option_Detach:
+ cp->OptionDetach = value;
+ break;
+ case kwsysProcess_Option_MergeOutput:
+ cp->MergeOutput = value;
+ break;
+ case kwsysProcess_Option_Verbatim:
+ cp->Verbatim = value;
+ break;
+ case kwsysProcess_Option_CreateProcessGroup:
+ cp->CreateProcessGroup = value;
+ break;
+ default:
+ break;
+ }
+}
+
+int kwsysProcess_GetState(kwsysProcess* cp)
+{
+ return cp ? cp->State : kwsysProcess_State_Error;
+}
+
+int kwsysProcess_GetExitException(kwsysProcess* cp)
+{
+ return (cp && cp->ProcessResults && (cp->NumberOfCommands > 0))
+ ? cp->ProcessResults[cp->NumberOfCommands - 1].ExitException
+ : kwsysProcess_Exception_Other;
+}
+
+int kwsysProcess_GetExitCode(kwsysProcess* cp)
+{
+ return (cp && cp->ProcessResults && (cp->NumberOfCommands > 0))
+ ? cp->ProcessResults[cp->NumberOfCommands - 1].ExitCode
+ : 0;
+}
+
+int kwsysProcess_GetExitValue(kwsysProcess* cp)
+{
+ return (cp && cp->ProcessResults && (cp->NumberOfCommands > 0))
+ ? cp->ProcessResults[cp->NumberOfCommands - 1].ExitValue
+ : -1;
+}
+
+const char* kwsysProcess_GetErrorString(kwsysProcess* cp)
+{
+ if (!cp) {
+ return "Process management structure could not be allocated";
+ } else if (cp->State == kwsysProcess_State_Error) {
+ return cp->ErrorMessage;
+ }
+ return "Success";
+}
+
+const char* kwsysProcess_GetExceptionString(kwsysProcess* cp)
+{
+ if (!(cp && cp->ProcessResults && (cp->NumberOfCommands > 0))) {
+ return "GetExceptionString called with NULL process management structure";
+ } else if (cp->State == kwsysProcess_State_Exception) {
+ return cp->ProcessResults[cp->NumberOfCommands - 1].ExitExceptionString;
+ }
+ return "No exception";
+}
+
+/* The index must be within the bounds of the results array. */
+#define KWSYSPE_IDX_CHK(RET) \
+ if (!cp || idx >= cp->NumberOfCommands || idx < 0) { \
+ return RET; \
+ }
+
+int kwsysProcess_GetStateByIndex(kwsysProcess* cp, int idx)
+{
+ KWSYSPE_IDX_CHK(kwsysProcess_State_Error)
+ return cp->ProcessResults[idx].State;
+}
+
+int kwsysProcess_GetExitExceptionByIndex(kwsysProcess* cp, int idx)
+{
+ KWSYSPE_IDX_CHK(kwsysProcess_Exception_Other)
+ return cp->ProcessResults[idx].ExitException;
+}
+
+int kwsysProcess_GetExitValueByIndex(kwsysProcess* cp, int idx)
+{
+ KWSYSPE_IDX_CHK(-1)
+ return cp->ProcessResults[idx].ExitValue;
+}
+
+int kwsysProcess_GetExitCodeByIndex(kwsysProcess* cp, int idx)
+{
+ KWSYSPE_IDX_CHK(-1)
+ return cp->CommandExitCodes[idx];
+}
+
+const char* kwsysProcess_GetExceptionStringByIndex(kwsysProcess* cp, int idx)
+{
+ KWSYSPE_IDX_CHK("GetExceptionString called with NULL process management "
+ "structure or index out of bound")
+ if (cp->ProcessResults[idx].State == kwsysProcess_StateByIndex_Exception) {
+ return cp->ProcessResults[idx].ExitExceptionString;
+ }
+ return "No exception";
+}
+
+#undef KWSYSPE_IDX_CHK
+
+void kwsysProcess_Execute(kwsysProcess* cp)
+{
+ int i;
+
+ /* Do not execute a second copy simultaneously. */
+ if (!cp || cp->State == kwsysProcess_State_Executing) {
+ return;
+ }
+
+ /* Make sure we have something to run. */
+ if (cp->NumberOfCommands < 1) {
+ strcpy(cp->ErrorMessage, "No command");
+ cp->State = kwsysProcess_State_Error;
+ return;
+ }
+
+ /* Initialize the control structure for a new process. */
+ if (!kwsysProcessInitialize(cp)) {
+ strcpy(cp->ErrorMessage, "Out of memory");
+ cp->State = kwsysProcess_State_Error;
+ return;
+ }
+
+#if defined(__VMS)
+ /* Make sure pipes behave like streams on VMS. */
+ if (!kwsysProcessSetVMSFeature("DECC$STREAM_PIPE", 1)) {
+ kwsysProcessCleanup(cp, 1);
+ return;
+ }
+#endif
+
+ /* Save the real working directory of this process and change to
+ the working directory for the child processes. This is needed
+ to make pipe file paths evaluate correctly. */
+ if (cp->WorkingDirectory) {
+ int r;
+ if (!getcwd(cp->RealWorkingDirectory,
+ (size_t)(cp->RealWorkingDirectoryLength))) {
+ kwsysProcessCleanup(cp, 1);
+ return;
+ }
+
+ /* Some platforms specify that the chdir call may be
+ interrupted. Repeat the call until it finishes. */
+ while (((r = chdir(cp->WorkingDirectory)) < 0) && (errno == EINTR))
+ ;
+ if (r < 0) {
+ kwsysProcessCleanup(cp, 1);
+ return;
+ }
+ }
+
+ /* If not running a detached child, add this object to the global
+ set of process objects that wish to be notified when a child
+ exits. */
+ if (!cp->OptionDetach) {
+ if (!kwsysProcessesAdd(cp)) {
+ kwsysProcessCleanup(cp, 1);
+ return;
+ }
+ }
+
+ /* Setup the stdin pipe for the first process. */
+ if (cp->PipeFileSTDIN) {
+ /* Open a file for the child's stdin to read. */
+ cp->PipeChildStd[0] = open(cp->PipeFileSTDIN, O_RDONLY);
+ if (cp->PipeChildStd[0] < 0) {
+ kwsysProcessCleanup(cp, 1);
+ return;
+ }
+
+ /* Set close-on-exec flag on the pipe's end. */
+ if (fcntl(cp->PipeChildStd[0], F_SETFD, FD_CLOEXEC) < 0) {
+ kwsysProcessCleanup(cp, 1);
+ return;
+ }
+ } else if (cp->PipeSharedSTDIN) {
+ cp->PipeChildStd[0] = 0;
+ } else if (cp->PipeNativeSTDIN[0] >= 0) {
+ cp->PipeChildStd[0] = cp->PipeNativeSTDIN[0];
+
+ /* Set close-on-exec flag on the pipe's ends. The read end will
+ be dup2-ed into the stdin descriptor after the fork but before
+ the exec. */
+ if ((fcntl(cp->PipeNativeSTDIN[0], F_SETFD, FD_CLOEXEC) < 0) ||
+ (fcntl(cp->PipeNativeSTDIN[1], F_SETFD, FD_CLOEXEC) < 0)) {
+ kwsysProcessCleanup(cp, 1);
+ return;
+ }
+ } else {
+ cp->PipeChildStd[0] = -1;
+ }
+
+ /* Create the output pipe for the last process.
+ We always create this so the pipe can be passed to select even if
+ it will report closed immediately. */
+ {
+ /* Create the pipe. */
+ int p[2];
+ if (pipe(p KWSYSPE_VMS_NONBLOCK) < 0) {
+ kwsysProcessCleanup(cp, 1);
+ return;
+ }
+
+ /* Store the pipe. */
+ cp->PipeReadEnds[KWSYSPE_PIPE_STDOUT] = p[0];
+ cp->PipeChildStd[1] = p[1];
+
+ /* Set close-on-exec flag on the pipe's ends. */
+ if ((fcntl(p[0], F_SETFD, FD_CLOEXEC) < 0) ||
+ (fcntl(p[1], F_SETFD, FD_CLOEXEC) < 0)) {
+ kwsysProcessCleanup(cp, 1);
+ return;
+ }
+
+ /* Set to non-blocking in case select lies, or for the polling
+ implementation. */
+ if (!kwsysProcessSetNonBlocking(p[0])) {
+ kwsysProcessCleanup(cp, 1);
+ return;
+ }
+ }
+
+ if (cp->PipeFileSTDOUT) {
+ /* Use a file for stdout. */
+ if (!kwsysProcessSetupOutputPipeFile(&cp->PipeChildStd[1],
+ cp->PipeFileSTDOUT)) {
+ kwsysProcessCleanup(cp, 1);
+ return;
+ }
+ } else if (cp->PipeSharedSTDOUT) {
+ /* Use the parent stdout. */
+ kwsysProcessCleanupDescriptor(&cp->PipeChildStd[1]);
+ cp->PipeChildStd[1] = 1;
+ } else if (cp->PipeNativeSTDOUT[1] >= 0) {
+ /* Use the given descriptor for stdout. */
+ if (!kwsysProcessSetupOutputPipeNative(&cp->PipeChildStd[1],
+ cp->PipeNativeSTDOUT)) {
+ kwsysProcessCleanup(cp, 1);
+ return;
+ }
+ }
+
+ /* Create stderr pipe to be shared by all processes in the pipeline.
+ We always create this so the pipe can be passed to select even if
+ it will report closed immediately. */
+ {
+ /* Create the pipe. */
+ int p[2];
+ if (pipe(p KWSYSPE_VMS_NONBLOCK) < 0) {
+ kwsysProcessCleanup(cp, 1);
+ return;
+ }
+
+ /* Store the pipe. */
+ cp->PipeReadEnds[KWSYSPE_PIPE_STDERR] = p[0];
+ cp->PipeChildStd[2] = p[1];
+
+ /* Set close-on-exec flag on the pipe's ends. */
+ if ((fcntl(p[0], F_SETFD, FD_CLOEXEC) < 0) ||
+ (fcntl(p[1], F_SETFD, FD_CLOEXEC) < 0)) {
+ kwsysProcessCleanup(cp, 1);
+ return;
+ }
+
+ /* Set to non-blocking in case select lies, or for the polling
+ implementation. */
+ if (!kwsysProcessSetNonBlocking(p[0])) {
+ kwsysProcessCleanup(cp, 1);
+ return;
+ }
+ }
+
+ if (cp->PipeFileSTDERR) {
+ /* Use a file for stderr. */
+ if (!kwsysProcessSetupOutputPipeFile(&cp->PipeChildStd[2],
+ cp->PipeFileSTDERR)) {
+ kwsysProcessCleanup(cp, 1);
+ return;
+ }
+ } else if (cp->PipeSharedSTDERR) {
+ /* Use the parent stderr. */
+ kwsysProcessCleanupDescriptor(&cp->PipeChildStd[2]);
+ cp->PipeChildStd[2] = 2;
+ } else if (cp->PipeNativeSTDERR[1] >= 0) {
+ /* Use the given handle for stderr. */
+ if (!kwsysProcessSetupOutputPipeNative(&cp->PipeChildStd[2],
+ cp->PipeNativeSTDERR)) {
+ kwsysProcessCleanup(cp, 1);
+ return;
+ }
+ }
+
+ /* The timeout period starts now. */
+ cp->StartTime = kwsysProcessTimeGetCurrent();
+ cp->TimeoutTime.tv_sec = -1;
+ cp->TimeoutTime.tv_usec = -1;
+
+ /* Create the pipeline of processes. */
+ {
+ kwsysProcessCreateInformation si = { -1, -1, -1, { -1, -1 } };
+ int nextStdIn = cp->PipeChildStd[0];
+ for (i = 0; i < cp->NumberOfCommands; ++i) {
+ /* Setup the process's pipes. */
+ si.StdIn = nextStdIn;
+ if (i == cp->NumberOfCommands - 1) {
+ nextStdIn = -1;
+ si.StdOut = cp->PipeChildStd[1];
+ } else {
+ /* Create a pipe to sit between the children. */
+ int p[2] = { -1, -1 };
+ if (pipe(p KWSYSPE_VMS_NONBLOCK) < 0) {
+ if (nextStdIn != cp->PipeChildStd[0]) {
+ kwsysProcessCleanupDescriptor(&nextStdIn);
+ }
+ kwsysProcessCleanup(cp, 1);
+ return;
+ }
+
+ /* Set close-on-exec flag on the pipe's ends. */
+ if ((fcntl(p[0], F_SETFD, FD_CLOEXEC) < 0) ||
+ (fcntl(p[1], F_SETFD, FD_CLOEXEC) < 0)) {
+ close(p[0]);
+ close(p[1]);
+ if (nextStdIn != cp->PipeChildStd[0]) {
+ kwsysProcessCleanupDescriptor(&nextStdIn);
+ }
+ kwsysProcessCleanup(cp, 1);
+ return;
+ }
+ nextStdIn = p[0];
+ si.StdOut = p[1];
+ }
+ si.StdErr = cp->MergeOutput ? cp->PipeChildStd[1] : cp->PipeChildStd[2];
+
+ {
+ int res = kwsysProcessCreate(cp, i, &si);
+
+ /* Close our copies of pipes used between children. */
+ if (si.StdIn != cp->PipeChildStd[0]) {
+ kwsysProcessCleanupDescriptor(&si.StdIn);
+ }
+ if (si.StdOut != cp->PipeChildStd[1]) {
+ kwsysProcessCleanupDescriptor(&si.StdOut);
+ }
+ if (si.StdErr != cp->PipeChildStd[2] && !cp->MergeOutput) {
+ kwsysProcessCleanupDescriptor(&si.StdErr);
+ }
+
+ if (!res) {
+ kwsysProcessCleanupDescriptor(&si.ErrorPipe[0]);
+ kwsysProcessCleanupDescriptor(&si.ErrorPipe[1]);
+ if (nextStdIn != cp->PipeChildStd[0]) {
+ kwsysProcessCleanupDescriptor(&nextStdIn);
+ }
+ kwsysProcessCleanup(cp, 1);
+ return;
+ }
+ }
+ }
+ }
+
+ /* The parent process does not need the child's pipe ends. */
+ for (i = 0; i < 3; ++i) {
+ kwsysProcessCleanupDescriptor(&cp->PipeChildStd[i]);
+ }
+
+ /* Restore the working directory. */
+ if (cp->RealWorkingDirectory) {
+ /* Some platforms specify that the chdir call may be
+ interrupted. Repeat the call until it finishes. */
+ while ((chdir(cp->RealWorkingDirectory) < 0) && (errno == EINTR))
+ ;
+ free(cp->RealWorkingDirectory);
+ cp->RealWorkingDirectory = 0;
+ }
+
+ /* All the pipes are now open. */
+ cp->PipesLeft = KWSYSPE_PIPE_COUNT;
+
+ /* The process has now started. */
+ cp->State = kwsysProcess_State_Executing;
+ cp->Detached = cp->OptionDetach;
+}
+
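+/* Illustrative sketch (kept out of the build): the pipeline loop above
+   chains commands by creating an anonymous pipe between neighbours and
+   handing the read end to the next child's stdin and the write end to the
+   previous child's stdout.  The names cmd1/cmd2 below are hypothetical and
+   error handling is omitted.  */
+#if 0
+  int link[2];
+  pipe(link);
+  if (fork() == 0) { /* first child writes into the pipe */
+    dup2(link[1], 1);
+    close(link[0]);
+    close(link[1]);
+    execvp(cmd1[0], cmd1);
+    _exit(1);
+  }
+  if (fork() == 0) { /* second child reads from the pipe */
+    dup2(link[0], 0);
+    close(link[0]);
+    close(link[1]);
+    execvp(cmd2[0], cmd2);
+    _exit(1);
+  }
+  close(link[0]); /* the parent keeps neither end */
+  close(link[1]);
+#endif
+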
+kwsysEXPORT void kwsysProcess_Disown(kwsysProcess* cp)
+{
+ /* Make sure a detached child process is running. */
+ if (!cp || !cp->Detached || cp->State != kwsysProcess_State_Executing ||
+ cp->TimeoutExpired || cp->Killed) {
+ return;
+ }
+
+ /* Close all the pipes safely. */
+ kwsysProcessClosePipes(cp);
+
+ /* We will not wait for exit, so cleanup now. */
+ kwsysProcessCleanup(cp, 0);
+
+ /* The process has been disowned. */
+ cp->State = kwsysProcess_State_Disowned;
+}
+
+typedef struct kwsysProcessWaitData_s
+{
+ int Expired;
+ int PipeId;
+ int User;
+ double* UserTimeout;
+ kwsysProcessTime TimeoutTime;
+} kwsysProcessWaitData;
+static int kwsysProcessWaitForPipe(kwsysProcess* cp, char** data, int* length,
+ kwsysProcessWaitData* wd);
+
+int kwsysProcess_WaitForData(kwsysProcess* cp, char** data, int* length,
+ double* userTimeout)
+{
+ kwsysProcessTime userStartTime = { 0, 0 };
+ kwsysProcessWaitData wd = { 0, kwsysProcess_Pipe_None, 0, 0, { 0, 0 } };
+ wd.UserTimeout = userTimeout;
+ /* Make sure we are executing a process. */
+ if (!cp || cp->State != kwsysProcess_State_Executing || cp->Killed ||
+ cp->TimeoutExpired) {
+ return kwsysProcess_Pipe_None;
+ }
+
+ /* Record the time at which user timeout period starts. */
+ if (userTimeout) {
+ userStartTime = kwsysProcessTimeGetCurrent();
+ }
+
+ /* Calculate the time at which a timeout will expire, and whether it
+ is the user or process timeout. */
+ wd.User = kwsysProcessGetTimeoutTime(cp, userTimeout, &wd.TimeoutTime);
+
+ /* Data can only be available when pipes are open. If the process
+ is not running, cp->PipesLeft will be 0. */
+ while (cp->PipesLeft > 0 &&
+ !kwsysProcessWaitForPipe(cp, data, length, &wd)) {
+ }
+
+ /* Update the user timeout. */
+ if (userTimeout) {
+ kwsysProcessTime userEndTime = kwsysProcessTimeGetCurrent();
+ kwsysProcessTime difference =
+ kwsysProcessTimeSubtract(userEndTime, userStartTime);
+ double d = kwsysProcessTimeToDouble(difference);
+ *userTimeout -= d;
+ if (*userTimeout < 0) {
+ *userTimeout = 0;
+ }
+ }
+
+ /* Check what happened. */
+ if (wd.PipeId) {
+ /* Data are ready on a pipe. */
+ return wd.PipeId;
+ } else if (wd.Expired) {
+ /* A timeout has expired. */
+ if (wd.User) {
+ /* The user timeout has expired. It has no time left. */
+ return kwsysProcess_Pipe_Timeout;
+ } else {
+ /* The process timeout has expired. Kill the children now. */
+ kwsysProcess_Kill(cp);
+ cp->Killed = 0;
+ cp->TimeoutExpired = 1;
+ return kwsysProcess_Pipe_None;
+ }
+ } else {
+ /* No pipes are left open. */
+ return kwsysProcess_Pipe_None;
+ }
+}
+
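+/* Illustrative sketch (kept out of the build) of a typical caller loop for
+   kwsysProcess_WaitForData; creation of 'cp' via kwsysProcess_New,
+   kwsysProcess_SetCommand and kwsysProcess_Execute is assumed.  */
+#if 0
+  char* data;
+  int length;
+  int pipeId;
+  double timeout = 5.0; /* caller-supplied user timeout in seconds */
+  while ((pipeId = kwsysProcess_WaitForData(cp, &data, &length, &timeout)) >
+         0) {
+    if (pipeId == kwsysProcess_Pipe_Timeout) {
+      break; /* the user timeout expired; the caller may try again later */
+    }
+    fwrite(data, 1, (size_t)length,
+           pipeId == kwsysProcess_Pipe_STDOUT ? stdout : stderr);
+  }
+#endif
+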
+static int kwsysProcessWaitForPipe(kwsysProcess* cp, char** data, int* length,
+ kwsysProcessWaitData* wd)
+{
+ int i;
+ kwsysProcessTimeNative timeoutLength;
+
+#if KWSYSPE_USE_SELECT
+ int numReady = 0;
+ int max = -1;
+ kwsysProcessTimeNative* timeout = 0;
+
+ /* Check for any open pipes with data reported ready by the last
+ call to select. According to "man select_tut" we must deal
+ with all descriptors reported by a call to select before
+ passing them to another select call. */
+ for (i = 0; i < KWSYSPE_PIPE_COUNT; ++i) {
+ if (cp->PipeReadEnds[i] >= 0 &&
+ FD_ISSET(cp->PipeReadEnds[i], &cp->PipeSet)) {
+ kwsysProcess_ssize_t n;
+
+ /* We are handling this pipe now. Remove it from the set. */
+ FD_CLR(cp->PipeReadEnds[i], &cp->PipeSet);
+
+ /* The pipe is ready to read without blocking. Keep trying to
+ read until the operation is not interrupted. */
+ while (((n = read(cp->PipeReadEnds[i], cp->PipeBuffer,
+ KWSYSPE_PIPE_BUFFER_SIZE)) < 0) &&
+ (errno == EINTR))
+ ;
+ if (n > 0) {
+ /* We have data on this pipe. */
+ if (i == KWSYSPE_PIPE_SIGNAL) {
+ /* A child process has terminated. */
+ kwsysProcessDestroy(cp);
+ } else if (data && length) {
+ /* Report this data. */
+ *data = cp->PipeBuffer;
+ *length = (int)(n);
+ switch (i) {
+ case KWSYSPE_PIPE_STDOUT:
+ wd->PipeId = kwsysProcess_Pipe_STDOUT;
+ break;
+ case KWSYSPE_PIPE_STDERR:
+ wd->PipeId = kwsysProcess_Pipe_STDERR;
+ break;
+ }
+ return 1;
+ }
+ } else if (n < 0 && errno == EAGAIN) {
+ /* No data are really ready. The select call lied. See the
+ "man select" page on Linux for cases when this occurs. */
+ } else {
+ /* We are done reading from this pipe. */
+ kwsysProcessCleanupDescriptor(&cp->PipeReadEnds[i]);
+ --cp->PipesLeft;
+ }
+ }
+ }
+
+ /* If we have data, break early. */
+ if (wd->PipeId) {
+ return 1;
+ }
+
+ /* Make sure the set is empty (it should always be empty here
+ anyway). */
+ FD_ZERO(&cp->PipeSet);
+
+ /* Setup a timeout if required. */
+ if (wd->TimeoutTime.tv_sec < 0) {
+ timeout = 0;
+ } else {
+ timeout = &timeoutLength;
+ }
+ if (kwsysProcessGetTimeoutLeft(
+ &wd->TimeoutTime, wd->User ? wd->UserTimeout : 0, &timeoutLength, 0)) {
+ /* Timeout has already expired. */
+ wd->Expired = 1;
+ return 1;
+ }
+
+ /* Add the pipe reading ends that are still open. */
+ max = -1;
+ for (i = 0; i < KWSYSPE_PIPE_COUNT; ++i) {
+ if (cp->PipeReadEnds[i] >= 0) {
+ FD_SET(cp->PipeReadEnds[i], &cp->PipeSet);
+ if (cp->PipeReadEnds[i] > max) {
+ max = cp->PipeReadEnds[i];
+ }
+ }
+ }
+
+ /* Make sure we have a non-empty set. */
+ if (max < 0) {
+ /* All pipes have closed. Child has terminated. */
+ return 1;
+ }
+
+ /* Run select to block until data are available. Repeat call
+ until it is not interrupted. */
+ while (((numReady = select(max + 1, &cp->PipeSet, 0, 0, timeout)) < 0) &&
+ (errno == EINTR))
+ ;
+
+ /* Check result of select. */
+ if (numReady == 0) {
+ /* Select's timeout expired. */
+ wd->Expired = 1;
+ return 1;
+ } else if (numReady < 0) {
+ /* Select returned an error. Leave the error description in the
+ pipe buffer. */
+ strncpy(cp->ErrorMessage, strerror(errno), KWSYSPE_PIPE_BUFFER_SIZE);
+
+ /* Kill the children now. */
+ kwsysProcess_Kill(cp);
+ cp->Killed = 0;
+ cp->SelectError = 1;
+ }
+
+ return 0;
+#else
+ /* Poll pipes for data since we do not have select. */
+ for (i = 0; i < KWSYSPE_PIPE_COUNT; ++i) {
+ if (cp->PipeReadEnds[i] >= 0) {
+ const int fd = cp->PipeReadEnds[i];
+ int n = read(fd, cp->PipeBuffer, KWSYSPE_PIPE_BUFFER_SIZE);
+ if (n > 0) {
+ /* We have data on this pipe. */
+ if (i == KWSYSPE_PIPE_SIGNAL) {
+ /* A child process has terminated. */
+ kwsysProcessDestroy(cp);
+ } else if (data && length) {
+ /* Report this data. */
+ *data = cp->PipeBuffer;
+ *length = n;
+ switch (i) {
+ case KWSYSPE_PIPE_STDOUT:
+ wd->PipeId = kwsysProcess_Pipe_STDOUT;
+ break;
+ case KWSYSPE_PIPE_STDERR:
+ wd->PipeId = kwsysProcess_Pipe_STDERR;
+ break;
+          }
+ }
+ return 1;
+ } else if (n == 0) /* EOF */
+ {
+/* We are done reading from this pipe. */
+# if defined(__VMS)
+ if (!cp->CommandsLeft)
+# endif
+ {
+ kwsysProcessCleanupDescriptor(&cp->PipeReadEnds[i]);
+ --cp->PipesLeft;
+ }
+ } else if (n < 0) /* error */
+ {
+# if defined(__VMS)
+ if (!cp->CommandsLeft) {
+ kwsysProcessCleanupDescriptor(&cp->PipeReadEnds[i]);
+ --cp->PipesLeft;
+ } else
+# endif
+ if ((errno != EINTR) && (errno != EAGAIN)) {
+ strncpy(cp->ErrorMessage, strerror(errno), KWSYSPE_PIPE_BUFFER_SIZE);
+ /* Kill the children now. */
+ kwsysProcess_Kill(cp);
+ cp->Killed = 0;
+ cp->SelectError = 1;
+ return 1;
+ }
+ }
+ }
+ }
+
+ /* If we have data, break early. */
+ if (wd->PipeId) {
+ return 1;
+ }
+
+ if (kwsysProcessGetTimeoutLeft(
+ &wd->TimeoutTime, wd->User ? wd->UserTimeout : 0, &timeoutLength, 1)) {
+ /* Timeout has already expired. */
+ wd->Expired = 1;
+ return 1;
+ }
+
+ /* Sleep a little, try again. */
+ {
+ unsigned int msec =
+ ((timeoutLength.tv_sec * 1000) + (timeoutLength.tv_usec / 1000));
+ if (msec > 100000) {
+ msec = 100000; /* do not sleep more than 100 milliseconds at a time */
+ }
+ kwsysProcess_usleep(msec);
+ }
+ return 0;
+#endif
+}
+
+int kwsysProcess_WaitForExit(kwsysProcess* cp, double* userTimeout)
+{
+ int prPipe = 0;
+
+ /* Make sure we are executing a process. */
+ if (!cp || cp->State != kwsysProcess_State_Executing) {
+ return 1;
+ }
+
+ /* Wait for all the pipes to close. Ignore all data. */
+ while ((prPipe = kwsysProcess_WaitForData(cp, 0, 0, userTimeout)) > 0) {
+ if (prPipe == kwsysProcess_Pipe_Timeout) {
+ return 0;
+ }
+ }
+
+ /* Check if there was an error in one of the waitpid calls. */
+ if (cp->State == kwsysProcess_State_Error) {
+ /* The error message is already in its buffer. Tell
+ kwsysProcessCleanup to not create it. */
+ kwsysProcessCleanup(cp, 0);
+ return 1;
+ }
+
+ /* Check whether the child reported an error invoking the process. */
+ if (cp->SelectError) {
+ /* The error message is already in its buffer. Tell
+ kwsysProcessCleanup to not create it. */
+ kwsysProcessCleanup(cp, 0);
+ cp->State = kwsysProcess_State_Error;
+ return 1;
+ }
+ /* Determine the outcome. */
+ if (cp->Killed) {
+ /* We killed the child. */
+ cp->State = kwsysProcess_State_Killed;
+ } else if (cp->TimeoutExpired) {
+ /* The timeout expired. */
+ cp->State = kwsysProcess_State_Expired;
+ } else {
+ /* The children exited. Report the outcome of the child processes. */
+ for (prPipe = 0; prPipe < cp->NumberOfCommands; ++prPipe) {
+ cp->ProcessResults[prPipe].ExitCode = cp->CommandExitCodes[prPipe];
+ if (WIFEXITED(cp->ProcessResults[prPipe].ExitCode)) {
+ /* The child exited normally. */
+ cp->ProcessResults[prPipe].State = kwsysProcess_StateByIndex_Exited;
+ cp->ProcessResults[prPipe].ExitException = kwsysProcess_Exception_None;
+ cp->ProcessResults[prPipe].ExitValue =
+ (int)WEXITSTATUS(cp->ProcessResults[prPipe].ExitCode);
+ } else if (WIFSIGNALED(cp->ProcessResults[prPipe].ExitCode)) {
+ /* The child received an unhandled signal. */
+ cp->ProcessResults[prPipe].State = kwsysProcess_State_Exception;
+ kwsysProcessSetExitExceptionByIndex(
+ cp, (int)WTERMSIG(cp->ProcessResults[prPipe].ExitCode), prPipe);
+ } else {
+ /* Error getting the child return code. */
+ strcpy(cp->ProcessResults[prPipe].ExitExceptionString,
+ "Error getting child return code.");
+ cp->ProcessResults[prPipe].State = kwsysProcess_StateByIndex_Error;
+ }
+ }
+ /* support legacy state status value */
+ cp->State = cp->ProcessResults[cp->NumberOfCommands - 1].State;
+ }
+ /* Normal cleanup. */
+ kwsysProcessCleanup(cp, 0);
+ return 1;
+}
+
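+/* The status decoding above follows the standard waitpid(2) pattern.  A
+   stand-alone sketch (kept out of the build; 'child' is a hypothetical
+   pid_t):  */
+#if 0
+  int status;
+  if (waitpid(child, &status, 0) > 0) {
+    if (WIFEXITED(status)) {
+      printf("exited with value %d\n", WEXITSTATUS(status));
+    } else if (WIFSIGNALED(status)) {
+      printf("terminated by signal %d\n", WTERMSIG(status));
+    }
+  }
+#endif
+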
+void kwsysProcess_Interrupt(kwsysProcess* cp)
+{
+ int i;
+ /* Make sure we are executing a process. */
+ if (!cp || cp->State != kwsysProcess_State_Executing || cp->TimeoutExpired ||
+ cp->Killed) {
+ return;
+ }
+
+ /* Interrupt the children. */
+ if (cp->CreateProcessGroup) {
+ if (cp->ForkPIDs) {
+ for (i = 0; i < cp->NumberOfCommands; ++i) {
+ /* Make sure the PID is still valid. */
+ if (cp->ForkPIDs[i]) {
+ /* The user created a process group for this process. The group ID
+ is the process ID for the original process in the group. */
+ kill(-cp->ForkPIDs[i], SIGINT);
+ }
+ }
+ }
+ } else {
+ /* No process group was created. Kill our own process group.
+ NOTE: While one could argue that we could call kill(cp->ForkPIDs[i],
+ SIGINT) as a way to still interrupt the process even though it's not in
+ a special group, this is not an option on Windows. Therefore, we kill
+ the current process group for consistency with Windows. */
+ kill(0, SIGINT);
+ }
+}
+
+void kwsysProcess_Kill(kwsysProcess* cp)
+{
+ int i;
+
+ /* Make sure we are executing a process. */
+ if (!cp || cp->State != kwsysProcess_State_Executing) {
+ return;
+ }
+
+ /* First close the child exit report pipe write end to avoid causing a
+ SIGPIPE when the child terminates and our signal handler tries to
+ report it after we have already closed the read end. */
+ kwsysProcessCleanupDescriptor(&cp->SignalPipe);
+
+#if !defined(__APPLE__)
+ /* Close all the pipe read ends. Do this before killing the
+ children because Cygwin has problems killing processes that are
+ blocking to wait for writing to their output pipes. */
+ kwsysProcessClosePipes(cp);
+#endif
+
+ /* Kill the children. */
+ cp->Killed = 1;
+ for (i = 0; i < cp->NumberOfCommands; ++i) {
+ int status;
+ if (cp->ForkPIDs[i]) {
+ /* Kill the child. */
+ kwsysProcessKill(cp->ForkPIDs[i]);
+
+ /* Reap the child. Keep trying until the call is not
+ interrupted. */
+ while ((waitpid(cp->ForkPIDs[i], &status, 0) < 0) && (errno == EINTR))
+ ;
+ }
+ }
+
+#if defined(__APPLE__)
+ /* Close all the pipe read ends. Do this after killing the
+ children because OS X has problems closing pipe read ends whose
+ pipes are full and still have an open write end. */
+ kwsysProcessClosePipes(cp);
+#endif
+
+ cp->CommandsLeft = 0;
+}
+
+/* Call the free() function with a pointer to volatile without causing
+ compiler warnings. */
+static void kwsysProcessVolatileFree(volatile void* p)
+{
+/* clang has made it impossible to free memory that points to volatile
+ without first using special pragmas to disable a warning... */
+#if defined(__clang__) && !defined(__INTEL_COMPILER)
+# pragma clang diagnostic push
+# pragma clang diagnostic ignored "-Wcast-qual"
+#endif
+ free((void*)p); /* The cast will silence most compilers, but not clang. */
+#if defined(__clang__) && !defined(__INTEL_COMPILER)
+# pragma clang diagnostic pop
+#endif
+}
+
+/* Initialize a process control structure for kwsysProcess_Execute. */
+static int kwsysProcessInitialize(kwsysProcess* cp)
+{
+ int i;
+ volatile pid_t* oldForkPIDs;
+ for (i = 0; i < KWSYSPE_PIPE_COUNT; ++i) {
+ cp->PipeReadEnds[i] = -1;
+ }
+ for (i = 0; i < 3; ++i) {
+ cp->PipeChildStd[i] = -1;
+ }
+ cp->SignalPipe = -1;
+ cp->SelectError = 0;
+ cp->StartTime.tv_sec = -1;
+ cp->StartTime.tv_usec = -1;
+ cp->TimeoutTime.tv_sec = -1;
+ cp->TimeoutTime.tv_usec = -1;
+ cp->TimeoutExpired = 0;
+ cp->PipesLeft = 0;
+ cp->CommandsLeft = 0;
+#if KWSYSPE_USE_SELECT
+ FD_ZERO(&cp->PipeSet);
+#endif
+ cp->State = kwsysProcess_State_Starting;
+ cp->Killed = 0;
+ cp->ErrorMessage[0] = 0;
+
+ oldForkPIDs = cp->ForkPIDs;
+ cp->ForkPIDs = (volatile pid_t*)malloc(sizeof(volatile pid_t) *
+ (size_t)(cp->NumberOfCommands));
+ kwsysProcessVolatileFree(oldForkPIDs);
+ if (!cp->ForkPIDs) {
+ return 0;
+ }
+ for (i = 0; i < cp->NumberOfCommands; ++i) {
+ cp->ForkPIDs[i] = 0; /* can't use memset due to volatile */
+ }
+
+ free(cp->CommandExitCodes);
+ cp->CommandExitCodes =
+ (int*)malloc(sizeof(int) * (size_t)(cp->NumberOfCommands));
+ if (!cp->CommandExitCodes) {
+ return 0;
+ }
+ memset(cp->CommandExitCodes, 0,
+ sizeof(int) * (size_t)(cp->NumberOfCommands));
+
+ /* Allocate process result information for each process. */
+ free(cp->ProcessResults);
+ cp->ProcessResults = (kwsysProcessResults*)malloc(
+ sizeof(kwsysProcessResults) * (size_t)(cp->NumberOfCommands));
+ if (!cp->ProcessResults) {
+ return 0;
+ }
+ memset(cp->ProcessResults, 0,
+ sizeof(kwsysProcessResults) * (size_t)(cp->NumberOfCommands));
+ for (i = 0; i < cp->NumberOfCommands; i++) {
+ cp->ProcessResults[i].ExitException = kwsysProcess_Exception_None;
+ cp->ProcessResults[i].State = kwsysProcess_StateByIndex_Starting;
+ cp->ProcessResults[i].ExitCode = 1;
+ cp->ProcessResults[i].ExitValue = 1;
+ strcpy(cp->ProcessResults[i].ExitExceptionString, "No exception");
+ }
+
+ /* Allocate memory to save the real working directory. */
+ if (cp->WorkingDirectory) {
+#if defined(MAXPATHLEN)
+ cp->RealWorkingDirectoryLength = MAXPATHLEN;
+#elif defined(PATH_MAX)
+ cp->RealWorkingDirectoryLength = PATH_MAX;
+#else
+ cp->RealWorkingDirectoryLength = 4096;
+#endif
+ cp->RealWorkingDirectory =
+ (char*)malloc((size_t)(cp->RealWorkingDirectoryLength));
+ if (!cp->RealWorkingDirectory) {
+ return 0;
+ }
+ }
+
+ return 1;
+}
+
+/* Free all resources used by the given kwsysProcess instance that were
+ allocated by kwsysProcess_Execute. */
+static void kwsysProcessCleanup(kwsysProcess* cp, int error)
+{
+ int i;
+
+ if (error) {
+ /* We are cleaning up due to an error. Report the error message
+ if one has not been provided already. */
+ if (cp->ErrorMessage[0] == 0) {
+ strncpy(cp->ErrorMessage, strerror(errno), KWSYSPE_PIPE_BUFFER_SIZE);
+ }
+
+ /* Set the error state. */
+ cp->State = kwsysProcess_State_Error;
+
+ /* Kill any children already started. */
+ if (cp->ForkPIDs) {
+ int status;
+ for (i = 0; i < cp->NumberOfCommands; ++i) {
+ if (cp->ForkPIDs[i]) {
+ /* Kill the child. */
+ kwsysProcessKill(cp->ForkPIDs[i]);
+
+ /* Reap the child. Keep trying until the call is not
+ interrupted. */
+ while ((waitpid(cp->ForkPIDs[i], &status, 0) < 0) &&
+ (errno == EINTR))
+ ;
+ }
+ }
+ }
+
+ /* Restore the working directory. */
+ if (cp->RealWorkingDirectory) {
+ while ((chdir(cp->RealWorkingDirectory) < 0) && (errno == EINTR))
+ ;
+ }
+ }
+
+ /* If not creating a detached child, remove this object from the
+ global set of process objects that wish to be notified when a
+ child exits. */
+ if (!cp->OptionDetach) {
+ kwsysProcessesRemove(cp);
+ }
+
+ /* Free memory. */
+ if (cp->ForkPIDs) {
+ kwsysProcessVolatileFree(cp->ForkPIDs);
+ cp->ForkPIDs = 0;
+ }
+ if (cp->RealWorkingDirectory) {
+ free(cp->RealWorkingDirectory);
+ cp->RealWorkingDirectory = 0;
+ }
+
+ /* Close pipe handles. */
+ for (i = 0; i < KWSYSPE_PIPE_COUNT; ++i) {
+ kwsysProcessCleanupDescriptor(&cp->PipeReadEnds[i]);
+ }
+ for (i = 0; i < 3; ++i) {
+ kwsysProcessCleanupDescriptor(&cp->PipeChildStd[i]);
+ }
+}
+
+/* Close the given file descriptor if it is open. Reset its value to -1. */
+static void kwsysProcessCleanupDescriptor(int* pfd)
+{
+ if (pfd && *pfd > 2) {
+ /* Keep trying to close until it is not interrupted by a
+ * signal. */
+ while ((close(*pfd) < 0) && (errno == EINTR))
+ ;
+ *pfd = -1;
+ }
+}
+
+static void kwsysProcessClosePipes(kwsysProcess* cp)
+{
+ int i;
+
+ /* Close any pipes that are still open. */
+ for (i = 0; i < KWSYSPE_PIPE_COUNT; ++i) {
+ if (cp->PipeReadEnds[i] >= 0) {
+#if KWSYSPE_USE_SELECT
+ /* If the pipe was reported by the last call to select, we must
+ read from it. This is needed to satisfy the suggestions from
+ "man select_tut" and is not needed for the polling
+ implementation. Ignore the data. */
+ if (FD_ISSET(cp->PipeReadEnds[i], &cp->PipeSet)) {
+ /* We are handling this pipe now. Remove it from the set. */
+ FD_CLR(cp->PipeReadEnds[i], &cp->PipeSet);
+
+ /* The pipe is ready to read without blocking. Keep trying to
+ read until the operation is not interrupted. */
+ while ((read(cp->PipeReadEnds[i], cp->PipeBuffer,
+ KWSYSPE_PIPE_BUFFER_SIZE) < 0) &&
+ (errno == EINTR))
+ ;
+ }
+#endif
+
+ /* We are done reading from this pipe. */
+ kwsysProcessCleanupDescriptor(&cp->PipeReadEnds[i]);
+ --cp->PipesLeft;
+ }
+ }
+}
+
+static int kwsysProcessSetNonBlocking(int fd)
+{
+ int flags = fcntl(fd, F_GETFL);
+ if (flags >= 0) {
+ flags = fcntl(fd, F_SETFL, flags | O_NONBLOCK);
+ }
+ return flags >= 0;
+}
+
+#if defined(__VMS)
+int decc$set_child_standard_streams(int fd1, int fd2, int fd3);
+#endif
+
+static int kwsysProcessCreate(kwsysProcess* cp, int prIndex,
+ kwsysProcessCreateInformation* si)
+{
+ sigset_t mask, old_mask;
+ int pgidPipe[2];
+ char tmp;
+ ssize_t readRes;
+
+ /* Create the error reporting pipe. */
+ if (pipe(si->ErrorPipe) < 0) {
+ return 0;
+ }
+
+ /* Create a pipe for detecting that the child process has created a process
+ group and session. */
+ if (pipe(pgidPipe) < 0) {
+ kwsysProcessCleanupDescriptor(&si->ErrorPipe[0]);
+ kwsysProcessCleanupDescriptor(&si->ErrorPipe[1]);
+ return 0;
+ }
+
+ /* Set close-on-exec flag on the pipe's write end. */
+ if (fcntl(si->ErrorPipe[1], F_SETFD, FD_CLOEXEC) < 0 ||
+ fcntl(pgidPipe[1], F_SETFD, FD_CLOEXEC) < 0) {
+ kwsysProcessCleanupDescriptor(&si->ErrorPipe[0]);
+ kwsysProcessCleanupDescriptor(&si->ErrorPipe[1]);
+ kwsysProcessCleanupDescriptor(&pgidPipe[0]);
+ kwsysProcessCleanupDescriptor(&pgidPipe[1]);
+ return 0;
+ }
+
+ /* Block SIGINT / SIGTERM while we start. The purpose is so that our signal
+ handler doesn't get called from the child process after the fork and
+ before the exec, and subsequently start kill()'ing PIDs from ForkPIDs. */
+ sigemptyset(&mask);
+ sigaddset(&mask, SIGINT);
+ sigaddset(&mask, SIGTERM);
+ if (sigprocmask(SIG_BLOCK, &mask, &old_mask) < 0) {
+ kwsysProcessCleanupDescriptor(&si->ErrorPipe[0]);
+ kwsysProcessCleanupDescriptor(&si->ErrorPipe[1]);
+ kwsysProcessCleanupDescriptor(&pgidPipe[0]);
+ kwsysProcessCleanupDescriptor(&pgidPipe[1]);
+ return 0;
+ }
+
+/* Fork off a child process. */
+#if defined(__VMS)
+ /* VMS needs vfork and execvp to be in the same function because
+ they use setjmp/longjmp to run the child startup code in the
+ parent! TODO: OptionDetach. Also
+ TODO: CreateProcessGroup. */
+ cp->ForkPIDs[prIndex] = vfork();
+#else
+ cp->ForkPIDs[prIndex] = kwsysProcessFork(cp, si);
+#endif
+ if (cp->ForkPIDs[prIndex] < 0) {
+ sigprocmask(SIG_SETMASK, &old_mask, 0);
+ kwsysProcessCleanupDescriptor(&si->ErrorPipe[0]);
+ kwsysProcessCleanupDescriptor(&si->ErrorPipe[1]);
+ kwsysProcessCleanupDescriptor(&pgidPipe[0]);
+ kwsysProcessCleanupDescriptor(&pgidPipe[1]);
+ return 0;
+ }
+
+ if (cp->ForkPIDs[prIndex] == 0) {
+#if defined(__VMS)
+ /* Specify standard pipes for child process. */
+ decc$set_child_standard_streams(si->StdIn, si->StdOut, si->StdErr);
+#else
+ /* Close the read end of the error reporting / process group
+ setup pipe. */
+ close(si->ErrorPipe[0]);
+ close(pgidPipe[0]);
+
+ /* Setup the stdin, stdout, and stderr pipes. */
+ if (si->StdIn > 0) {
+ dup2(si->StdIn, 0);
+ } else if (si->StdIn < 0) {
+ close(0);
+ }
+ if (si->StdOut != 1) {
+ dup2(si->StdOut, 1);
+ }
+ if (si->StdErr != 2) {
+ dup2(si->StdErr, 2);
+ }
+
+ /* Clear the close-on-exec flag for stdin, stdout, and stderr.
+ All other pipe handles will be closed when exec succeeds. */
+ fcntl(0, F_SETFD, 0);
+ fcntl(1, F_SETFD, 0);
+ fcntl(2, F_SETFD, 0);
+
+ /* Restore all default signal handlers. */
+ kwsysProcessRestoreDefaultSignalHandlers();
+
+ /* Now that we have restored default signal handling and created the
+ process group, restore mask. */
+ sigprocmask(SIG_SETMASK, &old_mask, 0);
+
+ /* Create new process group. We use setsid instead of setpgid to avoid
+ the child getting hung up on signals like SIGTTOU. (In the real world,
+ this has been observed where "git svn" ends up calling the "resize"
+       program which opens /dev/tty.) */
+ if (cp->CreateProcessGroup && setsid() < 0) {
+ kwsysProcessChildErrorExit(si->ErrorPipe[1]);
+ }
+#endif
+
+ /* Execute the real process. If successful, this does not return. */
+ execvp(cp->Commands[prIndex][0], cp->Commands[prIndex]);
+ /* TODO: What does VMS do if the child fails to start? */
+ /* TODO: On VMS, how do we put the process in a new group? */
+
+ /* Failure. Report error to parent and terminate. */
+ kwsysProcessChildErrorExit(si->ErrorPipe[1]);
+ }
+
+#if defined(__VMS)
+ /* Restore the standard pipes of this process. */
+ decc$set_child_standard_streams(0, 1, 2);
+#endif
+
+ /* We are done with the error reporting pipe and process group setup pipe
+ write end. */
+ kwsysProcessCleanupDescriptor(&si->ErrorPipe[1]);
+ kwsysProcessCleanupDescriptor(&pgidPipe[1]);
+
+ /* Make sure the child is in the process group before we proceed. This
+ avoids race conditions with calls to the kill function that we make for
+ signalling process groups. */
+ while ((readRes = read(pgidPipe[0], &tmp, 1)) > 0)
+ ;
+ if (readRes < 0) {
+ sigprocmask(SIG_SETMASK, &old_mask, 0);
+ kwsysProcessCleanupDescriptor(&si->ErrorPipe[0]);
+ kwsysProcessCleanupDescriptor(&pgidPipe[0]);
+ return 0;
+ }
+ kwsysProcessCleanupDescriptor(&pgidPipe[0]);
+
+ /* Unmask signals. */
+ if (sigprocmask(SIG_SETMASK, &old_mask, 0) < 0) {
+ kwsysProcessCleanupDescriptor(&si->ErrorPipe[0]);
+ return 0;
+ }
+
+ /* A child has been created. */
+ ++cp->CommandsLeft;
+
+ /* Block until the child's exec call succeeds and closes the error
+ pipe or writes data to the pipe to report an error. */
+ {
+ kwsysProcess_ssize_t total = 0;
+ kwsysProcess_ssize_t n = 1;
+ /* Read the entire error message up to the length of our buffer. */
+ while (total < KWSYSPE_PIPE_BUFFER_SIZE && n > 0) {
+ /* Keep trying to read until the operation is not interrupted. */
+ while (((n = read(si->ErrorPipe[0], cp->ErrorMessage + total,
+ (size_t)(KWSYSPE_PIPE_BUFFER_SIZE - total))) < 0) &&
+ (errno == EINTR))
+ ;
+ if (n > 0) {
+ total += n;
+ }
+ }
+
+ /* We are done with the error reporting pipe read end. */
+ kwsysProcessCleanupDescriptor(&si->ErrorPipe[0]);
+
+ if (total > 0) {
+ /* The child failed to execute the process. */
+ return 0;
+ }
+ }
+
+ return 1;
+}
+
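+/* The function above relies on a classic trick: the error pipe's write end
+   carries FD_CLOEXEC, so a successful execvp closes it and the parent's
+   read returns zero bytes; any data on the pipe means the exec failed.  A
+   stripped-down sketch of just that mechanism (kept out of the build;
+   'argv' is hypothetical, retries and the pgid pipe are omitted).  */
+#if 0
+  int ep[2];
+  int err = 0;
+  ssize_t n;
+  pipe(ep);
+  fcntl(ep[1], F_SETFD, FD_CLOEXEC);
+  if (fork() == 0) {
+    close(ep[0]);
+    execvp(argv[0], argv); /* on success this closes ep[1] */
+    err = errno;           /* reached only if the exec failed */
+    write(ep[1], &err, sizeof(err));
+    _exit(1);
+  }
+  close(ep[1]);
+  n = read(ep[0], &err, sizeof(err)); /* n == 0 means the exec succeeded */
+  close(ep[0]);
+#endif
+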
+static void kwsysProcessDestroy(kwsysProcess* cp)
+{
+ /* A child process has terminated. Reap it if it is one handled by
+ this object. */
+ int i;
+ /* Temporarily disable signals that access ForkPIDs. We don't want them to
+ read a reaped PID, and writes to ForkPIDs are not atomic. */
+ sigset_t mask, old_mask;
+ sigemptyset(&mask);
+ sigaddset(&mask, SIGINT);
+ sigaddset(&mask, SIGTERM);
+ if (sigprocmask(SIG_BLOCK, &mask, &old_mask) < 0) {
+ return;
+ }
+
+ for (i = 0; i < cp->NumberOfCommands; ++i) {
+ if (cp->ForkPIDs[i]) {
+ int result;
+ while (((result = waitpid(cp->ForkPIDs[i], &cp->CommandExitCodes[i],
+ WNOHANG)) < 0) &&
+ (errno == EINTR))
+ ;
+ if (result > 0) {
+ /* This child has terminated. */
+ cp->ForkPIDs[i] = 0;
+ if (--cp->CommandsLeft == 0) {
+ /* All children have terminated. Close the signal pipe
+ write end so that no more notifications are sent to this
+ object. */
+ kwsysProcessCleanupDescriptor(&cp->SignalPipe);
+
+ /* TODO: Once the children have terminated, switch
+ WaitForData to use a non-blocking read to get the
+ rest of the data from the pipe. This is needed when
+ grandchildren keep the output pipes open. */
+ }
+ } else if (result < 0 && cp->State != kwsysProcess_State_Error) {
+ /* Unexpected error. Report the first time this happens. */
+ strncpy(cp->ErrorMessage, strerror(errno), KWSYSPE_PIPE_BUFFER_SIZE);
+ cp->State = kwsysProcess_State_Error;
+ }
+ }
+ }
+
+ /* Re-enable signals. */
+ sigprocmask(SIG_SETMASK, &old_mask, 0);
+}
+
+static int kwsysProcessSetupOutputPipeFile(int* p, const char* name)
+{
+ int fout;
+ if (!name) {
+ return 1;
+ }
+
+ /* Close the existing descriptor. */
+ kwsysProcessCleanupDescriptor(p);
+
+ /* Open a file for the pipe to write. */
+ if ((fout = open(name, O_WRONLY | O_CREAT | O_TRUNC, 0666)) < 0) {
+ return 0;
+ }
+
+ /* Set close-on-exec flag on the pipe's end. */
+ if (fcntl(fout, F_SETFD, FD_CLOEXEC) < 0) {
+ close(fout);
+ return 0;
+ }
+
+ /* Assign the replacement descriptor. */
+ *p = fout;
+ return 1;
+}
+
+static int kwsysProcessSetupOutputPipeNative(int* p, int des[2])
+{
+ /* Close the existing descriptor. */
+ kwsysProcessCleanupDescriptor(p);
+
+ /* Set close-on-exec flag on the pipe's ends. The proper end will
+ be dup2-ed into the standard descriptor number after fork but
+ before exec. */
+ if ((fcntl(des[0], F_SETFD, FD_CLOEXEC) < 0) ||
+ (fcntl(des[1], F_SETFD, FD_CLOEXEC) < 0)) {
+ return 0;
+ }
+
+ /* Assign the replacement descriptor. */
+ *p = des[1];
+ return 1;
+}
+
+/* Get the time at which either the process or user timeout will
+ expire. Returns 1 if the user timeout is first, and 0 otherwise. */
+static int kwsysProcessGetTimeoutTime(kwsysProcess* cp, double* userTimeout,
+ kwsysProcessTime* timeoutTime)
+{
+ /* The first time this is called, we need to calculate the time at
+ which the child will timeout. */
+ if (cp->Timeout > 0 && cp->TimeoutTime.tv_sec < 0) {
+ kwsysProcessTime length = kwsysProcessTimeFromDouble(cp->Timeout);
+ cp->TimeoutTime = kwsysProcessTimeAdd(cp->StartTime, length);
+ }
+
+ /* Start with process timeout. */
+ *timeoutTime = cp->TimeoutTime;
+
+ /* Check if the user timeout is earlier. */
+ if (userTimeout) {
+ kwsysProcessTime currentTime = kwsysProcessTimeGetCurrent();
+ kwsysProcessTime userTimeoutLength =
+ kwsysProcessTimeFromDouble(*userTimeout);
+ kwsysProcessTime userTimeoutTime =
+ kwsysProcessTimeAdd(currentTime, userTimeoutLength);
+ if (timeoutTime->tv_sec < 0 ||
+ kwsysProcessTimeLess(userTimeoutTime, *timeoutTime)) {
+ *timeoutTime = userTimeoutTime;
+ return 1;
+ }
+ }
+ return 0;
+}
+
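+/* Worked example (illustrative): with cp->Timeout == 10.0 and a
+   *userTimeout of 2.5, the process deadline is StartTime + 10 s while the
+   user deadline is "now + 2.5 s".  If the user deadline comes first, the
+   function above stores it in *timeoutTime and returns 1, so the caller
+   knows an expiry means the user timeout, not the process timeout.  */
+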
+/* Get the length of time before the given timeout time arrives.
+ Returns 1 if the time has already arrived, and 0 otherwise. */
+static int kwsysProcessGetTimeoutLeft(kwsysProcessTime* timeoutTime,
+ double* userTimeout,
+ kwsysProcessTimeNative* timeoutLength,
+ int zeroIsExpired)
+{
+ if (timeoutTime->tv_sec < 0) {
+ /* No timeout time has been requested. */
+ return 0;
+ } else {
+ /* Calculate the remaining time. */
+ kwsysProcessTime currentTime = kwsysProcessTimeGetCurrent();
+ kwsysProcessTime timeLeft =
+ kwsysProcessTimeSubtract(*timeoutTime, currentTime);
+ if (timeLeft.tv_sec < 0 && userTimeout && *userTimeout <= 0) {
+ /* Caller has explicitly requested a zero timeout. */
+ timeLeft.tv_sec = 0;
+ timeLeft.tv_usec = 0;
+ }
+
+ if (timeLeft.tv_sec < 0 ||
+ (timeLeft.tv_sec == 0 && timeLeft.tv_usec == 0 && zeroIsExpired)) {
+ /* Timeout has already expired. */
+ return 1;
+ } else {
+ /* There is some time left. */
+ timeoutLength->tv_sec = timeLeft.tv_sec;
+ timeoutLength->tv_usec = timeLeft.tv_usec;
+ return 0;
+ }
+ }
+}
+
+static kwsysProcessTime kwsysProcessTimeGetCurrent(void)
+{
+ kwsysProcessTime current;
+ kwsysProcessTimeNative current_native;
+#if KWSYS_C_HAS_CLOCK_GETTIME_MONOTONIC
+ struct timespec current_timespec;
+ clock_gettime(CLOCK_MONOTONIC, &current_timespec);
+
+ current_native.tv_sec = current_timespec.tv_sec;
+ current_native.tv_usec = current_timespec.tv_nsec / 1000;
+#else
+ gettimeofday(&current_native, 0);
+#endif
+ current.tv_sec = (long)current_native.tv_sec;
+ current.tv_usec = (long)current_native.tv_usec;
+ return current;
+}
+
+static double kwsysProcessTimeToDouble(kwsysProcessTime t)
+{
+ return (double)t.tv_sec + (double)(t.tv_usec) * 0.000001;
+}
+
+static kwsysProcessTime kwsysProcessTimeFromDouble(double d)
+{
+ kwsysProcessTime t;
+ t.tv_sec = (long)d;
+ t.tv_usec = (long)((d - (double)(t.tv_sec)) * 1000000);
+ return t;
+}
+
+static int kwsysProcessTimeLess(kwsysProcessTime in1, kwsysProcessTime in2)
+{
+ return ((in1.tv_sec < in2.tv_sec) ||
+ ((in1.tv_sec == in2.tv_sec) && (in1.tv_usec < in2.tv_usec)));
+}
+
+static kwsysProcessTime kwsysProcessTimeAdd(kwsysProcessTime in1,
+ kwsysProcessTime in2)
+{
+ kwsysProcessTime out;
+ out.tv_sec = in1.tv_sec + in2.tv_sec;
+ out.tv_usec = in1.tv_usec + in2.tv_usec;
+ if (out.tv_usec >= 1000000) {
+ out.tv_usec -= 1000000;
+ out.tv_sec += 1;
+ }
+ return out;
+}
+
+static kwsysProcessTime kwsysProcessTimeSubtract(kwsysProcessTime in1,
+ kwsysProcessTime in2)
+{
+ kwsysProcessTime out;
+ out.tv_sec = in1.tv_sec - in2.tv_sec;
+ out.tv_usec = in1.tv_usec - in2.tv_usec;
+ if (out.tv_usec < 0) {
+ out.tv_usec += 1000000;
+ out.tv_sec -= 1;
+ }
+ return out;
+}
+
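+/* Worked example (illustrative) for the two functions above: adding
+   {3 s, 900000 us} to {0 s, 200000 us} first yields {3 s, 1100000 us},
+   which the carry in kwsysProcessTimeAdd normalizes to {4 s, 100000 us};
+   kwsysProcessTimeSubtract borrows the same way, so
+   {4 s, 100000 us} - {0 s, 200000 us} gives back {3 s, 900000 us}.  */
+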
+#define KWSYSPE_CASE(type, str) \
+ cp->ProcessResults[idx].ExitException = kwsysProcess_Exception_##type; \
+ strcpy(cp->ProcessResults[idx].ExitExceptionString, str)
+static void kwsysProcessSetExitExceptionByIndex(kwsysProcess* cp, int sig,
+ int idx)
+{
+ switch (sig) {
+#ifdef SIGSEGV
+ case SIGSEGV:
+ KWSYSPE_CASE(Fault, "Segmentation fault");
+ break;
+#endif
+#ifdef SIGBUS
+# if !defined(SIGSEGV) || SIGBUS != SIGSEGV
+ case SIGBUS:
+ KWSYSPE_CASE(Fault, "Bus error");
+ break;
+# endif
+#endif
+#ifdef SIGFPE
+ case SIGFPE:
+ KWSYSPE_CASE(Numerical, "Floating-point exception");
+ break;
+#endif
+#ifdef SIGILL
+ case SIGILL:
+ KWSYSPE_CASE(Illegal, "Illegal instruction");
+ break;
+#endif
+#ifdef SIGINT
+ case SIGINT:
+ KWSYSPE_CASE(Interrupt, "User interrupt");
+ break;
+#endif
+#ifdef SIGABRT
+ case SIGABRT:
+ KWSYSPE_CASE(Other, "Child aborted");
+ break;
+#endif
+#ifdef SIGKILL
+ case SIGKILL:
+ KWSYSPE_CASE(Other, "Child killed");
+ break;
+#endif
+#ifdef SIGTERM
+ case SIGTERM:
+ KWSYSPE_CASE(Other, "Child terminated");
+ break;
+#endif
+#ifdef SIGHUP
+ case SIGHUP:
+ KWSYSPE_CASE(Other, "SIGHUP");
+ break;
+#endif
+#ifdef SIGQUIT
+ case SIGQUIT:
+ KWSYSPE_CASE(Other, "SIGQUIT");
+ break;
+#endif
+#ifdef SIGTRAP
+ case SIGTRAP:
+ KWSYSPE_CASE(Other, "SIGTRAP");
+ break;
+#endif
+#ifdef SIGIOT
+# if !defined(SIGABRT) || SIGIOT != SIGABRT
+ case SIGIOT:
+ KWSYSPE_CASE(Other, "SIGIOT");
+ break;
+# endif
+#endif
+#ifdef SIGUSR1
+ case SIGUSR1:
+ KWSYSPE_CASE(Other, "SIGUSR1");
+ break;
+#endif
+#ifdef SIGUSR2
+ case SIGUSR2:
+ KWSYSPE_CASE(Other, "SIGUSR2");
+ break;
+#endif
+#ifdef SIGPIPE
+ case SIGPIPE:
+ KWSYSPE_CASE(Other, "SIGPIPE");
+ break;
+#endif
+#ifdef SIGALRM
+ case SIGALRM:
+ KWSYSPE_CASE(Other, "SIGALRM");
+ break;
+#endif
+#ifdef SIGSTKFLT
+ case SIGSTKFLT:
+ KWSYSPE_CASE(Other, "SIGSTKFLT");
+ break;
+#endif
+#ifdef SIGCHLD
+ case SIGCHLD:
+ KWSYSPE_CASE(Other, "SIGCHLD");
+ break;
+#elif defined(SIGCLD)
+ case SIGCLD:
+ KWSYSPE_CASE(Other, "SIGCLD");
+ break;
+#endif
+#ifdef SIGCONT
+ case SIGCONT:
+ KWSYSPE_CASE(Other, "SIGCONT");
+ break;
+#endif
+#ifdef SIGSTOP
+ case SIGSTOP:
+ KWSYSPE_CASE(Other, "SIGSTOP");
+ break;
+#endif
+#ifdef SIGTSTP
+ case SIGTSTP:
+ KWSYSPE_CASE(Other, "SIGTSTP");
+ break;
+#endif
+#ifdef SIGTTIN
+ case SIGTTIN:
+ KWSYSPE_CASE(Other, "SIGTTIN");
+ break;
+#endif
+#ifdef SIGTTOU
+ case SIGTTOU:
+ KWSYSPE_CASE(Other, "SIGTTOU");
+ break;
+#endif
+#ifdef SIGURG
+ case SIGURG:
+ KWSYSPE_CASE(Other, "SIGURG");
+ break;
+#endif
+#ifdef SIGXCPU
+ case SIGXCPU:
+ KWSYSPE_CASE(Other, "SIGXCPU");
+ break;
+#endif
+#ifdef SIGXFSZ
+ case SIGXFSZ:
+ KWSYSPE_CASE(Other, "SIGXFSZ");
+ break;
+#endif
+#ifdef SIGVTALRM
+ case SIGVTALRM:
+ KWSYSPE_CASE(Other, "SIGVTALRM");
+ break;
+#endif
+#ifdef SIGPROF
+ case SIGPROF:
+ KWSYSPE_CASE(Other, "SIGPROF");
+ break;
+#endif
+#ifdef SIGWINCH
+ case SIGWINCH:
+ KWSYSPE_CASE(Other, "SIGWINCH");
+ break;
+#endif
+#ifdef SIGPOLL
+ case SIGPOLL:
+ KWSYSPE_CASE(Other, "SIGPOLL");
+ break;
+#endif
+#ifdef SIGIO
+# if !defined(SIGPOLL) || SIGIO != SIGPOLL
+ case SIGIO:
+ KWSYSPE_CASE(Other, "SIGIO");
+ break;
+# endif
+#endif
+#ifdef SIGPWR
+ case SIGPWR:
+ KWSYSPE_CASE(Other, "SIGPWR");
+ break;
+#endif
+#ifdef SIGSYS
+ case SIGSYS:
+ KWSYSPE_CASE(Other, "SIGSYS");
+ break;
+#endif
+#ifdef SIGUNUSED
+# if !defined(SIGSYS) || SIGUNUSED != SIGSYS
+ case SIGUNUSED:
+ KWSYSPE_CASE(Other, "SIGUNUSED");
+ break;
+# endif
+#endif
+ default:
+ cp->ProcessResults[idx].ExitException = kwsysProcess_Exception_Other;
+ sprintf(cp->ProcessResults[idx].ExitExceptionString, "Signal %d", sig);
+ break;
+ }
+}
+#undef KWSYSPE_CASE
+
+/* When the child process encounters an error before its program is
+ invoked, this is called to report the error to the parent and
+ exit. */
+static void kwsysProcessChildErrorExit(int errorPipe)
+{
+ /* Construct the error message. */
+ char buffer[KWSYSPE_PIPE_BUFFER_SIZE];
+ kwsysProcess_ssize_t result;
+ strncpy(buffer, strerror(errno), KWSYSPE_PIPE_BUFFER_SIZE);
+ buffer[KWSYSPE_PIPE_BUFFER_SIZE - 1] = '\0';
+
+ /* Report the error to the parent through the special pipe. */
+ result = write(errorPipe, buffer, strlen(buffer));
+ (void)result;
+
+ /* Terminate without cleanup. */
+ _exit(1);
+}
+
+/* Restores all signal handlers to their default values. */
+static void kwsysProcessRestoreDefaultSignalHandlers(void)
+{
+ struct sigaction act;
+ memset(&act, 0, sizeof(struct sigaction));
+ act.sa_handler = SIG_DFL;
+#ifdef SIGHUP
+ sigaction(SIGHUP, &act, 0);
+#endif
+#ifdef SIGINT
+ sigaction(SIGINT, &act, 0);
+#endif
+#ifdef SIGQUIT
+ sigaction(SIGQUIT, &act, 0);
+#endif
+#ifdef SIGILL
+ sigaction(SIGILL, &act, 0);
+#endif
+#ifdef SIGTRAP
+ sigaction(SIGTRAP, &act, 0);
+#endif
+#ifdef SIGABRT
+ sigaction(SIGABRT, &act, 0);
+#endif
+#ifdef SIGIOT
+ sigaction(SIGIOT, &act, 0);
+#endif
+#ifdef SIGBUS
+ sigaction(SIGBUS, &act, 0);
+#endif
+#ifdef SIGFPE
+ sigaction(SIGFPE, &act, 0);
+#endif
+#ifdef SIGUSR1
+ sigaction(SIGUSR1, &act, 0);
+#endif
+#ifdef SIGSEGV
+ sigaction(SIGSEGV, &act, 0);
+#endif
+#ifdef SIGUSR2
+ sigaction(SIGUSR2, &act, 0);
+#endif
+#ifdef SIGPIPE
+ sigaction(SIGPIPE, &act, 0);
+#endif
+#ifdef SIGALRM
+ sigaction(SIGALRM, &act, 0);
+#endif
+#ifdef SIGTERM
+ sigaction(SIGTERM, &act, 0);
+#endif
+#ifdef SIGSTKFLT
+ sigaction(SIGSTKFLT, &act, 0);
+#endif
+#ifdef SIGCLD
+ sigaction(SIGCLD, &act, 0);
+#endif
+#ifdef SIGCHLD
+ sigaction(SIGCHLD, &act, 0);
+#endif
+#ifdef SIGCONT
+ sigaction(SIGCONT, &act, 0);
+#endif
+#ifdef SIGTSTP
+ sigaction(SIGTSTP, &act, 0);
+#endif
+#ifdef SIGTTIN
+ sigaction(SIGTTIN, &act, 0);
+#endif
+#ifdef SIGTTOU
+ sigaction(SIGTTOU, &act, 0);
+#endif
+#ifdef SIGURG
+ sigaction(SIGURG, &act, 0);
+#endif
+#ifdef SIGXCPU
+ sigaction(SIGXCPU, &act, 0);
+#endif
+#ifdef SIGXFSZ
+ sigaction(SIGXFSZ, &act, 0);
+#endif
+#ifdef SIGVTALRM
+ sigaction(SIGVTALRM, &act, 0);
+#endif
+#ifdef SIGPROF
+ sigaction(SIGPROF, &act, 0);
+#endif
+#ifdef SIGWINCH
+ sigaction(SIGWINCH, &act, 0);
+#endif
+#ifdef SIGPOLL
+ sigaction(SIGPOLL, &act, 0);
+#endif
+#ifdef SIGIO
+ sigaction(SIGIO, &act, 0);
+#endif
+#ifdef SIGPWR
+ sigaction(SIGPWR, &act, 0);
+#endif
+#ifdef SIGSYS
+ sigaction(SIGSYS, &act, 0);
+#endif
+#ifdef SIGUNUSED
+ sigaction(SIGUNUSED, &act, 0);
+#endif
+}
+
+static void kwsysProcessExit(void)
+{
+ _exit(0);
+}
+
+#if !defined(__VMS)
+static pid_t kwsysProcessFork(kwsysProcess* cp,
+ kwsysProcessCreateInformation* si)
+{
+ /* Create a detached process if requested. */
+ if (cp->OptionDetach) {
+ /* Create an intermediate process. */
+ pid_t middle_pid = fork();
+ if (middle_pid < 0) {
+ /* Fork failed. Return as if we were not detaching. */
+ return middle_pid;
+ } else if (middle_pid == 0) {
+ /* This is the intermediate process. Create the real child. */
+ pid_t child_pid = fork();
+ if (child_pid == 0) {
+ /* This is the real child process. There is nothing to do here. */
+ return 0;
+ } else {
+ /* Use the error pipe to report the pid to the real parent. */
+ while ((write(si->ErrorPipe[1], &child_pid, sizeof(child_pid)) < 0) &&
+ (errno == EINTR))
+ ;
+
+ /* Exit without cleanup. The parent holds all resources. */
+ kwsysProcessExit();
+ return 0; /* Never reached, but avoids SunCC warning. */
+ }
+ } else {
+ /* This is the original parent process. The intermediate
+ process will use the error pipe to report the pid of the
+ detached child. */
+ pid_t child_pid;
+ int status;
+ while ((read(si->ErrorPipe[0], &child_pid, sizeof(child_pid)) < 0) &&
+ (errno == EINTR))
+ ;
+
+ /* Wait for the intermediate process to exit and clean it up. */
+ while ((waitpid(middle_pid, &status, 0) < 0) && (errno == EINTR))
+ ;
+ return child_pid;
+ }
+ } else {
+ /* Not creating a detached process. Use normal fork. */
+ return fork();
+ }
+}
+#endif
+
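+/* The detached case above is the classic double fork: an intermediate
+   child forks the real worker and exits immediately, so the worker is
+   re-parented to init and the original parent never has to reap it.  A
+   minimal sketch without the pid hand-off pipe (kept out of the build;
+   'argv' is hypothetical).  */
+#if 0
+  pid_t mid = fork();
+  if (mid == 0) {
+    if (fork() == 0) {
+      /* grandchild: becomes the detached worker */
+      execvp(argv[0], argv);
+      _exit(1);
+    }
+    _exit(0); /* the intermediate exits at once; the grandchild is orphaned */
+  }
+  waitpid(mid, 0, 0); /* the parent reaps only the intermediate */
+#endif
+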
+/* We try to obtain process information by invoking the ps command.
+ Here we define the command to call on each platform and the
+ corresponding parsing format string. The parsing format should
+ have two integers to store: the pid and then the ppid. */
+#if defined(__linux__) || defined(__APPLE__) || defined(__FreeBSD__) || \
+ defined(__OpenBSD__) || defined(__GLIBC__) || defined(__GNU__)
+# define KWSYSPE_PS_COMMAND "ps axo pid,ppid"
+# define KWSYSPE_PS_FORMAT "%d %d\n"
+#elif defined(__sun) && (defined(__SVR4) || defined(__svr4__)) /* Solaris */
+# define KWSYSPE_PS_COMMAND "ps -e -o pid,ppid"
+# define KWSYSPE_PS_FORMAT "%d %d\n"
+#elif defined(__hpux) || defined(__sun__) || defined(__sgi) || \
+ defined(_AIX) || defined(__sparc)
+# define KWSYSPE_PS_COMMAND "ps -ef"
+# define KWSYSPE_PS_FORMAT "%*s %d %d %*[^\n]\n"
+#elif defined(__QNX__)
+# define KWSYSPE_PS_COMMAND "ps -Af"
+# define KWSYSPE_PS_FORMAT "%*d %d %d %*[^\n]\n"
+#elif defined(__CYGWIN__)
+# define KWSYSPE_PS_COMMAND "ps aux"
+# define KWSYSPE_PS_FORMAT "%d %d %*[^\n]\n"
+#endif
+
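+/* For example, on Linux KWSYSPE_PS_COMMAND prints a header line followed
+   by lines such as "  1234     1" and "  1240  1234"; after the header is
+   skipped, each line is consumed with KWSYSPE_PS_FORMAT ("%d %d\n") to
+   yield (pid, ppid) pairs, and any pid whose ppid matches the process
+   being killed is killed recursively.  (The sample pids are
+   illustrative.)  */
+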
+void kwsysProcess_KillPID(unsigned long process_id)
+{
+ kwsysProcessKill((pid_t)process_id);
+}
+
+static void kwsysProcessKill(pid_t process_id)
+{
+#if defined(__linux__) || defined(__CYGWIN__)
+ DIR* procdir;
+#endif
+
+ /* Suspend the process to be sure it will not create more children. */
+ kill(process_id, SIGSTOP);
+
+#if defined(__CYGWIN__)
+ /* Some Cygwin versions seem to need help here. Give up our time slice
+ so that the child can process SIGSTOP before we send SIGKILL. */
+ usleep(1);
+#endif
+
+/* Kill all children if we can find them. */
+#if defined(__linux__) || defined(__CYGWIN__)
+ /* First try using the /proc filesystem. */
+ if ((procdir = opendir("/proc")) != NULL) {
+# if defined(MAXPATHLEN)
+ char fname[MAXPATHLEN];
+# elif defined(PATH_MAX)
+ char fname[PATH_MAX];
+# else
+ char fname[4096];
+# endif
+ char buffer[KWSYSPE_PIPE_BUFFER_SIZE + 1];
+ struct dirent* d;
+
+ /* Each process has a directory in /proc whose name is the pid.
+ Within this directory is a file called stat that has the
+ following format:
+
+ pid (command line) status ppid ...
+
+ We want to get the ppid for all processes. Those that have
+ process_id as their parent should be recursively killed. */
+ for (d = readdir(procdir); d; d = readdir(procdir)) {
+ int pid;
+ if (sscanf(d->d_name, "%d", &pid) == 1 && pid != 0) {
+ struct stat finfo;
+ sprintf(fname, "/proc/%d/stat", pid);
+ if (stat(fname, &finfo) == 0) {
+ FILE* f = fopen(fname, "r");
+ if (f) {
+ size_t nread = fread(buffer, 1, KWSYSPE_PIPE_BUFFER_SIZE, f);
+ fclose(f);
+ buffer[nread] = '\0';
+ if (nread > 0) {
+ const char* rparen = strrchr(buffer, ')');
+ int ppid;
+ if (rparen && (sscanf(rparen + 1, "%*s %d", &ppid) == 1)) {
+ if (ppid == process_id) {
+ /* Recursively kill this child and its children. */
+ kwsysProcessKill(pid);
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ closedir(procdir);
+ } else
+#endif
+ {
+#if defined(KWSYSPE_PS_COMMAND)
+ /* Try running "ps" to get the process information. */
+ FILE* ps = popen(KWSYSPE_PS_COMMAND, "r");
+
+ /* Make sure the process started and provided a valid header. */
+ if (ps && fscanf(ps, "%*[^\n]\n") != EOF) {
+ /* Look for processes whose parent is the process being killed. */
+ int pid, ppid;
+ while (fscanf(ps, KWSYSPE_PS_FORMAT, &pid, &ppid) == 2) {
+ if (ppid == process_id) {
+ /* Recursively kill this child and its children. */
+ kwsysProcessKill(pid);
+ }
+ }
+ }
+
+ /* We are done with the ps process. */
+ if (ps) {
+ pclose(ps);
+ }
+#endif
+ }
+
+ /* Kill the process. */
+ kill(process_id, SIGKILL);
+
+#if defined(__APPLE__)
+ /* On OS X 10.3 the above SIGSTOP occasionally prevents the SIGKILL
+ from working. Just in case, we resume the child and kill it
+ again. There is a small race condition in this obscure case. If
+ the child manages to fork again between these two signals, we
+ will not catch its children. */
+ kill(process_id, SIGCONT);
+ kill(process_id, SIGKILL);
+#endif
+}
+
+#if defined(__VMS)
+int decc$feature_get_index(const char* name);
+int decc$feature_set_value(int index, int mode, int value);
+static int kwsysProcessSetVMSFeature(const char* name, int value)
+{
+ int i;
+ errno = 0;
+ i = decc$feature_get_index(name);
+ return i >= 0 && (decc$feature_set_value(i, 1, value) >= 0 || errno == 0);
+}
+#endif
+
+/* Global set of executing processes for use by the signal handler.
+ This global instance will be zero-initialized by the compiler. */
+typedef struct kwsysProcessInstances_s
+{
+ int Count;
+ int Size;
+ kwsysProcess** Processes;
+} kwsysProcessInstances;
+static kwsysProcessInstances kwsysProcesses;
+
+/* The old SIGCHLD / SIGINT / SIGTERM handlers. */
+static struct sigaction kwsysProcessesOldSigChldAction;
+static struct sigaction kwsysProcessesOldSigIntAction;
+static struct sigaction kwsysProcessesOldSigTermAction;
+
+static void kwsysProcessesUpdate(kwsysProcessInstances* newProcesses)
+{
+ /* Block signals while we update the set of pipes to check.
+ TODO: sigprocmask is undefined for threaded apps. See
+ pthread_sigmask. */
+ sigset_t newset;
+ sigset_t oldset;
+ sigemptyset(&newset);
+ sigaddset(&newset, SIGCHLD);
+ sigaddset(&newset, SIGINT);
+ sigaddset(&newset, SIGTERM);
+ sigprocmask(SIG_BLOCK, &newset, &oldset);
+
+ /* Store the new set in that seen by the signal handler. */
+ kwsysProcesses = *newProcesses;
+
+ /* Restore the signal mask to the previous setting. */
+ sigprocmask(SIG_SETMASK, &oldset, 0);
+}
+
+static int kwsysProcessesAdd(kwsysProcess* cp)
+{
+ /* Create a pipe through which the signal handler can notify the
+ given process object that a child has exited. */
+ {
+ /* Create the pipe. */
+ int p[2];
+ if (pipe(p KWSYSPE_VMS_NONBLOCK) < 0) {
+ return 0;
+ }
+
+ /* Store the pipes now to be sure they are cleaned up later. */
+ cp->PipeReadEnds[KWSYSPE_PIPE_SIGNAL] = p[0];
+ cp->SignalPipe = p[1];
+
+ /* Switch the pipe to non-blocking mode so that reading a byte can
+ be an atomic test-and-set. */
+ if (!kwsysProcessSetNonBlocking(p[0]) ||
+ !kwsysProcessSetNonBlocking(p[1])) {
+ return 0;
+ }
+
+ /* The children do not need this pipe. Set close-on-exec flag on
+ the pipe's ends. */
+ if ((fcntl(p[0], F_SETFD, FD_CLOEXEC) < 0) ||
+ (fcntl(p[1], F_SETFD, FD_CLOEXEC) < 0)) {
+ return 0;
+ }
+ }
+
+ /* Attempt to add the given signal pipe to the signal handler set. */
+ {
+
+ /* Make sure there is enough space for the new signal pipe. */
+ kwsysProcessInstances oldProcesses = kwsysProcesses;
+ kwsysProcessInstances newProcesses = oldProcesses;
+ if (oldProcesses.Count == oldProcesses.Size) {
+ /* Start with enough space for a small number of process instances
+ and double the size each time more is needed. */
+ newProcesses.Size = oldProcesses.Size ? oldProcesses.Size * 2 : 4;
+
+ /* Try allocating the new block of memory. */
+ if ((newProcesses.Processes = ((kwsysProcess**)malloc(
+ (size_t)(newProcesses.Size) * sizeof(kwsysProcess*))))) {
+ /* Copy the old pipe set to the new memory. */
+ if (oldProcesses.Count > 0) {
+ memcpy(newProcesses.Processes, oldProcesses.Processes,
+ ((size_t)(oldProcesses.Count) * sizeof(kwsysProcess*)));
+ }
+ } else {
+ /* Failed to allocate memory for the new signal pipe set. */
+ return 0;
+ }
+ }
+
+ /* Append the new signal pipe to the set. */
+ newProcesses.Processes[newProcesses.Count++] = cp;
+
+ /* Store the new set in that seen by the signal handler. */
+ kwsysProcessesUpdate(&newProcesses);
+
+ /* Free the original pipes if new ones were allocated. */
+ if (newProcesses.Processes != oldProcesses.Processes) {
+ free(oldProcesses.Processes);
+ }
+
+ /* If this is the first process, enable the signal handler. */
+ if (newProcesses.Count == 1) {
+ /* Install our handler for SIGCHLD. Repeat call until it is not
+ interrupted. */
+ struct sigaction newSigAction;
+ memset(&newSigAction, 0, sizeof(struct sigaction));
+#if KWSYSPE_USE_SIGINFO
+ newSigAction.sa_sigaction = kwsysProcessesSignalHandler;
+ newSigAction.sa_flags = SA_NOCLDSTOP | SA_SIGINFO;
+# ifdef SA_RESTART
+ newSigAction.sa_flags |= SA_RESTART;
+# endif
+#else
+ newSigAction.sa_handler = kwsysProcessesSignalHandler;
+ newSigAction.sa_flags = SA_NOCLDSTOP;
+#endif
+ sigemptyset(&newSigAction.sa_mask);
+ while ((sigaction(SIGCHLD, &newSigAction,
+ &kwsysProcessesOldSigChldAction) < 0) &&
+ (errno == EINTR))
+ ;
+
+ /* Install our handler for SIGINT / SIGTERM. Repeat call until
+ it is not interrupted. */
+ sigemptyset(&newSigAction.sa_mask);
+ sigaddset(&newSigAction.sa_mask, SIGTERM);
+ while ((sigaction(SIGINT, &newSigAction,
+ &kwsysProcessesOldSigIntAction) < 0) &&
+ (errno == EINTR))
+ ;
+
+ sigemptyset(&newSigAction.sa_mask);
+ sigaddset(&newSigAction.sa_mask, SIGINT);
+ while ((sigaction(SIGTERM, &newSigAction,
+                        &kwsysProcessesOldSigTermAction) < 0) &&
+ (errno == EINTR))
+ ;
+ }
+ }
+
+ return 1;
+}
+
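+/* The pipe registered above is an instance of the self-pipe trick: the
+   SIGCHLD handler only performs an async-signal-safe write of one byte,
+   and the select/poll loop treats the read end like any other data pipe.
+   A generic sketch of the idiom outside this library (kept out of the
+   build; names are hypothetical).  */
+#if 0
+static int self_pipe[2]; /* both ends set O_NONBLOCK and FD_CLOEXEC */
+
+static void on_sigchld(int sig)
+{
+  char byte = 1;
+  (void)sig;
+  (void)write(self_pipe[1], &byte, 1); /* async-signal-safe */
+}
+
+/* In the event loop: watch self_pipe[0] with select/poll; when it becomes
+   readable, drain it and call waitpid(..., WNOHANG) to reap children. */
+#endif
+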
+static void kwsysProcessesRemove(kwsysProcess* cp)
+{
+ /* Attempt to remove the given signal pipe from the signal handler set. */
+ {
+ /* Find the given process in the set. */
+ kwsysProcessInstances newProcesses = kwsysProcesses;
+ int i;
+ for (i = 0; i < newProcesses.Count; ++i) {
+ if (newProcesses.Processes[i] == cp) {
+ break;
+ }
+ }
+ if (i < newProcesses.Count) {
+ /* Remove the process from the set. */
+ --newProcesses.Count;
+ for (; i < newProcesses.Count; ++i) {
+ newProcesses.Processes[i] = newProcesses.Processes[i + 1];
+ }
+
+ /* If this was the last process, disable the signal handler. */
+ if (newProcesses.Count == 0) {
+ /* Restore the signal handlers. Repeat call until it is not
+ interrupted. */
+ while ((sigaction(SIGCHLD, &kwsysProcessesOldSigChldAction, 0) < 0) &&
+ (errno == EINTR))
+ ;
+ while ((sigaction(SIGINT, &kwsysProcessesOldSigIntAction, 0) < 0) &&
+ (errno == EINTR))
+ ;
+ while ((sigaction(SIGTERM, &kwsysProcessesOldSigTermAction, 0) < 0) &&
+ (errno == EINTR))
+ ;
+
+ /* Free the table of process pointers since it is now empty.
+ This is safe because the signal handler has been removed. */
+ newProcesses.Size = 0;
+ free(newProcesses.Processes);
+ newProcesses.Processes = 0;
+ }
+
+ /* Store the new set in that seen by the signal handler. */
+ kwsysProcessesUpdate(&newProcesses);
+ }
+ }
+
+ /* Close the pipe through which the signal handler may have notified
+ the given process object that a child has exited. */
+ kwsysProcessCleanupDescriptor(&cp->SignalPipe);
+}
+
+static void kwsysProcessesSignalHandler(int signum
+#if KWSYSPE_USE_SIGINFO
+ ,
+ siginfo_t* info, void* ucontext
+#endif
+)
+{
+ int i, j, procStatus, old_errno = errno;
+#if KWSYSPE_USE_SIGINFO
+ (void)info;
+ (void)ucontext;
+#endif
+
+ /* Signal all process objects that a child has terminated. */
+ switch (signum) {
+ case SIGCHLD:
+ for (i = 0; i < kwsysProcesses.Count; ++i) {
+ /* Set the pipe in a signalled state. */
+ char buf = 1;
+ kwsysProcess* cp = kwsysProcesses.Processes[i];
+ kwsysProcess_ssize_t pipeStatus =
+ read(cp->PipeReadEnds[KWSYSPE_PIPE_SIGNAL], &buf, 1);
+ (void)pipeStatus;
+ pipeStatus = write(cp->SignalPipe, &buf, 1);
+ (void)pipeStatus;
+ }
+ break;
+ case SIGINT:
+ case SIGTERM:
+ /* Signal child processes that are running in new process groups. */
+ for (i = 0; i < kwsysProcesses.Count; ++i) {
+ kwsysProcess* cp = kwsysProcesses.Processes[i];
+ /* Check Killed to avoid data race condition when killing.
+ Check State to avoid data race condition in kwsysProcessCleanup
+ when there is an error (it leaves a reaped PID). */
+ if (cp->CreateProcessGroup && !cp->Killed &&
+ cp->State != kwsysProcess_State_Error && cp->ForkPIDs) {
+ for (j = 0; j < cp->NumberOfCommands; ++j) {
+ /* Make sure the PID is still valid. */
+ if (cp->ForkPIDs[j]) {
+            /* The user created a process group for this process.  The
+               group ID is the process ID for the original process in the
+               group. */
+ kill(-cp->ForkPIDs[j], SIGINT);
+ }
+ }
+ }
+ }
+
+ /* Wait for all processes to terminate. */
+ while (wait(&procStatus) >= 0 || errno != ECHILD) {
+ }
+
+ /* Terminate the process, which is now in an inconsistent state
+ because we reaped all the PIDs that it may have been reaping
+ or may have reaped in the future. Reraise the signal so that
+ the proper exit code is returned. */
+ {
+ /* Install default signal handler. */
+ struct sigaction defSigAction;
+ sigset_t unblockSet;
+ memset(&defSigAction, 0, sizeof(defSigAction));
+ defSigAction.sa_handler = SIG_DFL;
+ sigemptyset(&defSigAction.sa_mask);
+ while ((sigaction(signum, &defSigAction, 0) < 0) && (errno == EINTR))
+ ;
+ /* Unmask the signal. */
+ sigemptyset(&unblockSet);
+ sigaddset(&unblockSet, signum);
+ sigprocmask(SIG_UNBLOCK, &unblockSet, 0);
+ /* Raise the signal again. */
+ raise(signum);
+ /* We shouldn't get here... but if we do... */
+ _exit(1);
+ }
+ /* break omitted to silence unreachable code clang compiler warning. */
+ }
+
+#if !KWSYSPE_USE_SIGINFO
+ /* Re-Install our handler. Repeat call until it is not interrupted. */
+ {
+ struct sigaction newSigAction;
+    struct sigaction* oldSigAction;
+ memset(&newSigAction, 0, sizeof(struct sigaction));
+    newSigAction.sa_handler = kwsysProcessesSignalHandler;
+    newSigAction.sa_flags = SA_NOCLDSTOP;
+ sigemptyset(&newSigAction.sa_mask);
+ switch (signum) {
+ case SIGCHLD:
+ oldSigAction = &kwsysProcessesOldSigChldAction;
+ break;
+ case SIGINT:
+ sigaddset(&newSigAction.sa_mask, SIGTERM);
+ oldSigAction = &kwsysProcessesOldSigIntAction;
+ break;
+ case SIGTERM:
+ sigaddset(&newSigAction.sa_mask, SIGINT);
+ oldSigAction = &kwsysProcessesOldSigTermAction;
+ break;
+ default:
+        return;
+ }
+ while ((sigaction(signum, &newSigAction, oldSigAction) < 0) &&
+ (errno == EINTR))
+ ;
+ }
+#endif
+
+ errno = old_errno;
+}
+
+void kwsysProcess_ResetStartTime(kwsysProcess* cp)
+{
+ if (!cp) {
+ return;
+ }
+ /* Reset start time. */
+ cp->StartTime = kwsysProcessTimeGetCurrent();
+}
diff --git a/test/API/driver/kwsys/ProcessWin32.c b/test/API/driver/kwsys/ProcessWin32.c
new file mode 100644
index 0000000..a963862
--- /dev/null
+++ b/test/API/driver/kwsys/ProcessWin32.c
@@ -0,0 +1,2786 @@
+/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying
+ file Copyright.txt or https://cmake.org/licensing#kwsys for details. */
+#include "kwsysPrivate.h"
+#include KWSYS_HEADER(Process.h)
+#include KWSYS_HEADER(Encoding.h)
+
+/* Work-around CMake dependency scanning limitation. This must
+ duplicate the above list of headers. */
+#if 0
+# include "Encoding.h.in"
+# include "Process.h.in"
+#endif
+
+/*
+
+Implementation for Windows
+
+On windows, a thread is created to wait for data on each pipe. The
+threads are synchronized with the main thread to simulate the use of
+a UNIX-style select system call.
+
+*/
+
+#ifdef _MSC_VER
+# pragma warning(push, 1)
+#endif
+#include <windows.h> /* Windows API */
+#if defined(_MSC_VER) && _MSC_VER >= 1800
+# define KWSYS_WINDOWS_DEPRECATED_GetVersionEx
+#endif
+#include <io.h> /* _unlink */
+#include <stdio.h> /* sprintf */
+#include <string.h> /* strlen, strdup */
+#ifdef __WATCOMC__
+# define _unlink unlink
+#endif
+
+#ifndef _MAX_FNAME
+# define _MAX_FNAME 4096
+#endif
+#ifndef _MAX_PATH
+# define _MAX_PATH 4096
+#endif
+
+#ifdef _MSC_VER
+# pragma warning(pop)
+# pragma warning(disable : 4514)
+# pragma warning(disable : 4706)
+#endif
+
+#if defined(__BORLANDC__)
+# pragma warn - 8004 /* assigned a value that is never used */
+# pragma warn - 8060 /* Assignment inside if() condition. */
+#endif
+
+/* There are pipes for the process pipeline's stdout and stderr. */
+#define KWSYSPE_PIPE_COUNT 2
+#define KWSYSPE_PIPE_STDOUT 0
+#define KWSYSPE_PIPE_STDERR 1
+
+/* The maximum amount to read from a pipe at a time. */
+#define KWSYSPE_PIPE_BUFFER_SIZE 1024
+
+/* Debug output macro. */
+#if 0
+# define KWSYSPE_DEBUG(x) \
+ ((void*)cp == (void*)0x00226DE0 \
+ ? (fprintf(stderr, "%d/%p/%d ", (int)GetCurrentProcessId(), cp, \
+ __LINE__), \
+ fprintf x, fflush(stderr), 1) \
+ : (1))
+#else
+# define KWSYSPE_DEBUG(x) (void)1
+#endif
+
+typedef LARGE_INTEGER kwsysProcessTime;
+
+typedef struct kwsysProcessCreateInformation_s
+{
+ /* Windows child startup control data. */
+ STARTUPINFOW StartupInfo;
+
+ /* Original handles before making inherited duplicates. */
+ HANDLE hStdInput;
+ HANDLE hStdOutput;
+ HANDLE hStdError;
+} kwsysProcessCreateInformation;
+
+typedef struct kwsysProcessPipeData_s kwsysProcessPipeData;
+static DWORD WINAPI kwsysProcessPipeThreadRead(LPVOID ptd);
+static void kwsysProcessPipeThreadReadPipe(kwsysProcess* cp,
+ kwsysProcessPipeData* td);
+static DWORD WINAPI kwsysProcessPipeThreadWake(LPVOID ptd);
+static void kwsysProcessPipeThreadWakePipe(kwsysProcess* cp,
+ kwsysProcessPipeData* td);
+static int kwsysProcessInitialize(kwsysProcess* cp);
+static DWORD kwsysProcessCreate(kwsysProcess* cp, int index,
+ kwsysProcessCreateInformation* si);
+static void kwsysProcessDestroy(kwsysProcess* cp, int event);
+static DWORD kwsysProcessSetupOutputPipeFile(PHANDLE handle, const char* name);
+static void kwsysProcessSetupSharedPipe(DWORD nStdHandle, PHANDLE handle);
+static void kwsysProcessSetupPipeNative(HANDLE native, PHANDLE handle);
+static void kwsysProcessCleanupHandle(PHANDLE h);
+static void kwsysProcessCleanup(kwsysProcess* cp, DWORD error);
+static void kwsysProcessCleanErrorMessage(kwsysProcess* cp);
+static int kwsysProcessGetTimeoutTime(kwsysProcess* cp, double* userTimeout,
+ kwsysProcessTime* timeoutTime);
+static int kwsysProcessGetTimeoutLeft(kwsysProcessTime* timeoutTime,
+ double* userTimeout,
+ kwsysProcessTime* timeoutLength);
+static kwsysProcessTime kwsysProcessTimeGetCurrent(void);
+static DWORD kwsysProcessTimeToDWORD(kwsysProcessTime t);
+static double kwsysProcessTimeToDouble(kwsysProcessTime t);
+static kwsysProcessTime kwsysProcessTimeFromDouble(double d);
+static int kwsysProcessTimeLess(kwsysProcessTime in1, kwsysProcessTime in2);
+static kwsysProcessTime kwsysProcessTimeAdd(kwsysProcessTime in1,
+ kwsysProcessTime in2);
+static kwsysProcessTime kwsysProcessTimeSubtract(kwsysProcessTime in1,
+ kwsysProcessTime in2);
+static void kwsysProcessSetExitExceptionByIndex(kwsysProcess* cp, int code,
+ int idx);
+static void kwsysProcessKillTree(int pid);
+static void kwsysProcessDisablePipeThreads(kwsysProcess* cp);
+static int kwsysProcessesInitialize(void);
+static int kwsysTryEnterCreateProcessSection(void);
+static void kwsysLeaveCreateProcessSection(void);
+static int kwsysProcessesAdd(HANDLE hProcess, DWORD dwProcessId,
+ int newProcessGroup);
+static void kwsysProcessesRemove(HANDLE hProcess);
+static BOOL WINAPI kwsysCtrlHandler(DWORD dwCtrlType);
+
+/* A structure containing synchronization data for each thread. */
+typedef struct kwsysProcessPipeSync_s kwsysProcessPipeSync;
+struct kwsysProcessPipeSync_s
+{
+ /* Handle to the thread. */
+ HANDLE Thread;
+
+ /* Semaphore indicating to the thread that a process has started. */
+ HANDLE Ready;
+
+ /* Semaphore indicating to the thread that it should begin work. */
+ HANDLE Go;
+
+ /* Semaphore indicating thread has reset for another process. */
+ HANDLE Reset;
+};
+
+/* A structure containing data for each pipe's threads. */
+struct kwsysProcessPipeData_s
+{
+ /* ------------- Data managed per instance of kwsysProcess ------------- */
+
+ /* Synchronization data for reading thread. */
+ kwsysProcessPipeSync Reader;
+
+ /* Synchronization data for waking thread. */
+ kwsysProcessPipeSync Waker;
+
+ /* Index of this pipe. */
+ int Index;
+
+ /* The kwsysProcess instance owning this pipe. */
+ kwsysProcess* Process;
+
+ /* ------------- Data managed per call to Execute ------------- */
+
+ /* Buffer for data read in this pipe's thread. */
+ char DataBuffer[KWSYSPE_PIPE_BUFFER_SIZE];
+
+ /* The length of the data stored in the buffer. */
+ DWORD DataLength;
+
+ /* Whether the pipe has been closed. */
+ int Closed;
+
+ /* Handle for the read end of this pipe. */
+ HANDLE Read;
+
+ /* Handle for the write end of this pipe. */
+ HANDLE Write;
+};
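+
+/* Rough synchronization scheme used below: each pipe gets one reading thread
+   and one waking thread.  For every execution the main thread releases Ready;
+   the reading thread then alternates on Go/ReadFile and reports each filled
+   buffer to the main thread via SharedIndex and the Full semaphore, while the
+   waking thread, when told to Go, writes a byte into the pipe's write end so
+   a blocked ReadFile can return.  Each thread releases Reset once it is done
+   with the current execution. */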
+
+/* A structure containing results data for each process. */
+typedef struct kwsysProcessResults_s kwsysProcessResults;
+struct kwsysProcessResults_s
+{
+ /* The status of the process. */
+ int State;
+
+ /* The exceptional behavior that terminated the process, if any. */
+ int ExitException;
+
+ /* The process exit code. */
+ DWORD ExitCode;
+
+ /* The process return code, if any. */
+ int ExitValue;
+
+ /* Description for the ExitException. */
+ char ExitExceptionString[KWSYSPE_PIPE_BUFFER_SIZE + 1];
+};
+
+/* Structure containing data used to implement the child's execution. */
+struct kwsysProcess_s
+{
+ /* ------------- Data managed per instance of kwsysProcess ------------- */
+
+ /* The status of the process structure. */
+ int State;
+
+ /* The command lines to execute. */
+ wchar_t** Commands;
+ int NumberOfCommands;
+
+ /* The exit code of each command. */
+ DWORD* CommandExitCodes;
+
+ /* The working directory for the child process. */
+ wchar_t* WorkingDirectory;
+
+ /* Whether to create the child as a detached process. */
+ int OptionDetach;
+
+ /* Whether the child was created as a detached process. */
+ int Detached;
+
+ /* Whether to hide the child process's window. */
+ int HideWindow;
+
+ /* Whether to treat command lines as verbatim. */
+ int Verbatim;
+
+ /* Whether to merge stdout/stderr of the child. */
+ int MergeOutput;
+
+ /* Whether to create the process in a new process group. */
+ int CreateProcessGroup;
+
+ /* Mutex to protect the shared index used by threads to report data. */
+ HANDLE SharedIndexMutex;
+
+ /* Semaphore used by threads to signal data ready. */
+ HANDLE Full;
+
+ /* Whether we are currently deleting this kwsysProcess instance. */
+ int Deleting;
+
+ /* Data specific to each pipe and its thread. */
+ kwsysProcessPipeData Pipe[KWSYSPE_PIPE_COUNT];
+
+ /* Name of files to which stdin and stdout pipes are attached. */
+ char* PipeFileSTDIN;
+ char* PipeFileSTDOUT;
+ char* PipeFileSTDERR;
+
+ /* Whether each pipe is shared with the parent process. */
+ int PipeSharedSTDIN;
+ int PipeSharedSTDOUT;
+ int PipeSharedSTDERR;
+
+ /* Native pipes provided by the user. */
+ HANDLE PipeNativeSTDIN[2];
+ HANDLE PipeNativeSTDOUT[2];
+ HANDLE PipeNativeSTDERR[2];
+
+ /* ------------- Data managed per call to Execute ------------- */
+
+ /* Index of last pipe to report data, if any. */
+ int CurrentIndex;
+
+ /* Index shared by threads to report data. */
+ int SharedIndex;
+
+ /* The timeout length. */
+ double Timeout;
+
+ /* Time at which the child started. */
+ kwsysProcessTime StartTime;
+
+ /* Time at which the child will timeout. Negative for no timeout. */
+ kwsysProcessTime TimeoutTime;
+
+ /* Flag for whether the process was killed. */
+ int Killed;
+
+ /* Flag for whether the timeout expired. */
+ int TimeoutExpired;
+
+ /* Flag for whether the process has terminated. */
+ int Terminated;
+
+ /* The number of pipes still open during execution and while waiting
+ for pipes to close after process termination. */
+ int PipesLeft;
+
+ /* Buffer for error messages. */
+ char ErrorMessage[KWSYSPE_PIPE_BUFFER_SIZE + 1];
+
+ /* process results. */
+ kwsysProcessResults* ProcessResults;
+
+ /* Windows process information data. */
+ PROCESS_INFORMATION* ProcessInformation;
+
+ /* Data and process termination events for which to wait. */
+ PHANDLE ProcessEvents;
+ int ProcessEventsLength;
+
+ /* Real working directory of our own process. */
+ DWORD RealWorkingDirectoryLength;
+ wchar_t* RealWorkingDirectory;
+
+ /* Own handles for the child's ends of the pipes in the parent process.
+ Used temporarily during process creation. */
+ HANDLE PipeChildStd[3];
+};
+
+kwsysProcess* kwsysProcess_New(void)
+{
+ int i;
+
+ /* Process control structure. */
+ kwsysProcess* cp;
+
+ /* Windows version number data. */
+ OSVERSIONINFO osv;
+
+ /* Initialize list of processes before we get any farther. It's especially
+ important that the console Ctrl handler be added BEFORE starting the
+ first process. This prevents the risk of an orphaned process being
+ started by the main thread while the default Ctrl handler is in
+ progress. */
+ if (!kwsysProcessesInitialize()) {
+ return 0;
+ }
+
+ /* Allocate a process control structure. */
+ cp = (kwsysProcess*)malloc(sizeof(kwsysProcess));
+ if (!cp) {
+ /* Could not allocate memory for the control structure. */
+ return 0;
+ }
+ ZeroMemory(cp, sizeof(*cp));
+
+ /* Share stdin with the parent process by default. */
+ cp->PipeSharedSTDIN = 1;
+
+ /* Set initial status. */
+ cp->State = kwsysProcess_State_Starting;
+
+ /* Choose a method of running the child based on version of
+ windows. */
+ ZeroMemory(&osv, sizeof(osv));
+ osv.dwOSVersionInfoSize = sizeof(osv);
+#ifdef KWSYS_WINDOWS_DEPRECATED_GetVersionEx
+# pragma warning(push)
+# ifdef __INTEL_COMPILER
+# pragma warning(disable : 1478)
+# elif defined __clang__
+# pragma clang diagnostic push
+# pragma clang diagnostic ignored "-Wdeprecated-declarations"
+# else
+# pragma warning(disable : 4996)
+# endif
+#endif
+ GetVersionEx(&osv);
+#ifdef KWSYS_WINDOWS_DEPRECATED_GetVersionEx
+# ifdef __clang__
+# pragma clang diagnostic pop
+# else
+# pragma warning(pop)
+# endif
+#endif
+ if (osv.dwPlatformId == VER_PLATFORM_WIN32_WINDOWS) {
+ /* Win9x no longer supported. */
+ kwsysProcess_Delete(cp);
+ return 0;
+ }
+
+ /* Initially no thread owns the mutex. Initialize semaphore to 1. */
+ if (!(cp->SharedIndexMutex = CreateSemaphore(0, 1, 1, 0))) {
+ kwsysProcess_Delete(cp);
+ return 0;
+ }
+
+ /* Initially no data are available. Initialize semaphore to 0. */
+ if (!(cp->Full = CreateSemaphore(0, 0, 1, 0))) {
+ kwsysProcess_Delete(cp);
+ return 0;
+ }
+
+ /* Create the thread to read each pipe. */
+ for (i = 0; i < KWSYSPE_PIPE_COUNT; ++i) {
+ DWORD dummy = 0;
+
+ /* Assign the thread its index. */
+ cp->Pipe[i].Index = i;
+
+ /* Give the thread a pointer back to the kwsysProcess instance. */
+ cp->Pipe[i].Process = cp;
+
+ /* No process is yet running. Initialize semaphore to 0. */
+ if (!(cp->Pipe[i].Reader.Ready = CreateSemaphore(0, 0, 1, 0))) {
+ kwsysProcess_Delete(cp);
+ return 0;
+ }
+
+ /* The pipe is not yet reset. Initialize semaphore to 0. */
+ if (!(cp->Pipe[i].Reader.Reset = CreateSemaphore(0, 0, 1, 0))) {
+ kwsysProcess_Delete(cp);
+ return 0;
+ }
+
+ /* The thread's buffer is initially empty. Initialize semaphore to 1. */
+ if (!(cp->Pipe[i].Reader.Go = CreateSemaphore(0, 1, 1, 0))) {
+ kwsysProcess_Delete(cp);
+ return 0;
+ }
+
+ /* Create the reading thread. It will block immediately. The
+ thread will not make deeply nested calls, so we need only a
+ small stack. */
+ if (!(cp->Pipe[i].Reader.Thread = CreateThread(
+ 0, 1024, kwsysProcessPipeThreadRead, &cp->Pipe[i], 0, &dummy))) {
+ kwsysProcess_Delete(cp);
+ return 0;
+ }
+
+ /* No process is yet running. Initialize semaphore to 0. */
+ if (!(cp->Pipe[i].Waker.Ready = CreateSemaphore(0, 0, 1, 0))) {
+ kwsysProcess_Delete(cp);
+ return 0;
+ }
+
+ /* The pipe is not yet reset. Initialize semaphore to 0. */
+ if (!(cp->Pipe[i].Waker.Reset = CreateSemaphore(0, 0, 1, 0))) {
+ kwsysProcess_Delete(cp);
+ return 0;
+ }
+
+ /* The waker should not wake immediately. Initialize semaphore to 0. */
+ if (!(cp->Pipe[i].Waker.Go = CreateSemaphore(0, 0, 1, 0))) {
+ kwsysProcess_Delete(cp);
+ return 0;
+ }
+
+ /* Create the waking thread. It will block immediately. The
+ thread will not make deeply nested calls, so we need only a
+ small stack. */
+ if (!(cp->Pipe[i].Waker.Thread = CreateThread(
+ 0, 1024, kwsysProcessPipeThreadWake, &cp->Pipe[i], 0, &dummy))) {
+ kwsysProcess_Delete(cp);
+ return 0;
+ }
+ }
+ for (i = 0; i < 3; ++i) {
+ cp->PipeChildStd[i] = INVALID_HANDLE_VALUE;
+ }
+
+ return cp;
+}
+
+void kwsysProcess_Delete(kwsysProcess* cp)
+{
+ int i;
+
+ /* Make sure we have an instance. */
+ if (!cp) {
+ return;
+ }
+
+ /* If the process is executing, wait for it to finish. */
+ if (cp->State == kwsysProcess_State_Executing) {
+ if (cp->Detached) {
+ kwsysProcess_Disown(cp);
+ } else {
+ kwsysProcess_WaitForExit(cp, 0);
+ }
+ }
+
+ /* We are deleting the kwsysProcess instance. */
+ cp->Deleting = 1;
+
+ /* Terminate each of the threads. */
+ for (i = 0; i < KWSYSPE_PIPE_COUNT; ++i) {
+ /* Terminate this reading thread. */
+ if (cp->Pipe[i].Reader.Thread) {
+ /* Signal the thread we are ready for it. It will terminate
+ immediately since Deleting is set. */
+ ReleaseSemaphore(cp->Pipe[i].Reader.Ready, 1, 0);
+
+ /* Wait for the thread to exit. */
+ WaitForSingleObject(cp->Pipe[i].Reader.Thread, INFINITE);
+
+ /* Close the handle to the thread. */
+ kwsysProcessCleanupHandle(&cp->Pipe[i].Reader.Thread);
+ }
+
+ /* Terminate this waking thread. */
+ if (cp->Pipe[i].Waker.Thread) {
+ /* Signal the thread we are ready for it. It will terminate
+ immediately since Deleting is set. */
+ ReleaseSemaphore(cp->Pipe[i].Waker.Ready, 1, 0);
+
+ /* Wait for the thread to exit. */
+ WaitForSingleObject(cp->Pipe[i].Waker.Thread, INFINITE);
+
+ /* Close the handle to the thread. */
+ kwsysProcessCleanupHandle(&cp->Pipe[i].Waker.Thread);
+ }
+
+ /* Cleanup the pipe's semaphores. */
+ kwsysProcessCleanupHandle(&cp->Pipe[i].Reader.Ready);
+ kwsysProcessCleanupHandle(&cp->Pipe[i].Reader.Go);
+ kwsysProcessCleanupHandle(&cp->Pipe[i].Reader.Reset);
+ kwsysProcessCleanupHandle(&cp->Pipe[i].Waker.Ready);
+ kwsysProcessCleanupHandle(&cp->Pipe[i].Waker.Go);
+ kwsysProcessCleanupHandle(&cp->Pipe[i].Waker.Reset);
+ }
+
+ /* Close the shared semaphores. */
+ kwsysProcessCleanupHandle(&cp->SharedIndexMutex);
+ kwsysProcessCleanupHandle(&cp->Full);
+
+ /* Free memory. */
+ kwsysProcess_SetCommand(cp, 0);
+ kwsysProcess_SetWorkingDirectory(cp, 0);
+ kwsysProcess_SetPipeFile(cp, kwsysProcess_Pipe_STDIN, 0);
+ kwsysProcess_SetPipeFile(cp, kwsysProcess_Pipe_STDOUT, 0);
+ kwsysProcess_SetPipeFile(cp, kwsysProcess_Pipe_STDERR, 0);
+ free(cp->CommandExitCodes);
+ free(cp->ProcessResults);
+ free(cp);
+}
+
+int kwsysProcess_SetCommand(kwsysProcess* cp, char const* const* command)
+{
+ int i;
+ if (!cp) {
+ return 0;
+ }
+ for (i = 0; i < cp->NumberOfCommands; ++i) {
+ free(cp->Commands[i]);
+ }
+ cp->NumberOfCommands = 0;
+ if (cp->Commands) {
+ free(cp->Commands);
+ cp->Commands = 0;
+ }
+ if (command) {
+ return kwsysProcess_AddCommand(cp, command);
+ }
+ return 1;
+}
+
+int kwsysProcess_AddCommand(kwsysProcess* cp, char const* const* command)
+{
+ int newNumberOfCommands;
+ wchar_t** newCommands;
+
+ /* Make sure we have a command to add. */
+ if (!cp || !command || !*command) {
+ return 0;
+ }
+
+ /* Allocate a new array for command pointers. */
+ newNumberOfCommands = cp->NumberOfCommands + 1;
+ if (!(newCommands =
+ (wchar_t**)malloc(sizeof(wchar_t*) * newNumberOfCommands))) {
+ /* Out of memory. */
+ return 0;
+ }
+
+ /* Copy any existing commands into the new array. */
+ {
+ int i;
+ for (i = 0; i < cp->NumberOfCommands; ++i) {
+ newCommands[i] = cp->Commands[i];
+ }
+ }
+
+ if (cp->Verbatim) {
+ /* Copy the verbatim command line into the buffer. */
+ newCommands[cp->NumberOfCommands] = kwsysEncoding_DupToWide(*command);
+ } else {
+ /* Encode the arguments so CommandLineToArgvW can decode
+ them from the command line string in the child. */
+ char buffer[32768]; /* CreateProcess max command-line length. */
+ char* end = buffer + sizeof(buffer);
+ char* out = buffer;
+ char const* const* a;
+ for (a = command; *a; ++a) {
+ int quote = !**a; /* Quote the empty string. */
+ int slashes = 0;
+ char const* c;
+ if (a != command && out != end) {
+ *out++ = ' ';
+ }
+ for (c = *a; !quote && *c; ++c) {
+ quote = (*c == ' ' || *c == '\t');
+ }
+ if (quote && out != end) {
+ *out++ = '"';
+ }
+ for (c = *a; *c; ++c) {
+ if (*c == '\\') {
+ ++slashes;
+ } else {
+ if (*c == '"') {
+ // Add n+1 backslashes to total 2n+1 before internal '"'.
+ while (slashes-- >= 0 && out != end) {
+ *out++ = '\\';
+ }
+ }
+ slashes = 0;
+ }
+ if (out != end) {
+ *out++ = *c;
+ }
+ }
+ if (quote) {
+ // Add n backslashes to total 2n before ending '"'.
+ while (slashes-- > 0 && out != end) {
+ *out++ = '\\';
+ }
+ if (out != end) {
+ *out++ = '"';
+ }
+ }
+ }
+ if (out != end) {
+ *out = '\0';
+ newCommands[cp->NumberOfCommands] = kwsysEncoding_DupToWide(buffer);
+ } else {
+ newCommands[cp->NumberOfCommands] = 0;
+ }
+ }
+ if (!newCommands[cp->NumberOfCommands]) {
+ /* Out of memory or command line too long. */
+ free(newCommands);
+ return 0;
+ }
+
+ /* Save the new array of commands. */
+ free(cp->Commands);
+ cp->Commands = newCommands;
+ cp->NumberOfCommands = newNumberOfCommands;
+ return 1;
+}
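+
+/*
+  For reference, the non-verbatim encoding above follows the rules that
+  CommandLineToArgvW uses to split the child's command line: an argument
+  containing a space or tab (or an empty argument) is wrapped in double
+  quotes, an embedded '"' preceded by n backslashes gets 2n+1 backslashes
+  in the output, and n trailing backslashes of a quoted argument become 2n.
+  For example, the arguments  say "hi"  and  C:\path with spaces\  are
+  encoded as
+
+    "say \"hi\"" "C:\path with spaces\\"
+*/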
+
+void kwsysProcess_SetTimeout(kwsysProcess* cp, double timeout)
+{
+ if (!cp) {
+ return;
+ }
+ cp->Timeout = timeout;
+ if (cp->Timeout < 0) {
+ cp->Timeout = 0;
+ }
+ // Force recomputation of TimeoutTime.
+ cp->TimeoutTime.QuadPart = -1;
+}
+
+int kwsysProcess_SetWorkingDirectory(kwsysProcess* cp, const char* dir)
+{
+ if (!cp) {
+ return 0;
+ }
+ if (cp->WorkingDirectory) {
+ free(cp->WorkingDirectory);
+ cp->WorkingDirectory = 0;
+ }
+ if (dir && dir[0]) {
+ wchar_t* wdir = kwsysEncoding_DupToWide(dir);
+ /* We must convert the working directory to a full path. */
+ DWORD length = GetFullPathNameW(wdir, 0, 0, 0);
+ if (length > 0) {
+ wchar_t* work_dir = malloc(length * sizeof(wchar_t));
+ if (!work_dir) {
+ free(wdir);
+ return 0;
+ }
+ if (!GetFullPathNameW(wdir, length, work_dir, 0)) {
+ free(work_dir);
+ free(wdir);
+ return 0;
+ }
+ cp->WorkingDirectory = work_dir;
+ }
+ free(wdir);
+ }
+ return 1;
+}
+
+int kwsysProcess_SetPipeFile(kwsysProcess* cp, int pipe, const char* file)
+{
+ char** pfile;
+ if (!cp) {
+ return 0;
+ }
+ switch (pipe) {
+ case kwsysProcess_Pipe_STDIN:
+ pfile = &cp->PipeFileSTDIN;
+ break;
+ case kwsysProcess_Pipe_STDOUT:
+ pfile = &cp->PipeFileSTDOUT;
+ break;
+ case kwsysProcess_Pipe_STDERR:
+ pfile = &cp->PipeFileSTDERR;
+ break;
+ default:
+ return 0;
+ }
+ if (*pfile) {
+ free(*pfile);
+ *pfile = 0;
+ }
+ if (file) {
+ *pfile = strdup(file);
+ if (!*pfile) {
+ return 0;
+ }
+ }
+
+ /* If we are redirecting the pipe, do not share it or use a native
+ pipe. */
+ if (*pfile) {
+ kwsysProcess_SetPipeNative(cp, pipe, 0);
+ kwsysProcess_SetPipeShared(cp, pipe, 0);
+ }
+
+ return 1;
+}
+
+void kwsysProcess_SetPipeShared(kwsysProcess* cp, int pipe, int shared)
+{
+ if (!cp) {
+ return;
+ }
+
+ switch (pipe) {
+ case kwsysProcess_Pipe_STDIN:
+ cp->PipeSharedSTDIN = shared ? 1 : 0;
+ break;
+ case kwsysProcess_Pipe_STDOUT:
+ cp->PipeSharedSTDOUT = shared ? 1 : 0;
+ break;
+ case kwsysProcess_Pipe_STDERR:
+ cp->PipeSharedSTDERR = shared ? 1 : 0;
+ break;
+ default:
+ return;
+ }
+
+ /* If we are sharing the pipe, do not redirect it to a file or use a
+ native pipe. */
+ if (shared) {
+ kwsysProcess_SetPipeFile(cp, pipe, 0);
+ kwsysProcess_SetPipeNative(cp, pipe, 0);
+ }
+}
+
+void kwsysProcess_SetPipeNative(kwsysProcess* cp, int pipe, HANDLE p[2])
+{
+ HANDLE* pPipeNative = 0;
+
+ if (!cp) {
+ return;
+ }
+
+ switch (pipe) {
+ case kwsysProcess_Pipe_STDIN:
+ pPipeNative = cp->PipeNativeSTDIN;
+ break;
+ case kwsysProcess_Pipe_STDOUT:
+ pPipeNative = cp->PipeNativeSTDOUT;
+ break;
+ case kwsysProcess_Pipe_STDERR:
+ pPipeNative = cp->PipeNativeSTDERR;
+ break;
+ default:
+ return;
+ }
+
+ /* Copy the native pipe handles provided. */
+ if (p) {
+ pPipeNative[0] = p[0];
+ pPipeNative[1] = p[1];
+ } else {
+ pPipeNative[0] = 0;
+ pPipeNative[1] = 0;
+ }
+
+ /* If we are using a native pipe, do not share it or redirect it to
+ a file. */
+ if (p) {
+ kwsysProcess_SetPipeFile(cp, pipe, 0);
+ kwsysProcess_SetPipeShared(cp, pipe, 0);
+ }
+}
+
+int kwsysProcess_GetOption(kwsysProcess* cp, int optionId)
+{
+ if (!cp) {
+ return 0;
+ }
+
+ switch (optionId) {
+ case kwsysProcess_Option_Detach:
+ return cp->OptionDetach;
+ case kwsysProcess_Option_HideWindow:
+ return cp->HideWindow;
+ case kwsysProcess_Option_MergeOutput:
+ return cp->MergeOutput;
+ case kwsysProcess_Option_Verbatim:
+ return cp->Verbatim;
+ case kwsysProcess_Option_CreateProcessGroup:
+ return cp->CreateProcessGroup;
+ default:
+ return 0;
+ }
+}
+
+void kwsysProcess_SetOption(kwsysProcess* cp, int optionId, int value)
+{
+ if (!cp) {
+ return;
+ }
+
+ switch (optionId) {
+ case kwsysProcess_Option_Detach:
+ cp->OptionDetach = value;
+ break;
+ case kwsysProcess_Option_HideWindow:
+ cp->HideWindow = value;
+ break;
+ case kwsysProcess_Option_MergeOutput:
+ cp->MergeOutput = value;
+ break;
+ case kwsysProcess_Option_Verbatim:
+ cp->Verbatim = value;
+ break;
+ case kwsysProcess_Option_CreateProcessGroup:
+ cp->CreateProcessGroup = value;
+ break;
+ default:
+ break;
+ }
+}
+
+int kwsysProcess_GetState(kwsysProcess* cp)
+{
+ return cp ? cp->State : kwsysProcess_State_Error;
+}
+
+int kwsysProcess_GetExitException(kwsysProcess* cp)
+{
+ return (cp && cp->ProcessResults && (cp->NumberOfCommands > 0))
+ ? cp->ProcessResults[cp->NumberOfCommands - 1].ExitException
+ : kwsysProcess_Exception_Other;
+}
+
+int kwsysProcess_GetExitValue(kwsysProcess* cp)
+{
+ return (cp && cp->ProcessResults && (cp->NumberOfCommands > 0))
+ ? cp->ProcessResults[cp->NumberOfCommands - 1].ExitValue
+ : -1;
+}
+
+int kwsysProcess_GetExitCode(kwsysProcess* cp)
+{
+ return (cp && cp->ProcessResults && (cp->NumberOfCommands > 0))
+ ? cp->ProcessResults[cp->NumberOfCommands - 1].ExitCode
+ : 0;
+}
+
+const char* kwsysProcess_GetErrorString(kwsysProcess* cp)
+{
+ if (!cp) {
+ return "Process management structure could not be allocated";
+ } else if (cp->State == kwsysProcess_State_Error) {
+ return cp->ErrorMessage;
+ }
+ return "Success";
+}
+
+const char* kwsysProcess_GetExceptionString(kwsysProcess* cp)
+{
+ if (!(cp && cp->ProcessResults && (cp->NumberOfCommands > 0))) {
+ return "GetExceptionString called with NULL process management structure";
+ } else if (cp->State == kwsysProcess_State_Exception) {
+ return cp->ProcessResults[cp->NumberOfCommands - 1].ExitExceptionString;
+ }
+ return "No exception";
+}
+
+/* The index should be within array bounds. */
+#define KWSYSPE_IDX_CHK(RET) \
+  if (!cp || idx >= cp->NumberOfCommands || idx < 0) { \
+    KWSYSPE_DEBUG((stderr, "array index out of bounds\n")); \
+    return RET; \
+  }
+
+int kwsysProcess_GetStateByIndex(kwsysProcess* cp, int idx)
+{
+ KWSYSPE_IDX_CHK(kwsysProcess_State_Error)
+ return cp->ProcessResults[idx].State;
+}
+
+int kwsysProcess_GetExitExceptionByIndex(kwsysProcess* cp, int idx)
+{
+ KWSYSPE_IDX_CHK(kwsysProcess_Exception_Other)
+ return cp->ProcessResults[idx].ExitException;
+}
+
+int kwsysProcess_GetExitValueByIndex(kwsysProcess* cp, int idx)
+{
+ KWSYSPE_IDX_CHK(-1)
+ return cp->ProcessResults[idx].ExitValue;
+}
+
+int kwsysProcess_GetExitCodeByIndex(kwsysProcess* cp, int idx)
+{
+ KWSYSPE_IDX_CHK(-1)
+ return cp->CommandExitCodes[idx];
+}
+
+const char* kwsysProcess_GetExceptionStringByIndex(kwsysProcess* cp, int idx)
+{
+ KWSYSPE_IDX_CHK("GetExceptionString called with NULL process management "
+ "structure or index out of bound")
+ if (cp->ProcessResults[idx].State == kwsysProcess_StateByIndex_Exception) {
+ return cp->ProcessResults[idx].ExitExceptionString;
+ }
+ return "No exception";
+}
+
+#undef KWSYSPE_IDX_CHK
+
+void kwsysProcess_Execute(kwsysProcess* cp)
+{
+ int i;
+
+ /* Do not execute a second time. */
+ if (!cp || cp->State == kwsysProcess_State_Executing) {
+ return;
+ }
+
+ /* Make sure we have something to run. */
+ if (cp->NumberOfCommands < 1) {
+ strcpy(cp->ErrorMessage, "No command");
+ cp->State = kwsysProcess_State_Error;
+ return;
+ }
+
+ /* Initialize the control structure for a new process. */
+ if (!kwsysProcessInitialize(cp)) {
+ strcpy(cp->ErrorMessage, "Out of memory");
+ cp->State = kwsysProcess_State_Error;
+ return;
+ }
+
+ /* Save the real working directory of this process and change to
+ the working directory for the child processes. This is needed
+ to make pipe file paths evaluate correctly. */
+ if (cp->WorkingDirectory) {
+ if (!GetCurrentDirectoryW(cp->RealWorkingDirectoryLength,
+ cp->RealWorkingDirectory)) {
+ kwsysProcessCleanup(cp, GetLastError());
+ return;
+ }
+ SetCurrentDirectoryW(cp->WorkingDirectory);
+ }
+
+ /* Setup the stdin pipe for the first process. */
+ if (cp->PipeFileSTDIN) {
+ /* Create a handle to read a file for stdin. */
+ wchar_t* wstdin = kwsysEncoding_DupToWide(cp->PipeFileSTDIN);
+ DWORD error;
+ cp->PipeChildStd[0] =
+ CreateFileW(wstdin, GENERIC_READ, FILE_SHARE_READ | FILE_SHARE_WRITE, 0,
+ OPEN_EXISTING, 0, 0);
+ error = GetLastError(); /* Check now in case free changes this. */
+ free(wstdin);
+ if (cp->PipeChildStd[0] == INVALID_HANDLE_VALUE) {
+ kwsysProcessCleanup(cp, error);
+ return;
+ }
+ } else if (cp->PipeSharedSTDIN) {
+ /* Share this process's stdin with the child. */
+ kwsysProcessSetupSharedPipe(STD_INPUT_HANDLE, &cp->PipeChildStd[0]);
+ } else if (cp->PipeNativeSTDIN[0]) {
+ /* Use the provided native pipe. */
+ kwsysProcessSetupPipeNative(cp->PipeNativeSTDIN[0], &cp->PipeChildStd[0]);
+ } else {
+ /* Explicitly give the child no stdin. */
+ cp->PipeChildStd[0] = INVALID_HANDLE_VALUE;
+ }
+
+ /* Create the output pipe for the last process.
+ We always create this so the pipe thread can run even if we
+ do not end up giving the write end to the child below. */
+ if (!CreatePipe(&cp->Pipe[KWSYSPE_PIPE_STDOUT].Read,
+ &cp->Pipe[KWSYSPE_PIPE_STDOUT].Write, 0, 0)) {
+ kwsysProcessCleanup(cp, GetLastError());
+ return;
+ }
+
+ if (cp->PipeFileSTDOUT) {
+ /* Use a file for stdout. */
+ DWORD error = kwsysProcessSetupOutputPipeFile(&cp->PipeChildStd[1],
+ cp->PipeFileSTDOUT);
+ if (error) {
+ kwsysProcessCleanup(cp, error);
+ return;
+ }
+ } else if (cp->PipeSharedSTDOUT) {
+ /* Use the parent stdout. */
+ kwsysProcessSetupSharedPipe(STD_OUTPUT_HANDLE, &cp->PipeChildStd[1]);
+ } else if (cp->PipeNativeSTDOUT[1]) {
+ /* Use the given handle for stdout. */
+ kwsysProcessSetupPipeNative(cp->PipeNativeSTDOUT[1], &cp->PipeChildStd[1]);
+ } else {
+ /* Use our pipe for stdout. Duplicate the handle since our waker
+ thread will use the original. Do not make it inherited yet. */
+ if (!DuplicateHandle(GetCurrentProcess(),
+ cp->Pipe[KWSYSPE_PIPE_STDOUT].Write,
+ GetCurrentProcess(), &cp->PipeChildStd[1], 0, FALSE,
+ DUPLICATE_SAME_ACCESS)) {
+ kwsysProcessCleanup(cp, GetLastError());
+ return;
+ }
+ }
+
+ /* Create stderr pipe to be shared by all processes in the pipeline.
+ We always create this so the pipe thread can run even if we do not
+ end up giving the write end to the child below. */
+ if (!CreatePipe(&cp->Pipe[KWSYSPE_PIPE_STDERR].Read,
+ &cp->Pipe[KWSYSPE_PIPE_STDERR].Write, 0, 0)) {
+ kwsysProcessCleanup(cp, GetLastError());
+ return;
+ }
+
+ if (cp->PipeFileSTDERR) {
+ /* Use a file for stderr. */
+ DWORD error = kwsysProcessSetupOutputPipeFile(&cp->PipeChildStd[2],
+ cp->PipeFileSTDERR);
+ if (error) {
+ kwsysProcessCleanup(cp, error);
+ return;
+ }
+ } else if (cp->PipeSharedSTDERR) {
+ /* Use the parent stderr. */
+ kwsysProcessSetupSharedPipe(STD_ERROR_HANDLE, &cp->PipeChildStd[2]);
+ } else if (cp->PipeNativeSTDERR[1]) {
+ /* Use the given handle for stderr. */
+ kwsysProcessSetupPipeNative(cp->PipeNativeSTDERR[1], &cp->PipeChildStd[2]);
+ } else {
+ /* Use our pipe for stderr. Duplicate the handle since our waker
+ thread will use the original. Do not make it inherited yet. */
+ if (!DuplicateHandle(GetCurrentProcess(),
+ cp->Pipe[KWSYSPE_PIPE_STDERR].Write,
+ GetCurrentProcess(), &cp->PipeChildStd[2], 0, FALSE,
+ DUPLICATE_SAME_ACCESS)) {
+ kwsysProcessCleanup(cp, GetLastError());
+ return;
+ }
+ }
+
+ /* Create the pipeline of processes. */
+ {
+ /* Child startup control data. */
+ kwsysProcessCreateInformation si;
+ HANDLE nextStdInput = cp->PipeChildStd[0];
+
+ /* Initialize startup info data. */
+ ZeroMemory(&si, sizeof(si));
+ si.StartupInfo.cb = sizeof(si.StartupInfo);
+
+ /* Decide whether a child window should be shown. */
+ si.StartupInfo.dwFlags |= STARTF_USESHOWWINDOW;
+ si.StartupInfo.wShowWindow =
+ (unsigned short)(cp->HideWindow ? SW_HIDE : SW_SHOWDEFAULT);
+
+ /* Connect the child's output pipes to the threads. */
+ si.StartupInfo.dwFlags |= STARTF_USESTDHANDLES;
+
+ for (i = 0; i < cp->NumberOfCommands; ++i) {
+ /* Setup the process's pipes. */
+ si.hStdInput = nextStdInput;
+ if (i == cp->NumberOfCommands - 1) {
+ /* The last child gets the overall stdout. */
+ nextStdInput = INVALID_HANDLE_VALUE;
+ si.hStdOutput = cp->PipeChildStd[1];
+ } else {
+ /* Create a pipe to sit between the children. */
+ HANDLE p[2] = { INVALID_HANDLE_VALUE, INVALID_HANDLE_VALUE };
+ if (!CreatePipe(&p[0], &p[1], 0, 0)) {
+ DWORD error = GetLastError();
+ if (nextStdInput != cp->PipeChildStd[0]) {
+ kwsysProcessCleanupHandle(&nextStdInput);
+ }
+ kwsysProcessCleanup(cp, error);
+ return;
+ }
+ nextStdInput = p[0];
+ si.hStdOutput = p[1];
+ }
+ si.hStdError =
+ cp->MergeOutput ? cp->PipeChildStd[1] : cp->PipeChildStd[2];
+
+ {
+ DWORD error = kwsysProcessCreate(cp, i, &si);
+
+ /* Close our copies of pipes used between children. */
+ if (si.hStdInput != cp->PipeChildStd[0]) {
+ kwsysProcessCleanupHandle(&si.hStdInput);
+ }
+ if (si.hStdOutput != cp->PipeChildStd[1]) {
+ kwsysProcessCleanupHandle(&si.hStdOutput);
+ }
+ if (si.hStdError != cp->PipeChildStd[2] && !cp->MergeOutput) {
+ kwsysProcessCleanupHandle(&si.hStdError);
+ }
+ if (!error) {
+ cp->ProcessEvents[i + 1] = cp->ProcessInformation[i].hProcess;
+ } else {
+ if (nextStdInput != cp->PipeChildStd[0]) {
+ kwsysProcessCleanupHandle(&nextStdInput);
+ }
+ kwsysProcessCleanup(cp, error);
+ return;
+ }
+ }
+ }
+ }
+
+ /* The parent process does not need the child's pipe ends. */
+ for (i = 0; i < 3; ++i) {
+ kwsysProcessCleanupHandle(&cp->PipeChildStd[i]);
+ }
+
+ /* Restore the working directory. */
+ if (cp->RealWorkingDirectory) {
+ SetCurrentDirectoryW(cp->RealWorkingDirectory);
+ free(cp->RealWorkingDirectory);
+ cp->RealWorkingDirectory = 0;
+ }
+
+ /* The timeout period starts now. */
+ cp->StartTime = kwsysProcessTimeGetCurrent();
+ cp->TimeoutTime = kwsysProcessTimeFromDouble(-1);
+
+ /* All processes in the pipeline have been started in suspended
+ mode. Resume them all now. */
+ for (i = 0; i < cp->NumberOfCommands; ++i) {
+ ResumeThread(cp->ProcessInformation[i].hThread);
+ }
+
+ /* ---- It is no longer safe to call kwsysProcessCleanup. ----- */
+ /* Tell the pipe threads that a process has started. */
+ for (i = 0; i < KWSYSPE_PIPE_COUNT; ++i) {
+ ReleaseSemaphore(cp->Pipe[i].Reader.Ready, 1, 0);
+ ReleaseSemaphore(cp->Pipe[i].Waker.Ready, 1, 0);
+ }
+
+ /* We don't care about the children's main threads. */
+ for (i = 0; i < cp->NumberOfCommands; ++i) {
+ kwsysProcessCleanupHandle(&cp->ProcessInformation[i].hThread);
+ }
+
+ /* No pipe has reported data. */
+ cp->CurrentIndex = KWSYSPE_PIPE_COUNT;
+ cp->PipesLeft = KWSYSPE_PIPE_COUNT;
+
+ /* The process has now started. */
+ cp->State = kwsysProcess_State_Executing;
+ cp->Detached = cp->OptionDetach;
+}
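+
+/* Handle wiring established above, sketched for an N-command pipeline:
+
+     PipeChildStd[0] -> cmd 0 -> pipe -> cmd 1 -> ... -> cmd N-1 -> PipeChildStd[1]
+     stderr of every command ---------------------------------------> PipeChildStd[2]
+
+   (or to PipeChildStd[1] when MergeOutput is set).  PipeChildStd[1] and [2]
+   normally feed the KWSYSPE_PIPE_STDOUT and KWSYSPE_PIPE_STDERR reader
+   threads unless a file, shared, or native pipe was requested. */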
+
+void kwsysProcess_Disown(kwsysProcess* cp)
+{
+ int i;
+
+ /* Make sure we are executing a detached process. */
+ if (!cp || !cp->Detached || cp->State != kwsysProcess_State_Executing ||
+ cp->TimeoutExpired || cp->Killed || cp->Terminated) {
+ return;
+ }
+
+ /* Disable the reading threads. */
+ kwsysProcessDisablePipeThreads(cp);
+
+ /* Wait for all pipe threads to reset. */
+ for (i = 0; i < KWSYSPE_PIPE_COUNT; ++i) {
+ WaitForSingleObject(cp->Pipe[i].Reader.Reset, INFINITE);
+ WaitForSingleObject(cp->Pipe[i].Waker.Reset, INFINITE);
+ }
+
+ /* We will not wait for exit, so cleanup now. */
+ kwsysProcessCleanup(cp, 0);
+
+ /* The process has been disowned. */
+ cp->State = kwsysProcess_State_Disowned;
+}
+
+int kwsysProcess_WaitForData(kwsysProcess* cp, char** data, int* length,
+ double* userTimeout)
+{
+ kwsysProcessTime userStartTime;
+ kwsysProcessTime timeoutLength;
+ kwsysProcessTime timeoutTime;
+ DWORD timeout;
+ int user;
+ int done = 0;
+ int expired = 0;
+ int pipeId = kwsysProcess_Pipe_None;
+ DWORD w;
+
+ /* Make sure we are executing a process. */
+ if (!cp || cp->State != kwsysProcess_State_Executing || cp->Killed ||
+ cp->TimeoutExpired) {
+ return kwsysProcess_Pipe_None;
+ }
+
+ /* Record the time at which user timeout period starts. */
+ userStartTime = kwsysProcessTimeGetCurrent();
+
+ /* Calculate the time at which a timeout will expire, and whether it
+ is the user or process timeout. */
+ user = kwsysProcessGetTimeoutTime(cp, userTimeout, &timeoutTime);
+
+ /* Loop until we have a reason to return. */
+ while (!done && cp->PipesLeft > 0) {
+ /* If we previously got data from a thread, let it know we are
+ done with the data. */
+ if (cp->CurrentIndex < KWSYSPE_PIPE_COUNT) {
+ KWSYSPE_DEBUG((stderr, "releasing reader %d\n", cp->CurrentIndex));
+ ReleaseSemaphore(cp->Pipe[cp->CurrentIndex].Reader.Go, 1, 0);
+ cp->CurrentIndex = KWSYSPE_PIPE_COUNT;
+ }
+
+ /* Setup a timeout if required. */
+ if (kwsysProcessGetTimeoutLeft(&timeoutTime, user ? userTimeout : 0,
+ &timeoutLength)) {
+ /* Timeout has already expired. */
+ expired = 1;
+ break;
+ }
+ if (timeoutTime.QuadPart < 0) {
+ timeout = INFINITE;
+ } else {
+ timeout = kwsysProcessTimeToDWORD(timeoutLength);
+ }
+
+ /* Wait for a pipe's thread to signal or a process to terminate. */
+ w = WaitForMultipleObjects(cp->ProcessEventsLength, cp->ProcessEvents, 0,
+ timeout);
+ if (w == WAIT_TIMEOUT) {
+ /* Timeout has expired. */
+ expired = 1;
+ done = 1;
+ } else if (w == WAIT_OBJECT_0) {
+      /* Save the index of the reporting thread and release the mutex.
+         The thread will block until we release its Reader.Go semaphore. */
+ cp->CurrentIndex = cp->SharedIndex;
+ ReleaseSemaphore(cp->SharedIndexMutex, 1, 0);
+
+ /* Data are available or a pipe closed. */
+ if (cp->Pipe[cp->CurrentIndex].Closed) {
+ /* The pipe closed at the write end. Close the read end and
+ inform the wakeup thread it is done with this process. */
+ kwsysProcessCleanupHandle(&cp->Pipe[cp->CurrentIndex].Read);
+ ReleaseSemaphore(cp->Pipe[cp->CurrentIndex].Waker.Go, 1, 0);
+ KWSYSPE_DEBUG((stderr, "wakeup %d\n", cp->CurrentIndex));
+ --cp->PipesLeft;
+ } else if (data && length) {
+ /* Report this data. */
+ *data = cp->Pipe[cp->CurrentIndex].DataBuffer;
+ *length = cp->Pipe[cp->CurrentIndex].DataLength;
+ switch (cp->CurrentIndex) {
+ case KWSYSPE_PIPE_STDOUT:
+ pipeId = kwsysProcess_Pipe_STDOUT;
+ break;
+ case KWSYSPE_PIPE_STDERR:
+ pipeId = kwsysProcess_Pipe_STDERR;
+ break;
+ }
+ done = 1;
+ }
+ } else {
+ /* A process has terminated. */
+ kwsysProcessDestroy(cp, w - WAIT_OBJECT_0);
+ }
+ }
+
+ /* Update the user timeout. */
+ if (userTimeout) {
+ kwsysProcessTime userEndTime = kwsysProcessTimeGetCurrent();
+ kwsysProcessTime difference =
+ kwsysProcessTimeSubtract(userEndTime, userStartTime);
+ double d = kwsysProcessTimeToDouble(difference);
+ *userTimeout -= d;
+ if (*userTimeout < 0) {
+ *userTimeout = 0;
+ }
+ }
+
+ /* Check what happened. */
+ if (pipeId) {
+ /* Data are ready on a pipe. */
+ return pipeId;
+ } else if (expired) {
+ /* A timeout has expired. */
+ if (user) {
+ /* The user timeout has expired. It has no time left. */
+ return kwsysProcess_Pipe_Timeout;
+ } else {
+ /* The process timeout has expired. Kill the child now. */
+ KWSYSPE_DEBUG((stderr, "killing child because timeout expired\n"));
+ kwsysProcess_Kill(cp);
+ cp->TimeoutExpired = 1;
+ cp->Killed = 0;
+ return kwsysProcess_Pipe_None;
+ }
+ } else {
+ /* The children have terminated and no more data are available. */
+ return kwsysProcess_Pipe_None;
+ }
+}
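+
+/* Callers typically loop on the return value above: kwsysProcess_Pipe_STDOUT
+   or kwsysProcess_Pipe_STDERR mean that *data and *length describe freshly
+   read output, kwsysProcess_Pipe_Timeout means the user timeout expired, and
+   kwsysProcess_Pipe_None means there is nothing further to wait for (the
+   children terminated and the pipes closed, or the process timeout killed
+   them).  kwsysProcess_WaitForExit below shows the canonical loop. */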
+
+int kwsysProcess_WaitForExit(kwsysProcess* cp, double* userTimeout)
+{
+ int i;
+ int pipe;
+
+ /* Make sure we are executing a process. */
+ if (!cp || cp->State != kwsysProcess_State_Executing) {
+ return 1;
+ }
+
+ /* Wait for the process to terminate. Ignore all data. */
+ while ((pipe = kwsysProcess_WaitForData(cp, 0, 0, userTimeout)) > 0) {
+ if (pipe == kwsysProcess_Pipe_Timeout) {
+ /* The user timeout has expired. */
+ return 0;
+ }
+ }
+
+ KWSYSPE_DEBUG((stderr, "no more data\n"));
+
+ /* When the last pipe closes in WaitForData, the loop terminates
+ without releasing the pipe's thread. Release it now. */
+ if (cp->CurrentIndex < KWSYSPE_PIPE_COUNT) {
+ KWSYSPE_DEBUG((stderr, "releasing reader %d\n", cp->CurrentIndex));
+ ReleaseSemaphore(cp->Pipe[cp->CurrentIndex].Reader.Go, 1, 0);
+ cp->CurrentIndex = KWSYSPE_PIPE_COUNT;
+ }
+
+ /* Wait for all pipe threads to reset. */
+ for (i = 0; i < KWSYSPE_PIPE_COUNT; ++i) {
+ KWSYSPE_DEBUG((stderr, "waiting reader reset %d\n", i));
+ WaitForSingleObject(cp->Pipe[i].Reader.Reset, INFINITE);
+ KWSYSPE_DEBUG((stderr, "waiting waker reset %d\n", i));
+ WaitForSingleObject(cp->Pipe[i].Waker.Reset, INFINITE);
+ }
+
+ /* ---- It is now safe again to call kwsysProcessCleanup. ----- */
+ /* Close all the pipes. */
+ kwsysProcessCleanup(cp, 0);
+
+ /* Determine the outcome. */
+ if (cp->Killed) {
+ /* We killed the child. */
+ cp->State = kwsysProcess_State_Killed;
+ } else if (cp->TimeoutExpired) {
+ /* The timeout expired. */
+ cp->State = kwsysProcess_State_Expired;
+ } else {
+ /* The children exited. Report the outcome of the child processes. */
+ for (i = 0; i < cp->NumberOfCommands; ++i) {
+ cp->ProcessResults[i].ExitCode = cp->CommandExitCodes[i];
+ if ((cp->ProcessResults[i].ExitCode & 0xF0000000) == 0xC0000000) {
+ /* Child terminated due to exceptional behavior. */
+ cp->ProcessResults[i].State = kwsysProcess_StateByIndex_Exception;
+ cp->ProcessResults[i].ExitValue = 1;
+ kwsysProcessSetExitExceptionByIndex(cp, cp->ProcessResults[i].ExitCode,
+ i);
+ } else {
+ /* Child exited without exception. */
+ cp->ProcessResults[i].State = kwsysProcess_StateByIndex_Exited;
+ cp->ProcessResults[i].ExitException = kwsysProcess_Exception_None;
+ cp->ProcessResults[i].ExitValue = cp->ProcessResults[i].ExitCode;
+ }
+ }
+ /* support legacy state status value */
+ cp->State = cp->ProcessResults[cp->NumberOfCommands - 1].State;
+ }
+
+ return 1;
+}
+
+void kwsysProcess_Interrupt(kwsysProcess* cp)
+{
+ int i;
+ /* Make sure we are executing a process. */
+ if (!cp || cp->State != kwsysProcess_State_Executing || cp->TimeoutExpired ||
+ cp->Killed) {
+ KWSYSPE_DEBUG((stderr, "interrupt: child not executing\n"));
+ return;
+ }
+
+ /* Skip actually interrupting the child if it has already terminated. */
+ if (cp->Terminated) {
+ KWSYSPE_DEBUG((stderr, "interrupt: child already terminated\n"));
+ return;
+ }
+
+ /* Interrupt the children. */
+ if (cp->CreateProcessGroup) {
+ if (cp->ProcessInformation) {
+ for (i = 0; i < cp->NumberOfCommands; ++i) {
+ /* Make sure the process handle isn't closed (e.g. from disowning). */
+ if (cp->ProcessInformation[i].hProcess) {
+ /* The user created a process group for this process. The group ID
+ is the process ID for the original process in the group. Note
+ that we have to use Ctrl+Break: Ctrl+C is not allowed for process
+ groups. */
+ GenerateConsoleCtrlEvent(CTRL_BREAK_EVENT,
+ cp->ProcessInformation[i].dwProcessId);
+ }
+ }
+ }
+ } else {
+    /* No process group was created.  Interrupt our own process group... */
+ GenerateConsoleCtrlEvent(CTRL_BREAK_EVENT, 0);
+ }
+}
+
+void kwsysProcess_Kill(kwsysProcess* cp)
+{
+ int i;
+ /* Make sure we are executing a process. */
+ if (!cp || cp->State != kwsysProcess_State_Executing || cp->TimeoutExpired ||
+ cp->Killed) {
+ KWSYSPE_DEBUG((stderr, "kill: child not executing\n"));
+ return;
+ }
+
+ /* Disable the reading threads. */
+ KWSYSPE_DEBUG((stderr, "kill: disabling pipe threads\n"));
+ kwsysProcessDisablePipeThreads(cp);
+
+ /* Skip actually killing the child if it has already terminated. */
+ if (cp->Terminated) {
+ KWSYSPE_DEBUG((stderr, "kill: child already terminated\n"));
+ return;
+ }
+
+ /* Kill the children. */
+ cp->Killed = 1;
+ for (i = 0; i < cp->NumberOfCommands; ++i) {
+ kwsysProcessKillTree(cp->ProcessInformation[i].dwProcessId);
+ /* Remove from global list of processes and close handles. */
+ kwsysProcessesRemove(cp->ProcessInformation[i].hProcess);
+ kwsysProcessCleanupHandle(&cp->ProcessInformation[i].hThread);
+ kwsysProcessCleanupHandle(&cp->ProcessInformation[i].hProcess);
+ }
+
+ /* We are killing the children and ignoring all data. Do not wait
+ for them to exit. */
+}
+
+void kwsysProcess_KillPID(unsigned long process_id)
+{
+ kwsysProcessKillTree((DWORD)process_id);
+}
+
+/*
+ Function executed for each pipe's thread. Argument is a pointer to
+ the kwsysProcessPipeData instance for this thread.
+*/
+DWORD WINAPI kwsysProcessPipeThreadRead(LPVOID ptd)
+{
+ kwsysProcessPipeData* td = (kwsysProcessPipeData*)ptd;
+ kwsysProcess* cp = td->Process;
+
+ /* Wait for a process to be ready. */
+ while ((WaitForSingleObject(td->Reader.Ready, INFINITE), !cp->Deleting)) {
+ /* Read output from the process for this thread's pipe. */
+ kwsysProcessPipeThreadReadPipe(cp, td);
+
+ /* Signal the main thread we have reset for a new process. */
+ ReleaseSemaphore(td->Reader.Reset, 1, 0);
+ }
+ return 0;
+}
+
+/*
+ Function called in each pipe's thread to handle data for one
+ execution of a subprocess.
+*/
+void kwsysProcessPipeThreadReadPipe(kwsysProcess* cp, kwsysProcessPipeData* td)
+{
+ /* Wait for space in the thread's buffer. */
+ while ((KWSYSPE_DEBUG((stderr, "wait for read %d\n", td->Index)),
+ WaitForSingleObject(td->Reader.Go, INFINITE), !td->Closed)) {
+ KWSYSPE_DEBUG((stderr, "reading %d\n", td->Index));
+
+ /* Read data from the pipe. This may block until data are available. */
+ if (!ReadFile(td->Read, td->DataBuffer, KWSYSPE_PIPE_BUFFER_SIZE,
+ &td->DataLength, 0)) {
+ if (GetLastError() != ERROR_BROKEN_PIPE) {
+ /* UNEXPECTED failure to read the pipe. */
+ }
+
+ /* The pipe closed. There are no more data to read. */
+ td->Closed = 1;
+ KWSYSPE_DEBUG((stderr, "read closed %d\n", td->Index));
+ }
+
+ KWSYSPE_DEBUG((stderr, "read %d\n", td->Index));
+
+ /* Wait for our turn to be handled by the main thread. */
+ WaitForSingleObject(cp->SharedIndexMutex, INFINITE);
+
+ KWSYSPE_DEBUG((stderr, "reporting read %d\n", td->Index));
+
+ /* Tell the main thread we have something to report. */
+ cp->SharedIndex = td->Index;
+ ReleaseSemaphore(cp->Full, 1, 0);
+ }
+
+ /* We were signalled to exit with our buffer empty. Reset the
+ mutex for a new process. */
+ KWSYSPE_DEBUG((stderr, "self releasing reader %d\n", td->Index));
+ ReleaseSemaphore(td->Reader.Go, 1, 0);
+}
+
+/*
+ Function executed for each pipe's thread. Argument is a pointer to
+ the kwsysProcessPipeData instance for this thread.
+*/
+DWORD WINAPI kwsysProcessPipeThreadWake(LPVOID ptd)
+{
+ kwsysProcessPipeData* td = (kwsysProcessPipeData*)ptd;
+ kwsysProcess* cp = td->Process;
+
+ /* Wait for a process to be ready. */
+ while ((WaitForSingleObject(td->Waker.Ready, INFINITE), !cp->Deleting)) {
+ /* Wait for a possible wakeup. */
+ kwsysProcessPipeThreadWakePipe(cp, td);
+
+ /* Signal the main thread we have reset for a new process. */
+ ReleaseSemaphore(td->Waker.Reset, 1, 0);
+ }
+ return 0;
+}
+
+/*
+ Function called in each pipe's thread to handle reading thread
+ wakeup for one execution of a subprocess.
+*/
+void kwsysProcessPipeThreadWakePipe(kwsysProcess* cp, kwsysProcessPipeData* td)
+{
+ (void)cp;
+
+ /* Wait for a possible wake command. */
+ KWSYSPE_DEBUG((stderr, "wait for wake %d\n", td->Index));
+ WaitForSingleObject(td->Waker.Go, INFINITE);
+ KWSYSPE_DEBUG((stderr, "waking %d\n", td->Index));
+
+ /* If the pipe is not closed, we need to wake up the reading thread. */
+ if (!td->Closed) {
+ DWORD dummy;
+ KWSYSPE_DEBUG((stderr, "waker %d writing byte\n", td->Index));
+ WriteFile(td->Write, "", 1, &dummy, 0);
+ KWSYSPE_DEBUG((stderr, "waker %d wrote byte\n", td->Index));
+ }
+}
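+
+/* Writing the single byte above puts data into the pipe, so a reading thread
+   blocked in ReadFile on the read end returns promptly instead of blocking
+   indefinitely while the pipe is being shut down. */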
+
+/* Initialize a process control structure for kwsysProcess_Execute. */
+int kwsysProcessInitialize(kwsysProcess* cp)
+{
+ int i;
+ /* Reset internal status flags. */
+ cp->TimeoutExpired = 0;
+ cp->Terminated = 0;
+ cp->Killed = 0;
+
+ free(cp->ProcessResults);
+ /* Allocate process result information for each process. */
+ cp->ProcessResults = (kwsysProcessResults*)malloc(
+ sizeof(kwsysProcessResults) * (cp->NumberOfCommands));
+ if (!cp->ProcessResults) {
+ return 0;
+ }
+ ZeroMemory(cp->ProcessResults,
+ sizeof(kwsysProcessResults) * cp->NumberOfCommands);
+ for (i = 0; i < cp->NumberOfCommands; i++) {
+ cp->ProcessResults[i].ExitException = kwsysProcess_Exception_None;
+ cp->ProcessResults[i].State = kwsysProcess_StateByIndex_Starting;
+ cp->ProcessResults[i].ExitCode = 1;
+ cp->ProcessResults[i].ExitValue = 1;
+ strcpy(cp->ProcessResults[i].ExitExceptionString, "No exception");
+ }
+
+ /* Allocate process information for each process. */
+ free(cp->ProcessInformation);
+ cp->ProcessInformation = (PROCESS_INFORMATION*)malloc(
+ sizeof(PROCESS_INFORMATION) * cp->NumberOfCommands);
+ if (!cp->ProcessInformation) {
+ return 0;
+ }
+ ZeroMemory(cp->ProcessInformation,
+ sizeof(PROCESS_INFORMATION) * cp->NumberOfCommands);
+ free(cp->CommandExitCodes);
+ cp->CommandExitCodes = (DWORD*)malloc(sizeof(DWORD) * cp->NumberOfCommands);
+ if (!cp->CommandExitCodes) {
+ return 0;
+ }
+ ZeroMemory(cp->CommandExitCodes, sizeof(DWORD) * cp->NumberOfCommands);
+
+ /* Allocate event wait array. The first event is cp->Full, the rest
+ are the process termination events. */
+ cp->ProcessEvents =
+ (PHANDLE)malloc(sizeof(HANDLE) * (cp->NumberOfCommands + 1));
+ if (!cp->ProcessEvents) {
+ return 0;
+ }
+ ZeroMemory(cp->ProcessEvents, sizeof(HANDLE) * (cp->NumberOfCommands + 1));
+ cp->ProcessEvents[0] = cp->Full;
+ cp->ProcessEventsLength = cp->NumberOfCommands + 1;
+
+ /* Allocate space to save the real working directory of this process. */
+ if (cp->WorkingDirectory) {
+ cp->RealWorkingDirectoryLength = GetCurrentDirectoryW(0, 0);
+ if (cp->RealWorkingDirectoryLength > 0) {
+ cp->RealWorkingDirectory =
+ malloc(cp->RealWorkingDirectoryLength * sizeof(wchar_t));
+ if (!cp->RealWorkingDirectory) {
+ return 0;
+ }
+ }
+ }
+ {
+ for (i = 0; i < 3; ++i) {
+ cp->PipeChildStd[i] = INVALID_HANDLE_VALUE;
+ }
+ }
+
+ return 1;
+}
+
+static DWORD kwsysProcessCreateChildHandle(PHANDLE out, HANDLE in, int isStdIn)
+{
+ DWORD flags;
+
+ /* Check whether the handle is valid for this process. */
+ if (in != INVALID_HANDLE_VALUE && GetHandleInformation(in, &flags)) {
+ /* Use the handle as-is if it is already inherited. */
+ if (flags & HANDLE_FLAG_INHERIT) {
+ *out = in;
+ return ERROR_SUCCESS;
+ }
+
+ /* Create an inherited copy of this handle. */
+ if (DuplicateHandle(GetCurrentProcess(), in, GetCurrentProcess(), out, 0,
+ TRUE, DUPLICATE_SAME_ACCESS)) {
+ return ERROR_SUCCESS;
+ } else {
+ return GetLastError();
+ }
+ } else {
+ /* The given handle is not valid for this process. Some child
+ processes may break if they do not have a valid standard handle,
+ so open NUL to give to the child. */
+ SECURITY_ATTRIBUTES sa;
+ ZeroMemory(&sa, sizeof(sa));
+ sa.nLength = (DWORD)sizeof(sa);
+ sa.bInheritHandle = 1;
+ *out = CreateFileW(
+ L"NUL",
+ (isStdIn ? GENERIC_READ : (GENERIC_WRITE | FILE_READ_ATTRIBUTES)),
+ FILE_SHARE_READ | FILE_SHARE_WRITE, &sa, OPEN_EXISTING, 0, 0);
+ return (*out != INVALID_HANDLE_VALUE) ? ERROR_SUCCESS : GetLastError();
+ }
+}
+
+DWORD kwsysProcessCreate(kwsysProcess* cp, int index,
+ kwsysProcessCreateInformation* si)
+{
+ DWORD creationFlags;
+ DWORD error = ERROR_SUCCESS;
+
+ /* Check if we are currently exiting. */
+ if (!kwsysTryEnterCreateProcessSection()) {
+ /* The Ctrl handler is currently working on exiting our process. Rather
+ than return an error code, which could cause incorrect conclusions to be
+ reached by the caller, we simply hang. (For example, a CMake try_run
+ configure step might cause the project to configure wrong.) */
+ Sleep(INFINITE);
+ }
+
+ /* Create the child in a suspended state so we can wait until all
+ children have been created before running any one. */
+ creationFlags = CREATE_SUSPENDED;
+ if (cp->CreateProcessGroup) {
+ creationFlags |= CREATE_NEW_PROCESS_GROUP;
+ }
+
+ /* Create inherited copies of the handles. */
+ (error = kwsysProcessCreateChildHandle(&si->StartupInfo.hStdInput,
+ si->hStdInput, 1)) ||
+ (error = kwsysProcessCreateChildHandle(&si->StartupInfo.hStdOutput,
+ si->hStdOutput, 0)) ||
+ (error = kwsysProcessCreateChildHandle(&si->StartupInfo.hStdError,
+ si->hStdError, 0)) ||
+ /* Create the process. */
+ (!CreateProcessW(0, cp->Commands[index], 0, 0, TRUE, creationFlags, 0, 0,
+ &si->StartupInfo, &cp->ProcessInformation[index]) &&
+ (error = GetLastError()));
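+  /* The chain above stops at the first step that fails; on success every
+     sub-expression evaluates to false and 'error' remains ERROR_SUCCESS. */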
+
+ /* Close the inherited copies of the handles. */
+ if (si->StartupInfo.hStdInput != si->hStdInput) {
+ kwsysProcessCleanupHandle(&si->StartupInfo.hStdInput);
+ }
+ if (si->StartupInfo.hStdOutput != si->hStdOutput) {
+ kwsysProcessCleanupHandle(&si->StartupInfo.hStdOutput);
+ }
+ if (si->StartupInfo.hStdError != si->hStdError) {
+ kwsysProcessCleanupHandle(&si->StartupInfo.hStdError);
+ }
+
+ /* Add the process to the global list of processes. */
+ if (!error &&
+ !kwsysProcessesAdd(cp->ProcessInformation[index].hProcess,
+ cp->ProcessInformation[index].dwProcessId,
+ cp->CreateProcessGroup)) {
+ /* This failed for some reason. Kill the suspended process. */
+ TerminateProcess(cp->ProcessInformation[index].hProcess, 1);
+ /* And clean up... */
+ kwsysProcessCleanupHandle(&cp->ProcessInformation[index].hProcess);
+ kwsysProcessCleanupHandle(&cp->ProcessInformation[index].hThread);
+ strcpy(cp->ErrorMessage, "kwsysProcessesAdd function failed");
+ error = ERROR_NOT_ENOUGH_MEMORY; /* Most likely reason. */
+ }
+
+ /* If the console Ctrl handler is waiting for us, this will release it... */
+ kwsysLeaveCreateProcessSection();
+ return error;
+}
+
+void kwsysProcessDestroy(kwsysProcess* cp, int event)
+{
+ int i;
+ int index;
+
+ /* Find the process index for the termination event. */
+ for (index = 0; index < cp->NumberOfCommands; ++index) {
+ if (cp->ProcessInformation[index].hProcess == cp->ProcessEvents[event]) {
+ break;
+ }
+ }
+
+ /* Check the exit code of the process. */
+ GetExitCodeProcess(cp->ProcessInformation[index].hProcess,
+ &cp->CommandExitCodes[index]);
+
+ /* Remove from global list of processes. */
+ kwsysProcessesRemove(cp->ProcessInformation[index].hProcess);
+
+ /* Close the process handle for the terminated process. */
+ kwsysProcessCleanupHandle(&cp->ProcessInformation[index].hProcess);
+
+ /* Remove the process from the available events. */
+ cp->ProcessEventsLength -= 1;
+ for (i = event; i < cp->ProcessEventsLength; ++i) {
+ cp->ProcessEvents[i] = cp->ProcessEvents[i + 1];
+ }
+
+ /* Check if all processes have terminated. */
+ if (cp->ProcessEventsLength == 1) {
+ cp->Terminated = 1;
+
+ /* Close our copies of the pipe write handles so the pipe threads
+ can detect end-of-data. */
+ for (i = 0; i < KWSYSPE_PIPE_COUNT; ++i) {
+ /* TODO: If the child created its own child (our grandchild)
+ which inherited a copy of the pipe write-end then the pipe
+ may not close and we will still need the waker write pipe.
+ However we still want to be able to detect end-of-data in the
+ normal case. The reader thread will have to switch to using
+ PeekNamedPipe to read the last bit of data from the pipe
+ without blocking. This is equivalent to using a non-blocking
+ read on posix. */
+ KWSYSPE_DEBUG((stderr, "closing wakeup write %d\n", i));
+ kwsysProcessCleanupHandle(&cp->Pipe[i].Write);
+ }
+ }
+}
+
+DWORD kwsysProcessSetupOutputPipeFile(PHANDLE phandle, const char* name)
+{
+ HANDLE fout;
+ wchar_t* wname;
+ DWORD error;
+ if (!name) {
+ return ERROR_INVALID_PARAMETER;
+ }
+
+ /* Close the existing handle. */
+ kwsysProcessCleanupHandle(phandle);
+
+ /* Create a handle to write a file for the pipe. */
+ wname = kwsysEncoding_DupToWide(name);
+ fout =
+ CreateFileW(wname, GENERIC_WRITE, FILE_SHARE_READ, 0, CREATE_ALWAYS, 0, 0);
+ error = GetLastError();
+ free(wname);
+ if (fout == INVALID_HANDLE_VALUE) {
+ return error;
+ }
+
+ /* Assign the replacement handle. */
+ *phandle = fout;
+ return ERROR_SUCCESS;
+}
+
+void kwsysProcessSetupSharedPipe(DWORD nStdHandle, PHANDLE handle)
+{
+ /* Close the existing handle. */
+ kwsysProcessCleanupHandle(handle);
+ /* Store the new standard handle. */
+ *handle = GetStdHandle(nStdHandle);
+}
+
+void kwsysProcessSetupPipeNative(HANDLE native, PHANDLE handle)
+{
+ /* Close the existing handle. */
+ kwsysProcessCleanupHandle(handle);
+ /* Store the new given handle. */
+ *handle = native;
+}
+
+/* Close the given handle if it is open. Reset its value to 0. */
+void kwsysProcessCleanupHandle(PHANDLE h)
+{
+ if (h && *h && *h != INVALID_HANDLE_VALUE &&
+ *h != GetStdHandle(STD_INPUT_HANDLE) &&
+ *h != GetStdHandle(STD_OUTPUT_HANDLE) &&
+ *h != GetStdHandle(STD_ERROR_HANDLE)) {
+ CloseHandle(*h);
+ *h = INVALID_HANDLE_VALUE;
+ }
+}
+
+/* Close all handles created by kwsysProcess_Execute. */
+void kwsysProcessCleanup(kwsysProcess* cp, DWORD error)
+{
+ int i;
+ /* If this is an error case, report the error. */
+ if (error) {
+ /* Construct an error message if one has not been provided already. */
+ if (cp->ErrorMessage[0] == 0) {
+ /* Format the error message. */
+ wchar_t err_msg[KWSYSPE_PIPE_BUFFER_SIZE];
+ DWORD length = FormatMessageW(
+ FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS, 0, error,
+ MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), err_msg,
+ KWSYSPE_PIPE_BUFFER_SIZE, 0);
+ if (length < 1) {
+ /* FormatMessage failed. Use a default message. */
+ _snprintf(cp->ErrorMessage, KWSYSPE_PIPE_BUFFER_SIZE,
+ "Process execution failed with error 0x%X. "
+ "FormatMessage failed with error 0x%X",
+ error, GetLastError());
+ }
+ if (!WideCharToMultiByte(CP_UTF8, 0, err_msg, -1, cp->ErrorMessage,
+ KWSYSPE_PIPE_BUFFER_SIZE, NULL, NULL)) {
+ /* WideCharToMultiByte failed. Use a default message. */
+ _snprintf(cp->ErrorMessage, KWSYSPE_PIPE_BUFFER_SIZE,
+ "Process execution failed with error 0x%X. "
+ "WideCharToMultiByte failed with error 0x%X",
+ error, GetLastError());
+ }
+ }
+
+ /* Remove trailing period and newline, if any. */
+ kwsysProcessCleanErrorMessage(cp);
+
+ /* Set the error state. */
+ cp->State = kwsysProcess_State_Error;
+
+ /* Cleanup any processes already started in a suspended state. */
+ if (cp->ProcessInformation) {
+ for (i = 0; i < cp->NumberOfCommands; ++i) {
+ if (cp->ProcessInformation[i].hProcess) {
+ TerminateProcess(cp->ProcessInformation[i].hProcess, 255);
+ WaitForSingleObject(cp->ProcessInformation[i].hProcess, INFINITE);
+ }
+ }
+ for (i = 0; i < cp->NumberOfCommands; ++i) {
+ /* Remove from global list of processes and close handles. */
+ kwsysProcessesRemove(cp->ProcessInformation[i].hProcess);
+ kwsysProcessCleanupHandle(&cp->ProcessInformation[i].hThread);
+ kwsysProcessCleanupHandle(&cp->ProcessInformation[i].hProcess);
+ }
+ }
+
+ /* Restore the working directory. */
+ if (cp->RealWorkingDirectory) {
+ SetCurrentDirectoryW(cp->RealWorkingDirectory);
+ }
+ }
+
+ /* Free memory. */
+ if (cp->ProcessInformation) {
+ free(cp->ProcessInformation);
+ cp->ProcessInformation = 0;
+ }
+ if (cp->ProcessEvents) {
+ free(cp->ProcessEvents);
+ cp->ProcessEvents = 0;
+ }
+ if (cp->RealWorkingDirectory) {
+ free(cp->RealWorkingDirectory);
+ cp->RealWorkingDirectory = 0;
+ }
+
+ /* Close each pipe. */
+ for (i = 0; i < KWSYSPE_PIPE_COUNT; ++i) {
+ kwsysProcessCleanupHandle(&cp->Pipe[i].Write);
+ kwsysProcessCleanupHandle(&cp->Pipe[i].Read);
+ cp->Pipe[i].Closed = 0;
+ }
+ for (i = 0; i < 3; ++i) {
+ kwsysProcessCleanupHandle(&cp->PipeChildStd[i]);
+ }
+}
+
+void kwsysProcessCleanErrorMessage(kwsysProcess* cp)
+{
+ /* Remove trailing period and newline, if any. */
+ size_t length = strlen(cp->ErrorMessage);
+ if (cp->ErrorMessage[length - 1] == '\n') {
+ cp->ErrorMessage[length - 1] = 0;
+ --length;
+ if (length > 0 && cp->ErrorMessage[length - 1] == '\r') {
+ cp->ErrorMessage[length - 1] = 0;
+ --length;
+ }
+ }
+ if (length > 0 && cp->ErrorMessage[length - 1] == '.') {
+ cp->ErrorMessage[length - 1] = 0;
+ }
+}
+
+/* Get the time at which either the process or user timeout will
+ expire. Returns 1 if the user timeout is first, and 0 otherwise. */
+int kwsysProcessGetTimeoutTime(kwsysProcess* cp, double* userTimeout,
+ kwsysProcessTime* timeoutTime)
+{
+ /* The first time this is called, we need to calculate the time at
+ which the child will timeout. */
+ if (cp->Timeout && cp->TimeoutTime.QuadPart < 0) {
+ kwsysProcessTime length = kwsysProcessTimeFromDouble(cp->Timeout);
+ cp->TimeoutTime = kwsysProcessTimeAdd(cp->StartTime, length);
+ }
+
+ /* Start with process timeout. */
+ *timeoutTime = cp->TimeoutTime;
+
+ /* Check if the user timeout is earlier. */
+ if (userTimeout) {
+ kwsysProcessTime currentTime = kwsysProcessTimeGetCurrent();
+ kwsysProcessTime userTimeoutLength =
+ kwsysProcessTimeFromDouble(*userTimeout);
+ kwsysProcessTime userTimeoutTime =
+ kwsysProcessTimeAdd(currentTime, userTimeoutLength);
+ if (timeoutTime->QuadPart < 0 ||
+ kwsysProcessTimeLess(userTimeoutTime, *timeoutTime)) {
+ *timeoutTime = userTimeoutTime;
+ return 1;
+ }
+ }
+ return 0;
+}
+
+/* Get the length of time before the given timeout time arrives.
+ Returns 1 if the time has already arrived, and 0 otherwise. */
+int kwsysProcessGetTimeoutLeft(kwsysProcessTime* timeoutTime,
+ double* userTimeout,
+ kwsysProcessTime* timeoutLength)
+{
+ if (timeoutTime->QuadPart < 0) {
+ /* No timeout time has been requested. */
+ return 0;
+ } else {
+ /* Calculate the remaining time. */
+ kwsysProcessTime currentTime = kwsysProcessTimeGetCurrent();
+ *timeoutLength = kwsysProcessTimeSubtract(*timeoutTime, currentTime);
+
+ if (timeoutLength->QuadPart < 0 && userTimeout && *userTimeout <= 0) {
+ /* Caller has explicitly requested a zero timeout. */
+ timeoutLength->QuadPart = 0;
+ }
+
+ if (timeoutLength->QuadPart < 0) {
+ /* Timeout has already expired. */
+ return 1;
+ } else {
+ /* There is some time left. */
+ return 0;
+ }
+ }
+}
+
+kwsysProcessTime kwsysProcessTimeGetCurrent()
+{
+ kwsysProcessTime current;
+ FILETIME ft;
+ GetSystemTimeAsFileTime(&ft);
+ current.LowPart = ft.dwLowDateTime;
+ current.HighPart = ft.dwHighDateTime;
+ return current;
+}
+
+DWORD kwsysProcessTimeToDWORD(kwsysProcessTime t)
+{
+ return (DWORD)(t.QuadPart * 0.0001);
+}
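+
+/* Illustrative wait-loop sketch (not part of the original kwsys sources; it
+   only combines the helpers defined above, and the exact caller code here is
+   an assumption). A caller would compute the expiration once, then poll the
+   remaining time on each iteration:
+
+     kwsysProcessTime timeoutTime;
+     kwsysProcessTime timeoutLength;
+     int user = kwsysProcessGetTimeoutTime(cp, userTimeout, &timeoutTime);
+     for (;;) {
+       if (kwsysProcessGetTimeoutLeft(&timeoutTime, user ? userTimeout : 0,
+                                      &timeoutLength)) {
+         break;
+       }
+       ... wait up to kwsysProcessTimeToDWORD(timeoutLength) milliseconds ...
+     }
+*/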
+
+double kwsysProcessTimeToDouble(kwsysProcessTime t)
+{
+ return t.QuadPart * 0.0000001;
+}
+
+kwsysProcessTime kwsysProcessTimeFromDouble(double d)
+{
+ kwsysProcessTime t;
+ t.QuadPart = (LONGLONG)(d * 10000000);
+ return t;
+}
+
+int kwsysProcessTimeLess(kwsysProcessTime in1, kwsysProcessTime in2)
+{
+ return in1.QuadPart < in2.QuadPart;
+}
+
+kwsysProcessTime kwsysProcessTimeAdd(kwsysProcessTime in1,
+ kwsysProcessTime in2)
+{
+ kwsysProcessTime out;
+ out.QuadPart = in1.QuadPart + in2.QuadPart;
+ return out;
+}
+
+kwsysProcessTime kwsysProcessTimeSubtract(kwsysProcessTime in1,
+ kwsysProcessTime in2)
+{
+ kwsysProcessTime out;
+ out.QuadPart = in1.QuadPart - in2.QuadPart;
+ return out;
+}
+
+#define KWSYSPE_CASE(type, str) \
+ cp->ProcessResults[idx].ExitException = kwsysProcess_Exception_##type; \
+ strcpy(cp->ProcessResults[idx].ExitExceptionString, str)
+static void kwsysProcessSetExitExceptionByIndex(kwsysProcess* cp, int code,
+ int idx)
+{
+ switch (code) {
+ case STATUS_CONTROL_C_EXIT:
+ KWSYSPE_CASE(Interrupt, "User interrupt");
+ break;
+
+ case STATUS_FLOAT_DENORMAL_OPERAND:
+ KWSYSPE_CASE(Numerical, "Floating-point exception (denormal operand)");
+ break;
+ case STATUS_FLOAT_DIVIDE_BY_ZERO:
+ KWSYSPE_CASE(Numerical, "Divide-by-zero");
+ break;
+ case STATUS_FLOAT_INEXACT_RESULT:
+ KWSYSPE_CASE(Numerical, "Floating-point exception (inexact result)");
+ break;
+ case STATUS_FLOAT_INVALID_OPERATION:
+ KWSYSPE_CASE(Numerical, "Invalid floating-point operation");
+ break;
+ case STATUS_FLOAT_OVERFLOW:
+ KWSYSPE_CASE(Numerical, "Floating-point overflow");
+ break;
+ case STATUS_FLOAT_STACK_CHECK:
+ KWSYSPE_CASE(Numerical, "Floating-point stack check failed");
+ break;
+ case STATUS_FLOAT_UNDERFLOW:
+ KWSYSPE_CASE(Numerical, "Floating-point underflow");
+ break;
+#ifdef STATUS_FLOAT_MULTIPLE_FAULTS
+ case STATUS_FLOAT_MULTIPLE_FAULTS:
+ KWSYSPE_CASE(Numerical, "Floating-point exception (multiple faults)");
+ break;
+#endif
+#ifdef STATUS_FLOAT_MULTIPLE_TRAPS
+ case STATUS_FLOAT_MULTIPLE_TRAPS:
+ KWSYSPE_CASE(Numerical, "Floating-point exception (multiple traps)");
+ break;
+#endif
+ case STATUS_INTEGER_DIVIDE_BY_ZERO:
+ KWSYSPE_CASE(Numerical, "Integer divide-by-zero");
+ break;
+ case STATUS_INTEGER_OVERFLOW:
+ KWSYSPE_CASE(Numerical, "Integer overflow");
+ break;
+
+ case STATUS_DATATYPE_MISALIGNMENT:
+ KWSYSPE_CASE(Fault, "Datatype misalignment");
+ break;
+ case STATUS_ACCESS_VIOLATION:
+ KWSYSPE_CASE(Fault, "Access violation");
+ break;
+ case STATUS_IN_PAGE_ERROR:
+ KWSYSPE_CASE(Fault, "In-page error");
+ break;
+ case STATUS_INVALID_HANDLE:
+      KWSYSPE_CASE(Fault, "Invalid handle");
+ break;
+ case STATUS_NONCONTINUABLE_EXCEPTION:
+ KWSYSPE_CASE(Fault, "Noncontinuable exception");
+ break;
+ case STATUS_INVALID_DISPOSITION:
+ KWSYSPE_CASE(Fault, "Invalid disposition");
+ break;
+ case STATUS_ARRAY_BOUNDS_EXCEEDED:
+ KWSYSPE_CASE(Fault, "Array bounds exceeded");
+ break;
+ case STATUS_STACK_OVERFLOW:
+ KWSYSPE_CASE(Fault, "Stack overflow");
+ break;
+
+ case STATUS_ILLEGAL_INSTRUCTION:
+ KWSYSPE_CASE(Illegal, "Illegal instruction");
+ break;
+ case STATUS_PRIVILEGED_INSTRUCTION:
+ KWSYSPE_CASE(Illegal, "Privileged instruction");
+ break;
+
+ case STATUS_NO_MEMORY:
+ default:
+ cp->ProcessResults[idx].ExitException = kwsysProcess_Exception_Other;
+ _snprintf(cp->ProcessResults[idx].ExitExceptionString,
+ KWSYSPE_PIPE_BUFFER_SIZE, "Exit code 0x%x\n", code);
+ break;
+ }
+}
+#undef KWSYSPE_CASE
+
+typedef struct kwsysProcess_List_s kwsysProcess_List;
+static kwsysProcess_List* kwsysProcess_List_New(void);
+static void kwsysProcess_List_Delete(kwsysProcess_List* self);
+static int kwsysProcess_List_Update(kwsysProcess_List* self);
+static int kwsysProcess_List_NextProcess(kwsysProcess_List* self);
+static int kwsysProcess_List_GetCurrentProcessId(kwsysProcess_List* self);
+static int kwsysProcess_List_GetCurrentParentId(kwsysProcess_List* self);
+
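+/* Illustrative usage sketch (not part of the original kwsys sources): the
+   declarations above form a simple cursor over the system process list,
+   driven roughly like
+
+     kwsysProcess_List* plist = kwsysProcess_List_New();
+     if (plist) {
+       do {
+         int id = kwsysProcess_List_GetCurrentProcessId(plist);
+         int parent = kwsysProcess_List_GetCurrentParentId(plist);
+         ... inspect id / parent ...
+       } while (kwsysProcess_List_NextProcess(plist));
+       kwsysProcess_List_Delete(plist);
+     }
+
+   kwsysProcessKillTree() later in this file uses exactly this pattern. */
+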
+/* Windows NT 4 API definitions. */
+#define STATUS_INFO_LENGTH_MISMATCH ((NTSTATUS)0xC0000004L)
+typedef LONG NTSTATUS;
+typedef LONG KPRIORITY;
+typedef struct _UNICODE_STRING UNICODE_STRING;
+struct _UNICODE_STRING
+{
+ USHORT Length;
+ USHORT MaximumLength;
+ PWSTR Buffer;
+};
+
+/* The process information structure. Declare only enough to get
+ process identifiers. The rest may be ignored because we use the
+ NextEntryDelta to move through an array of instances. */
+typedef struct _SYSTEM_PROCESS_INFORMATION SYSTEM_PROCESS_INFORMATION;
+typedef SYSTEM_PROCESS_INFORMATION* PSYSTEM_PROCESS_INFORMATION;
+struct _SYSTEM_PROCESS_INFORMATION
+{
+ ULONG NextEntryDelta;
+ ULONG ThreadCount;
+ ULONG Reserved1[6];
+ LARGE_INTEGER CreateTime;
+ LARGE_INTEGER UserTime;
+ LARGE_INTEGER KernelTime;
+ UNICODE_STRING ProcessName;
+ KPRIORITY BasePriority;
+ ULONG ProcessId;
+ ULONG InheritedFromProcessId;
+};
+
+/* Toolhelp32 API definitions. */
+#define TH32CS_SNAPPROCESS 0x00000002
+#if defined(_WIN64)
+typedef unsigned __int64 ProcessULONG_PTR;
+#else
+typedef unsigned long ProcessULONG_PTR;
+#endif
+typedef struct tagPROCESSENTRY32 PROCESSENTRY32;
+typedef PROCESSENTRY32* LPPROCESSENTRY32;
+struct tagPROCESSENTRY32
+{
+ DWORD dwSize;
+ DWORD cntUsage;
+ DWORD th32ProcessID;
+ ProcessULONG_PTR th32DefaultHeapID;
+ DWORD th32ModuleID;
+ DWORD cntThreads;
+ DWORD th32ParentProcessID;
+ LONG pcPriClassBase;
+ DWORD dwFlags;
+ char szExeFile[MAX_PATH];
+};
+
+/* Windows API function types. */
+typedef HANDLE(WINAPI* CreateToolhelp32SnapshotType)(DWORD, DWORD);
+typedef BOOL(WINAPI* Process32FirstType)(HANDLE, LPPROCESSENTRY32);
+typedef BOOL(WINAPI* Process32NextType)(HANDLE, LPPROCESSENTRY32);
+typedef NTSTATUS(WINAPI* ZwQuerySystemInformationType)(ULONG, PVOID, ULONG,
+ PULONG);
+
+static int kwsysProcess_List__New_NT4(kwsysProcess_List* self);
+static int kwsysProcess_List__New_Snapshot(kwsysProcess_List* self);
+static void kwsysProcess_List__Delete_NT4(kwsysProcess_List* self);
+static void kwsysProcess_List__Delete_Snapshot(kwsysProcess_List* self);
+static int kwsysProcess_List__Update_NT4(kwsysProcess_List* self);
+static int kwsysProcess_List__Update_Snapshot(kwsysProcess_List* self);
+static int kwsysProcess_List__Next_NT4(kwsysProcess_List* self);
+static int kwsysProcess_List__Next_Snapshot(kwsysProcess_List* self);
+static int kwsysProcess_List__GetProcessId_NT4(kwsysProcess_List* self);
+static int kwsysProcess_List__GetProcessId_Snapshot(kwsysProcess_List* self);
+static int kwsysProcess_List__GetParentId_NT4(kwsysProcess_List* self);
+static int kwsysProcess_List__GetParentId_Snapshot(kwsysProcess_List* self);
+
+struct kwsysProcess_List_s
+{
+ /* Implementation switches at runtime based on version of Windows. */
+ int NT4;
+
+ /* Implementation functions and data for NT 4. */
+ ZwQuerySystemInformationType P_ZwQuerySystemInformation;
+ char* Buffer;
+ int BufferSize;
+ PSYSTEM_PROCESS_INFORMATION CurrentInfo;
+
+ /* Implementation functions and data for other Windows versions. */
+ CreateToolhelp32SnapshotType P_CreateToolhelp32Snapshot;
+ Process32FirstType P_Process32First;
+ Process32NextType P_Process32Next;
+ HANDLE Snapshot;
+ PROCESSENTRY32 CurrentEntry;
+};
+
+static kwsysProcess_List* kwsysProcess_List_New(void)
+{
+ OSVERSIONINFO osv;
+ kwsysProcess_List* self;
+
+ /* Allocate and initialize the list object. */
+ if (!(self = (kwsysProcess_List*)malloc(sizeof(kwsysProcess_List)))) {
+ return 0;
+ }
+ memset(self, 0, sizeof(*self));
+
+ /* Select an implementation. */
+ ZeroMemory(&osv, sizeof(osv));
+ osv.dwOSVersionInfoSize = sizeof(osv);
+#ifdef KWSYS_WINDOWS_DEPRECATED_GetVersionEx
+# pragma warning(push)
+# ifdef __INTEL_COMPILER
+# pragma warning(disable : 1478)
+# elif defined __clang__
+# pragma clang diagnostic push
+# pragma clang diagnostic ignored "-Wdeprecated-declarations"
+# else
+# pragma warning(disable : 4996)
+# endif
+#endif
+ GetVersionEx(&osv);
+#ifdef KWSYS_WINDOWS_DEPRECATED_GetVersionEx
+# ifdef __clang__
+# pragma clang diagnostic pop
+# else
+# pragma warning(pop)
+# endif
+#endif
+ self->NT4 =
+ (osv.dwPlatformId == VER_PLATFORM_WIN32_NT && osv.dwMajorVersion < 5) ? 1
+ : 0;
+
+ /* Initialize the selected implementation. */
+ if (!(self->NT4 ? kwsysProcess_List__New_NT4(self)
+ : kwsysProcess_List__New_Snapshot(self))) {
+ kwsysProcess_List_Delete(self);
+ return 0;
+ }
+
+ /* Update to the current set of processes. */
+ if (!kwsysProcess_List_Update(self)) {
+ kwsysProcess_List_Delete(self);
+ return 0;
+ }
+ return self;
+}
+
+static void kwsysProcess_List_Delete(kwsysProcess_List* self)
+{
+ if (self) {
+ if (self->NT4) {
+ kwsysProcess_List__Delete_NT4(self);
+ } else {
+ kwsysProcess_List__Delete_Snapshot(self);
+ }
+ free(self);
+ }
+}
+
+static int kwsysProcess_List_Update(kwsysProcess_List* self)
+{
+ return self ? (self->NT4 ? kwsysProcess_List__Update_NT4(self)
+ : kwsysProcess_List__Update_Snapshot(self))
+ : 0;
+}
+
+static int kwsysProcess_List_GetCurrentProcessId(kwsysProcess_List* self)
+{
+ return self ? (self->NT4 ? kwsysProcess_List__GetProcessId_NT4(self)
+ : kwsysProcess_List__GetProcessId_Snapshot(self))
+ : -1;
+}
+
+static int kwsysProcess_List_GetCurrentParentId(kwsysProcess_List* self)
+{
+ return self ? (self->NT4 ? kwsysProcess_List__GetParentId_NT4(self)
+ : kwsysProcess_List__GetParentId_Snapshot(self))
+ : -1;
+}
+
+static int kwsysProcess_List_NextProcess(kwsysProcess_List* self)
+{
+ return (self ? (self->NT4 ? kwsysProcess_List__Next_NT4(self)
+ : kwsysProcess_List__Next_Snapshot(self))
+ : 0);
+}
+
+static int kwsysProcess_List__New_NT4(kwsysProcess_List* self)
+{
+ /* Get a handle to the NT runtime module that should already be
+ loaded in this program. This does not actually increment the
+ reference count to the module so we do not need to close the
+ handle. */
+ HMODULE hNT = GetModuleHandleW(L"ntdll.dll");
+ if (hNT) {
+ /* Get pointers to the needed API functions. */
+ self->P_ZwQuerySystemInformation =
+ ((ZwQuerySystemInformationType)GetProcAddress(
+ hNT, "ZwQuerySystemInformation"));
+ }
+ if (!self->P_ZwQuerySystemInformation) {
+ return 0;
+ }
+
+ /* Allocate an initial process information buffer. */
+ self->BufferSize = 32768;
+ self->Buffer = (char*)malloc(self->BufferSize);
+ return self->Buffer ? 1 : 0;
+}
+
+static void kwsysProcess_List__Delete_NT4(kwsysProcess_List* self)
+{
+ /* Free the process information buffer. */
+ free(self->Buffer);
+}
+
+static int kwsysProcess_List__Update_NT4(kwsysProcess_List* self)
+{
+ self->CurrentInfo = 0;
+ for (;;) {
+ /* Query number 5 is for system process list. */
+ NTSTATUS status =
+ self->P_ZwQuerySystemInformation(5, self->Buffer, self->BufferSize, 0);
+ if (status == STATUS_INFO_LENGTH_MISMATCH) {
+ /* The query requires a bigger buffer. */
+ int newBufferSize = self->BufferSize * 2;
+ char* newBuffer = (char*)malloc(newBufferSize);
+ if (newBuffer) {
+ free(self->Buffer);
+ self->Buffer = newBuffer;
+ self->BufferSize = newBufferSize;
+ } else {
+ return 0;
+ }
+ } else if (status >= 0) {
+ /* The query succeeded. Initialize traversal of the process list. */
+ self->CurrentInfo = (PSYSTEM_PROCESS_INFORMATION)self->Buffer;
+ return 1;
+ } else {
+ /* The query failed. */
+ return 0;
+ }
+ }
+}
+
+static int kwsysProcess_List__Next_NT4(kwsysProcess_List* self)
+{
+ if (self->CurrentInfo) {
+ if (self->CurrentInfo->NextEntryDelta > 0) {
+ self->CurrentInfo =
+ ((PSYSTEM_PROCESS_INFORMATION)((char*)self->CurrentInfo +
+ self->CurrentInfo->NextEntryDelta));
+ return 1;
+ }
+ self->CurrentInfo = 0;
+ }
+ return 0;
+}
+
+static int kwsysProcess_List__GetProcessId_NT4(kwsysProcess_List* self)
+{
+ return self->CurrentInfo ? self->CurrentInfo->ProcessId : -1;
+}
+
+static int kwsysProcess_List__GetParentId_NT4(kwsysProcess_List* self)
+{
+ return self->CurrentInfo ? self->CurrentInfo->InheritedFromProcessId : -1;
+}
+
+static int kwsysProcess_List__New_Snapshot(kwsysProcess_List* self)
+{
+ /* Get a handle to the Windows runtime module that should already be
+ loaded in this program. This does not actually increment the
+ reference count to the module so we do not need to close the
+ handle. */
+ HMODULE hKernel = GetModuleHandleW(L"kernel32.dll");
+ if (hKernel) {
+ self->P_CreateToolhelp32Snapshot =
+ ((CreateToolhelp32SnapshotType)GetProcAddress(
+ hKernel, "CreateToolhelp32Snapshot"));
+ self->P_Process32First =
+ ((Process32FirstType)GetProcAddress(hKernel, "Process32First"));
+ self->P_Process32Next =
+ ((Process32NextType)GetProcAddress(hKernel, "Process32Next"));
+ }
+ return (self->P_CreateToolhelp32Snapshot && self->P_Process32First &&
+ self->P_Process32Next)
+ ? 1
+ : 0;
+}
+
+static void kwsysProcess_List__Delete_Snapshot(kwsysProcess_List* self)
+{
+ if (self->Snapshot) {
+ CloseHandle(self->Snapshot);
+ }
+}
+
+static int kwsysProcess_List__Update_Snapshot(kwsysProcess_List* self)
+{
+ if (self->Snapshot) {
+ CloseHandle(self->Snapshot);
+ }
+ if (!(self->Snapshot =
+ self->P_CreateToolhelp32Snapshot(TH32CS_SNAPPROCESS, 0))) {
+ return 0;
+ }
+ ZeroMemory(&self->CurrentEntry, sizeof(self->CurrentEntry));
+ self->CurrentEntry.dwSize = sizeof(self->CurrentEntry);
+ if (!self->P_Process32First(self->Snapshot, &self->CurrentEntry)) {
+ CloseHandle(self->Snapshot);
+ self->Snapshot = 0;
+ return 0;
+ }
+ return 1;
+}
+
+static int kwsysProcess_List__Next_Snapshot(kwsysProcess_List* self)
+{
+ if (self->Snapshot) {
+ if (self->P_Process32Next(self->Snapshot, &self->CurrentEntry)) {
+ return 1;
+ }
+ CloseHandle(self->Snapshot);
+ self->Snapshot = 0;
+ }
+ return 0;
+}
+
+static int kwsysProcess_List__GetProcessId_Snapshot(kwsysProcess_List* self)
+{
+ return self->Snapshot ? self->CurrentEntry.th32ProcessID : -1;
+}
+
+static int kwsysProcess_List__GetParentId_Snapshot(kwsysProcess_List* self)
+{
+ return self->Snapshot ? self->CurrentEntry.th32ParentProcessID : -1;
+}
+
+static void kwsysProcessKill(DWORD pid)
+{
+ HANDLE h = OpenProcess(PROCESS_TERMINATE, 0, pid);
+ if (h) {
+ TerminateProcess(h, 255);
+ WaitForSingleObject(h, INFINITE);
+ CloseHandle(h);
+ }
+}
+
+static void kwsysProcessKillTree(int pid)
+{
+ kwsysProcess_List* plist = kwsysProcess_List_New();
+ kwsysProcessKill(pid);
+ if (plist) {
+ do {
+ if (kwsysProcess_List_GetCurrentParentId(plist) == pid) {
+ int ppid = kwsysProcess_List_GetCurrentProcessId(plist);
+ kwsysProcessKillTree(ppid);
+ }
+ } while (kwsysProcess_List_NextProcess(plist));
+ kwsysProcess_List_Delete(plist);
+ }
+}
+
+static void kwsysProcessDisablePipeThreads(kwsysProcess* cp)
+{
+ int i;
+
+  /* If data were just reported, release the pipe's thread. */
+ if (cp->CurrentIndex < KWSYSPE_PIPE_COUNT) {
+ KWSYSPE_DEBUG((stderr, "releasing reader %d\n", cp->CurrentIndex));
+ ReleaseSemaphore(cp->Pipe[cp->CurrentIndex].Reader.Go, 1, 0);
+ cp->CurrentIndex = KWSYSPE_PIPE_COUNT;
+ }
+
+  /* Wake up all reading threads that are not on closed pipes. */
+ for (i = 0; i < KWSYSPE_PIPE_COUNT; ++i) {
+ /* The wakeup threads will write one byte to the pipe write ends.
+       If there are no data in the pipe then this is enough to wake up
+ the reading threads. If there are already data in the pipe
+ this may block. We cannot use PeekNamedPipe to check whether
+ there are data because an outside process might still be
+ writing data if we are disowning it. Also, PeekNamedPipe will
+ block if checking a pipe on which the reading thread is
+ currently calling ReadPipe. Therefore we need a separate
+ thread to call WriteFile. If it blocks, that is okay because
+ it will unblock when we close the read end and break the pipe
+ below. */
+ if (cp->Pipe[i].Read) {
+ KWSYSPE_DEBUG((stderr, "releasing waker %d\n", i));
+ ReleaseSemaphore(cp->Pipe[i].Waker.Go, 1, 0);
+ }
+ }
+
+ /* Tell pipe threads to reset until we run another process. */
+ while (cp->PipesLeft > 0) {
+ /* The waking threads will cause all reading threads to report.
+ Wait for the next one and save its index. */
+ KWSYSPE_DEBUG((stderr, "waiting for reader\n"));
+ WaitForSingleObject(cp->Full, INFINITE);
+ cp->CurrentIndex = cp->SharedIndex;
+ ReleaseSemaphore(cp->SharedIndexMutex, 1, 0);
+ KWSYSPE_DEBUG((stderr, "got reader %d\n", cp->CurrentIndex));
+
+ /* We are done reading this pipe. Close its read handle. */
+ cp->Pipe[cp->CurrentIndex].Closed = 1;
+ kwsysProcessCleanupHandle(&cp->Pipe[cp->CurrentIndex].Read);
+ --cp->PipesLeft;
+
+ /* Tell the reading thread we are done with the data. It will
+ reset immediately because the pipe is closed. */
+ ReleaseSemaphore(cp->Pipe[cp->CurrentIndex].Reader.Go, 1, 0);
+ }
+}
+
+/* Global set of executing processes for use by the Ctrl handler.
+ This global instance will be zero-initialized by the compiler.
+
+ Note that the console Ctrl handler runs on a background thread and so
+ everything it does must be thread safe. Here, we track the hProcess
+ HANDLEs directly instead of kwsysProcess instances, so that we don't have
+ to make kwsysProcess thread safe. */
+typedef struct kwsysProcessInstance_s
+{
+ HANDLE hProcess;
+ DWORD dwProcessId;
+ int NewProcessGroup; /* Whether the process was created in a new group. */
+} kwsysProcessInstance;
+
+typedef struct kwsysProcessInstances_s
+{
+ /* Whether we have initialized key fields below, like critical sections. */
+ int Initialized;
+
+ /* Ctrl handler runs on a different thread, so we must sync access. */
+ CRITICAL_SECTION Lock;
+
+ int Exiting;
+ size_t Count;
+ size_t Size;
+ kwsysProcessInstance* Processes;
+} kwsysProcessInstances;
+static kwsysProcessInstances kwsysProcesses;
+
+/* Initialize critical section and set up console Ctrl handler. You MUST call
+ this before using any other kwsysProcesses* functions below. */
+static int kwsysProcessesInitialize(void)
+{
+ /* Initialize everything if not done already. */
+ if (!kwsysProcesses.Initialized) {
+ InitializeCriticalSection(&kwsysProcesses.Lock);
+
+ /* Set up console ctrl handler. */
+ if (!SetConsoleCtrlHandler(kwsysCtrlHandler, TRUE)) {
+ return 0;
+ }
+
+ kwsysProcesses.Initialized = 1;
+ }
+ return 1;
+}
+
+/* The Ctrl handler waits on the global list of processes. To prevent an
+ orphaned process, do not create a new process if the Ctrl handler is
+ already running. Do so by using this function to check if it is ok to
+ create a process. */
+static int kwsysTryEnterCreateProcessSection(void)
+{
+ /* Enter main critical section; this means creating a process and the Ctrl
+ handler are mutually exclusive. */
+ EnterCriticalSection(&kwsysProcesses.Lock);
+ /* Indicate to the caller if they can create a process. */
+ if (kwsysProcesses.Exiting) {
+ LeaveCriticalSection(&kwsysProcesses.Lock);
+ return 0;
+ } else {
+ return 1;
+ }
+}
+
+/* Matching function on successful kwsysTryEnterCreateProcessSection return.
+ Make sure you called kwsysProcessesAdd if applicable before calling this.*/
+static void kwsysLeaveCreateProcessSection(void)
+{
+ LeaveCriticalSection(&kwsysProcesses.Lock);
+}
+
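+/* Illustrative call sequence (not part of the original kwsys sources; the
+   local names hProcess, dwProcessId and newProcessGroup are placeholders): a
+   launch guarded by the Ctrl-handler lock looks roughly like
+
+     if (kwsysTryEnterCreateProcessSection()) {
+       ... CreateProcessW(...), yielding hProcess and dwProcessId ...
+       kwsysProcessesAdd(hProcess, dwProcessId, newProcessGroup);
+       kwsysLeaveCreateProcessSection();
+     }
+
+   so that process creation and the Ctrl handler never interleave. */
+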
+/* Add new process to global process list. The Ctrl handler will wait for
+ the process to exit before it returns. Do not close the process handle
+ until after calling kwsysProcessesRemove. The newProcessGroup parameter
+ must be set if the process was created with CREATE_NEW_PROCESS_GROUP. */
+static int kwsysProcessesAdd(HANDLE hProcess, DWORD dwProcessid,
+ int newProcessGroup)
+{
+ if (!kwsysProcessesInitialize() || !hProcess ||
+ hProcess == INVALID_HANDLE_VALUE) {
+ return 0;
+ }
+
+ /* Enter the critical section. */
+ EnterCriticalSection(&kwsysProcesses.Lock);
+
+ /* Make sure there is enough space for the new process handle. */
+ if (kwsysProcesses.Count == kwsysProcesses.Size) {
+ size_t newSize;
+ kwsysProcessInstance* newArray;
+ /* Start with enough space for a small number of process handles
+ and double the size each time more is needed. */
+ newSize = kwsysProcesses.Size ? kwsysProcesses.Size * 2 : 4;
+
+ /* Try allocating the new block of memory. */
+ if ((newArray = (kwsysProcessInstance*)malloc(
+ newSize * sizeof(kwsysProcessInstance)))) {
+ /* Copy the old process handles to the new memory. */
+ if (kwsysProcesses.Count > 0) {
+ memcpy(newArray, kwsysProcesses.Processes,
+ kwsysProcesses.Count * sizeof(kwsysProcessInstance));
+ }
+ } else {
+ /* Failed to allocate memory for the new process handle set. */
+ LeaveCriticalSection(&kwsysProcesses.Lock);
+ return 0;
+ }
+
+ /* Free original array. */
+ free(kwsysProcesses.Processes);
+
+ /* Update original structure with new allocation. */
+ kwsysProcesses.Size = newSize;
+ kwsysProcesses.Processes = newArray;
+ }
+
+ /* Append the new process information to the set. */
+ kwsysProcesses.Processes[kwsysProcesses.Count].hProcess = hProcess;
+ kwsysProcesses.Processes[kwsysProcesses.Count].dwProcessId = dwProcessid;
+ kwsysProcesses.Processes[kwsysProcesses.Count++].NewProcessGroup =
+ newProcessGroup;
+
+ /* Leave critical section and return success. */
+ LeaveCriticalSection(&kwsysProcesses.Lock);
+
+ return 1;
+}
+
+/* Removes a process from the global process list. */
+static void kwsysProcessesRemove(HANDLE hProcess)
+{
+ size_t i;
+
+ if (!hProcess || hProcess == INVALID_HANDLE_VALUE) {
+ return;
+ }
+
+ EnterCriticalSection(&kwsysProcesses.Lock);
+
+ /* Find the given process in the set. */
+ for (i = 0; i < kwsysProcesses.Count; ++i) {
+ if (kwsysProcesses.Processes[i].hProcess == hProcess) {
+ break;
+ }
+ }
+ if (i < kwsysProcesses.Count) {
+ /* Found it! Remove the process from the set. */
+ --kwsysProcesses.Count;
+ for (; i < kwsysProcesses.Count; ++i) {
+ kwsysProcesses.Processes[i] = kwsysProcesses.Processes[i + 1];
+ }
+
+ /* If this was the last process, free the array. */
+ if (kwsysProcesses.Count == 0) {
+ kwsysProcesses.Size = 0;
+ free(kwsysProcesses.Processes);
+ kwsysProcesses.Processes = 0;
+ }
+ }
+
+ LeaveCriticalSection(&kwsysProcesses.Lock);
+}
+
+static BOOL WINAPI kwsysCtrlHandler(DWORD dwCtrlType)
+{
+ size_t i;
+ (void)dwCtrlType;
+ /* Enter critical section. */
+ EnterCriticalSection(&kwsysProcesses.Lock);
+
+ /* Set flag indicating that we are exiting. */
+ kwsysProcesses.Exiting = 1;
+
+ /* If some of our processes were created in a new process group, we must
+ manually interrupt them. They won't otherwise receive a Ctrl+C/Break. */
+ for (i = 0; i < kwsysProcesses.Count; ++i) {
+ if (kwsysProcesses.Processes[i].NewProcessGroup) {
+ DWORD groupId = kwsysProcesses.Processes[i].dwProcessId;
+ if (groupId) {
+ GenerateConsoleCtrlEvent(CTRL_BREAK_EVENT, groupId);
+ }
+ }
+ }
+
+ /* Wait for each child process to exit. This is the key step that prevents
+     us from leaving several orphaned child processes running in the
+ background when the user presses Ctrl+C. */
+ for (i = 0; i < kwsysProcesses.Count; ++i) {
+ WaitForSingleObject(kwsysProcesses.Processes[i].hProcess, INFINITE);
+ }
+
+ /* Leave critical section. */
+ LeaveCriticalSection(&kwsysProcesses.Lock);
+
+ /* Continue on to default Ctrl handler (which calls ExitProcess). */
+ return FALSE;
+}
+
+void kwsysProcess_ResetStartTime(kwsysProcess* cp)
+{
+ if (!cp) {
+ return;
+ }
+ /* Reset start time. */
+ cp->StartTime = kwsysProcessTimeGetCurrent();
+}
diff --git a/test/API/driver/kwsys/README.rst b/test/API/driver/kwsys/README.rst
new file mode 100644
index 0000000..fc6b590
--- /dev/null
+++ b/test/API/driver/kwsys/README.rst
@@ -0,0 +1,37 @@
+KWSys
+*****
+
+Introduction
+============
+
+KWSys is the Kitware System Library. It provides platform-independent
+APIs to many common system features that are implemented differently on
+every platform. This library is intended to be shared among many
+projects at the source level, so it has a configurable namespace.
+Each project should configure KWSys to use a namespace unique to itself.
+See comments in `CMakeLists.txt`_ for details.
+
+.. _`CMakeLists.txt`: CMakeLists.txt
+
+License
+=======
+
+KWSys is distributed under the OSI-approved BSD 3-clause License.
+See `Copyright.txt`_ for details.
+
+.. _`Copyright.txt`: Copyright.txt
+
+Reporting Bugs
+==============
+
+KWSys has no independent issue tracker. After encountering an issue
+(bug) please submit a patch using the instructions for `Contributing`_.
+Otherwise please report the issue to the tracker for the project that
+hosts the copy of KWSys in which the problem was found.
+
+Contributing
+============
+
+See `CONTRIBUTING.rst`_ for instructions to contribute.
+
+.. _`CONTRIBUTING.rst`: CONTRIBUTING.rst
diff --git a/test/API/driver/kwsys/RegularExpression.cxx b/test/API/driver/kwsys/RegularExpression.cxx
new file mode 100644
index 0000000..5e6f8da
--- /dev/null
+++ b/test/API/driver/kwsys/RegularExpression.cxx
@@ -0,0 +1,1218 @@
+/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying
+ file Copyright.txt or https://cmake.org/licensing#kwsys for details. */
+//
+// Copyright (C) 1991 Texas Instruments Incorporated.
+//
+// Permission is granted to any individual or institution to use, copy, modify
+// and distribute this software, provided that this complete copyright and
+// permission notice is maintained, intact, in all copies and supporting
+// documentation.
+//
+// Texas Instruments Incorporated provides this software "as is" without
+// express or implied warranty.
+//
+//
+// Created: MNF 06/13/89 Initial Design and Implementation
+// Updated: LGO 08/09/89 Inherit from Generic
+// Updated: MBN 09/07/89 Added conditional exception handling
+// Updated: MBN 12/15/89 Sprinkled "const" qualifiers all over the place!
+// Updated: DLS 03/22/91 New lite version
+//
+
+#include "kwsysPrivate.h"
+#include KWSYS_HEADER(RegularExpression.hxx)
+
+// Work-around CMake dependency scanning limitation. This must
+// duplicate the above list of headers.
+#if 0
+# include "RegularExpression.hxx.in"
+#endif
+
+#include <stdio.h>
+#include <string.h>
+
+namespace KWSYS_NAMESPACE {
+
+// RegularExpression -- Copies the given regular expression.
+RegularExpression::RegularExpression(const RegularExpression& rxp)
+{
+ if (!rxp.program) {
+ this->program = nullptr;
+ return;
+ }
+ int ind;
+ this->progsize = rxp.progsize; // Copy regular expression size
+ this->program = new char[this->progsize]; // Allocate storage
+ for (ind = this->progsize; ind-- != 0;) // Copy regular expression
+ this->program[ind] = rxp.program[ind];
+ // Copy pointers into last successful "find" operation
+ this->regmatch = rxp.regmatch;
+ this->regmust = rxp.regmust; // Copy field
+ if (rxp.regmust != nullptr) {
+ char* dum = rxp.program;
+ ind = 0;
+ while (dum != rxp.regmust) {
+ ++dum;
+ ++ind;
+ }
+ this->regmust = this->program + ind;
+ }
+ this->regstart = rxp.regstart; // Copy starting index
+ this->reganch = rxp.reganch; // Copy remaining private data
+ this->regmlen = rxp.regmlen; // Copy remaining private data
+}
+
+// operator= -- Copies the given regular expression.
+RegularExpression& RegularExpression::operator=(const RegularExpression& rxp)
+{
+ if (this == &rxp) {
+ return *this;
+ }
+ if (!rxp.program) {
+ this->program = nullptr;
+ return *this;
+ }
+ int ind;
+ this->progsize = rxp.progsize; // Copy regular expression size
+ delete[] this->program;
+ this->program = new char[this->progsize]; // Allocate storage
+ for (ind = this->progsize; ind-- != 0;) // Copy regular expression
+ this->program[ind] = rxp.program[ind];
+ // Copy pointers into last successful "find" operation
+ this->regmatch = rxp.regmatch;
+ this->regmust = rxp.regmust; // Copy field
+ if (rxp.regmust != nullptr) {
+ char* dum = rxp.program;
+ ind = 0;
+ while (dum != rxp.regmust) {
+ ++dum;
+ ++ind;
+ }
+ this->regmust = this->program + ind;
+ }
+ this->regstart = rxp.regstart; // Copy starting index
+ this->reganch = rxp.reganch; // Copy remaining private data
+ this->regmlen = rxp.regmlen; // Copy remaining private data
+
+ return *this;
+}
+
+// operator== -- Returns true if two regular expressions have the same
+// compiled program for pattern matching.
+bool RegularExpression::operator==(const RegularExpression& rxp) const
+{
+ if (this != &rxp) { // Same address?
+ int ind = this->progsize; // Get regular expression size
+ if (ind != rxp.progsize) // If different size regexp
+ return false; // Return failure
+ while (ind-- != 0) // Else while still characters
+ if (this->program[ind] != rxp.program[ind]) // If regexp are different
+ return false; // Return failure
+ }
+ return true; // Else same, return success
+}
+
+// deep_equal -- Returns true if both have the same compiled regular expressions
+// and the same start and end pointers.
+
+bool RegularExpression::deep_equal(const RegularExpression& rxp) const
+{
+ int ind = this->progsize; // Get regular expression size
+ if (ind != rxp.progsize) // If different size regexp
+ return false; // Return failure
+ while (ind-- != 0) // Else while still characters
+ if (this->program[ind] != rxp.program[ind]) // If regexp are different
+ return false; // Return failure
+ // Else if same start/end ptrs, return true
+ return (this->regmatch.start() == rxp.regmatch.start() &&
+ this->regmatch.end() == rxp.regmatch.end());
+}
+
+// The remaining code in this file is derived from the regular expression code
+// whose copyright statement appears below. It has been changed to work
+// with the class concepts of C++ and COOL.
+
+/*
+ * compile and find
+ *
+ * Copyright (c) 1986 by University of Toronto.
+ * Written by Henry Spencer. Not derived from licensed software.
+ *
+ * Permission is granted to anyone to use this software for any
+ * purpose on any computer system, and to redistribute it freely,
+ * subject to the following restrictions:
+ *
+ * 1. The author is not responsible for the consequences of use of
+ * this software, no matter how awful, even if they arise
+ * from defects in it.
+ *
+ * 2. The origin of this software must not be misrepresented, either
+ * by explicit claim or by omission.
+ *
+ * 3. Altered versions must be plainly marked as such, and must not
+ * be misrepresented as being the original software.
+ *
+ * Beware that some of this code is subtly aware of the way operator
+ * precedence is structured in regular expressions. Serious changes in
+ * regular-expression syntax might require a total rethink.
+ */
+
+/*
+ * The "internal use only" fields in regexp.h are present to pass info from
+ * compile to execute that permits the execute phase to run lots faster on
+ * simple cases. They are:
+ *
+ * regstart char that must begin a match; '\0' if none obvious
+ * reganch is the match anchored (at beginning-of-line only)?
+ * regmust string (pointer into program) that match must include, or nullptr
+ * regmlen length of regmust string
+ *
+ * Regstart and reganch permit very fast decisions on suitable starting points
+ * for a match, cutting down the work a lot. Regmust permits fast rejection
+ * of lines that cannot possibly match. The regmust tests are costly enough
+ * that compile() supplies a regmust only if the r.e. contains something
+ * potentially expensive (at present, the only such thing detected is * or +
+ * at the start of the r.e., which can involve a lot of backup). Regmlen is
+ * supplied because the test in find() needs it and compile() is computing
+ * it anyway.
+ */
+
+/*
+ * Structure for regexp "program". This is essentially a linear encoding
+ * of a nondeterministic finite-state machine (aka syntax charts or
+ * "railroad normal form" in parsing technology). Each node is an opcode
+ * plus a "next" pointer, possibly plus an operand. "Next" pointers of
+ * all nodes except BRANCH implement concatenation; a "next" pointer with
+ * a BRANCH on both ends of it is connecting two alternatives. (Here we
+ * have one of the subtle syntax dependencies: an individual BRANCH (as
+ * opposed to a collection of them) is never concatenated with anything
+ * because of operator precedence.) The operand of some types of node is
+ * a literal string; for others, it is a node leading into a sub-FSM. In
+ * particular, the operand of a BRANCH node is the first node of the branch.
+ * (NB this is *not* a tree structure: the tail of the branch connects
+ * to the thing following the set of BRANCHes.) The opcodes are:
+ */
+
+// definition number opnd? meaning
+#define END 0 // no End of program.
+#define BOL 1 // no Match "" at beginning of line.
+#define EOL 2 // no Match "" at end of line.
+#define ANY 3 // no Match any one character.
+#define ANYOF 4 // str Match any character in this string.
+#define ANYBUT \
+ 5 // str Match any character not in this
+ // string.
+#define BRANCH \
+ 6 // node Match this alternative, or the
+ // next...
+#define BACK 7 // no Match "", "next" ptr points backward.
+#define EXACTLY 8 // str Match this string.
+#define NOTHING 9 // no Match empty string.
+#define STAR \
+ 10 // node Match this (simple) thing 0 or more
+ // times.
+#define PLUS \
+ 11 // node Match this (simple) thing 1 or more
+ // times.
+#define OPEN \
+ 20 // no Mark this point in input as start of
+ // #n.
+// OPEN+1 is number 1, etc.
+#define CLOSE 30 // no Analogous to OPEN.
+
+/*
+ * Opcode notes:
+ *
+ * BRANCH The set of branches constituting a single choice are hooked
+ * together with their "next" pointers, since precedence prevents
+ * anything being concatenated to any individual branch. The
+ * "next" pointer of the last BRANCH in a choice points to the
+ * thing following the whole choice. This is also where the
+ * final "next" pointer of each individual branch points; each
+ * branch starts with the operand node of a BRANCH node.
+ *
+ * BACK Normal "next" pointers all implicitly point forward; BACK
+ * exists to make loop structures possible.
+ *
+ * STAR,PLUS '?', and complex '*' and '+', are implemented as circular
+ * BRANCH structures using BACK. Simple cases (one character
+ * per match) are implemented with STAR and PLUS for speed
+ * and to minimize recursive plunges.
+ *
+ * OPEN,CLOSE ...are numbered at compile time.
+ */
+
+/*
+ * A node is one char of opcode followed by two chars of "next" pointer.
+ * "Next" pointers are stored as two 8-bit pieces, high order first. The
+ * value is a positive offset from the opcode of the node containing it.
+ * An operand, if any, simply follows the node. (Note that much of the
+ * code generation knows about this implicit relationship.)
+ *
+ * Using two bytes for the "next" pointer is vast overkill for most things,
+ * but allows patterns to get big without disasters.
+ */
+
+#define OP(p) (*(p))
+#define NEXT(p) (((*((p) + 1) & 0377) << 8) + (*((p) + 2) & 0377))
+#define OPERAND(p) ((p) + 3)
+
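+// Illustrative byte layout (not part of the original sources): an EXACTLY
+// node matching the literal "ab" occupies six consecutive bytes,
+//
+//   [EXACTLY][next-hi][next-lo]['a']['b']['\0']
+//
+// so OP(p) reads the opcode, NEXT(p) reassembles the 16-bit offset from the
+// two following bytes, and OPERAND(p) points at the literal string.
+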
+const unsigned char MAGIC = 0234;
+/*
+ * Utility definitions.
+ */
+
+#define UCHARAT(p) (reinterpret_cast<const unsigned char*>(p))[0]
+
+#define ISMULT(c) ((c) == '*' || (c) == '+' || (c) == '?')
+#define META "^$.[()|?+*\\"
+
+/*
+ * Flags to be passed up and down.
+ */
+#define HASWIDTH 01 // Known never to match null string.
+#define SIMPLE 02 // Simple enough to be STAR/PLUS operand.
+#define SPSTART 04 // Starts with * or +.
+#define WORST 0 // Worst case.
+
+/////////////////////////////////////////////////////////////////////////
+//
+// COMPILE AND ASSOCIATED FUNCTIONS
+//
+/////////////////////////////////////////////////////////////////////////
+
+/*
+ * Read only utility variables.
+ */
+static char regdummy;
+static char* const regdummyptr = &regdummy;
+
+/*
+ * Utility class for RegularExpression::compile().
+ */
+class RegExpCompile
+{
+public:
+ const char* regparse; // Input-scan pointer.
+ int regnpar; // () count.
+ char* regcode; // Code-emit pointer; regdummyptr = don't.
+ long regsize; // Code size.
+
+ char* reg(int, int*);
+ char* regbranch(int*);
+ char* regpiece(int*);
+ char* regatom(int*);
+ char* regnode(char);
+ void regc(char);
+ void reginsert(char, char*);
+ static void regtail(char*, const char*);
+ static void regoptail(char*, const char*);
+};
+
+static const char* regnext(const char*);
+static char* regnext(char*);
+
+#ifdef STRCSPN
+static int strcspn();
+#endif
+
+/*
+ * We can't allocate space until we know how big the compiled form will be,
+ * but we can't compile it (and thus know how big it is) until we've got a
+ * place to put the code. So we cheat: we compile it twice, once with code
+ * generation turned off and size counting turned on, and once "for real".
+ * This also means that we don't allocate space until we are sure that the
+ * thing really will compile successfully, and we never have to move the
+ * code and thus invalidate pointers into it. (Note that it has to be in
+ * one piece because free() must be able to free it all.)
+ *
+ * Beware that the optimization-preparation code in here knows about some
+ * of the structure of the compiled regexp.
+ */
+
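+// Sketch of the idiom described above (illustrative note, not from the
+// original sources): during the first pass compile() points comp.regcode at
+// regdummyptr, so regnode() and regc() merely advance comp.regsize; during
+// the second pass comp.regcode points into this->program and the same calls
+// emit real bytes at the same offsets.
+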
+// compile -- compile a regular expression into internal code
+// for later pattern matching.
+
+bool RegularExpression::compile(const char* exp)
+{
+ const char* scan;
+ const char* longest;
+ int flags;
+
+ if (exp == nullptr) {
+ // RAISE Error, SYM(RegularExpression), SYM(No_Expr),
+ printf("RegularExpression::compile(): No expression supplied.\n");
+ return false;
+ }
+
+ // First pass: determine size, legality.
+ RegExpCompile comp;
+ comp.regparse = exp;
+ comp.regnpar = 1;
+ comp.regsize = 0L;
+ comp.regcode = regdummyptr;
+ comp.regc(static_cast<char>(MAGIC));
+ if (!comp.reg(0, &flags)) {
+ printf("RegularExpression::compile(): Error in compile.\n");
+ return false;
+ }
+ this->regmatch.clear();
+
+ // Small enough for pointer-storage convention?
+ if (comp.regsize >= 32767L) { // Probably could be 65535L.
+ // RAISE Error, SYM(RegularExpression), SYM(Expr_Too_Big),
+ printf("RegularExpression::compile(): Expression too big.\n");
+ return false;
+ }
+
+ // Allocate space.
+ //#ifndef _WIN32
+ if (this->program != nullptr)
+ delete[] this->program;
+ //#endif
+ this->program = new char[comp.regsize];
+ this->progsize = static_cast<int>(comp.regsize);
+
+ if (this->program == nullptr) {
+ // RAISE Error, SYM(RegularExpression), SYM(Out_Of_Memory),
+ printf("RegularExpression::compile(): Out of memory.\n");
+ return false;
+ }
+
+ // Second pass: emit code.
+ comp.regparse = exp;
+ comp.regnpar = 1;
+ comp.regcode = this->program;
+ comp.regc(static_cast<char>(MAGIC));
+ comp.reg(0, &flags);
+
+ // Dig out information for optimizations.
+ this->regstart = '\0'; // Worst-case defaults.
+ this->reganch = 0;
+ this->regmust = nullptr;
+ this->regmlen = 0;
+ scan = this->program + 1; // First BRANCH.
+ if (OP(regnext(scan)) == END) { // Only one top-level choice.
+ scan = OPERAND(scan);
+
+ // Starting-point info.
+ if (OP(scan) == EXACTLY)
+ this->regstart = *OPERAND(scan);
+ else if (OP(scan) == BOL)
+ this->reganch++;
+
+ //
+ // If there's something expensive in the r.e., find the longest
+ // literal string that must appear and make it the regmust. Resolve
+ // ties in favor of later strings, since the regstart check works
+ // with the beginning of the r.e. and avoiding duplication
+ // strengthens checking. Not a strong reason, but sufficient in the
+ // absence of others.
+ //
+ if (flags & SPSTART) {
+ longest = nullptr;
+ size_t len = 0;
+ for (; scan != nullptr; scan = regnext(scan))
+ if (OP(scan) == EXACTLY && strlen(OPERAND(scan)) >= len) {
+ longest = OPERAND(scan);
+ len = strlen(OPERAND(scan));
+ }
+ this->regmust = longest;
+ this->regmlen = len;
+ }
+ }
+ return true;
+}
+
+/*
+ - reg - regular expression, i.e. main body or parenthesized thing
+ *
+ * Caller must absorb opening parenthesis.
+ *
+ * Combining parenthesis handling with the base level of regular expression
+ * is a trifle forced, but the need to tie the tails of the branches to what
+ * follows makes it hard to avoid.
+ */
+char* RegExpCompile::reg(int paren, int* flagp)
+{
+ char* ret;
+ char* br;
+ char* ender;
+ int parno = 0;
+ int flags;
+
+ *flagp = HASWIDTH; // Tentatively.
+
+ // Make an OPEN node, if parenthesized.
+ if (paren) {
+ if (regnpar >= RegularExpressionMatch::NSUBEXP) {
+ // RAISE Error, SYM(RegularExpression), SYM(Too_Many_Parens),
+ printf("RegularExpression::compile(): Too many parentheses.\n");
+ return nullptr;
+ }
+ parno = regnpar;
+ regnpar++;
+ ret = regnode(static_cast<char>(OPEN + parno));
+ } else
+ ret = nullptr;
+
+ // Pick up the branches, linking them together.
+ br = regbranch(&flags);
+ if (br == nullptr)
+ return (nullptr);
+ if (ret != nullptr)
+ regtail(ret, br); // OPEN -> first.
+ else
+ ret = br;
+ if (!(flags & HASWIDTH))
+ *flagp &= ~HASWIDTH;
+ *flagp |= flags & SPSTART;
+ while (*regparse == '|') {
+ regparse++;
+ br = regbranch(&flags);
+ if (br == nullptr)
+ return (nullptr);
+ regtail(ret, br); // BRANCH -> BRANCH.
+ if (!(flags & HASWIDTH))
+ *flagp &= ~HASWIDTH;
+ *flagp |= flags & SPSTART;
+ }
+
+ // Make a closing node, and hook it on the end.
+ ender = regnode(static_cast<char>((paren) ? CLOSE + parno : END));
+ regtail(ret, ender);
+
+ // Hook the tails of the branches to the closing node.
+ for (br = ret; br != nullptr; br = regnext(br))
+ regoptail(br, ender);
+
+ // Check for proper termination.
+ if (paren && *regparse++ != ')') {
+ // RAISE Error, SYM(RegularExpression), SYM(Unmatched_Parens),
+ printf("RegularExpression::compile(): Unmatched parentheses.\n");
+ return nullptr;
+ } else if (!paren && *regparse != '\0') {
+ if (*regparse == ')') {
+ // RAISE Error, SYM(RegularExpression), SYM(Unmatched_Parens),
+ printf("RegularExpression::compile(): Unmatched parentheses.\n");
+ return nullptr;
+ } else {
+ // RAISE Error, SYM(RegularExpression), SYM(Internal_Error),
+ printf("RegularExpression::compile(): Internal error.\n");
+ return nullptr;
+ }
+ // NOTREACHED
+ }
+ return (ret);
+}
+
+/*
+ - regbranch - one alternative of an | operator
+ *
+ * Implements the concatenation operator.
+ */
+char* RegExpCompile::regbranch(int* flagp)
+{
+ char* ret;
+ char* chain;
+ char* latest;
+ int flags;
+
+ *flagp = WORST; // Tentatively.
+
+ ret = regnode(BRANCH);
+ chain = nullptr;
+ while (*regparse != '\0' && *regparse != '|' && *regparse != ')') {
+ latest = regpiece(&flags);
+ if (latest == nullptr)
+ return (nullptr);
+ *flagp |= flags & HASWIDTH;
+ if (chain == nullptr) // First piece.
+ *flagp |= flags & SPSTART;
+ else
+ regtail(chain, latest);
+ chain = latest;
+ }
+ if (chain == nullptr) // Loop ran zero times.
+ regnode(NOTHING);
+
+ return (ret);
+}
+
+/*
+ - regpiece - something followed by possible [*+?]
+ *
+ * Note that the branching code sequences used for ? and the general cases
+ * of * and + are somewhat optimized: they use the same NOTHING node as
+ * both the endmarker for their branch list and the body of the last branch.
+ * It might seem that this node could be dispensed with entirely, but the
+ * endmarker role is not redundant.
+ */
+char* RegExpCompile::regpiece(int* flagp)
+{
+ char* ret;
+ char op;
+ char* next;
+ int flags;
+
+ ret = regatom(&flags);
+ if (ret == nullptr)
+ return (nullptr);
+
+ op = *regparse;
+ if (!ISMULT(op)) {
+ *flagp = flags;
+ return (ret);
+ }
+
+ if (!(flags & HASWIDTH) && op != '?') {
+ // RAISE Error, SYM(RegularExpression), SYM(Empty_Operand),
+ printf("RegularExpression::compile() : *+ operand could be empty.\n");
+ return nullptr;
+ }
+ *flagp = (op != '+') ? (WORST | SPSTART) : (WORST | HASWIDTH);
+
+ if (op == '*' && (flags & SIMPLE))
+ reginsert(STAR, ret);
+ else if (op == '*') {
+ // Emit x* as (x&|), where & means "self".
+ reginsert(BRANCH, ret); // Either x
+ regoptail(ret, regnode(BACK)); // and loop
+ regoptail(ret, ret); // back
+ regtail(ret, regnode(BRANCH)); // or
+ regtail(ret, regnode(NOTHING)); // null.
+ } else if (op == '+' && (flags & SIMPLE))
+ reginsert(PLUS, ret);
+ else if (op == '+') {
+ // Emit x+ as x(&|), where & means "self".
+ next = regnode(BRANCH); // Either
+ regtail(ret, next);
+ regtail(regnode(BACK), ret); // loop back
+ regtail(next, regnode(BRANCH)); // or
+ regtail(ret, regnode(NOTHING)); // null.
+ } else if (op == '?') {
+ // Emit x? as (x|)
+ reginsert(BRANCH, ret); // Either x
+ regtail(ret, regnode(BRANCH)); // or
+ next = regnode(NOTHING); // null.
+ regtail(ret, next);
+ regoptail(ret, next);
+ }
+ regparse++;
+ if (ISMULT(*regparse)) {
+ // RAISE Error, SYM(RegularExpression), SYM(Nested_Operand),
+ printf("RegularExpression::compile(): Nested *?+.\n");
+ return nullptr;
+ }
+ return (ret);
+}
+
+/*
+ - regatom - the lowest level
+ *
+ * Optimization: gobbles an entire sequence of ordinary characters so that
+ * it can turn them into a single node, which is smaller to store and
+ * faster to run. Backslashed characters are exceptions, each becoming a
+ * separate node; the code is simpler that way and it's not worth fixing.
+ */
+char* RegExpCompile::regatom(int* flagp)
+{
+ char* ret;
+ int flags;
+
+ *flagp = WORST; // Tentatively.
+
+ switch (*regparse++) {
+ case '^':
+ ret = regnode(BOL);
+ break;
+ case '$':
+ ret = regnode(EOL);
+ break;
+ case '.':
+ ret = regnode(ANY);
+ *flagp |= HASWIDTH | SIMPLE;
+ break;
+ case '[': {
+ int rxpclass;
+ int rxpclassend;
+
+ if (*regparse == '^') { // Complement of range.
+ ret = regnode(ANYBUT);
+ regparse++;
+ } else
+ ret = regnode(ANYOF);
+ if (*regparse == ']' || *regparse == '-')
+ regc(*regparse++);
+ while (*regparse != '\0' && *regparse != ']') {
+ if (*regparse == '-') {
+ regparse++;
+ if (*regparse == ']' || *regparse == '\0')
+ regc('-');
+ else {
+ rxpclass = UCHARAT(regparse - 2) + 1;
+ rxpclassend = UCHARAT(regparse);
+ if (rxpclass > rxpclassend + 1) {
+ // RAISE Error, SYM(RegularExpression), SYM(Invalid_Range),
+ printf("RegularExpression::compile(): Invalid range in [].\n");
+ return nullptr;
+ }
+ for (; rxpclass <= rxpclassend; rxpclass++)
+ regc(static_cast<char>(rxpclass));
+ regparse++;
+ }
+ } else
+ regc(*regparse++);
+ }
+ regc('\0');
+ if (*regparse != ']') {
+ // RAISE Error, SYM(RegularExpression), SYM(Unmatched_Bracket),
+ printf("RegularExpression::compile(): Unmatched [].\n");
+ return nullptr;
+ }
+ regparse++;
+ *flagp |= HASWIDTH | SIMPLE;
+ } break;
+ case '(':
+ ret = reg(1, &flags);
+ if (ret == nullptr)
+ return (nullptr);
+ *flagp |= flags & (HASWIDTH | SPSTART);
+ break;
+ case '\0':
+ case '|':
+ case ')':
+ // RAISE Error, SYM(RegularExpression), SYM(Internal_Error),
+ printf("RegularExpression::compile(): Internal error.\n"); // Never here
+ return nullptr;
+ case '?':
+ case '+':
+ case '*':
+ // RAISE Error, SYM(RegularExpression), SYM(No_Operand),
+ printf("RegularExpression::compile(): ?+* follows nothing.\n");
+ return nullptr;
+ case '\\':
+ if (*regparse == '\0') {
+ // RAISE Error, SYM(RegularExpression), SYM(Trailing_Backslash),
+ printf("RegularExpression::compile(): Trailing backslash.\n");
+ return nullptr;
+ }
+ ret = regnode(EXACTLY);
+ regc(*regparse++);
+ regc('\0');
+ *flagp |= HASWIDTH | SIMPLE;
+ break;
+ default: {
+ int len;
+ char ender;
+
+ regparse--;
+ len = int(strcspn(regparse, META));
+ if (len <= 0) {
+ // RAISE Error, SYM(RegularExpression), SYM(Internal_Error),
+ printf("RegularExpression::compile(): Internal error.\n");
+ return nullptr;
+ }
+ ender = *(regparse + len);
+ if (len > 1 && ISMULT(ender))
+ len--; // Back off clear of ?+* operand.
+ *flagp |= HASWIDTH;
+ if (len == 1)
+ *flagp |= SIMPLE;
+ ret = regnode(EXACTLY);
+ while (len > 0) {
+ regc(*regparse++);
+ len--;
+ }
+ regc('\0');
+ } break;
+ }
+ return (ret);
+}
+
+/*
+ - regnode - emit a node
+ Location.
+ */
+char* RegExpCompile::regnode(char op)
+{
+ char* ret;
+ char* ptr;
+
+ ret = regcode;
+ if (ret == regdummyptr) {
+ regsize += 3;
+ return (ret);
+ }
+
+ ptr = ret;
+ *ptr++ = op;
+ *ptr++ = '\0'; // Null "next" pointer.
+ *ptr++ = '\0';
+ regcode = ptr;
+
+ return (ret);
+}
+
+/*
+ - regc - emit (if appropriate) a byte of code
+ */
+void RegExpCompile::regc(char b)
+{
+ if (regcode != regdummyptr)
+ *regcode++ = b;
+ else
+ regsize++;
+}
+
+/*
+ - reginsert - insert an operator in front of already-emitted operand
+ *
+ * Means relocating the operand.
+ */
+void RegExpCompile::reginsert(char op, char* opnd)
+{
+ char* src;
+ char* dst;
+ char* place;
+
+ if (regcode == regdummyptr) {
+ regsize += 3;
+ return;
+ }
+
+ src = regcode;
+ regcode += 3;
+ dst = regcode;
+ while (src > opnd)
+ *--dst = *--src;
+
+ place = opnd; // Op node, where operand used to be.
+ *place++ = op;
+ *place++ = '\0';
+ *place = '\0';
+}
+
+/*
+ - regtail - set the next-pointer at the end of a node chain
+ */
+void RegExpCompile::regtail(char* p, const char* val)
+{
+ char* scan;
+ char* temp;
+ int offset;
+
+ if (p == regdummyptr)
+ return;
+
+ // Find last node.
+ scan = p;
+ for (;;) {
+ temp = regnext(scan);
+ if (temp == nullptr)
+ break;
+ scan = temp;
+ }
+
+ if (OP(scan) == BACK)
+ offset = int(scan - val);
+ else
+ offset = int(val - scan);
+ *(scan + 1) = static_cast<char>((offset >> 8) & 0377);
+ *(scan + 2) = static_cast<char>(offset & 0377);
+}
+
+/*
+ - regoptail - regtail on operand of first argument; nop if operandless
+ */
+void RegExpCompile::regoptail(char* p, const char* val)
+{
+ // "Operandless" and "op != BRANCH" are synonymous in practice.
+ if (p == nullptr || p == regdummyptr || OP(p) != BRANCH)
+ return;
+ regtail(OPERAND(p), val);
+}
+
+////////////////////////////////////////////////////////////////////////
+//
+// find and friends
+//
+////////////////////////////////////////////////////////////////////////
+
+/*
+ * Utility class for RegularExpression::find().
+ */
+class RegExpFind
+{
+public:
+ const char* reginput; // String-input pointer.
+ const char* regbol; // Beginning of input, for ^ check.
+ const char** regstartp; // Pointer to startp array.
+ const char** regendp; // Ditto for endp.
+
+ int regtry(const char*, const char**, const char**, const char*);
+ int regmatch(const char*);
+ int regrepeat(const char*);
+};
+
+// find -- Matches the regular expression to the given string.
+// Returns true if found, and sets start and end indexes accordingly.
+bool RegularExpression::find(char const* string,
+ RegularExpressionMatch& rmatch) const
+{
+ const char* s;
+
+ rmatch.clear();
+ rmatch.searchstring = string;
+
+ if (!this->program) {
+ return false;
+ }
+
+ // Check validity of program.
+ if (UCHARAT(this->program) != MAGIC) {
+ // RAISE Error, SYM(RegularExpression), SYM(Internal_Error),
+ printf(
+ "RegularExpression::find(): Compiled regular expression corrupted.\n");
+ return false;
+ }
+
+ // If there is a "must appear" string, look for it.
+ if (this->regmust != nullptr) {
+ s = string;
+ while ((s = strchr(s, this->regmust[0])) != nullptr) {
+ if (strncmp(s, this->regmust, this->regmlen) == 0)
+ break; // Found it.
+ s++;
+ }
+ if (s == nullptr) // Not present.
+ return false;
+ }
+
+ RegExpFind regFind;
+
+ // Mark beginning of line for ^ .
+ regFind.regbol = string;
+
+ // Simplest case: anchored match need be tried only once.
+ if (this->reganch)
+ return (
+ regFind.regtry(string, rmatch.startp, rmatch.endp, this->program) != 0);
+
+ // Messy cases: unanchored match.
+ s = string;
+ if (this->regstart != '\0')
+ // We know what char it must start with.
+ while ((s = strchr(s, this->regstart)) != nullptr) {
+ if (regFind.regtry(s, rmatch.startp, rmatch.endp, this->program))
+ return true;
+ s++;
+ }
+ else
+ // We don't -- general case.
+ do {
+ if (regFind.regtry(s, rmatch.startp, rmatch.endp, this->program))
+ return true;
+ } while (*s++ != '\0');
+
+ // Failure.
+ return false;
+}
+
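+/* Illustrative usage (not part of the original sources; "kwsys" stands in
+   for whatever namespace the including project configured, and the sample
+   pattern and string are arbitrary):
+
+     kwsys::RegularExpression re;
+     kwsys::RegularExpressionMatch m;
+     if (re.compile("b+c") && re.find("abbbc", m)) {
+       std::string whole = m.match(0);        // "bbbc"
+       std::string::size_type at = m.start(); // 1
+     }
+*/
+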
+/*
+ - regtry - try match at specific point
+ 0 failure, 1 success
+ */
+int RegExpFind::regtry(const char* string, const char** start,
+ const char** end, const char* prog)
+{
+ int i;
+ const char** sp1;
+ const char** ep;
+
+ reginput = string;
+ regstartp = start;
+ regendp = end;
+
+ sp1 = start;
+ ep = end;
+ for (i = RegularExpressionMatch::NSUBEXP; i > 0; i--) {
+ *sp1++ = nullptr;
+ *ep++ = nullptr;
+ }
+ if (regmatch(prog + 1)) {
+ start[0] = string;
+ end[0] = reginput;
+ return (1);
+ } else
+ return (0);
+}
+
+/*
+ - regmatch - main matching routine
+ *
+ * Conceptually the strategy is simple: check to see whether the current
+ * node matches, call self recursively to see whether the rest matches,
+ * and then act accordingly. In practice we make some effort to avoid
+ * recursion, in particular by going through "ordinary" nodes (that don't
+ * need to know whether the rest of the match failed) by a loop instead of
+ * by recursion.
+ * 0 failure, 1 success
+ */
+int RegExpFind::regmatch(const char* prog)
+{
+ const char* scan; // Current node.
+ const char* next; // Next node.
+
+ scan = prog;
+
+ while (scan != nullptr) {
+
+ next = regnext(scan);
+
+ switch (OP(scan)) {
+ case BOL:
+ if (reginput != regbol)
+ return (0);
+ break;
+ case EOL:
+ if (*reginput != '\0')
+ return (0);
+ break;
+ case ANY:
+ if (*reginput == '\0')
+ return (0);
+ reginput++;
+ break;
+ case EXACTLY: {
+ size_t len;
+ const char* opnd;
+
+ opnd = OPERAND(scan);
+ // Inline the first character, for speed.
+ if (*opnd != *reginput)
+ return (0);
+ len = strlen(opnd);
+ if (len > 1 && strncmp(opnd, reginput, len) != 0)
+ return (0);
+ reginput += len;
+ } break;
+ case ANYOF:
+ if (*reginput == '\0' || strchr(OPERAND(scan), *reginput) == nullptr)
+ return (0);
+ reginput++;
+ break;
+ case ANYBUT:
+ if (*reginput == '\0' || strchr(OPERAND(scan), *reginput) != nullptr)
+ return (0);
+ reginput++;
+ break;
+ case NOTHING:
+ break;
+ case BACK:
+ break;
+ case OPEN + 1:
+ case OPEN + 2:
+ case OPEN + 3:
+ case OPEN + 4:
+ case OPEN + 5:
+ case OPEN + 6:
+ case OPEN + 7:
+ case OPEN + 8:
+ case OPEN + 9: {
+ int no;
+ const char* save;
+
+ no = OP(scan) - OPEN;
+ save = reginput;
+
+ if (regmatch(next)) {
+
+ //
+ // Don't set startp if some later invocation of the
+ // same parentheses already has.
+ //
+ if (regstartp[no] == nullptr)
+ regstartp[no] = save;
+ return (1);
+ } else
+ return (0);
+ }
+ // break;
+ case CLOSE + 1:
+ case CLOSE + 2:
+ case CLOSE + 3:
+ case CLOSE + 4:
+ case CLOSE + 5:
+ case CLOSE + 6:
+ case CLOSE + 7:
+ case CLOSE + 8:
+ case CLOSE + 9: {
+ int no;
+ const char* save;
+
+ no = OP(scan) - CLOSE;
+ save = reginput;
+
+ if (regmatch(next)) {
+
+ //
+ // Don't set endp if some later invocation of the
+ // same parentheses already has.
+ //
+ if (regendp[no] == nullptr)
+ regendp[no] = save;
+ return (1);
+ } else
+ return (0);
+ }
+ // break;
+ case BRANCH: {
+
+ const char* save;
+
+ if (OP(next) != BRANCH) // No choice.
+ next = OPERAND(scan); // Avoid recursion.
+ else {
+ do {
+ save = reginput;
+ if (regmatch(OPERAND(scan)))
+ return (1);
+ reginput = save;
+ scan = regnext(scan);
+ } while (scan != nullptr && OP(scan) == BRANCH);
+ return (0);
+ // NOTREACHED
+ }
+ } break;
+ case STAR:
+ case PLUS: {
+ char nextch;
+ int no;
+ const char* save;
+ int min_no;
+
+ //
+ // Lookahead to avoid useless match attempts when we know
+ // what character comes next.
+ //
+ nextch = '\0';
+ if (OP(next) == EXACTLY)
+ nextch = *OPERAND(next);
+ min_no = (OP(scan) == STAR) ? 0 : 1;
+ save = reginput;
+ no = regrepeat(OPERAND(scan));
+ while (no >= min_no) {
+ // If it could work, try it.
+ if (nextch == '\0' || *reginput == nextch)
+ if (regmatch(next))
+ return (1);
+ // Couldn't or didn't -- back up.
+ no--;
+ reginput = save + no;
+ }
+ return (0);
+ }
+ // break;
+ case END:
+ return (1); // Success!
+
+ default:
+ // RAISE Error, SYM(RegularExpression), SYM(Internal_Error),
+ printf(
+ "RegularExpression::find(): Internal error -- memory corrupted.\n");
+ return 0;
+ }
+ scan = next;
+ }
+
+ //
+ // We get here only if there's trouble -- normally "case END" is the
+ // terminating point.
+ //
+ // RAISE Error, SYM(RegularExpression), SYM(Internal_Error),
+ printf("RegularExpression::find(): Internal error -- corrupted pointers.\n");
+ return (0);
+}
+
+/*
+ - regrepeat - repeatedly match something simple, report how many
+ */
+int RegExpFind::regrepeat(const char* p)
+{
+ int count = 0;
+ const char* scan;
+ const char* opnd;
+
+ scan = reginput;
+ opnd = OPERAND(p);
+ switch (OP(p)) {
+ case ANY:
+ count = int(strlen(scan));
+ scan += count;
+ break;
+ case EXACTLY:
+ while (*opnd == *scan) {
+ count++;
+ scan++;
+ }
+ break;
+ case ANYOF:
+ while (*scan != '\0' && strchr(opnd, *scan) != nullptr) {
+ count++;
+ scan++;
+ }
+ break;
+ case ANYBUT:
+ while (*scan != '\0' && strchr(opnd, *scan) == nullptr) {
+ count++;
+ scan++;
+ }
+ break;
+ default: // Oh dear. Called inappropriately.
+ // RAISE Error, SYM(RegularExpression), SYM(Internal_Error),
+ printf("cm RegularExpression::find(): Internal error.\n");
+ return 0;
+ }
+ reginput = scan;
+ return (count);
+}
+
+/*
+ - regnext - dig the "next" pointer out of a node
+ */
+static const char* regnext(const char* p)
+{
+ int offset;
+
+ if (p == regdummyptr)
+ return (nullptr);
+
+ offset = NEXT(p);
+ if (offset == 0)
+ return (nullptr);
+
+ if (OP(p) == BACK)
+ return (p - offset);
+ else
+ return (p + offset);
+}
+
+static char* regnext(char* p)
+{
+ int offset;
+
+ if (p == regdummyptr)
+ return (nullptr);
+
+ offset = NEXT(p);
+ if (offset == 0)
+ return (nullptr);
+
+ if (OP(p) == BACK)
+ return (p - offset);
+ else
+ return (p + offset);
+}
+
+} // namespace KWSYS_NAMESPACE
diff --git a/test/API/driver/kwsys/RegularExpression.hxx.in b/test/API/driver/kwsys/RegularExpression.hxx.in
new file mode 100644
index 0000000..0c2366b
--- /dev/null
+++ b/test/API/driver/kwsys/RegularExpression.hxx.in
@@ -0,0 +1,562 @@
+/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying
+ file Copyright.txt or https://cmake.org/licensing#kwsys for details. */
+// Original Copyright notice:
+// Copyright (C) 1991 Texas Instruments Incorporated.
+//
+// Permission is granted to any individual or institution to use, copy, modify,
+// and distribute this software, provided that this complete copyright and
+// permission notice is maintained, intact, in all copies and supporting
+// documentation.
+//
+// Texas Instruments Incorporated provides this software "as is" without
+// express or implied warranty.
+//
+// Created: MNF 06/13/89 Initial Design and Implementation
+// Updated: LGO 08/09/89 Inherit from Generic
+// Updated: MBN 09/07/89 Added conditional exception handling
+// Updated: MBN 12/15/89 Sprinkled "const" qualifiers all over the place!
+// Updated: DLS 03/22/91 New lite version
+//
+
+#ifndef @KWSYS_NAMESPACE@_RegularExpression_hxx
+#define @KWSYS_NAMESPACE@_RegularExpression_hxx
+
+#include <@KWSYS_NAMESPACE@/Configure.h>
+#include <@KWSYS_NAMESPACE@/Configure.hxx>
+
+#include <string>
+
+/* Disable useless Borland warnings. KWSys tries not to force things
+ on its includers, but there is no choice here. */
+#if defined(__BORLANDC__)
+# pragma warn - 8027 /* function not inlined. */
+#endif
+
+namespace @KWSYS_NAMESPACE@ {
+
+// Forward declaration
+class RegularExpression;
+
+/** \class RegularExpressionMatch
+ * \brief Stores the pattern matches of a RegularExpression
+ */
+class @KWSYS_NAMESPACE@_EXPORT RegularExpressionMatch
+{
+public:
+ RegularExpressionMatch();
+
+ bool isValid() const;
+ void clear();
+
+ std::string::size_type start() const;
+ std::string::size_type end() const;
+ std::string::size_type start(int n) const;
+ std::string::size_type end(int n) const;
+ std::string match(int n) const;
+
+ enum
+ {
+ NSUBEXP = 10
+ };
+
+private:
+ friend class RegularExpression;
+ const char* startp[NSUBEXP];
+ const char* endp[NSUBEXP];
+ const char* searchstring;
+};
+
+/**
+ * \brief Creates an invalid match object
+ */
+inline RegularExpressionMatch::RegularExpressionMatch()
+ : startp{}
+ , endp{}
+ , searchstring{}
+{
+}
+
+/**
+ * \brief Returns true if the match pointers are valid
+ */
+inline bool RegularExpressionMatch::isValid() const
+{
+ return (this->startp[0] != nullptr);
+}
+
+/**
+ * \brief Resets to the (invalid) construction state.
+ */
+inline void RegularExpressionMatch::clear()
+{
+ startp[0] = nullptr;
+ endp[0] = nullptr;
+ searchstring = nullptr;
+}
+
+/**
+ * \brief Returns the start index of the full match.
+ */
+inline std::string::size_type RegularExpressionMatch::start() const
+{
+ return static_cast<std::string::size_type>(this->startp[0] - searchstring);
+}
+
+/**
+ * \brief Returns the end index of the full match.
+ */
+inline std::string::size_type RegularExpressionMatch::end() const
+{
+ return static_cast<std::string::size_type>(this->endp[0] - searchstring);
+}
+
+/**
+ * \brief Returns the start index of nth submatch.
+ * start(0) is the start of the full match.
+ */
+inline std::string::size_type RegularExpressionMatch::start(int n) const
+{
+ return static_cast<std::string::size_type>(this->startp[n] -
+ this->searchstring);
+}
+
+/**
+ * \brief Returns the end index of nth submatch.
+ * end(0) is the end of the full match.
+ */
+inline std::string::size_type RegularExpressionMatch::end(int n) const
+{
+ return static_cast<std::string::size_type>(this->endp[n] -
+ this->searchstring);
+}
+
+/**
+ * \brief Returns the nth submatch as a string.
+ */
+inline std::string RegularExpressionMatch::match(int n) const
+{
+ if (this->startp[n] == nullptr) {
+ return std::string();
+ } else {
+ return std::string(
+ this->startp[n],
+ static_cast<std::string::size_type>(this->endp[n] - this->startp[n]));
+ }
+}
+
+/** \class RegularExpression
+ * \brief Implements pattern matching with regular expressions.
+ *
+ * This is the header file for the regular expression class. An object of
+ * this class contains a regular expression, in a special "compiled" format.
+ * This compiled format consists of several slots all kept as the object's
+ * private data. The RegularExpression class provides a convenient way to
+ * represent regular expressions. It makes it easy to search for the same
+ * regular expression in many different strings without having to compile a
+ * string to regular expression format more than necessary.
+ *
+ * This class implements pattern matching via regular expressions.
+ * A regular expression allows a programmer to specify complex
+ * patterns that can be searched for and matched against the
+ * character string of a string object. In its simplest form, a
+ * regular expression is a sequence of characters used to
+ * search for exact character matches. However, many times the
+ * exact sequence to be found is not known, or only a match at
+ * the beginning or end of a string is desired. The RegularExpression
+ * class implements regular expression pattern matching as found and
+ * implemented in many UNIX commands and utilities.
+ *
+ * Example: The perl code
+ *
+ * $filename =~ m"([a-z]+)\.cc";
+ * print $1;
+ *
+ * Is written as follows in C++
+ *
+ * RegularExpression re("([a-z]+)\\.cc");
+ * re.find(filename);
+ * cerr << re.match(1);
+ *
+ *
+ * The regular expression class provides a convenient mechanism
+ * for specifying and manipulating regular expressions. The
+ * regular expression object allows specification of such patterns
+ * by using the following regular expression metacharacters:
+ *
+ * ^ Matches at beginning of a line
+ *
+ * $ Matches at end of a line
+ *
+ * . Matches any single character
+ *
+ * [ ] Matches any character(s) inside the brackets
+ *
+ * [^ ] Matches any character(s) not inside the brackets
+ *
+ * - Matches any character in range on either side of a dash
+ *
+ * * Matches preceding pattern zero or more times
+ *
+ * + Matches preceding pattern one or more times
+ *
+ * ? Matches preceding pattern zero or once only
+ *
+ * () Saves a matched expression and uses it in a later match
+ *
+ * Note that more than one of these metacharacters can be used
+ * in a single regular expression in order to create complex
+ * search patterns. For example, the pattern [^ab1-9] says to
+ * match any character sequence that does not begin with the
+ * characters "ab" followed by numbers in the series one
+ * through nine.
+ *
+ * There are three constructors for RegularExpression. One just creates an
+ * empty RegularExpression object. Another creates a RegularExpression
+ * object and initializes it with a regular expression that is given in the
+ * form of a char*. The third takes a reference to a RegularExpression
+ * object as an argument and creates an object initialized with the
+ * information from the given RegularExpression object.
+ *
+ * The find member function finds the first occurrence of the regular
+ * expression of that object in the string given to find as an argument. Find
+ * returns a boolean, and if true, mutates the private data appropriately.
+ * Find sets pointers to the beginning and end of the thing last found; they
+ * are pointers into the actual string that was searched. The start and end
+ * member functions return indices into the searched string that correspond
+ * to the beginning and end pointers respectively. The compile member
+ * function takes a char* and puts the compiled version of the char* argument
+ * into the object's private data fields. The == and != operators only check
+ * whether the compiled regular expression is the same, and the
+ * deep_equal function also checks whether the start and end pointers are
+ * the same. The is_valid function returns false if program is set to
+ * nullptr (i.e., there is no valid compiled expression). The set_invalid
+ * function sets the program to nullptr (Warning: this deletes the compiled
+ * expression). The following examples may help clarify regular expression
+ * usage:
+ *
+ * * The regular expression "^hello" matches a "hello" only at the
+ * beginning of a line. It would match "hello there" but not "hi,
+ * hello there".
+ *
+ * * The regular expression "long$" matches a "long" only at the end
+ * of a line. It would match "so long\0", but not "long ago".
+ *
+ * * The regular expression "t..t..g" will match anything that has a
+ * "t" then any two characters, another "t", any two characters and
+ * then a "g". It will match "testing", or "test again" but would
+ * not match "toasting"
+ *
+ * * The regular expression "[1-9ab]" matches any number one through
+ * nine, and the characters "a" and "b". It would match "hello 1"
+ * or "begin", but would not match "no-match".
+ *
+ * * The regular expression "[^1-9ab]" matches any character that is
+ * not a number one through nine, or an "a" or "b". It would NOT
+ * match "hello 1" or "begin", but would match "no-match".
+ *
+ * * The regular expression "br* " matches something that begins with
+ * a "b", is followed by zero or more "r"s, and ends in a space. It
+ * would match "brrrrr ", and "b ", but would not match "brrh ".
+ *
+ * * The regular expression "br+ " matches something that begins with
+ * a "b", is followed by one or more "r"s, and ends in a space. It
+ * would match "brrrrr ", and "br ", but would not match "b " or
+ * "brrh ".
+ *
+ * * The regular expression "br? " matches something that begins with
+ * a "b", is followed by zero or one "r"s, and ends in a space. It
+ * would match "br ", and "b ", but would not match "brrrr " or
+ * "brrh ".
+ *
+ * * The regular expression "(..p)b" matches something ending with pb
+ * and beginning with whatever the two characters before the first p
+ * encountered in the line were. It would find "repb" in "rep drepa
+ * qrepb". The regular expression "(..p)a" would find "repa qrepb"
+ * in "rep drepa qrepb"
+ *
+ * * The regular expression "d(..p)" matches something ending with p,
+ * beginning with d, and having two characters in between that are
+ * the same as the two characters before the first p encountered in
+ * the line. It would match "drepa qrepb" in "rep drepa qrepb".
+ *
+ * All methods of RegularExpression can be called simultaneously from
+ * different threads, but only if each invocation uses its own instance of
+ * RegularExpression.
+ */
+class @KWSYS_NAMESPACE@_EXPORT RegularExpression
+{
+public:
+ /**
+ * Instantiate RegularExpression with program=nullptr.
+ */
+ inline RegularExpression();
+
+ /**
+ * Instantiate RegularExpression with compiled char*.
+ */
+ inline RegularExpression(char const*);
+
+ /**
+ * Instantiate RegularExpression as a copy of another regular expression.
+ */
+ RegularExpression(RegularExpression const&);
+
+ /**
+ * Instantiate RegularExpression with compiled string.
+ */
+ inline RegularExpression(std::string const&);
+
+ /**
+ * Destructor.
+ */
+ inline ~RegularExpression();
+
+ /**
+ * Compile a regular expression into internal code
+ * for later pattern matching.
+ */
+ bool compile(char const*);
+
+ /**
+ * Compile a regular expression into internal code
+ * for later pattern matching.
+ */
+ inline bool compile(std::string const&);
+
+ /**
+ * Matches the regular expression to the given string.
+ * Returns true if found, and sets start and end indexes
+ * in the RegularExpressionMatch instance accordingly.
+ *
+ * This method is thread safe when called with different
+ * RegularExpressionMatch instances.
+ */
+ bool find(char const*, RegularExpressionMatch&) const;
+
+ /**
+ * Matches the regular expression to the given string.
+ * Returns true if found, and sets start and end indexes accordingly.
+ */
+ inline bool find(char const*);
+
+ /**
+ * Matches the regular expression to the given std string.
+ * Returns true if found, and sets start and end indexes accordingly.
+ */
+ inline bool find(std::string const&);
+
+ /**
+ * Match indices
+ */
+ inline RegularExpressionMatch const& regMatch() const;
+ inline std::string::size_type start() const;
+ inline std::string::size_type end() const;
+ inline std::string::size_type start(int n) const;
+ inline std::string::size_type end(int n) const;
+
+ /**
+ * Match strings
+ */
+ inline std::string match(int n) const;
+
+ /**
+ * Copy the given regular expression.
+ */
+ RegularExpression& operator=(const RegularExpression& rxp);
+
+ /**
+ * Returns true if two regular expressions have the same
+ * compiled program for pattern matching.
+ */
+ bool operator==(RegularExpression const&) const;
+
+ /**
+ * Returns true if two regular expressions have different
+ * compiled programs for pattern matching.
+ */
+ inline bool operator!=(RegularExpression const&) const;
+
+ /**
+ * Returns true if two regular expressions have the same compiled program
+ * and the same start and end pointers.
+ */
+ bool deep_equal(RegularExpression const&) const;
+
+ /**
+ * True if the compiled regexp is valid.
+ */
+ inline bool is_valid() const;
+
+ /**
+ * Marks the regular expression as invalid.
+ */
+ inline void set_invalid();
+
+private:
+ RegularExpressionMatch regmatch;
+ char regstart; // Internal use only
+ char reganch; // Internal use only
+ const char* regmust; // Internal use only
+ std::string::size_type regmlen; // Internal use only
+ char* program;
+ int progsize;
+};
+
+/**
+ * Create an empty regular expression.
+ */
+inline RegularExpression::RegularExpression()
+ : regstart{}
+ , reganch{}
+ , regmust{}
+ , program{ nullptr }
+ , progsize{}
+{
+}
+
+/**
+ * Creates a regular expression from string s, and
+ * compiles s.
+ */
+inline RegularExpression::RegularExpression(const char* s)
+ : regstart{}
+ , reganch{}
+ , regmust{}
+ , program{ nullptr }
+ , progsize{}
+{
+ if (s) {
+ this->compile(s);
+ }
+}
+
+/**
+ * Creates a regular expression from string s, and
+ * compiles s.
+ */
+inline RegularExpression::RegularExpression(const std::string& s)
+ : regstart{}
+ , reganch{}
+ , regmust{}
+ , program{ nullptr }
+ , progsize{}
+{
+ this->compile(s);
+}
+
+/**
+ * Destroys and frees space allocated for the regular expression.
+ */
+inline RegularExpression::~RegularExpression()
+{
+ //#ifndef _WIN32
+ delete[] this->program;
+ //#endif
+}
+
+/**
+ * Compile a regular expression into internal code
+ * for later pattern matching.
+ */
+inline bool RegularExpression::compile(std::string const& s)
+{
+ return this->compile(s.c_str());
+}
+
+/**
+ * Matches the regular expression to the given C string.
+ * Returns true if found, and sets start and end indexes accordingly.
+ */
+inline bool RegularExpression::find(const char* s)
+{
+ return this->find(s, this->regmatch);
+}
+
+/**
+ * Matches the regular expression to the given std string.
+ * Returns true if found, and sets start and end indexes accordingly.
+ */
+inline bool RegularExpression::find(std::string const& s)
+{
+ return this->find(s.c_str());
+}
+
+/**
+ * Returns the internal match object
+ */
+inline RegularExpressionMatch const& RegularExpression::regMatch() const
+{
+ return this->regmatch;
+}
+
+/**
+ * Returns the start index of the full match.
+ */
+inline std::string::size_type RegularExpression::start() const
+{
+ return regmatch.start();
+}
+
+/**
+ * Returns the end index of the full match.
+ */
+inline std::string::size_type RegularExpression::end() const
+{
+ return regmatch.end();
+}
+
+/**
+ * Return start index of nth submatch. start(0) is the start of the full match.
+ */
+inline std::string::size_type RegularExpression::start(int n) const
+{
+ return regmatch.start(n);
+}
+
+/**
+ * Return end index of nth submatch. end(0) is the end of the full match.
+ */
+inline std::string::size_type RegularExpression::end(int n) const
+{
+ return regmatch.end(n);
+}
+
+/**
+ * Return nth submatch as a string.
+ */
+inline std::string RegularExpression::match(int n) const
+{
+ return regmatch.match(n);
+}
+
+/**
+ * Returns true if two regular expressions have different
+ * compiled programs for pattern matching.
+ */
+inline bool RegularExpression::operator!=(const RegularExpression& r) const
+{
+ return (!(*this == r));
+}
+
+/**
+ * Returns true if a valid regular expression is compiled
+ * and ready for pattern matching.
+ */
+inline bool RegularExpression::is_valid() const
+{
+ return (this->program != nullptr);
+}
+
+inline void RegularExpression::set_invalid()
+{
+ //#ifndef _WIN32
+ delete[] this->program;
+ //#endif
+ this->program = nullptr;
+}
+
+} // namespace @KWSYS_NAMESPACE@
+
+#endif
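For illustration, a minimal sketch of how the RegularExpression API declared above is used. This is not part of the patch; it assumes the KWSys namespace is configured as kwsys, so the header path and namespace below are placeholders that depend on the actual CMake configuration of this copy.

    #include <kwsys/RegularExpression.hxx> // include path depends on the configured namespace

    #include <iostream>
    #include <string>

    int main()
    {
      // Compile the pattern once, then reuse it against many strings.
      kwsys::RegularExpression re("([a-z]+)\\.cc");
      if (!re.is_valid()) {
        return 1;
      }

      std::string filename = "driver/test_driver.cc";
      if (re.find(filename)) {
        // match(0) is the full match; match(1) is the first saved () group.
        std::cout << re.match(1) << '\n'; // prints "driver"
      }

      // Thread-safe variant: results go into a caller-owned match object.
      kwsys::RegularExpressionMatch m;
      if (re.find(filename.c_str(), m)) {
        std::cout << m.start(1) << '-' << m.end(1) << '\n';
      }
      return 0;
    }
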
diff --git a/test/API/driver/kwsys/SetupForDevelopment.sh b/test/API/driver/kwsys/SetupForDevelopment.sh
new file mode 100644
index 0000000..c3a2b16
--- /dev/null
+++ b/test/API/driver/kwsys/SetupForDevelopment.sh
@@ -0,0 +1,20 @@
+#!/usr/bin/env bash
+
+cd "${BASH_SOURCE%/*}" &&
+GitSetup/setup-user && echo &&
+GitSetup/setup-hooks && echo &&
+GitSetup/setup-aliases && echo &&
+GitSetup/setup-upstream && echo &&
+GitSetup/tips
+
+# Rebase master by default
+git config rebase.stat true
+git config branch.master.rebase true
+
+# Disable Gerrit hook explicitly so the commit-msg hook will
+# not complain even if some gerrit remotes are still configured.
+git config hooks.GerritId false
+
+# Record the version of this setup so Scripts/pre-commit can check it.
+SetupForDevelopment_VERSION=2
+git config hooks.SetupForDevelopment ${SetupForDevelopment_VERSION}
diff --git a/test/API/driver/kwsys/SharedForward.h.in b/test/API/driver/kwsys/SharedForward.h.in
new file mode 100644
index 0000000..5716cd4
--- /dev/null
+++ b/test/API/driver/kwsys/SharedForward.h.in
@@ -0,0 +1,879 @@
+/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying
+ file Copyright.txt or https://cmake.org/licensing#kwsys for details. */
+#ifndef @KWSYS_NAMESPACE@_SharedForward_h
+# define @KWSYS_NAMESPACE@_SharedForward_h
+
+/*
+  This header is used to create a forwarding executable that sets up the
+ shared library search path and replaces itself with a real
+ executable. This is useful when creating installations on UNIX with
+ shared libraries that will run from any install directory. Typical
+ usage:
+
+ #if defined(CMAKE_INTDIR)
+ # define CONFIG_DIR_PRE CMAKE_INTDIR "/"
+ # define CONFIG_DIR_POST "/" CMAKE_INTDIR
+ #else
+ # define CONFIG_DIR_PRE ""
+ # define CONFIG_DIR_POST ""
+ #endif
+ #define @KWSYS_NAMESPACE@_SHARED_FORWARD_DIR_BUILD "/path/to/foo-build/bin"
+ #define @KWSYS_NAMESPACE@_SHARED_FORWARD_PATH_BUILD "." CONFIG_DIR_POST
+ #define @KWSYS_NAMESPACE@_SHARED_FORWARD_PATH_INSTALL "../lib/foo-1.2"
+ #define @KWSYS_NAMESPACE@_SHARED_FORWARD_EXE_BUILD CONFIG_DIR_PRE "foo-real"
+ #define @KWSYS_NAMESPACE@_SHARED_FORWARD_EXE_INSTALL
+ "../lib/foo-1.2/foo-real"
+ #define @KWSYS_NAMESPACE@_SHARED_FORWARD_OPTION_COMMAND "--command"
+ #define @KWSYS_NAMESPACE@_SHARED_FORWARD_OPTION_PRINT "--print"
+ #define @KWSYS_NAMESPACE@_SHARED_FORWARD_OPTION_LDD "--ldd"
+ #if defined(CMAKE_INTDIR)
+ # define @KWSYS_NAMESPACE@_SHARED_FORWARD_CONFIG_NAME CMAKE_INTDIR
+ #endif
+ #include <@KWSYS_NAMESPACE@/SharedForward.h>
+ int main(int argc, char** argv)
+ {
+ return @KWSYS_NAMESPACE@_shared_forward_to_real(argc, argv);
+ }
+
+ Specify search and executable paths relative to the forwarding
+ executable location or as full paths. Include no trailing slash.
+ In the case of a multi-configuration build, when CMAKE_INTDIR is
+ defined, the DIR_BUILD setting should point at the directory above
+ the executable (the one containing the per-configuration
+ subdirectory specified by CMAKE_INTDIR). Then PATH_BUILD entries
+ and EXE_BUILD should be specified relative to this location and use
+ CMAKE_INTDIR as necessary. In the above example imagine appending
+ the PATH_BUILD or EXE_BUILD setting to the DIR_BUILD setting. The
+ result should form a valid path with per-configuration subdirectory.
+
+ Additional paths may be specified in the PATH_BUILD and PATH_INSTALL
+ variables by using comma-separated strings. For example:
+
+ #define @KWSYS_NAMESPACE@_SHARED_FORWARD_PATH_BUILD \
+ "." CONFIG_DIR_POST, "/path/to/bar-build" CONFIG_DIR_POST
+ #define @KWSYS_NAMESPACE@_SHARED_FORWARD_PATH_INSTALL \
+ "../lib/foo-1.2", "../lib/bar-4.5"
+
+ See the comments below for specific explanations of each macro.
+*/
+
+/* Disable -Wcast-qual warnings since they are too hard to fix in a
+ cross-platform way. */
+# if defined(__clang__) && defined(__has_warning)
+# if __has_warning("-Wcast-qual")
+# pragma clang diagnostic push
+# pragma clang diagnostic ignored "-Wcast-qual"
+# endif
+# endif
+
+# if defined(__BORLANDC__) && !defined(__cplusplus)
+/* Code has no effect; raised by winnt.h in C (not C++) when ignoring an
+ unused parameter using "(param)" syntax (i.e. no cast to void). */
+# pragma warn - 8019
+# endif
+
+/* Full path to the directory in which this executable is built. Do
+ not include a trailing slash. */
+# if !defined(@KWSYS_NAMESPACE@_SHARED_FORWARD_DIR_BUILD)
+# error "Must define @KWSYS_NAMESPACE@_SHARED_FORWARD_DIR_BUILD"
+# endif
+# if !defined(KWSYS_SHARED_FORWARD_DIR_BUILD)
+# define KWSYS_SHARED_FORWARD_DIR_BUILD \
+ @KWSYS_NAMESPACE@_SHARED_FORWARD_DIR_BUILD
+# endif
+
+/* Library search path for build tree. */
+# if !defined(@KWSYS_NAMESPACE@_SHARED_FORWARD_PATH_BUILD)
+# error "Must define @KWSYS_NAMESPACE@_SHARED_FORWARD_PATH_BUILD"
+# endif
+# if !defined(KWSYS_SHARED_FORWARD_PATH_BUILD)
+# define KWSYS_SHARED_FORWARD_PATH_BUILD \
+ @KWSYS_NAMESPACE@_SHARED_FORWARD_PATH_BUILD
+# endif
+
+/* Library search path for install tree. */
+# if !defined(@KWSYS_NAMESPACE@_SHARED_FORWARD_PATH_INSTALL)
+# error "Must define @KWSYS_NAMESPACE@_SHARED_FORWARD_PATH_INSTALL"
+# endif
+# if !defined(KWSYS_SHARED_FORWARD_PATH_INSTALL)
+# define KWSYS_SHARED_FORWARD_PATH_INSTALL \
+ @KWSYS_NAMESPACE@_SHARED_FORWARD_PATH_INSTALL
+# endif
+
+/* The real executable to which to forward in the build tree. */
+# if !defined(@KWSYS_NAMESPACE@_SHARED_FORWARD_EXE_BUILD)
+# error "Must define @KWSYS_NAMESPACE@_SHARED_FORWARD_EXE_BUILD"
+# endif
+# if !defined(KWSYS_SHARED_FORWARD_EXE_BUILD)
+# define KWSYS_SHARED_FORWARD_EXE_BUILD \
+ @KWSYS_NAMESPACE@_SHARED_FORWARD_EXE_BUILD
+# endif
+
+/* The real executable to which to forward in the install tree. */
+# if !defined(@KWSYS_NAMESPACE@_SHARED_FORWARD_EXE_INSTALL)
+# error "Must define @KWSYS_NAMESPACE@_SHARED_FORWARD_EXE_INSTALL"
+# endif
+# if !defined(KWSYS_SHARED_FORWARD_EXE_INSTALL)
+# define KWSYS_SHARED_FORWARD_EXE_INSTALL \
+ @KWSYS_NAMESPACE@_SHARED_FORWARD_EXE_INSTALL
+# endif
+
+/* The configuration name with which this executable was built (Debug/Release).
+ */
+# if defined(@KWSYS_NAMESPACE@_SHARED_FORWARD_CONFIG_NAME)
+# define KWSYS_SHARED_FORWARD_CONFIG_NAME \
+ @KWSYS_NAMESPACE@_SHARED_FORWARD_CONFIG_NAME
+# else
+# undef KWSYS_SHARED_FORWARD_CONFIG_NAME
+# endif
+
+/* Create command line option to replace executable. */
+# if defined(@KWSYS_NAMESPACE@_SHARED_FORWARD_OPTION_COMMAND)
+# if !defined(KWSYS_SHARED_FORWARD_OPTION_COMMAND)
+# define KWSYS_SHARED_FORWARD_OPTION_COMMAND \
+ @KWSYS_NAMESPACE@_SHARED_FORWARD_OPTION_COMMAND
+# endif
+# else
+# undef KWSYS_SHARED_FORWARD_OPTION_COMMAND
+# endif
+
+/* Create command line option to print environment setting and exit. */
+# if defined(@KWSYS_NAMESPACE@_SHARED_FORWARD_OPTION_PRINT)
+# if !defined(KWSYS_SHARED_FORWARD_OPTION_PRINT)
+# define KWSYS_SHARED_FORWARD_OPTION_PRINT \
+ @KWSYS_NAMESPACE@_SHARED_FORWARD_OPTION_PRINT
+# endif
+# else
+# undef KWSYS_SHARED_FORWARD_OPTION_PRINT
+# endif
+
+/* Create command line option to run ldd or equivalent. */
+# if defined(@KWSYS_NAMESPACE@_SHARED_FORWARD_OPTION_LDD)
+# if !defined(KWSYS_SHARED_FORWARD_OPTION_LDD)
+# define KWSYS_SHARED_FORWARD_OPTION_LDD \
+ @KWSYS_NAMESPACE@_SHARED_FORWARD_OPTION_LDD
+# endif
+# else
+# undef KWSYS_SHARED_FORWARD_OPTION_LDD
+# endif
+
+/* Include needed system headers. */
+
+# include <errno.h>
+# include <limits.h>
+# include <stddef.h> /* size_t */
+# include <stdio.h>
+# include <stdlib.h>
+# include <string.h>
+
+# if defined(_WIN32) && !defined(__CYGWIN__)
+# include <windows.h>
+
+# include <io.h>
+# include <process.h>
+# define KWSYS_SHARED_FORWARD_ESCAPE_ARGV /* re-escape argv for execvp */
+# else
+# include <sys/stat.h>
+# include <unistd.h>
+# endif
+
+/* Configuration for this platform. */
+
+/* The path separator for this platform. */
+# if defined(_WIN32) && !defined(__CYGWIN__)
+# define KWSYS_SHARED_FORWARD_PATH_SEP ';'
+# define KWSYS_SHARED_FORWARD_PATH_SLASH '\\'
+# else
+# define KWSYS_SHARED_FORWARD_PATH_SEP ':'
+# define KWSYS_SHARED_FORWARD_PATH_SLASH '/'
+# endif
+static const char kwsys_shared_forward_path_sep[2] = {
+ KWSYS_SHARED_FORWARD_PATH_SEP, 0
+};
+static const char kwsys_shared_forward_path_slash[2] = {
+ KWSYS_SHARED_FORWARD_PATH_SLASH, 0
+};
+
+/* The maximum length of a file name. */
+# if defined(PATH_MAX)
+# define KWSYS_SHARED_FORWARD_MAXPATH PATH_MAX
+# elif defined(MAXPATHLEN)
+# define KWSYS_SHARED_FORWARD_MAXPATH MAXPATHLEN
+# else
+# define KWSYS_SHARED_FORWARD_MAXPATH 16384
+# endif
+
+/* Select the environment variable holding the shared library runtime
+ search path for this platform and build configuration. Also select
+ ldd command equivalent. */
+
+/* Linux */
+# if defined(__linux)
+# define KWSYS_SHARED_FORWARD_LDD "ldd"
+# define KWSYS_SHARED_FORWARD_LDD_N 1
+# define KWSYS_SHARED_FORWARD_LDPATH "LD_LIBRARY_PATH"
+
+/* FreeBSD */
+# elif defined(__FreeBSD__)
+# define KWSYS_SHARED_FORWARD_LDD "ldd"
+# define KWSYS_SHARED_FORWARD_LDD_N 1
+# define KWSYS_SHARED_FORWARD_LDPATH "LD_LIBRARY_PATH"
+
+/* OpenBSD */
+# elif defined(__OpenBSD__)
+# define KWSYS_SHARED_FORWARD_LDD "ldd"
+# define KWSYS_SHARED_FORWARD_LDD_N 1
+# define KWSYS_SHARED_FORWARD_LDPATH "LD_LIBRARY_PATH"
+
+/* OS X */
+# elif defined(__APPLE__)
+# define KWSYS_SHARED_FORWARD_LDD "otool", "-L"
+# define KWSYS_SHARED_FORWARD_LDD_N 2
+# define KWSYS_SHARED_FORWARD_LDPATH "DYLD_LIBRARY_PATH"
+
+/* AIX */
+# elif defined(_AIX)
+# define KWSYS_SHARED_FORWARD_LDD "dump", "-H"
+# define KWSYS_SHARED_FORWARD_LDD_N 2
+# define KWSYS_SHARED_FORWARD_LDPATH "LIBPATH"
+
+/* SUN */
+# elif defined(__sun)
+# define KWSYS_SHARED_FORWARD_LDD "ldd"
+# define KWSYS_SHARED_FORWARD_LDD_N 1
+# include <sys/isa_defs.h>
+# if defined(_ILP32)
+# define KWSYS_SHARED_FORWARD_LDPATH "LD_LIBRARY_PATH"
+# elif defined(_LP64)
+# define KWSYS_SHARED_FORWARD_LDPATH "LD_LIBRARY_PATH_64"
+# endif
+
+/* HP-UX */
+# elif defined(__hpux)
+# define KWSYS_SHARED_FORWARD_LDD "chatr"
+# define KWSYS_SHARED_FORWARD_LDD_N 1
+# if defined(__LP64__)
+# define KWSYS_SHARED_FORWARD_LDPATH "LD_LIBRARY_PATH"
+# else
+# define KWSYS_SHARED_FORWARD_LDPATH "SHLIB_PATH"
+# endif
+
+/* SGI MIPS */
+# elif defined(__sgi) && defined(_MIPS_SIM)
+# define KWSYS_SHARED_FORWARD_LDD "ldd"
+# define KWSYS_SHARED_FORWARD_LDD_N 1
+# if _MIPS_SIM == _ABIO32
+# define KWSYS_SHARED_FORWARD_LDPATH "LD_LIBRARY_PATH"
+# elif _MIPS_SIM == _ABIN32
+# define KWSYS_SHARED_FORWARD_LDPATH "LD_LIBRARYN32_PATH"
+# elif _MIPS_SIM == _ABI64
+# define KWSYS_SHARED_FORWARD_LDPATH "LD_LIBRARY64_PATH"
+# endif
+
+/* Cygwin */
+# elif defined(__CYGWIN__)
+# define KWSYS_SHARED_FORWARD_LDD \
+ "cygcheck" /* TODO: cygwin 1.7 has ldd \
+ */
+# define KWSYS_SHARED_FORWARD_LDD_N 1
+# define KWSYS_SHARED_FORWARD_LDPATH "PATH"
+
+/* Windows */
+# elif defined(_WIN32)
+# define KWSYS_SHARED_FORWARD_LDPATH "PATH"
+
+/* Guess on this unknown system. */
+# else
+# define KWSYS_SHARED_FORWARD_LDD "ldd"
+# define KWSYS_SHARED_FORWARD_LDD_N 1
+# define KWSYS_SHARED_FORWARD_LDPATH "LD_LIBRARY_PATH"
+# endif
+
+# ifdef KWSYS_SHARED_FORWARD_ESCAPE_ARGV
+typedef struct kwsys_sf_arg_info_s
+{
+ const char* arg;
+ int size;
+ int quote;
+} kwsys_sf_arg_info;
+
+static kwsys_sf_arg_info kwsys_sf_get_arg_info(const char* in)
+{
+ /* Initialize information. */
+ kwsys_sf_arg_info info;
+
+ /* String iterator. */
+ const char* c;
+
+ /* Keep track of how many backslashes have been encountered in a row. */
+ int windows_backslashes = 0;
+
+ /* Start with the length of the original argument, plus one for
+ either a terminating null or a separating space. */
+ info.arg = in;
+ info.size = (int)strlen(in) + 1;
+ info.quote = 0;
+
+ /* Scan the string for characters that require escaping or quoting. */
+ for (c = in; *c; ++c) {
+ /* Check whether this character needs quotes. */
+ if (strchr(" \t?'#&<>|^", *c)) {
+ info.quote = 1;
+ }
+
+ /* On Windows only backslashes and double-quotes need escaping. */
+ if (*c == '\\') {
+ /* Found a backslash. It may need to be escaped later. */
+ ++windows_backslashes;
+ } else if (*c == '"') {
+ /* Found a double-quote. We need to escape it and all
+ immediately preceding backslashes. */
+ info.size += windows_backslashes + 1;
+ windows_backslashes = 0;
+ } else {
+ /* Found another character. This eliminates the possibility
+ that any immediately preceding backslashes will be
+ escaped. */
+ windows_backslashes = 0;
+ }
+ }
+
+ /* Check whether the argument needs surrounding quotes. */
+ if (info.quote) {
+ /* Surrounding quotes are needed. Allocate space for them. */
+ info.size += 2;
+
+ /* We must escape all ending backslashes when quoting on windows. */
+ info.size += windows_backslashes;
+ }
+
+ return info;
+}
+
+static char* kwsys_sf_get_arg(kwsys_sf_arg_info info, char* out)
+{
+ /* String iterator. */
+ const char* c;
+
+ /* Keep track of how many backslashes have been encountered in a row. */
+ int windows_backslashes = 0;
+
+ /* Whether the argument must be quoted. */
+ if (info.quote) {
+ /* Add the opening quote for this argument. */
+ *out++ = '"';
+ }
+
+ /* Scan the string for characters that require escaping or quoting. */
+ for (c = info.arg; *c; ++c) {
+ /* On Windows only backslashes and double-quotes need escaping. */
+ if (*c == '\\') {
+ /* Found a backslash. It may need to be escaped later. */
+ ++windows_backslashes;
+ } else if (*c == '"') {
+ /* Found a double-quote. Escape all immediately preceding
+ backslashes. */
+ while (windows_backslashes > 0) {
+ --windows_backslashes;
+ *out++ = '\\';
+ }
+
+ /* Add the backslash to escape the double-quote. */
+ *out++ = '\\';
+ } else {
+ /* We encountered a normal character. This eliminates any
+ escaping needed for preceding backslashes. */
+ windows_backslashes = 0;
+ }
+
+ /* Store this character. */
+ *out++ = *c;
+ }
+
+ if (info.quote) {
+ /* Add enough backslashes to escape any trailing ones. */
+ while (windows_backslashes > 0) {
+ --windows_backslashes;
+ *out++ = '\\';
+ }
+
+ /* Add the closing quote for this argument. */
+ *out++ = '"';
+ }
+
+ /* Store a terminating null without incrementing. */
+ *out = 0;
+
+ return out;
+}
+# endif
+
+/* Function to convert a logical or relative path to a physical full path. */
+static int kwsys_shared_forward_realpath(const char* in_path, char* out_path)
+{
+# if defined(_WIN32) && !defined(__CYGWIN__)
+ /* Implementation for Windows. */
+ DWORD n =
+ GetFullPathNameA(in_path, KWSYS_SHARED_FORWARD_MAXPATH, out_path, 0);
+ return n > 0 && n <= KWSYS_SHARED_FORWARD_MAXPATH;
+# else
+ /* Implementation for UNIX. */
+ return realpath(in_path, out_path) != 0;
+# endif
+}
+
+static int kwsys_shared_forward_samepath(const char* file1, const char* file2)
+{
+# if defined(_WIN32)
+ int result = 0;
+ HANDLE h1 = CreateFileA(file1, GENERIC_READ, FILE_SHARE_READ, NULL,
+ OPEN_EXISTING, FILE_FLAG_BACKUP_SEMANTICS, NULL);
+ HANDLE h2 = CreateFileA(file2, GENERIC_READ, FILE_SHARE_READ, NULL,
+ OPEN_EXISTING, FILE_FLAG_BACKUP_SEMANTICS, NULL);
+ if (h1 != INVALID_HANDLE_VALUE && h2 != INVALID_HANDLE_VALUE) {
+ BY_HANDLE_FILE_INFORMATION fi1;
+ BY_HANDLE_FILE_INFORMATION fi2;
+ GetFileInformationByHandle(h1, &fi1);
+ GetFileInformationByHandle(h2, &fi2);
+ result = (fi1.dwVolumeSerialNumber == fi2.dwVolumeSerialNumber &&
+ fi1.nFileIndexHigh == fi2.nFileIndexHigh &&
+ fi1.nFileIndexLow == fi2.nFileIndexLow);
+ }
+ CloseHandle(h1);
+ CloseHandle(h2);
+ return result;
+# else
+ struct stat fs1, fs2;
+ return (stat(file1, &fs1) == 0 && stat(file2, &fs2) == 0 &&
+ memcmp(&fs2.st_dev, &fs1.st_dev, sizeof(fs1.st_dev)) == 0 &&
+ memcmp(&fs2.st_ino, &fs1.st_ino, sizeof(fs1.st_ino)) == 0 &&
+ fs2.st_size == fs1.st_size);
+# endif
+}
+
+/* Function to report a system error message. */
+static void kwsys_shared_forward_strerror(char* message)
+{
+# if defined(_WIN32) && !defined(__CYGWIN__)
+ /* Implementation for Windows. */
+ DWORD original = GetLastError();
+ DWORD length =
+ FormatMessageA(FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS,
+ 0, original, MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT),
+ message, KWSYS_SHARED_FORWARD_MAXPATH, 0);
+ if (length < 1 || length > KWSYS_SHARED_FORWARD_MAXPATH) {
+ /* FormatMessage failed. Use a default message. */
+ _snprintf(message, KWSYS_SHARED_FORWARD_MAXPATH,
+ "Error 0x%X (FormatMessage failed with error 0x%X)", original,
+ GetLastError());
+ }
+# else
+ /* Implementation for UNIX. */
+ strcpy(message, strerror(errno));
+# endif
+}
+
+/* Functions to execute a child process. */
+static void kwsys_shared_forward_execvp(const char* cmd,
+ char const* const* argv)
+{
+# ifdef KWSYS_SHARED_FORWARD_ESCAPE_ARGV
+ /* Count the number of arguments. */
+ int argc = 0;
+ {
+ char const* const* argvc;
+ for (argvc = argv; *argvc; ++argvc, ++argc) {
+ }
+ }
+
+ /* Create the escaped arguments. */
+ {
+ char** nargv = (char**)malloc((argc + 1) * sizeof(char*));
+ int i;
+ for (i = 0; i < argc; ++i) {
+ kwsys_sf_arg_info info = kwsys_sf_get_arg_info(argv[i]);
+ nargv[i] = (char*)malloc(info.size);
+ kwsys_sf_get_arg(info, nargv[i]);
+ }
+ nargv[argc] = 0;
+
+ /* Replace the command line to be used. */
+ argv = (char const* const*)nargv;
+ }
+# endif
+
+/* Invoke the child process. */
+# if defined(_MSC_VER)
+ _execvp(cmd, argv);
+# elif defined(__MINGW32__) && !defined(__MINGW64__)
+ execvp(cmd, argv);
+# else
+ execvp(cmd, (char* const*)argv);
+# endif
+}
+
+/* Function to get the directory containing the given file or directory. */
+static void kwsys_shared_forward_dirname(const char* begin, char* result)
+{
+ /* Find the location of the last slash. */
+ int last_slash_index = -1;
+ const char* end = begin + strlen(begin);
+ for (; begin <= end && last_slash_index < 0; --end) {
+ if (*end == '/' || *end == '\\') {
+ last_slash_index = (int)(end - begin);
+ }
+ }
+
+ /* Handle each case of the index of the last slash. */
+ if (last_slash_index < 0) {
+ /* No slashes. */
+ strcpy(result, ".");
+ } else if (last_slash_index == 0) {
+ /* Only one leading slash. */
+ strcpy(result, kwsys_shared_forward_path_slash);
+ }
+# if defined(_WIN32)
+ else if (last_slash_index == 2 && begin[1] == ':') {
+ /* Only one leading drive letter and slash. */
+ strncpy(result, begin, (size_t)last_slash_index);
+ result[last_slash_index] = KWSYS_SHARED_FORWARD_PATH_SLASH;
+ result[last_slash_index + 1] = 0;
+ }
+# endif
+ else {
+ /* A non-leading slash. */
+ strncpy(result, begin, (size_t)last_slash_index);
+ result[last_slash_index] = 0;
+ }
+}
+
+/* Function to check if a file exists and is executable. */
+static int kwsys_shared_forward_is_executable(const char* f)
+{
+# if defined(_MSC_VER)
+# define KWSYS_SHARED_FORWARD_ACCESS _access
+# else
+# define KWSYS_SHARED_FORWARD_ACCESS access
+# endif
+# if defined(X_OK)
+# define KWSYS_SHARED_FORWARD_ACCESS_OK X_OK
+# else
+# define KWSYS_SHARED_FORWARD_ACCESS_OK 04
+# endif
+ if (KWSYS_SHARED_FORWARD_ACCESS(f, KWSYS_SHARED_FORWARD_ACCESS_OK) == 0) {
+ return 1;
+ } else {
+ return 0;
+ }
+}
+
+/* Function to locate the executable currently running. */
+static int kwsys_shared_forward_self_path(const char* argv0, char* result)
+{
+ /* Check whether argv0 has a slash. */
+ int has_slash = 0;
+ const char* p = argv0;
+ for (; *p && !has_slash; ++p) {
+ if (*p == '/' || *p == '\\') {
+ has_slash = 1;
+ }
+ }
+
+ if (has_slash) {
+ /* There is a slash. Use the dirname of the given location. */
+ kwsys_shared_forward_dirname(argv0, result);
+ return 1;
+ } else {
+ /* There is no slash. Search the PATH for the executable. */
+ const char* path = getenv("PATH");
+ const char* begin = path;
+ const char* end = begin + (begin ? strlen(begin) : 0);
+ const char* first = begin;
+ while (first != end) {
+ /* Store the end of this path entry. */
+ const char* last;
+
+ /* Skip all path separators. */
+ for (; *first && *first == KWSYS_SHARED_FORWARD_PATH_SEP; ++first)
+ ;
+
+ /* Find the next separator. */
+ for (last = first; *last && *last != KWSYS_SHARED_FORWARD_PATH_SEP;
+ ++last)
+ ;
+
+ /* If we got a non-empty directory, look for the executable there. */
+ if (first < last) {
+ /* Determine the length without trailing slash. */
+ size_t length = (size_t)(last - first);
+ if (*(last - 1) == '/' || *(last - 1) == '\\') {
+ --length;
+ }
+
+ /* Construct the name of the executable in this location. */
+ strncpy(result, first, length);
+ result[length] = KWSYS_SHARED_FORWARD_PATH_SLASH;
+ strcpy(result + (length) + 1, argv0);
+
+ /* Check if it exists and is executable. */
+ if (kwsys_shared_forward_is_executable(result)) {
+ /* Found it. */
+ result[length] = 0;
+ return 1;
+ }
+ }
+
+ /* Move to the next directory in the path. */
+ first = last;
+ }
+ }
+
+ /* We could not find the executable. */
+ return 0;
+}
+
+/* Function to convert a specified path to a full path. If it is not
+ already full, it is taken relative to the self path. */
+static int kwsys_shared_forward_fullpath(const char* self_path,
+ const char* in_path, char* result,
+ const char* desc)
+{
+ /* Check the specified path type. */
+ if (in_path[0] == '/') {
+ /* Already a full path. */
+ strcpy(result, in_path);
+ }
+# if defined(_WIN32)
+ else if (in_path[0] && in_path[1] == ':') {
+ /* Already a full path. */
+ strcpy(result, in_path);
+ }
+# endif
+ else {
+ /* Relative to self path. */
+ char temp_path[KWSYS_SHARED_FORWARD_MAXPATH];
+ strcpy(temp_path, self_path);
+ strcat(temp_path, kwsys_shared_forward_path_slash);
+ strcat(temp_path, in_path);
+ if (!kwsys_shared_forward_realpath(temp_path, result)) {
+ if (desc) {
+ char msgbuf[KWSYS_SHARED_FORWARD_MAXPATH];
+ kwsys_shared_forward_strerror(msgbuf);
+ fprintf(stderr, "Error converting %s \"%s\" to real path: %s\n", desc,
+ temp_path, msgbuf);
+ }
+ return 0;
+ }
+ }
+ return 1;
+}
+
+/* Function to compute the library search path and executable name
+ based on the self path. */
+static int kwsys_shared_forward_get_settings(const char* self_path,
+ char* ldpath, char* exe)
+{
+ /* Possible search paths. */
+ static const char* search_path_build[] = { KWSYS_SHARED_FORWARD_PATH_BUILD,
+ 0 };
+ static const char* search_path_install[] = {
+ KWSYS_SHARED_FORWARD_PATH_INSTALL, 0
+ };
+
+ /* Chosen paths. */
+ const char** search_path;
+ const char* exe_path;
+
+/* Get the real name of the build and self paths. */
+# if defined(KWSYS_SHARED_FORWARD_CONFIG_NAME)
+ char build_path[] =
+ KWSYS_SHARED_FORWARD_DIR_BUILD "/" KWSYS_SHARED_FORWARD_CONFIG_NAME;
+ char self_path_logical[KWSYS_SHARED_FORWARD_MAXPATH];
+# else
+ char build_path[] = KWSYS_SHARED_FORWARD_DIR_BUILD;
+ const char* self_path_logical = self_path;
+# endif
+ char build_path_real[KWSYS_SHARED_FORWARD_MAXPATH];
+ char self_path_real[KWSYS_SHARED_FORWARD_MAXPATH];
+ if (!kwsys_shared_forward_realpath(self_path, self_path_real)) {
+ char msgbuf[KWSYS_SHARED_FORWARD_MAXPATH];
+ kwsys_shared_forward_strerror(msgbuf);
+ fprintf(stderr, "Error converting self path \"%s\" to real path: %s\n",
+ self_path, msgbuf);
+ return 0;
+ }
+
+ /* Check whether we are running in the build tree or an install tree. */
+ if (kwsys_shared_forward_realpath(build_path, build_path_real) &&
+ kwsys_shared_forward_samepath(self_path_real, build_path_real)) {
+ /* Running in build tree. Use the build path and exe. */
+ search_path = search_path_build;
+# if defined(_WIN32)
+ exe_path = KWSYS_SHARED_FORWARD_EXE_BUILD ".exe";
+# else
+ exe_path = KWSYS_SHARED_FORWARD_EXE_BUILD;
+# endif
+
+# if defined(KWSYS_SHARED_FORWARD_CONFIG_NAME)
+ /* Remove the configuration directory from self_path. */
+ kwsys_shared_forward_dirname(self_path, self_path_logical);
+# endif
+ } else {
+ /* Running in install tree. Use the install path and exe. */
+ search_path = search_path_install;
+# if defined(_WIN32)
+ exe_path = KWSYS_SHARED_FORWARD_EXE_INSTALL ".exe";
+# else
+ exe_path = KWSYS_SHARED_FORWARD_EXE_INSTALL;
+# endif
+
+# if defined(KWSYS_SHARED_FORWARD_CONFIG_NAME)
+ /* Use the original self path directory. */
+ strcpy(self_path_logical, self_path);
+# endif
+ }
+
+ /* Construct the runtime search path. */
+ {
+ const char** dir;
+ for (dir = search_path; *dir; ++dir) {
+ /* Add separator between path components. */
+ if (dir != search_path) {
+ strcat(ldpath, kwsys_shared_forward_path_sep);
+ }
+
+ /* Add this path component. */
+ if (!kwsys_shared_forward_fullpath(self_path_logical, *dir,
+ ldpath + strlen(ldpath),
+ "runtime path entry")) {
+ return 0;
+ }
+ }
+ }
+
+ /* Construct the executable location. */
+ if (!kwsys_shared_forward_fullpath(self_path_logical, exe_path, exe,
+ "executable file")) {
+ return 0;
+ }
+ return 1;
+}
+
+/* Function to print why execution of a command line failed. */
+static void kwsys_shared_forward_print_failure(char const* const* argv)
+{
+ char msg[KWSYS_SHARED_FORWARD_MAXPATH];
+ char const* const* arg = argv;
+ kwsys_shared_forward_strerror(msg);
+ fprintf(stderr, "Error running");
+ for (; *arg; ++arg) {
+ fprintf(stderr, " \"%s\"", *arg);
+ }
+ fprintf(stderr, ": %s\n", msg);
+}
+
+/* Static storage space to store the updated environment variable. */
+static char kwsys_shared_forward_ldpath[65535] =
+ KWSYS_SHARED_FORWARD_LDPATH "=";
+
+/* Main driver function to be called from main. */
+static int @KWSYS_NAMESPACE@_shared_forward_to_real(int argc, char** argv_in)
+{
+ char const** argv = (char const**)argv_in;
+ /* Get the directory containing this executable. */
+ char self_path[KWSYS_SHARED_FORWARD_MAXPATH];
+ if (kwsys_shared_forward_self_path(argv[0], self_path)) {
+ /* Found this executable. Use it to get the library directory. */
+ char exe[KWSYS_SHARED_FORWARD_MAXPATH];
+ if (kwsys_shared_forward_get_settings(self_path,
+ kwsys_shared_forward_ldpath, exe)) {
+ /* Append the old runtime search path. */
+ const char* old_ldpath = getenv(KWSYS_SHARED_FORWARD_LDPATH);
+ if (old_ldpath) {
+ strcat(kwsys_shared_forward_ldpath, kwsys_shared_forward_path_sep);
+ strcat(kwsys_shared_forward_ldpath, old_ldpath);
+ }
+
+ /* Store the environment variable. */
+ putenv(kwsys_shared_forward_ldpath);
+
+# if defined(KWSYS_SHARED_FORWARD_OPTION_COMMAND)
+ /* Look for the command line replacement option. */
+ if (argc > 1 &&
+ strcmp(argv[1], KWSYS_SHARED_FORWARD_OPTION_COMMAND) == 0) {
+ if (argc > 2) {
+ /* Use the command line given. */
+ strcpy(exe, argv[2]);
+ argv += 2;
+ argc -= 2;
+ } else {
+ /* The option was not given an executable. */
+ fprintf(stderr,
+ "Option " KWSYS_SHARED_FORWARD_OPTION_COMMAND
+ " must be followed by a command line.\n");
+ return 1;
+ }
+ }
+# endif
+
+# if defined(KWSYS_SHARED_FORWARD_OPTION_PRINT)
+ /* Look for the print command line option. */
+ if (argc > 1 &&
+ strcmp(argv[1], KWSYS_SHARED_FORWARD_OPTION_PRINT) == 0) {
+ fprintf(stdout, "%s\n", kwsys_shared_forward_ldpath);
+ fprintf(stdout, "%s\n", exe);
+ return 0;
+ }
+# endif
+
+# if defined(KWSYS_SHARED_FORWARD_OPTION_LDD)
+ /* Look for the ldd command line option. */
+ if (argc > 1 && strcmp(argv[1], KWSYS_SHARED_FORWARD_OPTION_LDD) == 0) {
+# if defined(KWSYS_SHARED_FORWARD_LDD)
+ /* Use the named ldd-like executable and arguments. */
+ char const* ldd_argv[] = { KWSYS_SHARED_FORWARD_LDD, 0, 0 };
+ ldd_argv[KWSYS_SHARED_FORWARD_LDD_N] = exe;
+ kwsys_shared_forward_execvp(ldd_argv[0], ldd_argv);
+
+ /* Report why execution failed. */
+ kwsys_shared_forward_print_failure(ldd_argv);
+ return 1;
+# else
+ /* We have no ldd-like executable available on this platform. */
+ fprintf(stderr, "No ldd-like tool is known to this executable.\n");
+ return 1;
+# endif
+ }
+# endif
+
+ /* Replace this process with the real executable. */
+ argv[0] = exe;
+ kwsys_shared_forward_execvp(argv[0], argv);
+
+ /* Report why execution failed. */
+ kwsys_shared_forward_print_failure(argv);
+ } else {
+ /* Could not convert self path to the library directory. */
+ }
+ } else {
+ /* Could not find this executable. */
+ fprintf(stderr, "Error locating executable \"%s\".\n", argv[0]);
+ }
+
+ /* Avoid unused argument warning. */
+ (void)argc;
+
+ /* Exit with failure. */
+ return 1;
+}
+
+/* Restore warning stack. */
+# if defined(__clang__) && defined(__has_warning)
+# if __has_warning("-Wcast-qual")
+# pragma clang diagnostic pop
+# endif
+# endif
+
+#else
+# error "@KWSYS_NAMESPACE@/SharedForward.h should be included only once."
+#endif
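For illustration, a sketch of the launcher translation unit this header is meant to be included from, following the usage comment at the top of the file. The namespace is assumed to be kwsys and every path below is a placeholder, not a value used by this patch.

    /* Hypothetical launcher: all paths are placeholders. */
    #define kwsys_SHARED_FORWARD_DIR_BUILD "/path/to/foo-build/bin"
    #define kwsys_SHARED_FORWARD_PATH_BUILD "."
    #define kwsys_SHARED_FORWARD_PATH_INSTALL "../lib/foo-1.2"
    #define kwsys_SHARED_FORWARD_EXE_BUILD "foo-real"
    #define kwsys_SHARED_FORWARD_EXE_INSTALL "../lib/foo-1.2/foo-real"
    #define kwsys_SHARED_FORWARD_OPTION_PRINT "--print"
    #include <kwsys/SharedForward.h>

    int main(int argc, char** argv)
    {
      /* Prepends the computed search path to LD_LIBRARY_PATH (or the
         platform equivalent) and replaces this process with foo-real. */
      return kwsys_shared_forward_to_real(argc, argv);
    }
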
diff --git a/test/API/driver/kwsys/String.c b/test/API/driver/kwsys/String.c
new file mode 100644
index 0000000..daf7ad1
--- /dev/null
+++ b/test/API/driver/kwsys/String.c
@@ -0,0 +1,100 @@
+/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying
+ file Copyright.txt or https://cmake.org/licensing#kwsys for details. */
+#ifdef KWSYS_STRING_C
+/*
+All code in this source file is conditionally compiled to work-around
+template definition auto-search on VMS. Other source files in this
+directory that use the stl string cause the compiler to load this
+source to try to get the definition of the string template. This
+condition blocks the compiler from seeing the symbols defined here.
+*/
+# include "kwsysPrivate.h"
+# include KWSYS_HEADER(String.h)
+
+/* Work-around CMake dependency scanning limitation. This must
+ duplicate the above list of headers. */
+# if 0
+# include "String.h.in"
+# endif
+
+/* Select an implementation for strcasecmp. */
+# if defined(_MSC_VER)
+# define KWSYS_STRING_USE_STRICMP
+# include <string.h>
+# elif defined(__GNUC__)
+# define KWSYS_STRING_USE_STRCASECMP
+# include <strings.h>
+# else
+/* Table to convert upper case letters to lower case and leave all
+ other characters alone. */
+static char kwsysString_strcasecmp_tolower[] = {
+ '\000', '\001', '\002', '\003', '\004', '\005', '\006', '\007', '\010',
+ '\011', '\012', '\013', '\014', '\015', '\016', '\017', '\020', '\021',
+ '\022', '\023', '\024', '\025', '\026', '\027', '\030', '\031', '\032',
+ '\033', '\034', '\035', '\036', '\037', '\040', '\041', '\042', '\043',
+ '\044', '\045', '\046', '\047', '\050', '\051', '\052', '\053', '\054',
+ '\055', '\056', '\057', '\060', '\061', '\062', '\063', '\064', '\065',
+ '\066', '\067', '\070', '\071', '\072', '\073', '\074', '\075', '\076',
+ '\077', '\100', '\141', '\142', '\143', '\144', '\145', '\146', '\147',
+ '\150', '\151', '\152', '\153', '\154', '\155', '\156', '\157', '\160',
+ '\161', '\162', '\163', '\164', '\165', '\166', '\167', '\170', '\171',
+ '\172', '\133', '\134', '\135', '\136', '\137', '\140', '\141', '\142',
+ '\143', '\144', '\145', '\146', '\147', '\150', '\151', '\152', '\153',
+ '\154', '\155', '\156', '\157', '\160', '\161', '\162', '\163', '\164',
+ '\165', '\166', '\167', '\170', '\171', '\172', '\173', '\174', '\175',
+ '\176', '\177', '\200', '\201', '\202', '\203', '\204', '\205', '\206',
+ '\207', '\210', '\211', '\212', '\213', '\214', '\215', '\216', '\217',
+ '\220', '\221', '\222', '\223', '\224', '\225', '\226', '\227', '\230',
+ '\231', '\232', '\233', '\234', '\235', '\236', '\237', '\240', '\241',
+ '\242', '\243', '\244', '\245', '\246', '\247', '\250', '\251', '\252',
+ '\253', '\254', '\255', '\256', '\257', '\260', '\261', '\262', '\263',
+ '\264', '\265', '\266', '\267', '\270', '\271', '\272', '\273', '\274',
+ '\275', '\276', '\277', '\300', '\301', '\302', '\303', '\304', '\305',
+ '\306', '\307', '\310', '\311', '\312', '\313', '\314', '\315', '\316',
+ '\317', '\320', '\321', '\322', '\323', '\324', '\325', '\326', '\327',
+ '\330', '\331', '\332', '\333', '\334', '\335', '\336', '\337', '\340',
+ '\341', '\342', '\343', '\344', '\345', '\346', '\347', '\350', '\351',
+ '\352', '\353', '\354', '\355', '\356', '\357', '\360', '\361', '\362',
+ '\363', '\364', '\365', '\366', '\367', '\370', '\371', '\372', '\373',
+ '\374', '\375', '\376', '\377'
+};
+# endif
+
+/*--------------------------------------------------------------------------*/
+int kwsysString_strcasecmp(const char* lhs, const char* rhs)
+{
+# if defined(KWSYS_STRING_USE_STRICMP)
+ return _stricmp(lhs, rhs);
+# elif defined(KWSYS_STRING_USE_STRCASECMP)
+ return strcasecmp(lhs, rhs);
+# else
+ const char* const lower = kwsysString_strcasecmp_tolower;
+ unsigned char const* us1 = (unsigned char const*)lhs;
+ unsigned char const* us2 = (unsigned char const*)rhs;
+ int result;
+ while ((result = lower[*us1] - lower[*us2++], result == 0) && *us1++) {
+ }
+ return result;
+# endif
+}
+
+/*--------------------------------------------------------------------------*/
+int kwsysString_strncasecmp(const char* lhs, const char* rhs, size_t n)
+{
+# if defined(KWSYS_STRING_USE_STRICMP)
+ return _strnicmp(lhs, rhs, n);
+# elif defined(KWSYS_STRING_USE_STRCASECMP)
+ return strncasecmp(lhs, rhs, n);
+# else
+ const char* const lower = kwsysString_strcasecmp_tolower;
+ unsigned char const* us1 = (unsigned char const*)lhs;
+ unsigned char const* us2 = (unsigned char const*)rhs;
+ int result = 0;
+ while (n && (result = lower[*us1] - lower[*us2++], result == 0) && *us1++) {
+ --n;
+ }
+ return result;
+# endif
+}
+
+#endif /* KWSYS_STRING_C */
diff --git a/test/API/driver/kwsys/String.h.in b/test/API/driver/kwsys/String.h.in
new file mode 100644
index 0000000..7c9348a
--- /dev/null
+++ b/test/API/driver/kwsys/String.h.in
@@ -0,0 +1,57 @@
+/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying
+ file Copyright.txt or https://cmake.org/licensing#kwsys for details. */
+#ifndef @KWSYS_NAMESPACE@_String_h
+#define @KWSYS_NAMESPACE@_String_h
+
+#include <@KWSYS_NAMESPACE@/Configure.h>
+
+#include <stddef.h> /* size_t */
+
+/* Redefine all public interface symbol names to be in the proper
+ namespace. These macros are used internally to kwsys only, and are
+ not visible to user code. Use kwsysHeaderDump.pl to reproduce
+ these macros after making changes to the interface. */
+#if !defined(KWSYS_NAMESPACE)
+# define kwsys_ns(x) @KWSYS_NAMESPACE@##x
+# define kwsysEXPORT @KWSYS_NAMESPACE@_EXPORT
+#endif
+#if !@KWSYS_NAMESPACE@_NAME_IS_KWSYS
+# define kwsysString_strcasecmp kwsys_ns(String_strcasecmp)
+# define kwsysString_strncasecmp kwsys_ns(String_strncasecmp)
+#endif
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+/**
+ * Compare two strings ignoring the case of the characters. The
+ * integer returned is negative, zero, or positive if the first string
+ * is found to be less than, equal to, or greater than the second
+ * string, respectively.
+ */
+kwsysEXPORT int kwsysString_strcasecmp(const char* lhs, const char* rhs);
+
+/**
+ * Identical to String_strcasecmp except that only the first n
+ * characters are considered.
+ */
+kwsysEXPORT int kwsysString_strncasecmp(const char* lhs, const char* rhs,
+ size_t n);
+
+#if defined(__cplusplus)
+} /* extern "C" */
+#endif
+
+/* If we are building a kwsys .c or .cxx file, let it use these macros.
+ Otherwise, undefine them to keep the namespace clean. */
+#if !defined(KWSYS_NAMESPACE)
+# undef kwsys_ns
+# undef kwsysEXPORT
+# if !@KWSYS_NAMESPACE@_NAME_IS_KWSYS
+# undef kwsysString_strcasecmp
+# undef kwsysString_strncasecmp
+# endif
+#endif
+
+#endif
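For illustration, a small sketch of the two comparison functions declared above, again assuming the namespace is configured as kwsys so the kwsysString_ names are the exported symbols; the include path is a placeholder.

    #include <kwsys/String.h>

    #include <cstdio>

    int main()
    {
      /* Case-insensitive comparison of whole strings. */
      if (kwsysString_strcasecmp("HDF5", "hdf5") == 0) {
        std::printf("equal ignoring case\n");
      }
      /* Compare only the first 3 characters. */
      if (kwsysString_strncasecmp("HDFview", "hdf5", 3) == 0) {
        std::printf("same 3-character prefix\n");
      }
      return 0;
    }
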
diff --git a/test/API/driver/kwsys/String.hxx.in b/test/API/driver/kwsys/String.hxx.in
new file mode 100644
index 0000000..db1cf22
--- /dev/null
+++ b/test/API/driver/kwsys/String.hxx.in
@@ -0,0 +1,65 @@
+/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying
+ file Copyright.txt or https://cmake.org/licensing#kwsys for details. */
+#ifndef @KWSYS_NAMESPACE@_String_hxx
+#define @KWSYS_NAMESPACE@_String_hxx
+
+#include <string>
+
+namespace @KWSYS_NAMESPACE@ {
+
+/** \class String
+ * \brief Short-name version of the STL basic_string class template.
+ *
+ * The standard library "string" type is actually a typedef for
+ * "basic_string<..long argument list..>". This string class is
+ * simply a subclass of this type with the same interface so that the
+ * name is shorter in debugging symbols and error messages.
+ */
+class String : public std::string
+{
+ /** The original string type. */
+ typedef std::string stl_string;
+
+public:
+ /** String member types. */
+ typedef stl_string::value_type value_type;
+ typedef stl_string::pointer pointer;
+ typedef stl_string::reference reference;
+ typedef stl_string::const_reference const_reference;
+ typedef stl_string::size_type size_type;
+ typedef stl_string::difference_type difference_type;
+ typedef stl_string::iterator iterator;
+ typedef stl_string::const_iterator const_iterator;
+ typedef stl_string::reverse_iterator reverse_iterator;
+ typedef stl_string::const_reverse_iterator const_reverse_iterator;
+
+ /** String constructors. */
+ String()
+ : stl_string()
+ {
+ }
+ String(const value_type* s)
+ : stl_string(s)
+ {
+ }
+ String(const value_type* s, size_type n)
+ : stl_string(s, n)
+ {
+ }
+ String(const stl_string& s, size_type pos = 0, size_type n = npos)
+ : stl_string(s, pos, n)
+ {
+ }
+}; // End Class: String
+
+#if defined(__WATCOMC__)
+inline bool operator<(String const& l, String const& r)
+{
+ return (static_cast<std::string const&>(l) <
+ static_cast<std::string const&>(r));
+}
+#endif
+
+} // namespace @KWSYS_NAMESPACE@
+
+#endif
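For illustration, a trivial sketch showing that the String subclass described above interoperates with std::string (namespace and include path again assumed, not taken from this patch).

    #include <kwsys/String.hxx>

    #include <iostream>
    #include <string>

    int main()
    {
      // String inherits from std::string, so it converts and compares freely.
      kwsys::String name("h5_api_test");
      std::string copy = name;
      std::cout << copy.size() << '\n'; // 11
      return 0;
    }
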
diff --git a/test/API/driver/kwsys/System.c b/test/API/driver/kwsys/System.c
new file mode 100644
index 0000000..d43cc6f
--- /dev/null
+++ b/test/API/driver/kwsys/System.c
@@ -0,0 +1,236 @@
+/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying
+ file Copyright.txt or https://cmake.org/licensing#kwsys for details. */
+#include "kwsysPrivate.h"
+#include KWSYS_HEADER(System.h)
+
+/* Work-around CMake dependency scanning limitation. This must
+ duplicate the above list of headers. */
+#if 0
+# include "System.h.in"
+#endif
+
+#include <ctype.h> /* isspace */
+#include <stddef.h> /* ptrdiff_t */
+#include <stdlib.h> /* malloc, free */
+#include <string.h> /* memcpy */
+
+#include <stdio.h>
+
+#if defined(KWSYS_C_HAS_PTRDIFF_T) && KWSYS_C_HAS_PTRDIFF_T
+typedef ptrdiff_t kwsysSystem_ptrdiff_t;
+#else
+typedef int kwsysSystem_ptrdiff_t;
+#endif
+
+static int kwsysSystem__AppendByte(char* local, char** begin, char** end,
+ int* size, char c)
+{
+ /* Allocate space for the character. */
+ if ((*end - *begin) >= *size) {
+ kwsysSystem_ptrdiff_t length = *end - *begin;
+ char* newBuffer = (char*)malloc((size_t)(*size * 2));
+ if (!newBuffer) {
+ return 0;
+ }
+ memcpy(newBuffer, *begin, (size_t)(length) * sizeof(char));
+ if (*begin != local) {
+ free(*begin);
+ }
+ *begin = newBuffer;
+ *end = *begin + length;
+ *size *= 2;
+ }
+
+ /* Store the character. */
+ *(*end)++ = c;
+ return 1;
+}
+
+static int kwsysSystem__AppendArgument(char** local, char*** begin,
+ char*** end, int* size, char* arg_local,
+ char** arg_begin, char** arg_end,
+ int* arg_size)
+{
+ /* Append a null-terminator to the argument string. */
+ if (!kwsysSystem__AppendByte(arg_local, arg_begin, arg_end, arg_size,
+ '\0')) {
+ return 0;
+ }
+
+ /* Allocate space for the argument pointer. */
+ if ((*end - *begin) >= *size) {
+ kwsysSystem_ptrdiff_t length = *end - *begin;
+ char** newPointers = (char**)malloc((size_t)(*size) * 2 * sizeof(char*));
+ if (!newPointers) {
+ return 0;
+ }
+ memcpy(newPointers, *begin, (size_t)(length) * sizeof(char*));
+ if (*begin != local) {
+ free(*begin);
+ }
+ *begin = newPointers;
+ *end = *begin + length;
+ *size *= 2;
+ }
+
+ /* Allocate space for the argument string. */
+ **end = (char*)malloc((size_t)(*arg_end - *arg_begin));
+ if (!**end) {
+ return 0;
+ }
+
+ /* Store the argument in the command array. */
+ memcpy(**end, *arg_begin, (size_t)(*arg_end - *arg_begin));
+ ++(*end);
+
+ /* Reset the argument to be empty. */
+ *arg_end = *arg_begin;
+
+ return 1;
+}
+
+#define KWSYSPE_LOCAL_BYTE_COUNT 1024
+#define KWSYSPE_LOCAL_ARGS_COUNT 32
+static char** kwsysSystem__ParseUnixCommand(const char* command, int flags)
+{
+ /* Create a buffer for argument pointers during parsing. */
+ char* local_pointers[KWSYSPE_LOCAL_ARGS_COUNT];
+ int pointers_size = KWSYSPE_LOCAL_ARGS_COUNT;
+ char** pointer_begin = local_pointers;
+ char** pointer_end = pointer_begin;
+
+ /* Create a buffer for argument strings during parsing. */
+ char local_buffer[KWSYSPE_LOCAL_BYTE_COUNT];
+ int buffer_size = KWSYSPE_LOCAL_BYTE_COUNT;
+ char* buffer_begin = local_buffer;
+ char* buffer_end = buffer_begin;
+
+ /* Parse the command string. Try to behave like a UNIX shell. */
+ char** newCommand = 0;
+ const char* c = command;
+ int in_argument = 0;
+ int in_escape = 0;
+ int in_single = 0;
+ int in_double = 0;
+ int failed = 0;
+ for (; *c; ++c) {
+ if (in_escape) {
+ /* This character is escaped so do no special handling. */
+ if (!in_argument) {
+ in_argument = 1;
+ }
+ if (!kwsysSystem__AppendByte(local_buffer, &buffer_begin, &buffer_end,
+ &buffer_size, *c)) {
+ failed = 1;
+ break;
+ }
+ in_escape = 0;
+ } else if (*c == '\\') {
+ /* The next character should be escaped. */
+ in_escape = 1;
+ } else if (*c == '\'' && !in_double) {
+ /* Enter or exit single-quote state. */
+ if (in_single) {
+ in_single = 0;
+ } else {
+ in_single = 1;
+ if (!in_argument) {
+ in_argument = 1;
+ }
+ }
+ } else if (*c == '"' && !in_single) {
+ /* Enter or exit double-quote state. */
+ if (in_double) {
+ in_double = 0;
+ } else {
+ in_double = 1;
+ if (!in_argument) {
+ in_argument = 1;
+ }
+ }
+ } else if (isspace((unsigned char)*c)) {
+ if (in_argument) {
+ if (in_single || in_double) {
+ /* This space belongs to a quoted argument. */
+ if (!kwsysSystem__AppendByte(local_buffer, &buffer_begin,
+ &buffer_end, &buffer_size, *c)) {
+ failed = 1;
+ break;
+ }
+ } else {
+ /* This argument has been terminated by whitespace. */
+ if (!kwsysSystem__AppendArgument(
+ local_pointers, &pointer_begin, &pointer_end, &pointers_size,
+ local_buffer, &buffer_begin, &buffer_end, &buffer_size)) {
+ failed = 1;
+ break;
+ }
+ in_argument = 0;
+ }
+ }
+ } else {
+      /* This character belongs to an argument. */
+ if (!in_argument) {
+ in_argument = 1;
+ }
+ if (!kwsysSystem__AppendByte(local_buffer, &buffer_begin, &buffer_end,
+ &buffer_size, *c)) {
+ failed = 1;
+ break;
+ }
+ }
+ }
+
+ /* Finish the last argument. */
+ if (in_argument) {
+ if (!kwsysSystem__AppendArgument(
+ local_pointers, &pointer_begin, &pointer_end, &pointers_size,
+ local_buffer, &buffer_begin, &buffer_end, &buffer_size)) {
+ failed = 1;
+ }
+ }
+
+  /* If no allocation failed, allocate space for the new command
+     buffer. */
+ if (!failed) {
+ kwsysSystem_ptrdiff_t n = pointer_end - pointer_begin;
+ newCommand = (char**)malloc((size_t)(n + 1) * sizeof(char*));
+ }
+
+ if (newCommand) {
+ /* Copy the arguments into the new command buffer. */
+ kwsysSystem_ptrdiff_t n = pointer_end - pointer_begin;
+ memcpy(newCommand, pointer_begin, sizeof(char*) * (size_t)(n));
+ newCommand[n] = 0;
+ } else {
+ /* Free arguments already allocated. */
+ while (pointer_end != pointer_begin) {
+ free(*(--pointer_end));
+ }
+ }
+
+ /* Free temporary buffers. */
+ if (pointer_begin != local_pointers) {
+ free(pointer_begin);
+ }
+ if (buffer_begin != local_buffer) {
+ free(buffer_begin);
+ }
+
+ /* The flags argument is currently unused. */
+ (void)flags;
+
+ /* Return the final command buffer. */
+ return newCommand;
+}
+
+char** kwsysSystem_Parse_CommandForUnix(const char* command, int flags)
+{
+ /* Validate the flags. */
+ if (flags != 0) {
+ return 0;
+ }
+
+ /* Forward to our internal implementation. */
+ return kwsysSystem__ParseUnixCommand(command, flags);
+}
diff --git a/test/API/driver/kwsys/System.h.in b/test/API/driver/kwsys/System.h.in
new file mode 100644
index 0000000..a9d4f5e
--- /dev/null
+++ b/test/API/driver/kwsys/System.h.in
@@ -0,0 +1,60 @@
+/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying
+ file Copyright.txt or https://cmake.org/licensing#kwsys for details. */
+#ifndef @KWSYS_NAMESPACE@_System_h
+#define @KWSYS_NAMESPACE@_System_h
+
+#include <@KWSYS_NAMESPACE@/Configure.h>
+
+/* Redefine all public interface symbol names to be in the proper
+ namespace. These macros are used internally to kwsys only, and are
+ not visible to user code. Use kwsysHeaderDump.pl to reproduce
+ these macros after making changes to the interface. */
+#if !defined(KWSYS_NAMESPACE)
+# define kwsys_ns(x) @KWSYS_NAMESPACE@##x
+# define kwsysEXPORT @KWSYS_NAMESPACE@_EXPORT
+#endif
+#if !@KWSYS_NAMESPACE@_NAME_IS_KWSYS
+# define kwsysSystem_Parse_CommandForUnix \
+ kwsys_ns(System_Parse_CommandForUnix)
+#endif
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+/**
+ * Parse a unix-style command line string into separate arguments.
+ *
+ * On success, returns a pointer to an array of pointers to individual
+ * argument strings. Each string is null-terminated and the last
+ * entry in the array is a NULL pointer (just like argv). It is the
+ * caller's responsibility to free() the strings and the array of
+ * pointers to them.
+ *
+ * On failure, returns NULL. Failure occurs only on invalid flags or
+ * when memory cannot be allocated; never due to content of the input
+ * string. Missing close-quotes are treated as if the necessary
+ * closing quote appears.
+ *
+ * By default single- and double-quoted arguments are supported, and
+ * any character may be escaped by a backslash. The flags argument is
+ * reserved for future use, and must be zero (or the call will fail).
+ */
+kwsysEXPORT char** kwsysSystem_Parse_CommandForUnix(const char* command,
+ int flags);
+
+#if defined(__cplusplus)
+} /* extern "C" */
+#endif
+
+/* If we are building a kwsys .c or .cxx file, let it use these macros.
+ Otherwise, undefine them to keep the namespace clean. */
+#if !defined(KWSYS_NAMESPACE)
+# undef kwsys_ns
+# undef kwsysEXPORT
+# if !defined(KWSYS_NAMESPACE) && !@KWSYS_NAMESPACE@_NAME_IS_KWSYS
+# undef kwsysSystem_Parse_CommandForUnix
+# endif
+#endif
+
+#endif
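/* A minimal usage sketch of the contract documented above, assuming the
   KWSys namespace is configured as "kwsys" (so the header is found at
   <kwsys/System.h> and the exported name keeps the kwsysSystem_ prefix):
   parse a command line, walk the argv-style result, then free each
   string and the array itself, as the caller is required to do. */
#include <stdio.h>
#include <stdlib.h>

#include <kwsys/System.h>

int example_parse(void)
{
  char** args = kwsysSystem_Parse_CommandForUnix("cc -o 'my app' main.c", 0);
  if (!args) {
    return 1; /* nonzero flags or allocation failure */
  }
  for (char** a = args; *a; ++a) {
    printf("arg: %s\n", *a); /* cc, -o, my app, main.c */
  }
  for (char** a = args; *a; ++a) {
    free(*a); /* free each argument string... */
  }
  free(args); /* ...and then the pointer array */
  return 0;
}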
diff --git a/test/API/driver/kwsys/SystemInformation.cxx b/test/API/driver/kwsys/SystemInformation.cxx
new file mode 100644
index 0000000..6ec6e48
--- /dev/null
+++ b/test/API/driver/kwsys/SystemInformation.cxx
@@ -0,0 +1,5466 @@
+/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying
+ file Copyright.txt or https://cmake.org/licensing#kwsys for details. */
+#if defined(_WIN32)
+# define NOMINMAX // use our min,max
+# if !defined(_WIN32_WINNT) && defined(_MSC_VER) && _MSC_VER >= 1800
+# define _WIN32_WINNT 0x0600 // vista
+# endif
+# if !defined(_WIN32_WINNT) && !(defined(_MSC_VER) && _MSC_VER < 1300)
+# define _WIN32_WINNT 0x0501
+# endif
+# include <winsock.h> // WSADATA, include before sys/types.h
+#endif
+
+#if (defined(__GNUC__) || defined(__PGI)) && !defined(_GNU_SOURCE)
+# define _GNU_SOURCE
+#endif
+
+// TODO:
+// We need an alternative implementation for many functions in this file
+// when USE_ASM_INSTRUCTIONS gets defined as 0.
+//
+// Consider using these on Win32/Win64 for some of them:
+//
+// IsProcessorFeaturePresent
+// http://msdn.microsoft.com/en-us/library/ms724482(VS.85).aspx
+//
+// GetProcessMemoryInfo
+// http://msdn.microsoft.com/en-us/library/ms683219(VS.85).aspx
+
+#include "kwsysPrivate.h"
+#include KWSYS_HEADER(SystemInformation.hxx)
+#include KWSYS_HEADER(Process.h)
+
+// Work-around CMake dependency scanning limitation. This must
+// duplicate the above list of headers.
+#if 0
+# include "Process.h.in"
+# include "SystemInformation.hxx.in"
+#endif
+
+#include <algorithm>
+#include <bitset>
+#include <cassert>
+#include <fstream>
+#include <iostream>
+#include <limits>
+#include <set>
+#include <sstream>
+#include <string>
+#include <vector>
+
+#if defined(_WIN32)
+# include <windows.h>
+# if defined(_MSC_VER) && _MSC_VER >= 1800
+# define KWSYS_WINDOWS_DEPRECATED_GetVersionEx
+# endif
+# include <errno.h>
+# if defined(KWSYS_SYS_HAS_PSAPI)
+# include <psapi.h>
+# endif
+# if !defined(siginfo_t)
+typedef int siginfo_t;
+# endif
+#else
+# include <sys/types.h>
+
+# include <errno.h> // extern int errno;
+# include <fcntl.h>
+# include <signal.h>
+# include <sys/resource.h> // getrlimit
+# include <sys/time.h>
+# include <sys/utsname.h> // int uname(struct utsname *buf);
+# include <unistd.h>
+#endif
+
+#if defined(__CYGWIN__) && !defined(_WIN32)
+# include <windows.h>
+# undef _WIN32
+#endif
+
+#if defined(__OpenBSD__) || defined(__FreeBSD__) || defined(__NetBSD__) || \
+ defined(__DragonFly__)
+# include <netdb.h>
+# include <netinet/in.h>
+# include <sys/param.h>
+# include <sys/socket.h>
+# include <sys/sysctl.h>
+# if defined(KWSYS_SYS_HAS_IFADDRS_H)
+# include <ifaddrs.h>
+# include <net/if.h>
+# define KWSYS_SYSTEMINFORMATION_IMPLEMENT_FQDN
+# endif
+#endif
+
+#if defined(KWSYS_SYS_HAS_MACHINE_CPU_H)
+# include <machine/cpu.h>
+#endif
+
+#ifdef __APPLE__
+# include <mach/host_info.h>
+# include <mach/mach.h>
+# include <mach/mach_types.h>
+# include <mach/vm_statistics.h>
+# include <netdb.h>
+# include <netinet/in.h>
+# include <sys/socket.h>
+# include <sys/sysctl.h>
+# if defined(KWSYS_SYS_HAS_IFADDRS_H)
+# include <ifaddrs.h>
+# include <net/if.h>
+# define KWSYS_SYSTEMINFORMATION_IMPLEMENT_FQDN
+# endif
+# if !(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ - 0 >= 1050)
+# undef KWSYS_SYSTEMINFORMATION_HAS_BACKTRACE
+# endif
+#endif
+
+#if defined(__linux) || defined(__sun) || defined(_SCO_DS) || \
+ defined(__GLIBC__) || defined(__GNU__)
+# include <netdb.h>
+# include <netinet/in.h>
+# include <sys/socket.h>
+# if defined(KWSYS_SYS_HAS_IFADDRS_H)
+# include <ifaddrs.h>
+# include <net/if.h>
+# if defined(__LSB_VERSION__)
+/* LSB has no getifaddrs */
+# elif defined(__ANDROID_API__) && __ANDROID_API__ < 24
+/* Android has no getifaddrs prior to API 24. */
+# else
+# define KWSYS_SYSTEMINFORMATION_IMPLEMENT_FQDN
+# endif
+# endif
+# if defined(KWSYS_CXX_HAS_RLIMIT64)
+typedef struct rlimit64 ResourceLimitType;
+# define GetResourceLimit getrlimit64
+# else
+typedef struct rlimit ResourceLimitType;
+# define GetResourceLimit getrlimit
+# endif
+#elif defined(__hpux)
+# include <sys/param.h>
+# include <sys/pstat.h>
+# if defined(KWSYS_SYS_HAS_MPCTL_H)
+# include <sys/mpctl.h>
+# endif
+#endif
+
+#ifdef __HAIKU__
+# include <OS.h>
+#endif
+
+#if defined(KWSYS_SYSTEMINFORMATION_HAS_BACKTRACE)
+# include <execinfo.h>
+# if defined(KWSYS_SYSTEMINFORMATION_HAS_CPP_DEMANGLE)
+# include <cxxabi.h>
+# endif
+# if defined(KWSYS_SYSTEMINFORMATION_HAS_SYMBOL_LOOKUP)
+# include <dlfcn.h>
+# endif
+#else
+# undef KWSYS_SYSTEMINFORMATION_HAS_CPP_DEMANGLE
+# undef KWSYS_SYSTEMINFORMATION_HAS_SYMBOL_LOOKUP
+#endif
+
+#include <ctype.h> // int isdigit(int c);
+#include <memory.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#if defined(KWSYS_USE_LONG_LONG)
+# if defined(KWSYS_IOS_HAS_OSTREAM_LONG_LONG)
+# define iostreamLongLong(x) (x)
+# else
+# define iostreamLongLong(x) ((long)(x))
+# endif
+#elif defined(KWSYS_USE___INT64)
+# if defined(KWSYS_IOS_HAS_OSTREAM___INT64)
+# define iostreamLongLong(x) (x)
+# else
+# define iostreamLongLong(x) ((long)(x))
+# endif
+#else
+# error "No Long Long"
+#endif
+
+#if defined(KWSYS_CXX_HAS_ATOLL)
+# define atoLongLong atoll
+#else
+# if defined(KWSYS_CXX_HAS__ATOI64)
+# define atoLongLong _atoi64
+# elif defined(KWSYS_CXX_HAS_ATOL)
+# define atoLongLong atol
+# else
+# define atoLongLong atoi
+# endif
+#endif
+
+#if defined(_MSC_VER) && (_MSC_VER >= 1300) && !defined(_WIN64) && \
+ !defined(__clang__)
+# define USE_ASM_INSTRUCTIONS 1
+#else
+# define USE_ASM_INSTRUCTIONS 0
+#endif
+
+#if defined(_MSC_VER) && (_MSC_VER >= 1400) && !defined(__clang__)
+# include <intrin.h>
+# define USE_CPUID_INTRINSICS 1
+#else
+# define USE_CPUID_INTRINSICS 0
+#endif
+
+#if USE_ASM_INSTRUCTIONS || USE_CPUID_INTRINSICS || \
+ defined(KWSYS_CXX_HAS_BORLAND_ASM_CPUID)
+# define USE_CPUID 1
+#else
+# define USE_CPUID 0
+#endif
+
+#if USE_CPUID
+
+# define CPUID_AWARE_COMPILER
+
+/**
+ * call CPUID instruction
+ *
+ * Will return false if the instruction failed.
+ */
+static bool call_cpuid(int select, int result[4])
+{
+# if USE_CPUID_INTRINSICS
+ __cpuid(result, select);
+ return true;
+# else
+ int tmp[4];
+# if defined(_MSC_VER)
+ // Use SEH to determine CPUID presence
+ __try {
+ _asm {
+# ifdef CPUID_AWARE_COMPILER
+ ; we must push/pop the registers <<CPUID>> writes to, as the
+ ; optimiser does not know about <<CPUID>>, and so does not expect
+ ; these registers to change.
+ push eax
+ push ebx
+ push ecx
+ push edx
+# endif
+ ; <<CPUID>>
+ mov eax, select
+# ifdef CPUID_AWARE_COMPILER
+ cpuid
+# else
+ _asm _emit 0x0f
+ _asm _emit 0xa2
+# endif
+ mov tmp[0 * TYPE int], eax
+ mov tmp[1 * TYPE int], ebx
+ mov tmp[2 * TYPE int], ecx
+ mov tmp[3 * TYPE int], edx
+
+# ifdef CPUID_AWARE_COMPILER
+ pop edx
+ pop ecx
+ pop ebx
+ pop eax
+# endif
+ }
+ } __except (1) {
+ return false;
+ }
+
+ memcpy(result, tmp, sizeof(tmp));
+# elif defined(KWSYS_CXX_HAS_BORLAND_ASM_CPUID)
+ unsigned int a, b, c, d;
+ __asm {
+ mov EAX, select;
+ cpuid
+ mov a, EAX;
+ mov b, EBX;
+ mov c, ECX;
+ mov d, EDX;
+ }
+
+ result[0] = a;
+ result[1] = b;
+ result[2] = c;
+ result[3] = d;
+# endif
+
+ // The cpuid instruction succeeded.
+ return true;
+# endif
+}
+#endif
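// A small sketch of how the call_cpuid() helper above is consumed: pass the
// desired leaf in "select" and read the packed EAX/EBX/ECX/EDX values from
// result[0..3].  The SSE2 bit tested here (EDX bit 26 of leaf 1) is the same
// one decoded later in RetrieveCPUFeatures(); the function name is
// illustrative only.
#if USE_CPUID
static bool example_has_sse2(void)
{
  int regs[4] = { 0, 0, 0, 0 };
  if (!call_cpuid(1, regs)) { // leaf 1: processor info and feature bits
    return false;
  }
  return (regs[3] & 0x04000000) != 0; // EDX bit 26 --> SSE2
}
#endif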
+
+namespace KWSYS_NAMESPACE {
+template <typename T>
+T min(T a, T b)
+{
+ return a < b ? a : b;
+}
+
+extern "C" {
+typedef void (*SigAction)(int, siginfo_t*, void*);
+}
+
+// Define SystemInformationImplementation class
+typedef void (*DELAY_FUNC)(unsigned int uiMS);
+
+class SystemInformationImplementation
+{
+public:
+ typedef SystemInformation::LongLong LongLong;
+ SystemInformationImplementation();
+ ~SystemInformationImplementation();
+
+ const char* GetVendorString();
+ const char* GetVendorID();
+ std::string GetTypeID();
+ std::string GetFamilyID();
+ std::string GetModelID();
+ std::string GetModelName();
+ std::string GetSteppingCode();
+ const char* GetExtendedProcessorName();
+ const char* GetProcessorSerialNumber();
+ int GetProcessorCacheSize();
+ unsigned int GetLogicalProcessorsPerPhysical();
+ float GetProcessorClockFrequency();
+ int GetProcessorAPICID();
+ int GetProcessorCacheXSize(long int);
+ bool DoesCPUSupportFeature(long int);
+
+ const char* GetOSName();
+ const char* GetHostname();
+ int GetFullyQualifiedDomainName(std::string& fqdn);
+ const char* GetOSRelease();
+ const char* GetOSVersion();
+ const char* GetOSPlatform();
+
+ bool Is64Bits();
+
+ unsigned int GetNumberOfLogicalCPU(); // per physical cpu
+ unsigned int GetNumberOfPhysicalCPU();
+
+ bool DoesCPUSupportCPUID();
+
+ // Retrieve memory information in MiB.
+ size_t GetTotalVirtualMemory();
+ size_t GetAvailableVirtualMemory();
+ size_t GetTotalPhysicalMemory();
+ size_t GetAvailablePhysicalMemory();
+
+ LongLong GetProcessId();
+
+ // Retrieve memory information in KiB.
+ LongLong GetHostMemoryTotal();
+ LongLong GetHostMemoryAvailable(const char* envVarName);
+ LongLong GetHostMemoryUsed();
+
+ LongLong GetProcMemoryAvailable(const char* hostLimitEnvVarName,
+ const char* procLimitEnvVarName);
+ LongLong GetProcMemoryUsed();
+
+ double GetLoadAverage();
+
+ // enable/disable stack trace signal handler.
+ static void SetStackTraceOnError(int enable);
+
+ // get current stack
+ static std::string GetProgramStack(int firstFrame, int wholePath);
+
+ /** Run the different checks */
+ void RunCPUCheck();
+ void RunOSCheck();
+ void RunMemoryCheck();
+
+public:
+ typedef struct tagID
+ {
+ int Type;
+ int Family;
+ int Model;
+ int Revision;
+ int ExtendedFamily;
+ int ExtendedModel;
+ std::string ProcessorName;
+ std::string Vendor;
+ std::string SerialNumber;
+ std::string ModelName;
+ } ID;
+
+ typedef struct tagCPUPowerManagement
+ {
+ bool HasVoltageID;
+ bool HasFrequencyID;
+ bool HasTempSenseDiode;
+ } CPUPowerManagement;
+
+ typedef struct tagCPUExtendedFeatures
+ {
+ bool Has3DNow;
+ bool Has3DNowPlus;
+ bool SupportsMP;
+ bool HasMMXPlus;
+ bool HasSSEMMX;
+ unsigned int LogicalProcessorsPerPhysical;
+ int APIC_ID;
+ CPUPowerManagement PowerManagement;
+ } CPUExtendedFeatures;
+
+ typedef struct CPUtagFeatures
+ {
+ bool HasFPU;
+ bool HasTSC;
+ bool HasMMX;
+ bool HasSSE;
+ bool HasSSEFP;
+ bool HasSSE2;
+ bool HasIA64;
+ bool HasAPIC;
+ bool HasCMOV;
+ bool HasMTRR;
+ bool HasACPI;
+ bool HasSerial;
+ bool HasThermal;
+ int CPUSpeed;
+ int L1CacheSize;
+ int L2CacheSize;
+ int L3CacheSize;
+ CPUExtendedFeatures ExtendedFeatures;
+ } CPUFeatures;
+
+ enum Manufacturer
+ {
+ AMD,
+ Intel,
+ NSC,
+ UMC,
+ Cyrix,
+ NexGen,
+ IDT,
+ Rise,
+ Transmeta,
+ Sun,
+ IBM,
+ Motorola,
+ HP,
+ Hygon,
+ UnknownManufacturer
+ };
+
+protected:
+ // For windows
+ bool RetrieveCPUFeatures();
+ bool RetrieveCPUIdentity();
+ bool RetrieveCPUCacheDetails();
+ bool RetrieveClassicalCPUCacheDetails();
+ bool RetrieveCPUClockSpeed();
+ bool RetrieveClassicalCPUClockSpeed();
+ bool RetrieveCPUExtendedLevelSupport(int);
+ bool RetrieveExtendedCPUFeatures();
+ bool RetrieveProcessorSerialNumber();
+ bool RetrieveCPUPowerManagement();
+ bool RetrieveClassicalCPUIdentity();
+ bool RetrieveExtendedCPUIdentity();
+
+ // Processor information
+ Manufacturer ChipManufacturer;
+ CPUFeatures Features;
+ ID ChipID;
+ float CPUSpeedInMHz;
+ unsigned int NumberOfLogicalCPU;
+ unsigned int NumberOfPhysicalCPU;
+
+ void CPUCountWindows(); // For windows
+ unsigned char GetAPICId(); // For windows
+ bool IsSMTSupported();
+ static LongLong GetCyclesDifference(DELAY_FUNC, unsigned int); // For windows
+
+ // For Linux and Cygwin, /proc/cpuinfo formats are slightly different
+ bool RetreiveInformationFromCpuInfoFile();
+ std::string ExtractValueFromCpuInfoFile(std::string buffer, const char* word,
+ size_t init = 0);
+
+ bool QueryLinuxMemory();
+ bool QueryCygwinMemory();
+
+ static void Delay(unsigned int);
+ static void DelayOverhead(unsigned int);
+
+ void FindManufacturer(const std::string& family = "");
+
+ // For Mac
+ bool ParseSysCtl();
+ int CallSwVers(const char* arg, std::string& ver);
+ void TrimNewline(std::string&);
+ std::string ExtractValueFromSysCtl(const char* word);
+ std::string SysCtlBuffer;
+
+ // For Solaris
+ bool QuerySolarisMemory();
+ bool QuerySolarisProcessor();
+ std::string ParseValueFromKStat(const char* arguments);
+ std::string RunProcess(std::vector<const char*> args);
+
+ // For Haiku OS
+ bool QueryHaikuInfo();
+
+ // For QNX
+ bool QueryQNXMemory();
+ bool QueryQNXProcessor();
+
+ // For OpenBSD, FreeBSD, NetBSD, DragonFly
+ bool QueryBSDMemory();
+ bool QueryBSDProcessor();
+
+ // For HP-UX
+ bool QueryHPUXMemory();
+ bool QueryHPUXProcessor();
+
+ // For Microsoft Windows
+ bool QueryWindowsMemory();
+
+ // For AIX
+ bool QueryAIXMemory();
+
+ bool QueryProcessorBySysconf();
+ bool QueryProcessor();
+
+ // Evaluate the memory information.
+ bool QueryMemoryBySysconf();
+ bool QueryMemory();
+ size_t TotalVirtualMemory;
+ size_t AvailableVirtualMemory;
+ size_t TotalPhysicalMemory;
+ size_t AvailablePhysicalMemory;
+
+ size_t CurrentPositionInFile;
+
+ // Operating System information
+ bool QueryOSInformation();
+ std::string OSName;
+ std::string Hostname;
+ std::string OSRelease;
+ std::string OSVersion;
+ std::string OSPlatform;
+ bool OSIs64Bit;
+};
+
+SystemInformation::SystemInformation()
+{
+ this->Implementation = new SystemInformationImplementation;
+}
+
+SystemInformation::~SystemInformation()
+{
+ delete this->Implementation;
+}
+
+const char* SystemInformation::GetVendorString()
+{
+ return this->Implementation->GetVendorString();
+}
+
+const char* SystemInformation::GetVendorID()
+{
+ return this->Implementation->GetVendorID();
+}
+
+std::string SystemInformation::GetTypeID()
+{
+ return this->Implementation->GetTypeID();
+}
+
+std::string SystemInformation::GetFamilyID()
+{
+ return this->Implementation->GetFamilyID();
+}
+
+std::string SystemInformation::GetModelID()
+{
+ return this->Implementation->GetModelID();
+}
+
+std::string SystemInformation::GetModelName()
+{
+ return this->Implementation->GetModelName();
+}
+
+std::string SystemInformation::GetSteppingCode()
+{
+ return this->Implementation->GetSteppingCode();
+}
+
+const char* SystemInformation::GetExtendedProcessorName()
+{
+ return this->Implementation->GetExtendedProcessorName();
+}
+
+const char* SystemInformation::GetProcessorSerialNumber()
+{
+ return this->Implementation->GetProcessorSerialNumber();
+}
+
+int SystemInformation::GetProcessorCacheSize()
+{
+ return this->Implementation->GetProcessorCacheSize();
+}
+
+unsigned int SystemInformation::GetLogicalProcessorsPerPhysical()
+{
+ return this->Implementation->GetLogicalProcessorsPerPhysical();
+}
+
+float SystemInformation::GetProcessorClockFrequency()
+{
+ return this->Implementation->GetProcessorClockFrequency();
+}
+
+int SystemInformation::GetProcessorAPICID()
+{
+ return this->Implementation->GetProcessorAPICID();
+}
+
+int SystemInformation::GetProcessorCacheXSize(long int l)
+{
+ return this->Implementation->GetProcessorCacheXSize(l);
+}
+
+bool SystemInformation::DoesCPUSupportFeature(long int i)
+{
+ return this->Implementation->DoesCPUSupportFeature(i);
+}
+
+std::string SystemInformation::GetCPUDescription()
+{
+ std::ostringstream oss;
+ oss << this->GetNumberOfPhysicalCPU() << " core ";
+ if (this->GetModelName().empty()) {
+ oss << this->GetProcessorClockFrequency() << " MHz "
+ << this->GetVendorString() << " " << this->GetExtendedProcessorName();
+ } else {
+ oss << this->GetModelName();
+ }
+
+ // remove extra spaces
+ std::string tmp = oss.str();
+ size_t pos;
+  while ((pos = tmp.find("  ")) != std::string::npos) {
+ tmp.replace(pos, 2, " ");
+ }
+
+ return tmp;
+}
+
+const char* SystemInformation::GetOSName()
+{
+ return this->Implementation->GetOSName();
+}
+
+const char* SystemInformation::GetHostname()
+{
+ return this->Implementation->GetHostname();
+}
+
+std::string SystemInformation::GetFullyQualifiedDomainName()
+{
+ std::string fqdn;
+ this->Implementation->GetFullyQualifiedDomainName(fqdn);
+ return fqdn;
+}
+
+const char* SystemInformation::GetOSRelease()
+{
+ return this->Implementation->GetOSRelease();
+}
+
+const char* SystemInformation::GetOSVersion()
+{
+ return this->Implementation->GetOSVersion();
+}
+
+const char* SystemInformation::GetOSPlatform()
+{
+ return this->Implementation->GetOSPlatform();
+}
+
+int SystemInformation::GetOSIsWindows()
+{
+#if defined(_WIN32)
+ return 1;
+#else
+ return 0;
+#endif
+}
+
+int SystemInformation::GetOSIsLinux()
+{
+#if defined(__linux)
+ return 1;
+#else
+ return 0;
+#endif
+}
+
+int SystemInformation::GetOSIsApple()
+{
+#if defined(__APPLE__)
+ return 1;
+#else
+ return 0;
+#endif
+}
+
+std::string SystemInformation::GetOSDescription()
+{
+ std::ostringstream oss;
+ oss << this->GetOSName() << " " << this->GetOSRelease() << " "
+ << this->GetOSVersion();
+
+ return oss.str();
+}
+
+bool SystemInformation::Is64Bits()
+{
+ return this->Implementation->Is64Bits();
+}
+
+unsigned int SystemInformation::GetNumberOfLogicalCPU() // per physical cpu
+{
+ return this->Implementation->GetNumberOfLogicalCPU();
+}
+
+unsigned int SystemInformation::GetNumberOfPhysicalCPU()
+{
+ return this->Implementation->GetNumberOfPhysicalCPU();
+}
+
+bool SystemInformation::DoesCPUSupportCPUID()
+{
+ return this->Implementation->DoesCPUSupportCPUID();
+}
+
+// Retrieve memory information in MiB.
+size_t SystemInformation::GetTotalVirtualMemory()
+{
+ return this->Implementation->GetTotalVirtualMemory();
+}
+
+size_t SystemInformation::GetAvailableVirtualMemory()
+{
+ return this->Implementation->GetAvailableVirtualMemory();
+}
+
+size_t SystemInformation::GetTotalPhysicalMemory()
+{
+ return this->Implementation->GetTotalPhysicalMemory();
+}
+
+size_t SystemInformation::GetAvailablePhysicalMemory()
+{
+ return this->Implementation->GetAvailablePhysicalMemory();
+}
+
+std::string SystemInformation::GetMemoryDescription(
+ const char* hostLimitEnvVarName, const char* procLimitEnvVarName)
+{
+ std::ostringstream oss;
+ oss << "Host Total: " << iostreamLongLong(this->GetHostMemoryTotal())
+ << " KiB, Host Available: "
+ << iostreamLongLong(this->GetHostMemoryAvailable(hostLimitEnvVarName))
+ << " KiB, Process Available: "
+ << iostreamLongLong(this->GetProcMemoryAvailable(hostLimitEnvVarName,
+ procLimitEnvVarName))
+ << " KiB";
+ return oss.str();
+}
+
+// host memory info in units of KiB.
+SystemInformation::LongLong SystemInformation::GetHostMemoryTotal()
+{
+ return this->Implementation->GetHostMemoryTotal();
+}
+
+SystemInformation::LongLong SystemInformation::GetHostMemoryAvailable(
+ const char* hostLimitEnvVarName)
+{
+ return this->Implementation->GetHostMemoryAvailable(hostLimitEnvVarName);
+}
+
+SystemInformation::LongLong SystemInformation::GetHostMemoryUsed()
+{
+ return this->Implementation->GetHostMemoryUsed();
+}
+
+// process memory info in units of KiB.
+SystemInformation::LongLong SystemInformation::GetProcMemoryAvailable(
+ const char* hostLimitEnvVarName, const char* procLimitEnvVarName)
+{
+ return this->Implementation->GetProcMemoryAvailable(hostLimitEnvVarName,
+ procLimitEnvVarName);
+}
+
+SystemInformation::LongLong SystemInformation::GetProcMemoryUsed()
+{
+ return this->Implementation->GetProcMemoryUsed();
+}
+
+double SystemInformation::GetLoadAverage()
+{
+ return this->Implementation->GetLoadAverage();
+}
+
+SystemInformation::LongLong SystemInformation::GetProcessId()
+{
+ return this->Implementation->GetProcessId();
+}
+
+void SystemInformation::SetStackTraceOnError(int enable)
+{
+ SystemInformationImplementation::SetStackTraceOnError(enable);
+}
+
+std::string SystemInformation::GetProgramStack(int firstFrame, int wholePath)
+{
+ return SystemInformationImplementation::GetProgramStack(firstFrame,
+ wholePath);
+}
+
+/** Run the different checks */
+void SystemInformation::RunCPUCheck()
+{
+ this->Implementation->RunCPUCheck();
+}
+
+void SystemInformation::RunOSCheck()
+{
+ this->Implementation->RunOSCheck();
+}
+
+void SystemInformation::RunMemoryCheck()
+{
+ this->Implementation->RunMemoryCheck();
+}
+
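// A brief usage sketch of the facade above: run the Run*Check() methods
// first so the implementation gathers its data, then query the accessors.
// Passing nullptr for both limit environment-variable names is assumed to
// mean "no override"; the function name is illustrative only.
static void ExampleReportSystemInformation()
{
  SystemInformation info;
  SystemInformation::SetStackTraceOnError(1); // enable the handlers below
  info.RunCPUCheck();
  info.RunOSCheck();
  info.RunMemoryCheck();
  std::cout << info.GetCPUDescription() << "\n"
            << info.GetOSDescription() << "\n"
            << info.GetMemoryDescription(nullptr, nullptr) << "\n";
}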
+// SystemInformationImplementation starts here
+
+#if USE_CPUID
+# define STORE_TLBCACHE_INFO(x, y) x = (x < (y)) ? (y) : x
+# define TLBCACHE_INFO_UNITS (15)
+#endif
+
+#if USE_ASM_INSTRUCTIONS
+# define CLASSICAL_CPU_FREQ_LOOP 10000000
+# define RDTSC_INSTRUCTION _asm _emit 0x0f _asm _emit 0x31
+#endif
+
+#define INITIAL_APIC_ID_BITS 0xFF000000
+// initial APIC ID for the processor this code is running on.
+// Default value = 0xff if HT is not supported
+
+// Hide implementation details in an anonymous namespace.
+namespace {
+// *****************************************************************************
+#if defined(__linux) || defined(__APPLE__)
+int LoadLines(FILE* file, std::vector<std::string>& lines)
+{
+  // Load each line in the given file into the vector.
+ int nRead = 0;
+ const int bufSize = 1024;
+ char buf[bufSize] = { '\0' };
+ while (!feof(file) && !ferror(file)) {
+ errno = 0;
+ if (fgets(buf, bufSize, file) == nullptr) {
+ if (ferror(file) && (errno == EINTR)) {
+ clearerr(file);
+ }
+ continue;
+ }
+ char* pBuf = buf;
+ while (*pBuf) {
+ if (*pBuf == '\n')
+ *pBuf = '\0';
+ pBuf += 1;
+ }
+ lines.push_back(buf);
+ ++nRead;
+ }
+ if (ferror(file)) {
+ return 0;
+ }
+ return nRead;
+}
+
+# if defined(__linux)
+// *****************************************************************************
+int LoadLines(const char* fileName, std::vector<std::string>& lines)
+{
+ FILE* file = fopen(fileName, "r");
+ if (file == 0) {
+ return 0;
+ }
+ int nRead = LoadLines(file, lines);
+ fclose(file);
+ return nRead;
+}
+# endif
+
+// ****************************************************************************
+template <typename T>
+int NameValue(std::vector<std::string> const& lines, std::string const& name,
+ T& value)
+{
+ size_t nLines = lines.size();
+ for (size_t i = 0; i < nLines; ++i) {
+ size_t at = lines[i].find(name);
+ if (at == std::string::npos) {
+ continue;
+ }
+ std::istringstream is(lines[i].substr(at + name.size()));
+ is >> value;
+ return 0;
+ }
+ return -1;
+}
+#endif
+
+#if defined(__linux)
+// ****************************************************************************
+template <typename T>
+int GetFieldsFromFile(const char* fileName, const char** fieldNames, T* values)
+{
+ std::vector<std::string> fields;
+ if (!LoadLines(fileName, fields)) {
+ return -1;
+ }
+ int i = 0;
+ while (fieldNames[i] != nullptr) {
+ int ierr = NameValue(fields, fieldNames[i], values[i]);
+ if (ierr) {
+ return -(i + 2);
+ }
+ i += 1;
+ }
+ return 0;
+}
+
+// ****************************************************************************
+template <typename T>
+int GetFieldFromFile(const char* fileName, const char* fieldName, T& value)
+{
+ const char* fieldNames[2] = { fieldName, nullptr };
+ T values[1] = { T(0) };
+ int ierr = GetFieldsFromFile(fileName, fieldNames, values);
+ if (ierr) {
+ return ierr;
+ }
+ value = values[0];
+ return 0;
+}
+#endif
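// A minimal sketch of how the helpers above are used on Linux: pull one
// named field out of a /proc pseudo-file and parse it with operator>>.
// The file and field shown ("/proc/meminfo", "MemTotal:") are the usual
// source of the host memory total; the function name is illustrative.
#if defined(__linux)
static long long ExampleReadMemTotalKiB(void)
{
  long long memTotalKiB = 0; // /proc/meminfo reports values in KiB
  if (GetFieldFromFile("/proc/meminfo", "MemTotal:", memTotalKiB) != 0) {
    return -1; // file missing or field not found
  }
  return memTotalKiB;
}
#endif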
+
+// ****************************************************************************
+#if defined(__APPLE__)
+template <typename T>
+int GetFieldsFromCommand(const char* command, const char** fieldNames,
+ T* values)
+{
+ FILE* file = popen(command, "r");
+ if (file == nullptr) {
+ return -1;
+ }
+ std::vector<std::string> fields;
+ int nl = LoadLines(file, fields);
+ pclose(file);
+ if (nl == 0) {
+ return -1;
+ }
+ int i = 0;
+ while (fieldNames[i] != nullptr) {
+ int ierr = NameValue(fields, fieldNames[i], values[i]);
+ if (ierr) {
+ return -(i + 2);
+ }
+ i += 1;
+ }
+ return 0;
+}
+#endif
+
+// ****************************************************************************
+#if !defined(_WIN32) && !defined(__MINGW32__) && !defined(__CYGWIN__)
+void StacktraceSignalHandler(int sigNo, siginfo_t* sigInfo,
+ void* /*sigContext*/)
+{
+# if defined(__linux) || defined(__APPLE__)
+ std::ostringstream oss;
+ oss << std::endl
+ << "========================================================="
+ << std::endl
+ << "Process id " << getpid() << " ";
+ switch (sigNo) {
+ case SIGINT:
+ oss << "Caught SIGINT";
+ break;
+
+ case SIGTERM:
+ oss << "Caught SIGTERM";
+ break;
+
+ case SIGABRT:
+ oss << "Caught SIGABRT";
+ break;
+
+ case SIGFPE:
+ oss << "Caught SIGFPE at " << (sigInfo->si_addr == nullptr ? "0x" : "")
+ << sigInfo->si_addr << " ";
+ switch (sigInfo->si_code) {
+# if defined(FPE_INTDIV)
+ case FPE_INTDIV:
+ oss << "integer division by zero";
+ break;
+# endif
+
+# if defined(FPE_INTOVF)
+ case FPE_INTOVF:
+ oss << "integer overflow";
+ break;
+# endif
+
+ case FPE_FLTDIV:
+ oss << "floating point divide by zero";
+ break;
+
+ case FPE_FLTOVF:
+ oss << "floating point overflow";
+ break;
+
+ case FPE_FLTUND:
+ oss << "floating point underflow";
+ break;
+
+ case FPE_FLTRES:
+ oss << "floating point inexact result";
+ break;
+
+ case FPE_FLTINV:
+ oss << "floating point invalid operation";
+ break;
+
+# if defined(FPE_FLTSUB)
+ case FPE_FLTSUB:
+ oss << "floating point subscript out of range";
+ break;
+# endif
+
+ default:
+ oss << "code " << sigInfo->si_code;
+ break;
+ }
+ break;
+
+ case SIGSEGV:
+ oss << "Caught SIGSEGV at " << (sigInfo->si_addr == nullptr ? "0x" : "")
+ << sigInfo->si_addr << " ";
+ switch (sigInfo->si_code) {
+ case SEGV_MAPERR:
+ oss << "address not mapped to object";
+ break;
+
+ case SEGV_ACCERR:
+ oss << "invalid permission for mapped object";
+ break;
+
+ default:
+ oss << "code " << sigInfo->si_code;
+ break;
+ }
+ break;
+
+ case SIGBUS:
+ oss << "Caught SIGBUS at " << (sigInfo->si_addr == nullptr ? "0x" : "")
+ << sigInfo->si_addr << " ";
+ switch (sigInfo->si_code) {
+ case BUS_ADRALN:
+ oss << "invalid address alignment";
+ break;
+
+# if defined(BUS_ADRERR)
+ case BUS_ADRERR:
+ oss << "nonexistent physical address";
+ break;
+# endif
+
+# if defined(BUS_OBJERR)
+ case BUS_OBJERR:
+ oss << "object-specific hardware error";
+ break;
+# endif
+
+# if defined(BUS_MCEERR_AR)
+ case BUS_MCEERR_AR:
+ oss << "Hardware memory error consumed on a machine check; action "
+ "required.";
+ break;
+# endif
+
+# if defined(BUS_MCEERR_AO)
+ case BUS_MCEERR_AO:
+ oss << "Hardware memory error detected in process but not consumed; "
+ "action optional.";
+ break;
+# endif
+
+ default:
+ oss << "code " << sigInfo->si_code;
+ break;
+ }
+ break;
+
+ case SIGILL:
+ oss << "Caught SIGILL at " << (sigInfo->si_addr == nullptr ? "0x" : "")
+ << sigInfo->si_addr << " ";
+ switch (sigInfo->si_code) {
+ case ILL_ILLOPC:
+ oss << "illegal opcode";
+ break;
+
+# if defined(ILL_ILLOPN)
+ case ILL_ILLOPN:
+ oss << "illegal operand";
+ break;
+# endif
+
+# if defined(ILL_ILLADR)
+ case ILL_ILLADR:
+ oss << "illegal addressing mode.";
+ break;
+# endif
+
+ case ILL_ILLTRP:
+ oss << "illegal trap";
+ break;
+
+ case ILL_PRVOPC:
+ oss << "privileged opcode";
+ break;
+
+# if defined(ILL_PRVREG)
+ case ILL_PRVREG:
+ oss << "privileged register";
+ break;
+# endif
+
+# if defined(ILL_COPROC)
+ case ILL_COPROC:
+ oss << "co-processor error";
+ break;
+# endif
+
+# if defined(ILL_BADSTK)
+ case ILL_BADSTK:
+ oss << "internal stack error";
+ break;
+# endif
+
+ default:
+ oss << "code " << sigInfo->si_code;
+ break;
+ }
+ break;
+
+ default:
+ oss << "Caught " << sigNo << " code " << sigInfo->si_code;
+ break;
+ }
+ oss << std::endl
+ << "Program Stack:" << std::endl
+ << SystemInformationImplementation::GetProgramStack(2, 0)
+ << "========================================================="
+ << std::endl;
+ std::cerr << oss.str() << std::endl;
+
+ // restore the previously registered handlers
+ // and abort
+ SystemInformationImplementation::SetStackTraceOnError(0);
+ abort();
+# else
+ // avoid warning C4100
+ (void)sigNo;
+ (void)sigInfo;
+# endif
+}
+#endif
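// A sketch of how a three-argument handler like the one above gets wired
// up: it must be installed through sigaction() with SA_SIGINFO so the
// kernel passes the siginfo_t this handler decodes.  The exact signal set
// and flags used by SetStackTraceOnError() live later in this file; this
// only illustrates the registration pattern for a single signal.
#if !defined(_WIN32) && !defined(__MINGW32__) && !defined(__CYGWIN__)
static int ExampleInstallSegvHandler(void)
{
  struct sigaction sa;
  memset(&sa, 0, sizeof(sa));
  sa.sa_sigaction = StacktraceSignalHandler; // three-argument form
  sa.sa_flags = SA_SIGINFO;                  // request siginfo_t delivery
  sigemptyset(&sa.sa_mask);
  return sigaction(SIGSEGV, &sa, nullptr);
}
#endif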
+
+#if defined(KWSYS_SYSTEMINFORMATION_HAS_BACKTRACE)
+# define safes(_arg) ((_arg) ? (_arg) : "???")
+
+// Description:
+// A container for symbol properties. Each instance
+// must be Initialized.
+class SymbolProperties
+{
+public:
+ SymbolProperties();
+
+ // Description:
+ // The SymbolProperties instance must be initialized by
+ // passing a stack address.
+ void Initialize(void* address);
+
+ // Description:
+ // Get the symbol's stack address.
+ void* GetAddress() const { return this->Address; }
+
+ // Description:
+  // If not set, paths will be removed (e.g. from a binary
+  // or source file name).
+ void SetReportPath(int rp) { this->ReportPath = rp; }
+
+ // Description:
+ // Set/Get the name of the binary file that the symbol
+ // is found in.
+ void SetBinary(const char* binary) { this->Binary = safes(binary); }
+
+ std::string GetBinary() const;
+
+ // Description:
+ // Set the name of the function that the symbol is found in.
+ // If c++ demangling is supported it will be demangled.
+ void SetFunction(const char* function)
+ {
+ this->Function = this->Demangle(function);
+ }
+
+ std::string GetFunction() const { return this->Function; }
+
+ // Description:
+ // Set/Get the name of the source file where the symbol
+ // is defined.
+ void SetSourceFile(const char* sourcefile)
+ {
+ this->SourceFile = safes(sourcefile);
+ }
+
+ std::string GetSourceFile() const
+ {
+ return this->GetFileName(this->SourceFile);
+ }
+
+ // Description:
+ // Set/Get the line number where the symbol is defined
+ void SetLineNumber(long linenumber) { this->LineNumber = linenumber; }
+ long GetLineNumber() const { return this->LineNumber; }
+
+ // Description:
+ // Set the address where the binary image is mapped
+ // into memory.
+ void SetBinaryBaseAddress(void* address)
+ {
+ this->BinaryBaseAddress = address;
+ }
+
+private:
+ void* GetRealAddress() const
+ {
+ return (void*)((char*)this->Address - (char*)this->BinaryBaseAddress);
+ }
+
+ std::string GetFileName(const std::string& path) const;
+ std::string Demangle(const char* symbol) const;
+
+private:
+ std::string Binary;
+ void* BinaryBaseAddress;
+ void* Address;
+ std::string SourceFile;
+ std::string Function;
+ long LineNumber;
+ int ReportPath;
+};
+
+std::ostream& operator<<(std::ostream& os, const SymbolProperties& sp)
+{
+# if defined(KWSYS_SYSTEMINFORMATION_HAS_SYMBOL_LOOKUP)
+ os << std::hex << sp.GetAddress() << " : " << sp.GetFunction() << " [("
+ << sp.GetBinary() << ") " << sp.GetSourceFile() << ":" << std::dec
+ << sp.GetLineNumber() << "]";
+# elif defined(KWSYS_SYSTEMINFORMATION_HAS_BACKTRACE)
+ void* addr = sp.GetAddress();
+ char** syminfo = backtrace_symbols(&addr, 1);
+ os << safes(syminfo[0]);
+ free(syminfo);
+# else
+ (void)os;
+ (void)sp;
+# endif
+ return os;
+}
+
+SymbolProperties::SymbolProperties()
+{
+ // not using an initializer list
+ // to avoid some PGI compiler warnings
+ this->SetBinary("???");
+ this->SetBinaryBaseAddress(nullptr);
+ this->Address = nullptr;
+ this->SetSourceFile("???");
+ this->SetFunction("???");
+ this->SetLineNumber(-1);
+ this->SetReportPath(0);
+ // avoid PGI compiler warnings
+ this->GetRealAddress();
+ this->GetFunction();
+ this->GetSourceFile();
+ this->GetLineNumber();
+}
+
+std::string SymbolProperties::GetFileName(const std::string& path) const
+{
+ std::string file(path);
+ if (!this->ReportPath) {
+ size_t at = file.rfind("/");
+ if (at != std::string::npos) {
+ file = file.substr(at + 1);
+ }
+ }
+ return file;
+}
+
+std::string SymbolProperties::GetBinary() const
+{
+// only linux has proc fs
+# if defined(__linux__)
+ if (this->Binary == "/proc/self/exe") {
+ std::string binary;
+ char buf[1024] = { '\0' };
+ ssize_t ll = 0;
+ if ((ll = readlink("/proc/self/exe", buf, 1024)) > 0 && ll < 1024) {
+ buf[ll] = '\0';
+ binary = buf;
+ } else {
+ binary = "/proc/self/exe";
+ }
+ return this->GetFileName(binary);
+ }
+# endif
+ return this->GetFileName(this->Binary);
+}
+
+std::string SymbolProperties::Demangle(const char* symbol) const
+{
+ std::string result = safes(symbol);
+# if defined(KWSYS_SYSTEMINFORMATION_HAS_CPP_DEMANGLE)
+ int status = 0;
+ size_t bufferLen = 1024;
+ char* buffer = (char*)malloc(1024);
+ char* demangledSymbol =
+ abi::__cxa_demangle(symbol, buffer, &bufferLen, &status);
+ if (!status) {
+ result = demangledSymbol;
+ }
+ free(buffer);
+# else
+ (void)symbol;
+# endif
+ return result;
+}
+
+void SymbolProperties::Initialize(void* address)
+{
+ this->Address = address;
+# if defined(KWSYS_SYSTEMINFORMATION_HAS_SYMBOL_LOOKUP)
+ // first fallback option can demangle c++ functions
+ Dl_info info;
+ int ierr = dladdr(this->Address, &info);
+ if (ierr && info.dli_sname && info.dli_saddr) {
+ this->SetBinary(info.dli_fname);
+ this->SetFunction(info.dli_sname);
+ }
+# else
+// second fallback: use the builtin backtrace_symbols
+// to decode the backtrace.
+# endif
+}
+#endif // don't define this class if we're not using it
+
+#if defined(_WIN32) || defined(__CYGWIN__)
+# define KWSYS_SYSTEMINFORMATION_USE_GetSystemTimes
+#endif
+#if defined(_MSC_VER) && _MSC_VER < 1310
+# undef KWSYS_SYSTEMINFORMATION_USE_GetSystemTimes
+#endif
+#if defined(KWSYS_SYSTEMINFORMATION_USE_GetSystemTimes)
+double calculateCPULoad(unsigned __int64 idleTicks,
+ unsigned __int64 totalTicks)
+{
+ static double previousLoad = -0.0;
+ static unsigned __int64 previousIdleTicks = 0;
+ static unsigned __int64 previousTotalTicks = 0;
+
+ unsigned __int64 const idleTicksSinceLastTime =
+ idleTicks - previousIdleTicks;
+ unsigned __int64 const totalTicksSinceLastTime =
+ totalTicks - previousTotalTicks;
+
+ double load;
+ if (previousTotalTicks == 0 || totalTicksSinceLastTime == 0) {
+ // No new information. Use previous result.
+ load = previousLoad;
+ } else {
+ // Calculate load since last time.
+ load = 1.0 - double(idleTicksSinceLastTime) / totalTicksSinceLastTime;
+
+ // Smooth if possible.
+ if (previousLoad > 0) {
+ load = 0.25 * load + 0.75 * previousLoad;
+ }
+ }
+
+ previousLoad = load;
+ previousIdleTicks = idleTicks;
+ previousTotalTicks = totalTicks;
+
+ return load;
+}
+
+unsigned __int64 fileTimeToUInt64(FILETIME const& ft)
+{
+ LARGE_INTEGER out;
+ out.HighPart = ft.dwHighDateTime;
+ out.LowPart = ft.dwLowDateTime;
+ return out.QuadPart;
+}
+#endif
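// A sketch of how the two helpers above combine on Windows: sample the
// system-wide FILETIME counters and turn them into a smoothed load in
// [0, 1].  GetSystemTimes() reports kernel time inclusive of idle time,
// so kernel + user serves as the total; the function name is illustrative.
#if defined(KWSYS_SYSTEMINFORMATION_USE_GetSystemTimes)
static double ExampleSampleCPULoad()
{
  FILETIME idleTime, kernelTime, userTime;
  if (!GetSystemTimes(&idleTime, &kernelTime, &userTime)) {
    return -0.0; // sampling failed; matches the "no data yet" sentinel above
  }
  unsigned __int64 const idle = fileTimeToUInt64(idleTime);
  unsigned __int64 const total =
    fileTimeToUInt64(kernelTime) + fileTimeToUInt64(userTime);
  return calculateCPULoad(idle, total); // smoothed: 0.25 * new + 0.75 * old
}
#endif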
+
+} // anonymous namespace
+
+SystemInformationImplementation::SystemInformationImplementation()
+{
+ this->TotalVirtualMemory = 0;
+ this->AvailableVirtualMemory = 0;
+ this->TotalPhysicalMemory = 0;
+ this->AvailablePhysicalMemory = 0;
+ this->CurrentPositionInFile = 0;
+ this->ChipManufacturer = UnknownManufacturer;
+ memset(&this->Features, 0, sizeof(CPUFeatures));
+ this->ChipID.Type = 0;
+ this->ChipID.Family = 0;
+ this->ChipID.Model = 0;
+ this->ChipID.Revision = 0;
+ this->ChipID.ExtendedFamily = 0;
+ this->ChipID.ExtendedModel = 0;
+ this->CPUSpeedInMHz = 0;
+ this->NumberOfLogicalCPU = 0;
+ this->NumberOfPhysicalCPU = 0;
+ this->OSName = "";
+ this->Hostname = "";
+ this->OSRelease = "";
+ this->OSVersion = "";
+ this->OSPlatform = "";
+ this->OSIs64Bit = (sizeof(void*) == 8);
+}
+
+SystemInformationImplementation::~SystemInformationImplementation()
+{
+}
+
+void SystemInformationImplementation::RunCPUCheck()
+{
+#ifdef _WIN32
+ // Check to see if this processor supports CPUID.
+ bool supportsCPUID = DoesCPUSupportCPUID();
+
+ if (supportsCPUID) {
+ // Retrieve the CPU details.
+ RetrieveCPUIdentity();
+ this->FindManufacturer();
+ RetrieveCPUFeatures();
+ }
+
+  // These two may be called without support for the CPUID instruction.
+  // (But if the instruction is there, they should be called *after*
+  // the above call to RetrieveCPUIdentity; that's why the two separate
+  // "if (supportsCPUID)" blocks exist.)
+  //
+ if (!RetrieveCPUClockSpeed()) {
+ RetrieveClassicalCPUClockSpeed();
+ }
+
+ if (supportsCPUID) {
+ // Retrieve cache information.
+ if (!RetrieveCPUCacheDetails()) {
+ RetrieveClassicalCPUCacheDetails();
+ }
+
+ // Retrieve the extended CPU details.
+ if (!RetrieveExtendedCPUIdentity()) {
+ RetrieveClassicalCPUIdentity();
+ }
+
+ RetrieveExtendedCPUFeatures();
+ RetrieveCPUPowerManagement();
+
+ // Now attempt to retrieve the serial number (if possible).
+ RetrieveProcessorSerialNumber();
+ }
+
+ this->CPUCountWindows();
+
+#elif defined(__APPLE__)
+ this->ParseSysCtl();
+#elif defined(__SVR4) && defined(__sun)
+ this->QuerySolarisProcessor();
+#elif defined(__HAIKU__)
+ this->QueryHaikuInfo();
+#elif defined(__QNX__)
+ this->QueryQNXProcessor();
+#elif defined(__OpenBSD__) || defined(__FreeBSD__) || defined(__NetBSD__) || \
+ defined(__DragonFly__)
+ this->QueryBSDProcessor();
+#elif defined(__hpux)
+ this->QueryHPUXProcessor();
+#elif defined(__linux) || defined(__CYGWIN__)
+ this->RetreiveInformationFromCpuInfoFile();
+#else
+ this->QueryProcessor();
+#endif
+}
+
+void SystemInformationImplementation::RunOSCheck()
+{
+ this->QueryOSInformation();
+}
+
+void SystemInformationImplementation::RunMemoryCheck()
+{
+#if defined(__APPLE__)
+ this->ParseSysCtl();
+#elif defined(__SVR4) && defined(__sun)
+ this->QuerySolarisMemory();
+#elif defined(__HAIKU__)
+ this->QueryHaikuInfo();
+#elif defined(__QNX__)
+ this->QueryQNXMemory();
+#elif defined(__OpenBSD__) || defined(__FreeBSD__) || defined(__NetBSD__) || \
+ defined(__DragonFly__)
+ this->QueryBSDMemory();
+#elif defined(__CYGWIN__)
+ this->QueryCygwinMemory();
+#elif defined(_WIN32)
+ this->QueryWindowsMemory();
+#elif defined(__hpux)
+ this->QueryHPUXMemory();
+#elif defined(__linux)
+ this->QueryLinuxMemory();
+#elif defined(_AIX)
+ this->QueryAIXMemory();
+#else
+ this->QueryMemory();
+#endif
+}
+
+/** Get the vendor string */
+const char* SystemInformationImplementation::GetVendorString()
+{
+ return this->ChipID.Vendor.c_str();
+}
+
+/** Get the OS Name */
+const char* SystemInformationImplementation::GetOSName()
+{
+ return this->OSName.c_str();
+}
+
+/** Get the hostname */
+const char* SystemInformationImplementation::GetHostname()
+{
+ if (this->Hostname.empty()) {
+ this->Hostname = "localhost";
+#if defined(_WIN32)
+ WORD wVersionRequested;
+ WSADATA wsaData;
+ char name[255];
+ wVersionRequested = MAKEWORD(2, 0);
+ if (WSAStartup(wVersionRequested, &wsaData) == 0) {
+ gethostname(name, sizeof(name));
+ WSACleanup();
+ }
+ this->Hostname = name;
+#else
+ struct utsname unameInfo;
+ int errorFlag = uname(&unameInfo);
+ if (errorFlag == 0) {
+ this->Hostname = unameInfo.nodename;
+ }
+#endif
+ }
+ return this->Hostname.c_str();
+}
+
+/** Get the FQDN */
+int SystemInformationImplementation::GetFullyQualifiedDomainName(
+ std::string& fqdn)
+{
+ // in the event of absolute failure return localhost.
+ fqdn = "localhost";
+
+#if defined(_WIN32)
+ int ierr;
+ // TODO - a more robust implementation for windows, see comments
+ // in unix implementation.
+ WSADATA wsaData;
+ WORD ver = MAKEWORD(2, 0);
+ ierr = WSAStartup(ver, &wsaData);
+ if (ierr) {
+ return -1;
+ }
+
+ char base[256] = { '\0' };
+ ierr = gethostname(base, 256);
+ if (ierr) {
+ WSACleanup();
+ return -2;
+ }
+ fqdn = base;
+
+ HOSTENT* hent = gethostbyname(base);
+ if (hent) {
+ fqdn = hent->h_name;
+ }
+
+ WSACleanup();
+ return 0;
+
+#elif defined(KWSYS_SYSTEMINFORMATION_IMPLEMENT_FQDN)
+  // gethostname typically returns an alias for the loopback interface,
+  // but we want the fully qualified domain name. Because there can be
+  // any number of interfaces on this system, we look for the first one
+  // that contains the name returned by gethostname and is longer.
+  // Failing that, we return the gethostname result and indicate failure
+  // with a return code. A failure code is not necessarily an error:
+  // for instance, gethostname may already return the fully qualified
+  // domain name, or there may not be one at all if the system lives
+  // on a private network, such as in the case of a cluster
+  // node.
+
+ int ierr = 0;
+ char base[NI_MAXHOST];
+ ierr = gethostname(base, NI_MAXHOST);
+ if (ierr) {
+ return -1;
+ }
+ size_t baseSize = strlen(base);
+ fqdn = base;
+
+ struct ifaddrs* ifas;
+ struct ifaddrs* ifa;
+ ierr = getifaddrs(&ifas);
+ if (ierr) {
+ return -2;
+ }
+
+ for (ifa = ifas; ifa != nullptr; ifa = ifa->ifa_next) {
+ int fam = ifa->ifa_addr ? ifa->ifa_addr->sa_family : -1;
+ // Skip Loopback interfaces
+ if (((fam == AF_INET) || (fam == AF_INET6)) &&
+ !(ifa->ifa_flags & IFF_LOOPBACK)) {
+ char host[NI_MAXHOST] = { '\0' };
+
+ const size_t addrlen = (fam == AF_INET ? sizeof(struct sockaddr_in)
+ : sizeof(struct sockaddr_in6));
+
+ ierr = getnameinfo(ifa->ifa_addr, static_cast<socklen_t>(addrlen), host,
+ NI_MAXHOST, nullptr, 0, NI_NAMEREQD);
+ if (ierr) {
+ // don't report the failure now since we may succeed on another
+ // interface. If all attempts fail then return the failure code.
+ ierr = -3;
+ continue;
+ }
+
+ std::string candidate = host;
+ if ((candidate.find(base) != std::string::npos) &&
+ baseSize < candidate.size()) {
+ // success, stop now.
+ ierr = 0;
+ fqdn = candidate;
+ break;
+ }
+ }
+ }
+ freeifaddrs(ifas);
+
+ return ierr;
+#else
+ /* TODO: Implement on more platforms. */
+ fqdn = this->GetHostname();
+ return -1;
+#endif
+}
+
+/** Get the OS release */
+const char* SystemInformationImplementation::GetOSRelease()
+{
+ return this->OSRelease.c_str();
+}
+
+/** Get the OS version */
+const char* SystemInformationImplementation::GetOSVersion()
+{
+ return this->OSVersion.c_str();
+}
+
+/** Get the OS platform */
+const char* SystemInformationImplementation::GetOSPlatform()
+{
+ return this->OSPlatform.c_str();
+}
+
+/** Get the vendor ID */
+const char* SystemInformationImplementation::GetVendorID()
+{
+ // Return the vendor ID.
+ switch (this->ChipManufacturer) {
+ case Intel:
+ return "Intel Corporation";
+ case AMD:
+ return "Advanced Micro Devices";
+ case NSC:
+ return "National Semiconductor";
+ case Cyrix:
+ return "Cyrix Corp., VIA Inc.";
+ case NexGen:
+ return "NexGen Inc., Advanced Micro Devices";
+ case IDT:
+ return "IDT\\Centaur, Via Inc.";
+ case UMC:
+ return "United Microelectronics Corp.";
+ case Rise:
+ return "Rise";
+ case Transmeta:
+ return "Transmeta";
+ case Sun:
+ return "Sun Microelectronics";
+ case IBM:
+ return "IBM";
+ case Motorola:
+ return "Motorola";
+ case HP:
+ return "Hewlett-Packard";
+ case Hygon:
+ return "Chengdu Haiguang IC Design Co., Ltd.";
+ case UnknownManufacturer:
+ default:
+ return "Unknown Manufacturer";
+ }
+}
+
+/** Return the type ID of the CPU */
+std::string SystemInformationImplementation::GetTypeID()
+{
+ std::ostringstream str;
+ str << this->ChipID.Type;
+ return str.str();
+}
+
+/** Return the family of the CPU present */
+std::string SystemInformationImplementation::GetFamilyID()
+{
+ std::ostringstream str;
+ str << this->ChipID.Family;
+ return str.str();
+}
+
+/** Return the model of the CPU present. */
+std::string SystemInformationImplementation::GetModelID()
+{
+ std::ostringstream str;
+ str << this->ChipID.Model;
+ return str.str();
+}
+
+/** Return the model name of the CPU present. */
+std::string SystemInformationImplementation::GetModelName()
+{
+ return this->ChipID.ModelName;
+}
+
+/** Return the stepping code of the CPU present. */
+std::string SystemInformationImplementation::GetSteppingCode()
+{
+ std::ostringstream str;
+ str << this->ChipID.Revision;
+ return str.str();
+}
+
+/** Return the extended processor name. */
+const char* SystemInformationImplementation::GetExtendedProcessorName()
+{
+ return this->ChipID.ProcessorName.c_str();
+}
+
+/** Return the serial number of the processor
+ * in hexadecimal: xxxx-xxxx-xxxx-xxxx-xxxx-xxxx. */
+const char* SystemInformationImplementation::GetProcessorSerialNumber()
+{
+ return this->ChipID.SerialNumber.c_str();
+}
+
+/** Return the logical processors per physical */
+unsigned int SystemInformationImplementation::GetLogicalProcessorsPerPhysical()
+{
+ return this->Features.ExtendedFeatures.LogicalProcessorsPerPhysical;
+}
+
+/** Return the processor clock frequency. */
+float SystemInformationImplementation::GetProcessorClockFrequency()
+{
+ return this->CPUSpeedInMHz;
+}
+
+/** Return the APIC ID. */
+int SystemInformationImplementation::GetProcessorAPICID()
+{
+ return this->Features.ExtendedFeatures.APIC_ID;
+}
+
+/** Return the L1 cache size. */
+int SystemInformationImplementation::GetProcessorCacheSize()
+{
+ return this->Features.L1CacheSize;
+}
+
+/** Return the chosen cache size. */
+int SystemInformationImplementation::GetProcessorCacheXSize(long int dwCacheID)
+{
+ switch (dwCacheID) {
+ case SystemInformation::CPU_FEATURE_L1CACHE:
+ return this->Features.L1CacheSize;
+ case SystemInformation::CPU_FEATURE_L2CACHE:
+ return this->Features.L2CacheSize;
+ case SystemInformation::CPU_FEATURE_L3CACHE:
+ return this->Features.L3CacheSize;
+ }
+ return -1;
+}
+
+bool SystemInformationImplementation::DoesCPUSupportFeature(long int dwFeature)
+{
+ bool bHasFeature = false;
+
+ // Check for MMX instructions.
+ if (((dwFeature & SystemInformation::CPU_FEATURE_MMX) != 0) &&
+ this->Features.HasMMX)
+ bHasFeature = true;
+
+ // Check for MMX+ instructions.
+ if (((dwFeature & SystemInformation::CPU_FEATURE_MMX_PLUS) != 0) &&
+ this->Features.ExtendedFeatures.HasMMXPlus)
+ bHasFeature = true;
+
+  // Check for SSE instructions.
+ if (((dwFeature & SystemInformation::CPU_FEATURE_SSE) != 0) &&
+ this->Features.HasSSE)
+ bHasFeature = true;
+
+ // Check for SSE FP instructions.
+ if (((dwFeature & SystemInformation::CPU_FEATURE_SSE_FP) != 0) &&
+ this->Features.HasSSEFP)
+ bHasFeature = true;
+
+ // Check for SSE MMX instructions.
+ if (((dwFeature & SystemInformation::CPU_FEATURE_SSE_MMX) != 0) &&
+ this->Features.ExtendedFeatures.HasSSEMMX)
+ bHasFeature = true;
+
+ // Check for SSE2 instructions.
+ if (((dwFeature & SystemInformation::CPU_FEATURE_SSE2) != 0) &&
+ this->Features.HasSSE2)
+ bHasFeature = true;
+
+ // Check for 3DNow! instructions.
+ if (((dwFeature & SystemInformation::CPU_FEATURE_AMD_3DNOW) != 0) &&
+ this->Features.ExtendedFeatures.Has3DNow)
+ bHasFeature = true;
+
+ // Check for 3DNow+ instructions.
+ if (((dwFeature & SystemInformation::CPU_FEATURE_AMD_3DNOW_PLUS) != 0) &&
+ this->Features.ExtendedFeatures.Has3DNowPlus)
+ bHasFeature = true;
+
+ // Check for IA64 instructions.
+ if (((dwFeature & SystemInformation::CPU_FEATURE_IA64) != 0) &&
+ this->Features.HasIA64)
+ bHasFeature = true;
+
+ // Check for MP capable.
+ if (((dwFeature & SystemInformation::CPU_FEATURE_MP_CAPABLE) != 0) &&
+ this->Features.ExtendedFeatures.SupportsMP)
+ bHasFeature = true;
+
+ // Check for a serial number for the processor.
+ if (((dwFeature & SystemInformation::CPU_FEATURE_SERIALNUMBER) != 0) &&
+ this->Features.HasSerial)
+ bHasFeature = true;
+
+ // Check for a local APIC in the processor.
+ if (((dwFeature & SystemInformation::CPU_FEATURE_APIC) != 0) &&
+ this->Features.HasAPIC)
+ bHasFeature = true;
+
+ // Check for CMOV instructions.
+ if (((dwFeature & SystemInformation::CPU_FEATURE_CMOV) != 0) &&
+ this->Features.HasCMOV)
+ bHasFeature = true;
+
+ // Check for MTRR instructions.
+ if (((dwFeature & SystemInformation::CPU_FEATURE_MTRR) != 0) &&
+ this->Features.HasMTRR)
+ bHasFeature = true;
+
+ // Check for L1 cache size.
+ if (((dwFeature & SystemInformation::CPU_FEATURE_L1CACHE) != 0) &&
+ (this->Features.L1CacheSize != -1))
+ bHasFeature = true;
+
+ // Check for L2 cache size.
+ if (((dwFeature & SystemInformation::CPU_FEATURE_L2CACHE) != 0) &&
+ (this->Features.L2CacheSize != -1))
+ bHasFeature = true;
+
+ // Check for L3 cache size.
+ if (((dwFeature & SystemInformation::CPU_FEATURE_L3CACHE) != 0) &&
+ (this->Features.L3CacheSize != -1))
+ bHasFeature = true;
+
+ // Check for ACPI capability.
+ if (((dwFeature & SystemInformation::CPU_FEATURE_ACPI) != 0) &&
+ this->Features.HasACPI)
+ bHasFeature = true;
+
+ // Check for thermal monitor support.
+ if (((dwFeature & SystemInformation::CPU_FEATURE_THERMALMONITOR) != 0) &&
+ this->Features.HasThermal)
+ bHasFeature = true;
+
+ // Check for temperature sensing diode support.
+ if (((dwFeature & SystemInformation::CPU_FEATURE_TEMPSENSEDIODE) != 0) &&
+ this->Features.ExtendedFeatures.PowerManagement.HasTempSenseDiode)
+ bHasFeature = true;
+
+ // Check for frequency ID support.
+ if (((dwFeature & SystemInformation::CPU_FEATURE_FREQUENCYID) != 0) &&
+ this->Features.ExtendedFeatures.PowerManagement.HasFrequencyID)
+ bHasFeature = true;
+
+ // Check for voltage ID support.
+ if (((dwFeature & SystemInformation::CPU_FEATURE_VOLTAGEID_FREQUENCY) !=
+ 0) &&
+ this->Features.ExtendedFeatures.PowerManagement.HasVoltageID)
+ bHasFeature = true;
+
+ // Check for FPU support.
+ if (((dwFeature & SystemInformation::CPU_FEATURE_FPU) != 0) &&
+ this->Features.HasFPU)
+ bHasFeature = true;
+
+ return bHasFeature;
+}
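// A short sketch of the bitmask semantics above: dwFeature may be a single
// SystemInformation::CPU_FEATURE_* flag or several OR'd together, in which
// case the answer means "at least one of these is supported".  Callers
// needing "all of these" test each flag separately, as illustrated here
// (the helper name is illustrative only).
static bool ExampleHasMMXAndSSE2(SystemInformationImplementation& impl)
{
  return impl.DoesCPUSupportFeature(SystemInformation::CPU_FEATURE_MMX) &&
    impl.DoesCPUSupportFeature(SystemInformation::CPU_FEATURE_SSE2);
}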
+
+void SystemInformationImplementation::Delay(unsigned int uiMS)
+{
+#ifdef _WIN32
+ LARGE_INTEGER Frequency, StartCounter, EndCounter;
+ __int64 x;
+
+ // Get the frequency of the high performance counter.
+ if (!QueryPerformanceFrequency(&Frequency))
+ return;
+ x = Frequency.QuadPart / 1000 * uiMS;
+
+ // Get the starting position of the counter.
+ QueryPerformanceCounter(&StartCounter);
+
+ do {
+ // Get the ending position of the counter.
+ QueryPerformanceCounter(&EndCounter);
+ } while (EndCounter.QuadPart - StartCounter.QuadPart < x);
+#endif
+ (void)uiMS;
+}
+
+bool SystemInformationImplementation::DoesCPUSupportCPUID()
+{
+#if USE_CPUID
+ int dummy[4] = { 0, 0, 0, 0 };
+
+# if USE_ASM_INSTRUCTIONS
+ return call_cpuid(0, dummy);
+# else
+ call_cpuid(0, dummy);
+ return dummy[0] || dummy[1] || dummy[2] || dummy[3];
+# endif
+#else
+ // Assume no cpuid instruction.
+ return false;
+#endif
+}
+
+bool SystemInformationImplementation::RetrieveCPUFeatures()
+{
+#if USE_CPUID
+ int cpuinfo[4] = { 0, 0, 0, 0 };
+
+ if (!call_cpuid(1, cpuinfo)) {
+ return false;
+ }
+
+ // Retrieve the features of CPU present.
+ this->Features.HasFPU =
+ ((cpuinfo[3] & 0x00000001) != 0); // FPU Present --> Bit 0
+ this->Features.HasTSC =
+ ((cpuinfo[3] & 0x00000010) != 0); // TSC Present --> Bit 4
+ this->Features.HasAPIC =
+ ((cpuinfo[3] & 0x00000200) != 0); // APIC Present --> Bit 9
+ this->Features.HasMTRR =
+ ((cpuinfo[3] & 0x00001000) != 0); // MTRR Present --> Bit 12
+ this->Features.HasCMOV =
+ ((cpuinfo[3] & 0x00008000) != 0); // CMOV Present --> Bit 15
+ this->Features.HasSerial =
+ ((cpuinfo[3] & 0x00040000) != 0); // Serial Present --> Bit 18
+ this->Features.HasACPI =
+ ((cpuinfo[3] & 0x00400000) != 0); // ACPI Capable --> Bit 22
+ this->Features.HasMMX =
+ ((cpuinfo[3] & 0x00800000) != 0); // MMX Present --> Bit 23
+ this->Features.HasSSE =
+ ((cpuinfo[3] & 0x02000000) != 0); // SSE Present --> Bit 25
+ this->Features.HasSSE2 =
+ ((cpuinfo[3] & 0x04000000) != 0); // SSE2 Present --> Bit 26
+ this->Features.HasThermal =
+ ((cpuinfo[3] & 0x20000000) != 0); // Thermal Monitor Present --> Bit 29
+ this->Features.HasIA64 =
+ ((cpuinfo[3] & 0x40000000) != 0); // IA64 Present --> Bit 30
+
+# if USE_ASM_INSTRUCTIONS
+ // Retrieve extended SSE capabilities if SSE is available.
+ if (this->Features.HasSSE) {
+
+ // Attempt to __try some SSE FP instructions.
+ __try {
+ // Perform: orps xmm0, xmm0
+ _asm
+ {
+ _emit 0x0f
+ _emit 0x56
+ _emit 0xc0
+ }
+
+ // SSE FP capable processor.
+ this->Features.HasSSEFP = true;
+ } __except (1) {
+ // bad instruction - processor or OS cannot handle SSE FP.
+ this->Features.HasSSEFP = false;
+ }
+ } else {
+ // Set the advanced SSE capabilities to not available.
+ this->Features.HasSSEFP = false;
+ }
+# else
+ this->Features.HasSSEFP = false;
+# endif
+
+ // Retrieve Intel specific extended features.
+ if (this->ChipManufacturer == Intel) {
+ bool SupportsSMT =
+ ((cpuinfo[3] & 0x10000000) != 0); // Intel specific: SMT --> Bit 28
+
+ if ((SupportsSMT) && (this->Features.HasAPIC)) {
+ // Retrieve APIC information if there is one present.
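+      // (the initial APIC ID is reported in ebx bits 31..24)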
+ this->Features.ExtendedFeatures.APIC_ID =
+ ((cpuinfo[1] & 0xFF000000) >> 24);
+ }
+ }
+
+ return true;
+
+#else
+ return false;
+#endif
+}
+
+/** Find the manufacturer given the vendor id */
+void SystemInformationImplementation::FindManufacturer(
+ const std::string& family)
+{
+ if (this->ChipID.Vendor == "GenuineIntel")
+ this->ChipManufacturer = Intel; // Intel Corp.
+ else if (this->ChipID.Vendor == "UMC UMC UMC ")
+ this->ChipManufacturer = UMC; // United Microelectronics Corp.
+ else if (this->ChipID.Vendor == "AuthenticAMD")
+ this->ChipManufacturer = AMD; // Advanced Micro Devices
+ else if (this->ChipID.Vendor == "AMD ISBETTER")
+ this->ChipManufacturer = AMD; // Advanced Micro Devices (1994)
+ else if (this->ChipID.Vendor == "HygonGenuine")
+ this->ChipManufacturer = Hygon; // Chengdu Haiguang IC Design Co., Ltd.
+ else if (this->ChipID.Vendor == "CyrixInstead")
+ this->ChipManufacturer = Cyrix; // Cyrix Corp., VIA Inc.
+ else if (this->ChipID.Vendor == "NexGenDriven")
+ this->ChipManufacturer = NexGen; // NexGen Inc. (now AMD)
+ else if (this->ChipID.Vendor == "CentaurHauls")
+ this->ChipManufacturer = IDT; // IDT/Centaur (now VIA)
+ else if (this->ChipID.Vendor == "RiseRiseRise")
+ this->ChipManufacturer = Rise; // Rise
+ else if (this->ChipID.Vendor == "GenuineTMx86")
+ this->ChipManufacturer = Transmeta; // Transmeta
+ else if (this->ChipID.Vendor == "TransmetaCPU")
+ this->ChipManufacturer = Transmeta; // Transmeta
+ else if (this->ChipID.Vendor == "Geode By NSC")
+ this->ChipManufacturer = NSC; // National Semiconductor
+ else if (this->ChipID.Vendor == "Sun")
+ this->ChipManufacturer = Sun; // Sun Microelectronics
+ else if (this->ChipID.Vendor == "IBM")
+ this->ChipManufacturer = IBM; // IBM Microelectronics
+ else if (this->ChipID.Vendor == "Hewlett-Packard")
+ this->ChipManufacturer = HP; // Hewlett-Packard
+ else if (this->ChipID.Vendor == "Motorola")
+ this->ChipManufacturer = Motorola; // Motorola Microelectronics
+ else if (family.substr(0, 7) == "PA-RISC")
+ this->ChipManufacturer = HP; // Hewlett-Packard
+ else
+ this->ChipManufacturer = UnknownManufacturer; // Unknown manufacturer
+}
+
+/** */
+bool SystemInformationImplementation::RetrieveCPUIdentity()
+{
+#if USE_CPUID
+ int localCPUVendor[4];
+ int localCPUSignature[4];
+
+ if (!call_cpuid(0, localCPUVendor)) {
+ return false;
+ }
+ if (!call_cpuid(1, localCPUSignature)) {
+ return false;
+ }
+
+ // Process the returned information.
+ // ; eax = 0 --> eax: maximum value of CPUID instruction.
+  // ;           ebx: part 1 of 3; vendor ID string.
+  // ;           edx: part 2 of 3; vendor ID string.
+  // ;           ecx: part 3 of 3; vendor ID string.
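+  // (call_cpuid() stores eax, ebx, ecx, edx in indices 0..3, as the register
+  // comments above indicate, hence the 1, 3, 2 ordering of the memcpy calls.)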
+ char vbuf[13];
+ memcpy(&(vbuf[0]), &(localCPUVendor[1]), sizeof(int));
+ memcpy(&(vbuf[4]), &(localCPUVendor[3]), sizeof(int));
+ memcpy(&(vbuf[8]), &(localCPUVendor[2]), sizeof(int));
+ vbuf[12] = '\0';
+ this->ChipID.Vendor = vbuf;
+
+ // Retrieve the family of CPU present.
+  // ; eax = 1 --> eax: CPU ID - bits 27..20 - extended family, bits 19..16 -
+  // extended model, bits 15..12 - type, bits 11..8 - family,
+  // bits 7..4 - model, bits 3..0 - mask revision
+ // ; ebx: 31..24 - default APIC ID, 23..16 - logical processor ID,
+ // 15..8 - CFLUSH chunk size , 7..0 - brand ID
+ // ; edx: CPU feature flags
+ this->ChipID.ExtendedFamily =
+ ((localCPUSignature[0] & 0x0FF00000) >> 20); // Bits 27..20 Used
+ this->ChipID.ExtendedModel =
+ ((localCPUSignature[0] & 0x000F0000) >> 16); // Bits 19..16 Used
+ this->ChipID.Type =
+ ((localCPUSignature[0] & 0x0000F000) >> 12); // Bits 15..12 Used
+ this->ChipID.Family =
+ ((localCPUSignature[0] & 0x00000F00) >> 8); // Bits 11..8 Used
+ this->ChipID.Model =
+ ((localCPUSignature[0] & 0x000000F0) >> 4); // Bits 7..4 Used
+ this->ChipID.Revision =
+ ((localCPUSignature[0] & 0x0000000F) >> 0); // Bits 3..0 Used
+
+ return true;
+
+#else
+ return false;
+#endif
+}
+
+/** */
+bool SystemInformationImplementation::RetrieveCPUCacheDetails()
+{
+#if USE_CPUID
+ int L1Cache[4] = { 0, 0, 0, 0 };
+ int L2Cache[4] = { 0, 0, 0, 0 };
+
+ // Check to see if what we are about to do is supported...
+ if (RetrieveCPUExtendedLevelSupport(0x80000005)) {
+ if (!call_cpuid(0x80000005, L1Cache)) {
+ return false;
+ }
+ // Save the L1 data cache size (in KB) from ecx: bits 31..24 as well as
+ // data cache size from edx: bits 31..24.
+ this->Features.L1CacheSize = ((L1Cache[2] & 0xFF000000) >> 24);
+ this->Features.L1CacheSize += ((L1Cache[3] & 0xFF000000) >> 24);
+ } else {
+ // Store -1 to indicate the cache could not be queried.
+ this->Features.L1CacheSize = -1;
+ }
+
+ // Check to see if what we are about to do is supported...
+ if (RetrieveCPUExtendedLevelSupport(0x80000006)) {
+ if (!call_cpuid(0x80000006, L2Cache)) {
+ return false;
+ }
+ // Save the L2 unified cache size (in KB) from ecx: bits 31..16.
+ this->Features.L2CacheSize = ((L2Cache[2] & 0xFFFF0000) >> 16);
+ } else {
+ // Store -1 to indicate the cache could not be queried.
+ this->Features.L2CacheSize = -1;
+ }
+
+ // Define L3 as being not present as we cannot test for it.
+ this->Features.L3CacheSize = -1;
+
+#endif
+
+ // Return failure if we cannot detect either cache with this method.
+  return !((this->Features.L1CacheSize == -1) &&
+           (this->Features.L2CacheSize == -1));
+}
+
+/** */
+bool SystemInformationImplementation::RetrieveClassicalCPUCacheDetails()
+{
+#if USE_CPUID
+ int TLBCode = -1, TLBData = -1, L1Code = -1, L1Data = -1, L1Trace = -1,
+ L2Unified = -1, L3Unified = -1;
+ int TLBCacheData[4] = { 0, 0, 0, 0 };
+ int TLBPassCounter = 0;
+ int TLBCacheUnit = 0;
+
+ do {
+ if (!call_cpuid(2, TLBCacheData)) {
+ return false;
+ }
+
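+    // The low byte of eax from CPUID leaf 2 gives the number of times CPUID
+    // must be executed with eax = 2 to obtain all cache/TLB descriptors;
+    // it is used as the loop bound below.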
+ int bob = ((TLBCacheData[0] & 0x00FF0000) >> 16);
+ (void)bob;
+ // Process the returned TLB and cache information.
+ for (int nCounter = 0; nCounter < TLBCACHE_INFO_UNITS; nCounter++) {
+ // First of all - decide which unit we are dealing with.
+ switch (nCounter) {
+ // eax: bits 8..15 : bits 16..23 : bits 24..31
+ case 0:
+ TLBCacheUnit = ((TLBCacheData[0] & 0x0000FF00) >> 8);
+ break;
+ case 1:
+ TLBCacheUnit = ((TLBCacheData[0] & 0x00FF0000) >> 16);
+ break;
+ case 2:
+ TLBCacheUnit = ((TLBCacheData[0] & 0xFF000000) >> 24);
+ break;
+
+ // ebx: bits 0..7 : bits 8..15 : bits 16..23 : bits 24..31
+ case 3:
+ TLBCacheUnit = ((TLBCacheData[1] & 0x000000FF) >> 0);
+ break;
+ case 4:
+ TLBCacheUnit = ((TLBCacheData[1] & 0x0000FF00) >> 8);
+ break;
+ case 5:
+ TLBCacheUnit = ((TLBCacheData[1] & 0x00FF0000) >> 16);
+ break;
+ case 6:
+ TLBCacheUnit = ((TLBCacheData[1] & 0xFF000000) >> 24);
+ break;
+
+ // ecx: bits 0..7 : bits 8..15 : bits 16..23 : bits 24..31
+ case 7:
+ TLBCacheUnit = ((TLBCacheData[2] & 0x000000FF) >> 0);
+ break;
+ case 8:
+ TLBCacheUnit = ((TLBCacheData[2] & 0x0000FF00) >> 8);
+ break;
+ case 9:
+ TLBCacheUnit = ((TLBCacheData[2] & 0x00FF0000) >> 16);
+ break;
+ case 10:
+ TLBCacheUnit = ((TLBCacheData[2] & 0xFF000000) >> 24);
+ break;
+
+ // edx: bits 0..7 : bits 8..15 : bits 16..23 : bits 24..31
+ case 11:
+ TLBCacheUnit = ((TLBCacheData[3] & 0x000000FF) >> 0);
+ break;
+ case 12:
+ TLBCacheUnit = ((TLBCacheData[3] & 0x0000FF00) >> 8);
+ break;
+ case 13:
+ TLBCacheUnit = ((TLBCacheData[3] & 0x00FF0000) >> 16);
+ break;
+ case 14:
+ TLBCacheUnit = ((TLBCacheData[3] & 0xFF000000) >> 24);
+ break;
+
+ // Default case - an error has occurred.
+ default:
+ return false;
+ }
+
+ // Now process the resulting unit to see what it means....
+ switch (TLBCacheUnit) {
+ case 0x00:
+ break;
+ case 0x01:
+ STORE_TLBCACHE_INFO(TLBCode, 4);
+ break;
+ case 0x02:
+ STORE_TLBCACHE_INFO(TLBCode, 4096);
+ break;
+ case 0x03:
+ STORE_TLBCACHE_INFO(TLBData, 4);
+ break;
+ case 0x04:
+ STORE_TLBCACHE_INFO(TLBData, 4096);
+ break;
+ case 0x06:
+ STORE_TLBCACHE_INFO(L1Code, 8);
+ break;
+ case 0x08:
+ STORE_TLBCACHE_INFO(L1Code, 16);
+ break;
+ case 0x0a:
+ STORE_TLBCACHE_INFO(L1Data, 8);
+ break;
+ case 0x0c:
+ STORE_TLBCACHE_INFO(L1Data, 16);
+ break;
+ case 0x10:
+ STORE_TLBCACHE_INFO(L1Data, 16);
+ break; // <-- FIXME: IA-64 Only
+ case 0x15:
+ STORE_TLBCACHE_INFO(L1Code, 16);
+ break; // <-- FIXME: IA-64 Only
+ case 0x1a:
+ STORE_TLBCACHE_INFO(L2Unified, 96);
+ break; // <-- FIXME: IA-64 Only
+ case 0x22:
+ STORE_TLBCACHE_INFO(L3Unified, 512);
+ break;
+ case 0x23:
+ STORE_TLBCACHE_INFO(L3Unified, 1024);
+ break;
+ case 0x25:
+ STORE_TLBCACHE_INFO(L3Unified, 2048);
+ break;
+ case 0x29:
+ STORE_TLBCACHE_INFO(L3Unified, 4096);
+ break;
+ case 0x39:
+ STORE_TLBCACHE_INFO(L2Unified, 128);
+ break;
+ case 0x3c:
+ STORE_TLBCACHE_INFO(L2Unified, 256);
+ break;
+ case 0x40:
+ STORE_TLBCACHE_INFO(L2Unified, 0);
+ break; // <-- FIXME: No integrated L2 cache (P6 core) or L3 cache (P4
+ // core).
+ case 0x41:
+ STORE_TLBCACHE_INFO(L2Unified, 128);
+ break;
+ case 0x42:
+ STORE_TLBCACHE_INFO(L2Unified, 256);
+ break;
+ case 0x43:
+ STORE_TLBCACHE_INFO(L2Unified, 512);
+ break;
+ case 0x44:
+ STORE_TLBCACHE_INFO(L2Unified, 1024);
+ break;
+ case 0x45:
+ STORE_TLBCACHE_INFO(L2Unified, 2048);
+ break;
+ case 0x50:
+ STORE_TLBCACHE_INFO(TLBCode, 4096);
+ break;
+ case 0x51:
+ STORE_TLBCACHE_INFO(TLBCode, 4096);
+ break;
+ case 0x52:
+ STORE_TLBCACHE_INFO(TLBCode, 4096);
+ break;
+ case 0x5b:
+ STORE_TLBCACHE_INFO(TLBData, 4096);
+ break;
+ case 0x5c:
+ STORE_TLBCACHE_INFO(TLBData, 4096);
+ break;
+ case 0x5d:
+ STORE_TLBCACHE_INFO(TLBData, 4096);
+ break;
+ case 0x66:
+ STORE_TLBCACHE_INFO(L1Data, 8);
+ break;
+ case 0x67:
+ STORE_TLBCACHE_INFO(L1Data, 16);
+ break;
+ case 0x68:
+ STORE_TLBCACHE_INFO(L1Data, 32);
+ break;
+ case 0x70:
+ STORE_TLBCACHE_INFO(L1Trace, 12);
+ break;
+ case 0x71:
+ STORE_TLBCACHE_INFO(L1Trace, 16);
+ break;
+ case 0x72:
+ STORE_TLBCACHE_INFO(L1Trace, 32);
+ break;
+ case 0x77:
+ STORE_TLBCACHE_INFO(L1Code, 16);
+ break; // <-- FIXME: IA-64 Only
+ case 0x79:
+ STORE_TLBCACHE_INFO(L2Unified, 128);
+ break;
+ case 0x7a:
+ STORE_TLBCACHE_INFO(L2Unified, 256);
+ break;
+ case 0x7b:
+ STORE_TLBCACHE_INFO(L2Unified, 512);
+ break;
+ case 0x7c:
+ STORE_TLBCACHE_INFO(L2Unified, 1024);
+ break;
+ case 0x7e:
+ STORE_TLBCACHE_INFO(L2Unified, 256);
+ break;
+ case 0x81:
+ STORE_TLBCACHE_INFO(L2Unified, 128);
+ break;
+ case 0x82:
+ STORE_TLBCACHE_INFO(L2Unified, 256);
+ break;
+ case 0x83:
+ STORE_TLBCACHE_INFO(L2Unified, 512);
+ break;
+ case 0x84:
+ STORE_TLBCACHE_INFO(L2Unified, 1024);
+ break;
+ case 0x85:
+ STORE_TLBCACHE_INFO(L2Unified, 2048);
+ break;
+ case 0x88:
+ STORE_TLBCACHE_INFO(L3Unified, 2048);
+ break; // <-- FIXME: IA-64 Only
+ case 0x89:
+ STORE_TLBCACHE_INFO(L3Unified, 4096);
+ break; // <-- FIXME: IA-64 Only
+ case 0x8a:
+ STORE_TLBCACHE_INFO(L3Unified, 8192);
+ break; // <-- FIXME: IA-64 Only
+ case 0x8d:
+          STORE_TLBCACHE_INFO(L3Unified, 3072);
+ break; // <-- FIXME: IA-64 Only
+ case 0x90:
+ STORE_TLBCACHE_INFO(TLBCode, 262144);
+ break; // <-- FIXME: IA-64 Only
+ case 0x96:
+ STORE_TLBCACHE_INFO(TLBCode, 262144);
+ break; // <-- FIXME: IA-64 Only
+ case 0x9b:
+ STORE_TLBCACHE_INFO(TLBCode, 262144);
+ break; // <-- FIXME: IA-64 Only
+
+ // Default case - an error has occurred.
+ default:
+ return false;
+ }
+ }
+
+ // Increment the TLB pass counter.
+ TLBPassCounter++;
+ } while ((TLBCacheData[0] & 0x000000FF) > TLBPassCounter);
+
+ // Ok - we now have the maximum TLB, L1, L2, and L3 sizes...
+ if ((L1Code == -1) && (L1Data == -1) && (L1Trace == -1)) {
+ this->Features.L1CacheSize = -1;
+ } else if ((L1Code == -1) && (L1Data == -1) && (L1Trace != -1)) {
+ this->Features.L1CacheSize = L1Trace;
+ } else if ((L1Code != -1) && (L1Data == -1)) {
+ this->Features.L1CacheSize = L1Code;
+ } else if ((L1Code == -1) && (L1Data != -1)) {
+ this->Features.L1CacheSize = L1Data;
+ } else if ((L1Code != -1) && (L1Data != -1)) {
+ this->Features.L1CacheSize = L1Code + L1Data;
+ } else {
+ this->Features.L1CacheSize = -1;
+ }
+
+ // Ok - we now have the maximum TLB, L1, L2, and L3 sizes...
+ if (L2Unified == -1) {
+ this->Features.L2CacheSize = -1;
+ } else {
+ this->Features.L2CacheSize = L2Unified;
+ }
+
+ // Ok - we now have the maximum TLB, L1, L2, and L3 sizes...
+ if (L3Unified == -1) {
+ this->Features.L3CacheSize = -1;
+ } else {
+ this->Features.L3CacheSize = L3Unified;
+ }
+
+ return true;
+
+#else
+ return false;
+#endif
+}
+
+/** */
+bool SystemInformationImplementation::RetrieveCPUClockSpeed()
+{
+ bool retrieved = false;
+
+#if defined(_WIN32)
+ unsigned int uiRepetitions = 1;
+ unsigned int uiMSecPerRepetition = 50;
+ __int64 i64Total = 0;
+ __int64 i64Overhead = 0;
+
+ // Check if the TSC implementation works at all
+ if (this->Features.HasTSC &&
+ GetCyclesDifference(SystemInformationImplementation::Delay,
+ uiMSecPerRepetition) > 0) {
+ for (unsigned int nCounter = 0; nCounter < uiRepetitions; nCounter++) {
+ i64Total += GetCyclesDifference(SystemInformationImplementation::Delay,
+ uiMSecPerRepetition);
+ i64Overhead += GetCyclesDifference(
+ SystemInformationImplementation::DelayOverhead, uiMSecPerRepetition);
+ }
+
+ // Calculate the MHz speed.
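+    // (cycles per repetition divided by milliseconds per repetition gives
+    // kHz; a further division by 1000 yields MHz)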
+ i64Total -= i64Overhead;
+ i64Total /= uiRepetitions;
+ i64Total /= uiMSecPerRepetition;
+ i64Total /= 1000;
+
+ // Save the CPU speed.
+ this->CPUSpeedInMHz = (float)i64Total;
+
+ retrieved = true;
+ }
+
+  // If the TSC-based measurement is unsupported or failed, fall back to
+  // reading this value from the registry:
+ if (!retrieved) {
+ HKEY hKey = nullptr;
+ LONG err =
+ RegOpenKeyExW(HKEY_LOCAL_MACHINE,
+ L"HARDWARE\\DESCRIPTION\\System\\CentralProcessor\\0", 0,
+ KEY_READ, &hKey);
+
+ if (ERROR_SUCCESS == err) {
+ DWORD dwType = 0;
+ DWORD data = 0;
+ DWORD dwSize = sizeof(DWORD);
+
+ err =
+ RegQueryValueExW(hKey, L"~MHz", 0, &dwType, (LPBYTE)&data, &dwSize);
+
+ if (ERROR_SUCCESS == err) {
+ this->CPUSpeedInMHz = (float)data;
+ retrieved = true;
+ }
+
+ RegCloseKey(hKey);
+ hKey = nullptr;
+ }
+ }
+#endif
+
+ return retrieved;
+}
+
+/** */
+bool SystemInformationImplementation::RetrieveClassicalCPUClockSpeed()
+{
+#if USE_ASM_INSTRUCTIONS
+ LARGE_INTEGER liStart, liEnd, liCountsPerSecond;
+  double dFrequency = 0.0; // initialized in case the family is not 3, 4, or 5
+  double dDifference;
+
+ // Attempt to get a starting tick count.
+ QueryPerformanceCounter(&liStart);
+
+ __try {
+ _asm {
+ mov eax, 0x80000000
+ mov ebx, CLASSICAL_CPU_FREQ_LOOP
+ Timer_Loop:
+ bsf ecx,eax
+ dec ebx
+ jnz Timer_Loop
+ }
+ } __except (1) {
+ return false;
+ }
+
+  // Attempt to get an ending tick count.
+ QueryPerformanceCounter(&liEnd);
+
+ // Get the difference... NB: This is in seconds....
+ QueryPerformanceFrequency(&liCountsPerSecond);
+ dDifference = (((double)liEnd.QuadPart - (double)liStart.QuadPart) /
+ (double)liCountsPerSecond.QuadPart);
+
+ // Calculate the clock speed.
+ if (this->ChipID.Family == 3) {
+ // 80386 processors.... Loop time is 115 cycles!
+ dFrequency = (((CLASSICAL_CPU_FREQ_LOOP * 115) / dDifference) / 1000000);
+ } else if (this->ChipID.Family == 4) {
+ // 80486 processors.... Loop time is 47 cycles!
+ dFrequency = (((CLASSICAL_CPU_FREQ_LOOP * 47) / dDifference) / 1000000);
+ } else if (this->ChipID.Family == 5) {
+ // Pentium processors.... Loop time is 43 cycles!
+ dFrequency = (((CLASSICAL_CPU_FREQ_LOOP * 43) / dDifference) / 1000000);
+ }
+
+ // Save the clock speed.
+ this->Features.CPUSpeed = (int)dFrequency;
+
+ return true;
+
+#else
+ return false;
+#endif
+}
+
+/** */
+bool SystemInformationImplementation::RetrieveCPUExtendedLevelSupport(
+ int CPULevelToCheck)
+{
+ int cpuinfo[4] = { 0, 0, 0, 0 };
+
+ // The extended CPUID is supported by various vendors starting with the
+ // following CPU models:
+ //
+ // Manufacturer & Chip Name | Family Model Revision
+ //
+ // AMD K6, K6-2 | 5 6 x
+ // Cyrix GXm, Cyrix III "Joshua" | 5 4 x
+ // IDT C6-2 | 5 8 x
+ // VIA Cyrix III | 6 5 x
+ // Transmeta Crusoe | 5 x x
+ // Intel Pentium 4 | f x x
+ //
+
+ // We check to see if a supported processor is present...
+ if (this->ChipManufacturer == AMD) {
+ if (this->ChipID.Family < 5)
+ return false;
+ if ((this->ChipID.Family == 5) && (this->ChipID.Model < 6))
+ return false;
+ } else if (this->ChipManufacturer == Cyrix) {
+ if (this->ChipID.Family < 5)
+ return false;
+ if ((this->ChipID.Family == 5) && (this->ChipID.Model < 4))
+ return false;
+ if ((this->ChipID.Family == 6) && (this->ChipID.Model < 5))
+ return false;
+ } else if (this->ChipManufacturer == IDT) {
+ if (this->ChipID.Family < 5)
+ return false;
+ if ((this->ChipID.Family == 5) && (this->ChipID.Model < 8))
+ return false;
+ } else if (this->ChipManufacturer == Transmeta) {
+ if (this->ChipID.Family < 5)
+ return false;
+ } else if (this->ChipManufacturer == Intel) {
+ if (this->ChipID.Family < 0xf) {
+ return false;
+ }
+ }
+
+#if USE_CPUID
+ if (!call_cpuid(0x80000000, cpuinfo)) {
+ return false;
+ }
+#endif
+
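+  // Leaf 0x80000000 reports the highest supported extended CPUID leaf in eax.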
+ // Now we have to check the level wanted vs level returned...
+ int nLevelWanted = (CPULevelToCheck & 0x7FFFFFFF);
+ int nLevelReturn = (cpuinfo[0] & 0x7FFFFFFF);
+
+ // Check to see if the level provided is supported...
+ if (nLevelWanted > nLevelReturn) {
+ return false;
+ }
+
+ return true;
+}
+
+/** */
+bool SystemInformationImplementation::RetrieveExtendedCPUFeatures()
+{
+
+ // Check that we are not using an Intel processor as it does not support
+ // this.
+ if (this->ChipManufacturer == Intel) {
+ return false;
+ }
+
+ // Check to see if what we are about to do is supported...
+ if (!RetrieveCPUExtendedLevelSupport(static_cast<int>(0x80000001))) {
+ return false;
+ }
+
+#if USE_CPUID
+ int localCPUExtendedFeatures[4] = { 0, 0, 0, 0 };
+
+ if (!call_cpuid(0x80000001, localCPUExtendedFeatures)) {
+ return false;
+ }
+
+ // Retrieve the extended features of CPU present.
+ this->Features.ExtendedFeatures.Has3DNow =
+ ((localCPUExtendedFeatures[3] & 0x80000000) !=
+ 0); // 3DNow Present --> Bit 31.
+ this->Features.ExtendedFeatures.Has3DNowPlus =
+ ((localCPUExtendedFeatures[3] & 0x40000000) !=
+     0); // 3DNow+ Present --> Bit 30.
+ this->Features.ExtendedFeatures.HasSSEMMX =
+ ((localCPUExtendedFeatures[3] & 0x00400000) !=
+ 0); // SSE MMX Present --> Bit 22.
+ this->Features.ExtendedFeatures.SupportsMP =
+ ((localCPUExtendedFeatures[3] & 0x00080000) !=
+     0); // MP Capable --> Bit 19.
+
+ // Retrieve AMD specific extended features.
+ if (this->ChipManufacturer == AMD || this->ChipManufacturer == Hygon) {
+ this->Features.ExtendedFeatures.HasMMXPlus =
+ ((localCPUExtendedFeatures[3] & 0x00400000) !=
+ 0); // AMD specific: MMX-SSE --> Bit 22
+ }
+
+ // Retrieve Cyrix specific extended features.
+ if (this->ChipManufacturer == Cyrix) {
+ this->Features.ExtendedFeatures.HasMMXPlus =
+ ((localCPUExtendedFeatures[3] & 0x01000000) !=
+ 0); // Cyrix specific: Extended MMX --> Bit 24
+ }
+
+ return true;
+
+#else
+ return false;
+#endif
+}
+
+/** */
+bool SystemInformationImplementation::RetrieveProcessorSerialNumber()
+{
+ // Check to see if the processor supports the processor serial number.
+ if (!this->Features.HasSerial) {
+ return false;
+ }
+
+#if USE_CPUID
+ int SerialNumber[4];
+
+ if (!call_cpuid(3, SerialNumber)) {
+ return false;
+ }
+
+ // Process the returned information.
+ // ; eax = 3 --> ebx: top 32 bits are the processor signature bits --> NB:
+ // Transmeta only ?!?
+ // ; ecx: middle 32 bits are the processor signature bits
+ // ; edx: bottom 32 bits are the processor signature bits
+ char sn[128];
+ sprintf(sn, "%.2x%.2x-%.2x%.2x-%.2x%.2x-%.2x%.2x-%.2x%.2x-%.2x%.2x",
+ ((SerialNumber[1] & 0xff000000) >> 24),
+ ((SerialNumber[1] & 0x00ff0000) >> 16),
+ ((SerialNumber[1] & 0x0000ff00) >> 8),
+ ((SerialNumber[1] & 0x000000ff) >> 0),
+ ((SerialNumber[2] & 0xff000000) >> 24),
+ ((SerialNumber[2] & 0x00ff0000) >> 16),
+ ((SerialNumber[2] & 0x0000ff00) >> 8),
+ ((SerialNumber[2] & 0x000000ff) >> 0),
+ ((SerialNumber[3] & 0xff000000) >> 24),
+ ((SerialNumber[3] & 0x00ff0000) >> 16),
+ ((SerialNumber[3] & 0x0000ff00) >> 8),
+ ((SerialNumber[3] & 0x000000ff) >> 0));
+ this->ChipID.SerialNumber = sn;
+ return true;
+
+#else
+ return false;
+#endif
+}
+
+/** */
+bool SystemInformationImplementation::RetrieveCPUPowerManagement()
+{
+ // Check to see if what we are about to do is supported...
+ if (!RetrieveCPUExtendedLevelSupport(static_cast<int>(0x80000007))) {
+ this->Features.ExtendedFeatures.PowerManagement.HasFrequencyID = false;
+ this->Features.ExtendedFeatures.PowerManagement.HasVoltageID = false;
+ this->Features.ExtendedFeatures.PowerManagement.HasTempSenseDiode = false;
+ return false;
+ }
+
+#if USE_CPUID
+ int localCPUPowerManagement[4] = { 0, 0, 0, 0 };
+
+ if (!call_cpuid(0x80000007, localCPUPowerManagement)) {
+ return false;
+ }
+
+ // Check for the power management capabilities of the CPU.
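+  // (edx bit 0: temperature sense diode, bit 1: frequency ID control,
+  //  bit 2: voltage ID control)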
+ this->Features.ExtendedFeatures.PowerManagement.HasTempSenseDiode =
+ ((localCPUPowerManagement[3] & 0x00000001) != 0);
+ this->Features.ExtendedFeatures.PowerManagement.HasFrequencyID =
+ ((localCPUPowerManagement[3] & 0x00000002) != 0);
+ this->Features.ExtendedFeatures.PowerManagement.HasVoltageID =
+ ((localCPUPowerManagement[3] & 0x00000004) != 0);
+
+ return true;
+
+#else
+ return false;
+#endif
+}
+
+#if USE_CPUID
+// Used only in USE_CPUID implementation below.
+static void SystemInformationStripLeadingSpace(std::string& str)
+{
+ // Because some manufacturers have leading white space - we have to
+ // post-process the name.
+ std::string::size_type pos = str.find_first_not_of(" ");
+ if (pos != std::string::npos) {
+ str = str.substr(pos);
+ }
+}
+#endif
+
+/** */
+bool SystemInformationImplementation::RetrieveExtendedCPUIdentity()
+{
+ // Check to see if what we are about to do is supported...
+ if (!RetrieveCPUExtendedLevelSupport(static_cast<int>(0x80000002)))
+ return false;
+ if (!RetrieveCPUExtendedLevelSupport(static_cast<int>(0x80000003)))
+ return false;
+ if (!RetrieveCPUExtendedLevelSupport(static_cast<int>(0x80000004)))
+ return false;
+
+#if USE_CPUID
+ int CPUExtendedIdentity[12];
+
+ if (!call_cpuid(0x80000002, CPUExtendedIdentity)) {
+ return false;
+ }
+ if (!call_cpuid(0x80000003, CPUExtendedIdentity + 4)) {
+ return false;
+ }
+ if (!call_cpuid(0x80000004, CPUExtendedIdentity + 8)) {
+ return false;
+ }
+
+ // Process the returned information.
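+  // Each of leaves 0x80000002..0x80000004 returns 16 bytes (four registers)
+  // of the 48-byte processor brand string.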
+ char nbuf[49];
+ memcpy(&(nbuf[0]), &(CPUExtendedIdentity[0]), sizeof(int));
+ memcpy(&(nbuf[4]), &(CPUExtendedIdentity[1]), sizeof(int));
+ memcpy(&(nbuf[8]), &(CPUExtendedIdentity[2]), sizeof(int));
+ memcpy(&(nbuf[12]), &(CPUExtendedIdentity[3]), sizeof(int));
+ memcpy(&(nbuf[16]), &(CPUExtendedIdentity[4]), sizeof(int));
+ memcpy(&(nbuf[20]), &(CPUExtendedIdentity[5]), sizeof(int));
+ memcpy(&(nbuf[24]), &(CPUExtendedIdentity[6]), sizeof(int));
+ memcpy(&(nbuf[28]), &(CPUExtendedIdentity[7]), sizeof(int));
+ memcpy(&(nbuf[32]), &(CPUExtendedIdentity[8]), sizeof(int));
+ memcpy(&(nbuf[36]), &(CPUExtendedIdentity[9]), sizeof(int));
+ memcpy(&(nbuf[40]), &(CPUExtendedIdentity[10]), sizeof(int));
+ memcpy(&(nbuf[44]), &(CPUExtendedIdentity[11]), sizeof(int));
+ nbuf[48] = '\0';
+ this->ChipID.ProcessorName = nbuf;
+ this->ChipID.ModelName = nbuf;
+
+ // Because some manufacturers have leading white space - we have to
+ // post-process the name.
+ SystemInformationStripLeadingSpace(this->ChipID.ProcessorName);
+ return true;
+#else
+ return false;
+#endif
+}
+
+/** */
+bool SystemInformationImplementation::RetrieveClassicalCPUIdentity()
+{
+  // Start by deciding which manufacturer we are dealing with.
+ switch (this->ChipManufacturer) {
+ case Intel:
+ // Check the family / model / revision to determine the CPU ID.
+ switch (this->ChipID.Family) {
+ case 3:
+ this->ChipID.ProcessorName = "Newer i80386 family";
+ break;
+ case 4:
+ switch (this->ChipID.Model) {
+ case 0:
+ this->ChipID.ProcessorName = "i80486DX-25/33";
+ break;
+ case 1:
+ this->ChipID.ProcessorName = "i80486DX-50";
+ break;
+ case 2:
+ this->ChipID.ProcessorName = "i80486SX";
+ break;
+ case 3:
+ this->ChipID.ProcessorName = "i80486DX2";
+ break;
+ case 4:
+ this->ChipID.ProcessorName = "i80486SL";
+ break;
+ case 5:
+ this->ChipID.ProcessorName = "i80486SX2";
+ break;
+ case 7:
+ this->ChipID.ProcessorName = "i80486DX2 WriteBack";
+ break;
+ case 8:
+ this->ChipID.ProcessorName = "i80486DX4";
+ break;
+ case 9:
+ this->ChipID.ProcessorName = "i80486DX4 WriteBack";
+ break;
+ default:
+ this->ChipID.ProcessorName = "Unknown 80486 family";
+ return false;
+ }
+ break;
+ case 5:
+ switch (this->ChipID.Model) {
+ case 0:
+ this->ChipID.ProcessorName = "P5 A-Step";
+ break;
+ case 1:
+ this->ChipID.ProcessorName = "P5";
+ break;
+ case 2:
+ this->ChipID.ProcessorName = "P54C";
+ break;
+ case 3:
+ this->ChipID.ProcessorName = "P24T OverDrive";
+ break;
+ case 4:
+ this->ChipID.ProcessorName = "P55C";
+ break;
+ case 7:
+ this->ChipID.ProcessorName = "P54C";
+ break;
+ case 8:
+ this->ChipID.ProcessorName = "P55C (0.25micron)";
+ break;
+ default:
+ this->ChipID.ProcessorName = "Unknown Pentium family";
+ return false;
+ }
+ break;
+ case 6:
+ switch (this->ChipID.Model) {
+ case 0:
+ this->ChipID.ProcessorName = "P6 A-Step";
+ break;
+ case 1:
+ this->ChipID.ProcessorName = "P6";
+ break;
+ case 3:
+ this->ChipID.ProcessorName = "Pentium II (0.28 micron)";
+ break;
+ case 5:
+ this->ChipID.ProcessorName = "Pentium II (0.25 micron)";
+ break;
+ case 6:
+ this->ChipID.ProcessorName = "Pentium II With On-Die L2 Cache";
+ break;
+ case 7:
+ this->ChipID.ProcessorName = "Pentium III (0.25 micron)";
+ break;
+ case 8:
+ this->ChipID.ProcessorName =
+ "Pentium III (0.18 micron) With 256 KB On-Die L2 Cache ";
+ break;
+ case 0xa:
+ this->ChipID.ProcessorName =
+ "Pentium III (0.18 micron) With 1 Or 2 MB On-Die L2 Cache ";
+ break;
+ case 0xb:
+ this->ChipID.ProcessorName = "Pentium III (0.13 micron) With "
+ "256 Or 512 KB On-Die L2 Cache ";
+ break;
+ case 23:
+ this->ChipID.ProcessorName =
+ "Intel(R) Core(TM)2 Duo CPU T9500 @ 2.60GHz";
+ break;
+ default:
+ this->ChipID.ProcessorName = "Unknown P6 family";
+ return false;
+ }
+ break;
+ case 7:
+ this->ChipID.ProcessorName = "Intel Merced (IA-64)";
+ break;
+ case 0xf:
+ // Check the extended family bits...
+ switch (this->ChipID.ExtendedFamily) {
+ case 0:
+ switch (this->ChipID.Model) {
+ case 0:
+ this->ChipID.ProcessorName = "Pentium IV (0.18 micron)";
+ break;
+ case 1:
+ this->ChipID.ProcessorName = "Pentium IV (0.18 micron)";
+ break;
+ case 2:
+ this->ChipID.ProcessorName = "Pentium IV (0.13 micron)";
+ break;
+ default:
+ this->ChipID.ProcessorName = "Unknown Pentium 4 family";
+ return false;
+ }
+ break;
+ case 1:
+ this->ChipID.ProcessorName = "Intel McKinley (IA-64)";
+ break;
+ default:
+ this->ChipID.ProcessorName = "Pentium";
+ }
+ break;
+ default:
+ this->ChipID.ProcessorName = "Unknown Intel family";
+ return false;
+ }
+ break;
+
+ case AMD:
+ // Check the family / model / revision to determine the CPU ID.
+ switch (this->ChipID.Family) {
+ case 4:
+ switch (this->ChipID.Model) {
+ case 3:
+ this->ChipID.ProcessorName = "80486DX2";
+ break;
+ case 7:
+ this->ChipID.ProcessorName = "80486DX2 WriteBack";
+ break;
+ case 8:
+ this->ChipID.ProcessorName = "80486DX4";
+ break;
+ case 9:
+ this->ChipID.ProcessorName = "80486DX4 WriteBack";
+ break;
+ case 0xe:
+ this->ChipID.ProcessorName = "5x86";
+ break;
+ case 0xf:
+ this->ChipID.ProcessorName = "5x86WB";
+ break;
+ default:
+ this->ChipID.ProcessorName = "Unknown 80486 family";
+ return false;
+ }
+ break;
+ case 5:
+ switch (this->ChipID.Model) {
+ case 0:
+ this->ChipID.ProcessorName = "SSA5 (PR75, PR90 = PR100)";
+ break;
+ case 1:
+ this->ChipID.ProcessorName = "5k86 (PR120 = PR133)";
+ break;
+ case 2:
+ this->ChipID.ProcessorName = "5k86 (PR166)";
+ break;
+ case 3:
+ this->ChipID.ProcessorName = "5k86 (PR200)";
+ break;
+ case 6:
+ this->ChipID.ProcessorName = "K6 (0.30 micron)";
+ break;
+ case 7:
+ this->ChipID.ProcessorName = "K6 (0.25 micron)";
+ break;
+ case 8:
+ this->ChipID.ProcessorName = "K6-2";
+ break;
+ case 9:
+ this->ChipID.ProcessorName = "K6-III";
+ break;
+ case 0xd:
+ this->ChipID.ProcessorName = "K6-2+ or K6-III+ (0.18 micron)";
+ break;
+ default:
+ this->ChipID.ProcessorName = "Unknown 80586 family";
+ return false;
+ }
+ break;
+ case 6:
+ switch (this->ChipID.Model) {
+ case 1:
+ this->ChipID.ProcessorName = "Athlon- (0.25 micron)";
+ break;
+ case 2:
+ this->ChipID.ProcessorName = "Athlon- (0.18 micron)";
+ break;
+ case 3:
+ this->ChipID.ProcessorName = "Duron- (SF core)";
+ break;
+ case 4:
+ this->ChipID.ProcessorName = "Athlon- (Thunderbird core)";
+ break;
+ case 6:
+ this->ChipID.ProcessorName = "Athlon- (Palomino core)";
+ break;
+ case 7:
+ this->ChipID.ProcessorName = "Duron- (Morgan core)";
+ break;
+ case 8:
+ if (this->Features.ExtendedFeatures.SupportsMP)
+ this->ChipID.ProcessorName = "Athlon - MP (Thoroughbred core)";
+ else
+ this->ChipID.ProcessorName = "Athlon - XP (Thoroughbred core)";
+ break;
+ default:
+ this->ChipID.ProcessorName = "Unknown K7 family";
+ return false;
+ }
+ break;
+ default:
+ this->ChipID.ProcessorName = "Unknown AMD family";
+ return false;
+ }
+ break;
+
+ case Hygon:
+ this->ChipID.ProcessorName = "Unknown Hygon family";
+ return false;
+
+ case Transmeta:
+ switch (this->ChipID.Family) {
+ case 5:
+ switch (this->ChipID.Model) {
+ case 4:
+ this->ChipID.ProcessorName = "Crusoe TM3x00 and TM5x00";
+ break;
+ default:
+ this->ChipID.ProcessorName = "Unknown Crusoe family";
+ return false;
+ }
+ break;
+ default:
+ this->ChipID.ProcessorName = "Unknown Transmeta family";
+ return false;
+ }
+ break;
+
+ case Rise:
+ switch (this->ChipID.Family) {
+ case 5:
+ switch (this->ChipID.Model) {
+ case 0:
+ this->ChipID.ProcessorName = "mP6 (0.25 micron)";
+ break;
+ case 2:
+ this->ChipID.ProcessorName = "mP6 (0.18 micron)";
+ break;
+ default:
+ this->ChipID.ProcessorName = "Unknown Rise family";
+ return false;
+ }
+ break;
+ default:
+ this->ChipID.ProcessorName = "Unknown Rise family";
+ return false;
+ }
+ break;
+
+ case UMC:
+ switch (this->ChipID.Family) {
+ case 4:
+ switch (this->ChipID.Model) {
+ case 1:
+ this->ChipID.ProcessorName = "U5D";
+ break;
+ case 2:
+ this->ChipID.ProcessorName = "U5S";
+ break;
+ default:
+ this->ChipID.ProcessorName = "Unknown UMC family";
+ return false;
+ }
+ break;
+ default:
+ this->ChipID.ProcessorName = "Unknown UMC family";
+ return false;
+ }
+ break;
+
+ case IDT:
+ switch (this->ChipID.Family) {
+ case 5:
+ switch (this->ChipID.Model) {
+ case 4:
+ this->ChipID.ProcessorName = "C6";
+ break;
+ case 8:
+ this->ChipID.ProcessorName = "C2";
+ break;
+ case 9:
+ this->ChipID.ProcessorName = "C3";
+ break;
+ default:
+ this->ChipID.ProcessorName = "Unknown IDT\\Centaur family";
+ return false;
+ }
+ break;
+ case 6:
+ switch (this->ChipID.Model) {
+ case 6:
+ this->ChipID.ProcessorName = "VIA Cyrix III - Samuel";
+ break;
+ default:
+ this->ChipID.ProcessorName = "Unknown IDT\\Centaur family";
+ return false;
+ }
+ break;
+ default:
+ this->ChipID.ProcessorName = "Unknown IDT\\Centaur family";
+ return false;
+ }
+ break;
+
+ case Cyrix:
+ switch (this->ChipID.Family) {
+ case 4:
+ switch (this->ChipID.Model) {
+ case 4:
+ this->ChipID.ProcessorName = "MediaGX GX = GXm";
+ break;
+ case 9:
+ this->ChipID.ProcessorName = "5x86";
+ break;
+ default:
+ this->ChipID.ProcessorName = "Unknown Cx5x86 family";
+ return false;
+ }
+ break;
+ case 5:
+ switch (this->ChipID.Model) {
+ case 2:
+ this->ChipID.ProcessorName = "Cx6x86";
+ break;
+ case 4:
+ this->ChipID.ProcessorName = "MediaGX GXm";
+ break;
+ default:
+ this->ChipID.ProcessorName = "Unknown Cx6x86 family";
+ return false;
+ }
+ break;
+ case 6:
+ switch (this->ChipID.Model) {
+ case 0:
+ this->ChipID.ProcessorName = "6x86MX";
+ break;
+ case 5:
+ this->ChipID.ProcessorName = "Cyrix M2 Core";
+ break;
+ case 6:
+ this->ChipID.ProcessorName = "WinChip C5A Core";
+ break;
+ case 7:
+ this->ChipID.ProcessorName = "WinChip C5B\\C5C Core";
+ break;
+ case 8:
+ this->ChipID.ProcessorName = "WinChip C5C-T Core";
+ break;
+ default:
+ this->ChipID.ProcessorName = "Unknown 6x86MX\\Cyrix III family";
+ return false;
+ }
+ break;
+ default:
+ this->ChipID.ProcessorName = "Unknown Cyrix family";
+ return false;
+ }
+ break;
+
+ case NexGen:
+ switch (this->ChipID.Family) {
+ case 5:
+ switch (this->ChipID.Model) {
+ case 0:
+ this->ChipID.ProcessorName = "Nx586 or Nx586FPU";
+ break;
+ default:
+ this->ChipID.ProcessorName = "Unknown NexGen family";
+ return false;
+ }
+ break;
+ default:
+ this->ChipID.ProcessorName = "Unknown NexGen family";
+ return false;
+ }
+ break;
+
+ case NSC:
+ this->ChipID.ProcessorName = "Cx486SLC \\ DLC \\ Cx486S A-Step";
+ break;
+
+ case Sun:
+ case IBM:
+ case Motorola:
+ case HP:
+ case UnknownManufacturer:
+ default:
+ this->ChipID.ProcessorName =
+ "Unknown family"; // We cannot identify the processor.
+ return false;
+ }
+
+ return true;
+}
+
+/** Extract a value from the CPUInfo file */
+std::string SystemInformationImplementation::ExtractValueFromCpuInfoFile(
+ std::string buffer, const char* word, size_t init)
+{
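+  // e.g. for word "cpu MHz" and a /proc/cpuinfo line "cpu MHz : 1800.000",
+  // this returns "1800.000".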
+ size_t pos = buffer.find(word, init);
+ if (pos != std::string::npos) {
+ this->CurrentPositionInFile = pos;
+ pos = buffer.find(":", pos);
+ size_t pos2 = buffer.find("\n", pos);
+ if (pos != std::string::npos && pos2 != std::string::npos) {
+ // It may happen that the beginning matches, but this is still not the
+ // requested key.
+ // An example is looking for "cpu" when "cpu family" comes first. So we
+ // check that
+ // we have only spaces from here to pos, otherwise we search again.
+ for (size_t i = this->CurrentPositionInFile + strlen(word); i < pos;
+ ++i) {
+ if (buffer[i] != ' ' && buffer[i] != '\t') {
+ return this->ExtractValueFromCpuInfoFile(buffer, word, pos2);
+ }
+ }
+ return buffer.substr(pos + 2, pos2 - pos - 2);
+ }
+ }
+ this->CurrentPositionInFile = std::string::npos;
+ return "";
+}
+
+/** Query for the cpu status */
+bool SystemInformationImplementation::RetreiveInformationFromCpuInfoFile()
+{
+ this->NumberOfLogicalCPU = 0;
+ this->NumberOfPhysicalCPU = 0;
+ std::string buffer;
+
+ FILE* fd = fopen("/proc/cpuinfo", "r");
+ if (!fd) {
+ std::cout << "Problem opening /proc/cpuinfo" << std::endl;
+ return false;
+ }
+
+ size_t fileSize = 0;
+ while (!feof(fd)) {
+ buffer += static_cast<char>(fgetc(fd));
+ fileSize++;
+ }
+ fclose(fd);
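+  // Drop the spurious character appended when fgetc() hit EOF, along with
+  // the file's trailing newline.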
+ buffer.resize(fileSize - 2);
+ // Number of logical CPUs (combination of multiple processors, multi-core
+ // and SMT)
+ size_t pos = buffer.find("processor\t");
+ while (pos != std::string::npos) {
+ this->NumberOfLogicalCPU++;
+ pos = buffer.find("processor\t", pos + 1);
+ }
+
+#ifdef __linux
+ // Count sockets.
+ std::set<int> PhysicalIDs;
+ std::string idc = this->ExtractValueFromCpuInfoFile(buffer, "physical id");
+ while (this->CurrentPositionInFile != std::string::npos) {
+ int id = atoi(idc.c_str());
+ PhysicalIDs.insert(id);
+ idc = this->ExtractValueFromCpuInfoFile(buffer, "physical id",
+ this->CurrentPositionInFile + 1);
+ }
+ uint64_t NumberOfSockets = PhysicalIDs.size();
+ NumberOfSockets = std::max(NumberOfSockets, (uint64_t)1);
+ // Physical ids returned by Linux don't distinguish cores.
+ // We want to record the total number of cores in this->NumberOfPhysicalCPU
+ // (checking only the first proc)
+ std::string Cores = this->ExtractValueFromCpuInfoFile(buffer, "cpu cores");
+ unsigned int NumberOfCoresPerSocket = (unsigned int)atoi(Cores.c_str());
+ NumberOfCoresPerSocket = std::max(NumberOfCoresPerSocket, 1u);
+ this->NumberOfPhysicalCPU =
+ NumberOfCoresPerSocket * (unsigned int)NumberOfSockets;
+
+#else // __CYGWIN__
+ // does not have "physical id" entries, neither "cpu cores"
+ // this has to be fixed for hyper-threading.
+ std::string cpucount =
+ this->ExtractValueFromCpuInfoFile(buffer, "cpu count");
+ this->NumberOfPhysicalCPU = this->NumberOfLogicalCPU =
+ atoi(cpucount.c_str());
+#endif
+  // We must report at least one physical CPU; if this were 0 we would
+  // divide by zero below. Better a bad answer than a crash.
+ if (this->NumberOfPhysicalCPU <= 0) {
+ this->NumberOfPhysicalCPU = 1;
+ }
+ // LogicalProcessorsPerPhysical>1 => SMT.
+ this->Features.ExtendedFeatures.LogicalProcessorsPerPhysical =
+ this->NumberOfLogicalCPU / this->NumberOfPhysicalCPU;
+
+ // CPU speed (checking only the first processor)
+ std::string CPUSpeed = this->ExtractValueFromCpuInfoFile(buffer, "cpu MHz");
+ if (!CPUSpeed.empty()) {
+ this->CPUSpeedInMHz = static_cast<float>(atof(CPUSpeed.c_str()));
+ }
+#ifdef __linux
+ else {
+ // Linux Sparc: CPU speed is in Hz and encoded in hexadecimal
+ CPUSpeed = this->ExtractValueFromCpuInfoFile(buffer, "Cpu0ClkTck");
+ this->CPUSpeedInMHz =
+ static_cast<float>(strtoull(CPUSpeed.c_str(), 0, 16)) / 1000000.0f;
+ }
+#endif
+
+ // Chip family
+ std::string familyStr =
+ this->ExtractValueFromCpuInfoFile(buffer, "cpu family");
+ if (familyStr.empty()) {
+ familyStr = this->ExtractValueFromCpuInfoFile(buffer, "CPU architecture");
+ }
+ this->ChipID.Family = atoi(familyStr.c_str());
+
+ // Chip Vendor
+ this->ChipID.Vendor = this->ExtractValueFromCpuInfoFile(buffer, "vendor_id");
+ this->FindManufacturer(familyStr);
+
+ // second try for setting family
+ if (this->ChipID.Family == 0 && this->ChipManufacturer == HP) {
+ if (familyStr == "PA-RISC 1.1a")
+ this->ChipID.Family = 0x11a;
+ else if (familyStr == "PA-RISC 2.0")
+ this->ChipID.Family = 0x200;
+ // If you really get CMake to work on a machine not belonging to
+ // any of those families I owe you a dinner if you get it to
+ // contribute nightly builds regularly.
+ }
+
+ // Chip Model
+ this->ChipID.Model =
+ atoi(this->ExtractValueFromCpuInfoFile(buffer, "model").c_str());
+ if (!this->RetrieveClassicalCPUIdentity()) {
+ // Some platforms (e.g. PA-RISC) tell us their CPU name here.
+ // Note: x86 does not.
+ std::string cpuname = this->ExtractValueFromCpuInfoFile(buffer, "cpu");
+ if (!cpuname.empty()) {
+ this->ChipID.ProcessorName = cpuname;
+ }
+ }
+
+ // Chip revision
+ std::string cpurev = this->ExtractValueFromCpuInfoFile(buffer, "stepping");
+ if (cpurev.empty()) {
+ cpurev = this->ExtractValueFromCpuInfoFile(buffer, "CPU revision");
+ }
+ this->ChipID.Revision = atoi(cpurev.c_str());
+
+ // Chip Model Name
+ this->ChipID.ModelName =
+ this->ExtractValueFromCpuInfoFile(buffer, "model name");
+
+ // L1 Cache size
+ // Different architectures may show different names for the caches.
+ // Sum up everything we find.
+ std::vector<const char*> cachename;
+ cachename.clear();
+
+ cachename.push_back("cache size"); // e.g. x86
+ cachename.push_back("I-cache"); // e.g. PA-RISC
+ cachename.push_back("D-cache"); // e.g. PA-RISC
+
+ this->Features.L1CacheSize = 0;
+ for (size_t index = 0; index < cachename.size(); index++) {
+ std::string cacheSize =
+ this->ExtractValueFromCpuInfoFile(buffer, cachename[index]);
+ if (!cacheSize.empty()) {
+ pos = cacheSize.find(" KB");
+ if (pos != std::string::npos) {
+ cacheSize = cacheSize.substr(0, pos);
+ }
+ this->Features.L1CacheSize += atoi(cacheSize.c_str());
+ }
+ }
+
+ // processor feature flags (probably x86 specific)
+ std::string cpuflags = this->ExtractValueFromCpuInfoFile(buffer, "flags");
+  if (!cpuflags.empty()) {
+    // now we can match each flag as space + flag + space
+    cpuflags = " " + cpuflags + " ";
+ if ((cpuflags.find(" fpu ") != std::string::npos)) {
+ this->Features.HasFPU = true;
+ }
+ if ((cpuflags.find(" tsc ") != std::string::npos)) {
+ this->Features.HasTSC = true;
+ }
+ if ((cpuflags.find(" mmx ") != std::string::npos)) {
+ this->Features.HasMMX = true;
+ }
+ if ((cpuflags.find(" sse ") != std::string::npos)) {
+ this->Features.HasSSE = true;
+ }
+ if ((cpuflags.find(" sse2 ") != std::string::npos)) {
+ this->Features.HasSSE2 = true;
+ }
+ if ((cpuflags.find(" apic ") != std::string::npos)) {
+ this->Features.HasAPIC = true;
+ }
+ if ((cpuflags.find(" cmov ") != std::string::npos)) {
+ this->Features.HasCMOV = true;
+ }
+ if ((cpuflags.find(" mtrr ") != std::string::npos)) {
+ this->Features.HasMTRR = true;
+ }
+ if ((cpuflags.find(" acpi ") != std::string::npos)) {
+ this->Features.HasACPI = true;
+ }
+ if ((cpuflags.find(" 3dnow ") != std::string::npos)) {
+ this->Features.ExtendedFeatures.Has3DNow = true;
+ }
+ }
+
+ return true;
+}
+
+bool SystemInformationImplementation::QueryProcessorBySysconf()
+{
+#if defined(_SC_NPROC_ONLN) && !defined(_SC_NPROCESSORS_ONLN)
+// IRIX names this constant slightly differently
+# define _SC_NPROCESSORS_ONLN _SC_NPROC_ONLN
+#endif
+
+#ifdef _SC_NPROCESSORS_ONLN
+ long c = sysconf(_SC_NPROCESSORS_ONLN);
+ if (c <= 0) {
+ return false;
+ }
+
+ this->NumberOfPhysicalCPU = static_cast<unsigned int>(c);
+ this->NumberOfLogicalCPU = this->NumberOfPhysicalCPU;
+
+ return true;
+#else
+ return false;
+#endif
+}
+
+bool SystemInformationImplementation::QueryProcessor()
+{
+ return this->QueryProcessorBySysconf();
+}
+
+/**
+Get total system RAM in units of KiB.
+*/
+SystemInformation::LongLong
+SystemInformationImplementation::GetHostMemoryTotal()
+{
+#if defined(_WIN32)
+# if defined(_MSC_VER) && _MSC_VER < 1300
+ MEMORYSTATUS stat;
+ stat.dwLength = sizeof(stat);
+ GlobalMemoryStatus(&stat);
+ return stat.dwTotalPhys / 1024;
+# else
+ MEMORYSTATUSEX statex;
+ statex.dwLength = sizeof(statex);
+ GlobalMemoryStatusEx(&statex);
+ return statex.ullTotalPhys / 1024;
+# endif
+#elif defined(__linux)
+ SystemInformation::LongLong memTotal = 0;
+ int ierr = GetFieldFromFile("/proc/meminfo", "MemTotal:", memTotal);
+ if (ierr) {
+ return -1;
+ }
+ return memTotal;
+#elif defined(__APPLE__)
+ uint64_t mem;
+ size_t len = sizeof(mem);
+ int ierr = sysctlbyname("hw.memsize", &mem, &len, nullptr, 0);
+ if (ierr) {
+ return -1;
+ }
+ return mem / 1024;
+#else
+ return 0;
+#endif
+}
+
+/**
+Get total system RAM in units of KiB. This may differ from the
+host total if a host-wide resource limit is applied.
+*/
+SystemInformation::LongLong
+SystemInformationImplementation::GetHostMemoryAvailable(
+ const char* hostLimitEnvVarName)
+{
+ SystemInformation::LongLong memTotal = this->GetHostMemoryTotal();
+
+  // The following mechanism is provided for systems that
+  // apply resource limits across groups of processes.
+  // This is of use on certain SMP systems (e.g. SGI UV)
+  // where the host has a large amount of RAM but a given user's
+  // access to it is severely restricted. The system will
+  // apply a limit across a set of processes. Units are in KiB.
+ if (hostLimitEnvVarName) {
+ const char* hostLimitEnvVarValue = getenv(hostLimitEnvVarName);
+ if (hostLimitEnvVarValue) {
+ SystemInformation::LongLong hostLimit =
+ atoLongLong(hostLimitEnvVarValue);
+ if (hostLimit > 0) {
+ memTotal = min(hostLimit, memTotal);
+ }
+ }
+ }
+
+ return memTotal;
+}
+
+/**
+Get total system RAM in units of KiB. This may differ from the
+host total if a per-process resource limit is applied.
+*/
+SystemInformation::LongLong
+SystemInformationImplementation::GetProcMemoryAvailable(
+ const char* hostLimitEnvVarName, const char* procLimitEnvVarName)
+{
+ SystemInformation::LongLong memAvail =
+ this->GetHostMemoryAvailable(hostLimitEnvVarName);
+
+  // The following mechanism is provided for systems where rlimits
+ // are not employed. Units are in KiB.
+ if (procLimitEnvVarName) {
+ const char* procLimitEnvVarValue = getenv(procLimitEnvVarName);
+ if (procLimitEnvVarValue) {
+ SystemInformation::LongLong procLimit =
+ atoLongLong(procLimitEnvVarValue);
+ if (procLimit > 0) {
+ memAvail = min(procLimit, memAvail);
+ }
+ }
+ }
+
+#if defined(__linux)
+ int ierr;
+ ResourceLimitType rlim;
+ ierr = GetResourceLimit(RLIMIT_DATA, &rlim);
+ if ((ierr == 0) && (rlim.rlim_cur != RLIM_INFINITY)) {
+ memAvail =
+ min((SystemInformation::LongLong)rlim.rlim_cur / 1024, memAvail);
+ }
+
+ ierr = GetResourceLimit(RLIMIT_AS, &rlim);
+ if ((ierr == 0) && (rlim.rlim_cur != RLIM_INFINITY)) {
+ memAvail =
+ min((SystemInformation::LongLong)rlim.rlim_cur / 1024, memAvail);
+ }
+#elif defined(__APPLE__)
+ struct rlimit rlim;
+ int ierr;
+ ierr = getrlimit(RLIMIT_DATA, &rlim);
+ if ((ierr == 0) && (rlim.rlim_cur != RLIM_INFINITY)) {
+ memAvail =
+ min((SystemInformation::LongLong)rlim.rlim_cur / 1024, memAvail);
+ }
+
+ ierr = getrlimit(RLIMIT_RSS, &rlim);
+ if ((ierr == 0) && (rlim.rlim_cur != RLIM_INFINITY)) {
+ memAvail =
+ min((SystemInformation::LongLong)rlim.rlim_cur / 1024, memAvail);
+ }
+#endif
+
+ return memAvail;
+}
+
+/**
+Get RAM used by all processes in the host, in units of KiB.
+*/
+SystemInformation::LongLong
+SystemInformationImplementation::GetHostMemoryUsed()
+{
+#if defined(_WIN32)
+# if defined(_MSC_VER) && _MSC_VER < 1300
+ MEMORYSTATUS stat;
+ stat.dwLength = sizeof(stat);
+ GlobalMemoryStatus(&stat);
+ return (stat.dwTotalPhys - stat.dwAvailPhys) / 1024;
+# else
+ MEMORYSTATUSEX statex;
+ statex.dwLength = sizeof(statex);
+ GlobalMemoryStatusEx(&statex);
+ return (statex.ullTotalPhys - statex.ullAvailPhys) / 1024;
+# endif
+#elif defined(__linux)
+ // First try to use MemAvailable, but it only works on newer kernels
+ const char* names2[3] = { "MemTotal:", "MemAvailable:", nullptr };
+ SystemInformation::LongLong values2[2] = { SystemInformation::LongLong(0) };
+ int ierr = GetFieldsFromFile("/proc/meminfo", names2, values2);
+ if (ierr) {
+ const char* names4[5] = { "MemTotal:", "MemFree:", "Buffers:", "Cached:",
+ nullptr };
+ SystemInformation::LongLong values4[4] = { SystemInformation::LongLong(
+ 0) };
+ ierr = GetFieldsFromFile("/proc/meminfo", names4, values4);
+ if (ierr) {
+ return ierr;
+ }
+ SystemInformation::LongLong& memTotal = values4[0];
+ SystemInformation::LongLong& memFree = values4[1];
+ SystemInformation::LongLong& memBuffers = values4[2];
+ SystemInformation::LongLong& memCached = values4[3];
+ return memTotal - memFree - memBuffers - memCached;
+ }
+ SystemInformation::LongLong& memTotal = values2[0];
+ SystemInformation::LongLong& memAvail = values2[1];
+ return memTotal - memAvail;
+#elif defined(__APPLE__)
+ SystemInformation::LongLong psz = getpagesize();
+ if (psz < 1) {
+ return -1;
+ }
+ const char* names[3] = { "Pages wired down:", "Pages active:", nullptr };
+ SystemInformation::LongLong values[2] = { SystemInformation::LongLong(0) };
+ int ierr = GetFieldsFromCommand("vm_stat", names, values);
+ if (ierr) {
+ return -1;
+ }
+ SystemInformation::LongLong& vmWired = values[0];
+ SystemInformation::LongLong& vmActive = values[1];
+ return ((vmActive + vmWired) * psz) / 1024;
+#else
+ return 0;
+#endif
+}
+
+/**
+Get system RAM used by the calling process, in units of KiB.
+*/
+SystemInformation::LongLong
+SystemInformationImplementation::GetProcMemoryUsed()
+{
+#if defined(_WIN32) && defined(KWSYS_SYS_HAS_PSAPI)
+ long pid = GetCurrentProcessId();
+ HANDLE hProc;
+ hProc = OpenProcess(PROCESS_QUERY_INFORMATION | PROCESS_VM_READ, false, pid);
+ if (hProc == 0) {
+ return -1;
+ }
+ PROCESS_MEMORY_COUNTERS pmc;
+ int ok = GetProcessMemoryInfo(hProc, &pmc, sizeof(pmc));
+ CloseHandle(hProc);
+ if (!ok) {
+ return -2;
+ }
+ return pmc.WorkingSetSize / 1024;
+#elif defined(__linux)
+ SystemInformation::LongLong memUsed = 0;
+ int ierr = GetFieldFromFile("/proc/self/status", "VmRSS:", memUsed);
+ if (ierr) {
+ return -1;
+ }
+ return memUsed;
+#elif defined(__APPLE__)
+ SystemInformation::LongLong memUsed = 0;
+ pid_t pid = getpid();
+ std::ostringstream oss;
+ oss << "ps -o rss= -p " << pid;
+ FILE* file = popen(oss.str().c_str(), "r");
+ if (file == nullptr) {
+ return -1;
+ }
+ oss.str("");
+ while (!feof(file) && !ferror(file)) {
+ char buf[256] = { '\0' };
+ errno = 0;
+ size_t nRead = fread(buf, 1, 256, file);
+ if (ferror(file) && (errno == EINTR)) {
+ clearerr(file);
+ }
+ if (nRead)
+ oss << buf;
+ }
+ int ierr = ferror(file);
+ pclose(file);
+ if (ierr) {
+ return -2;
+ }
+ std::istringstream iss(oss.str());
+ iss >> memUsed;
+ return memUsed;
+#else
+ return 0;
+#endif
+}
+
+double SystemInformationImplementation::GetLoadAverage()
+{
+#if defined(KWSYS_CXX_HAS_GETLOADAVG)
+ double loadavg[3] = { 0.0, 0.0, 0.0 };
+ if (getloadavg(loadavg, 3) > 0) {
+ return loadavg[0];
+ }
+ return -0.0;
+#elif defined(KWSYS_SYSTEMINFORMATION_USE_GetSystemTimes)
+ // Old windows.h headers do not provide GetSystemTimes.
+ typedef BOOL(WINAPI * GetSystemTimesType)(LPFILETIME, LPFILETIME,
+ LPFILETIME);
+ static GetSystemTimesType pGetSystemTimes =
+ (GetSystemTimesType)GetProcAddress(GetModuleHandleW(L"kernel32"),
+ "GetSystemTimes");
+ FILETIME idleTime, kernelTime, userTime;
+ if (pGetSystemTimes && pGetSystemTimes(&idleTime, &kernelTime, &userTime)) {
+ unsigned __int64 const idleTicks = fileTimeToUInt64(idleTime);
+ unsigned __int64 const totalTicks =
+ fileTimeToUInt64(kernelTime) + fileTimeToUInt64(userTime);
+ return calculateCPULoad(idleTicks, totalTicks) * GetNumberOfPhysicalCPU();
+ }
+ return -0.0;
+#else
+ // Not implemented on this platform.
+ return -0.0;
+#endif
+}
+
+/**
+Get the process id of the running process.
+*/
+SystemInformation::LongLong SystemInformationImplementation::GetProcessId()
+{
+#if defined(_WIN32)
+ return GetCurrentProcessId();
+#elif defined(__linux) || defined(__APPLE__) || defined(__OpenBSD__) || \
+ defined(__FreeBSD__) || defined(__NetBSD__) || defined(__DragonFly__)
+ return getpid();
+#else
+ return -1;
+#endif
+}
+
+/**
+ * Used in GetProgramStack(...) below
+ */
+#if defined(_WIN32_WINNT) && _WIN32_WINNT >= 0x0600 && defined(_MSC_VER) && \
+ _MSC_VER >= 1800
+# define KWSYS_SYSTEMINFORMATION_HAS_DBGHELP
+# define TRACE_MAX_STACK_FRAMES 1024
+# define TRACE_MAX_FUNCTION_NAME_LENGTH 1024
+# pragma warning(push)
+# pragma warning(disable : 4091) /* 'typedef ': ignored on left of '' */
+# include "dbghelp.h"
+# pragma warning(pop)
+#endif
+
+/**
+return current program stack in a string
+demangle cxx symbols if possible.
+*/
+std::string SystemInformationImplementation::GetProgramStack(int firstFrame,
+ int wholePath)
+{
+ std::ostringstream oss;
+ std::string programStack = "";
+
+#ifdef KWSYS_SYSTEMINFORMATION_HAS_DBGHELP
+ (void)wholePath;
+
+ void* stack[TRACE_MAX_STACK_FRAMES];
+ HANDLE process = GetCurrentProcess();
+ SymInitialize(process, nullptr, TRUE);
+ WORD numberOfFrames =
+ CaptureStackBackTrace(firstFrame, TRACE_MAX_STACK_FRAMES, stack, nullptr);
+ SYMBOL_INFO* symbol = static_cast<SYMBOL_INFO*>(
+ malloc(sizeof(SYMBOL_INFO) +
+ (TRACE_MAX_FUNCTION_NAME_LENGTH - 1) * sizeof(TCHAR)));
+ symbol->MaxNameLen = TRACE_MAX_FUNCTION_NAME_LENGTH;
+ symbol->SizeOfStruct = sizeof(SYMBOL_INFO);
+ DWORD displacement;
+ IMAGEHLP_LINE64 line;
+ line.SizeOfStruct = sizeof(IMAGEHLP_LINE64);
+ for (int i = 0; i < numberOfFrames; i++) {
+ DWORD64 address = reinterpret_cast<DWORD64>(stack[i]);
+ SymFromAddr(process, address, nullptr, symbol);
+ if (SymGetLineFromAddr64(process, address, &displacement, &line)) {
+ oss << " at " << symbol->Name << " in " << line.FileName << " line "
+ << line.LineNumber << std::endl;
+ } else {
+ oss << " at " << symbol->Name << std::endl;
+ }
+ }
+ free(symbol);
+
+#else
+ programStack += ""
+# if !defined(KWSYS_SYSTEMINFORMATION_HAS_BACKTRACE)
+ "WARNING: The stack could not be examined "
+ "because backtrace is not supported.\n"
+# elif !defined(KWSYS_SYSTEMINFORMATION_HAS_DEBUG_BUILD)
+ "WARNING: The stack trace will not use advanced "
+ "capabilities because this is a release build.\n"
+# else
+# if !defined(KWSYS_SYSTEMINFORMATION_HAS_SYMBOL_LOOKUP)
+ "WARNING: Function names will not be demangled "
+ "because dladdr is not available.\n"
+# endif
+# if !defined(KWSYS_SYSTEMINFORMATION_HAS_CPP_DEMANGLE)
+ "WARNING: Function names will not be demangled "
+ "because cxxabi is not available.\n"
+# endif
+# endif
+ ;
+
+# if defined(KWSYS_SYSTEMINFORMATION_HAS_BACKTRACE)
+ void* stackSymbols[256];
+ int nFrames = backtrace(stackSymbols, 256);
+ for (int i = firstFrame; i < nFrames; ++i) {
+ SymbolProperties symProps;
+ symProps.SetReportPath(wholePath);
+ symProps.Initialize(stackSymbols[i]);
+ oss << symProps << std::endl;
+ }
+# else
+ (void)firstFrame;
+ (void)wholePath;
+# endif
+#endif
+
+ programStack += oss.str();
+
+ return programStack;
+}
+
+/**
+when set print stack trace in response to common signals.
+*/
+void SystemInformationImplementation::SetStackTraceOnError(int enable)
+{
+#if !defined(_WIN32) && !defined(__MINGW32__) && !defined(__CYGWIN__)
+ static int saOrigValid = 0;
+ static struct sigaction saABRTOrig;
+ static struct sigaction saSEGVOrig;
+ static struct sigaction saTERMOrig;
+ static struct sigaction saINTOrig;
+ static struct sigaction saILLOrig;
+ static struct sigaction saBUSOrig;
+ static struct sigaction saFPEOrig;
+
+ if (enable && !saOrigValid) {
+ // save the current actions
+ sigaction(SIGABRT, nullptr, &saABRTOrig);
+ sigaction(SIGSEGV, nullptr, &saSEGVOrig);
+ sigaction(SIGTERM, nullptr, &saTERMOrig);
+ sigaction(SIGINT, nullptr, &saINTOrig);
+ sigaction(SIGILL, nullptr, &saILLOrig);
+ sigaction(SIGBUS, nullptr, &saBUSOrig);
+ sigaction(SIGFPE, nullptr, &saFPEOrig);
+
+    // mark the saved handlers as valid so they can be restored later
+ saOrigValid = 1;
+
+ // install ours
+ struct sigaction sa;
+ sa.sa_sigaction = (SigAction)StacktraceSignalHandler;
+ sa.sa_flags = SA_SIGINFO | SA_RESETHAND;
+# ifdef SA_RESTART
+ sa.sa_flags |= SA_RESTART;
+# endif
+ sigemptyset(&sa.sa_mask);
+
+ sigaction(SIGABRT, &sa, nullptr);
+ sigaction(SIGSEGV, &sa, nullptr);
+ sigaction(SIGTERM, &sa, nullptr);
+ sigaction(SIGINT, &sa, nullptr);
+ sigaction(SIGILL, &sa, nullptr);
+ sigaction(SIGBUS, &sa, nullptr);
+ sigaction(SIGFPE, &sa, nullptr);
+ } else if (!enable && saOrigValid) {
+ // restore previous actions
+ sigaction(SIGABRT, &saABRTOrig, nullptr);
+ sigaction(SIGSEGV, &saSEGVOrig, nullptr);
+ sigaction(SIGTERM, &saTERMOrig, nullptr);
+ sigaction(SIGINT, &saINTOrig, nullptr);
+ sigaction(SIGILL, &saILLOrig, nullptr);
+ sigaction(SIGBUS, &saBUSOrig, nullptr);
+ sigaction(SIGFPE, &saFPEOrig, nullptr);
+
+    // mark the saved handlers as no longer valid
+ saOrigValid = 0;
+ }
+#else
+ // avoid warning C4100
+ (void)enable;
+#endif
+}
+
+bool SystemInformationImplementation::QueryWindowsMemory()
+{
+#if defined(_WIN32)
+# if defined(_MSC_VER) && _MSC_VER < 1300
+ MEMORYSTATUS ms;
+ unsigned long tv, tp, av, ap;
+ ms.dwLength = sizeof(ms);
+ GlobalMemoryStatus(&ms);
+# define MEM_VAL(value) dw##value
+# else
+ MEMORYSTATUSEX ms;
+ DWORDLONG tv, tp, av, ap;
+ ms.dwLength = sizeof(ms);
+ if (0 == GlobalMemoryStatusEx(&ms)) {
+    return false;
+ }
+# define MEM_VAL(value) ull##value
+# endif
+ tv = ms.MEM_VAL(TotalPageFile);
+ tp = ms.MEM_VAL(TotalPhys);
+ av = ms.MEM_VAL(AvailPageFile);
+ ap = ms.MEM_VAL(AvailPhys);
+ this->TotalVirtualMemory = tv >> 10 >> 10;
+ this->TotalPhysicalMemory = tp >> 10 >> 10;
+ this->AvailableVirtualMemory = av >> 10 >> 10;
+ this->AvailablePhysicalMemory = ap >> 10 >> 10;
+ return true;
+#else
+ return false;
+#endif
+}
+
+bool SystemInformationImplementation::QueryLinuxMemory()
+{
+#if defined(__linux)
+ unsigned long tv = 0;
+ unsigned long tp = 0;
+ unsigned long av = 0;
+ unsigned long ap = 0;
+
+ char buffer[1024]; // for reading lines
+
+ int linuxMajor = 0;
+ int linuxMinor = 0;
+
+ // Find the Linux kernel version first
+ struct utsname unameInfo;
+ int errorFlag = uname(&unameInfo);
+ if (errorFlag != 0) {
+ std::cout << "Problem calling uname(): " << strerror(errno) << std::endl;
+ return false;
+ }
+
+ if (strlen(unameInfo.release) >= 3) {
+ // release looks like "2.6.3-15mdk-i686-up-4GB"
+ char majorChar = unameInfo.release[0];
+ char minorChar = unameInfo.release[2];
+
+ if (isdigit(majorChar)) {
+ linuxMajor = majorChar - '0';
+ }
+
+ if (isdigit(minorChar)) {
+ linuxMinor = minorChar - '0';
+ }
+ }
+
+ FILE* fd = fopen("/proc/meminfo", "r");
+ if (!fd) {
+ std::cout << "Problem opening /proc/meminfo" << std::endl;
+ return false;
+ }
+
+ if (linuxMajor >= 3 || ((linuxMajor >= 2) && (linuxMinor >= 6))) {
+ // new /proc/meminfo format since kernel 2.6.x
+    // Strictly speaking, this test should start from the 2.5.x development
+    // series that introduced the new format...
+
+ enum
+ {
+ mMemTotal,
+ mMemFree,
+ mBuffers,
+ mCached,
+ mSwapTotal,
+ mSwapFree
+ };
+ const char* format[6] = { "MemTotal:%lu kB", "MemFree:%lu kB",
+ "Buffers:%lu kB", "Cached:%lu kB",
+ "SwapTotal:%lu kB", "SwapFree:%lu kB" };
+ bool have[6] = { false, false, false, false, false, false };
+ unsigned long value[6];
+ int count = 0;
+ while (fgets(buffer, static_cast<int>(sizeof(buffer)), fd)) {
+ for (int i = 0; i < 6; ++i) {
+ if (!have[i] && sscanf(buffer, format[i], &value[i]) == 1) {
+ have[i] = true;
+ ++count;
+ }
+ }
+ }
+ if (count == 6) {
+ this->TotalPhysicalMemory = value[mMemTotal] / 1024;
+ this->AvailablePhysicalMemory =
+ (value[mMemFree] + value[mBuffers] + value[mCached]) / 1024;
+ this->TotalVirtualMemory = value[mSwapTotal] / 1024;
+ this->AvailableVirtualMemory = value[mSwapFree] / 1024;
+ } else {
+ std::cout << "Problem parsing /proc/meminfo" << std::endl;
+ fclose(fd);
+ return false;
+ }
+ } else {
+ // /proc/meminfo format for kernel older than 2.6.x
+
+ unsigned long temp;
+ unsigned long cachedMem;
+ unsigned long buffersMem;
+ // Skip "total: used:..."
+ char* r = fgets(buffer, static_cast<int>(sizeof(buffer)), fd);
+ int status = 0;
+ if (r == buffer) {
+ status += fscanf(fd, "Mem: %lu %lu %lu %lu %lu %lu\n", &tp, &temp, &ap,
+ &temp, &buffersMem, &cachedMem);
+ }
+ if (status == 6) {
+ status += fscanf(fd, "Swap: %lu %lu %lu\n", &tv, &temp, &av);
+ }
+ if (status == 9) {
+ this->TotalVirtualMemory = tv >> 10 >> 10;
+ this->TotalPhysicalMemory = tp >> 10 >> 10;
+ this->AvailableVirtualMemory = av >> 10 >> 10;
+ this->AvailablePhysicalMemory =
+ (ap + buffersMem + cachedMem) >> 10 >> 10;
+ } else {
+ std::cout << "Problem parsing /proc/meminfo" << std::endl;
+ fclose(fd);
+ return false;
+ }
+ }
+ fclose(fd);
+
+ return true;
+#else
+ return false;
+#endif
+}
+
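+// Example (illustrative only, compiled out): on a 2.6+ kernel
+// /proc/meminfo contains lines such as
+//   MemTotal:       16384256 kB
+//   SwapFree:        2097148 kB
+// and the formats above ("MemTotal:%lu kB", ...) still match because
+// %lu skips the variable run of leading spaces.
+#if 0
+static bool ParseMemTotalLine(const char* line, unsigned long& kib)
+{
+  // Returns true only for the MemTotal entry; kib receives the value in kB.
+  return sscanf(line, "MemTotal:%lu kB", &kib) == 1;
+}
+#endif
+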
+bool SystemInformationImplementation::QueryCygwinMemory()
+{
+#ifdef __CYGWIN__
+ // _SC_PAGE_SIZE does return the mmap() granularity on Cygwin,
+ // see http://cygwin.com/ml/cygwin/2006-06/msg00350.html
+ // Therefore just use 4096 as the page size of Windows.
+ long m = sysconf(_SC_PHYS_PAGES);
+ if (m < 0) {
+ return false;
+ }
+ this->TotalPhysicalMemory = m >> 8;
+ return true;
+#else
+ return false;
+#endif
+}
+
+bool SystemInformationImplementation::QueryAIXMemory()
+{
+#if defined(_AIX) && defined(_SC_AIX_REALMEM)
+ long c = sysconf(_SC_AIX_REALMEM);
+ if (c <= 0) {
+ return false;
+ }
+
+ this->TotalPhysicalMemory = c / 1024;
+
+ return true;
+#else
+ return false;
+#endif
+}
+
+bool SystemInformationImplementation::QueryMemoryBySysconf()
+{
+#if defined(_SC_PHYS_PAGES) && defined(_SC_PAGESIZE)
+ // Assume the mmap() granularity as returned by _SC_PAGESIZE is also
+ // the system page size. The only known system where this isn't true
+ // is Cygwin.
+ long p = sysconf(_SC_PHYS_PAGES);
+ long m = sysconf(_SC_PAGESIZE);
+
+ if (p < 0 || m < 0) {
+ return false;
+ }
+
+  // assume pagesize is a power of 2 and smaller than 1 MiB
+ size_t pagediv = (1024 * 1024 / m);
+
+ this->TotalPhysicalMemory = p;
+ this->TotalPhysicalMemory /= pagediv;
+
+# if defined(_SC_AVPHYS_PAGES)
+ p = sysconf(_SC_AVPHYS_PAGES);
+ if (p < 0) {
+ return false;
+ }
+
+ this->AvailablePhysicalMemory = p;
+ this->AvailablePhysicalMemory /= pagediv;
+# endif
+
+ return true;
+#else
+ return false;
+#endif
+}
+
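+// Worked example (illustrative only, compiled out): with a 4096-byte
+// page size, pagediv above is 1048576 / 4096 = 256, so 4194304 physical
+// pages report as 4194304 / 256 = 16384 MiB.
+#if 0
+static size_t PagesToMiB(long pages, long pagesize)
+{
+  size_t pagediv = static_cast<size_t>((1024 * 1024) / pagesize);
+  return static_cast<size_t>(pages) / pagediv;
+}
+#endif
+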
+/** Query for the memory status */
+bool SystemInformationImplementation::QueryMemory()
+{
+ return this->QueryMemoryBySysconf();
+}
+
+/** */
+size_t SystemInformationImplementation::GetTotalVirtualMemory()
+{
+ return this->TotalVirtualMemory;
+}
+
+/** */
+size_t SystemInformationImplementation::GetAvailableVirtualMemory()
+{
+ return this->AvailableVirtualMemory;
+}
+
+size_t SystemInformationImplementation::GetTotalPhysicalMemory()
+{
+ return this->TotalPhysicalMemory;
+}
+
+/** */
+size_t SystemInformationImplementation::GetAvailablePhysicalMemory()
+{
+ return this->AvailablePhysicalMemory;
+}
+
+/** Get Cycle differences */
+SystemInformation::LongLong
+SystemInformationImplementation::GetCyclesDifference(DELAY_FUNC DelayFunction,
+ unsigned int uiParameter)
+{
+#if defined(_MSC_VER) && (_MSC_VER >= 1400)
+ unsigned __int64 stamp1, stamp2;
+
+ stamp1 = __rdtsc();
+ DelayFunction(uiParameter);
+ stamp2 = __rdtsc();
+
+ return stamp2 - stamp1;
+#elif USE_ASM_INSTRUCTIONS
+
+ unsigned int edx1, eax1;
+ unsigned int edx2, eax2;
+
+ // Calculate the frequency of the CPU instructions.
+ __try {
+ _asm {
+ push uiParameter ; push parameter param
+ mov ebx, DelayFunction ; store func in ebx
+
+ RDTSC_INSTRUCTION
+
+ mov esi, eax ; esi = eax
+ mov edi, edx ; edi = edx
+
+ call ebx ; call the delay functions
+
+ RDTSC_INSTRUCTION
+
+ pop ebx
+
+ mov edx2, edx ; edx2 = edx
+ mov eax2, eax ; eax2 = eax
+
+      mov edx1, edi ; edx1 = edi
+      mov eax1, esi ; eax1 = esi
+ }
+ } __except (1) {
+ return -1;
+ }
+
+ return ((((__int64)edx2 << 32) + eax2) - (((__int64)edx1 << 32) + eax1));
+
+#else
+ (void)DelayFunction;
+ (void)uiParameter;
+ return -1;
+#endif
+}
+
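+// Example (illustrative only, compiled out): on MSVC the measurement
+// above reduces to two __rdtsc() reads around the delay callback; the
+// inline-assembly branch does the same with raw RDTSC on older compilers.
+#if 0
+static unsigned long long CyclesAround(void (*fn)(unsigned int),
+                                       unsigned int arg)
+{
+  unsigned long long start = __rdtsc(); // <intrin.h> on MSVC
+  fn(arg);
+  return __rdtsc() - start;
+}
+#endif
+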
+/** Compute the delay overhead */
+void SystemInformationImplementation::DelayOverhead(unsigned int uiMS)
+{
+#if defined(_WIN32)
+ LARGE_INTEGER Frequency, StartCounter, EndCounter;
+ __int64 x;
+
+ // Get the frequency of the high performance counter.
+ if (!QueryPerformanceFrequency(&Frequency)) {
+ return;
+ }
+ x = Frequency.QuadPart / 1000 * uiMS;
+
+ // Get the starting position of the counter.
+ QueryPerformanceCounter(&StartCounter);
+
+ do {
+ // Get the ending position of the counter.
+ QueryPerformanceCounter(&EndCounter);
+ } while (EndCounter.QuadPart - StartCounter.QuadPart == x);
+#endif
+ (void)uiMS;
+}
+
+/** Works only for windows */
+bool SystemInformationImplementation::IsSMTSupported()
+{
+ return this->Features.ExtendedFeatures.LogicalProcessorsPerPhysical > 1;
+}
+
+/** Return the APIC Id. Works only for windows. */
+unsigned char SystemInformationImplementation::GetAPICId()
+{
+ int Regs[4] = { 0, 0, 0, 0 };
+
+#if USE_CPUID
+ if (!this->IsSMTSupported()) {
+    return static_cast<unsigned char>(-1); // HT not supported, only 1 logical processor
+  }
+ call_cpuid(1, Regs);
+#endif
+
+ return static_cast<unsigned char>((Regs[1] & INITIAL_APIC_ID_BITS) >> 24);
+}
+
+/** Count the number of CPUs. Works only on windows. */
+void SystemInformationImplementation::CPUCountWindows()
+{
+#if defined(_WIN32)
+ this->NumberOfPhysicalCPU = 0;
+ this->NumberOfLogicalCPU = 0;
+
+ typedef BOOL(WINAPI * GetLogicalProcessorInformationType)(
+ PSYSTEM_LOGICAL_PROCESSOR_INFORMATION, PDWORD);
+ static GetLogicalProcessorInformationType pGetLogicalProcessorInformation =
+ (GetLogicalProcessorInformationType)GetProcAddress(
+ GetModuleHandleW(L"kernel32"), "GetLogicalProcessorInformation");
+
+ if (!pGetLogicalProcessorInformation) {
+ // Fallback to approximate implementation on ancient Windows versions.
+ SYSTEM_INFO info;
+ ZeroMemory(&info, sizeof(info));
+ GetSystemInfo(&info);
+ this->NumberOfPhysicalCPU =
+ static_cast<unsigned int>(info.dwNumberOfProcessors);
+ this->NumberOfLogicalCPU = this->NumberOfPhysicalCPU;
+ return;
+ }
+
+ std::vector<SYSTEM_LOGICAL_PROCESSOR_INFORMATION> ProcInfo;
+ {
+ DWORD Length = 0;
+ DWORD rc = pGetLogicalProcessorInformation(nullptr, &Length);
+ assert(FALSE == rc);
+ (void)rc; // Silence unused variable warning in Borland C++ 5.81
+ assert(GetLastError() == ERROR_INSUFFICIENT_BUFFER);
+ ProcInfo.resize(Length / sizeof(SYSTEM_LOGICAL_PROCESSOR_INFORMATION));
+ rc = pGetLogicalProcessorInformation(&ProcInfo[0], &Length);
+ assert(rc != FALSE);
+ (void)rc; // Silence unused variable warning in Borland C++ 5.81
+ }
+
+ typedef std::vector<SYSTEM_LOGICAL_PROCESSOR_INFORMATION>::iterator
+ pinfoIt_t;
+ for (pinfoIt_t it = ProcInfo.begin(); it != ProcInfo.end(); ++it) {
+ SYSTEM_LOGICAL_PROCESSOR_INFORMATION PInfo = *it;
+ if (PInfo.Relationship != RelationProcessorCore) {
+ continue;
+ }
+
+ std::bitset<std::numeric_limits<ULONG_PTR>::digits> ProcMask(
+ (unsigned long long)PInfo.ProcessorMask);
+ unsigned int count = (unsigned int)ProcMask.count();
+ if (count == 0) { // I think this should never happen, but just to be safe.
+ continue;
+ }
+ this->NumberOfPhysicalCPU++;
+ this->NumberOfLogicalCPU += (unsigned int)count;
+ this->Features.ExtendedFeatures.LogicalProcessorsPerPhysical = count;
+ }
+ this->NumberOfPhysicalCPU = std::max(1u, this->NumberOfPhysicalCPU);
+ this->NumberOfLogicalCPU = std::max(1u, this->NumberOfLogicalCPU);
+#else
+#endif
+}
+
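+// Example (illustrative only, compiled out): each RelationProcessorCore
+// entry carries an affinity mask whose set bits are that core's logical
+// processors, so a mask of 0x3 counts as one physical core with two
+// logical CPUs.
+#if 0
+static unsigned int LogicalCPUsInMask(unsigned long long mask)
+{
+  std::bitset<64> bits(mask);
+  return static_cast<unsigned int>(bits.count());
+}
+#endif
+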
+/** Return the number of logical CPUs on the system */
+unsigned int SystemInformationImplementation::GetNumberOfLogicalCPU()
+{
+ return this->NumberOfLogicalCPU;
+}
+
+/** Return the number of physical CPUs on the system */
+unsigned int SystemInformationImplementation::GetNumberOfPhysicalCPU()
+{
+ return this->NumberOfPhysicalCPU;
+}
+
+/** For Mac use sysctlbyname calls to find system info */
+bool SystemInformationImplementation::ParseSysCtl()
+{
+#if defined(__APPLE__)
+ char retBuf[128];
+ int err = 0;
+ uint64_t value = 0;
+ size_t len = sizeof(value);
+ sysctlbyname("hw.memsize", &value, &len, nullptr, 0);
+ this->TotalPhysicalMemory = static_cast<size_t>(value / 1048576);
+
+ // Parse values for Mac
+ this->AvailablePhysicalMemory = 0;
+ vm_statistics_data_t vmstat;
+ mach_msg_type_number_t count = HOST_VM_INFO_COUNT;
+ if (host_statistics(mach_host_self(), HOST_VM_INFO, (host_info_t)&vmstat,
+ &count) == KERN_SUCCESS) {
+ len = sizeof(value);
+ err = sysctlbyname("hw.pagesize", &value, &len, nullptr, 0);
+ int64_t available_memory =
+ (vmstat.free_count + vmstat.inactive_count) * value;
+ this->AvailablePhysicalMemory =
+ static_cast<size_t>(available_memory / 1048576);
+ }
+
+# ifdef VM_SWAPUSAGE
+ // Virtual memory.
+ int mib[2] = { CTL_VM, VM_SWAPUSAGE };
+ unsigned int miblen =
+ static_cast<unsigned int>(sizeof(mib) / sizeof(mib[0]));
+ struct xsw_usage swap;
+ len = sizeof(swap);
+ err = sysctl(mib, miblen, &swap, &len, nullptr, 0);
+ if (err == 0) {
+ this->AvailableVirtualMemory =
+ static_cast<size_t>(swap.xsu_avail / 1048576);
+ this->TotalVirtualMemory = static_cast<size_t>(swap.xsu_total / 1048576);
+ }
+# else
+ this->AvailableVirtualMemory = 0;
+ this->TotalVirtualMemory = 0;
+# endif
+
+ // CPU Info
+ len = sizeof(this->NumberOfPhysicalCPU);
+ sysctlbyname("hw.physicalcpu", &this->NumberOfPhysicalCPU, &len, nullptr, 0);
+ len = sizeof(this->NumberOfLogicalCPU);
+ sysctlbyname("hw.logicalcpu", &this->NumberOfLogicalCPU, &len, nullptr, 0);
+
+ int cores_per_package = 0;
+ len = sizeof(cores_per_package);
+ err = sysctlbyname("machdep.cpu.cores_per_package", &cores_per_package, &len,
+ nullptr, 0);
+  // If that name was not found, default to 1
+ this->Features.ExtendedFeatures.LogicalProcessorsPerPhysical =
+ err != 0 ? 1 : static_cast<unsigned char>(cores_per_package);
+
+ len = sizeof(value);
+ sysctlbyname("hw.cpufrequency", &value, &len, nullptr, 0);
+ this->CPUSpeedInMHz = static_cast<float>(value) / 1000000;
+
+ // Chip family
+ len = sizeof(this->ChipID.Family);
+ // Seems only the intel chips will have this name so if this fails it is
+ // probably a PPC machine
+ err =
+ sysctlbyname("machdep.cpu.family", &this->ChipID.Family, &len, nullptr, 0);
+ if (err != 0) // Go back to names we know but are less descriptive
+ {
+ this->ChipID.Family = 0;
+ ::memset(retBuf, 0, 128);
+ len = 32;
+ err = sysctlbyname("hw.machine", &retBuf, &len, nullptr, 0);
+ std::string machineBuf(retBuf);
+ if (machineBuf.find_first_of("Power") != std::string::npos) {
+ this->ChipID.Vendor = "IBM";
+ len = sizeof(this->ChipID.Family);
+ err = sysctlbyname("hw.cputype", &this->ChipID.Family, &len, nullptr, 0);
+ len = sizeof(this->ChipID.Model);
+ err =
+ sysctlbyname("hw.cpusubtype", &this->ChipID.Model, &len, nullptr, 0);
+ this->FindManufacturer();
+ }
+ } else // Should be an Intel Chip.
+ {
+ len = sizeof(this->ChipID.Family);
+ err = sysctlbyname("machdep.cpu.family", &this->ChipID.Family, &len,
+ nullptr, 0);
+
+ ::memset(retBuf, 0, 128);
+ len = 128;
+ err = sysctlbyname("machdep.cpu.vendor", retBuf, &len, nullptr, 0);
+ // Chip Vendor
+ this->ChipID.Vendor = retBuf;
+ this->FindManufacturer();
+
+ // Chip Model
+ len = sizeof(value);
+ err = sysctlbyname("machdep.cpu.model", &value, &len, nullptr, 0);
+ this->ChipID.Model = static_cast<int>(value);
+
+ // Chip Stepping
+ len = sizeof(value);
+ value = 0;
+ err = sysctlbyname("machdep.cpu.stepping", &value, &len, nullptr, 0);
+ if (!err) {
+ this->ChipID.Revision = static_cast<int>(value);
+ }
+
+ // feature string
+ char* buf = nullptr;
+ size_t allocSize = 128;
+
+ err = 0;
+ len = 0;
+
+ // sysctlbyname() will return with err==0 && len==0 if the buffer is too
+ // small
+ while (err == 0 && len == 0) {
+ delete[] buf;
+ allocSize *= 2;
+ buf = new char[allocSize];
+ if (!buf) {
+ break;
+ }
+ buf[0] = ' ';
+ len = allocSize - 2; // keep space for leading and trailing space
+ err = sysctlbyname("machdep.cpu.features", buf + 1, &len, nullptr, 0);
+ }
+ if (!err && buf && len) {
+      // now we can match every flag as space + flag + space
+ buf[len + 1] = ' ';
+ std::string cpuflags(buf, len + 2);
+
+ if ((cpuflags.find(" FPU ") != std::string::npos)) {
+ this->Features.HasFPU = true;
+ }
+ if ((cpuflags.find(" TSC ") != std::string::npos)) {
+ this->Features.HasTSC = true;
+ }
+ if ((cpuflags.find(" MMX ") != std::string::npos)) {
+ this->Features.HasMMX = true;
+ }
+ if ((cpuflags.find(" SSE ") != std::string::npos)) {
+ this->Features.HasSSE = true;
+ }
+ if ((cpuflags.find(" SSE2 ") != std::string::npos)) {
+ this->Features.HasSSE2 = true;
+ }
+ if ((cpuflags.find(" APIC ") != std::string::npos)) {
+ this->Features.HasAPIC = true;
+ }
+ if ((cpuflags.find(" CMOV ") != std::string::npos)) {
+ this->Features.HasCMOV = true;
+ }
+ if ((cpuflags.find(" MTRR ") != std::string::npos)) {
+ this->Features.HasMTRR = true;
+ }
+ if ((cpuflags.find(" ACPI ") != std::string::npos)) {
+ this->Features.HasACPI = true;
+ }
+ }
+ delete[] buf;
+ }
+
+ // brand string
+ ::memset(retBuf, 0, sizeof(retBuf));
+ len = sizeof(retBuf);
+ err = sysctlbyname("machdep.cpu.brand_string", retBuf, &len, nullptr, 0);
+ if (!err) {
+ this->ChipID.ProcessorName = retBuf;
+ this->ChipID.ModelName = retBuf;
+ }
+
+ // Cache size
+ len = sizeof(value);
+ err = sysctlbyname("hw.l1icachesize", &value, &len, nullptr, 0);
+ this->Features.L1CacheSize = static_cast<int>(value);
+ len = sizeof(value);
+ err = sysctlbyname("hw.l2cachesize", &value, &len, nullptr, 0);
+ this->Features.L2CacheSize = static_cast<int>(value);
+
+ return true;
+#else
+ return false;
+#endif
+}
+
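+// Example (illustrative only, compiled out): the growing-buffer loop
+// above for "machdep.cpu.features" can also be written as the usual
+// two-call sysctlbyname() pattern: query the required size first, then
+// fetch the value. Assumes <string> and <vector> are available.
+#if 0
+static std::string SysctlString(const char* name)
+{
+  size_t len = 0;
+  if (sysctlbyname(name, nullptr, &len, nullptr, 0) != 0 || len == 0) {
+    return std::string();
+  }
+  std::vector<char> buf(len);
+  if (sysctlbyname(name, buf.data(), &len, nullptr, 0) != 0) {
+    return std::string();
+  }
+  return std::string(buf.data());
+}
+#endif
+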
+/** Extract a value from sysctl command */
+std::string SystemInformationImplementation::ExtractValueFromSysCtl(
+ const char* word)
+{
+ size_t pos = this->SysCtlBuffer.find(word);
+ if (pos != std::string::npos) {
+ pos = this->SysCtlBuffer.find(": ", pos);
+ size_t pos2 = this->SysCtlBuffer.find("\n", pos);
+ if (pos != std::string::npos && pos2 != std::string::npos) {
+ return this->SysCtlBuffer.substr(pos + 2, pos2 - pos - 2);
+ }
+ }
+ return "";
+}
+
+/** Run a given process */
+std::string SystemInformationImplementation::RunProcess(
+ std::vector<const char*> args)
+{
+ std::string buffer;
+
+ // Run the application
+ kwsysProcess* gp = kwsysProcess_New();
+ kwsysProcess_SetCommand(gp, args.data());
+ kwsysProcess_SetOption(gp, kwsysProcess_Option_HideWindow, 1);
+
+ kwsysProcess_Execute(gp);
+
+ char* data = nullptr;
+ int length;
+ double timeout = 255;
+ int pipe; // pipe id as returned by kwsysProcess_WaitForData()
+
+ while ((static_cast<void>(
+ pipe = kwsysProcess_WaitForData(gp, &data, &length, &timeout)),
+ (pipe == kwsysProcess_Pipe_STDOUT ||
+ pipe == kwsysProcess_Pipe_STDERR))) // wait for 1s
+ {
+ buffer.append(data, length);
+ }
+ kwsysProcess_WaitForExit(gp, nullptr);
+
+ int result = 0;
+ switch (kwsysProcess_GetState(gp)) {
+ case kwsysProcess_State_Exited: {
+ result = kwsysProcess_GetExitValue(gp);
+ } break;
+ case kwsysProcess_State_Error: {
+ std::cerr << "Error: Could not run " << args[0] << ":\n";
+ std::cerr << kwsysProcess_GetErrorString(gp) << "\n";
+ } break;
+ case kwsysProcess_State_Exception: {
+ std::cerr << "Error: " << args[0] << " terminated with an exception: "
+ << kwsysProcess_GetExceptionString(gp) << "\n";
+ } break;
+ case kwsysProcess_State_Starting:
+ case kwsysProcess_State_Executing:
+ case kwsysProcess_State_Expired:
+ case kwsysProcess_State_Killed: {
+ // Should not get here.
+ std::cerr << "Unexpected ending state after running " << args[0]
+ << std::endl;
+ } break;
+ }
+ kwsysProcess_Delete(gp);
+ if (result) {
+    std::cerr << "Error " << args[0] << " returned: " << result << "\n";
+ }
+ return buffer;
+}
+
+std::string SystemInformationImplementation::ParseValueFromKStat(
+ const char* arguments)
+{
+ std::vector<std::string> args_string;
+ std::string command = arguments;
+ size_t start = std::string::npos;
+ size_t pos = command.find(' ', 0);
+ while (pos != std::string::npos) {
+ bool inQuotes = false;
+ // Check if we are between quotes
+ size_t b0 = command.find('"', 0);
+ size_t b1 = command.find('"', b0 + 1);
+ while (b0 != std::string::npos && b1 != std::string::npos && b1 > b0) {
+ if (pos > b0 && pos < b1) {
+ inQuotes = true;
+ break;
+ }
+ b0 = command.find('"', b1 + 1);
+ b1 = command.find('"', b0 + 1);
+ }
+
+ if (!inQuotes) {
+ args_string.push_back(command.substr(start + 1, pos - start - 1));
+ std::string& arg = args_string.back();
+
+ // Remove the quotes if any
+ arg.erase(std::remove(arg.begin(), arg.end(), '"'), arg.end());
+ start = pos;
+ }
+ pos = command.find(' ', pos + 1);
+ }
+ args_string.push_back(command.substr(start + 1, command.size() - start - 1));
+
+ std::vector<const char*> args;
+ args.reserve(3 + args_string.size());
+ args.push_back("kstat");
+ args.push_back("-p");
+ for (size_t i = 0; i < args_string.size(); ++i) {
+ args.push_back(args_string[i].c_str());
+ }
+ args.push_back(nullptr);
+
+ std::string buffer = this->RunProcess(args);
+
+ std::string value;
+ for (size_t i = buffer.size() - 1; i > 0; i--) {
+ if (buffer[i] == ' ' || buffer[i] == '\t') {
+ break;
+ }
+ if (buffer[i] != '\n' && buffer[i] != '\r') {
+ value.insert(0u, 1, buffer[i]);
+ }
+ }
+ return value;
+}
+
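+// Example (illustrative only, compiled out): a typical `kstat -p -s
+// physmem` line looks like
+//   unix:0:system_misc:physmem      4194304
+// and the backwards scan above keeps only the trailing token, here
+// "4194304".
+#if 0
+static std::string LastToken(const std::string& line)
+{
+  // Assumes the line contains at least one space or tab separator.
+  std::string::size_type end = line.find_last_not_of("\r\n");
+  std::string::size_type start = line.find_last_of(" \t", end);
+  return line.substr(start + 1, end - start);
+}
+#endif
+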
+/** Querying for system information from Solaris */
+bool SystemInformationImplementation::QuerySolarisMemory()
+{
+#if defined(__SVR4) && defined(__sun)
+// Solaris allows querying this value by sysconf, but if this is
+// a 32 bit process on a 64 bit host the returned memory will be
+// limited to 4GiB. So if this is a 32 bit process or if the sysconf
+// method fails use the kstat interface.
+# if SIZEOF_VOID_P == 8
+ if (this->QueryMemoryBySysconf()) {
+ return true;
+ }
+# endif
+
+ char* tail;
+ unsigned long totalMemory =
+ strtoul(this->ParseValueFromKStat("-s physmem").c_str(), &tail, 0);
+ this->TotalPhysicalMemory = totalMemory / 128;
+
+ return true;
+#else
+ return false;
+#endif
+}
+
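+// Worked example (illustrative only, compiled out): the kstat "physmem"
+// statistic is a page count, and dividing by 128 assumes an 8 KiB page
+// size: pages * 8192 / 1048576 == pages / 128.
+#if 0
+static size_t SparcPagesToMiB(unsigned long pages)
+{
+  return pages / 128; // 8 KiB page size assumed, as in the code above
+}
+#endif
+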
+bool SystemInformationImplementation::QuerySolarisProcessor()
+{
+ if (!this->QueryProcessorBySysconf()) {
+ return false;
+ }
+
+ // Parse values
+ this->CPUSpeedInMHz = static_cast<float>(
+ atoi(this->ParseValueFromKStat("-s clock_MHz").c_str()));
+
+ // Chip family
+ this->ChipID.Family = 0;
+
+ // Chip Model
+ this->ChipID.ProcessorName = this->ParseValueFromKStat("-s cpu_type");
+ this->ChipID.Model = 0;
+
+ // Chip Vendor
+ if (this->ChipID.ProcessorName != "i386") {
+ this->ChipID.Vendor = "Sun";
+ this->FindManufacturer();
+ }
+
+ return true;
+}
+
+/** Querying for system information from Haiku OS */
+bool SystemInformationImplementation::QueryHaikuInfo()
+{
+#if defined(__HAIKU__)
+
+ // CPU count
+ system_info info;
+ get_system_info(&info);
+ this->NumberOfPhysicalCPU = info.cpu_count;
+
+ // CPU speed
+ uint32 topologyNodeCount = 0;
+ cpu_topology_node_info* topology = 0;
+ get_cpu_topology_info(0, &topologyNodeCount);
+ if (topologyNodeCount != 0)
+ topology = new cpu_topology_node_info[topologyNodeCount];
+ get_cpu_topology_info(topology, &topologyNodeCount);
+
+ for (uint32 i = 0; i < topologyNodeCount; i++) {
+ if (topology[i].type == B_TOPOLOGY_CORE) {
+ this->CPUSpeedInMHz =
+ topology[i].data.core.default_frequency / 1000000.0f;
+ break;
+ }
+ }
+
+ delete[] topology;
+
+ // Physical Memory
+ this->TotalPhysicalMemory = (info.max_pages * B_PAGE_SIZE) / (1024 * 1024);
+ this->AvailablePhysicalMemory = this->TotalPhysicalMemory -
+ ((info.used_pages * B_PAGE_SIZE) / (1024 * 1024));
+
+ // NOTE: get_system_info_etc is currently a private call so just set to 0
+ // until it becomes public
+ this->TotalVirtualMemory = 0;
+ this->AvailableVirtualMemory = 0;
+
+ // Retrieve cpuid_info union for cpu 0
+ cpuid_info cpu_info;
+ get_cpuid(&cpu_info, 0, 0);
+
+ // Chip Vendor
+ // Use a temporary buffer so that we can add NULL termination to the string
+ char vbuf[13];
+ strncpy(vbuf, cpu_info.eax_0.vendor_id, 12);
+ vbuf[12] = '\0';
+ this->ChipID.Vendor = vbuf;
+
+ this->FindManufacturer();
+
+ // Retrieve cpuid_info union for cpu 0 this time using a register value of 1
+ get_cpuid(&cpu_info, 1, 0);
+
+ this->NumberOfLogicalCPU = cpu_info.eax_1.logical_cpus;
+
+ // Chip type
+ this->ChipID.Type = cpu_info.eax_1.type;
+
+ // Chip family
+ this->ChipID.Family = cpu_info.eax_1.family;
+
+ // Chip Model
+ this->ChipID.Model = cpu_info.eax_1.model;
+
+ // Chip Revision
+ this->ChipID.Revision = cpu_info.eax_1.stepping;
+
+ // Chip Extended Family
+ this->ChipID.ExtendedFamily = cpu_info.eax_1.extended_family;
+
+ // Chip Extended Model
+ this->ChipID.ExtendedModel = cpu_info.eax_1.extended_model;
+
+ // Get ChipID.ProcessorName from other information already gathered
+ this->RetrieveClassicalCPUIdentity();
+
+ // Cache size
+ this->Features.L1CacheSize = 0;
+ this->Features.L2CacheSize = 0;
+
+ return true;
+
+#else
+ return false;
+#endif
+}
+
+bool SystemInformationImplementation::QueryQNXMemory()
+{
+#if defined(__QNX__)
+ std::string buffer;
+ std::vector<const char*> args;
+ args.clear();
+
+ args.push_back("showmem");
+ args.push_back("-S");
+ args.push_back(0);
+ buffer = this->RunProcess(args);
+ args.clear();
+
+ size_t pos = buffer.find("System RAM:");
+ if (pos == std::string::npos)
+ return false;
+ pos = buffer.find(":", pos);
+ size_t pos2 = buffer.find("M (", pos);
+ if (pos2 == std::string::npos)
+ return false;
+
+ pos++;
+ while (buffer[pos] == ' ')
+ pos++;
+
+ this->TotalPhysicalMemory = atoi(buffer.substr(pos, pos2 - pos).c_str());
+ return true;
+#endif
+ return false;
+}
+
+bool SystemInformationImplementation::QueryBSDMemory()
+{
+#if defined(__OpenBSD__) || defined(__FreeBSD__) || defined(__NetBSD__) || \
+ defined(__DragonFly__)
+ int ctrl[2] = { CTL_HW, HW_PHYSMEM };
+# if defined(HW_PHYSMEM64)
+ int64_t k;
+ ctrl[1] = HW_PHYSMEM64;
+# else
+ int k;
+# endif
+ size_t sz = sizeof(k);
+
+ if (sysctl(ctrl, 2, &k, &sz, nullptr, 0) != 0) {
+ return false;
+ }
+
+ this->TotalPhysicalMemory = k >> 10 >> 10;
+
+ return true;
+#else
+ return false;
+#endif
+}
+
+bool SystemInformationImplementation::QueryQNXProcessor()
+{
+#if defined(__QNX__)
+ // the output on my QNX 6.4.1 looks like this:
+ // Processor1: 686 Pentium II Stepping 3 2175MHz FPU
+ std::string buffer;
+ std::vector<const char*> args;
+ args.clear();
+
+ args.push_back("pidin");
+ args.push_back("info");
+ args.push_back(0);
+ buffer = this->RunProcess(args);
+ args.clear();
+
+ size_t pos = buffer.find("Processor1:");
+ if (pos == std::string::npos)
+ return false;
+
+ size_t pos2 = buffer.find("MHz", pos);
+ if (pos2 == std::string::npos)
+ return false;
+
+ size_t pos3 = pos2;
+ while (buffer[pos3] != ' ')
+ --pos3;
+
+ this->CPUSpeedInMHz = atoi(buffer.substr(pos3 + 1, pos2 - pos3 - 1).c_str());
+
+ pos2 = buffer.find(" Stepping", pos);
+ if (pos2 != std::string::npos) {
+ pos2 = buffer.find(" ", pos2 + 1);
+ if (pos2 != std::string::npos && pos2 < pos3) {
+ this->ChipID.Revision =
+ atoi(buffer.substr(pos2 + 1, pos3 - pos2).c_str());
+ }
+ }
+
+ this->NumberOfPhysicalCPU = 0;
+ do {
+ pos = buffer.find("\nProcessor", pos + 1);
+ ++this->NumberOfPhysicalCPU;
+ } while (pos != std::string::npos);
+ this->NumberOfLogicalCPU = 1;
+
+ return true;
+#else
+ return false;
+#endif
+}
+
+bool SystemInformationImplementation::QueryBSDProcessor()
+{
+#if defined(__OpenBSD__) || defined(__FreeBSD__) || defined(__NetBSD__) || \
+ defined(__DragonFly__)
+ int k;
+ size_t sz = sizeof(k);
+ int ctrl[2] = { CTL_HW, HW_NCPU };
+
+ if (sysctl(ctrl, 2, &k, &sz, nullptr, 0) != 0) {
+ return false;
+ }
+
+ this->NumberOfPhysicalCPU = k;
+ this->NumberOfLogicalCPU = this->NumberOfPhysicalCPU;
+
+# if defined(HW_CPUSPEED)
+ ctrl[1] = HW_CPUSPEED;
+
+ if (sysctl(ctrl, 2, &k, &sz, nullptr, 0) != 0) {
+ return false;
+ }
+
+ this->CPUSpeedInMHz = (float)k;
+# endif
+
+# if defined(CPU_SSE)
+ ctrl[0] = CTL_MACHDEP;
+ ctrl[1] = CPU_SSE;
+
+ if (sysctl(ctrl, 2, &k, &sz, nullptr, 0) != 0) {
+ return false;
+ }
+
+ this->Features.HasSSE = (k > 0);
+# endif
+
+# if defined(CPU_SSE2)
+ ctrl[0] = CTL_MACHDEP;
+ ctrl[1] = CPU_SSE2;
+
+ if (sysctl(ctrl, 2, &k, &sz, nullptr, 0) != 0) {
+ return false;
+ }
+
+ this->Features.HasSSE2 = (k > 0);
+# endif
+
+# if defined(CPU_CPUVENDOR)
+ ctrl[0] = CTL_MACHDEP;
+ ctrl[1] = CPU_CPUVENDOR;
+ char vbuf[25];
+ ::memset(vbuf, 0, sizeof(vbuf));
+ sz = sizeof(vbuf) - 1;
+ if (sysctl(ctrl, 2, vbuf, &sz, nullptr, 0) != 0) {
+ return false;
+ }
+
+ this->ChipID.Vendor = vbuf;
+ this->FindManufacturer();
+# endif
+
+ return true;
+#else
+ return false;
+#endif
+}
+
+bool SystemInformationImplementation::QueryHPUXMemory()
+{
+#if defined(__hpux)
+ unsigned long tv = 0;
+ unsigned long tp = 0;
+ unsigned long av = 0;
+ unsigned long ap = 0;
+ struct pst_static pst;
+ struct pst_dynamic pdy;
+
+ unsigned long ps = 0;
+ if (pstat_getstatic(&pst, sizeof(pst), (size_t)1, 0) == -1) {
+ return false;
+ }
+
+ ps = pst.page_size;
+ tp = pst.physical_memory * ps;
+ tv = (pst.physical_memory + pst.pst_maxmem) * ps;
+ if (pstat_getdynamic(&pdy, sizeof(pdy), (size_t)1, 0) == -1) {
+ return false;
+ }
+
+ ap = tp - pdy.psd_rm * ps;
+ av = tv - pdy.psd_vm;
+ this->TotalVirtualMemory = tv >> 10 >> 10;
+ this->TotalPhysicalMemory = tp >> 10 >> 10;
+ this->AvailableVirtualMemory = av >> 10 >> 10;
+ this->AvailablePhysicalMemory = ap >> 10 >> 10;
+ return true;
+#else
+ return false;
+#endif
+}
+
+bool SystemInformationImplementation::QueryHPUXProcessor()
+{
+#if defined(__hpux)
+# if defined(KWSYS_SYS_HAS_MPCTL_H)
+ int c = mpctl(MPC_GETNUMSPUS_SYS, 0, 0);
+ if (c <= 0) {
+ return false;
+ }
+
+ this->NumberOfPhysicalCPU = c;
+ this->NumberOfLogicalCPU = this->NumberOfPhysicalCPU;
+
+ long t = sysconf(_SC_CPU_VERSION);
+
+ if (t == -1) {
+ return false;
+ }
+
+ switch (t) {
+ case CPU_PA_RISC1_0:
+ this->ChipID.Vendor = "Hewlett-Packard";
+ this->ChipID.Family = 0x100;
+ break;
+ case CPU_PA_RISC1_1:
+ this->ChipID.Vendor = "Hewlett-Packard";
+ this->ChipID.Family = 0x110;
+ break;
+ case CPU_PA_RISC2_0:
+ this->ChipID.Vendor = "Hewlett-Packard";
+ this->ChipID.Family = 0x200;
+ break;
+# if defined(CPU_HP_INTEL_EM_1_0) || defined(CPU_IA64_ARCHREV_0)
+# ifdef CPU_HP_INTEL_EM_1_0
+ case CPU_HP_INTEL_EM_1_0:
+# endif
+# ifdef CPU_IA64_ARCHREV_0
+ case CPU_IA64_ARCHREV_0:
+# endif
+ this->ChipID.Vendor = "GenuineIntel";
+ this->Features.HasIA64 = true;
+ break;
+# endif
+ default:
+ return false;
+ }
+
+ this->FindManufacturer();
+
+ return true;
+# else
+ return false;
+# endif
+#else
+ return false;
+#endif
+}
+
+/** Query the operating system information */
+bool SystemInformationImplementation::QueryOSInformation()
+{
+#if defined(_WIN32)
+
+ this->OSName = "Windows";
+
+ OSVERSIONINFOEXW osvi;
+ BOOL bIsWindows64Bit;
+ BOOL bOsVersionInfoEx;
+ char operatingSystem[256];
+
+ // Try calling GetVersionEx using the OSVERSIONINFOEX structure.
+ ZeroMemory(&osvi, sizeof(OSVERSIONINFOEXW));
+ osvi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEXW);
+# ifdef KWSYS_WINDOWS_DEPRECATED_GetVersionEx
+# pragma warning(push)
+# ifdef __INTEL_COMPILER
+# pragma warning(disable : 1478)
+# elif defined __clang__
+# pragma clang diagnostic push
+# pragma clang diagnostic ignored "-Wdeprecated-declarations"
+# else
+# pragma warning(disable : 4996)
+# endif
+# endif
+ bOsVersionInfoEx = GetVersionExW((OSVERSIONINFOW*)&osvi);
+ if (!bOsVersionInfoEx) {
+ osvi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOW);
+ if (!GetVersionExW((OSVERSIONINFOW*)&osvi)) {
+ return false;
+ }
+ }
+# ifdef KWSYS_WINDOWS_DEPRECATED_GetVersionEx
+# ifdef __clang__
+# pragma clang diagnostic pop
+# else
+# pragma warning(pop)
+# endif
+# endif
+
+ switch (osvi.dwPlatformId) {
+ case VER_PLATFORM_WIN32_NT:
+ // Test for the product.
+ if (osvi.dwMajorVersion <= 4) {
+ this->OSRelease = "NT";
+ }
+ if (osvi.dwMajorVersion == 5 && osvi.dwMinorVersion == 0) {
+ this->OSRelease = "2000";
+ }
+ if (osvi.dwMajorVersion == 5 && osvi.dwMinorVersion == 1) {
+ this->OSRelease = "XP";
+ }
+ // XP Professional x64
+ if (osvi.dwMajorVersion == 5 && osvi.dwMinorVersion == 2) {
+ this->OSRelease = "XP";
+ }
+# ifdef VER_NT_WORKSTATION
+ // Test for product type.
+ if (bOsVersionInfoEx) {
+ if (osvi.wProductType == VER_NT_WORKSTATION) {
+ if (osvi.dwMajorVersion == 6 && osvi.dwMinorVersion == 0) {
+ this->OSRelease = "Vista";
+ }
+ if (osvi.dwMajorVersion == 6 && osvi.dwMinorVersion == 1) {
+ this->OSRelease = "7";
+ }
+// VER_SUITE_PERSONAL may not be defined
+# ifdef VER_SUITE_PERSONAL
+ else {
+ if (osvi.wSuiteMask & VER_SUITE_PERSONAL) {
+ this->OSRelease += " Personal";
+ } else {
+ this->OSRelease += " Professional";
+ }
+ }
+# endif
+ } else if (osvi.wProductType == VER_NT_SERVER) {
+ // Check for .NET Server instead of Windows XP.
+ if (osvi.dwMajorVersion == 5 && osvi.dwMinorVersion == 1) {
+ this->OSRelease = ".NET";
+ }
+
+ // Continue with the type detection.
+ if (osvi.wSuiteMask & VER_SUITE_DATACENTER) {
+ this->OSRelease += " DataCenter Server";
+ } else if (osvi.wSuiteMask & VER_SUITE_ENTERPRISE) {
+ this->OSRelease += " Advanced Server";
+ } else {
+ this->OSRelease += " Server";
+ }
+ }
+
+ sprintf(operatingSystem, "%ls (Build %ld)", osvi.szCSDVersion,
+ osvi.dwBuildNumber & 0xFFFF);
+ this->OSVersion = operatingSystem;
+ } else
+# endif // VER_NT_WORKSTATION
+ {
+ HKEY hKey;
+ wchar_t szProductType[80];
+ DWORD dwBufLen;
+
+ // Query the registry to retrieve information.
+ RegOpenKeyExW(HKEY_LOCAL_MACHINE,
+ L"SYSTEM\\CurrentControlSet\\Control\\ProductOptions", 0,
+ KEY_QUERY_VALUE, &hKey);
+ RegQueryValueExW(hKey, L"ProductType", nullptr, nullptr,
+ (LPBYTE)szProductType, &dwBufLen);
+ RegCloseKey(hKey);
+
+ if (lstrcmpiW(L"WINNT", szProductType) == 0) {
+ this->OSRelease += " Professional";
+ }
+ if (lstrcmpiW(L"LANMANNT", szProductType) == 0) {
+ // Decide between Windows 2000 Advanced Server and Windows .NET
+ // Enterprise Server.
+ if (osvi.dwMajorVersion == 5 && osvi.dwMinorVersion == 1) {
+ this->OSRelease += " Standard Server";
+ } else {
+ this->OSRelease += " Server";
+ }
+ }
+ if (lstrcmpiW(L"SERVERNT", szProductType) == 0) {
+ // Decide between Windows 2000 Advanced Server and Windows .NET
+ // Enterprise Server.
+ if (osvi.dwMajorVersion == 5 && osvi.dwMinorVersion == 1) {
+ this->OSRelease += " Enterprise Server";
+ } else {
+ this->OSRelease += " Advanced Server";
+ }
+ }
+ }
+
+ // Display version, service pack (if any), and build number.
+ if (osvi.dwMajorVersion <= 4) {
+ // NB: NT 4.0 and earlier.
+ sprintf(operatingSystem, "version %ld.%ld %ls (Build %ld)",
+ osvi.dwMajorVersion, osvi.dwMinorVersion, osvi.szCSDVersion,
+ osvi.dwBuildNumber & 0xFFFF);
+ this->OSVersion = operatingSystem;
+ } else if (osvi.dwMajorVersion == 5 && osvi.dwMinorVersion == 1) {
+ // Windows XP and .NET server.
+ typedef BOOL(CALLBACK * LPFNPROC)(HANDLE, BOOL*);
+ HINSTANCE hKernelDLL;
+ LPFNPROC DLLProc;
+
+ // Load the Kernel32 DLL.
+ hKernelDLL = LoadLibraryW(L"kernel32");
+ if (hKernelDLL != nullptr) {
+ // Only XP and .NET Server support IsWOW64Process so... Load
+ // dynamically!
+ DLLProc = (LPFNPROC)GetProcAddress(hKernelDLL, "IsWow64Process");
+
+ // If the function address is valid, call the function.
+ if (DLLProc != nullptr)
+ (DLLProc)(GetCurrentProcess(), &bIsWindows64Bit);
+ else
+ bIsWindows64Bit = false;
+
+ // Free the DLL module.
+ FreeLibrary(hKernelDLL);
+ }
+ } else {
+ // Windows 2000 and everything else.
+ sprintf(operatingSystem, "%ls (Build %ld)", osvi.szCSDVersion,
+ osvi.dwBuildNumber & 0xFFFF);
+ this->OSVersion = operatingSystem;
+ }
+ break;
+
+ case VER_PLATFORM_WIN32_WINDOWS:
+ // Test for the product.
+ if (osvi.dwMajorVersion == 4 && osvi.dwMinorVersion == 0) {
+ this->OSRelease = "95";
+ if (osvi.szCSDVersion[1] == 'C') {
+ this->OSRelease += "OSR 2.5";
+ } else if (osvi.szCSDVersion[1] == 'B') {
+ this->OSRelease += "OSR 2";
+ }
+ }
+
+ if (osvi.dwMajorVersion == 4 && osvi.dwMinorVersion == 10) {
+ this->OSRelease = "98";
+ if (osvi.szCSDVersion[1] == 'A') {
+ this->OSRelease += "SE";
+ }
+ }
+
+ if (osvi.dwMajorVersion == 4 && osvi.dwMinorVersion == 90) {
+ this->OSRelease = "Me";
+ }
+ break;
+
+ case VER_PLATFORM_WIN32s:
+ this->OSRelease = "Win32s";
+ break;
+
+ default:
+ this->OSRelease = "Unknown";
+ break;
+ }
+
+ // Get the hostname
+ WORD wVersionRequested;
+ WSADATA wsaData;
+ char name[255];
+ wVersionRequested = MAKEWORD(2, 0);
+
+ if (WSAStartup(wVersionRequested, &wsaData) == 0) {
+ gethostname(name, sizeof(name));
+ WSACleanup();
+ }
+ this->Hostname = name;
+
+ const char* arch = getenv("PROCESSOR_ARCHITECTURE");
+ const char* wow64 = getenv("PROCESSOR_ARCHITEW6432");
+ if (arch) {
+ this->OSPlatform = arch;
+ }
+
+ if (wow64) {
+ // the PROCESSOR_ARCHITEW6432 is only defined when running 32bit programs
+ // on 64bit OS
+ this->OSIs64Bit = true;
+ } else if (arch) {
+ // all values other than x86 map to 64bit architectures
+ this->OSIs64Bit = (strncmp(arch, "x86", 3) != 0);
+ }
+
+#else
+
+ struct utsname unameInfo;
+ int errorFlag = uname(&unameInfo);
+ if (errorFlag == 0) {
+ this->OSName = unameInfo.sysname;
+ this->Hostname = unameInfo.nodename;
+ this->OSRelease = unameInfo.release;
+ this->OSVersion = unameInfo.version;
+ this->OSPlatform = unameInfo.machine;
+
+    // This is still insufficient to capture 64bit architectures such
+    // as powerpc and possibly mips and sparc
+ if (this->OSPlatform.find_first_of("64") != std::string::npos) {
+ this->OSIs64Bit = true;
+ }
+ }
+
+# ifdef __APPLE__
+ this->OSName = "Unknown Apple OS";
+ this->OSRelease = "Unknown product version";
+ this->OSVersion = "Unknown build version";
+
+ this->CallSwVers("-productName", this->OSName);
+ this->CallSwVers("-productVersion", this->OSRelease);
+ this->CallSwVers("-buildVersion", this->OSVersion);
+# endif
+
+#endif
+
+ return true;
+}
+
+int SystemInformationImplementation::CallSwVers(const char* arg,
+ std::string& ver)
+{
+#ifdef __APPLE__
+ std::vector<const char*> args;
+ args.push_back("sw_vers");
+ args.push_back(arg);
+ args.push_back(nullptr);
+ ver = this->RunProcess(args);
+ this->TrimNewline(ver);
+#else
+ // avoid C4100
+ (void)arg;
+ (void)ver;
+#endif
+ return 0;
+}
+
+void SystemInformationImplementation::TrimNewline(std::string& output)
+{
+ // remove \r
+ std::string::size_type pos = 0;
+ while ((pos = output.find("\r", pos)) != std::string::npos) {
+    output.erase(pos, 1);
+ }
+
+ // remove \n
+ pos = 0;
+ while ((pos = output.find("\n", pos)) != std::string::npos) {
+    output.erase(pos, 1);
+ }
+}
+
+/** Return true if the machine is 64 bits */
+bool SystemInformationImplementation::Is64Bits()
+{
+ return this->OSIs64Bit;
+}
+}
diff --git a/test/API/driver/kwsys/SystemInformation.hxx.in b/test/API/driver/kwsys/SystemInformation.hxx.in
new file mode 100644
index 0000000..fc42e9d
--- /dev/null
+++ b/test/API/driver/kwsys/SystemInformation.hxx.in
@@ -0,0 +1,170 @@
+/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying
+ file Copyright.txt or https://cmake.org/licensing#kwsys for details. */
+#ifndef @KWSYS_NAMESPACE@_SystemInformation_h
+#define @KWSYS_NAMESPACE@_SystemInformation_h
+
+#include <@KWSYS_NAMESPACE@/Configure.hxx>
+
+#include <stddef.h> /* size_t */
+#include <string>
+
+namespace @KWSYS_NAMESPACE@ {
+
+// forward declare the implementation class
+class SystemInformationImplementation;
+
+class @KWSYS_NAMESPACE@_EXPORT SystemInformation
+{
+#if @KWSYS_USE_LONG_LONG@
+ typedef long long LongLong;
+#elif @KWSYS_USE___INT64@
+ typedef __int64 LongLong;
+#else
+# error "No Long Long"
+#endif
+ friend class SystemInformationImplementation;
+ SystemInformationImplementation* Implementation;
+
+public:
+ // possible parameter values for DoesCPUSupportFeature()
+ static const long int CPU_FEATURE_MMX = 1 << 0;
+ static const long int CPU_FEATURE_MMX_PLUS = 1 << 1;
+ static const long int CPU_FEATURE_SSE = 1 << 2;
+ static const long int CPU_FEATURE_SSE2 = 1 << 3;
+ static const long int CPU_FEATURE_AMD_3DNOW = 1 << 4;
+ static const long int CPU_FEATURE_AMD_3DNOW_PLUS = 1 << 5;
+ static const long int CPU_FEATURE_IA64 = 1 << 6;
+ static const long int CPU_FEATURE_MP_CAPABLE = 1 << 7;
+ static const long int CPU_FEATURE_HYPERTHREAD = 1 << 8;
+ static const long int CPU_FEATURE_SERIALNUMBER = 1 << 9;
+ static const long int CPU_FEATURE_APIC = 1 << 10;
+ static const long int CPU_FEATURE_SSE_FP = 1 << 11;
+ static const long int CPU_FEATURE_SSE_MMX = 1 << 12;
+ static const long int CPU_FEATURE_CMOV = 1 << 13;
+ static const long int CPU_FEATURE_MTRR = 1 << 14;
+ static const long int CPU_FEATURE_L1CACHE = 1 << 15;
+ static const long int CPU_FEATURE_L2CACHE = 1 << 16;
+ static const long int CPU_FEATURE_L3CACHE = 1 << 17;
+ static const long int CPU_FEATURE_ACPI = 1 << 18;
+ static const long int CPU_FEATURE_THERMALMONITOR = 1 << 19;
+ static const long int CPU_FEATURE_TEMPSENSEDIODE = 1 << 20;
+ static const long int CPU_FEATURE_FREQUENCYID = 1 << 21;
+ static const long int CPU_FEATURE_VOLTAGEID_FREQUENCY = 1 << 22;
+ static const long int CPU_FEATURE_FPU = 1 << 23;
+
+public:
+ SystemInformation();
+ ~SystemInformation();
+
+ SystemInformation(const SystemInformation&) = delete;
+ SystemInformation& operator=(const SystemInformation&) = delete;
+
+ const char* GetVendorString();
+ const char* GetVendorID();
+ std::string GetTypeID();
+ std::string GetFamilyID();
+ std::string GetModelID();
+ std::string GetModelName();
+ std::string GetSteppingCode();
+ const char* GetExtendedProcessorName();
+ const char* GetProcessorSerialNumber();
+ int GetProcessorCacheSize();
+ unsigned int GetLogicalProcessorsPerPhysical();
+ float GetProcessorClockFrequency();
+ int GetProcessorAPICID();
+ int GetProcessorCacheXSize(long int);
+ bool DoesCPUSupportFeature(long int);
+
+ // returns an informative general description of the cpu
+ // on this system.
+ std::string GetCPUDescription();
+
+ const char* GetHostname();
+ std::string GetFullyQualifiedDomainName();
+
+ const char* GetOSName();
+ const char* GetOSRelease();
+ const char* GetOSVersion();
+ const char* GetOSPlatform();
+
+ int GetOSIsWindows();
+ int GetOSIsLinux();
+ int GetOSIsApple();
+
+ // returns an informative general description of the os
+ // on this system.
+ std::string GetOSDescription();
+
+ // returns if the operating system is 64bit or not.
+ bool Is64Bits();
+
+ unsigned int GetNumberOfLogicalCPU();
+ unsigned int GetNumberOfPhysicalCPU();
+
+ bool DoesCPUSupportCPUID();
+
+ // Retrieve id of the current running process
+ LongLong GetProcessId();
+
+ // Retrieve memory information in MiB.
+ size_t GetTotalVirtualMemory();
+ size_t GetAvailableVirtualMemory();
+ size_t GetTotalPhysicalMemory();
+ size_t GetAvailablePhysicalMemory();
+
+  // returns an informative general description of the installed and
+ // available ram on this system. See the GetHostMemoryTotal, and
+ // Get{Host,Proc}MemoryAvailable methods for more information.
+ std::string GetMemoryDescription(const char* hostLimitEnvVarName = nullptr,
+ const char* procLimitEnvVarName = nullptr);
+
+ // Retrieve amount of physical memory installed on the system in KiB
+ // units.
+ LongLong GetHostMemoryTotal();
+
+  // Get total system RAM in units of KiB available collectively to all
+  // processes in a process group. An example of a process group
+  // is the set of processes comprising an mpi program running in
+  // parallel. The amount of memory reported may differ from the host
+  // total if a host-wide resource limit is applied. Such resource limits
+ // are reported to us via an application specified environment variable.
+ LongLong GetHostMemoryAvailable(const char* hostLimitEnvVarName = nullptr);
+
+ // Get total system RAM in units of KiB available to this process.
+ // This may differ from the host available if a per-process resource
+  // limit is applied. Per-process memory limits are applied on unix
+  // systems via the rlimit API. Resource limits that are not imposed via
+ // rlimit API may be reported to us via an application specified
+ // environment variable.
+ LongLong GetProcMemoryAvailable(const char* hostLimitEnvVarName = nullptr,
+ const char* procLimitEnvVarName = nullptr);
+
+ // Get the system RAM used by all processes on the host, in units of KiB.
+ LongLong GetHostMemoryUsed();
+
+ // Get system RAM used by this process id in units of KiB.
+ LongLong GetProcMemoryUsed();
+
+ // Return the load average of the machine or -0.0 if it cannot
+ // be determined.
+ double GetLoadAverage();
+
+ // enable/disable stack trace signal handler. In order to
+ // produce an informative stack trace the application should
+ // be dynamically linked and compiled with debug symbols.
+ static void SetStackTraceOnError(int enable);
+
+ // format and return the current program stack in a string. In
+ // order to produce an informative stack trace the application
+ // should be dynamically linked and compiled with debug symbols.
+ static std::string GetProgramStack(int firstFrame, int wholePath);
+
+ /** Run the different checks */
+ void RunCPUCheck();
+ void RunOSCheck();
+ void RunMemoryCheck();
+};
+
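+// Usage sketch (illustrative only, compiled out): a typical caller runs
+// the checks once and then reads the cached results. Assumes <iostream>.
+#if 0
+inline void PrintBasicSystemInfo()
+{
+  SystemInformation info;
+  info.RunCPUCheck();
+  info.RunOSCheck();
+  info.RunMemoryCheck();
+  std::cout << info.GetOSDescription() << "\n"
+            << info.GetCPUDescription() << "\n"
+            << info.GetMemoryDescription() << std::endl;
+}
+#endif
+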
+} // namespace @KWSYS_NAMESPACE@
+
+#endif
diff --git a/test/API/driver/kwsys/SystemTools.cxx b/test/API/driver/kwsys/SystemTools.cxx
new file mode 100644
index 0000000..ce4d6ef
--- /dev/null
+++ b/test/API/driver/kwsys/SystemTools.cxx
@@ -0,0 +1,4703 @@
+/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying
+ file Copyright.txt or https://cmake.org/licensing#kwsys for details. */
+#ifdef __osf__
+# define _OSF_SOURCE
+# define _POSIX_C_SOURCE 199506L
+# define _XOPEN_SOURCE_EXTENDED
+#endif
+
+#if defined(_WIN32) && \
+ (defined(_MSC_VER) || defined(__WATCOMC__) || defined(__BORLANDC__) || \
+ defined(__MINGW32__))
+# define KWSYS_WINDOWS_DIRS
+#else
+# if defined(__SUNPRO_CC)
+# include <fcntl.h>
+# endif
+#endif
+
+#include "kwsysPrivate.h"
+#include KWSYS_HEADER(RegularExpression.hxx)
+#include KWSYS_HEADER(SystemTools.hxx)
+#include KWSYS_HEADER(Directory.hxx)
+#include KWSYS_HEADER(FStream.hxx)
+#include KWSYS_HEADER(Encoding.h)
+#include KWSYS_HEADER(Encoding.hxx)
+
+#include <fstream>
+#include <iostream>
+#include <set>
+#include <sstream>
+#include <utility>
+#include <vector>
+
+// Work-around CMake dependency scanning limitation. This must
+// duplicate the above list of headers.
+#if 0
+# include "Directory.hxx.in"
+# include "Encoding.hxx.in"
+# include "FStream.hxx.in"
+# include "RegularExpression.hxx.in"
+# include "SystemTools.hxx.in"
+#endif
+
+#ifdef _MSC_VER
+# pragma warning(disable : 4786)
+#endif
+
+#if defined(__sgi) && !defined(__GNUC__)
+# pragma set woff 1375 /* base class destructor not virtual */
+#endif
+
+#include <ctype.h>
+#include <errno.h>
+#ifdef __QNX__
+# include <malloc.h> /* for malloc/free on QNX */
+#endif
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <time.h>
+
+#if defined(_WIN32) && !defined(_MSC_VER) && defined(__GNUC__)
+# include <strings.h> /* for strcasecmp */
+#endif
+
+#ifdef _MSC_VER
+# define umask _umask // Note this is still umask on Borland
+#endif
+
+// support for realpath call
+#ifndef _WIN32
+# include <limits.h>
+# include <pwd.h>
+# include <sys/ioctl.h>
+# include <sys/time.h>
+# include <sys/wait.h>
+# include <unistd.h>
+# include <utime.h>
+# ifndef __VMS
+# include <sys/param.h>
+# include <termios.h>
+# endif
+# include <signal.h> /* sigprocmask */
+#endif
+
+#ifdef __linux
+# include <linux/fs.h>
+#endif
+
+// Windows API.
+#if defined(_WIN32)
+# include <windows.h>
+# include <winioctl.h>
+# ifndef INVALID_FILE_ATTRIBUTES
+# define INVALID_FILE_ATTRIBUTES ((DWORD)-1)
+# endif
+# if defined(_MSC_VER) && _MSC_VER >= 1800
+# define KWSYS_WINDOWS_DEPRECATED_GetVersionEx
+# endif
+#elif defined(__CYGWIN__)
+# include <windows.h>
+# undef _WIN32
+#endif
+
+#if !KWSYS_CXX_HAS_ENVIRON_IN_STDLIB_H
+extern char** environ;
+#endif
+
+#ifdef __CYGWIN__
+# include <sys/cygwin.h>
+#endif
+
+// getpwnam doesn't exist on Windows and Cray Xt3/Catamount
+// same for TIOCGWINSZ
+#if defined(_WIN32) || defined(__LIBCATAMOUNT__) || \
+ (defined(HAVE_GETPWNAM) && HAVE_GETPWNAM == 0)
+# undef HAVE_GETPWNAM
+# undef HAVE_TTY_INFO
+#else
+# define HAVE_GETPWNAM 1
+# define HAVE_TTY_INFO 1
+#endif
+
+#define VTK_URL_PROTOCOL_REGEX "([a-zA-Z0-9]*)://(.*)"
+#define VTK_URL_REGEX \
+ "([a-zA-Z0-9]*)://(([A-Za-z0-9]+)(:([^:@]+))?@)?([^:@/]+)(:([0-9]+))?/" \
+ "(.+)?"
+
+#ifdef _MSC_VER
+# include <sys/utime.h>
+#else
+# include <utime.h>
+#endif
+
+// This is a hack to prevent warnings about these functions being
+// declared but not referenced.
+#if defined(__sgi) && !defined(__GNUC__)
+# include <sys/termios.h>
+namespace KWSYS_NAMESPACE {
+class SystemToolsHack
+{
+public:
+ enum
+ {
+ Ref1 = sizeof(cfgetospeed(0)),
+ Ref2 = sizeof(cfgetispeed(0)),
+ Ref3 = sizeof(tcgetattr(0, 0)),
+ Ref4 = sizeof(tcsetattr(0, 0, 0)),
+ Ref5 = sizeof(cfsetospeed(0, 0)),
+ Ref6 = sizeof(cfsetispeed(0, 0))
+ };
+};
+}
+#endif
+
+#if defined(_WIN32) && \
+ (defined(_MSC_VER) || defined(__WATCOMC__) || defined(__BORLANDC__) || \
+ defined(__MINGW32__))
+# include <direct.h>
+# include <io.h>
+# define _unlink unlink
+#endif
+
+/* The maximum length of a file name. */
+#if defined(PATH_MAX)
+# define KWSYS_SYSTEMTOOLS_MAXPATH PATH_MAX
+#elif defined(MAXPATHLEN)
+# define KWSYS_SYSTEMTOOLS_MAXPATH MAXPATHLEN
+#else
+# define KWSYS_SYSTEMTOOLS_MAXPATH 16384
+#endif
+#if defined(__WATCOMC__)
+# include <direct.h>
+# define _mkdir mkdir
+# define _rmdir rmdir
+# define _getcwd getcwd
+# define _chdir chdir
+#endif
+
+#if defined(__BEOS__) && !defined(__ZETA__)
+# include <be/kernel/OS.h>
+# include <be/storage/Path.h>
+
+// BeOS 5 doesn't have usleep(), but it has snooze(), which is identical.
+static inline void usleep(unsigned int msec)
+{
+ ::snooze(msec);
+}
+
+// BeOS 5 also doesn't have realpath(), but its C++ API offers something close.
+static inline char* realpath(const char* path, char* resolved_path)
+{
+ const size_t maxlen = KWSYS_SYSTEMTOOLS_MAXPATH;
+ snprintf(resolved_path, maxlen, "%s", path);
+ BPath normalized(resolved_path, nullptr, true);
+ const char* resolved = normalized.Path();
+ if (resolved != nullptr) // nullptr == No such file.
+ {
+ if (snprintf(resolved_path, maxlen, "%s", resolved) < maxlen) {
+ return resolved_path;
+ }
+ }
+ return nullptr; // something went wrong.
+}
+#endif
+
+#ifdef _WIN32
+static time_t windows_filetime_to_posix_time(const FILETIME& ft)
+{
+ LARGE_INTEGER date;
+ date.HighPart = ft.dwHighDateTime;
+ date.LowPart = ft.dwLowDateTime;
+
+ // removes the diff between 1970 and 1601
+ date.QuadPart -= ((LONGLONG)(369 * 365 + 89) * 24 * 3600 * 10000000);
+
+ // converts back from 100-nanoseconds to seconds
+ return date.QuadPart / 10000000;
+}
+#endif
+
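+// Worked example (illustrative only, compiled out): the constant
+// subtracted above is the 1601-01-01 to 1970-01-01 span, i.e.
+// 369 * 365 + 89 = 134774 days = 11644473600 seconds, expressed in
+// FILETIME's 100-nanosecond ticks.
+#if 0
+static const long long kFiletimeToUnixEpochTicks =
+  (long long)(369 * 365 + 89) * 24 * 3600 * 10000000LL; // 116444736000000000
+#endif
+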
+#ifdef KWSYS_WINDOWS_DIRS
+# include <wctype.h>
+
+inline int Mkdir(const std::string& dir)
+{
+ return _wmkdir(
+ KWSYS_NAMESPACE::Encoding::ToWindowsExtendedPath(dir).c_str());
+}
+inline int Rmdir(const std::string& dir)
+{
+ return _wrmdir(
+ KWSYS_NAMESPACE::Encoding::ToWindowsExtendedPath(dir).c_str());
+}
+inline const char* Getcwd(char* buf, unsigned int len)
+{
+ std::vector<wchar_t> w_buf(len);
+ if (_wgetcwd(&w_buf[0], len)) {
+ size_t nlen = kwsysEncoding_wcstombs(buf, &w_buf[0], len);
+ if (nlen == static_cast<size_t>(-1)) {
+ return 0;
+ }
+ if (nlen < len) {
+ // make sure the drive letter is capital
+ if (nlen > 1 && buf[1] == ':') {
+ buf[0] = toupper(buf[0]);
+ }
+ return buf;
+ }
+ }
+ return 0;
+}
+inline int Chdir(const std::string& dir)
+{
+# if defined(__BORLANDC__)
+ return chdir(dir.c_str());
+# else
+ return _wchdir(KWSYS_NAMESPACE::Encoding::ToWide(dir).c_str());
+# endif
+}
+inline void Realpath(const std::string& path, std::string& resolved_path,
+ std::string* errorMessage = 0)
+{
+ std::wstring tmp = KWSYS_NAMESPACE::Encoding::ToWide(path);
+ wchar_t* ptemp;
+ wchar_t fullpath[MAX_PATH];
+ DWORD bufferLen = GetFullPathNameW(
+ tmp.c_str(), sizeof(fullpath) / sizeof(fullpath[0]), fullpath, &ptemp);
+ if (bufferLen < sizeof(fullpath) / sizeof(fullpath[0])) {
+ resolved_path = KWSYS_NAMESPACE::Encoding::ToNarrow(fullpath);
+ KWSYS_NAMESPACE::SystemTools::ConvertToUnixSlashes(resolved_path);
+ } else if (errorMessage) {
+ if (bufferLen) {
+ *errorMessage = "Destination path buffer size too small.";
+ } else if (unsigned int errorId = GetLastError()) {
+ LPSTR message = nullptr;
+ DWORD size = FormatMessageA(
+ FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM |
+ FORMAT_MESSAGE_IGNORE_INSERTS,
+ nullptr, errorId, MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT),
+ (LPSTR)&message, 0, nullptr);
+ *errorMessage = std::string(message, size);
+ LocalFree(message);
+ } else {
+ *errorMessage = "Unknown error.";
+ }
+
+ resolved_path = "";
+ } else {
+ resolved_path = path;
+ }
+}
+#else
+# include <sys/types.h>
+
+# include <fcntl.h>
+# include <unistd.h>
+inline int Mkdir(const std::string& dir)
+{
+ return mkdir(dir.c_str(), 00777);
+}
+inline int Rmdir(const std::string& dir)
+{
+ return rmdir(dir.c_str());
+}
+inline const char* Getcwd(char* buf, unsigned int len)
+{
+ return getcwd(buf, len);
+}
+
+inline int Chdir(const std::string& dir)
+{
+ return chdir(dir.c_str());
+}
+inline void Realpath(const std::string& path, std::string& resolved_path,
+ std::string* errorMessage = nullptr)
+{
+ char resolved_name[KWSYS_SYSTEMTOOLS_MAXPATH];
+
+ errno = 0;
+ char* ret = realpath(path.c_str(), resolved_name);
+ if (ret) {
+ resolved_path = ret;
+ } else if (errorMessage) {
+ if (errno) {
+ *errorMessage = strerror(errno);
+ } else {
+ *errorMessage = "Unknown error.";
+ }
+
+ resolved_path = "";
+ } else {
+ // if path resolution fails, return what was passed in
+ resolved_path = path;
+ }
+}
+#endif
+
+#if !defined(_WIN32) && defined(__COMO__)
+// Hack for como strict mode to avoid defining _SVID_SOURCE or _BSD_SOURCE.
+extern "C" {
+extern FILE* popen(__const char* __command, __const char* __modes) __THROW;
+extern int pclose(FILE* __stream) __THROW;
+extern char* realpath(__const char* __restrict __name,
+ char* __restrict __resolved) __THROW;
+extern char* strdup(__const char* __s) __THROW;
+extern int putenv(char* __string) __THROW;
+}
+#endif
+
+namespace KWSYS_NAMESPACE {
+
+double SystemTools::GetTime(void)
+{
+#if defined(_WIN32) && !defined(__CYGWIN__)
+ FILETIME ft;
+ GetSystemTimeAsFileTime(&ft);
+ return (429.4967296 * ft.dwHighDateTime + 0.0000001 * ft.dwLowDateTime -
+ 11644473600.0);
+#else
+ struct timeval t;
+ gettimeofday(&t, nullptr);
+ return 1.0 * double(t.tv_sec) + 0.000001 * double(t.tv_usec);
+#endif
+}
+
+/* Type of character storing the environment. */
+#if defined(_WIN32)
+typedef wchar_t envchar;
+#else
+typedef char envchar;
+#endif
+
+/* Order by environment key only (VAR from VAR=VALUE). */
+struct kwsysEnvCompare
+{
+ bool operator()(const envchar* l, const envchar* r) const
+ {
+#if defined(_WIN32)
+ const wchar_t* leq = wcschr(l, L'=');
+ const wchar_t* req = wcschr(r, L'=');
+ size_t llen = leq ? (leq - l) : wcslen(l);
+ size_t rlen = req ? (req - r) : wcslen(r);
+ if (llen == rlen) {
+ return wcsncmp(l, r, llen) < 0;
+ } else {
+ return wcscmp(l, r) < 0;
+ }
+#else
+ const char* leq = strchr(l, '=');
+ const char* req = strchr(r, '=');
+ size_t llen = leq ? static_cast<size_t>(leq - l) : strlen(l);
+ size_t rlen = req ? static_cast<size_t>(req - r) : strlen(r);
+ if (llen == rlen) {
+ return strncmp(l, r, llen) < 0;
+ } else {
+ return strcmp(l, r) < 0;
+ }
+#endif
+ }
+};
+
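+// Example (illustrative only, compiled out): because the comparison
+// stops at '=', "FOO=1" and "FOO=2" are equivalent keys, so inserting
+// the second into a set ordered by kwsysEnvCompare finds the first
+// instead of adding a new element.
+#if 0
+static bool SameEnvKey(const envchar* a, const envchar* b)
+{
+  kwsysEnvCompare cmp;
+  return !cmp(a, b) && !cmp(b, a); // equivalence under the ordering
+}
+#endif
+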
+class kwsysEnvSet : public std::set<const envchar*, kwsysEnvCompare>
+{
+public:
+ class Free
+ {
+ const envchar* Env;
+
+ public:
+ Free(const envchar* env)
+ : Env(env)
+ {
+ }
+ ~Free() { free(const_cast<envchar*>(this->Env)); }
+
+ Free(const Free&) = delete;
+ Free& operator=(const Free&) = delete;
+ };
+
+ const envchar* Release(const envchar* env)
+ {
+ const envchar* old = nullptr;
+ iterator i = this->find(env);
+ if (i != this->end()) {
+ old = *i;
+ this->erase(i);
+ }
+ return old;
+ }
+};
+
+#ifdef _WIN32
+struct SystemToolsPathCaseCmp
+{
+ bool operator()(std::string const& l, std::string const& r) const
+ {
+# ifdef _MSC_VER
+ return _stricmp(l.c_str(), r.c_str()) < 0;
+# elif defined(__GNUC__)
+ return strcasecmp(l.c_str(), r.c_str()) < 0;
+# else
+ return SystemTools::Strucmp(l.c_str(), r.c_str()) < 0;
+# endif
+ }
+};
+#endif
+
+/**
+ * SystemTools static variables singleton class.
+ */
+class SystemToolsStatic
+{
+public:
+ typedef std::map<std::string, std::string> StringMap;
+#if KWSYS_SYSTEMTOOLS_USE_TRANSLATION_MAP
+ /**
+ * Path translation table from dir to refdir
+ * Each time 'dir' will be found it will be replace by 'refdir'
+ */
+ StringMap TranslationMap;
+#endif
+#ifdef _WIN32
+ static std::string GetCasePathName(std::string const& pathIn);
+ static std::string GetActualCaseForPathCached(std::string const& path);
+ static const char* GetEnvBuffered(const char* key);
+ std::map<std::string, std::string, SystemToolsPathCaseCmp> PathCaseMap;
+ std::map<std::string, std::string> EnvMap;
+#endif
+#ifdef __CYGWIN__
+ StringMap Cyg2Win32Map;
+#endif
+
+ /**
+ * Actual implementation of ReplaceString.
+ */
+ static void ReplaceString(std::string& source, const char* replace,
+ size_t replaceSize, const std::string& with);
+
+ /**
+ * Actual implementation of FileIsFullPath.
+ */
+ static bool FileIsFullPath(const char*, size_t);
+
+ /**
+ * Find a filename (file or directory) in the system PATH, with
+ * optional extra paths.
+ */
+ static std::string FindName(
+ const std::string& name,
+ const std::vector<std::string>& path = std::vector<std::string>(),
+ bool no_system_path = false);
+};
+
+#ifdef _WIN32
+std::string SystemToolsStatic::GetCasePathName(std::string const& pathIn)
+{
+ std::string casePath;
+
+ // First check if the file is relative. We don't fix relative paths since the
+ // real case depends on the root directory and the given path fragment may
+ // have meaning elsewhere in the project.
+ if (!SystemTools::FileIsFullPath(pathIn)) {
+ // This looks unnecessary, but it allows for the return value optimization
+ // since all return paths return the same local variable.
+ casePath = pathIn;
+ return casePath;
+ }
+
+ std::vector<std::string> path_components;
+ SystemTools::SplitPath(pathIn, path_components);
+
+ // Start with root component.
+ std::vector<std::string>::size_type idx = 0;
+ casePath = path_components[idx++];
+ // make sure drive letter is always upper case
+ if (casePath.size() > 1 && casePath[1] == ':') {
+ casePath[0] = toupper(casePath[0]);
+ }
+ const char* sep = "";
+
+ // If network path, fill casePath with server/share so FindFirstFile
+ // will work after that. Maybe someday call other APIs to get
+ // actual case of servers and shares.
+ if (path_components.size() > 2 && path_components[0] == "//") {
+ casePath += path_components[idx++];
+ casePath += "/";
+ casePath += path_components[idx++];
+ sep = "/";
+ }
+
+ // Convert case of all components that exist.
+ bool converting = true;
+ for (; idx < path_components.size(); idx++) {
+ casePath += sep;
+ sep = "/";
+
+ if (converting) {
+ // If path component contains wildcards, we skip matching
+ // because these filenames are not allowed on windows,
+ // and we do not want to match a different file.
+ if (path_components[idx].find('*') != std::string::npos ||
+ path_components[idx].find('?') != std::string::npos) {
+ converting = false;
+ } else {
+ std::string test_str = casePath;
+ test_str += path_components[idx];
+ WIN32_FIND_DATAW findData;
+ HANDLE hFind =
+ ::FindFirstFileW(Encoding::ToWide(test_str).c_str(), &findData);
+ if (INVALID_HANDLE_VALUE != hFind) {
+ path_components[idx] = Encoding::ToNarrow(findData.cFileName);
+ ::FindClose(hFind);
+ } else {
+ converting = false;
+ }
+ }
+ }
+
+ casePath += path_components[idx];
+ }
+ return casePath;
+}
+
+std::string SystemToolsStatic::GetActualCaseForPathCached(std::string const& p)
+{
+ // Check to see if actual case has already been called
+ // for this path, and the result is stored in the PathCaseMap
+ auto& pcm = SystemTools::Statics->PathCaseMap;
+ {
+ auto itr = pcm.find(p);
+ if (itr != pcm.end()) {
+ return itr->second;
+ }
+ }
+ std::string casePath = SystemToolsStatic::GetCasePathName(p);
+ if (casePath.size() <= MAX_PATH) {
+ pcm[p] = casePath;
+ }
+ return casePath;
+}
+#endif
+
+// adds the elements of the env variable path to the arg passed in
+void SystemTools::GetPath(std::vector<std::string>& path, const char* env)
+{
+ size_t const old_size = path.size();
+#if defined(_WIN32) && !defined(__CYGWIN__)
+ const char pathSep = ';';
+#else
+ const char pathSep = ':';
+#endif
+ if (!env) {
+ env = "PATH";
+ }
+ std::string pathEnv;
+ if (!SystemTools::GetEnv(env, pathEnv)) {
+ return;
+ }
+
+ // A hack to make the below algorithm work.
+ if (!pathEnv.empty() && pathEnv.back() != pathSep) {
+ pathEnv += pathSep;
+ }
+ std::string::size_type start = 0;
+ bool done = false;
+ while (!done) {
+ std::string::size_type endpos = pathEnv.find(pathSep, start);
+ if (endpos != std::string::npos) {
+ path.push_back(pathEnv.substr(start, endpos - start));
+ start = endpos + 1;
+ } else {
+ done = true;
+ }
+ }
+ for (std::vector<std::string>::iterator i = path.begin() + old_size;
+ i != path.end(); ++i) {
+ SystemTools::ConvertToUnixSlashes(*i);
+ }
+}
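+
+// Illustrative usage sketch (not part of the original sources; "MANPATH" is just
+// an example of another PATH-like variable):
+//   std::vector<std::string> dirs;
+//   SystemTools::GetPath(dirs, "PATH");    // appends each entry of PATH, slashes normalized
+//   SystemTools::GetPath(dirs, "MANPATH"); // works the same way for any PATH-style variable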
+
+#if defined(_WIN32)
+const char* SystemToolsStatic::GetEnvBuffered(const char* key)
+{
+ std::string env;
+ if (SystemTools::GetEnv(key, env)) {
+ std::string& menv = SystemTools::Statics->EnvMap[key];
+ if (menv != env) {
+ menv = std::move(env);
+ }
+ return menv.c_str();
+ }
+ return nullptr;
+}
+#endif
+
+const char* SystemTools::GetEnv(const char* key)
+{
+#if defined(_WIN32)
+ return SystemToolsStatic::GetEnvBuffered(key);
+#else
+ return getenv(key);
+#endif
+}
+
+const char* SystemTools::GetEnv(const std::string& key)
+{
+#if defined(_WIN32)
+ return SystemToolsStatic::GetEnvBuffered(key.c_str());
+#else
+ return getenv(key.c_str());
+#endif
+}
+
+bool SystemTools::GetEnv(const char* key, std::string& result)
+{
+#if defined(_WIN32)
+ const std::wstring wkey = Encoding::ToWide(key);
+ const wchar_t* wv = _wgetenv(wkey.c_str());
+ if (wv) {
+ result = Encoding::ToNarrow(wv);
+ return true;
+ }
+#else
+ const char* v = getenv(key);
+ if (v) {
+ result = v;
+ return true;
+ }
+#endif
+ return false;
+}
+
+bool SystemTools::GetEnv(const std::string& key, std::string& result)
+{
+ return SystemTools::GetEnv(key.c_str(), result);
+}
+
+bool SystemTools::HasEnv(const char* key)
+{
+#if defined(_WIN32)
+ const std::wstring wkey = Encoding::ToWide(key);
+ const wchar_t* v = _wgetenv(wkey.c_str());
+#else
+ const char* v = getenv(key);
+#endif
+ return v != nullptr;
+}
+
+bool SystemTools::HasEnv(const std::string& key)
+{
+ return SystemTools::HasEnv(key.c_str());
+}
+
+#if KWSYS_CXX_HAS_UNSETENV
+/* unsetenv("A") removes A from the environment.
+ On older platforms it returns void instead of int. */
+static int kwsysUnPutEnv(const std::string& env)
+{
+ size_t pos = env.find('=');
+ if (pos != std::string::npos) {
+ std::string name = env.substr(0, pos);
+ unsetenv(name.c_str());
+ } else {
+ unsetenv(env.c_str());
+ }
+ return 0;
+}
+
+#elif defined(__CYGWIN__) || defined(__GLIBC__)
+/* putenv("A") removes A from the environment. It must not put the
+ memory in the environment because it does not have any "=" syntax. */
+static int kwsysUnPutEnv(const std::string& env)
+{
+ int err = 0;
+ size_t pos = env.find('=');
+ size_t const len = pos == std::string::npos ? env.size() : pos;
+ size_t const sz = len + 1;
+ char local_buf[256];
+ char* buf = sz > sizeof(local_buf) ? (char*)malloc(sz) : local_buf;
+ if (!buf) {
+ return -1;
+ }
+ strncpy(buf, env.c_str(), len);
+ buf[len] = 0;
+ if (putenv(buf) < 0 && errno != EINVAL) {
+ err = errno;
+ }
+ if (buf != local_buf) {
+ free(buf);
+ }
+ if (err) {
+ errno = err;
+ return -1;
+ }
+ return 0;
+}
+
+#elif defined(_WIN32)
+/* putenv("A=") places "A=" in the environment, which is as close to
+ removal as we can get with the putenv API. We have to leak the
+ most recent value placed in the environment for each variable name
+ on program exit in case exit routines access it. */
+
+static kwsysEnvSet kwsysUnPutEnvSet;
+
+static int kwsysUnPutEnv(std::string const& env)
+{
+ std::wstring wEnv = Encoding::ToWide(env);
+ size_t const pos = wEnv.find('=');
+ size_t const len = pos == std::string::npos ? wEnv.size() : pos;
+ wEnv.resize(len + 1, L'=');
+ wchar_t* newEnv = _wcsdup(wEnv.c_str());
+ if (!newEnv) {
+ return -1;
+ }
+ kwsysEnvSet::Free oldEnv(kwsysUnPutEnvSet.Release(newEnv));
+ kwsysUnPutEnvSet.insert(newEnv);
+ return _wputenv(newEnv);
+}
+
+#else
+/* Manipulate the "environ" global directly. */
+static int kwsysUnPutEnv(const std::string& env)
+{
+ size_t pos = env.find('=');
+ size_t const len = pos == std::string::npos ? env.size() : pos;
+ int in = 0;
+ int out = 0;
+ while (environ[in]) {
+ if (strlen(environ[in]) > len && environ[in][len] == '=' &&
+ strncmp(env.c_str(), environ[in], len) == 0) {
+ ++in;
+ } else {
+ environ[out++] = environ[in++];
+ }
+ }
+ while (out < in) {
+ environ[out++] = 0;
+ }
+ return 0;
+}
+#endif
+
+#if KWSYS_CXX_HAS_SETENV
+
+/* setenv("A", "B", 1) will set A=B in the environment and makes its
+ own copies of the strings. */
+bool SystemTools::PutEnv(const std::string& env)
+{
+ size_t pos = env.find('=');
+ if (pos != std::string::npos) {
+ std::string name = env.substr(0, pos);
+ return setenv(name.c_str(), env.c_str() + pos + 1, 1) == 0;
+ } else {
+ return kwsysUnPutEnv(env) == 0;
+ }
+}
+
+bool SystemTools::UnPutEnv(const std::string& env)
+{
+ return kwsysUnPutEnv(env) == 0;
+}
+
+#else
+
+/* putenv("A=B") will set A=B in the environment. Most putenv implementations
+ put their argument directly in the environment. They never free the memory
+ on program exit. Keep an active set of pointers to memory we allocate and
+ pass to putenv, one per environment key. At program exit remove any
+ environment values that may still reference memory we allocated. Then free
+ the memory. This will not affect any environment values we never set. */
+
+# ifdef __INTEL_COMPILER
+# pragma warning disable 444 /* base has non-virtual destructor */
+# endif
+
+class kwsysEnv : public kwsysEnvSet
+{
+public:
+ ~kwsysEnv()
+ {
+ for (iterator i = this->begin(); i != this->end(); ++i) {
+# if defined(_WIN32)
+ const std::string s = Encoding::ToNarrow(*i);
+ kwsysUnPutEnv(s);
+# else
+ kwsysUnPutEnv(*i);
+# endif
+ free(const_cast<envchar*>(*i));
+ }
+ }
+ bool Put(const char* env)
+ {
+# if defined(_WIN32)
+ const std::wstring wEnv = Encoding::ToWide(env);
+ wchar_t* newEnv = _wcsdup(wEnv.c_str());
+# else
+ char* newEnv = strdup(env);
+# endif
+ Free oldEnv(this->Release(newEnv));
+ this->insert(newEnv);
+# if defined(_WIN32)
+ return _wputenv(newEnv) == 0;
+# else
+ return putenv(newEnv) == 0;
+# endif
+ }
+ bool UnPut(const char* env)
+ {
+# if defined(_WIN32)
+ const std::wstring wEnv = Encoding::ToWide(env);
+ Free oldEnv(this->Release(wEnv.c_str()));
+# else
+ Free oldEnv(this->Release(env));
+# endif
+ return kwsysUnPutEnv(env) == 0;
+ }
+};
+
+static kwsysEnv kwsysEnvInstance;
+
+bool SystemTools::PutEnv(const std::string& env)
+{
+ return kwsysEnvInstance.Put(env.c_str());
+}
+
+bool SystemTools::UnPutEnv(const std::string& env)
+{
+ return kwsysEnvInstance.UnPut(env.c_str());
+}
+
+#endif
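+
+// Illustrative usage sketch (not part of the original sources; "MY_FLAG" is a
+// made-up variable name):
+//   SystemTools::PutEnv("MY_FLAG=1");   // sets MY_FLAG=1 in the environment
+//   SystemTools::UnPutEnv("MY_FLAG");   // removes MY_FLAG again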
+
+const char* SystemTools::GetExecutableExtension()
+{
+#if defined(_WIN32) || defined(__CYGWIN__) || defined(__VMS)
+ return ".exe";
+#else
+ return "";
+#endif
+}
+
+FILE* SystemTools::Fopen(const std::string& file, const char* mode)
+{
+#ifdef _WIN32
+ return _wfopen(Encoding::ToWindowsExtendedPath(file).c_str(),
+ Encoding::ToWide(mode).c_str());
+#else
+ return fopen(file.c_str(), mode);
+#endif
+}
+
+bool SystemTools::MakeDirectory(const char* path, const mode_t* mode)
+{
+ if (!path) {
+ return false;
+ }
+ return SystemTools::MakeDirectory(std::string(path), mode);
+}
+
+bool SystemTools::MakeDirectory(const std::string& path, const mode_t* mode)
+{
+ if (SystemTools::PathExists(path)) {
+ return SystemTools::FileIsDirectory(path);
+ }
+ if (path.empty()) {
+ return false;
+ }
+ std::string dir = path;
+ SystemTools::ConvertToUnixSlashes(dir);
+
+ std::string::size_type pos = 0;
+ std::string topdir;
+ while ((pos = dir.find('/', pos)) != std::string::npos) {
+ topdir = dir.substr(0, pos);
+
+ if (Mkdir(topdir) == 0 && mode != nullptr) {
+ SystemTools::SetPermissions(topdir, *mode);
+ }
+
+ ++pos;
+ }
+ topdir = dir;
+ if (Mkdir(topdir) != 0) {
+    // There is a bug in the Borland run-time library that makes mkdir
+    // return EACCES when it should return EEXIST.
+    // If the error is anything other than "directory already exists",
+    // return false.
+ if ((errno != EEXIST)
+#ifdef __BORLANDC__
+ && (errno != EACCES)
+#endif
+ ) {
+ return false;
+ }
+ } else if (mode != nullptr) {
+ SystemTools::SetPermissions(topdir, *mode);
+ }
+
+ return true;
+}
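+
+// Illustrative usage sketch (not part of the original sources; the path is
+// hypothetical):
+//   mode_t mode = 0755;
+//   SystemTools::MakeDirectory("build/output/logs", &mode); // behaves like `mkdir -p`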
+
+// Replace every occurrence of 'replace' in 'source' with 'with',
+// writing the result back into 'source'.
+void SystemTools::ReplaceString(std::string& source,
+ const std::string& replace,
+ const std::string& with)
+{
+ // do while hangs if replaceSize is 0
+ if (replace.empty()) {
+ return;
+ }
+
+ SystemToolsStatic::ReplaceString(source, replace.c_str(), replace.size(),
+ with);
+}
+
+void SystemTools::ReplaceString(std::string& source, const char* replace,
+ const char* with)
+{
+ // do while hangs if replaceSize is 0
+ if (!*replace) {
+ return;
+ }
+
+ SystemToolsStatic::ReplaceString(source, replace, strlen(replace),
+ with ? with : "");
+}
+
+void SystemToolsStatic::ReplaceString(std::string& source, const char* replace,
+ size_t replaceSize,
+ const std::string& with)
+{
+ const char* src = source.c_str();
+ char* searchPos = const_cast<char*>(strstr(src, replace));
+
+ // get out quick if string is not found
+ if (!searchPos) {
+ return;
+ }
+
+ // perform replacements until done
+ char* orig = strdup(src);
+ char* currentPos = orig;
+ searchPos = searchPos - src + orig;
+
+ // initialize the result
+ source.erase(source.begin(), source.end());
+ do {
+ *searchPos = '\0';
+ source += currentPos;
+ currentPos = searchPos + replaceSize;
+ // replace
+ source += with;
+ searchPos = strstr(currentPos, replace);
+ } while (searchPos);
+
+ // copy any trailing text
+ source += currentPos;
+ free(orig);
+}
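+
+// Illustrative usage sketch (not part of the original sources):
+//   std::string s = "a//b//c";
+//   SystemTools::ReplaceString(s, "//", "/"); // s becomes "a/b/c"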
+
+#if defined(_WIN32) && !defined(__CYGWIN__)
+
+# if defined(KEY_WOW64_32KEY) && defined(KEY_WOW64_64KEY)
+# define KWSYS_ST_KEY_WOW64_32KEY KEY_WOW64_32KEY
+# define KWSYS_ST_KEY_WOW64_64KEY KEY_WOW64_64KEY
+# else
+# define KWSYS_ST_KEY_WOW64_32KEY 0x0200
+# define KWSYS_ST_KEY_WOW64_64KEY 0x0100
+# endif
+
+static bool SystemToolsParseRegistryKey(const std::string& key,
+ HKEY& primaryKey, std::string& second,
+ std::string& valuename)
+{
+ std::string primary = key;
+
+ size_t start = primary.find('\\');
+ if (start == std::string::npos) {
+ return false;
+ }
+
+ size_t valuenamepos = primary.find(';');
+ if (valuenamepos != std::string::npos) {
+ valuename = primary.substr(valuenamepos + 1);
+ }
+
+ second = primary.substr(start + 1, valuenamepos - start - 1);
+ primary = primary.substr(0, start);
+
+ if (primary == "HKEY_CURRENT_USER") {
+ primaryKey = HKEY_CURRENT_USER;
+ }
+ if (primary == "HKEY_CURRENT_CONFIG") {
+ primaryKey = HKEY_CURRENT_CONFIG;
+ }
+ if (primary == "HKEY_CLASSES_ROOT") {
+ primaryKey = HKEY_CLASSES_ROOT;
+ }
+ if (primary == "HKEY_LOCAL_MACHINE") {
+ primaryKey = HKEY_LOCAL_MACHINE;
+ }
+ if (primary == "HKEY_USERS") {
+ primaryKey = HKEY_USERS;
+ }
+
+ return true;
+}
+
+static DWORD SystemToolsMakeRegistryMode(DWORD mode,
+ SystemTools::KeyWOW64 view)
+{
+ // only add the modes when on a system that supports Wow64.
+ static FARPROC wow64p =
+ GetProcAddress(GetModuleHandleW(L"kernel32"), "IsWow64Process");
+ if (wow64p == nullptr) {
+ return mode;
+ }
+
+ if (view == SystemTools::KeyWOW64_32) {
+ return mode | KWSYS_ST_KEY_WOW64_32KEY;
+ } else if (view == SystemTools::KeyWOW64_64) {
+ return mode | KWSYS_ST_KEY_WOW64_64KEY;
+ }
+ return mode;
+}
+#endif
+
+#if defined(_WIN32) && !defined(__CYGWIN__)
+bool SystemTools::GetRegistrySubKeys(const std::string& key,
+ std::vector<std::string>& subkeys,
+ KeyWOW64 view)
+{
+ HKEY primaryKey = HKEY_CURRENT_USER;
+ std::string second;
+ std::string valuename;
+ if (!SystemToolsParseRegistryKey(key, primaryKey, second, valuename)) {
+ return false;
+ }
+
+ HKEY hKey;
+ if (RegOpenKeyExW(primaryKey, Encoding::ToWide(second).c_str(), 0,
+ SystemToolsMakeRegistryMode(KEY_READ, view),
+ &hKey) != ERROR_SUCCESS) {
+ return false;
+ } else {
+ wchar_t name[1024];
+ DWORD dwNameSize = sizeof(name) / sizeof(name[0]);
+
+ DWORD i = 0;
+ while (RegEnumKeyW(hKey, i, name, dwNameSize) == ERROR_SUCCESS) {
+ subkeys.push_back(Encoding::ToNarrow(name));
+ ++i;
+ }
+
+ RegCloseKey(hKey);
+ }
+
+ return true;
+}
+#else
+bool SystemTools::GetRegistrySubKeys(const std::string&,
+ std::vector<std::string>&, KeyWOW64)
+{
+ return false;
+}
+#endif
+
+// Read a registry value.
+// Example :
+// HKEY_LOCAL_MACHINE\SOFTWARE\Python\PythonCore\2.1\InstallPath
+// => will return the data of the "default" value of the key
+// HKEY_LOCAL_MACHINE\SOFTWARE\Scriptics\Tcl\8.4;Root
+// => will return the data of the "Root" value of the key
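+//
+// Illustrative usage sketch (not part of the original sources; the key below is
+// only an example and need not exist on a given machine; assumes the default
+// KeyWOW64 view argument declared in the header):
+//   std::string root;
+//   if (SystemTools::ReadRegistryValue(
+//         "HKEY_LOCAL_MACHINE\\SOFTWARE\\Scriptics\\Tcl\\8.4;Root", root)) {
+//     // "root" now holds the data of the "Root" value
+//   }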
+
+#if defined(_WIN32) && !defined(__CYGWIN__)
+bool SystemTools::ReadRegistryValue(const std::string& key, std::string& value,
+ KeyWOW64 view)
+{
+ bool valueset = false;
+ HKEY primaryKey = HKEY_CURRENT_USER;
+ std::string second;
+ std::string valuename;
+ if (!SystemToolsParseRegistryKey(key, primaryKey, second, valuename)) {
+ return false;
+ }
+
+ HKEY hKey;
+ if (RegOpenKeyExW(primaryKey, Encoding::ToWide(second).c_str(), 0,
+ SystemToolsMakeRegistryMode(KEY_READ, view),
+ &hKey) != ERROR_SUCCESS) {
+ return false;
+ } else {
+ DWORD dwType, dwSize;
+ dwSize = 1023;
+ wchar_t data[1024];
+ if (RegQueryValueExW(hKey, Encoding::ToWide(valuename).c_str(), nullptr,
+ &dwType, (BYTE*)data, &dwSize) == ERROR_SUCCESS) {
+ if (dwType == REG_SZ) {
+ value = Encoding::ToNarrow(data);
+ valueset = true;
+ } else if (dwType == REG_EXPAND_SZ) {
+ wchar_t expanded[1024];
+ DWORD dwExpandedSize = sizeof(expanded) / sizeof(expanded[0]);
+ if (ExpandEnvironmentStringsW(data, expanded, dwExpandedSize)) {
+ value = Encoding::ToNarrow(expanded);
+ valueset = true;
+ }
+ }
+ }
+
+ RegCloseKey(hKey);
+ }
+
+ return valueset;
+}
+#else
+bool SystemTools::ReadRegistryValue(const std::string&, std::string&, KeyWOW64)
+{
+ return false;
+}
+#endif
+
+// Write a registry value.
+// Example :
+// HKEY_LOCAL_MACHINE\SOFTWARE\Python\PythonCore\2.1\InstallPath
+// => will set the data of the "default" value of the key
+// HKEY_LOCAL_MACHINE\SOFTWARE\Scriptics\Tcl\8.4;Root
+// => will set the data of the "Root" value of the key
+
+#if defined(_WIN32) && !defined(__CYGWIN__)
+bool SystemTools::WriteRegistryValue(const std::string& key,
+ const std::string& value, KeyWOW64 view)
+{
+ HKEY primaryKey = HKEY_CURRENT_USER;
+ std::string second;
+ std::string valuename;
+ if (!SystemToolsParseRegistryKey(key, primaryKey, second, valuename)) {
+ return false;
+ }
+
+ HKEY hKey;
+ DWORD dwDummy;
+ wchar_t lpClass[] = L"";
+ if (RegCreateKeyExW(primaryKey, Encoding::ToWide(second).c_str(), 0, lpClass,
+ REG_OPTION_NON_VOLATILE,
+ SystemToolsMakeRegistryMode(KEY_WRITE, view), nullptr,
+ &hKey, &dwDummy) != ERROR_SUCCESS) {
+ return false;
+ }
+
+ std::wstring wvalue = Encoding::ToWide(value);
+ if (RegSetValueExW(hKey, Encoding::ToWide(valuename).c_str(), 0, REG_SZ,
+ (CONST BYTE*)wvalue.c_str(),
+ (DWORD)(sizeof(wchar_t) * (wvalue.size() + 1))) ==
+ ERROR_SUCCESS) {
+ return true;
+ }
+ return false;
+}
+#else
+bool SystemTools::WriteRegistryValue(const std::string&, const std::string&,
+ KeyWOW64)
+{
+ return false;
+}
+#endif
+
+// Delete a registry value.
+// Example :
+// HKEY_LOCAL_MACHINE\SOFTWARE\Python\PythonCore\2.1\InstallPath
+// => will delete the data of the "default" value of the key
+// HKEY_LOCAL_MACHINE\SOFTWARE\Scriptics\Tcl\8.4;Root
+// => will delete the data of the "Root" value of the key
+
+#if defined(_WIN32) && !defined(__CYGWIN__)
+bool SystemTools::DeleteRegistryValue(const std::string& key, KeyWOW64 view)
+{
+ HKEY primaryKey = HKEY_CURRENT_USER;
+ std::string second;
+ std::string valuename;
+ if (!SystemToolsParseRegistryKey(key, primaryKey, second, valuename)) {
+ return false;
+ }
+
+ HKEY hKey;
+ if (RegOpenKeyExW(primaryKey, Encoding::ToWide(second).c_str(), 0,
+ SystemToolsMakeRegistryMode(KEY_WRITE, view),
+ &hKey) != ERROR_SUCCESS) {
+ return false;
+ } else {
+ if (RegDeleteValue(hKey, (LPTSTR)valuename.c_str()) == ERROR_SUCCESS) {
+ RegCloseKey(hKey);
+ return true;
+ }
+ }
+ return false;
+}
+#else
+bool SystemTools::DeleteRegistryValue(const std::string&, KeyWOW64)
+{
+ return false;
+}
+#endif
+
+bool SystemTools::SameFile(const std::string& file1, const std::string& file2)
+{
+#ifdef _WIN32
+ HANDLE hFile1, hFile2;
+
+ hFile1 =
+ CreateFileW(Encoding::ToWide(file1).c_str(), GENERIC_READ, FILE_SHARE_READ,
+ nullptr, OPEN_EXISTING, FILE_FLAG_BACKUP_SEMANTICS, nullptr);
+ hFile2 =
+ CreateFileW(Encoding::ToWide(file2).c_str(), GENERIC_READ, FILE_SHARE_READ,
+ nullptr, OPEN_EXISTING, FILE_FLAG_BACKUP_SEMANTICS, nullptr);
+ if (hFile1 == INVALID_HANDLE_VALUE || hFile2 == INVALID_HANDLE_VALUE) {
+ if (hFile1 != INVALID_HANDLE_VALUE) {
+ CloseHandle(hFile1);
+ }
+ if (hFile2 != INVALID_HANDLE_VALUE) {
+ CloseHandle(hFile2);
+ }
+ return false;
+ }
+
+ BY_HANDLE_FILE_INFORMATION fiBuf1;
+ BY_HANDLE_FILE_INFORMATION fiBuf2;
+ GetFileInformationByHandle(hFile1, &fiBuf1);
+ GetFileInformationByHandle(hFile2, &fiBuf2);
+ CloseHandle(hFile1);
+ CloseHandle(hFile2);
+ return (fiBuf1.dwVolumeSerialNumber == fiBuf2.dwVolumeSerialNumber &&
+ fiBuf1.nFileIndexHigh == fiBuf2.nFileIndexHigh &&
+ fiBuf1.nFileIndexLow == fiBuf2.nFileIndexLow);
+#else
+ struct stat fileStat1, fileStat2;
+ if (stat(file1.c_str(), &fileStat1) == 0 &&
+ stat(file2.c_str(), &fileStat2) == 0) {
+ // see if the files are the same file
+ // check the device inode and size
+ if (memcmp(&fileStat2.st_dev, &fileStat1.st_dev,
+ sizeof(fileStat1.st_dev)) == 0 &&
+ memcmp(&fileStat2.st_ino, &fileStat1.st_ino,
+ sizeof(fileStat1.st_ino)) == 0 &&
+ fileStat2.st_size == fileStat1.st_size) {
+ return true;
+ }
+ }
+ return false;
+#endif
+}
+
+bool SystemTools::PathExists(const std::string& path)
+{
+ if (path.empty()) {
+ return false;
+ }
+#if defined(__CYGWIN__)
+ // Convert path to native windows path if possible.
+ char winpath[MAX_PATH];
+ if (SystemTools::PathCygwinToWin32(path.c_str(), winpath)) {
+ return (GetFileAttributesA(winpath) != INVALID_FILE_ATTRIBUTES);
+ }
+ struct stat st;
+ return lstat(path.c_str(), &st) == 0;
+#elif defined(_WIN32)
+ return (GetFileAttributesW(Encoding::ToWindowsExtendedPath(path).c_str()) !=
+ INVALID_FILE_ATTRIBUTES);
+#else
+ struct stat st;
+ return lstat(path.c_str(), &st) == 0;
+#endif
+}
+
+bool SystemTools::FileExists(const char* filename)
+{
+ if (!filename) {
+ return false;
+ }
+ return SystemTools::FileExists(std::string(filename));
+}
+
+bool SystemTools::FileExists(const std::string& filename)
+{
+ if (filename.empty()) {
+ return false;
+ }
+#if defined(__CYGWIN__)
+ // Convert filename to native windows path if possible.
+ char winpath[MAX_PATH];
+ if (SystemTools::PathCygwinToWin32(filename.c_str(), winpath)) {
+ return (GetFileAttributesA(winpath) != INVALID_FILE_ATTRIBUTES);
+ }
+ return access(filename.c_str(), R_OK) == 0;
+#elif defined(_WIN32)
+ DWORD attr =
+ GetFileAttributesW(Encoding::ToWindowsExtendedPath(filename).c_str());
+ if (attr == INVALID_FILE_ATTRIBUTES) {
+ return false;
+ }
+
+ if (attr & FILE_ATTRIBUTE_REPARSE_POINT) {
+ // Using 0 instead of GENERIC_READ as it allows reading of file attributes
+ // even if we do not have permission to read the file itself
+ HANDLE handle =
+ CreateFileW(Encoding::ToWindowsExtendedPath(filename).c_str(), 0, 0,
+ nullptr, OPEN_EXISTING, FILE_FLAG_BACKUP_SEMANTICS, nullptr);
+
+ if (handle == INVALID_HANDLE_VALUE) {
+ return false;
+ }
+
+ CloseHandle(handle);
+ }
+
+ return true;
+#else
+// SCO OpenServer 5.0.7/3.2's command has 711 permission.
+# if defined(_SCO_DS)
+ return access(filename.c_str(), F_OK) == 0;
+# else
+ return access(filename.c_str(), R_OK) == 0;
+# endif
+#endif
+}
+
+bool SystemTools::FileExists(const char* filename, bool isFile)
+{
+ if (!filename) {
+ return false;
+ }
+ return SystemTools::FileExists(std::string(filename), isFile);
+}
+
+bool SystemTools::FileExists(const std::string& filename, bool isFile)
+{
+ if (SystemTools::FileExists(filename)) {
+ // If isFile is set return not FileIsDirectory,
+ // so this will only be true if it is a file
+ return !isFile || !SystemTools::FileIsDirectory(filename);
+ }
+ return false;
+}
+
+bool SystemTools::TestFileAccess(const char* filename,
+ TestFilePermissions permissions)
+{
+ if (!filename) {
+ return false;
+ }
+ return SystemTools::TestFileAccess(std::string(filename), permissions);
+}
+
+bool SystemTools::TestFileAccess(const std::string& filename,
+ TestFilePermissions permissions)
+{
+ if (filename.empty()) {
+ return false;
+ }
+#if defined(_WIN32) && !defined(__CYGWIN__)
+ // If execute set, change to read permission (all files on Windows
+ // are executable if they are readable). The CRT will always fail
+ // if you pass an execute bit.
+ if (permissions & TEST_FILE_EXECUTE) {
+ permissions &= ~TEST_FILE_EXECUTE;
+ permissions |= TEST_FILE_READ;
+ }
+ return _waccess(Encoding::ToWindowsExtendedPath(filename).c_str(),
+ permissions) == 0;
+#else
+ return access(filename.c_str(), permissions) == 0;
+#endif
+}
+
+int SystemTools::Stat(const char* path, SystemTools::Stat_t* buf)
+{
+ if (!path) {
+ errno = EFAULT;
+ return -1;
+ }
+ return SystemTools::Stat(std::string(path), buf);
+}
+
+int SystemTools::Stat(const std::string& path, SystemTools::Stat_t* buf)
+{
+ if (path.empty()) {
+ errno = ENOENT;
+ return -1;
+ }
+#if defined(_WIN32) && !defined(__CYGWIN__)
+ // Ideally we should use Encoding::ToWindowsExtendedPath to support
+ // long paths, but _wstat64 rejects paths with '?' in them, thinking
+ // they are wildcards.
+ std::wstring const& wpath = Encoding::ToWide(path);
+# if defined(__BORLANDC__)
+ return _wstati64(wpath.c_str(), buf);
+# else
+ return _wstat64(wpath.c_str(), buf);
+# endif
+#else
+ return stat(path.c_str(), buf);
+#endif
+}
+
+#ifdef __CYGWIN__
+bool SystemTools::PathCygwinToWin32(const char* path, char* win32_path)
+{
+ auto itr = SystemTools::Statics->Cyg2Win32Map.find(path);
+ if (itr != SystemTools::Statics->Cyg2Win32Map.end()) {
+ strncpy(win32_path, itr->second.c_str(), MAX_PATH);
+ } else {
+ if (cygwin_conv_path(CCP_POSIX_TO_WIN_A, path, win32_path, MAX_PATH) !=
+ 0) {
+ win32_path[0] = 0;
+ }
+ SystemTools::Statics->Cyg2Win32Map.insert(
+ SystemToolsStatic::StringMap::value_type(path, win32_path));
+ }
+ return win32_path[0] != 0;
+}
+#endif
+
+bool SystemTools::Touch(const std::string& filename, bool create)
+{
+ if (!SystemTools::PathExists(filename)) {
+ if (create) {
+ FILE* file = Fopen(filename, "a+b");
+ if (file) {
+ fclose(file);
+ return true;
+ }
+ return false;
+ } else {
+ return true;
+ }
+ }
+#if defined(_WIN32) && !defined(__CYGWIN__)
+ HANDLE h = CreateFileW(Encoding::ToWindowsExtendedPath(filename).c_str(),
+ FILE_WRITE_ATTRIBUTES, FILE_SHARE_WRITE, 0,
+ OPEN_EXISTING, FILE_FLAG_BACKUP_SEMANTICS, 0);
+  if (h == INVALID_HANDLE_VALUE) {
+ return false;
+ }
+ FILETIME mtime;
+ GetSystemTimeAsFileTime(&mtime);
+ if (!SetFileTime(h, 0, 0, &mtime)) {
+ CloseHandle(h);
+ return false;
+ }
+ CloseHandle(h);
+#elif KWSYS_CXX_HAS_UTIMENSAT
+ // utimensat is only available on newer Unixes and macOS 10.13+
+ if (utimensat(AT_FDCWD, filename.c_str(), nullptr, 0) < 0) {
+ return false;
+ }
+#else
+ // fall back to utimes
+ if (utimes(filename.c_str(), nullptr) < 0) {
+ return false;
+ }
+#endif
+ return true;
+}
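+
+// Illustrative usage sketch (not part of the original sources; "stamp.txt" is a
+// made-up filename):
+//   SystemTools::Touch("stamp.txt", true);  // create it if missing, else update its mtime
+//   SystemTools::Touch("stamp.txt", false); // update the mtime only if it already exists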
+
+bool SystemTools::FileTimeCompare(const std::string& f1, const std::string& f2,
+ int* result)
+{
+ // Default to same time.
+ *result = 0;
+#if !defined(_WIN32) || defined(__CYGWIN__)
+ // POSIX version. Use stat function to get file modification time.
+ struct stat s1;
+ if (stat(f1.c_str(), &s1) != 0) {
+ return false;
+ }
+ struct stat s2;
+ if (stat(f2.c_str(), &s2) != 0) {
+ return false;
+ }
+# if KWSYS_CXX_STAT_HAS_ST_MTIM
+ // Compare using nanosecond resolution.
+ if (s1.st_mtim.tv_sec < s2.st_mtim.tv_sec) {
+ *result = -1;
+ } else if (s1.st_mtim.tv_sec > s2.st_mtim.tv_sec) {
+ *result = 1;
+ } else if (s1.st_mtim.tv_nsec < s2.st_mtim.tv_nsec) {
+ *result = -1;
+ } else if (s1.st_mtim.tv_nsec > s2.st_mtim.tv_nsec) {
+ *result = 1;
+ }
+# elif KWSYS_CXX_STAT_HAS_ST_MTIMESPEC
+ // Compare using nanosecond resolution.
+ if (s1.st_mtimespec.tv_sec < s2.st_mtimespec.tv_sec) {
+ *result = -1;
+ } else if (s1.st_mtimespec.tv_sec > s2.st_mtimespec.tv_sec) {
+ *result = 1;
+ } else if (s1.st_mtimespec.tv_nsec < s2.st_mtimespec.tv_nsec) {
+ *result = -1;
+ } else if (s1.st_mtimespec.tv_nsec > s2.st_mtimespec.tv_nsec) {
+ *result = 1;
+ }
+# else
+ // Compare using 1 second resolution.
+ if (s1.st_mtime < s2.st_mtime) {
+ *result = -1;
+ } else if (s1.st_mtime > s2.st_mtime) {
+ *result = 1;
+ }
+# endif
+#else
+ // Windows version. Get the modification time from extended file attributes.
+ WIN32_FILE_ATTRIBUTE_DATA f1d;
+ WIN32_FILE_ATTRIBUTE_DATA f2d;
+ if (!GetFileAttributesExW(Encoding::ToWindowsExtendedPath(f1).c_str(),
+ GetFileExInfoStandard, &f1d)) {
+ return false;
+ }
+ if (!GetFileAttributesExW(Encoding::ToWindowsExtendedPath(f2).c_str(),
+ GetFileExInfoStandard, &f2d)) {
+ return false;
+ }
+
+ // Compare the file times using resolution provided by system call.
+ *result = (int)CompareFileTime(&f1d.ftLastWriteTime, &f2d.ftLastWriteTime);
+#endif
+ return true;
+}
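+
+// Illustrative usage sketch (not part of the original sources; the filenames are
+// made up):
+//   int result = 0;
+//   if (SystemTools::FileTimeCompare("a.o", "a.c", &result) && result < 0) {
+//     // "a.o" is older than "a.c"
+//   }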
+
+// Return a capitalized string (i.e. the first letter is uppercased, all others
+// are lowercased)
+std::string SystemTools::Capitalized(const std::string& s)
+{
+ std::string n;
+ if (s.empty()) {
+ return n;
+ }
+ n.resize(s.size());
+ n[0] = static_cast<std::string::value_type>(toupper(s[0]));
+ for (size_t i = 1; i < s.size(); i++) {
+ n[i] = static_cast<std::string::value_type>(tolower(s[i]));
+ }
+ return n;
+}
+
+// Return capitalized words
+std::string SystemTools::CapitalizedWords(const std::string& s)
+{
+ std::string n(s);
+ for (size_t i = 0; i < s.size(); i++) {
+#if defined(_MSC_VER) && defined(_MT) && defined(_DEBUG)
+ // MS has an assert that will fail if s[i] < 0; setting
+ // LC_CTYPE using setlocale() does *not* help. Painful.
+ if ((int)s[i] >= 0 && isalpha(s[i]) &&
+ (i == 0 || ((int)s[i - 1] >= 0 && isspace(s[i - 1]))))
+#else
+ if (isalpha(s[i]) && (i == 0 || isspace(s[i - 1])))
+#endif
+ {
+ n[i] = static_cast<std::string::value_type>(toupper(s[i]));
+ }
+ }
+ return n;
+}
+
+// Return uncapitalized words
+std::string SystemTools::UnCapitalizedWords(const std::string& s)
+{
+ std::string n(s);
+ for (size_t i = 0; i < s.size(); i++) {
+#if defined(_MSC_VER) && defined(_MT) && defined(_DEBUG)
+ // MS has an assert that will fail if s[i] < 0; setting
+ // LC_CTYPE using setlocale() does *not* help. Painful.
+ if ((int)s[i] >= 0 && isalpha(s[i]) &&
+ (i == 0 || ((int)s[i - 1] >= 0 && isspace(s[i - 1]))))
+#else
+ if (isalpha(s[i]) && (i == 0 || isspace(s[i - 1])))
+#endif
+ {
+ n[i] = static_cast<std::string::value_type>(tolower(s[i]));
+ }
+ }
+ return n;
+}
+
+// only works for words with at least two letters
+std::string SystemTools::AddSpaceBetweenCapitalizedWords(const std::string& s)
+{
+ std::string n;
+ if (!s.empty()) {
+ n.reserve(s.size());
+ n += s[0];
+ for (size_t i = 1; i < s.size(); i++) {
+ if (isupper(s[i]) && !isspace(s[i - 1]) && !isupper(s[i - 1])) {
+ n += ' ';
+ }
+ n += s[i];
+ }
+ }
+ return n;
+}
+
+char* SystemTools::AppendStrings(const char* str1, const char* str2)
+{
+ if (!str1) {
+ return SystemTools::DuplicateString(str2);
+ }
+ if (!str2) {
+ return SystemTools::DuplicateString(str1);
+ }
+ size_t len1 = strlen(str1);
+ char* newstr = new char[len1 + strlen(str2) + 1];
+ if (!newstr) {
+ return nullptr;
+ }
+ strcpy(newstr, str1);
+ strcat(newstr + len1, str2);
+ return newstr;
+}
+
+char* SystemTools::AppendStrings(const char* str1, const char* str2,
+ const char* str3)
+{
+ if (!str1) {
+ return SystemTools::AppendStrings(str2, str3);
+ }
+ if (!str2) {
+ return SystemTools::AppendStrings(str1, str3);
+ }
+ if (!str3) {
+ return SystemTools::AppendStrings(str1, str2);
+ }
+
+ size_t len1 = strlen(str1), len2 = strlen(str2);
+ char* newstr = new char[len1 + len2 + strlen(str3) + 1];
+ if (!newstr) {
+ return nullptr;
+ }
+ strcpy(newstr, str1);
+ strcat(newstr + len1, str2);
+ strcat(newstr + len1 + len2, str3);
+ return newstr;
+}
+
+// Return a lower case string
+std::string SystemTools::LowerCase(const std::string& s)
+{
+ std::string n;
+ n.resize(s.size());
+ for (size_t i = 0; i < s.size(); i++) {
+ n[i] = static_cast<std::string::value_type>(tolower(s[i]));
+ }
+ return n;
+}
+
+// Return an upper case string
+std::string SystemTools::UpperCase(const std::string& s)
+{
+ std::string n;
+ n.resize(s.size());
+ for (size_t i = 0; i < s.size(); i++) {
+ n[i] = static_cast<std::string::value_type>(toupper(s[i]));
+ }
+ return n;
+}
+
+// Count char in string
+size_t SystemTools::CountChar(const char* str, char c)
+{
+ size_t count = 0;
+
+ if (str) {
+ while (*str) {
+ if (*str == c) {
+ ++count;
+ }
+ ++str;
+ }
+ }
+ return count;
+}
+
+// Remove chars in string
+char* SystemTools::RemoveChars(const char* str, const char* toremove)
+{
+ if (!str) {
+ return nullptr;
+ }
+ char* clean_str = new char[strlen(str) + 1];
+ char* ptr = clean_str;
+ while (*str) {
+ const char* str2 = toremove;
+ while (*str2 && *str != *str2) {
+ ++str2;
+ }
+ if (!*str2) {
+ *ptr++ = *str;
+ }
+ ++str;
+ }
+ *ptr = '\0';
+ return clean_str;
+}
+
+// Remove chars in string
+char* SystemTools::RemoveCharsButUpperHex(const char* str)
+{
+ if (!str) {
+ return nullptr;
+ }
+ char* clean_str = new char[strlen(str) + 1];
+ char* ptr = clean_str;
+ while (*str) {
+ if ((*str >= '0' && *str <= '9') || (*str >= 'A' && *str <= 'F')) {
+ *ptr++ = *str;
+ }
+ ++str;
+ }
+ *ptr = '\0';
+ return clean_str;
+}
+
+// Replace chars in string
+char* SystemTools::ReplaceChars(char* str, const char* toreplace,
+ char replacement)
+{
+ if (str) {
+ char* ptr = str;
+ while (*ptr) {
+ const char* ptr2 = toreplace;
+ while (*ptr2) {
+ if (*ptr == *ptr2) {
+ *ptr = replacement;
+ }
+ ++ptr2;
+ }
+ ++ptr;
+ }
+ }
+ return str;
+}
+
+// Returns true if str1 starts with str2
+bool SystemTools::StringStartsWith(const char* str1, const char* str2)
+{
+ if (!str1 || !str2) {
+ return false;
+ }
+ size_t len1 = strlen(str1), len2 = strlen(str2);
+ return len1 >= len2 && !strncmp(str1, str2, len2) ? true : false;
+}
+
+// Returns true if str1 starts with str2
+bool SystemTools::StringStartsWith(const std::string& str1, const char* str2)
+{
+ if (!str2) {
+ return false;
+ }
+ size_t len1 = str1.size(), len2 = strlen(str2);
+ return len1 >= len2 && !strncmp(str1.c_str(), str2, len2) ? true : false;
+}
+
+// Returns true if str1 ends with str2
+bool SystemTools::StringEndsWith(const char* str1, const char* str2)
+{
+ if (!str1 || !str2) {
+ return false;
+ }
+ size_t len1 = strlen(str1), len2 = strlen(str2);
+ return len1 >= len2 && !strncmp(str1 + (len1 - len2), str2, len2) ? true
+ : false;
+}
+
+// Returns true if str1 ends with str2
+bool SystemTools::StringEndsWith(const std::string& str1, const char* str2)
+{
+ if (!str2) {
+ return false;
+ }
+ size_t len1 = str1.size(), len2 = strlen(str2);
+ return len1 >= len2 && !strncmp(str1.c_str() + (len1 - len2), str2, len2)
+ ? true
+ : false;
+}
+
+// Returns a pointer to the last occurrence of str2 in str1
+const char* SystemTools::FindLastString(const char* str1, const char* str2)
+{
+ if (!str1 || !str2) {
+ return nullptr;
+ }
+
+ size_t len1 = strlen(str1), len2 = strlen(str2);
+ if (len1 >= len2) {
+ const char* ptr = str1 + len1 - len2;
+ do {
+ if (!strncmp(ptr, str2, len2)) {
+ return ptr;
+ }
+ } while (ptr-- != str1);
+ }
+
+ return nullptr;
+}
+
+// Duplicate string
+char* SystemTools::DuplicateString(const char* str)
+{
+ if (str) {
+ char* newstr = new char[strlen(str) + 1];
+ return strcpy(newstr, str);
+ }
+ return nullptr;
+}
+
+// Return a cropped string
+std::string SystemTools::CropString(const std::string& s, size_t max_len)
+{
+ if (!s.size() || max_len == 0 || max_len >= s.size()) {
+ return s;
+ }
+
+ std::string n;
+ n.reserve(max_len);
+
+ size_t middle = max_len / 2;
+
+ n += s.substr(0, middle);
+ n += s.substr(s.size() - (max_len - middle));
+
+ if (max_len > 2) {
+ n[middle] = '.';
+ if (max_len > 3) {
+ n[middle - 1] = '.';
+ if (max_len > 4) {
+ n[middle + 1] = '.';
+ }
+ }
+ }
+
+ return n;
+}
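+
+// Illustrative usage sketch (not part of the original sources):
+//   SystemTools::CropString("abcdefghij", 7); // yields "ab...ij" - the middle is elided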
+
+std::vector<std::string> SystemTools::SplitString(const std::string& p,
+ char sep, bool isPath)
+{
+ std::string path = p;
+ std::vector<std::string> paths;
+ if (path.empty()) {
+ return paths;
+ }
+ if (isPath && path[0] == '/') {
+ path.erase(path.begin());
+ paths.push_back("/");
+ }
+ std::string::size_type pos1 = 0;
+ std::string::size_type pos2 = path.find(sep, pos1 + 1);
+ while (pos2 != std::string::npos) {
+ paths.push_back(path.substr(pos1, pos2 - pos1));
+ pos1 = pos2 + 1;
+ pos2 = path.find(sep, pos1 + 1);
+ }
+ paths.push_back(path.substr(pos1, pos2 - pos1));
+
+ return paths;
+}
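+
+// Illustrative usage sketch (not part of the original sources):
+//   std::vector<std::string> parts =
+//     SystemTools::SplitString("/usr/local/bin", '/', true);
+//   // parts holds "/", "usr", "local", "bin"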
+
+int SystemTools::EstimateFormatLength(const char* format, va_list ap)
+{
+ if (!format) {
+ return 0;
+ }
+
+ // Quick-hack attempt at estimating the length of the string.
+ // Should never under-estimate.
+
+ // Start with the length of the format string itself.
+
+ size_t length = strlen(format);
+
+ // Increase the length for every argument in the format.
+
+ const char* cur = format;
+ while (*cur) {
+ if (*cur++ == '%') {
+ // Skip "%%" since it doesn't correspond to a va_arg.
+ if (*cur != '%') {
+ while (!int(isalpha(*cur))) {
+ ++cur;
+ }
+ switch (*cur) {
+ case 's': {
+ // Check the length of the string.
+ char* s = va_arg(ap, char*);
+ if (s) {
+ length += strlen(s);
+ }
+ } break;
+ case 'e':
+ case 'f':
+ case 'g': {
+ // Assume the argument contributes no more than 64 characters.
+ length += 64;
+
+ // Eat the argument.
+ static_cast<void>(va_arg(ap, double));
+ } break;
+ default: {
+ // Assume the argument contributes no more than 64 characters.
+ length += 64;
+
+ // Eat the argument.
+ static_cast<void>(va_arg(ap, int));
+ } break;
+ }
+ }
+
+ // Move past the characters just tested.
+ ++cur;
+ }
+ }
+
+ return static_cast<int>(length);
+}
+
+std::string SystemTools::EscapeChars(const char* str,
+ const char* chars_to_escape,
+ char escape_char)
+{
+ std::string n;
+ if (str) {
+ if (!chars_to_escape || !*chars_to_escape) {
+ n.append(str);
+ } else {
+ n.reserve(strlen(str));
+ while (*str) {
+ const char* ptr = chars_to_escape;
+ while (*ptr) {
+ if (*str == *ptr) {
+ n += escape_char;
+ break;
+ }
+ ++ptr;
+ }
+ n += *str;
+ ++str;
+ }
+ }
+ }
+ return n;
+}
+
+#ifdef __VMS
+static void ConvertVMSToUnix(std::string& path)
+{
+ std::string::size_type rootEnd = path.find(":[");
+ std::string::size_type pathEnd = path.find("]");
+ if (rootEnd != std::string::npos) {
+ std::string root = path.substr(0, rootEnd);
+ std::string pathPart = path.substr(rootEnd + 2, pathEnd - rootEnd - 2);
+ const char* pathCString = pathPart.c_str();
+ const char* pos0 = pathCString;
+ for (std::string::size_type pos = 0; *pos0; ++pos) {
+ if (*pos0 == '.') {
+ pathPart[pos] = '/';
+ }
+ pos0++;
+ }
+ path = "/" + root + "/" + pathPart;
+ }
+}
+#endif
+
+// convert windows slashes to unix slashes
+void SystemTools::ConvertToUnixSlashes(std::string& path)
+{
+ if (path.empty()) {
+ return;
+ }
+
+ const char* pathCString = path.c_str();
+ bool hasDoubleSlash = false;
+#ifdef __VMS
+ ConvertVMSToUnix(path);
+#else
+ const char* pos0 = pathCString;
+ for (std::string::size_type pos = 0; *pos0; ++pos) {
+ if (*pos0 == '\\') {
+ path[pos] = '/';
+ }
+
+ // Also, reuse the loop to check for slash followed by another slash
+ if (!hasDoubleSlash && *(pos0 + 1) == '/' && *(pos0 + 2) == '/') {
+# ifdef _WIN32
+ // However, on windows if the first characters are both slashes,
+ // then keep them that way, so that network paths can be handled.
+ if (pos > 0) {
+ hasDoubleSlash = true;
+ }
+# else
+ hasDoubleSlash = true;
+# endif
+ }
+
+ pos0++;
+ }
+
+ if (hasDoubleSlash) {
+ SystemTools::ReplaceString(path, "//", "/");
+ }
+#endif
+
+  // if there is a leading tilde (~) then replace it with HOME;
+  // any trailing slash is removed further below
+ pathCString = path.c_str();
+ if (pathCString[0] == '~' &&
+ (pathCString[1] == '/' || pathCString[1] == '\0')) {
+ std::string homeEnv;
+ if (SystemTools::GetEnv("HOME", homeEnv)) {
+ path.replace(0, 1, homeEnv);
+ }
+ }
+#ifdef HAVE_GETPWNAM
+ else if (pathCString[0] == '~') {
+ std::string::size_type idx = path.find_first_of("/\0");
+ std::string user = path.substr(1, idx - 1);
+ passwd* pw = getpwnam(user.c_str());
+ if (pw) {
+ path.replace(0, idx, pw->pw_dir);
+ }
+ }
+#endif
+ // remove trailing slash if the path is more than
+ // a single /
+ pathCString = path.c_str();
+ size_t size = path.size();
+ if (size > 1 && path.back() == '/') {
+ // if it is c:/ then do not remove the trailing slash
+ if (!((size == 3 && pathCString[1] == ':'))) {
+ path.resize(size - 1);
+ }
+ }
+}
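+
+// Illustrative usage sketch (not part of the original sources; the path is made up):
+//   std::string p = "C:\\Users\\me\\proj\\";
+//   SystemTools::ConvertToUnixSlashes(p); // p becomes "C:/Users/me/proj"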
+
+#ifdef _WIN32
+std::wstring SystemTools::ConvertToWindowsExtendedPath(
+ const std::string& source)
+{
+ return Encoding::ToWindowsExtendedPath(source);
+}
+#endif
+
+// change // to /, and escape any spaces in the path
+std::string SystemTools::ConvertToUnixOutputPath(const std::string& path)
+{
+ std::string ret = path;
+
+  // remove "//" except at the beginning, where it might be a cygwin drive
+ std::string::size_type pos = 1;
+ while ((pos = ret.find("//", pos)) != std::string::npos) {
+ ret.erase(pos, 1);
+ }
+  // escape spaces in the path
+ if (ret.find_first_of(" ") != std::string::npos) {
+ std::string result;
+ char lastch = 1;
+ for (const char* ch = ret.c_str(); *ch != '\0'; ++ch) {
+ // if it is already escaped then don't try to escape it again
+ if ((*ch == ' ') && lastch != '\\') {
+ result += '\\';
+ }
+ result += *ch;
+ lastch = *ch;
+ }
+ ret = result;
+ }
+ return ret;
+}
+
+std::string SystemTools::ConvertToOutputPath(const std::string& path)
+{
+#if defined(_WIN32) && !defined(__CYGWIN__)
+ return SystemTools::ConvertToWindowsOutputPath(path);
+#else
+ return SystemTools::ConvertToUnixOutputPath(path);
+#endif
+}
+
+// remove double slashes not at the start
+std::string SystemTools::ConvertToWindowsOutputPath(const std::string& path)
+{
+ std::string ret;
+ // make it big enough for all of path and double quotes
+ ret.reserve(path.size() + 3);
+ // put path into the string
+ ret = path;
+ std::string::size_type pos = 0;
+ // first convert all of the slashes
+ while ((pos = ret.find('/', pos)) != std::string::npos) {
+ ret[pos] = '\\';
+ pos++;
+ }
+ // check for really small paths
+ if (ret.size() < 2) {
+ return ret;
+ }
+  // now clean up a bit and remove double slashes, but not at the first
+  // position in the path, since that may be a network path on windows
+ pos = 1; // start at position 1
+ if (ret[0] == '\"') {
+ pos = 2; // if the string is already quoted then start at 2
+ if (ret.size() < 3) {
+ return ret;
+ }
+ }
+ while ((pos = ret.find("\\\\", pos)) != std::string::npos) {
+ ret.erase(pos, 1);
+ }
+ // now double quote the path if it has spaces in it
+ // and is not already double quoted
+ if (ret.find(' ') != std::string::npos && ret[0] != '\"') {
+ ret.insert(static_cast<std::string::size_type>(0),
+ static_cast<std::string::size_type>(1), '\"');
+ ret.append(1, '\"');
+ }
+ return ret;
+}
+
+/**
+ * Append the filename from the path source to the directory name dir.
+ */
+static std::string FileInDir(const std::string& source, const std::string& dir)
+{
+ std::string new_destination = dir;
+ SystemTools::ConvertToUnixSlashes(new_destination);
+ return new_destination + '/' + SystemTools::GetFilenameName(source);
+}
+
+bool SystemTools::CopyFileIfDifferent(const std::string& source,
+ const std::string& destination)
+{
+ // special check for a destination that is a directory
+ // FilesDiffer does not handle file to directory compare
+ if (SystemTools::FileIsDirectory(destination)) {
+ const std::string new_destination = FileInDir(source, destination);
+ return SystemTools::CopyFileIfDifferent(source, new_destination);
+ }
+ // source and destination are files so do a copy if they
+ // are different
+ if (SystemTools::FilesDiffer(source, destination)) {
+ return SystemTools::CopyFileAlways(source, destination);
+ }
+ // at this point the files must be the same so return true
+ return true;
+}
+
+#define KWSYS_ST_BUFFER 4096
+
+bool SystemTools::FilesDiffer(const std::string& source,
+ const std::string& destination)
+{
+
+#if defined(_WIN32)
+ WIN32_FILE_ATTRIBUTE_DATA statSource;
+ if (GetFileAttributesExW(Encoding::ToWindowsExtendedPath(source).c_str(),
+ GetFileExInfoStandard, &statSource) == 0) {
+ return true;
+ }
+
+ WIN32_FILE_ATTRIBUTE_DATA statDestination;
+ if (GetFileAttributesExW(
+ Encoding::ToWindowsExtendedPath(destination).c_str(),
+ GetFileExInfoStandard, &statDestination) == 0) {
+ return true;
+ }
+
+ if (statSource.nFileSizeHigh != statDestination.nFileSizeHigh ||
+ statSource.nFileSizeLow != statDestination.nFileSizeLow) {
+ return true;
+ }
+
+ if (statSource.nFileSizeHigh == 0 && statSource.nFileSizeLow == 0) {
+ return false;
+ }
+ off_t nleft =
+ ((__int64)statSource.nFileSizeHigh << 32) + statSource.nFileSizeLow;
+
+#else
+
+ struct stat statSource;
+ if (stat(source.c_str(), &statSource) != 0) {
+ return true;
+ }
+
+ struct stat statDestination;
+ if (stat(destination.c_str(), &statDestination) != 0) {
+ return true;
+ }
+
+ if (statSource.st_size != statDestination.st_size) {
+ return true;
+ }
+
+ if (statSource.st_size == 0) {
+ return false;
+ }
+ off_t nleft = statSource.st_size;
+#endif
+
+#if defined(_WIN32)
+ kwsys::ifstream finSource(source.c_str(), (std::ios::binary | std::ios::in));
+ kwsys::ifstream finDestination(destination.c_str(),
+ (std::ios::binary | std::ios::in));
+#else
+ kwsys::ifstream finSource(source.c_str());
+ kwsys::ifstream finDestination(destination.c_str());
+#endif
+ if (!finSource || !finDestination) {
+ return true;
+ }
+
+ // Compare the files a block at a time.
+ char source_buf[KWSYS_ST_BUFFER];
+ char dest_buf[KWSYS_ST_BUFFER];
+ while (nleft > 0) {
+ // Read a block from each file.
+ std::streamsize nnext = (nleft > KWSYS_ST_BUFFER)
+ ? KWSYS_ST_BUFFER
+ : static_cast<std::streamsize>(nleft);
+ finSource.read(source_buf, nnext);
+ finDestination.read(dest_buf, nnext);
+
+ // If either failed to read assume they are different.
+ if (static_cast<std::streamsize>(finSource.gcount()) != nnext ||
+ static_cast<std::streamsize>(finDestination.gcount()) != nnext) {
+ return true;
+ }
+
+ // If this block differs the file differs.
+ if (memcmp(static_cast<const void*>(source_buf),
+ static_cast<const void*>(dest_buf),
+ static_cast<size_t>(nnext)) != 0) {
+ return true;
+ }
+
+ // Update the byte count remaining.
+ nleft -= nnext;
+ }
+
+ // No differences found.
+ return false;
+}
+
+bool SystemTools::TextFilesDiffer(const std::string& path1,
+ const std::string& path2)
+{
+ kwsys::ifstream if1(path1.c_str());
+ kwsys::ifstream if2(path2.c_str());
+ if (!if1 || !if2) {
+ return true;
+ }
+
+ for (;;) {
+ std::string line1, line2;
+ bool hasData1 = GetLineFromStream(if1, line1);
+ bool hasData2 = GetLineFromStream(if2, line2);
+ if (hasData1 != hasData2) {
+ return true;
+ }
+ if (!hasData1) {
+ break;
+ }
+ if (line1 != line2) {
+ return true;
+ }
+ }
+ return false;
+}
+
+/**
+ * Blockwise copy source to destination file
+ */
+static bool CopyFileContentBlockwise(const std::string& source,
+ const std::string& destination)
+{
+// Open files
+#if defined(_WIN32)
+ kwsys::ifstream fin(
+ Encoding::ToNarrow(Encoding::ToWindowsExtendedPath(source)).c_str(),
+ std::ios::in | std::ios::binary);
+#else
+ kwsys::ifstream fin(source.c_str(), std::ios::in | std::ios::binary);
+#endif
+ if (!fin) {
+ return false;
+ }
+
+  // try to remove the destination file so that read only destination files
+ // can be written to.
+ // If the remove fails continue so that files in read only directories
+ // that do not allow file removal can be modified.
+ SystemTools::RemoveFile(destination);
+
+#if defined(_WIN32)
+ kwsys::ofstream fout(
+ Encoding::ToNarrow(Encoding::ToWindowsExtendedPath(destination)).c_str(),
+ std::ios::out | std::ios::trunc | std::ios::binary);
+#else
+ kwsys::ofstream fout(destination.c_str(),
+ std::ios::out | std::ios::trunc | std::ios::binary);
+#endif
+ if (!fout) {
+ return false;
+ }
+
+ // This copy loop is very sensitive on certain platforms with
+ // slightly broken stream libraries (like HPUX). Normally, it is
+ // incorrect to not check the error condition on the fin.read()
+ // before using the data, but the fin.gcount() will be zero if an
+ // error occurred. Therefore, the loop should be safe everywhere.
+ while (fin) {
+ const int bufferSize = 4096;
+ char buffer[bufferSize];
+
+ fin.read(buffer, bufferSize);
+ if (fin.gcount()) {
+ fout.write(buffer, fin.gcount());
+ } else {
+ break;
+ }
+ }
+
+ // Make sure the operating system has finished writing the file
+ // before closing it. This will ensure the file is finished before
+ // the check below.
+ fout.flush();
+
+ fin.close();
+ fout.close();
+
+ if (!fout) {
+ return false;
+ }
+
+ return true;
+}
+
+/**
+ * Clone the source file to the destination file
+ *
+ * If available, the Linux FICLONE ioctl is used to create a
+ * copy-on-write clone of the source file.
+ *
+ * The method returns false for the following cases:
+ * - The code has not been compiled on Linux or the ioctl was unknown
+ * - The source and destination are on different file systems
+ * - The underlying filesystem does not support file cloning
+ * - An unspecified error occurred
+ */
+static bool CloneFileContent(const std::string& source,
+ const std::string& destination)
+{
+#if defined(__linux) && defined(FICLONE)
+ int in = open(source.c_str(), O_RDONLY);
+ if (in < 0) {
+ return false;
+ }
+
+ SystemTools::RemoveFile(destination);
+
+ int out =
+ open(destination.c_str(), O_WRONLY | O_CREAT | O_TRUNC, S_IRUSR | S_IWUSR);
+ if (out < 0) {
+ close(in);
+ return false;
+ }
+
+ int result = ioctl(out, FICLONE, in);
+ close(in);
+ close(out);
+
+ if (result < 0) {
+ return false;
+ }
+
+ return true;
+#else
+ (void)source;
+ (void)destination;
+ return false;
+#endif
+}
+
+/**
+ * Copy a file named by "source" to the file named by "destination".
+ */
+bool SystemTools::CopyFileAlways(const std::string& source,
+ const std::string& destination)
+{
+ mode_t perm = 0;
+ bool perms = SystemTools::GetPermissions(source, perm);
+ std::string real_destination = destination;
+
+ if (SystemTools::FileIsDirectory(source)) {
+ SystemTools::MakeDirectory(destination);
+ } else {
+ // If destination is a directory, try to create a file with the same
+ // name as the source in that directory.
+
+ std::string destination_dir;
+ if (SystemTools::FileIsDirectory(destination)) {
+ destination_dir = real_destination;
+ SystemTools::ConvertToUnixSlashes(real_destination);
+ real_destination += '/';
+ std::string source_name = source;
+ real_destination += SystemTools::GetFilenameName(source_name);
+ } else {
+ destination_dir = SystemTools::GetFilenamePath(destination);
+ }
+ // If files are the same do not copy
+ if (SystemTools::SameFile(source, real_destination)) {
+ return true;
+ }
+
+ // Create destination directory
+
+ SystemTools::MakeDirectory(destination_dir);
+
+ if (!CloneFileContent(source, real_destination)) {
+ // if cloning did not succeed, fall back to blockwise copy
+ if (!CopyFileContentBlockwise(source, real_destination)) {
+ return false;
+ }
+ }
+ }
+ if (perms) {
+ if (!SystemTools::SetPermissions(real_destination, perm)) {
+ return false;
+ }
+ }
+ return true;
+}
+
+bool SystemTools::CopyAFile(const std::string& source,
+ const std::string& destination, bool always)
+{
+ if (always) {
+ return SystemTools::CopyFileAlways(source, destination);
+ } else {
+ return SystemTools::CopyFileIfDifferent(source, destination);
+ }
+}
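+
+// Illustrative usage sketch (not part of the original sources; the paths are
+// made up):
+//   SystemTools::CopyAFile("config.h.in", "build/config.h.in", /*always=*/false);
+//   // with always == false the copy is skipped when the contents already match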
+
+/**
+ * Copy the content of the "source" directory to the directory named by
+ * "destination".
+ */
+bool SystemTools::CopyADirectory(const std::string& source,
+ const std::string& destination, bool always)
+{
+ Directory dir;
+ if (dir.Load(source) == 0) {
+ return false;
+ }
+ size_t fileNum;
+ if (!SystemTools::MakeDirectory(destination)) {
+ return false;
+ }
+ for (fileNum = 0; fileNum < dir.GetNumberOfFiles(); ++fileNum) {
+ if (strcmp(dir.GetFile(static_cast<unsigned long>(fileNum)), ".") &&
+ strcmp(dir.GetFile(static_cast<unsigned long>(fileNum)), "..")) {
+ std::string fullPath = source;
+ fullPath += "/";
+ fullPath += dir.GetFile(static_cast<unsigned long>(fileNum));
+ if (SystemTools::FileIsDirectory(fullPath)) {
+ std::string fullDestPath = destination;
+ fullDestPath += "/";
+ fullDestPath += dir.GetFile(static_cast<unsigned long>(fileNum));
+ if (!SystemTools::CopyADirectory(fullPath, fullDestPath, always)) {
+ return false;
+ }
+ } else {
+ if (!SystemTools::CopyAFile(fullPath, destination, always)) {
+ return false;
+ }
+ }
+ }
+ }
+
+ return true;
+}
+
+// return size of file; also returns zero if no file exists
+unsigned long SystemTools::FileLength(const std::string& filename)
+{
+ unsigned long length = 0;
+#ifdef _WIN32
+ WIN32_FILE_ATTRIBUTE_DATA fs;
+ if (GetFileAttributesExW(Encoding::ToWindowsExtendedPath(filename).c_str(),
+ GetFileExInfoStandard, &fs) != 0) {
+ /* To support the full 64-bit file size, use fs.nFileSizeHigh
+ * and fs.nFileSizeLow to construct the 64 bit size
+
+ length = ((__int64)fs.nFileSizeHigh << 32) + fs.nFileSizeLow;
+ */
+ length = static_cast<unsigned long>(fs.nFileSizeLow);
+ }
+#else
+ struct stat fs;
+ if (stat(filename.c_str(), &fs) == 0) {
+ length = static_cast<unsigned long>(fs.st_size);
+ }
+#endif
+ return length;
+}
+
+int SystemTools::Strucmp(const char* l, const char* r)
+{
+ int lc;
+ int rc;
+ do {
+ lc = tolower(*l++);
+ rc = tolower(*r++);
+ } while (lc == rc && lc);
+ return lc - rc;
+}
+
+// return file's modified time
+long int SystemTools::ModifiedTime(const std::string& filename)
+{
+ long int mt = 0;
+#ifdef _WIN32
+ WIN32_FILE_ATTRIBUTE_DATA fs;
+ if (GetFileAttributesExW(Encoding::ToWindowsExtendedPath(filename).c_str(),
+ GetFileExInfoStandard, &fs) != 0) {
+ mt = windows_filetime_to_posix_time(fs.ftLastWriteTime);
+ }
+#else
+ struct stat fs;
+ if (stat(filename.c_str(), &fs) == 0) {
+ mt = static_cast<long int>(fs.st_mtime);
+ }
+#endif
+ return mt;
+}
+
+// return file's creation time
+long int SystemTools::CreationTime(const std::string& filename)
+{
+ long int ct = 0;
+#ifdef _WIN32
+ WIN32_FILE_ATTRIBUTE_DATA fs;
+ if (GetFileAttributesExW(Encoding::ToWindowsExtendedPath(filename).c_str(),
+ GetFileExInfoStandard, &fs) != 0) {
+ ct = windows_filetime_to_posix_time(fs.ftCreationTime);
+ }
+#else
+ struct stat fs;
+ if (stat(filename.c_str(), &fs) == 0) {
+ ct = fs.st_ctime >= 0 ? static_cast<long int>(fs.st_ctime) : 0;
+ }
+#endif
+ return ct;
+}
+
+std::string SystemTools::GetLastSystemError()
+{
+ int e = errno;
+ return strerror(e);
+}
+
+bool SystemTools::RemoveFile(const std::string& source)
+{
+#ifdef _WIN32
+ std::wstring const& ws = Encoding::ToWindowsExtendedPath(source);
+ if (DeleteFileW(ws.c_str())) {
+ return true;
+ }
+ DWORD err = GetLastError();
+ if (err == ERROR_FILE_NOT_FOUND || err == ERROR_PATH_NOT_FOUND) {
+ return true;
+ }
+ if (err != ERROR_ACCESS_DENIED) {
+ return false;
+ }
+ /* The file may be read-only. Try adding write permission. */
+ mode_t mode;
+ if (!SystemTools::GetPermissions(source, mode) ||
+ !SystemTools::SetPermissions(source, S_IWRITE)) {
+ SetLastError(err);
+ return false;
+ }
+
+ const DWORD DIRECTORY_SOFT_LINK_ATTRS =
+ FILE_ATTRIBUTE_DIRECTORY | FILE_ATTRIBUTE_REPARSE_POINT;
+ DWORD attrs = GetFileAttributesW(ws.c_str());
+ if (attrs != INVALID_FILE_ATTRIBUTES &&
+ (attrs & DIRECTORY_SOFT_LINK_ATTRS) == DIRECTORY_SOFT_LINK_ATTRS &&
+ RemoveDirectoryW(ws.c_str())) {
+ return true;
+ }
+ if (DeleteFileW(ws.c_str()) || GetLastError() == ERROR_FILE_NOT_FOUND ||
+ GetLastError() == ERROR_PATH_NOT_FOUND) {
+ return true;
+ }
+ /* Try to restore the original permissions. */
+ SystemTools::SetPermissions(source, mode);
+ SetLastError(err);
+ return false;
+#else
+ return unlink(source.c_str()) == 0 || errno == ENOENT;
+#endif
+}
+
+bool SystemTools::RemoveADirectory(const std::string& source)
+{
+ // Add write permission to the directory so we can modify its
+ // content to remove files and directories from it.
+ mode_t mode;
+ if (SystemTools::GetPermissions(source, mode)) {
+#if defined(_WIN32) && !defined(__CYGWIN__)
+ mode |= S_IWRITE;
+#else
+ mode |= S_IWUSR;
+#endif
+ SystemTools::SetPermissions(source, mode);
+ }
+
+ Directory dir;
+ dir.Load(source);
+ size_t fileNum;
+ for (fileNum = 0; fileNum < dir.GetNumberOfFiles(); ++fileNum) {
+ if (strcmp(dir.GetFile(static_cast<unsigned long>(fileNum)), ".") &&
+ strcmp(dir.GetFile(static_cast<unsigned long>(fileNum)), "..")) {
+ std::string fullPath = source;
+ fullPath += "/";
+ fullPath += dir.GetFile(static_cast<unsigned long>(fileNum));
+ if (SystemTools::FileIsDirectory(fullPath) &&
+ !SystemTools::FileIsSymlink(fullPath)) {
+ if (!SystemTools::RemoveADirectory(fullPath)) {
+ return false;
+ }
+ } else {
+ if (!SystemTools::RemoveFile(fullPath)) {
+ return false;
+ }
+ }
+ }
+ }
+
+ return (Rmdir(source) == 0);
+}
+
+/**
+ * Return the maximum length of a file path supported on this platform.
+ */
+size_t SystemTools::GetMaximumFilePathLength()
+{
+ return KWSYS_SYSTEMTOOLS_MAXPATH;
+}
+
+/**
+ * Find the file with the given name. Searches the given path and then
+ * the system search path. Returns the full path to the file if it is
+ * found. Otherwise, the empty string is returned.
+ */
+std::string SystemToolsStatic::FindName(
+ const std::string& name, const std::vector<std::string>& userPaths,
+ bool no_system_path)
+{
+ // Add the system search path to our path first
+ std::vector<std::string> path;
+ if (!no_system_path) {
+ SystemTools::GetPath(path, "CMAKE_FILE_PATH");
+ SystemTools::GetPath(path);
+ }
+ // now add the additional paths
+ path.reserve(path.size() + userPaths.size());
+ path.insert(path.end(), userPaths.begin(), userPaths.end());
+ // now look for the file
+ std::string tryPath;
+ for (std::string const& p : path) {
+ tryPath = p;
+ if (tryPath.empty() || tryPath.back() != '/') {
+ tryPath += '/';
+ }
+ tryPath += name;
+ if (SystemTools::FileExists(tryPath)) {
+ return tryPath;
+ }
+ }
+ // Couldn't find the file.
+ return "";
+}
+
+/**
+ * Find the file with the given name. Searches the given path and then
+ * the system search path. Returns the full path to the file if it is
+ * found. Otherwise, the empty string is returned.
+ */
+std::string SystemTools::FindFile(const std::string& name,
+ const std::vector<std::string>& userPaths,
+ bool no_system_path)
+{
+ std::string tryPath =
+ SystemToolsStatic::FindName(name, userPaths, no_system_path);
+ if (!tryPath.empty() && !SystemTools::FileIsDirectory(tryPath)) {
+ return SystemTools::CollapseFullPath(tryPath);
+ }
+ // Couldn't find the file.
+ return "";
+}
+
+/**
+ * Find the directory with the given name. Searches the given path and then
+ * the system search path. Returns the full path to the directory if it is
+ * found. Otherwise, the empty string is returned.
+ */
+std::string SystemTools::FindDirectory(
+ const std::string& name, const std::vector<std::string>& userPaths,
+ bool no_system_path)
+{
+ std::string tryPath =
+ SystemToolsStatic::FindName(name, userPaths, no_system_path);
+ if (!tryPath.empty() && SystemTools::FileIsDirectory(tryPath)) {
+ return SystemTools::CollapseFullPath(tryPath);
+ }
+ // Couldn't find the file.
+ return "";
+}
+
+/**
+ * Find the executable with the given name. Searches the given path and then
+ * the system search path. Returns the full path to the executable if it is
+ * found. Otherwise, the empty string is returned.
+ */
+std::string SystemTools::FindProgram(const char* nameIn,
+ const std::vector<std::string>& userPaths,
+ bool no_system_path)
+{
+ if (!nameIn || !*nameIn) {
+ return "";
+ }
+ return SystemTools::FindProgram(std::string(nameIn), userPaths,
+ no_system_path);
+}
+
+std::string SystemTools::FindProgram(const std::string& name,
+ const std::vector<std::string>& userPaths,
+ bool no_system_path)
+{
+ std::string tryPath;
+
+#if defined(_WIN32) || defined(__CYGWIN__) || defined(__MINGW32__)
+ std::vector<std::string> extensions;
+  // check to see if the name already has a .xxx extension at the end of it;
+  // if not, on windows try appending .com and then .exe
+ if (name.size() <= 3 || name[name.size() - 4] != '.') {
+ extensions.emplace_back(".com");
+ extensions.emplace_back(".exe");
+
+ // first try with extensions if the os supports them
+ for (std::string const& ext : extensions) {
+ tryPath = name;
+ tryPath += ext;
+ if (SystemTools::FileExists(tryPath, true)) {
+ return SystemTools::CollapseFullPath(tryPath);
+ }
+ }
+ }
+#endif
+
+ // now try just the name
+ if (SystemTools::FileExists(name, true)) {
+ return SystemTools::CollapseFullPath(name);
+ }
+ // now construct the path
+ std::vector<std::string> path;
+ // Add the system search path to our path.
+ if (!no_system_path) {
+ SystemTools::GetPath(path);
+ }
+ // now add the additional paths
+ path.reserve(path.size() + userPaths.size());
+ path.insert(path.end(), userPaths.begin(), userPaths.end());
+ // Add a trailing slash to all paths to aid the search process.
+ for (std::string& p : path) {
+ if (p.empty() || p.back() != '/') {
+ p += '/';
+ }
+ }
+ // Try each path
+ for (std::string& p : path) {
+#ifdef _WIN32
+ // Remove double quotes from the path on windows
+ SystemTools::ReplaceString(p, "\"", "");
+#endif
+#if defined(_WIN32) || defined(__CYGWIN__) || defined(__MINGW32__)
+ // first try with extensions
+ for (std::string const& ext : extensions) {
+ tryPath = p;
+ tryPath += name;
+ tryPath += ext;
+ if (SystemTools::FileExists(tryPath, true)) {
+ return SystemTools::CollapseFullPath(tryPath);
+ }
+ }
+#endif
+ // now try it without them
+ tryPath = p;
+ tryPath += name;
+ if (SystemTools::FileExists(tryPath, true)) {
+ return SystemTools::CollapseFullPath(tryPath);
+ }
+ }
+ // Couldn't find the program.
+ return "";
+}
+
+std::string SystemTools::FindProgram(const std::vector<std::string>& names,
+ const std::vector<std::string>& path,
+ bool noSystemPath)
+{
+ for (std::string const& name : names) {
+ // Try to find the program.
+ std::string result = SystemTools::FindProgram(name, path, noSystemPath);
+ if (!result.empty()) {
+ return result;
+ }
+ }
+ return "";
+}
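+
+/* Usage sketch (illustrative only, not part of the upstream kwsys sources);
+   assuming the configured namespace is "kwsys":
+
+     std::vector<std::string> extra;            // no extra search directories
+     std::string cc = kwsys::SystemTools::FindProgram("cc", extra, false);
+     if (!cc.empty()) {
+       // cc holds the collapsed full path, e.g. "/usr/bin/cc"; on Windows the
+       // ".com"/".exe" extensions are tried automatically as shown above.
+     }
+*/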
+
+/**
+ * Find the library with the given name. Searches the given path and then
+ * the system search path. Returns the full path to the library if it is
+ * found. Otherwise, the empty string is returned.
+ */
+std::string SystemTools::FindLibrary(const std::string& name,
+ const std::vector<std::string>& userPaths)
+{
+ // See if the executable exists as written.
+ if (SystemTools::FileExists(name, true)) {
+ return SystemTools::CollapseFullPath(name);
+ }
+
+ // Add the system search path to our path.
+ std::vector<std::string> path;
+ SystemTools::GetPath(path);
+ // now add the additional paths
+ path.reserve(path.size() + userPaths.size());
+ path.insert(path.end(), userPaths.begin(), userPaths.end());
+ // Add a trailing slash to all paths to aid the search process.
+ for (std::string& p : path) {
+ if (p.empty() || p.back() != '/') {
+ p += '/';
+ }
+ }
+ std::string tryPath;
+ for (std::string const& p : path) {
+#if defined(__APPLE__)
+ tryPath = p;
+ tryPath += name;
+ tryPath += ".framework";
+ if (SystemTools::FileIsDirectory(tryPath)) {
+ return SystemTools::CollapseFullPath(tryPath);
+ }
+#endif
+#if defined(_WIN32) && !defined(__CYGWIN__) && !defined(__MINGW32__)
+ tryPath = p;
+ tryPath += name;
+ tryPath += ".lib";
+ if (SystemTools::FileExists(tryPath, true)) {
+ return SystemTools::CollapseFullPath(tryPath);
+ }
+#else
+ tryPath = p;
+ tryPath += "lib";
+ tryPath += name;
+ tryPath += ".so";
+ if (SystemTools::FileExists(tryPath, true)) {
+ return SystemTools::CollapseFullPath(tryPath);
+ }
+ tryPath = p;
+ tryPath += "lib";
+ tryPath += name;
+ tryPath += ".a";
+ if (SystemTools::FileExists(tryPath, true)) {
+ return SystemTools::CollapseFullPath(tryPath);
+ }
+ tryPath = p;
+ tryPath += "lib";
+ tryPath += name;
+ tryPath += ".sl";
+ if (SystemTools::FileExists(tryPath, true)) {
+ return SystemTools::CollapseFullPath(tryPath);
+ }
+ tryPath = p;
+ tryPath += "lib";
+ tryPath += name;
+ tryPath += ".dylib";
+ if (SystemTools::FileExists(tryPath, true)) {
+ return SystemTools::CollapseFullPath(tryPath);
+ }
+ tryPath = p;
+ tryPath += "lib";
+ tryPath += name;
+ tryPath += ".dll";
+ if (SystemTools::FileExists(tryPath, true)) {
+ return SystemTools::CollapseFullPath(tryPath);
+ }
+#endif
+ }
+
+ // Couldn't find the library.
+ return "";
+}
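+
+/* Usage sketch (illustrative only, not part of the upstream kwsys sources);
+   assuming the configured namespace is "kwsys". For a name "z" the search
+   probes "z.framework" on Apple, "z.lib" on Windows, and otherwise
+   "libz.so", "libz.a", "libz.sl", "libz.dylib" and "libz.dll" in each path:
+
+     std::vector<std::string> dirs = { "/usr/lib", "/usr/local/lib" };
+     std::string zlib = kwsys::SystemTools::FindLibrary("z", dirs);
+*/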
+
+std::string SystemTools::GetRealPath(const std::string& path,
+ std::string* errorMessage)
+{
+ std::string ret;
+ Realpath(path, ret, errorMessage);
+ return ret;
+}
+
+bool SystemTools::FileIsDirectory(const std::string& inName)
+{
+ if (inName.empty()) {
+ return false;
+ }
+ size_t length = inName.size();
+ const char* name = inName.c_str();
+
+ // Remove any trailing slash from the name except in a root component.
+ char local_buffer[KWSYS_SYSTEMTOOLS_MAXPATH];
+ std::string string_buffer;
+ size_t last = length - 1;
+ if (last > 0 && (name[last] == '/' || name[last] == '\\') &&
+ strcmp(name, "/") != 0 && name[last - 1] != ':') {
+ if (last < sizeof(local_buffer)) {
+ memcpy(local_buffer, name, last);
+ local_buffer[last] = '\0';
+ name = local_buffer;
+ } else {
+ string_buffer.append(name, last);
+ name = string_buffer.c_str();
+ }
+ }
+
+// Now check the file node type.
+#if defined(_WIN32)
+ DWORD attr =
+ GetFileAttributesW(Encoding::ToWindowsExtendedPath(name).c_str());
+ if (attr != INVALID_FILE_ATTRIBUTES) {
+ return (attr & FILE_ATTRIBUTE_DIRECTORY) != 0;
+#else
+ struct stat fs;
+ if (stat(name, &fs) == 0) {
+ return S_ISDIR(fs.st_mode);
+#endif
+ } else {
+ return false;
+ }
+}
+
+bool SystemTools::FileIsSymlink(const std::string& name)
+{
+#if defined(_WIN32)
+ std::wstring path = Encoding::ToWindowsExtendedPath(name);
+ DWORD attr = GetFileAttributesW(path.c_str());
+ if (attr != INVALID_FILE_ATTRIBUTES) {
+ if ((attr & FILE_ATTRIBUTE_REPARSE_POINT) != 0) {
+ // FILE_ATTRIBUTE_REPARSE_POINT means:
+ // * a file or directory that has an associated reparse point, or
+ // * a file that is a symbolic link.
+ HANDLE hFile = CreateFileW(
+ path.c_str(), GENERIC_READ, FILE_SHARE_READ, nullptr, OPEN_EXISTING,
+ FILE_FLAG_OPEN_REPARSE_POINT | FILE_FLAG_BACKUP_SEMANTICS, nullptr);
+ if (hFile == INVALID_HANDLE_VALUE) {
+ return false;
+ }
+ byte buffer[MAXIMUM_REPARSE_DATA_BUFFER_SIZE];
+ DWORD bytesReturned = 0;
+ if (!DeviceIoControl(hFile, FSCTL_GET_REPARSE_POINT, nullptr, 0, buffer,
+ MAXIMUM_REPARSE_DATA_BUFFER_SIZE, &bytesReturned,
+ nullptr)) {
+ CloseHandle(hFile);
+ // Since FILE_ATTRIBUTE_REPARSE_POINT is set this file must be
+ // a symbolic link if it is not a reparse point.
+ return GetLastError() == ERROR_NOT_A_REPARSE_POINT;
+ }
+ CloseHandle(hFile);
+ ULONG reparseTag =
+ reinterpret_cast<PREPARSE_GUID_DATA_BUFFER>(&buffer[0])->ReparseTag;
+ return (reparseTag == IO_REPARSE_TAG_SYMLINK) ||
+ (reparseTag == IO_REPARSE_TAG_MOUNT_POINT);
+ }
+ return false;
+ } else {
+ return false;
+ }
+#else
+ struct stat fs;
+ if (lstat(name.c_str(), &fs) == 0) {
+ return S_ISLNK(fs.st_mode);
+ } else {
+ return false;
+ }
+#endif
+}
+
+bool SystemTools::FileIsFIFO(const std::string& name)
+{
+#if defined(_WIN32)
+ HANDLE hFile =
+ CreateFileW(Encoding::ToWide(name).c_str(), GENERIC_READ, FILE_SHARE_READ,
+ nullptr, OPEN_EXISTING, FILE_FLAG_BACKUP_SEMANTICS, nullptr);
+ if (hFile == INVALID_HANDLE_VALUE) {
+ return false;
+ }
+ const DWORD type = GetFileType(hFile);
+ CloseHandle(hFile);
+ return type == FILE_TYPE_PIPE;
+#else
+ struct stat fs;
+ if (lstat(name.c_str(), &fs) == 0) {
+ return S_ISFIFO(fs.st_mode);
+ } else {
+ return false;
+ }
+#endif
+}
+
+#if defined(_WIN32) && !defined(__CYGWIN__)
+bool SystemTools::CreateSymlink(const std::string&, const std::string&)
+{
+ return false;
+}
+#else
+bool SystemTools::CreateSymlink(const std::string& origName,
+ const std::string& newName)
+{
+ return symlink(origName.c_str(), newName.c_str()) >= 0;
+}
+#endif
+
+#if defined(_WIN32) && !defined(__CYGWIN__)
+bool SystemTools::ReadSymlink(const std::string&, std::string&)
+{
+ return false;
+}
+#else
+bool SystemTools::ReadSymlink(const std::string& newName,
+ std::string& origName)
+{
+ char buf[KWSYS_SYSTEMTOOLS_MAXPATH + 1];
+ int count = static_cast<int>(
+ readlink(newName.c_str(), buf, KWSYS_SYSTEMTOOLS_MAXPATH));
+ if (count >= 0) {
+ // Add null-terminator.
+ buf[count] = 0;
+ origName = buf;
+ return true;
+ } else {
+ return false;
+ }
+}
+#endif
+
+int SystemTools::ChangeDirectory(const std::string& dir)
+{
+ return Chdir(dir);
+}
+
+std::string SystemTools::GetCurrentWorkingDirectory(bool collapse)
+{
+ char buf[2048];
+ const char* cwd = Getcwd(buf, 2048);
+ std::string path;
+ if (cwd) {
+ path = cwd;
+ }
+ if (collapse) {
+ return SystemTools::CollapseFullPath(path);
+ }
+ return path;
+}
+
+std::string SystemTools::GetProgramPath(const std::string& in_name)
+{
+ std::string dir, file;
+ SystemTools::SplitProgramPath(in_name, dir, file);
+ return dir;
+}
+
+bool SystemTools::SplitProgramPath(const std::string& in_name,
+ std::string& dir, std::string& file, bool)
+{
+ dir = in_name;
+ file = "";
+ SystemTools::ConvertToUnixSlashes(dir);
+
+ if (!SystemTools::FileIsDirectory(dir)) {
+ std::string::size_type slashPos = dir.rfind("/");
+ if (slashPos != std::string::npos) {
+ file = dir.substr(slashPos + 1);
+ dir = dir.substr(0, slashPos);
+ } else {
+ file = dir;
+ dir = "";
+ }
+ }
+ if (!(dir.empty()) && !SystemTools::FileIsDirectory(dir)) {
+ std::string oldDir = in_name;
+ SystemTools::ConvertToUnixSlashes(oldDir);
+ dir = in_name;
+ return false;
+ }
+ return true;
+}
+
+bool SystemTools::FindProgramPath(const char* argv0, std::string& pathOut,
+ std::string& errorMsg, const char* exeName,
+ const char* buildDir,
+ const char* installPrefix)
+{
+ std::vector<std::string> failures;
+ std::string self = argv0 ? argv0 : "";
+ failures.push_back(self);
+ SystemTools::ConvertToUnixSlashes(self);
+ self = SystemTools::FindProgram(self);
+ if (!SystemTools::FileExists(self)) {
+ if (buildDir) {
+ std::string intdir = ".";
+#ifdef CMAKE_INTDIR
+ intdir = CMAKE_INTDIR;
+#endif
+ self = buildDir;
+ self += "/bin/";
+ self += intdir;
+ self += "/";
+ self += exeName;
+ self += SystemTools::GetExecutableExtension();
+ }
+ }
+ if (installPrefix) {
+ if (!SystemTools::FileExists(self)) {
+ failures.push_back(self);
+ self = installPrefix;
+ self += "/bin/";
+ self += exeName;
+ }
+ }
+ if (!SystemTools::FileExists(self)) {
+ failures.push_back(self);
+ std::ostringstream msg;
+ msg << "Can not find the command line program ";
+ if (exeName) {
+ msg << exeName;
+ }
+ msg << "\n";
+ if (argv0) {
+ msg << " argv[0] = \"" << argv0 << "\"\n";
+ }
+ msg << " Attempted paths:\n";
+ for (std::string const& ff : failures) {
+ msg << " \"" << ff << "\"\n";
+ }
+ errorMsg = msg.str();
+ return false;
+ }
+ pathOut = self;
+ return true;
+}
+
+std::string SystemTools::CollapseFullPath(const std::string& in_relative)
+{
+ return SystemTools::CollapseFullPath(in_relative, nullptr);
+}
+
+#if KWSYS_SYSTEMTOOLS_USE_TRANSLATION_MAP
+void SystemTools::AddTranslationPath(const std::string& a,
+ const std::string& b)
+{
+ std::string path_a = a;
+ std::string path_b = b;
+ SystemTools::ConvertToUnixSlashes(path_a);
+ SystemTools::ConvertToUnixSlashes(path_b);
+ // First check this is a directory path, since we don't want the table to
+ // grow too fat
+ if (SystemTools::FileIsDirectory(path_a)) {
+ // Make sure the path is a full path and does not contain '..'
+ // Ken--the following check is too strict: ".." can appear in a valid path,
+ // for example /home/martink/MyHubba...Hubba/Src
+ if (SystemTools::FileIsFullPath(path_b) &&
+ path_b.find("..") == std::string::npos) {
+ // Before inserting make sure path ends with '/'
+ if (!path_a.empty() && path_a.back() != '/') {
+ path_a += '/';
+ }
+ if (!path_b.empty() && path_b.back() != '/') {
+ path_b += '/';
+ }
+ if (!(path_a == path_b)) {
+ SystemTools::Statics->TranslationMap.insert(
+ SystemToolsStatic::StringMap::value_type(std::move(path_a),
+ std::move(path_b)));
+ }
+ }
+ }
+}
+
+void SystemTools::AddKeepPath(const std::string& dir)
+{
+ std::string cdir;
+ Realpath(SystemTools::CollapseFullPath(dir), cdir);
+ SystemTools::AddTranslationPath(cdir, dir);
+}
+
+void SystemTools::CheckTranslationPath(std::string& path)
+{
+ // Do not translate paths that are too short to have meaningful
+ // translations.
+ if (path.size() < 2) {
+ return;
+ }
+
+ // Always add a trailing slash before translation. It does not
+ // matter if this adds an extra slash, but we do not want to
+ // translate part of a directory (like the foo part of foo-dir).
+ path += '/';
+
+ // In case a file was specified we still have to go through this:
+ // Now convert any path found in the table back to the one desired:
+ for (auto const& pair : SystemTools::Statics->TranslationMap) {
+ // We need to check whether the path starts with the mapping's source path
+ if (path.find(pair.first) == 0) {
+ path = path.replace(0, pair.first.size(), pair.second);
+ }
+ }
+
+ // Remove the trailing slash we added before.
+ path.pop_back();
+}
+#endif
+
+static void SystemToolsAppendComponents(
+ std::vector<std::string>& out_components,
+ std::vector<std::string>::iterator first,
+ std::vector<std::string>::iterator last)
+{
+ static const std::string up = "..";
+ static const std::string cur = ".";
+ for (std::vector<std::string>::const_iterator i = first; i != last; ++i) {
+ if (*i == up) {
+ // Remove the previous component if possible. Ignore ../ components
+ // that try to go above the root. Keep ../ components if they are
+ // at the beginning of a relative path (base path is relative).
+ if (out_components.size() > 1 && out_components.back() != up) {
+ out_components.resize(out_components.size() - 1);
+ } else if (!out_components.empty() && out_components[0].empty()) {
+ out_components.emplace_back(std::move(*i));
+ }
+ } else if (!i->empty() && *i != cur) {
+ out_components.emplace_back(std::move(*i));
+ }
+ }
+}
+
+std::string SystemTools::CollapseFullPath(const std::string& in_path,
+ const char* in_base)
+{
+ // Use the current working directory as a base path.
+ char buf[2048];
+ const char* res_in_base = in_base;
+ if (!res_in_base) {
+ if (const char* cwd = Getcwd(buf, 2048)) {
+ res_in_base = cwd;
+ } else {
+ res_in_base = "";
+ }
+ }
+
+ return SystemTools::CollapseFullPath(in_path, std::string(res_in_base));
+}
+
+std::string SystemTools::CollapseFullPath(const std::string& in_path,
+ const std::string& in_base)
+{
+ // Collect the output path components.
+ std::vector<std::string> out_components;
+
+ // Split the input path components.
+ std::vector<std::string> path_components;
+ SystemTools::SplitPath(in_path, path_components);
+ out_components.reserve(path_components.size());
+
+ // If the input path is relative, start with a base path.
+ if (path_components[0].empty()) {
+ std::vector<std::string> base_components;
+ // Use the given base path.
+ SystemTools::SplitPath(in_base, base_components);
+
+ // Append base path components to the output path.
+ out_components.push_back(base_components[0]);
+ SystemToolsAppendComponents(out_components, base_components.begin() + 1,
+ base_components.end());
+ }
+
+ // Append input path components to the output path.
+ SystemToolsAppendComponents(out_components, path_components.begin(),
+ path_components.end());
+
+ // Transform the path back to a string.
+ std::string newPath = SystemTools::JoinPath(out_components);
+
+#if KWSYS_SYSTEMTOOLS_USE_TRANSLATION_MAP
+ // Update the translation table with this potentially new path. It is not
+ // clear why this line was here: it mapped the collapsed path back to the
+ // input path, which is almost the opposite of what this function does.
+ // The only thing that limited its effect is that paths containing ".."
+ // are never added to the translation table, so for most calls it either
+ // did nothing (because of the "..") or added a translation between
+ // identical paths (because nothing was collapsed). It has therefore been
+ // commented out to see whether anything breaks.
+ // Commented out line below:
+ // SystemTools::AddTranslationPath(newPath, in_path);
+
+ SystemTools::CheckTranslationPath(newPath);
+#endif
+#ifdef _WIN32
+ newPath = SystemTools::Statics->GetActualCaseForPathCached(newPath);
+ SystemTools::ConvertToUnixSlashes(newPath);
+#endif
+ // Return the reconstructed path.
+ return newPath;
+}
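+
+/* Usage sketch (illustrative only, not part of the upstream kwsys sources);
+   assuming the configured namespace is "kwsys" and a hypothetical base path:
+
+     std::string full =
+       kwsys::SystemTools::CollapseFullPath("../data/./file.txt", "/home/user/src");
+     // full == "/home/user/data/file.txt": "." components are dropped and
+     // ".." removes the previous component, as implemented by
+     // SystemToolsAppendComponents above.
+*/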
+
+// compute the relative path from here to there
+std::string SystemTools::RelativePath(const std::string& local,
+ const std::string& remote)
+{
+ if (!SystemTools::FileIsFullPath(local)) {
+ return "";
+ }
+ if (!SystemTools::FileIsFullPath(remote)) {
+ return "";
+ }
+
+ std::string l = SystemTools::CollapseFullPath(local);
+ std::string r = SystemTools::CollapseFullPath(remote);
+
+ // split up both paths into arrays of strings using / as a separator
+ std::vector<std::string> localSplit = SystemTools::SplitString(l, '/', true);
+ std::vector<std::string> remoteSplit =
+ SystemTools::SplitString(r, '/', true);
+ std::vector<std::string>
+ commonPath; // store shared parts of path in this array
+ std::vector<std::string> finalPath; // store the final relative path here
+ // count up how many matching directory names there are from the start
+ unsigned int sameCount = 0;
+ while (((sameCount <= (localSplit.size() - 1)) &&
+ (sameCount <= (remoteSplit.size() - 1))) &&
+// for Windows and Apple do a case insensitive string compare
+#if defined(_WIN32) || defined(__APPLE__)
+ SystemTools::Strucmp(localSplit[sameCount].c_str(),
+ remoteSplit[sameCount].c_str()) == 0
+#else
+ localSplit[sameCount] == remoteSplit[sameCount]
+#endif
+ ) {
+ // put the common parts of the path into the commonPath array
+ commonPath.push_back(localSplit[sameCount]);
+ // erase the common parts of the path from the original path arrays
+ localSplit[sameCount] = "";
+ remoteSplit[sameCount] = "";
+ sameCount++;
+ }
+
+ // If there is nothing in common at all then just return the full
+ // path. This is the case only on windows when the paths have
+ // different drive letters. On unix two full paths always at least
+ // have the root "/" in common so we will return a relative path
+ // that passes through the root directory.
+ if (sameCount == 0) {
+ return remote;
+ }
+
+ // for each entry that is not common in the local path
+ // add a ../ to the finalpath array, this gets us out of the local
+ // path into the remote dir
+ for (std::string const& lp : localSplit) {
+ if (!lp.empty()) {
+ finalPath.emplace_back("../");
+ }
+ }
+ // for each entry that is not common in the remote path add it
+ // to the final path.
+ for (std::string const& rp : remoteSplit) {
+ if (!rp.empty()) {
+ finalPath.push_back(rp);
+ }
+ }
+ std::string relativePath; // result string
+ // now turn the array of directories into a unix path by putting '/'
+ // between each entry that does not already have one
+ for (std::string const& fp : finalPath) {
+ if (!relativePath.empty() && relativePath.back() != '/') {
+ relativePath += '/';
+ }
+ relativePath += fp;
+ }
+ return relativePath;
+}
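+
+/* Usage sketch (illustrative only, not part of the upstream kwsys sources);
+   assuming the configured namespace is "kwsys". Both arguments must be full
+   paths, otherwise an empty string is returned:
+
+     std::string rel =
+       kwsys::SystemTools::RelativePath("/usr/share/doc", "/usr/include/zlib.h");
+     // rel == "../../include/zlib.h"
+*/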
+
+std::string SystemTools::GetActualCaseForPath(const std::string& p)
+{
+#ifdef _WIN32
+ return SystemToolsStatic::GetCasePathName(p);
+#else
+ return p;
+#endif
+}
+
+const char* SystemTools::SplitPathRootComponent(const std::string& p,
+ std::string* root)
+{
+ // Identify the root component.
+ const char* c = p.c_str();
+ if ((c[0] == '/' && c[1] == '/') || (c[0] == '\\' && c[1] == '\\')) {
+ // Network path.
+ if (root) {
+ *root = "//";
+ }
+ c += 2;
+ } else if (c[0] == '/' || c[0] == '\\') {
+ // Unix path (or Windows path w/out drive letter).
+ if (root) {
+ *root = "/";
+ }
+ c += 1;
+ } else if (c[0] && c[1] == ':' && (c[2] == '/' || c[2] == '\\')) {
+ // Windows path.
+ if (root) {
+ (*root) = "_:/";
+ (*root)[0] = c[0];
+ }
+ c += 3;
+ } else if (c[0] && c[1] == ':') {
+ // Path relative to a windows drive working directory.
+ if (root) {
+ (*root) = "_:";
+ (*root)[0] = c[0];
+ }
+ c += 2;
+ } else if (c[0] == '~') {
+ // Home directory. The returned root should always have a
+ // trailing slash so that appending components as
+ // c[0]c[1]/c[2]/... works. The remaining path returned should
+ // skip the first slash if it exists:
+ //
+ // "~" : root = "~/" , return ""
+ // "~/ : root = "~/" , return ""
+ // "~/x : root = "~/" , return "x"
+ // "~u" : root = "~u/", return ""
+ // "~u/" : root = "~u/", return ""
+ // "~u/x" : root = "~u/", return "x"
+ size_t n = 1;
+ while (c[n] && c[n] != '/') {
+ ++n;
+ }
+ if (root) {
+ root->assign(c, n);
+ *root += '/';
+ }
+ if (c[n] == '/') {
+ ++n;
+ }
+ c += n;
+ } else {
+ // Relative path.
+ if (root) {
+ *root = "";
+ }
+ }
+
+ // Return the remaining path.
+ return c;
+}
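+
+/* Usage sketch (illustrative only, not part of the upstream kwsys sources);
+   assuming the configured namespace is "kwsys":
+
+     std::string root;
+     const char* rest =
+       kwsys::SystemTools::SplitPathRootComponent("C:/work/project", &root);
+     // root == "C:/" and rest points at "work/project"; for "//server/share"
+     // root would be "//", and for a relative path root is "".
+*/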
+
+void SystemTools::SplitPath(const std::string& p,
+ std::vector<std::string>& components,
+ bool expand_home_dir)
+{
+ const char* c;
+ components.clear();
+
+ // Identify the root component.
+ {
+ std::string root;
+ c = SystemTools::SplitPathRootComponent(p, &root);
+
+ // Expand home directory references if requested.
+ if (expand_home_dir && !root.empty() && root[0] == '~') {
+ std::string homedir;
+ root = root.substr(0, root.size() - 1);
+ if (root.size() == 1) {
+#if defined(_WIN32) && !defined(__CYGWIN__)
+ if (!SystemTools::GetEnv("USERPROFILE", homedir))
+#endif
+ SystemTools::GetEnv("HOME", homedir);
+ }
+#ifdef HAVE_GETPWNAM
+ else if (passwd* pw = getpwnam(root.c_str() + 1)) {
+ if (pw->pw_dir) {
+ homedir = pw->pw_dir;
+ }
+ }
+#endif
+ if (!homedir.empty() &&
+ (homedir.back() == '/' || homedir.back() == '\\')) {
+ homedir.resize(homedir.size() - 1);
+ }
+ SystemTools::SplitPath(homedir, components);
+ } else {
+ components.push_back(root);
+ }
+ }
+
+ // Parse the remaining components.
+ const char* first = c;
+ const char* last = first;
+ for (; *last; ++last) {
+ if (*last == '/' || *last == '\\') {
+ // End of a component. Save it.
+ components.push_back(std::string(first, last));
+ first = last + 1;
+ }
+ }
+
+ // Save the last component unless there were no components.
+ if (last != c) {
+ components.push_back(std::string(first, last));
+ }
+}
+
+std::string SystemTools::JoinPath(const std::vector<std::string>& components)
+{
+ return SystemTools::JoinPath(components.begin(), components.end());
+}
+
+std::string SystemTools::JoinPath(
+ std::vector<std::string>::const_iterator first,
+ std::vector<std::string>::const_iterator last)
+{
+ // Construct result in a single string.
+ std::string result;
+ size_t len = 0;
+ for (std::vector<std::string>::const_iterator i = first; i != last; ++i) {
+ len += 1 + i->size();
+ }
+ result.reserve(len);
+
+ // The first two components do not add a slash.
+ if (first != last) {
+ result.append(*first++);
+ }
+ if (first != last) {
+ result.append(*first++);
+ }
+
+ // All remaining components are always separated with a slash.
+ while (first != last) {
+ result.push_back('/');
+ result.append((*first++));
+ }
+
+ // Return the concatenated result.
+ return result;
+}
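+
+/* Usage sketch (illustrative only, not part of the upstream kwsys sources);
+   assuming the configured namespace is "kwsys". SplitPath and JoinPath are
+   inverses for already-clean paths:
+
+     std::vector<std::string> parts;
+     kwsys::SystemTools::SplitPath("/usr/local/bin", parts, true);
+     // parts == { "/", "usr", "local", "bin" }
+     std::string joined = kwsys::SystemTools::JoinPath(parts);
+     // joined == "/usr/local/bin"
+*/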
+
+bool SystemTools::ComparePath(const std::string& c1, const std::string& c2)
+{
+#if defined(_WIN32) || defined(__APPLE__)
+# ifdef _MSC_VER
+ return _stricmp(c1.c_str(), c2.c_str()) == 0;
+# elif defined(__APPLE__) || defined(__GNUC__)
+ return strcasecmp(c1.c_str(), c2.c_str()) == 0;
+# else
+ return SystemTools::Strucmp(c1.c_str(), c2.c_str()) == 0;
+# endif
+#else
+ return c1 == c2;
+#endif
+}
+
+bool SystemTools::Split(const std::string& str,
+ std::vector<std::string>& lines, char separator)
+{
+ std::string data(str);
+ std::string::size_type lpos = 0;
+ while (lpos < data.length()) {
+ std::string::size_type rpos = data.find_first_of(separator, lpos);
+ if (rpos == std::string::npos) {
+ // The last piece ends at the end of the string without a separator.
+ lines.push_back(data.substr(lpos));
+ return false;
+ } else {
+ // The piece ends in a separator; do not store the separator character.
+ lines.push_back(data.substr(lpos, rpos - lpos));
+ }
+ lpos = rpos + 1;
+ }
+ return true;
+}
+
+bool SystemTools::Split(const std::string& str,
+ std::vector<std::string>& lines)
+{
+ std::string data(str);
+ std::string::size_type lpos = 0;
+ while (lpos < data.length()) {
+ std::string::size_type rpos = data.find_first_of('\n', lpos);
+ if (rpos == std::string::npos) {
+ // Line ends at end of string without a newline.
+ lines.push_back(data.substr(lpos));
+ return false;
+ }
+ if ((rpos > lpos) && (data[rpos - 1] == '\r')) {
+ // Line ends in a "\r\n" pair, remove both characters.
+ lines.push_back(data.substr(lpos, (rpos - 1) - lpos));
+ } else {
+ // Line ends in a "\n", remove the character.
+ lines.push_back(data.substr(lpos, rpos - lpos));
+ }
+ lpos = rpos + 1;
+ }
+ return true;
+}
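+
+/* Usage sketch (illustrative only, not part of the upstream kwsys sources);
+   assuming the configured namespace is "kwsys":
+
+     std::vector<std::string> lines;
+     bool terminated = kwsys::SystemTools::Split("one\r\ntwo\nthree", lines);
+     // lines == { "one", "two", "three" }; terminated is false because the
+     // last line has no trailing newline.
+*/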
+
+/**
+ * Return path of a full filename (no trailing slashes).
+ * Warning: returned path is converted to Unix slashes format.
+ */
+std::string SystemTools::GetFilenamePath(const std::string& filename)
+{
+ std::string fn = filename;
+ SystemTools::ConvertToUnixSlashes(fn);
+
+ std::string::size_type slash_pos = fn.rfind("/");
+ if (slash_pos != std::string::npos) {
+ std::string ret = fn.substr(0, slash_pos);
+ if (ret.size() == 2 && ret[1] == ':') {
+ return ret + '/';
+ }
+ if (ret.empty()) {
+ return "/";
+ }
+ return ret;
+ } else {
+ return "";
+ }
+}
+
+/**
+ * Return file name of a full filename (i.e. file name without path).
+ */
+std::string SystemTools::GetFilenameName(const std::string& filename)
+{
+#if defined(_WIN32) || defined(KWSYS_SYSTEMTOOLS_SUPPORT_WINDOWS_SLASHES)
+ const char* separators = "/\\";
+#else
+ char separators = '/';
+#endif
+ std::string::size_type slash_pos = filename.find_last_of(separators);
+ if (slash_pos != std::string::npos) {
+ return filename.substr(slash_pos + 1);
+ } else {
+ return filename;
+ }
+}
+
+/**
+ * Return file extension of a full filename (dot included).
+ * Warning: this is the longest extension (for example: .tar.gz)
+ */
+std::string SystemTools::GetFilenameExtension(const std::string& filename)
+{
+ std::string name = SystemTools::GetFilenameName(filename);
+ std::string::size_type dot_pos = name.find('.');
+ if (dot_pos != std::string::npos) {
+ return name.substr(dot_pos);
+ } else {
+ return "";
+ }
+}
+
+/**
+ * Return file extension of a full filename (dot included).
+ * Warning: this is the shortest extension (for example: .gz of .tar.gz)
+ */
+std::string SystemTools::GetFilenameLastExtension(const std::string& filename)
+{
+ std::string name = SystemTools::GetFilenameName(filename);
+ std::string::size_type dot_pos = name.rfind('.');
+ if (dot_pos != std::string::npos) {
+ return name.substr(dot_pos);
+ } else {
+ return "";
+ }
+}
+
+/**
+ * Return file name without extension of a full filename (i.e. without path).
+ * Warning: it considers the longest extension (for example: .tar.gz)
+ */
+std::string SystemTools::GetFilenameWithoutExtension(
+ const std::string& filename)
+{
+ std::string name = SystemTools::GetFilenameName(filename);
+ std::string::size_type dot_pos = name.find('.');
+ if (dot_pos != std::string::npos) {
+ return name.substr(0, dot_pos);
+ } else {
+ return name;
+ }
+}
+
+/**
+ * Return file name without extension of a full filename (i.e. without path).
+ * Warning: it considers the last extension (for example: removes .gz
+ * from .tar.gz)
+ */
+std::string SystemTools::GetFilenameWithoutLastExtension(
+ const std::string& filename)
+{
+ std::string name = SystemTools::GetFilenameName(filename);
+ std::string::size_type dot_pos = name.rfind('.');
+ if (dot_pos != std::string::npos) {
+ return name.substr(0, dot_pos);
+ } else {
+ return name;
+ }
+}
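+
+/* Behaviour sketch (illustrative only, not part of the upstream kwsys
+   sources). For the input "data/archive.tar.gz":
+
+     GetFilenameName()                 -> "archive.tar.gz"
+     GetFilenameExtension()            -> ".tar.gz"   (longest extension)
+     GetFilenameLastExtension()        -> ".gz"       (shortest extension)
+     GetFilenameWithoutExtension()     -> "archive"
+     GetFilenameWithoutLastExtension() -> "archive.tar"
+*/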
+
+bool SystemTools::FileHasSignature(const char* filename, const char* signature,
+ long offset)
+{
+ if (!filename || !signature) {
+ return false;
+ }
+
+ FILE* fp = Fopen(filename, "rb");
+ if (!fp) {
+ return false;
+ }
+
+ fseek(fp, offset, SEEK_SET);
+
+ bool res = false;
+ size_t signature_len = strlen(signature);
+ char* buffer = new char[signature_len];
+
+ if (fread(buffer, 1, signature_len, fp) == signature_len) {
+ res = (!strncmp(buffer, signature, signature_len) ? true : false);
+ }
+
+ delete[] buffer;
+
+ fclose(fp);
+ return res;
+}
+
+SystemTools::FileTypeEnum SystemTools::DetectFileType(const char* filename,
+ unsigned long length,
+ double percent_bin)
+{
+ if (!filename || percent_bin < 0) {
+ return SystemTools::FileTypeUnknown;
+ }
+
+ if (SystemTools::FileIsDirectory(filename)) {
+ return SystemTools::FileTypeUnknown;
+ }
+
+ FILE* fp = Fopen(filename, "rb");
+ if (!fp) {
+ return SystemTools::FileTypeUnknown;
+ }
+
+ // Allocate buffer and read bytes
+
+ unsigned char* buffer = new unsigned char[length];
+ size_t read_length = fread(buffer, 1, length, fp);
+ fclose(fp);
+ if (read_length == 0) {
+ delete[] buffer;
+ return SystemTools::FileTypeUnknown;
+ }
+
+ // Loop over contents and count
+
+ size_t text_count = 0;
+
+ const unsigned char* ptr = buffer;
+ const unsigned char* buffer_end = buffer + read_length;
+
+ while (ptr != buffer_end) {
+ if ((*ptr >= 0x20 && *ptr <= 0x7F) || *ptr == '\n' || *ptr == '\r' ||
+ *ptr == '\t') {
+ text_count++;
+ }
+ ptr++;
+ }
+
+ delete[] buffer;
+
+ double current_percent_bin = (static_cast<double>(read_length - text_count) /
+ static_cast<double>(read_length));
+
+ if (current_percent_bin >= percent_bin) {
+ return SystemTools::FileTypeBinary;
+ }
+
+ return SystemTools::FileTypeText;
+}
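+
+/* Usage sketch (illustrative only, not part of the upstream kwsys sources);
+   assuming the configured namespace is "kwsys" and a hypothetical file name;
+   length and percent_bin are passed explicitly here:
+
+     kwsys::SystemTools::FileTypeEnum t =
+       kwsys::SystemTools::DetectFileType("/tmp/sample.dat", 256, 0.05);
+     // t is FileTypeBinary if at least 5% of the first 256 bytes read are
+     // non-printable, FileTypeText otherwise, and FileTypeUnknown on error.
+*/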
+
+bool SystemTools::LocateFileInDir(const char* filename, const char* dir,
+ std::string& filename_found,
+ int try_filename_dirs)
+{
+ if (!filename || !dir) {
+ return false;
+ }
+
+ // Get the basename of 'filename'
+
+ std::string filename_base = SystemTools::GetFilenameName(filename);
+
+ // Check if 'dir' is really a directory
+ // If win32 and matches something like C:, accept it as a dir
+
+ std::string real_dir;
+ if (!SystemTools::FileIsDirectory(dir)) {
+#if defined(_WIN32)
+ size_t dir_len = strlen(dir);
+ if (dir_len < 2 || dir[dir_len - 1] != ':') {
+#endif
+ real_dir = SystemTools::GetFilenamePath(dir);
+ dir = real_dir.c_str();
+#if defined(_WIN32)
+ }
+#endif
+ }
+
+ // Try to find the file in 'dir'
+
+ bool res = false;
+ if (!filename_base.empty() && dir) {
+ size_t dir_len = strlen(dir);
+ int need_slash =
+ (dir_len && dir[dir_len - 1] != '/' && dir[dir_len - 1] != '\\');
+
+ std::string temp = dir;
+ if (need_slash) {
+ temp += "/";
+ }
+ temp += filename_base;
+
+ if (SystemTools::FileExists(temp)) {
+ res = true;
+ filename_found = temp;
+ }
+
+ // If not found, we can try harder by appending part of the file path
+ // to the directory to look inside.
+ // Example: if we were looking for /foo/bar/yo.txt in /d1/d2, then
+ // try to find yo.txt in /d1/d2/bar, then /d1/d2/foo/bar, etc.
+
+ else if (try_filename_dirs) {
+ std::string filename_dir(filename);
+ std::string filename_dir_base;
+ std::string filename_dir_bases;
+ do {
+ filename_dir = SystemTools::GetFilenamePath(filename_dir);
+ filename_dir_base = SystemTools::GetFilenameName(filename_dir);
+#if defined(_WIN32)
+ if (filename_dir_base.empty() || filename_dir_base.back() == ':')
+#else
+ if (filename_dir_base.empty())
+#endif
+ {
+ break;
+ }
+
+ filename_dir_bases = filename_dir_base + "/" + filename_dir_bases;
+
+ temp = dir;
+ if (need_slash) {
+ temp += "/";
+ }
+ temp += filename_dir_bases;
+
+ res = SystemTools::LocateFileInDir(filename_base.c_str(), temp.c_str(),
+ filename_found, 0);
+
+ } while (!res && !filename_dir_base.empty());
+ }
+ }
+
+ return res;
+}
+
+bool SystemTools::FileIsFullPath(const std::string& in_name)
+{
+ return SystemToolsStatic::FileIsFullPath(in_name.c_str(), in_name.size());
+}
+
+bool SystemTools::FileIsFullPath(const char* in_name)
+{
+ return SystemToolsStatic::FileIsFullPath(
+ in_name, in_name[0] ? (in_name[1] ? 2 : 1) : 0);
+}
+
+bool SystemToolsStatic::FileIsFullPath(const char* in_name, size_t len)
+{
+#if defined(_WIN32) || defined(__CYGWIN__)
+ // On Windows, the name must be at least two characters long.
+ if (len < 2) {
+ return false;
+ }
+ if (in_name[1] == ':') {
+ return true;
+ }
+ if (in_name[0] == '\\') {
+ return true;
+ }
+#else
+ // On UNIX, the name must be at least one character long.
+ if (len < 1) {
+ return false;
+ }
+#endif
+#if !defined(_WIN32)
+ if (in_name[0] == '~') {
+ return true;
+ }
+#endif
+ // On UNIX, the name must begin with a '/'.
+ // On Windows, if the name begins with a '/', then it is a full
+ // network path.
+ if (in_name[0] == '/') {
+ return true;
+ }
+ return false;
+}
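+
+/* Behaviour sketch (illustrative only, not part of the upstream kwsys
+   sources); assuming the configured namespace is "kwsys":
+
+     kwsys::SystemTools::FileIsFullPath("/usr/bin")   // true on all platforms
+     kwsys::SystemTools::FileIsFullPath("C:\\Temp")   // true on Windows ("C:" prefix)
+     kwsys::SystemTools::FileIsFullPath("~/notes")    // true on UNIX only
+     kwsys::SystemTools::FileIsFullPath("src/main.c") // false (relative path)
+*/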
+
+bool SystemTools::GetShortPath(const std::string& path, std::string& shortPath)
+{
+#if defined(_WIN32) && !defined(__CYGWIN__)
+ std::string tempPath = path; // create a buffer
+
+ // if the path passed in has quotes around it, first remove the quotes
+ if (!path.empty() && path[0] == '"' && path.back() == '"') {
+ tempPath = path.substr(1, path.length() - 2);
+ }
+
+ std::wstring wtempPath = Encoding::ToWide(tempPath);
+ DWORD ret = GetShortPathNameW(wtempPath.c_str(), nullptr, 0);
+ std::vector<wchar_t> buffer(ret);
+ if (ret != 0) {
+ ret = GetShortPathNameW(wtempPath.c_str(), &buffer[0],
+ static_cast<DWORD>(buffer.size()));
+ }
+
+ if (ret == 0) {
+ return false;
+ } else {
+ shortPath = Encoding::ToNarrow(&buffer[0]);
+ return true;
+ }
+#else
+ shortPath = path;
+ return true;
+#endif
+}
+
+std::string SystemTools::GetCurrentDateTime(const char* format)
+{
+ char buf[1024];
+ time_t t;
+ time(&t);
+ strftime(buf, sizeof(buf), format, localtime(&t));
+ return std::string(buf);
+}
+
+std::string SystemTools::MakeCidentifier(const std::string& s)
+{
+ std::string str(s);
+ if (str.find_first_of("0123456789") == 0) {
+ str = "_" + str;
+ }
+
+ std::string permitted_chars("_"
+ "abcdefghijklmnopqrstuvwxyz"
+ "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
+ "0123456789");
+ std::string::size_type pos = 0;
+ while ((pos = str.find_first_not_of(permitted_chars, pos)) !=
+ std::string::npos) {
+ str[pos] = '_';
+ }
+ return str;
+}
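+
+/* Usage sketch (illustrative only, not part of the upstream kwsys sources);
+   assuming the configured namespace is "kwsys":
+
+     std::string id = kwsys::SystemTools::MakeCidentifier("3rd-party lib");
+     // id == "_3rd_party_lib": a leading digit gets an underscore prepended
+     // and every character outside [A-Za-z0-9_] is replaced with '_'.
+*/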
+
+// Convenience function around std::getline which removes a trailing carriage
+// return and can truncate the buffer as needed. Returns true
+// if any data were read before the end-of-file was reached.
+bool SystemTools::GetLineFromStream(std::istream& is, std::string& line,
+ bool* has_newline /* = 0 */,
+ long sizeLimit /* = -1 */)
+{
+ // Start with an empty line.
+ line = "";
+
+ // Early short circuit return if stream is no good. Just return
+ // false and the empty line. (Probably means caller tried to
+ // create a file stream with a non-existent file name...)
+ //
+ if (!is) {
+ if (has_newline) {
+ *has_newline = false;
+ }
+ return false;
+ }
+
+ std::getline(is, line);
+ bool haveData = !line.empty() || !is.eof();
+ if (!line.empty()) {
+ // Avoid storing a carriage return character.
+ if (line.back() == '\r') {
+ line.resize(line.size() - 1);
+ }
+
+ // if we read too much then truncate the buffer
+ if (sizeLimit >= 0 && line.size() >= static_cast<size_t>(sizeLimit)) {
+ line.resize(sizeLimit);
+ }
+ }
+
+ // Return the results.
+ if (has_newline) {
+ *has_newline = !is.eof();
+ }
+ return haveData;
+}
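+
+/* Usage sketch (illustrative only, not part of the upstream kwsys sources);
+   assuming the configured namespace is "kwsys" and a hypothetical input file:
+
+     std::ifstream fin("input.txt");        // requires <fstream>
+     std::string line;
+     bool has_newline = false;
+     while (kwsys::SystemTools::GetLineFromStream(fin, line, &has_newline, -1)) {
+       // 'line' has any trailing '\r' stripped; has_newline is false when
+       // the line was terminated by end-of-file rather than a newline.
+     }
+*/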
+
+int SystemTools::GetTerminalWidth()
+{
+ int width = -1;
+#ifdef HAVE_TTY_INFO
+ struct winsize ws;
+ std::string columns; /* Unix98 environment variable */
+ if (ioctl(1, TIOCGWINSZ, &ws) != -1 && ws.ws_col > 0 && ws.ws_row > 0) {
+ width = ws.ws_col;
+ }
+ if (!isatty(STDOUT_FILENO)) {
+ width = -1;
+ }
+ if (SystemTools::GetEnv("COLUMNS", columns) && !columns.empty()) {
+ long t;
+ char* endptr;
+ t = strtol(columns.c_str(), &endptr, 0);
+ if (endptr && !*endptr && (t > 0) && (t < 1000)) {
+ width = static_cast<int>(t);
+ }
+ }
+ if (width < 9) {
+ width = -1;
+ }
+#endif
+ return width;
+}
+
+bool SystemTools::GetPermissions(const char* file, mode_t& mode)
+{
+ if (!file) {
+ return false;
+ }
+ return SystemTools::GetPermissions(std::string(file), mode);
+}
+
+bool SystemTools::GetPermissions(const std::string& file, mode_t& mode)
+{
+#if defined(_WIN32)
+ DWORD attr =
+ GetFileAttributesW(Encoding::ToWindowsExtendedPath(file).c_str());
+ if (attr == INVALID_FILE_ATTRIBUTES) {
+ return false;
+ }
+ if ((attr & FILE_ATTRIBUTE_READONLY) != 0) {
+ mode = (_S_IREAD | (_S_IREAD >> 3) | (_S_IREAD >> 6));
+ } else {
+ mode = (_S_IWRITE | (_S_IWRITE >> 3) | (_S_IWRITE >> 6)) |
+ (_S_IREAD | (_S_IREAD >> 3) | (_S_IREAD >> 6));
+ }
+ if ((attr & FILE_ATTRIBUTE_DIRECTORY) != 0) {
+ mode |= S_IFDIR | (_S_IEXEC | (_S_IEXEC >> 3) | (_S_IEXEC >> 6));
+ } else {
+ mode |= S_IFREG;
+ }
+ size_t dotPos = file.rfind('.');
+ const char* ext = dotPos == std::string::npos ? 0 : (file.c_str() + dotPos);
+ if (ext &&
+ (Strucmp(ext, ".exe") == 0 || Strucmp(ext, ".com") == 0 ||
+ Strucmp(ext, ".cmd") == 0 || Strucmp(ext, ".bat") == 0)) {
+ mode |= (_S_IEXEC | (_S_IEXEC >> 3) | (_S_IEXEC >> 6));
+ }
+#else
+ struct stat st;
+ if (stat(file.c_str(), &st) < 0) {
+ return false;
+ }
+ mode = st.st_mode;
+#endif
+ return true;
+}
+
+bool SystemTools::SetPermissions(const char* file, mode_t mode,
+ bool honor_umask)
+{
+ if (!file) {
+ return false;
+ }
+ return SystemTools::SetPermissions(std::string(file), mode, honor_umask);
+}
+
+bool SystemTools::SetPermissions(const std::string& file, mode_t mode,
+ bool honor_umask)
+{
+ if (!SystemTools::PathExists(file)) {
+ return false;
+ }
+ if (honor_umask) {
+ mode_t currentMask = umask(0);
+ umask(currentMask);
+ mode &= ~currentMask;
+ }
+#ifdef _WIN32
+ if (_wchmod(Encoding::ToWindowsExtendedPath(file).c_str(), mode) < 0)
+#else
+ if (chmod(file.c_str(), mode) < 0)
+#endif
+ {
+ return false;
+ }
+
+ return true;
+}
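+
+/* Usage sketch (illustrative only, not part of the upstream kwsys sources);
+   assuming the configured namespace is "kwsys" and a hypothetical file
+   (POSIX permission bits shown):
+
+     mode_t mode;
+     if (kwsys::SystemTools::GetPermissions("script.sh", mode)) {
+       // Add the owner-execute bit and write the permissions back,
+       // honoring the process umask.
+       kwsys::SystemTools::SetPermissions("script.sh", mode | S_IXUSR, true);
+     }
+*/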
+
+std::string SystemTools::GetParentDirectory(const std::string& fileOrDir)
+{
+ return SystemTools::GetFilenamePath(fileOrDir);
+}
+
+bool SystemTools::IsSubDirectory(const std::string& cSubdir,
+ const std::string& cDir)
+{
+ if (cDir.empty()) {
+ return false;
+ }
+ std::string subdir = cSubdir;
+ std::string dir = cDir;
+ SystemTools::ConvertToUnixSlashes(subdir);
+ SystemTools::ConvertToUnixSlashes(dir);
+ if (subdir.size() <= dir.size() || dir.empty()) {
+ return false;
+ }
+ bool isRootPath = dir.back() == '/'; // like "/" or "C:/"
+ size_t expectedSlashPosition = isRootPath ? dir.size() - 1u : dir.size();
+ if (subdir[expectedSlashPosition] != '/') {
+ return false;
+ }
+ std::string s = subdir.substr(0, dir.size());
+ return SystemTools::ComparePath(s, dir);
+}
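+
+/* Behaviour sketch (illustrative only, not part of the upstream kwsys
+   sources); assuming the configured namespace is "kwsys":
+
+     kwsys::SystemTools::IsSubDirectory("/usr/local/bin", "/usr/local") // true
+     kwsys::SystemTools::IsSubDirectory("/usr/local", "/usr/local")     // false (same dir)
+     kwsys::SystemTools::IsSubDirectory("/usr/locale", "/usr/local")    // false
+     // On Windows and macOS the comparison is case-insensitive (ComparePath).
+*/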
+
+void SystemTools::Delay(unsigned int msec)
+{
+#ifdef _WIN32
+ Sleep(msec);
+#else
+ // The sleep function gives 1 second resolution and the usleep
+ // function gives 1e-6 second resolution but on some platforms has a
+ // maximum sleep time of 1 second. This could be re-implemented to
+ // use select with masked signals or pselect to mask signals
+ // atomically. If select is given empty sets and zero as the max
+ // file descriptor but a non-zero timeout it can be used to block
+ // for a precise amount of time.
+ if (msec >= 1000) {
+ sleep(msec / 1000);
+ usleep((msec % 1000) * 1000);
+ } else {
+ usleep(msec * 1000);
+ }
+#endif
+}
+
+std::string SystemTools::GetOperatingSystemNameAndVersion()
+{
+ std::string res;
+
+#ifdef _WIN32
+ char buffer[256];
+
+ OSVERSIONINFOEXA osvi;
+ BOOL bOsVersionInfoEx;
+
+ ZeroMemory(&osvi, sizeof(osvi));
+ osvi.dwOSVersionInfoSize = sizeof(osvi);
+
+# ifdef KWSYS_WINDOWS_DEPRECATED_GetVersionEx
+# pragma warning(push)
+# ifdef __INTEL_COMPILER
+# pragma warning(disable : 1478)
+# elif defined __clang__
+# pragma clang diagnostic push
+# pragma clang diagnostic ignored "-Wdeprecated-declarations"
+# else
+# pragma warning(disable : 4996)
+# endif
+# endif
+ bOsVersionInfoEx = GetVersionExA((OSVERSIONINFOA*)&osvi);
+ if (!bOsVersionInfoEx) {
+ return "";
+ }
+# ifdef KWSYS_WINDOWS_DEPRECATED_GetVersionEx
+# ifdef __clang__
+# pragma clang diagnostic pop
+# else
+# pragma warning(pop)
+# endif
+# endif
+
+ switch (osvi.dwPlatformId) {
+ // Test for the Windows NT product family.
+
+ case VER_PLATFORM_WIN32_NT:
+
+ // Test for the specific product family.
+ if (osvi.dwMajorVersion == 10 && osvi.dwMinorVersion == 0) {
+ if (osvi.wProductType == VER_NT_WORKSTATION) {
+ res += "Microsoft Windows 10";
+ } else {
+ res += "Microsoft Windows Server 2016 family";
+ }
+ }
+
+ if (osvi.dwMajorVersion == 6 && osvi.dwMinorVersion == 3) {
+ if (osvi.wProductType == VER_NT_WORKSTATION) {
+ res += "Microsoft Windows 8.1";
+ } else {
+ res += "Microsoft Windows Server 2012 R2 family";
+ }
+ }
+
+ if (osvi.dwMajorVersion == 6 && osvi.dwMinorVersion == 2) {
+ if (osvi.wProductType == VER_NT_WORKSTATION) {
+ res += "Microsoft Windows 8";
+ } else {
+ res += "Microsoft Windows Server 2012 family";
+ }
+ }
+
+ if (osvi.dwMajorVersion == 6 && osvi.dwMinorVersion == 1) {
+ if (osvi.wProductType == VER_NT_WORKSTATION) {
+ res += "Microsoft Windows 7";
+ } else {
+ res += "Microsoft Windows Server 2008 R2 family";
+ }
+ }
+
+ if (osvi.dwMajorVersion == 6 && osvi.dwMinorVersion == 0) {
+ if (osvi.wProductType == VER_NT_WORKSTATION) {
+ res += "Microsoft Windows Vista";
+ } else {
+ res += "Microsoft Windows Server 2008 family";
+ }
+ }
+
+ if (osvi.dwMajorVersion == 5 && osvi.dwMinorVersion == 2) {
+ res += "Microsoft Windows Server 2003 family";
+ }
+
+ if (osvi.dwMajorVersion == 5 && osvi.dwMinorVersion == 1) {
+ res += "Microsoft Windows XP";
+ }
+
+ if (osvi.dwMajorVersion == 5 && osvi.dwMinorVersion == 0) {
+ res += "Microsoft Windows 2000";
+ }
+
+ if (osvi.dwMajorVersion <= 4) {
+ res += "Microsoft Windows NT";
+ }
+
+ // Test for specific product on Windows NT 4.0 SP6 and later.
+
+ if (bOsVersionInfoEx) {
+ // Test for the workstation type.
+
+ if (osvi.wProductType == VER_NT_WORKSTATION) {
+ if (osvi.dwMajorVersion == 4) {
+ res += " Workstation 4.0";
+ } else if (osvi.dwMajorVersion == 5) {
+ if (osvi.wSuiteMask & VER_SUITE_PERSONAL) {
+ res += " Home Edition";
+ } else {
+ res += " Professional";
+ }
+ }
+ }
+
+ // Test for the server type.
+
+ else if (osvi.wProductType == VER_NT_SERVER) {
+ if (osvi.dwMajorVersion == 5 && osvi.dwMinorVersion == 2) {
+ if (osvi.wSuiteMask & VER_SUITE_DATACENTER) {
+ res += " Datacenter Edition";
+ } else if (osvi.wSuiteMask & VER_SUITE_ENTERPRISE) {
+ res += " Enterprise Edition";
+ } else if (osvi.wSuiteMask == VER_SUITE_BLADE) {
+ res += " Web Edition";
+ } else {
+ res += " Standard Edition";
+ }
+ }
+
+ else if (osvi.dwMajorVersion == 5 && osvi.dwMinorVersion == 0) {
+ if (osvi.wSuiteMask & VER_SUITE_DATACENTER) {
+ res += " Datacenter Server";
+ } else if (osvi.wSuiteMask & VER_SUITE_ENTERPRISE) {
+ res += " Advanced Server";
+ } else {
+ res += " Server";
+ }
+ }
+
+ else if (osvi.dwMajorVersion <= 4) // Windows NT 4.0
+ {
+ if (osvi.wSuiteMask & VER_SUITE_ENTERPRISE) {
+ res += " Server 4.0, Enterprise Edition";
+ } else {
+ res += " Server 4.0";
+ }
+ }
+ }
+ }
+
+ // Test for specific product on Windows NT 4.0 SP5 and earlier
+
+ else {
+ HKEY hKey;
+# define BUFSIZE 80
+ wchar_t szProductType[BUFSIZE];
+ DWORD dwBufLen = BUFSIZE;
+ LONG lRet;
+
+ lRet =
+ RegOpenKeyExW(HKEY_LOCAL_MACHINE,
+ L"SYSTEM\\CurrentControlSet\\Control\\ProductOptions",
+ 0, KEY_QUERY_VALUE, &hKey);
+ if (lRet != ERROR_SUCCESS) {
+ return "";
+ }
+
+ lRet = RegQueryValueExW(hKey, L"ProductType", nullptr, nullptr,
+ (LPBYTE)szProductType, &dwBufLen);
+
+ if ((lRet != ERROR_SUCCESS) || (dwBufLen > BUFSIZE)) {
+ return "";
+ }
+
+ RegCloseKey(hKey);
+
+ if (lstrcmpiW(L"WINNT", szProductType) == 0) {
+ res += " Workstation";
+ }
+ if (lstrcmpiW(L"LANMANNT", szProductType) == 0) {
+ res += " Server";
+ }
+ if (lstrcmpiW(L"SERVERNT", szProductType) == 0) {
+ res += " Advanced Server";
+ }
+
+ res += " ";
+ sprintf(buffer, "%ld", osvi.dwMajorVersion);
+ res += buffer;
+ res += ".";
+ sprintf(buffer, "%ld", osvi.dwMinorVersion);
+ res += buffer;
+ }
+
+ // Display service pack (if any) and build number.
+
+ if (osvi.dwMajorVersion == 4 &&
+ lstrcmpiA(osvi.szCSDVersion, "Service Pack 6") == 0) {
+ HKEY hKey;
+ LONG lRet;
+
+ // Test for SP6 versus SP6a.
+
+ lRet = RegOpenKeyExW(
+ HKEY_LOCAL_MACHINE,
+ L"SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\Hotfix\\Q246009",
+ 0, KEY_QUERY_VALUE, &hKey);
+
+ if (lRet == ERROR_SUCCESS) {
+ res += " Service Pack 6a (Build ";
+ sprintf(buffer, "%ld", osvi.dwBuildNumber & 0xFFFF);
+ res += buffer;
+ res += ")";
+ } else // Windows NT 4.0 prior to SP6a
+ {
+ res += " ";
+ res += osvi.szCSDVersion;
+ res += " (Build ";
+ sprintf(buffer, "%ld", osvi.dwBuildNumber & 0xFFFF);
+ res += buffer;
+ res += ")";
+ }
+
+ RegCloseKey(hKey);
+ } else // Windows NT 3.51 and earlier or Windows 2000 and later
+ {
+ res += " ";
+ res += osvi.szCSDVersion;
+ res += " (Build ";
+ sprintf(buffer, "%ld", osvi.dwBuildNumber & 0xFFFF);
+ res += buffer;
+ res += ")";
+ }
+
+ break;
+
+ // Test for the Windows 95 product family.
+
+ case VER_PLATFORM_WIN32_WINDOWS:
+
+ if (osvi.dwMajorVersion == 4 && osvi.dwMinorVersion == 0) {
+ res += "Microsoft Windows 95";
+ if (osvi.szCSDVersion[1] == 'C' || osvi.szCSDVersion[1] == 'B') {
+ res += " OSR2";
+ }
+ }
+
+ if (osvi.dwMajorVersion == 4 && osvi.dwMinorVersion == 10) {
+ res += "Microsoft Windows 98";
+ if (osvi.szCSDVersion[1] == 'A') {
+ res += " SE";
+ }
+ }
+
+ if (osvi.dwMajorVersion == 4 && osvi.dwMinorVersion == 90) {
+ res += "Microsoft Windows Millennium Edition";
+ }
+ break;
+
+ case VER_PLATFORM_WIN32s:
+
+ res += "Microsoft Win32s";
+ break;
+ }
+#endif
+
+ return res;
+}
+
+bool SystemTools::ParseURLProtocol(const std::string& URL,
+ std::string& protocol,
+ std::string& dataglom)
+{
+ // match 0 entire url
+ // match 1 protocol
+ // match 2 dataglom following protocol://
+ kwsys::RegularExpression urlRe(VTK_URL_PROTOCOL_REGEX);
+
+ if (!urlRe.find(URL))
+ return false;
+
+ protocol = urlRe.match(1);
+ dataglom = urlRe.match(2);
+
+ return true;
+}
+
+bool SystemTools::ParseURL(const std::string& URL, std::string& protocol,
+ std::string& username, std::string& password,
+ std::string& hostname, std::string& dataport,
+ std::string& database)
+{
+ kwsys::RegularExpression urlRe(VTK_URL_REGEX);
+ if (!urlRe.find(URL))
+ return false;
+
+ // match 0 URL
+ // match 1 protocol
+ // match 2 mangled user
+ // match 3 username
+ // match 4 mangled password
+ // match 5 password
+ // match 6 hostname
+ // match 7 mangled port
+ // match 8 dataport
+ // match 9 database name
+
+ protocol = urlRe.match(1);
+ username = urlRe.match(3);
+ password = urlRe.match(5);
+ hostname = urlRe.match(6);
+ dataport = urlRe.match(8);
+ database = urlRe.match(9);
+
+ return true;
+}
+
+// These must NOT be initialized. Default initialization to zero is
+// necessary.
+static unsigned int SystemToolsManagerCount;
+SystemToolsStatic* SystemTools::Statics;
+
+// SystemToolsManager manages the SystemTools singleton.
+// SystemToolsManager should be included in any translation unit
+// that will use SystemTools or that implements the singleton
+// pattern. It makes sure that the SystemTools singleton is created
+// before and destroyed after all other singletons in CMake.
+
+SystemToolsManager::SystemToolsManager()
+{
+ if (++SystemToolsManagerCount == 1) {
+ SystemTools::ClassInitialize();
+ }
+}
+
+SystemToolsManager::~SystemToolsManager()
+{
+ if (--SystemToolsManagerCount == 0) {
+ SystemTools::ClassFinalize();
+ }
+}
+
+#if defined(__VMS)
+// On VMS we configure the run time C library to be more UNIX like.
+// http://h71000.www7.hp.com/doc/732final/5763/5763pro_004.html
+extern "C" int decc$feature_get_index(char* name);
+extern "C" int decc$feature_set_value(int index, int mode, int value);
+static int SetVMSFeature(char* name, int value)
+{
+ int i;
+ errno = 0;
+ i = decc$feature_get_index(name);
+ return i >= 0 && (decc$feature_set_value(i, 1, value) >= 0 || errno == 0);
+}
+#endif
+
+void SystemTools::ClassInitialize()
+{
+#ifdef __VMS
+ SetVMSFeature("DECC$FILENAME_UNIX_ONLY", 1);
+#endif
+
+ // Create statics singleton instance
+ SystemTools::Statics = new SystemToolsStatic;
+
+#if KWSYS_SYSTEMTOOLS_USE_TRANSLATION_MAP
+// Add some special translation paths for unix. These are not added
+// for windows because drive letters need to be maintained. Also,
+// there are no sym-links or mount points on windows anyway.
+# if !defined(_WIN32) || defined(__CYGWIN__)
+ // The tmp path is frequently a logical path so always keep it:
+ SystemTools::AddKeepPath("/tmp/");
+
+ // If the current working directory is a logical path then keep the
+ // logical name.
+ std::string pwd_str;
+ if (SystemTools::GetEnv("PWD", pwd_str)) {
+ char buf[2048];
+ if (const char* cwd = Getcwd(buf, 2048)) {
+ // The current working directory may be a logical path. Find
+ // the shortest logical path that still produces the correct
+ // physical path.
+ std::string cwd_changed;
+ std::string pwd_changed;
+
+ // Test progressively shorter logical-to-physical mappings.
+ std::string cwd_str = cwd;
+ std::string pwd_path;
+ Realpath(pwd_str, pwd_path);
+ while (cwd_str == pwd_path && cwd_str != pwd_str) {
+ // The current pair of paths is a working logical mapping.
+ cwd_changed = cwd_str;
+ pwd_changed = pwd_str;
+
+ // Strip off one directory level and see if the logical
+ // mapping still works.
+ pwd_str = SystemTools::GetFilenamePath(pwd_str);
+ cwd_str = SystemTools::GetFilenamePath(cwd_str);
+ Realpath(pwd_str, pwd_path);
+ }
+
+ // Add the translation to keep the logical path name.
+ if (!cwd_changed.empty() && !pwd_changed.empty()) {
+ SystemTools::AddTranslationPath(cwd_changed, pwd_changed);
+ }
+ }
+ }
+# endif
+#endif
+}
+
+void SystemTools::ClassFinalize()
+{
+ delete SystemTools::Statics;
+}
+
+} // namespace KWSYS_NAMESPACE
+
+#if defined(_MSC_VER) && defined(_DEBUG)
+# include <crtdbg.h>
+# include <stdio.h>
+# include <stdlib.h>
+namespace KWSYS_NAMESPACE {
+
+static int SystemToolsDebugReport(int, char* message, int*)
+{
+ fprintf(stderr, "%s", message);
+ fflush(stderr);
+ return 1; // no further reporting required
+}
+
+void SystemTools::EnableMSVCDebugHook()
+{
+ if (SystemTools::HasEnv("DART_TEST_FROM_DART") ||
+ SystemTools::HasEnv("DASHBOARD_TEST_FROM_CTEST")) {
+ _CrtSetReportHook(SystemToolsDebugReport);
+ }
+}
+
+} // namespace KWSYS_NAMESPACE
+#else
+namespace KWSYS_NAMESPACE {
+void SystemTools::EnableMSVCDebugHook()
+{
+}
+} // namespace KWSYS_NAMESPACE
+#endif
diff --git a/test/API/driver/kwsys/SystemTools.hxx.in b/test/API/driver/kwsys/SystemTools.hxx.in
new file mode 100644
index 0000000..c4ab9d4
--- /dev/null
+++ b/test/API/driver/kwsys/SystemTools.hxx.in
@@ -0,0 +1,981 @@
+/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying
+ file Copyright.txt or https://cmake.org/licensing#kwsys for details. */
+#ifndef @KWSYS_NAMESPACE@_SystemTools_hxx
+#define @KWSYS_NAMESPACE@_SystemTools_hxx
+
+#include <@KWSYS_NAMESPACE@/Configure.hxx>
+
+#include <iosfwd>
+#include <map>
+#include <string>
+#include <vector>
+
+#include <sys/types.h>
+// include sys/stat.h after sys/types.h
+#include <sys/stat.h>
+
+#if !defined(_WIN32) || defined(__CYGWIN__)
+# include <unistd.h> // For access permissions for use with access()
+#endif
+
+// Required for va_list
+#include <stdarg.h>
+// Required for FILE*
+#include <stdio.h>
+#if !defined(va_list)
+// Some compilers move va_list into the std namespace and there is no way to
+// tell that this has been done. Playing with things being included before or
+// after stdarg.h does not solve things because we do not have control over
+// what the user does. This hack solves this problem by moving va_list to our
+// own namespace that is local for kwsys.
+namespace std {
+} // Required for platforms that do not have std namespace
+namespace @KWSYS_NAMESPACE@_VA_LIST {
+using namespace std;
+typedef va_list hack_va_list;
+}
+namespace @KWSYS_NAMESPACE@ {
+typedef @KWSYS_NAMESPACE@_VA_LIST::hack_va_list va_list;
+}
+#endif // va_list
+
+namespace @KWSYS_NAMESPACE@ {
+
+class SystemToolsStatic;
+
+/** \class SystemToolsManager
+ * \brief Use to make sure SystemTools is initialized before it is used
+ * and is the last static object destroyed
+ */
+class @KWSYS_NAMESPACE@_EXPORT SystemToolsManager
+{
+public:
+ SystemToolsManager();
+ ~SystemToolsManager();
+
+ SystemToolsManager(const SystemToolsManager&) = delete;
+ SystemToolsManager& operator=(const SystemToolsManager&) = delete;
+};
+
+// This instance will show up in any translation unit that uses
+// SystemTools. It will make sure SystemTools is initialized
+// before it is used and is the last static object destroyed.
+static SystemToolsManager SystemToolsManagerInstance;
+
+// Flags for use with TestFileAccess. Use a typedef in case any operating
+// system in the future needs a special type. These are flags that may be
+// combined using the | operator.
+typedef int TestFilePermissions;
+#if defined(_WIN32) && !defined(__CYGWIN__)
+// On Windows (VC and Borland), no system header defines these constants...
+static const TestFilePermissions TEST_FILE_OK = 0;
+static const TestFilePermissions TEST_FILE_READ = 4;
+static const TestFilePermissions TEST_FILE_WRITE = 2;
+static const TestFilePermissions TEST_FILE_EXECUTE = 1;
+#else
+// Standard POSIX constants
+static const TestFilePermissions TEST_FILE_OK = F_OK;
+static const TestFilePermissions TEST_FILE_READ = R_OK;
+static const TestFilePermissions TEST_FILE_WRITE = W_OK;
+static const TestFilePermissions TEST_FILE_EXECUTE = X_OK;
+#endif
+
+/** \class SystemTools
+ * \brief A collection of useful platform-independent system functions.
+ */
+class @KWSYS_NAMESPACE@_EXPORT SystemTools
+{
+public:
+ /** -----------------------------------------------------------------
+ * String Manipulation Routines
+ * -----------------------------------------------------------------
+ */
+
+ /**
+ * Replace symbols in str that are not valid in C identifiers as
+ * defined by the 1999 standard, i.e. anything except [A-Za-z0-9_].
+ * They are replaced with `_' and if the first character is a digit
+ * then an underscore is prepended. Note that this can produce
+ * identifiers that the standard reserves (_[A-Z].* and __.*).
+ */
+ static std::string MakeCidentifier(const std::string& s);
+
+ static std::string MakeCindentifier(const std::string& s)
+ {
+ return MakeCidentifier(s);
+ }
+
+ /**
+ * Replace all occurrences of 'replace' in the source string with 'with'.
+ */
+ static void ReplaceString(std::string& source, const char* replace,
+ const char* with);
+ static void ReplaceString(std::string& source, const std::string& replace,
+ const std::string& with);
+
+ /**
+ * Return a capitalized string (i.e. the first letter is uppercased,
+ * all others are lowercased).
+ */
+ static std::string Capitalized(const std::string&);
+
+ /**
+ * Return a 'capitalized words' string (i.e. the first letter of each word
+ * is uppercased; all others are left untouched).
+ */
+ static std::string CapitalizedWords(const std::string&);
+
+ /**
+ * Return an 'uncapitalized words' string (i.e. the first letter of each word
+ * is lowercased; all others are left untouched).
+ */
+ static std::string UnCapitalizedWords(const std::string&);
+
+ /**
+ * Return a lower case string
+ */
+ static std::string LowerCase(const std::string&);
+
+ /**
+ * Return an upper case string
+ */
+ static std::string UpperCase(const std::string&);
+
+ /**
+ * Count char in string
+ */
+ static size_t CountChar(const char* str, char c);
+
+ /**
+ * Remove some characters from a string.
+ * Return a pointer to the new resulting string (allocated with 'new')
+ */
+ static char* RemoveChars(const char* str, const char* toremove);
+
+ /**
+ * Remove all but 0->9, A->F characters from a string.
+ * Return a pointer to the new resulting string (allocated with 'new')
+ */
+ static char* RemoveCharsButUpperHex(const char* str);
+
+ /**
+ * Replace some characters by another character in a string (in-place)
+ * Return a pointer to string
+ */
+ static char* ReplaceChars(char* str, const char* toreplace,
+ char replacement);
+
+ /**
+ * Returns true if str1 starts (respectively ends) with str2
+ */
+ static bool StringStartsWith(const char* str1, const char* str2);
+ static bool StringStartsWith(const std::string& str1, const char* str2);
+ static bool StringEndsWith(const char* str1, const char* str2);
+ static bool StringEndsWith(const std::string& str1, const char* str2);
+
+ /**
+ * Returns a pointer to the last occurrence of str2 in str1
+ */
+ static const char* FindLastString(const char* str1, const char* str2);
+
+ /**
+ * Make a duplicate of the string similar to the strdup C function
+ * but use new to create the 'new' string, so one can use
+ * 'delete' to remove it. Returns 0 if the input is empty.
+ */
+ static char* DuplicateString(const char* str);
+
+ /**
+ * Return the string cropped to a given length by removing chars in the
+ * center of the string and replacing them with an ellipsis (...)
+ */
+ static std::string CropString(const std::string&, size_t max_len);
+
+ /** Split a path by separator into an array of strings; the default
+ separator is '/'. If isPath is true then the string is treated like a
+ path, and if s starts with a '/' then the first element of the returned
+ array will be "/", so "/foo/bar" becomes ["/", "foo", "bar"].
+ */
+ static std::vector<std::string> SplitString(const std::string& s,
+ char separator = '/',
+ bool isPath = false);
+ /**
+ * Perform a case-independent string comparison
+ */
+ static int Strucmp(const char* s1, const char* s2);
+
+ /**
+ * Split a string on its newlines into multiple lines.
+ * Return false only if the last line stored had no newline.
+ */
+ static bool Split(const std::string& s, std::vector<std::string>& l);
+ static bool Split(const std::string& s, std::vector<std::string>& l,
+ char separator);
+
+ /**
+ * Return string with space added between capitalized words
+ * (i.e. EatMyShorts becomes Eat My Shorts)
+ * (note that IEatShorts becomes IEat Shorts)
+ */
+ static std::string AddSpaceBetweenCapitalizedWords(const std::string&);
+
+ /**
+ * Append two or more strings and produce a new one.
+ * The caller must 'delete []' the resulting string, which was allocated
+ * with 'new'.
+ * Return 0 if inputs are empty or there was an error.
+ */
+ static char* AppendStrings(const char* str1, const char* str2);
+ static char* AppendStrings(const char* str1, const char* str2,
+ const char* str3);
+
+ /**
+ * Estimate the length of the string that will be produced
+ * from printing the given format string and arguments. The
+ * returned length will always be at least as large as the string
+ * that will result from printing.
+ * WARNING: since va_arg is called to iterate over the argument list,
+ * 'ap' cannot be reused from the beginning afterwards.
+ * It is still the caller's responsibility to call va_end.
+ */
+ static int EstimateFormatLength(const char* format, va_list ap);
+
+ /**
+ * Escape specific characters in 'str'.
+ */
+ static std::string EscapeChars(const char* str, const char* chars_to_escape,
+ char escape_char = '\\');
+
+ /** -----------------------------------------------------------------
+ * Filename Manipulation Routines
+ * -----------------------------------------------------------------
+ */
+
+ /**
+ * Replace Windows file system slashes with Unix-style slashes.
+ */
+ static void ConvertToUnixSlashes(std::string& path);
+
+#ifdef _WIN32
+ /** Calls Encoding::ToWindowsExtendedPath. */
+ static std::wstring ConvertToWindowsExtendedPath(const std::string&);
+#endif
+
+ /**
+ * For Windows this calls ConvertToWindowsOutputPath and for Unix
+ * it calls ConvertToUnixOutputPath.
+ */
+ static std::string ConvertToOutputPath(const std::string&);
+
+ /**
+ * Convert the path to a string that can be used in a Unix makefile.
+ * Double slashes are removed, and spaces are escaped.
+ */
+ static std::string ConvertToUnixOutputPath(const std::string&);
+
+ /**
+ * Convert the path to a string that can be used in a Windows project or
+ * makefile. Double slashes are removed if they are not at the start of
+ * the string, slashes are converted to Windows-style backslashes, and
+ * if there are spaces in the string it is double quoted.
+ */
+ static std::string ConvertToWindowsOutputPath(const std::string&);
+
+ /**
+ * Return true if a path with the given name exists in the current directory.
+ */
+ static bool PathExists(const std::string& path);
+
+ /**
+ * Return true if a file exists in the current directory.
+ * If isFile = true, then make sure the file is a file and
+ * not a directory. If isFile = false, then return true
+ * if it is a file or a directory. Note that the file will
+ * also be checked for read access. (Currently, this check
+ * for read access is only done on POSIX systems.)
+ */
+ static bool FileExists(const char* filename, bool isFile);
+ static bool FileExists(const std::string& filename, bool isFile);
+ static bool FileExists(const char* filename);
+ static bool FileExists(const std::string& filename);
+
+ /**
+ * Test if a file exists and can be accessed with the requested
+ * permissions. Symbolic links are followed. Returns true if
+ * the access test was successful.
+ *
+ * On POSIX systems (including Cygwin), this maps to the access
+ * function. On Windows systems, all existing files are
+ * considered readable, and writable files are considered to
+ * have the read-only file attribute cleared.
+ */
+ static bool TestFileAccess(const char* filename,
+ TestFilePermissions permissions);
+ static bool TestFileAccess(const std::string& filename,
+ TestFilePermissions permissions);
+/**
+ * Cross platform wrapper for stat struct
+ */
+#if defined(_WIN32) && !defined(__CYGWIN__)
+# if defined(__BORLANDC__)
+ typedef struct stati64 Stat_t;
+# else
+ typedef struct _stat64 Stat_t;
+# endif
+#else
+ typedef struct stat Stat_t;
+#endif
+
+ /**
+ * Cross platform wrapper for stat system call
+ *
+ * On Windows this may not work for paths longer than 250 characters
+ * due to limitations of the underlying '_wstat64' call.
+ */
+ static int Stat(const char* path, Stat_t* buf);
+ static int Stat(const std::string& path, Stat_t* buf);
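+  /* Example (illustrative sketch, not part of kwsys): Stat_t hides the
+     platform differences of the underlying stat structure:
+
+       SystemTools::Stat_t st;
+       if (SystemTools::Stat("/tmp/data.bin", &st) == 0) {
+         // st.st_size holds the file size on all supported platforms
+       }
+  */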
+
+/**
+ * Converts Cygwin path to Win32 path. Uses dictionary container for
+ * caching and calls to cygwin_conv_to_win32_path from Cygwin dll
+ * for actual translation. Returns true on success, else false.
+ */
+#ifdef __CYGWIN__
+ static bool PathCygwinToWin32(const char* path, char* win32_path);
+#endif
+
+ /**
+ * Return file length
+ */
+ static unsigned long FileLength(const std::string& filename);
+
+ /**
+ Change the modification time or create a file
+ */
+ static bool Touch(const std::string& filename, bool create);
+
+ /**
+ * Compare file modification times.
+ * Return true for successful comparison and false for error.
+ * When true is returned, result has -1, 0, +1 for
+ * f1 older, same, or newer than f2.
+ */
+ static bool FileTimeCompare(const std::string& f1, const std::string& f2,
+ int* result);
+
+ /**
+ * Get the file extension (including ".") needed for an executable
+ * on the current platform ("" for unix, ".exe" for Windows).
+ */
+ static const char* GetExecutableExtension();
+
+ /**
+ * Given a path on a Windows machine, return the actual case of
+ * the path as it exists on disk. Path components that do not
+ * exist on disk are returned unchanged. Relative paths are always
+ * returned unchanged. Drive letters are always made upper case.
+ * This does nothing on non-Windows systems but return the path.
+ */
+ static std::string GetActualCaseForPath(const std::string& path);
+
+ /**
+ * Given the path to a program executable, get the directory part of
+ * the path with the file stripped off. If there is no directory
+ * part, the empty string is returned.
+ */
+ static std::string GetProgramPath(const std::string&);
+ static bool SplitProgramPath(const std::string& in_name, std::string& dir,
+ std::string& file, bool errorReport = true);
+
+ /**
+ * Given argv[0] for a unix program, find the full path to the running
+ * executable. argv0 can be null for Windows WinMain programs; in that
+ * case GetModuleFileName will be used to find the path to the running
+ * executable. If argv0 is not a full path, then this will try to find
+ * the full path. Returns true if the path is found and false otherwise.
+ * An error message listing the attempted paths is stored in errorMsg.
+ * exeName is the name of the executable.
+ * buildDir is a possibly null path to the build directory.
+ * installPrefix is a possibly null pointer to the install directory.
+ */
+ static bool FindProgramPath(const char* argv0, std::string& pathOut,
+ std::string& errorMsg,
+ const char* exeName = nullptr,
+ const char* buildDir = nullptr,
+ const char* installPrefix = nullptr);
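+  /* Example (illustrative sketch, not part of kwsys): resolve the running
+     executable's full path from main()'s argv[0]:
+
+       std::string exe, err;
+       if (SystemTools::FindProgramPath(argv[0], exe, err)) {
+         // exe now holds the full path to the executable
+       }
+  */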
+
+ /**
+ * Given a path to a file or directory, convert it to a full path.
+ * This collapses away relative paths relative to the cwd argument
+ * (which defaults to the current working directory). The full path
+ * is returned.
+ */
+ static std::string CollapseFullPath(const std::string& in_relative);
+ static std::string CollapseFullPath(const std::string& in_relative,
+ const char* in_base);
+ static std::string CollapseFullPath(const std::string& in_relative,
+ const std::string& in_base);
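+  /* Example (illustrative sketch, not part of kwsys): collapse a relative
+     path against an explicit base directory:
+
+       std::string full =
+         SystemTools::CollapseFullPath("../data/file.txt", "/home/user/src");
+       // full == "/home/user/data/file.txt"
+  */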
+
+ /**
+ * Get the real path for a given path, removing all symlinks. In
+ * the event of an error (non-existent path, permissions issue,
+ * etc.) the original path is returned if errorMessage pointer is
+ * nullptr. Otherwise empty string is returned and errorMessage
+ * contains error description.
+ */
+ static std::string GetRealPath(const std::string& path,
+ std::string* errorMessage = nullptr);
+
+ /**
+ * Split a path name into its root component and the rest of the
+ * path. The root component is one of the following:
+ * "/" = UNIX full path
+ * "c:/" = Windows full path (can be any drive letter)
+ * "c:" = Windows drive-letter relative path (can be any drive letter)
+ * "//" = Network path
+ * "~/" = Home path for current user
+ * "~u/" = Home path for user 'u'
+ * "" = Relative path
+ *
+ * A pointer to the rest of the path after the root component is
+ * returned. The root component is stored in the "root" string if
+ * given.
+ */
+ static const char* SplitPathRootComponent(const std::string& p,
+ std::string* root = nullptr);
+
+ /**
+ * Split a path name into its basic components. The first component
+ * always exists and is the root returned by SplitPathRootComponent.
+ * The remaining components form the path. If there is a trailing
+ * slash then the last component is the empty string. The
+ * components can be recombined as "c[0]c[1]/c[2]/.../c[n]" to
+ * produce the original path. Home directory references are
+ * automatically expanded if expand_home_dir is true and this
+ * platform supports them.
+ *
+ * This does *not* normalize the input path. All components are
+ * preserved, including empty ones. Typically callers should use
+ * this only on paths that have already been normalized.
+ */
+ static void SplitPath(const std::string& p,
+ std::vector<std::string>& components,
+ bool expand_home_dir = true);
+
+ /**
+ * Join components of a path name into a single string. See
+ * SplitPath for the format of the components.
+ *
+ * This does *not* normalize the input path. All components are
+ * preserved, including empty ones. Typically callers should use
+ * this only on paths that have already been normalized.
+ */
+ static std::string JoinPath(const std::vector<std::string>& components);
+ static std::string JoinPath(std::vector<std::string>::const_iterator first,
+ std::vector<std::string>::const_iterator last);
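+  /* Example (illustrative sketch, not part of kwsys): SplitPath and JoinPath
+     are inverses for already-normalized paths:
+
+       std::vector<std::string> c;
+       SystemTools::SplitPath("/usr/local/bin", c);
+       // c == { "/", "usr", "local", "bin" }
+       std::string p = SystemTools::JoinPath(c); // "/usr/local/bin"
+  */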
+
+ /**
+ * Compare a path or components of a path.
+ */
+ static bool ComparePath(const std::string& c1, const std::string& c2);
+
+ /**
+ * Return path of a full filename (no trailing slashes)
+ */
+ static std::string GetFilenamePath(const std::string&);
+
+ /**
+ * Return file name of a full filename (i.e. file name without path)
+ */
+ static std::string GetFilenameName(const std::string&);
+
+ /**
+ * Return longest file extension of a full filename (dot included)
+ */
+ static std::string GetFilenameExtension(const std::string&);
+
+ /**
+ * Return shortest file extension of a full filename (dot included)
+ */
+ static std::string GetFilenameLastExtension(const std::string& filename);
+
+ /**
+ * Return file name without extension of a full filename
+ */
+ static std::string GetFilenameWithoutExtension(const std::string&);
+
+ /**
+ * Return file name without its last (shortest) extension
+ */
+ static std::string GetFilenameWithoutLastExtension(const std::string&);
+
+ /**
+ * Return whether the path represents a full path (not relative)
+ */
+ static bool FileIsFullPath(const std::string&);
+ static bool FileIsFullPath(const char*);
+
+ /**
+ * On Windows return the short path for the given path;
+ * on Unix this is just a pass-through.
+ */
+ static bool GetShortPath(const std::string& path, std::string& result);
+
+ /**
+ * Read a line from a file. A full line is read and truncated if
+ * requested via sizeLimit. Returns true if any data were read before the
+ * end-of-file was reached. If the has_newline argument is specified, it will
+ * be true when the line read had a newline character.
+ */
+ static bool GetLineFromStream(std::istream& istr, std::string& line,
+ bool* has_newline = nullptr,
+ long sizeLimit = -1);
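+  /* Example (illustrative sketch, not part of kwsys): read a stream line by
+     line:
+
+       std::ifstream fin("input.txt");
+       std::string line;
+       while (SystemTools::GetLineFromStream(fin, line)) {
+         // process one line
+       }
+  */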
+
+ /**
+ * Get the parent directory of the directory or file
+ */
+ static std::string GetParentDirectory(const std::string& fileOrDir);
+
+ /**
+ * Check if the given file or directory is in a subdirectory of dir
+ */
+ static bool IsSubDirectory(const std::string& fileOrDir,
+ const std::string& dir);
+
+ /** -----------------------------------------------------------------
+ * File Manipulation Routines
+ * -----------------------------------------------------------------
+ */
+
+ /**
+ * Open a file considering unicode.
+ */
+ static FILE* Fopen(const std::string& file, const char* mode);
+
+/**
+ * Visual C++ does not define mode_t (note that Borland does, however).
+ */
+#if defined(_MSC_VER)
+ typedef unsigned short mode_t;
+#endif
+
+ /**
+ * Make a new directory if it is not there. This function
+ * can make a full path even if none of the directories existed
+ * prior to calling this function.
+ */
+ static bool MakeDirectory(const char* path, const mode_t* mode = nullptr);
+ static bool MakeDirectory(const std::string& path,
+ const mode_t* mode = nullptr);
+
+ /**
+ * Copy the source file to the destination file only
+ * if the two files differ.
+ */
+ static bool CopyFileIfDifferent(const std::string& source,
+ const std::string& destination);
+
+ /**
+ * Compare the contents of two files. Return true if different
+ */
+ static bool FilesDiffer(const std::string& source,
+ const std::string& destination);
+
+ /**
+ * Compare the contents of two files, ignoring line ending differences.
+ * Return true if different
+ */
+ static bool TextFilesDiffer(const std::string& path1,
+ const std::string& path2);
+
+ /**
+ * Return true if the two files are the same file
+ */
+ static bool SameFile(const std::string& file1, const std::string& file2);
+
+ /**
+ * Copy a file.
+ */
+ static bool CopyFileAlways(const std::string& source,
+ const std::string& destination);
+
+ /**
+ * Copy a file. If the "always" argument is true the file is always
+ * copied. If it is false, the file is copied only if it is new or
+ * has changed.
+ */
+ static bool CopyAFile(const std::string& source,
+ const std::string& destination, bool always = true);
+
+ /**
+ * Copy the contents of a directory to another directory, including all
+ * files and subdirectories. If the "always" argument is true all files
+ * are always copied. If it is false, only files that have changed or
+ * are new are copied.
+ */
+ static bool CopyADirectory(const std::string& source,
+ const std::string& destination,
+ bool always = true);
+
+ /**
+ * Remove a file
+ */
+ static bool RemoveFile(const std::string& source);
+
+ /**
+ * Remove a directory
+ */
+ static bool RemoveADirectory(const std::string& source);
+
+ /**
+ * Get the maximum full file path length
+ */
+ static size_t GetMaximumFilePathLength();
+
+ /**
+ * Find a file in the system PATH, with optional extra paths
+ */
+ static std::string FindFile(
+ const std::string& name,
+ const std::vector<std::string>& path = std::vector<std::string>(),
+ bool no_system_path = false);
+
+ /**
+ * Find a directory in the system PATH, with optional extra paths
+ */
+ static std::string FindDirectory(
+ const std::string& name,
+ const std::vector<std::string>& path = std::vector<std::string>(),
+ bool no_system_path = false);
+
+ /**
+ * Find an executable in the system PATH, with optional extra paths
+ */
+ static std::string FindProgram(
+ const char* name,
+ const std::vector<std::string>& path = std::vector<std::string>(),
+ bool no_system_path = false);
+ static std::string FindProgram(
+ const std::string& name,
+ const std::vector<std::string>& path = std::vector<std::string>(),
+ bool no_system_path = false);
+ static std::string FindProgram(
+ const std::vector<std::string>& names,
+ const std::vector<std::string>& path = std::vector<std::string>(),
+ bool no_system_path = false);
+
+ /**
+ * Find a library in the system PATH, with optional extra paths
+ */
+ static std::string FindLibrary(const std::string& name,
+ const std::vector<std::string>& path);
+
+ /**
+ * Return true if the file is a directory
+ */
+ static bool FileIsDirectory(const std::string& name);
+
+ /**
+ * Return true if the file is a symlink
+ */
+ static bool FileIsSymlink(const std::string& name);
+
+ /**
+ * Return true if the file is a FIFO
+ */
+ static bool FileIsFIFO(const std::string& name);
+
+ /**
+ * Return true if the file has a given signature (first set of bytes)
+ */
+ static bool FileHasSignature(const char* filename, const char* signature,
+ long offset = 0);
+
+ /**
+ * Attempt to detect and return the type of a file.
+ * Up to 'length' bytes are read from the file; if more than 'percent_bin' %
+ * of the bytes are non-textual elements, the file is considered binary,
+ * otherwise textual. Textual elements are bytes in the ASCII [0x20, 0x7E]
+ * range, but also \\n, \\r, \\t.
+ * The algorithm is simplistic, and should probably also check for usual file
+ * extensions, 'magic' signatures, unicode, etc.
+ */
+ enum FileTypeEnum
+ {
+ FileTypeUnknown,
+ FileTypeBinary,
+ FileTypeText
+ };
+ static SystemTools::FileTypeEnum DetectFileType(const char* filename,
+ unsigned long length = 256,
+ double percent_bin = 0.05);
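+  /* Example (illustrative sketch, not part of kwsys):
+
+       if (SystemTools::DetectFileType("archive.dat") ==
+           SystemTools::FileTypeBinary) {
+         // treat the file as binary
+       }
+  */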
+
+ /**
+ * Create a symbolic link if the platform supports it. Returns whether
+ * creation succeeded.
+ */
+ static bool CreateSymlink(const std::string& origName,
+ const std::string& newName);
+
+ /**
+ * Read the contents of a symbolic link. Returns whether reading
+ * succeeded.
+ */
+ static bool ReadSymlink(const std::string& newName, std::string& origName);
+
+ /**
+ * Try to locate the file 'filename' in the directory 'dir'.
+ * If 'filename' is a fully qualified filename, the basename of the file is
+ * used to check for its existence in 'dir'.
+ * If 'dir' is not a directory, GetFilenamePath() is called on 'dir' to
+ * get its directory first (thus, you can pass a filename as 'dir', as
+ * a convenience).
+ * 'filename_found' is assigned the fully qualified name/path of the file
+ * if it is found (not touched otherwise).
+ * If 'try_filename_dirs' is true, try to find the file using the
+ * components of its path, i.e. if we are looking for c:/foo/bar/bill.txt,
+ * first look for bill.txt in 'dir', then in 'dir'/bar, then in 'dir'/foo/bar
+ * etc.
+ * Return true if the file was found, false otherwise.
+ */
+ static bool LocateFileInDir(const char* filename, const char* dir,
+ std::string& filename_found,
+ int try_filename_dirs = 0);
+
+ /** Compute the relative path from local to remote. local must
+     be a directory. remote can be a file or a directory.
+     Both remote and local must be full paths. Basically, if
+     you are in directory local and you want to access the file in remote,
+     this gives the relative path needed to do that. For example:
+     /a/b/c/d to /a/b/c1/d1 -> ../../c1/d1
+     from /usr/src to /usr/src/test/blah/foo.cpp -> test/blah/foo.cpp
+ */
+ static std::string RelativePath(const std::string& local,
+ const std::string& remote);
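+  /* Example (illustrative sketch, not part of kwsys), matching the cases
+     described above:
+
+       std::string rel =
+         SystemTools::RelativePath("/usr/src", "/usr/src/test/blah/foo.cpp");
+       // rel == "test/blah/foo.cpp"
+  */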
+
+ /**
+ * Return file's modified time
+ */
+ static long int ModifiedTime(const std::string& filename);
+
+ /**
+ * Return file's creation time (Win32: works only for NTFS, not FAT)
+ */
+ static long int CreationTime(const std::string& filename);
+
+ /**
+ * Get and set permissions of the file. If honor_umask is set, the umask
+ * is queried and applied to the given permissions. Returns false on
+ * failure.
+ *
+ * WARNING: A non-thread-safe method is currently used to get the umask
+ * if the honor_umask parameter is set to true.
+ */
+ static bool GetPermissions(const char* file, mode_t& mode);
+ static bool GetPermissions(const std::string& file, mode_t& mode);
+ static bool SetPermissions(const char* file, mode_t mode,
+ bool honor_umask = false);
+ static bool SetPermissions(const std::string& file, mode_t mode,
+ bool honor_umask = false);
+
+ /** -----------------------------------------------------------------
+ * Time Manipulation Routines
+ * -----------------------------------------------------------------
+ */
+
+ /** Get current time in seconds since Posix Epoch (Jan 1, 1970). */
+ static double GetTime();
+
+ /**
+ * Get current date/time
+ */
+ static std::string GetCurrentDateTime(const char* format);
+
+ /** -----------------------------------------------------------------
+ * Registry Manipulation Routines
+ * -----------------------------------------------------------------
+ */
+
+ /**
+ * Specify access to the 32-bit or 64-bit application view of
+ * registry values. The default is to match the currently running
+ * binary type.
+ */
+ enum KeyWOW64
+ {
+ KeyWOW64_Default,
+ KeyWOW64_32,
+ KeyWOW64_64
+ };
+
+ /**
+ * Get a list of subkeys.
+ */
+ static bool GetRegistrySubKeys(const std::string& key,
+ std::vector<std::string>& subkeys,
+ KeyWOW64 view = KeyWOW64_Default);
+
+ /**
+ * Read a registry value
+ */
+ static bool ReadRegistryValue(const std::string& key, std::string& value,
+ KeyWOW64 view = KeyWOW64_Default);
+
+ /**
+ * Write a registry value
+ */
+ static bool WriteRegistryValue(const std::string& key,
+ const std::string& value,
+ KeyWOW64 view = KeyWOW64_Default);
+
+ /**
+ * Delete a registry value
+ */
+ static bool DeleteRegistryValue(const std::string& key,
+ KeyWOW64 view = KeyWOW64_Default);
+
+ /** -----------------------------------------------------------------
+ * Environment Manipulation Routines
+ * -----------------------------------------------------------------
+ */
+
+ /**
+ * Add the paths from the environment variable PATH to the
+ * string vector passed in. If env is set then the value
+ * of env will be used instead of PATH.
+ */
+ static void GetPath(std::vector<std::string>& path,
+ const char* env = nullptr);
+
+ /**
+ * Read an environment variable
+ */
+ static const char* GetEnv(const char* key);
+ static const char* GetEnv(const std::string& key);
+ static bool GetEnv(const char* key, std::string& result);
+ static bool GetEnv(const std::string& key, std::string& result);
+ static bool HasEnv(const char* key);
+ static bool HasEnv(const std::string& key);
+
+ /** Put a string into the environment
+ of the form var=value */
+ static bool PutEnv(const std::string& env);
+
+ /** Remove a string from the environment.
+ Input is of the form "var" or "var=value" (value is ignored). */
+ static bool UnPutEnv(const std::string& env);
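+  /* Example (illustrative sketch, not part of kwsys):
+
+       SystemTools::PutEnv("MY_OPTION=1");
+       std::string v;
+       if (SystemTools::GetEnv("MY_OPTION", v)) {
+         // v == "1"
+       }
+       SystemTools::UnPutEnv("MY_OPTION");
+  */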
+
+ /**
+ * Get the current working directory (CWD)
+ */
+ static std::string GetCurrentWorkingDirectory(bool collapse = true);
+
+ /**
+ * Change directory to the directory specified
+ */
+ static int ChangeDirectory(const std::string& dir);
+
+ /**
+ * Get the result of strerror(errno)
+ */
+ static std::string GetLastSystemError();
+
+ /**
+ * When building DEBUG with MSVC, this enables a hook that prevents
+ * error dialogs from popping up if the program is being run from
+ * DART.
+ */
+ static void EnableMSVCDebugHook();
+
+ /**
+ * Get the width of the terminal window. The code may or may not work, so
+ * make sure you have some reasonable defaults prepared if the code returns
+ * some bogus size.
+ */
+ static int GetTerminalWidth();
+
+#if @KWSYS_NAMESPACE@_SYSTEMTOOLS_USE_TRANSLATION_MAP
+ /**
+ * Add an entry in the path translation table.
+ */
+ static void AddTranslationPath(const std::string& dir,
+ const std::string& refdir);
+
+ /**
+ * If dir is different after CollapseFullPath is called,
+ * then insert it into the path translation table.
+ */
+ static void AddKeepPath(const std::string& dir);
+
+ /**
+ * Update path by going through the Path Translation table.
+ */
+ static void CheckTranslationPath(std::string& path);
+#endif
+
+ /**
+ * Delay execution for a specified amount of time,
+ * in milliseconds.
+ */
+ static void Delay(unsigned int msec);
+
+ /**
+ * Get the operating system name and version.
+ * This is implemented for Win32 only for the moment.
+ */
+ static std::string GetOperatingSystemNameAndVersion();
+
+ /** -----------------------------------------------------------------
+ * URL Manipulation Routines
+ * -----------------------------------------------------------------
+ */
+
+ /**
+ * Parse a character string of the form:
+ *  protocol://dataglom
+ * and fill protocol and dataglom as appropriate.
+ * Return false if the URL does not have the required form, true otherwise.
+ */
+ static bool ParseURLProtocol(const std::string& URL, std::string& protocol,
+ std::string& dataglom);
+
+ /**
+ * Parse a URL string with the form:
+ * protocol://[[username[':'password]'@']hostname[':'dataport]]'/'[datapath]
+ * and fill protocol, username, password, hostname, dataport, and datapath
+ * when values are found.
+ * Return true if the string matches the format; false otherwise.
+ */
+ static bool ParseURL(const std::string& URL, std::string& protocol,
+ std::string& username, std::string& password,
+ std::string& hostname, std::string& dataport,
+ std::string& datapath);
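+  /* Example (illustrative sketch, not part of kwsys):
+
+       std::string proto, user, pw, host, port, path;
+       if (SystemTools::ParseURL("http://example.com/index.html", proto,
+                                 user, pw, host, port, path)) {
+         // proto == "http", host == "example.com"
+       }
+  */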
+
+private:
+ /**
+ * Allocate the STL map that serves as the Path Translation table.
+ */
+ static void ClassInitialize();
+
+ /**
+ * Deallocate the STL map that serves as the Path Translation table.
+ */
+ static void ClassFinalize();
+
+ /**
+ * This method prevents a warning on SGI.
+ */
+ SystemToolsManager* GetSystemToolsManager()
+ {
+ return &SystemToolsManagerInstance;
+ }
+
+ static SystemToolsStatic* Statics;
+ friend class SystemToolsStatic;
+ friend class SystemToolsManager;
+};
+
+} // namespace @KWSYS_NAMESPACE@
+
+#endif
diff --git a/test/API/driver/kwsys/Terminal.c b/test/API/driver/kwsys/Terminal.c
new file mode 100644
index 0000000..4dd2461
--- /dev/null
+++ b/test/API/driver/kwsys/Terminal.c
@@ -0,0 +1,414 @@
+/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying
+ file Copyright.txt or https://cmake.org/licensing#kwsys for details. */
+#include "kwsysPrivate.h"
+#include KWSYS_HEADER(Terminal.h)
+
+/* Work-around CMake dependency scanning limitation. This must
+ duplicate the above list of headers. */
+#if 0
+# include "Terminal.h.in"
+#endif
+
+/* Configure support for this platform. */
+#if defined(_WIN32) || defined(__CYGWIN__)
+# define KWSYS_TERMINAL_SUPPORT_CONSOLE
+#endif
+#if !defined(_WIN32)
+# define KWSYS_TERMINAL_ISATTY_WORKS
+#endif
+
+/* Include needed system APIs. */
+
+#include <stdarg.h> /* va_list */
+#include <stdlib.h> /* getenv */
+#include <string.h> /* strcmp */
+
+#if defined(KWSYS_TERMINAL_SUPPORT_CONSOLE)
+# include <io.h> /* _get_osfhandle */
+# include <windows.h> /* SetConsoleTextAttribute */
+#endif
+
+#if defined(KWSYS_TERMINAL_ISATTY_WORKS)
+# include <unistd.h> /* isatty */
+#else
+# include <sys/stat.h> /* fstat */
+#endif
+
+static int kwsysTerminalStreamIsVT100(FILE* stream, int default_vt100,
+ int default_tty);
+static void kwsysTerminalSetVT100Color(FILE* stream, int color);
+#if defined(KWSYS_TERMINAL_SUPPORT_CONSOLE)
+static HANDLE kwsysTerminalGetStreamHandle(FILE* stream);
+static void kwsysTerminalSetConsoleColor(HANDLE hOut,
+ CONSOLE_SCREEN_BUFFER_INFO* hOutInfo,
+ FILE* stream, int color);
+#endif
+
+void kwsysTerminal_cfprintf(int color, FILE* stream, const char* format, ...)
+{
+ /* Setup the stream with the given color if possible. */
+ int pipeIsConsole = 0;
+ int pipeIsVT100 = 0;
+ int default_vt100 = color & kwsysTerminal_Color_AssumeVT100;
+ int default_tty = color & kwsysTerminal_Color_AssumeTTY;
+#if defined(KWSYS_TERMINAL_SUPPORT_CONSOLE)
+ CONSOLE_SCREEN_BUFFER_INFO hOutInfo;
+ HANDLE hOut = kwsysTerminalGetStreamHandle(stream);
+ if (GetConsoleScreenBufferInfo(hOut, &hOutInfo)) {
+ pipeIsConsole = 1;
+ kwsysTerminalSetConsoleColor(hOut, &hOutInfo, stream, color);
+ }
+#endif
+ if (!pipeIsConsole &&
+ kwsysTerminalStreamIsVT100(stream, default_vt100, default_tty)) {
+ pipeIsVT100 = 1;
+ kwsysTerminalSetVT100Color(stream, color);
+ }
+
+ /* Format the text into the stream. */
+ {
+ va_list var_args;
+ va_start(var_args, format);
+ vfprintf(stream, format, var_args);
+ va_end(var_args);
+ }
+
+/* Restore the normal color state for the stream. */
+#if defined(KWSYS_TERMINAL_SUPPORT_CONSOLE)
+ if (pipeIsConsole) {
+ kwsysTerminalSetConsoleColor(hOut, &hOutInfo, stream,
+ kwsysTerminal_Color_Normal);
+ }
+#endif
+ if (pipeIsVT100) {
+ kwsysTerminalSetVT100Color(stream, kwsysTerminal_Color_Normal);
+ }
+}
+
+/* Detect cases when a stream is definitely not interactive. */
+#if !defined(KWSYS_TERMINAL_ISATTY_WORKS)
+static int kwsysTerminalStreamIsNotInteractive(FILE* stream)
+{
+ /* The given stream is definitely not interactive if it is a regular
+ file. */
+ struct stat stream_stat;
+ if (fstat(fileno(stream), &stream_stat) == 0) {
+ if (stream_stat.st_mode & S_IFREG) {
+ return 1;
+ }
+ }
+ return 0;
+}
+#endif
+
+/* List of terminal names known to support VT100 color escape sequences. */
+static const char* kwsysTerminalVT100Names[] = { "Eterm",
+ "alacritty",
+ "alacritty-direct",
+ "ansi",
+ "color-xterm",
+ "con132x25",
+ "con132x30",
+ "con132x43",
+ "con132x60",
+ "con80x25",
+ "con80x28",
+ "con80x30",
+ "con80x43",
+ "con80x50",
+ "con80x60",
+ "cons25",
+ "console",
+ "cygwin",
+ "dtterm",
+ "eterm-color",
+ "gnome",
+ "gnome-256color",
+ "konsole",
+ "konsole-256color",
+ "kterm",
+ "linux",
+ "msys",
+ "linux-c",
+ "mach-color",
+ "mlterm",
+ "putty",
+ "putty-256color",
+ "rxvt",
+ "rxvt-256color",
+ "rxvt-cygwin",
+ "rxvt-cygwin-native",
+ "rxvt-unicode",
+ "rxvt-unicode-256color",
+ "screen",
+ "screen-256color",
+ "screen-256color-bce",
+ "screen-bce",
+ "screen-w",
+ "screen.linux",
+ "tmux",
+ "tmux-256color",
+ "vt100",
+ "xterm",
+ "xterm-16color",
+ "xterm-256color",
+ "xterm-88color",
+ "xterm-color",
+ "xterm-debian",
+ "xterm-kitty",
+ "xterm-termite",
+ 0 };
+
+/* Detect whether a stream is displayed in a VT100-compatible terminal. */
+static int kwsysTerminalStreamIsVT100(FILE* stream, int default_vt100,
+ int default_tty)
+{
+ /* Force color according to http://bixense.com/clicolors/ convention. */
+ {
+ const char* clicolor_force = getenv("CLICOLOR_FORCE");
+ if (clicolor_force && *clicolor_force &&
+ strcmp(clicolor_force, "0") != 0) {
+ return 1;
+ }
+ }
+
+ /* If running inside emacs the terminal is not VT100. Some emacs
+ seem to claim the TERM is xterm even though they do not support
+ VT100 escapes. */
+ {
+ const char* emacs = getenv("EMACS");
+ if (emacs && *emacs == 't') {
+ return 0;
+ }
+ }
+
+ /* Check for a valid terminal. */
+ if (!default_vt100) {
+ const char** t = 0;
+ const char* term = getenv("TERM");
+ if (term) {
+ for (t = kwsysTerminalVT100Names; *t && strcmp(term, *t) != 0; ++t) {
+ }
+ }
+ if (!(t && *t)) {
+ return 0;
+ }
+ }
+
+#if defined(KWSYS_TERMINAL_ISATTY_WORKS)
+ /* Make sure the stream is a tty. */
+ (void)default_tty;
+ return isatty(fileno(stream)) ? 1 : 0;
+#else
+ /* Check for cases in which the stream is definitely not a tty. */
+ if (kwsysTerminalStreamIsNotInteractive(stream)) {
+ return 0;
+ }
+
+ /* Use the provided default for whether this is a tty. */
+ return default_tty;
+#endif
+}
+
+/* VT100 escape sequence strings. */
+#if defined(__MVS__)
+/* if building on z/OS (aka MVS), assume we are using EBCDIC */
+# define ESCAPE_CHAR "\47"
+#else
+# define ESCAPE_CHAR "\33"
+#endif
+
+#define KWSYS_TERMINAL_VT100_NORMAL ESCAPE_CHAR "[0m"
+#define KWSYS_TERMINAL_VT100_BOLD ESCAPE_CHAR "[1m"
+#define KWSYS_TERMINAL_VT100_UNDERLINE ESCAPE_CHAR "[4m"
+#define KWSYS_TERMINAL_VT100_BLINK ESCAPE_CHAR "[5m"
+#define KWSYS_TERMINAL_VT100_INVERSE ESCAPE_CHAR "[7m"
+#define KWSYS_TERMINAL_VT100_FOREGROUND_BLACK ESCAPE_CHAR "[30m"
+#define KWSYS_TERMINAL_VT100_FOREGROUND_RED ESCAPE_CHAR "[31m"
+#define KWSYS_TERMINAL_VT100_FOREGROUND_GREEN ESCAPE_CHAR "[32m"
+#define KWSYS_TERMINAL_VT100_FOREGROUND_YELLOW ESCAPE_CHAR "[33m"
+#define KWSYS_TERMINAL_VT100_FOREGROUND_BLUE ESCAPE_CHAR "[34m"
+#define KWSYS_TERMINAL_VT100_FOREGROUND_MAGENTA ESCAPE_CHAR "[35m"
+#define KWSYS_TERMINAL_VT100_FOREGROUND_CYAN ESCAPE_CHAR "[36m"
+#define KWSYS_TERMINAL_VT100_FOREGROUND_WHITE ESCAPE_CHAR "[37m"
+#define KWSYS_TERMINAL_VT100_BACKGROUND_BLACK ESCAPE_CHAR "[40m"
+#define KWSYS_TERMINAL_VT100_BACKGROUND_RED ESCAPE_CHAR "[41m"
+#define KWSYS_TERMINAL_VT100_BACKGROUND_GREEN ESCAPE_CHAR "[42m"
+#define KWSYS_TERMINAL_VT100_BACKGROUND_YELLOW ESCAPE_CHAR "[43m"
+#define KWSYS_TERMINAL_VT100_BACKGROUND_BLUE ESCAPE_CHAR "[44m"
+#define KWSYS_TERMINAL_VT100_BACKGROUND_MAGENTA ESCAPE_CHAR "[45m"
+#define KWSYS_TERMINAL_VT100_BACKGROUND_CYAN ESCAPE_CHAR "[46m"
+#define KWSYS_TERMINAL_VT100_BACKGROUND_WHITE ESCAPE_CHAR "[47m"
+
+/* Write VT100 escape sequences to the stream for the given color. */
+static void kwsysTerminalSetVT100Color(FILE* stream, int color)
+{
+ if (color == kwsysTerminal_Color_Normal) {
+ fprintf(stream, KWSYS_TERMINAL_VT100_NORMAL);
+ return;
+ }
+
+ switch (color & kwsysTerminal_Color_ForegroundMask) {
+ case kwsysTerminal_Color_Normal:
+ fprintf(stream, KWSYS_TERMINAL_VT100_NORMAL);
+ break;
+ case kwsysTerminal_Color_ForegroundBlack:
+ fprintf(stream, KWSYS_TERMINAL_VT100_FOREGROUND_BLACK);
+ break;
+ case kwsysTerminal_Color_ForegroundRed:
+ fprintf(stream, KWSYS_TERMINAL_VT100_FOREGROUND_RED);
+ break;
+ case kwsysTerminal_Color_ForegroundGreen:
+ fprintf(stream, KWSYS_TERMINAL_VT100_FOREGROUND_GREEN);
+ break;
+ case kwsysTerminal_Color_ForegroundYellow:
+ fprintf(stream, KWSYS_TERMINAL_VT100_FOREGROUND_YELLOW);
+ break;
+ case kwsysTerminal_Color_ForegroundBlue:
+ fprintf(stream, KWSYS_TERMINAL_VT100_FOREGROUND_BLUE);
+ break;
+ case kwsysTerminal_Color_ForegroundMagenta:
+ fprintf(stream, KWSYS_TERMINAL_VT100_FOREGROUND_MAGENTA);
+ break;
+ case kwsysTerminal_Color_ForegroundCyan:
+ fprintf(stream, KWSYS_TERMINAL_VT100_FOREGROUND_CYAN);
+ break;
+ case kwsysTerminal_Color_ForegroundWhite:
+ fprintf(stream, KWSYS_TERMINAL_VT100_FOREGROUND_WHITE);
+ break;
+ }
+ switch (color & kwsysTerminal_Color_BackgroundMask) {
+ case kwsysTerminal_Color_BackgroundBlack:
+ fprintf(stream, KWSYS_TERMINAL_VT100_BACKGROUND_BLACK);
+ break;
+ case kwsysTerminal_Color_BackgroundRed:
+ fprintf(stream, KWSYS_TERMINAL_VT100_BACKGROUND_RED);
+ break;
+ case kwsysTerminal_Color_BackgroundGreen:
+ fprintf(stream, KWSYS_TERMINAL_VT100_BACKGROUND_GREEN);
+ break;
+ case kwsysTerminal_Color_BackgroundYellow:
+ fprintf(stream, KWSYS_TERMINAL_VT100_BACKGROUND_YELLOW);
+ break;
+ case kwsysTerminal_Color_BackgroundBlue:
+ fprintf(stream, KWSYS_TERMINAL_VT100_BACKGROUND_BLUE);
+ break;
+ case kwsysTerminal_Color_BackgroundMagenta:
+ fprintf(stream, KWSYS_TERMINAL_VT100_BACKGROUND_MAGENTA);
+ break;
+ case kwsysTerminal_Color_BackgroundCyan:
+ fprintf(stream, KWSYS_TERMINAL_VT100_BACKGROUND_CYAN);
+ break;
+ case kwsysTerminal_Color_BackgroundWhite:
+ fprintf(stream, KWSYS_TERMINAL_VT100_BACKGROUND_WHITE);
+ break;
+ }
+ if (color & kwsysTerminal_Color_ForegroundBold) {
+ fprintf(stream, KWSYS_TERMINAL_VT100_BOLD);
+ }
+}
+
+#if defined(KWSYS_TERMINAL_SUPPORT_CONSOLE)
+
+# define KWSYS_TERMINAL_MASK_FOREGROUND \
+ (FOREGROUND_BLUE | FOREGROUND_GREEN | FOREGROUND_RED | \
+ FOREGROUND_INTENSITY)
+# define KWSYS_TERMINAL_MASK_BACKGROUND \
+ (BACKGROUND_BLUE | BACKGROUND_GREEN | BACKGROUND_RED | \
+ BACKGROUND_INTENSITY)
+
+/* Get the Windows handle for a FILE stream. */
+static HANDLE kwsysTerminalGetStreamHandle(FILE* stream)
+{
+ /* Get the C-library file descriptor from the stream. */
+ int fd = fileno(stream);
+
+# if defined(__CYGWIN__)
+ /* Cygwin seems to have an extra pipe level. If the file descriptor
+ corresponds to stdout or stderr then obtain the matching windows
+ handle directly. */
+ if (fd == fileno(stdout)) {
+ return GetStdHandle(STD_OUTPUT_HANDLE);
+ } else if (fd == fileno(stderr)) {
+ return GetStdHandle(STD_ERROR_HANDLE);
+ }
+# endif
+
+ /* Get the underlying Windows handle for the descriptor. */
+ return (HANDLE)_get_osfhandle(fd);
+}
+
+/* Set color attributes in a Windows console. */
+static void kwsysTerminalSetConsoleColor(HANDLE hOut,
+ CONSOLE_SCREEN_BUFFER_INFO* hOutInfo,
+ FILE* stream, int color)
+{
+ WORD attributes = 0;
+ switch (color & kwsysTerminal_Color_ForegroundMask) {
+ case kwsysTerminal_Color_Normal:
+ attributes |= hOutInfo->wAttributes & KWSYS_TERMINAL_MASK_FOREGROUND;
+ break;
+ case kwsysTerminal_Color_ForegroundBlack:
+ attributes |= 0;
+ break;
+ case kwsysTerminal_Color_ForegroundRed:
+ attributes |= FOREGROUND_RED;
+ break;
+ case kwsysTerminal_Color_ForegroundGreen:
+ attributes |= FOREGROUND_GREEN;
+ break;
+ case kwsysTerminal_Color_ForegroundYellow:
+ attributes |= FOREGROUND_RED | FOREGROUND_GREEN;
+ break;
+ case kwsysTerminal_Color_ForegroundBlue:
+ attributes |= FOREGROUND_BLUE;
+ break;
+ case kwsysTerminal_Color_ForegroundMagenta:
+ attributes |= FOREGROUND_RED | FOREGROUND_BLUE;
+ break;
+ case kwsysTerminal_Color_ForegroundCyan:
+ attributes |= FOREGROUND_BLUE | FOREGROUND_GREEN;
+ break;
+ case kwsysTerminal_Color_ForegroundWhite:
+ attributes |= FOREGROUND_BLUE | FOREGROUND_GREEN | FOREGROUND_RED;
+ break;
+ }
+ switch (color & kwsysTerminal_Color_BackgroundMask) {
+ case kwsysTerminal_Color_Normal:
+ attributes |= hOutInfo->wAttributes & KWSYS_TERMINAL_MASK_BACKGROUND;
+ break;
+ case kwsysTerminal_Color_BackgroundBlack:
+ attributes |= 0;
+ break;
+ case kwsysTerminal_Color_BackgroundRed:
+ attributes |= BACKGROUND_RED;
+ break;
+ case kwsysTerminal_Color_BackgroundGreen:
+ attributes |= BACKGROUND_GREEN;
+ break;
+ case kwsysTerminal_Color_BackgroundYellow:
+ attributes |= BACKGROUND_RED | BACKGROUND_GREEN;
+ break;
+ case kwsysTerminal_Color_BackgroundBlue:
+ attributes |= BACKGROUND_BLUE;
+ break;
+ case kwsysTerminal_Color_BackgroundMagenta:
+ attributes |= BACKGROUND_RED | BACKGROUND_BLUE;
+ break;
+ case kwsysTerminal_Color_BackgroundCyan:
+ attributes |= BACKGROUND_BLUE | BACKGROUND_GREEN;
+ break;
+ case kwsysTerminal_Color_BackgroundWhite:
+ attributes |= BACKGROUND_BLUE | BACKGROUND_GREEN | BACKGROUND_RED;
+ break;
+ }
+ if (color & kwsysTerminal_Color_ForegroundBold) {
+ attributes |= FOREGROUND_INTENSITY;
+ }
+ if (color & kwsysTerminal_Color_BackgroundBold) {
+ attributes |= BACKGROUND_INTENSITY;
+ }
+ fflush(stream);
+ SetConsoleTextAttribute(hOut, attributes);
+}
+#endif
diff --git a/test/API/driver/kwsys/Terminal.h.in b/test/API/driver/kwsys/Terminal.h.in
new file mode 100644
index 0000000..1a2c745
--- /dev/null
+++ b/test/API/driver/kwsys/Terminal.h.in
@@ -0,0 +1,170 @@
+/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying
+ file Copyright.txt or https://cmake.org/licensing#kwsys for details. */
+#ifndef @KWSYS_NAMESPACE@_Terminal_h
+#define @KWSYS_NAMESPACE@_Terminal_h
+
+#include <@KWSYS_NAMESPACE@/Configure.h>
+
+#include <stdio.h> /* For file stream type FILE. */
+
+/* Redefine all public interface symbol names to be in the proper
+ namespace. These macros are used internally to kwsys only, and are
+ not visible to user code. Use kwsysHeaderDump.pl to reproduce
+ these macros after making changes to the interface. */
+#if !defined(KWSYS_NAMESPACE)
+# define kwsys_ns(x) @KWSYS_NAMESPACE@##x
+# define kwsysEXPORT @KWSYS_NAMESPACE@_EXPORT
+#endif
+#if !@KWSYS_NAMESPACE@_NAME_IS_KWSYS
+# define kwsysTerminal_cfprintf kwsys_ns(Terminal_cfprintf)
+# define kwsysTerminal_Color_e kwsys_ns(Terminal_Color_e)
+# define kwsysTerminal_Color_Normal kwsys_ns(Terminal_Color_Normal)
+# define kwsysTerminal_Color_ForegroundBlack \
+ kwsys_ns(Terminal_Color_ForegroundBlack)
+# define kwsysTerminal_Color_ForegroundRed \
+ kwsys_ns(Terminal_Color_ForegroundRed)
+# define kwsysTerminal_Color_ForegroundGreen \
+ kwsys_ns(Terminal_Color_ForegroundGreen)
+# define kwsysTerminal_Color_ForegroundYellow \
+ kwsys_ns(Terminal_Color_ForegroundYellow)
+# define kwsysTerminal_Color_ForegroundBlue \
+ kwsys_ns(Terminal_Color_ForegroundBlue)
+# define kwsysTerminal_Color_ForegroundMagenta \
+ kwsys_ns(Terminal_Color_ForegroundMagenta)
+# define kwsysTerminal_Color_ForegroundCyan \
+ kwsys_ns(Terminal_Color_ForegroundCyan)
+# define kwsysTerminal_Color_ForegroundWhite \
+ kwsys_ns(Terminal_Color_ForegroundWhite)
+# define kwsysTerminal_Color_ForegroundMask \
+ kwsys_ns(Terminal_Color_ForegroundMask)
+# define kwsysTerminal_Color_BackgroundBlack \
+ kwsys_ns(Terminal_Color_BackgroundBlack)
+# define kwsysTerminal_Color_BackgroundRed \
+ kwsys_ns(Terminal_Color_BackgroundRed)
+# define kwsysTerminal_Color_BackgroundGreen \
+ kwsys_ns(Terminal_Color_BackgroundGreen)
+# define kwsysTerminal_Color_BackgroundYellow \
+ kwsys_ns(Terminal_Color_BackgroundYellow)
+# define kwsysTerminal_Color_BackgroundBlue \
+ kwsys_ns(Terminal_Color_BackgroundBlue)
+# define kwsysTerminal_Color_BackgroundMagenta \
+ kwsys_ns(Terminal_Color_BackgroundMagenta)
+# define kwsysTerminal_Color_BackgroundCyan \
+ kwsys_ns(Terminal_Color_BackgroundCyan)
+# define kwsysTerminal_Color_BackgroundWhite \
+ kwsys_ns(Terminal_Color_BackgroundWhite)
+# define kwsysTerminal_Color_BackgroundMask \
+ kwsys_ns(Terminal_Color_BackgroundMask)
+# define kwsysTerminal_Color_ForegroundBold \
+ kwsys_ns(Terminal_Color_ForegroundBold)
+# define kwsysTerminal_Color_BackgroundBold \
+ kwsys_ns(Terminal_Color_BackgroundBold)
+# define kwsysTerminal_Color_AssumeTTY kwsys_ns(Terminal_Color_AssumeTTY)
+# define kwsysTerminal_Color_AssumeVT100 kwsys_ns(Terminal_Color_AssumeVT100)
+# define kwsysTerminal_Color_AttributeMask \
+ kwsys_ns(Terminal_Color_AttributeMask)
+#endif
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+/**
+ * Write colored and formatted text to a stream. Color is used only
+ * for streams supporting it. The color specification is constructed
+ * by bitwise-OR-ing enumeration values. At most one foreground and
+ * one background value may be given.
+ *
+ * Whether a stream supports color is usually detected automatically,
+ * but there are two exceptions:
+ *
+ * - When the stream is displayed in a terminal supporting VT100
+ * color but using an intermediate pipe for communication the
+ * color but using an intermediate pipe for communication, the
+ * detection of a tty fails. (This typically occurs for a shell
+ * to be the case, the attribute Color_AssumeTTY may be included in
+ * the color specification.
+ *
+ * - When the stream is displayed in a terminal whose TERM
+ * environment variable is not set or is set to a value that is not
+ * known to support VT100 colors. If the caller knows this to be
+ * the case, the attribute Color_AssumeVT100 may be included in the
+ * color specification.
+ */
+kwsysEXPORT void kwsysTerminal_cfprintf(int color, FILE* stream,
+ const char* format, ...);
+enum kwsysTerminal_Color_e
+{
+ /* Normal Text */
+ kwsysTerminal_Color_Normal = 0,
+
+ /* Foreground Color */
+ kwsysTerminal_Color_ForegroundBlack = 0x1,
+ kwsysTerminal_Color_ForegroundRed = 0x2,
+ kwsysTerminal_Color_ForegroundGreen = 0x3,
+ kwsysTerminal_Color_ForegroundYellow = 0x4,
+ kwsysTerminal_Color_ForegroundBlue = 0x5,
+ kwsysTerminal_Color_ForegroundMagenta = 0x6,
+ kwsysTerminal_Color_ForegroundCyan = 0x7,
+ kwsysTerminal_Color_ForegroundWhite = 0x8,
+ kwsysTerminal_Color_ForegroundMask = 0xF,
+
+ /* Background Color */
+ kwsysTerminal_Color_BackgroundBlack = 0x10,
+ kwsysTerminal_Color_BackgroundRed = 0x20,
+ kwsysTerminal_Color_BackgroundGreen = 0x30,
+ kwsysTerminal_Color_BackgroundYellow = 0x40,
+ kwsysTerminal_Color_BackgroundBlue = 0x50,
+ kwsysTerminal_Color_BackgroundMagenta = 0x60,
+ kwsysTerminal_Color_BackgroundCyan = 0x70,
+ kwsysTerminal_Color_BackgroundWhite = 0x80,
+ kwsysTerminal_Color_BackgroundMask = 0xF0,
+
+ /* Attributes */
+ kwsysTerminal_Color_ForegroundBold = 0x100,
+ kwsysTerminal_Color_BackgroundBold = 0x200,
+ kwsysTerminal_Color_AssumeTTY = 0x400,
+ kwsysTerminal_Color_AssumeVT100 = 0x800,
+ kwsysTerminal_Color_AttributeMask = 0xF00
+};
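+/* Example (illustrative sketch, not part of kwsys): print a bold green
+   message, falling back to plain text when the stream is not a color
+   terminal:
+
+     kwsysTerminal_cfprintf(kwsysTerminal_Color_ForegroundGreen |
+                              kwsysTerminal_Color_ForegroundBold,
+                            stdout, "Passed %d tests\n", 42);
+*/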
+
+#if defined(__cplusplus)
+} /* extern "C" */
+#endif
+
+/* If we are building a kwsys .c or .cxx file, let it use these macros.
+ Otherwise, undefine them to keep the namespace clean. */
+#if !defined(KWSYS_NAMESPACE)
+# undef kwsys_ns
+# undef kwsysEXPORT
+# if !@KWSYS_NAMESPACE@_NAME_IS_KWSYS
+# undef kwsysTerminal_cfprintf
+# undef kwsysTerminal_Color_e
+# undef kwsysTerminal_Color_Normal
+# undef kwsysTerminal_Color_ForegroundBlack
+# undef kwsysTerminal_Color_ForegroundRed
+# undef kwsysTerminal_Color_ForegroundGreen
+# undef kwsysTerminal_Color_ForegroundYellow
+# undef kwsysTerminal_Color_ForegroundBlue
+# undef kwsysTerminal_Color_ForegroundMagenta
+# undef kwsysTerminal_Color_ForegroundCyan
+# undef kwsysTerminal_Color_ForegroundWhite
+# undef kwsysTerminal_Color_ForegroundMask
+# undef kwsysTerminal_Color_BackgroundBlack
+# undef kwsysTerminal_Color_BackgroundRed
+# undef kwsysTerminal_Color_BackgroundGreen
+# undef kwsysTerminal_Color_BackgroundYellow
+# undef kwsysTerminal_Color_BackgroundBlue
+# undef kwsysTerminal_Color_BackgroundMagenta
+# undef kwsysTerminal_Color_BackgroundCyan
+# undef kwsysTerminal_Color_BackgroundWhite
+# undef kwsysTerminal_Color_BackgroundMask
+# undef kwsysTerminal_Color_ForegroundBold
+# undef kwsysTerminal_Color_BackgroundBold
+# undef kwsysTerminal_Color_AssumeTTY
+# undef kwsysTerminal_Color_AssumeVT100
+# undef kwsysTerminal_Color_AttributeMask
+# endif
+#endif
+
+#endif
diff --git a/test/API/driver/kwsys/clang-format.bash b/test/API/driver/kwsys/clang-format.bash
new file mode 100644
index 0000000..b0282ab
--- /dev/null
+++ b/test/API/driver/kwsys/clang-format.bash
@@ -0,0 +1,128 @@
+#!/usr/bin/env bash
+#=============================================================================
+# Copyright 2015-2017 Kitware, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#=============================================================================
+
+usage='usage: clang-format.bash [<options>] [--]
+
+ --help Print usage plus more detailed help.
+
+ --clang-format <tool> Use given clang-format tool.
+
+ --amend Filter files changed by HEAD.
+ --cached Filter files locally staged for commit.
+ --modified Filter files locally modified from HEAD.
+ --tracked Filter files tracked by Git.
+'
+
+help="$usage"'
+Example to format locally modified files:
+
+ ./clang-format.bash --modified
+
+Example to format locally modified files staged for commit:
+
+ ./clang-format.bash --cached
+
+Example to format files modified by the most recent commit:
+
+ ./clang-format.bash --amend
+
+Example to format all files:
+
+ ./clang-format.bash --tracked
+
+Example to format the current topic:
+
+ git filter-branch \
+ --tree-filter "./clang-format.bash --tracked" \
+ master..
+'
+
+die() {
+ echo "$@" 1>&2; exit 1
+}
+
+#-----------------------------------------------------------------------------
+
+# Parse command-line arguments.
+clang_format=''
+mode=''
+while test "$#" != 0; do
+ case "$1" in
+ --amend) mode="amend" ;;
+ --cached) mode="cached" ;;
+ --clang-format) shift; clang_format="$1" ;;
+ --help) echo "$help"; exit 0 ;;
+ --modified) mode="modified" ;;
+ --tracked) mode="tracked" ;;
+ --) shift ; break ;;
+ -*) die "$usage" ;;
+ *) break ;;
+ esac
+ shift
+done
+test "$#" = 0 || die "$usage"
+
+# Find a default tool.
+tools='
+ clang-format-6.0
+ clang-format
+'
+if test "x$clang_format" = "x"; then
+ for tool in $tools; do
+ if type -p "$tool" >/dev/null; then
+ clang_format="$tool"
+ break
+ fi
+ done
+fi
+
+# Verify that we have a tool.
+if ! type -p "$clang_format" >/dev/null; then
+ echo "Unable to locate a 'clang-format' tool."
+ exit 1
+fi
+
+if ! "$clang_format" --version | grep 'clang-format version 6\.0' >/dev/null 2>/dev/null; then
+ echo "clang-format version 6.0 is required (exactly)"
+ exit 1
+fi
+
+# Select listing mode.
+case "$mode" in
+ '') echo "$usage"; exit 0 ;;
+ amend) git_ls='git diff-tree --diff-filter=AM --name-only HEAD -r --no-commit-id' ;;
+ cached) git_ls='git diff-index --diff-filter=AM --name-only HEAD --cached' ;;
+ modified) git_ls='git diff-index --diff-filter=AM --name-only HEAD' ;;
+ tracked) git_ls='git ls-files' ;;
+ *) die "invalid mode: $mode" ;;
+esac
+
+# List files as selected above.
+list_files() {
+ $git_ls |
+
+ # Select sources with our attribute.
+ git check-attr --stdin format.clang-format |
+ sed -n '/: format\.clang-format: set$/ {s/:[^:]*:[^:]*$//p}'
+}
+
+# Transform configured sources to protect @SYMBOLS@.
+list_files | xargs -d '\n' sed -i 's/@\(KWSYS_[A-Z0-9_]\+\)@/x\1x/g'
+# Update sources in-place.
+list_files | xargs -d '\n' "$clang_format" -i
+# Transform configured sources to restore @SYMBOLS@.
+list_files | xargs -d '\n' sed -i 's/x\(KWSYS_[A-Z0-9_]\+\)x/@\1@/g'
diff --git a/test/API/driver/kwsys/hash_fun.hxx.in b/test/API/driver/kwsys/hash_fun.hxx.in
new file mode 100644
index 0000000..8626c2a
--- /dev/null
+++ b/test/API/driver/kwsys/hash_fun.hxx.in
@@ -0,0 +1,166 @@
+/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying
+ file Copyright.txt or https://cmake.org/licensing#kwsys for details. */
+/*
+ * Copyright (c) 1996
+ * Silicon Graphics Computer Systems, Inc.
+ *
+ * Permission to use, copy, modify, distribute and sell this software
+ * and its documentation for any purpose is hereby granted without fee,
+ * provided that the above copyright notice appear in all copies and
+ * that both that copyright notice and this permission notice appear
+ * in supporting documentation. Silicon Graphics makes no
+ * representations about the suitability of this software for any
+ * purpose. It is provided "as is" without express or implied warranty.
+ *
+ *
+ * Copyright (c) 1994
+ * Hewlett-Packard Company
+ *
+ * Permission to use, copy, modify, distribute and sell this software
+ * and its documentation for any purpose is hereby granted without fee,
+ * provided that the above copyright notice appear in all copies and
+ * that both that copyright notice and this permission notice appear
+ * in supporting documentation. Hewlett-Packard Company makes no
+ * representations about the suitability of this software for any
+ * purpose. It is provided "as is" without express or implied warranty.
+ *
+ */
+#ifndef @KWSYS_NAMESPACE@_hash_fun_hxx
+#define @KWSYS_NAMESPACE@_hash_fun_hxx
+
+#include <@KWSYS_NAMESPACE@/Configure.hxx>
+
+#include <stddef.h> // size_t
+#include <string>
+
+namespace @KWSYS_NAMESPACE@ {
+
+template <class _Key>
+struct hash
+{
+};
+
+inline size_t _stl_hash_string(const char* __s)
+{
+ unsigned long __h = 0;
+ for (; *__s; ++__s)
+ __h = 5 * __h + *__s;
+
+ return size_t(__h);
+}
+
+template <>
+struct hash<char*>
+{
+ size_t operator()(const char* __s) const { return _stl_hash_string(__s); }
+};
+
+template <>
+struct hash<const char*>
+{
+ size_t operator()(const char* __s) const { return _stl_hash_string(__s); }
+};
+
+template <>
+struct hash<std::string>
+{
+ size_t operator()(const std::string& __s) const
+ {
+ return _stl_hash_string(__s.c_str());
+ }
+};
+
+#if !defined(__BORLANDC__)
+template <>
+struct hash<const std::string>
+{
+ size_t operator()(const std::string& __s) const
+ {
+ return _stl_hash_string(__s.c_str());
+ }
+};
+#endif
+
+template <>
+struct hash<char>
+{
+ size_t operator()(char __x) const { return __x; }
+};
+
+template <>
+struct hash<unsigned char>
+{
+ size_t operator()(unsigned char __x) const { return __x; }
+};
+
+template <>
+struct hash<signed char>
+{
+ size_t operator()(unsigned char __x) const { return __x; }
+};
+
+template <>
+struct hash<short>
+{
+ size_t operator()(short __x) const { return __x; }
+};
+
+template <>
+struct hash<unsigned short>
+{
+ size_t operator()(unsigned short __x) const { return __x; }
+};
+
+template <>
+struct hash<int>
+{
+ size_t operator()(int __x) const { return __x; }
+};
+
+template <>
+struct hash<unsigned int>
+{
+ size_t operator()(unsigned int __x) const { return __x; }
+};
+
+template <>
+struct hash<long>
+{
+ size_t operator()(long __x) const { return __x; }
+};
+
+template <>
+struct hash<unsigned long>
+{
+ size_t operator()(unsigned long __x) const { return __x; }
+};
+
+// use long long or __int64
+#if @KWSYS_USE_LONG_LONG@
+template <>
+struct hash<long long>
+{
+ size_t operator()(long long __x) const { return __x; }
+};
+
+template <>
+struct hash<unsigned long long>
+{
+ size_t operator()(unsigned long long __x) const { return __x; }
+};
+#elif @KWSYS_USE___INT64@
+template <>
+struct hash<__int64>
+{
+ size_t operator()(__int64 __x) const { return __x; }
+};
+template <>
+struct hash<unsigned __int64>
+{
+ size_t operator()(unsigned __int64 __x) const { return __x; }
+};
+#endif // use long long or __int64
+
+} // namespace @KWSYS_NAMESPACE@
+
+#endif
diff --git a/test/API/driver/kwsys/hash_map.hxx.in b/test/API/driver/kwsys/hash_map.hxx.in
new file mode 100644
index 0000000..5f04e9c
--- /dev/null
+++ b/test/API/driver/kwsys/hash_map.hxx.in
@@ -0,0 +1,423 @@
+/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying
+ file Copyright.txt or https://cmake.org/licensing#kwsys for details. */
+/*
+ * Copyright (c) 1996
+ * Silicon Graphics Computer Systems, Inc.
+ *
+ * Permission to use, copy, modify, distribute and sell this software
+ * and its documentation for any purpose is hereby granted without fee,
+ * provided that the above copyright notice appear in all copies and
+ * that both that copyright notice and this permission notice appear
+ * in supporting documentation. Silicon Graphics makes no
+ * representations about the suitability of this software for any
+ * purpose. It is provided "as is" without express or implied warranty.
+ *
+ *
+ * Copyright (c) 1994
+ * Hewlett-Packard Company
+ *
+ * Permission to use, copy, modify, distribute and sell this software
+ * and its documentation for any purpose is hereby granted without fee,
+ * provided that the above copyright notice appear in all copies and
+ * that both that copyright notice and this permission notice appear
+ * in supporting documentation. Hewlett-Packard Company makes no
+ * representations about the suitability of this software for any
+ * purpose. It is provided "as is" without express or implied warranty.
+ *
+ */
+#ifndef @KWSYS_NAMESPACE@_hash_map_hxx
+#define @KWSYS_NAMESPACE@_hash_map_hxx
+
+#include <@KWSYS_NAMESPACE@/hashtable.hxx>
+
+#include <@KWSYS_NAMESPACE@/hash_fun.hxx>
+
+#include <functional> // equal_to
+
+#if defined(_MSC_VER)
+# pragma warning(push)
+# pragma warning(disable : 4284)
+# pragma warning(disable : 4786)
+#endif
+
+#if defined(__sgi) && !defined(__GNUC__) && (_MIPS_SIM != _MIPS_SIM_ABI32)
+# pragma set woff 1174
+# pragma set woff 1375
+#endif
+
+namespace @KWSYS_NAMESPACE@ {
+
+// select1st is an extension: it is not part of the standard.
+template <class T1, class T2>
+struct hash_select1st
+{
+ const T1& operator()(const std::pair<T1, T2>& __x) const
+ {
+ return __x.first;
+ }
+};
+
+// Forward declaration of equality operator; needed for friend declaration.
+
+template <class _Key, class _Tp, class _HashFcn = hash<_Key>,
+ class _EqualKey = std::equal_to<_Key>,
+ class _Alloc = std::allocator<char> >
+class hash_map;
+
+template <class _Key, class _Tp, class _HashFn, class _EqKey, class _Alloc>
+bool operator==(const hash_map<_Key, _Tp, _HashFn, _EqKey, _Alloc>&,
+ const hash_map<_Key, _Tp, _HashFn, _EqKey, _Alloc>&);
+
+template <class _Key, class _Tp, class _HashFcn, class _EqualKey, class _Alloc>
+class hash_map
+{
+private:
+ typedef hashtable<std::pair<const _Key, _Tp>, _Key, _HashFcn,
+ hash_select1st<const _Key, _Tp>, _EqualKey, _Alloc>
+ _Ht;
+ _Ht _M_ht;
+
+public:
+ typedef typename _Ht::key_type key_type;
+ typedef _Tp data_type;
+ typedef _Tp mapped_type;
+ typedef typename _Ht::value_type value_type;
+ typedef typename _Ht::hasher hasher;
+ typedef typename _Ht::key_equal key_equal;
+
+ typedef typename _Ht::size_type size_type;
+ typedef typename _Ht::difference_type difference_type;
+ typedef typename _Ht::pointer pointer;
+ typedef typename _Ht::const_pointer const_pointer;
+ typedef typename _Ht::reference reference;
+ typedef typename _Ht::const_reference const_reference;
+
+ typedef typename _Ht::iterator iterator;
+ typedef typename _Ht::const_iterator const_iterator;
+
+ typedef typename _Ht::allocator_type allocator_type;
+
+ hasher hash_funct() const { return _M_ht.hash_funct(); }
+ key_equal key_eq() const { return _M_ht.key_eq(); }
+ allocator_type get_allocator() const { return _M_ht.get_allocator(); }
+
+public:
+ hash_map()
+ : _M_ht(100, hasher(), key_equal(), allocator_type())
+ {
+ }
+ explicit hash_map(size_type __n)
+ : _M_ht(__n, hasher(), key_equal(), allocator_type())
+ {
+ }
+ hash_map(size_type __n, const hasher& __hf)
+ : _M_ht(__n, __hf, key_equal(), allocator_type())
+ {
+ }
+ hash_map(size_type __n, const hasher& __hf, const key_equal& __eql,
+ const allocator_type& __a = allocator_type())
+ : _M_ht(__n, __hf, __eql, __a)
+ {
+ }
+
+ template <class _InputIterator>
+ hash_map(_InputIterator __f, _InputIterator __l)
+ : _M_ht(100, hasher(), key_equal(), allocator_type())
+ {
+ _M_ht.insert_unique(__f, __l);
+ }
+ template <class _InputIterator>
+ hash_map(_InputIterator __f, _InputIterator __l, size_type __n)
+ : _M_ht(__n, hasher(), key_equal(), allocator_type())
+ {
+ _M_ht.insert_unique(__f, __l);
+ }
+ template <class _InputIterator>
+ hash_map(_InputIterator __f, _InputIterator __l, size_type __n,
+ const hasher& __hf)
+ : _M_ht(__n, __hf, key_equal(), allocator_type())
+ {
+ _M_ht.insert_unique(__f, __l);
+ }
+ template <class _InputIterator>
+ hash_map(_InputIterator __f, _InputIterator __l, size_type __n,
+ const hasher& __hf, const key_equal& __eql,
+ const allocator_type& __a = allocator_type())
+ : _M_ht(__n, __hf, __eql, __a)
+ {
+ _M_ht.insert_unique(__f, __l);
+ }
+
+public:
+ size_type size() const { return _M_ht.size(); }
+ size_type max_size() const { return _M_ht.max_size(); }
+ bool empty() const { return _M_ht.empty(); }
+ void swap(hash_map& __hs) { _M_ht.swap(__hs._M_ht); }
+
+ friend bool operator==<>(const hash_map&, const hash_map&);
+
+ iterator begin() { return _M_ht.begin(); }
+ iterator end() { return _M_ht.end(); }
+ const_iterator begin() const { return _M_ht.begin(); }
+ const_iterator end() const { return _M_ht.end(); }
+
+public:
+ std::pair<iterator, bool> insert(const value_type& __obj)
+ {
+ return _M_ht.insert_unique(__obj);
+ }
+ template <class _InputIterator>
+ void insert(_InputIterator __f, _InputIterator __l)
+ {
+ _M_ht.insert_unique(__f, __l);
+ }
+ std::pair<iterator, bool> insert_noresize(const value_type& __obj)
+ {
+ return _M_ht.insert_unique_noresize(__obj);
+ }
+
+ iterator find(const key_type& __key) { return _M_ht.find(__key); }
+ const_iterator find(const key_type& __key) const
+ {
+ return _M_ht.find(__key);
+ }
+
+ _Tp& operator[](const key_type& __key)
+ {
+ return _M_ht.find_or_insert(value_type(__key, _Tp())).second;
+ }
+
+ size_type count(const key_type& __key) const { return _M_ht.count(__key); }
+
+ std::pair<iterator, iterator> equal_range(const key_type& __key)
+ {
+ return _M_ht.equal_range(__key);
+ }
+ std::pair<const_iterator, const_iterator> equal_range(
+ const key_type& __key) const
+ {
+ return _M_ht.equal_range(__key);
+ }
+
+ size_type erase(const key_type& __key) { return _M_ht.erase(__key); }
+ void erase(iterator __it) { _M_ht.erase(__it); }
+ void erase(iterator __f, iterator __l) { _M_ht.erase(__f, __l); }
+ void clear() { _M_ht.clear(); }
+
+ void resize(size_type __hint) { _M_ht.resize(__hint); }
+ size_type bucket_count() const { return _M_ht.bucket_count(); }
+ size_type max_bucket_count() const { return _M_ht.max_bucket_count(); }
+ size_type elems_in_bucket(size_type __n) const
+ {
+ return _M_ht.elems_in_bucket(__n);
+ }
+};
+
+template <class _Key, class _Tp, class _HashFcn, class _EqlKey, class _Alloc>
+bool operator==(const hash_map<_Key, _Tp, _HashFcn, _EqlKey, _Alloc>& __hm1,
+ const hash_map<_Key, _Tp, _HashFcn, _EqlKey, _Alloc>& __hm2)
+{
+ return __hm1._M_ht == __hm2._M_ht;
+}
+
+template <class _Key, class _Tp, class _HashFcn, class _EqlKey, class _Alloc>
+inline bool operator!=(
+ const hash_map<_Key, _Tp, _HashFcn, _EqlKey, _Alloc>& __hm1,
+ const hash_map<_Key, _Tp, _HashFcn, _EqlKey, _Alloc>& __hm2)
+{
+ return !(__hm1 == __hm2);
+}
+
+template <class _Key, class _Tp, class _HashFcn, class _EqlKey, class _Alloc>
+inline void swap(hash_map<_Key, _Tp, _HashFcn, _EqlKey, _Alloc>& __hm1,
+ hash_map<_Key, _Tp, _HashFcn, _EqlKey, _Alloc>& __hm2)
+{
+ __hm1.swap(__hm2);
+}
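+
+// Illustrative usage sketch (added for clarity; assumes the configured
+// namespace expands to `kwsys` and that hash_fun.hxx provides hash<int>):
+//
+//   kwsys::hash_map<int, std::string> m;
+//   m[42] = "answer";                         // operator[] -> find_or_insert
+//   m.insert(std::make_pair(7, std::string("seven")));
+//   kwsys::hash_map<int, std::string>::iterator it = m.find(42);
+//   if (it != m.end() && m.count(7) == 1) {
+//     // it->second == "answer"
+//   }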
+
+// Forward declaration of equality operator; needed for friend declaration.
+
+template <class _Key, class _Tp, class _HashFcn = hash<_Key>,
+ class _EqualKey = std::equal_to<_Key>,
+ class _Alloc = std::allocator<char> >
+class hash_multimap;
+
+template <class _Key, class _Tp, class _HF, class _EqKey, class _Alloc>
+bool operator==(const hash_multimap<_Key, _Tp, _HF, _EqKey, _Alloc>& __hm1,
+ const hash_multimap<_Key, _Tp, _HF, _EqKey, _Alloc>& __hm2);
+
+template <class _Key, class _Tp, class _HashFcn, class _EqualKey, class _Alloc>
+class hash_multimap
+{
+private:
+ typedef hashtable<std::pair<const _Key, _Tp>, _Key, _HashFcn,
+ hash_select1st<const _Key, _Tp>, _EqualKey, _Alloc>
+ _Ht;
+ _Ht _M_ht;
+
+public:
+ typedef typename _Ht::key_type key_type;
+ typedef _Tp data_type;
+ typedef _Tp mapped_type;
+ typedef typename _Ht::value_type value_type;
+ typedef typename _Ht::hasher hasher;
+ typedef typename _Ht::key_equal key_equal;
+
+ typedef typename _Ht::size_type size_type;
+ typedef typename _Ht::difference_type difference_type;
+ typedef typename _Ht::pointer pointer;
+ typedef typename _Ht::const_pointer const_pointer;
+ typedef typename _Ht::reference reference;
+ typedef typename _Ht::const_reference const_reference;
+
+ typedef typename _Ht::iterator iterator;
+ typedef typename _Ht::const_iterator const_iterator;
+
+ typedef typename _Ht::allocator_type allocator_type;
+
+ hasher hash_funct() const { return _M_ht.hash_funct(); }
+ key_equal key_eq() const { return _M_ht.key_eq(); }
+ allocator_type get_allocator() const { return _M_ht.get_allocator(); }
+
+public:
+ hash_multimap()
+ : _M_ht(100, hasher(), key_equal(), allocator_type())
+ {
+ }
+ explicit hash_multimap(size_type __n)
+ : _M_ht(__n, hasher(), key_equal(), allocator_type())
+ {
+ }
+ hash_multimap(size_type __n, const hasher& __hf)
+ : _M_ht(__n, __hf, key_equal(), allocator_type())
+ {
+ }
+ hash_multimap(size_type __n, const hasher& __hf, const key_equal& __eql,
+ const allocator_type& __a = allocator_type())
+ : _M_ht(__n, __hf, __eql, __a)
+ {
+ }
+
+ template <class _InputIterator>
+ hash_multimap(_InputIterator __f, _InputIterator __l)
+ : _M_ht(100, hasher(), key_equal(), allocator_type())
+ {
+ _M_ht.insert_equal(__f, __l);
+ }
+ template <class _InputIterator>
+ hash_multimap(_InputIterator __f, _InputIterator __l, size_type __n)
+ : _M_ht(__n, hasher(), key_equal(), allocator_type())
+ {
+ _M_ht.insert_equal(__f, __l);
+ }
+ template <class _InputIterator>
+ hash_multimap(_InputIterator __f, _InputIterator __l, size_type __n,
+ const hasher& __hf)
+ : _M_ht(__n, __hf, key_equal(), allocator_type())
+ {
+ _M_ht.insert_equal(__f, __l);
+ }
+ template <class _InputIterator>
+ hash_multimap(_InputIterator __f, _InputIterator __l, size_type __n,
+ const hasher& __hf, const key_equal& __eql,
+ const allocator_type& __a = allocator_type())
+ : _M_ht(__n, __hf, __eql, __a)
+ {
+ _M_ht.insert_equal(__f, __l);
+ }
+
+public:
+ size_type size() const { return _M_ht.size(); }
+ size_type max_size() const { return _M_ht.max_size(); }
+ bool empty() const { return _M_ht.empty(); }
+ void swap(hash_multimap& __hs) { _M_ht.swap(__hs._M_ht); }
+
+ friend bool operator==<>(const hash_multimap&, const hash_multimap&);
+
+ iterator begin() { return _M_ht.begin(); }
+ iterator end() { return _M_ht.end(); }
+ const_iterator begin() const { return _M_ht.begin(); }
+ const_iterator end() const { return _M_ht.end(); }
+
+public:
+ iterator insert(const value_type& __obj)
+ {
+ return _M_ht.insert_equal(__obj);
+ }
+ template <class _InputIterator>
+ void insert(_InputIterator __f, _InputIterator __l)
+ {
+ _M_ht.insert_equal(__f, __l);
+ }
+ iterator insert_noresize(const value_type& __obj)
+ {
+ return _M_ht.insert_equal_noresize(__obj);
+ }
+
+ iterator find(const key_type& __key) { return _M_ht.find(__key); }
+ const_iterator find(const key_type& __key) const
+ {
+ return _M_ht.find(__key);
+ }
+
+ size_type count(const key_type& __key) const { return _M_ht.count(__key); }
+
+ std::pair<iterator, iterator> equal_range(const key_type& __key)
+ {
+ return _M_ht.equal_range(__key);
+ }
+ std::pair<const_iterator, const_iterator> equal_range(
+ const key_type& __key) const
+ {
+ return _M_ht.equal_range(__key);
+ }
+
+ size_type erase(const key_type& __key) { return _M_ht.erase(__key); }
+ void erase(iterator __it) { _M_ht.erase(__it); }
+ void erase(iterator __f, iterator __l) { _M_ht.erase(__f, __l); }
+ void clear() { _M_ht.clear(); }
+
+public:
+ void resize(size_type __hint) { _M_ht.resize(__hint); }
+ size_type bucket_count() const { return _M_ht.bucket_count(); }
+ size_type max_bucket_count() const { return _M_ht.max_bucket_count(); }
+ size_type elems_in_bucket(size_type __n) const
+ {
+ return _M_ht.elems_in_bucket(__n);
+ }
+};
+
+template <class _Key, class _Tp, class _HF, class _EqKey, class _Alloc>
+bool operator==(const hash_multimap<_Key, _Tp, _HF, _EqKey, _Alloc>& __hm1,
+ const hash_multimap<_Key, _Tp, _HF, _EqKey, _Alloc>& __hm2)
+{
+ return __hm1._M_ht == __hm2._M_ht;
+}
+
+template <class _Key, class _Tp, class _HF, class _EqKey, class _Alloc>
+inline bool operator!=(
+ const hash_multimap<_Key, _Tp, _HF, _EqKey, _Alloc>& __hm1,
+ const hash_multimap<_Key, _Tp, _HF, _EqKey, _Alloc>& __hm2)
+{
+ return !(__hm1 == __hm2);
+}
+
+template <class _Key, class _Tp, class _HashFcn, class _EqlKey, class _Alloc>
+inline void swap(hash_multimap<_Key, _Tp, _HashFcn, _EqlKey, _Alloc>& __hm1,
+ hash_multimap<_Key, _Tp, _HashFcn, _EqlKey, _Alloc>& __hm2)
+{
+ __hm1.swap(__hm2);
+}
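+
+// Illustrative note (added): hash_multimap routes insert() through
+// insert_equal(), so duplicate keys are kept rather than rejected:
+//
+//   kwsys::hash_multimap<int, int> mm;        // assumes kwsys namespace
+//   mm.insert(std::make_pair(1, 10));
+//   mm.insert(std::make_pair(1, 20));         // kept; mm.count(1) == 2
+//   // equal_range(1) then spans both entries.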
+
+} // namespace @KWSYS_NAMESPACE@
+
+#if defined(__sgi) && !defined(__GNUC__) && (_MIPS_SIM != _MIPS_SIM_ABI32)
+# pragma reset woff 1174
+# pragma reset woff 1375
+#endif
+
+#if defined(_MSC_VER)
+# pragma warning(pop)
+#endif
+
+#endif
diff --git a/test/API/driver/kwsys/hash_set.hxx.in b/test/API/driver/kwsys/hash_set.hxx.in
new file mode 100644
index 0000000..f4a37ee
--- /dev/null
+++ b/test/API/driver/kwsys/hash_set.hxx.in
@@ -0,0 +1,392 @@
+/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying
+ file Copyright.txt or https://cmake.org/licensing#kwsys for details. */
+/*
+ * Copyright (c) 1996
+ * Silicon Graphics Computer Systems, Inc.
+ *
+ * Permission to use, copy, modify, distribute and sell this software
+ * and its documentation for any purpose is hereby granted without fee,
+ * provided that the above copyright notice appear in all copies and
+ * that both that copyright notice and this permission notice appear
+ * in supporting documentation. Silicon Graphics makes no
+ * representations about the suitability of this software for any
+ * purpose. It is provided "as is" without express or implied warranty.
+ *
+ *
+ * Copyright (c) 1994
+ * Hewlett-Packard Company
+ *
+ * Permission to use, copy, modify, distribute and sell this software
+ * and its documentation for any purpose is hereby granted without fee,
+ * provided that the above copyright notice appear in all copies and
+ * that both that copyright notice and this permission notice appear
+ * in supporting documentation. Hewlett-Packard Company makes no
+ * representations about the suitability of this software for any
+ * purpose. It is provided "as is" without express or implied warranty.
+ *
+ */
+#ifndef @KWSYS_NAMESPACE@_hash_set_hxx
+#define @KWSYS_NAMESPACE@_hash_set_hxx
+
+#include <@KWSYS_NAMESPACE@/hashtable.hxx>
+
+#include <@KWSYS_NAMESPACE@/hash_fun.hxx>
+
+#include <functional> // equal_to
+
+#if defined(_MSC_VER)
+# pragma warning(push)
+# pragma warning(disable : 4284)
+# pragma warning(disable : 4786)
+#endif
+
+#if defined(__sgi) && !defined(__GNUC__) && (_MIPS_SIM != _MIPS_SIM_ABI32)
+# pragma set woff 1174
+# pragma set woff 1375
+#endif
+
+namespace @KWSYS_NAMESPACE@ {
+
+// identity is an extension: it is not part of the standard.
+template <class _Tp>
+struct _Identity
+{
+ const _Tp& operator()(const _Tp& __x) const { return __x; }
+};
+
+// Forward declaration of equality operator; needed for friend declaration.
+
+template <class _Value, class _HashFcn = hash<_Value>,
+ class _EqualKey = std::equal_to<_Value>,
+ class _Alloc = std::allocator<char> >
+class hash_set;
+
+template <class _Value, class _HashFcn, class _EqualKey, class _Alloc>
+bool operator==(const hash_set<_Value, _HashFcn, _EqualKey, _Alloc>& __hs1,
+ const hash_set<_Value, _HashFcn, _EqualKey, _Alloc>& __hs2);
+
+template <class _Value, class _HashFcn, class _EqualKey, class _Alloc>
+class hash_set
+{
+private:
+ typedef hashtable<_Value, _Value, _HashFcn, _Identity<_Value>, _EqualKey,
+ _Alloc>
+ _Ht;
+ _Ht _M_ht;
+
+public:
+ typedef typename _Ht::key_type key_type;
+ typedef typename _Ht::value_type value_type;
+ typedef typename _Ht::hasher hasher;
+ typedef typename _Ht::key_equal key_equal;
+
+ typedef typename _Ht::size_type size_type;
+ typedef typename _Ht::difference_type difference_type;
+ typedef typename _Ht::const_pointer pointer;
+ typedef typename _Ht::const_pointer const_pointer;
+ typedef typename _Ht::const_reference reference;
+ typedef typename _Ht::const_reference const_reference;
+
+ typedef typename _Ht::const_iterator iterator;
+ typedef typename _Ht::const_iterator const_iterator;
+
+ typedef typename _Ht::allocator_type allocator_type;
+
+ hasher hash_funct() const { return _M_ht.hash_funct(); }
+ key_equal key_eq() const { return _M_ht.key_eq(); }
+ allocator_type get_allocator() const { return _M_ht.get_allocator(); }
+
+public:
+ hash_set()
+ : _M_ht(100, hasher(), key_equal(), allocator_type())
+ {
+ }
+ explicit hash_set(size_type __n)
+ : _M_ht(__n, hasher(), key_equal(), allocator_type())
+ {
+ }
+ hash_set(size_type __n, const hasher& __hf)
+ : _M_ht(__n, __hf, key_equal(), allocator_type())
+ {
+ }
+ hash_set(size_type __n, const hasher& __hf, const key_equal& __eql,
+ const allocator_type& __a = allocator_type())
+ : _M_ht(__n, __hf, __eql, __a)
+ {
+ }
+
+ template <class _InputIterator>
+ hash_set(_InputIterator __f, _InputIterator __l)
+ : _M_ht(100, hasher(), key_equal(), allocator_type())
+ {
+ _M_ht.insert_unique(__f, __l);
+ }
+ template <class _InputIterator>
+ hash_set(_InputIterator __f, _InputIterator __l, size_type __n)
+ : _M_ht(__n, hasher(), key_equal(), allocator_type())
+ {
+ _M_ht.insert_unique(__f, __l);
+ }
+ template <class _InputIterator>
+ hash_set(_InputIterator __f, _InputIterator __l, size_type __n,
+ const hasher& __hf)
+ : _M_ht(__n, __hf, key_equal(), allocator_type())
+ {
+ _M_ht.insert_unique(__f, __l);
+ }
+ template <class _InputIterator>
+ hash_set(_InputIterator __f, _InputIterator __l, size_type __n,
+ const hasher& __hf, const key_equal& __eql,
+ const allocator_type& __a = allocator_type())
+ : _M_ht(__n, __hf, __eql, __a)
+ {
+ _M_ht.insert_unique(__f, __l);
+ }
+
+public:
+ size_type size() const { return _M_ht.size(); }
+ size_type max_size() const { return _M_ht.max_size(); }
+ bool empty() const { return _M_ht.empty(); }
+ void swap(hash_set& __hs) { _M_ht.swap(__hs._M_ht); }
+
+ friend bool operator==<>(const hash_set&, const hash_set&);
+
+ iterator begin() const { return _M_ht.begin(); }
+ iterator end() const { return _M_ht.end(); }
+
+public:
+ std::pair<iterator, bool> insert(const value_type& __obj)
+ {
+ typedef typename _Ht::iterator _Ht_iterator;
+ std::pair<_Ht_iterator, bool> __p = _M_ht.insert_unique(__obj);
+ return std::pair<iterator, bool>(__p.first, __p.second);
+ }
+ template <class _InputIterator>
+ void insert(_InputIterator __f, _InputIterator __l)
+ {
+ _M_ht.insert_unique(__f, __l);
+ }
+ std::pair<iterator, bool> insert_noresize(const value_type& __obj)
+ {
+ typedef typename _Ht::iterator _Ht_iterator;
+ std::pair<_Ht_iterator, bool> __p = _M_ht.insert_unique_noresize(__obj);
+ return std::pair<iterator, bool>(__p.first, __p.second);
+ }
+
+ iterator find(const key_type& __key) const { return _M_ht.find(__key); }
+
+ size_type count(const key_type& __key) const { return _M_ht.count(__key); }
+
+ std::pair<iterator, iterator> equal_range(const key_type& __key) const
+ {
+ return _M_ht.equal_range(__key);
+ }
+
+ size_type erase(const key_type& __key) { return _M_ht.erase(__key); }
+ void erase(iterator __it) { _M_ht.erase(__it); }
+ void erase(iterator __f, iterator __l) { _M_ht.erase(__f, __l); }
+ void clear() { _M_ht.clear(); }
+
+public:
+ void resize(size_type __hint) { _M_ht.resize(__hint); }
+ size_type bucket_count() const { return _M_ht.bucket_count(); }
+ size_type max_bucket_count() const { return _M_ht.max_bucket_count(); }
+ size_type elems_in_bucket(size_type __n) const
+ {
+ return _M_ht.elems_in_bucket(__n);
+ }
+};
+
+template <class _Value, class _HashFcn, class _EqualKey, class _Alloc>
+bool operator==(const hash_set<_Value, _HashFcn, _EqualKey, _Alloc>& __hs1,
+ const hash_set<_Value, _HashFcn, _EqualKey, _Alloc>& __hs2)
+{
+ return __hs1._M_ht == __hs2._M_ht;
+}
+
+template <class _Value, class _HashFcn, class _EqualKey, class _Alloc>
+inline bool operator!=(
+ const hash_set<_Value, _HashFcn, _EqualKey, _Alloc>& __hs1,
+ const hash_set<_Value, _HashFcn, _EqualKey, _Alloc>& __hs2)
+{
+ return !(__hs1 == __hs2);
+}
+
+template <class _Val, class _HashFcn, class _EqualKey, class _Alloc>
+inline void swap(hash_set<_Val, _HashFcn, _EqualKey, _Alloc>& __hs1,
+ hash_set<_Val, _HashFcn, _EqualKey, _Alloc>& __hs2)
+{
+ __hs1.swap(__hs2);
+}
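+
+// Illustrative usage sketch (added; assumes the kwsys namespace and hash<int>
+// from hash_fun.hxx). Note that iterator is a const_iterator: elements act as
+// their own keys, so they must not be modified in place:
+//
+//   kwsys::hash_set<int> s;
+//   s.insert(3);
+//   s.insert(3);                   // insert_unique rejects the duplicate
+//   // s.size() == 1 && s.find(3) != s.end()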
+
+template <class _Value, class _HashFcn = hash<_Value>,
+ class _EqualKey = std::equal_to<_Value>,
+ class _Alloc = std::allocator<char> >
+class hash_multiset;
+
+template <class _Val, class _HashFcn, class _EqualKey, class _Alloc>
+bool operator==(const hash_multiset<_Val, _HashFcn, _EqualKey, _Alloc>& __hs1,
+ const hash_multiset<_Val, _HashFcn, _EqualKey, _Alloc>& __hs2);
+
+template <class _Value, class _HashFcn, class _EqualKey, class _Alloc>
+class hash_multiset
+{
+private:
+ typedef hashtable<_Value, _Value, _HashFcn, _Identity<_Value>, _EqualKey,
+ _Alloc>
+ _Ht;
+ _Ht _M_ht;
+
+public:
+ typedef typename _Ht::key_type key_type;
+ typedef typename _Ht::value_type value_type;
+ typedef typename _Ht::hasher hasher;
+ typedef typename _Ht::key_equal key_equal;
+
+ typedef typename _Ht::size_type size_type;
+ typedef typename _Ht::difference_type difference_type;
+ typedef typename _Ht::const_pointer pointer;
+ typedef typename _Ht::const_pointer const_pointer;
+ typedef typename _Ht::const_reference reference;
+ typedef typename _Ht::const_reference const_reference;
+
+ typedef typename _Ht::const_iterator iterator;
+ typedef typename _Ht::const_iterator const_iterator;
+
+ typedef typename _Ht::allocator_type allocator_type;
+
+ hasher hash_funct() const { return _M_ht.hash_funct(); }
+ key_equal key_eq() const { return _M_ht.key_eq(); }
+ allocator_type get_allocator() const { return _M_ht.get_allocator(); }
+
+public:
+ hash_multiset()
+ : _M_ht(100, hasher(), key_equal(), allocator_type())
+ {
+ }
+ explicit hash_multiset(size_type __n)
+ : _M_ht(__n, hasher(), key_equal(), allocator_type())
+ {
+ }
+ hash_multiset(size_type __n, const hasher& __hf)
+ : _M_ht(__n, __hf, key_equal(), allocator_type())
+ {
+ }
+ hash_multiset(size_type __n, const hasher& __hf, const key_equal& __eql,
+ const allocator_type& __a = allocator_type())
+ : _M_ht(__n, __hf, __eql, __a)
+ {
+ }
+
+ template <class _InputIterator>
+ hash_multiset(_InputIterator __f, _InputIterator __l)
+ : _M_ht(100, hasher(), key_equal(), allocator_type())
+ {
+ _M_ht.insert_equal(__f, __l);
+ }
+ template <class _InputIterator>
+ hash_multiset(_InputIterator __f, _InputIterator __l, size_type __n)
+ : _M_ht(__n, hasher(), key_equal(), allocator_type())
+ {
+ _M_ht.insert_equal(__f, __l);
+ }
+ template <class _InputIterator>
+ hash_multiset(_InputIterator __f, _InputIterator __l, size_type __n,
+ const hasher& __hf)
+ : _M_ht(__n, __hf, key_equal(), allocator_type())
+ {
+ _M_ht.insert_equal(__f, __l);
+ }
+ template <class _InputIterator>
+ hash_multiset(_InputIterator __f, _InputIterator __l, size_type __n,
+ const hasher& __hf, const key_equal& __eql,
+ const allocator_type& __a = allocator_type())
+ : _M_ht(__n, __hf, __eql, __a)
+ {
+ _M_ht.insert_equal(__f, __l);
+ }
+
+public:
+ size_type size() const { return _M_ht.size(); }
+ size_type max_size() const { return _M_ht.max_size(); }
+ bool empty() const { return _M_ht.empty(); }
+  void swap(hash_multiset& __hs) { _M_ht.swap(__hs._M_ht); }
+
+ friend bool operator==<>(const hash_multiset&, const hash_multiset&);
+
+ iterator begin() const { return _M_ht.begin(); }
+ iterator end() const { return _M_ht.end(); }
+
+public:
+ iterator insert(const value_type& __obj)
+ {
+ return _M_ht.insert_equal(__obj);
+ }
+ template <class _InputIterator>
+ void insert(_InputIterator __f, _InputIterator __l)
+ {
+ _M_ht.insert_equal(__f, __l);
+ }
+ iterator insert_noresize(const value_type& __obj)
+ {
+ return _M_ht.insert_equal_noresize(__obj);
+ }
+
+ iterator find(const key_type& __key) const { return _M_ht.find(__key); }
+
+ size_type count(const key_type& __key) const { return _M_ht.count(__key); }
+
+ std::pair<iterator, iterator> equal_range(const key_type& __key) const
+ {
+ return _M_ht.equal_range(__key);
+ }
+
+ size_type erase(const key_type& __key) { return _M_ht.erase(__key); }
+ void erase(iterator __it) { _M_ht.erase(__it); }
+ void erase(iterator __f, iterator __l) { _M_ht.erase(__f, __l); }
+ void clear() { _M_ht.clear(); }
+
+public:
+ void resize(size_type __hint) { _M_ht.resize(__hint); }
+ size_type bucket_count() const { return _M_ht.bucket_count(); }
+ size_type max_bucket_count() const { return _M_ht.max_bucket_count(); }
+ size_type elems_in_bucket(size_type __n) const
+ {
+ return _M_ht.elems_in_bucket(__n);
+ }
+};
+
+template <class _Val, class _HashFcn, class _EqualKey, class _Alloc>
+bool operator==(const hash_multiset<_Val, _HashFcn, _EqualKey, _Alloc>& __hs1,
+ const hash_multiset<_Val, _HashFcn, _EqualKey, _Alloc>& __hs2)
+{
+ return __hs1._M_ht == __hs2._M_ht;
+}
+
+template <class _Val, class _HashFcn, class _EqualKey, class _Alloc>
+inline bool operator!=(
+ const hash_multiset<_Val, _HashFcn, _EqualKey, _Alloc>& __hs1,
+ const hash_multiset<_Val, _HashFcn, _EqualKey, _Alloc>& __hs2)
+{
+ return !(__hs1 == __hs2);
+}
+
+template <class _Val, class _HashFcn, class _EqualKey, class _Alloc>
+inline void swap(hash_multiset<_Val, _HashFcn, _EqualKey, _Alloc>& __hs1,
+ hash_multiset<_Val, _HashFcn, _EqualKey, _Alloc>& __hs2)
+{
+ __hs1.swap(__hs2);
+}
+
+} // namespace @KWSYS_NAMESPACE@
+
+#if defined(__sgi) && !defined(__GNUC__) && (_MIPS_SIM != _MIPS_SIM_ABI32)
+# pragma reset woff 1174
+# pragma reset woff 1375
+#endif
+
+#if defined(_MSC_VER)
+# pragma warning(pop)
+#endif
+
+#endif
diff --git a/test/API/driver/kwsys/hashtable.hxx.in b/test/API/driver/kwsys/hashtable.hxx.in
new file mode 100644
index 0000000..8c4b002
--- /dev/null
+++ b/test/API/driver/kwsys/hashtable.hxx.in
@@ -0,0 +1,995 @@
+/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying
+ file Copyright.txt or https://cmake.org/licensing#kwsys for details. */
+/*
+ * Copyright (c) 1996
+ * Silicon Graphics Computer Systems, Inc.
+ *
+ * Permission to use, copy, modify, distribute and sell this software
+ * and its documentation for any purpose is hereby granted without fee,
+ * provided that the above copyright notice appear in all copies and
+ * that both that copyright notice and this permission notice appear
+ * in supporting documentation. Silicon Graphics makes no
+ * representations about the suitability of this software for any
+ * purpose. It is provided "as is" without express or implied warranty.
+ *
+ *
+ * Copyright (c) 1994
+ * Hewlett-Packard Company
+ *
+ * Permission to use, copy, modify, distribute and sell this software
+ * and its documentation for any purpose is hereby granted without fee,
+ * provided that the above copyright notice appear in all copies and
+ * that both that copyright notice and this permission notice appear
+ * in supporting documentation. Hewlett-Packard Company makes no
+ * representations about the suitability of this software for any
+ * purpose. It is provided "as is" without express or implied warranty.
+ *
+ */
+#ifdef __BORLANDC__
+# pragma warn - 8027 /* 'for' not inlined. */
+# pragma warn - 8026 /* 'exception' not inlined. */
+#endif
+
+#ifndef @KWSYS_NAMESPACE@_hashtable_hxx
+# define @KWSYS_NAMESPACE@_hashtable_hxx
+
+# include <@KWSYS_NAMESPACE@/Configure.hxx>
+
+# include <algorithm> // lower_bound
+# include <iterator> // iterator_traits
+# include <memory> // allocator
+# include <stddef.h> // size_t
+# include <utility> // pair
+# include <vector> // vector
+
+# if defined(_MSC_VER)
+# pragma warning(push)
+# pragma warning(disable : 4284)
+# pragma warning(disable : 4786)
+# pragma warning(disable : 4512) /* no assignment operator for class */
+# endif
+# if defined(__sgi) && !defined(__GNUC__)
+# pragma set woff 3970 /* pointer to int conversion */ 3321 3968
+# endif
+
+// In C++11, clang will warn about using dynamic exception specifications
+// as they are deprecated. But as this class is trying to faithfully
+// mimic unordered_set and unordered_map, we want to keep the 'throw()'
+// decorations below. So we suppress the warning.
+# if defined(__clang__) && defined(__has_warning)
+# if __has_warning("-Wdeprecated")
+# pragma clang diagnostic push
+# pragma clang diagnostic ignored "-Wdeprecated"
+# endif
+# endif
+
+namespace @KWSYS_NAMESPACE@ {
+
+template <class _Val>
+struct _Hashtable_node
+{
+ _Hashtable_node* _M_next;
+ _Val _M_val;
+ void public_method_to_quiet_warning_about_all_methods_private();
+
+private:
+ void operator=(_Hashtable_node<_Val> const&) = delete;
+};
+
+template <class _Val, class _Key, class _HashFcn, class _ExtractKey,
+ class _EqualKey, class _Alloc = std::allocator<char> >
+class hashtable;
+
+template <class _Val, class _Key, class _HashFcn, class _ExtractKey,
+ class _EqualKey, class _Alloc>
+struct _Hashtable_iterator;
+
+template <class _Val, class _Key, class _HashFcn, class _ExtractKey,
+ class _EqualKey, class _Alloc>
+struct _Hashtable_const_iterator;
+
+template <class _Val, class _Key, class _HashFcn, class _ExtractKey,
+ class _EqualKey, class _Alloc>
+struct _Hashtable_iterator
+{
+ typedef hashtable<_Val, _Key, _HashFcn, _ExtractKey, _EqualKey, _Alloc>
+ _Hashtable;
+ typedef _Hashtable_iterator<_Val, _Key, _HashFcn, _ExtractKey, _EqualKey,
+ _Alloc>
+ iterator;
+ typedef _Hashtable_const_iterator<_Val, _Key, _HashFcn, _ExtractKey,
+ _EqualKey, _Alloc>
+ const_iterator;
+ typedef _Hashtable_node<_Val> _Node;
+
+ typedef std::forward_iterator_tag iterator_category;
+ typedef _Val value_type;
+ typedef ptrdiff_t difference_type;
+ typedef size_t size_type;
+ typedef _Val& reference;
+ typedef _Val* pointer;
+
+ _Node* _M_cur;
+ _Hashtable* _M_ht;
+
+ _Hashtable_iterator(_Node* __n, _Hashtable* __tab)
+ : _M_cur(__n)
+ , _M_ht(__tab)
+ {
+ }
+ _Hashtable_iterator() {}
+ reference operator*() const { return _M_cur->_M_val; }
+ pointer operator->() const { return &(operator*()); }
+ iterator& operator++();
+ iterator operator++(int);
+ bool operator==(const iterator& __it) const { return _M_cur == __it._M_cur; }
+ bool operator!=(const iterator& __it) const { return _M_cur != __it._M_cur; }
+};
+
+template <class _Val, class _Key, class _HashFcn, class _ExtractKey,
+ class _EqualKey, class _Alloc>
+struct _Hashtable_const_iterator
+{
+ typedef hashtable<_Val, _Key, _HashFcn, _ExtractKey, _EqualKey, _Alloc>
+ _Hashtable;
+ typedef _Hashtable_iterator<_Val, _Key, _HashFcn, _ExtractKey, _EqualKey,
+ _Alloc>
+ iterator;
+ typedef _Hashtable_const_iterator<_Val, _Key, _HashFcn, _ExtractKey,
+ _EqualKey, _Alloc>
+ const_iterator;
+ typedef _Hashtable_node<_Val> _Node;
+
+ typedef std::forward_iterator_tag iterator_category;
+ typedef _Val value_type;
+ typedef ptrdiff_t difference_type;
+ typedef size_t size_type;
+ typedef const _Val& reference;
+ typedef const _Val* pointer;
+
+ const _Node* _M_cur;
+ const _Hashtable* _M_ht;
+
+ _Hashtable_const_iterator(const _Node* __n, const _Hashtable* __tab)
+ : _M_cur(__n)
+ , _M_ht(__tab)
+ {
+ }
+ _Hashtable_const_iterator() {}
+ _Hashtable_const_iterator(const iterator& __it)
+ : _M_cur(__it._M_cur)
+ , _M_ht(__it._M_ht)
+ {
+ }
+ reference operator*() const { return _M_cur->_M_val; }
+ pointer operator->() const { return &(operator*()); }
+ const_iterator& operator++();
+ const_iterator operator++(int);
+ bool operator==(const const_iterator& __it) const
+ {
+ return _M_cur == __it._M_cur;
+ }
+ bool operator!=(const const_iterator& __it) const
+ {
+ return _M_cur != __it._M_cur;
+ }
+};
+
+// Note: assumes long is at least 32 bits.
+enum
+{
+ _stl_num_primes = 31
+};
+
+// Return a pointer to the prime table through a function whose local static
+// holds the data, instead of defining a namespace-scope static array here.
+static inline const unsigned long* get_stl_prime_list()
+{
+
+ static const unsigned long _stl_prime_list[_stl_num_primes] = {
+ 5ul, 11ul, 23ul, 53ul, 97ul,
+ 193ul, 389ul, 769ul, 1543ul, 3079ul,
+ 6151ul, 12289ul, 24593ul, 49157ul, 98317ul,
+ 196613ul, 393241ul, 786433ul, 1572869ul, 3145739ul,
+ 6291469ul, 12582917ul, 25165843ul, 50331653ul, 100663319ul,
+ 201326611ul, 402653189ul, 805306457ul, 1610612741ul, 3221225473ul,
+ 4294967291ul
+ };
+
+ return &_stl_prime_list[0];
+}
+
+static inline size_t _stl_next_prime(size_t __n)
+{
+ const unsigned long* __first = get_stl_prime_list();
+ const unsigned long* __last = get_stl_prime_list() + (int)_stl_num_primes;
+ const unsigned long* pos = std::lower_bound(__first, __last, __n);
+ return pos == __last ? *(__last - 1) : *pos;
+}
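+
+// Illustrative note (added): _stl_next_prime rounds a requested bucket count
+// up to the next entry of the table above (or to its last entry), which is
+// how _M_initialize_buckets() and resize() choose bucket counts. For example:
+//
+//   _stl_next_prime(100)  == 193
+//   _stl_next_prime(193)  == 193
+//   _stl_next_prime(5000) == 6151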
+
+// Forward declaration of operator==.
+
+template <class _Val, class _Key, class _HF, class _Ex, class _Eq, class _All>
+class hashtable;
+
+template <class _Val, class _Key, class _HF, class _Ex, class _Eq, class _All>
+bool operator==(const hashtable<_Val, _Key, _HF, _Ex, _Eq, _All>& __ht1,
+ const hashtable<_Val, _Key, _HF, _Ex, _Eq, _All>& __ht2);
+
+// Hashtables handle allocators a bit differently than other containers
+// do. If we're using standard-conforming allocators, then a hashtable
+// unconditionally has a member variable to hold its allocator, even if
+// it so happens that all instances of the allocator type are identical.
+// This is because, for hashtables, this extra storage is negligible.
+// Additionally, a base class wouldn't serve any other purposes; it
+// wouldn't, for example, simplify the exception-handling code.
+
+template <class _Val, class _Key, class _HashFcn, class _ExtractKey,
+ class _EqualKey, class _Alloc>
+class hashtable
+{
+public:
+ typedef _Key key_type;
+ typedef _Val value_type;
+ typedef _HashFcn hasher;
+ typedef _EqualKey key_equal;
+
+ typedef size_t size_type;
+ typedef ptrdiff_t difference_type;
+ typedef value_type* pointer;
+ typedef const value_type* const_pointer;
+ typedef value_type& reference;
+ typedef const value_type& const_reference;
+
+ hasher hash_funct() const { return _M_hash; }
+ key_equal key_eq() const { return _M_equals; }
+
+private:
+ typedef _Hashtable_node<_Val> _Node;
+
+public:
+ typedef typename _Alloc::template rebind<_Val>::other allocator_type;
+ allocator_type get_allocator() const { return _M_node_allocator; }
+
+private:
+ typedef
+ typename _Alloc::template rebind<_Node>::other _M_node_allocator_type;
+ typedef
+ typename _Alloc::template rebind<_Node*>::other _M_node_ptr_allocator_type;
+ typedef std::vector<_Node*, _M_node_ptr_allocator_type> _M_buckets_type;
+
+private:
+ _M_node_allocator_type _M_node_allocator;
+ hasher _M_hash;
+ key_equal _M_equals;
+ _ExtractKey _M_get_key;
+ _M_buckets_type _M_buckets;
+ size_type _M_num_elements;
+
+ _Node* _M_get_node() { return _M_node_allocator.allocate(1); }
+ void _M_put_node(_Node* __p) { _M_node_allocator.deallocate(__p, 1); }
+
+public:
+ typedef _Hashtable_iterator<_Val, _Key, _HashFcn, _ExtractKey, _EqualKey,
+ _Alloc>
+ iterator;
+ typedef _Hashtable_const_iterator<_Val, _Key, _HashFcn, _ExtractKey,
+ _EqualKey, _Alloc>
+ const_iterator;
+
+ friend struct _Hashtable_iterator<_Val, _Key, _HashFcn, _ExtractKey,
+ _EqualKey, _Alloc>;
+ friend struct _Hashtable_const_iterator<_Val, _Key, _HashFcn, _ExtractKey,
+ _EqualKey, _Alloc>;
+
+public:
+ hashtable(size_type __n, const _HashFcn& __hf, const _EqualKey& __eql,
+ const _ExtractKey& __ext,
+ const allocator_type& __a = allocator_type())
+ : _M_node_allocator(__a)
+ , _M_hash(__hf)
+ , _M_equals(__eql)
+ , _M_get_key(__ext)
+ , _M_buckets(__a)
+ , _M_num_elements(0)
+ {
+ _M_initialize_buckets(__n);
+ }
+
+ hashtable(size_type __n, const _HashFcn& __hf, const _EqualKey& __eql,
+ const allocator_type& __a = allocator_type())
+ : _M_node_allocator(__a)
+ , _M_hash(__hf)
+ , _M_equals(__eql)
+ , _M_get_key(_ExtractKey())
+ , _M_buckets(__a)
+ , _M_num_elements(0)
+ {
+ _M_initialize_buckets(__n);
+ }
+
+ hashtable(const hashtable& __ht)
+ : _M_node_allocator(__ht.get_allocator())
+ , _M_hash(__ht._M_hash)
+ , _M_equals(__ht._M_equals)
+ , _M_get_key(__ht._M_get_key)
+ , _M_buckets(__ht.get_allocator())
+ , _M_num_elements(0)
+ {
+ _M_copy_from(__ht);
+ }
+
+ hashtable& operator=(const hashtable& __ht)
+ {
+ if (&__ht != this) {
+ clear();
+ _M_hash = __ht._M_hash;
+ _M_equals = __ht._M_equals;
+ _M_get_key = __ht._M_get_key;
+ _M_copy_from(__ht);
+ }
+ return *this;
+ }
+
+ ~hashtable() { clear(); }
+
+ size_type size() const { return _M_num_elements; }
+ size_type max_size() const { return size_type(-1); }
+ bool empty() const { return size() == 0; }
+
+ void swap(hashtable& __ht)
+ {
+ std::swap(_M_hash, __ht._M_hash);
+ std::swap(_M_equals, __ht._M_equals);
+ std::swap(_M_get_key, __ht._M_get_key);
+ _M_buckets.swap(__ht._M_buckets);
+ std::swap(_M_num_elements, __ht._M_num_elements);
+ }
+
+ iterator begin()
+ {
+ for (size_type __n = 0; __n < _M_buckets.size(); ++__n)
+ if (_M_buckets[__n])
+ return iterator(_M_buckets[__n], this);
+ return end();
+ }
+
+ iterator end() { return iterator(nullptr, this); }
+
+ const_iterator begin() const
+ {
+ for (size_type __n = 0; __n < _M_buckets.size(); ++__n)
+ if (_M_buckets[__n])
+ return const_iterator(_M_buckets[__n], this);
+ return end();
+ }
+
+ const_iterator end() const { return const_iterator(nullptr, this); }
+
+ friend bool operator==<>(const hashtable&, const hashtable&);
+
+public:
+ size_type bucket_count() const { return _M_buckets.size(); }
+
+ size_type max_bucket_count() const
+ {
+ return get_stl_prime_list()[(int)_stl_num_primes - 1];
+ }
+
+ size_type elems_in_bucket(size_type __bucket) const
+ {
+ size_type __result = 0;
+ for (_Node* __cur = _M_buckets[__bucket]; __cur; __cur = __cur->_M_next)
+ __result += 1;
+ return __result;
+ }
+
+ std::pair<iterator, bool> insert_unique(const value_type& __obj)
+ {
+ resize(_M_num_elements + 1);
+ return insert_unique_noresize(__obj);
+ }
+
+ iterator insert_equal(const value_type& __obj)
+ {
+ resize(_M_num_elements + 1);
+ return insert_equal_noresize(__obj);
+ }
+
+ std::pair<iterator, bool> insert_unique_noresize(const value_type& __obj);
+ iterator insert_equal_noresize(const value_type& __obj);
+
+ template <class _InputIterator>
+ void insert_unique(_InputIterator __f, _InputIterator __l)
+ {
+ insert_unique(
+ __f, __l,
+ typename std::iterator_traits<_InputIterator>::iterator_category());
+ }
+
+ template <class _InputIterator>
+ void insert_equal(_InputIterator __f, _InputIterator __l)
+ {
+ insert_equal(
+ __f, __l,
+ typename std::iterator_traits<_InputIterator>::iterator_category());
+ }
+
+ template <class _InputIterator>
+ void insert_unique(_InputIterator __f, _InputIterator __l,
+ std::input_iterator_tag)
+ {
+ for (; __f != __l; ++__f)
+ insert_unique(*__f);
+ }
+
+ template <class _InputIterator>
+ void insert_equal(_InputIterator __f, _InputIterator __l,
+ std::input_iterator_tag)
+ {
+ for (; __f != __l; ++__f)
+ insert_equal(*__f);
+ }
+
+ template <class _ForwardIterator>
+ void insert_unique(_ForwardIterator __f, _ForwardIterator __l,
+ std::forward_iterator_tag)
+ {
+    size_type __n = static_cast<size_type>(std::distance(__f, __l));
+ resize(_M_num_elements + __n);
+ for (; __n > 0; --__n, ++__f)
+ insert_unique_noresize(*__f);
+ }
+
+ template <class _ForwardIterator>
+ void insert_equal(_ForwardIterator __f, _ForwardIterator __l,
+ std::forward_iterator_tag)
+ {
+    size_type __n = static_cast<size_type>(std::distance(__f, __l));
+ resize(_M_num_elements + __n);
+ for (; __n > 0; --__n, ++__f)
+ insert_equal_noresize(*__f);
+ }
+
+ reference find_or_insert(const value_type& __obj);
+
+ iterator find(const key_type& __key)
+ {
+ size_type __n = _M_bkt_num_key(__key);
+ _Node* __first;
+ for (__first = _M_buckets[__n];
+ __first && !_M_equals(_M_get_key(__first->_M_val), __key);
+ __first = __first->_M_next) {
+ }
+ return iterator(__first, this);
+ }
+
+ const_iterator find(const key_type& __key) const
+ {
+ size_type __n = _M_bkt_num_key(__key);
+ const _Node* __first;
+ for (__first = _M_buckets[__n];
+ __first && !_M_equals(_M_get_key(__first->_M_val), __key);
+ __first = __first->_M_next) {
+ }
+ return const_iterator(__first, this);
+ }
+
+ size_type count(const key_type& __key) const
+ {
+ const size_type __n = _M_bkt_num_key(__key);
+ size_type __result = 0;
+
+ for (const _Node* __cur = _M_buckets[__n]; __cur; __cur = __cur->_M_next)
+ if (_M_equals(_M_get_key(__cur->_M_val), __key))
+ ++__result;
+ return __result;
+ }
+
+ std::pair<iterator, iterator> equal_range(const key_type& __key);
+
+ std::pair<const_iterator, const_iterator> equal_range(
+ const key_type& __key) const;
+
+ size_type erase(const key_type& __key);
+ void erase(const iterator& __it);
+ void erase(iterator __first, iterator __last);
+
+ void erase(const const_iterator& __it);
+ void erase(const_iterator __first, const_iterator __last);
+
+ void resize(size_type __num_elements_hint);
+ void clear();
+
+private:
+ size_type _M_next_size(size_type __n) const { return _stl_next_prime(__n); }
+
+ void _M_initialize_buckets(size_type __n)
+ {
+ const size_type __n_buckets = _M_next_size(__n);
+ _M_buckets.reserve(__n_buckets);
+ _M_buckets.insert(_M_buckets.end(), __n_buckets, (_Node*)nullptr);
+ _M_num_elements = 0;
+ }
+
+ size_type _M_bkt_num_key(const key_type& __key) const
+ {
+ return _M_bkt_num_key(__key, _M_buckets.size());
+ }
+
+ size_type _M_bkt_num(const value_type& __obj) const
+ {
+ return _M_bkt_num_key(_M_get_key(__obj));
+ }
+
+ size_type _M_bkt_num_key(const key_type& __key, size_t __n) const
+ {
+ return _M_hash(__key) % __n;
+ }
+
+ size_type _M_bkt_num(const value_type& __obj, size_t __n) const
+ {
+ return _M_bkt_num_key(_M_get_key(__obj), __n);
+ }
+
+ void construct(_Val* p, const _Val& v) { new (p) _Val(v); }
+ void destroy(_Val* p)
+ {
+ (void)p;
+ p->~_Val();
+ }
+
+ _Node* _M_new_node(const value_type& __obj)
+ {
+ _Node* __n = _M_get_node();
+ __n->_M_next = nullptr;
+ try {
+ construct(&__n->_M_val, __obj);
+ return __n;
+ } catch (...) {
+ _M_put_node(__n);
+ throw;
+ }
+ }
+
+ void _M_delete_node(_Node* __n)
+ {
+ destroy(&__n->_M_val);
+ _M_put_node(__n);
+ }
+
+ void _M_erase_bucket(const size_type __n, _Node* __first, _Node* __last);
+ void _M_erase_bucket(const size_type __n, _Node* __last);
+
+ void _M_copy_from(const hashtable& __ht);
+};
+
+template <class _Val, class _Key, class _HF, class _ExK, class _EqK,
+ class _All>
+_Hashtable_iterator<_Val, _Key, _HF, _ExK, _EqK, _All>&
+_Hashtable_iterator<_Val, _Key, _HF, _ExK, _EqK, _All>::operator++()
+{
+ const _Node* __old = _M_cur;
+ _M_cur = _M_cur->_M_next;
+ if (!_M_cur) {
+ size_type __bucket = _M_ht->_M_bkt_num(__old->_M_val);
+ while (!_M_cur && ++__bucket < _M_ht->_M_buckets.size())
+ _M_cur = _M_ht->_M_buckets[__bucket];
+ }
+ return *this;
+}
+
+template <class _Val, class _Key, class _HF, class _ExK, class _EqK,
+ class _All>
+inline _Hashtable_iterator<_Val, _Key, _HF, _ExK, _EqK, _All>
+_Hashtable_iterator<_Val, _Key, _HF, _ExK, _EqK, _All>::operator++(int)
+{
+ iterator __tmp = *this;
+ ++*this;
+ return __tmp;
+}
+
+template <class _Val, class _Key, class _HF, class _ExK, class _EqK,
+ class _All>
+_Hashtable_const_iterator<_Val, _Key, _HF, _ExK, _EqK, _All>&
+_Hashtable_const_iterator<_Val, _Key, _HF, _ExK, _EqK, _All>::operator++()
+{
+ const _Node* __old = _M_cur;
+ _M_cur = _M_cur->_M_next;
+ if (!_M_cur) {
+ size_type __bucket = _M_ht->_M_bkt_num(__old->_M_val);
+ while (!_M_cur && ++__bucket < _M_ht->_M_buckets.size())
+ _M_cur = _M_ht->_M_buckets[__bucket];
+ }
+ return *this;
+}
+
+template <class _Val, class _Key, class _HF, class _ExK, class _EqK,
+ class _All>
+inline _Hashtable_const_iterator<_Val, _Key, _HF, _ExK, _EqK, _All>
+_Hashtable_const_iterator<_Val, _Key, _HF, _ExK, _EqK, _All>::operator++(int)
+{
+ const_iterator __tmp = *this;
+ ++*this;
+ return __tmp;
+}
+
+template <class _Val, class _Key, class _HF, class _Ex, class _Eq, class _All>
+bool operator==(const hashtable<_Val, _Key, _HF, _Ex, _Eq, _All>& __ht1,
+ const hashtable<_Val, _Key, _HF, _Ex, _Eq, _All>& __ht2)
+{
+ typedef typename hashtable<_Val, _Key, _HF, _Ex, _Eq, _All>::_Node _Node;
+ if (__ht1._M_buckets.size() != __ht2._M_buckets.size())
+ return false;
+  for (size_t __n = 0; __n < __ht1._M_buckets.size(); ++__n) {
+ _Node* __cur1 = __ht1._M_buckets[__n];
+ _Node* __cur2 = __ht2._M_buckets[__n];
+ for (; __cur1 && __cur2 && __cur1->_M_val == __cur2->_M_val;
+ __cur1 = __cur1->_M_next, __cur2 = __cur2->_M_next) {
+ }
+ if (__cur1 || __cur2)
+ return false;
+ }
+ return true;
+}
+
+template <class _Val, class _Key, class _HF, class _Ex, class _Eq, class _All>
+inline bool operator!=(const hashtable<_Val, _Key, _HF, _Ex, _Eq, _All>& __ht1,
+ const hashtable<_Val, _Key, _HF, _Ex, _Eq, _All>& __ht2)
+{
+ return !(__ht1 == __ht2);
+}
+
+template <class _Val, class _Key, class _HF, class _Extract, class _EqKey,
+ class _All>
+inline void swap(hashtable<_Val, _Key, _HF, _Extract, _EqKey, _All>& __ht1,
+ hashtable<_Val, _Key, _HF, _Extract, _EqKey, _All>& __ht2)
+{
+ __ht1.swap(__ht2);
+}
+
+template <class _Val, class _Key, class _HF, class _Ex, class _Eq, class _All>
+std::pair<typename hashtable<_Val, _Key, _HF, _Ex, _Eq, _All>::iterator, bool>
+hashtable<_Val, _Key, _HF, _Ex, _Eq, _All>::insert_unique_noresize(
+ const value_type& __obj)
+{
+ const size_type __n = _M_bkt_num(__obj);
+ _Node* __first = _M_buckets[__n];
+
+ for (_Node* __cur = __first; __cur; __cur = __cur->_M_next)
+ if (_M_equals(_M_get_key(__cur->_M_val), _M_get_key(__obj)))
+ return std::pair<iterator, bool>(iterator(__cur, this), false);
+
+ _Node* __tmp = _M_new_node(__obj);
+ __tmp->_M_next = __first;
+ _M_buckets[__n] = __tmp;
+ ++_M_num_elements;
+ return std::pair<iterator, bool>(iterator(__tmp, this), true);
+}
+
+template <class _Val, class _Key, class _HF, class _Ex, class _Eq, class _All>
+typename hashtable<_Val, _Key, _HF, _Ex, _Eq, _All>::iterator
+hashtable<_Val, _Key, _HF, _Ex, _Eq, _All>::insert_equal_noresize(
+ const value_type& __obj)
+{
+ const size_type __n = _M_bkt_num(__obj);
+ _Node* __first = _M_buckets[__n];
+
+ for (_Node* __cur = __first; __cur; __cur = __cur->_M_next)
+ if (_M_equals(_M_get_key(__cur->_M_val), _M_get_key(__obj))) {
+ _Node* __tmp = _M_new_node(__obj);
+ __tmp->_M_next = __cur->_M_next;
+ __cur->_M_next = __tmp;
+ ++_M_num_elements;
+ return iterator(__tmp, this);
+ }
+
+ _Node* __tmp = _M_new_node(__obj);
+ __tmp->_M_next = __first;
+ _M_buckets[__n] = __tmp;
+ ++_M_num_elements;
+ return iterator(__tmp, this);
+}
+
+template <class _Val, class _Key, class _HF, class _Ex, class _Eq, class _All>
+typename hashtable<_Val, _Key, _HF, _Ex, _Eq, _All>::reference
+hashtable<_Val, _Key, _HF, _Ex, _Eq, _All>::find_or_insert(
+ const value_type& __obj)
+{
+ resize(_M_num_elements + 1);
+
+ size_type __n = _M_bkt_num(__obj);
+ _Node* __first = _M_buckets[__n];
+
+ for (_Node* __cur = __first; __cur; __cur = __cur->_M_next)
+ if (_M_equals(_M_get_key(__cur->_M_val), _M_get_key(__obj)))
+ return __cur->_M_val;
+
+ _Node* __tmp = _M_new_node(__obj);
+ __tmp->_M_next = __first;
+ _M_buckets[__n] = __tmp;
+ ++_M_num_elements;
+ return __tmp->_M_val;
+}
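+
+// Illustrative note (added): find_or_insert is what hash_map::operator[]
+// calls with value_type(key, _Tp()). For example, with
+// kwsys::hash_map<int, int> m, the expression m[5] += 1 first links a {5, 0}
+// node into bucket _M_bkt_num_key(5) and then increments the stored value.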
+
+template <class _Val, class _Key, class _HF, class _Ex, class _Eq, class _All>
+std::pair<typename hashtable<_Val, _Key, _HF, _Ex, _Eq, _All>::iterator,
+ typename hashtable<_Val, _Key, _HF, _Ex, _Eq, _All>::iterator>
+hashtable<_Val, _Key, _HF, _Ex, _Eq, _All>::equal_range(const key_type& __key)
+{
+ typedef std::pair<iterator, iterator> _Pii;
+ const size_type __n = _M_bkt_num_key(__key);
+
+ for (_Node* __first = _M_buckets[__n]; __first; __first = __first->_M_next)
+ if (_M_equals(_M_get_key(__first->_M_val), __key)) {
+ for (_Node* __cur = __first->_M_next; __cur; __cur = __cur->_M_next)
+ if (!_M_equals(_M_get_key(__cur->_M_val), __key))
+ return _Pii(iterator(__first, this), iterator(__cur, this));
+ for (size_type __m = __n + 1; __m < _M_buckets.size(); ++__m)
+ if (_M_buckets[__m])
+ return _Pii(iterator(__first, this),
+ iterator(_M_buckets[__m], this));
+ return _Pii(iterator(__first, this), end());
+ }
+ return _Pii(end(), end());
+}
+
+template <class _Val, class _Key, class _HF, class _Ex, class _Eq, class _All>
+std::pair<typename hashtable<_Val, _Key, _HF, _Ex, _Eq, _All>::const_iterator,
+ typename hashtable<_Val, _Key, _HF, _Ex, _Eq, _All>::const_iterator>
+hashtable<_Val, _Key, _HF, _Ex, _Eq, _All>::equal_range(
+ const key_type& __key) const
+{
+ typedef std::pair<const_iterator, const_iterator> _Pii;
+ const size_type __n = _M_bkt_num_key(__key);
+
+ for (const _Node* __first = _M_buckets[__n]; __first;
+ __first = __first->_M_next) {
+ if (_M_equals(_M_get_key(__first->_M_val), __key)) {
+ for (const _Node* __cur = __first->_M_next; __cur;
+ __cur = __cur->_M_next)
+ if (!_M_equals(_M_get_key(__cur->_M_val), __key))
+ return _Pii(const_iterator(__first, this),
+ const_iterator(__cur, this));
+ for (size_type __m = __n + 1; __m < _M_buckets.size(); ++__m)
+ if (_M_buckets[__m])
+ return _Pii(const_iterator(__first, this),
+ const_iterator(_M_buckets[__m], this));
+ return _Pii(const_iterator(__first, this), end());
+ }
+ }
+ return _Pii(end(), end());
+}
+
+template <class _Val, class _Key, class _HF, class _Ex, class _Eq, class _All>
+typename hashtable<_Val, _Key, _HF, _Ex, _Eq, _All>::size_type
+hashtable<_Val, _Key, _HF, _Ex, _Eq, _All>::erase(const key_type& __key)
+{
+ const size_type __n = _M_bkt_num_key(__key);
+ _Node* __first = _M_buckets[__n];
+ size_type __erased = 0;
+
+ if (__first) {
+ _Node* __cur = __first;
+ _Node* __next = __cur->_M_next;
+ while (__next) {
+ if (_M_equals(_M_get_key(__next->_M_val), __key)) {
+ __cur->_M_next = __next->_M_next;
+ _M_delete_node(__next);
+ __next = __cur->_M_next;
+ ++__erased;
+ --_M_num_elements;
+ } else {
+ __cur = __next;
+ __next = __cur->_M_next;
+ }
+ }
+ if (_M_equals(_M_get_key(__first->_M_val), __key)) {
+ _M_buckets[__n] = __first->_M_next;
+ _M_delete_node(__first);
+ ++__erased;
+ --_M_num_elements;
+ }
+ }
+ return __erased;
+}
+
+template <class _Val, class _Key, class _HF, class _Ex, class _Eq, class _All>
+void hashtable<_Val, _Key, _HF, _Ex, _Eq, _All>::erase(const iterator& __it)
+{
+ _Node* __p = __it._M_cur;
+ if (__p) {
+ const size_type __n = _M_bkt_num(__p->_M_val);
+ _Node* __cur = _M_buckets[__n];
+
+ if (__cur == __p) {
+ _M_buckets[__n] = __cur->_M_next;
+ _M_delete_node(__cur);
+ --_M_num_elements;
+ } else {
+ _Node* __next = __cur->_M_next;
+ while (__next) {
+ if (__next == __p) {
+ __cur->_M_next = __next->_M_next;
+ _M_delete_node(__next);
+ --_M_num_elements;
+ break;
+ } else {
+ __cur = __next;
+ __next = __cur->_M_next;
+ }
+ }
+ }
+ }
+}
+
+template <class _Val, class _Key, class _HF, class _Ex, class _Eq, class _All>
+void hashtable<_Val, _Key, _HF, _Ex, _Eq, _All>::erase(iterator __first,
+ iterator __last)
+{
+ size_type __f_bucket =
+ __first._M_cur ? _M_bkt_num(__first._M_cur->_M_val) : _M_buckets.size();
+ size_type __l_bucket =
+ __last._M_cur ? _M_bkt_num(__last._M_cur->_M_val) : _M_buckets.size();
+
+ if (__first._M_cur == __last._M_cur)
+ return;
+ else if (__f_bucket == __l_bucket)
+ _M_erase_bucket(__f_bucket, __first._M_cur, __last._M_cur);
+ else {
+ _M_erase_bucket(__f_bucket, __first._M_cur, nullptr);
+ for (size_type __n = __f_bucket + 1; __n < __l_bucket; ++__n)
+ _M_erase_bucket(__n, nullptr);
+ if (__l_bucket != _M_buckets.size())
+ _M_erase_bucket(__l_bucket, __last._M_cur);
+ }
+}
+
+template <class _Val, class _Key, class _HF, class _Ex, class _Eq, class _All>
+inline void hashtable<_Val, _Key, _HF, _Ex, _Eq, _All>::erase(
+ const_iterator __first, const_iterator __last)
+{
+ erase(iterator(const_cast<_Node*>(__first._M_cur),
+ const_cast<hashtable*>(__first._M_ht)),
+ iterator(const_cast<_Node*>(__last._M_cur),
+ const_cast<hashtable*>(__last._M_ht)));
+}
+
+template <class _Val, class _Key, class _HF, class _Ex, class _Eq, class _All>
+inline void hashtable<_Val, _Key, _HF, _Ex, _Eq, _All>::erase(
+ const const_iterator& __it)
+{
+ erase(iterator(const_cast<_Node*>(__it._M_cur),
+ const_cast<hashtable*>(__it._M_ht)));
+}
+
+template <class _Val, class _Key, class _HF, class _Ex, class _Eq, class _All>
+void hashtable<_Val, _Key, _HF, _Ex, _Eq, _All>::resize(
+ size_type __num_elements_hint)
+{
+ const size_type __old_n = _M_buckets.size();
+ if (__num_elements_hint > __old_n) {
+ const size_type __n = _M_next_size(__num_elements_hint);
+ if (__n > __old_n) {
+ _M_buckets_type __tmp(__n, (_Node*)(nullptr),
+ _M_buckets.get_allocator());
+ try {
+ for (size_type __bucket = 0; __bucket < __old_n; ++__bucket) {
+ _Node* __first = _M_buckets[__bucket];
+ while (__first) {
+ size_type __new_bucket = _M_bkt_num(__first->_M_val, __n);
+ _M_buckets[__bucket] = __first->_M_next;
+ __first->_M_next = __tmp[__new_bucket];
+ __tmp[__new_bucket] = __first;
+ __first = _M_buckets[__bucket];
+ }
+ }
+ _M_buckets.swap(__tmp);
+ } catch (...) {
+ for (size_type __bucket = 0; __bucket < __tmp.size(); ++__bucket) {
+ while (__tmp[__bucket]) {
+ _Node* __next = __tmp[__bucket]->_M_next;
+ _M_delete_node(__tmp[__bucket]);
+ __tmp[__bucket] = __next;
+ }
+ }
+ throw;
+ }
+ }
+ }
+}
+
+template <class _Val, class _Key, class _HF, class _Ex, class _Eq, class _All>
+void hashtable<_Val, _Key, _HF, _Ex, _Eq, _All>::_M_erase_bucket(
+ const size_type __n, _Node* __first, _Node* __last)
+{
+ _Node* __cur = _M_buckets[__n];
+ if (__cur == __first)
+ _M_erase_bucket(__n, __last);
+ else {
+ _Node* __next;
+ for (__next = __cur->_M_next; __next != __first;
+ __cur = __next, __next = __cur->_M_next)
+ ;
+ while (__next != __last) {
+ __cur->_M_next = __next->_M_next;
+ _M_delete_node(__next);
+ __next = __cur->_M_next;
+ --_M_num_elements;
+ }
+ }
+}
+
+template <class _Val, class _Key, class _HF, class _Ex, class _Eq, class _All>
+void hashtable<_Val, _Key, _HF, _Ex, _Eq, _All>::_M_erase_bucket(
+ const size_type __n, _Node* __last)
+{
+ _Node* __cur = _M_buckets[__n];
+ while (__cur != __last) {
+ _Node* __next = __cur->_M_next;
+ _M_delete_node(__cur);
+ __cur = __next;
+ _M_buckets[__n] = __cur;
+ --_M_num_elements;
+ }
+}
+
+template <class _Val, class _Key, class _HF, class _Ex, class _Eq, class _All>
+void hashtable<_Val, _Key, _HF, _Ex, _Eq, _All>::clear()
+{
+ for (size_type __i = 0; __i < _M_buckets.size(); ++__i) {
+ _Node* __cur = _M_buckets[__i];
+ while (__cur != nullptr) {
+ _Node* __next = __cur->_M_next;
+ _M_delete_node(__cur);
+ __cur = __next;
+ }
+ _M_buckets[__i] = nullptr;
+ }
+ _M_num_elements = 0;
+}
+
+template <class _Val, class _Key, class _HF, class _Ex, class _Eq, class _All>
+void hashtable<_Val, _Key, _HF, _Ex, _Eq, _All>::_M_copy_from(
+ const hashtable& __ht)
+{
+ _M_buckets.clear();
+ _M_buckets.reserve(__ht._M_buckets.size());
+ _M_buckets.insert(_M_buckets.end(), __ht._M_buckets.size(), (_Node*)nullptr);
+ try {
+ for (size_type __i = 0; __i < __ht._M_buckets.size(); ++__i) {
+ const _Node* __cur = __ht._M_buckets[__i];
+ if (__cur) {
+ _Node* __copy = _M_new_node(__cur->_M_val);
+ _M_buckets[__i] = __copy;
+
+ for (_Node* __next = __cur->_M_next; __next;
+ __cur = __next, __next = __cur->_M_next) {
+ __copy->_M_next = _M_new_node(__next->_M_val);
+ __copy = __copy->_M_next;
+ }
+ }
+ }
+ _M_num_elements = __ht._M_num_elements;
+ } catch (...) {
+ clear();
+ throw;
+ }
+}
+
+} // namespace @KWSYS_NAMESPACE@
+
+// Undo warning suppression.
+# if defined(__clang__) && defined(__has_warning)
+# if __has_warning("-Wdeprecated")
+# pragma clang diagnostic pop
+# endif
+# endif
+
+# if defined(_MSC_VER)
+# pragma warning(pop)
+# endif
+
+#endif
diff --git a/test/API/driver/kwsys/kwsysHeaderDump.pl b/test/API/driver/kwsys/kwsysHeaderDump.pl
new file mode 100644
index 0000000..e3391e7
--- /dev/null
+++ b/test/API/driver/kwsys/kwsysHeaderDump.pl
@@ -0,0 +1,41 @@
+#!/usr/bin/perl
+# Distributed under the OSI-approved BSD 3-Clause License. See accompanying
+# file Copyright.txt or https://cmake.org/licensing#kwsys for details.
+
+if ( $#ARGV+1 < 2 )
+{
+ print "Usage: ./kwsysHeaderDump.pl <name> <header>\n";
+ exit(1);
+}
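+
+# Illustrative example (added; the header name is hypothetical): for a header
+# Foo.h.in that uses kwsysFoo_Bar and kwsysFoo_Baz, running
+#
+#   ./kwsysHeaderDump.pl Foo Foo.h.in
+#
+# prints "#define kwsysFoo ... kwsys_ns(Foo)" plus one aligned #define per
+# kwsysFoo_* symbol, followed by the matching "# undef" lines.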
+
+$name = $ARGV[0];
+$max = 0;
+open(INFILE, $ARGV[1]);
+while (chomp ($line = <INFILE>))
+{
+ if (($line !~ /^\#/) &&
+ ($line =~ s/.*kwsys${name}_([A-Za-z0-9_]*).*/\1/) &&
+ ($i{$line}++ < 1))
+ {
+ push(@lines, "$line");
+ if (length($line) > $max)
+ {
+ $max = length($line);
+ }
+ }
+}
+close(INFILE);
+
+$width = $max + 13;
+print sprintf("#define %-${width}s kwsys_ns(${name})\n", "kwsys${name}");
+foreach $l (@lines)
+{
+ print sprintf("#define %-${width}s kwsys_ns(${name}_$l)\n",
+ "kwsys${name}_$l");
+}
+print "\n";
+print sprintf("# undef kwsys${name}\n");
+foreach $l (@lines)
+{
+ print sprintf("# undef kwsys${name}_$l\n");
+}
diff --git a/test/API/driver/kwsys/kwsysPlatformTests.cmake b/test/API/driver/kwsys/kwsysPlatformTests.cmake
new file mode 100644
index 0000000..28d3f68
--- /dev/null
+++ b/test/API/driver/kwsys/kwsysPlatformTests.cmake
@@ -0,0 +1,216 @@
+# Distributed under the OSI-approved BSD 3-Clause License. See accompanying
+# file Copyright.txt or https://cmake.org/licensing#kwsys for details.
+
+SET(KWSYS_PLATFORM_TEST_FILE_C kwsysPlatformTestsC.c)
+SET(KWSYS_PLATFORM_TEST_FILE_CXX kwsysPlatformTestsCXX.cxx)
+
+MACRO(KWSYS_PLATFORM_TEST lang var description invert)
+ IF(NOT DEFINED ${var}_COMPILED)
+ MESSAGE(STATUS "${description}")
+ set(maybe_cxx_standard "")
+ if(CMAKE_VERSION VERSION_LESS 3.8 AND CMAKE_CXX_STANDARD)
+ set(maybe_cxx_standard "-DCMAKE_CXX_STANDARD=${CMAKE_CXX_STANDARD}")
+ endif()
+ TRY_COMPILE(${var}_COMPILED
+ ${CMAKE_CURRENT_BINARY_DIR}
+ ${CMAKE_CURRENT_SOURCE_DIR}/${KWSYS_PLATFORM_TEST_FILE_${lang}}
+ COMPILE_DEFINITIONS -DTEST_${var} ${KWSYS_PLATFORM_TEST_DEFINES} ${KWSYS_PLATFORM_TEST_EXTRA_FLAGS}
+ CMAKE_FLAGS "-DLINK_LIBRARIES:STRING=${KWSYS_PLATFORM_TEST_LINK_LIBRARIES}"
+ ${maybe_cxx_standard}
+ OUTPUT_VARIABLE OUTPUT)
+ IF(${var}_COMPILED)
+ FILE(APPEND
+ ${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/CMakeOutput.log
+ "${description} compiled with the following output:\n${OUTPUT}\n\n")
+ ELSE()
+ FILE(APPEND
+ ${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/CMakeError.log
+ "${description} failed to compile with the following output:\n${OUTPUT}\n\n")
+ ENDIF()
+ IF(${invert} MATCHES INVERT)
+ IF(${var}_COMPILED)
+ MESSAGE(STATUS "${description} - no")
+ ELSE()
+ MESSAGE(STATUS "${description} - yes")
+ ENDIF()
+ ELSE()
+ IF(${var}_COMPILED)
+ MESSAGE(STATUS "${description} - yes")
+ ELSE()
+ MESSAGE(STATUS "${description} - no")
+ ENDIF()
+ ENDIF()
+ ENDIF()
+ IF(${invert} MATCHES INVERT)
+ IF(${var}_COMPILED)
+ SET(${var} 0)
+ ELSE()
+ SET(${var} 1)
+ ENDIF()
+ ELSE()
+ IF(${var}_COMPILED)
+ SET(${var} 1)
+ ELSE()
+ SET(${var} 0)
+ ENDIF()
+ ENDIF()
+ENDMACRO()
+
+MACRO(KWSYS_PLATFORM_TEST_RUN lang var description invert)
+ IF(NOT DEFINED ${var})
+ MESSAGE(STATUS "${description}")
+ TRY_RUN(${var} ${var}_COMPILED
+ ${CMAKE_CURRENT_BINARY_DIR}
+ ${CMAKE_CURRENT_SOURCE_DIR}/${KWSYS_PLATFORM_TEST_FILE_${lang}}
+ COMPILE_DEFINITIONS -DTEST_${var} ${KWSYS_PLATFORM_TEST_DEFINES} ${KWSYS_PLATFORM_TEST_EXTRA_FLAGS}
+ OUTPUT_VARIABLE OUTPUT)
+
+ # Note that ${var} will be a 0 return value on success.
+ IF(${var}_COMPILED)
+ IF(${var})
+ FILE(APPEND
+ ${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/CMakeError.log
+ "${description} compiled but failed to run with the following output:\n${OUTPUT}\n\n")
+ ELSE()
+ FILE(APPEND
+ ${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/CMakeOutput.log
+ "${description} compiled and ran with the following output:\n${OUTPUT}\n\n")
+ ENDIF()
+ ELSE()
+ FILE(APPEND
+ ${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/CMakeError.log
+ "${description} failed to compile with the following output:\n${OUTPUT}\n\n")
+ SET(${var} -1 CACHE INTERNAL "${description} failed to compile.")
+ ENDIF()
+
+ IF(${invert} MATCHES INVERT)
+ IF(${var}_COMPILED)
+ IF(${var})
+ MESSAGE(STATUS "${description} - yes")
+ ELSE()
+ MESSAGE(STATUS "${description} - no")
+ ENDIF()
+ ELSE()
+ MESSAGE(STATUS "${description} - failed to compile")
+ ENDIF()
+ ELSE()
+ IF(${var}_COMPILED)
+ IF(${var})
+ MESSAGE(STATUS "${description} - no")
+ ELSE()
+ MESSAGE(STATUS "${description} - yes")
+ ENDIF()
+ ELSE()
+ MESSAGE(STATUS "${description} - failed to compile")
+ ENDIF()
+ ENDIF()
+ ENDIF()
+
+ IF(${invert} MATCHES INVERT)
+ IF(${var}_COMPILED)
+ IF(${var})
+ SET(${var} 1)
+ ELSE()
+ SET(${var} 0)
+ ENDIF()
+ ELSE()
+ SET(${var} 1)
+ ENDIF()
+ ELSE()
+ IF(${var}_COMPILED)
+ IF(${var})
+ SET(${var} 0)
+ ELSE()
+ SET(${var} 1)
+ ENDIF()
+ ELSE()
+ SET(${var} 0)
+ ENDIF()
+ ENDIF()
+ENDMACRO()
+
+MACRO(KWSYS_PLATFORM_C_TEST var description invert)
+ SET(KWSYS_PLATFORM_TEST_DEFINES ${KWSYS_PLATFORM_C_TEST_DEFINES})
+ SET(KWSYS_PLATFORM_TEST_EXTRA_FLAGS ${KWSYS_PLATFORM_C_TEST_EXTRA_FLAGS})
+ KWSYS_PLATFORM_TEST(C "${var}" "${description}" "${invert}")
+ SET(KWSYS_PLATFORM_TEST_DEFINES)
+ SET(KWSYS_PLATFORM_TEST_EXTRA_FLAGS)
+ENDMACRO()
+
+MACRO(KWSYS_PLATFORM_C_TEST_RUN var description invert)
+ SET(KWSYS_PLATFORM_TEST_DEFINES ${KWSYS_PLATFORM_C_TEST_DEFINES})
+ SET(KWSYS_PLATFORM_TEST_EXTRA_FLAGS ${KWSYS_PLATFORM_C_TEST_EXTRA_FLAGS})
+ KWSYS_PLATFORM_TEST_RUN(C "${var}" "${description}" "${invert}")
+ SET(KWSYS_PLATFORM_TEST_DEFINES)
+ SET(KWSYS_PLATFORM_TEST_EXTRA_FLAGS)
+ENDMACRO()
+
+MACRO(KWSYS_PLATFORM_CXX_TEST var description invert)
+ SET(KWSYS_PLATFORM_TEST_DEFINES ${KWSYS_PLATFORM_CXX_TEST_DEFINES})
+ SET(KWSYS_PLATFORM_TEST_EXTRA_FLAGS ${KWSYS_PLATFORM_CXX_TEST_EXTRA_FLAGS})
+ SET(KWSYS_PLATFORM_TEST_LINK_LIBRARIES ${KWSYS_PLATFORM_CXX_TEST_LINK_LIBRARIES})
+ KWSYS_PLATFORM_TEST(CXX "${var}" "${description}" "${invert}")
+ SET(KWSYS_PLATFORM_TEST_DEFINES)
+ SET(KWSYS_PLATFORM_TEST_EXTRA_FLAGS)
+ SET(KWSYS_PLATFORM_TEST_LINK_LIBRARIES)
+ENDMACRO()
+
+MACRO(KWSYS_PLATFORM_CXX_TEST_RUN var description invert)
+ SET(KWSYS_PLATFORM_TEST_DEFINES ${KWSYS_PLATFORM_CXX_TEST_DEFINES})
+ SET(KWSYS_PLATFORM_TEST_EXTRA_FLAGS ${KWSYS_PLATFORM_CXX_TEST_EXTRA_FLAGS})
+ KWSYS_PLATFORM_TEST_RUN(CXX "${var}" "${description}" "${invert}")
+ SET(KWSYS_PLATFORM_TEST_DEFINES)
+ SET(KWSYS_PLATFORM_TEST_EXTRA_FLAGS)
+ENDMACRO()
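+
+# A minimal usage sketch for the wrapper macros above. Invocations like these
+# normally live in the kwsys CMakeLists.txt (not part of this file); the
+# description strings here are illustrative, but the variable names correspond
+# to TEST_* blocks in kwsysPlatformTestsC.c and kwsysPlatformTestsCXX.cxx:
+#
+#   KWSYS_PLATFORM_C_TEST(KWSYS_C_HAS_PTRDIFF_T
+#     "Checking whether C compiler has ptrdiff_t in stddef.h" DIRECT)
+#   KWSYS_PLATFORM_CXX_TEST(KWSYS_CXX_HAS_SETENV
+#     "Checking whether CXX compiler has setenv" DIRECT)
+#
+# Passing INVERT as the last argument flips the reported result, as handled
+# by KWSYS_PLATFORM_TEST above.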
+
+#-----------------------------------------------------------------------------
+# KWSYS_PLATFORM_INFO_TEST(lang var description)
+#
+# Compile the test named by ${var} and store INFO strings from the binary.
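+#
+# A hypothetical invocation (the description string is illustrative, not taken
+# from this file) might look like:
+#
+#   KWSYS_PLATFORM_INFO_TEST(C KWSYS_C_TYPE_MACROS
+#     "Checking for C type size macros")
+#
+# where the matching TEST_KWSYS_C_TYPE_MACROS block in kwsysPlatformTestsC.c
+# embeds "INFO:macro[...]" strings that this macro extracts from the binary.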
+MACRO(KWSYS_PLATFORM_INFO_TEST lang var description)
+ # We can implement this macro on CMake 2.6 and above.
+ IF("${CMAKE_MAJOR_VERSION}.${CMAKE_MINOR_VERSION}" LESS 2.6)
+ SET(${var} "")
+ ELSE()
+ # Choose a location for the result binary.
+ SET(KWSYS_PLATFORM_INFO_FILE
+ ${CMAKE_CURRENT_BINARY_DIR}/${CMAKE_FILES_DIRECTORY}/${var}.bin)
+
+ # Compile the test binary.
+ IF(NOT EXISTS ${KWSYS_PLATFORM_INFO_FILE})
+ MESSAGE(STATUS "${description}")
+ TRY_COMPILE(${var}_COMPILED
+ ${CMAKE_CURRENT_BINARY_DIR}
+ ${CMAKE_CURRENT_SOURCE_DIR}/${KWSYS_PLATFORM_TEST_FILE_${lang}}
+ COMPILE_DEFINITIONS -DTEST_${var}
+ ${KWSYS_PLATFORM_${lang}_TEST_DEFINES}
+ ${KWSYS_PLATFORM_${lang}_TEST_EXTRA_FLAGS}
+ OUTPUT_VARIABLE OUTPUT
+ COPY_FILE ${KWSYS_PLATFORM_INFO_FILE}
+ )
+ IF(${var}_COMPILED)
+ FILE(APPEND
+ ${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/CMakeOutput.log
+ "${description} compiled with the following output:\n${OUTPUT}\n\n")
+ ELSE()
+ FILE(APPEND
+ ${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/CMakeError.log
+ "${description} failed to compile with the following output:\n${OUTPUT}\n\n")
+ ENDIF()
+ IF(${var}_COMPILED)
+ MESSAGE(STATUS "${description} - compiled")
+ ELSE()
+ MESSAGE(STATUS "${description} - failed")
+ ENDIF()
+ ENDIF()
+
+ # Parse info strings out of the compiled binary.
+ IF(${var}_COMPILED)
+ FILE(STRINGS ${KWSYS_PLATFORM_INFO_FILE} ${var} REGEX "INFO:[A-Za-z0-9]+\\[[^]]*\\]")
+ ELSE()
+ SET(${var} "")
+ ENDIF()
+
+ SET(KWSYS_PLATFORM_INFO_FILE)
+ ENDIF()
+ENDMACRO()
diff --git a/test/API/driver/kwsys/kwsysPlatformTestsC.c b/test/API/driver/kwsys/kwsysPlatformTestsC.c
new file mode 100644
index 0000000..b0cf7ad
--- /dev/null
+++ b/test/API/driver/kwsys/kwsysPlatformTestsC.c
@@ -0,0 +1,108 @@
+/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying
+ file Copyright.txt or https://cmake.org/licensing#kwsys for details. */
+/*
+ Macros to define main() in a cross-platform way.
+
+ Usage:
+
+ int KWSYS_PLATFORM_TEST_C_MAIN()
+ {
+ return 0;
+ }
+
+ int KWSYS_PLATFORM_TEST_C_MAIN_ARGS(argc, argv)
+ {
+ (void)argc; (void)argv;
+ return 0;
+ }
+*/
+#if defined(__CLASSIC_C__)
+# define KWSYS_PLATFORM_TEST_C_MAIN() main()
+# define KWSYS_PLATFORM_TEST_C_MAIN_ARGS(argc, argv) \
+ main(argc, argv) int argc; \
+ char* argv[];
+#else
+# define KWSYS_PLATFORM_TEST_C_MAIN() main(void)
+# define KWSYS_PLATFORM_TEST_C_MAIN_ARGS(argc, argv) \
+ main(int argc, char* argv[])
+#endif
+
+#ifdef TEST_KWSYS_C_HAS_PTRDIFF_T
+# include <stddef.h>
+int f(ptrdiff_t n)
+{
+ return n > 0;
+}
+int KWSYS_PLATFORM_TEST_C_MAIN()
+{
+ char* p = 0;
+ ptrdiff_t d = p - p;
+ (void)d;
+ return f(p - p);
+}
+#endif
+
+#ifdef TEST_KWSYS_C_HAS_SSIZE_T
+# include <unistd.h>
+int f(ssize_t n)
+{
+ return (int)n;
+}
+int KWSYS_PLATFORM_TEST_C_MAIN()
+{
+ ssize_t n = 0;
+ return f(n);
+}
+#endif
+
+#ifdef TEST_KWSYS_C_HAS_CLOCK_GETTIME_MONOTONIC
+# if defined(__APPLE__)
+# include <AvailabilityMacros.h>
+# if MAC_OS_X_VERSION_MIN_REQUIRED < 101200
+# error "clock_gettime not available on macOS < 10.12"
+# endif
+# endif
+# include <time.h>
+int KWSYS_PLATFORM_TEST_C_MAIN()
+{
+ struct timespec ts;
+ return clock_gettime(CLOCK_MONOTONIC, &ts);
+}
+#endif
+
+#ifdef TEST_KWSYS_C_TYPE_MACROS
+char* info_macros =
+# if defined(__SIZEOF_SHORT__)
+ "INFO:macro[__SIZEOF_SHORT__]\n"
+# endif
+# if defined(__SIZEOF_INT__)
+ "INFO:macro[__SIZEOF_INT__]\n"
+# endif
+# if defined(__SIZEOF_LONG__)
+ "INFO:macro[__SIZEOF_LONG__]\n"
+# endif
+# if defined(__SIZEOF_LONG_LONG__)
+ "INFO:macro[__SIZEOF_LONG_LONG__]\n"
+# endif
+# if defined(__SHORT_MAX__)
+ "INFO:macro[__SHORT_MAX__]\n"
+# endif
+# if defined(__INT_MAX__)
+ "INFO:macro[__INT_MAX__]\n"
+# endif
+# if defined(__LONG_MAX__)
+ "INFO:macro[__LONG_MAX__]\n"
+# endif
+# if defined(__LONG_LONG_MAX__)
+ "INFO:macro[__LONG_LONG_MAX__]\n"
+# endif
+ "";
+
+int KWSYS_PLATFORM_TEST_C_MAIN_ARGS(argc, argv)
+{
+ int require = 0;
+ require += info_macros[argc];
+ (void)argv;
+ return require;
+}
+#endif
diff --git a/test/API/driver/kwsys/kwsysPlatformTestsCXX.cxx b/test/API/driver/kwsys/kwsysPlatformTestsCXX.cxx
new file mode 100644
index 0000000..cfd5666
--- /dev/null
+++ b/test/API/driver/kwsys/kwsysPlatformTestsCXX.cxx
@@ -0,0 +1,335 @@
+/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying
+ file Copyright.txt or https://cmake.org/licensing#kwsys for details. */
+#ifdef TEST_KWSYS_CXX_HAS_CSTDIO
+# include <cstdio>
+int main()
+{
+ return 0;
+}
+#endif
+
+#ifdef TEST_KWSYS_CXX_HAS_LONG_LONG
+long long f(long long n)
+{
+ return n;
+}
+int main()
+{
+ long long n = 0;
+ return static_cast<int>(f(n));
+}
+#endif
+
+#ifdef TEST_KWSYS_CXX_HAS___INT64
+__int64 f(__int64 n)
+{
+ return n;
+}
+int main()
+{
+ __int64 n = 0;
+ return static_cast<int>(f(n));
+}
+#endif
+
+#ifdef TEST_KWSYS_CXX_STAT_HAS_ST_MTIM
+# include <sys/types.h>
+
+# include <sys/stat.h>
+# include <unistd.h>
+int main()
+{
+ struct stat stat1;
+ (void)stat1.st_mtim.tv_sec;
+ (void)stat1.st_mtim.tv_nsec;
+ return 0;
+}
+#endif
+
+#ifdef TEST_KWSYS_CXX_STAT_HAS_ST_MTIMESPEC
+# include <sys/types.h>
+
+# include <sys/stat.h>
+# include <unistd.h>
+int main()
+{
+ struct stat stat1;
+ (void)stat1.st_mtimespec.tv_sec;
+ (void)stat1.st_mtimespec.tv_nsec;
+ return 0;
+}
+#endif
+
+#ifdef TEST_KWSYS_CXX_SAME_LONG_AND___INT64
+void function(long**)
+{
+}
+int main()
+{
+ __int64** p = 0;
+ function(p);
+ return 0;
+}
+#endif
+
+#ifdef TEST_KWSYS_CXX_SAME_LONG_LONG_AND___INT64
+void function(long long**)
+{
+}
+int main()
+{
+ __int64** p = 0;
+ function(p);
+ return 0;
+}
+#endif
+
+#ifdef TEST_KWSYS_IOS_HAS_ISTREAM_LONG_LONG
+# include <iostream>
+int test_istream(std::istream& is, long long& x)
+{
+ return (is >> x) ? 1 : 0;
+}
+int main()
+{
+ long long x = 0;
+ return test_istream(std::cin, x);
+}
+#endif
+
+#ifdef TEST_KWSYS_IOS_HAS_OSTREAM_LONG_LONG
+# include <iostream>
+int test_ostream(std::ostream& os, long long x)
+{
+ return (os << x) ? 1 : 0;
+}
+int main()
+{
+ long long x = 0;
+ return test_ostream(std::cout, x);
+}
+#endif
+
+#ifdef TEST_KWSYS_IOS_HAS_ISTREAM___INT64
+# include <iostream>
+int test_istream(std::istream& is, __int64& x)
+{
+ return (is >> x) ? 1 : 0;
+}
+int main()
+{
+ __int64 x = 0;
+ return test_istream(std::cin, x);
+}
+#endif
+
+#ifdef TEST_KWSYS_IOS_HAS_OSTREAM___INT64
+# include <iostream>
+int test_ostream(std::ostream& os, __int64 x)
+{
+ return (os << x) ? 1 : 0;
+}
+int main()
+{
+ __int64 x = 0;
+ return test_ostream(std::cout, x);
+}
+#endif
+
+#ifdef TEST_KWSYS_CXX_HAS_SETENV
+# include <stdlib.h>
+int main()
+{
+ return setenv("A", "B", 1);
+}
+#endif
+
+#ifdef TEST_KWSYS_CXX_HAS_UNSETENV
+# include <stdlib.h>
+int main()
+{
+ unsetenv("A");
+ return 0;
+}
+#endif
+
+#ifdef TEST_KWSYS_CXX_HAS_ENVIRON_IN_STDLIB_H
+# include <stdlib.h>
+int main()
+{
+ char* e = environ[0];
+ return e ? 0 : 1;
+}
+#endif
+
+#ifdef TEST_KWSYS_CXX_HAS_GETLOADAVG
+// Match feature definitions from SystemInformation.cxx
+# if (defined(__GNUC__) || defined(__PGI)) && !defined(_GNU_SOURCE)
+# define _GNU_SOURCE
+# endif
+# include <stdlib.h>
+int main()
+{
+ double loadavg[3] = { 0.0, 0.0, 0.0 };
+ return getloadavg(loadavg, 3);
+}
+#endif
+
+#ifdef TEST_KWSYS_CXX_HAS_RLIMIT64
+# include <sys/resource.h>
+int main()
+{
+ struct rlimit64 rlim;
+ return getrlimit64(0, &rlim);
+}
+#endif
+
+#ifdef TEST_KWSYS_CXX_HAS_ATOLL
+# include <stdlib.h>
+int main()
+{
+ const char* str = "1024";
+ return static_cast<int>(atoll(str));
+}
+#endif
+
+#ifdef TEST_KWSYS_CXX_HAS_ATOL
+# include <stdlib.h>
+int main()
+{
+ const char* str = "1024";
+ return static_cast<int>(atol(str));
+}
+#endif
+
+#ifdef TEST_KWSYS_CXX_HAS__ATOI64
+# include <stdlib.h>
+int main()
+{
+ const char* str = "1024";
+ return static_cast<int>(_atoi64(str));
+}
+#endif
+
+#ifdef TEST_KWSYS_CXX_HAS_UTIMES
+# include <sys/time.h>
+int main()
+{
+ struct timeval* current_time = 0;
+ return utimes("/example", current_time);
+}
+#endif
+
+#ifdef TEST_KWSYS_CXX_HAS_UTIMENSAT
+# include <fcntl.h>
+# include <sys/stat.h>
+# if defined(__APPLE__)
+# include <AvailabilityMacros.h>
+# if MAC_OS_X_VERSION_MIN_REQUIRED < 101300
+# error "utimensat not available on macOS < 10.13"
+# endif
+# endif
+int main()
+{
+ struct timespec times[2] = { { 0, UTIME_OMIT }, { 0, UTIME_NOW } };
+ return utimensat(AT_FDCWD, "/example", times, AT_SYMLINK_NOFOLLOW);
+}
+#endif
+
+#ifdef TEST_KWSYS_CXX_HAS_BACKTRACE
+# if defined(__PATHSCALE__) || defined(__PATHCC__) || \
+ (defined(__LSB_VERSION__) && (__LSB_VERSION__ < 41))
+backtrace does not work with this compiler or os
+# endif
+# if (defined(__GNUC__) || defined(__PGI)) && !defined(_GNU_SOURCE)
+# define _GNU_SOURCE
+# endif
+# include <execinfo.h>
+int main()
+{
+ void* stackSymbols[256];
+ backtrace(stackSymbols, 256);
+ backtrace_symbols(&stackSymbols[0], 1);
+ return 0;
+}
+#endif
+
+#ifdef TEST_KWSYS_CXX_HAS_DLADDR
+# if (defined(__GNUC__) || defined(__PGI)) && !defined(_GNU_SOURCE)
+# define _GNU_SOURCE
+# endif
+# include <dlfcn.h>
+int main()
+{
+ Dl_info info;
+ int ierr = dladdr((void*)main, &info);
+ return 0;
+}
+#endif
+
+#ifdef TEST_KWSYS_CXX_HAS_CXXABI
+# if (defined(__GNUC__) || defined(__PGI)) && !defined(_GNU_SOURCE)
+# define _GNU_SOURCE
+# endif
+# if defined(__SUNPRO_CC) && __SUNPRO_CC >= 0x5130 && __linux && \
+ __SUNPRO_CC_COMPAT == 'G'
+# include <iostream>
+# endif
+# include <cxxabi.h>
+int main()
+{
+ int status = 0;
+ size_t bufferLen = 512;
+ char buffer[512] = { '\0' };
+ const char* function = "_ZN5kwsys17SystemInformation15GetProgramStackEii";
+ char* demangledFunction =
+ abi::__cxa_demangle(function, buffer, &bufferLen, &status);
+ return status;
+}
+#endif
+
+#ifdef TEST_KWSYS_CXX_HAS_BORLAND_ASM
+int main()
+{
+ int a = 1;
+ __asm {
+ xor EBX, EBX;
+ mov a, EBX;
+ }
+
+ return a;
+}
+#endif
+
+#ifdef TEST_KWSYS_CXX_HAS_BORLAND_ASM_CPUID
+int main()
+{
+ int a = 0;
+ __asm {
+ xor EAX, EAX;
+ cpuid;
+ mov a, EAX;
+ }
+
+ return a;
+}
+#endif
+
+#ifdef TEST_KWSYS_STL_HAS_WSTRING
+# include <string>
+void f(std::wstring*)
+{
+}
+int main()
+{
+ return 0;
+}
+#endif
+
+#ifdef TEST_KWSYS_CXX_HAS_EXT_STDIO_FILEBUF_H
+# include <ext/stdio_filebuf.h>
+int main()
+{
+ return 0;
+}
+#endif
diff --git a/test/API/driver/kwsys/kwsysPrivate.h b/test/API/driver/kwsys/kwsysPrivate.h
new file mode 100644
index 0000000..dd9c127
--- /dev/null
+++ b/test/API/driver/kwsys/kwsysPrivate.h
@@ -0,0 +1,34 @@
+/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying
+ file Copyright.txt or https://cmake.org/licensing#kwsys for details. */
+#ifndef KWSYS_NAMESPACE
+# error "Do not include kwsysPrivate.h outside of kwsys c and cxx files."
+#endif
+
+#ifndef _kwsysPrivate_h
+# define _kwsysPrivate_h
+
+/*
+ Define KWSYS_HEADER macro to help the c and cxx files include kwsys
+ headers from the configured namespace directory. The macro can be
+ used like this:
+
+ #include KWSYS_HEADER(Directory.hxx)
+ #include KWSYS_HEADER(std/vector)
+*/
+/* clang-format off */
+#define KWSYS_HEADER(x) KWSYS_HEADER0(KWSYS_NAMESPACE/x)
+/* clang-format on */
+# define KWSYS_HEADER0(x) KWSYS_HEADER1(x)
+# define KWSYS_HEADER1(x) <x>
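+
+/* For example, assuming the build configures KWSYS_NAMESPACE as kwsys,
+
+     #include KWSYS_HEADER(Directory.hxx)
+
+   expands through KWSYS_HEADER0/KWSYS_HEADER1 to
+
+     #include <kwsys/Directory.hxx>
+*/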
+
+/*
+ Define KWSYS_NAMESPACE_STRING to be a string constant containing the
+ name configured for this instance of the kwsys library.
+*/
+# define KWSYS_NAMESPACE_STRING KWSYS_NAMESPACE_STRING0(KWSYS_NAMESPACE)
+# define KWSYS_NAMESPACE_STRING0(x) KWSYS_NAMESPACE_STRING1(x)
+# define KWSYS_NAMESPACE_STRING1(x) # x
+
+#else
+# error "kwsysPrivate.h included multiple times."
+#endif
diff --git a/test/API/driver/kwsys/testCommandLineArguments.cxx b/test/API/driver/kwsys/testCommandLineArguments.cxx
new file mode 100644
index 0000000..1778a9b
--- /dev/null
+++ b/test/API/driver/kwsys/testCommandLineArguments.cxx
@@ -0,0 +1,209 @@
+/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying
+ file Copyright.txt or https://cmake.org/licensing#kwsys for details. */
+#include "kwsysPrivate.h"
+#include KWSYS_HEADER(CommandLineArguments.hxx)
+
+// Work around a CMake dependency scanning limitation. This must
+// duplicate the above list of headers.
+#if 0
+# include "CommandLineArguments.hxx.in"
+#endif
+
+#include <iostream>
+#include <vector>
+
+#include <stddef.h> /* size_t */
+#include <string.h> /* strcmp */
+
+static void* random_ptr = reinterpret_cast<void*>(0x123);
+
+static int argument(const char* arg, const char* value, void* call_data)
+{
+ std::cout << "Got argument: \"" << arg << "\" value: \""
+ << (value ? value : "(null)") << "\"" << std::endl;
+ if (call_data != random_ptr) {
+ std::cerr << "Problem processing call_data" << std::endl;
+ return 0;
+ }
+ return 1;
+}
+
+static int unknown_argument(const char* argument, void* call_data)
+{
+ std::cout << "Got unknown argument: \"" << argument << "\"" << std::endl;
+ if (call_data != random_ptr) {
+ std::cerr << "Problem processing call_data" << std::endl;
+ return 0;
+ }
+ return 1;
+}
+
+static bool CompareTwoItemsOnList(bool i1, bool i2)
+{
+ return i1 == i2;
+}
+static bool CompareTwoItemsOnList(int i1, int i2)
+{
+ return i1 == i2;
+}
+static bool CompareTwoItemsOnList(double i1, double i2)
+{
+ return i1 == i2;
+}
+static bool CompareTwoItemsOnList(const char* i1, const char* i2)
+{
+ return strcmp(i1, i2) == 0;
+}
+static bool CompareTwoItemsOnList(const std::string& i1, const std::string& i2)
+{
+ return i1 == i2;
+}
+
+int testCommandLineArguments(int argc, char* argv[])
+{
+ // Example run: ./testCommandLineArguments --some-int-variable 4
+ // --another-bool-variable --some-bool-variable=yes
+ // --some-stl-string-variable=foobar --set-bool-arg1 --set-bool-arg2
+ // --some-string-variable=hello
+
+ int res = 0;
+ kwsys::CommandLineArguments arg;
+ arg.Initialize(argc, argv);
+
+ // For error handling
+ arg.SetClientData(random_ptr);
+ arg.SetUnknownArgumentCallback(unknown_argument);
+
+ int some_int_variable = 10;
+ double some_double_variable = 10.10;
+ char* some_string_variable = nullptr;
+ std::string some_stl_string_variable;
+ bool some_bool_variable = false;
+ bool some_bool_variable1 = false;
+ bool bool_arg1 = false;
+ int bool_arg2 = 0;
+
+ std::vector<int> numbers_argument;
+ int valid_numbers[] = { 5, 1, 8, 3, 7, 1, 3, 9, 7, 1 };
+
+ std::vector<double> doubles_argument;
+ double valid_doubles[] = { 12.5, 1.31, 22 };
+
+ std::vector<bool> bools_argument;
+ bool valid_bools[] = { true, true, false };
+
+ std::vector<char*> strings_argument;
+ const char* valid_strings[] = { "andy", "bill", "brad", "ken" };
+
+ std::vector<std::string> stl_strings_argument;
+ std::string valid_stl_strings[] = { "ken", "brad", "bill", "andy" };
+
+ typedef kwsys::CommandLineArguments argT;
+
+ arg.AddArgument("--some-int-variable", argT::SPACE_ARGUMENT,
+ &some_int_variable, "Set some random int variable");
+ arg.AddArgument("--some-double-variable", argT::CONCAT_ARGUMENT,
+ &some_double_variable, "Set some random double variable");
+ arg.AddArgument("--some-string-variable", argT::EQUAL_ARGUMENT,
+ &some_string_variable, "Set some random string variable");
+ arg.AddArgument("--some-stl-string-variable", argT::EQUAL_ARGUMENT,
+ &some_stl_string_variable,
+ "Set some random stl string variable");
+ arg.AddArgument("--some-bool-variable", argT::EQUAL_ARGUMENT,
+ &some_bool_variable, "Set some random bool variable");
+ arg.AddArgument("--another-bool-variable", argT::NO_ARGUMENT,
+ &some_bool_variable1, "Set some random bool variable 1");
+ arg.AddBooleanArgument("--set-bool-arg1", &bool_arg1,
+ "Test AddBooleanArgument 1");
+ arg.AddBooleanArgument("--set-bool-arg2", &bool_arg2,
+ "Test AddBooleanArgument 2");
+ arg.AddArgument("--some-multi-argument", argT::MULTI_ARGUMENT,
+ &numbers_argument, "Some multiple values variable");
+ arg.AddArgument("-N", argT::SPACE_ARGUMENT, &doubles_argument,
+ "Some explicit multiple values variable");
+ arg.AddArgument("-BB", argT::CONCAT_ARGUMENT, &bools_argument,
+ "Some explicit multiple values variable");
+ arg.AddArgument("-SS", argT::EQUAL_ARGUMENT, &strings_argument,
+ "Some explicit multiple values variable");
+ arg.AddArgument("-SSS", argT::MULTI_ARGUMENT, &stl_strings_argument,
+ "Some explicit multiple values variable");
+
+ arg.AddCallback("-A", argT::NO_ARGUMENT, argument, random_ptr,
+ "Some option -A. This option has a multiline comment. It "
+ "should demonstrate how the code splits lines.");
+ arg.AddCallback("-B", argT::SPACE_ARGUMENT, argument, random_ptr,
+ "Option -B takes argument with space");
+ arg.AddCallback("-C", argT::EQUAL_ARGUMENT, argument, random_ptr,
+ "Option -C takes argument after =");
+ arg.AddCallback("-D", argT::CONCAT_ARGUMENT, argument, random_ptr,
+ "This option takes concatenated argument");
+ arg.AddCallback("--long1", argT::NO_ARGUMENT, argument, random_ptr, "-A");
+ arg.AddCallback("--long2", argT::SPACE_ARGUMENT, argument, random_ptr, "-B");
+ arg.AddCallback("--long3", argT::EQUAL_ARGUMENT, argument, random_ptr,
+ "Same as -C but a bit different");
+ arg.AddCallback("--long4", argT::CONCAT_ARGUMENT, argument, random_ptr,
+ "-C");
+
+ if (!arg.Parse()) {
+ std::cerr << "Problem parsing arguments" << std::endl;
+ res = 1;
+ }
+ std::cout << "Help: " << arg.GetHelp() << std::endl;
+
+ std::cout << "Some int variable was set to: " << some_int_variable
+ << std::endl;
+ std::cout << "Some double variable was set to: " << some_double_variable
+ << std::endl;
+ if (some_string_variable &&
+ strcmp(some_string_variable, "test string with space") == 0) {
+ std::cout << "Some string variable was set to: " << some_string_variable
+ << std::endl;
+ delete[] some_string_variable;
+ } else {
+ std::cerr << "Problem setting string variable" << std::endl;
+ res = 1;
+ }
+ size_t cc;
+#define CompareTwoLists(list1, list_valid, lsize) \
+ do { \
+ if (list1.size() != lsize) { \
+ std::cerr << "Problem setting " #list1 ". Size is: " << list1.size() \
+ << " should be: " << lsize << std::endl; \
+ res = 1; \
+ } else { \
+ std::cout << #list1 " argument set:"; \
+ for (cc = 0; cc < lsize; ++cc) { \
+ std::cout << " " << list1[cc]; \
+ if (!CompareTwoItemsOnList(list1[cc], list_valid[cc])) { \
+ std::cerr << "Problem setting " #list1 ". Value of " << cc \
+ << " is: [" << list1[cc] << "] <> [" << list_valid[cc] \
+ << "]" << std::endl; \
+ res = 1; \
+ break; \
+ } \
+ } \
+ std::cout << std::endl; \
+ } \
+ } while (0)
+ CompareTwoLists(numbers_argument, valid_numbers, 10);
+ CompareTwoLists(doubles_argument, valid_doubles, 3);
+ CompareTwoLists(bools_argument, valid_bools, 3);
+ CompareTwoLists(strings_argument, valid_strings, 4);
+ CompareTwoLists(stl_strings_argument, valid_stl_strings, 4);
+
+ std::cout << "Some STL String variable was set to: "
+ << some_stl_string_variable << std::endl;
+ std::cout << "Some bool variable was set to: " << some_bool_variable
+ << std::endl;
+ std::cout << "Some bool variable was set to: " << some_bool_variable1
+ << std::endl;
+ std::cout << "bool_arg1 variable was set to: " << bool_arg1 << std::endl;
+ std::cout << "bool_arg2 variable was set to: " << bool_arg2 << std::endl;
+ std::cout << std::endl;
+
+ for (cc = 0; cc < strings_argument.size(); ++cc) {
+ delete[] strings_argument[cc];
+ strings_argument[cc] = nullptr;
+ }
+ return res;
+}
diff --git a/test/API/driver/kwsys/testCommandLineArguments1.cxx b/test/API/driver/kwsys/testCommandLineArguments1.cxx
new file mode 100644
index 0000000..64561b1
--- /dev/null
+++ b/test/API/driver/kwsys/testCommandLineArguments1.cxx
@@ -0,0 +1,93 @@
+/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying
+ file Copyright.txt or https://cmake.org/licensing#kwsys for details. */
+#include "kwsysPrivate.h"
+#include KWSYS_HEADER(CommandLineArguments.hxx)
+
+// Work around a CMake dependency scanning limitation. This must
+// duplicate the above list of headers.
+#if 0
+# include "CommandLineArguments.hxx.in"
+#endif
+
+#include <iostream>
+#include <vector>
+
+#include <assert.h> /* assert */
+#include <string.h> /* strcmp */
+
+int testCommandLineArguments1(int argc, char* argv[])
+{
+ kwsys::CommandLineArguments arg;
+ arg.Initialize(argc, argv);
+
+ int n = 0;
+ char* m = nullptr;
+ std::string p;
+ int res = 0;
+
+ typedef kwsys::CommandLineArguments argT;
+ arg.AddArgument("-n", argT::SPACE_ARGUMENT, &n, "Argument N");
+ arg.AddArgument("-m", argT::EQUAL_ARGUMENT, &m, "Argument M");
+ arg.AddBooleanArgument("-p", &p, "Argument P");
+
+ arg.StoreUnusedArguments(true);
+
+ if (!arg.Parse()) {
+ std::cerr << "Problem parsing arguments" << std::endl;
+ res = 1;
+ }
+ if (n != 24) {
+ std::cout << "Problem setting N. Value of N: " << n << std::endl;
+ res = 1;
+ }
+ if (!m || strcmp(m, "test value") != 0) {
+ std::cout << "Problem setting M. Value of M: " << m << std::endl;
+ res = 1;
+ }
+ if (p != "1") {
+ std::cout << "Problem setting P. Value of P: " << p << std::endl;
+ res = 1;
+ }
+ std::cout << "Value of N: " << n << std::endl;
+ std::cout << "Value of M: " << m << std::endl;
+ std::cout << "Value of P: " << p << std::endl;
+ if (m) {
+ delete[] m;
+ }
+
+ char** newArgv = nullptr;
+ int newArgc = 0;
+ arg.GetUnusedArguments(&newArgc, &newArgv);
+ int cc;
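+ // Index 0 of the expected list is the program path, which varies from run
+ // to run, so it is left as nullptr; nullptr entries are skipped by the
+ // comparison loop below.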
+ const char* valid_unused_args[9] = { nullptr,
+ "--ignored",
+ "--second-ignored",
+ "third-ignored",
+ "some",
+ "junk",
+ "at",
+ "the",
+ "end" };
+ if (newArgc != 9) {
+ std::cerr << "Bad number of unused arguments: " << newArgc << std::endl;
+ res = 1;
+ }
+ for (cc = 0; cc < newArgc; ++cc) {
+ assert(newArgv[cc]); /* Quiet Clang scan-build. */
+ std::cout << "Unused argument[" << cc << "] = [" << newArgv[cc] << "]"
+ << std::endl;
+ if (cc >= 9) {
+ std::cerr << "Too many unused arguments: " << cc << std::endl;
+ res = 1;
+ } else if (valid_unused_args[cc] &&
+ strcmp(valid_unused_args[cc], newArgv[cc]) != 0) {
+ std::cerr << "Bad unused argument [" << cc << "] \"" << newArgv[cc]
+ << "\" should be: \"" << valid_unused_args[cc] << "\""
+ << std::endl;
+ res = 1;
+ }
+ }
+ arg.DeleteRemainingArguments(newArgc, &newArgv);
+
+ return res;
+}
diff --git a/test/API/driver/kwsys/testConfigure.cxx b/test/API/driver/kwsys/testConfigure.cxx
new file mode 100644
index 0000000..a3c2ed3
--- /dev/null
+++ b/test/API/driver/kwsys/testConfigure.cxx
@@ -0,0 +1,30 @@
+/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying
+file Copyright.txt or https://cmake.org/licensing#kwsys for details. */
+#include "kwsysPrivate.h"
+#include KWSYS_HEADER(Configure.hxx)
+
+// Work around a CMake dependency scanning limitation. This must
+// duplicate the above list of headers.
+#if 0
+# include "Configure.hxx.in"
+#endif
+
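+// KWSYS_FALLTHROUGH comes from the configured Configure.hxx; it is expected
+// to expand to the compiler's fall-through annotation when one is available
+// and to a harmless no-op otherwise. The switch below simply verifies that
+// it compiles and that both branches execute.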
+static bool testFallthrough(int n)
+{
+ int r = 0;
+ switch (n) {
+ case 1:
+ ++r;
+ KWSYS_FALLTHROUGH;
+ default:
+ ++r;
+ }
+ return r == 2;
+}
+
+int testConfigure(int, char* [])
+{
+ bool res = true;
+ res = testFallthrough(1) && res;
+ return res ? 0 : 1;
+}
diff --git a/test/API/driver/kwsys/testConsoleBuf.cxx b/test/API/driver/kwsys/testConsoleBuf.cxx
new file mode 100644
index 0000000..4b7ddf0
--- /dev/null
+++ b/test/API/driver/kwsys/testConsoleBuf.cxx
@@ -0,0 +1,782 @@
+/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying
+ file Copyright.txt or https://cmake.org/licensing#kwsys for details. */
+#include "kwsysPrivate.h"
+
+// Ignore Windows version levels defined by command-line flags. This
+// source needs access to all APIs available on the host in order for
+// the test to run properly. The test binary is not installed anyway.
+#undef _WIN32_WINNT
+#undef NTDDI_VERSION
+
+#include KWSYS_HEADER(Encoding.hxx)
+
+// Work around a CMake dependency scanning limitation. This must
+// duplicate the above list of headers.
+#if 0
+# include "Encoding.hxx.in"
+#endif
+
+#if defined(_WIN32)
+
+# include <algorithm>
+# include <iomanip>
+# include <iostream>
+# include <stdexcept>
+# include <string.h>
+# include <wchar.h>
+# include <windows.h>
+
+# include "testConsoleBuf.hxx"
+
+# if defined(_MSC_VER) && _MSC_VER >= 1800
+# define KWSYS_WINDOWS_DEPRECATED_GetVersion
+# endif
+// يونيكود ("Unicode" in Arabic; the readable form of the string below)
+static const WCHAR UnicodeInputTestString[] =
+ L"\u064A\u0648\u0646\u064A\u0643\u0648\u062F!";
+static UINT TestCodepage = KWSYS_ENCODING_DEFAULT_CODEPAGE;
+
+static const DWORD waitTimeout = 10 * 1000;
+static STARTUPINFO startupInfo;
+static PROCESS_INFORMATION processInfo;
+static HANDLE beforeInputEvent;
+static HANDLE afterOutputEvent;
+static std::string encodedInputTestString;
+static std::string encodedTestString;
+
+static void displayError(DWORD errorCode)
+{
+ std::cerr.setf(std::ios::hex, std::ios::basefield);
+ std::cerr << "Failed with error: 0x" << errorCode << "!" << std::endl;
+ LPWSTR message;
+ if (FormatMessageW(FORMAT_MESSAGE_ALLOCATE_BUFFER |
+ FORMAT_MESSAGE_FROM_SYSTEM,
+ nullptr, errorCode, 0, (LPWSTR)&message, 0, nullptr)) {
+ std::cerr << "Error message: " << kwsys::Encoding::ToNarrow(message)
+ << std::endl;
+ HeapFree(GetProcessHeap(), 0, message);
+ } else {
+ std::cerr << "FormatMessage() failed with error: 0x" << GetLastError()
+ << "!" << std::endl;
+ }
+ std::cerr.unsetf(std::ios::hex);
+}
+
+std::basic_streambuf<char>* errstream(const char* unused)
+{
+ static_cast<void>(unused);
+ return std::cerr.rdbuf();
+}
+
+std::basic_streambuf<wchar_t>* errstream(const wchar_t* unused)
+{
+ static_cast<void>(unused);
+ return std::wcerr.rdbuf();
+}
+
+template <typename T>
+static void dumpBuffers(const T* expected, const T* received, size_t size)
+{
+ std::basic_ostream<T> err(errstream(expected));
+ err << "Expected output: '" << std::basic_string<T>(expected, size) << "'"
+ << std::endl;
+ if (err.fail()) {
+ err.clear();
+ err << "--- Error while outputting ---" << std::endl;
+ }
+ err << "Received output: '" << std::basic_string<T>(received, size) << "'"
+ << std::endl;
+ if (err.fail()) {
+ err.clear();
+ err << "--- Error while outputting ---" << std::endl;
+ }
+ std::cerr << "Expected output | Received output" << std::endl;
+ for (size_t i = 0; i < size; i++) {
+ std::cerr << std::setbase(16) << std::setfill('0') << " "
+ << "0x" << std::setw(8) << static_cast<unsigned int>(expected[i])
+ << " | "
+ << "0x" << std::setw(8)
+ << static_cast<unsigned int>(received[i]);
+ if (static_cast<unsigned int>(expected[i]) !=
+ static_cast<unsigned int>(received[i])) {
+ std::cerr << " MISMATCH!";
+ }
+ std::cerr << std::endl;
+ }
+ std::cerr << std::endl;
+}
+
+static bool createProcess(HANDLE hIn, HANDLE hOut, HANDLE hErr)
+{
+ BOOL bInheritHandles = FALSE;
+ DWORD dwCreationFlags = 0;
+ memset(&processInfo, 0, sizeof(processInfo));
+ memset(&startupInfo, 0, sizeof(startupInfo));
+ startupInfo.cb = sizeof(startupInfo);
+ startupInfo.dwFlags = STARTF_USESHOWWINDOW;
+ startupInfo.wShowWindow = SW_HIDE;
+ if (hIn || hOut || hErr) {
+ startupInfo.dwFlags |= STARTF_USESTDHANDLES;
+ startupInfo.hStdInput = hIn;
+ startupInfo.hStdOutput = hOut;
+ startupInfo.hStdError = hErr;
+ bInheritHandles = TRUE;
+ }
+
+ WCHAR cmd[MAX_PATH];
+ if (GetModuleFileNameW(nullptr, cmd, MAX_PATH) == 0) {
+ std::cerr << "GetModuleFileName failed!" << std::endl;
+ return false;
+ }
+ WCHAR* p = cmd + wcslen(cmd);
+ while (p > cmd && *p != L'\\')
+ p--;
+ *(p + 1) = 0;
+ wcscat(cmd, cmdConsoleBufChild);
+ wcscat(cmd, L".exe");
+
+ bool success =
+ CreateProcessW(nullptr, // No module name (use command line)
+ cmd, // Command line
+ nullptr, // Process handle not inheritable
+ nullptr, // Thread handle not inheritable
+ bInheritHandles, // Set handle inheritance
+ dwCreationFlags,
+ nullptr, // Use parent's environment block
+ nullptr, // Use parent's starting directory
+ &startupInfo, // Pointer to STARTUPINFO structure
+ &processInfo) !=
+ 0; // Pointer to PROCESS_INFORMATION structure
+ if (!success) {
+ DWORD lastError = GetLastError();
+ std::cerr << "CreateProcess(" << kwsys::Encoding::ToNarrow(cmd) << ")"
+ << std::endl;
+ displayError(lastError);
+ }
+ return success;
+}
+
+static void finishProcess(bool success)
+{
+ if (success) {
+ success =
+ WaitForSingleObject(processInfo.hProcess, waitTimeout) == WAIT_OBJECT_0;
+ }
+ if (!success) {
+ TerminateProcess(processInfo.hProcess, 1);
+ }
+ CloseHandle(processInfo.hProcess);
+ CloseHandle(processInfo.hThread);
+}
+
+static bool createPipe(PHANDLE readPipe, PHANDLE writePipe)
+{
+ SECURITY_ATTRIBUTES securityAttributes;
+ securityAttributes.nLength = sizeof(SECURITY_ATTRIBUTES);
+ securityAttributes.bInheritHandle = TRUE;
+ securityAttributes.lpSecurityDescriptor = nullptr;
+ return CreatePipe(readPipe, writePipe, &securityAttributes, 0) != 0;
+}
+
+static void finishPipe(HANDLE readPipe, HANDLE writePipe)
+{
+ if (readPipe != INVALID_HANDLE_VALUE) {
+ CloseHandle(readPipe);
+ }
+ if (writePipe != INVALID_HANDLE_VALUE) {
+ CloseHandle(writePipe);
+ }
+}
+
+static HANDLE createFile(LPCWSTR fileName)
+{
+ SECURITY_ATTRIBUTES securityAttributes;
+ securityAttributes.nLength = sizeof(SECURITY_ATTRIBUTES);
+ securityAttributes.bInheritHandle = TRUE;
+ securityAttributes.lpSecurityDescriptor = nullptr;
+
+ HANDLE file =
+ CreateFileW(fileName, GENERIC_READ | GENERIC_WRITE,
+ 0, // do not share
+ &securityAttributes,
+ CREATE_ALWAYS, // overwrite existing
+ FILE_ATTRIBUTE_TEMPORARY | FILE_FLAG_DELETE_ON_CLOSE,
+ nullptr); // no template
+ if (file == INVALID_HANDLE_VALUE) {
+ DWORD lastError = GetLastError();
+ std::cerr << "CreateFile(" << kwsys::Encoding::ToNarrow(fileName) << ")"
+ << std::endl;
+ displayError(lastError);
+ }
+ return file;
+}
+
+static void finishFile(HANDLE file)
+{
+ if (file != INVALID_HANDLE_VALUE) {
+ CloseHandle(file);
+ }
+}
+
+# ifndef MAPVK_VK_TO_VSC
+# define MAPVK_VK_TO_VSC (0)
+# endif
+
+static void writeInputKeyEvent(INPUT_RECORD inputBuffer[], WCHAR chr)
+{
+ inputBuffer[0].EventType = KEY_EVENT;
+ inputBuffer[0].Event.KeyEvent.bKeyDown = TRUE;
+ inputBuffer[0].Event.KeyEvent.wRepeatCount = 1;
+ SHORT keyCode = VkKeyScanW(chr);
+ if (keyCode == -1) {
+ // The character can't be entered with the current keyboard layout.
+ // Just use any key code; it doesn't really matter which.
+ keyCode = 'K';
+ }
+ inputBuffer[0].Event.KeyEvent.wVirtualKeyCode = LOBYTE(keyCode);
+ inputBuffer[0].Event.KeyEvent.wVirtualScanCode = MapVirtualKey(
+ inputBuffer[0].Event.KeyEvent.wVirtualKeyCode, MAPVK_VK_TO_VSC);
+ inputBuffer[0].Event.KeyEvent.uChar.UnicodeChar = chr;
+ inputBuffer[0].Event.KeyEvent.dwControlKeyState = 0;
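+ // Assumed layout of VkKeyScanW's result: the high-order byte encodes the
+ // modifier state (bit 0 = SHIFT, bit 1 = CTRL, bit 2 = ALT), which the
+ // checks below translate into dwControlKeyState flags.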
+ if ((HIBYTE(keyCode) & 1) == 1) {
+ inputBuffer[0].Event.KeyEvent.dwControlKeyState |= SHIFT_PRESSED;
+ }
+ if ((HIBYTE(keyCode) & 2) == 2) {
+ inputBuffer[0].Event.KeyEvent.dwControlKeyState |= RIGHT_CTRL_PRESSED;
+ }
+ if ((HIBYTE(keyCode) & 4) == 4) {
+ inputBuffer[0].Event.KeyEvent.dwControlKeyState |= RIGHT_ALT_PRESSED;
+ }
+ inputBuffer[1].EventType = inputBuffer[0].EventType;
+ inputBuffer[1].Event.KeyEvent.bKeyDown = FALSE;
+ inputBuffer[1].Event.KeyEvent.wRepeatCount = 1;
+ inputBuffer[1].Event.KeyEvent.wVirtualKeyCode =
+ inputBuffer[0].Event.KeyEvent.wVirtualKeyCode;
+ inputBuffer[1].Event.KeyEvent.wVirtualScanCode =
+ inputBuffer[0].Event.KeyEvent.wVirtualScanCode;
+ inputBuffer[1].Event.KeyEvent.uChar.UnicodeChar =
+ inputBuffer[0].Event.KeyEvent.uChar.UnicodeChar;
+ inputBuffer[1].Event.KeyEvent.dwControlKeyState = 0;
+}
+
+static int testPipe()
+{
+ int didFail = 1;
+ HANDLE inPipeRead = INVALID_HANDLE_VALUE;
+ HANDLE inPipeWrite = INVALID_HANDLE_VALUE;
+ HANDLE outPipeRead = INVALID_HANDLE_VALUE;
+ HANDLE outPipeWrite = INVALID_HANDLE_VALUE;
+ HANDLE errPipeRead = INVALID_HANDLE_VALUE;
+ HANDLE errPipeWrite = INVALID_HANDLE_VALUE;
+ UINT currentCodepage = GetConsoleCP();
+ char buffer[200];
+ char buffer2[200];
+ try {
+ if (!createPipe(&inPipeRead, &inPipeWrite) ||
+ !createPipe(&outPipeRead, &outPipeWrite) ||
+ !createPipe(&errPipeRead, &errPipeWrite)) {
+ throw std::runtime_error("createFile failed!");
+ }
+ if (TestCodepage == CP_ACP) {
+ TestCodepage = GetACP();
+ }
+ if (!SetConsoleCP(TestCodepage)) {
+ throw std::runtime_error("SetConsoleCP failed!");
+ }
+
+ DWORD bytesWritten = 0;
+ if (!WriteFile(inPipeWrite, encodedInputTestString.c_str(),
+ (DWORD)encodedInputTestString.size(), &bytesWritten,
+ nullptr) ||
+ bytesWritten == 0) {
+ throw std::runtime_error("WriteFile failed!");
+ }
+
+ if (createProcess(inPipeRead, outPipeWrite, errPipeWrite)) {
+ try {
+ DWORD status;
+ if ((status = WaitForSingleObject(afterOutputEvent, waitTimeout)) !=
+ WAIT_OBJECT_0) {
+ std::cerr.setf(std::ios::hex, std::ios::basefield);
+ std::cerr << "WaitForSingleObject returned unexpected status 0x"
+ << status << std::endl;
+ std::cerr.unsetf(std::ios::hex);
+ throw std::runtime_error("WaitForSingleObject failed!");
+ }
+ DWORD bytesRead = 0;
+ if (!ReadFile(outPipeRead, buffer, sizeof(buffer), &bytesRead,
+ nullptr) ||
+ bytesRead == 0) {
+ throw std::runtime_error("ReadFile#1 failed!");
+ }
+ buffer[bytesRead] = 0;
+ if ((bytesRead <
+ encodedTestString.size() + 1 + encodedInputTestString.size() &&
+ !ReadFile(outPipeRead, buffer + bytesRead,
+ sizeof(buffer) - bytesRead, &bytesRead, nullptr)) ||
+ bytesRead == 0) {
+ throw std::runtime_error("ReadFile#2 failed!");
+ }
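+ // The child writes the test string, a newline, then an echo of its stdin;
+ // the "+ 1" offsets below skip that newline separator.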
+ if (memcmp(buffer, encodedTestString.c_str(),
+ encodedTestString.size()) == 0 &&
+ memcmp(buffer + encodedTestString.size() + 1,
+ encodedInputTestString.c_str(),
+ encodedInputTestString.size()) == 0) {
+ bytesRead = 0;
+ if (!ReadFile(errPipeRead, buffer2, sizeof(buffer2), &bytesRead,
+ nullptr) ||
+ bytesRead == 0) {
+ throw std::runtime_error("ReadFile#3 failed!");
+ }
+ buffer2[bytesRead] = 0;
+ didFail = encodedTestString.compare(0, std::string::npos, buffer2,
+ encodedTestString.size()) == 0
+ ? 0
+ : 1;
+ }
+ if (didFail != 0) {
+ std::cerr << "Pipe's output didn't match expected output!"
+ << std::endl;
+ dumpBuffers<char>(encodedTestString.c_str(), buffer,
+ encodedTestString.size());
+ dumpBuffers<char>(encodedInputTestString.c_str(),
+ buffer + encodedTestString.size() + 1,
+ encodedInputTestString.size());
+ dumpBuffers<char>(encodedTestString.c_str(), buffer2,
+ encodedTestString.size());
+ }
+ } catch (const std::runtime_error& ex) {
+ DWORD lastError = GetLastError();
+ std::cerr << "In function testPipe, line " << __LINE__ << ": "
+ << ex.what() << std::endl;
+ displayError(lastError);
+ }
+ finishProcess(didFail == 0);
+ }
+ } catch (const std::runtime_error& ex) {
+ DWORD lastError = GetLastError();
+ std::cerr << "In function testPipe, line " << __LINE__ << ": " << ex.what()
+ << std::endl;
+ displayError(lastError);
+ }
+ finishPipe(inPipeRead, inPipeWrite);
+ finishPipe(outPipeRead, outPipeWrite);
+ finishPipe(errPipeRead, errPipeWrite);
+ SetConsoleCP(currentCodepage);
+ return didFail;
+}
+
+static int testFile()
+{
+ int didFail = 1;
+ HANDLE inFile = INVALID_HANDLE_VALUE;
+ HANDLE outFile = INVALID_HANDLE_VALUE;
+ HANDLE errFile = INVALID_HANDLE_VALUE;
+ try {
+ if ((inFile = createFile(L"stdinFile.txt")) == INVALID_HANDLE_VALUE ||
+ (outFile = createFile(L"stdoutFile.txt")) == INVALID_HANDLE_VALUE ||
+ (errFile = createFile(L"stderrFile.txt")) == INVALID_HANDLE_VALUE) {
+ throw std::runtime_error("createFile failed!");
+ }
+ DWORD bytesWritten = 0;
+ char buffer[200];
+ char buffer2[200];
+
+ int length;
+ if ((length = WideCharToMultiByte(TestCodepage, 0, UnicodeInputTestString,
+ -1, buffer, sizeof(buffer), nullptr,
+ nullptr)) == 0) {
+ throw std::runtime_error("WideCharToMultiByte failed!");
+ }
+ buffer[length - 1] = '\n';
+ if (!WriteFile(inFile, buffer, length, &bytesWritten, nullptr) ||
+ bytesWritten == 0) {
+ throw std::runtime_error("WriteFile failed!");
+ }
+ if (SetFilePointer(inFile, 0, 0, FILE_BEGIN) == INVALID_SET_FILE_POINTER) {
+ throw std::runtime_error("SetFilePointer failed!");
+ }
+
+ if (createProcess(inFile, outFile, errFile)) {
+ DWORD bytesRead = 0;
+ try {
+ DWORD status;
+ if ((status = WaitForSingleObject(afterOutputEvent, waitTimeout)) !=
+ WAIT_OBJECT_0) {
+ std::cerr.setf(std::ios::hex, std::ios::basefield);
+ std::cerr << "WaitForSingleObject returned unexpected status 0x"
+ << status << std::endl;
+ std::cerr.unsetf(std::ios::hex);
+ throw std::runtime_error("WaitForSingleObject failed!");
+ }
+ if (SetFilePointer(outFile, 0, 0, FILE_BEGIN) ==
+ INVALID_SET_FILE_POINTER) {
+ throw std::runtime_error("SetFilePointer#1 failed!");
+ }
+ if (!ReadFile(outFile, buffer, sizeof(buffer), &bytesRead, nullptr) ||
+ bytesRead == 0) {
+ throw std::runtime_error("ReadFile#1 failed!");
+ }
+ buffer[bytesRead] = 0;
+ if (memcmp(buffer, encodedTestString.c_str(),
+ encodedTestString.size()) == 0 &&
+ memcmp(buffer + encodedTestString.size() + 1,
+ encodedInputTestString.c_str(),
+ encodedInputTestString.size()) == 0) {
+ bytesRead = 0;
+ if (SetFilePointer(errFile, 0, 0, FILE_BEGIN) ==
+ INVALID_SET_FILE_POINTER) {
+ throw std::runtime_error("SetFilePointer#2 failed!");
+ }
+
+ if (!ReadFile(errFile, buffer2, sizeof(buffer2), &bytesRead,
+ nullptr) ||
+ bytesRead == 0) {
+ throw std::runtime_error("ReadFile#2 failed!");
+ }
+ buffer2[bytesRead] = 0;
+ didFail = encodedTestString.compare(0, std::string::npos, buffer2,
+ encodedTestString.size()) == 0
+ ? 0
+ : 1;
+ }
+ if (didFail != 0) {
+ std::cerr << "File's output didn't match expected output!"
+ << std::endl;
+ dumpBuffers<char>(encodedTestString.c_str(), buffer,
+ encodedTestString.size());
+ dumpBuffers<char>(encodedInputTestString.c_str(),
+ buffer + encodedTestString.size() + 1,
+ encodedInputTestString.size());
+ dumpBuffers<char>(encodedTestString.c_str(), buffer2,
+ encodedTestString.size());
+ }
+ } catch (const std::runtime_error& ex) {
+ DWORD lastError = GetLastError();
+ std::cerr << "In function testFile, line " << __LINE__ << ": "
+ << ex.what() << std::endl;
+ displayError(lastError);
+ }
+ finishProcess(didFail == 0);
+ }
+ } catch (const std::runtime_error& ex) {
+ DWORD lastError = GetLastError();
+ std::cerr << "In function testFile, line " << __LINE__ << ": " << ex.what()
+ << std::endl;
+ displayError(lastError);
+ }
+ finishFile(inFile);
+ finishFile(outFile);
+ finishFile(errFile);
+ return didFail;
+}
+
+# ifndef _WIN32_WINNT_VISTA
+# define _WIN32_WINNT_VISTA 0x0600
+# endif
+
+static int testConsole()
+{
+ int didFail = 1;
+ HANDLE parentIn = GetStdHandle(STD_INPUT_HANDLE);
+ HANDLE parentOut = GetStdHandle(STD_OUTPUT_HANDLE);
+ HANDLE parentErr = GetStdHandle(STD_ERROR_HANDLE);
+ HANDLE hIn = parentIn;
+ HANDLE hOut = parentOut;
+ DWORD consoleMode;
+ bool newConsole = false;
+ bool forceNewConsole = false;
+ bool restoreConsole = false;
+ LPCWSTR TestFaceName = L"Lucida Console";
+ const DWORD TestFontFamily = 0x00000036;
+ const DWORD TestFontSize = 0x000c0000;
+ HKEY hConsoleKey;
+ WCHAR FaceName[200];
+ FaceName[0] = 0;
+ DWORD FaceNameSize = sizeof(FaceName);
+ DWORD FontFamily = TestFontFamily;
+ DWORD FontSize = TestFontSize;
+# ifdef KWSYS_WINDOWS_DEPRECATED_GetVersion
+# pragma warning(push)
+# ifdef __INTEL_COMPILER
+# pragma warning(disable : 1478)
+# elif defined __clang__
+# pragma clang diagnostic push
+# pragma clang diagnostic ignored "-Wdeprecated-declarations"
+# else
+# pragma warning(disable : 4996)
+# endif
+# endif
+ const bool isVistaOrGreater =
+ LOBYTE(LOWORD(GetVersion())) >= HIBYTE(_WIN32_WINNT_VISTA);
+# ifdef KWSYS_WINDOWS_DEPRECATED_GetVersion
+# ifdef __clang__
+# pragma clang diagnostic pop
+# else
+# pragma warning(pop)
+# endif
+# endif
+ if (!isVistaOrGreater) {
+ if (RegOpenKeyExW(HKEY_CURRENT_USER, L"Console", 0, KEY_READ | KEY_WRITE,
+ &hConsoleKey) == ERROR_SUCCESS) {
+ DWORD dwordSize = sizeof(DWORD);
+ if (RegQueryValueExW(hConsoleKey, L"FontFamily", nullptr, nullptr,
+ (LPBYTE)&FontFamily, &dwordSize) == ERROR_SUCCESS) {
+ if (FontFamily != TestFontFamily) {
+ RegQueryValueExW(hConsoleKey, L"FaceName", nullptr, nullptr,
+ (LPBYTE)FaceName, &FaceNameSize);
+ RegQueryValueExW(hConsoleKey, L"FontSize", nullptr, nullptr,
+ (LPBYTE)&FontSize, &dwordSize);
+
+ RegSetValueExW(hConsoleKey, L"FontFamily", 0, REG_DWORD,
+ (BYTE*)&TestFontFamily, sizeof(TestFontFamily));
+ RegSetValueExW(hConsoleKey, L"FaceName", 0, REG_SZ,
+ (BYTE*)TestFaceName,
+ (DWORD)((wcslen(TestFaceName) + 1) * sizeof(WCHAR)));
+ RegSetValueExW(hConsoleKey, L"FontSize", 0, REG_DWORD,
+ (BYTE*)&TestFontSize, sizeof(TestFontSize));
+
+ restoreConsole = true;
+ forceNewConsole = true;
+ }
+ } else {
+ std::cerr << "RegGetValueW(FontFamily) failed!" << std::endl;
+ }
+ RegCloseKey(hConsoleKey);
+ } else {
+ std::cerr << "RegOpenKeyExW(HKEY_CURRENT_USER\\Console) failed!"
+ << std::endl;
+ }
+ }
+ if (forceNewConsole || GetConsoleMode(parentOut, &consoleMode) == 0) {
+ // Not a real console, so create a new one.
+ FreeConsole();
+ if (!AllocConsole()) {
+ std::cerr << "AllocConsole failed!" << std::endl;
+ return didFail;
+ }
+ SECURITY_ATTRIBUTES securityAttributes;
+ securityAttributes.nLength = sizeof(SECURITY_ATTRIBUTES);
+ securityAttributes.bInheritHandle = TRUE;
+ securityAttributes.lpSecurityDescriptor = nullptr;
+ hIn = CreateFileW(L"CONIN$", GENERIC_READ | GENERIC_WRITE,
+ FILE_SHARE_READ | FILE_SHARE_WRITE, &securityAttributes,
+ OPEN_EXISTING, 0, nullptr);
+ if (hIn == INVALID_HANDLE_VALUE) {
+ DWORD lastError = GetLastError();
+ std::cerr << "CreateFile(CONIN$)" << std::endl;
+ displayError(lastError);
+ }
+ hOut = CreateFileW(L"CONOUT$", GENERIC_READ | GENERIC_WRITE,
+ FILE_SHARE_READ | FILE_SHARE_WRITE, &securityAttributes,
+ OPEN_EXISTING, 0, nullptr);
+ if (hOut == INVALID_HANDLE_VALUE) {
+ DWORD lastError = GetLastError();
+ std::cerr << "CreateFile(CONOUT$)" << std::endl;
+ displayError(lastError);
+ }
+ SetStdHandle(STD_INPUT_HANDLE, hIn);
+ SetStdHandle(STD_OUTPUT_HANDLE, hOut);
+ SetStdHandle(STD_ERROR_HANDLE, hOut);
+ newConsole = true;
+ }
+
+# if _WIN32_WINNT >= _WIN32_WINNT_VISTA
+ if (isVistaOrGreater) {
+ CONSOLE_FONT_INFOEX consoleFont;
+ memset(&consoleFont, 0, sizeof(consoleFont));
+ consoleFont.cbSize = sizeof(consoleFont);
+ HMODULE kernel32 = LoadLibraryW(L"kernel32.dll");
+ typedef BOOL(WINAPI * GetCurrentConsoleFontExFunc)(
+ HANDLE hConsoleOutput, BOOL bMaximumWindow,
+ PCONSOLE_FONT_INFOEX lpConsoleCurrentFontEx);
+ typedef BOOL(WINAPI * SetCurrentConsoleFontExFunc)(
+ HANDLE hConsoleOutput, BOOL bMaximumWindow,
+ PCONSOLE_FONT_INFOEX lpConsoleCurrentFontEx);
+ GetCurrentConsoleFontExFunc getConsoleFont =
+ (GetCurrentConsoleFontExFunc)GetProcAddress(kernel32,
+ "GetCurrentConsoleFontEx");
+ SetCurrentConsoleFontExFunc setConsoleFont =
+ (SetCurrentConsoleFontExFunc)GetProcAddress(kernel32,
+ "SetCurrentConsoleFontEx");
+ if (getConsoleFont(hOut, FALSE, &consoleFont)) {
+ if (consoleFont.FontFamily != TestFontFamily) {
+ consoleFont.FontFamily = TestFontFamily;
+ wcscpy(consoleFont.FaceName, TestFaceName);
+ if (!setConsoleFont(hOut, FALSE, &consoleFont)) {
+ std::cerr << "SetCurrentConsoleFontEx failed!" << std::endl;
+ }
+ }
+ } else {
+ std::cerr << "GetCurrentConsoleFontEx failed!" << std::endl;
+ }
+ } else {
+# endif
+ if (restoreConsole &&
+ RegOpenKeyExW(HKEY_CURRENT_USER, L"Console", 0, KEY_WRITE,
+ &hConsoleKey) == ERROR_SUCCESS) {
+ RegSetValueExW(hConsoleKey, L"FontFamily", 0, REG_DWORD,
+ (BYTE*)&FontFamily, sizeof(FontFamily));
+ if (FaceName[0] != 0) {
+ RegSetValueExW(hConsoleKey, L"FaceName", 0, REG_SZ, (BYTE*)FaceName,
+ FaceNameSize);
+ } else {
+ RegDeleteValueW(hConsoleKey, L"FaceName");
+ }
+ RegSetValueExW(hConsoleKey, L"FontSize", 0, REG_DWORD, (BYTE*)&FontSize,
+ sizeof(FontSize));
+ RegCloseKey(hConsoleKey);
+ }
+# if _WIN32_WINNT >= _WIN32_WINNT_VISTA
+ }
+# endif
+
+ if (createProcess(nullptr, nullptr, nullptr)) {
+ try {
+ DWORD status;
+ if ((status = WaitForSingleObject(beforeInputEvent, waitTimeout)) !=
+ WAIT_OBJECT_0) {
+ std::cerr.setf(std::ios::hex, std::ios::basefield);
+ std::cerr << "WaitForSingleObject returned unexpected status 0x"
+ << status << std::endl;
+ std::cerr.unsetf(std::ios::hex);
+ throw std::runtime_error("WaitForSingleObject#1 failed!");
+ }
+ INPUT_RECORD inputBuffer[(sizeof(UnicodeInputTestString) /
+ sizeof(UnicodeInputTestString[0])) *
+ 2];
+ memset(&inputBuffer, 0, sizeof(inputBuffer));
+ unsigned int i;
+ for (i = 0; i < (sizeof(UnicodeInputTestString) /
+ sizeof(UnicodeInputTestString[0]) -
+ 1);
+ i++) {
+ writeInputKeyEvent(&inputBuffer[i * 2], UnicodeInputTestString[i]);
+ }
+ writeInputKeyEvent(&inputBuffer[i * 2], VK_RETURN);
+ DWORD eventsWritten = 0;
+ // We need to wait a bit before writing to the console so that the child
+ // process has started waiting for input on stdin.
+ Sleep(300);
+ if (!WriteConsoleInputW(hIn, inputBuffer,
+ sizeof(inputBuffer) / sizeof(inputBuffer[0]),
+ &eventsWritten) ||
+ eventsWritten == 0) {
+ throw std::runtime_error("WriteConsoleInput failed!");
+ }
+ if ((status = WaitForSingleObject(afterOutputEvent, waitTimeout)) !=
+ WAIT_OBJECT_0) {
+ std::cerr.setf(std::ios::hex, std::ios::basefield);
+ std::cerr << "WaitForSingleObject returned unexpected status 0x"
+ << status << std::endl;
+ std::cerr.unsetf(std::ios::hex);
+ throw std::runtime_error("WaitForSingleObject#2 failed!");
+ }
+ CONSOLE_SCREEN_BUFFER_INFO screenBufferInfo;
+ if (!GetConsoleScreenBufferInfo(hOut, &screenBufferInfo)) {
+ throw std::runtime_error("GetConsoleScreenBufferInfo failed!");
+ }
+
+ COORD coord;
+ DWORD charsRead = 0;
+ coord.X = 0;
+ coord.Y = screenBufferInfo.dwCursorPosition.Y - 4;
+ WCHAR* outputBuffer = new WCHAR[screenBufferInfo.dwSize.X * 4];
+ if (!ReadConsoleOutputCharacterW(hOut, outputBuffer,
+ screenBufferInfo.dwSize.X * 4, coord,
+ &charsRead) ||
+ charsRead == 0) {
+ delete[] outputBuffer;
+ throw std::runtime_error("ReadConsoleOutputCharacter failed!");
+ }
+ std::wstring wideTestString = kwsys::Encoding::ToWide(encodedTestString);
+ std::replace(wideTestString.begin(), wideTestString.end(), '\0', ' ');
+ std::wstring wideInputTestString =
+ kwsys::Encoding::ToWide(encodedInputTestString);
+ if (memcmp(outputBuffer, wideTestString.c_str(),
+ wideTestString.size() * sizeof(wchar_t)) == 0 &&
+ memcmp(outputBuffer + screenBufferInfo.dwSize.X * 1,
+ wideTestString.c_str(),
+ wideTestString.size() * sizeof(wchar_t)) == 0 &&
+ memcmp(outputBuffer + screenBufferInfo.dwSize.X * 2,
+ UnicodeInputTestString,
+ sizeof(UnicodeInputTestString) - sizeof(WCHAR)) == 0 &&
+ memcmp(outputBuffer + screenBufferInfo.dwSize.X * 3,
+ wideInputTestString.c_str(),
+ (wideInputTestString.size() - 1) * sizeof(wchar_t)) == 0) {
+ didFail = 0;
+ } else {
+ std::cerr << "Console's output didn't match expected output!"
+ << std::endl;
+ dumpBuffers<wchar_t>(wideTestString.c_str(), outputBuffer,
+ wideTestString.size());
+ dumpBuffers<wchar_t>(wideTestString.c_str(),
+ outputBuffer + screenBufferInfo.dwSize.X * 1,
+ wideTestString.size());
+ dumpBuffers<wchar_t>(
+ UnicodeInputTestString, outputBuffer + screenBufferInfo.dwSize.X * 2,
+ (sizeof(UnicodeInputTestString) - 1) / sizeof(WCHAR));
+ dumpBuffers<wchar_t>(wideInputTestString.c_str(),
+ outputBuffer + screenBufferInfo.dwSize.X * 3,
+ wideInputTestString.size() - 1);
+ }
+ delete[] outputBuffer;
+ } catch (const std::runtime_error& ex) {
+ DWORD lastError = GetLastError();
+ std::cerr << "In function testConsole, line " << __LINE__ << ": "
+ << ex.what() << std::endl;
+ displayError(lastError);
+ }
+ finishProcess(didFail == 0);
+ }
+ if (newConsole) {
+ SetStdHandle(STD_INPUT_HANDLE, parentIn);
+ SetStdHandle(STD_OUTPUT_HANDLE, parentOut);
+ SetStdHandle(STD_ERROR_HANDLE, parentErr);
+ CloseHandle(hIn);
+ CloseHandle(hOut);
+ FreeConsole();
+ }
+ return didFail;
+}
+
+#endif
+
+int testConsoleBuf(int, char* [])
+{
+ int ret = 0;
+
+#if defined(_WIN32)
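+ // Event handshake with testConsoleBufChild: the child signals
+ // BeforeInputEvent just before it blocks reading stdin and AfterOutputEvent
+ // once it has echoed the input back (see testConsoleBufChild.cxx).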
+ beforeInputEvent = CreateEventW(nullptr,
+ FALSE, // auto-reset event
+ FALSE, // initial state is nonsignaled
+ BeforeInputEventName); // object name
+ if (!beforeInputEvent) {
+ std::cerr << "CreateEvent#1 failed " << GetLastError() << std::endl;
+ return 1;
+ }
+
+ afterOutputEvent = CreateEventW(nullptr, FALSE, FALSE, AfterOutputEventName);
+ if (!afterOutputEvent) {
+ std::cerr << "CreateEvent#2 failed " << GetLastError() << std::endl;
+ return 1;
+ }
+
+ encodedTestString = kwsys::Encoding::ToNarrow(std::wstring(
+ UnicodeTestString, sizeof(UnicodeTestString) / sizeof(wchar_t) - 1));
+ encodedInputTestString = kwsys::Encoding::ToNarrow(
+ std::wstring(UnicodeInputTestString,
+ sizeof(UnicodeInputTestString) / sizeof(wchar_t) - 1));
+ encodedInputTestString += "\n";
+
+ ret |= testPipe();
+ ret |= testFile();
+ ret |= testConsole();
+
+ CloseHandle(beforeInputEvent);
+ CloseHandle(afterOutputEvent);
+#endif
+
+ return ret;
+}
diff --git a/test/API/driver/kwsys/testConsoleBuf.hxx b/test/API/driver/kwsys/testConsoleBuf.hxx
new file mode 100644
index 0000000..e93cb4f
--- /dev/null
+++ b/test/API/driver/kwsys/testConsoleBuf.hxx
@@ -0,0 +1,17 @@
+/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying
+ file Copyright.txt or https://cmake.org/licensing#kwsys for details. */
+#ifndef testConsoleBuf_hxx
+#define testConsoleBuf_hxx
+
+static const wchar_t cmdConsoleBufChild[] = L"testConsoleBufChild";
+
+static const wchar_t BeforeInputEventName[] = L"BeforeInputEvent";
+static const wchar_t AfterOutputEventName[] = L"AfterOutputEvent";
+
+// यूनिकोड είναι здорово! ("Unicode is great!" in Hindi, Greek, and Russian)
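+// (Note the embedded \0 in the middle of είναι below; testConsoleBuf.cxx
+// replaces it with a space before comparing captured console output.)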
+static const wchar_t UnicodeTestString[] =
+ L"\u092F\u0942\u0928\u093F\u0915\u094B\u0921 "
+ L"\u03B5\u03AF\u03BD\0\u03B1\u03B9 "
+ L"\u0437\u0434\u043E\u0440\u043E\u0432\u043E!";
+
+#endif
diff --git a/test/API/driver/kwsys/testConsoleBufChild.cxx b/test/API/driver/kwsys/testConsoleBufChild.cxx
new file mode 100644
index 0000000..3c8fdc2
--- /dev/null
+++ b/test/API/driver/kwsys/testConsoleBufChild.cxx
@@ -0,0 +1,55 @@
+/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying
+ file Copyright.txt or https://cmake.org/licensing#kwsys for details. */
+#include "kwsysPrivate.h"
+
+#include KWSYS_HEADER(ConsoleBuf.hxx)
+#include KWSYS_HEADER(Encoding.hxx)
+
+// Work around a CMake dependency scanning limitation. This must
+// duplicate the above list of headers.
+#if 0
+# include "ConsoleBuf.hxx.in"
+# include "Encoding.hxx.in"
+#endif
+
+#include <iostream>
+
+#include "testConsoleBuf.hxx"
+
+int main(int argc, const char* argv[])
+{
+#if defined(_WIN32)
+ kwsys::ConsoleBuf::Manager out(std::cout);
+ kwsys::ConsoleBuf::Manager err(std::cerr, true);
+ kwsys::ConsoleBuf::Manager in(std::cin);
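+ // The Manager objects are expected to install a ConsoleBuf on each stream
+ // while it is attached to a real console (restoring the original streambuf
+ // when they go out of scope), so the narrow strings written below reach the
+ // console correctly encoded.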
+
+ if (argc > 1) {
+ std::cout << argv[1] << std::endl;
+ std::cerr << argv[1] << std::endl;
+ } else {
+ std::string str = kwsys::Encoding::ToNarrow(std::wstring(
+ UnicodeTestString, sizeof(UnicodeTestString) / sizeof(wchar_t) - 1));
+ std::cout << str << std::endl;
+ std::cerr << str << std::endl;
+ }
+
+ std::string input;
+ HANDLE event = OpenEventW(EVENT_MODIFY_STATE, FALSE, BeforeInputEventName);
+ if (event) {
+ SetEvent(event);
+ CloseHandle(event);
+ }
+
+ std::cin >> input;
+ std::cout << input << std::endl;
+ event = OpenEventW(EVENT_MODIFY_STATE, FALSE, AfterOutputEventName);
+ if (event) {
+ SetEvent(event);
+ CloseHandle(event);
+ }
+#else
+ static_cast<void>(argc);
+ static_cast<void>(argv);
+#endif
+ return 0;
+}
diff --git a/test/API/driver/kwsys/testDirectory.cxx b/test/API/driver/kwsys/testDirectory.cxx
new file mode 100644
index 0000000..b1ab0c8
--- /dev/null
+++ b/test/API/driver/kwsys/testDirectory.cxx
@@ -0,0 +1,110 @@
+/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying
+file Copyright.txt or https://cmake.org/licensing#kwsys for details. */
+#include "kwsysPrivate.h"
+#include KWSYS_HEADER(Directory.hxx)
+#include KWSYS_HEADER(Encoding.hxx)
+#include KWSYS_HEADER(SystemTools.hxx)
+
+// Work around a CMake dependency scanning limitation. This must
+// duplicate the above list of headers.
+#if 0
+# include "Directory.hxx.in"
+# include "Encoding.hxx.in"
+# include "SystemTools.hxx.in"
+#endif
+
+#include <fstream>
+#include <iostream>
+#include <sstream>
+
+#include <testSystemTools.h>
+
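+// Builds a directory path longer than LONG_PATH_THRESHOLD characters and
+// checks that kwsys::Directory can load it. On Windows the path is first
+// converted with SystemTools::ConvertToWindowsExtendedPath, which should
+// yield the extended-length "\\?\" form so the deep directory can be created.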
+int _doLongPathTest()
+{
+ using namespace kwsys;
+ static const int LONG_PATH_THRESHOLD = 512;
+ int res = 0;
+ std::string topdir(TEST_SYSTEMTOOLS_BINARY_DIR "/directory_testing/");
+ std::stringstream testpathstrm;
+ std::string testdirpath;
+ std::string extendedtestdirpath;
+
+ testpathstrm << topdir;
+ size_t pathlen = testpathstrm.str().length();
+ testpathstrm.seekp(0, std::ios_base::end);
+ while (pathlen < LONG_PATH_THRESHOLD) {
+ testpathstrm << "0123456789/";
+ pathlen = testpathstrm.str().length();
+ }
+
+ testdirpath = testpathstrm.str();
+#ifdef _WIN32
+ extendedtestdirpath =
+ Encoding::ToNarrow(SystemTools::ConvertToWindowsExtendedPath(testdirpath));
+#else
+ extendedtestdirpath = testdirpath;
+#endif
+
+ if (SystemTools::MakeDirectory(extendedtestdirpath)) {
+ std::ofstream testfile1(
+ (extendedtestdirpath + "longfilepathtest1.txt").c_str());
+ std::ofstream testfile2(
+ (extendedtestdirpath + "longfilepathtest2.txt").c_str());
+ testfile1 << "foo";
+ testfile2 << "bar";
+ testfile1.close();
+ testfile2.close();
+
+ Directory testdir;
+ // Set res to failure if the directory doesn't load
+ res += !testdir.Load(testdirpath);
+ // Increment res if the directory appears empty
+ res += testdir.GetNumberOfFiles() == 0;
+ // Increment res if the path has changed from
+ // what was provided.
+ res += testdirpath != testdir.GetPath();
+
+ SystemTools::RemoveADirectory(topdir);
+ } else {
+ std::cerr << "Failed to create directory with long path: "
+ << extendedtestdirpath << std::endl;
+ res += 1;
+ }
+ return res;
+}
+
+int _copyDirectoryTest()
+{
+ using namespace kwsys;
+ const std::string source(TEST_SYSTEMTOOLS_BINARY_DIR
+ "/directory_testing/copyDirectoryTestSrc");
+ if (SystemTools::PathExists(source)) {
+ std::cerr << source << " shouldn't exist before test" << std::endl;
+ return 1;
+ }
+ const std::string destination(TEST_SYSTEMTOOLS_BINARY_DIR
+ "/directory_testing/copyDirectoryTestDst");
+ if (SystemTools::PathExists(destination)) {
+ std::cerr << destination << " shouldn't exist before test" << std::endl;
+ return 2;
+ }
+ const bool copysuccess = SystemTools::CopyADirectory(source, destination);
+ const bool destinationexists = SystemTools::PathExists(destination);
+ if (copysuccess) {
+ std::cerr << "CopyADirectory should have returned false" << std::endl;
+ SystemTools::RemoveADirectory(destination);
+ return 3;
+ }
+ if (destinationexists) {
+ std::cerr << "CopyADirectory returned false, but destination directory"
+ << " has been created" << std::endl;
+ SystemTools::RemoveADirectory(destination);
+ return 4;
+ }
+ return 0;
+}
+
+int testDirectory(int, char* [])
+{
+ return _doLongPathTest() + _copyDirectoryTest();
+}
diff --git a/test/API/driver/kwsys/testDynamicLoader.cxx b/test/API/driver/kwsys/testDynamicLoader.cxx
new file mode 100644
index 0000000..2421ac0
--- /dev/null
+++ b/test/API/driver/kwsys/testDynamicLoader.cxx
@@ -0,0 +1,133 @@
+/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying
+ file Copyright.txt or https://cmake.org/licensing#kwsys for details. */
+#include "kwsysPrivate.h"
+
+#include KWSYS_HEADER(DynamicLoader.hxx)
+
+#if defined(__BEOS__) || defined(__HAIKU__)
+# include <be/kernel/OS.h> /* disable_debugger() API. */
+#endif
+
+// Work-around CMake dependency scanning limitation. This must
+// duplicate the above list of headers.
+#if 0
+# include "DynamicLoader.hxx.in"
+#endif
+
+#include <iostream>
+#include <string>
+
+// Include with <> instead of "" to avoid getting any in-source copy
+// left on disk.
+#include <testSystemTools.h>
+
+static std::string GetLibName(const char* lname, const char* subdir = nullptr)
+{
+ // Construct proper name of lib
+ std::string slname;
+ slname = EXECUTABLE_OUTPUT_PATH;
+ if (subdir) {
+ slname += "/";
+ slname += subdir;
+ }
+#ifdef CMAKE_INTDIR
+ slname += "/";
+ slname += CMAKE_INTDIR;
+#endif
+ slname += "/";
+ slname += kwsys::DynamicLoader::LibPrefix();
+ slname += lname;
+ slname += kwsys::DynamicLoader::LibExtension();
+
+ return slname;
+}
+
+/* libname = Library name (proper prefix, proper extension)
+ * symbol = the symbol to look up in libname
+ * r1: should OpenLibrary succeed ?
+ * r2: should GetSymbolAddress succeed ?
+ * r3: should CloseLibrary succeed ?
+ */
+static int TestDynamicLoader(const char* libname, const char* symbol, int r1,
+ int r2, int r3, int flags = 0)
+{
+ std::cerr << "Testing: " << libname << std::endl;
+ kwsys::DynamicLoader::LibraryHandle l =
+ kwsys::DynamicLoader::OpenLibrary(libname, flags);
+ // If the result does not match the expectation, fail (the check is an xor):
+ if ((r1 && !l) || (!r1 && l)) {
+ std::cerr << "OpenLibrary: " << kwsys::DynamicLoader::LastError()
+ << std::endl;
+ return 1;
+ }
+ kwsys::DynamicLoader::SymbolPointer f =
+ kwsys::DynamicLoader::GetSymbolAddress(l, symbol);
+ if ((r2 && !f) || (!r2 && f)) {
+ std::cerr << "GetSymbolAddress: " << kwsys::DynamicLoader::LastError()
+ << std::endl;
+ return 1;
+ }
+#ifndef __APPLE__
+ int s = kwsys::DynamicLoader::CloseLibrary(l);
+ if ((r3 && !s) || (!r3 && s)) {
+ std::cerr << "CloseLibrary: " << kwsys::DynamicLoader::LastError()
+ << std::endl;
+ return 1;
+ }
+#else
+ (void)r3;
+#endif
+ return 0;
+}
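+
+// Example use of TestDynamicLoader(): TestDynamicLoader("libdl.so", "dlopen",
+// 1, 1, 1) expects the open, the symbol lookup, and the close to all succeed,
+// while a 0 for r1, r2 or r3 expects the corresponding step to fail.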
+
+int testDynamicLoader(int argc, char* argv[])
+{
+#if defined(_WIN32)
+ SetErrorMode(SEM_FAILCRITICALERRORS | SEM_NOGPFAULTERRORBOX);
+#elif defined(__BEOS__) || defined(__HAIKU__)
+ disable_debugger(1);
+#endif
+ int res = 0;
+ if (argc == 3) {
+ // The user specified a libname and symbol to check.
+ res = TestDynamicLoader(argv[1], argv[2], 1, 1, 1);
+ return res;
+ }
+
+// dlopen() on Syllable before 11/22/2007 doesn't return 0 on error
+#ifndef __SYLLABLE__
+ // Make sure that a nonexistent lib gives the correct result
+ res += TestDynamicLoader("azerty_", "foo_bar", 0, 0, 0);
+ // Make sure that a random binary file cannot be loaded as a dylib
+ res += TestDynamicLoader(TEST_SYSTEMTOOLS_SOURCE_DIR "/testSystemTools.bin",
+ "wp", 0, 0, 0);
+#endif
+
+#ifdef __linux__
+ // This one is interesting: the open fails, yet the dlopen symbol is still
+ // found because it is already loaded into the process by default.
+ res += TestDynamicLoader("foobar.lib", "dlopen", 0, 1, 0);
+ res += TestDynamicLoader("libdl.so", "dlopen", 1, 1, 1);
+ res += TestDynamicLoader("libdl.so", "TestDynamicLoader", 1, 0, 1);
+#endif
+ // Now try on the generated library
+ std::string libname = GetLibName(KWSYS_NAMESPACE_STRING "TestDynload");
+ res += TestDynamicLoader(libname.c_str(), "dummy", 1, 0, 1);
+ res += TestDynamicLoader(libname.c_str(), "TestDynamicLoaderSymbolPointer",
+ 1, 1, 1);
+ res += TestDynamicLoader(libname.c_str(), "_TestDynamicLoaderSymbolPointer",
+ 1, 0, 1);
+ res += TestDynamicLoader(libname.c_str(), "TestDynamicLoaderData", 1, 1, 1);
+ res += TestDynamicLoader(libname.c_str(), "_TestDynamicLoaderData", 1, 0, 1);
+
+#ifdef _WIN32
+ libname = GetLibName(KWSYS_NAMESPACE_STRING "TestDynloadUse", "dynloaddir");
+ res += TestDynamicLoader(libname.c_str(), "dummy", 0, 0, 0);
+ res += TestDynamicLoader(libname.c_str(), "TestLoad", 1, 1, 1,
+ kwsys::DynamicLoader::SearchBesideLibrary);
+ res += TestDynamicLoader(libname.c_str(), "_TestLoad", 1, 0, 1,
+ kwsys::DynamicLoader::SearchBesideLibrary);
+#endif
+
+ return res;
+}
diff --git a/test/API/driver/kwsys/testDynload.c b/test/API/driver/kwsys/testDynload.c
new file mode 100644
index 0000000..c49f747
--- /dev/null
+++ b/test/API/driver/kwsys/testDynload.c
@@ -0,0 +1,13 @@
+/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying
+ file Copyright.txt or https://cmake.org/licensing#kwsys for details. */
+#ifdef _WIN32
+# define DL_EXPORT __declspec(dllexport)
+#else
+# define DL_EXPORT
+#endif
+
+DL_EXPORT int TestDynamicLoaderData = 0;
+
+DL_EXPORT void TestDynamicLoaderSymbolPointer()
+{
+}
diff --git a/test/API/driver/kwsys/testDynloadImpl.c b/test/API/driver/kwsys/testDynloadImpl.c
new file mode 100644
index 0000000..2b9069b
--- /dev/null
+++ b/test/API/driver/kwsys/testDynloadImpl.c
@@ -0,0 +1,10 @@
+/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying
+ file Copyright.txt or https://cmake.org/licensing#kwsys for details. */
+
+#include "testDynloadImpl.h"
+
+int TestDynamicLoaderImplData = 0;
+
+void TestDynamicLoaderImplSymbolPointer()
+{
+}
diff --git a/test/API/driver/kwsys/testDynloadImpl.h b/test/API/driver/kwsys/testDynloadImpl.h
new file mode 100644
index 0000000..d0c9dfb
--- /dev/null
+++ b/test/API/driver/kwsys/testDynloadImpl.h
@@ -0,0 +1,15 @@
+/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying
+ file Copyright.txt or https://cmake.org/licensing#kwsys for details. */
+#ifdef _WIN32
+# ifdef BUILDING_TestDynloadImpl
+# define DLIMPL_EXPORT __declspec(dllexport)
+# else
+# define DLIMPL_EXPORT __declspec(dllimport)
+# endif
+#else
+# define DLIMPL_EXPORT
+#endif
+
+DLIMPL_EXPORT int TestDynamicLoaderImplData;
+
+DLIMPL_EXPORT void TestDynamicLoaderImplSymbolPointer();
diff --git a/test/API/driver/kwsys/testDynloadUse.c b/test/API/driver/kwsys/testDynloadUse.c
new file mode 100644
index 0000000..5402add
--- /dev/null
+++ b/test/API/driver/kwsys/testDynloadUse.c
@@ -0,0 +1,15 @@
+/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying
+ file Copyright.txt or https://cmake.org/licensing#kwsys for details. */
+#include "testDynloadImpl.h"
+
+#ifdef _WIN32
+# define DL_EXPORT __declspec(dllexport)
+#else
+# define DL_EXPORT
+#endif
+
+DL_EXPORT int TestLoad()
+{
+ TestDynamicLoaderImplSymbolPointer();
+ return TestDynamicLoaderImplData;
+}
diff --git a/test/API/driver/kwsys/testEncode.c b/test/API/driver/kwsys/testEncode.c
new file mode 100644
index 0000000..b7b6dd8
--- /dev/null
+++ b/test/API/driver/kwsys/testEncode.c
@@ -0,0 +1,67 @@
+/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying
+ file Copyright.txt or https://cmake.org/licensing#kwsys for details. */
+#include "kwsysPrivate.h"
+#include KWSYS_HEADER(MD5.h)
+
+/* Work-around CMake dependency scanning limitation. This must
+ duplicate the above list of headers. */
+#if 0
+# include "MD5.h.in"
+#endif
+
+#include <stdio.h>
+#include <string.h>
+
+static const unsigned char testMD5input1[] =
+ " A quick brown fox jumps over the lazy dog.\n"
+ " This is sample text for MD5 sum input.\n";
+static const char testMD5output1[] = "8f146af46ed4f267921bb937d4d3500c";
+
+static const int testMD5input2len = 28;
+static const unsigned char testMD5input2[] = "the cow jumped over the moon";
+static const char testMD5output2[] = "a2ad137b746138fae4e5adca9c85d3ae";
+
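+/* Note: testMD5_1 passes -1 as the length so that kwsysMD5_Append reads the
+   NUL-terminated input string, while testMD5_2 passes an explicit byte count
+   and exercises the two-step Finalize + DigestToHex path. */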
+static int testMD5_1(kwsysMD5* md5)
+{
+ char md5out[33];
+ kwsysMD5_Initialize(md5);
+ kwsysMD5_Append(md5, testMD5input1, -1);
+ kwsysMD5_FinalizeHex(md5, md5out);
+ md5out[32] = 0;
+ printf("md5sum 1: expected [%s]\n"
+ " got [%s]\n",
+ testMD5output1, md5out);
+ return (strcmp(md5out, testMD5output1) != 0) ? 1 : 0;
+}
+
+static int testMD5_2(kwsysMD5* md5)
+{
+ unsigned char digest[16];
+ char md5out[33];
+ kwsysMD5_Initialize(md5);
+ kwsysMD5_Append(md5, testMD5input2, testMD5input2len);
+ kwsysMD5_Finalize(md5, digest);
+ kwsysMD5_DigestToHex(digest, md5out);
+ md5out[32] = 0;
+ printf("md5sum 2: expected [%s]\n"
+ " got [%s]\n",
+ testMD5output2, md5out);
+ return (strcmp(md5out, testMD5output2) != 0) ? 1 : 0;
+}
+
+int testEncode(int argc, char* argv[])
+{
+ int result = 0;
+ (void)argc;
+ (void)argv;
+
+ /* Test MD5 digest. */
+ {
+ kwsysMD5* md5 = kwsysMD5_New();
+ result |= testMD5_1(md5);
+ result |= testMD5_2(md5);
+ kwsysMD5_Delete(md5);
+ }
+
+ return result;
+}
diff --git a/test/API/driver/kwsys/testEncoding.cxx b/test/API/driver/kwsys/testEncoding.cxx
new file mode 100644
index 0000000..988697b
--- /dev/null
+++ b/test/API/driver/kwsys/testEncoding.cxx
@@ -0,0 +1,286 @@
+/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying
+ file Copyright.txt or https://cmake.org/licensing#kwsys for details. */
+#include "kwsysPrivate.h"
+
+#if defined(_MSC_VER)
+# pragma warning(disable : 4786)
+#endif
+
+#include KWSYS_HEADER(Encoding.hxx)
+#include KWSYS_HEADER(Encoding.h)
+
+#include <algorithm>
+#include <iostream>
+#include <locale.h>
+#include <stdlib.h>
+#include <string.h>
+
+// Work-around CMake dependency scanning limitation. This must
+// duplicate the above list of headers.
+#if 0
+# include "Encoding.h.in"
+# include "Encoding.hxx.in"
+#endif
+
+static const unsigned char helloWorldStrings[][32] = {
+ // English
+ { 'H', 'e', 'l', 'l', 'o', ' ', 'W', 'o', 'r', 'l', 'd', 0 },
+ // Japanese
+ { 0xE3, 0x81, 0x93, 0xE3, 0x82, 0x93, 0xE3, 0x81, 0xAB, 0xE3, 0x81,
+ 0xA1, 0xE3, 0x81, 0xAF, 0xE4, 0xB8, 0x96, 0xE7, 0x95, 0x8C, 0 },
+ // Arabic
+ { 0xD9, 0x85, 0xD8, 0xB1, 0xD8, 0xAD, 0xD8, 0xA8, 0xD8, 0xA7, 0x20, 0xD8,
+ 0xA7, 0xD9, 0x84, 0xD8, 0xB9, 0xD8, 0xA7, 0xD9, 0x84, 0xD9, 0x85, 0 },
+ // Yiddish
+ { 0xD7, 0x94, 0xD7, 0xA2, 0xD7, 0x9C, 0xD7, 0x90, 0x20, 0xD7,
+ 0x95, 0xD7, 0x95, 0xD7, 0xA2, 0xD7, 0x9C, 0xD7, 0x98, 0 },
+ // Russian
+ { 0xD0, 0xBF, 0xD1, 0x80, 0xD0, 0xB8, 0xD0, 0xB2, 0xD0, 0xB5,
+ 0xD1, 0x82, 0x20, 0xD0, 0xBC, 0xD0, 0xB8, 0xD1, 0x80, 0 },
+ // Latin
+ { 0x4D, 0x75, 0x6E, 0x64, 0x75, 0x73, 0x20, 0x73, 0x61, 0x6C, 0x76, 0x65,
+ 0 },
+ // Swahili
+ { 0x68, 0x75, 0x6A, 0x61, 0x6D, 0x62, 0x6F, 0x20, 0x44, 0x75, 0x6E, 0x69,
+ 0x61, 0 },
+ // Icelandic
+ { 0x48, 0x61, 0x6C, 0x6C, 0xC3, 0xB3, 0x20, 0x68, 0x65, 0x69, 0x6D, 0x75,
+ 0x72, 0 },
+ { 0 }
+};
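+// Each row above is a UTF-8 encoded "Hello World" greeting; the trailing
+// { 0 } entry terminates the list for the loop in testHelloWorldEncoding().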
+
+static int testHelloWorldEncoding()
+{
+ int ret = 0;
+ for (int i = 0; helloWorldStrings[i][0] != 0; i++) {
+ std::string str = reinterpret_cast<const char*>(helloWorldStrings[i]);
+ std::cout << str << std::endl;
+ std::wstring wstr = kwsys::Encoding::ToWide(str);
+ std::string str2 = kwsys::Encoding::ToNarrow(wstr);
+ wchar_t* c_wstr = kwsysEncoding_DupToWide(str.c_str());
+ char* c_str2 = kwsysEncoding_DupToNarrow(c_wstr);
+ if (!wstr.empty() && (str != str2 || strcmp(c_str2, str.c_str()))) {
+ std::cout << "converted string was different: " << str2 << std::endl;
+ std::cout << "converted string was different: " << c_str2 << std::endl;
+ ret++;
+ }
+ free(c_wstr);
+ free(c_str2);
+ }
+ return ret;
+}
+
+static int testRobustEncoding()
+{
+ // test that the conversion functions handle invalid
+ // unicode correctly/gracefully
+
+ // We manipulate the format flags of stdout; remember the original state
+ // here so it can be restored before returning.
+ std::ios::fmtflags const& flags = std::cout.flags();
+
+ int ret = 0;
+ char cstr[] = { (char)-1, 0 };
+ // this conversion could fail
+ std::wstring wstr = kwsys::Encoding::ToWide(cstr);
+
+ wstr = kwsys::Encoding::ToWide(nullptr);
+ if (wstr != L"") {
+ const wchar_t* wcstr = wstr.c_str();
+ std::cout << "ToWide(NULL) returned";
+ for (size_t i = 0; i < wstr.size(); i++) {
+ std::cout << " " << std::hex << (int)wcstr[i];
+ }
+ std::cout << std::endl;
+ ret++;
+ }
+ wstr = kwsys::Encoding::ToWide("");
+ if (wstr != L"") {
+ const wchar_t* wcstr = wstr.c_str();
+ std::cout << "ToWide(\"\") returned";
+ for (size_t i = 0; i < wstr.size(); i++) {
+ std::cout << " " << std::hex << (int)wcstr[i];
+ }
+ std::cout << std::endl;
+ ret++;
+ }
+
+#ifdef _WIN32
+ // 16 bit wchar_t - we make an invalid surrogate pair
+ wchar_t cwstr[] = { 0xD801, 0xDA00, 0 };
+ // this conversion could fail
+ std::string win_str = kwsys::Encoding::ToNarrow(cwstr);
+#endif
+
+ std::string str = kwsys::Encoding::ToNarrow(nullptr);
+ if (str != "") {
+ std::cout << "ToNarrow(NULL) returned " << str << std::endl;
+ ret++;
+ }
+
+ str = kwsys::Encoding::ToNarrow(L"");
+ if (str != "") {
+ std::cout << "ToNarrow(\"\") returned " << str << std::endl;
+ ret++;
+ }
+
+ std::cout.flags(flags);
+ return ret;
+}
+
+static int testWithNulls()
+{
+ int ret = 0;
+ std::vector<std::string> strings;
+ strings.push_back(std::string("ab") + '\0' + 'c');
+ strings.push_back(std::string("d") + '\0' + '\0' + 'e');
+ strings.push_back(std::string() + '\0' + 'f');
+ strings.push_back(std::string() + '\0' + '\0' + "gh");
+ strings.push_back(std::string("ij") + '\0');
+ strings.push_back(std::string("k") + '\0' + '\0');
+ strings.push_back(std::string("\0\0\0\0", 4) + "lmn" +
+ std::string("\0\0\0\0", 4));
+ for (std::vector<std::string>::iterator it = strings.begin();
+ it != strings.end(); ++it) {
+ std::wstring wstr = kwsys::Encoding::ToWide(*it);
+ std::string str = kwsys::Encoding::ToNarrow(wstr);
+ std::string s(*it);
+ std::replace(s.begin(), s.end(), '\0', ' ');
+ std::cout << "'" << s << "' (" << it->size() << ")" << std::endl;
+ if (str != *it) {
+ std::replace(str.begin(), str.end(), '\0', ' ');
+ std::cout << "string with null was different: '" << str << "' ("
+ << str.size() << ")" << std::endl;
+ ret++;
+ }
+ }
+ return ret;
+}
+
+static int testCommandLineArguments()
+{
+ int status = 0;
+
+ char const* argv[2] = { "./app.exe", (char const*)helloWorldStrings[1] };
+
+ kwsys::Encoding::CommandLineArguments args(2, argv);
+ kwsys::Encoding::CommandLineArguments arg2 =
+ kwsys::Encoding::CommandLineArguments(args);
+
+ char const* const* u8_argv = args.argv();
+ for (int i = 0; i < args.argc(); i++) {
+ char const* u8_arg = u8_argv[i];
+ if (strcmp(argv[i], u8_arg) != 0) {
+ std::cout << "argv[" << i << "] " << argv[i] << " != " << u8_arg
+ << std::endl;
+ status++;
+ }
+ }
+
+ kwsys::Encoding::CommandLineArguments args3 =
+ kwsys::Encoding::CommandLineArguments::Main(2, argv);
+
+ return status;
+}
+
+static int testToWindowsExtendedPath()
+{
+#ifdef _WIN32
+ int ret = 0;
+ if (kwsys::Encoding::ToWindowsExtendedPath(
+ "L:\\Local Mojo\\Hex Power Pack\\Iffy Voodoo") !=
+ L"\\\\?\\L:\\Local Mojo\\Hex Power Pack\\Iffy Voodoo") {
+ std::cout << "Problem with ToWindowsExtendedPath "
+ << "\"L:\\Local Mojo\\Hex Power Pack\\Iffy Voodoo\""
+ << std::endl;
+ ++ret;
+ }
+
+ if (kwsys::Encoding::ToWindowsExtendedPath(
+ "L:/Local Mojo/Hex Power Pack/Iffy Voodoo") !=
+ L"\\\\?\\L:\\Local Mojo\\Hex Power Pack\\Iffy Voodoo") {
+ std::cout << "Problem with ToWindowsExtendedPath "
+ << "\"L:/Local Mojo/Hex Power Pack/Iffy Voodoo\"" << std::endl;
+ ++ret;
+ }
+
+ if (kwsys::Encoding::ToWindowsExtendedPath(
+ "\\\\Foo\\Local Mojo\\Hex Power Pack\\Iffy Voodoo") !=
+ L"\\\\?\\UNC\\Foo\\Local Mojo\\Hex Power Pack\\Iffy Voodoo") {
+ std::cout << "Problem with ToWindowsExtendedPath "
+ << "\"\\\\Foo\\Local Mojo\\Hex Power Pack\\Iffy Voodoo\""
+ << std::endl;
+ ++ret;
+ }
+
+ if (kwsys::Encoding::ToWindowsExtendedPath(
+ "//Foo/Local Mojo/Hex Power Pack/Iffy Voodoo") !=
+ L"\\\\?\\UNC\\Foo\\Local Mojo\\Hex Power Pack\\Iffy Voodoo") {
+ std::cout << "Problem with ToWindowsExtendedPath "
+ << "\"//Foo/Local Mojo/Hex Power Pack/Iffy Voodoo\""
+ << std::endl;
+ ++ret;
+ }
+
+ if (kwsys::Encoding::ToWindowsExtendedPath("//") != L"//") {
+ std::cout << "Problem with ToWindowsExtendedPath "
+ << "\"//\"" << std::endl;
+ ++ret;
+ }
+
+ if (kwsys::Encoding::ToWindowsExtendedPath("\\\\.\\") != L"\\\\.\\") {
+ std::cout << "Problem with ToWindowsExtendedPath "
+ << "\"\\\\.\\\"" << std::endl;
+ ++ret;
+ }
+
+ if (kwsys::Encoding::ToWindowsExtendedPath("\\\\.\\X") != L"\\\\.\\X") {
+ std::cout << "Problem with ToWindowsExtendedPath "
+ << "\"\\\\.\\X\"" << std::endl;
+ ++ret;
+ }
+
+ if (kwsys::Encoding::ToWindowsExtendedPath("\\\\.\\X:") != L"\\\\?\\X:") {
+ std::cout << "Problem with ToWindowsExtendedPath "
+ << "\"\\\\.\\X:\"" << std::endl;
+ ++ret;
+ }
+
+ if (kwsys::Encoding::ToWindowsExtendedPath("\\\\.\\X:\\") !=
+ L"\\\\?\\X:\\") {
+ std::cout << "Problem with ToWindowsExtendedPath "
+ << "\"\\\\.\\X:\\\"" << std::endl;
+ ++ret;
+ }
+
+ if (kwsys::Encoding::ToWindowsExtendedPath("NUL") != L"\\\\.\\NUL") {
+ std::cout << "Problem with ToWindowsExtendedPath "
+ << "\"NUL\"" << std::endl;
+ ++ret;
+ }
+
+ return ret;
+#else
+ return 0;
+#endif
+}
+
+int testEncoding(int, char* [])
+{
+ const char* loc = setlocale(LC_ALL, "");
+ if (loc) {
+ std::cout << "Locale: " << loc << std::endl;
+ } else {
+ std::cout << "Locale: None" << std::endl;
+ }
+
+ int ret = 0;
+
+ ret |= testHelloWorldEncoding();
+ ret |= testRobustEncoding();
+ ret |= testCommandLineArguments();
+ ret |= testWithNulls();
+ ret |= testToWindowsExtendedPath();
+
+ return ret;
+}
diff --git a/test/API/driver/kwsys/testFStream.cxx b/test/API/driver/kwsys/testFStream.cxx
new file mode 100644
index 0000000..5009e98
--- /dev/null
+++ b/test/API/driver/kwsys/testFStream.cxx
@@ -0,0 +1,113 @@
+/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying
+ file Copyright.txt or https://cmake.org/licensing#kwsys for details. */
+#include "kwsysPrivate.h"
+
+#if defined(_MSC_VER)
+# pragma warning(disable : 4786)
+#endif
+
+#include KWSYS_HEADER(FStream.hxx)
+#include <string.h>
+#ifdef __BORLANDC__
+# include <mem.h> /* memcmp */
+#endif
+
+// Work-around CMake dependency scanning limitation. This must
+// duplicate the above list of headers.
+#if 0
+# include "FStream.hxx.in"
+#endif
+
+#include <iostream>
+
+static int testNoFile()
+{
+ kwsys::ifstream in_file("NoSuchFile.txt");
+ if (in_file) {
+ return 1;
+ }
+
+ return 0;
+}
+
+static const int num_test_files = 7;
+static const int max_test_file_size = 45;
+
+static kwsys::FStream::BOM expected_bom[num_test_files] = {
+ kwsys::FStream::BOM_None, kwsys::FStream::BOM_None,
+ kwsys::FStream::BOM_UTF8, kwsys::FStream::BOM_UTF16LE,
+ kwsys::FStream::BOM_UTF16BE, kwsys::FStream::BOM_UTF32LE,
+ kwsys::FStream::BOM_UTF32BE
+};
+
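+// In expected_bom_data and file_data below, the first byte of each row is the
+// byte count and the remaining bytes are the raw data; for example
+// { 3, 0xEF, 0xBB, 0xBF } is the three-byte UTF-8 BOM.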
+static unsigned char expected_bom_data[num_test_files][5] = {
+ { 0 },
+ { 0 },
+ { 3, 0xEF, 0xBB, 0xBF },
+ { 2, 0xFF, 0xFE },
+ { 2, 0xFE, 0xFF },
+ { 4, 0xFF, 0xFE, 0x00, 0x00 },
+ { 4, 0x00, 0x00, 0xFE, 0xFF },
+};
+
+static unsigned char file_data[num_test_files][max_test_file_size] = {
+ { 1, 'H' },
+ { 11, 'H', 'e', 'l', 'l', 'o', ' ', 'W', 'o', 'r', 'l', 'd' },
+ { 11, 'H', 'e', 'l', 'l', 'o', ' ', 'W', 'o', 'r', 'l', 'd' },
+ { 22, 0x48, 0x00, 0x65, 0x00, 0x6C, 0x00, 0x6C, 0x00, 0x6F, 0x00, 0x20,
+ 0x00, 0x57, 0x00, 0x6F, 0x00, 0x72, 0x00, 0x6C, 0x00, 0x64, 0x00 },
+ { 22, 0x00, 0x48, 0x00, 0x65, 0x00, 0x6C, 0x00, 0x6C, 0x00, 0x6F, 0x00,
+ 0x20, 0x00, 0x57, 0x00, 0x6F, 0x00, 0x72, 0x00, 0x6C, 0x00, 0x64 },
+ { 44, 0x48, 0x00, 0x00, 0x00, 0x65, 0x00, 0x00, 0x00, 0x6C, 0x00, 0x00,
+ 0x00, 0x6C, 0x00, 0x00, 0x00, 0x6F, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00,
+ 0x00, 0x57, 0x00, 0x00, 0x00, 0x6F, 0x00, 0x00, 0x00, 0x72, 0x00, 0x00,
+ 0x00, 0x6C, 0x00, 0x00, 0x00, 0x64, 0x00, 0x00, 0x00 },
+ { 44, 0x00, 0x00, 0x00, 0x48, 0x00, 0x00, 0x00, 0x65, 0x00, 0x00, 0x00,
+ 0x6C, 0x00, 0x00, 0x00, 0x6C, 0x00, 0x00, 0x00, 0x6F, 0x00, 0x00, 0x00,
+ 0x20, 0x00, 0x00, 0x00, 0x57, 0x00, 0x00, 0x00, 0x6F, 0x00, 0x00, 0x00,
+ 0x72, 0x00, 0x00, 0x00, 0x6C, 0x00, 0x00, 0x00, 0x64 },
+};
+
+static int testBOM()
+{
+ // test various encodings in binary mode
+ for (int i = 0; i < num_test_files; i++) {
+ {
+ kwsys::ofstream out("bom.txt", kwsys::ofstream::binary);
+ out.write(reinterpret_cast<const char*>(expected_bom_data[i] + 1),
+ *expected_bom_data[i]);
+ out.write(reinterpret_cast<const char*>(file_data[i] + 1),
+ file_data[i][0]);
+ }
+
+ kwsys::ifstream in("bom.txt", kwsys::ofstream::binary);
+ kwsys::FStream::BOM bom = kwsys::FStream::ReadBOM(in);
+ if (bom != expected_bom[i]) {
+ std::cout << "Unexpected BOM " << i << std::endl;
+ return 1;
+ }
+ char data[max_test_file_size];
+ in.read(data, file_data[i][0]);
+ if (!in.good()) {
+ std::cout << "Unable to read data " << i << std::endl;
+ return 1;
+ }
+
+ if (memcmp(data, file_data[i] + 1, file_data[i][0]) != 0) {
+ std::cout << "Incorrect read data " << i << std::endl;
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+int testFStream(int, char* [])
+{
+ int ret = 0;
+
+ ret |= testNoFile();
+ ret |= testBOM();
+
+ return ret;
+}
diff --git a/test/API/driver/kwsys/testFail.c b/test/API/driver/kwsys/testFail.c
new file mode 100644
index 0000000..82caeac
--- /dev/null
+++ b/test/API/driver/kwsys/testFail.c
@@ -0,0 +1,24 @@
+/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying
+ file Copyright.txt or https://cmake.org/licensing#kwsys for details. */
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+int testFail(int argc, char* argv[])
+{
+ char* env = getenv("DASHBOARD_TEST_FROM_CTEST");
+ int oldCtest = 0;
+ if (env) {
+ if (strcmp(env, "1") == 0) {
+ oldCtest = 1;
+ }
+ printf("DASHBOARD_TEST_FROM_CTEST = %s\n", env);
+ }
+ printf("%s: This test intentionally fails\n", argv[0]);
+ if (oldCtest) {
+ printf("The version of ctest is not able to handle intentionally failing "
+ "tests, so pass.\n");
+ return 0;
+ }
+ return argc;
+}
diff --git a/test/API/driver/kwsys/testHashSTL.cxx b/test/API/driver/kwsys/testHashSTL.cxx
new file mode 100644
index 0000000..4ed2f89
--- /dev/null
+++ b/test/API/driver/kwsys/testHashSTL.cxx
@@ -0,0 +1,64 @@
+/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying
+ file Copyright.txt or https://cmake.org/licensing#kwsys for details. */
+#include "kwsysPrivate.h"
+#include KWSYS_HEADER(hash_map.hxx)
+#include KWSYS_HEADER(hash_set.hxx)
+
+// Work-around CMake dependency scanning limitation. This must
+// duplicate the above list of headers.
+#if 0
+# include "hash_map.hxx.in"
+# include "hash_set.hxx.in"
+#endif
+
+#include <iostream>
+
+#if defined(_MSC_VER)
+# pragma warning(disable : 4786)
+#endif
+
+#if defined(__sgi) && !defined(__GNUC__)
+# pragma set woff 1468 /* inline function cannot be explicitly instantiated \
+ */
+#endif
+
+template class kwsys::hash_map<const char*, int>;
+template class kwsys::hash_set<int>;
+
+static bool test_hash_map()
+{
+ typedef kwsys::hash_map<const char*, int> mtype;
+ mtype m;
+ const char* keys[] = { "hello", "world" };
+ m[keys[0]] = 1;
+ m.insert(mtype::value_type(keys[1], 2));
+ int sum = 0;
+ for (mtype::iterator mi = m.begin(); mi != m.end(); ++mi) {
+ std::cout << "Found entry [" << mi->first << "," << mi->second << "]"
+ << std::endl;
+ sum += mi->second;
+ }
+ return sum == 3;
+}
+
+static bool test_hash_set()
+{
+ typedef kwsys::hash_set<int> stype;
+ stype s;
+ s.insert(1);
+ s.insert(2);
+ int sum = 0;
+ for (stype::iterator si = s.begin(); si != s.end(); ++si) {
+ std::cout << "Found entry [" << *si << "]" << std::endl;
+ sum += *si;
+ }
+ return sum == 3;
+}
+
+int testHashSTL(int, char* [])
+{
+ bool result = true;
+ result = test_hash_map() && result;
+ result = test_hash_set() && result;
+ return result ? 0 : 1;
+}
diff --git a/test/API/driver/kwsys/testProcess.c b/test/API/driver/kwsys/testProcess.c
new file mode 100644
index 0000000..39aaa23
--- /dev/null
+++ b/test/API/driver/kwsys/testProcess.c
@@ -0,0 +1,728 @@
+/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying
+ file Copyright.txt or https://cmake.org/licensing#kwsys for details. */
+#include "kwsysPrivate.h"
+#include KWSYS_HEADER(Process.h)
+#include KWSYS_HEADER(Encoding.h)
+
+/* Work-around CMake dependency scanning limitation. This must
+ duplicate the above list of headers. */
+#if 0
+# include "Encoding.h.in"
+# include "Process.h.in"
+#endif
+
+#include <assert.h>
+#include <limits.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#if defined(_WIN32)
+# include <windows.h>
+#else
+# include <signal.h>
+# include <unistd.h>
+#endif
+
+#if defined(__BORLANDC__)
+# pragma warn - 8060 /* possibly incorrect assignment */
+#endif
+
+/* Platform-specific sleep functions. */
+
+#if defined(__BEOS__) && !defined(__ZETA__)
+/* BeOS 5 doesn't have usleep(), but it has snooze(), which is identical. */
+# include <be/kernel/OS.h>
+static inline void testProcess_usleep(unsigned int usec)
+{
+ snooze(usec);
+}
+#elif defined(_WIN32)
+/* Windows can only sleep in millisecond intervals. */
+static void testProcess_usleep(unsigned int usec)
+{
+ Sleep(usec / 1000);
+}
+#else
+# define testProcess_usleep usleep
+#endif
+
+#if defined(_WIN32)
+static void testProcess_sleep(unsigned int sec)
+{
+ Sleep(sec * 1000);
+}
+#else
+static void testProcess_sleep(unsigned int sec)
+{
+ sleep(sec);
+}
+#endif
+
+int runChild(const char* cmd[], int state, int exception, int value, int share,
+ int output, int delay, double timeout, int poll, int repeat,
+ int disown, int createNewGroup, unsigned int interruptDelay);
+
+static int test1(int argc, const char* argv[])
+{
+ /* This is a very basic functional test of kwsysProcess. It is repeated
+ numerous times to verify that there are no resource leaks in kwsysProcess
+ that eventually lead to an error. Many versions of OS X will fail after
+ 256 leaked file handles, so 257 iterations seems to be a good test. On
+ the other hand, too many iterations will cause the test to time out -
+ especially if the test is instrumented with e.g. valgrind.
+
+ If you have problems with this test timing out on your system, or want to
+ run more than 257 iterations, you can change the number of iterations by
+ setting the KWSYS_TEST_PROCESS_1_COUNT environment variable. */
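+ /* For example, setting KWSYS_TEST_PROCESS_1_COUNT=1000 in the environment
+ of the parent test process raises the repeat count to 1000; values of 10
+ or less are ignored by the parsing code in main(). */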
+ (void)argc;
+ (void)argv;
+ fprintf(stdout, "Output on stdout from test returning 0.\n");
+ fprintf(stderr, "Output on stderr from test returning 0.\n");
+ return 0;
+}
+
+static int test2(int argc, const char* argv[])
+{
+ (void)argc;
+ (void)argv;
+ fprintf(stdout, "Output on stdout from test returning 123.\n");
+ fprintf(stderr, "Output on stderr from test returning 123.\n");
+ return 123;
+}
+
+static int test3(int argc, const char* argv[])
+{
+ (void)argc;
+ (void)argv;
+ fprintf(stdout, "Output before sleep on stdout from timeout test.\n");
+ fprintf(stderr, "Output before sleep on stderr from timeout test.\n");
+ fflush(stdout);
+ fflush(stderr);
+ testProcess_sleep(15);
+ fprintf(stdout, "Output after sleep on stdout from timeout test.\n");
+ fprintf(stderr, "Output after sleep on stderr from timeout test.\n");
+ return 0;
+}
+
+static int test4(int argc, const char* argv[])
+{
+#ifndef CRASH_USING_ABORT
+ /* Prepare a pointer to an invalid address. Don't use null, because
+ dereferencing null is undefined behaviour and compilers are free to
+ do whatever they want. ex: Clang will warn at compile time, or even
+ optimize away the write. We hope to 'outsmart' them by using
+ 'volatile' and a slightly larger address, based on a runtime value. */
+ volatile int* invalidAddress = 0;
+ invalidAddress += argc ? 1 : 2;
+#endif
+
+#if defined(_WIN32)
+ /* Avoid error diagnostic popups since we are crashing on purpose. */
+ SetErrorMode(SEM_FAILCRITICALERRORS | SEM_NOGPFAULTERRORBOX);
+#elif defined(__BEOS__) || defined(__HAIKU__)
+ /* Avoid error diagnostic popups since we are crashing on purpose. */
+ disable_debugger(1);
+#endif
+ (void)argc;
+ (void)argv;
+ fprintf(stdout, "Output before crash on stdout from crash test.\n");
+ fprintf(stderr, "Output before crash on stderr from crash test.\n");
+ fflush(stdout);
+ fflush(stderr);
+#ifdef CRASH_USING_ABORT
+ abort();
+#else
+ assert(invalidAddress); /* Quiet Clang scan-build. */
+ /* Provoke deliberate crash by writing to the invalid address. */
+ *invalidAddress = 0;
+#endif
+ fprintf(stdout, "Output after crash on stdout from crash test.\n");
+ fprintf(stderr, "Output after crash on stderr from crash test.\n");
+ return 0;
+}
+
+static int test5(int argc, const char* argv[])
+{
+ int r;
+ const char* cmd[4];
+ (void)argc;
+ cmd[0] = argv[0];
+ cmd[1] = "run";
+ cmd[2] = "4";
+ cmd[3] = 0;
+ fprintf(stdout, "Output on stdout before recursive test.\n");
+ fprintf(stderr, "Output on stderr before recursive test.\n");
+ fflush(stdout);
+ fflush(stderr);
+ r = runChild(cmd, kwsysProcess_State_Exception,
+#ifdef CRASH_USING_ABORT
+ kwsysProcess_Exception_Other,
+#else
+ kwsysProcess_Exception_Fault,
+#endif
+ 1, 1, 1, 0, 15, 0, 1, 0, 0, 0);
+ fprintf(stdout, "Output on stdout after recursive test.\n");
+ fprintf(stderr, "Output on stderr after recursive test.\n");
+ fflush(stdout);
+ fflush(stderr);
+ return r;
+}
+
+#define TEST6_SIZE (4096 * 2)
+static void test6(int argc, const char* argv[])
+{
+ int i;
+ char runaway[TEST6_SIZE + 1];
+ (void)argc;
+ (void)argv;
+ for (i = 0; i < TEST6_SIZE; ++i) {
+ runaway[i] = '.';
+ }
+ runaway[TEST6_SIZE] = '\n';
+
+ /* Generate huge amounts of output to test killing. */
+ for (;;) {
+ fwrite(runaway, 1, TEST6_SIZE + 1, stdout);
+ fflush(stdout);
+ }
+}
+
+/* Define MINPOLL to be one more than the number of times output is
+ written. Define MAXPOLL to be the largest number of times a loop
+ delaying 1/10th of a second should ever have to poll. */
+#define MINPOLL 5
+#define MAXPOLL 20
+static int test7(int argc, const char* argv[])
+{
+ (void)argc;
+ (void)argv;
+ fprintf(stdout, "Output on stdout before sleep.\n");
+ fprintf(stderr, "Output on stderr before sleep.\n");
+ fflush(stdout);
+ fflush(stderr);
+ /* Sleep for 1 second. */
+ testProcess_sleep(1);
+ fprintf(stdout, "Output on stdout after sleep.\n");
+ fprintf(stderr, "Output on stderr after sleep.\n");
+ fflush(stdout);
+ fflush(stderr);
+ return 0;
+}
+
+static int test8(int argc, const char* argv[])
+{
+ /* Create a disowned grandchild to test handling of processes
+ that exit before their children. */
+ int r;
+ const char* cmd[4];
+ (void)argc;
+ cmd[0] = argv[0];
+ cmd[1] = "run";
+ cmd[2] = "108";
+ cmd[3] = 0;
+ fprintf(stdout, "Output on stdout before grandchild test.\n");
+ fprintf(stderr, "Output on stderr before grandchild test.\n");
+ fflush(stdout);
+ fflush(stderr);
+ r = runChild(cmd, kwsysProcess_State_Disowned, kwsysProcess_Exception_None,
+ 1, 1, 1, 0, 10, 0, 1, 1, 0, 0);
+ fprintf(stdout, "Output on stdout after grandchild test.\n");
+ fprintf(stderr, "Output on stderr after grandchild test.\n");
+ fflush(stdout);
+ fflush(stderr);
+ return r;
+}
+
+static int test8_grandchild(int argc, const char* argv[])
+{
+ (void)argc;
+ (void)argv;
+ fprintf(stdout, "Output on stdout from grandchild before sleep.\n");
+ fprintf(stderr, "Output on stderr from grandchild before sleep.\n");
+ fflush(stdout);
+ fflush(stderr);
+ /* TODO: Instead of closing pipes here leave them open to make sure
+ the grandparent can stop listening when the parent exits. This
+ part of the test cannot be enabled until the feature is
+ implemented. */
+ fclose(stdout);
+ fclose(stderr);
+ testProcess_sleep(15);
+ return 0;
+}
+
+static int test9(int argc, const char* argv[])
+{
+ /* Test Ctrl+C behavior: the root test program will send a Ctrl+C to this
+ process. Here, we start a child process that sleeps for a long time
+ while ignoring signals. The test is successful if this process waits
+ for the child to return before exiting from the Ctrl+C handler.
+
+ WARNING: This test will falsely pass if the share parameter of runChild
+ was set to 0 when invoking the test9 process. */
+ int r;
+ const char* cmd[4];
+ (void)argc;
+ cmd[0] = argv[0];
+ cmd[1] = "run";
+ cmd[2] = "109";
+ cmd[3] = 0;
+ fprintf(stdout, "Output on stdout before grandchild test.\n");
+ fprintf(stderr, "Output on stderr before grandchild test.\n");
+ fflush(stdout);
+ fflush(stderr);
+ r = runChild(cmd, kwsysProcess_State_Exited, kwsysProcess_Exception_None, 0,
+ 1, 1, 0, 30, 0, 1, 0, 0, 0);
+ /* This sleep will avoid a race condition between this function exiting
+ normally and our Ctrl+C handler exiting abnormally after the process
+ exits. */
+ testProcess_sleep(1);
+ fprintf(stdout, "Output on stdout after grandchild test.\n");
+ fprintf(stderr, "Output on stderr after grandchild test.\n");
+ fflush(stdout);
+ fflush(stderr);
+ return r;
+}
+
+#if defined(_WIN32)
+static BOOL WINAPI test9_grandchild_handler(DWORD dwCtrlType)
+{
+ /* Ignore all Ctrl+C/Break signals. We must use an actual handler function
+ instead of using SetConsoleCtrlHandler(NULL, TRUE) so that we can also
+ ignore Ctrl+Break in addition to Ctrl+C. */
+ (void)dwCtrlType;
+ return TRUE;
+}
+#endif
+
+static int test9_grandchild(int argc, const char* argv[])
+{
+ /* The grandchild just sleeps for a few seconds while ignoring signals. */
+ (void)argc;
+ (void)argv;
+#if defined(_WIN32)
+ if (!SetConsoleCtrlHandler(test9_grandchild_handler, TRUE)) {
+ return 1;
+ }
+#else
+ struct sigaction sa;
+ memset(&sa, 0, sizeof(sa));
+ sa.sa_handler = SIG_IGN;
+ sigemptyset(&sa.sa_mask);
+ if (sigaction(SIGINT, &sa, 0) < 0) {
+ return 1;
+ }
+#endif
+ fprintf(stdout, "Output on stdout from grandchild before sleep.\n");
+ fprintf(stderr, "Output on stderr from grandchild before sleep.\n");
+ fflush(stdout);
+ fflush(stderr);
+ /* Sleep for 9 seconds. */
+ testProcess_sleep(9);
+ fprintf(stdout, "Output on stdout from grandchild after sleep.\n");
+ fprintf(stderr, "Output on stderr from grandchild after sleep.\n");
+ fflush(stdout);
+ fflush(stderr);
+ return 0;
+}
+
+static int test10(int argc, const char* argv[])
+{
+ /* Test Ctrl+C behavior: the root test program will send a Ctrl+C to this
+ process. Here, we start a child process that sleeps for a long time and
+ processes signals normally. However, this grandchild is created in a new
+ process group, ensuring that the Ctrl+C we receive is delivered only to our
+ own process group and not to the grandchild. We make sure it exits anyway. */
+ int r;
+ const char* cmd[4];
+ (void)argc;
+ cmd[0] = argv[0];
+ cmd[1] = "run";
+ cmd[2] = "110";
+ cmd[3] = 0;
+ fprintf(stdout, "Output on stdout before grandchild test.\n");
+ fprintf(stderr, "Output on stderr before grandchild test.\n");
+ fflush(stdout);
+ fflush(stderr);
+ r =
+ runChild(cmd, kwsysProcess_State_Exception,
+ kwsysProcess_Exception_Interrupt, 0, 1, 1, 0, 30, 0, 1, 0, 1, 0);
+ fprintf(stdout, "Output on stdout after grandchild test.\n");
+ fprintf(stderr, "Output on stderr after grandchild test.\n");
+ fflush(stdout);
+ fflush(stderr);
+ return r;
+}
+
+static int test10_grandchild(int argc, const char* argv[])
+{
+ /* The grandchild just sleeps for a few seconds and handles signals. */
+ (void)argc;
+ (void)argv;
+ fprintf(stdout, "Output on stdout from grandchild before sleep.\n");
+ fprintf(stderr, "Output on stderr from grandchild before sleep.\n");
+ fflush(stdout);
+ fflush(stderr);
+ /* Sleep for 6 seconds. */
+ testProcess_sleep(6);
+ fprintf(stdout, "Output on stdout from grandchild after sleep.\n");
+ fprintf(stderr, "Output on stderr from grandchild after sleep.\n");
+ fflush(stdout);
+ fflush(stderr);
+ return 0;
+}
+
+static int runChild2(kwsysProcess* kp, const char* cmd[], int state,
+ int exception, int value, int share, int output,
+ int delay, double timeout, int poll, int disown,
+ int createNewGroup, unsigned int interruptDelay)
+{
+ int result = 0;
+ char* data = 0;
+ int length = 0;
+ double userTimeout = 0;
+ double* pUserTimeout = 0;
+ kwsysProcess_SetCommand(kp, cmd);
+ if (timeout >= 0) {
+ kwsysProcess_SetTimeout(kp, timeout);
+ }
+ if (share) {
+ kwsysProcess_SetPipeShared(kp, kwsysProcess_Pipe_STDOUT, 1);
+ kwsysProcess_SetPipeShared(kp, kwsysProcess_Pipe_STDERR, 1);
+ }
+ if (disown) {
+ kwsysProcess_SetOption(kp, kwsysProcess_Option_Detach, 1);
+ }
+ if (createNewGroup) {
+ kwsysProcess_SetOption(kp, kwsysProcess_Option_CreateProcessGroup, 1);
+ }
+ kwsysProcess_Execute(kp);
+
+ if (poll) {
+ pUserTimeout = &userTimeout;
+ }
+
+ if (interruptDelay) {
+ testProcess_sleep(interruptDelay);
+ kwsysProcess_Interrupt(kp);
+ }
+
+ if (!share && !disown) {
+ int p;
+ while ((p = kwsysProcess_WaitForData(kp, &data, &length, pUserTimeout))) {
+ if (output) {
+ if (poll && p == kwsysProcess_Pipe_Timeout) {
+ fprintf(stdout, "WaitForData timeout reached.\n");
+ fflush(stdout);
+
+ /* Count the number of times we polled without getting data.
+ If it is excessive then kill the child and fail. */
+ if (++poll >= MAXPOLL) {
+ fprintf(stdout, "Poll count reached limit %d.\n", MAXPOLL);
+ kwsysProcess_Kill(kp);
+ }
+ } else {
+ fwrite(data, 1, (size_t)length, stdout);
+ fflush(stdout);
+ }
+ }
+ if (poll) {
+ /* Delay to avoid busy loop during polling. */
+ testProcess_usleep(100000);
+ }
+ if (delay) {
+/* Purposely sleeping only on Win32 to let pipe fill up. */
+#if defined(_WIN32)
+ testProcess_usleep(100000);
+#endif
+ }
+ }
+ }
+
+ if (disown) {
+ kwsysProcess_Disown(kp);
+ } else {
+ kwsysProcess_WaitForExit(kp, 0);
+ }
+
+ switch (kwsysProcess_GetState(kp)) {
+ case kwsysProcess_State_Starting:
+ printf("No process has been executed.\n");
+ break;
+ case kwsysProcess_State_Executing:
+ printf("The process is still executing.\n");
+ break;
+ case kwsysProcess_State_Expired:
+ printf("Child was killed when timeout expired.\n");
+ break;
+ case kwsysProcess_State_Exited:
+ printf("Child exited with value = %d\n", kwsysProcess_GetExitValue(kp));
+ result = ((exception != kwsysProcess_GetExitException(kp)) ||
+ (value != kwsysProcess_GetExitValue(kp)));
+ break;
+ case kwsysProcess_State_Killed:
+ printf("Child was killed by parent.\n");
+ break;
+ case kwsysProcess_State_Exception:
+ printf("Child terminated abnormally: %s\n",
+ kwsysProcess_GetExceptionString(kp));
+ result = ((exception != kwsysProcess_GetExitException(kp)) ||
+ (value != kwsysProcess_GetExitValue(kp)));
+ break;
+ case kwsysProcess_State_Disowned:
+ printf("Child was disowned.\n");
+ break;
+ case kwsysProcess_State_Error:
+ printf("Error in administering child process: [%s]\n",
+ kwsysProcess_GetErrorString(kp));
+ break;
+ }
+
+ if (result) {
+ if (exception != kwsysProcess_GetExitException(kp)) {
+ fprintf(stderr,
+ "Mismatch in exit exception. "
+ "Should have been %d, was %d.\n",
+ exception, kwsysProcess_GetExitException(kp));
+ }
+ if (value != kwsysProcess_GetExitValue(kp)) {
+ fprintf(stderr,
+ "Mismatch in exit value. "
+ "Should have been %d, was %d.\n",
+ value, kwsysProcess_GetExitValue(kp));
+ }
+ }
+
+ if (kwsysProcess_GetState(kp) != state) {
+ fprintf(stderr,
+ "Mismatch in state. "
+ "Should have been %d, was %d.\n",
+ state, kwsysProcess_GetState(kp));
+ result = 1;
+ }
+
+ /* We should have polled more times than there were data if polling
+ was enabled. */
+ if (poll && poll < MINPOLL) {
+ fprintf(stderr, "Poll count is %d, which is less than %d.\n", poll,
+ MINPOLL);
+ result = 1;
+ }
+
+ return result;
+}
+
+/**
+ * Runs a child process and blocks until it returns. Arguments as follows:
+ *
+ * cmd = Command line to run.
+ * state = Expected return value of kwsysProcess_GetState after exit.
+ * exception = Expected return value of kwsysProcess_GetExitException.
+ * value = Expected return value of kwsysProcess_GetExitValue.
+ * share = Whether to share stdout/stderr child pipes with our pipes
+ * by way of kwsysProcess_SetPipeShared. If false, new pipes
+ * are created.
+ * output = If !share && !disown, whether to write the child's stdout
+ * and stderr output to our stdout.
+ * delay = If !share && !disown, adds an additional short delay to
+ * the pipe loop to allow the pipes to fill up; Windows only.
+ * timeout = Non-zero to set a timeout in seconds via
+ * kwsysProcess_SetTimeout.
+ * poll = If !share && !disown, we count the number of 0.1 second
+ * intervals where the child pipes had no new data. We fail
+ * if not in the bounds of MINPOLL/MAXPOLL.
+ * repeat = Number of times to run the process.
+ * disown = If set, the process is disowned.
+ * createNewGroup = If set, the process is created in a new process group.
+ * interruptDelay = If non-zero, number of seconds to delay before
+ * interrupting the process. Note that this delay will occur
+ * BEFORE any reading/polling of pipes occurs and before any
+ * detachment occurs.
+ */
+int runChild(const char* cmd[], int state, int exception, int value, int share,
+ int output, int delay, double timeout, int poll, int repeat,
+ int disown, int createNewGroup, unsigned int interruptDelay)
+{
+ int result = 1;
+ kwsysProcess* kp = kwsysProcess_New();
+ if (!kp) {
+ fprintf(stderr, "kwsysProcess_New returned NULL!\n");
+ return 1;
+ }
+ while (repeat-- > 0) {
+ result = runChild2(kp, cmd, state, exception, value, share, output, delay,
+ timeout, poll, disown, createNewGroup, interruptDelay);
+ if (result) {
+ break;
+ }
+ }
+ kwsysProcess_Delete(kp);
+ return result;
+}
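+
+/* For instance, runChild(cmd, kwsysProcess_State_Exited,
+   kwsysProcess_Exception_None, 0, 0, 1, 0, 10, 0, 1, 0, 0, 0) runs cmd once
+   with a 10 second timeout, echoes the child's output, and expects a normal
+   exit with value 0.  This is an illustrative call; the real invocations are
+   in main() below. */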
+
+int main(int argc, const char* argv[])
+{
+ int n = 0;
+
+#ifdef _WIN32
+ int i;
+ char new_args[10][_MAX_PATH];
+ LPWSTR* w_av = CommandLineToArgvW(GetCommandLineW(), &argc);
+ for (i = 0; i < argc; i++) {
+ kwsysEncoding_wcstombs(new_args[i], w_av[i], _MAX_PATH);
+ argv[i] = new_args[i];
+ }
+ LocalFree(w_av);
+#endif
+
+#if 0
+ {
+ HANDLE out = GetStdHandle(STD_OUTPUT_HANDLE);
+ DuplicateHandle(GetCurrentProcess(), out,
+ GetCurrentProcess(), &out, 0, FALSE,
+ DUPLICATE_SAME_ACCESS | DUPLICATE_CLOSE_SOURCE);
+ SetStdHandle(STD_OUTPUT_HANDLE, out);
+ }
+ {
+ HANDLE out = GetStdHandle(STD_ERROR_HANDLE);
+ DuplicateHandle(GetCurrentProcess(), out,
+ GetCurrentProcess(), &out, 0, FALSE,
+ DUPLICATE_SAME_ACCESS | DUPLICATE_CLOSE_SOURCE);
+ SetStdHandle(STD_ERROR_HANDLE, out);
+ }
+#endif
+ if (argc == 2) {
+ n = atoi(argv[1]);
+ } else if (argc == 3 && strcmp(argv[1], "run") == 0) {
+ n = atoi(argv[2]);
+ }
+ /* Check arguments. */
+ if (((n >= 1 && n <= 10) || n == 108 || n == 109 || n == 110) && argc == 3) {
+ /* This is the child process for a requested test number. */
+ switch (n) {
+ case 1:
+ return test1(argc, argv);
+ case 2:
+ return test2(argc, argv);
+ case 3:
+ return test3(argc, argv);
+ case 4:
+ return test4(argc, argv);
+ case 5:
+ return test5(argc, argv);
+ case 6:
+ test6(argc, argv);
+ return 0;
+ case 7:
+ return test7(argc, argv);
+ case 8:
+ return test8(argc, argv);
+ case 9:
+ return test9(argc, argv);
+ case 10:
+ return test10(argc, argv);
+ case 108:
+ return test8_grandchild(argc, argv);
+ case 109:
+ return test9_grandchild(argc, argv);
+ case 110:
+ return test10_grandchild(argc, argv);
+ }
+ fprintf(stderr, "Invalid test number %d.\n", n);
+ return 1;
+ } else if (n >= 1 && n <= 10) {
+ /* This is the parent process for a requested test number. */
+ int states[10] = {
+ kwsysProcess_State_Exited, kwsysProcess_State_Exited,
+ kwsysProcess_State_Expired, kwsysProcess_State_Exception,
+ kwsysProcess_State_Exited, kwsysProcess_State_Expired,
+ kwsysProcess_State_Exited, kwsysProcess_State_Exited,
+ kwsysProcess_State_Expired, /* Ctrl+C handler test */
+ kwsysProcess_State_Exception /* Process group test */
+ };
+ int exceptions[10] = {
+ kwsysProcess_Exception_None, kwsysProcess_Exception_None,
+ kwsysProcess_Exception_None,
+#ifdef CRASH_USING_ABORT
+ kwsysProcess_Exception_Other,
+#else
+ kwsysProcess_Exception_Fault,
+#endif
+ kwsysProcess_Exception_None, kwsysProcess_Exception_None,
+ kwsysProcess_Exception_None, kwsysProcess_Exception_None,
+ kwsysProcess_Exception_None, kwsysProcess_Exception_Interrupt
+ };
+ int values[10] = { 0, 123, 1, 1, 0, 0, 0, 0, 1, 1 };
+ int shares[10] = { 0, 0, 0, 0, 0, 0, 0, 0, 1, 1 };
+ int outputs[10] = { 1, 1, 1, 1, 1, 0, 1, 1, 1, 1 };
+ int delays[10] = { 0, 0, 0, 0, 0, 1, 0, 0, 0, 0 };
+ double timeouts[10] = { 10, 10, 10, 30, 30, 10, -1, 10, 6, 4 };
+ int polls[10] = { 0, 0, 0, 0, 0, 0, 1, 0, 0, 0 };
+ int repeat[10] = { 257, 1, 1, 1, 1, 1, 1, 1, 1, 1 };
+ int createNewGroups[10] = { 0, 0, 0, 0, 0, 0, 0, 0, 1, 1 };
+ unsigned int interruptDelays[10] = { 0, 0, 0, 0, 0, 0, 0, 0, 3, 2 };
+ int r;
+ const char* cmd[4];
+#ifdef _WIN32
+ char* argv0 = 0;
+#endif
+ char* test1IterationsStr = getenv("KWSYS_TEST_PROCESS_1_COUNT");
+ if (test1IterationsStr) {
+ long int test1Iterations = strtol(test1IterationsStr, 0, 10);
+ if (test1Iterations > 10 && test1Iterations != LONG_MAX) {
+ repeat[0] = (int)test1Iterations;
+ }
+ }
+#ifdef _WIN32
+ if (n == 0 && (argv0 = strdup(argv[0]))) {
+ /* Try converting to forward slashes to see if it works. */
+ char* c;
+ for (c = argv0; *c; ++c) {
+ if (*c == '\\') {
+ *c = '/';
+ }
+ }
+ cmd[0] = argv0;
+ } else {
+ cmd[0] = argv[0];
+ }
+#else
+ cmd[0] = argv[0];
+#endif
+ cmd[1] = "run";
+ cmd[2] = argv[1];
+ cmd[3] = 0;
+ fprintf(stdout, "Output on stdout before test %d.\n", n);
+ fprintf(stderr, "Output on stderr before test %d.\n", n);
+ fflush(stdout);
+ fflush(stderr);
+ r = runChild(cmd, states[n - 1], exceptions[n - 1], values[n - 1],
+ shares[n - 1], outputs[n - 1], delays[n - 1], timeouts[n - 1],
+ polls[n - 1], repeat[n - 1], 0, createNewGroups[n - 1],
+ interruptDelays[n - 1]);
+ fprintf(stdout, "Output on stdout after test %d.\n", n);
+ fprintf(stderr, "Output on stderr after test %d.\n", n);
+ fflush(stdout);
+ fflush(stderr);
+#if defined(_WIN32)
+ free(argv0);
+#endif
+ return r;
+ } else if (argc > 2 && strcmp(argv[1], "0") == 0) {
+ /* This is the special debugging test to run a given command
+ line. */
+ const char** cmd = argv + 2;
+ int state = kwsysProcess_State_Exited;
+ int exception = kwsysProcess_Exception_None;
+ int value = 0;
+ double timeout = 0;
+ int r =
+ runChild(cmd, state, exception, value, 0, 1, 0, timeout, 0, 1, 0, 0, 0);
+ return r;
+ } else {
+ /* Improper usage. */
+ fprintf(stdout, "Usage: %s <test number>\n", argv[0]);
+ return 1;
+ }
+}
diff --git a/test/API/driver/kwsys/testSharedForward.c.in b/test/API/driver/kwsys/testSharedForward.c.in
new file mode 100644
index 0000000..b3eb413
--- /dev/null
+++ b/test/API/driver/kwsys/testSharedForward.c.in
@@ -0,0 +1,27 @@
+/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying
+ file Copyright.txt or https://cmake.org/licensing#kwsys for details. */
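+/* The @KWSYS_NAMESPACE@ and @EXEC_DIR@ placeholders in this file are replaced
+   when CMake configures this .c.in template into a compilable source. */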
+#if defined(CMAKE_INTDIR)
+# define CONFIG_DIR_PRE CMAKE_INTDIR "/"
+# define CONFIG_DIR_POST "/" CMAKE_INTDIR
+#else
+# define CONFIG_DIR_PRE ""
+# define CONFIG_DIR_POST ""
+#endif
+#define @KWSYS_NAMESPACE@_SHARED_FORWARD_DIR_BUILD "@EXEC_DIR@"
+#define @KWSYS_NAMESPACE@_SHARED_FORWARD_PATH_BUILD "." CONFIG_DIR_POST
+#define @KWSYS_NAMESPACE@_SHARED_FORWARD_PATH_INSTALL 0
+#define @KWSYS_NAMESPACE@_SHARED_FORWARD_EXE_BUILD \
+ CONFIG_DIR_PRE "@KWSYS_NAMESPACE@TestProcess"
+#define @KWSYS_NAMESPACE@_SHARED_FORWARD_EXE_INSTALL \
+ "@KWSYS_NAMESPACE@TestProcess"
+#define @KWSYS_NAMESPACE@_SHARED_FORWARD_OPTION_COMMAND "--command"
+#define @KWSYS_NAMESPACE@_SHARED_FORWARD_OPTION_PRINT "--print"
+#define @KWSYS_NAMESPACE@_SHARED_FORWARD_OPTION_LDD "--ldd"
+#if defined(CMAKE_INTDIR)
+# define @KWSYS_NAMESPACE@_SHARED_FORWARD_CONFIG_NAME CMAKE_INTDIR
+#endif
+#include <@KWSYS_NAMESPACE@/SharedForward.h>
+int main(int argc, char** argv)
+{
+ return @KWSYS_NAMESPACE@_shared_forward_to_real(argc, argv);
+}
diff --git a/test/API/driver/kwsys/testSystemInformation.cxx b/test/API/driver/kwsys/testSystemInformation.cxx
new file mode 100644
index 0000000..154517e
--- /dev/null
+++ b/test/API/driver/kwsys/testSystemInformation.cxx
@@ -0,0 +1,106 @@
+/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying
+ file Copyright.txt or https://cmake.org/licensing#kwsys for details. */
+#include "kwsysPrivate.h"
+#include KWSYS_HEADER(SystemInformation.hxx)
+
+// Work-around CMake dependency scanning limitation. This must
+// duplicate the above list of headers.
+#if 0
+# include "SystemInformation.hxx.in"
+#endif
+
+#include <iostream>
+
+#if defined(KWSYS_USE_LONG_LONG)
+# if defined(KWSYS_IOS_HAS_OSTREAM_LONG_LONG)
+# define iostreamLongLong(x) (x)
+# else
+# define iostreamLongLong(x) ((long)x)
+# endif
+#elif defined(KWSYS_USE___INT64)
+# if defined(KWSYS_IOS_HAS_OSTREAM___INT64)
+# define iostreamLongLong(x) (x)
+# else
+# define iostreamLongLong(x) ((long)x)
+# endif
+#else
+# error "No Long Long"
+#endif
+
+#define printMethod(info, m) std::cout << #m << ": " << info.m() << "\n"
+
+#define printMethod2(info, m, unit) \
+ std::cout << #m << ": " << info.m() << " " << unit << "\n"
+
+#define printMethod3(info, m, unit) \
+ std::cout << #m << ": " << iostreamLongLong(info.m) << " " << unit << "\n"
+
+int testSystemInformation(int, char* [])
+{
+ std::cout << "CTEST_FULL_OUTPUT\n"; // avoid truncation
+
+ kwsys::SystemInformation info;
+ info.RunCPUCheck();
+ info.RunOSCheck();
+ info.RunMemoryCheck();
+ printMethod(info, GetOSName);
+ printMethod(info, GetOSIsLinux);
+ printMethod(info, GetOSIsApple);
+ printMethod(info, GetOSIsWindows);
+ printMethod(info, GetHostname);
+ printMethod(info, GetFullyQualifiedDomainName);
+ printMethod(info, GetOSRelease);
+ printMethod(info, GetOSVersion);
+ printMethod(info, GetOSPlatform);
+ printMethod(info, Is64Bits);
+ printMethod(info, GetVendorString);
+ printMethod(info, GetVendorID);
+ printMethod(info, GetTypeID);
+ printMethod(info, GetFamilyID);
+ printMethod(info, GetModelID);
+ printMethod(info, GetExtendedProcessorName);
+ printMethod(info, GetSteppingCode);
+ printMethod(info, GetProcessorSerialNumber);
+ printMethod2(info, GetProcessorCacheSize, "KB");
+ printMethod(info, GetLogicalProcessorsPerPhysical);
+ printMethod2(info, GetProcessorClockFrequency, "MHz");
+ printMethod(info, GetNumberOfLogicalCPU);
+ printMethod(info, GetNumberOfPhysicalCPU);
+ printMethod(info, DoesCPUSupportCPUID);
+ printMethod(info, GetProcessorAPICID);
+ printMethod2(info, GetTotalVirtualMemory, "MB");
+ printMethod2(info, GetAvailableVirtualMemory, "MB");
+ printMethod2(info, GetTotalPhysicalMemory, "MB");
+ printMethod2(info, GetAvailablePhysicalMemory, "MB");
+ printMethod3(info, GetHostMemoryTotal(), "KiB");
+ printMethod3(info, GetHostMemoryAvailable("KWSHL"), "KiB");
+ printMethod3(info, GetProcMemoryAvailable("KWSHL", "KWSPL"), "KiB");
+ printMethod3(info, GetHostMemoryUsed(), "KiB");
+ printMethod3(info, GetProcMemoryUsed(), "KiB");
+ printMethod(info, GetLoadAverage);
+
+ for (long int i = 0; i <= 31; i++) {
+ if (info.DoesCPUSupportFeature(static_cast<long int>(1) << i)) {
+ std::cout << "CPU feature " << i << "\n";
+ }
+ }
+
+ /* test stack trace
+ */
+ std::cout << "Program Stack:" << std::endl
+ << kwsys::SystemInformation::GetProgramStack(0, 0) << std::endl
+ << std::endl;
+
+ /* test segv handler
+ info.SetStackTraceOnError(1);
+ double *d = (double*)100;
+ *d=0;
+ */
+
+ /* test abort handler
+ info.SetStackTraceOnError(1);
+ abort();
+ */
+
+ return 0;
+}
diff --git a/test/API/driver/kwsys/testSystemTools.bin b/test/API/driver/kwsys/testSystemTools.bin
new file mode 100644
index 0000000..961a404
--- /dev/null
+++ b/test/API/driver/kwsys/testSystemTools.bin
Binary files differ
diff --git a/test/API/driver/kwsys/testSystemTools.cxx b/test/API/driver/kwsys/testSystemTools.cxx
new file mode 100644
index 0000000..1f3a15b
--- /dev/null
+++ b/test/API/driver/kwsys/testSystemTools.cxx
@@ -0,0 +1,1128 @@
+/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying
+ file Copyright.txt or https://cmake.org/licensing#kwsys for details. */
+#include "kwsysPrivate.h"
+
+#if defined(_MSC_VER)
+# pragma warning(disable : 4786)
+#endif
+
+#include KWSYS_HEADER(FStream.hxx)
+#include KWSYS_HEADER(SystemTools.hxx)
+
+// Work-around CMake dependency scanning limitation. This must
+// duplicate the above list of headers.
+#if 0
+# include "FStream.hxx.in"
+# include "SystemTools.hxx.in"
+#endif
+
+// Include with <> instead of "" to avoid getting any in-source copy
+// left on disk.
+#include <testSystemTools.h>
+
+#include <iostream>
+#include <sstream>
+#include <stdlib.h> /* free */
+#include <string.h> /* strcmp */
+#if defined(_WIN32) && !defined(__CYGWIN__)
+# include <io.h> /* _umask (MSVC) / umask (Borland) */
+# ifdef _MSC_VER
+# define umask _umask // Note this is still umask on Borland
+# endif
+#endif
+#include <sys/stat.h> /* umask (POSIX), _S_I* constants (Windows) */
+// Visual C++ does not define mode_t (note that Borland does, however).
+#if defined(_MSC_VER)
+typedef unsigned short mode_t;
+#endif
+
+static const char* toUnixPaths[][2] = {
+ { "/usr/local/bin/passwd", "/usr/local/bin/passwd" },
+ { "/usr/lo cal/bin/pa sswd", "/usr/lo cal/bin/pa sswd" },
+ { "/usr/lo\\ cal/bin/pa\\ sswd", "/usr/lo/ cal/bin/pa/ sswd" },
+ { "c:/usr/local/bin/passwd", "c:/usr/local/bin/passwd" },
+ { "c:/usr/lo cal/bin/pa sswd", "c:/usr/lo cal/bin/pa sswd" },
+ { "c:/usr/lo\\ cal/bin/pa\\ sswd", "c:/usr/lo/ cal/bin/pa/ sswd" },
+ { "\\usr\\local\\bin\\passwd", "/usr/local/bin/passwd" },
+ { "\\usr\\lo cal\\bin\\pa sswd", "/usr/lo cal/bin/pa sswd" },
+ { "\\usr\\lo\\ cal\\bin\\pa\\ sswd", "/usr/lo/ cal/bin/pa/ sswd" },
+ { "c:\\usr\\local\\bin\\passwd", "c:/usr/local/bin/passwd" },
+ { "c:\\usr\\lo cal\\bin\\pa sswd", "c:/usr/lo cal/bin/pa sswd" },
+ { "c:\\usr\\lo\\ cal\\bin\\pa\\ sswd", "c:/usr/lo/ cal/bin/pa/ sswd" },
+ { "\\\\usr\\local\\bin\\passwd", "//usr/local/bin/passwd" },
+ { "\\\\usr\\lo cal\\bin\\pa sswd", "//usr/lo cal/bin/pa sswd" },
+ { "\\\\usr\\lo\\ cal\\bin\\pa\\ sswd", "//usr/lo/ cal/bin/pa/ sswd" },
+ { nullptr, nullptr }
+};
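+// Each { input, expected } pair above is checked by CheckConvertToUnixSlashes
+// below; every backslash, including one escaping a space, becomes a forward
+// slash.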
+
+static bool CheckConvertToUnixSlashes(std::string const& input,
+ std::string const& output)
+{
+ std::string result = input;
+ kwsys::SystemTools::ConvertToUnixSlashes(result);
+ if (result != output) {
+ std::cerr << "Problem with ConvertToUnixSlashes - input: " << input
+ << " output: " << result << " expected: " << output << std::endl;
+ return false;
+ }
+ return true;
+}
+
+static const char* checkEscapeChars[][4] = {
+ { "1 foo 2 bar 2", "12", "\\", "\\1 foo \\2 bar \\2" },
+ { " {} ", "{}", "#", " #{#} " },
+ { nullptr, nullptr, nullptr, nullptr }
+};
+
+static bool CheckEscapeChars(std::string const& input,
+ const char* chars_to_escape, char escape_char,
+ std::string const& output)
+{
+ std::string result = kwsys::SystemTools::EscapeChars(
+ input.c_str(), chars_to_escape, escape_char);
+ if (result != output) {
+ std::cerr << "Problem with CheckEscapeChars - input: " << input
+ << " output: " << result << " expected: " << output << std::endl;
+ return false;
+ }
+ return true;
+}
+
+static bool CheckFileOperations()
+{
+ bool res = true;
+ const std::string testNonExistingFile(TEST_SYSTEMTOOLS_SOURCE_DIR
+ "/testSystemToolsNonExistingFile");
+ const std::string testDotFile(TEST_SYSTEMTOOLS_SOURCE_DIR "/.");
+ const std::string testBinFile(TEST_SYSTEMTOOLS_SOURCE_DIR
+ "/testSystemTools.bin");
+ const std::string testTxtFile(TEST_SYSTEMTOOLS_SOURCE_DIR
+ "/testSystemTools.cxx");
+ const std::string testNewDir(TEST_SYSTEMTOOLS_BINARY_DIR
+ "/testSystemToolsNewDir");
+ const std::string testNewFile(testNewDir + "/testNewFile.txt");
+
+ if (kwsys::SystemTools::DetectFileType(testNonExistingFile.c_str()) !=
+ kwsys::SystemTools::FileTypeUnknown) {
+ std::cerr << "Problem with DetectFileType - failed to detect type of: "
+ << testNonExistingFile << std::endl;
+ res = false;
+ }
+
+ if (kwsys::SystemTools::DetectFileType(testDotFile.c_str()) !=
+ kwsys::SystemTools::FileTypeUnknown) {
+ std::cerr << "Problem with DetectFileType - failed to detect type of: "
+ << testDotFile << std::endl;
+ res = false;
+ }
+
+ if (kwsys::SystemTools::DetectFileType(testBinFile.c_str()) !=
+ kwsys::SystemTools::FileTypeBinary) {
+ std::cerr << "Problem with DetectFileType - failed to detect type of: "
+ << testBinFile << std::endl;
+ res = false;
+ }
+
+ if (kwsys::SystemTools::DetectFileType(testTxtFile.c_str()) !=
+ kwsys::SystemTools::FileTypeText) {
+ std::cerr << "Problem with DetectFileType - failed to detect type of: "
+ << testTxtFile << std::endl;
+ res = false;
+ }
+
+ if (kwsys::SystemTools::FileLength(testBinFile) != 766) {
+ std::cerr << "Problem with FileLength - incorrect length for: "
+ << testBinFile << std::endl;
+ res = false;
+ }
+
+ kwsys::SystemTools::Stat_t buf;
+ if (kwsys::SystemTools::Stat(testTxtFile.c_str(), &buf) != 0) {
+ std::cerr << "Problem with Stat - unable to stat text file: "
+ << testTxtFile << std::endl;
+ res = false;
+ }
+
+ if (kwsys::SystemTools::Stat(testBinFile, &buf) != 0) {
+ std::cerr << "Problem with Stat - unable to stat bin file: " << testBinFile
+ << std::endl;
+ res = false;
+ }
+
+ if (!kwsys::SystemTools::MakeDirectory(testNewDir)) {
+ std::cerr << "Problem with MakeDirectory for: " << testNewDir << std::endl;
+ res = false;
+ }
+ // calling it again should just return true
+ if (!kwsys::SystemTools::MakeDirectory(testNewDir)) {
+ std::cerr << "Problem with second call to MakeDirectory for: "
+ << testNewDir << std::endl;
+ res = false;
+ }
+ // calling with 0 pointer should return false
+ if (kwsys::SystemTools::MakeDirectory(nullptr)) {
+ std::cerr << "Problem with MakeDirectory(0)" << std::endl;
+ res = false;
+ }
+ // calling with an empty string should return false
+ if (kwsys::SystemTools::MakeDirectory(std::string())) {
+ std::cerr << "Problem with MakeDirectory(std::string())" << std::endl;
+ res = false;
+ }
+ // check existence
+ if (!kwsys::SystemTools::FileExists(testNewDir.c_str(), false)) {
+ std::cerr << "Problem with FileExists as C string and not file for: "
+ << testNewDir << std::endl;
+ res = false;
+ }
+ // check existence
+ if (!kwsys::SystemTools::PathExists(testNewDir)) {
+ std::cerr << "Problem with PathExists for: " << testNewDir << std::endl;
+ res = false;
+ }
+ // remove it
+ if (!kwsys::SystemTools::RemoveADirectory(testNewDir)) {
+ std::cerr << "Problem with RemoveADirectory for: " << testNewDir
+ << std::endl;
+ res = false;
+ }
+ // check existence
+ if (kwsys::SystemTools::FileExists(testNewDir.c_str(), false)) {
+ std::cerr << "After RemoveADirectory: "
+ << "Problem with FileExists as C string and not file for: "
+ << testNewDir << std::endl;
+ res = false;
+ }
+ // check existence
+ if (kwsys::SystemTools::PathExists(testNewDir)) {
+ std::cerr << "After RemoveADirectory: "
+ << "Problem with PathExists for: " << testNewDir << std::endl;
+ res = false;
+ }
+ // create it using the char* version
+ if (!kwsys::SystemTools::MakeDirectory(testNewDir.c_str())) {
+ std::cerr << "Problem with second call to MakeDirectory as C string for: "
+ << testNewDir << std::endl;
+ res = false;
+ }
+
+ if (!kwsys::SystemTools::Touch(testNewFile, true)) {
+ std::cerr << "Problem with Touch for: " << testNewFile << std::endl;
+ res = false;
+ }
+  // calling MakeDirectory with a path that is an existing file should fail
+  if (kwsys::SystemTools::MakeDirectory(testNewFile)) {
+    std::cerr << "Problem with MakeDirectory for: " << testNewFile
+ << std::endl;
+ res = false;
+ }
+
+ // calling with 0 pointer should return false
+ if (kwsys::SystemTools::FileExists(nullptr)) {
+ std::cerr << "Problem with FileExists(0)" << std::endl;
+ res = false;
+ }
+ if (kwsys::SystemTools::FileExists(nullptr, true)) {
+ std::cerr << "Problem with FileExists(0) as file" << std::endl;
+ res = false;
+ }
+ // calling with an empty string should return false
+ if (kwsys::SystemTools::FileExists(std::string())) {
+ std::cerr << "Problem with FileExists(std::string())" << std::endl;
+ res = false;
+ }
+ // FileExists(x, true) should return false on a directory
+ if (kwsys::SystemTools::FileExists(testNewDir, true)) {
+ std::cerr << "Problem with FileExists as file for: " << testNewDir
+ << std::endl;
+ res = false;
+ }
+ if (kwsys::SystemTools::FileExists(testNewDir.c_str(), true)) {
+ std::cerr << "Problem with FileExists as C string and file for: "
+ << testNewDir << std::endl;
+ res = false;
+ }
+ // FileExists(x, false) should return true even on a directory
+ if (!kwsys::SystemTools::FileExists(testNewDir, false)) {
+ std::cerr << "Problem with FileExists as not file for: " << testNewDir
+ << std::endl;
+ res = false;
+ }
+ if (!kwsys::SystemTools::FileExists(testNewDir.c_str(), false)) {
+ std::cerr << "Problem with FileExists as C string and not file for: "
+ << testNewDir << std::endl;
+ res = false;
+ }
+ // should work, was created as new file before
+ if (!kwsys::SystemTools::FileExists(testNewFile)) {
+ std::cerr << "Problem with FileExists for: " << testNewFile << std::endl;
+ res = false;
+ }
+ if (!kwsys::SystemTools::FileExists(testNewFile.c_str())) {
+ std::cerr << "Problem with FileExists as C string for: " << testNewFile
+ << std::endl;
+ res = false;
+ }
+ if (!kwsys::SystemTools::FileExists(testNewFile, true)) {
+ std::cerr << "Problem with FileExists as file for: " << testNewFile
+ << std::endl;
+ res = false;
+ }
+ if (!kwsys::SystemTools::FileExists(testNewFile.c_str(), true)) {
+ std::cerr << "Problem with FileExists as C string and file for: "
+ << testNewFile << std::endl;
+ res = false;
+ }
+
+ // calling with an empty string should return false
+ if (kwsys::SystemTools::PathExists(std::string())) {
+ std::cerr << "Problem with PathExists(std::string())" << std::endl;
+ res = false;
+ }
+ // PathExists(x) should return true on a directory
+ if (!kwsys::SystemTools::PathExists(testNewDir)) {
+ std::cerr << "Problem with PathExists for: " << testNewDir << std::endl;
+ res = false;
+ }
+ // should work, was created as new file before
+ if (!kwsys::SystemTools::PathExists(testNewFile)) {
+ std::cerr << "Problem with PathExists for: " << testNewFile << std::endl;
+ res = false;
+ }
+
+// Reset umask
+#if defined(_WIN32) && !defined(__CYGWIN__)
+ // NOTE: Windows doesn't support toggling _S_IREAD.
+ mode_t fullMask = _S_IWRITE;
+#else
+ // On a normal POSIX platform, we can toggle all permissions.
+ mode_t fullMask = S_IRWXU | S_IRWXG | S_IRWXO;
+#endif
+ mode_t orig_umask = umask(fullMask);
+
+ // Test file permissions without umask
+ mode_t origPerm, thisPerm;
+ if (!kwsys::SystemTools::GetPermissions(testNewFile, origPerm)) {
+ std::cerr << "Problem with GetPermissions (1) for: " << testNewFile
+ << std::endl;
+ res = false;
+ }
+
+ if (!kwsys::SystemTools::SetPermissions(testNewFile, 0)) {
+ std::cerr << "Problem with SetPermissions (1) for: " << testNewFile
+ << std::endl;
+ res = false;
+ }
+
+ if (!kwsys::SystemTools::GetPermissions(testNewFile, thisPerm)) {
+ std::cerr << "Problem with GetPermissions (2) for: " << testNewFile
+ << std::endl;
+ res = false;
+ }
+
+ if ((thisPerm & fullMask) != 0) {
+ std::cerr << "SetPermissions failed to set permissions (1) for: "
+ << testNewFile << ": actual = " << thisPerm
+ << "; expected = " << 0 << std::endl;
+ res = false;
+ }
+
+ // While we're at it, check proper TestFileAccess functionality.
+ if (kwsys::SystemTools::TestFileAccess(testNewFile,
+ kwsys::TEST_FILE_WRITE)) {
+ std::cerr
+      << "TestFileAccess incorrectly indicated that this is a writable file: "
+ << testNewFile << std::endl;
+ res = false;
+ }
+
+ if (!kwsys::SystemTools::TestFileAccess(testNewFile, kwsys::TEST_FILE_OK)) {
+ std::cerr
+      << "TestFileAccess incorrectly indicated that this file does not exist: "
+ << testNewFile << std::endl;
+ res = false;
+ }
+
+ // Test restoring/setting full permissions.
+ if (!kwsys::SystemTools::SetPermissions(testNewFile, fullMask)) {
+ std::cerr << "Problem with SetPermissions (2) for: " << testNewFile
+ << std::endl;
+ res = false;
+ }
+
+ if (!kwsys::SystemTools::GetPermissions(testNewFile, thisPerm)) {
+ std::cerr << "Problem with GetPermissions (3) for: " << testNewFile
+ << std::endl;
+ res = false;
+ }
+
+ if ((thisPerm & fullMask) != fullMask) {
+ std::cerr << "SetPermissions failed to set permissions (2) for: "
+ << testNewFile << ": actual = " << thisPerm
+ << "; expected = " << fullMask << std::endl;
+ res = false;
+ }
+
+ // Test setting file permissions while honoring umask
+ if (!kwsys::SystemTools::SetPermissions(testNewFile, fullMask, true)) {
+ std::cerr << "Problem with SetPermissions (3) for: " << testNewFile
+ << std::endl;
+ res = false;
+ }
+
+ if (!kwsys::SystemTools::GetPermissions(testNewFile, thisPerm)) {
+ std::cerr << "Problem with GetPermissions (4) for: " << testNewFile
+ << std::endl;
+ res = false;
+ }
+
+ if ((thisPerm & fullMask) != 0) {
+ std::cerr << "SetPermissions failed to honor umask for: " << testNewFile
+ << ": actual = " << thisPerm << "; expected = " << 0
+ << std::endl;
+ res = false;
+ }
+
+ // Restore umask
+ umask(orig_umask);
+
+ // Restore file permissions
+ if (!kwsys::SystemTools::SetPermissions(testNewFile, origPerm)) {
+ std::cerr << "Problem with SetPermissions (4) for: " << testNewFile
+ << std::endl;
+ res = false;
+ }
+
+ // Remove the test file
+ if (!kwsys::SystemTools::RemoveFile(testNewFile)) {
+ std::cerr << "Problem with RemoveFile: " << testNewFile << std::endl;
+ res = false;
+ }
+
+ std::string const testFileMissing(testNewDir + "/testMissingFile.txt");
+ if (!kwsys::SystemTools::RemoveFile(testFileMissing)) {
+ std::string const& msg = kwsys::SystemTools::GetLastSystemError();
+ std::cerr << "RemoveFile(\"" << testFileMissing << "\") failed: " << msg
+ << "\n";
+ res = false;
+ }
+
+ std::string const testFileMissingDir(testNewDir + "/missing/file.txt");
+ if (!kwsys::SystemTools::RemoveFile(testFileMissingDir)) {
+ std::string const& msg = kwsys::SystemTools::GetLastSystemError();
+ std::cerr << "RemoveFile(\"" << testFileMissingDir << "\") failed: " << msg
+ << "\n";
+ res = false;
+ }
+
+ kwsys::SystemTools::Touch(testNewFile, true);
+ if (!kwsys::SystemTools::RemoveADirectory(testNewDir)) {
+ std::cerr << "Problem with RemoveADirectory for: " << testNewDir
+ << std::endl;
+ res = false;
+ }
+
+#ifdef KWSYS_TEST_SYSTEMTOOLS_LONG_PATHS
+ // Perform the same file and directory creation and deletion tests but
+ // with paths > 256 characters in length.
+
+ const std::string testNewLongDir(
+ TEST_SYSTEMTOOLS_BINARY_DIR
+ "/"
+ "012345678901234567890123456789012345678901234567890123456789"
+ "012345678901234567890123456789012345678901234567890123456789"
+ "012345678901234567890123456789012345678901234567890123456789"
+ "012345678901234567890123456789012345678901234567890123456789"
+ "01234567890123");
+ const std::string testNewLongFile(
+ testNewLongDir +
+ "/"
+ "012345678901234567890123456789012345678901234567890123456789"
+ "012345678901234567890123456789012345678901234567890123456789"
+ "012345678901234567890123456789012345678901234567890123456789"
+ "012345678901234567890123456789012345678901234567890123456789"
+ "0123456789.txt");
+
+ if (!kwsys::SystemTools::MakeDirectory(testNewLongDir)) {
+ std::cerr << "Problem with MakeDirectory for: " << testNewLongDir
+ << std::endl;
+ res = false;
+ }
+
+ if (!kwsys::SystemTools::Touch(testNewLongFile.c_str(), true)) {
+ std::cerr << "Problem with Touch for: " << testNewLongFile << std::endl;
+ res = false;
+ }
+
+ if (!kwsys::SystemTools::RemoveFile(testNewLongFile)) {
+ std::cerr << "Problem with RemoveFile: " << testNewLongFile << std::endl;
+ res = false;
+ }
+
+ kwsys::SystemTools::Touch(testNewLongFile.c_str(), true);
+ if (!kwsys::SystemTools::RemoveADirectory(testNewLongDir)) {
+ std::cerr << "Problem with RemoveADirectory for: " << testNewLongDir
+ << std::endl;
+ res = false;
+ }
+#endif
+
+ return res;
+}
+
+static bool CheckStringOperations()
+{
+ bool res = true;
+
+ std::string test = "mary had a little lamb.";
+ if (kwsys::SystemTools::CapitalizedWords(test) !=
+ "Mary Had A Little Lamb.") {
+ std::cerr << "Problem with CapitalizedWords " << '"' << test << '"'
+ << std::endl;
+ res = false;
+ }
+
+ test = "Mary Had A Little Lamb.";
+ if (kwsys::SystemTools::UnCapitalizedWords(test) !=
+ "mary had a little lamb.") {
+ std::cerr << "Problem with UnCapitalizedWords " << '"' << test << '"'
+ << std::endl;
+ res = false;
+ }
+
+ test = "MaryHadTheLittleLamb.";
+ if (kwsys::SystemTools::AddSpaceBetweenCapitalizedWords(test) !=
+ "Mary Had The Little Lamb.") {
+ std::cerr << "Problem with AddSpaceBetweenCapitalizedWords " << '"' << test
+ << '"' << std::endl;
+ res = false;
+ }
+
+ char* cres =
+ kwsys::SystemTools::AppendStrings("Mary Had A", " Little Lamb.");
+ if (strcmp(cres, "Mary Had A Little Lamb.")) {
+ std::cerr << "Problem with AppendStrings "
+ << "\"Mary Had A\" \" Little Lamb.\"" << std::endl;
+ res = false;
+ }
+ delete[] cres;
+
+ cres = kwsys::SystemTools::AppendStrings("Mary Had", " A ", "Little Lamb.");
+ if (strcmp(cres, "Mary Had A Little Lamb.")) {
+ std::cerr << "Problem with AppendStrings "
+ << "\"Mary Had\" \" A \" \"Little Lamb.\"" << std::endl;
+ res = false;
+ }
+ delete[] cres;
+
+ if (kwsys::SystemTools::CountChar("Mary Had A Little Lamb.", 'a') != 3) {
+ std::cerr << "Problem with CountChar "
+ << "\"Mary Had A Little Lamb.\"" << std::endl;
+ res = false;
+ }
+
+ cres = kwsys::SystemTools::RemoveChars("Mary Had A Little Lamb.", "aeiou");
+ if (strcmp(cres, "Mry Hd A Lttl Lmb.")) {
+ std::cerr << "Problem with RemoveChars "
+ << "\"Mary Had A Little Lamb.\"" << std::endl;
+ res = false;
+ }
+ delete[] cres;
+
+ cres = kwsys::SystemTools::RemoveCharsButUpperHex("Mary Had A Little Lamb.");
+ if (strcmp(cres, "A")) {
+ std::cerr << "Problem with RemoveCharsButUpperHex "
+ << "\"Mary Had A Little Lamb.\"" << std::endl;
+ res = false;
+ }
+ delete[] cres;
+
+ char* cres2 = strdup("Mary Had A Little Lamb.");
+ kwsys::SystemTools::ReplaceChars(cres2, "aeiou", 'X');
+ if (strcmp(cres2, "MXry HXd A LXttlX LXmb.")) {
+ std::cerr << "Problem with ReplaceChars "
+ << "\"Mary Had A Little Lamb.\"" << std::endl;
+ res = false;
+ }
+ free(cres2);
+
+ if (!kwsys::SystemTools::StringStartsWith("Mary Had A Little Lamb.",
+ "Mary ")) {
+ std::cerr << "Problem with StringStartsWith "
+ << "\"Mary Had A Little Lamb.\"" << std::endl;
+ res = false;
+ }
+
+ if (!kwsys::SystemTools::StringEndsWith("Mary Had A Little Lamb.",
+ " Lamb.")) {
+ std::cerr << "Problem with StringEndsWith "
+ << "\"Mary Had A Little Lamb.\"" << std::endl;
+ res = false;
+ }
+
+ cres = kwsys::SystemTools::DuplicateString("Mary Had A Little Lamb.");
+ if (strcmp(cres, "Mary Had A Little Lamb.")) {
+ std::cerr << "Problem with DuplicateString "
+ << "\"Mary Had A Little Lamb.\"" << std::endl;
+ res = false;
+ }
+ delete[] cres;
+
+ test = "Mary Had A Little Lamb.";
+ if (kwsys::SystemTools::CropString(test, 13) != "Mary ...Lamb.") {
+ std::cerr << "Problem with CropString "
+ << "\"Mary Had A Little Lamb.\"" << std::endl;
+ res = false;
+ }
+
+ std::vector<std::string> lines;
+ kwsys::SystemTools::Split("Mary Had A Little Lamb.", lines, ' ');
+ if (lines[0] != "Mary" || lines[1] != "Had" || lines[2] != "A" ||
+ lines[3] != "Little" || lines[4] != "Lamb.") {
+ std::cerr << "Problem with Split "
+ << "\"Mary Had A Little Lamb.\"" << std::endl;
+ res = false;
+ }
+
+ if (kwsys::SystemTools::ConvertToWindowsOutputPath(
+ "L://Local Mojo/Hex Power Pack/Iffy Voodoo") !=
+ "\"L:\\Local Mojo\\Hex Power Pack\\Iffy Voodoo\"") {
+ std::cerr << "Problem with ConvertToWindowsOutputPath "
+ << "\"L://Local Mojo/Hex Power Pack/Iffy Voodoo\"" << std::endl;
+ res = false;
+ }
+
+ if (kwsys::SystemTools::ConvertToWindowsOutputPath(
+ "//grayson/Local Mojo/Hex Power Pack/Iffy Voodoo") !=
+ "\"\\\\grayson\\Local Mojo\\Hex Power Pack\\Iffy Voodoo\"") {
+ std::cerr << "Problem with ConvertToWindowsOutputPath "
+ << "\"//grayson/Local Mojo/Hex Power Pack/Iffy Voodoo\""
+ << std::endl;
+ res = false;
+ }
+
+ if (kwsys::SystemTools::ConvertToUnixOutputPath(
+ "//Local Mojo/Hex Power Pack/Iffy Voodoo") !=
+ "//Local\\ Mojo/Hex\\ Power\\ Pack/Iffy\\ Voodoo") {
+ std::cerr << "Problem with ConvertToUnixOutputPath "
+ << "\"//Local Mojo/Hex Power Pack/Iffy Voodoo\"" << std::endl;
+ res = false;
+ }
+
+ return res;
+}
+
+static bool CheckPutEnv(const std::string& env, const char* name,
+ const char* value)
+{
+ if (!kwsys::SystemTools::PutEnv(env)) {
+ std::cerr << "PutEnv(\"" << env << "\") failed!" << std::endl;
+ return false;
+ }
+ std::string v = "(null)";
+ kwsys::SystemTools::GetEnv(name, v);
+ if (v != value) {
+ std::cerr << "GetEnv(\"" << name << "\") returned \"" << v << "\", not \""
+ << value << "\"!" << std::endl;
+ return false;
+ }
+ return true;
+}
+
+static bool CheckUnPutEnv(const char* env, const char* name)
+{
+ if (!kwsys::SystemTools::UnPutEnv(env)) {
+ std::cerr << "UnPutEnv(\"" << env << "\") failed!" << std::endl;
+ return false;
+ }
+ std::string v;
+ if (kwsys::SystemTools::GetEnv(name, v)) {
+ std::cerr << "GetEnv(\"" << name << "\") returned \"" << v
+ << "\", not (null)!" << std::endl;
+ return false;
+ }
+ return true;
+}
+
+static bool CheckEnvironmentOperations()
+{
+ bool res = true;
+ res &= CheckPutEnv("A=B", "A", "B");
+ res &= CheckPutEnv("B=C", "B", "C");
+ res &= CheckPutEnv("C=D", "C", "D");
+ res &= CheckPutEnv("D=E", "D", "E");
+ res &= CheckUnPutEnv("A", "A");
+ res &= CheckUnPutEnv("B=", "B");
+ res &= CheckUnPutEnv("C=D", "C");
+ /* Leave "D=E" in environment so a memory checker can test for leaks. */
+ return res;
+}
+
+static bool CheckRelativePath(const std::string& local,
+ const std::string& remote,
+ const std::string& expected)
+{
+ std::string result = kwsys::SystemTools::RelativePath(local, remote);
+ if (!kwsys::SystemTools::ComparePath(expected, result)) {
+ std::cerr << "RelativePath(" << local << ", " << remote << ") yielded "
+ << result << " instead of " << expected << std::endl;
+ return false;
+ }
+ return true;
+}
+
+static bool CheckRelativePaths()
+{
+ bool res = true;
+ res &= CheckRelativePath("/usr/share", "/bin/bash", "../../bin/bash");
+ res &= CheckRelativePath("/usr/./share/", "/bin/bash", "../../bin/bash");
+ res &= CheckRelativePath("/usr//share/", "/bin/bash", "../../bin/bash");
+ res &=
+ CheckRelativePath("/usr/share/../bin/", "/bin/bash", "../../bin/bash");
+ res &= CheckRelativePath("/usr/share", "/usr/share//bin", "bin");
+ return res;
+}
+
+static bool CheckCollapsePath(const std::string& path,
+ const std::string& expected,
+ const char* base = nullptr)
+{
+ std::string result = kwsys::SystemTools::CollapseFullPath(path, base);
+ if (!kwsys::SystemTools::ComparePath(expected, result)) {
+ std::cerr << "CollapseFullPath(" << path << ") yielded " << result
+ << " instead of " << expected << std::endl;
+ return false;
+ }
+ return true;
+}
+
+static bool CheckCollapsePath()
+{
+ bool res = true;
+ res &= CheckCollapsePath("/usr/share/*", "/usr/share/*");
+ res &= CheckCollapsePath("C:/Windows/*", "C:/Windows/*");
+ res &= CheckCollapsePath("/usr/share/../lib", "/usr/lib");
+ res &= CheckCollapsePath("/usr/share/./lib", "/usr/share/lib");
+ res &= CheckCollapsePath("/usr/share/../../lib", "/lib");
+ res &= CheckCollapsePath("/usr/share/.././../lib", "/lib");
+ res &= CheckCollapsePath("/../lib", "/lib");
+ res &= CheckCollapsePath("/../lib/", "/lib");
+ res &= CheckCollapsePath("/", "/");
+ res &= CheckCollapsePath("C:/", "C:/");
+ res &= CheckCollapsePath("C:/../", "C:/");
+ res &= CheckCollapsePath("C:/../../", "C:/");
+ res &= CheckCollapsePath("../b", "../../b", "../");
+ res &= CheckCollapsePath("../a/../b", "../b", "../rel");
+ res &= CheckCollapsePath("a/../b", "../rel/b", "../rel");
+ return res;
+}
+
+static std::string StringVectorToString(const std::vector<std::string>& vec)
+{
+ std::stringstream ss;
+ ss << "vector(";
+ for (std::vector<std::string>::const_iterator i = vec.begin();
+ i != vec.end(); ++i) {
+ if (i != vec.begin()) {
+ ss << ", ";
+ }
+ ss << *i;
+ }
+ ss << ")";
+ return ss.str();
+}
+
+static bool CheckGetPath()
+{
+ const char* envName = "S";
+#ifdef _WIN32
+ const char* envValue = "C:\\Somewhere\\something;D:\\Temp";
+#else
+ const char* envValue = "/Somewhere/something:/tmp";
+#endif
+ const char* registryPath = "[HKEY_LOCAL_MACHINE\\SOFTWARE\\MyApp; MyKey]";
+
+ std::vector<std::string> originalPaths;
+ originalPaths.push_back(registryPath);
+
+ std::vector<std::string> expectedPaths;
+ expectedPaths.push_back(registryPath);
+#ifdef _WIN32
+ expectedPaths.push_back("C:/Somewhere/something");
+ expectedPaths.push_back("D:/Temp");
+#else
+ expectedPaths.push_back("/Somewhere/something");
+ expectedPaths.push_back("/tmp");
+#endif
+
+ bool res = true;
+ res &= CheckPutEnv(std::string(envName) + "=" + envValue, envName, envValue);
+
+ std::vector<std::string> paths = originalPaths;
+ kwsys::SystemTools::GetPath(paths, envName);
+
+ if (paths != expectedPaths) {
+ std::cerr << "GetPath(" << StringVectorToString(originalPaths) << ", "
+ << envName << ") yielded " << StringVectorToString(paths)
+ << " instead of " << StringVectorToString(expectedPaths)
+ << std::endl;
+ res = false;
+ }
+
+ res &= CheckUnPutEnv(envName, envName);
+ return res;
+}
+
+static bool CheckGetFilenameName()
+{
+ const char* windowsFilepath = "C:\\somewhere\\something";
+ const char* unixFilepath = "/somewhere/something";
+
+#if defined(_WIN32) || defined(KWSYS_SYSTEMTOOLS_SUPPORT_WINDOWS_SLASHES)
+ std::string expectedWindowsFilename = "something";
+#else
+ std::string expectedWindowsFilename = "C:\\somewhere\\something";
+#endif
+ std::string expectedUnixFilename = "something";
+
+ bool res = true;
+ std::string filename = kwsys::SystemTools::GetFilenameName(windowsFilepath);
+ if (filename != expectedWindowsFilename) {
+ std::cerr << "GetFilenameName(" << windowsFilepath << ") yielded "
+ << filename << " instead of " << expectedWindowsFilename
+ << std::endl;
+ res = false;
+ }
+
+ filename = kwsys::SystemTools::GetFilenameName(unixFilepath);
+ if (filename != expectedUnixFilename) {
+ std::cerr << "GetFilenameName(" << unixFilepath << ") yielded " << filename
+ << " instead of " << expectedUnixFilename << std::endl;
+ res = false;
+ }
+ return res;
+}
+
+static bool CheckFind()
+{
+ bool res = true;
+ const std::string testFindFileName("testFindFile.txt");
+ const std::string testFindFile(TEST_SYSTEMTOOLS_BINARY_DIR "/" +
+ testFindFileName);
+
+ if (!kwsys::SystemTools::Touch(testFindFile, true)) {
+ std::cerr << "Problem with Touch for: " << testFindFile << std::endl;
+    // abort here, as the test is only meaningful if the file exists
+ return false;
+ }
+
+ std::vector<std::string> searchPaths;
+ searchPaths.push_back(TEST_SYSTEMTOOLS_BINARY_DIR);
+ if (kwsys::SystemTools::FindFile(testFindFileName, searchPaths, true)
+ .empty()) {
+ std::cerr << "Problem with FindFile without system paths for: "
+ << testFindFileName << std::endl;
+ res = false;
+ }
+ if (kwsys::SystemTools::FindFile(testFindFileName, searchPaths, false)
+ .empty()) {
+ std::cerr << "Problem with FindFile with system paths for: "
+ << testFindFileName << std::endl;
+ res = false;
+ }
+
+ return res;
+}
+
+static bool CheckIsSubDirectory()
+{
+ bool res = true;
+
+ if (kwsys::SystemTools::IsSubDirectory("/foo", "/") == false) {
+ std::cerr << "Problem with IsSubDirectory (root - unix): " << std::endl;
+ res = false;
+ }
+ if (kwsys::SystemTools::IsSubDirectory("c:/foo", "c:/") == false) {
+ std::cerr << "Problem with IsSubDirectory (root - dos): " << std::endl;
+ res = false;
+ }
+ if (kwsys::SystemTools::IsSubDirectory("/foo/bar", "/foo") == false) {
+ std::cerr << "Problem with IsSubDirectory (deep): " << std::endl;
+ res = false;
+ }
+ if (kwsys::SystemTools::IsSubDirectory("/foo", "/foo") == true) {
+ std::cerr << "Problem with IsSubDirectory (identity): " << std::endl;
+ res = false;
+ }
+ if (kwsys::SystemTools::IsSubDirectory("/fooo", "/foo") == true) {
+ std::cerr << "Problem with IsSubDirectory (substring): " << std::endl;
+ res = false;
+ }
+ if (kwsys::SystemTools::IsSubDirectory("/foo/", "/foo") == true) {
+    std::cerr << "Problem with IsSubDirectory (trailing slash): "
+ << std::endl;
+ res = false;
+ }
+
+ return res;
+}
+
+static bool CheckGetLineFromStream()
+{
+ const std::string fileWithFiveCharsOnFirstLine(TEST_SYSTEMTOOLS_SOURCE_DIR
+ "/README.rst");
+
+ kwsys::ifstream file(fileWithFiveCharsOnFirstLine.c_str(), std::ios::in);
+
+ if (!file) {
+ std::cerr << "Problem opening: " << fileWithFiveCharsOnFirstLine
+ << std::endl;
+ return false;
+ }
+
+ std::string line;
+ bool has_newline = false;
+ bool result;
+
+ file.seekg(0, std::ios::beg);
+ result = kwsys::SystemTools::GetLineFromStream(file, line, &has_newline, -1);
+ if (!result || line.size() != 5) {
+ std::cerr << "First line does not have five characters: " << line.size()
+ << std::endl;
+ return false;
+ }
+
+ file.seekg(0, std::ios::beg);
+ result = kwsys::SystemTools::GetLineFromStream(file, line, &has_newline, -1);
+ if (!result || line.size() != 5) {
+ std::cerr << "First line does not have five characters after rewind: "
+ << line.size() << std::endl;
+ return false;
+ }
+
+ bool ret = true;
+
+ for (size_t size = 1; size <= 5; ++size) {
+ file.seekg(0, std::ios::beg);
+ result = kwsys::SystemTools::GetLineFromStream(file, line, &has_newline,
+ static_cast<long>(size));
+ if (!result || line.size() != size) {
+ std::cerr << "Should have read " << size << " characters but got "
+ << line.size() << std::endl;
+ ret = false;
+ }
+ }
+
+ return ret;
+}
+
+static bool CheckGetLineFromStreamLongLine()
+{
+ const std::string fileWithLongLine("longlines.txt");
+ std::string firstLine, secondLine;
+ // First line: large buffer, containing a carriage return for some reason.
+ firstLine.assign(2050, ' ');
+ firstLine += "\rfirst";
+ secondLine.assign(2050, 'y');
+ secondLine += "second";
+
+ // Create file with long lines.
+ {
+ kwsys::ofstream out(fileWithLongLine.c_str(), std::ios::binary);
+ if (!out) {
+ std::cerr << "Problem opening for write: " << fileWithLongLine
+ << std::endl;
+ return false;
+ }
+ out << firstLine << "\r\n\n" << secondLine << "\n";
+ }
+
+ kwsys::ifstream file(fileWithLongLine.c_str(), std::ios::binary);
+ if (!file) {
+ std::cerr << "Problem opening: " << fileWithLongLine << std::endl;
+ return false;
+ }
+
+ std::string line;
+ bool has_newline = false;
+ bool result;
+
+ // Read first line.
+ result = kwsys::SystemTools::GetLineFromStream(file, line, &has_newline, -1);
+ if (!result || line != firstLine) {
+ std::cerr << "First line does not match, expected " << firstLine.size()
+ << " characters, got " << line.size() << std::endl;
+ return false;
+ }
+ if (!has_newline) {
+ std::cerr << "Expected new line to be read from first line" << std::endl;
+ return false;
+ }
+
+ // Read empty line.
+ has_newline = false;
+ result = kwsys::SystemTools::GetLineFromStream(file, line, &has_newline, -1);
+ if (!result || !line.empty()) {
+ std::cerr << "Expected successful read with an empty line, got "
+ << line.size() << " characters" << std::endl;
+ return false;
+ }
+ if (!has_newline) {
+ std::cerr << "Expected new line to be read for an empty line" << std::endl;
+ return false;
+ }
+
+ // Read second line.
+ has_newline = false;
+ result = kwsys::SystemTools::GetLineFromStream(file, line, &has_newline, -1);
+ if (!result || line != secondLine) {
+ std::cerr << "Second line does not match, expected " << secondLine.size()
+ << " characters, got " << line.size() << std::endl;
+ return false;
+ }
+ if (!has_newline) {
+ std::cerr << "Expected new line to be read from second line" << std::endl;
+ return false;
+ }
+
+ return true;
+}
+
+static bool writeFile(const char* fileName, const char* data)
+{
+ kwsys::ofstream out(fileName, std::ios::binary);
+ out << data;
+ if (!out) {
+ std::cerr << "Failed to write file: " << fileName << std::endl;
+ return false;
+ }
+ return true;
+}
+
+static std::string readFile(const char* fileName)
+{
+ kwsys::ifstream in(fileName, std::ios::binary);
+ std::stringstream sstr;
+ sstr << in.rdbuf();
+ std::string data = sstr.str();
+ if (!in) {
+ std::cerr << "Failed to read file: " << fileName << std::endl;
+ return std::string();
+ }
+ return data;
+}
+
+struct
+{
+ const char* a;
+ const char* b;
+ bool differ;
+} diff_test_cases[] = { { "one", "one", false },
+ { "one", "two", true },
+ { "", "", false },
+ { "\n", "\r\n", false },
+ { "one\n", "one\n", false },
+ { "one\r\n", "one\n", false },
+ { "one\n", "one", false },
+ { "one\ntwo", "one\ntwo", false },
+ { "one\ntwo", "one\r\ntwo", false } };
+
+static bool CheckTextFilesDiffer()
+{
+ const int num_test_cases =
+ sizeof(diff_test_cases) / sizeof(diff_test_cases[0]);
+ for (int i = 0; i < num_test_cases; ++i) {
+ if (!writeFile("file_a", diff_test_cases[i].a) ||
+ !writeFile("file_b", diff_test_cases[i].b)) {
+ return false;
+ }
+ if (kwsys::SystemTools::TextFilesDiffer("file_a", "file_b") !=
+ diff_test_cases[i].differ) {
+ std::cerr << "Incorrect TextFilesDiffer result for test case " << i + 1
+ << "." << std::endl;
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static bool CheckCopyFileIfDifferent()
+{
+ bool ret = true;
+ const int num_test_cases =
+ sizeof(diff_test_cases) / sizeof(diff_test_cases[0]);
+ for (int i = 0; i < num_test_cases; ++i) {
+ if (!writeFile("file_a", diff_test_cases[i].a) ||
+ !writeFile("file_b", diff_test_cases[i].b)) {
+ return false;
+ }
+ const char* cptarget =
+ i < 4 ? TEST_SYSTEMTOOLS_BINARY_DIR "/file_b" : "file_b";
+ if (!kwsys::SystemTools::CopyFileIfDifferent("file_a", cptarget)) {
+ std::cerr << "CopyFileIfDifferent() returned false for test case "
+ << i + 1 << "." << std::endl;
+ ret = false;
+ continue;
+ }
+ std::string bdata = readFile("file_b");
+ if (diff_test_cases[i].a != bdata) {
+ std::cerr << "Incorrect CopyFileIfDifferent file contents in test case "
+ << i + 1 << "." << std::endl;
+ ret = false;
+ continue;
+ }
+ }
+
+ return ret;
+}
+
+int testSystemTools(int, char* [])
+{
+ bool res = true;
+
+ int cc;
+ for (cc = 0; toUnixPaths[cc][0]; cc++) {
+ res &= CheckConvertToUnixSlashes(toUnixPaths[cc][0], toUnixPaths[cc][1]);
+ }
+
+ // Special check for ~
+ std::string output;
+ if (kwsys::SystemTools::GetEnv("HOME", output)) {
+ output += "/foo bar/lala";
+ res &= CheckConvertToUnixSlashes("~/foo bar/lala", output);
+ }
+
+ for (cc = 0; checkEscapeChars[cc][0]; cc++) {
+ res &= CheckEscapeChars(checkEscapeChars[cc][0], checkEscapeChars[cc][1],
+ *checkEscapeChars[cc][2], checkEscapeChars[cc][3]);
+ }
+
+ res &= CheckFileOperations();
+
+ res &= CheckStringOperations();
+
+ res &= CheckEnvironmentOperations();
+
+ res &= CheckRelativePaths();
+
+ res &= CheckCollapsePath();
+
+ res &= CheckGetPath();
+
+ res &= CheckFind();
+
+ res &= CheckIsSubDirectory();
+
+ res &= CheckGetLineFromStream();
+
+ res &= CheckGetLineFromStreamLongLine();
+
+ res &= CheckGetFilenameName();
+
+ res &= CheckTextFilesDiffer();
+
+ res &= CheckCopyFileIfDifferent();
+
+ return res ? 0 : 1;
+}
diff --git a/test/API/driver/kwsys/testSystemTools.h.in b/test/API/driver/kwsys/testSystemTools.h.in
new file mode 100644
index 0000000..022e36e
--- /dev/null
+++ b/test/API/driver/kwsys/testSystemTools.h.in
@@ -0,0 +1,12 @@
+/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying
+ file Copyright.txt or https://cmake.org/licensing#kwsys for details. */
+#ifndef @KWSYS_NAMESPACE@_testSystemtools_h
+#define @KWSYS_NAMESPACE@_testSystemtools_h
+
+#define EXECUTABLE_OUTPUT_PATH "@CMAKE_CURRENT_BINARY_DIR@"
+
+#define TEST_SYSTEMTOOLS_SOURCE_DIR "@TEST_SYSTEMTOOLS_SOURCE_DIR@"
+#define TEST_SYSTEMTOOLS_BINARY_DIR "@TEST_SYSTEMTOOLS_BINARY_DIR@"
+#cmakedefine KWSYS_TEST_SYSTEMTOOLS_LONG_PATHS
+
+#endif
diff --git a/test/API/driver/kwsys/testTerminal.c b/test/API/driver/kwsys/testTerminal.c
new file mode 100644
index 0000000..652830c
--- /dev/null
+++ b/test/API/driver/kwsys/testTerminal.c
@@ -0,0 +1,22 @@
+/* Distributed under the OSI-approved BSD 3-Clause License. See accompanying
+ file Copyright.txt or https://cmake.org/licensing#kwsys for details. */
+#include "kwsysPrivate.h"
+#include KWSYS_HEADER(Terminal.h)
+
+/* Work-around CMake dependency scanning limitation. This must
+ duplicate the above list of headers. */
+#if 0
+# include "Terminal.h.in"
+#endif
+
+int testTerminal(int argc, char* argv[])
+{
+ (void)argc;
+ (void)argv;
+ kwsysTerminal_cfprintf(kwsysTerminal_Color_ForegroundYellow |
+ kwsysTerminal_Color_BackgroundBlue |
+ kwsysTerminal_Color_AssumeTTY,
+ stdout, "Hello %s!", "World");
+ fprintf(stdout, "\n");
+ return 0;
+}
diff --git a/test/API/driver/kwsys/update-gitsetup.bash b/test/API/driver/kwsys/update-gitsetup.bash
new file mode 100644
index 0000000..aa83cb8
--- /dev/null
+++ b/test/API/driver/kwsys/update-gitsetup.bash
@@ -0,0 +1,20 @@
+#!/usr/bin/env bash
+
+set -e
+set -x
+shopt -s dotglob
+
+readonly name="GitSetup"
+readonly ownership="GitSetup Upstream <kwrobot@kitware.com>"
+readonly subtree="GitSetup"
+readonly repo="https://gitlab.kitware.com/utils/gitsetup.git"
+readonly tag="setup"
+readonly shortlog=false
+readonly paths="
+"
+
+extract_source () {
+ git_archive
+}
+
+. "${BASH_SOURCE%/*}/update-third-party.bash"
diff --git a/test/API/driver/kwsys/update-third-party.bash b/test/API/driver/kwsys/update-third-party.bash
new file mode 100644
index 0000000..3b8358e
--- /dev/null
+++ b/test/API/driver/kwsys/update-third-party.bash
@@ -0,0 +1,169 @@
+#=============================================================================
+# Copyright 2015-2016 Kitware, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#=============================================================================
+
+########################################################################
+# Script for updating third party packages.
+#
+# This script should be sourced in a project-specific script which sets
+# the following variables:
+#
+# name
+# The name of the project.
+# ownership
+# A git author name/email for the commits.
+# subtree
+# The location of the thirdparty package within the main source
+# tree.
+# repo
+# The git repository to use as upstream.
+# tag
+# The tag, branch or commit hash to use for upstream.
+# shortlog
+# Optional. Set to 'true' to get a shortlog in the commit message.
+#
+# Additionally, an "extract_source" function must be defined. It will be
+# run within the checkout of the project on the requested tag. It should
+# place the desired tree into $extractdir/$name-reduced. This
+# directory will be used as the newest commit for the project.
+#
+# For convenience, the function may use the "git_archive" function which
+# does a standard "git archive" extraction using the (optional) "paths"
+# variable to only extract a subset of the source tree.
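+#
+# As a minimal sketch only (all names below are hypothetical and not part
+# of this repository), a project-specific script that sources this file
+# would look like the following; see update-gitsetup.bash next to this
+# script for a real instance:
+#
+#   readonly name="MyLib"
+#   readonly ownership="MyLib Upstream <robot@example.com>"
+#   readonly subtree="ThirdParty/MyLib"
+#   readonly repo="https://example.com/mylib.git"
+#   readonly tag="master"
+#   readonly shortlog=false
+#   readonly paths="
+#     src/
+#     LICENSE
+#   "
+#
+#   extract_source () {
+#       git_archive
+#   }
+#
+#   . "${BASH_SOURCE%/*}/update-third-party.bash"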
+########################################################################
+
+########################################################################
+# Utility functions
+########################################################################
+git_archive () {
+ git archive --worktree-attributes --prefix="$name-reduced/" HEAD -- $paths | \
+ tar -C "$extractdir" -x
+}
+
+die () {
+ echo >&2 "$@"
+ exit 1
+}
+
+warn () {
+ echo >&2 "warning: $@"
+}
+
+readonly regex_date='20[0-9][0-9]-[0-9][0-9]-[0-9][0-9]'
+readonly basehash_regex="$name $regex_date ([0-9a-f]*)"
+readonly basehash="$( git rev-list --author="$ownership" --grep="$basehash_regex" -n 1 HEAD )"
+readonly upstream_old_short="$( git cat-file commit "$basehash" | sed -n '/'"$basehash_regex"'/ {s/.*(//;s/)//;p}' | egrep '^[0-9a-f]+$' )"
+
+########################################################################
+# Sanity checking
+########################################################################
+[ -n "$name" ] || \
+ die "'name' is empty"
+[ -n "$ownership" ] || \
+ die "'ownership' is empty"
+[ -n "$subtree" ] || \
+ die "'subtree' is empty"
+[ -n "$repo" ] || \
+ die "'repo' is empty"
+[ -n "$tag" ] || \
+ die "'tag' is empty"
+[ -n "$basehash" ] || \
+ warn "'basehash' is empty; performing initial import"
+readonly do_shortlog="${shortlog-false}"
+
+readonly workdir="$PWD/work"
+readonly upstreamdir="$workdir/upstream"
+readonly extractdir="$workdir/extract"
+
+[ -d "$workdir" ] && \
+ die "error: workdir '$workdir' already exists"
+
+trap "rm -rf '$workdir'" EXIT
+
+# Get upstream
+git clone "$repo" "$upstreamdir"
+
+if [ -n "$basehash" ]; then
+ # Use the existing package's history
+ git worktree add "$extractdir" "$basehash"
+ # Clear out the working tree
+ pushd "$extractdir"
+ git ls-files | xargs rm -v
+ find . -type d -empty -delete
+ popd
+else
+ # Create a repo to hold this package's history
+ mkdir -p "$extractdir"
+ git -C "$extractdir" init
+fi
+
+# Extract the subset of upstream we care about
+pushd "$upstreamdir"
+git checkout "$tag"
+readonly upstream_hash="$( git rev-parse HEAD )"
+readonly upstream_hash_short="$( git rev-parse --short=8 "$upstream_hash" )"
+readonly upstream_datetime="$( git rev-list "$upstream_hash" --format='%ci' -n 1 | grep -e "^$regex_date" )"
+readonly upstream_date="$( echo "$upstream_datetime" | grep -o -e "$regex_date" )"
+if $do_shortlog && [ -n "$basehash" ]; then
+ readonly commit_shortlog="
+
+Upstream Shortlog
+-----------------
+
+$( git shortlog --no-merges --abbrev=8 --format='%h %s' "$upstream_old_short".."$upstream_hash" )"
+else
+ readonly commit_shortlog=""
+fi
+extract_source || \
+ die "failed to extract source"
+popd
+
+[ -d "$extractdir/$name-reduced" ] || \
+ die "expected directory to extract does not exist"
+readonly commit_summary="$name $upstream_date ($upstream_hash_short)"
+
+# Commit the subset
+pushd "$extractdir"
+mv -v "$name-reduced/"* .
+rmdir "$name-reduced/"
+git add -A .
+git commit -n --author="$ownership" --date="$upstream_datetime" -F - <<-EOF
+$commit_summary
+
+Code extracted from:
+
+ $repo
+
+at commit $upstream_hash ($tag).$commit_shortlog
+EOF
+git branch -f "upstream-$name"
+popd
+
+# Merge the subset into this repository
+if [ -n "$basehash" ]; then
+ git merge --log -s recursive "-Xsubtree=$subtree/" --no-commit "upstream-$name"
+else
+ unrelated_histories_flag=""
+ if git merge --help | grep -q -e allow-unrelated-histories; then
+ unrelated_histories_flag="--allow-unrelated-histories "
+ fi
+ readonly unrelated_histories_flag
+
+ git fetch "$extractdir" "upstream-$name:upstream-$name"
+ git merge --log -s ours --no-commit $unrelated_histories_flag "upstream-$name"
+ git read-tree -u --prefix="$subtree/" "upstream-$name"
+fi
+git commit --no-edit
+git branch -d "upstream-$name"
diff --git a/test/API/tarray.c b/test/API/tarray.c
new file mode 100644
index 0000000..214a022
--- /dev/null
+++ b/test/API/tarray.c
@@ -0,0 +1,2250 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://www.hdfgroup.org/licenses. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/***********************************************************
+ *
+ * Test program: tarray
+ *
+ * Test the Array Datatype functionality
+ *
+ *************************************************************/
+
+#include "testhdf5.h"
+/* #include "H5srcdir.h" */
+
+#define FILENAME "tarray1.h5"
+#define TESTFILE "tarrold.h5"
+
+/* 1-D array datatype */
+#define ARRAY1_RANK 1
+#define ARRAY1_DIM1 4
+
+/* 3-D array datatype */
+#define ARRAY2_RANK 3
+#define ARRAY2_DIM1 3
+#define ARRAY2_DIM2 4
+#define ARRAY2_DIM3 5
+
+/* 2-D array datatype */
+#define ARRAY3_RANK 2
+#define ARRAY3_DIM1 6
+#define ARRAY3_DIM2 3
+
+/* 1-D dataset with fixed dimensions */
+#define SPACE1_RANK 1
+#define SPACE1_DIM1 4
+
+/* Parameters used with the test_array_bkg() test */
+#define FIELDNAME "ArrayofStructures"
+#define LENGTH 5
+#define ALEN 10
+#define RANK 1
+#define NMAX 100
+
+/* Struct used with test_array_bkg() test */
+typedef struct {
+ int nsubfields;
+ char *name[NMAX];
+ size_t offset[NMAX];
+ hid_t datatype[NMAX];
+
+} CmpDTSinfo;
+
+/* Forward declarations for custom vlen memory manager functions */
+void *test_array_alloc_custom(size_t size, void *info);
+void test_array_free_custom(void *mem, void *info);
+
+/*-------------------------------------------------------------------------
+ * Function: test_array_atomic_1d
+ *
+ * Purpose: Test basic array datatype code.
+ * Tests 1-D array of atomic datatypes.
+ *
+ * Return: void
+ *
+ *-------------------------------------------------------------------------
+ */
+static void
+test_array_atomic_1d(void)
+{
+ int wdata[SPACE1_DIM1][ARRAY1_DIM1]; /* Information to write */
+ int rdata[SPACE1_DIM1][ARRAY1_DIM1]; /* Information read in */
+ hid_t fid1; /* HDF5 File IDs */
+ hid_t dataset; /* Dataset ID */
+ hid_t sid1; /* Dataspace ID */
+ hid_t tid1; /* Datatype ID */
+ hsize_t sdims1[] = {SPACE1_DIM1};
+ hsize_t tdims1[] = {ARRAY1_DIM1};
+ int ndims; /* Array rank for reading */
+ hsize_t rdims1[H5S_MAX_RANK]; /* Array dimensions for reading */
+ int i, j; /* counting variables */
+ herr_t ret; /* Generic return value */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing 1-D Array of Atomic Datatypes Functionality\n"));
+
+ /* Allocate and initialize array data to write */
+ for (i = 0; i < SPACE1_DIM1; i++)
+ for (j = 0; j < ARRAY1_DIM1; j++)
+ wdata[i][j] = i * 10 + j;
+
+ /* Create file */
+ fid1 = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(fid1, FAIL, "H5Fcreate");
+
+ /* Create dataspace for datasets */
+ sid1 = H5Screate_simple(SPACE1_RANK, sdims1, NULL);
+ CHECK(sid1, FAIL, "H5Screate_simple");
+
+ /* Create a datatype to refer to */
+ tid1 = H5Tarray_create2(H5T_NATIVE_INT, ARRAY1_RANK, tdims1);
+ CHECK(tid1, FAIL, "H5Tarray_create2");
+
+ /* Create a dataset */
+ dataset = H5Dcreate2(fid1, "Dataset1", tid1, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(dataset, FAIL, "H5Dcreate2");
+
+ /* Write dataset to disk */
+ ret = H5Dwrite(dataset, tid1, H5S_ALL, H5S_ALL, H5P_DEFAULT, wdata);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* Close Dataset */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close datatype */
+ ret = H5Tclose(tid1);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ /* Close disk dataspace */
+ ret = H5Sclose(sid1);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close file */
+ ret = H5Fclose(fid1);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Re-open file */
+ fid1 = H5Fopen(FILENAME, H5F_ACC_RDONLY, H5P_DEFAULT);
+ CHECK(fid1, FAIL, "H5Fopen");
+
+ /* Open the dataset */
+ dataset = H5Dopen2(fid1, "Dataset1", H5P_DEFAULT);
+ CHECK(dataset, FAIL, "H5Dopen2");
+
+ /* Get the datatype */
+ tid1 = H5Dget_type(dataset);
+ CHECK(tid1, FAIL, "H5Dget_type");
+
+ /* Check the array rank */
+ ndims = H5Tget_array_ndims(tid1);
+ VERIFY(ndims, ARRAY1_RANK, "H5Tget_array_ndims");
+
+ /* Get the array dimensions */
+ ret = H5Tget_array_dims2(tid1, rdims1);
+ CHECK(ret, FAIL, "H5Tget_array_dims2");
+
+ /* Check the array dimensions */
+ for (i = 0; i < ndims; i++)
+ if (rdims1[i] != tdims1[i]) {
+ TestErrPrintf("Array dimension information doesn't match!, rdims1[%d]=%" PRIuHSIZE
+ ", tdims1[%d]=%" PRIuHSIZE "\n",
+ i, rdims1[i], i, tdims1[i]);
+ continue;
+ } /* end if */
+
+ /* Read dataset from disk */
+ ret = H5Dread(dataset, tid1, H5S_ALL, H5S_ALL, H5P_DEFAULT, rdata);
+ CHECK(ret, FAIL, "H5Dread");
+
+ /* Compare data read in */
+ for (i = 0; i < SPACE1_DIM1; i++)
+ for (j = 0; j < ARRAY1_DIM1; j++)
+ if (wdata[i][j] != rdata[i][j]) {
+ TestErrPrintf("Array data information doesn't match!, wdata[%d][%d]=%d, rdata[%d][%d]=%d\n",
+ (int)i, (int)j, (int)wdata[i][j], (int)i, (int)j, (int)rdata[i][j]);
+ continue;
+ } /* end if */
+
+ /* Close Datatype */
+ ret = H5Tclose(tid1);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ /* Close Dataset */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close file */
+ ret = H5Fclose(fid1);
+ CHECK(ret, FAIL, "H5Fclose");
+} /* end test_array_atomic_1d() */
+
+/*-------------------------------------------------------------------------
+ * Function: test_array_funcs
+ *
+ * Purpose: Test some type functions that are and aren't supposed to
+ * work with array type.
+ *
+ * Return: void
+ *
+ *-------------------------------------------------------------------------
+ */
+static void
+test_array_funcs(void)
+{
+ hid_t type; /* Datatype ID */
+ hsize_t tdims1[] = {ARRAY1_DIM1};
+ size_t size;
+ H5T_pad_t inpad;
+ H5T_norm_t norm;
+ H5T_cset_t cset;
+ H5T_str_t strpad;
+ herr_t ret; /* Generic return value */
+
+ /* Create a datatype to refer to */
+ type = H5Tarray_create2(H5T_IEEE_F32BE, ARRAY1_RANK, tdims1);
+ CHECK(type, FAIL, "H5Tarray_create2");
+
+ size = H5Tget_precision(type);
+ CHECK(size, 0, "H5Tget_precision");
+
+ size = H5Tget_size(type);
+ CHECK(size, 0, "H5Tget_size");
+
+ size = H5Tget_ebias(type);
+ CHECK(size, 0, "H5Tget_ebias");
+
+ ret = H5Tset_pad(type, H5T_PAD_ZERO, H5T_PAD_ONE);
+ CHECK(ret, FAIL, "H5Tset_pad");
+
+ inpad = H5Tget_inpad(type);
+ CHECK(inpad, FAIL, "H5Tget_inpad");
+
+ norm = H5Tget_norm(type);
+ CHECK(norm, FAIL, "H5Tget_norm");
+
+ ret = H5Tset_offset(type, (size_t)16);
+ CHECK(ret, FAIL, "H5Tset_offset");
+
+ H5E_BEGIN_TRY
+ {
+ cset = H5Tget_cset(type);
+ }
+ H5E_END_TRY;
+ VERIFY(cset, FAIL, "H5Tget_cset");
+
+ H5E_BEGIN_TRY
+ {
+ strpad = H5Tget_strpad(type);
+ }
+ H5E_END_TRY;
+ VERIFY(strpad, FAIL, "H5Tget_strpad");
+
+ /* Close datatype */
+ ret = H5Tclose(type);
+ CHECK(ret, FAIL, "H5Tclose");
+} /* end test_array_funcs() */
+
+/*-------------------------------------------------------------------------
+ * Function: test_array_atomic_3d
+ *
+ * Purpose: Test basic array datatype code.
+ * Tests 3-D array of atomic datatypes.
+ *
+ * Return: void
+ *
+ *-------------------------------------------------------------------------
+ */
+static void
+test_array_atomic_3d(void)
+{
+ int wdata[SPACE1_DIM1][ARRAY2_DIM1][ARRAY2_DIM2][ARRAY2_DIM3]; /* Information to write */
+ int rdata[SPACE1_DIM1][ARRAY2_DIM1][ARRAY2_DIM2][ARRAY2_DIM3]; /* Information read in */
+ hid_t fid; /* HDF5 File IDs */
+ hid_t dataset; /* Dataset ID */
+ hid_t sid; /* Dataspace ID */
+ hid_t tid; /* Datatype ID */
+ hsize_t sdims1[] = {SPACE1_DIM1};
+ hsize_t tdims2[] = {ARRAY2_DIM1, ARRAY2_DIM2, ARRAY2_DIM3};
+ int ndims; /* Array rank for reading */
+ hsize_t rdims2[H5S_MAX_RANK]; /* Array dimensions for reading */
+ int i, j, k, l; /* counting variables */
+ herr_t ret; /* Generic return value */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing 3-D Array of Atomic Datatypes Functionality\n"));
+
+ /* Allocate and initialize array data to write */
+ for (i = 0; i < SPACE1_DIM1; i++)
+ for (j = 0; j < ARRAY2_DIM1; j++)
+ for (k = 0; k < ARRAY2_DIM2; k++)
+ for (l = 0; l < ARRAY2_DIM3; l++)
+ wdata[i][j][k][l] = i * 1000 + j * 100 + k * 10 + l;
+
+ /* Create file */
+ fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(fid, FAIL, "H5Fcreate");
+
+ /* Create dataspace for datasets */
+ sid = H5Screate_simple(SPACE1_RANK, sdims1, NULL);
+ CHECK(sid, FAIL, "H5Screate_simple");
+
+ /* Create a datatype to refer to */
+ tid = H5Tarray_create2(H5T_NATIVE_INT, ARRAY2_RANK, tdims2);
+ CHECK(tid, FAIL, "H5Tarray_create2");
+
+ /* Create a dataset */
+ dataset = H5Dcreate2(fid, "Dataset1", tid, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(dataset, FAIL, "H5Dcreate2");
+
+ /* Write dataset to disk */
+ ret = H5Dwrite(dataset, tid, H5S_ALL, H5S_ALL, H5P_DEFAULT, wdata);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* Close Dataset */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close datatype */
+ ret = H5Tclose(tid);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ /* Close disk dataspace */
+ ret = H5Sclose(sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close file */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Re-open file */
+ fid = H5Fopen(FILENAME, H5F_ACC_RDONLY, H5P_DEFAULT);
+ CHECK(fid, FAIL, "H5Fopen");
+
+ /* Open the dataset */
+ dataset = H5Dopen2(fid, "Dataset1", H5P_DEFAULT);
+ CHECK(dataset, FAIL, "H5Dopen2");
+
+ /* Get the datatype */
+ tid = H5Dget_type(dataset);
+ CHECK(tid, FAIL, "H5Dget_type");
+
+ /* Check the array rank */
+ ndims = H5Tget_array_ndims(tid);
+ VERIFY(ndims, ARRAY2_RANK, "H5Tget_array_ndims");
+
+ /* Get the array dimensions */
+ ret = H5Tget_array_dims2(tid, rdims2);
+ CHECK(ret, FAIL, "H5Tget_array_dims2");
+
+ /* Check the array dimensions */
+ for (i = 0; i < ndims; i++)
+ if (rdims2[i] != tdims2[i]) {
+ TestErrPrintf("Array dimension information doesn't match!, rdims2[%d]=%d, tdims2[%d]=%d\n",
+ (int)i, (int)rdims2[i], (int)i, (int)tdims2[i]);
+ continue;
+ } /* end if */
+
+ /* Read dataset from disk */
+ ret = H5Dread(dataset, tid, H5S_ALL, H5S_ALL, H5P_DEFAULT, rdata);
+ CHECK(ret, FAIL, "H5Dread");
+
+ /* Compare data read in */
+ for (i = 0; i < SPACE1_DIM1; i++)
+ for (j = 0; j < ARRAY2_DIM1; j++)
+ for (k = 0; k < ARRAY2_DIM2; k++)
+ for (l = 0; l < ARRAY2_DIM3; l++)
+ if (wdata[i][j][k][l] != rdata[i][j][k][l]) {
+ TestErrPrintf("Array data information doesn't match!, wdata[%d][%d][%d][%d]=%d, "
+ "rdata[%d][%d][%d][%d]=%d\n",
+ (int)i, (int)j, (int)k, (int)l, (int)wdata[i][j][k][l], (int)i, (int)j,
+ (int)k, (int)l, (int)rdata[i][j][k][l]);
+ continue;
+ } /* end if */
+
+ /* Close Datatype */
+ ret = H5Tclose(tid);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ /* Close Dataset */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close file */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+
+} /* end test_array_atomic_3d() */
+
+/*-------------------------------------------------------------------------
+ * Function: test_array_array_atomic
+ *
+ * Purpose: Test basic array datatype code.
+ *              Tests 1-D array of 2-D arrays of atomic datatypes.
+ *
+ * Return: void
+ *
+ *-------------------------------------------------------------------------
+ */
+static void
+test_array_array_atomic(void)
+{
+ int wdata[SPACE1_DIM1][ARRAY1_DIM1][ARRAY3_DIM1][ARRAY3_DIM2]; /* Information to write */
+ int rdata[SPACE1_DIM1][ARRAY1_DIM1][ARRAY3_DIM1][ARRAY3_DIM2]; /* Information read in */
+ hid_t fid; /* HDF5 File IDs */
+ hid_t dataset; /* Dataset ID */
+ hid_t sid; /* Dataspace ID */
+ hid_t tid1; /* 1-D array Datatype ID */
+ hid_t tid2; /* 2-D array Datatype ID */
+ hsize_t sdims1[] = {SPACE1_DIM1};
+ hsize_t tdims1[] = {ARRAY1_DIM1};
+ hsize_t tdims2[] = {ARRAY3_DIM1, ARRAY3_DIM2};
+ int ndims1; /* Array rank for reading */
+ int ndims2; /* Array rank for reading */
+ hsize_t rdims1[H5S_MAX_RANK]; /* Array dimensions for reading */
+ hsize_t rdims2[H5S_MAX_RANK]; /* Array dimensions for reading */
+ int i, j, k, l; /* counting variables */
+ herr_t ret; /* Generic return value */
+
+ /* Output message about test being performed */
+    MESSAGE(5, ("Testing 1-D Array of 2-D Arrays of Atomic Datatypes Functionality\n"));
+
+ /* Allocate and initialize array data to write */
+ for (i = 0; i < SPACE1_DIM1; i++)
+ for (j = 0; j < ARRAY1_DIM1; j++)
+ for (k = 0; k < ARRAY3_DIM1; k++)
+ for (l = 0; l < ARRAY3_DIM2; l++)
+ wdata[i][j][k][l] = i * 1000 + j * 100 + k * 10 + l;
+
+ /* Create file */
+ fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(fid, FAIL, "H5Fcreate");
+
+ /* Create dataspace for datasets */
+ sid = H5Screate_simple(SPACE1_RANK, sdims1, NULL);
+ CHECK(sid, FAIL, "H5Screate_simple");
+
+ /* Create a 2-D datatype to refer to */
+ tid2 = H5Tarray_create2(H5T_NATIVE_INT, ARRAY3_RANK, tdims2);
+ CHECK(tid2, FAIL, "H5Tarray_create2");
+
+ /* Create a 1-D datatype to refer to */
+ tid1 = H5Tarray_create2(tid2, ARRAY1_RANK, tdims1);
+ CHECK(tid1, FAIL, "H5Tarray_create2");
+
+ /* Create a dataset */
+ dataset = H5Dcreate2(fid, "Dataset1", tid1, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(dataset, FAIL, "H5Dcreate2");
+
+ /* Write dataset to disk */
+ ret = H5Dwrite(dataset, tid1, H5S_ALL, H5S_ALL, H5P_DEFAULT, wdata);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* Close Dataset */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close datatypes */
+ ret = H5Tclose(tid1);
+ CHECK(ret, FAIL, "H5Tclose");
+ ret = H5Tclose(tid2);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ /* Close disk dataspace */
+ ret = H5Sclose(sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close file */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Re-open file */
+ fid = H5Fopen(FILENAME, H5F_ACC_RDONLY, H5P_DEFAULT);
+ CHECK(fid, FAIL, "H5Fopen");
+
+ /* Open the dataset */
+ dataset = H5Dopen2(fid, "Dataset1", H5P_DEFAULT);
+ CHECK(dataset, FAIL, "H5Dopen2");
+
+ /* Get the 1-D datatype */
+ tid1 = H5Dget_type(dataset);
+ CHECK(tid1, FAIL, "H5Dget_type");
+
+ /* Check the 1-D array rank */
+ ndims1 = H5Tget_array_ndims(tid1);
+ VERIFY(ndims1, ARRAY1_RANK, "H5Tget_array_ndims");
+
+ /* Get the 1-D array dimensions */
+ ret = H5Tget_array_dims2(tid1, rdims1);
+ CHECK(ret, FAIL, "H5Tget_array_dims2");
+
+ /* Check the array dimensions */
+ for (i = 0; i < ndims1; i++)
+ if (rdims1[i] != tdims1[i]) {
+ TestErrPrintf("Array dimension information doesn't match!, rdims1[%d]=%" PRIuHSIZE
+ ", tdims1[%d]=%" PRIuHSIZE "\n",
+ i, rdims1[i], i, tdims1[i]);
+ continue;
+ } /* end if */
+
+ /* Get the 2-D datatype */
+ tid2 = H5Tget_super(tid1);
+ CHECK(tid2, FAIL, "H5Tget_super");
+
+ /* Check the 2-D array rank */
+ ndims2 = H5Tget_array_ndims(tid2);
+ VERIFY(ndims2, ARRAY3_RANK, "H5Tget_array_ndims");
+
+ /* Get the 2-D array dimensions */
+ ret = H5Tget_array_dims2(tid2, rdims2);
+ CHECK(ret, FAIL, "H5Tget_array_dims2");
+
+ /* Check the array dimensions */
+ for (i = 0; i < ndims2; i++)
+ if (rdims2[i] != tdims2[i]) {
+ TestErrPrintf("Array dimension information doesn't match!, rdims2[%d]=%d, tdims2[%d]=%d\n",
+ (int)i, (int)rdims2[i], (int)i, (int)tdims2[i]);
+ continue;
+ } /* end if */
+
+ /* Read dataset from disk */
+ ret = H5Dread(dataset, tid1, H5S_ALL, H5S_ALL, H5P_DEFAULT, rdata);
+ CHECK(ret, FAIL, "H5Dread");
+
+ /* Compare data read in */
+ for (i = 0; i < SPACE1_DIM1; i++)
+ for (j = 0; j < ARRAY1_DIM1; j++)
+ for (k = 0; k < ARRAY3_DIM1; k++)
+ for (l = 0; l < ARRAY3_DIM2; l++)
+ if (wdata[i][j][k][l] != rdata[i][j][k][l]) {
+ TestErrPrintf("Array data information doesn't match!, wdata[%d][%d][%d][%d]=%d, "
+ "rdata[%d][%d][%d][%d]=%d\n",
+ (int)i, (int)j, (int)k, (int)l, (int)wdata[i][j][k][l], (int)i, (int)j,
+ (int)k, (int)l, (int)rdata[i][j][k][l]);
+ continue;
+ } /* end if */
+
+ /* Close Datatypes */
+ ret = H5Tclose(tid1);
+ CHECK(ret, FAIL, "H5Tclose");
+ ret = H5Tclose(tid2);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ /* Close Dataset */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close file */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+} /* end test_array_array_atomic() */
+
+/*-------------------------------------------------------------------------
+ * Function: test_array_compound_atomic
+ *
+ * Purpose: Test basic array datatype code.
+ * Tests 1-D array of compound datatypes (with no array fields).
+ *
+ * Return: void
+ *
+ *-------------------------------------------------------------------------
+ */
+static void
+test_array_compound_atomic(void)
+{
+ typedef struct { /* Typedef for compound datatype */
+ int i;
+ float f;
+ } s1_t;
+
+ s1_t wdata[SPACE1_DIM1][ARRAY1_DIM1]; /* Information to write */
+ s1_t rdata[SPACE1_DIM1][ARRAY1_DIM1]; /* Information read in */
+ hid_t fid1; /* HDF5 File IDs */
+ hid_t dataset; /* Dataset ID */
+ hid_t sid1; /* Dataspace ID */
+ hid_t tid1; /* Array Datatype ID */
+ hid_t tid2; /* Compound Datatype ID */
+ hsize_t sdims1[] = {SPACE1_DIM1};
+ hsize_t tdims1[] = {ARRAY1_DIM1};
+ int ndims; /* Array rank for reading */
+ hsize_t rdims1[H5S_MAX_RANK]; /* Array dimensions for reading */
+ int nmemb; /* Number of compound members */
+ char *mname; /* Name of compound field */
+ size_t off; /* Offset of compound field */
+ hid_t mtid; /* Datatype ID for field */
+ int i, j; /* counting variables */
+ herr_t ret; /* Generic return value */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing 1-D Array of Compound Atomic Datatypes Functionality\n"));
+
+ /* Initialize array data to write */
+ for (i = 0; i < SPACE1_DIM1; i++)
+ for (j = 0; j < ARRAY1_DIM1; j++) {
+ wdata[i][j].i = i * 10 + j;
+ wdata[i][j].f = (float)i * 2.5F + (float)j;
+ } /* end for */
+
+ /* Create file */
+ fid1 = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(fid1, FAIL, "H5Fcreate");
+
+ /* Create dataspace for datasets */
+ sid1 = H5Screate_simple(SPACE1_RANK, sdims1, NULL);
+ CHECK(sid1, FAIL, "H5Screate_simple");
+
+ /* Create a compound datatype to refer to */
+ tid2 = H5Tcreate(H5T_COMPOUND, sizeof(s1_t));
+ CHECK(tid2, FAIL, "H5Tcreate");
+
+ /* Insert integer field */
+ ret = H5Tinsert(tid2, "i", HOFFSET(s1_t, i), H5T_NATIVE_INT);
+ CHECK(ret, FAIL, "H5Tinsert");
+
+ /* Insert float field */
+ ret = H5Tinsert(tid2, "f", HOFFSET(s1_t, f), H5T_NATIVE_FLOAT);
+ CHECK(ret, FAIL, "H5Tinsert");
+
+ /* Create an array datatype to refer to */
+ tid1 = H5Tarray_create2(tid2, ARRAY1_RANK, tdims1);
+ CHECK(tid1, FAIL, "H5Tarray_create2");
+
+ /* Close compound datatype */
+ ret = H5Tclose(tid2);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ /* Create a dataset */
+ dataset = H5Dcreate2(fid1, "Dataset1", tid1, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(dataset, FAIL, "H5Dcreate2");
+
+ /* Write dataset to disk */
+ ret = H5Dwrite(dataset, tid1, H5S_ALL, H5S_ALL, H5P_DEFAULT, wdata);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* Close Dataset */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close datatype */
+ ret = H5Tclose(tid1);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ /* Close disk dataspace */
+ ret = H5Sclose(sid1);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close file */
+ ret = H5Fclose(fid1);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Re-open file */
+ fid1 = H5Fopen(FILENAME, H5F_ACC_RDONLY, H5P_DEFAULT);
+ CHECK(fid1, FAIL, "H5Fopen");
+
+ /* Open the dataset */
+ dataset = H5Dopen2(fid1, "Dataset1", H5P_DEFAULT);
+ CHECK(dataset, FAIL, "H5Dopen2");
+
+ /* Get the datatype */
+ tid1 = H5Dget_type(dataset);
+ CHECK(tid1, FAIL, "H5Dget_type");
+
+ /* Check the array rank */
+ ndims = H5Tget_array_ndims(tid1);
+ VERIFY(ndims, ARRAY1_RANK, "H5Tget_array_ndims");
+
+ /* Get the array dimensions */
+ ret = H5Tget_array_dims2(tid1, rdims1);
+ CHECK(ret, FAIL, "H5Tget_array_dims2");
+
+ /* Check the array dimensions */
+ for (i = 0; i < ndims; i++)
+ if (rdims1[i] != tdims1[i]) {
+ TestErrPrintf("Array dimension information doesn't match!, rdims1[%d]=%" PRIuHSIZE
+ ", tdims1[%d]=%" PRIuHSIZE "\n",
+ i, rdims1[i], i, tdims1[i]);
+ continue;
+ } /* end if */
+
+ /* Get the compound datatype */
+ tid2 = H5Tget_super(tid1);
+ CHECK(tid2, FAIL, "H5Tget_super");
+
+ /* Check the number of members */
+ nmemb = H5Tget_nmembers(tid2);
+ VERIFY(nmemb, 2, "H5Tget_nmembers");
+
+ /* Check the 1st field's name */
+ mname = H5Tget_member_name(tid2, 0);
+ CHECK_PTR(mname, "H5Tget_member_name");
+ if (HDstrcmp(mname, "i") != 0)
+ TestErrPrintf("Compound field name doesn't match!, mname=%s\n", mname);
+ H5free_memory(mname);
+
+ /* Check the 1st field's offset */
+ off = H5Tget_member_offset(tid2, 0);
+ VERIFY(off, HOFFSET(s1_t, i), "H5Tget_member_offset");
+
+ /* Check the 1st field's datatype */
+ mtid = H5Tget_member_type(tid2, 0);
+ CHECK(mtid, FAIL, "H5Tget_member_type");
+ if ((ret = H5Tequal(mtid, H5T_NATIVE_INT)) <= 0)
+ TestErrPrintf("Compound data type is incorrect!, ret=%d\n", (int)ret);
+ ret = H5Tclose(mtid);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ /* Check the 2nd field's name */
+ mname = H5Tget_member_name(tid2, 1);
+ CHECK_PTR(mname, "H5Tget_member_name");
+ if (HDstrcmp(mname, "f") != 0)
+ TestErrPrintf("Compound field name doesn't match!, mname=%s\n", mname);
+ H5free_memory(mname);
+
+ /* Check the 2nd field's offset */
+ off = H5Tget_member_offset(tid2, 1);
+ VERIFY(off, HOFFSET(s1_t, f), "H5Tget_member_offset");
+
+ /* Check the 2nd field's datatype */
+ mtid = H5Tget_member_type(tid2, 1);
+ CHECK(mtid, FAIL, "H5Tget_member_type");
+ if ((ret = H5Tequal(mtid, H5T_NATIVE_FLOAT)) <= 0)
+ TestErrPrintf("Compound data type is incorrect!, ret=%d\n", (int)ret);
+ ret = H5Tclose(mtid);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ /* Close Compound Datatype */
+ ret = H5Tclose(tid2);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ /* Read dataset from disk */
+ ret = H5Dread(dataset, tid1, H5S_ALL, H5S_ALL, H5P_DEFAULT, rdata);
+ CHECK(ret, FAIL, "H5Dread");
+
+ /* Compare data read in */
+ for (i = 0; i < SPACE1_DIM1; i++)
+ for (j = 0; j < ARRAY1_DIM1; j++) {
+ if (wdata[i][j].i != rdata[i][j].i) {
+ TestErrPrintf(
+ "Array data information doesn't match!, wdata[%d][%d].i=%d, rdata[%d][%d].i=%d\n", (int)i,
+ (int)j, (int)wdata[i][j].i, (int)i, (int)j, (int)rdata[i][j].i);
+ continue;
+ } /* end if */
+ if (!H5_FLT_ABS_EQUAL(wdata[i][j].f, rdata[i][j].f)) {
+ TestErrPrintf(
+ "Array data information doesn't match!, wdata[%d][%d].f=%f, rdata[%d][%d].f=%f\n", (int)i,
+ (int)j, (double)wdata[i][j].f, (int)i, (int)j, (double)rdata[i][j].f);
+ continue;
+ } /* end if */
+ } /* end for */
+
+ /* Close Datatype */
+ ret = H5Tclose(tid1);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ /* Close Dataset */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close file */
+ ret = H5Fclose(fid1);
+ CHECK(ret, FAIL, "H5Fclose");
+} /* end test_array_compound_atomic() */
+
+/*-------------------------------------------------------------------------
+ * Function: test_array_compound_array
+ *
+ * Purpose: Test basic array datatype code.
+ * Tests 1-D array of compound datatypes (with array fields).
+ *
+ * Return: void
+ *
+ *-------------------------------------------------------------------------
+ */
+static void
+test_array_compound_array(void)
+{
+ typedef struct { /* Typedef for compound datatype */
+ int i;
+ float f[ARRAY1_DIM1];
+ } s1_t;
+
+ s1_t wdata[SPACE1_DIM1][ARRAY1_DIM1]; /* Information to write */
+ s1_t rdata[SPACE1_DIM1][ARRAY1_DIM1]; /* Information read in */
+ hid_t fid1; /* HDF5 File IDs */
+ hid_t dataset; /* Dataset ID */
+ hid_t sid1; /* Dataspace ID */
+ hid_t tid1; /* Array Datatype ID */
+ hid_t tid2; /* Compound Datatype ID */
+ hid_t tid3; /* Nested Array Datatype ID */
+ hsize_t sdims1[] = {SPACE1_DIM1};
+ hsize_t tdims1[] = {ARRAY1_DIM1};
+ int ndims; /* Array rank for reading */
+ hsize_t rdims1[H5S_MAX_RANK]; /* Array dimensions for reading */
+ int nmemb; /* Number of compound members */
+ char *mname; /* Name of compound field */
+ size_t off; /* Offset of compound field */
+ hid_t mtid; /* Datatype ID for field */
+ H5T_class_t mclass; /* Datatype class for field */
+ int i, j, k; /* counting variables */
+ herr_t ret; /* Generic return value */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing 1-D Array of Compound Array Datatypes Functionality\n"));
+
+ /* Initialize array data to write */
+ for (i = 0; i < SPACE1_DIM1; i++)
+ for (j = 0; j < ARRAY1_DIM1; j++) {
+ wdata[i][j].i = i * 10 + j;
+ for (k = 0; k < ARRAY1_DIM1; k++)
+ wdata[i][j].f[k] = (float)i * 10.0F + (float)j * 2.5F + (float)k;
+ } /* end for */
+
+ /* Create file */
+ fid1 = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(fid1, FAIL, "H5Fcreate");
+
+ /* Create dataspace for datasets */
+ sid1 = H5Screate_simple(SPACE1_RANK, sdims1, NULL);
+ CHECK(sid1, FAIL, "H5Screate_simple");
+
+ /* Create a compound datatype to refer to */
+ tid2 = H5Tcreate(H5T_COMPOUND, sizeof(s1_t));
+ CHECK(tid2, FAIL, "H5Tcreate");
+
+ /* Insert integer field */
+ ret = H5Tinsert(tid2, "i", HOFFSET(s1_t, i), H5T_NATIVE_INT);
+ CHECK(ret, FAIL, "H5Tinsert");
+
+ /* Create an array of floats datatype */
+ tid3 = H5Tarray_create2(H5T_NATIVE_FLOAT, ARRAY1_RANK, tdims1);
+ CHECK(tid3, FAIL, "H5Tarray_create2");
+
+ /* Insert float array field */
+ ret = H5Tinsert(tid2, "f", HOFFSET(s1_t, f), tid3);
+ CHECK(ret, FAIL, "H5Tinsert");
+
+ /* Close array of floats field datatype */
+ ret = H5Tclose(tid3);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ /* Create an array datatype to refer to */
+ tid1 = H5Tarray_create2(tid2, ARRAY1_RANK, tdims1);
+ CHECK(tid1, FAIL, "H5Tarray_create2");
+
+ /* Close compound datatype */
+ ret = H5Tclose(tid2);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ /* Create a dataset */
+ dataset = H5Dcreate2(fid1, "Dataset1", tid1, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(dataset, FAIL, "H5Dcreate2");
+
+ /* Write dataset to disk */
+ ret = H5Dwrite(dataset, tid1, H5S_ALL, H5S_ALL, H5P_DEFAULT, wdata);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* Close Dataset */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close datatype */
+ ret = H5Tclose(tid1);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ /* Close disk dataspace */
+ ret = H5Sclose(sid1);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close file */
+ ret = H5Fclose(fid1);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Re-open file */
+ fid1 = H5Fopen(FILENAME, H5F_ACC_RDONLY, H5P_DEFAULT);
+ CHECK(fid1, FAIL, "H5Fopen");
+
+ /* Open the dataset */
+ dataset = H5Dopen2(fid1, "Dataset1", H5P_DEFAULT);
+ CHECK(dataset, FAIL, "H5Dopen2");
+
+ /* Get the datatype */
+ tid1 = H5Dget_type(dataset);
+ CHECK(tid1, FAIL, "H5Dget_type");
+
+ /* Check the array rank */
+ ndims = H5Tget_array_ndims(tid1);
+ VERIFY(ndims, ARRAY1_RANK, "H5Tget_array_ndims");
+
+ /* Get the array dimensions */
+ ret = H5Tget_array_dims2(tid1, rdims1);
+ CHECK(ret, FAIL, "H5Tget_array_dims2");
+
+ /* Check the array dimensions */
+ for (i = 0; i < ndims; i++)
+ if (rdims1[i] != tdims1[i]) {
+ TestErrPrintf("Array dimension information doesn't match!, rdims1[%d]=%" PRIuHSIZE
+ ", tdims1[%d]=%" PRIuHSIZE "\n",
+ i, rdims1[i], i, tdims1[i]);
+ continue;
+ } /* end if */
+
+ /* Get the compound datatype */
+ tid2 = H5Tget_super(tid1);
+ CHECK(tid2, FAIL, "H5Tget_super");
+
+ /* Check the number of members */
+ nmemb = H5Tget_nmembers(tid2);
+ VERIFY(nmemb, 2, "H5Tget_nmembers");
+
+ /* Check the 1st field's name */
+ mname = H5Tget_member_name(tid2, 0);
+ CHECK_PTR(mname, "H5Tget_member_name");
+ if (HDstrcmp(mname, "i") != 0)
+ TestErrPrintf("Compound field name doesn't match!, mname=%s\n", mname);
+ H5free_memory(mname);
+
+ /* Check the 1st field's offset */
+ off = H5Tget_member_offset(tid2, 0);
+ VERIFY(off, HOFFSET(s1_t, i), "H5Tget_member_offset");
+
+ /* Check the 1st field's datatype */
+ mtid = H5Tget_member_type(tid2, 0);
+ CHECK(mtid, FAIL, "H5Tget_member_type");
+ if ((ret = H5Tequal(mtid, H5T_NATIVE_INT)) <= 0)
+ TestErrPrintf("Compound data type is incorrect!, ret=%d\n", (int)ret);
+ ret = H5Tclose(mtid);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ /* Check the 2nd field's name */
+ mname = H5Tget_member_name(tid2, 1);
+ CHECK_PTR(mname, "H5Tget_member_name");
+ if (HDstrcmp(mname, "f") != 0)
+ TestErrPrintf("Compound field name doesn't match!, mname=%s\n", mname);
+ H5free_memory(mname);
+
+ /* Check the 2nd field's offset */
+ off = H5Tget_member_offset(tid2, 1);
+ VERIFY(off, HOFFSET(s1_t, f), "H5Tget_member_offset");
+
+ /* Check the 2nd field's datatype */
+ mtid = H5Tget_member_type(tid2, 1);
+ CHECK(mtid, FAIL, "H5Tget_member_type");
+
+ /* Get the 2nd field's class */
+ mclass = H5Tget_class(mtid);
+ VERIFY(mclass, H5T_ARRAY, "H5Tget_class");
+
+ /* Check the array rank */
+ ndims = H5Tget_array_ndims(mtid);
+ VERIFY(ndims, ARRAY1_RANK, "H5Tget_array_ndims");
+
+ /* Get the array dimensions */
+ ret = H5Tget_array_dims2(mtid, rdims1);
+ CHECK(ret, FAIL, "H5Tget_array_dims2");
+
+ /* Check the array dimensions */
+ for (i = 0; i < ndims; i++)
+ if (rdims1[i] != tdims1[i]) {
+ TestErrPrintf("Nested array dimension information doesn't match!, rdims1[%d]=%" PRIuHSIZE
+ ", tdims1[%d]=%" PRIuHSIZE "\n",
+ i, rdims1[i], i, tdims1[i]);
+ continue;
+ } /* end if */
+
+ /* Check the nested array's datatype */
+ tid3 = H5Tget_super(mtid);
+ CHECK(tid3, FAIL, "H5Tget_super");
+
+ if ((ret = H5Tequal(tid3, H5T_NATIVE_FLOAT)) <= 0)
+ TestErrPrintf("Compound data type is incorrect!, ret=%d\n", (int)ret);
+
+ /* Close the array's base type datatype */
+ ret = H5Tclose(tid3);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ /* Close the member datatype */
+ ret = H5Tclose(mtid);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ /* Close Compound Datatype */
+ ret = H5Tclose(tid2);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ /* Read dataset from disk */
+ ret = H5Dread(dataset, tid1, H5S_ALL, H5S_ALL, H5P_DEFAULT, rdata);
+ CHECK(ret, FAIL, "H5Dread");
+
+ /* Compare data read in */
+ for (i = 0; i < SPACE1_DIM1; i++) {
+ for (j = 0; j < ARRAY1_DIM1; j++) {
+ if (wdata[i][j].i != rdata[i][j].i) {
+ TestErrPrintf(
+ "Array data information doesn't match!, wdata[%d][%d].i=%d, rdata[%d][%d].i=%d\n", (int)i,
+ (int)j, (int)wdata[i][j].i, (int)i, (int)j, (int)rdata[i][j].i);
+ continue;
+ } /* end if */
+ for (k = 0; k < ARRAY1_DIM1; k++)
+ if (!H5_FLT_ABS_EQUAL(wdata[i][j].f[k], rdata[i][j].f[k])) {
+ TestErrPrintf("Array data information doesn't match!, wdata[%d][%d].f[%d]=%f, "
+ "rdata[%d][%d].f[%d]=%f\n",
+ (int)i, (int)j, (int)k, (double)wdata[i][j].f[k], (int)i, (int)j, (int)k,
+ (double)rdata[i][j].f[k]);
+ continue;
+ } /* end if */
+ } /* end for */
+ } /* end for */
+
+ /* Close Datatype */
+ ret = H5Tclose(tid1);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ /* Close Dataset */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close file */
+ ret = H5Fclose(fid1);
+ CHECK(ret, FAIL, "H5Fclose");
+
+} /* end test_array_compound_array() */
+
+/*-------------------------------------------------------------------------
+ * Function: test_array_alloc_custom
+ *
+ * Purpose: Memory allocator for testing VL datatype custom memory
+ * allocation routines.
+ *
+ * This routine just uses malloc to allocate the memory and
+ * increments the amount of memory allocated.
+ *
+ * Return:
+ *
+ * Success: A memory buffer
+ * Failure: NULL
+ *
+ *-------------------------------------------------------------------------
+ */
+void *
+test_array_alloc_custom(size_t size, void *info)
+{
+ void *ret_value = NULL; /* Pointer to return */
+ size_t *mem_used = (size_t *)info; /* Pointer to the memory used */
+ size_t extra; /* Extra space needed */
+
+ /*
+ * This weird contortion is required on the DEC Alpha to keep the
+ * alignment correct - QAK
+ */
+ extra = MAX(sizeof(void *), sizeof(size_t));
+
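+ /*
+ * Each allocation is laid out as [size header of 'extra' bytes][user data];
+ * the requested size is stored in the header so test_array_free_custom()
+ * can find it again and decrement the running total by the right amount.
+ */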
+ if ((ret_value = HDmalloc(extra + size)) != NULL) {
+ *(size_t *)ret_value = size;
+ *mem_used += size;
+
+ /* Return the address just past the size header */
+ ret_value = ((unsigned char *)ret_value) + extra;
+ } /* end if */
+
+ return ret_value;
+} /* end test_array_alloc_custom() */
+
+/*-------------------------------------------------------------------------
+ * Function: test_array_free_custom
+ *
+ * Purpose: Memory free function for testing VL datatype custom memory
+ * allocation routines.
+ *
+ * This routine just uses free to free the memory and
+ * decrements the amount of memory allocated.
+ *
+ * Return: void
+ *
+ *-------------------------------------------------------------------------
+ */
+void
+test_array_free_custom(void *_mem, void *info)
+{
+ unsigned char *mem = NULL; /* Pointer to mem to be freed */
+ size_t *mem_used = (size_t *)info; /* Pointer to the memory used */
+ size_t extra; /* Extra space needed */
+
+ /*
+ * This weird contortion is required on the DEC Alpha to keep the
+ * alignment correct - QAK
+ */
+ extra = MAX(sizeof(void *), sizeof(size_t));
+
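+ /* Step back over the size header written by test_array_alloc_custom()
+ * to recover both the original malloc'ed pointer and the stored size.
+ */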
+ if (_mem != NULL) {
+ mem = ((unsigned char *)_mem) - extra;
+ *mem_used -= *(size_t *)((void *)mem);
+ HDfree(mem);
+ } /* end if */
+
+} /* end test_array_free_custom() */
+
+/*-------------------------------------------------------------------------
+ * Function: test_array_vlen_atomic
+ *
+ * Purpose: Test basic array datatype code.
+ * Tests 1-D array of atomic VL datatypes.
+ *
+ * Return: void
+ *
+ *-------------------------------------------------------------------------
+ */
+static void
+test_array_vlen_atomic(void)
+{
+ hvl_t wdata[SPACE1_DIM1][ARRAY1_DIM1]; /* Information to write */
+ hvl_t rdata[SPACE1_DIM1][ARRAY1_DIM1]; /* Information read in */
+ hid_t fid1; /* HDF5 File IDs */
+ hid_t dataset; /* Dataset ID */
+ hid_t sid1; /* Dataspace ID */
+ hid_t tid1; /* Array Datatype ID */
+ hid_t tid2; /* VL Datatype ID */
+ hid_t tid3; /* Atomic Datatype ID */
+ hsize_t sdims1[] = {SPACE1_DIM1};
+ hsize_t tdims1[] = {ARRAY1_DIM1};
+ int ndims; /* Array rank for reading */
+ hsize_t rdims1[H5S_MAX_RANK]; /* Array dimensions for reading */
+ H5T_class_t mclass; /* Datatype class for VL */
+ hid_t xfer_pid; /* Dataset transfer property list ID */
+ hsize_t size; /* Number of bytes which will be used */
+ size_t mem_used = 0; /* Memory used during allocation */
+ int i, j, k; /* counting variables */
+ herr_t ret; /* Generic return value */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing 1-D Array of Atomic Variable-Length Datatypes Functionality\n"));
+
+ /* Initialize array data to write */
+ for (i = 0; i < SPACE1_DIM1; i++)
+ for (j = 0; j < ARRAY1_DIM1; j++) {
+ wdata[i][j].p = HDmalloc((size_t)(i + j + 1) * sizeof(unsigned int));
+ wdata[i][j].len = (size_t)(i + j + 1);
+ for (k = 0; k < (i + j + 1); k++)
+ ((unsigned int *)wdata[i][j].p)[k] = (unsigned int)(i * 100 + j * 10 + k);
+ } /* end for */
+
+ /* Create file */
+ fid1 = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(fid1, FAIL, "H5Fcreate");
+
+ /* Create dataspace for datasets */
+ sid1 = H5Screate_simple(SPACE1_RANK, sdims1, NULL);
+ CHECK(sid1, FAIL, "H5Screate_simple");
+
+ /* Create a VL datatype to refer to */
+ tid2 = H5Tvlen_create(H5T_NATIVE_UINT);
+ CHECK(tid2, FAIL, "H5Tvlen_create");
+
+ /* Create an array datatype to refer to */
+ tid1 = H5Tarray_create2(tid2, ARRAY1_RANK, tdims1);
+ CHECK(tid1, FAIL, "H5Tarray_create2");
+
+ /* Close VL datatype */
+ ret = H5Tclose(tid2);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ /* Create a dataset */
+ dataset = H5Dcreate2(fid1, "Dataset1", tid1, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(dataset, FAIL, "H5Dcreate2");
+
+ /* Write dataset to disk */
+ ret = H5Dwrite(dataset, tid1, H5S_ALL, H5S_ALL, H5P_DEFAULT, wdata);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* Close Dataset */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close datatype */
+ ret = H5Tclose(tid1);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ /* Close disk dataspace */
+ ret = H5Sclose(sid1);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close file */
+ ret = H5Fclose(fid1);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Re-open file */
+ fid1 = H5Fopen(FILENAME, H5F_ACC_RDONLY, H5P_DEFAULT);
+ CHECK(fid1, FAIL, "H5Fopen");
+
+ /* Open the dataset */
+ dataset = H5Dopen2(fid1, "Dataset1", H5P_DEFAULT);
+ CHECK(dataset, FAIL, "H5Dopen2");
+
+ /* Get the dataspace */
+ sid1 = H5Dget_space(dataset);
+ CHECK(sid1, FAIL, "H5Dget_space");
+
+ /* Get the datatype */
+ tid1 = H5Dget_type(dataset);
+ CHECK(tid1, FAIL, "H5Dget_type");
+
+ /* Check the array rank */
+ ndims = H5Tget_array_ndims(tid1);
+ VERIFY(ndims, ARRAY1_RANK, "H5Tget_array_ndims");
+
+ /* Get the array dimensions */
+ ret = H5Tget_array_dims2(tid1, rdims1);
+ CHECK(ret, FAIL, "H5Tget_array_dims2");
+
+ /* Check the array dimensions */
+ for (i = 0; i < ndims; i++)
+ if (rdims1[i] != tdims1[i]) {
+ TestErrPrintf("Array dimension information doesn't match!, rdims1[%d]=%" PRIuHSIZE
+ ", tdims1[%d]=%" PRIuHSIZE "\n",
+ i, rdims1[i], i, tdims1[i]);
+ continue;
+ } /* end if */
+
+ /* Get the VL datatype */
+ tid2 = H5Tget_super(tid1);
+ CHECK(tid2, FAIL, "H5Tget_super");
+
+ /* Get the VL datatype's class */
+ mclass = H5Tget_class(tid2);
+ VERIFY(mclass, H5T_VLEN, "H5Tget_class");
+
+ /* Check the VL datatype's base type */
+ tid3 = H5Tget_super(tid2);
+ CHECK(tid3, FAIL, "H5Tget_super");
+
+ if ((ret = H5Tequal(tid3, H5T_NATIVE_UINT)) <= 0)
+ TestErrPrintf("VL base datatype is incorrect!, ret=%d\n", (int)ret);
+
+ /* Close the array's base type datatype */
+ ret = H5Tclose(tid3);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ /* Close VL Datatype */
+ ret = H5Tclose(tid2);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ /* Change to the custom memory allocation routines for reading VL data */
+ xfer_pid = H5Pcreate(H5P_DATASET_XFER);
+ CHECK(xfer_pid, FAIL, "H5Pcreate");
+
+ ret = H5Pset_vlen_mem_manager(xfer_pid, test_array_alloc_custom, &mem_used, test_array_free_custom,
+ &mem_used);
+ CHECK(ret, FAIL, "H5Pset_vlen_mem_manager");
+
+ /* Make certain the correct amount of memory will be used */
+ ret = H5Dvlen_get_buf_size(dataset, tid1, sid1, &size);
+ CHECK(ret, FAIL, "H5Dvlen_get_buf_size");
+
+ /* # elements allocated = (1 + 2 + 3 + 4) + (2 + 3 + 4 + 5) +
+ * (3 + 4 + 5 + 6) + (4 + 5 + 6 + 7) = 64 elements
+ */
+ VERIFY(size, 64 * sizeof(unsigned int), "H5Dvlen_get_buf_size");
+
+ /* Read dataset from disk */
+ ret = H5Dread(dataset, tid1, H5S_ALL, H5S_ALL, xfer_pid, rdata);
+ CHECK(ret, FAIL, "H5Dread");
+
+ /* Make certain the correct amount of memory has been used */
+ /* # elements allocated = (1 + 2 + 3 + 4) + (2 + 3 + 4 + 5) +
+ * (3 + 4 + 5 + 6) + (4 + 5 + 6 + 7) = 64 elements
+ */
+ VERIFY(mem_used, 64 * sizeof(unsigned int), "H5Dread");
+
+ /* Compare data read in */
+ for (i = 0; i < SPACE1_DIM1; i++) {
+ for (j = 0; j < ARRAY1_DIM1; j++) {
+ if (wdata[i][j].len != rdata[i][j].len) {
+ TestErrPrintf("VL data length don't match!, wdata[%d][%d].len=%d, rdata[%d][%d].len=%d\n",
+ (int)i, (int)j, (int)wdata[i][j].len, (int)i, (int)j, (int)rdata[i][j].len);
+ continue;
+ } /* end if */
+ for (k = 0; k < (int)rdata[i][j].len; k++) {
+ if (((unsigned int *)wdata[i][j].p)[k] != ((unsigned int *)rdata[i][j].p)[k]) {
+ TestErrPrintf(
+ "VL data values don't match!, wdata[%d][%d].p[%d]=%d, rdata[%d][%d].p[%d]=%d\n",
+ (int)i, (int)j, (int)k, (int)((unsigned int *)wdata[i][j].p)[k], (int)i, (int)j,
+ (int)k, (int)((unsigned int *)rdata[i][j].p)[k]);
+ continue;
+ } /* end if */
+ } /* end for */
+ } /* end for */
+ } /* end for */
+
+ /* Reclaim the read VL data */
+ ret = H5Treclaim(tid1, sid1, xfer_pid, rdata);
+ CHECK(ret, FAIL, "H5Treclaim");
+
+ /* Make certain the VL memory has been freed */
+ VERIFY(mem_used, 0, "H5Treclaim");
+
+ /* Reclaim the write VL data */
+ ret = H5Treclaim(tid1, sid1, H5P_DEFAULT, wdata);
+ CHECK(ret, FAIL, "H5Treclaim");
+
+ /* Close dataset transfer property list */
+ ret = H5Pclose(xfer_pid);
+ CHECK(ret, FAIL, "H5Pclose");
+
+ /* Close Datatype */
+ ret = H5Tclose(tid1);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ /* Close Dataset */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close file */
+ ret = H5Fclose(fid1);
+ CHECK(ret, FAIL, "H5Fclose");
+
+} /* end test_array_vlen_atomic() */
+
+/*-------------------------------------------------------------------------
+ * Function: test_array_vlen_array
+ *
+ * Purpose: Test basic array datatype code.
+ * Tests 1-D array of 1-D array VL datatypes.
+ *
+ * Return: void
+ *
+ *-------------------------------------------------------------------------
+ */
+static void
+test_array_vlen_array(void)
+{
+ hvl_t wdata[SPACE1_DIM1][ARRAY1_DIM1]; /* Information to write */
+ hvl_t rdata[SPACE1_DIM1][ARRAY1_DIM1]; /* Information read in */
+ hid_t fid1; /* HDF5 File IDs */
+ hid_t dataset; /* Dataset ID */
+ hid_t sid1; /* Dataspace ID */
+ hid_t tid1; /* Array Datatype ID */
+ hid_t tid2; /* VL Datatype ID */
+ hid_t tid3; /* Nested Array Datatype ID */
+ hid_t tid4; /* Atomic Datatype ID */
+ hsize_t sdims1[] = {SPACE1_DIM1};
+ hsize_t tdims1[] = {ARRAY1_DIM1};
+ int ndims; /* Array rank for reading */
+ hsize_t rdims1[H5S_MAX_RANK]; /* Array dimensions for reading */
+ H5T_class_t mclass; /* Datatype class for VL */
+ hid_t xfer_pid; /* Dataset transfer property list ID */
+ hsize_t size; /* Number of bytes which will be used */
+ size_t mem_used = 0; /* Memory used during allocation */
+ int i, j, k, l; /* Index variables */
+ herr_t ret; /* Generic return value */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing 1-D Array of 1-D Array Variable-Length Datatypes Functionality\n"));
+
+ /* Initialize array data to write */
+ for (i = 0; i < SPACE1_DIM1; i++)
+ for (j = 0; j < ARRAY1_DIM1; j++) {
+ wdata[i][j].p = HDmalloc((size_t)(i + j + 1) * sizeof(unsigned int) * (size_t)ARRAY1_DIM1);
+ wdata[i][j].len = (size_t)(i + j + 1);
+ for (k = 0; k < (i + j + 1); k++)
+ for (l = 0; l < ARRAY1_DIM1; l++)
+ ((unsigned int *)wdata[i][j].p)[k * ARRAY1_DIM1 + l] =
+ (unsigned int)(i * 1000 + j * 100 + k * 10 + l);
+ }
+
+ /* Create file */
+ fid1 = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(fid1, FAIL, "H5Fcreate");
+
+ /* Create dataspace for datasets */
+ sid1 = H5Screate_simple(SPACE1_RANK, sdims1, NULL);
+ CHECK(sid1, FAIL, "H5Screate_simple");
+
+ /* Create the nested array datatype to refer to */
+ tid3 = H5Tarray_create2(H5T_NATIVE_UINT, ARRAY1_RANK, tdims1);
+ CHECK(tid3, FAIL, "H5Tarray_create2");
+
+ /* Create a VL datatype of 1-D arrays to refer to */
+ tid2 = H5Tvlen_create(tid3);
+ CHECK(tid2, FAIL, "H5Tvlen_create");
+
+ /* Close nested array datatype */
+ ret = H5Tclose(tid3);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ /* Create an array datatype to refer to */
+ tid1 = H5Tarray_create2(tid2, ARRAY1_RANK, tdims1);
+ CHECK(tid1, FAIL, "H5Tarray_create2");
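+
+ /* The complete element type is now a 1-D array[ARRAY1_DIM1] of
+ * variable-length sequences whose elements are themselves
+ * 1-D arrays[ARRAY1_DIM1] of unsigned int.
+ */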
+
+ /* Close VL datatype */
+ ret = H5Tclose(tid2);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ /* Create a dataset */
+ dataset = H5Dcreate2(fid1, "Dataset1", tid1, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(dataset, FAIL, "H5Dcreate2");
+
+ /* Write dataset to disk */
+ ret = H5Dwrite(dataset, tid1, H5S_ALL, H5S_ALL, H5P_DEFAULT, wdata);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* Close Dataset */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close datatype */
+ ret = H5Tclose(tid1);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ /* Close disk dataspace */
+ ret = H5Sclose(sid1);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close file */
+ ret = H5Fclose(fid1);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Re-open file */
+ fid1 = H5Fopen(FILENAME, H5F_ACC_RDONLY, H5P_DEFAULT);
+ CHECK(fid1, FAIL, "H5Fopen");
+
+ /* Open the dataset */
+ dataset = H5Dopen2(fid1, "Dataset1", H5P_DEFAULT);
+ CHECK(dataset, FAIL, "H5Dopen2");
+
+ /* Get the dataspace */
+ sid1 = H5Dget_space(dataset);
+ CHECK(sid1, FAIL, "H5Dget_space");
+
+ /* Get the datatype */
+ tid1 = H5Dget_type(dataset);
+ CHECK(tid1, FAIL, "H5Dget_type");
+
+ /* Check the array rank */
+ ndims = H5Tget_array_ndims(tid1);
+ VERIFY(ndims, ARRAY1_RANK, "H5Tget_array_ndims");
+
+ /* Get the array dimensions */
+ ret = H5Tget_array_dims2(tid1, rdims1);
+ CHECK(ret, FAIL, "H5Tget_array_dims2");
+
+ /* Check the array dimensions */
+ for (i = 0; i < ndims; i++)
+ if (rdims1[i] != tdims1[i]) {
+ TestErrPrintf("Array dimension information doesn't match!, rdims1[%d]=%" PRIuHSIZE
+ ", tdims1[%d]=%" PRIuHSIZE "\n",
+ i, rdims1[i], i, tdims1[i]);
+ continue;
+ } /* end if */
+
+ /* Get the VL datatype */
+ tid2 = H5Tget_super(tid1);
+ CHECK(tid2, FAIL, "H5Tget_super");
+
+ /* Get the VL datatype's class */
+ mclass = H5Tget_class(tid2);
+ VERIFY(mclass, H5T_VLEN, "H5Tget_class");
+
+ /* Check the VL datatype's base type */
+ tid3 = H5Tget_super(tid2);
+ CHECK(tid3, FAIL, "H5Tget_super");
+
+ /* Get the nested array datatype's class */
+ mclass = H5Tget_class(tid3);
+ VERIFY(mclass, H5T_ARRAY, "H5Tget_class");
+
+ /* Check the array rank */
+ ndims = H5Tget_array_ndims(tid3);
+ VERIFY(ndims, ARRAY1_RANK, "H5Tget_array_ndims");
+
+ /* Get the array dimensions */
+ ret = H5Tget_array_dims2(tid3, rdims1);
+ CHECK(ret, FAIL, "H5Tget_array_dims2");
+
+ /* Check the array dimensions */
+ for (i = 0; i < ndims; i++)
+ if (rdims1[i] != tdims1[i]) {
+ TestErrPrintf("Array dimension information doesn't match!, rdims1[%d]=%" PRIuHSIZE
+ ", tdims1[%d]=%" PRIuHSIZE "\n",
+ i, rdims1[i], i, tdims1[i]);
+ continue;
+ } /* end if */
+
+ /* Check the array's base type */
+ tid4 = H5Tget_super(tid3);
+ CHECK(tid4, FAIL, "H5Tget_super");
+
+ if ((ret = H5Tequal(tid4, H5T_NATIVE_UINT)) <= 0)
+ TestErrPrintf("VL base datatype is incorrect!, ret=%d\n", (int)ret);
+
+ /* Close the array's base type datatype */
+ ret = H5Tclose(tid4);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ /* Close the nested array datatype */
+ ret = H5Tclose(tid3);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ /* Close VL Datatype */
+ ret = H5Tclose(tid2);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ /* Change to the custom memory allocation routines for reading VL data */
+ xfer_pid = H5Pcreate(H5P_DATASET_XFER);
+ CHECK(xfer_pid, FAIL, "H5Pcreate");
+
+ ret = H5Pset_vlen_mem_manager(xfer_pid, test_array_alloc_custom, &mem_used, test_array_free_custom,
+ &mem_used);
+ CHECK(ret, FAIL, "H5Pset_vlen_mem_manager");
+
+ /* Make certain the correct amount of memory will be used */
+ ret = H5Dvlen_get_buf_size(dataset, tid1, sid1, &size);
+ CHECK(ret, FAIL, "H5Dvlen_get_buf_size");
+
+ /* # elements allocated = (1 + 2 + 3 + 4) + (2 + 3 + 4 + 5) +
+ * (3 + 4 + 5 + 6) + (4 + 5 + 6 + 7) = 64*ARRAY1_DIM1 elements
+ */
+ VERIFY(size, 64 * (sizeof(unsigned int) * ARRAY1_DIM1), "H5Dvlen_get_buf_size");
+
+ /* Read dataset from disk */
+ ret = H5Dread(dataset, tid1, H5S_ALL, H5S_ALL, xfer_pid, rdata);
+ CHECK(ret, FAIL, "H5Dread");
+
+ /* Make certain the correct amount of memory has been used */
+ /* # elements allocated = (1 + 2 + 3 + 4) + (2 + 3 + 4 + 5) +
+ * (3 + 4 + 5 + 6) + (4 + 5 + 6 + 7) = 64*ARRAY1_DIM1 elements
+ */
+ VERIFY(mem_used, 64 * (sizeof(unsigned int) * ARRAY1_DIM1), "H5Dread");
+
+ /* Compare data read in */
+ for (i = 0; i < SPACE1_DIM1; i++) {
+ for (j = 0; j < ARRAY1_DIM1; j++) {
+ if (wdata[i][j].len != rdata[i][j].len) {
+ TestErrPrintf("VL data length don't match!, wdata[%d][%d].len=%d, rdata[%d][%d].len=%d\n",
+ (int)i, (int)j, (int)wdata[i][j].len, (int)i, (int)j, (int)rdata[i][j].len);
+ continue;
+ } /* end if */
+ for (k = 0; k < (int)rdata[i][j].len; k++) {
+ for (l = 0; l < ARRAY1_DIM1; l++) {
+ if (((unsigned int *)wdata[i][j].p)[k * ARRAY1_DIM1 + l] !=
+ ((unsigned int *)rdata[i][j].p)[k * ARRAY1_DIM1 + l]) {
+ TestErrPrintf("VL data values don't match!, wdata[%d][%d].p[%d][%d]=%d, "
+ "rdata[%d][%d].p[%d][%d]=%d\n",
+ (int)i, (int)j, (int)k, (int)l,
+ (int)((unsigned int *)wdata[i][j].p)[k * ARRAY1_DIM1 + l], (int)i,
+ (int)j, (int)k, (int)l,
+ (int)((unsigned int *)rdata[i][j].p)[k * ARRAY1_DIM1 + l]);
+ continue;
+ } /* end if */
+ } /* end for */
+ } /* end for */
+ } /* end for */
+ } /* end for */
+
+ /* Reclaim the read VL data */
+ ret = H5Treclaim(tid1, sid1, xfer_pid, rdata);
+ CHECK(ret, FAIL, "H5Treclaim");
+
+ /* Make certain the VL memory has been freed */
+ VERIFY(mem_used, 0, "H5Treclaim");
+
+ /* Reclaim the write VL data */
+ ret = H5Treclaim(tid1, sid1, H5P_DEFAULT, wdata);
+ CHECK(ret, FAIL, "H5Treclaim");
+
+ /* Close dataset transfer property list */
+ ret = H5Pclose(xfer_pid);
+ CHECK(ret, FAIL, "H5Pclose");
+
+ /* Close Datatype */
+ ret = H5Tclose(tid1);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ /* Close Dataset */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close file */
+ ret = H5Fclose(fid1);
+ CHECK(ret, FAIL, "H5Fclose");
+
+} /* end test_array_vlen_array() */
+
+/*-------------------------------------------------------------------------
+ * Function: test_array_bkg
+ *
+ * Purpose: Test basic array datatype code.
+ * Tests reading compound datatype with array fields and
+ * writing partial fields.
+ *
+ * Return: void
+ *
+ *-------------------------------------------------------------------------
+ */
+static void
+test_array_bkg(void)
+{
+ herr_t status = -1;
+
+ hid_t fid, array_dt;
+ hid_t space;
+ hid_t type;
+ hid_t dataset;
+
+ hsize_t dim[] = {LENGTH};
+ hsize_t dima[] = {ALEN};
+
+ int i, j;
+ unsigned ndims[3] = {1, 1, 1};
+
+ typedef struct {
+ int a[ALEN];
+ float b[ALEN];
+ double c[ALEN];
+ } CmpField;
+
+ CmpField cf[LENGTH];
+ CmpField cfr[LENGTH];
+ CmpDTSinfo *dtsinfo = NULL;
+
+ typedef struct {
+ float b[ALEN];
+ } fld_t;
+
+ fld_t fld[LENGTH];
+ fld_t fldr[LENGTH];
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing Partial I/O of Array Fields in Compound Datatype Functionality\n"));
+
+ /* Initialize the data */
+ /* ------------------- */
+ dtsinfo = (CmpDTSinfo *)HDmalloc(sizeof(CmpDTSinfo));
+ CHECK_PTR(dtsinfo, "HDmalloc");
+ HDmemset(dtsinfo, 0, sizeof(CmpDTSinfo));
+ for (i = 0; i < LENGTH; i++) {
+ for (j = 0; j < ALEN; j++) {
+ cf[i].a[j] = 100 * (i + 1) + j;
+ cf[i].b[j] = 100.0F * ((float)i + 1.0F) + 0.01F * (float)j;
+ cf[i].c[j] = (double)(100.0F * ((float)i + 1.0F) + 0.02F * (float)j);
+ } /* end for */
+ } /* end for */
+
+ /* Set the number of data members */
+ /* ------------------------------ */
+ dtsinfo->nsubfields = 3;
+
+ /* Initialize the offsets */
+ /* ----------------------- */
+ dtsinfo->offset[0] = HOFFSET(CmpField, a);
+ dtsinfo->offset[1] = HOFFSET(CmpField, b);
+ dtsinfo->offset[2] = HOFFSET(CmpField, c);
+
+ /* Initialize the data type IDs */
+ /* ---------------------------- */
+ dtsinfo->datatype[0] = H5T_NATIVE_INT;
+ dtsinfo->datatype[1] = H5T_NATIVE_FLOAT;
+ dtsinfo->datatype[2] = H5T_NATIVE_DOUBLE;
+
+ /* Initialize the names of data members */
+ /* ------------------------------------ */
+ for (i = 0; i < dtsinfo->nsubfields; i++)
+ dtsinfo->name[i] = (char *)HDcalloc((size_t)20, sizeof(char));
+
+ HDstrcpy(dtsinfo->name[0], "One");
+ HDstrcpy(dtsinfo->name[1], "Two");
+ HDstrcpy(dtsinfo->name[2], "Three");
+
+ /* Create file */
+ /* ----------- */
+ fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(fid, FAIL, "H5Fcreate");
+
+ /* Create data space */
+ /* ----------------- */
+ space = H5Screate_simple(RANK, dim, NULL);
+ CHECK(space, FAIL, "H5Screate_simple");
+
+ /* Create the memory data type */
+ /* --------------------------- */
+ type = H5Tcreate(H5T_COMPOUND, sizeof(CmpField));
+ CHECK(type, FAIL, "H5Tcreate");
+
+ /* Add members to the compound data type */
+ /* -------------------------------------- */
+ for (i = 0; i < dtsinfo->nsubfields; i++) {
+ array_dt = H5Tarray_create2(dtsinfo->datatype[i], ndims[i], dima);
+ CHECK(array_dt, FAIL, "H5Tarray_create2");
+
+ status = H5Tinsert(type, dtsinfo->name[i], dtsinfo->offset[i], array_dt);
+ CHECK(status, FAIL, "H5Tinsert");
+
+ status = H5Tclose(array_dt);
+ CHECK(status, FAIL, "H5Tclose");
+ } /* end for */
+
+ /* Create the dataset */
+ /* ------------------ */
+ dataset = H5Dcreate2(fid, FIELDNAME, type, space, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(dataset, FAIL, "H5Dcreate2");
+
+ /* Write data to the dataset */
+ /* ------------------------- */
+ status = H5Dwrite(dataset, type, H5S_ALL, H5S_ALL, H5P_DEFAULT, cf);
+ CHECK(status, FAIL, "H5Dwrite");
+
+ status = H5Dread(dataset, type, H5S_ALL, H5S_ALL, H5P_DEFAULT, cfr);
+ CHECK(status, FAIL, "H5Dread");
+
+ /* Verify correct data */
+ /* ------------------- */
+ for (i = 0; i < LENGTH; i++) {
+ for (j = 0; j < ALEN; j++) {
+ if (cf[i].a[j] != cfr[i].a[j]) {
+ TestErrPrintf("Field a data doesn't match, cf[%d].a[%d]=%d, cfr[%d].a[%d]=%d\n", (int)i,
+ (int)j, (int)cf[i].a[j], (int)i, (int)j, (int)cfr[i].a[j]);
+ continue;
+ } /* end if */
+ if (!H5_FLT_ABS_EQUAL(cf[i].b[j], cfr[i].b[j])) {
+ TestErrPrintf("Field b data doesn't match, cf[%d].b[%d]=%f, cfr[%d].b[%d]=%f\n", (int)i,
+ (int)j, (double)cf[i].b[j], (int)i, (int)j, (double)cfr[i].b[j]);
+ continue;
+ } /* end if */
+ if (!H5_DBL_ABS_EQUAL(cf[i].c[j], cfr[i].c[j])) {
+ TestErrPrintf("Field c data doesn't match, cf[%d].b[%d]=%f, cfr[%d].b[%d]=%f\n", (int)i,
+ (int)j, (double)cf[i].c[j], (int)i, (int)j, (double)cfr[i].c[j]);
+ continue;
+ } /* end if */
+ } /* end for */
+ } /* end for */
+
+ /* Release memory resources */
+ /* ------------------------ */
+ for (i = 0; i < dtsinfo->nsubfields; i++)
+ HDfree(dtsinfo->name[i]);
+
+ /* Release IDs */
+ /* ----------- */
+ status = H5Tclose(type);
+ CHECK(status, FAIL, "H5Tclose");
+
+ status = H5Sclose(space);
+ CHECK(status, FAIL, "H5Sclose");
+
+ status = H5Dclose(dataset);
+ CHECK(status, FAIL, "H5Dclose");
+
+ status = H5Fclose(fid);
+ CHECK(status, FAIL, "H5Fclose");
+
+ /******************************/
+ /* Reopen the file and update */
+ /******************************/
+
+ fid = H5Fopen(FILENAME, H5F_ACC_RDWR, H5P_DEFAULT);
+ CHECK(fid, FAIL, "H5Fopen");
+
+ dataset = H5Dopen2(fid, FIELDNAME, H5P_DEFAULT);
+ CHECK(dataset, FAIL, "H5Dopen2");
+
+ type = H5Tcreate(H5T_COMPOUND, sizeof(fld_t));
+ CHECK(type, FAIL, "H5Tcreate");
+
+ array_dt = H5Tarray_create2(H5T_NATIVE_FLOAT, 1, dima);
+ CHECK(array_dt, FAIL, "H5Tarray_create2");
+
+ status = H5Tinsert(type, "Two", HOFFSET(fld_t, b), array_dt);
+ CHECK(status, FAIL, "H5Tinsert");
+
+ /* Initialize the data to overwrite */
+ /* -------------------------------- */
+ for (i = 0; i < LENGTH; i++)
+ for (j = 0; j < ALEN; j++)
+ cf[i].b[j] = fld[i].b[j] = 1.313F;
+
+ status = H5Dwrite(dataset, type, H5S_ALL, H5S_ALL, H5P_DEFAULT, fld);
+ CHECK(status, FAIL, "H5Dwrite");
+
+ /* Read just the field changed */
+ status = H5Dread(dataset, type, H5S_ALL, H5S_ALL, H5P_DEFAULT, fldr);
+ CHECK(status, FAIL, "H5Dread");
+
+ for (i = 0; i < LENGTH; i++)
+ for (j = 0; j < ALEN; j++)
+ if (!H5_FLT_ABS_EQUAL(fld[i].b[j], fldr[i].b[j])) {
+ TestErrPrintf("Field data doesn't match, fld[%d].b[%d]=%f, fldr[%d].b[%d]=%f\n", (int)i,
+ (int)j, (double)fld[i].b[j], (int)i, (int)j, (double)fldr[i].b[j]);
+ continue;
+ } /* end if */
+
+ status = H5Tclose(type);
+ CHECK(status, FAIL, "H5Tclose");
+
+ status = H5Tclose(array_dt);
+ CHECK(status, FAIL, "H5Tclose");
+
+ type = H5Dget_type(dataset);
+ CHECK(type, FAIL, "H5Dget_type");
+
+ /* Read the entire dataset again */
+ status = H5Dread(dataset, type, H5S_ALL, H5S_ALL, H5P_DEFAULT, cfr);
+ CHECK(status, FAIL, "H5Dread");
+
+ /* Verify correct data */
+ /* ------------------- */
+ for (i = 0; i < LENGTH; i++) {
+ for (j = 0; j < ALEN; j++) {
+ if (cf[i].a[j] != cfr[i].a[j]) {
+ TestErrPrintf("Field a data doesn't match, cf[%d].a[%d]=%d, cfr[%d].a[%d]=%d\n", (int)i,
+ (int)j, (int)cf[i].a[j], (int)i, (int)j, (int)cfr[i].a[j]);
+ continue;
+ } /* end if */
+ if (!H5_FLT_ABS_EQUAL(cf[i].b[j], cfr[i].b[j])) {
+ TestErrPrintf("Field b data doesn't match, cf[%d].b[%d]=%f, cfr[%d].b[%d]=%f\n", (int)i,
+ (int)j, (double)cf[i].b[j], (int)i, (int)j, (double)cfr[i].b[j]);
+ continue;
+ } /* end if */
+ if (!H5_DBL_ABS_EQUAL(cf[i].c[j], cfr[i].c[j])) {
+ TestErrPrintf("Field c data doesn't match, cf[%d].b[%d]=%f, cfr[%d].b[%d]=%f\n", (int)i,
+ (int)j, (double)cf[i].c[j], (int)i, (int)j, (double)cfr[i].c[j]);
+ continue;
+ } /* end if */
+ } /* end for */
+ } /* end for */
+
+ status = H5Dclose(dataset);
+ CHECK(status, FAIL, "H5Dclose");
+
+ status = H5Tclose(type);
+ CHECK(status, FAIL, "H5Tclose");
+
+ status = H5Fclose(fid);
+ CHECK(status, FAIL, "H5Fclose");
+
+ /****************************************************/
+ /* Reopen the file and re-verify all the data again */
+ /****************************************************/
+
+ fid = H5Fopen(FILENAME, H5F_ACC_RDWR, H5P_DEFAULT);
+ CHECK(fid, FAIL, "H5Fopen");
+
+ dataset = H5Dopen2(fid, FIELDNAME, H5P_DEFAULT);
+ CHECK(dataset, FAIL, "H5Dopen2");
+
+ type = H5Dget_type(dataset);
+ CHECK(type, FAIL, "H5Dget_type");
+
+ /* Reset the data to read in */
+ /* ------------------------- */
+ HDmemset(cfr, 0, sizeof(CmpField) * LENGTH);
+
+ status = H5Dread(dataset, type, H5S_ALL, H5S_ALL, H5P_DEFAULT, cfr);
+ CHECK(status, FAIL, "H5Dread");
+
+ /* Verify correct data */
+ /* ------------------- */
+ for (i = 0; i < LENGTH; i++) {
+ for (j = 0; j < ALEN; j++) {
+ if (cf[i].a[j] != cfr[i].a[j]) {
+ TestErrPrintf("Field a data doesn't match, cf[%d].a[%d]=%d, cfr[%d].a[%d]=%d\n", (int)i,
+ (int)j, (int)cf[i].a[j], (int)i, (int)j, (int)cfr[i].a[j]);
+ continue;
+ } /* end if */
+ if (!H5_FLT_ABS_EQUAL(cf[i].b[j], cfr[i].b[j])) {
+ TestErrPrintf("Field b data doesn't match, cf[%d].b[%d]=%f, cfr[%d].b[%d]=%f\n", (int)i,
+ (int)j, (double)cf[i].b[j], (int)i, (int)j, (double)cfr[i].b[j]);
+ continue;
+ } /* end if */
+ if (!H5_DBL_ABS_EQUAL(cf[i].c[j], cfr[i].c[j])) {
+ TestErrPrintf("Field c data doesn't match, cf[%d].b[%d]=%f, cfr[%d].b[%d]=%f\n", (int)i,
+ (int)j, (double)cf[i].c[j], (int)i, (int)j, (double)cfr[i].c[j]);
+ continue;
+ } /* end if */
+ } /* end for */
+ } /* end for */
+
+ status = H5Dclose(dataset);
+ CHECK(status, FAIL, "H5Dclose");
+
+ status = H5Tclose(type);
+ CHECK(status, FAIL, "H5Tclose");
+
+ status = H5Fclose(fid);
+ CHECK(status, FAIL, "H5Fclose");
+
+ HDfree(dtsinfo);
+} /* end test_array_bkg() */
+
+/*-------------------------------------------------------------------------
+ * Function: test_compat
+ *
+ * Purpose: Test array datatype compatibility code.
+ *
+ * Reads a file containing an old version of the datatype object
+ * header messages for compound datatypes and verifies that the
+ * older version of the information is read correctly.
+ *
+ * Return: void
+ *
+ *-------------------------------------------------------------------------
+ */
+#if 0
+static void
+test_compat(void)
+{
+ const char *testfile = H5_get_srcdir_filename(TESTFILE); /* Corrected test file name */
+ hid_t fid1; /* HDF5 File IDs */
+ hid_t dataset; /* Dataset ID */
+ hid_t tid1; /* Array Datatype ID */
+ hid_t tid2; /* Datatype ID */
+ hsize_t tdims1[] = {ARRAY1_DIM1};
+ int ndims; /* Array rank for reading */
+ hsize_t rdims1[H5S_MAX_RANK]; /* Array dimensions for reading */
+ H5T_class_t mclass; /* Datatype class for VL */
+ int nmemb; /* Number of compound members */
+ char *mname; /* Name of compound field */
+ size_t off; /* Offset of compound field */
+ hid_t mtid; /* Datatype ID for field */
+ int i; /* Index variables */
+ hbool_t driver_is_default_compatible;
+ herr_t ret; /* Generic return value */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing Array Datatypes Compatibility Functionality\n"));
+
+ /*
+ * Try reading a file that has been prepared that has datasets with
+ * compound datatypes which use an older version (version 1) of the
+ * datatype object header message for describing the datatype.
+ *
+ * If this test fails and the datatype object header message version has
+ * changed, follow the instructions in gen_old_array.c for regenerating
+ * the tarrold.h5 file.
+ */
+
+ if (h5_driver_is_default_vfd_compatible(H5P_DEFAULT, &driver_is_default_compatible) < 0)
+ TestErrPrintf("can't check if VFD is default VFD compatible\n");
+ if (!driver_is_default_compatible) {
+ HDprintf(" -- SKIPPED --\n");
+ return;
+ }
+
+ /* Open the testfile */
+ fid1 = H5Fopen(testfile, H5F_ACC_RDONLY, H5P_DEFAULT);
+ CHECK_I(fid1, "H5Fopen");
+
+ /* Only try to proceed if the file is around */
+ if (fid1 >= 0) {
+ /* Open the first dataset (with no array fields) */
+ dataset = H5Dopen2(fid1, "Dataset1", H5P_DEFAULT);
+ CHECK_I(dataset, "H5Dopen2");
+
+ /* Get the datatype */
+ tid1 = H5Dget_type(dataset);
+ CHECK_I(tid1, "H5Dget_type");
+
+ /* Verify datatype class */
+ mclass = H5Tget_class(tid1);
+ VERIFY(mclass, H5T_COMPOUND, "H5Tget_class");
+
+ /* Get the number of compound datatype fields */
+ nmemb = H5Tget_nmembers(tid1);
+ VERIFY(nmemb, 3, "H5Tget_nmembers");
+
+ /* Check the 1st field's name */
+ mname = H5Tget_member_name(tid1, 0);
+ CHECK_PTR(mname, "H5Tget_member_name");
+ if (HDstrcmp(mname, "i") != 0)
+ TestErrPrintf("Compound field name doesn't match!, mname=%s\n", mname);
+ H5free_memory(mname);
+
+ /* Check the 1st field's offset */
+ off = H5Tget_member_offset(tid1, 0);
+ VERIFY(off, 0, "H5Tget_member_offset");
+
+ /* Check the 1st field's datatype */
+ mtid = H5Tget_member_type(tid1, 0);
+ CHECK(mtid, FAIL, "H5Tget_member_type");
+ if ((ret = H5Tequal(mtid, H5T_STD_I16LE)) <= 0)
+ TestErrPrintf("Compound data type is incorrect!, ret=%d\n", (int)ret);
+ ret = H5Tclose(mtid);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ /* Check the 2nd field's name */
+ mname = H5Tget_member_name(tid1, 1);
+ CHECK_PTR(mname, "H5Tget_member_name");
+ if (HDstrcmp(mname, "f") != 0)
+ TestErrPrintf("Compound field name doesn't match!, mname=%s\n", mname);
+ H5free_memory(mname);
+
+ /* Check the 2nd field's offset */
+ off = H5Tget_member_offset(tid1, 1);
+ VERIFY(off, 4, "H5Tget_member_offset");
+
+ /* Check the 2nd field's datatype */
+ mtid = H5Tget_member_type(tid1, 1);
+ CHECK(mtid, FAIL, "H5Tget_member_type");
+ if ((ret = H5Tequal(mtid, H5T_IEEE_F32LE)) <= 0)
+ TestErrPrintf("Compound data type is incorrect!, ret=%d\n", (int)ret);
+ ret = H5Tclose(mtid);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ /* Check the 3rd field's name */
+ mname = H5Tget_member_name(tid1, 2);
+ CHECK_PTR(mname, "H5Tget_member_name");
+ if (HDstrcmp(mname, "l") != 0)
+ TestErrPrintf("Compound field name doesn't match!, mname=%s\n", mname);
+ H5free_memory(mname);
+
+ /* Check the 3rd field's offset */
+ off = H5Tget_member_offset(tid1, 2);
+ VERIFY(off, 8, "H5Tget_member_offset");
+
+ /* Check the 3rd field's datatype */
+ mtid = H5Tget_member_type(tid1, 2);
+ CHECK(mtid, FAIL, "H5Tget_member_type");
+ if ((ret = H5Tequal(mtid, H5T_STD_I32LE)) <= 0)
+ TestErrPrintf("Compound data type is incorrect!, ret=%d\n", (int)ret);
+ ret = H5Tclose(mtid);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ /* Close the datatype */
+ ret = H5Tclose(tid1);
+ CHECK_I(ret, "H5Tclose");
+
+ /* Close the dataset */
+ ret = H5Dclose(dataset);
+ CHECK_I(ret, "H5Dclose");
+
+ /* Open the second dataset (with array fields) */
+ dataset = H5Dopen2(fid1, "Dataset2", H5P_DEFAULT);
+ CHECK_I(dataset, "H5Dopen2");
+
+ /* Get the datatype */
+ tid1 = H5Dget_type(dataset);
+ CHECK_I(tid1, "H5Dget_type");
+
+ /* Verify datatype class */
+ mclass = H5Tget_class(tid1);
+ VERIFY(mclass, H5T_COMPOUND, "H5Tget_class");
+
+ /* Get the number of compound datatype fields */
+ nmemb = H5Tget_nmembers(tid1);
+ VERIFY(nmemb, 4, "H5Tget_nmembers");
+
+ /* Check the 1st field's name */
+ mname = H5Tget_member_name(tid1, 0);
+ CHECK_PTR(mname, "H5Tget_member_name");
+ if (mname && HDstrcmp(mname, "i") != 0)
+ TestErrPrintf("Compound field name doesn't match!, mname=%s\n", mname);
+ if (mname)
+ H5free_memory(mname);
+
+ /* Check the 1st field's offset */
+ off = H5Tget_member_offset(tid1, 0);
+ VERIFY(off, 0, "H5Tget_member_offset");
+
+ /* Check the 1st field's datatype */
+ mtid = H5Tget_member_type(tid1, 0);
+ CHECK(mtid, FAIL, "H5Tget_member_type");
+ if ((ret = H5Tequal(mtid, H5T_STD_I16LE)) <= 0)
+ TestErrPrintf("Compound data type is incorrect!, ret=%d\n", (int)ret);
+ ret = H5Tclose(mtid);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ /* Check the 2nd field's name */
+ mname = H5Tget_member_name(tid1, 1);
+ CHECK_PTR(mname, "H5Tget_member_name");
+ if (mname && HDstrcmp(mname, "f") != 0)
+ TestErrPrintf("Compound field name doesn't match!, mname=%s\n", mname);
+ if (mname)
+ H5free_memory(mname);
+
+ /* Check the 2nd field's offset */
+ off = H5Tget_member_offset(tid1, 1);
+ VERIFY(off, 4, "H5Tget_member_offset");
+
+ /* Check the 2nd field's datatype */
+ mtid = H5Tget_member_type(tid1, 1);
+ CHECK(mtid, FAIL, "H5Tget_member_type");
+
+ /* Verify datatype class */
+ mclass = H5Tget_class(mtid);
+ VERIFY(mclass, H5T_ARRAY, "H5Tget_class");
+
+ /* Check the array rank */
+ ndims = H5Tget_array_ndims(mtid);
+ VERIFY(ndims, ARRAY1_RANK, "H5Tget_array_ndims");
+
+ /* Get the array dimensions */
+ ret = H5Tget_array_dims2(mtid, rdims1);
+ CHECK(ret, FAIL, "H5Tget_array_dims2");
+
+ /* Check the array dimensions */
+ for (i = 0; i < ndims; i++)
+ if (rdims1[i] != tdims1[i]) {
+ TestErrPrintf("Array dimension information doesn't match!, rdims1[%d]=%" PRIuHSIZE
+ ", tdims1[%d]=%" PRIuHSIZE "\n",
+ i, rdims1[i], i, tdims1[i]);
+ continue;
+ } /* end if */
+
+ /* Check the array's base datatype */
+ tid2 = H5Tget_super(mtid);
+ CHECK(tid2, FAIL, "H5Tget_super");
+
+ if ((ret = H5Tequal(tid2, H5T_IEEE_F32LE)) <= 0)
+ TestErrPrintf("Compound data type is incorrect!, ret=%d\n", (int)ret);
+ ret = H5Tclose(tid2);
+ CHECK(ret, FAIL, "H5Tclose");
+ ret = H5Tclose(mtid);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ /* Check the 3rd field's name */
+ mname = H5Tget_member_name(tid1, 2);
+ CHECK_PTR(mname, "H5Tget_member_name");
+ if (mname && HDstrcmp(mname, "l") != 0)
+ TestErrPrintf("Compound field name doesn't match!, mname=%s\n", mname);
+ if (mname)
+ H5free_memory(mname);
+
+ /* Check the 3rd field's offset */
+ off = H5Tget_member_offset(tid1, 2);
+ VERIFY(off, 20, "H5Tget_member_offset");
+
+ /* Check the 3rd field's datatype */
+ mtid = H5Tget_member_type(tid1, 2);
+ CHECK(mtid, FAIL, "H5Tget_member_type");
+
+ /* Verify datatype class */
+ mclass = H5Tget_class(mtid);
+ VERIFY(mclass, H5T_ARRAY, "H5Tget_class");
+
+ /* Check the array rank */
+ ndims = H5Tget_array_ndims(mtid);
+ VERIFY(ndims, ARRAY1_RANK, "H5Tget_array_ndims");
+
+ /* Get the array dimensions */
+ ret = H5Tget_array_dims2(mtid, rdims1);
+ CHECK(ret, FAIL, "H5Tget_array_dims2");
+
+ /* Check the array dimensions */
+ for (i = 0; i < ndims; i++)
+ if (rdims1[i] != tdims1[i]) {
+ TestErrPrintf("Array dimension information doesn't match!, rdims1[%d]=%" PRIuHSIZE
+ ", tdims1[%d]=%" PRIuHSIZE "\n",
+ i, rdims1[i], i, tdims1[i]);
+ continue;
+ } /* end if */
+
+ /* Check the array's base datatype */
+ tid2 = H5Tget_super(mtid);
+ CHECK(tid2, FAIL, "H5Tget_super");
+
+ if ((ret = H5Tequal(tid2, H5T_STD_I32LE)) <= 0)
+ TestErrPrintf("Compound data type is incorrect!, ret=%d\n", (int)ret);
+ ret = H5Tclose(tid2);
+ CHECK(ret, FAIL, "H5Tclose");
+ ret = H5Tclose(mtid);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ /* Check the 4th field's name */
+ mname = H5Tget_member_name(tid1, 3);
+ CHECK_PTR(mname, "H5Tget_member_name");
+ if (mname && HDstrcmp(mname, "d") != 0)
+ TestErrPrintf("Compound field name doesn't match!, mname=%s\n", mname);
+ if (mname)
+ H5free_memory(mname);
+
+ /* Check the 4th field's offset */
+ off = H5Tget_member_offset(tid1, 3);
+ VERIFY(off, 36, "H5Tget_member_offset");
+
+ /* Check the 4th field's datatype */
+ mtid = H5Tget_member_type(tid1, 3);
+ CHECK(mtid, FAIL, "H5Tget_member_type");
+ if ((ret = H5Tequal(mtid, H5T_IEEE_F64LE)) <= 0)
+ TestErrPrintf("Compound data type is incorrect!, ret=%d\n", (int)ret);
+ ret = H5Tclose(mtid);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ /* Close the datatype */
+ ret = H5Tclose(tid1);
+ CHECK_I(ret, "H5Tclose");
+
+ /* Close the dataset */
+ ret = H5Dclose(dataset);
+ CHECK_I(ret, "H5Dclose");
+
+ /* Close the file */
+ ret = H5Fclose(fid1);
+ CHECK_I(ret, "H5Fclose");
+ } /* end if */
+ else
+ HDprintf("***cannot open the pre-created compound datatype test file (%s)\n", testfile);
+
+} /* end test_compat() */
+#endif
+
+/*-------------------------------------------------------------------------
+ * Function: test_array
+ *
+ * Purpose: Main array datatype testing routine.
+ *
+ * Return: void
+ *
+ *-------------------------------------------------------------------------
+ */
+void
+test_array(void)
+{
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing Array Datatypes\n"));
+
+ /* These tests use the same file... */
+ test_array_atomic_1d(); /* Test 1-D array of atomic datatypes */
+ test_array_atomic_3d(); /* Test 3-D array of atomic datatypes */
+ test_array_array_atomic(); /* Test 1-D array of 2-D arrays of atomic datatypes */
+ test_array_compound_atomic(); /* Test 1-D array of compound datatypes (with no array fields) */
+ test_array_compound_array(); /* Test 1-D array of compound datatypes (with array fields) */
+ test_array_vlen_atomic(); /* Test 1-D array of atomic VL datatypes */
+ test_array_vlen_array(); /* Test 1-D array of 1-D array VL datatypes */
+ test_array_funcs(); /* Test type functions with array types */
+
+ test_array_bkg(); /* Test partial I/O on compound datatypes with array fields (uses background buffer) */
+#if 0
+ /* This test uses a custom file */
+ test_compat(); /* Test compatibility changes for compound datatype fields */
+#endif
+} /* end test_array() */
+
+/*-------------------------------------------------------------------------
+ * Function: cleanup_array
+ *
+ * Purpose: Cleanup temporary test files
+ *
+ * Return: void
+ *
+ * Programmer: Quincey Koziol
+ * June 8, 1999
+ *
+ *-------------------------------------------------------------------------
+ */
+void
+cleanup_array(void)
+{
+ H5Fdelete(FILENAME, H5P_DEFAULT);
+} /* end cleanup_array() */
diff --git a/test/API/tattr.c b/test/API/tattr.c
new file mode 100644
index 0000000..d006eb8
--- /dev/null
+++ b/test/API/tattr.c
@@ -0,0 +1,11929 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://www.hdfgroup.org/licenses. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/***********************************************************
+ *
+ * Test program: tattr
+ *
+ * Test the attribute functionality
+ *
+ *************************************************************/
+
+#include "testhdf5.h"
+
+#if 0
+#include "H5VLnative_private.h"
+
+/*
+ * This file needs to access private information from the H5O package.
+ * This file also needs to access the object header testing code.
+ */
+#define H5O_FRIEND /*suppress error about including H5Opkg */
+#define H5O_TESTING
+#include "H5Opkg.h" /* Object headers */
+
+/*
+ * This file needs to access private information from the H5A package.
+ * This file also needs to access the attribute testing code.
+ */
+#define H5A_FRIEND /*suppress error about including H5Apkg */
+#define H5A_TESTING
+#include "H5Apkg.h" /* Attributes */
+
+/*
+ * This file needs to access private information from the H5F package.
+ * This file also needs to access the file testing code.
+ */
+#define H5F_FRIEND /*suppress error about including H5Fpkg */
+#define H5F_TESTING
+#include "H5Fpkg.h" /* File access */
+#endif
+
+#define FILENAME "tattr.h5"
+#define NAME_BUF_SIZE 1024
+#define ATTR_NAME_LEN 16
+#define ATTR_MAX_DIMS 7
+#define ATTR_TMP_NAME "a really long temp_name"
+#define CORDER_ITER_STOP 3
+
+/* 3-D dataset with fixed dimensions */
+#define SPACE1_RANK 3
+#define SPACE1_DIM1 3
+#define SPACE1_DIM2 15
+#define SPACE1_DIM3 13
+
+/* Dataset Information */
+#define DSET1_NAME "Dataset1"
+#define DSET2_NAME "Dataset2"
+#define DSET3_NAME "Dataset3"
+#define NUM_DSETS 3
+
+/* Group Information */
+#define GROUP1_NAME "/Group1"
+#define GROUP2_NAME "/Group2"
+#define GROUP3_NAME "/Group3"
+
+/* Named Datatype Information */
+#define TYPE1_NAME "/Type"
+
+/* Attribute Rank & Dimensions */
+#define ATTR1_NAME "Attr1"
+#define ATTR1_RANK 1
+#define ATTR1_DIM1 3
+int attr_data1[ATTR1_DIM1] = {512, -234, 98123}; /* Test data for 1st attribute */
+
+/* rank & dimensions for another attribute */
+#define ATTR1A_NAME "Attr1_a"
+int attr_data1a[ATTR1_DIM1] = {256, 11945, -22107};
+
+#define ATTR2_NAME "Attr2"
+#define ATTR2_RANK 2
+#define ATTR2_DIM1 2
+#define ATTR2_DIM2 2
+int attr_data2[ATTR2_DIM1][ATTR2_DIM2] = {{7614, -416}, {197814, -3}}; /* Test data for 2nd attribute */
+
+#define ATTR3_NAME "Attr3"
+#define ATTR3_RANK 3
+#define ATTR3_DIM1 2
+#define ATTR3_DIM2 2
+#define ATTR3_DIM3 2
+double attr_data3[ATTR3_DIM1][ATTR3_DIM2][ATTR3_DIM3] = {
+ {{2.3, -26.1}, {0.123, -10.0}}, {{973.23, -0.91827}, {2.0, 23.0}}}; /* Test data for 3rd attribute */
+
+#define ATTR4_NAME "Attr4"
+#define ATTR4_RANK 2
+#define ATTR4_DIM1 2
+#define ATTR4_DIM2 2
+#define ATTR4_FIELDNAME1 "i"
+#define ATTR4_FIELDNAME2 "d"
+#define ATTR4_FIELDNAME3 "c"
+size_t attr4_field1_off = 0;
+size_t attr4_field2_off = 0;
+size_t attr4_field3_off = 0;
+struct attr4_struct {
+ int i;
+ double d;
+ char c;
+} attr_data4[ATTR4_DIM1][ATTR4_DIM2] = {
+ {{3, -26.1, 'd'}, {-100000, 0.123, '3'}},
+ {{-23, 981724.2, 'Q'}, {0, 2.0, '\n'}}}; /* Test data for 4th attribute */
+
+#define ATTR5_NAME "Attr5"
+#define ATTR5_RANK 0
+float attr_data5 = -5.123F; /* Test data for 5th attribute */
+
+#define ATTR6_RANK 3
+#define ATTR6_DIM1 100
+#define ATTR6_DIM2 100
+#define ATTR6_DIM3 100
+
+#define ATTR7_NAME "attr 1 - 000000"
+#define ATTR8_NAME "attr 2"
+
+#define LINK1_NAME "Link1"
+
+#define NATTR_MANY_OLD 350
+#define NATTR_MANY_NEW 3500
+
+#define BUG2_NATTR 100
+#define BUG2_NATTR2 16
+
+#define BUG3_DSET_NAME "dset"
+#define BUG3_DT_NAME "dt"
+#define BUG3_ATTR_NAME "attr"
+
+/* Used by test_attr_delete_last_dense() */
+#define GRPNAME "grp"
+#define ATTRNAME "attr"
+#define DIM0 100
+#define DIM1 100
+#define RANK 2
+
+/* Used by test_attr_info_null_info_pointer() */
+#define GET_INFO_NULL_POINTER_ATTR_NAME "NullInfoPointerAttr"
+
+/* Used by test_attr_rename_invalid_name() */
+#define INVALID_RENAME_TEST_ATTR_NAME "InvalidRenameTestAttr"
+#define INVALID_RENAME_TEST_NEW_ATTR_NAME "InvalidRenameTestNewAttr"
+
+/* Used by test_attr_get_name_invalid_buf() */
+#define GET_NAME_INVALID_BUF_TEST_ATTR_NAME "InvalidNameBufferTestAttr"
+
+/* Attribute iteration struct */
+typedef struct {
+ H5_iter_order_t order; /* Direction of iteration */
+ unsigned ncalled; /* # of times callback is entered */
+ unsigned nskipped; /* # of attributes skipped */
+ int stop; /* # of iterations to stop after */
+ hsize_t curr; /* Current creation order value */
+ size_t max_visit; /* Size of "visited attribute" flag array */
+ hbool_t *visited; /* Pointer to array of "visited attribute" flags */
+} attr_iter_info_t;
+
+static herr_t attr_op1(hid_t loc_id, const char *name, const H5A_info_t *ainfo, void *op_data);
+
+/* Global dcpl ID, can be re-set as a generated dcpl for various operations
+ * across multiple tests.
+ * e.g., minimized dataset object headers
+ */
+static hid_t dcpl_g = H5P_DEFAULT;
+
+/****************************************************************
+**
+** test_attr_basic_write(): Test basic H5A (attribute) code.
+** Tests integer attributes on both datasets and groups
+**
+****************************************************************/
+static void
+test_attr_basic_write(hid_t fapl)
+{
+ hid_t fid1; /* HDF5 File IDs */
+ hid_t dataset; /* Dataset ID */
+ hid_t group; /* Group ID */
+ hid_t sid1, sid2; /* Dataspace ID */
+ hid_t attr, attr2; /* Attribute ID */
+#if 0
+ hsize_t attr_size; /* storage size for attribute */
+#endif
+ ssize_t attr_name_size; /* size of attribute name */
+ char *attr_name = NULL; /* name of attribute */
+ hsize_t dims1[] = {SPACE1_DIM1, SPACE1_DIM2, SPACE1_DIM3};
+ hsize_t dims2[] = {ATTR1_DIM1};
+ hsize_t dims3[] = {ATTR2_DIM1, ATTR2_DIM2};
+ int read_data1[ATTR1_DIM1] = {0}; /* Buffer for reading 1st attribute */
+ int i;
+#ifndef NO_PREVENT_CREATE_SAME_ATTRIBUTE_TWICE
+ hid_t ret_id; /* Generic hid_t return value */
+#endif
+ herr_t ret; /* Generic return value */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing Basic Scalar Attribute Writing Functions\n"));
+
+ /* Create file */
+ fid1 = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
+ CHECK(fid1, FAIL, "H5Fcreate");
+
+ /* Create dataspace for dataset */
+ sid1 = H5Screate_simple(SPACE1_RANK, dims1, NULL);
+ CHECK(sid1, FAIL, "H5Screate_simple");
+
+ /* Create a dataset */
+ dataset = H5Dcreate2(fid1, DSET1_NAME, H5T_NATIVE_UCHAR, sid1, H5P_DEFAULT, dcpl_g, H5P_DEFAULT);
+ CHECK(dataset, FAIL, "H5Dcreate2");
+
+ /* Create dataspace for attribute */
+ sid2 = H5Screate_simple(ATTR1_RANK, dims2, NULL);
+ CHECK(sid2, FAIL, "H5Screate_simple");
+
+ /* Try to create an attribute on the file (should create an attribute on root group) */
+ attr = H5Acreate2(fid1, ATTR1_NAME, H5T_NATIVE_INT, sid2, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(attr, FAIL, "H5Acreate2");
+
+ /* Close attribute */
+ ret = H5Aclose(attr);
+ CHECK(ret, FAIL, "H5Aclose");
+
+ /* Open the root group */
+ group = H5Gopen2(fid1, "/", H5P_DEFAULT);
+ CHECK(group, FAIL, "H5Gopen2");
+
+ /* Open attribute again */
+ attr = H5Aopen(group, ATTR1_NAME, H5P_DEFAULT);
+ CHECK(attr, FAIL, "H5Aopen");
+
+ /* Close attribute */
+ ret = H5Aclose(attr);
+ CHECK(ret, FAIL, "H5Aclose");
+
+ /* Close root group */
+ ret = H5Gclose(group);
+ CHECK(ret, FAIL, "H5Gclose");
+
+ /* Create an attribute for the dataset */
+ attr = H5Acreate2(dataset, ATTR1_NAME, H5T_NATIVE_INT, sid2, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(attr, FAIL, "H5Acreate2");
+#ifndef NO_PREVENT_CREATE_SAME_ATTRIBUTE_TWICE
+ /* Try to create the same attribute again (should fail) */
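+    /* H5E_BEGIN_TRY/H5E_END_TRY temporarily suppress the library's automatic
+     * error-stack printing while an expected failure is provoked.
+     */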
+ H5E_BEGIN_TRY
+ {
+ ret_id = H5Acreate2(dataset, ATTR1_NAME, H5T_NATIVE_INT, sid2, H5P_DEFAULT, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+ VERIFY(ret_id, FAIL, "H5Acreate2");
+#endif
+ /* Write attribute information */
+ ret = H5Awrite(attr, H5T_NATIVE_INT, attr_data1);
+ CHECK(ret, FAIL, "H5Awrite");
+
+    /* Create another attribute for the dataset */
+    attr2 = H5Acreate2(dataset, ATTR1A_NAME, H5T_NATIVE_INT, sid2, H5P_DEFAULT, H5P_DEFAULT);
+    CHECK(attr2, FAIL, "H5Acreate2");
+
+ /* Write attribute information */
+ ret = H5Awrite(attr2, H5T_NATIVE_INT, attr_data1a);
+ CHECK(ret, FAIL, "H5Awrite");
+#if 0
+ /* Check storage size for attribute */
+ attr_size = H5Aget_storage_size(attr);
+    VERIFY(attr_size, (ATTR1_DIM1 * sizeof(int)), "H5Aget_storage_size");
+#endif
+ /* Read attribute information immediately, without closing attribute */
+ ret = H5Aread(attr, H5T_NATIVE_INT, read_data1);
+ CHECK(ret, FAIL, "H5Aread");
+
+ /* Verify values read in */
+ for (i = 0; i < ATTR1_DIM1; i++)
+ if (attr_data1[i] != read_data1[i])
+ TestErrPrintf("%d: attribute data different: attr_data1[%d]=%d, read_data1[%d]=%d\n", __LINE__, i,
+ attr_data1[i], i, read_data1[i]);
+
+ /* Close attribute */
+ ret = H5Aclose(attr);
+ CHECK(ret, FAIL, "H5Aclose");
+
+ /* Close attribute */
+ ret = H5Aclose(attr2);
+ CHECK(ret, FAIL, "H5Aclose");
+
+ /* change attribute name */
+ ret = H5Arename(dataset, ATTR1_NAME, ATTR_TMP_NAME);
+ CHECK(ret, FAIL, "H5Arename");
+
+ /* Open attribute again */
+ attr = H5Aopen(dataset, ATTR_TMP_NAME, H5P_DEFAULT);
+ CHECK(attr, FAIL, "H5Aopen");
+
+ /* Verify new attribute name */
+ attr_name_size = H5Aget_name(attr, (size_t)0, NULL);
+ CHECK(attr_name_size, FAIL, "H5Aget_name");
+
+ if (attr_name_size > 0) {
+ attr_name = (char *)HDcalloc((size_t)(attr_name_size + 1), sizeof(char));
+ CHECK_PTR(attr_name, "HDcalloc");
+
+ if (attr_name) {
+ ret = (herr_t)H5Aget_name(attr, (size_t)(attr_name_size + 1), attr_name);
+ CHECK(ret, FAIL, "H5Aget_name");
+ ret = HDstrcmp(attr_name, ATTR_TMP_NAME);
+ VERIFY(ret, 0, "HDstrcmp");
+
+ HDfree(attr_name);
+ attr_name = NULL;
+ } /* end if */
+ } /* end if */
+
+ /* Read attribute information immediately, without closing attribute */
+ ret = H5Aread(attr, H5T_NATIVE_INT, read_data1);
+ CHECK(ret, FAIL, "H5Aread");
+
+ /* Verify values read in */
+ for (i = 0; i < ATTR1_DIM1; i++)
+ if (attr_data1[i] != read_data1[i])
+ TestErrPrintf("%d: attribute data different: attr_data1[%d]=%d, read_data1[%d]=%d\n", __LINE__, i,
+ attr_data1[i], i, read_data1[i]);
+
+ /* Close attribute */
+ ret = H5Aclose(attr);
+ CHECK(ret, FAIL, "H5Aclose");
+
+ /* Open the second attribute again */
+ attr2 = H5Aopen(dataset, ATTR1A_NAME, H5P_DEFAULT);
+ CHECK(attr, FAIL, "H5Aopen");
+
+ /* Verify new attribute name */
+ attr_name_size = H5Aget_name(attr2, (size_t)0, NULL);
+ CHECK(attr_name_size, FAIL, "H5Aget_name");
+
+ if (attr_name_size > 0) {
+ attr_name = (char *)HDcalloc((size_t)(attr_name_size + 1), sizeof(char));
+ CHECK_PTR(attr_name, "HDcalloc");
+
+ if (attr_name) {
+ ret = (herr_t)H5Aget_name(attr2, (size_t)(attr_name_size + 1), attr_name);
+ CHECK(ret, FAIL, "H5Aget_name");
+ ret = HDstrcmp(attr_name, ATTR1A_NAME);
+ VERIFY(ret, 0, "HDstrcmp");
+
+ HDfree(attr_name);
+ attr_name = NULL;
+ } /* end if */
+ } /* end if */
+
+ /* Read attribute information immediately, without closing attribute */
+ ret = H5Aread(attr2, H5T_NATIVE_INT, read_data1);
+ CHECK(ret, FAIL, "H5Aread");
+
+ /* Verify values read in */
+ for (i = 0; i < ATTR1_DIM1; i++)
+ if (attr_data1a[i] != read_data1[i])
+ TestErrPrintf("%d: attribute data different: attr_data1a[%d]=%d, read_data1[%d]=%d\n", __LINE__,
+ i, attr_data1a[i], i, read_data1[i]);
+
+ /* Close attribute */
+ ret = H5Aclose(attr2);
+ CHECK(ret, FAIL, "H5Aclose");
+
+ ret = H5Sclose(sid1);
+ CHECK(ret, FAIL, "H5Sclose");
+ ret = H5Sclose(sid2);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close Dataset */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Create group */
+ group = H5Gcreate2(fid1, GROUP1_NAME, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(group, FAIL, "H5Gcreate2");
+
+ /* Create dataspace for attribute */
+ sid2 = H5Screate_simple(ATTR2_RANK, dims3, NULL);
+ CHECK(sid2, FAIL, "H5Screate_simple");
+
+ /* Create an attribute for the group */
+ attr = H5Acreate2(group, ATTR2_NAME, H5T_NATIVE_INT, sid2, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(attr, FAIL, "H5Acreate2");
+#if 0
+ /* Check storage size for attribute */
+ attr_size = H5Aget_storage_size(attr);
+ VERIFY(attr_size, (ATTR2_DIM1 * ATTR2_DIM2 * sizeof(int)), "H5Aget_storage_size");
+#endif
+#ifndef NO_PREVENT_CREATE_SAME_ATTRIBUTE_TWICE
+ /* Try to create the same attribute again (should fail) */
+ H5E_BEGIN_TRY
+ {
+ ret_id = H5Acreate2(group, ATTR2_NAME, H5T_NATIVE_INT, sid2, H5P_DEFAULT, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+ VERIFY(ret_id, FAIL, "H5Acreate2");
+#endif
+ /* Write attribute information */
+ ret = H5Awrite(attr, H5T_NATIVE_INT, attr_data2);
+ CHECK(ret, FAIL, "H5Awrite");
+#if 0
+ /* Check storage size for attribute */
+ attr_size = H5Aget_storage_size(attr);
+    VERIFY(attr_size, (ATTR2_DIM1 * ATTR2_DIM2 * sizeof(int)), "H5Aget_storage_size");
+#endif
+ /* Close attribute */
+ ret = H5Aclose(attr);
+ CHECK(ret, FAIL, "H5Aclose");
+
+ /* Close Attribute dataspace */
+ ret = H5Sclose(sid2);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close Group */
+ ret = H5Gclose(group);
+ CHECK(ret, FAIL, "H5Gclose");
+
+ /* Close file */
+ ret = H5Fclose(fid1);
+ CHECK(ret, FAIL, "H5Fclose");
+} /* test_attr_basic_write() */
+
+/****************************************************************
+**
+** test_attr_basic_read(): Test basic H5A (attribute) reading code.
+**
+****************************************************************/
+static void
+test_attr_basic_read(hid_t fapl)
+{
+ hid_t fid1; /* HDF5 File IDs */
+ hid_t dataset; /* Dataset ID */
+ hid_t group; /* Group ID */
+ hid_t attr; /* Attribute ID */
+ H5O_info2_t oinfo; /* Object info */
+ int read_data1[ATTR1_DIM1] = {0}; /* Buffer for reading 1st attribute */
+ int read_data2[ATTR2_DIM1][ATTR2_DIM2] = {{0}}; /* Buffer for reading 2nd attribute */
+ int i, j; /* Local index variables */
+ herr_t ret; /* Generic return value */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing Basic Attribute Functions\n"));
+
+    /* Open file */
+ fid1 = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl);
+ CHECK(fid1, FAIL, "H5Fopen");
+
+ /* Open the dataset */
+ dataset = H5Dopen2(fid1, DSET1_NAME, H5P_DEFAULT);
+ CHECK(dataset, FAIL, "H5Dopen2");
+
+ /* Verify the correct number of attributes */
+ ret = H5Oget_info3(dataset, &oinfo, H5O_INFO_NUM_ATTRS);
+ CHECK(ret, FAIL, "H5Oget_info3");
+ VERIFY(oinfo.num_attrs, 2, "H5Oget_info3");
+
+ /* Open first attribute for the dataset */
+ attr = H5Aopen(dataset, ATTR_TMP_NAME, H5P_DEFAULT);
+ CHECK(attr, FAIL, "H5Aopen");
+
+ /* Read attribute information */
+ ret = H5Aread(attr, H5T_NATIVE_INT, read_data1);
+ CHECK(ret, FAIL, "H5Aread");
+
+ /* Verify values read in */
+ for (i = 0; i < ATTR1_DIM1; i++)
+ if (attr_data1[i] != read_data1[i])
+ TestErrPrintf("%d: attribute data different: attr_data1[%d]=%d, read_data1[%d]=%d\n", __LINE__, i,
+ attr_data1[i], i, read_data1[i]);
+
+ /* Close attribute */
+ ret = H5Aclose(attr);
+ CHECK(ret, FAIL, "H5Aclose");
+
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Open the group */
+ group = H5Gopen2(fid1, GROUP1_NAME, H5P_DEFAULT);
+ CHECK(group, FAIL, "H5Gopen2");
+
+ /* Verify the correct number of attributes */
+ ret = H5Oget_info3(group, &oinfo, H5O_INFO_NUM_ATTRS);
+ CHECK(ret, FAIL, "H5Oget_info3");
+ VERIFY(oinfo.num_attrs, 1, "H5Oget_info3");
+
+ /* Open the attribute for the group */
+ attr = H5Aopen(group, ATTR2_NAME, H5P_DEFAULT);
+ CHECK(attr, FAIL, "H5Aopen");
+
+ /* Read attribute information */
+ ret = H5Aread(attr, H5T_NATIVE_INT, read_data2);
+ CHECK(ret, FAIL, "H5Aread");
+
+ /* Verify values read in */
+ for (i = 0; i < ATTR2_DIM1; i++)
+ for (j = 0; j < ATTR2_DIM2; j++)
+ if (attr_data2[i][j] != read_data2[i][j])
+ TestErrPrintf("%d: attribute data different: attr_data2[%d][%d]=%d, read_data2[%d][%d]=%d\n",
+                              __LINE__, i, j, attr_data2[i][j], i, j, read_data2[i][j]);
+
+ /* Close attribute */
+ ret = H5Aclose(attr);
+ CHECK(ret, FAIL, "H5Aclose");
+
+ /* Close group */
+ ret = H5Gclose(group);
+ CHECK(ret, FAIL, "H5Gclose");
+
+ /* Close file */
+ ret = H5Fclose(fid1);
+ CHECK(ret, FAIL, "H5Fclose");
+} /* test_attr_basic_read() */
+
+/****************************************************************
+**
+** test_attr_flush(): Test H5A (attribute) code for performing
+** I/O when H5Fflush is used.
+**
+****************************************************************/
+static void
+test_attr_flush(hid_t fapl)
+{
+ hid_t fil, /* File ID */
+ att, /* Attribute ID */
+ spc, /* Dataspace ID */
+ set; /* Dataset ID */
+ double wdata = 3.14159; /* Data to write */
+ double rdata; /* Data read in */
+ herr_t ret; /* Generic return value */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing Attribute Flushing\n"));
+
+ fil = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
+ CHECK(fil, FAIL, "H5Fcreate");
+
+ spc = H5Screate(H5S_SCALAR);
+ CHECK(spc, FAIL, "H5Screate");
+
+ set = H5Dcreate2(fil, DSET1_NAME, H5T_NATIVE_DOUBLE, spc, H5P_DEFAULT, dcpl_g, H5P_DEFAULT);
+ CHECK(set, FAIL, "H5Dcreate2");
+
+ att = H5Acreate2(set, ATTR1_NAME, H5T_NATIVE_DOUBLE, spc, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(att, FAIL, "H5Acreate2");
+#ifndef NO_ATTR_FILL_VALUE_SUPPORT
+ ret = H5Aread(att, H5T_NATIVE_DOUBLE, &rdata);
+ CHECK(ret, FAIL, "H5Aread");
+
+ if (!H5_DBL_ABS_EQUAL(rdata, 0.0))
+ TestErrPrintf("attribute value wrong: rdata=%f, should be %f\n", rdata, 0.0);
+
+ ret = H5Fflush(fil, H5F_SCOPE_GLOBAL);
+ CHECK(ret, FAIL, "H5Fflush");
+
+ ret = H5Aread(att, H5T_NATIVE_DOUBLE, &rdata);
+ CHECK(ret, FAIL, "H5Awrite");
+
+ if (!H5_DBL_ABS_EQUAL(rdata, 0.0))
+ TestErrPrintf("attribute value wrong: rdata=%f, should be %f\n", rdata, 0.0);
+#else
+ HDprintf("** SKIPPED attribute pre-read temporarily until attribute fill values supported **\n");
+#endif
+ ret = H5Awrite(att, H5T_NATIVE_DOUBLE, &wdata);
+ CHECK(ret, FAIL, "H5Awrite");
+
+ ret = H5Aread(att, H5T_NATIVE_DOUBLE, &rdata);
+ CHECK(ret, FAIL, "H5Awrite");
+
+ if (!H5_DBL_ABS_EQUAL(rdata, wdata))
+ TestErrPrintf("attribute value wrong: rdata=%f, should be %f\n", rdata, wdata);
+
+ ret = H5Sclose(spc);
+ CHECK(ret, FAIL, "H5Sclose");
+ ret = H5Aclose(att);
+ CHECK(ret, FAIL, "H5Aclose");
+ ret = H5Dclose(set);
+ CHECK(ret, FAIL, "H5Dclose");
+ ret = H5Fclose(fil);
+ CHECK(ret, FAIL, "H5Fclose");
+} /* test_attr_flush() */
+
+/****************************************************************
+**
+** test_attr_plist(): Test Attribute Creation Property Lists
+**
+****************************************************************/
+static void
+test_attr_plist(hid_t fapl)
+{
+ hid_t fid = H5I_INVALID_HID; /* File ID */
+ hid_t did = H5I_INVALID_HID; /* Dataset ID */
+ hid_t dsid = H5I_INVALID_HID; /* Dataspace ID (for dataset) */
+ hid_t asid = H5I_INVALID_HID; /* Dataspace ID (for attribute) */
+ hid_t aid = H5I_INVALID_HID; /* Attribute ID */
+ hid_t acpl_id = H5I_INVALID_HID; /* Attribute creation property list ID */
+ hid_t aapl_id = H5I_INVALID_HID; /* Attribute access property list ID */
+ hsize_t dims1[] = {SPACE1_DIM1, SPACE1_DIM2, SPACE1_DIM3};
+ hsize_t dims2[] = {ATTR1_DIM1};
+ H5T_cset_t cset; /* Character set for attributes */
+ herr_t ret; /* Generic return value */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing Attribute Property Lists\n"));
+
+ /* Create file */
+ fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
+ CHECK(fid, H5I_INVALID_HID, "H5Fcreate");
+
+ /* Create dataspace for dataset */
+ dsid = H5Screate_simple(SPACE1_RANK, dims1, NULL);
+ CHECK(dsid, H5I_INVALID_HID, "H5Screate_simple");
+
+ /* Create a dataset */
+ did = H5Dcreate2(fid, DSET1_NAME, H5T_NATIVE_UCHAR, dsid, H5P_DEFAULT, dcpl_g, H5P_DEFAULT);
+ CHECK(did, H5I_INVALID_HID, "H5Dcreate2");
+
+ /* Create dataspace for attribute */
+ asid = H5Screate_simple(ATTR1_RANK, dims2, NULL);
+ CHECK(asid, H5I_INVALID_HID, "H5Screate_simple");
+
+ /* Create default creation property list for attribute */
+ acpl_id = H5Pcreate(H5P_ATTRIBUTE_CREATE);
+ CHECK(acpl_id, H5I_INVALID_HID, "H5Pcreate");
+
+ /* Create default access property list for attribute
+ * This currently has no properties, but we need to test its creation
+ * and use.
+ */
+ aapl_id = H5Pcreate(H5P_ATTRIBUTE_ACCESS);
+ CHECK(aapl_id, H5I_INVALID_HID, "H5Pcreate");
+
+ /* Get the character encoding and ensure that it is the default (ASCII) */
+ ret = H5Pget_char_encoding(acpl_id, &cset);
+ CHECK(ret, FAIL, "H5Pget_char_encoding");
+ VERIFY(cset, H5T_CSET_ASCII, "H5Pget_char_encoding");
+
+ /* Create an attribute for the dataset using the property list */
+ aid = H5Acreate2(did, ATTR1_NAME, H5T_NATIVE_INT, asid, acpl_id, aapl_id);
+ CHECK(aid, H5I_INVALID_HID, "H5Acreate2");
+
+ /* Close the property list, and get the attribute's creation property list */
+ ret = H5Pclose(acpl_id);
+ CHECK(ret, FAIL, "H5Pclose");
+ acpl_id = H5Aget_create_plist(aid);
+ CHECK(acpl_id, H5I_INVALID_HID, "H5Aget_create_plist");
+
+ /* Get the character encoding and ensure that it is the default (ASCII) */
+ ret = H5Pget_char_encoding(acpl_id, &cset);
+ CHECK(ret, FAIL, "H5Pget_char_encoding");
+ VERIFY(cset, H5T_CSET_ASCII, "H5Pget_char_encoding");
+
+ /* Close the property list and attribute */
+ ret = H5Pclose(acpl_id);
+ CHECK(ret, FAIL, "H5Pclose");
+ ret = H5Aclose(aid);
+ CHECK(ret, FAIL, "H5Aclose");
+
+ /* Create a new property list and modify it to use a different encoding */
+ acpl_id = H5Pcreate(H5P_ATTRIBUTE_CREATE);
+ CHECK(acpl_id, H5I_INVALID_HID, "H5Pcreate");
+ ret = H5Pset_char_encoding(acpl_id, H5T_CSET_UTF8);
+ CHECK(ret, FAIL, "H5Pset_char_encoding");
+
+ /* Get the character encoding and ensure that it has been changed */
+ ret = H5Pget_char_encoding(acpl_id, &cset);
+ CHECK(ret, FAIL, "H5Pget_char_encoding");
+ VERIFY(cset, H5T_CSET_UTF8, "H5Pget_char_encoding");
+
+ /* Create an attribute for the dataset using the modified property list */
+ aid = H5Acreate2(did, ATTR2_NAME, H5T_NATIVE_INT, asid, acpl_id, aapl_id);
+ CHECK(aid, H5I_INVALID_HID, "H5Acreate2");
+
+ /* Close the property list and attribute */
+ ret = H5Pclose(acpl_id);
+ CHECK(ret, FAIL, "H5Pclose");
+ ret = H5Aclose(aid);
+ CHECK(ret, FAIL, "H5Aclose");
+
+ /* Re-open the second attribute and ensure that its character encoding is correct */
+ aid = H5Aopen(did, ATTR2_NAME, H5P_DEFAULT);
+ CHECK(aid, H5I_INVALID_HID, "H5Aopen");
+ acpl_id = H5Aget_create_plist(aid);
+ CHECK(acpl_id, H5I_INVALID_HID, "H5Aget_create_plist");
+ ret = H5Pget_char_encoding(acpl_id, &cset);
+ CHECK(ret, FAIL, "H5Pget_char_encoding");
+ VERIFY(cset, H5T_CSET_UTF8, "H5Pget_char_encoding");
+
+ /* Close everything */
+ ret = H5Sclose(dsid);
+ CHECK(ret, FAIL, "H5Sclose");
+ ret = H5Sclose(asid);
+ CHECK(ret, FAIL, "H5Sclose");
+ ret = H5Pclose(aapl_id);
+ CHECK(ret, FAIL, "H5Pclose");
+ ret = H5Pclose(acpl_id);
+ CHECK(ret, FAIL, "H5Pclose");
+ ret = H5Aclose(aid);
+ CHECK(ret, FAIL, "H5Aclose");
+ ret = H5Dclose(did);
+ CHECK(ret, FAIL, "H5Dclose");
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+} /* test_attr_plist() */
+
+/****************************************************************
+**
+** test_attr_compound_write(): Test H5A (attribute) code.
+** Tests compound datatype attributes
+**
+****************************************************************/
+static void
+test_attr_compound_write(hid_t fapl)
+{
+ hid_t fid1; /* HDF5 File IDs */
+ hid_t dataset; /* Dataset ID */
+ hid_t tid1; /* Attribute datatype ID */
+ hid_t sid1, sid2; /* Dataspace ID */
+ hid_t attr; /* Attribute ID */
+ hsize_t dims1[] = {SPACE1_DIM1, SPACE1_DIM2, SPACE1_DIM3};
+ hsize_t dims2[] = {ATTR4_DIM1, ATTR4_DIM2};
+#ifndef NO_PREVENT_CREATE_SAME_ATTRIBUTE_TWICE
+ hid_t ret_id; /* Generic hid_t return value */
+#endif
+ herr_t ret; /* Generic return value */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing Multiple Attribute Functions\n"));
+
+ /* Create file */
+ fid1 = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
+ CHECK(fid1, FAIL, "H5Fcreate");
+
+ /* Create dataspace for dataset */
+ sid1 = H5Screate_simple(SPACE1_RANK, dims1, NULL);
+ CHECK(sid1, FAIL, "H5Screate_simple");
+
+ /* Create a dataset */
+ dataset = H5Dcreate2(fid1, DSET1_NAME, H5T_NATIVE_UCHAR, sid1, H5P_DEFAULT, dcpl_g, H5P_DEFAULT);
+ CHECK(dataset, FAIL, "H5Dcreate2");
+
+ /* Close dataset's dataspace */
+ ret = H5Sclose(sid1);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Create the attribute datatype. */
+ tid1 = H5Tcreate(H5T_COMPOUND, sizeof(struct attr4_struct));
+ CHECK(tid1, FAIL, "H5Tcreate");
+ attr4_field1_off = HOFFSET(struct attr4_struct, i);
+ ret = H5Tinsert(tid1, ATTR4_FIELDNAME1, attr4_field1_off, H5T_NATIVE_INT);
+ CHECK(ret, FAIL, "H5Tinsert");
+ attr4_field2_off = HOFFSET(struct attr4_struct, d);
+ ret = H5Tinsert(tid1, ATTR4_FIELDNAME2, attr4_field2_off, H5T_NATIVE_DOUBLE);
+ CHECK(ret, FAIL, "H5Tinsert");
+ attr4_field3_off = HOFFSET(struct attr4_struct, c);
+ ret = H5Tinsert(tid1, ATTR4_FIELDNAME3, attr4_field3_off, H5T_NATIVE_SCHAR);
+ CHECK(ret, FAIL, "H5Tinsert");
+
+ /* Create dataspace for 1st attribute */
+ sid2 = H5Screate_simple(ATTR4_RANK, dims2, NULL);
+ CHECK(sid2, FAIL, "H5Screate_simple");
+
+ /* Create complex attribute for the dataset */
+ attr = H5Acreate2(dataset, ATTR4_NAME, tid1, sid2, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(attr, FAIL, "H5Acreate2");
+#ifndef NO_PREVENT_CREATE_SAME_ATTRIBUTE_TWICE
+ /* Try to create the same attribute again (should fail) */
+ H5E_BEGIN_TRY
+ {
+ ret_id = H5Acreate2(dataset, ATTR4_NAME, tid1, sid2, H5P_DEFAULT, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+ VERIFY(ret_id, FAIL, "H5Acreate2");
+#endif
+ /* Write complex attribute data */
+ ret = H5Awrite(attr, tid1, attr_data4);
+ CHECK(ret, FAIL, "H5Awrite");
+
+ /* Close attribute */
+ ret = H5Aclose(attr);
+ CHECK(ret, FAIL, "H5Aclose");
+
+ /* Close attribute's dataspace */
+ ret = H5Sclose(sid2);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close attribute's datatype */
+ ret = H5Tclose(tid1);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ /* Close Dataset */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close file */
+ ret = H5Fclose(fid1);
+ CHECK(ret, FAIL, "H5Fclose");
+} /* test_attr_compound_write() */
+
+/****************************************************************
+**
+** test_attr_compound_read(): Test reading a compound datatype attribute.
+**
+****************************************************************/
+static void
+test_attr_compound_read(hid_t fapl)
+{
+ hid_t fid1; /* HDF5 File ID */
+ hid_t dataset; /* Dataset ID */
+ hid_t space; /* Attribute dataspace */
+ hid_t type; /* Attribute datatype */
+ hid_t attr; /* Attribute ID */
+ char attr_name[ATTR_NAME_LEN]; /* Buffer for attribute names */
+ int rank; /* Attribute rank */
+ hsize_t dims[ATTR_MAX_DIMS]; /* Attribute dimensions */
+ H5T_class_t t_class; /* Attribute datatype class */
+ H5T_order_t order; /* Attribute datatype order */
+ size_t size; /* Attribute datatype size as stored in file */
+ int fields; /* # of Attribute datatype fields */
+ char *fieldname; /* Name of a field */
+ size_t offset; /* Attribute datatype field offset */
+ hid_t field; /* Attribute field datatype */
+ struct attr4_struct read_data4[ATTR4_DIM1][ATTR4_DIM2]; /* Buffer for reading 4th attribute */
+ ssize_t name_len; /* Length of attribute name */
+ H5O_info2_t oinfo; /* Object info */
+ int i, j; /* Local index variables */
+ herr_t ret; /* Generic return value */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing Basic Attribute Functions\n"));
+
+ /* Open file */
+ fid1 = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl);
+ CHECK(fid1, FAIL, "H5Fopen");
+
+ /* Open the dataset */
+ dataset = H5Dopen2(fid1, DSET1_NAME, H5P_DEFAULT);
+ CHECK(dataset, FAIL, "H5Dopen2");
+
+ /* Verify the correct number of attributes */
+ ret = H5Oget_info3(dataset, &oinfo, H5O_INFO_NUM_ATTRS);
+ CHECK(ret, FAIL, "H5Oget_info3");
+ VERIFY(oinfo.num_attrs, 1, "H5Oget_info3");
+
+ /* Open 1st attribute for the dataset */
+ attr =
+ H5Aopen_by_idx(dataset, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, (hsize_t)0, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(attr, FAIL, "H5Aopen_by_idx");
+
+ /* Verify Dataspace */
+ space = H5Aget_space(attr);
+ CHECK(space, FAIL, "H5Aget_space");
+ rank = H5Sget_simple_extent_ndims(space);
+ VERIFY(rank, ATTR4_RANK, "H5Sget_simple_extent_ndims");
+ ret = H5Sget_simple_extent_dims(space, dims, NULL);
+ CHECK(ret, FAIL, "H5Sget_simple_extent_dims");
+ if (dims[0] != ATTR4_DIM1)
+ TestErrPrintf("attribute dimensions different: dims[0]=%d, should be %d\n", (int)dims[0], ATTR4_DIM1);
+ if (dims[1] != ATTR4_DIM2)
+ TestErrPrintf("attribute dimensions different: dims[1]=%d, should be %d\n", (int)dims[1], ATTR4_DIM2);
+ H5Sclose(space);
+
+ /* Verify Datatype */
+ type = H5Aget_type(attr);
+ CHECK(type, FAIL, "H5Aget_type");
+ t_class = H5Tget_class(type);
+ VERIFY(t_class, H5T_COMPOUND, "H5Tget_class");
+ fields = H5Tget_nmembers(type);
+ VERIFY(fields, 3, "H5Tget_nmembers");
+ for (i = 0; i < fields; i++) {
+ fieldname = H5Tget_member_name(type, (unsigned)i);
+        /* Flag any field name that matches none of the expected member names */
+        if (HDstrcmp(fieldname, ATTR4_FIELDNAME1) != 0 && HDstrcmp(fieldname, ATTR4_FIELDNAME2) != 0 &&
+            HDstrcmp(fieldname, ATTR4_FIELDNAME3) != 0)
+ TestErrPrintf("invalid field name for field #%d: %s\n", i, fieldname);
+ H5free_memory(fieldname);
+ } /* end for */
+ offset = H5Tget_member_offset(type, 0);
+ VERIFY(offset, attr4_field1_off, "H5Tget_member_offset");
+ offset = H5Tget_member_offset(type, 1);
+ VERIFY(offset, attr4_field2_off, "H5Tget_member_offset");
+ offset = H5Tget_member_offset(type, 2);
+ VERIFY(offset, attr4_field3_off, "H5Tget_member_offset");
+
+ /* Verify each field's type, class & size */
+ field = H5Tget_member_type(type, 0);
+ CHECK(field, FAIL, "H5Tget_member_type");
+ t_class = H5Tget_class(field);
+ VERIFY(t_class, H5T_INTEGER, "H5Tget_class");
+ order = H5Tget_order(field);
+ VERIFY_TYPE(order, H5Tget_order(H5T_NATIVE_INT), H5T_order_t, "%d", "H5Tget_order");
+ size = H5Tget_size(field);
+ VERIFY(size, H5Tget_size(H5T_NATIVE_INT), "H5Tget_size");
+ H5Tclose(field);
+ field = H5Tget_member_type(type, 1);
+ CHECK(field, FAIL, "H5Tget_member_type");
+ t_class = H5Tget_class(field);
+ VERIFY(t_class, H5T_FLOAT, "H5Tget_class");
+ order = H5Tget_order(field);
+ VERIFY_TYPE(order, H5Tget_order(H5T_NATIVE_DOUBLE), H5T_order_t, "%d", "H5Tget_order");
+ size = H5Tget_size(field);
+ VERIFY(size, H5Tget_size(H5T_NATIVE_DOUBLE), "H5Tget_size");
+ H5Tclose(field);
+ field = H5Tget_member_type(type, 2);
+ CHECK(field, FAIL, "H5Tget_member_type");
+ t_class = H5Tget_class(field);
+ VERIFY(t_class, H5T_INTEGER, "H5Tget_class");
+ order = H5Tget_order(field);
+ VERIFY_TYPE(order, H5Tget_order(H5T_NATIVE_SCHAR), H5T_order_t, "%d", "H5Tget_order");
+ size = H5Tget_size(field);
+ VERIFY(size, H5Tget_size(H5T_NATIVE_SCHAR), "H5Tget_size");
+ H5Tclose(field);
+
+ /* Read attribute information */
+ ret = H5Aread(attr, type, read_data4);
+ CHECK(ret, FAIL, "H5Aread");
+
+ /* Verify values read in */
+ for (i = 0; i < ATTR4_DIM1; i++)
+ for (j = 0; j < ATTR4_DIM2; j++)
+ if (HDmemcmp(&attr_data4[i][j], &read_data4[i][j], sizeof(struct attr4_struct)) != 0) {
+ HDprintf("%d: attribute data different: attr_data4[%d][%d].i=%d, read_data4[%d][%d].i=%d\n",
+ __LINE__, i, j, attr_data4[i][j].i, i, j, read_data4[i][j].i);
+ HDprintf("%d: attribute data different: attr_data4[%d][%d].d=%f, read_data4[%d][%d].d=%f\n",
+ __LINE__, i, j, attr_data4[i][j].d, i, j, read_data4[i][j].d);
+ TestErrPrintf(
+ "%d: attribute data different: attr_data4[%d][%d].c=%c, read_data4[%d][%d].c=%c\n",
+ __LINE__, i, j, attr_data4[i][j].c, i, j, read_data4[i][j].c);
+ } /* end if */
+
+ /* Verify Name */
+ name_len = H5Aget_name(attr, (size_t)ATTR_NAME_LEN, attr_name);
+ VERIFY(name_len, HDstrlen(ATTR4_NAME), "H5Aget_name");
+ if (HDstrcmp(attr_name, ATTR4_NAME) != 0)
+ TestErrPrintf("attribute name different: attr_name=%s, should be %s\n", attr_name, ATTR4_NAME);
+
+ /* Close attribute datatype */
+ ret = H5Tclose(type);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ /* Close attribute */
+ ret = H5Aclose(attr);
+ CHECK(ret, FAIL, "H5Aclose");
+
+ /* Close dataset */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close file */
+ ret = H5Fclose(fid1);
+ CHECK(ret, FAIL, "H5Fclose");
+} /* test_attr_compound_read() */
+
+/****************************************************************
+**
+** test_attr_scalar_write(): Test scalar H5A (attribute) writing code.
+**
+****************************************************************/
+static void
+test_attr_scalar_write(hid_t fapl)
+{
+ hid_t fid1; /* HDF5 File IDs */
+ hid_t dataset; /* Dataset ID */
+ hid_t sid1, sid2; /* Dataspace ID */
+ hid_t attr; /* Attribute ID */
+ hsize_t dims1[] = {SPACE1_DIM1, SPACE1_DIM2, SPACE1_DIM3};
+#ifndef NO_PREVENT_CREATE_SAME_ATTRIBUTE_TWICE
+ hid_t ret_id; /* Generic hid_t return value */
+#endif
+ herr_t ret; /* Generic return value */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing Basic Attribute Functions\n"));
+
+ /* Create file */
+ fid1 = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
+ CHECK(fid1, FAIL, "H5Fcreate");
+
+ /* Create dataspace for dataset */
+ sid1 = H5Screate_simple(SPACE1_RANK, dims1, NULL);
+ CHECK(sid1, FAIL, "H5Screate_simple");
+
+ /* Create a dataset */
+ dataset = H5Dcreate2(fid1, DSET1_NAME, H5T_NATIVE_UCHAR, sid1, H5P_DEFAULT, dcpl_g, H5P_DEFAULT);
+ CHECK(dataset, FAIL, "H5Dcreate2");
+
+ /* Create dataspace for attribute */
+ sid2 = H5Screate_simple(ATTR5_RANK, NULL, NULL);
+ CHECK(sid2, FAIL, "H5Screate_simple");
+
+ /* Create an attribute for the dataset */
+ attr = H5Acreate2(dataset, ATTR5_NAME, H5T_NATIVE_FLOAT, sid2, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(attr, FAIL, "H5Acreate2");
+#ifndef NO_PREVENT_CREATE_SAME_ATTRIBUTE_TWICE
+ /* Try to create the same attribute again (should fail) */
+ H5E_BEGIN_TRY
+ {
+ ret_id = H5Acreate2(dataset, ATTR5_NAME, H5T_NATIVE_FLOAT, sid2, H5P_DEFAULT, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+ VERIFY(ret_id, FAIL, "H5Acreate2");
+#endif
+ /* Write attribute information */
+ ret = H5Awrite(attr, H5T_NATIVE_FLOAT, &attr_data5);
+ CHECK(ret, FAIL, "H5Awrite");
+
+ /* Close attribute */
+ ret = H5Aclose(attr);
+ CHECK(ret, FAIL, "H5Aclose");
+
+ ret = H5Sclose(sid1);
+ CHECK(ret, FAIL, "H5Sclose");
+ ret = H5Sclose(sid2);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close Dataset */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close file */
+ ret = H5Fclose(fid1);
+ CHECK(ret, FAIL, "H5Fclose");
+} /* test_attr_scalar_write() */
+
+/****************************************************************
+**
+** test_attr_scalar_read(): Test scalar H5A (attribute) reading code.
+**
+****************************************************************/
+static void
+test_attr_scalar_read(hid_t fapl)
+{
+ hid_t fid1; /* HDF5 File IDs */
+ hid_t dataset; /* Dataset ID */
+ hid_t sid; /* Dataspace ID */
+ hid_t attr; /* Attribute ID */
+ H5S_class_t stype; /* Dataspace class */
+    float       rdata = 0.0F; /* Buffer for reading the scalar attribute */
+ H5O_info2_t oinfo; /* Object info */
+ herr_t ret; /* Generic return value */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing Basic Scalar Attribute Reading Functions\n"));
+
+    /* Open file */
+ fid1 = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl);
+ CHECK(fid1, FAIL, "H5Fopen");
+
+ /* Open the dataset */
+ dataset = H5Dopen2(fid1, DSET1_NAME, H5P_DEFAULT);
+ CHECK(dataset, FAIL, "H5Dopen2");
+
+ /* Verify the correct number of attributes */
+ ret = H5Oget_info3(dataset, &oinfo, H5O_INFO_NUM_ATTRS);
+ CHECK(ret, FAIL, "H5Oget_info3");
+ VERIFY(oinfo.num_attrs, 1, "H5Oget_info3");
+
+ /* Open an attribute for the dataset */
+ attr = H5Aopen(dataset, ATTR5_NAME, H5P_DEFAULT);
+ CHECK(attr, FAIL, "H5Aopen");
+
+ /* Read attribute information */
+ ret = H5Aread(attr, H5T_NATIVE_FLOAT, &rdata);
+ CHECK(ret, FAIL, "H5Aread");
+
+    /* Verify the floating-point value this way to avoid a compiler warning. */
+ if (!H5_FLT_ABS_EQUAL(rdata, attr_data5))
+ HDprintf("*** UNEXPECTED VALUE from %s should be %f, but is %f at line %4d in %s\n", "H5Aread",
+ (double)attr_data5, (double)rdata, (int)__LINE__, __FILE__);
+
+ /* Get the attribute's dataspace */
+ sid = H5Aget_space(attr);
+ CHECK(sid, FAIL, "H5Aget_space");
+
+ /* Make certain the dataspace is scalar */
+ stype = H5Sget_simple_extent_type(sid);
+ VERIFY(stype, H5S_SCALAR, "H5Sget_simple_extent_type");
+
+ /* Close dataspace */
+ ret = H5Sclose(sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close attribute */
+ ret = H5Aclose(attr);
+ CHECK(ret, FAIL, "H5Aclose");
+
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close file */
+ ret = H5Fclose(fid1);
+ CHECK(ret, FAIL, "H5Fclose");
+} /* test_attr_scalar_read() */
+
+/****************************************************************
+**
+** test_attr_mult_write(): Test basic H5A (attribute) code.
+**      Tests writing multiple attributes of various datatypes to a dataset
+**
+****************************************************************/
+static void
+test_attr_mult_write(hid_t fapl)
+{
+ hid_t fid1; /* HDF5 File IDs */
+ hid_t dataset; /* Dataset ID */
+ hid_t sid1, sid2; /* Dataspace ID */
+ hid_t attr; /* Attribute ID */
+ hsize_t dims1[] = {SPACE1_DIM1, SPACE1_DIM2, SPACE1_DIM3};
+ hsize_t dims2[] = {ATTR1_DIM1};
+ hsize_t dims3[] = {ATTR2_DIM1, ATTR2_DIM2};
+ hsize_t dims4[] = {ATTR3_DIM1, ATTR3_DIM2, ATTR3_DIM3};
+#ifndef NO_PREVENT_CREATE_SAME_ATTRIBUTE_TWICE
+ hid_t ret_id; /* Generic hid_t return value */
+#endif
+ herr_t ret; /* Generic return value */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing Multiple Attribute Functions\n"));
+
+ /* Create file */
+ fid1 = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
+ CHECK(fid1, FAIL, "H5Fcreate");
+
+ /* Create dataspace for dataset */
+ sid1 = H5Screate_simple(SPACE1_RANK, dims1, NULL);
+ CHECK(sid1, FAIL, "H5Screate_simple");
+
+ /* Create a dataset */
+ dataset = H5Dcreate2(fid1, DSET1_NAME, H5T_NATIVE_UCHAR, sid1, H5P_DEFAULT, dcpl_g, H5P_DEFAULT);
+ CHECK(dataset, FAIL, "H5Dcreate2");
+
+ /* Close dataset's dataspace */
+ ret = H5Sclose(sid1);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Create dataspace for 1st attribute */
+ sid2 = H5Screate_simple(ATTR1_RANK, dims2, NULL);
+ CHECK(sid2, FAIL, "H5Screate_simple");
+
+ /* Create 1st attribute for the dataset */
+ attr = H5Acreate2(dataset, ATTR1_NAME, H5T_NATIVE_INT, sid2, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(attr, FAIL, "H5Acreate2");
+#ifndef NO_PREVENT_CREATE_SAME_ATTRIBUTE_TWICE
+ /* Try to create the same attribute again (should fail) */
+ H5E_BEGIN_TRY
+ {
+ ret_id = H5Acreate2(dataset, ATTR1_NAME, H5T_NATIVE_INT, sid2, H5P_DEFAULT, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+ VERIFY(ret_id, FAIL, "H5Acreate2");
+#endif
+ /* Write 1st attribute data */
+ ret = H5Awrite(attr, H5T_NATIVE_INT, attr_data1);
+ CHECK(ret, FAIL, "H5Awrite");
+
+ /* Close 1st attribute */
+ ret = H5Aclose(attr);
+ CHECK(ret, FAIL, "H5Aclose");
+
+ /* Close attribute's dataspace */
+ ret = H5Sclose(sid2);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Create dataspace for 2nd attribute */
+ sid2 = H5Screate_simple(ATTR2_RANK, dims3, NULL);
+ CHECK(sid2, FAIL, "H5Screate_simple");
+
+ /* Create 2nd attribute for the dataset */
+ attr = H5Acreate2(dataset, ATTR2_NAME, H5T_NATIVE_INT, sid2, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(attr, FAIL, "H5Acreate2");
+#ifndef NO_PREVENT_CREATE_SAME_ATTRIBUTE_TWICE
+ /* Try to create the same attribute again (should fail) */
+ H5E_BEGIN_TRY
+ {
+ ret_id = H5Acreate2(dataset, ATTR2_NAME, H5T_NATIVE_INT, sid2, H5P_DEFAULT, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+ VERIFY(ret_id, FAIL, "H5Acreate2");
+#endif
+ /* Write 2nd attribute information */
+ ret = H5Awrite(attr, H5T_NATIVE_INT, attr_data2);
+ CHECK(ret, FAIL, "H5Awrite");
+
+ /* Close 2nd attribute */
+ ret = H5Aclose(attr);
+ CHECK(ret, FAIL, "H5Aclose");
+
+ /* Close 2nd attribute's dataspace */
+ ret = H5Sclose(sid2);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Create dataspace for 3rd attribute */
+ sid2 = H5Screate_simple(ATTR3_RANK, dims4, NULL);
+ CHECK(sid2, FAIL, "H5Screate_simple");
+
+ /* Create 3rd attribute for the dataset */
+ attr = H5Acreate2(dataset, ATTR3_NAME, H5T_NATIVE_DOUBLE, sid2, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(attr, FAIL, "H5Acreate2");
+#ifndef NO_PREVENT_CREATE_SAME_ATTRIBUTE_TWICE
+ /* Try to create the same attribute again (should fail) */
+ H5E_BEGIN_TRY
+ {
+ ret_id = H5Acreate2(dataset, ATTR3_NAME, H5T_NATIVE_DOUBLE, sid2, H5P_DEFAULT, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+ VERIFY(ret_id, FAIL, "H5Acreate2");
+#endif
+ /* Write 3rd attribute information */
+ ret = H5Awrite(attr, H5T_NATIVE_DOUBLE, attr_data3);
+ CHECK(ret, FAIL, "H5Awrite");
+
+ /* Close 3rd attribute */
+ ret = H5Aclose(attr);
+ CHECK(ret, FAIL, "H5Aclose");
+
+ /* Close 3rd attribute's dataspace */
+ ret = H5Sclose(sid2);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close Dataset */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close file */
+ ret = H5Fclose(fid1);
+ CHECK(ret, FAIL, "H5Fclose");
+} /* test_attr_mult_write() */
+
+/****************************************************************
+**
+** test_attr_mult_read(): Test reading multiple attributes from a dataset.
+**
+****************************************************************/
+static void
+test_attr_mult_read(hid_t fapl)
+{
+ hid_t fid1; /* HDF5 File ID */
+ hid_t dataset; /* Dataset ID */
+ hid_t space; /* Attribute dataspace */
+ hid_t type; /* Attribute datatype */
+ hid_t attr; /* Attribute ID */
+ char attr_name[ATTR_NAME_LEN]; /* Buffer for attribute names */
+ char temp_name[ATTR_NAME_LEN]; /* Buffer for mangling attribute names */
+ int rank; /* Attribute rank */
+ hsize_t dims[ATTR_MAX_DIMS]; /* Attribute dimensions */
+ H5T_class_t t_class; /* Attribute datatype class */
+ H5T_order_t order; /* Attribute datatype order */
+ size_t size; /* Attribute datatype size as stored in file */
+ int read_data1[ATTR1_DIM1] = {0}; /* Buffer for reading 1st attribute */
+ int read_data2[ATTR2_DIM1][ATTR2_DIM2] = {{0}}; /* Buffer for reading 2nd attribute */
+ double read_data3[ATTR3_DIM1][ATTR3_DIM2][ATTR3_DIM3] = {{{0}}}; /* Buffer for reading 3rd attribute */
+ ssize_t name_len; /* Length of attribute name */
+ H5O_info2_t oinfo; /* Object info */
+ int i, j, k; /* Local index values */
+ herr_t ret; /* Generic return value */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing Basic Attribute Functions\n"));
+
+ /* Open file */
+ fid1 = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl);
+ CHECK(fid1, FAIL, "H5Fopen");
+
+ /* Open the dataset */
+ dataset = H5Dopen2(fid1, DSET1_NAME, H5P_DEFAULT);
+ CHECK(dataset, FAIL, "H5Dopen2");
+
+ /* Verify the correct number of attributes */
+ ret = H5Oget_info3(dataset, &oinfo, H5O_INFO_NUM_ATTRS);
+ CHECK(ret, FAIL, "H5Oget_info3");
+ VERIFY(oinfo.num_attrs, 3, "H5Oget_info3");
+
+ /* Open 1st attribute for the dataset */
+ attr =
+ H5Aopen_by_idx(dataset, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, (hsize_t)0, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(attr, FAIL, "H5Aopen_by_idx");
+
+ /* Verify Dataspace */
+ space = H5Aget_space(attr);
+ CHECK(space, FAIL, "H5Aget_space");
+ rank = H5Sget_simple_extent_ndims(space);
+ VERIFY(rank, ATTR1_RANK, "H5Sget_simple_extent_ndims");
+ ret = H5Sget_simple_extent_dims(space, dims, NULL);
+ CHECK(ret, FAIL, "H5Sget_simple_extent_dims");
+ if (dims[0] != ATTR1_DIM1)
+ TestErrPrintf("attribute dimensions different: dims[0]=%d, should be %d\n", (int)dims[0], ATTR1_DIM1);
+ H5Sclose(space);
+
+ /* Verify Datatype */
+ type = H5Aget_type(attr);
+ CHECK(type, FAIL, "H5Aget_type");
+ t_class = H5Tget_class(type);
+ VERIFY(t_class, H5T_INTEGER, "H5Tget_class");
+ order = H5Tget_order(type);
+ VERIFY_TYPE(order, H5Tget_order(H5T_NATIVE_INT), H5T_order_t, "%d", "H5Tget_order");
+ size = H5Tget_size(type);
+ VERIFY(size, H5Tget_size(H5T_NATIVE_INT), "H5Tget_size");
+ H5Tclose(type);
+
+ /* Read attribute information */
+ ret = H5Aread(attr, H5T_NATIVE_INT, read_data1);
+ CHECK(ret, FAIL, "H5Aread");
+
+ /* Verify values read in */
+ for (i = 0; i < ATTR1_DIM1; i++)
+ if (attr_data1[i] != read_data1[i])
+ TestErrPrintf("%d: attribute data different: attr_data1[%d]=%d, read_data1[%d]=%d\n", __LINE__, i,
+ attr_data1[i], i, read_data1[i]);
+
+ /* Verify Name */
+ name_len = H5Aget_name(attr, (size_t)ATTR_NAME_LEN, attr_name);
+ VERIFY(name_len, HDstrlen(ATTR1_NAME), "H5Aget_name");
+ if (HDstrcmp(attr_name, ATTR1_NAME) != 0)
+ TestErrPrintf("attribute name different: attr_name=%s, should be %s\n", attr_name, ATTR1_NAME);
+
+ /* Verify Name with too small of a buffer */
+ name_len = H5Aget_name(attr, HDstrlen(ATTR1_NAME), attr_name);
+ VERIFY(name_len, HDstrlen(ATTR1_NAME), "H5Aget_name");
+ HDstrcpy(temp_name, ATTR1_NAME); /* make a copy of the name */
+ temp_name[HDstrlen(ATTR1_NAME) - 1] = '\0'; /* truncate it to match the one retrieved */
+ if (HDstrcmp(attr_name, temp_name) != 0)
+ TestErrPrintf("attribute name different: attr_name=%s, should be %s\n", attr_name, temp_name);
+
+ /* Close attribute */
+ ret = H5Aclose(attr);
+ CHECK(ret, FAIL, "H5Aclose");
+
+ /* Open 2nd attribute for the dataset */
+ attr =
+ H5Aopen_by_idx(dataset, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, (hsize_t)1, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(attr, FAIL, "H5Aopen_by_idx");
+
+ /* Verify Dataspace */
+ space = H5Aget_space(attr);
+ CHECK(space, FAIL, "H5Aget_space");
+ rank = H5Sget_simple_extent_ndims(space);
+ VERIFY(rank, ATTR2_RANK, "H5Sget_simple_extent_ndims");
+ ret = H5Sget_simple_extent_dims(space, dims, NULL);
+ CHECK(ret, FAIL, "H5Sget_simple_extent_dims");
+ if (dims[0] != ATTR2_DIM1)
+ TestErrPrintf("attribute dimensions different: dims[0]=%d, should be %d\n", (int)dims[0], ATTR2_DIM1);
+ if (dims[1] != ATTR2_DIM2)
+ TestErrPrintf("attribute dimensions different: dims[1]=%d, should be %d\n", (int)dims[1], ATTR2_DIM2);
+ H5Sclose(space);
+
+ /* Verify Datatype */
+ type = H5Aget_type(attr);
+ CHECK(type, FAIL, "H5Aget_type");
+ t_class = H5Tget_class(type);
+ VERIFY(t_class, H5T_INTEGER, "H5Tget_class");
+ order = H5Tget_order(type);
+ VERIFY_TYPE(order, H5Tget_order(H5T_NATIVE_INT), H5T_order_t, "%d", "H5Tget_order");
+ size = H5Tget_size(type);
+ VERIFY(size, H5Tget_size(H5T_NATIVE_INT), "H5Tget_size");
+ H5Tclose(type);
+
+ /* Read attribute information */
+ ret = H5Aread(attr, H5T_NATIVE_INT, read_data2);
+ CHECK(ret, FAIL, "H5Aread");
+
+ /* Verify values read in */
+ for (i = 0; i < ATTR2_DIM1; i++)
+ for (j = 0; j < ATTR2_DIM2; j++)
+ if (attr_data2[i][j] != read_data2[i][j])
+ TestErrPrintf("%d: attribute data different: attr_data2[%d][%d]=%d, read_data2[%d][%d]=%d\n",
+ __LINE__, i, j, attr_data2[i][j], i, j, read_data2[i][j]);
+
+ /* Verify Name */
+ name_len = H5Aget_name(attr, (size_t)ATTR_NAME_LEN, attr_name);
+ VERIFY(name_len, HDstrlen(ATTR2_NAME), "H5Aget_name");
+ if (HDstrcmp(attr_name, ATTR2_NAME) != 0)
+ TestErrPrintf("attribute name different: attr_name=%s, should be %s\n", attr_name, ATTR2_NAME);
+
+ /* Verify Name with too small of a buffer */
+ name_len = H5Aget_name(attr, HDstrlen(ATTR2_NAME), attr_name);
+ VERIFY(name_len, HDstrlen(ATTR2_NAME), "H5Aget_name");
+ HDstrcpy(temp_name, ATTR2_NAME); /* make a copy of the name */
+ temp_name[HDstrlen(ATTR2_NAME) - 1] = '\0'; /* truncate it to match the one retrieved */
+ if (HDstrcmp(attr_name, temp_name) != 0)
+ TestErrPrintf("attribute name different: attr_name=%s, should be %s\n", attr_name, temp_name);
+
+ /* Close attribute */
+ ret = H5Aclose(attr);
+ CHECK(ret, FAIL, "H5Aclose");
+
+    /* Open 3rd attribute for the dataset */
+ attr =
+ H5Aopen_by_idx(dataset, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, (hsize_t)2, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(attr, FAIL, "H5Aopen_by_idx");
+
+ /* Verify Dataspace */
+ space = H5Aget_space(attr);
+ CHECK(space, FAIL, "H5Aget_space");
+ rank = H5Sget_simple_extent_ndims(space);
+ VERIFY(rank, ATTR3_RANK, "H5Sget_simple_extent_ndims");
+ ret = H5Sget_simple_extent_dims(space, dims, NULL);
+ CHECK(ret, FAIL, "H5Sget_simple_extent_dims");
+ if (dims[0] != ATTR3_DIM1)
+ TestErrPrintf("attribute dimensions different: dims[0]=%d, should be %d\n", (int)dims[0], ATTR3_DIM1);
+ if (dims[1] != ATTR3_DIM2)
+ TestErrPrintf("attribute dimensions different: dims[1]=%d, should be %d\n", (int)dims[1], ATTR3_DIM2);
+ if (dims[2] != ATTR3_DIM3)
+ TestErrPrintf("attribute dimensions different: dims[2]=%d, should be %d\n", (int)dims[2], ATTR3_DIM3);
+ H5Sclose(space);
+
+ /* Verify Datatype */
+ type = H5Aget_type(attr);
+ CHECK(type, FAIL, "H5Aget_type");
+ t_class = H5Tget_class(type);
+ VERIFY(t_class, H5T_FLOAT, "H5Tget_class");
+ order = H5Tget_order(type);
+ VERIFY_TYPE(order, H5Tget_order(H5T_NATIVE_DOUBLE), H5T_order_t, "%d", "H5Tget_order");
+ size = H5Tget_size(type);
+ VERIFY(size, H5Tget_size(H5T_NATIVE_DOUBLE), "H5Tget_size");
+ H5Tclose(type);
+
+ /* Read attribute information */
+ ret = H5Aread(attr, H5T_NATIVE_DOUBLE, read_data3);
+ CHECK(ret, FAIL, "H5Aread");
+
+ /* Verify values read in */
+ for (i = 0; i < ATTR3_DIM1; i++)
+ for (j = 0; j < ATTR3_DIM2; j++)
+ for (k = 0; k < ATTR3_DIM3; k++)
+ if (!H5_DBL_ABS_EQUAL(attr_data3[i][j][k], read_data3[i][j][k]))
+ TestErrPrintf("%d: attribute data different: attr_data3[%d][%d][%d]=%f, "
+ "read_data3[%d][%d][%d]=%f\n",
+ __LINE__, i, j, k, attr_data3[i][j][k], i, j, k, read_data3[i][j][k]);
+
+ /* Verify Name */
+ name_len = H5Aget_name(attr, (size_t)ATTR_NAME_LEN, attr_name);
+ VERIFY(name_len, HDstrlen(ATTR3_NAME), "H5Aget_name");
+ if (HDstrcmp(attr_name, ATTR3_NAME) != 0)
+ TestErrPrintf("attribute name different: attr_name=%s, should be %s\n", attr_name, ATTR3_NAME);
+
+ /* Verify Name with too small of a buffer */
+ name_len = H5Aget_name(attr, HDstrlen(ATTR3_NAME), attr_name);
+ VERIFY(name_len, HDstrlen(ATTR3_NAME), "H5Aget_name");
+ HDstrcpy(temp_name, ATTR3_NAME); /* make a copy of the name */
+ temp_name[HDstrlen(ATTR3_NAME) - 1] = '\0'; /* truncate it to match the one retrieved */
+ if (HDstrcmp(attr_name, temp_name) != 0)
+ TestErrPrintf("attribute name different: attr_name=%s, should be %s\n", attr_name, temp_name);
+
+ /* Close attribute */
+ ret = H5Aclose(attr);
+ CHECK(ret, FAIL, "H5Aclose");
+
+ /* Close dataset */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close file */
+ ret = H5Fclose(fid1);
+ CHECK(ret, FAIL, "H5Fclose");
+} /* test_attr_mult_read() */
+
+/****************************************************************
+**
+** attr_op1(): Attribute iteration callback used by test_attr_iterate();
+**      verifies that attribute names are visited in the expected order.
+**
+****************************************************************/
+static herr_t
+attr_op1(hid_t H5_ATTR_UNUSED loc_id, const char *name, const H5A_info_t H5_ATTR_UNUSED *ainfo, void *op_data)
+{
+ int *count = (int *)op_data;
+ herr_t ret = 0;
+
+ switch (*count) {
+ case 0:
+ if (HDstrcmp(name, ATTR1_NAME) != 0)
+ TestErrPrintf("attribute name different: name=%s, should be %s\n", name, ATTR1_NAME);
+ (*count)++;
+ break;
+
+ case 1:
+ if (HDstrcmp(name, ATTR2_NAME) != 0)
+ TestErrPrintf("attribute name different: name=%s, should be %s\n", name, ATTR2_NAME);
+ (*count)++;
+ break;
+
+ case 2:
+ if (HDstrcmp(name, ATTR3_NAME) != 0)
+ TestErrPrintf("attribute name different: name=%s, should be %s\n", name, ATTR3_NAME);
+ (*count)++;
+ break;
+
+ default:
+ ret = -1;
+ break;
+ } /* end switch() */
+
+ return (ret);
+} /* end attr_op1() */
+
+/****************************************************************
+**
+** test_attr_iterate(): Test H5A (attribute) iterator code.
+**
+****************************************************************/
+static void
+test_attr_iterate(hid_t fapl)
+{
+ hid_t file; /* HDF5 File ID */
+ hid_t dataset; /* Dataset ID */
+ hid_t sid; /* Dataspace ID */
+ int count; /* operator data for the iterator */
+ H5O_info2_t oinfo; /* Object info */
+ herr_t ret; /* Generic return value */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing Basic Attribute Functions\n"));
+
+ /* Open file */
+ file = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl);
+ CHECK(file, FAIL, "H5Fopen");
+
+ /* Create a dataspace */
+ sid = H5Screate(H5S_SCALAR);
+ CHECK(sid, FAIL, "H5Screate");
+
+ /* Create a new dataset */
+ dataset = H5Dcreate2(file, DSET2_NAME, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl_g, H5P_DEFAULT);
+ CHECK(dataset, FAIL, "H5Dcreate2");
+
+ /* Close dataspace */
+ ret = H5Sclose(sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Verify the correct number of attributes */
+ ret = H5Oget_info3(dataset, &oinfo, H5O_INFO_NUM_ATTRS);
+ CHECK(ret, FAIL, "H5Oget_info3");
+ VERIFY(oinfo.num_attrs, 0, "H5Oget_info3");
+
+ /* Iterate over attributes on dataset */
+ count = 0;
+ ret = H5Aiterate2(dataset, H5_INDEX_NAME, H5_ITER_INC, NULL, attr_op1, &count);
+ VERIFY(ret, 0, "H5Aiterate2");
+
+ /* Close dataset */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Open existing dataset w/attributes */
+ dataset = H5Dopen2(file, DSET1_NAME, H5P_DEFAULT);
+ CHECK(dataset, FAIL, "H5Dopen2");
+
+ /* Verify the correct number of attributes */
+ ret = H5Oget_info3(dataset, &oinfo, H5O_INFO_NUM_ATTRS);
+ CHECK(ret, FAIL, "H5Oget_info3");
+ VERIFY(oinfo.num_attrs, 3, "H5Oget_info3");
+
+ /* Iterate over attributes on dataset */
+ count = 0;
+ ret = H5Aiterate2(dataset, H5_INDEX_NAME, H5_ITER_INC, NULL, attr_op1, &count);
+ VERIFY(ret, 0, "H5Aiterate2");
+
+ /* Close dataset */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close file */
+ ret = H5Fclose(file);
+ CHECK(ret, FAIL, "H5Fclose");
+} /* test_attr_iterate() */
+
+/****************************************************************
+**
+** test_attr_delete(): Test H5A (attribute) code for deleting objects.
+**
+****************************************************************/
+static void
+test_attr_delete(hid_t fapl)
+{
+ hid_t fid1; /* HDF5 File ID */
+ hid_t dataset; /* Dataset ID */
+ hid_t attr; /* Attribute ID */
+ char attr_name[ATTR_NAME_LEN]; /* Buffer for attribute names */
+ ssize_t name_len; /* Length of attribute name */
+ H5O_info2_t oinfo; /* Object info */
+ herr_t ret; /* Generic return value */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing Basic Attribute Deletion Functions\n"));
+
+ /* Open file */
+ fid1 = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl);
+ CHECK(fid1, FAIL, "H5Fopen");
+
+ /* Open the dataset */
+ dataset = H5Dopen2(fid1, DSET1_NAME, H5P_DEFAULT);
+ CHECK(dataset, FAIL, "H5Dopen2");
+
+ /* Verify the correct number of attributes */
+ ret = H5Oget_info3(dataset, &oinfo, H5O_INFO_NUM_ATTRS);
+ CHECK(ret, FAIL, "H5Oget_info3");
+ VERIFY(oinfo.num_attrs, 3, "H5Oget_info3");
+#ifndef NO_DELETE_NONEXISTENT_ATTRIBUTE
+ /* Try to delete bogus attribute */
+ H5E_BEGIN_TRY
+ {
+ ret = H5Adelete(dataset, "Bogus");
+ }
+ H5E_END_TRY;
+ VERIFY(ret, FAIL, "H5Adelete");
+#endif
+ /* Verify the correct number of attributes */
+ ret = H5Oget_info3(dataset, &oinfo, H5O_INFO_NUM_ATTRS);
+ CHECK(ret, FAIL, "H5Oget_info3");
+ VERIFY(oinfo.num_attrs, 3, "H5Oget_info3");
+
+ /* Delete middle (2nd) attribute */
+ ret = H5Adelete(dataset, ATTR2_NAME);
+ CHECK(ret, FAIL, "H5Adelete");
+
+ /* Verify the correct number of attributes */
+ ret = H5Oget_info3(dataset, &oinfo, H5O_INFO_NUM_ATTRS);
+ CHECK(ret, FAIL, "H5Oget_info3");
+ VERIFY(oinfo.num_attrs, 2, "H5Oget_info3");
+
+ /* Open 1st attribute for the dataset */
+ attr =
+ H5Aopen_by_idx(dataset, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, (hsize_t)0, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(attr, FAIL, "H5Aopen_by_idx");
+
+ /* Verify Name */
+ name_len = H5Aget_name(attr, (size_t)ATTR_NAME_LEN, attr_name);
+ VERIFY(name_len, HDstrlen(ATTR1_NAME), "H5Aget_name");
+ if (HDstrcmp(attr_name, ATTR1_NAME) != 0)
+ TestErrPrintf("attribute name different: attr_name=%s, should be %s\n", attr_name, ATTR1_NAME);
+
+ /* Close attribute */
+ ret = H5Aclose(attr);
+ CHECK(ret, FAIL, "H5Aclose");
+
+    /* Open last (formerly 3rd) attribute for the dataset */
+ attr =
+ H5Aopen_by_idx(dataset, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, (hsize_t)1, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(attr, FAIL, "H5Aopen_by_idx");
+
+ /* Verify Name */
+ name_len = H5Aget_name(attr, (size_t)ATTR_NAME_LEN, attr_name);
+ VERIFY(name_len, HDstrlen(ATTR3_NAME), "H5Aget_name");
+ if (HDstrcmp(attr_name, ATTR3_NAME) != 0)
+ TestErrPrintf("attribute name different: attr_name=%s, should be %s\n", attr_name, ATTR3_NAME);
+
+ /* Close attribute */
+ ret = H5Aclose(attr);
+ CHECK(ret, FAIL, "H5Aclose");
+
+ /* Delete first attribute */
+ ret = H5Adelete(dataset, ATTR1_NAME);
+ CHECK(ret, FAIL, "H5Adelete");
+
+ /* Verify the correct number of attributes */
+ ret = H5Oget_info3(dataset, &oinfo, H5O_INFO_NUM_ATTRS);
+ CHECK(ret, FAIL, "H5Oget_info3");
+ VERIFY(oinfo.num_attrs, 1, "H5Oget_info3");
+
+    /* Open last (formerly 3rd) attribute for the dataset */
+ attr =
+ H5Aopen_by_idx(dataset, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, (hsize_t)0, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(attr, FAIL, "H5Aopen_by_idx");
+
+ /* Verify Name */
+ name_len = H5Aget_name(attr, (size_t)ATTR_NAME_LEN, attr_name);
+ VERIFY(name_len, HDstrlen(ATTR3_NAME), "H5Aget_name");
+ if (HDstrcmp(attr_name, ATTR3_NAME) != 0)
+ TestErrPrintf("attribute name different: attr_name=%s, should be %s\n", attr_name, ATTR3_NAME);
+
+ /* Close attribute */
+ ret = H5Aclose(attr);
+ CHECK(ret, FAIL, "H5Aclose");
+
+    /* Delete the last remaining attribute */
+ ret = H5Adelete(dataset, ATTR3_NAME);
+ CHECK(ret, FAIL, "H5Adelete");
+
+ /* Verify the correct number of attributes */
+ ret = H5Oget_info3(dataset, &oinfo, H5O_INFO_NUM_ATTRS);
+ CHECK(ret, FAIL, "H5Oget_info3");
+ VERIFY(oinfo.num_attrs, 0, "H5Oget_info3");
+
+ /* Close dataset */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close file */
+ ret = H5Fclose(fid1);
+ CHECK(ret, FAIL, "H5Fclose");
+} /* test_attr_delete() */
+
+/****************************************************************
+**
+** test_attr_dtype_shared(): Test H5A (attribute) code for using
+** shared datatypes in attributes.
+**
+****************************************************************/
+static void
+test_attr_dtype_shared(hid_t fapl)
+{
+#ifndef NO_SHARED_DATATYPES
+ hid_t file_id; /* File ID */
+ hid_t dset_id; /* Dataset ID */
+ hid_t space_id; /* Dataspace ID for dataset & attribute */
+ hid_t type_id; /* Datatype ID for named datatype */
+ hid_t attr_id; /* Attribute ID */
+ int data = 8; /* Data to write */
+    int           rdata = 0;       /* Data read in */
+ H5O_info2_t oinfo; /* Object's information */
+#if 0
+ h5_stat_size_t empty_filesize; /* Size of empty file */
+ h5_stat_size_t filesize; /* Size of file after modifications */
+#endif
+ herr_t ret; /* Generic return value */
+#else
+ (void)fapl;
+#endif
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing Shared Datatypes with Attributes - SKIPPED for now due to no support for shared "
+ "datatypes\n"));
+#ifndef NO_SHARED_DATATYPES
+ /* Create a file */
+ file_id = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
+ CHECK(file_id, FAIL, "H5Fopen");
+
+ /* Close file */
+ ret = H5Fclose(file_id);
+ CHECK(ret, FAIL, "H5Fclose");
+#if 0
+ /* Get size of file */
+ empty_filesize = h5_get_file_size(FILENAME, fapl);
+ if (empty_filesize < 0)
+ TestErrPrintf("Line %d: file size wrong!\n", __LINE__);
+#endif
+
+ /* Re-open file */
+ file_id = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl);
+ CHECK(file_id, FAIL, "H5Fopen");
+
+ /* Create a datatype to commit and use */
+ type_id = H5Tcopy(H5T_NATIVE_INT);
+ CHECK(type_id, FAIL, "H5Tcopy");
+
+ /* Commit datatype to file */
+ ret = H5Tcommit2(file_id, TYPE1_NAME, type_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Tcommit2");
+
+ /* Check reference count on named datatype */
+ ret = H5Oget_info_by_name3(file_id, TYPE1_NAME, &oinfo, H5O_INFO_BASIC, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Oget_info_by_name3");
+ VERIFY(oinfo.rc, 1, "H5Oget_info_by_name3");
+
+ /* Create dataspace for dataset */
+ space_id = H5Screate(H5S_SCALAR);
+ CHECK(space_id, FAIL, "H5Screate");
+
+ /* Create dataset */
+ dset_id = H5Dcreate2(file_id, DSET1_NAME, type_id, space_id, H5P_DEFAULT, dcpl_g, H5P_DEFAULT);
+ CHECK(dset_id, FAIL, "H5Dcreate2");
+
+ /* Check reference count on named datatype */
+ ret = H5Oget_info_by_name3(file_id, TYPE1_NAME, &oinfo, H5O_INFO_BASIC, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Oget_info_by_name3");
+ VERIFY(oinfo.rc, 2, "H5Oget_info_by_name3");
+
+ /* Create attribute on dataset */
+ attr_id = H5Acreate2(dset_id, ATTR1_NAME, type_id, space_id, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(attr_id, FAIL, "H5Acreate2");
+
+ /* Check reference count on named datatype */
+ ret = H5Oget_info_by_name3(file_id, TYPE1_NAME, &oinfo, H5O_INFO_BASIC, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Oget_info_by_name3");
+ VERIFY(oinfo.rc, 3, "H5Oget_info_by_name3");
+
+ /* Close attribute */
+ ret = H5Aclose(attr_id);
+ CHECK(ret, FAIL, "H5Aclose");
+
+ /* Delete attribute */
+ ret = H5Adelete(dset_id, ATTR1_NAME);
+ CHECK(ret, FAIL, "H5Adelete");
+
+ /* Check reference count on named datatype */
+ ret = H5Oget_info_by_name3(file_id, TYPE1_NAME, &oinfo, H5O_INFO_BASIC, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Oget_info_by_name3");
+ VERIFY(oinfo.rc, 2, "H5Oget_info_by_name3");
+
+ /* Create attribute on dataset */
+ attr_id = H5Acreate2(dset_id, ATTR1_NAME, type_id, space_id, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(attr_id, FAIL, "H5Acreate2");
+
+ /* Check reference count on named datatype */
+ ret = H5Oget_info_by_name3(file_id, TYPE1_NAME, &oinfo, H5O_INFO_BASIC, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Oget_info_by_name3");
+ VERIFY(oinfo.rc, 3, "H5Oget_info_by_name3");
+
+ /* Write data into the attribute */
+ ret = H5Awrite(attr_id, H5T_NATIVE_INT, &data);
+ CHECK(ret, FAIL, "H5Awrite");
+
+ /* Close attribute */
+ ret = H5Aclose(attr_id);
+ CHECK(ret, FAIL, "H5Aclose");
+
+ /* Close dataset */
+ ret = H5Dclose(dset_id);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close dataspace */
+ ret = H5Sclose(space_id);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close datatype */
+ ret = H5Tclose(type_id);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ /* Close file */
+ ret = H5Fclose(file_id);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Re-open file */
+ file_id = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl);
+ CHECK(file_id, FAIL, "H5Fopen");
+
+ /* Open dataset */
+ dset_id = H5Dopen2(file_id, DSET1_NAME, H5P_DEFAULT);
+ CHECK(dset_id, FAIL, "H5Dopen2");
+
+ /* Open attribute */
+ attr_id = H5Aopen(dset_id, ATTR1_NAME, H5P_DEFAULT);
+ CHECK(attr_id, FAIL, "H5Aopen");
+
+ /* Read data from the attribute */
+ ret = H5Aread(attr_id, H5T_NATIVE_INT, &rdata);
+ CHECK(ret, FAIL, "H5Aread");
+ VERIFY(data, rdata, "H5Aread");
+
+ /* Close attribute */
+ ret = H5Aclose(attr_id);
+ CHECK(ret, FAIL, "H5Aclose");
+
+ /* Close dataset */
+ ret = H5Dclose(dset_id);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Check reference count on named datatype */
+ ret = H5Oget_info_by_name3(file_id, TYPE1_NAME, &oinfo, H5O_INFO_BASIC, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Oget_info_by_name3");
+ VERIFY(oinfo.rc, 3, "H5Oget_info_by_name3");
+
+ /* Unlink the dataset */
+ ret = H5Ldelete(file_id, DSET1_NAME, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Ldelete");
+
+ /* Check reference count on named datatype */
+ ret = H5Oget_info_by_name3(file_id, TYPE1_NAME, &oinfo, H5O_INFO_BASIC, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Oget_info_by_name3");
+ VERIFY(oinfo.rc, 1, "H5Oget_info_by_name3");
+
+ /* Unlink the named datatype */
+ ret = H5Ldelete(file_id, TYPE1_NAME, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Ldelete");
+
+ /* Close file */
+ ret = H5Fclose(file_id);
+ CHECK(ret, FAIL, "H5Fclose");
+#if 0
+ /* Check size of file */
+ filesize = h5_get_file_size(FILENAME, fapl);
+ VERIFY(filesize, empty_filesize, "h5_get_file_size");
+#endif
+#endif
+} /* test_attr_dtype_shared() */
+
+/****************************************************************
+**
+** test_attr_duplicate_ids(): Test operations with more than
+** one ID handle.
+**
+****************************************************************/
+static void
+test_attr_duplicate_ids(hid_t fapl)
+{
+ hid_t fid1; /* HDF5 File IDs */
+ hid_t dataset; /* Dataset ID */
+ hid_t gid1, gid2; /* Group ID */
+ hid_t sid1, sid2; /* Dataspace ID */
+ hid_t attr, attr2; /* Attribute ID */
+ hsize_t dims1[] = {SPACE1_DIM1, SPACE1_DIM2, SPACE1_DIM3};
+ hsize_t dims2[] = {ATTR1_DIM1};
+ int read_data1[ATTR1_DIM1] = {0}; /* Buffer for reading 1st attribute */
+ int rewrite_data[ATTR1_DIM1] = {1234, -423, 9907256}; /* Test data for rewrite */
+ int i;
+ herr_t ret; /* Generic return value */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing operations with two ID handles\n"));
+
+ /*-----------------------------------------------------------------------------------
+ * Create an attribute in a new file and leave it with the fill value.
+ */
+ /* Create file */
+ fid1 = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
+ CHECK(fid1, FAIL, "H5Fcreate");
+
+ /* Create dataspace for dataset */
+ sid1 = H5Screate_simple(SPACE1_RANK, dims1, NULL);
+ CHECK(sid1, FAIL, "H5Screate_simple");
+
+ /* Create a dataset */
+ dataset = H5Dcreate2(fid1, DSET1_NAME, H5T_NATIVE_UCHAR, sid1, H5P_DEFAULT, dcpl_g, H5P_DEFAULT);
+ CHECK(dataset, FAIL, "H5Dcreate2");
+
+ /* Create dataspace for attribute */
+ sid2 = H5Screate_simple(ATTR1_RANK, dims2, NULL);
+ CHECK(sid2, FAIL, "H5Screate_simple");
+
+ /* Try to create an attribute on the dataset */
+ attr = H5Acreate2(dataset, ATTR1_NAME, H5T_NATIVE_INT, sid2, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(attr, FAIL, "H5Acreate2");
+
+ /* Open the attribute just created and get a second ID */
+ attr2 = H5Aopen(dataset, ATTR1_NAME, H5P_DEFAULT);
+ CHECK(attr2, FAIL, "H5Aopen");
+
+ /* Close attribute */
+ ret = H5Aclose(attr2);
+ CHECK(ret, FAIL, "H5Aclose");
+
+ /* Close attribute */
+ ret = H5Aclose(attr);
+ CHECK(ret, FAIL, "H5Aclose");
+
+ /* Close Dataset */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close file */
+ ret = H5Fclose(fid1);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /*-----------------------------------------------------------------------------------
+ * Reopen the file and verify the attribute's fill value. Also write
+ * some real data.
+ */
+
+ /* Open file */
+ fid1 = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl);
+ CHECK(fid1, FAIL, "H5Fopen");
+
+ /* Open the dataset */
+ dataset = H5Dopen2(fid1, DSET1_NAME, H5P_DEFAULT);
+ CHECK(dataset, FAIL, "H5Dopen2");
+
+ /* Open first attribute for the dataset */
+ attr = H5Aopen(dataset, ATTR1_NAME, H5P_DEFAULT);
+ CHECK(attr, FAIL, "H5Aopen");
+
+ /* Read attribute with fill value */
+ ret = H5Aread(attr, H5T_NATIVE_INT, read_data1);
+ CHECK(ret, FAIL, "H5Aread");
+
+ /* Verify values read in */
+ for (i = 0; i < ATTR1_DIM1; i++)
+ if (0 != read_data1[i])
+ TestErrPrintf("%d: attribute data different: read_data1[%d]=%d\n", __LINE__, i, read_data1[i]);
+
+ /* Open attribute for the second time */
+ attr2 = H5Aopen(dataset, ATTR1_NAME, H5P_DEFAULT);
+ CHECK(attr, FAIL, "H5Aopen");
+
+ /* Write attribute information */
+ ret = H5Awrite(attr2, H5T_NATIVE_INT, attr_data1);
+ CHECK(ret, FAIL, "H5Awrite");
+
+ /* Close attribute */
+ ret = H5Aclose(attr2);
+ CHECK(ret, FAIL, "H5Aclose");
+
+ /* Close attribute */
+ ret = H5Aclose(attr);
+ CHECK(ret, FAIL, "H5Aclose");
+
+ /* Close Dataset */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close file */
+ ret = H5Fclose(fid1);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /*-----------------------------------------------------------------------------------
+ * Reopen the file and verify the data. Also rewrite the data and verify it.
+ */
+
+ /* Open file */
+ fid1 = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl);
+ CHECK(fid1, FAIL, "H5Fopen");
+
+ /* Open the dataset */
+ dataset = H5Dopen2(fid1, DSET1_NAME, H5P_DEFAULT);
+ CHECK(dataset, FAIL, "H5Dopen2");
+
+ /* Open first attribute for the dataset */
+ attr = H5Aopen(dataset, ATTR1_NAME, H5P_DEFAULT);
+ CHECK(attr, FAIL, "H5Aopen");
+
+ /* Read attribute information */
+ ret = H5Aread(attr, H5T_NATIVE_INT, read_data1);
+ CHECK(ret, FAIL, "H5Aread");
+
+ /* Verify values read in */
+ for (i = 0; i < ATTR1_DIM1; i++)
+ if (attr_data1[i] != read_data1[i])
+ TestErrPrintf("%d: attribute data different: attr_data1[%d]=%d, read_data1[%d]=%d\n", __LINE__, i,
+ attr_data1[i], i, read_data1[i]);
+
+ /* Open attribute for the second time */
+ attr2 = H5Aopen(dataset, ATTR1_NAME, H5P_DEFAULT);
+ CHECK(attr, FAIL, "H5Aopen");
+
+ /* Write attribute information */
+ ret = H5Awrite(attr2, H5T_NATIVE_INT, rewrite_data);
+ CHECK(ret, FAIL, "H5Awrite");
+
+ /* Read attribute information */
+ ret = H5Aread(attr, H5T_NATIVE_INT, read_data1);
+ CHECK(ret, FAIL, "H5Aread");
+
+ /* Verify values read in */
+ for (i = 0; i < ATTR1_DIM1; i++)
+ if (read_data1[i] != rewrite_data[i])
+ TestErrPrintf("%d: attribute data different: read_data1[%d]=%d, rewrite_data[%d]=%d\n", __LINE__,
+ i, read_data1[i], i, rewrite_data[i]);
+
+ /* Close attribute */
+ ret = H5Aclose(attr);
+ CHECK(ret, FAIL, "H5Aclose");
+
+ ret = H5Aclose(attr2);
+ CHECK(ret, FAIL, "H5Aclose");
+
+ /* Close Dataset */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close file */
+ ret = H5Fclose(fid1);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /*-----------------------------------------------------------------------------------
+ * Verify that an attribute reached through different paths (hard links)
+ * shares the same data.
+ */
+ /* Open file */
+ fid1 = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl);
+ CHECK(fid1, FAIL, "H5Fopen");
+
+ /* Create a group */
+ gid1 = H5Gcreate2(fid1, GROUP1_NAME, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(gid1, FAIL, "H5Gcreate2");
+
+ /* Create hard link to the first group */
+ ret = H5Lcreate_hard(gid1, GROUP1_NAME, H5L_SAME_LOC, GROUP2_NAME, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Lcreate_hard");
+
+ /* Try to create an attribute on the group */
+ attr = H5Acreate2(gid1, ATTR2_NAME, H5T_NATIVE_INT, sid2, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(attr, FAIL, "H5Acreate2");
+
+ /* Open the hard link just created */
+ gid2 = H5Gopen2(fid1, GROUP2_NAME, H5P_DEFAULT);
+ CHECK(gid2, FAIL, "H5Gopen2");
+
+ /* Open the attribute of the group for the second time */
+ attr2 = H5Aopen(gid2, ATTR2_NAME, H5P_DEFAULT);
+ CHECK(attr2, FAIL, "H5Aopen");
+
+ /* Write attribute information with the first attribute handle */
+ ret = H5Awrite(attr, H5T_NATIVE_INT, attr_data1);
+ CHECK(ret, FAIL, "H5Awrite");
+
+ /* Read attribute information with the second attribute handle */
+ ret = H5Aread(attr2, H5T_NATIVE_INT, read_data1);
+ CHECK(ret, FAIL, "H5Aread");
+
+ /* Verify values read in */
+ for (i = 0; i < ATTR1_DIM1; i++)
+ if (attr_data1[i] != read_data1[i])
+ TestErrPrintf("%d: attribute data different: attr_data1[%d]=%d, read_data1[%d]=%d\n", __LINE__, i,
+ attr_data1[i], i, read_data1[i]);
+
+ /* Close attribute */
+ ret = H5Aclose(attr);
+ CHECK(ret, FAIL, "H5Aclose");
+
+ ret = H5Aclose(attr2);
+ CHECK(ret, FAIL, "H5Aclose");
+
+ /* Close group */
+ ret = H5Gclose(gid1);
+ CHECK(ret, FAIL, "H5Gclose");
+
+ ret = H5Gclose(gid2);
+ CHECK(ret, FAIL, "H5Gclose");
+
+ /* Close Attribute dataspace */
+ ret = H5Sclose(sid2);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close Dataset dataspace */
+ ret = H5Sclose(sid1);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close file */
+ ret = H5Fclose(fid1);
+ CHECK(ret, FAIL, "H5Fclose");
+} /* test_attr_duplicate_ids() */
+
+/****************************************************************
+**
+** test_attr_dense_verify(): Test basic H5A (attribute) code.
+** Verify attributes on an object
+**
+****************************************************************/
+static int
+test_attr_dense_verify(hid_t loc_id, unsigned max_attr)
+{
+ char attrname[NAME_BUF_SIZE]; /* Name of attribute */
+ hid_t attr; /* Attribute ID */
+ unsigned value; /* Attribute value */
+ unsigned u; /* Local index variable */
+ int old_nerrs; /* Number of errors when entering this check */
+ herr_t ret; /* Generic return value */
+
+ /* Retrieve the current # of reported errors */
+ old_nerrs = nerrors;
+
+ /* Re-open all the attributes by name and verify the data */
+ for (u = 0; u < max_attr; u++) {
+ /* Open attribute */
+ HDsnprintf(attrname, sizeof(attrname), "attr %02u", u);
+ attr = H5Aopen(loc_id, attrname, H5P_DEFAULT);
+ CHECK(attr, FAIL, "H5Aopen");
+
+ /* Read data from the attribute */
+ ret = H5Aread(attr, H5T_NATIVE_UINT, &value);
+ CHECK(ret, FAIL, "H5Aread");
+ VERIFY(value, u, "H5Aread");
+
+ /* Close attribute */
+ ret = H5Aclose(attr);
+ CHECK(ret, FAIL, "H5Aclose");
+ } /* end for */
+
+ /* Re-open all the attributes by index and verify the data */
+ for (u = 0; u < max_attr; u++) {
+ ssize_t name_len; /* Length of attribute name */
+ char check_name[ATTR_NAME_LEN]; /* Buffer for checking attribute names */
+
+ /* Open attribute */
+ attr = H5Aopen_by_idx(loc_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, (hsize_t)u, H5P_DEFAULT,
+ H5P_DEFAULT);
+ CHECK(attr, FAIL, "H5Aopen_by_idx");
+
+ /* Verify Name */
+ HDsnprintf(attrname, sizeof(attrname), "attr %02u", u);
+ name_len = H5Aget_name(attr, (size_t)ATTR_NAME_LEN, check_name);
+ VERIFY(name_len, HDstrlen(attrname), "H5Aget_name");
+ if (HDstrcmp(check_name, attrname) != 0)
+ TestErrPrintf("attribute name different: attrname = '%s', should be '%s'\n", check_name,
+ attrname);
+
+ /* Read data from the attribute */
+ ret = H5Aread(attr, H5T_NATIVE_UINT, &value);
+ CHECK(ret, FAIL, "H5Aread");
+ VERIFY(value, u, "H5Aread");
+
+ /* Close attribute */
+ ret = H5Aclose(attr);
+ CHECK(ret, FAIL, "H5Aclose");
+ } /* end for */
+
+ /* Retrieve current # of errors */
+ if (old_nerrs == nerrors)
+ return (0);
+ else
+ return (-1);
+} /* test_attr_dense_verify() */
+
+/****************************************************************
+**
+** test_attr_dense_create(): Test basic H5A (attribute) code.
+** Tests "dense" attribute storage creation
+**
+****************************************************************/
+static void
+test_attr_dense_create(hid_t fcpl, hid_t fapl)
+{
+ hid_t fid; /* HDF5 File ID */
+ hid_t dataset; /* Dataset ID */
+ hid_t sid; /* Dataspace ID */
+ hid_t attr; /* Attribute ID */
+ hid_t dcpl; /* Dataset creation property list ID */
+ char attrname[NAME_BUF_SIZE]; /* Name of attribute */
+ unsigned max_compact; /* Maximum # of attributes to store compactly */
+ unsigned min_dense; /* Minimum # of attributes to store "densely" */
+#if 0
+ htri_t is_dense; /* Are attributes stored densely? */
+#endif
+ unsigned u; /* Local index variable */
+#if 0
+ h5_stat_size_t empty_filesize; /* Size of empty file */
+ h5_stat_size_t filesize; /* Size of file after modifications */
+#endif
+ herr_t ret; /* Generic return value */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing Dense Attribute Storage Creation\n"));
+
+ /* Create file */
+ fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl, fapl);
+ CHECK(fid, FAIL, "H5Fcreate");
+
+ /* Close file */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+#if 0
+ /* Get size of file */
+ empty_filesize = h5_get_file_size(FILENAME, fapl);
+ if (empty_filesize < 0)
+ TestErrPrintf("Line %d: file size wrong!\n", __LINE__);
+#endif
+ /* Re-open file */
+ fid = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl);
+ CHECK(fid, FAIL, "H5Fopen");
+
+ /* Create dataspace for dataset */
+ sid = H5Screate(H5S_SCALAR);
+ CHECK(sid, FAIL, "H5Screate");
+
+ /* need DCPL to query the attribute phase change properties */
+ if (dcpl_g == H5P_DEFAULT) {
+ dcpl = H5Pcreate(H5P_DATASET_CREATE);
+ CHECK(dcpl, FAIL, "H5Pcreate");
+ }
+ else {
+ dcpl = H5Pcopy(dcpl_g);
+ CHECK(dcpl, FAIL, "H5Pcopy");
+ }
+
+ /* Create a dataset */
+ dataset = H5Dcreate2(fid, DSET1_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT);
+ CHECK(dataset, FAIL, "H5Dcreate2");
+
+ /* Retrieve limits for compact/dense attribute storage */
+ ret = H5Pget_attr_phase_change(dcpl, &max_compact, &min_dense);
+ CHECK(ret, FAIL, "H5Pget_attr_phase_change");
+
+ /* Close property list */
+ ret = H5Pclose(dcpl);
+ CHECK(ret, FAIL, "H5Pclose");
+#if 0
+ /* Check on dataset's attribute storage status */
+ is_dense = H5O__is_attr_dense_test(dataset);
+ VERIFY(is_dense, FALSE, "H5O__is_attr_dense_test");
+#endif
+ /* Add attributes, until just before converting to dense storage */
+ for (u = 0; u < max_compact; u++) {
+ /* Create attribute */
+ HDsnprintf(attrname, sizeof(attrname), "attr %02u", u);
+ attr = H5Acreate2(dataset, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(attr, FAIL, "H5Acreate2");
+
+ /* Write data into the attribute */
+ ret = H5Awrite(attr, H5T_NATIVE_UINT, &u);
+ CHECK(ret, FAIL, "H5Awrite");
+
+ /* Close attribute */
+ ret = H5Aclose(attr);
+ CHECK(ret, FAIL, "H5Aclose");
+ } /* end for */
+#if 0
+ /* Check on dataset's attribute storage status */
+ is_dense = H5O__is_attr_dense_test(dataset);
+ VERIFY(is_dense, FALSE, "H5O__is_attr_dense_test");
+#endif
+ /* Add one more attribute, to push into "dense" storage */
+ /* Create attribute */
+ HDsnprintf(attrname, sizeof(attrname), "attr %02u", u);
+ attr = H5Acreate2(dataset, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(attr, FAIL, "H5Acreate2");
+#if 0
+ /* Check on dataset's attribute storage status */
+ is_dense = H5O__is_attr_dense_test(dataset);
+ VERIFY(is_dense, TRUE, "H5O__is_attr_dense_test");
+#endif
+ /* Write data into the attribute */
+ ret = H5Awrite(attr, H5T_NATIVE_UINT, &u);
+ CHECK(ret, FAIL, "H5Awrite");
+
+ /* Close attribute */
+ ret = H5Aclose(attr);
+ CHECK(ret, FAIL, "H5Aclose");
+#ifndef NO_PREVENT_CREATE_SAME_ATTRIBUTE_TWICE
+ /* Attempt to add attribute again, which should fail */
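+ /* (Attribute names must be unique on an object, so the library is expected
+ * to reject this second H5Acreate2 call.) */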
+ H5E_BEGIN_TRY
+ {
+ attr = H5Acreate2(dataset, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+ VERIFY(attr, FAIL, "H5Acreate2");
+#endif
+ /* Close dataspace */
+ ret = H5Sclose(sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close Dataset */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Unlink dataset with attributes */
+ ret = H5Ldelete(fid, DSET1_NAME, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Ldelete");
+
+ /* Close file */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+#if 0
+ if (h5_using_default_driver(NULL)) {
+ /* Check size of file */
+ filesize = h5_get_file_size(FILENAME, fapl);
+ VERIFY(filesize, empty_filesize, "h5_get_file_size");
+ }
+#endif
+} /* test_attr_dense_create() */
+
+/****************************************************************
+**
+** test_attr_dense_open(): Test basic H5A (attribute) code.
+** Tests opening attributes in "dense" storage
+**
+****************************************************************/
+static void
+test_attr_dense_open(hid_t fcpl, hid_t fapl)
+{
+ hid_t fid; /* HDF5 File ID */
+ hid_t dataset; /* Dataset ID */
+ hid_t sid; /* Dataspace ID */
+ hid_t attr; /* Attribute ID */
+ hid_t dcpl; /* Dataset creation property list ID */
+ char attrname[NAME_BUF_SIZE]; /* Name of attribute */
+ unsigned max_compact; /* Maximum # of attributes to store compactly */
+ unsigned min_dense; /* Minimum # of attributes to store "densely" */
+#if 0
+ htri_t is_dense; /* Are attributes stored densely? */
+#endif
+ unsigned u; /* Local index variable */
+#if 0
+ h5_stat_size_t empty_filesize; /* Size of empty file */
+ h5_stat_size_t filesize; /* Size of file after modifications */
+#endif
+ herr_t ret; /* Generic return value */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing Opening Attributes in Dense Storage\n"));
+
+ /* Create file */
+ fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl, fapl);
+ CHECK(fid, FAIL, "H5Fcreate");
+
+ /* Close file */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+#if 0
+ /* Get size of file */
+ empty_filesize = h5_get_file_size(FILENAME, fapl);
+ if (empty_filesize < 0)
+ TestErrPrintf("Line %d: file size wrong!\n", __LINE__);
+#endif
+ /* Re-open file */
+ fid = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl);
+ CHECK(fid, FAIL, "H5Fopen");
+
+ /* Create dataspace for dataset */
+ sid = H5Screate(H5S_SCALAR);
+ CHECK(sid, FAIL, "H5Screate");
+
+ /* need DCPL to query the attribute phase change properties */
+ if (dcpl_g == H5P_DEFAULT) {
+ dcpl = H5Pcreate(H5P_DATASET_CREATE);
+ CHECK(dcpl, FAIL, "H5Pcreate");
+ }
+ else {
+ dcpl = H5Pcopy(dcpl_g);
+ CHECK(dcpl, FAIL, "H5Pcopy");
+ }
+
+ /* Enable creation order tracking on attributes, so creation order tests work */
+ ret = H5Pset_attr_creation_order(dcpl, H5P_CRT_ORDER_TRACKED);
+ CHECK(ret, FAIL, "H5Pset_attr_creation_order");
+
+ /* Create a dataset */
+ dataset = H5Dcreate2(fid, DSET1_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT);
+ CHECK(dataset, FAIL, "H5Dcreate2");
+
+ /* Retrieve limits for compact/dense attribute storage */
+ ret = H5Pget_attr_phase_change(dcpl, &max_compact, &min_dense);
+ CHECK(ret, FAIL, "H5Pget_attr_phase_change");
+
+ /* Close property list */
+ ret = H5Pclose(dcpl);
+ CHECK(ret, FAIL, "H5Pclose");
+#if 0
+ /* Check on dataset's attribute storage status */
+ is_dense = H5O__is_attr_dense_test(dataset);
+ VERIFY(is_dense, FALSE, "H5O__is_attr_dense_test");
+#endif
+ /* Add attributes, until just before converting to dense storage */
+ for (u = 0; u < max_compact; u++) {
+ /* Create attribute */
+ HDsnprintf(attrname, sizeof(attrname), "attr %02u", u);
+ attr = H5Acreate2(dataset, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(attr, FAIL, "H5Acreate2");
+
+ /* Write data into the attribute */
+ ret = H5Awrite(attr, H5T_NATIVE_UINT, &u);
+ CHECK(ret, FAIL, "H5Awrite");
+
+ /* Close attribute */
+ ret = H5Aclose(attr);
+ CHECK(ret, FAIL, "H5Aclose");
+
+ /* Verify attributes written so far */
+ ret = test_attr_dense_verify(dataset, u);
+ CHECK(ret, FAIL, "test_attr_dense_verify");
+ } /* end for */
+#if 0
+ /* Check on dataset's attribute storage status */
+ is_dense = H5O__is_attr_dense_test(dataset);
+ VERIFY(is_dense, FALSE, "H5O__is_attr_dense_test");
+#endif
+ /* Add one more attribute, to push into "dense" storage */
+ /* Create attribute */
+ HDsnprintf(attrname, sizeof(attrname), "attr %02u", u);
+ attr = H5Acreate2(dataset, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(attr, FAIL, "H5Acreate2");
+#if 0
+ /* Check on dataset's attribute storage status */
+ is_dense = H5O__is_attr_dense_test(dataset);
+ VERIFY(is_dense, TRUE, "H5O__is_attr_dense_test");
+#endif
+ /* Write data into the attribute */
+ ret = H5Awrite(attr, H5T_NATIVE_UINT, &u);
+ CHECK(ret, FAIL, "H5Awrite");
+
+ /* Close attribute */
+ ret = H5Aclose(attr);
+ CHECK(ret, FAIL, "H5Aclose");
+
+ /* Close dataspace */
+ ret = H5Sclose(sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Verify all the attributes written */
+ ret = test_attr_dense_verify(dataset, (u + 1));
+ CHECK(ret, FAIL, "test_attr_dense_verify");
+
+ /* Close Dataset */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Unlink dataset with attributes */
+ ret = H5Ldelete(fid, DSET1_NAME, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Ldelete");
+
+ /* Close file */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+#if 0
+ if (h5_using_default_driver(NULL)) {
+ /* Check size of file */
+ filesize = h5_get_file_size(FILENAME, fapl);
+ VERIFY(filesize, empty_filesize, "h5_get_file_size");
+ }
+#endif
+} /* test_attr_dense_open() */
+
+/****************************************************************
+**
+** test_attr_dense_delete(): Test basic H5A (attribute) code.
+** Tests deleting attributes in "dense" storage
+**
+****************************************************************/
+static void
+test_attr_dense_delete(hid_t fcpl, hid_t fapl)
+{
+ hid_t fid; /* HDF5 File ID */
+ hid_t dataset; /* Dataset ID */
+ hid_t sid; /* Dataspace ID */
+ hid_t attr; /* Attribute ID */
+ hid_t dcpl; /* Dataset creation property list ID */
+ char attrname[NAME_BUF_SIZE]; /* Name of attribute */
+ unsigned max_compact; /* Maximum # of attributes to store compactly */
+ unsigned min_dense; /* Minimum # of attributes to store "densely" */
+#if 0
+ htri_t is_dense; /* Are attributes stored densely? */
+#endif
+ unsigned u; /* Local index variable */
+#if 0
+ h5_stat_size_t empty_filesize; /* Size of empty file */
+ h5_stat_size_t filesize; /* Size of file after modifications */
+#endif
+ H5O_info2_t oinfo; /* Object info */
+ int use_min_dset_oh = (dcpl_g != H5P_DEFAULT);
+ herr_t ret; /* Generic return value */
+
+ /* Only run this test for sec2/default driver */
+ if (!h5_using_default_driver(NULL))
+ return;
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing Deleting Attributes in Dense Storage\n"));
+
+ if (use_min_dset_oh) { /* using minimized dataset headers */
+ /* modify fcpl...
+ * sidestep "bug" where file space is lost with minimized dset ohdrs
+ */
+ fcpl = H5Pcopy(fcpl);
+ CHECK(fcpl, FAIL, "H5Pcopy");
+ ret = H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_FSM_AGGR, TRUE, 1);
+ CHECK(ret, FAIL, "H5Pset_file_space_strategy");
+ }
+ fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl, fapl);
+ CHECK(fid, FAIL, "H5Fcreate");
+ if (use_min_dset_oh)
+ CHECK(H5Pclose(fcpl), FAIL, "H5Pclose");
+
+ /* Close file */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+#if 0
+ /* Get size of file */
+ empty_filesize = h5_get_file_size(FILENAME, fapl);
+ if (empty_filesize < 0)
+ TestErrPrintf("Line %d: file size wrong!\n", __LINE__);
+#endif
+ /* Re-open file */
+ fid = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl);
+ CHECK(fid, FAIL, "H5Fopen");
+
+ /* Create dataspace for dataset */
+ sid = H5Screate(H5S_SCALAR);
+ CHECK(sid, FAIL, "H5Screate");
+
+ /* need DCPL to query the attribute phase change properties */
+ if (use_min_dset_oh) {
+ dcpl = H5Pcopy(dcpl_g);
+ CHECK(dcpl, FAIL, "H5Pcopy");
+ }
+ else {
+ dcpl = H5Pcreate(H5P_DATASET_CREATE);
+ CHECK(dcpl, FAIL, "H5Pcreate");
+ }
+
+ /* Enable creation order tracking on attributes, so creation order tests work */
+ ret = H5Pset_attr_creation_order(dcpl, H5P_CRT_ORDER_TRACKED);
+ CHECK(ret, FAIL, "H5Pset_attr_creation_order");
+
+ /* Create a dataset */
+ dataset = H5Dcreate2(fid, DSET1_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT);
+ CHECK(dataset, FAIL, "H5Dcreate2");
+
+ /* Retrieve limits for compact/dense attribute storage */
+ ret = H5Pget_attr_phase_change(dcpl, &max_compact, &min_dense);
+ CHECK(ret, FAIL, "H5Pget_attr_phase_change");
+
+ /* Close property list */
+ ret = H5Pclose(dcpl);
+ CHECK(ret, FAIL, "H5Pclose");
+#if 0
+ /* Check on dataset's attribute storage status */
+ is_dense = H5O__is_attr_dense_test(dataset);
+ VERIFY(is_dense, FALSE, "H5O__is_attr_dense_test");
+#endif
+ /* Add attributes, until well into dense storage */
+ for (u = 0; u < (max_compact * 2); u++) {
+ /* Create attribute */
+ HDsnprintf(attrname, sizeof(attrname), "attr %02u", u);
+ attr = H5Acreate2(dataset, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(attr, FAIL, "H5Acreate2");
+
+ /* Write data into the attribute */
+ ret = H5Awrite(attr, H5T_NATIVE_UINT, &u);
+ CHECK(ret, FAIL, "H5Awrite");
+
+ /* Close attribute */
+ ret = H5Aclose(attr);
+ CHECK(ret, FAIL, "H5Aclose");
+
+ /* Check # of attributes */
+ ret = H5Oget_info3(dataset, &oinfo, H5O_INFO_NUM_ATTRS);
+ CHECK(ret, FAIL, "H5Oget_info3");
+ VERIFY(oinfo.num_attrs, (u + 1), "H5Oget_info3");
+ } /* end for */
+#if 0
+ /* Check on dataset's attribute storage status */
+ is_dense = H5O__is_attr_dense_test(dataset);
+ VERIFY(is_dense, TRUE, "H5O__is_attr_dense_test");
+#endif
+ /* Close dataspace */
+ ret = H5Sclose(sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close Dataset */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close file */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Re-open file */
+ fid = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl);
+ CHECK(fid, FAIL, "H5Fopen");
+
+ /* Open dataset */
+ dataset = H5Dopen2(fid, DSET1_NAME, H5P_DEFAULT);
+ CHECK(dataset, FAIL, "H5Dopen2");
+
+ /* Delete attributes until the attributes revert to compact storage again */
+ for (u--; u >= min_dense; u--) {
+ /* Delete attribute */
+ HDsnprintf(attrname, sizeof(attrname), "attr %02u", u);
+ ret = H5Adelete(dataset, attrname);
+ CHECK(ret, FAIL, "H5Adelete");
+
+ /* Verify attributes still left */
+ ret = test_attr_dense_verify(dataset, u);
+ CHECK(ret, FAIL, "test_attr_dense_verify");
+ } /* end for */
+#if 0
+ /* Check on dataset's attribute storage status */
+ is_dense = H5O__is_attr_dense_test(dataset);
+ VERIFY(is_dense, TRUE, "H5O__is_attr_dense_test");
+#endif
+ /* Delete one more attribute, which should cause reversion to compact storage */
+ HDsnprintf(attrname, sizeof(attrname), "attr %02u", u);
+ ret = H5Adelete(dataset, attrname);
+ CHECK(ret, FAIL, "H5Adelete");
+#if 0
+ /* Check on dataset's attribute storage status */
+ is_dense = H5O__is_attr_dense_test(dataset);
+ VERIFY(is_dense, FALSE, "H5O__is_attr_dense_test");
+#endif
+ /* Verify attributes still left */
+ ret = test_attr_dense_verify(dataset, (u - 1));
+ CHECK(ret, FAIL, "test_attr_dense_verify");
+
+ /* Delete another attribute, to verify deletion in compact storage */
+ HDsnprintf(attrname, sizeof(attrname), "attr %02u", (u - 1));
+ ret = H5Adelete(dataset, attrname);
+ CHECK(ret, FAIL, "H5Adelete");
+#if 0
+ /* Check on dataset's attribute storage status */
+ is_dense = H5O__is_attr_dense_test(dataset);
+ VERIFY(is_dense, FALSE, "H5O__is_attr_dense_test");
+#endif
+ /* Verify attributes still left */
+ ret = test_attr_dense_verify(dataset, (u - 2));
+ CHECK(ret, FAIL, "test_attr_dense_verify");
+
+ /* Close Dataset */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Unlink dataset with attributes */
+ ret = H5Ldelete(fid, DSET1_NAME, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Ldelete");
+
+ /* Close file */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+#if 0
+ if (h5_using_default_driver(NULL)) {
+ /* Check size of file */
+ filesize = h5_get_file_size(FILENAME, fapl);
+ VERIFY(filesize, empty_filesize, "h5_get_file_size");
+ }
+#endif
+} /* test_attr_dense_delete() */
+
+/****************************************************************
+**
+** test_attr_dense_rename(): Test basic H5A (attribute) code.
+** Tests renaming attributes in "dense" storage
+**
+****************************************************************/
+static void
+test_attr_dense_rename(hid_t fcpl, hid_t fapl)
+{
+ hid_t fid; /* HDF5 File ID */
+ hid_t dataset; /* Dataset ID */
+ hid_t sid; /* Dataspace ID */
+ hid_t attr; /* Attribute ID */
+ hid_t dcpl; /* Dataset creation property list ID */
+ char attrname[NAME_BUF_SIZE]; /* Name of attribute */
+ char new_attrname[NAME_BUF_SIZE]; /* New name of attribute */
+ unsigned max_compact; /* Maximum # of attributes to store compactly */
+ unsigned min_dense; /* Minimum # of attributes to store "densely" */
+#if 0
+ htri_t is_dense; /* Are attributes stored densely? */
+ h5_stat_size_t empty_filesize; /* Size of empty file */
+ h5_stat_size_t filesize; /* Size of file after modifications */
+#endif
+ H5O_info2_t oinfo; /* Object info */
+ unsigned u; /* Local index variable */
+ int use_min_dset_oh = (dcpl_g != H5P_DEFAULT);
+ unsigned use_corder; /* Track creation order or not */
+ herr_t ret; /* Generic return value */
+
+ /* Only run this test for sec2/default driver */
+ if (!h5_using_default_driver(NULL))
+ return;
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing Renaming Attributes in Dense Storage\n"));
+
+ if (use_min_dset_oh) { /* using minimized dataset headers */
+ /* modify fcpl...
+ * sidestep "bug" where file space is lost with minimized dset ohdrs
+ */
+ fcpl = H5Pcopy(fcpl);
+ CHECK(fcpl, FAIL, "H5Pcopy");
+ ret = H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_FSM_AGGR, TRUE, 1);
+ CHECK(ret, FAIL, "H5Pset_file_space_strategy");
+ }
+ fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl, fapl);
+ CHECK(fid, H5I_INVALID_HID, "H5Fcreate");
+ if (use_min_dset_oh)
+ CHECK(H5Pclose(fcpl), FAIL, "H5Pclose");
+
+ /* Close file */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+#if 0
+ /* Get size of file */
+ empty_filesize = h5_get_file_size(FILENAME, fapl);
+ if (empty_filesize < 0)
+ TestErrPrintf("Line %d: file size wrong!\n", __LINE__);
+#endif
+ /* Re-open file */
+ fid = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl);
+ CHECK(fid, H5I_INVALID_HID, "H5Fopen");
+
+ /* Create dataspace for dataset */
+ sid = H5Screate(H5S_SCALAR);
+ CHECK(sid, H5I_INVALID_HID, "H5Screate");
+
+ /* need DCPL to query the attribute phase change properties */
+ if (use_min_dset_oh) {
+ dcpl = H5Pcopy(dcpl_g);
+ CHECK(dcpl, H5I_INVALID_HID, "H5Pcopy");
+ }
+ else {
+ dcpl = H5Pcreate(H5P_DATASET_CREATE);
+ CHECK(dcpl, H5I_INVALID_HID, "H5Pcreate");
+ }
+
+ /* Retrieve limits for compact/dense attribute storage */
+ ret = H5Pget_attr_phase_change(dcpl, &max_compact, &min_dense);
+ CHECK(ret, FAIL, "H5Pget_attr_phase_change");
+
+ /* Using creation order or not */
+ for (use_corder = FALSE; use_corder <= TRUE; use_corder++) {
+
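+ /* The second pass tracks and indexes attribute creation order, so renaming
+ * is also exercised on a dataset that maintains a creation-order index;
+ * that dataset is kept and its renamed attributes are verified after the
+ * file is reopened below. */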
+ if (use_corder) {
+ ret = H5Pset_attr_creation_order(dcpl, H5P_CRT_ORDER_TRACKED | H5P_CRT_ORDER_INDEXED);
+ CHECK(ret, FAIL, "H5Pset_attr_creation_order");
+ }
+
+ /* Create a dataset */
+ dataset = H5Dcreate2(fid, DSET1_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT);
+ CHECK(dataset, H5I_INVALID_HID, "H5Dcreate2");
+#if 0
+ /* Check on dataset's attribute storage status */
+ is_dense = H5O__is_attr_dense_test(dataset);
+ VERIFY(is_dense, FALSE, "H5O__is_attr_dense_test");
+#endif
+ /* Add attributes, until well into dense storage */
+ for (u = 0; u < (max_compact * 2); u++) {
+ /* Create attribute */
+ HDsnprintf(attrname, sizeof(attrname), "attr %02u", u);
+ attr = H5Acreate2(dataset, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(attr, H5I_INVALID_HID, "H5Acreate2");
+
+ /* Write data into the attribute */
+ ret = H5Awrite(attr, H5T_NATIVE_UINT, &u);
+ CHECK(ret, FAIL, "H5Awrite");
+
+ /* Close attribute */
+ ret = H5Aclose(attr);
+ CHECK(ret, FAIL, "H5Aclose");
+
+ /* Build the new attribute name */
+ HDsnprintf(new_attrname, sizeof(new_attrname), "new attr %02u", u);
+
+ /* Rename attribute */
+ ret = H5Arename_by_name(fid, DSET1_NAME, attrname, new_attrname, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Arename_by_name");
+
+ /* Check # of attributes */
+ ret = H5Oget_info3(dataset, &oinfo, H5O_INFO_NUM_ATTRS);
+ CHECK(ret, FAIL, "H5Oget_info3");
+ VERIFY(oinfo.num_attrs, (u + 1), "H5Oget_info3");
+ } /* end for */
+#if 0
+ /* Check on dataset's attribute storage status */
+ is_dense = H5O__is_attr_dense_test(dataset);
+ VERIFY(is_dense, TRUE, "H5O__is_attr_dense_test");
+#endif
+ /* Close Dataset */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ if (!use_corder) {
+ /* Unlink dataset with attributes */
+ ret = H5Ldelete(fid, DSET1_NAME, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Ldelete");
+ }
+
+ } /* end for use_corder */
+
+ /* Close dataspace */
+ ret = H5Sclose(sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close property list */
+ ret = H5Pclose(dcpl);
+ CHECK(ret, FAIL, "H5Pclose");
+
+ /* Close file */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Re-open file */
+ fid = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl);
+ CHECK(fid, H5I_INVALID_HID, "H5Fopen");
+
+ /* Open dataset */
+ dataset = H5Dopen2(fid, DSET1_NAME, H5P_DEFAULT);
+ CHECK(dataset, H5I_INVALID_HID, "H5Dopen2");
+
+ /* Verify renamed attributes */
+ for (u = 0; u < (max_compact * 2); u++) {
+ unsigned value; /* Attribute value */
+
+ /* Open attribute */
+ HDsnprintf(attrname, sizeof(attrname), "new attr %02u", u);
+ attr = H5Aopen(dataset, attrname, H5P_DEFAULT);
+ CHECK(attr, H5I_INVALID_HID, "H5Aopen");
+
+ /* Read data from the attribute */
+ ret = H5Aread(attr, H5T_NATIVE_UINT, &value);
+ CHECK(ret, FAIL, "H5Aread");
+ VERIFY(value, u, "H5Aread");
+
+ /* Close attribute */
+ ret = H5Aclose(attr);
+ CHECK(ret, FAIL, "H5Aclose");
+ } /* end for */
+
+ /* Close Dataset */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Unlink dataset with attributes */
+ ret = H5Ldelete(fid, DSET1_NAME, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Ldelete");
+
+ /* Close file */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+#if 0
+ if (h5_using_default_driver(NULL)) {
+ /* Check size of file */
+ filesize = h5_get_file_size(FILENAME, fapl);
+ VERIFY(filesize, empty_filesize, "h5_get_file_size");
+ }
+#endif
+} /* test_attr_dense_rename() */
+
+/****************************************************************
+**
+** test_attr_dense_unlink(): Test basic H5A (attribute) code.
+** Tests unlinking an object with attributes in "dense" storage
+**
+****************************************************************/
+static void
+test_attr_dense_unlink(hid_t fcpl, hid_t fapl)
+{
+ hid_t fid; /* HDF5 File ID */
+ hid_t dataset; /* Dataset ID */
+ hid_t sid; /* Dataspace ID */
+ hid_t attr; /* Attribute ID */
+ hid_t dcpl; /* Dataset creation property list ID */
+ char attrname[NAME_BUF_SIZE]; /* Name of attribute */
+ unsigned max_compact; /* Maximum # of attributes to store compactly */
+ unsigned min_dense; /* Minimum # of attributes to store "densely" */
+#if 0
+ htri_t is_dense; /* Are attributes stored densely? */
+ size_t mesg_count; /* # of shared messages */
+ h5_stat_size_t empty_filesize; /* Size of empty file */
+ h5_stat_size_t filesize; /* Size of file after modifications */
+#endif
+ H5O_info2_t oinfo; /* Object info */
+ unsigned u; /* Local index variable */
+ int use_min_dset_oh = (dcpl_g != H5P_DEFAULT);
+ herr_t ret; /* Generic return value */
+
+ /* Only run this test for sec2/default driver */
+ if (!h5_using_default_driver(NULL))
+ return;
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing Unlinking Object with Attributes in Dense Storage\n"));
+
+ if (use_min_dset_oh) { /* using minimized dataset headers */
+ /* modify fcpl...
+ * sidestep "bug" where file space is lost with minimized dset ohdrs
+ */
+ fcpl = H5Pcopy(fcpl);
+ CHECK(fcpl, FAIL, "H5Pcopy");
+ ret = H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_FSM_AGGR, TRUE, 1);
+ CHECK(ret, FAIL, "H5Pset_file_space_strategy");
+ }
+ fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl, fapl);
+ CHECK(fid, FAIL, "H5Fcreate");
+ if (use_min_dset_oh)
+ CHECK(H5Pclose(fcpl), FAIL, "H5Pclose");
+
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+#if 0
+ empty_filesize = h5_get_file_size(FILENAME, fapl);
+ if (empty_filesize < 0)
+ TestErrPrintf("Line %d: file size wrong!\n", __LINE__);
+#endif
+ /* Re-open file */
+ fid = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl);
+ CHECK(fid, FAIL, "H5Fopen");
+
+ /* Create dataspace for dataset */
+ sid = H5Screate(H5S_SCALAR);
+ CHECK(sid, FAIL, "H5Screate");
+
+ /* need DCPL to query the attribute phase change properties */
+ if (use_min_dset_oh) {
+ dcpl = H5Pcopy(dcpl_g);
+ CHECK(dcpl, FAIL, "H5Pcopy");
+ }
+ else {
+ dcpl = H5Pcreate(H5P_DATASET_CREATE);
+ CHECK(dcpl, FAIL, "H5Pcreate");
+ }
+
+ /* Create a dataset */
+ dataset = H5Dcreate2(fid, DSET1_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT);
+ CHECK(dataset, FAIL, "H5Dcreate2");
+
+ /* Retrieve limits for compact/dense attribute storage */
+ ret = H5Pget_attr_phase_change(dcpl, &max_compact, &min_dense);
+ CHECK(ret, FAIL, "H5Pget_attr_phase_change");
+
+ /* Close property list */
+ ret = H5Pclose(dcpl);
+ CHECK(ret, FAIL, "H5Pclose");
+#if 0
+ /* Check on dataset's attribute storage status */
+ is_dense = H5O__is_attr_dense_test(dataset);
+ VERIFY(is_dense, FALSE, "H5O__is_attr_dense_test");
+#endif
+ /* Add attributes, until well into dense storage */
+ for (u = 0; u < (max_compact * 2); u++) {
+ /* Create attribute */
+ HDsnprintf(attrname, sizeof(attrname), "attr %02u", u);
+ attr = H5Acreate2(dataset, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(attr, FAIL, "H5Acreate2");
+
+ /* Write data into the attribute */
+ ret = H5Awrite(attr, H5T_NATIVE_UINT, &u);
+ CHECK(ret, FAIL, "H5Awrite");
+
+ /* Close attribute */
+ ret = H5Aclose(attr);
+ CHECK(ret, FAIL, "H5Aclose");
+
+ /* Check # of attributes */
+ ret = H5Oget_info3(dataset, &oinfo, H5O_INFO_NUM_ATTRS);
+ CHECK(ret, FAIL, "H5Oget_info3");
+ VERIFY(oinfo.num_attrs, (u + 1), "H5Oget_info3");
+ } /* end for */
+#if 0
+ /* Check on dataset's attribute storage status */
+ is_dense = H5O__is_attr_dense_test(dataset);
+ VERIFY(is_dense, TRUE, "H5O__is_attr_dense_test");
+#endif
+ /* Close dataspace */
+ ret = H5Sclose(sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close Dataset */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close file */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Re-open file */
+ fid = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl);
+ CHECK(fid, FAIL, "H5Fopen");
+
+ /* Unlink dataset */
+ ret = H5Ldelete(fid, DSET1_NAME, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Ldelete");
+#if 0
+ /* Check on dataset's attribute storage status */
+ ret = H5F__get_sohm_mesg_count_test(fid, H5O_ATTR_ID, &mesg_count);
+ CHECK(ret, FAIL, "H5F__get_sohm_mesg_count_test");
+ VERIFY(mesg_count, 0, "H5F__get_sohm_mesg_count_test");
+#endif
+ /* Close file */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+#if 0
+ if (h5_using_default_driver(NULL)) {
+ /* Check size of file */
+ filesize = h5_get_file_size(FILENAME, fapl);
+ VERIFY(filesize, empty_filesize, "h5_get_file_size");
+ }
+#endif
+} /* test_attr_dense_unlink() */
+
+/****************************************************************
+**
+** test_attr_dense_limits(): Test basic H5A (attribute) code.
+** Tests attribute in "dense" storage limits
+**
+****************************************************************/
+static void
+test_attr_dense_limits(hid_t fcpl, hid_t fapl)
+{
+ hid_t fid; /* HDF5 File ID */
+ hid_t dataset; /* Dataset ID */
+ hid_t sid; /* Dataspace ID */
+ hid_t attr; /* Attribute ID */
+ hid_t dcpl; /* Dataset creation property list ID */
+ char attrname[NAME_BUF_SIZE]; /* Name of attribute */
+ unsigned max_compact, rmax_compact; /* Maximum # of attributes to store compactly */
+ unsigned min_dense, rmin_dense; /* Minimum # of attributes to store "densely" */
+#if 0
+ htri_t is_dense; /* Are attributes stored densely? */
+#endif
+ unsigned u; /* Local index variable */
+#if 0
+ h5_stat_size_t empty_filesize; /* Size of empty file */
+ h5_stat_size_t filesize; /* Size of file after modifications */
+#endif
+ herr_t ret; /* Generic return value */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing Phase Change Limits For Attributes in Dense Storage\n"));
+
+ /* Create file */
+ fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl, fapl);
+ CHECK(fid, FAIL, "H5Fcreate");
+
+ /* Close file */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+#if 0
+ /* Get size of file */
+ empty_filesize = h5_get_file_size(FILENAME, fapl);
+ if (empty_filesize < 0)
+ TestErrPrintf("Line %d: file size wrong!\n", __LINE__);
+#endif
+ /* Re-open file */
+ fid = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl);
+ CHECK(fid, FAIL, "H5Fopen");
+
+ /* Create dataspace for dataset */
+ sid = H5Screate(H5S_SCALAR);
+ CHECK(sid, FAIL, "H5Screate");
+
+ /* need DCPL to query the attribute phase change properties */
+ if (dcpl_g == H5P_DEFAULT) {
+ dcpl = H5Pcreate(H5P_DATASET_CREATE);
+ CHECK(dcpl, FAIL, "H5Pcreate");
+ }
+ else {
+ dcpl = H5Pcopy(dcpl_g);
+ CHECK(dcpl, FAIL, "H5Pcopy");
+ }
+
+ /* Change limits on compact/dense attribute storage */
+ max_compact = 0;
+ min_dense = 0;
+ ret = H5Pset_attr_phase_change(dcpl, max_compact, min_dense);
+ CHECK(ret, FAIL, "H5Pget_attr_phase_change");
+
+ /* Create a dataset */
+ dataset = H5Dcreate2(fid, DSET1_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT);
+ CHECK(dataset, FAIL, "H5Dcreate2");
+
+ /* Retrieve limits for compact/dense attribute storage */
+ ret = H5Pget_attr_phase_change(dcpl, &rmax_compact, &rmin_dense);
+ CHECK(ret, FAIL, "H5Pget_attr_phase_change");
+ VERIFY(rmax_compact, max_compact, "H5Pget_attr_phase_change");
+ VERIFY(rmin_dense, min_dense, "H5Pget_attr_phase_change");
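+
+ /* With both limits set to 0, compact storage is effectively disabled: the
+ * very first attribute should go straight into dense storage, and dense
+ * storage should only go away once the last attribute is deleted. */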
+
+ /* Close property list */
+ ret = H5Pclose(dcpl);
+ CHECK(ret, FAIL, "H5Pclose");
+
+#if 0
+ /* Check on dataset's attribute storage status */
+ is_dense = H5O__is_attr_dense_test(dataset);
+ VERIFY(is_dense, FALSE, "H5O__is_attr_dense_test");
+#endif
+
+ /* Add first attribute, which should be immediately in dense storage */
+
+ /* Create attribute */
+ u = 0;
+ HDsnprintf(attrname, sizeof(attrname), "attr %02u", u);
+ attr = H5Acreate2(dataset, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(attr, FAIL, "H5Acreate2");
+
+ /* Write data into the attribute */
+ ret = H5Awrite(attr, H5T_NATIVE_UINT, &u);
+ CHECK(ret, FAIL, "H5Awrite");
+
+ /* Close attribute */
+ ret = H5Aclose(attr);
+ CHECK(ret, FAIL, "H5Aclose");
+#if 0
+ /* Check on dataset's attribute storage status */
+ is_dense = H5O__is_attr_dense_test(dataset);
+ VERIFY(is_dense, TRUE, "H5O__is_attr_dense_test");
+#endif
+
+ /* Add second attribute, to allow deletions to be checked easily */
+
+ /* Create attribute */
+ u = 1;
+ HDsnprintf(attrname, sizeof(attrname), "attr %02u", u);
+ attr = H5Acreate2(dataset, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(attr, FAIL, "H5Acreate2");
+
+ /* Write data into the attribute */
+ ret = H5Awrite(attr, H5T_NATIVE_UINT, &u);
+ CHECK(ret, FAIL, "H5Awrite");
+
+ /* Close attribute */
+ ret = H5Aclose(attr);
+ CHECK(ret, FAIL, "H5Aclose");
+#if 0
+ /* Check on dataset's attribute storage status */
+ is_dense = H5O__is_attr_dense_test(dataset);
+ VERIFY(is_dense, TRUE, "H5O__is_attr_dense_test");
+#endif
+
+ /* Delete the second attribute; attributes should still be stored densely */
+
+ /* Delete attribute */
+ ret = H5Adelete(dataset, attrname);
+ CHECK(ret, FAIL, "H5Adelete");
+#if 0
+ /* Check on dataset's attribute storage status */
+ is_dense = H5O__is_attr_dense_test(dataset);
+ VERIFY(is_dense, TRUE, "H5O__is_attr_dense_test");
+#endif
+
+ /* Delete the first attribute; attributes should no longer be stored densely */
+
+ /* Delete attribute */
+ u = 0;
+ HDsnprintf(attrname, sizeof(attrname), "attr %02u", u);
+ ret = H5Adelete(dataset, attrname);
+ CHECK(ret, FAIL, "H5Adelete");
+#if 0
+ /* Check on dataset's attribute storage status */
+ is_dense = H5O__is_attr_dense_test(dataset);
+ VERIFY(is_dense, FALSE, "H5O__is_attr_dense_test");
+#endif
+
+ /* Close dataspace */
+ ret = H5Sclose(sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close Dataset */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Unlink dataset */
+ ret = H5Ldelete(fid, DSET1_NAME, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Ldelete");
+
+ /* Close file */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+#if 0
+ if (h5_using_default_driver(NULL)) {
+ /* Check size of file */
+ filesize = h5_get_file_size(FILENAME, fapl);
+ VERIFY(filesize, empty_filesize, "h5_get_file_size");
+ }
+#endif
+} /* test_attr_dense_limits() */
+
+/****************************************************************
+**
+** test_attr_dense_dup_ids(): Test operations with multiple ID
+** handles with "dense" attribute storage creation
+**
+****************************************************************/
+static void
+test_attr_dense_dup_ids(hid_t fcpl, hid_t fapl)
+{
+ hid_t fid; /* HDF5 File ID */
+ hid_t dataset; /* Dataset ID */
+ hid_t gid1, gid2; /* Group ID */
+ hid_t sid, sid2; /* Dataspace ID */
+ hid_t attr, attr2, add_attr; /* Attribute ID */
+ hid_t dcpl; /* Dataset creation property list ID */
+ char attrname[NAME_BUF_SIZE]; /* Name of attribute */
+ hsize_t dims[] = {ATTR1_DIM1};
+ int read_data1[ATTR1_DIM1] = {0}; /* Buffer for reading attribute */
+ int rewrite_data[ATTR1_DIM1] = {1234, -423, 9907256}; /* Test data for rewrite */
+ unsigned scalar_data = 1317; /* scalar data for attribute */
+ unsigned read_scalar; /* variable for reading attribute */
+ unsigned max_compact; /* Maximum # of attributes to store compactly */
+ unsigned min_dense; /* Minimum # of attributes to store "densely" */
+#if 0
+ htri_t is_dense; /* Are attributes stored densely? */
+#endif
+ unsigned u, i; /* Local index variable */
+ herr_t ret; /* Generic return value */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing operations with two IDs for Dense Storage\n"));
+
+ /*-----------------------------------------------------------------------------------
+ * Create an attribute in dense storage and leave it with the fill value.
+ */
+ /* Create file */
+ fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl, fapl);
+ CHECK(fid, FAIL, "H5Fcreate");
+
+ /* Close file */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Re-open file */
+ fid = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl);
+ CHECK(fid, FAIL, "H5Fopen");
+
+ /* Create dataspace for dataset */
+ sid = H5Screate(H5S_SCALAR);
+ CHECK(sid, FAIL, "H5Screate");
+
+ /* need DCPL to query the attribute phase change properties */
+ if (dcpl_g == H5P_DEFAULT) {
+ dcpl = H5Pcreate(H5P_DATASET_CREATE);
+ CHECK(dcpl, FAIL, "H5Pcreate");
+ }
+ else {
+ dcpl = H5Pcopy(dcpl_g);
+ CHECK(dcpl, FAIL, "H5Pcopy");
+ }
+
+ /* Create a dataset */
+ dataset = H5Dcreate2(fid, DSET1_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT);
+ CHECK(dataset, FAIL, "H5Dcreate2");
+
+ /* Retrieve limits for compact/dense attribute storage */
+ ret = H5Pget_attr_phase_change(dcpl, &max_compact, &min_dense);
+ CHECK(ret, FAIL, "H5Pget_attr_phase_change");
+
+ /* Close property list */
+ ret = H5Pclose(dcpl);
+ CHECK(ret, FAIL, "H5Pclose");
+#if 0
+ /* Check on dataset's attribute storage status */
+ is_dense = H5O__is_attr_dense_test(dataset);
+ VERIFY(is_dense, FALSE, "H5O__is_attr_dense_test");
+#endif
+ /* Add attributes, until just before converting to dense storage */
+ for (u = 0; u < max_compact; u++) {
+ /* Create attribute */
+ HDsnprintf(attrname, sizeof(attrname), "attr %02u", u);
+ attr = H5Acreate2(dataset, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(attr, FAIL, "H5Acreate2");
+
+ /* Write data into the attribute */
+ ret = H5Awrite(attr, H5T_NATIVE_UINT, &u);
+ CHECK(ret, FAIL, "H5Awrite");
+
+ /* Close attribute */
+ ret = H5Aclose(attr);
+ CHECK(ret, FAIL, "H5Aclose");
+ } /* end for */
+#if 0
+ /* Check on dataset's attribute storage status */
+ is_dense = H5O__is_attr_dense_test(dataset);
+ VERIFY(is_dense, FALSE, "H5O__is_attr_dense_test");
+#endif
+ /* Add one more attribute, to push into "dense" storage */
+ /* Create dataspace for attribute */
+ sid2 = H5Screate_simple(ATTR1_RANK, dims, NULL);
+ CHECK(sid2, FAIL, "H5Screate_simple");
+
+ /* Create attribute */
+ HDsnprintf(attrname, sizeof(attrname), "attr %02u", u);
+ attr = H5Acreate2(dataset, attrname, H5T_NATIVE_INT, sid2, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(attr, FAIL, "H5Acreate2");
+#if 0
+ /* Check on dataset's attribute storage status */
+ is_dense = H5O__is_attr_dense_test(dataset);
+ VERIFY(is_dense, TRUE, "H5O__is_attr_dense_test");
+#endif
+ /* Open the attribute just created and get a second ID */
+ attr2 = H5Aopen(dataset, attrname, H5P_DEFAULT);
+ CHECK(attr2, FAIL, "H5Aopen");
+
+ /* Close attribute */
+ ret = H5Aclose(attr);
+ CHECK(ret, FAIL, "H5Aclose");
+
+ ret = H5Aclose(attr2);
+ CHECK(ret, FAIL, "H5Aclose");
+
+ /* Close Dataset */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close file */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /*-----------------------------------------------------------------------------------
+ * Reopen the file and verify the attribute's fill value. Also write
+ * some real data.
+ */
+ /* Open file */
+ fid = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl);
+ CHECK(fid, FAIL, "H5Fopen");
+
+ /* Open the dataset */
+ dataset = H5Dopen2(fid, DSET1_NAME, H5P_DEFAULT);
+ CHECK(dataset, FAIL, "H5Dopen2");
+#if 0
+ /* Check on dataset's attribute storage status */
+ is_dense = H5O__is_attr_dense_test(dataset);
+ VERIFY(is_dense, TRUE, "H5O__is_attr_dense_test");
+#endif
+ /* Open first attribute for the dataset */
+ attr = H5Aopen(dataset, attrname, H5P_DEFAULT);
+ CHECK(attr, FAIL, "H5Aopen");
+
+ /* Read attribute with fill value */
+ ret = H5Aread(attr, H5T_NATIVE_INT, read_data1);
+ CHECK(ret, FAIL, "H5Aread");
+
+ /* Verify values read in */
+ for (i = 0; i < ATTR1_DIM1; i++)
+ if (0 != read_data1[i])
+ TestErrPrintf("%d: attribute data different: read_data1[%d]=%d\n", __LINE__, i, read_data1[i]);
+
+ /* Open attribute for the second time */
+ attr2 = H5Aopen(dataset, attrname, H5P_DEFAULT);
+ CHECK(attr, FAIL, "H5Aopen");
+
+ /* Write attribute information */
+ ret = H5Awrite(attr2, H5T_NATIVE_INT, attr_data1);
+ CHECK(ret, FAIL, "H5Awrite");
+
+ /* Close attribute */
+ ret = H5Aclose(attr2);
+ CHECK(ret, FAIL, "H5Aclose");
+
+ /* Close attribute */
+ ret = H5Aclose(attr);
+ CHECK(ret, FAIL, "H5Aclose");
+
+ /* Close Dataset */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close file */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /*-----------------------------------------------------------------------------------
+ * Reopen the file and verify the data. Also rewrite the data and verify it.
+ */
+ /* Open file */
+ fid = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl);
+ CHECK(fid, FAIL, "H5Fopen");
+
+ /* Open the dataset */
+ dataset = H5Dopen2(fid, DSET1_NAME, H5P_DEFAULT);
+ CHECK(dataset, FAIL, "H5Dopen2");
+#if 0
+ /* Check on dataset's attribute storage status */
+ is_dense = H5O__is_attr_dense_test(dataset);
+ VERIFY(is_dense, TRUE, "H5O__is_attr_dense_test");
+#endif
+ /* Open first attribute for the dataset */
+ attr = H5Aopen(dataset, attrname, H5P_DEFAULT);
+ CHECK(attr, FAIL, "H5Aopen");
+
+ /* Read attribute information */
+ ret = H5Aread(attr, H5T_NATIVE_INT, read_data1);
+ CHECK(ret, FAIL, "H5Aread");
+
+ /* Verify values read in */
+ for (i = 0; i < ATTR1_DIM1; i++)
+ if (attr_data1[i] != read_data1[i])
+ TestErrPrintf("%d: attribute data different: attr_data1[%d]=%d, read_data1[%d]=%d\n", __LINE__, i,
+ attr_data1[i], i, read_data1[i]);
+
+ /* Open attribute for the second time */
+ attr2 = H5Aopen(dataset, attrname, H5P_DEFAULT);
+ CHECK(attr, FAIL, "H5Aopen");
+
+ /* Write attribute information with the second ID */
+ ret = H5Awrite(attr2, H5T_NATIVE_INT, rewrite_data);
+ CHECK(ret, FAIL, "H5Awrite");
+
+ /* Read attribute information with the first ID */
+ ret = H5Aread(attr, H5T_NATIVE_INT, read_data1);
+ CHECK(ret, FAIL, "H5Aread");
+
+ /* Verify values read in */
+ for (i = 0; i < ATTR1_DIM1; i++)
+ if (read_data1[i] != rewrite_data[i])
+ TestErrPrintf("%d: attribute data different: read_data1[%d]=%d, rewrite_data[%d]=%d\n", __LINE__,
+ i, read_data1[i], i, rewrite_data[i]);
+
+ /* Close attribute */
+ ret = H5Aclose(attr);
+ CHECK(ret, FAIL, "H5Aclose");
+
+ ret = H5Aclose(attr2);
+ CHECK(ret, FAIL, "H5Aclose");
+
+ /* Close Dataset */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close file */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /*-----------------------------------------------------------------------------------
+ * Open the attribute by index. Verify the data is shared when the attribute
+ * is opened twice.
+ */
+ /* Open file */
+ fid = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl);
+ CHECK(fid, FAIL, "H5Fopen");
+
+ /* Open the dataset */
+ dataset = H5Dopen2(fid, DSET1_NAME, H5P_DEFAULT);
+ CHECK(dataset, FAIL, "H5Dopen2");
+#if 0
+ /* Check on dataset's attribute storage status */
+ is_dense = H5O__is_attr_dense_test(dataset);
+ VERIFY(is_dense, TRUE, "H5O__is_attr_dense_test");
+#endif
+ /* Open first attribute for the dataset */
+ attr = H5Aopen_by_idx(dataset, ".", H5_INDEX_NAME, H5_ITER_INC, (hsize_t)4, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(attr, FAIL, "H5Aopen");
+
+ /* Open attribute for the second time */
+ attr2 = H5Aopen_by_idx(dataset, ".", H5_INDEX_NAME, H5_ITER_INC, (hsize_t)4, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(attr, FAIL, "H5Aopen");
+
+ /* Write attribute information with the second ID */
+ ret = H5Awrite(attr2, H5T_NATIVE_UINT, &scalar_data);
+ CHECK(ret, FAIL, "H5Awrite");
+
+ /* Read attribute information with the first ID */
+ ret = H5Aread(attr, H5T_NATIVE_INT, &read_scalar);
+ CHECK(ret, FAIL, "H5Aread");
+
+ /* Verify values read in */
+ if (read_scalar != scalar_data)
+ TestErrPrintf("%d: attribute data different: read_scalar=%d, scalar_data=%d\n", __LINE__, read_scalar,
+ scalar_data);
+
+ /* Close attribute */
+ ret = H5Aclose(attr);
+ CHECK(ret, FAIL, "H5Aclose");
+
+ ret = H5Aclose(attr2);
+ CHECK(ret, FAIL, "H5Aclose");
+
+ /* Close Dataset */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close file */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /*-----------------------------------------------------------------------------------
+ * Open one attribute. While it remains open, delete some attributes. The
+ * attribute storage should switch from dense to compact. Then open the
+ * same attribute for the second time and verify that the attribute data
+ * is shared.
+ */
+ /* Open file */
+ fid = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl);
+ CHECK(fid, FAIL, "H5Fopen");
+
+ /* Open the dataset */
+ dataset = H5Dopen2(fid, DSET1_NAME, H5P_DEFAULT);
+ CHECK(dataset, FAIL, "H5Dopen2");
+#if 0
+ /* Check on dataset's attribute storage status */
+ is_dense = H5O__is_attr_dense_test(dataset);
+ VERIFY(is_dense, TRUE, "H5O__is_attr_dense_test");
+#endif
+ /* Open attribute of the dataset for the first time */
+ attr = H5Aopen_by_idx(dataset, ".", H5_INDEX_NAME, H5_ITER_INC, (hsize_t)2, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(attr, FAIL, "H5Aopen");
+
+ /* Delete a few attributes until the storage switches to compact */
+ for (u = max_compact; u >= min_dense - 1; u--) {
+ ret = H5Adelete_by_idx(dataset, ".", H5_INDEX_NAME, H5_ITER_INC, (hsize_t)u, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Adelete_by_idx");
+ }
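+
+ /* Once fewer than min_dense attributes remain, the storage should revert
+ * to compact; the attribute handle opened above stays valid across this
+ * transition and still refers to the same attribute data. */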
+#if 0
+ /* Check on dataset's attribute storage status */
+ is_dense = H5O__is_attr_dense_test(dataset);
+ VERIFY(is_dense, FALSE, "H5O__is_attr_dense_test");
+#endif
+ /* Open attribute for the second time */
+ attr2 = H5Aopen_by_idx(dataset, ".", H5_INDEX_NAME, H5_ITER_INC, (hsize_t)2, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(attr, FAIL, "H5Aopen");
+
+ /* Write attribute information with the second ID */
+ ret = H5Awrite(attr2, H5T_NATIVE_UINT, &scalar_data);
+ CHECK(ret, FAIL, "H5Awrite");
+
+ /* Read attribute information with the first ID */
+ ret = H5Aread(attr, H5T_NATIVE_INT, &read_scalar);
+ CHECK(ret, FAIL, "H5Aread");
+
+ /* Verify values read in */
+ if (read_scalar != scalar_data)
+ TestErrPrintf("%d: attribute data different: read_scalar=%d, scalar_data=%d\n", __LINE__, read_scalar,
+ scalar_data);
+
+ /* Close attribute */
+ ret = H5Aclose(attr);
+ CHECK(ret, FAIL, "H5Aclose");
+
+ ret = H5Aclose(attr2);
+ CHECK(ret, FAIL, "H5Aclose");
+
+ /* Close Dataset */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close file */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /*-----------------------------------------------------------------------------------
+ * Open one attribute. While it remains open, create some attributes. The
+ * attribute storage should switch from compact to dense. Then open the
+ * same attribute for the second time and verify that the attribute data
+ * is shared.
+ */
+ /* Open file */
+ fid = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl);
+ CHECK(fid, FAIL, "H5Fopen");
+
+ /* Open the dataset */
+ dataset = H5Dopen2(fid, DSET1_NAME, H5P_DEFAULT);
+ CHECK(dataset, FAIL, "H5Dopen2");
+#if 0
+ /* Check on dataset's attribute storage status */
+ is_dense = H5O__is_attr_dense_test(dataset);
+ VERIFY(is_dense, FALSE, "H5O__is_attr_dense_test");
+#endif
+ /* Open attribute of the dataset for the first time */
+ attr = H5Aopen_by_idx(dataset, ".", H5_INDEX_NAME, H5_ITER_INC, (hsize_t)3, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(attr, FAIL, "H5Aopen");
+
+ /* Create a few attributes until the storage switches to dense */
+ for (u = min_dense - 1; u <= max_compact; u++) {
+ /* Create attribute */
+ HDsnprintf(attrname, sizeof(attrname), "attr %02u", u);
+ add_attr = H5Acreate2(dataset, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(add_attr, FAIL, "H5Acreate2");
+
+ /* Write data into the attribute */
+ ret = H5Awrite(add_attr, H5T_NATIVE_UINT, &u);
+ CHECK(ret, FAIL, "H5Awrite");
+
+ /* Close attribute */
+ ret = H5Aclose(add_attr);
+ CHECK(ret, FAIL, "H5Aclose");
+ }
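+ /* (The attribute count now exceeds max_compact, so the attribute storage should have
+ * switched to dense form while 'attr' remains open) */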
+#if 0
+ /* Check on dataset's attribute storage status */
+ is_dense = H5O__is_attr_dense_test(dataset);
+ VERIFY(is_dense, TRUE, "H5O__is_attr_dense_test");
+#endif
+ /* Open attribute for the second time */
+ attr2 = H5Aopen_by_idx(dataset, ".", H5_INDEX_NAME, H5_ITER_INC, (hsize_t)3, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(attr2, FAIL, "H5Aopen");
+
+ /* Write attribute information with the second ID */
+ ret = H5Awrite(attr2, H5T_NATIVE_UINT, &scalar_data);
+ CHECK(ret, FAIL, "H5Awrite");
+
+ /* Read attribute information with the first ID */
+ ret = H5Aread(attr, H5T_NATIVE_INT, &read_scalar);
+ CHECK(ret, FAIL, "H5Aread");
+
+ /* Verify values read in */
+ if (read_scalar != scalar_data)
+ TestErrPrintf("%d: attribute data different: read_scalar=%d, scalar_data=%d\n", __LINE__, read_scalar,
+ scalar_data);
+
+ /* Close attribute */
+ ret = H5Aclose(attr);
+ CHECK(ret, FAIL, "H5Aclose");
+
+ ret = H5Aclose(attr2);
+ CHECK(ret, FAIL, "H5Aclose");
+
+ /* Close Dataset */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close file */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /*-----------------------------------------------------------------------------------
+ * Verify that the attribute being pointed to by different paths shares
+ * the same data.
+ */
+ /* Open file */
+ fid = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl);
+ CHECK(fid, FAIL, "H5Fopen");
+
+ /* Create a group */
+ gid1 = H5Gcreate2(fid, GROUP1_NAME, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(gid1, FAIL, "H5Gcreate2");
+
+ /* Create hard link to the first group */
+ ret = H5Lcreate_hard(gid1, GROUP1_NAME, H5L_SAME_LOC, GROUP2_NAME, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Lcreate_hard");
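+ /* (The hard link provides a second path to the same group object, so an attribute
+ * opened through either path must refer to the same stored data) */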
+
+ /* Add attributes, until just before converting to dense storage */
+ for (u = 0; u < max_compact; u++) {
+ /* Create attribute */
+ HDsnprintf(attrname, sizeof(attrname), "attr %02u", u);
+ attr = H5Acreate2(gid1, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(attr, FAIL, "H5Acreate2");
+
+ /* Write data into the attribute */
+ ret = H5Awrite(attr, H5T_NATIVE_UINT, &u);
+ CHECK(ret, FAIL, "H5Awrite");
+
+ /* Close attribute */
+ ret = H5Aclose(attr);
+ CHECK(ret, FAIL, "H5Aclose");
+ } /* end for */
+
+ /* Try to create another attribute to make dense storage */
+ attr = H5Acreate2(gid1, ATTR2_NAME, H5T_NATIVE_INT, sid2, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(attr, FAIL, "H5Acreate2");
+#if 0
+ /* Check on group's attribute storage status */
+ is_dense = H5O__is_attr_dense_test(gid1);
+ VERIFY(is_dense, TRUE, "H5O__is_attr_dense_test");
+#endif
+ /* Open the hard link just created */
+ gid2 = H5Gopen2(fid, GROUP2_NAME, H5P_DEFAULT);
+ CHECK(gid2, FAIL, "H5Gopen2");
+
+ /* Open the attribute of the group for the second time */
+ attr2 = H5Aopen(gid2, ATTR2_NAME, H5P_DEFAULT);
+ CHECK(attr2, FAIL, "H5Aopen");
+
+ /* Write attribute information with the first attribute handle */
+ ret = H5Awrite(attr, H5T_NATIVE_INT, attr_data1);
+ CHECK(ret, FAIL, "H5Awrite");
+
+ /* Read attribute information with the second attribute handle */
+ ret = H5Aread(attr2, H5T_NATIVE_INT, read_data1);
+ CHECK(ret, FAIL, "H5Aread");
+
+ /* Verify values read in */
+ for (i = 0; i < ATTR1_DIM1; i++)
+ if (attr_data1[i] != read_data1[i])
+ TestErrPrintf("%d: attribute data different: attr_data1[%d]=%d, read_data1[%d]=%d\n", __LINE__, i,
+ attr_data1[i], i, read_data1[i]);
+
+ /* Close attribute */
+ ret = H5Aclose(attr);
+ CHECK(ret, FAIL, "H5Aclose");
+
+ ret = H5Aclose(attr2);
+ CHECK(ret, FAIL, "H5Aclose");
+
+ /* Close group */
+ ret = H5Gclose(gid1);
+ CHECK(ret, FAIL, "H5Gclose");
+
+ ret = H5Gclose(gid2);
+ CHECK(ret, FAIL, "H5Gclose");
+
+ /* Close Attribute dataspace */
+ ret = H5Sclose(sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close Dataset dataspace */
+ ret = H5Sclose(sid2);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close file */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+} /* test_attr_dense_dup_ids() */
+
+/****************************************************************
+**
+** test_attr_big(): Test basic H5A (attribute) code.
+** Tests storing "big" attribute in dense storage immediately, if available
+**
+****************************************************************/
+static void
+test_attr_big(hid_t fcpl, hid_t fapl)
+{
+ hid_t fid; /* HDF5 File ID */
+ hid_t dataset; /* Dataset ID */
+ hid_t sid; /* Dataspace ID */
+ hid_t big_sid; /* "Big" dataspace ID */
+ hsize_t dims[ATTR6_RANK] = {ATTR6_DIM1, ATTR6_DIM2, ATTR6_DIM3}; /* Attribute dimensions */
+ hid_t attr; /* Attribute ID */
+ hid_t dcpl; /* Dataset creation property list ID */
+ char attrname[NAME_BUF_SIZE]; /* Name of attribute */
+ unsigned max_compact; /* Maximum # of attributes to store compactly */
+ unsigned min_dense; /* Minimum # of attributes to store "densely" */
+ unsigned nshared_indices; /* # of shared message indices */
+ H5F_libver_t low, high; /* File format bounds */
+#if 0
+ htri_t is_empty; /* Are there any attributes? */
+ htri_t is_dense; /* Are attributes stored densely? */
+#endif
+ unsigned u; /* Local index variable */
+#if 0
+ h5_stat_size_t empty_filesize; /* Size of empty file */
+ h5_stat_size_t filesize; /* Size of file after modifications */
+#endif
+ herr_t ret; /* Generic return value */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing Storing 'Big' Attributes in Dense Storage\n"));
+
+ /* Create file */
+ fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl, fapl);
+ CHECK(fid, FAIL, "H5Fcreate");
+
+ /* Close file */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+#if 0
+ /* Get size of file */
+ empty_filesize = h5_get_file_size(FILENAME, fapl);
+ if (empty_filesize < 0)
+ TestErrPrintf("Line %d: file size wrong!\n", __LINE__);
+#endif
+ /* Re-open file */
+ fid = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl);
+ CHECK(fid, FAIL, "H5Fopen");
+
+ /* Create dataspace for dataset & "small" attributes */
+ sid = H5Screate(H5S_SCALAR);
+ CHECK(sid, FAIL, "H5Screate");
+
+ /* Create "big" dataspace for "big" attributes */
+ big_sid = H5Screate_simple(ATTR6_RANK, dims, NULL);
+ CHECK(big_sid, FAIL, "H5Screate_simple");
+
+ /* Need a DCPL to query the attribute storage phase-change properties */
+ if (dcpl_g == H5P_DEFAULT) {
+ dcpl = H5Pcreate(H5P_DATASET_CREATE);
+ CHECK(dcpl, FAIL, "H5Pcreate");
+ }
+ else {
+ dcpl = H5Pcopy(dcpl_g);
+ CHECK(dcpl, FAIL, "H5Pcopy");
+ }
+
+ /* Retrieve limits for compact/dense attribute storage */
+ ret = H5Pget_attr_phase_change(dcpl, &max_compact, &min_dense);
+ CHECK(ret, FAIL, "H5Pget_attr_phase_change");
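+ /* (max_compact/min_dense are the phase-change thresholds: attributes stay in the object
+ * header ("compact") until their count exceeds max_compact, and dense storage reverts to
+ * compact once the count drops below min_dense) */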
+
+ /* Retrieve # of shared message indices (i.e., whether attributes are shared or not) */
+ ret = H5Pget_shared_mesg_nindexes(fcpl, &nshared_indices);
+ CHECK(ret, FAIL, "H5Pget_shared_mesg_nindexes");
+
+ /* Retrieve the format bounds for creating objects in the file */
+ ret = H5Pget_libver_bounds(fapl, &low, &high);
+ CHECK(ret, FAIL, "H5Pget_libver_bounds");
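+ /* (Attributes too large to fit in an object header message require the newer file format,
+ * so these bounds are used below to decide whether creating the "big" attributes is
+ * expected to succeed) */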
+
+ /* Create a dataset */
+ dataset = H5Dcreate2(fid, DSET1_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT);
+ CHECK(dataset, FAIL, "H5Dcreate2");
+
+ /* Close property list */
+ ret = H5Pclose(dcpl);
+ CHECK(ret, FAIL, "H5Pclose");
+
+#if 0
+ /* Check on dataset's attribute storage status */
+ is_empty = H5O__is_attr_empty_test(dataset);
+ VERIFY(is_empty, TRUE, "H5O__is_attr_empty_test");
+ is_dense = H5O__is_attr_dense_test(dataset);
+ VERIFY(is_dense, FALSE, "H5O__is_attr_dense_test");
+#endif
+
+ /* Add first "small" attribute, which should be in compact storage */
+
+ /* Create attribute */
+ u = 0;
+ HDsnprintf(attrname, sizeof(attrname), "attr %02u", u);
+ attr = H5Acreate2(dataset, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(attr, FAIL, "H5Acreate2");
+
+ /* Close attribute */
+ ret = H5Aclose(attr);
+ CHECK(ret, FAIL, "H5Aclose");
+#if 0
+ /* Check on dataset's attribute storage status */
+ is_empty = H5O__is_attr_empty_test(dataset);
+ VERIFY(is_empty, FALSE, "H5O__is_attr_empty_test");
+ is_dense = H5O__is_attr_dense_test(dataset);
+ VERIFY(is_dense, FALSE, "H5O__is_attr_dense_test");
+#endif
+
+ /* Add second "small" attribute, which should stay in compact storage */
+
+ /* Create attribute */
+ u = 1;
+ HDsnprintf(attrname, sizeof(attrname), "attr %02u", u);
+ attr = H5Acreate2(dataset, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(attr, FAIL, "H5Acreate2");
+
+ /* Close attribute */
+ ret = H5Aclose(attr);
+ CHECK(ret, FAIL, "H5Aclose");
+#if 0
+ /* Check on dataset's attribute storage status */
+ is_empty = H5O__is_attr_empty_test(dataset);
+ VERIFY(is_empty, FALSE, "H5O__is_attr_empty_test");
+ is_dense = H5O__is_attr_dense_test(dataset);
+ VERIFY(is_dense, FALSE, "H5O__is_attr_dense_test");
+#endif
+
+ /* Add first "big" attribute, which should push storage into dense form */
+
+ /* Create attribute */
+ u = 2;
+ HDsnprintf(attrname, sizeof(attrname), "attr %02u", u);
+ attr = H5Acreate2(dataset, attrname, H5T_NATIVE_UINT, big_sid, H5P_DEFAULT, H5P_DEFAULT);
+ if (low == H5F_LIBVER_LATEST || attr >= 0) {
+ CHECK(attr, FAIL, "H5Acreate2");
+
+ /* Close attribute */
+ ret = H5Aclose(attr);
+ CHECK(ret, FAIL, "H5Aclose");
+
+ /* Check on dataset's attribute storage status */
+ /* (when attributes are shared, the "big" attribute goes into the shared
+ * message heap instead of forcing the attribute storage into the dense
+ * form - QAK)
+ */
+#if 0
+ is_empty = H5O__is_attr_empty_test(dataset);
+ VERIFY(is_empty, FALSE, "H5O__is_attr_empty_test");
+ is_dense = H5O__is_attr_dense_test(dataset);
+ VERIFY(is_dense, (nshared_indices ? FALSE : TRUE), "H5O__is_attr_dense_test");
+#endif
+
+ /* Add second "big" attribute, which should leave storage in dense form */
+
+ /* Create attribute */
+ u = 3;
+ HDsnprintf(attrname, sizeof(attrname), "attr %02u", u);
+ attr = H5Acreate2(dataset, attrname, H5T_NATIVE_UINT, big_sid, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(attr, FAIL, "H5Acreate2");
+
+ /* Close attribute */
+ ret = H5Aclose(attr);
+ CHECK(ret, FAIL, "H5Aclose");
+
+ /* Check on dataset's attribute storage status */
+ /* (when attributes are shared, the "big" attribute goes into the shared
+ * message heap instead of forcing the attribute storage into the dense
+ * form - QAK)
+ */
+#if 0
+ is_empty = H5O__is_attr_empty_test(dataset);
+ VERIFY(is_empty, FALSE, "H5O__is_attr_empty_test");
+ is_dense = H5O__is_attr_dense_test(dataset);
+ VERIFY(is_dense, (nshared_indices ? FALSE : TRUE), "H5O__is_attr_dense_test");
+#endif
+
+ /* Delete second "small" attribute, attributes should still be stored densely */
+
+ /* Delete attribute */
+ u = 1;
+ HDsnprintf(attrname, sizeof(attrname), "attr %02u", u);
+ ret = H5Adelete(dataset, attrname);
+ CHECK(ret, FAIL, "H5Adelete");
+#if 0
+ /* Check on dataset's attribute storage status */
+ is_empty = H5O__is_attr_empty_test(dataset);
+ VERIFY(is_empty, FALSE, "H5O__is_attr_empty_test");
+ is_dense = H5O__is_attr_dense_test(dataset);
+ VERIFY(is_dense, (nshared_indices ? FALSE : TRUE), "H5O__is_attr_dense_test");
+#endif
+
+ /* Delete second "big" attribute, attributes should still be stored densely */
+
+ /* Delete attribute */
+ u = 3;
+ HDsnprintf(attrname, sizeof(attrname), "attr %02u", u);
+ ret = H5Adelete(dataset, attrname);
+ CHECK(ret, FAIL, "H5Adelete");
+#if 0
+ /* Check on dataset's attribute storage status */
+ is_empty = H5O__is_attr_empty_test(dataset);
+ VERIFY(is_empty, FALSE, "H5O__is_attr_empty_test");
+ is_dense = H5O__is_attr_dense_test(dataset);
+ VERIFY(is_dense, (nshared_indices ? FALSE : TRUE), "H5O__is_attr_dense_test");
+#endif
+
+ /* Delete first "big" attribute, attributes should _not_ be stored densely */
+
+ /* Delete attribute */
+ u = 2;
+ HDsnprintf(attrname, sizeof(attrname), "attr %02u", u);
+ ret = H5Adelete(dataset, attrname);
+ CHECK(ret, FAIL, "H5Adelete");
+#if 0
+ /* Check on dataset's attribute storage status */
+ is_empty = H5O__is_attr_empty_test(dataset);
+ VERIFY(is_empty, FALSE, "H5O__is_attr_empty_test");
+ is_dense = H5O__is_attr_dense_test(dataset);
+ VERIFY(is_dense, FALSE, "H5O__is_attr_dense_test");
+#endif
+
+ /* Delete first "small" attribute, should be no attributes now */
+
+ /* Delete attribute */
+ u = 0;
+ HDsnprintf(attrname, sizeof(attrname), "attr %02u", u);
+ ret = H5Adelete(dataset, attrname);
+ CHECK(ret, FAIL, "H5Adelete");
+#if 0
+ /* Check on dataset's attribute storage status */
+ is_empty = H5O__is_attr_empty_test(dataset);
+ VERIFY(is_empty, TRUE, "H5O__is_attr_empty_test");
+#endif
+ } /* end if */
+#if 0
+ else {
+ /* Shouldn't be able to create "big" attributes with older version of format */
+ VERIFY(attr, FAIL, "H5Acreate2");
+
+ /* Check on dataset's attribute storage status */
+ /* (when attributes are shared, the "big" attribute goes into the shared
+ * message heap instead of forcing the attribute storage into the dense
+ * form - QAK)
+ */
+ is_empty = H5O__is_attr_empty_test(dataset);
+ VERIFY(is_empty, FALSE, "H5O__is_attr_empty_test");
+ is_dense = H5O__is_attr_dense_test(dataset);
+ VERIFY(is_dense, FALSE, "H5O__is_attr_dense_test");
+ } /* end else */
+#endif
+
+ /* Close dataspaces */
+ ret = H5Sclose(sid);
+ CHECK(ret, FAIL, "H5Sclose");
+ ret = H5Sclose(big_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close Dataset */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Unlink dataset */
+ ret = H5Ldelete(fid, DSET1_NAME, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Ldelete");
+
+ /* Close file */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+#if 0
+ if (h5_using_default_driver(NULL)) {
+ /* Check size of file */
+ filesize = h5_get_file_size(FILENAME, fapl);
+ VERIFY(filesize, empty_filesize, "h5_get_file_size");
+ }
+#endif
+} /* test_attr_big() */
+
+/****************************************************************
+**
+** test_attr_null_space(): Test basic H5A (attribute) code.
+** Tests storing attribute with "null" dataspace
+**
+****************************************************************/
+static void
+test_attr_null_space(hid_t fcpl, hid_t fapl)
+{
+ hid_t fid; /* HDF5 File ID */
+ hid_t dataset; /* Dataset ID */
+ hid_t sid; /* Dataspace ID */
+ hid_t null_sid; /* "null" dataspace ID */
+ hid_t attr_sid; /* Attribute's dataspace ID */
+ hid_t attr; /* Attribute ID */
+ char attrname[NAME_BUF_SIZE]; /* Name of attribute */
+ unsigned value; /* Attribute value */
+ htri_t cmp; /* Results of comparison */
+#if 0
+ hsize_t storage_size; /* Size of storage for attribute */
+#endif
+ H5A_info_t ainfo; /* Attribute info */
+#if 0
+ h5_stat_size_t empty_filesize; /* Size of empty file */
+ h5_stat_size_t filesize; /* Size of file after modifications */
+#endif
+ herr_t ret; /* Generic return value */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing Storing Attributes with 'null' dataspace\n"));
+
+ /* Create file */
+ fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl, fapl);
+ CHECK(fid, FAIL, "H5Fcreate");
+
+ /* Close file */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+#if 0
+ /* Get size of file */
+ empty_filesize = h5_get_file_size(FILENAME, fapl);
+ if (empty_filesize < 0)
+ TestErrPrintf("Line %d: file size wrong!\n", __LINE__);
+#endif
+ /* Re-open file */
+ fid = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl);
+ CHECK(fid, FAIL, "H5Fopen");
+
+ /* Create dataspace for dataset attributes */
+ sid = H5Screate(H5S_SCALAR);
+ CHECK(sid, FAIL, "H5Screate");
+
+ /* Create "null" dataspace for attribute */
+ null_sid = H5Screate(H5S_NULL);
+ CHECK(null_sid, FAIL, "H5Screate");
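+ /* (A "null" dataspace contains no elements, so reads from and writes to attributes
+ * created with it should succeed without transferring any data) */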
+
+ /* Create a dataset */
+ dataset = H5Dcreate2(fid, DSET1_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl_g, H5P_DEFAULT);
+ CHECK(dataset, FAIL, "H5Dcreate2");
+
+ /* Add attribute with 'null' dataspace */
+
+ /* Create attribute */
+ HDstrcpy(attrname, "null attr");
+ attr = H5Acreate2(dataset, attrname, H5T_NATIVE_UINT, null_sid, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(attr, FAIL, "H5Acreate2");
+
+ /* Try to read data from the attribute */
+ /* (shouldn't fail, but should leave buffer alone) */
+ value = 23;
+ ret = H5Aread(attr, H5T_NATIVE_UINT, &value);
+ CHECK(ret, FAIL, "H5Aread");
+ VERIFY(value, 23, "H5Aread");
+
+ /* Get the dataspace for the attribute and make certain it's 'null' */
+ attr_sid = H5Aget_space(attr);
+ CHECK(attr_sid, FAIL, "H5Aget_space");
+
+ /* Compare the dataspaces */
+ cmp = H5Sextent_equal(attr_sid, null_sid);
+ CHECK(cmp, FAIL, "H5Sextent_equal");
+ VERIFY(cmp, TRUE, "H5Sextent_equal");
+
+ /* Close dataspace */
+ ret = H5Sclose(attr_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+#if 0
+ /* Check the storage size for the attribute */
+ storage_size = H5Aget_storage_size(attr);
+ VERIFY(storage_size, 0, "H5Aget_storage_size");
+#endif
+ /* Get the attribute info */
+ ret = H5Aget_info(attr, &ainfo);
+ CHECK(ret, FAIL, "H5Aget_info");
+#if 0
+ VERIFY(ainfo.data_size, storage_size, "H5Aget_info");
+#endif
+ /* Close attribute */
+ ret = H5Aclose(attr);
+ CHECK(ret, FAIL, "H5Aclose");
+
+ /* Add another attribute with 'null' dataspace */
+
+ /* Create attribute */
+ HDstrcpy(attrname, "null attr #2");
+ attr = H5Acreate2(dataset, attrname, H5T_NATIVE_UINT, null_sid, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(attr, FAIL, "H5Acreate2");
+
+ /* Try to write data to the attribute */
+ /* (shouldn't fail, but should leave buffer alone) */
+ value = 23;
+ ret = H5Awrite(attr, H5T_NATIVE_UINT, &value);
+ CHECK(ret, FAIL, "H5Awrite");
+ VERIFY(value, 23, "H5Awrite");
+
+ /* Close attribute */
+ ret = H5Aclose(attr);
+ CHECK(ret, FAIL, "H5Aclose");
+
+ /* Close Dataset */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close file */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Re-open the file and check on the attributes */
+
+ /* Re-open file */
+ fid = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl);
+ CHECK(fid, FAIL, "H5Fopen");
+
+ /* Open dataset */
+ dataset = H5Dopen2(fid, DSET1_NAME, H5P_DEFAULT);
+ CHECK(dataset, FAIL, "H5Dopen2");
+
+ /* Open the second attribute created above */
+ HDstrcpy(attrname, "null attr #2");
+ attr = H5Aopen(dataset, attrname, H5P_DEFAULT);
+ CHECK(attr, FAIL, "H5Aopen");
+
+ /* Try to read data from the attribute */
+ /* (shouldn't fail, but should leave buffer alone) */
+ value = 23;
+ ret = H5Aread(attr, H5T_NATIVE_UINT, &value);
+ CHECK(ret, FAIL, "H5Aread");
+ VERIFY(value, 23, "H5Aread");
+
+ /* Get the dataspace for the attribute and make certain it's 'null' */
+ attr_sid = H5Aget_space(attr);
+ CHECK(attr_sid, FAIL, "H5Aget_space");
+
+ /* Compare the dataspaces */
+ cmp = H5Sextent_equal(attr_sid, null_sid);
+ CHECK(cmp, FAIL, "H5Sextent_equal");
+ VERIFY(cmp, TRUE, "H5Sextent_equal");
+
+ /* Close dataspace */
+ ret = H5Sclose(attr_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+#if 0
+ /* Check the storage size for the attribute */
+ storage_size = H5Aget_storage_size(attr);
+ VERIFY(storage_size, 0, "H5Aget_storage_size");
+#endif
+ /* Get the attribute info */
+ ret = H5Aget_info(attr, &ainfo);
+ CHECK(ret, FAIL, "H5Aget_info");
+#if 0
+ VERIFY(ainfo.data_size, storage_size, "H5Aget_info");
+#endif
+
+ /* Close attribute */
+ ret = H5Aclose(attr);
+ CHECK(ret, FAIL, "H5Aclose");
+
+ /* Open the first attribute created above */
+ HDstrcpy(attrname, "null attr");
+ attr = H5Aopen(dataset, attrname, H5P_DEFAULT);
+ CHECK(attr, FAIL, "H5Aopen");
+
+ /* Try to write data to the attribute */
+ /* (shouldn't fail, but should leave buffer alone) */
+ value = 23;
+ ret = H5Awrite(attr, H5T_NATIVE_UINT, &value);
+ CHECK(ret, FAIL, "H5Awrite");
+ VERIFY(value, 23, "H5Awrite");
+
+ /* Close attribute */
+ ret = H5Aclose(attr);
+ CHECK(ret, FAIL, "H5Aclose");
+
+ /* Close Dataset */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Unlink dataset */
+ ret = H5Ldelete(fid, DSET1_NAME, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Ldelete");
+
+ /* Close file */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Close dataspaces */
+ ret = H5Sclose(sid);
+ CHECK(ret, FAIL, "H5Sclose");
+ ret = H5Sclose(null_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+#if 0
+ if (h5_using_default_driver(NULL)) {
+ /* Check size of file */
+ filesize = h5_get_file_size(FILENAME, fapl);
+ VERIFY(filesize, empty_filesize, "h5_get_file_size");
+ }
+#endif
+} /* test_attr_null_space() */
+
+/****************************************************************
+**
+** test_attr_deprec(): Test basic H5A (attribute) code.
+** Tests deprecated API routines
+**
+****************************************************************/
+static void
+test_attr_deprec(hid_t fcpl, hid_t fapl)
+{
+#ifndef H5_NO_DEPRECATED_SYMBOLS
+ hid_t fid; /* HDF5 File ID */
+ hid_t dataset; /* Dataset ID */
+ hid_t sid; /* Dataspace ID */
+ hid_t attr; /* Attribute ID */
+ herr_t ret; /* Generic return value */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing Deprecated Attribute Routines\n"));
+
+ /* Create file */
+ fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl, fapl);
+ CHECK(fid, FAIL, "H5Fcreate");
+
+ /* Create dataspace for dataset attributes */
+ sid = H5Screate(H5S_SCALAR);
+ CHECK(sid, FAIL, "H5Screate");
+
+ /* Create a dataset */
+ dataset = H5Dcreate2(fid, DSET1_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl_g, H5P_DEFAULT);
+ CHECK(dataset, FAIL, "H5Dcreate2");
+
+ /* Add attribute to dataset */
+
+ /* Create attribute */
+ attr = H5Acreate1(dataset, "attr", H5T_NATIVE_UINT, sid, H5P_DEFAULT);
+ CHECK(attr, FAIL, "H5Acreate1");
+
+ /* Close attribute */
+ ret = H5Aclose(attr);
+ CHECK(ret, FAIL, "H5Aclose");
+
+ /* Close dataspace */
+ ret = H5Sclose(sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close Dataset */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close file */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Re-open the file and operate on the attribute */
+
+ /* Re-open file */
+ fid = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl);
+ CHECK(fid, FAIL, "H5Fopen");
+
+ /* Open dataset */
+ dataset = H5Dopen2(fid, DSET1_NAME, H5P_DEFAULT);
+ CHECK(dataset, FAIL, "H5Dopen2");
+
+#if 0
+ /* Get number of attributes with bad ID */
+ H5E_BEGIN_TRY
+ {
+ ret = H5Aget_num_attrs((hid_t)-1);
+ }
+ H5E_END_TRY;
+ VERIFY(ret, FAIL, "H5Aget_num_attrs");
+
+ /* Get number of attributes */
+ ret = H5Aget_num_attrs(dataset);
+ VERIFY(ret, 1, "H5Aget_num_attrs");
+#endif
+ /* Open the attribute by index */
+ attr = H5Aopen_idx(dataset, 0);
+ CHECK(attr, FAIL, "H5Aopen_idx");
+
+ /* Close attribute */
+ ret = H5Aclose(attr);
+ CHECK(ret, FAIL, "H5Aclose");
+
+ /* Open the attribute by name */
+ attr = H5Aopen_name(dataset, "attr");
+ CHECK(attr, FAIL, "H5Aopen_name");
+
+ /* Close attribute */
+ ret = H5Aclose(attr);
+ CHECK(ret, FAIL, "H5Aclose");
+
+ /* Close Dataset */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close file */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+#else /* H5_NO_DEPRECATED_SYMBOLS */
+ /* Shut compiler up */
+ (void)fcpl;
+ (void)fapl;
+
+ /* Output message about test being skipped */
+ MESSAGE(5, ("Skipping Test On Deprecated Attribute Routines\n"));
+
+#endif /* H5_NO_DEPRECATED_SYMBOLS */
+} /* test_attr_deprec() */
+
+/****************************************************************
+**
+** test_attr_many(): Test basic H5A (attribute) code.
+** Tests storing lots of attributes
+**
+****************************************************************/
+static void
+test_attr_many(hbool_t new_format, hid_t fcpl, hid_t fapl)
+{
+ hid_t fid; /* HDF5 File ID */
+ hid_t gid; /* Group ID */
+ hid_t sid; /* Dataspace ID */
+ hid_t aid; /* Attribute ID */
+ char attrname[NAME_BUF_SIZE]; /* Name of attribute */
+ unsigned nattr = (new_format ? NATTR_MANY_NEW : NATTR_MANY_OLD); /* Number of attributes */
+ htri_t exists; /* Whether the attribute exists or not */
+ unsigned u; /* Local index variable */
+ herr_t ret; /* Generic return value */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing Storing Many Attributes\n"));
+
+ /* Create file */
+ fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl, fapl);
+ CHECK(fid, FAIL, "H5Fcreate");
+
+ /* Create dataspace for attribute */
+ sid = H5Screate(H5S_SCALAR);
+ CHECK(sid, FAIL, "H5Screate");
+
+ /* Create group for attributes */
+ gid = H5Gcreate2(fid, GROUP1_NAME, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(gid, FAIL, "H5Gcreate2");
+
+ /* Create many attributes */
+ for (u = 0; u < nattr; u++) {
+ HDsnprintf(attrname, sizeof(attrname), "a-%06u", u);
+
+ exists = H5Aexists(gid, attrname);
+ VERIFY(exists, FALSE, "H5Aexists");
+
+ exists = H5Aexists_by_name(fid, GROUP1_NAME, attrname, H5P_DEFAULT);
+ VERIFY(exists, FALSE, "H5Aexists_by_name");
+
+ aid = H5Acreate2(gid, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(aid, FAIL, "H5Acreate2");
+
+ exists = H5Aexists(gid, attrname);
+ VERIFY(exists, TRUE, "H5Aexists");
+
+ exists = H5Aexists_by_name(fid, GROUP1_NAME, attrname, H5P_DEFAULT);
+ VERIFY(exists, TRUE, "H5Aexists_by_name");
+
+ ret = H5Awrite(aid, H5T_NATIVE_UINT, &u);
+ CHECK(ret, FAIL, "H5Awrite");
+
+ ret = H5Aclose(aid);
+ CHECK(ret, FAIL, "H5Aclose");
+
+ exists = H5Aexists(gid, attrname);
+ VERIFY(exists, TRUE, "H5Aexists");
+
+ exists = H5Aexists_by_name(fid, GROUP1_NAME, attrname, H5P_DEFAULT);
+ VERIFY(exists, TRUE, "H5Aexists_by_name");
+ } /* end for */
+
+ /* Close group */
+ ret = H5Gclose(gid);
+ CHECK(ret, FAIL, "H5Gclose");
+
+ /* Close file */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Re-open the file and check on the attributes */
+
+ /* Re-open file */
+ fid = H5Fopen(FILENAME, H5F_ACC_RDONLY, fapl);
+ CHECK(fid, FAIL, "H5Fopen");
+
+ /* Re-open group */
+ gid = H5Gopen2(fid, GROUP1_NAME, H5P_DEFAULT);
+ CHECK(gid, FAIL, "H5Gopen2");
+
+ /* Verify attributes */
+ for (u = 0; u < nattr; u++) {
+ unsigned value; /* Attribute value */
+
+ HDsnprintf(attrname, sizeof(attrname), "a-%06u", u);
+
+ exists = H5Aexists(gid, attrname);
+ VERIFY(exists, TRUE, "H5Aexists");
+
+ exists = H5Aexists_by_name(fid, GROUP1_NAME, attrname, H5P_DEFAULT);
+ VERIFY(exists, TRUE, "H5Aexists_by_name");
+
+ aid = H5Aopen(gid, attrname, H5P_DEFAULT);
+ CHECK(aid, FAIL, "H5Aopen");
+
+ exists = H5Aexists(gid, attrname);
+ VERIFY(exists, TRUE, "H5Aexists");
+
+ exists = H5Aexists_by_name(fid, GROUP1_NAME, attrname, H5P_DEFAULT);
+ VERIFY(exists, TRUE, "H5Aexists_by_name");
+
+ ret = H5Aread(aid, H5T_NATIVE_UINT, &value);
+ CHECK(ret, FAIL, "H5Aread");
+ VERIFY(value, u, "H5Aread");
+
+ ret = H5Aclose(aid);
+ CHECK(ret, FAIL, "H5Aclose");
+ } /* end for */
+
+ /* Close group */
+ ret = H5Gclose(gid);
+ CHECK(ret, FAIL, "H5Gclose");
+
+ /* Close file */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Close dataspaces */
+ ret = H5Sclose(sid);
+ CHECK(ret, FAIL, "H5Sclose");
+} /* test_attr_many() */
+
+/****************************************************************
+**
+** test_attr_corder_create_basic(): Test basic H5A (attribute) code.
+** Tests basic code to create objects with attribute creation order info
+**
+****************************************************************/
+static void
+test_attr_corder_create_basic(hid_t fcpl, hid_t fapl)
+{
+ hid_t fid; /* HDF5 File ID */
+ hid_t dataset; /* Dataset ID */
+ hid_t sid; /* Dataspace ID */
+ hid_t dcpl; /* Dataset creation property list ID */
+ unsigned crt_order_flags; /* Creation order flags */
+#if 0
+ htri_t is_empty; /* Are there any attributes? */
+ htri_t is_dense; /* Are attributes stored densely? */
+#endif
+ herr_t ret; /* Generic return value */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing Basic Code for Attributes with Creation Order Info\n"));
+
+ /* Create file */
+ fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl, fapl);
+ CHECK(fid, FAIL, "H5Fcreate");
+
+ /* Create dataset creation property list */
+ if (dcpl_g == H5P_DEFAULT) {
+ dcpl = H5Pcreate(H5P_DATASET_CREATE);
+ CHECK(dcpl, FAIL, "H5Pcreate");
+ }
+ else {
+ dcpl = H5Pcopy(dcpl_g);
+ CHECK(dcpl, FAIL, "H5Pcopy");
+ }
+#if 0
+ /* Get creation order indexing on object */
+ ret = H5Pget_attr_creation_order(dcpl, &crt_order_flags);
+ CHECK(ret, FAIL, "H5Pget_attr_creation_order");
+ VERIFY(crt_order_flags, 0, "H5Pget_attr_creation_order");
+#endif
+ /* Setting an invalid combination of attribute creation order flags (indexing without tracking) should fail */
+ H5E_BEGIN_TRY
+ {
+ ret = H5Pset_attr_creation_order(dcpl, H5P_CRT_ORDER_INDEXED);
+ }
+ H5E_END_TRY;
+ VERIFY(ret, FAIL, "H5Pset_attr_creation_order");
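+ /* (H5P_CRT_ORDER_INDEXED requires H5P_CRT_ORDER_TRACKED to be set as well, so requesting
+ * indexing alone is rejected) */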
+
+#if 0
+ ret = H5Pget_attr_creation_order(dcpl, &crt_order_flags);
+ CHECK(ret, FAIL, "H5Pget_attr_creation_order");
+ VERIFY(crt_order_flags, 0, "H5Pget_attr_creation_order");
+#endif
+
+ /* Set attribute creation order tracking & indexing for object */
+ ret = H5Pset_attr_creation_order(dcpl, (H5P_CRT_ORDER_TRACKED | H5P_CRT_ORDER_INDEXED));
+ CHECK(ret, FAIL, "H5Pset_attr_creation_order");
+ ret = H5Pget_attr_creation_order(dcpl, &crt_order_flags);
+ CHECK(ret, FAIL, "H5Pget_attr_creation_order");
+ VERIFY(crt_order_flags, (H5P_CRT_ORDER_TRACKED | H5P_CRT_ORDER_INDEXED), "H5Pget_attr_creation_order");
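+ /* (Tracking records each attribute's creation order; indexing additionally maintains an
+ * index so attributes can be iterated over and looked up by creation order) */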
+
+ /* Create dataspace for dataset */
+ sid = H5Screate(H5S_SCALAR);
+ CHECK(sid, FAIL, "H5Screate");
+
+ /* Create a dataset */
+ dataset = H5Dcreate2(fid, DSET1_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT);
+ CHECK(dataset, FAIL, "H5Dcreate2");
+
+ /* Close dataspace */
+ ret = H5Sclose(sid);
+ CHECK(ret, FAIL, "H5Sclose");
+#if 0
+ /* Check on dataset's attribute storage status */
+ is_empty = H5O__is_attr_empty_test(dataset);
+ VERIFY(is_empty, TRUE, "H5O__is_attr_empty_test");
+ is_dense = H5O__is_attr_dense_test(dataset);
+ VERIFY(is_dense, FALSE, "H5O__is_attr_dense_test");
+#endif
+
+ /* Close Dataset */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close property list */
+ ret = H5Pclose(dcpl);
+ CHECK(ret, FAIL, "H5Pclose");
+
+ /* Close file */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Re-open file */
+ fid = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl);
+ CHECK(fid, FAIL, "H5Fopen");
+
+ /* Open dataset created */
+ dataset = H5Dopen2(fid, DSET1_NAME, H5P_DEFAULT);
+ CHECK(dataset, FAIL, "H5Dopen2");
+#if 0
+ /* Check on dataset's attribute storage status */
+ is_empty = H5O__is_attr_empty_test(dataset);
+ VERIFY(is_empty, TRUE, "H5O__is_attr_empty_test");
+ is_dense = H5O__is_attr_dense_test(dataset);
+ VERIFY(is_dense, FALSE, "H5O__is_attr_dense_test");
+#endif
+
+ /* Retrieve dataset creation property list for group */
+ dcpl = H5Dget_create_plist(dataset);
+ CHECK(dcpl, FAIL, "H5Dget_create_plist");
+
+ /* Query the attribute creation properties */
+ ret = H5Pget_attr_creation_order(dcpl, &crt_order_flags);
+ CHECK(ret, FAIL, "H5Pget_attr_creation_order");
+ VERIFY(crt_order_flags, (H5P_CRT_ORDER_TRACKED | H5P_CRT_ORDER_INDEXED), "H5Pget_attr_creation_order");
+
+ /* Close property list */
+ ret = H5Pclose(dcpl);
+ CHECK(ret, FAIL, "H5Pclose");
+
+ /* Close Dataset */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close file */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+} /* test_attr_corder_create_basic() */
+
+/****************************************************************
+**
+** test_attr_corder_create_compact(): Test basic H5A (attribute) code.
+** Tests compact attribute storage on objects with attribute creation order info
+**
+****************************************************************/
+static void
+test_attr_corder_create_compact(hid_t fcpl, hid_t fapl)
+{
+ hid_t fid; /* HDF5 File ID */
+ hid_t dset1, dset2, dset3; /* Dataset IDs */
+ hid_t my_dataset; /* Current dataset ID */
+ hid_t sid; /* Dataspace ID */
+ hid_t attr; /* Attribute ID */
+ hid_t dcpl; /* Dataset creation property list ID */
+ unsigned max_compact; /* Maximum # of attributes to store compactly */
+ unsigned min_dense; /* Minimum # of attributes to store "densely" */
+#if 0
+ htri_t is_empty; /* Are there any attributes? */
+ htri_t is_dense; /* Are attributes stored densely? */
+ hsize_t nattrs; /* Number of attributes on object */
+#endif
+ char attrname[NAME_BUF_SIZE]; /* Name of attribute */
+ unsigned curr_dset; /* Current dataset to work on */
+ unsigned u; /* Local index variable */
+ herr_t ret; /* Generic return value */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing Compact Storage of Attributes with Creation Order Info\n"));
+
+ /* Create file */
+ fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl, fapl);
+ CHECK(fid, FAIL, "H5Fcreate");
+
+ /* Create dataset creation property list */
+ if (dcpl_g == H5P_DEFAULT) {
+ dcpl = H5Pcreate(H5P_DATASET_CREATE);
+ CHECK(dcpl, FAIL, "H5Pcreate");
+ }
+ else {
+ dcpl = H5Pcopy(dcpl_g);
+ CHECK(dcpl, FAIL, "H5Pcopy");
+ }
+
+ /* Set attribute creation order tracking & indexing for object */
+ ret = H5Pset_attr_creation_order(dcpl, (H5P_CRT_ORDER_TRACKED | H5P_CRT_ORDER_INDEXED));
+ CHECK(ret, FAIL, "H5Pset_attr_creation_order");
+
+ /* Query the attribute creation properties */
+ ret = H5Pget_attr_phase_change(dcpl, &max_compact, &min_dense);
+ CHECK(ret, FAIL, "H5Pget_attr_phase_change");
+
+ /* Create dataspace for dataset & attributes */
+ sid = H5Screate(H5S_SCALAR);
+ CHECK(sid, FAIL, "H5Screate");
+
+ /* Create datasets */
+ dset1 = H5Dcreate2(fid, DSET1_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT);
+ CHECK(dset1, FAIL, "H5Dcreate2");
+ dset2 = H5Dcreate2(fid, DSET2_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT);
+ CHECK(dset2, FAIL, "H5Dcreate2");
+ dset3 = H5Dcreate2(fid, DSET3_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT);
+ CHECK(dset3, FAIL, "H5Dcreate2");
+
+ /* Work on all the datasets */
+ for (curr_dset = 0; curr_dset < NUM_DSETS; curr_dset++) {
+ switch (curr_dset) {
+ case 0:
+ my_dataset = dset1;
+ break;
+
+ case 1:
+ my_dataset = dset2;
+ break;
+
+ case 2:
+ my_dataset = dset3;
+ break;
+
+ default:
+ HDassert(0 && "Too many datasets!");
+ } /* end switch */
+#if 0
+ /* Check on dataset's attribute storage status */
+ is_empty = H5O__is_attr_empty_test(my_dataset);
+ VERIFY(is_empty, TRUE, "H5O__is_attr_empty_test");
+ is_dense = H5O__is_attr_dense_test(my_dataset);
+ VERIFY(is_dense, FALSE, "H5O__is_attr_dense_test");
+#endif
+ /* Create several attributes, but keep storage in compact form */
+ for (u = 0; u < max_compact; u++) {
+ /* Create attribute */
+ HDsnprintf(attrname, sizeof(attrname), "attr %02u", u);
+ attr = H5Acreate2(my_dataset, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(attr, FAIL, "H5Acreate2");
+
+ /* Write data into the attribute */
+ ret = H5Awrite(attr, H5T_NATIVE_UINT, &u);
+ CHECK(ret, FAIL, "H5Awrite");
+
+ /* Close attribute */
+ ret = H5Aclose(attr);
+ CHECK(ret, FAIL, "H5Aclose");
+#if 0
+ /* Verify state of object */
+ ret = H5O__num_attrs_test(my_dataset, &nattrs);
+ CHECK(ret, FAIL, "H5O__num_attrs_test");
+ VERIFY(nattrs, (u + 1), "H5O__num_attrs_test");
+ is_empty = H5O__is_attr_empty_test(my_dataset);
+ VERIFY(is_empty, FALSE, "H5O__is_attr_empty_test");
+ is_dense = H5O__is_attr_dense_test(my_dataset);
+ VERIFY(is_dense, FALSE, "H5O__is_attr_dense_test");
+#endif
+ } /* end for */
+ } /* end for */
+
+ /* Close Datasets */
+ ret = H5Dclose(dset1);
+ CHECK(ret, FAIL, "H5Dclose");
+ ret = H5Dclose(dset2);
+ CHECK(ret, FAIL, "H5Dclose");
+ ret = H5Dclose(dset3);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close dataspace */
+ ret = H5Sclose(sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close property list */
+ ret = H5Pclose(dcpl);
+ CHECK(ret, FAIL, "H5Pclose");
+
+ /* Close file */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Re-open file */
+ fid = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl);
+ CHECK(fid, FAIL, "H5Fopen");
+
+ /* Open datasets created */
+ dset1 = H5Dopen2(fid, DSET1_NAME, H5P_DEFAULT);
+ CHECK(dset1, FAIL, "H5Dopen2");
+ dset2 = H5Dopen2(fid, DSET2_NAME, H5P_DEFAULT);
+ CHECK(dset2, FAIL, "H5Dopen2");
+ dset3 = H5Dopen2(fid, DSET3_NAME, H5P_DEFAULT);
+ CHECK(dset3, FAIL, "H5Dopen2");
+
+ /* Work on all the datasets */
+ for (curr_dset = 0; curr_dset < NUM_DSETS; curr_dset++) {
+ switch (curr_dset) {
+ case 0:
+ my_dataset = dset1;
+ break;
+
+ case 1:
+ my_dataset = dset2;
+ break;
+
+ case 2:
+ my_dataset = dset3;
+ break;
+
+ default:
+ HDassert(0 && "Too many datasets!");
+ } /* end switch */
+#if 0
+ /* Check on dataset's attribute storage status */
+ ret = H5O__num_attrs_test(my_dataset, &nattrs);
+ CHECK(ret, FAIL, "H5O__num_attrs_test");
+ VERIFY(nattrs, max_compact, "H5O__num_attrs_test");
+ is_empty = H5O__is_attr_empty_test(my_dataset);
+ VERIFY(is_empty, FALSE, "H5O__is_attr_empty_test");
+ is_dense = H5O__is_attr_dense_test(my_dataset);
+ VERIFY(is_dense, FALSE, "H5O__is_attr_dense_test");
+#endif
+ /* Loop through attributes, checking their creation order values */
+ /* (the name index is used, but the creation order value is in the same order) */
+ for (u = 0; u < max_compact; u++) {
+ H5A_info_t ainfo; /* Attribute information */
+
+ /* Retrieve information for attribute */
+ HDsnprintf(attrname, sizeof(attrname), "attr %02u", u);
+ ret = H5Aget_info_by_name(my_dataset, ".", attrname, &ainfo, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Aget_info_by_name");
+
+ /* Verify creation order of attribute */
+ VERIFY(ainfo.corder_valid, TRUE, "H5Aget_info_by_name");
+ VERIFY(ainfo.corder, u, "H5Aget_info_by_name");
+ } /* end for */
+ } /* end for */
+
+ /* Close Datasets */
+ ret = H5Dclose(dset1);
+ CHECK(ret, FAIL, "H5Dclose");
+ ret = H5Dclose(dset2);
+ CHECK(ret, FAIL, "H5Dclose");
+ ret = H5Dclose(dset3);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close file */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+} /* test_attr_corder_create_compact() */
+
+/****************************************************************
+**
+** test_attr_corder_create_dense(): Test basic H5A (attribute) code.
+** Tests dense attribute storage on objects with attribute creation order info
+**
+****************************************************************/
+static void
+test_attr_corder_create_dense(hid_t fcpl, hid_t fapl)
+{
+ hid_t fid; /* HDF5 File ID */
+ hid_t dset1, dset2, dset3; /* Dataset IDs */
+ hid_t my_dataset; /* Current dataset ID */
+ hid_t sid; /* Dataspace ID */
+ hid_t attr; /* Attribute ID */
+ hid_t dcpl; /* Dataset creation property list ID */
+ unsigned max_compact; /* Maximum # of attributes to store compactly */
+ unsigned min_dense; /* Minimum # of attributes to store "densely" */
+#if 0
+ htri_t is_empty; /* Are there any attributes? */
+ htri_t is_dense; /* Are attributes stored densely? */
+ hsize_t nattrs; /* Number of attributes on object */
+ hsize_t name_count; /* # of records in name index */
+ hsize_t corder_count; /* # of records in creation order index */
+#endif
+ char attrname[NAME_BUF_SIZE]; /* Name of attribute */
+ unsigned curr_dset; /* Current dataset to work on */
+ unsigned u; /* Local index variable */
+ herr_t ret; /* Generic return value */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing Dense Storage of Attributes with Creation Order Info\n"));
+
+ /* Create file */
+ fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl, fapl);
+ CHECK(fid, FAIL, "H5Fcreate");
+
+ /* Create dataset creation property list */
+ if (dcpl_g == H5P_DEFAULT) {
+ dcpl = H5Pcreate(H5P_DATASET_CREATE);
+ CHECK(dcpl, FAIL, "H5Pcreate");
+ }
+ else {
+ dcpl = H5Pcopy(dcpl_g);
+ CHECK(dcpl, FAIL, "H5Pcopy");
+ }
+
+ /* Set attribute creation order tracking & indexing for object */
+ ret = H5Pset_attr_creation_order(dcpl, (H5P_CRT_ORDER_TRACKED | H5P_CRT_ORDER_INDEXED));
+ CHECK(ret, FAIL, "H5Pset_attr_creation_order");
+
+ /* Query the attribute creation properties */
+ ret = H5Pget_attr_phase_change(dcpl, &max_compact, &min_dense);
+ CHECK(ret, FAIL, "H5Pget_attr_phase_change");
+
+ /* Create dataspace for dataset & attributes */
+ sid = H5Screate(H5S_SCALAR);
+ CHECK(sid, FAIL, "H5Screate");
+
+ /* Create datasets */
+ dset1 = H5Dcreate2(fid, DSET1_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT);
+ CHECK(dset1, FAIL, "H5Dcreate2");
+ dset2 = H5Dcreate2(fid, DSET2_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT);
+ CHECK(dset2, FAIL, "H5Dcreate2");
+ dset3 = H5Dcreate2(fid, DSET3_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT);
+ CHECK(dset3, FAIL, "H5Dcreate2");
+
+ /* Work on all the datasets */
+ for (curr_dset = 0; curr_dset < NUM_DSETS; curr_dset++) {
+ switch (curr_dset) {
+ case 0:
+ my_dataset = dset1;
+ break;
+
+ case 1:
+ my_dataset = dset2;
+ break;
+
+ case 2:
+ my_dataset = dset3;
+ break;
+
+ default:
+ HDassert(0 && "Too many datasets!");
+ } /* end switch */
+#if 0
+ /* Check on dataset's attribute storage status */
+ is_empty = H5O__is_attr_empty_test(my_dataset);
+ VERIFY(is_empty, TRUE, "H5O__is_attr_empty_test");
+ is_dense = H5O__is_attr_dense_test(my_dataset);
+ VERIFY(is_dense, FALSE, "H5O__is_attr_dense_test");
+#endif
+ /* Create several attributes, but keep storage in compact form */
+ for (u = 0; u < max_compact; u++) {
+ /* Create attribute */
+ HDsnprintf(attrname, sizeof(attrname), "attr %02u", u);
+ attr = H5Acreate2(my_dataset, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(attr, FAIL, "H5Acreate2");
+
+ /* Write data into the attribute */
+ ret = H5Awrite(attr, H5T_NATIVE_UINT, &u);
+ CHECK(ret, FAIL, "H5Awrite");
+
+ /* Close attribute */
+ ret = H5Aclose(attr);
+ CHECK(ret, FAIL, "H5Aclose");
+#if 0
+ /* Verify state of object */
+ ret = H5O__num_attrs_test(my_dataset, &nattrs);
+ CHECK(ret, FAIL, "H5O__num_attrs_test");
+ VERIFY(nattrs, (u + 1), "H5O__num_attrs_test");
+ is_empty = H5O__is_attr_empty_test(my_dataset);
+ VERIFY(is_empty, FALSE, "H5O__is_attr_empty_test");
+ is_dense = H5O__is_attr_dense_test(my_dataset);
+ VERIFY(is_dense, FALSE, "H5O__is_attr_dense_test");
+#endif
+ } /* end for */
+
+ /* Create another attribute, to push into dense storage */
+ HDsnprintf(attrname, sizeof(attrname), "attr %02u", max_compact);
+ attr = H5Acreate2(my_dataset, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(attr, FAIL, "H5Acreate2");
+
+ /* Write data into the attribute */
+ ret = H5Awrite(attr, H5T_NATIVE_UINT, &u);
+ CHECK(ret, FAIL, "H5Awrite");
+
+ /* Close attribute */
+ ret = H5Aclose(attr);
+ CHECK(ret, FAIL, "H5Aclose");
+#if 0
+ /* Verify state of object */
+ ret = H5O__num_attrs_test(my_dataset, &nattrs);
+ CHECK(ret, FAIL, "H5O__num_attrs_test");
+ VERIFY(nattrs, (max_compact + 1), "H5O__num_attrs_test");
+ is_empty = H5O__is_attr_empty_test(my_dataset);
+ VERIFY(is_empty, FALSE, "H5O__is_attr_empty_test");
+ is_dense = H5O__is_attr_dense_test(my_dataset);
+ VERIFY(is_dense, TRUE, "H5O__is_attr_dense_test");
+
+ /* Retrieve & verify # of records in the name & creation order indices */
+ ret = H5O__attr_dense_info_test(my_dataset, &name_count, &corder_count);
+ CHECK(ret, FAIL, "H5O__attr_dense_info_test");
+ VERIFY(name_count, corder_count, "H5O__attr_dense_info_test");
+#endif
+ } /* end for */
+
+ /* Close Datasets */
+ ret = H5Dclose(dset1);
+ CHECK(ret, FAIL, "H5Dclose");
+ ret = H5Dclose(dset2);
+ CHECK(ret, FAIL, "H5Dclose");
+ ret = H5Dclose(dset3);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close dataspace */
+ ret = H5Sclose(sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close property list */
+ ret = H5Pclose(dcpl);
+ CHECK(ret, FAIL, "H5Pclose");
+
+ /* Close file */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Re-open file */
+ fid = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl);
+ CHECK(fid, FAIL, "H5Fopen");
+
+ /* Open datasets created */
+ dset1 = H5Dopen2(fid, DSET1_NAME, H5P_DEFAULT);
+ CHECK(dset1, FAIL, "H5Dopen2");
+ dset2 = H5Dopen2(fid, DSET2_NAME, H5P_DEFAULT);
+ CHECK(dset2, FAIL, "H5Dopen2");
+ dset3 = H5Dopen2(fid, DSET3_NAME, H5P_DEFAULT);
+ CHECK(dset3, FAIL, "H5Dopen2");
+
+ /* Work on all the datasets */
+ for (curr_dset = 0; curr_dset < NUM_DSETS; curr_dset++) {
+ switch (curr_dset) {
+ case 0:
+ my_dataset = dset1;
+ break;
+
+ case 1:
+ my_dataset = dset2;
+ break;
+
+ case 2:
+ my_dataset = dset3;
+ break;
+
+ default:
+ HDassert(0 && "Too many datasets!");
+ } /* end switch */
+#if 0
+ /* Check on dataset's attribute storage status */
+ ret = H5O__num_attrs_test(my_dataset, &nattrs);
+ CHECK(ret, FAIL, "H5O__num_attrs_test");
+ VERIFY(nattrs, (max_compact + 1), "H5O__num_attrs_test");
+ is_empty = H5O__is_attr_empty_test(my_dataset);
+ VERIFY(is_empty, FALSE, "H5O__is_attr_empty_test");
+ is_dense = H5O__is_attr_dense_test(my_dataset);
+ VERIFY(is_dense, TRUE, "H5O__is_attr_dense_test");
+#endif
+ /* Loop through attributes, checking their creation order values */
+ /* (the name index is used, but the creation order value is in the same order) */
+ for (u = 0; u < (max_compact + 1); u++) {
+ H5A_info_t ainfo; /* Attribute information */
+
+ /* Retrieve information for attribute */
+ HDsnprintf(attrname, sizeof(attrname), "attr %02u", u);
+ ret = H5Aget_info_by_name(my_dataset, ".", attrname, &ainfo, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Aget_info_by_name");
+
+ /* Verify creation order of attribute */
+ VERIFY(ainfo.corder_valid, TRUE, "H5Aget_info_by_name");
+ VERIFY(ainfo.corder, u, "H5Aget_info_by_name");
+ } /* end for */
+ } /* end for */
+
+ /* Close Datasets */
+ ret = H5Dclose(dset1);
+ CHECK(ret, FAIL, "H5Dclose");
+ ret = H5Dclose(dset2);
+ CHECK(ret, FAIL, "H5Dclose");
+ ret = H5Dclose(dset3);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close file */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+} /* test_attr_corder_create_dense() */
+
+/****************************************************************
+**
+** test_attr_corder_create_reopen(): Test basic H5A (attribute) code.
+** Tests creating attributes when a file created with the new format is
+** reopened using the old format
+**
+****************************************************************/
+static void
+test_attr_corder_create_reopen(hid_t fcpl, hid_t fapl)
+{
+ hid_t fid = -1; /* File ID */
+ hid_t gcpl_id = -1; /* Group creation property list ID */
+ hid_t gid = -1; /* Group ID */
+ hid_t sid = -1; /* Dataspace ID */
+ hid_t aid = -1; /* Attribute ID */
+ int buf; /* Attribute data */
+ herr_t ret; /* Generic return value */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing Creating Attributes w/New & Old Format\n"));
+
+ /* Create dataspace for attributes */
+ sid = H5Screate(H5S_SCALAR);
+ CHECK(sid, FAIL, "H5Screate");
+
+ /* Create file */
+ fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl, fapl);
+ CHECK(fid, FAIL, "H5Fcreate");
+
+ /* Create group */
+ gcpl_id = H5Pcreate(H5P_GROUP_CREATE);
+ CHECK(gcpl_id, FAIL, "H5Pcreate");
+ ret = H5Pset_attr_creation_order(gcpl_id, H5P_CRT_ORDER_TRACKED | H5P_CRT_ORDER_INDEXED);
+ CHECK(ret, FAIL, "H5Pset_attr_creation_order");
+ gid = H5Gcreate2(fid, GROUP1_NAME, H5P_DEFAULT, gcpl_id, H5P_DEFAULT);
+ CHECK(gid, FAIL, "H5Gcreate2");
+
+ /* Create a couple of attributes */
+ aid = H5Acreate2(gid, "attr-003", H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(aid, FAIL, "H5Acreate2");
+ buf = 3;
+ ret = H5Awrite(aid, H5T_NATIVE_INT, &buf);
+ CHECK(ret, FAIL, "H5Awrite");
+ ret = H5Aclose(aid);
+ CHECK(ret, FAIL, "H5Aclose");
+
+ aid = H5Acreate2(gid, "attr-004", H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(aid, FAIL, "H5Acreate2");
+ buf = 4;
+ ret = H5Awrite(aid, H5T_NATIVE_INT, &buf);
+ CHECK(ret, FAIL, "H5Awrite");
+ ret = H5Aclose(aid);
+ CHECK(ret, FAIL, "H5Aclose");
+
+ /***** Close group & GCPL *****/
+ ret = H5Gclose(gid);
+ CHECK(ret, FAIL, "H5Gclose");
+ ret = H5Pclose(gcpl_id);
+ CHECK(ret, FAIL, "H5Pclose");
+
+ /* Close file */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Re-open file, without "use the latest format" flag */
+ fid = H5Fopen(FILENAME, H5F_ACC_RDWR, H5P_DEFAULT);
+ CHECK(fid, FAIL, "H5Fopen");
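+ /* (The group keeps the creation-order settings it was created with, so the attribute
+ * operations below should still work even though the file is now reopened with a
+ * default, old-format FAPL) */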
+
+ /* Re-open group */
+ gid = H5Gopen2(fid, GROUP1_NAME, H5P_DEFAULT);
+ CHECK(gid, FAIL, "H5Gopen2");
+
+ /* Delete attribute */
+ ret = H5Adelete(gid, "attr-003");
+ CHECK(ret, FAIL, "H5Adelete");
+
+ /* Create some additional attributes */
+ aid = H5Acreate2(gid, "attr-008", H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(aid, FAIL, "H5Acreate2");
+ buf = 8;
+ ret = H5Awrite(aid, H5T_NATIVE_INT, &buf);
+ CHECK(ret, FAIL, "H5Awrite");
+ ret = H5Aclose(aid);
+ CHECK(ret, FAIL, "H5Aclose");
+
+ aid = H5Acreate2(gid, "attr-006", H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(aid, FAIL, "H5Acreate2");
+ buf = 6;
+ ret = H5Awrite(aid, H5T_NATIVE_INT, &buf);
+ CHECK(ret, FAIL, "H5Awrite");
+ ret = H5Aclose(aid);
+ CHECK(ret, FAIL, "H5Aclose");
+
+ /***** Close group *****/
+ ret = H5Gclose(gid);
+ CHECK(ret, FAIL, "H5Gclose");
+
+ /* Close file */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Close attribute dataspace */
+ ret = H5Sclose(sid);
+ CHECK(ret, FAIL, "H5Sclose");
+} /* test_attr_corder_create_reopen() */
+
+/****************************************************************
+**
+** test_attr_corder_transition(): Test basic H5A (attribute) code.
+** Tests attribute storage transitions on objects with attribute creation order info
+**
+****************************************************************/
+static void
+test_attr_corder_transition(hid_t fcpl, hid_t fapl)
+{
+ hid_t fid; /* HDF5 File ID */
+ hid_t dset1, dset2, dset3; /* Dataset IDs */
+ hid_t my_dataset; /* Current dataset ID */
+ hid_t sid; /* Dataspace ID */
+ hid_t attr; /* Attribute ID */
+ hid_t dcpl; /* Dataset creation property list ID */
+ unsigned max_compact; /* Maximum # of attributes to store compactly */
+ unsigned min_dense; /* Minimum # of attributes to store "densely" */
+#if 0
+ htri_t is_empty; /* Are there any attributes? */
+ htri_t is_dense; /* Are attributes stored densely? */
+ hsize_t nattrs; /* Number of attributes on object */
+ hsize_t name_count; /* # of records in name index */
+ hsize_t corder_count; /* # of records in creation order index */
+#endif
+ char attrname[NAME_BUF_SIZE]; /* Name of attribute */
+ unsigned curr_dset; /* Current dataset to work on */
+ unsigned u; /* Local index variable */
+ herr_t ret; /* Generic return value */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing Storage Transitions of Attributes with Creation Order Info\n"));
+
+ /* Create file */
+ fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl, fapl);
+ CHECK(fid, FAIL, "H5Fcreate");
+
+ /* Create dataset creation property list */
+ if (dcpl_g == H5P_DEFAULT) {
+ dcpl = H5Pcreate(H5P_DATASET_CREATE);
+ CHECK(dcpl, FAIL, "H5Pcreate");
+ }
+ else {
+ dcpl = H5Pcopy(dcpl_g);
+ CHECK(dcpl, FAIL, "H5Pcopy");
+ }
+
+ /* Set attribute creation order tracking & indexing for object */
+ ret = H5Pset_attr_creation_order(dcpl, (H5P_CRT_ORDER_TRACKED | H5P_CRT_ORDER_INDEXED));
+ CHECK(ret, FAIL, "H5Pset_attr_creation_order");
+
+ /* Query the attribute creation properties */
+ ret = H5Pget_attr_phase_change(dcpl, &max_compact, &min_dense);
+ CHECK(ret, FAIL, "H5Pget_attr_phase_change");
+
+ /* Create dataspace for dataset & attributes */
+ sid = H5Screate(H5S_SCALAR);
+ CHECK(sid, FAIL, "H5Screate");
+
+ /* XXX: Try to find a way to resize dataset's object header so that the object
+ * header can have one chunk, then retrieve "empty" file size and check
+ * that size after everything is deleted -QAK
+ */
+ /* Create datasets */
+ dset1 = H5Dcreate2(fid, DSET1_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT);
+ CHECK(dset1, FAIL, "H5Dcreate2");
+ dset2 = H5Dcreate2(fid, DSET2_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT);
+ CHECK(dset2, FAIL, "H5Dcreate2");
+ dset3 = H5Dcreate2(fid, DSET3_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT);
+ CHECK(dset3, FAIL, "H5Dcreate2");
+
+ /* Work on all the datasets */
+ for (curr_dset = 0; curr_dset < NUM_DSETS; curr_dset++) {
+ switch (curr_dset) {
+ case 0:
+ my_dataset = dset1;
+ break;
+
+ case 1:
+ my_dataset = dset2;
+ break;
+
+ case 2:
+ my_dataset = dset3;
+ break;
+
+ default:
+ HDassert(0 && "Too many datasets!");
+ } /* end switch */
+#if 0
+ /* Check on dataset's attribute storage status */
+ is_empty = H5O__is_attr_empty_test(my_dataset);
+ VERIFY(is_empty, TRUE, "H5O__is_attr_empty_test");
+ is_dense = H5O__is_attr_dense_test(my_dataset);
+ VERIFY(is_dense, FALSE, "H5O__is_attr_dense_test");
+#endif
+ } /* end for */
+
+ /* Close Datasets */
+ ret = H5Dclose(dset1);
+ CHECK(ret, FAIL, "H5Dclose");
+ ret = H5Dclose(dset2);
+ CHECK(ret, FAIL, "H5Dclose");
+ ret = H5Dclose(dset3);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close property list */
+ ret = H5Pclose(dcpl);
+ CHECK(ret, FAIL, "H5Pclose");
+
+ /* Close file */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Re-open file */
+ fid = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl);
+ CHECK(fid, FAIL, "H5Fopen");
+
+ /* Open datasets created */
+ dset1 = H5Dopen2(fid, DSET1_NAME, H5P_DEFAULT);
+ CHECK(dset1, FAIL, "H5Dopen2");
+ dset2 = H5Dopen2(fid, DSET2_NAME, H5P_DEFAULT);
+ CHECK(dset2, FAIL, "H5Dopen2");
+ dset3 = H5Dopen2(fid, DSET3_NAME, H5P_DEFAULT);
+ CHECK(dset3, FAIL, "H5Dopen2");
+
+ /* Work on all the datasets */
+ for (curr_dset = 0; curr_dset < NUM_DSETS; curr_dset++) {
+ switch (curr_dset) {
+ case 0:
+ my_dataset = dset1;
+ break;
+
+ case 1:
+ my_dataset = dset2;
+ break;
+
+ case 2:
+ my_dataset = dset3;
+ break;
+
+ default:
+ HDassert(0 && "Too many datasets!");
+ } /* end switch */
+
+ /* Create several attributes, but keep storage in compact form */
+ for (u = 0; u < max_compact; u++) {
+ /* Create attribute */
+ HDsnprintf(attrname, sizeof(attrname), "attr %02u", u);
+ attr = H5Acreate2(my_dataset, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(attr, FAIL, "H5Acreate2");
+
+ /* Write data into the attribute */
+ ret = H5Awrite(attr, H5T_NATIVE_UINT, &u);
+ CHECK(ret, FAIL, "H5Awrite");
+
+ /* Close attribute */
+ ret = H5Aclose(attr);
+ CHECK(ret, FAIL, "H5Aclose");
+#if 0
+ /* Verify state of object */
+ ret = H5O__num_attrs_test(my_dataset, &nattrs);
+ CHECK(ret, FAIL, "H5O__num_attrs_test");
+ VERIFY(nattrs, (u + 1), "H5O__num_attrs_test");
+ is_empty = H5O__is_attr_empty_test(my_dataset);
+ VERIFY(is_empty, FALSE, "H5O__is_attr_empty_test");
+ is_dense = H5O__is_attr_dense_test(my_dataset);
+ VERIFY(is_dense, FALSE, "H5O__is_attr_dense_test");
+#endif
+ } /* end for */
+
+ /* Create another attribute, to push into dense storage */
+ HDsnprintf(attrname, sizeof(attrname), "attr %02u", max_compact);
+ attr = H5Acreate2(my_dataset, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(attr, FAIL, "H5Acreate2");
+
+ /* Write data into the attribute */
+ ret = H5Awrite(attr, H5T_NATIVE_UINT, &u);
+ CHECK(ret, FAIL, "H5Awrite");
+
+ /* Close attribute */
+ ret = H5Aclose(attr);
+ CHECK(ret, FAIL, "H5Aclose");
+#if 0
+ /* Verify state of object */
+ ret = H5O__num_attrs_test(my_dataset, &nattrs);
+ CHECK(ret, FAIL, "H5O__num_attrs_test");
+ VERIFY(nattrs, (max_compact + 1), "H5O__num_attrs_test");
+ is_empty = H5O__is_attr_empty_test(my_dataset);
+ VERIFY(is_empty, FALSE, "H5O__is_attr_empty_test");
+ is_dense = H5O__is_attr_dense_test(my_dataset);
+ VERIFY(is_dense, TRUE, "H5O__is_attr_dense_test");
+
+ /* Retrieve & verify # of records in the name & creation order indices */
+ ret = H5O__attr_dense_info_test(my_dataset, &name_count, &corder_count);
+ CHECK(ret, FAIL, "H5O__attr_dense_info_test");
+ VERIFY(name_count, corder_count, "H5O__attr_dense_info_test");
+#endif
+ /* Delete attributes from the object, down to the min_dense threshold (storage stays dense until the count drops below it) */
+ for (u = max_compact; u >= min_dense; u--) {
+ HDsnprintf(attrname, sizeof(attrname), "attr %02u", u);
+ ret = H5Adelete(my_dataset, attrname);
+ CHECK(ret, FAIL, "H5Adelete");
+#if 0
+ /* Verify state of object */
+ ret = H5O__num_attrs_test(my_dataset, &nattrs);
+ CHECK(ret, FAIL, "H5O__num_attrs_test");
+ VERIFY(nattrs, u, "H5O__num_attrs_test");
+ is_empty = H5O__is_attr_empty_test(my_dataset);
+ VERIFY(is_empty, FALSE, "H5O__is_attr_empty_test");
+ is_dense = H5O__is_attr_dense_test(my_dataset);
+ VERIFY(is_dense, TRUE, "H5O__is_attr_dense_test");
+
+ /* Retrieve & verify # of records in the name & creation order indices */
+ ret = H5O__attr_dense_info_test(my_dataset, &name_count, &corder_count);
+ CHECK(ret, FAIL, "H5O__attr_dense_info_test");
+ VERIFY(name_count, corder_count, "H5O__attr_dense_info_test");
+#endif
+ } /* end for */
+
+ /* Delete another attribute, to push attribute storage into compact form */
+ HDsnprintf(attrname, sizeof(attrname), "attr %02u", (min_dense - 1));
+ ret = H5Adelete(my_dataset, attrname);
+ CHECK(ret, FAIL, "H5Adelete");
+#if 0
+ /* Verify state of object */
+ ret = H5O__num_attrs_test(my_dataset, &nattrs);
+ CHECK(ret, FAIL, "H5O__num_attrs_test");
+ VERIFY(nattrs, (min_dense - 1), "H5O__num_attrs_test");
+ is_empty = H5O__is_attr_empty_test(my_dataset);
+ VERIFY(is_empty, FALSE, "H5O__is_attr_empty_test");
+ is_dense = H5O__is_attr_dense_test(my_dataset);
+ VERIFY(is_dense, FALSE, "H5O__is_attr_dense_test");
+#endif
+ /* Re-add attributes to get back into dense form */
+ for (u = (min_dense - 1); u < (max_compact + 1); u++) {
+ /* Create attribute */
+ HDsnprintf(attrname, sizeof(attrname), "attr %02u", u);
+ attr = H5Acreate2(my_dataset, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(attr, FAIL, "H5Acreate2");
+
+ /* Write data into the attribute */
+ ret = H5Awrite(attr, H5T_NATIVE_UINT, &u);
+ CHECK(ret, FAIL, "H5Awrite");
+
+ /* Close attribute */
+ ret = H5Aclose(attr);
+ CHECK(ret, FAIL, "H5Aclose");
+ } /* end for */
+#if 0
+ /* Verify state of object */
+ ret = H5O__num_attrs_test(my_dataset, &nattrs);
+ CHECK(ret, FAIL, "H5O__num_attrs_test");
+ VERIFY(nattrs, (max_compact + 1), "H5O__num_attrs_test");
+ is_empty = H5O__is_attr_empty_test(my_dataset);
+ VERIFY(is_empty, FALSE, "H5O__is_attr_empty_test");
+ is_dense = H5O__is_attr_dense_test(my_dataset);
+ VERIFY(is_dense, TRUE, "H5O__is_attr_dense_test");
+
+ /* Retrieve & verify # of records in the name & creation order indices */
+ ret = H5O__attr_dense_info_test(my_dataset, &name_count, &corder_count);
+ CHECK(ret, FAIL, "H5O__attr_dense_info_test");
+ VERIFY(name_count, corder_count, "H5O__attr_dense_info_test");
+#endif
+ } /* end for */
+
+ /* Close Datasets */
+ ret = H5Dclose(dset1);
+ CHECK(ret, FAIL, "H5Dclose");
+ ret = H5Dclose(dset2);
+ CHECK(ret, FAIL, "H5Dclose");
+ ret = H5Dclose(dset3);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close file */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Re-open file */
+ fid = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl);
+ CHECK(fid, FAIL, "H5Fopen");
+
+ /* Open datasets created */
+ dset1 = H5Dopen2(fid, DSET1_NAME, H5P_DEFAULT);
+ CHECK(dset1, FAIL, "H5Dopen2");
+ dset2 = H5Dopen2(fid, DSET2_NAME, H5P_DEFAULT);
+ CHECK(dset2, FAIL, "H5Dopen2");
+ dset3 = H5Dopen2(fid, DSET3_NAME, H5P_DEFAULT);
+ CHECK(dset3, FAIL, "H5Dopen2");
+
+ /* Work on all the datasets */
+ for (curr_dset = 0; curr_dset < NUM_DSETS; curr_dset++) {
+ switch (curr_dset) {
+ case 0:
+ my_dataset = dset1;
+ break;
+
+ case 1:
+ my_dataset = dset2;
+ break;
+
+ case 2:
+ my_dataset = dset3;
+ break;
+
+ default:
+ HDassert(0 && "Too many datasets!");
+ } /* end switch */
+#if 0
+ /* Check on dataset's attribute storage status */
+ ret = H5O__num_attrs_test(my_dataset, &nattrs);
+ CHECK(ret, FAIL, "H5O__num_attrs_test");
+ VERIFY(nattrs, (max_compact + 1), "H5O__num_attrs_test");
+ is_empty = H5O__is_attr_empty_test(my_dataset);
+ VERIFY(is_empty, FALSE, "H5O__is_attr_empty_test");
+ is_dense = H5O__is_attr_dense_test(my_dataset);
+ VERIFY(is_dense, TRUE, "H5O__is_attr_dense_test");
+
+ /* Retrieve & verify # of records in the name & creation order indices */
+ ret = H5O__attr_dense_info_test(my_dataset, &name_count, &corder_count);
+ CHECK(ret, FAIL, "H5O__attr_dense_info_test");
+ VERIFY(name_count, corder_count, "H5O__attr_dense_info_test");
+#endif
+            /* Delete attributes from the object until only 'min_dense' remain (storage is still dense) */
+ for (u = max_compact; u >= min_dense; u--) {
+ HDsnprintf(attrname, sizeof(attrname), "attr %02u", u);
+ ret = H5Adelete(my_dataset, attrname);
+ CHECK(ret, FAIL, "H5Adelete");
+#if 0
+ /* Verify state of object */
+ ret = H5O__num_attrs_test(my_dataset, &nattrs);
+ CHECK(ret, FAIL, "H5O__num_attrs_test");
+ VERIFY(nattrs, u, "H5O__num_attrs_test");
+ is_empty = H5O__is_attr_empty_test(my_dataset);
+ VERIFY(is_empty, FALSE, "H5O__is_attr_empty_test");
+ is_dense = H5O__is_attr_dense_test(my_dataset);
+ VERIFY(is_dense, TRUE, "H5O__is_attr_dense_test");
+
+ /* Retrieve & verify # of records in the name & creation order indices */
+ ret = H5O__attr_dense_info_test(my_dataset, &name_count, &corder_count);
+ CHECK(ret, FAIL, "H5O__attr_dense_info_test");
+ VERIFY(name_count, corder_count, "H5O__attr_dense_info_test");
+#endif
+ } /* end for */
+
+ /* Delete another attribute, to push attribute storage into compact form */
+ HDsnprintf(attrname, sizeof(attrname), "attr %02u", (min_dense - 1));
+ ret = H5Adelete(my_dataset, attrname);
+ CHECK(ret, FAIL, "H5Adelete");
+#if 0
+ /* Verify state of object */
+ ret = H5O__num_attrs_test(my_dataset, &nattrs);
+ CHECK(ret, FAIL, "H5O__num_attrs_test");
+ VERIFY(nattrs, (min_dense - 1), "H5O__num_attrs_test");
+ is_empty = H5O__is_attr_empty_test(my_dataset);
+ VERIFY(is_empty, FALSE, "H5O__is_attr_empty_test");
+ is_dense = H5O__is_attr_dense_test(my_dataset);
+ VERIFY(is_dense, FALSE, "H5O__is_attr_dense_test");
+#endif
+ /* Re-add attributes to get back into dense form */
+ for (u = (min_dense - 1); u < (max_compact + 1); u++) {
+ /* Create attribute */
+ HDsnprintf(attrname, sizeof(attrname), "attr %02u", u);
+ attr = H5Acreate2(my_dataset, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(attr, FAIL, "H5Acreate2");
+
+ /* Write data into the attribute */
+ ret = H5Awrite(attr, H5T_NATIVE_UINT, &u);
+ CHECK(ret, FAIL, "H5Awrite");
+
+ /* Close attribute */
+ ret = H5Aclose(attr);
+ CHECK(ret, FAIL, "H5Aclose");
+ } /* end for */
+#if 0
+ /* Verify state of object */
+ ret = H5O__num_attrs_test(my_dataset, &nattrs);
+ CHECK(ret, FAIL, "H5O__num_attrs_test");
+ VERIFY(nattrs, (max_compact + 1), "H5O__num_attrs_test");
+ is_empty = H5O__is_attr_empty_test(my_dataset);
+ VERIFY(is_empty, FALSE, "H5O__is_attr_empty_test");
+ is_dense = H5O__is_attr_dense_test(my_dataset);
+ VERIFY(is_dense, TRUE, "H5O__is_attr_dense_test");
+
+ /* Retrieve & verify # of records in the name & creation order indices */
+ ret = H5O__attr_dense_info_test(my_dataset, &name_count, &corder_count);
+ CHECK(ret, FAIL, "H5O__attr_dense_info_test");
+ VERIFY(name_count, corder_count, "H5O__attr_dense_info_test");
+#endif
+ /* Delete all attributes */
+ for (u = max_compact; u > 0; u--) {
+ HDsnprintf(attrname, sizeof(attrname), "attr %02u", u);
+ ret = H5Adelete(my_dataset, attrname);
+ CHECK(ret, FAIL, "H5Adelete");
+ } /* end for */
+ HDsnprintf(attrname, sizeof(attrname), "attr %02u", 0);
+ ret = H5Adelete(my_dataset, attrname);
+ CHECK(ret, FAIL, "H5Adelete");
+ } /* end for */
+
+ /* Close Datasets */
+ ret = H5Dclose(dset1);
+ CHECK(ret, FAIL, "H5Dclose");
+ ret = H5Dclose(dset2);
+ CHECK(ret, FAIL, "H5Dclose");
+ ret = H5Dclose(dset3);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close file */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Close dataspace */
+ ret = H5Sclose(sid);
+ CHECK(ret, FAIL, "H5Sclose");
+} /* test_attr_corder_transition() */
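+
+/****************************************************************
+**
+** Illustrative sketch (hedged, not called by any test here): the
+** compact <-> dense transitions exercised above are governed by the
+** attribute phase-change thresholds on a dataset creation property
+** list.  The helper name and threshold values below are hypothetical;
+** only the documented H5Pset/get_attr_phase_change calls are used.
+**
+****************************************************************/
+#if 0
+static herr_t
+example_set_attr_phase_change(hid_t dcpl_id)
+{
+    unsigned max_compact = 8; /* switch to dense storage above this many attributes */
+    unsigned min_dense   = 6; /* switch back to compact below this many attributes  */
+    unsigned chk_compact, chk_dense;
+
+    /* Set the thresholds on the DCPL (max_compact must be >= min_dense) */
+    if (H5Pset_attr_phase_change(dcpl_id, max_compact, min_dense) < 0)
+        return FAIL;
+
+    /* Read the thresholds back to confirm they were stored */
+    if (H5Pget_attr_phase_change(dcpl_id, &chk_compact, &chk_dense) < 0)
+        return FAIL;
+
+    return (chk_compact == max_compact && chk_dense == min_dense) ? SUCCEED : FAIL;
+}
+#endif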
+
+/****************************************************************
+**
+** test_attr_corder_delete(): Test basic H5A (attribute) code.
+** Tests deleting objects that have dense attribute storage and attribute creation order info
+**
+****************************************************************/
+static void
+test_attr_corder_delete(hid_t fcpl, hid_t fapl)
+{
+ hid_t fid; /* HDF5 File ID */
+ hid_t dset1, dset2, dset3; /* Dataset IDs */
+ hid_t my_dataset; /* Current dataset ID */
+ hid_t sid; /* Dataspace ID */
+ hid_t attr; /* Attribute ID */
+ hid_t dcpl; /* Dataset creation property list ID */
+    unsigned max_compact; /* Maximum # of attributes to store compactly on the object */
+    unsigned min_dense;   /* Minimum # of attributes to store "densely" on the object */
+#if 0
+ htri_t is_empty; /* Are there any attributes? */
+ htri_t is_dense; /* Are attributes stored densely? */
+ hsize_t nattrs; /* Number of attributes on object */
+ hsize_t name_count; /* # of records in name index */
+ hsize_t corder_count; /* # of records in creation order index */
+#endif
+    unsigned reopen_file; /* Whether to re-open the file before deleting the datasets */
+ char attrname[NAME_BUF_SIZE]; /* Name of attribute */
+#ifdef LATER
+ h5_stat_size_t empty_size; /* Size of empty file */
+ h5_stat_size_t file_size; /* Size of file after operating on it */
+#endif /* LATER */
+ unsigned curr_dset; /* Current dataset to work on */
+ unsigned u; /* Local index variable */
+ herr_t ret; /* Generic return value */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing Deleting Object w/Dense Attribute Storage and Creation Order Info\n"));
+
+ /* Create dataspace for dataset & attributes */
+ sid = H5Screate(H5S_SCALAR);
+ CHECK(sid, FAIL, "H5Screate");
+
+ /* Create dataset creation property list */
+ if (dcpl_g == H5P_DEFAULT) {
+ dcpl = H5Pcreate(H5P_DATASET_CREATE);
+ CHECK(dcpl, FAIL, "H5Pcreate");
+ }
+ else {
+ dcpl = H5Pcopy(dcpl_g);
+ CHECK(dcpl, FAIL, "H5Pcopy");
+ }
+
+ /* Set attribute creation order tracking & indexing for object */
+ ret = H5Pset_attr_creation_order(dcpl, (H5P_CRT_ORDER_TRACKED | H5P_CRT_ORDER_INDEXED));
+ CHECK(ret, FAIL, "H5Pset_attr_creation_order");
+
+ /* Query the attribute creation properties */
+ ret = H5Pget_attr_phase_change(dcpl, &max_compact, &min_dense);
+ CHECK(ret, FAIL, "H5Pget_attr_phase_change");
+
+/* XXX: Try to find a way to resize dataset's object header so that the object
+ * header can have one chunk, then retrieve "empty" file size and check
+ * that size after everything is deleted -QAK
+ */
+#ifdef LATER
+ /* Create empty file */
+ fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl, fapl);
+ CHECK(fid, FAIL, "H5Fcreate");
+
+ /* Close file */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Get the size of an empty file */
+ empty_size = h5_get_file_size(FILENAME);
+ CHECK(empty_size, FAIL, "h5_get_file_size");
+#endif /* LATER */
+
+ /* Loop to leave file open when deleting dataset, or to close & re-open file
+ * before deleting dataset */
+ for (reopen_file = FALSE; reopen_file <= TRUE; reopen_file++) {
+ /* Create test file */
+ fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl, fapl);
+        CHECK(fid, FAIL, "H5Fcreate");
+
+ /* Create datasets */
+ dset1 = H5Dcreate2(fid, DSET1_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT);
+ CHECK(dset1, FAIL, "H5Dcreate2");
+ dset2 = H5Dcreate2(fid, DSET2_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT);
+ CHECK(dset2, FAIL, "H5Dcreate2");
+ dset3 = H5Dcreate2(fid, DSET3_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT);
+ CHECK(dset3, FAIL, "H5Dcreate2");
+
+ /* Work on all the datasets */
+ for (curr_dset = 0; curr_dset < NUM_DSETS; curr_dset++) {
+ switch (curr_dset) {
+ case 0:
+ my_dataset = dset1;
+ break;
+
+ case 1:
+ my_dataset = dset2;
+ break;
+
+ case 2:
+ my_dataset = dset3;
+ break;
+
+ default:
+ HDassert(0 && "Too many datasets!");
+ } /* end switch */
+#if 0
+ /* Check on dataset's attribute storage status */
+ is_empty = H5O__is_attr_empty_test(my_dataset);
+ VERIFY(is_empty, TRUE, "H5O__is_attr_empty_test");
+ is_dense = H5O__is_attr_dense_test(my_dataset);
+ VERIFY(is_dense, FALSE, "H5O__is_attr_dense_test");
+#endif
+ /* Create attributes, until attribute storage is in dense form */
+ for (u = 0; u < max_compact * 2; u++) {
+ /* Create attribute */
+ HDsnprintf(attrname, sizeof(attrname), "attr %02u", u);
+ attr = H5Acreate2(my_dataset, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(attr, FAIL, "H5Acreate2");
+
+ /* Write data into the attribute */
+ ret = H5Awrite(attr, H5T_NATIVE_UINT, &u);
+ CHECK(ret, FAIL, "H5Awrite");
+
+ /* Close attribute */
+ ret = H5Aclose(attr);
+ CHECK(ret, FAIL, "H5Aclose");
+ } /* end for */
+#if 0
+ /* Verify state of object */
+ ret = H5O__num_attrs_test(my_dataset, &nattrs);
+ CHECK(ret, FAIL, "H5O__num_attrs_test");
+ VERIFY(nattrs, (max_compact * 2), "H5O__num_attrs_test");
+ is_empty = H5O__is_attr_empty_test(my_dataset);
+ VERIFY(is_empty, FALSE, "H5O__is_attr_empty_test");
+ is_dense = H5O__is_attr_dense_test(my_dataset);
+ VERIFY(is_dense, TRUE, "H5O__is_attr_dense_test");
+
+ /* Retrieve & verify # of records in the name & creation order indices */
+ ret = H5O__attr_dense_info_test(my_dataset, &name_count, &corder_count);
+ CHECK(ret, FAIL, "H5O__attr_dense_info_test");
+ VERIFY(name_count, corder_count, "H5O__attr_dense_info_test");
+#endif
+ } /* end for */
+
+ /* Close Datasets */
+ ret = H5Dclose(dset1);
+ CHECK(ret, FAIL, "H5Dclose");
+ ret = H5Dclose(dset2);
+ CHECK(ret, FAIL, "H5Dclose");
+ ret = H5Dclose(dset3);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Check for deleting datasets without re-opening file */
+ if (!reopen_file) {
+ ret = H5Ldelete(fid, DSET1_NAME, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Ldelete");
+ ret = H5Ldelete(fid, DSET2_NAME, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Ldelete");
+ ret = H5Ldelete(fid, DSET3_NAME, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Ldelete");
+ } /* end if */
+
+ /* Close file */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Check for deleting dataset after re-opening file */
+ if (reopen_file) {
+ /* Re-open file */
+ fid = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl);
+ CHECK(fid, FAIL, "H5Fopen");
+
+ /* Delete the datasets */
+ ret = H5Ldelete(fid, DSET1_NAME, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Ldelete");
+ ret = H5Ldelete(fid, DSET2_NAME, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Ldelete");
+ ret = H5Ldelete(fid, DSET3_NAME, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Ldelete");
+
+ /* Close file */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+ } /* end if */
+
+#ifdef LATER
+ /* Get the size of the file now */
+ file_size = h5_get_file_size(FILENAME);
+ CHECK(file_size, FAIL, "h5_get_file_size");
+ VERIFY(file_size, empty_size, "h5_get_file_size");
+#endif /* LATER */
+ } /* end for */
+
+ /* Close property list */
+ ret = H5Pclose(dcpl);
+ CHECK(ret, FAIL, "H5Pclose");
+
+ /* Close dataspace */
+ ret = H5Sclose(sid);
+ CHECK(ret, FAIL, "H5Sclose");
+} /* test_attr_corder_delete() */
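+
+/****************************************************************
+**
+** Illustrative sketch (hedged, not part of the test flow): checking
+** that a DCPL really has attribute creation order tracking and
+** indexing enabled, which test_attr_corder_delete() relies on.  The
+** helper name is hypothetical; only the documented
+** H5Pget_attr_creation_order call is used.
+**
+****************************************************************/
+#if 0
+static htri_t
+example_corder_is_indexed(hid_t dcpl_id)
+{
+    unsigned crt_order_flags = 0;
+
+    /* Query the attribute creation order flags stored on the DCPL */
+    if (H5Pget_attr_creation_order(dcpl_id, &crt_order_flags) < 0)
+        return FAIL;
+
+    /* TRUE only when both tracking and indexing are enabled */
+    return ((crt_order_flags & H5P_CRT_ORDER_TRACKED) && (crt_order_flags & H5P_CRT_ORDER_INDEXED)) ? TRUE
+                                                                                                    : FALSE;
+}
+#endif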
+
+/*-------------------------------------------------------------------------
+ * Function: attr_info_by_idx_check
+ *
+ * Purpose: Support routine for the attribute info-by-index tests, to verify
+ *              that the attribute info is correct for an attribute
+ *
+ * Note: This routine assumes that the attributes have been added to the
+ * object in alphabetical order.
+ *
+ * Return: Success: 0
+ * Failure: -1
+ *
+ * Programmer: Quincey Koziol
+ *              Tuesday, February 13, 2007
+ *
+ *-------------------------------------------------------------------------
+ */
+static int
+attr_info_by_idx_check(hid_t obj_id, const char *attrname, hsize_t n, hbool_t use_index)
+{
+ char tmpname[NAME_BUF_SIZE]; /* Temporary attribute name */
+ H5A_info_t ainfo; /* Attribute info struct */
+ int old_nerrs; /* Number of errors when entering this check */
+ herr_t ret; /* Generic return value */
+
+ /* Retrieve the current # of reported errors */
+ old_nerrs = nerrors;
+
+ /* Verify the information for first attribute, in increasing creation order */
+ HDmemset(&ainfo, 0, sizeof(ainfo));
+ ret = H5Aget_info_by_idx(obj_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, (hsize_t)0, &ainfo, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Aget_info_by_idx");
+ VERIFY(ainfo.corder, 0, "H5Aget_info_by_idx");
+
+ /* Verify the information for new attribute, in increasing creation order */
+ HDmemset(&ainfo, 0, sizeof(ainfo));
+ ret = H5Aget_info_by_idx(obj_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, n, &ainfo, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Aget_info_by_idx");
+ VERIFY(ainfo.corder, n, "H5Aget_info_by_idx");
+
+    /* Verify the name for the new attribute, in increasing creation order */
+ HDmemset(tmpname, 0, (size_t)NAME_BUF_SIZE);
+ ret = (herr_t)H5Aget_name_by_idx(obj_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, n, tmpname,
+ (size_t)NAME_BUF_SIZE, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Aget_name_by_idx");
+ if (HDstrcmp(attrname, tmpname) != 0)
+        TestErrPrintf("Line %d: attribute name wrong!\n", __LINE__);
+
+ /* Don't test "native" order if there is no creation order index, since
+ * there's not a good way to easily predict the attribute's order in the name
+ * index.
+ */
+ if (use_index) {
+ /* Verify the information for first attribute, in native creation order */
+ HDmemset(&ainfo, 0, sizeof(ainfo));
+ ret = H5Aget_info_by_idx(obj_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC /* H5_ITER_NATIVE */,
+ (hsize_t)0, &ainfo, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Aget_info_by_idx");
+ VERIFY(ainfo.corder, 0, "H5Aget_info_by_idx");
+
+ /* Verify the information for new attribute, in native creation order */
+ HDmemset(&ainfo, 0, sizeof(ainfo));
+ ret = H5Aget_info_by_idx(obj_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC /* H5_ITER_NATIVE */, n, &ainfo,
+ H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Aget_info_by_idx");
+ VERIFY(ainfo.corder, n, "H5Aget_info_by_idx");
+
+        /* Verify the name for the new attribute, in native creation order */
+ HDmemset(tmpname, 0, (size_t)NAME_BUF_SIZE);
+ ret = (herr_t)H5Aget_name_by_idx(obj_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC /* H5_ITER_NATIVE */, n,
+ tmpname, (size_t)NAME_BUF_SIZE, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Aget_name_by_idx");
+ if (HDstrcmp(attrname, tmpname) != 0)
+            TestErrPrintf("Line %d: attribute name wrong!\n", __LINE__);
+ } /* end if */
+
+ /* Verify the information for first attribute, in decreasing creation order */
+ HDmemset(&ainfo, 0, sizeof(ainfo));
+ ret = H5Aget_info_by_idx(obj_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_DEC, n, &ainfo, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Aget_info_by_idx");
+ VERIFY(ainfo.corder, 0, "H5Aget_info_by_idx");
+
+    /* Verify the information for the new attribute, in decreasing creation order */
+ HDmemset(&ainfo, 0, sizeof(ainfo));
+ ret = H5Aget_info_by_idx(obj_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_DEC, (hsize_t)0, &ainfo, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Aget_info_by_idx");
+ VERIFY(ainfo.corder, n, "H5Aget_info_by_idx");
+
+    /* Verify the name for the new attribute, in decreasing creation order */
+ HDmemset(tmpname, 0, (size_t)NAME_BUF_SIZE);
+ ret = (herr_t)H5Aget_name_by_idx(obj_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_DEC, (hsize_t)0, tmpname,
+ (size_t)NAME_BUF_SIZE, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Aget_name_by_idx");
+ if (HDstrcmp(attrname, tmpname) != 0)
+        TestErrPrintf("Line %d: attribute name wrong!\n", __LINE__);
+
+ /* Verify the information for first attribute, in increasing name order */
+ HDmemset(&ainfo, 0, sizeof(ainfo));
+ ret = H5Aget_info_by_idx(obj_id, ".", H5_INDEX_NAME, H5_ITER_INC, (hsize_t)0, &ainfo, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Aget_info_by_idx");
+ VERIFY(ainfo.corder, 0, "H5Aget_info_by_idx");
+
+ /* Verify the information for new attribute, in increasing name order */
+ HDmemset(&ainfo, 0, sizeof(ainfo));
+ ret = H5Aget_info_by_idx(obj_id, ".", H5_INDEX_NAME, H5_ITER_INC, n, &ainfo, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Aget_info_by_idx");
+ VERIFY(ainfo.corder, n, "H5Aget_info_by_idx");
+
+    /* Verify the name for the new attribute, in increasing name order */
+ HDmemset(tmpname, 0, (size_t)NAME_BUF_SIZE);
+ ret = (herr_t)H5Aget_name_by_idx(obj_id, ".", H5_INDEX_NAME, H5_ITER_INC, n, tmpname,
+ (size_t)NAME_BUF_SIZE, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Aget_name_by_idx");
+ if (HDstrcmp(attrname, tmpname) != 0)
+        TestErrPrintf("Line %d: attribute name wrong!\n", __LINE__);
+
+    /* Don't test "native" order queries on the attribute name order, since there's
+     * not a good way to easily predict the order of the attributes in the name index.
+     */
+
+#ifndef NO_DECREASING_ALPHA_ITER_ORDER
+ /* Verify the information for first attribute, in decreasing name order */
+ HDmemset(&ainfo, 0, sizeof(ainfo));
+ ret = H5Aget_info_by_idx(obj_id, ".", H5_INDEX_NAME, H5_ITER_DEC, n, &ainfo, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Aget_info_by_idx");
+ VERIFY(ainfo.corder, 0, "H5Aget_info_by_idx");
+
+    /* Verify the information for the new attribute, in decreasing name order */
+ HDmemset(&ainfo, 0, sizeof(ainfo));
+ ret = H5Aget_info_by_idx(obj_id, ".", H5_INDEX_NAME, H5_ITER_DEC, (hsize_t)0, &ainfo, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Aget_info_by_idx");
+ VERIFY(ainfo.corder, n, "H5Aget_info_by_idx");
+
+    /* Verify the name for the new attribute, in decreasing name order */
+ HDmemset(tmpname, 0, (size_t)NAME_BUF_SIZE);
+ ret = (herr_t)H5Aget_name_by_idx(obj_id, ".", H5_INDEX_NAME, H5_ITER_DEC, (hsize_t)0, tmpname,
+ (size_t)NAME_BUF_SIZE, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Aget_name_by_idx");
+ if (HDstrcmp(attrname, tmpname) != 0)
+        TestErrPrintf("Line %d: attribute name wrong!\n", __LINE__);
+#endif
+ /* Retrieve current # of errors */
+ if (old_nerrs == nerrors)
+ return (0);
+ else
+ return (-1);
+} /* end attr_info_by_idx_check() */
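+
+/****************************************************************
+**
+** Illustrative sketch (hedged): the index/order symmetry that
+** attr_info_by_idx_check() leans on -- with (n + 1) attributes
+** present, index n in increasing creation order and index 0 in
+** decreasing creation order address the same (newest) attribute.
+** The helper name is hypothetical.
+**
+****************************************************************/
+#if 0
+static herr_t
+example_idx_order_symmetry(hid_t obj_id, hsize_t n)
+{
+    H5A_info_t inc_info, dec_info;
+
+    /* Newest attribute, addressed from the back of the increasing creation order index */
+    if (H5Aget_info_by_idx(obj_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, n, &inc_info, H5P_DEFAULT) < 0)
+        return FAIL;
+
+    /* Same attribute, addressed from the front of the decreasing creation order index */
+    if (H5Aget_info_by_idx(obj_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_DEC, (hsize_t)0, &dec_info, H5P_DEFAULT) < 0)
+        return FAIL;
+
+    /* Both queries must report the same creation order value */
+    return (inc_info.corder == dec_info.corder) ? SUCCEED : FAIL;
+}
+#endif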
+
+/****************************************************************
+**
+** test_attr_info_by_idx(): Test basic H5A (attribute) code.
+** Tests querying attribute info by index
+**
+****************************************************************/
+static void
+test_attr_info_by_idx(hbool_t new_format, hid_t fcpl, hid_t fapl)
+{
+ hid_t fid; /* HDF5 File ID */
+ hid_t dset1, dset2, dset3; /* Dataset IDs */
+ hid_t my_dataset; /* Current dataset ID */
+ hid_t sid; /* Dataspace ID */
+ hid_t attr; /* Attribute ID */
+ hid_t dcpl; /* Dataset creation property list ID */
+ H5A_info_t ainfo; /* Attribute information */
+    unsigned max_compact; /* Maximum # of attributes to store compactly on the object */
+    unsigned min_dense;   /* Minimum # of attributes to store "densely" on the object */
+#if 0
+ htri_t is_empty; /* Are there any attributes? */
+ htri_t is_dense; /* Are attributes stored densely? */
+ hsize_t nattrs; /* Number of attributes on object */
+ hsize_t name_count; /* # of records in name index */
+ hsize_t corder_count; /* # of records in creation order index */
+#endif
+ unsigned use_index; /* Use index on creation order values */
+ char attrname[NAME_BUF_SIZE]; /* Name of attribute */
+ char tmpname[NAME_BUF_SIZE]; /* Temporary attribute name */
+ unsigned curr_dset; /* Current dataset to work on */
+ unsigned u; /* Local index variable */
+ herr_t ret; /* Generic return value */
+
+ /* Create dataspace for dataset & attributes */
+ sid = H5Screate(H5S_SCALAR);
+ CHECK(sid, FAIL, "H5Screate");
+
+ /* Create dataset creation property list */
+ if (dcpl_g == H5P_DEFAULT) {
+ dcpl = H5Pcreate(H5P_DATASET_CREATE);
+ CHECK(dcpl, FAIL, "H5Pcreate");
+ }
+ else {
+ dcpl = H5Pcopy(dcpl_g);
+ CHECK(dcpl, FAIL, "H5Pcopy");
+ }
+
+ /* Query the attribute creation properties */
+ ret = H5Pget_attr_phase_change(dcpl, &max_compact, &min_dense);
+ CHECK(ret, FAIL, "H5Pget_attr_phase_change");
+
+ /* Loop over using index for creation order value */
+ for (use_index = FALSE; use_index <= TRUE; use_index++) {
+ /* Output message about test being performed */
+ if (use_index)
+ MESSAGE(5, ("Testing Querying Attribute Info By Index w/Creation Order Index\n"))
+ else
+ MESSAGE(5, ("Testing Querying Attribute Info By Index w/o Creation Order Index\n"))
+
+ /* Create file */
+ fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl, fapl);
+ CHECK(fid, FAIL, "H5Fcreate");
+
+ /* Set attribute creation order tracking & indexing for object */
+ if (new_format == TRUE) {
+ ret = H5Pset_attr_creation_order(
+ dcpl, (H5P_CRT_ORDER_TRACKED | (use_index ? H5P_CRT_ORDER_INDEXED : (unsigned)0)));
+ CHECK(ret, FAIL, "H5Pset_attr_creation_order");
+ } /* end if */
+
+ /* Create datasets */
+ dset1 = H5Dcreate2(fid, DSET1_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT);
+ CHECK(dset1, FAIL, "H5Dcreate2");
+ dset2 = H5Dcreate2(fid, DSET2_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT);
+ CHECK(dset2, FAIL, "H5Dcreate2");
+ dset3 = H5Dcreate2(fid, DSET3_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT);
+ CHECK(dset3, FAIL, "H5Dcreate2");
+
+ /* Work on all the datasets */
+ for (curr_dset = 0; curr_dset < NUM_DSETS; curr_dset++) {
+ switch (curr_dset) {
+ case 0:
+ my_dataset = dset1;
+ break;
+
+ case 1:
+ my_dataset = dset2;
+ break;
+
+ case 2:
+ my_dataset = dset3;
+ break;
+
+ default:
+ HDassert(0 && "Too many datasets!");
+ } /* end switch */
+#if 0
+ /* Check on dataset's attribute storage status */
+ is_empty = H5O__is_attr_empty_test(my_dataset);
+ VERIFY(is_empty, TRUE, "H5O__is_attr_empty_test");
+ is_dense = H5O__is_attr_dense_test(my_dataset);
+ VERIFY(is_dense, FALSE, "H5O__is_attr_dense_test");
+#endif
+ /* Check for query on non-existent attribute */
+ H5E_BEGIN_TRY
+ {
+ ret = H5Aget_info_by_idx(my_dataset, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, (hsize_t)0, &ainfo,
+ H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+ VERIFY(ret, FAIL, "H5Aget_info_by_idx");
+ H5E_BEGIN_TRY
+ {
+ ret = (herr_t)H5Aget_name_by_idx(my_dataset, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, (hsize_t)0,
+ tmpname, (size_t)NAME_BUF_SIZE, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+ VERIFY(ret, FAIL, "H5Aget_name_by_idx");
+
+ /* Create attributes, up to limit of compact form */
+ for (u = 0; u < max_compact; u++) {
+ /* Create attribute */
+ HDsnprintf(attrname, sizeof(attrname), "attr %02u", u);
+ attr = H5Acreate2(my_dataset, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(attr, FAIL, "H5Acreate2");
+
+ /* Write data into the attribute */
+ ret = H5Awrite(attr, H5T_NATIVE_UINT, &u);
+ CHECK(ret, FAIL, "H5Awrite");
+
+ /* Close attribute */
+ ret = H5Aclose(attr);
+ CHECK(ret, FAIL, "H5Aclose");
+
+ /* Verify information for new attribute */
+ ret = attr_info_by_idx_check(my_dataset, attrname, (hsize_t)u, use_index);
+ CHECK(ret, FAIL, "attr_info_by_idx_check");
+ } /* end for */
+#if 0
+ /* Verify state of object */
+ ret = H5O__num_attrs_test(my_dataset, &nattrs);
+ CHECK(ret, FAIL, "H5O__num_attrs_test");
+ VERIFY(nattrs, max_compact, "H5O__num_attrs_test");
+ is_empty = H5O__is_attr_empty_test(my_dataset);
+ VERIFY(is_empty, FALSE, "H5O__is_attr_empty_test");
+ is_dense = H5O__is_attr_dense_test(my_dataset);
+ VERIFY(is_dense, FALSE, "H5O__is_attr_dense_test");
+#endif
+ /* Check for out of bound offset queries */
+ H5E_BEGIN_TRY
+ {
+ ret = H5Aget_info_by_idx(my_dataset, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, (hsize_t)u, &ainfo,
+ H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+ VERIFY(ret, FAIL, "H5Aget_info_by_idx");
+ H5E_BEGIN_TRY
+ {
+ ret = H5Aget_info_by_idx(my_dataset, ".", H5_INDEX_CRT_ORDER, H5_ITER_DEC, (hsize_t)u, &ainfo,
+ H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+ VERIFY(ret, FAIL, "H5Aget_info_by_idx");
+ H5E_BEGIN_TRY
+ {
+ ret = (herr_t)H5Aget_name_by_idx(my_dataset, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, (hsize_t)u,
+ tmpname, (size_t)NAME_BUF_SIZE, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+ VERIFY(ret, FAIL, "H5Aget_name_by_idx");
+
+ /* Create more attributes, to push into dense form */
+ for (; u < (max_compact * 2); u++) {
+ /* Create attribute */
+ HDsnprintf(attrname, sizeof(attrname), "attr %02u", u);
+ attr = H5Acreate2(my_dataset, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(attr, FAIL, "H5Acreate2");
+
+ /* Write data into the attribute */
+ ret = H5Awrite(attr, H5T_NATIVE_UINT, &u);
+ CHECK(ret, FAIL, "H5Awrite");
+
+ /* Close attribute */
+ ret = H5Aclose(attr);
+ CHECK(ret, FAIL, "H5Aclose");
+#if 0
+ /* Verify state of object */
+ is_dense = H5O__is_attr_dense_test(my_dataset);
+ VERIFY(is_dense, (new_format ? TRUE : FALSE), "H5O__is_attr_dense_test");
+#endif
+ /* Verify information for new attribute */
+ ret = attr_info_by_idx_check(my_dataset, attrname, (hsize_t)u, use_index);
+ CHECK(ret, FAIL, "attr_info_by_idx_check");
+ } /* end for */
+#if 0
+ /* Verify state of object */
+ ret = H5O__num_attrs_test(my_dataset, &nattrs);
+ CHECK(ret, FAIL, "H5O__num_attrs_test");
+ VERIFY(nattrs, (max_compact * 2), "H5O__num_attrs_test");
+ is_empty = H5O__is_attr_empty_test(my_dataset);
+ VERIFY(is_empty, FALSE, "H5O__is_attr_empty_test");
+ is_dense = H5O__is_attr_dense_test(my_dataset);
+ VERIFY(is_dense, (new_format ? TRUE : FALSE), "H5O__is_attr_dense_test");
+
+ if (new_format) {
+ /* Retrieve & verify # of records in the name & creation order indices */
+ ret = H5O__attr_dense_info_test(my_dataset, &name_count, &corder_count);
+ CHECK(ret, FAIL, "H5O__attr_dense_info_test");
+ if (use_index)
+ VERIFY(name_count, corder_count, "H5O__attr_dense_info_test");
+ VERIFY(name_count, (max_compact * 2), "H5O__attr_dense_info_test");
+ } /* end if */
+#endif
+ /* Check for out of bound offset queries */
+ H5E_BEGIN_TRY
+ {
+ ret = H5Aget_info_by_idx(my_dataset, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, (hsize_t)u, &ainfo,
+ H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+ VERIFY(ret, FAIL, "H5Aget_info_by_idx");
+ H5E_BEGIN_TRY
+ {
+ ret = H5Aget_info_by_idx(my_dataset, ".", H5_INDEX_CRT_ORDER, H5_ITER_DEC, (hsize_t)u, &ainfo,
+ H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+ VERIFY(ret, FAIL, "H5Aget_info_by_idx");
+ H5E_BEGIN_TRY
+ {
+ ret = (herr_t)H5Aget_name_by_idx(my_dataset, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, (hsize_t)u,
+ tmpname, (size_t)NAME_BUF_SIZE, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+ VERIFY(ret, FAIL, "H5Aget_name_by_idx");
+ } /* end for */
+
+ /* Close Datasets */
+ ret = H5Dclose(dset1);
+ CHECK(ret, FAIL, "H5Dclose");
+ ret = H5Dclose(dset2);
+ CHECK(ret, FAIL, "H5Dclose");
+ ret = H5Dclose(dset3);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close file */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+ } /* end for */
+
+ /* Close property list */
+ ret = H5Pclose(dcpl);
+ CHECK(ret, FAIL, "H5Pclose");
+
+ /* Close dataspace */
+ ret = H5Sclose(sid);
+ CHECK(ret, FAIL, "H5Sclose");
+} /* test_attr_info_by_idx() */
+
+/***************************************************************
+**
+** test_attr_info_null_info_pointer(): A test to ensure that
+** passing a NULL attribute info pointer to H5Aget_info
+** (_by_name/_by_idx) doesn't cause bad behavior.
+**
+****************************************************************/
+static void
+test_attr_info_null_info_pointer(hid_t fcpl, hid_t fapl)
+{
+ herr_t err_ret = -1;
+ hid_t fid;
+ hid_t attr;
+ hid_t sid;
+
+ /* Create dataspace for attribute */
+ sid = H5Screate(H5S_SCALAR);
+ CHECK(sid, FAIL, "H5Screate");
+
+ /* Create file */
+ fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl, fapl);
+ CHECK(fid, FAIL, "H5Fcreate");
+
+ /* Create attribute */
+ attr = H5Acreate2(fid, GET_INFO_NULL_POINTER_ATTR_NAME, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(attr, FAIL, "H5Acreate2");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Aget_info(attr, NULL);
+ }
+ H5E_END_TRY;
+
+ CHECK(err_ret, SUCCEED, "H5Aget_info");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Aget_info_by_name(fid, ".", GET_INFO_NULL_POINTER_ATTR_NAME, NULL, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ CHECK(err_ret, SUCCEED, "H5Aget_info_by_name");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Aget_info_by_idx(fid, ".", H5_INDEX_NAME, H5_ITER_INC, 0, NULL, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ CHECK(err_ret, SUCCEED, "H5Aget_info_by_idx");
+
+ /* Close dataspace */
+ err_ret = H5Sclose(sid);
+ CHECK(err_ret, FAIL, "H5Sclose");
+
+ /* Close attribute */
+ err_ret = H5Aclose(attr);
+ CHECK(err_ret, FAIL, "H5Aclose");
+
+ /* Close file */
+ err_ret = H5Fclose(fid);
+ CHECK(err_ret, FAIL, "H5Fclose");
+}
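+
+/****************************************************************
+**
+** Illustrative sketch (hedged): the valid counterpart of the calls
+** rejected above -- passing a real H5A_info_t pointer to
+** H5Aget_info.  The helper name is hypothetical.
+**
+****************************************************************/
+#if 0
+static herr_t
+example_get_attr_info(hid_t attr_id)
+{
+    H5A_info_t ainfo;
+
+    HDmemset(&ainfo, 0, sizeof(ainfo));
+
+    /* With a non-NULL info pointer the query succeeds */
+    if (H5Aget_info(attr_id, &ainfo) < 0)
+        return FAIL;
+
+    /* ainfo.data_size now holds the size of the attribute's raw data, in bytes */
+    return SUCCEED;
+}
+#endif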
+
+/***************************************************************
+**
+** test_attr_rename_invalid_name(): A test to ensure that
+** passing a NULL or empty attribute name to
+** H5Arename(_by_name) doesn't cause bad behavior.
+**
+****************************************************************/
+static void
+test_attr_rename_invalid_name(hid_t fcpl, hid_t fapl)
+{
+ herr_t err_ret = -1;
+ hid_t fid;
+ hid_t attr;
+ hid_t sid;
+
+ /* Create dataspace for attribute */
+ sid = H5Screate(H5S_SCALAR);
+ CHECK(sid, FAIL, "H5Screate");
+
+ /* Create file */
+ fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl, fapl);
+ CHECK(fid, FAIL, "H5Fcreate");
+
+ /* Create attribute */
+ attr = H5Acreate2(fid, INVALID_RENAME_TEST_ATTR_NAME, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(attr, FAIL, "H5Acreate2");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Arename(fid, NULL, INVALID_RENAME_TEST_NEW_ATTR_NAME);
+ }
+ H5E_END_TRY;
+
+ CHECK(err_ret, SUCCEED, "H5Arename");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Arename(fid, "", INVALID_RENAME_TEST_NEW_ATTR_NAME);
+ }
+ H5E_END_TRY;
+
+ CHECK(err_ret, SUCCEED, "H5Arename");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Arename(fid, INVALID_RENAME_TEST_ATTR_NAME, NULL);
+ }
+ H5E_END_TRY;
+
+ CHECK(err_ret, SUCCEED, "H5Arename");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Arename(fid, INVALID_RENAME_TEST_ATTR_NAME, "");
+ }
+ H5E_END_TRY;
+
+ CHECK(err_ret, SUCCEED, "H5Arename");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Arename_by_name(fid, ".", NULL, INVALID_RENAME_TEST_NEW_ATTR_NAME, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ CHECK(err_ret, SUCCEED, "H5Arename_by_name");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Arename_by_name(fid, ".", "", INVALID_RENAME_TEST_NEW_ATTR_NAME, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ CHECK(err_ret, SUCCEED, "H5Arename_by_name");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Arename_by_name(fid, ".", INVALID_RENAME_TEST_ATTR_NAME, NULL, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ CHECK(err_ret, SUCCEED, "H5Arename_by_name");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Arename_by_name(fid, ".", INVALID_RENAME_TEST_ATTR_NAME, "", H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ CHECK(err_ret, SUCCEED, "H5Arename_by_name");
+
+ /* Close dataspace */
+ err_ret = H5Sclose(sid);
+ CHECK(err_ret, FAIL, "H5Sclose");
+
+ /* Close attribute */
+ err_ret = H5Aclose(attr);
+ CHECK(err_ret, FAIL, "H5Aclose");
+
+ /* Close file */
+ err_ret = H5Fclose(fid);
+ CHECK(err_ret, FAIL, "H5Fclose");
+}
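+
+/****************************************************************
+**
+** Illustrative sketch (hedged): a valid H5Arename call, for contrast
+** with the NULL/empty names rejected above.  The helper and the
+** attribute names are hypothetical.
+**
+****************************************************************/
+#if 0
+static herr_t
+example_rename_attr(hid_t loc_id)
+{
+    /* Both the old and the new name must be non-NULL and non-empty */
+    if (H5Arename(loc_id, "old_attr_name", "new_attr_name") < 0)
+        return FAIL;
+
+    /* The attribute is now only reachable under its new name */
+    return (H5Aexists(loc_id, "new_attr_name") > 0) ? SUCCEED : FAIL;
+}
+#endif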
+
+/***************************************************************
+**
+** test_attr_get_name_invalid_buf(): A test to ensure that
+** passing a NULL buffer to H5Aget_name(_by_idx) when
+** the 'size' parameter is non-zero doesn't cause bad
+** behavior.
+**
+****************************************************************/
+static void
+test_attr_get_name_invalid_buf(hid_t fcpl, hid_t fapl)
+{
+ ssize_t err_ret = -1;
+ hid_t fid;
+ hid_t attr;
+ hid_t sid;
+
+ /* Create dataspace for attribute */
+ sid = H5Screate(H5S_SCALAR);
+ CHECK(sid, FAIL, "H5Screate");
+
+ /* Create file */
+ fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl, fapl);
+ CHECK(fid, FAIL, "H5Fcreate");
+
+ /* Create attribute */
+ attr =
+ H5Acreate2(fid, GET_NAME_INVALID_BUF_TEST_ATTR_NAME, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(attr, FAIL, "H5Acreate2");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Aget_name(attr, 1, NULL);
+ }
+ H5E_END_TRY;
+
+ VERIFY(err_ret, FAIL, "H5Aget_name");
+
+ H5E_BEGIN_TRY
+ {
+ err_ret = H5Aget_name_by_idx(fid, ".", H5_INDEX_NAME, H5_ITER_INC, 0, NULL, 1, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ VERIFY(err_ret, FAIL, "H5Aget_name_by_idx");
+
+ /* Close dataspace */
+ err_ret = H5Sclose(sid);
+ CHECK(err_ret, FAIL, "H5Sclose");
+
+ /* Close attribute */
+ err_ret = H5Aclose(attr);
+ CHECK(err_ret, FAIL, "H5Aclose");
+
+ /* Close file */
+ err_ret = H5Fclose(fid);
+ CHECK(err_ret, FAIL, "H5Fclose");
+}
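+
+/****************************************************************
+**
+** Illustrative sketch (hedged): the usual two-step pattern for sizing
+** the name buffer, as opposed to the NULL-buffer misuse rejected
+** above.  The helper name is hypothetical; only the documented
+** H5Aget_name behavior (a NULL buffer returns the name length) is
+** relied on.
+**
+****************************************************************/
+#if 0
+static herr_t
+example_get_attr_name(hid_t attr_id)
+{
+    ssize_t name_len;
+    char   *name_buf;
+
+    /* First call with a NULL buffer only to discover the name length */
+    if ((name_len = H5Aget_name(attr_id, (size_t)0, NULL)) < 0)
+        return FAIL;
+
+    /* Allocate room for the name plus the terminating NUL */
+    if (NULL == (name_buf = (char *)HDmalloc((size_t)name_len + 1)))
+        return FAIL;
+
+    /* Second call actually retrieves the name */
+    if (H5Aget_name(attr_id, (size_t)name_len + 1, name_buf) < 0) {
+        HDfree(name_buf);
+        return FAIL;
+    }
+
+    HDfree(name_buf);
+    return SUCCEED;
+}
+#endif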
+
+/****************************************************************
+**
+** test_attr_delete_by_idx(): Test basic H5A (attribute) code.
+** Tests deleting attribute by index
+**
+****************************************************************/
+static void
+test_attr_delete_by_idx(hbool_t new_format, hid_t fcpl, hid_t fapl)
+{
+ hid_t fid; /* HDF5 File ID */
+ hid_t dset1, dset2, dset3; /* Dataset IDs */
+ hid_t my_dataset; /* Current dataset ID */
+ hid_t sid; /* Dataspace ID */
+ hid_t attr; /* Attribute ID */
+ hid_t dcpl; /* Dataset creation property list ID */
+ H5A_info_t ainfo; /* Attribute information */
+    unsigned max_compact; /* Maximum # of attributes to store compactly on the object */
+    unsigned min_dense;   /* Minimum # of attributes to store "densely" on the object */
+#if 0
+ htri_t is_empty; /* Are there any attributes? */
+ htri_t is_dense; /* Are attributes stored densely? */
+ hsize_t nattrs; /* Number of attributes on object */
+ hsize_t name_count; /* # of records in name index */
+ hsize_t corder_count; /* # of records in creation order index */
+#endif
+ H5_index_t idx_type; /* Type of index to operate on */
+    H5_iter_order_t order; /* Order within the index */
+ unsigned use_index; /* Use index on creation order values */
+ char attrname[NAME_BUF_SIZE]; /* Name of attribute */
+ char tmpname[NAME_BUF_SIZE]; /* Temporary attribute name */
+ unsigned curr_dset; /* Current dataset to work on */
+ unsigned u; /* Local index variable */
+ herr_t ret; /* Generic return value */
+
+ MESSAGE(5, ("Testing Deleting Attribute By Index\n"))
+
+ /* Create dataspace for dataset & attributes */
+ sid = H5Screate(H5S_SCALAR);
+ CHECK(sid, FAIL, "H5Screate");
+
+ /* Create dataset creation property list */
+ if (dcpl_g == H5P_DEFAULT) {
+ dcpl = H5Pcreate(H5P_DATASET_CREATE);
+ CHECK(dcpl, FAIL, "H5Pcreate");
+ }
+ else {
+ dcpl = H5Pcopy(dcpl_g);
+ CHECK(dcpl, FAIL, "H5Pcopy");
+ }
+
+ /* Query the attribute creation properties */
+ ret = H5Pget_attr_phase_change(dcpl, &max_compact, &min_dense);
+ CHECK(ret, FAIL, "H5Pget_attr_phase_change");
+
+ /* Loop over operating on different indices on link fields */
+ for (idx_type = H5_INDEX_NAME; idx_type <= H5_INDEX_CRT_ORDER; idx_type++) {
+ /* Loop over operating in different orders */
+ for (order = H5_ITER_INC; order <= H5_ITER_DEC; order++) {
+ /* Loop over using index for creation order value */
+ for (use_index = FALSE; use_index <= TRUE; use_index++) {
+ /* Print appropriate test message */
+ if (idx_type == H5_INDEX_CRT_ORDER) {
+ if (order == H5_ITER_INC) {
+ if (use_index)
+ MESSAGE(5, ("Testing Deleting Attribute By Creation Order Index in Increasing "
+ "Order w/Creation Order Index\n"))
+ else
+ MESSAGE(5, ("Testing Deleting Attribute By Creation Order Index in Increasing "
+ "Order w/o Creation Order Index\n"))
+ } /* end if */
+ else {
+ if (use_index)
+ MESSAGE(5, ("Testing Deleting Attribute By Creation Order Index in Decreasing "
+ "Order w/Creation Order Index\n"))
+ else
+ MESSAGE(5, ("Testing Deleting Attribute By Creation Order Index in Decreasing "
+ "Order w/o Creation Order Index\n"))
+ } /* end else */
+ } /* end if */
+ else {
+ if (order == H5_ITER_INC) {
+ if (use_index)
+ MESSAGE(5, ("Testing Deleting Attribute By Name Index in Increasing Order "
+ "w/Creation Order Index\n"))
+ else
+ MESSAGE(5, ("Testing Deleting Attribute By Name Index in Increasing Order w/o "
+ "Creation Order Index\n"))
+ } /* end if */
+ else {
+#ifndef NO_DECREASING_ALPHA_ITER_ORDER
+ if (use_index)
+ MESSAGE(5, ("Testing Deleting Attribute By Name Index in Decreasing Order "
+ "w/Creation Order Index\n"))
+ else
+ MESSAGE(5, ("Testing Deleting Attribute By Name Index in Decreasing Order w/o "
+ "Creation Order Index\n"))
+#else
+ continue;
+#endif
+ } /* end else */
+ } /* end else */
+
+ /* Create file */
+ fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl, fapl);
+ CHECK(fid, FAIL, "H5Fcreate");
+
+ /* Set attribute creation order tracking & indexing for object */
+ if (new_format == TRUE) {
+ ret = H5Pset_attr_creation_order(
+ dcpl, (H5P_CRT_ORDER_TRACKED | (use_index ? H5P_CRT_ORDER_INDEXED : (unsigned)0)));
+ CHECK(ret, FAIL, "H5Pset_attr_creation_order");
+ } /* end if */
+
+ /* Create datasets */
+ dset1 = H5Dcreate2(fid, DSET1_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT);
+ CHECK(dset1, FAIL, "H5Dcreate2");
+ dset2 = H5Dcreate2(fid, DSET2_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT);
+ CHECK(dset2, FAIL, "H5Dcreate2");
+ dset3 = H5Dcreate2(fid, DSET3_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT);
+ CHECK(dset3, FAIL, "H5Dcreate2");
+
+ /* Work on all the datasets */
+ for (curr_dset = 0; curr_dset < NUM_DSETS; curr_dset++) {
+ switch (curr_dset) {
+ case 0:
+ my_dataset = dset1;
+ break;
+
+ case 1:
+ my_dataset = dset2;
+ break;
+
+ case 2:
+ my_dataset = dset3;
+ break;
+
+ default:
+ HDassert(0 && "Too many datasets!");
+ } /* end switch */
+#if 0
+ /* Check on dataset's attribute storage status */
+ is_empty = H5O__is_attr_empty_test(my_dataset);
+ VERIFY(is_empty, TRUE, "H5O__is_attr_empty_test");
+ is_dense = H5O__is_attr_dense_test(my_dataset);
+ VERIFY(is_dense, FALSE, "H5O__is_attr_dense_test");
+#endif
+ /* Check for deleting non-existent attribute */
+ H5E_BEGIN_TRY
+ {
+ ret = H5Adelete_by_idx(my_dataset, ".", idx_type, order, (hsize_t)0, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+ VERIFY(ret, FAIL, "H5Adelete_by_idx");
+
+ /* Create attributes, up to limit of compact form */
+ for (u = 0; u < max_compact; u++) {
+ /* Create attribute */
+ HDsnprintf(attrname, sizeof(attrname), "attr %02u", u);
+ attr =
+ H5Acreate2(my_dataset, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(attr, FAIL, "H5Acreate2");
+
+ /* Write data into the attribute */
+ ret = H5Awrite(attr, H5T_NATIVE_UINT, &u);
+ CHECK(ret, FAIL, "H5Awrite");
+
+ /* Close attribute */
+ ret = H5Aclose(attr);
+ CHECK(ret, FAIL, "H5Aclose");
+
+ /* Verify information for new attribute */
+ ret = attr_info_by_idx_check(my_dataset, attrname, (hsize_t)u, use_index);
+ CHECK(ret, FAIL, "attr_info_by_idx_check");
+ } /* end for */
+#if 0
+ /* Verify state of object */
+ ret = H5O__num_attrs_test(my_dataset, &nattrs);
+ CHECK(ret, FAIL, "H5O__num_attrs_test");
+ VERIFY(nattrs, max_compact, "H5O__num_attrs_test");
+ is_empty = H5O__is_attr_empty_test(my_dataset);
+ VERIFY(is_empty, FALSE, "H5O__is_attr_empty_test");
+ is_dense = H5O__is_attr_dense_test(my_dataset);
+ VERIFY(is_dense, FALSE, "H5O__is_attr_dense_test");
+#endif
+ /* Check for out of bound deletions */
+ H5E_BEGIN_TRY
+ {
+ ret = H5Adelete_by_idx(my_dataset, ".", idx_type, order, (hsize_t)u, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+ VERIFY(ret, FAIL, "H5Adelete_by_idx");
+ } /* end for */
+
+ /* Work on all the datasets */
+ for (curr_dset = 0; curr_dset < NUM_DSETS; curr_dset++) {
+ switch (curr_dset) {
+ case 0:
+ my_dataset = dset1;
+ break;
+
+ case 1:
+ my_dataset = dset2;
+ break;
+
+ case 2:
+ my_dataset = dset3;
+ break;
+
+ default:
+ HDassert(0 && "Too many datasets!");
+ } /* end switch */
+
+ /* Delete attributes from compact storage */
+ for (u = 0; u < (max_compact - 1); u++) {
+ /* Delete first attribute in appropriate order */
+ ret = H5Adelete_by_idx(my_dataset, ".", idx_type, order, (hsize_t)0, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Adelete_by_idx");
+
+ /* Verify the attribute information for first attribute in appropriate order */
+ HDmemset(&ainfo, 0, sizeof(ainfo));
+ ret = H5Aget_info_by_idx(my_dataset, ".", idx_type, order, (hsize_t)0, &ainfo,
+ H5P_DEFAULT);
+ if (new_format) {
+ if (order == H5_ITER_INC) {
+ VERIFY(ainfo.corder, (u + 1), "H5Aget_info_by_idx");
+ } /* end if */
+ else {
+ VERIFY(ainfo.corder, (max_compact - (u + 2)), "H5Aget_info_by_idx");
+ } /* end else */
+ } /* end if */
+
+ /* Verify the name for first attribute in appropriate order */
+ HDmemset(tmpname, 0, (size_t)NAME_BUF_SIZE);
+ ret = (herr_t)H5Aget_name_by_idx(my_dataset, ".", idx_type, order, (hsize_t)0,
+ tmpname, (size_t)NAME_BUF_SIZE, H5P_DEFAULT);
+ if (order == H5_ITER_INC)
+ HDsnprintf(attrname, sizeof(attrname), "attr %02u", (u + 1));
+ else
+ HDsnprintf(attrname, sizeof(attrname), "attr %02u", (max_compact - (u + 2)));
+ ret = HDstrcmp(attrname, tmpname);
+ VERIFY(ret, 0, "H5Aget_name_by_idx");
+ } /* end for */
+
+ /* Delete last attribute */
+ ret = H5Adelete_by_idx(my_dataset, ".", idx_type, order, (hsize_t)0, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Adelete_by_idx");
+#if 0
+ /* Verify state of attribute storage (empty) */
+ is_empty = H5O__is_attr_empty_test(my_dataset);
+ VERIFY(is_empty, TRUE, "H5O__is_attr_empty_test");
+#endif
+ } /* end for */
+
+ /* Work on all the datasets */
+ for (curr_dset = 0; curr_dset < NUM_DSETS; curr_dset++) {
+ switch (curr_dset) {
+ case 0:
+ my_dataset = dset1;
+ break;
+
+ case 1:
+ my_dataset = dset2;
+ break;
+
+ case 2:
+ my_dataset = dset3;
+ break;
+
+ default:
+ HDassert(0 && "Too many datasets!");
+ } /* end switch */
+
+ /* Create more attributes, to push into dense form */
+ for (u = 0; u < (max_compact * 2); u++) {
+ /* Create attribute */
+ HDsnprintf(attrname, sizeof(attrname), "attr %02u", u);
+ attr =
+ H5Acreate2(my_dataset, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(attr, FAIL, "H5Acreate2");
+
+ /* Write data into the attribute */
+ ret = H5Awrite(attr, H5T_NATIVE_UINT, &u);
+ CHECK(ret, FAIL, "H5Awrite");
+
+ /* Close attribute */
+ ret = H5Aclose(attr);
+ CHECK(ret, FAIL, "H5Aclose");
+#if 0
+ /* Verify state of object */
+ if (u >= max_compact) {
+ is_dense = H5O__is_attr_dense_test(my_dataset);
+ VERIFY(is_dense, (new_format ? TRUE : FALSE), "H5O__is_attr_dense_test");
+ } /* end if */
+#endif
+ /* Verify information for new attribute */
+ ret = attr_info_by_idx_check(my_dataset, attrname, (hsize_t)u, use_index);
+ CHECK(ret, FAIL, "attr_info_by_idx_check");
+ } /* end for */
+#if 0
+ /* Verify state of object */
+ ret = H5O__num_attrs_test(my_dataset, &nattrs);
+ CHECK(ret, FAIL, "H5O__num_attrs_test");
+ VERIFY(nattrs, (max_compact * 2), "H5O__num_attrs_test");
+ is_empty = H5O__is_attr_empty_test(my_dataset);
+ VERIFY(is_empty, FALSE, "H5O__is_attr_empty_test");
+ is_dense = H5O__is_attr_dense_test(my_dataset);
+ VERIFY(is_dense, (new_format ? TRUE : FALSE), "H5O__is_attr_dense_test");
+
+ if (new_format) {
+ /* Retrieve & verify # of records in the name & creation order indices */
+ ret = H5O__attr_dense_info_test(my_dataset, &name_count, &corder_count);
+ CHECK(ret, FAIL, "H5O__attr_dense_info_test");
+ if (use_index)
+ VERIFY(name_count, corder_count, "H5O__attr_dense_info_test");
+ VERIFY(name_count, (max_compact * 2), "H5O__attr_dense_info_test");
+ } /* end if */
+#endif
+ /* Check for out of bound deletion */
+ H5E_BEGIN_TRY
+ {
+ ret = H5Adelete_by_idx(my_dataset, ".", idx_type, order, (hsize_t)u, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+ VERIFY(ret, FAIL, "H5Adelete_by_idx");
+ } /* end for */
+
+ /* Work on all the datasets */
+ for (curr_dset = 0; curr_dset < NUM_DSETS; curr_dset++) {
+ switch (curr_dset) {
+ case 0:
+ my_dataset = dset1;
+ break;
+
+ case 1:
+ my_dataset = dset2;
+ break;
+
+ case 2:
+ my_dataset = dset3;
+ break;
+
+ default:
+ HDassert(0 && "Too many datasets!");
+ } /* end switch */
+
+ /* Delete attributes from dense storage */
+ for (u = 0; u < ((max_compact * 2) - 1); u++) {
+ /* Delete first attribute in appropriate order */
+ ret = H5Adelete_by_idx(my_dataset, ".", idx_type, order, (hsize_t)0, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Adelete_by_idx");
+
+ /* Verify the attribute information for first attribute in appropriate order */
+ HDmemset(&ainfo, 0, sizeof(ainfo));
+ ret = H5Aget_info_by_idx(my_dataset, ".", idx_type, order, (hsize_t)0, &ainfo,
+ H5P_DEFAULT);
+ if (new_format) {
+ if (order == H5_ITER_INC) {
+ VERIFY(ainfo.corder, (u + 1), "H5Aget_info_by_idx");
+ } /* end if */
+ else {
+ VERIFY(ainfo.corder, ((max_compact * 2) - (u + 2)), "H5Aget_info_by_idx");
+ } /* end else */
+ } /* end if */
+
+ /* Verify the name for first attribute in appropriate order */
+ HDmemset(tmpname, 0, (size_t)NAME_BUF_SIZE);
+ ret = (herr_t)H5Aget_name_by_idx(my_dataset, ".", idx_type, order, (hsize_t)0,
+ tmpname, (size_t)NAME_BUF_SIZE, H5P_DEFAULT);
+ if (order == H5_ITER_INC)
+ HDsnprintf(attrname, sizeof(attrname), "attr %02u", (u + 1));
+ else
+ HDsnprintf(attrname, sizeof(attrname), "attr %02u",
+ ((max_compact * 2) - (u + 2)));
+ ret = HDstrcmp(attrname, tmpname);
+ VERIFY(ret, 0, "H5Aget_name_by_idx");
+ } /* end for */
+
+ /* Delete last attribute */
+ ret = H5Adelete_by_idx(my_dataset, ".", idx_type, order, (hsize_t)0, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Adelete_by_idx");
+#if 0
+ /* Verify state of attribute storage (empty) */
+ is_empty = H5O__is_attr_empty_test(my_dataset);
+ VERIFY(is_empty, TRUE, "H5O__is_attr_empty_test");
+#endif
+ /* Check for deletion on empty attribute storage again */
+ H5E_BEGIN_TRY
+ {
+ ret = H5Adelete_by_idx(my_dataset, ".", idx_type, order, (hsize_t)0, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+ VERIFY(ret, FAIL, "H5Adelete_by_idx");
+ } /* end for */
+
+ /* Delete attributes in middle */
+
+ /* Work on all the datasets */
+ for (curr_dset = 0; curr_dset < NUM_DSETS; curr_dset++) {
+ switch (curr_dset) {
+ case 0:
+ my_dataset = dset1;
+ break;
+
+ case 1:
+ my_dataset = dset2;
+ break;
+
+ case 2:
+ my_dataset = dset3;
+ break;
+
+ default:
+ HDassert(0 && "Too many datasets!");
+ } /* end switch */
+
+ /* Create attributes, to push into dense form */
+ for (u = 0; u < (max_compact * 2); u++) {
+ /* Create attribute */
+ HDsnprintf(attrname, sizeof(attrname), "attr %02u", u);
+ attr =
+ H5Acreate2(my_dataset, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(attr, FAIL, "H5Acreate2");
+
+ /* Write data into the attribute */
+ ret = H5Awrite(attr, H5T_NATIVE_UINT, &u);
+ CHECK(ret, FAIL, "H5Awrite");
+
+ /* Close attribute */
+ ret = H5Aclose(attr);
+ CHECK(ret, FAIL, "H5Aclose");
+#if 0
+ /* Verify state of object */
+ if (u >= max_compact) {
+ is_dense = H5O__is_attr_dense_test(my_dataset);
+ VERIFY(is_dense, (new_format ? TRUE : FALSE), "H5O__is_attr_dense_test");
+ } /* end if */
+#endif
+ /* Verify information for new attribute */
+ ret = attr_info_by_idx_check(my_dataset, attrname, (hsize_t)u, use_index);
+ CHECK(ret, FAIL, "attr_info_by_idx_check");
+ } /* end for */
+ } /* end for */
+
+ /* Work on all the datasets */
+ for (curr_dset = 0; curr_dset < NUM_DSETS; curr_dset++) {
+ switch (curr_dset) {
+ case 0:
+ my_dataset = dset1;
+ break;
+
+ case 1:
+ my_dataset = dset2;
+ break;
+
+ case 2:
+ my_dataset = dset3;
+ break;
+
+ default:
+ HDassert(0 && "Too many datasets!");
+ } /* end switch */
+
+ /* Delete every other attribute from dense storage, in appropriate order */
+ for (u = 0; u < max_compact; u++) {
+ /* Delete attribute */
+ ret = H5Adelete_by_idx(my_dataset, ".", idx_type, order, (hsize_t)u, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Adelete_by_idx");
+
+ /* Verify the attribute information for first attribute in appropriate order */
+ HDmemset(&ainfo, 0, sizeof(ainfo));
+ ret = H5Aget_info_by_idx(my_dataset, ".", idx_type, order, (hsize_t)u, &ainfo,
+ H5P_DEFAULT);
+ if (new_format) {
+ if (order == H5_ITER_INC) {
+ VERIFY(ainfo.corder, ((u * 2) + 1), "H5Aget_info_by_idx");
+ } /* end if */
+ else {
+ VERIFY(ainfo.corder, ((max_compact * 2) - ((u * 2) + 2)),
+ "H5Aget_info_by_idx");
+ } /* end else */
+ } /* end if */
+
+ /* Verify the name for first attribute in appropriate order */
+ HDmemset(tmpname, 0, (size_t)NAME_BUF_SIZE);
+ ret = (herr_t)H5Aget_name_by_idx(my_dataset, ".", idx_type, order, (hsize_t)u,
+ tmpname, (size_t)NAME_BUF_SIZE, H5P_DEFAULT);
+ if (order == H5_ITER_INC)
+ HDsnprintf(attrname, sizeof(attrname), "attr %02u", ((u * 2) + 1));
+ else
+ HDsnprintf(attrname, sizeof(attrname), "attr %02u",
+ ((max_compact * 2) - ((u * 2) + 2)));
+ ret = HDstrcmp(attrname, tmpname);
+ VERIFY(ret, 0, "H5Aget_name_by_idx");
+ } /* end for */
+ } /* end for */
+
+ /* Work on all the datasets */
+ for (curr_dset = 0; curr_dset < NUM_DSETS; curr_dset++) {
+ switch (curr_dset) {
+ case 0:
+ my_dataset = dset1;
+ break;
+
+ case 1:
+ my_dataset = dset2;
+ break;
+
+ case 2:
+ my_dataset = dset3;
+ break;
+
+ default:
+ HDassert(0 && "Too many datasets!");
+ } /* end switch */
+
+ /* Delete remaining attributes from dense storage, in appropriate order */
+ for (u = 0; u < (max_compact - 1); u++) {
+ /* Delete attribute */
+ ret = H5Adelete_by_idx(my_dataset, ".", idx_type, order, (hsize_t)0, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Adelete_by_idx");
+
+ /* Verify the attribute information for first attribute in appropriate order */
+ HDmemset(&ainfo, 0, sizeof(ainfo));
+ ret = H5Aget_info_by_idx(my_dataset, ".", idx_type, order, (hsize_t)0, &ainfo,
+ H5P_DEFAULT);
+ if (new_format) {
+ if (order == H5_ITER_INC) {
+ VERIFY(ainfo.corder, ((u * 2) + 3), "H5Aget_info_by_idx");
+ } /* end if */
+ else {
+ VERIFY(ainfo.corder, ((max_compact * 2) - ((u * 2) + 4)),
+ "H5Aget_info_by_idx");
+ } /* end else */
+ } /* end if */
+
+ /* Verify the name for first attribute in appropriate order */
+ HDmemset(tmpname, 0, (size_t)NAME_BUF_SIZE);
+ ret = (herr_t)H5Aget_name_by_idx(my_dataset, ".", idx_type, order, (hsize_t)0,
+ tmpname, (size_t)NAME_BUF_SIZE, H5P_DEFAULT);
+ if (order == H5_ITER_INC)
+ HDsnprintf(attrname, sizeof(attrname), "attr %02u", ((u * 2) + 3));
+ else
+ HDsnprintf(attrname, sizeof(attrname), "attr %02u",
+ ((max_compact * 2) - ((u * 2) + 4)));
+ ret = HDstrcmp(attrname, tmpname);
+ VERIFY(ret, 0, "H5Aget_name_by_idx");
+ } /* end for */
+
+ /* Delete last attribute */
+ ret = H5Adelete_by_idx(my_dataset, ".", idx_type, order, (hsize_t)0, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Adelete_by_idx");
+#if 0
+ /* Verify state of attribute storage (empty) */
+ is_empty = H5O__is_attr_empty_test(my_dataset);
+ VERIFY(is_empty, TRUE, "H5O__is_attr_empty_test");
+#endif
+ /* Check for deletion on empty attribute storage again */
+ H5E_BEGIN_TRY
+ {
+ ret = H5Adelete_by_idx(my_dataset, ".", idx_type, order, (hsize_t)0, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+ VERIFY(ret, FAIL, "H5Adelete_by_idx");
+ } /* end for */
+
+ /* Close Datasets */
+ ret = H5Dclose(dset1);
+ CHECK(ret, FAIL, "H5Dclose");
+ ret = H5Dclose(dset2);
+ CHECK(ret, FAIL, "H5Dclose");
+ ret = H5Dclose(dset3);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close file */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+ } /* end for */
+ } /* end for */
+ } /* end for */
+
+ /* Close property list */
+ ret = H5Pclose(dcpl);
+ CHECK(ret, FAIL, "H5Pclose");
+
+ /* Close dataspace */
+ ret = H5Sclose(sid);
+ CHECK(ret, FAIL, "H5Sclose");
+} /* test_attr_delete_by_idx() */
+
+/****************************************************************
+**
+** attr_iterate2_cb(): Revised attribute operator
+**
+****************************************************************/
+static herr_t
+attr_iterate2_cb(hid_t loc_id, const char *attr_name, const H5A_info_t *info, void *_op_data)
+{
+ attr_iter_info_t *op_data = (attr_iter_info_t *)_op_data; /* User data */
+ char attrname[NAME_BUF_SIZE]; /* Object name */
+ H5A_info_t my_info; /* Local attribute info */
+
+ /* Increment # of times the callback was called */
+ op_data->ncalled++;
+
+ /* Get the attribute information directly to compare */
+ if (H5Aget_info_by_name(loc_id, ".", attr_name, &my_info, H5P_DEFAULT) < 0)
+ return (H5_ITER_ERROR);
+
+ /* Check more things for revised attribute iteration (vs. older attribute iteration) */
+ if (info) {
+ /* Check for correct order of iteration */
+ /* (if we are operating in increasing or decreasing order) */
+ if (op_data->order != H5_ITER_NATIVE)
+ if (info->corder != op_data->curr)
+ return (H5_ITER_ERROR);
+
+ /* Compare attribute info structs */
+ if (info->corder_valid != my_info.corder_valid)
+ return (H5_ITER_ERROR);
+ if (info->corder != my_info.corder)
+ return (H5_ITER_ERROR);
+ if (info->cset != my_info.cset)
+ return (H5_ITER_ERROR);
+ if (info->data_size != my_info.data_size)
+ return (H5_ITER_ERROR);
+ } /* end if */
+
+    /* Verify name of attribute */
+ HDsnprintf(attrname, sizeof(attrname), "attr %02u", (unsigned)my_info.corder);
+ if (HDstrcmp(attr_name, attrname) != 0)
+ return (H5_ITER_ERROR);
+
+    /* Check if we've visited this attribute before */
+ if ((size_t)op_data->curr >= op_data->max_visit)
+ return (H5_ITER_ERROR);
+ if (op_data->visited[op_data->curr])
+ return (H5_ITER_ERROR);
+ op_data->visited[op_data->curr] = TRUE;
+
+ /* Advance to next value, in correct direction */
+ if (op_data->order != H5_ITER_DEC)
+ op_data->curr++;
+ else
+ op_data->curr--;
+
+ /* Check for stopping in the middle of iterating */
+ if (op_data->stop > 0)
+ if (--op_data->stop == 0)
+ return (CORDER_ITER_STOP);
+
+ return (H5_ITER_CONT);
+} /* end attr_iterate2_cb() */
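+
+/****************************************************************
+**
+** Illustrative sketch (hedged): the minimal shape of an
+** H5A_operator2_t callback and how H5Aiterate2 drives it, in contrast
+** to the full bookkeeping done by attr_iterate2_cb() above.  The
+** counting helpers are hypothetical.
+**
+****************************************************************/
+#if 0
+static herr_t
+example_count_attrs_cb(hid_t H5_ATTR_UNUSED loc_id, const char H5_ATTR_UNUSED *attr_name,
+                       const H5A_info_t H5_ATTR_UNUSED *info, void *_op_data)
+{
+    /* Just count every attribute visited */
+    (*(unsigned *)_op_data)++;
+
+    return H5_ITER_CONT;
+}
+
+static herr_t
+example_count_attrs(hid_t obj_id, unsigned *count_out)
+{
+    hsize_t idx = 0; /* start at the beginning of the name index */
+
+    *count_out = 0;
+
+    /* Visit every attribute on the object, in increasing name order */
+    return H5Aiterate2(obj_id, H5_INDEX_NAME, H5_ITER_INC, &idx, example_count_attrs_cb, count_out);
+}
+#endif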
+
+#ifndef H5_NO_DEPRECATED_SYMBOLS
+
+/****************************************************************
+**
+** attr_iterate1_cb(): Attribute operator
+**
+****************************************************************/
+#if 0
+static herr_t
+attr_iterate1_cb(hid_t loc_id, const char *attr_name, void *_op_data)
+{
+ return (attr_iterate2_cb(loc_id, attr_name, NULL, _op_data));
+} /* end attr_iterate1_cb() */
+#endif
+#endif /* H5_NO_DEPRECATED_SYMBOLS */
+
+#ifndef NO_ITERATION_RESTART
+/*-------------------------------------------------------------------------
+ * Function: attr_iterate2_fail_cb
+ *
+ * Purpose: Callback routine for iterating over attributes on an object;
+ *              it always returns failure
+ *
+ * Return: Success: 0
+ * Failure: -1
+ *
+ * Programmer: Quincey Koziol
+ * Tuesday, February 20, 2007
+ *
+ *-------------------------------------------------------------------------
+ */
+static int
+attr_iterate2_fail_cb(hid_t H5_ATTR_UNUSED group_id, const char H5_ATTR_UNUSED *attr_name,
+ const H5A_info_t H5_ATTR_UNUSED *info, void H5_ATTR_UNUSED *_op_data)
+{
+ return (H5_ITER_ERROR);
+} /* end attr_iterate2_fail_cb() */
+
+/*-------------------------------------------------------------------------
+ * Function: attr_iterate_check
+ *
+ * Purpose: Check iteration over attributes on an object
+ *
+ * Return: Success: 0
+ * Failure: -1
+ *
+ * Programmer: Quincey Koziol
+ * Tuesday, February 20, 2007
+ *
+ *-------------------------------------------------------------------------
+ */
+static int
+attr_iterate_check(hid_t fid, const char *dsetname, hid_t obj_id, H5_index_t idx_type, H5_iter_order_t order,
+ unsigned max_attrs, attr_iter_info_t *iter_info)
+{
+ unsigned v; /* Local index variable */
+ hsize_t skip; /* # of attributes to skip on object */
+#if 0
+#ifndef H5_NO_DEPRECATED_SYMBOLS
+ unsigned oskip; /* # of attributes to skip on object, with H5Aiterate1 */
+#endif /* H5_NO_DEPRECATED_SYMBOLS */
+#endif
+ int old_nerrs; /* Number of errors when entering this check */
+ herr_t ret; /* Generic return value */
+
+ /* Retrieve the current # of reported errors */
+ old_nerrs = nerrors;
+
+ /* Iterate over attributes on object */
+ iter_info->nskipped = (unsigned)(skip = 0);
+ iter_info->order = order;
+ iter_info->stop = -1;
+ iter_info->ncalled = 0;
+ iter_info->curr = order != H5_ITER_DEC ? 0 : (max_attrs - 1);
+ HDmemset(iter_info->visited, 0, sizeof(hbool_t) * iter_info->max_visit);
+ ret = H5Aiterate2(obj_id, idx_type, order, &skip, attr_iterate2_cb, iter_info);
+ CHECK(ret, FAIL, "H5Aiterate2");
+
+ /* Verify that we visited all the attributes */
+ VERIFY(skip, max_attrs, "H5Aiterate2");
+ for (v = 0; v < max_attrs; v++)
+ VERIFY(iter_info->visited[v], TRUE, "H5Aiterate2");
+
+ /* Iterate over attributes on object */
+ iter_info->nskipped = (unsigned)(skip = 0);
+ iter_info->order = order;
+ iter_info->stop = -1;
+ iter_info->ncalled = 0;
+ iter_info->curr = order != H5_ITER_DEC ? 0 : (max_attrs - 1);
+ HDmemset(iter_info->visited, 0, sizeof(hbool_t) * iter_info->max_visit);
+ ret = H5Aiterate_by_name(fid, dsetname, idx_type, order, &skip, attr_iterate2_cb, iter_info, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Aiterate_by_name");
+
+ /* Verify that we visited all the attributes */
+ VERIFY(skip, max_attrs, "H5Aiterate_by_name");
+ for (v = 0; v < max_attrs; v++)
+ VERIFY(iter_info->visited[v], TRUE, "H5Aiterate_by_name");
+
+ /* Iterate over attributes on object */
+ iter_info->nskipped = (unsigned)(skip = 0);
+ iter_info->order = order;
+ iter_info->stop = -1;
+ iter_info->ncalled = 0;
+ iter_info->curr = order != H5_ITER_DEC ? 0 : (max_attrs - 1);
+ HDmemset(iter_info->visited, 0, sizeof(hbool_t) * iter_info->max_visit);
+ ret = H5Aiterate_by_name(obj_id, ".", idx_type, order, &skip, attr_iterate2_cb, iter_info, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Aiterate_by_name");
+
+ /* Verify that we visited all the attributes */
+ VERIFY(skip, max_attrs, "H5Aiterate_by_name");
+ for (v = 0; v < max_attrs; v++)
+ VERIFY(iter_info->visited[v], TRUE, "H5Aiterate_by_name");
+
+#if 0
+#ifndef H5_NO_DEPRECATED_SYMBOLS
+ /* Iterate over attributes on object, with H5Aiterate1 */
+ iter_info->nskipped = oskip = 0;
+ iter_info->order = order;
+ iter_info->stop = -1;
+ iter_info->ncalled = 0;
+ iter_info->curr = order != H5_ITER_DEC ? 0 : (max_attrs - 1);
+ HDmemset(iter_info->visited, 0, sizeof(hbool_t) * iter_info->max_visit);
+ ret = H5Aiterate1(obj_id, &oskip, attr_iterate1_cb, iter_info);
+ CHECK(ret, FAIL, "H5Aiterate1");
+
+ /* Verify that we visited all the attributes */
+ VERIFY(skip, max_attrs, "H5Aiterate1");
+ for (v = 0; v < max_attrs; v++)
+ VERIFY(iter_info->visited[v], TRUE, "H5Aiterate1");
+#endif /* H5_NO_DEPRECATED_SYMBOLS */
+#endif
+
+ /* Skip over some attributes on object */
+ iter_info->nskipped = (unsigned)(skip = max_attrs / 2);
+ iter_info->order = order;
+ iter_info->stop = -1;
+ iter_info->ncalled = 0;
+ iter_info->curr = order != H5_ITER_DEC ? skip : ((max_attrs - 1) - skip);
+ HDmemset(iter_info->visited, 0, sizeof(hbool_t) * iter_info->max_visit);
+ ret = H5Aiterate2(obj_id, idx_type, order, &skip, attr_iterate2_cb, iter_info);
+ CHECK(ret, FAIL, "H5Aiterate2");
+
+ /* Verify that we visited all the attributes */
+ VERIFY(skip, max_attrs, "H5Aiterate2");
+ if (order == H5_ITER_INC) {
+ for (v = 0; v < (max_attrs / 2); v++)
+ VERIFY(iter_info->visited[v + (max_attrs / 2)], TRUE, "H5Aiterate2");
+ } /* end if */
+ else if (order == H5_ITER_DEC) {
+ for (v = 0; v < (max_attrs / 2); v++)
+ VERIFY(iter_info->visited[v], TRUE, "H5Aiterate2");
+ } /* end if */
+ else {
+ unsigned nvisit = 0; /* # of attributes visited */
+
+ HDassert(order == H5_ITER_NATIVE);
+ for (v = 0; v < max_attrs; v++)
+ if (iter_info->visited[v] == TRUE)
+ nvisit++;
+
+ VERIFY(skip, (max_attrs / 2), "H5Aiterate2");
+ } /* end else */
+
+ /* Skip over some attributes on object */
+ iter_info->nskipped = (unsigned)(skip = max_attrs / 2);
+ iter_info->order = order;
+ iter_info->stop = -1;
+ iter_info->ncalled = 0;
+ iter_info->curr = order != H5_ITER_DEC ? skip : ((max_attrs - 1) - skip);
+ HDmemset(iter_info->visited, 0, sizeof(hbool_t) * iter_info->max_visit);
+ ret = H5Aiterate_by_name(fid, dsetname, idx_type, order, &skip, attr_iterate2_cb, iter_info, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Aiterate_by_name");
+
+ /* Verify that we visited all the attributes */
+ VERIFY(skip, max_attrs, "H5Aiterate_by_name");
+ if (order == H5_ITER_INC) {
+ for (v = 0; v < (max_attrs / 2); v++)
+ VERIFY(iter_info->visited[v + (max_attrs / 2)], TRUE, "H5Aiterate_by_name");
+ } /* end if */
+ else if (order == H5_ITER_DEC) {
+ for (v = 0; v < (max_attrs / 2); v++)
+ VERIFY(iter_info->visited[v], TRUE, "H5Aiterate_by_name");
+ } /* end if */
+ else {
+ unsigned nvisit = 0; /* # of attributes visited */
+
+ HDassert(order == H5_ITER_NATIVE);
+ for (v = 0; v < max_attrs; v++)
+ if (iter_info->visited[v] == TRUE)
+ nvisit++;
+
+ VERIFY(skip, (max_attrs / 2), "H5Aiterate_by_name");
+ } /* end else */
+
+ /* Skip over some attributes on object */
+ iter_info->nskipped = (unsigned)(skip = max_attrs / 2);
+ iter_info->order = order;
+ iter_info->stop = -1;
+ iter_info->ncalled = 0;
+ iter_info->curr = order != H5_ITER_DEC ? skip : ((max_attrs - 1) - skip);
+ HDmemset(iter_info->visited, 0, sizeof(hbool_t) * iter_info->max_visit);
+ ret = H5Aiterate_by_name(obj_id, ".", idx_type, order, &skip, attr_iterate2_cb, iter_info, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Aiterate_by_name");
+
+ /* Verify that we visited all the attributes */
+ VERIFY(skip, max_attrs, "H5Aiterate_by_name");
+ if (order == H5_ITER_INC) {
+ for (v = 0; v < (max_attrs / 2); v++)
+ VERIFY(iter_info->visited[v + (max_attrs / 2)], TRUE, "H5Aiterate_by_name");
+ } /* end if */
+ else if (order == H5_ITER_DEC) {
+ for (v = 0; v < (max_attrs / 2); v++)
+ VERIFY(iter_info->visited[v], TRUE, "H5Aiterate_by_name");
+ } /* end if */
+ else {
+ unsigned nvisit = 0; /* # of attributes visited */
+
+ HDassert(order == H5_ITER_NATIVE);
+ for (v = 0; v < max_attrs; v++)
+ if (iter_info->visited[v] == TRUE)
+ nvisit++;
+
+ VERIFY(skip, (max_attrs / 2), "H5Aiterate_by_name");
+ } /* end else */
+
+#if 0
+#ifndef H5_NO_DEPRECATED_SYMBOLS
+ /* Skip over some attributes on object, with H5Aiterate1 */
+ iter_info->nskipped = oskip = max_attrs / 2;
+ iter_info->order = order;
+ iter_info->stop = -1;
+ iter_info->ncalled = 0;
+ iter_info->curr = order != H5_ITER_DEC ? (unsigned)oskip : ((max_attrs - 1) - oskip);
+ HDmemset(iter_info->visited, 0, sizeof(hbool_t) * iter_info->max_visit);
+ ret = H5Aiterate1(obj_id, &oskip, attr_iterate1_cb, iter_info);
+ CHECK(ret, FAIL, "H5Aiterate1");
+
+ /* Verify that we visited all the attributes */
+ VERIFY(oskip, max_attrs, "H5Aiterate1");
+ if (order == H5_ITER_INC) {
+ for (v = 0; v < (max_attrs / 2); v++)
+ VERIFY(iter_info->visited[v + (max_attrs / 2)], TRUE, "H5Aiterate1");
+ } /* end if */
+ else if (order == H5_ITER_DEC) {
+ for (v = 0; v < (max_attrs / 2); v++)
+ VERIFY(iter_info->visited[v], TRUE, "H5Aiterate1");
+ } /* end if */
+ else {
+ unsigned nvisit = 0; /* # of attributes visited */
+
+ HDassert(order == H5_ITER_NATIVE);
+ for (v = 0; v < max_attrs; v++)
+ if (iter_info->visited[v] == TRUE)
+ nvisit++;
+
+ VERIFY(skip, (max_attrs / 2), "H5Aiterate1");
+ } /* end else */
+#endif /* H5_NO_DEPRECATED_SYMBOLS */
+#endif
+
+ /* Iterate over attributes on object, stopping in the middle */
+ iter_info->nskipped = (unsigned)(skip = 0);
+ iter_info->order = order;
+ iter_info->stop = 3;
+ iter_info->ncalled = 0;
+ iter_info->curr = order != H5_ITER_DEC ? 0 : (max_attrs - 1);
+ HDmemset(iter_info->visited, 0, sizeof(hbool_t) * iter_info->max_visit);
+ ret = H5Aiterate2(obj_id, idx_type, order, &skip, attr_iterate2_cb, iter_info);
+ CHECK(ret, FAIL, "H5Aiterate2");
+ VERIFY(ret, CORDER_ITER_STOP, "H5Aiterate2");
+ VERIFY(iter_info->ncalled, 3, "H5Aiterate2");
+
+ /* Iterate over attributes on object, stopping in the middle */
+ iter_info->nskipped = (unsigned)(skip = 0);
+ iter_info->order = order;
+ iter_info->stop = 3;
+ iter_info->ncalled = 0;
+ iter_info->curr = order != H5_ITER_DEC ? 0 : (max_attrs - 1);
+ HDmemset(iter_info->visited, 0, sizeof(hbool_t) * iter_info->max_visit);
+ ret = H5Aiterate_by_name(fid, dsetname, idx_type, order, &skip, attr_iterate2_cb, iter_info, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Aiterate_by_name");
+ VERIFY(ret, CORDER_ITER_STOP, "H5Aiterate_by_name");
+ VERIFY(iter_info->ncalled, 3, "H5Aiterate_by_name");
+
+ /* Iterate over attributes on object, stopping in the middle */
+ iter_info->nskipped = (unsigned)(skip = 0);
+ iter_info->order = order;
+ iter_info->stop = 3;
+ iter_info->ncalled = 0;
+ iter_info->curr = order != H5_ITER_DEC ? 0 : (max_attrs - 1);
+ HDmemset(iter_info->visited, 0, sizeof(hbool_t) * iter_info->max_visit);
+ ret = H5Aiterate_by_name(obj_id, ".", idx_type, order, &skip, attr_iterate2_cb, iter_info, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Aiterate_by_name");
+ VERIFY(ret, CORDER_ITER_STOP, "H5Aiterate_by_name");
+ VERIFY(iter_info->ncalled, 3, "H5Aiterate_by_name");
+
+#if 0
+#ifndef H5_NO_DEPRECATED_SYMBOLS
+ /* Iterate over attributes on object, stopping in the middle, with H5Aiterate1() */
+ iter_info->nskipped = oskip = 0;
+ iter_info->order = order;
+ iter_info->stop = 3;
+ iter_info->ncalled = 0;
+ iter_info->curr = order != H5_ITER_DEC ? 0 : (max_attrs - 1);
+ HDmemset(iter_info->visited, 0, sizeof(hbool_t) * iter_info->max_visit);
+ ret = H5Aiterate1(obj_id, &oskip, attr_iterate1_cb, iter_info);
+ CHECK(ret, FAIL, "H5Aiterate1");
+ VERIFY(ret, CORDER_ITER_STOP, "H5Aiterate1");
+ VERIFY(iter_info->ncalled, 3, "H5Aiterate1");
+#endif /* H5_NO_DEPRECATED_SYMBOLS */
+#endif
+
+ /* Check for iteration routine indicating failure */
+ skip = 0;
+ H5E_BEGIN_TRY
+ {
+ ret = H5Aiterate2(obj_id, idx_type, order, &skip, attr_iterate2_fail_cb, NULL);
+ }
+ H5E_END_TRY;
+ VERIFY(ret, FAIL, "H5Aiterate2");
+
+ skip = 0;
+ H5E_BEGIN_TRY
+ {
+ ret = H5Aiterate_by_name(fid, dsetname, idx_type, order, &skip, attr_iterate2_fail_cb, NULL,
+ H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+ VERIFY(ret, FAIL, "H5Aiterate_by_name");
+
+ skip = 0;
+ H5E_BEGIN_TRY
+ {
+ ret =
+ H5Aiterate_by_name(obj_id, ".", idx_type, order, &skip, attr_iterate2_fail_cb, NULL, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+ VERIFY(ret, FAIL, "H5Aiterate_by_name");
+
+ /* Retrieve current # of errors */
+ if (old_nerrs == nerrors)
+ return (0);
+ else
+ return (-1);
+} /* end attr_iterate_check() */
+#endif
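+
+/* For reference: the hsize_t index passed by address to H5Aiterate2() and
+ * H5Aiterate_by_name() is an in/out parameter; it selects the attribute to
+ * start at and, on return, holds the position at which an interrupted
+ * iteration can be resumed.  A minimal resume loop (illustrative only,
+ * assuming iter_info is set up as in attr_iterate_check() above):
+ *
+ *     hsize_t idx = 0;
+ *     herr_t  status;
+ *     do {
+ *         status = H5Aiterate2(obj_id, H5_INDEX_NAME, H5_ITER_INC, &idx,
+ *                              attr_iterate2_cb, &iter_info);
+ *     } while (status > 0);
+ *
+ * A positive return value means the callback short-circuited, so looping
+ * resumes the iteration from the updated idx.
+ */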
+
+/****************************************************************
+**
+** test_attr_iterate2(): Test basic H5A (attribute) code.
+** Tests iterating over attributes by index
+**
+****************************************************************/
+static void
+test_attr_iterate2(hbool_t new_format, hid_t fcpl, hid_t fapl)
+{
+ hid_t fid; /* HDF5 File ID */
+ hid_t dset1, dset2, dset3; /* Dataset IDs */
+ hid_t my_dataset; /* Current dataset ID */
+ hid_t sid; /* Dataspace ID */
+ hid_t attr; /* Attribute ID */
+ hid_t dcpl; /* Dataset creation property list ID */
+ unsigned max_compact; /* Maximum # of attributes to store compactly */
+ unsigned min_dense; /* Minimum # of attributes to store "densely" */
+#if 0
+ htri_t is_empty; /* Are there any attributes? */
+ htri_t is_dense; /* Are attributes stored densely? */
+ hsize_t nattrs; /* Number of attributes on object */
+ hsize_t name_count; /* # of records in name index */
+ hsize_t corder_count; /* # of records in creation order index */
+#endif
+ H5_index_t idx_type; /* Type of index to operate on */
+ H5_iter_order_t order; /* Order within the index */
+ attr_iter_info_t iter_info; /* Iterator info */
+ hbool_t *visited = NULL; /* Array of flags for visiting attributes */
+#ifndef NO_ITERATION_RESTART
+ hsize_t idx; /* Start index for iteration */
+#endif
+ unsigned use_index; /* Use index on creation order values */
+ const char *dsetname; /* Name of dataset for attributes */
+ char attrname[NAME_BUF_SIZE]; /* Name of attribute */
+ unsigned curr_dset; /* Current dataset to work on */
+ unsigned u; /* Local index variable */
+ herr_t ret; /* Generic return value */
+
+ /* Create dataspace for dataset & attributes */
+ sid = H5Screate(H5S_SCALAR);
+ CHECK(sid, FAIL, "H5Screate");
+
+ /* Create dataset creation property list */
+ if (dcpl_g == H5P_DEFAULT) {
+ dcpl = H5Pcreate(H5P_DATASET_CREATE);
+ CHECK(dcpl, FAIL, "H5Pcreate");
+ }
+ else {
+ dcpl = H5Pcopy(dcpl_g);
+ CHECK(dcpl, FAIL, "H5Pcopy");
+ }
+
+ /* Query the attribute creation properties */
+ ret = H5Pget_attr_phase_change(dcpl, &max_compact, &min_dense);
+ CHECK(ret, FAIL, "H5Pget_attr_phase_change");
+
+ /* Allocate the "visited link" array */
+ iter_info.max_visit = max_compact * 2;
+ visited = (hbool_t *)HDmalloc(sizeof(hbool_t) * iter_info.max_visit);
+ CHECK_PTR(visited, "HDmalloc");
+ iter_info.visited = visited;
+
+ /* Loop over operating on different indices on attribute fields */
+ for (idx_type = H5_INDEX_NAME; idx_type <= H5_INDEX_CRT_ORDER; idx_type++) {
+ /* Loop over operating in different orders */
+ for (order = H5_ITER_INC; order <= H5_ITER_DEC; order++) {
+ /* Loop over using index for creation order value */
+ for (use_index = FALSE; use_index <= TRUE; use_index++) {
+ /* Print appropriate test message */
+ if (idx_type == H5_INDEX_CRT_ORDER) {
+ if (order == H5_ITER_INC) {
+ if (use_index)
+ MESSAGE(5, ("Testing Iterating over Attributes By Creation Order Index in "
+ "Increasing Order w/Creation Order Index\n"))
+ else
+ MESSAGE(5, ("Testing Iterating over Attributes By Creation Order Index in "
+ "Increasing Order w/o Creation Order Index\n"))
+ } /* end if */
+ else {
+ if (use_index)
+ MESSAGE(5, ("Testing Iterating over Attributes By Creation Order Index in "
+ "Decreasing Order w/Creation Order Index\n"))
+ else
+ MESSAGE(5, ("Testing Iterating over Attributes By Creation Order Index in "
+ "Decreasing Order w/o Creation Order Index\n"))
+ } /* end else */
+ } /* end if */
+ else {
+ if (order == H5_ITER_INC) {
+ if (use_index)
+ MESSAGE(5, ("Testing Iterating over Attributes By Name Index in Increasing Order "
+ "w/Creation Order Index\n"))
+ else
+ MESSAGE(5, ("Testing Iterating over Attributes By Name Index in Increasing Order "
+ "w/o Creation Order Index\n"))
+ } /* end if */
+ else {
+#ifndef NO_DECREASING_ALPHA_ITER_ORDER
+ if (use_index)
+ MESSAGE(5, ("Testing Iterating over Attributes By Name Index in Decreasing Order "
+ "w/Creation Order Index\n"))
+ else
+ MESSAGE(5, ("Testing Iterating over Attributes By Name Index in Decreasing Order "
+ "w/o Creation Order Index\n"))
+#else
+ continue;
+#endif
+ } /* end else */
+ } /* end else */
+
+ /* Create file */
+ fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl, fapl);
+ CHECK(fid, FAIL, "H5Fcreate");
+
+ /* Set attribute creation order tracking & indexing for object */
+ if (new_format == TRUE) {
+ ret = H5Pset_attr_creation_order(
+ dcpl, (H5P_CRT_ORDER_TRACKED | (use_index ? H5P_CRT_ORDER_INDEXED : (unsigned)0)));
+ CHECK(ret, FAIL, "H5Pset_attr_creation_order");
+ } /* end if */
+
+ /* Create datasets */
+ dset1 = H5Dcreate2(fid, DSET1_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT);
+ CHECK(dset1, FAIL, "H5Dcreate2");
+ dset2 = H5Dcreate2(fid, DSET2_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT);
+ CHECK(dset2, FAIL, "H5Dcreate2");
+ dset3 = H5Dcreate2(fid, DSET3_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT);
+ CHECK(dset3, FAIL, "H5Dcreate2");
+
+ /* Work on all the datasets */
+ for (curr_dset = 0; curr_dset < NUM_DSETS; curr_dset++) {
+ switch (curr_dset) {
+ case 0:
+ my_dataset = dset1;
+ dsetname = DSET1_NAME;
+ break;
+
+ case 1:
+ my_dataset = dset2;
+ dsetname = DSET2_NAME;
+ break;
+
+ case 2:
+ my_dataset = dset3;
+ dsetname = DSET3_NAME;
+ break;
+
+ default:
+ HDassert(0 && "Too many datasets!");
+ } /* end switch */
+#if 0
+ /* Check on dataset's attribute storage status */
+ is_empty = H5O__is_attr_empty_test(my_dataset);
+ VERIFY(is_empty, TRUE, "H5O__is_attr_empty_test");
+ is_dense = H5O__is_attr_dense_test(my_dataset);
+ VERIFY(is_dense, FALSE, "H5O__is_attr_dense_test");
+#endif
+ /* Check for iterating over object with no attributes (should be OK) */
+ ret = H5Aiterate2(my_dataset, idx_type, order, NULL, attr_iterate2_cb, NULL);
+ CHECK(ret, FAIL, "H5Aiterate2");
+
+ ret = H5Aiterate_by_name(fid, dsetname, idx_type, order, NULL, attr_iterate2_cb, NULL,
+ H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Aiterate_by_name");
+
+ ret = H5Aiterate_by_name(my_dataset, ".", idx_type, order, NULL, attr_iterate2_cb, NULL,
+ H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Aiterate_by_name");
+
+ /* Create attributes, up to limit of compact form */
+ for (u = 0; u < max_compact; u++) {
+ /* Create attribute */
+ HDsnprintf(attrname, sizeof(attrname), "attr %02u", u);
+ attr =
+ H5Acreate2(my_dataset, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(attr, FAIL, "H5Acreate2");
+
+ /* Write data into the attribute */
+ ret = H5Awrite(attr, H5T_NATIVE_UINT, &u);
+ CHECK(ret, FAIL, "H5Awrite");
+
+ /* Close attribute */
+ ret = H5Aclose(attr);
+ CHECK(ret, FAIL, "H5Aclose");
+
+ /* Verify information for new attribute */
+ ret = attr_info_by_idx_check(my_dataset, attrname, (hsize_t)u, use_index);
+ CHECK(ret, FAIL, "attr_info_by_idx_check");
+ } /* end for */
+#if 0
+ /* Verify state of object */
+ ret = H5O__num_attrs_test(my_dataset, &nattrs);
+ CHECK(ret, FAIL, "H5O__num_attrs_test");
+ VERIFY(nattrs, max_compact, "H5O__num_attrs_test");
+ is_empty = H5O__is_attr_empty_test(my_dataset);
+ VERIFY(is_empty, FALSE, "H5O__is_attr_empty_test");
+ is_dense = H5O__is_attr_dense_test(my_dataset);
+ VERIFY(is_dense, FALSE, "H5O__is_attr_dense_test");
+#endif
+#ifndef NO_ITERATION_RESTART
+ /* Check for out of bound iteration */
+ idx = u;
+ H5E_BEGIN_TRY
+ {
+ ret = H5Aiterate2(my_dataset, idx_type, order, &idx, attr_iterate2_cb, NULL);
+ }
+ H5E_END_TRY;
+ VERIFY(ret, FAIL, "H5Aiterate2");
+
+ idx = u;
+ H5E_BEGIN_TRY
+ {
+ ret = H5Aiterate_by_name(fid, dsetname, idx_type, order, &idx, attr_iterate2_cb, NULL,
+ H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+ VERIFY(ret, FAIL, "H5Aiterate_by_name");
+
+ idx = u;
+ H5E_BEGIN_TRY
+ {
+ ret = H5Aiterate_by_name(my_dataset, ".", idx_type, order, &idx, attr_iterate2_cb,
+ NULL, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+ VERIFY(ret, FAIL, "H5Aiterate_by_name");
+
+ /* Test iteration over attributes stored compactly */
+ ret = attr_iterate_check(fid, dsetname, my_dataset, idx_type, order, u, &iter_info);
+ CHECK(ret, FAIL, "attr_iterate_check");
+#endif
+ } /* end for */
+
+ /* Work on all the datasets */
+ for (curr_dset = 0; curr_dset < NUM_DSETS; curr_dset++) {
+ switch (curr_dset) {
+ case 0:
+ my_dataset = dset1;
+ dsetname = DSET1_NAME;
+ break;
+
+ case 1:
+ my_dataset = dset2;
+ dsetname = DSET2_NAME;
+ break;
+
+ case 2:
+ my_dataset = dset3;
+ dsetname = DSET3_NAME;
+ break;
+
+ default:
+ HDassert(0 && "Too many datasets!");
+ } /* end switch */
+
+ /* Create more attributes, to push into dense form */
+ for (u = max_compact; u < (max_compact * 2); u++) {
+ /* Create attribute */
+ HDsnprintf(attrname, sizeof(attrname), "attr %02u", u);
+ attr =
+ H5Acreate2(my_dataset, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(attr, FAIL, "H5Acreate2");
+
+ /* Write data into the attribute */
+ ret = H5Awrite(attr, H5T_NATIVE_UINT, &u);
+ CHECK(ret, FAIL, "H5Awrite");
+
+ /* Close attribute */
+ ret = H5Aclose(attr);
+ CHECK(ret, FAIL, "H5Aclose");
+#if 0
+ /* Verify state of object */
+ if (u >= max_compact) {
+ is_dense = H5O__is_attr_dense_test(my_dataset);
+ VERIFY(is_dense, (new_format ? TRUE : FALSE), "H5O__is_attr_dense_test");
+ } /* end if */
+#endif
+ /* Verify information for new attribute */
+ ret = attr_info_by_idx_check(my_dataset, attrname, (hsize_t)u, use_index);
+ CHECK(ret, FAIL, "attr_info_by_idx_check");
+ } /* end for */
+#if 0
+ /* Verify state of object */
+ ret = H5O__num_attrs_test(my_dataset, &nattrs);
+ CHECK(ret, FAIL, "H5O__num_attrs_test");
+ VERIFY(nattrs, (max_compact * 2), "H5O__num_attrs_test");
+ is_empty = H5O__is_attr_empty_test(my_dataset);
+ VERIFY(is_empty, FALSE, "H5O__is_attr_empty_test");
+ is_dense = H5O__is_attr_dense_test(my_dataset);
+ VERIFY(is_dense, (new_format ? TRUE : FALSE), "H5O__is_attr_dense_test");
+
+ if (new_format) {
+ /* Retrieve & verify # of records in the name & creation order indices */
+ ret = H5O__attr_dense_info_test(my_dataset, &name_count, &corder_count);
+ CHECK(ret, FAIL, "H5O__attr_dense_info_test");
+ if (use_index)
+ VERIFY(name_count, corder_count, "H5O__attr_dense_info_test");
+ VERIFY(name_count, (max_compact * 2), "H5O__attr_dense_info_test");
+ } /* end if */
+#endif
+#ifndef NO_ITERATION_RESTART
+ /* Check for out of bound iteration */
+ idx = u;
+ H5E_BEGIN_TRY
+ {
+ ret = H5Aiterate2(my_dataset, idx_type, order, &idx, attr_iterate2_cb, NULL);
+ }
+ H5E_END_TRY;
+ VERIFY(ret, FAIL, "H5Aiterate2");
+
+ idx = u;
+ H5E_BEGIN_TRY
+ {
+ ret = H5Aiterate_by_name(fid, dsetname, idx_type, order, &idx, attr_iterate2_cb, NULL,
+ H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+ VERIFY(ret, FAIL, "H5Aiterate_by_name");
+
+ idx = u;
+ H5E_BEGIN_TRY
+ {
+ ret = H5Aiterate_by_name(my_dataset, ".", idx_type, order, &idx, attr_iterate2_cb,
+ NULL, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+ VERIFY(ret, FAIL, "H5Aiterate_by_name");
+
+ /* Test iteration over attributes stored densely */
+ ret = attr_iterate_check(fid, dsetname, my_dataset, idx_type, order, u, &iter_info);
+ CHECK(ret, FAIL, "attr_iterate_check");
+#endif
+ } /* end for */
+
+ /* Close Datasets */
+ ret = H5Dclose(dset1);
+ CHECK(ret, FAIL, "H5Dclose");
+ ret = H5Dclose(dset2);
+ CHECK(ret, FAIL, "H5Dclose");
+ ret = H5Dclose(dset3);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close file */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+ } /* end for */
+ } /* end for */
+ } /* end for */
+
+ /* Close property list */
+ ret = H5Pclose(dcpl);
+ CHECK(ret, FAIL, "H5Pclose");
+
+ /* Close dataspace */
+ ret = H5Sclose(sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Free the "visited link" array */
+ HDfree(visited);
+} /* test_attr_iterate2() */
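+
+/* For reference: the compact/dense attribute storage thresholds exercised
+ * above live on the dataset creation property list and can be set explicitly
+ * instead of only queried.  A minimal sketch (the 8/6 values are illustrative
+ * placeholders for max_compact/min_dense):
+ *
+ *     hid_t dcpl = H5Pcreate(H5P_DATASET_CREATE);
+ *     H5Pset_attr_phase_change(dcpl, 8, 6);
+ *     H5Pset_attr_creation_order(dcpl, H5P_CRT_ORDER_TRACKED |
+ *                                      H5P_CRT_ORDER_INDEXED);
+ */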
+
+/*-------------------------------------------------------------------------
+ * Function: attr_open_by_idx_check
+ *
+ * Purpose: Check opening attribute by index on an object
+ *
+ * Return: Success: 0
+ * Failure: -1
+ *
+ * Programmer: Quincey Koziol
+ * Wednesday, February 21, 2007
+ *
+ *-------------------------------------------------------------------------
+ */
+static int
+attr_open_by_idx_check(hid_t obj_id, H5_index_t idx_type, H5_iter_order_t order, unsigned max_attrs)
+{
+ hid_t attr_id; /* ID of attribute to test */
+ H5A_info_t ainfo; /* Attribute info */
+ int old_nerrs; /* Number of errors when entering this check */
+ unsigned u; /* Local index variable */
+ herr_t ret; /* Generic return value */
+
+ /* Retrieve the current # of reported errors */
+ old_nerrs = nerrors;
+
+ /* Open each attribute on object by index and check that it's the correct one */
+ for (u = 0; u < max_attrs; u++) {
+ /* Open the attribute */
+ attr_id = H5Aopen_by_idx(obj_id, ".", idx_type, order, (hsize_t)u, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(attr_id, FAIL, "H5Aopen_by_idx");
+
+ /* Get the attribute's information */
+ ret = H5Aget_info(attr_id, &ainfo);
+ CHECK(ret, FAIL, "H5Aget_info");
+
+ /* Check that the object is the correct one */
+ if (order == H5_ITER_INC) {
+ VERIFY(ainfo.corder, u, "H5Aget_info");
+ } /* end if */
+ else if (order == H5_ITER_DEC) {
+ VERIFY(ainfo.corder, (max_attrs - (u + 1)), "H5Aget_info");
+ } /* end if */
+ else {
+ /* XXX: What to do about native order? */
+ } /* end else */
+
+ /* Close attribute */
+ ret = H5Aclose(attr_id);
+ CHECK(ret, FAIL, "H5Aclose");
+ } /* end for */
+
+ /* Retrieve current # of errors */
+ if (old_nerrs == nerrors)
+ return (0);
+ else
+ return (-1);
+} /* end attr_open_by_idx_check() */
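+
+/* For reference: H5Aopen_by_idx() can address an attribute through either the
+ * name index or the creation-order index; the latter only resolves when
+ * creation-order tracking was enabled on the object, as done elsewhere in this
+ * file with H5Pset_attr_creation_order().  A minimal sketch (illustrative
+ * only):
+ *
+ *     hid_t aid = H5Aopen_by_idx(obj_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC,
+ *                                (hsize_t)0, H5P_DEFAULT, H5P_DEFAULT);
+ */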
+
+/****************************************************************
+**
+** test_attr_open_by_idx(): Test basic H5A (attribute) code.
+** Tests opening attributes by index
+**
+****************************************************************/
+static void
+test_attr_open_by_idx(hbool_t new_format, hid_t fcpl, hid_t fapl)
+{
+ hid_t fid; /* HDF5 File ID */
+ hid_t dset1, dset2, dset3; /* Dataset IDs */
+ hid_t my_dataset; /* Current dataset ID */
+ hid_t sid; /* Dataspace ID */
+ hid_t attr; /* Attribute ID */
+ hid_t dcpl; /* Dataset creation property list ID */
+ unsigned max_compact; /* Maximum # of attributes to store compactly */
+ unsigned min_dense; /* Minimum # of attributes to store "densely" */
+#if 0
+ htri_t is_empty; /* Are there any attributes? */
+ htri_t is_dense; /* Are attributes stored densely? */
+ hsize_t nattrs; /* Number of attributes on object */
+ hsize_t name_count; /* # of records in name index */
+ hsize_t corder_count; /* # of records in creation order index */
+#endif
+ H5_index_t idx_type; /* Type of index to operate on */
+ H5_iter_order_t order; /* Order within the index */
+ unsigned use_index; /* Use index on creation order values */
+ char attrname[NAME_BUF_SIZE]; /* Name of attribute */
+ unsigned curr_dset; /* Current dataset to work on */
+ unsigned u; /* Local index variable */
+ hid_t ret_id; /* Generic hid_t return value */
+ herr_t ret; /* Generic return value */
+
+ /* Create dataspace for dataset & attributes */
+ sid = H5Screate(H5S_SCALAR);
+ CHECK(sid, FAIL, "H5Screate");
+
+ /* Create dataset creation property list */
+ if (dcpl_g == H5P_DEFAULT) {
+ dcpl = H5Pcreate(H5P_DATASET_CREATE);
+ CHECK(dcpl, FAIL, "H5Pcreate");
+ }
+ else {
+ dcpl = H5Pcopy(dcpl_g);
+ CHECK(dcpl, FAIL, "H5Pcopy");
+ }
+
+ /* Query the attribute creation properties */
+ ret = H5Pget_attr_phase_change(dcpl, &max_compact, &min_dense);
+ CHECK(ret, FAIL, "H5Pget_attr_phase_change");
+
+ /* Loop over operating on different indices on attribute fields */
+ for (idx_type = H5_INDEX_NAME; idx_type <= H5_INDEX_CRT_ORDER; idx_type++) {
+ /* Loop over operating in different orders */
+ for (order = H5_ITER_INC; order <= H5_ITER_DEC; order++) {
+ /* Loop over using index for creation order value */
+ for (use_index = FALSE; use_index <= TRUE; use_index++) {
+ /* Print appropriate test message */
+ if (idx_type == H5_INDEX_CRT_ORDER) {
+ if (order == H5_ITER_INC) {
+ if (use_index)
+ MESSAGE(5, ("Testing Opening Attributes By Creation Order Index in Increasing "
+ "Order w/Creation Order Index\n"))
+ else
+ MESSAGE(5, ("Testing Opening Attributes By Creation Order Index in Increasing "
+ "Order w/o Creation Order Index\n"))
+ } /* end if */
+ else {
+ if (use_index)
+ MESSAGE(5, ("Testing Opening Attributes By Creation Order Index in Decreasing "
+ "Order w/Creation Order Index\n"))
+ else
+ MESSAGE(5, ("Testing Opening Attributes By Creation Order Index in Decreasing "
+ "Order w/o Creation Order Index\n"))
+ } /* end else */
+ } /* end if */
+ else {
+ if (order == H5_ITER_INC) {
+ if (use_index)
+ MESSAGE(5, ("Testing Opening Attributes By Name Index in Increasing Order "
+ "w/Creation Order Index\n"))
+ else
+ MESSAGE(5, ("Testing Opening Attributes By Name Index in Increasing Order w/o "
+ "Creation Order Index\n"))
+ } /* end if */
+ else {
+#ifndef NO_DECREASING_ALPHA_ITER_ORDER
+ if (use_index)
+ MESSAGE(5, ("Testing Opening Attributes By Name Index in Decreasing Order "
+ "w/Creation Order Index\n"))
+ else
+ MESSAGE(5, ("Testing Opening Attributes By Name Index in Decreasing Order w/o "
+ "Creation Order Index\n"))
+#else
+ continue;
+#endif
+ } /* end else */
+ } /* end else */
+
+ /* Create file */
+ fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl, fapl);
+ CHECK(fid, FAIL, "H5Fcreate");
+
+ /* Set attribute creation order tracking & indexing for object */
+ if (new_format == TRUE) {
+ ret = H5Pset_attr_creation_order(
+ dcpl, (H5P_CRT_ORDER_TRACKED | (use_index ? H5P_CRT_ORDER_INDEXED : (unsigned)0)));
+ CHECK(ret, FAIL, "H5Pset_attr_creation_order");
+ } /* end if */
+
+ /* Create datasets */
+ dset1 = H5Dcreate2(fid, DSET1_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT);
+ CHECK(dset1, FAIL, "H5Dcreate2");
+ dset2 = H5Dcreate2(fid, DSET2_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT);
+ CHECK(dset2, FAIL, "H5Dcreate2");
+ dset3 = H5Dcreate2(fid, DSET3_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT);
+ CHECK(dset3, FAIL, "H5Dcreate2");
+
+ /* Work on all the datasets */
+ for (curr_dset = 0; curr_dset < NUM_DSETS; curr_dset++) {
+ switch (curr_dset) {
+ case 0:
+ my_dataset = dset1;
+ break;
+
+ case 1:
+ my_dataset = dset2;
+ break;
+
+ case 2:
+ my_dataset = dset3;
+ break;
+
+ default:
+ HDassert(0 && "Too many datasets!");
+ } /* end switch */
+#if 0
+ /* Check on dataset's attribute storage status */
+ is_empty = H5O__is_attr_empty_test(my_dataset);
+ VERIFY(is_empty, TRUE, "H5O__is_attr_empty_test");
+ is_dense = H5O__is_attr_dense_test(my_dataset);
+ VERIFY(is_dense, FALSE, "H5O__is_attr_dense_test");
+#endif
+ /* Check for opening an attribute on an object with no attributes */
+ H5E_BEGIN_TRY
+ {
+ ret_id = H5Aopen_by_idx(my_dataset, ".", idx_type, order, (hsize_t)0, H5P_DEFAULT,
+ H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+ VERIFY(ret_id, FAIL, "H5Aopen_by_idx");
+
+ /* Create attributes, up to limit of compact form */
+ for (u = 0; u < max_compact; u++) {
+ /* Create attribute */
+ HDsnprintf(attrname, sizeof(attrname), "attr %02u", u);
+ attr =
+ H5Acreate2(my_dataset, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(attr, FAIL, "H5Acreate2");
+
+ /* Write data into the attribute */
+ ret = H5Awrite(attr, H5T_NATIVE_UINT, &u);
+ CHECK(ret, FAIL, "H5Awrite");
+
+ /* Close attribute */
+ ret = H5Aclose(attr);
+ CHECK(ret, FAIL, "H5Aclose");
+
+ /* Verify information for new attribute */
+ ret = attr_info_by_idx_check(my_dataset, attrname, (hsize_t)u, use_index);
+ CHECK(ret, FAIL, "attr_info_by_idx_check");
+ } /* end for */
+#if 0
+ /* Verify state of object */
+ ret = H5O__num_attrs_test(my_dataset, &nattrs);
+ CHECK(ret, FAIL, "H5O__num_attrs_test");
+ VERIFY(nattrs, max_compact, "H5O__num_attrs_test");
+ is_empty = H5O__is_attr_empty_test(my_dataset);
+ VERIFY(is_empty, FALSE, "H5O__is_attr_empty_test");
+ is_dense = H5O__is_attr_dense_test(my_dataset);
+ VERIFY(is_dense, FALSE, "H5O__is_attr_dense_test");
+#endif
+ /* Check for out of bound opening an attribute on an object */
+ H5E_BEGIN_TRY
+ {
+ ret_id = H5Aopen_by_idx(my_dataset, ".", idx_type, order, (hsize_t)u, H5P_DEFAULT,
+ H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+ VERIFY(ret_id, FAIL, "H5Aopen_by_idx");
+
+ /* Test opening attributes by index stored compactly */
+ ret = attr_open_by_idx_check(my_dataset, idx_type, order, u);
+ CHECK(ret, FAIL, "attr_open_by_idx_check");
+ } /* end for */
+
+ /* Work on all the datasets */
+ for (curr_dset = 0; curr_dset < NUM_DSETS; curr_dset++) {
+ switch (curr_dset) {
+ case 0:
+ my_dataset = dset1;
+ break;
+
+ case 1:
+ my_dataset = dset2;
+ break;
+
+ case 2:
+ my_dataset = dset3;
+ break;
+
+ default:
+ HDassert(0 && "Too many datasets!");
+ } /* end switch */
+
+ /* Create more attributes, to push into dense form */
+ for (u = max_compact; u < (max_compact * 2); u++) {
+ /* Create attribute */
+ HDsnprintf(attrname, sizeof(attrname), "attr %02u", u);
+ attr =
+ H5Acreate2(my_dataset, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(attr, FAIL, "H5Acreate2");
+
+ /* Write data into the attribute */
+ ret = H5Awrite(attr, H5T_NATIVE_UINT, &u);
+ CHECK(ret, FAIL, "H5Awrite");
+
+ /* Close attribute */
+ ret = H5Aclose(attr);
+ CHECK(ret, FAIL, "H5Aclose");
+#if 0
+ /* Verify state of object */
+ if (u >= max_compact) {
+ is_dense = H5O__is_attr_dense_test(my_dataset);
+ VERIFY(is_dense, (new_format ? TRUE : FALSE), "H5O__is_attr_dense_test");
+ } /* end if */
+#endif
+ /* Verify information for new attribute */
+ ret = attr_info_by_idx_check(my_dataset, attrname, (hsize_t)u, use_index);
+ CHECK(ret, FAIL, "attr_info_by_idx_check");
+ } /* end for */
+#if 0
+ /* Verify state of object */
+ ret = H5O__num_attrs_test(my_dataset, &nattrs);
+ CHECK(ret, FAIL, "H5O__num_attrs_test");
+ VERIFY(nattrs, (max_compact * 2), "H5O__num_attrs_test");
+ is_empty = H5O__is_attr_empty_test(my_dataset);
+ VERIFY(is_empty, FALSE, "H5O__is_attr_empty_test");
+ is_dense = H5O__is_attr_dense_test(my_dataset);
+ VERIFY(is_dense, (new_format ? TRUE : FALSE), "H5O__is_attr_dense_test");
+
+ if (new_format) {
+ /* Retrieve & verify # of records in the name & creation order indices */
+ ret = H5O__attr_dense_info_test(my_dataset, &name_count, &corder_count);
+ CHECK(ret, FAIL, "H5O__attr_dense_info_test");
+ if (use_index)
+ VERIFY(name_count, corder_count, "H5O__attr_dense_info_test");
+ VERIFY(name_count, (max_compact * 2), "H5O__attr_dense_info_test");
+ } /* end if */
+#endif
+ /* Check for out of bound opening an attribute on an object */
+ H5E_BEGIN_TRY
+ {
+ ret_id = H5Aopen_by_idx(my_dataset, ".", idx_type, order, (hsize_t)u, H5P_DEFAULT,
+ H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+ VERIFY(ret_id, FAIL, "H5Aopen_by_idx");
+
+ /* Test opening attributes by index stored densely */
+ ret = attr_open_by_idx_check(my_dataset, idx_type, order, u);
+ CHECK(ret, FAIL, "attr_open_by_idx_check");
+ } /* end for */
+
+ /* Close Datasets */
+ ret = H5Dclose(dset1);
+ CHECK(ret, FAIL, "H5Dclose");
+ ret = H5Dclose(dset2);
+ CHECK(ret, FAIL, "H5Dclose");
+ ret = H5Dclose(dset3);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close file */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+ } /* end for */
+ } /* end for */
+ } /* end for */
+
+ /* Close property list */
+ ret = H5Pclose(dcpl);
+ CHECK(ret, FAIL, "H5Pclose");
+
+ /* Close dataspace */
+ ret = H5Sclose(sid);
+ CHECK(ret, FAIL, "H5Sclose");
+} /* test_attr_open_by_idx() */
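+
+/* For reference: calls that are expected to fail, such as the out of bound
+ * H5Aopen_by_idx() checks above, are wrapped in H5E_BEGIN_TRY / H5E_END_TRY so
+ * the library's automatic error reporting is silenced while the return value
+ * is still verified.  A minimal sketch (the index value is an arbitrary
+ * out-of-range placeholder):
+ *
+ *     H5E_BEGIN_TRY
+ *     {
+ *         aid = H5Aopen_by_idx(obj_id, ".", H5_INDEX_NAME, H5_ITER_INC,
+ *                              (hsize_t)12345, H5P_DEFAULT, H5P_DEFAULT);
+ *     }
+ *     H5E_END_TRY;
+ *     VERIFY(aid, FAIL, "H5Aopen_by_idx");
+ */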
+
+/*-------------------------------------------------------------------------
+ * Function: attr_open_check
+ *
+ * Purpose: Check opening attribute on an object
+ *
+ * Return: Success: 0
+ * Failure: -1
+ *
+ * Programmer: Quincey Koziol
+ * Wednesday, February 21, 2007
+ *
+ *-------------------------------------------------------------------------
+ */
+static int
+attr_open_check(hid_t fid, const char *dsetname, hid_t obj_id, unsigned max_attrs)
+{
+ hid_t attr_id; /* ID of attribute to test */
+ H5A_info_t ainfo; /* Attribute info */
+ char attrname[NAME_BUF_SIZE]; /* Name of attribute */
+ int old_nerrs; /* Number of errors when entering this check */
+ unsigned u; /* Local index variable */
+ herr_t ret; /* Generic return value */
+
+ /* Retrieve the current # of reported errors */
+ old_nerrs = nerrors;
+
+ /* Open each attribute on object by index and check that it's the correct one */
+ for (u = 0; u < max_attrs; u++) {
+ /* Open the attribute */
+ HDsnprintf(attrname, sizeof(attrname), "attr %02u", u);
+ attr_id = H5Aopen(obj_id, attrname, H5P_DEFAULT);
+ CHECK(attr_id, FAIL, "H5Aopen");
+
+ /* Get the attribute's information */
+ ret = H5Aget_info(attr_id, &ainfo);
+ CHECK(ret, FAIL, "H5Aget_info");
+
+ /* Check that the object is the correct one */
+ VERIFY(ainfo.corder, u, "H5Aget_info");
+
+ /* Close attribute */
+ ret = H5Aclose(attr_id);
+ CHECK(ret, FAIL, "H5Aclose");
+
+ /* Open the attribute */
+ attr_id = H5Aopen_by_name(obj_id, ".", attrname, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(attr_id, FAIL, "H5Aopen_by_name");
+
+ /* Get the attribute's information */
+ ret = H5Aget_info(attr_id, &ainfo);
+ CHECK(ret, FAIL, "H5Aget_info");
+
+ /* Check that the object is the correct one */
+ VERIFY(ainfo.corder, u, "H5Aget_info");
+
+ /* Close attribute */
+ ret = H5Aclose(attr_id);
+ CHECK(ret, FAIL, "H5Aclose");
+
+ /* Open the attribute */
+ attr_id = H5Aopen_by_name(fid, dsetname, attrname, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(attr_id, FAIL, "H5Aopen_by_name");
+
+ /* Get the attribute's information */
+ ret = H5Aget_info(attr_id, &ainfo);
+ CHECK(ret, FAIL, "H5Aget_info");
+
+ /* Check that the object is the correct one */
+ VERIFY(ainfo.corder, u, "H5Aget_info");
+
+ /* Close attribute */
+ ret = H5Aclose(attr_id);
+ CHECK(ret, FAIL, "H5Aclose");
+ } /* end for */
+
+ /* Retrieve current # of errors */
+ if (old_nerrs == nerrors)
+ return (0);
+ else
+ return (-1);
+} /* end attr_open_check() */
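+
+/* For reference: the three open paths checked above are expected to resolve to
+ * the same attribute, so an object-relative open and a file-relative open are
+ * interchangeable here.  A minimal sketch (attribute name "attr 00" follows
+ * the naming pattern used by these tests):
+ *
+ *     hid_t a1 = H5Aopen(obj_id, "attr 00", H5P_DEFAULT);
+ *     hid_t a2 = H5Aopen_by_name(obj_id, ".", "attr 00", H5P_DEFAULT, H5P_DEFAULT);
+ *     hid_t a3 = H5Aopen_by_name(fid, dsetname, "attr 00", H5P_DEFAULT, H5P_DEFAULT);
+ */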
+
+/****************************************************************
+**
+** test_attr_open_by_name(): Test basic H5A (attribute) code.
+** Tests opening attributes by name
+**
+****************************************************************/
+static void
+test_attr_open_by_name(hbool_t new_format, hid_t fcpl, hid_t fapl)
+{
+ hid_t fid; /* HDF5 File ID */
+ hid_t dset1, dset2, dset3; /* Dataset IDs */
+ hid_t my_dataset; /* Current dataset ID */
+ hid_t sid; /* Dataspace ID */
+ hid_t attr; /* Attribute ID */
+ hid_t dcpl; /* Dataset creation property list ID */
+ unsigned max_compact; /* Maximum # of attributes to store compactly */
+ unsigned min_dense; /* Minimum # of attributes to store "densely" */
+#if 0
+ htri_t is_empty; /* Are there any attributes? */
+ htri_t is_dense; /* Are attributes stored densely? */
+ hsize_t nattrs; /* Number of attributes on object */
+ hsize_t name_count; /* # of records in name index */
+ hsize_t corder_count; /* # of records in creation order index */
+#endif
+ unsigned use_index; /* Use index on creation order values */
+ const char *dsetname; /* Name of dataset for attributes */
+ char attrname[NAME_BUF_SIZE]; /* Name of attribute */
+ unsigned curr_dset; /* Current dataset to work on */
+ unsigned u; /* Local index variable */
+ hid_t ret_id; /* Generic hid_t return value */
+ herr_t ret; /* Generic return value */
+
+ /* Create dataspace for dataset & attributes */
+ sid = H5Screate(H5S_SCALAR);
+ CHECK(sid, FAIL, "H5Screate");
+
+ /* Create dataset creation property list */
+ if (dcpl_g == H5P_DEFAULT) {
+ dcpl = H5Pcreate(H5P_DATASET_CREATE);
+ CHECK(dcpl, FAIL, "H5Pcreate");
+ }
+ else {
+ dcpl = H5Pcopy(dcpl_g);
+ CHECK(dcpl, FAIL, "H5Pcopy");
+ }
+
+ /* Query the attribute creation properties */
+ ret = H5Pget_attr_phase_change(dcpl, &max_compact, &min_dense);
+ CHECK(ret, FAIL, "H5Pget_attr_phase_change");
+
+ /* Loop over using index for creation order value */
+ for (use_index = FALSE; use_index <= TRUE; use_index++) {
+ /* Print appropriate test message */
+ if (use_index)
+ MESSAGE(5, ("Testing Opening Attributes By Name w/Creation Order Index\n"))
+ else
+ MESSAGE(5, ("Testing Opening Attributes By Name w/o Creation Order Index\n"))
+
+ /* Create file */
+ fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl, fapl);
+ CHECK(fid, FAIL, "H5Fcreate");
+
+ /* Set attribute creation order tracking & indexing for object */
+ if (new_format == TRUE) {
+ ret = H5Pset_attr_creation_order(
+ dcpl, (H5P_CRT_ORDER_TRACKED | (use_index ? H5P_CRT_ORDER_INDEXED : (unsigned)0)));
+ CHECK(ret, FAIL, "H5Pset_attr_creation_order");
+ } /* end if */
+
+ /* Create datasets */
+ dset1 = H5Dcreate2(fid, DSET1_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT);
+ CHECK(dset1, FAIL, "H5Dcreate2");
+ dset2 = H5Dcreate2(fid, DSET2_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT);
+ CHECK(dset2, FAIL, "H5Dcreate2");
+ dset3 = H5Dcreate2(fid, DSET3_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT);
+ CHECK(dset3, FAIL, "H5Dcreate2");
+
+ /* Work on all the datasets */
+ for (curr_dset = 0; curr_dset < NUM_DSETS; curr_dset++) {
+ switch (curr_dset) {
+ case 0:
+ my_dataset = dset1;
+ dsetname = DSET1_NAME;
+ break;
+
+ case 1:
+ my_dataset = dset2;
+ dsetname = DSET2_NAME;
+ break;
+
+ case 2:
+ my_dataset = dset3;
+ dsetname = DSET3_NAME;
+ break;
+
+ default:
+ HDassert(0 && "Too many datasets!");
+ } /* end switch */
+#if 0
+ /* Check on dataset's attribute storage status */
+ is_empty = H5O__is_attr_empty_test(my_dataset);
+ VERIFY(is_empty, TRUE, "H5O__is_attr_empty_test");
+ is_dense = H5O__is_attr_dense_test(my_dataset);
+ VERIFY(is_dense, FALSE, "H5O__is_attr_dense_test");
+#endif
+ /* Check for opening a non-existent attribute on an object with no attributes */
+ H5E_BEGIN_TRY
+ {
+ ret_id = H5Aopen(my_dataset, "foo", H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+ VERIFY(ret_id, FAIL, "H5Aopen");
+
+ H5E_BEGIN_TRY
+ {
+ ret_id = H5Aopen_by_name(my_dataset, ".", "foo", H5P_DEFAULT, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+ VERIFY(ret_id, FAIL, "H5Aopen_by_name");
+
+ H5E_BEGIN_TRY
+ {
+ ret_id = H5Aopen_by_name(fid, dsetname, "foo", H5P_DEFAULT, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+ VERIFY(ret_id, FAIL, "H5Aopen_by_name");
+
+ /* Create attributes, up to limit of compact form */
+ for (u = 0; u < max_compact; u++) {
+ /* Create attribute */
+ HDsnprintf(attrname, sizeof(attrname), "attr %02u", u);
+ attr = H5Acreate2(my_dataset, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(attr, FAIL, "H5Acreate2");
+
+ /* Write data into the attribute */
+ ret = H5Awrite(attr, H5T_NATIVE_UINT, &u);
+ CHECK(ret, FAIL, "H5Awrite");
+
+ /* Close attribute */
+ ret = H5Aclose(attr);
+ CHECK(ret, FAIL, "H5Aclose");
+
+ /* Verify information for new attribute */
+ ret = attr_info_by_idx_check(my_dataset, attrname, (hsize_t)u, use_index);
+ CHECK(ret, FAIL, "attr_info_by_idx_check");
+ } /* end for */
+#if 0
+ /* Verify state of object */
+ ret = H5O__num_attrs_test(my_dataset, &nattrs);
+ CHECK(ret, FAIL, "H5O__num_attrs_test");
+ VERIFY(nattrs, max_compact, "H5O__num_attrs_test");
+ is_empty = H5O__is_attr_empty_test(my_dataset);
+ VERIFY(is_empty, FALSE, "H5O__is_attr_empty_test");
+ is_dense = H5O__is_attr_dense_test(my_dataset);
+ VERIFY(is_dense, FALSE, "H5O__is_attr_dense_test");
+#endif
+ /* Check for opening a non-existent attribute on an object with compact attribute storage */
+ H5E_BEGIN_TRY
+ {
+ ret_id = H5Aopen(my_dataset, "foo", H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+ VERIFY(ret_id, FAIL, "H5Aopen");
+
+ H5E_BEGIN_TRY
+ {
+ ret_id = H5Aopen_by_name(my_dataset, ".", "foo", H5P_DEFAULT, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+ VERIFY(ret_id, FAIL, "H5Aopen_by_name");
+
+ H5E_BEGIN_TRY
+ {
+ ret_id = H5Aopen_by_name(fid, dsetname, "foo", H5P_DEFAULT, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+ VERIFY(ret_id, FAIL, "H5Aopen_by_name");
+
+ /* Test opening attributes stored compactly */
+ ret = attr_open_check(fid, dsetname, my_dataset, u);
+ CHECK(ret, FAIL, "attr_open_check");
+ } /* end for */
+
+ /* Work on all the datasets */
+ for (curr_dset = 0; curr_dset < NUM_DSETS; curr_dset++) {
+ switch (curr_dset) {
+ case 0:
+ my_dataset = dset1;
+ dsetname = DSET1_NAME;
+ break;
+
+ case 1:
+ my_dataset = dset2;
+ dsetname = DSET2_NAME;
+ break;
+
+ case 2:
+ my_dataset = dset3;
+ dsetname = DSET3_NAME;
+ break;
+
+ default:
+ HDassert(0 && "Too many datasets!");
+ } /* end switch */
+
+ /* Create more attributes, to push into dense form */
+ for (u = max_compact; u < (max_compact * 2); u++) {
+ /* Create attribute */
+ HDsnprintf(attrname, sizeof(attrname), "attr %02u", u);
+ attr = H5Acreate2(my_dataset, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(attr, FAIL, "H5Acreate2");
+
+ /* Write data into the attribute */
+ ret = H5Awrite(attr, H5T_NATIVE_UINT, &u);
+ CHECK(ret, FAIL, "H5Awrite");
+
+ /* Close attribute */
+ ret = H5Aclose(attr);
+ CHECK(ret, FAIL, "H5Aclose");
+#if 0
+ /* Verify state of object */
+ if (u >= max_compact) {
+ is_dense = H5O__is_attr_dense_test(my_dataset);
+ VERIFY(is_dense, (new_format ? TRUE : FALSE), "H5O__is_attr_dense_test");
+ } /* end if */
+#endif
+ /* Verify information for new attribute */
+ ret = attr_info_by_idx_check(my_dataset, attrname, (hsize_t)u, use_index);
+ CHECK(ret, FAIL, "attr_info_by_idx_check");
+ } /* end for */
+#if 0
+ /* Verify state of object */
+ ret = H5O__num_attrs_test(my_dataset, &nattrs);
+ CHECK(ret, FAIL, "H5O__num_attrs_test");
+ VERIFY(nattrs, (max_compact * 2), "H5O__num_attrs_test");
+ is_empty = H5O__is_attr_empty_test(my_dataset);
+ VERIFY(is_empty, FALSE, "H5O__is_attr_empty_test");
+ is_dense = H5O__is_attr_dense_test(my_dataset);
+ VERIFY(is_dense, (new_format ? TRUE : FALSE), "H5O__is_attr_dense_test");
+
+ if (new_format) {
+ /* Retrieve & verify # of records in the name & creation order indices */
+ ret = H5O__attr_dense_info_test(my_dataset, &name_count, &corder_count);
+ CHECK(ret, FAIL, "H5O__attr_dense_info_test");
+ if (use_index)
+ VERIFY(name_count, corder_count, "H5O__attr_dense_info_test");
+ VERIFY(name_count, (max_compact * 2), "H5O__attr_dense_info_test");
+ } /* end if */
+#endif
+ /* Check for opening a non-existent attribute on an object with dense attribute storage */
+ H5E_BEGIN_TRY
+ {
+ ret_id = H5Aopen(my_dataset, "foo", H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+ VERIFY(ret_id, FAIL, "H5Aopen");
+
+ H5E_BEGIN_TRY
+ {
+ ret_id = H5Aopen_by_name(my_dataset, ".", "foo", H5P_DEFAULT, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+ VERIFY(ret_id, FAIL, "H5Aopen_by_name");
+
+ H5E_BEGIN_TRY
+ {
+ ret_id = H5Aopen_by_name(fid, dsetname, "foo", H5P_DEFAULT, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+ VERIFY(ret_id, FAIL, "H5Aopen_by_name");
+
+ /* Test opening attributes stored densely */
+ ret = attr_open_check(fid, dsetname, my_dataset, u);
+ CHECK(ret, FAIL, "attr_open_check");
+ } /* end for */
+
+ /* Close Datasets */
+ ret = H5Dclose(dset1);
+ CHECK(ret, FAIL, "H5Dclose");
+ ret = H5Dclose(dset2);
+ CHECK(ret, FAIL, "H5Dclose");
+ ret = H5Dclose(dset3);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close file */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+ } /* end for */
+
+ /* Close property list */
+ ret = H5Pclose(dcpl);
+ CHECK(ret, FAIL, "H5Pclose");
+
+ /* Close dataspace */
+ ret = H5Sclose(sid);
+ CHECK(ret, FAIL, "H5Sclose");
+} /* test_attr_open_by_name() */
+
+/****************************************************************
+**
+** test_attr_create_by_name(): Test basic H5A (attribute) code.
+** Tests creating attributes by name
+**
+****************************************************************/
+static void
+test_attr_create_by_name(hbool_t new_format, hid_t fcpl, hid_t fapl)
+{
+ hid_t fid; /* HDF5 File ID */
+ hid_t dset1, dset2, dset3; /* Dataset IDs */
+ hid_t my_dataset; /* Current dataset ID */
+ hid_t sid; /* Dataspace ID */
+ hid_t attr; /* Attribute ID */
+ hid_t dcpl; /* Dataset creation property list ID */
+ unsigned max_compact; /* Maximum # of attributes to store compactly */
+ unsigned min_dense; /* Minimum # of attributes to store "densely" */
+#if 0
+ htri_t is_empty; /* Are there any attributes? */
+ htri_t is_dense; /* Are attributes stored densely? */
+ hsize_t nattrs; /* Number of attributes on object */
+ hsize_t name_count; /* # of records in name index */
+ hsize_t corder_count; /* # of records in creation order index */
+#endif
+ unsigned use_index; /* Use index on creation order values */
+ const char *dsetname; /* Name of dataset for attributes */
+ char attrname[NAME_BUF_SIZE]; /* Name of attribute */
+ unsigned curr_dset; /* Current dataset to work on */
+ unsigned u; /* Local index variable */
+ herr_t ret; /* Generic return value */
+
+ /* Create dataspace for dataset & attributes */
+ sid = H5Screate(H5S_SCALAR);
+ CHECK(sid, FAIL, "H5Screate");
+
+ /* Create dataset creation property list */
+ if (dcpl_g == H5P_DEFAULT) {
+ dcpl = H5Pcreate(H5P_DATASET_CREATE);
+ CHECK(dcpl, FAIL, "H5Pcreate");
+ }
+ else {
+ dcpl = H5Pcopy(dcpl_g);
+ CHECK(dcpl, FAIL, "H5Pcopy");
+ }
+
+ /* Query the attribute creation properties */
+ ret = H5Pget_attr_phase_change(dcpl, &max_compact, &min_dense);
+ CHECK(ret, FAIL, "H5Pget_attr_phase_change");
+
+ /* Loop over using index for creation order value */
+ for (use_index = FALSE; use_index <= TRUE; use_index++) {
+ /* Print appropriate test message */
+ if (use_index)
+ MESSAGE(5, ("Testing Creating Attributes By Name w/Creation Order Index\n"))
+ else
+ MESSAGE(5, ("Testing Creating Attributes By Name w/o Creation Order Index\n"))
+
+ /* Create file */
+ fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl, fapl);
+ CHECK(fid, FAIL, "H5Fcreate");
+
+ /* Set attribute creation order tracking & indexing for object */
+ if (new_format == TRUE) {
+ ret = H5Pset_attr_creation_order(
+ dcpl, (H5P_CRT_ORDER_TRACKED | (use_index ? H5P_CRT_ORDER_INDEXED : (unsigned)0)));
+ CHECK(ret, FAIL, "H5Pset_attr_creation_order");
+ } /* end if */
+
+ /* Create datasets */
+ dset1 = H5Dcreate2(fid, DSET1_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT);
+ CHECK(dset1, FAIL, "H5Dcreate2");
+ dset2 = H5Dcreate2(fid, DSET2_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT);
+ CHECK(dset2, FAIL, "H5Dcreate2");
+ dset3 = H5Dcreate2(fid, DSET3_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT);
+ CHECK(dset3, FAIL, "H5Dcreate2");
+
+ /* Work on all the datasets */
+ for (curr_dset = 0; curr_dset < NUM_DSETS; curr_dset++) {
+ switch (curr_dset) {
+ case 0:
+ my_dataset = dset1;
+ dsetname = DSET1_NAME;
+ break;
+
+ case 1:
+ my_dataset = dset2;
+ dsetname = DSET2_NAME;
+ break;
+
+ case 2:
+ my_dataset = dset3;
+ dsetname = DSET3_NAME;
+ break;
+
+ default:
+ HDassert(0 && "Too many datasets!");
+ } /* end switch */
+#if 0
+ /* Check on dataset's attribute storage status */
+ is_empty = H5O__is_attr_empty_test(my_dataset);
+ VERIFY(is_empty, TRUE, "H5O__is_attr_empty_test");
+ is_dense = H5O__is_attr_dense_test(my_dataset);
+ VERIFY(is_dense, FALSE, "H5O__is_attr_dense_test");
+#endif
+ /* Create attributes, up to limit of compact form */
+ for (u = 0; u < max_compact; u++) {
+ /* Create attribute */
+ HDsnprintf(attrname, sizeof(attrname), "attr %02u", u);
+ attr = H5Acreate_by_name(fid, dsetname, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(attr, FAIL, "H5Acreate_by_name");
+
+ /* Write data into the attribute */
+ ret = H5Awrite(attr, H5T_NATIVE_UINT, &u);
+ CHECK(ret, FAIL, "H5Awrite");
+
+ /* Close attribute */
+ ret = H5Aclose(attr);
+ CHECK(ret, FAIL, "H5Aclose");
+
+ /* Verify information for new attribute */
+ ret = attr_info_by_idx_check(my_dataset, attrname, (hsize_t)u, use_index);
+ CHECK(ret, FAIL, "attr_info_by_idx_check");
+ } /* end for */
+#if 0
+ /* Verify state of object */
+ ret = H5O__num_attrs_test(my_dataset, &nattrs);
+ CHECK(ret, FAIL, "H5O__num_attrs_test");
+ VERIFY(nattrs, max_compact, "H5O__num_attrs_test");
+ is_empty = H5O__is_attr_empty_test(my_dataset);
+ VERIFY(is_empty, FALSE, "H5O__is_attr_empty_test");
+ is_dense = H5O__is_attr_dense_test(my_dataset);
+ VERIFY(is_dense, FALSE, "H5O__is_attr_dense_test");
+#endif
+ /* Test opening attributes stored compactly */
+ ret = attr_open_check(fid, dsetname, my_dataset, u);
+ CHECK(ret, FAIL, "attr_open_check");
+ } /* end for */
+
+ /* Work on all the datasets */
+ for (curr_dset = 0; curr_dset < NUM_DSETS; curr_dset++) {
+ switch (curr_dset) {
+ case 0:
+ my_dataset = dset1;
+ dsetname = DSET1_NAME;
+ break;
+
+ case 1:
+ my_dataset = dset2;
+ dsetname = DSET2_NAME;
+ break;
+
+ case 2:
+ my_dataset = dset3;
+ dsetname = DSET3_NAME;
+ break;
+
+ default:
+ HDassert(0 && "Too many datasets!");
+ } /* end switch */
+
+ /* Create more attributes, to push into dense form */
+ for (u = max_compact; u < (max_compact * 2); u++) {
+ /* Create attribute */
+ HDsnprintf(attrname, sizeof(attrname), "attr %02u", u);
+ attr = H5Acreate_by_name(fid, dsetname, attrname, H5T_NATIVE_UINT, sid, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(attr, FAIL, "H5Acreate_by_name");
+
+ /* Write data into the attribute */
+ ret = H5Awrite(attr, H5T_NATIVE_UINT, &u);
+ CHECK(ret, FAIL, "H5Awrite");
+
+ /* Close attribute */
+ ret = H5Aclose(attr);
+ CHECK(ret, FAIL, "H5Aclose");
+#if 0
+ /* Verify state of object */
+ if (u >= max_compact) {
+ is_dense = H5O__is_attr_dense_test(my_dataset);
+ VERIFY(is_dense, (new_format ? TRUE : FALSE), "H5O__is_attr_dense_test");
+ } /* end if */
+#endif
+ /* Verify information for new attribute */
+ ret = attr_info_by_idx_check(my_dataset, attrname, (hsize_t)u, use_index);
+ CHECK(ret, FAIL, "attr_info_by_idx_check");
+ } /* end for */
+#if 0
+ /* Verify state of object */
+ ret = H5O__num_attrs_test(my_dataset, &nattrs);
+ CHECK(ret, FAIL, "H5O__num_attrs_test");
+ VERIFY(nattrs, (max_compact * 2), "H5O__num_attrs_test");
+ is_empty = H5O__is_attr_empty_test(my_dataset);
+ VERIFY(is_empty, FALSE, "H5O__is_attr_empty_test");
+ is_dense = H5O__is_attr_dense_test(my_dataset);
+ VERIFY(is_dense, (new_format ? TRUE : FALSE), "H5O__is_attr_dense_test");
+
+ if (new_format) {
+ /* Retrieve & verify # of records in the name & creation order indices */
+ ret = H5O__attr_dense_info_test(my_dataset, &name_count, &corder_count);
+ CHECK(ret, FAIL, "H5O__attr_dense_info_test");
+ if (use_index)
+ VERIFY(name_count, corder_count, "H5O__attr_dense_info_test");
+ VERIFY(name_count, (max_compact * 2), "H5O__attr_dense_info_test");
+ } /* end if */
+#endif
+ /* Test opening attributes stored densely */
+ ret = attr_open_check(fid, dsetname, my_dataset, u);
+ CHECK(ret, FAIL, "attr_open_check");
+ } /* end for */
+
+ /* Close Datasets */
+ ret = H5Dclose(dset1);
+ CHECK(ret, FAIL, "H5Dclose");
+ ret = H5Dclose(dset2);
+ CHECK(ret, FAIL, "H5Dclose");
+ ret = H5Dclose(dset3);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close file */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+ } /* end for */
+
+ /* Close property list */
+ ret = H5Pclose(dcpl);
+ CHECK(ret, FAIL, "H5Pclose");
+
+ /* Close dataspace */
+ ret = H5Sclose(sid);
+ CHECK(ret, FAIL, "H5Sclose");
+} /* test_attr_create_by_name() */
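+
+/* For reference: H5Acreate_by_name(), used above, is the path-relative
+ * counterpart of H5Acreate2(); the two calls below are interchangeable ways of
+ * creating the same attribute on a dataset (illustrative sketch only):
+ *
+ *     attr = H5Acreate2(dset_id, "attr 00", H5T_NATIVE_UINT, sid,
+ *                       H5P_DEFAULT, H5P_DEFAULT);
+ *     attr = H5Acreate_by_name(fid, dsetname, "attr 00", H5T_NATIVE_UINT, sid,
+ *                              H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ */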
+
+/****************************************************************
+**
+** test_attr_shared_write(): Test basic H5A (attribute) code.
+** Tests writing mix of shared & un-shared attributes in "compact" & "dense" storage
+**
+****************************************************************/
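+/* For reference: shared object-header messages are configured on the file
+ * creation property list.  The simplest setup performed below roughly amounts
+ * to (illustrative sketch only):
+ *
+ *     hid_t fcpl = H5Pcreate(H5P_FILE_CREATE);
+ *     H5Pset_shared_mesg_nindexes(fcpl, 1);
+ *     H5Pset_shared_mesg_index(fcpl, 0, H5O_SHMESG_ATTR_FLAG, 500);
+ *
+ * so that attribute messages of at least the given size (500 bytes here) are
+ * written once to a shared message index and reference-counted instead of
+ * being duplicated on every object.
+ */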
+static void
+test_attr_shared_write(hid_t fcpl, hid_t fapl)
+{
+ hid_t fid; /* File ID */
+ hid_t my_fcpl; /* File creation property list ID */
+ hid_t dataset, dataset2; /* Dataset IDs */
+ hid_t attr_tid; /* Attribute's datatype ID */
+ hid_t sid, big_sid; /* Dataspace IDs */
+ hsize_t big_dims[] = {SPACE1_DIM1, SPACE1_DIM2, SPACE1_DIM3}; /* Dimensions for "big" attribute */
+ hid_t attr; /* Attribute ID */
+ hid_t dcpl; /* Dataset creation property list ID */
+ char attrname[NAME_BUF_SIZE]; /* Name of attribute */
+ unsigned max_compact; /* Maximum # of attributes to store compactly */
+ unsigned min_dense; /* Minimum # of attributes to store "densely" */
+#if 0
+ htri_t is_dense; /* Are attributes stored densely? */
+ htri_t is_shared; /* Is the attribute shared? */
+ hsize_t shared_refcount; /* Reference count of shared attribute */
+#endif
+ unsigned attr_value; /* Attribute value */
+ unsigned *big_value; /* Data for "big" attribute */
+#if 0
+ size_t mesg_count; /* # of shared messages */
+#endif
+ unsigned test_shared; /* Index over shared component type */
+ unsigned u; /* Local index variable */
+#if 0
+ h5_stat_size_t empty_filesize; /* Size of empty file */
+ h5_stat_size_t filesize; /* Size of file after modifications */
+#endif
+ herr_t ret; /* Generic return value */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing Writing Shared & Unshared Attributes in Compact & Dense Storage\n"));
+
+ /* Allocate & initialize "big" attribute data */
+ big_value = (unsigned *)HDmalloc((size_t)(SPACE1_DIM1 * SPACE1_DIM2 * SPACE1_DIM3) * sizeof(unsigned));
+ CHECK_PTR(big_value, "HDmalloc");
+ HDmemset(big_value, 1, sizeof(unsigned) * (size_t)(SPACE1_DIM1 * SPACE1_DIM2 * SPACE1_DIM3));
+
+ /* Create dataspace for dataset */
+ sid = H5Screate(H5S_SCALAR);
+ CHECK(sid, FAIL, "H5Screate");
+
+ /* Create "big" dataspace for "large" attributes */
+ big_sid = H5Screate_simple(SPACE1_RANK, big_dims, NULL);
+ CHECK(big_sid, FAIL, "H5Screate_simple");
+
+ /* Loop over type of shared components */
+ for (test_shared = 0; test_shared < 3; test_shared++) {
+ /* Make copy of file creation property list */
+ my_fcpl = H5Pcopy(fcpl);
+ CHECK(my_fcpl, FAIL, "H5Pcopy");
+
+ /* Set up datatype for attributes */
+ attr_tid = H5Tcopy(H5T_NATIVE_UINT);
+ CHECK(attr_tid, FAIL, "H5Tcopy");
+
+ /* Special setup for each type of shared components */
+ if (test_shared == 0) {
+ /* Make attributes > 500 bytes shared */
+ ret = H5Pset_shared_mesg_nindexes(my_fcpl, (unsigned)1);
+ CHECK_I(ret, "H5Pset_shared_mesg_nindexes");
+ ret = H5Pset_shared_mesg_index(my_fcpl, (unsigned)0, H5O_SHMESG_ATTR_FLAG, (unsigned)500);
+ CHECK_I(ret, "H5Pset_shared_mesg_index");
+ } /* end if */
+ else {
+ /* Configure the FCPL copy to share attributes, datatypes & dataspaces */
+
+ ret = H5Pset_shared_mesg_nindexes(my_fcpl, (unsigned)3);
+ CHECK_I(ret, "H5Pset_shared_mesg_nindexes");
+
+ /* Make attributes > 500 bytes shared */
+ ret = H5Pset_shared_mesg_index(my_fcpl, (unsigned)0, H5O_SHMESG_ATTR_FLAG, (unsigned)500);
+ CHECK_I(ret, "H5Pset_shared_mesg_index");
+
+ /* Make datatypes & dataspaces > 1 byte shared (i.e. all of them :-) */
+ ret = H5Pset_shared_mesg_index(my_fcpl, (unsigned)1, H5O_SHMESG_DTYPE_FLAG, (unsigned)1);
+ CHECK_I(ret, "H5Pset_shared_mesg_index");
+ ret = H5Pset_shared_mesg_index(my_fcpl, (unsigned)2, H5O_SHMESG_SDSPACE_FLAG, (unsigned)1);
+ CHECK_I(ret, "H5Pset_shared_mesg_index");
+ } /* end else */
+
+ /* Create file */
+ fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, my_fcpl, fapl);
+ CHECK(fid, FAIL, "H5Fcreate");
+
+ /* Close FCPL copy */
+ ret = H5Pclose(my_fcpl);
+ CHECK(ret, FAIL, "H5Pclose");
+
+ /* Close file */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+#if 0
+ /* Get size of file */
+ empty_filesize = h5_get_file_size(FILENAME, fapl);
+ if (empty_filesize < 0)
+ TestErrPrintf("Line %d: file size wrong!\n", __LINE__);
+#endif
+
+ /* Re-open file */
+ fid = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl);
+ CHECK(fid, FAIL, "H5Fopen");
+
+ /* Commit datatype to file */
+ if (test_shared == 2) {
+ ret = H5Tcommit2(fid, TYPE1_NAME, attr_tid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Tcommit2");
+ } /* end if */
+
+ /* Set up to query the object creation properties */
+ if (dcpl_g == H5P_DEFAULT) {
+ dcpl = H5Pcreate(H5P_DATASET_CREATE);
+ CHECK(dcpl, FAIL, "H5Pcreate");
+ }
+ else {
+ dcpl = H5Pcopy(dcpl_g);
+ CHECK(dcpl, FAIL, "H5Pcopy");
+ }
+
+ /* Create datasets */
+ dataset = H5Dcreate2(fid, DSET1_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT);
+ CHECK(dataset, FAIL, "H5Dcreate2");
+ dataset2 = H5Dcreate2(fid, DSET2_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT);
+ CHECK(dataset2, FAIL, "H5Dcreate2");
+
+ /* Check on dataset's message storage status */
+ if (test_shared != 0) {
+#if 0
+ /* Datasets' datatypes can be shared */
+ ret = H5F__get_sohm_mesg_count_test(fid, H5O_DTYPE_ID, &mesg_count);
+ CHECK(ret, FAIL, "H5F__get_sohm_mesg_count_test");
+ VERIFY(mesg_count, 1, "H5F__get_sohm_mesg_count_test");
+
+ /* Datasets' dataspace can be shared */
+ ret = H5F__get_sohm_mesg_count_test(fid, H5O_SDSPACE_ID, &mesg_count);
+ CHECK(ret, FAIL, "H5F__get_sohm_mesg_count_test");
+ VERIFY(mesg_count, 1, "H5F__get_sohm_mesg_count_test");
+#endif
+ } /* end if */
+
+ /* Retrieve limits for compact/dense attribute storage */
+ ret = H5Pget_attr_phase_change(dcpl, &max_compact, &min_dense);
+ CHECK(ret, FAIL, "H5Pget_attr_phase_change");
+
+ /* Close property list */
+ ret = H5Pclose(dcpl);
+ CHECK(ret, FAIL, "H5Pclose");
+#if 0
+ /* Check on datasets' attribute storage status */
+ is_dense = H5O__is_attr_dense_test(dataset);
+ VERIFY(is_dense, FALSE, "H5O__is_attr_dense_test");
+ is_dense = H5O__is_attr_dense_test(dataset2);
+ VERIFY(is_dense, FALSE, "H5O__is_attr_dense_test");
+#endif
+ /* Add attributes to each dataset, until after converting to dense storage */
+ for (u = 0; u < max_compact * 2; u++) {
+ /* Create attribute name */
+ HDsnprintf(attrname, sizeof(attrname), "attr %02u", u);
+
+ /* Alternate between creating "small" & "big" attributes */
+ if (u % 2) {
+ /* Create "small" attribute on first dataset */
+ attr = H5Acreate2(dataset, attrname, attr_tid, sid, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(attr, FAIL, "H5Acreate2");
+#if 0
+ /* Check that attribute is not shared */
+ is_shared = H5A__is_shared_test(attr);
+ VERIFY(is_shared, FALSE, "H5A__is_shared_test");
+#endif
+ /* Write data into the attribute */
+ attr_value = u + 1;
+ ret = H5Awrite(attr, attr_tid, &attr_value);
+ CHECK(ret, FAIL, "H5Awrite");
+ } /* end if */
+ else {
+ /* Create "big" attribute on first dataset */
+ attr = H5Acreate2(dataset, attrname, attr_tid, big_sid, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(attr, FAIL, "H5Acreate2");
+#if 0
+ /* Check that attribute is shared */
+ is_shared = H5A__is_shared_test(attr);
+ VERIFY(is_shared, TRUE, "H5A__is_shared_test");
+
+ /* Check refcount for attribute */
+ ret = H5A__get_shared_rc_test(attr, &shared_refcount);
+ CHECK(ret, FAIL, "H5A__get_shared_rc_test");
+ VERIFY(shared_refcount, 1, "H5A__get_shared_rc_test");
+#endif
+ /* Write data into the attribute */
+ big_value[0] = u + 1;
+ ret = H5Awrite(attr, attr_tid, big_value);
+ CHECK(ret, FAIL, "H5Awrite");
+#if 0
+ /* Check refcount for attribute */
+ ret = H5A__get_shared_rc_test(attr, &shared_refcount);
+ CHECK(ret, FAIL, "H5A__get_shared_rc_test");
+ VERIFY(shared_refcount, 1, "H5A__get_shared_rc_test");
+#endif
+ } /* end else */
+
+ /* Close attribute */
+ ret = H5Aclose(attr);
+ CHECK(ret, FAIL, "H5Aclose");
+#if 0
+ /* Check on dataset's attribute storage status */
+ is_dense = H5O__is_attr_dense_test(dataset);
+ if (u < max_compact)
+ VERIFY(is_dense, FALSE, "H5O__is_attr_dense_test");
+ else
+ VERIFY(is_dense, TRUE, "H5O__is_attr_dense_test");
+#endif
+
+ /* Alternate between creating "small" & "big" attributes */
+ if (u % 2) {
+ /* Create "small" attribute on second dataset */
+ attr = H5Acreate2(dataset2, attrname, attr_tid, sid, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(attr, FAIL, "H5Acreate2");
+#if 0
+ /* Check that attribute is not shared */
+ is_shared = H5A__is_shared_test(attr);
+ VERIFY(is_shared, FALSE, "H5A__is_shared_test");
+#endif
+ /* Write data into the attribute */
+ attr_value = u + 1;
+ ret = H5Awrite(attr, attr_tid, &attr_value);
+ CHECK(ret, FAIL, "H5Awrite");
+ } /* end if */
+ else {
+ /* Create "big" attribute on second dataset */
+ attr = H5Acreate2(dataset2, attrname, attr_tid, big_sid, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(attr, FAIL, "H5Acreate2");
+#if 0
+ /* Check that attribute is shared */
+ is_shared = H5A__is_shared_test(attr);
+ VERIFY(is_shared, TRUE, "H5A__is_shared_test");
+
+ /* Check refcount for attribute */
+ ret = H5A__get_shared_rc_test(attr, &shared_refcount);
+ CHECK(ret, FAIL, "H5A__get_shared_rc_test");
+ VERIFY(shared_refcount, 1, "H5A__get_shared_rc_test");
+#endif
+ /* Write data into the attribute */
+ big_value[0] = u + 1;
+ ret = H5Awrite(attr, attr_tid, big_value);
+ CHECK(ret, FAIL, "H5Awrite");
+#if 0
+ /* Check refcount for attribute */
+ ret = H5A__get_shared_rc_test(attr, &shared_refcount);
+ CHECK(ret, FAIL, "H5A__get_shared_rc_test");
+ VERIFY(shared_refcount, 2, "H5A__get_shared_rc_test");
+#endif
+ } /* end else */
+
+ /* Close attribute */
+ ret = H5Aclose(attr);
+ CHECK(ret, FAIL, "H5Aclose");
+#if 0
+ /* Check on dataset's attribute storage status */
+ is_dense = H5O__is_attr_dense_test(dataset2);
+ if (u < max_compact)
+ VERIFY(is_dense, FALSE, "H5O__is_attr_dense_test");
+ else
+ VERIFY(is_dense, TRUE, "H5O__is_attr_dense_test");
+#endif
+ } /* end for */
+
+ /* Close attribute's datatype */
+ ret = H5Tclose(attr_tid);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ /* Close Datasets */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+ ret = H5Dclose(dataset2);
+ CHECK(ret, FAIL, "H5Dclose");
+#if 0
+ /* Check on shared message status now */
+ if (test_shared != 0) {
+ if (test_shared == 1) {
+ /* Check on datatype storage status */
+ ret = H5F__get_sohm_mesg_count_test(fid, H5O_DTYPE_ID, &mesg_count);
+ CHECK(ret, FAIL, "H5F__get_sohm_mesg_count_test");
+ VERIFY(mesg_count, 2, "H5F__get_sohm_mesg_count_test");
+ } /* end if */
+
+ /* Check on dataspace storage status */
+ ret = H5F__get_sohm_mesg_count_test(fid, H5O_SDSPACE_ID, &mesg_count);
+ CHECK(ret, FAIL, "H5F__get_sohm_mesg_count_test");
+ VERIFY(mesg_count, 2, "H5F__get_sohm_mesg_count_test");
+ } /* end if */
+#endif
+ /* Unlink datasets with attributes */
+ ret = H5Ldelete(fid, DSET1_NAME, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Ldelete");
+ ret = H5Ldelete(fid, DSET2_NAME, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Ldelete");
+
+ /* Unlink committed datatype */
+ if (test_shared == 2) {
+ ret = H5Ldelete(fid, TYPE1_NAME, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Ldelete");
+ } /* end if */
+#if 0
+ /* Check on attribute storage status */
+ ret = H5F__get_sohm_mesg_count_test(fid, H5O_ATTR_ID, &mesg_count);
+ CHECK(ret, FAIL, "H5F__get_sohm_mesg_count_test");
+ VERIFY(mesg_count, 0, "H5F__get_sohm_mesg_count_test");
+
+ if (test_shared != 0) {
+ /* Check on datatype storage status */
+ ret = H5F__get_sohm_mesg_count_test(fid, H5O_DTYPE_ID, &mesg_count);
+ CHECK(ret, FAIL, "H5F__get_sohm_mesg_count_test");
+ VERIFY(mesg_count, 0, "H5F__get_sohm_mesg_count_test");
+
+ /* Check on dataspace storage status */
+ ret = H5F__get_sohm_mesg_count_test(fid, H5O_SDSPACE_ID, &mesg_count);
+ CHECK(ret, FAIL, "H5F__get_sohm_mesg_count_test");
+ VERIFY(mesg_count, 0, "H5F__get_sohm_mesg_count_test");
+ } /* end if */
+#endif
+ /* Close file */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+#if 0
+ if (h5_using_default_driver(NULL)) {
+ /* Check size of file */
+ filesize = h5_get_file_size(FILENAME, fapl);
+ VERIFY(filesize, empty_filesize, "h5_get_file_size");
+ }
+#endif
+ } /* end for */
+
+ /* Close dataspaces */
+ ret = H5Sclose(sid);
+ CHECK(ret, FAIL, "H5Sclose");
+ ret = H5Sclose(big_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Release memory */
+ HDfree(big_value);
+} /* test_attr_shared_write() */
+
+/****************************************************************
+**
+** test_attr_shared_rename(): Test basic H5A (attribute) code.
+** Tests renaming shared attributes in "compact" & "dense" storage
+**
+****************************************************************/
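+/* Note: the disabled refcount checks below suggest the expected behavior:
+ * after H5Arename_by_name() on the second dataset's big attribute, each big
+ * attribute refers to its own shared message (refcount 1 apiece, since the
+ * name is part of the message), and renaming it back to the original name
+ * should re-merge them so the shared refcount returns to 2. This is an
+ * interpretation of the #if 0 blocks, not an extra check performed here.
+ */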
+static void
+test_attr_shared_rename(hid_t fcpl, hid_t fapl)
+{
+ hid_t fid; /* HDF5 File ID */
+ hid_t my_fcpl; /* File creation property list ID */
+ hid_t dataset, dataset2; /* Dataset IDs */
+ hid_t attr_tid; /* Attribute's datatype ID */
+ hid_t sid, big_sid; /* Dataspace IDs */
+ hsize_t big_dims[] = {SPACE1_DIM1, SPACE1_DIM2, SPACE1_DIM3}; /* Dimensions for "big" attribute */
+ hid_t attr; /* Attribute ID */
+ hid_t dcpl; /* Dataset creation property list ID */
+ char attrname[NAME_BUF_SIZE]; /* Name of attribute on first dataset */
+ char attrname2[NAME_BUF_SIZE]; /* Name of attribute on second dataset */
+ unsigned max_compact; /* Maximum # of attributes to store compactly */
+ unsigned min_dense; /* Minimum # of attributes to store "densely" */
+#if 0
+ htri_t is_dense; /* Are attributes stored densely? */
+ htri_t is_shared; /* Is attribute shared? */
+ hsize_t shared_refcount; /* Reference count of shared attribute */
+#endif
+ unsigned attr_value; /* Attribute value */
+ unsigned *big_value; /* Data for "big" attribute */
+#if 0
+ size_t mesg_count; /* # of shared messages */
+#endif
+ unsigned test_shared; /* Index over shared component type */
+ unsigned u; /* Local index variable */
+#if 0
+ h5_stat_size_t empty_filesize; /* Size of empty file */
+ h5_stat_size_t filesize; /* Size of file after modifications */
+#endif
+ herr_t ret; /* Generic return value */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing Renaming Shared & Unshared Attributes in Compact & Dense Storage\n"));
+
+ /* Allocate & initialize "big" attribute data */
+ big_value = (unsigned *)HDmalloc((size_t)(SPACE1_DIM1 * SPACE1_DIM2 * SPACE1_DIM3) * sizeof(unsigned));
+ CHECK_PTR(big_value, "HDmalloc");
+ HDmemset(big_value, 1, sizeof(unsigned) * (size_t)(SPACE1_DIM1 * SPACE1_DIM2 * SPACE1_DIM3));
+
+ /* Create dataspace for dataset */
+ sid = H5Screate(H5S_SCALAR);
+ CHECK(sid, FAIL, "H5Screate");
+
+ /* Create "big" dataspace for "large" attributes */
+ big_sid = H5Screate_simple(SPACE1_RANK, big_dims, NULL);
+ CHECK(big_sid, FAIL, "H5Screate_simple");
+
+ /* Loop over type of shared components */
+ for (test_shared = 0; test_shared < 3; test_shared++) {
+ /* Make copy of file creation property list */
+ my_fcpl = H5Pcopy(fcpl);
+ CHECK(my_fcpl, FAIL, "H5Pcopy");
+
+ /* Set up datatype for attributes */
+ attr_tid = H5Tcopy(H5T_NATIVE_UINT);
+ CHECK(attr_tid, FAIL, "H5Tcopy");
+
+ /* Special setup for each type of shared components */
+ if (test_shared == 0) {
+ /* Make attributes > 500 bytes shared */
+ ret = H5Pset_shared_mesg_nindexes(my_fcpl, (unsigned)1);
+ CHECK_I(ret, "H5Pset_shared_mesg_nindexes");
+ ret = H5Pset_shared_mesg_index(my_fcpl, (unsigned)0, H5O_SHMESG_ATTR_FLAG, (unsigned)500);
+ CHECK_I(ret, "H5Pset_shared_mesg_index");
+ } /* end if */
+ else {
+ /* Configure the FCPL copy to share attributes, datatypes & dataspaces */
+
+ ret = H5Pset_shared_mesg_nindexes(my_fcpl, (unsigned)3);
+ CHECK_I(ret, "H5Pset_shared_mesg_nindexes");
+
+ /* Make attributes > 500 bytes shared */
+ ret = H5Pset_shared_mesg_index(my_fcpl, (unsigned)0, H5O_SHMESG_ATTR_FLAG, (unsigned)500);
+ CHECK_I(ret, "H5Pset_shared_mesg_index");
+
+ /* Make datatypes & dataspaces > 1 byte shared (i.e. all of them :-) */
+ ret = H5Pset_shared_mesg_index(my_fcpl, (unsigned)1, H5O_SHMESG_DTYPE_FLAG, (unsigned)1);
+ CHECK_I(ret, "H5Pset_shared_mesg_index");
+ ret = H5Pset_shared_mesg_index(my_fcpl, (unsigned)2, H5O_SHMESG_SDSPACE_FLAG, (unsigned)1);
+ CHECK_I(ret, "H5Pset_shared_mesg_index");
+ } /* end else */
+
+ /* Create file */
+ fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, my_fcpl, fapl);
+ CHECK(fid, FAIL, "H5Fcreate");
+
+ /* Close FCPL copy */
+ ret = H5Pclose(my_fcpl);
+ CHECK(ret, FAIL, "H5Pclose");
+
+ /* Close file */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+#if 0
+ /* Get size of file */
+ empty_filesize = h5_get_file_size(FILENAME, fapl);
+ if (empty_filesize < 0)
+ TestErrPrintf("Line %d: file size wrong!\n", __LINE__);
+#endif
+
+ /* Re-open file */
+ fid = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl);
+ CHECK(fid, FAIL, "H5Fopen");
+
+ /* Commit datatype to file */
+ if (test_shared == 2) {
+ ret = H5Tcommit2(fid, TYPE1_NAME, attr_tid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Tcommit2");
+ } /* end if */
+
+ /* Set up to query the object creation properties */
+ if (dcpl_g == H5P_DEFAULT) {
+ dcpl = H5Pcreate(H5P_DATASET_CREATE);
+ CHECK(dcpl, FAIL, "H5Pcreate");
+ }
+ else {
+ dcpl = H5Pcopy(dcpl_g);
+ CHECK(dcpl, FAIL, "H5Pcopy");
+ }
+
+ /* Create datasets */
+ dataset = H5Dcreate2(fid, DSET1_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT);
+ CHECK(dataset, FAIL, "H5Dcreate2");
+ dataset2 = H5Dcreate2(fid, DSET2_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT);
+ CHECK(dataset2, FAIL, "H5Dcreate2");
+#if 0
+ /* Check on dataset's message storage status */
+ if (test_shared != 0) {
+ /* Datasets' datatypes can be shared */
+ ret = H5F__get_sohm_mesg_count_test(fid, H5O_DTYPE_ID, &mesg_count);
+ CHECK(ret, FAIL, "H5F__get_sohm_mesg_count_test");
+ VERIFY(mesg_count, 1, "H5F__get_sohm_mesg_count_test");
+
+ /* Datasets' dataspace can be shared */
+ ret = H5F__get_sohm_mesg_count_test(fid, H5O_SDSPACE_ID, &mesg_count);
+ CHECK(ret, FAIL, "H5F__get_sohm_mesg_count_test");
+ VERIFY(mesg_count, 1, "H5F__get_sohm_mesg_count_test");
+ } /* end if */
+#endif
+ /* Retrieve limits for compact/dense attribute storage */
+ ret = H5Pget_attr_phase_change(dcpl, &max_compact, &min_dense);
+ CHECK(ret, FAIL, "H5Pget_attr_phase_change");
+
+ /* Close property list */
+ ret = H5Pclose(dcpl);
+ CHECK(ret, FAIL, "H5Pclose");
+#if 0
+ /* Check on datasets' attribute storage status */
+ is_dense = H5O__is_attr_dense_test(dataset);
+ VERIFY(is_dense, FALSE, "H5O__is_attr_dense_test");
+ is_dense = H5O__is_attr_dense_test(dataset2);
+ VERIFY(is_dense, FALSE, "H5O__is_attr_dense_test");
+#endif
+ /* Add attributes to each dataset, until after converting to dense storage */
+ for (u = 0; u < max_compact * 2; u++) {
+ /* Create attribute name */
+ HDsnprintf(attrname, sizeof(attrname), "attr %02u", u);
+
+ /* Alternate between creating "small" & "big" attributes */
+ if (u % 2) {
+ /* Create "small" attribute on first dataset */
+ attr = H5Acreate2(dataset, attrname, attr_tid, sid, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(attr, FAIL, "H5Acreate2");
+#if 0
+ /* Check that attribute is not shared */
+ is_shared = H5A__is_shared_test(attr);
+ VERIFY(is_shared, FALSE, "H5A__is_shared_test");
+#endif
+ /* Write data into the attribute */
+ attr_value = u + 1;
+ ret = H5Awrite(attr, attr_tid, &attr_value);
+ CHECK(ret, FAIL, "H5Awrite");
+ } /* end if */
+ else {
+ /* Create "big" attribute on first dataset */
+ attr = H5Acreate2(dataset, attrname, attr_tid, big_sid, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(attr, FAIL, "H5Acreate2");
+#if 0
+ /* Check that attribute is shared */
+ is_shared = H5A__is_shared_test(attr);
+ VERIFY(is_shared, TRUE, "H5A__is_shared_test");
+
+ /* Check refcount for attribute */
+ ret = H5A__get_shared_rc_test(attr, &shared_refcount);
+ CHECK(ret, FAIL, "H5A__get_shared_rc_test");
+ VERIFY(shared_refcount, 1, "H5A__get_shared_rc_test");
+#endif
+ /* Write data into the attribute */
+ big_value[0] = u + 1;
+ ret = H5Awrite(attr, attr_tid, big_value);
+ CHECK(ret, FAIL, "H5Awrite");
+#if 0
+ /* Check refcount for attribute */
+ ret = H5A__get_shared_rc_test(attr, &shared_refcount);
+ CHECK(ret, FAIL, "H5A__get_shared_rc_test");
+ VERIFY(shared_refcount, 1, "H5A__get_shared_rc_test");
+#endif
+ } /* end else */
+
+ /* Close attribute */
+ ret = H5Aclose(attr);
+ CHECK(ret, FAIL, "H5Aclose");
+#if 0
+ /* Check on dataset's attribute storage status */
+ is_dense = H5O__is_attr_dense_test(dataset);
+ if (u < max_compact)
+ VERIFY(is_dense, FALSE, "H5O__is_attr_dense_test");
+ else
+ VERIFY(is_dense, TRUE, "H5O__is_attr_dense_test");
+#endif
+
+ /* Alternate between creating "small" & "big" attributes */
+ if (u % 2) {
+ /* Create "small" attribute on second dataset */
+ attr = H5Acreate2(dataset2, attrname, attr_tid, sid, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(attr, FAIL, "H5Acreate2");
+#if 0
+ /* Check that attribute is not shared */
+ is_shared = H5A__is_shared_test(attr);
+ VERIFY(is_shared, FALSE, "H5A__is_shared_test");
+#endif
+ /* Write data into the attribute */
+ attr_value = u + 1;
+ ret = H5Awrite(attr, attr_tid, &attr_value);
+ CHECK(ret, FAIL, "H5Awrite");
+ } /* end if */
+ else {
+ /* Create "big" attribute on second dataset */
+ attr = H5Acreate2(dataset2, attrname, attr_tid, big_sid, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(attr, FAIL, "H5Acreate2");
+#if 0
+ /* Check that attribute is shared */
+ is_shared = H5A__is_shared_test(attr);
+ VERIFY(is_shared, TRUE, "H5A__is_shared_test");
+
+ /* Check refcount for attribute */
+ ret = H5A__get_shared_rc_test(attr, &shared_refcount);
+ CHECK(ret, FAIL, "H5A__get_shared_rc_test");
+ VERIFY(shared_refcount, 1, "H5A__get_shared_rc_test");
+#endif
+ /* Write data into the attribute */
+ big_value[0] = u + 1;
+ ret = H5Awrite(attr, attr_tid, big_value);
+ CHECK(ret, FAIL, "H5Awrite");
+#if 0
+ /* Check refcount for attribute */
+ ret = H5A__get_shared_rc_test(attr, &shared_refcount);
+ CHECK(ret, FAIL, "H5A__get_shared_rc_test");
+ VERIFY(shared_refcount, 2, "H5A__get_shared_rc_test");
+#endif
+ } /* end else */
+
+ /* Close attribute */
+ ret = H5Aclose(attr);
+ CHECK(ret, FAIL, "H5Aclose");
+#if 0
+ /* Check on dataset's attribute storage status */
+ is_dense = H5O__is_attr_dense_test(dataset2);
+ if (u < max_compact)
+ VERIFY(is_dense, FALSE, "H5O__is_attr_dense_test");
+ else
+ VERIFY(is_dense, TRUE, "H5O__is_attr_dense_test");
+#endif
+
+ /* Create new attribute name */
+ HDsnprintf(attrname2, sizeof(attrname2), "new attr %02u", u);
+
+ /* Change second dataset's attribute's name */
+ ret = H5Arename_by_name(fid, DSET2_NAME, attrname, attrname2, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Arename_by_name");
+
+ /* Check refcount on attributes now */
+
+ /* Check refcount on renamed attribute */
+ attr = H5Aopen(dataset2, attrname2, H5P_DEFAULT);
+ CHECK(attr, FAIL, "H5Aopen");
+#if 0
+ if (u % 2) {
+ /* Check that attribute is not shared */
+ is_shared = H5A__is_shared_test(attr);
+ VERIFY(is_shared, FALSE, "H5A__is_shared_test");
+ } /* end if */
+ else {
+ /* Check that attribute is shared */
+ is_shared = H5A__is_shared_test(attr);
+ VERIFY(is_shared, TRUE, "H5A__is_shared_test");
+
+ /* Check refcount for attribute */
+ ret = H5A__get_shared_rc_test(attr, &shared_refcount);
+ CHECK(ret, FAIL, "H5A__get_shared_rc_test");
+ VERIFY(shared_refcount, 1, "H5A__get_shared_rc_test");
+ } /* end else */
+#endif
+ /* Close attribute */
+ ret = H5Aclose(attr);
+ CHECK(ret, FAIL, "H5Aclose");
+
+ /* Check refcount on original attribute */
+ attr = H5Aopen(dataset, attrname, H5P_DEFAULT);
+ CHECK(attr, FAIL, "H5Aopen");
+#if 0
+ if (u % 2) {
+ /* Check that attribute is not shared */
+ is_shared = H5A__is_shared_test(attr);
+ VERIFY(is_shared, FALSE, "H5A__is_shared_test");
+ } /* end if */
+ else {
+ /* Check that attribute is shared */
+ is_shared = H5A__is_shared_test(attr);
+ VERIFY(is_shared, TRUE, "H5A__is_shared_test");
+
+ /* Check refcount for attribute */
+ ret = H5A__get_shared_rc_test(attr, &shared_refcount);
+ CHECK(ret, FAIL, "H5A__get_shared_rc_test");
+ VERIFY(shared_refcount, 1, "H5A__get_shared_rc_test");
+ } /* end else */
+#endif
+ /* Close attribute */
+ ret = H5Aclose(attr);
+ CHECK(ret, FAIL, "H5Aclose");
+
+ /* Change second dataset's attribute's name back to original */
+ ret = H5Arename_by_name(fid, DSET2_NAME, attrname2, attrname, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Arename_by_name");
+
+ /* Check refcount on attributes now */
+
+ /* Check refcount on renamed attribute */
+ attr = H5Aopen(dataset2, attrname, H5P_DEFAULT);
+ CHECK(attr, FAIL, "H5Aopen");
+#if 0
+ if (u % 2) {
+ /* Check that attribute is not shared */
+ is_shared = H5A__is_shared_test(attr);
+ VERIFY(is_shared, FALSE, "H5A__is_shared_test");
+ } /* end if */
+ else {
+ /* Check that attribute is shared */
+ is_shared = H5A__is_shared_test(attr);
+ VERIFY(is_shared, TRUE, "H5A__is_shared_test");
+
+ /* Check refcount for attribute */
+ ret = H5A__get_shared_rc_test(attr, &shared_refcount);
+ CHECK(ret, FAIL, "H5A__get_shared_rc_test");
+ VERIFY(shared_refcount, 2, "H5A__get_shared_rc_test");
+ } /* end else */
+#endif
+ /* Close attribute */
+ ret = H5Aclose(attr);
+ CHECK(ret, FAIL, "H5Aclose");
+
+ /* Check refcount on original attribute */
+ attr = H5Aopen(dataset, attrname, H5P_DEFAULT);
+ CHECK(attr, FAIL, "H5Aopen");
+#if 0
+ if (u % 2) {
+ /* Check that attribute is not shared */
+ is_shared = H5A__is_shared_test(attr);
+ VERIFY(is_shared, FALSE, "H5A__is_shared_test");
+ } /* end if */
+ else {
+ /* Check that attribute is shared */
+ is_shared = H5A__is_shared_test(attr);
+ VERIFY(is_shared, TRUE, "H5A__is_shared_test");
+
+ /* Check refcount for attribute */
+ ret = H5A__get_shared_rc_test(attr, &shared_refcount);
+ CHECK(ret, FAIL, "H5A__get_shared_rc_test");
+ VERIFY(shared_refcount, 2, "H5A__get_shared_rc_test");
+ } /* end else */
+#endif
+ /* Close attribute */
+ ret = H5Aclose(attr);
+ CHECK(ret, FAIL, "H5Aclose");
+ } /* end for */
+
+ /* Close attribute's datatype */
+ ret = H5Tclose(attr_tid);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ /* Close Datasets */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+ ret = H5Dclose(dataset2);
+ CHECK(ret, FAIL, "H5Dclose");
+#if 0
+ /* Check on shared message status now */
+ if (test_shared != 0) {
+ if (test_shared == 1) {
+ /* Check on datatype storage status */
+ ret = H5F__get_sohm_mesg_count_test(fid, H5O_DTYPE_ID, &mesg_count);
+ CHECK(ret, FAIL, "H5F__get_sohm_mesg_count_test");
+ VERIFY(mesg_count, 2, "H5F__get_sohm_mesg_count_test");
+ } /* end if */
+
+ /* Check on dataspace storage status */
+ ret = H5F__get_sohm_mesg_count_test(fid, H5O_SDSPACE_ID, &mesg_count);
+ CHECK(ret, FAIL, "H5F__get_sohm_mesg_count_test");
+ VERIFY(mesg_count, 2, "H5F__get_sohm_mesg_count_test");
+ } /* end if */
+#endif
+ /* Unlink datasets with attributes */
+ ret = H5Ldelete(fid, DSET1_NAME, H5P_DEFAULT);
+ CHECK(ret, FAIL, "HLdelete");
+ ret = H5Ldelete(fid, DSET2_NAME, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Ldelete");
+
+ /* Unlink committed datatype */
+ if (test_shared == 2) {
+ ret = H5Ldelete(fid, TYPE1_NAME, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Ldelete");
+ } /* end if */
+#if 0
+ /* Check on attribute storage status */
+ ret = H5F__get_sohm_mesg_count_test(fid, H5O_ATTR_ID, &mesg_count);
+ CHECK(ret, FAIL, "H5F__get_sohm_mesg_count_test");
+ VERIFY(mesg_count, 0, "H5F__get_sohm_mesg_count_test");
+
+ if (test_shared != 0) {
+ /* Check on datatype storage status */
+ ret = H5F__get_sohm_mesg_count_test(fid, H5O_DTYPE_ID, &mesg_count);
+ CHECK(ret, FAIL, "H5F__get_sohm_mesg_count_test");
+ VERIFY(mesg_count, 0, "H5F__get_sohm_mesg_count_test");
+
+ /* Check on dataspace storage status */
+ ret = H5F__get_sohm_mesg_count_test(fid, H5O_SDSPACE_ID, &mesg_count);
+ CHECK(ret, FAIL, "H5F__get_sohm_mesg_count_test");
+ VERIFY(mesg_count, 0, "H5F__get_sohm_mesg_count_test");
+ } /* end if */
+#endif
+ /* Close file */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+#if 0
+ if (h5_using_default_driver(NULL)) {
+ /* Check size of file */
+ filesize = h5_get_file_size(FILENAME, fapl);
+ VERIFY(filesize, empty_filesize, "h5_get_file_size");
+ }
+#endif
+ } /* end for */
+
+ /* Close dataspaces */
+ ret = H5Sclose(sid);
+ CHECK(ret, FAIL, "H5Sclose");
+ ret = H5Sclose(big_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Release memory */
+ HDfree(big_value);
+} /* test_attr_shared_rename() */
+
+/****************************************************************
+**
+** test_attr_shared_delete(): Test basic H5A (attribute) code.
+** Tests deleting shared attributes in "compact" & "dense" storage
+**
+****************************************************************/
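+/* Note: per the disabled checks below, deleting the second dataset's copy of
+ * each big attribute is expected to drop the corresponding shared message's
+ * refcount back to 1 (the first dataset's copy remains), and once both
+ * datasets are unlinked the shared attribute index should report 0 messages.
+ */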
+static void
+test_attr_shared_delete(hid_t fcpl, hid_t fapl)
+{
+ hid_t fid; /* File ID */
+ hid_t my_fcpl; /* File creation property list ID */
+ hid_t dataset, dataset2; /* Dataset IDs */
+ hid_t attr_tid; /* Attribute's datatype ID */
+ hid_t sid, big_sid; /* Dataspace IDs */
+ hsize_t big_dims[] = {SPACE1_DIM1, SPACE1_DIM2, SPACE1_DIM3}; /* Dimensions for "big" attribute */
+ hid_t attr; /* Attribute ID */
+ hid_t dcpl; /* Dataset creation property list ID */
+ char attrname[NAME_BUF_SIZE]; /* Name of attribute on first dataset */
+ unsigned max_compact; /* Maximum # of attributes to store compactly */
+ unsigned min_dense; /* Minimum # of attributes to store "densely" */
+#if 0
+ htri_t is_dense; /* Are attributes stored densely? */
+ htri_t is_shared; /* Is attribute shared? */
+ hsize_t shared_refcount; /* Reference count of shared attribute */
+#endif
+ unsigned attr_value; /* Attribute value */
+ unsigned *big_value; /* Data for "big" attribute */
+#if 0
+ size_t mesg_count; /* # of shared messages */
+#endif
+ unsigned test_shared; /* Index over shared component type */
+ unsigned u; /* Local index variable */
+#if 0
+ h5_stat_size_t empty_filesize; /* Size of empty file */
+ h5_stat_size_t filesize; /* Size of file after modifications */
+#endif
+ herr_t ret; /* Generic return value */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing Deleting Shared & Unshared Attributes in Compact & Dense Storage\n"));
+
+ /* Allocate & initialize "big" attribute data */
+ big_value = (unsigned *)HDmalloc((size_t)(SPACE1_DIM1 * SPACE1_DIM2 * SPACE1_DIM3) * sizeof(unsigned));
+ CHECK_PTR(big_value, "HDmalloc");
+ HDmemset(big_value, 1, sizeof(unsigned) * (size_t)(SPACE1_DIM1 * SPACE1_DIM2 * SPACE1_DIM3));
+
+ /* Create dataspace for dataset */
+ sid = H5Screate(H5S_SCALAR);
+ CHECK(sid, FAIL, "H5Screate");
+
+ /* Create "big" dataspace for "large" attributes */
+ big_sid = H5Screate_simple(SPACE1_RANK, big_dims, NULL);
+ CHECK(big_sid, FAIL, "H5Screate_simple");
+
+ /* Loop over type of shared components */
+ for (test_shared = 0; test_shared < 3; test_shared++) {
+ /* Make copy of file creation property list */
+ my_fcpl = H5Pcopy(fcpl);
+ CHECK(my_fcpl, FAIL, "H5Pcopy");
+
+ /* Set up datatype for attributes */
+ attr_tid = H5Tcopy(H5T_NATIVE_UINT);
+ CHECK(attr_tid, FAIL, "H5Tcopy");
+
+ /* Special setup for each type of shared components */
+ if (test_shared == 0) {
+ /* Make attributes > 500 bytes shared */
+ ret = H5Pset_shared_mesg_nindexes(my_fcpl, (unsigned)1);
+ CHECK_I(ret, "H5Pset_shared_mesg_nindexes");
+ ret = H5Pset_shared_mesg_index(my_fcpl, (unsigned)0, H5O_SHMESG_ATTR_FLAG, (unsigned)500);
+ CHECK_I(ret, "H5Pset_shared_mesg_index");
+ } /* end if */
+ else {
+ /* Configure the FCPL copy to share attributes, datatypes & dataspaces */
+
+ ret = H5Pset_shared_mesg_nindexes(my_fcpl, (unsigned)3);
+ CHECK_I(ret, "H5Pset_shared_mesg_nindexes");
+
+ /* Make attributes > 500 bytes shared */
+ ret = H5Pset_shared_mesg_index(my_fcpl, (unsigned)0, H5O_SHMESG_ATTR_FLAG, (unsigned)500);
+ CHECK_I(ret, "H5Pset_shared_mesg_index");
+
+ /* Make datatypes & dataspaces > 1 byte shared (i.e. all of them :-) */
+ ret = H5Pset_shared_mesg_index(my_fcpl, (unsigned)1, H5O_SHMESG_DTYPE_FLAG, (unsigned)1);
+ CHECK_I(ret, "H5Pset_shared_mesg_index");
+ ret = H5Pset_shared_mesg_index(my_fcpl, (unsigned)2, H5O_SHMESG_SDSPACE_FLAG, (unsigned)1);
+ CHECK_I(ret, "H5Pset_shared_mesg_index");
+ } /* end else */
+
+ /* Create file */
+ fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, my_fcpl, fapl);
+ CHECK(fid, FAIL, "H5Fcreate");
+
+ /* Close FCPL copy */
+ ret = H5Pclose(my_fcpl);
+ CHECK(ret, FAIL, "H5Pclose");
+
+ /* Close file */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+#if 0
+ /* Get size of file */
+ empty_filesize = h5_get_file_size(FILENAME, fapl);
+ if (empty_filesize < 0)
+ TestErrPrintf("Line %d: file size wrong!\n", __LINE__);
+#endif
+
+ /* Re-open file */
+ fid = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl);
+ CHECK(fid, FAIL, "H5Fopen");
+
+ /* Commit datatype to file */
+ if (test_shared == 2) {
+ ret = H5Tcommit2(fid, TYPE1_NAME, attr_tid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Tcommit2");
+ } /* end if */
+
+ /* Set up to query the object creation properties */
+ if (dcpl_g == H5P_DEFAULT) {
+ dcpl = H5Pcreate(H5P_DATASET_CREATE);
+ CHECK(dcpl, FAIL, "H5Pcreate");
+ }
+ else {
+ dcpl = H5Pcopy(dcpl_g);
+ CHECK(dcpl, FAIL, "H5Pcopy");
+ }
+
+ /* Create datasets */
+ dataset = H5Dcreate2(fid, DSET1_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT);
+ CHECK(dataset, FAIL, "H5Dcreate2");
+ dataset2 = H5Dcreate2(fid, DSET2_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT);
+ CHECK(dataset2, FAIL, "H5Dcreate2");
+#if 0
+ /* Check on dataset's message storage status */
+ if (test_shared != 0) {
+ /* Datasets' datatypes can be shared */
+ ret = H5F__get_sohm_mesg_count_test(fid, H5O_DTYPE_ID, &mesg_count);
+ CHECK(ret, FAIL, "H5F__get_sohm_mesg_count_test");
+ VERIFY(mesg_count, 1, "H5F__get_sohm_mesg_count_test");
+
+ /* Datasets' dataspace can be shared */
+ ret = H5F__get_sohm_mesg_count_test(fid, H5O_SDSPACE_ID, &mesg_count);
+ CHECK(ret, FAIL, "H5F__get_sohm_mesg_count_test");
+ VERIFY(mesg_count, 1, "H5F__get_sohm_mesg_count_test");
+ } /* end if */
+#endif
+ /* Retrieve limits for compact/dense attribute storage */
+ ret = H5Pget_attr_phase_change(dcpl, &max_compact, &min_dense);
+ CHECK(ret, FAIL, "H5Pget_attr_phase_change");
+
+ /* Close property list */
+ ret = H5Pclose(dcpl);
+ CHECK(ret, FAIL, "H5Pclose");
+#if 0
+ /* Check on datasets' attribute storage status */
+ is_dense = H5O__is_attr_dense_test(dataset);
+ VERIFY(is_dense, FALSE, "H5O__is_attr_dense_test");
+ is_dense = H5O__is_attr_dense_test(dataset2);
+ VERIFY(is_dense, FALSE, "H5O__is_attr_dense_test");
+#endif
+ /* Add attributes to each dataset, until after converting to dense storage */
+ for (u = 0; u < max_compact * 2; u++) {
+ /* Create attribute name */
+ HDsnprintf(attrname, sizeof(attrname), "attr %02u", u);
+
+ /* Alternate between creating "small" & "big" attributes */
+ if (u % 2) {
+ /* Create "small" attribute on first dataset */
+ attr = H5Acreate2(dataset, attrname, attr_tid, sid, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(attr, FAIL, "H5Acreate2");
+#if 0
+ /* Check that attribute is not shared */
+ is_shared = H5A__is_shared_test(attr);
+ VERIFY(is_shared, FALSE, "H5A__is_shared_test");
+#endif
+ /* Write data into the attribute */
+ attr_value = u + 1;
+ ret = H5Awrite(attr, attr_tid, &attr_value);
+ CHECK(ret, FAIL, "H5Awrite");
+ } /* end if */
+ else {
+ /* Create "big" attribute on first dataset */
+ attr = H5Acreate2(dataset, attrname, attr_tid, big_sid, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(attr, FAIL, "H5Acreate2");
+#if 0
+ /* Check that attribute is shared */
+ is_shared = H5A__is_shared_test(attr);
+ VERIFY(is_shared, TRUE, "H5A__is_shared_test");
+
+ /* Check refcount for attribute */
+ ret = H5A__get_shared_rc_test(attr, &shared_refcount);
+ CHECK(ret, FAIL, "H5A__get_shared_rc_test");
+ VERIFY(shared_refcount, 1, "H5A__get_shared_rc_test");
+#endif
+ /* Write data into the attribute */
+ big_value[0] = u + 1;
+ ret = H5Awrite(attr, attr_tid, big_value);
+ CHECK(ret, FAIL, "H5Awrite");
+#if 0
+ /* Check refcount for attribute */
+ ret = H5A__get_shared_rc_test(attr, &shared_refcount);
+ CHECK(ret, FAIL, "H5A__get_shared_rc_test");
+ VERIFY(shared_refcount, 1, "H5A__get_shared_rc_test");
+#endif
+ } /* end else */
+
+ /* Close attribute */
+ ret = H5Aclose(attr);
+ CHECK(ret, FAIL, "H5Aclose");
+#if 0
+ /* Check on dataset's attribute storage status */
+ is_dense = H5O__is_attr_dense_test(dataset);
+ if (u < max_compact)
+ VERIFY(is_dense, FALSE, "H5O__is_attr_dense_test");
+ else
+ VERIFY(is_dense, TRUE, "H5O__is_attr_dense_test");
+#endif
+
+ /* Alternate between creating "small" & "big" attributes */
+ if (u % 2) {
+ /* Create "small" attribute on second dataset */
+ attr = H5Acreate2(dataset2, attrname, attr_tid, sid, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(attr, FAIL, "H5Acreate2");
+#if 0
+ /* Check that attribute is not shared */
+ is_shared = H5A__is_shared_test(attr);
+ VERIFY(is_shared, FALSE, "H5A__is_shared_test");
+#endif
+ /* Write data into the attribute */
+ attr_value = u + 1;
+ ret = H5Awrite(attr, attr_tid, &attr_value);
+ CHECK(ret, FAIL, "H5Awrite");
+ } /* end if */
+ else {
+ /* Create "big" attribute on second dataset */
+ attr = H5Acreate2(dataset2, attrname, attr_tid, big_sid, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(attr, FAIL, "H5Acreate2");
+#if 0
+ /* Check that attribute is shared */
+ is_shared = H5A__is_shared_test(attr);
+ VERIFY(is_shared, TRUE, "H5A__is_shared_test");
+
+ /* Check refcount for attribute */
+ ret = H5A__get_shared_rc_test(attr, &shared_refcount);
+ CHECK(ret, FAIL, "H5A__get_shared_rc_test");
+ VERIFY(shared_refcount, 1, "H5A__get_shared_rc_test");
+#endif
+ /* Write data into the attribute */
+ big_value[0] = u + 1;
+ ret = H5Awrite(attr, attr_tid, big_value);
+ CHECK(ret, FAIL, "H5Awrite");
+#if 0
+ /* Check refcount for attribute */
+ ret = H5A__get_shared_rc_test(attr, &shared_refcount);
+ CHECK(ret, FAIL, "H5A__get_shared_rc_test");
+ VERIFY(shared_refcount, 2, "H5A__get_shared_rc_test");
+#endif
+ } /* end else */
+
+ /* Close attribute */
+ ret = H5Aclose(attr);
+ CHECK(ret, FAIL, "H5Aclose");
+#if 0
+ /* Check on dataset's attribute storage status */
+ is_dense = H5O__is_attr_dense_test(dataset2);
+ if (u < max_compact)
+ VERIFY(is_dense, FALSE, "H5O__is_attr_dense_test");
+ else
+ VERIFY(is_dense, TRUE, "H5O__is_attr_dense_test");
+#endif
+ } /* end for */
+
+ /* Delete attributes from second dataset */
+ for (u = 0; u < max_compact * 2; u++) {
+ /* Create attribute name */
+ HDsnprintf(attrname, sizeof(attrname), "attr %02u", u);
+
+ /* Delete second dataset's attribute */
+ ret = H5Adelete_by_name(fid, DSET2_NAME, attrname, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Adelete_by_name");
+
+ /* Check refcount on attributes now */
+
+ /* Check refcount on first dataset's attribute */
+ attr = H5Aopen(dataset, attrname, H5P_DEFAULT);
+ CHECK(attr, FAIL, "H5Aopen");
+#if 0
+ if (u % 2) {
+ /* Check that attribute is not shared */
+ is_shared = H5A__is_shared_test(attr);
+ VERIFY(is_shared, FALSE, "H5A__is_shared_test");
+ } /* end if */
+ else {
+ /* Check that attribute is shared */
+ is_shared = H5A__is_shared_test(attr);
+ VERIFY(is_shared, TRUE, "H5A__is_shared_test");
+
+ /* Check refcount for attribute */
+ ret = H5A__get_shared_rc_test(attr, &shared_refcount);
+ CHECK(ret, FAIL, "H5A__get_shared_rc_test");
+ VERIFY(shared_refcount, 1, "H5A__get_shared_rc_test");
+ } /* end else */
+#endif
+ /* Close attribute */
+ ret = H5Aclose(attr);
+ CHECK(ret, FAIL, "H5Aclose");
+ } /* end for */
+
+ /* Close attribute's datatype */
+ ret = H5Tclose(attr_tid);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ /* Close Datasets */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+ ret = H5Dclose(dataset2);
+ CHECK(ret, FAIL, "H5Dclose");
+#if 0
+ /* Check on shared message status now */
+ if (test_shared != 0) {
+ if (test_shared == 1) {
+ /* Check on datatype storage status */
+ ret = H5F__get_sohm_mesg_count_test(fid, H5O_DTYPE_ID, &mesg_count);
+ CHECK(ret, FAIL, "H5F__get_sohm_mesg_count_test");
+ VERIFY(mesg_count, 2, "H5F__get_sohm_mesg_count_test");
+ } /* end if */
+
+ /* Check on dataspace storage status */
+ ret = H5F__get_sohm_mesg_count_test(fid, H5O_SDSPACE_ID, &mesg_count);
+ CHECK(ret, FAIL, "H5F__get_sohm_mesg_count_test");
+ VERIFY(mesg_count, 2, "H5F__get_sohm_mesg_count_test");
+ } /* end if */
+#endif
+ /* Unlink datasets with attributes */
+ ret = H5Ldelete(fid, DSET1_NAME, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Ldelete");
+ ret = H5Ldelete(fid, DSET2_NAME, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Ldelete");
+
+ /* Unlink committed datatype */
+ if (test_shared == 2) {
+ ret = H5Ldelete(fid, TYPE1_NAME, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Ldelete");
+ } /* end if */
+#if 0
+ /* Check on attribute storage status */
+ ret = H5F__get_sohm_mesg_count_test(fid, H5O_ATTR_ID, &mesg_count);
+ CHECK(ret, FAIL, "H5F__get_sohm_mesg_count_test");
+ VERIFY(mesg_count, 0, "H5F__get_sohm_mesg_count_test");
+
+ if (test_shared != 0) {
+ /* Check on datatype storage status */
+ ret = H5F__get_sohm_mesg_count_test(fid, H5O_DTYPE_ID, &mesg_count);
+ CHECK(ret, FAIL, "H5F__get_sohm_mesg_count_test");
+ VERIFY(mesg_count, 0, "H5F__get_sohm_mesg_count_test");
+
+ /* Check on dataspace storage status */
+ ret = H5F__get_sohm_mesg_count_test(fid, H5O_SDSPACE_ID, &mesg_count);
+ CHECK(ret, FAIL, "H5F__get_sohm_mesg_count_test");
+ VERIFY(mesg_count, 0, "H5F__get_sohm_mesg_count_test");
+ } /* end if */
+#endif
+ /* Close file */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+#if 0
+ if (h5_using_default_driver(NULL)) {
+ /* Check size of file */
+ filesize = h5_get_file_size(FILENAME, fapl);
+ VERIFY(filesize, empty_filesize, "h5_get_file_size");
+ }
+#endif
+ } /* end for */
+
+ /* Close dataspaces */
+ ret = H5Sclose(sid);
+ CHECK(ret, FAIL, "H5Sclose");
+ ret = H5Sclose(big_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Release memory */
+ HDfree(big_value);
+} /* test_attr_shared_delete() */
+
+/****************************************************************
+**
+** test_attr_shared_unlink(): Test basic H5A (attribute) code.
+** Tests unlinking an object with shared attributes in "compact" & "dense" storage
+**
+****************************************************************/
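+/* Note: unlinking the second dataset removes all of its attributes at once,
+ * which is expected to decrement the shared-message refcounts the same way
+ * explicit deletions would; the disabled checks below would then verify that
+ * the first dataset's big attributes are the sole holders (refcount 1).
+ */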
+static void
+test_attr_shared_unlink(hid_t fcpl, hid_t fapl)
+{
+ hid_t fid; /* File ID */
+ hid_t my_fcpl; /* File creation property list ID */
+ hid_t dataset, dataset2; /* Dataset IDs */
+ hid_t attr_tid; /* Attribute's datatype ID */
+ hid_t sid, big_sid; /* Dataspace IDs */
+ hsize_t big_dims[] = {SPACE1_DIM1, SPACE1_DIM2, SPACE1_DIM3}; /* Dimensions for "big" attribute */
+ hid_t attr; /* Attribute ID */
+ hid_t dcpl; /* Dataset creation property list ID */
+ char attrname[NAME_BUF_SIZE]; /* Name of attribute on first dataset */
+ unsigned max_compact; /* Maximum # of attributes to store compactly */
+ unsigned min_dense; /* Minimum # of attributes to store "densely" */
+#if 0
+ htri_t is_dense; /* Are attributes stored densely? */
+ htri_t is_shared; /* Is attribute shared? */
+ hsize_t shared_refcount; /* Reference count of shared attribute */
+#endif
+ unsigned attr_value; /* Attribute value */
+ unsigned *big_value; /* Data for "big" attribute */
+#if 0
+ size_t mesg_count; /* # of shared messages */
+#endif
+ unsigned test_shared; /* Index over shared component type */
+ unsigned u; /* Local index variable */
+#if 0
+ h5_stat_size_t empty_filesize; /* Size of empty file */
+ h5_stat_size_t filesize; /* Size of file after modifications */
+#endif
+ herr_t ret; /* Generic return value */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing Unlinking Object with Shared Attributes in Compact & Dense Storage\n"));
+
+ /* Allocate & initialize "big" attribute data */
+ big_value = (unsigned *)HDmalloc((size_t)(SPACE1_DIM1 * SPACE1_DIM2 * SPACE1_DIM3) * sizeof(unsigned));
+ CHECK_PTR(big_value, "HDmalloc");
+ HDmemset(big_value, 1, sizeof(unsigned) * (size_t)(SPACE1_DIM1 * SPACE1_DIM2 * SPACE1_DIM3));
+
+ /* Create dataspace for dataset */
+ sid = H5Screate(H5S_SCALAR);
+ CHECK(sid, FAIL, "H5Screate");
+
+ /* Create "big" dataspace for "large" attributes */
+ big_sid = H5Screate_simple(SPACE1_RANK, big_dims, NULL);
+ CHECK(big_sid, FAIL, "H5Screate_simple");
+
+ /* Loop over type of shared components */
+ for (test_shared = 0; test_shared < 3; test_shared++) {
+ /* Make copy of file creation property list */
+ my_fcpl = H5Pcopy(fcpl);
+ CHECK(my_fcpl, FAIL, "H5Pcopy");
+
+ /* Set up datatype for attributes */
+ attr_tid = H5Tcopy(H5T_NATIVE_UINT);
+ CHECK(attr_tid, FAIL, "H5Tcopy");
+
+ /* Special setup for each type of shared components */
+ if (test_shared == 0) {
+ /* Make attributes > 500 bytes shared */
+ ret = H5Pset_shared_mesg_nindexes(my_fcpl, (unsigned)1);
+ CHECK_I(ret, "H5Pset_shared_mesg_nindexes");
+ ret = H5Pset_shared_mesg_index(my_fcpl, (unsigned)0, H5O_SHMESG_ATTR_FLAG, (unsigned)500);
+ CHECK_I(ret, "H5Pset_shared_mesg_index");
+ } /* end if */
+ else {
+ /* Configure the FCPL copy to share attributes, datatypes & dataspaces */
+
+ ret = H5Pset_shared_mesg_nindexes(my_fcpl, (unsigned)3);
+ CHECK_I(ret, "H5Pset_shared_mesg_nindexes");
+
+ /* Make attributes > 500 bytes shared */
+ ret = H5Pset_shared_mesg_index(my_fcpl, (unsigned)0, H5O_SHMESG_ATTR_FLAG, (unsigned)500);
+ CHECK_I(ret, "H5Pset_shared_mesg_index");
+
+ /* Make datatypes & dataspaces > 1 byte shared (i.e. all of them :-) */
+ ret = H5Pset_shared_mesg_index(my_fcpl, (unsigned)1, H5O_SHMESG_DTYPE_FLAG, (unsigned)1);
+ CHECK_I(ret, "H5Pset_shared_mesg_index");
+ ret = H5Pset_shared_mesg_index(my_fcpl, (unsigned)2, H5O_SHMESG_SDSPACE_FLAG, (unsigned)1);
+ CHECK_I(ret, "H5Pset_shared_mesg_index");
+ } /* end else */
+
+ /* Create file */
+ fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, my_fcpl, fapl);
+ CHECK(fid, FAIL, "H5Fcreate");
+
+ /* Close FCPL copy */
+ ret = H5Pclose(my_fcpl);
+ CHECK(ret, FAIL, "H5Pclose");
+
+ /* Close file */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+#if 0
+ /* Get size of file */
+ empty_filesize = h5_get_file_size(FILENAME, fapl);
+ if (empty_filesize < 0)
+ TestErrPrintf("Line %d: file size wrong!\n", __LINE__);
+#endif
+
+ /* Re-open file */
+ fid = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl);
+ CHECK(fid, FAIL, "H5Fopen");
+
+ /* Commit datatype to file */
+ if (test_shared == 2) {
+ ret = H5Tcommit2(fid, TYPE1_NAME, attr_tid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Tcommit2");
+ } /* end if */
+
+ /* Set up to query the object creation properties */
+ if (dcpl_g == H5P_DEFAULT) {
+ dcpl = H5Pcreate(H5P_DATASET_CREATE);
+ CHECK(dcpl, FAIL, "H5Pcreate");
+ }
+ else {
+ dcpl = H5Pcopy(dcpl_g);
+ CHECK(dcpl, FAIL, "H5Pcopy");
+ }
+
+ /* Create datasets */
+ dataset = H5Dcreate2(fid, DSET1_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT);
+ CHECK(dataset, FAIL, "H5Dcreate2");
+ dataset2 = H5Dcreate2(fid, DSET2_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT);
+ CHECK(dataset2, FAIL, "H5Dcreate2");
+#if 0
+ /* Check on dataset's message storage status */
+ if (test_shared != 0) {
+ /* Datasets' datatypes can be shared */
+ ret = H5F__get_sohm_mesg_count_test(fid, H5O_DTYPE_ID, &mesg_count);
+ CHECK(ret, FAIL, "H5F__get_sohm_mesg_count_test");
+ VERIFY(mesg_count, 1, "H5F__get_sohm_mesg_count_test");
+
+ /* Datasets' dataspace can be shared */
+ ret = H5F__get_sohm_mesg_count_test(fid, H5O_SDSPACE_ID, &mesg_count);
+ CHECK(ret, FAIL, "H5F__get_sohm_mesg_count_test");
+ VERIFY(mesg_count, 1, "H5F__get_sohm_mesg_count_test");
+ } /* end if */
+#endif
+ /* Retrieve limits for compact/dense attribute storage */
+ ret = H5Pget_attr_phase_change(dcpl, &max_compact, &min_dense);
+ CHECK(ret, FAIL, "H5Pget_attr_phase_change");
+
+ /* Close property list */
+ ret = H5Pclose(dcpl);
+ CHECK(ret, FAIL, "H5Pclose");
+#if 0
+ /* Check on datasets' attribute storage status */
+ is_dense = H5O__is_attr_dense_test(dataset);
+ VERIFY(is_dense, FALSE, "H5O__is_attr_dense_test");
+ is_dense = H5O__is_attr_dense_test(dataset2);
+ VERIFY(is_dense, FALSE, "H5O__is_attr_dense_test");
+#endif
+ /* Add attributes to each dataset, until after converting to dense storage */
+ for (u = 0; u < max_compact * 2; u++) {
+ /* Create attribute name */
+ HDsnprintf(attrname, sizeof(attrname), "attr %02u", u);
+
+ /* Alternate between creating "small" & "big" attributes */
+ if (u % 2) {
+ /* Create "small" attribute on first dataset */
+ attr = H5Acreate2(dataset, attrname, attr_tid, sid, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(attr, FAIL, "H5Acreate2");
+#if 0
+ /* Check that attribute is not shared */
+ is_shared = H5A__is_shared_test(attr);
+ VERIFY(is_shared, FALSE, "H5A__is_shared_test");
+#endif
+ /* Write data into the attribute */
+ attr_value = u + 1;
+ ret = H5Awrite(attr, attr_tid, &attr_value);
+ CHECK(ret, FAIL, "H5Awrite");
+ } /* end if */
+ else {
+ /* Create "big" attribute on first dataset */
+ attr = H5Acreate2(dataset, attrname, attr_tid, big_sid, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(attr, FAIL, "H5Acreate2");
+#if 0
+ /* Check that attribute is shared */
+ is_shared = H5A__is_shared_test(attr);
+ VERIFY(is_shared, TRUE, "H5A__is_shared_test");
+
+ /* Check refcount for attribute */
+ ret = H5A__get_shared_rc_test(attr, &shared_refcount);
+ CHECK(ret, FAIL, "H5A__get_shared_rc_test");
+ VERIFY(shared_refcount, 1, "H5A__get_shared_rc_test");
+#endif
+ /* Write data into the attribute */
+ big_value[0] = u + 1;
+ ret = H5Awrite(attr, attr_tid, big_value);
+ CHECK(ret, FAIL, "H5Awrite");
+#if 0
+ /* Check refcount for attribute */
+ ret = H5A__get_shared_rc_test(attr, &shared_refcount);
+ CHECK(ret, FAIL, "H5A__get_shared_rc_test");
+ VERIFY(shared_refcount, 1, "H5A__get_shared_rc_test");
+#endif
+ } /* end else */
+
+ /* Close attribute */
+ ret = H5Aclose(attr);
+ CHECK(ret, FAIL, "H5Aclose");
+#if 0
+ /* Check on dataset's attribute storage status */
+ is_dense = H5O__is_attr_dense_test(dataset);
+ if (u < max_compact)
+ VERIFY(is_dense, FALSE, "H5O__is_attr_dense_test");
+ else
+ VERIFY(is_dense, TRUE, "H5O__is_attr_dense_test");
+#endif
+
+ /* Alternate between creating "small" & "big" attributes */
+ if (u % 2) {
+ /* Create "small" attribute on second dataset */
+ attr = H5Acreate2(dataset2, attrname, attr_tid, sid, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(attr, FAIL, "H5Acreate2");
+#if 0
+ /* Check that attribute is not shared */
+ is_shared = H5A__is_shared_test(attr);
+ VERIFY(is_shared, FALSE, "H5A__is_shared_test");
+#endif
+ /* Write data into the attribute */
+ attr_value = u + 1;
+ ret = H5Awrite(attr, attr_tid, &attr_value);
+ CHECK(ret, FAIL, "H5Awrite");
+ } /* end if */
+ else {
+ /* Create "big" attribute on second dataset */
+ attr = H5Acreate2(dataset2, attrname, attr_tid, big_sid, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(attr, FAIL, "H5Acreate2");
+#if 0
+ /* Check that attribute is shared */
+ is_shared = H5A__is_shared_test(attr);
+ VERIFY(is_shared, TRUE, "H5A__is_shared_test");
+
+ /* Check refcount for attribute */
+ ret = H5A__get_shared_rc_test(attr, &shared_refcount);
+ CHECK(ret, FAIL, "H5A__get_shared_rc_test");
+ VERIFY(shared_refcount, 1, "H5A__get_shared_rc_test");
+#endif
+ /* Write data into the attribute */
+ big_value[0] = u + 1;
+ ret = H5Awrite(attr, attr_tid, big_value);
+ CHECK(ret, FAIL, "H5Awrite");
+#if 0
+ /* Check refcount for attribute */
+ ret = H5A__get_shared_rc_test(attr, &shared_refcount);
+ CHECK(ret, FAIL, "H5A__get_shared_rc_test");
+ VERIFY(shared_refcount, 2, "H5A__get_shared_rc_test");
+#endif
+ } /* end else */
+
+ /* Close attribute */
+ ret = H5Aclose(attr);
+ CHECK(ret, FAIL, "H5Aclose");
+#if 0
+ /* Check on dataset's attribute storage status */
+ is_dense = H5O__is_attr_dense_test(dataset2);
+ if (u < max_compact)
+ VERIFY(is_dense, FALSE, "H5O__is_attr_dense_test");
+ else
+ VERIFY(is_dense, TRUE, "H5O__is_attr_dense_test");
+#endif
+ } /* end for */
+
+ /* Close attribute's datatype */
+ ret = H5Tclose(attr_tid);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ /* Close second dataset */
+ ret = H5Dclose(dataset2);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Unlink second dataset */
+ ret = H5Ldelete(fid, DSET2_NAME, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Ldelete");
+
+#if 0
+ /* Check on first dataset's attribute storage status */
+ is_dense = H5O__is_attr_dense_test(dataset);
+ VERIFY(is_dense, TRUE, "H5O__is_attr_dense_test");
+#endif
+ /* Check ref count on attributes of first dataset */
+ for (u = 0; u < max_compact * 2; u++) {
+ /* Create attribute name */
+ HDsnprintf(attrname, sizeof(attrname), "attr %02u", u);
+
+ /* Open attribute on first dataset */
+ attr = H5Aopen(dataset, attrname, H5P_DEFAULT);
+ CHECK(attr, FAIL, "H5Aopen");
+#if 0
+ if (u % 2) {
+ /* Check that attribute is not shared */
+ is_shared = H5A__is_shared_test(attr);
+ VERIFY(is_shared, FALSE, "H5A__is_shared_test");
+ } /* end if */
+ else {
+ /* Check that attribute is shared */
+ is_shared = H5A__is_shared_test(attr);
+ VERIFY(is_shared, TRUE, "H5A__is_shared_test");
+
+ /* Check refcount for attribute */
+ ret = H5A__get_shared_rc_test(attr, &shared_refcount);
+ CHECK(ret, FAIL, "H5A__get_shared_rc_test");
+ VERIFY(shared_refcount, 1, "H5A__get_shared_rc_test");
+ } /* end else */
+#endif
+ /* Close attribute */
+ ret = H5Aclose(attr);
+ CHECK(ret, FAIL, "H5Aclose");
+ } /* end for */
+
+ /* Close first dataset */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Unlink first dataset */
+ ret = H5Ldelete(fid, DSET1_NAME, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Ldelete");
+
+ /* Unlink committed datatype */
+ if (test_shared == 2) {
+ ret = H5Ldelete(fid, TYPE1_NAME, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Ldelete");
+ } /* end if */
+#if 0
+ /* Check on attribute storage status */
+ ret = H5F__get_sohm_mesg_count_test(fid, H5O_ATTR_ID, &mesg_count);
+ CHECK(ret, FAIL, "H5F__get_sohm_mesg_count_test");
+ VERIFY(mesg_count, 0, "H5F__get_sohm_mesg_count_test");
+
+ if (test_shared != 0) {
+ /* Check on datatype storage status */
+ ret = H5F__get_sohm_mesg_count_test(fid, H5O_DTYPE_ID, &mesg_count);
+ CHECK(ret, FAIL, "H5F__get_sohm_mesg_count_test");
+ VERIFY(mesg_count, 0, "H5F__get_sohm_mesg_count_test");
+
+ /* Check on dataspace storage status */
+ ret = H5F__get_sohm_mesg_count_test(fid, H5O_SDSPACE_ID, &mesg_count);
+ CHECK(ret, FAIL, "H5F__get_sohm_mesg_count_test");
+ VERIFY(mesg_count, 0, "H5F__get_sohm_mesg_count_test");
+ } /* end if */
+#endif
+ /* Close file */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+#if 0
+ if (h5_using_default_driver(NULL)) {
+ /* Check size of file */
+ filesize = h5_get_file_size(FILENAME, fapl);
+ VERIFY(filesize, empty_filesize, "h5_get_file_size");
+ }
+#endif
+ } /* end for */
+
+ /* Close dataspaces */
+ ret = H5Sclose(sid);
+ CHECK(ret, FAIL, "H5Sclose");
+ ret = H5Sclose(big_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Release memory */
+ HDfree(big_value);
+} /* test_attr_shared_unlink() */
+
+/****************************************************************
+**
+** test_attr_bug1(): Test basic H5A (attribute) code.
+** Tests odd sequence of allocating and deallocating space in the file.
+** The series of actions below constructs a file with an attribute
+** in each object header chunk, except the first. Then, the attributes
+** are removed and re-created in a way that makes the object header
+** allocation code remove an object header chunk "in the middle" of
+** the sequence of the chunks.
+**
+****************************************************************/
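+/* Note (interpretation, not verified by the test itself): creating another
+ * group between attribute insertions presumably keeps GROUP1's object header
+ * from being extended in place, so each new attribute spills into a fresh
+ * continuation chunk; deleting and re-creating ATTR7/ATTR8 afterwards is what
+ * pushes the allocation code to free a chunk in the middle of that sequence.
+ */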
+static void
+test_attr_bug1(hid_t fcpl, hid_t fapl)
+{
+ hid_t fid; /* File ID */
+ hid_t gid; /* Group ID */
+ hid_t aid; /* Attribute ID */
+ hid_t sid; /* Dataspace ID */
+ herr_t ret; /* Generic return status */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing Allocating and De-allocating Attributes in Unusual Way\n"));
+
+ /* Create dataspace ID for attributes */
+ sid = H5Screate(H5S_SCALAR);
+ CHECK(sid, FAIL, "H5Screate");
+
+ /* Create main group to operate on */
+ fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl, fapl);
+ CHECK(fid, FAIL, "H5Fcreate");
+
+ gid = H5Gcreate2(fid, GROUP1_NAME, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(gid, FAIL, "H5Gcreate2");
+
+ ret = H5Gclose(gid);
+ CHECK(ret, FAIL, "H5Gclose");
+
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Re-open file and create another group, then attribute on first group */
+ fid = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl);
+ CHECK(fid, FAIL, "H5Fopen");
+
+ /* Create second group */
+ gid = H5Gcreate2(fid, GROUP2_NAME, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(gid, FAIL, "H5Gcreate2");
+
+ ret = H5Gclose(gid);
+ CHECK(ret, FAIL, "H5Gclose");
+
+ /* Re-open first group */
+ gid = H5Gopen2(fid, GROUP1_NAME, H5P_DEFAULT);
+ CHECK(gid, FAIL, "H5Gopen2");
+
+ /* Create attribute on first group */
+ aid = H5Acreate2(gid, ATTR7_NAME, H5T_NATIVE_DOUBLE, sid, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(aid, FAIL, "H5Acreate2");
+
+ ret = H5Aclose(aid);
+ CHECK(ret, FAIL, "H5Aclose");
+
+ ret = H5Gclose(gid);
+ CHECK(ret, FAIL, "H5Gclose");
+
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Re-open file and create another group, then another attribute on first group */
+ fid = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl);
+ CHECK(fid, FAIL, "H5Fopen");
+
+ /* Create third group */
+ gid = H5Gcreate2(fid, GROUP3_NAME, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(gid, FAIL, "H5Gcreate2");
+
+ ret = H5Gclose(gid);
+ CHECK(ret, FAIL, "H5Gclose");
+
+ /* Unlink second group */
+ ret = H5Ldelete(fid, GROUP2_NAME, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Ldelete");
+
+ /* Re-open first group */
+ gid = H5Gopen2(fid, GROUP1_NAME, H5P_DEFAULT);
+ CHECK(gid, FAIL, "H5Gopen2");
+
+ /* Create another attribute on first group */
+ aid = H5Acreate2(gid, ATTR8_NAME, H5T_NATIVE_DOUBLE, sid, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(aid, FAIL, "H5Acreate2");
+
+ ret = H5Aclose(aid);
+ CHECK(ret, FAIL, "H5Aclose");
+
+ ret = H5Gclose(gid);
+ CHECK(ret, FAIL, "H5Gclose");
+
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Re-open file and re-create attributes on first group */
+ fid = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl);
+ CHECK(fid, FAIL, "H5Fopen");
+
+ /* Re-open first group */
+ gid = H5Gopen2(fid, GROUP1_NAME, H5P_DEFAULT);
+ CHECK(gid, FAIL, "H5Gopen2");
+
+ /* Delete first attribute */
+ ret = H5Adelete(gid, ATTR7_NAME);
+ CHECK(ret, FAIL, "H5Adelete");
+
+ /* Re-create first attribute */
+ aid = H5Acreate2(gid, ATTR7_NAME, H5T_NATIVE_DOUBLE, sid, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(aid, FAIL, "H5Acreate2");
+
+ ret = H5Aclose(aid);
+ CHECK(ret, FAIL, "H5Aclose");
+
+ /* Delete second attribute */
+ ret = H5Adelete(gid, ATTR8_NAME);
+ CHECK(ret, FAIL, "H5Adelete");
+
+ /* Re-create second attribute */
+ aid = H5Acreate2(gid, ATTR8_NAME, H5T_NATIVE_DOUBLE, sid, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(aid, FAIL, "H5Acreate2");
+
+ ret = H5Aclose(aid);
+ CHECK(ret, FAIL, "H5Aclose");
+
+ ret = H5Gclose(gid);
+ CHECK(ret, FAIL, "H5Gclose");
+
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Close dataspace ID */
+ ret = H5Sclose(sid);
+ CHECK(ret, FAIL, "H5Gclose");
+} /* test_attr_bug1() */
+
+/****************************************************************
+**
+** test_attr_bug2(): Test basic H5A (attribute) code.
+** Tests deleting a large number of attributes with the
+** intention of creating a null message with a size that
+** is too large. This routine deletes every other
+** attribute, but the original bug could also be
+** reproduced by deleting every attribute except a few to
+** keep the chunk open.
+**
+****************************************************************/
+static void
+test_attr_bug2(hid_t fcpl, hid_t fapl)
+{
+ hid_t fid; /* File ID */
+ hid_t gid; /* Group ID */
+ hid_t aid; /* Attribute ID */
+ hid_t sid; /* Dataspace ID */
+ hid_t tid; /* Datatype ID */
+ hid_t gcpl; /* Group creation property list */
+ hsize_t dims[2] = {10, 100}; /* Attribute dimensions */
+ char aname[16]; /* Attribute name */
+ unsigned i; /* index */
+ herr_t ret; /* Generic return status */
+ htri_t tri_ret; /* htri_t return status */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing Allocating and De-allocating Attributes in Unusual Way\n"));
+
+ /* Create group creation property list */
+ gcpl = H5Pcreate(H5P_GROUP_CREATE);
+ CHECK(gcpl, FAIL, "H5Pcreate");
+
+ /* Prevent the library from switching to dense attribute storage */
+ /* Not doing this with the latest format actually triggers a different bug.
+ * This will be tested here as soon as it is fixed. -NAF
+ */
+ ret = H5Pset_attr_phase_change(gcpl, BUG2_NATTR + 10, BUG2_NATTR + 5);
+ CHECK(ret, FAIL, "H5Pset_attr_phase_change");
+
+ /* Create dataspace ID for attributes */
+ sid = H5Screate_simple(2, dims, NULL);
+ CHECK(sid, FAIL, "H5Screate_simple");
+
+ /* Create main group to operate on */
+ fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl, fapl);
+ CHECK(fid, FAIL, "H5Fcreate");
+
+ gid = H5Gcreate2(fid, GROUP1_NAME, H5P_DEFAULT, gcpl, H5P_DEFAULT);
+ CHECK(gid, FAIL, "H5Gcreate2");
+
+ /* Create attributes on group */
+ for (i = 0; i < BUG2_NATTR; i++) {
+ HDsnprintf(aname, sizeof(aname), "%03u", i);
+ aid = H5Acreate2(gid, aname, H5T_STD_I32LE, sid, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(aid, FAIL, "H5Acreate2");
+
+ ret = H5Aclose(aid);
+ CHECK(ret, FAIL, "H5Aclose");
+ }
+
+ /* Delete every other attribute */
+ for (i = 1; i < BUG2_NATTR; i += 2) {
+ HDsnprintf(aname, sizeof(aname), "%03u", i);
+ ret = H5Adelete(gid, aname);
+ CHECK(ret, FAIL, "H5Adelete");
+ }
+
+ /* Close IDs */
+ ret = H5Gclose(gid);
+ CHECK(ret, FAIL, "H5Gclose");
+
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ ret = H5Sclose(sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Reopen file and group */
+ fid = H5Fopen(FILENAME, H5F_ACC_RDONLY, fapl);
+ CHECK(fid, FAIL, "H5Fopen");
+
+ gid = H5Gopen2(fid, GROUP1_NAME, H5P_DEFAULT);
+ CHECK(gid, FAIL, "H5Gopen");
+
+ /* Open an attribute in the middle */
+ i = (BUG2_NATTR / 4) * 2;
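+ /* (an even index, so this attribute survived the "delete every other" pass above) */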
+ HDsnprintf(aname, sizeof(aname), "%03u", i);
+ aid = H5Aopen(gid, aname, H5P_DEFAULT);
+ CHECK(aid, FAIL, "H5Aopen");
+
+ /* Verify that the attribute has the correct datatype */
+ tid = H5Aget_type(aid);
+ CHECK(tid, FAIL, "H5Aget_type");
+
+ tri_ret = H5Tequal(tid, H5T_STD_I32LE);
+ VERIFY(tri_ret, TRUE, "H5Tequal");
+
+ /* Close IDs */
+ ret = H5Tclose(tid);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ ret = H5Aclose(aid);
+ CHECK(ret, FAIL, "H5Aclose");
+
+ ret = H5Gclose(gid);
+ CHECK(ret, FAIL, "H5Gclose");
+
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Now test a variation on this bug - where either the size of chunk 0 goes
+ * down a "notch" or two, or chunk 1 becomes completely null at the same
+ * time that a null message that is too large is formed */
+ dims[0] = 25;
+ dims[1] = 41; /* 1025*4 byte attribute size */
+
+ /* Create dataspace ID for attributes */
+ sid = H5Screate_simple(2, dims, NULL);
+ CHECK(sid, FAIL, "H5Screate_simple");
+
+ /* Create main group to operate on */
+ fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl, fapl);
+ CHECK(fid, FAIL, "H5Fcreate");
+
+ gid = H5Gcreate2(fid, GROUP1_NAME, H5P_DEFAULT, gcpl, H5P_DEFAULT);
+ CHECK(gid, FAIL, "H5Gcreate2");
+
+ /* Create attributes on group */
+ for (i = 0; i < BUG2_NATTR2; i++) {
+ HDsnprintf(aname, sizeof(aname), "%03u", i);
+ aid = H5Acreate2(gid, aname, H5T_STD_I32LE, sid, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(aid, FAIL, "H5Acreate2");
+
+ ret = H5Aclose(aid);
+ CHECK(ret, FAIL, "H5Aclose");
+ }
+
+ /* Delete all of the attributes */
+ for (i = 0; i < BUG2_NATTR2; i++) {
+ HDsnprintf(aname, sizeof(aname), "%03u", i);
+ ret = H5Adelete(gid, aname);
+ CHECK(ret, FAIL, "H5Adelete");
+ }
+
+ /* Close IDs */
+ ret = H5Gclose(gid);
+ CHECK(ret, FAIL, "H5Gclose");
+
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ ret = H5Sclose(sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ ret = H5Pclose(gcpl);
+ CHECK(ret, FAIL, "H5Pclose");
+} /* test_attr_bug2() */
+
+/****************************************************************
+**
+** test_attr_bug3(): Test basic H5A (attribute) code.
+** Tests creating and deleting attributes which use a
+** datatype and/or dataspace stored in the same object
+** header.
+**
+****************************************************************/
+static void
+test_attr_bug3(hid_t fcpl, hid_t fapl)
+{
+ hid_t fid; /* File ID */
+ hid_t aid1, aid2; /* Attribute IDs */
+ hid_t sid1, sid2; /* Dataspace IDs */
+ hid_t tid1, tid2; /* Datatype IDs */
+ hid_t did; /* Dataset ID */
+ hsize_t dims1[2] = {2, 2}, dims2[2] = {3, 3}; /* Dimensions */
+ int wdata1[2][2];
+ unsigned wdata2[3][3]; /* Write buffers */
+ unsigned u, v; /* Local index variables */
+ herr_t ret; /* Generic return status */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing Attributes in the Same Header as their Datatypes\n"));
+
+ /* Create dataspaces */
+ sid1 = H5Screate_simple(2, dims1, NULL);
+ CHECK(sid1, FAIL, "H5Screate_simple");
+ sid2 = H5Screate_simple(2, dims2, NULL);
+ CHECK(sid2, FAIL, "H5Screate_simple");
+
+ /* Create file to operate on */
+ fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl, fapl);
+ CHECK(fid, FAIL, "H5Fcreate");
+
+ /* Create datatypes and commit tid1 */
+ tid1 = H5Tcopy(H5T_STD_I16BE);
+ CHECK(tid1, FAIL, "H5Tcopy");
+ tid2 = H5Tcopy(H5T_STD_U64LE);
+ CHECK(tid1, FAIL, "H5Tcopy");
+ ret = H5Tcommit2(fid, "dtype", tid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Tcommit2");
+
+ /* Create dataset */
+ did = H5Dcreate2(fid, "dset", tid2, sid2, H5P_DEFAULT, dcpl_g, H5P_DEFAULT);
+ CHECK(did, FAIL, "H5Dcreate2");
+
+ /* Create attribute on datatype, using that datatype as its datatype */
+ aid1 = H5Acreate2(tid1, "attr", tid1, sid1, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(aid1, FAIL, "H5Acreate2");
+
+ /* Create attribute on dataset, using its datatype and dataspace */
+ aid2 = H5Acreate2(did, "attr", tid2, sid2, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(aid2, FAIL, "H5Acreate2");
+
+ /* Close attributes */
+ ret = H5Aclose(aid1);
+ CHECK(ret, FAIL, "H5Aclose");
+ ret = H5Aclose(aid2);
+ CHECK(ret, FAIL, "H5Aclose");
+
+ /* Reopen attributes */
+ aid1 = H5Aopen(tid1, "attr", H5P_DEFAULT);
+ CHECK(aid1, FAIL, "H5Aopen");
+ aid2 = H5Aopen(did, "attr", H5P_DEFAULT);
+ CHECK(aid2, FAIL, "H5Aopen");
+
+ /* Initialize the write buffers */
+ for (u = 0; u < dims1[0]; u++)
+ for (v = 0; v < dims1[1]; v++)
+ wdata1[u][v] = (int)((u * dims1[1]) + v);
+ for (u = 0; u < dims2[0]; u++)
+ for (v = 0; v < dims2[1]; v++)
+ wdata2[u][v] = (unsigned)((u * dims2[1]) + v);
+
+ /* Write data to the attributes */
+ ret = H5Awrite(aid1, H5T_NATIVE_INT, wdata1);
+ CHECK(ret, FAIL, "H5Awrite");
+ ret = H5Awrite(aid2, H5T_NATIVE_UINT, wdata2);
+ CHECK(ret, FAIL, "H5Awrite");
+
+ /* Close attributes */
+ ret = H5Aclose(aid1);
+ CHECK(ret, FAIL, "H5Aclose");
+ ret = H5Aclose(aid2);
+ CHECK(ret, FAIL, "H5Aclose");
+
+ /* Delete attributes */
+ ret = H5Adelete(tid1, "attr");
+ CHECK(ret, FAIL, "H5Adelete");
+ ret = H5Adelete(did, "attr");
+ CHECK(ret, FAIL, "H5Adelete");
+
+ /* Recreate attributes */
+ aid1 = H5Acreate2(tid1, "attr", tid1, sid1, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(aid1, FAIL, "H5Acreate2");
+ aid2 = H5Acreate2(did, "attr", tid2, sid2, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(aid2, FAIL, "H5Acreate2");
+
+ /* Delete attributes (note they are still open) */
+ ret = H5Adelete(tid1, "attr");
+ CHECK(ret, FAIL, "H5Adelete");
+ ret = H5Adelete(did, "attr");
+ CHECK(ret, FAIL, "H5Adelete");
+
+ /* Close dataspaces and transient datatype */
+ ret = H5Sclose(sid1);
+ CHECK(ret, FAIL, "H5Sclose");
+ ret = H5Sclose(sid2);
+ CHECK(ret, FAIL, "H5Sclose");
+ ret = H5Tclose(tid2);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ /* Close dataset and committed datatype */
+ ret = H5Tclose(tid1);
+ CHECK(ret, FAIL, "H5Tclose");
+ ret = H5Dclose(did);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Unlink the committed datatype and dataset */
+ ret = H5Ldelete(fid, "dtype", H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Ldelete");
+ ret = H5Ldelete(fid, "dset", H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Ldelete");
+
+ /* Close attributes */
+ ret = H5Aclose(aid1);
+ CHECK(ret, FAIL, "H5Aclose");
+ ret = H5Aclose(aid2);
+ CHECK(ret, FAIL, "H5Aclose");
+
+ /* Close file */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+} /* test_attr_bug3() */
+
+/****************************************************************
+**
+** test_attr_bug4(): Test basic H5A (attribute) code.
+** Attempts to trigger a bug which would result in being
+** unable to add an attribute to a named datatype. This
+** happened when an object header chunk was too small to
+** hold a continuation message and could not be extended.
+**
+****************************************************************/
+static void
+test_attr_bug4(hid_t fcpl, hid_t fapl)
+{
+ hid_t fid; /* File ID */
+ hid_t gid; /* Group ID */
+ hid_t aid1, aid2, aid3; /* Attribute IDs */
+ hid_t sid; /* Dataspace ID */
+ hid_t tid; /* Datatype ID */
+ hid_t did; /* Dataset ID */
+ hsize_t dims[1] = {5}; /* Attribute dimensions */
+ herr_t ret; /* Generic return status */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing that attributes can always be added to named datatypes\n"));
+
+ /* Create dataspace */
+ sid = H5Screate_simple(1, dims, NULL);
+ CHECK(sid, FAIL, "H5Screate_simple");
+
+ /* Create file */
+ fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl, fapl);
+ CHECK(fid, FAIL, "H5Fcreate");
+
+ /* Open root group */
+ gid = H5Gopen2(fid, "/", H5P_DEFAULT);
+ CHECK(gid, FAIL, "H5Gcreate2");
+
+ /* Create committed datatype */
+ tid = H5Tcopy(H5T_STD_I32LE);
+ CHECK(tid, FAIL, "H5Tcopy");
+ ret = H5Tcommit2(fid, "dtype", tid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Tcommit2");
+
+ /* Create dataset */
+ did = H5Dcreate2(fid, "dset", tid, sid, H5P_DEFAULT, dcpl_g, H5P_DEFAULT);
+ CHECK(did, FAIL, "H5Dcreate2");
+
+ /* Create attributes on group and dataset */
+ aid1 = H5Acreate2(gid, "attr", tid, sid, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(aid1, FAIL, "H5Acreate2");
+ aid2 = H5Acreate2(did, "attr", tid, sid, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(aid2, FAIL, "H5Acreate2");
+
+ /* Create attribute on datatype (this is the main test) */
+ aid3 = H5Acreate2(tid, "attr", tid, sid, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(aid3, FAIL, "H5Acreate2");
+
+ /* Close IDs */
+ ret = H5Aclose(aid3);
+ CHECK(ret, FAIL, "H5Aclose");
+
+ ret = H5Aclose(aid2);
+ CHECK(ret, FAIL, "H5Aclose");
+
+ ret = H5Aclose(aid1);
+ CHECK(ret, FAIL, "H5Aclose");
+
+ ret = H5Dclose(did);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ ret = H5Tclose(tid);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ ret = H5Gclose(gid);
+ CHECK(ret, FAIL, "H5Gclose");
+
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ ret = H5Sclose(sid);
+ CHECK(ret, FAIL, "H5Sclose");
+} /* test_attr_bug4() */
+
+/****************************************************************
+**
+** test_attr_bug5(): Test basic H5A (attribute) code.
+** Tests opening an attribute multiple times through
+** objects opened through different file handles.
+**
+****************************************************************/
+static void
+test_attr_bug5(hid_t fcpl, hid_t fapl)
+{
+ hid_t fid1, fid2; /* File IDs */
+ hid_t gid1, gid2; /* Group IDs */
+ hid_t did1, did2; /* Dataset IDs */
+ hid_t tid1, tid2; /* Datatype IDs */
+ hid_t aidg1, aidg2, aidd1, aidd2, aidt1, aidt2; /* Attribute IDs */
+ hid_t sid; /* Dataspace ID */
+ hsize_t dims[1] = {5}; /* Attribute dimensions */
+ herr_t ret; /* Generic return status */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing Opening an Attribute Through Multiple Files Concurrently\n"));
+
+ /* Create dataspace ID for attributes and datasets */
+ sid = H5Screate_simple(1, dims, NULL);
+ CHECK(sid, FAIL, "H5Screate_simple");
+
+ /* Create file */
+ fid1 = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl, fapl);
+ CHECK(fid1, FAIL, "H5Fcreate");
+
+ /* Open root group */
+ gid1 = H5Gopen2(fid1, "/", H5P_DEFAULT);
+ CHECK(gid1, FAIL, "H5Gopen2");
+
+ /* Create and commit datatype */
+ tid1 = H5Tcopy(H5T_STD_I32LE);
+ CHECK(tid1, FAIL, "H5Tcopy");
+ ret = H5Tcommit2(fid1, BUG3_DT_NAME, tid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Tcommit2");
+
+ /* Create dataset */
+ did1 = H5Dcreate2(fid1, BUG3_DSET_NAME, tid1, sid, H5P_DEFAULT, dcpl_g, H5P_DEFAULT);
+ CHECK(did1, FAIL, "H5Dcreate2");
+
+ /* Create attribute on root group */
+ aidg1 = H5Acreate2(gid1, BUG3_ATTR_NAME, tid1, sid, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(aidg1, FAIL, "H5Acreate2");
+
+ /* Create attribute on dataset */
+ aidd1 = H5Acreate2(did1, BUG3_ATTR_NAME, tid1, sid, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(aidd1, FAIL, "H5Acreate2");
+
+ /* Create attribute on datatype */
+ aidt1 = H5Acreate2(tid1, BUG3_ATTR_NAME, tid1, sid, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(aidt1, FAIL, "H5Acreate2");
+
+ /* Close all IDs */
+ ret = H5Aclose(aidt1);
+ CHECK(ret, FAIL, "H5Aclose");
+ ret = H5Aclose(aidd1);
+ CHECK(ret, FAIL, "H5Aclose");
+ ret = H5Aclose(aidg1);
+ CHECK(ret, FAIL, "H5Aclose");
+ ret = H5Dclose(did1);
+ CHECK(ret, FAIL, "H5Dclose");
+ ret = H5Tclose(tid1);
+ CHECK(ret, FAIL, "H5Tclose");
+ ret = H5Gclose(gid1);
+ CHECK(ret, FAIL, "H5Gclose");
+ ret = H5Fclose(fid1);
+ CHECK(ret, FAIL, "H5Fclose");
+ ret = H5Sclose(sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Open file twice */
+ fid1 = H5Fopen(FILENAME, H5F_ACC_RDONLY, fapl);
+ CHECK(fid1, FAIL, "H5Fopen");
+ fid2 = H5Fopen(FILENAME, H5F_ACC_RDONLY, fapl);
+ CHECK(fid2, FAIL, "H5Fopen");
+
+ /* Open the root group twice */
+ gid1 = H5Gopen2(fid1, "/", H5P_DEFAULT);
+ CHECK(gid1, FAIL, "H5Gopen2");
+ gid2 = H5Gopen2(fid2, "/", H5P_DEFAULT);
+ CHECK(gid2, FAIL, "H5Gopen2");
+
+ /* Open the root group attribute twice */
+ aidg1 = H5Aopen(gid1, BUG3_ATTR_NAME, H5P_DEFAULT);
+ CHECK(aidg1, FAIL, "H5Aopen");
+ aidg2 = H5Aopen(gid2, BUG3_ATTR_NAME, H5P_DEFAULT);
+ CHECK(aidg1, FAIL, "H5Aopen");
+
+ /* Open the dataset twice */
+ did1 = H5Dopen2(fid1, BUG3_DSET_NAME, H5P_DEFAULT);
+ CHECK(did1, FAIL, "H5Dopen2");
+ did2 = H5Dopen2(fid2, BUG3_DSET_NAME, H5P_DEFAULT);
+ CHECK(did2, FAIL, "H5Dopen2");
+
+ /* Open the dataset attribute twice */
+ aidd1 = H5Aopen(did1, BUG3_ATTR_NAME, H5P_DEFAULT);
+ CHECK(aidd1, FAIL, "H5Aopen");
+ aidd2 = H5Aopen(did2, BUG3_ATTR_NAME, H5P_DEFAULT);
+ CHECK(aidd1, FAIL, "H5Aopen");
+
+ /* Open the datatype twice */
+ tid1 = H5Topen2(fid1, BUG3_DT_NAME, H5P_DEFAULT);
+ CHECK(tid1, FAIL, "H5Topen2");
+ tid2 = H5Topen2(fid2, BUG3_DT_NAME, H5P_DEFAULT);
+ CHECK(tid2, FAIL, "H5Topen2");
+
+ /* Open the datatype attribute twice */
+ aidt1 = H5Aopen(tid1, BUG3_ATTR_NAME, H5P_DEFAULT);
+ CHECK(aidt1, FAIL, "H5Aopen");
+ aidt2 = H5Aopen(tid2, BUG3_ATTR_NAME, H5P_DEFAULT);
+ CHECK(aidt2, FAIL, "H5Aopen");
+
+ /* Close all attributes */
+ ret = H5Aclose(aidg1);
+ CHECK(ret, FAIL, "H5Aclose");
+ ret = H5Aclose(aidg2);
+ CHECK(ret, FAIL, "H5Aclose");
+ ret = H5Aclose(aidd1);
+ CHECK(ret, FAIL, "H5Aclose");
+ ret = H5Aclose(aidd2);
+ CHECK(ret, FAIL, "H5Aclose");
+ ret = H5Aclose(aidt1);
+ CHECK(ret, FAIL, "H5Aclose");
+ ret = H5Aclose(aidt2);
+ CHECK(ret, FAIL, "H5Aclose");
+
+ /* Close root groups */
+ ret = H5Gclose(gid1);
+ CHECK(ret, FAIL, "H5Gclose");
+ ret = H5Gclose(gid2);
+ CHECK(ret, FAIL, "H5Gclose");
+
+ /* Close datasets */
+ ret = H5Dclose(did1);
+ CHECK(ret, FAIL, "H5Dclose");
+ ret = H5Dclose(did2);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close datatypes */
+ ret = H5Tclose(tid1);
+ CHECK(ret, FAIL, "H5Tclose");
+ ret = H5Tclose(tid2);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ /* Close files */
+ ret = H5Fclose(fid1);
+ CHECK(ret, FAIL, "H5Fclose");
+ ret = H5Fclose(fid2);
+ CHECK(ret, FAIL, "H5Fclose");
+} /* test_attr_bug5() */
+
+/****************************************************************
+**
+** test_attr_bug6(): Test basic H5A (attribute) code.
+** Tests if reading an empty attribute is OK.
+**
+****************************************************************/
+static void
+test_attr_bug6(hid_t fcpl, hid_t fapl)
+{
+ hid_t fid; /* File ID */
+ hid_t gid; /* Group ID */
+ hid_t aid1, aid2; /* Attribute IDs */
+ hid_t sid; /* Dataspace ID */
+ hsize_t dims[ATTR1_RANK] = {ATTR1_DIM1}; /* Attribute dimensions */
+ int intar[ATTR1_DIM1]; /* Data reading buffer */
+ herr_t ret; /* Generic return status */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing that empty attribute can be read\n"));
+
+ /* Create file */
+ fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl, fapl);
+ CHECK(fid, FAIL, "H5Fcreate");
+
+ /* Open root group */
+ gid = H5Gopen2(fid, "/", H5P_DEFAULT);
+ CHECK(gid, FAIL, "H5Gopen2");
+
+ /* Create dataspace */
+ sid = H5Screate_simple(1, dims, NULL);
+ CHECK(sid, FAIL, "H5Screate_simple");
+
+ /* Create attribute on group */
+ aid1 = H5Acreate2(gid, ATTR1_NAME, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(aid1, FAIL, "H5Acreate2");
+
+ ret = H5Aclose(aid1);
+ CHECK(ret, FAIL, "H5Aclose");
+
+ /* Open the attribute again */
+ aid2 = H5Aopen(gid, ATTR1_NAME, H5P_DEFAULT);
+ CHECK(aid2, FAIL, "H5Aopen");
+
+ ret = H5Aread(aid2, H5T_NATIVE_INT, intar);
+ CHECK(ret, FAIL, "H5Aread");
+
+ /* Close IDs */
+ ret = H5Aclose(aid2);
+ CHECK(ret, FAIL, "H5Aclose");
+
+ ret = H5Gclose(gid);
+ CHECK(ret, FAIL, "H5Gclose");
+
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ ret = H5Sclose(sid);
+ CHECK(ret, FAIL, "H5Sclose");
+} /* test_attr_bug6() */
+
+/****************************************************************
+**
+** test_attr_bug7(): Test basic H5A (attribute) code.
+** (Really tests object header allocation code).
+** Tests creating and deleting attributes in such a way as
+** to change the size of the "chunk #0 size" field.
+** Includes testing "skipping" a possible size of the
+** field, i.e. going from 1 to 4 bytes or 4 to 1 byte.
+**
+****************************************************************/
+#if 0
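+/* Note: disabled here. As explained at the (also disabled) call site in test_attr(),
+ * this test is specific to the "new" object header format, and its attributes are
+ * too large for the native VOL connector without the latest file format.
+ */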
+static void
+test_attr_bug7(hid_t fcpl, hid_t fapl)
+{
+ hid_t fid; /* File ID */
+ hid_t aid; /* Attribute ID */
+ hid_t sid; /* Dataspace ID */
+ hid_t tid; /* Datatype ID */
+ hsize_t dims_s = 140; /* Small attribute dimensions */
+ hsize_t dims_l = 65480; /* Large attribute dimensions */
+ H5A_info_t ainfo; /* Attribute info */
+ herr_t ret; /* Generic return status */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing adding and deleting large attributes\n"));
+
+ /* Create committed datatype to operate on. Use a committed datatype so that
+ * there is nothing after the object header and the first chunk can expand and
+ * contract as necessary. */
+ fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl, fapl);
+ CHECK(fid, FAIL, "H5Fcreate");
+ tid = H5Tcopy(H5T_STD_I32LE);
+ CHECK(tid, FAIL, "H5Tcopy");
+ ret = H5Tcommit2(fid, TYPE1_NAME, tid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Tcommit2");
+
+ /*
+ * Create small attribute
+ */
+ sid = H5Screate_simple(1, &dims_s, NULL);
+ CHECK(sid, FAIL, "H5Screate_simple");
+ aid = H5Acreate2(tid, ATTR1_NAME, H5T_STD_I8LE, sid, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(aid, FAIL, "H5Acreate2");
+
+ /* Close file */
+ ret = H5Aclose(aid);
+ CHECK(ret, FAIL, "H5Aclose");
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+ ret = H5Tclose(tid);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ /* Open file */
+ fid = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl);
+ CHECK(fid, FAIL, "H5Fopen");
+
+ /* Check attribute */
+ tid = H5Topen2(fid, TYPE1_NAME, H5P_DEFAULT);
+ CHECK(tid, FAIL, "H5Topen2");
+ ret = H5Aget_info_by_name(tid, ".", ATTR1_NAME, &ainfo, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Aget_info_by_name");
+ if (ainfo.data_size != dims_s)
+ TestErrPrintf("attribute data size different: data_size=%llu, should be %llu\n",
+ (long long unsigned)ainfo.data_size, (long long unsigned)dims_s);
+
+ /*
+ * Create another small attribute. Should cause chunk size field to expand by
+ * 1 byte (1->2).
+ */
+ aid = H5Acreate2(tid, ATTR2_NAME, H5T_STD_I8LE, sid, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(aid, FAIL, "H5Acreate2");
+
+ /* Close file */
+ ret = H5Aclose(aid);
+ CHECK(ret, FAIL, "H5Aclose");
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+ ret = H5Tclose(tid);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ /* Open file */
+ fid = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl);
+ CHECK(fid, FAIL, "H5Fopen");
+
+ /* Check attributes */
+ tid = H5Topen2(fid, TYPE1_NAME, H5P_DEFAULT);
+ CHECK(tid, FAIL, "H5Topen2");
+ ret = H5Aget_info_by_name(tid, ".", ATTR1_NAME, &ainfo, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Aget_info_by_name");
+ if (ainfo.data_size != dims_s)
+ TestErrPrintf("attribute data size different: data_size=%llu, should be %llu\n",
+ (long long unsigned)ainfo.data_size, (long long unsigned)dims_s);
+ ret = H5Aget_info_by_name(tid, ".", ATTR2_NAME, &ainfo, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Aget_info_by_name");
+ if (ainfo.data_size != dims_s)
+ TestErrPrintf("attribute data size different: data_size=%llu, should be %llu\n",
+ (long long unsigned)ainfo.data_size, (long long unsigned)dims_s);
+
+ /*
+ * Create large attribute. Should cause chunk size field to expand by 2 bytes
+ * (2->4).
+ */
+ ret = H5Sset_extent_simple(sid, 1, &dims_l, NULL);
+ CHECK(ret, FAIL, "H5Sset_extent_simple");
+ aid = H5Acreate2(tid, ATTR3_NAME, H5T_STD_I8LE, sid, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(aid, FAIL, "H5Acreate2");
+
+ /* Close file */
+ ret = H5Aclose(aid);
+ CHECK(ret, FAIL, "H5Aclose");
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+ ret = H5Tclose(tid);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ /* Open file */
+ fid = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl);
+ CHECK(fid, FAIL, "H5Fopen");
+
+ /* Check attributes */
+ tid = H5Topen2(fid, TYPE1_NAME, H5P_DEFAULT);
+ CHECK(tid, FAIL, "H5Topen2");
+ ret = H5Aget_info_by_name(tid, ".", ATTR1_NAME, &ainfo, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Aget_info_by_name");
+ if (ainfo.data_size != dims_s)
+ TestErrPrintf("attribute data size different: data_size=%llu, should be %llu\n",
+ (long long unsigned)ainfo.data_size, (long long unsigned)dims_s);
+ ret = H5Aget_info_by_name(tid, ".", ATTR2_NAME, &ainfo, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Aget_info_by_name");
+ if (ainfo.data_size != dims_s)
+ TestErrPrintf("attribute data size different: data_size=%llu, should be %llu\n",
+ (long long unsigned)ainfo.data_size, (long long unsigned)dims_s);
+ ret = H5Aget_info_by_name(tid, ".", ATTR3_NAME, &ainfo, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Aget_info_by_name");
+ if (ainfo.data_size != dims_l)
+ TestErrPrintf("attribute data size different: data_size=%llu, should be %llu\n",
+ (long long unsigned)ainfo.data_size, (long long unsigned)dims_l);
+
+ /*
+ * Delete last two attributes - should merge into a null message that is too
+ * large, causing the chunk size field to shrink by 3 bytes (4->1).
+ */
+ ret = H5Sset_extent_simple(sid, 1, &dims_l, NULL);
+ CHECK(ret, FAIL, "H5Sset_extent_simple");
+ ret = H5Adelete(tid, ATTR2_NAME);
+ CHECK(ret, FAIL, "H5Adelete");
+ ret = H5Adelete(tid, ATTR3_NAME);
+ CHECK(ret, FAIL, "H5Adelete");
+
+ /* Close file */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+ ret = H5Tclose(tid);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ /* Open file */
+ fid = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl);
+ CHECK(fid, FAIL, "H5Fopen");
+
+ /* Check attribute */
+ tid = H5Topen2(fid, TYPE1_NAME, H5P_DEFAULT);
+ CHECK(tid, FAIL, "H5Topen2");
+ ret = H5Aget_info_by_name(tid, ".", ATTR1_NAME, &ainfo, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Aget_info_by_name");
+ if (ainfo.data_size != dims_s)
+ TestErrPrintf("attribute data size different: data_size=%llu, should be %llu\n",
+ (long long unsigned)ainfo.data_size, (long long unsigned)dims_s);
+
+ /*
+ * Create large attribute. Should cause chunk size field to expand by 3 bytes
+ * (1->4).
+ */
+ aid = H5Acreate2(tid, ATTR2_NAME, H5T_STD_I8LE, sid, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(aid, FAIL, "H5Acreate2");
+
+ /* Close file */
+ ret = H5Aclose(aid);
+ CHECK(ret, FAIL, "H5Aclose");
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+ ret = H5Tclose(tid);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ /* Open file */
+ fid = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl);
+ CHECK(fid, FAIL, "H5Fopen");
+
+ /* Check attributes */
+ tid = H5Topen2(fid, TYPE1_NAME, H5P_DEFAULT);
+ CHECK(tid, FAIL, "H5Topen2");
+ ret = H5Aget_info_by_name(tid, ".", ATTR1_NAME, &ainfo, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Aget_info_by_name");
+ if (ainfo.data_size != dims_s)
+ TestErrPrintf("attribute data size different: data_size=%llu, should be %llu\n",
+ (long long unsigned)ainfo.data_size, (long long unsigned)dims_s);
+ ret = H5Aget_info_by_name(tid, ".", ATTR2_NAME, &ainfo, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Aget_info_by_name");
+ if (ainfo.data_size != dims_l)
+ TestErrPrintf("attribute data size different: data_size=%llu, should be %llu\n",
+ (long long unsigned)ainfo.data_size, (long long unsigned)dims_l);
+
+ /* Close IDs */
+ ret = H5Tclose(tid);
+ CHECK(ret, FAIL, "H5Tclose");
+ ret = H5Sclose(sid);
+ CHECK(ret, FAIL, "H5Sclose");
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+} /* test_attr_bug7() */
+#endif
+
+/****************************************************************
+**
+** test_attr_bug8(): Test basic H5A (attribute) code.
+** (Really tests object header code).
+** Tests adding a link and attribute to a group in such a
+** way as to cause the "chunk #0 size" field to expand
+** when some object header messages are not loaded into
+** cache. Before the bug was fixed, this would prevent
+** these messages from being shifted to the correct
+** position as the expansion algorithm marked them dirty,
+** invalidating the raw form, when there was no native
+** form to encode.
+**
+****************************************************************/
+static void
+test_attr_bug8(hid_t fcpl, hid_t fapl)
+{
+ hid_t fid; /* File ID */
+ hid_t aid; /* Attribute ID */
+ hid_t sid; /* Dataspace ID */
+ hid_t gid; /* Group ID */
+ hid_t oid; /* Object ID */
+ hsize_t dims = 256; /* Attribute dimensions */
+ H5O_info2_t oinfo; /* Object info */
+ H5A_info_t ainfo; /* Attribute info */
+ H5O_token_t root_token; /* Root group token */
+ int cmp_value; /* Result from H5Otoken_cmp */
+ herr_t ret; /* Generic return status */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing attribute expanding object header with undecoded messages\n"));
+
+ /* Create file and group to operate on */
+ fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl, fapl);
+ CHECK(fid, FAIL, "H5Fcreate");
+ gid = H5Gcreate2(fid, GROUP1_NAME, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(gid, FAIL, "H5Gcreate2");
+
+ /* Get root group token */
+ ret = H5Oget_info3(fid, &oinfo, H5O_INFO_BASIC);
+ CHECK(ret, FAIL, "H5Oget_info");
+ root_token = oinfo.token;
+
+ /*
+ * Create link to root group
+ */
+ ret = H5Lcreate_hard(fid, "/", gid, LINK1_NAME, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Lcreate_hard");
+
+ /* Close file and group */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+ ret = H5Gclose(gid);
+ CHECK(ret, FAIL, "H5Gclose");
+
+ /* Open file */
+ fid = H5Fopen(FILENAME, H5F_ACC_RDONLY, fapl);
+ CHECK(fid, FAIL, "H5Fopen");
+
+ /* Check link */
+ gid = H5Gopen2(fid, GROUP1_NAME, H5P_DEFAULT);
+ CHECK(gid, FAIL, "H5Gopen2");
+ oid = H5Oopen(gid, LINK1_NAME, H5P_DEFAULT);
+ CHECK(oid, FAIL, "H5Oopen");
+ ret = H5Oget_info3(oid, &oinfo, H5O_INFO_BASIC);
+ CHECK(ret, FAIL, "H5Oget_info");
+ ret = H5Otoken_cmp(oid, &oinfo.token, &root_token, &cmp_value);
+ CHECK(ret, FAIL, "H5Otoken_cmp");
+ VERIFY(cmp_value, 0, "H5Otoken_cmp");
+
+ /* Close IDs */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+ ret = H5Gclose(gid);
+ CHECK(ret, FAIL, "H5Gclose");
+ ret = H5Oclose(oid);
+ CHECK(ret, FAIL, "H5Oclose");
+
+ /* Open file */
+ fid = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl);
+ CHECK(fid, FAIL, "H5Fopen");
+
+ /*
+ * Create attribute. Should cause chunk size field to expand by 1 byte
+ * (1->2).
+ */
+ gid = H5Gopen2(fid, GROUP1_NAME, H5P_DEFAULT);
+ CHECK(gid, FAIL, "H5Gopen2");
+ sid = H5Screate_simple(1, &dims, NULL);
+ CHECK(sid, FAIL, "H5Screate_simple");
+ aid = H5Acreate2(gid, ATTR1_NAME, H5T_STD_I8LE, sid, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(aid, FAIL, "H5Acreate2");
+
+ /* Close IDs */
+ ret = H5Aclose(aid);
+ CHECK(ret, FAIL, "H5Aclose");
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+ ret = H5Gclose(gid);
+ CHECK(ret, FAIL, "H5Gclose");
+
+ /* Open file */
+ fid = H5Fopen(FILENAME, H5F_ACC_RDONLY, fapl);
+ CHECK(fid, FAIL, "H5Fopen");
+
+ /* Check link and attribute */
+ gid = H5Gopen2(fid, GROUP1_NAME, H5P_DEFAULT);
+ CHECK(gid, FAIL, "H5Gopen2");
+ oid = H5Oopen(gid, LINK1_NAME, H5P_DEFAULT);
+ CHECK(oid, FAIL, "H5Oopen");
+ ret = H5Oget_info3(oid, &oinfo, H5O_INFO_BASIC);
+ CHECK(ret, FAIL, "H5Oget_info");
+ ret = H5Otoken_cmp(oid, &oinfo.token, &root_token, &cmp_value);
+ CHECK(ret, FAIL, "H5Otoken_cmp");
+ VERIFY(cmp_value, 0, "H5Otoken_cmp");
+ ret = H5Aget_info_by_name(gid, ".", ATTR1_NAME, &ainfo, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Aget_info_by_name");
+ if (ainfo.data_size != dims)
+ TestErrPrintf("attribute data size different: data_size=%llu, should be %llu\n",
+ (long long unsigned)ainfo.data_size, (long long unsigned)dims);
+
+ /* Close IDs */
+ ret = H5Oclose(oid);
+ CHECK(ret, FAIL, "H5Oclose");
+ ret = H5Gclose(gid);
+ CHECK(ret, FAIL, "H5Gclose");
+ ret = H5Sclose(sid);
+ CHECK(ret, FAIL, "H5Sclose");
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+} /* test_attr_bug8() */
+
+/****************************************************************
+**
+** test_attr_bug9(): Test basic H5A (attribute) code.
+** (Really tests object header code).
+** Tests adding several large attributes to an object until
+** they convert to dense storage. The total size of all
+** attributes is larger than 64K, causing the internal
+** object header code to, after merging the deleted
+** messages in to a NULL message, shrink the object header
+** chunk. Do this twice: once with only attributes in the
+** object header chunk and once with a (small) soft link in
+** the chunk as well. In both cases, the shrunk chunk will
+** initially be too small and a new NULL message must be
+** created.
+**
+****************************************************************/
+static void
+test_attr_bug9(hid_t fcpl, hid_t fapl)
+{
+ hid_t fid = -1; /* File ID */
+ hid_t gid = -1; /* Group ID */
+ hid_t aid = -1; /* Attribute ID */
+ hid_t sid = -1; /* Dataspace ID */
+ hsize_t dims[1] = {32768}; /* Attribute dimensions */
+ int create_link; /* Whether to create a soft link */
+ unsigned max_compact; /* Setting from fcpl */
+ unsigned min_dense; /* Setting from fcpl */
+ char aname[11]; /* Attribute name */
+ unsigned i; /* Local index variable */
+ herr_t ret; /* Generic return status */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing that attributes can always be added to named datatypes\n"));
+
+ /* Create dataspace */
+ sid = H5Screate_simple(1, dims, NULL);
+ CHECK(sid, FAIL, "H5Screate_simple");
+
+ /* Obtain attribute phase change settings */
+ ret = H5Pget_attr_phase_change(fcpl, &max_compact, &min_dense);
+ CHECK(ret, FAIL, "H5Pget_attr_phase_change");
+
+ /* Run with and without the soft link */
+ for (create_link = 0; create_link < 2; create_link++) {
+ /* Create file */
+ fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl, fapl);
+ CHECK(fid, FAIL, "H5Fcreate");
+
+ /* Create second group */
+ gid = H5Gcreate2(fid, "group", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(gid, FAIL, "H5Gcreate2");
+
+ /* Close second group */
+ ret = H5Gclose(gid);
+ CHECK(ret, FAIL, "H5Gclose");
+
+ /* Open root group */
+ gid = H5Gopen2(fid, "/", H5P_DEFAULT);
+ CHECK(gid, FAIL, "H5Gopen2");
+
+ /* Create enough attributes to cause a change to dense storage */
+ for (i = 0; i < max_compact + 1; i++) {
+ /* Create attribute */
+ HDsnprintf(aname, sizeof(aname), "%u", i);
+ aid = H5Acreate2(gid, aname, H5T_NATIVE_CHAR, sid, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(aid, FAIL, "H5Acreate2");
+
+ /* Close attribute */
+ ret = H5Aclose(aid);
+ CHECK(ret, FAIL, "H5Aclose");
+
+ /* Create enough soft links that exactly one goes into chunk 1 if
+ * requested */
+ if (i == 0 && create_link) {
+ ret = H5Lcreate_soft("b", gid, "a", H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Lcreate_soft");
+ ret = H5Lcreate_soft("d", gid, "c", H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Lcreate_soft");
+ ret = H5Lcreate_soft("f", gid, "e", H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Lcreate_soft");
+ } /* end if */
+ } /* end for */
+
+ /* Close IDs */
+ ret = H5Gclose(gid);
+ CHECK(ret, FAIL, "H5Gclose");
+
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+ } /* end for */
+
+ /* Close dataspace */
+ ret = H5Sclose(sid);
+ CHECK(ret, FAIL, "H5Sclose");
+} /* test_attr_bug9() */
+
+/****************************************************************
+**
+** test_attr_bug10(): Test basic H5A (attribute) code.
+** Attempts to trigger a bug which would result in a
+** segfault. Create a vlen attribute through a file
+** handle, then open the same file through a different
+** handle, open the same attribute through the second file
+** handle, then close the second file and attribute
+** handles, then write to the attribute through the first
+** handle.
+**
+****************************************************************/
+static void
+test_attr_bug10(hid_t fcpl, hid_t fapl)
+{
+ hid_t fid1, fid2; /* File IDs */
+ hid_t aid1, aid2; /* Attribute IDs */
+ hid_t sid; /* Dataspace ID */
+ hid_t tid; /* Datatype ID */
+ hsize_t dims[1] = {1}; /* Attribute dimensions */
+ const char *wbuf[1] = {"foo"}; /* Write buffer */
+ herr_t ret; /* Generic return status */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing that vlen attributes can be written to after a second file handle is closed\n"));
+
+ /* Create dataspace */
+ sid = H5Screate_simple(1, dims, NULL);
+ CHECK(sid, FAIL, "H5Screate_simple");
+
+ /* Create VL string datatype */
+ tid = H5Tcopy(H5T_C_S1);
+ CHECK(tid, FAIL, "H5Tcreate");
+ ret = H5Tset_size(tid, H5T_VARIABLE);
+ CHECK(ret, FAIL, "H5Tset_size");
+
+ /* Create file */
+ fid1 = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl, fapl);
+ CHECK(fid1, FAIL, "H5Fcreate");
+
+ /* Create attribute on root group */
+ aid1 = H5Acreate2(fid1, "attr", tid, sid, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(aid1, FAIL, "H5Acreate2");
+
+ /* Open the same file again */
+ fid2 = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl);
+ CHECK(fid2, FAIL, "H5Fcreate");
+
+ /* Open the same attribute through the second file handle */
+ aid2 = H5Aopen(fid2, "attr", H5P_DEFAULT);
+ CHECK(aid2, FAIL, "H5Aopen");
+
+ /* Close the second attribute and file handles */
+ ret = H5Aclose(aid2);
+ CHECK(ret, FAIL, "H5Aclose");
+ ret = H5Fclose(fid2);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Write to the attribute through the first handle */
+ ret = H5Awrite(aid1, tid, wbuf);
+ CHECK(ret, FAIL, "H5Awrite");
+
+ /* Close IDs */
+ ret = H5Aclose(aid1);
+ CHECK(ret, FAIL, "H5Aclose");
+
+ ret = H5Fclose(fid1);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ ret = H5Tclose(tid);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ ret = H5Sclose(sid);
+ CHECK(ret, FAIL, "H5Sclose");
+} /* test_attr_bug10() */
+
+/****************************************************************
+**
+** test_attr_delete_last_dense():
+** Verifies that the error described in HDFFV-9277 is
+** fixed when deleting the last "large" attribute that
+** is stored densely.
+**
+****************************************************************/
+#if 0 /* Native VOL connector only supports large attributes with latest format */
+static void
+test_attr_delete_last_dense(hid_t fcpl, hid_t fapl)
+{
+ hid_t fid; /* File ID */
+ hid_t gid; /* Group ID */
+ hid_t aid; /* Attribute ID */
+ hid_t sid; /* Dataspace ID */
+ hsize_t dim2[2] = {DIM0, DIM1}; /* Dimension sizes */
+ int i, j; /* Local index variables */
+ double *data = NULL; /* Pointer to the data buffer */
+ herr_t ret; /* Generic return status */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing Deleting the last large attribute stored densely\n"));
+
+ /* Create the file */
+ fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl, fapl);
+ CHECK(fid, FAIL, "H5Fcreate");
+
+ /* Create the group */
+ gid = H5Gcreate2(fid, GRPNAME, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(gid, FAIL, "H5Gcreate");
+
+ /* Create the dataspace */
+ sid = H5Screate_simple(RANK, dim2, NULL);
+ CHECK(sid, FAIL, "H5Screate_simple");
+
+ /* Attach the attribute to the group */
+ aid = H5Acreate2(gid, ATTRNAME, H5T_IEEE_F64LE, sid, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(aid, FAIL, "H5Acreate2");
+
+ /* Allocate the data buffer */
+ data = (double *)HDmalloc((size_t)(DIM0 * DIM1) * sizeof(double));
+ CHECK_PTR(data, "HDmalloc");
+
+ /* Initialize the data */
+ for (i = 0; i < DIM0; i++)
+ for (j = 0; j < DIM1; j++)
+ *(data + i * DIM1 + j) = i + j;
+
+ /* Write to the attribute */
+ ret = H5Awrite(aid, H5T_NATIVE_DOUBLE, data);
+ CHECK(ret, FAIL, "H5Awrite");
+
+ /* Closing */
+ ret = H5Aclose(aid);
+ CHECK(ret, FAIL, "H5Aclose");
+ ret = H5Sclose(sid);
+ CHECK(ret, FAIL, "H5Sclose");
+ ret = H5Gclose(gid);
+ CHECK(ret, FAIL, "H5Gclose");
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Re-open the file */
+ fid = H5Fopen(FILENAME, H5F_ACC_RDWR, fapl);
+ CHECK(fid, FAIL, "H5Fopen");
+
+ /* Open the group */
+ gid = H5Gopen2(fid, GRPNAME, H5P_DEFAULT);
+ CHECK(gid, FAIL, "H5Gopen");
+
+ /* Delete the attribute */
+ ret = H5Adelete(gid, ATTRNAME);
+ CHECK(ret, FAIL, "H5Adelete");
+
+ /* Closing */
+ ret = H5Gclose(gid);
+ CHECK(ret, FAIL, "H5Gclose");
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Free the data buffer */
+ if (data)
+ HDfree(data);
+
+} /* test_attr_delete_last_dense() */
+#endif
+
+/****************************************************************
+**
+** test_attr(): Main H5A (attribute) testing routine.
+**
+****************************************************************/
+void
+test_attr(void)
+{
+ hid_t fapl = (-1), fapl2 = (-1); /* File access property lists */
+ hid_t fcpl = (-1), fcpl2 = (-1); /* File creation property lists */
+ hid_t dcpl = -1; /* Dataset creation property list */
+ unsigned new_format; /* Whether to use the new format or not */
+ unsigned use_shared; /* Whether to use shared attributes or not */
+ unsigned minimize_dset_oh; /* Whether to use minimized dataset object headers */
+ herr_t ret; /* Generic return value */
+
+ MESSAGE(5, ("Testing Attributes\n"));
+
+ fapl = H5Pcreate(H5P_FILE_ACCESS);
+ CHECK(fapl, FAIL, "H5Pcreate");
+
+ /* fapl2 uses "latest version of the format" for creating objects in the file */
+ fapl2 = H5Pcopy(fapl);
+ CHECK(fapl2, FAIL, "H5Pcopy");
+ ret = H5Pset_libver_bounds(fapl2, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST);
+ CHECK(ret, FAIL, "H5Pset_libver_bounds");
+
+ fcpl = H5Pcreate(H5P_FILE_CREATE);
+ CHECK(fcpl, FAIL, "H5Pcreate");
+
+ /* files with fcpl2 make all attributes ( > 1 byte) shared
+ * (i.e. all of them :-) */
+ fcpl2 = H5Pcopy(fcpl);
+ CHECK(fcpl2, FAIL, "H5Pcopy");
+ ret = H5Pset_shared_mesg_nindexes(fcpl2, (unsigned)1);
+ CHECK_I(ret, "H5Pset_shared_mesg_nindexes");
+ ret = H5Pset_shared_mesg_index(fcpl2, (unsigned)0, H5O_SHMESG_ATTR_FLAG, (unsigned)1);
+ CHECK_I(ret, "H5Pset_shared_mesg_index");
+
+ dcpl = H5Pcreate(H5P_DATASET_CREATE);
+ CHECK(dcpl, H5I_INVALID_HID, "H5Pcreate");
+
+ ret = H5Pset_attr_creation_order(dcpl, H5P_CRT_ORDER_TRACKED);
+ CHECK(ret, FAIL, "");
+
+ dcpl_g = dcpl;
+
+ for (minimize_dset_oh = 0; minimize_dset_oh <= 1; minimize_dset_oh++) {
+ if (minimize_dset_oh != 0)
+ continue;
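+
+ /* (Only the default dataset object header case is exercised here; the
+ * minimized-header setup is kept in the disabled block below.)
+ */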
+
+#if 0
+ if (minimize_dset_oh == 0) {
+ MESSAGE(7, ("testing with default dataset object headers\n"));
+ dcpl_g = H5P_DEFAULT;
+ }
+ else {
+ MESSAGE(7, ("testing with minimzied dataset object headers\n"));
+ dcpl = H5Pcreate(H5P_DATASET_CREATE);
+ CHECK(dcpl, FAIL, "H5Pcreate");
+ ret = H5Pset_dset_no_attrs_hint(dcpl, TRUE);
+ CHECK_I(ret, "H5Pset_dset_no_attrs_hint");
+ dcpl_g = dcpl;
+ }
+#endif
+
+ for (new_format = FALSE; new_format <= TRUE; new_format++) {
+ hid_t my_fapl = fapl;
+
+ if (new_format)
+ continue;
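+
+ /* (Likewise, only the old file format pass runs here; the new-format setup is
+ * kept in the disabled block below, so the new_format-only tests further down
+ * are not reached.)
+ */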
+
+#if 0
+ /* Set the FAPL for the type of format */
+ if (new_format) {
+ MESSAGE(7, ("testing with new file format\n"));
+ my_fapl = fapl2;
+ }
+ else {
+ MESSAGE(7, ("testing with old file format\n"));
+ my_fapl = fapl;
+ }
+#endif
+
+ /* These next two tests use the same file information */
+ test_attr_basic_write(my_fapl); /* Test basic H5A writing code */
+ test_attr_basic_read(my_fapl); /* Test basic H5A reading code */
+
+ /* These next two tests use their own file information */
+ test_attr_flush(my_fapl); /* Test H5A I/O in the presence of H5Fflush calls */
+ test_attr_plist(my_fapl); /* Test attribute property lists */
+
+ /* These next two tests use the same file information */
+ test_attr_compound_write(my_fapl); /* Test complex datatype H5A writing code */
+ test_attr_compound_read(my_fapl); /* Test complex datatype H5A reading code */
+
+ /* These next two tests use the same file information */
+ test_attr_scalar_write(my_fapl); /* Test scalar dataspace H5A writing code */
+ test_attr_scalar_read(my_fapl); /* Test scalar dataspace H5A reading code */
+
+ /* These next four tests use the same file information */
+ test_attr_mult_write(my_fapl); /* Test H5A writing code for multiple attributes */
+ test_attr_mult_read(my_fapl); /* Test H5A reading code for multiple attributes */
+ test_attr_iterate(my_fapl); /* Test H5A iterator code */
+ test_attr_delete(my_fapl); /* Test H5A code for deleting attributes */
+
+ /* This next test uses its own file information */
+ test_attr_dtype_shared(my_fapl); /* Test using shared datatypes in attributes */
+
+ /* This next test uses its own file information */
+ test_attr_duplicate_ids(my_fapl);
+
+ for (use_shared = FALSE; use_shared <= TRUE; use_shared++) {
+ hid_t my_fcpl;
+
+ if (new_format == TRUE && use_shared) {
+ MESSAGE(7, ("testing with shared attributes\n"));
+ my_fcpl = fcpl2;
+ }
+ else {
+ MESSAGE(7, ("testing without shared attributes\n"));
+ my_fcpl = fcpl;
+ }
+
+ test_attr_big(my_fcpl, my_fapl); /* Test storing big attribute */
+ test_attr_null_space(my_fcpl, my_fapl); /* Test storing attribute with NULL dataspace */
+ test_attr_deprec(fcpl, my_fapl); /* Test deprecated API routines */
+ test_attr_many(new_format, my_fcpl, my_fapl); /* Test storing lots of attributes */
+ test_attr_info_null_info_pointer(my_fcpl,
+ my_fapl); /* Test passing a NULL attribute info pointer to
+ H5Aget_info(_by_name/_by_idx) */
+ test_attr_rename_invalid_name(
+ my_fcpl,
+ my_fapl); /* Test passing a NULL or empty attribute name to H5Arename(_by_name) */
+ test_attr_get_name_invalid_buf(
+ my_fcpl, my_fapl); /* Test passing NULL buffer to H5Aget_name(_by_idx) */
+
+ /* New attribute API routine tests */
+ test_attr_info_by_idx(new_format, my_fcpl,
+ my_fapl); /* Test querying attribute info by index */
+ test_attr_delete_by_idx(new_format, my_fcpl, my_fapl); /* Test deleting attribute by index */
+ test_attr_iterate2(new_format, my_fcpl,
+ my_fapl); /* Test iterating over attributes by index */
+ test_attr_open_by_idx(new_format, my_fcpl, my_fapl); /* Test opening attributes by index */
+ test_attr_open_by_name(new_format, my_fcpl, my_fapl); /* Test opening attributes by name */
+ test_attr_create_by_name(new_format, my_fcpl, my_fapl); /* Test creating attributes by name */
+
+ /* Tests that address specific bugs */
+ test_attr_bug1(my_fcpl, my_fapl); /* Test odd allocation operations */
+ test_attr_bug2(my_fcpl, my_fapl); /* Test many deleted attributes */
+ test_attr_bug3(my_fcpl, my_fapl); /* Test "self referential" attributes */
+ test_attr_bug4(my_fcpl, my_fapl); /* Test attributes on named datatypes */
+ test_attr_bug5(my_fcpl,
+ my_fapl); /* Test opening/closing attributes through different file handles */
+ test_attr_bug6(my_fcpl, my_fapl); /* Test reading empty attribute */
+ /* test_attr_bug7 is specific to the "new" object header format,
+ * and in fact fails if used with the old format due to the
+ * attributes being larger than 64K */
+ test_attr_bug8(my_fcpl,
+ my_fapl); /* Test attribute expanding object header with undecoded messages */
+ test_attr_bug9(my_fcpl, my_fapl); /* Test large attributes converting to dense storage */
+ test_attr_bug10(my_fcpl, my_fapl); /* Test writing an attribute after opening and closing
+ through a different file handle */
+
+ /* tests specific to the "new format" */
+ if (new_format == TRUE) {
+ /* General attribute tests */
+ test_attr_dense_create(my_fcpl, my_fapl); /* Test dense attribute storage creation */
+ test_attr_dense_open(my_fcpl, my_fapl); /* Test opening attributes in dense storage */
+ test_attr_dense_delete(my_fcpl, my_fapl); /* Test deleting attributes in dense storage */
+ test_attr_dense_rename(my_fcpl, my_fapl); /* Test renaming attributes in dense storage */
+ test_attr_dense_unlink(
+ my_fcpl, my_fapl); /* Test unlinking object with attributes in dense storage */
+ test_attr_dense_limits(my_fcpl, my_fapl); /* Test dense attribute storage limits */
+ test_attr_dense_dup_ids(my_fcpl,
+ my_fapl); /* Test duplicated IDs for dense attribute storage */
+
+ /* Attribute creation order tests */
+ test_attr_corder_create_basic(
+ my_fcpl, my_fapl); /* Test creating an object w/attribute creation order info */
+ test_attr_corder_create_compact(my_fcpl,
+ my_fapl); /* Test compact attribute storage on an object
+ w/attribute creation order info */
+ test_attr_corder_create_dense(my_fcpl,
+ my_fapl); /* Test dense attribute storage on an object
+ w/attribute creation order info */
+ test_attr_corder_create_reopen(my_fcpl,
+ my_fapl); /* Test creating attributes w/reopening file from
+ using new format to using old format */
+ test_attr_corder_transition(my_fcpl,
+ my_fapl); /* Test attribute storage transitions on an object
+ w/attribute creation order info */
+ test_attr_corder_delete(my_fcpl, my_fapl); /* Test deleting object using dense storage
+ w/attribute creation order info */
+
+ /* More complex tests with exclusively both "new format" and "shared" attributes */
+ if (use_shared == TRUE) {
+ test_attr_shared_write(
+ my_fcpl,
+ my_fapl); /* Test writing to shared attributes in compact & dense storage */
+ test_attr_shared_rename(
+ my_fcpl,
+ my_fapl); /* Test renaming shared attributes in compact & dense storage */
+ test_attr_shared_delete(
+ my_fcpl,
+ my_fapl); /* Test deleting shared attributes in compact & dense storage */
+ test_attr_shared_unlink(my_fcpl, my_fapl); /* Test unlinking object with shared
+ attributes in compact & dense storage */
+ } /* if using shared attributes */
+
+#if 0 /* Native VOL connector only supports large attributes with latest format */
+ test_attr_delete_last_dense(my_fcpl, my_fapl);
+
+ /* test_attr_bug7 is specific to the "new" object header format,
+ * and in fact fails if used with the old format due to the
+ * attributes being larger than 64K */
+ test_attr_bug7(my_fcpl,
+ my_fapl); /* Test creating and deleting large attributes in ohdr chunk 0 */
+#endif
+
+ } /* if using "new format" */
+ } /* for unshared/shared attributes */
+ } /* for old/new format */
+
+ if (minimize_dset_oh != 0) {
+ ret = H5Pclose(dcpl);
+ CHECK(ret, FAIL, "H5Pclose");
+ dcpl_g = H5P_DEFAULT;
+ }
+
+ } /* for default/minimized dataset object headers */
+
+ ret = H5Pclose(dcpl);
+ CHECK(ret, FAIL, "H5Pclose");
+
+ /* Close FCPLs */
+ ret = H5Pclose(fcpl);
+ CHECK(ret, FAIL, "H5Pclose");
+ ret = H5Pclose(fcpl2);
+ CHECK(ret, FAIL, "H5Pclose");
+
+ /* Close FAPLs */
+ ret = H5Pclose(fapl);
+ CHECK(ret, FAIL, "H5Pclose");
+ ret = H5Pclose(fapl2);
+ CHECK(ret, FAIL, "H5Pclose");
+} /* test_attr() */
+
+/*-------------------------------------------------------------------------
+ * Function: cleanup_attr
+ *
+ * Purpose: Cleanup temporary test files
+ *
+ * Return: none
+ *
+ * Programmer: Albert Cheng
+ * July 2, 1998
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+void
+cleanup_attr(void)
+{
+ H5Fdelete(FILENAME, H5P_DEFAULT);
+}
diff --git a/test/API/tchecksum.c b/test/API/tchecksum.c
new file mode 100644
index 0000000..a77ffcd
--- /dev/null
+++ b/test/API/tchecksum.c
@@ -0,0 +1,251 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://www.hdfgroup.org/licenses. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/*-------------------------------------------------------------------------
+ *
+ * Created: tchecksum.c
+ * Aug 21 2006
+ * Quincey Koziol
+ *
+ * Purpose: Test internal checksum routine(s)
+ *
+ *-------------------------------------------------------------------------
+ */
+
+/***********/
+/* Headers */
+/***********/
+#include "testhdf5.h"
+
+/**********/
+/* Macros */
+/**********/
+#define BUF_LEN 3093 /* No particular value */
+
+/*******************/
+/* Local variables */
+/*******************/
+
+/****************************************************************
+**
+** test_chksum_size_one(): Checksum 1 byte buffer
+**
+****************************************************************/
+static void
+test_chksum_size_one(void)
+{
+ uint8_t buf[1] = {23}; /* Buffer to checksum */
+ uint32_t chksum; /* Checksum value */
+
+ /* Buffer w/real data */
+ chksum = H5_checksum_fletcher32(buf, sizeof(buf));
+ VERIFY(chksum, 0x17001700, "H5_checksum_fletcher32");
+
+ chksum = H5_checksum_crc(buf, sizeof(buf));
+ VERIFY(chksum, 0xfa2568b7, "H5_checksum_crc");
+
+ chksum = H5_checksum_lookup3(buf, sizeof(buf), 0);
+ VERIFY(chksum, 0xa209c931, "H5_checksum_lookup3");
+
+ /* Buffer w/zero(s) for data */
+ HDmemset(buf, 0, sizeof(buf));
+ chksum = H5_checksum_fletcher32(buf, sizeof(buf));
+ VERIFY(chksum, 0, "H5_checksum_fletcher32");
+
+ chksum = H5_checksum_crc(buf, sizeof(buf));
+ VERIFY(chksum, 0xfa60fb57, "H5_checksum_crc");
+
+ chksum = H5_checksum_lookup3(buf, sizeof(buf), 0);
+ VERIFY(chksum, 0x8ba9414b, "H5_checksum_lookup3");
+} /* test_chksum_size_one() */
+
+/****************************************************************
+**
+** test_chksum_size_two(): Checksum 2 byte buffer
+**
+****************************************************************/
+static void
+test_chksum_size_two(void)
+{
+ uint8_t buf[2] = {23, 187}; /* Buffer to checksum */
+ uint32_t chksum; /* Checksum value */
+
+ /* Buffer w/real data */
+ chksum = H5_checksum_fletcher32(buf, sizeof(buf));
+ VERIFY(chksum, 0x17bb17bb, "H5_checksum_fletcher32");
+
+ chksum = H5_checksum_crc(buf, sizeof(buf));
+ VERIFY(chksum, 0xfc856608, "H5_checksum_crc");
+
+ chksum = H5_checksum_lookup3(buf, sizeof(buf), 0);
+ VERIFY(chksum, 0x8ba7a6c9, "H5_checksum_lookup3");
+
+ /* Buffer w/zero(s) for data */
+ HDmemset(buf, 0, sizeof(buf));
+ chksum = H5_checksum_fletcher32(buf, sizeof(buf));
+ VERIFY(chksum, 0, "H5_checksum_fletcher32");
+
+ chksum = H5_checksum_crc(buf, sizeof(buf));
+ VERIFY(chksum, 0xfc7e9b20, "H5_checksum_crc");
+
+ chksum = H5_checksum_lookup3(buf, sizeof(buf), 0);
+ VERIFY(chksum, 0x62cd61b3, "H5_checksum_lookup3");
+} /* test_chksum_size_two() */
+
+/****************************************************************
+**
+** test_chksum_size_three(): Checksum 3 byte buffer
+**
+****************************************************************/
+static void
+test_chksum_size_three(void)
+{
+ uint8_t buf[3] = {23, 187, 98}; /* Buffer to checksum */
+ uint32_t chksum; /* Checksum value */
+
+ /* Buffer w/real data */
+ chksum = H5_checksum_fletcher32(buf, sizeof(buf));
+ VERIFY(chksum, 0x917679bb, "H5_checksum_fletcher32");
+
+ chksum = H5_checksum_crc(buf, sizeof(buf));
+ VERIFY(chksum, 0xfebc5d70, "H5_checksum_crc");
+
+ chksum = H5_checksum_lookup3(buf, sizeof(buf), 0);
+ VERIFY(chksum, 0xcebdf4f0, "H5_checksum_lookup3");
+
+ /* Buffer w/zero(s) for data */
+ HDmemset(buf, 0, sizeof(buf));
+ chksum = H5_checksum_fletcher32(buf, sizeof(buf));
+ VERIFY(chksum, 0, "H5_checksum_fletcher32");
+
+ chksum = H5_checksum_crc(buf, sizeof(buf));
+ VERIFY(chksum, 0xf9cc4c7a, "H5_checksum_crc");
+
+ chksum = H5_checksum_lookup3(buf, sizeof(buf), 0);
+ VERIFY(chksum, 0x6bd0060f, "H5_checksum_lookup3");
+} /* test_chksum_size_three() */
+
+/****************************************************************
+**
+** test_chksum_size_four(): Checksum 4 byte buffer
+**
+****************************************************************/
+static void
+test_chksum_size_four(void)
+{
+ uint8_t buf[4] = {23, 187, 98, 217}; /* Buffer to checksum */
+ uint32_t chksum; /* Checksum value */
+
+ /* Buffer w/real data */
+ chksum = H5_checksum_fletcher32(buf, sizeof(buf));
+ VERIFY(chksum, 0x924f7a94, "H5_checksum_fletcher32");
+
+ chksum = H5_checksum_crc(buf, sizeof(buf));
+ VERIFY(chksum, 0xff398a46, "H5_checksum_crc");
+
+ chksum = H5_checksum_lookup3(buf, sizeof(buf), 0);
+ VERIFY(chksum, 0x2c88bb51, "H5_checksum_lookup3");
+
+ /* Buffer w/zero(s) for data */
+ HDmemset(buf, 0, sizeof(buf));
+ chksum = H5_checksum_fletcher32(buf, sizeof(buf));
+ VERIFY(chksum, 0, "H5_checksum_fletcher32");
+
+ chksum = H5_checksum_crc(buf, sizeof(buf));
+ VERIFY(chksum, 0xff117081, "H5_checksum_crc");
+
+ chksum = H5_checksum_lookup3(buf, sizeof(buf), 0);
+ VERIFY(chksum, 0x049396b8, "H5_checksum_lookup3");
+} /* test_chksum_size_four() */
+
+/****************************************************************
+**
+** test_chksum_large(): Checksum larger buffer
+**
+****************************************************************/
+static void
+test_chksum_large(void)
+{
+ uint8_t *large_buf; /* Buffer for checksum calculations */
+ uint32_t chksum; /* Checksum value */
+ size_t u; /* Local index variable */
+
+ /* Allocate the buffer */
+ large_buf = (uint8_t *)HDmalloc((size_t)BUF_LEN);
+ CHECK_PTR(large_buf, "HDmalloc");
+
+ /* Initialize buffer w/known data */
+ for (u = 0; u < BUF_LEN; u++)
+ large_buf[u] = (uint8_t)(u * 3);
+
+ /* Buffer w/real data */
+ chksum = H5_checksum_fletcher32(large_buf, (size_t)BUF_LEN);
+ VERIFY(chksum, 0x85b4e2a, "H5_checksum_fletcher32");
+
+ chksum = H5_checksum_crc(large_buf, (size_t)BUF_LEN);
+ VERIFY(chksum, 0xfbd0f7c0, "H5_checksum_crc");
+
+ chksum = H5_checksum_lookup3(large_buf, (size_t)BUF_LEN, 0);
+ VERIFY(chksum, 0x1bd2ee7b, "H5_checksum_lookup3");
+
+ /* Buffer w/zero(s) for data */
+ HDmemset(large_buf, 0, (size_t)BUF_LEN);
+ chksum = H5_checksum_fletcher32(large_buf, (size_t)BUF_LEN);
+ VERIFY(chksum, 0, "H5_checksum_fletcher32");
+
+ chksum = H5_checksum_crc(large_buf, (size_t)BUF_LEN);
+ VERIFY(chksum, 0xfac8b4c4, "H5_checksum_crc");
+
+ chksum = H5_checksum_lookup3(large_buf, (size_t)BUF_LEN, 0);
+ VERIFY(chksum, 0x930c7afc, "H5_checksum_lookup3");
+
+ /* Release memory for buffer */
+ HDfree(large_buf);
+} /* test_chksum_large() */
+
+/****************************************************************
+**
+** test_checksum(): Main checksum testing routine.
+**
+****************************************************************/
+void
+test_checksum(void)
+{
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing checksum algorithms\n"));
+
+ /* Various checks for the fletcher32, CRC, and lookup3 checksum algorithms */
+ test_chksum_size_one(); /* Test buffer w/only 1 byte */
+ test_chksum_size_two(); /* Test buffer w/only 2 bytes */
+ test_chksum_size_three(); /* Test buffer w/only 3 bytes */
+ test_chksum_size_four(); /* Test buffer w/only 4 bytes */
+ test_chksum_large(); /* Test buffer w/larger # of bytes */
+
+} /* test_checksum() */
+
+/*-------------------------------------------------------------------------
+ * Function: cleanup_checksum
+ *
+ * Purpose: Cleanup temporary test files
+ *
+ * Return: none
+ *
+ * Programmer: Quincey Koziol
+ * August 21, 2006
+ *
+ *-------------------------------------------------------------------------
+ */
+void
+cleanup_checksum(void)
+{
+ /* no file to clean */
+}
diff --git a/test/API/tconfig.c b/test/API/tconfig.c
new file mode 100644
index 0000000..fdab5ef
--- /dev/null
+++ b/test/API/tconfig.c
@@ -0,0 +1,199 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://www.hdfgroup.org/licenses. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/***********************************************************
+ *
+ * Test program: tconfig
+ *
+ * Test the definitions in H5config.h as much as possible
+ *
+ *************************************************************/
+
+#include "testhdf5.h"
+
+/* macro definitions */
+/* verify a C integer type: check that the sizes of both the signed and
+ * unsigned variants match the size given by the macro.
+ */
+#define vrfy_cint_type(ctype, uctype, ctype_macro) \
+ /* check signed type size */ \
+ vrfy_macrosize(ctype, ctype_macro, #ctype_macro); \
+ /* check unsigned type size */ \
+ vrfy_macrosize(uctype, ctype_macro, #ctype_macro);
+
+/* verify a C type's size: check sizeof(type) against the macro value. */
+#define vrfy_ctype(type, macro) vrfy_macrosize(type, macro, #macro);
+
+/* verify that sizeof(type) matches the size defined in the macro. */
+/* This extra step is needed so that we can print the macro name. */
+#define vrfy_macrosize(type, macro, macroname) \
+ if (sizeof(type) != (macro)) \
+ TestErrPrintf("Error: sizeof(%s) is %zu but %s is %d\n", #type, sizeof(type), macroname, \
+ (int)(macro));
+
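+/* Usage sketch (illustrative; these exact calls appear in test_config_ctypes()
+ * below):
+ *
+ *     vrfy_ctype(char, H5_SIZEOF_CHAR);
+ *     vrfy_cint_type(int, unsigned int, H5_SIZEOF_INT);
+ *
+ * Each call expands to a sizeof comparison and reports any mismatch through
+ * TestErrPrintf(), so the caller does not need to check a return value.
+ */
+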
+/* local routine prototypes */
+void test_config_ctypes(void);
+void test_exit_definitions(void);
+
+/*-------------------------------------------------------------------------
+ * Function: test_configure
+ *
+ * Purpose: Main configure definitions testing routine
+ *
+ * Return: none (error is fed back via global variable num_errs)
+ *
+ * Programmer: Albert Cheng
+ * September 25, 2001
+ *
+ *-------------------------------------------------------------------------
+ */
+void
+test_configure(void)
+{
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing configure definitions\n"));
+ test_config_ctypes();
+ test_exit_definitions();
+}
+
+/*-------------------------------------------------------------------------
+ * Function: cleanup_configure
+ *
+ * Purpose: Cleanup temporary test files
+ *
+ * Return: none
+ *
+ * Programmer: Albert Cheng
+ * September 25, 2001
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+void
+cleanup_configure(void)
+{
+ /* no file to clean */
+}
+
+/*-------------------------------------------------------------------------
+ * Function: test_config_ctypes
+ *
+ * Purpose: test C language data type sizes
+ *
+ * Return: none (error is fed back via global variable num_errs)
+ *
+ * Programmer: Albert Cheng
+ * September 25, 2001
+ *
+ * Modifications:
+ * Albert Cheng, 2004/10/14
+ * Verified both signed and unsigned int types.
+ *
+ *-------------------------------------------------------------------------
+ */
+void
+test_config_ctypes(void)
+{
+ /* standard C89 basic types */
+ /* char, signed char, unsigned char are three distinct types. */
+ vrfy_ctype(char, H5_SIZEOF_CHAR);
+ vrfy_cint_type(signed char, unsigned char, H5_SIZEOF_CHAR);
+ vrfy_cint_type(int, unsigned int, H5_SIZEOF_INT);
+ vrfy_cint_type(short, unsigned short, H5_SIZEOF_SHORT);
+ vrfy_cint_type(long, unsigned long, H5_SIZEOF_LONG);
+ vrfy_ctype(float, H5_SIZEOF_FLOAT);
+ vrfy_ctype(double, H5_SIZEOF_DOUBLE);
+ vrfy_ctype(long double, H5_SIZEOF_LONG_DOUBLE);
+
+ /* standard C99 basic types */
+ vrfy_cint_type(long long, unsigned long long, H5_SIZEOF_LONG_LONG);
+ vrfy_cint_type(int8_t, uint8_t, H5_SIZEOF_INT8_T);
+ vrfy_cint_type(int16_t, uint16_t, H5_SIZEOF_INT16_T);
+ vrfy_cint_type(int32_t, uint32_t, H5_SIZEOF_INT32_T);
+ vrfy_cint_type(int64_t, uint64_t, H5_SIZEOF_INT64_T);
+
+ /* Some vendors have different sizes for the signed and unsigned */
+ /* fast8_t. Need to check them individually. */
+#if H5_SIZEOF_INT_FAST8_T > 0
+ vrfy_ctype(int_fast8_t, H5_SIZEOF_INT_FAST8_T);
+#endif
+
+#if H5_SIZEOF_UINT_FAST8_T > 0
+ vrfy_ctype(uint_fast8_t, H5_SIZEOF_UINT_FAST8_T);
+#endif
+
+#if H5_SIZEOF_INT_FAST16_T > 0
+ vrfy_cint_type(int_fast16_t, uint_fast16_t, H5_SIZEOF_INT_FAST16_T);
+#endif
+
+#if H5_SIZEOF_INT_FAST32_T > 0
+ vrfy_cint_type(int_fast32_t, uint_fast32_t, H5_SIZEOF_INT_FAST32_T);
+#endif
+
+#if H5_SIZEOF_INT_FAST64_T > 0
+ vrfy_cint_type(int_fast64_t, uint_fast64_t, H5_SIZEOF_INT_FAST64_T);
+#endif
+
+#if H5_SIZEOF_INT_LEAST8_T > 0
+ vrfy_cint_type(int_least8_t, uint_least8_t, H5_SIZEOF_INT_LEAST8_T);
+#endif
+
+#if H5_SIZEOF_INT_LEAST16_T > 0
+ vrfy_cint_type(int_least16_t, uint_least16_t, H5_SIZEOF_INT_LEAST16_T);
+#endif
+
+#if H5_SIZEOF_INT_LEAST32_T > 0
+ vrfy_cint_type(int_least32_t, uint_least32_t, H5_SIZEOF_INT_LEAST32_T);
+#endif
+
+#if H5_SIZEOF_INT_LEAST64_T > 0
+ vrfy_cint_type(int_least64_t, uint_least64_t, H5_SIZEOF_INT_LEAST64_T);
+#endif
+
+#if H5_SIZEOF_OFF_T > 0
+ vrfy_ctype(off_t, H5_SIZEOF_OFF_T);
+#endif
+
+#if H5_SIZEOF_SIZE_T > 0
+ vrfy_ctype(size_t, H5_SIZEOF_SIZE_T);
+#endif
+
+#if H5_SIZEOF_SSIZE_T > 0
+ vrfy_ctype(ssize_t, H5_SIZEOF_SSIZE_T);
+#endif
+}
+
+/*-------------------------------------------------------------------------
+ * Function: test_exit_definitions
+ *
+ * Purpose: test the exit macros values
+ *
+ * Return: none (error is fed back via global variable num_errs)
+ *
+ * Programmer: Albert Cheng
+ * October 12, 2009
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+void
+test_exit_definitions(void)
+{
+ /* Verify the EXIT_SUCCESS and EXIT_FAILURE are 0 and 1 respectively. */
+ /* This should be true for POSIX compliant systems. */
+ if (EXIT_SUCCESS != 0)
+ TestErrPrintf("Error: EXIT_SUCCESS is %d, should be %d\n", EXIT_SUCCESS, 0);
+ if (EXIT_FAILURE != 1)
+ TestErrPrintf("Error: EXIT_FAILURE is %d, should be %d\n", EXIT_FAILURE, 1);
+}
diff --git a/test/API/tcoords.c b/test/API/tcoords.c
new file mode 100644
index 0000000..9c66b40
--- /dev/null
+++ b/test/API/tcoords.c
@@ -0,0 +1,724 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://www.hdfgroup.org/licenses. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/***********************************************************
+ *
+ * Test program: tcoords
+ *
+ * Test the element coordinates for dataspace selection. For
+ * a chunked dataset, when the hyperslab selection of some
+ * dimensions is full, the library optimizes it by "flattening"
+ * the fully selected dimensions. This program tests whether the
+ * coordinates of selected elements are correctly calculated.
+ *
+ *************************************************************/
+
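+/* Illustrative note (not part of the original test logic): with the 4-D
+ * dataset dimensions {2, 3, 6, 2} used below, a hyperslab selection such as
+ *
+ *     start = {0, 0, 0, 0}, stride/count = {1, 1, 1, 1}, block = {1, 1, 6, 2}
+ *
+ * fully selects the two fastest-growing dimensions, so the library may treat
+ * them internally as one flattened dimension of 12 elements. The checks in
+ * this file verify that element coordinates are still reported correctly
+ * when that optimization applies.
+ */
+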
+#include "testhdf5.h"
+
+#define FILENAME "coord.h5"
+
+#define SINGLE_END_DSET "single_end_dset"
+#define MULTI_ENDS_SEL_HYPER_DSET "multiple_ends_dset"
+
+#define NAME_LEN 128
+
+/* Data written to the dataset for single block test. Global variable
+ * for convenience. */
+int da_buffer[2][3][6][2];
+
+/***********************************************************
+**
+** test_singleEnd_selElements(): Test element selection of only
+** one block.
+**
+*************************************************************/
+static void
+test_singleEnd_selElements(hid_t file, hbool_t is_chunked)
+{
+ hid_t sid, plid, did, msid;
+ char dset_name[NAME_LEN]; /* Dataset name */
+ size_t elmts_numb;
+ herr_t ret; /* Generic error return */
+ int i, j, k;
+ hsize_t da_dims[4] = {2, 3, 6, 2};
+ hsize_t da_chunksize[4] = {1, 3, 3, 2};
+
+ /* For testing the full selection in the fastest-growing end */
+ int mem1_buffer[1][1][6][2];
+ hsize_t mem1_dims[4] = {1, 1, 6, 2};
+ hsize_t da_elmts1[12][4] = {{0, 0, 0, 0}, {0, 0, 0, 1}, {0, 0, 1, 0}, {0, 0, 1, 1},
+ {0, 0, 2, 0}, {0, 0, 2, 1}, {0, 0, 3, 0}, {0, 0, 3, 1},
+ {0, 0, 4, 0}, {0, 0, 4, 1}, {0, 0, 5, 0}, {0, 0, 5, 1}};
+
+ /* For testing the full selection in the slowest-growing end */
+ int mem2_buffer[2][3][1][1];
+ hsize_t mem2_dims[4] = {2, 3, 1, 1};
+ hsize_t da_elmts2[6][4] = {{0, 0, 0, 0}, {0, 1, 0, 0}, {0, 2, 0, 0},
+ {1, 0, 0, 0}, {1, 1, 0, 0}, {1, 2, 0, 0}};
+
+ /* For testing the full selection in the middle dimensions */
+ int mem3_buffer[1][3][6][1];
+ hsize_t mem3_dims[4] = {1, 3, 6, 1};
+ hsize_t da_elmts3[18][4] = {{0, 0, 0, 0}, {0, 0, 1, 0}, {0, 0, 2, 0}, {0, 0, 3, 0}, {0, 0, 4, 0},
+ {0, 0, 5, 0}, {0, 1, 0, 0}, {0, 1, 1, 0}, {0, 1, 2, 0}, {0, 1, 3, 0},
+ {0, 1, 4, 0}, {0, 1, 5, 0}, {0, 2, 0, 0}, {0, 2, 1, 0}, {0, 2, 2, 0},
+ {0, 2, 3, 0}, {0, 2, 4, 0}, {0, 2, 5, 0}};
+
+ /* Create and write the dataset */
+ sid = H5Screate_simple(4, da_dims, da_dims);
+ CHECK(sid, FAIL, "H5Screate_simple");
+
+ plid = H5Pcreate(H5P_DATASET_CREATE);
+ CHECK(plid, FAIL, "H5Pcreate");
+
+ if (is_chunked) {
+ ret = H5Pset_chunk(plid, 4, da_chunksize);
+ CHECK(ret, FAIL, "H5Pset_chunk");
+ }
+
+ /* Construct dataset's name */
+ HDmemset(dset_name, 0, (size_t)NAME_LEN);
+ HDstrcat(dset_name, SINGLE_END_DSET);
+ if (is_chunked)
+ HDstrcat(dset_name, "_chunked");
+
+ did = H5Dcreate2(file, dset_name, H5T_NATIVE_INT, sid, H5P_DEFAULT, plid, H5P_DEFAULT);
+ CHECK(did, FAIL, "H5Dcreate2");
+
+ /* Initialize the data to be written to file */
+ for (i = 0; i < 2; i++) {
+ for (j = 0; j < 3; j++) {
+ for (k = 0; k < 6; k++) {
+ da_buffer[i][j][k][0] = i * 100 + j * 10 + k;
+ da_buffer[i][j][k][1] = i * 100 + j * 10 + k + 1;
+ }
+ }
+ }
+
+ ret = H5Dwrite(did, H5T_NATIVE_INT, sid, sid, H5P_DEFAULT, da_buffer);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ ret = H5Dclose(did);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* ****** Case 1: ******
+ * Testing the full selection in the fastest-growing end */
+ did = H5Dopen2(file, dset_name, H5P_DEFAULT);
+ CHECK(did, FAIL, "H5Dopen2");
+
+ /* Select the elements in the dataset */
+ elmts_numb = 12;
+
+ ret = H5Sselect_elements(sid, H5S_SELECT_SET, elmts_numb, (const hsize_t *)da_elmts1);
+ CHECK(ret, FAIL, "H5Sselect_elements");
+
+ /* Dataspace for memory buffer */
+ msid = H5Screate_simple(4, mem1_dims, mem1_dims);
+ CHECK(msid, FAIL, "H5Screate_simple");
+
+ ret = H5Sselect_all(msid);
+ CHECK(ret, FAIL, "H5Sselect_all");
+
+ ret = H5Dread(did, H5T_NATIVE_INT, msid, sid, H5P_DEFAULT, mem1_buffer);
+ CHECK(ret, FAIL, "H5Dread");
+
+ ret = H5Dclose(did);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ ret = H5Sclose(msid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ for (i = 0; i < 6; i++)
+ for (j = 0; j < 2; j++)
+ if (da_buffer[0][0][i][j] != mem1_buffer[0][0][i][j]) {
+ TestErrPrintf("%u: Read different values than written at index 0,0,%d,%d\n", __LINE__, i, j);
+ }
+
+ /* ****** Case 2: ******
+ * Testing the full selection in the slowest-growing end */
+ did = H5Dopen2(file, dset_name, H5P_DEFAULT);
+ CHECK(did, FAIL, "H5Dopen2");
+
+ /* Select the elements in the dataset */
+ elmts_numb = 6;
+
+ ret = H5Sselect_elements(sid, H5S_SELECT_SET, elmts_numb, (const hsize_t *)da_elmts2);
+ CHECK(ret, FAIL, "H5Sselect_elements");
+
+ /* Dataspace for memory buffer */
+ msid = H5Screate_simple(4, mem2_dims, mem2_dims);
+ CHECK(msid, FAIL, "H5Screate_simple");
+
+ ret = H5Sselect_all(msid);
+ CHECK(ret, FAIL, "H5Sselect_all");
+
+ ret = H5Dread(did, H5T_NATIVE_INT, msid, sid, H5P_DEFAULT, mem2_buffer);
+ CHECK(ret, FAIL, "H5Dread");
+
+ ret = H5Dclose(did);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ ret = H5Sclose(msid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ for (i = 0; i < 2; i++)
+ for (j = 0; j < 3; j++)
+ if (da_buffer[i][j][0][0] != mem2_buffer[i][j][0][0]) {
+ TestErrPrintf("%u: Read different values than written at index %d,%d,0,0, da_buffer = %d, "
+ "mem2_buffer = %d\n",
+ __LINE__, i, j, da_buffer[i][j][0][0], mem2_buffer[i][j][0][0]);
+ }
+
+ /* ****** Case 3: ******
+ * Testing the full selection in the middle dimensions */
+ did = H5Dopen2(file, dset_name, H5P_DEFAULT);
+ CHECK(did, FAIL, "H5Dopen2");
+
+ /* Select the elements in the dataset */
+ elmts_numb = 18;
+
+ ret = H5Sselect_elements(sid, H5S_SELECT_SET, elmts_numb, (const hsize_t *)da_elmts3);
+ CHECK(ret, FAIL, "H5Sselect_elements");
+
+ /* Dataspace for memory buffer */
+ msid = H5Screate_simple(4, mem3_dims, mem3_dims);
+ CHECK(msid, FAIL, "H5Screate_simple");
+
+ ret = H5Sselect_all(msid);
+ CHECK(ret, FAIL, "H5Sselect_all");
+
+ ret = H5Dread(did, H5T_NATIVE_INT, msid, sid, H5P_DEFAULT, mem3_buffer);
+ CHECK(ret, FAIL, "H5Dread");
+
+ ret = H5Dclose(did);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ ret = H5Sclose(msid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ for (i = 0; i < 3; i++)
+ for (j = 0; j < 6; j++)
+ if (da_buffer[0][i][j][0] != mem3_buffer[0][i][j][0]) {
+ TestErrPrintf("%u: Read different values than written at index 0,%d,%d,0\n", __LINE__, i, j);
+ }
+
+ ret = H5Sclose(sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ ret = H5Pclose(plid);
+ CHECK(ret, FAIL, "H5Pclose");
+}
+
+/***********************************************************
+**
+** test_singleEnd_selHyperslab(): Test full hyperslab selection
+** of only one block.
+**
+*************************************************************/
+static void
+test_singleEnd_selHyperslab(hid_t file, hbool_t is_chunked)
+{
+ hid_t sid, did, msid;
+ char dset_name[NAME_LEN]; /* Dataset name */
+ herr_t ret; /* Generic error return */
+ int i, j;
+ hsize_t da_dims[4] = {2, 3, 6, 2};
+
+ /* For testing the full selection in the fastest-growing end */
+ int mem1_buffer[1][1][6][2];
+ hsize_t mem1_dims[4] = {1, 1, 6, 2};
+ hsize_t mem1_start[4] = {0, 0, 0, 0};
+ hsize_t mem1_count[4] = {1, 1, 1, 1};
+ hsize_t mem1_stride[4] = {1, 1, 1, 1};
+ hsize_t mem1_block[4] = {1, 1, 6, 2};
+
+ /* For testing the full selection in the slowest-growing end */
+ int mem2_buffer[2][3][1][1];
+ hsize_t mem2_dims[4] = {2, 3, 1, 1};
+ hsize_t mem2_start[4] = {0, 0, 0, 0};
+ hsize_t mem2_count[4] = {1, 1, 1, 1};
+ hsize_t mem2_stride[4] = {1, 1, 1, 1};
+ hsize_t mem2_block[4] = {2, 3, 1, 1};
+
+ /* For testing the full selection in the middle dimensions */
+ int mem3_buffer[1][3][6][1];
+ hsize_t mem3_dims[4] = {1, 3, 6, 1};
+ hsize_t mem3_start[4] = {0, 0, 0, 0};
+ hsize_t mem3_count[4] = {1, 1, 1, 1};
+ hsize_t mem3_stride[4] = {1, 1, 1, 1};
+ hsize_t mem3_block[4] = {1, 3, 6, 1};
+
+ /* Construct dataset's name */
+ HDmemset(dset_name, 0, NAME_LEN);
+ HDstrcat(dset_name, SINGLE_END_DSET);
+ if (is_chunked)
+ HDstrcat(dset_name, "_chunked");
+
+ /* Dataspace for the dataset in file */
+ sid = H5Screate_simple(4, da_dims, da_dims);
+ CHECK(sid, FAIL, "H5Screate_simple");
+
+ /* ****** Case 1: ******
+ * Testing the full selection in the fastest-growing end */
+ did = H5Dopen2(file, dset_name, H5P_DEFAULT);
+ CHECK(did, FAIL, "H5Dopen2");
+
+ /* Select the elements in the dataset */
+ ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, mem1_start, mem1_stride, mem1_count, mem1_block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* Dataspace for memory buffer */
+ msid = H5Screate_simple(4, mem1_dims, mem1_dims);
+ CHECK(msid, FAIL, "H5Screate_simple");
+
+ ret = H5Sselect_all(msid);
+ CHECK(ret, FAIL, "H5Sselect_all");
+
+ ret = H5Dread(did, H5T_NATIVE_INT, msid, sid, H5P_DEFAULT, mem1_buffer);
+ CHECK(ret, FAIL, "H5Dread");
+
+ ret = H5Dclose(did);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ ret = H5Sclose(msid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ for (i = 0; i < 6; i++)
+ for (j = 0; j < 2; j++)
+ if (da_buffer[0][0][i][j] != mem1_buffer[0][0][i][j]) {
+ TestErrPrintf("%u: Read different values than written at index 0,0,%d,%d\n", __LINE__, i, j);
+ }
+
+ /* ****** Case 2: ******
+ * Testing the full selection in the slowest-growing end */
+ did = H5Dopen2(file, dset_name, H5P_DEFAULT);
+ CHECK(did, FAIL, "H5Dopen2");
+
+ /* Select the elements in the dataset */
+ ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, mem2_start, mem2_stride, mem2_count, mem2_block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* Dataspace for memory buffer */
+ msid = H5Screate_simple(4, mem2_dims, mem2_dims);
+ CHECK(msid, FAIL, "H5Screate_simple");
+
+ ret = H5Sselect_all(msid);
+ CHECK(ret, FAIL, "H5Sselect_all");
+
+ ret = H5Dread(did, H5T_NATIVE_INT, msid, sid, H5P_DEFAULT, mem2_buffer);
+ CHECK(ret, FAIL, "H5Dread");
+
+ ret = H5Dclose(did);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ ret = H5Sclose(msid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ for (i = 0; i < 2; i++)
+ for (j = 0; j < 3; j++)
+ if (da_buffer[i][j][0][0] != mem2_buffer[i][j][0][0]) {
+ TestErrPrintf("%u: Read different values than written at index %d,%d,0,0\n", __LINE__, i, j);
+ }
+
+ /* ****** Case 3: ******
+ * Testing the full selection in the middle dimensions */
+ did = H5Dopen2(file, dset_name, H5P_DEFAULT);
+ CHECK(did, FAIL, "H5Dopen2");
+
+ /* Select the elements in the dataset */
+ ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, mem3_start, mem3_stride, mem3_count, mem3_block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* Dataspace for memory buffer */
+ msid = H5Screate_simple(4, mem3_dims, mem3_dims);
+ CHECK(msid, FAIL, "H5Screate_simple");
+
+ ret = H5Sselect_all(msid);
+ CHECK(ret, FAIL, "H5Sselect_all");
+
+ ret = H5Dread(did, H5T_NATIVE_INT, msid, sid, H5P_DEFAULT, mem3_buffer);
+ CHECK(ret, FAIL, "H5Dread");
+
+ ret = H5Dclose(did);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ ret = H5Sclose(msid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ for (i = 0; i < 3; i++)
+ for (j = 0; j < 6; j++)
+ if (da_buffer[0][i][j][0] != mem3_buffer[0][i][j][0]) {
+ TestErrPrintf("%u: Read different values than written at index 0,%d,%d,0\n", __LINE__, i, j);
+ }
+
+ ret = H5Sclose(sid);
+ CHECK(ret, FAIL, "H5Sclose");
+}
+
+/***********************************************************
+**
+** test_multiple_ends(): Test full hyperslab selection of
+** multiple blocks.
+**
+*************************************************************/
+static void
+test_multiple_ends(hid_t file, hbool_t is_chunked)
+{
+ hid_t sid, plid, did, msid;
+ char dset_name[NAME_LEN]; /* Dataset name */
+ herr_t ret; /* Generic error return */
+ int i, j, k, l, m, n, p;
+ hsize_t da_dims[8] = {4, 5, 3, 4, 2, 3, 6, 2};
+ hsize_t da_chunksize[8] = {1, 5, 3, 2, 2, 3, 3, 2};
+ struct {
+ int arr[4][5][3][4][2][3][6][2];
+ } *data_buf = NULL;
+
+ /* For testing the full selections in the fastest-growing end and in the middle dimensions */
+ struct {
+ int arr[1][1][1][4][2][1][6][2];
+ } *mem1_buffer = NULL;
+ hsize_t mem1_dims[8] = {1, 1, 1, 4, 2, 1, 6, 2};
+ hsize_t mem1_start[8] = {0, 0, 0, 0, 0, 0, 0, 0};
+ hsize_t mem1_count[8] = {1, 1, 1, 1, 1, 1, 1, 1};
+ hsize_t mem1_stride[8] = {1, 1, 1, 1, 1, 1, 1, 1};
+ hsize_t mem1_block[8] = {1, 1, 1, 4, 2, 1, 6, 2};
+
+ /* For testing the full selections in the slowest-growing end and in the middle dimensions */
+ struct {
+ int arr[4][5][1][4][2][1][1][1];
+ } *mem2_buffer = NULL;
+ hsize_t mem2_dims[8] = {4, 5, 1, 4, 2, 1, 1, 1};
+ hsize_t mem2_start[8] = {0, 0, 0, 0, 0, 0, 0, 0};
+ hsize_t mem2_count[8] = {1, 1, 1, 1, 1, 1, 1, 1};
+ hsize_t mem2_stride[8] = {1, 1, 1, 1, 1, 1, 1, 1};
+ hsize_t mem2_block[8] = {4, 5, 1, 4, 2, 1, 1, 1};
+
+ /* For testing two non-adjacent full selections in the middle dimensions */
+ struct {
+ int arr[1][5][3][1][1][3][6][1];
+ } *mem3_buffer = NULL;
+ hsize_t mem3_dims[8] = {1, 5, 3, 1, 1, 3, 6, 1};
+ hsize_t mem3_start[8] = {0, 0, 0, 0, 0, 0, 0, 0};
+ hsize_t mem3_count[8] = {1, 1, 1, 1, 1, 1, 1, 1};
+ hsize_t mem3_stride[8] = {1, 1, 1, 1, 1, 1, 1, 1};
+ hsize_t mem3_block[8] = {1, 5, 3, 1, 1, 3, 6, 1};
+
+ /* For testing the full selections in the fastest-growing end and the slowest-growing end */
+ struct {
+ int arr[4][5][1][1][1][1][6][2];
+ } *mem4_buffer = NULL;
+ hsize_t mem4_dims[8] = {4, 5, 1, 1, 1, 1, 6, 2};
+ hsize_t mem4_start[8] = {0, 0, 0, 0, 0, 0, 0, 0};
+ hsize_t mem4_count[8] = {1, 1, 1, 1, 1, 1, 1, 1};
+ hsize_t mem4_stride[8] = {1, 1, 1, 1, 1, 1, 1, 1};
+ hsize_t mem4_block[8] = {4, 5, 1, 1, 1, 1, 6, 2};
+
+ /* For testing the full selections in the fastest-growing end and slowest-growing end,
+ * also in the middle dimensions */
+ struct {
+ int arr[4][5][1][4][2][1][6][2];
+ } *mem5_buffer = NULL;
+ hsize_t mem5_dims[8] = {4, 5, 1, 4, 2, 1, 6, 2};
+ hsize_t mem5_start[8] = {0, 0, 0, 0, 0, 0, 0, 0};
+ hsize_t mem5_count[8] = {1, 1, 1, 1, 1, 1, 1, 1};
+ hsize_t mem5_stride[8] = {1, 1, 1, 1, 1, 1, 1, 1};
+ hsize_t mem5_block[8] = {4, 5, 1, 4, 2, 1, 6, 2};
+
+ /* Initialize dynamic arrays */
+ data_buf = HDcalloc(1, sizeof(*data_buf));
+ CHECK_PTR(data_buf, "HDcalloc");
+ mem1_buffer = HDcalloc(1, sizeof(*mem1_buffer));
+ CHECK_PTR(mem1_buffer, "HDcalloc");
+ mem2_buffer = HDcalloc(1, sizeof(*mem2_buffer));
+ CHECK_PTR(mem2_buffer, "HDcalloc");
+ mem3_buffer = HDcalloc(1, sizeof(*mem3_buffer));
+ CHECK_PTR(mem3_buffer, "HDcalloc");
+ mem4_buffer = HDcalloc(1, sizeof(*mem4_buffer));
+ CHECK_PTR(mem4_buffer, "HDcalloc");
+ mem5_buffer = HDcalloc(1, sizeof(*mem5_buffer));
+ CHECK_PTR(mem5_buffer, "HDcalloc");
+
+ /* Create and write the dataset */
+ sid = H5Screate_simple(8, da_dims, da_dims);
+ CHECK(sid, FAIL, "H5Screate_simple");
+
+ plid = H5Pcreate(H5P_DATASET_CREATE);
+ CHECK(plid, FAIL, "H5Pcreate");
+
+ if (is_chunked) {
+ ret = H5Pset_chunk(plid, 8, da_chunksize);
+ CHECK(ret, FAIL, "H5Pset_chunk");
+ }
+
+ /* Construct dataset's name */
+ HDmemset(dset_name, 0, NAME_LEN);
+ HDstrcat(dset_name, MULTI_ENDS_SEL_HYPER_DSET);
+ if (is_chunked)
+ HDstrcat(dset_name, "_chunked");
+
+ did = H5Dcreate2(file, dset_name, H5T_NATIVE_INT, sid, H5P_DEFAULT, plid, H5P_DEFAULT);
+ CHECK(did, FAIL, "H5Dcreate2");
+
+ for (i = 0; i < 4; i++)
+ for (j = 0; j < 5; j++)
+ for (k = 0; k < 3; k++)
+ for (l = 0; l < 4; l++)
+ for (m = 0; m < 2; m++)
+ for (n = 0; n < 3; n++)
+ for (p = 0; p < 6; p++) {
+ data_buf->arr[i][j][k][l][m][n][p][0] =
+ i * 1000000 + j * 100000 + k * 10000 + l * 1000 + m * 100 + n * 10 + p;
+ data_buf->arr[i][j][k][l][m][n][p][1] = i * 1000000 + j * 100000 + k * 10000 +
+ l * 1000 + m * 100 + n * 10 + p + 1;
+ }
+
+ ret = H5Dwrite(did, H5T_NATIVE_INT, sid, sid, H5P_DEFAULT, data_buf);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ ret = H5Dclose(did);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* ****** Case 1: ******
+ * Testing the full selections in the fastest-growing end and in the middle dimensions */
+ did = H5Dopen2(file, dset_name, H5P_DEFAULT);
+ CHECK(did, FAIL, "H5Dopen2");
+
+ /* Select the elements in the dataset */
+ ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, mem1_start, mem1_stride, mem1_count, mem1_block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ msid = H5Screate_simple(8, mem1_dims, mem1_dims);
+ CHECK(msid, FAIL, "H5Screate_simple");
+
+ ret = H5Sselect_all(msid);
+ CHECK(ret, FAIL, "H5Sselect_all");
+
+ ret = H5Dread(did, H5T_NATIVE_INT, msid, sid, H5P_DEFAULT, mem1_buffer);
+ CHECK(ret, FAIL, "H5Dread");
+
+ ret = H5Dclose(did);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ ret = H5Sclose(msid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ for (i = 0; i < 4; i++)
+ for (j = 0; j < 2; j++)
+ for (k = 0; k < 6; k++)
+ for (l = 0; l < 2; l++)
+ if (data_buf->arr[0][0][0][i][j][0][k][l] != mem1_buffer->arr[0][0][0][i][j][0][k][l]) {
+ TestErrPrintf("%u: Read different values than written at index 0,0,0,%d,%d,0,%d,%d\n",
+ __LINE__, i, j, k, l);
+ }
+
+ /* ****** Case 2: ******
+ * Testing the full selections in the slowest-growing end and in the middle dimensions */
+ did = H5Dopen2(file, dset_name, H5P_DEFAULT);
+ CHECK(did, FAIL, "H5Dopen2");
+
+ /* Select the elements in the dataset */
+ ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, mem2_start, mem2_stride, mem2_count, mem2_block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ msid = H5Screate_simple(8, mem2_dims, mem2_dims);
+ CHECK(msid, FAIL, "H5Screate_simple");
+
+ ret = H5Sselect_all(msid);
+ CHECK(ret, FAIL, "H5Sselect_all");
+
+ ret = H5Dread(did, H5T_NATIVE_INT, msid, sid, H5P_DEFAULT, mem2_buffer);
+ CHECK(ret, FAIL, "H5Dread");
+
+ ret = H5Dclose(did);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ ret = H5Sclose(msid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ for (i = 0; i < 4; i++)
+ for (j = 0; j < 5; j++)
+ for (k = 0; k < 4; k++)
+ for (l = 0; l < 2; l++)
+ if (data_buf->arr[i][j][0][k][l][0][0][0] != mem2_buffer->arr[i][j][0][k][l][0][0][0]) {
+ TestErrPrintf("%u: Read different values than written at index %d,%d,0,%d,%d,0,0,0\n",
+ __LINE__, i, j, k, l);
+ }
+
+ /* ****** Case 3: ******
+ * Testing two non-adjacent full selections in the middle dimensions */
+ did = H5Dopen2(file, dset_name, H5P_DEFAULT);
+ CHECK(did, FAIL, "H5Dopen2");
+
+ /* Select the elements in the dataset */
+ ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, mem3_start, mem3_stride, mem3_count, mem3_block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ msid = H5Screate_simple(8, mem3_dims, mem3_dims);
+ CHECK(msid, FAIL, "H5Screate_simple");
+
+ ret = H5Sselect_all(msid);
+ CHECK(ret, FAIL, "H5Sselect_all");
+
+ ret = H5Dread(did, H5T_NATIVE_INT, msid, sid, H5P_DEFAULT, mem3_buffer);
+ CHECK(ret, FAIL, "H5Dread");
+
+ ret = H5Dclose(did);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ ret = H5Sclose(msid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ for (i = 0; i < 5; i++)
+ for (j = 0; j < 3; j++)
+ for (k = 0; k < 3; k++)
+ for (l = 0; l < 6; l++)
+ if (data_buf->arr[0][i][j][0][0][k][l][0] != mem3_buffer->arr[0][i][j][0][0][k][l][0]) {
+ TestErrPrintf("%u: Read different values than written at index 0,%d,%d,0,0,%d,%d,0\n",
+ __LINE__, i, j, k, l);
+ }
+
+ /* ****** Case 4: ******
+ * Testing the full selections in the fastest-growing end and the slowest-growing end */
+ did = H5Dopen2(file, dset_name, H5P_DEFAULT);
+ CHECK(did, FAIL, "H5Dopen2");
+
+ /* Select the elements in the dataset */
+ ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, mem4_start, mem4_stride, mem4_count, mem4_block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ msid = H5Screate_simple(8, mem4_dims, mem4_dims);
+ CHECK(msid, FAIL, "H5Screate_simple");
+
+ ret = H5Sselect_all(msid);
+ CHECK(ret, FAIL, "H5Sselect_all");
+
+ ret = H5Dread(did, H5T_NATIVE_INT, msid, sid, H5P_DEFAULT, mem4_buffer);
+ CHECK(ret, FAIL, "H5Dread");
+
+ ret = H5Dclose(did);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ ret = H5Sclose(msid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ for (i = 0; i < 4; i++)
+ for (j = 0; j < 5; j++)
+ for (k = 0; k < 6; k++)
+ for (l = 0; l < 2; l++)
+ if (data_buf->arr[i][j][0][0][0][0][k][l] != mem4_buffer->arr[i][j][0][0][0][0][k][l]) {
+ TestErrPrintf("%u: Read different values than written at index %d,%d,0,0,0,0,%d,%d\n",
+ __LINE__, i, j, k, l);
+ }
+
+ /* ****** Case 5: ******
+ * Testing the full selections in the fastest-growing end and the slowest-growing end,
+ * and also in the middle dimensions */
+ did = H5Dopen2(file, dset_name, H5P_DEFAULT);
+ CHECK(did, FAIL, "H5Dopen2");
+
+ /* Select the elements in the dataset */
+ ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, mem5_start, mem5_stride, mem5_count, mem5_block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ msid = H5Screate_simple(8, mem5_dims, mem5_dims);
+ CHECK(msid, FAIL, "H5Screate_simple");
+
+ ret = H5Sselect_all(msid);
+ CHECK(ret, FAIL, "H5Sselect_all");
+
+ ret = H5Dread(did, H5T_NATIVE_INT, msid, sid, H5P_DEFAULT, mem5_buffer);
+ CHECK(ret, FAIL, "H5Dread");
+
+ ret = H5Dclose(did);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ ret = H5Sclose(msid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ for (i = 0; i < 4; i++)
+ for (j = 0; j < 5; j++)
+ for (k = 0; k < 4; k++)
+ for (l = 0; l < 2; l++)
+ for (m = 0; m < 6; m++)
+ for (n = 0; n < 2; n++)
+ if (data_buf->arr[i][j][0][k][l][0][m][n] !=
+ mem5_buffer->arr[i][j][0][k][l][0][m][n]) {
+ TestErrPrintf(
+ "%u: Read different values than written at index %d,%d,0,%d,%d,0,%d,%d\n",
+ __LINE__, i, j, k, l, m, n);
+ }
+
+ ret = H5Sclose(sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ ret = H5Pclose(plid);
+ CHECK(ret, FAIL, "H5Pclose");
+
+ HDfree(data_buf);
+ HDfree(mem1_buffer);
+ HDfree(mem2_buffer);
+ HDfree(mem3_buffer);
+ HDfree(mem4_buffer);
+ HDfree(mem5_buffer);
+}
+
+/****************************************************************
+**
+** test_coords(): Main testing routine.
+**
+****************************************************************/
+void
+test_coords(void)
+{
+ hid_t fid;
+ hbool_t is_chunk[2] = {TRUE, FALSE};
+ int i;
+ herr_t ret; /* Generic error return */
+
+ fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(fid, FAIL, "H5Fcreate");
+
+ for (i = 0; i < 2; i++) {
+ /* Test H5Sselect_elements with selection of one block of data */
+ test_singleEnd_selElements(fid, is_chunk[i]);
+
+ /* Test H5Sselect_hyperslab with selection of one block of data */
+ test_singleEnd_selHyperslab(fid, is_chunk[i]);
+
+ /* Test H5Sselect_hyperslab with selection of multiple blocks of data */
+ test_multiple_ends(fid, is_chunk[i]);
+ }
+
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+}
+
+/*-------------------------------------------------------------------------
+ * Function: cleanup_coords
+ *
+ * Purpose: Cleanup temporary test files
+ *
+ * Return: none
+ *
+ * Programmer: Raymond Lu
+ * 20 Dec. 2007
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+void
+cleanup_coords(void)
+{
+ H5Fdelete(FILENAME, H5P_DEFAULT);
+}
diff --git a/test/API/testhdf5.c b/test/API/testhdf5.c
new file mode 100644
index 0000000..f29b603
--- /dev/null
+++ b/test/API/testhdf5.c
@@ -0,0 +1,729 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://www.hdfgroup.org/licenses. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/*
+ FILE
+ testhdf5.c - HDF5 testing framework main file.
+
+ REMARKS
+ General test wrapper for HDF5 base library test programs
+
+ DESIGN
+ Each test function should be implemented as a function that takes no
+ parameters and returns void (i.e. no return value). Test functions should
+ be added to the list of AddTest() calls in main() below. Functions that
+ depend on other functionality should be placed below the AddTest() call
+ for that base functionality.
+ Each test module should include testhdf5.h and define a unique set of
+ names for the test files it creates.
+
+ BUGS/LIMITATIONS
+
+
+ */
+
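+/* Registration sketch: the AddTest() calls are commented out in main() below
+ * (the API tests currently invoke the test functions directly), but a
+ * registration would look like
+ *
+ *     AddTest("coords", test_coords, cleanup_coords, "Dataspace coordinates", NULL);
+ *
+ * i.e. a short name, the test function, its cleanup function, a description
+ * string, and a final NULL argument.
+ */
+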
+/* ANY new test needs to have a prototype in testhdf5.h */
+#include "testhdf5.h"
+
+int nerrors = 0;
+
+char *paraprefix = NULL; /* for command line option para-prefix */
+
+/* Length of multi-file VFD filename buffers */
+#define H5TEST_MULTI_FILENAME_LEN 1024
+
+/*
+ * This routine is designed to provide equivalent functionality to 'printf'
+ * and allow easy replacement in environments that don't have stdin/stdout
+ * available (e.g., Windows and the Mac).
+ */
+H5_ATTR_FORMAT(printf, 1, 2)
+int
+print_func(const char *format, ...)
+{
+ va_list arglist;
+ int ret_value;
+
+ HDva_start(arglist, format);
+ ret_value = HDvprintf(format, arglist);
+ HDva_end(arglist);
+ return ret_value;
+}
+
+/*
+ * This routine is designed to provide equivalent functionality to 'printf'
+ * and also increment the error count for the testing framework.
+ */
+int
+TestErrPrintf(const char *format, ...)
+{
+ va_list arglist;
+ int ret_value;
+
+ /* Increment the error count */
+ nerrors++;
+
+ /* Print the requested information */
+ HDva_start(arglist, format);
+ ret_value = HDvprintf(format, arglist);
+ HDva_end(arglist);
+
+ /* Return the length of the string produced (like printf() does) */
+ return ret_value;
+}
+
+#ifdef H5_HAVE_PARALLEL
+/*-------------------------------------------------------------------------
+ * Function: getenv_all
+ *
+ * Purpose: Used to get the environment that the root MPI task has.
+ * name specifies which environment variable to look for
+ * val is the string to which the value of that environment
+ * variable will be copied.
+ *
+ * NOTE: The pointer returned by this function is only
+ * valid until the next call to getenv_all and the data
+ * stored there must be copied somewhere else before any
+ * further calls to getenv_all take place.
+ *
+ * Return: pointer to a string containing the value of the environment variable
+ * NULL if the variable doesn't exist in task 'root's environment.
+ *
+ * Programmer: Leon Arber
+ * 4/4/05
+ *
+ * Modifications:
+ * Use the original getenv if MPI is not initialized. This happens
+ * when the PHDF5 library is used to build a serial program.
+ * Albert 2006/04/07
+ *
+ *-------------------------------------------------------------------------
+ */
+char *
+getenv_all(MPI_Comm comm, int root, const char *name)
+{
+ int mpi_size, mpi_rank, mpi_initialized, mpi_finalized;
+ int len;
+ static char *env = NULL;
+
+ HDassert(name);
+
+ MPI_Initialized(&mpi_initialized);
+ MPI_Finalized(&mpi_finalized);
+
+ if (mpi_initialized && !mpi_finalized) {
+ MPI_Comm_rank(comm, &mpi_rank);
+ MPI_Comm_size(comm, &mpi_size);
+ HDassert(root < mpi_size);
+
+ /* The root task does the getenv call
+ * and sends the result to the other tasks */
+ if (mpi_rank == root) {
+ env = HDgetenv(name);
+ if (env) {
+ len = (int)HDstrlen(env);
+ MPI_Bcast(&len, 1, MPI_INT, root, comm);
+ MPI_Bcast(env, len, MPI_CHAR, root, comm);
+ }
+ else {
+ /* len = -1 indicates that the variable was not in the environment */
+ len = -1;
+ MPI_Bcast(&len, 1, MPI_INT, root, comm);
+ }
+ }
+ else {
+ MPI_Bcast(&len, 1, MPI_INT, root, comm);
+ if (len >= 0) {
+ if (env == NULL)
+ env = (char *)HDmalloc((size_t)len + 1);
+ else if (HDstrlen(env) < (size_t)len)
+ env = (char *)HDrealloc(env, (size_t)len + 1);
+
+ MPI_Bcast(env, len, MPI_CHAR, root, comm);
+ env[len] = '\0';
+ }
+ else {
+ if (env)
+ HDfree(env);
+ env = NULL;
+ }
+ }
+#ifndef NDEBUG
+ MPI_Barrier(comm);
+#endif
+ }
+ else {
+ /* use original getenv */
+ if (env)
+ HDfree(env);
+ env = HDgetenv(name);
+ } /* end if */
+
+ return env;
+}
+
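+/* Usage sketch (mirrors the call made in h5_fixname_real() later in this
+ * file): rank 'root' performs the getenv() and broadcasts the result so that
+ * every rank sees the same value:
+ *
+ *     prefix = getenv_all(MPI_COMM_WORLD, 0, "HDF5_PARAPREFIX");
+ *
+ * The returned pointer is only valid until the next getenv_all() call.
+ */
+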
+#endif
+
+/*-------------------------------------------------------------------------
+ * Function: h5_fileaccess
+ *
+ * Purpose: Returns a file access template which is the default template
+ * but with a file driver, VOL connector, or libver bound set
+ * according to a constant or environment variable
+ *
+ * Return: Success: A file access property list
+ * Failure: H5I_INVALID_HID
+ *
+ * Programmer: Robb Matzke
+ * Thursday, November 19, 1998
+ *
+ *-------------------------------------------------------------------------
+ */
+hid_t
+h5_fileaccess(void)
+{
+ hid_t fapl_id = H5I_INVALID_HID;
+
+ if ((fapl_id = H5Pcreate(H5P_FILE_ACCESS)) < 0)
+ goto error;
+
+ /* Finally, check for libver bounds */
+ if (h5_get_libver_fapl(fapl_id) < 0)
+ goto error;
+
+ return fapl_id;
+
+error:
+ if (fapl_id != H5I_INVALID_HID)
+ H5Pclose(fapl_id);
+ return H5I_INVALID_HID;
+} /* end h5_fileaccess() */
+
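+/* Usage sketch (assumed typical pattern, not taken verbatim from this file):
+ * the returned FAPL is a default file access property list with any libver
+ * bounds applied, and the caller is responsible for closing it:
+ *
+ *     hid_t my_fapl = h5_fileaccess();
+ *     hid_t my_fid  = H5Fcreate("example.h5", H5F_ACC_TRUNC, H5P_DEFAULT, my_fapl);
+ *     ...
+ *     H5Pclose(my_fapl);
+ *
+ * "example.h5" and the variable names are illustrative only.
+ */
+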
+/*-------------------------------------------------------------------------
+ * Function: h5_get_libver_fapl
+ *
+ * Purpose: Sets the library version bounds for a FAPL according to the
+ * value in the constant or environment variable "HDF5_LIBVER_BOUNDS".
+ *
+ * Return: Success: 0
+ * Failure: -1
+ *
+ * Programmer: Quincey Koziol
+ * November 2018
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+h5_get_libver_fapl(hid_t fapl)
+{
+ const char *env = NULL; /* HDF5_LIBVER_BOUNDS environment variable */
+ const char *tok = NULL; /* strtok pointer */
+ char *lasts = NULL; /* Context pointer for strtok_r() call */
+ char buf[1024]; /* buffer for tokenizing HDF5_LIBVER_BOUNDS */
+
+ /* Get the environment variable, if it exists */
+ env = HDgetenv("HDF5_LIBVER_BOUNDS");
+#ifdef HDF5_LIBVER_BOUNDS
+ /* Use the environment variable, then the compile-time constant */
+ if (!env)
+ env = HDF5_LIBVER_BOUNDS;
+#endif
+
+ /* If the environment variable was not set, just return
+ * without modifying the FAPL.
+ */
+ if (!env || !*env)
+ goto done;
+
+ /* Get the first 'word' of the environment variable.
+ * If it's nothing (the environment variable was only whitespace),
+ * leave the FAPL unmodified and return.
+ */
+ HDstrncpy(buf, env, sizeof(buf));
+ buf[sizeof(buf) - 1] = '\0';
+ if (NULL == (tok = HDstrtok_r(buf, " \t\n\r", &lasts)))
+ goto done;
+
+ if (!HDstrcmp(tok, "latest")) {
+ /* use the latest format */
+ if (H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0)
+ goto error;
+ } /* end if */
+ else {
+ /* Unknown setting */
+ goto error;
+ } /* end else */
+
+done:
+ return 0;
+
+error:
+ return -1;
+} /* end h5_get_libver_fapl() */
+
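+/* Example (illustrative, hypothetical executable name): running the tests as
+ *
+ *     HDF5_LIBVER_BOUNDS=latest ./h5_api_test
+ *
+ * makes this routine set H5F_LIBVER_LATEST for both the low and high bounds
+ * of the FAPL; any other non-empty value is treated as an error.
+ */
+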
+#ifndef HDF5_PARAPREFIX
+#define HDF5_PARAPREFIX ""
+#endif
+static char *
+h5_fixname_real(const char *base_name, hid_t fapl, const char *_suffix, char *fullname, size_t size,
+ hbool_t nest_printf, hbool_t subst_for_superblock)
+{
+ const char *prefix = NULL;
+ const char *driver_env_var = NULL; /* HDF5_DRIVER environment variable */
+ char *ptr, last = '\0';
+ const char *suffix = _suffix;
+ size_t i, j;
+ hid_t driver = -1;
+ int isppdriver = 0; /* if the driver is MPI parallel */
+
+ if (!base_name || !fullname || size < 1)
+ return NULL;
+
+ HDmemset(fullname, 0, size);
+
+ /* Determine if driver is set by environment variable. If it is,
+ * only generate a suffix if fixing the filename for the superblock
+ * file. */
+ driver_env_var = HDgetenv(HDF5_DRIVER);
+ if (driver_env_var && (H5P_DEFAULT == fapl) && subst_for_superblock)
+ fapl = H5P_FILE_ACCESS_DEFAULT;
+
+ /* figure out the suffix */
+ if (H5P_DEFAULT != fapl) {
+ if ((driver = H5Pget_driver(fapl)) < 0)
+ return NULL;
+
+ if (suffix) {
+ if (H5FD_FAMILY == driver) {
+ if (subst_for_superblock)
+ suffix = "-000000.h5";
+ else
+ suffix = nest_printf ? "-%%06d.h5" : "-%06d.h5";
+ }
+ else if (H5FD_MULTI == driver) {
+
+ /* Check the HDF5_DRIVER environment variable in case
+ * we are using the split driver since both of those
+ * use the multi VFD under the hood.
+ */
+ if (driver_env_var && !HDstrcmp(driver_env_var, "split")) {
+ /* split VFD */
+ if (subst_for_superblock)
+ suffix = ".h5.meta";
+ }
+ else {
+ /* multi VFD */
+ if (subst_for_superblock)
+ suffix = "-s.h5";
+ else
+ suffix = NULL;
+ }
+ }
+ }
+ }
+
+ /* Must first check fapl is not H5P_DEFAULT (-1) because H5FD_XXX
+ * could be of value -1 if it is not defined.
+ */
+ isppdriver = ((H5P_DEFAULT != fapl) || driver_env_var) && (H5FD_MPIO == driver);
+#if 0
+ /* Check HDF5_NOCLEANUP environment setting.
+ * (The #ifdef is needed to prevent compile failure in case MPI is not
+ * configured.)
+ */
+ if (isppdriver) {
+#ifdef H5_HAVE_PARALLEL
+ if (getenv_all(MPI_COMM_WORLD, 0, HDF5_NOCLEANUP))
+ SetTestNoCleanup();
+#endif /* H5_HAVE_PARALLEL */
+ }
+ else {
+ if (HDgetenv(HDF5_NOCLEANUP))
+ SetTestNoCleanup();
+ }
+#endif
+ /* Check what prefix to use for test files. Process HDF5_PARAPREFIX and
+ * HDF5_PREFIX.
+ * Use different ones depending on parallel or serial driver used.
+ * (The #ifdef is needed to prevent compile failure in case MPI is not
+ * configured.)
+ */
+ if (isppdriver) {
+#ifdef H5_HAVE_PARALLEL
+ /*
+ * For parallel:
+ * First use command line option, then the environment
+ * variable, then try the constant
+ */
+ static int explained = 0;
+
+ prefix = (paraprefix ? paraprefix : getenv_all(MPI_COMM_WORLD, 0, "HDF5_PARAPREFIX"));
+
+ if (!prefix && !explained) {
+ /* print hint by process 0 once. */
+ int mpi_rank;
+
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ if (mpi_rank == 0)
+ HDprintf("*** Hint ***\n"
+ "You can use environment variable HDF5_PARAPREFIX to "
+ "run parallel test files in a\n"
+ "different directory or to add file type prefix. e.g.,\n"
+ " HDF5_PARAPREFIX=pfs:/PFS/user/me\n"
+ " export HDF5_PARAPREFIX\n"
+ "*** End of Hint ***\n");
+
+ explained = TRUE;
+#ifdef HDF5_PARAPREFIX
+ prefix = HDF5_PARAPREFIX;
+#endif /* HDF5_PARAPREFIX */
+ }
+#endif /* H5_HAVE_PARALLEL */
+ }
+ else {
+ /*
+ * For serial:
+ * First use the environment variable, then try the constant
+ */
+ prefix = HDgetenv("HDF5_PREFIX");
+
+#ifdef HDF5_PREFIX
+ if (!prefix)
+ prefix = HDF5_PREFIX;
+#endif /* HDF5_PREFIX */
+ }
+
+ /* Prepend the prefix value to the base name */
+ if (prefix && *prefix) {
+ if (isppdriver) {
+ /* This is a parallel system */
+ char *subdir;
+
+ if (!HDstrcmp(prefix, HDF5_PARAPREFIX)) {
+ /*
+ * If the prefix specifies the HDF5_PARAPREFIX directory, then
+ * default to using the "/tmp/$USER" or "/tmp/$LOGIN"
+ * directory instead.
+ */
+ char *user, *login;
+
+ user = HDgetenv("USER");
+ login = HDgetenv("LOGIN");
+ subdir = (user ? user : login);
+
+ if (subdir) {
+ for (i = 0; i < size && prefix[i]; i++)
+ fullname[i] = prefix[i];
+
+ fullname[i++] = '/';
+
+ for (j = 0; i < size && subdir[j]; ++i, ++j)
+ fullname[i] = subdir[j];
+ }
+ }
+
+ if (!fullname[0]) {
+ /* We didn't append the prefix yet */
+ HDstrncpy(fullname, prefix, size);
+ fullname[size - 1] = '\0';
+ }
+
+ if (HDstrlen(fullname) + HDstrlen(base_name) + 1 < size) {
+ /*
+ * Append the base_name with a slash first. Multiple
+ * slashes are handled below.
+ */
+ h5_stat_t buf;
+
+ if (HDstat(fullname, &buf) < 0)
+ /* The directory doesn't exist just yet */
+ if (HDmkdir(fullname, (mode_t)0755) < 0 && errno != EEXIST)
+ /*
+ * We couldn't make the "/tmp/${USER,LOGIN}"
+ * subdirectory. Default to PREFIX's original
+ * prefix value.
+ */
+ HDstrcpy(fullname, prefix);
+
+ HDstrcat(fullname, "/");
+ HDstrcat(fullname, base_name);
+ }
+ else {
+ /* Buffer is too small */
+ return NULL;
+ }
+ }
+ else {
+ if (HDsnprintf(fullname, size, "%s/%s", prefix, base_name) == (int)size)
+ /* Buffer is too small */
+ return NULL;
+ }
+ }
+ else if (HDstrlen(base_name) >= size) {
+ /* Buffer is too small */
+ return NULL;
+ }
+ else {
+ HDstrcpy(fullname, base_name);
+ }
+
+ /* Append a suffix */
+ if (suffix) {
+ if (HDstrlen(fullname) + HDstrlen(suffix) >= size)
+ return NULL;
+
+ HDstrcat(fullname, suffix);
+ }
+
+ /* Remove any double slashes in the filename */
+ for (ptr = fullname, i = j = 0; ptr && i < size; i++, ptr++) {
+ if (*ptr != '/' || last != '/')
+ fullname[j++] = *ptr;
+
+ last = *ptr;
+ }
+
+ return fullname;
+}
+
+char *
+h5_fixname(const char *base_name, hid_t fapl, char *fullname, size_t size)
+{
+ return (h5_fixname_real(base_name, fapl, ".h5", fullname, size, FALSE, FALSE));
+}
+
+char *
+h5_fixname_superblock(const char *base_name, hid_t fapl_id, char *fullname, size_t size)
+{
+ return (h5_fixname_real(base_name, fapl_id, ".h5", fullname, size, FALSE, TRUE));
+}
+
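+/* Usage sketch (pattern assumed from the wider HDF5 test suite, not taken
+ * from this file): expand a base name into the actual on-disk file name,
+ * honoring any HDF5_PREFIX / HDF5_PARAPREFIX setting and appending a
+ * driver-appropriate suffix:
+ *
+ *     char filename[1024];
+ *     h5_fixname("coord", fapl_id, filename, sizeof(filename));
+ *
+ * "coord" and the buffer size are illustrative values.
+ */
+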
+hbool_t
+h5_using_default_driver(const char *drv_name)
+{
+ hbool_t ret_val = TRUE;
+
+ HDassert(H5_DEFAULT_VFD == H5FD_SEC2);
+
+ if (!drv_name)
+ drv_name = HDgetenv(HDF5_DRIVER);
+
+ if (drv_name)
+ return (!HDstrcmp(drv_name, "sec2") || !HDstrcmp(drv_name, "nomatch"));
+
+ return ret_val;
+}
+
+herr_t
+h5_driver_is_default_vfd_compatible(hid_t fapl_id, hbool_t *default_vfd_compatible)
+{
+ unsigned long feat_flags = 0;
+ hid_t driver_id = H5I_INVALID_HID;
+ herr_t ret_value = SUCCEED;
+
+ HDassert(fapl_id >= 0);
+ HDassert(default_vfd_compatible);
+
+ if (fapl_id == H5P_DEFAULT)
+ fapl_id = H5P_FILE_ACCESS_DEFAULT;
+
+ if ((driver_id = H5Pget_driver(fapl_id)) < 0)
+ return FAIL;
+
+ if (H5FDdriver_query(driver_id, &feat_flags) < 0)
+ return FAIL;
+
+ *default_vfd_compatible = (feat_flags & H5FD_FEAT_DEFAULT_VFD_COMPATIBLE);
+
+ return ret_value;
+} /* end h5_driver_is_default_vfd_compatible() */
+
+int
+main(int argc, char *argv[])
+{
+#if defined(H5_PARALLEL_TEST)
+ MPI_Init(&argc, &argv);
+#else
+ (void)argc;
+ (void)argv;
+#endif
+
+ HDprintf("===================================\n");
+ HDprintf("HDF5 TESTS START\n");
+ HDprintf("===================================\n");
+
+ /* Initialize testing framework */
+ /* TestInit(argv[0], NULL, NULL); */
+
+ /* Tests are generally arranged from least to most complex... */
+ /* AddTest("config", test_configure, cleanup_configure, "Configure definitions", NULL); */
+ HDprintf("** CONFIGURE DEFINITIONS **\n");
+ test_configure();
+ HDprintf("\n");
+
+ /* AddTest("metadata", test_metadata, cleanup_metadata, "Encoding/decoding metadata", NULL); */
+
+ /* AddTest("checksum", test_checksum, cleanup_checksum, "Checksum algorithm", NULL); */
+ HDprintf("** CHECKSUM ALGORITHM **\n");
+ test_checksum();
+ HDprintf("\n");
+
+ /* AddTest("tst", test_tst, NULL, "Ternary Search Trees", NULL); */
+
+ /* AddTest("heap", test_heap, NULL, "Memory Heaps", NULL); */
+
+ /* AddTest("skiplist", test_skiplist, NULL, "Skip Lists", NULL); */
+
+ /* AddTest("refstr", test_refstr, NULL, "Reference Counted Strings", NULL); */
+
+ /* AddTest("file", test_file, cleanup_file, "Low-Level File I/O", NULL); */
+ HDprintf("** LOW-LEVEL FILE I/O **\n");
+ test_file();
+ HDprintf("\n");
+
+ /* AddTest("objects", test_h5o, cleanup_h5o, "Generic Object Functions", NULL); */
+ HDprintf("** GENERIC OBJECT FUNCTIONS **\n");
+ test_h5o();
+ HDprintf("\n");
+
+ /* AddTest("h5s", test_h5s, cleanup_h5s, "Dataspaces", NULL); */
+ HDprintf("** DATASPACES **\n");
+ test_h5s();
+ HDprintf("\n");
+
+ /* AddTest("coords", test_coords, cleanup_coords, "Dataspace coordinates", NULL); */
+ HDprintf("** DATASPACE COORDINATES **\n");
+ test_coords();
+ HDprintf("\n");
+
+ /* AddTest("sohm", test_sohm, cleanup_sohm, "Shared Object Header Messages", NULL); */
+
+ /* AddTest("attr", test_attr, cleanup_attr, "Attributes", NULL); */
+ HDprintf("** ATTRIBUTES **\n");
+ test_attr();
+ HDprintf("\n");
+
+ /* AddTest("select", test_select, cleanup_select, "Selections", NULL); */
+ HDprintf("** SELECTIONS **\n");
+ test_select();
+ HDprintf("\n");
+
+ /* AddTest("time", test_time, cleanup_time, "Time Datatypes", NULL); */
+ HDprintf("** TIME DATATYPES**\n");
+ test_time();
+ HDprintf("\n");
+
+ /* AddTest("ref_deprec", test_reference_deprec, cleanup_reference_deprec, "Deprecated References", NULL);
+ */
+
+ /* AddTest("ref", test_reference, cleanup_reference, "References", NULL); */
+ HDprintf("** REFERENCES **\n");
+ test_reference();
+ HDprintf("\n");
+
+ /* AddTest("vltypes", test_vltypes, cleanup_vltypes, "Variable-Length Datatypes", NULL); */
+ HDprintf("** VARIABLE-LENGTH DATATYPES **\n");
+ test_vltypes();
+ HDprintf("\n");
+
+ /* AddTest("vlstrings", test_vlstrings, cleanup_vlstrings, "Variable-Length Strings", NULL); */
+ HDprintf("** VARIABLE-LENGTH STRINGS **\n");
+ test_vlstrings();
+ HDprintf("\n");
+
+ /* AddTest("iterate", test_iterate, cleanup_iterate, "Group & Attribute Iteration", NULL); */
+ HDprintf("** GROUP & ATTRIBUTE ITERATION **\n");
+ test_iterate();
+ HDprintf("\n");
+
+ /* AddTest("array", test_array, cleanup_array, "Array Datatypes", NULL); */
+ HDprintf("** ARRAY DATATYPES **\n");
+ test_array();
+ HDprintf("\n");
+
+ /* AddTest("genprop", test_genprop, cleanup_genprop, "Generic Properties", NULL); */
+ HDprintf("** GENERIC PROPERTIES **\n");
+ test_genprop();
+ HDprintf("\n");
+
+ /* AddTest("unicode", test_unicode, cleanup_unicode, "UTF-8 Encoding", NULL); */
+ HDprintf("** UTF-8 ENCODING **\n");
+ test_unicode();
+ HDprintf("\n");
+
+ /* AddTest("id", test_ids, NULL, "User-Created Identifiers", NULL); */
+ HDprintf("** USER-CREATED IDENTIFIERS **\n");
+ test_ids();
+ HDprintf("\n");
+
+ /* AddTest("misc", test_misc, cleanup_misc, "Miscellaneous", NULL); */
+ HDprintf("** MISCELLANEOUS **\n");
+ test_misc();
+ HDprintf("\n");
+
+ /* Display testing information */
+ /* TestInfo(argv[0]); */
+
+ /* Parse command line arguments */
+ /* TestParseCmdLine(argc,argv); */
+
+ /* Perform requested testing */
+ /* PerformTests(); */
+
+ /* Display test summary, if requested */
+ /* if (GetTestSummary())
+ TestSummary(); */
+
+ /* Clean up test files, if allowed */
+ if (/* GetTestCleanup() && */ !getenv("HDF5_NOCLEANUP")) {
+ /* TestCleanup(); */
+
+ HDprintf("TEST CLEANUP\n");
+
+ H5E_BEGIN_TRY
+ cleanup_configure();
+ cleanup_checksum();
+ cleanup_file();
+ cleanup_h5o();
+ cleanup_h5s();
+ cleanup_coords();
+ cleanup_attr();
+ cleanup_select();
+ cleanup_time();
+ cleanup_reference();
+ cleanup_vltypes();
+ cleanup_vlstrings();
+ cleanup_iterate();
+ cleanup_array();
+ cleanup_genprop();
+ cleanup_unicode();
+ cleanup_misc();
+ H5E_END_TRY;
+
+ HDprintf("\n");
+ }
+
+ /* Release test infrastructure */
+ /* TestShutdown(); */
+
+ /* Exit failure if errors encountered; else exit success. */
+ /* No need to print anything since PerformTests() already does. */
+ if (nerrors /* GetTestNumErrs() */ > 0) {
+ HDprintf("** HDF5 tests failed with %d errors **\n", nerrors);
+ HDexit(EXIT_FAILURE);
+ }
+ else {
+ HDprintf("** HDF5 tests ran successfully **\n");
+ HDexit(EXIT_SUCCESS);
+ }
+} /* end main() */
diff --git a/test/API/testhdf5.h b/test/API/testhdf5.h
new file mode 100644
index 0000000..44ccfe0
--- /dev/null
+++ b/test/API/testhdf5.h
@@ -0,0 +1,349 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://www.hdfgroup.org/licenses. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/*
+ * This header file contains information required for testing the HDF5 library.
+ */
+
+#ifndef TESTHDF5_H
+#define TESTHDF5_H
+
+/* Include generic testing header also */
+/* #include "h5test.h" */
+#include "hdf5.h"
+#include "H5private.h"
+#include "H5_api_tests_disabled.h"
+
+#define VERBO_NONE 0 /* None */
+#define VERBO_DEF 3 /* Default */
+#define VERBO_LO 5 /* Low */
+#define VERBO_MED 7 /* Medium */
+#define VERBO_HI 9 /* High */
+
+/* Turn off verbose reporting by default */
+#define VERBOSE_MED (FALSE)
+#define VERBOSE_HI (FALSE)
+
+/* Use %ld to print the value because long should cover most cases. */
+/* Used to make certain a return value _is_not_ a value */
+#define CHECK(ret, val, where) \
+ do { \
+ if (VERBOSE_HI) { \
+ print_func(" Call to routine: %15s at line %4d " \
+ "in %s returned %ld \n", \
+ where, (int)__LINE__, __FILE__, (long)(ret)); \
+ } \
+ if ((ret) == (val)) { \
+ TestErrPrintf("*** UNEXPECTED RETURN from %s is %ld at line %4d " \
+ "in %s\n", \
+ where, (long)(ret), (int)__LINE__, __FILE__); \
+ H5Eprint2(H5E_DEFAULT, stdout); \
+ } \
+ } while (0)
+
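+/* Usage sketch (these patterns appear throughout the test sources in this
+ * directory): CHECK() reports an error when the return value equals the given
+ * failure value, while VERIFY() (below) reports an error when the result
+ * differs from the expected value:
+ *
+ *     fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ *     CHECK(fid, FAIL, "H5Fcreate");
+ *     VERIFY(chksum, 0, "H5_checksum_fletcher32");
+ */
+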
+#define CHECK_I(ret, where) \
+ { \
+ if (VERBOSE_HI) { \
+ print_func(" Call to routine: %15s at line %4d in %s returned %ld\n", (where), (int)__LINE__, \
+ __FILE__, (long)(ret)); \
+ } \
+ if ((ret) < 0) { \
+ TestErrPrintf("*** UNEXPECTED RETURN from %s is %ld line %4d in %s\n", (where), (long)(ret), \
+ (int)__LINE__, __FILE__); \
+ H5Eprint2(H5E_DEFAULT, stdout); \
+ } \
+ }
+
+/* Check that a pointer is valid (i.e.: not NULL) */
+#define CHECK_PTR(ret, where) \
+ { \
+ if (VERBOSE_HI) { \
+ print_func(" Call to routine: %15s at line %4d in %s returned %p\n", (where), (int)__LINE__, \
+ __FILE__, ((const void *)ret)); \
+ } \
+ if (!(ret)) { \
+ TestErrPrintf("*** UNEXPECTED RETURN from %s is NULL line %4d in %s\n", (where), (int)__LINE__, \
+ __FILE__); \
+ H5Eprint2(H5E_DEFAULT, stdout); \
+ } \
+ }
+
+/* Check that a pointer is NULL */
+#define CHECK_PTR_NULL(ret, where) \
+ { \
+ if (VERBOSE_HI) { \
+ print_func(" Call to routine: %15s at line %4d in %s returned %p\n", (where), (int)__LINE__, \
+ __FILE__, ((const void *)ret)); \
+ } \
+ if (ret) { \
+ TestErrPrintf("*** UNEXPECTED RETURN from %s is not NULL line %4d in %s\n", (where), \
+ (int)__LINE__, __FILE__); \
+ H5Eprint2(H5E_DEFAULT, stdout); \
+ } \
+ }
+
+/* Check that two pointers are equal */
+#define CHECK_PTR_EQ(ret, val, where) \
+ { \
+ if (VERBOSE_HI) { \
+ print_func(" Call to routine: %15s at line %4d in %s returned %p\n", (where), (int)__LINE__, \
+ __FILE__, (const void *)(ret)); \
+ } \
+ if (ret != val) { \
+ TestErrPrintf( \
+ "*** UNEXPECTED RETURN from %s: returned value of %p is not equal to %p line %4d in %s\n", \
+ (where), (const void *)(ret), (const void *)(val), (int)__LINE__, __FILE__); \
+ H5Eprint2(H5E_DEFAULT, stdout); \
+ } \
+ }
+
+/* Used to make certain a return value _is_ a value */
+#define VERIFY(_x, _val, where) \
+ do { \
+ long __x = (long)_x, __val = (long)_val; \
+ if (VERBOSE_HI) { \
+ print_func(" Call to routine: %15s at line %4d in %s had value " \
+ "%ld \n", \
+ (where), (int)__LINE__, __FILE__, __x); \
+ } \
+ if ((__x) != (__val)) { \
+ TestErrPrintf("*** UNEXPECTED VALUE from %s should be %ld, but is %ld at line %4d " \
+ "in %s\n", \
+ (where), __val, __x, (int)__LINE__, __FILE__); \
+ H5Eprint2(H5E_DEFAULT, stdout); \
+ } \
+ } while (0)
+
+/* Used to make certain a (non-'long' type's) return value _is_ a value */
+#define VERIFY_TYPE(_x, _val, _type, _format, where) \
+ do { \
+ _type __x = (_type)_x, __val = (_type)_val; \
+ if (VERBOSE_HI) { \
+ print_func(" Call to routine: %15s at line %4d in %s had value " _format " \n", (where), \
+ (int)__LINE__, __FILE__, __x); \
+ } \
+ if ((__x) != (__val)) { \
+ TestErrPrintf("*** UNEXPECTED VALUE from %s should be " _format ", but is " _format \
+ " at line %4d " \
+ "in %s\n", \
+ (where), __val, __x, (int)__LINE__, __FILE__); \
+ H5Eprint2(H5E_DEFAULT, stdout); \
+ } \
+ } while (0)
+
+/* Used to make certain a string return value _is_ a value */
+#define VERIFY_STR(x, val, where) \
+ do { \
+ if (VERBOSE_HI) { \
+ print_func(" Call to routine: %15s at line %4d in %s had value " \
+ "%s \n", \
+ (where), (int)__LINE__, __FILE__, x); \
+ } \
+ if (HDstrcmp(x, val) != 0) { \
+ TestErrPrintf("*** UNEXPECTED VALUE from %s should be %s, but is %s at line %4d " \
+ "in %s\n", \
+ where, val, x, (int)__LINE__, __FILE__); \
+ H5Eprint2(H5E_DEFAULT, stdout); \
+ } \
+ } while (0)
+
+/* Used to document process through a test and to check for errors */
+#define RESULT(ret, func) \
+ do { \
+ if (VERBOSE_MED) { \
+ print_func(" Call to routine: %15s at line %4d in %s returned " \
+ "%ld\n", \
+ func, (int)__LINE__, __FILE__, (long)(ret)); \
+ } \
+ if (VERBOSE_HI) \
+ H5Eprint2(H5E_DEFAULT, stdout); \
+ if ((ret) == FAIL) { \
+ TestErrPrintf("*** UNEXPECTED RETURN from %s is %ld at line %4d " \
+ "in %s\n", \
+ func, (long)(ret), (int)__LINE__, __FILE__); \
+ H5Eprint2(H5E_DEFAULT, stdout); \
+ } \
+ } while (0)
+
+/* Used to document process through a test */
+#if defined(H5_HAVE_PARALLEL) && defined(H5_PARALLEL_TEST)
+#define MESSAGE(V, A) \
+ { \
+ int mpi_rank; \
+ \
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); \
+ if (mpi_rank == 0 && VERBO_LO /* HDGetTestVerbosity() */ >= (V)) \
+ print_func A; \
+ }
+#else /* H5_HAVE_PARALLEL */
+#define MESSAGE(V, A) \
+ { \
+ if (VERBO_LO /* HDGetTestVerbosity() */ >= (V)) \
+ print_func A; \
+ }
+#endif /* H5_HAVE_PARALLEL */
+
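+/* Usage sketch (taken from test_checksum() earlier in this patch):
+ *
+ *     MESSAGE(5, ("Testing checksum algorithms\n"));
+ *
+ * The inner parentheses pass a complete argument list through to print_func(),
+ * and the message is printed only when the verbosity level is at least 5.
+ */
+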
+/* Used to indicate an error that is complex to check for */
+#define ERROR(where) \
+ do { \
+ if (VERBOSE_HI) \
+ print_func(" Call to routine: %15s at line %4d in %s returned " \
+ "invalid result\n", \
+ where, (int)__LINE__, __FILE__); \
+ TestErrPrintf("*** UNEXPECTED RESULT from %s at line %4d in %s\n", where, (int)__LINE__, __FILE__); \
+ } while (0)
+
+/* definitions for command strings */
+#define VERBOSITY_STR "Verbosity"
+#define SKIP_STR "Skip"
+#define TEST_STR "Test"
+#define CLEAN_STR "Cleanup"
+
+#define AT() HDprintf(" at %s:%d in %s()...\n", __FILE__, __LINE__, __func__);
+#define TESTING(WHAT) \
+ { \
+ HDprintf("Testing %-62s", WHAT); \
+ HDfflush(stdout); \
+ }
+#define TESTING_2(WHAT) \
+ { \
+ HDprintf(" Testing %-60s", WHAT); \
+ HDfflush(stdout); \
+ }
+#define PASSED() \
+ { \
+ HDputs(" PASSED"); \
+ HDfflush(stdout); \
+ }
+#define H5_FAILED() \
+ { \
+ HDputs("*FAILED*"); \
+ HDfflush(stdout); \
+ }
+#define H5_WARNING() \
+ { \
+ HDputs("*WARNING*"); \
+ HDfflush(stdout); \
+ }
+#define SKIPPED() \
+ { \
+ HDputs(" -SKIP-"); \
+ HDfflush(stdout); \
+ }
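+/* These output macros are normally used in pairs: TESTING()/TESTING_2() print
+ * the test banner, and the line is typically finished by exactly one of
+ * PASSED(), H5_FAILED(), H5_WARNING(), or SKIPPED() once the outcome is known.
+ */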
+#define PUTS_ERROR(s) \
+ { \
+ HDputs(s); \
+ AT(); \
+ goto error; \
+ }
+#define TEST_ERROR \
+ { \
+ H5_FAILED(); \
+ AT(); \
+ goto error; \
+ }
+#define STACK_ERROR \
+ { \
+ H5Eprint2(H5E_DEFAULT, stdout); \
+ goto error; \
+ }
+#define FAIL_STACK_ERROR \
+ { \
+ H5_FAILED(); \
+ AT(); \
+ H5Eprint2(H5E_DEFAULT, stdout); \
+ goto error; \
+ }
+#define FAIL_PUTS_ERROR(s) \
+ { \
+ H5_FAILED(); \
+ AT(); \
+ HDputs(s); \
+ goto error; \
+ }
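+
+/* Illustrative pattern (assumed, following the usual HDF5 test convention):
+ * these macros are meant for functions that declare an 'error:' cleanup label,
+ * e.g.
+ *     if ((fid = H5Fcreate(name, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0)
+ *         TEST_ERROR;
+ *     ...
+ * error:
+ *     return -1;
+ */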
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+extern int nerrors;
+
+int print_func(const char *format, ...);
+int TestErrPrintf(const char *format, ...);
+hid_t h5_fileaccess(void);
+/* Functions that will replace components of a FAPL */
+herr_t h5_get_vfd_fapl(hid_t fapl_id);
+herr_t h5_get_libver_fapl(hid_t fapl_id);
+char *h5_fixname(const char *base_name, hid_t fapl, char *fullname, size_t size);
+char *h5_fixname_superblock(const char *base_name, hid_t fapl, char *fullname, size_t size);
+hbool_t h5_using_default_driver(const char *drv_name);
+herr_t h5_driver_is_default_vfd_compatible(hid_t fapl_id, hbool_t *default_vfd_compatible);
+
+#ifdef H5_HAVE_PARALLEL
+char *getenv_all(MPI_Comm comm, int root, const char *name);
+#endif
+
+/* Prototypes for the test routines */
+void test_metadata(void);
+void test_checksum(void);
+void test_refstr(void);
+void test_file(void);
+void test_h5o(void);
+void test_h5t(void);
+void test_h5s(void);
+void test_coords(void);
+void test_h5d(void);
+void test_attr(void);
+void test_select(void);
+void test_time(void);
+void test_reference(void);
+void test_reference_deprec(void);
+void test_vltypes(void);
+void test_vlstrings(void);
+void test_iterate(void);
+void test_array(void);
+void test_genprop(void);
+void test_configure(void);
+void test_h5_system(void);
+void test_misc(void);
+void test_ids(void);
+void test_skiplist(void);
+void test_sohm(void);
+void test_unicode(void);
+
+/* Prototypes for the cleanup routines */
+void cleanup_metadata(void);
+void cleanup_checksum(void);
+void cleanup_file(void);
+void cleanup_h5o(void);
+void cleanup_h5s(void);
+void cleanup_coords(void);
+void cleanup_attr(void);
+void cleanup_select(void);
+void cleanup_time(void);
+void cleanup_reference(void);
+void cleanup_reference_deprec(void);
+void cleanup_vltypes(void);
+void cleanup_vlstrings(void);
+void cleanup_iterate(void);
+void cleanup_array(void);
+void cleanup_genprop(void);
+void cleanup_configure(void);
+void cleanup_h5_system(void);
+void cleanup_sohm(void);
+void cleanup_misc(void);
+void cleanup_unicode(void);
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* TESTHDF5_H */
diff --git a/test/API/tfile.c b/test/API/tfile.c
new file mode 100644
index 0000000..bc0f18e
--- /dev/null
+++ b/test/API/tfile.c
@@ -0,0 +1,8381 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://www.hdfgroup.org/licenses. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/***********************************************************
+ *
+ * Test program: tfile
+ *
+ * Test the low-level file I/O features.
+ *
+ *************************************************************/
+
+#include "testhdf5.h"
+/* #include "H5srcdir.h" */
+
+/* #include "H5Iprivate.h" */
+/* #include "H5Pprivate.h" */
+/* #include "H5VLprivate.h" */ /* Virtual Object Layer */
+
+#if 0
+/*
+ * This file needs to access private information from the H5F package.
+ * This file also needs to access the file testing code.
+ */
+#define H5F_FRIEND /*suppress error about including H5Fpkg */
+#define H5F_TESTING
+#include "H5Fpkg.h" /* File access */
+
+#define H5FD_FRIEND /*suppress error about including H5FDpkg.h */
+#define H5FD_TESTING
+#include "H5FDpkg.h"
+
+#define H5D_FRIEND /*suppress error about including H5Dpkg */
+#include "H5Dpkg.h" /* Dataset access */
+
+#define H5S_FRIEND /*suppress error about including H5Spkg */
+#include "H5Spkg.h" /* Dataspace */
+
+#define H5T_FRIEND /*suppress error about including H5Tpkg */
+#include "H5Tpkg.h" /* Datatype */
+
+#define H5A_FRIEND /*suppress error about including H5Apkg */
+#include "H5Apkg.h" /* Attributes */
+
+#define H5O_FRIEND /*suppress error about including H5Opkg */
+#include "H5Opkg.h" /* Object headers */
+#endif
+
+#define BAD_USERBLOCK_SIZE1 (hsize_t)1
+#define BAD_USERBLOCK_SIZE2 (hsize_t)2
+#define BAD_USERBLOCK_SIZE3 (hsize_t)3
+#define BAD_USERBLOCK_SIZE4 (hsize_t)64
+#define BAD_USERBLOCK_SIZE5 (hsize_t)511
+#define BAD_USERBLOCK_SIZE6 (hsize_t)513
+#define BAD_USERBLOCK_SIZE7 (hsize_t)6144
+
+#define F1_USERBLOCK_SIZE (hsize_t)0
+#define F1_OFFSET_SIZE sizeof(haddr_t)
+#define F1_LENGTH_SIZE sizeof(hsize_t)
+#define F1_SYM_LEAF_K 4
+#define F1_SYM_INTERN_K 16
+#define FILE1 "tfile1.h5"
+#define SFILE1 "sys_file1"
+
+#define REOPEN_FILE "tfile_reopen.h5"
+#define REOPEN_DSET "dset"
+
+#define F2_USERBLOCK_SIZE (hsize_t)512
+#define F2_OFFSET_SIZE 8
+#define F2_LENGTH_SIZE 8
+#define F2_SYM_LEAF_K 8
+#define F2_SYM_INTERN_K 32
+#define F2_RANK 2
+#define F2_DIM0 4
+#define F2_DIM1 6
+#define F2_DSET "dset"
+#define FILE2 "tfile2.h5"
+
+#define F3_USERBLOCK_SIZE (hsize_t)0
+#define F3_OFFSET_SIZE F2_OFFSET_SIZE
+#define F3_LENGTH_SIZE F2_LENGTH_SIZE
+#define F3_SYM_LEAF_K F2_SYM_LEAF_K
+#define F3_SYM_INTERN_K F2_SYM_INTERN_K
+#define FILE3 "tfile3.h5"
+
+#define GRP_NAME "/group"
+#define DSET_NAME "dataset"
+#define ATTR_NAME "attr"
+#define TYPE_NAME "type"
+#define FILE4 "tfile4.h5"
+
+#define OBJ_ID_COUNT_0 0
+#define OBJ_ID_COUNT_1 1
+#define OBJ_ID_COUNT_2 2
+#define OBJ_ID_COUNT_3 3
+#define OBJ_ID_COUNT_4 4
+#define OBJ_ID_COUNT_6 6
+#define OBJ_ID_COUNT_8 8
+
+#define GROUP1 "Group1"
+#define DSET1 "Dataset1"
+#define DSET2 "/Group1/Dataset2"
+
+#define TESTA_GROUPNAME "group"
+#define TESTA_DSETNAME "dataset"
+#define TESTA_ATTRNAME "attribute"
+#define TESTA_DTYPENAME "compound"
+#define TESTA_NAME_BUF_SIZE 64
+#define TESTA_RANK 2
+#define TESTA_NX 4
+#define TESTA_NY 5
+
+#define USERBLOCK_SIZE ((hsize_t)512)
+
+/* Declarations for test_filespace_*() */
+#define FILENAME_LEN 1024 /* length of file name */
+#define DSETNAME "dset" /* Name of dataset */
+#define NELMTS(X) (sizeof(X) / sizeof(X[0])) /* # of elements */
+#define READ_OLD_BUFSIZE 1024 /* Buffer for holding file data */
+#define FILE5 "tfile5.h5" /* Test file */
+#define TEST_THRESHOLD10 10 /* Free space section threshold */
+#define FSP_SIZE_DEF 4096 /* File space page size default */
+#define FSP_SIZE512 512 /* File space page size */
+#define FSP_SIZE1G (1024 * 1024 * 1024) /* File space page size */
+
+/* Declaration for test_libver_macros2() */
+#define FILE6 "tfile6.h5" /* Test file */
+
+/* Declaration for test_get_obj_ids() */
+#define FILE7 "tfile7.h5" /* Test file */
+#define NGROUPS 2
+#define NDSETS 4
+
+/* Declaration for test_incr_filesize() */
+#define FILE8 "tfile8.h5" /* Test file */
+
+/* Files created under 1.6 branch and 1.8 branch--used in test_filespace_compatible() */
+const char *OLD_FILENAME[] = {
+ "filespace_1_6.h5", /* 1.6 HDF5 file */
+ "filespace_1_8.h5" /* 1.8 HDF5 file */
+};
+
+/* Files created in 1.10.0 release --used in test_filespace_1.10.0_compatible() */
+/* These files are copied from release 1.10.0 tools/h5format_convert/testfiles */
+const char *OLD_1_10_0_FILENAME[] = {
+ "h5fc_ext1_i.h5", /* 0 */
+ "h5fc_ext1_f.h5", /* 1 */
+ "h5fc_ext2_if.h5", /* 2 */
+ "h5fc_ext2_sf.h5", /* 3 */
+ "h5fc_ext3_isf.h5", /* 4 */
+ "h5fc_ext_none.h5" /* 5 */
+};
+
+/* Files used in test_filespace_round_compatible() */
+const char *FSPACE_FILENAMES[] = {
+ "fsm_aggr_nopersist.h5", /* H5F_FILE_SPACE_AGGR, not persisting free-space */
+ "fsm_aggr_persist.h5", /* H5F_FILE_SPACE_AGGR, persisting free-space */
+ "paged_nopersist.h5", /* H5F_FILE_SPACE_PAGE, not persisting free-space */
+ "paged_persist.h5", /* H5F_FILE_SPACE_PAGE, persisting free-space */
+ "aggr.h5", /* H5F_FILE_SPACE_AGGR */
+ "none.h5" /* H5F_FILE_SPACE_NONE */
+};
+
+const char *FILESPACE_NAME[] = {"tfilespace.h5", NULL};
+
+/* Declarations for test_libver_bounds_copy(): */
+/* SRC_FILE: source file created under 1.8 branch with latest format */
+/* DST_FILE: destination file for copying the dataset in SRC_FILE */
+/* DSET_DS1: the dataset created in SRC_FILE to be copied to DST_FILE */
+#define SRC_FILE "fill18.h5"
+#define DST_FILE "fill18_copy.h5"
+#define DSET_DS1 "DS1"
+
+#if 0
+/* Local test function declarations for version bounds */
+static void test_libver_bounds_low_high(const char *env_h5_drvr);
+static void test_libver_bounds_super(hid_t fapl, const char *env_h5_drvr);
+static void test_libver_bounds_super_create(hid_t fapl, hid_t fcpl, htri_t is_swmr, htri_t non_def_fsm);
+static void test_libver_bounds_super_open(hid_t fapl, hid_t fcpl, htri_t is_swmr, htri_t non_def_fsm);
+static void test_libver_bounds_obj(hid_t fapl);
+static void test_libver_bounds_dataset(hid_t fapl);
+static void test_libver_bounds_dataspace(hid_t fapl);
+static void test_libver_bounds_datatype(hid_t fapl);
+static void test_libver_bounds_datatype_check(hid_t fapl, hid_t tid);
+static void test_libver_bounds_attributes(hid_t fapl);
+#endif
+
+#define DSET_NULL "DSET_NULL"
+#define DSET "DSET"
+#define DSETA "DSETA"
+#define DSETB "DSETB"
+#define DSETC "DSETC"
+
+#if 0
+static void
+create_objects(hid_t, hid_t, hid_t *, hid_t *, hid_t *, hid_t *);
+static void
+test_obj_count_and_id(hid_t, hid_t, hid_t, hid_t, hid_t, hid_t);
+static void
+check_file_id(hid_t, hid_t);
+#endif
+
+#if 0
+/* Helper routine used by test_rw_noupdate() */
+static int cal_chksum(const char *file, uint32_t *chksum);
+
+static void test_rw_noupdate(void);
+#endif
+
+/****************************************************************
+**
+** test_file_create(): Low-level file creation I/O test routine.
+**
+****************************************************************/
+static void
+test_file_create(void)
+{
+ hid_t fid1 = H5I_INVALID_HID;
+ hid_t fid2 = H5I_INVALID_HID;
+ hid_t fid3 = H5I_INVALID_HID; /* HDF5 File IDs */
+ hid_t tmpl1, tmpl2; /* file creation templates */
+ hsize_t ublock; /* sizeof userblock */
+ size_t parm; /* file-creation parameters */
+ size_t parm2; /* file-creation parameters */
+ unsigned iparm;
+ unsigned iparm2;
+ herr_t ret; /*generic return value */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing Low-Level File Creation I/O\n"));
+
+ /* First ensure the file does not exist */
+ H5E_BEGIN_TRY
+ {
+ H5Fdelete(FILE1, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
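+    /* Note: the H5E_BEGIN_TRY / H5E_END_TRY pair temporarily disables HDF5's
+     * automatic error printing, so calls that are expected (or allowed) to
+     * fail, such as deleting a possibly nonexistent file here, do not clutter
+     * the test output. */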
+
+ /* Try opening a non-existent file */
+ H5E_BEGIN_TRY
+ {
+ fid1 = H5Fopen(FILE1, H5F_ACC_RDWR, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+ VERIFY(fid1, FAIL, "H5Fopen");
+
+ /* Test create with various sequences of H5F_ACC_EXCL and */
+ /* H5F_ACC_TRUNC flags */
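+    /* Summary of the semantics exercised below: H5F_ACC_EXCL fails whenever
+     * the file already exists, while H5F_ACC_TRUNC truncates an existing file
+     * but is expected to fail when that same file is currently open (that
+     * part is skipped when NO_TRUNCATE_OPEN_FILE is defined). */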
+
+ /* Create with H5F_ACC_EXCL */
+ fid1 = H5Fcreate(FILE1, H5F_ACC_EXCL, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(fid1, FAIL, "H5Fcreate");
+#ifndef NO_TRUNCATE_OPEN_FILE
+ /*
+ * try to create the same file with H5F_ACC_TRUNC. This should fail
+ * because fid1 is the same file and is currently open.
+ */
+ H5E_BEGIN_TRY
+ {
+ fid2 = H5Fcreate(FILE1, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+ VERIFY(fid2, FAIL, "H5Fcreate");
+#endif
+ /* Close all files */
+ ret = H5Fclose(fid1);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ H5E_BEGIN_TRY
+ {
+ ret = H5Fclose(fid2);
+ }
+ H5E_END_TRY;
+ VERIFY(ret, FAIL, "H5Fclose"); /*file should not have been open */
+
+ /*
+ * Try again with H5F_ACC_EXCL. This should fail because the file already
+ * exists from the previous steps.
+ */
+ H5E_BEGIN_TRY
+ {
+ fid1 = H5Fcreate(FILE1, H5F_ACC_EXCL, H5P_DEFAULT, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+ VERIFY(fid1, FAIL, "H5Fcreate");
+
+ /* Test create with H5F_ACC_TRUNC. This will truncate the existing file. */
+ fid1 = H5Fcreate(FILE1, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(fid1, FAIL, "H5Fcreate");
+#ifndef NO_TRUNCATE_OPEN_FILE
+ /*
+ * Try to truncate first file again. This should fail because fid1 is the
+ * same file and is currently open.
+ */
+ H5E_BEGIN_TRY
+ {
+ fid2 = H5Fcreate(FILE1, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+ VERIFY(fid2, FAIL, "H5Fcreate");
+#endif
+ /*
+ * Try with H5F_ACC_EXCL. This should fail too because the file already
+ * exists.
+ */
+ H5E_BEGIN_TRY
+ {
+ fid2 = H5Fcreate(FILE1, H5F_ACC_EXCL, H5P_DEFAULT, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+ VERIFY(fid2, FAIL, "H5Fcreate");
+
+ /* Get the file-creation template */
+ tmpl1 = H5Fget_create_plist(fid1);
+ CHECK(tmpl1, FAIL, "H5Fget_create_plist");
+
+ /* Get the file-creation parameters */
+ ret = H5Pget_userblock(tmpl1, &ublock);
+ CHECK(ret, FAIL, "H5Pget_userblock");
+ VERIFY(ublock, F1_USERBLOCK_SIZE, "H5Pget_userblock");
+
+ ret = H5Pget_sizes(tmpl1, &parm, &parm2);
+ CHECK(ret, FAIL, "H5Pget_sizes");
+ VERIFY(parm, F1_OFFSET_SIZE, "H5Pget_sizes");
+ VERIFY(parm2, F1_LENGTH_SIZE, "H5Pget_sizes");
+
+ ret = H5Pget_sym_k(tmpl1, &iparm, &iparm2);
+ CHECK(ret, FAIL, "H5Pget_sym_k");
+ VERIFY(iparm, F1_SYM_INTERN_K, "H5Pget_sym_k");
+ VERIFY(iparm2, F1_SYM_LEAF_K, "H5Pget_sym_k");
+
+ /* Release file-creation template */
+ ret = H5Pclose(tmpl1);
+ CHECK(ret, FAIL, "H5Pclose");
+
+#ifdef LATER
+ /* Double-check that the atom has been vaporized */
+ ret = H5Pclose(tmpl1);
+ VERIFY(ret, FAIL, "H5Pclose");
+#endif
+
+ if (h5_using_default_driver(NULL)) {
+
+ /* Create a new file with a non-standard file-creation template */
+ tmpl1 = H5Pcreate(H5P_FILE_CREATE);
+ CHECK(tmpl1, FAIL, "H5Pcreate");
+
+ /* Try setting some bad userblock sizes */
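+        /* A userblock size is valid only if it is 0 or a power of two of at
+         * least 512 bytes, so each of the sizes below (1, 2, 3, 64, 511, 513,
+         * and 6144) should be rejected by H5Pset_userblock(). */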
+ H5E_BEGIN_TRY
+ {
+ ret = H5Pset_userblock(tmpl1, BAD_USERBLOCK_SIZE1);
+ }
+ H5E_END_TRY;
+ VERIFY(ret, FAIL, "H5Pset_userblock");
+ H5E_BEGIN_TRY
+ {
+ ret = H5Pset_userblock(tmpl1, BAD_USERBLOCK_SIZE2);
+ }
+ H5E_END_TRY;
+ VERIFY(ret, FAIL, "H5Pset_userblock");
+ H5E_BEGIN_TRY
+ {
+ ret = H5Pset_userblock(tmpl1, BAD_USERBLOCK_SIZE3);
+ }
+ H5E_END_TRY;
+ VERIFY(ret, FAIL, "H5Pset_userblock");
+ H5E_BEGIN_TRY
+ {
+ ret = H5Pset_userblock(tmpl1, BAD_USERBLOCK_SIZE4);
+ }
+ H5E_END_TRY;
+ VERIFY(ret, FAIL, "H5Pset_userblock");
+ H5E_BEGIN_TRY
+ {
+ ret = H5Pset_userblock(tmpl1, BAD_USERBLOCK_SIZE5);
+ }
+ H5E_END_TRY;
+ VERIFY(ret, FAIL, "H5Pset_userblock");
+ H5E_BEGIN_TRY
+ {
+ ret = H5Pset_userblock(tmpl1, BAD_USERBLOCK_SIZE6);
+ }
+ H5E_END_TRY;
+ VERIFY(ret, FAIL, "H5Pset_userblock");
+ H5E_BEGIN_TRY
+ {
+ ret = H5Pset_userblock(tmpl1, BAD_USERBLOCK_SIZE7);
+ }
+ H5E_END_TRY;
+ VERIFY(ret, FAIL, "H5Pset_userblock");
+
+ /* Set the new file-creation parameters */
+ ret = H5Pset_userblock(tmpl1, F2_USERBLOCK_SIZE);
+ CHECK(ret, FAIL, "H5Pset_userblock");
+
+ ret = H5Pset_sizes(tmpl1, (size_t)F2_OFFSET_SIZE, (size_t)F2_LENGTH_SIZE);
+ CHECK(ret, FAIL, "H5Pset_sizes");
+
+ ret = H5Pset_sym_k(tmpl1, F2_SYM_INTERN_K, F2_SYM_LEAF_K);
+ CHECK(ret, FAIL, "H5Pset_sym_k");
+
+ /*
+ * Try to create second file, with non-standard file-creation template
+ * params.
+ */
+ fid2 = H5Fcreate(FILE2, H5F_ACC_TRUNC, tmpl1, H5P_DEFAULT);
+ CHECK(fid2, FAIL, "H5Fcreate");
+
+ /* Release file-creation template */
+ ret = H5Pclose(tmpl1);
+ CHECK(ret, FAIL, "H5Pclose");
+
+ /* Make certain we can create a dataset properly in the file with the userblock */
+ {
+ hid_t dataset_id, dataspace_id; /* identifiers */
+ hsize_t dims[F2_RANK];
+ unsigned data[F2_DIM0][F2_DIM1];
+ unsigned i, j;
+
+ /* Create the data space for the dataset. */
+ dims[0] = F2_DIM0;
+ dims[1] = F2_DIM1;
+ dataspace_id = H5Screate_simple(F2_RANK, dims, NULL);
+ CHECK(dataspace_id, FAIL, "H5Screate_simple");
+
+ /* Create the dataset. */
+ dataset_id = H5Dcreate2(fid2, F2_DSET, H5T_NATIVE_UINT, dataspace_id, H5P_DEFAULT, H5P_DEFAULT,
+ H5P_DEFAULT);
+ CHECK(dataset_id, FAIL, "H5Dcreate2");
+
+ for (i = 0; i < F2_DIM0; i++)
+ for (j = 0; j < F2_DIM1; j++)
+ data[i][j] = i * 10 + j;
+
+ /* Write data to the new dataset */
+ ret = H5Dwrite(dataset_id, H5T_NATIVE_UINT, H5S_ALL, H5S_ALL, H5P_DEFAULT, data);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* End access to the dataset and release resources used by it. */
+ ret = H5Dclose(dataset_id);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Terminate access to the data space. */
+ ret = H5Sclose(dataspace_id);
+ CHECK(ret, FAIL, "H5Sclose");
+ }
+
+ /* Get the file-creation template */
+ tmpl1 = H5Fget_create_plist(fid2);
+ CHECK(tmpl1, FAIL, "H5Fget_create_plist");
+
+ /* Get the file-creation parameters */
+ ret = H5Pget_userblock(tmpl1, &ublock);
+ CHECK(ret, FAIL, "H5Pget_userblock");
+ VERIFY(ublock, F2_USERBLOCK_SIZE, "H5Pget_userblock");
+
+ ret = H5Pget_sizes(tmpl1, &parm, &parm2);
+ CHECK(ret, FAIL, "H5Pget_sizes");
+ VERIFY(parm, F2_OFFSET_SIZE, "H5Pget_sizes");
+ VERIFY(parm2, F2_LENGTH_SIZE, "H5Pget_sizes");
+
+ ret = H5Pget_sym_k(tmpl1, &iparm, &iparm2);
+ CHECK(ret, FAIL, "H5Pget_sym_k");
+ VERIFY(iparm, F2_SYM_INTERN_K, "H5Pget_sym_k");
+ VERIFY(iparm2, F2_SYM_LEAF_K, "H5Pget_sym_k");
+
+ /* Clone the file-creation template */
+ tmpl2 = H5Pcopy(tmpl1);
+ CHECK(tmpl2, FAIL, "H5Pcopy");
+
+ /* Release file-creation template */
+ ret = H5Pclose(tmpl1);
+ CHECK(ret, FAIL, "H5Pclose");
+
+ /* Set the new file-creation parameter */
+ ret = H5Pset_userblock(tmpl2, F3_USERBLOCK_SIZE);
+ CHECK(ret, FAIL, "H5Pset_userblock");
+
+ /*
+ * Try to create second file, with non-standard file-creation template
+ * params
+ */
+ fid3 = H5Fcreate(FILE3, H5F_ACC_TRUNC, tmpl2, H5P_DEFAULT);
+ CHECK(fid3, FAIL, "H5Fcreate");
+
+ /* Release file-creation template */
+ ret = H5Pclose(tmpl2);
+ CHECK(ret, FAIL, "H5Pclose");
+
+ /* Get the file-creation template */
+ tmpl1 = H5Fget_create_plist(fid3);
+ CHECK(tmpl1, FAIL, "H5Fget_create_plist");
+
+ /* Get the file-creation parameters */
+ ret = H5Pget_userblock(tmpl1, &ublock);
+ CHECK(ret, FAIL, "H5Pget_userblock");
+ VERIFY(ublock, F3_USERBLOCK_SIZE, "H5Pget_userblock");
+
+ ret = H5Pget_sizes(tmpl1, &parm, &parm2);
+ CHECK(ret, FAIL, "H5Pget_sizes");
+ VERIFY(parm, F3_OFFSET_SIZE, "H5Pget_sizes");
+ VERIFY(parm2, F3_LENGTH_SIZE, "H5Pget_sizes");
+
+ ret = H5Pget_sym_k(tmpl1, &iparm, &iparm2);
+ CHECK(ret, FAIL, "H5Pget_sym_k");
+ VERIFY(iparm, F3_SYM_INTERN_K, "H5Pget_sym_k");
+ VERIFY(iparm2, F3_SYM_LEAF_K, "H5Pget_sym_k");
+
+ /* Release file-creation template */
+ ret = H5Pclose(tmpl1);
+ CHECK(ret, FAIL, "H5Pclose");
+
+ /* Close second file */
+ ret = H5Fclose(fid2);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Close third file */
+ ret = H5Fclose(fid3);
+ CHECK(ret, FAIL, "H5Fclose");
+ }
+
+ /* Close first file */
+ ret = H5Fclose(fid1);
+ CHECK(ret, FAIL, "H5Fclose");
+} /* test_file_create() */
+
+/****************************************************************
+**
+** test_file_open(): Low-level file open I/O test routine.
+**
+****************************************************************/
+static void
+test_file_open(const char *env_h5_drvr)
+{
+ hid_t fid1; /*HDF5 File IDs */
+#if 0
+ hid_t fid2;
+ hid_t did; /*dataset ID */
+ hid_t fapl_id; /*file access property list ID */
+#endif
+ hid_t tmpl1; /*file creation templates */
+ hsize_t ublock; /*sizeof user block */
+ size_t parm; /*file-creation parameters */
+ size_t parm2; /*file-creation parameters */
+ unsigned iparm;
+ unsigned iparm2;
+ unsigned intent;
+ herr_t ret; /*generic return value */
+
+ /*
+ * Test single file open
+ */
+
+ /* Only run this test with sec2/default driver */
+ if (!h5_using_default_driver(env_h5_drvr))
+ return;
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing Low-Level File Opening I/O\n"));
+
+ /* Open first file */
+ fid1 = H5Fopen(FILE2, H5F_ACC_RDWR, H5P_DEFAULT);
+ CHECK(fid1, FAIL, "H5Fopen");
+
+ /* Get the intent */
+ ret = H5Fget_intent(fid1, &intent);
+ CHECK(ret, FAIL, "H5Fget_intent");
+ VERIFY(intent, H5F_ACC_RDWR, "H5Fget_intent");
+
+ /* Get the file-creation template */
+ tmpl1 = H5Fget_create_plist(fid1);
+ CHECK(tmpl1, FAIL, "H5Fget_create_plist");
+
+ /* Get the file-creation parameters */
+ ret = H5Pget_userblock(tmpl1, &ublock);
+ CHECK(ret, FAIL, "H5Pget_userblock");
+ VERIFY(ublock, F2_USERBLOCK_SIZE, "H5Pget_userblock");
+
+ ret = H5Pget_sizes(tmpl1, &parm, &parm2);
+ CHECK(ret, FAIL, "H5Pget_sizes");
+ VERIFY(parm, F2_OFFSET_SIZE, "H5Pget_sizes");
+ VERIFY(parm2, F2_LENGTH_SIZE, "H5Pget_sizes");
+
+ ret = H5Pget_sym_k(tmpl1, &iparm, &iparm2);
+ CHECK(ret, FAIL, "H5Pget_sym_k");
+ VERIFY(iparm, F2_SYM_INTERN_K, "H5Pget_sym_k");
+ VERIFY(iparm2, F2_SYM_LEAF_K, "H5Pget_sym_k");
+
+ /* Release file-creation template */
+ ret = H5Pclose(tmpl1);
+ CHECK(ret, FAIL, "H5Pclose");
+
+ /* Close first file */
+ ret = H5Fclose(fid1);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /*
+ * Test two file opens: one is opened H5F_ACC_RDONLY and H5F_CLOSE_WEAK.
+ * It's closed with an object left open. Then another is opened
+ * H5F_ACC_RDWR, which should fail.
+ */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing 2 File Openings - SKIPPED for now due to no file close degree support\n"));
+#if 0
+ /* Create file access property list */
+ fapl_id = H5Pcreate(H5P_FILE_ACCESS);
+ CHECK(fapl_id, FAIL, "H5Pcreate");
+
+ /* Set file close mode to H5F_CLOSE_WEAK */
+ ret = H5Pset_fclose_degree(fapl_id, H5F_CLOSE_WEAK);
+ CHECK(ret, FAIL, "H5Pset_fclose_degree");
+
+ /* Open file for first time */
+ fid1 = H5Fopen(FILE2, H5F_ACC_RDONLY, fapl_id);
+ CHECK(fid1, FAIL, "H5Fopen");
+
+ /* Check the intent */
+ ret = H5Fget_intent(fid1, &intent);
+ CHECK(ret, FAIL, "H5Fget_intent");
+ VERIFY(intent, H5F_ACC_RDONLY, "H5Fget_intent");
+
+ /* Open dataset */
+ did = H5Dopen2(fid1, F2_DSET, H5P_DEFAULT);
+ CHECK(did, FAIL, "H5Dopen2");
+
+ /* Check that the intent works even if NULL is passed in */
+ ret = H5Fget_intent(fid1, NULL);
+ CHECK(ret, FAIL, "H5Fget_intent");
+
+ /* Close first open */
+ ret = H5Fclose(fid1);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Open file for second time, which should fail. */
+ H5E_BEGIN_TRY
+ {
+ fid2 = H5Fopen(FILE2, H5F_ACC_RDWR, fapl_id);
+ }
+ H5E_END_TRY;
+ VERIFY(fid2, FAIL, "H5Fopen");
+
+ /* Check that the intent fails for an invalid ID */
+ H5E_BEGIN_TRY
+ {
+ ret = H5Fget_intent(fid1, &intent);
+ }
+ H5E_END_TRY;
+ VERIFY(ret, FAIL, "H5Fget_intent");
+
+ /* Close dataset from first open */
+ ret = H5Dclose(did);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ ret = H5Pclose(fapl_id);
+ CHECK(ret, FAIL, "H5Pclose");
+#endif
+} /* test_file_open() */
+
+/****************************************************************
+**
+** test_file_reopen(): File reopen test routine.
+**
+****************************************************************/
+static void
+test_file_reopen(void)
+{
+ hid_t fid = -1; /* file ID from initial open */
+ hid_t rfid = -1; /* file ID from reopen */
+ hid_t did = -1; /* dataset ID (both opens) */
+ hid_t sid = -1; /* dataspace ID for dataset creation */
+ hsize_t dims = 6; /* dataspace size */
+ herr_t ret; /* Generic return value */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing File Re-opening\n"));
+
+ /* Create file via first ID */
+ fid = H5Fcreate(REOPEN_FILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK_I(fid, "H5Fcreate");
+
+ /* Create a dataset in the file */
+ sid = H5Screate_simple(1, &dims, &dims);
+    CHECK_I(sid, "H5Screate_simple");
+ did = H5Dcreate2(fid, REOPEN_DSET, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK_I(did, "H5Dcreate2");
+
+ /* Close dataset and dataspace */
+ ret = H5Sclose(sid);
+ CHECK(ret, FAIL, "H5Sclose");
+ ret = H5Dclose(did);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Reopen the file with a different file ID */
+ rfid = H5Freopen(fid);
+ CHECK_I(rfid, "H5Freopen");
+
+ /* Reopen the dataset through the reopen file ID */
+ did = H5Dopen2(rfid, REOPEN_DSET, H5P_DEFAULT);
+ CHECK_I(did, "H5Dopen2");
+
+ /* Close and clean up */
+ ret = H5Dclose(did);
+ CHECK(ret, FAIL, "H5Dclose");
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+ ret = H5Fclose(rfid);
+ CHECK(ret, FAIL, "H5Fclose");
+ H5Fdelete(REOPEN_FILE, H5P_DEFAULT);
+
+} /* test_file_reopen() */
+
+/****************************************************************
+**
+** test_file_close(): low-level file close test routine.
+** It mainly tests behavior with close degree.
+**
+*****************************************************************/
+static void
+test_file_close(void)
+{
+#if 0
+ hid_t fid1, fid2;
+ hid_t fapl_id, access_id;
+ hid_t dataset_id, group_id1, group_id2, group_id3;
+ H5F_close_degree_t fc_degree;
+ herr_t ret;
+#endif
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing File Closing with file close degrees - SKIPPED for now due to no file close degree "
+ "support\n"));
+#if 0
+ /* Test behavior while opening file multiple times with different
+ * file close degree value
+ */
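+    /* Background on the close degrees used below: H5F_CLOSE_WEAK keeps the
+     * file alive until the last object in it is closed, H5F_CLOSE_SEMI makes
+     * H5Fclose() fail while objects are still open, H5F_CLOSE_STRONG closes
+     * any remaining objects when the file is closed, and H5F_CLOSE_DEFAULT
+     * defers to the file driver's default (typically WEAK).  All simultaneous
+     * opens of one file must resolve to the same degree, which is why the
+     * mismatched opens below are expected to fail. */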
+ fid1 = H5Fcreate(FILE1, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(fid1, FAIL, "H5Fcreate");
+
+ fapl_id = H5Pcreate(H5P_FILE_ACCESS);
+ CHECK(fapl_id, FAIL, "H5Pcreate");
+
+ ret = H5Pset_fclose_degree(fapl_id, H5F_CLOSE_STRONG);
+ CHECK(ret, FAIL, "H5Pset_fclose_degree");
+
+ ret = H5Pget_fclose_degree(fapl_id, &fc_degree);
+ VERIFY(fc_degree, H5F_CLOSE_STRONG, "H5Pget_fclose_degree");
+
+ /* should fail */
+ H5E_BEGIN_TRY
+ {
+ fid2 = H5Fopen(FILE1, H5F_ACC_RDWR, fapl_id);
+ }
+ H5E_END_TRY;
+ VERIFY(fid2, FAIL, "H5Fopen");
+
+ ret = H5Pset_fclose_degree(fapl_id, H5F_CLOSE_DEFAULT);
+ CHECK(ret, FAIL, "H5Pset_fclose_degree");
+
+ /* should succeed */
+ fid2 = H5Fopen(FILE1, H5F_ACC_RDWR, fapl_id);
+ CHECK(fid2, FAIL, "H5Fopen");
+
+ /* Close first open */
+ ret = H5Fclose(fid1);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Close second open */
+ ret = H5Fclose(fid2);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Test behavior while opening file multiple times with different file
+ * close degree
+ */
+ fid1 = H5Fcreate(FILE1, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(fid1, FAIL, "H5Fcreate");
+
+ ret = H5Pset_fclose_degree(fapl_id, H5F_CLOSE_WEAK);
+ CHECK(ret, FAIL, "H5Pset_fclose_degree");
+
+ ret = H5Pget_fclose_degree(fapl_id, &fc_degree);
+ VERIFY(fc_degree, H5F_CLOSE_WEAK, "H5Pget_fclose_degree");
+
+ /* should succeed */
+ fid2 = H5Fopen(FILE1, H5F_ACC_RDWR, fapl_id);
+ CHECK(fid2, FAIL, "H5Fopen");
+
+ /* Close first open */
+ ret = H5Fclose(fid1);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Close second open */
+ ret = H5Fclose(fid2);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Test behavior while opening file multiple times with file close
+ * degree STRONG */
+ ret = H5Pset_fclose_degree(fapl_id, H5F_CLOSE_STRONG);
+ CHECK(ret, FAIL, "H5Pset_fclose_degree");
+
+ fid1 = H5Fcreate(FILE1, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id);
+ CHECK(fid1, FAIL, "H5Fcreate");
+
+ ret = H5Pset_fclose_degree(fapl_id, H5F_CLOSE_WEAK);
+ CHECK(ret, FAIL, "H5Pset_fclose_degree");
+
+ /* should fail */
+ H5E_BEGIN_TRY
+ {
+ fid2 = H5Fopen(FILE1, H5F_ACC_RDWR, fapl_id);
+ }
+ H5E_END_TRY;
+ VERIFY(fid2, FAIL, "H5Fopen");
+
+ ret = H5Pset_fclose_degree(fapl_id, H5F_CLOSE_STRONG);
+ CHECK(ret, FAIL, "H5Pset_fclose_degree");
+
+ /* should succeed */
+ fid2 = H5Fopen(FILE1, H5F_ACC_RDWR, fapl_id);
+ CHECK(fid2, FAIL, "H5Fopen");
+
+ /* Create a dataset and a group in each file open respectively */
+ create_objects(fid1, fid2, NULL, NULL, NULL, NULL);
+
+ /* Close first open */
+ ret = H5Fclose(fid1);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Close second open */
+ ret = H5Fclose(fid2);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Test behavior while opening file multiple times with file close
+ * degree SEMI */
+ ret = H5Pset_fclose_degree(fapl_id, H5F_CLOSE_SEMI);
+ CHECK(ret, FAIL, "H5Pset_fclose_degree");
+
+ fid1 = H5Fcreate(FILE1, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id);
+ CHECK(fid1, FAIL, "H5Fcreate");
+
+ ret = H5Pset_fclose_degree(fapl_id, H5F_CLOSE_DEFAULT);
+ CHECK(ret, FAIL, "H5Pset_fclose_degree");
+
+ /* should fail */
+ H5E_BEGIN_TRY
+ {
+ fid2 = H5Fopen(FILE1, H5F_ACC_RDWR, fapl_id);
+ }
+ H5E_END_TRY;
+ VERIFY(fid2, FAIL, "H5Fopen");
+
+ ret = H5Pset_fclose_degree(fapl_id, H5F_CLOSE_SEMI);
+ CHECK(ret, FAIL, "H5Pset_fclose_degree");
+
+ /* should succeed */
+ fid2 = H5Fopen(FILE1, H5F_ACC_RDWR, fapl_id);
+ CHECK(fid2, FAIL, "H5Fopen");
+
+ /* Create a dataset and a group in each file open respectively */
+ create_objects(fid1, fid2, &dataset_id, &group_id1, &group_id2, &group_id3);
+
+ /* Close first open, should fail since it is SEMI and objects are
+ * still open. */
+ H5E_BEGIN_TRY
+ {
+ ret = H5Fclose(fid1);
+ }
+ H5E_END_TRY;
+ VERIFY(ret, FAIL, "H5Fclose");
+
+ /* Close second open, should fail since it is SEMI and objects are
+ * still open. */
+ H5E_BEGIN_TRY
+ {
+ ret = H5Fclose(fid2);
+ }
+ H5E_END_TRY;
+ VERIFY(ret, FAIL, "H5Fclose");
+
+ ret = H5Dclose(dataset_id);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close first open */
+ ret = H5Fclose(fid1);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ ret = H5Gclose(group_id1);
+ CHECK(ret, FAIL, "H5Gclose");
+
+ ret = H5Gclose(group_id2);
+ CHECK(ret, FAIL, "H5Gclose");
+
+ /* Close second open, should fail since it is SEMI and one group ID is
+ * still open. */
+ H5E_BEGIN_TRY
+ {
+ ret = H5Fclose(fid2);
+ }
+ H5E_END_TRY;
+ VERIFY(ret, FAIL, "H5Fclose");
+
+ /* Same check with H5Idec_ref() (should fail also) */
+ H5E_BEGIN_TRY
+ {
+ ret = H5Idec_ref(fid2);
+ }
+ H5E_END_TRY;
+ VERIFY(ret, FAIL, "H5Idec_ref");
+
+ ret = H5Gclose(group_id3);
+ CHECK(ret, FAIL, "H5Gclose");
+
+ /* Close second open again. Should succeed. */
+ ret = H5Fclose(fid2);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Test behavior while opening file multiple times with file close
+ * degree WEAK */
+ ret = H5Pset_fclose_degree(fapl_id, H5F_CLOSE_WEAK);
+ CHECK(ret, FAIL, "H5Pset_fclose_degree");
+
+ fid1 = H5Fcreate(FILE1, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id);
+ CHECK(fid1, FAIL, "H5Fcreate");
+
+ ret = H5Pset_fclose_degree(fapl_id, H5F_CLOSE_SEMI);
+ CHECK(ret, FAIL, "H5Pset_fclose_degree");
+
+ /* should fail */
+ H5E_BEGIN_TRY
+ {
+ fid2 = H5Fopen(FILE1, H5F_ACC_RDWR, fapl_id);
+ }
+ H5E_END_TRY;
+ VERIFY(fid2, FAIL, "H5Fopen");
+
+ ret = H5Pset_fclose_degree(fapl_id, H5F_CLOSE_DEFAULT);
+ CHECK(ret, FAIL, "H5Pset_fclose_degree");
+
+ /* should succeed */
+ fid2 = H5Fopen(FILE1, H5F_ACC_RDWR, fapl_id);
+ CHECK(fid2, FAIL, "H5Fopen");
+
+ /* Create a dataset and a group in each file open respectively */
+ create_objects(fid1, fid2, &dataset_id, &group_id1, &group_id2, &group_id3);
+
+ /* Create more new files and test object count and ID list functions */
+ test_obj_count_and_id(fid1, fid2, dataset_id, group_id1, group_id2, group_id3);
+
+ /* Close first open */
+ ret = H5Fclose(fid1);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Close second open. File will be finally closed after all objects
+ * are closed. */
+ ret = H5Fclose(fid2);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ ret = H5Dclose(dataset_id);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ ret = H5Gclose(group_id1);
+ CHECK(ret, FAIL, "H5Gclose");
+
+ ret = H5Gclose(group_id2);
+ CHECK(ret, FAIL, "H5Gclose");
+
+ ret = H5Gclose(group_id3);
+ CHECK(ret, FAIL, "H5Gclose");
+
+ /* Test behavior while opening file multiple times with file close
+ * degree DEFAULT */
+ ret = H5Pset_fclose_degree(fapl_id, H5F_CLOSE_DEFAULT);
+ CHECK(ret, FAIL, "H5Pset_fclose_degree");
+
+ fid1 = H5Fcreate(FILE1, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id);
+ CHECK(fid1, FAIL, "H5Fcreate");
+
+ ret = H5Pset_fclose_degree(fapl_id, H5F_CLOSE_SEMI);
+ CHECK(ret, FAIL, "H5Pset_fclose_degree");
+
+ /* should fail */
+ H5E_BEGIN_TRY
+ {
+ fid2 = H5Fopen(FILE1, H5F_ACC_RDWR, fapl_id);
+ }
+ H5E_END_TRY;
+ VERIFY(fid2, FAIL, "H5Fopen");
+
+ ret = H5Pset_fclose_degree(fapl_id, H5F_CLOSE_DEFAULT);
+ CHECK(ret, FAIL, "H5Pset_fclose_degree");
+
+ /* should succeed */
+ fid2 = H5Fopen(FILE1, H5F_ACC_RDWR, fapl_id);
+ CHECK(fid2, FAIL, "H5Fopen");
+
+ /* Create a dataset and a group in each file open respectively */
+ create_objects(fid1, fid2, &dataset_id, &group_id1, &group_id2, &group_id3);
+
+ access_id = H5Fget_access_plist(fid1);
+ CHECK(access_id, FAIL, "H5Fget_access_plist");
+
+ ret = H5Pget_fclose_degree(access_id, &fc_degree);
+ CHECK(ret, FAIL, "H5Pget_fclose_degree");
+
+ switch (fc_degree) {
+ case H5F_CLOSE_STRONG:
+ /* Close first open */
+ ret = H5Fclose(fid1);
+ CHECK(ret, FAIL, "H5Fclose");
+ /* Close second open */
+ ret = H5Fclose(fid2);
+ CHECK(ret, FAIL, "H5Fclose");
+ break;
+ case H5F_CLOSE_SEMI:
+ /* Close first open */
+ ret = H5Fclose(fid1);
+ CHECK(ret, FAIL, "H5Fclose");
+ ret = H5Dclose(dataset_id);
+ CHECK(ret, FAIL, "H5Dclose");
+ ret = H5Gclose(group_id1);
+ CHECK(ret, FAIL, "H5Gclose");
+ ret = H5Gclose(group_id2);
+ CHECK(ret, FAIL, "H5Gclose");
+ ret = H5Gclose(group_id3);
+ CHECK(ret, FAIL, "H5Gclose");
+ /* Close second open */
+ ret = H5Fclose(fid2);
+ CHECK(ret, FAIL, "H5Fclose");
+ break;
+ case H5F_CLOSE_WEAK:
+ /* Close first open */
+ ret = H5Fclose(fid1);
+ CHECK(ret, FAIL, "H5Fclose");
+ /* Close second open */
+ ret = H5Fclose(fid2);
+ CHECK(ret, FAIL, "H5Fclose");
+ ret = H5Dclose(dataset_id);
+ CHECK(ret, FAIL, "H5Dclose");
+ ret = H5Gclose(group_id1);
+ CHECK(ret, FAIL, "H5Gclose");
+ ret = H5Gclose(group_id2);
+ CHECK(ret, FAIL, "H5Gclose");
+ ret = H5Gclose(group_id3);
+ CHECK(ret, FAIL, "H5Gclose");
+ break;
+ case H5F_CLOSE_DEFAULT:
+ default:
+ CHECK(fc_degree, H5F_CLOSE_DEFAULT, "H5Pget_fclose_degree");
+ break;
+ }
+
+ /* Close file access property list */
+ ret = H5Pclose(fapl_id);
+ CHECK(ret, FAIL, "H5Pclose");
+ ret = H5Pclose(access_id);
+ CHECK(ret, FAIL, "H5Pclose");
+#endif
+}
+
+/****************************************************************
+**
+** create_objects(): routine called by test_file_close to create
+** a dataset and a group in file.
+**
+****************************************************************/
+#if 0
+static void
+create_objects(hid_t fid1, hid_t fid2, hid_t *ret_did, hid_t *ret_gid1, hid_t *ret_gid2, hid_t *ret_gid3)
+{
+ ssize_t oid_count;
+ herr_t ret;
+
+    /* Check reference counts of file IDs and opened object IDs.
+     * The verification is hard-coded, so if this testing is ever changed,
+     * remember to check this part and update the macros.
+     */
+ {
+ oid_count = H5Fget_obj_count(fid1, H5F_OBJ_ALL);
+ CHECK(oid_count, FAIL, "H5Fget_obj_count");
+ VERIFY(oid_count, OBJ_ID_COUNT_2, "H5Fget_obj_count");
+
+ oid_count = H5Fget_obj_count(fid1, H5F_OBJ_DATASET | H5F_OBJ_GROUP | H5F_OBJ_DATATYPE | H5F_OBJ_ATTR);
+ CHECK(oid_count, FAIL, "H5Fget_obj_count");
+ VERIFY(oid_count, OBJ_ID_COUNT_0, "H5Fget_obj_count");
+
+ oid_count = H5Fget_obj_count(fid2, H5F_OBJ_ALL);
+ CHECK(oid_count, FAIL, "H5Fget_obj_count");
+ VERIFY(oid_count, OBJ_ID_COUNT_2, "H5Fget_obj_count");
+
+ oid_count = H5Fget_obj_count(fid2, H5F_OBJ_DATASET | H5F_OBJ_GROUP | H5F_OBJ_DATATYPE | H5F_OBJ_ATTR);
+ CHECK(oid_count, FAIL, "H5Fget_obj_count");
+ VERIFY(oid_count, OBJ_ID_COUNT_0, "H5Fget_obj_count");
+ }
+
+ /* create a dataset in the first file open */
+ {
+ hid_t dataset_id, dataspace_id; /* identifiers */
+ hsize_t dims[F2_RANK];
+ unsigned data[F2_DIM0][F2_DIM1];
+ unsigned i, j;
+
+ /* Create the data space for the dataset. */
+ dims[0] = F2_DIM0;
+ dims[1] = F2_DIM1;
+ dataspace_id = H5Screate_simple(F2_RANK, dims, NULL);
+ CHECK(dataspace_id, FAIL, "H5Screate_simple");
+
+ /* Create the dataset. */
+ dataset_id =
+ H5Dcreate2(fid1, "/dset", H5T_NATIVE_UINT, dataspace_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(dataset_id, FAIL, "H5Dcreate2");
+
+ for (i = 0; i < F2_DIM0; i++)
+ for (j = 0; j < F2_DIM1; j++)
+ data[i][j] = i * 10 + j;
+
+ /* Write data to the new dataset */
+ ret = H5Dwrite(dataset_id, H5T_NATIVE_UINT, H5S_ALL, H5S_ALL, H5P_DEFAULT, data);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ if (ret_did != NULL)
+ *ret_did = dataset_id;
+
+ /* Terminate access to the data space. */
+ ret = H5Sclose(dataspace_id);
+ CHECK(ret, FAIL, "H5Sclose");
+ }
+
+ /* Create a group in the second file open */
+ {
+ hid_t gid1, gid2, gid3;
+ gid1 = H5Gcreate2(fid2, "/group", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(gid1, FAIL, "H5Gcreate2");
+ if (ret_gid1 != NULL)
+ *ret_gid1 = gid1;
+
+ gid2 = H5Gopen2(fid2, "/group", H5P_DEFAULT);
+ CHECK(gid2, FAIL, "H5Gopen2");
+ if (ret_gid2 != NULL)
+ *ret_gid2 = gid2;
+
+ gid3 = H5Gopen2(fid2, "/group", H5P_DEFAULT);
+ CHECK(gid3, FAIL, "H5Gopen2");
+ if (ret_gid3 != NULL)
+ *ret_gid3 = gid3;
+ }
+
+    /* Check reference counts of file IDs and opened object IDs.
+     * The verification is hard-coded, so if this testing is ever changed,
+     * remember to check this part and update the macros.
+     */
+ {
+ oid_count = H5Fget_obj_count(fid1, H5F_OBJ_ALL);
+ CHECK(oid_count, FAIL, "H5Fget_obj_count");
+ VERIFY(oid_count, OBJ_ID_COUNT_6, "H5Fget_obj_count");
+
+ oid_count = H5Fget_obj_count(fid1, H5F_OBJ_DATASET | H5F_OBJ_GROUP | H5F_OBJ_DATATYPE | H5F_OBJ_ATTR);
+ CHECK(oid_count, FAIL, "H5Fget_obj_count");
+ VERIFY(oid_count, OBJ_ID_COUNT_4, "H5Fget_obj_count");
+
+ oid_count = H5Fget_obj_count(fid2, H5F_OBJ_ALL);
+ CHECK(oid_count, FAIL, "H5Fget_obj_count");
+ VERIFY(oid_count, OBJ_ID_COUNT_6, "H5Fget_obj_count");
+
+ oid_count = H5Fget_obj_count(fid2, H5F_OBJ_DATASET | H5F_OBJ_GROUP | H5F_OBJ_DATATYPE | H5F_OBJ_ATTR);
+ CHECK(oid_count, FAIL, "H5Fget_obj_count");
+ VERIFY(oid_count, OBJ_ID_COUNT_4, "H5Fget_obj_count");
+ }
+}
+#endif
+
+/****************************************************************
+**
+** test_get_obj_ids(): Test the bug and the fix for Jira 8528.
+** H5Fget_obj_ids overfilled the list of
+** object IDs by one. This is an enhancement
+** for test_obj_count_and_id().
+**
+****************************************************************/
+static void
+test_get_obj_ids(void)
+{
+ hid_t fid, gid[NGROUPS], dset[NDSETS];
+ hid_t filespace;
+ hsize_t file_dims[F2_RANK] = {F2_DIM0, F2_DIM1};
+ ssize_t oid_count, ret_count;
+ hid_t *oid_list = NULL;
+ herr_t ret;
+ int i, m, n;
+ ssize_t oid_list_size = NDSETS;
+ char gname[64], dname[64];
+
+ MESSAGE(5, ("Testing retrieval of object IDs\n"));
+
+ /* Create a new file */
+ fid = H5Fcreate(FILE7, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(fid, FAIL, "H5Fcreate");
+
+ filespace = H5Screate_simple(F2_RANK, file_dims, NULL);
+ CHECK(filespace, FAIL, "H5Screate_simple");
+
+ /* creates NGROUPS groups under the root group */
+ for (m = 0; m < NGROUPS; m++) {
+ HDsnprintf(gname, sizeof(gname), "group%d", m);
+ gid[m] = H5Gcreate2(fid, gname, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(gid[m], FAIL, "H5Gcreate2");
+ }
+
+ /* create NDSETS datasets under the root group */
+ for (n = 0; n < NDSETS; n++) {
+ HDsnprintf(dname, sizeof(dname), "dataset%d", n);
+ dset[n] = H5Dcreate2(fid, dname, H5T_NATIVE_INT, filespace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(dset[n], FAIL, "H5Dcreate2");
+ }
+
+    /* The number of opened objects should be NGROUPS + NDSETS + 1 (2 + 4 + 1 = 7); the extra one is the
+     * opened file itself. */
+ oid_count = H5Fget_obj_count(fid, H5F_OBJ_ALL);
+ CHECK(oid_count, FAIL, "H5Fget_obj_count");
+ VERIFY(oid_count, (NGROUPS + NDSETS + 1), "H5Fget_obj_count");
+
+ oid_list = (hid_t *)HDcalloc((size_t)oid_list_size, sizeof(hid_t));
+ CHECK_PTR(oid_list, "HDcalloc");
+
+    /* Call the public function H5Fget_obj_ids (which uses H5F__get_objects internally). A user reported a
+     * problem here: the returned size (ret_count) from H5Fget_obj_ids was one greater than the size passed
+     * in (oid_list_size). */
+ ret_count = H5Fget_obj_ids(fid, H5F_OBJ_ALL, (size_t)oid_list_size, oid_list);
+ CHECK(ret_count, FAIL, "H5Fget_obj_ids");
+ VERIFY(ret_count, oid_list_size, "H5Fget_obj_count");
+
+    /* Close all object IDs on the list except the file ID. The first ID is supposed to be the file ID
+     * according to the library design. */
+ for (i = 0; i < ret_count; i++) {
+ if (fid != oid_list[i]) {
+ ret = H5Oclose(oid_list[i]);
+ CHECK(ret, FAIL, "H5Oclose");
+ }
+ }
+
+    /* The number of opened objects should be NGROUPS + 1 + 1. The first one is the opened file. The second
+     * one is the dataset ID left open from the previous round of H5Fget_obj_ids. */
+ oid_count = H5Fget_obj_count(fid, H5F_OBJ_ALL);
+ CHECK(oid_count, FAIL, "H5Fget_obj_count");
+ VERIFY(oid_count, NGROUPS + 2, "H5Fget_obj_count");
+
+    /* Get the IDs of the remaining open objects */
+ ret_count = H5Fget_obj_ids(fid, H5F_OBJ_ALL, (size_t)oid_list_size, oid_list);
+ CHECK(ret_count, FAIL, "H5Fget_obj_ids");
+ VERIFY(ret_count, oid_list_size, "H5Fget_obj_count");
+
+ /* Close all object IDs on the list except the file ID. The first ID is still the file ID */
+ for (i = 0; i < ret_count; i++) {
+ if (fid != oid_list[i]) {
+ ret = H5Oclose(oid_list[i]);
+ CHECK(ret, FAIL, "H5Oclose");
+ }
+ }
+
+ H5Sclose(filespace);
+ H5Fclose(fid);
+
+ HDfree(oid_list);
+
+    /* Reopen the file to check whether H5Fget_obj_count and H5Fget_obj_ids still work
+     * when the file is closed first */
+ fid = H5Fopen(FILE7, H5F_ACC_RDONLY, H5P_DEFAULT);
+ CHECK(fid, FAIL, "H5Fopen");
+
+ /* Open NDSETS datasets under the root group */
+ for (n = 0; n < NDSETS; n++) {
+ HDsnprintf(dname, sizeof(dname), "dataset%d", n);
+ dset[n] = H5Dopen2(fid, dname, H5P_DEFAULT);
+        CHECK(dset[n], FAIL, "H5Dopen2");
+ }
+
+ /* Close the file first */
+ H5Fclose(fid);
+#ifndef WRONG_DATATYPE_OBJ_COUNT
+ /* Get the number of all opened objects */
+ oid_count = H5Fget_obj_count((hid_t)H5F_OBJ_ALL, H5F_OBJ_ALL);
+ CHECK(oid_count, FAIL, "H5Fget_obj_count");
+ VERIFY(oid_count, NDSETS, "H5Fget_obj_count");
+
+ oid_list = (hid_t *)HDcalloc((size_t)oid_count, sizeof(hid_t));
+ CHECK_PTR(oid_list, "HDcalloc");
+
+ /* Get the list of all opened objects */
+ ret_count = H5Fget_obj_ids((hid_t)H5F_OBJ_ALL, H5F_OBJ_ALL, (size_t)oid_count, oid_list);
+ CHECK(ret_count, FAIL, "H5Fget_obj_ids");
+ VERIFY(ret_count, NDSETS, "H5Fget_obj_ids");
+
+ H5E_BEGIN_TRY
+ {
+ /* Close all open objects with H5Oclose */
+ for (n = 0; n < oid_count; n++)
+ H5Oclose(oid_list[n]);
+ }
+ H5E_END_TRY;
+
+ HDfree(oid_list);
+#endif
+}
+
+/****************************************************************
+**
+** test_get_file_id(): Test H5Iget_file_id()
+**
+*****************************************************************/
+static void
+test_get_file_id(void)
+{
+#if 0
+ hid_t fid, fid2, fid3;
+ hid_t datatype_id, dataset_id, dataspace_id, group_id, attr_id;
+ hid_t plist;
+ hsize_t dims[F2_RANK];
+ unsigned intent;
+ herr_t ret;
+#endif
+
+ MESSAGE(5, ("Testing H5Iget_file_id - SKIPPED for now due to no H5Iget_file_id support\n"));
+#if 0
+ /* Create a file */
+ fid = H5Fcreate(FILE4, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(fid, FAIL, "H5Fcreate");
+
+ /* Check the intent */
+ ret = H5Fget_intent(fid, &intent);
+ CHECK(ret, FAIL, "H5Fget_intent");
+ VERIFY(intent, H5F_ACC_RDWR, "H5Fget_intent");
+
+ /* Test H5Iget_file_id() */
+ check_file_id(fid, fid);
+
+ /* Create a group in the file. Make a duplicated file ID from the group.
+ * And close this duplicated ID
+ */
+ group_id = H5Gcreate2(fid, GRP_NAME, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(group_id, FAIL, "H5Gcreate2");
+
+ /* Test H5Iget_file_id() */
+ check_file_id(fid, group_id);
+
+ /* Close the file and get file ID from the group ID */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Test H5Iget_file_id() */
+ check_file_id((hid_t)-1, group_id);
+
+ ret = H5Gclose(group_id);
+ CHECK(ret, FAIL, "H5Gclose");
+
+ /* Open the file again. Test H5Iget_file_id() */
+ fid = H5Fopen(FILE4, H5F_ACC_RDWR, H5P_DEFAULT);
+ CHECK(fid, FAIL, "H5Fcreate");
+
+ group_id = H5Gopen2(fid, GRP_NAME, H5P_DEFAULT);
+ CHECK(group_id, FAIL, "H5Gopen2");
+
+ /* Test H5Iget_file_id() */
+ check_file_id(fid, group_id);
+
+ /* Open the file for second time. Test H5Iget_file_id() */
+ fid3 = H5Freopen(fid);
+ CHECK(fid3, FAIL, "H5Freopen");
+
+ /* Test H5Iget_file_id() */
+ check_file_id(fid3, fid3);
+
+ ret = H5Fclose(fid3);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Create a dataset in the group. Make a duplicated file ID from the
+ * dataset. And close this duplicated ID.
+ */
+ dims[0] = F2_DIM0;
+ dims[1] = F2_DIM1;
+ dataspace_id = H5Screate_simple(F2_RANK, dims, NULL);
+ CHECK(dataspace_id, FAIL, "H5Screate_simple");
+
+ dataset_id =
+ H5Dcreate2(group_id, DSET_NAME, H5T_NATIVE_INT, dataspace_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(dataset_id, FAIL, "H5Dcreate2");
+
+ /* Test H5Iget_file_id() */
+ check_file_id(fid, dataset_id);
+
+ /* Create an attribute for the dataset. Make a duplicated file ID from
+ * this attribute. And close it.
+ */
+ attr_id = H5Acreate2(dataset_id, ATTR_NAME, H5T_NATIVE_INT, dataspace_id, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Acreate2");
+
+ /* Test H5Iget_file_id() */
+ check_file_id(fid, attr_id);
+
+ /* Create a named datatype. Make a duplicated file ID from
+     * this datatype. And close it.
+ */
+ datatype_id = H5Tcopy(H5T_NATIVE_INT);
+ CHECK(ret, FAIL, "H5Tcopy");
+
+ ret = H5Tcommit2(fid, TYPE_NAME, datatype_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Tcommit2");
+
+ /* Test H5Iget_file_id() */
+ check_file_id(fid, datatype_id);
+
+ /* Create a property list and try to get file ID from it.
+ * Supposed to fail.
+ */
+ plist = H5Pcreate(H5P_FILE_ACCESS);
+ CHECK(plist, FAIL, "H5Pcreate");
+
+ H5E_BEGIN_TRY
+ {
+ fid2 = H5Iget_file_id(plist);
+ }
+ H5E_END_TRY;
+ VERIFY(fid2, FAIL, "H5Iget_file_id");
+
+ /* Close objects */
+ ret = H5Pclose(plist);
+ CHECK(ret, FAIL, "H5Pclose");
+
+ ret = H5Tclose(datatype_id);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ ret = H5Aclose(attr_id);
+ CHECK(ret, FAIL, "H5Aclose");
+
+ ret = H5Sclose(dataspace_id);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ ret = H5Dclose(dataset_id);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ ret = H5Gclose(group_id);
+ CHECK(ret, FAIL, "H5Gclose");
+
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+#endif
+}
+
+/****************************************************************
+**
+** check_file_id(): Internal function of test_get_file_id()
+**
+*****************************************************************/
+#if 0
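+/* H5Iget_file_id() returns a new file ID for the file containing the given
+ * object; the caller owns that ID and must release it with H5Fclose(), which
+ * is what this helper does after comparing it against the expected file ID
+ * (when one is supplied). */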
+static void
+check_file_id(hid_t fid, hid_t object_id)
+{
+ hid_t new_fid;
+ herr_t ret;
+
+    /* Get a duplicated file ID, even though users are not normally
+     * expected to do this, and close the duplicated ID afterwards.
+ */
+ new_fid = H5Iget_file_id(object_id);
+
+ if (fid >= 0)
+ VERIFY(new_fid, fid, "H5Iget_file_id");
+ else
+ CHECK(new_fid, FAIL, "H5Iget_file_id");
+
+ ret = H5Fclose(new_fid);
+ CHECK(ret, FAIL, "H5Fclose");
+}
+#endif
+
+/****************************************************************
+**
+** test_obj_count_and_id(): test object count and ID list functions.
+**
+****************************************************************/
+#if 0
+static void
+test_obj_count_and_id(hid_t fid1, hid_t fid2, hid_t did, hid_t gid1, hid_t gid2, hid_t gid3)
+{
+ hid_t fid3, fid4;
+ ssize_t oid_count, ret_count;
+ herr_t ret;
+
+ /* Create two new files */
+ fid3 = H5Fcreate(FILE2, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(fid3, FAIL, "H5Fcreate");
+ fid4 = H5Fcreate(FILE3, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(fid4, FAIL, "H5Fcreate");
+
+    /* test object count of all file IDs open */
+ oid_count = H5Fget_obj_count((hid_t)H5F_OBJ_ALL, H5F_OBJ_FILE);
+ CHECK(oid_count, FAIL, "H5Fget_obj_count");
+ VERIFY(oid_count, OBJ_ID_COUNT_4, "H5Fget_obj_count");
+
+ /* test object count of all datasets open */
+ oid_count = H5Fget_obj_count((hid_t)H5F_OBJ_ALL, H5F_OBJ_DATASET);
+ CHECK(oid_count, FAIL, "H5Fget_obj_count");
+ VERIFY(oid_count, OBJ_ID_COUNT_1, "H5Fget_obj_count");
+
+ /* test object count of all groups open */
+ oid_count = H5Fget_obj_count((hid_t)H5F_OBJ_ALL, H5F_OBJ_GROUP);
+ CHECK(oid_count, FAIL, "H5Fget_obj_count");
+ VERIFY(oid_count, OBJ_ID_COUNT_3, "H5Fget_obj_count");
+
+ /* test object count of all named datatypes open */
+ oid_count = H5Fget_obj_count((hid_t)H5F_OBJ_ALL, H5F_OBJ_DATATYPE);
+ CHECK(oid_count, FAIL, "H5Fget_obj_count");
+ VERIFY(oid_count, OBJ_ID_COUNT_0, "H5Fget_obj_count");
+
+ /* test object count of all attributes open */
+ oid_count = H5Fget_obj_count((hid_t)H5F_OBJ_ALL, H5F_OBJ_ATTR);
+ CHECK(oid_count, FAIL, "H5Fget_obj_count");
+ VERIFY(oid_count, OBJ_ID_COUNT_0, "H5Fget_obj_count");
+
+ /* test object count of all objects currently open */
+ oid_count = H5Fget_obj_count((hid_t)H5F_OBJ_ALL, H5F_OBJ_ALL);
+ CHECK(oid_count, FAIL, "H5Fget_obj_count");
+ VERIFY(oid_count, OBJ_ID_COUNT_8, "H5Fget_obj_count");
+
+ if (oid_count > 0) {
+ hid_t *oid_list;
+
+ oid_list = (hid_t *)HDcalloc((size_t)oid_count, sizeof(hid_t));
+ if (oid_list != NULL) {
+ int i;
+
+ ret_count = H5Fget_obj_ids((hid_t)H5F_OBJ_ALL, H5F_OBJ_ALL, (size_t)oid_count, oid_list);
+ CHECK(ret_count, FAIL, "H5Fget_obj_ids");
+
+ for (i = 0; i < oid_count; i++) {
+ H5I_type_t id_type;
+
+ id_type = H5Iget_type(oid_list[i]);
+ switch (id_type) {
+ case H5I_FILE:
+ if (oid_list[i] != fid1 && oid_list[i] != fid2 && oid_list[i] != fid3 &&
+ oid_list[i] != fid4)
+ ERROR("H5Fget_obj_ids");
+ break;
+
+ case H5I_GROUP:
+ if (oid_list[i] != gid1 && oid_list[i] != gid2 && oid_list[i] != gid3)
+ ERROR("H5Fget_obj_ids");
+ break;
+
+ case H5I_DATASET:
+ VERIFY(oid_list[i], did, "H5Fget_obj_ids");
+ break;
+
+ case H5I_MAP:
+ /* TODO: Not supported in native VOL connector yet */
+
+ case H5I_UNINIT:
+ case H5I_BADID:
+ case H5I_DATATYPE:
+ case H5I_DATASPACE:
+ case H5I_ATTR:
+ case H5I_VFL:
+ case H5I_VOL:
+ case H5I_GENPROP_CLS:
+ case H5I_GENPROP_LST:
+ case H5I_ERROR_CLASS:
+ case H5I_ERROR_MSG:
+ case H5I_ERROR_STACK:
+ case H5I_SPACE_SEL_ITER:
+ case H5I_EVENTSET:
+ case H5I_NTYPES:
+ default:
+ ERROR("H5Fget_obj_ids");
+ } /* end switch */
+ } /* end for */
+
+ HDfree(oid_list);
+ } /* end if */
+ } /* end if */
+
+ /* close the two new files */
+ ret = H5Fclose(fid3);
+ CHECK(ret, FAIL, "H5Fclose");
+ ret = H5Fclose(fid4);
+ CHECK(ret, FAIL, "H5Fclose");
+}
+#endif
+
+/****************************************************************
+**
+** test_file_perm(): low-level file test routine.
+** This test verifies that a file can be opened for both
+** read-only and read-write access and things will be handled
+** appropriately.
+**
+*****************************************************************/
+static void
+test_file_perm(void)
+{
+ hid_t file; /* File opened with read-write permission */
+ hid_t filero; /* Same file opened with read-only permission */
+ hid_t dspace; /* Dataspace ID */
+ hid_t dset; /* Dataset ID */
+ herr_t ret;
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing Low-Level File Permissions\n"));
+
+ dspace = H5Screate(H5S_SCALAR);
+ CHECK(dspace, FAIL, "H5Screate");
+
+ /* Create the file (with read-write permission) */
+ file = H5Fcreate(FILE2, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(file, FAIL, "H5Fcreate");
+
+ /* Create a dataset with the read-write file handle */
+ dset = H5Dcreate2(file, F2_DSET, H5T_NATIVE_INT, dspace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(dset, FAIL, "H5Dcreate2");
+
+ ret = H5Dclose(dset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Open the file (with read-only permission) */
+ filero = H5Fopen(FILE2, H5F_ACC_RDONLY, H5P_DEFAULT);
+ CHECK(filero, FAIL, "H5Fopen");
+
+ /* Create a dataset with the read-only file handle (should fail) */
+ H5E_BEGIN_TRY
+ {
+ dset = H5Dcreate2(filero, F2_DSET, H5T_NATIVE_INT, dspace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+ VERIFY(dset, FAIL, "H5Dcreate2");
+ if (dset != FAIL) {
+ ret = H5Dclose(dset);
+ CHECK(ret, FAIL, "H5Dclose");
+ } /* end if */
+
+ ret = H5Fclose(filero);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ ret = H5Fclose(file);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ ret = H5Sclose(dspace);
+ CHECK(ret, FAIL, "H5Sclose");
+
+} /* end test_file_perm() */
+
+/****************************************************************
+**
+** test_file_perm2(): low-level file test routine.
+** This test verifies that no object can be created in a
+** file that is opened for read-only.
+**
+*****************************************************************/
+static void
+test_file_perm2(void)
+{
+ hid_t file; /* File opened with read-write permission */
+ hid_t filero; /* Same file opened with read-only permission */
+ hid_t dspace; /* Dataspace ID */
+ hid_t group; /* Group ID */
+ hid_t dset; /* Dataset ID */
+ hid_t type; /* Datatype ID */
+ hid_t attr; /* Attribute ID */
+ herr_t ret;
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing Low-Level File Permissions again\n"));
+
+ dspace = H5Screate(H5S_SCALAR);
+ CHECK(dspace, FAIL, "H5Screate");
+
+ /* Create the file (with read-write permission) */
+ file = H5Fcreate(FILE2, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(file, FAIL, "H5Fcreate");
+
+ ret = H5Fclose(file);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Open the file (with read-only permission) */
+ filero = H5Fopen(FILE2, H5F_ACC_RDONLY, H5P_DEFAULT);
+ CHECK(filero, FAIL, "H5Fopen");
+
+ /* Create a group with the read-only file handle (should fail) */
+ H5E_BEGIN_TRY
+ {
+ group = H5Gcreate2(filero, "MY_GROUP", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+ VERIFY(group, FAIL, "H5Gcreate2");
+
+ /* Create a dataset with the read-only file handle (should fail) */
+ H5E_BEGIN_TRY
+ {
+ dset = H5Dcreate2(filero, F2_DSET, H5T_NATIVE_INT, dspace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+ VERIFY(dset, FAIL, "H5Dcreate2");
+
+ /* Create an attribute with the read-only file handle (should fail) */
+ H5E_BEGIN_TRY
+ {
+ attr = H5Acreate2(filero, "MY_ATTR", H5T_NATIVE_INT, dspace, H5P_DEFAULT, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+ VERIFY(attr, FAIL, "H5Acreate2");
+
+ type = H5Tcopy(H5T_NATIVE_SHORT);
+ CHECK(type, FAIL, "H5Tcopy");
+
+ /* Commit a datatype with the read-only file handle (should fail) */
+ H5E_BEGIN_TRY
+ {
+ ret = H5Tcommit2(filero, "MY_DTYPE", type, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+ VERIFY(ret, FAIL, "H5Tcommit2");
+
+ ret = H5Tclose(type);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ ret = H5Fclose(filero);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ ret = H5Sclose(dspace);
+ CHECK(ret, FAIL, "H5Sclose");
+} /* end test_file_perm2() */
+
+/****************************************************************
+**
+** test_file_is_accessible(): low-level file test routine.
+** Clone of test_file_ishdf5 but uses the newer VOL-enabled
+** H5Fis_accessible() API call.
+**
+*****************************************************************/
+#define FILE_IS_ACCESSIBLE "tfile_is_accessible"
+#define FILE_IS_ACCESSIBLE_NON_HDF5 "tfile_is_accessible_non_hdf5"
+static void
+test_file_is_accessible(const char *env_h5_drvr)
+{
+ hid_t fid = H5I_INVALID_HID; /* File opened with read-write permission */
+ hid_t fcpl_id = H5I_INVALID_HID; /* File creation property list */
+ hid_t fapl_id = H5I_INVALID_HID; /* File access property list */
+#if 0
+ int fd; /* POSIX file descriptor */
+#endif
+ char filename[FILENAME_LEN]; /* Filename to use */
+ char non_hdf5_filename[FILENAME_LEN]; /* Base name of non-hdf5 file */
+ char non_hdf5_sb_filename[FILENAME_LEN]; /* Name of non-hdf5 superblock file */
+#if 0
+ ssize_t nbytes; /* Number of bytes written */
+ unsigned u; /* Local index variable */
+ unsigned char buf[1024]; /* Buffer of data to write */
+#endif
+ htri_t is_hdf5; /* Whether a file is an HDF5 file */
+#if 0
+ int posix_ret; /* Return value from POSIX calls */
+#endif
+ hbool_t driver_is_default_compatible;
+ herr_t ret; /* Return value from HDF5 calls */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing Detection of HDF5 Files\n"));
+
+ /* Get FAPL */
+ fapl_id = h5_fileaccess();
+ CHECK(fapl_id, H5I_INVALID_HID, "H5Pcreate");
+
+ if (h5_driver_is_default_vfd_compatible(fapl_id, &driver_is_default_compatible) < 0) {
+ TestErrPrintf("Can't check if VFD is compatible with default VFD");
+ return;
+ }
+
+ /* Fix up filenames */
+ h5_fixname(FILE_IS_ACCESSIBLE, fapl_id, filename, sizeof(filename));
+ h5_fixname(FILE_IS_ACCESSIBLE_NON_HDF5, fapl_id, non_hdf5_filename, sizeof(non_hdf5_filename));
+ h5_fixname_superblock(FILE_IS_ACCESSIBLE_NON_HDF5, fapl_id, non_hdf5_sb_filename,
+ sizeof(non_hdf5_sb_filename));
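+    /* h5_fixname() and h5_fixname_superblock() (HDF5 test library helpers)
+     * expand a base name into the actual on-disk file name for the current
+     * FAPL/VFD settings, adding any driver-specific prefix or suffix, so the
+     * checks below work regardless of which driver the test runs with. */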
+
+ /****************/
+ /* Normal usage */
+ /****************/
+
+ /* Create a file */
+ fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id);
+ CHECK(fid, H5I_INVALID_HID, "H5Fcreate");
+
+ /* Close file */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Verify that the file is an HDF5 file */
+ is_hdf5 = H5Fis_accessible(filename, fapl_id);
+ VERIFY(is_hdf5, TRUE, "H5Fis_accessible");
+
+ /*****************************************/
+ /* Newly created file that is still open */
+ /*****************************************/
+
+ /* On Windows, file locking is mandatory so this check ensures that
+ * H5Fis_accessible() works on files that have an exclusive lock.
+ * Previous versions of this API call created an additional file handle
+ * and attempted to read through it, which will not work when locks
+ * are enforced by the OS.
+ */
+
+ /* Create a file and hold it open */
+ fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id);
+ CHECK(fid, H5I_INVALID_HID, "H5Fcreate");
+
+ /* Verify that the file is an HDF5 file */
+ is_hdf5 = H5Fis_accessible(filename, fapl_id);
+ VERIFY(is_hdf5, TRUE, "H5Fis_accessible");
+
+ /* Close file */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /*******************************/
+ /* Non-default user block size */
+ /*******************************/
+
+ /* This test is not currently working for the family VFD.
+ * There are failures when creating files with userblocks.
+ */
+ if (0 != HDstrcmp(env_h5_drvr, "family")) {
+ /* Create a file creation property list with a non-default user block size */
+ fcpl_id = H5Pcreate(H5P_FILE_CREATE);
+ CHECK(fcpl_id, H5I_INVALID_HID, "H5Pcreate");
+
+ ret = H5Pset_userblock(fcpl_id, (hsize_t)2048);
+ CHECK(ret, FAIL, "H5Pset_userblock");
+
+ /* Create file with non-default user block */
+ fid = H5Fcreate(filename, H5F_ACC_TRUNC, fcpl_id, fapl_id);
+ CHECK(fid, H5I_INVALID_HID, "H5Fcreate");
+
+ /* Release file-creation property list */
+ ret = H5Pclose(fcpl_id);
+ CHECK(ret, FAIL, "H5Pclose");
+
+ /* Close file */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Verify that the file is an HDF5 file */
+ is_hdf5 = H5Fis_accessible(filename, fapl_id);
+ VERIFY(is_hdf5, TRUE, "H5Fis_accessible");
+ } /* end if */
+#if 0
+ if (driver_is_default_compatible) {
+ /***********************/
+ /* EMPTY non-HDF5 file */
+ /***********************/
+
+ /* Create non-HDF5 file and check it */
+ fd = HDopen(non_hdf5_sb_filename, O_RDWR | O_CREAT | O_TRUNC, H5_POSIX_CREATE_MODE_RW);
+ CHECK(fd, (-1), "HDopen");
+
+ /* Close the file */
+ posix_ret = HDclose(fd);
+ CHECK(posix_ret, (-1), "HDclose");
+
+ /* Verify that the file is NOT an HDF5 file using the base filename */
+ is_hdf5 = H5Fis_accessible(non_hdf5_filename, fapl_id);
+ VERIFY(is_hdf5, FALSE, "H5Fis_accessible (empty non-HDF5 file)");
+
+ /***************************/
+ /* Non-empty non-HDF5 file */
+ /***************************/
+
+ /* Create non-HDF5 file and check it */
+ fd = HDopen(non_hdf5_sb_filename, O_RDWR | O_CREAT | O_TRUNC, H5_POSIX_CREATE_MODE_RW);
+ CHECK(fd, (-1), "HDopen");
+
+ /* Initialize information to write */
+ for (u = 0; u < 1024; u++)
+ buf[u] = (unsigned char)u;
+
+ /* Write some information */
+ nbytes = HDwrite(fd, buf, (size_t)1024);
+ VERIFY(nbytes, 1024, "HDwrite");
+
+ /* Close the file */
+ posix_ret = HDclose(fd);
+ CHECK(posix_ret, (-1), "HDclose");
+
+ /* Verify that the file is not an HDF5 file */
+ is_hdf5 = H5Fis_accessible(non_hdf5_filename, fapl_id);
+ VERIFY(is_hdf5, FALSE, "H5Fis_accessible (non-HDF5 file)");
+ }
+
+ /* Clean up files */
+ h5_delete_test_file(filename, fapl_id);
+ h5_delete_test_file(non_hdf5_filename, fapl_id);
+#endif
+ H5Fdelete(filename, fapl_id);
+
+ /* Close property list */
+ ret = H5Pclose(fapl_id);
+ CHECK(ret, FAIL, "H5Pclose");
+
+} /* end test_file_is_accessible() */
+
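+/* Illustrative sketch (disabled with #if 0, not exercised by the test
+ * suite): the probe-before-open pattern that H5Fis_accessible() supports.
+ * The call returns a positive value when the file can be opened as HDF5
+ * with the given FAPL, zero when the file exists but is not an HDF5 file,
+ * and a negative value on error (for example, when the file does not
+ * exist, as test_file_delete() below verifies). The helper name and the
+ * read-only open are illustrative choices, not part of the test.
+ */
+#if 0
+static hid_t
+sketch_open_if_accessible(const char *name, hid_t fapl)
+{
+ htri_t accessible;
+
+ /* Suppress error output for the "not accessible" cases */
+ H5E_BEGIN_TRY
+ {
+ accessible = H5Fis_accessible(name, fapl);
+ }
+ H5E_END_TRY;
+
+ if (accessible > 0)
+ return H5Fopen(name, H5F_ACC_RDONLY, fapl);
+
+ /* Not an HDF5 file (0) or not reachable at all (< 0) */
+ return H5I_INVALID_HID;
+}
+#endif
+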
+/****************************************************************
+**
+** test_file_ishdf5(): low-level file test routine.
+** This test checks whether the H5Fis_hdf5() routine is working
+** correctly in various situations.
+**
+*****************************************************************/
+#if 0
+#ifndef H5_NO_DEPRECATED_SYMBOLS
+static void
+test_file_ishdf5(const char *env_h5_drvr)
+{
+ hid_t fid = H5I_INVALID_HID; /* File opened with read-write permission */
+ hid_t fcpl_id = H5I_INVALID_HID; /* File creation property list */
+ hid_t fapl_id = H5I_INVALID_HID; /* File access property list */
+ int fd; /* POSIX file descriptor */
+ char filename[FILENAME_LEN]; /* Filename to use */
+ char sb_filename[FILENAME_LEN]; /* Name of file w/ superblock */
+ ssize_t nbytes; /* Number of bytes written */
+ unsigned u; /* Local index variable */
+ unsigned char buf[1024]; /* Buffer of data to write */
+ htri_t is_hdf5; /* Whether a file is an HDF5 file */
+ int posix_ret; /* Return value from POSIX calls */
+ herr_t ret; /* Return value from HDF5 calls */
+
+ if (!h5_using_default_driver(env_h5_drvr))
+ return;
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing Detection of HDF5 Files (using deprecated H5Fis_hdf5() call)\n"));
+
+ /* Get FAPL */
+ fapl_id = h5_fileaccess();
+ CHECK(fapl_id, H5I_INVALID_HID, "H5Pcreate");
+
+ /* Fix up filenames
+ * For VFDs that create multiple files, we also need the name
+ * of the file with the superblock. With single-file VFDs, this
+ * will be equal to the one from h5_fixname().
+ */
+ h5_fixname(FILE_IS_ACCESSIBLE, fapl_id, filename, sizeof(filename));
+ h5_fixname_superblock(FILE_IS_ACCESSIBLE, fapl_id, sb_filename, sizeof(sb_filename));
+
+ /****************/
+ /* Normal usage */
+ /****************/
+
+ /* Create a file */
+ fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id);
+ CHECK(fid, H5I_INVALID_HID, "H5Fcreate");
+
+ /* Close file */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Verify that the file is an HDF5 file */
+ is_hdf5 = H5Fis_hdf5(sb_filename);
+ VERIFY(is_hdf5, TRUE, "H5Fis_hdf5");
+
+ /*******************************/
+ /* Non-default user block size */
+ /*******************************/
+
+ /* Create a file creation property list with a non-default user block size */
+ fcpl_id = H5Pcreate(H5P_FILE_CREATE);
+ CHECK(fcpl_id, H5I_INVALID_HID, "H5Pcreate");
+
+ ret = H5Pset_userblock(fcpl_id, (hsize_t)2048);
+ CHECK(ret, FAIL, "H5Pset_userblock");
+
+ /* Create file with non-default user block */
+ fid = H5Fcreate(filename, H5F_ACC_TRUNC, fcpl_id, fapl_id);
+ CHECK(fid, H5I_INVALID_HID, "H5Fcreate");
+
+ /* Release file creation property list */
+ ret = H5Pclose(fcpl_id);
+ CHECK(ret, FAIL, "H5Pclose");
+
+ /* Close file */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Verify that the file is an HDF5 file */
+ is_hdf5 = H5Fis_hdf5(sb_filename);
+ VERIFY(is_hdf5, TRUE, "H5Fis_hdf5");
+
+ /***************************/
+ /* Non-empty non-HDF5 file */
+ /***************************/
+
+ /* Create non-HDF5 file. Use the calculated superblock
+ * filename to avoid the format strings that will make
+ * open(2) sad.
+ */
+ fd = HDopen(sb_filename, O_RDWR | O_CREAT | O_TRUNC, H5_POSIX_CREATE_MODE_RW);
+ CHECK(fd, (-1), "HDopen");
+
+ /* Initialize information to write */
+ for (u = 0; u < 1024; u++)
+ buf[u] = (unsigned char)u;
+
+ /* Write some information */
+ nbytes = HDwrite(fd, buf, (size_t)1024);
+ VERIFY(nbytes, 1024, "HDwrite");
+
+ /* Close the file */
+ posix_ret = HDclose(fd);
+ CHECK(posix_ret, (-1), "HDclose");
+
+ /* Verify that the file is not an HDF5 file */
+ is_hdf5 = H5Fis_hdf5(sb_filename);
+ VERIFY(is_hdf5, FALSE, "H5Fis_hdf5");
+
+ /* Clean up files */
+#if 0
+ h5_delete_test_file(filename, fapl_id);
+#endif
+ H5Fdelete(filename, fapl_id);
+
+ /* Close property list */
+ ret = H5Pclose(fapl_id);
+ CHECK(ret, FAIL, "H5Pclose");
+
+} /* end test_file_ishdf5() */
+#endif /* H5_NO_DEPRECATED_SYMBOLS */
+#endif
+
+/****************************************************************
+**
+** test_file_delete(): tests H5Fdelete for all VFDs
+**
+*****************************************************************/
+#define FILE_DELETE "test_file_delete.h5"
+#define FILE_DELETE_NOT_HDF5 "test_file_delete_not_hdf5"
+static void
+test_file_delete(hid_t fapl_id)
+{
+ hid_t fid = H5I_INVALID_HID; /* File to be deleted */
+ char filename[FILENAME_LEN]; /* Filename to use */
+ htri_t is_hdf5; /* Whether a file is an HDF5 file */
+#if 0
+ int fd; /* POSIX file descriptor */
+ int iret;
+#endif
+ herr_t ret;
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing Deletion of HDF5 Files\n"));
+
+ /*************/
+ /* HDF5 FILE */
+ /*************/
+
+ /* Get fapl-dependent filename */
+ h5_fixname(FILE_DELETE, fapl_id, filename, sizeof(filename));
+
+ /* Create a file */
+ fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id);
+ CHECK(fid, H5I_INVALID_HID, "H5Fcreate");
+
+ /* Close file */
+ ret = H5Fclose(fid);
+ VERIFY(ret, SUCCEED, "H5Fclose");
+
+ /* Verify that the file is an HDF5 file */
+ is_hdf5 = H5Fis_accessible(filename, fapl_id);
+ VERIFY(is_hdf5, TRUE, "H5Fis_accessible");
+
+ /* Delete the file */
+ ret = H5Fdelete(filename, fapl_id);
+ VERIFY(ret, SUCCEED, "H5Fdelete");
+
+ /* Verify that the file is NO LONGER an HDF5 file */
+ /* This should fail since there is no file */
+ H5E_BEGIN_TRY
+ {
+ is_hdf5 = H5Fis_accessible(filename, fapl_id);
+ }
+ H5E_END_TRY;
+ VERIFY(is_hdf5, FAIL, "H5Fis_accessible");
+
+#if 0
+ /* Just in case deletion fails - silent on errors */
+ h5_delete_test_file(FILE_DELETE, fapl_id);
+
+ /*****************/
+ /* NON-HDF5 FILE */
+ /*****************/
+
+ /* Get fapl-dependent filename */
+ h5_fixname(FILE_DELETE_NOT_HDF5, fapl_id, filename, sizeof(filename));
+
+ /* Create a non-HDF5 file */
+ fd = HDopen(filename, O_RDWR | O_CREAT | O_TRUNC, H5_POSIX_CREATE_MODE_RW);
+ CHECK_I(fd, "HDopen");
+
+ /* Close the file */
+ ret = HDclose(fd);
+ VERIFY(ret, 0, "HDclose");
+
+ /* Verify that the file is not an HDF5 file */
+ /* Note that a FAIL result is possible when h5_fixname()
+ * perturbs the filename: a file with that exact name may
+ * not exist, since we created the file with open(2) and
+ * not through the library.
+ */
+ H5E_BEGIN_TRY
+ {
+ is_hdf5 = H5Fis_accessible(filename, fapl_id);
+ }
+ H5E_END_TRY;
+ CHECK(is_hdf5, TRUE, "H5Fis_accessible");
+
+ /* Try to delete it (should fail) */
+ H5E_BEGIN_TRY
+ {
+ ret = H5Fdelete(filename, fapl_id);
+ }
+ H5E_END_TRY;
+ VERIFY(ret, FAIL, "H5Fdelete");
+
+ /* Delete the file */
+ iret = H5Fdelete(filename, H5P_DEFAULT);
+ VERIFY(iret, 0, "H5Fdelete");
+#endif
+} /* end test_file_delete() */
+
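+/* Illustrative sketch (disabled with #if 0, not exercised by the test
+ * suite): a tolerant cleanup helper built on H5Fdelete(). Because
+ * H5Fdelete() goes through the VOL/VFD layer, passing the same FAPL that
+ * was used to create the file allows multi-file drivers (family, split,
+ * multi) to remove all of their member files, which deleting a single
+ * name with remove(3) would not do. Errors are swallowed here so the
+ * helper can be called unconditionally during cleanup.
+ */
+#if 0
+static void
+sketch_delete_file_quietly(const char *name, hid_t fapl)
+{
+ herr_t status;
+
+ H5E_BEGIN_TRY
+ {
+ status = H5Fdelete(name, fapl);
+ }
+ H5E_END_TRY;
+
+ (void)status; /* Ignored: the file may legitimately not exist */
+}
+#endif
+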
+/****************************************************************
+**
+** test_file_open_dot(): low-level file test routine.
+** This test checks whether opening objects with "." for a name
+** works correctly in various situations.
+**
+*****************************************************************/
+static void
+test_file_open_dot(void)
+{
+ hid_t fid; /* File ID */
+ hid_t gid, gid2; /* Group IDs */
+ hid_t did; /* Dataset ID */
+ hid_t sid; /* Dataspace ID */
+ hid_t tid, tid2; /* Datatype IDs */
+ herr_t ret;
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing opening objects with \".\" for a name\n"));
+
+ /* Create a new HDF5 file to work with */
+ fid = H5Fcreate(FILE1, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(fid, FAIL, "H5Fcreate");
+
+ /* Create a group in the HDF5 file */
+ gid = H5Gcreate2(fid, GRP_NAME, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(gid, FAIL, "H5Gcreate2");
+
+ /* Create a dataspace for creating datasets */
+ sid = H5Screate(H5S_SCALAR);
+ CHECK(sid, FAIL, "H5Screate");
+
+ /* Create a dataset with no name using the file ID */
+ H5E_BEGIN_TRY
+ {
+ did = H5Dcreate2(fid, ".", H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+ VERIFY(did, FAIL, "H5Dcreate2");
+
+ /* Create a dataset with no name using the group ID */
+ H5E_BEGIN_TRY
+ {
+ did = H5Dcreate2(gid, ".", H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+ VERIFY(did, FAIL, "H5Dcreate2");
+
+ /* Open a dataset with no name using the file ID */
+ H5E_BEGIN_TRY
+ {
+ did = H5Dopen2(fid, ".", H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+ VERIFY(did, FAIL, "H5Dopen2");
+
+ /* Open a dataset with no name using the group ID */
+ H5E_BEGIN_TRY
+ {
+ did = H5Dopen2(gid, ".", H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+ VERIFY(did, FAIL, "H5Dopen2");
+
+ /* Make a copy of a datatype to use for creating a named datatype */
+ tid = H5Tcopy(H5T_NATIVE_INT);
+ CHECK(tid, FAIL, "H5Tcopy");
+
+ /* Create a named datatype with no name using the file ID */
+ H5E_BEGIN_TRY
+ {
+ ret = H5Tcommit2(fid, ".", tid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+ VERIFY(ret, FAIL, "H5Tcommit2");
+
+ /* Create a named datatype with no name using the group ID */
+ H5E_BEGIN_TRY
+ {
+ ret = H5Tcommit2(gid, ".", tid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+ VERIFY(ret, FAIL, "H5Tcommit2");
+
+ /* Open a named datatype with no name using the file ID */
+ H5E_BEGIN_TRY
+ {
+ tid2 = H5Topen2(fid, ".", H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+ VERIFY(tid2, FAIL, "H5Topen2");
+
+ /* Open a named datatype with no name using the group ID */
+ H5E_BEGIN_TRY
+ {
+ tid2 = H5Topen2(gid, ".", H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+ VERIFY(tid2, FAIL, "H5Topen2");
+
+ /* Create a group with no name using the file ID */
+ H5E_BEGIN_TRY
+ {
+ gid2 = H5Gcreate2(fid, ".", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+ VERIFY(gid2, FAIL, "H5Gcreate2");
+
+ /* Create a group with no name using the group ID */
+ H5E_BEGIN_TRY
+ {
+ gid2 = H5Gcreate2(gid, ".", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+ VERIFY(gid2, FAIL, "H5Gcreate2");
+
+ /* Open a group with no name using the file ID (should open the root group) */
+ gid2 = H5Gopen2(fid, ".", H5P_DEFAULT);
+ CHECK(gid2, FAIL, "H5Gopen2");
+
+ ret = H5Gclose(gid2);
+ CHECK(ret, FAIL, "H5Gclose");
+
+ /* Open a group with no name using the group ID (should open the group again) */
+ gid2 = H5Gopen2(gid, ".", H5P_DEFAULT);
+ CHECK(gid2, FAIL, "H5Gopen2");
+
+ ret = H5Gclose(gid2);
+ CHECK(ret, FAIL, "H5Gclose");
+
+ /* Close everything */
+ ret = H5Sclose(sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ ret = H5Gclose(gid);
+ CHECK(ret, FAIL, "H5Gclose");
+
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+
+} /* end test_file_open_dot() */
+
+/****************************************************************
+**
+** test_file_open_overlap(): low-level file test routine.
+** This test checks whether opening files in an overlapping manner
+** (as opposed to a nested one) works correctly.
+**
+*****************************************************************/
+static void
+test_file_open_overlap(void)
+{
+ hid_t fid1, fid2;
+ hid_t did1, did2;
+ hid_t gid;
+ hid_t sid;
+ ssize_t nobjs; /* # of open objects */
+ unsigned intent;
+#if 0
+ unsigned long fileno1, fileno2; /* File number */
+#endif
+ herr_t ret; /* Generic return value */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing opening overlapping file opens\n"));
+
+ /* Create file */
+ fid1 = H5Fcreate(FILE1, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(fid1, FAIL, "H5Fcreate");
+
+ /* Open file also */
+ fid2 = H5Fopen(FILE1, H5F_ACC_RDWR, H5P_DEFAULT);
+ CHECK(fid2, FAIL, "H5Fopen");
+
+ /* Check the intent */
+ ret = H5Fget_intent(fid1, &intent);
+ CHECK(ret, FAIL, "H5Fget_intent");
+ VERIFY(intent, H5F_ACC_RDWR, "H5Fget_intent");
+#if 0
+ /* Check the file numbers */
+ fileno1 = 0;
+ ret = H5Fget_fileno(fid1, &fileno1);
+ CHECK(ret, FAIL, "H5Fget_fileno");
+ fileno2 = 0;
+ ret = H5Fget_fileno(fid2, &fileno2);
+ CHECK(ret, FAIL, "H5Fget_fileno");
+ VERIFY(fileno1, fileno2, "H5Fget_fileno");
+
+ /* Check that a file number pointer of NULL is ignored */
+ ret = H5Fget_fileno(fid1, NULL);
+ CHECK(ret, FAIL, "H5Fget_fileno");
+#endif
+
+ /* Create a group in file */
+ gid = H5Gcreate2(fid1, GROUP1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(gid, FAIL, "H5Gcreate2");
+
+ /* Create dataspace for dataset */
+ sid = H5Screate(H5S_SCALAR);
+ CHECK(sid, FAIL, "H5Screate");
+
+ /* Create dataset in group w/first file ID */
+ did1 = H5Dcreate2(gid, DSET1, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(did1, FAIL, "H5Dcreate2");
+#ifndef WRONG_DATATYPE_OBJ_COUNT
+ /* Check number of objects opened in first file */
+ nobjs = H5Fget_obj_count(fid1, H5F_OBJ_LOCAL | H5F_OBJ_ALL);
+ VERIFY(nobjs, 3, "H5Fget_obj_count"); /* 3 == file, dataset & group */
+#endif
+ /* Close dataset */
+ ret = H5Dclose(did1);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close group */
+ ret = H5Gclose(gid);
+ CHECK(ret, FAIL, "H5Gclose");
+
+ /* Close first file ID */
+ ret = H5Fclose(fid1);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Create dataset with second file ID */
+ did2 = H5Dcreate2(fid2, DSET2, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(did2, FAIL, "H5Dcreate2");
+
+ /* Check number of objects opened in second file */
+ nobjs = H5Fget_obj_count(fid2, H5F_OBJ_ALL);
+ VERIFY(nobjs, 2, "H5Fget_obj_count"); /* 2 == file & dataset */
+
+ /* Close dataspace */
+ ret = H5Sclose(sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close second dataset */
+ ret = H5Dclose(did2);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close second file */
+ ret = H5Fclose(fid2);
+ CHECK(ret, FAIL, "H5Fclose");
+} /* end test_file_open_overlap() */
+
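+/* Illustrative sketch (disabled with #if 0, not exercised by the test
+ * suite): using H5Fget_obj_count() type masks to count identifiers that
+ * are still open against a file, e.g. to diagnose why closing a file does
+ * not release it. Subtracting the H5F_OBJ_FILE count from the H5F_OBJ_ALL
+ * count leaves only the non-file objects (datasets, groups, datatypes,
+ * attributes); H5F_OBJ_LOCAL can be OR'd in, as the test above does, to
+ * restrict the count to one particular file handle.
+ */
+#if 0
+static ssize_t
+sketch_count_open_non_file_ids(hid_t file_id)
+{
+ ssize_t nall = H5Fget_obj_count(file_id, H5F_OBJ_ALL);
+ ssize_t nfile = H5Fget_obj_count(file_id, H5F_OBJ_FILE);
+
+ if (nall < 0 || nfile < 0)
+ return -1;
+
+ return nall - nfile;
+}
+#endif
+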
+/****************************************************************
+**
+** test_file_getname(): low-level file test routine.
+** This test checks whether H5Fget_name works correctly.
+**
+*****************************************************************/
+static void
+test_file_getname(void)
+{
+ /* Compound datatype */
+ typedef struct s1_t {
+ unsigned int a;
+ float b;
+ } s1_t;
+
+ hid_t file_id;
+ hid_t group_id;
+ hid_t dataset_id;
+ hid_t space_id;
+ hid_t type_id;
+ hid_t attr_id;
+ hsize_t dims[TESTA_RANK] = {TESTA_NX, TESTA_NY};
+ char name[TESTA_NAME_BUF_SIZE];
+ ssize_t name_len;
+ herr_t ret; /* Generic return value */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing H5Fget_name() functionality\n"));
+
+ /* Create a new file_id using default properties. */
+ file_id = H5Fcreate(FILE1, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(file_id, FAIL, "H5Fcreate");
+
+ /* Get and verify file name */
+ name_len = H5Fget_name(file_id, name, (size_t)TESTA_NAME_BUF_SIZE);
+ CHECK(name_len, FAIL, "H5Fget_name");
+ VERIFY_STR(name, FILE1, "H5Fget_name");
+
+ /* Create a group in the root group */
+ group_id = H5Gcreate2(file_id, TESTA_GROUPNAME, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(group_id, FAIL, "H5Gcreate2");
+
+ /* Get and verify file name */
+ name_len = H5Fget_name(group_id, name, (size_t)TESTA_NAME_BUF_SIZE);
+ CHECK(name_len, FAIL, "H5Fget_name");
+ VERIFY_STR(name, FILE1, "H5Fget_name");
+
+ /* Create the data space */
+ space_id = H5Screate_simple(TESTA_RANK, dims, NULL);
+ CHECK(space_id, FAIL, "H5Screate_simple");
+
+ /* Try to get the file name from the dataspace. This should fail
+ * because it's an illegal operation. */
+ H5E_BEGIN_TRY
+ {
+ name_len = H5Fget_name(space_id, name, (size_t)TESTA_NAME_BUF_SIZE);
+ }
+ H5E_END_TRY;
+ VERIFY(name_len, FAIL, "H5Fget_name");
+
+ /* Create a new dataset */
+ dataset_id =
+ H5Dcreate2(file_id, TESTA_DSETNAME, H5T_NATIVE_INT, space_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(dataset_id, FAIL, "H5Dcreate2");
+
+ /* Get and verify file name */
+ name_len = H5Fget_name(dataset_id, name, (size_t)TESTA_NAME_BUF_SIZE);
+ CHECK(name_len, FAIL, "H5Fget_name");
+ VERIFY_STR(name, FILE1, "H5Fget_name");
+
+ /* Create an attribute for the dataset */
+ attr_id = H5Acreate2(dataset_id, TESTA_ATTRNAME, H5T_NATIVE_INT, space_id, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(attr_id, FAIL, "H5Acreate2");
+
+ /* Get and verify file name */
+ name_len = H5Fget_name(attr_id, name, (size_t)TESTA_NAME_BUF_SIZE);
+ CHECK(name_len, FAIL, "H5Fget_name");
+ VERIFY_STR(name, FILE1, "H5Fget_name");
+
+ /* Create a compound datatype */
+ type_id = H5Tcreate(H5T_COMPOUND, sizeof(s1_t));
+ CHECK(type_id, FAIL, "H5Tcreate");
+
+ /* Insert fields */
+ ret = H5Tinsert(type_id, "a", HOFFSET(s1_t, a), H5T_NATIVE_INT);
+ CHECK(ret, FAIL, "H5Tinsert");
+
+ ret = H5Tinsert(type_id, "b", HOFFSET(s1_t, b), H5T_NATIVE_FLOAT);
+ CHECK(ret, FAIL, "H5Tinsert");
+
+ /* Save it on file */
+ ret = H5Tcommit2(file_id, TESTA_DTYPENAME, type_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Tcommit2");
+
+ /* Get and verify file name */
+ name_len = H5Fget_name(type_id, name, (size_t)TESTA_NAME_BUF_SIZE);
+ CHECK(name_len, FAIL, "H5Fget_name");
+ VERIFY_STR(name, FILE1, "H5Fget_name");
+
+ /* Close things down */
+ ret = H5Tclose(type_id);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ ret = H5Aclose(attr_id);
+ CHECK(ret, FAIL, "H5Aclose");
+
+ ret = H5Dclose(dataset_id);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ ret = H5Sclose(space_id);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ ret = H5Gclose(group_id);
+ CHECK(ret, FAIL, "H5Gclose");
+
+ ret = H5Fclose(file_id);
+ CHECK(ret, FAIL, "H5Fclose");
+
+} /* end test_file_getname() */
+
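+/* Illustrative sketch (disabled with #if 0, not exercised by the test
+ * suite): the two-call pattern for H5Fget_name() when a fixed-size buffer
+ * is not desirable. Passing a NULL buffer makes H5Fget_name() return the
+ * length of the file name (not counting the terminating NUL), so exactly
+ * enough space can be allocated before the second call. The caller owns
+ * and must free the returned string.
+ */
+#if 0
+static char *
+sketch_get_file_name(hid_t obj_id)
+{
+ ssize_t len;
+ char *name = NULL;
+
+ /* First call: query the length only */
+ if ((len = H5Fget_name(obj_id, NULL, 0)) < 0)
+ return NULL;
+
+ if (NULL == (name = (char *)HDmalloc((size_t)len + 1)))
+ return NULL;
+
+ /* Second call: retrieve the name into the right-sized buffer */
+ if (H5Fget_name(obj_id, name, (size_t)len + 1) < 0) {
+ HDfree(name);
+ return NULL;
+ }
+
+ return name;
+}
+#endif
+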
+/****************************************************************
+**
+** test_file_double_root_open(): low-level file test routine.
+** This test checks whether opening the root group from two
+** different files works correctly.
+**
+*****************************************************************/
+static void
+test_file_double_root_open(void)
+{
+ hid_t file1_id, file2_id;
+ hid_t grp1_id, grp2_id;
+ herr_t ret; /* Generic return value */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing double root group open\n"));
+
+ file1_id = H5Fcreate(FILE1, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(file1_id, FAIL, "H5Fcreate");
+ file2_id = H5Fopen(FILE1, H5F_ACC_RDONLY, H5P_DEFAULT);
+ CHECK(file2_id, FAIL, "H5Fopen");
+
+ grp1_id = H5Gopen2(file1_id, "/", H5P_DEFAULT);
+ CHECK(grp1_id, FAIL, "H5Gopen2");
+ grp2_id = H5Gopen2(file2_id, "/", H5P_DEFAULT);
+ CHECK(grp2_id, FAIL, "H5Gopen2");
+
+ /* Note "asymmetric" close order */
+ ret = H5Gclose(grp1_id);
+ CHECK(ret, FAIL, "H5Gclose");
+ ret = H5Gclose(grp2_id);
+ CHECK(ret, FAIL, "H5Gclose");
+
+ ret = H5Fclose(file1_id);
+ CHECK(ret, FAIL, "H5Fclose");
+ ret = H5Fclose(file2_id);
+ CHECK(ret, FAIL, "H5Fclose");
+} /* end test_file_double_root_open() */
+
+/****************************************************************
+**
+** test_file_double_group_open(): low-level file test routine.
+** This test checks whether opening the same group from two
+** different files works correctly.
+**
+*****************************************************************/
+static void
+test_file_double_group_open(void)
+{
+ hid_t file1_id, file2_id;
+ hid_t grp1_id, grp2_id;
+ herr_t ret; /* Generic return value */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing double non-root group open\n"));
+
+ file1_id = H5Fcreate(FILE1, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(file1_id, FAIL, "H5Fcreate");
+ file2_id = H5Fopen(FILE1, H5F_ACC_RDONLY, H5P_DEFAULT);
+ CHECK(file2_id, FAIL, "H5Fopen");
+
+ grp1_id = H5Gcreate2(file1_id, GRP_NAME, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(grp1_id, FAIL, "H5Gcreate2");
+ grp2_id = H5Gopen2(file2_id, GRP_NAME, H5P_DEFAULT);
+ CHECK(grp2_id, FAIL, "H5Gopen2");
+
+ /* Note "asymmetric" close order */
+ ret = H5Gclose(grp1_id);
+ CHECK(ret, FAIL, "H5Gclose");
+ ret = H5Gclose(grp2_id);
+ CHECK(ret, FAIL, "H5Gclose");
+
+ ret = H5Fclose(file1_id);
+ CHECK(ret, FAIL, "H5Fclose");
+ ret = H5Fclose(file2_id);
+ CHECK(ret, FAIL, "H5Fclose");
+} /* end test_file_double_group_open() */
+
+/****************************************************************
+**
+** test_file_double_dataset_open(): low-level file test routine.
+** This test checks whether opening the same dataset from two
+** different files works correctly.
+**
+*****************************************************************/
+static void
+test_file_double_dataset_open(void)
+{
+ hid_t file1_id, file2_id;
+ hid_t dset1_id, dset2_id;
+ hid_t space_id;
+ herr_t ret; /* Generic return value */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing double dataset open\n"));
+
+ file1_id = H5Fcreate(FILE1, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(file1_id, FAIL, "H5Fcreate");
+ file2_id = H5Fopen(FILE1, H5F_ACC_RDONLY, H5P_DEFAULT);
+ CHECK(file2_id, FAIL, "H5Fopen");
+
+ /* Create dataspace for dataset */
+ space_id = H5Screate(H5S_SCALAR);
+ CHECK(space_id, FAIL, "H5Screate");
+
+ dset1_id =
+ H5Dcreate2(file1_id, DSET_NAME, H5T_NATIVE_INT, space_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(dset1_id, FAIL, "H5Dcreate2");
+ dset2_id = H5Dopen2(file2_id, DSET_NAME, H5P_DEFAULT);
+ CHECK(dset2_id, FAIL, "H5Dopen2");
+
+ /* Close "supporting" dataspace */
+ ret = H5Sclose(space_id);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Note "asymmetric" close order */
+ ret = H5Dclose(dset1_id);
+ CHECK(ret, FAIL, "H5Dclose");
+ ret = H5Dclose(dset2_id);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ ret = H5Fclose(file1_id);
+ CHECK(ret, FAIL, "H5Fclose");
+ ret = H5Fclose(file2_id);
+ CHECK(ret, FAIL, "H5Fclose");
+} /* end test_file_double_dataset_open() */
+
+/****************************************************************
+**
+** test_file_double_file_dataset_open():
+** This test checks multi-opens of files & datasets:
+** It simulates the multi-thread test program from DLS
+** which exposes the file pointer segmentation fault failure.
+** NOTE: The order in which the files and datasets are opened
+** and closed is important.
+**
+*****************************************************************/
+static void
+test_file_double_file_dataset_open(hbool_t new_format)
+{
+ hid_t fapl = -1; /* File access property list */
+ hid_t dcpl = -1; /* Dataset creation property list */
+ hid_t fid1 = -1, fid2 = -1; /* File IDs */
+ hid_t did1 = -1, did2 = -1; /* Dataset IDs */
+ hid_t sid1 = -1, sid2 = -1; /* Dataspace IDs */
+ hid_t tid1 = -1, tid2 = -1; /* Datatype IDs */
+ hsize_t dims[1] = {5}, dims2[2] = {1, 4}; /* Dimension sizes */
+ hsize_t e_ext_dims[1] = {7}; /* Expanded dimension sizes */
+ hsize_t s_ext_dims[1] = {3}; /* Shrunk dimension sizes */
+ hsize_t max_dims0[1] = {8}; /* Maximum dimension sizes */
+ hsize_t max_dims1[1] = {H5S_UNLIMITED}; /* Maximum dimension sizes for extensible array index */
+ hsize_t max_dims2[2] = {H5S_UNLIMITED, H5S_UNLIMITED}; /* Maximum dimension sizes for v2 B-tree index */
+ hsize_t chunks[1] = {2}, chunks2[2] = {4, 5}; /* Chunk dimension sizes */
+#if 0
+ hsize_t size; /* File size */
+#endif
+ char filename[FILENAME_LEN]; /* Filename to use */
+ const char *data[] = {"String 1", "String 2", "String 3", "String 4", "String 5"}; /* Input Data */
+ const char *e_data[] = {"String 1", "String 2", "String 3", "String 4",
+ "String 5", "String 6", "String 7"}; /* Input Data */
+ char *buffer[5]; /* Output buffer */
+ int wbuf[4] = {1, 2, 3, 4}; /* Input data */
+ herr_t ret; /* Generic return value */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing double file and dataset open/close\n"));
+
+ /* Setting up test file */
+ fapl = h5_fileaccess();
+ CHECK(fapl, FAIL, "H5Pcreate");
+ if (new_format) {
+ ret = H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST);
+ CHECK(ret, FAIL, "H5Pset_libver_bounds");
+ } /* end if */
+ h5_fixname(FILE1, fapl, filename, sizeof filename);
+
+ /* Create the test file */
+ fid1 = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
+ CHECK(fid1, FAIL, "H5Fcreate");
+
+ /* Create a chunked dataset with fixed array indexing */
+ sid1 = H5Screate_simple(1, dims, max_dims0);
+ CHECK(sid1, FAIL, "H5Screate_simple");
+ tid1 = H5Tcopy(H5T_C_S1);
+ CHECK(tid1, FAIL, "H5Tcopy");
+ ret = H5Tset_size(tid1, H5T_VARIABLE);
+ CHECK(ret, FAIL, "H5Tset_size");
+
+ dcpl = H5Pcreate(H5P_DATASET_CREATE);
+ CHECK(dcpl, FAIL, "H5Pcreate");
+ ret = H5Pset_chunk(dcpl, 1, chunks);
+ CHECK(ret, FAIL, "H5Pset_chunk");
+
+ did1 = H5Dcreate2(fid1, "dset_fa", tid1, sid1, H5P_DEFAULT, dcpl, H5P_DEFAULT);
+ CHECK(did1, FAIL, "H5Dcreate2");
+
+ /* Closing */
+ ret = H5Dclose(did1);
+ CHECK(ret, FAIL, "H5Dclose");
+ ret = H5Tclose(tid1);
+ CHECK(ret, FAIL, "H5Tclose");
+ ret = H5Sclose(sid1);
+ CHECK(ret, FAIL, "H5Sclose");
+ ret = H5Pclose(dcpl);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Create a chunked dataset with extensible array indexing */
+ sid1 = H5Screate_simple(1, dims, max_dims1);
+ CHECK(sid1, FAIL, "H5Screate_simple");
+ tid1 = H5Tcopy(H5T_C_S1);
+ CHECK(tid1, FAIL, "H5Tcopy");
+ ret = H5Tset_size(tid1, H5T_VARIABLE);
+ CHECK(ret, FAIL, "H5Tset_size");
+
+ dcpl = H5Pcreate(H5P_DATASET_CREATE);
+ CHECK(dcpl, FAIL, "H5Pcreate");
+ ret = H5Pset_chunk(dcpl, 1, chunks);
+ CHECK(ret, FAIL, "H5Pset_chunk");
+
+ did1 = H5Dcreate2(fid1, "dset_ea", tid1, sid1, H5P_DEFAULT, dcpl, H5P_DEFAULT);
+ CHECK(did1, FAIL, "H5Dcreate2");
+
+ /* Write to the dataset */
+ ret = H5Dwrite(did1, tid1, H5S_ALL, H5S_ALL, H5P_DEFAULT, data);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* Closing */
+ /* (Leave sid1 open for later use) */
+ ret = H5Dclose(did1);
+ CHECK(ret, FAIL, "H5Dclose");
+ ret = H5Tclose(tid1);
+ CHECK(ret, FAIL, "H5Tclose");
+ ret = H5Pclose(dcpl);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Create a chunked dataset with v2 btree indexing */
+ sid2 = H5Screate_simple(2, dims2, max_dims2);
+ CHECK(sid2, FAIL, "H5Screate_simple");
+
+ dcpl = H5Pcreate(H5P_DATASET_CREATE);
+ CHECK(dcpl, FAIL, "H5Pcreate");
+ ret = H5Pset_chunk(dcpl, 2, chunks2);
+ CHECK(ret, FAIL, "H5Pset_chunk");
+
+ did2 = H5Dcreate2(fid1, "dset_bt2", H5T_NATIVE_INT, sid2, H5P_DEFAULT, dcpl, H5P_DEFAULT);
+ CHECK(did2, FAIL, "H5Dcreate2");
+
+ /* Write to the dataset */
+ ret = H5Dwrite(did2, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, wbuf);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* Closing */
+ ret = H5Dclose(did2);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ ret = H5Sclose(sid2);
+ CHECK(ret, FAIL, "H5Sclose");
+ ret = H5Pclose(dcpl);
+ CHECK(ret, FAIL, "H5Pclose");
+ ret = H5Fclose(fid1);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /*
+ * Scenario 1
+ */
+
+ /* First file open */
+ fid1 = H5Fopen(filename, H5F_ACC_RDWR, fapl);
+ CHECK(fid1, FAIL, "H5Fopen");
+
+ /* First file's dataset open */
+ did1 = H5Dopen2(fid1, "/dset_fa", H5P_DEFAULT);
+ CHECK(did1, FAIL, "H5Dopen2");
+
+ tid1 = H5Tcopy(did1);
+ CHECK(tid1, FAIL, "H5Tcopy");
+
+ /* First file's dataset write */
+ ret = H5Dwrite(did1, tid1, H5S_ALL, H5S_ALL, H5P_DEFAULT, data);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* Second file open */
+ fid2 = H5Fopen(filename, H5F_ACC_RDWR, fapl);
+ CHECK(fid2, FAIL, "H5Fopen");
+
+ /* Second file's dataset open */
+ did2 = H5Dopen2(fid2, "/dset_fa", H5P_DEFAULT);
+ CHECK(did2, FAIL, "H5Dopen2");
+
+ tid2 = H5Tcopy(did2);
+ CHECK(tid2, FAIL, "H5Tcopy");
+
+ /* First file's dataset close */
+ ret = H5Dclose(did1);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* First file close */
+ ret = H5Fclose(fid1);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Second file's dataset write */
+ ret = H5Dwrite(did2, tid2, H5S_ALL, H5S_ALL, H5P_DEFAULT, data);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* Second file's dataset close */
+ ret = H5Dclose(did2);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Second file close */
+ ret = H5Fclose(fid2);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Closing */
+ ret = H5Tclose(tid1);
+ CHECK(ret, FAIL, "H5Tclose");
+ ret = H5Tclose(tid2);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ /*
+ * Scenario 2
+ */
+
+ /* First file open */
+ fid1 = H5Fopen(filename, H5F_ACC_RDONLY, fapl);
+ CHECK(fid1, FAIL, "H5Fopen");
+
+ /* Second file open */
+ fid2 = H5Fopen(filename, H5F_ACC_RDONLY, fapl);
+ CHECK(fid2, FAIL, "H5Fopen");
+
+ /* Second file's dataset open */
+ did2 = H5Dopen2(fid2, "/dset_ea", H5P_DEFAULT);
+ CHECK(did2, FAIL, "H5Dopen2");
+
+ tid2 = H5Tcopy(did2);
+ CHECK(tid2, FAIL, "H5Tcopy");
+
+ /* First file's dataset open */
+ did1 = H5Dopen2(fid1, "/dset_ea", H5P_DEFAULT);
+ CHECK(did1, FAIL, "H5Dopen2");
+
+ tid1 = H5Tcopy(did1);
+ CHECK(tid1, FAIL, "H5Tcopy");
+
+ /* Second file's dataset read */
+ HDmemset(buffer, 0, sizeof(char *) * 5);
+ ret = H5Dread(did2, tid2, H5S_ALL, H5S_ALL, H5P_DEFAULT, buffer);
+ CHECK(ret, FAIL, "H5Dread");
+ ret = H5Treclaim(tid2, sid1, H5P_DEFAULT, buffer);
+ CHECK(ret, FAIL, "H5Treclaim");
+
+ /* Second file's dataset close */
+ ret = H5Dclose(did2);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Second file close */
+ ret = H5Fclose(fid2);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* First file's dataset read */
+ HDmemset(buffer, 0, sizeof(char *) * 5);
+ ret = H5Dread(did1, tid1, H5S_ALL, H5S_ALL, H5P_DEFAULT, buffer);
+ CHECK(ret, FAIL, "H5Dread");
+ ret = H5Treclaim(tid1, sid1, H5P_DEFAULT, buffer);
+ CHECK(ret, FAIL, "H5Treclaim");
+
+ /* First file's dataset close */
+ ret = H5Dclose(did1);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* First file close */
+ ret = H5Fclose(fid1);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Closing */
+ ret = H5Tclose(tid1);
+ CHECK(ret, FAIL, "H5Tclose");
+ ret = H5Sclose(sid1);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ ret = H5Tclose(tid2);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ /*
+ * Scenario 3
+ */
+
+ /* First file open */
+ fid1 = H5Fopen(filename, H5F_ACC_RDONLY, fapl);
+ CHECK(fid1, FAIL, "H5Fopen");
+
+ /* First file's dataset open */
+ did1 = H5Dopen2(fid1, "/dset_bt2", H5P_DEFAULT);
+ CHECK(did1, FAIL, "H5Dopen2");
+#if 0
+ /* First file's get storage size */
+ size = H5Dget_storage_size(did1);
+ CHECK(size, 0, "H5Dget_storage_size");
+#endif
+ /* Second file open */
+ fid2 = H5Fopen(filename, H5F_ACC_RDONLY, fapl);
+ CHECK(fid2, FAIL, "H5Fopen");
+
+ /* Second file's dataset open */
+ did2 = H5Dopen2(fid2, "/dset_bt2", H5P_DEFAULT);
+ CHECK(did2, FAIL, "H5Dopen2");
+
+ /* First file's dataset close */
+ ret = H5Dclose(did1);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* First file close */
+ ret = H5Fclose(fid1);
+ CHECK(ret, FAIL, "H5Fclose");
+#if 0
+ /* Second file's get storage size */
+ size = H5Dget_storage_size(did2);
+ CHECK(size, 0, "H5Dget_storage_size");
+#endif
+ /* Second file's dataset close */
+ ret = H5Dclose(did2);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Second file close */
+ ret = H5Fclose(fid2);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /*
+ * Scenario 4
+ * --trigger H5AC_protect: Assertion `f->shared' failed
+ * from second call to
+ * H5Dset_extent->...H5D__earray_idx_remove->H5EA_get...H5EA__iblock_protect...H5AC_protect
+ */
+ /* First file open */
+ fid1 = H5Fopen(filename, H5F_ACC_RDWR, fapl);
+ CHECK(fid1, FAIL, "H5Fopen");
+
+ /* First file's dataset open */
+ did1 = H5Dopen2(fid1, "/dset_ea", H5P_DEFAULT);
+ CHECK(did1, FAIL, "H5Dopen2");
+
+ tid1 = H5Tcopy(did1);
+ CHECK(tid1, FAIL, "H5Tcopy");
+
+ /* Extend the dataset */
+ ret = H5Dset_extent(did1, e_ext_dims);
+ CHECK(ret, FAIL, "H5Dset_extent");
+
+ /* Write to the dataset */
+ ret = H5Dwrite(did1, tid1, H5S_ALL, H5S_ALL, H5P_DEFAULT, e_data);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* Second file open */
+ fid2 = H5Fopen(filename, H5F_ACC_RDWR, fapl);
+ CHECK(fid2, FAIL, "H5Fopen");
+
+ /* Second file's dataset open */
+ did2 = H5Dopen2(fid2, "/dset_ea", H5P_DEFAULT);
+ CHECK(did2, FAIL, "H5Dopen2");
+
+ /* First file's dataset close */
+ ret = H5Dclose(did1);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* First file close */
+ ret = H5Fclose(fid1);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Shrink the dataset */
+ ret = H5Dset_extent(did2, s_ext_dims);
+ CHECK(ret, FAIL, "H5Dset_extent");
+
+ /* Second file's dataset close */
+ ret = H5Dclose(did2);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Second file close */
+ ret = H5Fclose(fid2);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Close the data type */
+ ret = H5Tclose(tid1);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ /* Close FAPL */
+ ret = H5Pclose(fapl);
+ CHECK(ret, FAIL, "H5Pclose");
+} /* end test_file_double_file_dataset_open() */
+
+/****************************************************************
+**
+** test_file_double_datatype_open(): low-level file test routine.
+** This test checks whether opening the same named datatype from two
+** different files works correctly.
+**
+*****************************************************************/
+static void
+test_file_double_datatype_open(void)
+{
+ hid_t file1_id, file2_id;
+ hid_t type1_id, type2_id;
+ herr_t ret; /* Generic return value */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing double datatype open\n"));
+
+ file1_id = H5Fcreate(FILE1, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(file1_id, FAIL, "H5Fcreate");
+ file2_id = H5Fopen(FILE1, H5F_ACC_RDONLY, H5P_DEFAULT);
+ CHECK(file2_id, FAIL, "H5Fopen");
+
+ type1_id = H5Tcopy(H5T_NATIVE_INT);
+ CHECK(type1_id, FAIL, "H5Tcopy");
+ ret = H5Tcommit2(file1_id, TYPE_NAME, type1_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Tcommit2");
+ type2_id = H5Topen2(file2_id, TYPE_NAME, H5P_DEFAULT);
+ CHECK(type2_id, FAIL, "H5Topen2");
+
+ /* Note "asymmetric" close order */
+ ret = H5Tclose(type1_id);
+ CHECK(ret, FAIL, "H5Tclose");
+ ret = H5Tclose(type2_id);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ ret = H5Fclose(file1_id);
+ CHECK(ret, FAIL, "H5Fclose");
+ ret = H5Fclose(file2_id);
+ CHECK(ret, FAIL, "H5Fclose");
+} /* end test_file_double_datatype_open() */
+
+/****************************************************************
+**
+** test_userblock_file_size(): low-level file test routine.
+** This test checks that the presence of a userblock
+** affects the file size in the expected manner, and that
+** the filesize is not changed by reopening the file. It
+** creates two files which are identical except that one
+** contains a userblock, and verifies that their file sizes
+** differ exactly by the userblock size.
+**
+*****************************************************************/
+#if 0
+static void
+test_userblock_file_size(const char *env_h5_drvr)
+{
+ hid_t file1_id, file2_id;
+ hid_t group1_id, group2_id;
+ hid_t dset1_id, dset2_id;
+ hid_t space_id;
+ hid_t fcpl2_id;
+ hsize_t dims[2] = {3, 4};
+#if 0
+ hsize_t filesize1, filesize2, filesize;
+ unsigned long fileno1, fileno2; /* File number */
+#endif
+ herr_t ret; /* Generic return value */
+
+ /* Don't run with multi/split, family or direct drivers */
+ if (!HDstrcmp(env_h5_drvr, "multi") || !HDstrcmp(env_h5_drvr, "split") ||
+ !HDstrcmp(env_h5_drvr, "family") || !HDstrcmp(env_h5_drvr, "direct"))
+ return;
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing file size with user block\n"));
+
+ /* Create property list with userblock size set */
+ fcpl2_id = H5Pcreate(H5P_FILE_CREATE);
+ CHECK(fcpl2_id, FAIL, "H5Pcreate");
+ ret = H5Pset_userblock(fcpl2_id, USERBLOCK_SIZE);
+ CHECK(ret, FAIL, "H5Pset_userblock");
+
+ /* Create files. Only file2 will have a userblock. */
+ file1_id = H5Fcreate(FILE1, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(file1_id, FAIL, "H5Fcreate");
+ file2_id = H5Fcreate(FILE2, H5F_ACC_TRUNC, fcpl2_id, H5P_DEFAULT);
+ CHECK(file2_id, FAIL, "H5Fcreate");
+#if 0
+ /* Check the file numbers */
+ fileno1 = 0;
+ ret = H5Fget_fileno(file1_id, &fileno1);
+ CHECK(ret, FAIL, "H5Fget_fileno");
+ fileno2 = 0;
+ ret = H5Fget_fileno(file2_id, &fileno2);
+ CHECK(ret, FAIL, "H5Fget_fileno");
+ CHECK(fileno1, fileno2, "H5Fget_fileno");
+#endif
+ /* Create groups */
+ group1_id = H5Gcreate2(file1_id, GROUP1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(group1_id, FAIL, "H5Gcreate2");
+ group2_id = H5Gcreate2(file2_id, GROUP1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(group2_id, FAIL, "H5Gcreate2");
+
+ /* Create dataspace */
+ space_id = H5Screate_simple(2, dims, NULL);
+ CHECK(space_id, FAIL, "H5Screate_simple");
+
+ /* Create datasets */
+ dset1_id = H5Dcreate2(file1_id, DSET2, H5T_NATIVE_INT, space_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(dset1_id, FAIL, "H5Dcreate2");
+ dset2_id = H5Dcreate2(file2_id, DSET2, H5T_NATIVE_INT, space_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(dset2_id, FAIL, "H5Dcreate2");
+
+ /* Close IDs */
+ ret = H5Dclose(dset1_id);
+ CHECK(ret, FAIL, "H5Dclose");
+ ret = H5Dclose(dset2_id);
+ CHECK(ret, FAIL, "H5Dclose");
+ ret = H5Sclose(space_id);
+ CHECK(ret, FAIL, "H5Sclose");
+ ret = H5Gclose(group1_id);
+ CHECK(ret, FAIL, "H5Gclose");
+ ret = H5Gclose(group2_id);
+ CHECK(ret, FAIL, "H5Gclose");
+ ret = H5Pclose(fcpl2_id);
+ CHECK(ret, FAIL, "H5Pclose");
+
+ /* Close files */
+ ret = H5Fclose(file1_id);
+ CHECK(ret, FAIL, "H5Fclose");
+ ret = H5Fclose(file2_id);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Reopen files */
+ file1_id = H5Fopen(FILE1, H5F_ACC_RDWR, H5P_DEFAULT);
+ CHECK(file1_id, FAIL, "H5Fopen");
+ file2_id = H5Fopen(FILE2, H5F_ACC_RDWR, H5P_DEFAULT);
+ CHECK(file2_id, FAIL, "H5Fopen");
+#if 0
+ /* Check file sizes */
+ ret = H5Fget_filesize(file1_id, &filesize1);
+ CHECK(ret, FAIL, "H5Fget_filesize");
+ ret = H5Fget_filesize(file2_id, &filesize2);
+ CHECK(ret, FAIL, "H5Fget_filesize");
+
+ /* Verify that the file sizes differ exactly by the userblock size */
+ VERIFY_TYPE((unsigned long long)filesize2, (unsigned long long)(filesize1 + USERBLOCK_SIZE),
+ unsigned long long, "%llu", "H5Fget_filesize");
+#endif
+ /* Close files */
+ ret = H5Fclose(file1_id);
+ CHECK(ret, FAIL, "H5Fclose");
+ ret = H5Fclose(file2_id);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Reopen files */
+ file1_id = H5Fopen(FILE1, H5F_ACC_RDWR, H5P_DEFAULT);
+ CHECK(file1_id, FAIL, "H5Fopen");
+ file2_id = H5Fopen(FILE2, H5F_ACC_RDWR, H5P_DEFAULT);
+ CHECK(file2_id, FAIL, "H5Fopen");
+#if 0
+ /* Verify file sizes did not change */
+ ret = H5Fget_filesize(file1_id, &filesize);
+ CHECK(ret, FAIL, "H5Fget_filesize");
+ VERIFY(filesize, filesize1, "H5Fget_filesize");
+ ret = H5Fget_filesize(file2_id, &filesize);
+ CHECK(ret, FAIL, "H5Fget_filesize");
+ VERIFY(filesize, filesize2, "H5Fget_filesize");
+#endif
+ /* Close files */
+ ret = H5Fclose(file1_id);
+ CHECK(ret, FAIL, "H5Fclose");
+ ret = H5Fclose(file2_id);
+ CHECK(ret, FAIL, "H5Fclose");
+} /* end test_userblock_file_size() */
+#endif
+
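+/* Illustrative sketch (disabled with #if 0, not exercised by the test
+ * suite): recovering the userblock size from an existing file by asking
+ * the file for its creation property list and reading the value back with
+ * H5Pget_userblock(). A returned size of 0 means the file has no
+ * userblock; a non-zero value is the number of bytes that precede the
+ * HDF5 superblock, which is what the (disabled) file-size comparison
+ * above measures.
+ */
+#if 0
+static herr_t
+sketch_get_userblock_size(hid_t file_id, hsize_t *size /*out*/)
+{
+ hid_t fcpl = H5I_INVALID_HID;
+ herr_t ret = FAIL;
+
+ if ((fcpl = H5Fget_create_plist(file_id)) < 0)
+ return FAIL;
+
+ ret = H5Pget_userblock(fcpl, size);
+
+ /* Always release the retrieved property list */
+ (void)H5Pclose(fcpl);
+
+ return ret;
+}
+#endif
+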
+/****************************************************************
+**
+** test_cached_stab_info(): low-level file test routine.
+** This test checks that new files are created with cached
+** symbol table information in the superblock (when using
+** the old format). This is necessary to ensure backwards
+** compatibility with versions from 1.3.0 to 1.6.3.
+**
+*****************************************************************/
+#if 0
+static void
+test_cached_stab_info(void)
+{
+ hid_t file_id;
+ hid_t group_id;
+ herr_t ret; /* Generic return value */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing cached symbol table information\n"));
+
+ /* Create file */
+ file_id = H5Fcreate(FILE1, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(file_id, FAIL, "H5Fcreate");
+
+ /* Create group */
+ group_id = H5Gcreate2(file_id, GROUP1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(group_id, FAIL, "H5Gcreate2");
+
+ /* Close file and group */
+ ret = H5Gclose(group_id);
+ CHECK(ret, FAIL, "H5Gclose");
+ ret = H5Fclose(file_id);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Reopen file */
+ file_id = H5Fopen(FILE1, H5F_ACC_RDONLY, H5P_DEFAULT);
+ CHECK(file_id, FAIL, "H5Fopen");
+#if 0
+ /* Verify the cached symbol table information */
+ ret = H5F__check_cached_stab_test(file_id);
+ CHECK(ret, FAIL, "H5F__check_cached_stab_test");
+#endif
+ /* Close file */
+ ret = H5Fclose(file_id);
+ CHECK(ret, FAIL, "H5Fclose");
+} /* end test_cached_stab_info() */
+#endif
+
+#if 0
+/*
+ * To calculate the checksum for a file.
+ * This is a helper routine for test_rw_noupdate().
+ */
+static int
+cal_chksum(const char *file, uint32_t *chksum)
+{
+ int curr_num_errs = nerrors; /* Retrieve the current # of errors */
+ int fdes = -1; /* File descriptor */
+ void *file_data = NULL; /* Copy of file data */
+ ssize_t bytes_read; /* # of bytes read */
+ h5_stat_t sb; /* Stat buffer for file */
+ herr_t ret; /* Generic return value */
+
+ /* Open the file */
+ fdes = HDopen(file, O_RDONLY);
+ CHECK(fdes, FAIL, "HDopen");
+
+ /* Retrieve the file's size */
+ ret = HDfstat(fdes, &sb);
+ CHECK(fdes, FAIL, "HDfstat");
+
+ /* Allocate space for the file data */
+ file_data = HDmalloc((size_t)sb.st_size);
+ CHECK_PTR(file_data, "HDmalloc");
+
+ if (file_data) {
+ /* Read file's data into memory */
+ bytes_read = HDread(fdes, file_data, (size_t)sb.st_size);
+ CHECK(bytes_read == sb.st_size, FALSE, "HDmalloc");
+
+ /* Calculate checksum */
+ *chksum = H5_checksum_lookup3(file_data, (size_t)sb.st_size, 0);
+
+ /* Free memory */
+ HDfree(file_data);
+ }
+
+ /* Close the file */
+ ret = HDclose(fdes);
+ CHECK(ret, FAIL, "HDclose");
+
+ return ((nerrors == curr_num_errs) ? 0 : -1);
+} /* cal_chksum() */
+#endif
+
+/****************************************************************
+**
+** test_rw_noupdate(): low-level file test routine.
+** This test checks to ensure that opening and closing a file
+** with read/write permissions does not write anything to the
+** file if the file does not change.
+** Due to the implementation of file locking (the status_flags
+** field in the superblock is used), this test was changed to use
+** a checksum instead of a timestamp to verify that the file is
+** not changed.
+**
+** Programmer: Vailin Choi; July 2013
+**
+*****************************************************************/
+#if 0
+static void
+test_rw_noupdate(void)
+{
+ herr_t ret; /* Generic return value */
+ hid_t fid; /* File ID */
+ uint32_t chksum1, chksum2; /* Checksum value */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing to verify that nothing is written if nothing is changed.\n"));
+
+ /* Create and Close a HDF5 File */
+ fid = H5Fcreate(FILE1, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(fid, FAIL, "H5Fcreate");
+
+ /* Close the file */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Calculate checksum for the file */
+ ret = cal_chksum(FILE1, &chksum1);
+ CHECK(ret, FAIL, "cal_chksum");
+
+ /* Open and close File With Read/Write Permission */
+ fid = H5Fopen(FILE1, H5F_ACC_RDWR, H5P_DEFAULT);
+ CHECK(fid, FAIL, "H5Fopen");
+
+ /* Close the file */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Calculate checksum for the file */
+ ret = cal_chksum(FILE1, &chksum2);
+ CHECK(ret, FAIL, "cal_chksum");
+
+ /* The two checksums are the same, i.e. the file is not changed */
+ VERIFY(chksum1, chksum2, "Checksum");
+
+} /* end test_rw_noupdate() */
+#endif
+
+/****************************************************************
+**
+** test_userblock_alignment_helper1(): helper routine for
+** test_userblock_alignment() test, to handle common testing
+**
+** Programmer: Quincey Koziol
+** September 10, 2009
+**
+*****************************************************************/
+#if 0
+static int
+test_userblock_alignment_helper1(hid_t fcpl, hid_t fapl)
+{
+ hid_t fid; /* File ID */
+ int curr_num_errs = nerrors; /* Retrieve the current # of errors */
+ herr_t ret; /* Generic return value */
+
+ /* Create a file with FAPL & FCPL */
+ fid = H5Fcreate(FILE1, H5F_ACC_TRUNC, fcpl, fapl);
+ CHECK(fid, FAIL, "H5Fcreate");
+
+ /* Only proceed further if file ID is OK */
+ if (fid > 0) {
+ hid_t gid; /* Group ID */
+ hid_t sid; /* Dataspace ID */
+ hid_t did; /* Dataset ID */
+ int val = 2; /* Dataset value */
+
+ /* Create a group */
+ gid = H5Gcreate2(fid, "group1", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(gid, FAIL, "H5Gcreate2");
+
+ /* Create a dataset */
+ sid = H5Screate(H5S_SCALAR);
+ CHECK(sid, FAIL, "H5Screate");
+ did = H5Dcreate2(gid, "dataset", H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(did, FAIL, "H5Dcreate2");
+
+ /* Close dataspace */
+ ret = H5Sclose(sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Write value to dataset */
+ ret = H5Dwrite(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, &val);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* Close dataset */
+ ret = H5Dclose(did);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close group */
+ ret = H5Gclose(gid);
+ CHECK(ret, FAIL, "H5Gclose");
+
+ /* Close file */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+ } /* end if */
+
+ return ((nerrors == curr_num_errs) ? 0 : -1);
+} /* end test_userblock_alignment_helper1() */
+
+/****************************************************************
+**
+** test_userblock_alignment_helper2(): helper routine for
+** test_userblock_alignment() test, to handle common testing
+**
+** Programmer: Quincey Koziol
+** September 10, 2009
+**
+*****************************************************************/
+static int
+test_userblock_alignment_helper2(hid_t fapl, hbool_t open_rw)
+{
+ hid_t fid; /* File ID */
+ int curr_num_errs = nerrors; /* Retrieve the current # of errors */
+ herr_t ret; /* Generic return value */
+
+ /* Re-open file */
+ fid = H5Fopen(FILE1, (open_rw ? H5F_ACC_RDWR : H5F_ACC_RDONLY), fapl);
+ CHECK(fid, FAIL, "H5Fopen");
+
+ /* Only proceed further if file ID is OK */
+ if (fid > 0) {
+ hid_t gid; /* Group ID */
+ hid_t did; /* Dataset ID */
+ int val = -1; /* Dataset value */
+
+ /* Open group */
+ gid = H5Gopen2(fid, "group1", H5P_DEFAULT);
+ CHECK(gid, FAIL, "H5Gopen2");
+
+ /* Open dataset */
+ did = H5Dopen2(gid, "dataset", H5P_DEFAULT);
+ CHECK(did, FAIL, "H5Dopen2");
+
+ /* Read value from dataset */
+ ret = H5Dread(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, &val);
+ CHECK(ret, FAIL, "H5Dread");
+ VERIFY(val, 2, "H5Dread");
+
+ /* Close dataset */
+ ret = H5Dclose(did);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Only create new objects if file is open R/W */
+ if (open_rw) {
+ hid_t gid2; /* Group ID */
+
+ /* Create a new group */
+ gid2 = H5Gcreate2(gid, "group2", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(gid, FAIL, "H5Gcreate2");
+
+ /* Close new group */
+ ret = H5Gclose(gid2);
+ CHECK(ret, FAIL, "H5Gclose");
+ } /* end if */
+
+ /* Close group */
+ ret = H5Gclose(gid);
+ CHECK(ret, FAIL, "H5Gclose");
+
+ /* Close file */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+ } /* end if */
+
+ return ((nerrors == curr_num_errs) ? 0 : -1);
+} /* end test_userblock_alignment_helper2() */
+
+/****************************************************************
+**
+** test_userblock_alignment(): low-level file test routine.
+** This test checks to ensure that files with both a userblock and an
+** object [allocation] alignment size set interact properly.
+**
+** Programmer: Quincey Koziol
+** September 8, 2009
+**
+*****************************************************************/
+static void
+test_userblock_alignment(const char *env_h5_drvr)
+{
+ hid_t fid; /* File ID */
+ hid_t fcpl; /* File creation property list ID */
+ hid_t fapl; /* File access property list ID */
+ herr_t ret; /* Generic return value */
+
+ /* Only run with sec2 driver */
+ if (!h5_using_default_driver(env_h5_drvr))
+ return;
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing that non-zero userblocks and object alignment interact correctly.\n"));
+
+ /* Case 1:
+ * Userblock size = 0, alignment != 0
+ * Outcome:
+ * Should succeed
+ */
+ /* Create file creation property list with user block */
+ fcpl = H5Pcreate(H5P_FILE_CREATE);
+ CHECK(fcpl, FAIL, "H5Pcreate");
+ ret = H5Pset_userblock(fcpl, (hsize_t)0);
+ CHECK(ret, FAIL, "H5Pset_userblock");
+
+ /* Create file access property list with alignment */
+ fapl = H5Pcreate(H5P_FILE_ACCESS);
+ CHECK(fapl, FAIL, "H5Pcreate");
+ ret = H5Pset_alignment(fapl, (hsize_t)1, (hsize_t)3);
+ CHECK(ret, FAIL, "H5Pset_alignment");
+
+ /* Call helper routines to perform file manipulations */
+ ret = test_userblock_alignment_helper1(fcpl, fapl);
+ CHECK(ret, FAIL, "test_userblock_alignment_helper1");
+ ret = test_userblock_alignment_helper2(fapl, TRUE);
+ CHECK(ret, FAIL, "test_userblock_alignment_helper2");
+
+ /* Release property lists */
+ ret = H5Pclose(fcpl);
+ CHECK(ret, FAIL, "H5Pclose");
+ ret = H5Pclose(fapl);
+ CHECK(ret, FAIL, "H5Pclose");
+
+ /* Case 2:
+ * Userblock size = 512, alignment = 16
+ * (userblock is integral mult. of alignment)
+ * Outcome:
+ * Should succeed
+ */
+ /* Create file creation property list with user block */
+ fcpl = H5Pcreate(H5P_FILE_CREATE);
+ CHECK(fcpl, FAIL, "H5Pcreate");
+ ret = H5Pset_userblock(fcpl, (hsize_t)512);
+ CHECK(ret, FAIL, "H5Pset_userblock");
+
+ /* Create file access property list with alignment */
+ fapl = H5Pcreate(H5P_FILE_ACCESS);
+ CHECK(fapl, FAIL, "H5Pcreate");
+ ret = H5Pset_alignment(fapl, (hsize_t)1, (hsize_t)16);
+ CHECK(ret, FAIL, "H5Pset_alignment");
+
+ /* Call helper routines to perform file manipulations */
+ ret = test_userblock_alignment_helper1(fcpl, fapl);
+ CHECK(ret, FAIL, "test_userblock_alignment_helper1");
+ ret = test_userblock_alignment_helper2(fapl, TRUE);
+ CHECK(ret, FAIL, "test_userblock_alignment_helper2");
+
+ /* Release property lists */
+ ret = H5Pclose(fcpl);
+ CHECK(ret, FAIL, "H5Pclose");
+ ret = H5Pclose(fapl);
+ CHECK(ret, FAIL, "H5Pclose");
+
+ /* Case 3:
+ * Userblock size = 512, alignment = 512
+ * (userblock is equal to alignment)
+ * Outcome:
+ * Should succeed
+ */
+ /* Create file creation property list with user block */
+ fcpl = H5Pcreate(H5P_FILE_CREATE);
+ CHECK(fcpl, FAIL, "H5Pcreate");
+ ret = H5Pset_userblock(fcpl, (hsize_t)512);
+ CHECK(ret, FAIL, "H5Pset_userblock");
+
+ /* Create file access property list with alignment */
+ fapl = H5Pcreate(H5P_FILE_ACCESS);
+ CHECK(fapl, FAIL, "H5Pcreate");
+ ret = H5Pset_alignment(fapl, (hsize_t)1, (hsize_t)512);
+ CHECK(ret, FAIL, "H5Pset_alignment");
+
+ /* Call helper routines to perform file manipulations */
+ ret = test_userblock_alignment_helper1(fcpl, fapl);
+ CHECK(ret, FAIL, "test_userblock_alignment_helper1");
+ ret = test_userblock_alignment_helper2(fapl, TRUE);
+ CHECK(ret, FAIL, "test_userblock_alignment_helper2");
+
+ /* Release property lists */
+ ret = H5Pclose(fcpl);
+ CHECK(ret, FAIL, "H5Pclose");
+ ret = H5Pclose(fapl);
+ CHECK(ret, FAIL, "H5Pclose");
+
+ /* Case 4:
+ * Userblock size = 512, alignment = 3
+ * (userblock & alignment each individually valid, but userblock is
+ * non-integral multiple of alignment)
+ * Outcome:
+ * Should fail at file creation
+ */
+ /* Create file creation property list with user block */
+ fcpl = H5Pcreate(H5P_FILE_CREATE);
+ CHECK(fcpl, FAIL, "H5Pcreate");
+ ret = H5Pset_userblock(fcpl, (hsize_t)512);
+ CHECK(ret, FAIL, "H5Pset_userblock");
+
+ /* Create file access property list with alignment */
+ fapl = H5Pcreate(H5P_FILE_ACCESS);
+ CHECK(fapl, FAIL, "H5Pcreate");
+ ret = H5Pset_alignment(fapl, (hsize_t)1, (hsize_t)3);
+ CHECK(ret, FAIL, "H5Pset_alignment");
+
+ /* Create a file with FAPL & FCPL */
+ H5E_BEGIN_TRY
+ {
+ fid = H5Fcreate(FILE1, H5F_ACC_TRUNC, fcpl, fapl);
+ }
+ H5E_END_TRY;
+ VERIFY(fid, FAIL, "H5Fcreate");
+
+ /* Release property lists */
+ ret = H5Pclose(fcpl);
+ CHECK(ret, FAIL, "H5Pclose");
+ ret = H5Pclose(fapl);
+ CHECK(ret, FAIL, "H5Pclose");
+
+ /* Case 5:
+ * Userblock size = 512, alignment = 1024
+ * (userblock & alignment each individually valid, but userblock is
+ * less than alignment)
+ * Outcome:
+ * Should fail at file creation
+ */
+ /* Create file creation property list with user block */
+ fcpl = H5Pcreate(H5P_FILE_CREATE);
+ CHECK(fcpl, FAIL, "H5Pcreate");
+ ret = H5Pset_userblock(fcpl, (hsize_t)512);
+ CHECK(ret, FAIL, "H5Pset_userblock");
+
+ /* Create file access property list with alignment */
+ fapl = H5Pcreate(H5P_FILE_ACCESS);
+ CHECK(fapl, FAIL, "H5Pcreate");
+ ret = H5Pset_alignment(fapl, (hsize_t)1, (hsize_t)1024);
+ CHECK(ret, FAIL, "H5Pset_alignment");
+
+ /* Create a file with FAPL & FCPL */
+ H5E_BEGIN_TRY
+ {
+ fid = H5Fcreate(FILE1, H5F_ACC_TRUNC, fcpl, fapl);
+ }
+ H5E_END_TRY;
+ VERIFY(fid, FAIL, "H5Fcreate");
+
+ /* Release property lists */
+ ret = H5Pclose(fcpl);
+ CHECK(ret, FAIL, "H5Pclose");
+ ret = H5Pclose(fapl);
+ CHECK(ret, FAIL, "H5Pclose");
+
+ /* Case 6:
+ * File created with:
+ * Userblock size = 512, alignment = 512
+ * File re-opened for read-only & read-write access with:
+ * Userblock size = 512, alignment = 1024
+ * Outcome:
+ * Should succeed
+ */
+ /* Create file creation property list with user block */
+ fcpl = H5Pcreate(H5P_FILE_CREATE);
+ CHECK(fcpl, FAIL, "H5Pcreate");
+ ret = H5Pset_userblock(fcpl, (hsize_t)512);
+ CHECK(ret, FAIL, "H5Pset_userblock");
+
+ /* Create file access property list with alignment */
+ fapl = H5Pcreate(H5P_FILE_ACCESS);
+ CHECK(fapl, FAIL, "H5Pcreate");
+ ret = H5Pset_alignment(fapl, (hsize_t)1, (hsize_t)512);
+ CHECK(ret, FAIL, "H5Pset_alignment");
+
+ /* Call helper routines to perform file manipulations */
+ ret = test_userblock_alignment_helper1(fcpl, fapl);
+ CHECK(ret, FAIL, "test_userblock_alignment_helper1");
+
+ /* Change alignment in FAPL */
+ ret = H5Pset_alignment(fapl, (hsize_t)1, (hsize_t)1024);
+ CHECK(ret, FAIL, "H5Pset_alignment");
+
+ /* Call helper routines to perform file manipulations */
+ ret = test_userblock_alignment_helper2(fapl, FALSE);
+ CHECK(ret, FAIL, "test_userblock_alignment_helper2");
+ ret = test_userblock_alignment_helper2(fapl, TRUE);
+ CHECK(ret, FAIL, "test_userblock_alignment_helper2");
+
+ /* Release property lists */
+ ret = H5Pclose(fcpl);
+ CHECK(ret, FAIL, "H5Pclose");
+ ret = H5Pclose(fapl);
+ CHECK(ret, FAIL, "H5Pclose");
+} /* end test_userblock_alignment() */
+
+/****************************************************************
+**
+** test_userblock_alignment_paged(): low-level file test routine.
+** This test checks that a file's userblock and alignment settings
+** interact properly:
+** -- alignment via H5Pset_alignment
+** -- alignment via paged aggregation
+**
+** Programmer: Vailin Choi; March 2013
+**
+*****************************************************************/
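+/* Note: the cases below exercise the constraint enforced at file creation
+ * time: a nonzero userblock must be an integral multiple of (and therefore
+ * at least as large as) the alignment actually in effect.  Under the paged
+ * aggregation strategy the effective alignment is the file space page size
+ * and the H5Pset_alignment() setting has no effect; otherwise the
+ * H5Pset_alignment() value applies.
+ */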
+static void
+test_userblock_alignment_paged(const char *env_h5_drvr)
+{
+ hid_t fid; /* File ID */
+ hid_t fcpl; /* File creation property list ID */
+ hid_t fapl; /* File access property list ID */
+ herr_t ret; /* Generic return value */
+
+ /* Only run with sec2 driver */
+ if (!h5_using_default_driver(env_h5_drvr))
+ return;
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing interaction between userblock and alignment (via paged aggregation and "
+ "H5Pset_alignment)\n"));
+
+ /*
+ * Case 1:
+ * Userblock size = 0
+ * Alignment in use = 4096
+ * Strategy = H5F_FILE_SPACE_PAGE; fsp_size = alignment = 4096
+ * Outcome:
+ * Should succeed:
+ * userblock is 0 and alignment != 0
+ */
+ /* Create file creation property list with user block */
+ fcpl = H5Pcreate(H5P_FILE_CREATE);
+ CHECK(fcpl, FAIL, "H5Pcreate");
+ ret = H5Pset_userblock(fcpl, (hsize_t)0);
+ CHECK(ret, FAIL, "H5Pset_userblock");
+
+ /* Create file access property list */
+ fapl = H5Pcreate(H5P_FILE_ACCESS);
+ CHECK(fapl, FAIL, "H5Pcreate");
+
+ /* Set the "use the latest version of the format" bounds */
+ ret = H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST);
+ CHECK(ret, FAIL, "H5Pset_libver_bounds");
+
+ ret = H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_PAGE, FALSE, (hsize_t)1);
+ CHECK(ret, FAIL, "H5Pset_file_space_strategy");
+
+ /* Call helper routines to perform file manipulations */
+ ret = test_userblock_alignment_helper1(fcpl, fapl);
+ CHECK(ret, FAIL, "test_userblock_alignment_helper1");
+ ret = test_userblock_alignment_helper2(fapl, TRUE);
+ CHECK(ret, FAIL, "test_userblock_alignment_helper2");
+
+ /* Release property lists */
+ ret = H5Pclose(fcpl);
+ CHECK(ret, FAIL, "H5Pclose");
+ ret = H5Pclose(fapl);
+ CHECK(ret, FAIL, "H5Pclose");
+
+ /*
+ * Case 2a:
+ * Userblock size = 1024
+ * Alignment in use = 512
+ * Strategy = H5F_FILE_SPACE_PAGE; fsp_size = alignment = 512
+ * H5Pset_alignment() is 3
+ * Outcome:
+ * Should succeed:
+ * userblock (1024) is integral mult. of alignment (512)
+ */
+ /* Create file creation property list with user block */
+ fcpl = H5Pcreate(H5P_FILE_CREATE);
+ CHECK(fcpl, FAIL, "H5Pcreate");
+ ret = H5Pset_userblock(fcpl, (hsize_t)1024);
+ CHECK(ret, FAIL, "H5Pset_userblock");
+ ret = H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_PAGE, FALSE, (hsize_t)1);
+ CHECK(ret, FAIL, "H5Pset_file_space_strategy");
+ ret = H5Pset_file_space_page_size(fcpl, (hsize_t)512);
+ CHECK(ret, FAIL, "H5Pset_file_space_page_size");
+
+ /* Create file access property list */
+ fapl = H5Pcreate(H5P_FILE_ACCESS);
+ CHECK(fapl, FAIL, "H5Pcreate");
+ ret = H5Pset_alignment(fapl, (hsize_t)1, (hsize_t)3);
+ CHECK(ret, FAIL, "H5Pset_alignment");
+
+ /* Call helper routines to perform file manipulations */
+ ret = test_userblock_alignment_helper1(fcpl, fapl);
+ CHECK(ret, FAIL, "test_userblock_alignment_helper1");
+ ret = test_userblock_alignment_helper2(fapl, TRUE);
+ CHECK(ret, FAIL, "test_userblock_alignment_helper2");
+
+ /* Release property lists */
+ ret = H5Pclose(fcpl);
+ CHECK(ret, FAIL, "H5Pclose");
+ ret = H5Pclose(fapl);
+ CHECK(ret, FAIL, "H5Pclose");
+
+ /*
+ * Case 2b:
+ * Userblock size = 1024
+ * Alignment in use = 3
+ * Strategy = H5F_FILE_SPACE_AGGR; fsp_size = 512
+ * (via default file creation property)
+ * H5Pset_alignment() is 3
+ * Outcome:
+ * Should fail at file creation:
+ * userblock (1024) is non-integral mult. of alignment (3)
+ */
+ /* Create file creation property list with user block */
+ fcpl = H5Pcreate(H5P_FILE_CREATE);
+ CHECK(fcpl, FAIL, "H5Pcreate");
+ ret = H5Pset_userblock(fcpl, (hsize_t)1024);
+ CHECK(ret, FAIL, "H5Pset_userblock");
+ ret = H5Pset_file_space_page_size(fcpl, (hsize_t)512);
+ CHECK(ret, FAIL, "H5Pset_file_space_page_size");
+
+ /* Create file access property list */
+ fapl = H5Pcreate(H5P_FILE_ACCESS);
+ CHECK(fapl, FAIL, "H5Pcreate");
+ ret = H5Pset_alignment(fapl, (hsize_t)1, (hsize_t)3);
+ CHECK(ret, FAIL, "H5Pset_alignment");
+
+ /* Create a file with FAPL & FCPL */
+ H5E_BEGIN_TRY
+ {
+ fid = H5Fcreate(FILE1, H5F_ACC_TRUNC, fcpl, fapl);
+ }
+ H5E_END_TRY;
+ VERIFY(fid, FAIL, "H5Fcreate");
+
+ /* Release property lists */
+ ret = H5Pclose(fcpl);
+ CHECK(ret, FAIL, "H5Pclose");
+ ret = H5Pclose(fapl);
+ CHECK(ret, FAIL, "H5Pclose");
+
+ /*
+ * Case 3a:
+ * Userblock size = 512
+ * Alignment in use = 512
+ * Strategy is H5F_FILE_SPACE_PAGE; fsp_size = alignment = 512
+ * H5Pset_alignment() is 3
+ * Outcome:
+ * Should succeed:
+ * userblock (512) is equal to alignment (512)
+ */
+ /* Create file creation property list with user block */
+ fcpl = H5Pcreate(H5P_FILE_CREATE);
+ CHECK(fcpl, FAIL, "H5Pcreate");
+ ret = H5Pset_userblock(fcpl, (hsize_t)512);
+ CHECK(ret, FAIL, "H5Pset_userblock");
+ ret = H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_PAGE, TRUE, (hsize_t)1);
+ CHECK(ret, FAIL, "H5Pset_file_space_strategy");
+ ret = H5Pset_file_space_page_size(fcpl, (hsize_t)512);
+ CHECK(ret, FAIL, "H5Pset_file_space_page_size");
+
+ /* Create file access property list with alignment */
+ fapl = H5Pcreate(H5P_FILE_ACCESS);
+ CHECK(fapl, FAIL, "H5Pcreate");
+ ret = H5Pset_alignment(fapl, (hsize_t)1, (hsize_t)3);
+ CHECK(ret, FAIL, "H5Pset_alignment");
+
+ /* Call helper routines to perform file manipulations */
+ ret = test_userblock_alignment_helper1(fcpl, fapl);
+ CHECK(ret, FAIL, "test_userblock_alignment_helper1");
+ ret = test_userblock_alignment_helper2(fapl, TRUE);
+ CHECK(ret, FAIL, "test_userblock_alignment_helper2");
+
+ /* Release property lists */
+ ret = H5Pclose(fcpl);
+ CHECK(ret, FAIL, "H5Pclose");
+ ret = H5Pclose(fapl);
+ CHECK(ret, FAIL, "H5Pclose");
+
+ /*
+ * Case 3b:
+ * Userblock size = 512
+ * Alignment in use = 3
+ * Strategy is H5F_FILE_SPACE_NONE; fsp_size = 512
+ * H5Pset_alignment() is 3
+ * Outcome:
+ * Should fail at file creation:
+ * userblock (512) is non-integral mult. of alignment (3)
+ */
+ /* Create file creation property list with user block */
+ fcpl = H5Pcreate(H5P_FILE_CREATE);
+ CHECK(fcpl, FAIL, "H5Pcreate");
+ ret = H5Pset_userblock(fcpl, (hsize_t)512);
+ CHECK(ret, FAIL, "H5Pset_userblock");
+ ret = H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_NONE, FALSE, (hsize_t)1);
+ CHECK(ret, FAIL, "H5Pset_file_space_strategy");
+ ret = H5Pset_file_space_page_size(fcpl, (hsize_t)512);
+ CHECK(ret, FAIL, "H5Pset_file_space_page_size");
+
+ /* Create file access property list with alignment */
+ fapl = H5Pcreate(H5P_FILE_ACCESS);
+ CHECK(fapl, FAIL, "H5Pcreate");
+ ret = H5Pset_alignment(fapl, (hsize_t)1, (hsize_t)3);
+ CHECK(ret, FAIL, "H5Pset_alignment");
+
+ /* Create a file with FAPL & FCPL */
+ H5E_BEGIN_TRY
+ {
+ fid = H5Fcreate(FILE1, H5F_ACC_TRUNC, fcpl, fapl);
+ }
+ H5E_END_TRY;
+ VERIFY(fid, FAIL, "H5Fcreate");
+
+ /* Release property lists */
+ ret = H5Pclose(fcpl);
+ CHECK(ret, FAIL, "H5Pclose");
+ ret = H5Pclose(fapl);
+ CHECK(ret, FAIL, "H5Pclose");
+
+ /*
+ * Case 4a:
+ * Userblock size = 1024
+ * Alignment in use = 1023
+ * Strategy is H5F_FILE_SPACE_PAGE; fsp_size = alignment = 1023
+ * H5Pset_alignment() is 16
+ * Outcome:
+ * Should fail at file creation:
+ * userblock (1024) is non-integral multiple of alignment (1023)
+ */
+ /* Create file creation property list with user block */
+ fcpl = H5Pcreate(H5P_FILE_CREATE);
+ CHECK(fcpl, FAIL, "H5Pcreate");
+ ret = H5Pset_userblock(fcpl, (hsize_t)1024);
+ CHECK(ret, FAIL, "H5Pset_userblock");
+ ret = H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_PAGE, TRUE, (hsize_t)1);
+ CHECK(ret, FAIL, "H5Pset_file_space_strategy");
+ ret = H5Pset_file_space_page_size(fcpl, (hsize_t)1023);
+ CHECK(ret, FAIL, "H5Pset_file_space_page_size");
+
+ /* Create file access property list with alignment */
+ fapl = H5Pcreate(H5P_FILE_ACCESS);
+ CHECK(fapl, FAIL, "H5Pcreate");
+ ret = H5Pset_alignment(fapl, (hsize_t)1, (hsize_t)16);
+ CHECK(ret, FAIL, "H5Pset_alignment");
+
+ /* Create a file with FAPL & FCPL */
+ H5E_BEGIN_TRY
+ {
+ fid = H5Fcreate(FILE1, H5F_ACC_TRUNC, fcpl, fapl);
+ }
+ H5E_END_TRY;
+ VERIFY(fid, FAIL, "H5Fcreate");
+
+ /* Release property lists */
+ ret = H5Pclose(fcpl);
+ CHECK(ret, FAIL, "H5Pclose");
+ ret = H5Pclose(fapl);
+ CHECK(ret, FAIL, "H5Pclose");
+
+ /*
+ * Case 4b:
+ * Userblock size = 1024
+ * Alignment in use = 16
+ * Strategy is H5F_FILE_SPACE_FSM_AGGR; fsp_size = 1023
+ * H5Pset_alignment() is 16
+ * Outcome:
+ * Should succeed:
+ * userblock (1024) is integral multiple of alignment (16)
+ */
+ /* Create file creation property list with user block */
+ fcpl = H5Pcreate(H5P_FILE_CREATE);
+ CHECK(fcpl, FAIL, "H5Pcreate");
+ ret = H5Pset_userblock(fcpl, (hsize_t)1024);
+ CHECK(ret, FAIL, "H5Pset_userblock");
+ ret = H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_FSM_AGGR, FALSE, (hsize_t)1);
+ CHECK(ret, FAIL, "H5Pset_file_space_strategy");
+ ret = H5Pset_file_space_page_size(fcpl, (hsize_t)1023);
+ CHECK(ret, FAIL, "H5Pset_file_space_page_size");
+
+ /* Create file access property list with alignment */
+ fapl = H5Pcreate(H5P_FILE_ACCESS);
+ CHECK(fapl, FAIL, "H5Pcreate");
+ ret = H5Pset_alignment(fapl, (hsize_t)1, (hsize_t)16);
+ CHECK(ret, FAIL, "H5Pset_alignment");
+
+ /* Call helper routines to perform file manipulations */
+ ret = test_userblock_alignment_helper1(fcpl, fapl);
+ CHECK(ret, FAIL, "test_userblock_alignment_helper1");
+ ret = test_userblock_alignment_helper2(fapl, TRUE);
+ CHECK(ret, FAIL, "test_userblock_alignment_helper2");
+
+ /* Release property lists */
+ ret = H5Pclose(fcpl);
+ CHECK(ret, FAIL, "H5Pclose");
+ ret = H5Pclose(fapl);
+ CHECK(ret, FAIL, "H5Pclose");
+
+ /*
+ * Case 5a:
+ * Userblock size = 512
+ * Alignment in use = 1024
+ * Strategy is H5F_FILE_SPACE_PAGE; fsp_size = alignment = 1024
+ * H5Pset_alignment() is 16
+ * Outcome:
+ * Should fail at file creation:
+ * userblock (512) is less than alignment (1024)
+ */
+ /* Create file creation property list with user block */
+ fcpl = H5Pcreate(H5P_FILE_CREATE);
+ CHECK(fcpl, FAIL, "H5Pcreate");
+ ret = H5Pset_userblock(fcpl, (hsize_t)512);
+ CHECK(ret, FAIL, "H5Pset_userblock");
+ ret = H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_PAGE, FALSE, (hsize_t)1);
+ CHECK(ret, FAIL, "H5Pset_file_space_strategy");
+ ret = H5Pset_file_space_page_size(fcpl, (hsize_t)1024);
+ CHECK(ret, FAIL, "H5Pset_file_space_page_size");
+
+ /* Create file access property list with alignment */
+ fapl = H5Pcreate(H5P_FILE_ACCESS);
+ CHECK(fapl, FAIL, "H5Pcreate");
+ ret = H5Pset_alignment(fapl, (hsize_t)1, (hsize_t)16);
+ CHECK(ret, FAIL, "H5Pset_alignment");
+
+ /* Create a file with FAPL & FCPL */
+ H5E_BEGIN_TRY
+ {
+ fid = H5Fcreate(FILE1, H5F_ACC_TRUNC, fcpl, fapl);
+ }
+ H5E_END_TRY;
+ VERIFY(fid, FAIL, "H5Fcreate");
+
+ /* Release property lists */
+ ret = H5Pclose(fcpl);
+ CHECK(ret, FAIL, "H5Pclose");
+ ret = H5Pclose(fapl);
+ CHECK(ret, FAIL, "H5Pclose");
+
+ /*
+ * Case 5b:
+ * Userblock size = 512
+ * Alignment in use = 16
+ * Strategy is H5F_FILE_SPACE_NONE; fsp_size = 1024
+ * H5Pset_alignment() is 16
+ * Outcome:
+ * Should succeed:
+ * userblock (512) is integral multiple of alignment (16)
+ */
+ /* Create file creation property list with user block */
+ fcpl = H5Pcreate(H5P_FILE_CREATE);
+ CHECK(fcpl, FAIL, "H5Pcreate");
+ ret = H5Pset_userblock(fcpl, (hsize_t)512);
+ CHECK(ret, FAIL, "H5Pset_userblock");
+ ret = H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_NONE, FALSE, (hsize_t)1);
+ CHECK(ret, FAIL, "H5Pset_file_space_strategy");
+ ret = H5Pset_file_space_page_size(fcpl, (hsize_t)1024);
+ CHECK(ret, FAIL, "H5Pset_file_space_page_size");
+
+ /* Create file access property list with alignment */
+ fapl = H5Pcreate(H5P_FILE_ACCESS);
+ CHECK(fapl, FAIL, "H5Pcreate");
+ ret = H5Pset_alignment(fapl, (hsize_t)1, (hsize_t)16);
+ CHECK(ret, FAIL, "H5Pset_alignment");
+
+ /* Call helper routines to perform file manipulations */
+ ret = test_userblock_alignment_helper1(fcpl, fapl);
+ CHECK(ret, FAIL, "test_userblock_alignment_helper1");
+ ret = test_userblock_alignment_helper2(fapl, TRUE);
+ CHECK(ret, FAIL, "test_userblock_alignment_helper2");
+
+ /* Release property lists */
+ ret = H5Pclose(fcpl);
+ CHECK(ret, FAIL, "H5Pclose");
+ ret = H5Pclose(fapl);
+ CHECK(ret, FAIL, "H5Pclose");
+
+ /*
+ * Case 6:
+ * Userblock size = 512
+ * Alignment in use = 512
+ * Strategy is H5F_FILE_SPACE_PAGE; fsp_size = alignment = 512
+ * H5Pset_alignment() is 3
+ * Reopen the file; H5Pset_alignment() is 1024
+ * Outcome:
+ * Should succeed:
+ * Userblock (512) is the same as alignment (512);
+ * The H5Pset_alignment() calls have no effect
+ */
+ /* Create file creation property list with user block */
+ fcpl = H5Pcreate(H5P_FILE_CREATE);
+ CHECK(fcpl, FAIL, "H5Pcreate");
+ ret = H5Pset_userblock(fcpl, (hsize_t)512);
+ CHECK(ret, FAIL, "H5Pset_userblock");
+ ret = H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_PAGE, FALSE, (hsize_t)1);
+ CHECK(ret, FAIL, "H5Pset_file_space_strategy");
+ ret = H5Pset_file_space_page_size(fcpl, (hsize_t)512);
+ CHECK(ret, FAIL, "H5Pset_file_space_page_size");
+
+ /* Create file access property list with alignment */
+ fapl = H5Pcreate(H5P_FILE_ACCESS);
+ CHECK(fapl, FAIL, "H5Pcreate");
+ ret = H5Pset_alignment(fapl, (hsize_t)1, (hsize_t)3);
+ CHECK(ret, FAIL, "H5Pset_alignment");
+
+ /* Call helper routines to perform file manipulations */
+ ret = test_userblock_alignment_helper1(fcpl, fapl);
+ CHECK(ret, FAIL, "test_userblock_alignment_helper1");
+
+ /* Change alignment in FAPL */
+ ret = H5Pset_alignment(fapl, (hsize_t)1, (hsize_t)1024);
+ CHECK(ret, FAIL, "H5Pset_alignment");
+
+ /* Call helper routines to perform file manipulations */
+ ret = test_userblock_alignment_helper2(fapl, FALSE);
+ CHECK(ret, FAIL, "test_userblock_alignment_helper2");
+ ret = test_userblock_alignment_helper2(fapl, TRUE);
+ CHECK(ret, FAIL, "test_userblock_alignment_helper2");
+
+ /* Release property lists */
+ ret = H5Pclose(fcpl);
+ CHECK(ret, FAIL, "H5Pclose");
+ ret = H5Pclose(fapl);
+ CHECK(ret, FAIL, "H5Pclose");
+
+} /* end test_userblock_alignment_paged() */
+#endif
+
+/****************************************************************
+**
+** test_filespace_info():
+** Verify the following public routines retrieve and set file space
+** information correctly:
+** (1) H5Pget/set_file_space_strategy():
+** Retrieve and set file space strategy, persisting free-space,
+** and free-space section threshold as specified
+** (2) H5Pget/set_file_space_page_size():
+** Retrieve and set the page size for paged aggregation
+**
+****************************************************************/
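+/* A minimal sketch of the call pattern exercised below (the file name and
+ * values are illustrative only):
+ *
+ *     fcpl = H5Pcreate(H5P_FILE_CREATE);
+ *     H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_PAGE, TRUE, (hsize_t)1);
+ *     H5Pset_file_space_page_size(fcpl, (hsize_t)4096);
+ *     fid = H5Fcreate("example.h5", H5F_ACC_TRUNC, fcpl, H5P_DEFAULT);
+ *
+ * The settings are read back with H5Fget_create_plist() and the matching
+ * H5Pget_* routines, which is what the test below verifies.
+ */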
+#if 0
+static void
+test_filespace_info(const char *env_h5_drvr)
+{
+ hid_t fid; /* File IDs */
+ hid_t fapl, new_fapl; /* File access property lists */
+ hid_t fcpl, fcpl1, fcpl2; /* File creation property lists */
+ H5F_fspace_strategy_t strategy; /* File space strategy */
+ hbool_t persist; /* Persist free-space or not */
+ hsize_t threshold; /* Free-space section threshold */
+ unsigned new_format; /* New or old format */
+ H5F_fspace_strategy_t fs_strategy; /* File space strategy--iteration variable */
+ unsigned fs_persist; /* Persist free-space or not--iteration variable */
+ hsize_t fs_threshold; /* Free-space section threshold--iteration variable */
+ hsize_t fsp_size; /* File space page size */
+ char filename[FILENAME_LEN]; /* Filename to use */
+ hbool_t contig_addr_vfd; /* Whether VFD used has a contiguous address space */
+ herr_t ret; /* Return value */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing file creation public routines: H5Pget/set_file_space_strategy & "
+ "H5Pget/set_file_space_page_size\n"));
+
+ contig_addr_vfd = (hbool_t)(HDstrcmp(env_h5_drvr, "split") != 0 && HDstrcmp(env_h5_drvr, "multi") != 0);
+
+ fapl = h5_fileaccess();
+ h5_fixname(FILESPACE_NAME[0], fapl, filename, sizeof filename);
+
+ /* Get a copy of the file access property list */
+ new_fapl = H5Pcopy(fapl);
+ CHECK(new_fapl, FAIL, "H5Pcopy");
+
+ /* Set the "use the latest version of the format" bounds */
+ ret = H5Pset_libver_bounds(new_fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST);
+ CHECK(ret, FAIL, "H5Pset_libver_bounds");
+
+ /*
+ * Case (1)
+ * Check file space information from a default file creation property list.
+ * Values expected:
+ * strategy--H5F_FSPACE_STRATEGY_FSM_AGGR
+ * persist--FALSE
+ * threshold--1
+ * file space page size--4096
+ */
+ /* Create file creation property list template */
+ fcpl = H5Pcreate(H5P_FILE_CREATE);
+ CHECK(fcpl, FAIL, "H5Pcreate");
+
+ /* Retrieve file space information */
+ ret = H5Pget_file_space_strategy(fcpl, &strategy, &persist, &threshold);
+ CHECK(ret, FAIL, "H5Pget_file_space_strategy");
+
+ /* Verify file space information */
+ VERIFY(strategy, H5F_FSPACE_STRATEGY_FSM_AGGR, "H5Pget_file_space_strategy");
+ VERIFY(persist, FALSE, "H5Pget_file_space_strategy");
+ VERIFY(threshold, 1, "H5Pget_file_space_strategy");
+
+ /* Retrieve file space page size */
+ ret = H5Pget_file_space_page_size(fcpl, &fsp_size);
+ CHECK(ret, FAIL, "H5Pget_file_space_page_size");
+ VERIFY(fsp_size, FSP_SIZE_DEF, "H5Pget_file_space_page_size");
+
+ /* Close property list */
+ H5Pclose(fcpl);
+
+ /*
+ * Case (2)
+ * File space page size has a minimum size of 512.
+ * Setting value less than 512 will return an error;
+ * --setting file space page size to 0
+ * --setting file space page size to 511
+ *
+ * File space page size has a maximum size of 1 gigabyte.
+ * Setting value greater than 1 gigabyte will return an error.
+ */
+ /* Create file creation property list template */
+ fcpl = H5Pcreate(H5P_FILE_CREATE);
+ CHECK(fcpl, FAIL, "H5Pcreate");
+
+ /* Setting to 0: should fail */
+ H5E_BEGIN_TRY
+ {
+ ret = H5Pset_file_space_page_size(fcpl, 0);
+ }
+ H5E_END_TRY;
+ VERIFY(ret, FAIL, "H5Pset_file_space_page_size");
+
+ /* Setting to 511: should fail */
+ H5E_BEGIN_TRY
+ {
+ ret = H5Pset_file_space_page_size(fcpl, 511);
+ }
+ H5E_END_TRY;
+ VERIFY(ret, FAIL, "H5Pset_file_space_page_size");
+
+ /* Setting to 1GB+1: should fail */
+ H5E_BEGIN_TRY
+ {
+ ret = H5Pset_file_space_page_size(fcpl, FSP_SIZE1G + 1);
+ }
+ H5E_END_TRY;
+ VERIFY(ret, FAIL, "H5Pset_file_space_page_size");
+
+ /* Setting to 512: should succeed */
+ ret = H5Pset_file_space_page_size(fcpl, FSP_SIZE512);
+ CHECK(ret, FAIL, "H5Pset_file_space_page_size");
+ ret = H5Pget_file_space_page_size(fcpl, &fsp_size);
+ CHECK(ret, FAIL, "H5Pget_file_space_page_size");
+ VERIFY(fsp_size, FSP_SIZE512, "H5Pget_file_space_page_size");
+
+ /* Setting to 1GB: should succeed */
+ ret = H5Pset_file_space_page_size(fcpl, FSP_SIZE1G);
+ CHECK(ret, FAIL, "H5Pset_file_space_page_size");
+ ret = H5Pget_file_space_page_size(fcpl, &fsp_size);
+ CHECK(ret, FAIL, "H5Pget_file_space_page_size");
+ VERIFY(fsp_size, FSP_SIZE1G, "H5Pget_file_space_page_size");
+
+ /* Close property list */
+ H5Pclose(fcpl);
+
+ /*
+ * Case (3)
+ * Check file space information when creating a file with default properties.
+ * Values expected:
+ * strategy--H5F_FSPACE_STRATEGY_FSM_AGGR
+ * persist--FALSE
+ * threshold--1
+ * file space page size--4096
+ */
+ /* Create a file with default file creation and access property lists */
+ fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(fid, FAIL, "H5Fcreate");
+
+ /* Get the file's creation property list */
+ fcpl1 = H5Fget_create_plist(fid);
+ CHECK(fcpl1, FAIL, "H5Fget_create_plist");
+
+ /* Retrieve file space information */
+ ret = H5Pget_file_space_strategy(fcpl1, &strategy, &persist, &threshold);
+ CHECK(ret, FAIL, "H5Pget_file_space_strategy");
+
+ /* Verify file space information */
+ VERIFY(strategy, H5F_FSPACE_STRATEGY_FSM_AGGR, "H5Pget_file_space_strategy");
+ VERIFY(persist, FALSE, "H5Pget_file_space_strategy");
+ VERIFY(threshold, 1, "H5Pget_file_space_strategy");
+
+ /* Retrieve file space page size */
+ ret = H5Pget_file_space_page_size(fcpl1, &fsp_size);
+ CHECK(ret, FAIL, "H5Pget_file_space_page_size");
+ VERIFY(fsp_size, FSP_SIZE_DEF, "H5Pget_file_space_page_size");
+
+ /* Close property lists */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+ ret = H5Pclose(fcpl1);
+ CHECK(ret, FAIL, "H5Pclose");
+
+ /*
+ * Case (4)
+ * Check file space information when creating a file with the
+ * latest library format and default properties.
+ * Values expected:
+ * strategy--H5F_FSPACE_STRATEGY_FSM_AGGR
+ * persist--FALSE
+ * threshold--1
+ * file space page size--4096
+ */
+ /* Create a file with the latest library format */
+ fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, new_fapl);
+ CHECK(fid, FAIL, "H5Fcreate");
+
+ /* Get the file's creation property */
+ fcpl1 = H5Fget_create_plist(fid);
+ CHECK(fcpl1, FAIL, "H5Fget_create_plist");
+
+ /* Retrieve file space information */
+ ret = H5Pget_file_space_strategy(fcpl1, &strategy, &persist, &threshold);
+ CHECK(ret, FAIL, "H5Pget_file_space_strategy");
+
+ /* Verify file space information */
+ VERIFY(strategy, H5F_FSPACE_STRATEGY_FSM_AGGR, "H5Pget_file_space_strategy");
+ VERIFY(persist, FALSE, "H5Pget_file_space_strategy");
+ VERIFY(threshold, 1, "H5Pget_file_space_strategy");
+
+ /* Retrieve file space page size */
+ ret = H5Pget_file_space_page_size(fcpl1, &fsp_size);
+ CHECK(ret, FAIL, "H5Pget_file_space_page_size");
+ VERIFY(fsp_size, FSP_SIZE_DEF, "H5Pget_file_space_page_size");
+
+ /* Close property lists */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+ ret = H5Pclose(fcpl1);
+ CHECK(ret, FAIL, "H5Pclose");
+
+ /*
+ * Case (5)
+ * Check file space information with the following combinations:
+ * Create file with --
+ * New or old format
+ * Persist or not persist free-space
+ * Different sizes for free-space section threshold (0 to 10)
+ * The four file space strategies:
+ * H5F_FSPACE_STRATEGY_FSM_AGGR, H5F_FSPACE_STRATEGY_PAGE,
+ * H5F_FSPACE_STRATEGY_AGGR, H5F_FSPACE_STRATEGY_NONE
+ * File space page size: set to 512
+ *
+ */
+ for (new_format = FALSE; new_format <= TRUE; new_format++) {
+ hid_t my_fapl;
+
+ /* Set the FAPL for the type of format */
+ if (new_format) {
+ MESSAGE(5, ("Testing with new group format\n"));
+ my_fapl = new_fapl;
+ } /* end if */
+ else {
+ MESSAGE(5, ("Testing with old group format\n"));
+ my_fapl = fapl;
+ } /* end else */
+
+ /* Test with TRUE or FALSE for persisting free-space */
+ for (fs_persist = FALSE; fs_persist <= TRUE; fs_persist++) {
+
+ /* Test with free-space section threshold size: 0 to 10 */
+ for (fs_threshold = 0; fs_threshold <= TEST_THRESHOLD10; fs_threshold++) {
+
+ /* Test with 4 file space strategies */
+ for (fs_strategy = H5F_FSPACE_STRATEGY_FSM_AGGR; fs_strategy < H5F_FSPACE_STRATEGY_NTYPES;
+ fs_strategy++) {
+
+ if (!contig_addr_vfd && (fs_strategy == H5F_FSPACE_STRATEGY_PAGE || fs_persist))
+ continue;
+
+ /* Create file creation property list template */
+ fcpl = H5Pcreate(H5P_FILE_CREATE);
+ CHECK(fcpl, FAIL, "H5Pcreate");
+
+ /* Set file space information */
+ ret = H5Pset_file_space_strategy(fcpl, fs_strategy, (hbool_t)fs_persist, fs_threshold);
+ CHECK(ret, FAIL, "H5Pset_file_space_strategy");
+
+ ret = H5Pset_file_space_page_size(fcpl, FSP_SIZE512);
+ CHECK(ret, FAIL, "H5Pset_file_space_strategy");
+
+ /* Retrieve file space information */
+ ret = H5Pget_file_space_strategy(fcpl, &strategy, &persist, &threshold);
+ CHECK(ret, FAIL, "H5Pget_file_space_strategy");
+
+ /* Verify file space information */
+ VERIFY(strategy, fs_strategy, "H5Pget_file_space_strategy");
+
+ if (fs_strategy < H5F_FSPACE_STRATEGY_AGGR) {
+ VERIFY(persist, (hbool_t)fs_persist, "H5Pget_file_space_strategy");
+ VERIFY(threshold, fs_threshold, "H5Pget_file_space_strategy");
+ }
+ else {
+ VERIFY(persist, FALSE, "H5Pget_file_space_strategy");
+ VERIFY(threshold, 1, "H5Pget_file_space_strategy");
+ }
+
+ /* Retrieve and verify file space page size */
+ ret = H5Pget_file_space_page_size(fcpl, &fsp_size);
+ CHECK(ret, FAIL, "H5Pget_file_space_page_size");
+ VERIFY(fsp_size, FSP_SIZE512, "H5Pget_file_space_page_size");
+
+ /* Create the file with the specified file space info */
+ fid = H5Fcreate(filename, H5F_ACC_TRUNC, fcpl, my_fapl);
+ CHECK(fid, FAIL, "H5Fcreate");
+
+ /* Get the file's creation property */
+ fcpl1 = H5Fget_create_plist(fid);
+ CHECK(fcpl1, FAIL, "H5Fget_create_plist");
+
+ /* Retrieve file space information */
+ ret = H5Pget_file_space_strategy(fcpl1, &strategy, &persist, &threshold);
+ CHECK(ret, FAIL, "H5Pget_file_space_strategy");
+
+ /* Verify file space information */
+ VERIFY(strategy, fs_strategy, "H5Pget_file_space_strategy");
+
+ if (fs_strategy < H5F_FSPACE_STRATEGY_AGGR) {
+ VERIFY(persist, fs_persist, "H5Pget_file_space_strategy");
+ VERIFY(threshold, fs_threshold, "H5Pget_file_space_strategy");
+ }
+ else {
+ VERIFY(persist, FALSE, "H5Pget_file_space_strategy");
+ VERIFY(threshold, 1, "H5Pget_file_space_strategy");
+ }
+
+ /* Retrieve and verify file space page size */
+ ret = H5Pget_file_space_page_size(fcpl1, &fsp_size);
+ CHECK(ret, FAIL, "H5Pget_file_space_page_size");
+ VERIFY(fsp_size, FSP_SIZE512, "H5Pget_file_space_page_size");
+
+ /* Close the file */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Re-open the file */
+ fid = H5Fopen(filename, H5F_ACC_RDWR, my_fapl);
+ CHECK(ret, FAIL, "H5Fopen");
+
+ /* Get the file's creation property */
+ fcpl2 = H5Fget_create_plist(fid);
+ CHECK(fcpl2, FAIL, "H5Fget_create_plist");
+
+ /* Retrieve file space information */
+ ret = H5Pget_file_space_strategy(fcpl2, &strategy, &persist, &threshold);
+ CHECK(ret, FAIL, "H5Pget_file_space_strategy");
+
+ /* Verify file space information */
+ VERIFY(strategy, fs_strategy, "H5Pget_file_space_strategy");
+ if (fs_strategy < H5F_FSPACE_STRATEGY_AGGR) {
+ VERIFY(persist, fs_persist, "H5Pget_file_space_strategy");
+ VERIFY(threshold, fs_threshold, "H5Pget_file_space_strategy");
+ }
+ else {
+ VERIFY(persist, FALSE, "H5Pget_file_space_strategy");
+ VERIFY(threshold, 1, "H5Pget_file_space_strategy");
+ }
+
+ /* Retrieve and verify file space page size */
+ ret = H5Pget_file_space_page_size(fcpl2, &fsp_size);
+ CHECK(ret, FAIL, "H5Pget_file_space_page_size");
+ VERIFY(fsp_size, FSP_SIZE512, "H5Pget_file_space_page_size");
+
+ /* Close the file */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Release file creation property lists */
+ ret = H5Pclose(fcpl);
+ CHECK(ret, FAIL, "H5Pclose");
+ ret = H5Pclose(fcpl1);
+ CHECK(ret, FAIL, "H5Pclose");
+ ret = H5Pclose(fcpl2);
+ CHECK(ret, FAIL, "H5Pclose");
+ } /* end for file space strategy type */
+ } /* end for free-space section threshold */
+ } /* end for fs_persist */
+
+ /* Close the FAPL and remove the file */
+#if 0
+ h5_clean_files(FILESPACE_NAME, my_fapl);
+#endif
+
+ H5E_BEGIN_TRY
+ {
+ H5Fdelete(FILESPACE_NAME[0], my_fapl);
+ }
+ H5E_END_TRY;
+ } /* end for new_format */
+
+} /* test_filespace_info() */
+#endif
+
+/****************************************************************
+**
+** set_multi_split():
+** Internal routine to set up page-aligned address space for multi/split driver
+** when testing paged aggregation.
+** This is used by test_file_freespace() and test_sects_freespace().
+**
+*****************************************************************/
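+/* Each member address is rounded up to the next page boundary with the
+ * usual integer idiom:
+ *
+ *     aligned = ((addr + pagesize - 1) / pagesize) * pagesize;
+ */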
+#if 0
+static int
+set_multi_split(hid_t fapl, hsize_t pagesize, hbool_t split)
+{
+ H5FD_mem_t memb_map[H5FD_MEM_NTYPES];
+ hid_t memb_fapl_arr[H5FD_MEM_NTYPES];
+ char *memb_name[H5FD_MEM_NTYPES];
+ haddr_t memb_addr[H5FD_MEM_NTYPES];
+ hbool_t relax;
+ H5FD_mem_t mt;
+
+ HDassert(split);
+
+ HDmemset(memb_name, 0, sizeof memb_name);
+
+ /* Get current split settings */
+ if (H5Pget_fapl_multi(fapl, memb_map, memb_fapl_arr, memb_name, memb_addr, &relax) < 0)
+ TEST_ERROR;
+
+ if (split) {
+ /* Set memb_addr aligned */
+ memb_addr[H5FD_MEM_SUPER] = ((memb_addr[H5FD_MEM_SUPER] + pagesize - 1) / pagesize) * pagesize;
+ memb_addr[H5FD_MEM_DRAW] = ((memb_addr[H5FD_MEM_DRAW] + pagesize - 1) / pagesize) * pagesize;
+ }
+ else {
+ /* Set memb_addr aligned */
+ for (mt = H5FD_MEM_DEFAULT; mt < H5FD_MEM_NTYPES; mt++)
+ memb_addr[mt] = ((memb_addr[mt] + pagesize - 1) / pagesize) * pagesize;
+ } /* end else */
+
+ /* Set multi driver with new FAPLs */
+ if (H5Pset_fapl_multi(fapl, memb_map, memb_fapl_arr, (const char *const *)memb_name, memb_addr, relax) <
+ 0)
+ TEST_ERROR;
+
+ /* Free memb_name */
+ for (mt = H5FD_MEM_DEFAULT; mt < H5FD_MEM_NTYPES; mt++)
+ HDfree(memb_name[mt]);
+
+ return 0;
+
+error:
+ return (-1);
+
+} /* set_multi_split() */
+#endif
+
+/****************************************************************
+**
+** test_file_freespace():
+** This routine checks the free space available in a file as
+** returned by the public routine H5Fget_freespace().
+**
+**
+*****************************************************************/
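+/* H5Fget_freespace() returns the number of free bytes tracked for the file
+ * as an hssize_t (negative on failure).  The expected_freespace values set
+ * below depend on the library format and on the VFD in use, which is why
+ * they differ for the split and multi drivers.
+ */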
+#if 0
+static void
+test_file_freespace(const char *env_h5_drvr)
+{
+ hid_t file; /* File opened with read-write permission */
+#if 0
+ h5_stat_size_t empty_filesize; /* Size of file when empty */
+ h5_stat_size_t mod_filesize; /* Size of file after being modified */
+ hssize_t free_space; /* Amount of free space in file */
+#endif
+ hid_t fcpl; /* File creation property list */
+ hid_t fapl, new_fapl; /* File access property list IDs */
+ hid_t dspace; /* Dataspace ID */
+ hid_t dset; /* Dataset ID */
+ hid_t dcpl; /* Dataset creation property list */
+ int k; /* Local index variable */
+ unsigned u; /* Local index variable */
+ char filename[FILENAME_LEN]; /* Filename to use */
+ char name[32]; /* Dataset name */
+ unsigned new_format; /* To use old or new format */
+ hbool_t split_vfd, multi_vfd; /* Indicate multi/split driver */
+ hsize_t expected_freespace; /* Freespace expected */
+ hsize_t expected_fs_del; /* Freespace expected after delete */
+ herr_t ret; /* Return value */
+
+ split_vfd = !HDstrcmp(env_h5_drvr, "split");
+ multi_vfd = !HDstrcmp(env_h5_drvr, "multi");
+
+ if (!split_vfd && !multi_vfd) {
+ fapl = h5_fileaccess();
+ h5_fixname(FILESPACE_NAME[0], fapl, filename, sizeof filename);
+
+ new_fapl = H5Pcopy(fapl);
+ CHECK(new_fapl, FAIL, "H5Pcopy");
+
+ /* Set the "use the latest version of the format" bounds */
+ ret = H5Pset_libver_bounds(new_fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST);
+ CHECK(ret, FAIL, "H5Pset_libver_bounds");
+
+ fcpl = H5Pcreate(H5P_FILE_CREATE);
+ CHECK(fcpl, FAIL, "H5Pcreate");
+
+ /* Test with old & new format */
+ for (new_format = FALSE; new_format <= TRUE; new_format++) {
+ hid_t my_fapl;
+
+ /* Set the FAPL for the type of format */
+ if (new_format) {
+ MESSAGE(5, ("Testing with new group format\n"));
+
+ my_fapl = new_fapl;
+
+ if (multi_vfd || split_vfd) {
+ ret = set_multi_split(new_fapl, FSP_SIZE_DEF, split_vfd);
+ CHECK(ret, FAIL, "set_multi_split");
+ }
+
+ ret = H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_PAGE, FALSE, (hsize_t)1);
+ CHECK(ret, FAIL, "H5P_set_file_space_strategy");
+
+ expected_freespace = 4534;
+ if (split_vfd)
+ expected_freespace = 427;
+ if (multi_vfd)
+ expected_freespace = 248;
+ expected_fs_del = 0;
+ } /* end if */
+ else {
+ MESSAGE(5, ("Testing with old group format\n"));
+ /* Default: non-paged aggregation, non-persistent free-space */
+ my_fapl = fapl;
+ expected_freespace = 2464;
+ if (split_vfd)
+ expected_freespace = 264;
+ if (multi_vfd)
+ expected_freespace = 0;
+ expected_fs_del = 4096;
+
+ } /* end else */
+
+ /* Create an "empty" file */
+ file = H5Fcreate(filename, H5F_ACC_TRUNC, fcpl, my_fapl);
+ CHECK(file, FAIL, "H5Fcreate");
+
+ ret = H5Fclose(file);
+ CHECK_I(ret, "H5Fclose");
+#if 0
+ /* Get the "empty" file size */
+ empty_filesize = h5_get_file_size(filename, H5P_DEFAULT);
+#endif
+ /* Re-open the file (with read-write permission) */
+ file = H5Fopen(filename, H5F_ACC_RDWR, my_fapl);
+ CHECK_I(file, "H5Fopen");
+#if 0
+ /* Check that the free space is 0 */
+ free_space = H5Fget_freespace(file);
+ CHECK(free_space, FAIL, "H5Fget_freespace");
+ VERIFY(free_space, 0, "H5Fget_freespace");
+#endif
+ /* Create dataspace for datasets */
+ dspace = H5Screate(H5S_SCALAR);
+ CHECK(dspace, FAIL, "H5Screate");
+
+ /* Create a dataset creation property list */
+ dcpl = H5Pcreate(H5P_DATASET_CREATE);
+ CHECK(dcpl, FAIL, "H5Pcreate");
+
+ /* Set the space allocation time to early */
+ ret = H5Pset_alloc_time(dcpl, H5D_ALLOC_TIME_EARLY);
+ CHECK(ret, FAIL, "H5Pset_alloc_time");
+
+ /* Create datasets in file */
+ for (u = 0; u < 10; u++) {
+ HDsnprintf(name, sizeof(name), "Dataset %u", u);
+ dset = H5Dcreate2(file, name, H5T_STD_U32LE, dspace, H5P_DEFAULT, dcpl, H5P_DEFAULT);
+ CHECK(dset, FAIL, "H5Dcreate2");
+
+ ret = H5Dclose(dset);
+ CHECK(ret, FAIL, "H5Dclose");
+ } /* end for */
+
+ /* Close dataspace */
+ ret = H5Sclose(dspace);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close dataset creation property list */
+ ret = H5Pclose(dcpl);
+ CHECK(ret, FAIL, "H5Pclose");
+#if 0
+ /* Check that there is the right amount of free space in the file */
+ free_space = H5Fget_freespace(file);
+ CHECK(free_space, FAIL, "H5Fget_freespace");
+ VERIFY(free_space, expected_freespace, "H5Fget_freespace");
+#endif
+ /* Delete datasets in file */
+ for (k = 9; k >= 0; k--) {
+ HDsnprintf(name, sizeof(name), "Dataset %u", (unsigned)k);
+ ret = H5Ldelete(file, name, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Ldelete");
+ } /* end for */
+#if 0
+ /* Check that there is the right amount of free space in the file */
+ free_space = H5Fget_freespace(file);
+ CHECK(free_space, FAIL, "H5Fget_freespace");
+ VERIFY(free_space, expected_fs_del, "H5Fget_freespace");
+#endif
+ /* Close file */
+ ret = H5Fclose(file);
+ CHECK(ret, FAIL, "H5Fclose");
+#if 0
+ /* Get the file size after modifications*/
+ mod_filesize = h5_get_file_size(filename, H5P_DEFAULT);
+
+ /* Check that the file reverted to empty size */
+ VERIFY(mod_filesize, empty_filesize, "H5Fget_freespace");
+
+ h5_clean_files(FILESPACE_NAME, my_fapl);
+#endif
+ H5Fdelete(FILESPACE_NAME[0], my_fapl);
+ } /* end for */
+ }
+
+} /* end test_file_freespace() */
+
+/****************************************************************
+**
+** test_sects_freespace():
+** This routine checks free-space section information for the
+** file as returned by the public routine H5Fget_free_sections().
+**
+*****************************************************************/
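+/* H5Fget_free_sections() follows the usual two-call pattern: a first call
+ * with nsects == 0 and a NULL section array returns the number of sections,
+ * and a second call with a suitably sized array retrieves them, e.g.:
+ *
+ *     nall   = H5Fget_free_sections(file, H5FD_MEM_DEFAULT, (size_t)0, NULL);
+ *     nsects = H5Fget_free_sections(file, H5FD_MEM_DEFAULT, (size_t)nall, all_sect_info);
+ */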
+static void
+test_sects_freespace(const char *env_h5_drvr, hbool_t new_format)
+{
+ char filename[FILENAME_LEN]; /* Filename to use */
+ hid_t file; /* File ID */
+ hid_t fcpl; /* File creation property list template */
+ hid_t fapl; /* File access property list template */
+#if 0
+ hssize_t free_space; /* Amount of free-space in the file */
+#endif
+ hid_t dspace; /* Dataspace ID */
+ hid_t dset; /* Dataset ID */
+ hid_t dcpl; /* Dataset creation property list */
+ char name[32]; /* Dataset name */
+ hssize_t nsects = 0; /* # of free-space sections */
+ hssize_t nall; /* # of free-space sections for all types of data */
+ hssize_t nmeta = 0, nraw = 0; /* # of free-space sections for meta/raw/generic data */
+ H5F_sect_info_t sect_info[15]; /* Array to hold free-space information */
+ H5F_sect_info_t all_sect_info[15]; /* Array to hold free-space information for all types of data */
+ H5F_sect_info_t meta_sect_info[15]; /* Array to hold free-space information for metadata */
+ H5F_sect_info_t raw_sect_info[15]; /* Array to hold free-space information for raw data */
+ hsize_t total = 0; /* sum of the free-space section sizes */
+ hsize_t tmp_tot = 0; /* Sum of the free-space section sizes */
+ hsize_t last_size; /* Size of last free-space section */
+ hsize_t dims[1]; /* Dimension sizes */
+ unsigned u; /* Local index variable */
+ H5FD_mem_t type;
+ hbool_t split_vfd = FALSE, multi_vfd = FALSE;
+ herr_t ret; /* Return value */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing H5Fget_free_sections()--free-space section info in the file\n"));
+
+ split_vfd = !HDstrcmp(env_h5_drvr, "split");
+ multi_vfd = !HDstrcmp(env_h5_drvr, "multi");
+
+ if (!split_vfd && !multi_vfd) {
+
+ fapl = h5_fileaccess();
+ h5_fixname(FILESPACE_NAME[0], fapl, filename, sizeof filename);
+
+ /* Create file-creation template */
+ fcpl = H5Pcreate(H5P_FILE_CREATE);
+ CHECK(fcpl, FAIL, "H5Pcreate");
+
+ if (new_format) {
+ ret = H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST);
+ CHECK(ret, FAIL, "H5Pset_libver_bounds");
+
+ /* Set to paged aggregation and persistent free-space */
+ ret = H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_PAGE, TRUE, (hsize_t)1);
+ CHECK(ret, FAIL, "H5Pget_file_space_strategy");
+
+ /* Set up paged aligned address space for multi/split driver */
+ if (multi_vfd || split_vfd) {
+ ret = set_multi_split(fapl, FSP_SIZE_DEF, split_vfd);
+ CHECK(ret, FAIL, "set_multi_split");
+ }
+ }
+ else {
+ ret = H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_FSM_AGGR, TRUE, (hsize_t)1);
+ CHECK(ret, FAIL, "H5Pget_file_space_strategy");
+ }
+
+ /* Create the file */
+ file = H5Fcreate(filename, H5F_ACC_TRUNC, fcpl, fapl);
+ CHECK(file, FAIL, "H5Fcreate");
+
+ /* Create a dataset creation property list */
+ dcpl = H5Pcreate(H5P_DATASET_CREATE);
+ CHECK(dcpl, FAIL, "H5Pcreate");
+
+ /* Set the space allocation time to early */
+ ret = H5Pset_alloc_time(dcpl, H5D_ALLOC_TIME_EARLY);
+ CHECK(ret, FAIL, "H5Pset_alloc_time");
+
+ /* Create 1 large dataset */
+ dims[0] = 1200;
+ dspace = H5Screate_simple(1, dims, NULL);
+ dset = H5Dcreate2(file, "Dataset_large", H5T_STD_U32LE, dspace, H5P_DEFAULT, dcpl, H5P_DEFAULT);
+ CHECK(dset, FAIL, "H5Dcreate2");
+
+ /* Close dataset */
+ ret = H5Dclose(dset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close dataspace */
+ ret = H5Sclose(dspace);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Create dataspace for datasets */
+ dspace = H5Screate(H5S_SCALAR);
+ CHECK(dspace, FAIL, "H5Screate");
+
+ /* Create datasets in file */
+ for (u = 0; u < 10; u++) {
+ HDsnprintf(name, sizeof(name), "Dataset %u", u);
+ dset = H5Dcreate2(file, name, H5T_STD_U32LE, dspace, H5P_DEFAULT, dcpl, H5P_DEFAULT);
+ CHECK(dset, FAIL, "H5Dcreate2");
+
+ ret = H5Dclose(dset);
+ CHECK(ret, FAIL, "H5Dclose");
+ } /* end for */
+
+ /* Close dataspace */
+ ret = H5Sclose(dspace);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close dataset creation property list */
+ ret = H5Pclose(dcpl);
+ CHECK(ret, FAIL, "H5Pclose");
+
+ /* Delete odd-numbered datasets in file */
+ for (u = 0; u < 10; u++) {
+ HDsnprintf(name, sizeof(name), "Dataset %u", u);
+ if (u % 2) {
+ ret = H5Ldelete(file, name, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Ldelete");
+ } /* end if */
+ } /* end for */
+
+ /* Close file */
+ ret = H5Fclose(file);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Re-open the file with read-only permission */
+ file = H5Fopen(filename, H5F_ACC_RDONLY, fapl);
+ CHECK_I(file, "H5Fopen");
+#if 0
+ /* Get the amount of free space in the file */
+ free_space = H5Fget_freespace(file);
+ CHECK(free_space, FAIL, "H5Fget_freespace");
+#endif
+ /* Get the total # of free-space sections in the file */
+ nall = H5Fget_free_sections(file, H5FD_MEM_DEFAULT, (size_t)0, NULL);
+ CHECK(nall, FAIL, "H5Fget_free_sections");
+
+ /* Should return failure when nsects is 0 with a nonnull sect_info */
+ nsects = H5Fget_free_sections(file, H5FD_MEM_DEFAULT, (size_t)0, all_sect_info);
+ VERIFY(nsects, FAIL, "H5Fget_free_sections");
+
+ /* Retrieve and verify free space info for all the sections */
+ HDmemset(all_sect_info, 0, sizeof(all_sect_info));
+ nsects = H5Fget_free_sections(file, H5FD_MEM_DEFAULT, (size_t)nall, all_sect_info);
+ VERIFY(nsects, nall, "H5Fget_free_sections");
+
+ /* Verify the amount of free-space is correct */
+ for (u = 0; u < nall; u++)
+ total += all_sect_info[u].size;
+#if 0
+ VERIFY(free_space, total, "H5Fget_free_sections");
+#endif
+ /* Save the last section's size */
+ last_size = all_sect_info[nall - 1].size;
+
+ /* Retrieve and verify free-space info for (nall - 1) sections */
+ HDmemset(sect_info, 0, sizeof(sect_info));
+ nsects = H5Fget_free_sections(file, H5FD_MEM_DEFAULT, (size_t)(nall - 1), sect_info);
+ VERIFY(nsects, nall, "H5Fget_free_sections");
+
+ /* Verify the amount of free-space is correct */
+ total = 0;
+ for (u = 0; u < (nall - 1); u++) {
+ VERIFY(sect_info[u].addr, all_sect_info[u].addr, "H5Fget_free_sections");
+ VERIFY(sect_info[u].size, all_sect_info[u].size, "H5Fget_free_sections");
+ total += sect_info[u].size;
+ }
+#if 0
+ VERIFY(((hsize_t)free_space - last_size), total, "H5Fget_free_sections");
+#endif
+ /* Retrieve and verify free-space info for (nall + 1) sections */
+ HDmemset(sect_info, 0, sizeof(sect_info));
+ nsects = H5Fget_free_sections(file, H5FD_MEM_DEFAULT, (size_t)(nall + 1), sect_info);
+ VERIFY(nsects, nall, "H5Fget_free_sections");
+
+ /* Verify amount of free-space is correct */
+ total = 0;
+ for (u = 0; u < nall; u++) {
+ VERIFY(sect_info[u].addr, all_sect_info[u].addr, "H5Fget_free_sections");
+ VERIFY(sect_info[u].size, all_sect_info[u].size, "H5Fget_free_sections");
+ total += sect_info[u].size;
+ }
+ VERIFY(sect_info[nall].addr, 0, "H5Fget_free_sections");
+ VERIFY(sect_info[nall].size, 0, "H5Fget_free_sections");
+#if 0
+ VERIFY(free_space, total, "H5Fget_free_sections");
+#endif
+
+ HDmemset(meta_sect_info, 0, sizeof(meta_sect_info));
+ if (multi_vfd) {
+ hssize_t ntmp;
+
+ for (type = H5FD_MEM_SUPER; type < H5FD_MEM_NTYPES; type++) {
+ if (type == H5FD_MEM_DRAW || type == H5FD_MEM_GHEAP)
+ continue;
+ /* Get the # of free-space sections in the file for metadata */
+ ntmp = H5Fget_free_sections(file, type, (size_t)0, NULL);
+ CHECK(ntmp, FAIL, "H5Fget_free_sections");
+
+ if (ntmp > 0) {
+ nsects = H5Fget_free_sections(file, type, (size_t)ntmp, &meta_sect_info[nmeta]);
+ VERIFY(nsects, ntmp, "H5Fget_free_sections");
+ nmeta += ntmp;
+ }
+ }
+ }
+ else {
+ /* Get the # of free-space sections in the file for metadata */
+ nmeta = H5Fget_free_sections(file, H5FD_MEM_SUPER, (size_t)0, NULL);
+ CHECK(nmeta, FAIL, "H5Fget_free_sections");
+
+ /* Retrieve and verify free-space sections for metadata */
+ nsects = H5Fget_free_sections(file, H5FD_MEM_SUPER, (size_t)nmeta, meta_sect_info);
+ VERIFY(nsects, nmeta, "H5Fget_free_sections");
+ }
+
+ /* Get the # of free-space sections in the file for raw data */
+ nraw = H5Fget_free_sections(file, H5FD_MEM_DRAW, (size_t)0, NULL);
+ CHECK(nraw, FAIL, "H5Fget_free_sections");
+
+ /* Retrieve and verify free-space sections for raw data */
+ HDmemset(raw_sect_info, 0, sizeof(raw_sect_info));
+ nsects = H5Fget_free_sections(file, H5FD_MEM_DRAW, (size_t)nraw, raw_sect_info);
+ VERIFY(nsects, nraw, "H5Fget_free_sections");
+
+ /* Sum all the free-space sections */
+ for (u = 0; u < nmeta; u++)
+ tmp_tot += meta_sect_info[u].size;
+
+ for (u = 0; u < nraw; u++)
+ tmp_tot += raw_sect_info[u].size;
+
+ /* Verify free-space info */
+ VERIFY(nmeta + nraw, nall, "H5Fget_free_sections");
+ VERIFY(tmp_tot, total, "H5Fget_free_sections");
+
+ /* Closing */
+ ret = H5Fclose(file);
+ CHECK(ret, FAIL, "H5Fclose");
+ ret = H5Pclose(fcpl);
+ CHECK(ret, FAIL, "H5Pclose");
+#if 0
+ h5_clean_files(FILESPACE_NAME, fapl);
+#endif
+ H5Fdelete(FILESPACE_NAME[0], fapl);
+ }
+
+} /* end test_sects_freespace() */
+#endif
+
+/****************************************************************
+**
+** test_filespace_compatible():
+** Verify that the trunk with the latest file space management
+** can open, read, and modify 1.6 and 1.8 HDF5 files.
+** Also verify the correct file space handling information
+** and the amount of free space.
+**
+****************************************************************/
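+/* The pre-built 1.6/1.8 files are copied byte-for-byte into a scratch file
+ * (FILE5) with POSIX I/O before being opened, so the originals in the source
+ * tree are never modified by the read/modify checks below.
+ */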
+#if 0
+static void
+test_filespace_compatible(void)
+{
+ int fd_old = (-1), fd_new = (-1); /* File descriptors for copying data */
+ hid_t fid = -1; /* File id */
+ hid_t did = -1; /* Dataset id */
+ hid_t fcpl; /* File creation property list template */
+ int check[100]; /* Temporary buffer for verifying dataset data */
+ int rdbuf[100]; /* Temporary buffer for reading in dataset data */
+ uint8_t buf[READ_OLD_BUFSIZE]; /* temporary buffer for reading */
+ ssize_t nread; /* Number of bytes read in */
+ unsigned i, j; /* Local index variable */
+ hssize_t free_space; /* Amount of free-space in the file */
+ hbool_t persist; /* Persist free-space or not */
+ hsize_t threshold; /* Free-space section threshold */
+ H5F_fspace_strategy_t strategy; /* File space handling strategy */
+ herr_t ret; /* Return value */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("File space compatibility testing for 1.6 and 1.8 files\n"));
+
+ for (j = 0; j < NELMTS(OLD_FILENAME); j++) {
+ const char *filename = H5_get_srcdir_filename(OLD_FILENAME[j]); /* Corrected test file name */
+
+ /* Open and copy the test file into a temporary file */
+ fd_old = HDopen(filename, O_RDONLY);
+ CHECK(fd_old, FAIL, "HDopen");
+ fd_new = HDopen(FILE5, O_RDWR | O_CREAT | O_TRUNC, H5_POSIX_CREATE_MODE_RW);
+ CHECK(fd_new, FAIL, "HDopen");
+
+ /* Copy data */
+ while ((nread = HDread(fd_old, buf, (size_t)READ_OLD_BUFSIZE)) > 0) {
+ ssize_t write_err = HDwrite(fd_new, buf, (size_t)nread);
+ CHECK(write_err, -1, "HDwrite");
+ } /* end while */
+
+ /* Close the files */
+ ret = HDclose(fd_old);
+ CHECK(ret, FAIL, "HDclose");
+ ret = HDclose(fd_new);
+ CHECK(ret, FAIL, "HDclose");
+
+ /* Open the temporary test file */
+ fid = H5Fopen(FILE5, H5F_ACC_RDWR, H5P_DEFAULT);
+ CHECK(fid, FAIL, "H5Fopen");
+
+ /* There should not be any free space in the file */
+ free_space = H5Fget_freespace(fid);
+ CHECK(free_space, FAIL, "H5Fget_freespace");
+ VERIFY(free_space, (hssize_t)0, "H5Fget_freespace");
+
+ /* Get the file's file creation property list */
+ fcpl = H5Fget_create_plist(fid);
+ CHECK(fcpl, FAIL, "H5Fget_create_plist");
+
+ /* Retrieve the file space info */
+ ret = H5Pget_file_space_strategy(fcpl, &strategy, &persist, &threshold);
+ CHECK(ret, FAIL, "H5Pget_file_space_strategy");
+
+ /* File space handling strategy should be H5F_FSPACE_STRATEGY_FSM_AGGR */
+ /* Persisting free-space should be FALSE */
+ /* Free-space section threshold should be 1 */
+ VERIFY(strategy, H5F_FSPACE_STRATEGY_FSM_AGGR, "H5Pget_file_space_strategy");
+ VERIFY(persist, FALSE, "H5Pget_file_space_strategy");
+ VERIFY(threshold, 1, "H5Pget_file_space_strategy");
+
+ /* Generate raw data */
+ for (i = 0; i < 100; i++)
+ check[i] = (int)i;
+
+ /* Open and read the dataset */
+ did = H5Dopen2(fid, DSETNAME, H5P_DEFAULT);
+ CHECK(did, FAIL, "H5Dopen");
+ ret = H5Dread(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, &rdbuf);
+ CHECK(ret, FAIL, "H5Dread");
+
+ /* Verify the data read is correct */
+ for (i = 0; i < 100; i++)
+ VERIFY(rdbuf[i], check[i], "test_compatible");
+
+ /* Close the dataset */
+ ret = H5Dclose(did);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Remove the dataset */
+ ret = H5Ldelete(fid, DSETNAME, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Ldelete");
+
+ /* Close the plist */
+ ret = H5Pclose(fcpl);
+ CHECK(ret, FAIL, "H5Pclose");
+
+ /* Close the file */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Re-Open the file */
+ fid = H5Fopen(FILE5, H5F_ACC_RDONLY, H5P_DEFAULT);
+ CHECK(fid, FAIL, "H5Fopen");
+
+ /* The dataset should not be there */
+ did = H5Dopen2(fid, DSETNAME, H5P_DEFAULT);
+ VERIFY(did, FAIL, "H5Dopen");
+
+ /* There should not be any free space in the file */
+ free_space = H5Fget_freespace(fid);
+ CHECK(free_space, FAIL, "H5Fget_freespace");
+ VERIFY(free_space, (hssize_t)0, "H5Fget_freespace");
+
+ /* Close the file */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+ } /* end for */
+} /* test_filespace_compatible */
+#endif
+
+/****************************************************************
+**
+** test_filespace_1_10_0_compatible():
+**   Verify that the latest file space management can open, read and
+**   modify 1.10.0 HDF5 files:
+**     h5fc_ext1_i.h5:   H5F_FILE_SPACE_ALL, default threshold;
+**                       has superblock extension but no fsinfo message
+**     h5fc_ext1_f.h5:   H5F_FILE_SPACE_ALL_PERSIST, default threshold;
+**                       has superblock extension with fsinfo message
+**     h5fc_ext2_if.h5:  H5F_FILE_SPACE_ALL, non-default threshold;
+**                       has superblock extension with fsinfo message
+**     h5fc_ext2_sf.h5:  H5F_FILE_SPACE_VFD, default threshold;
+**                       has superblock extension with fsinfo message
+**     h5fc_ext3_isf.h5: H5F_FILE_SPACE_AGGR_VFD, default threshold;
+**                       has superblock extension with fsinfo message
+**     h5fc_ext_none.h5: H5F_FILE_SPACE_ALL, default threshold;
+**                       without superblock extension
+**   The above files are copied from release 1.10.0 tools/h5format_convert/testfiles.
+**
+****************************************************************/
+#if 0
+static void
+test_filespace_1_10_0_compatible(void)
+{
+ hid_t fid = -1; /* File id */
+ hid_t did = -1; /* Dataset id */
+ hid_t fcpl; /* File creation property list */
+ hbool_t persist; /* Persist free-space or not */
+ hsize_t threshold; /* Free-space section threshold */
+ H5F_fspace_strategy_t strategy; /* File space handling strategy */
+ int wbuf[24]; /* Buffer for dataset data */
+ int rdbuf[24]; /* Buffer for dataset data */
+ int status; /* Status from copying the existing file */
+ unsigned i, j; /* Local index variable */
+ herr_t ret; /* Return value */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("File space compatibility testing for 1.10.0 files\n"));
+
+ for (j = 0; j < NELMTS(OLD_1_10_0_FILENAME); j++) {
+ /* Make a copy of the test file */
+ status = h5_make_local_copy(OLD_1_10_0_FILENAME[j], FILE5);
+ CHECK(status, FAIL, "h5_make_local_copy");
+
+ /* Open the temporary test file */
+ fid = H5Fopen(FILE5, H5F_ACC_RDWR, H5P_DEFAULT);
+ CHECK(fid, FAIL, "H5Fopen");
+
+ /* Get the file's file creation property list */
+ fcpl = H5Fget_create_plist(fid);
+ CHECK(fcpl, FAIL, "H5Fget_create_plist");
+
+ /* Retrieve the file space info */
+ ret = H5Pget_file_space_strategy(fcpl, &strategy, &persist, &threshold);
+ CHECK(ret, FAIL, "H5Pget_file_space_strategy");
+
+ switch (j) {
+ case 0:
+#if 0
+ VERIFY(strategy, H5F_FILE_SPACE_STRATEGY_DEF, "H5Pget_file_space_strategy");
+ VERIFY(persist, H5F_FREE_SPACE_PERSIST_DEF, "H5Pget_file_space_strategy");
+ VERIFY(threshold, H5F_FREE_SPACE_THRESHOLD_DEF, "H5Pget_file_space_strategy");
+#endif
+ /* Open the dataset */
+ did = H5Dopen2(fid, "/DSET_EA", H5P_DEFAULT);
+ CHECK(did, FAIL, "H5Dopen");
+
+ for (i = 0; i < 24; i++)
+ wbuf[i] = (int)j + 1;
+
+ /* Write to the dataset */
+ ret = H5Dwrite(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, wbuf);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* Close the dataset */
+ ret = H5Dclose(did);
+ CHECK(ret, FAIL, "H5Dclose");
+ break;
+
+ case 1:
+ VERIFY(strategy, H5F_FSPACE_STRATEGY_FSM_AGGR, "H5Pget_file_space_strategy");
+ VERIFY(persist, TRUE, "H5Pget_file_space_strategy");
+#if 0
+ VERIFY(threshold, H5F_FREE_SPACE_THRESHOLD_DEF, "H5Pget_file_space_strategy");
+#endif
+
+ /* Open the dataset */
+ did = H5Dopen2(fid, "/DSET_NDATA_BT2", H5P_DEFAULT);
+ CHECK(did, FAIL, "H5Dopen");
+
+ for (i = 0; i < 24; i++)
+ wbuf[i] = (int)j + 1;
+
+ /* Write to the dataset */
+ ret = H5Dwrite(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, wbuf);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* Close the dataset */
+ ret = H5Dclose(did);
+ CHECK(ret, FAIL, "H5Dclose");
+ break;
+
+ case 2:
+ VERIFY(strategy, H5F_FSPACE_STRATEGY_FSM_AGGR, "H5Pget_file_space_strategy");
+#if 0
+ VERIFY(persist, H5F_FREE_SPACE_PERSIST_DEF, "H5Pget_file_space_strategy");
+#endif
+ VERIFY(threshold, 2, "H5Pget_file_space_strategy");
+
+ /* Open the dataset */
+ did = H5Dopen2(fid, "/DSET_NONE", H5P_DEFAULT);
+ CHECK(did, FAIL, "H5Dopen");
+
+ for (i = 0; i < 24; i++)
+ wbuf[i] = (int)j + 1;
+
+ /* Write to the dataset */
+ ret = H5Dwrite(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, wbuf);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* Close the dataset */
+ ret = H5Dclose(did);
+ CHECK(ret, FAIL, "H5Dclose");
+ break;
+
+ case 3:
+ VERIFY(strategy, H5F_FSPACE_STRATEGY_NONE, "H5Pget_file_space_strategy");
+#if 0
+ VERIFY(persist, H5F_FREE_SPACE_PERSIST_DEF, "H5Pget_file_space_strategy");
+ VERIFY(threshold, H5F_FREE_SPACE_THRESHOLD_DEF, "H5Pget_file_space_strategy");
+#endif
+ /* Open the dataset */
+ did = H5Dopen2(fid, "/GROUP/DSET_NDATA_EA", H5P_DEFAULT);
+ CHECK(did, FAIL, "H5Dopen");
+
+ for (i = 0; i < 24; i++)
+ wbuf[i] = (int)j + 1;
+
+ /* Write to the dataset */
+ ret = H5Dwrite(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, wbuf);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* Close the dataset */
+ ret = H5Dclose(did);
+ CHECK(ret, FAIL, "H5Dclose");
+ break;
+
+ case 4:
+ VERIFY(strategy, H5F_FSPACE_STRATEGY_AGGR, "H5Pget_file_space_strategy");
+#if 0
+ VERIFY(persist, H5F_FREE_SPACE_PERSIST_DEF, "H5Pget_file_space_strategy");
+ VERIFY(threshold, H5F_FREE_SPACE_THRESHOLD_DEF, "H5Pget_file_space_strategy");
+#endif
+ /* Open the dataset */
+ did = H5Dopen2(fid, "/GROUP/DSET_NDATA_FA", H5P_DEFAULT);
+ CHECK(did, FAIL, "H5Dopen");
+
+ for (i = 0; i < 24; i++)
+ wbuf[i] = (int)j + 1;
+
+ /* Write to the dataset */
+ ret = H5Dwrite(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, wbuf);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* Close the dataset */
+ ret = H5Dclose(did);
+ CHECK(ret, FAIL, "H5Dclose");
+ break;
+ case 5:
+ VERIFY(strategy, H5F_FSPACE_STRATEGY_FSM_AGGR, "H5Pget_file_space_strategy");
+#if 0
+ VERIFY(persist, H5F_FREE_SPACE_PERSIST_DEF, "H5Pget_file_space_strategy");
+ VERIFY(threshold, H5F_FREE_SPACE_THRESHOLD_DEF, "H5Pget_file_space_strategy");
+#endif
+ /* Open the dataset */
+ did = H5Dopen2(fid, "/GROUP/DSET_NDATA_NONE", H5P_DEFAULT);
+ CHECK(did, FAIL, "H5Dopen");
+
+ for (i = 0; i < 24; i++)
+ wbuf[i] = (int)j + 1;
+
+ /* Write to the dataset */
+ ret = H5Dwrite(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, wbuf);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* Close the dataset */
+ ret = H5Dclose(did);
+ CHECK(ret, FAIL, "H5Dclose");
+ break;
+
+ default:
+ break;
+ }
+
+ /* Close the plist */
+ ret = H5Pclose(fcpl);
+ CHECK(ret, FAIL, "H5Pclose");
+
+ /* Close the file */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Re-Open the file */
+ fid = H5Fopen(FILE5, H5F_ACC_RDONLY, H5P_DEFAULT);
+ CHECK(fid, FAIL, "H5Fopen");
+
+ switch (j) {
+ case 0:
+ /* Open and read the dataset */
+ did = H5Dopen2(fid, "/DSET_EA", H5P_DEFAULT);
+ CHECK(did, FAIL, "H5Dopen");
+
+ ret = H5Dread(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, &rdbuf);
+ CHECK(ret, FAIL, "H5Dread");
+
+ /* Verify the data read is correct */
+ for (i = 0; i < 24; i++)
+ VERIFY(rdbuf[i], j + 1, "test_compatible");
+
+ /* Close the dataset */
+ ret = H5Dclose(did);
+ CHECK(ret, FAIL, "H5Dclose");
+ break;
+
+ case 1:
+ /* Open and read the dataset */
+ did = H5Dopen2(fid, "/DSET_NDATA_BT2", H5P_DEFAULT);
+ CHECK(did, FAIL, "H5Dopen");
+
+ ret = H5Dread(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, &rdbuf);
+ CHECK(ret, FAIL, "H5Dread");
+
+ /* Verify the data read is correct */
+ for (i = 0; i < 24; i++)
+ VERIFY(rdbuf[i], j + 1, "test_compatible");
+
+ /* Close the dataset */
+ ret = H5Dclose(did);
+ CHECK(ret, FAIL, "H5Dclose");
+ break;
+
+ case 2:
+ /* Open and read the dataset */
+ did = H5Dopen2(fid, "/DSET_NONE", H5P_DEFAULT);
+ CHECK(did, FAIL, "H5Dopen");
+
+ ret = H5Dread(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, &rdbuf);
+ CHECK(ret, FAIL, "H5Dread");
+
+ /* Verify the data read is correct */
+ for (i = 0; i < 24; i++)
+ VERIFY(rdbuf[i], j + 1, "test_compatible");
+
+ /* Close the dataset */
+ ret = H5Dclose(did);
+ CHECK(ret, FAIL, "H5Dclose");
+ break;
+
+ case 3:
+ /* Open and read the dataset */
+ did = H5Dopen2(fid, "/GROUP/DSET_NDATA_EA", H5P_DEFAULT);
+ CHECK(did, FAIL, "H5Dopen");
+
+ ret = H5Dread(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, &rdbuf);
+ CHECK(ret, FAIL, "H5Dread");
+
+ /* Verify the data read is correct */
+ for (i = 0; i < 24; i++)
+ VERIFY(rdbuf[i], j + 1, "test_compatible");
+
+ /* Close the dataset */
+ ret = H5Dclose(did);
+ CHECK(ret, FAIL, "H5Dclose");
+ break;
+
+ case 4:
+
+ /* Open and read the dataset */
+ did = H5Dopen2(fid, "/GROUP/DSET_NDATA_FA", H5P_DEFAULT);
+ CHECK(did, FAIL, "H5Dopen");
+
+ ret = H5Dread(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, &rdbuf);
+ CHECK(ret, FAIL, "H5Dread");
+
+ /* Verify the data read is correct */
+ for (i = 0; i < 24; i++)
+ VERIFY(rdbuf[i], j + 1, "test_compatible");
+
+ /* Close the dataset */
+ ret = H5Dclose(did);
+ CHECK(ret, FAIL, "H5Dclose");
+ break;
+
+ case 5:
+
+ /* Open and read the dataset */
+ did = H5Dopen2(fid, "/GROUP/DSET_NDATA_NONE", H5P_DEFAULT);
+ CHECK(did, FAIL, "H5Dopen");
+
+ ret = H5Dread(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, &rdbuf);
+ CHECK(ret, FAIL, "H5Dread");
+
+ /* Verify the data read is correct */
+ for (i = 0; i < 24; i++)
+ VERIFY(rdbuf[i], j + 1, "test_compatible");
+
+ /* Close the dataset */
+ ret = H5Dclose(did);
+ CHECK(ret, FAIL, "H5Dclose");
+ break;
+
+ default:
+ break;
+ }
+
+ /* Close the file */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+ } /* end for */
+
+} /* test_filespace_1_10_0_compatible */
+#endif
+
+/****************************************************************
+**
+** test_filespace_round_compatible():
+** Verify that the trunk can open, read and modify these files--
+** 1) They are initially created (via gen_filespace.c) in the trunk
+** with combinations of file space strategies, default/non-default
+**       threshold, and file space paging enabled/disabled.
+** The library creates the file space info message with
+** "mark if unknown" in these files.
+** 2) They are copied to the 1.8 branch, and are opened/read/modified
+** there via test_filespace_compatible() in test/tfile.c.
+** The 1.8 library marks the file space info message as "unknown"
+** in these files.
+** 3) They are then copied back from the 1.8 branch to the trunk for
+** compatibility testing via this routine.
+** 4) Upon encountering the file space info message which is marked
+** as "unknown", the library will use the default file space management
+** from then on: non-persistent free-space managers, default threshold,
+** and non-paging file space.
+**
+****************************************************************/
+#if 0
+static void
+test_filespace_round_compatible(void)
+{
+ hid_t fid = -1; /* File id */
+ hid_t fcpl = -1; /* File creation property list ID */
+ unsigned j; /* Local index variable */
+ H5F_fspace_strategy_t strategy; /* File space strategy */
+ hbool_t persist; /* Persist free-space or not */
+ hsize_t threshold; /* Free-space section threshold */
+ hssize_t free_space; /* Amount of free space in the file */
+ int status; /* Status from copying the existing file */
+ herr_t ret; /* Return value */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("File space compatibility testing for files from trunk to 1_8 to trunk\n"));
+
+ for (j = 0; j < NELMTS(FSPACE_FILENAMES); j++) {
+ /* Make a copy of the test file */
+ status = h5_make_local_copy(FSPACE_FILENAMES[j], FILE5);
+ CHECK(status, FAIL, "h5_make_local_copy");
+
+ /* Open the temporary test file */
+ fid = H5Fopen(FILE5, H5F_ACC_RDWR, H5P_DEFAULT);
+ CHECK(fid, FAIL, "H5Fopen");
+
+ /* Get the file's creation property list */
+ fcpl = H5Fget_create_plist(fid);
+ CHECK(fcpl, FAIL, "H5Fget_create_plist");
+
+ ret = H5Pget_file_space_strategy(fcpl, &strategy, &persist, &threshold);
+ CHECK(ret, FAIL, "H5Pget_file_space_strategy");
+ VERIFY(strategy, H5F_FSPACE_STRATEGY_FSM_AGGR, "H5Pget_file_space_strategy");
+ VERIFY(persist, FALSE, "H5Pget_file_space_strategy");
+ VERIFY(threshold, 1, "H5Pget_file_space_strategy");
+
+ /* There should not be any free space in the file */
+ free_space = H5Fget_freespace(fid);
+ CHECK(free_space, FAIL, "H5Fget_freespace");
+ VERIFY(free_space, (hssize_t)0, "H5Fget_freespace");
+
+        /* Closing */
+        ret = H5Fclose(fid);
+        CHECK(ret, FAIL, "H5Fclose");
+        ret = H5Pclose(fcpl);
+        CHECK(ret, FAIL, "H5Pclose");
+ } /* end for */
+
+} /* test_filespace_round_compatible */
+
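+/*
+** Illustrative sketch (not part of the test suite): querying a file's
+** free-space management settings through the public API alone, mirroring
+** the checks performed by the compatibility tests above. The file name
+** "example.h5" is a placeholder; like the surrounding tests, the sketch
+** is guarded out of the build.
+*/
+#if 0
+static void
+sketch_query_file_space_info(void)
+{
+    H5F_fspace_strategy_t strategy;   /* File space strategy */
+    hbool_t               persist;    /* Persist free-space or not */
+    hsize_t               threshold;  /* Free-space section threshold */
+    hssize_t              free_space; /* Amount of tracked free space */
+    hid_t                 fid, fcpl;  /* File and property list IDs */
+
+    /* Open the file and retrieve its creation property list */
+    fid  = H5Fopen("example.h5", H5F_ACC_RDONLY, H5P_DEFAULT);
+    fcpl = H5Fget_create_plist(fid);
+
+    /* Retrieve the strategy, the persistence flag and the section threshold */
+    H5Pget_file_space_strategy(fcpl, &strategy, &persist, &threshold);
+
+    /* Retrieve the amount of free space tracked in the file */
+    free_space = H5Fget_freespace(fid);
+
+    /* Close the property list and the file */
+    H5Pclose(fcpl);
+    H5Fclose(fid);
+}
+#endif
+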
+/****************************************************************
+**
+** test_libver_bounds_real():
+** Verify that a file created and modified with the
+** specified libver bounds has the specified object header
+** versions for the right objects.
+**
+****************************************************************/
+static void
+test_libver_bounds_real(H5F_libver_t libver_create, unsigned oh_vers_create, H5F_libver_t libver_mod,
+ unsigned oh_vers_mod)
+{
+ hid_t file, group; /* Handles */
+ hid_t fapl; /* File access property list */
+ H5O_native_info_t ninfo; /* Object info */
+ herr_t ret; /* Return value */
+
+ /*
+ * Create a new file using the creation properties.
+ */
+ fapl = H5Pcreate(H5P_FILE_ACCESS);
+ CHECK(fapl, FAIL, "H5Pcreate");
+
+ ret = H5Pset_libver_bounds(fapl, libver_create, H5F_LIBVER_LATEST);
+ CHECK(ret, FAIL, "H5Pset_libver_bounds");
+
+ file = H5Fcreate("tfile5.h5", H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
+ CHECK(file, FAIL, "H5Fcreate");
+
+ /*
+ * Make sure the root group has the correct object header version
+ */
+ ret = H5Oget_native_info_by_name(file, "/", &ninfo, H5O_NATIVE_INFO_HDR, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Oget_native_info_by_name");
+ VERIFY(ninfo.hdr.version, oh_vers_create, "H5Oget_native_info_by_name");
+
+ /*
+ * Reopen the file and make sure the root group still has the correct version
+ */
+ ret = H5Fclose(file);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ ret = H5Pset_libver_bounds(fapl, libver_mod, H5F_LIBVER_LATEST);
+ CHECK(ret, FAIL, "H5Pset_libver_bounds");
+
+ file = H5Fopen("tfile5.h5", H5F_ACC_RDWR, fapl);
+ CHECK(file, FAIL, "H5Fopen");
+
+ ret = H5Oget_native_info_by_name(file, "/", &ninfo, H5O_NATIVE_INFO_HDR, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Oget_native_info_by_name");
+ VERIFY(ninfo.hdr.version, oh_vers_create, "H5Oget_native_info_by_name");
+
+ /*
+ * Create a group named "G1" in the file, and make sure it has the correct
+ * object header version
+ */
+ group = H5Gcreate2(file, "/G1", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(group, FAIL, "H5Gcreate");
+
+ //! [H5Oget_native_info_snip]
+
+ ret = H5Oget_native_info(group, &ninfo, H5O_NATIVE_INFO_HDR);
+
+ //! [H5Oget_native_info_snip]
+
+    CHECK(ret, FAIL, "H5Oget_native_info");
+ VERIFY(ninfo.hdr.version, oh_vers_mod, "H5Oget_native_info");
+
+ ret = H5Gclose(group);
+ CHECK(ret, FAIL, "H5Gclose");
+
+ /*
+ * Create a group named "/G1/G3" in the file, and make sure it has the
+ * correct object header version
+ */
+ group = H5Gcreate2(file, "/G1/G3", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(group, FAIL, "H5Gcreate");
+
+ ret = H5Oget_native_info(group, &ninfo, H5O_NATIVE_INFO_HDR);
+ CHECK(ret, FAIL, "H5Oget_native_info");
+ VERIFY(ninfo.hdr.version, oh_vers_mod, "H5Oget_native_info");
+
+ ret = H5Gclose(group);
+ CHECK(ret, FAIL, "H5Gclose");
+
+ //! [H5Oget_native_info_by_name_snip]
+
+ /*
+ * Make sure the root group still has the correct object header version
+ */
+ ret = H5Oget_native_info_by_name(file, "/", &ninfo, H5O_NATIVE_INFO_HDR, H5P_DEFAULT);
+
+ //! [H5Oget_native_info_by_name_snip]
+
+ CHECK(ret, FAIL, "H5Oget_native_info_by_name");
+ VERIFY(ninfo.hdr.version, oh_vers_create, "H5Oget_native_info_by_name");
+
+ ret = H5Fclose(file);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ ret = H5Pclose(fapl);
+ CHECK(ret, FAIL, "H5Pclose");
+} /* end test_libver_bounds_real() */
+#endif
+
+/*-------------------------------------------------------------------------
+ * Function: test_libver_bounds_open
+ *
+ * Purpose: Tests opening latest file with various low/high bounds.
+ *
+ * Return: Success: 0
+ * Failure: number of errors
+ *
+ *-------------------------------------------------------------------------
+ */
+#if 0
+#define VERBFNAME "tverbounds_dspace.h5"
+#define VERBDSNAME "dataset 1"
+#define SPACE1_DIM1 3
+static void
+test_libver_bounds_open(void)
+{
+ hid_t file = -1; /* File ID */
+ hid_t space = -1; /* Dataspace ID */
+ hid_t dset = -1; /* Dataset ID */
+ hid_t fapl = -1; /* File access property list ID */
+ hid_t new_fapl = -1; /* File access property list ID for reopened file */
+ hid_t dcpl = -1; /* Dataset creation property list ID */
+ hsize_t dim[1] = {SPACE1_DIM1}; /* Dataset dimensions */
+ H5F_libver_t low, high; /* File format bounds */
+ hsize_t chunk_dim[1] = {SPACE1_DIM1}; /* Chunk dimensions */
+ herr_t ret; /* Generic return value */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing Opening File in Various Version Bounds\n"));
+
+ /* Create a file access property list */
+ fapl = H5Pcreate(H5P_FILE_ACCESS);
+ CHECK(fapl, FAIL, "H5Pcreate");
+
+ /* Create dataspace */
+ space = H5Screate_simple(1, dim, NULL);
+ CHECK(space, FAIL, "H5Screate_simple");
+
+ /* Create a dataset creation property list */
+ dcpl = H5Pcreate(H5P_DATASET_CREATE);
+ CHECK(dcpl, FAIL, "H5Pcreate");
+
+ /* Create and set chunk plist */
+ ret = H5Pset_chunk(dcpl, 1, chunk_dim);
+ CHECK(ret, FAIL, "H5Pset_chunk");
+ ret = H5Pset_deflate(dcpl, 9);
+ CHECK(ret, FAIL, "H5Pset_deflate");
+ ret = H5Pset_chunk_opts(dcpl, H5D_CHUNK_DONT_FILTER_PARTIAL_CHUNKS);
+ CHECK(ret, FAIL, "H5Pset_chunk_opts");
+
+ /* Create a file with (LATEST, LATEST) bounds, create a layout version 4
+ dataset, then close the file */
+
+ /* Set version bounds to (LATEST, LATEST) */
+ low = H5F_LIBVER_LATEST;
+ high = H5F_LIBVER_LATEST;
+ ret = H5Pset_libver_bounds(fapl, low, high);
+ CHECK(ret, FAIL, "H5Pset_libver_bounds");
+
+ /* Create the file */
+ file = H5Fcreate(VERBFNAME, H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
+ CHECK(file, FAIL, "H5Fcreate");
+
+ /* Create dataset */
+ dset = H5Dcreate2(file, VERBDSNAME, H5T_NATIVE_INT, space, H5P_DEFAULT, dcpl, H5P_DEFAULT);
+ CHECK(dset, FAIL, "H5Dcreate2");
+
+ /* Close dataset and file */
+ ret = H5Dclose(dset);
+ CHECK(ret, FAIL, "H5Dclose");
+ ret = H5Fclose(file);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Attempt to open latest file with (earliest, v18), should fail */
+ ret = H5Pset_libver_bounds(fapl, H5F_LIBVER_EARLIEST, H5F_LIBVER_V18);
+ H5E_BEGIN_TRY
+ {
+ file = H5Fopen(VERBFNAME, H5F_ACC_RDONLY, fapl);
+ }
+ H5E_END_TRY;
+ VERIFY(file, FAIL, "Attempted to open latest file with earliest version");
+
+ /* Attempt to open latest file with (v18, v18), should fail */
+ ret = H5Pset_libver_bounds(fapl, H5F_LIBVER_V18, H5F_LIBVER_V18);
+ H5E_BEGIN_TRY
+ {
+ file = H5Fopen(VERBFNAME, H5F_ACC_RDONLY, fapl);
+ }
+ H5E_END_TRY;
+ VERIFY(file, FAIL, "Attempted to open latest file with v18 bounds");
+
+    /* Opening VERBFNAME with these combinations should succeed.
+ For each low bound, verify that it is upgraded properly */
+ high = H5F_LIBVER_LATEST;
+ for (low = H5F_LIBVER_EARLIEST; low < H5F_LIBVER_NBOUNDS; low++) {
+ H5F_libver_t new_low = H5F_LIBVER_EARLIEST;
+
+ /* Set version bounds for opening file */
+ ret = H5Pset_libver_bounds(fapl, low, high);
+ CHECK(ret, FAIL, "H5Pset_libver_bounds");
+
+ /* Open the file */
+ file = H5Fopen(VERBFNAME, H5F_ACC_RDONLY, fapl);
+ CHECK(file, FAIL, "H5Fopen");
+
+ /* Get the new file access property */
+ new_fapl = H5Fget_access_plist(file);
+ CHECK(new_fapl, FAIL, "H5Fget_access_plist");
+
+ /* Get new low bound and verify that it has been upgraded properly */
+ ret = H5Pget_libver_bounds(new_fapl, &new_low, NULL);
+ CHECK(ret, FAIL, "H5Pget_libver_bounds");
+ VERIFY(new_low >= H5F_LIBVER_V110, TRUE, "Low bound should be upgraded to at least H5F_LIBVER_V110");
+
+ ret = H5Pclose(new_fapl);
+ CHECK(ret, FAIL, "H5Pclose");
+ ret = H5Fclose(file);
+ CHECK(ret, FAIL, "H5Fclose");
+ } /* for low */
+
+ /* Close dataspace and property lists */
+ ret = H5Sclose(space);
+ CHECK(ret, FAIL, "H5Sclose");
+ ret = H5Pclose(dcpl);
+ CHECK(ret, FAIL, "H5Pclose");
+ ret = H5Pclose(fapl);
+ CHECK(ret, FAIL, "H5Pclose");
+} /* end test_libver_bounds_open() */
+#endif
+
+/*-------------------------------------------------------------------------
+ * Function: test_libver_bounds_copy
+ *
+ * Purpose: Test to verify HDFFV-10800 is fixed:
+ * This test is copied from the user test program: copy10.c.
+ * (See attached programs in the jira issue.)
+ *
+ * The source file used in the test is generated by the user test
+ * program "fill18.c" with the 1.8 library. The file is created
+ * with the latest format and the dataset created in the file
+ * has version 3 fill value message (latest).
+ *
+ * The test creates the destination file with (v18, v18) version bounds.
+ * H5Ocopy() should succeed in copying the dataset in the source file
+ * to the destination file.
+ *
+ * Return: Success: 0
+ * Failure: number of errors
+ *
+ *-------------------------------------------------------------------------
+ */
+#if 0
+static void
+test_libver_bounds_copy(void)
+{
+ hid_t src_fid = -1; /* File ID */
+ hid_t dst_fid = -1; /* File ID */
+ hid_t fapl = -1; /* File access property list ID */
+ const char *src_fname; /* Source file name */
+ herr_t ret; /* Generic return value */
+ hbool_t driver_is_default_compatible;
+
+ /* Output message about the test being performed */
+    MESSAGE(5, ("Testing H5Ocopy of a dataset from a 1.8 library file to a 1.10 library file\n"));
+
+ ret = h5_driver_is_default_vfd_compatible(H5P_DEFAULT, &driver_is_default_compatible);
+ CHECK_I(ret, "h5_driver_is_default_vfd_compatible");
+
+ if (!driver_is_default_compatible) {
+ HDprintf("-- SKIPPED --\n");
+ return;
+ }
+
+ /* Get the test file name */
+ src_fname = H5_get_srcdir_filename(SRC_FILE);
+
+ /* Open the source test file */
+ src_fid = H5Fopen(src_fname, H5F_ACC_RDONLY, H5P_DEFAULT);
+ CHECK(src_fid, FAIL, "H5Fopen");
+
+ /* Create file access property list */
+ fapl = H5Pcreate(H5P_FILE_ACCESS);
+ CHECK(fapl, FAIL, "H5Pcreate");
+
+ /* Set library version bounds to (v18, v18) */
+ ret = H5Pset_libver_bounds(fapl, H5F_LIBVER_V18, H5F_LIBVER_V18);
+ CHECK(ret, FAIL, "H5Pset_libver_bounds");
+
+ /* Create the destination file with the fapl */
+ dst_fid = H5Fcreate(DST_FILE, H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
+    CHECK(dst_fid, FAIL, "H5Fcreate");
+
+ /* Close the fapl */
+ ret = H5Pclose(fapl);
+ CHECK(ret, FAIL, "H5Pclose");
+
+ /* Copy the dataset in the source file to the destination file */
+ ret = H5Ocopy(src_fid, DSET_DS1, dst_fid, DSET_DS1, H5P_DEFAULT, H5P_DEFAULT);
+ VERIFY(ret, SUCCEED, "H5Ocopy");
+
+ /* Close the source file */
+ ret = H5Fclose(src_fid);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Close the destination file */
+ ret = H5Fclose(dst_fid);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Remove the destination file */
+ H5Fdelete(DST_FILE, H5P_DEFAULT);
+
+} /* end test_libver_bounds_copy() */
+#endif
+
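+/*
+** Illustrative sketch (not part of the test suite): the copy pattern that
+** test_libver_bounds_copy() above exercises, reduced to plain public API
+** calls. The file and dataset names are placeholders. The destination file
+** is created with (v18, v18) bounds, so the copied dataset has to fit
+** within the 1.8 format bounds. Guarded out of the build.
+*/
+#if 0
+static void
+sketch_copy_with_v18_bounds(void)
+{
+    hid_t src_fid, dst_fid, fapl; /* File and property list IDs */
+
+    /* Open the source file read-only */
+    src_fid = H5Fopen("source.h5", H5F_ACC_RDONLY, H5P_DEFAULT);
+
+    /* Create the destination file with (v18, v18) library version bounds */
+    fapl = H5Pcreate(H5P_FILE_ACCESS);
+    H5Pset_libver_bounds(fapl, H5F_LIBVER_V18, H5F_LIBVER_V18);
+    dst_fid = H5Fcreate("destination.h5", H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
+    H5Pclose(fapl);
+
+    /* Copy the dataset from the source file into the destination file */
+    H5Ocopy(src_fid, "/dset", dst_fid, "/dset", H5P_DEFAULT, H5P_DEFAULT);
+
+    /* Close both files */
+    H5Fclose(src_fid);
+    H5Fclose(dst_fid);
+}
+#endif
+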
+/****************************************************************
+**
+** test_libver_bounds():
+** Verify that a file created and modified with various
+** libver bounds is handled correctly. (Further testing
+** welcome)
+**
+****************************************************************/
+#if 0
+static void
+test_libver_bounds(void)
+{
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing setting library version bounds\n"));
+
+ /* Run the tests */
+ test_libver_bounds_real(H5F_LIBVER_EARLIEST, 1, H5F_LIBVER_LATEST, 2);
+ test_libver_bounds_real(H5F_LIBVER_LATEST, 2, H5F_LIBVER_EARLIEST, 2);
+ test_libver_bounds_open();
+#if 0
+ test_libver_bounds_copy();
+#endif
+} /* end test_libver_bounds() */
+#endif
+
+/**************************************************************************************
+**
+** test_libver_bounds_low_high():
+** Tests to verify that format versions are correct with the following five
+** pairs of low/high version bounds set in fapl via H5Pset_libver_bounds():
+** (1) (earliest, v18)
+** (2) (earliest, v110)
+** (3) (v18, v18)
+** (4) (v18, v110)
+** (5) (v110, v110)
+**
+**    For each pair of settings in fapl, verify format versions with the following
+** six tests:
+** (1) test_libver_bounds_super(fapl): superblock versions
+** (2) test_libver_bounds_obj(fapl): object header versions
+** (3) test_libver_bounds_dataset(fapl): message versions associated with dataset
+** (4) test_libver_bounds_dataspace(fapl): dataspace message versions
+** (5) test_libver_bounds_datatype(fapl): datatype message versions
+** (6) test_libver_bounds_attributes(fapl): attribute message versions
+**
+**************************************************************************************/
+#if 0
+static void
+test_libver_bounds_low_high(const char *env_h5_drvr)
+{
+ hid_t fapl = H5I_INVALID_HID; /* File access property list */
+ H5F_libver_t low, high; /* Low and high bounds */
+ herr_t ret; /* The return value */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing setting (low, high) format version bounds\n"));
+
+ /* Create a file access property list */
+ fapl = H5Pcreate(H5P_FILE_ACCESS);
+ CHECK(fapl, H5I_INVALID_HID, "H5Pcreate");
+
+ /* Loop through all the combinations of low/high version bounds */
+ for (low = H5F_LIBVER_EARLIEST; low < H5F_LIBVER_NBOUNDS; low++)
+ for (high = H5F_LIBVER_EARLIEST; high < H5F_LIBVER_NBOUNDS; high++) {
+
+ H5E_BEGIN_TRY
+ {
+ /* Set the low/high version bounds */
+ ret = H5Pset_libver_bounds(fapl, low, high);
+ }
+ H5E_END_TRY;
+
+ /* Should fail: invalid combinations */
+ if (high == H5F_LIBVER_EARLIEST) {
+ VERIFY(ret, FAIL, "H5Pset_libver_bounds");
+ continue;
+ }
+
+ /* Should fail: invalid combinations */
+ if (high < low) {
+ VERIFY(ret, FAIL, "H5Pset_libver_bounds");
+ continue;
+ }
+
+ /* All other combinations are valid and should succeed */
+ VERIFY(ret, SUCCEED, "H5Pset_libver_bounds");
+
+ /* Tests to verify version bounds */
+ test_libver_bounds_super(fapl, env_h5_drvr);
+ test_libver_bounds_obj(fapl);
+ test_libver_bounds_dataset(fapl);
+ test_libver_bounds_dataspace(fapl);
+ test_libver_bounds_datatype(fapl);
+ test_libver_bounds_attributes(fapl);
+ }
+
+ ret = H5Pclose(fapl);
+ CHECK(ret, FAIL, "H5Pclose");
+
+} /* end test_libver_bounds_low_high() */
+#endif
+
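+/*
+** Illustrative sketch (not part of the test suite): how the low/high bound
+** pairs checked by test_libver_bounds_low_high() above are accepted or
+** rejected. A pair is rejected when the high bound is H5F_LIBVER_EARLIEST
+** or is below the low bound; every other combination is accepted.
+** Guarded out of the build.
+*/
+#if 0
+static void
+sketch_libver_bound_pairs(void)
+{
+    hid_t  fapl = H5Pcreate(H5P_FILE_ACCESS); /* File access property list */
+    herr_t ret;                               /* Return value */
+
+    /* Valid pair: low <= high and high is not H5F_LIBVER_EARLIEST */
+    ret = H5Pset_libver_bounds(fapl, H5F_LIBVER_V18, H5F_LIBVER_V110);
+    /* ret is non-negative */
+
+    /* Invalid pair: high bound below the low bound */
+    H5E_BEGIN_TRY
+    {
+        ret = H5Pset_libver_bounds(fapl, H5F_LIBVER_V110, H5F_LIBVER_V18);
+    }
+    H5E_END_TRY;
+    /* ret is negative */
+
+    /* Invalid pair: high bound of H5F_LIBVER_EARLIEST */
+    H5E_BEGIN_TRY
+    {
+        ret = H5Pset_libver_bounds(fapl, H5F_LIBVER_EARLIEST, H5F_LIBVER_EARLIEST);
+    }
+    H5E_END_TRY;
+    /* ret is negative */
+
+    H5Pclose(fapl);
+}
+#endif
+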
+/***********************************************************************
+**
+** test_libver_bounds_super():
+** Verify superblock version with the following two tests:
+** (1) test_libver_bounds_super_create():
+** --when creating a file with the input fapl and the fcpl
+**            that has one of the following settings:
+** (A) default fcpl
+**              (B) fcpl with v1-btree K value enabled
+** (C) fcpl with shared messages enabled
+** (D) fcpl with persistent free-space manager enabled
+**
+** (2) test_libver_bounds_super_open():
+** --when opening a file which is created with the input fapl
+**            and one of the fcpl settings #A to #D above.
+**
+** These two tests are run with or without SWMR file access.
+**
+*************************************************************************/
+#if 0
+static void
+test_libver_bounds_super(hid_t fapl, const char *env_h5_drvr)
+{
+ hid_t fcpl = H5I_INVALID_HID; /* File creation property list */
+ herr_t ret; /* The return value */
+
+ /* Create a default fcpl: #A */
+ /* This will result in superblock version 0 */
+ fcpl = H5Pcreate(H5P_FILE_CREATE);
+ CHECK(fcpl, H5I_INVALID_HID, "H5Pcreate");
+
+ /* Verify superblock version when creating a file with input fapl,
+ fcpl #A and with/without SWMR access */
+ if (H5FD__supports_swmr_test(env_h5_drvr))
+ test_libver_bounds_super_create(fapl, fcpl, TRUE, FALSE);
+ test_libver_bounds_super_create(fapl, fcpl, FALSE, FALSE);
+
+ /* Verify superblock version when opening a file which is created
+ with input fapl, fcpl #A and with/without SWMR access */
+ if (H5FD__supports_swmr_test(env_h5_drvr))
+ test_libver_bounds_super_open(fapl, fcpl, TRUE, FALSE);
+ test_libver_bounds_super_open(fapl, fcpl, FALSE, FALSE);
+
+ /* Close the fcpl */
+ ret = H5Pclose(fcpl);
+ CHECK(ret, FAIL, "H5Pclose");
+
+ /* Create a fcpl with v1-btree K value enabled: #B */
+ /* This will result in superblock version 1 */
+ fcpl = H5Pcreate(H5P_FILE_CREATE);
+ CHECK(fcpl, H5I_INVALID_HID, "H5Pcreate");
+ ret = H5Pset_istore_k(fcpl, 64);
+ CHECK(ret, FAIL, "H5Pset_istore_k");
+
+ /* Verify superblock version when creating a file with input fapl,
+ fcpl #B and with/without SWMR access */
+ if (H5FD__supports_swmr_test(env_h5_drvr))
+ test_libver_bounds_super_create(fapl, fcpl, TRUE, FALSE);
+ test_libver_bounds_super_create(fapl, fcpl, FALSE, FALSE);
+
+ /* Verify superblock version when opening a file which is created
+ with input fapl, fcpl #B and with/without SWMR access */
+ if (H5FD__supports_swmr_test(env_h5_drvr))
+ test_libver_bounds_super_open(fapl, fcpl, TRUE, FALSE);
+ test_libver_bounds_super_open(fapl, fcpl, FALSE, FALSE);
+
+ /* Close the fcpl */
+ ret = H5Pclose(fcpl);
+ CHECK(ret, FAIL, "H5Pclose");
+
+ /* Create a fcpl with shared messages enabled: #C */
+ /* This will result in superblock version 2 */
+ fcpl = H5Pcreate(H5P_FILE_CREATE);
+ CHECK(fcpl, H5I_INVALID_HID, "H5Pcreate");
+ ret = H5Pset_shared_mesg_nindexes(fcpl, 1);
+ CHECK(ret, FAIL, "H5Pset_shared_mesg_nindexes");
+ ret = H5Pset_shared_mesg_index(fcpl, 0, H5O_SHMESG_ATTR_FLAG, 2);
+ CHECK(ret, FAIL, "H5Pset_shared_mesg_index");
+
+ /* Verify superblock version when creating a file with input fapl,
+ fcpl #C and with/without SWMR access */
+ if (H5FD__supports_swmr_test(env_h5_drvr))
+ test_libver_bounds_super_create(fapl, fcpl, TRUE, FALSE);
+ test_libver_bounds_super_create(fapl, fcpl, FALSE, FALSE);
+
+ /* Verify superblock version when opening a file which is created
+ with input fapl, fcpl #C and with/without SWMR access */
+ if (H5FD__supports_swmr_test(env_h5_drvr))
+ test_libver_bounds_super_open(fapl, fcpl, TRUE, FALSE);
+ test_libver_bounds_super_open(fapl, fcpl, FALSE, FALSE);
+
+ /* Close the fcpl */
+ ret = H5Pclose(fcpl);
+ CHECK(ret, FAIL, "H5Pclose");
+
+ if (h5_using_default_driver(env_h5_drvr)) {
+ /* Create a fcpl with persistent free-space manager enabled: #D */
+ /* This will result in superblock version 2 */
+ fcpl = H5Pcreate(H5P_FILE_CREATE);
+ CHECK(fcpl, H5I_INVALID_HID, "H5Pcreate");
+ ret = H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_FSM_AGGR, 1, (hsize_t)1);
+ CHECK(ret, FAIL, "H5Pset_file_space");
+
+ /* Verify superblock version when creating a file with input fapl,
+ fcpl #D and with/without SWMR access */
+ if (H5FD__supports_swmr_test(env_h5_drvr))
+ test_libver_bounds_super_create(fapl, fcpl, TRUE, TRUE);
+ test_libver_bounds_super_create(fapl, fcpl, FALSE, TRUE);
+
+ /* Verify superblock version when opening a file which is created
+ with input fapl, fcpl #D and with/without SWMR access */
+ if (H5FD__supports_swmr_test(env_h5_drvr))
+ test_libver_bounds_super_open(fapl, fcpl, TRUE, TRUE);
+ test_libver_bounds_super_open(fapl, fcpl, FALSE, TRUE);
+
+ /* Close the fcpl */
+ ret = H5Pclose(fcpl);
+ CHECK(ret, FAIL, "H5Pclose");
+ }
+
+} /* end test_libver_bounds_super() */
+
+/**************************************************************************************************
+**
+** test_libver_bounds_super_create():
+** Verify the following when the file is created with the input fapl, fcpl,
+** and with/without SWMR access:
+** (a) the superblock version #
+** (b) the file's low bound setting
+** (c) fail or succeed in creating the file
+**
+** For file creation, the bounds setting in fapl, the feature enabled in fcpl,
+** and with/without SWMR file access will determine the results for #a to #c.
+**
+**          The first row in each of the following two tables lists the 5 pairs of low/high bound settings
+** in the input fapl. The next three rows list the expected results for #a to #c.
+** "-->" indicates "upgrade to"
+**
+** The last table lists the expected results in creating the file when non-default
+** free-space info (fsinfo) is enabled in fcpl.
+**
+** Creating a file with write access
+** --------------------------------------------------------------------------------
+** | (earliest, v18) | (earliest, v110) | (v18, v18) | (v18, v110) | (v110, v110) |
+** |______________________________________________________________________________|
+** Superblock version | vers 0, 1, 2 | vers 0, 1, 2 | vers 2 | vers 2 | vers 3 |
+** |------------------------------------------------------------------------------|
+** File's low bound | no change |
+** |------------------------------------------------------------------------------|
+** File creation | succeed |
+** |______________________________________________________________________________|
+**
+** Creating a file with SWMR-write access
+** --------------------------------------------------------------------------------
+** | (earliest, v18) | (earliest, v110) | (v18, v18) | (v18, v110) | (v110, v110) |
+** |______________________________________________________________________________|
+** Superblock version | -- | vers 3 | -- | vers 3 | vers 3 |
+** |------------------------------------------------------------------------------|
+** File's low bound | -- | ->v110 | -- | ->v110 | no change |
+** |------------------------------------------------------------------------------|
+** File creation | fail | succeed | fail | succeed | succeed |
+** |______________________________________________________________________________|
+**
+** Creating a file with write/SWMR-write access + non-default fsinfo
+** --------------------------------------------------------------------------------
+** | (earliest, v18) | (earliest, v110) | (v18, v18) | (v18, v110) | (v110, v110) |
+** |______________________________________________________________________________|
+** File creation | fail | succeed | fail | succeed | succeed |
+** |______________________________________________________________________________|
+**
+******************************************************************************************************/
+static void
+test_libver_bounds_super_create(hid_t fapl, hid_t fcpl, htri_t is_swmr, htri_t non_def_fsm)
+{
+ hid_t fid = H5I_INVALID_HID; /* File ID */
+#if 0
+ H5F_t *f = NULL; /* Internal file pointer */
+#endif
+ H5F_libver_t low, high; /* Low and high bounds */
+#if 0
+ hbool_t ok; /* The result is ok or not */
+#endif
+ herr_t ret; /* The return value */
+
+ /* Try to create the file */
+ H5E_BEGIN_TRY
+ {
+ fid = H5Fcreate(FILE8, H5F_ACC_TRUNC | (is_swmr ? H5F_ACC_SWMR_WRITE : 0), fcpl, fapl);
+ }
+ H5E_END_TRY;
+
+#if 0
+ /* Get the internal file pointer if the create succeeds */
+ if (fid >= 0) {
+ f = (H5F_t *)H5VL_object(fid);
+ CHECK_PTR(f, "H5VL_object");
+ }
+#endif
+ /* Retrieve the low/high bounds */
+ ret = H5Pget_libver_bounds(fapl, &low, &high);
+ CHECK(ret, FAIL, "H5Pget_libver_bounds");
+
+ if (non_def_fsm && high < H5F_LIBVER_V110)
+ VERIFY(fid, H5I_INVALID_HID, "H5Fcreate");
+
+ else if (is_swmr) { /* SWMR is enabled */
+ if (high >= H5F_LIBVER_V110) { /* Should succeed */
+ VERIFY(fid >= 0, TRUE, "H5Fcreate");
+#if 0
+ VERIFY(HDF5_SUPERBLOCK_VERSION_3, f->shared->sblock->super_vers, "HDF5_superblock_ver_bounds");
+ VERIFY(f->shared->low_bound >= H5F_LIBVER_V110, TRUE, "HDF5_superblock_ver_bounds");
+#endif
+ }
+ else /* Should fail */
+ VERIFY(fid >= 0, FALSE, "H5Fcreate");
+ }
+ else { /* Should succeed */
+ VERIFY(fid >= 0, TRUE, "H5Fcreate");
+#if 0
+ VERIFY(low, f->shared->low_bound, "HDF5_superblock_ver_bounds");
+
+ switch (low) {
+ case H5F_LIBVER_EARLIEST:
+ ok = (f->shared->sblock->super_vers == HDF5_SUPERBLOCK_VERSION_DEF ||
+ f->shared->sblock->super_vers == HDF5_SUPERBLOCK_VERSION_1 ||
+ f->shared->sblock->super_vers == HDF5_SUPERBLOCK_VERSION_2);
+ VERIFY(ok, TRUE, "HDF5_superblock_ver_bounds");
+ break;
+
+ case H5F_LIBVER_V18:
+ ok = (f->shared->sblock->super_vers == HDF5_SUPERBLOCK_VERSION_2);
+ VERIFY(ok, TRUE, "HDF5_superblock_ver_bounds");
+ break;
+
+ case H5F_LIBVER_V110:
+ case H5F_LIBVER_V112:
+ case H5F_LIBVER_V114:
+ case H5F_LIBVER_V116:
+ ok = (f->shared->sblock->super_vers == HDF5_SUPERBLOCK_VERSION_3);
+ VERIFY(ok, TRUE, "HDF5_superblock_ver_bounds");
+ break;
+
+ case H5F_LIBVER_ERROR:
+ case H5F_LIBVER_NBOUNDS:
+ default:
+ ERROR("H5Pget_libver_bounds");
+
+ } /* end switch */
+#endif
+ } /* end else */
+
+ if (fid >= 0) { /* Close the file */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+ }
+
+} /* end test_libver_bounds_super_create() */
+
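+/*
+** Illustrative sketch (not part of the test suite): the SWMR-write column
+** of the tables above, using public API calls only. As those tables
+** indicate, creating a file with SWMR-write access needs a high bound of
+** at least v110, since a version 3 superblock is required. The file name
+** is a placeholder; the sketch is guarded out of the build like the tests
+** around it.
+*/
+#if 0
+static void
+sketch_swmr_create_needs_v110(void)
+{
+    hid_t fapl = H5Pcreate(H5P_FILE_ACCESS); /* File access property list */
+    hid_t fid;                               /* File ID */
+
+    /* (v18, v18) bounds: creating with SWMR-write access fails */
+    H5Pset_libver_bounds(fapl, H5F_LIBVER_V18, H5F_LIBVER_V18);
+    H5E_BEGIN_TRY
+    {
+        fid = H5Fcreate("swmr_example.h5", H5F_ACC_TRUNC | H5F_ACC_SWMR_WRITE, H5P_DEFAULT, fapl);
+    }
+    H5E_END_TRY;
+    /* fid is negative */
+
+    /* (v110, latest) bounds: creating with SWMR-write access succeeds */
+    H5Pset_libver_bounds(fapl, H5F_LIBVER_V110, H5F_LIBVER_LATEST);
+    fid = H5Fcreate("swmr_example.h5", H5F_ACC_TRUNC | H5F_ACC_SWMR_WRITE, H5P_DEFAULT, fapl);
+    if (fid >= 0)
+        H5Fclose(fid);
+
+    H5Pclose(fapl);
+}
+#endif
+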
+/**************************************************************************************************
+**
+** test_libver_bounds_super_open():
+** Verify the following when opening a file which is created with the input fapl, fcpl,
+** and with/without SWMR access:
+** (a) the file's low bound setting
+** (b) fail or succeed in opening the file
+**
+** (1) Create a file with the input fapl, fcpl and with/without SWMR access
+** (2) Close the file
+** (3) Reopen the file with a new fapl that is set to the 5 pairs of low/high bounds
+**          in a for loop. For each pair of settings in the new fapl:
+** --Verify the expected results for #a and #b above.
+** --Close the file.
+**
+** For file open, the file's superblock version, the low/high bounds setting in fapl,
+** and with/without SWMR file access will determine the results for #a and #b.
+**
+**          The first row in each of the following tables (#A - #B) lists the 5 pairs of low/high bound settings
+** in the input fapl. The next two rows list the expected results for #a and #b.
+** "-->" indicates "upgrade to"
+**
+** The last table (#C) lists the expected results in opening the file when non-default
+** free-space info (fsinfo) is enabled in fcpl.
+**
+** (A) Opening a file with write access
+**
+** Superblock version 0, 1
+** --------------------------------------------------------------------------------
+** | (earliest, v18) | (earliest, v110) | (v18, v18) | (v18, v110) | (v110, v110) |
+** |______________________________________________________________________________|
+** File's low bound | no change |
+** |------------------------------------------------------------------------------|
+** File open | succeed |
+** |______________________________________________________________________________|
+**
+**
+** Superblock version 2
+** --------------------------------------------------------------------------------
+** | (earliest, v18) | (earliest, v110) | (v18, v18) | (v18, v110) | (v110, v110) |
+** |______________________________________________________________________________|
+** File's low bound | -->v18 | no change |
+** |------------------------------------------------------------------------------|
+** File open | succeed |
+** |______________________________________________________________________________|
+**
+** Superblock version 3
+** --------------------------------------------------------------------------------
+** | (earliest, v18) | (earliest, v110) | (v18, v18) | (v18, v110) | (v110, v110) |
+** |______________________________________________________________________________|
+** File's low bound | -- | -->v110 | -- | -->v110 | no change |
+** |------------------------------------------------------------------------------|
+** File open | fail | succeed | fail | succeed | succeed |
+** |______________________________________________________________________________|
+**
+**
+**
+** (B) Opening a file with SWMR-write access
+**
+** Superblock version 0, 1, 2
+** -------------------------------------------------------------------------------
+** | (earliest, v18) | (earliest, v110) | (v18, v18) | (v18, v110) | (v110, v110) |
+** |_____________________________________________________________________________|
+** File's low bound | ----
+** |-----------------------------------------------------------------------------|
+** File open | fail
+** |_____________________________________________________________________________|
+**
+**
+** Superblock version 3
+** -------------------------------------------------------------------------------
+** | (earliest, v18) | (earliest, v110) | (v18, v18) | (v18, v110) | (v110, v110) |
+** |_____________________________________________________________________________|
+** File's low bound | -- | -->v110 | -- | -->v110 | no change |
+** |-----------------------------------------------------------------------------|
+** File open | fail | succeed | fail | succeed | succeed |
+** |_____________________________________________________________________________|
+**
+**
+** (C) Opening a file with write/SWMR-write access + non-default fsinfo
+** -------------------------------------------------------------------------------
+** | (earliest, v18) | (earliest, v110) | (v18, v18) | (v18, v110) | (v110, v110) |
+** |_____________________________________________________________________________|
+** File open | fail | succeed | fail | succeed | succeed |
+** |_____________________________________________________________________________|
+**
+**
+******************************************************************************************************/
+static void
+test_libver_bounds_super_open(hid_t fapl, hid_t fcpl, htri_t is_swmr, htri_t non_def_fsm)
+{
+ hid_t fid = H5I_INVALID_HID; /* File ID */
+#if 0
+ H5F_t *f = NULL; /* Internal file pointer */
+#endif
+ hid_t new_fapl = H5I_INVALID_HID; /* File access property list */
+#if 0
+ unsigned super_vers; /* Superblock version */
+#endif
+ H5F_libver_t low, high; /* Low and high bounds */
+ herr_t ret; /* Return value */
+
+ /* Create the file with the input fcpl and fapl */
+ H5E_BEGIN_TRY
+ {
+ fid = H5Fcreate(FILE8, H5F_ACC_TRUNC, fcpl, fapl);
+ }
+ H5E_END_TRY;
+
+ /* Retrieve the low/high bounds */
+ ret = H5Pget_libver_bounds(fapl, &low, &high);
+ CHECK(ret, FAIL, "H5Pget_libver_bounds");
+
+ if (non_def_fsm && high < H5F_LIBVER_V110) {
+ VERIFY(fid, H5I_INVALID_HID, "H5Fcreate");
+ }
+ else {
+ VERIFY(fid >= 0, TRUE, "H5Fcreate");
+#if 0
+ /* Get the internal file pointer */
+ f = (H5F_t *)H5VL_object(fid);
+ CHECK_PTR(f, "H5VL_object");
+
+ /* The file's superblock version */
+ super_vers = f->shared->sblock->super_vers;
+#endif
+ /* Close the file */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Create a default file access property list */
+ new_fapl = H5Pcreate(H5P_FILE_ACCESS);
+ CHECK(new_fapl, FAIL, "H5Pcreate");
+
+ /* Loop through all the combinations of low/high bounds in new_fapl */
+ for (low = H5F_LIBVER_EARLIEST; low < H5F_LIBVER_NBOUNDS; low++) {
+ for (high = H5F_LIBVER_EARLIEST; high < H5F_LIBVER_NBOUNDS; high++) {
+ H5E_BEGIN_TRY
+ {
+ ret = H5Pset_libver_bounds(new_fapl, low, high);
+ }
+ H5E_END_TRY;
+
+ /* Invalid combinations */
+ if (ret < 0)
+ continue;
+
+ /* Open the file with or without SWMR access */
+ H5E_BEGIN_TRY
+ {
+ fid = H5Fopen(FILE8, H5F_ACC_RDWR | (is_swmr ? H5F_ACC_SWMR_WRITE : 0), new_fapl);
+ }
+ H5E_END_TRY;
+
+ if (non_def_fsm && high < H5F_LIBVER_V110) {
+ VERIFY(fid, H5I_INVALID_HID, "H5Fopen");
+ continue;
+ }
+#if 0
+ /* Get the internal file pointer if the open succeeds */
+ if (fid >= 0) {
+ f = (H5F_t *)H5VL_object(fid);
+ CHECK_PTR(f, "H5VL_object");
+ }
+
+ /* Verify the file open succeeds or fails */
+ switch (super_vers) {
+ case 3:
+ if (high >= H5F_LIBVER_V110) {
+ /* Should succeed */
+ VERIFY(fid >= 0, TRUE, "H5Fopen");
+ VERIFY(f->shared->low_bound >= H5F_LIBVER_V110, TRUE,
+ "HDF5_superblock_ver_bounds");
+
+ /* Close the file */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+ }
+ else /* Should fail */
+ VERIFY(fid >= 0, FALSE, "H5Fopen");
+ break;
+
+ case 2:
+ if (is_swmr) /* Should fail */
+ VERIFY(fid >= 0, FALSE, "H5Fopen");
+ else { /* Should succeed */
+ VERIFY(fid >= 0, TRUE, "H5Fopen");
+ VERIFY(f->shared->low_bound >= H5F_LIBVER_V18, TRUE,
+ "HDF5_superblock_ver_bounds");
+
+ /* Close the file */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+ }
+ break;
+
+ case 1:
+ case 0:
+ if (is_swmr) /* Should fail */
+ VERIFY(fid >= 0, FALSE, "H5Fopen");
+ else { /* Should succeed */
+ VERIFY(fid >= 0, TRUE, "H5Fopen");
+ VERIFY(f->shared->low_bound, low, "HDF5_superblock_ver_bounds");
+
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+ }
+ break;
+
+ default:
+ break;
+ } /* end switch */
+#endif
+ } /* end for */
+ } /* end for */
+
+ /* Close the file access property list */
+ ret = H5Pclose(new_fapl);
+ CHECK(ret, FAIL, "H5Pclose");
+ } /* end else */
+
+} /* end test_libver_bounds_super_open() */
+#endif
+
+/****************************************************************
+**
+** test_libver_bounds_obj():
+** Verify object header versions:
+**
+** (a) Create a file with:
+** --the input fapl
+** --a fcpl that has shared message enabled
+** Verify the root group's object header version.
+** Close the file.
+**
+** (b) Create another file with:
+** --the input fapl
+** --a default fcpl
+** Verify the root group's object header version.
+** Close the file.
+**
+** (c) Reopen the same file in (b) with a new fapl.
+** The new fapl is set to the 5 pairs of low/high
+** bounds in a "for" loop. For each setting in fapl:
+** --Create a group in the file
+** --Verify the group's object header version
+** --Close and delete the group
+** --Close the file
+**
+****************************************************************/
+#if 0
+static void
+test_libver_bounds_obj(hid_t fapl)
+{
+ hid_t fid = H5I_INVALID_HID; /* File ID */
+ hid_t gid = H5I_INVALID_HID; /* Group ID */
+ hid_t fcpl = H5I_INVALID_HID; /* File creation property list */
+ hid_t new_fapl = H5I_INVALID_HID; /* File access property list */
+ H5F_t *f = NULL; /* Internal file pointer */
+ H5F_libver_t low, high; /* Low and high bounds */
+ H5O_native_info_t ninfo; /* Object info */
+ H5G_info_t ginfo; /* Group info */
+ herr_t ret; /* Return value */
+
+ /* Retrieve the low/high bounds from the input fapl */
+ ret = H5Pget_libver_bounds(fapl, &low, &high);
+ CHECK(ret, FAIL, "H5Pget_libver_bounds");
+
+ /* Create a default file creation property list */
+ fcpl = H5Pcreate(H5P_FILE_CREATE);
+ CHECK(fcpl, H5I_INVALID_HID, "H5Pcreate");
+
+ /* Enable shared message in the fcpl */
+ /* This will result in a version 2 object header */
+ ret = H5Pset_shared_mesg_nindexes(fcpl, 1);
+ CHECK(ret, FAIL, "H5Pset_shared_mesg_nindexes");
+ ret = H5Pset_shared_mesg_index(fcpl, 0, H5O_SHMESG_ATTR_FLAG, 2);
+ CHECK(ret, FAIL, "H5Pset_shared_mesg_index");
+
+ /* Create the file with the fcpl and the input fapl */
+ fid = H5Fcreate(FILE8, H5F_ACC_TRUNC, fcpl, fapl);
+ CHECK(fid, H5I_INVALID_HID, "H5Fcreate");
+
+ /* Get root group's object info */
+ ret = H5Oget_native_info_by_name(fid, "/", &ninfo, H5O_NATIVE_INFO_HDR, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Oget_native_info_by_name");
+
+ /* Verify object header version is 2 because shared message is enabled */
+ VERIFY(ninfo.hdr.version, H5O_VERSION_2, "H5O_obj_ver_bounds");
+
+ /* Close the file */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Close the file creation property list */
+ ret = H5Pclose(fcpl);
+ CHECK(ret, FAIL, "H5Pclose");
+
+ /* Create a file with the default fcpl and input fapl */
+ fid = H5Fcreate(FILE8, H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
+ CHECK(fid, H5I_INVALID_HID, "H5Fcreate");
+
+ /* Get root group's object info */
+ ret = H5Oget_native_info_by_name(fid, "/", &ninfo, H5O_NATIVE_INFO_HDR, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Oget_native_info_by_name");
+
+ /* Verify object header version is as indicated by low_bound */
+ VERIFY(ninfo.hdr.version, H5O_obj_ver_bounds[low], "H5O_obj_ver_bounds");
+
+ /* Close the file */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Create a new default file access property list which
+ is used to open the file in the "for" loop */
+ new_fapl = H5Pcreate(H5P_FILE_ACCESS);
+ CHECK(new_fapl, H5I_INVALID_HID, "H5Pcreate");
+
+ /* Loop through all the combinations of low/high bounds in new_fapl */
+ /* Open the file with the fapl; create a group and verify the
+ object header version, then delete the group and close the file.*/
+ for (low = H5F_LIBVER_EARLIEST; low < H5F_LIBVER_NBOUNDS; low++) {
+ for (high = H5F_LIBVER_EARLIEST; high < H5F_LIBVER_NBOUNDS; high++) {
+ H5E_BEGIN_TRY
+ {
+ ret = H5Pset_libver_bounds(new_fapl, low, high);
+ }
+ H5E_END_TRY;
+
+ if (ret < 0) /* Invalid combinations */
+ continue;
+
+ /* Open the file */
+ H5E_BEGIN_TRY
+ {
+ fid = H5Fopen(FILE8, H5F_ACC_RDWR, new_fapl);
+ }
+ H5E_END_TRY;
+
+ if (fid >= 0) { /* The file open succeeds */
+
+ /* Get the internal file pointer */
+ f = (H5F_t *)H5VL_object(fid);
+ CHECK_PTR(f, "H5VL_object");
+
+ /* Create a group in the file */
+ gid = H5Gcreate2(fid, GRP_NAME, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(gid, FAIL, "H5Gcreate2");
+
+ /* Get group information */
+ ret = H5Gget_info(gid, &ginfo);
+ CHECK(ret, FAIL, "H5Gget_info");
+
+ /* Verify group storage type */
+ if (f->shared->low_bound >= H5F_LIBVER_V18)
+ /* Links in group are stored in object header */
+ VERIFY(ginfo.storage_type, H5G_STORAGE_TYPE_COMPACT, "H5Gget_info");
+ else
+ /* Links in group are stored with a "symbol table" */
+ VERIFY(ginfo.storage_type, H5G_STORAGE_TYPE_SYMBOL_TABLE, "H5Gget_info");
+
+ /* Get object header information */
+ ret = H5Oget_native_info_by_name(gid, GRP_NAME, &ninfo, H5O_NATIVE_INFO_HDR, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Oget_native_info_by_name");
+
+ /* Verify object header version as indicated by low_bound */
+ VERIFY(ninfo.hdr.version, H5O_obj_ver_bounds[f->shared->low_bound], "H5O_obj_ver_bounds");
+
+ /* Close the group */
+ ret = H5Gclose(gid);
+ CHECK(ret, FAIL, "H5Gclose");
+
+ /* Delete the group */
+ ret = H5Ldelete(fid, GRP_NAME, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Ldelete");
+
+ /* Close the file */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ } /* end if */
+ } /* end for */
+ } /* end for */
+
+ /* Close the file access property list */
+ ret = H5Pclose(new_fapl);
+ CHECK(ret, FAIL, "H5Pclose");
+
+} /* end test_libver_bounds_obj() */
+
+/****************************************************************
+**
+** test_libver_bounds_dataset():
+** Verify message versions associated with datasets:
+**
+** (a) Create a file with default fcpl and the input fapl.
+** Create the following two datasets:
+** --A contiguous dataset
+** --A chunked dataset with "no filter edge chunks"
+** For both datasets, verify the versions for the layout,
+** fill value and filter pipeline messages.
+** Close the file.
+**
+** (b) Create a new fapl that is set to the 5 pairs of low/high
+**      bounds in a "for" loop. For each pair of settings in the
+** new fapl:
+** --Open the same file in (a) with the fapl
+** --Create a chunked dataset with 2 unlimited
+** dimensions
+** --Verify the versions for the layout, fill value
+** and filter pipeline messages
+** --Close and delete the dataset
+** --Close the file
+**
+****************************************************************/
+static void
+test_libver_bounds_dataset(hid_t fapl)
+{
+ hid_t fid = H5I_INVALID_HID; /* File ID */
+ hid_t new_fapl = H5I_INVALID_HID; /* File access property list */
+ hid_t did = H5I_INVALID_HID; /* Dataset ID */
+ hid_t sid = H5I_INVALID_HID; /* Dataspace ID */
+ hid_t dcpl = H5I_INVALID_HID; /* Dataset creation property list */
+ H5D_t *dset = NULL; /* Internal dataset pointer */
+ H5F_t *f = NULL; /* Internal file pointer */
+ H5F_libver_t low, high; /* Low and high bounds */
+ herr_t ret; /* Return value */
+ hsize_t fix_dims2[2] = {10, 4}; /* Dimension sizes */
+ hsize_t fix_chunks2[2] = {4, 3}; /* Chunk dimension sizes */
+ hsize_t dims2[2] = {1, 4}; /* Dimension sizes */
+ hsize_t max_dims2[2] = {H5S_UNLIMITED, H5S_UNLIMITED}; /* Maximum dimension sizes */
+ hsize_t chunks2[2] = {4, 5}; /* Chunk dimension sizes */
+
+ /* Retrieve the low/high bounds from the input fapl */
+ ret = H5Pget_libver_bounds(fapl, &low, &high);
+ CHECK(ret, FAIL, "H5Pget_libver_bounds");
+
+ /* Create the file with the input fapl */
+ fid = H5Fcreate(FILE8, H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
+ CHECK(fid, H5I_INVALID_HID, "H5Fcreate");
+
+ /* Create the dataspace */
+ sid = H5Screate(H5S_SCALAR);
+ CHECK(sid, H5I_INVALID_HID, "H5Screate");
+
+ /* Create a contiguous dataset */
+ did = H5Dcreate2(fid, DSETA, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(did, H5I_INVALID_HID, "H5Dcreate");
+
+ /* Get the internal dataset pointer */
+ dset = (H5D_t *)H5VL_object(did);
+ CHECK_PTR(dset, "H5VL_object");
+
+ /* Verify version for layout and fill value messages */
+ if (low == H5F_LIBVER_EARLIEST) {
+ /* For layout message: the earliest version the library will set is 3 */
+ /* For fill value message: the earliest version the library will set is 2 */
+ VERIFY(dset->shared->layout.version, H5O_LAYOUT_VERSION_DEFAULT, "H5O_layout_ver_bounds");
+ VERIFY(dset->shared->dcpl_cache.fill.version, H5O_FILL_VERSION_2, "H5O_fill_ver_bounds");
+ }
+ else {
+ VERIFY(dset->shared->layout.version, H5O_layout_ver_bounds[low], "H5O_layout_ver_bounds");
+ VERIFY(dset->shared->dcpl_cache.fill.version, H5O_fill_ver_bounds[low], "H5O_fill_ver_bounds");
+ }
+
+ /* Verify filter pipeline message version */
+ VERIFY(dset->shared->dcpl_cache.pline.version, H5O_pline_ver_bounds[low], "H5O_pline_ver_bounds");
+
+ /* Close the dataset */
+ ret = H5Dclose(did);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close the dataspace */
+ ret = H5Sclose(sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Set up dataspace and dcpl for creating a chunked dataset
+ with "no filter edge chunks" enabled.
+ This will result in a version 4 layout message */
+ sid = H5Screate_simple(2, fix_dims2, NULL);
+ CHECK(sid, H5I_INVALID_HID, "H5Screate_simple");
+ dcpl = H5Pcreate(H5P_DATASET_CREATE);
+ CHECK(dcpl, H5I_INVALID_HID, "H5Pcreate");
+ ret = H5Pset_chunk(dcpl, 2, fix_chunks2);
+ CHECK(ret, FAIL, "H5Pset_chunk");
+ ret = H5Pset_chunk_opts(dcpl, H5D_CHUNK_DONT_FILTER_PARTIAL_CHUNKS);
+ CHECK(ret, FAIL, "H5Pset_chunk_opts");
+
+ /* Create the chunked dataset */
+ H5E_BEGIN_TRY
+ {
+ did = H5Dcreate2(fid, DSETB, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (did >= 0) {
+
+ /* Get the internal dataset pointer */
+ dset = (H5D_t *)H5VL_object(did);
+ CHECK_PTR(dset, "H5VL_object");
+
+ /* Verify layout message version and chunk indexing type */
+ VERIFY(dset->shared->layout.version, H5O_LAYOUT_VERSION_4, "H5O_layout_ver_bounds");
+ VERIFY(dset->shared->layout.u.chunk.idx_type, H5D_CHUNK_IDX_FARRAY, "chunk_index_type");
+
+ /* Close the dataset */
+ ret = H5Dclose(did);
+ CHECK(ret, FAIL, "H5Dclose");
+ }
+
+ /* Close the file */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Close the dataspace */
+ ret = H5Sclose(sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close the dataset creation property list */
+ ret = H5Pclose(dcpl);
+ CHECK(ret, FAIL, "H5Pclose");
+
+ /* Create a default file access property list which is used
+ to open the file in the 'for' loop */
+ new_fapl = H5Pcreate(H5P_FILE_ACCESS);
+ CHECK(new_fapl, H5I_INVALID_HID, "H5Pcreate");
+
+ /* Set up dataspace and dcpl for creating a chunked dataset with
+ 2 unlimited dimensions in the 'for' loop */
+ sid = H5Screate_simple(2, dims2, max_dims2);
+ CHECK(sid, H5I_INVALID_HID, "H5Screate_simple");
+ dcpl = H5Pcreate(H5P_DATASET_CREATE);
+ CHECK(dcpl, H5I_INVALID_HID, "H5Pcreate");
+ ret = H5Pset_chunk(dcpl, 2, chunks2);
+ CHECK(ret, FAIL, "H5Pset_chunk");
+
+ /* Loop through all the combinations of low/high bounds in new_fapl */
+ /* Open the file with the fapl and create the chunked dataset */
+ /* Verify the dataset's layout, fill value and filter pipeline message versions */
+ for (low = H5F_LIBVER_EARLIEST; low < H5F_LIBVER_NBOUNDS; low++) {
+ for (high = H5F_LIBVER_EARLIEST; high < H5F_LIBVER_NBOUNDS; high++) {
+ H5E_BEGIN_TRY
+ {
+ ret = H5Pset_libver_bounds(new_fapl, low, high);
+ }
+ H5E_END_TRY;
+
+ if (ret < 0) /* Invalid low/high combinations */
+ continue;
+
+ /* Open the file */
+ H5E_BEGIN_TRY
+ {
+ fid = H5Fopen(FILE8, H5F_ACC_RDWR, new_fapl);
+ }
+ H5E_END_TRY;
+
+ if (fid >= 0) { /* The file open succeeds */
+
+ /* Get the internal file pointer */
+ f = (H5F_t *)H5VL_object(fid);
+ CHECK_PTR(f, "H5VL_object");
+
+ /* Create the chunked dataset */
+ did = H5Dcreate2(fid, DSETC, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT);
+ CHECK(did, H5I_INVALID_HID, "H5Dcreate2");
+
+                /* Get the internal dataset pointer */
+ dset = (H5D_t *)H5VL_object(did);
+ CHECK_PTR(dset, "H5VL_object");
+
+ if (dset) {
+ /* Verify the dataset's layout, fill value and filter pipeline message versions */
+ /* Also verify the chunk indexing type */
+ if (f->shared->low_bound == H5F_LIBVER_EARLIEST) {
+ /* For layout message: the earliest version the library will set is 3 */
+ /* For fill value message: the earliest version the library will set is 2 */
+ VERIFY(dset->shared->layout.version, H5O_LAYOUT_VERSION_DEFAULT,
+ "H5O_layout_ver_bounds");
+ VERIFY(dset->shared->dcpl_cache.fill.version, H5O_FILL_VERSION_2,
+ "H5O_fill_ver_bounds");
+ }
+ else {
+ VERIFY(dset->shared->layout.version, H5O_layout_ver_bounds[f->shared->low_bound],
+ "H5O_layout_ver_bounds");
+ VERIFY(dset->shared->dcpl_cache.fill.version,
+ H5O_fill_ver_bounds[f->shared->low_bound], "H5O_fill_ver_bounds");
+ }
+
+ /* Verify the filter pipeline message version */
+ VERIFY(dset->shared->dcpl_cache.pline.version, H5O_pline_ver_bounds[f->shared->low_bound],
+ "H5O_pline_ver_bounds");
+
+ /* Verify the dataset's chunk indexing type */
+ if (dset->shared->layout.version == H5O_LAYOUT_VERSION_LATEST)
+ VERIFY(dset->shared->layout.u.chunk.idx_type, H5D_CHUNK_IDX_BT2, "chunk_index_type");
+ else
+ VERIFY(dset->shared->layout.u.chunk.idx_type, H5D_CHUNK_IDX_BTREE,
+ "chunk_index_type");
+ }
+
+ /* Close the dataset */
+ ret = H5Dclose(did);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Delete the dataset */
+ ret = H5Ldelete(fid, DSETC, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Ldelete");
+
+ /* Close the file */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ } /* end if */
+ } /* end for */
+ } /* end for */
+
+ /* Close the file access property list */
+ ret = H5Pclose(new_fapl);
+ CHECK(ret, FAIL, "H5Pclose");
+
+ /* Close the dataspace */
+ ret = H5Sclose(sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close the dataset creation property list */
+ ret = H5Pclose(dcpl);
+ CHECK(ret, FAIL, "H5Pclose");
+
+} /* end test_libver_bounds_dataset() */
+
+/****************************************************************
+**
+** test_libver_bounds_dataspace():
+** Verify dataspace message versions:
+**
+** (a) Create a file with default fcpl and the input fapl.
+** Create the following two datasets:
+** --A dataset with scalar dataspace
+** --A dataset with null dataspace
+** For both datasets, verify the dataspace message versions.
+** Close the file.
+**
+** (b) Create a new fapl that is set to the 5 pairs of low/high
+**      bounds in a "for" loop. For each pair of settings in the
+** new fapl:
+** --Open the same file in (a) with the fapl
+** --Create a chunked dataset, a compact dataset and
+** a contiguous dataset
+** --Verify the dataspace message version for these
+** three datasets
+** --Delete the three datasets and the dataspaces
+** --Close the file
+**
+****************************************************************/
+static void
+test_libver_bounds_dataspace(hid_t fapl)
+{
+ hid_t fid = H5I_INVALID_HID; /* File ID */
+ hid_t new_fapl = H5I_INVALID_HID; /* File access property list */
+ hid_t did = H5I_INVALID_HID, did_null = H5I_INVALID_HID; /* Dataset IDs */
+ hid_t did_compact = H5I_INVALID_HID, did_contig = H5I_INVALID_HID; /* Dataset IDs */
+ hid_t sid = H5I_INVALID_HID, sid_null = H5I_INVALID_HID; /* Dataspace IDs */
+ hid_t sid_compact = H5I_INVALID_HID, sid_contig = H5I_INVALID_HID; /* Dataspace IDs */
+ hid_t dcpl = H5I_INVALID_HID; /* Dataset creation property list */
+ hid_t dcpl_compact = H5I_INVALID_HID, dcpl_contig = H5I_INVALID_HID; /* Dataset creation property lists */
+ H5S_t *space = NULL, *space_null = NULL; /* Internal dataspace pointers */
+ H5F_t *f = NULL; /* Internal file pointer */
+ H5F_libver_t low, high; /* Low and high bounds */
+ hsize_t dims[1] = {1}; /* Dimension sizes */
+ hsize_t dims2[2] = {5, 4}; /* Dimension sizes */
+ hsize_t max_dims[1] = {H5S_UNLIMITED}; /* Maximum dimension sizes */
+ hsize_t chunks[1] = {4}; /* Chunk dimension sizes */
+ herr_t ret; /* Return value */
+
+ /* Retrieve the low/high bounds from the input fapl */
+ ret = H5Pget_libver_bounds(fapl, &low, &high);
+ CHECK(ret, FAIL, "H5Pget_libver_bounds");
+
+ /* Create the file with the input fapl */
+ fid = H5Fcreate(FILE8, H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
+ CHECK(fid, H5I_INVALID_HID, "H5Fcreate");
+
+ /* Create scalar dataspace */
+ sid = H5Screate(H5S_SCALAR);
+ CHECK(sid, H5I_INVALID_HID, "H5Screate");
+
+ /* Create a dataset with the scalar dataspace */
+ did = H5Dcreate2(fid, DSET, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(did, H5I_INVALID_HID, "H5Dcreate");
+
+ /* Get the internal dataspace pointer */
+ sid = H5Dget_space(did);
+ CHECK(sid, H5I_INVALID_HID, "H5Dget_space");
+ space = (H5S_t *)H5I_object(sid);
+ CHECK_PTR(space, "H5I_object");
+
+ /* Verify the dataspace version */
+ VERIFY(space->extent.version, H5O_sdspace_ver_bounds[low], "H5O_sdspace_ver_bounds");
+
+ /* Create null dataspace */
+ sid_null = H5Screate(H5S_NULL);
+ CHECK(sid_null, H5I_INVALID_HID, "H5Screate");
+
+ /* Create a dataset with the null dataspace */
+ did_null = H5Dcreate2(fid, DSET_NULL, H5T_NATIVE_INT, sid_null, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(did_null, H5I_INVALID_HID, "H5Dcreate");
+
+ /* Get the internal dataspace pointer */
+ sid_null = H5Dget_space(did_null);
+ CHECK(sid_null, H5I_INVALID_HID, "H5Dget_space");
+ space_null = (H5S_t *)H5I_object(sid_null);
+ CHECK_PTR(space_null, "H5I_object");
+
+ /* Verify the dataspace version */
+ VERIFY(space_null->extent.version, H5O_SDSPACE_VERSION_2, "H5O_sdspace_ver_bounds");
+
+ /* Close the datasets */
+ ret = H5Dclose(did);
+ CHECK(ret, FAIL, "H5Dclose");
+ ret = H5Dclose(did_null);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close the dataspaces */
+ ret = H5Sclose(sid);
+ CHECK(ret, FAIL, "H5Sclose");
+ ret = H5Sclose(sid_null);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close the file */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Create a default file access property list which is used
+ to open the file in the 'for' loop */
+ new_fapl = H5Pcreate(H5P_FILE_ACCESS);
+ CHECK(new_fapl, H5I_INVALID_HID, "H5Pcreate");
+
+ /* Set up dataspace and dcpl for creating a chunked dataset */
+ sid = H5Screate_simple(1, dims, max_dims);
+ CHECK(sid, H5I_INVALID_HID, "H5Screate_simple");
+ dcpl = H5Pcreate(H5P_DATASET_CREATE);
+ CHECK(dcpl, H5I_INVALID_HID, "H5Pcreate");
+ ret = H5Pset_chunk(dcpl, 1, chunks);
+ CHECK(ret, FAIL, "H5Pset_chunk");
+
+ /* Set up dataspace and dcpl for creating a compact dataset */
+ sid_compact = H5Screate_simple(1, dims, NULL);
+ CHECK(sid_compact, H5I_INVALID_HID, "H5Screate_simple");
+ dcpl_compact = H5Pcreate(H5P_DATASET_CREATE);
+ CHECK(dcpl_compact, H5I_INVALID_HID, "H5Pcreate");
+ ret = H5Pset_layout(dcpl_compact, H5D_COMPACT);
+ CHECK(ret, FAIL, "H5Pset_layout");
+
+ /* Set up dataspace and dcpl for creating a contiguous dataset */
+ sid_contig = H5Screate_simple(2, dims2, NULL);
+ CHECK(sid_contig, H5I_INVALID_HID, "H5Screate_simple");
+ dcpl_contig = H5Pcreate(H5P_DATASET_CREATE);
+ CHECK(dcpl_contig, H5I_INVALID_HID, "H5Pcreate");
+ ret = H5Pset_layout(dcpl_contig, H5D_CONTIGUOUS);
+ CHECK(ret, FAIL, "H5Pset_layout");
+
+ /* Loop through all the combinations of low/high bounds in new_fapl */
+ /* Open the file and create the chunked/compact/contiguous datasets */
+ /* Verify the dataspace message version for the three datasets */
+ for (low = H5F_LIBVER_EARLIEST; low < H5F_LIBVER_NBOUNDS; low++) {
+ for (high = H5F_LIBVER_EARLIEST; high < H5F_LIBVER_NBOUNDS; high++) {
+ hid_t tmp_sid, tmp_sid_compact, tmp_sid_contig; /* Dataspace IDs */
+ H5S_t *tmp_space, *tmp_space_compact, *tmp_space_contig; /* Internal dataspace pointers */
+
+ H5E_BEGIN_TRY
+ {
+ ret = H5Pset_libver_bounds(new_fapl, low, high);
+ }
+ H5E_END_TRY;
+
+ if (ret < 0) /* Invalid low/high combinations */
+ continue;
+
+ /* Open the file */
+ H5E_BEGIN_TRY
+ {
+ fid = H5Fopen(FILE8, H5F_ACC_RDWR, new_fapl);
+ }
+ H5E_END_TRY;
+
+ if (fid >= 0) { /* The file open succeeds */
+
+ /* Get the internal file pointer */
+ f = (H5F_t *)H5VL_object(fid);
+ CHECK_PTR(f, "H5VL_object");
+
+ /* Create the chunked dataset */
+ did = H5Dcreate2(fid, DSETA, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT);
+ CHECK(did, H5I_INVALID_HID, "H5Dcreate2");
+
+ /* Get the internal dataspace pointer for the chunked dataset */
+ tmp_sid = H5Dget_space(did);
+ CHECK(tmp_sid, H5I_INVALID_HID, "H5Dget_space");
+ tmp_space = (H5S_t *)H5I_object(tmp_sid);
+ CHECK_PTR(tmp_space, "H5I_object");
+
+ /* Create the compact dataset */
+ did_compact = H5Dcreate2(fid, DSETB, H5T_NATIVE_INT, sid_compact, H5P_DEFAULT, dcpl_compact,
+ H5P_DEFAULT);
+ CHECK(did_compact, H5I_INVALID_HID, "H5Dcreate2");
+
+ /* Get the internal dataspace pointer for the compact dataset */
+ tmp_sid_compact = H5Dget_space(did_compact);
+ CHECK(tmp_sid_compact, H5I_INVALID_HID, "H5Dget_space");
+ tmp_space_compact = (H5S_t *)H5I_object(tmp_sid_compact);
+ CHECK_PTR(tmp_space_compact, "H5I_object");
+
+ /* Create the contiguous dataset */
+ did_contig =
+ H5Dcreate2(fid, DSETC, H5T_NATIVE_INT, sid_contig, H5P_DEFAULT, dcpl_contig, H5P_DEFAULT);
+ CHECK(did_contig, H5I_INVALID_HID, "H5Dcreate2");
+
+ /* Get the internal dataspace pointer for the contiguous dataset */
+ tmp_sid_contig = H5Dget_space(did_contig);
+ CHECK(tmp_sid_contig, H5I_INVALID_HID, "H5Dget_space");
+ tmp_space_contig = (H5S_t *)H5I_object(tmp_sid_contig);
+ CHECK_PTR(tmp_space_contig, "H5I_object");
+
+ if (tmp_space) {
+ /* Verify versions for the three dataspaces */
+ VERIFY(tmp_space->extent.version, H5O_sdspace_ver_bounds[f->shared->low_bound],
+ "H5O_sdspace_ver_bounds");
+ }
+ if (tmp_space_compact) {
+ VERIFY(tmp_space_compact->extent.version, H5O_sdspace_ver_bounds[f->shared->low_bound],
+ "H5O_sdspace_ver_bounds");
+ }
+ if (tmp_space_contig) {
+ VERIFY(tmp_space_contig->extent.version, H5O_sdspace_ver_bounds[f->shared->low_bound],
+ "H5O_sdspace_ver_bounds");
+ }
+
+ /* Close the three datasets */
+ ret = H5Dclose(did);
+ CHECK(ret, FAIL, "H5Dclose");
+ ret = H5Dclose(did_compact);
+ CHECK(ret, FAIL, "H5Dclose");
+ ret = H5Dclose(did_contig);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close the three dataspaces */
+ ret = H5Sclose(tmp_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+ ret = H5Sclose(tmp_sid_compact);
+ CHECK(ret, FAIL, "H5Sclose");
+ ret = H5Sclose(tmp_sid_contig);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Delete the three datasets */
+ ret = H5Ldelete(fid, DSETA, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Ldelete");
+ ret = H5Ldelete(fid, DSETB, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Ldelete");
+ ret = H5Ldelete(fid, DSETC, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Ldelete");
+
+ /* Close the file */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ } /* end if */
+ } /* end for */
+ } /* end for */
+
+ /* Close the file access property list */
+ ret = H5Pclose(new_fapl);
+ CHECK(ret, FAIL, "H5Pclose");
+
+ /* Close the three dataspaces */
+ ret = H5Sclose(sid);
+ CHECK(ret, FAIL, "H5Sclose");
+ ret = H5Sclose(sid_compact);
+ CHECK(ret, FAIL, "H5Sclose");
+ ret = H5Sclose(sid_contig);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close the three dataset creation property lists */
+ ret = H5Pclose(dcpl);
+ CHECK(ret, FAIL, "H5Pclose");
+ ret = H5Pclose(dcpl_compact);
+ CHECK(ret, FAIL, "H5Pclose");
+ ret = H5Pclose(dcpl_contig);
+ CHECK(ret, FAIL, "H5Pclose");
+
+} /* end test_libver_bounds_dataspace() */
+
+/****************************************************************
+**
+** test_libver_bounds_datatype():
+** Verify the datatype message version:
+**
+** (a) Create the following datatypes:
+** 1) integer
+** 2) enum
+** 3) array
+** 4) compound
+** 5) vlen
+** (b) Call test_libver_bounds_datatype_check() for each
+** datatype in (a) to verify the datatype message version.
+**
+****************************************************************/
+static void
+test_libver_bounds_datatype(hid_t fapl)
+{
+ hid_t tid = H5I_INVALID_HID, tid_enum = H5I_INVALID_HID, tid_array = H5I_INVALID_HID; /* Datatype IDs */
+ hid_t tid_compound = H5I_INVALID_HID, tid_vlen = H5I_INVALID_HID; /* Datatype IDs */
+ int enum_value; /* Value for enum datatype */
+ typedef struct s1 { /* Data structure for compound datatype */
+ char c;
+ int i;
+ } s1;
+ hsize_t dims[1] = {1}; /* Dimension sizes */
+ herr_t ret; /* Return value */
+
+ /* Create integer datatype */
+ tid = H5Tcopy(H5T_NATIVE_INT);
+
+ /* Verify datatype message version */
+ test_libver_bounds_datatype_check(fapl, tid);
+
+ /* Create enum datatype */
+ tid_enum = H5Tenum_create(tid);
+ enum_value = 0;
+ H5Tenum_insert(tid_enum, "val1", &enum_value);
+ enum_value = 1;
+ H5Tenum_insert(tid_enum, "val2", &enum_value);
+
+ /* Verify datatype message version */
+ test_libver_bounds_datatype_check(fapl, tid_enum);
+
+ /* Create array datatype */
+ tid_array = H5Tarray_create2(tid, 1, dims);
+
+ /* Verify datatype message version */
+ test_libver_bounds_datatype_check(fapl, tid_array);
+
+ /* Create compound datatype */
+ tid_compound = H5Tcreate(H5T_COMPOUND, sizeof(s1));
+ H5Tinsert(tid_compound, "c", HOFFSET(s1, c), H5T_STD_U8LE);
+ H5Tinsert(tid_compound, "i", HOFFSET(s1, i), H5T_NATIVE_INT);
+
+ /* Verify datatype message version */
+ test_libver_bounds_datatype_check(fapl, tid_compound);
+
+ /* Create vlen datatype */
+ tid_vlen = H5Tvlen_create(tid);
+
+ /* Verify datatype message version */
+ test_libver_bounds_datatype_check(fapl, tid_vlen);
+
+ /* Close the datatypes */
+ ret = H5Tclose(tid);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ ret = H5Tclose(tid_enum);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ ret = H5Tclose(tid_array);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ ret = H5Tclose(tid_compound);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ ret = H5Tclose(tid_vlen);
+ CHECK(ret, FAIL, "H5Tclose");
+
+} /* end test_libver_bounds_datatype() */
+
+/****************************************************************
+**
+** test_libver_bounds_datatype_check():
+** Helper routine called by test_libver_bounds_datatype()
+** to verify the datatype message version for the input tid:
+**
+** (a) Create a file with default fcpl and the input fapl.
+** Create a contiguous dataset with the input tid.
+** Verify the datatype message version.
+** Create a committed datatype of string to be
+** used later.
+** Close the file.
+**
+** (b) Create a new fapl that is set to the 5 pairs of low/high
+**                bounds in a "for" loop. For each pair of settings in
+** the new fapl:
+** --Open the same file in (a) with the fapl
+** --Verify the message version for the committed
+** datatype created earlier
+** --Create a chunked dataset with the input tid
+** --Verify the datatype message version
+** --Close and delete the dataset
+** --Close the file
+**
+****************************************************************/
+static void
+test_libver_bounds_datatype_check(hid_t fapl, hid_t tid)
+{
+ hid_t fid = H5I_INVALID_HID; /* File ID */
+ hid_t new_fapl = H5I_INVALID_HID; /* File access property list */
+ hid_t dcpl = H5I_INVALID_HID; /* Dataset creation property list */
+ hid_t dtid = H5I_INVALID_HID; /* Datatype ID for the dataset */
+ hid_t str_tid = H5I_INVALID_HID; /* String datatype ID */
+ hid_t did = H5I_INVALID_HID; /* Dataset ID */
+ hid_t sid = H5I_INVALID_HID; /* Dataspace ID */
+ hsize_t dims[1] = {1}; /* Dimension sizes */
+ hsize_t dims2[2] = {5, 4}; /* Dimension sizes */
+ hsize_t max_dims2[2] = {H5S_UNLIMITED, H5S_UNLIMITED}; /* Maximum dimension sizes */
+ hsize_t chunks[2] = {2, 3}; /* Chunk dimension sizes */
+ H5T_t *dtype = NULL; /* Internal datatype pointer */
+ H5T_t *str_dtype = NULL; /* Internal datatype pointer for the string datatype */
+ H5F_t *f = NULL; /* Internal file pointer */
+ H5F_libver_t low, high; /* Low and high bounds */
+ herr_t ret; /* Return value */
+
+ /* Retrieve the low/high version bounds from the input fapl */
+ ret = H5Pget_libver_bounds(fapl, &low, &high);
+ CHECK(ret, FAIL, "H5Pget_libver_bounds");
+
+ /* Create the file with the input fapl */
+ fid = H5Fcreate(FILE8, H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
+ CHECK(fid, H5I_INVALID_HID, "H5Fcreate");
+
+ /* Create a committed datatype of string which will be used
+ later inside the 'for' loop */
+ str_tid = H5Tcopy(H5T_C_S1);
+ CHECK(str_tid, H5I_INVALID_HID, "H5Tcopy");
+ ret = H5Tset_size(str_tid, (size_t)10);
+ CHECK(ret, FAIL, "H5Tset_size");
+ ret = H5Tcommit2(fid, "datatype", str_tid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Tcommit2");
+ ret = H5Tclose(str_tid);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ /* Create dataspace */
+ sid = H5Screate_simple(1, dims, NULL);
+ CHECK(sid, H5I_INVALID_HID, "H5Screate_simple");
+
+ /* Create a dataset with the input tid */
+ did = H5Dcreate2(fid, DSET1, tid, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(did, H5I_INVALID_HID, "H5Dcreate2");
+
+ /* Get the dataset's datatype */
+ dtid = H5Dget_type(did);
+ CHECK(dtid, H5I_INVALID_HID, "H5Dget_type");
+
+ /* Get the internal datatype pointer */
+ dtype = (H5T_t *)H5I_object(dtid);
+ CHECK_PTR(dtype, "H5I_object");
+
+ /* Verify the datatype message version */
+ /* H5T_COMPOUND, H5T_ENUM, H5T_ARRAY:
+ * --the library will set version according to low_bound
+ * --H5T_ARRAY: the earliest version the library will set is 2
+ * H5T_INTEGER, H5T_FLOAT, H5T_TIME, H5T_STRING, H5T_BITFIELD, H5T_OPAQUE, H5T_REFERENCE:
+     * --the library will only use the basic version
+ */
+
+ if (dtype->shared->type == H5T_COMPOUND || dtype->shared->type == H5T_ENUM ||
+ dtype->shared->type == H5T_ARRAY) {
+ if (dtype->shared->type == H5T_ARRAY && low == H5F_LIBVER_EARLIEST)
+ VERIFY(dtype->shared->version, H5O_DTYPE_VERSION_2, "H5O_dtype_ver_bounds");
+ else
+ VERIFY(dtype->shared->version, H5O_dtype_ver_bounds[low], "H5O_dtype_ver_bounds");
+ }
+ else
+ VERIFY(dtype->shared->version, H5O_dtype_ver_bounds[H5F_LIBVER_EARLIEST], "H5O_dtype_ver_bounds");
+
+ /* Close the dataset */
+ ret = H5Dclose(did);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close the dataspace */
+ ret = H5Sclose(sid);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close the datatype */
+ ret = H5Tclose(dtid);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Close the file */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Create a default file access property list */
+ new_fapl = H5Pcreate(H5P_FILE_ACCESS);
+ CHECK(new_fapl, H5I_INVALID_HID, "H5Pcreate");
+
+ /* Set up dataspace and dcpl for creating a chunked dataset */
+ sid = H5Screate_simple(2, dims2, max_dims2);
+ CHECK(sid, H5I_INVALID_HID, "H5Screate_simple");
+ dcpl = H5Pcreate(H5P_DATASET_CREATE);
+ CHECK(dcpl, H5I_INVALID_HID, "H5Pcreate");
+ ret = H5Pset_chunk(dcpl, 2, chunks);
+ CHECK(ret, FAIL, "H5Pset_chunk");
+
+ /* Loop through all the combinations of low/high bounds */
+ /* Open the file and create the chunked dataset with the input tid */
+ /* Verify the dataset's datatype message version */
+    /* Also verify the committed datatype message version */
+ for (low = H5F_LIBVER_EARLIEST; low < H5F_LIBVER_NBOUNDS; low++) {
+ for (high = H5F_LIBVER_EARLIEST; high < H5F_LIBVER_NBOUNDS; high++) {
+ H5E_BEGIN_TRY
+ {
+ ret = H5Pset_libver_bounds(new_fapl, low, high);
+ }
+ H5E_END_TRY;
+
+ if (ret < 0) /* Invalid low/high combinations */
+ continue;
+
+ /* Open the file */
+ H5E_BEGIN_TRY
+ {
+ fid = H5Fopen(FILE8, H5F_ACC_RDWR, new_fapl);
+ }
+ H5E_END_TRY;
+
+ if (fid >= 0) { /* The file open succeeds */
+
+ /* Get the internal file pointer */
+ f = (H5F_t *)H5VL_object(fid);
+ CHECK_PTR(f, "H5VL_object");
+
+ /* Open the committed datatype */
+ str_tid = H5Topen2(fid, "datatype", H5P_DEFAULT);
+ CHECK(str_tid, FAIL, "H5Topen2");
+ str_dtype = (H5T_t *)H5VL_object(str_tid);
+ CHECK_PTR(str_dtype, "H5VL_object");
+
+ /* Verify the committed datatype message version */
+ VERIFY(str_dtype->shared->version, H5O_dtype_ver_bounds[H5F_LIBVER_EARLIEST],
+ "H5O_dtype_ver_bounds");
+
+ /* Close the committed datatype */
+ ret = H5Tclose(str_tid);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ /* Create the chunked dataset */
+ did = H5Dcreate2(fid, DSETNAME, tid, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT);
+ CHECK(did, H5I_INVALID_HID, "H5Dcreate2");
+
+ /* Get the dataset's datatype */
+ dtid = H5Dget_type(did);
+ CHECK(dtid, H5I_INVALID_HID, "H5Dget_type");
+
+ /* Get the internal datatype pointer */
+ dtype = (H5T_t *)H5I_object(dtid);
+ CHECK_PTR(dtype, "H5I_object");
+
+ if (dtype) {
+ /* Verify the dataset's datatype message version */
+ /* H5T_COMPOUND, H5T_ENUM, H5T_ARRAY:
+ * --the library will set version according to low_bound
+ * --H5T_ARRAY: the earliest version the library will set is 2
+ * H5T_INTEGER, H5T_FLOAT, H5T_TIME, H5T_STRING, H5T_BITFIELD, H5T_OPAQUE, H5T_REFERENCE:
+                     * --the library will only use the basic version
+ */
+ if (dtype->shared->type == H5T_COMPOUND || dtype->shared->type == H5T_ENUM ||
+ dtype->shared->type == H5T_ARRAY) {
+ if (dtype->shared->type == H5T_ARRAY && f->shared->low_bound == H5F_LIBVER_EARLIEST)
+ VERIFY(dtype->shared->version, H5O_DTYPE_VERSION_2, "H5O_dtype_ver_bounds");
+ else
+ VERIFY(dtype->shared->version, H5O_dtype_ver_bounds[f->shared->low_bound],
+ "H5O_dtype_ver_bounds");
+ }
+ else
+ VERIFY(dtype->shared->version, H5O_dtype_ver_bounds[H5F_LIBVER_EARLIEST],
+ "H5O_dtype_ver_bounds");
+ }
+
+ /* Close the dataset */
+ ret = H5Dclose(did);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close the dataset's datatype */
+ ret = H5Tclose(dtid);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ /* Delete the dataset */
+ ret = H5Ldelete(fid, DSETNAME, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Ldelete");
+
+ /* Close the file */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ } /* end if */
+ } /* end for */
+ } /* end for */
+
+ /* Close the file access property list */
+ ret = H5Pclose(new_fapl);
+ CHECK(ret, FAIL, "H5Pclose");
+
+ /* Close the dataspace */
+ ret = H5Sclose(sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close the dataset creation property list */
+ ret = H5Pclose(dcpl);
+ CHECK(ret, FAIL, "H5Pclose");
+
+} /* end test_libver_bounds_datatype_check() */
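+
+/* Illustrative sketch (not part of the test suite, kept disabled): how an
+ * application would use H5Pset_libver_bounds() to control the low/high bounds
+ * that the version checks above exercise. The file name "bounds_example.h5"
+ * and the particular bounds chosen here are assumptions for illustration only.
+ */
+#if 0
+static void
+example_libver_bounds_sketch(void)
+{
+    hid_t fapl = H5I_INVALID_HID;
+    hid_t fid  = H5I_INVALID_HID;
+
+    /* Restrict the library to the 1.8 file format or later; objects created
+     * in the file may then use newer message versions, as verified by
+     * test_libver_bounds_datatype_check().
+     */
+    fapl = H5Pcreate(H5P_FILE_ACCESS);
+    H5Pset_libver_bounds(fapl, H5F_LIBVER_V18, H5F_LIBVER_LATEST);
+
+    fid = H5Fcreate("bounds_example.h5", H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
+
+    /* ... create datasets/datatypes here; their message versions follow the
+     * low bound ... */
+
+    H5Fclose(fid);
+    H5Pclose(fapl);
+}
+#endif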
+
+/****************************************************************
+**
+** test_libver_bounds_attributes():
+** Verify the attribute message versions:
+**
+** (a) Create a file with default fcpl and the input fapl.
+** Create a group and attach the following three attributes
+** to the group:
+** (1) Attribute with a committed datatype
+** (2) Attribute with integer type
+** (3) Attribute with character encoding set
+** Verify the three attributes' message versions.
+** Close the file.
+**
+** (b) Create a fcpl that has shared datatype message enabled.
+** Create a file with the fcpl and the input fapl.
+** Create a group and attach an attribute with shared
+** integer type to the group.
+** Verify the attribute message version.
+** Close the file
+**
+**      (c) Create a new fapl that is set to the 5 pairs of low/high
+**          bounds in a "for" loop. For each pair of settings in
+**          the new fapl:
+** --Open the same file in (b) with the fapl
+** --Open the group and attach an attribute with integer
+** type to the group
+** --Verify the attribute message version
+** --Delete the attribute
+** --Close the group and the file
+**
+****************************************************************/
+static void
+test_libver_bounds_attributes(hid_t fapl)
+{
+ hid_t fid = H5I_INVALID_HID; /* File ID */
+ hid_t fcpl = H5I_INVALID_HID; /* File creation property list */
+ hid_t new_fapl = H5I_INVALID_HID; /* File access property list */
+ hid_t tid = H5I_INVALID_HID; /* Datatype ID */
+ hid_t gid = H5I_INVALID_HID; /* Group ID */
+ hid_t sid = H5I_INVALID_HID; /* Dataspace ID */
+ hid_t aid = H5I_INVALID_HID; /* Attribute ID */
+ hid_t attr_cpl = H5I_INVALID_HID; /* Attribute creation property list */
+ H5A_t *attr = NULL; /* Internal attribute pointer */
+ H5F_t *f = NULL; /* Internal file pointer */
+ H5F_libver_t low, high; /* Low and high bounds */
+ herr_t ret; /* Return value */
+
+ /* Retrieve the low/high bounds from the input fapl */
+ ret = H5Pget_libver_bounds(fapl, &low, &high);
+ CHECK(ret, FAIL, "H5Pget_libver_bounds");
+
+ /* Create the file */
+ fid = H5Fcreate(FILE8, H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
+ CHECK(fid, H5I_INVALID_HID, "H5Fcreate");
+
+ /* Integer datatype */
+ tid = H5Tcopy(H5T_NATIVE_INT);
+ CHECK(tid, H5I_INVALID_HID, "H5Tcopy");
+
+ /* Create a committed datatype */
+ ret = H5Tcommit2(fid, "datatype", tid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Tcommit2");
+
+ /* Create dataspace */
+ sid = H5Screate(H5S_SCALAR);
+ CHECK(sid, H5I_INVALID_HID, "H5Screate");
+
+ /* Create a group */
+ gid = H5Gcreate2(fid, GRP_NAME, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(gid, H5I_INVALID_HID, "H5Gcreate2");
+
+ /* Attach an attribute to the group with the committed datatype */
+ aid = H5Acreate2(gid, "attr1", tid, sid, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(aid, H5I_INVALID_HID, "H5Acreate2");
+
+ /* Get the internal attribute pointer */
+ attr = (H5A_t *)H5VL_object(aid);
+ CHECK_PTR(attr, "H5VL_object");
+
+ /* Verify the attribute version */
+ if (low == H5F_LIBVER_EARLIEST)
+        /* The earliest version the library can set for an attribute with a committed datatype is 2 */
+ VERIFY(attr->shared->version, H5O_ATTR_VERSION_2, "H5O_attr_ver_bounds");
+ else
+ VERIFY(attr->shared->version, H5O_attr_ver_bounds[low], "H5O_attr_ver_bounds");
+
+ /* Close the attribute */
+ ret = H5Aclose(aid);
+ CHECK(ret, FAIL, "H5Aclose");
+
+    /* Attach an attribute with integer type to the group */
+ aid = H5Acreate2(gid, "attr2", H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(aid, FAIL, "H5Acreate2");
+
+ /* Get the internal attribute pointer */
+ attr = (H5A_t *)H5VL_object(aid);
+ CHECK_PTR(attr, "H5VL_object");
+
+ /* Verify attribute version */
+ VERIFY(attr->shared->version, H5O_attr_ver_bounds[low], "H5O_attr_ver_bounds");
+
+ /* Close the attribute */
+ ret = H5Aclose(aid);
+ CHECK(ret, FAIL, "H5Aclose");
+
+ /* Enable character encoding in attribute creation property list */
+ attr_cpl = H5Pcreate(H5P_ATTRIBUTE_CREATE);
+ CHECK(attr_cpl, H5I_INVALID_HID, "H5Pcreate");
+ ret = H5Pset_char_encoding(attr_cpl, H5T_CSET_UTF8);
+ CHECK(ret, FAIL, "H5Pset_char_encoding");
+
+ /* Attach an attribute to the group with character encoding set */
+ aid = H5Acreate2(gid, "attr3", H5T_NATIVE_INT, sid, attr_cpl, H5P_DEFAULT);
+ CHECK(aid, H5I_INVALID_HID, "H5Acreate2");
+
+ /* Get internal attribute pointer */
+ attr = (H5A_t *)H5VL_object(aid);
+ CHECK_PTR(attr, "H5VL_object");
+
+ /* Verify attribute version */
+ if (low == H5F_LIBVER_EARLIEST)
+ /* The earliest version the library can set for an attribute with character encoding is 3 */
+ VERIFY(attr->shared->version, H5O_ATTR_VERSION_3, "H5O_attr_ver_bounds");
+ else
+ VERIFY(attr->shared->version, H5O_attr_ver_bounds[low], "H5O_attr_ver_bounds");
+
+ /* Close the attribute */
+ ret = H5Aclose(aid);
+ CHECK(ret, FAIL, "H5Aclose");
+
+ /* Close the attribute creation property list */
+ ret = H5Pclose(attr_cpl);
+ CHECK(ret, FAIL, "H5Pclose");
+
+ /* Close the group */
+ ret = H5Gclose(gid);
+ CHECK(ret, FAIL, "H5Gclose");
+
+ /* Close the dataspace */
+ ret = H5Sclose(sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close the datatype */
+ ret = H5Tclose(tid);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ /* Close the file */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+
+    /* Create a file creation property list */
+ fcpl = H5Pcreate(H5P_FILE_CREATE);
+ CHECK(fcpl, H5I_INVALID_HID, "H5Pcreate");
+
+ /* Enable shared datatype message */
+ ret = H5Pset_shared_mesg_nindexes(fcpl, 1);
+ CHECK(ret, FAIL, "H5Pset_shared_mesg_nindexes");
+ ret = H5Pset_shared_mesg_index(fcpl, 0, H5O_SHMESG_DTYPE_FLAG, 2);
+ CHECK(ret, FAIL, "H5Pset_shared_mesg_index");
+
+ /* Create the file with shared datatype message enabled */
+ fid = H5Fcreate(FILE8, H5F_ACC_TRUNC, fcpl, fapl);
+ CHECK(fid, H5I_INVALID_HID, "H5Fcreate");
+
+ /* Create an integer datatype */
+ tid = H5Tcopy(H5T_NATIVE_INT);
+ CHECK(tid, H5I_INVALID_HID, "H5Tcopy");
+
+ /* Create dataspace */
+ sid = H5Screate(H5S_SCALAR);
+ CHECK(sid, H5I_INVALID_HID, "H5Screate");
+
+ /* Create a group */
+ gid = H5Gcreate2(fid, GRP_NAME, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(gid, H5I_INVALID_HID, "H5Gcreate2");
+
+ /* Attach an attribute to the group with shared integer datatype */
+ aid = H5Acreate2(gid, ATTR_NAME, tid, sid, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(aid, H5I_INVALID_HID, "H5Acreate2");
+
+ /* Get the internal attribute pointer */
+ attr = (H5A_t *)H5VL_object(aid);
+ CHECK_PTR(attr, "H5VL_object");
+
+ /* Verify the attribute version */
+ if (low == H5F_LIBVER_EARLIEST)
+        /* The earliest version the library can set for an attribute with a shared datatype is 2 */
+ VERIFY(attr->shared->version, H5O_ATTR_VERSION_2, "H5O_attr_ver_bounds");
+ else
+ VERIFY(attr->shared->version, H5O_attr_ver_bounds[low], "H5O_attr_ver_bounds");
+
+ /* Close the attribute */
+ ret = H5Aclose(aid);
+ CHECK(ret, FAIL, "H5Aclose");
+
+ /* Close the group */
+ ret = H5Gclose(gid);
+ CHECK(ret, FAIL, "H5Gclose");
+
+ /* Close the dataspace */
+ ret = H5Sclose(sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close the datatype */
+ ret = H5Tclose(tid);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ /* Close the file */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Create a default file access property list */
+ new_fapl = H5Pcreate(H5P_FILE_ACCESS);
+ CHECK(new_fapl, FAIL, "H5Pcreate");
+
+ /* Create a scalar dataspace to be used later for the attribute */
+ sid = H5Screate(H5S_SCALAR);
+ CHECK(sid, H5I_INVALID_HID, "H5Screate");
+
+ /* Loop through all the combinations of low/high bounds */
+ /* Open the file and group and attach an attribute to the group */
+ /* Verify the attribute version */
+ for (low = H5F_LIBVER_EARLIEST; low < H5F_LIBVER_NBOUNDS; low++) {
+ for (high = H5F_LIBVER_EARLIEST; high < H5F_LIBVER_NBOUNDS; high++) {
+ H5E_BEGIN_TRY
+ {
+ ret = H5Pset_libver_bounds(new_fapl, low, high);
+ }
+ H5E_END_TRY;
+
+ if (ret < 0) /* Invalid low/high combinations */
+ continue;
+
+ /* Open the file */
+ H5E_BEGIN_TRY
+ {
+ fid = H5Fopen(FILE8, H5F_ACC_RDWR, new_fapl);
+ }
+ H5E_END_TRY;
+
+ if (fid >= 0) { /* The file open succeeds */
+
+ /* Get the internal file pointer */
+ f = (H5F_t *)H5VL_object(fid);
+ CHECK_PTR(f, "H5VL_object");
+
+ /* Open the group */
+ gid = H5Gopen2(fid, GRP_NAME, H5P_DEFAULT);
+ CHECK(gid, FAIL, "H5Gopen2");
+
+ /* Attach an attribute to the group */
+ aid = H5Acreate2(gid, "attr1", H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(aid, FAIL, "H5Acreate2");
+
+ /* Get the internal attribute pointer */
+ attr = (H5A_t *)H5VL_object(aid);
+ CHECK_PTR(attr, "H5VL_object");
+
+ /* Verify the attribute message version */
+ VERIFY(attr->shared->version, H5O_attr_ver_bounds[f->shared->low_bound],
+ "H5O_attr_ver_bounds");
+
+ /* Close the attribute */
+ ret = H5Aclose(aid);
+ CHECK(ret, FAIL, "H5Aclose");
+
+ /* Delete the attribute */
+ ret = H5Adelete(gid, "attr1");
+ CHECK(ret, FAIL, "H5Adelete");
+
+ /* Close the group */
+ ret = H5Gclose(gid);
+ CHECK(ret, FAIL, "H5Gclose");
+
+ /* Close the file */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ } /* end if */
+ } /* end for */
+ } /* end for */
+
+ /* Close the file access property list */
+ ret = H5Pclose(new_fapl);
+ CHECK(ret, FAIL, "H5Pclose");
+
+ /* Close the dataspace */
+ ret = H5Sclose(sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+} /* end test_libver_bounds_attributes() */
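+
+/* Illustrative sketch (not part of the test suite, kept disabled): the minimal
+ * application-side setup for the shared-datatype-message case verified above.
+ * The file name "shared_mesg_example.h5" is an assumption for illustration.
+ */
+#if 0
+static void
+example_shared_mesg_sketch(void)
+{
+    hid_t fcpl = H5I_INVALID_HID;
+    hid_t fid  = H5I_INVALID_HID;
+
+    /* One shared-message index that shares datatype messages once a datatype
+     * is used at least twice in the file.
+     */
+    fcpl = H5Pcreate(H5P_FILE_CREATE);
+    H5Pset_shared_mesg_nindexes(fcpl, 1);
+    H5Pset_shared_mesg_index(fcpl, 0, H5O_SHMESG_DTYPE_FLAG, 2);
+
+    fid = H5Fcreate("shared_mesg_example.h5", H5F_ACC_TRUNC, fcpl, H5P_DEFAULT);
+
+    /* ... attributes that reuse a datatype now reference a shared datatype
+     * message, which raises the attribute message version as checked in
+     * test_libver_bounds_attributes() ... */
+
+    H5Fclose(fid);
+    H5Pclose(fcpl);
+}
+#endif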
+
+/****************************************************************
+**
+** test_libver_macros():
+**      Verify that H5_VERSION_GE and H5_VERSION_LE work correctly.
+**
+****************************************************************/
+static void
+test_libver_macros(void)
+{
+ int major = H5_VERS_MAJOR;
+ int minor = H5_VERS_MINOR;
+ int release = H5_VERS_RELEASE;
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing macros for library version comparison\n"));
+
+ VERIFY(H5_VERSION_GE(major, minor, release), TRUE, "H5_VERSION_GE");
+ VERIFY(H5_VERSION_GE(major - 1, minor, release), TRUE, "H5_VERSION_GE");
+ VERIFY(H5_VERSION_GE(major - 1, minor + 1, release), TRUE, "H5_VERSION_GE");
+ VERIFY(H5_VERSION_GE(major - 1, minor, release + 1), TRUE, "H5_VERSION_GE");
+ VERIFY(H5_VERSION_GE(major, minor - 1, release), TRUE, "H5_VERSION_GE");
+ VERIFY(H5_VERSION_GE(major, minor - 1, release + 1), TRUE, "H5_VERSION_GE");
+ if (H5_VERS_RELEASE > 0)
+ VERIFY(H5_VERSION_GE(major, minor, release - 1), TRUE, "H5_VERSION_GE");
+
+ VERIFY(H5_VERSION_GE(major + 1, minor, release), FALSE, "H5_VERSION_GE");
+ VERIFY(H5_VERSION_GE(major + 1, minor - 1, release), FALSE, "H5_VERSION_GE");
+ VERIFY(H5_VERSION_GE(major + 1, minor - 1, release - 1), FALSE, "H5_VERSION_GE");
+ VERIFY(H5_VERSION_GE(major, minor + 1, release), FALSE, "H5_VERSION_GE");
+ VERIFY(H5_VERSION_GE(major, minor + 1, release - 1), FALSE, "H5_VERSION_GE");
+ VERIFY(H5_VERSION_GE(major, minor, release + 1), FALSE, "H5_VERSION_GE");
+
+ VERIFY(H5_VERSION_LE(major, minor, release), TRUE, "H5_VERSION_LE");
+ VERIFY(H5_VERSION_LE(major + 1, minor, release), TRUE, "H5_VERSION_LE");
+ VERIFY(H5_VERSION_LE(major + 1, minor - 1, release), TRUE, "H5_VERSION_LE");
+ VERIFY(H5_VERSION_LE(major + 1, minor - 1, release - 1), TRUE, "H5_VERSION_LE");
+ VERIFY(H5_VERSION_LE(major, minor + 1, release), TRUE, "H5_VERSION_LE");
+ VERIFY(H5_VERSION_LE(major, minor + 1, release - 1), TRUE, "H5_VERSION_LE");
+ VERIFY(H5_VERSION_LE(major, minor, release + 1), TRUE, "H5_VERSION_LE");
+
+ VERIFY(H5_VERSION_LE(major - 1, minor, release), FALSE, "H5_VERSION_LE");
+ VERIFY(H5_VERSION_LE(major - 1, minor + 1, release), FALSE, "H5_VERSION_LE");
+ VERIFY(H5_VERSION_LE(major - 1, minor + 1, release + 1), FALSE, "H5_VERSION_LE");
+ VERIFY(H5_VERSION_LE(major, minor - 1, release), FALSE, "H5_VERSION_LE");
+ VERIFY(H5_VERSION_LE(major, minor - 1, release + 1), FALSE, "H5_VERSION_LE");
+ if (H5_VERS_RELEASE > 0)
+ VERIFY(H5_VERSION_LE(major, minor, release - 1), FALSE, "H5_VERSION_LE");
+} /* test_libver_macros() */
+
+/****************************************************************
+**
+** test_libver_macros2():
+**      Verify that H5_VERSION_GE works correctly and show how
+** to use it.
+**
+****************************************************************/
+static void
+test_libver_macros2(void)
+{
+ hid_t file;
+ hid_t grp;
+ htri_t status;
+ herr_t ret; /* Return value */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing macros for library version comparison with a file\n"));
+
+ /*
+ * Create a file.
+ */
+ file = H5Fcreate(FILE6, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(file, FAIL, "H5Fcreate");
+
+ /*
+ * Create a group in the file.
+ */
+ grp = H5Gcreate2(file, "Group", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(file, FAIL, "H5Gcreate");
+
+ /*
+ * Close the group
+ */
+ ret = H5Gclose(grp);
+ CHECK(ret, FAIL, "H5Gclose");
+
+ /*
+     * Delete the group using a different function depending on the library
+     * version, then verify that the group was removed.
+ */
+#if H5_VERSION_GE(1, 8, 0)
+ ret = H5Ldelete(file, "Group", H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Lunlink");
+
+ status = H5Lexists(file, "Group", H5P_DEFAULT);
+ VERIFY(status, FALSE, "H5Lexists");
+#else
+ ret = H5Gunlink(file, "Group");
+ CHECK(ret, FAIL, "H5Gunlink");
+
+ H5E_BEGIN_TRY
+ {
+ grp = H5Gopen(file, "Group");
+ }
+ H5E_END_TRY;
+ VERIFY(grp, FAIL, "H5Gopen");
+#endif
+
+ /*
+ * Close the file.
+ */
+ ret = H5Fclose(file);
+ CHECK(ret, FAIL, "H5Fclose");
+
+} /* test_libver_macros2() */
+#endif
+
+#if 0
+/****************************************************************
+**
+** test_incr_filesize():
+**    Verify that H5Fincrement_filesize() and H5Fget_eoa() work as
+** indicated in the "RFC: Enhancement to the tool h5clear".
+**
+****************************************************************/
+static void
+test_incr_filesize(void)
+{
+ hid_t fid; /* File opened with read-write permission */
+ h5_stat_size_t filesize; /* Size of file when empty */
+ hid_t fcpl; /* File creation property list */
+ hid_t fapl; /* File access property list */
+ hid_t dspace; /* Dataspace ID */
+ hid_t dset; /* Dataset ID */
+ hid_t dcpl; /* Dataset creation property list */
+ unsigned u; /* Local index variable */
+ char filename[FILENAME_LEN]; /* Filename to use */
+ char name[32]; /* Dataset name */
+ haddr_t stored_eoa; /* The stored EOA value */
+ hid_t driver_id = -1; /* ID for this VFD */
+ unsigned long driver_flags = 0; /* VFD feature flags */
+ herr_t ret; /* Return value */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing H5Fincrement_filesize() and H5Fget_eoa())\n"));
+
+ fapl = h5_fileaccess();
+ h5_fixname(FILE8, fapl, filename, sizeof filename);
+
+ /* Get the VFD feature flags */
+ driver_id = H5Pget_driver(fapl);
+ CHECK(driver_id, FAIL, "H5Pget_driver");
+
+ ret = H5FDdriver_query(driver_id, &driver_flags);
+ CHECK(ret, FAIL, "H5PDdriver_query");
+
+    /* Check whether the VFD's feature flags indicate support for these two public routines */
+ if (driver_flags & H5FD_FEAT_SUPPORTS_SWMR_IO) {
+
+ fcpl = H5Pcreate(H5P_FILE_CREATE);
+ CHECK(fcpl, FAIL, "H5Pcreate");
+
+ /* Set file space strategy */
+ ret = H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_FSM_AGGR, FALSE, (hsize_t)1);
+ CHECK(ret, FAIL, "H5P_set_file_space_strategy");
+
+ /* Create the test file */
+ fid = H5Fcreate(filename, H5F_ACC_TRUNC, fcpl, fapl);
+ CHECK(fid, FAIL, "H5Fcreate");
+
+ /* Create dataspace for datasets */
+ dspace = H5Screate(H5S_SCALAR);
+ CHECK(dspace, FAIL, "H5Screate");
+
+ /* Create a dataset creation property list */
+ dcpl = H5Pcreate(H5P_DATASET_CREATE);
+ CHECK(dcpl, FAIL, "H5Pcreate");
+
+ /* Set the space allocation time to early */
+ ret = H5Pset_alloc_time(dcpl, H5D_ALLOC_TIME_EARLY);
+ CHECK(ret, FAIL, "H5Pset_alloc_time");
+
+ /* Create datasets in file */
+ for (u = 0; u < 10; u++) {
+ HDsnprintf(name, sizeof(name), "Dataset %u", u);
+ dset = H5Dcreate2(fid, name, H5T_STD_U32LE, dspace, H5P_DEFAULT, dcpl, H5P_DEFAULT);
+ CHECK(dset, FAIL, "H5Dcreate2");
+
+ ret = H5Dclose(dset);
+ CHECK(ret, FAIL, "H5Dclose");
+ } /* end for */
+
+ /* Close dataspace */
+ ret = H5Sclose(dspace);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close dataset creation property list */
+ ret = H5Pclose(dcpl);
+ CHECK(ret, FAIL, "H5Pclose");
+
+ /* Close file */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Get the file size */
+ filesize = h5_get_file_size(filename, fapl);
+
+ /* Open the file */
+ fid = H5Fopen(filename, H5F_ACC_RDWR, fapl);
+ CHECK(fid, FAIL, "H5Fopen");
+
+ /* Get the stored EOA */
+ ret = H5Fget_eoa(fid, &stored_eoa);
+ CHECK(ret, FAIL, "H5Fget_eoa");
+
+ /* Verify the stored EOA is the same as filesize */
+ VERIFY(filesize, stored_eoa, "file size");
+
+ /* Set the EOA to the MAX(EOA, EOF) + 512 */
+ ret = H5Fincrement_filesize(fid, 512);
+ CHECK(ret, FAIL, "H5Fincrement_filesize");
+
+ /* Close file */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Get the file size */
+ filesize = h5_get_file_size(filename, fapl);
+
+ /* Verify the filesize is the previous stored_eoa + 512 */
+ VERIFY(filesize, stored_eoa + 512, "file size");
+
+ /* Close the file access property list */
+ ret = H5Pclose(fapl);
+ CHECK(ret, FAIL, "H5Pclose");
+
+ /* Close the file creation property list */
+ ret = H5Pclose(fcpl);
+ CHECK(ret, FAIL, "H5Pclose");
+ }
+} /* end test_incr_filesize() */
+#endif
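+
+/* Illustrative sketch (not part of the test suite, kept disabled): the typical
+ * h5clear-style use of H5Fget_eoa() and H5Fincrement_filesize() exercised by
+ * test_incr_filesize() above. The file name "eoa_example.h5" is an assumption
+ * for illustration only.
+ */
+#if 0
+static void
+example_incr_filesize_sketch(void)
+{
+    hid_t   fid = H5I_INVALID_HID;
+    haddr_t eoa = HADDR_UNDEF;
+
+    fid = H5Fopen("eoa_example.h5", H5F_ACC_RDWR, H5P_DEFAULT);
+
+    /* Read the stored end-of-allocation address ... */
+    H5Fget_eoa(fid, &eoa);
+
+    /* ... then set the EOA to MAX(EOA, EOF) + 512 so that, for example, a
+     * truncated file can be opened and repaired. */
+    H5Fincrement_filesize(fid, 512);
+
+    H5Fclose(fid);
+}
+#endif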
+
+/****************************************************************
+**
+** test_min_dset_ohdr():
+** Test API calls to toggle dataset object header minimization.
+**
+** TODO (as separate function?):
+**      + does the setting persist between close and (re)open?
+**      + check dataset header sizes created while changing the value of the toggle
+**
+****************************************************************/
+#if 0
+static void
+test_min_dset_ohdr(void)
+{
+ const char basename[] = "min_dset_ohdr_testfile";
+ char filename[FILENAME_LEN] = "";
+ hid_t file_id = -1;
+ hid_t file2_id = -1;
+ hbool_t minimize;
+ herr_t ret;
+
+ MESSAGE(5, ("Testing dataset object header minimization\n"));
+
+ /*********/
+ /* SETUP */
+ /*********/
+
+ h5_fixname(basename, H5P_DEFAULT, filename, sizeof(filename));
+
+ file_id = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK_I(file_id, "H5Fcreate");
+
+ /*********/
+ /* TESTS */
+ /*********/
+
+ /*----------------------------------------
+ * TEST default value
+ */
+ ret = H5Fget_dset_no_attrs_hint(file_id, &minimize);
+ CHECK(ret, FAIL, "H5Fget_dset_no_attrs_hint");
+ VERIFY(minimize, FALSE, "minimize flag");
+
+ /*----------------------------------------
+ * TEST set to TRUE
+ */
+ ret = H5Fset_dset_no_attrs_hint(file_id, TRUE);
+ CHECK(ret, FAIL, "H5Fset_dset_no_attrs_hint");
+
+ ret = H5Fget_dset_no_attrs_hint(file_id, &minimize);
+ CHECK(ret, FAIL, "H5Fget_dset_no_attrs_hint");
+ VERIFY(minimize, TRUE, "minimize flag");
+
+ /*----------------------------------------
+ * TEST second file open on same filename
+ */
+ file2_id = H5Fopen(filename, H5F_ACC_RDWR, H5P_DEFAULT);
+ CHECK_I(file2_id, "H5Fopen");
+
+ /* verify TRUE setting on second open
+ */
+ ret = H5Fget_dset_no_attrs_hint(file_id, &minimize);
+ CHECK(ret, FAIL, "H5Fget_dset_no_attrs_hint");
+ VERIFY(minimize, TRUE, "minimize flag");
+
+ /* re-set to FALSE on first open
+ */
+ ret = H5Fset_dset_no_attrs_hint(file_id, FALSE);
+ CHECK(ret, FAIL, "H5Fset_dset_no_attrs_hint");
+
+ /* verify FALSE set on both opens
+ */
+ ret = H5Fget_dset_no_attrs_hint(file_id, &minimize);
+ CHECK(ret, FAIL, "H5Fget_dset_no_attrs_hint");
+ VERIFY(minimize, FALSE, "minimize flag");
+
+ ret = H5Fget_dset_no_attrs_hint(file2_id, &minimize);
+ CHECK(ret, FAIL, "H5Fget_dset_no_attrs_hint");
+ VERIFY(minimize, FALSE, "minimize flag");
+
+ /* re-set to TRUE on second open
+ */
+ ret = H5Fset_dset_no_attrs_hint(file2_id, TRUE);
+ CHECK(ret, FAIL, "H5Fset_dset_no_attrs_hint");
+
+ /* verify TRUE set on both opens
+ */
+ ret = H5Fget_dset_no_attrs_hint(file_id, &minimize);
+ CHECK(ret, FAIL, "H5Fget_dset_no_attrs_hint");
+ VERIFY(minimize, TRUE, "minimize flag");
+
+ ret = H5Fget_dset_no_attrs_hint(file2_id, &minimize);
+ CHECK(ret, FAIL, "H5Fget_dset_no_attrs_hint");
+ VERIFY(minimize, TRUE, "minimize flag");
+
+ /*----------------------------------------
+ * TEST error cases
+ */
+
+ /* trying to set with invalid file ID */
+ H5E_BEGIN_TRY
+ {
+ ret = H5Fset_dset_no_attrs_hint(-1, TRUE);
+ }
+ H5E_END_TRY;
+ VERIFY(ret, FAIL, "H5Fset_dset_no_attrs_hint");
+
+ /* trying to get with invalid file ID */
+ H5E_BEGIN_TRY
+ {
+ ret = H5Fget_dset_no_attrs_hint(-1, &minimize);
+ }
+ H5E_END_TRY;
+ VERIFY(ret, FAIL, "H5Fget_dset_no_attrs_hint");
+
+ /* trying to get with invalid pointer */
+ H5E_BEGIN_TRY
+ {
+ ret = H5Fget_dset_no_attrs_hint(file_id, NULL);
+ }
+ H5E_END_TRY;
+ VERIFY(ret, FAIL, "H5Fget_dset_no_attrs_hint");
+
+ /************/
+ /* TEARDOWN */
+ /************/
+
+ ret = H5Fclose(file_id);
+ CHECK(ret, FAIL, "H5Fclose");
+ ret = H5Fclose(file2_id);
+ CHECK(ret, FAIL, "H5Fclose");
+} /* end test_min_dset_ohdr() */
+#endif
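+
+/* Illustrative sketch (not part of the test suite, kept disabled): toggling
+ * dataset object header minimization on an open file, as exercised by
+ * test_min_dset_ohdr() above. The file name "min_ohdr_example.h5" is an
+ * assumption for illustration only.
+ */
+#if 0
+static void
+example_min_dset_ohdr_sketch(void)
+{
+    hid_t   fid      = H5I_INVALID_HID;
+    hbool_t minimize = FALSE;
+
+    fid = H5Fcreate("min_ohdr_example.h5", H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+
+    /* Request minimized object headers for datasets created from now on */
+    H5Fset_dset_no_attrs_hint(fid, TRUE);
+
+    /* Read the hint back; datasets created before the toggle keep the
+     * headers they were created with */
+    H5Fget_dset_no_attrs_hint(fid, &minimize);
+
+    H5Fclose(fid);
+}
+#endif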
+
+/****************************************************************
+**
+** test_deprec():
+** Test deprecated functionality.
+**
+****************************************************************/
+#if 0
+#ifndef H5_NO_DEPRECATED_SYMBOLS
+static void
+test_deprec(const char *env_h5_drvr)
+{
+ hid_t file; /* File IDs for old & new files */
+ hid_t fcpl; /* File creation property list */
+    hid_t fapl; /* File access property list */
+ hid_t new_fapl;
+ hsize_t align;
+ unsigned super; /* Superblock version # */
+ unsigned freelist; /* Free list version # */
+ unsigned stab; /* Symbol table entry version # */
+ unsigned shhdr; /* Shared object header version # */
+ H5F_info1_t finfo; /* global information about file */
+ herr_t ret; /* Generic return value */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing deprecated routines\n"));
+
+ /* Creating a file with the default file creation property list should
+ * create a version 0 superblock
+ */
+
+ /* Create file with default file creation property list */
+ file = H5Fcreate(FILE1, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(file, FAIL, "H5Fcreate");
+
+ /* Get the file's version information */
+ ret = H5Fget_info1(file, &finfo);
+ CHECK(ret, FAIL, "H5Fget_info1");
+ VERIFY(finfo.super_ext_size, 0, "H5Fget_info1");
+ VERIFY(finfo.sohm.hdr_size, 0, "H5Fget_info1");
+ VERIFY(finfo.sohm.msgs_info.index_size, 0, "H5Fget_info1");
+ VERIFY(finfo.sohm.msgs_info.heap_size, 0, "H5Fget_info1");
+
+    /* Get the file's creation property list */
+ fcpl = H5Fget_create_plist(file);
+ CHECK(fcpl, FAIL, "H5Fget_create_plist");
+
+ /* Get the file's version information */
+ ret = H5Pget_version(fcpl, &super, &freelist, &stab, &shhdr);
+ CHECK(ret, FAIL, "H5Pget_version");
+ VERIFY(super, 0, "H5Pget_version");
+ VERIFY(freelist, 0, "H5Pget_version");
+ VERIFY(stab, 0, "H5Pget_version");
+ VERIFY(shhdr, 0, "H5Pget_version");
+
+ /* Close FCPL */
+ ret = H5Pclose(fcpl);
+ CHECK(ret, FAIL, "H5Pclose");
+
+ /* Close file */
+ ret = H5Fclose(file);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Only run this part of the test with the sec2/default driver */
+ if (h5_using_default_driver(env_h5_drvr)) {
+ /* Create a file creation property list */
+ fcpl = H5Pcreate(H5P_FILE_CREATE);
+ CHECK(fcpl, FAIL, "H5Pcreate");
+
+ /* Set a property in the FCPL that will push the superblock version up */
+        ret = H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_PAGE, 1, (hsize_t)0);
+        CHECK(ret, FAIL, "H5Pset_file_space_strategy");
+        ret = H5Pset_file_space_page_size(fcpl, (hsize_t)512);
+        CHECK(ret, FAIL, "H5Pset_file_space_page_size");
+
+        fapl = H5Pcreate(H5P_FILE_ACCESS);
+        CHECK(fapl, FAIL, "H5Pcreate");
+        ret = H5Pset_alignment(fapl, (hsize_t)1, (hsize_t)1024);
+        CHECK(ret, FAIL, "H5Pset_alignment");
+
+ /* Creating a file with the non-default file creation property list should
+ * create a version 2 superblock
+ */
+
+ /* Create file with custom file creation property list */
+ file = H5Fcreate(FILE1, H5F_ACC_TRUNC, fcpl, fapl);
+ CHECK(file, FAIL, "H5Fcreate");
+
+ new_fapl = H5Fget_access_plist(file);
+ H5Pget_alignment(new_fapl, NULL, &align);
+
+ /* Close FCPL */
+ ret = H5Pclose(fcpl);
+ CHECK(ret, FAIL, "H5Pclose");
+
+ /* Get the file's version information */
+ ret = H5Fget_info1(file, &finfo);
+ CHECK(ret, FAIL, "H5Fget_info1");
+ VERIFY(finfo.super_ext_size, 152, "H5Fget_info1");
+ VERIFY(finfo.sohm.hdr_size, 0, "H5Fget_info1");
+ VERIFY(finfo.sohm.msgs_info.index_size, 0, "H5Fget_info1");
+ VERIFY(finfo.sohm.msgs_info.heap_size, 0, "H5Fget_info1");
+
+        /* Get the file's creation property list */
+ fcpl = H5Fget_create_plist(file);
+ CHECK(fcpl, FAIL, "H5Fget_create_plist");
+
+ /* Get the file's version information */
+ ret = H5Pget_version(fcpl, &super, &freelist, &stab, &shhdr);
+ CHECK(ret, FAIL, "H5Pget_version");
+ VERIFY(super, 2, "H5Pget_version");
+ VERIFY(freelist, 0, "H5Pget_version");
+ VERIFY(stab, 0, "H5Pget_version");
+ VERIFY(shhdr, 0, "H5Pget_version");
+
+ /* Close FCPL */
+ ret = H5Pclose(fcpl);
+ CHECK(ret, FAIL, "H5Pclose");
+
+ /* Close file */
+ ret = H5Fclose(file);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Re-open the file */
+ file = H5Fopen(FILE1, H5F_ACC_RDONLY, H5P_DEFAULT);
+ CHECK(file, FAIL, "H5Fcreate");
+
+ /* Get the file's version information */
+ ret = H5Fget_info1(file, &finfo);
+ CHECK(ret, FAIL, "H5Fget_info1");
+ VERIFY(finfo.super_ext_size, 152, "H5Fget_info1");
+ VERIFY(finfo.sohm.hdr_size, 0, "H5Fget_info1");
+ VERIFY(finfo.sohm.msgs_info.index_size, 0, "H5Fget_info1");
+ VERIFY(finfo.sohm.msgs_info.heap_size, 0, "H5Fget_info1");
+
+ /* Get the file's creation property list */
+ fcpl = H5Fget_create_plist(file);
+ CHECK(fcpl, FAIL, "H5Fget_create_plist");
+
+ /* Get the file's version information */
+ ret = H5Pget_version(fcpl, &super, &freelist, &stab, &shhdr);
+ CHECK(ret, FAIL, "H5Pget_version");
+ VERIFY(super, 2, "H5Pget_version");
+ VERIFY(freelist, 0, "H5Pget_version");
+ VERIFY(stab, 0, "H5Pget_version");
+ VERIFY(shhdr, 0, "H5Pget_version");
+
+ /* Close FCPL */
+ ret = H5Pclose(fcpl);
+ CHECK(ret, FAIL, "H5Pclose");
+
+ /* Close file */
+ ret = H5Fclose(file);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ { /* Test deprecated H5Pget/set_file_space() */
+
+ H5F_file_space_type_t old_strategy;
+ hsize_t old_threshold;
+ hid_t fid;
+ hid_t ffcpl;
+
+ fcpl = H5Pcreate(H5P_FILE_CREATE);
+ CHECK(fcpl, FAIL, "H5Pcreate");
+
+ ret = H5Pget_file_space(fcpl, &old_strategy, &old_threshold);
+ CHECK(ret, FAIL, "H5Pget_file_space");
+ VERIFY(old_strategy, H5F_FILE_SPACE_ALL, "H5Pget_file_space");
+ VERIFY(old_threshold, H5F_FREE_SPACE_THRESHOLD_DEF, "H5Pget_file_space");
+
+ /* Set file space strategy and free space section threshold */
+ ret = H5Pset_file_space(fcpl, H5F_FILE_SPACE_ALL_PERSIST, (hsize_t)0);
+ CHECK(ret, FAIL, "H5Pget_file_space");
+
+ /* Get the file space info from the creation property */
+ ret = H5Pget_file_space(fcpl, &old_strategy, &old_threshold);
+ CHECK(ret, FAIL, "H5Pget_file_space");
+ VERIFY(old_strategy, H5F_FILE_SPACE_ALL_PERSIST, "H5Pget_file_space");
+ VERIFY(old_threshold, H5F_FREE_SPACE_THRESHOLD_DEF, "H5Pget_file_space");
+
+ ret = H5Pset_file_space(fcpl, H5F_FILE_SPACE_DEFAULT, (hsize_t)3);
+ CHECK(ret, FAIL, "H5Pget_file_space");
+
+ ret = H5Pget_file_space(fcpl, &old_strategy, &old_threshold);
+ CHECK(ret, FAIL, "H5Pget_file_space");
+ VERIFY(old_strategy, H5F_FILE_SPACE_ALL_PERSIST, "H5Pget_file_space");
+ VERIFY(old_threshold, 3, "H5Pget_file_space");
+
+ /* Create a file */
+ fid = H5Fcreate(FILE1, H5F_ACC_TRUNC, fcpl, H5P_DEFAULT);
+ CHECK(file, FAIL, "H5Fcreate");
+
+ old_strategy = H5F_FILE_SPACE_DEFAULT;
+ old_threshold = 0;
+ ffcpl = H5Fget_create_plist(fid);
+ ret = H5Pget_file_space(ffcpl, &old_strategy, &old_threshold);
+ CHECK(ret, FAIL, "H5Pget_file_space");
+ VERIFY(old_strategy, H5F_FILE_SPACE_ALL_PERSIST, "H5Pget_file_space");
+ VERIFY(old_threshold, 3, "H5Pget_file_space");
+
+ /* Close file */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ ret = H5Pclose(ffcpl);
+ CHECK(ret, FAIL, "H5Pclose");
+
+ ret = H5Pclose(fcpl);
+ CHECK(ret, FAIL, "H5Pclose");
+
+ /* Reopen the file */
+ fid = H5Fopen(FILE1, H5F_ACC_RDONLY, H5P_DEFAULT);
+ CHECK(fid, FAIL, "H5Fcreate");
+
+ old_strategy = H5F_FILE_SPACE_DEFAULT;
+ old_threshold = 0;
+ ffcpl = H5Fget_create_plist(fid);
+ ret = H5Pget_file_space(ffcpl, &old_strategy, &old_threshold);
+ CHECK(ret, FAIL, "H5Pget_file_space");
+ VERIFY(old_strategy, H5F_FILE_SPACE_ALL_PERSIST, "H5Pget_file_space");
+ VERIFY(old_threshold, 3, "H5Pget_file_space");
+
+ ret = H5Pclose(ffcpl);
+ CHECK(ret, FAIL, "H5Pclose");
+
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+ }
+ }
+
+} /* test_deprec */
+#endif /* H5_NO_DEPRECATED_SYMBOLS */
+#endif
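+
+/* Illustrative sketch (not part of the test suite, kept disabled): the
+ * non-deprecated counterparts of the routines exercised in test_deprec().
+ * H5Fget_info2() and H5Pset_file_space_strategy() replace H5Fget_info1() and
+ * H5Pset_file_space(); the file name "deprec_example.h5" is an assumption.
+ */
+#if 0
+static void
+example_modern_replacements_sketch(void)
+{
+    hid_t       fcpl = H5I_INVALID_HID;
+    hid_t       fid  = H5I_INVALID_HID;
+    H5F_info2_t finfo;
+
+    /* Modern file space API: FSM+aggregator strategy, persist free space,
+     * 1-byte free-space section threshold */
+    fcpl = H5Pcreate(H5P_FILE_CREATE);
+    H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_FSM_AGGR, TRUE, (hsize_t)1);
+
+    fid = H5Fcreate("deprec_example.h5", H5F_ACC_TRUNC, fcpl, H5P_DEFAULT);
+
+    /* Modern, versioned file-info query */
+    H5Fget_info2(fid, &finfo);
+
+    H5Fclose(fid);
+    H5Pclose(fcpl);
+}
+#endif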
+
+/****************************************************************
+**
+** test_file(): Main low-level file I/O test routine.
+**
+****************************************************************/
+void
+test_file(void)
+{
+ const char *env_h5_drvr; /* File Driver value from environment */
+ hid_t fapl_id = H5I_INVALID_HID; /* VFD-dependent fapl ID */
+ hbool_t driver_is_default_compatible;
+ herr_t ret;
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing Low-Level File I/O\n"));
+
+ /* Get the VFD to use */
+ env_h5_drvr = HDgetenv(HDF5_DRIVER);
+ if (env_h5_drvr == NULL)
+ env_h5_drvr = "nomatch";
+
+ /* Improved version of VFD-dependent checks */
+ fapl_id = h5_fileaccess();
+ CHECK(fapl_id, H5I_INVALID_HID, "h5_fileaccess");
+
+ ret = h5_driver_is_default_vfd_compatible(fapl_id, &driver_is_default_compatible);
+ CHECK(ret, FAIL, "h5_driver_is_default_vfd_compatible");
+
+ test_file_create(); /* Test file creation(also creation templates)*/
+ test_file_open(env_h5_drvr); /* Test file opening */
+ test_file_reopen(); /* Test file reopening */
+ test_file_close(); /* Test file close behavior */
+ test_get_file_id(); /* Test H5Iget_file_id */
+ test_get_obj_ids(); /* Test H5Fget_obj_ids for Jira Issue 8528 */
+ test_file_perm(); /* Test file access permissions */
+ test_file_perm2(); /* Test file access permission again */
+ test_file_is_accessible(env_h5_drvr); /* Test detecting HDF5 files correctly */
+ test_file_delete(fapl_id); /* Test H5Fdelete */
+ test_file_open_dot(); /* Test opening objects with "." for a name */
+ test_file_open_overlap(); /* Test opening files in an overlapping manner */
+ test_file_getname(); /* Test basic H5Fget_name() functionality */
+ test_file_double_root_open(); /* Test opening root group from two files works properly */
+ test_file_double_group_open(); /* Test opening same group from two files works properly */
+ test_file_double_dataset_open(); /* Test opening same dataset from two files works properly */
+ test_file_double_datatype_open(); /* Test opening same named datatype from two files works properly */
+ test_file_double_file_dataset_open(TRUE);
+ test_file_double_file_dataset_open(FALSE);
+#if 0
+ test_userblock_file_size(
+ env_h5_drvr); /* Tests that files created with a userblock have the correct size */
+ test_cached_stab_info(); /* Tests that files are created with cached stab info in the superblock */
+
+ if (driver_is_default_compatible) {
+ test_rw_noupdate(); /* Test to ensure that RW permissions don't write the file unless dirtied */
+ }
+
+ test_userblock_alignment(
+ env_h5_drvr); /* Tests that files created with a userblock and alignment interact properly */
+ test_userblock_alignment_paged(env_h5_drvr); /* Tests files created with a userblock and alignment (via
+ paged aggregation) interact properly */
+ test_filespace_info(env_h5_drvr); /* Test file creation public routines: */
+ /* H5Pget/set_file_space_strategy() & H5Pget/set_file_space_page_size() */
+ /* Skipped testing for multi/split drivers */
+ test_file_freespace(env_h5_drvr); /* Test file public routine H5Fget_freespace() */
+ /* Skipped testing for multi/split drivers */
+ /* Setup for multi/split drivers are there already */
+ test_sects_freespace(env_h5_drvr,
+ TRUE); /* Test file public routine H5Fget_free_sections() for new format */
+ /* Skipped testing for multi/split drivers */
+ /* Setup for multi/split drivers are there already */
+ test_sects_freespace(env_h5_drvr, FALSE); /* Test file public routine H5Fget_free_sections() */
+ /* Skipped testing for multi/split drivers */
+
+ if (driver_is_default_compatible) {
+ test_filespace_compatible(); /* Test compatibility for file space management */
+
+ test_filespace_round_compatible(); /* Testing file space compatibility for files from trunk to 1_8 to
+ trunk */
+ test_filespace_1_10_0_compatible(); /* Testing file space compatibility for files from release 1.10.0
+ */
+ }
+
+    test_libver_bounds(); /* Test setting library version bounds */
+ test_libver_bounds_low_high(env_h5_drvr);
+ test_libver_macros(); /* Test the macros for library version comparison */
+ test_libver_macros2(); /* Test the macros for library version comparison */
+ test_incr_filesize(); /* Test H5Fincrement_filesize() and H5Fget_eoa() */
+ test_min_dset_ohdr(); /* Test dataset object header minimization */
+#ifndef H5_NO_DEPRECATED_SYMBOLS
+ test_file_ishdf5(env_h5_drvr); /* Test detecting HDF5 files correctly */
+ test_deprec(env_h5_drvr); /* Test deprecated routines */
+#endif /* H5_NO_DEPRECATED_SYMBOLS */
+#endif
+
+ ret = H5Pclose(fapl_id);
+ CHECK(ret, FAIL, "H5Pclose");
+
+} /* test_file() */
+
+/*-------------------------------------------------------------------------
+ * Function: cleanup_file
+ *
+ * Purpose: Cleanup temporary test files
+ *
+ * Return: none
+ *
+ * Programmer: Albert Cheng
+ * July 2, 1998
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+void
+cleanup_file(void)
+{
+ H5E_BEGIN_TRY
+ {
+ H5Fdelete(SFILE1, H5P_DEFAULT);
+ H5Fdelete(FILE1, H5P_DEFAULT);
+ H5Fdelete(FILE2, H5P_DEFAULT);
+ H5Fdelete(FILE3, H5P_DEFAULT);
+ H5Fdelete(FILE4, H5P_DEFAULT);
+ H5Fdelete(FILE5, H5P_DEFAULT);
+ H5Fdelete(FILE6, H5P_DEFAULT);
+ H5Fdelete(FILE7, H5P_DEFAULT);
+ H5Fdelete(DST_FILE, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+}
diff --git a/test/API/tgenprop.c b/test/API/tgenprop.c
new file mode 100644
index 0000000..c1ee8af
--- /dev/null
+++ b/test/API/tgenprop.c
@@ -0,0 +1,2201 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://www.hdfgroup.org/licenses. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/***********************************************************
+ *
+ * Test program: tgenprop
+ *
+ * Test the Generic Property functionality
+ *
+ *************************************************************/
+
+#define H5P_FRIEND /*suppress error about including H5Ppkg */
+
+/* Define this macro to indicate that the testing APIs should be available */
+#define H5P_TESTING
+
+#include "testhdf5.h"
+
+/* #include "H5Dprivate.h" */ /* For Dataset creation property list names */
+/* #include "H5Ppkg.h" */ /* Generic Properties */
+
+#define FILENAME "tgenprop.h5"
+
+/* Property definitions */
+#define CLASS1_NAME "Class 1"
+#define CLASS1_PATH "root/Class 1"
+
+#define CLASS2_NAME "Class 2"
+#define CLASS2_PATH "root/Class 1/Class 2"
+
+/* Property definitions */
+#define PROP1_NAME "Property 1"
+int prop1_def = 10; /* Property 1 default value */
+#define PROP1_SIZE sizeof(prop1_def)
+#define PROP1_DEF_VALUE (&prop1_def)
+
+#define PROP2_NAME "Property 2"
+float prop2_def = 3.14F; /* Property 2 default value */
+#define PROP2_SIZE sizeof(prop2_def)
+#define PROP2_DEF_VALUE (&prop2_def)
+
+#define PROP3_NAME "Property 3"
+char prop3_def[10] = "Ten chars"; /* Property 3 default value */
+#define PROP3_SIZE sizeof(prop3_def)
+#define PROP3_DEF_VALUE (&prop3_def)
+
+#define PROP4_NAME "Property 4"
+double prop4_def = 1.41; /* Property 4 default value */
+#define PROP4_SIZE sizeof(prop4_def)
+#define PROP4_DEF_VALUE (&prop4_def)
+
+/* Structs used during iteration */
+typedef struct iter_data_t {
+ int iter_count;
+ char **names;
+} iter_data_t;
+
+typedef struct count_data_t {
+ int count;
+ hid_t id;
+} count_data_t;
+
+/****************************************************************
+**
+** test_genprop_basic_class(): Test basic generic property list code.
+** Tests creating new generic classes.
+**
+****************************************************************/
+static void
+test_genprop_basic_class(void)
+{
+ hid_t cid1; /* Generic Property class ID */
+ hid_t cid2; /* Generic Property class ID */
+ hid_t cid3; /* Generic Property class ID */
+ char *name; /* Name of class */
+ herr_t ret; /* Generic return value */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing Basic Generic Property List Class Creation Functionality\n"));
+
+ /* Create a new generic class, derived from the root of the class hierarchy */
+ cid1 = H5Pcreate_class(H5P_ROOT, CLASS1_NAME, NULL, NULL, NULL, NULL, NULL, NULL);
+ CHECK_I(cid1, "H5Pcreate_class");
+
+ /* Check class name */
+ name = H5Pget_class_name(cid1);
+ CHECK_PTR(name, "H5Pget_class_name");
+ if (HDstrcmp(name, CLASS1_NAME) != 0)
+ TestErrPrintf("Class names don't match!, name=%s, CLASS1_NAME=%s\n", name, CLASS1_NAME);
+ H5free_memory(name);
+
+ /* Check class parent */
+ cid2 = H5Pget_class_parent(cid1);
+ CHECK_I(cid2, "H5Pget_class_parent");
+
+ /* Verify class parent correct */
+ ret = H5Pequal(cid2, H5P_ROOT);
+ VERIFY(ret, 1, "H5Pequal");
+
+ /* Make certain false positives aren't being returned */
+ ret = H5Pequal(cid2, H5P_FILE_CREATE);
+ VERIFY(ret, 0, "H5Pequal");
+
+ /* Close parent class */
+ ret = H5Pclose_class(cid2);
+ CHECK_I(ret, "H5Pclose_class");
+
+ /* Close class */
+ ret = H5Pclose_class(cid1);
+ CHECK_I(ret, "H5Pclose_class");
+
+ /* Create another new generic class, derived from file creation class */
+ cid1 = H5Pcreate_class(H5P_FILE_CREATE, CLASS2_NAME, NULL, NULL, NULL, NULL, NULL, NULL);
+ CHECK_I(cid1, "H5Pcreate_class");
+
+ /* Check class name */
+ name = H5Pget_class_name(cid1);
+ CHECK_PTR(name, "H5Pget_class_name");
+ if (HDstrcmp(name, CLASS2_NAME) != 0)
+ TestErrPrintf("Class names don't match!, name=%s, CLASS2_NAME=%s\n", name, CLASS2_NAME);
+ H5free_memory(name);
+
+ /* Check class parent */
+ cid2 = H5Pget_class_parent(cid1);
+ CHECK_I(cid2, "H5Pget_class_parent");
+
+ /* Verify class parent correct */
+ ret = H5Pequal(cid2, H5P_FILE_CREATE);
+ VERIFY(ret, 1, "H5Pequal");
+
+ /* Check class parent's parent */
+ cid3 = H5Pget_class_parent(cid2);
+ CHECK_I(cid3, "H5Pget_class_parent");
+
+ /* Verify class parent's parent correct */
+ ret = H5Pequal(cid3, H5P_GROUP_CREATE);
+ VERIFY(ret, 1, "H5Pequal");
+
+ /* Close parent class's parent */
+ ret = H5Pclose_class(cid3);
+ CHECK_I(ret, "H5Pclose_class");
+
+ /* Close parent class */
+ ret = H5Pclose_class(cid2);
+ CHECK_I(ret, "H5Pclose_class");
+
+ /* Close class */
+ ret = H5Pclose_class(cid1);
+ CHECK_I(ret, "H5Pclose_class");
+} /* end test_genprop_basic_class() */
+
+/****************************************************************
+**
+** test_genprop_basic_class_prop(): Test basic generic property list code.
+** Tests adding properties to generic classes.
+**
+****************************************************************/
+static void
+test_genprop_basic_class_prop(void)
+{
+ hid_t cid1; /* Generic Property class ID */
+ size_t size; /* Size of property */
+ size_t nprops; /* Number of properties in class */
+ herr_t ret; /* Generic return value */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing Basic Generic Property List Class Properties Functionality\n"));
+
+ /* Create a new generic class, derived from the root of the class hierarchy */
+ cid1 = H5Pcreate_class(H5P_ROOT, CLASS1_NAME, NULL, NULL, NULL, NULL, NULL, NULL);
+ CHECK_I(cid1, "H5Pcreate_class");
+
+ /* Check the number of properties in class */
+ ret = H5Pget_nprops(cid1, &nprops);
+ CHECK_I(ret, "H5Pget_nprops");
+ VERIFY(nprops, 0, "H5Pget_nprops");
+
+ /* Check the existence of the first property (should fail) */
+ ret = H5Pexist(cid1, PROP1_NAME);
+ VERIFY(ret, 0, "H5Pexist");
+
+ /* Insert first property into class (with no callbacks) */
+ ret =
+ H5Pregister2(cid1, PROP1_NAME, PROP1_SIZE, PROP1_DEF_VALUE, NULL, NULL, NULL, NULL, NULL, NULL, NULL);
+ CHECK_I(ret, "H5Pregister2");
+
+ /* Try to insert the first property again (should fail) */
+ H5E_BEGIN_TRY
+ {
+ ret = H5Pregister2(cid1, PROP1_NAME, PROP1_SIZE, PROP1_DEF_VALUE, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL);
+ }
+ H5E_END_TRY;
+ VERIFY(ret, FAIL, "H5Pregister2");
+
+ /* Check the existence of the first property */
+ ret = H5Pexist(cid1, PROP1_NAME);
+ VERIFY(ret, 1, "H5Pexist");
+
+ /* Check the size of the first property */
+ ret = H5Pget_size(cid1, PROP1_NAME, &size);
+ CHECK_I(ret, "H5Pget_size");
+ VERIFY(size, PROP1_SIZE, "H5Pget_size");
+
+ /* Check the number of properties in class */
+ ret = H5Pget_nprops(cid1, &nprops);
+ CHECK_I(ret, "H5Pget_nprops");
+ VERIFY(nprops, 1, "H5Pget_nprops");
+
+ /* Insert second property into class (with no callbacks) */
+ ret =
+ H5Pregister2(cid1, PROP2_NAME, PROP2_SIZE, PROP2_DEF_VALUE, NULL, NULL, NULL, NULL, NULL, NULL, NULL);
+ CHECK_I(ret, "H5Pregister2");
+
+ /* Try to insert the second property again (should fail) */
+ H5E_BEGIN_TRY
+ {
+ ret = H5Pregister2(cid1, PROP2_NAME, PROP2_SIZE, PROP2_DEF_VALUE, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL);
+ }
+ H5E_END_TRY;
+ VERIFY(ret, FAIL, "H5Pregister2");
+
+ /* Check the existence of the second property */
+ ret = H5Pexist(cid1, PROP2_NAME);
+ VERIFY(ret, 1, "H5Pexist");
+
+ /* Check the size of the second property */
+ ret = H5Pget_size(cid1, PROP2_NAME, &size);
+ CHECK_I(ret, "H5Pget_size");
+ VERIFY(size, PROP2_SIZE, "H5Pget_size");
+
+ /* Check the number of properties in class */
+ ret = H5Pget_nprops(cid1, &nprops);
+ CHECK_I(ret, "H5Pget_nprops");
+ VERIFY(nprops, 2, "H5Pget_nprops");
+
+ /* Insert third property into class (with no callbacks) */
+ ret =
+ H5Pregister2(cid1, PROP3_NAME, PROP3_SIZE, PROP3_DEF_VALUE, NULL, NULL, NULL, NULL, NULL, NULL, NULL);
+ CHECK_I(ret, "H5Pregister2");
+
+ /* Check the existence of the third property */
+ ret = H5Pexist(cid1, PROP3_NAME);
+ VERIFY(ret, 1, "H5Pexist");
+
+ /* Check the size of the third property */
+ ret = H5Pget_size(cid1, PROP3_NAME, &size);
+ CHECK_I(ret, "H5Pget_size");
+ VERIFY(size, PROP3_SIZE, "H5Pget_size");
+
+ /* Check the number of properties in class */
+ ret = H5Pget_nprops(cid1, &nprops);
+ CHECK_I(ret, "H5Pget_nprops");
+ VERIFY(nprops, 3, "H5Pget_nprops");
+
+ /* Unregister first property */
+ ret = H5Punregister(cid1, PROP1_NAME);
+ CHECK_I(ret, "H5Punregister");
+
+ /* Try to check the size of the first property (should fail) */
+ H5E_BEGIN_TRY
+ {
+ ret = H5Pget_size(cid1, PROP1_NAME, &size);
+ }
+ H5E_END_TRY;
+ VERIFY(ret, FAIL, "H5Pget_size");
+
+ /* Check the number of properties in class */
+ ret = H5Pget_nprops(cid1, &nprops);
+ CHECK_I(ret, "H5Pget_nprops");
+ VERIFY(nprops, 2, "H5Pget_nprops");
+
+ /* Unregister second property */
+ ret = H5Punregister(cid1, PROP2_NAME);
+ CHECK_I(ret, "H5Punregister");
+
+ /* Check the number of properties in class */
+ ret = H5Pget_nprops(cid1, &nprops);
+ CHECK_I(ret, "H5Pget_nprops");
+ VERIFY(nprops, 1, "H5Pget_nprops");
+
+ /* Unregister third property */
+ ret = H5Punregister(cid1, PROP3_NAME);
+ CHECK_I(ret, "H5Punregister");
+
+ /* Check the number of properties in class */
+ ret = H5Pget_nprops(cid1, &nprops);
+ CHECK_I(ret, "H5Pget_nprops");
+ VERIFY(nprops, 0, "H5Pget_nprops");
+
+ /* Close class */
+ ret = H5Pclose_class(cid1);
+ CHECK_I(ret, "H5Pclose_class");
+} /* end test_genprop_basic_class_prop() */
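+
+/* Illustrative sketch (not part of the test suite, kept disabled): once a
+ * property is registered in a class, every list created from that class
+ * carries the registered default, readable with H5Pget(). A minimal sketch
+ * using the class/property definitions above; the local variable names are
+ * assumptions for illustration.
+ */
+#if 0
+static void
+example_genprop_default_sketch(void)
+{
+    hid_t cid   = H5I_INVALID_HID;
+    hid_t plist = H5I_INVALID_HID;
+    int   value = 0;
+
+    /* Register one integer property in a new class */
+    cid = H5Pcreate_class(H5P_ROOT, CLASS1_NAME, NULL, NULL, NULL, NULL, NULL, NULL);
+    H5Pregister2(cid, PROP1_NAME, PROP1_SIZE, PROP1_DEF_VALUE, NULL, NULL, NULL, NULL, NULL, NULL, NULL);
+
+    /* Lists created from the class start out with the registered default */
+    plist = H5Pcreate(cid);
+    H5Pget(plist, PROP1_NAME, &value); /* value is now prop1_def (10) */
+
+    H5Pclose(plist);
+    H5Pclose_class(cid);
+}
+#endif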
+
+/****************************************************************
+**
+** test_genprop_iter1(): Property iterator for test_genprop_class_iter
+**
+****************************************************************/
+static int
+test_genprop_iter1(hid_t H5_ATTR_UNUSED id, const char *name, void *iter_data)
+{
+ iter_data_t *idata = (iter_data_t *)iter_data;
+
+ return HDstrcmp(name, idata->names[idata->iter_count++]);
+}
+
+/****************************************************************
+**
+** test_genprop_class_iter(): Test basic generic property list code.
+** Tests iterating over properties in a generic class.
+**
+****************************************************************/
+static void
+test_genprop_class_iter(void)
+{
+ hid_t cid1; /* Generic Property class ID */
+ size_t nprops; /* Number of properties in class */
+ int idx; /* Index to start iteration at */
+ struct { /* Struct for iterations */
+ int iter_count;
+ const char **names;
+ } iter_struct;
+ const char *pnames[4] = {/* Names of properties for iterator */
+ PROP1_NAME, PROP2_NAME, PROP3_NAME, PROP4_NAME};
+ herr_t ret; /* Generic return value */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing Basic Generic Property List Class Property Iteration Functionality\n"));
+
+ /* Create a new generic class, derived from the root of the class hierarchy */
+ cid1 = H5Pcreate_class(H5P_ROOT, CLASS1_NAME, NULL, NULL, NULL, NULL, NULL, NULL);
+ CHECK_I(cid1, "H5Pcreate_class");
+
+ /* Insert first property into class (with no callbacks) */
+ ret =
+ H5Pregister2(cid1, PROP1_NAME, PROP1_SIZE, PROP1_DEF_VALUE, NULL, NULL, NULL, NULL, NULL, NULL, NULL);
+ CHECK_I(ret, "H5Pregister2");
+
+ /* Insert second property into class (with no callbacks) */
+ ret =
+ H5Pregister2(cid1, PROP2_NAME, PROP2_SIZE, PROP2_DEF_VALUE, NULL, NULL, NULL, NULL, NULL, NULL, NULL);
+ CHECK_I(ret, "H5Pregister2");
+
+ /* Insert third property into class (with no callbacks) */
+ ret =
+ H5Pregister2(cid1, PROP3_NAME, PROP3_SIZE, PROP3_DEF_VALUE, NULL, NULL, NULL, NULL, NULL, NULL, NULL);
+ CHECK_I(ret, "H5Pregister2");
+
+ /* Insert fourth property into class (with no callbacks) */
+ ret =
+ H5Pregister2(cid1, PROP4_NAME, PROP4_SIZE, PROP4_DEF_VALUE, NULL, NULL, NULL, NULL, NULL, NULL, NULL);
+ CHECK_I(ret, "H5Pregister2");
+
+ /* Check the number of properties in class */
+ ret = H5Pget_nprops(cid1, &nprops);
+ CHECK_I(ret, "H5Pget_nprops");
+ VERIFY(nprops, 4, "H5Pget_nprops");
+
+ /* Iterate over all properties in class */
+ iter_struct.iter_count = 0;
+ iter_struct.names = pnames;
+ ret = H5Piterate(cid1, NULL, test_genprop_iter1, &iter_struct);
+ VERIFY(ret, 0, "H5Piterate");
+
+ /* Iterate over last three properties in class */
+ idx = iter_struct.iter_count = 1;
+ ret = H5Piterate(cid1, &idx, test_genprop_iter1, &iter_struct);
+ VERIFY(ret, 0, "H5Piterate");
+ VERIFY(idx, (int)nprops, "H5Piterate");
+
+ /* Close class */
+ ret = H5Pclose_class(cid1);
+ CHECK_I(ret, "H5Pclose_class");
+} /* end test_genprop_class_iter() */
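+
+/* A hypothetical sketch of the resumable-iteration pattern verified above:
+ * when the callback returns zero for every property, H5Piterate() returns
+ * zero and leaves *idx equal to the total number of properties. The
+ * example_* names are illustrative only; the block is kept under #if 0 so it
+ * has no effect on the build.
+ */
+#if 0
+static int
+example_count_cb(hid_t H5_ATTR_UNUSED id, const char *name, void *op_data)
+{
+ if (name != NULL)
+ (*(int *)op_data)++; /* Count each property visited */
+
+ return 0; /* Zero means "continue iterating" */
+}
+
+static void
+example_iterate(hid_t cid)
+{
+ int idx = 1; /* Start at the second property, as in the test above */
+ int count = 0;
+
+ H5Piterate(cid, &idx, example_count_cb, &count);
+ /* count is (number of properties - 1) and idx equals the number of properties on completion */
+}
+#endif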
+
+/****************************************************************
+**
+** test_genprop_cls_*_cb1(): Property List callbacks for test_genprop_class_callback
+**
+****************************************************************/
+static herr_t
+test_genprop_cls_crt_cb1(hid_t list_id, void *create_data)
+{
+ count_data_t *cdata = (count_data_t *)create_data;
+
+ cdata->count++;
+ cdata->id = list_id;
+
+ return SUCCEED;
+}
+
+static herr_t
+test_genprop_cls_cpy_cb1(hid_t new_list_id, hid_t H5_ATTR_UNUSED old_list_id, void *copy_data)
+{
+ count_data_t *cdata = (count_data_t *)copy_data;
+
+ cdata->count++;
+ cdata->id = new_list_id;
+
+ return SUCCEED;
+}
+
+static herr_t
+test_genprop_cls_cls_cb1(hid_t list_id, void *create_data)
+{
+ count_data_t *cdata = (count_data_t *)create_data;
+
+ cdata->count++;
+ cdata->id = list_id;
+
+ return SUCCEED;
+}
+
+/****************************************************************
+**
+** test_genprop_class_callback(): Test basic generic property list code.
+** Tests callbacks for property lists in a generic class.
+**
+****************************************************************/
+static void
+test_genprop_class_callback(void)
+{
+ hid_t cid1; /* Generic Property class ID */
+ hid_t cid2; /* Generic Property class ID */
+ hid_t lid1; /* Generic Property list ID */
+ hid_t lid2; /* Generic Property list ID */
+ hid_t lid3; /* Generic Property list ID */
+ size_t nprops; /* Number of properties in class */
+ struct { /* Struct for callbacks */
+ int count;
+ hid_t id;
+ } crt_cb_struct, cpy_cb_struct, cls_cb_struct;
+ herr_t ret; /* Generic return value */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing Basic Generic Property List Class Callback Functionality\n"));
+
+ /* Create a new generic class, derived from the root of the class hierarchy */
+ cid1 =
+ H5Pcreate_class(H5P_ROOT, CLASS1_NAME, test_genprop_cls_crt_cb1, &crt_cb_struct,
+ test_genprop_cls_cpy_cb1, &cpy_cb_struct, test_genprop_cls_cls_cb1, &cls_cb_struct);
+ CHECK_I(cid1, "H5Pcreate_class");
+
+ /* Insert first property into class (with no callbacks) */
+ ret =
+ H5Pregister2(cid1, PROP1_NAME, PROP1_SIZE, PROP1_DEF_VALUE, NULL, NULL, NULL, NULL, NULL, NULL, NULL);
+ CHECK_I(ret, "H5Pregister2");
+
+ /* Insert second property into class (with no callbacks) */
+ ret =
+ H5Pregister2(cid1, PROP2_NAME, PROP2_SIZE, PROP2_DEF_VALUE, NULL, NULL, NULL, NULL, NULL, NULL, NULL);
+ CHECK_I(ret, "H5Pregister2");
+
+ /* Insert third property into class (with no callbacks) */
+ ret =
+ H5Pregister2(cid1, PROP3_NAME, PROP3_SIZE, PROP3_DEF_VALUE, NULL, NULL, NULL, NULL, NULL, NULL, NULL);
+ CHECK_I(ret, "H5Pregister2");
+
+ /* Check the number of properties in class */
+ ret = H5Pget_nprops(cid1, &nprops);
+ CHECK_I(ret, "H5Pget_nprops");
+ VERIFY(nprops, 3, "H5Pget_nprops");
+
+ /* Initialize class callback structs */
+ crt_cb_struct.count = 0;
+ crt_cb_struct.id = (-1);
+ cpy_cb_struct.count = 0;
+ cpy_cb_struct.id = (-1);
+ cls_cb_struct.count = 0;
+ cls_cb_struct.id = (-1);
+
+ /* Create a property list from the class */
+ lid1 = H5Pcreate(cid1);
+ CHECK_I(lid1, "H5Pcreate");
+
+ /* Verify that the creation callback occurred */
+ VERIFY(crt_cb_struct.count, 1, "H5Pcreate");
+ VERIFY(crt_cb_struct.id, lid1, "H5Pcreate");
+
+ /* Check the number of properties in list */
+ ret = H5Pget_nprops(lid1, &nprops);
+ CHECK_I(ret, "H5Pget_nprops");
+ VERIFY(nprops, 3, "H5Pget_nprops");
+
+ /* Create another property list from the class */
+ lid2 = H5Pcreate(cid1);
+ CHECK_I(lid2, "H5Pcreate");
+
+ /* Verify that the creation callback occurred */
+ VERIFY(crt_cb_struct.count, 2, "H5Pcreate");
+ VERIFY(crt_cb_struct.id, lid2, "H5Pcreate");
+
+ /* Check the number of properties in list */
+ ret = H5Pget_nprops(lid2, &nprops);
+ CHECK_I(ret, "H5Pget_nprops");
+ VERIFY(nprops, 3, "H5Pget_nprops");
+
+ /* Create another property list by copying an existing list */
+ lid3 = H5Pcopy(lid1);
+ CHECK_I(lid3, "H5Pcopy");
+
+ /* Verify that the copy callback occurred */
+ VERIFY(cpy_cb_struct.count, 1, "H5Pcopy");
+ VERIFY(cpy_cb_struct.id, lid3, "H5Pcopy");
+
+ /* Check the number of properties in list */
+ ret = H5Pget_nprops(lid3, &nprops);
+ CHECK_I(ret, "H5Pget_nprops");
+ VERIFY(nprops, 3, "H5Pget_nprops");
+
+ /* Close first list */
+ ret = H5Pclose(lid1);
+ CHECK_I(ret, "H5Pclose");
+
+ /* Verify that the close callback occurred */
+ VERIFY(cls_cb_struct.count, 1, "H5Pclose");
+ VERIFY(cls_cb_struct.id, lid1, "H5Pclose");
+
+ /* Close second list */
+ ret = H5Pclose(lid2);
+ CHECK_I(ret, "H5Pclose");
+
+ /* Verify that the close callback occurred */
+ VERIFY(cls_cb_struct.count, 2, "H5Pclose");
+ VERIFY(cls_cb_struct.id, lid2, "H5Pclose");
+
+ /* Close third list */
+ ret = H5Pclose(lid3);
+ CHECK_I(ret, "H5Pclose");
+
+ /* Verify that the close callback occurred */
+ VERIFY(cls_cb_struct.count, 3, "H5Pclose");
+ VERIFY(cls_cb_struct.id, lid3, "H5Pclose");
+
+ /* Create another new generic class, derived from first class */
+ cid2 =
+ H5Pcreate_class(cid1, CLASS2_NAME, test_genprop_cls_crt_cb1, &crt_cb_struct, test_genprop_cls_cpy_cb1,
+ &cpy_cb_struct, test_genprop_cls_cls_cb1, &cls_cb_struct);
+ CHECK_I(cid2, "H5Pcreate_class");
+
+ /* Insert fourth property into class (with no callbacks) */
+ ret =
+ H5Pregister2(cid2, PROP4_NAME, PROP4_SIZE, PROP4_DEF_VALUE, NULL, NULL, NULL, NULL, NULL, NULL, NULL);
+ CHECK_I(ret, "H5Pregister2");
+
+ /* Check the number of properties in class */
+ /* (only reports the number of properties in 2nd class) */
+ ret = H5Pget_nprops(cid2, &nprops);
+ CHECK_I(ret, "H5Pget_nprops");
+ VERIFY(nprops, 1, "H5Pget_nprops");
+
+ /* Create a property list from the 2nd class */
+ lid1 = H5Pcreate(cid2);
+ CHECK_I(lid1, "H5Pcreate");
+
+ /* Verify that both of the creation callbacks occurred */
+ VERIFY(crt_cb_struct.count, 4, "H5Pcreate");
+ VERIFY(crt_cb_struct.id, lid1, "H5Pcreate");
+
+ /* Check the number of properties in list */
+ ret = H5Pget_nprops(lid1, &nprops);
+ CHECK_I(ret, "H5Pget_nprops");
+ VERIFY(nprops, 4, "H5Pget_nprops");
+
+ /* Create another property list by copying existing list */
+ lid2 = H5Pcopy(lid1);
+ CHECK_I(lid2, "H5Pcopy");
+
+ /* Verify that both of the copy callbacks occurred */
+ VERIFY(cpy_cb_struct.count, 3, "H5Pcopy");
+ VERIFY(cpy_cb_struct.id, lid2, "H5Pcopy");
+
+ /* Check the number of properties in list */
+ ret = H5Pget_nprops(lid2, &nprops);
+ CHECK_I(ret, "H5Pget_nprops");
+ VERIFY(nprops, 4, "H5Pget_nprops");
+
+ /* Close first list */
+ ret = H5Pclose(lid1);
+ CHECK_I(ret, "H5Pclose");
+
+ /* Verify that both of the close callbacks occurred */
+ VERIFY(cls_cb_struct.count, 5, "H5Pclose");
+ VERIFY(cls_cb_struct.id, lid1, "H5Pclose");
+
+ /* Close second list */
+ ret = H5Pclose(lid2);
+ CHECK_I(ret, "H5Pclose");
+
+ /* Verify that both of the close callbacks occurred */
+ VERIFY(cls_cb_struct.count, 7, "H5Pclose");
+ VERIFY(cls_cb_struct.id, lid2, "H5Pclose");
+
+ /* Close classes */
+ ret = H5Pclose_class(cid1);
+ CHECK_I(ret, "H5Pclose_class");
+ ret = H5Pclose_class(cid2);
+ CHECK_I(ret, "H5Pclose_class");
+} /* end test_genprop_class_callback() */
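+
+/* The counts verified above reflect that class callbacks accumulate along the
+ * class hierarchy: creating, copying, or closing a list of the derived class
+ * also invokes the callback registered on the parent class, so each operation
+ * bumps the corresponding counter by two. A hypothetical, condensed sketch of
+ * that behavior follows; the example_* helper is illustrative only and kept
+ * under #if 0 so it has no effect on the build.
+ */
+#if 0
+static void
+example_derived_class_callbacks(hid_t cid_parent, void *cb_data)
+{
+ hid_t cid_child;
+ hid_t lid;
+
+ /* The derived class reuses the same create callback and user data as its parent */
+ cid_child = H5Pcreate_class(cid_parent, CLASS2_NAME, test_genprop_cls_crt_cb1, cb_data, NULL, NULL, NULL, NULL);
+
+ /* Creating a list fires the create callback once per class in the hierarchy (twice here) */
+ lid = H5Pcreate(cid_child);
+
+ H5Pclose(lid);
+ H5Pclose_class(cid_child);
+}
+#endif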
+
+/****************************************************************
+**
+** test_genprop_basic_list(): Test basic generic property list code.
+** Tests creating new generic property lists.
+**
+****************************************************************/
+static void
+test_genprop_basic_list(void)
+{
+ hid_t cid1; /* Generic Property class ID */
+ hid_t cid2; /* Generic Property class ID */
+ hid_t lid1; /* Generic Property list ID */
+ size_t nprops; /* Number of properties */
+ size_t size; /* Size of property */
+ int prop1_value; /* Value for property #1 */
+ float prop2_value; /* Value for property #2 */
+ herr_t ret; /* Generic return value */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing Basic Generic Property List Creation Functionality\n"));
+
+ /* Create a new generic class, derived from the root of the class hierarchy */
+ cid1 = H5Pcreate_class(H5P_ROOT, CLASS1_NAME, NULL, NULL, NULL, NULL, NULL, NULL);
+ CHECK_I(cid1, "H5Pcreate_class");
+
+ /* Add several properties (w/default values) */
+
+ /* Insert first property into class (with no callbacks) */
+ ret =
+ H5Pregister2(cid1, PROP1_NAME, PROP1_SIZE, PROP1_DEF_VALUE, NULL, NULL, NULL, NULL, NULL, NULL, NULL);
+ CHECK_I(ret, "H5Pregister2");
+
+ /* Insert second property into class (with no callbacks) */
+ ret =
+ H5Pregister2(cid1, PROP2_NAME, PROP2_SIZE, PROP2_DEF_VALUE, NULL, NULL, NULL, NULL, NULL, NULL, NULL);
+ CHECK_I(ret, "H5Pregister2");
+
+ /* Check the number of properties in class */
+ ret = H5Pget_nprops(cid1, &nprops);
+ CHECK_I(ret, "H5Pget_nprops");
+ VERIFY(nprops, 2, "H5Pget_nprops");
+
+ /* Create a property list from the class */
+ lid1 = H5Pcreate(cid1);
+ CHECK_I(lid1, "H5Pcreate");
+
+ /* Get the list's class */
+ cid2 = H5Pget_class(lid1);
+ CHECK_I(cid2, "H5Pget_class");
+
+ /* Check that the list's class is correct */
+ ret = H5Pequal(cid1, cid2);
+ VERIFY(ret, 1, "H5Pequal");
+
+ /* Check correct "is a" class/list relationship */
+ ret = H5Pisa_class(lid1, cid1);
+ VERIFY(ret, 1, "H5Pisa_class");
+
+ /* Check "is a" class/list relationship another way */
+ ret = H5Pisa_class(lid1, cid2);
+ VERIFY(ret, 1, "H5Pisa_class");
+
+ /* Close class */
+ ret = H5Pclose_class(cid2);
+ CHECK_I(ret, "H5Pclose_class");
+
+ /* Check the number of properties in list */
+ ret = H5Pget_nprops(lid1, &nprops);
+ CHECK_I(ret, "H5Pget_nprops");
+ VERIFY(nprops, 2, "H5Pget_nprops");
+
+ /* Check existence of properties */
+ ret = H5Pexist(lid1, PROP1_NAME);
+ VERIFY(ret, 1, "H5Pexist");
+ ret = H5Pexist(lid1, PROP2_NAME);
+ VERIFY(ret, 1, "H5Pexist");
+
+ /* Check the sizes of the properties */
+ ret = H5Pget_size(lid1, PROP1_NAME, &size);
+ CHECK_I(ret, "H5Pget_size");
+ VERIFY(size, PROP1_SIZE, "H5Pget_size");
+ ret = H5Pget_size(lid1, PROP2_NAME, &size);
+ CHECK_I(ret, "H5Pget_size");
+ VERIFY(size, PROP2_SIZE, "H5Pget_size");
+
+ /* Check values of properties (set with default values) */
+ ret = H5Pget(lid1, PROP1_NAME, &prop1_value);
+ CHECK_I(ret, "H5Pget");
+ VERIFY(prop1_value, *PROP1_DEF_VALUE, "H5Pget");
+ ret = H5Pget(lid1, PROP2_NAME, &prop2_value);
+ CHECK_I(ret, "H5Pget");
+ /* Verify the floating-point value this way to avoid a compiler warning. */
+ if (!H5_FLT_ABS_EQUAL(prop2_value, *PROP2_DEF_VALUE))
+ HDprintf("*** UNEXPECTED VALUE from %s should be %f, but is %f at line %4d in %s\n", "H5Pget",
+ (double)*PROP2_DEF_VALUE, (double)prop2_value, (int)__LINE__, __FILE__);
+
+ /* Close list */
+ ret = H5Pclose(lid1);
+ CHECK_I(ret, "H5Pclose");
+
+ /* Close class */
+ ret = H5Pclose_class(cid1);
+ CHECK_I(ret, "H5Pclose_class");
+
+} /* end test_genprop_basic_list() */
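+
+/* A hypothetical, condensed sketch of the class/list relationship checked
+ * above: a list created from a class reports that class back through
+ * H5Pget_class() and answers H5Pisa_class() positively for it. The example_*
+ * helper is illustrative only and is kept under #if 0.
+ */
+#if 0
+static void
+example_list_class_identity(hid_t cid)
+{
+ hid_t lid; /* List created from the class */
+ hid_t list_cid; /* Class ID reported by the list */
+
+ lid = H5Pcreate(cid); /* The list inherits the class's registered properties */
+ list_cid = H5Pget_class(lid); /* Caller must close the returned class ID */
+
+ if (H5Pequal(cid, list_cid) <= 0 || H5Pisa_class(lid, cid) <= 0)
+ HDprintf("unexpected: list does not report the class it was created from\n");
+
+ H5Pclose_class(list_cid);
+ H5Pclose(lid);
+}
+#endif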
+
+/****************************************************************
+**
+** test_genprop_basic_list_prop(): Test basic generic property list code.
+** Tests creating new generic property lists and adding and
+** removing properties from them.
+**
+****************************************************************/
+static void
+test_genprop_basic_list_prop(void)
+{
+ hid_t cid1; /* Generic Property class ID */
+ hid_t lid1; /* Generic Property list ID */
+ size_t nprops; /* Number of properties */
+ int prop1_value; /* Value for property #1 */
+ float prop2_value; /* Value for property #2 */
+ char prop3_value[10]; /* Property #3 value */
+ double prop4_value; /* Property #4 value */
+ herr_t ret; /* Generic return value */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing Basic Generic Property List Property Functionality\n"));
+
+ /* Create a new generic class, derived from the root of the class hierarchy */
+ cid1 = H5Pcreate_class(H5P_ROOT, CLASS1_NAME, NULL, NULL, NULL, NULL, NULL, NULL);
+ CHECK_I(cid1, "H5Pcreate_class");
+
+ /* Add several properties (several w/default values) */
+
+ /* Insert first property into class (with no callbacks) */
+ ret =
+ H5Pregister2(cid1, PROP1_NAME, PROP1_SIZE, PROP1_DEF_VALUE, NULL, NULL, NULL, NULL, NULL, NULL, NULL);
+ CHECK_I(ret, "H5Pregister2");
+
+ /* Insert second property into class (with no callbacks) */
+ ret =
+ H5Pregister2(cid1, PROP2_NAME, PROP2_SIZE, PROP2_DEF_VALUE, NULL, NULL, NULL, NULL, NULL, NULL, NULL);
+ CHECK_I(ret, "H5Pregister2");
+
+ /* Create a property list from the class */
+ lid1 = H5Pcreate(cid1);
+ CHECK_I(lid1, "H5Pcreate");
+
+ /* Check the number of properties in list */
+ ret = H5Pget_nprops(lid1, &nprops);
+ CHECK_I(ret, "H5Pget_nprops");
+ VERIFY(nprops, 2, "H5Pget_nprops");
+
+ /* Add temporary properties */
+
+ /* Insert first temporary property into list (with no callbacks) */
+ ret = H5Pinsert2(lid1, PROP3_NAME, PROP3_SIZE, PROP3_DEF_VALUE, NULL, NULL, NULL, NULL, NULL, NULL);
+ CHECK_I(ret, "H5Pinsert2");
+
+ /* Insert second temporary property into list (with no callbacks) */
+ ret = H5Pinsert2(lid1, PROP4_NAME, PROP4_SIZE, PROP4_DEF_VALUE, NULL, NULL, NULL, NULL, NULL, NULL);
+ CHECK_I(ret, "H5Pinsert2");
+
+ /* Check the number of properties in list */
+ ret = H5Pget_nprops(lid1, &nprops);
+ CHECK_I(ret, "H5Pget_nprops");
+ VERIFY(nprops, 4, "H5Pget_nprops");
+
+ /* Check existence of all properties */
+ ret = H5Pexist(lid1, PROP1_NAME);
+ VERIFY(ret, 1, "H5Pexist");
+ ret = H5Pexist(lid1, PROP2_NAME);
+ VERIFY(ret, 1, "H5Pexist");
+ ret = H5Pexist(lid1, PROP3_NAME);
+ VERIFY(ret, 1, "H5Pexist");
+ ret = H5Pexist(lid1, PROP4_NAME);
+ VERIFY(ret, 1, "H5Pexist");
+
+ /* Check values of permanent properties (set with default values) */
+ ret = H5Pget(lid1, PROP1_NAME, &prop1_value);
+ CHECK_I(ret, "H5Pget");
+ VERIFY(prop1_value, *PROP1_DEF_VALUE, "H5Pget");
+ ret = H5Pget(lid1, PROP2_NAME, &prop2_value);
+ CHECK_I(ret, "H5Pget");
+ /* Verify the floating-point value this way to avoid a compiler warning. */
+ if (!H5_FLT_ABS_EQUAL(prop2_value, *PROP2_DEF_VALUE))
+ HDprintf("*** UNEXPECTED VALUE from %s should be %f, but is %f at line %4d in %s\n", "H5Pget",
+ (double)*PROP2_DEF_VALUE, (double)prop2_value, (int)__LINE__, __FILE__);
+
+ /* Check values of temporary properties (set with regular values) */
+ ret = H5Pget(lid1, PROP3_NAME, &prop3_value);
+ CHECK_I(ret, "H5Pget");
+ if (HDmemcmp(&prop3_value, PROP3_DEF_VALUE, PROP3_SIZE) != 0)
+ TestErrPrintf("Property #3 doesn't match!, line=%d\n", __LINE__);
+ ret = H5Pget(lid1, PROP4_NAME, &prop4_value);
+ CHECK_I(ret, "H5Pget");
+ /* Verify the floating-point value this way to avoid a compiler warning. */
+ if (!H5_DBL_ABS_EQUAL(prop4_value, *PROP4_DEF_VALUE))
+ HDprintf("*** UNEXPECTED VALUE from %s should be %f, but is %f at line %4d in %s\n", "H5Pget",
+ *PROP4_DEF_VALUE, prop4_value, (int)__LINE__, __FILE__);
+
+ /* Delete permanent property */
+ ret = H5Premove(lid1, PROP2_NAME);
+ CHECK_I(ret, "H5Premove");
+
+ /* Check number of properties */
+ ret = H5Pget_nprops(lid1, &nprops);
+ CHECK_I(ret, "H5Pget_nprops");
+ VERIFY(nprops, 3, "H5Pget_nprops");
+
+ /* Delete temporary property */
+ ret = H5Premove(lid1, PROP3_NAME);
+ CHECK_I(ret, "H5Premove");
+
+ /* Check number of properties */
+ ret = H5Pget_nprops(lid1, &nprops);
+ CHECK_I(ret, "H5Pget_nprops");
+ VERIFY(nprops, 2, "H5Pget_nprops");
+
+ /* Check existence of remaining properties */
+ ret = H5Pexist(lid1, PROP1_NAME);
+ VERIFY(ret, 1, "H5Pexist");
+ ret = H5Pexist(lid1, PROP4_NAME);
+ VERIFY(ret, 1, "H5Pexist");
+
+ /* Check values of permanent properties (set with default values) */
+ ret = H5Pget(lid1, PROP1_NAME, &prop1_value);
+ CHECK_I(ret, "H5Pget");
+ VERIFY(prop1_value, *PROP1_DEF_VALUE, "H5Pget");
+
+ /* Check values of temporary properties (set with regular values) */
+ ret = H5Pget(lid1, PROP4_NAME, &prop4_value);
+ CHECK_I(ret, "H5Pget");
+ /* Verify the floating-point value this way to avoid a compiler warning. */
+ if (!H5_DBL_ABS_EQUAL(prop4_value, *PROP4_DEF_VALUE))
+ HDprintf("*** UNEXPECTED VALUE from %s should be %f, but is %f at line %4d in %s\n", "H5Pget",
+ *PROP4_DEF_VALUE, prop4_value, (int)__LINE__, __FILE__);
+
+ /* Close list */
+ ret = H5Pclose(lid1);
+ CHECK_I(ret, "H5Pclose");
+
+ /* Close class */
+ ret = H5Pclose_class(cid1);
+ CHECK_I(ret, "H5Pclose_class");
+
+} /* end test_genprop_basic_list_prop() */
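+
+/* A hypothetical sketch of the permanent/temporary distinction exercised
+ * above: H5Pregister2() adds a property to a class, so every list later
+ * created from that class inherits it, while H5Pinsert2() adds a property to
+ * one existing list only. The example_* helper is illustrative only and kept
+ * under #if 0.
+ */
+#if 0
+static void
+example_permanent_vs_temporary(hid_t cid)
+{
+ hid_t lid1;
+ hid_t lid2;
+
+ /* Permanent property, registered on the class itself */
+ H5Pregister2(cid, PROP1_NAME, PROP1_SIZE, PROP1_DEF_VALUE, NULL, NULL, NULL, NULL, NULL, NULL, NULL);
+
+ lid1 = H5Pcreate(cid);
+ /* Temporary property, inserted into this one list only */
+ H5Pinsert2(lid1, PROP3_NAME, PROP3_SIZE, PROP3_DEF_VALUE, NULL, NULL, NULL, NULL, NULL, NULL);
+
+ lid2 = H5Pcreate(cid);
+ /* lid2 contains PROP1_NAME but not PROP3_NAME */
+
+ H5Pclose(lid1);
+ H5Pclose(lid2);
+}
+#endif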
+
+/****************************************************************
+**
+** test_genprop_iter2(): Property iterator for test_genprop_list_iter
+**
+****************************************************************/
+static int
+test_genprop_iter2(hid_t H5_ATTR_UNUSED id, const char *name, void *iter_data)
+{
+ iter_data_t *idata = (iter_data_t *)iter_data;
+
+ return HDstrcmp(name, idata->names[idata->iter_count++]);
+}
+
+/****************************************************************
+**
+** test_genprop_list_iter(): Test basic generic property list code.
+** Tests iterating over generic property list properties.
+**
+****************************************************************/
+static void
+test_genprop_list_iter(void)
+{
+ hid_t cid1; /* Generic Property class ID */
+ hid_t lid1; /* Generic Property list ID */
+ size_t nprops; /* Number of properties */
+ int idx; /* Index to start iteration at */
+ struct { /* Struct for iterations */
+ int iter_count;
+ const char **names;
+ } iter_struct;
+ const char *pnames[4] = {/* Names of properties for iterator */
+ PROP3_NAME, PROP4_NAME, PROP1_NAME, PROP2_NAME};
+ herr_t ret; /* Generic return value */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing Generic Property List Iteration Functionality\n"));
+
+ /* Create a new generic class, derived from the root of the class hierarchy */
+ cid1 = H5Pcreate_class(H5P_ROOT, CLASS1_NAME, NULL, NULL, NULL, NULL, NULL, NULL);
+ CHECK_I(cid1, "H5Pcreate_class");
+
+ /* Add several properties (several w/default values) */
+
+ /* Insert first property into class (with no callbacks) */
+ ret =
+ H5Pregister2(cid1, PROP1_NAME, PROP1_SIZE, PROP1_DEF_VALUE, NULL, NULL, NULL, NULL, NULL, NULL, NULL);
+ CHECK_I(ret, "H5Pregister2");
+
+ /* Insert second property into class (with no callbacks) */
+ ret =
+ H5Pregister2(cid1, PROP2_NAME, PROP2_SIZE, PROP2_DEF_VALUE, NULL, NULL, NULL, NULL, NULL, NULL, NULL);
+ CHECK_I(ret, "H5Pregister2");
+
+ /* Create a property list from the class */
+ lid1 = H5Pcreate(cid1);
+ CHECK_I(lid1, "H5Pcreate");
+
+ /* Check the number of properties in list */
+ ret = H5Pget_nprops(lid1, &nprops);
+ CHECK_I(ret, "H5Pget_nprops");
+ VERIFY(nprops, 2, "H5Pget_nprops");
+
+ /* Add temporary properties */
+
+ /* Insert first temporary property into list (with no callbacks) */
+ ret = H5Pinsert2(lid1, PROP3_NAME, PROP3_SIZE, PROP3_DEF_VALUE, NULL, NULL, NULL, NULL, NULL, NULL);
+ CHECK_I(ret, "H5Pinsert2");
+
+ /* Insert second temporary property into list (with no callbacks) */
+ ret = H5Pinsert2(lid1, PROP4_NAME, PROP4_SIZE, PROP4_DEF_VALUE, NULL, NULL, NULL, NULL, NULL, NULL);
+ CHECK_I(ret, "H5Pinsert2");
+
+ /* Check the number of properties in list */
+ ret = H5Pget_nprops(lid1, &nprops);
+ CHECK_I(ret, "H5Pget_nprops");
+ VERIFY(nprops, 4, "H5Pget_nprops");
+
+ /* Iterate over all properties in list */
+ iter_struct.iter_count = 0;
+ iter_struct.names = pnames;
+ ret = H5Piterate(lid1, NULL, test_genprop_iter2, &iter_struct);
+ VERIFY(ret, 0, "H5Piterate");
+
+ /* Iterate over last three properties in list */
+ idx = iter_struct.iter_count = 1;
+ ret = H5Piterate(lid1, &idx, test_genprop_iter2, &iter_struct);
+ VERIFY(ret, 0, "H5Piterate");
+ VERIFY(idx, (int)nprops, "H5Piterate");
+
+ /* Close list */
+ ret = H5Pclose(lid1);
+ CHECK_I(ret, "H5Pclose");
+
+ /* Close class */
+ ret = H5Pclose_class(cid1);
+ CHECK_I(ret, "H5Pclose_class");
+
+} /* end test_genprop_list_iter() */
+
+typedef struct {
+ /* Creation information */
+ int crt_count;
+ char *crt_name;
+ void *crt_value;
+
+ /* Set information */
+ int set_count;
+ hid_t set_plist_id;
+ char *set_name;
+ void *set_value;
+
+ /* Get information */
+ int get_count;
+ hid_t get_plist_id;
+ char *get_name;
+ void *get_value;
+
+ /* Delete information */
+ int del_count;
+ hid_t del_plist_id;
+ char *del_name;
+ void *del_value;
+
+ /* Copy information */
+ int cop_count;
+ char *cop_name;
+ void *cop_value;
+
+ /* Compare information */
+ int cmp_count;
+
+ /* Close information */
+ int cls_count;
+ char *cls_name;
+ void *cls_value;
+} prop_cb_info;
+
+/* Global variables for Callback information */
+prop_cb_info prop1_cb_info; /* Callback statistics for property #1 */
+prop_cb_info prop2_cb_info; /* Callback statistics for property #2 */
+prop_cb_info prop3_cb_info; /* Callback statistics for property #3 */
+
+/****************************************************************
+**
+** test_genprop_cls_cpy_cb2(): Property Class callback for test_genprop_list_callback
+**
+****************************************************************/
+static herr_t
+test_genprop_cls_cpy_cb2(hid_t new_list_id, hid_t H5_ATTR_UNUSED old_list_id, void *create_data)
+{
+ count_data_t *cdata = (count_data_t *)create_data;
+
+ cdata->count++;
+ cdata->id = new_list_id;
+
+ return SUCCEED;
+}
+
+/****************************************************************
+**
+** test_genprop_prop_crt_cb1(): Property creation callback for test_genprop_list_callback
+**
+****************************************************************/
+static herr_t
+test_genprop_prop_crt_cb1(const char *name, size_t size, void *def_value)
+{
+ /* Set the information from the creation call */
+ prop1_cb_info.crt_count++;
+ prop1_cb_info.crt_name = HDstrdup(name);
+ prop1_cb_info.crt_value = HDmalloc(size);
+ HDmemcpy(prop1_cb_info.crt_value, def_value, size);
+
+ return (SUCCEED);
+}
+
+/****************************************************************
+**
+** test_genprop_prop_set_cb1(): Property set callback for test_genprop_list_callback
+**
+****************************************************************/
+static herr_t
+test_genprop_prop_set_cb1(hid_t plist_id, const char *name, size_t size, void *value)
+{
+ /* Set the information from the set call */
+ prop1_cb_info.set_count++;
+ prop1_cb_info.set_plist_id = plist_id;
+ if (prop1_cb_info.set_name == NULL)
+ prop1_cb_info.set_name = HDstrdup(name);
+ if (prop1_cb_info.set_value == NULL)
+ prop1_cb_info.set_value = HDmalloc(size);
+ HDmemcpy(prop1_cb_info.set_value, value, size);
+
+ return (SUCCEED);
+}
+
+/****************************************************************
+**
+** test_genprop_prop_get_cb1(): Property get callback for test_genprop_list_callback
+**
+****************************************************************/
+static herr_t
+test_genprop_prop_get_cb1(hid_t plist_id, const char *name, size_t size, void *value)
+{
+ /* Set the information from the get call */
+ prop1_cb_info.get_count++;
+ prop1_cb_info.get_plist_id = plist_id;
+ if (prop1_cb_info.get_name == NULL)
+ prop1_cb_info.get_name = HDstrdup(name);
+ if (prop1_cb_info.get_value == NULL)
+ prop1_cb_info.get_value = HDmalloc(size);
+ HDmemcpy(prop1_cb_info.get_value, value, size);
+
+ return (SUCCEED);
+}
+
+/****************************************************************
+**
+** test_genprop_prop_cop_cb1(): Property copy callback for test_genprop_list_callback
+**
+****************************************************************/
+static herr_t
+test_genprop_prop_cop_cb1(const char *name, size_t size, void *value)
+{
+ /* Set the information from the copy call */
+ prop1_cb_info.cop_count++;
+ if (prop1_cb_info.cop_name == NULL)
+ prop1_cb_info.cop_name = HDstrdup(name);
+ if (prop1_cb_info.cop_value == NULL)
+ prop1_cb_info.cop_value = HDmalloc(size);
+ HDmemcpy(prop1_cb_info.cop_value, value, size);
+
+ return (SUCCEED);
+}
+
+/****************************************************************
+**
+** test_genprop_prop_cmp_cb1(): Property comparison callback for test_genprop_list_callback
+**
+****************************************************************/
+static int
+test_genprop_prop_cmp_cb1(const void *value1, const void *value2, size_t size)
+{
+ /* Set the information from the comparison call */
+ prop1_cb_info.cmp_count++;
+
+ return (HDmemcmp(value1, value2, size));
+}
+
+/****************************************************************
+**
+** test_genprop_prop_cmp_cb3(): Property comparison callback for test_genprop_list_callback
+**
+****************************************************************/
+static int
+test_genprop_prop_cmp_cb3(const void *value1, const void *value2, size_t size)
+{
+ /* Set the information from the comparison call */
+ prop3_cb_info.cmp_count++;
+
+ return (HDmemcmp(value1, value2, size));
+}
+
+/****************************************************************
+**
+** test_genprop_prop_cls_cb1(): Property close callback for test_genprop_list_callback
+**
+****************************************************************/
+static herr_t
+test_genprop_prop_cls_cb1(const char *name, size_t size, void *value)
+{
+ /* Set the information from the close call */
+ prop1_cb_info.cls_count++;
+ if (prop1_cb_info.cls_name == NULL)
+ prop1_cb_info.cls_name = HDstrdup(name);
+ if (prop1_cb_info.cls_value == NULL)
+ prop1_cb_info.cls_value = HDmalloc(size);
+ HDmemcpy(prop1_cb_info.cls_value, value, size);
+
+ return (SUCCEED);
+}
+
+/****************************************************************
+**
+** test_genprop_prop_del_cb2(): Property delete callback for test_genprop_list_callback
+**
+****************************************************************/
+static herr_t
+test_genprop_prop_del_cb2(hid_t plist_id, const char *name, size_t size, void *value)
+{
+ /* Set the information from the delete call */
+ prop2_cb_info.del_count++;
+ prop2_cb_info.del_plist_id = plist_id;
+ prop2_cb_info.del_name = HDstrdup(name);
+ prop2_cb_info.del_value = HDmalloc(size);
+ HDmemcpy(prop2_cb_info.del_value, value, size);
+
+ return (SUCCEED);
+}
+
+/****************************************************************
+**
+** test_genprop_list_callback(): Test basic generic property list code.
+** Tests callbacks for properties in a generic property list.
+**
+****************************************************************/
+static void
+test_genprop_list_callback(void)
+{
+ hid_t cid1; /* Generic Property class ID */
+ hid_t lid1; /* Generic Property list ID */
+ hid_t lid2; /* 2nd Generic Property list ID */
+ size_t nprops; /* Number of properties in class */
+ int prop1_value; /* Value for property #1 */
+ int prop1_new_value = 20; /* Property #1 new value */
+ float prop2_value; /* Value for property #2 */
+ char prop3_value[10]; /* Property #3 value */
+ char prop3_new_value[10] = "10 chairs"; /* Property #3 new value */
+ double prop4_value; /* Property #4 value */
+ struct { /* Struct for callbacks */
+ int count;
+ hid_t id;
+ } cop_cb_struct;
+ herr_t ret; /* Generic return value */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing Basic Generic Property List Property Callback Functionality\n"));
+
+ /* Create a new generic class, derived from the root of the class hierarchy */
+ cid1 = H5Pcreate_class(H5P_ROOT, CLASS1_NAME, NULL, NULL, test_genprop_cls_cpy_cb2, &cop_cb_struct, NULL,
+ NULL);
+ CHECK_I(cid1, "H5Pcreate_class");
+
+ /* Insert first property into class (with callbacks) */
+ ret = H5Pregister2(cid1, PROP1_NAME, PROP1_SIZE, PROP1_DEF_VALUE, test_genprop_prop_crt_cb1,
+ test_genprop_prop_set_cb1, test_genprop_prop_get_cb1, NULL, test_genprop_prop_cop_cb1,
+ test_genprop_prop_cmp_cb1, test_genprop_prop_cls_cb1);
+ CHECK_I(ret, "H5Pregister2");
+
+ /* Insert second property into class (with only delete callback) */
+ ret = H5Pregister2(cid1, PROP2_NAME, PROP2_SIZE, PROP2_DEF_VALUE, NULL, NULL, NULL,
+ test_genprop_prop_del_cb2, NULL, NULL, NULL);
+ CHECK_I(ret, "H5Pregister2");
+
+ /* Insert third property into class (with only compare callback) */
+ ret = H5Pregister2(cid1, PROP3_NAME, PROP3_SIZE, PROP3_DEF_VALUE, NULL, NULL, NULL, NULL, NULL,
+ test_genprop_prop_cmp_cb3, NULL);
+ CHECK_I(ret, "H5Pregister2");
+
+ /* Insert fourth property into class (with no callbacks) */
+ ret =
+ H5Pregister2(cid1, PROP4_NAME, PROP4_SIZE, PROP4_DEF_VALUE, NULL, NULL, NULL, NULL, NULL, NULL, NULL);
+ CHECK_I(ret, "H5Pregister2");
+
+ /* Check the number of properties in class */
+ ret = H5Pget_nprops(cid1, &nprops);
+ CHECK_I(ret, "H5Pget_nprops");
+ VERIFY(nprops, 4, "H5Pget_nprops");
+
+ /* Initialize class callback structs */
+ cop_cb_struct.count = 0;
+ cop_cb_struct.id = (-1);
+
+ /* Initialize callback information for properties tracked */
+ HDmemset(&prop1_cb_info, 0, sizeof(prop_cb_info));
+ HDmemset(&prop2_cb_info, 0, sizeof(prop_cb_info));
+ HDmemset(&prop3_cb_info, 0, sizeof(prop_cb_info));
+
+ /* Create a property list from the class */
+ lid1 = H5Pcreate(cid1);
+ CHECK_I(lid1, "H5Pcreate");
+
+ /* The compare callback should not have been called on property 1, as
+ * the property is always copied */
+ VERIFY(prop1_cb_info.cmp_count, 0, "H5Pcreate");
+ /* The compare callback should not have been called on property 3, as there
+ * is no create callback */
+ VERIFY(prop3_cb_info.cmp_count, 0, "H5Pcreate");
+
+ /* Verify creation callback information for properties tracked */
+ VERIFY(prop1_cb_info.crt_count, 1, "H5Pcreate");
+ if (HDstrcmp(prop1_cb_info.crt_name, PROP1_NAME) != 0)
+ TestErrPrintf("Property #1 name doesn't match!, line=%d\n", __LINE__);
+ if (HDmemcmp(prop1_cb_info.crt_value, PROP1_DEF_VALUE, PROP1_SIZE) != 0)
+ TestErrPrintf("Property #1 value doesn't match!, line=%d\n", __LINE__);
+
+ /* Check values of permanent properties (set with default values) */
+ ret = H5Pget(lid1, PROP1_NAME, &prop1_value);
+ CHECK_I(ret, "H5Pget");
+ VERIFY(prop1_value, *PROP1_DEF_VALUE, "H5Pget");
+ /* The compare callback should not have been called */
+ VERIFY(prop1_cb_info.cmp_count, 0, "H5Pget");
+ ret = H5Pget(lid1, PROP2_NAME, &prop2_value);
+ CHECK_I(ret, "H5Pget");
+ /* Verify the floating-point value this way to avoid a compiler warning. */
+ if (!H5_FLT_ABS_EQUAL(prop2_value, *PROP2_DEF_VALUE))
+ HDprintf("*** UNEXPECTED VALUE from %s should be %f, but is %f at line %4d in %s\n", "H5Pget",
+ (double)*PROP2_DEF_VALUE, (double)prop2_value, (int)__LINE__, __FILE__);
+
+ /* Check values of temporary properties (set with regular values) */
+ ret = H5Pget(lid1, PROP3_NAME, &prop3_value);
+ CHECK_I(ret, "H5Pget");
+ if (HDmemcmp(&prop3_value, PROP3_DEF_VALUE, PROP3_SIZE) != 0)
+ TestErrPrintf("Property #3 doesn't match!, line=%d\n", __LINE__);
+ /* The compare callback should not have been called, as there is no get
+ * callback for this property */
+ VERIFY(prop3_cb_info.cmp_count, 0, "H5Pget");
+ ret = H5Pget(lid1, PROP4_NAME, &prop4_value);
+ CHECK_I(ret, "H5Pget");
+ /* Verify the floating-point value this way to avoid a compiler warning. */
+ if (!H5_DBL_ABS_EQUAL(prop4_value, *PROP4_DEF_VALUE))
+ HDprintf("*** UNEXPECTED VALUE from %s should be %f, but is %f at line %4d in %s\n", "H5Pget",
+ *PROP4_DEF_VALUE, prop4_value, (int)__LINE__, __FILE__);
+
+ /* Verify get callback information for properties tracked */
+ VERIFY(prop1_cb_info.get_count, 1, "H5Pget");
+ VERIFY(prop1_cb_info.get_plist_id, lid1, "H5Pget");
+ if (HDstrcmp(prop1_cb_info.get_name, PROP1_NAME) != 0)
+ TestErrPrintf("Property #1 name doesn't match!, line=%d\n", __LINE__);
+ if (HDmemcmp(prop1_cb_info.get_value, PROP1_DEF_VALUE, PROP1_SIZE) != 0)
+ TestErrPrintf("Property #1 value doesn't match!, line=%d\n", __LINE__);
+
+ /* Set value of property #1 to different value */
+ ret = H5Pset(lid1, PROP1_NAME, &prop1_new_value);
+ CHECK_I(ret, "H5Pset");
+
+ /* Verify set callback information for properties tracked */
+ VERIFY(prop1_cb_info.set_count, 1, "H5Pset");
+ VERIFY(prop1_cb_info.set_plist_id, lid1, "H5Pset");
+ if (HDstrcmp(prop1_cb_info.set_name, PROP1_NAME) != 0)
+ TestErrPrintf("Property #1 name doesn't match!, line=%d\n", __LINE__);
+ if (HDmemcmp(prop1_cb_info.set_value, &prop1_new_value, PROP1_SIZE) != 0)
+ TestErrPrintf("Property #1 value doesn't match!, line=%d\n", __LINE__);
+
+ /* The compare callback should not have been called */
+ VERIFY(prop1_cb_info.cmp_count, 0, "H5Pset");
+
+ /* Set value of property #3 to different value */
+ ret = H5Pset(lid1, PROP3_NAME, prop3_new_value);
+ CHECK_I(ret, "H5Pset");
+
+ /* The compare callback should not have been called */
+ VERIFY(prop3_cb_info.cmp_count, 0, "H5Pset");
+
+ /* Check new value of tracked properties */
+ ret = H5Pget(lid1, PROP1_NAME, &prop1_value);
+ CHECK_I(ret, "H5Pget");
+ VERIFY(prop1_value, prop1_new_value, "H5Pget");
+
+ /* Verify get callback information again for properties tracked */
+ VERIFY(prop1_cb_info.get_count, 2, "H5Pget");
+ VERIFY(prop1_cb_info.get_plist_id, lid1, "H5Pget");
+ if (HDstrcmp(prop1_cb_info.get_name, PROP1_NAME) != 0)
+ TestErrPrintf("Property #1 name doesn't match!, line=%d\n", __LINE__);
+ if (HDmemcmp(prop1_cb_info.get_value, &prop1_new_value, PROP1_SIZE) != 0)
+ TestErrPrintf("Property #1 value doesn't match!, line=%d\n", __LINE__);
+
+ /* Delete property #2 */
+ ret = H5Premove(lid1, PROP2_NAME);
+ CHECK_I(ret, "H5Premove");
+
+ /* Verify delete callback information for properties tracked */
+ VERIFY(prop2_cb_info.del_count, 1, "H5Premove");
+ VERIFY(prop2_cb_info.del_plist_id, lid1, "H5Premove");
+ if (HDstrcmp(prop2_cb_info.del_name, PROP2_NAME) != 0)
+ TestErrPrintf("Property #2 name doesn't match!, line=%d\n", __LINE__);
+ if (HDmemcmp(prop2_cb_info.del_value, PROP2_DEF_VALUE, PROP2_SIZE) != 0)
+ TestErrPrintf("Property #2 value doesn't match!, line=%d\n", __LINE__);
+
+ /* Copy first list */
+ lid2 = H5Pcopy(lid1);
+ CHECK_I(lid2, "H5Pcopy");
+
+ /* Verify copy callback information for properties tracked */
+ VERIFY(prop1_cb_info.cop_count, 1, "H5Pcopy");
+ if (HDstrcmp(prop1_cb_info.cop_name, PROP1_NAME) != 0)
+ TestErrPrintf("Property #1 name doesn't match!, line=%d\n", __LINE__);
+ if (HDmemcmp(prop1_cb_info.cop_value, &prop1_new_value, PROP1_SIZE) != 0)
+ TestErrPrintf("Property #1 value doesn't match!, line=%d\n", __LINE__);
+
+ /* Verify that the class creation callback occurred */
+ VERIFY(cop_cb_struct.count, 1, "H5Pcopy");
+ VERIFY(cop_cb_struct.id, lid2, "H5Pcopy");
+
+ /* Compare the two lists */
+ ret = H5Pequal(lid1, lid2);
+ VERIFY(ret, 1, "H5Pequal");
+
+ /* Verify compare callback information for properties tracked */
+ VERIFY(prop1_cb_info.cmp_count, 1, "H5Pequal");
+ VERIFY(prop3_cb_info.cmp_count, 1, "H5Pequal");
+
+ /* Close first list */
+ ret = H5Pclose(lid1);
+ CHECK_I(ret, "H5Pclose");
+
+ /* Verify close callback information for properties tracked */
+ VERIFY(prop1_cb_info.cls_count, 1, "H5Pclose");
+ if (HDstrcmp(prop1_cb_info.cls_name, PROP1_NAME) != 0)
+ TestErrPrintf("Property #1 name doesn't match!, line=%d\n", __LINE__);
+ if (HDmemcmp(prop1_cb_info.cls_value, &prop1_new_value, PROP1_SIZE) != 0)
+ TestErrPrintf("Property #1 value doesn't match!, line=%d\n", __LINE__);
+
+ /* Close second list */
+ ret = H5Pclose(lid2);
+ CHECK_I(ret, "H5Pclose");
+
+ /* Verify close callback information for properties tracked */
+ VERIFY(prop1_cb_info.cls_count, 2, "H5Pclose");
+
+ /* Free memory allocated for tracking properties */
+ HDfree(prop1_cb_info.crt_name);
+ HDfree(prop1_cb_info.crt_value);
+ HDfree(prop1_cb_info.get_name);
+ HDfree(prop1_cb_info.get_value);
+ HDfree(prop1_cb_info.set_name);
+ HDfree(prop1_cb_info.set_value);
+ HDfree(prop1_cb_info.cop_name);
+ HDfree(prop1_cb_info.cop_value);
+ HDfree(prop1_cb_info.cls_name);
+ HDfree(prop1_cb_info.cls_value);
+ HDfree(prop2_cb_info.del_name);
+ HDfree(prop2_cb_info.del_value);
+
+ /* Close class */
+ ret = H5Pclose_class(cid1);
+ CHECK_I(ret, "H5Pclose_class");
+} /* end test_genprop_list_callback() */
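+
+/* A hypothetical summary sketch of when the per-property callbacks registered
+ * above fire, as exercised by this test: create on H5Pcreate(), set on
+ * H5Pset(), get on H5Pget(), copy on H5Pcopy(), compare on H5Pequal(), delete
+ * on H5Premove(), and close on H5Pclose(). The example_* helper is
+ * illustrative only and kept under #if 0.
+ */
+#if 0
+static void
+example_property_callback_triggers(hid_t cid, int new_value)
+{
+ hid_t lid;
+ hid_t copy;
+ int value;
+
+ lid = H5Pcreate(cid); /* -> create callback */
+ H5Pset(lid, PROP1_NAME, &new_value); /* -> set callback */
+ H5Pget(lid, PROP1_NAME, &value); /* -> get callback */
+ copy = H5Pcopy(lid); /* -> copy callback */
+ H5Pequal(lid, copy); /* -> compare callback */
+ H5Premove(lid, PROP1_NAME); /* -> delete callback */
+ H5Pclose(lid); /* -> close callback for the remaining properties */
+ H5Pclose(copy);
+}
+#endif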
+
+/****************************************************************
+**
+** test_genprop_list_addprop(): Test adding properties to a
+** standard HDF5 property list and verifying that the library
+** ignores the extra properties.
+**
+****************************************************************/
+static void
+test_genprop_list_addprop(void)
+{
+ hid_t fid; /* File ID */
+ hid_t did; /* Dataset ID */
+ hid_t sid; /* Dataspace ID */
+ hid_t pid; /* Property List ID */
+ int prop1_value; /* Value for property #1 */
+ herr_t ret; /* Generic return value */
+
+ /* Create file */
+ fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(fid, FAIL, "H5Fcreate");
+
+ /* Create scalar dataspace for dataset */
+ sid = H5Screate(H5S_SCALAR);
+ CHECK(sid, FAIL, "H5Screate");
+
+ /* Create a dataset creation property list */
+ pid = H5Pcreate(H5P_DATASET_CREATE);
+ CHECK(pid, FAIL, "H5Pcreate");
+
+ /* Insert temporary property into list (with no callbacks) */
+ ret = H5Pinsert2(pid, PROP1_NAME, PROP1_SIZE, PROP1_DEF_VALUE, NULL, NULL, NULL, NULL, NULL, NULL);
+ CHECK_I(ret, "H5Pinsert2");
+
+ /* Check existence of added property */
+ ret = H5Pexist(pid, PROP1_NAME);
+ VERIFY(ret, 1, "H5Pexist");
+
+ /* Check values of property (set with default value) */
+ ret = H5Pget(pid, PROP1_NAME, &prop1_value);
+ CHECK_I(ret, "H5Pget");
+ VERIFY(prop1_value, *PROP1_DEF_VALUE, "H5Pget");
+
+ /* Create a dataset */
+ did = H5Dcreate2(fid, "Dataset1", H5T_NATIVE_INT, sid, H5P_DEFAULT, pid, H5P_DEFAULT);
+ CHECK(did, FAIL, "H5Dcreate2");
+
+ /* Check existence of added property (after using property list) */
+ ret = H5Pexist(pid, PROP1_NAME);
+ VERIFY(ret, 1, "H5Pexist");
+
+ /* Check values of property (set with default value) (after using property list) */
+ ret = H5Pget(pid, PROP1_NAME, &prop1_value);
+ CHECK_I(ret, "H5Pget");
+ VERIFY(prop1_value, *PROP1_DEF_VALUE, "H5Pget");
+
+ /* Close property list */
+ ret = H5Pclose(pid);
+ CHECK(ret, FAIL, "H5Pclose");
+
+ /* Close disk dataspace */
+ ret = H5Sclose(sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close Dataset */
+ ret = H5Dclose(did);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close file */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+
+} /* end test_genprop_list_addprop() */
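+
+/* A hypothetical, condensed sketch of the pattern tested above: an
+ * application can attach its own temporary property to a predefined property
+ * list (here a dataset creation property list), and the library carries the
+ * extra property along untouched when the list is used. The example_* helper
+ * and the "ExampleDataset" name are illustrative only; the block is kept
+ * under #if 0.
+ */
+#if 0
+static void
+example_user_property_on_dcpl(hid_t fid, hid_t sid)
+{
+ hid_t dcpl;
+ hid_t did;
+ int value;
+
+ dcpl = H5Pcreate(H5P_DATASET_CREATE);
+ H5Pinsert2(dcpl, PROP1_NAME, PROP1_SIZE, PROP1_DEF_VALUE, NULL, NULL, NULL, NULL, NULL, NULL);
+
+ did = H5Dcreate2(fid, "ExampleDataset", H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT);
+
+ /* The user property is still present and readable after the list was used */
+ H5Pget(dcpl, PROP1_NAME, &value);
+
+ H5Dclose(did);
+ H5Pclose(dcpl);
+}
+#endif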
+
+/****************************************************************
+**
+** test_genprop_class_addprop(): Test adding properties to a
+** standard HDF5 property class and verifying that the library
+** ignores the extra properties and continues to recognize the
+** derived class as a valid version of the derived-from class.
+**
+****************************************************************/
+static void
+test_genprop_class_addprop(void)
+{
+ hid_t fid; /* File ID */
+ hid_t did; /* Dataset ID */
+ hid_t sid; /* Dataspace ID */
+ hid_t cid; /* Property Class ID */
+ hid_t pid; /* Property List ID */
+ int prop1_value; /* Value for property #1 */
+ herr_t ret; /* Generic return value */
+
+ /* Create file */
+ fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(fid, FAIL, "H5Fcreate");
+
+ /* Create scalar dataspace for dataset */
+ sid = H5Screate(H5S_SCALAR);
+ CHECK(sid, FAIL, "H5Screate");
+
+ /* Create a new class, derived from the dataset creation property list class */
+ cid = H5Pcreate_class(H5P_DATASET_CREATE, CLASS1_NAME, NULL, NULL, NULL, NULL, NULL, NULL);
+ CHECK_I(cid, "H5Pcreate_class");
+#if 0
+ /* Check existence of an original property */
+ ret = H5Pexist(cid, H5O_CRT_PIPELINE_NAME);
+ VERIFY(ret, 1, "H5Pexist");
+#endif
+ /* Insert first property into class (with no callbacks) */
+ ret =
+ H5Pregister2(cid, PROP1_NAME, PROP1_SIZE, PROP1_DEF_VALUE, NULL, NULL, NULL, NULL, NULL, NULL, NULL);
+ CHECK_I(ret, "H5Pregister2");
+#if 0
+ /* Check existence of an original property */
+ ret = H5Pexist(cid, H5O_CRT_PIPELINE_NAME);
+ VERIFY(ret, 1, "H5Pexist");
+#endif
+ /* Check existence of added property */
+ ret = H5Pexist(cid, PROP1_NAME);
+ VERIFY(ret, 1, "H5Pexist");
+
+ /* Create a derived dataset creation property list */
+ pid = H5Pcreate(cid);
+ CHECK(pid, FAIL, "H5Pcreate");
+#if 0
+ /* Check existence of an original property */
+ ret = H5Pexist(pid, H5O_CRT_PIPELINE_NAME);
+ VERIFY(ret, 1, "H5Pexist");
+#endif
+ /* Check existence of added property */
+ ret = H5Pexist(pid, PROP1_NAME);
+ VERIFY(ret, 1, "H5Pexist");
+
+ /* Check values of property (set with default value) */
+ ret = H5Pget(pid, PROP1_NAME, &prop1_value);
+ CHECK_I(ret, "H5Pget");
+ VERIFY(prop1_value, *PROP1_DEF_VALUE, "H5Pget");
+
+ /* Insert second property into class (with no callbacks) */
+ ret =
+ H5Pregister2(cid, PROP2_NAME, PROP2_SIZE, PROP2_DEF_VALUE, NULL, NULL, NULL, NULL, NULL, NULL, NULL);
+ CHECK_I(ret, "H5Pregister2");
+#if 0
+ /* Check existence of an original property (in class) */
+ ret = H5Pexist(cid, H5O_CRT_PIPELINE_NAME);
+ VERIFY(ret, 1, "H5Pexist");
+#endif
+ /* Check existence of first added property (in class) */
+ ret = H5Pexist(cid, PROP1_NAME);
+ VERIFY(ret, 1, "H5Pexist");
+
+ /* Check existence of second added property (in class) */
+ ret = H5Pexist(cid, PROP2_NAME);
+ VERIFY(ret, 1, "H5Pexist");
+#if 0
+ /* Check existence of an original property (in property list) */
+ ret = H5Pexist(pid, H5O_CRT_PIPELINE_NAME);
+ VERIFY(ret, 1, "H5Pexist");
+#endif
+ /* Check existence of first added property (in property list) */
+ ret = H5Pexist(pid, PROP1_NAME);
+ VERIFY(ret, 1, "H5Pexist");
+
+ /* Check existence of second added property (in property list) (should not exist) */
+ ret = H5Pexist(pid, PROP2_NAME);
+ VERIFY(ret, 0, "H5Pexist");
+
+ /* Create a dataset */
+ did = H5Dcreate2(fid, "Dataset1", H5T_NATIVE_INT, sid, H5P_DEFAULT, pid, H5P_DEFAULT);
+ CHECK(did, FAIL, "H5Dcreate2");
+
+ /* Check existence of added property (after using property list) */
+ ret = H5Pexist(pid, PROP1_NAME);
+ VERIFY(ret, 1, "H5Pexist");
+
+ /* Check values of property (set with default value) (after using property list) */
+ ret = H5Pget(pid, PROP1_NAME, &prop1_value);
+ CHECK_I(ret, "H5Pget");
+ VERIFY(prop1_value, *PROP1_DEF_VALUE, "H5Pget");
+
+ /* Close property class */
+ ret = H5Pclose_class(cid);
+ CHECK(ret, FAIL, "H5Pclose");
+
+ /* Close property list */
+ ret = H5Pclose(pid);
+ CHECK(ret, FAIL, "H5Pclose");
+
+ /* Close disk dataspace */
+ ret = H5Sclose(sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close Dataset */
+ ret = H5Dclose(did);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close file */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+
+} /* end test_genprop_class_addprop() */
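+
+/* A hypothetical, condensed sketch of the two behaviors verified above: a
+ * class derived from H5P_DATASET_CREATE still produces lists that
+ * H5Dcreate2() accepts, and registering a property on the class after a list
+ * has already been created does not retroactively add the property to that
+ * list. The example_* helper and the "ExampleDataset2" name are illustrative
+ * only; the block is kept under #if 0.
+ */
+#if 0
+static void
+example_derived_dcpl_class(hid_t fid, hid_t sid)
+{
+ hid_t cid;
+ hid_t dcpl;
+ hid_t did;
+
+ cid = H5Pcreate_class(H5P_DATASET_CREATE, CLASS1_NAME, NULL, NULL, NULL, NULL, NULL, NULL);
+ dcpl = H5Pcreate(cid);
+
+ /* PROP2_NAME now exists in the class, but not in the already-created dcpl */
+ H5Pregister2(cid, PROP2_NAME, PROP2_SIZE, PROP2_DEF_VALUE, NULL, NULL, NULL, NULL, NULL, NULL, NULL);
+
+ did = H5Dcreate2(fid, "ExampleDataset2", H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT);
+
+ H5Dclose(did);
+ H5Pclose(dcpl);
+ H5Pclose_class(cid);
+}
+#endif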
+
+/****************************************************************
+**
+** test_genprop_list_add_remove_prop(): Test adding and then removing
+** the same property from a standard HDF5 property list. This also
+** tests for a memory leak that could be caused by not freeing the
+** removed property's resources from the property list.
+**
+****************************************************************/
+static void
+test_genprop_list_add_remove_prop(void)
+{
+ hid_t pid; /* Property List ID */
+ herr_t ret; /* Generic return value */
+
+ /* Create a dataset creation property list */
+ pid = H5Pcreate(H5P_DATASET_CREATE);
+ CHECK(pid, FAIL, "H5Pcreate");
+
+ /* Insert temporary property into list (with no callbacks) */
+ ret = H5Pinsert2(pid, PROP1_NAME, PROP1_SIZE, PROP1_DEF_VALUE, NULL, NULL, NULL, NULL, NULL, NULL);
+ CHECK_I(ret, "H5Pinsert2");
+
+ /* Delete added property */
+ ret = H5Premove(pid, PROP1_NAME);
+ CHECK_I(ret, "H5Premove");
+
+ /* Insert temporary property into list (with no callbacks) */
+ ret = H5Pinsert2(pid, PROP1_NAME, PROP1_SIZE, PROP1_DEF_VALUE, NULL, NULL, NULL, NULL, NULL, NULL);
+ CHECK_I(ret, "H5Pinsert2");
+
+ /* Delete added property */
+ ret = H5Premove(pid, PROP1_NAME);
+ CHECK_I(ret, "H5Premove");
+
+ /* Close property list */
+ ret = H5Pclose(pid);
+ CHECK(ret, FAIL, "H5Pclose");
+
+} /* end test_genprop_list_add_remove_prop() */
+
+/****************************************************************
+**
+** test_genprop_equal(): Test basic generic property list code.
+** More tests for H5Pequal()
+**
+****************************************************************/
+static void
+test_genprop_equal(void)
+{
+ hid_t cid1; /* Generic Property class ID */
+ hid_t lid1; /* Generic Property list ID */
+ hid_t lid2; /* Generic Property list ID */
+ int prop1_new_value = 20; /* Property #1 new value */
+ herr_t ret; /* Generic return value */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing Basic Generic Property List Equal Functionality\n"));
+
+ /* Create a new generic class, derived from the root of the class hierarchy */
+ cid1 = H5Pcreate_class(H5P_ROOT, CLASS1_NAME, NULL, NULL, NULL, NULL, NULL, NULL);
+ CHECK_I(cid1, "H5Pcreate_class");
+
+ /* Insert first property into class (with no callbacks) */
+ ret =
+ H5Pregister2(cid1, PROP1_NAME, PROP1_SIZE, PROP1_DEF_VALUE, NULL, NULL, NULL, NULL, NULL, NULL, NULL);
+ CHECK_I(ret, "H5Pregister2");
+
+ /* Insert second property into class (with no callbacks) */
+ ret =
+ H5Pregister2(cid1, PROP2_NAME, PROP2_SIZE, PROP2_DEF_VALUE, NULL, NULL, NULL, NULL, NULL, NULL, NULL);
+ CHECK_I(ret, "H5Pregister2");
+
+ /* Create a property list from the class */
+ lid1 = H5Pcreate(cid1);
+ CHECK_I(lid1, "H5Pcreate");
+
+ /* Copy the property list */
+ lid2 = H5Pcopy(lid1);
+ CHECK_I(lid2, "H5Pcopy");
+
+ /* Check that the lists are equal */
+ ret = H5Pequal(lid1, lid2);
+ VERIFY(ret, 1, "H5Pequal");
+
+ /* Set property in first list to another value */
+ ret = H5Pset(lid1, PROP1_NAME, &prop1_new_value);
+ CHECK_I(ret, "H5Pset");
+
+ /* Check that the lists are not equal */
+ ret = H5Pequal(lid1, lid2);
+ VERIFY(ret, 0, "H5Pequal");
+
+ /* Set property in first list back to default */
+ ret = H5Pset(lid1, PROP1_NAME, PROP1_DEF_VALUE);
+ CHECK_I(ret, "H5Pset");
+
+ /* Check that the lists are still equal */
+ ret = H5Pequal(lid1, lid2);
+ VERIFY(ret, 1, "H5Pequal");
+
+ /* Insert first temporary property into first list (with no callbacks) */
+ ret = H5Pinsert2(lid1, PROP3_NAME, PROP3_SIZE, PROP3_DEF_VALUE, NULL, NULL, NULL, NULL, NULL, NULL);
+ CHECK_I(ret, "H5Pinsert2");
+
+ /* Check that the lists are not equal */
+ ret = H5Pequal(lid1, lid2);
+ VERIFY(ret, 0, "H5Pequal");
+
+ /* Insert first temporary property into second list (with no callbacks) */
+ ret = H5Pinsert2(lid2, PROP3_NAME, PROP3_SIZE, PROP3_DEF_VALUE, NULL, NULL, NULL, NULL, NULL, NULL);
+ CHECK_I(ret, "H5Pinsert2");
+
+ /* Check that the lists are equal */
+ ret = H5Pequal(lid1, lid2);
+ VERIFY(ret, 1, "H5Pequal");
+
+ /* Insert second temporary property into second list (with no callbacks) */
+ ret = H5Pinsert2(lid2, PROP4_NAME, PROP4_SIZE, PROP4_DEF_VALUE, NULL, NULL, NULL, NULL, NULL, NULL);
+ CHECK_I(ret, "H5Pinsert2");
+
+ /* Check that the lists are not equal */
+ ret = H5Pequal(lid1, lid2);
+ VERIFY(ret, 0, "H5Pequal");
+
+ /* Insert second temporary property into first list (with no callbacks) */
+ ret = H5Pinsert2(lid1, PROP4_NAME, PROP4_SIZE, PROP4_DEF_VALUE, NULL, NULL, NULL, NULL, NULL, NULL);
+ CHECK_I(ret, "H5Pinsert2");
+
+ /* Check that the lists are equal */
+ ret = H5Pequal(lid1, lid2);
+ VERIFY(ret, 1, "H5Pequal");
+
+ /* Remove first temporary property from first list */
+ ret = H5Premove(lid1, PROP3_NAME);
+ CHECK_I(ret, "H5Premove");
+
+ /* Check that the lists are not equal */
+ ret = H5Pequal(lid1, lid2);
+ VERIFY(ret, 0, "H5Pequal");
+
+ /* Remove second temporary property from second list */
+ ret = H5Premove(lid2, PROP4_NAME);
+ CHECK_I(ret, "H5Premove");
+
+ /* Check that the lists are not equal */
+ ret = H5Pequal(lid1, lid2);
+ VERIFY(ret, 0, "H5Pequal");
+
+ /* Remove first temporary property from second list */
+ ret = H5Premove(lid2, PROP3_NAME);
+ CHECK_I(ret, "H5Premove");
+
+ /* Check that the lists are not equal */
+ ret = H5Pequal(lid1, lid2);
+ VERIFY(ret, 0, "H5Pequal");
+
+ /* Remove first permanent property from first list */
+ ret = H5Premove(lid1, PROP1_NAME);
+ CHECK_I(ret, "H5Premove");
+
+ /* Check that the lists are not equal */
+ ret = H5Pequal(lid1, lid2);
+ VERIFY(ret, 0, "H5Pequal");
+
+ /* Remove second temporary property from first list */
+ ret = H5Premove(lid1, PROP4_NAME);
+ CHECK_I(ret, "H5Premove");
+
+ /* Check that the lists are not equal */
+ ret = H5Pequal(lid1, lid2);
+ VERIFY(ret, 0, "H5Pequal");
+
+ /* Remove first permanent property from second list */
+ ret = H5Premove(lid2, PROP1_NAME);
+ CHECK_I(ret, "H5Premove");
+
+ /* Check that the lists are equal */
+ ret = H5Pequal(lid1, lid2);
+ VERIFY(ret, 1, "H5Pequal");
+
+ /* Close property lists */
+ ret = H5Pclose(lid1);
+ CHECK_I(ret, "H5Pclose");
+ ret = H5Pclose(lid2);
+ CHECK_I(ret, "H5Pclose");
+
+ /* Close class */
+ ret = H5Pclose_class(cid1);
+ CHECK_I(ret, "H5Pclose_class");
+} /* end test_genprop_equal() */
+
+/****************************************************************
+**
+** test_genprop_path(): Test basic generic property list code.
+** Tests for class paths
+**
+****************************************************************/
+static void
+test_genprop_path(void)
+{
+ hid_t cid1; /* Generic Property class ID */
+ hid_t cid2; /* Generic Property class ID */
+#if 0
+ hid_t cid3; /* Generic Property class ID */
+ char *path; /* Class path */
+#endif
+ herr_t ret; /* Generic return value */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing Generic Property List Class Path Functionality\n"));
+
+ /* Create a new generic class, derived from the root of the class hierarchy */
+ cid1 = H5Pcreate_class(H5P_ROOT, CLASS1_NAME, NULL, NULL, NULL, NULL, NULL, NULL);
+ CHECK_I(cid1, "H5Pcreate_class");
+
+ /* Insert first property into class (with no callbacks) */
+ ret =
+ H5Pregister2(cid1, PROP1_NAME, PROP1_SIZE, PROP1_DEF_VALUE, NULL, NULL, NULL, NULL, NULL, NULL, NULL);
+ CHECK_I(ret, "H5Pregister2");
+#if 0
+ /* Get full path for first class */
+ path = H5P__get_class_path_test(cid1);
+ CHECK_PTR(path, "H5P__get_class_path_test");
+ if (HDstrcmp(path, CLASS1_PATH) != 0)
+ TestErrPrintf("Class names don't match!, path=%s, CLASS1_PATH=%s\n", path, CLASS1_PATH);
+ H5free_memory(path);
+#endif
+ /* Create another new generic class, derived from first class */
+ cid2 = H5Pcreate_class(cid1, CLASS2_NAME, NULL, NULL, NULL, NULL, NULL, NULL);
+ CHECK_I(cid2, "H5Pcreate_class");
+
+ /* Insert second property into class (with no callbacks) */
+ ret =
+ H5Pregister2(cid2, PROP2_NAME, PROP2_SIZE, PROP2_DEF_VALUE, NULL, NULL, NULL, NULL, NULL, NULL, NULL);
+ CHECK_I(ret, "H5Pregister2");
+#if 0
+ /* Get full path for second class */
+ path = H5P__get_class_path_test(cid2);
+ CHECK_PTR(path, "H5P__get_class_path_test");
+ if (HDstrcmp(path, CLASS2_PATH) != 0)
+ TestErrPrintf("Class names don't match!, path=%s, CLASS2_PATH=%s\n", path, CLASS2_PATH);
+
+ /* Open a copy of the class with the path name */
+ cid3 = H5P__open_class_path_test(path);
+ CHECK_I(cid3, "H5P__open_class_path_test");
+
+ /* Check that the classes are equal */
+ ret = H5Pequal(cid2, cid3);
+ VERIFY(ret, 1, "H5Pequal");
+
+ /* Release the path string */
+ H5free_memory(path);
+
+ /* Close class */
+ ret = H5Pclose_class(cid3);
+ CHECK_I(ret, "H5Pclose_class");
+#endif
+ /* Close class */
+ ret = H5Pclose_class(cid1);
+ CHECK_I(ret, "H5Pclose_class");
+
+ /* Close class */
+ ret = H5Pclose_class(cid2);
+ CHECK_I(ret, "H5Pclose_class");
+
+} /* end test_genprop_path() */
+
+/****************************************************************
+**
+** test_genprop_refcount(): Test basic generic property list code.
+** Tests for correct reference counting
+**
+****************************************************************/
+static void
+test_genprop_refcount(void)
+{
+ hid_t cid1; /* Generic Property class ID */
+ hid_t lid1; /* Generic Property list ID */
+ char *name; /* Name of class */
+ herr_t ret; /* Generic return value */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing Generic Property List Reference Count Functionality\n"));
+
+ /* Create a new generic class, derived from the root of the class hierarchy */
+ cid1 = H5Pcreate_class(H5P_ROOT, CLASS1_NAME, NULL, NULL, NULL, NULL, NULL, NULL);
+ CHECK_I(cid1, "H5Pcreate_class");
+
+ /* Insert first property into class (with no callbacks) */
+ ret =
+ H5Pregister2(cid1, PROP1_NAME, PROP1_SIZE, PROP1_DEF_VALUE, NULL, NULL, NULL, NULL, NULL, NULL, NULL);
+ CHECK_I(ret, "H5Pregister2");
+
+ /* Create a new generic property list from the class */
+ lid1 = H5Pcreate(cid1);
+ CHECK_I(lid1, "H5Pcreate");
+
+ /* Check class name */
+ name = H5Pget_class_name(cid1);
+ CHECK_PTR(name, "H5Pget_class_name");
+ if (HDstrcmp(name, CLASS1_NAME) != 0)
+ TestErrPrintf("Class names don't match!, name=%s, CLASS1_NAME=%s\n", name, CLASS1_NAME);
+ H5free_memory(name);
+
+ /* Close class */
+ ret = H5Pclose_class(cid1);
+ CHECK_I(ret, "H5Pclose_class");
+
+ /* Get the list's class */
+ cid1 = H5Pget_class(lid1);
+ CHECK_I(cid1, "H5Pget_class");
+
+ /* Check correct "is a" class/list relationship */
+ ret = H5Pisa_class(lid1, cid1);
+ VERIFY(ret, 1, "H5Pisa_class");
+
+ /* Check class name */
+ name = H5Pget_class_name(cid1);
+ CHECK_PTR(name, "H5Pget_class_name");
+ if (HDstrcmp(name, CLASS1_NAME) != 0)
+ TestErrPrintf("Class names don't match!, name=%s, CLASS1_NAME=%s\n", name, CLASS1_NAME);
+ H5free_memory(name);
+
+ /* Close list */
+ ret = H5Pclose(lid1);
+ CHECK_I(ret, "H5Pclose");
+
+ /* Check class name */
+ name = H5Pget_class_name(cid1);
+ CHECK_PTR(name, "H5Pget_class_name");
+ if (HDstrcmp(name, CLASS1_NAME) != 0)
+ TestErrPrintf("Class names don't match!, name=%s, CLASS1_NAME=%s\n", name, CLASS1_NAME);
+ H5free_memory(name);
+
+ /* Close class */
+ ret = H5Pclose_class(cid1);
+ CHECK_I(ret, "H5Pclose_class");
+
+} /* end test_genprop_refcount() */
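+
+/* A hypothetical, condensed sketch of the reference counting verified above:
+ * closing the application's class ID does not destroy the class while a list
+ * created from it is still open, so the class can be re-acquired from the
+ * list and its name queried. The example_* helper is illustrative only and
+ * kept under #if 0.
+ */
+#if 0
+static void
+example_class_refcount(void)
+{
+ hid_t cid;
+ hid_t lid;
+ char *name;
+
+ cid = H5Pcreate_class(H5P_ROOT, CLASS1_NAME, NULL, NULL, NULL, NULL, NULL, NULL);
+ lid = H5Pcreate(cid);
+
+ H5Pclose_class(cid); /* The open list still holds a reference to the class */
+
+ cid = H5Pget_class(lid); /* Re-acquire a class ID from the list */
+ name = H5Pget_class_name(cid); /* Still reports CLASS1_NAME */
+ H5free_memory(name);
+
+ H5Pclose(lid);
+ H5Pclose_class(cid);
+}
+#endif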
+
+#ifndef H5_NO_DEPRECATED_SYMBOLS
+/****************************************************************
+**
+** test_genprop_deprec_class(): Test basic generic property list code.
+** Tests deprecated property class API routines.
+**
+****************************************************************/
+static void
+test_genprop_deprec_class(void)
+{
+ hid_t cid1; /* Generic Property class ID */
+ size_t size; /* Size of property */
+ size_t nprops; /* Number of properties in class */
+ herr_t ret; /* Generic return value */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing Deprecated Generic Property List Functions\n"));
+
+ /* Create a new generic class, derived from the root of the class hierarchy */
+ cid1 = H5Pcreate_class(H5P_ROOT, CLASS1_NAME, NULL, NULL, NULL, NULL, NULL, NULL);
+ CHECK_I(cid1, "H5Pcreate_class");
+
+ /* Check the number of properties in class */
+ ret = H5Pget_nprops(cid1, &nprops);
+ CHECK_I(ret, "H5Pget_nprops");
+ VERIFY(nprops, 0, "H5Pget_nprops");
+
+ /* Check the existence of the first property (should fail) */
+ ret = H5Pexist(cid1, PROP1_NAME);
+ VERIFY(ret, 0, "H5Pexist");
+
+ /* Insert first property into class (with no callbacks) */
+ ret = H5Pregister1(cid1, PROP1_NAME, PROP1_SIZE, PROP1_DEF_VALUE, NULL, NULL, NULL, NULL, NULL, NULL);
+ CHECK_I(ret, "H5Pregister1");
+
+ /* Try to insert the first property again (should fail) */
+ H5E_BEGIN_TRY
+ {
+ ret = H5Pregister1(cid1, PROP1_NAME, PROP1_SIZE, PROP1_DEF_VALUE, NULL, NULL, NULL, NULL, NULL, NULL);
+ }
+ H5E_END_TRY;
+ VERIFY(ret, FAIL, "H5Pregister1");
+
+ /* Check the existence of the first property */
+ ret = H5Pexist(cid1, PROP1_NAME);
+ VERIFY(ret, 1, "H5Pexist");
+
+ /* Check the size of the first property */
+ ret = H5Pget_size(cid1, PROP1_NAME, &size);
+ CHECK_I(ret, "H5Pget_size");
+ VERIFY(size, PROP1_SIZE, "H5Pget_size");
+
+ /* Check the number of properties in class */
+ ret = H5Pget_nprops(cid1, &nprops);
+ CHECK_I(ret, "H5Pget_nprops");
+ VERIFY(nprops, 1, "H5Pget_nprops");
+
+ /* Insert second property into class (with no callbacks) */
+ ret = H5Pregister1(cid1, PROP2_NAME, PROP2_SIZE, PROP2_DEF_VALUE, NULL, NULL, NULL, NULL, NULL, NULL);
+ CHECK_I(ret, "H5Pregister1");
+
+ /* Try to insert the second property again (should fail) */
+ H5E_BEGIN_TRY
+ {
+ ret = H5Pregister1(cid1, PROP2_NAME, PROP2_SIZE, PROP2_DEF_VALUE, NULL, NULL, NULL, NULL, NULL, NULL);
+ }
+ H5E_END_TRY;
+ VERIFY(ret, FAIL, "H5Pregister1");
+
+ /* Check the existence of the second property */
+ ret = H5Pexist(cid1, PROP2_NAME);
+ VERIFY(ret, 1, "H5Pexist");
+
+ /* Check the size of the second property */
+ ret = H5Pget_size(cid1, PROP2_NAME, &size);
+ CHECK_I(ret, "H5Pget_size");
+ VERIFY(size, PROP2_SIZE, "H5Pget_size");
+
+ /* Check the number of properties in class */
+ ret = H5Pget_nprops(cid1, &nprops);
+ CHECK_I(ret, "H5Pget_nprops");
+ VERIFY(nprops, 2, "H5Pget_nprops");
+
+ /* Insert third property into class (with no callbacks) */
+ ret = H5Pregister1(cid1, PROP3_NAME, PROP3_SIZE, PROP3_DEF_VALUE, NULL, NULL, NULL, NULL, NULL, NULL);
+ CHECK_I(ret, "H5Pregister1");
+
+ /* Check the existence of the third property */
+ ret = H5Pexist(cid1, PROP3_NAME);
+ VERIFY(ret, 1, "H5Pexist");
+
+ /* Check the size of the third property */
+ ret = H5Pget_size(cid1, PROP3_NAME, &size);
+ CHECK_I(ret, "H5Pget_size");
+ VERIFY(size, PROP3_SIZE, "H5Pget_size");
+
+ /* Check the number of properties in class */
+ ret = H5Pget_nprops(cid1, &nprops);
+ CHECK_I(ret, "H5Pget_nprops");
+ VERIFY(nprops, 3, "H5Pget_nprops");
+
+ /* Unregister first property */
+ ret = H5Punregister(cid1, PROP1_NAME);
+ CHECK_I(ret, "H5Punregister");
+
+ /* Try to check the size of the first property (should fail) */
+ H5E_BEGIN_TRY
+ {
+ ret = H5Pget_size(cid1, PROP1_NAME, &size);
+ }
+ H5E_END_TRY;
+ VERIFY(ret, FAIL, "H5Pget_size");
+
+ /* Check the number of properties in class */
+ ret = H5Pget_nprops(cid1, &nprops);
+ CHECK_I(ret, "H5Pget_nprops");
+ VERIFY(nprops, 2, "H5Pget_nprops");
+
+ /* Unregister second property */
+ ret = H5Punregister(cid1, PROP2_NAME);
+ CHECK_I(ret, "H5Punregister");
+
+ /* Check the number of properties in class */
+ ret = H5Pget_nprops(cid1, &nprops);
+ CHECK_I(ret, "H5Pget_nprops");
+ VERIFY(nprops, 1, "H5Pget_nprops");
+
+ /* Unregister third property */
+ ret = H5Punregister(cid1, PROP3_NAME);
+ CHECK_I(ret, "H5Punregister");
+
+ /* Check the number of properties in class */
+ ret = H5Pget_nprops(cid1, &nprops);
+ CHECK_I(ret, "H5Pget_nprops");
+ VERIFY(nprops, 0, "H5Pget_nprops");
+
+ /* Close class */
+ ret = H5Pclose_class(cid1);
+ CHECK_I(ret, "H5Pclose_class");
+} /* end test_genprop_deprec_class() */
+
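+#if 0
+/* Illustrative sketch (not compiled): the deprecated H5Pregister1 call used
+ * above differs from H5Pregister2 only in that it lacks the 'compare'
+ * callback argument (between 'copy' and 'close').  The class ID, property
+ * names, size and default value below are placeholders, not part of the test.
+ */
+static void
+sketch_register_compare(hid_t cls_id)
+{
+ int def_val = 10;
+
+ /* Deprecated form: create/set/get/delete/copy/close callbacks */
+ (void)H5Pregister1(cls_id, "sketch_prop1", sizeof(int), &def_val, NULL, NULL, NULL, NULL, NULL, NULL);
+
+ /* Current form: same callbacks plus a 'compare' callback before 'close' */
+ (void)H5Pregister2(cls_id, "sketch_prop2", sizeof(int), &def_val, NULL, NULL, NULL, NULL, NULL, NULL,
+ NULL);
+}
+#endif
+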
+/****************************************************************
+**
+** test_genprop_deprec_list(): Test basic generic property list code.
+** Tests deprecated property list API routines.
+**
+****************************************************************/
+static void
+test_genprop_deprec_list(void)
+{
+ hid_t fid; /* File ID */
+ hid_t did; /* Dataset ID */
+ hid_t sid; /* Dataspace ID */
+ hid_t pid; /* Property List ID */
+ int prop1_value; /* Value for property #1 */
+ herr_t ret; /* Generic return value */
+
+ /* Create file */
+ fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(fid, FAIL, "H5Fcreate");
+
+ /* Create scalar dataspace for dataset */
+ sid = H5Screate(H5S_SCALAR);
+ CHECK(sid, FAIL, "H5Screate");
+
+ /* Create a dataset creation property list */
+ pid = H5Pcreate(H5P_DATASET_CREATE);
+ CHECK(pid, FAIL, "H5Pcreate");
+
+ /* Insert temporary property into class (with no callbacks) */
+ ret = H5Pinsert1(pid, PROP1_NAME, PROP1_SIZE, PROP1_DEF_VALUE, NULL, NULL, NULL, NULL, NULL);
+ CHECK_I(ret, "H5Pinsert1");
+
+ /* Check existence of added property */
+ ret = H5Pexist(pid, PROP1_NAME);
+ VERIFY(ret, 1, "H5Pexist");
+
+ /* Check values of property (set with default value) */
+ ret = H5Pget(pid, PROP1_NAME, &prop1_value);
+ CHECK_I(ret, "H5Pget");
+ VERIFY(prop1_value, *PROP1_DEF_VALUE, "H5Pget");
+
+ /* Create a dataset */
+ did = H5Dcreate2(fid, "Dataset1", H5T_NATIVE_INT, sid, H5P_DEFAULT, pid, H5P_DEFAULT);
+ CHECK(did, FAIL, "H5Dcreate2");
+
+ /* Check existence of added property (after using property list) */
+ ret = H5Pexist(pid, PROP1_NAME);
+ VERIFY(ret, 1, "H5Pexist");
+
+ /* Check values of property (set with default value) (after using property list) */
+ ret = H5Pget(pid, PROP1_NAME, &prop1_value);
+ CHECK_I(ret, "H5Pget");
+ VERIFY(prop1_value, *PROP1_DEF_VALUE, "H5Pget");
+
+ /* Close property list */
+ ret = H5Pclose(pid);
+ CHECK(ret, FAIL, "H5Pclose");
+
+ /* Close disk dataspace */
+ ret = H5Sclose(sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close Dataset */
+ ret = H5Dclose(did);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close file */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+} /* end test_genprop_deprec_list() */
+#endif /* H5_NO_DEPRECATED_SYMBOLS */
+
+/****************************************************************
+**
+** test_genprop(): Main generic property testing routine.
+**
+****************************************************************/
+void
+test_genprop(void)
+{
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing Generic Properties\n"));
+
+ /* These tests use the same file... */
+ test_genprop_basic_class(); /* Test basic code for creating a generic class */
+ test_genprop_basic_class_prop(); /* Test basic code for adding properties to a generic class */
+ test_genprop_class_iter(); /* Test code for iterating over properties in a generic class */
+ test_genprop_class_callback(); /* Test code for property class callbacks */
+
+ test_genprop_basic_list(); /* Test basic code for creating a generic property list */
+ test_genprop_basic_list_prop(); /* Test basic code for adding properties to a generic property list */
+ test_genprop_list_iter(); /* Test basic code for iterating over properties in a generic property list */
+ test_genprop_list_callback(); /* Test code for property list callbacks */
+
+ test_genprop_list_addprop(); /* Test adding properties to HDF5 property list */
+ test_genprop_class_addprop(); /* Test adding properties to HDF5 property class */
+
+ test_genprop_list_add_remove_prop(); /* Test adding and removing the same property several times to HDF5
+ property list */
+
+ test_genprop_equal(); /* Tests for more H5Pequal verification */
+ test_genprop_path(); /* Tests for class path verification */
+ test_genprop_refcount(); /* Tests for class reference counting */
+
+#ifndef H5_NO_DEPRECATED_SYMBOLS
+ test_genprop_deprec_class(); /* Tests for deprecated routines */
+ test_genprop_deprec_list(); /* Tests for deprecated routines */
+#endif /* H5_NO_DEPRECATED_SYMBOLS */
+
+} /* test_genprop() */
+
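+#if 0
+/* Illustrative sketch (not compiled): the minimal generic-property workflow
+ * that the sub-tests above exercise, using only public H5P calls.  The class
+ * name, property name and values are placeholders.
+ */
+static void
+sketch_genprop_workflow(void)
+{
+ int def_val = 10;
+ int value = 0;
+ hid_t cls, lst;
+
+ /* Create a class derived from the root and register one property on it */
+ cls = H5Pcreate_class(H5P_ROOT, "sketch class", NULL, NULL, NULL, NULL, NULL, NULL);
+ H5Pregister2(cls, "sketch prop", sizeof(int), &def_val, NULL, NULL, NULL, NULL, NULL, NULL, NULL);
+
+ /* Lists created from the class pick up the registered property */
+ lst = H5Pcreate(cls);
+ H5Pget(lst, "sketch prop", &value); /* value == def_val */
+
+ H5Pclose(lst);
+ H5Pclose_class(cls);
+}
+#endif
+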
+/*-------------------------------------------------------------------------
+ * Function: cleanup_genprop
+ *
+ * Purpose: Cleanup temporary test files
+ *
+ * Return: none
+ *
+ * Programmer: Quincey Koziol
+ * June 8, 1999
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+void
+cleanup_genprop(void)
+{
+ H5Fdelete(FILENAME, H5P_DEFAULT);
+}
diff --git a/test/API/th5o.c b/test/API/th5o.c
new file mode 100644
index 0000000..916f005
--- /dev/null
+++ b/test/API/th5o.c
@@ -0,0 +1,1889 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://www.hdfgroup.org/licenses. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/***********************************************************
+ *
+ * Test program: th5o
+ *
+ * Test public H5O functions for accessing objects
+ *
+ *************************************************************/
+
+#include "testhdf5.h"
+
+#if 0
+#include "H5Fprivate.h"
+#include "H5VLprivate.h"
+#include "H5VLnative_private.h"
+#endif
+
+#define TEST_FILENAME "th5o_file.h5"
+
+#define RANK 2
+#define DIM0 5
+#define DIM1 10
+
+#define TEST6_DIM1 100
+#define TEST6_DIM2 100
+
+/****************************************************************
+**
+** test_h5o_open(): Test H5Oopen function.
+**
+****************************************************************/
+static void
+test_h5o_open(void)
+{
+ hid_t fid; /* HDF5 File ID */
+ hid_t grp, dset, dtype, dspace; /* Object identifiers */
+ char filename[1024];
+ hsize_t dims[RANK];
+ H5I_type_t id_type; /* Type of IDs returned from H5Oopen */
+ H5G_info_t ginfo; /* Group info struct */
+ H5T_class_t type_class; /* Class of the datatype */
+ herr_t ret; /* Value returned from API calls */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing H5Oopen\n"));
+
+ h5_fixname(TEST_FILENAME, H5P_DEFAULT, filename, sizeof filename);
+
+ /* Create a new HDF5 file */
+ fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(fid, FAIL, "H5Fcreate");
+
+ /* Create a group, dataset, and committed datatype within the file */
+ /* Create the group */
+ grp = H5Gcreate2(fid, "group", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(grp, FAIL, "H5Gcreate2");
+ ret = H5Gclose(grp);
+ CHECK(ret, FAIL, "H5Gclose");
+
+ /* Commit the type inside the group */
+ dtype = H5Tcopy(H5T_NATIVE_INT);
+ CHECK(dtype, FAIL, "H5Tcopy");
+ ret = H5Tcommit2(fid, "group/datatype", dtype, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Tcommit2");
+ ret = H5Tclose(dtype);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ /* Create the data space for the dataset. */
+ dims[0] = DIM0;
+ dims[1] = DIM1;
+ dspace = H5Screate_simple(RANK, dims, NULL);
+ CHECK(dspace, FAIL, "H5Screate_simple");
+
+ /* Create the dataset. */
+ dset = H5Dcreate2(fid, "dataset", H5T_NATIVE_INT, dspace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(dset, FAIL, "H5Dcreate2");
+ ret = H5Dclose(dset);
+ CHECK(ret, FAIL, "H5Dclose");
+ ret = H5Sclose(dspace);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Now make sure that H5Oopen can open all three types of objects */
+ grp = H5Oopen(fid, "group", H5P_DEFAULT);
+ CHECK(grp, FAIL, "H5Oopen");
+ dtype = H5Oopen(fid, "group/datatype", H5P_DEFAULT);
+ CHECK(dtype, FAIL, "H5Oopen");
+ /* Check that we can use the group as a valid location */
+ dset = H5Oopen(grp, "/dataset", H5P_DEFAULT);
+ CHECK(dset, FAIL, "H5Oopen");
+
+ /* Make sure that each is the right kind of ID */
+ id_type = H5Iget_type(grp);
+ VERIFY(id_type, H5I_GROUP, "H5Iget_type for group ID");
+ id_type = H5Iget_type(dtype);
+ VERIFY(id_type, H5I_DATATYPE, "H5Iget_type for datatype ID");
+ id_type = H5Iget_type(dset);
+ VERIFY(id_type, H5I_DATASET, "H5Iget_type for dataset ID");
+
+ /* Do something more complex with each of the IDs to make sure they "work" */
+ ret = H5Gget_info(grp, &ginfo);
+ CHECK(ret, FAIL, "H5Gget_info");
+ VERIFY(ginfo.nlinks, 1, "H5Gget_info"); /* There should be one object, the datatype */
+
+ type_class = H5Tget_class(dtype);
+ VERIFY(type_class, H5T_INTEGER, "H5Tget_class");
+
+ dspace = H5Dget_space(dset);
+ CHECK(dspace, FAIL, "H5Dget_space");
+
+ /* Close the IDs */
+ ret = H5Sclose(dspace);
+ CHECK(ret, FAIL, "H5Sclose");
+ ret = H5Gclose(grp);
+ CHECK(ret, FAIL, "H5Gclose");
+ ret = H5Tclose(dtype);
+ CHECK(ret, FAIL, "H5Tclose");
+ ret = H5Dclose(dset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Trying to open objects with bogus names should fail gracefully */
+ H5E_BEGIN_TRY
+ {
+ grp = H5Oopen(fid, "bogus_group", H5P_DEFAULT);
+ VERIFY(grp, FAIL, "H5Oopen");
+ dtype = H5Oopen(fid, "group/bogus_datatype", H5P_DEFAULT);
+ VERIFY(dtype, FAIL, "H5Oopen");
+ dset = H5Oopen(fid, "/bogus_dataset", H5P_DEFAULT);
+ VERIFY(dset, FAIL, "H5Oopen");
+ }
+ H5E_END_TRY
+
+ /* Close the file */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Trying to open an object with a bogus file ID should fail */
+ H5E_BEGIN_TRY
+ {
+ dset = H5Oopen(fid, "dataset", H5P_DEFAULT);
+ VERIFY(dset, FAIL, "H5Oopen");
+ }
+ H5E_END_TRY
+} /* test_h5o_open() */
+
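+#if 0
+/* Illustrative sketch (not compiled): H5Oopen returns an ID whose type matches
+ * the target object, so a caller can branch on H5Iget_type before using it.
+ * 'loc_id' and the object name are placeholders.
+ */
+static void
+sketch_open_any(hid_t loc_id)
+{
+ hid_t obj = H5Oopen(loc_id, "some_object", H5P_DEFAULT);
+
+ if (obj >= 0) {
+ if (H5Iget_type(obj) == H5I_DATASET) {
+ /* safe to use dataset-specific calls here, e.g. H5Dget_space() */
+ }
+ H5Oclose(obj); /* H5Oclose works for groups, datasets and committed datatypes */
+ }
+}
+#endif
+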
+/****************************************************************
+**
+** test_h5o_close(): Test H5Oclose function.
+**
+****************************************************************/
+static void
+test_h5o_close(void)
+{
+ hid_t fid; /* HDF5 File ID */
+ hid_t grp, dset, dtype, dspace; /* Object identifiers */
+ char filename[1024];
+ hsize_t dims[RANK];
+ herr_t ret; /* Value returned from API calls */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing H5Oclose\n"));
+
+ h5_fixname(TEST_FILENAME, H5P_DEFAULT, filename, sizeof filename);
+
+ /* Create a new HDF5 file */
+ fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(fid, FAIL, "H5Fcreate");
+
+ /* Create a group, dataset, and committed datatype within the file */
+ /* Create the group and close it with H5Oclose */
+ grp = H5Gcreate2(fid, "group", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(grp, FAIL, "H5Gcreate2");
+ VERIFY_TYPE(H5Iget_type(grp), H5I_GROUP, H5I_type_t, "%d", "H5Iget_type");
+ ret = H5Oclose(grp);
+ CHECK(ret, FAIL, "H5Oclose");
+
+ /* Commit the type inside the group */
+ dtype = H5Tcopy(H5T_NATIVE_INT);
+ CHECK(dtype, FAIL, "H5Tcopy");
+ ret = H5Tcommit2(fid, "group/datatype", dtype, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Tcommit2");
+ ret = H5Oclose(dtype);
+ CHECK(ret, FAIL, "H5Oclose");
+
+ /* Create the data space for the dataset. */
+ dims[0] = DIM0;
+ dims[1] = DIM1;
+ dspace = H5Screate_simple(RANK, dims, NULL);
+ CHECK(dspace, FAIL, "H5Screate_simple");
+
+ /* Create the dataset. */
+ dset = H5Dcreate2(fid, "dataset", H5T_NATIVE_INT, dspace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(dset, FAIL, "H5Dcreate2");
+ ret = H5Oclose(dset);
+ CHECK(ret, FAIL, "H5Oclose");
+
+ /* Attempting to close the data space with H5Oclose should fail */
+ H5E_BEGIN_TRY
+ {
+ ret = H5Oclose(dspace);
+ VERIFY(ret, FAIL, "H5Oclose");
+ }
+ H5E_END_TRY
+ /* Close the dataspace for real */
+ ret = H5Sclose(dspace);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Make sure that H5Oclose can close objects opened with H5Oopen */
+ grp = H5Oopen(fid, "group", H5P_DEFAULT);
+ CHECK(grp, FAIL, "H5Oopen");
+ dtype = H5Oopen(fid, "group/datatype", H5P_DEFAULT);
+ CHECK(dtype, FAIL, "H5Oopen");
+ dset = H5Oopen(fid, "dataset", H5P_DEFAULT);
+ CHECK(dset, FAIL, "H5Oopen");
+
+ ret = H5Oclose(grp);
+ CHECK(ret, FAIL, "H5Oclose");
+ ret = H5Oclose(dtype);
+ CHECK(ret, FAIL, "H5Oclose");
+ ret = H5Oclose(dset);
+ CHECK(ret, FAIL, "H5Oclose");
+
+ /* Make sure H5Oclose can close objects opened with H5*open */
+ grp = H5Gopen2(fid, "group", H5P_DEFAULT);
+ CHECK(grp, FAIL, "H5Gopen2");
+ dtype = H5Topen2(fid, "group/datatype", H5P_DEFAULT);
+ CHECK(dtype, FAIL, "H5Topen2");
+ dset = H5Dopen2(fid, "dataset", H5P_DEFAULT);
+ CHECK(dset, FAIL, "H5Dopen2");
+
+ ret = H5Oclose(grp);
+ CHECK(ret, FAIL, "H5Oclose");
+ ret = H5Oclose(dtype);
+ CHECK(ret, FAIL, "H5Oclose");
+ ret = H5Oclose(dset);
+ CHECK(ret, FAIL, "H5Oclose");
+
+ /* Close the file */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+} /* test_h5o_close() */
+
+#if 0
+#ifndef H5_NO_DEPRECATED_SYMBOLS
+/****************************************************************
+**
+** test_h5o_open_by_addr(): Test H5Oopen_by_addr function.
+**
+****************************************************************/
+static void
+test_h5o_open_by_addr(void)
+{
+ hid_t fid; /* HDF5 File ID */
+ hid_t grp, dset, dtype, dspace; /* Object identifiers */
+ char filename[1024];
+ H5L_info2_t li; /* Buffer for H5Lget_info2 */
+ haddr_t grp_addr; /* Addresses for objects */
+ haddr_t dset_addr;
+ haddr_t dtype_addr;
+ hsize_t dims[RANK];
+ H5I_type_t id_type; /* Type of IDs returned from H5Oopen */
+ H5G_info_t ginfo; /* Group info struct */
+ H5T_class_t type_class; /* Class of the datatype */
+ herr_t ret; /* Value returned from API calls */
+
+ h5_fixname(TEST_FILENAME, H5P_DEFAULT, filename, sizeof filename);
+
+ /* Create a new HDF5 file */
+ fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(fid, FAIL, "H5Fcreate");
+
+ /* Create a group, dataset, and committed datatype within the file */
+ /* Create the group */
+ grp = H5Gcreate2(fid, "group", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(grp, FAIL, "H5Gcreate2");
+ ret = H5Gclose(grp);
+ CHECK(ret, FAIL, "H5Gclose");
+
+ /* Commit the type inside the group */
+ dtype = H5Tcopy(H5T_NATIVE_INT);
+ CHECK(dtype, FAIL, "H5Tcopy");
+ ret = H5Tcommit2(fid, "group/datatype", dtype, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Tcommit2");
+ ret = H5Tclose(dtype);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ /* Create the data space for the dataset. */
+ dims[0] = DIM0;
+ dims[1] = DIM1;
+ dspace = H5Screate_simple(RANK, dims, NULL);
+ CHECK(dspace, FAIL, "H5Screate_simple");
+
+ /* Create the dataset. */
+ dset = H5Dcreate2(fid, "dataset", H5T_NATIVE_INT, dspace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(dset, FAIL, "H5Dcreate2");
+ ret = H5Dclose(dset);
+ CHECK(ret, FAIL, "H5Dclose");
+ ret = H5Sclose(dspace);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Get address for each object */
+ ret = H5Lget_info2(fid, "group", &li, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Lget_info2");
+ ret = H5VLnative_token_to_addr(fid, li.u.token, &grp_addr);
+ CHECK(ret, FAIL, "H5VLnative_token_to_addr");
+
+ ret = H5Lget_info2(fid, "group/datatype", &li, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Lget_info2");
+ ret = H5VLnative_token_to_addr(fid, li.u.token, &dtype_addr);
+ CHECK(ret, FAIL, "H5VLnative_token_to_addr");
+
+ ret = H5Lget_info2(fid, "dataset", &li, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Lget_info2");
+ ret = H5VLnative_token_to_addr(fid, li.u.token, &dset_addr);
+ CHECK(ret, FAIL, "H5VLnative_token_to_addr");
+
+ /* Now make sure that H5Oopen_by_addr can open all three types of objects */
+ grp = H5Oopen_by_addr(fid, grp_addr);
+ CHECK(grp, FAIL, "H5Oopen_by_addr");
+ dtype = H5Oopen_by_addr(fid, dtype_addr);
+ CHECK(dtype, FAIL, "H5Oopen_by_addr");
+ /* Check that we can use the group ID as a valid location */
+ dset = H5Oopen_by_addr(grp, dset_addr);
+ CHECK(dset, FAIL, "H5Oopen_by_addr");
+
+ /* Make sure that each is the right kind of ID */
+ id_type = H5Iget_type(grp);
+ VERIFY(id_type, H5I_GROUP, "H5Iget_type for group ID");
+ id_type = H5Iget_type(dtype);
+ VERIFY(id_type, H5I_DATATYPE, "H5Iget_type for datatype ID");
+ id_type = H5Iget_type(dset);
+ VERIFY(id_type, H5I_DATASET, "H5Iget_type for dataset ID");
+
+ /* Do something more complex with each of the IDs to make sure they "work" */
+ ret = H5Gget_info(grp, &ginfo);
+ CHECK(ret, FAIL, "H5Gget_info");
+ VERIFY(ginfo.nlinks, 1, "H5Gget_info"); /* There should be one object, the datatype */
+
+ type_class = H5Tget_class(dtype);
+ VERIFY(type_class, H5T_INTEGER, "H5Tget_class");
+
+ dspace = H5Dget_space(dset);
+ CHECK(dspace, FAIL, "H5Dget_space");
+
+ /* Close the IDs */
+ ret = H5Sclose(dspace);
+ CHECK(ret, FAIL, "H5Sclose");
+ ret = H5Gclose(grp);
+ CHECK(ret, FAIL, "H5Gclose");
+ ret = H5Tclose(dtype);
+ CHECK(ret, FAIL, "H5Tclose");
+ ret = H5Dclose(dset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Try giving some bogus values to H5Oopen_by_addr. */
+ /* Try to open an object with a bad address */
+ grp_addr += 20;
+ H5E_BEGIN_TRY
+ {
+ grp = H5Oopen_by_addr(fid, grp_addr);
+ }
+ H5E_END_TRY
+ VERIFY(grp, FAIL, "H5Oopen_by_addr");
+
+ /* For instance, an address smaller than the end of the file's superblock should
+ * trigger an error */
+ grp_addr = 10;
+ H5E_BEGIN_TRY
+ {
+ grp = H5Oopen_by_addr(fid, grp_addr);
+ }
+ H5E_END_TRY
+ VERIFY(grp, FAIL, "H5Oopen_by_addr");
+
+ /* Likewise, an address larger than the size of the file should fail */
+ grp_addr = 1000000000;
+ H5E_BEGIN_TRY
+ {
+ grp = H5Oopen_by_addr(fid, grp_addr);
+ }
+ H5E_END_TRY
+ VERIFY(grp, FAIL, "H5Oopen_by_addr");
+
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Also, trying to open an object without a valid location should fail */
+ H5E_BEGIN_TRY
+ {
+ dtype = H5Oopen_by_addr(fid, dtype_addr);
+ }
+ H5E_END_TRY
+ VERIFY(dtype, FAIL, "H5Oopen_by_addr");
+} /* test_h5o_open_by_addr() */
+#endif /* H5_NO_DEPRECATED_SYMBOLS */
+#endif
+
+/****************************************************************
+**
+** test_h5o_open_by_token(): Test H5Oopen_by_token function.
+**
+****************************************************************/
+static void
+test_h5o_open_by_token(void)
+{
+ hid_t fid; /* HDF5 File ID */
+ hid_t grp, dset, dtype, dspace; /* Object identifiers */
+ char filename[1024];
+ H5L_info2_t li; /* Buffer for H5Lget_info2 */
+ hsize_t dims[RANK];
+ H5I_type_t id_type; /* Type of IDs returned from H5Oopen */
+ H5G_info_t ginfo; /* Group info struct */
+ H5T_class_t type_class; /* Class of the datatype */
+ herr_t ret; /* Value returned from API calls */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing H5Oopen_by_token\n"));
+
+ h5_fixname(TEST_FILENAME, H5P_DEFAULT, filename, sizeof filename);
+
+ /* Create a new HDF5 file */
+ fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(fid, FAIL, "H5Fcreate");
+
+ /* Create a group, dataset, and committed datatype within the file */
+ /* Create the group */
+ grp = H5Gcreate2(fid, "group", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(grp, FAIL, "H5Gcreate2");
+ ret = H5Gclose(grp);
+ CHECK(ret, FAIL, "H5Gclose");
+
+ /* Commit the type inside the group */
+ dtype = H5Tcopy(H5T_NATIVE_INT);
+ CHECK(dtype, FAIL, "H5Tcopy");
+ ret = H5Tcommit2(fid, "group/datatype", dtype, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Tcommit2");
+ ret = H5Tclose(dtype);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ /* Create the data space for the dataset. */
+ dims[0] = DIM0;
+ dims[1] = DIM1;
+ dspace = H5Screate_simple(RANK, dims, NULL);
+ CHECK(dspace, FAIL, "H5Screate_simple");
+
+ /* Create the dataset. */
+ dset = H5Dcreate2(fid, "dataset", H5T_NATIVE_INT, dspace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(dset, FAIL, "H5Dcreate2");
+ ret = H5Dclose(dset);
+ CHECK(ret, FAIL, "H5Dclose");
+ ret = H5Sclose(dspace);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Make sure that H5Oopen_by_token can open all three types of objects */
+ ret = H5Lget_info2(fid, "group", &li, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Lget_info2");
+ grp = H5Oopen_by_token(fid, li.u.token);
+ CHECK(grp, FAIL, "H5Oopen_by_token");
+
+ ret = H5Lget_info2(fid, "group/datatype", &li, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Lget_info2");
+ dtype = H5Oopen_by_token(fid, li.u.token);
+ CHECK(dtype, FAIL, "H5Oopen_by_token");
+
+ ret = H5Lget_info2(fid, "dataset", &li, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Lget_info2");
+ /* Check that we can use the group ID as a valid location */
+ dset = H5Oopen_by_token(grp, li.u.token);
+ CHECK(dset, FAIL, "H5Oopen_by_token");
+
+ /* Make sure that each is the right kind of ID */
+ id_type = H5Iget_type(grp);
+ VERIFY(id_type, H5I_GROUP, "H5Iget_type for group ID");
+ id_type = H5Iget_type(dtype);
+ VERIFY(id_type, H5I_DATATYPE, "H5Iget_type for datatype ID");
+ id_type = H5Iget_type(dset);
+ VERIFY(id_type, H5I_DATASET, "H5Iget_type for dataset ID");
+
+ /* Do something more complex with each of the IDs to make sure they "work" */
+ ret = H5Gget_info(grp, &ginfo);
+ CHECK(ret, FAIL, "H5Gget_info");
+ VERIFY(ginfo.nlinks, 1, "H5Gget_info"); /* There should be one object, the datatype */
+
+ type_class = H5Tget_class(dtype);
+ VERIFY(type_class, H5T_INTEGER, "H5Tget_class");
+
+ dspace = H5Dget_space(dset);
+ CHECK(dspace, FAIL, "H5Dget_space");
+
+ /* Close the IDs */
+ ret = H5Sclose(dspace);
+ CHECK(ret, FAIL, "H5Sclose");
+ ret = H5Gclose(grp);
+ CHECK(ret, FAIL, "H5Gclose");
+ ret = H5Tclose(dtype);
+ CHECK(ret, FAIL, "H5Tclose");
+ ret = H5Dclose(dset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Try giving some bogus values to H5Oopen_by_token */
+ /* Try opening an object using H5O_TOKEN_UNDEF (should fail) */
+ H5E_BEGIN_TRY
+ {
+ dtype = H5Oopen_by_token(fid, H5O_TOKEN_UNDEF);
+ }
+ H5E_END_TRY
+ VERIFY(dtype, FAIL, "H5Oopen_by_token");
+
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Also, trying to open an object without a valid location should fail */
+ H5E_BEGIN_TRY
+ {
+ dtype = H5Oopen_by_token(fid, li.u.token);
+ }
+ H5E_END_TRY
+ VERIFY(dtype, FAIL, "H5Oopen_by_token");
+
+} /* test_h5o_open_by_token() */
+
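+#if 0
+/* Illustrative sketch (not compiled): the token pattern the test above
+ * exercises -- look a link up with H5Lget_info2, then reopen its target from
+ * the stored token with H5Oopen_by_token.  'loc_id' and the link name are
+ * placeholders.
+ */
+static void
+sketch_open_by_token(hid_t loc_id)
+{
+ H5L_info2_t li;
+
+ if (H5Lget_info2(loc_id, "some_object", &li, H5P_DEFAULT) >= 0) {
+ hid_t obj = H5Oopen_by_token(loc_id, li.u.token); /* any ID in the same file works */
+
+ if (obj >= 0)
+ H5Oclose(obj);
+ }
+}
+#endif
+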
+/****************************************************************
+**
+** test_h5o_refcount(): Test H5O refcounting functions.
+**
+****************************************************************/
+static void
+test_h5o_refcount(void)
+{
+ hid_t fid; /* HDF5 File ID */
+ hid_t grp, dset, dtype, dspace; /* Object identifiers */
+ char filename[1024];
+ H5O_info2_t oinfo; /* Object info struct */
+ hsize_t dims[RANK];
+ herr_t ret; /* Value returned from API calls */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing retrieval of object reference count with H5Oget_info\n"));
+
+ h5_fixname(TEST_FILENAME, H5P_DEFAULT, filename, sizeof filename);
+
+ /* Create a new HDF5 file */
+ fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(fid, FAIL, "H5Fcreate");
+
+ /* Create a group, dataset, and committed datatype within the file */
+ /* Create the group */
+ grp = H5Gcreate2(fid, "group", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(grp, FAIL, "H5Gcreate2");
+
+ /* Commit the type inside the group */
+ dtype = H5Tcopy(H5T_NATIVE_INT);
+ CHECK(dtype, FAIL, "H5Tcopy");
+ ret = H5Tcommit2(fid, "datatype", dtype, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Tcommit2");
+
+ /* Create the data space for the dataset. */
+ dims[0] = DIM0;
+ dims[1] = DIM1;
+ dspace = H5Screate_simple(RANK, dims, NULL);
+ CHECK(dspace, FAIL, "H5Screate_simple");
+
+ /* Create the dataset. */
+ dset = H5Dcreate2(fid, "dataset", H5T_NATIVE_INT, dspace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(dset, FAIL, "H5Dcreate2");
+ ret = H5Sclose(dspace);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Get ref counts for each object. They should all be 1, since each object has a hard link. */
+ ret = H5Oget_info_by_name3(fid, "group", &oinfo, H5O_INFO_BASIC, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Oget_info_by_name3");
+ VERIFY(oinfo.rc, 1, "reference count in H5Oget_info_by_name3");
+ ret = H5Oget_info_by_name3(fid, "datatype", &oinfo, H5O_INFO_BASIC, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Oget_info_by_name3");
+ VERIFY(oinfo.rc, 1, "reference count in H5Oget_info_by_name3");
+ ret = H5Oget_info_by_name3(fid, "dataset", &oinfo, H5O_INFO_BASIC, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Oget_info_by_name3");
+ VERIFY(oinfo.rc, 1, "reference count in H5Oget_info_by_name3");
+
+ /* Increment each object's reference count. */
+ ret = H5Oincr_refcount(grp);
+ CHECK(ret, FAIL, "H5Oincr_refcount");
+ ret = H5Oincr_refcount(dtype);
+ CHECK(ret, FAIL, "H5Oincr_refcount");
+ ret = H5Oincr_refcount(dset);
+ CHECK(ret, FAIL, "H5Oincr_refcount");
+
+ /* Get ref counts for each object. They should all be 2 now. */
+ ret = H5Oget_info_by_name3(fid, "group", &oinfo, H5O_INFO_BASIC, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Oget_info_by_name3");
+ VERIFY(oinfo.rc, 2, "reference count in H5Oget_info_by_name3");
+ ret = H5Oget_info_by_name3(fid, "datatype", &oinfo, H5O_INFO_BASIC, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Oget_info_by_name3");
+ VERIFY(oinfo.rc, 2, "reference count in H5Oget_info_by_name3");
+ ret = H5Oget_info_by_name3(fid, "dataset", &oinfo, H5O_INFO_BASIC, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Oget_info_by_name3");
+ VERIFY(oinfo.rc, 2, "reference count in H5Oget_info_by_name3");
+
+ /* Decrement the reference counts and check that they decrease back to 1. */
+ ret = H5Odecr_refcount(grp);
+ CHECK(ret, FAIL, "H5Odecr_refcount");
+ ret = H5Odecr_refcount(dtype);
+ CHECK(ret, FAIL, "H5Odecr_refcount");
+ ret = H5Odecr_refcount(dset);
+ CHECK(ret, FAIL, "H5Odecr_refcount");
+
+ ret = H5Oget_info_by_name3(fid, "group", &oinfo, H5O_INFO_BASIC, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Oget_info_by_name3");
+ VERIFY(oinfo.rc, 1, "reference count in H5Oget_info_by_name3");
+ ret = H5Oget_info_by_name3(fid, "datatype", &oinfo, H5O_INFO_BASIC, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Oget_info_by_name3");
+ VERIFY(oinfo.rc, 1, "reference count in H5Oget_info_by_name3");
+ ret = H5Oget_info_by_name3(fid, "dataset", &oinfo, H5O_INFO_BASIC, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Oget_info_by_name3");
+ VERIFY(oinfo.rc, 1, "reference count in H5Oget_info_by_name3");
+
+ /* Increment the reference counts and then close the file to make sure the increment is permanent */
+ ret = H5Oincr_refcount(grp);
+ CHECK(ret, FAIL, "H5Oincr_refcount");
+ ret = H5Oincr_refcount(dtype);
+ CHECK(ret, FAIL, "H5Oincr_refcount");
+ ret = H5Oincr_refcount(dset);
+ CHECK(ret, FAIL, "H5Oincr_refcount");
+
+ ret = H5Gclose(grp);
+ CHECK(ret, FAIL, "H5Gclose");
+ ret = H5Tclose(dtype);
+ CHECK(ret, FAIL, "H5Tclose");
+ ret = H5Dclose(dset);
+ CHECK(ret, FAIL, "H5Dclose");
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Re-open the file and check that the reference counts were really incremented */
+ fid = H5Fopen(filename, H5F_ACC_RDWR, H5P_DEFAULT);
+ CHECK(fid, FAIL, "H5Fopen");
+
+ grp = H5Gopen2(fid, "group", H5P_DEFAULT);
+ CHECK(grp, FAIL, "H5Gopen2");
+ dtype = H5Topen2(fid, "datatype", H5P_DEFAULT);
+ CHECK(dtype, FAIL, "H5Topen2");
+ dset = H5Dopen2(fid, "dataset", H5P_DEFAULT);
+ CHECK(dset, FAIL, "H5Dopen2");
+
+ ret = H5Oget_info_by_name3(fid, "group", &oinfo, H5O_INFO_BASIC, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Oget_info_by_name3");
+ VERIFY(oinfo.rc, 2, "reference count in H5Oget_info_by_name3");
+ ret = H5Oget_info_by_name3(fid, "datatype", &oinfo, H5O_INFO_BASIC, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Oget_info_by_name3");
+ VERIFY(oinfo.rc, 2, "reference count in H5Oget_info_by_name3");
+ ret = H5Oget_info_by_name3(fid, "dataset", &oinfo, H5O_INFO_BASIC, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Oget_info_by_name3");
+ VERIFY(oinfo.rc, 2, "reference count in H5Oget_info_by_name3");
+
+ /* Decrement the reference counts and close the file */
+ ret = H5Odecr_refcount(grp);
+ CHECK(ret, FAIL, "H5Odecr_refcount");
+ ret = H5Odecr_refcount(dtype);
+ CHECK(ret, FAIL, "H5Odecr_refcount");
+ ret = H5Odecr_refcount(dset);
+ CHECK(ret, FAIL, "H5Odecr_refcount");
+
+ ret = H5Gclose(grp);
+ CHECK(ret, FAIL, "H5Gclose");
+ ret = H5Tclose(dtype);
+ CHECK(ret, FAIL, "H5Tclose");
+ ret = H5Dclose(dset);
+ CHECK(ret, FAIL, "H5Dclose");
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Re-open the file and check that the reference counts were really decremented */
+ fid = H5Fopen(filename, H5F_ACC_RDWR, H5P_DEFAULT);
+ CHECK(fid, FAIL, "H5Fopen");
+
+ grp = H5Gopen2(fid, "group", H5P_DEFAULT);
+ CHECK(grp, FAIL, "H5Gopen2");
+ dtype = H5Topen2(fid, "datatype", H5P_DEFAULT);
+ CHECK(dtype, FAIL, "H5Topen2");
+ dset = H5Dopen2(fid, "dataset", H5P_DEFAULT);
+ CHECK(dset, FAIL, "H5Dopen2");
+
+ ret = H5Oget_info_by_name3(fid, "group", &oinfo, H5O_INFO_BASIC, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Oget_info_by_name3");
+ VERIFY(oinfo.rc, 1, "reference count in H5Oget_info_by_name3");
+ ret = H5Oget_info_by_name3(fid, "datatype", &oinfo, H5O_INFO_BASIC, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Oget_info_by_name3");
+ VERIFY(oinfo.rc, 1, "reference count in H5Oget_info_by_name3");
+ ret = H5Oget_info_by_name3(fid, "dataset", &oinfo, H5O_INFO_BASIC, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Oget_info_by_name3");
+ VERIFY(oinfo.rc, 1, "reference count in H5Oget_info_by_name3");
+
+ /* Close the IDs */
+ ret = H5Gclose(grp);
+ CHECK(ret, FAIL, "H5Gclose");
+ ret = H5Tclose(dtype);
+ CHECK(ret, FAIL, "H5Tclose");
+ ret = H5Dclose(dset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Make sure that bogus IDs return errors properly */
+ H5E_BEGIN_TRY
+ {
+ ret = H5Oincr_refcount(grp);
+ VERIFY(ret, FAIL, "H5Oincr_refcount");
+ ret = H5Oincr_refcount(dtype);
+ VERIFY(ret, FAIL, "H5Oincr_refcount");
+ ret = H5Oincr_refcount(dset);
+ VERIFY(ret, FAIL, "H5Oincr_refcount");
+ ret = H5Odecr_refcount(grp);
+ VERIFY(ret, FAIL, "H5Odecr_refcount");
+ ret = H5Odecr_refcount(dtype);
+ VERIFY(ret, FAIL, "H5Odecr_refcount");
+ ret = H5Odecr_refcount(dset);
+ VERIFY(ret, FAIL, "H5Odecr_refcount");
+ }
+ H5E_END_TRY
+
+ /* Close the file */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+} /* test_h5o_refcount() */
+
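+#if 0
+/* Illustrative sketch (not compiled): the reference-count calls exercised
+ * above must stay balanced -- an extra H5Oincr_refcount without a matching
+ * H5Odecr_refcount (or link removal) leaves the object permanently allocated
+ * in the file.  'obj_id' is a placeholder.
+ */
+static void
+sketch_refcount_balance(hid_t obj_id)
+{
+ H5Oincr_refcount(obj_id); /* object now has one reference beyond its hard link */
+ /* ... temporary use of the extra reference ... */
+ H5Odecr_refcount(obj_id); /* restore the original count before closing */
+}
+#endif
+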
+/****************************************************************
+**
+** test_h5o_plist(): Test object creation properties
+**
+****************************************************************/
+static void
+test_h5o_plist(void)
+{
+ hid_t fid; /* HDF5 File ID */
+ hid_t grp, dset, dtype, dspace; /* Object identifiers */
+ hid_t fapl; /* File access property list */
+ hid_t gcpl, dcpl, tcpl; /* Object creation properties */
+ char filename[1024];
+ unsigned def_max_compact, def_min_dense; /* Default phase change parameters */
+ unsigned max_compact, min_dense; /* Actual phase change parameters */
+ herr_t ret; /* Value returned from API calls */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing Object creation properties\n"));
+
+ /* Make a FAPL that uses the "use the latest version of the format" flag */
+ fapl = H5Pcreate(H5P_FILE_ACCESS);
+ CHECK(fapl, FAIL, "H5Pcreate");
+
+ /* Set the "use the latest version of the format" bounds for creating objects in the file */
+ ret = H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST);
+ CHECK(ret, FAIL, "H5Pset_libver_bounds");
+
+ h5_fixname(TEST_FILENAME, fapl, filename, sizeof filename);
+
+ /* Create a new HDF5 file */
+ fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
+ CHECK(fid, FAIL, "H5Fcreate");
+
+ /* Create group, dataset & named datatype creation property lists */
+ gcpl = H5Pcreate(H5P_GROUP_CREATE);
+ CHECK(gcpl, FAIL, "H5Pcreate");
+ dcpl = H5Pcreate(H5P_DATASET_CREATE);
+ CHECK(dcpl, FAIL, "H5Pcreate");
+ tcpl = H5Pcreate(H5P_DATATYPE_CREATE);
+ CHECK(tcpl, FAIL, "H5Pcreate");
+
+ /* Retrieve default attribute phase change values */
+ ret = H5Pget_attr_phase_change(gcpl, &def_max_compact, &def_min_dense);
+ CHECK(ret, FAIL, "H5Pget_attr_phase_change");
+
+ /* Set non-default attribute phase change values on each creation property list */
+ ret = H5Pset_attr_phase_change(gcpl, def_max_compact + 1, def_min_dense - 1);
+ CHECK(ret, FAIL, "H5Pset_attr_phase_change");
+ ret = H5Pset_attr_phase_change(dcpl, def_max_compact + 1, def_min_dense - 1);
+ CHECK(ret, FAIL, "H5Pset_attr_phase_change");
+ ret = H5Pset_attr_phase_change(tcpl, def_max_compact + 1, def_min_dense - 1);
+ CHECK(ret, FAIL, "H5Pset_attr_phase_change");
+
+ /* Retrieve attribute phase change values on each creation property list and verify */
+ ret = H5Pget_attr_phase_change(gcpl, &max_compact, &min_dense);
+ CHECK(ret, FAIL, "H5Pget_attr_phase_change");
+ VERIFY(max_compact, (def_max_compact + 1), "H5Pget_attr_phase_change");
+ VERIFY(min_dense, (def_min_dense - 1), "H5Pget_attr_phase_change");
+ ret = H5Pget_attr_phase_change(dcpl, &max_compact, &min_dense);
+ CHECK(ret, FAIL, "H5Pget_attr_phase_change");
+ VERIFY(max_compact, (def_max_compact + 1), "H5Pget_attr_phase_change");
+ VERIFY(min_dense, (def_min_dense - 1), "H5Pget_attr_phase_change");
+ ret = H5Pget_attr_phase_change(tcpl, &max_compact, &min_dense);
+ CHECK(ret, FAIL, "H5Pget_attr_phase_change");
+ VERIFY(max_compact, (def_max_compact + 1), "H5Pget_attr_phase_change");
+ VERIFY(min_dense, (def_min_dense - 1), "H5Pget_attr_phase_change");
+
+ /* Create a group, dataset, and committed datatype within the file,
+ * using the respective type of creation property lists.
+ */
+
+ /* Create the group anonymously and link it in */
+ grp = H5Gcreate_anon(fid, gcpl, H5P_DEFAULT);
+ CHECK(grp, FAIL, "H5Gcreate_anon");
+ ret = H5Olink(grp, fid, "group", H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Olink");
+
+ /* Commit the type inside the group anonymously and link it in */
+ dtype = H5Tcopy(H5T_NATIVE_INT);
+ CHECK(dtype, FAIL, "H5Tcopy");
+ ret = H5Tcommit_anon(fid, dtype, tcpl, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Tcommit_anon");
+ ret = H5Olink(dtype, fid, "datatype", H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Olink");
+
+ /* Create the dataspace for the dataset. */
+ dspace = H5Screate(H5S_SCALAR);
+ CHECK(dspace, FAIL, "H5Screate");
+
+ /* Create the dataset anonymously and link it in */
+ dset = H5Dcreate_anon(fid, H5T_NATIVE_INT, dspace, dcpl, H5P_DEFAULT);
+ CHECK(dset, FAIL, "H5Dcreate_anon");
+ ret = H5Olink(dset, fid, "dataset", H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Olink");
+ ret = H5Sclose(dspace);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close current creation property lists */
+ ret = H5Pclose(gcpl);
+ CHECK(ret, FAIL, "H5Pclose");
+ ret = H5Pclose(dcpl);
+ CHECK(ret, FAIL, "H5Pclose");
+ ret = H5Pclose(tcpl);
+ CHECK(ret, FAIL, "H5Pclose");
+
+ /* Retrieve each object's creation property list */
+ gcpl = H5Gget_create_plist(grp);
+ CHECK(gcpl, FAIL, "H5Gget_create_plist");
+ tcpl = H5Tget_create_plist(dtype);
+ CHECK(tcpl, FAIL, "H5Tget_create_plist");
+ dcpl = H5Dget_create_plist(dset);
+ CHECK(dcpl, FAIL, "H5Dget_create_plist");
+
+ /* Retrieve attribute phase change values on each creation property list and verify */
+ ret = H5Pget_attr_phase_change(gcpl, &max_compact, &min_dense);
+ CHECK(ret, FAIL, "H5Pget_attr_phase_change");
+ VERIFY(max_compact, (def_max_compact + 1), "H5Pget_attr_phase_change");
+ VERIFY(min_dense, (def_min_dense - 1), "H5Pget_attr_phase_change");
+ ret = H5Pget_attr_phase_change(dcpl, &max_compact, &min_dense);
+ CHECK(ret, FAIL, "H5Pget_attr_phase_change");
+ VERIFY(max_compact, (def_max_compact + 1), "H5Pget_attr_phase_change");
+ VERIFY(min_dense, (def_min_dense - 1), "H5Pget_attr_phase_change");
+ ret = H5Pget_attr_phase_change(tcpl, &max_compact, &min_dense);
+ CHECK(ret, FAIL, "H5Pget_attr_phase_change");
+ VERIFY(max_compact, (def_max_compact + 1), "H5Pget_attr_phase_change");
+ VERIFY(min_dense, (def_min_dense - 1), "H5Pget_attr_phase_change");
+
+ /* Close current objects */
+ ret = H5Pclose(gcpl);
+ CHECK(ret, FAIL, "H5Pclose");
+ ret = H5Pclose(dcpl);
+ CHECK(ret, FAIL, "H5Pclose");
+ ret = H5Pclose(tcpl);
+ CHECK(ret, FAIL, "H5Pclose");
+ ret = H5Gclose(grp);
+ CHECK(ret, FAIL, "H5Gclose");
+ ret = H5Tclose(dtype);
+ CHECK(ret, FAIL, "H5Tclose");
+ ret = H5Dclose(dset);
+ CHECK(ret, FAIL, "H5Dclose");
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Re-open the file and check that the object creation properties persist */
+ fid = H5Fopen(filename, H5F_ACC_RDONLY, fapl);
+ CHECK(fid, FAIL, "H5Fopen");
+
+ /* Re-open objects */
+ grp = H5Gopen2(fid, "group", H5P_DEFAULT);
+ CHECK(grp, FAIL, "H5Gopen2");
+ dtype = H5Topen2(fid, "datatype", H5P_DEFAULT);
+ CHECK(dtype, FAIL, "H5Topen2");
+ dset = H5Dopen2(fid, "dataset", H5P_DEFAULT);
+ CHECK(dset, FAIL, "H5Dopen2");
+
+ /* Retrieve each object's creation property list */
+ gcpl = H5Gget_create_plist(grp);
+ CHECK(gcpl, FAIL, "H5Gget_create_plist");
+ tcpl = H5Tget_create_plist(dtype);
+ CHECK(tcpl, FAIL, "H5Tget_create_plist");
+ dcpl = H5Dget_create_plist(dset);
+ CHECK(dcpl, FAIL, "H5Dget_create_plist");
+
+ /* Retrieve attribute phase change values on each creation property list and verify */
+ ret = H5Pget_attr_phase_change(gcpl, &max_compact, &min_dense);
+ CHECK(ret, FAIL, "H5Pget_attr_phase_change");
+ VERIFY(max_compact, (def_max_compact + 1), "H5Pget_attr_phase_change");
+ VERIFY(min_dense, (def_min_dense - 1), "H5Pget_attr_phase_change");
+ ret = H5Pget_attr_phase_change(dcpl, &max_compact, &min_dense);
+ CHECK(ret, FAIL, "H5Pget_attr_phase_change");
+ VERIFY(max_compact, (def_max_compact + 1), "H5Pget_attr_phase_change");
+ VERIFY(min_dense, (def_min_dense - 1), "H5Pget_attr_phase_change");
+ ret = H5Pget_attr_phase_change(tcpl, &max_compact, &min_dense);
+ CHECK(ret, FAIL, "H5Pget_attr_phase_change");
+ VERIFY(max_compact, (def_max_compact + 1), "H5Pget_attr_phase_change");
+ VERIFY(min_dense, (def_min_dense - 1), "H5Pget_attr_phase_change");
+
+ /* Close current objects */
+ ret = H5Pclose(gcpl);
+ CHECK(ret, FAIL, "H5Pclose");
+ ret = H5Pclose(dcpl);
+ CHECK(ret, FAIL, "H5Pclose");
+ ret = H5Pclose(tcpl);
+ CHECK(ret, FAIL, "H5Pclose");
+ ret = H5Gclose(grp);
+ CHECK(ret, FAIL, "H5Gclose");
+ ret = H5Tclose(dtype);
+ CHECK(ret, FAIL, "H5Tclose");
+ ret = H5Dclose(dset);
+ CHECK(ret, FAIL, "H5Dclose");
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Close the FAPL */
+ ret = H5Pclose(fapl);
+ CHECK(ret, FAIL, "H5Pclose");
+} /* test_h5o_plist() */
+
+/****************************************************************
+**
+** test_h5o_link(): Test creating link to object
+**
+****************************************************************/
+static void
+test_h5o_link(void)
+{
+ hid_t file_id = -1;
+ hid_t group_id = -1;
+ hid_t space_id = -1;
+ hid_t dset_id = -1;
+ hid_t type_id = -1;
+ hid_t fapl_id = -1;
+ hid_t lcpl_id = -1;
+ char filename[1024];
+ hsize_t dims[2] = {TEST6_DIM1, TEST6_DIM2};
+ htri_t committed; /* Whether the named datatype is committed */
+ H5F_libver_t low, high; /* File format bounds */
+ int *wdata;
+ int *rdata;
+ int i, n;
+ herr_t ret; /* Value returned from API calls */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing H5Olink\n"));
+
+ h5_fixname(TEST_FILENAME, H5P_DEFAULT, filename, sizeof filename);
+
+ /* Allocate memory buffers */
+ /* (These are treated as 2-D buffers) */
+ wdata = (int *)HDmalloc((size_t)(TEST6_DIM1 * TEST6_DIM2) * sizeof(int));
+ CHECK_PTR(wdata, "HDmalloc");
+ rdata = (int *)HDmalloc((size_t)(TEST6_DIM1 * TEST6_DIM2) * sizeof(int));
+ CHECK_PTR(rdata, "HDmalloc");
+
+ /* Initialize the raw data */
+ for (i = n = 0; i < (TEST6_DIM1 * TEST6_DIM2); i++)
+ wdata[i] = n++;
+
+ /* Create the dataspace */
+ space_id = H5Screate_simple(2, dims, NULL);
+ CHECK(space_id, FAIL, "H5Screate_simple");
+
+ /* Create LCPL with intermediate group creation flag set */
+ lcpl_id = H5Pcreate(H5P_LINK_CREATE);
+ CHECK(lcpl_id, FAIL, "H5Pcreate");
+ ret = H5Pset_create_intermediate_group(lcpl_id, TRUE);
+ CHECK(ret, FAIL, "H5Pset_create_intermediate_group");
+
+ /* Create a file access property list */
+ fapl_id = H5Pcreate(H5P_FILE_ACCESS);
+ CHECK(fapl_id, FAIL, "H5Pcreate");
+
+ /* Loop through all the combinations of low/high library format bounds */
+ for (low = H5F_LIBVER_EARLIEST; low < H5F_LIBVER_NBOUNDS; low++) {
+ for (high = H5F_LIBVER_EARLIEST; high < H5F_LIBVER_NBOUNDS; high++) {
+
+ /* Set version bounds */
+ H5E_BEGIN_TRY
+ {
+ ret = H5Pset_libver_bounds(fapl_id, low, high);
+ }
+ H5E_END_TRY;
+
+ if (ret < 0) /* Invalid low/high combinations */
+ continue;
+
+ /* Create a new HDF5 file */
+ file_id = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id);
+ CHECK(file_id, FAIL, "H5Fcreate");
+
+ /* Create and commit a datatype with no name */
+ type_id = H5Tcopy(H5T_NATIVE_INT);
+ CHECK(type_id, FAIL, "H5Tcopy");
+ ret = H5Tcommit_anon(file_id, type_id, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Tcommit_anon");
+ committed = H5Tcommitted(type_id);
+ VERIFY(committed, TRUE, "H5Tcommitted");
+
+ /* Create a dataset with no name using the committed datatype */
+ dset_id = H5Dcreate_anon(file_id, type_id, space_id, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(dset_id, FAIL, "H5Dcreate_anon");
+
+ /* Verify that we can write to and read from the dataset */
+
+ /* Write the data to the dataset */
+ ret = H5Dwrite(dset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, wdata);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* Read the data back */
+ ret = H5Dread(dset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, rdata);
+ CHECK(ret, FAIL, "H5Dread");
+
+ /* Verify the data */
+ for (i = 0; i < (TEST6_DIM1 * TEST6_DIM2); i++)
+ VERIFY(wdata[i], rdata[i], "H5Dread");
+
+ /* Create a group with no name */
+ group_id = H5Gcreate_anon(file_id, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(group_id, FAIL, "H5Gcreate_anon");
+
+ /* Link nameless datatype into nameless group */
+ ret = H5Olink(type_id, group_id, "datatype", H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Olink");
+
+ /* Link nameless dataset into nameless group with intermediate group */
+ ret = H5Olink(dset_id, group_id, "inter_group/dataset", lcpl_id, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Olink");
+
+ /* Close IDs for dataset and datatype */
+ ret = H5Dclose(dset_id);
+ CHECK(ret, FAIL, "H5Dclose");
+ ret = H5Tclose(type_id);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ /* Re-open datatype using new link */
+ type_id = H5Topen2(group_id, "datatype", H5P_DEFAULT);
+ CHECK(type_id, FAIL, "H5Topen2");
+
+ /* Link nameless group to root group and close the group ID */
+ ret = H5Olink(group_id, file_id, "/group", H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Olink");
+ ret = H5Gclose(group_id);
+ CHECK(ret, FAIL, "H5Gclose");
+
+ /* Open dataset through root group and verify its data */
+ dset_id = H5Dopen2(file_id, "/group/inter_group/dataset", H5P_DEFAULT);
+ CHECK(dset_id, FAIL, "H5Dopen2");
+
+ /* Read data from dataset */
+ ret = H5Dread(dset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, rdata);
+ CHECK(ret, FAIL, "H5Dread");
+ for (i = 0; i < (TEST6_DIM1 * TEST6_DIM2); i++)
+ VERIFY(wdata[i], rdata[i], "H5Dread");
+
+ /* Close open IDs */
+ ret = H5Dclose(dset_id);
+ CHECK(ret, FAIL, "H5Dclose");
+ ret = H5Tclose(type_id);
+ CHECK(ret, FAIL, "H5Tclose");
+ ret = H5Fclose(file_id);
+ CHECK(ret, FAIL, "H5Fclose");
+ } /* for high */
+ } /* for low */
+
+ /* Close remaining IDs */
+ ret = H5Sclose(space_id);
+ CHECK(ret, FAIL, "H5Sclose");
+ ret = H5Pclose(lcpl_id);
+ CHECK(ret, FAIL, "H5Pclose");
+ ret = H5Pclose(fapl_id);
+ CHECK(ret, FAIL, "H5Pclose");
+
+ /* Release buffers */
+ HDfree(wdata);
+ HDfree(rdata);
+} /* end test_h5o_link() */
+
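+#if 0
+/* Illustrative sketch (not compiled): the anonymous-create/link pattern the
+ * loop above exercises -- an object created with one of the *_anon calls has
+ * no name until H5Olink gives it one.  'loc_id' and the link name are
+ * placeholders.
+ */
+static void
+sketch_anon_link(hid_t loc_id)
+{
+ hid_t grp = H5Gcreate_anon(loc_id, H5P_DEFAULT, H5P_DEFAULT);
+
+ if (grp >= 0) {
+ /* Until this call the group is reachable only through 'grp' */
+ H5Olink(grp, loc_id, "named_group", H5P_DEFAULT, H5P_DEFAULT);
+ H5Gclose(grp);
+ }
+}
+#endif
+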
+#if 0
+/****************************************************************
+**
+** test_h5o_comment(): Test H5Oset(get)_comment functions.
+**
+****************************************************************/
+static void
+test_h5o_comment(void)
+{
+ hid_t fid; /* HDF5 File ID */
+ hid_t grp, dset, dtype, dspace; /* Object identifiers */
+ hid_t attr_space, attr_id;
+ char filename[1024];
+ hsize_t dims[RANK];
+ hsize_t attr_dims = 1;
+ int attr_value = 5;
+ const char *file_comment = "file comment";
+ const char *grp_comment = "group comment";
+ const char *dset_comment = "dataset comment";
+ const char *dtype_comment = "datatype comment";
+ char check_comment[64];
+ ssize_t comment_len = 0;
+ ssize_t len;
+ herr_t ret; /* Value returned from API calls */
+ int ret_value;
+
+ h5_fixname(TEST_FILENAME, H5P_DEFAULT, filename, sizeof filename);
+
+ /* Create a new HDF5 file */
+ fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(fid, FAIL, "H5Fcreate");
+
+ /* Create an attribute for the file */
+ attr_space = H5Screate_simple(1, &attr_dims, NULL);
+ CHECK(attr_space, FAIL, "H5Screate_simple");
+ attr_id = H5Acreate2(fid, "file attribute", H5T_NATIVE_INT, attr_space, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(attr_id, FAIL, "H5Acreate2");
+ ret = H5Awrite(attr_id, H5T_NATIVE_INT, &attr_value);
+ CHECK(ret, FAIL, "H5Awrite");
+
+ /* Putting a comment on the file through its attribute */
+ ret = H5Oset_comment(attr_id, file_comment);
+ CHECK(ret, FAIL, "H5Oset_comment");
+
+ ret = H5Sclose(attr_space);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ ret = H5Aclose(attr_id);
+ CHECK(ret, FAIL, "H5Aclose");
+
+ /* Create a group, dataset, and committed datatype within the file */
+ /* Create the group */
+ grp = H5Gcreate2(fid, "group", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(grp, FAIL, "H5Gcreate2");
+
+ /* Putting a comment on the group */
+ ret = H5Oset_comment(grp, grp_comment);
+ CHECK(ret, FAIL, "H5Oset_comment");
+
+ ret = H5Gclose(grp);
+ CHECK(ret, FAIL, "H5Gclose");
+
+ /* Commit the type inside the group */
+ dtype = H5Tcopy(H5T_NATIVE_INT);
+ CHECK(dtype, FAIL, "H5Tcopy");
+ ret = H5Tcommit2(fid, "group/datatype", dtype, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Tcommit2");
+
+ /* Putting a comment on the committed data type */
+ ret = H5Oset_comment(dtype, dtype_comment);
+ CHECK(ret, FAIL, "H5Oset_comment");
+
+ ret = H5Tclose(dtype);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ /* Create the data space for the dataset. */
+ dims[0] = DIM0;
+ dims[1] = DIM1;
+ dspace = H5Screate_simple(RANK, dims, NULL);
+ CHECK(dspace, FAIL, "H5Screate_simple");
+
+ /* Create the dataset. */
+ dset = H5Dcreate2(fid, "dataset", H5T_NATIVE_INT, dspace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(dset, FAIL, "H5Dcreate2");
+
+ /* Putting a comment on the dataset */
+ ret = H5Oset_comment(dset, dset_comment);
+ CHECK(ret, FAIL, "H5Oset_comment");
+
+ /* Putting a comment on the dataspace. It's supposed to fail. */
+ H5E_BEGIN_TRY
+ {
+ ret = H5Oset_comment(dspace, "dataspace comment");
+ }
+ H5E_END_TRY;
+ VERIFY(ret, FAIL, "H5Oset_comment");
+
+ /* Close the file */
+ ret = H5Dclose(dset);
+ CHECK(ret, FAIL, "H5Dclose");
+ ret = H5Sclose(dspace);
+ CHECK(ret, FAIL, "H5Sclose");
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Now make sure that the comments are correct on all 4 types of objects */
+ /* Open file */
+ fid = H5Fopen(filename, H5F_ACC_RDONLY, H5P_DEFAULT);
+ CHECK(fid, FAIL, "H5Fopen");
+
+ /* Getting the comment on the file and verify it */
+ comment_len = H5Oget_comment(fid, NULL, (size_t)0);
+ CHECK(comment_len, FAIL, "H5Oget_comment");
+
+ len = H5Oget_comment(fid, check_comment, (size_t)comment_len + 1);
+ CHECK(len, FAIL, "H5Oget_comment");
+
+ ret_value = HDstrcmp(file_comment, check_comment);
+ VERIFY(ret_value, 0, "H5Oget_comment");
+
+ /* Open the group */
+ grp = H5Gopen2(fid, "group", H5P_DEFAULT);
+ CHECK(grp, FAIL, "H5Gopen2");
+
+ /* Getting the comment on the group and verify it */
+ comment_len = H5Oget_comment(grp, NULL, (size_t)0);
+ CHECK(comment_len, FAIL, "H5Oget_comment");
+
+ len = H5Oget_comment(grp, check_comment, (size_t)comment_len + 1);
+ CHECK(len, FAIL, "H5Oget_comment");
+
+ ret_value = HDstrcmp(grp_comment, check_comment);
+ VERIFY(ret_value, 0, "H5Oget_comment");
+
+ /* Open the datatype */
+ dtype = H5Topen2(fid, "group/datatype", H5P_DEFAULT);
+ CHECK(dtype, FAIL, "H5Topen2");
+
+ /* Getting the comment on the datatype and verify it */
+ comment_len = H5Oget_comment(dtype, NULL, (size_t)0);
+ CHECK(comment_len, FAIL, "H5Oget_comment");
+
+ len = H5Oget_comment(dtype, check_comment, (size_t)comment_len + 1);
+ CHECK(len, FAIL, "H5Oget_comment");
+
+ ret_value = HDstrcmp(dtype_comment, check_comment);
+ VERIFY(ret_value, 0, "H5Oget_comment");
+
+ /* Open the dataset */
+ dset = H5Dopen2(fid, "dataset", H5P_DEFAULT);
+ CHECK(dset, FAIL, "H5Dopen2");
+
+ /* Getting the comment on the dataset and verify it */
+ comment_len = H5Oget_comment(dset, NULL, (size_t)0);
+ CHECK(comment_len, FAIL, "H5Oget_comment");
+
+ len = H5Oget_comment(dset, check_comment, (size_t)comment_len + 1);
+ CHECK(len, FAIL, "H5Oget_comment");
+
+ ret_value = HDstrcmp(dset_comment, check_comment);
+ VERIFY(ret_value, 0, "H5Oget_comment");
+
+ /* Close the IDs */
+ ret = H5Gclose(grp);
+ CHECK(ret, FAIL, "H5Gclose");
+ ret = H5Tclose(dtype);
+ CHECK(ret, FAIL, "H5Tclose");
+ ret = H5Dclose(dset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close the file */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+
+} /* test_h5o_comment() */
+
+/****************************************************************
+**
+** test_h5o_comment_by_name(): Test H5Oset(get)_comment_by_name functions.
+**
+****************************************************************/
+static void
+test_h5o_comment_by_name(void)
+{
+ hid_t fid; /* HDF5 File ID */
+ hid_t grp, dset, dtype, dspace; /* Object identifiers */
+ hid_t attr_space, attr_id;
+ char filename[1024];
+ hsize_t dims[RANK];
+ hsize_t attr_dims = 1;
+ int attr_value = 5;
+ const char *file_comment = "file comment by name";
+ const char *grp_comment = "group comment by name";
+ const char *dset_comment = "dataset comment by name";
+ const char *dtype_comment = "datatype comment by name";
+ char check_comment[64];
+ ssize_t comment_len = 0;
+ ssize_t len;
+ herr_t ret; /* Value returned from API calls */
+ int ret_value;
+
+ h5_fixname(TEST_FILENAME, H5P_DEFAULT, filename, sizeof filename);
+
+ /* Create a new HDF5 file */
+ fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(fid, FAIL, "H5Fcreate");
+
+ /* Create an attribute for the file */
+ attr_space = H5Screate_simple(1, &attr_dims, NULL);
+ CHECK(attr_space, FAIL, "H5Screate_simple");
+ attr_id = H5Acreate2(fid, "file attribute", H5T_NATIVE_INT, attr_space, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(attr_id, FAIL, "H5Acreate2");
+ ret = H5Awrite(attr_id, H5T_NATIVE_INT, &attr_value);
+ CHECK(ret, FAIL, "H5Awrite");
+
+ /* Putting a comment on the file through its attribute */
+ ret = H5Oset_comment_by_name(attr_id, ".", file_comment, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Oset_comment_by_name");
+
+ ret = H5Sclose(attr_space);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ ret = H5Aclose(attr_id);
+ CHECK(ret, FAIL, "H5Aclose");
+
+ /* Create a group, dataset, and committed datatype within the file */
+ /* Create the group */
+ grp = H5Gcreate2(fid, "group", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(grp, FAIL, "H5Gcreate2");
+
+ /* Putting a comment on the group */
+ ret = H5Oset_comment_by_name(fid, "group", grp_comment, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Oset_comment_by_name");
+
+ /* Commit the type inside the group */
+ dtype = H5Tcopy(H5T_NATIVE_INT);
+ CHECK(dtype, FAIL, "H5Tcopy");
+ ret = H5Tcommit2(fid, "group/datatype", dtype, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Tcommit2");
+
+ /* Putting a comment on the committed data type */
+ ret = H5Oset_comment_by_name(grp, "datatype", dtype_comment, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Oset_comment_by_name");
+
+ ret = H5Tclose(dtype);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ ret = H5Gclose(grp);
+ CHECK(ret, FAIL, "H5Gclose");
+
+ /* Create the data space for the dataset. */
+ dims[0] = DIM0;
+ dims[1] = DIM1;
+ dspace = H5Screate_simple(RANK, dims, NULL);
+ CHECK(dspace, FAIL, "H5Screate_simple");
+
+ /* Create the dataset. */
+ dset = H5Dcreate2(fid, "dataset", H5T_NATIVE_INT, dspace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(dset, FAIL, "H5Dcreate2");
+
+ /* Putting a comment on the dataset */
+ ret = H5Oset_comment_by_name(fid, "dataset", dset_comment, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Oset_comment_by_name");
+
+ /* Putting a comment on the dataspace. It's supposed to fail. */
+ H5E_BEGIN_TRY
+ {
+ ret = H5Oset_comment_by_name(dspace, ".", "dataspace comment", H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+ VERIFY(ret, FAIL, "H5Oset_comment_by_name");
+
+ /* Close the file */
+ ret = H5Dclose(dset);
+ CHECK(ret, FAIL, "H5Dclose");
+ ret = H5Sclose(dspace);
+ CHECK(ret, FAIL, "H5Sclose");
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Now make sure that the comments are correct on all 4 types of objects */
+ /* Open file */
+ fid = H5Fopen(filename, H5F_ACC_RDONLY, H5P_DEFAULT);
+ CHECK(fid, FAIL, "H5Fopen");
+
+ /* Getting the comment on the file and verify it */
+ comment_len = H5Oget_comment_by_name(fid, ".", NULL, (size_t)0, H5P_DEFAULT);
+ CHECK(comment_len, FAIL, "H5Oget_comment_by_name");
+
+ len = H5Oget_comment_by_name(fid, ".", check_comment, (size_t)comment_len + 1, H5P_DEFAULT);
+ CHECK(len, FAIL, "H5Oget_comment_by_name");
+
+ ret_value = HDstrcmp(file_comment, check_comment);
+ VERIFY(ret_value, 0, "H5Oget_comment_by_name");
+
+ /* Open the group */
+ grp = H5Gopen2(fid, "group", H5P_DEFAULT);
+ CHECK(grp, FAIL, "H5Gopen2");
+
+ /* Get the comment on the group and verify it */
+ comment_len = H5Oget_comment_by_name(fid, "group", NULL, (size_t)0, H5P_DEFAULT);
+ CHECK(comment_len, FAIL, "H5Oget_comment_by_name");
+
+ len = H5Oget_comment_by_name(fid, "group", check_comment, (size_t)comment_len + 1, H5P_DEFAULT);
+ CHECK(len, FAIL, "H5Oget_comment_by_name");
+
+ ret_value = HDstrcmp(grp_comment, check_comment);
+ VERIFY(ret_value, 0, "H5Oget_comment_by_name");
+
+ /* Get the comment on the datatype and verify it */
+ comment_len = H5Oget_comment_by_name(grp, "datatype", NULL, (size_t)0, H5P_DEFAULT);
+ CHECK(comment_len, FAIL, "H5Oget_comment_by_name");
+
+ len = H5Oget_comment_by_name(grp, "datatype", check_comment, (size_t)comment_len + 1, H5P_DEFAULT);
+ CHECK(len, FAIL, "H5Oget_comment_by_name");
+
+ ret_value = HDstrcmp(dtype_comment, check_comment);
+ VERIFY(ret_value, 0, "H5Oget_comment_by_name");
+
+ /* Get the comment on the dataset and verify it */
+ comment_len = H5Oget_comment_by_name(fid, "dataset", NULL, (size_t)0, H5P_DEFAULT);
+ CHECK(comment_len, FAIL, "H5Oget_comment_by_name");
+
+ len = H5Oget_comment_by_name(fid, "dataset", check_comment, (size_t)comment_len + 1, H5P_DEFAULT);
+ CHECK(len, FAIL, "H5Oget_comment_by_name");
+
+ ret_value = HDstrcmp(dset_comment, check_comment);
+ VERIFY(ret_value, 0, "H5Oget_comment_by_name");
+
+ /* Close the IDs */
+ ret = H5Gclose(grp);
+ CHECK(ret, FAIL, "H5Gclose");
+
+ /* Close the file */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+
+} /* test_h5o_comment_by_name() */
+#endif
+
+/****************************************************************
+**
+** test_h5o_getinfo_same_file(): Test that querying the object info for
+** objects in the same file will return the same file "number"
+**
+****************************************************************/
+static void
+test_h5o_getinfo_same_file(void)
+{
+ hid_t fid1, fid2; /* HDF5 File ID */
+ hid_t gid1, gid2; /* Group IDs */
+ char filename[1024];
+ H5O_info2_t oinfo1, oinfo2; /* Object info structs */
+ herr_t ret; /* Value returned from API calls */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing H5Oget_info on objects in same file\n"));
+
+ h5_fixname(TEST_FILENAME, H5P_DEFAULT, filename, sizeof filename);
+
+ /* Create a new HDF5 file */
+ fid1 = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(fid1, FAIL, "H5Fcreate");
+
+ /* Create two groups in the file */
+ gid1 = H5Gcreate2(fid1, "group1", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(gid1, FAIL, "H5Gcreate2");
+ gid2 = H5Gcreate2(fid1, "group2", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(gid2, FAIL, "H5Gcreate2");
+
+ /* Reset object info */
+ HDmemset(&oinfo1, 0, sizeof(oinfo1));
+ HDmemset(&oinfo2, 0, sizeof(oinfo2));
+
+ /* Query the object info for each object, through group IDs */
+ ret = H5Oget_info3(gid1, &oinfo1, H5O_INFO_BASIC);
+ CHECK(ret, FAIL, "H5Oget_info3");
+ ret = H5Oget_info3(gid2, &oinfo2, H5O_INFO_BASIC);
+ CHECK(ret, FAIL, "H5Oget_info3");
+
+ VERIFY(oinfo1.fileno, oinfo2.fileno, "file number from H5Oget_info3");
+
+ /* Reset object info */
+ HDmemset(&oinfo1, 0, sizeof(oinfo1));
+ HDmemset(&oinfo2, 0, sizeof(oinfo2));
+
+ /* Query the object info for each object, by name */
+ ret = H5Oget_info_by_name3(fid1, "group1", &oinfo1, H5O_INFO_BASIC, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Oget_info_by_name3");
+ ret = H5Oget_info_by_name3(fid1, "group2", &oinfo2, H5O_INFO_BASIC, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Oget_info_by_name3");
+
+ VERIFY(oinfo1.fileno, oinfo2.fileno, "file number from H5Oget_info3");
+
+ /* Close everything */
+ ret = H5Gclose(gid1);
+ CHECK(ret, FAIL, "H5Gclose");
+ ret = H5Gclose(gid2);
+ CHECK(ret, FAIL, "H5Gclose");
+ ret = H5Fclose(fid1);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Open file twice */
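+ /* The two H5Fopen calls return distinct file IDs, but objects reached through
+ * either ID should still report the same underlying file number */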
+ fid1 = H5Fopen(filename, H5F_ACC_RDWR, H5P_DEFAULT);
+ CHECK(fid1, FAIL, "H5Fopen");
+ fid2 = H5Fopen(filename, H5F_ACC_RDWR, H5P_DEFAULT);
+ CHECK(fid2, FAIL, "H5Fopen");
+
+ /* Open the two groups in the file */
+ gid1 = H5Gopen2(fid1, "group1", H5P_DEFAULT);
+ CHECK(gid1, FAIL, "H5Gopen2");
+ gid2 = H5Gopen2(fid2, "group2", H5P_DEFAULT);
+ CHECK(gid2, FAIL, "H5Gopen2");
+
+ /* Reset object info */
+ HDmemset(&oinfo1, 0, sizeof(oinfo1));
+ HDmemset(&oinfo2, 0, sizeof(oinfo2));
+
+ /* Query the object info for each object, through group IDs */
+ ret = H5Oget_info3(gid1, &oinfo1, H5O_INFO_BASIC);
+ CHECK(ret, FAIL, "H5Oget_info3");
+ ret = H5Oget_info3(gid2, &oinfo2, H5O_INFO_BASIC);
+ CHECK(ret, FAIL, "H5Oget_info3");
+
+ VERIFY(oinfo1.fileno, oinfo2.fileno, "file number from H5Oget_info3");
+
+ /* Reset object info */
+ HDmemset(&oinfo1, 0, sizeof(oinfo1));
+ HDmemset(&oinfo2, 0, sizeof(oinfo2));
+
+ /* Query the object info for each object, by name */
+ ret = H5Oget_info_by_name3(fid1, "group1", &oinfo1, H5O_INFO_BASIC, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Oget_info_by_name3");
+ ret = H5Oget_info_by_name3(fid1, "group2", &oinfo2, H5O_INFO_BASIC, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Oget_info_by_name3");
+
+ VERIFY(oinfo1.fileno, oinfo2.fileno, "file number from H5Oget_info3");
+
+ /* Close everything */
+ ret = H5Gclose(gid1);
+ CHECK(ret, FAIL, "H5Gclose");
+ ret = H5Gclose(gid2);
+ CHECK(ret, FAIL, "H5Gclose");
+ ret = H5Fclose(fid1);
+ CHECK(ret, FAIL, "H5Fclose");
+ ret = H5Fclose(fid2);
+ CHECK(ret, FAIL, "H5Fclose");
+
+} /* test_h5o_getinfo_same_file() */
+
+#ifndef H5_NO_DEPRECATED_SYMBOLS
+#if 0
+/****************************************************************
+**
+** test_h5o_open_by_addr_deprec(): Test H5Oopen_by_addr function.
+**
+****************************************************************/
+static void
+test_h5o_open_by_addr_deprec(void)
+{
+ hid_t fid; /* HDF5 File ID */
+ hid_t grp, dset, dtype, dspace; /* Object identifiers */
+ char filename[1024];
+ H5L_info1_t li; /* Buffer for H5Lget_info1 */
+ haddr_t grp_addr; /* Addresses for objects */
+ haddr_t dset_addr;
+ haddr_t dtype_addr;
+ hsize_t dims[RANK];
+ H5I_type_t id_type; /* Type of IDs returned from H5Oopen */
+ H5G_info_t ginfo; /* Group info struct */
+ H5T_class_t type_class; /* Class of the datatype */
+ herr_t ret; /* Value returned from API calls */
+
+ h5_fixname(TEST_FILENAME, H5P_DEFAULT, filename, sizeof filename);
+
+ /* Create a new HDF5 file */
+ fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(fid, FAIL, "H5Fcreate");
+
+ /* Create a group, dataset, and committed datatype within the file */
+ /* Create the group */
+ grp = H5Gcreate2(fid, "group", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(grp, FAIL, "H5Gcreate2");
+ ret = H5Gclose(grp);
+ CHECK(ret, FAIL, "H5Gclose");
+
+ /* Commit the type inside the group */
+ dtype = H5Tcopy(H5T_NATIVE_INT);
+ CHECK(dtype, FAIL, "H5Tcopy");
+ ret = H5Tcommit2(fid, "group/datatype", dtype, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Tcommit2");
+ ret = H5Tclose(dtype);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ /* Create the data space for the dataset. */
+ dims[0] = DIM0;
+ dims[1] = DIM1;
+ dspace = H5Screate_simple(RANK, dims, NULL);
+ CHECK(dspace, FAIL, "H5Screate_simple");
+
+ /* Create the dataset. */
+ dset = H5Dcreate2(fid, "dataset", H5T_NATIVE_INT, dspace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(dset, FAIL, "H5Dcreate2");
+ ret = H5Dclose(dset);
+ CHECK(ret, FAIL, "H5Dclose");
+ ret = H5Sclose(dspace);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Get address for each object */
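+ /* For hard links, H5L_info1_t stores the object header address in li.u.address */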
+ ret = H5Lget_info1(fid, "group", &li, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Lget_info1");
+ grp_addr = li.u.address;
+ ret = H5Lget_info1(fid, "group/datatype", &li, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Lget_info1");
+ dtype_addr = li.u.address;
+ ret = H5Lget_info1(fid, "dataset", &li, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Lget_info1");
+ dset_addr = li.u.address;
+
+ /* Now make sure that H5Oopen_by_addr can open all three types of objects */
+ grp = H5Oopen_by_addr(fid, grp_addr);
+ CHECK(grp, FAIL, "H5Oopen_by_addr");
+ dtype = H5Oopen_by_addr(fid, dtype_addr);
+ CHECK(dtype, FAIL, "H5Oopen_by_addr");
+ /* Check that we can use the group ID as a valid location */
+ dset = H5Oopen_by_addr(grp, dset_addr);
+ CHECK(dset, FAIL, "H5Oopen_by_addr");
+
+ /* Make sure that each is the right kind of ID */
+ id_type = H5Iget_type(grp);
+ VERIFY(id_type, H5I_GROUP, "H5Iget_type for group ID");
+ id_type = H5Iget_type(dtype);
+ VERIFY(id_type, H5I_DATATYPE, "H5Iget_type for datatype ID");
+ id_type = H5Iget_type(dset);
+ VERIFY(id_type, H5I_DATASET, "H5Iget_type for dataset ID");
+
+ /* Do something more complex with each of the IDs to make sure they "work" */
+ ret = H5Gget_info(grp, &ginfo);
+ CHECK(ret, FAIL, "H5Gget_info");
+ VERIFY(ginfo.nlinks, 1, "H5Gget_info"); /* There should be one object, the datatype */
+
+ type_class = H5Tget_class(dtype);
+ VERIFY(type_class, H5T_INTEGER, "H5Tget_class");
+
+ dspace = H5Dget_space(dset);
+ CHECK(dspace, FAIL, "H5Dget_space");
+
+ /* Close the IDs */
+ ret = H5Sclose(dspace);
+ CHECK(ret, FAIL, "H5Sclose");
+ ret = H5Gclose(grp);
+ CHECK(ret, FAIL, "H5Gclose");
+ ret = H5Tclose(dtype);
+ CHECK(ret, FAIL, "H5Tclose");
+ ret = H5Dclose(dset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Try giving some bogus values to H5Oopen_by_addr. */
+ /* Try to open an object with a bad address */
+ grp_addr += 20;
+ H5E_BEGIN_TRY
+ {
+ grp = H5Oopen_by_addr(fid, grp_addr);
+ }
+ H5E_END_TRY
+ VERIFY(grp, FAIL, "H5Oopen_by_addr");
+
+ /* For instance, an objectno smaller than the end of the file's superblock should
+ * trigger an error */
+ grp_addr = 10;
+ H5E_BEGIN_TRY
+ {
+ grp = H5Oopen_by_addr(fid, grp_addr);
+ }
+ H5E_END_TRY
+ VERIFY(grp, FAIL, "H5Oopen_by_addr");
+
+ /* Likewise, an objectno larger than the size of the file should fail */
+ grp_addr = 1000000000;
+ H5E_BEGIN_TRY
+ {
+ grp = H5Oopen_by_addr(fid, grp_addr);
+ }
+ H5E_END_TRY
+ VERIFY(grp, FAIL, "H5Oopen_by_addr");
+
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Also, trying to open an object without a valid location should fail */
+ H5E_BEGIN_TRY
+ {
+ dtype = H5Oopen_by_addr(fid, dtype_addr);
+ }
+ H5E_END_TRY
+ VERIFY(dtype, FAIL, "H5Oopen_by_addr");
+} /* test_h5o_open_by_addr_deprec() */
+
+/****************************************************************
+**
+** visit_obj_cb():
+** This is the callback function invoked by H5Ovisit1() in
+** test_h5o_getinfo_visit():
+** --Verify that the object info returned to the callback
+** function is the same as H5Oget_info2().
+**
+****************************************************************/
+static int
+visit_obj_cb(hid_t group_id, const char *name, const H5O_info1_t *oinfo1, void H5_ATTR_UNUSED *_op_data)
+{
+ H5O_info1_t oinfo2; /* Object info structs */
+
+ /* Verify the object info for "group1", "group2" and the root group */
+ if (!(HDstrcmp(name, "group1"))) {
+ H5Oget_info_by_name2(group_id, name, &oinfo2, H5O_INFO_NUM_ATTRS, H5P_DEFAULT);
+ VERIFY(oinfo1->num_attrs, oinfo2.num_attrs, "obj info from H5Ovisit1/H5Oget_info2");
+ }
+ else if (!(HDstrcmp(name, "group2"))) {
+ H5Oget_info_by_name2(group_id, name, &oinfo2, H5O_INFO_HDR, H5P_DEFAULT);
+ VERIFY(oinfo1->hdr.nmesgs, oinfo2.hdr.nmesgs, "obj info from H5Ovisit1/H5Oget_info2");
+ VERIFY(oinfo1->hdr.nchunks, oinfo2.hdr.nchunks, "obj info from H5Ovisit1/H5Oget_info2");
+ }
+ else if (!(HDstrcmp(name, "."))) {
+ H5Oget_info_by_name2(group_id, name, &oinfo2, H5O_INFO_META_SIZE, H5P_DEFAULT);
+ VERIFY(oinfo1->meta_size.obj.index_size, oinfo2.meta_size.obj.index_size,
+ "obj info from H5Ovisit1/H5Oget_info2");
+ VERIFY(oinfo1->meta_size.obj.heap_size, oinfo2.meta_size.obj.heap_size,
+ "obj info from H5Ovisit1/H5Oget_info2");
+ }
+
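+ /* Returning H5_ITER_CONT tells H5Ovisit1 to continue iterating over any remaining objects */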
+ return (H5_ITER_CONT);
+} /* end visit_obj_cb() */
+
+/****************************************************************
+**
+** test_h5o_getinfo_visit():
+** Verify that the object info returned via H5Oget_info1()
+** and H5Oget_info2() is the same.
+** Verify that the object info retrieved via H5Ovisit1() is
+** the same as H5Oget_info2().
+**
+****************************************************************/
+static void
+test_h5o_getinfo_visit(void)
+{
+ hid_t fid = -1; /* HDF5 File ID */
+ hid_t gid1 = -1, gid2 = -1; /* Group IDs */
+ hid_t sid = -1; /* Dataspace ID */
+ hid_t aid = -1; /* Attribute ID */
+ char filename[1024];
+ H5O_info1_t oinfo1, oinfo2; /* Object info structs */
+ char attrname[25]; /* Attribute name */
+ int j; /* Local index variable */
+ herr_t ret; /* Value returned from API calls */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing info returned by H5Oget_info vs H5Ovisit\n"));
+
+ h5_fixname(TEST_FILENAME, H5P_DEFAULT, filename, sizeof filename);
+
+ /* Create an HDF5 file */
+ fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(fid, FAIL, "H5Fcreate");
+
+ /* Create "group1" in the file */
+ gid1 = H5Gcreate2(fid, "group1", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(gid1, FAIL, "H5Gcreate2");
+
+ /* Create dataspace */
+ sid = H5Screate(H5S_SCALAR);
+ CHECK(sid, FAIL, "H5Screate");
+
+ /* Attach 10 attributes to "group1" */
+ for (j = 0; j < 10; j++) {
+ /* Create the attribute name */
+ HDsnprintf(attrname, sizeof(attrname), "attr%d", j);
+ /* Create the attribute */
+ aid = H5Acreate2(gid1, attrname, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(aid, FAIL, "H5Acreate2");
+ /* Close the attribute */
+ ret = H5Aclose(aid);
+ CHECK(ret, FAIL, "H5Aclose");
+ }
+
+ /* Create "group2" in the file */
+ gid2 = H5Gcreate2(fid, "group2", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(gid2, FAIL, "H5Gcreate2");
+
+ /* Reset object info */
+ HDmemset(&oinfo1, 0, sizeof(oinfo1));
+ HDmemset(&oinfo2, 0, sizeof(oinfo2));
+
+ /* Query the object info for "group1" via H5Oget_info1 and H5Oget_info2 */
+ ret = H5Oget_info1(gid1, &oinfo1);
+ CHECK(ret, FAIL, "H5Oget_info1");
+ ret = H5Oget_info2(gid1, &oinfo2, H5O_INFO_BASIC | H5O_INFO_NUM_ATTRS);
+ CHECK(ret, FAIL, "H5Oget_info2");
+
+ /* Verify the object info for "group1" is correct */
+ VERIFY(oinfo1.fileno, oinfo2.fileno, "obj info from H5Oget_info1/2");
+ VERIFY(oinfo1.num_attrs, oinfo2.num_attrs, "obj info from H5Oget_info1/2");
+
+ /* Reset object info */
+ HDmemset(&oinfo1, 0, sizeof(oinfo1));
+ HDmemset(&oinfo2, 0, sizeof(oinfo2));
+
+ /* Query the object info for "group2" via H5Oget_info1 and H5Oget_info2 */
+ ret = H5Oget_info_by_name1(fid, "group2", &oinfo1, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Oget_native_info_by_name");
+ ret = H5Oget_info_by_name2(fid, "group2", &oinfo2, H5O_INFO_HDR | H5O_INFO_META_SIZE, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Oget_native_info_by_name");
+
+ /* Verify the object info for "group2" is correct */
+ VERIFY(oinfo1.hdr.nmesgs, oinfo2.hdr.nmesgs, "obj info from H5Oget_info1/2");
+ VERIFY(oinfo1.hdr.nchunks, oinfo2.hdr.nchunks, "obj info from H5Oget_info1/2");
+ VERIFY(oinfo1.meta_size.obj.index_size, oinfo2.meta_size.obj.index_size, "obj info from H5Oget_info1/2");
+ VERIFY(oinfo1.meta_size.obj.heap_size, oinfo2.meta_size.obj.heap_size, "obj info from H5Oget_info1/2");
+
+ /* Close everything */
+ ret = H5Gclose(gid1);
+ CHECK(ret, FAIL, "H5Gclose");
+ ret = H5Gclose(gid2);
+ CHECK(ret, FAIL, "H5Gclose");
+
+ /* Verify the object info returned to the callback function is correct */
+ ret = H5Ovisit1(fid, H5_INDEX_NAME, H5_ITER_INC, visit_obj_cb, NULL);
+ CHECK(ret, FAIL, "H5Ovisit1");
+
+ /* Close the file */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+
+} /* test_h5o_getinfo_visit() */
+#endif
+#endif /* H5_NO_DEPRECATED_SYMBOLS */
+
+/****************************************************************
+**
+** test_h5o(): Main H5O (generic object) testing routine.
+**
+****************************************************************/
+void
+test_h5o(void)
+{
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing Objects\n"));
+
+ test_h5o_open(); /* Test generic open function */
+#if 0
+#ifndef H5_NO_DEPRECATED_SYMBOLS
+ test_h5o_open_by_addr(); /* Test opening objects by address */
+#endif /* H5_NO_DEPRECATED_SYMBOLS */
+#endif
+ test_h5o_open_by_token(); /* Test opening objects by token */
+ test_h5o_close(); /* Test generic close function */
+ test_h5o_refcount(); /* Test incrementing and decrementing reference count */
+ test_h5o_plist(); /* Test object creation properties */
+ test_h5o_link(); /* Test object link routine */
+#if 0
+ test_h5o_comment(); /* Test routines for comment */
+ test_h5o_comment_by_name(); /* Test routines for comment by name */
+#endif
+ test_h5o_getinfo_same_file(); /* Test info for objects in the same file */
+#ifndef H5_NO_DEPRECATED_SYMBOLS
+#if 0
+ test_h5o_open_by_addr_deprec(); /* Test opening objects by address with H5Lget_info1 */
+ test_h5o_getinfo_visit(); /* Test object info for H5Oget_info1/2 and H5Ovisit1 */
+#endif
+#endif /* H5_NO_DEPRECATED_SYMBOLS */
+} /* test_h5o() */
+
+/*-------------------------------------------------------------------------
+ * Function: cleanup_h5o
+ *
+ * Purpose: Cleanup temporary test files
+ *
+ * Return: none
+ *
+ * Programmer: James Laird
+ * June 3, 2006
+ *
+ *-------------------------------------------------------------------------
+ */
+void
+cleanup_h5o(void)
+{
+ char filename[1024];
+
+ H5E_BEGIN_TRY
+ {
+ h5_fixname(TEST_FILENAME, H5P_DEFAULT, filename, sizeof filename);
+ H5Fdelete(filename, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+}
diff --git a/test/API/th5s.c b/test/API/th5s.c
new file mode 100644
index 0000000..cb1c899
--- /dev/null
+++ b/test/API/th5s.c
@@ -0,0 +1,3538 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://www.hdfgroup.org/licenses. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/***********************************************************
+ *
+ * Test program: th5s
+ *
+ * Test the dataspace functionality
+ *
+ *************************************************************/
+
+#include "testhdf5.h"
+/* #include "H5srcdir.h" */
+
+/* #include "H5Iprivate.h" */
+/* #include "H5Pprivate.h" */
+
+#if 0
+/*
+ * This file needs to access private information from the H5S package.
+ * This file also needs to access the dataspace testing code.
+ */
+#define H5S_FRIEND /*suppress error about including H5Spkg */
+#define H5S_TESTING /*suppress warning about H5S testing funcs*/
+#include "H5Spkg.h" /* Dataspaces */
+
+/*
+ * This file needs to access private information from the H5O package.
+ * This file also needs to access the dataspace testing code.
+ */
+#define H5O_FRIEND /*suppress error about including H5Opkg */
+#define H5O_TESTING
+#include "H5Opkg.h" /* Object header */
+#endif
+
+#define TESTFILE "th5s.h5"
+#define DATAFILE "th5s1.h5"
+#define NULLFILE "th5s2.h5"
+#define BASICFILE "th5s3.h5"
+#define ZEROFILE "th5s4.h5"
+#define BASICDATASET "basic_dataset"
+#define BASICDATASET1 "basic_dataset1"
+#define BASICDATASET2 "basic_dataset2"
+#define BASICDATASET3 "basic_dataset3"
+#define BASICDATASET4 "basic_dataset4"
+#define BASICATTR "basic_attribute"
+#define NULLDATASET "null_dataset"
+#define NULLATTR "null_attribute"
+#define EXTFILE_NAME "ext_file"
+
+/* 3-D dataset with fixed dimensions */
+#define SPACE1_RANK 3
+#define SPACE1_DIM1 3
+#define SPACE1_DIM2 15
+#define SPACE1_DIM3 13
+
+/* 4-D dataset with one unlimited dimension */
+#define SPACE2_RANK 4
+#define SPACE2_DIM1 0
+#define SPACE2_DIM2 15
+#define SPACE2_DIM3 13
+#define SPACE2_DIM4 23
+#define SPACE2_MAX1 H5S_UNLIMITED
+#define SPACE2_MAX2 15
+#define SPACE2_MAX3 13
+#define SPACE2_MAX4 23
+
+/* Scalar dataset with simple datatype */
+#define SPACE3_RANK 0
+unsigned space3_data = 65;
+
+/* Scalar dataset with compound datatype */
+#define SPACE4_FIELDNAME1 "c1"
+#define SPACE4_FIELDNAME2 "u"
+#define SPACE4_FIELDNAME3 "f"
+#define SPACE4_FIELDNAME4 "c2"
+size_t space4_field1_off = 0;
+size_t space4_field2_off = 0;
+size_t space4_field3_off = 0;
+size_t space4_field4_off = 0;
+struct space4_struct {
+ char c1;
+ unsigned u;
+ float f;
+ char c2;
+} space4_data = {'v', 987123, -3.14F, 'g'}; /* Test data for 4th dataspace */
+
+/*
+ * Testing configuration defines used by:
+ * test_h5s_encode_regular_hyper()
+ * test_h5s_encode_irregular_hyper()
+ * test_h5s_encode_points()
+ */
+#define CONFIG_8 1
+#define CONFIG_16 2
+#define CONFIG_32 3
+#define POWER8 256 /* 2^8 */
+#define POWER16 65536 /* 2^16 */
+#define POWER32 4294967296 /* 2^32 */
+
+/****************************************************************
+**
+** test_h5s_basic(): Test basic H5S (dataspace) code.
+**
+****************************************************************/
+static void
+test_h5s_basic(void)
+{
+ hid_t fid1; /* HDF5 File IDs */
+ hid_t sid1, sid2; /* Dataspace ID */
+ hid_t dset1; /* Dataset ID */
+#ifndef NO_VALIDATE_DATASPACE
+ hid_t aid1; /* Attribute ID */
+#endif
+ int rank; /* Logical rank of dataspace */
+ hsize_t dims1[] = {SPACE1_DIM1, SPACE1_DIM2, SPACE1_DIM3};
+ hsize_t dims2[] = {SPACE2_DIM1, SPACE2_DIM2, SPACE2_DIM3, SPACE2_DIM4};
+ hsize_t dims3[H5S_MAX_RANK + 1];
+ hsize_t max2[] = {SPACE2_MAX1, SPACE2_MAX2, SPACE2_MAX3, SPACE2_MAX4};
+ hsize_t tdims[4]; /* Dimension array to test with */
+ hsize_t tmax[4];
+ hssize_t n; /* Number of dataspace elements */
+#if 0
+ hbool_t driver_is_default_compatible;
+#endif
+ herr_t ret; /* Generic return value */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing Dataspace Manipulation\n"));
+
+ sid1 = H5Screate_simple(SPACE1_RANK, dims1, max2);
+ CHECK(sid1, FAIL, "H5Screate_simple");
+
+ n = H5Sget_simple_extent_npoints(sid1);
+ CHECK(n, FAIL, "H5Sget_simple_extent_npoints");
+ VERIFY(n, SPACE1_DIM1 * SPACE1_DIM2 * SPACE1_DIM3, "H5Sget_simple_extent_npoints");
+
+ rank = H5Sget_simple_extent_ndims(sid1);
+ CHECK(rank, FAIL, "H5Sget_simple_extent_ndims");
+ VERIFY(rank, SPACE1_RANK, "H5Sget_simple_extent_ndims");
+
+ rank = H5Sget_simple_extent_dims(sid1, tdims, NULL);
+ CHECK(rank, FAIL, "H5Sget_simple_extent_dims");
+ VERIFY(HDmemcmp(tdims, dims1, SPACE1_RANK * sizeof(hsize_t)), 0, "H5Sget_simple_extent_dims");
+
+ sid2 = H5Screate_simple(SPACE2_RANK, dims2, max2);
+ CHECK(sid2, FAIL, "H5Screate_simple");
+
+ n = H5Sget_simple_extent_npoints(sid2);
+ CHECK(n, FAIL, "H5Sget_simple_extent_npoints");
+ VERIFY(n, SPACE2_DIM1 * SPACE2_DIM2 * SPACE2_DIM3 * SPACE2_DIM4, "H5Sget_simple_extent_npoints");
+
+ rank = H5Sget_simple_extent_ndims(sid2);
+ CHECK(rank, FAIL, "H5Sget_simple_extent_ndims");
+ VERIFY(rank, SPACE2_RANK, "H5Sget_simple_extent_ndims");
+
+ rank = H5Sget_simple_extent_dims(sid2, tdims, tmax);
+ CHECK(rank, FAIL, "H5Sget_simple_extent_dims");
+ VERIFY(HDmemcmp(tdims, dims2, SPACE2_RANK * sizeof(hsize_t)), 0, "H5Sget_simple_extent_dims");
+ VERIFY(HDmemcmp(tmax, max2, SPACE2_RANK * sizeof(hsize_t)), 0, "H5Sget_simple_extent_dims");
+
+ /* Change max dims to be equal to the dimensions */
+ ret = H5Sset_extent_simple(sid1, SPACE1_RANK, dims1, NULL);
+ CHECK(ret, FAIL, "H5Sset_extent_simple");
+ rank = H5Sget_simple_extent_dims(sid1, tdims, tmax);
+ CHECK(rank, FAIL, "H5Sget_simple_extent_dims");
+ VERIFY(HDmemcmp(tdims, dims1, SPACE1_RANK * sizeof(hsize_t)), 0, "H5Sget_simple_extent_dims");
+ VERIFY(HDmemcmp(tmax, dims1, SPACE1_RANK * sizeof(hsize_t)), 0, "H5Sget_simple_extent_dims");
+
+ ret = H5Sclose(sid1);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ ret = H5Sclose(sid2);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /*
+ * Check to be sure we can't create a simple dataspace that has too many
+ * dimensions.
+ */
+ H5E_BEGIN_TRY
+ {
+ sid1 = H5Screate_simple(H5S_MAX_RANK + 1, dims3, NULL);
+ }
+ H5E_END_TRY;
+ VERIFY(sid1, FAIL, "H5Screate_simple");
+#if 0
+ /*
+ * Try reading a file that has been prepared that has a dataset with a
+ * higher dimensionality than what the library can handle.
+ *
+ * If this test fails and the H5S_MAX_RANK variable has changed, follow
+ * the instructions in space_overflow.c for regenerating the th5s.h5 file.
+ */
+ ret = h5_driver_is_default_vfd_compatible(H5P_DEFAULT, &driver_is_default_compatible);
+ CHECK_I(ret, "h5_driver_is_default_vfd_compatible");
+
+ if (driver_is_default_compatible) {
+ const char *testfile = H5_get_srcdir_filename(TESTFILE); /* Corrected test file name */
+
+ fid1 = H5Fopen(testfile, H5F_ACC_RDONLY, H5P_DEFAULT);
+ CHECK_I(fid1, "H5Fopen");
+ if (fid1 >= 0) {
+ dset1 = H5Dopen2(fid1, "dset", H5P_DEFAULT);
+ VERIFY(dset1, FAIL, "H5Dopen2");
+ ret = H5Fclose(fid1);
+ CHECK_I(ret, "H5Fclose");
+ }
+ else
+ HDprintf("***cannot open the pre-created H5S_MAX_RANK test file (%s)\n", testfile);
+ }
+#endif
+ /* Verify that incorrect dimensions don't work */
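+ /* A current dimension of H5S_UNLIMITED is invalid; only the maximum dimensions may be unlimited */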
+ dims1[0] = H5S_UNLIMITED;
+ H5E_BEGIN_TRY
+ {
+ sid1 = H5Screate_simple(SPACE1_RANK, dims1, NULL);
+ }
+ H5E_END_TRY;
+ VERIFY(sid1, FAIL, "H5Screate_simple");
+
+ dims1[0] = H5S_UNLIMITED;
+ sid1 = H5Screate(H5S_SIMPLE);
+ CHECK(sid1, FAIL, "H5Screate");
+
+ H5E_BEGIN_TRY
+ {
+ ret = H5Sset_extent_simple(sid1, SPACE1_RANK, dims1, NULL);
+ }
+ H5E_END_TRY;
+ VERIFY(ret, FAIL, "H5Sset_extent_simple");
+
+ ret = H5Sclose(sid1);
+ CHECK_I(ret, "H5Sclose");
+
+ /*
+ * Try writing simple dataspaces without setting their extents
+ */
+ /* Create the file */
+ fid1 = H5Fcreate(BASICFILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(fid1, FAIL, "H5Fcreate");
+
+ dims1[0] = SPACE1_DIM1;
+
+ sid1 = H5Screate(H5S_SIMPLE);
+ CHECK(sid1, FAIL, "H5Screate");
+ sid2 = H5Screate_simple(1, dims1, dims1);
+ CHECK(sid2, FAIL, "H5Screate_simple");
+#ifndef NO_VALIDATE_DATASPACE
+ /* This dataset's space has no extent; it should not be created */
+ H5E_BEGIN_TRY
+ {
+ dset1 = H5Dcreate2(fid1, BASICDATASET, H5T_NATIVE_INT, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ }
+ H5E_END_TRY
+ VERIFY(dset1, FAIL, "H5Dcreate2");
+#endif
+ dset1 = H5Dcreate2(fid1, BASICDATASET2, H5T_NATIVE_INT, sid2, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(dset1, FAIL, "H5Dcreate2");
+
+ /* Try some writes with the bad dataspace (sid1) */
+ H5E_BEGIN_TRY
+ {
+ ret = H5Dwrite(dset1, H5T_NATIVE_INT, sid1, H5S_ALL, H5P_DEFAULT, &n);
+ }
+ H5E_END_TRY
+ VERIFY(ret, FAIL, "H5Dwrite");
+#ifndef NO_VALIDATE_DATASPACE
+ H5E_BEGIN_TRY
+ {
+ ret = H5Dwrite(dset1, H5T_NATIVE_INT, H5S_ALL, sid1, H5P_DEFAULT, &n);
+ }
+ H5E_END_TRY
+ VERIFY(ret, FAIL, "H5Dwrite");
+
+ H5E_BEGIN_TRY
+ {
+ ret = H5Dwrite(dset1, H5T_NATIVE_INT, sid1, sid1, H5P_DEFAULT, &n);
+ }
+ H5E_END_TRY
+ VERIFY(ret, FAIL, "H5Dwrite");
+#endif
+ /* Try to iterate using the bad dataspace */
+ H5E_BEGIN_TRY
+ {
+ ret = H5Diterate(&n, H5T_NATIVE_INT, sid1, NULL, NULL);
+ }
+ H5E_END_TRY
+ VERIFY(ret, FAIL, "H5Diterate");
+
+ /* Try to fill using the bad dataspace */
+ H5E_BEGIN_TRY
+ {
+ ret = H5Dfill(NULL, H5T_NATIVE_INT, &n, H5T_NATIVE_INT, sid1);
+ }
+ H5E_END_TRY
+ VERIFY(ret, FAIL, "H5Dfill");
+#ifndef NO_VALIDATE_DATASPACE
+ /* Now use the bad dataspace as the space for an attribute */
+ H5E_BEGIN_TRY
+ {
+ aid1 = H5Acreate2(dset1, BASICATTR, H5T_NATIVE_INT, sid1, H5P_DEFAULT, H5P_DEFAULT);
+ }
+ H5E_END_TRY
+ VERIFY(aid1, FAIL, "H5Acreate2");
+#endif
+ /* Make sure that dataset reads using the bad dataspace fail */
+ H5E_BEGIN_TRY
+ {
+ ret = H5Dread(dset1, H5T_NATIVE_INT, sid1, H5S_ALL, H5P_DEFAULT, &n);
+ }
+ H5E_END_TRY
+ VERIFY(ret, FAIL, "H5Dread");
+#ifndef NO_VALIDATE_DATASPACE
+ H5E_BEGIN_TRY
+ {
+ ret = H5Dread(dset1, H5T_NATIVE_INT, H5S_ALL, sid1, H5P_DEFAULT, &n);
+ }
+ H5E_END_TRY
+ VERIFY(ret, FAIL, "H5Dread");
+
+ H5E_BEGIN_TRY
+ {
+ ret = H5Dread(dset1, H5T_NATIVE_INT, sid1, sid1, H5P_DEFAULT, &n);
+ }
+ H5E_END_TRY
+ VERIFY(ret, FAIL, "H5Dread");
+#endif
+ /* Clean up */
+ ret = H5Dclose(dset1);
+ CHECK(ret, FAIL, "H5Dclose");
+ ret = H5Sclose(sid1);
+ CHECK(ret, FAIL, "H5Sclose");
+ ret = H5Sclose(sid2);
+ CHECK(ret, FAIL, "H5Sclose");
+ ret = H5Fclose(fid1);
+ CHECK(ret, FAIL, "H5Fclose");
+} /* test_h5s_basic() */
+
+/****************************************************************
+**
+** test_h5s_null(): Test NULL dataspace
+**
+****************************************************************/
+static void
+test_h5s_null(void)
+{
+ hid_t fid; /* File ID */
+ hid_t sid; /* Dataspace IDs */
+ hid_t dset_sid, dset_sid2; /* Dataspace IDs */
+ hid_t attr_sid; /* Dataspace IDs */
+ hid_t did; /* Dataset ID */
+ hid_t attr; /* Attribute ID */
+ H5S_class_t stype; /* dataspace type */
+ hssize_t nelem; /* Number of elements */
+ unsigned uval = 2; /* Buffer for writing to dataset */
+ int val = 1; /* Buffer for writing to attribute */
+ H5S_sel_type sel_type; /* Type of selection currently */
+ hsize_t dims[1] = {10}; /* Dimensions for converting null dataspace to simple */
+ H5S_class_t space_type; /* Type of dataspace */
+ herr_t ret; /* Generic return value */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing Null Dataspace\n"));
+
+ /* Create the file */
+ fid = H5Fcreate(NULLFILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(fid, FAIL, "H5Fcreate");
+
+ sid = H5Screate(H5S_NULL);
+ CHECK(sid, FAIL, "H5Screate");
+
+ /* Check that the null dataspace actually has 0 elements */
+ nelem = H5Sget_simple_extent_npoints(sid);
+ VERIFY(nelem, 0, "H5Sget_simple_extent_npoints");
+
+ /* Check that the dataspace was created with an "all" selection */
+ sel_type = H5Sget_select_type(sid);
+ VERIFY(sel_type, H5S_SEL_ALL, "H5Sget_select_type");
+
+ /* Check that the null dataspace has 0 elements selected */
+ nelem = H5Sget_select_npoints(sid);
+ VERIFY(nelem, 0, "H5Sget_select_npoints");
+
+ /* Change to "none" selection */
+ ret = H5Sselect_none(sid);
+ CHECK(ret, FAIL, "H5Sselect_none");
+
+ /* Check that the null dataspace has 0 elements selected */
+ nelem = H5Sget_select_npoints(sid);
+ VERIFY(nelem, 0, "H5Sget_select_npoints");
+
+ /* Check to be sure we can't set a hyperslab selection on a null dataspace */
+ H5E_BEGIN_TRY
+ {
+ hsize_t start[1] = {0};
+ hsize_t count[1] = {0};
+
+ ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, start, NULL, count, NULL);
+ }
+ H5E_END_TRY;
+ VERIFY(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* Check to be sure we can't set a point selection on a null dataspace */
+ H5E_BEGIN_TRY
+ {
+ hsize_t coord[1][1]; /* Coordinates for point selection */
+
+ coord[0][0] = 0;
+ ret = H5Sselect_elements(sid, H5S_SELECT_SET, (size_t)1, (const hsize_t *)coord);
+ }
+ H5E_END_TRY;
+ VERIFY(ret, FAIL, "H5Sselect_elements");
+
+ /* Create first dataset */
+ did = H5Dcreate2(fid, NULLDATASET, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(did, FAIL, "H5Dcreate2");
+
+ /* Write "nothing" to the dataset */
+ ret = H5Dwrite(did, H5T_NATIVE_UINT, H5S_ALL, H5S_ALL, H5P_DEFAULT, &uval);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* Write "nothing" to the dataset (with type conversion :-) */
+ ret = H5Dwrite(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, &val);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* Try reading from the dataset (make certain our buffer is unmodified) */
+ ret = H5Dread(did, H5T_NATIVE_UINT, H5S_ALL, H5S_ALL, H5P_DEFAULT, &uval);
+ CHECK(ret, FAIL, "H5Dread");
+ VERIFY(uval, 2, "H5Dread");
+
+ /* Try reading from the dataset (with type conversion :-) (make certain our buffer is unmodified) */
+ ret = H5Dread(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, &val);
+ CHECK(ret, FAIL, "H5Dread");
+ VERIFY(val, 1, "H5Dread");
+
+ /* Create an attribute for the dataset */
+ attr = H5Acreate2(did, NULLATTR, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(attr, FAIL, "H5Acreate2");
+
+ /* Write "nothing" to the attribute */
+ ret = H5Awrite(attr, H5T_NATIVE_INT, &val);
+ CHECK(ret, FAIL, "H5Awrite");
+
+ /* Write "nothing" to the attribute (with type conversion :-) */
+ ret = H5Awrite(attr, H5T_NATIVE_UINT, &uval);
+ CHECK(ret, FAIL, "H5Awrite");
+
+ /* Try reading from the attribute (make certain our buffer is unmodified) */
+ ret = H5Aread(attr, H5T_NATIVE_INT, &val);
+ CHECK(ret, FAIL, "H5Aread");
+ VERIFY(val, 1, "H5Aread");
+
+ /* Try reading from the attribute (with type conversion :-) (make certain our buffer is unmodified) */
+ ret = H5Aread(attr, H5T_NATIVE_UINT, &uval);
+ CHECK(ret, FAIL, "H5Aread");
+ VERIFY(uval, 2, "H5Aread");
+
+ /* Close attribute */
+ ret = H5Aclose(attr);
+ CHECK(ret, FAIL, "H5Aclose");
+
+ /* Close the dataset */
+ ret = H5Dclose(did);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Verify that we've got the right kind of dataspace */
+ space_type = H5Sget_simple_extent_type(sid);
+ VERIFY(space_type, H5S_NULL, "H5Sget_simple_extent_type");
+
+ /* Convert the null dataspace to a simple dataspace */
+ ret = H5Sset_extent_simple(sid, 1, dims, NULL);
+ CHECK(ret, FAIL, "H5Sset_extent_simple");
+
+ /* Verify that we've got the right kind of dataspace now */
+ space_type = H5Sget_simple_extent_type(sid);
+ VERIFY(space_type, H5S_SIMPLE, "H5Sget_simple_extent_type");
+
+ /* Close the dataspace */
+ ret = H5Sclose(sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close the file */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /*============================================
+ * Reopen the file to check the dataspace
+ *============================================
+ */
+ fid = H5Fopen(NULLFILE, H5F_ACC_RDONLY, H5P_DEFAULT);
+ CHECK(fid, FAIL, "H5Fopen");
+
+ /* Reopen the dataset */
+ did = H5Dopen2(fid, NULLDATASET, H5P_DEFAULT);
+ CHECK(did, FAIL, "H5Dopen2");
+
+ /* Get the space of the dataset */
+ dset_sid = H5Dget_space(did);
+ CHECK(dset_sid, FAIL, "H5Dget_space");
+
+ /* Query the NULL dataspace */
+ dset_sid2 = H5Scopy(dset_sid);
+ CHECK(dset_sid2, FAIL, "H5Scopy");
+
+ /* Verify the class type of dataspace */
+ stype = H5Sget_simple_extent_type(dset_sid2);
+ VERIFY(stype, H5S_NULL, "H5Sget_simple_extent_type");
+
+ /* Verify there are zero elements in the dataspace */
+ ret = (herr_t)H5Sget_simple_extent_npoints(dset_sid2);
+ VERIFY(ret, 0, "H5Sget_simple_extent_npoints");
+
+ /* Try reading from the dataset (make certain our buffer is unmodified) */
+ ret = H5Dread(did, H5T_NATIVE_UINT, H5S_ALL, H5S_ALL, H5P_DEFAULT, &uval);
+ CHECK(ret, FAIL, "H5Dread");
+ VERIFY(uval, 2, "H5Dread");
+
+ /* Close the dataspace */
+ ret = H5Sclose(dset_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ ret = H5Sclose(dset_sid2);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Open the attribute for the dataset */
+ attr = H5Aopen(did, NULLATTR, H5P_DEFAULT);
+ CHECK(attr, FAIL, "H5Aopen");
+
+ /* Get the dataspace of the attribute */
+ attr_sid = H5Aget_space(attr);
+ CHECK(attr_sid, FAIL, "H5Aget_space");
+
+ /* Verify the class type of dataspace */
+ stype = H5Sget_simple_extent_type(attr_sid);
+ VERIFY(stype, H5S_NULL, "H5Sget_simple_extent_type");
+
+ /* Verify there are zero elements in the dataspace */
+ ret = (herr_t)H5Sget_simple_extent_npoints(attr_sid);
+ VERIFY(ret, 0, "H5Sget_simple_extent_npoints");
+
+ /* Close the dataspace */
+ ret = H5Sclose(attr_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Try reading from the attribute (make certain our buffer is unmodified) */
+ ret = H5Aread(attr, H5T_NATIVE_INT, &val);
+ CHECK(ret, FAIL, "H5Aread");
+ VERIFY(val, 1, "H5Aread");
+
+ /* Close attribute */
+ ret = H5Aclose(attr);
+ CHECK(ret, FAIL, "H5Aclose");
+
+ /* Close the dataset */
+ ret = H5Dclose(did);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close the file */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+} /* end test_h5s_null() */
+
+/****************************************************************
+**
+** test_h5s_zero_dim(): Test the code for dataspace with zero dimension size
+**
+****************************************************************/
+static void
+test_h5s_zero_dim(void)
+{
+ hid_t fid1; /* HDF5 File IDs */
+ hid_t sid1, attr_sid; /* Dataspace ID */
+ hid_t sid_chunk; /* Dataspace ID for chunked dataset */
+ hid_t dset1; /* Dataset ID */
+ hid_t plist_id; /* Dataset creation property list */
+ hid_t attr; /* Attribute ID */
+ int rank; /* Logical rank of dataspace */
+ hsize_t dims1[] = {0, SPACE1_DIM2, SPACE1_DIM3};
+ hsize_t max_dims[] = {SPACE1_DIM1 + 1, SPACE1_DIM2, SPACE1_DIM3};
+ hsize_t extend_dims[] = {SPACE1_DIM1, SPACE1_DIM2, SPACE1_DIM3};
+ hsize_t chunk_dims[] = {SPACE1_DIM1, SPACE1_DIM2 / 3, SPACE1_DIM3};
+ hsize_t tdims[SPACE1_RANK]; /* Dimension array to test with */
+ int wdata[SPACE1_DIM2][SPACE1_DIM3];
+ int rdata[SPACE1_DIM2][SPACE1_DIM3];
+ short wdata_short[SPACE1_DIM2][SPACE1_DIM3];
+ short rdata_short[SPACE1_DIM2][SPACE1_DIM3];
+ int wdata_real[SPACE1_DIM1][SPACE1_DIM2][SPACE1_DIM3];
+ int rdata_real[SPACE1_DIM1][SPACE1_DIM2][SPACE1_DIM3];
+#ifndef NO_CHECK_SELECTION_BOUNDS
+ int val = 3;
+ hsize_t start[] = {0, 0, 0};
+ hsize_t count[] = {3, 15, 13};
+ hsize_t coord[1][3]; /* Coordinates for point selection */
+#endif
+ hssize_t nelem; /* Number of elements */
+ H5S_sel_type sel_type; /* Type of selection currently */
+ H5S_class_t stype; /* dataspace type */
+ H5D_alloc_time_t alloc_time; /* Space allocation time */
+ herr_t ret; /* Generic return value */
+ unsigned int i, j, k;
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing Dataspace with zero dimension size\n"));
+
+ /* Initialize the data */
+ for (i = 0; i < SPACE1_DIM2; i++)
+ for (j = 0; j < SPACE1_DIM3; j++) {
+ wdata[i][j] = (int)(i + j);
+ rdata[i][j] = 7;
+ wdata_short[i][j] = (short)(i + j);
+ rdata_short[i][j] = 7;
+ }
+
+ for (i = 0; i < SPACE1_DIM1; i++)
+ for (j = 0; j < SPACE1_DIM2; j++)
+ for (k = 0; k < SPACE1_DIM3; k++)
+ wdata_real[i][j][k] = (int)(i + j + k);
+
+ /* Test with different space allocation times */
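+ /* The loop covers H5D_ALLOC_TIME_EARLY, _LATE, and _INCR, which are consecutive enum values */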
+ for (alloc_time = H5D_ALLOC_TIME_EARLY; alloc_time <= H5D_ALLOC_TIME_INCR; alloc_time++) {
+
+ /* Make sure we can create the space with the dimension size 0 (starting from v1.8.7).
+ * The dimension doesn't need to be unlimited. */
+ dims1[0] = 0;
+ dims1[1] = SPACE1_DIM2;
+ dims1[2] = SPACE1_DIM3;
+ sid1 = H5Screate_simple(SPACE1_RANK, dims1, NULL);
+ CHECK(sid1, FAIL, "H5Screate_simple");
+
+ ret = H5Sclose(sid1);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ sid1 = H5Screate(H5S_SIMPLE);
+ CHECK(sid1, FAIL, "H5Screate");
+
+ /* sid1 has a first dimension of zero. Because NULL is passed for the maximum
+ * dimensions, they default to the current dimensions. */
+ ret = H5Sset_extent_simple(sid1, SPACE1_RANK, dims1, NULL);
+ CHECK(ret, FAIL, "H5Sset_extent_simple");
+
+ /* Check that the dataspace actually has 0 elements */
+ nelem = H5Sget_simple_extent_npoints(sid1);
+ VERIFY(nelem, 0, "H5Sget_simple_extent_npoints");
+
+ /* Check that the dataspace was created with an "all" selection */
+ sel_type = H5Sget_select_type(sid1);
+ VERIFY(sel_type, H5S_SEL_ALL, "H5Sget_select_type");
+
+ /* Check that the dataspace has 0 elements selected */
+ nelem = H5Sget_select_npoints(sid1);
+ VERIFY(nelem, 0, "H5Sget_select_npoints");
+
+ /* Change to "none" selection */
+ ret = H5Sselect_none(sid1);
+ CHECK(ret, FAIL, "H5Sselect_none");
+
+ /* Check that the dataspace has 0 elements selected */
+ nelem = H5Sget_select_npoints(sid1);
+ VERIFY(nelem, 0, "H5Sget_select_npoints");
+
+ /* Try to select the entire dataspace */
+ ret = H5Sselect_all(sid1);
+ CHECK(ret, FAIL, "H5Sselect_all");
+
+ /* Check that the dataspace has 0 elements selected */
+ nelem = H5Sget_select_npoints(sid1);
+ VERIFY(nelem, 0, "H5Sget_select_npoints");
+
+ /* Create the dataspace for a chunked dataset with the first dimension size zero.
+ * The maximum dimensions are larger than the current dimensions to allow later expansion. */
+ sid_chunk = H5Screate_simple(SPACE1_RANK, dims1, max_dims);
+ CHECK(sid_chunk, FAIL, "H5Screate_simple");
+
+ /*============================================
+ * Make sure we can use 0-dimension to create
+ * contiguous, chunked, compact, and external
+ * datasets, and also attribute.
+ *============================================
+ */
+ fid1 = H5Fcreate(ZEROFILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(fid1, FAIL, "H5Fcreate");
+
+ /*===================== Contiguous dataset =======================*/
+ plist_id = H5Pcreate(H5P_DATASET_CREATE);
+ CHECK(plist_id, FAIL, "H5Pcreate");
+
+ ret = H5Pset_alloc_time(plist_id, alloc_time);
+ CHECK(ret, FAIL, "H5Pset_alloc_time");
+
+ dset1 = H5Dcreate2(fid1, BASICDATASET, H5T_NATIVE_INT, sid1, H5P_DEFAULT, plist_id, H5P_DEFAULT);
+ CHECK(dset1, FAIL, "H5Dcreate2");
+
+ ret = H5Pclose(plist_id);
+ CHECK(ret, FAIL, "H5Pclose");
+
+ /* Write "nothing" to the dataset */
+ ret = H5Dwrite(dset1, H5T_NATIVE_INT, sid1, H5S_ALL, H5P_DEFAULT, wdata);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ ret = H5Fflush(fid1, H5F_SCOPE_GLOBAL);
+ CHECK(ret, FAIL, "H5Fflush");
+
+ /* Try reading from the dataset (make certain our buffer is unmodified) */
+ ret = H5Dread(dset1, H5T_NATIVE_INT, sid1, H5S_ALL, H5P_DEFAULT, rdata);
+ CHECK(ret, FAIL, "H5Dread");
+
+ /* Check results */
+ for (i = 0; i < SPACE1_DIM2; i++) {
+ for (j = 0; j < SPACE1_DIM3; j++) {
+ if (rdata[i][j] != 7) {
+ H5_FAILED();
+ HDprintf("element [%d][%d] is %d but should have been 7\n", i, j, rdata[i][j]);
+ }
+ }
+ }
+
+ /* Write "nothing" to the dataset (with type conversion :-) */
+ ret = H5Dwrite(dset1, H5T_NATIVE_SHORT, H5S_ALL, H5S_ALL, H5P_DEFAULT, wdata_short);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ ret = H5Fflush(fid1, H5F_SCOPE_GLOBAL);
+ CHECK(ret, FAIL, "H5Fflush");
+
+ /* Try reading from the dataset (make certain our buffer is unmodified) */
+ ret = H5Dread(dset1, H5T_NATIVE_SHORT, sid1, H5S_ALL, H5P_DEFAULT, rdata_short);
+ CHECK(ret, FAIL, "H5Dread");
+
+ /* Check results */
+ for (i = 0; i < SPACE1_DIM2; i++) {
+ for (j = 0; j < SPACE1_DIM3; j++) {
+ if (rdata_short[i][j] != 7) {
+ H5_FAILED();
+ HDprintf("element [%d][%d] is %d but should have been 7\n", i, j, rdata_short[i][j]);
+ }
+ }
+ }
+#ifndef NO_CHECK_SELECTION_BOUNDS
+ /* Select a hyperslab beyond its current dimension sizes, then try to write
+ * the data. It should fail. */
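+ /* The selection call itself succeeds; the out-of-bounds selection is only rejected at I/O time */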
+ ret = H5Sselect_hyperslab(sid1, H5S_SELECT_SET, start, NULL, count, NULL);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ H5E_BEGIN_TRY
+ {
+ ret = H5Dwrite(dset1, H5T_NATIVE_INT, H5S_ALL, sid1, H5P_DEFAULT, wdata);
+ }
+ H5E_END_TRY;
+ VERIFY(ret, FAIL, "H5Dwrite");
+#endif
+ /* Change to "none" selection */
+ ret = H5Sselect_none(sid1);
+ CHECK(ret, FAIL, "H5Sselect_none");
+#ifndef NO_CHECK_SELECTION_BOUNDS
+ /* Select a point beyond the dimension size, then try to write the data.
+ * It should fail. */
+ coord[0][0] = 2;
+ coord[0][1] = 5;
+ coord[0][2] = 3;
+ ret = H5Sselect_elements(sid1, H5S_SELECT_SET, (size_t)1, (const hsize_t *)coord);
+ CHECK(ret, FAIL, "H5Sselect_elements");
+
+ H5E_BEGIN_TRY
+ {
+ ret = H5Dwrite(dset1, H5T_NATIVE_INT, H5S_ALL, sid1, H5P_DEFAULT, &val);
+ }
+ H5E_END_TRY;
+ VERIFY(ret, FAIL, "H5Dwrite");
+#endif
+ /* Restore the selection to all */
+ ret = H5Sselect_all(sid1);
+ CHECK(ret, FAIL, "H5Sselect_all");
+
+ ret = H5Dclose(dset1);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /*=================== Chunked dataset ====================*/
+ plist_id = H5Pcreate(H5P_DATASET_CREATE);
+ CHECK(plist_id, FAIL, "H5Pcreate");
+
+ ret = H5Pset_chunk(plist_id, SPACE1_RANK, chunk_dims);
+ CHECK(ret, FAIL, "H5Pset_chunk");
+
+ /* ret = H5Pset_alloc_time(plist_id, alloc_time); */
+ /* CHECK(ret, FAIL, "H5Pset_alloc_time"); */
+
+ dset1 =
+ H5Dcreate2(fid1, BASICDATASET1, H5T_NATIVE_INT, sid_chunk, H5P_DEFAULT, plist_id, H5P_DEFAULT);
+ CHECK(dset1, FAIL, "H5Dcreate2");
+
+ /* Write "nothing" to the dataset */
+ ret = H5Dwrite(dset1, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, wdata);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ ret = H5Fflush(fid1, H5F_SCOPE_GLOBAL);
+ CHECK(ret, FAIL, "H5Fflush");
+
+ /* Try reading from the dataset (make certain our buffer is unmodified) */
+ ret = H5Dread(dset1, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, rdata);
+ CHECK(ret, FAIL, "H5Dread");
+
+ /* Check results */
+ for (i = 0; i < SPACE1_DIM2; i++)
+ for (j = 0; j < SPACE1_DIM3; j++) {
+ if (rdata[i][j] != 7) {
+ H5_FAILED();
+ HDprintf("element [%d][%d] is %d but should have been 7\n", i, j, rdata[i][j]);
+ }
+ }
+
+ /* Now extend the dataset to SPACE1_DIM1*SPACE1_DIM2*SPACE1_DIM3 and make sure
+ * we can write data to it */
+ extend_dims[0] = SPACE1_DIM1;
+ ret = H5Dset_extent(dset1, extend_dims);
+ CHECK(ret, FAIL, "H5Dset_extent");
+
+ ret = H5Dwrite(dset1, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, wdata_real);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ ret = H5Fflush(fid1, H5F_SCOPE_GLOBAL);
+ CHECK(ret, FAIL, "H5Fflush");
+
+ ret = H5Dread(dset1, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, rdata_real);
+ CHECK(ret, FAIL, "H5Dread");
+
+ /* Check results */
+ for (i = 0; i < SPACE1_DIM1; i++) {
+ for (j = 0; j < SPACE1_DIM2; j++) {
+ for (k = 0; k < SPACE1_DIM3; k++) {
+ if (rdata_real[i][j][k] != wdata_real[i][j][k]) {
+ H5_FAILED();
+ HDprintf("element [%d][%d][%d] is %d but should have been %d\n", i, j, k,
+ rdata_real[i][j][k], wdata_real[i][j][k]);
+ }
+ }
+ }
+ }
+
+ /* Now shrink the first dimension size of the dataset to 0 and make sure no data is in it */
+ extend_dims[0] = 0;
+ ret = H5Dset_extent(dset1, extend_dims);
+ CHECK(ret, FAIL, "H5Dset_extent");
+
+ ret = H5Fflush(fid1, H5F_SCOPE_GLOBAL);
+ CHECK(ret, FAIL, "H5Fflush");
+
+ /* Try reading from the dataset (make certain our buffer is unmodified) */
+ ret = H5Dread(dset1, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, rdata);
+ CHECK(ret, FAIL, "H5Dread");
+
+ /* Check results */
+ for (i = 0; i < SPACE1_DIM2; i++)
+ for (j = 0; j < SPACE1_DIM3; j++) {
+ if (rdata[i][j] != 7) {
+ H5_FAILED();
+ HDprintf("element [%d][%d] is %d but should have been 7\n", i, j, rdata[i][j]);
+ }
+ }
+#ifndef NO_CHECK_SELECTION_BOUNDS
+ /* Now extend the first dimension size of the dataset to SPACE1_DIM1*3 past the maximal size.
+ * It is supposed to fail. */
+ extend_dims[0] = SPACE1_DIM1 * 3;
+ H5E_BEGIN_TRY
+ {
+ ret = H5Dset_extent(dset1, extend_dims);
+ }
+ H5E_END_TRY;
+ VERIFY(ret, FAIL, "H5Dset_extent");
+#endif
+ ret = H5Pclose(plist_id);
+ CHECK(ret, FAIL, "H5Pclose");
+
+ ret = H5Dclose(dset1);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /*=================== Compact dataset =====================*/
+ plist_id = H5Pcreate(H5P_DATASET_CREATE);
+ CHECK(plist_id, FAIL, "H5Pcreate");
+
+ ret = H5Pset_layout(plist_id, H5D_COMPACT);
+ CHECK(ret, FAIL, "H5Pset_layout");
+
+ /* Don't set the allocation time for compact storage datasets (must be early) */
+
+ dset1 = H5Dcreate2(fid1, BASICDATASET2, H5T_NATIVE_INT, sid1, H5P_DEFAULT, plist_id, H5P_DEFAULT);
+ CHECK(dset1, FAIL, "H5Dcreate2");
+
+ /* Write "nothing" to the dataset */
+ ret = H5Dwrite(dset1, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, wdata);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ ret = H5Fflush(fid1, H5F_SCOPE_GLOBAL);
+ CHECK(ret, FAIL, "H5Fflush");
+
+ /* Try reading from the dataset (make certain our buffer is unmodified) */
+ ret = H5Dread(dset1, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, rdata);
+ CHECK(ret, FAIL, "H5Dread");
+
+ /* Check results */
+ for (i = 0; i < SPACE1_DIM2; i++)
+ for (j = 0; j < SPACE1_DIM3; j++) {
+ if (rdata[i][j] != 7) {
+ H5_FAILED();
+ HDprintf("element [%d][%d] is %d but should have been 7\n", i, j, rdata[i][j]);
+ }
+ }
+
+ ret = H5Pclose(plist_id);
+ CHECK(ret, FAIL, "H5Pclose");
+
+ ret = H5Dclose(dset1);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /*=========== Contiguous dataset with external storage ============*/
+ plist_id = H5Pcreate(H5P_DATASET_CREATE);
+ CHECK(plist_id, FAIL, "H5Pcreate");
+
+ /* Change the DCPL for contiguous layout with external storage. The size of the reserved
+ * space in the external file is the size of the dataset (zero because one dimension size is zero).
+ * There's no need to clean up the external file since the library doesn't create it
+ * until the data is written to it. */
+ ret = H5Pset_external(plist_id, EXTFILE_NAME, (off_t)0, (hsize_t)0);
+ CHECK(ret, FAIL, "H5Pset_external");
+
+ ret = H5Pset_alloc_time(plist_id, alloc_time);
+ CHECK(ret, FAIL, "H5Pset_alloc_time");
+
+ dset1 = H5Dcreate2(fid1, BASICDATASET3, H5T_NATIVE_INT, sid1, H5P_DEFAULT, plist_id, H5P_DEFAULT);
+ CHECK(dset1, FAIL, "H5Dcreate2");
+
+ /* Write "nothing" to the dataset */
+ ret = H5Dwrite(dset1, H5T_NATIVE_INT, sid1, H5S_ALL, H5P_DEFAULT, wdata);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ ret = H5Fflush(fid1, H5F_SCOPE_GLOBAL);
+ CHECK(ret, FAIL, "H5Fflush");
+
+ /* Try reading from the dataset (make certain our buffer is unmodified) */
+ ret = H5Dread(dset1, H5T_NATIVE_INT, sid1, H5S_ALL, H5P_DEFAULT, rdata);
+ CHECK(ret, FAIL, "H5Dread");
+
+ /* Check results */
+ for (i = 0; i < SPACE1_DIM2; i++) {
+ for (j = 0; j < SPACE1_DIM3; j++) {
+ if (rdata[i][j] != 7) {
+ H5_FAILED();
+ HDprintf("element [%d][%d] is %d but should have been 7\n", i, j, rdata[i][j]);
+ }
+ }
+ }
+
+ ret = H5Pclose(plist_id);
+ CHECK(ret, FAIL, "H5Pclose");
+
+ ret = H5Dclose(dset1);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /*=============== Create an attribute for the file ================*/
+ attr = H5Acreate2(fid1, NULLATTR, H5T_NATIVE_INT, sid1, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(attr, FAIL, "H5Acreate2");
+
+ /* Write "nothing" to the attribute */
+ ret = H5Awrite(attr, H5T_NATIVE_INT, wdata);
+ CHECK(ret, FAIL, "H5Awrite");
+
+ ret = H5Fflush(fid1, H5F_SCOPE_GLOBAL);
+ CHECK(ret, FAIL, "H5Fflush");
+
+ /* Try reading from the attribute (make certain our buffer is unmodified) */
+ ret = H5Aread(attr, H5T_NATIVE_INT, rdata);
+ CHECK(ret, FAIL, "H5Aread");
+
+ /* Check results */
+ for (i = 0; i < SPACE1_DIM2; i++) {
+ for (j = 0; j < SPACE1_DIM3; j++) {
+ if (rdata[i][j] != 7) {
+ H5_FAILED();
+ HDprintf("element [%d][%d] is %d but should have been 7\n", i, j, rdata[i][j]);
+ }
+ }
+ }
+
+ /* Write "nothing" to the attribute (with type conversion :-) */
+ ret = H5Awrite(attr, H5T_NATIVE_SHORT, wdata_short);
+ CHECK(ret, FAIL, "H5Awrite");
+
+ ret = H5Fflush(fid1, H5F_SCOPE_GLOBAL);
+ CHECK(ret, FAIL, "H5Fflush");
+
+ /* Try reading from the attribute (with type conversion :-) (make certain our buffer is unmodified) */
+ ret = H5Aread(attr, H5T_NATIVE_SHORT, rdata_short);
+ CHECK(ret, FAIL, "H5Aread");
+
+ /* Check results */
+ for (i = 0; i < SPACE1_DIM2; i++) {
+ for (j = 0; j < SPACE1_DIM3; j++) {
+ if (rdata_short[i][j] != 7) {
+ H5_FAILED();
+ HDprintf("element [%d][%d] is %d but should have been 7\n", i, j, rdata_short[i][j]);
+ }
+ }
+ }
+
+ /* Close attribute */
+ ret = H5Aclose(attr);
+ CHECK(ret, FAIL, "H5Aclose");
+
+ /*===============================================================
+ * Extend the dimension to make it a normal dataspace (3x15x13).
+ * Verify that data can be written to and read from the chunked
+ * dataset now.
+ *===============================================================
+ */
+ dims1[0] = SPACE1_DIM1;
+ ret = H5Sset_extent_simple(sid_chunk, SPACE1_RANK, dims1, max_dims);
+ CHECK(ret, FAIL, "H5Sset_extent_simple");
+
+ nelem = H5Sget_simple_extent_npoints(sid_chunk);
+ CHECK(nelem, FAIL, "H5Sget_simple_extent_npoints");
+ VERIFY(nelem, SPACE1_DIM1 * SPACE1_DIM2 * SPACE1_DIM3, "H5Sget_simple_extent_npoints");
+
+ rank = H5Sget_simple_extent_ndims(sid_chunk);
+ CHECK(rank, FAIL, "H5Sget_simple_extent_ndims");
+ VERIFY(rank, SPACE1_RANK, "H5Sget_simple_extent_ndims");
+
+ rank = H5Sget_simple_extent_dims(sid_chunk, tdims, NULL);
+ CHECK(rank, FAIL, "H5Sget_simple_extent_dims");
+ VERIFY(HDmemcmp(tdims, dims1, SPACE1_RANK * sizeof(hsize_t)), 0, "H5Sget_simple_extent_dims");
+
+ /* Set it to chunked dataset */
+ plist_id = H5Pcreate(H5P_DATASET_CREATE);
+ CHECK(plist_id, FAIL, "H5Pcreate");
+
+ ret = H5Pset_chunk(plist_id, SPACE1_RANK, chunk_dims);
+ CHECK(ret, FAIL, "H5Pset_chunk");
+
+ ret = H5Pset_alloc_time(plist_id, alloc_time);
+ CHECK(ret, FAIL, "H5Pset_alloc_time");
+
+ dset1 =
+ H5Dcreate2(fid1, BASICDATASET4, H5T_NATIVE_INT, sid_chunk, H5P_DEFAULT, plist_id, H5P_DEFAULT);
+ CHECK(dset1, FAIL, "H5Dcreate2");
+
+ ret = H5Dwrite(dset1, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, wdata_real);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ ret = H5Fflush(fid1, H5F_SCOPE_GLOBAL);
+ CHECK(ret, FAIL, "H5Fflush");
+
+ ret = H5Dread(dset1, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, rdata_real);
+ CHECK(ret, FAIL, "H5Dread");
+
+ /* Check results */
+ for (i = 0; i < SPACE1_DIM1; i++) {
+ for (j = 0; j < SPACE1_DIM2; j++) {
+ for (k = 0; k < SPACE1_DIM3; k++) {
+ if (rdata_real[i][j][k] != wdata_real[i][j][k]) {
+ H5_FAILED();
+ HDprintf("element [%d][%d][%d] is %d but should have been %d\n", i, j, k,
+ rdata_real[i][j][k], wdata_real[i][j][k]);
+ }
+ }
+ }
+ }
+
+ ret = H5Pclose(plist_id);
+ CHECK(ret, FAIL, "H5Pclose");
+
+ ret = H5Dclose(dset1);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Change the dimensions to make them zero size again (0x0x0). Verify that
+ * no element is in the dataspace. */
+ dims1[0] = dims1[1] = dims1[2] = 0;
+ ret = H5Sset_extent_simple(sid_chunk, SPACE1_RANK, dims1, NULL);
+ CHECK(ret, FAIL, "H5Sset_extent_simple");
+
+ /* Check that the dataspace actually has 0 elements */
+ nelem = H5Sget_simple_extent_npoints(sid_chunk);
+ VERIFY(nelem, 0, "H5Sget_simple_extent_npoints");
+
+ /* Check that the dataspace was created with an "all" selection */
+ sel_type = H5Sget_select_type(sid_chunk);
+ VERIFY(sel_type, H5S_SEL_ALL, "H5Sget_select_type");
+
+ /* Check that the dataspace has 0 elements selected */
+ nelem = H5Sget_select_npoints(sid_chunk);
+ VERIFY(nelem, 0, "H5Sget_select_npoints");
+
+ /* Change to "none" selection */
+ ret = H5Sselect_none(sid_chunk);
+ CHECK(ret, FAIL, "H5Sselect_none");
+
+ /* Check that the dataspace has 0 elements selected */
+ nelem = H5Sget_select_npoints(sid_chunk);
+ VERIFY(nelem, 0, "H5Sget_select_npoints");
+
+ ret = H5Sclose(sid_chunk);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ ret = H5Sclose(sid1);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ ret = H5Fclose(fid1);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /*============================================
+ * Reopen the file to check the dataspace
+ *============================================
+ */
+ fid1 = H5Fopen(ZEROFILE, H5F_ACC_RDONLY, H5P_DEFAULT);
+ CHECK(fid1, FAIL, "H5Fopen");
+
+ /* Reopen the chunked dataset */
+ dset1 = H5Dopen2(fid1, BASICDATASET1, H5P_DEFAULT);
+ CHECK(dset1, FAIL, "H5Dopen2");
+
+ /* Get the space of the dataset and query it */
+ sid1 = H5Dget_space(dset1);
+ CHECK(sid1, FAIL, "H5Dget_space");
+
+ /* Verify the class type of dataspace */
+ stype = H5Sget_simple_extent_type(sid1);
+ VERIFY(stype, H5S_SIMPLE, "H5Sget_simple_extent_type");
+
+ /* Verify there are zero elements in the dataspace */
+ nelem = H5Sget_simple_extent_npoints(sid1);
+ VERIFY(nelem, 0, "H5Sget_simple_extent_npoints");
+
+ /* Verify the dimension sizes are correct */
+ rank = H5Sget_simple_extent_dims(sid1, tdims, NULL);
+ CHECK(rank, FAIL, "H5Sget_simple_extent_dims");
+ VERIFY(tdims[0], 0, "H5Sget_simple_extent_dims");
+ VERIFY(tdims[1], SPACE1_DIM2, "H5Sget_simple_extent_dims");
+ VERIFY(tdims[2], SPACE1_DIM3, "H5Sget_simple_extent_dims");
+
+ /* Try reading from the dataset (make certain our buffer is unmodified) */
+ ret = H5Dread(dset1, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, rdata);
+ CHECK(ret, FAIL, "H5Dread");
+
+ /* Check results */
+ for (i = 0; i < SPACE1_DIM2; i++) {
+ for (j = 0; j < SPACE1_DIM3; j++) {
+ if (rdata[i][j] != 7) {
+ H5_FAILED();
+ HDprintf("element [%d][%d] is %d but should have been 7\n", i, j, rdata[i][j]);
+ }
+ }
+ }
+
+ /* Close the dataset and its dataspace */
+ ret = H5Dclose(dset1);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ ret = H5Sclose(sid1);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Open the attribute for the file */
+ attr = H5Aopen(fid1, NULLATTR, H5P_DEFAULT);
+ CHECK(attr, FAIL, "H5Aopen");
+
+ /* Get the dataspace of the attribute */
+ attr_sid = H5Aget_space(attr);
+ CHECK(attr_sid, FAIL, "H5Aget_space");
+
+ /* Verify the class type of dataspace */
+ stype = H5Sget_simple_extent_type(attr_sid);
+ VERIFY(stype, H5S_SIMPLE, "H5Sget_simple_extent_type");
+
+ /* Verify there are zero elements in the dataspace */
+ nelem = H5Sget_simple_extent_npoints(attr_sid);
+ VERIFY(nelem, 0, "H5Sget_simple_extent_npoints");
+
+ /* Try reading from the attribute (make certain our buffer is unmodified) */
+ ret = H5Aread(attr, H5T_NATIVE_SHORT, rdata_short);
+ CHECK(ret, FAIL, "H5Aread");
+
+ /* Check results */
+ for (i = 0; i < SPACE1_DIM2; i++) {
+ for (j = 0; j < SPACE1_DIM3; j++) {
+ if (rdata_short[i][j] != 7) {
+ H5_FAILED();
+ HDprintf("element [%d][%d] is %d but should have been 7\n", i, j, rdata_short[i][j]);
+ }
+ }
+ }
+
+ /* Close attribute */
+ ret = H5Aclose(attr);
+ CHECK(ret, FAIL, "H5Aclose");
+
+ /* Close the dataspace */
+ ret = H5Sclose(attr_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ ret = H5Fclose(fid1);
+ CHECK(ret, FAIL, "H5Fclose");
+ } /* end for */
+} /* test_h5s_zero_dim() */
+
+/****************************************************************
+**
+** test_h5s_encode(): Test H5S (dataspace) encoding and decoding.
+**
+** Note: See "RFC: H5Sencode/H5Sdecode Format Change".
+**
+****************************************************************/
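+/*
+ * The encode/decode round trip below follows the usual two-call pattern: a first
+ * H5Sencode2(sid, NULL, &size, fapl) call to query the required buffer size, an
+ * allocation of that many bytes, a second H5Sencode2(sid, buf, &size, fapl) call to
+ * fill the buffer, and finally H5Sdecode(buf) to reconstruct the dataspace.
+ */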
+static void
+test_h5s_encode(H5F_libver_t low, H5F_libver_t high)
+{
+ hid_t sid1, sid2, sid3; /* Dataspace ID */
+ hid_t decoded_sid1, decoded_sid2, decoded_sid3;
+ int rank; /* Logical rank of dataspace */
+ hid_t fapl = -1; /* File access property list ID */
+ hsize_t dims1[] = {SPACE1_DIM1, SPACE1_DIM2, SPACE1_DIM3};
+ size_t sbuf_size = 0, null_size = 0, scalar_size = 0;
+ unsigned char *sbuf = NULL, *null_sbuf = NULL, *scalar_buf = NULL;
+ hsize_t tdims[4]; /* Dimension array to test with */
+ hssize_t n; /* Number of dataspace elements */
+ hsize_t start[] = {0, 0, 0};
+ hsize_t stride[] = {2, 5, 3};
+ hsize_t count[] = {2, 2, 2};
+ hsize_t block[] = {1, 3, 1};
+ H5S_sel_type sel_type;
+ H5S_class_t space_type;
+ hssize_t nblocks;
+ hid_t ret_id; /* Generic hid_t return value */
+ herr_t ret; /* Generic return value */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing Dataspace Encoding and Decoding\n"));
+
+ /*-------------------------------------------------------------------------
+ * Test encoding and decoding of simple dataspace and hyperslab selection.
+ *-------------------------------------------------------------------------
+ */
+
+ /* Create the file access property list */
+ fapl = H5Pcreate(H5P_FILE_ACCESS);
+ CHECK(fapl, FAIL, "H5Pcreate");
+
+ /* Set low/high bounds in the fapl */
+ ret = H5Pset_libver_bounds(fapl, low, high);
+ CHECK(ret, FAIL, "H5Pset_libver_bounds");
+
+ /* Create the dataspace */
+ sid1 = H5Screate_simple(SPACE1_RANK, dims1, NULL);
+ CHECK(sid1, FAIL, "H5Screate_simple");
+
+ /* Set the hyperslab selection */
+ ret = H5Sselect_hyperslab(sid1, H5S_SELECT_SET, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* Encode simple dataspace in a buffer with the fapl setting */
+ ret = H5Sencode2(sid1, NULL, &sbuf_size, fapl);
+ CHECK(ret, FAIL, "H5Sencode2");
+
+ if (sbuf_size > 0) {
+ sbuf = (unsigned char *)HDcalloc((size_t)1, sbuf_size);
+ CHECK_PTR(sbuf, "HDcalloc");
+ }
+
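+ /* At this point sbuf has only been zero-filled by HDcalloc and does not yet contain
+ an encoded dataspace, so H5Sdecode() on it is expected to fail */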
+ /* Try decoding bogus buffer */
+ H5E_BEGIN_TRY
+ {
+ ret_id = H5Sdecode(sbuf);
+ }
+ H5E_END_TRY;
+ VERIFY(ret_id, FAIL, "H5Sdecode");
+
+ /* Encode the simple dataspace in a buffer with the fapl setting */
+ ret = H5Sencode2(sid1, sbuf, &sbuf_size, fapl);
+ CHECK(ret, FAIL, "H5Sencode2");
+
+ /* Decode from the dataspace buffer and return an object handle */
+ decoded_sid1 = H5Sdecode(sbuf);
+ CHECK(decoded_sid1, FAIL, "H5Sdecode");
+
+ /* Verify the decoded dataspace */
+ n = H5Sget_simple_extent_npoints(decoded_sid1);
+ CHECK(n, FAIL, "H5Sget_simple_extent_npoints");
+ VERIFY(n, SPACE1_DIM1 * SPACE1_DIM2 * SPACE1_DIM3, "H5Sget_simple_extent_npoints");
+
+ /* Retrieve and verify the dataspace rank */
+ rank = H5Sget_simple_extent_ndims(decoded_sid1);
+ CHECK(rank, FAIL, "H5Sget_simple_extent_ndims");
+ VERIFY(rank, SPACE1_RANK, "H5Sget_simple_extent_ndims");
+
+ /* Retrieve and verify the dataspace dimensions */
+ rank = H5Sget_simple_extent_dims(decoded_sid1, tdims, NULL);
+ CHECK(rank, FAIL, "H5Sget_simple_extent_dims");
+ VERIFY(HDmemcmp(tdims, dims1, SPACE1_RANK * sizeof(hsize_t)), 0, "H5Sget_simple_extent_dims");
+
+ /* Verify the type of dataspace selection */
+ sel_type = H5Sget_select_type(decoded_sid1);
+ VERIFY(sel_type, H5S_SEL_HYPERSLABS, "H5Sget_select_type");
+
+ /* Verify the number of hyperslab blocks */
+ nblocks = H5Sget_select_hyper_nblocks(decoded_sid1);
+ VERIFY(nblocks, 2 * 2 * 2, "H5Sget_select_hyper_nblocks");
+
+ /* Close the dataspaces */
+ ret = H5Sclose(sid1);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ ret = H5Sclose(decoded_sid1);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /*-------------------------------------------------------------------------
+ * Test encoding and decoding of null dataspace.
+ *-------------------------------------------------------------------------
+ */
+ sid2 = H5Screate(H5S_NULL);
+ CHECK(sid2, FAIL, "H5Screate");
+
+ /* Encode null dataspace in a buffer */
+ ret = H5Sencode2(sid2, NULL, &null_size, fapl);
+ CHECK(ret, FAIL, "H5Sencode2");
+
+ if (null_size > 0) {
+ null_sbuf = (unsigned char *)HDcalloc((size_t)1, null_size);
+ CHECK_PTR(null_sbuf, "HDcalloc");
+ }
+
+ /* Encode the null dataspace in the buffer */
+ ret = H5Sencode2(sid2, null_sbuf, &null_size, fapl);
+ CHECK(ret, FAIL, "H5Sencode2");
+
+ /* Decode from the dataspace buffer and return an object handle */
+ decoded_sid2 = H5Sdecode(null_sbuf);
+ CHECK(decoded_sid2, FAIL, "H5Sdecode");
+
+ /* Verify the decoded dataspace type */
+ space_type = H5Sget_simple_extent_type(decoded_sid2);
+ VERIFY(space_type, H5S_NULL, "H5Sget_simple_extent_type");
+
+ /* Close the dataspaces */
+ ret = H5Sclose(sid2);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ ret = H5Sclose(decoded_sid2);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /*-------------------------------------------------------------------------
+ * Test encoding and decoding of scalar dataspace.
+ *-------------------------------------------------------------------------
+ */
+ /* Create scalar dataspace */
+ sid3 = H5Screate(H5S_SCALAR);
+ CHECK(sid3, FAIL, "H5Screate");
+
+ /* Encode scalar dataspace in a buffer */
+ ret = H5Sencode2(sid3, NULL, &scalar_size, fapl);
+ CHECK(ret, FAIL, "H5Sencode2");
+
+ if (scalar_size > 0) {
+ scalar_buf = (unsigned char *)HDcalloc((size_t)1, scalar_size);
+ CHECK_PTR(scalar_buf, "HDcalloc");
+ }
+
+ /* Encode the scalar dataspace in the buffer */
+ ret = H5Sencode2(sid3, scalar_buf, &scalar_size, fapl);
+ CHECK(ret, FAIL, "H5Sencode2");
+
+ /* Decode from the dataspace buffer and return an object handle */
+ decoded_sid3 = H5Sdecode(scalar_buf);
+ CHECK(decoded_sid3, FAIL, "H5Sdecode");
+
+ /* Verify extent type */
+ space_type = H5Sget_simple_extent_type(decoded_sid3);
+ VERIFY(space_type, H5S_SCALAR, "H5Sget_simple_extent_type");
+
+ /* Verify decoded dataspace */
+ n = H5Sget_simple_extent_npoints(decoded_sid3);
+ CHECK(n, FAIL, "H5Sget_simple_extent_npoints");
+ VERIFY(n, 1, "H5Sget_simple_extent_npoints");
+
+ /* Retrieve and verify the dataspace rank */
+ rank = H5Sget_simple_extent_ndims(decoded_sid3);
+ CHECK(rank, FAIL, "H5Sget_simple_extent_ndims");
+ VERIFY(rank, 0, "H5Sget_simple_extent_ndims");
+
+ /* Close the dataspaces */
+ ret = H5Sclose(sid3);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ ret = H5Sclose(decoded_sid3);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close the file access property list */
+ ret = H5Pclose(fapl);
+ CHECK(ret, FAIL, "H5Pclose");
+
+ /* Release resources */
+ if (sbuf)
+ HDfree(sbuf);
+ if (null_sbuf)
+ HDfree(null_sbuf);
+ if (scalar_buf)
+ HDfree(scalar_buf);
+} /* test_h5s_encode() */
+
+#ifndef H5_NO_DEPRECATED_SYMBOLS
+
+/****************************************************************
+**
+** test_h5s_encode1(): Test H5S (dataspace) encoding and decoding
+** using the deprecated H5Sencode1() API.
+**
+****************************************************************/
+static void
+test_h5s_encode1(void)
+{
+ hid_t sid1, sid2, sid3; /* Dataspace ID */
+ hid_t decoded_sid1, decoded_sid2, decoded_sid3;
+ int rank; /* Logical rank of dataspace */
+ hsize_t dims1[] = {SPACE1_DIM1, SPACE1_DIM2, SPACE1_DIM3};
+ size_t sbuf_size = 0, null_size = 0, scalar_size = 0;
+ unsigned char *sbuf = NULL, *null_sbuf = NULL, *scalar_buf = NULL;
+ hsize_t tdims[4]; /* Dimension array to test with */
+ hssize_t n; /* Number of dataspace elements */
+ hsize_t start[] = {0, 0, 0};
+ hsize_t stride[] = {2, 5, 3};
+ hsize_t count[] = {2, 2, 2};
+ hsize_t block[] = {1, 3, 1};
+ H5S_sel_type sel_type;
+ H5S_class_t space_type;
+ hssize_t nblocks;
+ hid_t ret_id; /* Generic hid_t return value */
+ herr_t ret; /* Generic return value */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing Dataspace Encoding (H5Sencode1) and Decoding\n"));
+
+ /*-------------------------------------------------------------------------
+ * Test encoding and decoding of simple dataspace and hyperslab selection.
+ *-------------------------------------------------------------------------
+ */
+ /* Create the dataspace */
+ sid1 = H5Screate_simple(SPACE1_RANK, dims1, NULL);
+ CHECK(sid1, FAIL, "H5Screate_simple");
+
+ /* Set the hyperslab selection */
+ ret = H5Sselect_hyperslab(sid1, H5S_SELECT_SET, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* Encode simple dataspace in a buffer */
+ ret = H5Sencode1(sid1, NULL, &sbuf_size);
+ CHECK(ret, FAIL, "H5Sencode1");
+
+ if (sbuf_size > 0) {
+ sbuf = (unsigned char *)HDcalloc((size_t)1, sbuf_size);
+ CHECK_PTR(sbuf, "HDcalloc");
+ }
+
+ /* Try decoding bogus buffer */
+ H5E_BEGIN_TRY
+ {
+ ret_id = H5Sdecode(sbuf);
+ }
+ H5E_END_TRY;
+ VERIFY(ret_id, FAIL, "H5Sdecode");
+
+ /* Encode the simple dataspace in a buffer */
+ ret = H5Sencode1(sid1, sbuf, &sbuf_size);
+ CHECK(ret, FAIL, "H5Sencode1");
+
+ /* Decode from the dataspace buffer and return an object handle */
+ decoded_sid1 = H5Sdecode(sbuf);
+ CHECK(decoded_sid1, FAIL, "H5Sdecode");
+
+ /* Verify the decoded dataspace */
+ n = H5Sget_simple_extent_npoints(decoded_sid1);
+ CHECK(n, FAIL, "H5Sget_simple_extent_npoints");
+ VERIFY(n, SPACE1_DIM1 * SPACE1_DIM2 * SPACE1_DIM3, "H5Sget_simple_extent_npoints");
+
+ /* Retrieve and verify the dataspace rank */
+ rank = H5Sget_simple_extent_ndims(decoded_sid1);
+ CHECK(rank, FAIL, "H5Sget_simple_extent_ndims");
+ VERIFY(rank, SPACE1_RANK, "H5Sget_simple_extent_ndims");
+
+ /* Retrieve and verify the dataspace dimensions */
+ rank = H5Sget_simple_extent_dims(decoded_sid1, tdims, NULL);
+ CHECK(rank, FAIL, "H5Sget_simple_extent_dims");
+ VERIFY(HDmemcmp(tdims, dims1, SPACE1_RANK * sizeof(hsize_t)), 0, "H5Sget_simple_extent_dims");
+
+ /* Verify the type of dataspace selection */
+ sel_type = H5Sget_select_type(decoded_sid1);
+ VERIFY(sel_type, H5S_SEL_HYPERSLABS, "H5Sget_select_type");
+
+ /* Verify the number of hyperslab blocks */
+ nblocks = H5Sget_select_hyper_nblocks(decoded_sid1);
+ VERIFY(nblocks, 2 * 2 * 2, "H5Sget_select_hyper_nblocks");
+
+ /* Close the dataspaces */
+ ret = H5Sclose(sid1);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ ret = H5Sclose(decoded_sid1);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /*-------------------------------------------------------------------------
+ * Test encoding and decoding of null dataspace.
+ *-------------------------------------------------------------------------
+ */
+ sid2 = H5Screate(H5S_NULL);
+ CHECK(sid2, FAIL, "H5Screate");
+
+ /* Encode null dataspace in a buffer */
+ ret = H5Sencode1(sid2, NULL, &null_size);
+ CHECK(ret, FAIL, "H5Sencode1");
+
+ if (null_size > 0) {
+ null_sbuf = (unsigned char *)HDcalloc((size_t)1, null_size);
+ CHECK_PTR(null_sbuf, "HDcalloc");
+ }
+
+ /* Encode the null dataspace in the buffer */
+ ret = H5Sencode1(sid2, null_sbuf, &null_size);
+ CHECK(ret, FAIL, "H5Sencode1");
+
+ /* Decode from the dataspace buffer and return an object handle */
+ decoded_sid2 = H5Sdecode(null_sbuf);
+ CHECK(decoded_sid2, FAIL, "H5Sdecode");
+
+ /* Verify the decoded dataspace type */
+ space_type = H5Sget_simple_extent_type(decoded_sid2);
+ VERIFY(space_type, H5S_NULL, "H5Sget_simple_extent_type");
+
+ /* Close the dataspaces */
+ ret = H5Sclose(sid2);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ ret = H5Sclose(decoded_sid2);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /*-------------------------------------------------------------------------
+ * Test encoding and decoding of scalar dataspace.
+ *-------------------------------------------------------------------------
+ */
+ /* Create scalar dataspace */
+ sid3 = H5Screate(H5S_SCALAR);
+ CHECK(sid3, FAIL, "H5Screate");
+
+ /* Encode scalar dataspace in a buffer */
+ ret = H5Sencode1(sid3, NULL, &scalar_size);
+ CHECK(ret, FAIL, "H5Sencode1");
+
+ if (scalar_size > 0) {
+ scalar_buf = (unsigned char *)HDcalloc((size_t)1, scalar_size);
+ CHECK_PTR(scalar_buf, "HDcalloc");
+ }
+
+ /* Encode the scalar dataspace in the buffer */
+ ret = H5Sencode1(sid3, scalar_buf, &scalar_size);
+ CHECK(ret, FAIL, "H5Sencode1");
+
+ /* Decode from the dataspace buffer and return an object handle */
+ decoded_sid3 = H5Sdecode(scalar_buf);
+ CHECK(decoded_sid3, FAIL, "H5Sdecode");
+
+ /* Verify extent type */
+ space_type = H5Sget_simple_extent_type(decoded_sid3);
+ VERIFY(space_type, H5S_SCALAR, "H5Sget_simple_extent_type");
+
+ /* Verify decoded dataspace */
+ n = H5Sget_simple_extent_npoints(decoded_sid3);
+ CHECK(n, FAIL, "H5Sget_simple_extent_npoints");
+ VERIFY(n, 1, "H5Sget_simple_extent_npoints");
+
+ /* Retrieve and verify the dataspace rank */
+ rank = H5Sget_simple_extent_ndims(decoded_sid3);
+ CHECK(rank, FAIL, "H5Sget_simple_extent_ndims");
+ VERIFY(rank, 0, "H5Sget_simple_extent_ndims");
+
+ /* Close the dataspaces */
+ ret = H5Sclose(sid3);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ ret = H5Sclose(decoded_sid3);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Release resources */
+ if (sbuf)
+ HDfree(sbuf);
+ if (null_sbuf)
+ HDfree(null_sbuf);
+ if (scalar_buf)
+ HDfree(scalar_buf);
+} /* test_h5s_encode1() */
+
+#endif /* H5_NO_DEPRECATED_SYMBOLS */
+
+/****************************************************************
+**
+** test_h5s_check_encoding():
+** Helper routine that verifies H5Sencode2() encodes a dataspace
+** as specified in the RFC for the library version bounds set in
+** the file access property list.
+** See "RFC: H5Sencode/H5Sdecode Format Change".
+**
+** This routine is used by:
+** test_h5s_encode_regular_hyper()
+** test_h5s_encode_irregular_hyper()
+** test_h5s_encode_points()
+**
+****************************************************************/
+static herr_t
+test_h5s_check_encoding(hid_t in_fapl, hid_t in_sid, uint32_t expected_version, uint8_t expected_enc_size,
+ hbool_t expected_to_fail)
+{
+ char *buf = NULL; /* Pointer to the encoded buffer */
+ size_t buf_size; /* Size of the encoded buffer */
+ hid_t d_sid = -1; /* The decoded dataspace ID */
+ htri_t check;
+ hsize_t in_low_bounds[1]; /* The low bounds for the selection for in_sid */
+ hsize_t in_high_bounds[1]; /* The high bounds for the selection for in_sid */
+ hsize_t d_low_bounds[1]; /* The low bounds for the selection for d_sid */
+ hsize_t d_high_bounds[1]; /* The high bounds for the selection for d_sid */
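+ /* Note: one-element bounds arrays are sufficient here because the callers listed
+ above only pass 1-D dataspace selections to this helper */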
+ herr_t ret; /* Return value */
+
+ /* Get buffer size for encoding with the format setting in in_fapl */
+ H5E_BEGIN_TRY
+ {
+ ret = H5Sencode2(in_sid, NULL, &buf_size, in_fapl);
+ }
+ H5E_END_TRY
+
+ if (expected_to_fail) {
+ VERIFY(ret, FAIL, "H5Sencode2");
+ }
+ else {
+
+ CHECK(ret, FAIL, "H5Sencode2");
+
+ /* Allocate the buffer for encoding */
+ buf = (char *)HDmalloc(buf_size);
+ CHECK_PTR(buf, "HDmalloc");
+
+ /* Encode according to the setting in in_fapl */
+ ret = H5Sencode2(in_sid, buf, &buf_size, in_fapl);
+ CHECK(ret, FAIL, "H5Sencode2");
+
+ /* Decode the buffer */
+ d_sid = H5Sdecode(buf);
+ CHECK(d_sid, FAIL, "H5Sdecode");
+
+ /* Verify the number of selected points for in_sid and d_sid */
+ VERIFY(H5Sget_select_npoints(in_sid), H5Sget_select_npoints(d_sid), "Compare npoints");
+
+ /* Verify if the two dataspace selections (in_sid, d_sid) are the same shape */
+ check = H5Sselect_shape_same(in_sid, d_sid);
+ VERIFY(check, TRUE, "H5Sselect_shape_same");
+
+ /* Compare the starting/ending coordinates of the bounding box for in_sid and d_sid */
+ ret = H5Sget_select_bounds(in_sid, in_low_bounds, in_high_bounds);
+ CHECK(ret, FAIL, "H5Sget_select_bounds");
+ ret = H5Sget_select_bounds(d_sid, d_low_bounds, d_high_bounds);
+ CHECK(ret, FAIL, "H5Sget_select_bounds");
+ VERIFY(in_low_bounds[0], d_low_bounds[0], "Compare selection low bounds");
+ VERIFY(in_high_bounds[0], d_high_bounds[0], "Compare selection high bounds");
+
+ /*
+ * See "RFC: H5Sencode/H5Sdecode Format Change" for the verification of:
+ * H5S_SEL_POINTS:
+ * --the expected version for point selection info
+ * --the expected encoded size (version 2 point selection info)
+ * H5S_SEL_HYPERSLABS:
+ * --the expected version for hyperslab selection info
+ * --the expected encoded size (version 3 hyperslab selection info)
+ */
+
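+ /* Note: the hard-coded offsets below (bytes 35, 39 and 40) assume the encoded
+ buffer layout described in the RFC, where the selection-info version and its
+ encoded size follow the fixed-size H5Sencode2 header; they would need updating
+ if that header layout ever changes */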
+ if (H5Sget_select_type(in_sid) == H5S_SEL_POINTS) {
+
+ /* Verify the version */
+ VERIFY((uint32_t)buf[35], expected_version, "Version for point selection");
+
+ /* Verify the encoded size for version 2 */
+ if (expected_version == 2)
+ VERIFY((uint8_t)buf[39], expected_enc_size, "Encoded size of point selection info");
+ }
+
+ if (H5Sget_select_type(in_sid) == H5S_SEL_HYPERSLABS) {
+
+ /* Verify the version */
+ VERIFY((uint32_t)buf[35], expected_version, "Version for hyperslab selection info");
+
+ /* Verify the encoded size for version 3 */
+ if (expected_version == 3)
+ VERIFY((uint8_t)buf[40], expected_enc_size, "Encoded size of selection info");
+
+ } /* hyperslab selection */
+
+ ret = H5Sclose(d_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+ if (buf)
+ HDfree(buf);
+ }
+
+ return (0);
+
+} /* test_h5s_check_encoding */
+
+/****************************************************************
+**
+** test_h5s_encode_regular_hyper():
+** This test verifies that H5Sencode2() works as specified in
+** the RFC for regular hyperslabs.
+** See "RFC: H5Sencode/H5Sdecode Format Change".
+**
+****************************************************************/
+static void
+test_h5s_encode_regular_hyper(H5F_libver_t low, H5F_libver_t high)
+{
+ hid_t fapl = -1; /* File access property list ID */
+ hid_t sid = -1; /* Dataspace ID */
+ hsize_t numparticles = 8388608; /* Used to calculate dimension size */
+ unsigned num_dsets = 513; /* Used to calculate dimension size */
+ hsize_t total_particles = numparticles * num_dsets;
+ hsize_t vdsdims[1] = {total_particles}; /* Dimension size */
+ hsize_t start, stride, count, block; /* Selection info */
+ unsigned config; /* Testing configuration */
+ unsigned unlim; /* H5S_UNLIMITED setting or not */
+ herr_t ret; /* Generic return value */
+ uint32_t expected_version = 0; /* Expected version for selection info */
+ uint8_t expected_enc_size = 0; /* Expected encoded size for selection info */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing Dataspace encoding of regular hyperslabs\n"));
+
+ /* Create the file access property list */
+ fapl = H5Pcreate(H5P_FILE_ACCESS);
+ CHECK(fapl, FAIL, "H5Pcreate");
+
+ /* Set the low/high bounds in the fapl */
+ ret = H5Pset_libver_bounds(fapl, low, high);
+ CHECK(ret, FAIL, "H5Pset_libver_bounds");
+
+ /* Create the dataspace */
+ sid = H5Screate_simple(1, vdsdims, NULL);
+ CHECK(sid, FAIL, "H5Screate_simple");
+
+ /* Testing with each configuration */
+ for (config = CONFIG_16; config <= CONFIG_32; config++) {
+ hbool_t expected_to_fail = FALSE;
+
+ /* Testing with unlimited or not */
+ for (unlim = 0; unlim <= 1; unlim++) {
+ start = 0;
+ count = unlim ? H5S_UNLIMITED : 2;
+
+ if ((high <= H5F_LIBVER_V18) && (unlim || config == CONFIG_32))
+ expected_to_fail = TRUE;
+
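+ /* Expected selection-info version per the RFC: 3 when the low bound is v112 or
+ later, 1 for the bounded CONFIG_16 case, and 2 otherwise (unlimited count or
+ the CONFIG_32 case) */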
+ if (low >= H5F_LIBVER_V112)
+ expected_version = 3;
+ else if (config == CONFIG_16 && !unlim)
+ expected_version = 1;
+ else
+ expected_version = 2;
+
+ /* test 1 */
+ switch (config) {
+ case CONFIG_16:
+ stride = POWER16 - 1;
+ block = 4;
+ expected_enc_size = (uint8_t)(expected_version == 3 ? 2 : 4);
+ break;
+ case CONFIG_32:
+ stride = POWER32 - 1;
+ block = 4;
+ expected_enc_size = (uint8_t)(expected_version == 3 ? 4 : 8);
+
+ break;
+ default:
+ HDassert(0);
+ break;
+ } /* end switch */
+
+ /* Set the hyperslab selection */
+ ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, &start, &stride, &count, &block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* Verify the version and encoded size expected for this configuration */
+ ret = test_h5s_check_encoding(fapl, sid, expected_version, expected_enc_size, expected_to_fail);
+ CHECK(ret, FAIL, "test_h5s_check_encoding");
+
+ /* test 2 */
+ switch (config) {
+ case CONFIG_16:
+ stride = POWER16 - 1;
+ block = POWER16 - 2;
+ expected_enc_size = (uint8_t)(expected_version == 3 ? 2 : 4);
+ break;
+ case CONFIG_32:
+ stride = POWER32 - 1;
+ block = POWER32 - 2;
+ expected_enc_size = (uint8_t)(expected_version == 3 ? 4 : 8);
+ break;
+ default:
+ HDassert(0);
+ break;
+ } /* end switch */
+
+ /* Set the hyperslab selection */
+ ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, &start, &stride, &count, &block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* Verify the version and encoded size for this configuration */
+ ret = test_h5s_check_encoding(fapl, sid, expected_version, expected_enc_size, expected_to_fail);
+ CHECK(ret, FAIL, "test_h5s_check_encoding");
+
+ /* test 3 */
+ switch (config) {
+ case CONFIG_16:
+ stride = POWER16 - 1;
+ block = POWER16 - 1;
+ expected_enc_size = 4;
+ break;
+ case CONFIG_32:
+ stride = POWER32 - 1;
+ block = POWER32 - 1;
+ expected_enc_size = 8;
+ break;
+ default:
+ HDassert(0);
+ break;
+ }
+
+ /* Set the hyperslab selection */
+ ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, &start, &stride, &count, &block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* Verify the version and encoded size expected for this configuration */
+ ret = test_h5s_check_encoding(fapl, sid, expected_version, expected_enc_size, expected_to_fail);
+ CHECK(ret, FAIL, "test_h5s_check_encoding");
+
+ /* test 4 */
+ switch (config) {
+ case CONFIG_16:
+ stride = POWER16;
+ block = POWER16 - 2;
+ expected_enc_size = 4;
+ break;
+ case CONFIG_32:
+ stride = POWER32;
+ block = POWER32 - 2;
+ expected_enc_size = 8;
+ break;
+ default:
+ HDassert(0);
+ break;
+ } /* end switch */
+
+ /* Set the hyperslab selection */
+ ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, &start, &stride, &count, &block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* Verify the version and encoded size expected for this configuration */
+ ret = test_h5s_check_encoding(fapl, sid, expected_version, expected_enc_size, expected_to_fail);
+ CHECK(ret, FAIL, "test_h5s_check_encoding");
+
+ /* test 5 */
+ switch (config) {
+ case CONFIG_16:
+ stride = POWER16;
+ block = 1;
+ expected_enc_size = 4;
+ break;
+ case CONFIG_32:
+ stride = POWER32;
+ block = 1;
+ expected_enc_size = 8;
+ break;
+ default:
+ HDassert(0);
+ break;
+ }
+
+ /* Set the hyperslab selection */
+ ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, &start, &stride, &count, &block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* Verify the version and encoded size expected for this configuration */
+ ret = test_h5s_check_encoding(fapl, sid, expected_version, expected_enc_size, expected_to_fail);
+ CHECK(ret, FAIL, "test_h5s_check_encoding");
+
+ } /* for unlim */
+ } /* for config */
+
+ ret = H5Sclose(sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ ret = H5Pclose(fapl);
+ CHECK(ret, FAIL, "H5Pclose");
+
+} /* test_h5s_encode_regular_hyper() */
+
+/****************************************************************
+**
+** test_h5s_encode_irregular_hyper():
+** This test verifies that H5Sencode2() works as specified in
+** the RFC for irregular hyperslabs.
+** See "RFC: H5Sencode/H5Sdecode Format Change".
+**
+****************************************************************/
+static void
+test_h5s_encode_irregular_hyper(H5F_libver_t low, H5F_libver_t high)
+{
+ hid_t fapl = -1; /* File access property list ID */
+ hid_t sid; /* Dataspace ID */
+ hsize_t numparticles = 8388608; /* Used to calculate dimension size */
+ unsigned num_dsets = 513; /* Used to calculate dimension size */
+ hsize_t total_particles = numparticles * num_dsets;
+ hsize_t vdsdims[1] = {total_particles}; /* Dimension size */
+ hsize_t start, stride, count, block; /* Selection info */
+ htri_t is_regular; /* Is this a regular hyperslab */
+ unsigned config; /* Testing configuration */
+ herr_t ret; /* Generic return value */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing Dataspace encoding of irregular hyperslabs\n"));
+
+ /* Create the file access property list */
+ fapl = H5Pcreate(H5P_FILE_ACCESS);
+ CHECK(fapl, FAIL, "H5Pcreate");
+
+ /* Set the low/high bounds in the fapl */
+ ret = H5Pset_libver_bounds(fapl, low, high);
+ CHECK(ret, FAIL, "H5Pset_libver_bounds");
+
+ /* Create the dataspace */
+ sid = H5Screate_simple(1, vdsdims, NULL);
+ CHECK(sid, FAIL, "H5Screate_simple");
+
+ /* Testing with each configuration */
+ for (config = CONFIG_8; config <= CONFIG_32; config++) {
+ hbool_t expected_to_fail = FALSE; /* Whether H5Sencode2 is expected to fail */
+ uint32_t expected_version = 0; /* Expected version for selection info */
+ uint32_t expected_enc_size = 0; /* Expected encoded size for selection info */
+
+ start = 0;
+ count = 2;
+ block = 4;
+
+ /* H5Sencode2 is expected to fail for library version v110 and below
+ when the selection exceeds the 32-bit integer limit */
+ if (high <= H5F_LIBVER_V110 && config == CONFIG_32)
+ expected_to_fail = TRUE;
+
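+ /* Per the RFC, version 2 of the hyperslab selection info only describes regular
+ hyperslabs, so an irregular selection is expected to encode as either version 1
+ or version 3 */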
+ if (low >= H5F_LIBVER_V112 || config == CONFIG_32)
+ expected_version = 3;
+ else
+ expected_version = 1;
+
+ switch (config) {
+ case CONFIG_8:
+ stride = POWER8 - 2;
+ break;
+
+ case CONFIG_16:
+ stride = POWER16 - 2;
+ break;
+
+ case CONFIG_32:
+ stride = POWER32 - 2;
+ break;
+
+ default:
+ HDassert(0);
+ break;
+ }
+
+ /* Set the hyperslab selection */
+ ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, &start, &stride, &count, &block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ start = 8;
+ count = 5;
+ block = 2;
+
+ switch (config) {
+ case CONFIG_8:
+ stride = POWER8;
+ expected_enc_size = expected_version == 3 ? 2 : 4;
+ break;
+
+ case CONFIG_16:
+ stride = POWER16;
+ expected_enc_size = 4;
+ break;
+
+ case CONFIG_32:
+ stride = POWER32;
+ expected_enc_size = 8;
+ break;
+
+ default:
+ HDassert(0);
+ break;
+ }
+
+ /* Set the hyperslab selection */
+ ret = H5Sselect_hyperslab(sid, H5S_SELECT_OR, &start, &stride, &count, &block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* Should be irregular hyperslab */
+ is_regular = H5Sis_regular_hyperslab(sid);
+ VERIFY(is_regular, FALSE, "H5Sis_regular_hyperslab");
+
+ /* Verify the version and encoded size expected for the configuration */
+ HDassert(expected_enc_size <= 255);
+ ret = test_h5s_check_encoding(fapl, sid, expected_version, (uint8_t)expected_enc_size,
+ expected_to_fail);
+ CHECK(ret, FAIL, "test_h5s_check_encoding");
+
+ } /* for config */
+
+ ret = H5Sclose(sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+} /* test_h5s_encode_irregular_hyper() */
+
+/****************************************************************
+**
+** test_h5s_encode_points():
+** This test verifies that H5Sencode2() works as specified in
+** the RFC for point selection.
+** See "RFC: H5Sencode/H5Sdecode Format Change".
+**
+****************************************************************/
+static void
+test_h5s_encode_points(H5F_libver_t low, H5F_libver_t high)
+{
+ hid_t fapl = -1; /* File access property list ID */
+ hid_t sid; /* Dataspace ID */
+ hsize_t numparticles = 8388608; /* Used to calculate dimension size */
+ unsigned num_dsets = 513; /* Used to calculate dimension size */
+ hsize_t total_particles = numparticles * num_dsets;
+ hsize_t vdsdims[1] = {total_particles}; /* Dimension size */
+ hsize_t coord[4]; /* The point coordinates */
+ herr_t ret; /* Generic return value */
+ hbool_t expected_to_fail = FALSE; /* Expected to fail or not */
+ uint32_t expected_version = 0; /* Expected version for selection info */
+ uint8_t expected_enc_size = 0; /* Expected encoded size of selection info */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing Dataspace encoding of points selection\n"));
+
+ /* Create the file access property list */
+ fapl = H5Pcreate(H5P_FILE_ACCESS);
+ CHECK(fapl, FAIL, "H5Pcreate");
+
+ /* Set the low/high bounds in the fapl */
+ ret = H5Pset_libver_bounds(fapl, low, high);
+ CHECK(ret, FAIL, "H5Pset_libver_bounds");
+
+ /* Create the dataspace */
+ sid = H5Screate_simple(1, vdsdims, NULL);
+ CHECK(sid, FAIL, "H5Screate_simple");
+
+ /* test 1 */
+ coord[0] = 5;
+ coord[1] = 15;
+ coord[2] = POWER16;
+ coord[3] = 19;
+ ret = H5Sselect_elements(sid, H5S_SELECT_SET, (size_t)4, coord);
+ CHECK(ret, FAIL, "H5Sselect_elements");
+
+ expected_to_fail = FALSE;
+ expected_enc_size = 4;
+ expected_version = 1;
+
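+ /* Point selection info defaults to version 1; a low bound of v112 or later forces
+ the newer encoding, which is version 2 per the RFC */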
+ if (low >= H5F_LIBVER_V112)
+ expected_version = 2;
+
+ /* Verify the version and encoded size expected for the configuration */
+ ret = test_h5s_check_encoding(fapl, sid, expected_version, expected_enc_size, expected_to_fail);
+ CHECK(ret, FAIL, "test_h5s_check_encoding");
+
+ /* test 2 */
+ coord[0] = 5;
+ coord[1] = 15;
+ coord[2] = POWER32 - 1;
+ coord[3] = 19;
+ ret = H5Sselect_elements(sid, H5S_SELECT_SET, (size_t)4, coord);
+ CHECK(ret, FAIL, "H5Sselect_elements");
+
+ /* Expected result same as test 1 */
+ ret = test_h5s_check_encoding(fapl, sid, expected_version, expected_enc_size, expected_to_fail);
+ CHECK(ret, FAIL, "test_h5s_check_encoding");
+
+ /* test 3 */
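+ /* A coordinate of POWER32 + 1 no longer fits in 4 bytes, so (per the RFC) it needs
+ the 8-byte version 2 point encoding, which is only available when the high bound
+ is v112 or later; older bounds are expected to make H5Sencode2 fail */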
+ if (high <= H5F_LIBVER_V110)
+ expected_to_fail = TRUE;
+
+ if (high >= H5F_LIBVER_V112) {
+ expected_version = 2;
+ expected_enc_size = 8;
+ }
+
+ coord[0] = 5;
+ coord[1] = 15;
+ coord[2] = POWER32 + 1;
+ coord[3] = 19;
+ ret = H5Sselect_elements(sid, H5S_SELECT_SET, (size_t)4, coord);
+ CHECK(ret, FAIL, "H5Sselect_elements");
+
+ /* Verify the version and encoded size expected for the configuration */
+ ret = test_h5s_check_encoding(fapl, sid, expected_version, expected_enc_size, expected_to_fail);
+ CHECK(ret, FAIL, "test_h5s_check_encoding");
+
+ /* Close the dataspace */
+ ret = H5Sclose(sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+} /* test_h5s_encode_points() */
+
+/****************************************************************
+**
+** test_h5s_encode_length():
+** Test to verify HDFFV-10271 is fixed.
+** Verify that version 2 hyperslab encoding length is correct.
+**
+** See "RFC: H5Sencode/H5Sdecode Format Change" for the
+** description of the encoding format.
+**
+****************************************************************/
+static void
+test_h5s_encode_length(void)
+{
+ hid_t sid; /* Dataspace ID */
+ hid_t decoded_sid; /* Dataspace ID from H5Sdecode2 */
+ size_t sbuf_size = 0; /* Buffer size for H5Sencode2/1 */
+ unsigned char *sbuf = NULL; /* Buffer for H5Sencode2/1 */
+ hsize_t dims[1] = {500}; /* Dimension size */
+ hsize_t start, count, block, stride; /* Hyperslab selection specifications */
+ herr_t ret; /* Generic return value */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing Version 2 Hyperslab Encoding Length is correct\n"));
+
+ /* Create dataspace */
+ sid = H5Screate_simple(1, dims, NULL);
+ CHECK(sid, FAIL, "H5Screate_simple");
+
+ /* Setting H5S_UNLIMITED in count will use version 2 for hyperslab encoding */
+ start = 0;
+ stride = 10;
+ block = 4;
+ count = H5S_UNLIMITED;
+
+ /* Set hyperslab selection */
+ ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, &start, &stride, &count, &block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* Encode simple dataspace in a buffer */
+ ret = H5Sencode2(sid, NULL, &sbuf_size, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Sencode2");
+
+ /* Allocate the buffer */
+ if (sbuf_size > 0) {
+ sbuf = (unsigned char *)HDcalloc((size_t)1, sbuf_size);
+ CHECK_PTR(sbuf, "HDcalloc");
+ }
+
+ /* Encode the dataspace */
+ ret = H5Sencode2(sid, sbuf, &sbuf_size, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Sencode2");
+
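+ /* Per the RFC layout assumed here, byte 35 of the encoded buffer holds the
+ hyperslab selection-info version and byte 40 holds the version 2 encoded length;
+ HDFFV-10271 was a bug in that stored length */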
+ /* Verify that the length stored at this location in the buffer is correct */
+ VERIFY((uint32_t)sbuf[40], 36, "Length for encoding version 2");
+ VERIFY((uint32_t)sbuf[35], 2, "Hyperslab encoding version is 2");
+
+ /* Decode from the dataspace buffer and return an object handle */
+ decoded_sid = H5Sdecode(sbuf);
+ CHECK(decoded_sid, FAIL, "H5Sdecode");
+
+ /* Verify that the original and the decoded dataspace are equal */
+ VERIFY(H5Sget_select_npoints(sid), H5Sget_select_npoints(decoded_sid), "Compare npoints");
+
+ /* Close the decoded dataspace */
+ ret = H5Sclose(decoded_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Free the buffer */
+ if (sbuf)
+ HDfree(sbuf);
+
+ /* Close the original dataspace */
+ ret = H5Sclose(sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+} /* test_h5s_encode_length() */
+
+/****************************************************************
+**
+** test_h5s_scalar_write(): Test scalar H5S (dataspace) writing code.
+**
+****************************************************************/
+static void
+test_h5s_scalar_write(void)
+{
+ hid_t fid1; /* HDF5 File IDs */
+ hid_t dataset; /* Dataset ID */
+ hid_t sid1; /* Dataspace ID */
+ int rank; /* Logical rank of dataspace */
+ hsize_t tdims[4]; /* Dimension array to test with */
+ hssize_t n; /* Number of dataspace elements */
+ H5S_class_t ext_type; /* Extent type */
+ herr_t ret; /* Generic return value */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing Scalar Dataspace Manipulation during Writing\n"));
+
+ /* Create file */
+ fid1 = H5Fcreate(DATAFILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(fid1, FAIL, "H5Fcreate");
+
+ /* Verify that creating a dataspace with a non-zero rank and NULL dimensions fails. */
+ H5E_BEGIN_TRY
+ {
+ sid1 = H5Screate_simple(SPACE1_RANK, NULL, NULL);
+ }
+ H5E_END_TRY
+ VERIFY(sid1, FAIL, "H5Screate_simple");
+
+ /* Create scalar dataspace */
+ sid1 = H5Screate_simple(SPACE3_RANK, NULL, NULL);
+ CHECK(sid1, FAIL, "H5Screate_simple");
+
+ /* Retrieve the number of elements in the dataspace selection */
+ n = H5Sget_simple_extent_npoints(sid1);
+ CHECK(n, FAIL, "H5Sget_simple_extent_npoints");
+ VERIFY(n, 1, "H5Sget_simple_extent_npoints");
+
+ /* Get the dataspace rank */
+ rank = H5Sget_simple_extent_ndims(sid1);
+ CHECK(rank, FAIL, "H5Sget_simple_extent_ndims");
+ VERIFY(rank, SPACE3_RANK, "H5Sget_simple_extent_ndims");
+
+ /* Get the dataspace dimension sizes */
+ rank = H5Sget_simple_extent_dims(sid1, tdims, NULL);
+ VERIFY(rank, 0, "H5Sget_simple_extent_dims");
+
+ /* Verify extent type */
+ ext_type = H5Sget_simple_extent_type(sid1);
+ VERIFY(ext_type, H5S_SCALAR, "H5Sget_simple_extent_type");
+
+ /* Create a dataset */
+ dataset = H5Dcreate2(fid1, "Dataset1", H5T_NATIVE_UINT, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(dataset, FAIL, "H5Dcreate2");
+
+ /* Write to the dataset */
+ ret = H5Dwrite(dataset, H5T_NATIVE_UINT, H5S_ALL, H5S_ALL, H5P_DEFAULT, &space3_data);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* Close Dataset */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close scalar dataspace */
+ ret = H5Sclose(sid1);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close file */
+ ret = H5Fclose(fid1);
+ CHECK(ret, FAIL, "H5Fclose");
+} /* test_h5s_scalar_write() */
+
+/****************************************************************
+**
+** test_h5s_scalar_read(): Test scalar H5S (dataspace) reading code.
+**
+****************************************************************/
+static void
+test_h5s_scalar_read(void)
+{
+ hid_t fid1; /* HDF5 File IDs */
+ hid_t dataset; /* Dataset ID */
+ hid_t sid1; /* Dataspace ID */
+ int rank; /* Logical rank of dataspace */
+ hsize_t tdims[4]; /* Dimension array to test with */
+ hssize_t n; /* Number of dataspace elements */
+ unsigned rdata; /* Scalar data read in */
+ herr_t ret; /* Generic return value */
+ H5S_class_t ext_type; /* Extent type */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing Scalar Dataspace Manipulation during Reading\n"));
+
+ /* Open the file */
+ fid1 = H5Fopen(DATAFILE, H5F_ACC_RDWR, H5P_DEFAULT);
+ CHECK(fid1, FAIL, "H5Fopen");
+
+ /* Open the dataset */
+ dataset = H5Dopen2(fid1, "Dataset1", H5P_DEFAULT);
+ CHECK(dataset, FAIL, "H5Dopen2");
+
+ sid1 = H5Dget_space(dataset);
+ CHECK(sid1, FAIL, "H5Dget_space");
+
+ n = H5Sget_simple_extent_npoints(sid1);
+ CHECK(n, FAIL, "H5Sget_simple_extent_npoints");
+ VERIFY(n, 1, "H5Sget_simple_extent_npoints");
+
+ rank = H5Sget_simple_extent_ndims(sid1);
+ CHECK(rank, FAIL, "H5Sget_simple_extent_ndims");
+ VERIFY(rank, SPACE3_RANK, "H5Sget_simple_extent_ndims");
+
+ rank = H5Sget_simple_extent_dims(sid1, tdims, NULL);
+ VERIFY(rank, 0, "H5Sget_simple_extent_dims");
+
+ /* Verify extent type */
+ ext_type = H5Sget_simple_extent_type(sid1);
+ VERIFY(ext_type, H5S_SCALAR, "H5Sget_simple_extent_type");
+
+ ret = H5Dread(dataset, H5T_NATIVE_UINT, H5S_ALL, H5S_ALL, H5P_DEFAULT, &rdata);
+ CHECK(ret, FAIL, "H5Dread");
+ VERIFY(rdata, space3_data, "H5Dread");
+
+ /* Close Dataset */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close scalar dataspace */
+ ret = H5Sclose(sid1);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close file */
+ ret = H5Fclose(fid1);
+ CHECK(ret, FAIL, "H5Fclose");
+} /* test_h5s_scalar_read() */
+
+/****************************************************************
+**
+** test_h5s_compound_scalar_write(): Test scalar H5S (dataspace) writing for
+** compound datatypes.
+**
+****************************************************************/
+static void
+test_h5s_compound_scalar_write(void)
+{
+ hid_t fid1; /* HDF5 File IDs */
+ hid_t dataset; /* Dataset ID */
+ hid_t tid1; /* Compound datatype ID */
+ hid_t sid1; /* Dataspace ID */
+ int rank; /* Logical rank of dataspace */
+ hsize_t tdims[4]; /* Dimension array to test with */
+ hssize_t n; /* Number of dataspace elements */
+ herr_t ret; /* Generic return value */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing Scalar Dataspace Manipulation for Writing Compound Datatypes\n"));
+
+ /* Create file */
+ fid1 = H5Fcreate(DATAFILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(fid1, FAIL, "H5Fcreate");
+
+ /* Create the compound datatype. */
+ tid1 = H5Tcreate(H5T_COMPOUND, sizeof(struct space4_struct));
+ CHECK(tid1, FAIL, "H5Tcreate");
+ space4_field1_off = HOFFSET(struct space4_struct, c1);
+ ret = H5Tinsert(tid1, SPACE4_FIELDNAME1, space4_field1_off, H5T_NATIVE_SCHAR);
+ CHECK(ret, FAIL, "H5Tinsert");
+ space4_field2_off = HOFFSET(struct space4_struct, u);
+ ret = H5Tinsert(tid1, SPACE4_FIELDNAME2, space4_field2_off, H5T_NATIVE_UINT);
+ CHECK(ret, FAIL, "H5Tinsert");
+ space4_field3_off = HOFFSET(struct space4_struct, f);
+ ret = H5Tinsert(tid1, SPACE4_FIELDNAME3, space4_field3_off, H5T_NATIVE_FLOAT);
+ CHECK(ret, FAIL, "H5Tinsert");
+ space4_field4_off = HOFFSET(struct space4_struct, c2);
+ ret = H5Tinsert(tid1, SPACE4_FIELDNAME4, space4_field4_off, H5T_NATIVE_SCHAR);
+ CHECK(ret, FAIL, "H5Tinsert");
+
+ /* Create scalar dataspace */
+ sid1 = H5Screate_simple(SPACE3_RANK, NULL, NULL);
+ CHECK(sid1, FAIL, "H5Screate_simple");
+
+ n = H5Sget_simple_extent_npoints(sid1);
+ CHECK(n, FAIL, "H5Sget_simple_extent_npoints");
+ VERIFY(n, 1, "H5Sget_simple_extent_npoints");
+
+ rank = H5Sget_simple_extent_ndims(sid1);
+ CHECK(rank, FAIL, "H5Sget_simple_extent_ndims");
+ VERIFY(rank, SPACE3_RANK, "H5Sget_simple_extent_ndims");
+
+ rank = H5Sget_simple_extent_dims(sid1, tdims, NULL);
+ VERIFY(rank, 0, "H5Sget_simple_extent_dims");
+
+ /* Create a dataset */
+ dataset = H5Dcreate2(fid1, "Dataset1", tid1, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(dataset, FAIL, "H5Dcreate2");
+
+ ret = H5Dwrite(dataset, tid1, H5S_ALL, H5S_ALL, H5P_DEFAULT, &space4_data);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* Close Dataset */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close compound datatype */
+ ret = H5Tclose(tid1);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ /* Close scalar dataspace */
+ ret = H5Sclose(sid1);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close file */
+ ret = H5Fclose(fid1);
+ CHECK(ret, FAIL, "H5Fclose");
+} /* test_h5s_compound_scalar_write() */
+
+/****************************************************************
+**
+** test_h5s_compound_scalar_read(): Test scalar H5S (dataspace) reading for
+** compound datatypes.
+**
+****************************************************************/
+static void
+test_h5s_compound_scalar_read(void)
+{
+ hid_t fid1; /* HDF5 File IDs */
+ hid_t dataset; /* Dataset ID */
+ hid_t sid1; /* Dataspace ID */
+ hid_t type; /* Datatype */
+ int rank; /* Logical rank of dataspace */
+ hsize_t tdims[4]; /* Dimension array to test with */
+ hssize_t n; /* Number of dataspace elements */
+ struct space4_struct rdata; /* Scalar data read in */
+ herr_t ret; /* Generic return value */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing Scalar Dataspace Manipulation for Reading Compound Datatypes\n"));
+
+ /* Open the file */
+ fid1 = H5Fopen(DATAFILE, H5F_ACC_RDWR, H5P_DEFAULT);
+ CHECK(fid1, FAIL, "H5Fopen");
+
+ /* Open the dataset */
+ dataset = H5Dopen2(fid1, "Dataset1", H5P_DEFAULT);
+ CHECK(dataset, FAIL, "H5Dopen2");
+
+ sid1 = H5Dget_space(dataset);
+ CHECK(sid1, FAIL, "H5Dget_space");
+
+ n = H5Sget_simple_extent_npoints(sid1);
+ CHECK(n, FAIL, "H5Sget_simple_extent_npoints");
+ VERIFY(n, 1, "H5Sget_simple_extent_npoints");
+
+ rank = H5Sget_simple_extent_ndims(sid1);
+ CHECK(rank, FAIL, "H5Sget_simple_extent_ndims");
+ VERIFY(rank, SPACE3_RANK, "H5Sget_simple_extent_ndims");
+
+ rank = H5Sget_simple_extent_dims(sid1, tdims, NULL);
+ VERIFY(rank, 0, "H5Sget_simple_extent_dims");
+
+ type = H5Dget_type(dataset);
+ CHECK(type, FAIL, "H5Dget_type");
+
+ ret = H5Dread(dataset, type, H5S_ALL, H5S_ALL, H5P_DEFAULT, &rdata);
+ CHECK(ret, FAIL, "H5Dread");
+ if (HDmemcmp(&space4_data, &rdata, sizeof(struct space4_struct)) != 0) {
+ HDprintf("scalar data different: space4_data.c1=%c, read_data4.c1=%c\n", space4_data.c1, rdata.c1);
+ HDprintf("scalar data different: space4_data.u=%u, read_data4.u=%u\n", space4_data.u, rdata.u);
+ HDprintf("scalar data different: space4_data.f=%f, read_data4.f=%f\n", (double)space4_data.f,
+ (double)rdata.f);
+ TestErrPrintf("scalar data different: space4_data.c2=%c, read_data4.c2=%c\n", space4_data.c2,
+ rdata.c2);
+ } /* end if */
+
+ /* Close datatype */
+ ret = H5Tclose(type);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ /* Close Dataset */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close scalar dataspace */
+ ret = H5Sclose(sid1);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close file */
+ ret = H5Fclose(fid1);
+ CHECK(ret, FAIL, "H5Fclose");
+} /* end test_h5s_compound_scalar_read() */
+
+/* Data array sizes for chunk test */
+#define CHUNK_DATA_NX 50000
+#define CHUNK_DATA_NY 3
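+/* A 50000 x 3 chunk of doubles is roughly 1.2 MB, which is assumed to be larger than
+ * the library's default datatype conversion buffer (nominally 1 MB), forcing the
+ * partial-chunk conversion path this test is meant to exercise */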
+
+/****************************************************************
+**
+** test_h5s_chunk(): Exercise chunked I/O, testing when data conversion
+** is necessary and the entire chunk read in doesn't fit into the
+** conversion buffer
+**
+****************************************************************/
+static void
+test_h5s_chunk(void)
+{
+ herr_t status;
+ hid_t fileID, dsetID;
+ hid_t plist_id;
+ hid_t space_id;
+ hsize_t dims[2];
+ hsize_t csize[2];
+ double **chunk_data_dbl = NULL;
+ double *chunk_data_dbl_data = NULL;
+ float **chunk_data_flt = NULL;
+ float *chunk_data_flt_data = NULL;
+ int i, j;
+
+ /* Allocate memory */
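+ /* Each 2-D array is laid out as one contiguous data block plus an array of row
+ pointers into it, so it can be passed to H5Dwrite/H5Dread as a flat buffer while
+ still being indexed as chunk_data_xxx[i][j] below */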
+ chunk_data_dbl_data = (double *)HDcalloc(CHUNK_DATA_NX * CHUNK_DATA_NY, sizeof(double));
+ CHECK_PTR(chunk_data_dbl_data, "HDcalloc");
+ chunk_data_dbl = (double **)HDcalloc(CHUNK_DATA_NX, sizeof(chunk_data_dbl_data));
+ CHECK_PTR(chunk_data_dbl, "HDcalloc");
+ for (i = 0; i < CHUNK_DATA_NX; i++)
+ chunk_data_dbl[i] = chunk_data_dbl_data + (i * CHUNK_DATA_NY);
+
+ chunk_data_flt_data = (float *)HDcalloc(CHUNK_DATA_NX * CHUNK_DATA_NY, sizeof(float));
+ CHECK_PTR(chunk_data_flt_data, "HDcalloc");
+ chunk_data_flt = (float **)HDcalloc(CHUNK_DATA_NX, sizeof(chunk_data_flt_data));
+ CHECK_PTR(chunk_data_flt, "HDcalloc");
+ for (i = 0; i < CHUNK_DATA_NX; i++)
+ chunk_data_flt[i] = chunk_data_flt_data + (i * CHUNK_DATA_NY);
+
+ fileID = H5Fcreate(DATAFILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(fileID, FAIL, "H5Fcreate");
+
+ plist_id = H5Pcreate(H5P_DATASET_CREATE);
+ CHECK(plist_id, FAIL, "H5Pcreate");
+
+ csize[0] = CHUNK_DATA_NX;
+ csize[1] = CHUNK_DATA_NY;
+ status = H5Pset_chunk(plist_id, 2, csize);
+ CHECK(status, FAIL, "H5Pset_chunk");
+
+ /* Create the dataspace */
+ dims[0] = CHUNK_DATA_NX;
+ dims[1] = CHUNK_DATA_NY;
+ space_id = H5Screate_simple(2, dims, NULL);
+ CHECK(space_id, FAIL, "H5Screate_simple");
+
+ dsetID = H5Dcreate2(fileID, "coords", H5T_NATIVE_FLOAT, space_id, H5P_DEFAULT, plist_id, H5P_DEFAULT);
+ CHECK(dsetID, FAIL, "H5Dcreate2");
+
+ /* Initialize float array */
+ for (i = 0; i < CHUNK_DATA_NX; i++)
+ for (j = 0; j < CHUNK_DATA_NY; j++)
+ chunk_data_flt[i][j] = (float)(i + 1) * 2.5F - (float)j * 100.3F;
+
+ status = H5Dwrite(dsetID, H5T_NATIVE_FLOAT, H5S_ALL, H5S_ALL, H5P_DEFAULT, chunk_data_flt_data);
+ CHECK(status, FAIL, "H5Dwrite");
+
+ status = H5Pclose(plist_id);
+ CHECK(status, FAIL, "H5Pclose");
+ status = H5Sclose(space_id);
+ CHECK(status, FAIL, "H5Sclose");
+ status = H5Dclose(dsetID);
+ CHECK(status, FAIL, "H5Dclose");
+ status = H5Fclose(fileID);
+ CHECK(status, FAIL, "H5Fclose");
+
+ /* Reset/initialize the data arrays to read in */
+ HDmemset(chunk_data_dbl_data, 0, sizeof(double) * CHUNK_DATA_NX * CHUNK_DATA_NY);
+ HDmemset(chunk_data_flt_data, 0, sizeof(float) * CHUNK_DATA_NX * CHUNK_DATA_NY);
+
+ fileID = H5Fopen(DATAFILE, H5F_ACC_RDONLY, H5P_DEFAULT);
+ CHECK(fileID, FAIL, "H5Fopen");
+ dsetID = H5Dopen2(fileID, "coords", H5P_DEFAULT);
+ CHECK(dsetID, FAIL, "H5Dopen2");
+
+ status = H5Dread(dsetID, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT, chunk_data_dbl_data);
+ CHECK(status, FAIL, "H5Dread");
+ status = H5Dread(dsetID, H5T_NATIVE_FLOAT, H5S_ALL, H5S_ALL, H5P_DEFAULT, chunk_data_flt_data);
+ CHECK(status, FAIL, "H5Dread");
+
+ status = H5Dclose(dsetID);
+ CHECK(status, FAIL, "H5Dclose");
+ status = H5Fclose(fileID);
+ CHECK(status, FAIL, "H5Fclose");
+
+ for (i = 0; i < CHUNK_DATA_NX; i++) {
+ for (j = 0; j < CHUNK_DATA_NY; j++) {
+ /* Check if the two values are within 0.001% range. */
+ if (!H5_DBL_REL_EQUAL(chunk_data_dbl[i][j], (double)chunk_data_flt[i][j], 0.00001))
+ TestErrPrintf("%u: chunk_data_dbl[%d][%d]=%e, chunk_data_flt[%d][%d]=%e\n",
+ (unsigned)__LINE__, i, j, chunk_data_dbl[i][j], i, j,
+ (double)chunk_data_flt[i][j]);
+ } /* end for */
+ } /* end for */
+
+ HDfree(chunk_data_dbl);
+ HDfree(chunk_data_dbl_data);
+ HDfree(chunk_data_flt);
+ HDfree(chunk_data_flt_data);
+} /* test_h5s_chunk() */
+
+/****************************************************************
+**
+** test_h5s_extent_equal(): Exercise extent comparison code
+**
+****************************************************************/
+static void
+test_h5s_extent_equal(void)
+{
+ hid_t null_space; /* Null dataspace */
+ hid_t scalar_space; /* Scalar dataspace */
+ hid_t d1_space1, d1_space2, d1_space3, d1_space4; /* 1-D dataspaces */
+ hid_t d2_space1, d2_space2, d2_space3, d2_space4; /* 2-D dataspaces */
+ hid_t d3_space1, d3_space2, d3_space3, d3_space4; /* 3-D dataspaces */
+ hsize_t d1_dims1[1] = {10}, /* 1-D dimensions */
+ d1_dims2[1] = {20}, d1_dims3[1] = {H5S_UNLIMITED};
+ hsize_t d2_dims1[2] = {10, 10}, /* 2-D dimensions */
+ d2_dims2[2] = {20, 20}, d2_dims3[2] = {H5S_UNLIMITED, H5S_UNLIMITED};
+ hsize_t d3_dims1[3] = {10, 10, 10}, /* 3-D dimensions */
+ d3_dims2[3] = {20, 20, 20}, d3_dims3[3] = {H5S_UNLIMITED, H5S_UNLIMITED, H5S_UNLIMITED};
+ htri_t ext_equal; /* Whether two dataspace extents are equal */
+ herr_t ret; /* Generic error return */
+
+ /* Create dataspaces */
+ null_space = H5Screate(H5S_NULL);
+ CHECK(null_space, FAIL, "H5Screate");
+
+ scalar_space = H5Screate(H5S_SCALAR);
+ CHECK(scalar_space, FAIL, "H5Screate");
+
+ d1_space1 = H5Screate_simple(1, d1_dims1, NULL);
+ CHECK(d1_space1, FAIL, "H5Screate_simple");
+ d1_space2 = H5Screate_simple(1, d1_dims2, NULL);
+ CHECK(d1_space2, FAIL, "H5Screate_simple");
+ d1_space3 = H5Screate_simple(1, d1_dims1, d1_dims2);
+ CHECK(d1_space3, FAIL, "H5Screate_simple");
+ d1_space4 = H5Screate_simple(1, d1_dims1, d1_dims3);
+ CHECK(d1_space4, FAIL, "H5Screate_simple");
+
+ d2_space1 = H5Screate_simple(2, d2_dims1, NULL);
+ CHECK(d2_space1, FAIL, "H5Screate_simple");
+ d2_space2 = H5Screate_simple(2, d2_dims2, NULL);
+ CHECK(d2_space2, FAIL, "H5Screate_simple");
+ d2_space3 = H5Screate_simple(2, d2_dims1, d2_dims2);
+ CHECK(d2_space3, FAIL, "H5Screate_simple");
+ d2_space4 = H5Screate_simple(2, d2_dims1, d2_dims3);
+ CHECK(d2_space4, FAIL, "H5Screate_simple");
+
+ d3_space1 = H5Screate_simple(3, d3_dims1, NULL);
+ CHECK(d3_space1, FAIL, "H5Screate_simple");
+ d3_space2 = H5Screate_simple(3, d3_dims2, NULL);
+ CHECK(d3_space2, FAIL, "H5Screate_simple");
+ d3_space3 = H5Screate_simple(3, d3_dims1, d3_dims2);
+ CHECK(d3_space3, FAIL, "H5Screate_simple");
+ d3_space4 = H5Screate_simple(3, d3_dims1, d3_dims3);
+ CHECK(d3_space4, FAIL, "H5Screate_simple");
+
+ /* Compare all dataspace combinations */
+
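+ /* H5Sextent_equal() compares the extent only (class, rank, current and maximum
+ dimensions), so dataspaces that differ in any of these, including the maximum
+ dimensions alone, are expected to compare as FALSE below */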
+ /* Compare null dataspace against all others, including itself */
+ ext_equal = H5Sextent_equal(null_space, null_space);
+ VERIFY(ext_equal, TRUE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(null_space, scalar_space);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(null_space, d1_space1);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(null_space, d1_space2);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(null_space, d1_space3);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(null_space, d1_space4);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(null_space, d2_space1);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(null_space, d2_space2);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(null_space, d2_space3);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(null_space, d2_space4);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(null_space, d3_space1);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(null_space, d3_space2);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(null_space, d3_space3);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(null_space, d3_space4);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+
+ /* Compare scalar dataspace against all others, including itself */
+ ext_equal = H5Sextent_equal(scalar_space, null_space);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(scalar_space, scalar_space);
+ VERIFY(ext_equal, TRUE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(scalar_space, d1_space1);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(scalar_space, d1_space2);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(scalar_space, d1_space3);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(scalar_space, d1_space4);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(scalar_space, d2_space1);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(scalar_space, d2_space2);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(scalar_space, d2_space3);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(scalar_space, d2_space4);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(scalar_space, d3_space1);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(scalar_space, d3_space2);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(scalar_space, d3_space3);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(scalar_space, d3_space4);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+
+ /* Compare small 1-D dataspace w/no max. dims against all others, including itself */
+ ext_equal = H5Sextent_equal(d1_space1, null_space);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d1_space1, scalar_space);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d1_space1, d1_space1);
+ VERIFY(ext_equal, TRUE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d1_space1, d1_space2);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d1_space1, d1_space3);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d1_space1, d1_space4);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d1_space1, d2_space1);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d1_space1, d2_space2);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d1_space1, d2_space3);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d1_space1, d2_space4);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d1_space1, d3_space1);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d1_space1, d3_space2);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d1_space1, d3_space3);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d1_space1, d3_space4);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+
+ /* Compare larger 1-D dataspace w/no max. dims against all others, including itself */
+ ext_equal = H5Sextent_equal(d1_space2, null_space);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d1_space2, scalar_space);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d1_space2, d1_space1);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d1_space2, d1_space2);
+ VERIFY(ext_equal, TRUE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d1_space2, d1_space3);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d1_space2, d1_space4);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d1_space2, d2_space1);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d1_space2, d2_space2);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d1_space2, d2_space3);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d1_space2, d2_space4);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d1_space2, d3_space1);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d1_space2, d3_space2);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d1_space2, d3_space3);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d1_space2, d3_space4);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+
+ /* Compare small 1-D dataspace w/fixed max. dims against all others, including itself */
+ ext_equal = H5Sextent_equal(d1_space3, null_space);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d1_space3, scalar_space);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d1_space3, d1_space1);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d1_space3, d1_space2);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d1_space3, d1_space3);
+ VERIFY(ext_equal, TRUE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d1_space3, d1_space4);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d1_space3, d2_space1);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d1_space3, d2_space2);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d1_space3, d2_space3);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d1_space3, d2_space4);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d1_space3, d3_space1);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d1_space3, d3_space2);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d1_space3, d3_space3);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d1_space3, d3_space4);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+
+ /* Compare small 1-D dataspace w/unlimited max. dims against all others, including itself */
+ ext_equal = H5Sextent_equal(d1_space4, null_space);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d1_space4, scalar_space);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d1_space4, d1_space1);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d1_space4, d1_space2);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d1_space4, d1_space3);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d1_space4, d1_space4);
+ VERIFY(ext_equal, TRUE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d1_space4, d2_space1);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d1_space4, d2_space2);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d1_space4, d2_space3);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d1_space4, d2_space4);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d1_space4, d3_space1);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d1_space4, d3_space2);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d1_space4, d3_space3);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d1_space4, d3_space4);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+
+ /* Compare small 2-D dataspace w/no max. dims against all others, including itself */
+ ext_equal = H5Sextent_equal(d2_space1, null_space);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d2_space1, scalar_space);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d2_space1, d1_space1);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d2_space1, d1_space2);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d2_space1, d1_space3);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d2_space1, d1_space4);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d2_space1, d2_space1);
+ VERIFY(ext_equal, TRUE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d2_space1, d2_space2);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d2_space1, d2_space3);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d2_space1, d2_space4);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d2_space1, d3_space1);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d2_space1, d3_space2);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d2_space1, d3_space3);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d2_space1, d3_space4);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+
+ /* Compare larger 2-D dataspace w/no max. dims against all others, including itself */
+ ext_equal = H5Sextent_equal(d2_space2, null_space);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d2_space2, scalar_space);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d2_space2, d1_space1);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d2_space2, d1_space2);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d2_space2, d1_space3);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d2_space2, d1_space4);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d2_space2, d2_space1);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d2_space2, d2_space2);
+ VERIFY(ext_equal, TRUE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d2_space2, d2_space3);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d2_space2, d2_space4);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d2_space2, d3_space1);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d2_space2, d3_space2);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d2_space2, d3_space3);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d2_space2, d3_space4);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+
+ /* Compare small 2-D dataspace w/fixed max. dims against all others, including itself */
+ ext_equal = H5Sextent_equal(d2_space3, null_space);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d2_space3, scalar_space);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d2_space3, d1_space1);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d2_space3, d1_space2);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d2_space3, d1_space3);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d2_space3, d1_space4);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d2_space3, d2_space1);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d2_space3, d2_space2);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d2_space3, d2_space3);
+ VERIFY(ext_equal, TRUE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d2_space3, d2_space4);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d2_space3, d3_space1);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d2_space3, d3_space2);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d2_space3, d3_space3);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d2_space3, d3_space4);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+
+ /* Compare small 2-D dataspace w/unlimited max. dims against all others, including itself */
+ ext_equal = H5Sextent_equal(d2_space4, null_space);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d2_space4, scalar_space);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d2_space4, d1_space1);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d2_space4, d1_space2);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d2_space4, d1_space3);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d2_space4, d1_space4);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d2_space4, d2_space1);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d2_space4, d2_space2);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d2_space4, d2_space3);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d2_space4, d2_space4);
+ VERIFY(ext_equal, TRUE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d2_space4, d3_space1);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d2_space4, d3_space2);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d2_space4, d3_space3);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d2_space4, d3_space4);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+
+ /* Compare small 3-D dataspace w/no max. dims against all others, including itself */
+ ext_equal = H5Sextent_equal(d3_space1, null_space);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d3_space1, scalar_space);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d3_space1, d1_space1);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d3_space1, d1_space2);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d3_space1, d1_space3);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d3_space1, d1_space4);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d3_space1, d2_space1);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d3_space1, d2_space2);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d3_space1, d2_space3);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d3_space1, d2_space4);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d3_space1, d3_space1);
+ VERIFY(ext_equal, TRUE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d3_space1, d3_space2);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d3_space1, d3_space3);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d3_space1, d3_space4);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+
+ /* Compare larger 3-D dataspace w/no max. dims against all others, including itself */
+ ext_equal = H5Sextent_equal(d3_space2, null_space);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d3_space2, scalar_space);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d3_space2, d1_space1);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d3_space2, d1_space2);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d3_space2, d1_space3);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d3_space2, d1_space4);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d3_space2, d2_space1);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d3_space2, d2_space2);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d3_space2, d2_space3);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d3_space2, d2_space4);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d3_space2, d3_space1);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d3_space2, d3_space2);
+ VERIFY(ext_equal, TRUE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d3_space2, d3_space3);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d3_space2, d3_space4);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+
+ /* Compare small 3-D dataspace w/fixed max. dims against all others, including itself */
+ ext_equal = H5Sextent_equal(d3_space3, null_space);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d3_space3, scalar_space);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d3_space3, d1_space1);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d3_space3, d1_space2);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d3_space3, d1_space3);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d3_space3, d1_space4);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d3_space3, d2_space1);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d3_space3, d2_space2);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d3_space3, d2_space3);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d3_space3, d2_space4);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d3_space3, d3_space1);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d3_space3, d3_space2);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d3_space3, d3_space3);
+ VERIFY(ext_equal, TRUE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d3_space3, d3_space4);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+
+ /* Compare small 3-D dataspace w/unlimited max. dims against all others, including itself */
+ ext_equal = H5Sextent_equal(d3_space4, null_space);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d3_space4, scalar_space);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d3_space4, d1_space1);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d3_space4, d1_space2);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d3_space4, d1_space3);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d3_space4, d1_space4);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d3_space4, d2_space1);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d3_space4, d2_space2);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d3_space4, d2_space3);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d3_space4, d2_space4);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d3_space4, d3_space1);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d3_space4, d3_space2);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d3_space4, d3_space3);
+ VERIFY(ext_equal, FALSE, "H5Sextent_equal");
+ ext_equal = H5Sextent_equal(d3_space4, d3_space4);
+ VERIFY(ext_equal, TRUE, "H5Sextent_equal");
+
+ /* Close dataspaces */
+ ret = H5Sclose(null_space);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ ret = H5Sclose(scalar_space);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ ret = H5Sclose(d1_space1);
+ CHECK(ret, FAIL, "H5Sclose");
+ ret = H5Sclose(d1_space2);
+ CHECK(ret, FAIL, "H5Sclose");
+ ret = H5Sclose(d1_space3);
+ CHECK(ret, FAIL, "H5Sclose");
+ ret = H5Sclose(d1_space4);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ ret = H5Sclose(d2_space1);
+ CHECK(ret, FAIL, "H5Sclose");
+ ret = H5Sclose(d2_space2);
+ CHECK(ret, FAIL, "H5Sclose");
+ ret = H5Sclose(d2_space3);
+ CHECK(ret, FAIL, "H5Sclose");
+ ret = H5Sclose(d2_space4);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ ret = H5Sclose(d3_space1);
+ CHECK(ret, FAIL, "H5Sclose");
+ ret = H5Sclose(d3_space2);
+ CHECK(ret, FAIL, "H5Sclose");
+ ret = H5Sclose(d3_space3);
+ CHECK(ret, FAIL, "H5Sclose");
+ ret = H5Sclose(d3_space4);
+ CHECK(ret, FAIL, "H5Sclose");
+} /* test_h5s_extent_equal() */
+
+/****************************************************************
+**
+** test_h5s_extent_copy(): Exercise extent copy code
+**
+****************************************************************/
+static void
+test_h5s_extent_copy(void)
+{
+ hid_t spaces[14] = {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}; /* Array of all dataspaces */
+ hid_t tmp_space = -1;
+ hsize_t d1_dims1[1] = {10}, /* 1-D dimensions */
+ d1_dims2[1] = {20}, d1_dims3[1] = {H5S_UNLIMITED};
+ hsize_t d2_dims1[2] = {10, 10}, /* 2-D dimensions */
+ d2_dims2[2] = {20, 20}, d2_dims3[2] = {H5S_UNLIMITED, H5S_UNLIMITED};
+ hsize_t d3_dims1[3] = {10, 10, 10}, /* 3-D dimensions */
+ d3_dims2[3] = {20, 20, 20}, d3_dims3[3] = {H5S_UNLIMITED, H5S_UNLIMITED, H5S_UNLIMITED};
+ hsize_t npoints[14]; /* Expected number of points in selection for each element in spaces */
+ hssize_t npoints_ret; /* Number of points returned by H5Sget_select_npoints() */
+ htri_t ext_equal; /* Whether two dataspace extents are equal */
+ const unsigned num_spaces = sizeof(spaces) / sizeof(spaces[0]);
+ unsigned i, j;
+ herr_t ret; /* Generic error return */
+
+ /* Create dataspaces */
+ spaces[0] = H5Screate(H5S_NULL);
+ CHECK(spaces[0], FAIL, "H5Screate");
+ npoints[0] = (hsize_t)0;
+
+ spaces[1] = H5Screate(H5S_SCALAR);
+ CHECK(spaces[1], FAIL, "H5Screate");
+ npoints[1] = (hsize_t)1;
+
+ spaces[2] = H5Screate_simple(1, d1_dims1, NULL);
+ CHECK(spaces[2], FAIL, "H5Screate_simple");
+ npoints[2] = d1_dims1[0];
+ spaces[3] = H5Screate_simple(1, d1_dims2, NULL);
+ CHECK(spaces[3], FAIL, "H5Screate_simple");
+ npoints[3] = d1_dims2[0];
+ spaces[4] = H5Screate_simple(1, d1_dims1, d1_dims2);
+ CHECK(spaces[4], FAIL, "H5Screate_simple");
+ npoints[4] = d1_dims1[0];
+ spaces[5] = H5Screate_simple(1, d1_dims1, d1_dims3);
+ CHECK(spaces[5], FAIL, "H5Screate_simple");
+ npoints[5] = d1_dims1[0];
+
+ spaces[6] = H5Screate_simple(2, d2_dims1, NULL);
+ CHECK(spaces[6], FAIL, "H5Screate_simple");
+ npoints[6] = d2_dims1[0] * d2_dims1[1];
+ spaces[7] = H5Screate_simple(2, d2_dims2, NULL);
+ CHECK(spaces[7], FAIL, "H5Screate_simple");
+ npoints[7] = d2_dims2[0] * d2_dims2[1];
+ spaces[8] = H5Screate_simple(2, d2_dims1, d2_dims2);
+ CHECK(spaces[8], FAIL, "H5Screate_simple");
+ npoints[8] = d2_dims1[0] * d2_dims1[1];
+ spaces[9] = H5Screate_simple(2, d2_dims1, d2_dims3);
+ CHECK(spaces[9], FAIL, "H5Screate_simple");
+ npoints[9] = d2_dims1[0] * d2_dims1[1];
+
+ spaces[10] = H5Screate_simple(3, d3_dims1, NULL);
+ CHECK(spaces[10], FAIL, "H5Screate_simple");
+ npoints[10] = d3_dims1[0] * d3_dims1[1] * d3_dims1[2];
+ spaces[11] = H5Screate_simple(3, d3_dims2, NULL);
+ CHECK(spaces[11], FAIL, "H5Screate_simple");
+ npoints[11] = d3_dims2[0] * d3_dims2[1] * d3_dims2[2];
+ spaces[12] = H5Screate_simple(3, d3_dims1, d3_dims2);
+ CHECK(spaces[12], FAIL, "H5Screate_simple");
+ npoints[12] = d3_dims1[0] * d3_dims1[1] * d3_dims1[2];
+ spaces[13] = H5Screate_simple(3, d3_dims1, d3_dims3);
+ CHECK(spaces[13], FAIL, "H5Screate_simple");
+ npoints[13] = d3_dims1[0] * d3_dims1[1] * d3_dims1[2];
+
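+ /* Create a temporary dataspace to receive each copied extent; it starts as
+ * a NULL dataspace, so most copies below also change the extent type */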
+ tmp_space = H5Screate(H5S_NULL);
+ CHECK(tmp_space, FAIL, "H5Screate");
+
+ /* Copy between all dataspace combinations. Note there are a few
+ * duplicates. */
+ for (i = 0; i < num_spaces; i++)
+ for (j = i; j < num_spaces; j++) {
+ /* Copy from i to j, unless the inner loop just restarted, in which
+ * case i and j are the same, so the second call to H5Sextent_copy()
+ * will test copying from i/j to i/j */
+ ret = H5Sextent_copy(tmp_space, spaces[j]);
+ CHECK(ret, FAIL, "H5Sextent_copy");
+
+ /* Verify that the extents are equal */
+ ext_equal = H5Sextent_equal(tmp_space, spaces[j]);
+ VERIFY(ext_equal, TRUE, "H5Sextent_equal");
+
+ /* Verify that the correct number of elements is selected */
+ npoints_ret = H5Sget_select_npoints(tmp_space);
+ VERIFY((hsize_t)npoints_ret, npoints[j], "H5Sget_select_npoints");
+
+ /* Copy from j to i */
+ ret = H5Sextent_copy(tmp_space, spaces[i]);
+ CHECK(ret, FAIL, "H5Sextent_copy");
+
+ /* Verify that the extents are equal */
+ ext_equal = H5Sextent_equal(tmp_space, spaces[i]);
+ VERIFY(ext_equal, TRUE, "H5Sextent_equal");
+
+ /* Verify that the correct number of elements is selected */
+ npoints_ret = H5Sget_select_npoints(tmp_space);
+ VERIFY((hsize_t)npoints_ret, npoints[i], "H5Sget_select_npoints");
+ } /* end for */
+
+ /* Close dataspaces */
+ for (i = 0; i < num_spaces; i++) {
+ ret = H5Sclose(spaces[i]);
+ CHECK(ret, FAIL, "H5Sclose");
+ spaces[i] = -1;
+ } /* end for */
+
+ ret = H5Sclose(tmp_space);
+ CHECK(ret, FAIL, "H5Sclose");
+} /* test_h5s_extent_copy() */
+
+/****************************************************************
+**
+** test_h5s_bug1(): Test creating a dataspace with H5Screate, then
+** setting its extent with H5Sextent_copy.
+**
+****************************************************************/
+static void
+test_h5s_bug1(void)
+{
+ hid_t space1; /* Dataspace to copy extent to */
+ hid_t space2; /* Scalar dataspace */
+ hsize_t dims[2] = {10, 10}; /* Dimensions */
+ hsize_t start[2] = {0, 0}; /* Hyperslab start */
+ htri_t select_valid; /* Whether the dataspace selection is valid */
+ herr_t ret; /* Generic error return */
+
+ /* Create dataspaces */
+ space1 = H5Screate(H5S_SIMPLE);
+ CHECK(space1, FAIL, "H5Screate");
+ space2 = H5Screate_simple(2, dims, NULL);
+ CHECK(space2, FAIL, "H5Screate_simple");
+
+ /* Copy extent to space1 */
+ ret = H5Sextent_copy(space1, space2);
+ CHECK(ret, FAIL, "H5Sextent_copy");
+
+ /* Select hyperslab in space1 containing entire extent */
+ ret = H5Sselect_hyperslab(space1, H5S_SELECT_SET, start, NULL, dims, NULL);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* Check that space1's selection is valid */
+ select_valid = H5Sselect_valid(space1);
+ CHECK(select_valid, FAIL, "H5Sselect_valid");
+ VERIFY(select_valid, TRUE, "H5Sselect_valid result");
+
+ /* Close dataspaces */
+ ret = H5Sclose(space1);
+ CHECK(ret, FAIL, "H5Sclose");
+ ret = H5Sclose(space2);
+ CHECK(ret, FAIL, "H5Sclose");
+} /* test_h5s_bug1() */
+
+/****************************************************************
+**
+** test_h5s_bug2(): Test combining hyperslabs in a way that used
+** to trip up H5S__hyper_update_diminfo()
+**
+****************************************************************/
+static void
+test_h5s_bug2(void)
+{
+ hid_t space; /* Dataspace to select hyperslabs in */
+ hsize_t dims[2] = {1, 5}; /* Dimensions */
+ hsize_t start[2] = {0, 0}; /* Hyperslab start */
+ hsize_t count[2] = {1, 1}; /* Hyperslab count */
+ htri_t select_valid; /* Whether the dataspace selection is valid */
+ hssize_t elements_selected; /* Number of elements selected */
+ herr_t ret; /* Generic error return */
+
+ /* Create dataspace */
+ space = H5Screate_simple(2, dims, NULL);
+ CHECK(space, FAIL, "H5Screate_simple");
+
+ /* Select hyperslab in space containing first element */
+ ret = H5Sselect_hyperslab(space, H5S_SELECT_SET, start, NULL, count, NULL);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* Add hyperslab in space containing last element */
+ start[1] = 4;
+ ret = H5Sselect_hyperslab(space, H5S_SELECT_OR, start, NULL, count, NULL);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* Add hyperslab in space containing the first 3 elements */
+ start[1] = 0;
+ count[1] = 3;
+ ret = H5Sselect_hyperslab(space, H5S_SELECT_OR, start, NULL, count, NULL);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* Check that space's selection is valid */
+ select_valid = H5Sselect_valid(space);
+ CHECK(select_valid, FAIL, "H5Sselect_valid");
+ VERIFY(select_valid, TRUE, "H5Sselect_valid result");
+
+ /* Check that 4 elements are selected: {0,0}, {0,4}, and the block
+ * {0,0}-{0,2} union to 4 distinct elements */
+ elements_selected = H5Sget_select_npoints(space);
+ CHECK(elements_selected, FAIL, "H5Sget_select_npoints");
+ VERIFY(elements_selected, 4, "H5Sget_select_npoints result");
+
+ /* Close dataspaces */
+ ret = H5Sclose(space);
+ CHECK(ret, FAIL, "H5Sclose");
+} /* test_h5s_bug2() */
+
+/*-------------------------------------------------------------------------
+ * Function: test_versionbounds
+ *
+ * Purpose: Tests version bounds with dataspace.
+ *
+ * Description:
+ * This function creates a file with low format version bounds,
+ * then reopens it with higher bounds to show that the dataspace
+ * version is upgraded appropriately.
+ *
+ * Return: Success: 0
+ * Failure: number of errors
+ *
+ *-------------------------------------------------------------------------
+ */
+#define VERBFNAME "tverbounds_dspace.h5"
+#define BASIC_DSET "Basic Dataset"
+#define LATEST_DSET "Latest Dataset"
+static void
+test_versionbounds(void)
+{
+ hid_t file = -1; /* File ID */
+ hid_t space = -1; /* Dataspace ID */
+ hid_t dset = -1; /* Dataset ID */
+ hid_t fapl = -1; /* File access property list ID */
+ hid_t dset_space = -1; /* Retrieved dataset's dataspace ID */
+ hsize_t dim[1]; /* Dataset dimensions */
+ H5F_libver_t low, high; /* File format bounds */
+#if 0
+ H5S_t *spacep = NULL; /* Pointer to internal dataspace */
+#endif
+ herr_t ret = 0; /* Generic return value */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing Version Bounds\n"));
+
+ /* Create a file access property list */
+ fapl = H5Pcreate(H5P_FILE_ACCESS);
+ CHECK(fapl, FAIL, "H5Pcreate");
+
+ /* Create dataspace */
+ dim[0] = 10;
+ space = H5Screate_simple(1, dim, NULL);
+ CHECK(space, FAIL, "H5Screate_simple");
+#if 0
+ /* Its version should be H5O_SDSPACE_VERSION_1 */
+ spacep = (H5S_t *)H5I_object(space);
+ CHECK_PTR(spacep, "H5I_object");
+ VERIFY(spacep->extent.version, H5O_SDSPACE_VERSION_1, "basic dataspace version bound");
+#endif
+
+ /* Set high bound to V18 */
+ low = H5F_LIBVER_EARLIEST;
+ high = H5F_LIBVER_V18;
+ ret = H5Pset_libver_bounds(fapl, low, high);
+ CHECK(ret, FAIL, "H5Pset_libver_bounds");
+
+ /* Create the file */
+ file = H5Fcreate(VERBFNAME, H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
+ CHECK(file, FAIL, "H5Fcreate");
+
+ /* Create a basic dataset */
+ dset = H5Dcreate2(file, BASIC_DSET, H5T_NATIVE_INT, space, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ if (dset > 0) /* dataset created successfully */
+ {
+ /* Get the dataset's dataspace */
+ dset_space = H5Dget_space(dset);
+ CHECK(dset_space, FAIL, "H5Dget_space");
+#if 0
+ spacep = (H5S_t *)H5I_object(dset_space);
+ CHECK_PTR(spacep, "H5I_object");
+
+ /* Dataspace version should remain as H5O_SDSPACE_VERSION_1 */
+ VERIFY(spacep->extent.version, H5O_SDSPACE_VERSION_1, "basic dataspace version bound");
+#endif
+ /* Close dataspace */
+ ret = H5Sclose(dset_space);
+ CHECK(ret, FAIL, "H5Sclose");
+ }
+
+ /* Close basic dataset and the file */
+ ret = H5Dclose(dset);
+ CHECK(ret, FAIL, "H5Dclose");
+ ret = H5Fclose(file);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Set low and high bounds to latest to trigger the increment of the
+ dataspace version */
+ low = H5F_LIBVER_LATEST;
+ high = H5F_LIBVER_LATEST;
+ ret = H5Pset_libver_bounds(fapl, low, high);
+ CHECK(ret, FAIL, "H5Pset_libver_bounds");
+
+ /* Reopen the file with new version bounds, LATEST/LATEST */
+ file = H5Fopen(VERBFNAME, H5F_ACC_RDWR, fapl);
+ CHECK(file, FAIL, "H5Fopen");
+
+ /* Create another dataset using the same dspace as the previous dataset */
+ dset = H5Dcreate2(file, LATEST_DSET, H5T_NATIVE_INT, space, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(dset, FAIL, "H5Dcreate2");
+
+ /* Dataset created successfully. Verify that dataspace version has been
+ upgraded per the low bound */
+
+ /* Get the dataset's dataspace */
+ dset_space = H5Dget_space(dset);
+ CHECK(dset_space, FAIL, "H5Dget_space");
+#if 0
+ spacep = (H5S_t *)H5I_object(dset_space);
+ CHECK_PTR(spacep, "H5I_object");
+
+ /* Verify the dataspace version */
+ VERIFY(spacep->extent.version, H5O_sdspace_ver_bounds[low], "upgraded dataspace version");
+#endif
+ /* Close everything */
+ ret = H5Sclose(dset_space);
+ CHECK(ret, FAIL, "H5Sclose");
+ ret = H5Dclose(dset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ ret = H5Sclose(space);
+ CHECK(ret, FAIL, "H5Sclose");
+ ret = H5Pclose(fapl);
+ CHECK(ret, FAIL, "H5Pclose");
+ ret = H5Fclose(file);
+ CHECK(ret, FAIL, "H5Fclose");
+} /* end test_versionbounds() */
+
+/****************************************************************
+**
+** test_h5s(): Main H5S (dataspace) testing routine.
+**
+****************************************************************/
+void
+test_h5s(void)
+{
+ H5F_libver_t low, high; /* Low and high bounds */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing Dataspaces\n"));
+
+ test_h5s_basic(); /* Test basic H5S code */
+ test_h5s_null(); /* Test Null dataspace H5S code */
+ test_h5s_zero_dim(); /* Test dataspace with zero dimension size */
+#if 0
+ /* Loop through all the combinations of low/high version bounds */
+ for (low = H5F_LIBVER_EARLIEST; low < H5F_LIBVER_NBOUNDS; low++) {
+ for (high = H5F_LIBVER_EARLIEST; high < H5F_LIBVER_NBOUNDS; high++) {
+
+ /* Invalid combinations, just continue */
+ if (high == H5F_LIBVER_EARLIEST || high < low)
+ continue;
+#else
+ low = H5F_LIBVER_LATEST;
+ high = H5F_LIBVER_LATEST;
+#endif
+ test_h5s_encode(low, high); /* Test encoding and decoding */
+ test_h5s_encode_regular_hyper(low, high); /* Test encoding regular hyperslabs */
+ test_h5s_encode_irregular_hyper(low, high); /* Test encoding irregular hyperslabs */
+ test_h5s_encode_points(low, high); /* Test encoding points */
+#if 0
+ } /* end high bound */
+ } /* end low bound */
+#endif
+ test_h5s_encode_length(); /* Test version 2 hyperslab encoding length is correct */
+#ifndef H5_NO_DEPRECATED_SYMBOLS
+ test_h5s_encode1(); /* Test operations with old API routine (H5Sencode1) */
+#endif /* H5_NO_DEPRECATED_SYMBOLS */
+
+ test_h5s_scalar_write(); /* Test scalar H5S writing code */
+ test_h5s_scalar_read(); /* Test scalar H5S reading code */
+
+ test_h5s_compound_scalar_write(); /* Test compound datatype scalar H5S writing code */
+ test_h5s_compound_scalar_read(); /* Test compound datatype scalar H5S reading code */
+
+ /* This test was added later to exercise a bug in chunked I/O */
+ test_h5s_chunk(); /* Exercise bug fix for chunked I/O */
+
+ test_h5s_extent_equal(); /* Test extent comparison code */
+ test_h5s_extent_copy(); /* Test extent copy code */
+ test_h5s_bug1(); /* Test bug in offset initialization */
+ test_h5s_bug2(); /* Test bug found in H5S__hyper_update_diminfo() */
+ test_versionbounds(); /* Test version bounds with dataspace */
+} /* test_h5s() */
+
+/*-------------------------------------------------------------------------
+ * Function: cleanup_h5s
+ *
+ * Purpose: Cleanup temporary test files
+ *
+ * Return: none
+ *
+ * Programmer: Albert Cheng
+ * July 2, 1998
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+void
+cleanup_h5s(void)
+{
+ H5Fdelete(DATAFILE, H5P_DEFAULT);
+ H5Fdelete(NULLFILE, H5P_DEFAULT);
+ H5Fdelete(BASICFILE, H5P_DEFAULT);
+ H5Fdelete(ZEROFILE, H5P_DEFAULT);
+ H5Fdelete(VERBFNAME, H5P_DEFAULT);
+}
diff --git a/test/API/tid.c b/test/API/tid.c
new file mode 100644
index 0000000..2dd8851
--- /dev/null
+++ b/test/API/tid.c
@@ -0,0 +1,1413 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://www.hdfgroup.org/licenses. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/* Test user-created identifiers (hid_t's) and identifier types. */
+
+#include "testhdf5.h"
+
+#if 0
+/* Include H5Ipkg.h to calculate the max number of ID types */
+#define H5I_FRIEND /*suppress error about including H5Ipkg */
+#include "H5Ipkg.h"
+#endif
+
+/*
+ * Number of bits to use for ID Type in each ID. Increase if more types
+ * are needed (though this will decrease the number of available IDs per
+ * type). This is the only number that must be changed since all other bit
+ * field sizes and masks are calculated from TYPE_BITS.
+ */
+#define TYPE_BITS 7
+#define TYPE_MASK (((hid_t)1 << TYPE_BITS) - 1)
+
+#define H5I_MAX_NUM_TYPES TYPE_MASK
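+/* With TYPE_BITS = 7, TYPE_MASK = (1 << 7) - 1 = 127, so these tests assume
+ * that at most 127 ID types can exist at once */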
+
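+/* Free callback used by the user-defined ID types registered in these tests;
+ * it simply releases the object's memory */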
+static herr_t
+free_wrapper(void *p, void H5_ATTR_UNUSED **_ctx)
+{
+ HDfree(p);
+ return SUCCEED;
+}
+
+/* Test basic functionality of registering and deleting types and IDs */
+static int
+basic_id_test(void)
+{
+ H5I_type_t myType = H5I_BADID;
+ hid_t arrayID = H5I_INVALID_HID;
+ void *testObj = NULL;
+ void *testPtr = NULL;
+ char nameString[10];
+ hid_t testID;
+ ssize_t testSize = -1;
+ herr_t err;
+ int num_ref;
+ hsize_t num_members;
+
+ /* Try to register an ID with fictitious types */
+ H5E_BEGIN_TRY
+ arrayID = H5Iregister((H5I_type_t)420, testObj);
+ H5E_END_TRY
+
+ VERIFY(arrayID, H5I_INVALID_HID, "H5Iregister");
+ if (arrayID != H5I_INVALID_HID)
+ goto out;
+
+ H5E_BEGIN_TRY
+ arrayID = H5Iregister((H5I_type_t)-1, testObj);
+ H5E_END_TRY
+
+ VERIFY(arrayID, H5I_INVALID_HID, "H5Iregister");
+ if (arrayID != H5I_INVALID_HID)
+ goto out;
+
+ /* Try to access IDs with fictitious types */
+ H5E_BEGIN_TRY
+ testPtr = H5Iobject_verify((hid_t)100, (H5I_type_t)0);
+ H5E_END_TRY
+
+ CHECK_PTR_NULL(testPtr, "H5Iobject_verify");
+ if (testPtr != NULL)
+ goto out;
+
+ H5E_BEGIN_TRY
+ testPtr = H5Iobject_verify((hid_t)700, (H5I_type_t)700);
+ H5E_END_TRY
+
+ CHECK_PTR_NULL(testPtr, "H5Iobject_verify");
+ if (testPtr != NULL)
+ goto out;
+
+ /* Register a type */
+ myType = H5Iregister_type((size_t)64, 0, free_wrapper);
+
+ CHECK(myType, H5I_BADID, "H5Iregister_type");
+ if (myType == H5I_BADID)
+ goto out;
+
+ /* Register an ID and retrieve the object it points to.
+ * Once the ID has been registered, testObj will be freed when
+ * its ID type is destroyed.
+ */
+ testObj = HDmalloc(7 * sizeof(int));
+ arrayID = H5Iregister(myType, testObj);
+
+ CHECK(arrayID, H5I_INVALID_HID, "H5Iregister");
+ if (arrayID == H5I_INVALID_HID) {
+ HDfree(testObj);
+ goto out;
+ }
+
+ testPtr = (int *)H5Iobject_verify(arrayID, myType);
+
+ CHECK_PTR_EQ(testPtr, testObj, "H5Iobject_verify");
+ if (testPtr != testObj)
+ goto out;
+
+ /* Ensure that H5Iget_file_id and H5Iget_name() fail, since this
+ * is an hid_t for the wrong kind of object
+ */
+ H5E_BEGIN_TRY
+ testID = H5Iget_file_id(arrayID);
+ H5E_END_TRY
+
+ VERIFY(testID, H5I_INVALID_HID, "H5Iget_file_id");
+ if (testID != H5I_INVALID_HID)
+ goto out;
+
+ H5E_BEGIN_TRY
+ testSize = H5Iget_name(arrayID, nameString, (size_t)9);
+ H5E_END_TRY
+
+ VERIFY(testSize, -1, "H5Iget_name");
+ if (testSize != -1)
+ goto out;
+
+ /* Make sure H5Iremove_verify catches objects of the wrong type */
+ H5E_BEGIN_TRY
+ testPtr = (int *)H5Iremove_verify(arrayID, (H5I_type_t)0);
+ H5E_END_TRY
+
+ CHECK_PTR_NULL(testPtr, "H5Iremove_verify");
+ if (testPtr != NULL)
+ goto out;
+
+ H5E_BEGIN_TRY
+ testPtr = (int *)H5Iremove_verify(arrayID, (H5I_type_t)((int)myType - 1));
+ H5E_END_TRY
+
+ CHECK_PTR_NULL(testPtr, "H5Iremove_verify");
+ if (testPtr != NULL)
+ goto out;
+
+ /* Remove an ID and make sure we can't access it */
+ testPtr = (int *)H5Iremove_verify(arrayID, myType);
+
+ CHECK_PTR(testPtr, "H5Iremove_verify");
+ if (testPtr == NULL)
+ goto out;
+
+ H5E_BEGIN_TRY
+ testPtr = (int *)H5Iobject_verify(arrayID, myType);
+ H5E_END_TRY
+
+ CHECK_PTR_NULL(testPtr, "H5Iobject_verify");
+ if (testPtr != NULL)
+ goto out;
+
+ /* Delete the type and make sure we can't access objects within it */
+ arrayID = H5Iregister(myType, testObj);
+
+ err = H5Idestroy_type(myType);
+ VERIFY(err, 0, "H5Idestroy_type");
+ if (err != 0)
+ goto out;
+ VERIFY(H5Itype_exists(myType), 0, "H5Itype_exists");
+ if (H5Itype_exists(myType) != 0)
+ goto out;
+
+ H5E_BEGIN_TRY
+ VERIFY(H5Inmembers(myType, NULL), -1, "H5Inmembers");
+ if (H5Inmembers(myType, NULL) != -1)
+ goto out;
+ H5E_END_TRY
+
+ /* Register another type and another object in that type */
+ myType = H5Iregister_type((size_t)64, 0, free_wrapper);
+
+ CHECK(myType, H5I_BADID, "H5Iregister_type");
+ if (myType == H5I_BADID)
+ goto out;
+
+ /* The memory that testObj pointed to should already have been
+ * freed when the previous type was destroyed. Allocate new
+ * memory for it.
+ */
+ testObj = HDmalloc(7 * sizeof(int));
+ arrayID = H5Iregister(myType, testObj);
+
+ CHECK(arrayID, H5I_INVALID_HID, "H5Iregister");
+ if (arrayID == H5I_INVALID_HID) {
+ HDfree(testObj);
+ goto out;
+ }
+
+ err = H5Inmembers(myType, &num_members);
+ CHECK(err, -1, "H5Inmembers");
+ if (err < 0)
+ goto out;
+ VERIFY(num_members, 1, "H5Inmembers");
+ if (num_members != 1)
+ goto out;
+
+ /* Increment references to type and ensure that dec_type_ref
+ * doesn't destroy the type
+ */
+ num_ref = H5Iinc_type_ref(myType);
+ VERIFY(num_ref, 2, "H5Iinc_type_ref");
+ if (num_ref != 2)
+ goto out;
+ num_ref = H5Idec_type_ref(myType);
+ VERIFY(num_ref, 1, "H5Idec_type_ref");
+ if (num_ref != 1)
+ goto out;
+ err = H5Inmembers(myType, &num_members);
+ CHECK(err, -1, "H5Inmembers");
+ if (err < 0)
+ goto out;
+ VERIFY(num_members, 1, "H5Inmembers");
+ if (num_members != 1)
+ goto out;
+
+ /* This call to dec_type_ref should destroy the type */
+ num_ref = H5Idec_type_ref(myType);
+ VERIFY(num_ref, 0, "H5Idec_type_ref");
+ if (num_ref != 0)
+ goto out;
+ VERIFY(H5Itype_exists(myType), 0, "H5Itype_exists");
+ if (H5Itype_exists(myType) != 0)
+ goto out;
+
+ H5E_BEGIN_TRY
+ err = H5Inmembers(myType, &num_members);
+ if (err >= 0)
+ goto out;
+ H5E_END_TRY
+
+ return 0;
+
+out:
+ /* Clean up the type if it has been allocated; destroying it also frees
+ * any registered testObj via the type's free callback
+ */
+ if (myType >= 0)
+ H5Idestroy_type(myType);
+
+ return -1;
+}
+
+/* A dummy search function for the next test */
+static int
+test_search_func(void H5_ATTR_UNUSED *ptr1, hid_t H5_ATTR_UNUSED id, void H5_ATTR_UNUSED *ptr2)
+{
+ return 0;
+}
+
+/* Ensure that public functions cannot access "predefined" ID types */
+static int
+id_predefined_test(void)
+{
+ void *testObj;
+ hid_t testID;
+ hid_t typeID = H5I_INVALID_HID;
+ void *testPtr;
+ herr_t testErr;
+
+ testObj = HDmalloc(sizeof(int));
+
+ /*
+ * Attempt to perform public functions on various library types
+ */
+
+ H5E_BEGIN_TRY
+ testID = H5Iregister(H5I_FILE, testObj);
+ H5E_END_TRY
+
+ VERIFY(testID, H5I_INVALID_HID, "H5Iregister");
+ if (testID != H5I_INVALID_HID)
+ goto out;
+
+ H5E_BEGIN_TRY
+ testPtr = H5Isearch(H5I_GENPROP_LST, test_search_func, testObj);
+ H5E_END_TRY
+
+ CHECK_PTR_NULL(testPtr, "H5Isearch");
+ if (testPtr != NULL)
+ goto out;
+
+ H5E_BEGIN_TRY
+ testErr = H5Inmembers(H5I_ERROR_STACK, NULL);
+ H5E_END_TRY
+
+ VERIFY(testErr, -1, "H5Inmembers");
+ if (testErr != -1)
+ goto out;
+
+ H5E_BEGIN_TRY
+ testErr = H5Iclear_type(H5I_FILE, 0);
+ H5E_END_TRY
+
+ VERIFY((testErr >= 0), 0, "H5Iclear_type");
+ if (testErr >= 0)
+ goto out;
+
+ H5E_BEGIN_TRY
+ testErr = H5Idestroy_type(H5I_DATASET);
+ H5E_END_TRY
+
+ VERIFY((testErr >= 0), 0, "H5Idestroy_type");
+ if (testErr >= 0)
+ goto out;
+
+ H5E_BEGIN_TRY
+ testErr = H5Itype_exists(H5I_GROUP);
+ H5E_END_TRY
+
+ VERIFY(testErr, -1, "H5Itype_exists");
+ if (testErr != -1)
+ goto out;
+
+ H5E_BEGIN_TRY
+ testErr = H5Itype_exists(H5I_ATTR);
+ H5E_END_TRY
+
+ VERIFY(testErr, -1, "H5Itype_exists");
+ if (testErr != -1)
+ goto out;
+
+ /*
+ * Create a datatype ID and try to perform illegal functions on it
+ */
+
+ typeID = H5Tcreate(H5T_OPAQUE, (size_t)42);
+ CHECK(typeID, H5I_INVALID_HID, "H5Tcreate");
+ if (typeID == H5I_INVALID_HID)
+ goto out;
+
+ H5E_BEGIN_TRY
+ testPtr = H5Iremove_verify(typeID, H5I_DATATYPE);
+ H5E_END_TRY
+
+ CHECK_PTR_NULL(testPtr, "H5Iremove_verify");
+ if (testPtr != NULL)
+ goto out;
+
+ H5E_BEGIN_TRY
+ testPtr = H5Iobject_verify(typeID, H5I_DATATYPE);
+ H5E_END_TRY
+
+ CHECK_PTR_NULL(testPtr, "H5Iobject_verify");
+ if (testPtr != NULL)
+ goto out;
+
+ H5Tclose(typeID);
+
+ /* testObj was never registered as an atom, so it will not be
+ * automatically freed. */
+ HDfree(testObj);
+ return 0;
+
+out:
+ if (typeID != H5I_INVALID_HID)
+ H5Tclose(typeID);
+ if (testObj != NULL)
+ HDfree(testObj);
+
+ return -1;
+}
+
+/* Test the H5Iis_valid function */
+static int
+test_is_valid(void)
+{
+ hid_t dtype; /* datatype id */
+#if 0
+ int64_t nmembs1; /* number of type members */
+ int64_t nmembs2;
+#endif
+ htri_t tri_ret; /* htri_t return value */
+#if 0
+ herr_t ret; /* return value */
+#endif
+
+ /* Create a datatype id */
+ dtype = H5Tcopy(H5T_NATIVE_INT);
+ CHECK(dtype, FAIL, "H5Tcopy");
+ if (dtype < 0)
+ goto out;
+
+ /* Check that the ID is valid */
+ tri_ret = H5Iis_valid(dtype);
+ VERIFY(tri_ret, TRUE, "H5Iis_valid");
+ if (tri_ret != TRUE)
+ goto out;
+#if 0 /* Cannot call internal APIs and cannot call public H5Inmembers on library types */
+ /* Artificially manipulate the reference counts so app_count is 0, and dtype
+ * appears to be an internal id. This takes advantage of the fact that
+ * H5Ipkg is included.
+ */
+ ret = H5I_inc_ref(dtype, FALSE);
+ CHECK(ret, FAIL, "H5I_inc_ref");
+ if (ret < 0)
+ goto out;
+ ret = H5I_dec_app_ref(dtype);
+ CHECK(ret, FAIL, "H5I_dec_ref");
+ if (ret < 0)
+ goto out;
+
+ /* Check that dtype is invalid */
+ tri_ret = H5Iis_valid(dtype);
+ VERIFY(tri_ret, FALSE, "H5Iis_valid");
+ if (tri_ret != FALSE)
+ goto out;
+
+ /* Close dtype and verify that it has been closed */
+ nmembs1 = H5I_nmembers(H5I_DATATYPE);
+ CHECK(nmembs1, FAIL, "H5I_nmembers");
+ if (nmembs1 < 0)
+ goto out;
+ ret = H5I_dec_ref(dtype);
+ CHECK(ret, FAIL, "H5I_dec_ref");
+ if (ret < 0)
+ goto out;
+ nmembs2 = H5I_nmembers(H5I_DATATYPE);
+ VERIFY(nmembs2, nmembs1 - 1, "H5I_nmembers");
+ if (nmembs2 != nmembs1 - 1)
+ goto out;
+
+ /* Check that dtype is invalid */
+ tri_ret = H5Iis_valid(dtype);
+ VERIFY(tri_ret, FALSE, "H5Iis_valid");
+ if (tri_ret != FALSE)
+ goto out;
+#endif
+ /* Check that an id of -1 is invalid */
+ tri_ret = H5Iis_valid((hid_t)-1);
+ VERIFY(tri_ret, FALSE, "H4Iis_valid");
+ if (tri_ret != FALSE)
+ goto out;
+
+ return 0;
+
+out:
+ /* Don't attempt to close dtype as we don't know the exact state of the
+ * reference counts. Every state in this function will be automatically
+ * closed at library exit anyway, as internal count is never > 1.
+ */
+ return -1;
+}
+
+/* Test the H5Iget_type function */
+static int
+test_get_type(void)
+{
+ hid_t dtype; /* datatype id */
+ H5I_type_t type_ret; /* return value */
+
+ /* Create a datatype id */
+ dtype = H5Tcopy(H5T_NATIVE_INT);
+ CHECK(dtype, FAIL, "H5Tcopy");
+ if (dtype < 0)
+ goto out;
+
+ /* Check that the ID is correct */
+ type_ret = H5Iget_type(dtype);
+ VERIFY(type_ret, H5I_DATATYPE, "H5Iget_type");
+ if (type_ret == H5I_BADID)
+ goto out;
+
+ /* Check that a non-ID value (an H5T_class_t constant cast to hid_t) is rejected */
+ type_ret = H5Iget_type((hid_t)H5T_STRING);
+ VERIFY(type_ret, H5I_BADID, "H5Iget_type");
+ if (type_ret != H5I_BADID)
+ goto out;
+
+ /* Check that an invalid ID (-1) is rejected */
+ type_ret = H5Iget_type((hid_t)-1);
+ VERIFY(type_ret, H5I_BADID, "H5Iget_type");
+ if (type_ret != H5I_BADID)
+ goto out;
+
+ H5Tclose(dtype);
+
+ return 0;
+
+out:
+ if (dtype != H5I_INVALID_HID)
+ H5Tclose(dtype);
+
+ return -1;
+}
+
+/* Test boundary cases with lots of types */
+
+/* Type IDs range from H5I_NTYPES to H5I_MAX_NUM_TYPES. The system will assign */
+/* IDs in sequential order until H5I_MAX_NUM_TYPES IDs have been given out, at which */
+/* point it will search for type IDs that were allocated but have since been */
+/* deleted. */
+/* This test will allocate IDs up to H5I_MAX_NUM_TYPES, ensure that IDs wrap around */
+/* to low values successfully, ensure that an error is thrown when all possible */
+/* type IDs are taken, then ensure that deleting types frees up their IDs. */
+/* Note that this test depends on the implementation of IDs, so may break */
+/* if the implementation changes. */
+/* Also note that if someone else registered a user-defined type and forgot to */
+/* destroy it, this test will mysteriously fail (because it will expect there to */
+/* be one more "free" type ID than there is). */
+/* H5I_NTYPES is defined in H5Ipublic.h; H5I_MAX_NUM_TYPES is defined above in this file. */
+static int
+test_id_type_list(void)
+{
+ H5I_type_t startType; /* The first type ID we were assigned in this test */
+ H5I_type_t currentType;
+ H5I_type_t testType;
+ int i; /* Just a counter variable */
+
+ startType = H5Iregister_type((size_t)8, 0, free_wrapper);
+ CHECK(startType, H5I_BADID, "H5Iregister_type");
+ if (startType == H5I_BADID)
+ goto out;
+
+ /* Sanity check */
+ if ((int)startType >= H5I_MAX_NUM_TYPES || startType < H5I_NTYPES) {
+ /* Error condition, throw an error */
+ ERROR("H5Iregister_type");
+ goto out;
+ }
+ /* Create types up to H5I_MAX_NUM_TYPES */
+ for (i = startType + 1; i < H5I_MAX_NUM_TYPES; i++) {
+ currentType = H5Iregister_type((size_t)8, 0, free_wrapper);
+ CHECK(currentType, H5I_BADID, "H5Iregister_type");
+ if (currentType == H5I_BADID)
+ goto out;
+ }
+
+ /* Wrap around to low type ID numbers */
+ for (i = H5I_NTYPES; i < startType; i++) {
+ currentType = H5Iregister_type((size_t)8, 0, free_wrapper);
+ CHECK(currentType, H5I_BADID, "H5Iregister_type");
+ if (currentType == H5I_BADID)
+ goto out;
+ }
+
+ /* There should be no room at the inn for a new ID type */
+ H5E_BEGIN_TRY
+ testType = H5Iregister_type((size_t)8, 0, free_wrapper);
+ H5E_END_TRY
+
+ VERIFY(testType, H5I_BADID, "H5Iregister_type");
+ if (testType != H5I_BADID)
+ goto out;
+
+ /* Now delete a type and try to insert again */
+ H5Idestroy_type(H5I_NTYPES);
+ testType = H5Iregister_type((size_t)8, 0, free_wrapper);
+
+ VERIFY(testType, H5I_NTYPES, "H5Iregister_type");
+ if (testType != H5I_NTYPES)
+ goto out;
+
+ /* Cleanup. Destroy all types. */
+ for (i = H5I_NTYPES; i < H5I_MAX_NUM_TYPES; i++)
+ H5Idestroy_type((H5I_type_t)i);
+
+ return 0;
+
+out:
+ /* Cleanup. For simplicity, just destroy all types and ignore errors. */
+ H5E_BEGIN_TRY
+ for (i = H5I_NTYPES; i < H5I_MAX_NUM_TYPES; i++)
+ H5Idestroy_type((H5I_type_t)i);
+ H5E_END_TRY
+ return -1;
+}
+
+/* Test removing ids in callback for H5Iclear_type */
+
+/* There was a rare bug where, if an id free callback being called by
+ * H5I_clear_type() removed another id in that type, a segfault could occur.
+ * This test tests for that error (and freeing ids "out of order" within
+ * H5Iclear_type() in general).
+ *
+ * NB: RCT = "remove clear type"
+ */
+
+/* Macro definitions */
+#define RCT_MAX_NOBJS 25 /* Maximum number of objects in the list */
+#define RCT_MIN_NOBJS 5 /* Minimum number of objects in the list */
+#define RCT_NITER 50 /* Number of times we cycle through object creation and deletion */
+
+/* Structure to hold the master list of objects */
+typedef struct rct_obj_list_t {
+
+ /* Pointer to the objects */
+ struct rct_obj_t *objects;
+
+ /* The number of objects in the list */
+ long count;
+
+ /* The number of objects in the list that have not been freed */
+ long remaining;
+} rct_obj_list_t;
+
+/* Structure for an object */
+typedef struct rct_obj_t {
+ /* The ID for this object */
+ hid_t id;
+
+ /* The number of times this object has been freed */
+ int nfrees;
+
+ /* Whether we are currently freeing this object directly
+ * through H5Idec_ref().
+ */
+ hbool_t freeing;
+
+ /* Pointer to the master list of all objects */
+ rct_obj_list_t *list;
+} rct_obj_t;
+
+/* Free callback passed to H5Iclear_type()
+ *
+ * When invoked on a closing object, frees a random unfreed ID in the
+ * master list of objects.
+ */
+static herr_t
+rct_free_cb(void *_obj, void H5_ATTR_UNUSED **_ctx)
+{
+ rct_obj_t *obj = (rct_obj_t *)_obj;
+ long remove_nth;
+ long i;
+ herr_t ret;
+
+ /* Mark this object as freed */
+ obj->nfrees++;
+
+ /* Decrement the number of objects in the list that have not been freed */
+ obj->list->remaining--;
+
+ /* If this object isn't already being freed by a callback free call and
+ * the master object list still contains objects to free, pick another
+ * object and free it.
+ */
+ if (!obj->freeing && (obj->list->remaining > 0)) {
+
+ /* Pick a random object from the list. This is done by picking a
+ * random number between 0 and the # of remaining unfreed objects
+ * and then scanning through the list to find that nth unfreed
+ * object.
+ */
+ remove_nth = HDrandom() % obj->list->remaining;
+ for (i = 0; i < obj->list->count; i++)
+ if (obj->list->objects[i].nfrees == 0) {
+ if (remove_nth == 0)
+ break;
+ else
+ remove_nth--;
+ }
+
+ /* Badness if we scanned through the list and didn't manage to
+ * select one to delete (the list stats were probably updated
+ * incorrectly).
+ */
+ if (i == obj->list->count) {
+ ERROR("invalid obj_list");
+ goto error;
+ }
+
+ /* Mark the object we're about to free so its own callback does
+ * not free another object. We don't want to recursively free the
+ * entire list when we free the first ID.
+ */
+ obj->list->objects[i].freeing = TRUE;
+
+ /* Decrement the reference count on the object */
+ ret = H5Idec_ref(obj->list->objects[i].id);
+ CHECK(ret, FAIL, "H5Idec_ref");
+ if (ret == FAIL)
+ goto error;
+
+ /* Unset the "freeing" flag */
+ obj->list->objects[i].freeing = FALSE;
+ }
+
+ /* Verify the number of objects remaining in the master list is non-negative */
+ if (obj->list->remaining < 0) {
+ ERROR("invalid number of objects remaining");
+ goto error;
+ }
+
+ return 0;
+
+error:
+ return -1;
+} /* end rct_free_cb() */
+
+/* Test function */
+static int
+test_remove_clear_type(void)
+{
+ H5I_type_t obj_type;
+ rct_obj_list_t obj_list;
+ rct_obj_t *objects = NULL; /* Convenience pointer to objects stored in master list */
+ size_t list_size;
+ long i, j;
+ herr_t ret; /* return value */
+
+ /* Register a user-defined type with our custom ID-deleting callback */
+ obj_type = H5Iregister_type((size_t)8, 0, rct_free_cb);
+ CHECK(obj_type, H5I_BADID, "H5Iregister_type");
+ if (obj_type == H5I_BADID)
+ goto error;
+
+ /* Create an array to hold the objects in the master list */
+ list_size = RCT_MAX_NOBJS * sizeof(rct_obj_t);
+ obj_list.objects = HDmalloc(list_size);
+ CHECK_PTR(obj_list.objects, "HDmalloc");
+ if (NULL == obj_list.objects)
+ goto error;
+
+ /* Set a convenience pointer to the object array */
+ objects = obj_list.objects;
+
+ for (i = 0; i < RCT_NITER; i++) {
+
+ /* The number of members in the type, according to the HDF5 library */
+ hsize_t nmembers = 1234567; /* (init to fake number) */
+
+ /* The number of objects found while scanning through the object list */
+ int found;
+
+ /*********************
+ * Build object list *
+ *********************/
+
+ HDmemset(obj_list.objects, 0, list_size);
+
+ /* The number of objects used is a random number between the min and max */
+ obj_list.count = obj_list.remaining =
+ RCT_MIN_NOBJS + (HDrandom() % (long)(RCT_MAX_NOBJS - RCT_MIN_NOBJS + 1));
+
+ /* Create the actual objects */
+ for (j = 0; j < obj_list.count; j++) {
+
+ /* Object setup */
+ objects[j].nfrees = 0;
+ objects[j].freeing = FALSE;
+ objects[j].list = &obj_list;
+
+ /* Register an ID for it */
+ objects[j].id = H5Iregister(obj_type, &objects[j]);
+ CHECK(objects[j].id, FAIL, "H5Iregister");
+ if (objects[j].id == FAIL)
+ goto error;
+
+ /* Bump the reference count by 1 (to 2) 50% of the time */
+ if (HDrandom() % 2) {
+ ret = H5Iinc_ref(objects[j].id);
+ CHECK(ret, FAIL, "H5Iinc_ref");
+ if (ret == FAIL)
+ goto error;
+ }
+ }
+
+ /******************************************
+ * Clear the type with force set to FALSE *
+ ******************************************/
+
+ /* Clear the type. Since force is FALSE, only
+ * IDs with a reference count of 1 will be cleared.
+ */
+ ret = H5Iclear_type(obj_type, FALSE);
+ CHECK(ret, FAIL, "H5Iclear_type");
+ if (ret == FAIL)
+ goto error;
+
+ /* Verify that the object struct fields are sane and count the
+ * number of unfreed objects
+ */
+ found = 0;
+ for (j = 0; j < obj_list.count; j++) {
+
+ if (objects[j].nfrees == 0) {
+ /* Count unfreed objects */
+ found++;
+ }
+ else {
+ /* Every freed object should have been freed exactly once */
+ VERIFY(objects[j].nfrees, 1, "object freed more than once");
+ if (objects[j].nfrees != 1)
+ goto error;
+ }
+
+ /* No object should still be marked as "freeing" */
+ VERIFY(objects[j].freeing, FALSE, "object marked as freeing");
+ if (objects[j].freeing != FALSE)
+ goto error;
+ }
+
+ /* Verify the number of unfreed objects we found during our scan
+ * matches the number stored in the list
+ */
+ VERIFY(obj_list.remaining, found, "incorrect number of objects remaining");
+ if (obj_list.remaining != found)
+ goto error;
+
+ /* Make sure the HDF5 library confirms our count */
+ ret = H5Inmembers(obj_type, &nmembers);
+ CHECK(ret, FAIL, "H5Inmembers");
+ if (ret == FAIL)
+ goto error;
+ VERIFY(nmembers, found, "The number of members remaining in the type did not match our count");
+ if (nmembers != (hsize_t)found)
+ goto error;
+
+ /*****************************************
+ * Clear the type with force set to TRUE *
+ *****************************************/
+
+ /* Clear the type. Since force is TRUE, all IDs will be cleared. */
+ ret = H5Iclear_type(obj_type, TRUE);
+ CHECK(ret, FAIL, "H5Iclear_type");
+ if (ret == FAIL)
+ goto error;
+
+ /* Verify that the object struct fields are sane */
+ for (j = 0; j < obj_list.count; j++) {
+
+ /* Every object should have been freed exactly once */
+ VERIFY(objects[j].nfrees, 1, "object freed more than once");
+ if (objects[j].nfrees != 1)
+ goto error;
+
+ /* No object should still be marked as "freeing" */
+ VERIFY(objects[j].freeing, FALSE, "object marked as freeing");
+ if (objects[j].freeing != FALSE)
+ goto error;
+ }
+
+ /* Verify the number of objects is 0 */
+ VERIFY(obj_list.remaining, 0, "objects remaining was not zero");
+ if (obj_list.remaining != 0)
+ goto error;
+
+ /* Make sure the HDF5 library confirms zero members in the type */
+ ret = H5Inmembers(obj_type, &nmembers);
+ CHECK(ret, FAIL, "H5Inmembers");
+ if (ret == FAIL)
+ goto error;
+ VERIFY(nmembers, 0, "The number of members remaining in the type was not zero");
+ if (nmembers != 0)
+ goto error;
+ }
+
+ /* Destroy the type */
+ ret = H5Idestroy_type(obj_type);
+ CHECK(ret, FAIL, "H5Idestroy_type");
+ if (ret == FAIL)
+ goto error;
+
+ /* Free the object array */
+ HDfree(obj_list.objects);
+
+ return 0;
+
+error:
+ /* Cleanup. For simplicity, just destroy the types and ignore errors. */
+ H5E_BEGIN_TRY
+ {
+ H5Idestroy_type(obj_type);
+ }
+ H5E_END_TRY
+
+ HDfree(obj_list.objects);
+
+ return -1;
+} /* end test_remove_clear_type() */
+
+#if defined(H5VL_VERSION) && H5VL_VERSION >= 2
+/* Typedef for future objects */
+typedef struct {
+ H5I_type_t obj_type; /* ID type for actual object */
+} future_obj_t;
+
+/* Global (static) future ID object type */
+static H5I_type_t future_obj_type_g = H5I_BADID;
+
+/* Callback to free the actual object for future object test */
+static herr_t
+free_actual_object(void *_p, void H5_ATTR_UNUSED **_ctx)
+{
+ int *p = (int *)_p;
+
+ if (7 != *p)
+ return FAIL;
+
+ HDfree(p);
+
+ return SUCCEED;
+}
+
+/* Callback to realize a future object */
+static herr_t
+realize_future_cb(void *_future_obj, hid_t *actual_id)
+{
+ future_obj_t *future_obj = (future_obj_t *)_future_obj; /* Future object */
+ int *actual_obj; /* Pointer to the actual object */
+
+ /* Check for bad future object */
+ if (NULL == future_obj)
+ return FAIL;
+
+ /* Determine type of object to realize */
+ if (H5I_DATASPACE == future_obj->obj_type) {
+ hsize_t dims = 13;
+
+ if ((*actual_id = H5Screate_simple(1, &dims, NULL)) < 0)
+ return FAIL;
+ }
+ else if (H5I_DATATYPE == future_obj->obj_type) {
+ if ((*actual_id = H5Tcopy(H5T_NATIVE_INT)) < 0)
+ return FAIL;
+ }
+ else if (H5I_GENPROP_LST == future_obj->obj_type) {
+ if ((*actual_id = H5Pcreate(H5P_DATASET_XFER)) < 0)
+ return FAIL;
+ }
+ else {
+ /* Create a new object (the 'actual object') of the correct type */
+ if (NULL == (actual_obj = HDmalloc(sizeof(int))))
+ return FAIL;
+ *actual_obj = 7;
+
+ /* Register actual object of the user-defined type */
+ *actual_id = H5Iregister(future_obj->obj_type, actual_obj);
+ CHECK(*actual_id, FAIL, "H5Iregister");
+ if (*actual_id == FAIL)
+ return FAIL;
+ }
+
+ return SUCCEED;
+}
+
+/* Callback to discard a future object */
+static herr_t
+discard_future_cb(void *future_obj)
+{
+ if (NULL == future_obj)
+ return FAIL;
+
+ HDfree(future_obj);
+
+ return SUCCEED;
+}
+
+/* Callback to realize a future object when future objects are NULL */
+static herr_t
+realize_future_generate_cb(void *_future_obj, hid_t *actual_id)
+{
+ future_obj_t *future_obj = (future_obj_t *)_future_obj; /* Future object */
+ int *actual_obj; /* Pointer to the actual object */
+
+ if (NULL != future_obj)
+ return FAIL;
+ /* Create a new object (the 'actual object') of the correct type */
+ if (NULL == (actual_obj = HDmalloc(sizeof(int))))
+ return FAIL;
+ *actual_obj = 7;
+
+ /* Register actual object without using future object info */
+ *actual_id = H5Iregister(future_obj_type_g, actual_obj);
+ CHECK(*actual_id, FAIL, "H5Iregister");
+ if (*actual_id == FAIL)
+ return FAIL;
+
+ return SUCCEED;
+}
+
+/* Callback to discard a future object when future objects are NULL */
+static herr_t
+discard_future_generate_cb(void *future_obj)
+{
+ if (NULL != future_obj)
+ return FAIL;
+
+ return SUCCEED;
+}
+
+/* Test function */
+static int
+test_future_ids(void)
+{
+ H5I_type_t obj_type; /* New user-defined ID type */
+ hid_t future_id; /* ID for future object */
+ int fake_future_obj; /* "Fake" future object for tests */
+ future_obj_t *future_obj; /* Future object */
+ int *actual_obj; /* Actual object */
+ int *actual_obj2; /* Another actual object */
+ H5I_type_t id_type; /* Type of ID */
+ H5T_class_t type_class; /* Datatype class */
+ herr_t ret; /* Return value */
+
+ /* Register a user-defined type with our custom ID-deleting callback */
+ obj_type = H5Iregister_type((size_t)15, 0, free_actual_object);
+ CHECK(obj_type, H5I_BADID, "H5Iregister_type");
+ if (H5I_BADID == obj_type)
+ goto error;
+
+ /* Test basic error conditions */
+ fake_future_obj = 0;
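+ /* Both the realize and discard callbacks must be provided */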
+ H5E_BEGIN_TRY
+ {
+ future_id = H5Iregister_future(obj_type, &fake_future_obj, NULL, NULL);
+ }
+ H5E_END_TRY
+ VERIFY(future_id, H5I_INVALID_HID, "H5Iregister_future");
+ if (H5I_INVALID_HID != future_id)
+ goto error;
+
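+ /* A missing discard callback is rejected */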
+ H5E_BEGIN_TRY
+ {
+ future_id = H5Iregister_future(obj_type, &fake_future_obj, realize_future_cb, NULL);
+ }
+ H5E_END_TRY
+ VERIFY(future_id, H5I_INVALID_HID, "H5Iregister_future");
+ if (H5I_INVALID_HID != future_id)
+ goto error;
+
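+ /* A missing realize callback is rejected */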
+ H5E_BEGIN_TRY
+ {
+ future_id = H5Iregister_future(obj_type, &fake_future_obj, NULL, discard_future_cb);
+ }
+ H5E_END_TRY
+ VERIFY(future_id, H5I_INVALID_HID, "H5Iregister_future");
+ if (H5I_INVALID_HID != future_id)
+ goto error;
+
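+ /* An invalid ID type is rejected */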
+ H5E_BEGIN_TRY
+ {
+ future_id = H5Iregister_future(H5I_BADID, &fake_future_obj, realize_future_cb, discard_future_cb);
+ }
+ H5E_END_TRY
+ VERIFY(future_id, H5I_INVALID_HID, "H5Iregister_future");
+ if (H5I_INVALID_HID != future_id)
+ goto error;
+
+ /* Test base use-case: create a future object and destroy type without
+ * realizing the future object.
+ */
+ future_obj = HDmalloc(sizeof(future_obj_t));
+ future_obj->obj_type = obj_type;
+ future_id = H5Iregister_future(obj_type, future_obj, realize_future_cb, discard_future_cb);
+ CHECK(future_id, H5I_INVALID_HID, "H5Iregister_future");
+ if (H5I_INVALID_HID == future_id)
+ goto error;
+
+ /* Destroy the type */
+ ret = H5Idestroy_type(obj_type);
+ CHECK(ret, FAIL, "H5Idestroy_type");
+ if (FAIL == ret)
+ goto error;
+
+ /* Re-register a user-defined type with our custom ID-deleting callback */
+ obj_type = H5Iregister_type((size_t)15, 0, free_actual_object);
+ CHECK(obj_type, H5I_BADID, "H5Iregister_type");
+ if (H5I_BADID == obj_type)
+ goto error;
+
+ /* Test base use-case: create a future object and realize the actual object. */
+ future_obj = HDmalloc(sizeof(future_obj_t));
+ future_obj->obj_type = obj_type;
+ future_id = H5Iregister_future(obj_type, future_obj, realize_future_cb, discard_future_cb);
+ CHECK(future_id, H5I_INVALID_HID, "H5Iregister_future");
+ if (H5I_INVALID_HID == future_id)
+ goto error;
+
+ actual_obj = H5Iobject_verify(future_id, obj_type);
+ CHECK_PTR(actual_obj, "H5Iobject_verify");
+ if (NULL == actual_obj)
+ goto error;
+ VERIFY(*actual_obj, 7, "H5Iobject_verify");
+ if (7 != *actual_obj)
+ goto error;
+
+ /* Retrieve the object again and verify that it's the same actual object */
+ actual_obj2 = H5Iobject_verify(future_id, obj_type);
+ CHECK_PTR(actual_obj2, "H5Iobject_verify");
+ if (NULL == actual_obj2)
+ goto error;
+ VERIFY(*actual_obj2, 7, "H5Iobject_verify");
+ if (7 != *actual_obj2)
+ goto error;
+ CHECK_PTR_EQ(actual_obj, actual_obj2, "H5Iobject_verify");
+ if (actual_obj != actual_obj2)
+ goto error;
+
+ /* Destroy the type */
+ ret = H5Idestroy_type(obj_type);
+ CHECK(ret, FAIL, "H5Idestroy_type");
+ if (FAIL == ret)
+ goto error;
+
+ /* Re-register a user-defined type with our custom ID-deleting callback */
+ obj_type = H5Iregister_type((size_t)15, 0, free_actual_object);
+ CHECK(obj_type, H5I_BADID, "H5Iregister_type");
+ if (H5I_BADID == obj_type)
+ goto error;
+
+ /* Set the global future object type */
+ future_obj_type_g = obj_type;
+
+ /* Test "actual object generator" use-case: create a future object with
+ * NULL object pointer, to create new object of predefined type when
+ * future object is realized.
+ */
+ future_id = H5Iregister_future(obj_type, NULL, realize_future_generate_cb, discard_future_generate_cb);
+ CHECK(future_id, H5I_INVALID_HID, "H5Iregister_future");
+ if (H5I_INVALID_HID == future_id)
+ goto error;
+
+    /* Realize the actual object, which will be dynamically allocated within
+ * the 'realize' callback.
+ */
+ actual_obj = H5Iobject_verify(future_id, obj_type);
+ CHECK_PTR(actual_obj, "H5Iobject_verify");
+ if (NULL == actual_obj)
+ goto error;
+ VERIFY(*actual_obj, 7, "H5Iobject_verify");
+ if (7 != *actual_obj)
+ goto error;
+
+ /* Reset the global future object type */
+ future_obj_type_g = H5I_BADID;
+
+ /* Retrieve the object again and verify that it's the same actual object */
+    /* (Will fail if the global future object type is used) */
+ actual_obj2 = H5Iobject_verify(future_id, obj_type);
+ CHECK_PTR(actual_obj2, "H5Iobject_verify");
+ if (NULL == actual_obj2)
+ goto error;
+ VERIFY(*actual_obj2, 7, "H5Iobject_verify");
+ if (7 != *actual_obj2)
+ goto error;
+ CHECK_PTR_EQ(actual_obj, actual_obj2, "H5Iobject_verify");
+ if (actual_obj != actual_obj2)
+ goto error;
+
+ /* Destroy the type */
+ ret = H5Idestroy_type(obj_type);
+ CHECK(ret, FAIL, "H5Idestroy_type");
+ if (FAIL == ret)
+ goto error;
+
+ /* Test base use-case: create a future object for a pre-defined type */
+ /* (DATASPACE) */
+ future_obj = HDmalloc(sizeof(future_obj_t));
+ future_obj->obj_type = H5I_DATASPACE;
+ future_id = H5Iregister_future(H5I_DATASPACE, future_obj, realize_future_cb, discard_future_cb);
+ CHECK(future_id, H5I_INVALID_HID, "H5Iregister_future");
+ if (H5I_INVALID_HID == future_id)
+ goto error;
+
+ /* (Can't verify the type of the future ID, because the library's current
+ * implementation realizes the object during sanity checks on the ID)
+ */
+
+ /* Close future object for pre-defined type without realizing it */
+ ret = H5Idec_ref(future_id);
+ CHECK(ret, FAIL, "H5Idec_ref");
+ if (FAIL == ret)
+ goto error;
+
+ /* Test base use-case: create a future object for a pre-defined type */
+ future_obj = HDmalloc(sizeof(future_obj_t));
+ future_obj->obj_type = H5I_DATASPACE;
+ future_id = H5Iregister_future(H5I_DATASPACE, future_obj, realize_future_cb, discard_future_cb);
+ CHECK(future_id, H5I_INVALID_HID, "H5Iregister_future");
+ if (H5I_INVALID_HID == future_id)
+ goto error;
+
+ /* Verify that the application believes the future ID is a dataspace */
+ /* (Currently realizes the object "implicitly" during a sanity check) */
+ id_type = H5Iget_type(future_id);
+ CHECK(id_type, H5I_BADID, "H5Iget_type");
+ if (H5I_BADID == id_type)
+ goto error;
+ if (H5I_DATASPACE != id_type)
+ goto error;
+
+ /* Close future object for pre-defined type without realizing it */
+ ret = H5Idec_ref(future_id);
+ CHECK(ret, FAIL, "H5Idec_ref");
+ if (FAIL == ret)
+ goto error;
+
+ /* Test base use-case: create a future object for a pre-defined type */
+ future_obj = HDmalloc(sizeof(future_obj_t));
+ future_obj->obj_type = H5I_DATASPACE;
+ future_id = H5Iregister_future(H5I_DATASPACE, future_obj, realize_future_cb, discard_future_cb);
+ CHECK(future_id, H5I_INVALID_HID, "H5Iregister_future");
+ if (H5I_INVALID_HID == future_id)
+ goto error;
+
+ /* Realize future dataspace by requesting its rank */
+ ret = H5Sget_simple_extent_ndims(future_id);
+ CHECK(ret, FAIL, "H5Sget_simple_extent_ndims");
+ if (FAIL == ret)
+ goto error;
+ if (1 != ret)
+ goto error;
+
+ /* Verify that the application believes the ID is still a dataspace */
+ id_type = H5Iget_type(future_id);
+ CHECK(id_type, H5I_BADID, "H5Iget_type");
+ if (H5I_BADID == id_type)
+ goto error;
+ if (H5I_DATASPACE != id_type)
+ goto error;
+
+ /* Close future object for pre-defined type after realizing it */
+ ret = H5Idec_ref(future_id);
+ CHECK(ret, FAIL, "H5Idec_ref");
+ if (FAIL == ret)
+ goto error;
+
+ /* Test base use-case: create a future object for a pre-defined type */
+ /* (DATATYPE) */
+ future_obj = HDmalloc(sizeof(future_obj_t));
+ future_obj->obj_type = H5I_DATATYPE;
+ future_id = H5Iregister_future(H5I_DATATYPE, future_obj, realize_future_cb, discard_future_cb);
+ CHECK(future_id, H5I_INVALID_HID, "H5Iregister_future");
+ if (H5I_INVALID_HID == future_id)
+ goto error;
+
+ /* (Can't verify the type of the future ID, because the library's current
+ * implementation realizes the object during sanity checks on the ID)
+ */
+
+ /* Close future object for pre-defined type without realizing it */
+ ret = H5Idec_ref(future_id);
+ CHECK(ret, FAIL, "H5Idec_ref");
+ if (FAIL == ret)
+ goto error;
+
+ /* Test base use-case: create a future object for a pre-defined type */
+ future_obj = HDmalloc(sizeof(future_obj_t));
+ future_obj->obj_type = H5I_DATATYPE;
+ future_id = H5Iregister_future(H5I_DATATYPE, future_obj, realize_future_cb, discard_future_cb);
+ CHECK(future_id, H5I_INVALID_HID, "H5Iregister_future");
+ if (H5I_INVALID_HID == future_id)
+ goto error;
+
+ /* Verify that the application believes the future ID is a datatype */
+ /* (Currently realizes the object "implicitly" during a sanity check) */
+ id_type = H5Iget_type(future_id);
+ CHECK(id_type, H5I_BADID, "H5Iget_type");
+ if (H5I_BADID == id_type)
+ goto error;
+ if (H5I_DATATYPE != id_type)
+ goto error;
+
+ /* Close future object for pre-defined type without realizing it */
+ ret = H5Idec_ref(future_id);
+ CHECK(ret, FAIL, "H5Idec_ref");
+ if (FAIL == ret)
+ goto error;
+
+ /* Test base use-case: create a future object for a pre-defined type */
+ future_obj = HDmalloc(sizeof(future_obj_t));
+ future_obj->obj_type = H5I_DATATYPE;
+ future_id = H5Iregister_future(H5I_DATATYPE, future_obj, realize_future_cb, discard_future_cb);
+ CHECK(future_id, H5I_INVALID_HID, "H5Iregister_future");
+ if (H5I_INVALID_HID == future_id)
+ goto error;
+
+ /* Realize future datatype by requesting its class */
+ type_class = H5Tget_class(future_id);
+ CHECK(ret, FAIL, "H5Tget_class");
+ if (FAIL == ret)
+ goto error;
+ if (H5T_INTEGER != type_class)
+ goto error;
+
+ /* Verify that the application believes the ID is still a datatype */
+ id_type = H5Iget_type(future_id);
+ CHECK(id_type, H5I_BADID, "H5Iget_type");
+ if (H5I_BADID == id_type)
+ goto error;
+ if (H5I_DATATYPE != id_type)
+ goto error;
+
+ /* Close future object for pre-defined type after realizing it */
+ ret = H5Idec_ref(future_id);
+ CHECK(ret, FAIL, "H5Idec_ref");
+ if (FAIL == ret)
+ goto error;
+
+ /* Test base use-case: create a future object for a pre-defined type */
+ /* (PROPERTY LIST) */
+ future_obj = HDmalloc(sizeof(future_obj_t));
+ future_obj->obj_type = H5I_GENPROP_LST;
+ future_id = H5Iregister_future(H5I_GENPROP_LST, future_obj, realize_future_cb, discard_future_cb);
+ CHECK(future_id, H5I_INVALID_HID, "H5Iregister_future");
+ if (H5I_INVALID_HID == future_id)
+ goto error;
+
+ /* (Can't verify the type of the future ID, because the library's current
+ * implementation realizes the object during sanity checks on the ID)
+ */
+
+ /* Close future object for pre-defined type without realizing it */
+ ret = H5Idec_ref(future_id);
+ CHECK(ret, FAIL, "H5Idec_ref");
+ if (FAIL == ret)
+ goto error;
+
+ /* Test base use-case: create a future object for a pre-defined type */
+ future_obj = HDmalloc(sizeof(future_obj_t));
+ future_obj->obj_type = H5I_GENPROP_LST;
+ future_id = H5Iregister_future(H5I_GENPROP_LST, future_obj, realize_future_cb, discard_future_cb);
+ CHECK(future_id, H5I_INVALID_HID, "H5Iregister_future");
+ if (H5I_INVALID_HID == future_id)
+ goto error;
+
+ /* Verify that the application believes the future ID is a property list */
+ /* (Currently realizes the object "implicitly" during a sanity check) */
+ id_type = H5Iget_type(future_id);
+ CHECK(id_type, H5I_BADID, "H5Iget_type");
+ if (H5I_BADID == id_type)
+ goto error;
+ if (H5I_GENPROP_LST != id_type)
+ goto error;
+
+ /* Close future object for pre-defined type without realizing it */
+ ret = H5Idec_ref(future_id);
+ CHECK(ret, FAIL, "H5Idec_ref");
+ if (FAIL == ret)
+ goto error;
+
+ /* Test base use-case: create a future object for a pre-defined type */
+ future_obj = HDmalloc(sizeof(future_obj_t));
+ future_obj->obj_type = H5I_GENPROP_LST;
+ future_id = H5Iregister_future(H5I_GENPROP_LST, future_obj, realize_future_cb, discard_future_cb);
+ CHECK(future_id, H5I_INVALID_HID, "H5Iregister_future");
+ if (H5I_INVALID_HID == future_id)
+ goto error;
+
+ /* Realize future property list by verifying its class */
+ ret = H5Pisa_class(future_id, H5P_DATASET_XFER);
+ CHECK(ret, FAIL, "H5Pisa_class");
+ if (FAIL == ret)
+ goto error;
+ if (TRUE != ret)
+ goto error;
+
+ /* Verify that the application believes the ID is still a property list */
+ id_type = H5Iget_type(future_id);
+ CHECK(id_type, H5I_BADID, "H5Iget_type");
+ if (H5I_BADID == id_type)
+ goto error;
+ if (H5I_GENPROP_LST != id_type)
+ goto error;
+
+ /* Close future object for pre-defined type after realizing it */
+ ret = H5Idec_ref(future_id);
+ CHECK(ret, FAIL, "H5Idec_ref");
+ if (FAIL == ret)
+ goto error;
+
+ return 0;
+
+error:
+ /* Cleanup. For simplicity, just destroy the types and ignore errors. */
+ H5E_BEGIN_TRY
+ {
+ H5Idestroy_type(obj_type);
+ }
+ H5E_END_TRY
+
+ return -1;
+} /* end test_future_ids() */
+#endif
+
+void
+test_ids(void)
+{
+ /* Set the random # seed */
+ HDsrandom((unsigned)HDtime(NULL));
+
+ if (basic_id_test() < 0)
+ TestErrPrintf("Basic ID test failed\n");
+ if (id_predefined_test() < 0)
+ TestErrPrintf("Predefined ID type test failed\n");
+ if (test_is_valid() < 0)
+ TestErrPrintf("H5Iis_valid test failed\n");
+ if (test_get_type() < 0)
+ TestErrPrintf("H5Iget_type test failed\n");
+ if (test_id_type_list() < 0)
+ TestErrPrintf("ID type list test failed\n");
+ if (test_remove_clear_type() < 0)
+ TestErrPrintf("ID remove during H5Iclear_type test failed\n");
+#if defined(H5VL_VERSION) && H5VL_VERSION >= 2
+ if (test_future_ids() < 0)
+ TestErrPrintf("Future ID test failed\n");
+#endif
+}
diff --git a/test/API/titerate.c b/test/API/titerate.c
new file mode 100644
index 0000000..6cbebbd
--- /dev/null
+++ b/test/API/titerate.c
@@ -0,0 +1,1263 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://www.hdfgroup.org/licenses. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/***********************************************************
+ *
+ * Test program: titerate
+ *
+ * Test the Group & Attribute functionality
+ *
+ *************************************************************/
+
+#include "testhdf5.h"
+/* #include "H5srcdir.h" */
+
+#define DATAFILE "titerate.h5"
+
+/* Number of datasets for group iteration test */
+#define NDATASETS 50
+
+/* Number of attributes for attribute iteration test */
+#define NATTR 50
+
+/* Number of groups for second group iteration test */
+#define ITER_NGROUPS 150
+
+/* General maximum length of names used */
+#define NAMELEN 80
+
+/* 1-D dataset with fixed dimensions */
+#define SPACE1_RANK 1
+#define SPACE1_DIM1 4
+
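+/* Return-value commands for the iteration callbacks below: always return 0,
+ * always return 2, or start returning 1 once the callback has been invoked
+ * more than ten times.  RET_CHANGE2 uses a separate static counter so that
+ * the old-format and new-format test passes don't interfere with each other.
+ */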
+typedef enum { RET_ZERO, RET_TWO, RET_CHANGE, RET_CHANGE2 } iter_enum;
+
+/* Custom group iteration callback data */
+typedef struct {
+ char name[NAMELEN]; /* The name of the object */
+ H5O_type_t type; /* The type of the object */
+ iter_enum command; /* The type of return value */
+} iter_info;
+
+/* Definition for test_corrupted_attnamelen */
+#define CORRUPTED_ATNAMELEN_FILE "memleak_H5O_dtype_decode_helper_H5Odtype.h5"
+#define DSET_NAME "image"
+typedef struct searched_err_t {
+ char message[256];
+ hbool_t found;
+} searched_err_t;
+#if 0
+/* Call back function for test_corrupted_attnamelen */
+static int find_err_msg_cb(unsigned n, const H5E_error2_t *err_desc, void *_client_data);
+#endif
+/* Local functions */
+int iter_strcmp(const void *s1, const void *s2);
+int iter_strcmp2(const void *s1, const void *s2);
+#ifndef NO_ITERATION_RESTART
+static herr_t liter_cb(hid_t group, const char *name, const H5L_info2_t *info, void *op_data);
+static herr_t liter_cb2(hid_t group, const char *name, const H5L_info2_t *info, void *op_data);
+#endif
+herr_t aiter_cb(hid_t group, const char *name, const H5A_info_t *ainfo, void *op_data);
+
+/****************************************************************
+**
+** iter_strcmp(): String comparison routine for qsort
+**
+****************************************************************/
+H5_ATTR_PURE int
+iter_strcmp(const void *s1, const void *s2)
+{
+ return (HDstrcmp(*(const char *const *)s1, *(const char *const *)s2));
+}
+
+/****************************************************************
+**
+** liter_cb(): Custom link iteration callback routine.
+**
+****************************************************************/
+#ifndef NO_ITERATION_RESTART
+static herr_t
+liter_cb(hid_t H5_ATTR_UNUSED group, const char *name, const H5L_info2_t H5_ATTR_UNUSED *link_info,
+ void *op_data)
+{
+ iter_info *info = (iter_info *)op_data;
+ static int count = 0;
+ static int count2 = 0;
+
+ HDstrcpy(info->name, name);
+
+ switch (info->command) {
+ case RET_ZERO:
+ return (0);
+
+ case RET_TWO:
+ return (2);
+
+ case RET_CHANGE:
+ count++;
+ return (count > 10 ? 1 : 0);
+
+ case RET_CHANGE2:
+ count2++;
+ return (count2 > 10 ? 1 : 0);
+
+ default:
+ HDprintf("invalid iteration command");
+ return (-1);
+ } /* end switch */
+} /* end liter_cb() */
+#endif
+
+/****************************************************************
+**
+** test_iter_group(): Test group iteration functionality
+**
+****************************************************************/
+static void
+test_iter_group(hid_t fapl, hbool_t new_format)
+{
+#ifndef NO_ITERATION_RESTART
+ hid_t file; /* File ID */
+ hid_t dataset; /* Dataset ID */
+ hid_t datatype; /* Common datatype ID */
+ hid_t filespace; /* Common dataspace ID */
+ hid_t root_group, grp; /* Root group ID */
+ int i; /* counting variable */
+ hsize_t idx; /* Index in the group */
+ char name[NAMELEN]; /* temporary name buffer */
+ char *lnames[NDATASETS + 2]; /* Names of the links created */
+ char dataset_name[NAMELEN]; /* dataset name */
+ iter_info info; /* Custom iteration information */
+ H5G_info_t ginfo; /* Buffer for querying object's info */
+ herr_t ret; /* Generic return value */
+#else
+ (void)fapl;
+ (void)new_format;
+#endif
+
+ /* Output message about test being performed */
+ MESSAGE(
+ 5, ("Testing Group Iteration Functionality - SKIPPED for now due to no iteration restart support\n"));
+#ifndef NO_ITERATION_RESTART
+ /* Create the test file with the datasets */
+ file = H5Fcreate(DATAFILE, H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
+ CHECK(file, FAIL, "H5Fcreate");
+
+ /* Test iterating over empty group */
+ info.command = RET_ZERO;
+ idx = 0;
+ ret = H5Literate2(file, H5_INDEX_NAME, H5_ITER_INC, &idx, liter_cb, &info);
+ VERIFY(ret, SUCCEED, "H5Literate2");
+
+ datatype = H5Tcopy(H5T_NATIVE_INT);
+ CHECK(datatype, FAIL, "H5Tcopy");
+
+ filespace = H5Screate(H5S_SCALAR);
+ CHECK(filespace, FAIL, "H5Screate");
+
+ for (i = 0; i < NDATASETS; i++) {
+ HDsnprintf(name, sizeof(name), "Dataset %d", i);
+ dataset = H5Dcreate2(file, name, datatype, filespace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(dataset, FAIL, "H5Dcreate2");
+
+ /* Keep a copy of the dataset names around for later */
+ lnames[i] = HDstrdup(name);
+ CHECK_PTR(lnames[i], "strdup");
+
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+ } /* end for */
+
+ /* Create a group and named datatype under root group for testing */
+ grp = H5Gcreate2(file, "grp", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Gcreate2");
+
+ lnames[NDATASETS] = HDstrdup("grp");
+ CHECK_PTR(lnames[NDATASETS], "strdup");
+
+ ret = H5Tcommit2(file, "dtype", datatype, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Tcommit2");
+
+ lnames[NDATASETS + 1] = HDstrdup("dtype");
+ CHECK_PTR(lnames[NDATASETS], "strdup");
+
+ /* Close everything up */
+ ret = H5Tclose(datatype);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ ret = H5Gclose(grp);
+ CHECK(ret, FAIL, "H5Gclose");
+
+ ret = H5Sclose(filespace);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ ret = H5Fclose(file);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Sort the dataset names */
+ HDqsort(lnames, (size_t)(NDATASETS + 2), sizeof(char *), iter_strcmp);
+
+ /* Iterate through the datasets in the root group in various ways */
+ file = H5Fopen(DATAFILE, H5F_ACC_RDONLY, fapl);
+ CHECK(file, FAIL, "H5Fopen");
+
+ /* These two functions, H5Oget_info_by_idx and H5Lget_name_by_idx, actually
+     * iterate through the B-tree of group members in the library's internal design.
+ */
+ root_group = H5Gopen2(file, "/", H5P_DEFAULT);
+ CHECK(root_group, FAIL, "H5Gopen2");
+
+ ret = H5Gget_info(root_group, &ginfo);
+ CHECK(ret, FAIL, "H5Gget_info");
+ VERIFY(ginfo.nlinks, (NDATASETS + 2), "H5Gget_info");
+
+ for (i = 0; i < (int)ginfo.nlinks; i++) {
+ H5O_info2_t oinfo; /* Object info */
+
+ ret = (herr_t)H5Lget_name_by_idx(root_group, ".", H5_INDEX_NAME, H5_ITER_INC, (hsize_t)i,
+ dataset_name, (size_t)NAMELEN, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Lget_name_by_idx");
+
+ //! [H5Oget_info_by_idx3_snip]
+
+ ret = H5Oget_info_by_idx3(root_group, ".", H5_INDEX_NAME, H5_ITER_INC, (hsize_t)i, &oinfo,
+ H5O_INFO_BASIC, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Oget_info_by_idx");
+
+ //! [H5Oget_info_by_idx3_snip]
+
+ } /* end for */
+
+ H5E_BEGIN_TRY
+ {
+ ret =
+ (herr_t)H5Lget_name_by_idx(root_group, ".", H5_INDEX_NAME, H5_ITER_INC, (hsize_t)(NDATASETS + 3),
+ dataset_name, (size_t)NAMELEN, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+ VERIFY(ret, FAIL, "H5Lget_name_by_idx");
+
+ ret = H5Gclose(root_group);
+ CHECK(ret, FAIL, "H5Gclose");
+
+ /* These two functions, H5Oget_info_by_idx and H5Lget_name_by_idx, actually
+     * iterate through the B-tree of group members in the library's internal design.
+ * (Same as test above, but with the file ID instead of opening the root group)
+ */
+ ret = H5Gget_info(file, &ginfo);
+ CHECK(ret, FAIL, "H5Gget_info");
+ VERIFY(ginfo.nlinks, NDATASETS + 2, "H5Gget_info");
+
+ for (i = 0; i < (int)ginfo.nlinks; i++) {
+ H5O_info2_t oinfo; /* Object info */
+
+ ret = (herr_t)H5Lget_name_by_idx(file, ".", H5_INDEX_NAME, H5_ITER_INC, (hsize_t)i, dataset_name,
+ (size_t)NAMELEN, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Lget_name_by_idx");
+
+ ret = H5Oget_info_by_idx3(file, ".", H5_INDEX_NAME, H5_ITER_INC, (hsize_t)i, &oinfo, H5O_INFO_BASIC,
+ H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Oget_info_by_idx3");
+ } /* end for */
+
+ H5E_BEGIN_TRY
+ {
+ ret = (herr_t)H5Lget_name_by_idx(file, ".", H5_INDEX_NAME, H5_ITER_INC, (hsize_t)(NDATASETS + 3),
+ dataset_name, (size_t)NAMELEN, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+ VERIFY(ret, FAIL, "H5Lget_name_by_idx");
+
+ /* Test invalid indices for starting iteration */
+ info.command = RET_ZERO;
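+    /* ((hsize_t)-1 wraps around to the largest possible starting index) */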
+ idx = (hsize_t)-1;
+ H5E_BEGIN_TRY
+ {
+ ret = H5Literate2(file, H5_INDEX_NAME, H5_ITER_INC, &idx, liter_cb, &info);
+ }
+ H5E_END_TRY;
+ VERIFY(ret, FAIL, "H5Literate2");
+
+ /* Test skipping exactly as many entries as in the group */
+ idx = NDATASETS + 2;
+ H5E_BEGIN_TRY
+ {
+ ret = H5Literate2(file, H5_INDEX_NAME, H5_ITER_INC, &idx, liter_cb, &info);
+ }
+ H5E_END_TRY;
+ VERIFY(ret, FAIL, "H5Literate2");
+
+ /* Test skipping more entries than are in the group */
+ idx = NDATASETS + 3;
+ H5E_BEGIN_TRY
+ {
+ ret = H5Literate2(file, H5_INDEX_NAME, H5_ITER_INC, &idx, liter_cb, &info);
+ }
+ H5E_END_TRY;
+ VERIFY(ret, FAIL, "H5Literate2");
+
+ /* Test all objects in group, when callback always returns 0 */
+ info.command = RET_ZERO;
+ idx = 0;
+ if ((ret = H5Literate2(file, H5_INDEX_NAME, H5_ITER_INC, &idx, liter_cb, &info)) > 0)
+ TestErrPrintf("Group iteration function didn't return zero correctly!\n");
+
+    /* Test all objects in group, when callback always returns 2 */
+ /* This also tests the "restarting" ability, because the index changes */
+ info.command = RET_TWO;
+ i = 0;
+ idx = 0;
+    HDmemset(info.name, 0, NAMELEN);
+ while ((ret = H5Literate2(file, H5_INDEX_NAME, H5_ITER_INC, &idx, liter_cb, &info)) > 0) {
+ /* Verify return value from iterator gets propagated correctly */
+ VERIFY(ret, 2, "H5Literate2");
+
+ /* Increment the number of times "2" is returned */
+ i++;
+
+ /* Verify that the index is the correct value */
+ VERIFY(idx, (hsize_t)i, "H5Literate2");
+ if (idx != (hsize_t)i)
+ break;
+ if (idx > (NDATASETS + 2))
+ TestErrPrintf("Group iteration function walked too far!\n");
+
+ /* Verify that the correct name is retrieved */
+ if (HDstrncmp(info.name, lnames[(size_t)(idx - 1)], NAMELEN) != 0)
+ TestErrPrintf(
+ "Group iteration function didn't return name correctly for link - lnames[%u] = '%s'!\n",
+ (unsigned)(idx - 1), lnames[(size_t)(idx - 1)]);
+ } /* end while */
+ VERIFY(ret, -1, "H5Literate2");
+
+ if (i != (NDATASETS + 2))
+ TestErrPrintf("%u: Group iteration function didn't perform multiple iterations correctly!\n",
+ __LINE__);
+
+ /* Test all objects in group, when callback changes return value */
+ /* This also tests the "restarting" ability, because the index changes */
+ info.command = new_format ? RET_CHANGE2 : RET_CHANGE;
+ i = 0;
+ idx = 0;
+    HDmemset(info.name, 0, NAMELEN);
+ while ((ret = H5Literate2(file, H5_INDEX_NAME, H5_ITER_INC, &idx, liter_cb, &info)) >= 0) {
+ /* Verify return value from iterator gets propagated correctly */
+ VERIFY(ret, 1, "H5Literate2");
+
+ /* Increment the number of times "1" is returned */
+ i++;
+
+ /* Verify that the index is the correct value */
+ VERIFY(idx, (hsize_t)(i + 10), "H5Literate2");
+ if (idx != (hsize_t)(i + 10))
+ break;
+ if (idx > (NDATASETS + 2))
+ TestErrPrintf("Group iteration function walked too far!\n");
+
+ /* Verify that the correct name is retrieved */
+ if (HDstrncmp(info.name, lnames[(size_t)(idx - 1)], NAMELEN) != 0)
+ TestErrPrintf(
+ "Group iteration function didn't return name correctly for link - lnames[%u] = '%s'!\n",
+ (unsigned)(idx - 1), lnames[(size_t)(idx - 1)]);
+ } /* end while */
+ VERIFY(ret, -1, "H5Literate2");
+
+ if (i != 42 || idx != 52)
+ TestErrPrintf("%u: Group iteration function didn't perform multiple iterations correctly!\n",
+ __LINE__);
+
+ ret = H5Fclose(file);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Free the dataset names */
+ for (i = 0; i < (NDATASETS + 2); i++)
+ HDfree(lnames[i]);
+#endif
+} /* test_iter_group() */
+
+/****************************************************************
+**
+** aiter_cb(): Custom attribute iteration callback routine.
+**
+****************************************************************/
+herr_t
+aiter_cb(hid_t H5_ATTR_UNUSED group, const char *name, const H5A_info_t H5_ATTR_UNUSED *ainfo, void *op_data)
+{
+ iter_info *info = (iter_info *)op_data;
+ static int count = 0;
+ static int count2 = 0;
+
+ HDstrcpy(info->name, name);
+
+ switch (info->command) {
+ case RET_ZERO:
+ return (0);
+
+ case RET_TWO:
+ return (2);
+
+ case RET_CHANGE:
+ count++;
+ return (count > 10 ? 1 : 0);
+
+ case RET_CHANGE2:
+ count2++;
+ return (count2 > 10 ? 1 : 0);
+
+ default:
+ HDprintf("invalid iteration command");
+ return (-1);
+ } /* end switch */
+} /* end aiter_cb() */
+
+/****************************************************************
+**
+** test_iter_attr(): Test attribute iteration functionality
+**
+****************************************************************/
+static void
+test_iter_attr(hid_t fapl, hbool_t new_format)
+{
+#ifndef NO_ITERATION_RESTART
+ hid_t file; /* File ID */
+ hid_t dataset; /* Common Dataset ID */
+ hid_t filespace; /* Common dataspace ID */
+ hid_t attribute; /* Attribute ID */
+ int i; /* counting variable */
+ hsize_t idx; /* Index in the attribute list */
+ char name[NAMELEN]; /* temporary name buffer */
+ char *anames[NATTR]; /* Names of the attributes created */
+ iter_info info; /* Custom iteration information */
+ herr_t ret; /* Generic return value */
+#else
+ (void)fapl;
+ (void)new_format;
+#endif
+
+ /* Output message about test being performed */
+ MESSAGE(
+ 5,
+ ("Testing Attribute Iteration Functionality - SKIPPED for no due to no iteration restart support\n"));
+#ifndef NO_ITERATION_RESTART
+ HDmemset(&info, 0, sizeof(iter_info));
+
+ /* Create the test file with the datasets */
+ file = H5Fcreate(DATAFILE, H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
+ CHECK(file, FAIL, "H5Fcreate");
+
+ filespace = H5Screate(H5S_SCALAR);
+ CHECK(filespace, FAIL, "H5Screate");
+
+ dataset = H5Dcreate2(file, "Dataset", H5T_NATIVE_INT, filespace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(dataset, FAIL, "H5Dcreate2");
+
+ for (i = 0; i < NATTR; i++) {
+ HDsnprintf(name, sizeof(name), "Attribute %02d", i);
+ attribute = H5Acreate2(dataset, name, H5T_NATIVE_INT, filespace, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(attribute, FAIL, "H5Acreate2");
+
+ /* Keep a copy of the attribute names around for later */
+ anames[i] = HDstrdup(name);
+ CHECK_PTR(anames[i], "strdup");
+
+ ret = H5Aclose(attribute);
+ CHECK(ret, FAIL, "H5Aclose");
+ } /* end for */
+
+ /* Close everything up */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ ret = H5Sclose(filespace);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ ret = H5Fclose(file);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Iterate through the attributes on the dataset in various ways */
+ file = H5Fopen(DATAFILE, H5F_ACC_RDONLY, fapl);
+ CHECK(file, FAIL, "H5Fopen");
+
+ dataset = H5Dopen2(file, "Dataset", H5P_DEFAULT);
+ CHECK(dataset, FAIL, "H5Dopen2");
+
+ /* Test invalid indices for starting iteration */
+ info.command = RET_ZERO;
+
+ /* Test skipping exactly as many attributes as there are */
+ idx = NATTR;
+ H5E_BEGIN_TRY
+ {
+ ret = H5Aiterate2(dataset, H5_INDEX_NAME, H5_ITER_INC, &idx, aiter_cb, &info);
+ }
+ H5E_END_TRY;
+ VERIFY(ret, FAIL, "H5Aiterate2");
+
+ /* Test skipping more attributes than there are */
+ idx = NATTR + 1;
+ H5E_BEGIN_TRY
+ {
+ ret = H5Aiterate2(dataset, H5_INDEX_NAME, H5_ITER_INC, &idx, aiter_cb, &info);
+ }
+ H5E_END_TRY;
+ VERIFY(ret, FAIL, "H5Aiterate2");
+
+ /* Test all attributes on dataset, when callback always returns 0 */
+ info.command = RET_ZERO;
+ idx = 0;
+ if ((ret = H5Aiterate2(dataset, H5_INDEX_NAME, H5_ITER_INC, &idx, aiter_cb, &info)) > 0)
+ TestErrPrintf("Attribute iteration function didn't return zero correctly!\n");
+
+ /* Test all attributes on dataset, when callback always returns 2 */
+ /* This also tests the "restarting" ability, because the index changes */
+ info.command = RET_TWO;
+ i = 0;
+ idx = 0;
+ while ((ret = H5Aiterate2(dataset, H5_INDEX_NAME, H5_ITER_INC, &idx, aiter_cb, &info)) > 0) {
+ /* Verify return value from iterator gets propagated correctly */
+ VERIFY(ret, 2, "H5Aiterate2");
+
+ /* Increment the number of times "2" is returned */
+ i++;
+
+ /* Verify that the index is the correct value */
+ VERIFY(idx, (unsigned)i, "H5Aiterate2");
+
+ /* Don't check name when new format is used */
+ if (!new_format) {
+ /* Verify that the correct name is retrieved */
+ if (idx > 0) {
+ if (HDstrcmp(info.name, anames[(size_t)idx - 1]) != 0)
+ TestErrPrintf("%u: Attribute iteration function didn't set names correctly, info.name = "
+ "'%s', anames[%u] = '%s'!\n",
+ __LINE__, info.name, (unsigned)(idx - 1), anames[(size_t)idx - 1]);
+ } /* end if */
+ else
+ TestErrPrintf("%u: 'idx' was not set correctly!\n", __LINE__);
+ } /* end if */
+ } /* end while */
+ VERIFY(ret, -1, "H5Aiterate2");
+ if (i != 50 || idx != 50)
+ TestErrPrintf("%u: Attribute iteration function didn't perform multiple iterations correctly!\n",
+ __LINE__);
+
+ /* Test all attributes on dataset, when callback changes return value */
+ /* This also tests the "restarting" ability, because the index changes */
+ info.command = new_format ? RET_CHANGE2 : RET_CHANGE;
+ i = 0;
+ idx = 0;
+ while ((ret = H5Aiterate2(dataset, H5_INDEX_NAME, H5_ITER_INC, &idx, aiter_cb, &info)) > 0) {
+ /* Verify return value from iterator gets propagated correctly */
+ VERIFY(ret, 1, "H5Aiterate2");
+
+ /* Increment the number of times "1" is returned */
+ i++;
+
+ /* Verify that the index is the correct value */
+ VERIFY(idx, (unsigned)i + 10, "H5Aiterate2");
+
+ /* Don't check name when new format is used */
+ if (!new_format) {
+ /* Verify that the correct name is retrieved */
+ if (idx > 0) {
+ if (HDstrcmp(info.name, anames[(size_t)idx - 1]) != 0)
+ TestErrPrintf("%u: Attribute iteration function didn't set names correctly, info.name = "
+ "'%s', anames[%u] = '%s'!\n",
+ __LINE__, info.name, (unsigned)(idx - 1), anames[(size_t)idx - 1]);
+ }
+ else
+ TestErrPrintf("%u: 'idx' was not set correctly!\n", __LINE__);
+ } /* end if */
+ } /* end while */
+ VERIFY(ret, -1, "H5Aiterate2");
+ if (i != 40 || idx != 50)
+ TestErrPrintf("%u: Attribute iteration function didn't perform multiple iterations correctly!\n",
+ __LINE__);
+
+ ret = H5Fclose(file);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Free the attribute names */
+ for (i = 0; i < NATTR; i++)
+ HDfree(anames[i]);
+#endif
+} /* test_iter_attr() */
+
+/****************************************************************
+**
+** iter_strcmp2(): String comparison routine for qsort
+**
+****************************************************************/
+H5_ATTR_PURE int
+iter_strcmp2(const void *s1, const void *s2)
+{
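+    /* Unlike iter_strcmp(), this routine sorts an array of iter_info structs
+     * directly; since the name buffer is the struct's first member, the
+     * element pointers can be compared as strings.
+     */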
+ return (HDstrcmp((const char *)s1, (const char *)s2));
+} /* end iter_strcmp2() */
+
+/****************************************************************
+**
+** liter_cb2(): Custom link iteration callback routine.
+**
+****************************************************************/
+#ifndef NO_ITERATION_RESTART
+static herr_t
+liter_cb2(hid_t loc_id, const char *name, const H5L_info2_t H5_ATTR_UNUSED *link_info, void *opdata)
+{
+ const iter_info *test_info = (const iter_info *)opdata;
+ H5O_info2_t oinfo;
+ herr_t ret; /* Generic return value */
+
+ if (HDstrcmp(name, test_info->name) != 0) {
+ TestErrPrintf("name = '%s', test_info = '%s'\n", name, test_info->name);
+ return (H5_ITER_ERROR);
+ } /* end if */
+
+ /*
+ * Get type of the object and check it.
+ */
+ ret = H5Oget_info_by_name3(loc_id, name, &oinfo, H5O_INFO_BASIC, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Oget_info_by_name3");
+
+ if (test_info->type != oinfo.type) {
+ TestErrPrintf("test_info->type = %d, oinfo.type = %d\n", test_info->type, (int)oinfo.type);
+ return (H5_ITER_ERROR);
+ } /* end if */
+
+ return (H5_ITER_STOP);
+} /* liter_cb2() */
+#endif
+
+/****************************************************************
+**
+** test_iter_group_large(): Test group iteration functionality
+** for groups with large #'s of objects
+**
+****************************************************************/
+static void
+test_iter_group_large(hid_t fapl)
+{
+#ifndef NO_ITERATION_RESTART
+ hid_t file; /* HDF5 File IDs */
+ hid_t dataset; /* Dataset ID */
+ hid_t group; /* Group ID */
+ hid_t sid; /* Dataspace ID */
+ hid_t tid; /* Datatype ID */
+ hsize_t dims[] = {SPACE1_DIM1};
+ herr_t ret; /* Generic return value */
+ char gname[20]; /* Temporary group name */
+ iter_info *names; /* Names of objects in the root group */
+ iter_info *curr_name; /* Pointer to the current name in the root group */
+ int i;
+
+ /* Compound datatype */
+ typedef struct s1_t {
+ unsigned int a;
+ unsigned int b;
+ float c;
+ } s1_t;
+
+ /* Allocate & initialize array */
+    names = (iter_info *)HDcalloc((size_t)(ITER_NGROUPS + 2), sizeof(iter_info));
+ CHECK_PTR(names, "HDcalloc");
+#else
+ (void)fapl;
+#endif
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing Large Group Iteration Functionality - SKIPPED for now due to no iteration restart "
+ "support\n"));
+#ifndef NO_ITERATION_RESTART
+ /* Create file */
+ file = H5Fcreate(DATAFILE, H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
+ CHECK(file, FAIL, "H5Fcreate");
+
+ /* Create dataspace for datasets */
+ sid = H5Screate_simple(SPACE1_RANK, dims, NULL);
+ CHECK(sid, FAIL, "H5Screate_simple");
+
+ /* Create a bunch of groups */
+ for (i = 0; i < ITER_NGROUPS; i++) {
+ HDsnprintf(gname, sizeof(gname), "Group_%d", i);
+
+ /* Add the name to the list of objects in the root group */
+ HDstrcpy(names[i].name, gname);
+ names[i].type = H5O_TYPE_GROUP;
+
+ /* Create a group */
+ group = H5Gcreate2(file, gname, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(group, FAIL, "H5Gcreate2");
+
+ /* Close a group */
+ ret = H5Gclose(group);
+ CHECK(ret, FAIL, "H5Gclose");
+ } /* end for */
+
+ /* Create a dataset */
+ dataset = H5Dcreate2(file, "Dataset1", H5T_STD_U32LE, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(dataset, FAIL, "H5Dcreate2");
+
+ /* Add the name to the list of objects in the root group */
+ HDstrcpy(names[ITER_NGROUPS].name, "Dataset1");
+ names[ITER_NGROUPS].type = H5O_TYPE_DATASET;
+
+ /* Close Dataset */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close Dataspace */
+ ret = H5Sclose(sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Create a datatype */
+ tid = H5Tcreate(H5T_COMPOUND, sizeof(s1_t));
+ CHECK(tid, FAIL, "H5Tcreate");
+
+ /* Insert fields */
+ ret = H5Tinsert(tid, "a", HOFFSET(s1_t, a), H5T_NATIVE_INT);
+ CHECK(ret, FAIL, "H5Tinsert");
+
+ ret = H5Tinsert(tid, "b", HOFFSET(s1_t, b), H5T_NATIVE_INT);
+ CHECK(ret, FAIL, "H5Tinsert");
+
+ ret = H5Tinsert(tid, "c", HOFFSET(s1_t, c), H5T_NATIVE_FLOAT);
+ CHECK(ret, FAIL, "H5Tinsert");
+
+ /* Save datatype for later */
+ ret = H5Tcommit2(file, "Datatype1", tid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Tcommit2");
+
+ /* Add the name to the list of objects in the root group */
+ HDstrcpy(names[ITER_NGROUPS + 1].name, "Datatype1");
+ names[ITER_NGROUPS + 1].type = H5O_TYPE_NAMED_DATATYPE;
+
+ /* Close datatype */
+ ret = H5Tclose(tid);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ /* Need to sort the names in the root group, cause that's what the library does */
+ HDqsort(names, (size_t)(ITER_NGROUPS + 2), sizeof(iter_info), iter_strcmp2);
+
+ /* Iterate through the file to see members of the root group */
+ curr_name = &names[0];
+ ret = H5Literate2(file, H5_INDEX_NAME, H5_ITER_INC, NULL, liter_cb2, curr_name);
+ CHECK(ret, FAIL, "H5Literate2");
+ for (i = 1; i < 100; i++) {
+ hsize_t idx = (hsize_t)i;
+
+ curr_name = &names[i];
+ ret = H5Literate2(file, H5_INDEX_NAME, H5_ITER_INC, &idx, liter_cb2, curr_name);
+ CHECK(ret, FAIL, "H5Literate2");
+ } /* end for */
+
+ /* Close file */
+ ret = H5Fclose(file);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Release memory */
+ HDfree(names);
+#endif
+} /* test_iter_group_large() */
+
+/****************************************************************
+**
+** test_grp_memb_funcs(): Test group member information
+** functionality
+**
+****************************************************************/
+static void
+test_grp_memb_funcs(hid_t fapl)
+{
+ hid_t file; /* File ID */
+ hid_t dataset; /* Dataset ID */
+ hid_t datatype; /* Common datatype ID */
+ hid_t filespace; /* Common dataspace ID */
+ hid_t root_group, grp; /* Root group ID */
+ int i; /* counting variable */
+ char name[NAMELEN]; /* temporary name buffer */
+ char *dnames[NDATASETS + 2]; /* Names of the datasets created */
+ char *obj_names[NDATASETS + 2]; /* Names of the objects in group */
+ char dataset_name[NAMELEN]; /* dataset name */
+ ssize_t name_len; /* Length of object's name */
+ H5G_info_t ginfo; /* Buffer for querying object's info */
+ herr_t ret = SUCCEED; /* Generic return value */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing Group Member Information Functionality\n"));
+
+ /* Create the test file with the datasets */
+ file = H5Fcreate(DATAFILE, H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
+ CHECK(file, FAIL, "H5Fcreate");
+
+ datatype = H5Tcopy(H5T_NATIVE_INT);
+ CHECK(datatype, FAIL, "H5Tcopy");
+
+ filespace = H5Screate(H5S_SCALAR);
+ CHECK(filespace, FAIL, "H5Screate");
+
+ for (i = 0; i < NDATASETS; i++) {
+ HDsnprintf(name, sizeof(name), "Dataset %d", i);
+ dataset = H5Dcreate2(file, name, datatype, filespace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(dataset, FAIL, "H5Dcreate2");
+
+ /* Keep a copy of the dataset names around for later */
+ dnames[i] = HDstrdup(name);
+ CHECK_PTR(dnames[i], "strdup");
+
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+ } /* end for */
+
+ /* Create a group and named datatype under root group for testing */
+ grp = H5Gcreate2(file, "grp", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Gcreate2");
+
+ dnames[NDATASETS] = HDstrdup("grp");
+ CHECK_PTR(dnames[NDATASETS], "strdup");
+
+ ret = H5Tcommit2(file, "dtype", datatype, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Tcommit2");
+
+ dnames[NDATASETS + 1] = HDstrdup("dtype");
+ CHECK_PTR(dnames[NDATASETS], "strdup");
+
+ /* Close everything up */
+ ret = H5Tclose(datatype);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ ret = H5Gclose(grp);
+ CHECK(ret, FAIL, "H5Gclose");
+
+ ret = H5Sclose(filespace);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ ret = H5Fclose(file);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Sort the dataset names */
+ HDqsort(dnames, (size_t)(NDATASETS + 2), sizeof(char *), iter_strcmp);
+
+ /* Iterate through the datasets in the root group in various ways */
+ file = H5Fopen(DATAFILE, H5F_ACC_RDONLY, fapl);
+ CHECK(file, FAIL, "H5Fopen");
+
+ /* These two functions, H5Oget_info_by_idx and H5Lget_name_by_idx, actually
+     * iterate through the B-tree of group members in the library's internal design.
+ */
+ root_group = H5Gopen2(file, "/", H5P_DEFAULT);
+ CHECK(root_group, FAIL, "H5Gopen2");
+
+ ret = H5Gget_info(root_group, &ginfo);
+ CHECK(ret, FAIL, "H5Gget_info");
+ VERIFY(ginfo.nlinks, (NDATASETS + 2), "H5Gget_info");
+
+ for (i = 0; i < (int)ginfo.nlinks; i++) {
+ H5O_info2_t oinfo; /* Object info */
+
+ /* Test with NULL for name, to query length */
+ name_len = H5Lget_name_by_idx(root_group, ".", H5_INDEX_NAME, H5_ITER_INC, (hsize_t)i, NULL,
+ (size_t)NAMELEN, H5P_DEFAULT);
+ CHECK(name_len, FAIL, "H5Lget_name_by_idx");
+
+ ret = (herr_t)H5Lget_name_by_idx(root_group, ".", H5_INDEX_NAME, H5_ITER_INC, (hsize_t)i,
+ dataset_name, (size_t)(name_len + 1), H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Lget_name_by_idx");
+
+ /* Double-check that the length is the same */
+ VERIFY(ret, name_len, "H5Lget_name_by_idx");
+
+ /* Keep a copy of the dataset names around for later */
+ obj_names[i] = HDstrdup(dataset_name);
+ CHECK_PTR(obj_names[i], "strdup");
+
+ ret = H5Oget_info_by_idx3(root_group, ".", H5_INDEX_NAME, H5_ITER_INC, (hsize_t)i, &oinfo,
+ H5O_INFO_BASIC, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Oget_info_by_idx3");
+
+ if (!HDstrcmp(dataset_name, "grp"))
+ VERIFY(oinfo.type, H5O_TYPE_GROUP, "H5Lget_name_by_idx");
+ if (!HDstrcmp(dataset_name, "dtype"))
+ VERIFY(oinfo.type, H5O_TYPE_NAMED_DATATYPE, "H5Lget_name_by_idx");
+ if (!HDstrncmp(dataset_name, "Dataset", (size_t)7))
+ VERIFY(oinfo.type, H5O_TYPE_DATASET, "H5Lget_name_by_idx");
+ } /* end for */
+
+ H5E_BEGIN_TRY
+ {
+ ret =
+ (herr_t)H5Lget_name_by_idx(root_group, ".", H5_INDEX_NAME, H5_ITER_INC, (hsize_t)(NDATASETS + 3),
+ dataset_name, (size_t)NAMELEN, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+ VERIFY(ret, FAIL, "H5Lget_name_by_idx");
+
+ /* Sort the dataset names */
+ HDqsort(obj_names, (size_t)(NDATASETS + 2), sizeof(char *), iter_strcmp);
+
+ /* Compare object names */
+ for (i = 0; i < (int)ginfo.nlinks; i++) {
+ ret = HDstrcmp(dnames[i], obj_names[i]);
+ VERIFY(ret, 0, "HDstrcmp");
+ } /* end for */
+
+ ret = H5Gclose(root_group);
+ CHECK(ret, FAIL, "H5Gclose");
+
+ ret = H5Fclose(file);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Free the dataset names */
+ for (i = 0; i < (NDATASETS + 2); i++) {
+ HDfree(dnames[i]);
+ HDfree(obj_names[i]);
+ } /* end for */
+} /* test_grp_memb_funcs() */
+
+/****************************************************************
+**
+** test_links(): Test soft and hard link iteration
+**
+****************************************************************/
+static void
+test_links(hid_t fapl)
+{
+ hid_t file; /* File ID */
+ char obj_name[NAMELEN]; /* Names of the object in group */
+ ssize_t name_len; /* Length of object's name */
+ hid_t gid, gid1;
+ H5G_info_t ginfo; /* Buffer for querying object's info */
+ hsize_t i;
+ herr_t ret; /* Generic return value */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing Soft and Hard Link Iteration Functionality\n"));
+
+    /* Create the test file */
+ file = H5Fcreate(DATAFILE, H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
+ CHECK(file, FAIL, "H5Fcreate");
+
+ /* create groups */
+ gid = H5Gcreate2(file, "/g1", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(gid, FAIL, "H5Gcreate2");
+
+ gid1 = H5Gcreate2(file, "/g1/g1.1", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(gid1, FAIL, "H5Gcreate2");
+
+    /* Create a dangling soft link and a hard link to the group "/g1", both inside "/g1". */
+ ret = H5Lcreate_soft("something", gid, "softlink", H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Lcreate_soft");
+
+ ret = H5Lcreate_hard(gid, "/g1", H5L_SAME_LOC, "hardlink", H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Lcreate_hard");
+
+ ret = H5Gget_info(gid, &ginfo);
+ CHECK(ret, FAIL, "H5Gget_info");
+ VERIFY(ginfo.nlinks, 3, "H5Gget_info");
+
+ /* Test these two functions, H5Oget_info_by_idx and H5Lget_name_by_idx */
+ for (i = 0; i < ginfo.nlinks; i++) {
+ H5O_info2_t oinfo; /* Object info */
+ H5L_info2_t linfo; /* Link info */
+
+ /* Get link name */
+ name_len = H5Lget_name_by_idx(gid, ".", H5_INDEX_NAME, H5_ITER_INC, i, obj_name, (size_t)NAMELEN,
+ H5P_DEFAULT);
+ CHECK(name_len, FAIL, "H5Lget_name_by_idx");
+
+ /* Get link type */
+ ret = H5Lget_info_by_idx2(gid, ".", H5_INDEX_NAME, H5_ITER_INC, (hsize_t)i, &linfo, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Lget_info_by_idx2");
+
+ /* Get object type */
+ if (linfo.type == H5L_TYPE_HARD) {
+ ret = H5Oget_info_by_idx3(gid, ".", H5_INDEX_NAME, H5_ITER_INC, (hsize_t)i, &oinfo,
+ H5O_INFO_BASIC, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Oget_info_by_idx3");
+ } /* end if */
+
+ if (!HDstrcmp(obj_name, "g1.1"))
+ VERIFY(oinfo.type, H5O_TYPE_GROUP, "H5Lget_name_by_idx");
+ else if (!HDstrcmp(obj_name, "hardlink"))
+ VERIFY(oinfo.type, H5O_TYPE_GROUP, "H5Lget_name_by_idx");
+ else if (!HDstrcmp(obj_name, "softlink"))
+ VERIFY(linfo.type, H5L_TYPE_SOFT, "H5Lget_name_by_idx");
+ else
+ ERROR("unknown object name");
+ } /* end for */
+
+ ret = H5Gclose(gid);
+ CHECK(ret, FAIL, "H5Gclose");
+
+ ret = H5Gclose(gid1);
+ CHECK(ret, FAIL, "H5Gclose");
+
+ ret = H5Fclose(file);
+ CHECK(ret, FAIL, "H5Fclose");
+} /* test_links() */
+
+/*-------------------------------------------------------------------------
+ * Function: find_err_msg_cb
+ *
+ * Purpose: Callback function to find the given error message.
+ * Helper function for test_corrupted_attnamelen().
+ *
+ * Return: H5_ITER_STOP when the message is found
+ * H5_ITER_CONT, otherwise
+ *
+ *-------------------------------------------------------------------------
+ */
+#if 0
+static int
+find_err_msg_cb(unsigned H5_ATTR_UNUSED n, const H5E_error2_t *err_desc, void *_client_data)
+{
+ int status = H5_ITER_CONT;
+ searched_err_t *searched_err = (searched_err_t *)_client_data;
+
+ if (searched_err == NULL)
+ return H5_ITER_ERROR;
+
+ /* If the searched error message is found, stop the iteration */
+ if (err_desc->desc != NULL && HDstrcmp(err_desc->desc, searched_err->message) == 0) {
+ searched_err->found = TRUE;
+ status = H5_ITER_STOP;
+ }
+
+ return status;
+} /* end find_err_msg_cb() */
+#endif
+
+/**************************************************************************
+**
+** test_corrupted_attnamelen(): Test the fix for the JIRA issue HDFFV-10588,
+**              where a corrupted attribute name length can be
+**              detected and an invalid read avoided.
+**
+**************************************************************************/
+#if 0
+static void
+test_corrupted_attnamelen(void)
+{
+ hid_t fid = -1; /* File ID */
+ hid_t did = -1; /* Dataset ID */
+ searched_err_t err_caught; /* Data to be passed to callback func */
+ int err_status; /* Status returned by H5Aiterate2 */
+ herr_t ret; /* Return value */
+ hbool_t driver_is_default_compatible;
+    const char *testfile = H5_get_srcdir_filename(CORRUPTED_ATNAMELEN_FILE); /* Corrupted test file name */
+
+ const char *err_message = "attribute name has different length than stored length";
+ /* the error message produced when the failure occurs */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing the Handling of Corrupted Attribute's Name Length\n"));
+
+ ret = h5_driver_is_default_vfd_compatible(H5P_DEFAULT, &driver_is_default_compatible);
+ CHECK(ret, FAIL, "h5_driver_is_default_vfd_compatible");
+
+ if (!driver_is_default_compatible) {
+ HDprintf("-- SKIPPED --\n");
+ return;
+ }
+
+ fid = H5Fopen(testfile, H5F_ACC_RDONLY, H5P_DEFAULT);
+ CHECK(fid, FAIL, "H5Fopen");
+
+ /* Open the dataset */
+ did = H5Dopen2(fid, DSET_NAME, H5P_DEFAULT);
+ CHECK(did, FAIL, "H5Dopen2");
+
+ /* Call H5Aiterate2 to trigger the failure in HDFFV-10588. Failure should
+ occur in the decoding stage, so some arguments are not needed. */
+ err_status = H5Aiterate2(did, H5_INDEX_NAME, H5_ITER_INC, NULL, NULL, NULL);
+ VERIFY(err_status, FAIL, "H5Aiterate2");
+
+ /* Make sure the intended error was caught */
+ if (err_status == -1) {
+ /* Initialize client data */
+ HDstrcpy(err_caught.message, err_message);
+ err_caught.found = FALSE;
+
+ /* Look for the correct error message */
+ ret = H5Ewalk2(H5E_DEFAULT, H5E_WALK_UPWARD, find_err_msg_cb, &err_caught);
+ CHECK(ret, FAIL, "H5Ewalk2");
+
+ /* Fail if the indicated message is not found */
+ CHECK(err_caught.found, FALSE, "test_corrupted_attnamelen: Expected error not found");
+ }
+
+ /* Close the dataset and file */
+ ret = H5Dclose(did);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+
+} /* test_corrupted_attnamelen() */
+#endif
+
+#if 0
+#ifndef H5_NO_DEPRECATED_SYMBOLS
+/****************************************************************
+**
+** test_links_deprec(): Test soft and hard link iteration
+**
+****************************************************************/
+static void
+test_links_deprec(hid_t fapl)
+{
+ hid_t file; /* File ID */
+ char obj_name[NAMELEN]; /* Names of the object in group */
+ ssize_t name_len; /* Length of object's name */
+ hid_t gid, gid1;
+ H5G_info_t ginfo; /* Buffer for querying object's info */
+ hsize_t i;
+ herr_t ret; /* Generic return value */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing Soft and Hard Link Iteration Functionality Using Deprecated Routines\n"));
+
+    /* Create the test file */
+ file = H5Fcreate(DATAFILE, H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
+ CHECK(file, FAIL, "H5Fcreate");
+
+ /* create groups */
+ gid = H5Gcreate2(file, "/g1", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(gid, FAIL, "H5Gcreate2");
+
+ gid1 = H5Gcreate2(file, "/g1/g1.1", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(gid1, FAIL, "H5Gcreate2");
+
+    /* Create a dangling soft link and a hard link to the group "/g1", both inside "/g1". */
+ ret = H5Lcreate_soft("something", gid, "softlink", H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Lcreate_soft");
+
+ ret = H5Lcreate_hard(gid, "/g1", H5L_SAME_LOC, "hardlink", H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Lcreate_hard");
+
+ ret = H5Gget_info(gid, &ginfo);
+ CHECK(ret, FAIL, "H5Gget_info");
+ VERIFY(ginfo.nlinks, 3, "H5Gget_info");
+
+ /* Test these two functions, H5Oget_info_by_idx and H5Lget_name_by_idx */
+ for (i = 0; i < ginfo.nlinks; i++) {
+ H5O_info2_t oinfo; /* Object info */
+ H5L_info2_t linfo; /* Link info */
+
+ /* Get link name */
+ name_len = H5Lget_name_by_idx(gid, ".", H5_INDEX_NAME, H5_ITER_INC, i, obj_name, (size_t)NAMELEN,
+ H5P_DEFAULT);
+ CHECK(name_len, FAIL, "H5Lget_name_by_idx");
+
+ /* Get link type */
+ ret = H5Lget_info_by_idx2(gid, ".", H5_INDEX_NAME, H5_ITER_INC, (hsize_t)i, &linfo, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Lget_info_by_idx1");
+
+ /* Get object type */
+ if (linfo.type == H5L_TYPE_HARD) {
+ ret = H5Oget_info_by_idx3(gid, ".", H5_INDEX_NAME, H5_ITER_INC, (hsize_t)i, &oinfo,
+ H5O_INFO_BASIC, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Oget_info_by_idx");
+ } /* end if */
+
+ if (!HDstrcmp(obj_name, "g1.1"))
+ VERIFY(oinfo.type, H5O_TYPE_GROUP, "H5Lget_name_by_idx");
+ else if (!HDstrcmp(obj_name, "hardlink"))
+ VERIFY(oinfo.type, H5O_TYPE_GROUP, "H5Lget_name_by_idx");
+ else if (!HDstrcmp(obj_name, "softlink"))
+ VERIFY(linfo.type, H5L_TYPE_SOFT, "H5Lget_name_by_idx");
+ else
+ ERROR("unknown object name");
+ } /* end for */
+
+ ret = H5Gclose(gid);
+ CHECK(ret, FAIL, "H5Gclose");
+
+ ret = H5Gclose(gid1);
+ CHECK(ret, FAIL, "H5Gclose");
+
+ ret = H5Fclose(file);
+ CHECK(ret, FAIL, "H5Fclose");
+} /* test_links_deprec() */
+#endif
+#endif
+
+/****************************************************************
+**
+** test_iterate(): Main iteration testing routine.
+**
+****************************************************************/
+void
+test_iterate(void)
+{
+ hid_t fapl, fapl2; /* File access property lists */
+ unsigned new_format; /* Whether to use the new format or not */
+ herr_t ret; /* Generic return value */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing Iteration Operations\n"));
+
+ /* Get the default FAPL */
+ fapl = H5Pcreate(H5P_FILE_ACCESS);
+ CHECK(fapl, FAIL, "H5Pcreate");
+
+ /* Copy the file access property list */
+ fapl2 = H5Pcopy(fapl);
+ CHECK(fapl2, FAIL, "H5Pcopy");
+
+ /* Set the "use the latest version of the format" bounds for creating objects in the file */
+ ret = H5Pset_libver_bounds(fapl2, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST);
+ CHECK(ret, FAIL, "H5Pset_libver_bounds");
+
+ /* These next tests use the same file */
+ for (new_format = FALSE; new_format <= TRUE; new_format++) {
+ test_iter_group(new_format ? fapl2 : fapl, new_format); /* Test group iteration */
+ test_iter_group_large(new_format ? fapl2 : fapl); /* Test group iteration for large # of objects */
+ test_iter_attr(new_format ? fapl2 : fapl, new_format); /* Test attribute iteration */
+ test_grp_memb_funcs(new_format ? fapl2 : fapl); /* Test group member information functions */
+ test_links(new_format ? fapl2 : fapl); /* Test soft and hard link iteration */
+#if 0
+#ifndef H5_NO_DEPRECATED_SYMBOLS
+ test_links_deprec(new_format ? fapl2 : fapl); /* Test soft and hard link iteration */
+#endif
+#endif
+ } /* end for */
+#if 0
+ /* Test the fix for issue HDFFV-10588 */
+ test_corrupted_attnamelen();
+#endif
+ /* Close FAPLs */
+ ret = H5Pclose(fapl);
+ CHECK(ret, FAIL, "H5Pclose");
+ ret = H5Pclose(fapl2);
+ CHECK(ret, FAIL, "H5Pclose");
+} /* test_iterate() */
+
+/*-------------------------------------------------------------------------
+ * Function: cleanup_iterate
+ *
+ * Purpose: Cleanup temporary test files
+ *
+ * Return: none
+ *
+ * Programmer: Quincey Koziol
+ * April 5, 2000
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+void
+cleanup_iterate(void)
+{
+ H5Fdelete(DATAFILE, H5P_DEFAULT);
+}
diff --git a/test/API/tmisc.c b/test/API/tmisc.c
new file mode 100644
index 0000000..d35a00b
--- /dev/null
+++ b/test/API/tmisc.c
@@ -0,0 +1,6349 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://www.hdfgroup.org/licenses. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/***********************************************************
+ *
+ * Test program: tmisc
+ *
+ * Test miscellaneous features not tested elsewhere. Generally
+ * regression tests for bugs that have been reported and don't
+ * have an existing test to add them to.
+ *
+ *************************************************************/
+
+#define H5D_FRIEND /* Suppress error about including H5Dpkg */
+
+/* Define this macro to indicate that the testing APIs should be available */
+#define H5D_TESTING
+
+#include "testhdf5.h"
+/* #include "H5srcdir.h" */
+/* #include "H5Dpkg.h" */ /* Datasets */
+/* #include "H5MMprivate.h" */ /* Memory */
+
+/* Definitions for misc. test #1 */
+#define MISC1_FILE "tmisc1.h5"
+#define MISC1_VAL (13417386) /* 0xccbbaa */
+#define MISC1_VAL2 (15654348) /* 0xeeddcc */
+#define MISC1_DSET_NAME "/scalar_set"
+
+/* Definitions for misc. test #2 */
+#define MISC2_FILE_1 "tmisc2a.h5"
+#define MISC2_FILE_2 "tmisc2b.h5"
+#define MISC2_ATT_NAME_1 "scalar_att_1"
+#define MISC2_ATT_NAME_2 "scalar_att_2"
+
+typedef struct {
+ char *string;
+} misc2_struct;
+
+/* Definitions for misc. test #3 */
+#define MISC3_FILE "tmisc3.h5"
+#define MISC3_RANK 2
+#define MISC3_DIM1 6
+#define MISC3_DIM2 6
+#define MISC3_CHUNK_DIM1 2
+#define MISC3_CHUNK_DIM2 2
+#define MISC3_FILL_VALUE 2
+#define MISC3_DSET_NAME "/chunked"
+
+/* Definitions for misc. test #4 */
+#define MISC4_FILE_1 "tmisc4a.h5"
+#define MISC4_FILE_2 "tmisc4b.h5"
+#define MISC4_GROUP_1 "/Group1"
+#define MISC4_GROUP_2 "/Group2"
+
+/* Definitions for misc. test #5 */
+#define MISC5_FILE "tmisc5.h5"
+#define MISC5_DSETNAME "dset1"
+#define MISC5_DSETRANK 1
+#define MISC5_NELMTOPLVL 1
+#define MISC5_DBGNELM1 2
+#define MISC5_DBGNELM2 1
+#define MISC5_DBGNELM3 1
+#define MISC5_DBGELVAL1 999999999
+#define MISC5_DBGELVAL2 888888888
+#define MISC5_DBGELVAL3 777777777
+
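+/* Nested structures with variable-length (hvl_t) members, plus companion
+ * "handle" structures that hold the HDF5 IDs created for each level, used
+ * by misc. test #5.
+ */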
+typedef struct {
+ int st1_el1;
+ hvl_t st1_el2;
+} misc5_struct1;
+
+typedef struct {
+ int st2_el1;
+ hvl_t st2_el2;
+} misc5_struct2;
+
+typedef struct {
+ int st3_el1;
+} misc5_struct3;
+
+typedef struct {
+ hid_t st3h_base;
+ hid_t st3h_id;
+} misc5_struct3_hndl;
+
+typedef struct {
+ hid_t st2h_base;
+ hid_t st2h_id;
+ misc5_struct3_hndl *st2h_st3hndl;
+} misc5_struct2_hndl;
+
+typedef struct {
+ hid_t st1h_base;
+ hid_t st1h_id;
+ misc5_struct2_hndl *st1h_st2hndl;
+} misc5_struct1_hndl;
+
+/* Definitions for misc. test #6 */
+#define MISC6_FILE "tmisc6.h5"
+#define MISC6_DSETNAME1 "dset1"
+#define MISC6_DSETNAME2 "dset2"
+#define MISC6_NUMATTR 16
+
+/* Definitions for misc. test #7 */
+#define MISC7_FILE "tmisc7.h5"
+#define MISC7_DSETNAME1 "Dataset1"
+#define MISC7_DSETNAME2 "Dataset2"
+#define MISC7_TYPENAME1 "Datatype1"
+#define MISC7_TYPENAME2 "Datatype2"
+
+/* Definitions for misc. test #8 */
+#define MISC8_FILE "tmisc8.h5"
+#define MISC8_DSETNAME1 "Dataset1"
+#define MISC8_DSETNAME4 "Dataset4"
+#define MISC8_DSETNAME5 "Dataset5"
+#define MISC8_DSETNAME8 "Dataset8"
+
+#ifndef H5_HAVE_PARALLEL
+#define MISC8_DSETNAME2 "Dataset2"
+#define MISC8_DSETNAME3 "Dataset3"
+#define MISC8_DSETNAME6 "Dataset6"
+#define MISC8_DSETNAME7 "Dataset7"
+#define MISC8_DSETNAME9 "Dataset9"
+#define MISC8_DSETNAME10 "Dataset10"
+#endif
+
+#define MISC8_RANK 2
+#define MISC8_DIM0 50
+#define MISC8_DIM1 50
+#define MISC8_CHUNK_DIM0 10
+#define MISC8_CHUNK_DIM1 10
+
+/* Definitions for misc. test #9 */
+#define MISC9_FILE "tmisc9.h5"
+
+/* Definitions for misc. test #10 */
+#define MISC10_FILE_OLD "tmtimeo.h5"
+#define MISC10_FILE_NEW "tmisc10.h5"
+#define MISC10_DSETNAME "Dataset1"
+
+/* Definitions for misc. test #11 */
+#define MISC11_FILE "tmisc11.h5"
+#define MISC11_USERBLOCK 1024
+#define MISC11_SIZEOF_OFF 4
+#define MISC11_SIZEOF_LEN 4
+#define MISC11_SYM_LK 8
+#define MISC11_SYM_IK 32
+#define MISC11_ISTORE_IK 64
+#define MISC11_NINDEXES 1
+
+/* Definitions for misc. test #12 */
+#define MISC12_FILE "tmisc12.h5"
+#define MISC12_DSET_NAME "Dataset"
+#define MISC12_SPACE1_RANK 1
+#define MISC12_SPACE1_DIM1 4
+#define MISC12_CHUNK_SIZE 2
+#define MISC12_APPEND_SIZE 5
+
+/* Definitions for misc. test #13 */
+#define MISC13_FILE_1 "tmisc13a.h5"
+#define MISC13_FILE_2 "tmisc13b.h5"
+#define MISC13_DSET1_NAME "Dataset1"
+#define MISC13_DSET2_NAME "Dataset2"
+#define MISC13_DSET3_NAME "Dataset3"
+#define MISC13_GROUP1_NAME "Group1"
+#define MISC13_GROUP2_NAME "Group2"
+#define MISC13_DTYPE_NAME "Datatype"
+#define MISC13_RANK 1
+#define MISC13_DIM1 600
+#define MISC13_CHUNK_DIM1 10
+#define MISC13_USERBLOCK_SIZE 512
+#define MISC13_COPY_BUF_SIZE 4096
+
+/* Definitions for misc. test #14 */
+#define MISC14_FILE "tmisc14.h5"
+#define MISC14_DSET1_NAME "Dataset1"
+#define MISC14_DSET2_NAME "Dataset2"
+#define MISC14_DSET3_NAME "Dataset3"
+#define MISC14_METADATA_SIZE 4096
+
+/* Definitions for misc. test #15 */
+#define MISC15_FILE "tmisc15.h5"
+#define MISC15_BUF_SIZE 1024
+
+/* Definitions for misc. test #16 */
+#define MISC16_FILE "tmisc16.h5"
+#define MISC16_SPACE_DIM 4
+#define MISC16_SPACE_RANK 1
+#define MISC16_STR_SIZE 8
+#define MISC16_DSET_NAME "Dataset"
+
+/* Definitions for misc. test #17 */
+#define MISC17_FILE "tmisc17.h5"
+#define MISC17_SPACE_RANK 2
+#define MISC17_SPACE_DIM1 4
+#define MISC17_SPACE_DIM2 8
+#define MISC17_DSET_NAME "Dataset"
+
+/* Definitions for misc. test #18 */
+#define MISC18_FILE "tmisc18.h5"
+#define MISC18_DSET1_NAME "Dataset1"
+#define MISC18_DSET2_NAME "Dataset2"
+
+/* Definitions for misc. test #19 */
+#define MISC19_FILE "tmisc19.h5"
+#define MISC19_DSET_NAME "Dataset"
+#define MISC19_ATTR_NAME "Attribute"
+#define MISC19_GROUP_NAME "Group"
+
+/* Definitions for misc. test #20 */
+#define MISC20_FILE "tmisc20.h5"
+#define MISC20_FILE_OLD "tlayouto.h5"
+#define MISC20_DSET_NAME "Dataset"
+#define MISC20_DSET2_NAME "Dataset2"
+#define MISC20_SPACE_RANK 2
+/* Make sure the product of the following 2 does not get too close to */
+/* 64 bits, risking an overflow. */
+#define MISC20_SPACE_DIM0 (8 * 1024 * 1024 * (uint64_t)1024)
+#define MISC20_SPACE_DIM1 ((256 * 1024 * (uint64_t)1024) + 1)
+#define MISC20_SPACE2_DIM0 8
+#define MISC20_SPACE2_DIM1 4
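+/* (The two large dimensions above give 2^33 * (2^28 + 1), roughly 2^61 */
+/* elements, comfortably below the 64-bit limit.) */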
+
+#if defined(H5_HAVE_FILTER_SZIP) && !defined(H5_API_TEST_NO_FILTERS)
+/* Definitions for misc. test #21 */
+#define MISC21_FILE "tmisc21.h5"
+#define MISC21_DSET_NAME "Dataset"
+#define MISC21_SPACE_RANK 2
+#define MISC21_SPACE_DIM0 7639
+#define MISC21_SPACE_DIM1 6308
+#define MISC21_CHUNK_DIM0 2048
+#define MISC21_CHUNK_DIM1 2048
+
+/* Definitions for misc. test #22 */
+#define MISC22_FILE "tmisc22.h5"
+#define MISC22_DSET_NAME "Dataset"
+#define MISC22_SPACE_RANK 2
+#define MISC22_CHUNK_DIM0 512
+#define MISC22_CHUNK_DIM1 512
+#define MISC22_SPACE_DIM0 639
+#define MISC22_SPACE_DIM1 1308
+#endif /* H5_HAVE_FILTER_SZIP */
+
+/* Definitions for misc. test #23 */
+#define MISC23_FILE "tmisc23.h5"
+#define MISC23_NAME_BUF_SIZE 40
+
+/* Definitions for misc. test #24 */
+#define MISC24_FILE "tmisc24.h5"
+#define MISC24_GROUP_NAME "group"
+#define MISC24_GROUP_LINK "group_link"
+#define MISC24_DATASET_NAME "dataset"
+#define MISC24_DATASET_LINK "dataset_link"
+#define MISC24_DATATYPE_NAME "datatype"
+#define MISC24_DATATYPE_LINK "datatype_link"
+
+/* Definitions for misc. test #25 'a', 'b' & 'c' */
+#define MISC25A_FILE "foo.h5"
+#define MISC25A_GROUP0_NAME "grp0"
+#define MISC25A_GROUP1_NAME "/grp0/grp1"
+#define MISC25A_GROUP2_NAME "/grp0/grp2"
+#define MISC25A_GROUP3_NAME "/grp0/grp3"
+#define MISC25A_ATTR1_NAME "_long attribute_"
+#define MISC25A_ATTR1_LEN 11
+#define MISC25A_ATTR2_NAME "_short attr__"
+#define MISC25A_ATTR2_LEN 11
+#define MISC25A_ATTR3_NAME "_short attr__"
+#define MISC25A_ATTR3_LEN 1
+#define MISC25B_FILE "mergemsg.h5"
+#define MISC25B_GROUP "grp1"
+#define MISC25C_FILE "nc4_rename.h5"
+#define MISC25C_DSETNAME "da"
+#define MISC25C_DSETNAME2 "dz"
+#define MISC25C_DSETGRPNAME "ga"
+#define MISC25C_GRPNAME "gb"
+#define MISC25C_GRPNAME2 "gc"
+#define MISC25C_ATTRNAME "aa"
+#define MISC25C_ATTRNAME2 "ab"
+
+/* Definitions for misc. test #26 */
+#define MISC26_FILE "dcpl_file"
+
+/* Definitions for misc. test #27 */
+/* (Note that this test file is generated by the "gen_bad_ohdr.c" code) */
+#define MISC27_FILE "tbad_msg_count.h5"
+#define MISC27_GROUP "Group"
+
+/* Definitions for misc. test #28 */
+#define MISC28_FILE "tmisc28.h5"
+#define MISC28_SIZE 10
+#define MISC28_NSLOTS 10000
+
+/* Definitions for misc. test #29 */
+#define MISC29_ORIG_FILE "specmetaread.h5"
+#define MISC29_COPY_FILE "tmisc29.h5"
+#define MISC29_DSETNAME "dset2"
+
+/* Definitions for misc. test #30 */
+#define MISC30_FILE "tmisc30.h5"
+
+#ifndef H5_NO_DEPRECATED_SYMBOLS
+/* Definitions for misc. test #31 */
+#define MISC31_FILE "tmisc31.h5"
+#define MISC31_DSETNAME "dset"
+#define MISC31_ATTRNAME1 "attr1"
+#define MISC31_ATTRNAME2 "attr2"
+#define MISC31_GROUPNAME "group"
+#define MISC31_PROPNAME "misc31_prop"
+#define MISC31_DTYPENAME "dtype"
+#endif /* H5_NO_DEPRECATED_SYMBOLS */
+
+/* Definitions for misc. test #33 */
+/* Note that this test file is generated by "gen_bad_offset.c" */
+/* and bad offset values are written to that file for testing */
+#define MISC33_FILE "bad_offset.h5"
+
+/* Definitions for misc. test #35 */
+#define MISC35_SPACE_RANK 3
+#define MISC35_SPACE_DIM1 3
+#define MISC35_SPACE_DIM2 15
+#define MISC35_SPACE_DIM3 13
+#define MISC35_NPOINTS 10
+
+/* Definitions for misc. test #37 */
+/* The test file was formerly named h5_nrefs_POC.
+   See https://nvd.nist.gov/vuln/detail/CVE-2020-10812 */
+#define CVE_2020_10812_FILENAME "cve_2020_10812.h5"
+
+#if defined(H5_HAVE_FILTER_SZIP) && !defined(H5_API_TEST_NO_FILTERS)
+/*-------------------------------------------------------------------------
+ * Function: h5_szip_can_encode
+ *
+ * Purpose: Retrieve the filter config flags for szip and report
+ *          whether the encoder is available.
+ *
+ * Return: 1: decode+encode is enabled
+ * 0: only decode is enabled
+ * -1: other
+ *
+ * Programmer:
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+int
+h5_szip_can_encode(void)
+{
+ unsigned int filter_config_flags;
+
+ H5Zget_filter_info(H5Z_FILTER_SZIP, &filter_config_flags);
+ if ((filter_config_flags & (H5Z_FILTER_CONFIG_ENCODE_ENABLED | H5Z_FILTER_CONFIG_DECODE_ENABLED)) == 0) {
+ /* filter present but neither encode nor decode is supported (???) */
+ return -1;
+ }
+ else if ((filter_config_flags & (H5Z_FILTER_CONFIG_ENCODE_ENABLED | H5Z_FILTER_CONFIG_DECODE_ENABLED)) ==
+ H5Z_FILTER_CONFIG_DECODE_ENABLED) {
+ /* decoder only: read but not write */
+ return 0;
+ }
+ else if ((filter_config_flags & (H5Z_FILTER_CONFIG_ENCODE_ENABLED | H5Z_FILTER_CONFIG_DECODE_ENABLED)) ==
+ H5Z_FILTER_CONFIG_ENCODE_ENABLED) {
+ /* encoder only: write but not read (???) */
+ return -1;
+ }
+ else if ((filter_config_flags & (H5Z_FILTER_CONFIG_ENCODE_ENABLED | H5Z_FILTER_CONFIG_DECODE_ENABLED)) ==
+ (H5Z_FILTER_CONFIG_ENCODE_ENABLED | H5Z_FILTER_CONFIG_DECODE_ENABLED)) {
+ return 1;
+ }
+ return (-1);
+}
+#endif /* H5_HAVE_FILTER_SZIP */
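+
+/* A minimal usage sketch (kept disabled): tests that write szip-compressed
+ * data would typically gate themselves on h5_szip_can_encode() before
+ * requesting the filter.  The dcpl handle and szip parameters below are
+ * illustrative assumptions, not part of the tests in this file.
+ */
+#if 0
+static herr_t
+example_szip_gate(hid_t dcpl)
+{
+    /* Only request szip compression when the encoder is actually present */
+    if (h5_szip_can_encode() != 1)
+        return 0; /* decode-only or unavailable: skip the write test */
+
+    /* Nearest-neighbor coding, 32 pixels per block */
+    return H5Pset_szip(dcpl, H5_SZIP_NN_OPTION_MASK, 32);
+}
+#endif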
+
+/****************************************************************
+**
+** test_misc1(): test unlinking a dataset from a group and immediately
+** re-using the dataset name
+**
+****************************************************************/
+static void
+test_misc1(void)
+{
+ int i;
+ int i_check;
+ hid_t file, dataspace, dataset;
+ herr_t ret;
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing Unlinking Dataset and Re-creating It\n"));
+
+ file = H5Fcreate(MISC1_FILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(file, FAIL, "H5Fcreate");
+
+ dataspace = H5Screate(H5S_SCALAR);
+ CHECK(dataspace, FAIL, "H5Screate");
+
+ /* Write the dataset the first time. */
+ dataset =
+ H5Dcreate2(file, MISC1_DSET_NAME, H5T_NATIVE_INT, dataspace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(dataset, FAIL, "H5Dcreate2");
+
+ i = MISC1_VAL;
+ ret = H5Dwrite(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, &i);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Remove the dataset. */
+ ret = H5Ldelete(file, MISC1_DSET_NAME, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Ldelete");
+
+ /* Write the dataset for the second time with a different value. */
+ dataset =
+ H5Dcreate2(file, MISC1_DSET_NAME, H5T_NATIVE_INT, dataspace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(dataset, FAIL, "H5Dcreate2");
+
+ i = MISC1_VAL2;
+ ret = H5Dwrite(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, &i);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ ret = H5Sclose(dataspace);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ ret = H5Fclose(file);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Now, check the value written to the dataset, after it was re-created */
+ file = H5Fopen(MISC1_FILE, H5F_ACC_RDONLY, H5P_DEFAULT);
+ CHECK(file, FAIL, "H5Fopen");
+
+ dataspace = H5Screate(H5S_SCALAR);
+ CHECK(dataspace, FAIL, "H5Screate");
+
+ dataset = H5Dopen2(file, MISC1_DSET_NAME, H5P_DEFAULT);
+ CHECK(dataset, FAIL, "H5Dopen2");
+
+ ret = H5Dread(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, &i_check);
+ CHECK(ret, FAIL, "H5Dread");
+ VERIFY(i_check, MISC1_VAL2, "H5Dread");
+
+ ret = H5Sclose(dataspace);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ ret = H5Fclose(file);
+ CHECK(ret, FAIL, "H5Fclose");
+
+} /* end test_misc1() */
+
+static hid_t
+misc2_create_type(void)
+{
+ hid_t type, type_tmp;
+ herr_t ret;
+
+ type_tmp = H5Tcopy(H5T_C_S1);
+ CHECK(type_tmp, FAIL, "H5Tcopy");
+
+ ret = H5Tset_size(type_tmp, H5T_VARIABLE);
+ CHECK(ret, FAIL, "H5Tset_size");
+
+ type = H5Tcreate(H5T_COMPOUND, sizeof(misc2_struct));
+ CHECK(type, FAIL, "H5Tcreate");
+
+ ret = H5Tinsert(type, "string", offsetof(misc2_struct, string), type_tmp);
+ CHECK(ret, FAIL, "H5Tinsert");
+
+ ret = H5Tclose(type_tmp);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ return type;
+}
+
+static void
+test_misc2_write_attribute(void)
+{
+ hid_t file1, file2, root1, root2, dataspace, att1, att2;
+ hid_t type;
+ herr_t ret;
+ misc2_struct data, data_check;
+ char *string_att1 = HDstrdup("string attribute in file one");
+ char *string_att2 = HDstrdup("string attribute in file two");
+
+ HDmemset(&data, 0, sizeof(data));
+ HDmemset(&data_check, 0, sizeof(data_check));
+
+ type = misc2_create_type();
+
+ dataspace = H5Screate(H5S_SCALAR);
+ CHECK(dataspace, FAIL, "H5Screate");
+
+ file2 = H5Fcreate(MISC2_FILE_2, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(file2, FAIL, "H5Fcreate");
+
+ file1 = H5Fcreate(MISC2_FILE_1, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(file1, FAIL, "H5Fcreate");
+
+ root1 = H5Gopen2(file1, "/", H5P_DEFAULT);
+ CHECK(root1, FAIL, "H5Gopen2");
+
+ att1 = H5Acreate2(root1, MISC2_ATT_NAME_1, type, dataspace, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(att1, FAIL, "H5Acreate2");
+
+ data.string = string_att1;
+
+ ret = H5Awrite(att1, type, &data);
+ CHECK(ret, FAIL, "H5Awrite");
+
+ ret = H5Aread(att1, type, &data_check);
+ CHECK(ret, FAIL, "H5Aread");
+
+ ret = H5Treclaim(type, dataspace, H5P_DEFAULT, &data_check);
+ CHECK(ret, FAIL, "H5Treclaim");
+
+ ret = H5Aclose(att1);
+ CHECK(ret, FAIL, "H5Aclose");
+
+ ret = H5Gclose(root1);
+ CHECK(ret, FAIL, "H5Gclose");
+
+ ret = H5Fclose(file1);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ root2 = H5Gopen2(file2, "/", H5P_DEFAULT);
+ CHECK(root2, FAIL, "H5Gopen2");
+
+ att2 = H5Acreate2(root2, MISC2_ATT_NAME_2, type, dataspace, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(att2, FAIL, "H5Acreate2");
+
+ data.string = string_att2;
+
+ ret = H5Awrite(att2, type, &data);
+ CHECK(ret, FAIL, "H5Awrite");
+
+ ret = H5Aread(att2, type, &data_check);
+ CHECK(ret, FAIL, "H5Aread");
+
+ ret = H5Treclaim(type, dataspace, H5P_DEFAULT, &data_check);
+ CHECK(ret, FAIL, "H5Treclaim");
+
+ ret = H5Aclose(att2);
+ CHECK(ret, FAIL, "H5Aclose");
+
+ ret = H5Gclose(root2);
+ CHECK(ret, FAIL, "H5Gclose");
+
+ ret = H5Tclose(type);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ ret = H5Sclose(dataspace);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ ret = H5Fclose(file2);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ HDfree(string_att1);
+ HDfree(string_att2);
+}
+
+static void
+test_misc2_read_attribute(const char *filename, const char *att_name)
+{
+ hid_t file, root, att;
+ hid_t type;
+ hid_t space;
+ herr_t ret;
+ misc2_struct data_check;
+
+ type = misc2_create_type();
+
+ file = H5Fopen(filename, H5F_ACC_RDONLY, H5P_DEFAULT);
+ CHECK(file, FAIL, "H5Fopen");
+
+ root = H5Gopen2(file, "/", H5P_DEFAULT);
+ CHECK(root, FAIL, "H5Gopen2");
+
+ att = H5Aopen(root, att_name, H5P_DEFAULT);
+ CHECK(att, FAIL, "H5Aopen");
+
+ space = H5Aget_space(att);
+ CHECK(space, FAIL, "H5Aget_space");
+
+ ret = H5Aread(att, type, &data_check);
+ CHECK(ret, FAIL, "H5Aread");
+
+ ret = H5Treclaim(type, space, H5P_DEFAULT, &data_check);
+ CHECK(ret, FAIL, "H5Treclaim");
+
+ ret = H5Sclose(space);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ ret = H5Aclose(att);
+ CHECK(ret, FAIL, "H5Aclose");
+
+ ret = H5Tclose(type);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ ret = H5Gclose(root);
+ CHECK(ret, FAIL, "H5Gclose");
+
+ ret = H5Fclose(file);
+ CHECK(ret, FAIL, "H5Fclose");
+}
+
+/****************************************************************
+**
+** test_misc2(): test using the same VL-derived datatype in two
+** different files, which was causing problems with the
+** datatype conversion functions
+**
+****************************************************************/
+static void
+test_misc2(void)
+{
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing VL datatype in two different files\n"));
+
+ test_misc2_write_attribute();
+ test_misc2_read_attribute(MISC2_FILE_1, MISC2_ATT_NAME_1);
+ test_misc2_read_attribute(MISC2_FILE_2, MISC2_ATT_NAME_2);
+} /* end test_misc2() */
+
+/****************************************************************
+**
+** test_misc3(): Test reading from chunked dataset with non-zero
+** fill value
+**
+****************************************************************/
+static void
+test_misc3(void)
+{
+ hid_t file, dataspace, dataset, dcpl;
+ int rank = MISC3_RANK;
+ hsize_t dims[MISC3_RANK] = {MISC3_DIM1, MISC3_DIM2};
+ hsize_t chunk_dims[MISC3_RANK] = {MISC3_CHUNK_DIM1, MISC3_CHUNK_DIM2};
+ int fill = MISC3_FILL_VALUE;
+ int read_buf[MISC3_DIM1][MISC3_DIM2];
+ int i, j;
+ herr_t ret;
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing reading from chunked dataset with non-zero fill-value\n"));
+
+ file = H5Fcreate(MISC3_FILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(file, FAIL, "H5Fcreate");
+
+ /* Create a simple dataspace */
+ dataspace = H5Screate_simple(rank, dims, NULL);
+ CHECK(dataspace, FAIL, "H5Screate_simple");
+
+ /* Create a dataset creation property list */
+ dcpl = H5Pcreate(H5P_DATASET_CREATE);
+ CHECK(dcpl, FAIL, "H5Pcreate");
+
+ /* Set the chunk information */
+ ret = H5Pset_chunk(dcpl, rank, chunk_dims);
+    CHECK(ret, FAIL, "H5Pset_chunk");
+
+ /* Set the fill-value information */
+ ret = H5Pset_fill_value(dcpl, H5T_NATIVE_INT, &fill);
+    CHECK(ret, FAIL, "H5Pset_fill_value");
+
+ /* Create the dataset */
+ dataset = H5Dcreate2(file, MISC3_DSET_NAME, H5T_NATIVE_INT, dataspace, H5P_DEFAULT, dcpl, H5P_DEFAULT);
+ CHECK(dataset, FAIL, "H5Dcreate2");
+
+ /* Read from the dataset (should be fill-values) */
+ ret = H5Dread(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, &read_buf);
+ CHECK(ret, FAIL, "H5Dread");
+
+ for (i = 0; i < MISC3_DIM1; i++)
+ for (j = 0; j < MISC3_DIM2; j++)
+ VERIFY(read_buf[i][j], fill, "H5Dread");
+
+ /* Release resources */
+ ret = H5Pclose(dcpl);
+ CHECK(ret, FAIL, "H5Pclose");
+
+ ret = H5Sclose(dataspace);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ ret = H5Fclose(file);
+ CHECK(ret, FAIL, "H5Fclose");
+} /* end test_misc3() */
+
+/****************************************************************
+**
+** test_misc4(): Test that the 'fileno' field in H5O_info2_t is
+** valid.
+**
+****************************************************************/
+static void
+test_misc4(void)
+{
+ hid_t file1, file2, group1, group2, group3;
+ H5O_info2_t oinfo1, oinfo2, oinfo3;
+ herr_t ret;
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing fileno working in H5O_info2_t\n"));
+
+ file1 = H5Fcreate(MISC4_FILE_1, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(file1, FAIL, "H5Fcreate");
+
+ /* Create the first group */
+ group1 = H5Gcreate2(file1, MISC4_GROUP_1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(group1, FAIL, "H5Gcreate2");
+
+ /* Create the second group */
+ group2 = H5Gcreate2(file1, MISC4_GROUP_2, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(group2, FAIL, "H5Gcreate2");
+
+ file2 = H5Fcreate(MISC4_FILE_2, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(file2, FAIL, "H5Fcreate");
+
+ /* Create the first group */
+ group3 = H5Gcreate2(file2, MISC4_GROUP_1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(group3, FAIL, "H5Gcreate2");
+
+ /* Get the stat information for each group */
+ ret = H5Oget_info_by_name3(file1, MISC4_GROUP_1, &oinfo1, H5O_INFO_BASIC, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Oget_info_by_name3");
+ ret = H5Oget_info_by_name3(file1, MISC4_GROUP_2, &oinfo2, H5O_INFO_BASIC, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Oget_info_by_name3");
+ ret = H5Oget_info_by_name3(file2, MISC4_GROUP_1, &oinfo3, H5O_INFO_BASIC, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Oget_info_by_name3");
+
+ /* Verify that the fileno values are the same for groups from file1 */
+    VERIFY(oinfo1.fileno, oinfo2.fileno, "H5Oget_info_by_name3");
+
+ /* Verify that the fileno values are not the same between file1 & file2 */
+ if (oinfo1.fileno == oinfo3.fileno)
+ TestErrPrintf("Error on line %d: oinfo1.fileno != oinfo3.fileno\n", __LINE__);
+ if (oinfo2.fileno == oinfo3.fileno)
+ TestErrPrintf("Error on line %d: oinfo2.fileno != oinfo3.fileno\n", __LINE__);
+
+ /* Close the objects */
+ ret = H5Gclose(group1);
+ CHECK(ret, FAIL, "H5Gclose");
+
+ ret = H5Gclose(group2);
+ CHECK(ret, FAIL, "H5Gclose");
+
+ ret = H5Gclose(group3);
+ CHECK(ret, FAIL, "H5Gclose");
+
+ ret = H5Fclose(file1);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ ret = H5Fclose(file2);
+ CHECK(ret, FAIL, "H5Fclose");
+} /* end test_misc4() */
+
+/****************************************************************
+**
+** test_misc5(): Test compound & VL datatypes nested several levels deep
+**
+****************************************************************/
+
+/*********************** struct3 ***********************/
+
+static misc5_struct3_hndl *
+create_struct3(void)
+{
+ misc5_struct3_hndl *str3hndl; /* New 'struct3' created */
+ herr_t ret; /* For error checking */
+
+ str3hndl = (misc5_struct3_hndl *)HDmalloc(sizeof(misc5_struct3_hndl));
+ CHECK_PTR(str3hndl, "malloc");
+
+ str3hndl->st3h_base = H5Tcreate(H5T_COMPOUND, sizeof(misc5_struct3));
+ CHECK(str3hndl->st3h_base, FAIL, "H5Tcreate");
+
+ ret = H5Tinsert(str3hndl->st3h_base, "st3_el1", HOFFSET(misc5_struct3, st3_el1), H5T_NATIVE_INT);
+ CHECK(ret, FAIL, "H5Tinsert");
+
+ str3hndl->st3h_id = H5Tvlen_create(str3hndl->st3h_base);
+ CHECK(str3hndl->st3h_id, FAIL, "H5Tvlen_create");
+
+ return str3hndl;
+}
+
+static void
+delete_struct3(misc5_struct3_hndl *str3hndl)
+{
+ herr_t ret; /* For error checking */
+
+ ret = H5Tclose(str3hndl->st3h_id);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ ret = H5Tclose(str3hndl->st3h_base);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ HDfree(str3hndl);
+}
+
+static void
+set_struct3(misc5_struct3 *buf)
+{
+ buf->st3_el1 = MISC5_DBGELVAL3;
+}
+
+/*********************** struct2 ***********************/
+
+static misc5_struct2_hndl *
+create_struct2(void)
+{
+ misc5_struct2_hndl *str2hndl; /* New 'struct2' created */
+ herr_t ret; /* For error checking */
+
+ str2hndl = (misc5_struct2_hndl *)HDmalloc(sizeof(misc5_struct2_hndl));
+ CHECK_PTR(str2hndl, "HDmalloc");
+
+ str2hndl->st2h_base = H5Tcreate(H5T_COMPOUND, sizeof(misc5_struct2));
+ CHECK(str2hndl->st2h_base, FAIL, "H5Tcreate");
+
+ ret = H5Tinsert(str2hndl->st2h_base, "st2_el1", HOFFSET(misc5_struct2, st2_el1), H5T_NATIVE_INT);
+ CHECK(ret, FAIL, "H5Tinsert");
+
+ str2hndl->st2h_st3hndl = create_struct3();
+ CHECK_PTR(str2hndl->st2h_st3hndl, "create_struct3");
+
+ ret = H5Tinsert(str2hndl->st2h_base, "st2_el2", HOFFSET(misc5_struct2, st2_el2),
+ str2hndl->st2h_st3hndl->st3h_id);
+ CHECK(ret, FAIL, "H5Tinsert");
+
+ str2hndl->st2h_id = H5Tvlen_create(str2hndl->st2h_base);
+ CHECK(str2hndl->st2h_id, FAIL, "H5Tvlen_create");
+
+ return str2hndl;
+}
+
+static void
+delete_struct2(misc5_struct2_hndl *str2hndl)
+{
+ herr_t ret; /* For error checking */
+
+ ret = H5Tclose(str2hndl->st2h_id);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ delete_struct3(str2hndl->st2h_st3hndl);
+
+    ret = H5Tclose(str2hndl->st2h_base);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ HDfree(str2hndl);
+}
+
+static void
+set_struct2(misc5_struct2 *buf)
+{
+ unsigned i; /* Local index variable */
+
+ buf->st2_el1 = MISC5_DBGELVAL2;
+ buf->st2_el2.len = MISC5_DBGNELM3;
+
+ buf->st2_el2.p = HDmalloc((buf->st2_el2.len) * sizeof(misc5_struct3));
+ CHECK_PTR(buf->st2_el2.p, "HDmalloc");
+
+ for (i = 0; i < (buf->st2_el2.len); i++)
+ set_struct3(&(((misc5_struct3 *)(buf->st2_el2.p))[i]));
+}
+
+static void
+clear_struct2(misc5_struct2 *buf)
+{
+ HDfree(buf->st2_el2.p);
+}
+
+/*********************** struct1 ***********************/
+
+static misc5_struct1_hndl *
+create_struct1(void)
+{
+ misc5_struct1_hndl *str1hndl; /* New 'struct1' created */
+ herr_t ret; /* For error checking */
+
+ str1hndl = (misc5_struct1_hndl *)HDmalloc(sizeof(misc5_struct1_hndl));
+ CHECK_PTR(str1hndl, "HDmalloc");
+
+ str1hndl->st1h_base = H5Tcreate(H5T_COMPOUND, sizeof(misc5_struct1));
+ CHECK(str1hndl->st1h_base, FAIL, "H5Tcreate");
+
+ ret = H5Tinsert(str1hndl->st1h_base, "st1_el1", HOFFSET(misc5_struct1, st1_el1), H5T_NATIVE_INT);
+ CHECK(ret, FAIL, "H5Tinsert");
+
+ str1hndl->st1h_st2hndl = create_struct2();
+ CHECK_PTR(str1hndl->st1h_st2hndl, "create_struct2");
+
+ ret = H5Tinsert(str1hndl->st1h_base, "st1_el2", HOFFSET(misc5_struct1, st1_el2),
+ str1hndl->st1h_st2hndl->st2h_id);
+ CHECK(ret, FAIL, "H5Tinsert");
+
+ str1hndl->st1h_id = H5Tvlen_create(str1hndl->st1h_base);
+ CHECK(str1hndl->st1h_id, FAIL, "H5Tvlen_create");
+
+ return str1hndl;
+}
+
+static void
+delete_struct1(misc5_struct1_hndl *str1hndl)
+{
+ herr_t ret; /* For error checking */
+
+ ret = H5Tclose(str1hndl->st1h_id);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ delete_struct2(str1hndl->st1h_st2hndl);
+
+ ret = H5Tclose(str1hndl->st1h_base);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ HDfree(str1hndl);
+}
+
+static void
+set_struct1(misc5_struct1 *buf)
+{
+ unsigned i; /* Local index variable */
+
+ buf->st1_el1 = MISC5_DBGELVAL1;
+ buf->st1_el2.len = MISC5_DBGNELM2;
+
+ buf->st1_el2.p = HDmalloc((buf->st1_el2.len) * sizeof(misc5_struct2));
+ CHECK_PTR(buf->st1_el2.p, "HDmalloc");
+
+ for (i = 0; i < (buf->st1_el2.len); i++)
+ set_struct2(&(((misc5_struct2 *)(buf->st1_el2.p))[i]));
+}
+
+static void
+clear_struct1(misc5_struct1 *buf)
+{
+ unsigned i;
+
+ for (i = 0; i < buf->st1_el2.len; i++)
+ clear_struct2(&(((misc5_struct2 *)(buf->st1_el2.p))[i]));
+ HDfree(buf->st1_el2.p);
+}
+
+static void
+test_misc5(void)
+{
+ hid_t loc_id, space_id, dataset_id;
+ hid_t mem_type_id;
+ misc5_struct1_hndl *str1hndl;
+ hsize_t dims[MISC5_DSETRANK];
+ hvl_t buf;
+ unsigned i, j, k;
+ herr_t ret;
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing several level deep nested compound & VL datatypes \n"));
+
+ /* Write the dataset out */
+ loc_id = H5Fcreate(MISC5_FILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(loc_id, FAIL, "H5Fcreate");
+
+ /* Create the memory structure to write */
+ str1hndl = create_struct1();
+ CHECK_PTR(str1hndl, "create_struct1");
+
+ /* Create the dataspace */
+ dims[0] = MISC5_NELMTOPLVL;
+ space_id = H5Screate_simple(MISC5_DSETRANK, dims, NULL);
+ CHECK(space_id, FAIL, "H5Screate_simple");
+
+ /* Create the dataset */
+ dataset_id = H5Dcreate2(loc_id, MISC5_DSETNAME, str1hndl->st1h_id, space_id, H5P_DEFAULT, H5P_DEFAULT,
+ H5P_DEFAULT);
+ CHECK(dataset_id, FAIL, "H5Dcreate2");
+
+ /* Create the variable-length buffer */
+ buf.len = MISC5_DBGNELM1;
+ buf.p = HDmalloc((buf.len) * sizeof(misc5_struct1));
+ CHECK_PTR(buf.p, "HDmalloc");
+
+ /* Create the top-level VL information */
+ for (i = 0; i < MISC5_DBGNELM1; i++)
+ set_struct1(&(((misc5_struct1 *)(buf.p))[i]));
+
+ /* Write the data out */
+ ret = H5Dwrite(dataset_id, str1hndl->st1h_id, H5S_ALL, H5S_ALL, H5P_DEFAULT, &buf);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* Release the top-level VL information */
+ for (j = 0; j < MISC5_DBGNELM1; j++)
+ clear_struct1(&(((misc5_struct1 *)(buf.p))[j]));
+
+ /* Free the variable-length buffer */
+ HDfree(buf.p);
+
+ /* Close dataset */
+ ret = H5Dclose(dataset_id);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close dataspace */
+ ret = H5Sclose(space_id);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Delete memory structures */
+ delete_struct1(str1hndl);
+
+ /* Close file */
+ ret = H5Fclose(loc_id);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Read the dataset back in & verify it */
+ loc_id = H5Fopen(MISC5_FILE, H5F_ACC_RDONLY, H5P_DEFAULT);
+ CHECK(loc_id, FAIL, "H5Fopen");
+
+ /* Open dataset again */
+ dataset_id = H5Dopen2(loc_id, MISC5_DSETNAME, H5P_DEFAULT);
+ CHECK(dataset_id, FAIL, "H5Dopen2");
+
+ /* Get the dataset's datatype */
+ mem_type_id = H5Dget_type(dataset_id);
+ CHECK(mem_type_id, FAIL, "H5Dget_type");
+
+ /* Get the dataset's dataspace */
+ space_id = H5Dget_space(dataset_id);
+ CHECK(space_id, FAIL, "H5Dget_space");
+
+ /* Read the data back in */
+ ret = H5Dread(dataset_id, mem_type_id, H5S_ALL, H5S_ALL, H5P_DEFAULT, &buf);
+ CHECK(ret, FAIL, "H5Dread");
+
+ /* Verify the correct information was read in */
+ for (i = 0; i < (buf.len); i++) {
+ /* HDprintf("[%d]=%d\n",i, ((misc5_struct1 *)(buf.p))[i].st1_el1); */
+ VERIFY(((misc5_struct1 *)(buf.p))[i].st1_el1, MISC5_DBGELVAL1, "H5Dread");
+ for (j = 0; j < (((misc5_struct1 *)(buf.p))[i].st1_el2.len); j++) {
+ /* HDprintf(" [%d]=%d\n",j, ((misc5_struct2 *)(((misc5_struct1 *)
+ * (buf.p))[i].st1_el2.p))[j].st2_el1); */
+ VERIFY(((misc5_struct2 *)(((misc5_struct1 *)(buf.p))[i].st1_el2.p))[j].st2_el1, MISC5_DBGELVAL2,
+ "H5Dread");
+ for (k = 0; k < (((misc5_struct2 *)(((misc5_struct1 *)(buf.p))[i].st1_el2.p))[j].st2_el2.len);
+ k++) {
+ /* HDprintf(" [%d]=%d\n",k, ((misc5_struct3 *)(((misc5_struct2 *) (((misc5_struct1
+ * *)(buf.p))[i]. st1_el2.p))[j].st2_el2.p))[k].st3_el1); */
+ VERIFY(((misc5_struct3 *)(((misc5_struct2 *)(((misc5_struct1 *)(buf.p))[i].st1_el2.p))[j]
+ .st2_el2.p))[k]
+ .st3_el1,
+ MISC5_DBGELVAL3, "H5Dread");
+ } /* end for */
+ }
+ }
+
+ /* Reclaim the memory for the VL information */
+ ret = H5Treclaim(mem_type_id, space_id, H5P_DEFAULT, &buf);
+ CHECK(ret, FAIL, "H5Treclaim");
+
+ /* Close dataspace */
+ ret = H5Sclose(space_id);
+ CHECK(ret, FAIL, "H5Sclose");
+
+    /* Close datatype */
+ ret = H5Tclose(mem_type_id);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ /* Close dataset */
+ ret = H5Dclose(dataset_id);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close file */
+ ret = H5Fclose(loc_id);
+ CHECK(ret, FAIL, "H5Fclose");
+
+} /* end test_misc5() */
+
+/****************************************************************
+**
+** test_misc6(): Test that object header continuation messages are
+** created correctly.
+**
+****************************************************************/
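+/* Note: each pass of the attribute loop below re-opens the file and adds
+ * one more scalar attribute to both datasets, so their object headers must
+ * eventually grow past the initial header chunk and allocate continuation
+ * blocks, which is the behavior under test. */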
+static void
+test_misc6(void)
+{
+ hid_t loc_id, space_id, dataset_id;
+ hid_t attr_id;
+ char attr_name[16];
+ unsigned u;
+ herr_t ret;
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing object header continuation code \n"));
+
+ /* Create the file */
+ loc_id = H5Fcreate(MISC6_FILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(loc_id, FAIL, "H5Fcreate");
+
+ /* Create the dataspace */
+ space_id = H5Screate(H5S_SCALAR);
+ CHECK(space_id, FAIL, "H5Screate");
+
+ /* Create the first dataset */
+ dataset_id =
+ H5Dcreate2(loc_id, MISC6_DSETNAME1, H5T_NATIVE_INT, space_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(dataset_id, FAIL, "H5Dcreate2");
+
+ /* Close dataset */
+ ret = H5Dclose(dataset_id);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Create the second dataset */
+ dataset_id =
+ H5Dcreate2(loc_id, MISC6_DSETNAME2, H5T_NATIVE_INT, space_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(dataset_id, FAIL, "H5Dcreate2");
+
+ /* Close dataset */
+ ret = H5Dclose(dataset_id);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close file */
+ ret = H5Fclose(loc_id);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Loop through adding attributes to each dataset */
+ for (u = 0; u < MISC6_NUMATTR; u++) {
+ /* Create name for attribute */
+ HDsnprintf(attr_name, sizeof(attr_name), "Attr#%u", u);
+
+ /* Open the file */
+ loc_id = H5Fopen(MISC6_FILE, H5F_ACC_RDWR, H5P_DEFAULT);
+ CHECK(loc_id, FAIL, "H5Fopen");
+
+ /* Open first dataset */
+ dataset_id = H5Dopen2(loc_id, MISC6_DSETNAME1, H5P_DEFAULT);
+ CHECK(dataset_id, FAIL, "H5Dopen2");
+
+ /* Add attribute to dataset */
+ attr_id = H5Acreate2(dataset_id, attr_name, H5T_NATIVE_INT, space_id, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(attr_id, FAIL, "H5Acreate2");
+
+ /* Close attribute */
+ ret = H5Aclose(attr_id);
+ CHECK(ret, FAIL, "H5Aclose");
+
+ /* Close dataset */
+ ret = H5Dclose(dataset_id);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Open second dataset */
+ dataset_id = H5Dopen2(loc_id, MISC6_DSETNAME2, H5P_DEFAULT);
+ CHECK(dataset_id, FAIL, "H5Dopen2");
+
+ /* Add attribute to dataset */
+ attr_id = H5Acreate2(dataset_id, attr_name, H5T_NATIVE_INT, space_id, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(attr_id, FAIL, "H5Acreate2");
+
+ /* Close attribute */
+ ret = H5Aclose(attr_id);
+ CHECK(ret, FAIL, "H5Aclose");
+
+ /* Close dataset */
+ ret = H5Dclose(dataset_id);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close file */
+ ret = H5Fclose(loc_id);
+ CHECK(ret, FAIL, "H5Fclose");
+ } /* end for */
+
+ /* Close dataspace */
+ ret = H5Sclose(space_id);
+ CHECK(ret, FAIL, "H5Sclose");
+
+} /* end test_misc6() */
+
+/****************************************************************
+**
+** test_misc7(): Test that only datatypes which are sensible to store
+**      on disk (i.e. not partially initialized) can be committed.
+**
+****************************************************************/
+#if 0
+static void
+test_misc7(void)
+{
+ hid_t fid, did, tid, sid;
+ int enum_value = 1;
+ herr_t ret;
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing sensible datatype on disk code \n"));
+
+ /* Attempt to commit a non-sensible datatype */
+
+ /* Create the file */
+ fid = H5Fcreate(MISC7_FILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(fid, FAIL, "H5Fcreate");
+
+ /* Create the dataspace */
+ sid = H5Screate(H5S_SCALAR);
+ CHECK(sid, FAIL, "H5Screate");
+
+ /* Create the compound datatype to commit*/
+ tid = H5Tcreate(H5T_COMPOUND, (size_t)32);
+ CHECK(tid, FAIL, "H5Tcreate");
+
+ /* Attempt to commit an empty compound datatype */
+ ret = H5Tcommit2(fid, MISC7_TYPENAME1, tid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ VERIFY(ret, FAIL, "H5Tcommit2");
+
+ /* Attempt to use empty compound datatype to create dataset */
+ did = H5Dcreate2(fid, MISC7_DSETNAME1, tid, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ VERIFY(ret, FAIL, "H5Dcreate2");
+
+ /* Add a field to the compound datatype */
+ ret = H5Tinsert(tid, "a", (size_t)0, H5T_NATIVE_INT);
+ CHECK(ret, FAIL, "H5Tinsert");
+
+ /* Attempt to commit the compound datatype now - should work */
+ ret = H5Tcommit2(fid, MISC7_TYPENAME1, tid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Tcommit2");
+
+ /* Attempt to use compound datatype to create dataset now - should work */
+ did = H5Dcreate2(fid, MISC7_DSETNAME1, tid, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(did, FAIL, "H5Dcreate2");
+
+ /* Close dataset */
+ ret = H5Dclose(did);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close compound datatype */
+ ret = H5Tclose(tid);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ /* Create the enum datatype to commit*/
+ tid = H5Tenum_create(H5T_NATIVE_INT);
+ CHECK(tid, FAIL, "H5Tenum_create");
+
+ /* Attempt to commit an empty enum datatype */
+ ret = H5Tcommit2(fid, MISC7_TYPENAME2, tid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ VERIFY(ret, FAIL, "H5Tcommit2");
+
+ /* Attempt to use empty enum datatype to create dataset */
+ did = H5Dcreate2(fid, MISC7_DSETNAME2, tid, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ VERIFY(did, FAIL, "H5Dcreate2");
+
+ /* Add a member to the enum datatype */
+ ret = H5Tenum_insert(tid, "a", &enum_value);
+ CHECK(ret, FAIL, "H5Tenum_insert");
+
+ /* Attempt to commit the enum datatype now - should work */
+ ret = H5Tcommit2(fid, MISC7_TYPENAME2, tid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Tcommit2");
+
+ /* Attempt to use enum datatype to create dataset now - should work */
+ did = H5Dcreate2(fid, MISC7_DSETNAME2, tid, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(did, FAIL, "H5Dcreate2");
+
+ /* Close dataset */
+ ret = H5Dclose(did);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close enum datatype */
+ ret = H5Tclose(tid);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ /* Close dataspace */
+ ret = H5Sclose(sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close file */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+
+} /* end test_misc7() */
+#endif
+
+/****************************************************************
+**
+** test_misc8(): Test the storage size reported for various
+**      dataset storage layouts.
+**
+****************************************************************/
+#if 0
+static void
+test_misc8(void)
+{
+ hid_t fid, did, sid;
+ hid_t fapl; /* File access property list */
+ hid_t dcpl; /* Dataset creation property list */
+ int rank = MISC8_RANK;
+ hsize_t dims[MISC8_RANK] = {MISC8_DIM0, MISC8_DIM1};
+ hsize_t chunk_dims[MISC8_RANK] = {MISC8_CHUNK_DIM0, MISC8_CHUNK_DIM1};
+ hsize_t storage_size; /* Number of bytes of raw data storage used */
+ int *wdata; /* Data to write */
+ int *tdata; /* Temporary pointer to data write */
+#ifdef VERIFY_DATA
+ int *rdata; /* Data to read */
+ int *tdata2; /* Temporary pointer to data to read */
+#endif /* VERIFY_DATA */
+ unsigned u, v; /* Local index variables */
+ int mdc_nelmts; /* Metadata number of elements */
+ size_t rdcc_nelmts; /* Raw data number of elements */
+ size_t rdcc_nbytes; /* Raw data number of bytes */
+ double rdcc_w0; /* Raw data write percentage */
+ hsize_t start[MISC8_RANK]; /* Hyperslab start */
+ hsize_t count[MISC8_RANK]; /* Hyperslab block count */
+ herr_t ret;
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing dataset storage sizes\n"));
+
+ /* Allocate space for the data to write & read */
+ wdata = (int *)HDmalloc(sizeof(int) * MISC8_DIM0 * MISC8_DIM1);
+ CHECK_PTR(wdata, "HDmalloc");
+#ifdef VERIFY_DATA
+ rdata = (int *)HDmalloc(sizeof(int) * MISC8_DIM0 * MISC8_DIM1);
+ CHECK_PTR(rdata, "HDmalloc");
+#endif /* VERIFY_DATA */
+
+ /* Initialize values */
+ tdata = wdata;
+ for (u = 0; u < MISC8_DIM0; u++)
+ for (v = 0; v < MISC8_DIM1; v++)
+ *tdata++ = (int)(((u * MISC8_DIM1) + v) % 13);
+
+ /* Create a file access property list */
+ fapl = H5Pcreate(H5P_FILE_ACCESS);
+ CHECK(fapl, FAIL, "H5Pcreate");
+
+ /* Get the default file access properties for caching */
+ ret = H5Pget_cache(fapl, &mdc_nelmts, &rdcc_nelmts, &rdcc_nbytes, &rdcc_w0);
+ CHECK(ret, FAIL, "H5Pget_cache");
+
+ /* Decrease the size of the raw data cache */
+ rdcc_nbytes = 0;
+
+ /* Set the file access properties for caching */
+ ret = H5Pset_cache(fapl, mdc_nelmts, rdcc_nelmts, rdcc_nbytes, rdcc_w0);
+ CHECK(ret, FAIL, "H5Pset_cache");
+
+ /* Create the file */
+ fid = H5Fcreate(MISC8_FILE, H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
+ CHECK(fid, FAIL, "H5Fcreate");
+
+ /* Close file access property list */
+ ret = H5Pclose(fapl);
+ CHECK(ret, FAIL, "H5Pclose");
+
+ /* Create a simple dataspace */
+ sid = H5Screate_simple(rank, dims, NULL);
+ CHECK(sid, FAIL, "H5Screate_simple");
+
+ /* Select a hyperslab which coincides with chunk boundaries */
+ /* (For later use) */
+ start[0] = 1;
+ start[1] = 1;
+ count[0] = (MISC8_CHUNK_DIM0 * 2) - 1;
+ count[1] = (MISC8_CHUNK_DIM1 * 2) - 1;
+ ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, start, NULL, count, NULL);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* Create a dataset creation property list */
+ dcpl = H5Pcreate(H5P_DATASET_CREATE);
+ CHECK(dcpl, FAIL, "H5Pcreate");
+
+ /* I. contiguous dataset tests */
+
+ ret = H5Pset_layout(dcpl, H5D_CONTIGUOUS);
+ CHECK(ret, FAIL, "H5Pset_layout");
+
+ /* Set the space allocation time to early */
+ ret = H5Pset_alloc_time(dcpl, H5D_ALLOC_TIME_EARLY);
+ CHECK(ret, FAIL, "H5Pset_alloc_time");
+
+ /* Create a contiguous dataset, with space allocation early */
+ did = H5Dcreate2(fid, MISC8_DSETNAME1, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT);
+ CHECK(did, FAIL, "H5Dcreate2");
+
+ /* Check the storage size */
+ storage_size = H5Dget_storage_size(did);
+ CHECK(storage_size, 0, "H5Dget_storage_size");
+ VERIFY(storage_size, (hsize_t)(MISC8_DIM0 * MISC8_DIM1 * H5Tget_size(H5T_NATIVE_INT)),
+ "H5Dget_storage_size");
+
+ /* Close dataset ID */
+ ret = H5Dclose(did);
+ CHECK(ret, FAIL, "H5Dclose");
+
+#ifndef H5_HAVE_PARALLEL
+ /* Set the space allocation time to late */
+ ret = H5Pset_alloc_time(dcpl, H5D_ALLOC_TIME_LATE);
+ CHECK(ret, FAIL, "H5Pset_alloc_time");
+
+ /* Create a contiguous dataset, with space allocation late */
+ did = H5Dcreate2(fid, MISC8_DSETNAME2, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT);
+ CHECK(did, FAIL, "H5Dcreate2");
+
+ /* Check the storage size before data is written */
+ storage_size = H5Dget_storage_size(did);
+ VERIFY(storage_size, 0, "H5Dget_storage_size");
+
+ /* Write data */
+ ret = H5Dwrite(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, wdata);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* Check the storage size after data is written */
+ storage_size = H5Dget_storage_size(did);
+ CHECK(storage_size, 0, "H5Dget_storage_size");
+ VERIFY(storage_size, (hsize_t)(MISC8_DIM0 * MISC8_DIM1 * H5Tget_size(H5T_NATIVE_INT)),
+ "H5Dget_storage_size");
+
+ /* Close dataset ID */
+ ret = H5Dclose(did);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Set the space allocation time to incremental */
+ ret = H5Pset_alloc_time(dcpl, H5D_ALLOC_TIME_INCR);
+ CHECK(ret, FAIL, "H5Pset_alloc_time");
+
+    /* Create a contiguous dataset, with space allocation incremental */
+ did = H5Dcreate2(fid, MISC8_DSETNAME3, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT);
+ CHECK(did, FAIL, "H5Dcreate2");
+
+ /* Check the storage size before data is written */
+ storage_size = H5Dget_storage_size(did);
+ VERIFY(storage_size, 0, "H5Dget_storage_size");
+
+ /* Write data */
+ ret = H5Dwrite(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, wdata);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* Check the storage size after data is written */
+ storage_size = H5Dget_storage_size(did);
+ CHECK(storage_size, 0, "H5Dget_storage_size");
+ VERIFY(storage_size, (hsize_t)(MISC8_DIM0 * MISC8_DIM1 * H5Tget_size(H5T_NATIVE_INT)),
+ "H5Dget_storage_size");
+
+ /* Close dataset ID */
+ ret = H5Dclose(did);
+ CHECK(ret, FAIL, "H5Dclose");
+#endif /* H5_HAVE_PARALLEL */
+
+ /* II. compact dataset tests */
+ ret = H5Pset_layout(dcpl, H5D_COMPACT);
+ CHECK(ret, FAIL, "H5Pset_layout");
+
+ /* Set the space allocation time to late */
+ ret = H5Pset_alloc_time(dcpl, H5D_ALLOC_TIME_LATE);
+ CHECK(ret, FAIL, "H5Pset_alloc_time");
+
+    /* Attempt to create a compact dataset, with space allocation late */
+    /* Should fail */
+ H5E_BEGIN_TRY
+ {
+ did = H5Dcreate2(fid, MISC8_DSETNAME4, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+ VERIFY(did, FAIL, "H5Dcreate2");
+
+ /* Set the space allocation time to incremental */
+ ret = H5Pset_alloc_time(dcpl, H5D_ALLOC_TIME_INCR);
+ CHECK(ret, FAIL, "H5Pset_alloc_time");
+
+    /* Attempt to create a compact dataset, with space allocation incremental */
+    /* Should fail */
+ H5E_BEGIN_TRY
+ {
+ did = H5Dcreate2(fid, MISC8_DSETNAME4, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+ VERIFY(did, FAIL, "H5Dcreate2");
+
+ /* Set the space allocation time to early */
+ ret = H5Pset_alloc_time(dcpl, H5D_ALLOC_TIME_EARLY);
+ CHECK(ret, FAIL, "H5Pset_alloc_time");
+
+ /* Set the fill time to allocation */
+ ret = H5Pset_fill_time(dcpl, H5D_FILL_TIME_ALLOC);
+ CHECK(ret, FAIL, "H5Pset_alloc_time");
+
+    /* Create a compact dataset, with space allocation early */
+ did = H5Dcreate2(fid, MISC8_DSETNAME4, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT);
+ CHECK(did, FAIL, "H5Dcreate2");
+
+ /* Check the storage size */
+ storage_size = H5Dget_storage_size(did);
+ CHECK(storage_size, 0, "H5Dget_storage_size");
+ VERIFY(storage_size, (hsize_t)(MISC8_DIM0 * MISC8_DIM1 * H5Tget_size(H5T_NATIVE_INT)),
+ "H5Dget_storage_size");
+
+ /* Close dataset ID */
+ ret = H5Dclose(did);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* III. chunked dataset tests */
+
+ ret = H5Pset_layout(dcpl, H5D_CHUNKED);
+ CHECK(ret, FAIL, "H5Pset_layout");
+
+ /* Set the space allocation time to early */
+ ret = H5Pset_alloc_time(dcpl, H5D_ALLOC_TIME_EARLY);
+ CHECK(ret, FAIL, "H5Pset_alloc_time");
+
+ /* Use chunked storage for this dataset */
+ ret = H5Pset_chunk(dcpl, rank, chunk_dims);
+ CHECK(ret, FAIL, "H5Pset_chunk");
+
+ /* Create a chunked dataset, with space allocation early */
+ did = H5Dcreate2(fid, MISC8_DSETNAME5, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT);
+ CHECK(did, FAIL, "H5Dcreate2");
+
+ /* Check the storage size after data is written */
+ storage_size = H5Dget_storage_size(did);
+ CHECK(storage_size, 0, "H5Dget_storage_size");
+ VERIFY(storage_size, (hsize_t)(MISC8_DIM0 * MISC8_DIM1 * H5Tget_size(H5T_NATIVE_INT)),
+ "H5Dget_storage_size");
+
+ /* Close dataset ID */
+ ret = H5Dclose(did);
+ CHECK(ret, FAIL, "H5Dclose");
+
+#ifndef H5_HAVE_PARALLEL
+ /* Set the space allocation time to late */
+ ret = H5Pset_alloc_time(dcpl, H5D_ALLOC_TIME_LATE);
+ CHECK(ret, FAIL, "H5Pset_alloc_time");
+
+ /* Use chunked storage for this dataset */
+ ret = H5Pset_chunk(dcpl, rank, chunk_dims);
+ CHECK(ret, FAIL, "H5Pset_chunk");
+
+ /* Create a chunked dataset, with space allocation late */
+ did = H5Dcreate2(fid, MISC8_DSETNAME6, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT);
+ CHECK(did, FAIL, "H5Dcreate2");
+
+ /* Check the storage size after dataset is created */
+ storage_size = H5Dget_storage_size(did);
+ VERIFY(storage_size, 0, "H5Dget_storage_size");
+
+ /* Write part of the dataset */
+ ret = H5Dwrite(did, H5T_NATIVE_INT, sid, sid, H5P_DEFAULT, wdata);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* Check the storage size after data is written */
+ storage_size = H5Dget_storage_size(did);
+ CHECK(storage_size, 0, "H5Dget_storage_size");
+ VERIFY(storage_size, (hsize_t)(MISC8_DIM0 * MISC8_DIM1 * H5Tget_size(H5T_NATIVE_INT)),
+ "H5Dget_storage_size");
+
+ /* Close dataset ID */
+ ret = H5Dclose(did);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Set the space allocation time to incremental */
+ ret = H5Pset_alloc_time(dcpl, H5D_ALLOC_TIME_INCR);
+ CHECK(ret, FAIL, "H5Pset_alloc_time");
+
+ /* Create a chunked dataset, with space allocation incremental */
+ did = H5Dcreate2(fid, MISC8_DSETNAME7, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT);
+ CHECK(did, FAIL, "H5Dcreate2");
+
+ /* Check the storage size before data is written */
+ storage_size = H5Dget_storage_size(did);
+ VERIFY(storage_size, 0, "H5Dget_storage_size");
+
+ /* Write part of the dataset */
+ ret = H5Dwrite(did, H5T_NATIVE_INT, sid, sid, H5P_DEFAULT, wdata);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* Check the storage size after only four chunks are written */
+ storage_size = H5Dget_storage_size(did);
+ VERIFY(storage_size, (hsize_t)(4 * MISC8_CHUNK_DIM0 * MISC8_CHUNK_DIM1 * H5Tget_size(H5T_NATIVE_INT)),
+ "H5Dget_storage_size");
+
+ /* Write entire dataset */
+ ret = H5Dwrite(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, wdata);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+#ifdef VERIFY_DATA
+ /* Read data */
+ ret = H5Dread(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, rdata);
+ CHECK(ret, FAIL, "H5Dread");
+
+ /* Check values written */
+ tdata = wdata;
+ tdata2 = rdata;
+ for (u = 0; u < MISC8_DIM0; u++)
+ for (v = 0; v < MISC8_DIM1; v++, tdata++, tdata2++)
+ if (*tdata != *tdata2)
+ TestErrPrintf("Error on line %d: u=%u, v=%d, *tdata=%d, *tdata2=%d\n", __LINE__, (unsigned)u,
+ (unsigned)v, (int)*tdata, (int)*tdata2);
+#endif /* VERIFY_DATA */
+
+ /* Check the storage size after data is written */
+ storage_size = H5Dget_storage_size(did);
+ CHECK(storage_size, 0, "H5Dget_storage_size");
+ VERIFY(storage_size, (hsize_t)(MISC8_DIM0 * MISC8_DIM1 * H5Tget_size(H5T_NATIVE_INT)),
+ "H5Dget_storage_size");
+
+ /* Close dataset ID */
+ ret = H5Dclose(did);
+ CHECK(ret, FAIL, "H5Dclose");
+#endif /* H5_HAVE_PARALLEL */
+
+ /* Set the space allocation time to early */
+ ret = H5Pset_alloc_time(dcpl, H5D_ALLOC_TIME_EARLY);
+ CHECK(ret, FAIL, "H5Pset_alloc_time");
+
+ /* Use compression as well as chunking for these datasets */
+#ifdef H5_HAVE_FILTER_DEFLATE
+ ret = H5Pset_deflate(dcpl, 9);
+ CHECK(ret, FAIL, "H5Pset_deflate");
+#endif /* end H5_HAVE_FILTER_DEFLATE */
+
+ /* Create a chunked dataset, with space allocation early */
+ did = H5Dcreate2(fid, MISC8_DSETNAME8, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT);
+ CHECK(did, FAIL, "H5Dcreate2");
+
+ /* Write part of the dataset */
+ ret = H5Dwrite(did, H5T_NATIVE_INT, sid, sid, H5P_DEFAULT, wdata);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* Check the storage size after data is written */
+ storage_size = H5Dget_storage_size(did);
+ CHECK(storage_size, 0, "H5Dget_storage_size");
+#ifdef H5_HAVE_FILTER_DEFLATE
+ if (storage_size >= (MISC8_DIM0 * MISC8_DIM1 * H5Tget_size(H5T_NATIVE_INT)))
+ TestErrPrintf("Error on line %d: data wasn't compressed! storage_size=%u\n", __LINE__,
+ (unsigned)storage_size);
+#else /* Compression is not configured */
+ if (storage_size != (MISC8_DIM0 * MISC8_DIM1 * H5Tget_size(H5T_NATIVE_INT)))
+ TestErrPrintf("Error on line %d: wrong storage size! storage_size=%u\n", __LINE__,
+ (unsigned)storage_size);
+#endif /* H5_HAVE_FILTER_DEFLATE */
+
+ /* Close dataset ID */
+ ret = H5Dclose(did);
+ CHECK(ret, FAIL, "H5Dclose");
+
+#ifndef H5_HAVE_PARALLEL
+ /* Set the space allocation time to late */
+ ret = H5Pset_alloc_time(dcpl, H5D_ALLOC_TIME_LATE);
+ CHECK(ret, FAIL, "H5Pset_alloc_time");
+
+ /* Create a chunked dataset, with space allocation late */
+ did = H5Dcreate2(fid, MISC8_DSETNAME9, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT);
+ CHECK(did, FAIL, "H5Dcreate2");
+
+ /* Check the storage size before data is written */
+ storage_size = H5Dget_storage_size(did);
+ VERIFY(storage_size, 0, "H5Dget_storage_size");
+
+ /* Write part of the dataset */
+ ret = H5Dwrite(did, H5T_NATIVE_INT, sid, sid, H5P_DEFAULT, wdata);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* Check the storage size after only four chunks are written */
+ storage_size = H5Dget_storage_size(did);
+ CHECK(storage_size, 0, "H5Dget_storage_size");
+#ifdef H5_HAVE_FILTER_DEFLATE
+ if (storage_size >= (MISC8_DIM0 * MISC8_DIM1 * H5Tget_size(H5T_NATIVE_INT)))
+ TestErrPrintf("Error on line %d: data wasn't compressed! storage_size=%u\n", __LINE__,
+ (unsigned)storage_size);
+#else /* Compression is not configured */
+ if (storage_size != (MISC8_DIM0 * MISC8_DIM1 * H5Tget_size(H5T_NATIVE_INT)))
+ TestErrPrintf("Error on line %d: wrong storage size! storage_size=%u\n", __LINE__,
+ (unsigned)storage_size);
+#endif /* H5_HAVE_FILTER_DEFLATE */
+
+ /* Write entire dataset */
+ ret = H5Dwrite(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, wdata);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+#ifdef VERIFY_DATA
+ /* Read data */
+ ret = H5Dread(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, rdata);
+ CHECK(ret, FAIL, "H5Dread");
+
+ /* Check values written */
+ tdata = wdata;
+ tdata2 = rdata;
+ for (u = 0; u < MISC8_DIM0; u++)
+ for (v = 0; v < MISC8_DIM1; v++, tdata++, tdata2++)
+ if (*tdata != *tdata2)
+ TestErrPrintf("Error on line %d: u=%u, v=%d, *tdata=%d, *tdata2=%d\n", __LINE__, (unsigned)u,
+ (unsigned)v, (int)*tdata, (int)*tdata2);
+#endif /* VERIFY_DATA */
+
+ /* Check the storage size after data is written */
+ storage_size = H5Dget_storage_size(did);
+ CHECK(storage_size, 0, "H5Dget_storage_size");
+#ifdef H5_HAVE_FILTER_DEFLATE
+ if (storage_size >= (MISC8_DIM0 * MISC8_DIM1 * H5Tget_size(H5T_NATIVE_INT)))
+ TestErrPrintf("Error on line %d: data wasn't compressed! storage_size=%u\n", __LINE__,
+ (unsigned)storage_size);
+#else
+ if (storage_size != (MISC8_DIM0 * MISC8_DIM1 * H5Tget_size(H5T_NATIVE_INT)))
+ TestErrPrintf("Error on line %d: wrong storage size! storage_size=%u\n", __LINE__,
+ (unsigned)storage_size);
+#endif /*H5_HAVE_FILTER_DEFLATE*/
+
+ /* Close dataset ID */
+ ret = H5Dclose(did);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Set the space allocation time to incremental */
+ ret = H5Pset_alloc_time(dcpl, H5D_ALLOC_TIME_INCR);
+ CHECK(ret, FAIL, "H5Pset_alloc_time");
+
+ /* Create a chunked dataset, with space allocation incremental */
+ did = H5Dcreate2(fid, MISC8_DSETNAME10, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT);
+ CHECK(did, FAIL, "H5Dcreate2");
+
+ /* Check the storage size before data is written */
+ storage_size = H5Dget_storage_size(did);
+ VERIFY(storage_size, 0, "H5Dget_storage_size");
+
+ /* Write part of the dataset */
+ ret = H5Dwrite(did, H5T_NATIVE_INT, sid, sid, H5P_DEFAULT, wdata);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* Check the storage size after only four chunks are written */
+ storage_size = H5Dget_storage_size(did);
+ CHECK(storage_size, 0, "H5Dget_storage_size");
+#ifdef H5_HAVE_FILTER_DEFLATE
+ if (storage_size >= (4 * MISC8_CHUNK_DIM0 * MISC8_CHUNK_DIM1 * H5Tget_size(H5T_NATIVE_INT)))
+ TestErrPrintf("Error on line %d: data wasn't compressed! storage_size=%u\n", __LINE__,
+ (unsigned)storage_size);
+#else /* Compression is not configured */
+ if (storage_size != (4 * MISC8_CHUNK_DIM0 * MISC8_CHUNK_DIM1 * H5Tget_size(H5T_NATIVE_INT)))
+ TestErrPrintf("Error on line %d: wrong storage size! storage_size=%u\n", __LINE__,
+ (unsigned)storage_size);
+#endif /* H5_HAVE_FILTER_DEFLATE */
+
+ /* Write entire dataset */
+ ret = H5Dwrite(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, wdata);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+#ifdef VERIFY_DATA
+ /* Read data */
+ ret = H5Dread(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, rdata);
+ CHECK(ret, FAIL, "H5Dread");
+
+ /* Check values written */
+ tdata = wdata;
+ tdata2 = rdata;
+ for (u = 0; u < MISC8_DIM0; u++)
+ for (v = 0; v < MISC8_DIM1; v++, tdata++, tdata2++)
+ if (*tdata != *tdata2)
+ TestErrPrintf("Error on line %d: u=%u, v=%d, *tdata=%d, *tdata2=%d\n", __LINE__, (unsigned)u,
+ (unsigned)v, (int)*tdata, (int)*tdata2);
+#endif /* VERIFY_DATA */
+
+ /* Check the storage size after data is written */
+ storage_size = H5Dget_storage_size(did);
+ CHECK(storage_size, 0, "H5Dget_storage_size");
+#ifdef H5_HAVE_FILTER_DEFLATE
+ if (storage_size >= (MISC8_DIM0 * MISC8_DIM1 * H5Tget_size(H5T_NATIVE_INT)))
+ TestErrPrintf("Error on line %d: data wasn't compressed! storage_size=%u\n", __LINE__,
+ (unsigned)storage_size);
+#else
+ if (storage_size != (MISC8_DIM0 * MISC8_DIM1 * H5Tget_size(H5T_NATIVE_INT)))
+ TestErrPrintf("Error on line %d: wrong storage size! storage_size=%u\n", __LINE__,
+ (unsigned)storage_size);
+#endif /*H5_HAVE_FILTER_DEFLATE*/
+
+ /* Close dataset ID */
+ ret = H5Dclose(did);
+ CHECK(ret, FAIL, "H5Dclose");
+#endif /* H5_HAVE_PARALLEL */
+
+ /* Close dataset creation property list */
+ ret = H5Pclose(dcpl);
+ CHECK(ret, FAIL, "H5Pclose");
+
+ /* Close dataspace */
+ ret = H5Sclose(sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close file */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Free the read & write buffers */
+ HDfree(wdata);
+#ifdef VERIFY_DATA
+ HDfree(rdata);
+#endif /* VERIFY_DATA */
+} /* end test_misc8() */
+#endif
+
+/****************************************************************
+**
+** test_misc9(): Test that H5Fopen() does not succeed for core
+**      files; H5Fcreate() must be used to create them.
+**
+****************************************************************/
+static void
+test_misc9(void)
+{
+ hid_t fapl, fid;
+ herr_t ret;
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing core file opening\n"));
+
+ fapl = H5Pcreate(H5P_FILE_ACCESS);
+ CHECK(fapl, FAIL, "H5Pcreate");
+
+ ret = H5Pset_fapl_core(fapl, (size_t)1024, 0);
+ CHECK(ret, FAIL, "H5Pset_fapl_core");
+
+ H5E_BEGIN_TRY
+ {
+ fid = H5Fopen(MISC9_FILE, H5F_ACC_RDWR, fapl);
+ }
+ H5E_END_TRY;
+ VERIFY(fid, FAIL, "H5Fopen");
+
+ ret = H5Pclose(fapl);
+ CHECK(ret, FAIL, "H5Pset_fapl_core");
+} /* end test_misc9() */
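+
+/* For contrast, a minimal sketch (kept disabled) of how a core file would
+ * actually be brought into existence: H5Fcreate() on a core-VFD fapl.  The
+ * file name and increment below are illustrative assumptions only.
+ */
+#if 0
+static hid_t
+example_create_core_file(void)
+{
+    hid_t fapl = H5Pcreate(H5P_FILE_ACCESS);
+    hid_t fid  = H5I_INVALID_HID;
+
+    /* 1 KiB allocation increments, no backing store on disk */
+    if (fapl >= 0 && H5Pset_fapl_core(fapl, (size_t)1024, 0) >= 0)
+        fid = H5Fcreate("example_core.h5", H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
+
+    H5Pclose(fapl);
+    return fid;
+}
+#endif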
+
+/****************************************************************
+**
+** test_misc10(): Test opening a dataset created with an older
+** version of the library (shares the tmtimeo.h5 file with the mtime.c
+** test - see gen_old_mtime.c for notes on generating this data file)
+** and using that dataset's creation property list to create a dataset
+** with the current version of the library. Also tests using the file
+** creation property list in the same way.
+**
+****************************************************************/
+#if 0
+static void
+test_misc10(void)
+{
+ hid_t file, file_new; /* File IDs for old & new files */
+ hid_t fcpl; /* File creation property list */
+ hid_t dataset, dataset_new; /* Dataset IDs for old & new datasets */
+ hid_t dcpl; /* Dataset creation property list */
+ hid_t space, type; /* Old dataset's dataspace & datatype */
+ const char *testfile = H5_get_srcdir_filename(MISC10_FILE_OLD); /* Corrected test file name */
+ hbool_t driver_is_default_compatible;
+ herr_t ret;
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing using old dataset creation property list\n"));
+
+ ret = h5_driver_is_default_vfd_compatible(H5P_DEFAULT, &driver_is_default_compatible);
+ CHECK(ret, FAIL, "h5_driver_is_default_vfd_compatible");
+
+ if (!driver_is_default_compatible) {
+ HDprintf("-- SKIPPED --\n");
+ return;
+ }
+
+ /*
+ * Open the old file and the dataset and get old settings.
+ */
+ file = H5Fopen(testfile, H5F_ACC_RDONLY, H5P_DEFAULT);
+ CHECK(file, FAIL, "H5Fopen");
+ fcpl = H5Fget_create_plist(file);
+ CHECK(fcpl, FAIL, "H5Fget_create_plist");
+
+ dataset = H5Dopen2(file, MISC10_DSETNAME, H5P_DEFAULT);
+ CHECK(dataset, FAIL, "H5Dopen2");
+ dcpl = H5Dget_create_plist(dataset);
+ CHECK(dcpl, FAIL, "H5Dget_create_plist");
+ space = H5Dget_space(dataset);
+ CHECK(space, FAIL, "H5Dget_space");
+ type = H5Dget_type(dataset);
+ CHECK(type, FAIL, "H5Dget_type");
+
+ /* Create new file & dataset */
+ file_new = H5Fcreate(MISC10_FILE_NEW, H5F_ACC_TRUNC, fcpl, H5P_DEFAULT);
+ CHECK(file_new, FAIL, "H5Fcreate");
+
+ dataset_new = H5Dcreate2(file_new, MISC10_DSETNAME, type, space, H5P_DEFAULT, dcpl, H5P_DEFAULT);
+ CHECK(dataset_new, FAIL, "H5Dcreate2");
+
+ /* Close new dataset & file */
+ ret = H5Dclose(dataset_new);
+ CHECK(ret, FAIL, "H5Dclose");
+ ret = H5Fclose(file_new);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Close old dataset information */
+ ret = H5Tclose(type);
+ CHECK(ret, FAIL, "H5Tclose");
+ ret = H5Sclose(space);
+ CHECK(ret, FAIL, "H5Sclose");
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+ ret = H5Pclose(dcpl);
+ CHECK(ret, FAIL, "H5Pclose");
+
+ /* Close old file information */
+ ret = H5Fclose(file);
+ CHECK(ret, FAIL, "H5Fclose");
+ ret = H5Pclose(fcpl);
+ CHECK(ret, FAIL, "H5Pclose");
+} /* end test_misc10() */
+#endif
+
+/****************************************************************
+**
+** test_misc11(): Test that all properties in a file creation property
+** list are stored correctly in the file and can be retrieved
+** when the file is re-opened.
+**
+****************************************************************/
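+/* A minimal sketch (using the same names as the test below) of the
+ * round-trip this test exercises: set properties on an FCPL, create the
+ * file with it, re-open the file, and read the properties back from the
+ * FCPL returned by H5Fget_create_plist():
+ *
+ *     fcpl = H5Pcreate(H5P_FILE_CREATE);
+ *     H5Pset_userblock(fcpl, (hsize_t)MISC11_USERBLOCK);
+ *     file = H5Fcreate(MISC11_FILE, H5F_ACC_TRUNC, fcpl, H5P_DEFAULT);
+ *     ...close the file, then re-open it...
+ *     fcpl = H5Fget_create_plist(file);
+ *     H5Pget_userblock(fcpl, &userblock);
+ */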
+static void
+test_misc11(void)
+{
+ hid_t file; /* File ID */
+ hid_t fcpl; /* File creation property list */
+ hsize_t userblock; /* Userblock size retrieved from FCPL */
+ size_t off_size; /* Size of offsets in the file */
+ size_t len_size; /* Size of lengths in the file */
+ unsigned sym_ik; /* Symbol table B-tree initial 'K' value */
+ unsigned istore_ik; /* Indexed storage B-tree initial 'K' value */
+ unsigned sym_lk; /* Symbol table B-tree leaf 'K' value */
+ unsigned nindexes; /* Shared message number of indexes */
+#if 0
+ H5F_info2_t finfo; /* global information about file */
+#endif
+ H5F_fspace_strategy_t strategy; /* File space strategy */
+ hsize_t threshold; /* Free-space section threshold */
+ hbool_t persist; /* To persist free-space or not */
+ herr_t ret; /* Generic return value */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing file creation properties retrieved correctly\n"));
+
+ /* Creating a file with the default file creation property list should
+ * create a version 0 superblock
+ */
+
+ /* Create file with default file creation property list */
+ file = H5Fcreate(MISC11_FILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(file, FAIL, "H5Fcreate");
+#if 0
+ /* Get the file's version information */
+ ret = H5Fget_info2(file, &finfo);
+ CHECK(ret, FAIL, "H5Fget_info2");
+ VERIFY(finfo.super.version, 0, "H5Fget_info2");
+ VERIFY(finfo.free.version, 0, "H5Fget_info2");
+ VERIFY(finfo.sohm.version, 0, "H5Fget_info2");
+#endif
+ /* Close file */
+ ret = H5Fclose(file);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Create a file creation property list */
+ fcpl = H5Pcreate(H5P_FILE_CREATE);
+ CHECK(fcpl, FAIL, "H5Pcreate");
+
+ /* Set all the properties in the FCPL */
+ ret = H5Pset_userblock(fcpl, (hsize_t)MISC11_USERBLOCK);
+ CHECK(ret, FAIL, "H5Pset_userblock");
+
+ ret = H5Pset_sizes(fcpl, (size_t)MISC11_SIZEOF_OFF, (size_t)MISC11_SIZEOF_LEN);
+ CHECK(ret, FAIL, "H5Pset_sizes");
+
+ /* This should fail as (32770*2) will exceed 2^16 - 2 bytes for storing btree entries */
+ H5E_BEGIN_TRY
+ {
+ ret = H5Pset_sym_k(fcpl, 32770, 0);
+ }
+ H5E_END_TRY;
+ VERIFY(ret, FAIL, "H5Pset_sym_k");
+
+ ret = H5Pset_sym_k(fcpl, MISC11_SYM_IK, MISC11_SYM_LK);
+ CHECK(ret, FAIL, "H5Pset_sym_k");
+
+ /* This should fail as (32770*2) will exceed 2^16 - 2 bytes for storing btree entries */
+ H5E_BEGIN_TRY
+ {
+ ret = H5Pset_istore_k(fcpl, 32770);
+ }
+ H5E_END_TRY;
+ VERIFY(ret, FAIL, "H5Pset_istore_k");
+
+ ret = H5Pset_istore_k(fcpl, MISC11_ISTORE_IK);
+ CHECK(ret, FAIL, "H5Pset_istore_k");
+
+ ret = H5Pset_shared_mesg_nindexes(fcpl, MISC11_NINDEXES);
+ CHECK(ret, FAIL, "H5Pset_shared_mesg");
+
+ ret = H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_NONE, FALSE, (hsize_t)1);
+ CHECK(ret, FAIL, "H5Pset_file_space");
+
+ /* Creating a file with the non-default file creation property list should
+ * create a version 2 superblock
+ */
+
+ /* Create file with custom file creation property list */
+ file = H5Fcreate(MISC11_FILE, H5F_ACC_TRUNC, fcpl, H5P_DEFAULT);
+ CHECK(file, FAIL, "H5Fcreate");
+
+ /* Close FCPL */
+ ret = H5Pclose(fcpl);
+ CHECK(ret, FAIL, "H5Pclose");
+#if 0
+ /* Get the file's version information */
+ ret = H5Fget_info2(file, &finfo);
+ CHECK(ret, FAIL, "H5Fget_info2");
+ VERIFY(finfo.super.version, 2, "H5Fget_info2");
+ VERIFY(finfo.free.version, 0, "H5Fget_info2");
+ VERIFY(finfo.sohm.version, 0, "H5Fget_info2");
+#endif
+ /* Close file */
+ ret = H5Fclose(file);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Re-open the file */
+ file = H5Fopen(MISC11_FILE, H5F_ACC_RDONLY, H5P_DEFAULT);
+ CHECK(file, FAIL, "H5Fcreate");
+
+ /* Get the file's creation property list */
+ fcpl = H5Fget_create_plist(file);
+ CHECK(fcpl, FAIL, "H5Fget_create_plist");
+#if 0
+ /* Get the file's version information */
+ ret = H5Fget_info2(file, &finfo);
+ CHECK(ret, FAIL, "H5Fget_info2");
+ VERIFY(finfo.super.version, 2, "H5Fget_info2");
+ VERIFY(finfo.free.version, 0, "H5Fget_info2");
+ VERIFY(finfo.sohm.version, 0, "H5Fget_info2");
+#endif
+ /* Retrieve all the property values & check them */
+ ret = H5Pget_userblock(fcpl, &userblock);
+ CHECK(ret, FAIL, "H5Pget_userblock");
+ VERIFY(userblock, MISC11_USERBLOCK, "H5Pget_userblock");
+
+ ret = H5Pget_sizes(fcpl, &off_size, &len_size);
+ CHECK(ret, FAIL, "H5Pget_sizes");
+ VERIFY(off_size, MISC11_SIZEOF_OFF, "H5Pget_sizes");
+ VERIFY(len_size, MISC11_SIZEOF_LEN, "H5Pget_sizes");
+
+ ret = H5Pget_sym_k(fcpl, &sym_ik, &sym_lk);
+ CHECK(ret, FAIL, "H5Pget_sym_k");
+ VERIFY(sym_ik, MISC11_SYM_IK, "H5Pget_sym_k");
+ VERIFY(sym_lk, MISC11_SYM_LK, "H5Pget_sym_k");
+
+ ret = H5Pget_istore_k(fcpl, &istore_ik);
+ CHECK(ret, FAIL, "H5Pget_istore_k");
+ VERIFY(istore_ik, MISC11_ISTORE_IK, "H5Pget_istore_k");
+
+ ret = H5Pget_shared_mesg_nindexes(fcpl, &nindexes);
+ CHECK(ret, FAIL, "H5Pget_shared_mesg_nindexes");
+ VERIFY(nindexes, MISC11_NINDEXES, "H5Pget_shared_mesg_nindexes");
+
+ ret = H5Pget_file_space_strategy(fcpl, &strategy, &persist, &threshold);
+ CHECK(ret, FAIL, "H5Pget_file_space_strategy");
+ VERIFY(strategy, H5F_FSPACE_STRATEGY_NONE, "H5Pget_file_space_strategy");
+ VERIFY(persist, FALSE, "H5Pget_file_space_strategy");
+ VERIFY(threshold, 1, "H5Pget_file_space_strategy");
+
+ /* Close file */
+ ret = H5Fclose(file);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Close FCPL */
+ ret = H5Pclose(fcpl);
+ CHECK(ret, FAIL, "H5Pclose");
+} /* end test_misc11() */
+
+/****************************************************************
+**
+** test_misc12(): Test that VL-types operate correctly in chunked
+** datasets that are extended.
+**
+****************************************************************/
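+/* Note: the dataset elements are variable-length (H5T_VARIABLE) strings,
+ * so H5Dread() allocates a buffer for each element behind the scenes;
+ * those buffers are released with H5Treclaim() at the end of the test.
+ */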
+static void
+test_misc12(void)
+{
+ const char *wdata[MISC12_SPACE1_DIM1] = {
+ "Four score and seven years ago our forefathers brought forth on this continent a new nation,",
+ "conceived in liberty and dedicated to the proposition that all men are created equal.",
+ "Now we are engaged in a great civil war,",
+ "testing whether that nation or any nation so conceived and so dedicated can long endure."};
+ const char *wdata1[MISC12_APPEND_SIZE] = {
+ "O Gloria inmarcesible! O Jubilo inmortal! En surcos de dolores, el",
+ "bien germina ya! Ceso la horrible noche, La libertad sublime",
+ "derrama las auroras de su invencible luz.", "La humanidad entera, que entre cadenas gime, comprende",
+ "las palabras del que murio en la cruz."};
+ char *rdata[MISC12_SPACE1_DIM1 + MISC12_APPEND_SIZE]; /* Information read in */
+ hid_t fid1;
+ hid_t dataset;
+ hid_t sid1, space, memspace;
+ hid_t tid1, cparms;
+ hsize_t dims1[] = {MISC12_SPACE1_DIM1};
+ hsize_t dimsn[] = {MISC12_APPEND_SIZE};
+ hsize_t maxdims1[1] = {H5S_UNLIMITED};
+ hsize_t chkdims1[1] = {MISC12_CHUNK_SIZE};
+ hsize_t newsize[1] = {MISC12_SPACE1_DIM1 + MISC12_APPEND_SIZE};
+ hsize_t offset[1] = {MISC12_SPACE1_DIM1};
+ hsize_t count[1] = {MISC12_APPEND_SIZE};
+ int i; /* counting variable */
+ herr_t ret; /* Generic return value */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing VL-type in chunked dataset\n"));
+
+ /* This test requires a relatively "fresh" library environment */
+ ret = H5garbage_collect();
+ CHECK(ret, FAIL, "H5garbage_collect");
+
+ /* Create file */
+ fid1 = H5Fcreate(MISC12_FILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(fid1, FAIL, "H5Fcreate");
+
+ /* Create dataspace for datasets */
+ sid1 = H5Screate_simple(MISC12_SPACE1_RANK, dims1, maxdims1);
+ CHECK(sid1, FAIL, "H5Screate_simple");
+
+ /* Create a datatype to refer to */
+ tid1 = H5Tcopy(H5T_C_S1);
+ CHECK(tid1, FAIL, "H5Tcopy");
+
+ ret = H5Tset_size(tid1, H5T_VARIABLE);
+ CHECK(ret, FAIL, "H5Tset_size");
+
+ cparms = H5Pcreate(H5P_DATASET_CREATE);
+ CHECK(cparms, FAIL, "H5Pcreate");
+
+ ret = H5Pset_chunk(cparms, 1, chkdims1);
+ CHECK(ret, FAIL, "H5Pset_chunk");
+
+ /* Create a dataset */
+ dataset = H5Dcreate2(fid1, MISC12_DSET_NAME, tid1, sid1, H5P_DEFAULT, cparms, H5P_DEFAULT);
+ CHECK(dataset, FAIL, "H5Dcreate2");
+
+ /* Write dataset to disk */
+ ret = H5Dwrite(dataset, tid1, H5S_ALL, H5S_ALL, H5P_DEFAULT, wdata);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* Extend dataset */
+ ret = H5Dset_extent(dataset, newsize);
+ CHECK(ret, FAIL, "H5Dset_extent");
+
+ memspace = H5Screate_simple(MISC12_SPACE1_RANK, dimsn, NULL);
+ CHECK(memspace, FAIL, "H5Screate_simple");
+
+ space = H5Dget_space(dataset);
+ CHECK(space, FAIL, "H5Dget_space");
+
+ ret = H5Sselect_hyperslab(space, H5S_SELECT_SET, offset, NULL, count, NULL);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* Write data to new portion of dataset */
+ ret = H5Dwrite(dataset, tid1, memspace, space, H5P_DEFAULT, wdata1);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* Read all data back */
+ ret = H5Dread(dataset, tid1, H5S_ALL, H5S_ALL, H5P_DEFAULT, rdata);
+ CHECK(ret, FAIL, "H5Dread");
+
+ for (i = 0; i < MISC12_SPACE1_DIM1; i++)
+ if (HDstrcmp(wdata[i], rdata[i]) != 0)
+ TestErrPrintf("Error on line %d: wdata[%d]=%s, rdata[%d]=%s\n", __LINE__, i, wdata[i], i,
+ rdata[i]);
+ for (; i < (MISC12_SPACE1_DIM1 + MISC12_APPEND_SIZE); i++)
+ if (HDstrcmp(wdata1[i - MISC12_SPACE1_DIM1], rdata[i]) != 0)
+ TestErrPrintf("Error on line %d: wdata1[%d]=%s, rdata[%d]=%s\n", __LINE__, i - MISC12_SPACE1_DIM1,
+ wdata1[i - MISC12_SPACE1_DIM1], i, rdata[i]);
+
+ ret = H5Sselect_all(space);
+ CHECK(ret, FAIL, "H5Sselect_all");
+
+ /* Reclaim VL data memory */
+ ret = H5Treclaim(tid1, space, H5P_DEFAULT, rdata);
+ CHECK(ret, FAIL, "H5Treclaim");
+
+ /* Close Everything */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+ ret = H5Tclose(tid1);
+ CHECK(ret, FAIL, "H5Tclose");
+ ret = H5Sclose(space);
+ CHECK(ret, FAIL, "H5Sclose");
+ ret = H5Sclose(memspace);
+ CHECK(ret, FAIL, "H5Sclose");
+ ret = H5Sclose(sid1);
+ CHECK(ret, FAIL, "H5Sclose");
+ ret = H5Pclose(cparms);
+ CHECK(ret, FAIL, "H5Pclose");
+ ret = H5Fclose(fid1);
+ CHECK(ret, FAIL, "H5Fclose");
+} /* end test_misc12() */
+#if 0
+/* Various routines for misc. 13 test */
+static void
+misc13_init_data(unsigned *original_data)
+{
+ unsigned u;
+
+ for (u = 0; u < MISC13_DIM1; u++)
+ original_data[u] = u;
+}
+
+static hbool_t
+misc13_verify_data_match(const unsigned *original_data, const unsigned *read_data)
+{
+ unsigned u;
+
+ for (u = 0; u < MISC13_DIM1; u++)
+ if (original_data[u] != read_data[u])
+ return FALSE;
+
+ return TRUE;
+}
+
+static void
+misc13_create_dataset(hid_t loc_id, const char *name, hid_t dcpl, const unsigned *data)
+{
+ hid_t dsid = -1; /* Dataset ID */
+ hid_t sid = -1; /* Dataspace ID */
+ hsize_t dims[MISC13_RANK]; /* Dataset dimensions */
+ herr_t ret; /* Generic return value */
+
+ /* Create dataspace for use with dataset */
+ dims[0] = MISC13_DIM1;
+ sid = H5Screate_simple(MISC13_RANK, dims, NULL);
+ CHECK(sid, FAIL, "H5Screate_simple");
+
+ /* Create contiguous dataset in root group */
+ dsid = H5Dcreate2(loc_id, name, H5T_NATIVE_UINT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT);
+ CHECK(dsid, FAIL, "H5Dcreate2");
+
+ /* Write some data to dataset */
+ ret = H5Dwrite(dsid, H5T_NATIVE_UINT, H5S_ALL, H5S_ALL, H5P_DEFAULT, data);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* Close the contiguous dataset */
+ ret = H5Dclose(dsid);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close the dataspace */
+ ret = H5Sclose(sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+} /* end misc13_create_dataset() */
+
+static void
+misc13_verify_dataset(hid_t loc_id, const char *name, const unsigned *data)
+{
+ unsigned *read_data = NULL; /* Data read back from the dataset */
+ hid_t dsid = -1; /* Dataset ID */
+ herr_t ret; /* Generic return value */
+
+ /* Create a data buffer for the dataset read */
+ read_data = (unsigned *)HDcalloc(MISC13_DIM1, sizeof(unsigned));
+ CHECK_PTR(read_data, "HDcalloc");
+
+ /* Open the contiguous dataset in the root group */
+ dsid = H5Dopen2(loc_id, name, H5P_DEFAULT);
+ CHECK(dsid, FAIL, "H5Dopen2");
+
+ /* Read the data */
+ ret = H5Dread(dsid, H5T_NATIVE_UINT, H5S_ALL, H5S_ALL, H5P_DEFAULT, read_data);
+ CHECK(ret, FAIL, "H5Dread");
+
+ /* Verify that the data are correct */
+ ret = misc13_verify_data_match(data, read_data);
+ CHECK(ret, FAIL, "misc13_verify_data_match");
+
+ /* Close the contiguous dataset */
+ ret = H5Dclose(dsid);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Free the dataset read buffer */
+ HDfree(read_data);
+
+} /* end misc13_verify_dataset() */
+
+static void
+misc13_create_hdf_file(const char *name, const unsigned *data)
+{
+ hid_t fid = -1; /* File ID */
+ hid_t gid1 = -1; /* Group ID (level 1) */
+ hid_t gid2 = -1; /* Group ID (level 2) */
+ hid_t tid = -1; /* Datatype ID */
+ hid_t dcplid = -1; /* Dataset creation property list ID */
+ hsize_t chunk_dims[MISC13_RANK]; /* Chunk dimensions */
+ herr_t ret; /* Generic return value */
+
+ /* Create file */
+ fid = H5Fcreate(name, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(fid, FAIL, "H5Fcreate");
+
+ /* Create DCPL for use with datasets */
+ dcplid = H5Pcreate(H5P_DATASET_CREATE);
+ CHECK(dcplid, FAIL, "H5Pcreate");
+
+ /* Set the DCPL to be chunked */
+ ret = H5Pset_layout(dcplid, H5D_CHUNKED);
+ CHECK(ret, FAIL, "H5Pset_layout");
+
+ /* Use chunked storage for this DCPL */
+ chunk_dims[0] = MISC13_CHUNK_DIM1;
+ ret = H5Pset_chunk(dcplid, MISC13_RANK, chunk_dims);
+ CHECK(ret, FAIL, "H5Pset_chunk");
+
+ /* Create contiguous dataset in root group */
+ misc13_create_dataset(fid, MISC13_DSET1_NAME, H5P_DEFAULT, data);
+
+ /* Create chunked dataset in root group */
+ misc13_create_dataset(fid, MISC13_DSET2_NAME, dcplid, data);
+
+ /* Create a datatype to commit to the file */
+ tid = H5Tcopy(H5T_NATIVE_INT);
+ CHECK(tid, FAIL, "H5Tcopy");
+
+ /* Create a named datatype in the root group */
+ ret = H5Tcommit2(fid, MISC13_DTYPE_NAME, tid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Tcommit2");
+
+ /* Close named datatype */
+ ret = H5Tclose(tid);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ /* Create a group in the root group */
+ gid1 = H5Gcreate2(fid, MISC13_GROUP1_NAME, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(gid1, FAIL, "H5Gcreate2");
+
+ /* Create another group in the new group */
+ gid2 = H5Gcreate2(gid1, MISC13_GROUP2_NAME, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(gid2, FAIL, "H5Gcreate2");
+
+ /* Close the second group */
+ ret = H5Gclose(gid2);
+ CHECK(ret, FAIL, "H5Gclose");
+
+ /* Create contiguous dataset in new group */
+ misc13_create_dataset(gid1, MISC13_DSET1_NAME, H5P_DEFAULT, data);
+
+ /* Create chunked dataset in new group */
+ misc13_create_dataset(gid1, MISC13_DSET2_NAME, dcplid, data);
+
+ /* Create a datatype to commit to the new group */
+ tid = H5Tcopy(H5T_NATIVE_INT);
+ CHECK(tid, FAIL, "H5Tcopy");
+
+ /* Create a named datatype in the new group */
+ ret = H5Tcommit2(gid1, MISC13_DTYPE_NAME, tid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Tcommit2");
+
+ /* Close named datatype */
+ ret = H5Tclose(tid);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ /* Close the first group */
+ ret = H5Gclose(gid1);
+ CHECK(ret, FAIL, "H5Gclose");
+
+ /* Close the DCPL */
+ ret = H5Pclose(dcplid);
+ CHECK(ret, FAIL, "H5Pclose");
+
+ /* Close the file */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+
+} /* end misc13_create_hdf_file() */
+
+static void
+misc13_insert_user_block(const char *old_name, const char *new_name, const char *str, size_t size)
+{
+ FILE *new_fp = NULL; /* Pointers to new & old files */
+ FILE *old_fp = NULL;
+ void *user_block = NULL; /* Pointer to user block to write to file */
+ void *copy_buf = NULL; /* Pointer to buffer for copying data */
+ size_t written; /* Amount of data written to new file */
+ size_t read_in; /* Amount of data read in from old file */
+ int ret; /* Generic status value */
+
+ /* Allocate space for the user block */
+ user_block = HDcalloc(size, (size_t)1);
+ CHECK_PTR(user_block, "HDcalloc");
+
+ /* Copy in the user block data */
+ HDmemcpy(user_block, str, HDstrlen(str));
+
+ /* Open the new file */
+ new_fp = HDfopen(new_name, "wb");
+ CHECK_PTR(new_fp, "HDfopen");
+
+ /* Write the user block to the new file */
+ written = HDfwrite(user_block, (size_t)1, size, new_fp);
+ VERIFY(written, size, "HDfwrite");
+
+ /* Open the old file */
+ old_fp = HDfopen(old_name, "rb");
+ CHECK_PTR(old_fp, "HDfopen");
+
+ /* Allocate space for the copy buffer */
+ copy_buf = HDmalloc((size_t)MISC13_COPY_BUF_SIZE);
+ CHECK_PTR(copy_buf, "HDmalloc");
+
+ /* Copy data from the old file to the new file */
+ while ((read_in = HDfread(copy_buf, (size_t)1, (size_t)MISC13_COPY_BUF_SIZE, old_fp)) > 0) {
+ /* Write the data to the new file */
+ written = HDfwrite(copy_buf, (size_t)1, read_in, new_fp);
+ VERIFY(written, read_in, "HDfwrite");
+ }
+
+ /* Close the old file */
+ ret = HDfclose(old_fp);
+ VERIFY(ret, 0, "HDfclose");
+
+ /* Close the new file */
+ ret = HDfclose(new_fp);
+ VERIFY(ret, 0, "HDfclose");
+
+ /* Free the copy buffer */
+ HDfree(copy_buf);
+
+ /* Free the user block */
+ HDfree(user_block);
+
+} /* end misc13_insert_user_block() */
+
+static void
+misc13_verify_file(const char *name, const unsigned *data, hsize_t userblock_size,
+ hbool_t check_for_new_dataset)
+{
+ hid_t fid = -1; /* File ID */
+ hid_t gid1 = -1; /* Group IDs */
+ hid_t gid2 = -1; /* Group IDs */
+ hid_t tid = -1; /* Datatype ID */
+ hid_t fcplid = -1; /* File creation property list ID */
+ hsize_t ub_size_out; /* Userblock size retrieved from FCPL */
+ herr_t ret; /* Generic return value */
+
+ /* Open the file */
+ fid = H5Fopen(name, H5F_ACC_RDONLY, H5P_DEFAULT);
+ CHECK(fid, FAIL, "H5Fopen");
+
+ /* Get the file's FCPL */
+ fcplid = H5Fget_create_plist(fid);
+ CHECK(fcplid, FAIL, "H5Fget_create_plist");
+
+ /* Get the user block size for the file */
+ ret = H5Pget_userblock(fcplid, &ub_size_out);
+ CHECK(ret, FAIL, "H5Pget_userblock");
+
+ /* Check the userblock size */
+ VERIFY(userblock_size, ub_size_out, "H5Pget_userblock");
+
+ /* Close the FCPL */
+ ret = H5Pclose(fcplid);
+ CHECK(ret, FAIL, "H5Pclose");
+
+ /* Verify the contiguous dataset in the root group */
+ misc13_verify_dataset(fid, MISC13_DSET1_NAME, data);
+
+ /* Verify the chunked dataset in the root group */
+ misc13_verify_dataset(fid, MISC13_DSET2_NAME, data);
+
+ /* Verify the "new" contiguous dataset in the root group, if asked */
+ if (check_for_new_dataset)
+ misc13_verify_dataset(fid, MISC13_DSET3_NAME, data);
+
+ /* Open the named datatype in the root group */
+ tid = H5Topen2(fid, MISC13_DTYPE_NAME, H5P_DEFAULT);
+ CHECK(tid, FAIL, "H5Topen2");
+
+ /* Verify the type is correct */
+ VERIFY(H5Tequal(tid, H5T_NATIVE_INT), TRUE, "H5Tequal");
+
+ /* Close named datatype */
+ ret = H5Tclose(tid);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ /* Open the first group */
+ gid1 = H5Gopen2(fid, MISC13_GROUP1_NAME, H5P_DEFAULT);
+ CHECK(gid1, FAIL, "H5Gopen2");
+
+ /* Verify the contiguous dataset in the first group */
+ misc13_verify_dataset(gid1, MISC13_DSET1_NAME, data);
+
+ /* Verify the chunked dataset in the first group */
+ misc13_verify_dataset(gid1, MISC13_DSET2_NAME, data);
+
+ /* Open the named datatype in the first group */
+ tid = H5Topen2(gid1, MISC13_DTYPE_NAME, H5P_DEFAULT);
+ CHECK(tid, FAIL, "H5Topen2");
+
+ /* Verify the type is correct */
+ VERIFY(H5Tequal(tid, H5T_NATIVE_INT), TRUE, "H5Tequal");
+
+ /* Close named datatype */
+ ret = H5Tclose(tid);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ /* Open the second group */
+ gid2 = H5Gopen2(gid1, MISC13_GROUP2_NAME, H5P_DEFAULT);
+ CHECK(gid2, FAIL, "H5Gopen2");
+
+ /* Close the second group */
+ ret = H5Gclose(gid2);
+ CHECK(ret, FAIL, "H5Gclose");
+
+ /* Close the first group */
+ ret = H5Gclose(gid1);
+ CHECK(ret, FAIL, "H5Gclose");
+
+ /* Close the file */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+
+} /* end misc13_verify_file() */
+
+static void
+misc13_add_to_new_file(const char *name, const unsigned *data)
+{
+ hid_t fid = -1; /* File ID */
+ herr_t ret; /* Generic return value */
+
+ /* Open the file */
+ fid = H5Fopen(name, H5F_ACC_RDWR, H5P_DEFAULT);
+ CHECK(fid, FAIL, "H5Fopen");
+
+ /* Create new contiguous dataset in root group */
+ misc13_create_dataset(fid, MISC13_DSET3_NAME, H5P_DEFAULT, data);
+
+ /* Close the file */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+
+} /* end misc13_add_to_new_file() */
+
+/****************************************************************
+**
+** test_misc13(): Test that file contents can be "slid down" by
+** inserting a user block in front of an existing file.
+**
+****************************************************************/
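+/* Background for this test: an HDF5 user block is a block of user data
+ * placed before the HDF5 signature; the library locates the superblock by
+ * searching at byte 0 and then at power-of-two offsets starting at 512,
+ * which is why prepending a power-of-two-sized user block leaves the file
+ * readable.
+ */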
+static void
+test_misc13(void)
+{
+ unsigned *data = NULL; /* Data to write to dataset */
+ hsize_t userblock_size; /* Correct size of userblock */
+ hbool_t check_for_new_dataset; /* Whether to check for the post-userblock-creation dataset */
+
+ /* Create a data buffer for the datasets */
+ data = (unsigned *)HDcalloc(MISC13_DIM1, sizeof(unsigned));
+ CHECK_PTR(data, "HDcalloc");
+
+ /* Initialize data to write */
+ misc13_init_data(data);
+
+ /* Create first file, with no user block */
+ misc13_create_hdf_file(MISC13_FILE_1, data);
+
+ /* Verify file contents are correct */
+ userblock_size = 0;
+ check_for_new_dataset = FALSE;
+ misc13_verify_file(MISC13_FILE_1, data, userblock_size, check_for_new_dataset);
+
+ /* Create a new file by inserting a user block in front of the first file */
+ misc13_insert_user_block(MISC13_FILE_1, MISC13_FILE_2, "Test String", (size_t)MISC13_USERBLOCK_SIZE);
+
+ /* Verify file contents are still correct */
+ userblock_size = MISC13_USERBLOCK_SIZE;
+ check_for_new_dataset = FALSE;
+ misc13_verify_file(MISC13_FILE_2, data, userblock_size, check_for_new_dataset);
+
+ /* Make certain we can modify the new file */
+ misc13_add_to_new_file(MISC13_FILE_2, data);
+
+ /* Verify file contents are still correct */
+ userblock_size = MISC13_USERBLOCK_SIZE;
+ check_for_new_dataset = TRUE;
+ misc13_verify_file(MISC13_FILE_2, data, userblock_size, check_for_new_dataset);
+
+ /* Free the dataset buffer */
+ HDfree(data);
+
+} /* end test_misc13() */
+#endif
+
+/****************************************************************
+**
+** test_misc14(): Test that deleting (unlinking) a dataset from a file
+** whose metadata is aggregated into larger blocks does not disturb the
+** data in the remaining datasets.
+**
+****************************************************************/
+static void
+test_misc14(void)
+{
+ hid_t file_id; /* File ID */
+ hid_t fapl; /* File access property list ID */
+ hid_t DataSpace; /* Dataspace ID */
+ hid_t Dataset1; /* Dataset ID #1 */
+ hid_t Dataset2; /* Dataset ID #2 */
+ hid_t Dataset3; /* Dataset ID #3 */
+ double data1 = 5.0; /* Data to write for dataset #1 */
+ double data2 = 10.0; /* Data to write for dataset #2 */
+ double data3 = 15.0; /* Data to write for dataset #3 */
+ double rdata; /* Data read in */
+ herr_t ret; /* Generic return value */
+
+ /* Test creating two datasets and deleting the second */
+
+ /* Increase the metadata block size */
+ /* (This makes certain that all the data blocks are allocated together) */
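+ /* (H5Pset_meta_block_size() sets the minimum size of the blocks the
+ * library allocates for metadata, so the small metadata allocations are
+ * sub-allocated from one large block instead of interleaving with the
+ * raw data.) */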
+ fapl = H5Pcreate(H5P_FILE_ACCESS);
+ CHECK(fapl, FAIL, "H5Pcreate");
+
+ ret = H5Pset_meta_block_size(fapl, (hsize_t)MISC14_METADATA_SIZE);
+ CHECK(ret, FAIL, "H5Pset_meta_block_size");
+
+ /* Create dataspace to use */
+ DataSpace = H5Screate(H5S_SCALAR);
+ CHECK(DataSpace, FAIL, "H5Screate");
+
+ /* Open the file */
+ file_id = H5Fcreate(MISC14_FILE, H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
+ CHECK(file_id, FAIL, "H5Fcreate");
+
+ /* Create first dataset & write data */
+ Dataset1 = H5Dcreate2(file_id, MISC14_DSET1_NAME, H5T_NATIVE_DOUBLE, DataSpace, H5P_DEFAULT, H5P_DEFAULT,
+ H5P_DEFAULT);
+ CHECK(Dataset1, FAIL, "H5Dcreate2");
+
+ ret = H5Dwrite(Dataset1, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT, &data1);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* Create second dataset (to be unlinked). */
+ Dataset2 = H5Dcreate2(file_id, MISC14_DSET2_NAME, H5T_NATIVE_DOUBLE, DataSpace, H5P_DEFAULT, H5P_DEFAULT,
+ H5P_DEFAULT);
+ CHECK(Dataset2, FAIL, "H5Dcreate2");
+
+ ret = H5Dwrite(Dataset2, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT, &data2);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* Check data from first dataset */
+ ret = H5Dread(Dataset1, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT, &rdata);
+ CHECK(ret, FAIL, "H5Dread");
+ if (!H5_DBL_ABS_EQUAL(rdata, data1))
+ TestErrPrintf("Error on line %d: data1!=rdata\n", __LINE__);
+
+ /* Unlink second dataset */
+ ret = H5Ldelete(file_id, MISC14_DSET2_NAME, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Ldelete");
+
+ /* Close second dataset */
+ ret = H5Dclose(Dataset2);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Verify the data from dataset #1 */
+ ret = H5Dread(Dataset1, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT, &rdata);
+ CHECK(ret, FAIL, "H5Dread");
+ if (!H5_DBL_ABS_EQUAL(rdata, data1))
+ TestErrPrintf("Error on line %d: data1!=rdata\n", __LINE__);
+
+ /* Close first dataset */
+ ret = H5Dclose(Dataset1);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close the file */
+ ret = H5Fclose(file_id);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Test creating two datasets and deleting the first */
+
+ /* Open the file */
+ file_id = H5Fcreate(MISC14_FILE, H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
+ CHECK(file_id, FAIL, "H5Fcreate");
+
+ /* Create first dataset & write data */
+ Dataset1 = H5Dcreate2(file_id, MISC14_DSET1_NAME, H5T_NATIVE_DOUBLE, DataSpace, H5P_DEFAULT, H5P_DEFAULT,
+ H5P_DEFAULT);
+ CHECK(Dataset1, FAIL, "H5Dcreate2");
+
+ ret = H5Dwrite(Dataset1, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT, &data1);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* Create second dataset */
+ Dataset2 = H5Dcreate2(file_id, MISC14_DSET2_NAME, H5T_NATIVE_DOUBLE, DataSpace, H5P_DEFAULT, H5P_DEFAULT,
+ H5P_DEFAULT);
+ CHECK(Dataset2, FAIL, "H5Dcreate2");
+
+ ret = H5Dwrite(Dataset2, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT, &data2);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* Check data from second dataset */
+ ret = H5Dread(Dataset2, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT, &rdata);
+ CHECK(ret, FAIL, "H5Dread");
+ if (!H5_DBL_ABS_EQUAL(rdata, data2))
+ TestErrPrintf("Error on line %d: data2!=rdata\n", __LINE__);
+
+ /* Unlink first dataset */
+ ret = H5Ldelete(file_id, MISC14_DSET1_NAME, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Ldelete");
+
+ /* Close first dataset */
+ ret = H5Dclose(Dataset1);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Verify the data from dataset #2 */
+ ret = H5Dread(Dataset2, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT, &rdata);
+ CHECK(ret, FAIL, "H5Dread");
+ if (!H5_DBL_ABS_EQUAL(rdata, data2))
+ TestErrPrintf("Error on line %d: data2!=rdata\n", __LINE__);
+
+ /* Close second dataset */
+ ret = H5Dclose(Dataset2);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close the file */
+ ret = H5Fclose(file_id);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Test creating three datasets and deleting the second */
+
+ /* Open the file */
+ file_id = H5Fcreate(MISC14_FILE, H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
+ CHECK(file_id, FAIL, "H5Fcreate");
+
+ /* Create first dataset & write data */
+ Dataset1 = H5Dcreate2(file_id, MISC14_DSET1_NAME, H5T_NATIVE_DOUBLE, DataSpace, H5P_DEFAULT, H5P_DEFAULT,
+ H5P_DEFAULT);
+ CHECK(Dataset1, FAIL, "H5Dcreate2");
+
+ ret = H5Dwrite(Dataset1, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT, &data1);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* Create second dataset */
+ Dataset2 = H5Dcreate2(file_id, MISC14_DSET2_NAME, H5T_NATIVE_DOUBLE, DataSpace, H5P_DEFAULT, H5P_DEFAULT,
+ H5P_DEFAULT);
+ CHECK(Dataset2, FAIL, "H5Dcreate2");
+
+ ret = H5Dwrite(Dataset2, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT, &data2);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* Create third dataset */
+ Dataset3 = H5Dcreate2(file_id, MISC14_DSET3_NAME, H5T_NATIVE_DOUBLE, DataSpace, H5P_DEFAULT, H5P_DEFAULT,
+ H5P_DEFAULT);
+ CHECK(Dataset2, FAIL, "H5Dcreate2");
+
+ ret = H5Dwrite(Dataset3, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT, &data3);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* Check data from first dataset */
+ ret = H5Dread(Dataset1, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT, &rdata);
+ CHECK(ret, FAIL, "H5Dread");
+ if (!H5_DBL_ABS_EQUAL(rdata, data1))
+ TestErrPrintf("Error on line %d: data1!=rdata\n", __LINE__);
+
+ /* Check data from third dataset */
+ ret = H5Dread(Dataset3, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT, &rdata);
+ CHECK(ret, FAIL, "H5Dread");
+ if (!H5_DBL_ABS_EQUAL(rdata, data3))
+ TestErrPrintf("Error on line %d: data3!=rdata\n", __LINE__);
+
+ /* Unlink second dataset */
+ ret = H5Ldelete(file_id, MISC14_DSET2_NAME, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Ldelete");
+
+ /* Close second dataset */
+ ret = H5Dclose(Dataset2);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Verify the data from dataset #1 */
+ ret = H5Dread(Dataset1, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT, &rdata);
+ CHECK(ret, FAIL, "H5Dread");
+ if (!H5_DBL_ABS_EQUAL(rdata, data1))
+ TestErrPrintf("Error on line %d: data1!=rdata\n", __LINE__);
+
+ /* Verify the data from dataset #3 */
+ ret = H5Dread(Dataset3, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, H5P_DEFAULT, &rdata);
+ CHECK(ret, FAIL, "H5Dread");
+ if (!H5_DBL_ABS_EQUAL(rdata, data3))
+ TestErrPrintf("Error on line %d: data3!=rdata\n", __LINE__);
+
+ /* Close first dataset */
+ ret = H5Dclose(Dataset1);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close third dataset */
+ ret = H5Dclose(Dataset3);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close the file */
+ ret = H5Fclose(file_id);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Close shared objects (dataspace & fapl) */
+ ret = H5Sclose(DataSpace);
+ CHECK(ret, FAIL, "H5Sclose");
+ ret = H5Pclose(fapl);
+ CHECK(ret, FAIL, "H5Pclose");
+
+} /* end test_misc14() */
+
+/****************************************************************
+**
+** test_misc15(): Test that retrieving a file's access property list
+** more than once correctly maintains the internal reference counts.
+**
+****************************************************************/
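+/* Note: H5Fget_access_plist() returns a copy of the file's FAPL with its
+ * own reference count, so each copy retrieved below is closed separately
+ * from the FAPL that was originally passed to H5Fcreate().
+ */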
+static void
+test_misc15(void)
+{
+ char filename[MISC15_BUF_SIZE];
+ hid_t file; /* File ID */
+ hid_t fapl; /* File access property list */
+ herr_t ret; /* Generic return value */
+
+ fapl = h5_fileaccess();
+ h5_fixname(MISC15_FILE, fapl, filename, MISC15_BUF_SIZE);
+
+ /* Create the file & get it's FAPL */
+ file = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
+ CHECK(file, FAIL, "H5Fcreate");
+
+ ret = H5Pclose(fapl);
+ CHECK(ret, FAIL, "H5Pclose");
+
+ fapl = H5Fget_access_plist(file);
+ CHECK(fapl, FAIL, "H5Fget_access_plist");
+
+ ret = H5Pclose(fapl);
+ CHECK(ret, FAIL, "H5Pclose");
+
+ ret = H5Fclose(file);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Open the file & get it's FAPL again */
+ file = H5Fopen(filename, H5F_ACC_RDONLY, H5P_DEFAULT);
+ CHECK(file, FAIL, "H5Fopen");
+
+ fapl = H5Fget_access_plist(file);
+ CHECK(fapl, FAIL, "H5Fget_access_plist");
+
+ ret = H5Fclose(file);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Verify that the file is still OK */
+ ret = H5Fis_accessible(filename, fapl);
+ CHECK(ret, FAIL, "H5Fis_accessible");
+
+ ret = H5Pclose(fapl);
+ CHECK(ret, FAIL, "H5Pclose");
+
+ file = H5Fopen(filename, H5F_ACC_RDONLY, H5P_DEFAULT);
+ CHECK(file, FAIL, "H5Fopen");
+
+ ret = H5Fclose(file);
+ CHECK(ret, FAIL, "H5Fclose");
+} /* end test_misc15() */
+
+/****************************************************************
+**
+** test_misc16(): Test an array of NULL-terminated fixed-length
+** strings. It creates a dataset of fixed-length strings, each
+** MISC16_STR_SIZE bytes long, with MISC16_SPACE_DIM strings in a
+** rank MISC16_SPACE_RANK dataspace.
+**
+****************************************************************/
+static void
+test_misc16(void)
+{
+ hid_t file; /* File ID */
+ herr_t ret; /* Generic return value */
+ char wdata[MISC16_SPACE_DIM][MISC16_STR_SIZE];
+ char rdata[MISC16_SPACE_DIM][MISC16_STR_SIZE]; /* Information read in */
+ hid_t dataset; /* Dataset ID */
+ hid_t sid; /* Dataspace ID */
+ hid_t tid; /* Datatype ID */
+ hsize_t dims[] = {MISC16_SPACE_DIM};
+ int i;
+
+ HDmemset(wdata, 0, sizeof(wdata));
+ HDmemset(rdata, 0, sizeof(rdata));
+
+ /* Initialize the data */
+ /* (Note that these are supposed to stress the code, so are a little weird) */
+ HDmemcpy(wdata[0], "1234567", MISC16_STR_SIZE);
+ HDmemcpy(wdata[1], "1234567\0", MISC16_STR_SIZE);
+ HDmemcpy(wdata[2], "12345678", MISC16_STR_SIZE);
+ HDmemcpy(wdata[3], "\0\0\0\0\0\0\0\0", MISC16_STR_SIZE);
+
+ /* Create the file */
+ file = H5Fcreate(MISC16_FILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(file, FAIL, "H5Fcreate");
+
+ /* Create dataspace for datasets */
+ sid = H5Screate_simple(MISC16_SPACE_RANK, dims, NULL);
+ CHECK(sid, FAIL, "H5Screate_simple");
+
+ /* Create a datatype to refer to */
+ tid = H5Tcopy(H5T_C_S1);
+ CHECK(tid, FAIL, "H5Tcopy");
+
+ ret = H5Tset_size(tid, (size_t)MISC16_STR_SIZE);
+ CHECK(ret, FAIL, "H5Tset_size");
+
+ /*ret = H5Tset_strpad(tid,H5T_STR_NULLPAD);
+ CHECK(ret, FAIL, "H5Tset_strpad");*/
+
+ /* Create a dataset */
+ dataset = H5Dcreate2(file, MISC16_DSET_NAME, tid, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(dataset, FAIL, "H5Dcreate2");
+
+ /* Write dataset to disk */
+ ret = H5Dwrite(dataset, tid, H5S_ALL, H5S_ALL, H5P_DEFAULT, wdata);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* Read dataset from disk */
+ ret = H5Dread(dataset, tid, H5S_ALL, H5S_ALL, H5P_DEFAULT, rdata);
+ CHECK(ret, FAIL, "H5Dread");
+
+ /* Compare data read in */
+ for (i = 0; i < MISC16_SPACE_DIM; i++) {
+ if (HDstrlen(wdata[i]) != HDstrlen(rdata[i])) {
+ TestErrPrintf(
+ "Line %u: VL data length don't match!, strlen(wdata[%d])=%d, strlen(rdata[%d])=%d\n",
+ (unsigned)__LINE__, (int)i, (int)HDstrlen(wdata[i]), (int)i, (int)HDstrlen(rdata[i]));
+ continue;
+ } /* end if */
+ if (HDstrcmp(wdata[i], rdata[i]) != 0) {
+ TestErrPrintf("Line %u: VL data values don't match!, wdata[%d]=%s, rdata[%d]=%s\n",
+ (unsigned)__LINE__, (int)i, wdata[i], (int)i, rdata[i]);
+ continue;
+ } /* end if */
+ } /* end for */
+
+ /* Close Dataset */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close datatype */
+ ret = H5Tclose(tid);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ /* Close disk dataspace */
+ ret = H5Sclose(sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ ret = H5Fclose(file);
+ CHECK(ret, FAIL, "H5Fclose");
+} /* end test_misc16() */
+
+/****************************************************************
+**
+** test_misc17(): Test an array of characters. It creates a dataset
+** of ASCII characters with dimensions MISC17_SPACE_DIM1 by
+** MISC17_SPACE_DIM2.
+**
+****************************************************************/
+static void
+test_misc17(void)
+{
+ hid_t file; /* File ID */
+ herr_t ret; /* Generic return value */
+ char wdata[MISC17_SPACE_DIM1][MISC17_SPACE_DIM2];
+ char rdata[MISC17_SPACE_DIM1][MISC17_SPACE_DIM2]; /* Information read in */
+ hid_t dataset; /* Dataset ID */
+ hid_t sid; /* Dataspace ID */
+ hid_t tid; /* Datatype ID */
+ hsize_t dims[] = {MISC17_SPACE_DIM1, MISC17_SPACE_DIM2};
+ int i;
+
+ HDmemset(wdata, 0, sizeof(wdata));
+ HDmemset(rdata, 0, sizeof(rdata));
+
+ /* Initialize the data */
+ /* (Note that these are supposed to stress the code, so are a little weird) */
+ HDmemcpy(wdata[0], "1234567", MISC17_SPACE_DIM2);
+ HDmemcpy(wdata[1], "1234567\0", MISC17_SPACE_DIM2);
+ HDmemcpy(wdata[2], "12345678", MISC17_SPACE_DIM2);
+ HDmemcpy(wdata[3], "\0\0\0\0\0\0\0\0", MISC17_SPACE_DIM2);
+
+ /* Create the file */
+ file = H5Fcreate(MISC17_FILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(file, FAIL, "H5Fcreate");
+
+ /* Create dataspace for datasets */
+ sid = H5Screate_simple(MISC17_SPACE_RANK, dims, NULL);
+ CHECK(sid, FAIL, "H5Screate_simple");
+
+ /* Create a datatype to refer to */
+ tid = H5Tcopy(H5T_C_S1);
+ CHECK(tid, FAIL, "H5Tcopy");
+
+ ret = H5Tset_strpad(tid, H5T_STR_NULLPAD);
+ CHECK(ret, FAIL, "H5Tset_strpad");
+
+ /* Create a dataset */
+ dataset = H5Dcreate2(file, MISC17_DSET_NAME, tid, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(dataset, FAIL, "H5Dcreate2");
+
+ /* Write dataset to disk */
+ ret = H5Dwrite(dataset, tid, H5S_ALL, H5S_ALL, H5P_DEFAULT, wdata);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* Read dataset from disk */
+ ret = H5Dread(dataset, tid, H5S_ALL, H5S_ALL, H5P_DEFAULT, rdata);
+ CHECK(ret, FAIL, "H5Dread");
+
+ /* Compare the data as strings */
+ for (i = 0; i < MISC17_SPACE_DIM1; i++) {
+ if (HDstrlen(wdata[i]) != HDstrlen(rdata[i])) {
+ TestErrPrintf(
+ "Line %u: VL data length don't match!, strlen(wdata[%d])=%d, strlen(rdata[%d])=%d\n",
+ (unsigned)__LINE__, (int)i, (int)HDstrlen(wdata[i]), (int)i, (int)HDstrlen(rdata[i]));
+ continue;
+ } /* end if */
+ if (HDstrcmp(wdata[i], rdata[i]) != 0) {
+ TestErrPrintf("Line %u: VL data values don't match!, wdata[%d]=%s, rdata[%d]=%s\n",
+ (unsigned)__LINE__, (int)i, wdata[i], (int)i, rdata[i]);
+ continue;
+ } /* end if */
+ } /* end for */
+
+ /* Close Dataset */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close datatype */
+ ret = H5Tclose(tid);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ /* Close disk dataspace */
+ ret = H5Sclose(sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ ret = H5Fclose(file);
+ CHECK(ret, FAIL, "H5Fclose");
+} /* end test_misc17() */
+
+/****************************************************************
+**
+** test_misc18(): Test new object header information in H5O_info_t
+** struct.
+**
+****************************************************************/
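+/* The checks below use H5Oget_info_by_name3() with the H5O_INFO_NUM_ATTRS
+ * field mask, which only fills in oinfo.num_attrs; the native object
+ * header statistics that the deprecated H5Oget_info_by_name2()/H5O_info1_t
+ * carried are reported by H5Oget_native_info_by_name() instead (both kept
+ * in the "#if 0" blocks here). */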
+static void
+test_misc18(void)
+{
+ hid_t fid; /* File ID */
+ hid_t sid; /* Dataspace ID */
+ hid_t did1, did2; /* Dataset IDs */
+ hid_t aid; /* Attribute ID */
+#if 0
+#ifndef H5_NO_DEPRECATED_SYMBOLS
+ H5O_info1_t old_oinfo; /* (deprecated) information about object */
+#endif /* H5_NO_DEPRECATED_SYMBOLS */
+#endif
+ H5O_info2_t oinfo; /* Data model information about object */
+#if 0
+ H5O_native_info_t ninfo; /* Native file format information about object */
+#endif
+ char attr_name[32]; /* Attribute name buffer */
+ unsigned u; /* Local index variable */
+ herr_t ret; /* Generic return value */
+
+ /* Create the file */
+ fid = H5Fcreate(MISC18_FILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(fid, FAIL, "H5Fcreate");
+
+ /* Create dataspace for attributes */
+ sid = H5Screate(H5S_SCALAR);
+ CHECK(sid, FAIL, "H5Screate");
+
+ /* Create first dataset */
+ did1 = H5Dcreate2(fid, MISC18_DSET1_NAME, H5T_STD_U32LE, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(did1, FAIL, "H5Dcreate2");
+
+ /* Get object information */
+ ret = H5Oget_info_by_name3(fid, MISC18_DSET1_NAME, &oinfo, H5O_INFO_NUM_ATTRS, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Oget_info_by_name");
+ VERIFY(oinfo.num_attrs, 0, "H5Oget_info_by_name");
+#if 0
+#ifndef H5_NO_DEPRECATED_SYMBOLS
+ ret = H5Oget_info_by_name2(fid, MISC18_DSET1_NAME, &old_oinfo, H5O_INFO_HDR | H5O_INFO_NUM_ATTRS,
+ H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Oget_info_by_name");
+ VERIFY(old_oinfo.hdr.nmesgs, 6, "H5Oget_info_by_name");
+ VERIFY(old_oinfo.hdr.nchunks, 1, "H5Oget_info_by_name");
+ VERIFY(old_oinfo.hdr.space.total, 272, "H5Oget_info_by_name");
+ VERIFY(old_oinfo.hdr.space.free, 152, "H5Oget_info_by_name");
+ VERIFY(old_oinfo.num_attrs, 0, "H5Oget_info_by_name");
+#endif /* H5_NO_DEPRECATED_SYMBOLS */
+ ret = H5Oget_native_info_by_name(fid, MISC18_DSET1_NAME, &ninfo, H5O_NATIVE_INFO_HDR, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Oget_native_info_by_name");
+ VERIFY(ninfo.hdr.nmesgs, 6, "H5Oget_native_info_by_name");
+ VERIFY(ninfo.hdr.nchunks, 1, "H5Oget_native_info_by_name");
+ VERIFY(ninfo.hdr.space.total, 272, "H5Oget_native_info_by_name");
+ VERIFY(ninfo.hdr.space.free, 152, "H5Oget_native_info_by_name");
+#endif
+
+ /* Create second dataset */
+ did2 = H5Dcreate2(fid, MISC18_DSET2_NAME, H5T_STD_U32LE, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(did2, FAIL, "H5Dcreate2");
+
+ /* Get object information */
+ ret = H5Oget_info_by_name3(fid, MISC18_DSET2_NAME, &oinfo, H5O_INFO_NUM_ATTRS, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Oget_info_by_name");
+ VERIFY(oinfo.num_attrs, 0, "H5Oget_info_by_name");
+#if 0
+#ifndef H5_NO_DEPRECATED_SYMBOLS
+ ret = H5Oget_info_by_name2(fid, MISC18_DSET2_NAME, &old_oinfo, H5O_INFO_HDR | H5O_INFO_NUM_ATTRS,
+ H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Oget_info_by_name");
+ VERIFY(old_oinfo.hdr.nmesgs, 6, "H5Oget_info_by_name");
+ VERIFY(old_oinfo.hdr.nchunks, 1, "H5Oget_info_by_name");
+ VERIFY(old_oinfo.hdr.space.total, 272, "H5Oget_info_by_name");
+ VERIFY(old_oinfo.hdr.space.free, 152, "H5Oget_info_by_name");
+ VERIFY(old_oinfo.num_attrs, 0, "H5Oget_info_by_name");
+#endif /* H5_NO_DEPRECATED_SYMBOLS */
+ ret = H5Oget_native_info_by_name(fid, MISC18_DSET2_NAME, &ninfo, H5O_NATIVE_INFO_HDR, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Oget_native_info_by_name");
+ VERIFY(ninfo.hdr.nmesgs, 6, "H5Oget_native_info_by_name");
+ VERIFY(ninfo.hdr.nchunks, 1, "H5Oget_native_info_by_name");
+ VERIFY(ninfo.hdr.space.total, 272, "H5Oget_native_info_by_name");
+ VERIFY(ninfo.hdr.space.free, 152, "H5Oget_native_info_by_name");
+#endif
+
+ /* Loop creating attributes on each dataset, flushing them to the file each time */
+ for (u = 0; u < 10; u++) {
+ /* Set up attribute name */
+ HDsnprintf(attr_name, sizeof(attr_name), "Attr %u", u);
+
+ /* Create & close attribute on first dataset */
+ aid = H5Acreate2(did1, attr_name, H5T_STD_U32LE, sid, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(aid, FAIL, "H5Acreate2");
+
+ ret = H5Aclose(aid);
+ CHECK(ret, FAIL, "H5Aclose");
+
+ /* Create & close attribute on second dataset */
+ aid = H5Acreate2(did2, attr_name, H5T_STD_U32LE, sid, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(aid, FAIL, "H5Acreate2");
+
+ ret = H5Aclose(aid);
+ CHECK(ret, FAIL, "H5Aclose");
+
+ /* Flush file, to 'fix' size of dataset object headers */
+ ret = H5Fflush(fid, H5F_SCOPE_GLOBAL);
+ CHECK(ret, FAIL, "H5Fflush");
+ } /* end for */
+
+ /* Get object information for dataset #1 now */
+ ret = H5Oget_info_by_name3(fid, MISC18_DSET1_NAME, &oinfo, H5O_INFO_NUM_ATTRS, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Oget_info_by_name");
+ VERIFY(oinfo.num_attrs, 10, "H5Oget_info_by_name");
+#if 0
+#ifndef H5_NO_DEPRECATED_SYMBOLS
+ ret = H5Oget_info_by_name2(fid, MISC18_DSET1_NAME, &old_oinfo, H5O_INFO_HDR | H5O_INFO_NUM_ATTRS,
+ H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Oget_info_by_name");
+ VERIFY(old_oinfo.hdr.nmesgs, 24, "H5Oget_info_by_name");
+ VERIFY(old_oinfo.hdr.nchunks, 9, "H5Oget_info_by_name");
+ VERIFY(old_oinfo.hdr.space.total, 888, "H5Oget_info_by_name");
+ VERIFY(old_oinfo.hdr.space.free, 16, "H5Oget_info_by_name");
+ VERIFY(old_oinfo.num_attrs, 10, "H5Oget_info_by_name");
+#endif /* H5_NO_DEPRECATED_SYMBOLS */
+ ret = H5Oget_native_info_by_name(fid, MISC18_DSET1_NAME, &ninfo, H5O_NATIVE_INFO_HDR, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Oget_native_info_by_name");
+ VERIFY(ninfo.hdr.nmesgs, 24, "H5Oget_native_info_by_name");
+ VERIFY(ninfo.hdr.nchunks, 9, "H5Oget_native_info_by_name");
+ VERIFY(ninfo.hdr.space.total, 888, "H5Oget_native_info_by_name");
+ VERIFY(ninfo.hdr.space.free, 16, "H5Oget_native_info_by_name");
+#endif
+
+ /* Get object information for dataset #2 now */
+ ret = H5Oget_info_by_name3(fid, MISC18_DSET2_NAME, &oinfo, H5O_INFO_NUM_ATTRS, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Oget_info_by_name");
+ VERIFY(oinfo.num_attrs, 10, "H5Oget_info_by_name");
+#if 0
+#ifndef H5_NO_DEPRECATED_SYMBOLS
+ ret = H5Oget_info_by_name2(fid, MISC18_DSET2_NAME, &old_oinfo, H5O_INFO_HDR | H5O_INFO_NUM_ATTRS,
+ H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Oget_info_by_name");
+ VERIFY(old_oinfo.hdr.nmesgs, 24, "H5Oget_info_by_name");
+ VERIFY(old_oinfo.hdr.nchunks, 9, "H5Oget_info_by_name");
+ VERIFY(old_oinfo.hdr.space.total, 888, "H5Oget_info_by_name");
+ VERIFY(old_oinfo.hdr.space.free, 16, "H5Oget_info_by_name");
+ VERIFY(old_oinfo.num_attrs, 10, "H5Oget_info_by_name");
+#endif /* H5_NO_DEPRECATED_SYMBOLS */
+ ret = H5Oget_native_info_by_name(fid, MISC18_DSET2_NAME, &ninfo, H5O_NATIVE_INFO_HDR, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Oget_mative_info_by_name");
+ VERIFY(ninfo.hdr.nmesgs, 24, "H5Oget_native_info_by_name");
+ VERIFY(ninfo.hdr.nchunks, 9, "H5Oget_native_info_by_name");
+ VERIFY(ninfo.hdr.space.total, 888, "H5Oget_native_info_by_name");
+ VERIFY(ninfo.hdr.space.free, 16, "H5Oget_native_info_by_name");
+#endif
+
+ /* Close second dataset */
+ ret = H5Dclose(did2);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close first dataset */
+ ret = H5Dclose(did1);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close disk dataspace */
+ ret = H5Sclose(sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+} /* end test_misc18() */
+
+/****************************************************************
+**
+** test_misc19(): Test incrementing & decrementing ref count on IDs
+**
+****************************************************************/
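+/* Each ID type below follows the same reference-count pattern (H5Xcreate
+ * and H5Xclose stand in for the per-type create/close calls):
+ *
+ *     id = H5Xcreate(...);   ref count == 1
+ *     H5Iinc_ref(id);        ref count == 2
+ *     H5Xclose(id);          ref count == 1, object still open
+ *     H5Idec_ref(id);        ref count == 0, object freed
+ *     H5Xclose(id);          fails, the ID is no longer valid
+ */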
+static void
+test_misc19(void)
+{
+ hid_t fid = -1; /* File ID */
+ hid_t sid = -1; /* Dataspace ID */
+ hid_t did = -1; /* Dataset ID */
+ hid_t tid = -1; /* Datatype ID */
+ hid_t aid = -1; /* Attribute ID */
+ hid_t plid = -1; /* Property List ID */
+ hid_t pcid = -1; /* Property Class ID */
+ hid_t gid = -1; /* Group ID */
+ hid_t ecid = -1; /* Error Class ID */
+ hid_t emid = -1; /* Error Message ID */
+ hid_t esid = -1; /* Error Stack ID */
+#if 0
+ hid_t vfdid = -1; /* Virtual File Driver ID */
+ hid_t volid = -1; /* Virtual Object Layer ID */
+ H5FD_class_t *vfd_cls = NULL; /* VFD class */
+ H5VL_class_t *vol_cls = NULL; /* VOL class */
+#endif
+ int rc; /* Reference count */
+ herr_t ret; /* Generic return value */
+
+ /* Check H5I operations on files */
+
+ /* Create the file */
+ fid = H5Fcreate(MISC19_FILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(fid, FAIL, "H5Fcreate");
+
+ /* Check the reference count */
+ rc = H5Iget_ref(fid);
+ VERIFY(rc, 1, "H5Iget_ref");
+
+ /* Inc the reference count */
+ rc = H5Iinc_ref(fid);
+ VERIFY(rc, 2, "H5Iinc_ref");
+
+ /* Close the file normally */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Check the reference count */
+ rc = H5Iget_ref(fid);
+ VERIFY(rc, 1, "H5Iget_ref");
+
+ /* Close the file by decrementing the reference count */
+ rc = H5Idec_ref(fid);
+ VERIFY(rc, 0, "H5Idec_ref");
+
+ /* Try closing the file again (should fail) */
+ H5E_BEGIN_TRY
+ {
+ ret = H5Fclose(fid);
+ }
+ H5E_END_TRY;
+ VERIFY(ret, FAIL, "H5Fclose");
+
+ /* Check H5I operations on property lists */
+
+ /* Create the property list */
+ plid = H5Pcreate(H5P_DATASET_CREATE);
+ CHECK(plid, FAIL, "H5Pcreate");
+
+ /* Check the reference count */
+ rc = H5Iget_ref(plid);
+ VERIFY(rc, 1, "H5Iget_ref");
+
+ /* Inc the reference count */
+ rc = H5Iinc_ref(plid);
+ VERIFY(rc, 2, "H5Iinc_ref");
+
+ /* Close the property list normally */
+ ret = H5Pclose(plid);
+ CHECK(ret, FAIL, "H5Pclose");
+
+ /* Check the reference count */
+ rc = H5Iget_ref(plid);
+ VERIFY(rc, 1, "H5Iget_ref");
+
+ /* Close the property list by decrementing the reference count */
+ rc = H5Idec_ref(plid);
+ VERIFY(rc, 0, "H5Idec_ref");
+
+ /* Try closing the property list again (should fail) */
+ H5E_BEGIN_TRY
+ {
+ ret = H5Pclose(plid);
+ }
+ H5E_END_TRY;
+ VERIFY(ret, FAIL, "H5Pclose");
+
+ /* Check H5I operations on property classes */
+
+ /* Create a property class */
+ pcid = H5Pcreate_class(H5P_DATASET_CREATE, "foo", NULL, NULL, NULL, NULL, NULL, NULL);
+ CHECK(pcid, FAIL, "H5Pcreate_class");
+
+ /* Check the reference count */
+ rc = H5Iget_ref(pcid);
+ VERIFY(rc, 1, "H5Iget_ref");
+
+ /* Inc the reference count */
+ rc = H5Iinc_ref(pcid);
+ VERIFY(rc, 2, "H5Iinc_ref");
+
+ /* Close the property class normally */
+ ret = H5Pclose_class(pcid);
+ CHECK(ret, FAIL, "H5Pclose_class");
+
+ /* Check the reference count */
+ rc = H5Iget_ref(pcid);
+ VERIFY(rc, 1, "H5Iget_ref");
+
+ /* Close the property class by decrementing the reference count */
+ rc = H5Idec_ref(pcid);
+ VERIFY(rc, 0, "H5Idec_ref");
+
+ /* Try closing the property class again (should fail) */
+ H5E_BEGIN_TRY
+ {
+ ret = H5Pclose_class(pcid);
+ }
+ H5E_END_TRY;
+ VERIFY(ret, FAIL, "H5Pclose_class");
+
+ /* Check H5I operations on datatypes */
+
+ /* Create a datatype */
+ tid = H5Tcreate(H5T_OPAQUE, (size_t)16);
+ CHECK(tid, FAIL, "H5Tcreate");
+
+ /* Check the reference count */
+ rc = H5Iget_ref(tid);
+ VERIFY(rc, 1, "H5Iget_ref");
+
+ /* Inc the reference count */
+ rc = H5Iinc_ref(tid);
+ VERIFY(rc, 2, "H5Iinc_ref");
+
+ /* Close the datatype normally */
+ ret = H5Tclose(tid);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ /* Check the reference count */
+ rc = H5Iget_ref(tid);
+ VERIFY(rc, 1, "H5Iget_ref");
+
+ /* Close the datatype by decrementing the reference count */
+ rc = H5Idec_ref(tid);
+ VERIFY(rc, 0, "H5Idec_ref");
+
+ /* Try closing the datatype again (should fail) */
+ H5E_BEGIN_TRY
+ {
+ ret = H5Tclose(tid);
+ }
+ H5E_END_TRY;
+ VERIFY(ret, FAIL, "H5Tclose");
+
+ /* Check H5I operations on dataspaces */
+
+ /* Create a dataspace */
+ sid = H5Screate(H5S_SCALAR);
+ CHECK(sid, FAIL, "H5Screate");
+
+ /* Check the reference count */
+ rc = H5Iget_ref(sid);
+ VERIFY(rc, 1, "H5Iget_ref");
+
+ /* Inc the reference count */
+ rc = H5Iinc_ref(sid);
+ VERIFY(rc, 2, "H5Iinc_ref");
+
+ /* Close the dataspace normally */
+ ret = H5Sclose(sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Check the reference count */
+ rc = H5Iget_ref(sid);
+ VERIFY(rc, 1, "H5Iget_ref");
+
+ /* Close the dataspace by decrementing the reference count */
+ rc = H5Idec_ref(sid);
+ VERIFY(rc, 0, "H5Idec_ref");
+
+ /* Try closing the dataspace again (should fail) */
+ H5E_BEGIN_TRY
+ {
+ ret = H5Sclose(sid);
+ }
+ H5E_END_TRY;
+ VERIFY(ret, FAIL, "H5Sclose");
+
+ /* Check H5I operations on datasets */
+
+ /* Create a file */
+ fid = H5Fcreate(MISC19_FILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(fid, FAIL, "H5Fcreate");
+
+ /* Create a dataspace */
+ sid = H5Screate(H5S_SCALAR);
+ CHECK(sid, FAIL, "H5Screate");
+
+ /* Create a dataset */
+ did = H5Dcreate2(fid, MISC19_DSET_NAME, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(did, FAIL, "H5Dcreate2");
+
+ /* Check the reference count */
+ rc = H5Iget_ref(did);
+ VERIFY(rc, 1, "H5Iget_ref");
+
+ /* Inc the reference count */
+ rc = H5Iinc_ref(did);
+ VERIFY(rc, 2, "H5Iinc_ref");
+
+ /* Close the dataset normally */
+ ret = H5Dclose(did);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Check the reference count */
+ rc = H5Iget_ref(did);
+ VERIFY(rc, 1, "H5Iget_ref");
+
+ /* Close the dataset by decrementing the reference count */
+ rc = H5Idec_ref(did);
+ VERIFY(rc, 0, "H5Idec_ref");
+
+ /* Try closing the dataset again (should fail) */
+ H5E_BEGIN_TRY
+ {
+ ret = H5Dclose(did);
+ }
+ H5E_END_TRY;
+ VERIFY(ret, FAIL, "H5Dclose");
+
+ /* Close the dataspace */
+ ret = H5Sclose(sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close the file */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Check H5I operations on attributes */
+
+ /* Create a file */
+ fid = H5Fcreate(MISC19_FILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(fid, FAIL, "H5Fcreate");
+
+ /* Open the root group */
+ gid = H5Gopen2(fid, "/", H5P_DEFAULT);
+ CHECK(gid, FAIL, "H5Gopen2");
+
+ /* Create a dataspace */
+ sid = H5Screate(H5S_SCALAR);
+ CHECK(sid, FAIL, "H5Screate");
+
+ /* Create an attribute */
+ aid = H5Acreate2(gid, MISC19_ATTR_NAME, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(aid, FAIL, "H5Acreate2");
+
+ /* Check the reference count */
+ rc = H5Iget_ref(aid);
+ VERIFY(rc, 1, "H5Iget_ref");
+
+ /* Inc the reference count */
+ rc = H5Iinc_ref(aid);
+ VERIFY(rc, 2, "H5Iinc_ref");
+
+ /* Close the attribute normally */
+ ret = H5Aclose(aid);
+ CHECK(ret, FAIL, "H5Aclose");
+
+ /* Check the reference count */
+ rc = H5Iget_ref(aid);
+ VERIFY(rc, 1, "H5Iget_ref");
+
+ /* Close the attribute by decrementing the reference count */
+ rc = H5Idec_ref(aid);
+ VERIFY(rc, 0, "H5Idec_ref");
+
+ /* Try closing the attribute again (should fail) */
+ H5E_BEGIN_TRY
+ {
+ ret = H5Aclose(aid);
+ }
+ H5E_END_TRY;
+ VERIFY(ret, FAIL, "H5Aclose");
+
+ /* Close the root group */
+ ret = H5Gclose(gid);
+ CHECK(ret, FAIL, "H5Gclose");
+
+ /* Close the dataspace */
+ ret = H5Sclose(sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close the file */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Check H5I operations on groups */
+
+ /* Create a file */
+ fid = H5Fcreate(MISC19_FILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(fid, FAIL, "H5Fcreate");
+
+ /* Create a group */
+ gid = H5Gcreate2(fid, MISC19_GROUP_NAME, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(gid, FAIL, "H5Gcreate2");
+
+ /* Check the reference count */
+ rc = H5Iget_ref(gid);
+ VERIFY(rc, 1, "H5Iget_ref");
+
+ /* Inc the reference count */
+ rc = H5Iinc_ref(gid);
+ VERIFY(rc, 2, "H5Iinc_ref");
+
+ /* Close the group normally */
+ ret = H5Gclose(gid);
+ CHECK(ret, FAIL, "H5Gclose");
+
+ /* Check the reference count */
+ rc = H5Iget_ref(gid);
+ VERIFY(rc, 1, "H5Iget_ref");
+
+ /* Close the group by decrementing the reference count */
+ rc = H5Idec_ref(gid);
+ VERIFY(rc, 0, "H5Idec_ref");
+
+ /* Try closing the group again (should fail) */
+ H5E_BEGIN_TRY
+ {
+ ret = H5Gclose(gid);
+ }
+ H5E_END_TRY;
+ VERIFY(ret, FAIL, "H5Gclose");
+
+ /* Close the file */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Check H5I operations on error classes */
+
+ /* Create an error class */
+ ecid = H5Eregister_class("foo", "bar", "baz");
+ CHECK(ecid, FAIL, "H5Eregister_class");
+
+ /* Check the reference count */
+ rc = H5Iget_ref(ecid);
+ VERIFY(rc, 1, "H5Iget_ref");
+
+ /* Inc the reference count */
+ rc = H5Iinc_ref(ecid);
+ VERIFY(rc, 2, "H5Iinc_ref");
+
+ /* Close the error class normally */
+ ret = H5Eunregister_class(ecid);
+ CHECK(ret, FAIL, "H5Eunregister_class");
+
+ /* Check the reference count */
+ rc = H5Iget_ref(ecid);
+ VERIFY(rc, 1, "H5Iget_ref");
+
+ /* Close the error class by decrementing the reference count */
+ rc = H5Idec_ref(ecid);
+ VERIFY(rc, 0, "H5Idec_ref");
+
+ /* Try closing the error class again (should fail) */
+ H5E_BEGIN_TRY
+ {
+ ret = H5Eunregister_class(ecid);
+ }
+ H5E_END_TRY;
+ VERIFY(ret, FAIL, "H5Eunregister_class");
+
+ /* Check H5I operations on error messages */
+
+ /* Create an error class */
+ ecid = H5Eregister_class("foo", "bar", "baz");
+ CHECK(ecid, FAIL, "H5Eregister_class");
+
+ /* Create an error message */
+ emid = H5Ecreate_msg(ecid, H5E_MAJOR, "mumble");
+ CHECK(emid, FAIL, "H5Ecreate_msg");
+
+ /* Check the reference count */
+ rc = H5Iget_ref(emid);
+ VERIFY(rc, 1, "H5Iget_ref");
+
+ /* Inc the reference count */
+ rc = H5Iinc_ref(emid);
+ VERIFY(rc, 2, "H5Iinc_ref");
+
+ /* Close the error message normally */
+ ret = H5Eclose_msg(emid);
+ CHECK(ret, FAIL, "H5Eclose_msg");
+
+ /* Check the reference count */
+ rc = H5Iget_ref(emid);
+ VERIFY(rc, 1, "H5Iget_ref");
+
+ /* Close the error message by decrementing the reference count */
+ rc = H5Idec_ref(emid);
+ VERIFY(rc, 0, "H5Idec_ref");
+
+ /* Try closing the error message again (should fail) */
+ H5E_BEGIN_TRY
+ {
+ ret = H5Eclose_msg(emid);
+ }
+ H5E_END_TRY;
+ VERIFY(ret, FAIL, "H5Eclose_msg");
+
+ /* Close the error class */
+ ret = H5Eunregister_class(ecid);
+ CHECK(ret, FAIL, "H5Eunregister_class");
+
+ /* Check H5I operations on error stacks */
+
+ /* Create an error stack */
+ esid = H5Eget_current_stack();
+ CHECK(esid, FAIL, "H5Eget_current_stack");
+
+ /* Check the reference count */
+ rc = H5Iget_ref(esid);
+ VERIFY(rc, 1, "H5Iget_ref");
+
+ /* Inc the reference count */
+ rc = H5Iinc_ref(esid);
+ VERIFY(rc, 2, "H5Iinc_ref");
+
+ /* Close the error stack normally */
+ ret = H5Eclose_stack(esid);
+ CHECK(ret, FAIL, "H5Eclose_stack");
+
+ /* Check the reference count */
+ rc = H5Iget_ref(esid);
+ VERIFY(rc, 1, "H5Iget_ref");
+
+ /* Close the error stack by decrementing the reference count */
+ rc = H5Idec_ref(esid);
+ VERIFY(rc, 0, "H5Idec_ref");
+
+ /* Try closing the error stack again (should fail) */
+ H5E_BEGIN_TRY
+ {
+ ret = H5Eclose_stack(esid);
+ }
+ H5E_END_TRY;
+ VERIFY(ret, FAIL, "H5Eclose_stack");
+
+#if 0
+ /* Check H5I operations on virtual file drivers */
+
+ /* Get a VFD class to register */
+ vfd_cls = h5_get_dummy_vfd_class();
+ CHECK_PTR(vfd_cls, "h5_get_dummy_vfd_class");
+
+ /* Register a virtual file driver */
+ vfdid = H5FDregister(vfd_cls);
+ CHECK(vfdid, FAIL, "H5FDregister");
+
+ /* Check the reference count */
+ rc = H5Iget_ref(vfdid);
+ VERIFY(rc, 1, "H5Iget_ref");
+
+ /* Increment the reference count */
+ rc = H5Iinc_ref(vfdid);
+ VERIFY(rc, 2, "H5Iinc_ref");
+
+ /* Unregister the VFD normally */
+ ret = H5FDunregister(vfdid);
+ CHECK(ret, FAIL, "H5FDunregister");
+
+ /* Check the reference count */
+ rc = H5Iget_ref(vfdid);
+ VERIFY(rc, 1, "H5Iget_ref");
+
+ /* Unregister the VFD by decrementing the reference count */
+ rc = H5Idec_ref(vfdid);
+ VERIFY(rc, 0, "H5Idec_ref");
+
+ /* Try unregistering the VFD again (should fail) */
+ H5E_BEGIN_TRY
+ {
+ ret = H5FDunregister(vfdid);
+ }
+ H5E_END_TRY;
+ VERIFY(ret, FAIL, "H5FDunregister");
+
+ HDfree(vfd_cls);
+
+ /* Check H5I operations on virtual object connectors */
+
+ /* Get a VOL class to register */
+ vol_cls = h5_get_dummy_vol_class();
+ CHECK_PTR(vol_cls, "h5_get_dummy_vol_class");
+
+ /* Register a VOL connector */
+ volid = H5VLregister_connector(vol_cls, H5P_DEFAULT);
+ CHECK(volid, FAIL, "H5VLregister_connector");
+
+ /* Check the reference count */
+ rc = H5Iget_ref(volid);
+ VERIFY(rc, 1, "H5Iget_ref");
+
+ /* Increment the reference count */
+ rc = H5Iinc_ref(volid);
+ VERIFY(rc, 2, "H5Iinc_ref");
+
+ /* Unregister the VOL connector normally */
+ ret = H5VLunregister_connector(volid);
+ CHECK(ret, FAIL, "H5VLunregister_connector");
+
+ /* Check the reference count */
+ rc = H5Iget_ref(volid);
+ VERIFY(rc, 1, "H5Iget_ref");
+
+ /* Unregister the VOL connector by decrementing the reference count */
+ rc = H5Idec_ref(volid);
+ VERIFY(rc, 0, "H5Idec_ref");
+
+ /* Try unregistering the VOL connector again (should fail) */
+ H5E_BEGIN_TRY
+ {
+ ret = H5VLunregister_connector(volid);
+ }
+ H5E_END_TRY;
+ VERIFY(ret, FAIL, "H5VLunregister_connector");
+
+ HDfree(vol_cls);
+#endif
+} /* end test_misc19() */
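+
+#if 0
+/* A minimal sketch (not invoked by the test suite, kept under #if 0) of the
+ * generic H5I reference-count pattern that test_misc19() exercises for many
+ * ID types, shown here on a dataset creation property list. It assumes only
+ * the public H5I/H5P calls already used above; the helper name is made up.
+ */
+static void
+misc19_refcount_sketch(void)
+{
+    hid_t  plid;
+    int    rc;
+    herr_t ret;
+
+    plid = H5Pcreate(H5P_DATASET_CREATE); /* new ID, reference count == 1 */
+
+    rc = H5Iinc_ref(plid); /* bump the count to 2 */
+    VERIFY(rc, 2, "H5Iinc_ref");
+
+    ret = H5Pclose(plid); /* drops the count to 1; the ID stays valid */
+    CHECK(ret, FAIL, "H5Pclose");
+
+    rc = H5Idec_ref(plid); /* drops the count to 0; the ID is released */
+    VERIFY(rc, 0, "H5Idec_ref");
+}
+#endif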
+
+/****************************************************************
+**
+** test_misc20(): Test problems with version 2 of storage layout
+** message truncating dimensions
+**
+****************************************************************/
+#if 0
+static void
+test_misc20(void)
+{
+ hid_t fid; /* File ID */
+    hid_t sid;  /* Dataspace ID */
+ hid_t did; /* Dataset ID */
+ hid_t dcpl; /* Dataset creation property list ID */
+ int rank = MISC20_SPACE_RANK; /* Rank of dataspace */
+ hsize_t big_dims[MISC20_SPACE_RANK] = {MISC20_SPACE_DIM0, MISC20_SPACE_DIM1}; /* Large dimensions */
+ hsize_t small_dims[MISC20_SPACE_RANK] = {MISC20_SPACE2_DIM0, MISC20_SPACE2_DIM1}; /* Small dimensions */
+ unsigned version; /* Version of storage layout info */
+    hsize_t contig_size; /* Size of contiguous storage from the layout info */
+ const char *testfile = H5_get_srcdir_filename(MISC20_FILE_OLD); /* Corrected test file name */
+ hbool_t driver_is_default_compatible;
+ herr_t ret; /* Generic return value */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing large dimension truncation fix\n"));
+
+ ret = h5_driver_is_default_vfd_compatible(H5P_DEFAULT, &driver_is_default_compatible);
+ CHECK(ret, FAIL, "h5_driver_is_default_vfd_compatible");
+
+ if (!driver_is_default_compatible) {
+ HDprintf("-- SKIPPED --\n");
+ return;
+ }
+
+ /* Verify that chunks with dimensions that are too large get rejected */
+
+ /* Create a dataset creation property list */
+ dcpl = H5Pcreate(H5P_DATASET_CREATE);
+ CHECK(dcpl, FAIL, "H5Pcreate");
+
+ /* Try to use chunked storage for this dataset */
+ ret = H5Pset_chunk(dcpl, rank, big_dims);
+ VERIFY(ret, FAIL, "H5Pset_chunk");
+
+ /* Verify that the storage for the dataset is the correct size and hasn't
+ * been truncated.
+ */
+
+ /* Create the file */
+ fid = H5Fcreate(MISC20_FILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(fid, FAIL, "H5Fcreate");
+
+ /* Create dataspace with _really_ big dimensions */
+ sid = H5Screate_simple(rank, big_dims, NULL);
+ CHECK(sid, FAIL, "H5Screate_simple");
+
+ /* Make certain that the dataset's storage doesn't get allocated :-) */
+ ret = H5Pset_alloc_time(dcpl, H5D_ALLOC_TIME_LATE);
+ CHECK(ret, FAIL, "H5Pset_alloc_time");
+
+ /* Create dataset with big dataspace */
+ did = H5Dcreate2(fid, MISC20_DSET_NAME, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT);
+ CHECK(did, FAIL, "H5Dcreate2");
+
+    /* Close dataset */
+ ret = H5Dclose(did);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close dataspace */
+ ret = H5Sclose(sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Create dataspace with small dimensions */
+ sid = H5Screate_simple(rank, small_dims, NULL);
+ CHECK(sid, FAIL, "H5Screate_simple");
+
+    /* Create dataset with small dataspace */
+ did = H5Dcreate2(fid, MISC20_DSET2_NAME, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT);
+ CHECK(did, FAIL, "H5Dcreate2");
+
+    /* Close dataset */
+ ret = H5Dclose(did);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close dataspace */
+ ret = H5Sclose(sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close dataset creation property list */
+ ret = H5Pclose(dcpl);
+ CHECK(ret, FAIL, "H5Pclose");
+
+ /* Close file */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Re-open the file */
+ fid = H5Fopen(MISC20_FILE, H5F_ACC_RDONLY, H5P_DEFAULT);
+ CHECK(fid, FAIL, "H5Fopen");
+
+ /* Open dataset with big dimensions */
+ did = H5Dopen2(fid, MISC20_DSET_NAME, H5P_DEFAULT);
+ CHECK(did, FAIL, "H5Dopen2");
+
+ /* Get the layout version */
+ ret = H5D__layout_version_test(did, &version);
+ CHECK(ret, FAIL, "H5D__layout_version_test");
+ VERIFY(version, 3, "H5D__layout_version_test");
+
+ /* Get the layout contiguous storage size */
+ ret = H5D__layout_contig_size_test(did, &contig_size);
+ CHECK(ret, FAIL, "H5D__layout_contig_size_test");
+ VERIFY(contig_size, (MISC20_SPACE_DIM0 * MISC20_SPACE_DIM1 * H5Tget_size(H5T_NATIVE_INT)),
+ "H5D__layout_contig_size_test");
+
+    /* Close dataset */
+ ret = H5Dclose(did);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Open dataset with small dimensions */
+ did = H5Dopen2(fid, MISC20_DSET2_NAME, H5P_DEFAULT);
+ CHECK(did, FAIL, "H5Dopen2");
+
+ /* Get the layout version */
+ ret = H5D__layout_version_test(did, &version);
+ CHECK(ret, FAIL, "H5D__layout_version_test");
+ VERIFY(version, 3, "H5D__layout_version_test");
+
+ /* Get the layout contiguous storage size */
+ ret = H5D__layout_contig_size_test(did, &contig_size);
+ CHECK(ret, FAIL, "H5D__layout_contig_size_test");
+ VERIFY(contig_size, (MISC20_SPACE2_DIM0 * MISC20_SPACE2_DIM1 * H5Tget_size(H5T_NATIVE_INT)),
+ "H5D__layout_contig_size_test");
+
+    /* Close dataset */
+ ret = H5Dclose(did);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close file */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Verify that the storage size is computed correctly for older versions of layout info */
+
+ /*
+ * Open the old file and the dataset and get old settings.
+ */
+ fid = H5Fopen(testfile, H5F_ACC_RDONLY, H5P_DEFAULT);
+ CHECK(fid, FAIL, "H5Fopen");
+
+ /* Open dataset with small dimensions */
+ did = H5Dopen2(fid, MISC20_DSET_NAME, H5P_DEFAULT);
+ CHECK(did, FAIL, "H5Dopen2");
+
+ /* Get the layout version */
+ ret = H5D__layout_version_test(did, &version);
+ CHECK(ret, FAIL, "H5D__layout_version_test");
+ VERIFY(version, 2, "H5D__layout_version_test");
+
+ /* Get the layout contiguous storage size */
+ ret = H5D__layout_contig_size_test(did, &contig_size);
+ CHECK(ret, FAIL, "H5D__layout_contig_size_test");
+ VERIFY(contig_size, (MISC20_SPACE_DIM0 * MISC20_SPACE_DIM1 * H5Tget_size(H5T_STD_I32LE)),
+ "H5D__layout_contig_size_test");
+
+    /* Close dataset */
+ ret = H5Dclose(did);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close file */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+
+} /* end test_misc20() */
+#endif
+
+/*
+   test_misc21 and test_misc22 should only be executed when the SZIP filter
+   is present and its encoder is available.
+ EIP 2004/8/04
+*/
+#if defined(H5_HAVE_FILTER_SZIP) && !defined(H5_API_TEST_NO_FILTERS)
+
+/****************************************************************
+**
+** test_misc21(): Test that late allocation time is treated the same
+** as incremental allocation time, for chunked datasets
+**                   when overwriting the entire dataset where the chunks
+** don't exactly match the dataspace.
+**
+****************************************************************/
+static void
+test_misc21(void)
+{
+ hid_t fid, sid, dcpl, dsid;
+ char *buf;
+ hsize_t dims[2] = {MISC21_SPACE_DIM0, MISC21_SPACE_DIM1},
+ chunk_size[2] = {MISC21_CHUNK_DIM0, MISC21_CHUNK_DIM1};
+ herr_t ret; /* Generic return value */
+
+ if (h5_szip_can_encode() != 1)
+ return;
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing late allocation time w/chunks & filters\n"));
+
+ /* Allocate space for the buffer */
+ buf = (char *)HDcalloc(MISC21_SPACE_DIM0 * MISC21_SPACE_DIM1, 1);
+ CHECK(buf, NULL, "HDcalloc");
+
+ /* Create the file */
+ fid = H5Fcreate(MISC21_FILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(fid, FAIL, "H5Fcreate");
+
+ /* Create the DCPL */
+ dcpl = H5Pcreate(H5P_DATASET_CREATE);
+ CHECK(dcpl, FAIL, "H5Pcreate");
+
+ /* Set custom DCPL properties */
+ ret = H5Pset_chunk(dcpl, MISC21_SPACE_RANK, chunk_size);
+ CHECK(ret, FAIL, "H5Pset_chunk");
+ ret = H5Pset_szip(dcpl, H5_SZIP_NN_OPTION_MASK, 8);
+    CHECK(ret, FAIL, "H5Pset_szip");
+ ret = H5Pset_alloc_time(dcpl, H5D_ALLOC_TIME_LATE);
+ CHECK(ret, FAIL, "H5Pset_alloc_time");
+
+ /* Create the dataspace for the dataset */
+ sid = H5Screate_simple(MISC21_SPACE_RANK, dims, NULL);
+    CHECK(sid, FAIL, "H5Screate_simple");
+
+ /* Create the dataset */
+ dsid = H5Dcreate2(fid, MISC21_DSET_NAME, H5T_NATIVE_UINT8, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT);
+ CHECK(dsid, FAIL, "H5Dcreate2");
+
+ /* Write out the whole dataset */
+ ret = H5Dwrite(dsid, H5T_NATIVE_UINT8, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* Close everything */
+ ret = H5Dclose(dsid);
+ CHECK(ret, FAIL, "H5Dclose");
+ ret = H5Sclose(sid);
+ CHECK(ret, FAIL, "H5Sclose");
+ ret = H5Pclose(dcpl);
+ CHECK(ret, FAIL, "H5Pclose");
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ HDfree(buf);
+} /* end test_misc21() */
+
+/****************************************************************
+**
+** test_misc22(): Test SZIP bits-per-pixel parameter.
+** This should be set according to the datatype.
+**                   Tests combinations of precision and offset.
+**
+****************************************************************/
+static void
+test_misc22(void)
+{
+ hid_t fid, sid, dcpl, dsid, dcpl2;
+ char *buf;
+ hsize_t dims[2] = {MISC22_SPACE_DIM0, MISC22_SPACE_DIM1},
+ chunk_size[2] = {MISC22_CHUNK_DIM0, MISC22_CHUNK_DIM1};
+ herr_t ret; /* Generic return value */
+ hid_t dtype;
+ /* should extend test to signed ints */
+ hid_t idts[4];
+    /* do the same for floats, e.g.:
+       hid_t fdts[2] = {H5T_NATIVE_FLOAT,
+                        H5T_NATIVE_DOUBLE};
+     */
+ size_t prec[4] = {3, 11, 19, 27};
+ size_t offsets[5] = {0, 3, 11, 19, 27};
+ int i, j, k;
+ unsigned int flags;
+ size_t cd_nelmts = 32;
+ unsigned int cd_values[32];
+ size_t correct;
+
+ if (h5_szip_can_encode() != 1)
+ return;
+ idts[0] = H5Tcopy(H5T_NATIVE_UINT8);
+ idts[1] = H5Tcopy(H5T_NATIVE_UINT16);
+ idts[2] = H5Tcopy(H5T_NATIVE_UINT32);
+ idts[3] = H5Tcopy(H5T_NATIVE_UINT64);
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing datatypes with SZIP filter\n"));
+
+ /* Allocate space for the buffer */
+ buf = (char *)HDcalloc(MISC22_SPACE_DIM0 * MISC22_SPACE_DIM1, 8);
+ CHECK(buf, NULL, "HDcalloc");
+
+ /* Create the file */
+ fid = H5Fcreate(MISC22_FILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(fid, FAIL, "H5Fcreate");
+
+ /* Create the dataspace for the dataset */
+ sid = H5Screate_simple(MISC22_SPACE_RANK, dims, NULL);
+ CHECK(sid, FAIL, "H5Screate_simple");
+
+ for (i = 0; i < 4; i++) {
+ for (j = 0; j < 4; j++) {
+ if (prec[j] > (H5Tget_size(idts[i]) * 8))
+ continue; /* skip irrelevant combination */
+ for (k = 0; k < 5; k++) {
+ if (offsets[k] > (H5Tget_size(idts[i]) * 8))
+ continue; /* skip irrelevant combinations */
+ if ((prec[j] + offsets[k]) > (H5Tget_size(idts[i]) * 8))
+ continue;
+
+                MESSAGE(5, (" Testing datatypes size=%zu precision=%u offset=%u\n", H5Tget_size(idts[i]),
+ (unsigned)prec[j], (unsigned)offsets[k]));
+
+ /* Create the DCPL */
+ dcpl = H5Pcreate(H5P_DATASET_CREATE);
+ CHECK(dcpl, FAIL, "H5Pcreate");
+
+ /* Set DCPL properties */
+ ret = H5Pset_chunk(dcpl, MISC22_SPACE_RANK, chunk_size);
+ CHECK(ret, FAIL, "H5Pset_chunk");
+ /* Set custom DCPL properties */
+ ret = H5Pset_szip(dcpl, H5_SZIP_NN_OPTION_MASK, 32); /* vary the PPB */
+ CHECK(ret, FAIL, "H5Pset_szip");
+
+ /* set up the datatype according to the loop */
+ dtype = H5Tcopy(idts[i]);
+ CHECK(dtype, FAIL, "H5Tcopy");
+ ret = H5Tset_precision(dtype, prec[j]);
+ CHECK(ret, FAIL, "H5Tset_precision");
+ ret = H5Tset_offset(dtype, offsets[k]);
+                CHECK(ret, FAIL, "H5Tset_offset");
+
+ /* compute the correct PPB that should be set by SZIP */
+ if (offsets[k] == 0)
+ correct = prec[j];
+ else
+ correct = H5Tget_size(idts[i]) * 8;
+ if (correct > 24) {
+ if (correct <= 32)
+ correct = 32;
+ else if (correct <= 64)
+ correct = 64;
+ } /* end if */
+
+ /* Create the dataset */
+ dsid = H5Dcreate2(fid, MISC22_DSET_NAME, dtype, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT);
+ CHECK(dsid, FAIL, "H5Dcreate2");
+
+ /* Write out the whole dataset */
+ ret = H5Dwrite(dsid, dtype, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* Close everything */
+ ret = H5Dclose(dsid);
+ CHECK(ret, FAIL, "H5Dclose");
+ ret = H5Tclose(dtype);
+ CHECK(ret, FAIL, "H5Tclose");
+ ret = H5Pclose(dcpl);
+ CHECK(ret, FAIL, "H5Pclose");
+
+ dsid = H5Dopen2(fid, MISC22_DSET_NAME, H5P_DEFAULT);
+ CHECK(dsid, FAIL, "H5Dopen2");
+
+ dcpl2 = H5Dget_create_plist(dsid);
+ CHECK(dcpl2, FAIL, "H5Dget_create_plist");
+
+ ret = H5Pget_filter_by_id2(dcpl2, H5Z_FILTER_SZIP, &flags, &cd_nelmts, cd_values, 0, NULL,
+ NULL);
+ CHECK(ret, FAIL, "H5Pget_filter_by_id2");
+
+ VERIFY(cd_values[2], (unsigned)correct, "SZIP filter returned value for precision");
+
+ ret = H5Dclose(dsid);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ ret = H5Ldelete(fid, MISC22_DSET_NAME, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Ldelete");
+
+ ret = H5Pclose(dcpl2);
+ CHECK(ret, FAIL, "H5Pclose");
+ }
+ }
+ }
+ ret = H5Tclose(idts[0]);
+ CHECK(ret, FAIL, "H5Tclose");
+ ret = H5Tclose(idts[1]);
+ CHECK(ret, FAIL, "H5Tclose");
+ ret = H5Tclose(idts[2]);
+ CHECK(ret, FAIL, "H5Tclose");
+ ret = H5Tclose(idts[3]);
+ CHECK(ret, FAIL, "H5Tclose");
+ ret = H5Sclose(sid);
+ CHECK(ret, FAIL, "H5Sclose");
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ HDfree(buf);
+} /* end test_misc22() */
+#endif /* H5_HAVE_FILTER_SZIP */
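+
+#if 0
+/* A minimal sketch (kept under #if 0, helper name hypothetical) of the
+ * bits-per-pixel expectation that test_misc22() verifies against cd_values[2]
+ * of the SZIP filter. It simply restates the rounding rule from the loop
+ * above: use the precision when the datatype has no offset, otherwise the
+ * full datatype width, rounded up to 32 or 64 bits once it exceeds 24.
+ */
+static size_t
+misc22_expected_szip_ppb(size_t dtype_size_bytes, size_t precision, size_t offset)
+{
+    size_t correct;
+
+    if (offset == 0)
+        correct = precision;
+    else
+        correct = dtype_size_bytes * 8;
+
+    if (correct > 24) {
+        if (correct <= 32)
+            correct = 32;
+        else if (correct <= 64)
+            correct = 64;
+    }
+
+    return correct;
+}
+#endif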
+
+/****************************************************************
+**
+** test_misc23(): Test intermediate group creation.
+**
+****************************************************************/
+static void
+test_misc23(void)
+{
+ hsize_t dims[] = {10};
+ hid_t file_id = 0, group_id = 0, type_id = 0, space_id = 0, tmp_id = 0, create_id = H5P_DEFAULT,
+ access_id = H5P_DEFAULT;
+#ifndef NO_OBJECT_GET_NAME
+ char objname[MISC23_NAME_BUF_SIZE]; /* Name of object */
+#endif
+ H5O_info2_t oinfo;
+ htri_t tri_status;
+#ifndef NO_OBJECT_GET_NAME
+ ssize_t namelen;
+#endif
+ herr_t status;
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing intermediate group creation\n"));
+
+ /* Create a new file using default properties. */
+ file_id = H5Fcreate(MISC23_FILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(file_id, FAIL, "H5Fcreate");
+
+ /* Build some infrastructure */
+ group_id = H5Gcreate2(file_id, "/A", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(group_id, FAIL, "H5Gcreate2");
+
+ space_id = H5Screate_simple(1, dims, NULL);
+ CHECK(space_id, FAIL, "H5Screate_simple");
+
+ type_id = H5Tcopy(H5T_STD_I32BE);
+ CHECK(type_id, FAIL, "H5Tcopy");
+
+#ifndef H5_NO_DEPRECATED_SYMBOLS
+ /**********************************************************************
+ * test the old APIs
+ **********************************************************************/
+
+ H5E_BEGIN_TRY
+ {
+ tmp_id = H5Gcreate1(file_id, "/A/B00a/grp", (size_t)0);
+ }
+ H5E_END_TRY;
+ VERIFY(tmp_id, FAIL, "H5Gcreate1");
+
+ /* Make sure that size_hint values that can't fit into a 32-bit
+ * unsigned integer are rejected. Only necessary on systems where
+ * size_t is a 64-bit type.
+ */
+ if (SIZE_MAX > UINT32_MAX) {
+ H5E_BEGIN_TRY
+ {
+ tmp_id = H5Gcreate1(file_id, "/size_hint_too_large", SIZE_MAX);
+ }
+ H5E_END_TRY;
+ VERIFY(tmp_id, FAIL, "H5Gcreate1");
+ }
+
+ /* Make sure the largest size_hint value works */
+ H5E_BEGIN_TRY
+ {
+ tmp_id = H5Gcreate1(file_id, "/largest_size_hint", UINT32_MAX);
+ }
+ H5E_END_TRY;
+ CHECK(tmp_id, FAIL, "H5Gcreate1");
+ status = H5Gclose(tmp_id);
+ CHECK(status, FAIL, "H5Gclose");
+
+ tmp_id = H5Gcreate1(file_id, "/A/grp", (size_t)0);
+ CHECK(tmp_id, FAIL, "H5Gcreate1");
+ status = H5Gclose(tmp_id);
+ CHECK(status, FAIL, "H5Gclose");
+
+ H5E_BEGIN_TRY
+ {
+ tmp_id = H5Dcreate1(file_id, "/A/B00c/dset", type_id, space_id, create_id);
+ }
+ H5E_END_TRY;
+ VERIFY(tmp_id, FAIL, "H5Dcreate1");
+
+ tmp_id = H5Dcreate1(file_id, "/A/dset", type_id, space_id, create_id);
+ CHECK(tmp_id, FAIL, "H5Dcreate1");
+ status = H5Dclose(tmp_id);
+ CHECK(status, FAIL, "H5Dclose");
+#endif /* H5_NO_DEPRECATED_SYMBOLS */
+
+ /**********************************************************************
+ * test H5Gcreate2()
+ **********************************************************************/
+
+ /* Create link creation property list */
+ create_id = H5Pcreate(H5P_LINK_CREATE);
+ CHECK(create_id, FAIL, "H5Pcreate");
+
+ /* Set flag for intermediate group creation */
+ status = H5Pset_create_intermediate_group(create_id, TRUE);
+ CHECK(status, FAIL, "H5Pset_create_intermediate_group");
+
+ tmp_id = H5Gcreate2(file_id, "/A/B01/grp", create_id, H5P_DEFAULT, access_id);
+ CHECK(tmp_id, FAIL, "H5Gcreate2");
+#ifndef NO_OBJECT_GET_NAME
+    /* Verify that the name of the new group is correct */
+ namelen = H5Iget_name(tmp_id, objname, (size_t)MISC23_NAME_BUF_SIZE);
+ CHECK(namelen, FAIL, "H5Iget_name");
+ VERIFY_STR(objname, "/A/B01/grp", "H5Iget_name");
+#endif
+ status = H5Gclose(tmp_id);
+ CHECK(status, FAIL, "H5Gclose");
+
+ /* Check that intermediate group is set up correctly */
+ tmp_id = H5Gopen2(file_id, "/A/B01", H5P_DEFAULT);
+ CHECK(tmp_id, FAIL, "H5Gopen2");
+
+ status = H5Oget_info3(tmp_id, &oinfo, H5O_INFO_BASIC);
+ CHECK(status, FAIL, "H5Oget_info3");
+ VERIFY(oinfo.rc, 1, "H5Oget_info3");
+
+ status = H5Gclose(tmp_id);
+ CHECK(status, FAIL, "H5Gclose");
+
+ tmp_id = H5Gcreate2(file_id, "/A/B02/C02/grp", create_id, H5P_DEFAULT, access_id);
+ CHECK(tmp_id, FAIL, "H5Gcreate2");
+
+ status = H5Gclose(tmp_id);
+ CHECK(status, FAIL, "H5Gclose");
+
+ tmp_id = H5Gcreate2(group_id, "B03/grp/", create_id, H5P_DEFAULT, access_id);
+ CHECK(tmp_id, FAIL, "H5Gcreate2");
+
+ status = H5Gclose(tmp_id);
+ CHECK(status, FAIL, "H5Gclose");
+
+ tmp_id = H5Gcreate2(group_id, "/A/B04/grp/", create_id, H5P_DEFAULT, access_id);
+ CHECK(tmp_id, FAIL, "H5Gcreate2");
+
+ status = H5Gclose(tmp_id);
+ CHECK(status, FAIL, "H5Gclose");
+
+ tmp_id = H5Gcreate2(file_id, "/A/B05/C05/A", create_id, H5P_DEFAULT, access_id);
+ CHECK(tmp_id, FAIL, "H5Gcreate2");
+
+ status = H5Gclose(tmp_id);
+ CHECK(status, FAIL, "H5Gclose");
+
+ status = H5Pclose(create_id);
+ CHECK(status, FAIL, "H5Pclose");
+
+ /**********************************************************************
+ * test H5Dcreate2()
+ **********************************************************************/
+
+ /* Create link creation property list */
+ create_id = H5Pcreate(H5P_LINK_CREATE);
+ CHECK(create_id, FAIL, "H5Pcreate");
+
+ /* Set flag for intermediate group creation */
+ status = H5Pset_create_intermediate_group(create_id, TRUE);
+ CHECK(status, FAIL, "H5Pset_create_intermediate_group");
+
+ tmp_id = H5Dcreate2(file_id, "/A/B06/dset", type_id, space_id, create_id, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(tmp_id, FAIL, "H5Dcreate2");
+
+ status = H5Dclose(tmp_id);
+ CHECK(status, FAIL, "H5Dclose");
+
+ tmp_id = H5Dcreate2(file_id, "/A/B07/B07/dset", type_id, space_id, create_id, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(tmp_id, FAIL, "H5Dcreate2");
+
+ status = H5Dclose(tmp_id);
+ CHECK(status, FAIL, "H5Dclose");
+
+ tmp_id = H5Dcreate2(group_id, "B08/dset", type_id, space_id, create_id, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(tmp_id, FAIL, "H5Dcreate2");
+
+ status = H5Dclose(tmp_id);
+ CHECK(status, FAIL, "H5Dclose");
+
+ tmp_id = H5Dcreate2(group_id, "/A/B09/dset", type_id, space_id, create_id, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(tmp_id, FAIL, "H5Dcreate2");
+
+ status = H5Dclose(tmp_id);
+ CHECK(status, FAIL, "H5Dclose");
+
+ tmp_id = H5Dcreate2(file_id, "/A/B10/C10/A/dset", type_id, space_id, create_id, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(tmp_id, FAIL, "H5Dcreate2");
+
+ status = H5Dclose(tmp_id);
+ CHECK(status, FAIL, "H5Dclose");
+
+ status = H5Tclose(type_id);
+ CHECK(status, FAIL, "H5Tclose");
+
+ status = H5Sclose(space_id);
+ CHECK(status, FAIL, "H5Sclose");
+
+ status = H5Pclose(create_id);
+ CHECK(status, FAIL, "H5Pclose");
+
+ /**********************************************************************
+ * test H5Tcommit2()
+ **********************************************************************/
+
+ /* Create link creation property list */
+ create_id = H5Pcreate(H5P_LINK_CREATE);
+ CHECK(create_id, FAIL, "H5Pcreate");
+
+ /* Set flag for intermediate group creation */
+ status = H5Pset_create_intermediate_group(create_id, TRUE);
+ CHECK(status, FAIL, "H5Pset_create_intermediate_group");
+
+ tmp_id = H5Tcopy(H5T_NATIVE_INT16);
+ CHECK(tmp_id, FAIL, "H5Tcopy");
+
+ status = H5Tcommit2(file_id, "/A/B11/dtype", tmp_id, create_id, H5P_DEFAULT, access_id);
+ CHECK(status, FAIL, "H5Tcommit2");
+
+ status = H5Tclose(tmp_id);
+ CHECK(status, FAIL, "H5Tclose");
+
+ tmp_id = H5Tcopy(H5T_NATIVE_INT32);
+ CHECK(tmp_id, FAIL, "H5Tcopy");
+
+ status = H5Tcommit2(file_id, "/A/B12/C12/dtype", tmp_id, create_id, H5P_DEFAULT, access_id);
+ CHECK(status, FAIL, "H5Tcommit2");
+
+ status = H5Tclose(tmp_id);
+ CHECK(status, FAIL, "H5Tclose");
+
+ tmp_id = H5Tcopy(H5T_NATIVE_INT64);
+ CHECK(tmp_id, FAIL, "H5Tcopy");
+
+ status = H5Tcommit2(group_id, "B13/C12/dtype", tmp_id, create_id, H5P_DEFAULT, access_id);
+ CHECK(status, FAIL, "H5Tcommit2");
+
+ status = H5Tclose(tmp_id);
+ CHECK(status, FAIL, "H5Tclose");
+
+ tmp_id = H5Tcopy(H5T_NATIVE_FLOAT);
+ CHECK(tmp_id, FAIL, "H5Tcopy");
+
+ status = H5Tcommit2(group_id, "/A/B14/dtype", tmp_id, create_id, H5P_DEFAULT, access_id);
+ CHECK(status, FAIL, "H5Tcommit2");
+
+ status = H5Tclose(tmp_id);
+ CHECK(status, FAIL, "H5Tclose");
+
+ tmp_id = H5Tcopy(H5T_NATIVE_DOUBLE);
+ CHECK(tmp_id, FAIL, "H5Tcopy");
+
+ status = H5Tcommit2(file_id, "/A/B15/C15/A/dtype", tmp_id, create_id, H5P_DEFAULT, access_id);
+ CHECK(status, FAIL, "H5Tcommit2");
+
+ status = H5Tclose(tmp_id);
+ CHECK(status, FAIL, "H5Tclose");
+
+ status = H5Pclose(create_id);
+ CHECK(status, FAIL, "H5Pclose");
+
+ /**********************************************************************
+ * test H5Lcopy()
+ **********************************************************************/
+
+ /* Create link creation property list */
+ create_id = H5Pcreate(H5P_LINK_CREATE);
+ CHECK(create_id, FAIL, "H5Pcreate");
+
+ /* Set flag for intermediate group creation */
+ status = H5Pset_create_intermediate_group(create_id, TRUE);
+ CHECK(status, FAIL, "H5Pset_create_intermediate_group");
+
+ status = H5Lcopy(file_id, "/A/B01/grp", file_id, "/A/B16/grp", create_id, access_id);
+ CHECK(status, FAIL, "H5Lcopy");
+
+ tri_status = H5Lexists(file_id, "/A/B16/grp", access_id);
+ VERIFY(tri_status, TRUE, "H5Lexists");
+
+ tri_status = H5Lexists(file_id, "/A/B01/grp", access_id);
+ VERIFY(tri_status, TRUE, "H5Lexists");
+
+ /**********************************************************************
+ * test H5Lmove()
+ **********************************************************************/
+
+ status = H5Lmove(file_id, "/A/B16/grp", file_id, "/A/B17/grp", create_id, access_id);
+ CHECK(status, FAIL, "H5Lmove");
+
+ tri_status = H5Lexists(file_id, "/A/B17/grp", access_id);
+ VERIFY(tri_status, TRUE, "H5Lexists");
+
+ tri_status = H5Lexists(file_id, "/A/B16/grp", access_id);
+ VERIFY(tri_status, FALSE, "H5Lexists");
+
+ /**********************************************************************
+ * test H5Lcreate_hard()
+ **********************************************************************/
+
+ status = H5Lcreate_hard(file_id, "/A/B01/grp", file_id, "/A/B18/grp", create_id, access_id);
+ CHECK(status, FAIL, "H5Lcreate_hard");
+
+ tri_status = H5Lexists(file_id, "/A/B18/grp", access_id);
+ VERIFY(tri_status, TRUE, "H5Lexists");
+
+ /**********************************************************************
+ * test H5Lcreate_soft()
+ **********************************************************************/
+
+ status = H5Lcreate_soft("/A/B01/grp", file_id, "/A/B19/grp", create_id, access_id);
+ CHECK(status, FAIL, "H5Lcreate_soft");
+
+ tri_status = H5Lexists(file_id, "/A/B19/grp", access_id);
+ VERIFY(tri_status, TRUE, "H5Lexists");
+
+ /**********************************************************************
+ * test H5Lcreate_external()
+ **********************************************************************/
+#ifndef NO_EXTERNAL_LINKS
+ status = H5Lcreate_external("fake_filename", "fake_path", file_id, "/A/B20/grp", create_id, access_id);
+ CHECK(status, FAIL, "H5Lcreate_external");
+
+ tri_status = H5Lexists(file_id, "/A/B20/grp", access_id);
+ VERIFY(tri_status, TRUE, "H5Lexists");
+#endif
+ /**********************************************************************
+ * test H5Lcreate_ud()
+ **********************************************************************/
+#ifndef NO_USER_DEFINED_LINKS
+ status =
+ H5Lcreate_ud(file_id, "/A/B21/grp", H5L_TYPE_EXTERNAL, "file\0obj", (size_t)9, create_id, access_id);
+ CHECK(status, FAIL, "H5Lcreate_ud");
+
+ tri_status = H5Lexists(file_id, "/A/B21/grp", access_id);
+ VERIFY(tri_status, TRUE, "H5Lexists");
+#endif
+ /**********************************************************************
+ * close
+ **********************************************************************/
+
+ status = H5Pclose(create_id);
+ CHECK(status, FAIL, "H5Pclose");
+
+ status = H5Gclose(group_id);
+ CHECK(status, FAIL, "H5Gclose");
+
+ status = H5Fclose(file_id);
+ CHECK(status, FAIL, "H5Fclose");
+
+} /* end test_misc23() */
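+
+#if 0
+/* A minimal sketch (kept under #if 0) of the intermediate-group-creation
+ * pattern that test_misc23() exercises above for groups, datasets, committed
+ * datatypes and links. The path "/a/b/c" and the helper name are
+ * hypothetical; only calls already shown in the test are used.
+ */
+static void
+misc23_intermediate_group_sketch(hid_t file_id)
+{
+    hid_t lcpl_id;
+    hid_t gid;
+
+    /* Without this flag, creating "/a/b/c" fails when "/a" and "/a/b" do not exist */
+    lcpl_id = H5Pcreate(H5P_LINK_CREATE);
+    H5Pset_create_intermediate_group(lcpl_id, TRUE);
+
+    gid = H5Gcreate2(file_id, "/a/b/c", lcpl_id, H5P_DEFAULT, H5P_DEFAULT);
+
+    H5Gclose(gid);
+    H5Pclose(lcpl_id);
+}
+#endif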
+
+/****************************************************************
+**
+** test_misc24(): Test opening objects with inappropriate APIs
+**
+****************************************************************/
+static void
+test_misc24(void)
+{
+#if 0
+ hid_t file_id = 0, group_id = 0, type_id = 0, space_id = 0, dset_id = 0, tmp_id = 0;
+ herr_t ret; /* Generic return value */
+#endif
+
+ /* Output message about test being performed */
+ MESSAGE(5,
+ ("Testing opening objects with inappropriate APIs - SKIPPED due to causing problems in HDF5\n"));
+#if 0
+ /* Create a new file using default properties. */
+ file_id = H5Fcreate(MISC24_FILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(file_id, FAIL, "H5Fcreate");
+
+ /* Create dataspace for dataset */
+ space_id = H5Screate(H5S_SCALAR);
+ CHECK(space_id, FAIL, "H5Screate");
+
+ /* Create group, dataset & named datatype objects */
+ group_id = H5Gcreate2(file_id, MISC24_GROUP_NAME, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(group_id, FAIL, "H5Gcreate2");
+
+ dset_id = H5Dcreate2(file_id, MISC24_DATASET_NAME, H5T_NATIVE_INT, space_id, H5P_DEFAULT, H5P_DEFAULT,
+ H5P_DEFAULT);
+ CHECK(dset_id, FAIL, "H5Dcreate2");
+
+ type_id = H5Tcopy(H5T_NATIVE_INT);
+ CHECK(type_id, FAIL, "H5Tcopy");
+
+ ret = H5Tcommit2(file_id, MISC24_DATATYPE_NAME, type_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Tcommit2");
+
+ /* Create soft links to the objects created */
+ ret = H5Lcreate_soft(MISC24_GROUP_NAME, file_id, MISC24_GROUP_LINK, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Lcreate_soft");
+
+ ret = H5Lcreate_soft(MISC24_DATASET_NAME, file_id, MISC24_DATASET_LINK, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Lcreate_soft");
+
+ ret = H5Lcreate_soft(MISC24_DATATYPE_NAME, file_id, MISC24_DATATYPE_LINK, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Lcreate_soft");
+
+ /* Close IDs for objects */
+ ret = H5Dclose(dset_id);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ ret = H5Sclose(space_id);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ ret = H5Gclose(group_id);
+ CHECK(ret, FAIL, "H5Gclose");
+
+ ret = H5Tclose(type_id);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ /* Attempt to open each kind of object with wrong API, including using soft links */
+ H5E_BEGIN_TRY
+ {
+ tmp_id = H5Dopen2(file_id, MISC24_GROUP_NAME, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+ VERIFY(tmp_id, FAIL, "H5Dopen2");
+
+ H5E_BEGIN_TRY
+ {
+ tmp_id = H5Dopen2(file_id, MISC24_GROUP_LINK, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+ VERIFY(tmp_id, FAIL, "H5Dopen2");
+
+ H5E_BEGIN_TRY
+ {
+ tmp_id = H5Topen2(file_id, MISC24_GROUP_NAME, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+ VERIFY(tmp_id, FAIL, "H5Topen2");
+
+ H5E_BEGIN_TRY
+ {
+ tmp_id = H5Topen2(file_id, MISC24_GROUP_LINK, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+ VERIFY(tmp_id, FAIL, "H5Topen2");
+
+ H5E_BEGIN_TRY
+ {
+ tmp_id = H5Gopen2(file_id, MISC24_DATASET_NAME, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+ VERIFY(tmp_id, FAIL, "H5Gopen2");
+
+ H5E_BEGIN_TRY
+ {
+ tmp_id = H5Gopen2(file_id, MISC24_DATASET_LINK, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+ VERIFY(tmp_id, FAIL, "H5Gopen2");
+
+ H5E_BEGIN_TRY
+ {
+ tmp_id = H5Topen2(file_id, MISC24_DATASET_NAME, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+ VERIFY(tmp_id, FAIL, "H5Topen2");
+
+ H5E_BEGIN_TRY
+ {
+ tmp_id = H5Topen2(file_id, MISC24_DATASET_LINK, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+ VERIFY(tmp_id, FAIL, "H5Topen2");
+
+ H5E_BEGIN_TRY
+ {
+ tmp_id = H5Gopen2(file_id, MISC24_DATATYPE_NAME, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+ VERIFY(tmp_id, FAIL, "H5Gopen2");
+
+ H5E_BEGIN_TRY
+ {
+ tmp_id = H5Gopen2(file_id, MISC24_DATATYPE_LINK, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+ VERIFY(tmp_id, FAIL, "H5Gopen2");
+
+ H5E_BEGIN_TRY
+ {
+ tmp_id = H5Dopen2(file_id, MISC24_DATATYPE_NAME, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+ VERIFY(tmp_id, FAIL, "H5Dopen2");
+
+ H5E_BEGIN_TRY
+ {
+ tmp_id = H5Dopen2(file_id, MISC24_DATATYPE_LINK, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+ VERIFY(tmp_id, FAIL, "H5Dopen2");
+
+ /* Try again, with the object already open through valid call */
+ /* Open group */
+ group_id = H5Gopen2(file_id, MISC24_GROUP_NAME, H5P_DEFAULT);
+ CHECK(group_id, FAIL, "H5Gopen2");
+
+ H5E_BEGIN_TRY
+ {
+ tmp_id = H5Dopen2(file_id, MISC24_GROUP_NAME, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+ VERIFY(tmp_id, FAIL, "H5Dopen2");
+
+ H5E_BEGIN_TRY
+ {
+ tmp_id = H5Dopen2(file_id, MISC24_GROUP_LINK, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+ VERIFY(tmp_id, FAIL, "H5Dopen2");
+
+ H5E_BEGIN_TRY
+ {
+ tmp_id = H5Topen2(file_id, MISC24_GROUP_NAME, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+ VERIFY(tmp_id, FAIL, "H5Topen2");
+
+ H5E_BEGIN_TRY
+ {
+ tmp_id = H5Topen2(file_id, MISC24_GROUP_LINK, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+ VERIFY(tmp_id, FAIL, "H5Topen2");
+
+ ret = H5Gclose(group_id);
+ CHECK(ret, FAIL, "H5Gclose");
+
+ /* Open dataset */
+ dset_id = H5Dopen2(file_id, MISC24_DATASET_NAME, H5P_DEFAULT);
+ CHECK(dset_id, FAIL, "H5Dopen2");
+
+ H5E_BEGIN_TRY
+ {
+ tmp_id = H5Gopen2(file_id, MISC24_DATASET_NAME, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+ VERIFY(tmp_id, FAIL, "H5Gopen2");
+
+ H5E_BEGIN_TRY
+ {
+ tmp_id = H5Gopen2(file_id, MISC24_DATASET_LINK, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+ VERIFY(tmp_id, FAIL, "H5Gopen2");
+
+ H5E_BEGIN_TRY
+ {
+ tmp_id = H5Topen2(file_id, MISC24_DATASET_NAME, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+ VERIFY(tmp_id, FAIL, "H5Topen2");
+
+ H5E_BEGIN_TRY
+ {
+ tmp_id = H5Topen2(file_id, MISC24_DATASET_LINK, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+ VERIFY(tmp_id, FAIL, "H5Topen2");
+
+ ret = H5Dclose(dset_id);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Open named datatype */
+ type_id = H5Topen2(file_id, MISC24_DATATYPE_NAME, H5P_DEFAULT);
+    CHECK(type_id, FAIL, "H5Topen2");
+
+ H5E_BEGIN_TRY
+ {
+ tmp_id = H5Gopen2(file_id, MISC24_DATATYPE_NAME, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+ VERIFY(tmp_id, FAIL, "H5Gopen2");
+
+ H5E_BEGIN_TRY
+ {
+ tmp_id = H5Gopen2(file_id, MISC24_DATATYPE_LINK, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+ VERIFY(tmp_id, FAIL, "H5Gopen2");
+
+ H5E_BEGIN_TRY
+ {
+ tmp_id = H5Dopen2(file_id, MISC24_DATATYPE_NAME, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+ VERIFY(tmp_id, FAIL, "H5Dopen2");
+
+ H5E_BEGIN_TRY
+ {
+ tmp_id = H5Dopen2(file_id, MISC24_DATATYPE_LINK, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+ VERIFY(tmp_id, FAIL, "H5Dopen2");
+
+ ret = H5Tclose(type_id);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ /* Close file */
+ ret = H5Fclose(file_id);
+ CHECK(ret, FAIL, "H5Fclose");
+#endif
+} /* end test_misc24() */
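+
+#if 0
+/* A minimal sketch (kept under #if 0, object name and helper name
+ * hypothetical) of the type-agnostic alternative to the calls that
+ * test_misc24() expects to fail: H5Oopen() succeeds for groups, datasets and
+ * committed datatypes alike, and H5Iget_type() reports what was opened.
+ */
+static void
+misc24_generic_open_sketch(hid_t file_id)
+{
+    hid_t oid = H5Oopen(file_id, "/some_object", H5P_DEFAULT);
+
+    if (oid >= 0) {
+        H5I_type_t obj_type = H5Iget_type(oid); /* H5I_GROUP, H5I_DATASET or H5I_DATATYPE */
+
+        (void)obj_type;
+        H5Oclose(oid);
+    }
+}
+#endif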
+
+/****************************************************************
+**
+** test_misc25a(): Exercise null object header message merge bug
+** with new file
+**
+****************************************************************/
+static void
+test_misc25a(void)
+{
+ hid_t fid; /* File ID */
+ hid_t gid, gid2, gid3; /* Group IDs */
+ hid_t aid; /* Attribute ID */
+ hid_t sid; /* Dataspace ID */
+ hid_t tid; /* Datatype ID */
+ herr_t ret; /* Generic return value */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Exercise null object header message bug\n"));
+
+ /* Create file */
+ fid = H5Fcreate(MISC25A_FILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(fid, FAIL, "H5Fcreate");
+
+ /* Create top group */
+ gid = H5Gcreate2(fid, MISC25A_GROUP0_NAME, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(gid, FAIL, "H5Gcreate2");
+
+ /* Close top group */
+ ret = H5Gclose(gid);
+ CHECK(ret, FAIL, "H5Gclose");
+
+ /* Create first group */
+ gid = H5Gcreate2(fid, MISC25A_GROUP1_NAME, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(gid, FAIL, "H5Gcreate2");
+
+ /* Close first group */
+ ret = H5Gclose(gid);
+ CHECK(ret, FAIL, "H5Gclose");
+
+ /* Create second group */
+ gid2 = H5Gcreate2(fid, MISC25A_GROUP2_NAME, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(gid2, FAIL, "H5Gcreate2");
+
+ /* Close second group */
+ ret = H5Gclose(gid2);
+ CHECK(ret, FAIL, "H5Gclose");
+
+ /* Close file */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Re-open file */
+ fid = H5Fopen(MISC25A_FILE, H5F_ACC_RDWR, H5P_DEFAULT);
+ CHECK(fid, FAIL, "H5Fopen");
+
+ /* Re-open first group */
+ gid = H5Gopen2(fid, MISC25A_GROUP1_NAME, H5P_DEFAULT);
+ CHECK(gid, FAIL, "H5Gopen2");
+
+ /* Create dataspace for attribute */
+ sid = H5Screate(H5S_SCALAR);
+ CHECK(sid, FAIL, "H5Screate");
+
+    /* Create datatype for attribute */
+ tid = H5Tcopy(H5T_C_S1);
+ CHECK(tid, FAIL, "H5Tcopy");
+ ret = H5Tset_size(tid, (size_t)MISC25A_ATTR1_LEN);
+ CHECK(ret, FAIL, "H5Tset_size");
+
+ /* Add 1st attribute on first group */
+ aid = H5Acreate2(gid, MISC25A_ATTR1_NAME, tid, sid, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(aid, FAIL, "H5Acreate2");
+
+ /* Close dataspace */
+ ret = H5Sclose(sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close datatype */
+ ret = H5Tclose(tid);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ /* Close attribute */
+ ret = H5Aclose(aid);
+ CHECK(ret, FAIL, "H5Aclose");
+
+ /* Create dataspace for 2nd attribute */
+ sid = H5Screate(H5S_SCALAR);
+ CHECK(sid, FAIL, "H5Screate");
+
+    /* Create datatype for attribute */
+ tid = H5Tcopy(H5T_C_S1);
+ CHECK(tid, FAIL, "H5Tcopy");
+ ret = H5Tset_size(tid, (size_t)MISC25A_ATTR2_LEN);
+ CHECK(ret, FAIL, "H5Tset_size");
+
+ /* Add 2nd attribute on first group */
+ aid = H5Acreate2(gid, MISC25A_ATTR2_NAME, tid, sid, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(aid, FAIL, "H5Acreate2");
+
+ /* Close dataspace */
+ ret = H5Sclose(sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close datatype */
+ ret = H5Tclose(tid);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ /* Close 2nd attribute */
+ ret = H5Aclose(aid);
+ CHECK(ret, FAIL, "H5Aclose");
+
+ /* Close first group */
+ ret = H5Gclose(gid);
+ CHECK(ret, FAIL, "H5Gclose");
+
+ /* Close file */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Re-open file */
+ fid = H5Fopen(MISC25A_FILE, H5F_ACC_RDWR, H5P_DEFAULT);
+ CHECK(fid, FAIL, "H5Fopen");
+
+ /* Create third group */
+ gid3 = H5Gcreate2(fid, MISC25A_GROUP3_NAME, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(gid3, FAIL, "H5Gcreate2");
+
+ /* Close third group */
+ ret = H5Gclose(gid3);
+ CHECK(ret, FAIL, "H5Gclose");
+
+ /* Re-open first group */
+ gid = H5Gopen2(fid, MISC25A_GROUP1_NAME, H5P_DEFAULT);
+ CHECK(gid, FAIL, "H5Gopen2");
+
+ /* Delete 2nd attribute */
+ ret = H5Adelete(gid, MISC25A_ATTR2_NAME);
+ CHECK(ret, FAIL, "H5Adelete");
+
+ /* Close first group */
+ ret = H5Gclose(gid);
+ CHECK(ret, FAIL, "H5Gclose");
+
+ /* Close file */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Re-open file */
+ fid = H5Fopen(MISC25A_FILE, H5F_ACC_RDWR, H5P_DEFAULT);
+ CHECK(fid, FAIL, "H5Fopen");
+
+ /* Re-open first group */
+ gid = H5Gopen2(fid, MISC25A_GROUP1_NAME, H5P_DEFAULT);
+ CHECK(gid, FAIL, "H5Gopen2");
+
+ /* Create dataspace for 3rd attribute */
+ sid = H5Screate(H5S_SCALAR);
+ CHECK(sid, FAIL, "H5Screate");
+
+    /* Create datatype for attribute */
+ tid = H5Tcopy(H5T_C_S1);
+ CHECK(tid, FAIL, "H5Tcopy");
+ ret = H5Tset_size(tid, (size_t)MISC25A_ATTR3_LEN);
+ CHECK(ret, FAIL, "H5Tset_size");
+
+ /* Add 3rd attribute on first group (smaller than 2nd attribute) */
+ aid = H5Acreate2(gid, MISC25A_ATTR3_NAME, tid, sid, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(aid, FAIL, "H5Acreate2");
+
+ /* Close dataspace */
+ ret = H5Sclose(sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close datatype */
+ ret = H5Tclose(tid);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ /* Close 3rd attribute */
+ ret = H5Aclose(aid);
+ CHECK(ret, FAIL, "H5Aclose");
+
+ /* Close first group */
+ ret = H5Gclose(gid);
+ CHECK(ret, FAIL, "H5Gclose");
+
+ /* Close file */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Re-open file */
+ fid = H5Fopen(MISC25A_FILE, H5F_ACC_RDWR, H5P_DEFAULT);
+ CHECK(fid, FAIL, "H5Fopen");
+
+ /* Re-open first group */
+ gid = H5Gopen2(fid, MISC25A_GROUP1_NAME, H5P_DEFAULT);
+ CHECK(gid, FAIL, "H5Gopen2");
+
+ /* Delete 3rd attribute */
+ ret = H5Adelete(gid, MISC25A_ATTR3_NAME);
+ CHECK(ret, FAIL, "H5Adelete");
+
+    /* Create dataspace for 2nd attribute */
+ sid = H5Screate(H5S_SCALAR);
+ CHECK(sid, FAIL, "H5Screate");
+
+    /* Create datatype for attribute */
+ tid = H5Tcopy(H5T_C_S1);
+ CHECK(tid, FAIL, "H5Tcopy");
+ ret = H5Tset_size(tid, (size_t)MISC25A_ATTR2_LEN);
+ CHECK(ret, FAIL, "H5Tset_size");
+
+ /* Re-create 2nd attribute on first group */
+ aid = H5Acreate2(gid, MISC25A_ATTR2_NAME, tid, sid, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(aid, FAIL, "H5Acreate2");
+
+ /* Close dataspace */
+ ret = H5Sclose(sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close datatype */
+ ret = H5Tclose(tid);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ /* Close 2nd attribute */
+ ret = H5Aclose(aid);
+ CHECK(ret, FAIL, "H5Aclose");
+
+ /* Close first group */
+ ret = H5Gclose(gid);
+ CHECK(ret, FAIL, "H5Gclose");
+
+ /* Close file */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Re-open file */
+ fid = H5Fopen(MISC25A_FILE, H5F_ACC_RDWR, H5P_DEFAULT);
+ CHECK(fid, FAIL, "H5Fopen");
+
+ /* Re-open first group */
+ gid = H5Gopen2(fid, MISC25A_GROUP1_NAME, H5P_DEFAULT);
+ CHECK(gid, FAIL, "H5Gopen2");
+
+ /* Delete 2nd attribute */
+ ret = H5Adelete(gid, MISC25A_ATTR2_NAME);
+ CHECK(ret, FAIL, "H5Adelete");
+
+ /* Close first group */
+ ret = H5Gclose(gid);
+ CHECK(ret, FAIL, "H5Gclose");
+
+ /* Close file */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Re-open file */
+ fid = H5Fopen(MISC25A_FILE, H5F_ACC_RDWR, H5P_DEFAULT);
+ CHECK(fid, FAIL, "H5Fopen");
+
+ /* Re-open first group */
+ gid = H5Gopen2(fid, MISC25A_GROUP1_NAME, H5P_DEFAULT);
+ CHECK(gid, FAIL, "H5Gopen2");
+
+    /* Create dataspace for 2nd attribute */
+ sid = H5Screate(H5S_SCALAR);
+ CHECK(sid, FAIL, "H5Screate");
+
+    /* Create datatype for attribute */
+ tid = H5Tcopy(H5T_C_S1);
+ CHECK(tid, FAIL, "H5Tcopy");
+ ret = H5Tset_size(tid, (size_t)MISC25A_ATTR2_LEN);
+ CHECK(ret, FAIL, "H5Tset_size");
+
+ /* Re-create 2nd attribute on first group */
+ aid = H5Acreate2(gid, MISC25A_ATTR2_NAME, tid, sid, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(aid, FAIL, "H5Acreate2");
+
+ /* Close dataspace */
+ ret = H5Sclose(sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close datatype */
+ ret = H5Tclose(tid);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ /* Close 2nd attribute */
+ ret = H5Aclose(aid);
+ CHECK(ret, FAIL, "H5Aclose");
+
+ /* Close first group */
+ ret = H5Gclose(gid);
+ CHECK(ret, FAIL, "H5Gclose");
+
+ /* Close file */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+} /* end test_misc25a() */
+
+/****************************************************************
+**
+** test_misc25b(): Exercise null object header message merge bug
+** with existing file (This test relies on
+** the file produced by test/gen_mergemsg.c)
+**
+****************************************************************/
+#if 0
+static void
+test_misc25b(void)
+{
+ hid_t fid; /* File ID */
+ hid_t gid; /* Group ID */
+ const char *testfile = H5_get_srcdir_filename(MISC25B_FILE); /* Corrected test file name */
+ hbool_t driver_is_default_compatible;
+ herr_t ret; /* Generic return value */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Exercise null object header message bug\n"));
+
+ ret = h5_driver_is_default_vfd_compatible(H5P_DEFAULT, &driver_is_default_compatible);
+ CHECK(ret, FAIL, "h5_driver_is_default_vfd_compatible");
+
+ if (!driver_is_default_compatible) {
+ HDprintf("-- SKIPPED --\n");
+ return;
+ }
+
+ /* Open file */
+ fid = H5Fopen(testfile, H5F_ACC_RDONLY, H5P_DEFAULT);
+ CHECK(fid, FAIL, "H5Fopen");
+
+ /* Re-open group with object header messages that will merge */
+ gid = H5Gopen2(fid, MISC25B_GROUP, H5P_DEFAULT);
+ CHECK(gid, FAIL, "H5Gopen2");
+
+ /* Close first group */
+ ret = H5Gclose(gid);
+ CHECK(ret, FAIL, "H5Gclose");
+
+ /* Close file */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+} /* end test_misc25b() */
+#endif
+
+/****************************************************************
+**
+** test_misc25c(): Exercise another null object header message merge bug.
+**
+****************************************************************/
+static void
+test_misc25c(void)
+{
+ hid_t fid; /* File ID */
+ hid_t fapl; /* File access property list ID */
+ hid_t gcpl; /* Group creation property list ID */
+ hid_t sid; /* Dataspace ID */
+ hid_t did; /* Dataset ID */
+ hid_t gid; /* Group ID */
+ hid_t gid2; /* Group ID */
+ hid_t aid; /* Attribute ID */
+ herr_t ret; /* Generic return value */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Exercise another null object header message bug\n"));
+
+ /* Compose file access property list */
+ fapl = H5Pcreate(H5P_FILE_ACCESS);
+ CHECK(fapl, FAIL, "H5Pcreate");
+ ret = H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST);
+ CHECK(ret, FAIL, "H5Pset_libver_bounds");
+
+ /* Create the file */
+ fid = H5Fcreate(MISC25C_FILE, H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
+ CHECK(fid, FAIL, "H5Fcreate");
+
+ /* Compose group creation property list */
+ gcpl = H5Pcreate(H5P_GROUP_CREATE);
+ CHECK(gcpl, FAIL, "H5Pcreate");
+ ret = H5Pset_link_creation_order(gcpl, (H5P_CRT_ORDER_TRACKED | H5P_CRT_ORDER_INDEXED));
+ CHECK(ret, FAIL, "H5Pset_link_creation_order");
+ ret = H5Pset_attr_creation_order(gcpl, (H5P_CRT_ORDER_TRACKED | H5P_CRT_ORDER_INDEXED));
+ CHECK(ret, FAIL, "H5Pset_attr_creation_order");
+ ret = H5Pset_est_link_info(gcpl, 1, 18);
+ CHECK(ret, FAIL, "H5Pset_est_link_info");
+
+ /* Create a group for the dataset */
+ gid = H5Gcreate2(fid, MISC25C_DSETGRPNAME, H5P_DEFAULT, gcpl, H5P_DEFAULT);
+ CHECK(gid, FAIL, "H5Gcreate2");
+
+ /* Create the dataspace */
+ sid = H5Screate(H5S_SCALAR);
+ CHECK(sid, FAIL, "H5Screate");
+
+ /* Create the dataset */
+ did = H5Dcreate2(gid, MISC25C_DSETNAME, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(did, FAIL, "H5Dcreate2");
+
+ /* Create an extra group */
+ gid2 = H5Gcreate2(fid, MISC25C_GRPNAME, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(gid2, FAIL, "H5Gcreate2");
+
+ /* Close the extra group */
+ ret = H5Gclose(gid2);
+ CHECK(ret, FAIL, "H5Gclose");
+
+ /* Add an attribute to the dataset group */
+ aid = H5Acreate2(gid, MISC25C_ATTRNAME, H5T_NATIVE_CHAR, sid, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(aid, FAIL, "H5Acreate2");
+
+ /* Close the attribute */
+ ret = H5Aclose(aid);
+ CHECK(ret, FAIL, "H5Aclose");
+
+ /* Create a second extra group */
+ gid2 = H5Gcreate2(fid, MISC25C_GRPNAME2, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(gid2, FAIL, "H5Gcreate2");
+
+ /* Close the second extra group */
+ ret = H5Gclose(gid2);
+ CHECK(ret, FAIL, "H5Gclose");
+
+ /* Add second attribute to the dataset group */
+ aid = H5Acreate2(gid, MISC25C_ATTRNAME2, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(aid, FAIL, "H5Acreate2");
+
+ /* Close the attribute */
+ ret = H5Aclose(aid);
+ CHECK(ret, FAIL, "H5Aclose");
+
+ /* Close the dataset */
+ ret = H5Dclose(did);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close the dataset group */
+ ret = H5Gclose(gid);
+ CHECK(ret, FAIL, "H5Gclose");
+
+ /* Close the dataspace */
+ ret = H5Sclose(sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close the file */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Close the property lists */
+ ret = H5Pclose(fapl);
+ CHECK(ret, FAIL, "H5Pclose");
+ ret = H5Pclose(gcpl);
+ CHECK(ret, FAIL, "H5Pclose");
+
+ /* Re-open the file */
+ fid = H5Fopen(MISC25C_FILE, H5F_ACC_RDWR, H5P_DEFAULT);
+ CHECK(fid, FAIL, "H5Fopen");
+
+ /* Re-open the dataset group */
+ gid = H5Gopen2(fid, MISC25C_DSETGRPNAME, H5P_DEFAULT);
+ CHECK(gid, FAIL, "H5Gopen2");
+
+ /* Rename the dataset */
+ ret = H5Lmove(gid, MISC25C_DSETNAME, H5L_SAME_LOC, MISC25C_DSETNAME2, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Lmove");
+
+ /* Delete the first attribute */
+ ret = H5Adelete(gid, MISC25C_ATTRNAME);
+ CHECK(ret, FAIL, "H5Adelete");
+
+ /* Close the dataset group */
+ ret = H5Gclose(gid);
+ CHECK(ret, FAIL, "H5Gclose");
+
+ /* Close the file */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+} /* end test_misc25c() */
+
+/****************************************************************
+**
+** test_misc26(): Regression test: ensure that copying filter
+** pipelines works properly.
+**
+****************************************************************/
+static void
+test_misc26(void)
+{
+ hid_t fid; /* File ID */
+ hid_t sid; /* Dataspace ID */
+ hid_t did; /* Dataset ID */
+ hid_t dcpl1, dcpl2, dcpl3; /* Property List IDs */
+ hsize_t dims[] = {1};
+ herr_t ret; /* Generic return value */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Copying filter pipelines\n"));
+
+ /* Create the property list. It needs chunking so we can add filters */
+ dcpl1 = H5Pcreate(H5P_DATASET_CREATE);
+ CHECK_I(dcpl1, "H5Pcreate");
+ ret = H5Pset_chunk(dcpl1, 1, dims);
+ CHECK_I(ret, "H5Pset_chunk");
+
+ /* Add a filter with a data value to the property list */
+ ret = H5Pset_deflate(dcpl1, 1);
+ CHECK_I(ret, "H5Pset_deflate");
+
+ /* Copy the property list */
+ dcpl2 = H5Pcopy(dcpl1);
+ CHECK_I(dcpl2, "H5Pcopy");
+
+ /* Add a filter with no data values to the copy */
+ ret = H5Pset_shuffle(dcpl2);
+ CHECK_I(ret, "H5Pset_shuffle");
+
+ /* Copy the copy */
+ dcpl3 = H5Pcopy(dcpl2);
+ CHECK_I(dcpl3, "H5Pcopy");
+
+ /* Add another filter */
+ ret = H5Pset_deflate(dcpl3, 2);
+ CHECK_I(ret, "H5Pset_deflate");
+
+ /* Create a new file and datasets within that file that use these
+ * property lists
+ */
+ fid = H5Fcreate(MISC26_FILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(fid, FAIL, "H5Fcreate");
+
+ sid = H5Screate_simple(1, dims, dims);
+ CHECK(sid, FAIL, "H5Screate_simple");
+
+ did = H5Dcreate2(fid, "dataset1", H5T_NATIVE_FLOAT, sid, H5P_DEFAULT, dcpl1, H5P_DEFAULT);
+ CHECK(did, FAIL, "H5Dcreate2");
+ ret = H5Dclose(did);
+ CHECK_I(ret, "H5Dclose");
+
+ did = H5Dcreate2(fid, "dataset2", H5T_NATIVE_FLOAT, sid, H5P_DEFAULT, dcpl2, H5P_DEFAULT);
+ CHECK(did, FAIL, "H5Dcreate2");
+ ret = H5Dclose(did);
+ CHECK_I(ret, "H5Dclose");
+
+ did = H5Dcreate2(fid, "dataset3", H5T_NATIVE_FLOAT, sid, H5P_DEFAULT, dcpl3, H5P_DEFAULT);
+ CHECK(did, FAIL, "H5Dcreate2");
+ ret = H5Dclose(did);
+ CHECK_I(ret, "H5Dclose");
+
+ /* Close the dataspace and file */
+ ret = H5Sclose(sid);
+ CHECK_I(ret, "H5Sclose");
+ ret = H5Fclose(fid);
+ CHECK_I(ret, "H5Fclose");
+
+ /* Close the property lists. */
+ ret = H5Pclose(dcpl1);
+ CHECK_I(ret, "H5Pclose");
+ ret = H5Pclose(dcpl2);
+ CHECK_I(ret, "H5Pclose");
+ ret = H5Pclose(dcpl3);
+ CHECK_I(ret, "H5Pclose");
+}
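+
+#if 0
+/* A minimal sketch (kept under #if 0, helper name hypothetical) of what
+ * "copying a filter pipeline" means in test_misc26(): filters set on the
+ * original DCPL survive H5Pcopy(), and additional filters stacked on the copy
+ * do not leak back into the original. Only public H5P calls are used.
+ */
+static void
+misc26_pipeline_copy_sketch(void)
+{
+    hsize_t chunk_dims[1] = {1};
+    hid_t   dcpl;
+    hid_t   dcpl_copy;
+    int     nfilters;
+
+    dcpl = H5Pcreate(H5P_DATASET_CREATE);
+    H5Pset_chunk(dcpl, 1, chunk_dims);
+    H5Pset_deflate(dcpl, 1); /* original pipeline: 1 filter */
+
+    dcpl_copy = H5Pcopy(dcpl);
+    H5Pset_shuffle(dcpl_copy); /* copy now has 2 filters; original still has 1 */
+
+    nfilters = H5Pget_nfilters(dcpl);
+    VERIFY(nfilters, 1, "H5Pget_nfilters");
+    nfilters = H5Pget_nfilters(dcpl_copy);
+    VERIFY(nfilters, 2, "H5Pget_nfilters");
+
+    H5Pclose(dcpl_copy);
+    H5Pclose(dcpl);
+}
+#endif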
+
+/****************************************************************
+**
+** test_misc27(): Ensure that objects with incorrect # of object
+** header messages are handled appropriately.
+**
+** (Note that this test file is generated by the "gen_bad_ohdr.c" code)
+**
+****************************************************************/
+#if 0
+static void
+test_misc27(void)
+{
+ hid_t fid; /* File ID */
+ hid_t gid; /* Group ID */
+ const char *testfile = H5_get_srcdir_filename(MISC27_FILE); /* Corrected test file name */
+ hbool_t driver_is_default_compatible;
+ herr_t ret; /* Generic return value */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Corrupt object header handling\n"));
+
+ ret = h5_driver_is_default_vfd_compatible(H5P_DEFAULT, &driver_is_default_compatible);
+ CHECK(ret, FAIL, "h5_driver_is_default_vfd_compatible");
+
+ if (!driver_is_default_compatible) {
+ HDprintf("-- SKIPPED --\n");
+ return;
+ }
+
+ /* Open the file */
+ fid = H5Fopen(testfile, H5F_ACC_RDONLY, H5P_DEFAULT);
+ CHECK(fid, FAIL, "H5Fopen");
+
+#ifdef H5_STRICT_FORMAT_CHECKS
+ /* Open group with incorrect # of object header messages (should fail) */
+ H5E_BEGIN_TRY
+ {
+ gid = H5Gopen2(fid, MISC27_GROUP, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+ VERIFY(gid, FAIL, "H5Gopen2");
+#else /* H5_STRICT_FORMAT_CHECKS */
+ /* Open group with incorrect # of object header messages */
+ gid = H5Gopen2(fid, MISC27_GROUP, H5P_DEFAULT);
+ CHECK(gid, FAIL, "H5Gopen2");
+
+ /* Close group */
+ ret = H5Gclose(gid);
+ CHECK(ret, FAIL, "H5Gclose");
+#endif /* H5_STRICT_FORMAT_CHECKS */
+
+ /* Close file */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+} /* end test_misc27() */
+#endif
+
+/****************************************************************
+**
+** test_misc28(): Ensure that the dataset chunk cache will hold
+** the correct number of chunks in cache without
+** evicting them.
+**
+****************************************************************/
+static void
+test_misc28(void)
+{
+ hid_t fid; /* File ID */
+ hid_t sidf; /* File Dataspace ID */
+ hid_t sidm; /* Memory Dataspace ID */
+ hid_t did; /* Dataset ID */
+ hid_t dcpl, fapl; /* Property List IDs */
+ hsize_t dims[] = {MISC28_SIZE, MISC28_SIZE};
+ hsize_t mdims[] = {MISC28_SIZE};
+ hsize_t cdims[] = {1, 1};
+ hsize_t start[] = {0, 0};
+ hsize_t count[] = {MISC28_SIZE, 1};
+#if 0
+ size_t nbytes_used;
+ int nused;
+#endif
+ char buf[MISC28_SIZE];
+ int i;
+ herr_t ret; /* Generic return value */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Dataset chunk cache\n"));
+
+    /* Create the fapl and set the cache size. Set the number of slots (nslots)
+     * larger than the number of chunks in the dataset so we are guaranteed
+     * that no chunks will be evicted due to a hash collision. Set nbytes to
+     * fit exactly one column of chunks (10 bytes). */
+ fapl = H5Pcreate(H5P_FILE_ACCESS);
+ CHECK(fapl, FAIL, "H5Pcreate");
+ ret = H5Pset_cache(fapl, MISC28_NSLOTS, MISC28_NSLOTS, MISC28_SIZE, 0.75);
+ CHECK(ret, FAIL, "H5Pset_cache");
+
+ /* Create the dcpl and set the chunk size */
+ dcpl = H5Pcreate(H5P_DATASET_CREATE);
+ CHECK(dcpl, FAIL, "H5Pcreate");
+ ret = H5Pset_chunk(dcpl, 2, cdims);
+ CHECK(ret, FAIL, "H5Pset_chunk");
+
+ /* Create a new file and datasets within that file that use these
+ * property lists
+ */
+ fid = H5Fcreate(MISC28_FILE, H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
+ CHECK(fid, FAIL, "H5Fcreate");
+
+ sidf = H5Screate_simple(2, dims, NULL);
+ CHECK(sidf, FAIL, "H5Screate_simple");
+
+ did = H5Dcreate2(fid, "dataset", H5T_NATIVE_CHAR, sidf, H5P_DEFAULT, dcpl, H5P_DEFAULT);
+ CHECK(did, FAIL, "H5Dcreate2");
+#if 0
+ /* Verify that the chunk cache is empty */
+ ret = H5D__current_cache_size_test(did, &nbytes_used, &nused);
+ CHECK(ret, FAIL, "H5D__current_cache_size_test");
+ VERIFY(nbytes_used, (size_t)0, "H5D__current_cache_size_test");
+ VERIFY(nused, 0, "H5D__current_cache_size_test");
+#endif
+ /* Initialize write buffer */
+ for (i = 0; i < MISC28_SIZE; i++)
+ buf[i] = (char)i;
+
+ /* Create memory dataspace and selection in file dataspace */
+ sidm = H5Screate_simple(1, mdims, NULL);
+ CHECK(sidm, FAIL, "H5Screate_simple");
+
+ ret = H5Sselect_hyperslab(sidf, H5S_SELECT_SET, start, NULL, count, NULL);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+    /* Write hyperslab */
+ ret = H5Dwrite(did, H5T_NATIVE_CHAR, sidm, sidf, H5P_DEFAULT, buf);
+ CHECK(ret, FAIL, "H5Dwrite");
+#if 0
+ /* Verify that all 10 chunks written have been cached */
+ ret = H5D__current_cache_size_test(did, &nbytes_used, &nused);
+ CHECK(ret, FAIL, "H5D__current_cache_size_test");
+ VERIFY(nbytes_used, (size_t)MISC28_SIZE, "H5D__current_cache_size_test");
+ VERIFY(nused, MISC28_SIZE, "H5D__current_cache_size_test");
+#endif
+ /* Initialize write buffer */
+ for (i = 0; i < MISC28_SIZE; i++)
+ buf[i] = (char)(MISC28_SIZE - 1 - i);
+
+ /* Select new hyperslab */
+ start[1] = 1;
+ ret = H5Sselect_hyperslab(sidf, H5S_SELECT_SET, start, NULL, count, NULL);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* Write hyperslab */
+ ret = H5Dwrite(did, H5T_NATIVE_CHAR, sidm, sidf, H5P_DEFAULT, buf);
+ CHECK(ret, FAIL, "H5Dwrite");
+#if 0
+ /* Verify that the size of the cache remains at 10 */
+ ret = H5D__current_cache_size_test(did, &nbytes_used, &nused);
+ CHECK(ret, FAIL, "H5D__current_cache_size_test");
+ VERIFY(nbytes_used, (size_t)MISC28_SIZE, "H5D__current_cache_size_test");
+ VERIFY(nused, MISC28_SIZE, "H5D__current_cache_size_test");
+#endif
+ /* Close dataset */
+ ret = H5Dclose(did);
+ CHECK(ret, FAIL, "H5Dclose");
+
+    /* Re-open dataset */
+ did = H5Dopen2(fid, "dataset", H5P_DEFAULT);
+ CHECK(did, FAIL, "H5Dopen2");
+#if 0
+ /* Verify that the chunk cache is empty */
+ ret = H5D__current_cache_size_test(did, &nbytes_used, &nused);
+ CHECK(ret, FAIL, "H5D__current_cache_size_test");
+ VERIFY(nbytes_used, (size_t)0, "H5D__current_cache_size_test");
+ VERIFY(nused, 0, "H5D__current_cache_size_test");
+#endif
+    /* Select hyperslab for reading */
+ start[1] = 0;
+ ret = H5Sselect_hyperslab(sidf, H5S_SELECT_SET, start, NULL, count, NULL);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+    /* Read hyperslab */
+ ret = H5Dread(did, H5T_NATIVE_CHAR, sidm, sidf, H5P_DEFAULT, buf);
+ CHECK(ret, FAIL, "H5Dread");
+
+ /* Verify the data read */
+ for (i = 0; i < MISC28_SIZE; i++)
+ VERIFY(buf[i], i, "H5Dread");
+#if 0
+ /* Verify that all 10 chunks read have been cached */
+ ret = H5D__current_cache_size_test(did, &nbytes_used, &nused);
+ CHECK(ret, FAIL, "H5D__current_cache_size_test");
+ VERIFY(nbytes_used, (size_t)MISC28_SIZE, "H5D__current_cache_size_test");
+ VERIFY(nused, MISC28_SIZE, "H5D__current_cache_size_test");
+#endif
+ /* Select new hyperslab */
+ start[1] = 1;
+ ret = H5Sselect_hyperslab(sidf, H5S_SELECT_SET, start, NULL, count, NULL);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* Read hyperslab */
+ ret = H5Dread(did, H5T_NATIVE_CHAR, sidm, sidf, H5P_DEFAULT, buf);
+ CHECK(ret, FAIL, "H5Dread");
+
+ /* Verify the data read */
+ for (i = 0; i < MISC28_SIZE; i++)
+ VERIFY(buf[i], MISC28_SIZE - 1 - i, "H5Dread");
+#if 0
+ /* Verify that the size of the cache remains at 10 */
+ ret = H5D__current_cache_size_test(did, &nbytes_used, &nused);
+ CHECK(ret, FAIL, "H5D__current_cache_size_test");
+ VERIFY(nbytes_used, (size_t)MISC28_SIZE, "H5D__current_cache_size_test");
+ VERIFY(nused, MISC28_SIZE, "H5D__current_cache_size_test");
+#endif
+ /* Close dataset */
+ ret = H5Dclose(did);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close the dataspaces and file */
+ ret = H5Sclose(sidf);
+ CHECK_I(ret, "H5Sclose");
+ ret = H5Sclose(sidm);
+ CHECK_I(ret, "H5Sclose");
+ ret = H5Fclose(fid);
+ CHECK_I(ret, "H5Fclose");
+
+ /* Close the property lists. */
+ ret = H5Pclose(dcpl);
+ CHECK_I(ret, "H5Pclose");
+ ret = H5Pclose(fapl);
+ CHECK_I(ret, "H5Pclose");
+} /* end test_misc28() */
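+
+#if 0
+/* A minimal sketch (kept under #if 0, helper name and numeric values
+ * hypothetical) of the chunk cache tuning used by test_misc28(): the
+ * file-wide raw data chunk cache is sized with H5Pset_cache() on a file
+ * access property list, and the same three parameters can be overridden per
+ * dataset with H5Pset_chunk_cache() on a dataset access property list.
+ */
+static void
+misc28_chunk_cache_sketch(void)
+{
+    hid_t fapl = H5Pcreate(H5P_FILE_ACCESS);
+    hid_t dapl = H5Pcreate(H5P_DATASET_ACCESS);
+
+    /* File-wide cache: slots, total bytes, preemption policy (mdc_nelmts is ignored) */
+    H5Pset_cache(fapl, 0, 10007, 10, 0.75);
+
+    /* Per-dataset override of slots, bytes and preemption policy */
+    H5Pset_chunk_cache(dapl, 10007, 10, 0.75);
+
+    H5Pclose(dapl);
+    H5Pclose(fapl);
+}
+#endif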
+
+/****************************************************************
+**
+** test_misc29(): Ensure that speculative metadata reads don't
+** get raw data into the metadata accumulator.
+**
+****************************************************************/
+#if 0
+static void
+test_misc29(void)
+{
+ hbool_t driver_is_default_compatible;
+ hid_t fid; /* File ID */
+ herr_t ret; /* Generic return value */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Speculative metadata reads\n"));
+
+ ret = h5_driver_is_default_vfd_compatible(H5P_DEFAULT, &driver_is_default_compatible);
+ CHECK(ret, FAIL, "h5_driver_is_default_vfd_compatible");
+
+ if (!driver_is_default_compatible) {
+ HDprintf("-- SKIPPED --\n");
+ return;
+ }
+
+    /* Make a local copy of the original data file from the source tree. */
+ ret = h5_make_local_copy(MISC29_ORIG_FILE, MISC29_COPY_FILE);
+ CHECK(ret, -1, "h5_make_local_copy");
+
+ /* Open the copied file */
+ fid = H5Fopen(MISC29_COPY_FILE, H5F_ACC_RDWR, H5P_DEFAULT);
+ CHECK(fid, FAIL, "H5Fopen");
+
+ /* Delete the last dataset */
+ ret = H5Ldelete(fid, MISC29_DSETNAME, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Ldelete");
+
+ /* Close the file */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+} /* end test_misc29() */
+#endif
+
+#if 0
+static int
+test_misc30_get_info_cb(hid_t loc_id, const char *name, const H5L_info2_t H5_ATTR_UNUSED *info,
+ void H5_ATTR_UNUSED *op_data)
+{
+ H5O_info2_t object_info;
+
+ return H5Oget_info_by_name3(loc_id, name, &object_info, H5O_INFO_BASIC, H5P_DEFAULT);
+}
+
+static int
+test_misc30_get_info(hid_t loc_id)
+{
+ return H5Literate2(loc_id, H5_INDEX_NAME, H5_ITER_INC, NULL, test_misc30_get_info_cb, NULL);
+}
+#endif
+
+/****************************************************************
+**
+** test_misc30(): Exercise local heap code that loads prefix
+** separately from data block, causing the free
+** block information to get lost.
+**
+****************************************************************/
+#if 0
+static void
+test_misc30(void)
+{
+ hsize_t file_size[] = {0, 0}; /* Sizes of file created */
+ unsigned get_info; /* Whether to perform the get info call */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Local heap dropping free block info\n"));
+
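+    /* Run the loop body twice: pass 0 skips the H5Literate2() get-info calls,
+     * pass 1 performs them via test_misc30_get_info(), and the two resulting
+     * file sizes are compared at the end to verify that the extra metadata
+     * reads do not change the final file size (i.e. the local heap free block
+     * info is not lost).
+     */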
+ for (get_info = FALSE; get_info <= TRUE; get_info++) {
+ hid_t fid; /* File ID */
+ hid_t gid; /* Group ID */
+ int i; /* Local index counter */
+ herr_t ret; /* Generic return value */
+
+ fid = H5Fcreate(MISC30_FILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(fid, FAIL, "H5Fcreate");
+ gid = H5Gcreate2(fid, "/g0", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(gid, FAIL, "H5Gcreate2");
+
+ ret = H5Gclose(gid);
+ CHECK(ret, FAIL, "H5Gclose");
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ for (i = 0; i < 20; i++) {
+ char gname[32];
+
+ fid = H5Fopen(MISC30_FILE, H5F_ACC_RDWR, H5P_DEFAULT);
+ CHECK(fid, FAIL, "H5Fopen");
+
+ if (get_info) {
+ ret = test_misc30_get_info(fid);
+ CHECK(ret, FAIL, "test_misc30_get_info");
+ }
+
+ HDsnprintf(gname, sizeof(gname), "/g0/group%d", i);
+ gid = H5Gcreate2(fid, gname, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(gid, FAIL, "H5Gcreate2");
+
+ ret = H5Gclose(gid);
+ CHECK(ret, FAIL, "H5Gclose");
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+ }
+
+ fid = H5Fopen(MISC30_FILE, H5F_ACC_RDONLY, H5P_DEFAULT);
+ CHECK(fid, FAIL, "H5Fopen");
+ ret = H5Fget_filesize(fid, &file_size[get_info]);
+        CHECK(ret, FAIL, "H5Fget_filesize");
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+ }
+
+ VERIFY(file_size[0], file_size[1], "test_misc30");
+} /* end test_misc30() */
+#endif
+
+/****************************************************************
+**
+** test_misc31(): Test reentering library through deprecated
+**              routines that register an id after calling
+**              H5close().
+**
+****************************************************************/
+#if 0
+static void
+test_misc31(void)
+{
+#ifndef H5_NO_DEPRECATED_SYMBOLS
+ hid_t file_id; /* File id */
+ hid_t space_id; /* Dataspace id */
+ hid_t dset_id; /* Dataset id */
+ hid_t attr_id; /* Attribute id */
+ hid_t group_id; /* Group id */
+ hid_t dtype_id; /* Datatype id */
+ herr_t ret; /* Generic return value */
+#endif /* H5_NO_DEPRECATED_SYMBOLS */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Deprecated routines initialize after H5close()\n"));
+
+#ifndef H5_NO_DEPRECATED_SYMBOLS
+ file_id = H5Fcreate(MISC31_FILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(file_id, FAIL, "H5Fcreate");
+
+ /* Test dataset package */
+ space_id = H5Screate(H5S_SCALAR);
+ CHECK(space_id, FAIL, "H5Screate");
+ dset_id = H5Dcreate1(file_id, MISC31_DSETNAME, H5T_NATIVE_INT, space_id, H5P_DEFAULT);
+ CHECK(dset_id, FAIL, "H5Dcreate1");
+ ret = H5close();
+ CHECK(ret, FAIL, "H5close");
+ file_id = H5Fopen(MISC31_FILE, H5F_ACC_RDWR, H5P_DEFAULT);
+ CHECK(file_id, FAIL, "H5Fopen");
+ dset_id = H5Dopen1(file_id, MISC31_DSETNAME);
+ CHECK(dset_id, FAIL, "H5Dopen1");
+
+ /* Test attribute package */
+ space_id = H5Screate(H5S_SCALAR);
+ CHECK(space_id, FAIL, "H5Screate");
+ attr_id = H5Acreate1(dset_id, MISC31_ATTRNAME1, H5T_NATIVE_INT, space_id, H5P_DEFAULT);
+ CHECK(attr_id, FAIL, "H5Acreate1");
+ ret = H5close();
+ CHECK(ret, FAIL, "H5close");
+ file_id = H5Fopen(MISC31_FILE, H5F_ACC_RDWR, H5P_DEFAULT);
+ CHECK(file_id, FAIL, "H5Fopen");
+ dset_id = H5Dopen1(file_id, MISC31_DSETNAME);
+ CHECK(dset_id, FAIL, "H5Dopen1");
+ space_id = H5Screate(H5S_SCALAR);
+ CHECK(space_id, FAIL, "H5Screate");
+ attr_id = H5Acreate1(dset_id, MISC31_ATTRNAME2, H5T_NATIVE_INT, space_id, H5P_DEFAULT);
+ CHECK(attr_id, FAIL, "H5Acreate1");
+
+ /* Test group package */
+ group_id = H5Gcreate1(file_id, MISC31_GROUPNAME, 0);
+ CHECK(group_id, FAIL, "H5Gcreate1");
+ ret = H5close();
+ CHECK(ret, FAIL, "H5close");
+ file_id = H5Fopen(MISC31_FILE, H5F_ACC_RDWR, H5P_DEFAULT);
+ CHECK(file_id, FAIL, "H5Fopen");
+ group_id = H5Gopen1(file_id, MISC31_GROUPNAME);
+ CHECK(group_id, FAIL, "H5Gopen1");
+
+ /* Test property list package */
+ ret = H5Pregister1(H5P_OBJECT_CREATE, MISC31_PROPNAME, 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL);
+ CHECK(ret, FAIL, "H5Pregister1");
+ ret = H5close();
+ CHECK(ret, FAIL, "H5close");
+ ret = H5Pregister1(H5P_OBJECT_CREATE, MISC31_PROPNAME, 0, NULL, NULL, NULL, NULL, NULL, NULL, NULL);
+ CHECK(ret, FAIL, "H5Pregister1");
+ ret = H5close();
+ CHECK(ret, FAIL, "H5close");
+
+ /* Test datatype package */
+ file_id = H5Fopen(MISC31_FILE, H5F_ACC_RDWR, H5P_DEFAULT);
+ CHECK(file_id, FAIL, "H5Fopen");
+ dtype_id = H5Tcopy(H5T_NATIVE_INT);
+ CHECK(dtype_id, FAIL, "H5Tcopy");
+ ret = H5Tcommit1(file_id, MISC31_DTYPENAME, dtype_id);
+ CHECK(ret, FAIL, "H5Tcommit1");
+ ret = H5close();
+ CHECK(ret, FAIL, "H5close");
+ file_id = H5Fopen(MISC31_FILE, H5F_ACC_RDWR, H5P_DEFAULT);
+ CHECK(file_id, FAIL, "H5Fopen");
+ dtype_id = H5Topen1(file_id, MISC31_DTYPENAME);
+    CHECK(dtype_id, FAIL, "H5Topen1");
+ ret = H5Fclose(file_id);
+ CHECK(ret, FAIL, "H5Fclose");
+ ret = H5Tclose(dtype_id);
+ CHECK(ret, FAIL, "H5Tclose");
+
+#else /* H5_NO_DEPRECATED_SYMBOLS */
+ /* Output message about test being skipped */
+ MESSAGE(5, (" ...Skipped"));
+#endif /* H5_NO_DEPRECATED_SYMBOLS */
+} /* end test_misc31() */
+#endif
+
+/****************************************************************
+**
+** test_misc32(): Simple test of filter memory allocation
+**                functions.
+**
+****************************************************************/
+static void
+test_misc32(void)
+{
+ void *buffer;
+ void *resized;
+ size_t size;
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Edge case test of filter memory allocation functions\n"));
+
+ /* Test that the filter memory allocation functions behave correctly
+ * at edge cases.
+ */
+
+ /* FREE */
+
+ /* Test freeing a NULL pointer.
+ * No real confirmation check here, but Valgrind will confirm no
+ * shenanigans.
+ */
+ buffer = NULL;
+ H5free_memory(buffer);
+
+ /* ALLOCATE */
+
+ /* Size zero returns NULL.
+ * Also checks that a size of zero and setting the buffer clear flag
+ * to TRUE can be used together.
+ *
+ * Note that we have asserts in the code, so only check when NDEBUG
+ * is defined.
+ */
+#ifdef NDEBUG
+ buffer = H5allocate_memory(0, FALSE);
+ CHECK_PTR_NULL(buffer, "H5allocate_memory"); /*BAD*/
+ buffer = H5allocate_memory(0, TRUE);
+ CHECK_PTR_NULL(buffer, "H5allocate_memory"); /*BAD*/
+#endif /* NDEBUG */
+
+ /* RESIZE */
+
+ /* Size zero returns NULL. Valgrind will confirm buffer is freed. */
+ size = 1024;
+ buffer = H5allocate_memory(size, TRUE);
+ resized = H5resize_memory(buffer, 0);
+ CHECK_PTR_NULL(resized, "H5resize_memory");
+
+ /* NULL input pointer returns new buffer */
+ resized = H5resize_memory(NULL, 1024);
+ CHECK_PTR(resized, "H5resize_memory");
+ H5free_memory(resized);
+
+ /* NULL input pointer and size zero returns NULL */
+#ifdef NDEBUG
+ resized = H5resize_memory(NULL, 0);
+ CHECK_PTR_NULL(resized, "H5resize_memory"); /*BAD*/
+#endif /* NDEBUG */
+
+} /* end test_misc32() */
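+
+#if 0
+/* Illustrative sketch only (kept disabled): the H5allocate_memory()/
+ * H5resize_memory()/H5free_memory() routines exercised by test_misc32() above
+ * are intended for third-party filter callbacks, which must return buffers
+ * allocated with the library's allocator.  The no-op filter below is
+ * hypothetical; only the allocation pattern is the point.
+ */
+static size_t
+sketch_noop_filter(unsigned int H5_ATTR_UNUSED flags, size_t H5_ATTR_UNUSED cd_nelmts,
+                   const unsigned int H5_ATTR_UNUSED cd_values[], size_t nbytes,
+                   size_t *buf_size, void **buf)
+{
+    void *new_buf;
+
+    /* Allocate a replacement buffer with the library allocator */
+    if (NULL == (new_buf = H5allocate_memory(nbytes, FALSE)))
+        return 0; /* A return value of 0 signals filter failure */
+
+    /* A real filter would compress/decompress here; this one just copies */
+    HDmemcpy(new_buf, *buf, nbytes);
+
+    /* Hand the new buffer back and release the old one with the same allocator */
+    H5free_memory(*buf);
+    *buf      = new_buf;
+    *buf_size = nbytes;
+
+    return nbytes;
+}
+#endif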
+
+/****************************************************************
+**
+** test_misc33(): Test for H5FFV-10216
+**      --verify that H5HL_offset_into() returns an error if the
+**        input parameter "offset" exceeds the heap data block size.
+**      --case (1), (2), (3) are scenarios that will traverse to
+**        the 3 locations in the file having bad offset values to
+** the heap. (See description in gen_bad_offset.c)
+**
+****************************************************************/
+#if 0
+static void
+test_misc33(void)
+{
+ hid_t fid = -1; /* File ID */
+ const char *testfile = H5_get_srcdir_filename(MISC33_FILE); /* Corrected test file name */
+ H5O_info2_t oinfo; /* Structure for object metadata information */
+ hbool_t driver_is_default_compatible;
+ herr_t ret; /* Generic return value */
+
+ /* Output message about test being performed */
+    MESSAGE(5, ("Testing that a bad offset into the heap returns an error"));
+
+ ret = h5_driver_is_default_vfd_compatible(H5P_DEFAULT, &driver_is_default_compatible);
+ CHECK(ret, FAIL, "h5_driver_is_default_vfd_compatible");
+
+ if (!driver_is_default_compatible) {
+ HDprintf("-- SKIPPED --\n");
+ return;
+ }
+
+ /* Open the test file */
+ fid = H5Fopen(testfile, H5F_ACC_RDONLY, H5P_DEFAULT);
+ CHECK(fid, FAIL, "H5Fopen");
+
+ /* Case (1) */
+ H5E_BEGIN_TRY
+ {
+ ret = H5Oget_info_by_name3(fid, "/soft_two", &oinfo, H5O_INFO_BASIC, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+ VERIFY(ret, FAIL, "H5Oget_info_by_name3");
+
+ /* Case (2) */
+ H5E_BEGIN_TRY
+ {
+ ret = H5Oget_info_by_name3(fid, "/dsetA", &oinfo, H5O_INFO_BASIC, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+ VERIFY(ret, FAIL, "H5Oget_info_by_name3");
+
+ /* Case (3) */
+ H5E_BEGIN_TRY
+ {
+ ret = H5Oget_info_by_name3(fid, "/soft_one", &oinfo, H5O_INFO_BASIC, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+ VERIFY(ret, FAIL, "H5Oget_info_by_name3");
+
+ /* Close the file */
+ ret = H5Fclose(fid);
+    CHECK(ret, FAIL, "H5Fclose");
+
+} /* end test_misc33() */
+#endif
+
+/****************************************************************
+**
+** test_misc34(): Ensure zero-size memory allocations work
+**
+****************************************************************/
+#if 0
+static void
+test_misc34(void)
+{
+ void *mem = NULL; /* allocated buffer */
+ char *dup = NULL; /* 'duplicated' string */
+ size_t sz = 0; /* buffer size */
+
+ /* Output message about test being performed */
+    MESSAGE(5, ("Testing 0 and NULL behavior in H5MM API calls"));
+
+ /* H5MM_xfree(): Ensure that passing NULL is allowed and returns NULL */
+ mem = H5MM_xfree(mem);
+ CHECK_PTR_NULL(mem, "H5MM_xfree");
+
+ /* H5MM_realloc(): Check behavior:
+ *
+ * H5MM_realloc(NULL, size) <==> H5MM_malloc(size)
+ * H5MM_realloc(ptr, 0) <==> H5MM_xfree(ptr)
+ * H5MM_realloc(NULL, 0) <==> NULL
+ */
+ mem = H5MM_xfree(mem);
+
+ sz = 1024;
+ mem = H5MM_realloc(mem, sz);
+ CHECK_PTR(mem, "H5MM_realloc (case 1)");
+ /* Don't free mem here! */
+
+ sz = 0;
+ mem = H5MM_realloc(mem, sz);
+ CHECK_PTR_NULL(mem, "H5MM_realloc (case 2)");
+ mem = H5MM_xfree(mem);
+
+ mem = H5MM_realloc(mem, sz);
+ CHECK_PTR_NULL(mem, "H5MM_realloc (case 3)");
+ mem = H5MM_xfree(mem);
+
+ /* H5MM_xstrdup(): Ensure NULL returns NULL */
+ dup = H5MM_xstrdup((const char *)mem);
+ CHECK_PTR_NULL(dup, "H5MM_xstrdup");
+ dup = (char *)H5MM_xfree((void *)dup);
+
+} /* end test_misc34() */
+
+/****************************************************************
+**
+** test_misc35(): Check operation of free-list routines
+**
+****************************************************************/
+static void
+test_misc35(void)
+{
+ hid_t sid = H5I_INVALID_HID; /* Dataspace ID */
+ hsize_t dims[] = {MISC35_SPACE_DIM1, MISC35_SPACE_DIM2, MISC35_SPACE_DIM3}; /* Dataspace dims */
+ hsize_t coord[MISC35_NPOINTS][MISC35_SPACE_RANK] = /* Coordinates for point selection */
+ {{0, 10, 5}, {1, 2, 7}, {2, 4, 9}, {0, 6, 11}, {1, 8, 13},
+ {2, 12, 0}, {0, 14, 2}, {1, 0, 4}, {2, 1, 6}, {0, 3, 8}};
+ size_t reg_size_start; /* Initial amount of regular memory allocated */
+ size_t arr_size_start; /* Initial amount of array memory allocated */
+ size_t blk_size_start; /* Initial amount of block memory allocated */
+ size_t fac_size_start; /* Initial amount of factory memory allocated */
+ size_t reg_size_final; /* Final amount of regular memory allocated */
+ size_t arr_size_final; /* Final amount of array memory allocated */
+ size_t blk_size_final; /* Final amount of block memory allocated */
+ size_t fac_size_final; /* Final amount of factory memory allocated */
+ herr_t ret; /* Return value */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Free-list API calls"));
+
+ /* Create dataspace */
+ /* (Allocates array free-list nodes) */
+ sid = H5Screate_simple(MISC35_SPACE_RANK, dims, NULL);
+ CHECK(sid, H5I_INVALID_HID, "H5Screate_simple");
+
+ /* Select sequence of ten points */
+ ret = H5Sselect_elements(sid, H5S_SELECT_SET, (size_t)MISC35_NPOINTS, (const hsize_t *)coord);
+ CHECK(ret, FAIL, "H5Sselect_elements");
+
+ /* Close dataspace */
+ ret = H5Sclose(sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Retrieve initial free list values */
+ ret = H5get_free_list_sizes(&reg_size_start, &arr_size_start, &blk_size_start, &fac_size_start);
+ CHECK(ret, FAIL, "H5get_free_list_sizes");
+
+#if !defined H5_NO_FREE_LISTS && !defined H5_USING_MEMCHECKER
+ /* All the free list values should be >0 */
+ CHECK(reg_size_start, 0, "H5get_free_list_sizes");
+ CHECK(arr_size_start, 0, "H5get_free_list_sizes");
+ CHECK(blk_size_start, 0, "H5get_free_list_sizes");
+ CHECK(fac_size_start, 0, "H5get_free_list_sizes");
+#else
+ /* All the values should be == 0 */
+ VERIFY(reg_size_start, 0, "H5get_free_list_sizes");
+ VERIFY(arr_size_start, 0, "H5get_free_list_sizes");
+ VERIFY(blk_size_start, 0, "H5get_free_list_sizes");
+ VERIFY(fac_size_start, 0, "H5get_free_list_sizes");
+#endif
+
+ /* Garbage collect the free lists */
+ ret = H5garbage_collect();
+ CHECK(ret, FAIL, "H5garbage_collect");
+
+ /* Retrieve free list values again */
+ ret = H5get_free_list_sizes(&reg_size_final, &arr_size_final, &blk_size_final, &fac_size_final);
+ CHECK(ret, FAIL, "H5get_free_list_sizes");
+
+ /* All the free list values should be <= previous values */
+ if (reg_size_final > reg_size_start)
+ ERROR("reg_size_final > reg_size_start");
+ if (arr_size_final > arr_size_start)
+ ERROR("arr_size_final > arr_size_start");
+ if (blk_size_final > blk_size_start)
+ ERROR("blk_size_final > blk_size_start");
+ if (fac_size_final > fac_size_start)
+ ERROR("fac_size_final > fac_size_start");
+
+} /* end test_misc35() */
+#endif
+
+/* Context to pass to 'atclose' callbacks */
+static int test_misc36_context;
+
+/* 'atclose' callbacks for test_misc36 */
+static void
+test_misc36_cb1(void *_ctx)
+{
+ int *ctx = (int *)_ctx; /* Set up context pointer */
+ hbool_t is_terminating; /* Flag indicating the library is terminating */
+ herr_t ret; /* Return value */
+
+ /* Check whether the library thinks it's terminating */
+ is_terminating = FALSE;
+ ret = H5is_library_terminating(&is_terminating);
+ CHECK(ret, FAIL, "H5is_library_terminating");
+ VERIFY(is_terminating, TRUE, "H5is_library_terminating");
+
+ /* Verify correct ordering for 'atclose' callbacks */
+ if (0 != *ctx)
+ HDabort();
+
+ /* Update context value */
+ *ctx = 1;
+}
+
+static void
+test_misc36_cb2(void *_ctx)
+{
+ int *ctx = (int *)_ctx; /* Set up context pointer */
+ hbool_t is_terminating; /* Flag indicating the library is terminating */
+ herr_t ret; /* Return value */
+
+ /* Check whether the library thinks it's terminating */
+ is_terminating = FALSE;
+ ret = H5is_library_terminating(&is_terminating);
+ CHECK(ret, FAIL, "H5is_library_terminating");
+ VERIFY(is_terminating, TRUE, "H5is_library_terminating");
+
+ /* Verify correct ordering for 'atclose' callbacks */
+ if (1 != *ctx)
+ HDabort();
+
+ /* Update context value */
+ *ctx = 2;
+}
+
+/****************************************************************
+**
+** test_misc36(): Exercise H5atclose and H5is_library_terminating
+**
+****************************************************************/
+static void
+test_misc36(void)
+{
+ hbool_t is_terminating; /* Flag indicating the library is terminating */
+ herr_t ret; /* Return value */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("H5atclose and H5is_library_terminating API calls"));
+
+ /* Check whether the library thinks it's terminating */
+ is_terminating = TRUE;
+ ret = H5is_library_terminating(&is_terminating);
+ CHECK(ret, FAIL, "H5is_library_terminating");
+ VERIFY(is_terminating, FALSE, "H5is_library_terminating");
+
+ /* Shut the library down */
+ test_misc36_context = 0;
+ H5close();
+
+ /* Check whether the library thinks it's terminating */
+ is_terminating = TRUE;
+ ret = H5is_library_terminating(&is_terminating);
+ CHECK(ret, FAIL, "H5is_library_terminating");
+ VERIFY(is_terminating, FALSE, "H5is_library_terminating");
+
+ /* Check the close context was not changed */
+ VERIFY(test_misc36_context, 0, "H5atclose");
+
+ /* Restart the library */
+ H5open();
+
+ /* Check whether the library thinks it's terminating */
+ is_terminating = TRUE;
+ ret = H5is_library_terminating(&is_terminating);
+ CHECK(ret, FAIL, "H5is_library_terminating");
+ VERIFY(is_terminating, FALSE, "H5is_library_terminating");
+
+ /* Register the 'atclose' callbacks */
+ /* (Note that these will be called in reverse order, which is checked) */
+ ret = H5atclose(&test_misc36_cb2, &test_misc36_context);
+ CHECK(ret, FAIL, "H5atclose");
+ ret = H5atclose(&test_misc36_cb1, &test_misc36_context);
+ CHECK(ret, FAIL, "H5atclose");
+
+ /* Shut the library down */
+ test_misc36_context = 0;
+ H5close();
+
+ /* Check the close context was changed correctly */
+ VERIFY(test_misc36_context, 2, "H5atclose");
+
+ /* Restart the library */
+ H5open();
+
+ /* Close the library again */
+ test_misc36_context = 0;
+ H5close();
+
+ /* Check the close context was not changed */
+ VERIFY(test_misc36_context, 0, "H5atclose");
+} /* end test_misc36() */
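+
+#if 0
+/* Illustrative sketch only (kept disabled): typical application-side use of
+ * the H5atclose() hook exercised by test_misc36() above.  The callback and
+ * context structure are hypothetical.
+ */
+struct sketch_app_state {
+    int open_handles;
+};
+
+static void
+sketch_app_shutdown_cb(void *_ctx)
+{
+    struct sketch_app_state *ctx = (struct sketch_app_state *)_ctx;
+
+    /* Runs while the library is terminating (explicit H5close() or atexit) */
+    ctx->open_handles = 0;
+}
+
+static void
+sketch_register_shutdown(struct sketch_app_state *state)
+{
+    /* Callbacks registered later run earlier (reverse order, as checked above) */
+    (void)H5atclose(&sketch_app_shutdown_cb, state);
+}
+#endif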
+
+#if 0
+/****************************************************************
+**
+** test_misc37():
+** Test for seg fault issue when closing the provided test file
+** which has an illegal file size in its cache image.
+** See HDFFV-11052/CVE-2020-10812 for details.
+**
+****************************************************************/
+static void
+test_misc37(void)
+{
+ const char *testfile = H5_get_srcdir_filename(CVE_2020_10812_FILENAME);
+ hbool_t driver_is_default_compatible;
+ hid_t fid;
+ herr_t ret;
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Fix for HDFFV-11052/CVE-2020-10812"));
+
+ ret = h5_driver_is_default_vfd_compatible(H5P_DEFAULT, &driver_is_default_compatible);
+ CHECK(ret, FAIL, "h5_driver_is_default_vfd_compatible");
+
+ if (!driver_is_default_compatible) {
+ HDprintf("-- SKIPPED --\n");
+ return;
+ }
+
+ fid = H5Fopen(testfile, H5F_ACC_RDONLY, H5P_DEFAULT);
+ CHECK(fid, FAIL, "H5Fopen");
+
+ /* This should fail due to the illegal file size.
+ It should fail gracefully and not seg fault */
+ H5E_BEGIN_TRY
+ {
+ ret = H5Fclose(fid);
+ }
+ H5E_END_TRY;
+ VERIFY(ret, FAIL, "H5Fclose");
+
+} /* end test_misc37() */
+#endif
+
+/****************************************************************
+**
+** test_misc(): Main misc. test routine.
+**
+****************************************************************/
+void
+test_misc(void)
+{
+ hbool_t default_driver = h5_using_default_driver(NULL);
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing Miscellaneous Routines\n"));
+
+ test_misc1(); /* Test unlinking a dataset & immediately re-using name */
+ test_misc2(); /* Test storing a VL-derived datatype in two different files */
+ test_misc3(); /* Test reading from chunked dataset with non-zero fill value */
+ test_misc4(); /* Test retrieving the fileno for various objects with H5Oget_info() */
+ test_misc5(); /* Test several level deep nested compound & VL datatypes */
+ test_misc6(); /* Test object header continuation code */
+#if 0
+ test_misc7(); /* Test for sensible datatypes stored on disk */
+ test_misc8(); /* Test storage sizes of various types of dataset storage */
+#endif
+ test_misc9(); /* Test for opening (not creating) core files */
+#if 0
+ test_misc10(); /* Test for using dataset creation property lists from old files */
+#endif
+
+ if (default_driver) {
+ test_misc11(); /* Test for all properties of a file creation property list being stored */
+ }
+
+ test_misc12(); /* Test VL-strings in chunked datasets operating correctly */
+#if 0
+ if (default_driver) {
+        test_misc13(); /* Test that a user block can be inserted in front of file contents */
+ }
+#endif
+ test_misc14(); /* Test that deleted dataset's data is removed from sieve buffer correctly */
+ test_misc15(); /* Test that checking a file's access property list more than once works */
+ test_misc16(); /* Test array of fixed-length string */
+ test_misc17(); /* Test array of ASCII character */
+ test_misc18(); /* Test new object header information in H5O_info2_t struct */
+ test_misc19(); /* Test incrementing & decrementing ref count on IDs */
+#if 0
+ test_misc20(); /* Test problems with truncated dimensions in version 2 of storage layout message */
+#endif
+#if defined(H5_HAVE_FILTER_SZIP) && !defined(H5_API_TEST_NO_FILTERS)
+    test_misc21(); /* Test that "late" allocation time is treated the same as "incremental", for chunked
+                      datasets with filters */
+ test_misc22(); /* check szip bits per pixel */
+#endif /* H5_HAVE_FILTER_SZIP */
+ test_misc23(); /* Test intermediate group creation */
+ test_misc24(); /* Test inappropriate API opens of objects */
+ test_misc25a(); /* Exercise null object header message merge bug */
+#if 0
+ test_misc25b(); /* Exercise null object header message merge bug on existing file */
+#endif
+ test_misc25c(); /* Exercise another null object header message merge bug */
+ test_misc26(); /* Test closing property lists with long filter pipelines */
+#if 0
+ test_misc27(); /* Test opening file with object that has bad # of object header messages */
+#endif
+ test_misc28(); /* Test that chunks are cached appropriately */
+#if 0
+ test_misc29(); /* Test that speculative metadata reads are handled correctly */
+ test_misc30(); /* Exercise local heap loading bug where free lists were getting dropped */
+
+ if (default_driver) {
+        test_misc31(); /* Test reentering the library through deprecated routines after H5close() */
+ }
+#endif
+ test_misc32(); /* Test filter memory allocation functions */
+#if 0
+ test_misc33(); /* Test to verify that H5HL_offset_into() returns error if offset exceeds heap block */
+ test_misc34(); /* Test behavior of 0 and NULL in H5MM API calls */
+ test_misc35(); /* Test behavior of free-list & allocation statistics API calls */
+#endif
+ test_misc36(); /* Exercise H5atclose and H5is_library_terminating */
+#if 0
+ test_misc37(); /* Test for seg fault failure at file close */
+#endif
+} /* test_misc() */
+
+/*-------------------------------------------------------------------------
+ * Function: cleanup_misc
+ *
+ * Purpose: Cleanup temporary test files
+ *
+ * Return: none
+ *
+ * Programmer: Albert Cheng
+ * July 2, 1998
+ *-------------------------------------------------------------------------
+ */
+void
+cleanup_misc(void)
+{
+ H5Fdelete(MISC1_FILE, H5P_DEFAULT);
+ H5Fdelete(MISC2_FILE_1, H5P_DEFAULT);
+ H5Fdelete(MISC2_FILE_2, H5P_DEFAULT);
+ H5Fdelete(MISC3_FILE, H5P_DEFAULT);
+ H5Fdelete(MISC4_FILE_1, H5P_DEFAULT);
+ H5Fdelete(MISC4_FILE_2, H5P_DEFAULT);
+ H5Fdelete(MISC5_FILE, H5P_DEFAULT);
+ H5Fdelete(MISC6_FILE, H5P_DEFAULT);
+ H5Fdelete(MISC7_FILE, H5P_DEFAULT);
+ H5Fdelete(MISC8_FILE, H5P_DEFAULT);
+ H5Fdelete(MISC9_FILE, H5P_DEFAULT);
+ H5Fdelete(MISC10_FILE_NEW, H5P_DEFAULT);
+ H5Fdelete(MISC11_FILE, H5P_DEFAULT);
+ H5Fdelete(MISC12_FILE, H5P_DEFAULT);
+ H5Fdelete(MISC13_FILE_1, H5P_DEFAULT);
+ H5Fdelete(MISC13_FILE_2, H5P_DEFAULT);
+ H5Fdelete(MISC14_FILE, H5P_DEFAULT);
+ H5Fdelete(MISC15_FILE, H5P_DEFAULT);
+ H5Fdelete(MISC16_FILE, H5P_DEFAULT);
+ H5Fdelete(MISC17_FILE, H5P_DEFAULT);
+ H5Fdelete(MISC18_FILE, H5P_DEFAULT);
+ H5Fdelete(MISC19_FILE, H5P_DEFAULT);
+ H5Fdelete(MISC20_FILE, H5P_DEFAULT);
+#if defined(H5_HAVE_FILTER_SZIP) && !defined(H5_API_TEST_NO_FILTERS)
+ H5Fdelete(MISC21_FILE, H5P_DEFAULT);
+ H5Fdelete(MISC22_FILE, H5P_DEFAULT);
+#endif /* H5_HAVE_FILTER_SZIP */
+ H5Fdelete(MISC23_FILE, H5P_DEFAULT);
+ H5Fdelete(MISC24_FILE, H5P_DEFAULT);
+ H5Fdelete(MISC25A_FILE, H5P_DEFAULT);
+ H5Fdelete(MISC25C_FILE, H5P_DEFAULT);
+ H5Fdelete(MISC26_FILE, H5P_DEFAULT);
+ H5Fdelete(MISC28_FILE, H5P_DEFAULT);
+ H5Fdelete(MISC29_COPY_FILE, H5P_DEFAULT);
+ H5Fdelete(MISC30_FILE, H5P_DEFAULT);
+#ifndef H5_NO_DEPRECATED_SYMBOLS
+ H5Fdelete(MISC31_FILE, H5P_DEFAULT);
+#endif /* H5_NO_DEPRECATED_SYMBOLS */
+} /* end cleanup_misc() */
diff --git a/test/API/trefer.c b/test/API/trefer.c
new file mode 100644
index 0000000..af0b11b
--- /dev/null
+++ b/test/API/trefer.c
@@ -0,0 +1,3641 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://www.hdfgroup.org/licenses. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/***********************************************************
+ *
+ * Test program: trefer
+ *
+ * Test the Reference functionality
+ *
+ *************************************************************/
+
+#include "testhdf5.h"
+
+#define FILE_REF_PARAM "trefer_param.h5"
+#define FILE_REF_OBJ "trefer_obj.h5"
+#define FILE_REF_VL_OBJ "trefer_vl_obj.h5"
+#define FILE_REF_CMPND_OBJ "trefer_cmpnd_obj.h5"
+#define FILE_REF_REG "trefer_reg.h5"
+#define FILE_REF_REG_1D "trefer_reg_1d.h5"
+#define FILE_REF_OBJ_DEL "trefer_obj_del.h5"
+#define FILE_REF_GRP "trefer_grp.h5"
+#define FILE_REF_ATTR "trefer_attr.h5"
+#define FILE_REF_EXT1 "trefer_ext1.h5"
+#define FILE_REF_EXT2 "trefer_ext2.h5"
+#define FILE_REF_COMPAT "trefer_compat.h5"
+
+/* 1-D dataset with fixed dimensions */
+#define SPACE1_RANK 1
+#define SPACE1_DIM1 4
+
+/* 2-D dataset with fixed dimensions */
+#define SPACE2_RANK 2
+#define SPACE2_DIM1 10
+#define SPACE2_DIM2 10
+
+/* Larger 1-D dataset with fixed dimensions */
+#define SPACE3_RANK 1
+#define SPACE3_DIM1 100
+
+/* Element selection information */
+#define POINT1_NPOINTS 10
+
+/* Compound datatype */
+typedef struct s1_t {
+ unsigned int a;
+ unsigned int b;
+ float c;
+} s1_t;
+
+/* Compound datatype with reference */
+typedef struct s2_t {
+ H5R_ref_t ref0; /* reference */
+ H5R_ref_t ref1; /* reference */
+ H5R_ref_t ref2; /* reference */
+ H5R_ref_t ref3; /* reference */
+ unsigned int dim_idx; /* dimension index of the dataset */
+} s2_t;
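+
+/* Each H5R_ref_t member above is registered in the on-disk compound as an
+ * H5T_STD_REF field via H5Tinsert() (see test_reference_cmpnd_obj() below),
+ * so a single dataset element carries four object references plus an index.
+ */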
+
+#define GROUPNAME "/group"
+#define GROUPNAME2 "group2"
+#define GROUPNAME3 "group3"
+#define DSETNAME "/dset"
+#define DSETNAME2 "dset2"
+#define NAME_SIZE 16
+
+#define MAX_ITER_CREATE 1000
+#define MAX_ITER_WRITE MAX_ITER_CREATE
+#define MAX_ITER_READ MAX_ITER_CREATE
+
+/****************************************************************
+**
+** test_reference_params(): Test basic H5R (reference) parameters
+** for correct processing
+**
+****************************************************************/
+static void
+test_reference_params(void)
+{
+ hid_t fid1; /* HDF5 File IDs */
+ hid_t dataset, /* Dataset ID */
+ dset2; /* Dereferenced dataset ID */
+ hid_t group; /* Group ID */
+ hid_t attr; /* Attribute ID */
+ hid_t sid1; /* Dataspace ID */
+ hid_t tid1; /* Datatype ID */
+ hid_t aapl_id; /* Attribute access property list */
+ hid_t dapl_id; /* Dataset access property list */
+ hsize_t dims1[] = {SPACE1_DIM1};
+ H5R_ref_t *wbuf, /* buffer to write to disk */
+ *rbuf, /* buffer read from disk */
+ *tbuf; /* temp. buffer read from disk */
+ unsigned *obuf;
+ H5R_type_t type; /* Reference type */
+ unsigned int i; /* Counters */
+#if 0
+ const char *write_comment = "Foo!"; /* Comments for group */
+#endif
+ hid_t ret_id; /* Generic hid_t return value */
+ ssize_t name_size; /* Size of reference name */
+ herr_t ret; /* Generic return value */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing Reference Parameters\n"));
+
+ /* Allocate write & read buffers */
+ wbuf = (H5R_ref_t *)HDcalloc(sizeof(H5R_ref_t), SPACE1_DIM1);
+ rbuf = (H5R_ref_t *)HDcalloc(sizeof(H5R_ref_t), SPACE1_DIM1);
+ tbuf = (H5R_ref_t *)HDcalloc(sizeof(H5R_ref_t), SPACE1_DIM1);
+ obuf = HDcalloc(sizeof(unsigned), SPACE1_DIM1);
+
+ for (i = 0; i < SPACE1_DIM1; i++)
+ obuf[i] = i * 3;
+
+ /* Create file */
+ fid1 = H5Fcreate(FILE_REF_PARAM, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(fid1, H5I_INVALID_HID, "H5Fcreate");
+
+ /* Create dataspace for datasets */
+ sid1 = H5Screate_simple(SPACE1_RANK, dims1, NULL);
+ CHECK(sid1, H5I_INVALID_HID, "H5Screate_simple");
+
+ /* Create attribute access property list */
+ aapl_id = H5Pcreate(H5P_ATTRIBUTE_ACCESS);
+ CHECK(aapl_id, H5I_INVALID_HID, "H5Pcreate");
+
+ /* Create dataset access property list */
+ dapl_id = H5Pcreate(H5P_DATASET_ACCESS);
+ CHECK(dapl_id, H5I_INVALID_HID, "H5Pcreate");
+
+ /* Create a group */
+ group = H5Gcreate2(fid1, "Group1", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(group, H5I_INVALID_HID, "H5Gcreate2");
+#if 0
+ /* Set group's comment */
+ ret = H5Oset_comment(group, write_comment);
+ CHECK(ret, FAIL, "H5Oset_comment");
+#endif
+ /* Create a dataset (inside Group1) */
+ dataset = H5Dcreate2(group, "Dataset1", H5T_NATIVE_UINT, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(dataset, H5I_INVALID_HID, "H5Dcreate2");
+
+ /* Write selection to disk */
+ ret = H5Dwrite(dataset, H5T_NATIVE_UINT, H5S_ALL, H5S_ALL, H5P_DEFAULT, obuf);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* Close Dataset */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Create another dataset (inside Group1) */
+ dataset = H5Dcreate2(group, "Dataset2", H5T_NATIVE_UCHAR, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(dataset, H5I_INVALID_HID, "H5Dcreate2");
+
+ /* Create an attribute for the dataset */
+ attr = H5Acreate2(dataset, "Attr", H5T_NATIVE_UINT, sid1, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(attr, H5I_INVALID_HID, "H5Acreate2");
+
+ /* Write attribute to disk */
+ ret = H5Awrite(attr, H5T_NATIVE_UINT, obuf);
+ CHECK(ret, FAIL, "H5Awrite");
+
+ /* Close attribute */
+ ret = H5Aclose(attr);
+ CHECK(ret, FAIL, "H5Aclose");
+
+ /* Close Dataset */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Create a datatype to refer to */
+ tid1 = H5Tcreate(H5T_COMPOUND, sizeof(s1_t));
+ CHECK(tid1, H5I_INVALID_HID, "H5Tcreate");
+
+ /* Insert fields */
+ ret = H5Tinsert(tid1, "a", HOFFSET(s1_t, a), H5T_NATIVE_INT);
+ CHECK(ret, FAIL, "H5Tinsert");
+
+ ret = H5Tinsert(tid1, "b", HOFFSET(s1_t, b), H5T_NATIVE_INT);
+ CHECK(ret, FAIL, "H5Tinsert");
+
+ ret = H5Tinsert(tid1, "c", HOFFSET(s1_t, c), H5T_NATIVE_FLOAT);
+ CHECK(ret, FAIL, "H5Tinsert");
+
+ /* Save datatype for later */
+ ret = H5Tcommit2(group, "Datatype1", tid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Tcommit2");
+
+ /* Close datatype */
+ ret = H5Tclose(tid1);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ /* Close group */
+ ret = H5Gclose(group);
+ CHECK(ret, FAIL, "H5Gclose");
+
+ /* Create a dataset */
+ dataset = H5Dcreate2(fid1, "Dataset3", H5T_STD_REF, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+    CHECK(dataset, H5I_INVALID_HID, "H5Dcreate2");
+
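+    /* Each negative test below wraps the call in H5E_BEGIN_TRY/H5E_END_TRY to
+     * suppress the expected error stack output, then VERIFYs that the call
+     * really failed.
+     */
+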
+ /* Test parameters to H5Rcreate_object */
+ H5E_BEGIN_TRY
+ {
+ ret = H5Rcreate_object(fid1, "/Group1/Dataset1", H5P_DEFAULT, NULL);
+ }
+ H5E_END_TRY;
+ VERIFY(ret, FAIL, "H5Rcreate_object ref");
+ H5E_BEGIN_TRY
+ {
+ ret = H5Rcreate_object(H5I_INVALID_HID, "/Group1/Dataset1", H5P_DEFAULT, &wbuf[0]);
+ }
+ H5E_END_TRY;
+ VERIFY(ret, FAIL, "H5Rcreate_object loc_id");
+ H5E_BEGIN_TRY
+ {
+ ret = H5Rcreate_object(fid1, NULL, H5P_DEFAULT, &wbuf[0]);
+ }
+ H5E_END_TRY;
+ VERIFY(ret, FAIL, "H5Rcreate_object name");
+ H5E_BEGIN_TRY
+ {
+ ret = H5Rcreate_object(fid1, "", H5P_DEFAULT, &wbuf[0]);
+ }
+ H5E_END_TRY;
+ VERIFY(ret, FAIL, "H5Rcreate_object null name");
+
+ /* Test parameters to H5Rcreate_region */
+ H5E_BEGIN_TRY
+ {
+ ret = H5Rcreate_region(fid1, "/Group1/Dataset1", sid1, H5P_DEFAULT, NULL);
+ }
+ H5E_END_TRY;
+ VERIFY(ret, FAIL, "H5Rcreate_region ref");
+ H5E_BEGIN_TRY
+ {
+ ret = H5Rcreate_region(H5I_INVALID_HID, "/Group1/Dataset1", sid1, H5P_DEFAULT, &wbuf[0]);
+ }
+ H5E_END_TRY;
+ VERIFY(ret, FAIL, "H5Rcreate_region loc_id");
+ H5E_BEGIN_TRY
+ {
+ ret = H5Rcreate_region(fid1, NULL, sid1, H5P_DEFAULT, &wbuf[0]);
+ }
+ H5E_END_TRY;
+ VERIFY(ret, FAIL, "H5Rcreate_region name");
+ H5E_BEGIN_TRY
+ {
+ ret = H5Rcreate_region(fid1, "/Group1/Dataset1", H5I_INVALID_HID, H5P_DEFAULT, &wbuf[0]);
+ }
+ H5E_END_TRY;
+ VERIFY(ret, FAIL, "H5Rcreate_region dataspace");
+
+ /* Test parameters to H5Rcreate_attr */
+ H5E_BEGIN_TRY
+ {
+ ret = H5Rcreate_attr(fid1, "/Group1/Dataset2", "Attr", H5P_DEFAULT, NULL);
+ }
+ H5E_END_TRY;
+ VERIFY(ret, FAIL, "H5Rcreate_attr ref");
+ H5E_BEGIN_TRY
+ {
+ ret = H5Rcreate_attr(H5I_INVALID_HID, "/Group1/Dataset2", "Attr", H5P_DEFAULT, &wbuf[0]);
+ }
+ H5E_END_TRY;
+ VERIFY(ret, FAIL, "H5Rcreate_attr loc_id");
+ H5E_BEGIN_TRY
+ {
+ ret = H5Rcreate_attr(fid1, NULL, "Attr", H5P_DEFAULT, &wbuf[0]);
+ }
+ H5E_END_TRY;
+ VERIFY(ret, FAIL, "H5Rcreate_attr name");
+ H5E_BEGIN_TRY
+ {
+ ret = H5Rcreate_attr(fid1, "/Group1/Dataset2", NULL, H5P_DEFAULT, &wbuf[0]);
+ }
+ H5E_END_TRY;
+ VERIFY(ret, FAIL, "H5Rcreate_attr attr_name");
+
+ /* Test parameters to H5Rdestroy */
+ H5E_BEGIN_TRY
+ {
+ ret = H5Rdestroy(NULL);
+ }
+ H5E_END_TRY;
+ VERIFY(ret, FAIL, "H5Rdestroy");
+
+ /* Test parameters to H5Rget_type */
+ H5E_BEGIN_TRY
+ {
+ type = H5Rget_type(NULL);
+ }
+ H5E_END_TRY;
+ VERIFY(type, H5R_BADTYPE, "H5Rget_type ref");
+
+ /* Test parameters to H5Requal */
+ H5E_BEGIN_TRY
+ {
+ ret = H5Requal(NULL, &rbuf[0]);
+ }
+ H5E_END_TRY;
+ VERIFY(ret, FAIL, "H5Requal ref1");
+ H5E_BEGIN_TRY
+ {
+ ret = H5Requal(&rbuf[0], NULL);
+ }
+ H5E_END_TRY;
+ VERIFY(ret, FAIL, "H5Requal ref2");
+
+ /* Test parameters to H5Rcopy */
+ H5E_BEGIN_TRY
+ {
+ ret = H5Rcopy(NULL, &wbuf[0]);
+ }
+ H5E_END_TRY;
+ VERIFY(ret, FAIL, "H5Rcopy src_ref");
+ H5E_BEGIN_TRY
+ {
+ ret = H5Rcopy(&rbuf[0], NULL);
+ }
+ H5E_END_TRY;
+ VERIFY(ret, FAIL, "H5Rcopy dest_ref");
+
+ /* Test parameters to H5Ropen_object */
+ H5E_BEGIN_TRY
+ {
+ dset2 = H5Ropen_object(&rbuf[0], H5I_INVALID_HID, H5I_INVALID_HID);
+ }
+ H5E_END_TRY;
+ VERIFY(dset2, H5I_INVALID_HID, "H5Ropen_object oapl_id");
+ H5E_BEGIN_TRY
+ {
+ dset2 = H5Ropen_object(NULL, H5P_DEFAULT, dapl_id);
+ }
+ H5E_END_TRY;
+ VERIFY(dset2, H5I_INVALID_HID, "H5Ropen_object ref");
+
+ /* Test parameters to H5Ropen_region */
+ H5E_BEGIN_TRY
+ {
+ ret_id = H5Ropen_region(NULL, H5I_INVALID_HID, H5I_INVALID_HID);
+ }
+ H5E_END_TRY;
+ VERIFY(ret_id, H5I_INVALID_HID, "H5Ropen_region ref");
+
+ /* Test parameters to H5Ropen_attr */
+ H5E_BEGIN_TRY
+ {
+ ret_id = H5Ropen_attr(NULL, H5P_DEFAULT, aapl_id);
+ }
+ H5E_END_TRY;
+ VERIFY(ret_id, H5I_INVALID_HID, "H5Ropen_attr ref");
+
+ /* Test parameters to H5Rget_obj_type3 */
+ H5E_BEGIN_TRY
+ {
+ ret = H5Rget_obj_type3(NULL, H5P_DEFAULT, NULL);
+ }
+ H5E_END_TRY;
+ VERIFY(ret, FAIL, "H5Rget_obj_type3 ref");
+
+ /* Test parameters to H5Rget_file_name */
+ H5E_BEGIN_TRY
+ {
+ name_size = H5Rget_file_name(NULL, NULL, 0);
+ }
+ H5E_END_TRY;
+ VERIFY(name_size, (-1), "H5Rget_file_name ref");
+
+ /* Test parameters to H5Rget_obj_name */
+ H5E_BEGIN_TRY
+ {
+ name_size = H5Rget_obj_name(NULL, H5P_DEFAULT, NULL, 0);
+ }
+ H5E_END_TRY;
+ VERIFY(name_size, (-1), "H5Rget_obj_name ref");
+
+ /* Test parameters to H5Rget_attr_name */
+ H5E_BEGIN_TRY
+ {
+ name_size = H5Rget_attr_name(NULL, NULL, 0);
+ }
+ H5E_END_TRY;
+ VERIFY(name_size, (-1), "H5Rget_attr_name ref");
+
+ /* Close disk dataspace */
+ ret = H5Sclose(sid1);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close dataset access property list */
+ ret = H5Pclose(dapl_id);
+ CHECK(ret, FAIL, "H5Pclose");
+
+ /* Close attribute access property list */
+ ret = H5Pclose(aapl_id);
+ CHECK(ret, FAIL, "H5Pclose");
+
+ /* Close Dataset */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close file */
+ ret = H5Fclose(fid1);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Free memory buffers */
+ HDfree(wbuf);
+ HDfree(rbuf);
+ HDfree(tbuf);
+ HDfree(obuf);
+} /* test_reference_params() */
+
+/****************************************************************
+**
+** test_reference_obj(): Test basic H5R (reference) object reference code.
+** Tests references to various kinds of objects
+**
+****************************************************************/
+static void
+test_reference_obj(void)
+{
+ hid_t fid1; /* HDF5 File IDs */
+ hid_t dataset, /* Dataset ID */
+ dset2; /* Dereferenced dataset ID */
+ hid_t group; /* Group ID */
+ hid_t sid1; /* Dataspace ID */
+ hid_t tid1; /* Datatype ID */
+ hsize_t dims1[] = {SPACE1_DIM1};
+ hid_t dapl_id; /* Dataset access property list */
+ H5R_ref_t *wbuf, /* buffer to write to disk */
+ *rbuf; /* buffer read from disk */
+ unsigned *ibuf, *obuf;
+ unsigned i, j; /* Counters */
+ H5O_type_t obj_type; /* Object type */
+ herr_t ret; /* Generic return value */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing Object Reference Functions\n"));
+
+ /* Allocate write & read buffers */
+ wbuf = HDcalloc(sizeof(H5R_ref_t), SPACE1_DIM1);
+ rbuf = HDcalloc(sizeof(H5R_ref_t), SPACE1_DIM1);
+ ibuf = HDcalloc(sizeof(unsigned), SPACE1_DIM1);
+ obuf = HDcalloc(sizeof(unsigned), SPACE1_DIM1);
+
+ for (i = 0; i < SPACE1_DIM1; i++)
+ obuf[i] = i * 3;
+
+ /* Create file */
+ fid1 = H5Fcreate(FILE_REF_OBJ, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(fid1, H5I_INVALID_HID, "H5Fcreate");
+
+ /* Create dataspace for datasets */
+ sid1 = H5Screate_simple(SPACE1_RANK, dims1, NULL);
+ CHECK(sid1, H5I_INVALID_HID, "H5Screate_simple");
+
+ /* Create dataset access property list */
+ dapl_id = H5Pcreate(H5P_DATASET_ACCESS);
+ CHECK(dapl_id, H5I_INVALID_HID, "H5Pcreate");
+
+ /* Create a group */
+ group = H5Gcreate2(fid1, "Group1", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(group, H5I_INVALID_HID, "H5Gcreate2");
+
+ /* Create a dataset (inside Group1) */
+ dataset = H5Dcreate2(group, "Dataset1", H5T_NATIVE_UINT, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(dataset, H5I_INVALID_HID, "H5Dcreate2");
+
+ /* Write selection to disk */
+ ret = H5Dwrite(dataset, H5T_NATIVE_UINT, H5S_ALL, H5S_ALL, H5P_DEFAULT, obuf);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* Close Dataset */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Create another dataset (inside Group1) */
+ dataset = H5Dcreate2(group, "Dataset2", H5T_NATIVE_UCHAR, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+    CHECK(dataset, H5I_INVALID_HID, "H5Dcreate2");
+
+ /* Close Dataset */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Create a datatype to refer to */
+ tid1 = H5Tcreate(H5T_COMPOUND, sizeof(s1_t));
+ CHECK(tid1, H5I_INVALID_HID, "H5Tcreate");
+
+ /* Insert fields */
+ ret = H5Tinsert(tid1, "a", HOFFSET(s1_t, a), H5T_NATIVE_INT);
+ CHECK(ret, FAIL, "H5Tinsert");
+
+ ret = H5Tinsert(tid1, "b", HOFFSET(s1_t, b), H5T_NATIVE_INT);
+ CHECK(ret, FAIL, "H5Tinsert");
+
+ ret = H5Tinsert(tid1, "c", HOFFSET(s1_t, c), H5T_NATIVE_FLOAT);
+ CHECK(ret, FAIL, "H5Tinsert");
+
+ /* Save datatype for later */
+ ret = H5Tcommit2(group, "Datatype1", tid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Tcommit2");
+
+ /* Close datatype */
+ ret = H5Tclose(tid1);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ /* Close group */
+ ret = H5Gclose(group);
+ CHECK(ret, FAIL, "H5Gclose");
+
+ /* Create a dataset */
+ dataset = H5Dcreate2(fid1, "Dataset3", H5T_STD_REF, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(dataset, H5I_INVALID_HID, "H5Dcreate2");
+
+ /* Create reference to dataset */
+ ret = H5Rcreate_object(fid1, "/Group1/Dataset1", H5P_DEFAULT, &wbuf[0]);
+ CHECK(ret, FAIL, "H5Rcreate_object");
+ ret = H5Rget_obj_type3(&wbuf[0], H5P_DEFAULT, &obj_type);
+ CHECK(ret, FAIL, "H5Rget_obj_type3");
+ VERIFY(obj_type, H5O_TYPE_DATASET, "H5Rget_obj_type3");
+
+ /* Create reference to dataset */
+ ret = H5Rcreate_object(fid1, "/Group1/Dataset2", H5P_DEFAULT, &wbuf[1]);
+ CHECK(ret, FAIL, "H5Rcreate_object");
+ ret = H5Rget_obj_type3(&wbuf[1], H5P_DEFAULT, &obj_type);
+ CHECK(ret, FAIL, "H5Rget_obj_type3");
+ VERIFY(obj_type, H5O_TYPE_DATASET, "H5Rget_obj_type3");
+
+ /* Create reference to group */
+ ret = H5Rcreate_object(fid1, "/Group1", H5P_DEFAULT, &wbuf[2]);
+ CHECK(ret, FAIL, "H5Rcreate_object");
+ ret = H5Rget_obj_type3(&wbuf[2], H5P_DEFAULT, &obj_type);
+ CHECK(ret, FAIL, "H5Rget_obj_type3");
+ VERIFY(obj_type, H5O_TYPE_GROUP, "H5Rget_obj_type3");
+
+ /* Create reference to named datatype */
+ ret = H5Rcreate_object(fid1, "/Group1/Datatype1", H5P_DEFAULT, &wbuf[3]);
+ CHECK(ret, FAIL, "H5Rcreate_object");
+ ret = H5Rget_obj_type3(&wbuf[3], H5P_DEFAULT, &obj_type);
+ CHECK(ret, FAIL, "H5Rget_obj_type3");
+ VERIFY(obj_type, H5O_TYPE_NAMED_DATATYPE, "H5Rget_obj_type3");
+
+ /* Write selection to disk */
+ ret = H5Dwrite(dataset, H5T_STD_REF, H5S_ALL, H5S_ALL, H5P_DEFAULT, wbuf);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* Close disk dataspace */
+ ret = H5Sclose(sid1);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close Dataset */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close file */
+ ret = H5Fclose(fid1);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Re-open the file */
+ fid1 = H5Fopen(FILE_REF_OBJ, H5F_ACC_RDWR, H5P_DEFAULT);
+ CHECK(fid1, H5I_INVALID_HID, "H5Fopen");
+
+ /* Open the dataset */
+ dataset = H5Dopen2(fid1, "/Dataset3", H5P_DEFAULT);
+ CHECK(dataset, H5I_INVALID_HID, "H5Dopen2");
+
+ /* Read selection from disk */
+ ret = H5Dread(dataset, H5T_STD_REF, H5S_ALL, H5S_ALL, H5P_DEFAULT, rbuf);
+ CHECK(ret, FAIL, "H5Dread");
+
+ /* Open dataset object */
+ dset2 = H5Ropen_object(&rbuf[0], H5P_DEFAULT, dapl_id);
+ CHECK(dset2, H5I_INVALID_HID, "H5Ropen_object");
+
+ /* Check information in referenced dataset */
+ sid1 = H5Dget_space(dset2);
+ CHECK(sid1, H5I_INVALID_HID, "H5Dget_space");
+
+ ret = (int)H5Sget_simple_extent_npoints(sid1);
+ VERIFY(ret, SPACE1_DIM1, "H5Sget_simple_extent_npoints");
+
+ /* Read from disk */
+ ret = H5Dread(dset2, H5T_NATIVE_UINT, H5S_ALL, H5S_ALL, H5P_DEFAULT, ibuf);
+ CHECK(ret, FAIL, "H5Dread");
+
+ for (i = 0; i < SPACE1_DIM1; i++)
+ VERIFY(ibuf[i], i * 3, "Data");
+
+ /* Close dereferenced Dataset */
+ ret = H5Dclose(dset2);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Open group object. GAPL isn't supported yet. But it's harmless to pass in */
+ group = H5Ropen_object(&rbuf[2], H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(group, H5I_INVALID_HID, "H5Ropen_object");
+
+ /* Close group */
+ ret = H5Gclose(group);
+ CHECK(ret, FAIL, "H5Gclose");
+
+ /* Open datatype object. TAPL isn't supported yet. But it's harmless to pass in */
+ tid1 = H5Ropen_object(&rbuf[3], H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(tid1, H5I_INVALID_HID, "H5Ropen_object");
+
+ /* Verify correct datatype */
+ {
+ H5T_class_t tclass;
+
+ tclass = H5Tget_class(tid1);
+ VERIFY(tclass, H5T_COMPOUND, "H5Tget_class");
+
+ ret = H5Tget_nmembers(tid1);
+ VERIFY(ret, 3, "H5Tget_nmembers");
+ }
+
+ /* Close datatype */
+ ret = H5Tclose(tid1);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ /* Close Dataset */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close dataset access property list */
+ ret = H5Pclose(dapl_id);
+ CHECK(ret, FAIL, "H5Pclose");
+
+ /* Close file */
+ ret = H5Fclose(fid1);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Destroy references */
+ for (j = 0; j < SPACE1_DIM1; j++) {
+ ret = H5Rdestroy(&wbuf[j]);
+ CHECK(ret, FAIL, "H5Rdestroy");
+ ret = H5Rdestroy(&rbuf[j]);
+ CHECK(ret, FAIL, "H5Rdestroy");
+ }
+
+ /* Free memory buffers */
+ HDfree(wbuf);
+ HDfree(rbuf);
+ HDfree(ibuf);
+ HDfree(obuf);
+} /* test_reference_obj() */
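+
+#if 0
+/* Illustrative sketch only (kept disabled): retrieving the name of a
+ * referenced object with the usual two-call pattern (query the length with a
+ * NULL buffer, then allocate and fetch).  'ref' is assumed to be a valid
+ * H5R_ref_t such as one read back in test_reference_obj() above.
+ */
+static void
+sketch_ref_name(H5R_ref_t *ref)
+{
+    ssize_t len;
+    char   *name;
+
+    /* First call with a NULL buffer just reports the name length */
+    if ((len = H5Rget_obj_name(ref, H5P_DEFAULT, NULL, 0)) < 0)
+        return;
+
+    if (NULL != (name = (char *)HDmalloc((size_t)len + 1))) {
+        (void)H5Rget_obj_name(ref, H5P_DEFAULT, name, (size_t)len + 1);
+        /* ... use 'name' ... */
+        HDfree(name);
+    }
+}
+#endif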
+
+/****************************************************************
+**
+** test_reference_vlen_obj(): Test basic H5R (reference) object reference
+** within a vlen type.
+** Tests references to various kinds of objects
+**
+****************************************************************/
+static void
+test_reference_vlen_obj(void)
+{
+ hid_t fid1; /* HDF5 File IDs */
+ hid_t dataset, /* Dataset ID */
+ dset2; /* Dereferenced dataset ID */
+ hid_t group; /* Group ID */
+ hid_t sid1; /* Dataspace ID */
+ hid_t tid1; /* Datatype ID */
+ hsize_t dims1[] = {SPACE1_DIM1};
+ hsize_t vl_dims[] = {1};
+ hid_t dapl_id; /* Dataset access property list */
+ H5R_ref_t *wbuf, /* buffer to write to disk */
+ *rbuf = NULL; /* buffer read from disk */
+ unsigned *ibuf, *obuf;
+ unsigned i, j; /* Counters */
+ H5O_type_t obj_type; /* Object type */
+ herr_t ret; /* Generic return value */
+ hvl_t vl_wbuf = {0, NULL}, vl_rbuf = {0, NULL};
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing Object Reference Functions within VLEN type\n"));
+
+ /* Allocate write & read buffers */
+ wbuf = HDcalloc(sizeof(H5R_ref_t), SPACE1_DIM1);
+ ibuf = HDcalloc(sizeof(unsigned), SPACE1_DIM1);
+ obuf = HDcalloc(sizeof(unsigned), SPACE1_DIM1);
+
+ for (i = 0; i < SPACE1_DIM1; i++)
+ obuf[i] = i * 3;
+
+ /* Create file */
+ fid1 = H5Fcreate(FILE_REF_VL_OBJ, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(fid1, H5I_INVALID_HID, "H5Fcreate");
+
+ /* Create dataspace for datasets */
+ sid1 = H5Screate_simple(SPACE1_RANK, dims1, NULL);
+ CHECK(sid1, H5I_INVALID_HID, "H5Screate_simple");
+
+ /* Create dataset access property list */
+ dapl_id = H5Pcreate(H5P_DATASET_ACCESS);
+ CHECK(dapl_id, H5I_INVALID_HID, "H5Pcreate");
+
+ /* Create a group */
+ group = H5Gcreate2(fid1, "Group1", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(group, H5I_INVALID_HID, "H5Gcreate2");
+
+ /* Create a dataset (inside Group1) */
+ dataset = H5Dcreate2(group, "Dataset1", H5T_NATIVE_UINT, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(dataset, H5I_INVALID_HID, "H5Dcreate2");
+
+ /* Write selection to disk */
+ ret = H5Dwrite(dataset, H5T_NATIVE_UINT, H5S_ALL, H5S_ALL, H5P_DEFAULT, obuf);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* Close Dataset */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Create another dataset (inside Group1) */
+ dataset = H5Dcreate2(group, "Dataset2", H5T_NATIVE_UCHAR, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+    CHECK(dataset, H5I_INVALID_HID, "H5Dcreate2");
+
+ /* Close Dataset */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close disk dataspace */
+ ret = H5Sclose(sid1);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Create a datatype to refer to */
+ tid1 = H5Tcreate(H5T_COMPOUND, sizeof(s1_t));
+ CHECK(tid1, H5I_INVALID_HID, "H5Tcreate");
+
+ /* Insert fields */
+ ret = H5Tinsert(tid1, "a", HOFFSET(s1_t, a), H5T_NATIVE_INT);
+ CHECK(ret, FAIL, "H5Tinsert");
+
+ ret = H5Tinsert(tid1, "b", HOFFSET(s1_t, b), H5T_NATIVE_INT);
+ CHECK(ret, FAIL, "H5Tinsert");
+
+ ret = H5Tinsert(tid1, "c", HOFFSET(s1_t, c), H5T_NATIVE_FLOAT);
+ CHECK(ret, FAIL, "H5Tinsert");
+
+ /* Save datatype for later */
+ ret = H5Tcommit2(group, "Datatype1", tid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Tcommit2");
+
+ /* Close datatype */
+ ret = H5Tclose(tid1);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ /* Close group */
+ ret = H5Gclose(group);
+ CHECK(ret, FAIL, "H5Gclose");
+
+ /* Create vlen type */
+ tid1 = H5Tvlen_create(H5T_STD_REF);
+ CHECK(tid1, H5I_INVALID_HID, "H5Tvlen_create");
+
+ /* Create dataspace for datasets */
+ sid1 = H5Screate_simple(SPACE1_RANK, vl_dims, NULL);
+ CHECK(sid1, H5I_INVALID_HID, "H5Screate_simple");
+
+ /* Create a dataset */
+ dataset = H5Dcreate2(fid1, "Dataset3", tid1, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(dataset, H5I_INVALID_HID, "H5Dcreate2");
+
+ /* Create reference to dataset */
+ ret = H5Rcreate_object(fid1, "/Group1/Dataset1", H5P_DEFAULT, &wbuf[0]);
+ CHECK(ret, FAIL, "H5Rcreate_object");
+ ret = H5Rget_obj_type3(&wbuf[0], H5P_DEFAULT, &obj_type);
+ CHECK(ret, FAIL, "H5Rget_obj_type3");
+ VERIFY(obj_type, H5O_TYPE_DATASET, "H5Rget_obj_type3");
+
+ /* Create reference to dataset */
+ ret = H5Rcreate_object(fid1, "/Group1/Dataset2", H5P_DEFAULT, &wbuf[1]);
+ CHECK(ret, FAIL, "H5Rcreate_object");
+ ret = H5Rget_obj_type3(&wbuf[1], H5P_DEFAULT, &obj_type);
+ CHECK(ret, FAIL, "H5Rget_obj_type3");
+ VERIFY(obj_type, H5O_TYPE_DATASET, "H5Rget_obj_type3");
+
+ /* Create reference to group */
+ ret = H5Rcreate_object(fid1, "/Group1", H5P_DEFAULT, &wbuf[2]);
+ CHECK(ret, FAIL, "H5Rcreate_object");
+ ret = H5Rget_obj_type3(&wbuf[2], H5P_DEFAULT, &obj_type);
+ CHECK(ret, FAIL, "H5Rget_obj_type3");
+ VERIFY(obj_type, H5O_TYPE_GROUP, "H5Rget_obj_type3");
+
+ /* Create reference to named datatype */
+ ret = H5Rcreate_object(fid1, "/Group1/Datatype1", H5P_DEFAULT, &wbuf[3]);
+ CHECK(ret, FAIL, "H5Rcreate_object");
+ ret = H5Rget_obj_type3(&wbuf[3], H5P_DEFAULT, &obj_type);
+ CHECK(ret, FAIL, "H5Rget_obj_type3");
+ VERIFY(obj_type, H5O_TYPE_NAMED_DATATYPE, "H5Rget_obj_type3");
+
+ /* Store references into vlen */
+ vl_wbuf.len = SPACE1_DIM1;
+ vl_wbuf.p = wbuf;
+
+ /* Write selection to disk */
+ ret = H5Dwrite(dataset, tid1, H5S_ALL, H5S_ALL, H5P_DEFAULT, &vl_wbuf);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* Close disk dataspace */
+ ret = H5Sclose(sid1);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close Dataset */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close datatype */
+ ret = H5Tclose(tid1);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ /* Close file */
+ ret = H5Fclose(fid1);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Re-open the file */
+ fid1 = H5Fopen(FILE_REF_VL_OBJ, H5F_ACC_RDWR, H5P_DEFAULT);
+ CHECK(fid1, H5I_INVALID_HID, "H5Fopen");
+
+ /* Open the dataset */
+ dataset = H5Dopen2(fid1, "/Dataset3", H5P_DEFAULT);
+ CHECK(dataset, H5I_INVALID_HID, "H5Dopen2");
+
+ tid1 = H5Dget_type(dataset);
+ CHECK(tid1, H5I_INVALID_HID, "H5Dget_type");
+
+ /* Read selection from disk */
+ ret = H5Dread(dataset, tid1, H5S_ALL, H5S_ALL, H5P_DEFAULT, &vl_rbuf);
+ CHECK(ret, FAIL, "H5Dread");
+
+ VERIFY(vl_rbuf.len, SPACE1_DIM1, "H5Dread");
+ rbuf = vl_rbuf.p;
+
+ /* Close datatype */
+ ret = H5Tclose(tid1);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ /* Open dataset object */
+ dset2 = H5Ropen_object(&rbuf[0], H5P_DEFAULT, dapl_id);
+ CHECK(dset2, H5I_INVALID_HID, "H5Ropen_object");
+
+ /* Check information in referenced dataset */
+ sid1 = H5Dget_space(dset2);
+ CHECK(sid1, H5I_INVALID_HID, "H5Dget_space");
+
+ ret = (int)H5Sget_simple_extent_npoints(sid1);
+ VERIFY(ret, SPACE1_DIM1, "H5Sget_simple_extent_npoints");
+
+ /* Read from disk */
+ ret = H5Dread(dset2, H5T_NATIVE_UINT, H5S_ALL, H5S_ALL, H5P_DEFAULT, ibuf);
+ CHECK(ret, FAIL, "H5Dread");
+
+ for (i = 0; i < SPACE1_DIM1; i++)
+ VERIFY(ibuf[i], i * 3, "Data");
+
+ /* Close dereferenced Dataset */
+ ret = H5Dclose(dset2);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Open group object. GAPL isn't supported yet. But it's harmless to pass in */
+ group = H5Ropen_object(&rbuf[2], H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(group, H5I_INVALID_HID, "H5Ropen_object");
+
+ /* Close group */
+ ret = H5Gclose(group);
+ CHECK(ret, FAIL, "H5Gclose");
+
+ /* Open datatype object. TAPL isn't supported yet. But it's harmless to pass in */
+ tid1 = H5Ropen_object(&rbuf[3], H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(tid1, H5I_INVALID_HID, "H5Ropen_object");
+
+ /* Verify correct datatype */
+ {
+ H5T_class_t tclass;
+
+ tclass = H5Tget_class(tid1);
+ VERIFY(tclass, H5T_COMPOUND, "H5Tget_class");
+
+ ret = H5Tget_nmembers(tid1);
+ VERIFY(ret, 3, "H5Tget_nmembers");
+ }
+
+ /* Close datatype */
+ ret = H5Tclose(tid1);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ /* Close Dataset */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close dataset access property list */
+ ret = H5Pclose(dapl_id);
+ CHECK(ret, FAIL, "H5Pclose");
+
+ /* Close file */
+ ret = H5Fclose(fid1);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Destroy references */
+ for (j = 0; j < SPACE1_DIM1; j++) {
+ ret = H5Rdestroy(&wbuf[j]);
+ CHECK(ret, FAIL, "H5Rdestroy");
+ ret = H5Rdestroy(&rbuf[j]);
+ CHECK(ret, FAIL, "H5Rdestroy");
+ }
+
+ /* Free memory buffers */
+ HDfree(wbuf);
+ HDfree(rbuf);
+ HDfree(ibuf);
+ HDfree(obuf);
+} /* test_reference_vlen_obj() */
+
+/****************************************************************
+**
+** test_reference_cmpnd_obj(): Test basic H5R (reference) object reference
+** within a compound type.
+** Tests references to various kinds of objects
+**
+****************************************************************/
+static void
+test_reference_cmpnd_obj(void)
+{
+ hid_t fid1; /* HDF5 File IDs */
+ hid_t dataset, /* Dataset ID */
+ dset2; /* Dereferenced dataset ID */
+ hid_t group; /* Group ID */
+ hid_t sid1; /* Dataspace ID */
+ hid_t tid1; /* Datatype ID */
+ hsize_t dims1[] = {SPACE1_DIM1};
+ hsize_t cmpnd_dims[] = {1};
+ hid_t dapl_id; /* Dataset access property list */
+ unsigned *ibuf, *obuf;
+ unsigned i; /* Counter */
+ H5O_type_t obj_type; /* Object type */
+ herr_t ret; /* Generic return value */
+ s2_t cmpnd_wbuf, cmpnd_rbuf;
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing Object Reference Functions within compound type\n"));
+
+ /* Allocate write & read buffers */
+ ibuf = HDcalloc(sizeof(unsigned), SPACE1_DIM1);
+ obuf = HDcalloc(sizeof(unsigned), SPACE1_DIM1);
+
+ for (i = 0; i < SPACE1_DIM1; i++)
+ obuf[i] = i * 3;
+
+ /* Create file */
+ fid1 = H5Fcreate(FILE_REF_CMPND_OBJ, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(fid1, H5I_INVALID_HID, "H5Fcreate");
+
+ /* Create dataspace for datasets */
+ sid1 = H5Screate_simple(SPACE1_RANK, dims1, NULL);
+ CHECK(sid1, H5I_INVALID_HID, "H5Screate_simple");
+
+ /* Create dataset access property list */
+ dapl_id = H5Pcreate(H5P_DATASET_ACCESS);
+ CHECK(dapl_id, H5I_INVALID_HID, "H5Pcreate");
+
+ /* Create a group */
+ group = H5Gcreate2(fid1, "Group1", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(group, H5I_INVALID_HID, "H5Gcreate2");
+
+ /* Create a dataset (inside Group1) */
+ dataset = H5Dcreate2(group, "Dataset1", H5T_NATIVE_UINT, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(dataset, H5I_INVALID_HID, "H5Dcreate2");
+
+ /* Write selection to disk */
+ ret = H5Dwrite(dataset, H5T_NATIVE_UINT, H5S_ALL, H5S_ALL, H5P_DEFAULT, obuf);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* Close Dataset */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Create another dataset (inside Group1) */
+ dataset = H5Dcreate2(group, "Dataset2", H5T_NATIVE_UCHAR, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+    CHECK(dataset, H5I_INVALID_HID, "H5Dcreate2");
+
+ /* Close Dataset */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close disk dataspace */
+ ret = H5Sclose(sid1);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Create a datatype to refer to */
+ tid1 = H5Tcreate(H5T_COMPOUND, sizeof(s1_t));
+ CHECK(tid1, H5I_INVALID_HID, "H5Tcreate");
+
+ /* Insert fields */
+ ret = H5Tinsert(tid1, "a", HOFFSET(s1_t, a), H5T_NATIVE_INT);
+ CHECK(ret, FAIL, "H5Tinsert");
+
+ ret = H5Tinsert(tid1, "b", HOFFSET(s1_t, b), H5T_NATIVE_INT);
+ CHECK(ret, FAIL, "H5Tinsert");
+
+ ret = H5Tinsert(tid1, "c", HOFFSET(s1_t, c), H5T_NATIVE_FLOAT);
+ CHECK(ret, FAIL, "H5Tinsert");
+
+ /* Save datatype for later */
+ ret = H5Tcommit2(group, "Datatype1", tid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Tcommit2");
+
+ /* Close datatype */
+ ret = H5Tclose(tid1);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ /* Close group */
+ ret = H5Gclose(group);
+ CHECK(ret, FAIL, "H5Gclose");
+
+ /* Create compound type */
+ tid1 = H5Tcreate(H5T_COMPOUND, sizeof(s2_t));
+ CHECK(tid1, H5I_INVALID_HID, "H5Tcreate");
+
+ /* Insert fields */
+ ret = H5Tinsert(tid1, "ref0", HOFFSET(s2_t, ref0), H5T_STD_REF);
+ CHECK(ret, FAIL, "H5Tinsert");
+
+ ret = H5Tinsert(tid1, "ref1", HOFFSET(s2_t, ref1), H5T_STD_REF);
+ CHECK(ret, FAIL, "H5Tinsert");
+
+ ret = H5Tinsert(tid1, "ref2", HOFFSET(s2_t, ref2), H5T_STD_REF);
+ CHECK(ret, FAIL, "H5Tinsert");
+
+ ret = H5Tinsert(tid1, "ref3", HOFFSET(s2_t, ref3), H5T_STD_REF);
+ CHECK(ret, FAIL, "H5Tinsert");
+
+ ret = H5Tinsert(tid1, "dim_idx", HOFFSET(s2_t, dim_idx), H5T_NATIVE_INT);
+ CHECK(ret, FAIL, "H5Tinsert");
+
+ /* Create dataspace for datasets */
+ sid1 = H5Screate_simple(SPACE1_RANK, cmpnd_dims, NULL);
+ CHECK(sid1, H5I_INVALID_HID, "H5Screate_simple");
+
+ /* Create a dataset */
+ dataset = H5Dcreate2(fid1, "Dataset3", tid1, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(dataset, H5I_INVALID_HID, "H5Dcreate2");
+
+ /* Reset buffer for writing */
+ HDmemset(&cmpnd_wbuf, 0, sizeof(cmpnd_wbuf));
+
+ /* Create reference to dataset */
+ ret = H5Rcreate_object(fid1, "/Group1/Dataset1", H5P_DEFAULT, &cmpnd_wbuf.ref0);
+ CHECK(ret, FAIL, "H5Rcreate_object");
+ ret = H5Rget_obj_type3(&cmpnd_wbuf.ref0, H5P_DEFAULT, &obj_type);
+ CHECK(ret, FAIL, "H5Rget_obj_type3");
+ VERIFY(obj_type, H5O_TYPE_DATASET, "H5Rget_obj_type3");
+
+ /* Create reference to dataset */
+ ret = H5Rcreate_object(fid1, "/Group1/Dataset2", H5P_DEFAULT, &cmpnd_wbuf.ref1);
+ CHECK(ret, FAIL, "H5Rcreate_object");
+ ret = H5Rget_obj_type3(&cmpnd_wbuf.ref1, H5P_DEFAULT, &obj_type);
+ CHECK(ret, FAIL, "H5Rget_obj_type3");
+ VERIFY(obj_type, H5O_TYPE_DATASET, "H5Rget_obj_type3");
+
+ /* Create reference to group */
+ ret = H5Rcreate_object(fid1, "/Group1", H5P_DEFAULT, &cmpnd_wbuf.ref2);
+ CHECK(ret, FAIL, "H5Rcreate_object");
+ ret = H5Rget_obj_type3(&cmpnd_wbuf.ref2, H5P_DEFAULT, &obj_type);
+ CHECK(ret, FAIL, "H5Rget_obj_type3");
+ VERIFY(obj_type, H5O_TYPE_GROUP, "H5Rget_obj_type3");
+
+ /* Create reference to named datatype */
+ ret = H5Rcreate_object(fid1, "/Group1/Datatype1", H5P_DEFAULT, &cmpnd_wbuf.ref3);
+ CHECK(ret, FAIL, "H5Rcreate_object");
+ ret = H5Rget_obj_type3(&cmpnd_wbuf.ref3, H5P_DEFAULT, &obj_type);
+ CHECK(ret, FAIL, "H5Rget_obj_type3");
+ VERIFY(obj_type, H5O_TYPE_NAMED_DATATYPE, "H5Rget_obj_type3");
+
+ /* Store dimensions */
+ cmpnd_wbuf.dim_idx = SPACE1_DIM1;
+
+ /* Write selection to disk */
+ ret = H5Dwrite(dataset, tid1, H5S_ALL, H5S_ALL, H5P_DEFAULT, &cmpnd_wbuf);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* Close disk dataspace */
+ ret = H5Sclose(sid1);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close Dataset */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close datatype */
+ ret = H5Tclose(tid1);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ /* Close file */
+ ret = H5Fclose(fid1);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Re-open the file */
+ fid1 = H5Fopen(FILE_REF_CMPND_OBJ, H5F_ACC_RDWR, H5P_DEFAULT);
+ CHECK(fid1, H5I_INVALID_HID, "H5Fopen");
+
+ /* Open the dataset */
+ dataset = H5Dopen2(fid1, "/Dataset3", H5P_DEFAULT);
+ CHECK(dataset, H5I_INVALID_HID, "H5Dopen2");
+
+ tid1 = H5Dget_type(dataset);
+ CHECK(tid1, H5I_INVALID_HID, "H5Dget_type");
+
+ /* Read selection from disk */
+ ret = H5Dread(dataset, tid1, H5S_ALL, H5S_ALL, H5P_DEFAULT, &cmpnd_rbuf);
+ CHECK(ret, FAIL, "H5Dread");
+
+ VERIFY(cmpnd_rbuf.dim_idx, SPACE1_DIM1, "H5Dread");
+
+ /* Close datatype */
+ ret = H5Tclose(tid1);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ /* Open dataset object */
+ dset2 = H5Ropen_object(&cmpnd_rbuf.ref0, H5P_DEFAULT, dapl_id);
+ CHECK(dset2, H5I_INVALID_HID, "H5Ropen_object");
+
+ /* Check information in referenced dataset */
+ sid1 = H5Dget_space(dset2);
+ CHECK(sid1, H5I_INVALID_HID, "H5Dget_space");
+
+ ret = (int)H5Sget_simple_extent_npoints(sid1);
+ VERIFY(ret, SPACE1_DIM1, "H5Sget_simple_extent_npoints");
+
+ /* Read from disk */
+ ret = H5Dread(dset2, H5T_NATIVE_UINT, H5S_ALL, H5S_ALL, H5P_DEFAULT, ibuf);
+ CHECK(ret, FAIL, "H5Dread");
+
+ for (i = 0; i < SPACE1_DIM1; i++)
+ VERIFY(ibuf[i], i * 3, "Data");
+
+ /* Close dereferenced Dataset */
+ ret = H5Dclose(dset2);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Open group object. A group access property list (GAPL) isn't supported yet, but it's harmless to pass one in */
+ group = H5Ropen_object(&cmpnd_rbuf.ref2, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(group, H5I_INVALID_HID, "H5Ropen_object");
+
+ /* Close group */
+ ret = H5Gclose(group);
+ CHECK(ret, FAIL, "H5Gclose");
+
+ /* Open datatype object. A datatype access property list (TAPL) isn't supported yet, but it's harmless to pass one in */
+ tid1 = H5Ropen_object(&cmpnd_rbuf.ref3, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(tid1, H5I_INVALID_HID, "H5Ropen_object");
+
+ /* Verify correct datatype */
+ {
+ H5T_class_t tclass;
+
+ tclass = H5Tget_class(tid1);
+ VERIFY(tclass, H5T_COMPOUND, "H5Tget_class");
+
+ ret = H5Tget_nmembers(tid1);
+ VERIFY(ret, 3, "H5Tget_nmembers");
+ }
+
+ /* Close datatype */
+ ret = H5Tclose(tid1);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ /* Close Dataset */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close dataset access property list */
+ ret = H5Pclose(dapl_id);
+ CHECK(ret, FAIL, "H5Pclose");
+
+ /* Close file */
+ ret = H5Fclose(fid1);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Destroy references */
+ ret = H5Rdestroy(&cmpnd_wbuf.ref0);
+ CHECK(ret, FAIL, "H5Rdestroy");
+ ret = H5Rdestroy(&cmpnd_wbuf.ref1);
+ CHECK(ret, FAIL, "H5Rdestroy");
+ ret = H5Rdestroy(&cmpnd_wbuf.ref2);
+ CHECK(ret, FAIL, "H5Rdestroy");
+ ret = H5Rdestroy(&cmpnd_wbuf.ref3);
+ CHECK(ret, FAIL, "H5Rdestroy");
+
+ ret = H5Rdestroy(&cmpnd_rbuf.ref0);
+ CHECK(ret, FAIL, "H5Rdestroy");
+ ret = H5Rdestroy(&cmpnd_rbuf.ref1);
+ CHECK(ret, FAIL, "H5Rdestroy");
+ ret = H5Rdestroy(&cmpnd_rbuf.ref2);
+ CHECK(ret, FAIL, "H5Rdestroy");
+ ret = H5Rdestroy(&cmpnd_rbuf.ref3);
+ CHECK(ret, FAIL, "H5Rdestroy");
+
+ /* Free memory buffers */
+ HDfree(ibuf);
+ HDfree(obuf);
+} /* test_reference_cmpnd_obj() */
+
+/****************************************************************
+**
+** test_reference_region(): Test basic H5R (reference) dataset region reference code.
+** Tests references to regions of 2-D datasets
+**
+** Note: The libver_low/libver_high parameters are used to create the file
+** with the low and high version bounds set in the fapl.
+** Please see the RFC for "H5Sencode/H5Sdecode Format Change".
+**
+****************************************************************/
+static void
+test_reference_region(H5F_libver_t libver_low, H5F_libver_t libver_high)
+{
+ hid_t fid1; /* HDF5 File IDs */
+ hid_t fapl; /* File access property list */
+ hid_t dset1, /* Dataset ID */
+ dset2; /* Dereferenced dataset ID */
+ hid_t sid1, /* Dataspace ID #1 */
+ sid2; /* Dataspace ID #2 */
+ hid_t dapl_id; /* Dataset access property list */
+ hsize_t dims1[] = {SPACE1_DIM1}, dims2[] = {SPACE2_DIM1, SPACE2_DIM2};
+ hsize_t start[SPACE2_RANK]; /* Starting location of hyperslab */
+ hsize_t stride[SPACE2_RANK]; /* Stride of hyperslab */
+ hsize_t count[SPACE2_RANK]; /* Element count of hyperslab */
+ hsize_t block[SPACE2_RANK]; /* Block size of hyperslab */
+ hsize_t coord1[POINT1_NPOINTS][SPACE2_RANK]; /* Coordinates for point selection */
+ hsize_t *coords; /* Coordinate buffer */
+ hsize_t low[SPACE2_RANK]; /* Selection bounds */
+ hsize_t high[SPACE2_RANK]; /* Selection bounds */
+ H5R_ref_t *wbuf = NULL, /* buffer to write to disk */
+ *rbuf = NULL; /* buffer read from disk */
+ H5R_ref_t nvrbuf[3] = {{{{0}}}, {{{101}}}, {{{255}}}}; /* buffer with non-valid refs */
+ uint8_t *dwbuf = NULL, /* Buffer for writing numeric data to disk */
+ *drbuf = NULL; /* Buffer for reading numeric data from disk */
+ uint8_t *tu8; /* Temporary pointer to uint8 data */
+ H5O_type_t obj_type; /* Type of object */
+ int i, j; /* Counters */
+ hssize_t hssize_ret; /* hssize_t return value */
+ htri_t tri_ret; /* htri_t return value */
+ herr_t ret; /* Generic return value */
+ hid_t dset_NA; /* Dataset id for undefined reference */
+ hid_t space_NA; /* Dataspace id for undefined reference */
+ hsize_t dims_NA[1] = {1}; /* Dims array for undefined reference */
+ H5R_ref_t rdata_NA[1]; /* Read buffer */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing Dataset Region Reference Functions\n"));
+
+ /* Allocate write & read buffers */
+ wbuf = HDcalloc(sizeof(H5R_ref_t), SPACE1_DIM1);
+ rbuf = HDcalloc(sizeof(H5R_ref_t), SPACE1_DIM1);
+ dwbuf = (uint8_t *)HDcalloc(sizeof(uint8_t), (size_t)(SPACE2_DIM1 * SPACE2_DIM2));
+ drbuf = (uint8_t *)HDcalloc(sizeof(uint8_t), (size_t)(SPACE2_DIM1 * SPACE2_DIM2));
+
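+ /* Initialize the 8-bit write buffer with a known pattern (value = 3 * index) */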
+ for (tu8 = dwbuf, i = 0; i < (SPACE2_DIM1 * SPACE2_DIM2); i++)
+ *tu8++ = (uint8_t)(i * 3);
+
+ /* Create file access property list */
+ fapl = H5Pcreate(H5P_FILE_ACCESS);
+ CHECK(fapl, H5I_INVALID_HID, "H5Pcreate");
+
+ /* Set the low/high version bounds in fapl */
+ ret = H5Pset_libver_bounds(fapl, libver_low, libver_high);
+ CHECK(ret, FAIL, "H5Pset_libver_bounds");
+
+ /* Create file with the fapl */
+ fid1 = H5Fcreate(FILE_REF_REG, H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
+ CHECK(fid1, H5I_INVALID_HID, "H5Fcreate");
+
+ /* Create dataspace for datasets */
+ sid2 = H5Screate_simple(SPACE2_RANK, dims2, NULL);
+ CHECK(sid2, H5I_INVALID_HID, "H5Screate_simple");
+
+ /* Create dataset access property list */
+ dapl_id = H5Pcreate(H5P_DATASET_ACCESS);
+ CHECK(dapl_id, H5I_INVALID_HID, "H5Pcreate");
+
+ /* Create a dataset */
+ dset2 = H5Dcreate2(fid1, "Dataset2", H5T_STD_U8LE, sid2, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(dset2, H5I_INVALID_HID, "H5Dcreate2");
+
+ /* Write selection to disk */
+ ret = H5Dwrite(dset2, H5T_STD_U8LE, H5S_ALL, H5S_ALL, H5P_DEFAULT, dwbuf);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* Close Dataset */
+ ret = H5Dclose(dset2);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Create dataspace for the reference dataset */
+ sid1 = H5Screate_simple(SPACE1_RANK, dims1, NULL);
+ CHECK(sid1, H5I_INVALID_HID, "H5Screate_simple");
+
+ /* Create a dataset */
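+ /* Creating a dataset with the new H5T_STD_REF type is expected to fail when the
+ * high library version bound is v1.10 or earlier, so wrap the call in
+ * H5E_BEGIN_TRY / H5E_END_TRY and handle both outcomes below */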
+ H5E_BEGIN_TRY
+ {
+ dset1 = H5Dcreate2(fid1, "Dataset1", H5T_STD_REF, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (dset1 < 0) {
+ VERIFY(libver_high <= H5F_LIBVER_V110, TRUE, "H5Dcreate2");
+
+ ret = H5Sclose(sid1);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ ret = H5Sclose(sid2);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ ret = H5Pclose(dapl_id);
+ CHECK(ret, FAIL, "H5Pclose");
+
+ ret = H5Pclose(fapl);
+ CHECK(ret, FAIL, "H5Pclose");
+
+ ret = H5Fclose(fid1);
+ CHECK(ret, FAIL, "H5Fclose");
+ }
+ else {
+
+ CHECK(dset1, H5I_INVALID_HID, "H5Dcreate2");
+
+ /* Create references */
+
+ /* Select 6x6 hyperslab for first reference */
+ start[0] = 2;
+ start[1] = 2;
+ stride[0] = 1;
+ stride[1] = 1;
+ count[0] = 1;
+ count[1] = 1;
+ block[0] = 6;
+ block[1] = 6;
+ ret = H5Sselect_hyperslab(sid2, H5S_SELECT_SET, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ ret = (int)H5Sget_select_npoints(sid2);
+ VERIFY(ret, 36, "H5Sget_select_npoints");
+
+ /* Store first dataset region */
+ ret = H5Rcreate_region(fid1, "/Dataset2", sid2, H5P_DEFAULT, &wbuf[0]);
+ CHECK(ret, FAIL, "H5Rcreate_region");
+ ret = H5Rget_obj_type3(&wbuf[0], H5P_DEFAULT, &obj_type);
+ CHECK(ret, FAIL, "H5Rget_obj_type3");
+ VERIFY(obj_type, H5O_TYPE_DATASET, "H5Rget_obj_type3");
+
+ /* Select sequence of ten points for second reference */
+ coord1[0][0] = 6;
+ coord1[0][1] = 9;
+ coord1[1][0] = 2;
+ coord1[1][1] = 2;
+ coord1[2][0] = 8;
+ coord1[2][1] = 4;
+ coord1[3][0] = 1;
+ coord1[3][1] = 6;
+ coord1[4][0] = 2;
+ coord1[4][1] = 8;
+ coord1[5][0] = 3;
+ coord1[5][1] = 2;
+ coord1[6][0] = 0;
+ coord1[6][1] = 4;
+ coord1[7][0] = 9;
+ coord1[7][1] = 0;
+ coord1[8][0] = 7;
+ coord1[8][1] = 1;
+ coord1[9][0] = 3;
+ coord1[9][1] = 3;
+ ret = H5Sselect_elements(sid2, H5S_SELECT_SET, (size_t)POINT1_NPOINTS, (const hsize_t *)coord1);
+ CHECK(ret, FAIL, "H5Sselect_elements");
+
+ ret = (int)H5Sget_select_npoints(sid2);
+ VERIFY(ret, SPACE2_DIM2, "H5Sget_select_npoints");
+
+ /* Store second dataset region */
+ ret = H5Rcreate_region(fid1, "/Dataset2", sid2, H5P_DEFAULT, &wbuf[1]);
+ CHECK(ret, FAIL, "H5Rcreate_region");
+
+ /* Select unlimited hyperslab for third reference */
+ start[0] = 1;
+ start[1] = 8;
+ stride[0] = 4;
+ stride[1] = 1;
+ count[0] = H5S_UNLIMITED;
+ count[1] = 1;
+ block[0] = 2;
+ block[1] = 2;
+ ret = H5Sselect_hyperslab(sid2, H5S_SELECT_SET, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ hssize_ret = H5Sget_select_npoints(sid2);
+ VERIFY(hssize_ret, (hssize_t)H5S_UNLIMITED, "H5Sget_select_npoints");
+
+ /* Store third dataset region */
+ ret = H5Rcreate_region(fid1, "/Dataset2", sid2, H5P_DEFAULT, &wbuf[2]);
+ CHECK(ret, FAIL, "H5Rcreate_region");
+
+ ret = H5Rget_obj_type3(&wbuf[2], H5P_DEFAULT, &obj_type);
+ CHECK(ret, FAIL, "H5Rget_obj_type3");
+ VERIFY(obj_type, H5O_TYPE_DATASET, "H5Rget_obj_type3");
+
+ /* Store fourth dataset region */
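+ /* (the unlimited hyperslab selection from above is still set on sid2 and is reused here) */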
+ ret = H5Rcreate_region(fid1, "/Dataset2", sid2, H5P_DEFAULT, &wbuf[3]);
+ CHECK(ret, FAIL, "H5Rcreate_region");
+
+ /* Write selection to disk */
+ ret = H5Dwrite(dset1, H5T_STD_REF, H5S_ALL, H5S_ALL, H5P_DEFAULT, wbuf);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /*
+ * Store a dataset region reference which will not get written to disk
+ */
+
+ /* Create the dataspace of the region references */
+ space_NA = H5Screate_simple(1, dims_NA, NULL);
+ CHECK(space_NA, H5I_INVALID_HID, "H5Screate_simple");
+
+ /* Create the dataset and write the region references to it */
+ dset_NA = H5Dcreate2(fid1, "DS_NA", H5T_STD_REF, space_NA, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(dset_NA, H5I_INVALID_HID, "H5Dcreate2");
+
+ /* Close and release resources for undefined region reference tests */
+ ret = H5Dclose(dset_NA);
+ CHECK(ret, FAIL, "H5Dclose");
+ ret = H5Sclose(space_NA);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close disk dataspace */
+ ret = H5Sclose(sid1);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close Dataset */
+ ret = H5Dclose(dset1);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close uint8 dataset dataspace */
+ ret = H5Sclose(sid2);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close file */
+ ret = H5Fclose(fid1);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Re-open the file */
+ fid1 = H5Fopen(FILE_REF_REG, H5F_ACC_RDWR, fapl);
+ CHECK(fid1, H5I_INVALID_HID, "H5Fopen");
+
+ /*
+ * Start the test of an undefined reference
+ */
+
+ /* Open the dataset of the undefined references */
+ dset_NA = H5Dopen2(fid1, "DS_NA", H5P_DEFAULT);
+ CHECK(dset_NA, H5I_INVALID_HID, "H5Dopen2");
+
+ /* Read the data */
+ ret = H5Dread(dset_NA, H5T_STD_REF, H5S_ALL, H5S_ALL, H5P_DEFAULT, rdata_NA);
+ CHECK(ret, FAIL, "H5Dread");
+
+ /*
+ * Dereference an undefined reference (should fail)
+ */
+ H5E_BEGIN_TRY
+ {
+ dset2 = H5Ropen_object(&rdata_NA[0], H5P_DEFAULT, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+ VERIFY(dset2, H5I_INVALID_HID, "H5Ropen_object");
+
+ /* Close and release resources. */
+ ret = H5Dclose(dset_NA);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* This close should fail, since H5Ropen_object never created an
+ * ID for the referenced object. */
+ H5E_BEGIN_TRY
+ {
+ ret = H5Dclose(dset2);
+ }
+ H5E_END_TRY;
+ VERIFY(ret, FAIL, "H5Dclose");
+
+ /*
+ * End the test of an undefined reference
+ */
+
+ /* Open the dataset */
+ dset1 = H5Dopen2(fid1, "/Dataset1", H5P_DEFAULT);
+ CHECK(dset1, H5I_INVALID_HID, "H5Dopen2");
+
+ /* Read selection from disk */
+ ret = H5Dread(dset1, H5T_STD_REF, H5S_ALL, H5S_ALL, H5P_DEFAULT, rbuf);
+ CHECK(ret, FAIL, "H5Dread");
+
+ /* Try to open objects */
+ dset2 = H5Ropen_object(&rbuf[0], H5P_DEFAULT, dapl_id);
+ CHECK(dset2, H5I_INVALID_HID, "H5Ropen_object");
+
+ /* Check what H5Rget_obj_type3 function returns */
+ ret = H5Rget_obj_type3(&rbuf[0], H5P_DEFAULT, &obj_type);
+ CHECK(ret, FAIL, "H5Rget_obj_type3");
+ VERIFY(obj_type, H5O_TYPE_DATASET, "H5Rget_obj_type3");
+
+ /* Check information in referenced dataset */
+ sid1 = H5Dget_space(dset2);
+ CHECK(sid1, H5I_INVALID_HID, "H5Dget_space");
+
+ ret = (int)H5Sget_simple_extent_npoints(sid1);
+ VERIFY(ret, (SPACE2_DIM1 * SPACE2_DIM2), "H5Sget_simple_extent_npoints");
+
+ /* Read from disk */
+ ret = H5Dread(dset2, H5T_STD_U8LE, H5S_ALL, H5S_ALL, H5P_DEFAULT, drbuf);
+ CHECK(ret, FAIL, "H5Dread");
+
+ for (tu8 = (uint8_t *)drbuf, i = 0; i < (SPACE2_DIM1 * SPACE2_DIM2); i++, tu8++)
+ VERIFY(*tu8, (uint8_t)(i * 3), "Data");
+
+ /* Get the hyperslab selection */
+ sid2 = H5Ropen_region(&rbuf[0], H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(sid2, H5I_INVALID_HID, "H5Ropen_region");
+
+ /* Verify correct hyperslab selected */
+ ret = (int)H5Sget_select_npoints(sid2);
+ VERIFY(ret, 36, "H5Sget_select_npoints");
+ ret = (int)H5Sget_select_hyper_nblocks(sid2);
+ VERIFY(ret, 1, "H5Sget_select_hyper_nblocks");
+
+ /* allocate space for the hyperslab blocks */
+ coords = (hsize_t *)HDmalloc((size_t)ret * SPACE2_RANK * sizeof(hsize_t) * 2);
+
+ ret = H5Sget_select_hyper_blocklist(sid2, (hsize_t)0, (hsize_t)ret, coords);
+ CHECK(ret, FAIL, "H5Sget_select_hyper_blocklist");
+ VERIFY(coords[0], 2, "Hyperslab Coordinates");
+ VERIFY(coords[1], 2, "Hyperslab Coordinates");
+ VERIFY(coords[2], 7, "Hyperslab Coordinates");
+ VERIFY(coords[3], 7, "Hyperslab Coordinates");
+ HDfree(coords);
+ ret = H5Sget_select_bounds(sid2, low, high);
+ CHECK(ret, FAIL, "H5Sget_select_bounds");
+ VERIFY(low[0], 2, "Selection Bounds");
+ VERIFY(low[1], 2, "Selection Bounds");
+ VERIFY(high[0], 7, "Selection Bounds");
+ VERIFY(high[1], 7, "Selection Bounds");
+
+ /* Close region space */
+ ret = H5Sclose(sid2);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Get the element selection */
+ sid2 = H5Ropen_region(&rbuf[1], H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(sid2, H5I_INVALID_HID, "H5Ropen_region");
+
+ /* Verify correct elements selected */
+ ret = (int)H5Sget_select_npoints(sid2);
+ VERIFY(ret, SPACE2_DIM2, "H5Sget_select_npoints");
+ ret = (int)H5Sget_select_elem_npoints(sid2);
+ VERIFY(ret, SPACE2_DIM2, "H5Sget_select_elem_npoints");
+
+ /* allocate space for the element points */
+ coords = (hsize_t *)HDmalloc((size_t)ret * SPACE2_RANK * sizeof(hsize_t));
+
+ ret = H5Sget_select_elem_pointlist(sid2, (hsize_t)0, (hsize_t)ret, coords);
+ CHECK(ret, FAIL, "H5Sget_select_elem_pointlist");
+ VERIFY(coords[0], coord1[0][0], "Element Coordinates");
+ VERIFY(coords[1], coord1[0][1], "Element Coordinates");
+ VERIFY(coords[2], coord1[1][0], "Element Coordinates");
+ VERIFY(coords[3], coord1[1][1], "Element Coordinates");
+ VERIFY(coords[4], coord1[2][0], "Element Coordinates");
+ VERIFY(coords[5], coord1[2][1], "Element Coordinates");
+ VERIFY(coords[6], coord1[3][0], "Element Coordinates");
+ VERIFY(coords[7], coord1[3][1], "Element Coordinates");
+ VERIFY(coords[8], coord1[4][0], "Element Coordinates");
+ VERIFY(coords[9], coord1[4][1], "Element Coordinates");
+ VERIFY(coords[10], coord1[5][0], "Element Coordinates");
+ VERIFY(coords[11], coord1[5][1], "Element Coordinates");
+ VERIFY(coords[12], coord1[6][0], "Element Coordinates");
+ VERIFY(coords[13], coord1[6][1], "Element Coordinates");
+ VERIFY(coords[14], coord1[7][0], "Element Coordinates");
+ VERIFY(coords[15], coord1[7][1], "Element Coordinates");
+ VERIFY(coords[16], coord1[8][0], "Element Coordinates");
+ VERIFY(coords[17], coord1[8][1], "Element Coordinates");
+ VERIFY(coords[18], coord1[9][0], "Element Coordinates");
+ VERIFY(coords[19], coord1[9][1], "Element Coordinates");
+ HDfree(coords);
+ ret = H5Sget_select_bounds(sid2, low, high);
+ CHECK(ret, FAIL, "H5Sget_select_bounds");
+ VERIFY(low[0], 0, "Selection Bounds");
+ VERIFY(low[1], 0, "Selection Bounds");
+ VERIFY(high[0], 9, "Selection Bounds");
+ VERIFY(high[1], 9, "Selection Bounds");
+
+ /* Close region space */
+ ret = H5Sclose(sid2);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Get the unlimited selection */
+ sid2 = H5Ropen_region(&rbuf[2], H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(sid2, H5I_INVALID_HID, "H5Ropen_region");
+
+ /* Verify correct hyperslab selected */
+ hssize_ret = H5Sget_select_npoints(sid2);
+ VERIFY(hssize_ret, (hssize_t)H5S_UNLIMITED, "H5Sget_select_npoints");
+ tri_ret = H5Sis_regular_hyperslab(sid2);
+ CHECK(tri_ret, FAIL, "H5Sis_regular_hyperslab");
+ VERIFY(tri_ret, TRUE, "H5Sis_regular_hyperslab Result");
+ ret = H5Sget_regular_hyperslab(sid2, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sget_regular_hyperslab");
+ VERIFY(start[0], (hsize_t)1, "Hyperslab Coordinates");
+ VERIFY(start[1], (hsize_t)8, "Hyperslab Coordinates");
+ VERIFY(stride[0], (hsize_t)4, "Hyperslab Coordinates");
+ VERIFY(stride[1], (hsize_t)1, "Hyperslab Coordinates");
+ VERIFY(count[0], H5S_UNLIMITED, "Hyperslab Coordinates");
+ VERIFY(count[1], (hsize_t)1, "Hyperslab Coordinates");
+ VERIFY(block[0], (hsize_t)2, "Hyperslab Coordinates");
+ VERIFY(block[1], (hsize_t)2, "Hyperslab Coordinates");
+
+ /* Close region space */
+ ret = H5Sclose(sid2);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close first space */
+ ret = H5Sclose(sid1);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close dereferenced Dataset */
+ ret = H5Dclose(dset2);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Attempt to retrieve the type of object using invalid references (should fail) */
+ for (j = 0; j < 3; j++) {
+ H5E_BEGIN_TRY
+ {
+ ret = H5Rget_obj_type3(&nvrbuf[j], H5P_DEFAULT, &obj_type);
+ }
+ H5E_END_TRY;
+ VERIFY(ret, FAIL, "H5Rget_obj_type3");
+ } /* end for */
+
+ /* Close Dataset */
+ ret = H5Dclose(dset1);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close dataset access property list */
+ ret = H5Pclose(dapl_id);
+ CHECK(ret, FAIL, "H5Pclose");
+
+ /* Close file access property list */
+ ret = H5Pclose(fapl);
+ CHECK(ret, FAIL, "H5Pclose");
+
+ /* Close file */
+ ret = H5Fclose(fid1);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Destroy references */
+ for (j = 0; j < SPACE1_DIM1; j++) {
+ ret = H5Rdestroy(&wbuf[j]);
+ CHECK(ret, FAIL, "H5Rdestroy");
+ ret = H5Rdestroy(&rbuf[j]);
+ CHECK(ret, FAIL, "H5Rdestroy");
+ }
+ }
+
+ /* Free memory buffers */
+ HDfree(wbuf);
+ HDfree(rbuf);
+ HDfree(dwbuf);
+ HDfree(drbuf);
+
+} /* test_reference_region() */
+
+/****************************************************************
+**
+** test_reference_region_1D(): Test H5R (reference) dataset region reference code.
+** Tests references to regions of 1-D datasets
+**
+** Note: The libver_low/libver_high parameters are used to create the file
+** with the low and high version bounds set in the fapl.
+** Please see the RFC for "H5Sencode/H5Sdecode Format Change".
+**
+****************************************************************/
+static void
+test_reference_region_1D(H5F_libver_t libver_low, H5F_libver_t libver_high)
+{
+ hid_t fid1; /* HDF5 File IDs */
+ hid_t fapl; /* File access property list */
+ hid_t dset1, /* Dataset ID */
+ dset3; /* Dereferenced dataset ID */
+ hid_t sid1, /* Dataspace ID #1 */
+ sid3; /* Dataspace ID #3 */
+ hid_t dapl_id; /* Dataset access property list */
+ hsize_t dims1[] = {2}, /* Must be 2 */
+ dims3[] = {SPACE3_DIM1};
+ hsize_t start[SPACE3_RANK]; /* Starting location of hyperslab */
+ hsize_t stride[SPACE3_RANK]; /* Stride of hyperslab */
+ hsize_t count[SPACE3_RANK]; /* Element count of hyperslab */
+ hsize_t block[SPACE3_RANK]; /* Block size of hyperslab */
+ hsize_t coord1[POINT1_NPOINTS][SPACE3_RANK]; /* Coordinates for point selection */
+ hsize_t *coords; /* Coordinate buffer */
+ hsize_t low[SPACE3_RANK]; /* Selection bounds */
+ hsize_t high[SPACE3_RANK]; /* Selection bounds */
+ H5R_ref_t *wbuf = NULL, /* buffer to write to disk */
+ *rbuf = NULL; /* buffer read from disk */
+ uint8_t *dwbuf = NULL, /* Buffer for writing numeric data to disk */
+ *drbuf = NULL; /* Buffer for reading numeric data from disk */
+ uint8_t *tu8; /* Temporary pointer to uint8 data */
+ H5O_type_t obj_type; /* Object type */
+ int i; /* Counter */
+ herr_t ret; /* Generic return value */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing 1-D Dataset Region Reference Functions\n"));
+
+ /* Allocate write & read buffers */
+ wbuf = HDcalloc(sizeof(H5R_ref_t), (size_t)SPACE1_DIM1);
+ rbuf = HDcalloc(sizeof(H5R_ref_t), (size_t)SPACE1_DIM1);
+ dwbuf = (uint8_t *)HDcalloc(sizeof(uint8_t), (size_t)SPACE3_DIM1);
+ drbuf = (uint8_t *)HDcalloc(sizeof(uint8_t), (size_t)SPACE3_DIM1);
+
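+ /* Initialize the 8-bit write buffer with a known pattern (value = 3 * index) */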
+ for (tu8 = dwbuf, i = 0; i < SPACE3_DIM1; i++)
+ *tu8++ = (uint8_t)(i * 3);
+
+ /* Create the file access property list */
+ fapl = H5Pcreate(H5P_FILE_ACCESS);
+ CHECK(fapl, H5I_INVALID_HID, "H5Pcreate");
+
+ /* Set the low/high version bounds in fapl */
+ ret = H5Pset_libver_bounds(fapl, libver_low, libver_high);
+ CHECK(ret, FAIL, "H5Pset_libver_bounds");
+
+ /* Create file with the fapl */
+ fid1 = H5Fcreate(FILE_REF_REG_1D, H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
+ CHECK(fid1, H5I_INVALID_HID, "H5Fcreate");
+
+ /* Create dataspace for datasets */
+ sid3 = H5Screate_simple(SPACE3_RANK, dims3, NULL);
+ CHECK(sid3, H5I_INVALID_HID, "H5Screate_simple");
+
+ /* Create dataset access property list */
+ dapl_id = H5Pcreate(H5P_DATASET_ACCESS);
+ CHECK(dapl_id, H5I_INVALID_HID, "H5Pcreate");
+
+ /* Create a dataset */
+ dset3 = H5Dcreate2(fid1, "Dataset2", H5T_STD_U8LE, sid3, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(dset3, H5I_INVALID_HID, "H5Dcreate2");
+
+ /* Write selection to disk */
+ ret = H5Dwrite(dset3, H5T_STD_U8LE, H5S_ALL, H5S_ALL, H5P_DEFAULT, dwbuf);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* Close Dataset */
+ ret = H5Dclose(dset3);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Create dataspace for the reference dataset */
+ sid1 = H5Screate_simple(SPACE1_RANK, dims1, NULL);
+ CHECK(sid1, H5I_INVALID_HID, "H5Screate_simple");
+
+ /* Create a dataset */
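+ /* As above, creating an H5T_STD_REF dataset is expected to fail when the
+ * high library version bound is v1.10 or earlier */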
+ H5E_BEGIN_TRY
+ {
+ dset1 = H5Dcreate2(fid1, "Dataset1", H5T_STD_REF, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+
+ if (dset1 < 0) {
+
+ VERIFY(libver_high <= H5F_LIBVER_V110, TRUE, "H5Dcreate2");
+
+ ret = H5Sclose(sid1);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ ret = H5Sclose(sid3);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ ret = H5Pclose(dapl_id);
+ CHECK(ret, FAIL, "H5Pclose");
+
+ ret = H5Pclose(fapl);
+ CHECK(ret, FAIL, "H5Pclose");
+
+ ret = H5Fclose(fid1);
+ CHECK(ret, FAIL, "H5Fclose");
+ }
+ else {
+
+ CHECK(ret, FAIL, "H5Dcreate2");
+
+ /* Create references */
+
+ /* Select 15 2x1 hyperslabs for first reference */
+ start[0] = 2;
+ stride[0] = 5;
+ count[0] = 15;
+ block[0] = 2;
+ ret = H5Sselect_hyperslab(sid3, H5S_SELECT_SET, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ ret = (int)H5Sget_select_npoints(sid3);
+ VERIFY(ret, (block[0] * count[0]), "H5Sget_select_npoints");
+
+ /* Store first dataset region */
+ ret = H5Rcreate_region(fid1, "/Dataset2", sid3, H5P_DEFAULT, &wbuf[0]);
+ CHECK(ret, FAIL, "H5Rcreate_region");
+ ret = H5Rget_obj_type3(&wbuf[0], H5P_DEFAULT, &obj_type);
+ CHECK(ret, FAIL, "H5Rget_obj_type3");
+ VERIFY(obj_type, H5O_TYPE_DATASET, "H5Rget_obj_type3");
+
+ /* Select sequence of ten points for second reference */
+ coord1[0][0] = 16;
+ coord1[1][0] = 22;
+ coord1[2][0] = 38;
+ coord1[3][0] = 41;
+ coord1[4][0] = 52;
+ coord1[5][0] = 63;
+ coord1[6][0] = 70;
+ coord1[7][0] = 89;
+ coord1[8][0] = 97;
+ coord1[9][0] = 3;
+ ret = H5Sselect_elements(sid3, H5S_SELECT_SET, (size_t)POINT1_NPOINTS, (const hsize_t *)coord1);
+ CHECK(ret, FAIL, "H5Sselect_elements");
+
+ ret = (int)H5Sget_select_npoints(sid3);
+ VERIFY(ret, POINT1_NPOINTS, "H5Sget_select_npoints");
+
+ /* Store second dataset region */
+ ret = H5Rcreate_region(fid1, "/Dataset2", sid3, H5P_DEFAULT, &wbuf[1]);
+ CHECK(ret, FAIL, "H5Rcreate_region");
+
+ /* Write selection to disk */
+ ret = H5Dwrite(dset1, H5T_STD_REF, H5S_ALL, H5S_ALL, H5P_DEFAULT, wbuf);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* Close disk dataspace */
+ ret = H5Sclose(sid1);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close Dataset */
+ ret = H5Dclose(dset1);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close uint8 dataset dataspace */
+ ret = H5Sclose(sid3);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close file */
+ ret = H5Fclose(fid1);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Re-open the file */
+ fid1 = H5Fopen(FILE_REF_REG_1D, H5F_ACC_RDWR, fapl);
+ CHECK(fid1, H5I_INVALID_HID, "H5Fopen");
+
+ /* Open the dataset */
+ dset1 = H5Dopen2(fid1, "/Dataset1", H5P_DEFAULT);
+ CHECK(dset1, H5I_INVALID_HID, "H5Dopen2");
+
+ /* Read selection from disk */
+ ret = H5Dread(dset1, H5T_STD_REF, H5S_ALL, H5S_ALL, H5P_DEFAULT, rbuf);
+ CHECK(ret, FAIL, "H5Dread");
+
+ /* Try to open objects */
+ dset3 = H5Ropen_object(&rbuf[0], H5P_DEFAULT, dapl_id);
+ CHECK(dset3, H5I_INVALID_HID, "H5Ropen_object");
+
+ /* Check what H5Rget_obj_type3 function returns */
+ ret = H5Rget_obj_type3(&rbuf[0], H5P_DEFAULT, &obj_type);
+ CHECK(ret, FAIL, "H5Rget_obj_type3");
+ VERIFY(obj_type, H5O_TYPE_DATASET, "H5Rget_obj_type3");
+
+ /* Check information in referenced dataset */
+ sid1 = H5Dget_space(dset3);
+ CHECK(sid1, H5I_INVALID_HID, "H5Dget_space");
+
+ ret = (int)H5Sget_simple_extent_npoints(sid1);
+ VERIFY(ret, SPACE3_DIM1, "H5Sget_simple_extent_npoints");
+
+ /* Read from disk */
+ ret = H5Dread(dset3, H5T_STD_U8LE, H5S_ALL, H5S_ALL, H5P_DEFAULT, drbuf);
+ CHECK(ret, FAIL, "H5Dread");
+
+ for (tu8 = (uint8_t *)drbuf, i = 0; i < SPACE3_DIM1; i++, tu8++)
+ VERIFY(*tu8, (uint8_t)(i * 3), "Data");
+
+ /* Get the hyperslab selection */
+ sid3 = H5Ropen_region(&rbuf[0], H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(sid3, H5I_INVALID_HID, "H5Ropen_region");
+
+ /* Verify correct hyperslab selected */
+ ret = (int)H5Sget_select_npoints(sid3);
+ VERIFY(ret, 30, "H5Sget_select_npoints");
+ ret = (int)H5Sget_select_hyper_nblocks(sid3);
+ VERIFY(ret, 15, "H5Sget_select_hyper_nblocks");
+
+ /* allocate space for the hyperslab blocks */
+ coords = (hsize_t *)HDmalloc((size_t)ret * SPACE3_RANK * sizeof(hsize_t) * 2);
+
+ ret = H5Sget_select_hyper_blocklist(sid3, (hsize_t)0, (hsize_t)ret, coords);
+ CHECK(ret, FAIL, "H5Sget_select_hyper_blocklist");
+ VERIFY(coords[0], 2, "Hyperslab Coordinates");
+ VERIFY(coords[1], 3, "Hyperslab Coordinates");
+ VERIFY(coords[2], 7, "Hyperslab Coordinates");
+ VERIFY(coords[3], 8, "Hyperslab Coordinates");
+ VERIFY(coords[4], 12, "Hyperslab Coordinates");
+ VERIFY(coords[5], 13, "Hyperslab Coordinates");
+ VERIFY(coords[6], 17, "Hyperslab Coordinates");
+ VERIFY(coords[7], 18, "Hyperslab Coordinates");
+ VERIFY(coords[8], 22, "Hyperslab Coordinates");
+ VERIFY(coords[9], 23, "Hyperslab Coordinates");
+ VERIFY(coords[10], 27, "Hyperslab Coordinates");
+ VERIFY(coords[11], 28, "Hyperslab Coordinates");
+ VERIFY(coords[12], 32, "Hyperslab Coordinates");
+ VERIFY(coords[13], 33, "Hyperslab Coordinates");
+ VERIFY(coords[14], 37, "Hyperslab Coordinates");
+ VERIFY(coords[15], 38, "Hyperslab Coordinates");
+ VERIFY(coords[16], 42, "Hyperslab Coordinates");
+ VERIFY(coords[17], 43, "Hyperslab Coordinates");
+ VERIFY(coords[18], 47, "Hyperslab Coordinates");
+ VERIFY(coords[19], 48, "Hyperslab Coordinates");
+ VERIFY(coords[20], 52, "Hyperslab Coordinates");
+ VERIFY(coords[21], 53, "Hyperslab Coordinates");
+ VERIFY(coords[22], 57, "Hyperslab Coordinates");
+ VERIFY(coords[23], 58, "Hyperslab Coordinates");
+ VERIFY(coords[24], 62, "Hyperslab Coordinates");
+ VERIFY(coords[25], 63, "Hyperslab Coordinates");
+ VERIFY(coords[26], 67, "Hyperslab Coordinates");
+ VERIFY(coords[27], 68, "Hyperslab Coordinates");
+ VERIFY(coords[28], 72, "Hyperslab Coordinates");
+ VERIFY(coords[29], 73, "Hyperslab Coordinates");
+ HDfree(coords);
+ ret = H5Sget_select_bounds(sid3, low, high);
+ CHECK(ret, FAIL, "H5Sget_select_bounds");
+ VERIFY(low[0], 2, "Selection Bounds");
+ VERIFY(high[0], 73, "Selection Bounds");
+
+ /* Close region space */
+ ret = H5Sclose(sid3);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Get the element selection */
+ sid3 = H5Ropen_region(&rbuf[1], H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(sid3, H5I_INVALID_HID, "H5Ropen_region");
+
+ /* Verify correct elements selected */
+ ret = (int)H5Sget_select_npoints(sid3);
+ VERIFY(ret, 10, "H5Sget_select_npoints");
+ ret = (int)H5Sget_select_elem_npoints(sid3);
+ VERIFY(ret, 10, "H5Sget_select_elem_npoints");
+
+ /* allocate space for the element points */
+ coords = (hsize_t *)HDmalloc((size_t)ret * SPACE3_RANK * sizeof(hsize_t));
+
+ ret = H5Sget_select_elem_pointlist(sid3, (hsize_t)0, (hsize_t)ret, coords);
+ CHECK(ret, FAIL, "H5Sget_select_elem_pointlist");
+ VERIFY(coords[0], coord1[0][0], "Element Coordinates");
+ VERIFY(coords[1], coord1[1][0], "Element Coordinates");
+ VERIFY(coords[2], coord1[2][0], "Element Coordinates");
+ VERIFY(coords[3], coord1[3][0], "Element Coordinates");
+ VERIFY(coords[4], coord1[4][0], "Element Coordinates");
+ VERIFY(coords[5], coord1[5][0], "Element Coordinates");
+ VERIFY(coords[6], coord1[6][0], "Element Coordinates");
+ VERIFY(coords[7], coord1[7][0], "Element Coordinates");
+ VERIFY(coords[8], coord1[8][0], "Element Coordinates");
+ VERIFY(coords[9], coord1[9][0], "Element Coordinates");
+ HDfree(coords);
+ ret = H5Sget_select_bounds(sid3, low, high);
+ CHECK(ret, FAIL, "H5Sget_select_bounds");
+ VERIFY(low[0], 3, "Selection Bounds");
+ VERIFY(high[0], 97, "Selection Bounds");
+
+ /* Close region space */
+ ret = H5Sclose(sid3);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close first space */
+ ret = H5Sclose(sid1);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close dereferenced Dataset */
+ ret = H5Dclose(dset3);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close Dataset */
+ ret = H5Dclose(dset1);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close dataset access property list */
+ ret = H5Pclose(dapl_id);
+ CHECK(ret, FAIL, "H5Pclose");
+
+ /* Close file access property list */
+ ret = H5Pclose(fapl);
+ CHECK(ret, FAIL, "H5Pclose");
+
+ /* Close file */
+ ret = H5Fclose(fid1);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Destroy references */
+ for (i = 0; i < 2; i++) {
+ ret = H5Rdestroy(&wbuf[i]);
+ CHECK(ret, FAIL, "H5Rdestroy");
+ ret = H5Rdestroy(&rbuf[i]);
+ CHECK(ret, FAIL, "H5Rdestroy");
+ }
+ }
+
+ /* Free memory buffers */
+ HDfree(wbuf);
+ HDfree(rbuf);
+ HDfree(dwbuf);
+ HDfree(drbuf);
+
+} /* test_reference_region_1D() */
+
+/****************************************************************
+**
+** test_reference_obj_deleted(): Test H5R (reference) object reference code.
+** Tests for correct failures for deleted and non-existent objects
+**
+****************************************************************/
+static void
+test_reference_obj_deleted(void)
+{
+#ifndef NO_REFERENCE_TO_DELETED
+ hid_t fid1; /* HDF5 File IDs */
+ hid_t dataset, /* Dataset ID */
+ dset2; /* Dereferenced dataset ID */
+ hid_t sid1; /* Dataspace ID */
+ H5R_ref_t oref; /* Object Reference to test */
+ H5O_type_t obj_type; /* Object type */
+ herr_t ret; /* Generic return value */
+#endif
+ MESSAGE(5, ("Testing References to Deleted Objects - SKIPPED for now due to no support\n"));
+#ifndef NO_REFERENCE_TO_DELETED
+ /* Create file */
+ fid1 = H5Fcreate(FILE_REF_OBJ_DEL, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(fid1, H5I_INVALID_HID, "H5Fcreate");
+
+ /* Create scalar dataspace for datasets */
+ sid1 = H5Screate_simple(0, NULL, NULL);
+ CHECK(sid1, H5I_INVALID_HID, "H5Screate_simple");
+
+ /* Create a dataset to reference (deleted later) */
+ dataset = H5Dcreate2(fid1, "Dataset1", H5T_NATIVE_INT, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(dataset, H5I_INVALID_HID, "H5Dcreate2");
+
+ /* Close Dataset */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Create a dataset */
+ dataset = H5Dcreate2(fid1, "Dataset2", H5T_STD_REF, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(dataset, H5I_INVALID_HID, "H5Dcreate2");
+
+ /* Create reference to dataset */
+ ret = H5Rcreate_object(fid1, "/Dataset1", H5P_DEFAULT, &oref);
+ CHECK(ret, FAIL, "H5Rcreate_object");
+ ret = H5Rget_obj_type3(&oref, H5P_DEFAULT, &obj_type);
+ CHECK(ret, FAIL, "H5Rget_obj_type3");
+ VERIFY(obj_type, H5O_TYPE_DATASET, "H5Rget_obj_type3");
+
+ /* Write selection to disk */
+ ret = H5Dwrite(dataset, H5T_STD_REF, H5S_ALL, H5S_ALL, H5P_DEFAULT, &oref);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* Close Dataset */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Delete referenced dataset */
+ ret = H5Ldelete(fid1, "/Dataset1", H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Ldelete");
+
+ /* Close disk dataspace */
+ ret = H5Sclose(sid1);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close file */
+ ret = H5Fclose(fid1);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Destroy reference */
+ ret = H5Rdestroy(&oref);
+ CHECK(ret, FAIL, "H5Rdestroy");
+
+ /* Re-open the file */
+ fid1 = H5Fopen(FILE_REF_OBJ_DEL, H5F_ACC_RDWR, H5P_DEFAULT);
+ CHECK(fid1, H5I_INVALID_HID, "H5Fopen");
+
+ /* Open the dataset */
+ dataset = H5Dopen2(fid1, "/Dataset2", H5P_DEFAULT);
+ CHECK(dataset, H5I_INVALID_HID, "H5Dopen2");
+
+ /* Read selection from disk */
+ ret = H5Dread(dataset, H5T_STD_REF, H5S_ALL, H5S_ALL, H5P_DEFAULT, &oref);
+ CHECK(ret, FAIL, "H5Dread");
+
+ /* Open deleted dataset object */
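+ /* This open is expected to fail because the referenced dataset was deleted above */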
+ dset2 = H5Ropen_object(&oref, H5P_DEFAULT, H5P_DEFAULT);
+ VERIFY(dset2, H5I_INVALID_HID, "H5Ropen_object");
+
+ /* Close Dataset */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close file */
+ ret = H5Fclose(fid1);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Destroy reference */
+ ret = H5Rdestroy(&oref);
+ CHECK(ret, FAIL, "H5Rdestroy");
+#endif
+} /* test_reference_obj_deleted() */
+
+/****************************************************************
+**
+** test_deref_iter_op(): Iterator callback for test_reference_group_iterate()
+** test.
+**
+****************************************************************/
+static herr_t
+test_deref_iter_op(hid_t H5_ATTR_UNUSED group, const char *name, const H5L_info2_t H5_ATTR_UNUSED *info,
+ void *op_data)
+{
+ int *count = (int *)op_data; /* Pointer to name counter */
+ herr_t ret_value;
+
+ /* Simple check for correct names */
+ if (*count == 0) {
+ if (HDstrcmp(name, DSETNAME2) == 0)
+ ret_value = 0;
+ else
+ ret_value = -1;
+ } /* end if */
+ else if (*count == 1) {
+ if (HDstrcmp(name, GROUPNAME2) == 0)
+ ret_value = 0;
+ else
+ ret_value = -1;
+ } /* end if */
+ else if (*count == 2) {
+ if (HDstrcmp(name, GROUPNAME3) == 0)
+ ret_value = 0;
+ else
+ ret_value = -1;
+ } /* end if */
+ else
+ ret_value = -1;
+
+ (*count)++;
+
+ return (ret_value);
+} /* end test_deref_iter_op() */
+
+/****************************************************************
+**
+** test_reference_group(): Test H5R (reference) object reference code.
+** Tests for correct behavior of various routines on dereferenced group
+**
+****************************************************************/
+static void
+test_reference_group(void)
+{
+ hid_t fid = -1; /* File ID */
+ hid_t gid = -1, gid2 = -1; /* Group IDs */
+ hid_t did; /* Dataset ID */
+ hid_t sid; /* Dataspace ID */
+ H5R_ref_t wref; /* Reference to write */
+ H5R_ref_t rref; /* Reference to read */
+ H5G_info_t ginfo; /* Group info struct */
+ char objname[NAME_SIZE]; /* Buffer to store name */
+ H5O_info2_t oinfo; /* Object info struct */
+ int count = 0; /* Count within iterated group */
+ ssize_t size; /* Name length */
+ herr_t ret;
+
+ /* Create file with a group and a dataset containing an object reference to the group */
+ fid = H5Fcreate(FILE_REF_GRP, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(fid, H5I_INVALID_HID, "H5Fcreate");
+
+ /* Create dataspace to use for dataset */
+ sid = H5Screate(H5S_SCALAR);
+ CHECK(sid, H5I_INVALID_HID, "H5Screate");
+
+ /* Create group to refer to */
+ gid = H5Gcreate2(fid, GROUPNAME, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(gid, H5I_INVALID_HID, "H5Gcreate2");
+
+ /* Create nested groups */
+ gid2 = H5Gcreate2(gid, GROUPNAME2, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(gid2, H5I_INVALID_HID, "H5Gcreate2");
+ ret = H5Gclose(gid2);
+ CHECK(ret, FAIL, "H5Gclose");
+
+ gid2 = H5Gcreate2(gid, GROUPNAME3, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(gid2, H5I_INVALID_HID, "H5Gcreate2");
+ ret = H5Gclose(gid2);
+ CHECK(ret, FAIL, "H5Gclose");
+
+ /* Create bottom dataset */
+ did = H5Dcreate2(gid, DSETNAME2, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(did, H5I_INVALID_HID, "H5Dcreate2");
+ ret = H5Dclose(did);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ ret = H5Gclose(gid);
+ CHECK(ret, FAIL, "H5Gclose");
+
+ /* Create dataset */
+ did = H5Dcreate2(fid, DSETNAME, H5T_STD_REF, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(did, H5I_INVALID_HID, "H5Dcreate2");
+
+ /* Create reference to group */
+ ret = H5Rcreate_object(fid, GROUPNAME, H5P_DEFAULT, &wref);
+ CHECK(ret, FAIL, "H5Rcreate_object");
+
+ /* Write reference to disk */
+ ret = H5Dwrite(did, H5T_STD_REF, H5S_ALL, H5S_ALL, H5P_DEFAULT, &wref);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* Close objects */
+ ret = H5Dclose(did);
+ CHECK(ret, FAIL, "H5Dclose");
+ ret = H5Sclose(sid);
+ CHECK(ret, FAIL, "H5Sclose");
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Destroy reference */
+ ret = H5Rdestroy(&wref);
+ CHECK(ret, FAIL, "H5Rdestroy");
+
+ /* Re-open file */
+ fid = H5Fopen(FILE_REF_GRP, H5F_ACC_RDWR, H5P_DEFAULT);
+ CHECK(fid, H5I_INVALID_HID, "H5Fopen");
+
+ /* Re-open dataset */
+ did = H5Dopen2(fid, DSETNAME, H5P_DEFAULT);
+ CHECK(did, H5I_INVALID_HID, "H5Dopen2");
+
+ /* Read in the reference */
+ ret = H5Dread(did, H5T_STD_REF, H5S_ALL, H5S_ALL, H5P_DEFAULT, &rref);
+ CHECK(ret, FAIL, "H5Dread");
+
+ /* Dereference to get the group */
+ gid = H5Ropen_object(&rref, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(gid, H5I_INVALID_HID, "H5Ropen_object");
+
+ /* Iterate through objects in dereferenced group */
+ ret = H5Literate2(gid, H5_INDEX_NAME, H5_ITER_INC, NULL, test_deref_iter_op, &count);
+ CHECK(ret, FAIL, "H5Literate");
+
+ /* Various queries on the group opened */
+ ret = H5Gget_info(gid, &ginfo);
+ CHECK(ret, FAIL, "H5Gget_info");
+ VERIFY(ginfo.nlinks, 3, "H5Gget_info");
+
+ size = H5Lget_name_by_idx(gid, ".", H5_INDEX_NAME, H5_ITER_INC, (hsize_t)0, objname, (size_t)NAME_SIZE,
+ H5P_DEFAULT);
+ CHECK(size, (-1), "H5Lget_name_by_idx");
+ VERIFY_STR(objname, DSETNAME2, "H5Lget_name_by_idx");
+
+ ret = H5Oget_info_by_idx3(gid, ".", H5_INDEX_NAME, H5_ITER_INC, (hsize_t)0, &oinfo, H5O_INFO_BASIC,
+ H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Oget_info_by_idx3");
+ VERIFY(oinfo.type, H5O_TYPE_DATASET, "H5Oget_info_by_idx3");
+
+ /* Unlink one of the objects in the dereferenced group */
+ ret = H5Ldelete(gid, GROUPNAME2, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Ldelete");
+
+ /* Delete dataset object in dereferenced group (with other dataset still open) */
+ ret = H5Ldelete(gid, DSETNAME2, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Ldelete");
+
+ /* Close objects */
+ ret = H5Dclose(did);
+ CHECK(ret, FAIL, "H5Dclose");
+ ret = H5Gclose(gid);
+ CHECK(ret, FAIL, "H5Gclose");
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Destroy reference */
+ ret = H5Rdestroy(&rref);
+ CHECK(ret, FAIL, "H5Rdestroy");
+} /* test_reference_group() */
+
+/****************************************************************
+**
+** test_reference_attr(): Test basic H5R (reference) attribute reference code.
+** Tests references to attributes on various kinds of objects
+**
+****************************************************************/
+static void
+test_reference_attr(void)
+{
+ hid_t fid; /* HDF5 File ID */
+ hid_t dataset; /* Dataset ID */
+ hid_t group; /* Group ID */
+ hid_t attr; /* Attribute ID */
+ hid_t sid; /* Dataspace ID */
+ hid_t tid; /* Datatype ID */
+ hsize_t dims[] = {SPACE1_DIM1};
+ hid_t dapl_id; /* Dataset access property list */
+ H5R_ref_t ref_wbuf[SPACE1_DIM1], /* Buffer to write to disk */
+ ref_rbuf[SPACE1_DIM1]; /* Buffer read from disk */
+ unsigned wbuf[SPACE1_DIM1], rbuf[SPACE1_DIM1];
+ unsigned i; /* Local index variables */
+ H5O_type_t obj_type; /* Object type */
+ herr_t ret; /* Generic return value */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing Attribute Reference Functions\n"));
+
+ /* Create file */
+ fid = H5Fcreate(FILE_REF_ATTR, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(fid, H5I_INVALID_HID, "H5Fcreate");
+
+ /* Create dataspace for datasets */
+ sid = H5Screate_simple(SPACE1_RANK, dims, NULL);
+ CHECK(sid, H5I_INVALID_HID, "H5Screate_simple");
+
+ /* Create dataset access property list */
+ dapl_id = H5Pcreate(H5P_DATASET_ACCESS);
+ CHECK(dapl_id, H5I_INVALID_HID, "H5Pcreate");
+
+ /* Create a group */
+ group = H5Gcreate2(fid, "Group1", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(group, H5I_INVALID_HID, "H5Gcreate2");
+
+ /* Create an attribute for the group */
+ attr = H5Acreate2(group, "Attr2", H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(attr, H5I_INVALID_HID, "H5Acreate2");
+
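+ /* The group attribute (Attr2) stores the pattern (3 * index) + 1 */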
+ for (i = 0; i < SPACE1_DIM1; i++)
+ wbuf[i] = (i * 3) + 1;
+
+ /* Write attribute to disk */
+ ret = H5Awrite(attr, H5T_NATIVE_UINT, wbuf);
+ CHECK(ret, FAIL, "H5Awrite");
+
+ /* Close attribute */
+ ret = H5Aclose(attr);
+ CHECK(ret, FAIL, "H5Aclose");
+
+ /* Create a dataset (inside Group1) */
+ dataset = H5Dcreate2(group, "Dataset1", H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(dataset, H5I_INVALID_HID, "H5Dcreate2");
+
+ /* Create an attribute for the dataset */
+ attr = H5Acreate2(dataset, "Attr1", H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(attr, H5I_INVALID_HID, "H5Acreate2");
+
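+ /* The dataset attribute (Attr1) stores the pattern 3 * index */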
+ for (i = 0; i < SPACE1_DIM1; i++)
+ wbuf[i] = i * 3;
+
+ /* Write attribute to disk */
+ ret = H5Awrite(attr, H5T_NATIVE_UINT, wbuf);
+ CHECK(ret, FAIL, "H5Awrite");
+
+ /* Close attribute */
+ ret = H5Aclose(attr);
+ CHECK(ret, FAIL, "H5Aclose");
+
+ /* Close Dataset */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Create another dataset (inside Group1) */
+ dataset = H5Dcreate2(group, "Dataset2", H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(dataset, H5I_INVALID_HID, "H5Dcreate2");
+
+ /* Close Dataset */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Create a datatype to refer to */
+ tid = H5Tcreate(H5T_COMPOUND, sizeof(s1_t));
+ CHECK(tid, H5I_INVALID_HID, "H5Tcreate");
+
+ /* Insert fields */
+ ret = H5Tinsert(tid, "a", HOFFSET(s1_t, a), H5T_NATIVE_INT);
+ CHECK(ret, FAIL, "H5Tinsert");
+
+ ret = H5Tinsert(tid, "b", HOFFSET(s1_t, b), H5T_NATIVE_INT);
+ CHECK(ret, FAIL, "H5Tinsert");
+
+ ret = H5Tinsert(tid, "c", HOFFSET(s1_t, c), H5T_NATIVE_FLOAT);
+ CHECK(ret, FAIL, "H5Tinsert");
+
+ /* Save datatype for later */
+ ret = H5Tcommit2(group, "Datatype1", tid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Tcommit2");
+
+ /* Create an attribute for the datatype */
+ attr = H5Acreate2(tid, "Attr3", H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(attr, H5I_INVALID_HID, "H5Acreate2");
+
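+ /* The datatype attribute (Attr3) stores the pattern (3 * index) + 2 */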
+ for (i = 0; i < SPACE1_DIM1; i++)
+ wbuf[i] = (i * 3) + 2;
+
+ /* Write attribute to disk */
+ ret = H5Awrite(attr, H5T_NATIVE_UINT, wbuf);
+ CHECK(ret, FAIL, "H5Awrite");
+
+ /* Close attribute */
+ ret = H5Aclose(attr);
+ CHECK(ret, FAIL, "H5Aclose");
+
+ /* Close datatype */
+ ret = H5Tclose(tid);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ /* Close group */
+ ret = H5Gclose(group);
+ CHECK(ret, FAIL, "H5Gclose");
+
+ /* Create a dataset */
+ dataset = H5Dcreate2(fid, "Dataset3", H5T_STD_REF, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(dataset, H5I_INVALID_HID, "H5Dcreate2");
+
+ /* Create reference to dataset1 attribute */
+ ret = H5Rcreate_attr(fid, "/Group1/Dataset1", "Attr1", H5P_DEFAULT, &ref_wbuf[0]);
+ CHECK(ret, FAIL, "H5Rcreate_attr");
+ ret = H5Rget_obj_type3(&ref_wbuf[0], H5P_DEFAULT, &obj_type);
+ CHECK(ret, FAIL, "H5Rget_obj_type3");
+ VERIFY(obj_type, H5O_TYPE_DATASET, "H5Rget_obj_type3");
+
+ /* Create reference to dataset2 attribute */
+ ret = H5Rcreate_attr(fid, "/Group1/Dataset2", "Attr1", H5P_DEFAULT, &ref_wbuf[1]);
+ CHECK(ret, FAIL, "H5Rcreate_attr");
+ ret = H5Rget_obj_type3(&ref_wbuf[1], H5P_DEFAULT, &obj_type);
+ CHECK(ret, FAIL, "H5Rget_obj_type3");
+ VERIFY(obj_type, H5O_TYPE_DATASET, "H5Rget_obj_type3");
+
+ /* Create reference to group attribute */
+ ret = H5Rcreate_attr(fid, "/Group1", "Attr2", H5P_DEFAULT, &ref_wbuf[2]);
+ CHECK(ret, FAIL, "H5Rcreate_attr");
+ ret = H5Rget_obj_type3(&ref_wbuf[2], H5P_DEFAULT, &obj_type);
+ CHECK(ret, FAIL, "H5Rget_obj_type3");
+ VERIFY(obj_type, H5O_TYPE_GROUP, "H5Rget_obj_type3");
+
+ /* Create reference to named datatype attribute */
+ ret = H5Rcreate_attr(fid, "/Group1/Datatype1", "Attr3", H5P_DEFAULT, &ref_wbuf[3]);
+ CHECK(ret, FAIL, "H5Rcreate_attr");
+ ret = H5Rget_obj_type3(&ref_wbuf[3], H5P_DEFAULT, &obj_type);
+ CHECK(ret, FAIL, "H5Rget_obj_type3");
+ VERIFY(obj_type, H5O_TYPE_NAMED_DATATYPE, "H5Rget_obj_type3");
+
+ /* Write selection to disk */
+ ret = H5Dwrite(dataset, H5T_STD_REF, H5S_ALL, H5S_ALL, H5P_DEFAULT, ref_wbuf);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* Close disk dataspace */
+ ret = H5Sclose(sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close Dataset */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close file */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Re-open the file */
+ fid = H5Fopen(FILE_REF_ATTR, H5F_ACC_RDWR, H5P_DEFAULT);
+ CHECK(fid, H5I_INVALID_HID, "H5Fopen");
+
+ /* Open the dataset */
+ dataset = H5Dopen2(fid, "/Dataset3", H5P_DEFAULT);
+ CHECK(dataset, H5I_INVALID_HID, "H5Dopen2");
+
+ /* Read selection from disk */
+ ret = H5Dread(dataset, H5T_STD_REF, H5S_ALL, H5S_ALL, H5P_DEFAULT, ref_rbuf);
+ CHECK(ret, FAIL, "H5Dread");
+
+ /* Open attribute on dataset object */
+ attr = H5Ropen_attr(&ref_rbuf[0], H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(attr, H5I_INVALID_HID, "H5Ropen_attr");
+
+ /* Check information in the referenced attribute */
+ sid = H5Aget_space(attr);
+ CHECK(sid, H5I_INVALID_HID, "H5Aget_space");
+
+ ret = (int)H5Sget_simple_extent_npoints(sid);
+ VERIFY(ret, SPACE1_DIM1, "H5Sget_simple_extent_npoints");
+
+ /* Read from disk */
+ ret = H5Aread(attr, H5T_NATIVE_UINT, rbuf);
+ CHECK(ret, FAIL, "H5Aread");
+
+ for (i = 0; i < SPACE1_DIM1; i++)
+ VERIFY(rbuf[i], i * 3, "Data");
+
+ /* Close attribute */
+ ret = H5Aclose(attr);
+ CHECK(ret, FAIL, "H5Aclose");
+
+ /* Open attribute on group object */
+ attr = H5Ropen_attr(&ref_rbuf[2], H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(attr, H5I_INVALID_HID, "H5Ropen_attr");
+
+ /* Read from disk */
+ ret = H5Aread(attr, H5T_NATIVE_UINT, rbuf);
+ CHECK(ret, FAIL, "H5Aread");
+
+ for (i = 0; i < SPACE1_DIM1; i++)
+ VERIFY(rbuf[i], (i * 3) + 1, "Data");
+
+ /* Close attribute */
+ ret = H5Aclose(attr);
+ CHECK(ret, FAIL, "H5Aclose");
+
+ /* Open attribute on named datatype object */
+ attr = H5Ropen_attr(&ref_rbuf[3], H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(attr, H5I_INVALID_HID, "H5Ropen_attr");
+
+ /* Read from disk */
+ ret = H5Aread(attr, H5T_NATIVE_UINT, rbuf);
+ CHECK(ret, FAIL, "H5Aread");
+
+ for (i = 0; i < SPACE1_DIM1; i++)
+ VERIFY(rbuf[i], (i * 3) + 2, "Data");
+
+ /* Close attribute */
+ ret = H5Aclose(attr);
+ CHECK(ret, FAIL, "H5Aclose");
+
+ /* Close dataset */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close dataset access property list */
+ ret = H5Pclose(dapl_id);
+ CHECK(ret, FAIL, "H5Pclose");
+
+ /* Close file */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Free memory buffers */
+ for (i = 0; i < SPACE1_DIM1; i++) {
+ ret = H5Rdestroy(&ref_wbuf[i]);
+ CHECK(ret, FAIL, "H5Rdestroy");
+ ret = H5Rdestroy(&ref_rbuf[i]);
+ CHECK(ret, FAIL, "H5Rdestroy");
+ }
+} /* test_reference_attr() */
+
+/****************************************************************
+**
+** test_reference_external():
+** Tests attribute references stored in a second file that point back to objects in the file where they were created
+**
+****************************************************************/
+static void
+test_reference_external(void)
+{
+ hid_t fid1, fid2; /* HDF5 File ID */
+ hid_t dataset; /* Dataset ID */
+ hid_t group; /* Group ID */
+ hid_t attr; /* Attribute ID */
+ hid_t sid; /* Dataspace ID */
+ hid_t tid; /* Datatype ID */
+ hsize_t dims[] = {SPACE1_DIM1};
+ hid_t dapl_id; /* Dataset access property list */
+ H5R_ref_t ref_wbuf[SPACE1_DIM1], /* Buffer to write to disk */
+ ref_rbuf[SPACE1_DIM1]; /* Buffer read from disk */
+ unsigned wbuf[SPACE1_DIM1], rbuf[SPACE1_DIM1];
+ unsigned i; /* Local index variables */
+ H5O_type_t obj_type; /* Object type */
+ herr_t ret; /* Generic return value */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing External References Functions\n"));
+
+ /* Create file */
+ fid1 = H5Fcreate(FILE_REF_EXT1, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(fid1, H5I_INVALID_HID, "H5Fcreate");
+
+ /* Create dataspace for datasets */
+ sid = H5Screate_simple(SPACE1_RANK, dims, NULL);
+ CHECK(sid, H5I_INVALID_HID, "H5Screate_simple");
+
+ /* Create dataset access property list */
+ dapl_id = H5Pcreate(H5P_DATASET_ACCESS);
+ CHECK(dapl_id, H5I_INVALID_HID, "H5Pcreate");
+
+ /* Create a group */
+ group = H5Gcreate2(fid1, "Group1", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(group, H5I_INVALID_HID, "H5Gcreate2");
+
+ /* Create an attribute for the group */
+ attr = H5Acreate2(group, "Attr2", H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(attr, H5I_INVALID_HID, "H5Acreate2");
+
+ for (i = 0; i < SPACE1_DIM1; i++)
+ wbuf[i] = (i * 3) + 1;
+
+ /* Write attribute to disk */
+ ret = H5Awrite(attr, H5T_NATIVE_UINT, wbuf);
+ CHECK(ret, FAIL, "H5Awrite");
+
+ /* Close attribute */
+ ret = H5Aclose(attr);
+ CHECK(ret, FAIL, "H5Aclose");
+
+ /* Create a dataset (inside Group1) */
+ dataset = H5Dcreate2(group, "Dataset1", H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(dataset, H5I_INVALID_HID, "H5Dcreate2");
+
+ /* Create an attribute for the dataset */
+ attr = H5Acreate2(dataset, "Attr1", H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(attr, H5I_INVALID_HID, "H5Acreate2");
+
+ for (i = 0; i < SPACE1_DIM1; i++)
+ wbuf[i] = i * 3;
+
+ /* Write attribute to disk */
+ ret = H5Awrite(attr, H5T_NATIVE_UINT, wbuf);
+ CHECK(ret, FAIL, "H5Awrite");
+
+ /* Close attribute */
+ ret = H5Aclose(attr);
+ CHECK(ret, FAIL, "H5Aclose");
+
+ /* Close Dataset */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Create another dataset (inside Group1) */
+ dataset = H5Dcreate2(group, "Dataset2", H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(dataset, H5I_INVALID_HID, "H5Dcreate2");
+
+ /* Close Dataset */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Create a datatype to refer to */
+ tid = H5Tcreate(H5T_COMPOUND, sizeof(s1_t));
+ CHECK(tid, H5I_INVALID_HID, "H5Tcreate");
+
+ /* Insert fields */
+ ret = H5Tinsert(tid, "a", HOFFSET(s1_t, a), H5T_NATIVE_INT);
+ CHECK(ret, FAIL, "H5Tinsert");
+
+ ret = H5Tinsert(tid, "b", HOFFSET(s1_t, b), H5T_NATIVE_INT);
+ CHECK(ret, FAIL, "H5Tinsert");
+
+ ret = H5Tinsert(tid, "c", HOFFSET(s1_t, c), H5T_NATIVE_FLOAT);
+ CHECK(ret, FAIL, "H5Tinsert");
+
+ /* Save datatype for later */
+ ret = H5Tcommit2(group, "Datatype1", tid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Tcommit2");
+
+ /* Create an attribute for the datatype */
+ attr = H5Acreate2(tid, "Attr3", H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(attr, H5I_INVALID_HID, "H5Acreate2");
+
+ for (i = 0; i < SPACE1_DIM1; i++)
+ wbuf[i] = (i * 3) + 2;
+
+ /* Write attribute to disk */
+ ret = H5Awrite(attr, H5T_NATIVE_UINT, wbuf);
+ CHECK(ret, FAIL, "H5Awrite");
+
+ /* Close attribute */
+ ret = H5Aclose(attr);
+ CHECK(ret, FAIL, "H5Aclose");
+
+ /* Close datatype */
+ ret = H5Tclose(tid);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ /* Close group */
+ ret = H5Gclose(group);
+ CHECK(ret, FAIL, "H5Gclose");
+
+ /* Create reference to dataset1 attribute */
+ ret = H5Rcreate_attr(fid1, "/Group1/Dataset1", "Attr1", H5P_DEFAULT, &ref_wbuf[0]);
+ CHECK(ret, FAIL, "H5Rcreate_attr");
+ ret = H5Rget_obj_type3(&ref_wbuf[0], H5P_DEFAULT, &obj_type);
+ CHECK(ret, FAIL, "H5Rget_obj_type3");
+ VERIFY(obj_type, H5O_TYPE_DATASET, "H5Rget_obj_type3");
+
+ /* Create reference to dataset2 attribute */
+ ret = H5Rcreate_attr(fid1, "/Group1/Dataset2", "Attr1", H5P_DEFAULT, &ref_wbuf[1]);
+ CHECK(ret, FAIL, "H5Rcreate_attr");
+ ret = H5Rget_obj_type3(&ref_wbuf[1], H5P_DEFAULT, &obj_type);
+ CHECK(ret, FAIL, "H5Rget_obj_type3");
+ VERIFY(obj_type, H5O_TYPE_DATASET, "H5Rget_obj_type3");
+
+ /* Create reference to group attribute */
+ ret = H5Rcreate_attr(fid1, "/Group1", "Attr2", H5P_DEFAULT, &ref_wbuf[2]);
+ CHECK(ret, FAIL, "H5Rcreate_attr");
+ ret = H5Rget_obj_type3(&ref_wbuf[2], H5P_DEFAULT, &obj_type);
+ CHECK(ret, FAIL, "H5Rget_obj_type3");
+ VERIFY(obj_type, H5O_TYPE_GROUP, "H5Rget_obj_type3");
+
+ /* Create reference to named datatype attribute */
+ ret = H5Rcreate_attr(fid1, "/Group1/Datatype1", "Attr3", H5P_DEFAULT, &ref_wbuf[3]);
+ CHECK(ret, FAIL, "H5Rcreate_attr");
+ ret = H5Rget_obj_type3(&ref_wbuf[3], H5P_DEFAULT, &obj_type);
+ CHECK(ret, FAIL, "H5Rget_obj_type3");
+ VERIFY(obj_type, H5O_TYPE_NAMED_DATATYPE, "H5Rget_obj_type3");
+
+ /* Close disk dataspace */
+ ret = H5Sclose(sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close file */
+ ret = H5Fclose(fid1);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Create file */
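+ /* The attribute references created against the first file are stored in this second file and dereferenced from it */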
+ fid2 = H5Fcreate(FILE_REF_EXT2, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(fid2, H5I_INVALID_HID, "H5Fcreate");
+
+ /* Create dataspace for datasets */
+ sid = H5Screate_simple(SPACE1_RANK, dims, NULL);
+ CHECK(sid, H5I_INVALID_HID, "H5Screate_simple");
+
+ /* Create a dataset */
+ dataset = H5Dcreate2(fid2, "Dataset3", H5T_STD_REF, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(dataset, H5I_INVALID_HID, "H5Dcreate2");
+
+ /* Write selection to disk */
+ ret = H5Dwrite(dataset, H5T_STD_REF, H5S_ALL, H5S_ALL, H5P_DEFAULT, ref_wbuf);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* Close disk dataspace */
+ ret = H5Sclose(sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close Dataset */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close file */
+ ret = H5Fclose(fid2);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Re-open the file */
+ fid2 = H5Fopen(FILE_REF_EXT2, H5F_ACC_RDWR, H5P_DEFAULT);
+ CHECK(fid2, H5I_INVALID_HID, "H5Fopen");
+
+ /* Open the dataset */
+ dataset = H5Dopen2(fid2, "/Dataset3", H5P_DEFAULT);
+ CHECK(dataset, H5I_INVALID_HID, "H5Dopen2");
+
+ /* Read selection from disk */
+ ret = H5Dread(dataset, H5T_STD_REF, H5S_ALL, H5S_ALL, H5P_DEFAULT, ref_rbuf);
+ CHECK(ret, FAIL, "H5Dread");
+
+ /* Open attribute on dataset object */
+ attr = H5Ropen_attr(&ref_rbuf[0], H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(attr, H5I_INVALID_HID, "H5Ropen_attr");
+
+ /* Check information in referenced dataset */
+ sid = H5Aget_space(attr);
+ CHECK(sid, H5I_INVALID_HID, "H5Aget_space");
+
+ ret = (int)H5Sget_simple_extent_npoints(sid);
+ VERIFY(ret, SPACE1_DIM1, "H5Sget_simple_extent_npoints");
+
+ /* Read from disk */
+ ret = H5Aread(attr, H5T_NATIVE_UINT, rbuf);
+ CHECK(ret, FAIL, "H5Aread");
+
+ for (i = 0; i < SPACE1_DIM1; i++)
+ VERIFY(rbuf[i], i * 3, "Data");
+
+ /* Close dereferenced Dataset */
+ ret = H5Aclose(attr);
+ CHECK(ret, FAIL, "H5Aclose");
+
+ /* Open attribute on group object */
+ attr = H5Ropen_attr(&ref_rbuf[2], H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(attr, H5I_INVALID_HID, "H5Ropen_attr");
+
+ /* Read from disk */
+ ret = H5Aread(attr, H5T_NATIVE_UINT, rbuf);
+ CHECK(ret, FAIL, "H5Aread");
+
+ for (i = 0; i < SPACE1_DIM1; i++)
+ VERIFY(rbuf[i], (i * 3) + 1, "Data");
+
+ /* Close attribute */
+ ret = H5Aclose(attr);
+ CHECK(ret, FAIL, "H5Aclose");
+
+ /* Open attribute on named datatype object */
+ attr = H5Ropen_attr(&ref_rbuf[3], H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(attr, H5I_INVALID_HID, "H5Ropen_attr");
+
+ /* Read from disk */
+ ret = H5Aread(attr, H5T_NATIVE_UINT, rbuf);
+ CHECK(ret, FAIL, "H5Aread");
+
+ for (i = 0; i < SPACE1_DIM1; i++)
+ VERIFY(rbuf[i], (i * 3) + 2, "Data");
+
+ /* Close attribute */
+ ret = H5Aclose(attr);
+ CHECK(ret, FAIL, "H5Aclose");
+
+ /* Close dataset */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close dataset access property list */
+ ret = H5Pclose(dapl_id);
+ CHECK(ret, FAIL, "H5Pclose");
+
+ /* Close file */
+ ret = H5Fclose(fid2);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Free memory buffers */
+ for (i = 0; i < SPACE1_DIM1; i++) {
+ ret = H5Rdestroy(&ref_wbuf[i]);
+ CHECK(ret, FAIL, "H5Rdestroy");
+ ret = H5Rdestroy(&ref_rbuf[i]);
+ CHECK(ret, FAIL, "H5Rdestroy");
+ }
+} /* test_reference_external() */
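The external-reference test above exercises the full attribute-reference workflow: create references with H5Rcreate_attr in one file, store them as H5T_STD_REF data in a second file, then reopen the referenced attributes with H5Ropen_attr. A condensed, illustrative sketch of that same round trip is shown below; it is not part of the patch, the names ("src.h5", "dst.h5", "dset", "attr") are hypothetical, and error checking is omitted for brevity.

#include "hdf5.h"

/* Illustrative sketch only -- not part of the patch. Names ("src.h5",
 * "dst.h5", "dset", "attr") are hypothetical and error checks are omitted. */
static void
attr_ref_sketch(void)
{
    hsize_t   dim    = 4;
    int       val[4] = {1, 2, 3, 4};
    H5R_ref_t ref;

    /* Source file: a dataset carrying the attribute we want to reference */
    hid_t sid  = H5Screate_simple(1, &dim, NULL);
    hid_t src  = H5Fcreate("src.h5", H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
    hid_t dset = H5Dcreate2(src, "dset", H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
    hid_t attr = H5Acreate2(dset, "attr", H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT);
    H5Awrite(attr, H5T_NATIVE_INT, val);

    /* The reference records the file, the object path, and the attribute name */
    H5Rcreate_attr(src, "/dset", "attr", H5P_DEFAULT, &ref);

    /* Destination file: store the reference as H5T_STD_REF data */
    hid_t dst  = H5Fcreate("dst.h5", H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
    hid_t rsid = H5Screate(H5S_SCALAR);
    hid_t rset = H5Dcreate2(dst, "ref", H5T_STD_REF, rsid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
    H5Dwrite(rset, H5T_STD_REF, H5S_ALL, H5S_ALL, H5P_DEFAULT, &ref);

    /* Dereference: reopens the attribute on the object in the source file */
    hid_t attr2 = H5Ropen_attr(&ref, H5P_DEFAULT, H5P_DEFAULT);

    /* H5R_ref_t values hold resources and must be destroyed when done */
    H5Aclose(attr2);
    H5Rdestroy(&ref);
    H5Dclose(rset);
    H5Sclose(rsid);
    H5Fclose(dst);
    H5Aclose(attr);
    H5Dclose(dset);
    H5Sclose(sid);
    H5Fclose(src);
}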
+
+/****************************************************************
+**
+** test_reference_compat_conv(): Test basic H5R (reference) object reference code.
+** Tests deprecated API routines and type conversion.
+**
+****************************************************************/
+#if 0
+static void
+test_reference_compat_conv(void)
+{
+ hid_t fid1; /* HDF5 File IDs */
+ hid_t dataset, dset2; /* Dataset ID */
+ hid_t group, group2; /* Group ID */
+ hid_t sid1, sid2, sid3; /* Dataspace IDs */
+ hid_t tid1, tid2; /* Datatype ID */
+ hsize_t dims1[] = {SPACE1_DIM1}, dims2[] = {SPACE2_DIM1, SPACE2_DIM2},
+ dims3[] = {SPACE1_DIM1}; /* Purposely set dimension larger to test NULL references */
+ hsize_t start[SPACE2_RANK]; /* Starting location of hyperslab */
+ hsize_t stride[SPACE2_RANK]; /* Stride of hyperslab */
+ hsize_t count[SPACE2_RANK]; /* Element count of hyperslab */
+ hsize_t block[SPACE2_RANK]; /* Block size of hyperslab */
+ hsize_t coord1[POINT1_NPOINTS][SPACE2_RANK]; /* Coordinates for point selection */
+ hobj_ref_t *wbuf_obj = NULL; /* Buffer to write to disk */
+ H5R_ref_t *rbuf_obj = NULL; /* Buffer read from disk */
+ hdset_reg_ref_t *wbuf_reg = NULL; /* Buffer to write to disk */
+ H5R_ref_t *rbuf_reg = NULL; /* Buffer read from disk */
+ H5O_type_t obj_type; /* Object type */
+ herr_t ret; /* Generic return value */
+ unsigned int i; /* Counter */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing Deprecated Object Reference Functions\n"));
+
+ /* Allocate write & read buffers */
+ wbuf_obj = (hobj_ref_t *)HDcalloc(sizeof(hobj_ref_t), SPACE1_DIM1);
+ rbuf_obj = HDcalloc(sizeof(H5R_ref_t), SPACE1_DIM1);
+ wbuf_reg = HDcalloc(sizeof(hdset_reg_ref_t), SPACE1_DIM1);
+ rbuf_reg = HDcalloc(sizeof(H5R_ref_t), SPACE1_DIM1);
+
+ /* Create file */
+ fid1 = H5Fcreate(FILE_REF_COMPAT, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(fid1, H5I_INVALID_HID, "H5Fcreate");
+
+ /* Create dataspace for datasets */
+ sid1 = H5Screate_simple(SPACE1_RANK, dims1, NULL);
+ CHECK(sid1, H5I_INVALID_HID, "H5Screate_simple");
+
+ /* Create another dataspace for datasets */
+ sid2 = H5Screate_simple(SPACE2_RANK, dims2, NULL);
+ CHECK(sid2, H5I_INVALID_HID, "H5Screate_simple");
+
+ /* Create another dataspace for datasets */
+ sid3 = H5Screate_simple(SPACE1_RANK, dims3, NULL);
+ CHECK(sid3, H5I_INVALID_HID, "H5Screate_simple");
+
+ /* Create a group */
+ group = H5Gcreate2(fid1, "Group1", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(group, H5I_INVALID_HID, "H5Gcreate2");
+
+ /* Create a dataset (inside Group1) */
+ dataset = H5Dcreate2(group, "Dataset1", H5T_NATIVE_UINT, sid2, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(dataset, H5I_INVALID_HID, "H5Dcreate2");
+
+ /* Close Dataset */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Create another dataset (inside Group1) */
+ dataset = H5Dcreate2(group, "Dataset2", H5T_NATIVE_UCHAR, sid2, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(dataset, H5I_INVALID_HID, "H5Dcreate2");
+
+ /* Close Dataset */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Create a datatype to refer to */
+ tid1 = H5Tcreate(H5T_COMPOUND, sizeof(s1_t));
+ CHECK(tid1, H5I_INVALID_HID, "H5Tcreate");
+
+ /* Insert fields */
+ ret = H5Tinsert(tid1, "a", HOFFSET(s1_t, a), H5T_NATIVE_INT);
+ CHECK(ret, FAIL, "H5Tinsert");
+
+ ret = H5Tinsert(tid1, "b", HOFFSET(s1_t, b), H5T_NATIVE_INT);
+ CHECK(ret, FAIL, "H5Tinsert");
+
+ ret = H5Tinsert(tid1, "c", HOFFSET(s1_t, c), H5T_NATIVE_FLOAT);
+ CHECK(ret, FAIL, "H5Tinsert");
+
+ /* Save datatype for later */
+ ret = H5Tcommit2(group, "Datatype1", tid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Tcommit2");
+
+ /* Close datatype */
+ ret = H5Tclose(tid1);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ /* Close group */
+ ret = H5Gclose(group);
+ CHECK(ret, FAIL, "H5Gclose");
+
+ /* Create a dataset with object reference datatype */
+ dataset = H5Dcreate2(fid1, "Dataset3", H5T_STD_REF_OBJ, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(dataset, H5I_INVALID_HID, "H5Dcreate2");
+
+ /* Create reference to dataset */
+ ret = H5Rcreate(&wbuf_obj[0], fid1, "/Group1/Dataset1", H5R_OBJECT, H5I_INVALID_HID);
+ CHECK(ret, FAIL, "H5Rcreate");
+
+ /* Create reference to dataset */
+ ret = H5Rcreate(&wbuf_obj[1], fid1, "/Group1/Dataset2", H5R_OBJECT, H5I_INVALID_HID);
+ CHECK(ret, FAIL, "H5Rcreate");
+
+ /* Create reference to group */
+ ret = H5Rcreate(&wbuf_obj[2], fid1, "/Group1", H5R_OBJECT, H5I_INVALID_HID);
+ CHECK(ret, FAIL, "H5Rcreate");
+
+ /* Create reference to named datatype */
+ ret = H5Rcreate(&wbuf_obj[3], fid1, "/Group1/Datatype1", H5R_OBJECT, H5I_INVALID_HID);
+ CHECK(ret, FAIL, "H5Rcreate");
+
+ /* Write references to disk */
+ ret = H5Dwrite(dataset, H5T_STD_REF_OBJ, H5S_ALL, H5S_ALL, H5P_DEFAULT, wbuf_obj);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* Close Dataset */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Create a dataset with region reference datatype */
+ dataset = H5Dcreate2(fid1, "Dataset4", H5T_STD_REF_DSETREG, sid3, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(dataset, H5I_INVALID_HID, "H5Dcreate2");
+
+ /* Select 6x6 hyperslab for first reference */
+ start[0] = 2;
+ start[1] = 2;
+ stride[0] = 1;
+ stride[1] = 1;
+ count[0] = 1;
+ count[1] = 1;
+ block[0] = 6;
+ block[1] = 6;
+ ret = H5Sselect_hyperslab(sid2, H5S_SELECT_SET, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* Create first dataset region */
+ ret = H5Rcreate(&wbuf_reg[0], fid1, "/Group1/Dataset1", H5R_DATASET_REGION, sid2);
+ CHECK(ret, FAIL, "H5Rcreate");
+
+ /* Select sequence of ten points for second reference */
+ coord1[0][0] = 6;
+ coord1[0][1] = 9;
+ coord1[1][0] = 2;
+ coord1[1][1] = 2;
+ coord1[2][0] = 8;
+ coord1[2][1] = 4;
+ coord1[3][0] = 1;
+ coord1[3][1] = 6;
+ coord1[4][0] = 2;
+ coord1[4][1] = 8;
+ coord1[5][0] = 3;
+ coord1[5][1] = 2;
+ coord1[6][0] = 0;
+ coord1[6][1] = 4;
+ coord1[7][0] = 9;
+ coord1[7][1] = 0;
+ coord1[8][0] = 7;
+ coord1[8][1] = 1;
+ coord1[9][0] = 3;
+ coord1[9][1] = 3;
+ ret = H5Sselect_elements(sid2, H5S_SELECT_SET, (size_t)POINT1_NPOINTS, (const hsize_t *)coord1);
+ CHECK(ret, FAIL, "H5Sselect_elements");
+
+ /* Create second dataset region */
+ ret = H5Rcreate(&wbuf_reg[1], fid1, "/Group1/Dataset2", H5R_DATASET_REGION, sid2);
+ CHECK(ret, FAIL, "H5Rcreate");
+
+ /* Write selection to disk */
+ ret = H5Dwrite(dataset, H5T_STD_REF_DSETREG, H5S_ALL, H5S_ALL, H5P_DEFAULT, wbuf_reg);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* Close Dataset */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close disk dataspaces */
+ ret = H5Sclose(sid1);
+ CHECK(ret, FAIL, "H5Sclose");
+ ret = H5Sclose(sid2);
+ CHECK(ret, FAIL, "H5Sclose");
+ ret = H5Sclose(sid3);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close file */
+ ret = H5Fclose(fid1);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Re-open the file */
+ fid1 = H5Fopen(FILE_REF_COMPAT, H5F_ACC_RDWR, H5P_DEFAULT);
+ CHECK(fid1, H5I_INVALID_HID, "H5Fopen");
+
+ /* Open the object reference dataset */
+ dataset = H5Dopen2(fid1, "/Dataset3", H5P_DEFAULT);
+ CHECK(dataset, H5I_INVALID_HID, "H5Dopen2");
+
+ /* Read selection from disk */
+ ret = H5Dread(dataset, H5T_STD_REF, H5S_ALL, H5S_ALL, H5P_DEFAULT, rbuf_obj);
+ CHECK(ret, FAIL, "H5Dread");
+
+ /* Verify type of objects pointed at */
+ ret = H5Rget_obj_type3(&rbuf_obj[0], H5P_DEFAULT, &obj_type);
+ CHECK(ret, FAIL, "H5Rget_obj_type3");
+ VERIFY(obj_type, H5O_TYPE_DATASET, "H5Rget_obj_type3");
+
+ ret = H5Rget_obj_type3(&rbuf_obj[1], H5P_DEFAULT, &obj_type);
+ CHECK(ret, FAIL, "H5Rget_obj_type3");
+ VERIFY(obj_type, H5O_TYPE_DATASET, "H5Rget_obj_type3");
+
+ ret = H5Rget_obj_type3(&rbuf_obj[2], H5P_DEFAULT, &obj_type);
+ CHECK(ret, FAIL, "H5Rget_obj_type3");
+ VERIFY(obj_type, H5O_TYPE_GROUP, "H5Rget_obj_type3");
+
+ ret = H5Rget_obj_type3(&rbuf_obj[3], H5P_DEFAULT, &obj_type);
+ CHECK(ret, FAIL, "H5Rget_obj_type3");
+ VERIFY(obj_type, H5O_TYPE_NAMED_DATATYPE, "H5Rget_obj_type3");
+
+ /* Make sure the referenced objects can be opened */
+ dset2 = H5Ropen_object(&rbuf_obj[0], H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(dset2, H5I_INVALID_HID, "H5Ropen_object");
+
+ ret = H5Dclose(dset2);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ dset2 = H5Ropen_object(&rbuf_obj[1], H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(dset2, H5I_INVALID_HID, "H5Ropen_object");
+
+ ret = H5Dclose(dset2);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ group2 = H5Ropen_object(&rbuf_obj[2], H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(group2, H5I_INVALID_HID, "H5Ropen_object");
+
+ ret = H5Gclose(group2);
+ CHECK(ret, FAIL, "H5Gclose");
+
+ tid2 = H5Ropen_object(&rbuf_obj[3], H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(tid2, H5I_INVALID_HID, "H5Ropen_object");
+
+ ret = H5Tclose(tid2);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ /* Close Dataset */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Open the dataset region reference dataset */
+ dataset = H5Dopen2(fid1, "/Dataset4", H5P_DEFAULT);
+ CHECK(dataset, H5I_INVALID_HID, "H5Dopen2");
+
+ /* Read selection from disk */
+ ret = H5Dread(dataset, H5T_STD_REF, H5S_ALL, H5S_ALL, H5P_DEFAULT, rbuf_reg);
+ CHECK(ret, FAIL, "H5Dread");
+
+ /* Verify type of objects pointed at */
+ ret = H5Rget_obj_type3(&rbuf_reg[0], H5P_DEFAULT, &obj_type);
+ CHECK(ret, FAIL, "H5Rget_obj_type3");
+ VERIFY(obj_type, H5O_TYPE_DATASET, "H5Rget_obj_type3");
+
+ ret = H5Rget_obj_type3(&rbuf_reg[1], H5P_DEFAULT, &obj_type);
+ CHECK(ret, FAIL, "H5Rget_obj_type3");
+ VERIFY(obj_type, H5O_TYPE_DATASET, "H5Rget_obj_type3");
+
+ /* Make sure the referenced objects can be opened */
+ dset2 = H5Ropen_object(&rbuf_reg[0], H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(dset2, H5I_INVALID_HID, "H5Ropen_object");
+
+ ret = H5Dclose(dset2);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ dset2 = H5Ropen_object(&rbuf_reg[1], H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(dset2, H5I_INVALID_HID, "H5Ropen_object");
+
+ ret = H5Dclose(dset2);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close Dataset */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close file */
+ ret = H5Fclose(fid1);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Destroy references */
+ for (i = 0; i < dims1[0]; i++) {
+ ret = H5Rdestroy(&rbuf_obj[i]);
+ CHECK(ret, FAIL, "H5Rdestroy");
+ }
+ for (i = 0; i < dims3[0]; i++) {
+ ret = H5Rdestroy(&rbuf_reg[i]);
+ CHECK(ret, FAIL, "H5Rdestroy");
+ }
+
+ /* Free memory buffers */
+ HDfree(wbuf_obj);
+ HDfree(rbuf_obj);
+ HDfree(wbuf_reg);
+ HDfree(rbuf_reg);
+} /* test_reference_compat_conv() */
+#endif
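The compatibility test above (currently compiled out with #if 0) mixes the deprecated hobj_ref_t / hdset_reg_ref_t API with the revised H5R_ref_t API. A minimal sketch of the difference between the two object-reference creation paths follows; the dataset path is hypothetical and it assumes a build with deprecated symbols enabled.

#include "hdf5.h"

/* Sketch: deprecated vs. revised object-reference creation. The dataset path
 * is hypothetical; assumes the library was built with deprecated symbols. */
static void
ref_api_comparison(hid_t file_id)
{
    /* Deprecated API: opaque hobj_ref_t, no destroy call required */
    hobj_ref_t old_ref;
    H5Rcreate(&old_ref, file_id, "/some_dataset", H5R_OBJECT, H5I_INVALID_HID);

    /* Revised API (HDF5 1.12+): H5R_ref_t holds resources and must be destroyed */
    H5R_ref_t  new_ref;
    H5O_type_t obj_type;
    H5Rcreate_object(file_id, "/some_dataset", H5P_DEFAULT, &new_ref);
    H5Rget_obj_type3(&new_ref, H5P_DEFAULT, &obj_type);
    H5Rdestroy(&new_ref);
}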
+
+/****************************************************************
+**
+** test_reference_perf(): Test basic H5R (reference) object reference
+** performance.
+**
+****************************************************************/
+static void
+test_reference_perf(void)
+{
+ hid_t fid1; /* HDF5 File IDs */
+ hid_t dataset, /* Dataset ID */
+ dset2; /* Dereferenced dataset ID */
+ hid_t group; /* Group ID */
+ hid_t sid1; /* Dataspace ID */
+ hid_t tid1; /* Datatype ID */
+ hsize_t dims1[] = {1};
+ hid_t dapl_id; /* Dataset access property list */
+ H5R_ref_t *wbuf, /* buffer to write to disk */
+ *rbuf, /* buffer read from disk */
+ *tbuf; /* temp. buffer read from disk */
+ H5R_ref_t *wbuf_reg, /* buffer to write to disk */
+ *rbuf_reg; /* buffer read from disk */
+ hobj_ref_t *wbuf_deprec, /* deprecated references */
+ *rbuf_deprec; /* deprecated references */
+ hdset_reg_ref_t *wbuf_reg_deprec, /* deprecated references*/
+ *rbuf_reg_deprec; /* deprecated references*/
+ unsigned *ibuf, *obuf;
+ unsigned i, j; /* Counters */
+ H5O_type_t obj_type; /* Object type */
+ herr_t ret; /* Generic return value */
+ double t1, t2, t; /* Timers */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing Object Reference Performance\n"));
+
+ /* Allocate write & read buffers */
+ wbuf = HDcalloc(sizeof(H5R_ref_t), SPACE1_DIM1);
+ obuf = HDcalloc(sizeof(unsigned), SPACE1_DIM1);
+ ibuf = HDcalloc(sizeof(unsigned), SPACE1_DIM1);
+ wbuf_deprec = (hobj_ref_t *)HDcalloc(sizeof(hobj_ref_t), SPACE1_DIM1);
+ rbuf = (H5R_ref_t *)HDcalloc(sizeof(H5R_ref_t), SPACE1_DIM1);
+ rbuf_deprec = (hobj_ref_t *)HDcalloc(sizeof(hobj_ref_t), SPACE1_DIM1);
+ tbuf = (H5R_ref_t *)HDcalloc(sizeof(H5R_ref_t), SPACE1_DIM1);
+ wbuf_reg = (H5R_ref_t *)HDcalloc(sizeof(H5R_ref_t), SPACE1_DIM1);
+ rbuf_reg = (H5R_ref_t *)HDcalloc(sizeof(H5R_ref_t), SPACE1_DIM1);
+ wbuf_reg_deprec = (hdset_reg_ref_t *)HDcalloc(sizeof(hdset_reg_ref_t), SPACE1_DIM1);
+ rbuf_reg_deprec = (hdset_reg_ref_t *)HDcalloc(sizeof(hdset_reg_ref_t), SPACE1_DIM1);
+
+ for (i = 0; i < SPACE1_DIM1; i++)
+ obuf[i] = i * 3;
+
+ /* Create file */
+ fid1 = H5Fcreate(FILE_REF_OBJ, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(fid1, H5I_INVALID_HID, "H5Fcreate");
+
+ /* Create dataspace for datasets */
+ sid1 = H5Screate_simple(SPACE1_RANK, dims1, NULL);
+ CHECK(sid1, H5I_INVALID_HID, "H5Screate_simple");
+
+ /* Create dataset access property list */
+ dapl_id = H5Pcreate(H5P_DATASET_ACCESS);
+ CHECK(dapl_id, H5I_INVALID_HID, "H5Pcreate");
+
+ /* Create a group */
+ group = H5Gcreate2(fid1, "Group1", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(group, H5I_INVALID_HID, "H5Gcreate2");
+
+ /* Create a dataset (inside Group1) */
+ dataset = H5Dcreate2(group, "Dataset1", H5T_NATIVE_UINT, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(dataset, H5I_INVALID_HID, "H5Dcreate2");
+
+ /* Write selection to disk */
+ ret = H5Dwrite(dataset, H5T_NATIVE_UINT, H5S_ALL, H5S_ALL, H5P_DEFAULT, obuf);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* Close Dataset */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Create another dataset (inside Group1) */
+ dataset = H5Dcreate2(group, "Dataset2", H5T_NATIVE_UCHAR, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(dataset, H5I_INVALID_HID, "H5Dcreate2");
+
+ /* Close Dataset */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Create a datatype to refer to */
+ tid1 = H5Tcreate(H5T_COMPOUND, sizeof(s1_t));
+ CHECK(tid1, H5I_INVALID_HID, "H5Tcreate");
+
+ /* Insert fields */
+ ret = H5Tinsert(tid1, "a", HOFFSET(s1_t, a), H5T_NATIVE_INT);
+ CHECK(ret, FAIL, "H5Tinsert");
+
+ ret = H5Tinsert(tid1, "b", HOFFSET(s1_t, b), H5T_NATIVE_INT);
+ CHECK(ret, FAIL, "H5Tinsert");
+
+ ret = H5Tinsert(tid1, "c", HOFFSET(s1_t, c), H5T_NATIVE_FLOAT);
+ CHECK(ret, FAIL, "H5Tinsert");
+
+ /* Save datatype for later */
+ ret = H5Tcommit2(group, "Datatype1", tid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Tcommit2");
+
+ /* Close datatype */
+ ret = H5Tclose(tid1);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ /* Close group */
+ ret = H5Gclose(group);
+ CHECK(ret, FAIL, "H5Gclose");
+
+ /* Create a dataset */
+ dataset = H5Dcreate2(fid1, "Dataset3", H5T_STD_REF, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(dataset, H5I_INVALID_HID, "H5Dcreate2");
+
+ t = 0;
+ for (i = 0; i < MAX_ITER_CREATE; i++) {
+ t1 = H5_get_time();
+ ret = H5Rcreate_object(fid1, "/Group1/Dataset1", H5P_DEFAULT, &wbuf[0]);
+ CHECK(ret, FAIL, "H5Rcreate_object");
+ t2 = H5_get_time();
+ t += t2 - t1;
+ ret = H5Rdestroy(&wbuf[0]);
+ CHECK(ret, FAIL, "H5Rdestroy");
+ }
+ if (VERBOSE_MED)
+ HDprintf("--- Object reference create time: %lfs\n", t / MAX_ITER_CREATE);
+
+ /* Create reference to dataset */
+ ret = H5Rcreate_object(fid1, "/Group1/Dataset1", H5P_DEFAULT, &wbuf[0]);
+ CHECK(ret, FAIL, "H5Rcreate_object");
+ ret = H5Rget_obj_type3(&wbuf[0], H5P_DEFAULT, &obj_type);
+ CHECK(ret, FAIL, "H5Rget_obj_type3");
+ VERIFY(obj_type, H5O_TYPE_DATASET, "H5Rget_obj_type3");
+
+ t = 0;
+ for (i = 0; i < MAX_ITER_WRITE; i++) {
+ t1 = H5_get_time();
+ /* Write selection to disk */
+ ret = H5Dwrite(dataset, H5T_STD_REF, H5S_ALL, H5S_ALL, H5P_DEFAULT, wbuf);
+ CHECK(ret, FAIL, "H5Dwrite");
+ t2 = H5_get_time();
+ t += t2 - t1;
+ }
+ if (VERBOSE_MED)
+ HDprintf("--- Object reference write time: %lfs\n", t / MAX_ITER_WRITE);
+
+ /* Close Dataset */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+#if 0
+ /* Create a dataset */
+ dataset = H5Dcreate2(fid1, "Dataset4", H5T_STD_REF_OBJ, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(dataset, H5I_INVALID_HID, "H5Dcreate2");
+
+ t = 0;
+ for (i = 0; i < MAX_ITER_CREATE; i++) {
+ t1 = H5_get_time();
+ ret = H5Rcreate(&wbuf_deprec[0], fid1, "/Group1/Dataset1", H5R_OBJECT1, H5I_INVALID_HID);
+ CHECK(ret, FAIL, "H5Rcreate");
+ t2 = H5_get_time();
+ t += t2 - t1;
+ }
+ if (VERBOSE_MED)
+ HDprintf("--- Deprecated object reference create time: %lfs\n", t / MAX_ITER_CREATE);
+
+ /* Create reference to dataset */
+ ret = H5Rcreate(&wbuf_deprec[0], fid1, "/Group1/Dataset1", H5R_OBJECT1, H5I_INVALID_HID);
+ CHECK(ret, FAIL, "H5Rcreate");
+
+ t = 0;
+ for (i = 0; i < MAX_ITER_WRITE; i++) {
+ t1 = H5_get_time();
+ /* Write selection to disk */
+ ret = H5Dwrite(dataset, H5T_STD_REF_OBJ, H5S_ALL, H5S_ALL, H5P_DEFAULT, wbuf_deprec);
+ CHECK(ret, FAIL, "H5Dwrite");
+ t2 = H5_get_time();
+ t += t2 - t1;
+ }
+ if (VERBOSE_MED)
+ HDprintf("--- Deprecated object reference write time: %lfs\n", t / MAX_ITER_WRITE);
+
+ /* Close Dataset */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+#endif
+ /* Create a dataset */
+ dataset = H5Dcreate2(fid1, "Dataset5", H5T_STD_REF, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(dataset, H5I_INVALID_HID, "H5Dcreate2");
+
+ t = 0;
+ for (i = 0; i < MAX_ITER_CREATE; i++) {
+ t1 = H5_get_time();
+ /* Store first dataset region */
+ ret = H5Rcreate_region(fid1, "/Group1/Dataset1", sid1, H5P_DEFAULT, &wbuf_reg[0]);
+ CHECK(ret, FAIL, "H5Rcreate_region");
+ t2 = H5_get_time();
+ t += t2 - t1;
+ ret = H5Rdestroy(&wbuf_reg[0]);
+ CHECK(ret, FAIL, "H5Rdestroy");
+ }
+ if (VERBOSE_MED)
+ HDprintf("--- Region reference create time: %lfs\n", t / MAX_ITER_CREATE);
+
+ /* Store first dataset region */
+ ret = H5Rcreate_region(fid1, "/Group1/Dataset1", sid1, H5P_DEFAULT, &wbuf_reg[0]);
+ CHECK(ret, FAIL, "H5Rcreate_region");
+
+ t = 0;
+ for (i = 0; i < MAX_ITER_WRITE; i++) {
+ t1 = H5_get_time();
+ /* Write selection to disk */
+ ret = H5Dwrite(dataset, H5T_STD_REF, H5S_ALL, H5S_ALL, H5P_DEFAULT, wbuf_reg);
+ CHECK(ret, FAIL, "H5Dwrite");
+ t2 = H5_get_time();
+ t += t2 - t1;
+ }
+ if (VERBOSE_MED)
+ HDprintf("--- Region reference write time: %lfs\n", t / MAX_ITER_WRITE);
+
+ /* Close Dataset */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+#if 0
+ /* Create a dataset */
+ dataset = H5Dcreate2(fid1, "Dataset6", H5T_STD_REF_DSETREG, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(dataset, H5I_INVALID_HID, "H5Dcreate2");
+
+ t = 0;
+ for (i = 0; i < MAX_ITER_CREATE; i++) {
+ t1 = H5_get_time();
+ /* Store first dataset region */
+ ret = H5Rcreate(&wbuf_reg_deprec[0], fid1, "/Group1/Dataset1", H5R_DATASET_REGION1, sid1);
+ CHECK(ret, FAIL, "H5Rcreate");
+ t2 = H5_get_time();
+ t += t2 - t1;
+ }
+ if (VERBOSE_MED)
+ HDprintf("--- Deprecated region reference create time: %lfs\n", t / MAX_ITER_CREATE);
+
+ t = 0;
+ for (i = 0; i < MAX_ITER_WRITE; i++) {
+ t1 = H5_get_time();
+ /* Write selection to disk */
+ ret = H5Dwrite(dataset, H5T_STD_REF_DSETREG, H5S_ALL, H5S_ALL, H5P_DEFAULT, wbuf_reg_deprec);
+ CHECK(ret, FAIL, "H5Dwrite");
+ t2 = H5_get_time();
+ t += t2 - t1;
+ }
+ if (VERBOSE_MED)
+ HDprintf("--- Deprecated region reference write time: %lfs\n", t / MAX_ITER_WRITE);
+
+ /* Close Dataset */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+#endif
+ /* Close disk dataspace */
+ ret = H5Sclose(sid1);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close file */
+ ret = H5Fclose(fid1);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Re-open the file */
+ fid1 = H5Fopen(FILE_REF_OBJ, H5F_ACC_RDWR, H5P_DEFAULT);
+ CHECK(fid1, H5I_INVALID_HID, "H5Fopen");
+
+ /* Open the dataset */
+ dataset = H5Dopen2(fid1, "/Dataset3", H5P_DEFAULT);
+ CHECK(dataset, H5I_INVALID_HID, "H5Dopen2");
+
+ t = 0;
+ for (i = 0; i < MAX_ITER_READ; i++) {
+ t1 = H5_get_time();
+ /* Read selection from disk */
+ ret = H5Dread(dataset, H5T_STD_REF, H5S_ALL, H5S_ALL, H5P_DEFAULT, rbuf);
+ CHECK(ret, FAIL, "H5Dread");
+ t2 = H5_get_time();
+ t += t2 - t1;
+ ret = H5Rdestroy(&rbuf[0]);
+ CHECK(ret, FAIL, "H5Rdestroy");
+ }
+ if (VERBOSE_MED)
+ HDprintf("--- Object reference read time: %lfs\n", t / MAX_ITER_READ);
+
+ /* Read selection from disk */
+ ret = H5Dread(dataset, H5T_STD_REF, H5S_ALL, H5S_ALL, H5P_DEFAULT, rbuf);
+ CHECK(ret, FAIL, "H5Dread");
+
+ /* Open dataset object */
+ dset2 = H5Ropen_object(&rbuf[0], H5P_DEFAULT, dapl_id);
+ CHECK(dset2, H5I_INVALID_HID, "H5Ropen_object");
+
+ /* Check information in referenced dataset */
+ sid1 = H5Dget_space(dset2);
+ CHECK(sid1, H5I_INVALID_HID, "H5Dget_space");
+
+ ret = (int)H5Sget_simple_extent_npoints(sid1);
+ VERIFY(ret, dims1[0], "H5Sget_simple_extent_npoints");
+
+ /* Read from disk */
+ ret = H5Dread(dset2, H5T_NATIVE_UINT, H5S_ALL, H5S_ALL, H5P_DEFAULT, ibuf);
+ CHECK(ret, FAIL, "H5Dread");
+
+ for (i = 0; i < dims1[0]; i++)
+ VERIFY(ibuf[i], i * 3, "Data");
+
+ /* Close dereferenced Dataset */
+ ret = H5Dclose(dset2);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close Dataset */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+#if 0
+ /* Open the dataset */
+ dataset = H5Dopen2(fid1, "/Dataset4", H5P_DEFAULT);
+ CHECK(dataset, H5I_INVALID_HID, "H5Dopen2");
+
+ t = 0;
+ for (i = 0; i < MAX_ITER_READ; i++) {
+ t1 = H5_get_time();
+ /* Read selection from disk */
+ ret = H5Dread(dataset, H5T_STD_REF_OBJ, H5S_ALL, H5S_ALL, H5P_DEFAULT, rbuf_deprec);
+ CHECK(ret, FAIL, "H5Dread");
+ t2 = H5_get_time();
+ t += t2 - t1;
+ }
+ if (VERBOSE_MED)
+ HDprintf("--- Deprecated object reference read time: %lfs\n", t / MAX_ITER_READ);
+
+ /* Close Dataset */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+#endif
+ /* Open the dataset */
+ dataset = H5Dopen2(fid1, "/Dataset5", H5P_DEFAULT);
+ CHECK(dataset, H5I_INVALID_HID, "H5Dopen2");
+
+ t = 0;
+ for (i = 0; i < MAX_ITER_READ; i++) {
+ t1 = H5_get_time();
+ /* Read selection from disk */
+ ret = H5Dread(dataset, H5T_STD_REF, H5S_ALL, H5S_ALL, H5P_DEFAULT, rbuf_reg);
+ CHECK(ret, FAIL, "H5Dread");
+ t2 = H5_get_time();
+ t += t2 - t1;
+ ret = H5Rdestroy(&rbuf_reg[0]);
+ CHECK(ret, FAIL, "H5Rdestroy");
+ }
+ if (VERBOSE_MED)
+ HDprintf("--- Region reference read time: %lfs\n", t / MAX_ITER_READ);
+
+ /* Read selection from disk */
+ ret = H5Dread(dataset, H5T_STD_REF, H5S_ALL, H5S_ALL, H5P_DEFAULT, rbuf_reg);
+ CHECK(ret, FAIL, "H5Dread");
+
+ /* Close Dataset */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+#if 0
+ /* Open the dataset */
+ dataset = H5Dopen2(fid1, "/Dataset6", H5P_DEFAULT);
+ CHECK(dataset, H5I_INVALID_HID, "H5Dopen2");
+
+ t = 0;
+ for (i = 0; i < MAX_ITER_READ; i++) {
+ t1 = H5_get_time();
+ /* Read selection from disk */
+ ret = H5Dread(dataset, H5T_STD_REF_DSETREG, H5S_ALL, H5S_ALL, H5P_DEFAULT, rbuf_reg_deprec);
+ CHECK(ret, FAIL, "H5Dread");
+ t2 = H5_get_time();
+ t += t2 - t1;
+ }
+ if (VERBOSE_MED)
+ HDprintf("--- Deprecated region reference read time: %lfs\n", t / MAX_ITER_READ);
+
+ /* Close Dataset */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+#endif
+ /* Close dataset access property list */
+ ret = H5Pclose(dapl_id);
+ CHECK(ret, FAIL, "H5Pclose");
+
+ /* Close file */
+ ret = H5Fclose(fid1);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Destroy references */
+ for (j = 0; j < dims1[0]; j++) {
+ ret = H5Rdestroy(&wbuf[j]);
+ CHECK(ret, FAIL, "H5Rdestroy");
+ ret = H5Rdestroy(&wbuf_reg[j]);
+ CHECK(ret, FAIL, "H5Rdestroy");
+ ret = H5Rdestroy(&rbuf[j]);
+ CHECK(ret, FAIL, "H5Rdestroy");
+ ret = H5Rdestroy(&rbuf_reg[j]);
+ CHECK(ret, FAIL, "H5Rdestroy");
+ }
+
+ /* Free memory buffers */
+ HDfree(wbuf);
+ HDfree(rbuf);
+ HDfree(wbuf_reg);
+ HDfree(rbuf_reg);
+ HDfree(wbuf_deprec);
+ HDfree(rbuf_deprec);
+ HDfree(wbuf_reg_deprec);
+ HDfree(rbuf_reg_deprec);
+ HDfree(tbuf);
+ HDfree(ibuf);
+ HDfree(obuf);
+} /* test_reference_perf() */
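The performance routine above reports the average wall-clock cost of reference create/write/read calls over MAX_ITER_* iterations using H5_get_time() from the library's internal timer support. Reduced to its core, the timing pattern looks like the sketch below; the do_operation callback is a placeholder, not an HDF5 API.

/* Sketch of the timing pattern used above: average the elapsed wall-clock
 * time of an operation over n_iter iterations. H5_get_time() comes from the
 * HDF5 internal support code; do_operation is a placeholder callback. */
static double
average_time(unsigned n_iter, void (*do_operation)(void))
{
    double total = 0.0;
    for (unsigned i = 0; i < n_iter; i++) {
        double t1 = H5_get_time();
        do_operation();
        double t2 = H5_get_time();
        total += t2 - t1;
    }
    return total / (double)n_iter;
}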
+
+/****************************************************************
+**
+** test_reference(): Main H5R reference testing routine.
+**
+****************************************************************/
+void
+test_reference(void)
+{
+ H5F_libver_t low, high; /* Low and high bounds */
+ const char *env_h5_drvr; /* File Driver value from environment */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing References\n"));
+
+ /* Get the VFD to use */
+ env_h5_drvr = HDgetenv(HDF5_DRIVER);
+ if (env_h5_drvr == NULL)
+ env_h5_drvr = "nomatch";
+
+ test_reference_params(); /* Test for correct parameter checking */
+ test_reference_obj(); /* Test basic H5R object reference code */
+ test_reference_vlen_obj(); /* Test reference within vlen */
+ test_reference_cmpnd_obj(); /* Test reference within compound type */
+
+ /* Loop through all the combinations of low/high version bounds */
+ for (low = H5F_LIBVER_EARLIEST; low < H5F_LIBVER_NBOUNDS; low++) {
+ for (high = H5F_LIBVER_EARLIEST; high < H5F_LIBVER_NBOUNDS; high++) {
+
+ /* Invalid combinations, just continue */
+ if (high == H5F_LIBVER_EARLIEST || high < low)
+ continue;
+
+ test_reference_region(low, high); /* Test basic H5R dataset region reference code */
+ test_reference_region_1D(low, high); /* Test H5R dataset region reference code for 1-D datasets */
+
+ } /* end high bound */
+ } /* end low bound */
+
+ /* The following test is currently broken with the Direct VFD */
+ if (HDstrcmp(env_h5_drvr, "direct") != 0) {
+ test_reference_obj_deleted(); /* Test H5R object reference code for deleted objects */
+ }
+
+ test_reference_group(); /* Test operations on dereferenced groups */
+ test_reference_attr(); /* Test attribute references */
+ test_reference_external(); /* Test external references */
+#if 0
+ test_reference_compat_conv(); /* Test operations with old types */
+#endif
+
+ test_reference_perf();
+
+} /* test_reference() */
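The nested loops above only run the region-reference tests for bound pairs where high is not H5F_LIBVER_EARLIEST and high >= low, which mirrors what H5Pset_libver_bounds itself accepts. A small sketch of applying one such (low, high) pair through a file access property list is shown below; the file name is hypothetical.

#include "hdf5.h"

/* Sketch: apply one (low, high) bound pair through a FAPL before creating a
 * test file. H5Pset_libver_bounds rejects the same combinations the loop
 * above skips (high below low, or H5F_LIBVER_EARLIEST as the high bound). */
static hid_t
create_bounded_file(H5F_libver_t low, H5F_libver_t high)
{
    hid_t fapl = H5Pcreate(H5P_FILE_ACCESS);
    hid_t fid  = H5I_INVALID_HID;

    if (H5Pset_libver_bounds(fapl, low, high) >= 0)
        fid = H5Fcreate("bounded.h5", H5F_ACC_TRUNC, H5P_DEFAULT, fapl);

    H5Pclose(fapl);
    return fid;
}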
+
+/*-------------------------------------------------------------------------
+ * Function: cleanup_reference
+ *
+ * Purpose: Cleanup temporary test files
+ *
+ * Return: none
+ *
+ * Programmer: Quincey Koziol
+ * September 8, 1998
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+void
+cleanup_reference(void)
+{
+ H5Fdelete(FILE_REF_PARAM, H5P_DEFAULT);
+ H5Fdelete(FILE_REF_OBJ, H5P_DEFAULT);
+ H5Fdelete(FILE_REF_VL_OBJ, H5P_DEFAULT);
+ H5Fdelete(FILE_REF_CMPND_OBJ, H5P_DEFAULT);
+ H5Fdelete(FILE_REF_REG, H5P_DEFAULT);
+ H5Fdelete(FILE_REF_REG_1D, H5P_DEFAULT);
+ H5Fdelete(FILE_REF_OBJ_DEL, H5P_DEFAULT);
+ H5Fdelete(FILE_REF_GRP, H5P_DEFAULT);
+ H5Fdelete(FILE_REF_ATTR, H5P_DEFAULT);
+ H5Fdelete(FILE_REF_EXT1, H5P_DEFAULT);
+ H5Fdelete(FILE_REF_EXT2, H5P_DEFAULT);
+ H5Fdelete(FILE_REF_COMPAT, H5P_DEFAULT);
+}
diff --git a/test/API/tselect.c b/test/API/tselect.c
new file mode 100644
index 0000000..a2f377d
--- /dev/null
+++ b/test/API/tselect.c
@@ -0,0 +1,16314 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://www.hdfgroup.org/licenses. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/***********************************************************
+ *
+ * Test program: tselect
+ *
+ * Test the Dataspace selection functionality
+ *
+ *************************************************************/
+
+#define H5S_FRIEND /*suppress error about including H5Spkg */
+
+/* Define this macro to indicate that the testing APIs should be available */
+#define H5S_TESTING
+
+#include "testhdf5.h"
+#include "hdf5.h"
+/* #include "H5Spkg.h" */ /* Dataspaces */
+
+#define FILENAME "tselect.h5"
+
+/* 3-D dataset with fixed dimensions */
+#define SPACE1_NAME "Space1"
+#define SPACE1_RANK 3
+#define SPACE1_DIM1 3
+#define SPACE1_DIM2 15
+#define SPACE1_DIM3 13
+
+/* 2-D dataset with fixed dimensions */
+#define SPACE2_NAME "Space2"
+#define SPACE2_RANK 2
+#define SPACE2_DIM1 30
+#define SPACE2_DIM2 26
+#define SPACE2A_RANK 1
+#define SPACE2A_DIM1 (SPACE2_DIM1 * SPACE2_DIM2)
+
+/* 2-D dataset with fixed dimensions */
+#define SPACE3_NAME "Space3"
+#define SPACE3_RANK 2
+#define SPACE3_DIM1 15
+#define SPACE3_DIM2 26
+
+/* 3-D dataset with fixed dimensions */
+#define SPACE4_NAME "Space4"
+#define SPACE4_RANK 3
+#define SPACE4_DIM1 11
+#define SPACE4_DIM2 13
+#define SPACE4_DIM3 17
+
+/* Number of random hyperslabs to test */
+#define NHYPERSLABS 10
+
+/* Number of random hyperslab tests performed */
+#define NRAND_HYPER 100
+
+/* 5-D dataset with fixed dimensions */
+#define SPACE5_NAME "Space5"
+#define SPACE5_RANK 5
+#define SPACE5_DIM1 10
+#define SPACE5_DIM2 10
+#define SPACE5_DIM3 10
+#define SPACE5_DIM4 10
+#define SPACE5_DIM5 10
+
+/* 1-D dataset with same size as 5-D dataset */
+#define SPACE6_RANK 1
+#define SPACE6_DIM1 (SPACE5_DIM1 * SPACE5_DIM2 * SPACE5_DIM3 * SPACE5_DIM4 * SPACE5_DIM5)
+
+/* 2-D dataset with easy dimension sizes */
+#define SPACE7_NAME "Space7"
+#define SPACE7_RANK 2
+#define SPACE7_DIM1 10
+#define SPACE7_DIM2 10
+#define SPACE7_FILL 254
+#define SPACE7_CHUNK_DIM1 5
+#define SPACE7_CHUNK_DIM2 5
+#define SPACE7_NPOINTS 8
+
+/* 4-D dataset with fixed dimensions */
+#define SPACE8_NAME "Space8"
+#define SPACE8_RANK 4
+#define SPACE8_DIM1 11
+#define SPACE8_DIM2 13
+#define SPACE8_DIM3 17
+#define SPACE8_DIM4 19
+
+/* Another 2-D dataset with easy dimension sizes */
+#define SPACE9_RANK 2
+#define SPACE9_DIM1 12
+#define SPACE9_DIM2 12
+
+/* Element selection information */
+#define POINT1_NPOINTS 10
+
+/* Chunked dataset information */
+#define DATASETNAME "ChunkArray"
+#define NX_SUB 87 /* hyperslab dimensions */
+#define NY_SUB 61
+#define NZ_SUB 181
+#define NX 87 /* output buffer dimensions */
+#define NY 61
+#define NZ 181
+#define RANK_F 3 /* File dataspace rank */
+#define RANK_M 3 /* Memory dataspace rank */
+#define X 87 /* dataset dimensions */
+#define Y 61
+#define Z 181
+#define CHUNK_X 87 /* chunk dimensions */
+#define CHUNK_Y 61
+#define CHUNK_Z 181
+
+/* Basic chunk size */
+#define SPACE10_DIM1 180
+#define SPACE10_CHUNK_SIZE 12
+
+/* Information for bounds checking test */
+#define SPACE11_RANK 2
+#define SPACE11_DIM1 100
+#define SPACE11_DIM2 100
+#define SPACE11_NPOINTS 4
+
+/* Information for offsets w/chunks test #2 */
+#define SPACE12_RANK 1
+#define SPACE12_DIM0 25
+#define SPACE12_CHUNK_DIM0 5
+
+/* Information for Space rebuild test */
+#define SPACERE1_RANK 1
+#define SPACERE1_DIM0 20
+#define SPACERE2_RANK 2
+#define SPACERE2_DIM0 8
+#define SPACERE2_DIM1 12
+#define SPACERE3_RANK 3
+#define SPACERE3_DIM0 8
+#define SPACERE3_DIM1 12
+#define SPACERE3_DIM2 8
+#define SPACERE4_RANK 4
+#define SPACERE4_DIM0 8
+#define SPACERE4_DIM1 12
+#define SPACERE4_DIM2 8
+#define SPACERE4_DIM3 12
+#define SPACERE5_RANK 5
+#define SPACERE5_DIM0 8
+#define SPACERE5_DIM1 12
+#define SPACERE5_DIM2 8
+#define SPACERE5_DIM3 12
+#define SPACERE5_DIM4 8
+
+/* Information for Space update diminfo test */
+#define SPACEUD1_DIM0 20
+#define SPACEUD3_DIM0 9
+#define SPACEUD3_DIM1 12
+#define SPACEUD3_DIM2 13
+
+/* #defines for shape same / different rank tests */
+#define SS_DR_MAX_RANK 5
+
+/* Information for regular hyperslab query test */
+#define SPACE13_RANK 3
+#define SPACE13_DIM1 50
+#define SPACE13_DIM2 50
+#define SPACE13_DIM3 50
+#define SPACE13_NPOINTS 4
+
+/* Information for testing selection iterators */
+#define SEL_ITER_MAX_SEQ 256
+
+/* Defines for test_hyper_io_1d() */
+#define DNAME "DSET_1D"
+#define RANK 1
+#define NUMCHUNKS 3
+#define CHUNKSZ 20
+#define NUM_ELEMENTS (NUMCHUNKS * CHUNKSZ)
+
+/* Location comparison function */
+static int compare_size_t(const void *s1, const void *s2);
+
+static herr_t test_select_hyper_iter1(void *elem, hid_t type_id, unsigned ndim, const hsize_t *point,
+ void *operator_data);
+static herr_t test_select_point_iter1(void *elem, hid_t type_id, unsigned ndim, const hsize_t *point,
+ void *operator_data);
+static herr_t test_select_all_iter1(void *elem, hid_t type_id, unsigned ndim, const hsize_t *point,
+ void *operator_data);
+static herr_t test_select_none_iter1(void *elem, hid_t type_id, unsigned ndim, const hsize_t *point,
+ void *operator_data);
+static herr_t test_select_hyper_iter2(void *_elem, hid_t type_id, unsigned ndim, const hsize_t *point,
+ void *_operator_data);
+static herr_t test_select_hyper_iter3(void *elem, hid_t type_id, unsigned ndim, const hsize_t *point,
+ void *operator_data);
+
+/****************************************************************
+**
+** test_select_hyper_iter1(): Iterator for checking hyperslab iteration
+**
+****************************************************************/
+static herr_t
+test_select_hyper_iter1(void *_elem, hid_t H5_ATTR_UNUSED type_id, unsigned H5_ATTR_UNUSED ndim,
+ const hsize_t H5_ATTR_UNUSED *point, void *_operator_data)
+{
+ uint8_t *tbuf = (uint8_t *)_elem, /* temporary buffer pointer */
+ **tbuf2 = (uint8_t **)_operator_data; /* temporary buffer handle */
+
+ if (*tbuf != **tbuf2)
+ return (-1);
+ else {
+ (*tbuf2)++;
+ return (0);
+ }
+} /* end test_select_hyper_iter1() */
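The iterator callbacks in this file follow the H5D_operator_t convention used by H5Diterate: return 0 to continue, a negative value to abort with failure, and a positive value to stop early with success. A self-contained sketch of driving such a callback over an in-memory buffer follows; the summing callback is illustrative and not part of the test.

#include <stdint.h>
#include "hdf5.h"

/* Sketch: drive an H5D_operator_t callback over an in-memory buffer with
 * H5Diterate. The summing callback is illustrative, not part of the test. */
static herr_t
sum_op(void *elem, hid_t type_id, unsigned ndim, const hsize_t *point, void *op_data)
{
    (void)type_id; (void)ndim; (void)point;
    *(unsigned long *)op_data += *(uint8_t *)elem;
    return 0; /* 0 = keep iterating; a negative value aborts the iteration */
}

static unsigned long
sum_selected_bytes(void *buf, hid_t space_id)
{
    unsigned long total = 0;
    H5Diterate(buf, H5T_NATIVE_UCHAR, space_id, sum_op, &total);
    return total;
}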
+
+/****************************************************************
+**
+** test_select_hyper(): Test basic H5S (dataspace) selection code.
+** Tests hyperslabs of various sizes and dimensionalities.
+**
+****************************************************************/
+static void
+test_select_hyper(hid_t xfer_plist)
+{
+ hid_t fid1; /* HDF5 File IDs */
+ hid_t dataset; /* Dataset ID */
+ hid_t sid1, sid2; /* Dataspace ID */
+ hsize_t dims1[] = {SPACE1_DIM1, SPACE1_DIM2, SPACE1_DIM3};
+ hsize_t dims2[] = {SPACE2_DIM1, SPACE2_DIM2};
+ hsize_t dims3[] = {SPACE3_DIM1, SPACE3_DIM2};
+ hsize_t start[SPACE1_RANK]; /* Starting location of hyperslab */
+ hsize_t stride[SPACE1_RANK]; /* Stride of hyperslab */
+ hsize_t count[SPACE1_RANK]; /* Element count of hyperslab */
+ hsize_t block[SPACE1_RANK]; /* Block size of hyperslab */
+ uint8_t *wbuf, /* buffer to write to disk */
+ *rbuf, /* buffer read from disk */
+ *tbuf; /* temporary buffer pointer */
+ int i, j; /* Counters */
+ herr_t ret; /* Generic return value */
+ H5S_class_t ext_type; /* Extent type */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing Hyperslab Selection Functions\n"));
+
+ /* Allocate write & read buffers */
+ wbuf = (uint8_t *)HDmalloc(sizeof(uint8_t) * SPACE2_DIM1 * SPACE2_DIM2);
+ CHECK_PTR(wbuf, "HDmalloc");
+ rbuf = (uint8_t *)HDcalloc(sizeof(uint8_t), (size_t)(SPACE3_DIM1 * SPACE3_DIM2));
+ CHECK_PTR(rbuf, "HDcalloc");
+
+ /* Initialize write buffer */
+ for (i = 0, tbuf = wbuf; i < SPACE2_DIM1; i++)
+ for (j = 0; j < SPACE2_DIM2; j++)
+ *tbuf++ = (uint8_t)((i * SPACE2_DIM2) + j);
+
+ /* Create file */
+ fid1 = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(fid1, FAIL, "H5Fcreate");
+
+ /* Create dataspace for dataset */
+ sid1 = H5Screate_simple(SPACE1_RANK, dims1, NULL);
+ CHECK(sid1, FAIL, "H5Screate_simple");
+
+ /* Create dataspace for writing buffer */
+ sid2 = H5Screate_simple(SPACE2_RANK, dims2, NULL);
+ CHECK(sid2, FAIL, "H5Screate_simple");
+
+ /* Verify extent type */
+ ext_type = H5Sget_simple_extent_type(sid1);
+ VERIFY(ext_type, H5S_SIMPLE, "H5Sget_simple_extent_type");
+
+ /* Test selecting stride==0 to verify failure */
+ start[0] = 1;
+ start[1] = 0;
+ start[2] = 0;
+ stride[0] = 0;
+ stride[1] = 0;
+ stride[2] = 0;
+ count[0] = 2;
+ count[1] = 15;
+ count[2] = 13;
+ block[0] = 1;
+ block[1] = 1;
+ block[2] = 1;
+ H5E_BEGIN_TRY
+ {
+ ret = H5Sselect_hyperslab(sid1, H5S_SELECT_SET, start, stride, count, block);
+ }
+ H5E_END_TRY;
+ VERIFY(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* Test selecting stride<block to verify failure */
+ start[0] = 1;
+ start[1] = 0;
+ start[2] = 0;
+ stride[0] = 1;
+ stride[1] = 1;
+ stride[2] = 1;
+ count[0] = 2;
+ count[1] = 15;
+ count[2] = 13;
+ block[0] = 2;
+ block[1] = 2;
+ block[2] = 2;
+ H5E_BEGIN_TRY
+ {
+ ret = H5Sselect_hyperslab(sid1, H5S_SELECT_SET, start, stride, count, block);
+ }
+ H5E_END_TRY;
+ VERIFY(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* Select 2x15x13 hyperslab for disk dataset */
+ start[0] = 1;
+ start[1] = 0;
+ start[2] = 0;
+ stride[0] = 1;
+ stride[1] = 1;
+ stride[2] = 1;
+ count[0] = 2;
+ count[1] = 15;
+ count[2] = 13;
+ block[0] = 1;
+ block[1] = 1;
+ block[2] = 1;
+ ret = H5Sselect_hyperslab(sid1, H5S_SELECT_SET, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* Select 15x26 hyperslab for memory dataset */
+ start[0] = 15;
+ start[1] = 0;
+ stride[0] = 1;
+ stride[1] = 1;
+ count[0] = 15;
+ count[1] = 26;
+ block[0] = 1;
+ block[1] = 1;
+ ret = H5Sselect_hyperslab(sid2, H5S_SELECT_SET, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* Create a dataset */
+ dataset = H5Dcreate2(fid1, SPACE2_NAME, H5T_NATIVE_UCHAR, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(dataset, FAIL, "H5Dcreate2");
+
+ /* Write selection to disk */
+ ret = H5Dwrite(dataset, H5T_NATIVE_UCHAR, sid2, sid1, xfer_plist, wbuf);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* Exercise checks for NULL buffer and valid selection */
+ H5E_BEGIN_TRY
+ {
+ ret = H5Dwrite(dataset, H5T_NATIVE_UCHAR, sid2, sid1, xfer_plist, NULL);
+ }
+ H5E_END_TRY;
+ VERIFY(ret, FAIL, "H5Dwrite");
+ H5E_BEGIN_TRY
+ {
+ ret = H5Dwrite(dataset, H5T_NATIVE_UCHAR, H5S_ALL, H5S_ALL, xfer_plist, NULL);
+ }
+ H5E_END_TRY;
+ VERIFY(ret, FAIL, "H5Dwrite");
+
+ /* Close memory dataspace */
+ ret = H5Sclose(sid2);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Create dataspace for reading buffer */
+ sid2 = H5Screate_simple(SPACE3_RANK, dims3, NULL);
+ CHECK(sid2, FAIL, "H5Screate_simple");
+
+ /* Select 15x26 hyperslab for reading memory dataset */
+ start[0] = 0;
+ start[1] = 0;
+ stride[0] = 1;
+ stride[1] = 1;
+ count[0] = 15;
+ count[1] = 26;
+ block[0] = 1;
+ block[1] = 1;
+ ret = H5Sselect_hyperslab(sid2, H5S_SELECT_SET, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* Select 0x26 hyperslab to OR into current selection (should be a NOOP) */
+ start[0] = 0;
+ start[1] = 0;
+ stride[0] = 1;
+ stride[1] = 1;
+ count[0] = 0;
+ count[1] = 26;
+ block[0] = 1;
+ block[1] = 1;
+ ret = H5Sselect_hyperslab(sid2, H5S_SELECT_OR, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* Read selection from disk */
+ ret = H5Dread(dataset, H5T_NATIVE_UCHAR, sid2, sid1, xfer_plist, rbuf);
+ CHECK(ret, FAIL, "H5Dread");
+
+ /* Exercise checks for NULL buffer and valid selection */
+ H5E_BEGIN_TRY
+ {
+ ret = H5Dread(dataset, H5T_NATIVE_UCHAR, sid2, sid1, xfer_plist, NULL);
+ }
+ H5E_END_TRY;
+ VERIFY(ret, FAIL, "H5Dread");
+ H5E_BEGIN_TRY
+ {
+ ret = H5Dread(dataset, H5T_NATIVE_UCHAR, H5S_ALL, H5S_ALL, xfer_plist, NULL);
+ }
+ H5E_END_TRY;
+ VERIFY(ret, FAIL, "H5Dread");
+
+ /* Check that the values match with a dataset iterator */
+ tbuf = wbuf + (15 * SPACE2_DIM2);
+ ret = H5Diterate(rbuf, H5T_NATIVE_UCHAR, sid2, test_select_hyper_iter1, &tbuf);
+ CHECK(ret, FAIL, "H5Diterate");
+
+ /* Close memory dataspace */
+ ret = H5Sclose(sid2);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close disk dataspace */
+ ret = H5Sclose(sid1);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close Dataset */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close file */
+ ret = H5Fclose(fid1);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Free memory buffers */
+ HDfree(wbuf);
+ HDfree(rbuf);
+} /* test_select_hyper() */
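A hyperslab selection covers, along each dimension d, the indices start[d] + i * stride[d] + b for i in [0, count[d]) and b in [0, block[d]); this is also why the failure cases above require stride > 0 and stride >= block. The small sketch below enumerates those indices for one dimension; it is an illustrative helper, not an HDF5 API.

#include <stdio.h>
#include "hdf5.h"

/* Sketch: enumerate the 1-D indices covered by a hyperslab described by
 * (start, stride, count, block). Illustrative helper, not an HDF5 API. */
static void
print_hyperslab_indices(hsize_t start, hsize_t stride, hsize_t count, hsize_t block)
{
    for (hsize_t i = 0; i < count; i++)     /* one block per count */
        for (hsize_t b = 0; b < block; b++) /* elements within each block */
            printf("%llu ", (unsigned long long)(start + i * stride + b));
    printf("\n");
}

/* Example: print_hyperslab_indices(1, 3, 2, 2) prints "1 2 4 5" */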
+
+struct pnt_iter {
+ hsize_t coord[POINT1_NPOINTS * 2][SPACE2_RANK]; /* Coordinates for point selection */
+ uint8_t *buf; /* Buffer the points are in */
+ int offset; /* Which point we are looking at */
+};
+
+/****************************************************************
+**
+** test_select_point_iter1(): Iterator for checking point iteration
+** (This is really ugly code, not a very good example of correct usage - QAK)
+**
+****************************************************************/
+static herr_t
+test_select_point_iter1(void *_elem, hid_t H5_ATTR_UNUSED type_id, unsigned H5_ATTR_UNUSED ndim,
+ const hsize_t H5_ATTR_UNUSED *point, void *_operator_data)
+{
+ uint8_t *elem = (uint8_t *)_elem; /* Pointer to the element to examine */
+ uint8_t *tmp; /* temporary ptr to element in operator data */
+ struct pnt_iter *pnt_info = (struct pnt_iter *)_operator_data;
+
+ tmp = pnt_info->buf + (pnt_info->coord[pnt_info->offset][0] * SPACE2_DIM2) +
+ pnt_info->coord[pnt_info->offset][1];
+ if (*elem != *tmp)
+ return (-1);
+ else {
+ pnt_info->offset++;
+ return (0);
+ }
+} /* end test_select_point_iter1() */
+
+/****************************************************************
+**
+** test_select_point(): Test basic H5S (dataspace) selection code.
+** Tests element selections between dataspaces of various sizes
+** and dimensionalities.
+**
+****************************************************************/
+static void
+test_select_point(hid_t xfer_plist)
+{
+ hid_t fid1; /* HDF5 File IDs */
+ hid_t dataset; /* Dataset ID */
+ hid_t sid1, sid2; /* Dataspace ID */
+ hsize_t dims1[] = {SPACE1_DIM1, SPACE1_DIM2, SPACE1_DIM3};
+ hsize_t dims2[] = {SPACE2_DIM1, SPACE2_DIM2};
+ hsize_t dims3[] = {SPACE3_DIM1, SPACE3_DIM2};
+ hsize_t coord1[POINT1_NPOINTS][SPACE1_RANK]; /* Coordinates for point selection */
+ hsize_t temp_coord1[POINT1_NPOINTS][SPACE1_RANK]; /* Coordinates for point selection */
+ hsize_t coord2[POINT1_NPOINTS][SPACE2_RANK]; /* Coordinates for point selection */
+ hsize_t temp_coord2[POINT1_NPOINTS][SPACE2_RANK]; /* Coordinates for point selection */
+ hsize_t coord3[POINT1_NPOINTS][SPACE3_RANK]; /* Coordinates for point selection */
+ hsize_t temp_coord3[POINT1_NPOINTS][SPACE3_RANK]; /* Coordinates for point selection */
+ uint8_t *wbuf, /* buffer to write to disk */
+ *rbuf, /* buffer read from disk */
+ *tbuf; /* temporary buffer pointer */
+ int i, j; /* Counters */
+ struct pnt_iter pi; /* Custom Pointer iterator struct */
+ herr_t ret; /* Generic return value */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing Element Selection Functions\n"));
+
+ /* Allocate write & read buffers */
+ wbuf = (uint8_t *)HDmalloc(sizeof(uint8_t) * SPACE2_DIM1 * SPACE2_DIM2);
+ CHECK_PTR(wbuf, "HDmalloc");
+ rbuf = (uint8_t *)HDcalloc(sizeof(uint8_t), (size_t)(SPACE3_DIM1 * SPACE3_DIM2));
+ CHECK_PTR(rbuf, "HDcalloc");
+
+ /* Initialize write buffer */
+ for (i = 0, tbuf = wbuf; i < SPACE2_DIM1; i++)
+ for (j = 0; j < SPACE2_DIM2; j++)
+ *tbuf++ = (uint8_t)((i * SPACE2_DIM2) + j);
+
+ /* Create file */
+ fid1 = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(fid1, FAIL, "H5Fcreate");
+
+ /* Create dataspace for dataset */
+ sid1 = H5Screate_simple(SPACE1_RANK, dims1, NULL);
+ CHECK(sid1, FAIL, "H5Screate_simple");
+
+ /* Create dataspace for write buffer */
+ sid2 = H5Screate_simple(SPACE2_RANK, dims2, NULL);
+ CHECK(sid2, FAIL, "H5Screate_simple");
+
+ /* Select sequence of ten points for disk dataset */
+ coord1[0][0] = 0;
+ coord1[0][1] = 10;
+ coord1[0][2] = 5;
+ coord1[1][0] = 1;
+ coord1[1][1] = 2;
+ coord1[1][2] = 7;
+ coord1[2][0] = 2;
+ coord1[2][1] = 4;
+ coord1[2][2] = 9;
+ coord1[3][0] = 0;
+ coord1[3][1] = 6;
+ coord1[3][2] = 11;
+ coord1[4][0] = 1;
+ coord1[4][1] = 8;
+ coord1[4][2] = 13;
+ coord1[5][0] = 2;
+ coord1[5][1] = 12;
+ coord1[5][2] = 0;
+ coord1[6][0] = 0;
+ coord1[6][1] = 14;
+ coord1[6][2] = 2;
+ coord1[7][0] = 1;
+ coord1[7][1] = 0;
+ coord1[7][2] = 4;
+ coord1[8][0] = 2;
+ coord1[8][1] = 1;
+ coord1[8][2] = 6;
+ coord1[9][0] = 0;
+ coord1[9][1] = 3;
+ coord1[9][2] = 8;
+ ret = H5Sselect_elements(sid1, H5S_SELECT_SET, (size_t)POINT1_NPOINTS, (const hsize_t *)coord1);
+ CHECK(ret, FAIL, "H5Sselect_elements");
+
+ /* Verify correct elements selected */
+ H5Sget_select_elem_pointlist(sid1, (hsize_t)0, (hsize_t)POINT1_NPOINTS, (hsize_t *)temp_coord1);
+ for (i = 0; i < POINT1_NPOINTS; i++) {
+ VERIFY(temp_coord1[i][0], coord1[i][0], "H5Sget_select_elem_pointlist");
+ VERIFY(temp_coord1[i][1], coord1[i][1], "H5Sget_select_elem_pointlist");
+ VERIFY(temp_coord1[i][2], coord1[i][2], "H5Sget_select_elem_pointlist");
+ } /* end for */
+
+ ret = (int)H5Sget_select_npoints(sid1);
+ VERIFY(ret, 10, "H5Sget_select_npoints");
+
+ /* Append another sequence of ten points to disk dataset */
+ coord1[0][0] = 0;
+ coord1[0][1] = 2;
+ coord1[0][2] = 0;
+ coord1[1][0] = 1;
+ coord1[1][1] = 10;
+ coord1[1][2] = 8;
+ coord1[2][0] = 2;
+ coord1[2][1] = 8;
+ coord1[2][2] = 10;
+ coord1[3][0] = 0;
+ coord1[3][1] = 7;
+ coord1[3][2] = 12;
+ coord1[4][0] = 1;
+ coord1[4][1] = 3;
+ coord1[4][2] = 11;
+ coord1[5][0] = 2;
+ coord1[5][1] = 1;
+ coord1[5][2] = 1;
+ coord1[6][0] = 0;
+ coord1[6][1] = 13;
+ coord1[6][2] = 7;
+ coord1[7][0] = 1;
+ coord1[7][1] = 14;
+ coord1[7][2] = 6;
+ coord1[8][0] = 2;
+ coord1[8][1] = 2;
+ coord1[8][2] = 5;
+ coord1[9][0] = 0;
+ coord1[9][1] = 6;
+ coord1[9][2] = 13;
+ ret = H5Sselect_elements(sid1, H5S_SELECT_APPEND, (size_t)POINT1_NPOINTS, (const hsize_t *)coord1);
+ CHECK(ret, FAIL, "H5Sselect_elements");
+
+ /* Verify correct elements selected */
+ H5Sget_select_elem_pointlist(sid1, (hsize_t)POINT1_NPOINTS, (hsize_t)POINT1_NPOINTS,
+ (hsize_t *)temp_coord1);
+ for (i = 0; i < POINT1_NPOINTS; i++) {
+ VERIFY(temp_coord1[i][0], coord1[i][0], "H5Sget_select_elem_pointlist");
+ VERIFY(temp_coord1[i][1], coord1[i][1], "H5Sget_select_elem_pointlist");
+ VERIFY(temp_coord1[i][2], coord1[i][2], "H5Sget_select_elem_pointlist");
+ } /* end for */
+
+ ret = (int)H5Sget_select_npoints(sid1);
+ VERIFY(ret, 20, "H5Sget_select_npoints");
+
+ /* Select sequence of ten points for memory dataset */
+ coord2[0][0] = 12;
+ coord2[0][1] = 3;
+ coord2[1][0] = 15;
+ coord2[1][1] = 13;
+ coord2[2][0] = 7;
+ coord2[2][1] = 25;
+ coord2[3][0] = 0;
+ coord2[3][1] = 6;
+ coord2[4][0] = 13;
+ coord2[4][1] = 0;
+ coord2[5][0] = 24;
+ coord2[5][1] = 11;
+ coord2[6][0] = 12;
+ coord2[6][1] = 21;
+ coord2[7][0] = 29;
+ coord2[7][1] = 4;
+ coord2[8][0] = 8;
+ coord2[8][1] = 8;
+ coord2[9][0] = 19;
+ coord2[9][1] = 17;
+ ret = H5Sselect_elements(sid2, H5S_SELECT_SET, (size_t)POINT1_NPOINTS, (const hsize_t *)coord2);
+ CHECK(ret, FAIL, "H5Sselect_elements");
+
+ /* Verify correct elements selected */
+ H5Sget_select_elem_pointlist(sid2, (hsize_t)0, (hsize_t)POINT1_NPOINTS, (hsize_t *)temp_coord2);
+ for (i = 0; i < POINT1_NPOINTS; i++) {
+ VERIFY(temp_coord2[i][0], coord2[i][0], "H5Sget_select_elem_pointlist");
+ VERIFY(temp_coord2[i][1], coord2[i][1], "H5Sget_select_elem_pointlist");
+ } /* end for */
+
+ /* Save points for later iteration */
+ /* (these are in the second half of the buffer, because we are prepending */
+ /* the next list of points to the beginning of the point selection list) */
+ HDmemcpy(((char *)pi.coord) + sizeof(coord2), coord2, sizeof(coord2));
+
+ ret = (int)H5Sget_select_npoints(sid2);
+ VERIFY(ret, 10, "H5Sget_select_npoints");
+
+ /* Append another sequence of ten points to memory dataset */
+ coord2[0][0] = 24;
+ coord2[0][1] = 0;
+ coord2[1][0] = 2;
+ coord2[1][1] = 25;
+ coord2[2][0] = 13;
+ coord2[2][1] = 17;
+ coord2[3][0] = 8;
+ coord2[3][1] = 3;
+ coord2[4][0] = 29;
+ coord2[4][1] = 4;
+ coord2[5][0] = 11;
+ coord2[5][1] = 14;
+ coord2[6][0] = 5;
+ coord2[6][1] = 22;
+ coord2[7][0] = 12;
+ coord2[7][1] = 2;
+ coord2[8][0] = 21;
+ coord2[8][1] = 12;
+ coord2[9][0] = 9;
+ coord2[9][1] = 18;
+ ret = H5Sselect_elements(sid2, H5S_SELECT_PREPEND, (size_t)POINT1_NPOINTS, (const hsize_t *)coord2);
+ CHECK(ret, FAIL, "H5Sselect_elements");
+
+ /* Verify correct elements selected */
+ H5Sget_select_elem_pointlist(sid2, (hsize_t)0, (hsize_t)POINT1_NPOINTS, (hsize_t *)temp_coord2);
+ for (i = 0; i < POINT1_NPOINTS; i++) {
+ VERIFY(temp_coord2[i][0], coord2[i][0], "H5Sget_select_elem_pointlist");
+ VERIFY(temp_coord2[i][1], coord2[i][1], "H5Sget_select_elem_pointlist");
+ } /* end for */
+
+ ret = (int)H5Sget_select_npoints(sid2);
+ VERIFY(ret, 20, "H5Sget_select_npoints");
+
+ /* Save points for later iteration */
+ HDmemcpy(pi.coord, coord2, sizeof(coord2));
+
+ /* Create a dataset */
+ dataset = H5Dcreate2(fid1, SPACE1_NAME, H5T_NATIVE_UCHAR, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(dataset, FAIL, "H5Dcreate2");
+
+ /* Write selection to disk */
+ ret = H5Dwrite(dataset, H5T_NATIVE_UCHAR, sid2, sid1, xfer_plist, wbuf);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* Close memory dataspace */
+ ret = H5Sclose(sid2);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Create dataspace for reading buffer */
+ sid2 = H5Screate_simple(SPACE3_RANK, dims3, NULL);
+ CHECK(sid2, FAIL, "H5Screate_simple");
+
+ /* Select sequence of points for read dataset */
+ coord3[0][0] = 0;
+ coord3[0][1] = 2;
+ coord3[1][0] = 4;
+ coord3[1][1] = 8;
+ coord3[2][0] = 13;
+ coord3[2][1] = 13;
+ coord3[3][0] = 14;
+ coord3[3][1] = 20;
+ coord3[4][0] = 7;
+ coord3[4][1] = 9;
+ coord3[5][0] = 2;
+ coord3[5][1] = 0;
+ coord3[6][0] = 9;
+ coord3[6][1] = 19;
+ coord3[7][0] = 1;
+ coord3[7][1] = 22;
+ coord3[8][0] = 12;
+ coord3[8][1] = 21;
+ coord3[9][0] = 11;
+ coord3[9][1] = 6;
+ ret = H5Sselect_elements(sid2, H5S_SELECT_SET, (size_t)POINT1_NPOINTS, (const hsize_t *)coord3);
+ CHECK(ret, FAIL, "H5Sselect_elements");
+
+ /* Verify correct elements selected */
+ H5Sget_select_elem_pointlist(sid2, (hsize_t)0, (hsize_t)POINT1_NPOINTS, (hsize_t *)temp_coord3);
+ for (i = 0; i < POINT1_NPOINTS; i++) {
+ VERIFY(temp_coord3[i][0], coord3[i][0], "H5Sget_select_elem_pointlist");
+ VERIFY(temp_coord3[i][1], coord3[i][1], "H5Sget_select_elem_pointlist");
+ } /* end for */
+
+ ret = (int)H5Sget_select_npoints(sid2);
+ VERIFY(ret, 10, "H5Sget_select_npoints");
+
+ /* Append another sequence of ten points to disk dataset */
+ coord3[0][0] = 14;
+ coord3[0][1] = 25;
+ coord3[1][0] = 0;
+ coord3[1][1] = 0;
+ coord3[2][0] = 11;
+ coord3[2][1] = 11;
+ coord3[3][0] = 5;
+ coord3[3][1] = 14;
+ coord3[4][0] = 3;
+ coord3[4][1] = 5;
+ coord3[5][0] = 2;
+ coord3[5][1] = 2;
+ coord3[6][0] = 7;
+ coord3[6][1] = 13;
+ coord3[7][0] = 9;
+ coord3[7][1] = 16;
+ coord3[8][0] = 12;
+ coord3[8][1] = 22;
+ coord3[9][0] = 13;
+ coord3[9][1] = 9;
+ ret = H5Sselect_elements(sid2, H5S_SELECT_APPEND, (size_t)POINT1_NPOINTS, (const hsize_t *)coord3);
+ CHECK(ret, FAIL, "H5Sselect_elements");
+
+ /* Verify correct elements selected */
+ H5Sget_select_elem_pointlist(sid2, (hsize_t)POINT1_NPOINTS, (hsize_t)POINT1_NPOINTS,
+ (hsize_t *)temp_coord3);
+ for (i = 0; i < POINT1_NPOINTS; i++) {
+ VERIFY(temp_coord3[i][0], coord3[i][0], "H5Sget_select_elem_pointlist");
+ VERIFY(temp_coord3[i][1], coord3[i][1], "H5Sget_select_elem_pointlist");
+ } /* end for */
+ ret = (int)H5Sget_select_npoints(sid2);
+ VERIFY(ret, 20, "H5Sget_select_npoints");
+
+ /* Read selection from disk */
+ ret = H5Dread(dataset, H5T_NATIVE_UCHAR, sid2, sid1, xfer_plist, rbuf);
+ CHECK(ret, FAIL, "H5Dread");
+
+ /* Check that the values match with a dataset iterator */
+ pi.buf = wbuf;
+ pi.offset = 0;
+ ret = H5Diterate(rbuf, H5T_NATIVE_UCHAR, sid2, test_select_point_iter1, &pi);
+ CHECK(ret, FAIL, "H5Diterate");
+
+ /* Close memory dataspace */
+ ret = H5Sclose(sid2);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close disk dataspace */
+ ret = H5Sclose(sid1);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close Dataset */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close file */
+ ret = H5Fclose(fid1);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Free memory buffers */
+ HDfree(wbuf);
+ HDfree(rbuf);
+} /* test_select_point() */
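H5Sselect_elements takes the point coordinates as a single hsize_t array flattened row by row (num_points * rank values), and the SET / APPEND / PREPEND operators above either replace the selection or grow it at one end. A brief sketch with hypothetical dimensions:

#include "hdf5.h"

/* Sketch: element selection in a 2-D dataspace with hypothetical dimensions.
 * The coordinate array is flattened row by row: {r0,c0, r1,c1, ...}. */
static hid_t
make_point_selection(void)
{
    hsize_t dims[2]     = {10, 10};
    hsize_t coord[3][2] = {{0, 1}, {4, 4}, {9, 7}};
    hsize_t more[1][2]  = {{2, 2}};
    hid_t   sid         = H5Screate_simple(2, dims, NULL);

    H5Sselect_elements(sid, H5S_SELECT_SET, 3, (const hsize_t *)coord);

    /* APPEND keeps the existing points and adds the new ones at the end */
    H5Sselect_elements(sid, H5S_SELECT_APPEND, 1, (const hsize_t *)more);

    /* H5Sget_select_npoints(sid) now reports 4 */
    return sid;
}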
+
+/****************************************************************
+**
+** test_select_all_iter1(): Iterator for checking all iteration
+**
+**
+****************************************************************/
+static herr_t
+test_select_all_iter1(void *_elem, hid_t H5_ATTR_UNUSED type_id, unsigned H5_ATTR_UNUSED ndim,
+ const hsize_t H5_ATTR_UNUSED *point, void *_operator_data)
+{
+ uint8_t *tbuf = (uint8_t *)_elem, /* temporary buffer pointer */
+ **tbuf2 = (uint8_t **)_operator_data; /* temporary buffer handle */
+
+ if (*tbuf != **tbuf2)
+ return (-1);
+ else {
+ (*tbuf2)++;
+ return (0);
+ }
+} /* end test_select_all_iter1() */
+
+/****************************************************************
+**
+** test_select_none_iter1(): Iterator for checking none iteration
+** (This is never supposed to be called, so it always returns -1)
+**
+****************************************************************/
+static herr_t
+test_select_none_iter1(void H5_ATTR_UNUSED *_elem, hid_t H5_ATTR_UNUSED type_id, unsigned H5_ATTR_UNUSED ndim,
+ const hsize_t H5_ATTR_UNUSED *point, void H5_ATTR_UNUSED *_operator_data)
+{
+ return (-1);
+} /* end test_select_none_iter1() */
+
+/****************************************************************
+**
+** test_select_all(): Test basic H5S (dataspace) selection code.
+** Tests "all" selections.
+**
+****************************************************************/
+static void
+test_select_all(hid_t xfer_plist)
+{
+ hid_t fid1; /* HDF5 File IDs */
+ hid_t dataset; /* Dataset ID */
+ hid_t sid1; /* Dataspace ID */
+ hsize_t dims1[] = {SPACE4_DIM1, SPACE4_DIM2, SPACE4_DIM3};
+ uint8_t *wbuf, /* buffer to write to disk */
+ *rbuf, /* buffer read from disk */
+ *tbuf; /* temporary buffer pointer */
+ int i, j, k; /* Counters */
+ herr_t ret; /* Generic return value */
+ H5S_class_t ext_type; /* Extent type */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing 'All' Selection Functions\n"));
+
+ /* Allocate write & read buffers */
+ wbuf = (uint8_t *)HDmalloc(sizeof(uint8_t) * SPACE4_DIM1 * SPACE4_DIM2 * SPACE4_DIM3);
+ CHECK_PTR(wbuf, "HDmalloc");
+ rbuf = (uint8_t *)HDcalloc(sizeof(uint8_t), (size_t)(SPACE4_DIM1 * SPACE4_DIM2 * SPACE4_DIM3));
+ CHECK_PTR(rbuf, "HDcalloc");
+
+ /* Initialize write buffer */
+ for (i = 0, tbuf = wbuf; i < SPACE4_DIM1; i++)
+ for (j = 0; j < SPACE4_DIM2; j++)
+ for (k = 0; k < SPACE4_DIM3; k++)
+ *tbuf++ = (uint8_t)((((i * SPACE4_DIM2) + j) * SPACE4_DIM3) + k);
+
+ /* Create file */
+ fid1 = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(fid1, FAIL, "H5Fcreate");
+
+ /* Create dataspace for dataset */
+ sid1 = H5Screate_simple(SPACE4_RANK, dims1, NULL);
+ CHECK(sid1, FAIL, "H5Screate_simple");
+
+ /* Verify extent type */
+ ext_type = H5Sget_simple_extent_type(sid1);
+ VERIFY(ext_type, H5S_SIMPLE, "H5Sget_simple_extent_type");
+
+ /* Create a dataset */
+ dataset = H5Dcreate2(fid1, SPACE4_NAME, H5T_NATIVE_INT, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(dataset, FAIL, "H5Dcreate2");
+
+ /* Write selection to disk */
+ ret = H5Dwrite(dataset, H5T_NATIVE_UCHAR, H5S_ALL, H5S_ALL, xfer_plist, wbuf);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* Read selection from disk */
+ ret = H5Dread(dataset, H5T_NATIVE_UCHAR, H5S_ALL, H5S_ALL, xfer_plist, rbuf);
+ CHECK(ret, FAIL, "H5Dread");
+
+ /* Check that the values match with a dataset iterator */
+ tbuf = wbuf;
+ ret = H5Diterate(rbuf, H5T_NATIVE_UCHAR, sid1, test_select_all_iter1, &tbuf);
+ CHECK(ret, FAIL, "H5Diterate");
+
+ /* Close disk dataspace */
+ ret = H5Sclose(sid1);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close Dataset */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close file */
+ ret = H5Fclose(fid1);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Free memory buffers */
+ HDfree(wbuf);
+ HDfree(rbuf);
+} /* test_select_all() */
+
+/****************************************************************
+**
+** test_select_all_hyper(): Test basic H5S (dataspace) selection code.
+** Tests "all" and hyperslab selections.
+**
+****************************************************************/
+static void
+test_select_all_hyper(hid_t xfer_plist)
+{
+ hid_t fid1; /* HDF5 File IDs */
+ hid_t dataset; /* Dataset ID */
+ hid_t sid1, sid2; /* Dataspace ID */
+ hsize_t dims1[] = {SPACE3_DIM1, SPACE3_DIM2};
+ hsize_t dims2[] = {SPACE2_DIM1, SPACE2_DIM2};
+ hsize_t dims3[] = {SPACE3_DIM1, SPACE3_DIM2};
+ hsize_t start[SPACE1_RANK]; /* Starting location of hyperslab */
+ hsize_t stride[SPACE1_RANK]; /* Stride of hyperslab */
+ hsize_t count[SPACE1_RANK]; /* Element count of hyperslab */
+ hsize_t block[SPACE1_RANK]; /* Block size of hyperslab */
+ uint8_t *wbuf, /* buffer to write to disk */
+ *rbuf, /* buffer read from disk */
+ *tbuf; /* temporary buffer pointer */
+ int i, j; /* Counters */
+ herr_t ret; /* Generic return value */
+ H5S_class_t ext_type; /* Extent type */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing 'All' Selection Functions\n"));
+
+ /* Allocate write & read buffers */
+ wbuf = (uint8_t *)HDmalloc(sizeof(uint8_t) * SPACE2_DIM1 * SPACE2_DIM2);
+ CHECK_PTR(wbuf, "HDmalloc");
+ rbuf = (uint8_t *)HDcalloc(sizeof(uint8_t), (size_t)(SPACE3_DIM1 * SPACE3_DIM2));
+ CHECK_PTR(rbuf, "HDcalloc");
+
+ /* Initialize write buffer */
+ for (i = 0, tbuf = wbuf; i < SPACE2_DIM1; i++)
+ for (j = 0; j < SPACE2_DIM2; j++)
+ *tbuf++ = (uint8_t)((i * SPACE2_DIM2) + j);
+
+ /* Create file */
+ fid1 = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(fid1, FAIL, "H5Fcreate");
+
+ /* Create dataspace for dataset */
+ sid1 = H5Screate_simple(SPACE3_RANK, dims1, NULL);
+ CHECK(sid1, FAIL, "H5Screate_simple");
+
+ /* Create dataspace for writing buffer */
+ sid2 = H5Screate_simple(SPACE2_RANK, dims2, NULL);
+ CHECK(sid2, FAIL, "H5Screate_simple");
+
+ /* Verify extent type */
+ ext_type = H5Sget_simple_extent_type(sid1);
+ VERIFY(ext_type, H5S_SIMPLE, "H5Sget_simple_extent_type");
+
+ /* Select entire 15x26 extent for disk dataset */
+ ret = H5Sselect_all(sid1);
+ CHECK(ret, FAIL, "H5Sselect_all");
+
+ /* Select 15x26 hyperslab for memory dataset */
+ start[0] = 15;
+ start[1] = 0;
+ stride[0] = 1;
+ stride[1] = 1;
+ count[0] = 15;
+ count[1] = 26;
+ block[0] = 1;
+ block[1] = 1;
+ ret = H5Sselect_hyperslab(sid2, H5S_SELECT_SET, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* Create a dataset */
+ dataset = H5Dcreate2(fid1, SPACE3_NAME, H5T_NATIVE_UCHAR, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(dataset, FAIL, "H5Dcreate2");
+
+ /* Write selection to disk */
+ ret = H5Dwrite(dataset, H5T_NATIVE_UCHAR, sid2, sid1, xfer_plist, wbuf);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* Close memory dataspace */
+ ret = H5Sclose(sid2);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Create dataspace for reading buffer */
+ sid2 = H5Screate_simple(SPACE3_RANK, dims3, NULL);
+ CHECK(sid2, FAIL, "H5Screate_simple");
+
+ /* Select 15x26 hyperslab for reading memory dataset */
+ start[0] = 0;
+ start[1] = 0;
+ stride[0] = 1;
+ stride[1] = 1;
+ count[0] = 15;
+ count[1] = 26;
+ block[0] = 1;
+ block[1] = 1;
+ ret = H5Sselect_hyperslab(sid2, H5S_SELECT_SET, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* Select no extent for disk dataset */
+ ret = H5Sselect_none(sid1);
+ CHECK(ret, FAIL, "H5Sselect_none");
+
+ /* Read selection from disk (should fail with no selection defined) */
+ H5E_BEGIN_TRY
+ {
+ ret = H5Dread(dataset, H5T_NATIVE_UCHAR, sid2, sid1, xfer_plist, rbuf);
+ }
+ H5E_END_TRY;
+ VERIFY(ret, FAIL, "H5Dread");
+
+ /* Select entire 15x26 extent for disk dataset */
+ ret = H5Sselect_all(sid1);
+ CHECK(ret, FAIL, "H5Sselect_all");
+
+ /* Read selection from disk (should work now) */
+ ret = H5Dread(dataset, H5T_NATIVE_UCHAR, sid2, sid1, xfer_plist, rbuf);
+ CHECK(ret, FAIL, "H5Dread");
+
+ /* Check that the values match with a dataset iterator */
+ tbuf = wbuf + (15 * SPACE2_DIM2);
+ ret = H5Diterate(rbuf, H5T_NATIVE_UCHAR, sid2, test_select_all_iter1, &tbuf);
+ CHECK(ret, FAIL, "H5Diterate");
+
+ /* A quick check to make certain that iterating through a "none" selection works */
+ ret = H5Sselect_none(sid2);
+ CHECK(ret, FAIL, "H5Sselect_none");
+ ret = H5Diterate(rbuf, H5T_NATIVE_UCHAR, sid2, test_select_none_iter1, &tbuf);
+ CHECK(ret, FAIL, "H5Diterate");
+
+ /* Close memory dataspace */
+ ret = H5Sclose(sid2);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close disk dataspace */
+ ret = H5Sclose(sid1);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close Dataset */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close file */
+ ret = H5Fclose(fid1);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Free memory buffers */
+ HDfree(wbuf);
+ HDfree(rbuf);
+} /* test_select_all_hyper() */
+
+/****************************************************************
+**
+** test_select_combo(): Test basic H5S (dataspace) selection code.
+** Tests combinations of element and hyperslab selections between
+** dataspaces of various sizes and dimensionalities.
+**
+****************************************************************/
+static void
+test_select_combo(void)
+{
+ hid_t fid1; /* HDF5 File IDs */
+ hid_t dataset; /* Dataset ID */
+ hid_t sid1, sid2; /* Dataspace ID */
+ hsize_t dims1[] = {SPACE1_DIM1, SPACE1_DIM2, SPACE1_DIM3};
+ hsize_t dims2[] = {SPACE2_DIM1, SPACE2_DIM2};
+ hsize_t dims3[] = {SPACE3_DIM1, SPACE3_DIM2};
+ hsize_t coord1[POINT1_NPOINTS][SPACE1_RANK]; /* Coordinates for point selection */
+ hsize_t start[SPACE1_RANK]; /* Starting location of hyperslab */
+ hsize_t stride[SPACE1_RANK]; /* Stride of hyperslab */
+ hsize_t count[SPACE1_RANK]; /* Element count of hyperslab */
+ hsize_t block[SPACE1_RANK]; /* Block size of hyperslab */
+ uint8_t *wbuf, /* buffer to write to disk */
+ *rbuf, /* buffer read from disk */
+ *tbuf, /* temporary buffer pointer */
+ *tbuf2; /* temporary buffer pointer */
+ int i, j; /* Counters */
+ herr_t ret; /* Generic return value */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing Combination of Hyperslab & Element Selection Functions\n"));
+
+ /* Allocate write & read buffers */
+ wbuf = (uint8_t *)HDmalloc(sizeof(uint8_t) * SPACE2_DIM1 * SPACE2_DIM2);
+ CHECK_PTR(wbuf, "HDmalloc");
+ rbuf = (uint8_t *)HDcalloc(sizeof(uint8_t), (size_t)(SPACE3_DIM1 * SPACE3_DIM2));
+ CHECK_PTR(rbuf, "HDcalloc");
+
+ /* Initialize write buffer */
+ for (i = 0, tbuf = wbuf; i < SPACE2_DIM1; i++)
+ for (j = 0; j < SPACE2_DIM2; j++)
+ *tbuf++ = (uint8_t)((i * SPACE2_DIM2) + j);
+
+ /* Create file */
+ fid1 = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(fid1, FAIL, "H5Fcreate");
+
+ /* Create dataspace for dataset */
+ sid1 = H5Screate_simple(SPACE1_RANK, dims1, NULL);
+ CHECK(sid1, FAIL, "H5Screate_simple");
+
+ /* Create dataspace for write buffer */
+ sid2 = H5Screate_simple(SPACE2_RANK, dims2, NULL);
+ CHECK(sid2, FAIL, "H5Screate_simple");
+
+ /* Select sequence of ten points for disk dataset */
+ coord1[0][0] = 0;
+ coord1[0][1] = 10;
+ coord1[0][2] = 5;
+ coord1[1][0] = 1;
+ coord1[1][1] = 2;
+ coord1[1][2] = 7;
+ coord1[2][0] = 2;
+ coord1[2][1] = 4;
+ coord1[2][2] = 9;
+ coord1[3][0] = 0;
+ coord1[3][1] = 6;
+ coord1[3][2] = 11;
+ coord1[4][0] = 1;
+ coord1[4][1] = 8;
+ coord1[4][2] = 13;
+ coord1[5][0] = 2;
+ coord1[5][1] = 12;
+ coord1[5][2] = 0;
+ coord1[6][0] = 0;
+ coord1[6][1] = 14;
+ coord1[6][2] = 2;
+ coord1[7][0] = 1;
+ coord1[7][1] = 0;
+ coord1[7][2] = 4;
+ coord1[8][0] = 2;
+ coord1[8][1] = 1;
+ coord1[8][2] = 6;
+ coord1[9][0] = 0;
+ coord1[9][1] = 3;
+ coord1[9][2] = 8;
+ ret = H5Sselect_elements(sid1, H5S_SELECT_SET, (size_t)POINT1_NPOINTS, (const hsize_t *)coord1);
+ CHECK(ret, FAIL, "H5Sselect_elements");
+
+ /* Select 1x10 hyperslab for writing memory dataset */
+ start[0] = 0;
+ start[1] = 0;
+ stride[0] = 1;
+ stride[1] = 1;
+ count[0] = 1;
+ count[1] = 10;
+ block[0] = 1;
+ block[1] = 1;
+ ret = H5Sselect_hyperslab(sid2, H5S_SELECT_SET, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* Create a dataset */
+ dataset = H5Dcreate2(fid1, SPACE1_NAME, H5T_NATIVE_UCHAR, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(dataset, FAIL, "H5Dcreate2");
+
+ /* Write selection to disk */
+ ret = H5Dwrite(dataset, H5T_NATIVE_UCHAR, sid2, sid1, H5P_DEFAULT, wbuf);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* Close memory dataspace */
+ ret = H5Sclose(sid2);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Create dataspace for reading buffer */
+ sid2 = H5Screate_simple(SPACE3_RANK, dims3, NULL);
+ CHECK(sid2, FAIL, "H5Screate_simple");
+
+ /* Select 10x1 hyperslab for reading memory dataset */
+ start[0] = 0;
+ start[1] = 0;
+ stride[0] = 1;
+ stride[1] = 1;
+ count[0] = 10;
+ count[1] = 1;
+ block[0] = 1;
+ block[1] = 1;
+ ret = H5Sselect_hyperslab(sid2, H5S_SELECT_SET, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* Read selection from disk */
+ ret = H5Dread(dataset, H5T_NATIVE_UCHAR, sid2, sid1, H5P_DEFAULT, rbuf);
+ CHECK(ret, FAIL, "H5Dread");
+
+ /* Compare data read with data written out */
+ for (i = 0; i < POINT1_NPOINTS; i++) {
+ tbuf = wbuf + i;
+ tbuf2 = rbuf + (i * SPACE3_DIM2);
+ if (*tbuf != *tbuf2)
+ TestErrPrintf("element values don't match!, i=%d\n", i);
+ } /* end for */
+
+ /* Close memory dataspace */
+ ret = H5Sclose(sid2);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close disk dataspace */
+ ret = H5Sclose(sid1);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close Dataset */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close file */
+ ret = H5Fclose(fid1);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Free memory buffers */
+ HDfree(wbuf);
+ HDfree(rbuf);
+} /* test_select_combo() */
+
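+/* qsort() comparator for size_t values; used below by
+ * test_select_hyper_stride() to sort the expected element-offset arrays. */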
+static int
+compare_size_t(const void *s1, const void *s2)
+{
+ if (*(const size_t *)s1 < *(const size_t *)s2)
+ return (-1);
+ else if (*(const size_t *)s1 > *(const size_t *)s2)
+ return (1);
+ else
+ return (0);
+}
+
+/****************************************************************
+**
+** test_select_hyper_stride(): Test H5S (dataspace) selection code.
+** Tests strided hyperslabs of various sizes and dimensionalities.
+**
+****************************************************************/
+static void
+test_select_hyper_stride(hid_t xfer_plist)
+{
+ hid_t fid1; /* HDF5 File IDs */
+ hid_t dataset; /* Dataset ID */
+ hid_t sid1, sid2; /* Dataspace ID */
+ hsize_t dims1[] = {SPACE1_DIM1, SPACE1_DIM2, SPACE1_DIM3};
+ hsize_t dims2[] = {SPACE2_DIM1, SPACE2_DIM2};
+ hsize_t dims3[] = {SPACE3_DIM1, SPACE3_DIM2};
+ hsize_t start[SPACE1_RANK]; /* Starting location of hyperslab */
+ hsize_t stride[SPACE1_RANK]; /* Stride of hyperslab */
+ hsize_t count[SPACE1_RANK]; /* Element count of hyperslab */
+ hsize_t block[SPACE1_RANK]; /* Block size of hyperslab */
+ uint16_t *wbuf, /* buffer to write to disk */
+ *rbuf, /* buffer read from disk */
+ *tbuf, /* temporary buffer pointer */
+ *tbuf2; /* temporary buffer pointer */
+ size_t loc1[72] = {
+ /* Gruesomely ugly way to make certain hyperslab locations are checked correctly */
+ 27, 28, 29, 53, 54, 55, 79, 80, 81, /* Block #1 */
+ 32, 33, 34, 58, 59, 60, 84, 85, 86, /* Block #2 */
+ 157, 158, 159, 183, 184, 185, 209, 210, 211, /* Block #3 */
+ 162, 163, 164, 188, 189, 190, 214, 215, 216, /* Block #4 */
+ 287, 288, 289, 313, 314, 315, 339, 340, 341, /* Block #5 */
+ 292, 293, 294, 318, 319, 320, 344, 345, 346, /* Block #6 */
+ 417, 418, 419, 443, 444, 445, 469, 470, 471, /* Block #7 */
+ 422, 423, 424, 448, 449, 450, 474, 475, 476, /* Block #8 */
+ };
+ size_t loc2[72] = {
+ 0, 1, 2, 26, 27, 28, /* Block #1 */
+ 4, 5, 6, 30, 31, 32, /* Block #2 */
+ 8, 9, 10, 34, 35, 36, /* Block #3 */
+ 12, 13, 14, 38, 39, 40, /* Block #4 */
+ 104, 105, 106, 130, 131, 132, /* Block #5 */
+ 108, 109, 110, 134, 135, 136, /* Block #6 */
+ 112, 113, 114, 138, 139, 140, /* Block #7 */
+ 116, 117, 118, 142, 143, 144, /* Block #8 */
+ 208, 209, 210, 234, 235, 236, /* Block #9 */
+ 212, 213, 214, 238, 239, 240, /* Block #10 */
+ 216, 217, 218, 242, 243, 244, /* Block #11 */
+ 220, 221, 222, 246, 247, 248, /* Block #12 */
+ };
+ int i, j; /* Counters */
+ herr_t ret; /* Generic return value */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing Hyperslabs with Strides Functionality\n"));
+
+ /* Allocate write & read buffers */
+ wbuf = (uint16_t *)HDmalloc(sizeof(uint16_t) * SPACE2_DIM1 * SPACE2_DIM2);
+ CHECK_PTR(wbuf, "HDmalloc");
+ rbuf = (uint16_t *)HDcalloc(sizeof(uint16_t), (size_t)(SPACE3_DIM1 * SPACE3_DIM2));
+ CHECK_PTR(rbuf, "HDcalloc");
+
+ /* Initialize write buffer */
+ for (i = 0, tbuf = wbuf; i < SPACE2_DIM1; i++)
+ for (j = 0; j < SPACE2_DIM2; j++)
+ *tbuf++ = (uint16_t)((i * SPACE2_DIM2) + j);
+
+ /* Create file */
+ fid1 = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(fid1, FAIL, "H5Fcreate");
+
+ /* Create dataspace for dataset */
+ sid1 = H5Screate_simple(SPACE1_RANK, dims1, NULL);
+ CHECK(sid1, FAIL, "H5Screate_simple");
+
+ /* Create dataspace for writing buffer */
+ sid2 = H5Screate_simple(SPACE2_RANK, dims2, NULL);
+ CHECK(sid2, FAIL, "H5Screate_simple");
+
+ /* Select 2x3x3 count with a stride of 2x4x3 & 1x2x2 block hyperslab for disk dataset */
+ start[0] = 0;
+ start[1] = 0;
+ start[2] = 0;
+ stride[0] = 2;
+ stride[1] = 4;
+ stride[2] = 3;
+ count[0] = 2;
+ count[1] = 3;
+ count[2] = 3;
+ block[0] = 1;
+ block[1] = 2;
+ block[2] = 2;
+ ret = H5Sselect_hyperslab(sid1, H5S_SELECT_SET, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* Select 4x2 count with a stride of 5x5 & 3x3 block hyperslab for memory dataset */
+ start[0] = 1;
+ start[1] = 1;
+ stride[0] = 5;
+ stride[1] = 5;
+ count[0] = 4;
+ count[1] = 2;
+ block[0] = 3;
+ block[1] = 3;
+ ret = H5Sselect_hyperslab(sid2, H5S_SELECT_SET, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* Create a dataset */
+ dataset = H5Dcreate2(fid1, SPACE2_NAME, H5T_STD_U16LE, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(dataset, FAIL, "H5Dcreate2");
+
+ /* Write selection to disk */
+ ret = H5Dwrite(dataset, H5T_NATIVE_USHORT, sid2, sid1, xfer_plist, wbuf);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* Close memory dataspace */
+ ret = H5Sclose(sid2);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Create dataspace for reading buffer */
+ sid2 = H5Screate_simple(SPACE3_RANK, dims3, NULL);
+ CHECK(sid2, FAIL, "H5Screate_simple");
+
+ /* Select 3x4 count with a stride of 4x4 & 2x3 block hyperslab for memory dataset */
+ start[0] = 0;
+ start[1] = 0;
+ stride[0] = 4;
+ stride[1] = 4;
+ count[0] = 3;
+ count[1] = 4;
+ block[0] = 2;
+ block[1] = 3;
+ ret = H5Sselect_hyperslab(sid2, H5S_SELECT_SET, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* Read selection from disk */
+ ret = H5Dread(dataset, H5T_NATIVE_USHORT, sid2, sid1, xfer_plist, rbuf);
+ CHECK(ret, FAIL, "H5Dread");
+
+ /* Sort the locations into the proper order */
+ HDqsort(loc1, (size_t)72, sizeof(size_t), compare_size_t);
+ HDqsort(loc2, (size_t)72, sizeof(size_t), compare_size_t);
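+ /* Note: loc1[] and loc2[] list the linear offsets touched by the write and
+ * read memory selections block by block; sorting both arrays puts them in
+ * increasing (i.e., selection traversal) order, so loc1[i] and loc2[i]
+ * should refer to the same transferred element. */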
+ /* Compare data read with data written out */
+ for (i = 0; i < 72; i++) {
+ tbuf = wbuf + loc1[i];
+ tbuf2 = rbuf + loc2[i];
+ if (*tbuf != *tbuf2) {
+ HDprintf("%d: hyperslab values don't match!, loc1[%d]=%d, loc2[%d]=%d\n", __LINE__, i,
+ (int)loc1[i], i, (int)loc2[i]);
+ HDprintf("wbuf=%p, tbuf=%p, rbuf=%p, tbuf2=%p\n", (void *)wbuf, (void *)tbuf, (void *)rbuf,
+ (void *)tbuf2);
+ TestErrPrintf("*tbuf=%u, *tbuf2=%u\n", (unsigned)*tbuf, (unsigned)*tbuf2);
+ } /* end if */
+ } /* end for */
+
+ /* Close memory dataspace */
+ ret = H5Sclose(sid2);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close disk dataspace */
+ ret = H5Sclose(sid1);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close Dataset */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close file */
+ ret = H5Fclose(fid1);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Free memory buffers */
+ HDfree(wbuf);
+ HDfree(rbuf);
+} /* test_select_hyper_stride() */
+
+/****************************************************************
+**
+** test_select_hyper_contig(): Test H5S (dataspace) selection code.
+** Tests contiguous hyperslabs of various sizes and dimensionalities.
+**
+****************************************************************/
+static void
+test_select_hyper_contig(hid_t dset_type, hid_t xfer_plist)
+{
+ hid_t fid1; /* HDF5 File IDs */
+ hid_t dataset; /* Dataset ID */
+ hid_t sid1, sid2; /* Dataspace ID */
+ hsize_t dims2[] = {SPACE2_DIM2, SPACE2_DIM1};
+ hsize_t start[SPACE1_RANK]; /* Starting location of hyperslab */
+ hsize_t stride[SPACE1_RANK]; /* Stride of hyperslab */
+ hsize_t count[SPACE1_RANK]; /* Element count of hyperslab */
+ hsize_t block[SPACE1_RANK]; /* Block size of hyperslab */
+ uint16_t *wbuf, /* buffer to write to disk */
+ *rbuf, /* buffer read from disk */
+ *tbuf; /* temporary buffer pointer */
+ int i, j; /* Counters */
+ herr_t ret; /* Generic return value */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing Contiguous Hyperslabs Functionality\n"));
+
+ /* Allocate write & read buffers */
+ wbuf = (uint16_t *)HDmalloc(sizeof(uint16_t) * SPACE2_DIM1 * SPACE2_DIM2);
+ CHECK_PTR(wbuf, "HDmalloc");
+ rbuf = (uint16_t *)HDcalloc(sizeof(uint16_t), (size_t)(SPACE2_DIM1 * SPACE2_DIM2));
+ CHECK_PTR(rbuf, "HDcalloc");
+
+ /* Initialize write buffer */
+ for (i = 0, tbuf = wbuf; i < SPACE2_DIM1; i++)
+ for (j = 0; j < SPACE2_DIM2; j++)
+ *tbuf++ = (uint16_t)((i * SPACE2_DIM2) + j);
+
+ /* Create file */
+ fid1 = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(fid1, FAIL, "H5Fcreate");
+
+ /* Create dataspace for dataset */
+ sid1 = H5Screate_simple(SPACE2_RANK, dims2, NULL);
+ CHECK(sid1, FAIL, "H5Screate_simple");
+
+ /* Create dataspace for writing buffer */
+ sid2 = H5Screate_simple(SPACE2_RANK, dims2, NULL);
+ CHECK(sid2, FAIL, "H5Screate_simple");
+
+ /* Select 12x10 count with a stride of 1x3 & 1x3 block hyperslab for disk dataset */
+ start[0] = 0;
+ start[1] = 0;
+ stride[0] = 1;
+ stride[1] = 3;
+ count[0] = 12;
+ count[1] = 10;
+ block[0] = 1;
+ block[1] = 3;
+ ret = H5Sselect_hyperslab(sid1, H5S_SELECT_SET, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* Select 4x5 count with a stride of 3x6 & 3x6 block hyperslab for memory dataset */
+ start[0] = 0;
+ start[1] = 0;
+ stride[0] = 3;
+ stride[1] = 6;
+ count[0] = 4;
+ count[1] = 5;
+ block[0] = 3;
+ block[1] = 6;
+ ret = H5Sselect_hyperslab(sid2, H5S_SELECT_SET, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* Create a dataset */
+ dataset = H5Dcreate2(fid1, SPACE2_NAME, dset_type, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(dataset, FAIL, "H5Dcreate2");
+
+ /* Write selection to disk */
+ ret = H5Dwrite(dataset, H5T_NATIVE_USHORT, sid2, sid1, xfer_plist, wbuf);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* Close memory dataspace */
+ ret = H5Sclose(sid2);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Create dataspace for reading buffer */
+ sid2 = H5Screate_simple(SPACE2_RANK, dims2, NULL);
+ CHECK(sid2, FAIL, "H5Screate_simple");
+
+ /* Select 6x5 count with a stride of 2x6 & 2x6 block hyperslab for disk dataset */
+ start[0] = 0;
+ start[1] = 0;
+ stride[0] = 2;
+ stride[1] = 6;
+ count[0] = 6;
+ count[1] = 5;
+ block[0] = 2;
+ block[1] = 6;
+ ret = H5Sselect_hyperslab(sid1, H5S_SELECT_SET, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* Select 3x15 count with a stride of 4x2 & 4x2 block hyperslab for memory dataset */
+ start[0] = 0;
+ start[1] = 0;
+ stride[0] = 4;
+ stride[1] = 2;
+ count[0] = 3;
+ count[1] = 15;
+ block[0] = 4;
+ block[1] = 2;
+ ret = H5Sselect_hyperslab(sid2, H5S_SELECT_SET, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* Read selection from disk */
+ ret = H5Dread(dataset, H5T_NATIVE_USHORT, sid2, sid1, xfer_plist, rbuf);
+ CHECK(ret, FAIL, "H5Dread");
+
+ /* Compare data read with data written out */
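+ /* Note: the write and read selections above cover the same 12-row by
+ * 30-column region at the start of each buffer, so (assuming SPACE2_DIM1
+ * is 30, making each selected row a full buffer row) the first
+ * 30 * 12 = 360 uint16_t values of wbuf and rbuf should be identical. */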
+ if (HDmemcmp(rbuf, wbuf, sizeof(uint16_t) * 30 * 12) != 0)
+ TestErrPrintf("hyperslab values don't match! Line=%d\n", __LINE__);
+
+ /* Close memory dataspace */
+ ret = H5Sclose(sid2);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close disk dataspace */
+ ret = H5Sclose(sid1);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close Dataset */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close file */
+ ret = H5Fclose(fid1);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Free memory buffers */
+ HDfree(wbuf);
+ HDfree(rbuf);
+} /* test_select_hyper_contig() */
+
+/****************************************************************
+**
+** test_select_hyper_contig2(): Test H5S (dataspace) selection code.
+** Tests more contiguous hyperslabs of various sizes and dimensionalities.
+**
+****************************************************************/
+static void
+test_select_hyper_contig2(hid_t dset_type, hid_t xfer_plist)
+{
+ hid_t fid1; /* HDF5 File IDs */
+ hid_t dataset; /* Dataset ID */
+ hid_t sid1, sid2; /* Dataspace ID */
+ hsize_t dims2[] = {SPACE8_DIM4, SPACE8_DIM3, SPACE8_DIM2, SPACE8_DIM1};
+ hsize_t start[SPACE8_RANK]; /* Starting location of hyperslab */
+ hsize_t count[SPACE8_RANK]; /* Element count of hyperslab */
+ uint16_t *wbuf, /* buffer to write to disk */
+ *rbuf, /* buffer read from disk */
+ *tbuf; /* temporary buffer pointer */
+ int i, j, k, l; /* Counters */
+ herr_t ret; /* Generic return value */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing More Contiguous Hyperslabs Functionality\n"));
+
+ /* Allocate write & read buffers */
+ wbuf = (uint16_t *)HDmalloc(sizeof(uint16_t) * SPACE8_DIM1 * SPACE8_DIM2 * SPACE8_DIM3 * SPACE8_DIM4);
+ CHECK_PTR(wbuf, "HDmalloc");
+ rbuf = (uint16_t *)HDcalloc(sizeof(uint16_t),
+ (size_t)(SPACE8_DIM1 * SPACE8_DIM2 * SPACE8_DIM3 * SPACE8_DIM4));
+ CHECK_PTR(rbuf, "HDcalloc");
+
+ /* Initialize write buffer */
+ for (i = 0, tbuf = wbuf; i < SPACE8_DIM1; i++)
+ for (j = 0; j < SPACE8_DIM2; j++)
+ for (k = 0; k < SPACE8_DIM3; k++)
+ for (l = 0; l < SPACE8_DIM4; l++)
+ *tbuf++ = (uint16_t)((i * SPACE8_DIM2) + j);
+
+ /* Create file */
+ fid1 = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(fid1, FAIL, "H5Fcreate");
+
+ /* Create dataspace for dataset */
+ sid1 = H5Screate_simple(SPACE8_RANK, dims2, NULL);
+ CHECK(sid1, FAIL, "H5Screate_simple");
+
+ /* Create dataspace for writing buffer */
+ sid2 = H5Screate_simple(SPACE8_RANK, dims2, NULL);
+ CHECK(sid2, FAIL, "H5Screate_simple");
+
+ /* Select contiguous hyperslab for disk dataset */
+ start[0] = 0;
+ start[1] = 0;
+ start[2] = 0;
+ start[3] = 0;
+ count[0] = 2;
+ count[1] = SPACE8_DIM3;
+ count[2] = SPACE8_DIM2;
+ count[3] = SPACE8_DIM1;
+ ret = H5Sselect_hyperslab(sid1, H5S_SELECT_SET, start, NULL, count, NULL);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* Select contiguous hyperslab in memory */
+ start[0] = 0;
+ start[1] = 0;
+ start[2] = 0;
+ start[3] = 0;
+ count[0] = 2;
+ count[1] = SPACE8_DIM3;
+ count[2] = SPACE8_DIM2;
+ count[3] = SPACE8_DIM1;
+ ret = H5Sselect_hyperslab(sid2, H5S_SELECT_SET, start, NULL, count, NULL);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* Create a dataset */
+ dataset = H5Dcreate2(fid1, SPACE8_NAME, dset_type, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(dataset, FAIL, "H5Dcreate2");
+
+ /* Write selection to disk */
+ ret = H5Dwrite(dataset, H5T_NATIVE_USHORT, sid2, sid1, xfer_plist, wbuf);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* Close memory dataspace */
+ ret = H5Sclose(sid2);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Create dataspace for reading buffer */
+ sid2 = H5Screate_simple(SPACE8_RANK, dims2, NULL);
+ CHECK(sid2, FAIL, "H5Screate_simple");
+
+ /* Select contiguous hyperslab for disk dataset */
+ start[0] = 0;
+ start[1] = 0;
+ start[2] = 0;
+ start[3] = 0;
+ count[0] = 2;
+ count[1] = SPACE8_DIM3;
+ count[2] = SPACE8_DIM2;
+ count[3] = SPACE8_DIM1;
+ ret = H5Sselect_hyperslab(sid1, H5S_SELECT_SET, start, NULL, count, NULL);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* Select contiguous hyperslab in memory */
+ start[0] = 0;
+ start[1] = 0;
+ start[2] = 0;
+ start[3] = 0;
+ count[0] = 2;
+ count[1] = SPACE8_DIM3;
+ count[2] = SPACE8_DIM2;
+ count[3] = SPACE8_DIM1;
+ ret = H5Sselect_hyperslab(sid2, H5S_SELECT_SET, start, NULL, count, NULL);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* Read selection from disk */
+ ret = H5Dread(dataset, H5T_NATIVE_USHORT, sid2, sid1, xfer_plist, rbuf);
+ CHECK(ret, FAIL, "H5Dread");
+
+ /* Compare data read with data written out */
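+ /* Note: the hyperslabs above select the first two SPACE8_DIM3 x
+ * SPACE8_DIM2 x SPACE8_DIM1 hyperplanes of the 4-D extent, which are
+ * contiguous at the start of each buffer; hence the element count used in
+ * the HDmemcmp() below. */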
+ if (HDmemcmp(rbuf, wbuf, sizeof(uint16_t) * 2 * SPACE8_DIM3 * SPACE8_DIM2 * SPACE8_DIM1) != 0)
+ TestErrPrintf("Error: hyperslab values don't match!\n");
+
+ /* Close memory dataspace */
+ ret = H5Sclose(sid2);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close disk dataspace */
+ ret = H5Sclose(sid1);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close Dataset */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close file */
+ ret = H5Fclose(fid1);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Free memory buffers */
+ HDfree(wbuf);
+ HDfree(rbuf);
+} /* test_select_hyper_contig2() */
+
+/****************************************************************
+**
+** test_select_hyper_contig3(): Test H5S (dataspace) selection code.
+** Tests contiguous hyperslabs of various sizes and dimensionalities.
+** This test uses a hyperslab that is contiguous in the lowest dimension,
+** is not contiguous in the next dimension, and then selects across the
+** entirety of the next higher dimension (which should also be "flattened" out).
+**
+****************************************************************/
+static void
+test_select_hyper_contig3(hid_t dset_type, hid_t xfer_plist)
+{
+ hid_t fid1; /* HDF5 File IDs */
+ hid_t dataset; /* Dataset ID */
+ hid_t sid1, sid2; /* Dataspace ID */
+ hsize_t dims2[] = {SPACE8_DIM4, SPACE8_DIM3, SPACE8_DIM2, SPACE8_DIM1};
+ hsize_t start[SPACE8_RANK]; /* Starting location of hyperslab */
+ hsize_t count[SPACE8_RANK]; /* Element count of hyperslab */
+ uint16_t *wbuf, /* Buffer to write to disk */
+ *rbuf, /* Buffer read from disk */
+ *tbuf, *tbuf2; /* Temporary buffer pointers */
+ unsigned i, j, k, l; /* Counters */
+ herr_t ret; /* Generic return value */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing Yet More Contiguous Hyperslabs Functionality\n"));
+
+ /* Allocate write & read buffers */
+ wbuf = (uint16_t *)HDmalloc(sizeof(uint16_t) * SPACE8_DIM1 * SPACE8_DIM2 * SPACE8_DIM3 * SPACE8_DIM4);
+ CHECK_PTR(wbuf, "HDmalloc");
+ rbuf = (uint16_t *)HDcalloc(sizeof(uint16_t),
+ (size_t)(SPACE8_DIM1 * SPACE8_DIM2 * SPACE8_DIM3 * SPACE8_DIM4));
+ CHECK_PTR(rbuf, "HDcalloc");
+
+ /* Initialize write buffer */
+ for (i = 0, tbuf = wbuf; i < SPACE8_DIM4; i++)
+ for (j = 0; j < SPACE8_DIM3; j++)
+ for (k = 0; k < SPACE8_DIM2; k++)
+ for (l = 0; l < SPACE8_DIM1; l++)
+ *tbuf++ = (uint16_t)((k * SPACE8_DIM2) + l);
+
+ /* Create file */
+ fid1 = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(fid1, FAIL, "H5Fcreate");
+
+ /* Create dataspace for dataset */
+ sid1 = H5Screate_simple(SPACE8_RANK, dims2, NULL);
+ CHECK(sid1, FAIL, "H5Screate_simple");
+
+ /* Create dataspace for writing buffer */
+ sid2 = H5Screate_simple(SPACE8_RANK, dims2, NULL);
+ CHECK(sid2, FAIL, "H5Screate_simple");
+
+ /* Select semi-contiguous hyperslab for disk dataset */
+ start[0] = 0;
+ start[1] = 0;
+ start[2] = SPACE8_DIM2 / 2;
+ start[3] = 0;
+ count[0] = 2;
+ count[1] = SPACE8_DIM3;
+ count[2] = SPACE8_DIM2 / 2;
+ count[3] = SPACE8_DIM1;
+ ret = H5Sselect_hyperslab(sid1, H5S_SELECT_SET, start, NULL, count, NULL);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* Select semi-contiguous hyperslab in memory */
+ start[0] = 0;
+ start[1] = 0;
+ start[2] = SPACE8_DIM2 / 2;
+ start[3] = 0;
+ count[0] = 2;
+ count[1] = SPACE8_DIM3;
+ count[2] = SPACE8_DIM2 / 2;
+ count[3] = SPACE8_DIM1;
+ ret = H5Sselect_hyperslab(sid2, H5S_SELECT_SET, start, NULL, count, NULL);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* Create a dataset */
+ dataset = H5Dcreate2(fid1, SPACE8_NAME, dset_type, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(dataset, FAIL, "H5Dcreate2");
+
+ /* Write selection to disk */
+ ret = H5Dwrite(dataset, H5T_NATIVE_USHORT, sid2, sid1, xfer_plist, wbuf);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* Close memory dataspace */
+ ret = H5Sclose(sid2);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Create dataspace for reading buffer */
+ sid2 = H5Screate_simple(SPACE8_RANK, dims2, NULL);
+ CHECK(sid2, FAIL, "H5Screate_simple");
+
+ /* Select semi-contiguous hyperslab for disk dataset */
+ start[0] = 0;
+ start[1] = 0;
+ start[2] = SPACE8_DIM2 / 2;
+ start[3] = 0;
+ count[0] = 2;
+ count[1] = SPACE8_DIM3;
+ count[2] = SPACE8_DIM2 / 2;
+ count[3] = SPACE8_DIM1;
+ ret = H5Sselect_hyperslab(sid1, H5S_SELECT_SET, start, NULL, count, NULL);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* Select semi-contiguous hyperslab in memory */
+ start[0] = 0;
+ start[1] = 0;
+ start[2] = SPACE8_DIM2 / 2;
+ start[3] = 0;
+ count[0] = 2;
+ count[1] = SPACE8_DIM3;
+ count[2] = SPACE8_DIM2 / 2;
+ count[3] = SPACE8_DIM1;
+ ret = H5Sselect_hyperslab(sid2, H5S_SELECT_SET, start, NULL, count, NULL);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* Read selection from disk */
+ ret = H5Dread(dataset, H5T_NATIVE_USHORT, sid2, sid1, xfer_plist, rbuf);
+ CHECK(ret, FAIL, "H5Dread");
+
+ /* Compare data read with data written out */
+ for (i = 0, tbuf = wbuf, tbuf2 = rbuf; i < SPACE8_DIM4; i++)
+ for (j = 0; j < SPACE8_DIM3; j++)
+ for (k = 0; k < SPACE8_DIM2; k++)
+ for (l = 0; l < SPACE8_DIM1; l++, tbuf++, tbuf2++)
+ if ((i >= start[0] && i < (start[0] + count[0])) &&
+ (j >= start[1] && j < (start[1] + count[1])) &&
+ (k >= start[2] && k < (start[2] + count[2])) &&
+ (l >= start[3] && l < (start[3] + count[3]))) {
+ if (*tbuf != *tbuf2) {
+ HDprintf("Error: hyperslab values don't match!\n");
+ TestErrPrintf("Line: %d, i=%u, j=%u, k=%u, l=%u, *tbuf=%u,*tbuf2=%u\n", __LINE__,
+ i, j, k, l, (unsigned)*tbuf, (unsigned)*tbuf2);
+ } /* end if */
+ } /* end if */
+ else {
+ if (*tbuf2 != 0) {
+ HDprintf("Error: invalid data in read buffer!\n");
+ TestErrPrintf("Line: %d, i=%u, j=%u, k=%u, l=%u, *tbuf=%u,*tbuf2=%u\n", __LINE__,
+ i, j, k, l, (unsigned)*tbuf, (unsigned)*tbuf2);
+ } /* end if */
+ } /* end else */
+
+ /* Close memory dataspace */
+ ret = H5Sclose(sid2);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close disk dataspace */
+ ret = H5Sclose(sid1);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close Dataset */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close file */
+ ret = H5Fclose(fid1);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Free memory buffers */
+ HDfree(wbuf);
+ HDfree(rbuf);
+} /* test_select_hyper_contig3() */
+
+#if 0
+/****************************************************************
+**
+** verify_select_hyper_contig_dr__run_test(): Verify data from
+** test_select_hyper_contig_dr__run_test()
+**
+****************************************************************/
+static void
+verify_select_hyper_contig_dr__run_test(const uint16_t *cube_buf, size_t cube_size,
+ unsigned edge_size, unsigned cube_rank)
+{
+ const uint16_t *cube_ptr; /* Pointer into the cube buffer */
+ uint16_t expected_value; /* Expected value in dataset */
+ unsigned i, j, k, l, m; /* Local index variables */
+ size_t s; /* Local index variable */
+ hbool_t mis_match; /* Flag to indicate mismatch in expected value */
+
+ HDassert(cube_buf);
+ HDassert(cube_size > 0);
+
+ expected_value = 0;
+ mis_match = FALSE;
+ cube_ptr = cube_buf;
+ s = 0;
+ i = 0;
+ do {
+ j = 0;
+ do {
+ k = 0;
+ do {
+ l = 0;
+ do {
+ m = 0;
+ do {
+ /* Sanity check */
+ HDassert(s < cube_size);
+
+ /* Check for correct value */
+ if (*cube_ptr != expected_value)
+ mis_match = TRUE;
+
+ /* Advance to next element */
+ cube_ptr++;
+ expected_value++;
+ s++;
+ m++;
+ } while ((cube_rank > 0) && (m < edge_size));
+ l++;
+ } while ((cube_rank > 1) && (l < edge_size));
+ k++;
+ } while ((cube_rank > 2) && (k < edge_size));
+ j++;
+ } while ((cube_rank > 3) && (j < edge_size));
+ i++;
+ } while ((cube_rank > 4) && (i < edge_size));
+ if (mis_match)
+ TestErrPrintf("Initial cube data don't match! Line = %d\n", __LINE__);
+} /* verify_select_hyper_contig_dr__run_test() */
+#endif
+#if 0
+
+/****************************************************************
+**
+** test_select_hyper_contig_dr__run_test(): Test H5S (dataspace)
+** selection code with contiguous source and target having
+** different ranks but the same shape. We have already
+** tested H5Sselect_shape_same in isolation, so now we try to do
+** I/O.
+**
+****************************************************************/
+static void
+test_select_hyper_contig_dr__run_test(int test_num, const uint16_t *cube_buf, const uint16_t *zero_buf,
+ unsigned edge_size, unsigned chunk_edge_size, unsigned small_rank,
+ unsigned large_rank, hid_t dset_type, hid_t xfer_plist)
+{
+ hbool_t mis_match; /* Flag indicating a value read in wasn't what was expected */
+ hid_t fapl; /* File access property list */
+ hid_t fid1; /* File ID */
+ hid_t small_cube_sid; /* Dataspace ID for small cube in memory & file */
+ hid_t mem_large_cube_sid; /* Dataspace ID for large cube in memory */
+ hid_t file_large_cube_sid; /* Dataspace ID for large cube in file */
+ hid_t small_cube_dcpl_id = H5P_DEFAULT; /* DCPL for small cube dataset */
+ hid_t large_cube_dcpl_id = H5P_DEFAULT; /* DCPL for large cube dataset */
+ hid_t small_cube_dataset; /* Dataset ID */
+ hid_t large_cube_dataset; /* Dataset ID */
+ size_t start_index; /* Offset within buffer to begin inspecting */
+ size_t stop_index; /* Offset within buffer to end inspecting */
+ uint16_t expected_value; /* Expected value in dataset */
+ uint16_t *small_cube_buf_1; /* Buffer for small cube data */
+ uint16_t *large_cube_buf_1; /* Buffer for large cube data */
+ uint16_t *ptr_1; /* Temporary pointer into cube data */
+ hsize_t dims[SS_DR_MAX_RANK]; /* Dataspace dimensions */
+ hsize_t start[SS_DR_MAX_RANK]; /* Shared hyperslab start offset */
+ hsize_t stride[SS_DR_MAX_RANK]; /* Shared hyperslab stride */
+ hsize_t count[SS_DR_MAX_RANK]; /* Shared hyperslab count */
+ hsize_t block[SS_DR_MAX_RANK]; /* Shared hyperslab block size */
+ hsize_t *start_ptr; /* Actual hyperslab start offset */
+ hsize_t *stride_ptr; /* Actual hyperslab stride */
+ hsize_t *count_ptr; /* Actual hyperslab count */
+ hsize_t *block_ptr; /* Actual hyperslab block size */
+ size_t small_cube_size; /* Number of elements in small cube */
+ size_t large_cube_size; /* Number of elements in large cube */
+ unsigned u, v, w, x; /* Local index variables */
+ size_t s; /* Local index variable */
+ htri_t check; /* Shape comparison return value */
+ herr_t ret; /* Generic return value */
+
+ MESSAGE(7, ("\tn-cube slice through m-cube I/O test %d.\n", test_num));
+ MESSAGE(7, ("\tranks = %u/%u, edge_size = %u, chunk_edge_size = %u.\n", small_rank, large_rank, edge_size,
+ chunk_edge_size));
+
+ HDassert(edge_size >= 6);
+ HDassert(edge_size >= chunk_edge_size);
+ HDassert((chunk_edge_size == 0) || (chunk_edge_size >= 3));
+ HDassert(small_rank > 0);
+ HDassert(small_rank < large_rank);
+ HDassert(large_rank <= SS_DR_MAX_RANK);
+
+ /* Compute cube sizes */
+ small_cube_size = large_cube_size = (size_t)1;
+ for (u = 0; u < large_rank; u++) {
+ if (u < small_rank)
+ small_cube_size *= (size_t)edge_size;
+
+ large_cube_size *= (size_t)edge_size;
+ } /* end for */
+
+ HDassert(large_cube_size < (size_t)UINT_MAX);
+
+ /* set up the start, stride, count, and block pointers */
+ start_ptr = &(start[SS_DR_MAX_RANK - large_rank]);
+ stride_ptr = &(stride[SS_DR_MAX_RANK - large_rank]);
+ count_ptr = &(count[SS_DR_MAX_RANK - large_rank]);
+ block_ptr = &(block[SS_DR_MAX_RANK - large_rank]);
+
+ /* Allocate buffers */
+ small_cube_buf_1 = (uint16_t *)HDcalloc(sizeof(uint16_t), small_cube_size);
+ CHECK_PTR(small_cube_buf_1, "HDcalloc");
+ large_cube_buf_1 = (uint16_t *)HDcalloc(sizeof(uint16_t), large_cube_size);
+ CHECK_PTR(large_cube_buf_1, "HDcalloc");
+
+ /* Create a file access property list */
+ fapl = H5Pcreate(H5P_FILE_ACCESS);
+ CHECK(fapl, FAIL, "H5Pcreate");
+
+ /* Use the 'core' VFD for this test */
+ ret = H5Pset_fapl_core(fapl, (size_t)(1024 * 1024), FALSE);
+ CHECK(ret, FAIL, "H5Pset_fapl_core");
+
+ /* Create file */
+ fid1 = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
+ CHECK(fid1, FAIL, "H5Fcreate");
+
+ /* Close file access property list */
+ ret = H5Pclose(fapl);
+ CHECK(ret, FAIL, "H5Pclose");
+
+ /* setup dims: */
+ dims[0] = dims[1] = dims[2] = dims[3] = dims[4] = (hsize_t)edge_size;
+
+ /* Create small cube dataspaces */
+ small_cube_sid = H5Screate_simple((int)small_rank, dims, NULL);
+ CHECK(small_cube_sid, FAIL, "H5Screate_simple");
+
+ /* Create large cube dataspace */
+ mem_large_cube_sid = H5Screate_simple((int)large_rank, dims, NULL);
+ CHECK(mem_large_cube_sid, FAIL, "H5Screate_simple");
+ file_large_cube_sid = H5Screate_simple((int)large_rank, dims, NULL);
+ CHECK(file_large_cube_sid, FAIL, "H5Screate_simple");
+
+ /* if chunk edge size is greater than zero, set up the small and
+ * large data set creation property lists to specify chunked
+ * datasets.
+ */
+ if (chunk_edge_size > 0) {
+ hsize_t chunk_dims[SS_DR_MAX_RANK]; /* Chunk dimensions */
+
+ chunk_dims[0] = chunk_dims[1] = chunk_dims[2] = chunk_dims[3] = chunk_dims[4] =
+ (hsize_t)chunk_edge_size;
+
+ small_cube_dcpl_id = H5Pcreate(H5P_DATASET_CREATE);
+ CHECK(small_cube_dcpl_id, FAIL, "H5Pcreate");
+
+ ret = H5Pset_layout(small_cube_dcpl_id, H5D_CHUNKED);
+ CHECK(ret, FAIL, "H5Pset_layout");
+
+ ret = H5Pset_chunk(small_cube_dcpl_id, (int)small_rank, chunk_dims);
+ CHECK(ret, FAIL, "H5Pset_chunk");
+
+ large_cube_dcpl_id = H5Pcreate(H5P_DATASET_CREATE);
+ CHECK(large_cube_dcpl_id, FAIL, "H5Pcreate");
+
+ ret = H5Pset_layout(large_cube_dcpl_id, H5D_CHUNKED);
+ CHECK(ret, FAIL, "H5Pset_layout");
+
+ ret = H5Pset_chunk(large_cube_dcpl_id, (int)large_rank, chunk_dims);
+ CHECK(ret, FAIL, "H5Pset_chunk");
+ } /* end if */
+
+ /* create the small cube dataset */
+ small_cube_dataset = H5Dcreate2(fid1, "small_cube_dataset", dset_type, small_cube_sid, H5P_DEFAULT,
+ small_cube_dcpl_id, H5P_DEFAULT);
+ CHECK(small_cube_dataset, FAIL, "H5Dcreate2");
+
+ /* Close non-default small dataset DCPL */
+ if (small_cube_dcpl_id != H5P_DEFAULT) {
+ ret = H5Pclose(small_cube_dcpl_id);
+ CHECK(ret, FAIL, "H5Pclose");
+ } /* end if */
+
+ /* create the large cube dataset */
+ large_cube_dataset = H5Dcreate2(fid1, "large_cube_dataset", dset_type, file_large_cube_sid, H5P_DEFAULT,
+ large_cube_dcpl_id, H5P_DEFAULT);
+ CHECK(large_cube_dataset, FAIL, "H5Dcreate2");
+
+ /* Close non-default large dataset DCPL */
+ if (large_cube_dcpl_id != H5P_DEFAULT) {
+ ret = H5Pclose(large_cube_dcpl_id);
+ CHECK(ret, FAIL, "H5Pclose");
+ } /* end if */
+
+ /* write initial data to the on disk datasets */
+ ret =
+ H5Dwrite(small_cube_dataset, H5T_NATIVE_UINT16, small_cube_sid, small_cube_sid, xfer_plist, cube_buf);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ ret = H5Dwrite(large_cube_dataset, H5T_NATIVE_UINT16, mem_large_cube_sid, file_large_cube_sid, xfer_plist,
+ cube_buf);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* read initial data from disk and verify that it is as expected. */
+ ret = H5Dread(small_cube_dataset, H5T_NATIVE_UINT16, small_cube_sid, small_cube_sid, xfer_plist,
+ small_cube_buf_1);
+ CHECK(ret, FAIL, "H5Dread");
+
+ /* Check that the data is valid */
+ verify_select_hyper_contig_dr__run_test(small_cube_buf_1, small_cube_size, edge_size, small_rank);
+
+ ret = H5Dread(large_cube_dataset, H5T_NATIVE_UINT16, mem_large_cube_sid, file_large_cube_sid, xfer_plist,
+ large_cube_buf_1);
+ CHECK(ret, FAIL, "H5Dread");
+
+ /* Check that the data is valid */
+ verify_select_hyper_contig_dr__run_test(large_cube_buf_1, large_cube_size, edge_size, large_rank);
+
+ /* first, verify that we can read from disk correctly using selections
+ * of different rank that H5Sselect_shape_same() views as being of the
+ * same shape.
+ *
+ * Start by reading a small_rank-D slice from the on disk large cube, and
+ * verifying that the data read is correct. Verify that H5Sselect_shape_same()
+ * returns true on the memory and file selections.
+ */
+
+ /* set up start, stride, count, and block -- note that we will
+ * change start[] so as to read slices of the large cube.
+ */
+ for (u = 0; u < SS_DR_MAX_RANK; u++) {
+ start[u] = 0;
+ stride[u] = 1;
+ count[u] = 1;
+ if ((SS_DR_MAX_RANK - u) > small_rank)
+ block[u] = 1;
+ else
+ block[u] = (hsize_t)edge_size;
+ } /* end for */
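+ /* For example, with SS_DR_MAX_RANK = 5, edge_size = 6, small_rank = 2,
+ * and large_rank = 4, this leaves block = {1, 1, 1, 6, 6}; since
+ * start_ptr/stride_ptr/count_ptr/block_ptr point at index
+ * SS_DR_MAX_RANK - large_rank, the hyperslab calls below see only the
+ * trailing large_rank entries and select a 6x6 slice through the 4-cube. */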
+
+ u = 0;
+ do {
+ v = 0;
+ do {
+ w = 0;
+ do {
+ x = 0;
+ do {
+ /* we know that small_rank >= 1 and that large_rank > small_rank
+ * by the assertions at the head of this function. Thus no
+ * need for another inner loop.
+ */
+ start[0] = (hsize_t)u;
+ start[1] = (hsize_t)v;
+ start[2] = (hsize_t)w;
+ start[3] = (hsize_t)x;
+ start[4] = (hsize_t)0;
+
+ ret = H5Sselect_hyperslab(file_large_cube_sid, H5S_SELECT_SET, start_ptr, stride_ptr,
+ count_ptr, block_ptr);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* verify that H5Sselect_shape_same() reports the two
+ * selections as having the same shape.
+ */
+ check = H5Sselect_shape_same(small_cube_sid, file_large_cube_sid);
+ VERIFY(check, TRUE, "H5Sselect_shape_same");
+
+ /* Read selection from disk */
+ ret = H5Dread(large_cube_dataset, H5T_NATIVE_UINT16, small_cube_sid, file_large_cube_sid,
+ xfer_plist, small_cube_buf_1);
+ CHECK(ret, FAIL, "H5Dread");
+
+ /* verify that expected data is retrieved */
+ mis_match = FALSE;
+ ptr_1 = small_cube_buf_1;
+ expected_value = (uint16_t)((u * edge_size * edge_size * edge_size * edge_size) +
+ (v * edge_size * edge_size * edge_size) +
+ (w * edge_size * edge_size) + (x * edge_size));
+ for (s = 0; s < small_cube_size; s++) {
+ if (*ptr_1 != expected_value)
+ mis_match = TRUE;
+ ptr_1++;
+ expected_value++;
+ } /* end for */
+ if (mis_match)
+ TestErrPrintf("small cube read from largecube has bad data! Line=%d\n", __LINE__);
+
+ x++;
+ } while ((large_rank >= 2) && (small_rank <= 1) && (x < edge_size));
+ w++;
+ } while ((large_rank >= 3) && (small_rank <= 2) && (w < edge_size));
+ v++;
+ } while ((large_rank >= 4) && (small_rank <= 3) && (v < edge_size));
+ u++;
+ } while ((large_rank >= 5) && (small_rank <= 4) && (u < edge_size));
+
+ /* similarly, read the on disk small cube into slices through the in memory
+ * large cube, and verify that the correct data (and only the correct data)
+ * is read.
+ */
+
+ /* zero out the in-memory large cube */
+ HDmemset(large_cube_buf_1, 0, large_cube_size * sizeof(uint16_t));
+
+ u = 0;
+ do {
+ v = 0;
+ do {
+ w = 0;
+ do {
+ x = 0;
+ do {
+ /* we know that small_rank >= 1 and that large_rank > small_rank
+ * by the assertions at the head of this function. Thus no
+ * need for another inner loop.
+ */
+ start[0] = (hsize_t)u;
+ start[1] = (hsize_t)v;
+ start[2] = (hsize_t)w;
+ start[3] = (hsize_t)x;
+ start[4] = (hsize_t)0;
+
+ ret = H5Sselect_hyperslab(mem_large_cube_sid, H5S_SELECT_SET, start_ptr, stride_ptr,
+ count_ptr, block_ptr);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* verify that H5Sselect_shape_same() reports the two
+ * selections as having the same shape.
+ */
+ check = H5Sselect_shape_same(small_cube_sid, mem_large_cube_sid);
+ VERIFY(check, TRUE, "H5Sselect_shape_same");
+
+ /* Read selection from disk */
+ ret = H5Dread(small_cube_dataset, H5T_NATIVE_UINT16, mem_large_cube_sid, small_cube_sid,
+ xfer_plist, large_cube_buf_1);
+ CHECK(ret, FAIL, "H5Dread");
+
+ /* verify that the expected data and only the
+ * expected data was read.
+ */
+ start_index = (u * edge_size * edge_size * edge_size * edge_size) +
+ (v * edge_size * edge_size * edge_size) + (w * edge_size * edge_size) +
+ (x * edge_size);
+ stop_index = start_index + small_cube_size - 1;
+
+ HDassert(start_index < stop_index);
+ HDassert(stop_index <= large_cube_size);
+
+ mis_match = FALSE;
+ ptr_1 = large_cube_buf_1;
+ expected_value = 0;
+ for (s = 0; s < start_index; s++) {
+ if (*ptr_1 != 0)
+ mis_match = TRUE;
+ ptr_1++;
+ } /* end for */
+ for (; s <= stop_index; s++) {
+ if (*ptr_1 != expected_value)
+ mis_match = TRUE;
+ expected_value++;
+ ptr_1++;
+ } /* end for */
+ for (; s < large_cube_size; s++) {
+ if (*ptr_1 != 0)
+ mis_match = TRUE;
+ ptr_1++;
+ } /* end for */
+ if (mis_match)
+ TestErrPrintf("large cube read from small cube has bad data! Line=%u\n", __LINE__);
+
+ /* Zero out the buffer for the next pass */
+ HDmemset(large_cube_buf_1 + start_index, 0, small_cube_size * sizeof(uint16_t));
+
+ x++;
+ } while ((large_rank >= 2) && (small_rank <= 1) && (x < edge_size));
+ w++;
+ } while ((large_rank >= 3) && (small_rank <= 2) && (w < edge_size));
+ v++;
+ } while ((large_rank >= 4) && (small_rank <= 3) && (v < edge_size));
+ u++;
+ } while ((large_rank >= 5) && (small_rank <= 4) && (u < edge_size));
+
+ /* now we go in the opposite direction, verifying that we can write
+ * from memory to file using selections of different rank that
+ * H5Sselect_shape_same() views as being of the same shape.
+ *
+ * Start by writing small_rank-D slices from the in memory large cube to
+ * the on disk small cube dataset. After each write, read the small
+ * cube dataset back from disk, and verify that it contains the expected
+ * data. Verify that H5Sselect_shape_same() returns true on the
+ * memory and file selections.
+ */
+
+ u = 0;
+ do {
+ v = 0;
+ do {
+ w = 0;
+ do {
+ x = 0;
+ do {
+ /* we know that small_rank >= 1 and that large_rank > small_rank
+ * by the assertions at the head of this function. Thus no
+ * need for another inner loop.
+ */
+
+ /* zero out the on disk small cube */
+ ret = H5Dwrite(small_cube_dataset, H5T_NATIVE_UINT16, small_cube_sid, small_cube_sid,
+ xfer_plist, zero_buf);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* select the portion of the in memory large cube from which we
+ * are going to write data.
+ */
+ start[0] = (hsize_t)u;
+ start[1] = (hsize_t)v;
+ start[2] = (hsize_t)w;
+ start[3] = (hsize_t)x;
+ start[4] = (hsize_t)0;
+
+ ret = H5Sselect_hyperslab(mem_large_cube_sid, H5S_SELECT_SET, start_ptr, stride_ptr,
+ count_ptr, block_ptr);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* verify that H5Sselect_shape_same() reports the in
+ * memory slice through the cube selection and the
+ * on disk full small cube selections as having the same shape.
+ */
+ check = H5Sselect_shape_same(small_cube_sid, mem_large_cube_sid);
+ VERIFY(check, TRUE, "H5Sselect_shape_same");
+
+ /* write the slice from the in memory large cube to the on disk small cube */
+ ret = H5Dwrite(small_cube_dataset, H5T_NATIVE_UINT16, mem_large_cube_sid, small_cube_sid,
+ xfer_plist, cube_buf);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* read the on disk small cube into memory */
+ ret = H5Dread(small_cube_dataset, H5T_NATIVE_UINT16, small_cube_sid, small_cube_sid,
+ xfer_plist, small_cube_buf_1);
+ CHECK(ret, FAIL, "H5Dread");
+
+ /* verify that expected data is retrieved */
+ mis_match = FALSE;
+ ptr_1 = small_cube_buf_1;
+ expected_value = (uint16_t)((u * edge_size * edge_size * edge_size * edge_size) +
+ (v * edge_size * edge_size * edge_size) +
+ (w * edge_size * edge_size) + (x * edge_size));
+ for (s = 0; s < small_cube_size; s++) {
+ if (*ptr_1 != expected_value)
+ mis_match = TRUE;
+ expected_value++;
+ ptr_1++;
+ } /* end for */
+ if (mis_match)
+ TestErrPrintf("small cube data don't match! Line=%d\n", __LINE__);
+
+ x++;
+ } while ((large_rank >= 2) && (small_rank <= 1) && (x < edge_size));
+ w++;
+ } while ((large_rank >= 3) && (small_rank <= 2) && (w < edge_size));
+ v++;
+ } while ((large_rank >= 4) && (small_rank <= 3) && (v < edge_size));
+ u++;
+ } while ((large_rank >= 5) && (small_rank <= 4) && (u < edge_size));
+
+ /* Now write the contents of the in memory small cube to slices of
+ * the on disk large cube. After each write, read the on disk large cube
+ * into memory, and verify that it contains the expected
+ * data. Verify that H5Sselect_shape_same() returns true on
+ * the memory and file selections.
+ */
+
+ /* select the entire memory and file cube dataspaces */
+ ret = H5Sselect_all(mem_large_cube_sid);
+ CHECK(ret, FAIL, "H5Sselect_all");
+
+ ret = H5Sselect_all(file_large_cube_sid);
+ CHECK(ret, FAIL, "H5Sselect_all");
+
+ u = 0;
+ do {
+ v = 0;
+ do {
+ w = 0;
+ do {
+ x = 0;
+ do {
+ /* we know that small_rank >= 1 and that large_rank > small_rank
+ * by the assertions at the head of this function. Thus no
+ * need for another inner loop.
+ */
+
+ /* zero out the on disk large cube */
+ ret = H5Dwrite(large_cube_dataset, H5T_NATIVE_USHORT, mem_large_cube_sid,
+ file_large_cube_sid, xfer_plist, zero_buf);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* select the portion of the in memory large cube to which we
+ * are going to write data.
+ */
+ start[0] = (hsize_t)u;
+ start[1] = (hsize_t)v;
+ start[2] = (hsize_t)w;
+ start[3] = (hsize_t)x;
+ start[4] = (hsize_t)0;
+
+ ret = H5Sselect_hyperslab(file_large_cube_sid, H5S_SELECT_SET, start_ptr, stride_ptr,
+ count_ptr, block_ptr);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* verify that H5Sselect_shape_same() reports the in
+ * memory full selection of the small cube and the
+ * on disk slice through the large cube selection
+ * as having the same shape.
+ */
+ check = H5Sselect_shape_same(small_cube_sid, file_large_cube_sid);
+ VERIFY(check, TRUE, "H5Sselect_shape_same");
+
+ /* write the cube from memory to the target slice of the disk cube */
+ ret = H5Dwrite(large_cube_dataset, H5T_NATIVE_UINT16, small_cube_sid, file_large_cube_sid,
+ xfer_plist, cube_buf);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* read the on disk cube into memory */
+ ret = H5Sselect_all(file_large_cube_sid);
+ CHECK(ret, FAIL, "H5Sselect_all");
+
+ ret = H5Dread(large_cube_dataset, H5T_NATIVE_UINT16, mem_large_cube_sid,
+ file_large_cube_sid, xfer_plist, large_cube_buf_1);
+ CHECK(ret, FAIL, "H5Dread");
+
+ /* verify that the expected data and only the
+ * expected data was read.
+ */
+ start_index = (u * edge_size * edge_size * edge_size * edge_size) +
+ (v * edge_size * edge_size * edge_size) + (w * edge_size * edge_size) +
+ (x * edge_size);
+ stop_index = start_index + small_cube_size - 1;
+
+ HDassert(start_index < stop_index);
+ HDassert(stop_index <= large_cube_size);
+
+ mis_match = FALSE;
+ ptr_1 = large_cube_buf_1;
+ expected_value = 0;
+ for (s = 0; s < start_index; s++) {
+ if (*ptr_1 != 0)
+ mis_match = TRUE;
+ ptr_1++;
+ } /* end for */
+ for (; s <= stop_index; s++) {
+ if (*ptr_1 != expected_value)
+ mis_match = TRUE;
+ expected_value++;
+ ptr_1++;
+ } /* end for */
+ for (; s < large_cube_size; s++) {
+ if (*ptr_1 != 0)
+ mis_match = TRUE;
+ ptr_1++;
+ } /* end for */
+ if (mis_match)
+ TestErrPrintf("large cube written from small cube has bad data! Line=%d\n", __LINE__);
+
+ x++;
+ } while ((large_rank >= 2) && (small_rank <= 1) && (x < edge_size));
+ w++;
+ } while ((large_rank >= 3) && (small_rank <= 2) && (w < edge_size));
+ v++;
+ } while ((large_rank >= 4) && (small_rank <= 3) && (v < edge_size));
+ u++;
+ } while ((large_rank >= 5) && (small_rank <= 4) && (u < edge_size));
+
+ /* Close memory dataspaces */
+ ret = H5Sclose(small_cube_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ ret = H5Sclose(mem_large_cube_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close disk dataspace */
+ ret = H5Sclose(file_large_cube_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close Datasets */
+ ret = H5Dclose(small_cube_dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ ret = H5Dclose(large_cube_dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close file */
+ ret = H5Fclose(fid1);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Free memory buffers */
+ HDfree(small_cube_buf_1);
+ HDfree(large_cube_buf_1);
+
+} /* test_select_hyper_contig_dr__run_test() */
+#endif
+#if 0
+/****************************************************************
+**
+** test_select_hyper_contig_dr(): Test H5S (dataspace)
+** selection code with contiguous source and target having
+** different ranks but the same shape. We have already
+** tested H5Sselect_shape_same in isolation, so now we try to do
+** I/O.
+**
+****************************************************************/
+static void
+test_select_hyper_contig_dr(hid_t dset_type, hid_t xfer_plist)
+{
+ int test_num = 0;
+ unsigned chunk_edge_size; /* Size of chunk's dataspace dimensions */
+ unsigned edge_size = 6; /* Size of dataset's dataspace dimensions */
+ unsigned small_rank; /* Current rank of small dataset */
+ unsigned large_rank; /* Current rank of large dataset */
+ uint16_t *cube_buf; /* Buffer for writing cube data */
+ uint16_t *zero_buf; /* Buffer for writing zeroed cube data */
+ uint16_t *cube_ptr; /* Temporary pointer into cube data */
+ unsigned max_rank = 5; /* Max. rank to use */
+ size_t max_cube_size; /* Max. number of elements in largest cube */
+ size_t s; /* Local index variable */
+ unsigned u; /* Local index variable */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing Contiguous Hyperslabs With Different Rank I/O Functionality\n"));
+
+ /* Compute max. cube size */
+ max_cube_size = (size_t)1;
+ for (u = 0; u < max_rank; u++)
+ max_cube_size *= (size_t)edge_size;
+
+ /* Allocate cube buffer for writing values */
+ cube_buf = (uint16_t *)HDmalloc(sizeof(uint16_t) * max_cube_size);
+ CHECK_PTR(cube_buf, "HDmalloc");
+
+ /* Initialize the cube buffer */
+ cube_ptr = cube_buf;
+ for (s = 0; s < max_cube_size; s++)
+ *cube_ptr++ = (uint16_t)s;
+
+ /* Allocate cube buffer for zeroing values on disk */
+ zero_buf = (uint16_t *)HDcalloc(sizeof(uint16_t), max_cube_size);
+ CHECK_PTR(zero_buf, "HDcalloc");
+
+ for (large_rank = 1; large_rank <= max_rank; large_rank++) {
+ for (small_rank = 1; small_rank < large_rank; small_rank++) {
+ chunk_edge_size = 0;
+ test_select_hyper_contig_dr__run_test(test_num, cube_buf, zero_buf, edge_size, chunk_edge_size,
+ small_rank, large_rank, dset_type, xfer_plist);
+ test_num++;
+
+ chunk_edge_size = 3;
+ test_select_hyper_contig_dr__run_test(test_num, cube_buf, zero_buf, edge_size, chunk_edge_size,
+ small_rank, large_rank, dset_type, xfer_plist);
+ test_num++;
+ } /* for loop on small rank */
+ } /* for loop on large rank */
+
+ HDfree(cube_buf);
+ HDfree(zero_buf);
+
+} /* test_select_hyper_contig_dr() */
+#endif
+/****************************************************************
+**
+** test_select_hyper_checker_board_dr__select_checker_board():
+** Given an n-cube dataspace with each edge of length
+** edge_size, and a checker_edge_size, either select a checker
+** board selection of the entire cube (if sel_rank == n),
+** or select a checker board selection of a sel_rank
+** dimensional slice through the n-cube parallel to the
+** sel_rank fastest changing indices, with origin (in the
+** higher indices) as indicated by the start array.
+**
+** Note that this function, like all its relatives, is
+** hard coded to presume a maximum n-cube rank of 5.
+** While this maximum is declared as a constant, increasing
+** it will require extensive coding in addition to changing
+** the value of the constant.
+**
+** JRM -- 9/9/09
+**
+****************************************************************/
+#if 0
+static void
+test_select_hyper_checker_board_dr__select_checker_board(hid_t tgt_n_cube_sid, unsigned tgt_n_cube_rank,
+ unsigned edge_size, unsigned checker_edge_size,
+ unsigned sel_rank, const hsize_t sel_start[])
+{
+ hbool_t first_selection = TRUE;
+ unsigned n_cube_offset;
+ unsigned sel_offset;
+ hsize_t base_count;
+ hsize_t offset_count;
+ hsize_t start[SS_DR_MAX_RANK]; /* Offset of hyperslab selection */
+ hsize_t stride[SS_DR_MAX_RANK]; /* Stride of hyperslab selection */
+ hsize_t count[SS_DR_MAX_RANK]; /* Count of hyperslab selection */
+ hsize_t block[SS_DR_MAX_RANK]; /* Block size of hyperslab selection */
+ unsigned i, j, k, l, m; /* Local index variables */
+ unsigned u; /* Local index variable */
+ herr_t ret; /* Generic return value */
+
+ HDassert(edge_size >= 6);
+ HDassert(0 < checker_edge_size);
+ HDassert(checker_edge_size <= edge_size);
+ HDassert(0 < sel_rank);
+ HDassert(sel_rank <= tgt_n_cube_rank);
+ HDassert(tgt_n_cube_rank <= SS_DR_MAX_RANK);
+
+ sel_offset = SS_DR_MAX_RANK - sel_rank;
+ n_cube_offset = SS_DR_MAX_RANK - tgt_n_cube_rank;
+ HDassert(n_cube_offset <= sel_offset);
+
+ /* First, compute the base count (which assumes start == 0
+ * for the associated offset) and offset_count (which
+ * assumes start == checker_edge_size for the associated
+ * offset).
+ */
+ base_count = edge_size / (checker_edge_size * 2);
+ if ((edge_size % (checker_edge_size * 2)) > 0)
+ base_count++;
+
+ offset_count = (edge_size - checker_edge_size) / (checker_edge_size * 2);
+ if (((edge_size - checker_edge_size) % (checker_edge_size * 2)) > 0)
+ offset_count++;
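+
+ /* For example (illustration only): with edge_size = 6 and
+ * checker_edge_size = 2, base_count = 6 / 4 = 1 with a remainder of 2,
+ * so it is bumped to 2, while offset_count = (6 - 2) / 4 = 1 with no
+ * remainder, so it stays 1.
+ */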
+
+ /* Now set up the stride and block arrays, and portions of the start
+ * and count arrays that will not be altered during the selection of
+ * the checker board.
+ */
+ u = 0;
+ while (u < n_cube_offset) {
+ /* these values should never be used */
+ start[u] = 0;
+ stride[u] = 0;
+ count[u] = 0;
+ block[u] = 0;
+
+ u++;
+ } /* end while */
+
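+ /* Dimensions that belong to the target n-cube but not to the selected
+ * slice: pick the single index given by sel_start[] in each of them.
+ */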
+ while (u < sel_offset) {
+ start[u] = sel_start[u];
+ stride[u] = 2 * edge_size;
+ count[u] = 1;
+ block[u] = 1;
+
+ u++;
+ } /* end while */
+
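+ /* The sel_rank fastest-changing dimensions carry the checkerboard
+ * pattern: stride and block are fixed here, while start[] and count[]
+ * are filled in per checker phase by the nested loops below.
+ */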
+ while (u < SS_DR_MAX_RANK) {
+ stride[u] = 2 * checker_edge_size;
+ block[u] = checker_edge_size;
+
+ u++;
+ } /* end while */
+
+ i = 0;
+ do {
+ if (0 >= sel_offset) {
+ if (i == 0) {
+ start[0] = 0;
+ count[0] = base_count;
+ } /* end if */
+ else {
+ start[0] = checker_edge_size;
+ count[0] = offset_count;
+ } /* end else */
+ } /* end if */
+
+ j = 0;
+ do {
+ if (1 >= sel_offset) {
+ if (j == 0) {
+ start[1] = 0;
+ count[1] = base_count;
+ } /* end if */
+ else {
+ start[1] = checker_edge_size;
+ count[1] = offset_count;
+ } /* end else */
+ } /* end if */
+
+ k = 0;
+ do {
+ if (2 >= sel_offset) {
+ if (k == 0) {
+ start[2] = 0;
+ count[2] = base_count;
+ } /* end if */
+ else {
+ start[2] = checker_edge_size;
+ count[2] = offset_count;
+ } /* end else */
+ } /* end if */
+
+ l = 0;
+ do {
+ if (3 >= sel_offset) {
+ if (l == 0) {
+ start[3] = 0;
+ count[3] = base_count;
+ } /* end if */
+ else {
+ start[3] = checker_edge_size;
+ count[3] = offset_count;
+ } /* end else */
+ } /* end if */
+
+ m = 0;
+ do {
+ if (4 >= sel_offset) {
+ if (m == 0) {
+ start[4] = 0;
+ count[4] = base_count;
+ } /* end if */
+ else {
+ start[4] = checker_edge_size;
+ count[4] = offset_count;
+ } /* end else */
+ } /* end if */
+
+ if (((i + j + k + l + m) % 2) == 0) {
+ if (first_selection) {
+ first_selection = FALSE;
+
+ ret = H5Sselect_hyperslab(tgt_n_cube_sid, H5S_SELECT_SET,
+ &(start[n_cube_offset]), &(stride[n_cube_offset]),
+ &(count[n_cube_offset]), &(block[n_cube_offset]));
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+ } /* end if */
+ else {
+ ret = H5Sselect_hyperslab(tgt_n_cube_sid, H5S_SELECT_OR,
+ &(start[n_cube_offset]), &(stride[n_cube_offset]),
+ &(count[n_cube_offset]), &(block[n_cube_offset]));
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+ } /* end else */
+ } /* end if */
+
+ m++;
+ } while ((m <= 1) && (4 >= sel_offset));
+ l++;
+ } while ((l <= 1) && (3 >= sel_offset));
+ k++;
+ } while ((k <= 1) && (2 >= sel_offset));
+ j++;
+ } while ((j <= 1) && (1 >= sel_offset));
+ i++;
+ } while ((i <= 1) && (0 >= sel_offset));
+
+ /* Weirdness alert:
+ *
+ * Somehow, it seems that selections can extend beyond the
+ * boundaries of the target dataspace -- hence the following
+ * code to manually clip the selection back to the dataspace
+ * proper.
+ */
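+ /* The H5S_SELECT_AND below intersects the current selection with a
+ * single block covering the full extent of the dataspace, which should
+ * drop any part of the checkerboard that falls outside it.
+ */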
+ for (u = 0; u < SS_DR_MAX_RANK; u++) {
+ start[u] = 0;
+ stride[u] = edge_size;
+ count[u] = 1;
+ block[u] = edge_size;
+ } /* end for */
+
+ ret = H5Sselect_hyperslab(tgt_n_cube_sid, H5S_SELECT_AND, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+} /* test_select_hyper_checker_board_dr__select_checker_board() */
+#endif
+
+/****************************************************************
+**
+** test_select_hyper_checker_board_dr__verify_data():
+**
+** Examine the supplied buffer to see if it contains the
+** expected data. Return TRUE if it does, and FALSE
+** otherwise.
+**
+** The supplied buffer is presumed to contain the results
+ ** of reading or writing a checkerboard selection of an
+** n-cube, or a checkerboard selection of an m (1 <= m < n)
+** dimensional slice through an n-cube parallel to the
+** fastest changing indices.
+**
+** It is further presumed that the buffer was zeroed before
+ ** the read, and that the n-cube was initialized with the
+** natural numbers listed in order from the origin along
+** the fastest changing axis.
+**
+** Thus for a 10x10x10 3-cube, the value stored in location
+** (x, y, z) (assuming that z is the fastest changing index
+** and x the slowest) is assumed to be:
+**
+** (10 * 10 * x) + (10 * y) + z
+**
+** Thus, if the buffer contains the result of reading a
+** checker board selection of a 10x10x10 3-cube, location
+** (x, y, z) will contain zero if it is not in a checker,
+** and 100x + 10y + z if (x, y, z) is in a checker.
+**
+ ** If the buffer contains the result of reading a
+ ** 3-dimensional slice (parallel to the three fastest-changing
+ ** indices) through an n-cube (n > 3), then the expected
+** values in the buffer will be the same, save that we will
+** add a constant determined by the origin of the 3-cube
+** in the n-cube.
+**
+** Finally, the function presumes that the first element
+ ** of the buffer resides at the origin of either
+** a selected or an unselected checker.
+**
+****************************************************************/
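+/* Worked example (illustration only): in the 10x10x10 cube described
+ * above, the element at (x, y, z) = (3, 4, 5) holds
+ * (10 * 10 * 3) + (10 * 4) + 5 = 345, so a checkerboard read covering
+ * that element is expected to return 345 there and 0 in the
+ * unselected checkers.
+ */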
+#if 0
+H5_ATTR_PURE static hbool_t
+test_select_hyper_checker_board_dr__verify_data(uint16_t *buf_ptr, unsigned rank, unsigned edge_size,
+ unsigned checker_edge_size, uint16_t first_expected_val,
+ hbool_t buf_starts_in_checker)
+{
+ hbool_t good_data = TRUE;
+ hbool_t in_checker;
+ hbool_t start_in_checker[5];
+ uint16_t expected_value;
+ uint16_t *val_ptr;
+ unsigned i, j, k, l, m; /* to track position in n-cube */
+ unsigned v, w, x, y, z; /* to track position in checker */
+ const unsigned test_max_rank = 5; /* code changes needed if this is increased */
+
+ HDassert(buf_ptr != NULL);
+ HDassert(0 < rank);
+ HDassert(rank <= test_max_rank);
+ HDassert(edge_size >= 6);
+ HDassert(0 < checker_edge_size);
+ HDassert(checker_edge_size <= edge_size);
+ HDassert(test_max_rank <= SS_DR_MAX_RANK);
+
+ val_ptr = buf_ptr;
+ expected_value = first_expected_val;
+
+ i = 0;
+ v = 0;
+ start_in_checker[0] = buf_starts_in_checker;
+ do {
+ if (v >= checker_edge_size) {
+ start_in_checker[0] = !start_in_checker[0];
+ v = 0;
+ } /* end if */
+
+ j = 0;
+ w = 0;
+ start_in_checker[1] = start_in_checker[0];
+ do {
+ if (w >= checker_edge_size) {
+ start_in_checker[1] = !start_in_checker[1];
+ w = 0;
+ } /* end if */
+
+ k = 0;
+ x = 0;
+ start_in_checker[2] = start_in_checker[1];
+ do {
+ if (x >= checker_edge_size) {
+ start_in_checker[2] = !start_in_checker[2];
+ x = 0;
+ } /* end if */
+
+ l = 0;
+ y = 0;
+ start_in_checker[3] = start_in_checker[2];
+ do {
+ if (y >= checker_edge_size) {
+ start_in_checker[3] = !start_in_checker[3];
+ y = 0;
+ } /* end if */
+
+ m = 0;
+ z = 0;
+ in_checker = start_in_checker[3];
+ do {
+ if (z >= checker_edge_size) {
+ in_checker = !in_checker;
+ z = 0;
+ } /* end if */
+
+ if (in_checker) {
+ if (*val_ptr != expected_value)
+ good_data = FALSE;
+ } /* end if */
+ else {
+ if (*val_ptr != 0)
+ good_data = FALSE;
+ } /* end else */
+
+ val_ptr++;
+ expected_value++;
+
+ m++;
+ z++;
+ } while ((rank >= (test_max_rank - 4)) && (m < edge_size));
+ l++;
+ y++;
+ } while ((rank >= (test_max_rank - 3)) && (l < edge_size));
+ k++;
+ x++;
+ } while ((rank >= (test_max_rank - 2)) && (k < edge_size));
+ j++;
+ w++;
+ } while ((rank >= (test_max_rank - 1)) && (j < edge_size));
+ i++;
+ v++;
+ } while ((rank >= test_max_rank) && (i < edge_size));
+
+ return (good_data);
+} /* test_select_hyper_checker_board_dr__verify_data() */
+#endif
+
+/****************************************************************
+**
+** test_select_hyper_checker_board_dr__run_test(): Test H5S
+** (dataspace) selection code with checker board source and
+** target selections having different ranks but the same
+** shape. We have already tested H5Sselect_shape_same in
+** isolation, so now we try to do I/O.
+**
+****************************************************************/
+#if 0
+static void
+test_select_hyper_checker_board_dr__run_test(int test_num, const uint16_t *cube_buf, const uint16_t *zero_buf,
+ unsigned edge_size, unsigned checker_edge_size,
+ unsigned chunk_edge_size, unsigned small_rank,
+ unsigned large_rank, hid_t dset_type, hid_t xfer_plist)
+{
+ hbool_t data_ok;
+ hid_t fapl; /* File access property list */
+ hid_t fid; /* HDF5 File IDs */
+ hid_t full_small_cube_sid; /* Dataspace for small cube w/all selection */
+ hid_t mem_small_cube_sid;
+ hid_t file_small_cube_sid;
+ hid_t full_large_cube_sid; /* Dataspace for large cube w/all selection */
+ hid_t mem_large_cube_sid;
+ hid_t file_large_cube_sid;
+ hid_t small_cube_dcpl_id = H5P_DEFAULT; /* DCPL for small cube dataset */
+ hid_t large_cube_dcpl_id = H5P_DEFAULT; /* DCPL for large cube dataset */
+ hid_t small_cube_dataset; /* Dataset ID */
+ hid_t large_cube_dataset; /* Dataset ID */
+ unsigned small_rank_offset; /* Rank offset of slice */
+ const unsigned test_max_rank = 5; /* must update code if this changes */
+ size_t start_index; /* Offset within buffer to begin inspecting */
+ size_t stop_index; /* Offset within buffer to end inspecting */
+ uint16_t expected_value;
+ uint16_t *small_cube_buf_1;
+ uint16_t *large_cube_buf_1;
+ uint16_t *ptr_1;
+ size_t small_cube_size; /* Number of elements in small cube */
+ size_t large_cube_size; /* Number of elements in large cube */
+ hsize_t dims[SS_DR_MAX_RANK];
+ hsize_t chunk_dims[SS_DR_MAX_RANK];
+ hsize_t sel_start[SS_DR_MAX_RANK];
+ unsigned u, v, w, x; /* Local index variables */
+ size_t s; /* Local index variable */
+ htri_t check; /* Shape comparison return value */
+ herr_t ret; /* Generic return value */
+
+ MESSAGE(7, ("\tn-cube slice through m-cube I/O test %d.\n", test_num));
+ MESSAGE(7, ("\tranks = %d/%d, edge_size = %d, checker_edge_size = %d, chunk_edge_size = %d.\n",
+ small_rank, large_rank, edge_size, checker_edge_size, chunk_edge_size));
+
+ HDassert(edge_size >= 6);
+ HDassert(checker_edge_size > 0);
+ HDassert(checker_edge_size <= edge_size);
+ HDassert(edge_size >= chunk_edge_size);
+ HDassert((chunk_edge_size == 0) || (chunk_edge_size >= 3));
+ HDassert(small_rank > 0);
+ HDassert(small_rank < large_rank);
+ HDassert(large_rank <= test_max_rank);
+ HDassert(test_max_rank <= SS_DR_MAX_RANK);
+
+ /* Compute cube sizes */
+ small_cube_size = large_cube_size = (size_t)1;
+ for (u = 0; u < large_rank; u++) {
+ if (u < small_rank)
+ small_cube_size *= (size_t)edge_size;
+
+ large_cube_size *= (size_t)edge_size;
+ } /* end for */
+ HDassert(large_cube_size < (size_t)(UINT_MAX));
+
+ small_rank_offset = test_max_rank - small_rank;
+ HDassert(small_rank_offset >= 1);
+
+ /* also, at present, we use 16 bit values in this test --
+ * hence the following assertion. Delete it if we convert
+ * to 32 bit values.
+ */
+ HDassert(large_cube_size < (size_t)(64 * 1024));
+
+ /* Allocate & initialize buffers */
+ small_cube_buf_1 = (uint16_t *)HDcalloc(sizeof(uint16_t), small_cube_size);
+ CHECK_PTR(small_cube_buf_1, "HDcalloc");
+ large_cube_buf_1 = (uint16_t *)HDcalloc(sizeof(uint16_t), large_cube_size);
+ CHECK_PTR(large_cube_buf_1, "HDcalloc");
+
+ /* Create a dataset transfer property list */
+ fapl = H5Pcreate(H5P_FILE_ACCESS);
+ CHECK(fapl, FAIL, "H5Pcreate");
+
+ /* Use the 'core' VFD for this test */
+ ret = H5Pset_fapl_core(fapl, (size_t)(1024 * 1024), FALSE);
+ CHECK(ret, FAIL, "H5Pset_fapl_core");
+
+ /* Create file */
+ fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
+ CHECK(fid, FAIL, "H5Fcreate");
+
+ /* Close file access property list */
+ ret = H5Pclose(fapl);
+ CHECK(ret, FAIL, "H5Pclose");
+
+ /* setup dims: */
+ dims[0] = dims[1] = dims[2] = dims[3] = dims[4] = edge_size;
+
+ /* Create small cube dataspaces */
+ full_small_cube_sid = H5Screate_simple((int)small_rank, dims, NULL);
+ CHECK(full_small_cube_sid, FAIL, "H5Screate_simple");
+
+ mem_small_cube_sid = H5Screate_simple((int)small_rank, dims, NULL);
+ CHECK(mem_small_cube_sid, FAIL, "H5Screate_simple");
+
+ file_small_cube_sid = H5Screate_simple((int)small_rank, dims, NULL);
+ CHECK(file_small_cube_sid, FAIL, "H5Screate_simple");
+
+ /* Create large cube dataspace */
+ full_large_cube_sid = H5Screate_simple((int)large_rank, dims, NULL);
+ CHECK(full_large_cube_sid, FAIL, "H5Screate_simple");
+
+ mem_large_cube_sid = H5Screate_simple((int)large_rank, dims, NULL);
+ CHECK(mem_large_cube_sid, FAIL, "H5Screate_simple");
+
+ file_large_cube_sid = H5Screate_simple((int)large_rank, dims, NULL);
+ CHECK(file_large_cube_sid, FAIL, "H5Screate_simple");
+
+ /* if chunk edge size is greater than zero, set up the small and
+ * large data set creation property lists to specify chunked
+ * datasets.
+ */
+ if (chunk_edge_size > 0) {
+ chunk_dims[0] = chunk_dims[1] = chunk_dims[2] = chunk_dims[3] = chunk_dims[4] = chunk_edge_size;
+
+ small_cube_dcpl_id = H5Pcreate(H5P_DATASET_CREATE);
+ CHECK(small_cube_dcpl_id, FAIL, "H5Pcreate");
+
+ ret = H5Pset_layout(small_cube_dcpl_id, H5D_CHUNKED);
+ CHECK(ret, FAIL, "H5Pset_layout");
+
+ ret = H5Pset_chunk(small_cube_dcpl_id, (int)small_rank, chunk_dims);
+ CHECK(ret, FAIL, "H5Pset_chunk");
+
+ large_cube_dcpl_id = H5Pcreate(H5P_DATASET_CREATE);
+ CHECK(large_cube_dcpl_id, FAIL, "H5Pcreate");
+
+ ret = H5Pset_layout(large_cube_dcpl_id, H5D_CHUNKED);
+ CHECK(ret, FAIL, "H5Pset_layout");
+
+ ret = H5Pset_chunk(large_cube_dcpl_id, (int)large_rank, chunk_dims);
+ CHECK(ret, FAIL, "H5Pset_chunk");
+ } /* end if */
+
+ /* create the small cube dataset */
+ small_cube_dataset = H5Dcreate2(fid, "small_cube_dataset", dset_type, file_small_cube_sid, H5P_DEFAULT,
+ small_cube_dcpl_id, H5P_DEFAULT);
+ CHECK(small_cube_dataset, FAIL, "H5Dcreate2");
+
+ /* Close non-default small dataset DCPL */
+ if (small_cube_dcpl_id != H5P_DEFAULT) {
+ ret = H5Pclose(small_cube_dcpl_id);
+ CHECK(ret, FAIL, "H5Pclose");
+ } /* end if */
+
+ /* create the large cube dataset */
+ large_cube_dataset = H5Dcreate2(fid, "large_cube_dataset", dset_type, file_large_cube_sid, H5P_DEFAULT,
+ large_cube_dcpl_id, H5P_DEFAULT);
+ CHECK(large_cube_dataset, FAIL, "H5Dcreate2");
+
+ /* Close non-default large dataset DCPL */
+ if (large_cube_dcpl_id != H5P_DEFAULT) {
+ ret = H5Pclose(large_cube_dcpl_id);
+ CHECK(ret, FAIL, "H5Pclose");
+ } /* end if */
+
+ /* write initial data to the on disk datasets */
+ ret = H5Dwrite(small_cube_dataset, H5T_NATIVE_UINT16, full_small_cube_sid, full_small_cube_sid,
+ xfer_plist, cube_buf);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ ret = H5Dwrite(large_cube_dataset, H5T_NATIVE_UINT16, full_large_cube_sid, full_large_cube_sid,
+ xfer_plist, cube_buf);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* read initial small cube data from disk and verify that it is as expected. */
+ ret = H5Dread(small_cube_dataset, H5T_NATIVE_UINT16, full_small_cube_sid, full_small_cube_sid, xfer_plist,
+ small_cube_buf_1);
+ CHECK(ret, FAIL, "H5Dread");
+
+ /* Check that the data is valid */
+ verify_select_hyper_contig_dr__run_test(small_cube_buf_1, small_cube_size, edge_size, small_rank);
+
+ /* read initial large cube data from disk and verify that it is as expected. */
+ ret = H5Dread(large_cube_dataset, H5T_NATIVE_UINT16, full_large_cube_sid, full_large_cube_sid, xfer_plist,
+ large_cube_buf_1);
+ CHECK(ret, FAIL, "H5Dread");
+
+ /* Check that the data is valid */
+ verify_select_hyper_contig_dr__run_test(large_cube_buf_1, large_cube_size, edge_size, large_rank);
+
+ /* first, verify that we can read from disk correctly using selections
+ * of different rank that H5Sselect_shape_same() views as being of the
+ * same shape.
+ *
+ * Start by reading a small_rank-D slice from the on disk large cube, and
+ * verifying that the data read is correct. Verify that H5Sselect_shape_same()
+ * returns true on the memory and file selections.
+ *
+ * The first step is to set up the needed checker board selection in the
+ * in-memory small cube
+ */
+
+ sel_start[0] = sel_start[1] = sel_start[2] = sel_start[3] = sel_start[4] = 0;
+
+ test_select_hyper_checker_board_dr__select_checker_board(mem_small_cube_sid, small_rank, edge_size,
+ checker_edge_size, small_rank, sel_start);
+
+ /* now read slices from the large, on-disk cube into the small cube.
+ * Note how we adjust sel_start only in the dimensions peculiar to the
+ * large cube.
+ */
+
+ u = 0;
+ do {
+ if (small_rank_offset > 0)
+ sel_start[0] = u;
+
+ v = 0;
+ do {
+ if (small_rank_offset > 1)
+ sel_start[1] = v;
+
+ w = 0;
+ do {
+ if (small_rank_offset > 2)
+ sel_start[2] = w;
+
+ x = 0;
+ do {
+ if (small_rank_offset > 3)
+ sel_start[3] = x;
+
+ /* we know that small_rank >= 1 and that large_rank > small_rank
+ * by the assertions at the head of this function. Thus no
+ * need for another inner loop.
+ */
+
+ HDassert((sel_start[0] == 0) || (0 < small_rank_offset));
+ HDassert((sel_start[1] == 0) || (1 < small_rank_offset));
+ HDassert((sel_start[2] == 0) || (2 < small_rank_offset));
+ HDassert((sel_start[3] == 0) || (3 < small_rank_offset));
+ HDassert((sel_start[4] == 0) || (4 < small_rank_offset));
+
+ test_select_hyper_checker_board_dr__select_checker_board(
+ file_large_cube_sid, large_rank, edge_size, checker_edge_size, small_rank, sel_start);
+
+ /* verify that H5Sselect_shape_same() reports the two
+ * selections as having the same shape.
+ */
+ check = H5Sselect_shape_same(mem_small_cube_sid, file_large_cube_sid);
+ VERIFY(check, TRUE, "H5Sselect_shape_same");
+
+ /* zero the buffer that we will be using for reading */
+ HDmemset(small_cube_buf_1, 0, sizeof(*small_cube_buf_1) * small_cube_size);
+
+ /* Read selection from disk */
+ ret = H5Dread(large_cube_dataset, H5T_NATIVE_UINT16, mem_small_cube_sid,
+ file_large_cube_sid, xfer_plist, small_cube_buf_1);
+ CHECK(ret, FAIL, "H5Dread");
+
+ expected_value = (uint16_t)((u * edge_size * edge_size * edge_size * edge_size) +
+ (v * edge_size * edge_size * edge_size) +
+ (w * edge_size * edge_size) + (x * edge_size));
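+ /* expected_value is the linear index of the slice's origin in the
+ * large cube; e.g. (illustration only) with edge_size = 6 and
+ * (u, v, w, x) = (1, 0, 0, 0) it works out to 6 * 6 * 6 * 6 = 1296.
+ */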
+
+ data_ok = test_select_hyper_checker_board_dr__verify_data(small_cube_buf_1, small_rank,
+ edge_size, checker_edge_size,
+ expected_value, (hbool_t)TRUE);
+ if (!data_ok)
+ TestErrPrintf("small cube read from largecube has bad data! Line=%d\n", __LINE__);
+
+ x++;
+ } while ((large_rank >= (test_max_rank - 3)) && (small_rank <= (test_max_rank - 4)) &&
+ (x < edge_size));
+ w++;
+ } while ((large_rank >= (test_max_rank - 2)) && (small_rank <= (test_max_rank - 3)) &&
+ (w < edge_size));
+ v++;
+ } while ((large_rank >= (test_max_rank - 1)) && (small_rank <= (test_max_rank - 2)) &&
+ (v < edge_size));
+ u++;
+ } while ((large_rank >= test_max_rank) && (small_rank <= (test_max_rank - 1)) && (u < edge_size));
+
+ /* similarly, read the on disk small cube into slices through the in memory
+ * large cube, and verify that the correct data (and only the correct data)
+ * is read.
+ */
+
+ /* select a checker board in the file small cube dataspace */
+ sel_start[0] = sel_start[1] = sel_start[2] = sel_start[3] = sel_start[4] = 0;
+ test_select_hyper_checker_board_dr__select_checker_board(file_small_cube_sid, small_rank, edge_size,
+ checker_edge_size, small_rank, sel_start);
+
+ u = 0;
+ do {
+ if (0 < small_rank_offset)
+ sel_start[0] = u;
+
+ v = 0;
+ do {
+ if (1 < small_rank_offset)
+ sel_start[1] = v;
+
+ w = 0;
+ do {
+ if (2 < small_rank_offset)
+ sel_start[2] = w;
+
+ x = 0;
+ do {
+ if (3 < small_rank_offset)
+ sel_start[3] = x;
+
+ /* we know that small_rank >= 1 and that large_rank > small_rank
+ * by the assertions at the head of this function. Thus no
+ * need for another inner loop.
+ */
+
+ HDassert((sel_start[0] == 0) || (0 < small_rank_offset));
+ HDassert((sel_start[1] == 0) || (1 < small_rank_offset));
+ HDassert((sel_start[2] == 0) || (2 < small_rank_offset));
+ HDassert((sel_start[3] == 0) || (3 < small_rank_offset));
+ HDassert((sel_start[4] == 0) || (4 < small_rank_offset));
+
+ test_select_hyper_checker_board_dr__select_checker_board(
+ mem_large_cube_sid, large_rank, edge_size, checker_edge_size, small_rank, sel_start);
+
+ /* verify that H5Sselect_shape_same() reports the two
+ * selections as having the same shape.
+ */
+ check = H5Sselect_shape_same(file_small_cube_sid, mem_large_cube_sid);
+ VERIFY(check, TRUE, "H5Sselect_shape_same");
+
+ /* zero out the in memory large cube */
+ HDmemset(large_cube_buf_1, 0, sizeof(*large_cube_buf_1) * large_cube_size);
+
+ /* Read selection from disk */
+ ret = H5Dread(small_cube_dataset, H5T_NATIVE_UINT16, mem_large_cube_sid,
+ file_small_cube_sid, xfer_plist, large_cube_buf_1);
+ CHECK(ret, FAIL, "H5Dread");
+
+ /* verify that the expected data and only the
+ * expected data was read.
+ */
+ data_ok = TRUE;
+ ptr_1 = large_cube_buf_1;
+ expected_value = 0;
+ start_index = (u * edge_size * edge_size * edge_size * edge_size) +
+ (v * edge_size * edge_size * edge_size) + (w * edge_size * edge_size) +
+ (x * edge_size);
+ stop_index = start_index + small_cube_size - 1;
+
+ HDassert(start_index < stop_index);
+ HDassert(stop_index <= large_cube_size);
+
+ /* verify that the large cube contains only zeros before the slice */
+ for (s = 0; s < start_index; s++) {
+ if (*ptr_1 != 0)
+ data_ok = FALSE;
+ ptr_1++;
+ } /* end for */
+ HDassert(s == start_index);
+
+ data_ok &= test_select_hyper_checker_board_dr__verify_data(
+ ptr_1, small_rank, edge_size, checker_edge_size, (uint16_t)0, (hbool_t)TRUE);
+
+ ptr_1 += small_cube_size;
+ s += small_cube_size;
+
+ HDassert(s == stop_index + 1);
+
+ /* verify that the large cube contains only zeros after the slice */
+ for (s = stop_index + 1; s < large_cube_size; s++) {
+ if (*ptr_1 != 0)
+ data_ok = FALSE;
+ ptr_1++;
+ } /* end for */
+ if (!data_ok)
+ TestErrPrintf("large cube read from small cube has bad data! Line=%d\n", __LINE__);
+
+ x++;
+ } while ((large_rank >= (test_max_rank - 3)) && (small_rank <= (test_max_rank - 4)) &&
+ (x < edge_size));
+ w++;
+ } while ((large_rank >= (test_max_rank - 2)) && (small_rank <= (test_max_rank - 3)) &&
+ (w < edge_size));
+ v++;
+ } while ((large_rank >= (test_max_rank - 1)) && (small_rank <= (test_max_rank - 2)) &&
+ (v < edge_size));
+ u++;
+ } while ((large_rank >= test_max_rank) && (small_rank <= (test_max_rank - 1)) && (u < edge_size));
+
+ /* now we go in the opposite direction, verifying that we can write
+ * from memory to file using selections of different rank that
+ * H5Sselect_shape_same() views as being of the same shape.
+ *
+ * Start by writing small_rank-D slices from the in memory large cube to
+ * the on disk small cube dataset. After each write, read the small
+ * cube dataset back from disk, and verify that it contains the expected
+ * data. Verify that H5Sselect_shape_same() returns true on the
+ * memory and file selections.
+ */
+
+ /* select a checker board in the file small cube dataspace */
+ sel_start[0] = sel_start[1] = sel_start[2] = sel_start[3] = sel_start[4] = 0;
+ test_select_hyper_checker_board_dr__select_checker_board(file_small_cube_sid, small_rank, edge_size,
+ checker_edge_size, small_rank, sel_start);
+
+ u = 0;
+ do {
+ if (small_rank_offset > 0)
+ sel_start[0] = u;
+
+ v = 0;
+ do {
+ if (small_rank_offset > 1)
+ sel_start[1] = v;
+
+ w = 0;
+ do {
+ if (small_rank_offset > 2)
+ sel_start[2] = w;
+
+ x = 0;
+ do {
+ if (small_rank_offset > 3)
+ sel_start[3] = x;
+
+ /* zero out the on disk small cube */
+ ret = H5Dwrite(small_cube_dataset, H5T_NATIVE_UINT16, full_small_cube_sid,
+ full_small_cube_sid, xfer_plist, zero_buf);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* we know that small_rank >= 1 and that large_rank > small_rank
+ * by the assertions at the head of this function. Thus no
+ * need for another inner loop.
+ */
+
+ HDassert((sel_start[0] == 0) || (0 < small_rank_offset));
+ HDassert((sel_start[1] == 0) || (1 < small_rank_offset));
+ HDassert((sel_start[2] == 0) || (2 < small_rank_offset));
+ HDassert((sel_start[3] == 0) || (3 < small_rank_offset));
+ HDassert((sel_start[4] == 0) || (4 < small_rank_offset));
+
+ test_select_hyper_checker_board_dr__select_checker_board(
+ mem_large_cube_sid, large_rank, edge_size, checker_edge_size, small_rank, sel_start);
+
+ /* verify that H5Sselect_shape_same() reports the two
+ * selections as having the same shape.
+ */
+ check = H5Sselect_shape_same(file_small_cube_sid, mem_large_cube_sid);
+ VERIFY(check, TRUE, "H5Sselect_shape_same");
+
+ /* write the slice from the in memory large cube to the
+ * on disk small cube
+ */
+ ret = H5Dwrite(small_cube_dataset, H5T_NATIVE_UINT16, mem_large_cube_sid,
+ file_small_cube_sid, xfer_plist, cube_buf);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* zero the buffer that we will be using for reading */
+ HDmemset(small_cube_buf_1, 0, sizeof(*small_cube_buf_1) * small_cube_size);
+
+ /* read the on disk small cube into memory */
+ ret = H5Dread(small_cube_dataset, H5T_NATIVE_UINT16, full_small_cube_sid,
+ full_small_cube_sid, xfer_plist, small_cube_buf_1);
+ CHECK(ret, FAIL, "H5Dread");
+
+ expected_value = (uint16_t)((u * edge_size * edge_size * edge_size * edge_size) +
+ (v * edge_size * edge_size * edge_size) +
+ (w * edge_size * edge_size) + (x * edge_size));
+
+ data_ok = test_select_hyper_checker_board_dr__verify_data(small_cube_buf_1, small_rank,
+ edge_size, checker_edge_size,
+ expected_value, (hbool_t)TRUE);
+ if (!data_ok)
+ TestErrPrintf("small cube read from largecube has bad data! Line=%d\n", __LINE__);
+
+ x++;
+ } while ((large_rank >= (test_max_rank - 3)) && (small_rank <= (test_max_rank - 4)) &&
+ (x < edge_size));
+ w++;
+ } while ((large_rank >= (test_max_rank - 2)) && (small_rank <= (test_max_rank - 3)) &&
+ (w < edge_size));
+ v++;
+ } while ((large_rank >= (test_max_rank - 1)) && (small_rank <= (test_max_rank - 2)) &&
+ (v < edge_size));
+ u++;
+ } while ((large_rank >= test_max_rank) && (small_rank <= (test_max_rank - 1)) && (u < edge_size));
+
+ /* Now write checker board selections of the in memory small
+ * cube to slices of the on disk large cube. After each write,
+ * read the on disk large cube into memory, and verify that
+ * it contains the expected data. Verify that
+ * H5Sselect_shape_same() returns true on the memory and file
+ * selections.
+ */
+
+ /* select a checker board in the in memory small cube dataspace */
+ sel_start[0] = sel_start[1] = sel_start[2] = sel_start[3] = sel_start[4] = 0;
+ test_select_hyper_checker_board_dr__select_checker_board(mem_small_cube_sid, small_rank, edge_size,
+ checker_edge_size, small_rank, sel_start);
+
+ u = 0;
+ do {
+ if (small_rank_offset > 0)
+ sel_start[0] = u;
+
+ v = 0;
+ do {
+ if (small_rank_offset > 1)
+ sel_start[1] = v;
+
+ w = 0;
+ do {
+ if (small_rank_offset > 2)
+ sel_start[2] = w;
+
+ x = 0;
+ do {
+ if (small_rank_offset > 3)
+ sel_start[3] = x;
+
+ /* zero out the on disk cube */
+ ret = H5Dwrite(large_cube_dataset, H5T_NATIVE_USHORT, full_large_cube_sid,
+ full_large_cube_sid, xfer_plist, zero_buf);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* we know that small_rank >= 1 and that large_rank > small_rank
+ * by the assertions at the head of this function. Thus no
+ * need for another inner loop.
+ */
+
+ HDassert((sel_start[0] == 0) || (0 < small_rank_offset));
+ HDassert((sel_start[1] == 0) || (1 < small_rank_offset));
+ HDassert((sel_start[2] == 0) || (2 < small_rank_offset));
+ HDassert((sel_start[3] == 0) || (3 < small_rank_offset));
+ HDassert((sel_start[4] == 0) || (4 < small_rank_offset));
+
+ test_select_hyper_checker_board_dr__select_checker_board(
+ file_large_cube_sid, large_rank, edge_size, checker_edge_size, small_rank, sel_start);
+
+ /* verify that H5Sselect_shape_same() reports the two
+ * selections as having the same shape.
+ */
+ check = H5Sselect_shape_same(file_large_cube_sid, mem_small_cube_sid);
+ VERIFY(check, TRUE, "H5Sselect_shape_same");
+
+ /* write the checker board selection of the in memory
+ * small cube to a slice through the on disk large
+ * cube.
+ */
+ ret = H5Dwrite(large_cube_dataset, H5T_NATIVE_UINT16, mem_small_cube_sid,
+ file_large_cube_sid, xfer_plist, cube_buf);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* zero out the in memory large cube */
+ HDmemset(large_cube_buf_1, 0, sizeof(*large_cube_buf_1) * large_cube_size);
+
+ /* read the on disk large cube into memory */
+ ret = H5Dread(large_cube_dataset, H5T_NATIVE_UINT16, full_large_cube_sid,
+ full_large_cube_sid, xfer_plist, large_cube_buf_1);
+ CHECK(ret, FAIL, "H5Dread");
+
+ /* verify that the expected data and only the
+ * expected data was written to the on disk large
+ * cube.
+ */
+ data_ok = TRUE;
+ ptr_1 = large_cube_buf_1;
+ expected_value = 0;
+ start_index = (u * edge_size * edge_size * edge_size * edge_size) +
+ (v * edge_size * edge_size * edge_size) + (w * edge_size * edge_size) +
+ (x * edge_size);
+ stop_index = start_index + small_cube_size - 1;
+
+ HDassert(start_index < stop_index);
+ HDassert(stop_index <= large_cube_size);
+
+ /* verify that the large cube contains only zeros before the slice */
+ for (s = 0; s < start_index; s++) {
+ if (*ptr_1 != 0)
+ data_ok = FALSE;
+ ptr_1++;
+ } /* end for */
+ HDassert(s == start_index);
+
+ /* verify that the slice contains the expected data */
+ data_ok &= test_select_hyper_checker_board_dr__verify_data(
+ ptr_1, small_rank, edge_size, checker_edge_size, (uint16_t)0, (hbool_t)TRUE);
+
+ ptr_1 += small_cube_size;
+ s += small_cube_size;
+
+ HDassert(s == stop_index + 1);
+
+ /* verify that the large cube contains only zeros after the slice */
+ for (s = stop_index + 1; s < large_cube_size; s++) {
+ if (*ptr_1 != 0)
+ data_ok = FALSE;
+ ptr_1++;
+ } /* end for */
+ if (!data_ok)
+ TestErrPrintf("large cube written from small cube has bad data! Line=%d\n", __LINE__);
+
+ x++;
+ } while ((large_rank >= (test_max_rank - 3)) && (small_rank <= (test_max_rank - 4)) &&
+ (x < edge_size));
+ w++;
+ } while ((large_rank >= (test_max_rank - 2)) && (small_rank <= (test_max_rank - 3)) &&
+ (w < edge_size));
+ v++;
+ } while ((large_rank >= (test_max_rank - 1)) && (small_rank <= (test_max_rank - 2)) &&
+ (v < edge_size));
+ u++;
+ } while ((large_rank >= test_max_rank) && (small_rank <= (test_max_rank - 1)) && (u < edge_size));
+
+ /* Close memory dataspaces */
+ ret = H5Sclose(full_small_cube_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ ret = H5Sclose(full_large_cube_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ ret = H5Sclose(mem_small_cube_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ ret = H5Sclose(mem_large_cube_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close disk dataspace */
+ ret = H5Sclose(file_small_cube_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ ret = H5Sclose(file_large_cube_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close Datasets */
+ ret = H5Dclose(small_cube_dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ ret = H5Dclose(large_cube_dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close file */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Free memory buffers */
+ HDfree(small_cube_buf_1);
+ HDfree(large_cube_buf_1);
+
+} /* test_select_hyper_checker_board_dr__run_test() */
+#endif
+/****************************************************************
+**
+** test_select_hyper_checker_board_dr(): Test H5S (dataspace)
+** selection code with checkerboard source and target having
+** different ranks but the same shape. We have already
+** tested H5Sselect_shape_same in isolation, so now we try to do
+** I/O.
+**
+** This is just an initial smoke check, so we will work
+** with a slice through a cube only.
+**
+****************************************************************/
+#if 0
+static void
+test_select_hyper_checker_board_dr(hid_t dset_type, hid_t xfer_plist)
+{
+ uint16_t *cube_buf; /* Buffer for writing cube data */
+ uint16_t *cube_ptr; /* Temporary pointer into cube data */
+ uint16_t *zero_buf; /* Buffer for writing zeroed cube data */
+ int test_num = 0;
+ unsigned checker_edge_size = 2; /* Size of checkerboard dimension */
+ unsigned chunk_edge_size; /* Size of chunk's dataspace dimensions */
+ unsigned edge_size = 6; /* Size of dataset's dataspace dimensions */
+ unsigned small_rank; /* Current rank of small dataset */
+ unsigned large_rank; /* Current rank of large dataset */
+ unsigned max_rank = 5; /* Max. rank to use */
+ size_t max_cube_size; /* Max. number of elements in largest cube */
+ size_t s; /* Local index variable */
+ unsigned u; /* Local index variable */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing Checker Board Hyperslabs With Different Rank I/O Functionality\n"));
+
+ /* Compute max. cube size */
+ max_cube_size = (size_t)1;
+ for (u = 0; u < max_rank; u++)
+ max_cube_size *= (size_t)(edge_size + 1);
+
+ /* Allocate cube buffer for writing values */
+ cube_buf = (uint16_t *)HDmalloc(sizeof(uint16_t) * max_cube_size);
+ CHECK_PTR(cube_buf, "HDmalloc");
+
+ /* Initialize the cube buffer */
+ cube_ptr = cube_buf;
+ for (s = 0; s < max_cube_size; s++)
+ *cube_ptr++ = (uint16_t)s;
+
+ /* Allocate cube buffer for zeroing values on disk */
+ zero_buf = (uint16_t *)HDcalloc(sizeof(uint16_t), max_cube_size);
+ CHECK_PTR(zero_buf, "HDcalloc");
+
+ for (large_rank = 1; large_rank <= max_rank; large_rank++) {
+ for (small_rank = 1; small_rank < large_rank; small_rank++) {
+ chunk_edge_size = 0;
+ test_select_hyper_checker_board_dr__run_test(test_num, cube_buf, zero_buf, edge_size,
+ checker_edge_size, chunk_edge_size, small_rank,
+ large_rank, dset_type, xfer_plist);
+ test_num++;
+
+ test_select_hyper_checker_board_dr__run_test(test_num, cube_buf, zero_buf, edge_size + 1,
+ checker_edge_size, chunk_edge_size, small_rank,
+ large_rank, dset_type, xfer_plist);
+ test_num++;
+
+ chunk_edge_size = 3;
+ test_select_hyper_checker_board_dr__run_test(test_num, cube_buf, zero_buf, edge_size,
+ checker_edge_size, chunk_edge_size, small_rank,
+ large_rank, dset_type, xfer_plist);
+ test_num++;
+
+ test_select_hyper_checker_board_dr__run_test(test_num, cube_buf, zero_buf, edge_size + 1,
+ checker_edge_size, chunk_edge_size, small_rank,
+ large_rank, dset_type, xfer_plist);
+ test_num++;
+ } /* for loop on small rank */
+ } /* for loop on large rank */
+
+ HDfree(cube_buf);
+ HDfree(zero_buf);
+
+} /* test_select_hyper_checker_board_dr() */
+#endif
+/****************************************************************
+**
+** test_select_hyper_copy(): Test H5S (dataspace) selection code.
+** Tests copying hyperslab selections
+**
+****************************************************************/
+static void
+test_select_hyper_copy(void)
+{
+ hid_t fid1; /* HDF5 File IDs */
+ hid_t data1, data2; /* Dataset IDs */
+ hid_t sid1, sid2, sid3; /* Dataspace IDs */
+ hsize_t dims1[] = {SPACE1_DIM1, SPACE1_DIM2, SPACE1_DIM3};
+ hsize_t dims2[] = {SPACE2_DIM1, SPACE2_DIM2};
+ hsize_t dims3[] = {SPACE3_DIM1, SPACE3_DIM2};
+ hsize_t start[SPACE1_RANK]; /* Starting location of hyperslab */
+ hsize_t stride[SPACE1_RANK]; /* Stride of hyperslab */
+ hsize_t count[SPACE1_RANK]; /* Element count of hyperslab */
+ hsize_t block[SPACE1_RANK]; /* Block size of hyperslab */
+ uint16_t *wbuf, /* buffer to write to disk */
+ *rbuf, /* 1st buffer read from disk */
+ *rbuf2, /* 2nd buffer read from disk */
+ *tbuf; /* temporary buffer pointer */
+ int i, j; /* Counters */
+ herr_t ret; /* Generic return value */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing Hyperslabs with Strides Functionality\n"));
+
+ /* Allocate write & read buffers */
+ wbuf = (uint16_t *)HDmalloc(sizeof(uint16_t) * SPACE2_DIM1 * SPACE2_DIM2);
+ CHECK_PTR(wbuf, "HDmalloc");
+ rbuf = (uint16_t *)HDcalloc(sizeof(uint16_t), (size_t)(SPACE3_DIM1 * SPACE3_DIM2));
+ CHECK_PTR(rbuf, "HDcalloc");
+ rbuf2 = (uint16_t *)HDcalloc(sizeof(uint16_t), (size_t)(SPACE3_DIM1 * SPACE3_DIM2));
+ CHECK_PTR(rbuf2, "HDcalloc");
+
+ /* Initialize write buffer */
+ for (i = 0, tbuf = wbuf; i < SPACE2_DIM1; i++)
+ for (j = 0; j < SPACE2_DIM2; j++)
+ *tbuf++ = (uint16_t)((i * SPACE2_DIM2) + j);
+
+ /* Create file */
+ fid1 = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(fid1, FAIL, "H5Fcreate");
+
+ /* Create dataspace for dataset */
+ sid1 = H5Screate_simple(SPACE1_RANK, dims1, NULL);
+ CHECK(sid1, FAIL, "H5Screate_simple");
+
+ /* Create dataspace for writing buffer */
+ sid2 = H5Screate_simple(SPACE2_RANK, dims2, NULL);
+ CHECK(sid2, FAIL, "H5Screate_simple");
+
+ /* Select 2x3x3 count with a stride of 2x4x3 & 1x2x2 block hyperslab for disk dataset */
+ start[0] = 0;
+ start[1] = 0;
+ start[2] = 0;
+ stride[0] = 2;
+ stride[1] = 4;
+ stride[2] = 3;
+ count[0] = 2;
+ count[1] = 3;
+ count[2] = 3;
+ block[0] = 1;
+ block[1] = 2;
+ block[2] = 2;
+ ret = H5Sselect_hyperslab(sid1, H5S_SELECT_SET, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* Select 4x2 count with a stride of 5x5 & 3x3 block hyperslab for memory dataset */
+ start[0] = 1;
+ start[1] = 1;
+ stride[0] = 5;
+ stride[1] = 5;
+ count[0] = 4;
+ count[1] = 2;
+ block[0] = 3;
+ block[1] = 3;
+ ret = H5Sselect_hyperslab(sid2, H5S_SELECT_SET, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* Make a copy of the dataspace to write */
+ sid3 = H5Scopy(sid2);
+ CHECK(sid3, FAIL, "H5Scopy");
+
+ /* Create a dataset */
+ data1 = H5Dcreate2(fid1, SPACE1_NAME, H5T_STD_U16LE, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(data1, FAIL, "H5Dcreate2");
+
+ /* Write selection to disk */
+ ret = H5Dwrite(data1, H5T_STD_U16LE, sid2, sid1, H5P_DEFAULT, wbuf);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* Close memory dataspace */
+ ret = H5Sclose(sid2);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Create another dataset */
+ data2 = H5Dcreate2(fid1, SPACE2_NAME, H5T_STD_U16LE, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(data2, FAIL, "H5Dcreate2");
+
+ /* Write selection to disk */
+ ret = H5Dwrite(data2, H5T_STD_U16LE, sid3, sid1, H5P_DEFAULT, wbuf);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* Close memory dataspace */
+ ret = H5Sclose(sid3);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Create dataspace for reading buffer */
+ sid2 = H5Screate_simple(SPACE3_RANK, dims3, NULL);
+ CHECK(sid2, FAIL, "H5Screate_simple");
+
+ /* Select 3x4 count with a stride of 4x4 & 2x3 block hyperslab for memory dataset */
+ start[0] = 0;
+ start[1] = 0;
+ stride[0] = 4;
+ stride[1] = 4;
+ count[0] = 3;
+ count[1] = 4;
+ block[0] = 2;
+ block[1] = 3;
+ ret = H5Sselect_hyperslab(sid2, H5S_SELECT_SET, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* Make a copy of the dataspace to read */
+ sid3 = H5Scopy(sid2);
+ CHECK(sid3, FAIL, "H5Scopy");
+
+ /* Read selection from disk */
+ ret = H5Dread(data1, H5T_STD_U16LE, sid2, sid1, H5P_DEFAULT, rbuf);
+ CHECK(ret, FAIL, "H5Dread");
+
+ /* Read selection from disk */
+ ret = H5Dread(data2, H5T_STD_U16LE, sid3, sid1, H5P_DEFAULT, rbuf2);
+ CHECK(ret, FAIL, "H5Dread");
+
+ /* Compare data read with data written out */
+ if (HDmemcmp(rbuf, rbuf2, sizeof(uint16_t) * SPACE3_DIM1 * SPACE3_DIM2) != 0)
+ TestErrPrintf("hyperslab values don't match! Line=%d\n", __LINE__);
+
+ /* Close memory dataspace */
+ ret = H5Sclose(sid2);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close 2nd memory dataspace */
+ ret = H5Sclose(sid3);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close disk dataspace */
+ ret = H5Sclose(sid1);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close Dataset */
+ ret = H5Dclose(data1);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close Dataset */
+ ret = H5Dclose(data2);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close file */
+ ret = H5Fclose(fid1);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Free memory buffers */
+ HDfree(wbuf);
+ HDfree(rbuf);
+ HDfree(rbuf2);
+} /* test_select_hyper_copy() */
+
+/****************************************************************
+**
+** test_select_point_copy(): Test H5S (dataspace) selection code.
+** Tests copying point selections
+**
+****************************************************************/
+static void
+test_select_point_copy(void)
+{
+ hid_t fid1; /* HDF5 File IDs */
+ hid_t data1, data2; /* Dataset IDs */
+ hid_t sid1, sid2, sid3; /* Dataspace IDs */
+ hsize_t dims1[] = {SPACE1_DIM1, SPACE1_DIM2, SPACE1_DIM3};
+ hsize_t dims2[] = {SPACE2_DIM1, SPACE2_DIM2};
+ hsize_t dims3[] = {SPACE3_DIM1, SPACE3_DIM2};
+ hsize_t coord1[POINT1_NPOINTS][SPACE1_RANK]; /* Coordinates for point selection */
+ hsize_t coord2[POINT1_NPOINTS][SPACE2_RANK]; /* Coordinates for point selection */
+ hsize_t coord3[POINT1_NPOINTS][SPACE3_RANK]; /* Coordinates for point selection */
+ uint16_t *wbuf, /* buffer to write to disk */
+ *rbuf, /* 1st buffer read from disk */
+ *rbuf2, /* 2nd buffer read from disk */
+ *tbuf; /* temporary buffer pointer */
+ int i, j; /* Counters */
+ herr_t ret; /* Generic return value */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing Hyperslabs with Strides Functionality\n"));
+
+ /* Allocate write & read buffers */
+ wbuf = (uint16_t *)HDmalloc(sizeof(uint16_t) * SPACE2_DIM1 * SPACE2_DIM2);
+ CHECK_PTR(wbuf, "HDmalloc");
+ rbuf = (uint16_t *)HDcalloc(sizeof(uint16_t), (size_t)(SPACE3_DIM1 * SPACE3_DIM2));
+ CHECK_PTR(rbuf, "HDcalloc");
+ rbuf2 = (uint16_t *)HDcalloc(sizeof(uint16_t), (size_t)(SPACE3_DIM1 * SPACE3_DIM2));
+ CHECK_PTR(rbuf2, "HDcalloc");
+
+ /* Initialize write buffer */
+ for (i = 0, tbuf = wbuf; i < SPACE2_DIM1; i++)
+ for (j = 0; j < SPACE2_DIM2; j++)
+ *tbuf++ = (uint16_t)((i * SPACE2_DIM2) + j);
+
+ /* Create file */
+ fid1 = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(fid1, FAIL, "H5Fcreate");
+
+ /* Create dataspace for dataset */
+ sid1 = H5Screate_simple(SPACE1_RANK, dims1, NULL);
+ CHECK(sid1, FAIL, "H5Screate_simple");
+
+ /* Create dataspace for writing buffer */
+ sid2 = H5Screate_simple(SPACE2_RANK, dims2, NULL);
+ CHECK(sid2, FAIL, "H5Screate_simple");
+
+ /* Select sequence of ten points for disk dataset */
+ coord1[0][0] = 0;
+ coord1[0][1] = 10;
+ coord1[0][2] = 5;
+ coord1[1][0] = 1;
+ coord1[1][1] = 2;
+ coord1[1][2] = 7;
+ coord1[2][0] = 2;
+ coord1[2][1] = 4;
+ coord1[2][2] = 9;
+ coord1[3][0] = 0;
+ coord1[3][1] = 6;
+ coord1[3][2] = 11;
+ coord1[4][0] = 1;
+ coord1[4][1] = 8;
+ coord1[4][2] = 13;
+ coord1[5][0] = 2;
+ coord1[5][1] = 12;
+ coord1[5][2] = 0;
+ coord1[6][0] = 0;
+ coord1[6][1] = 14;
+ coord1[6][2] = 2;
+ coord1[7][0] = 1;
+ coord1[7][1] = 0;
+ coord1[7][2] = 4;
+ coord1[8][0] = 2;
+ coord1[8][1] = 1;
+ coord1[8][2] = 6;
+ coord1[9][0] = 0;
+ coord1[9][1] = 3;
+ coord1[9][2] = 8;
+ ret = H5Sselect_elements(sid1, H5S_SELECT_SET, (size_t)POINT1_NPOINTS, (const hsize_t *)coord1);
+ CHECK(ret, FAIL, "H5Sselect_elements");
+
+ /* Select sequence of ten points for write dataset */
+ coord2[0][0] = 12;
+ coord2[0][1] = 3;
+ coord2[1][0] = 15;
+ coord2[1][1] = 13;
+ coord2[2][0] = 7;
+ coord2[2][1] = 25;
+ coord2[3][0] = 0;
+ coord2[3][1] = 6;
+ coord2[4][0] = 13;
+ coord2[4][1] = 0;
+ coord2[5][0] = 24;
+ coord2[5][1] = 11;
+ coord2[6][0] = 12;
+ coord2[6][1] = 21;
+ coord2[7][0] = 29;
+ coord2[7][1] = 4;
+ coord2[8][0] = 8;
+ coord2[8][1] = 8;
+ coord2[9][0] = 19;
+ coord2[9][1] = 17;
+ ret = H5Sselect_elements(sid2, H5S_SELECT_SET, (size_t)POINT1_NPOINTS, (const hsize_t *)coord2);
+ CHECK(ret, FAIL, "H5Sselect_elements");
+
+ /* Make a copy of the dataspace to write */
+ sid3 = H5Scopy(sid2);
+ CHECK(sid3, FAIL, "H5Scopy");
+
+ /* Create a dataset */
+ data1 = H5Dcreate2(fid1, SPACE1_NAME, H5T_STD_U16LE, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(data1, FAIL, "H5Dcreate2");
+
+ /* Write selection to disk */
+ ret = H5Dwrite(data1, H5T_STD_U16LE, sid2, sid1, H5P_DEFAULT, wbuf);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* Close memory dataspace */
+ ret = H5Sclose(sid2);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Create another dataset */
+ data2 = H5Dcreate2(fid1, SPACE2_NAME, H5T_STD_U16LE, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(data2, FAIL, "H5Dcreate2");
+
+ /* Write selection to disk */
+ ret = H5Dwrite(data2, H5T_STD_U16LE, sid3, sid1, H5P_DEFAULT, wbuf);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* Close memory dataspace */
+ ret = H5Sclose(sid3);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Create dataspace for reading buffer */
+ sid2 = H5Screate_simple(SPACE3_RANK, dims3, NULL);
+ CHECK(sid2, FAIL, "H5Screate_simple");
+
+ /* Select sequence of points for read dataset */
+ coord3[0][0] = 0;
+ coord3[0][1] = 2;
+ coord3[1][0] = 4;
+ coord3[1][1] = 8;
+ coord3[2][0] = 13;
+ coord3[2][1] = 13;
+ coord3[3][0] = 14;
+ coord3[3][1] = 25;
+ coord3[4][0] = 7;
+ coord3[4][1] = 9;
+ coord3[5][0] = 2;
+ coord3[5][1] = 0;
+ coord3[6][0] = 9;
+ coord3[6][1] = 19;
+ coord3[7][0] = 1;
+ coord3[7][1] = 22;
+ coord3[8][0] = 12;
+ coord3[8][1] = 21;
+ coord3[9][0] = 11;
+ coord3[9][1] = 6;
+ ret = H5Sselect_elements(sid2, H5S_SELECT_SET, (size_t)POINT1_NPOINTS, (const hsize_t *)coord3);
+ CHECK(ret, FAIL, "H5Sselect_elements");
+
+ /* Make a copy of the dataspace to read */
+ sid3 = H5Scopy(sid2);
+ CHECK(sid3, FAIL, "H5Scopy");
+
+ /* Read selection from disk */
+ ret = H5Dread(data1, H5T_STD_U16LE, sid2, sid1, H5P_DEFAULT, rbuf);
+ CHECK(ret, FAIL, "H5Dread");
+
+ /* Read selection from disk */
+ ret = H5Dread(data2, H5T_STD_U16LE, sid3, sid1, H5P_DEFAULT, rbuf2);
+ CHECK(ret, FAIL, "H5Dread");
+
+ /* Compare data read with data written out */
+ if (HDmemcmp(rbuf, rbuf2, sizeof(uint16_t) * SPACE3_DIM1 * SPACE3_DIM2) != 0)
+ TestErrPrintf("point values don't match!\n");
+
+ /* Close memory dataspace */
+ ret = H5Sclose(sid2);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close 2nd memory dataspace */
+ ret = H5Sclose(sid3);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close disk dataspace */
+ ret = H5Sclose(sid1);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close Dataset */
+ ret = H5Dclose(data1);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close Dataset */
+ ret = H5Dclose(data2);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close file */
+ ret = H5Fclose(fid1);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Free memory buffers */
+ HDfree(wbuf);
+ HDfree(rbuf);
+ HDfree(rbuf2);
+} /* test_select_point_copy() */
+
+/****************************************************************
+**
+** test_select_hyper_offset(): Test basic H5S (dataspace) selection code.
+** Tests hyperslabs of various sizes and dimensionalities with selection
+** offsets.
+**
+****************************************************************/
+static void
+test_select_hyper_offset(void)
+{
+ hid_t fid1; /* HDF5 File IDs */
+ hid_t dataset; /* Dataset ID */
+ hid_t sid1, sid2; /* Dataspace ID */
+ hsize_t dims1[] = {SPACE1_DIM1, SPACE1_DIM2, SPACE1_DIM3};
+ hsize_t dims2[] = {SPACE2_DIM1, SPACE2_DIM2};
+ hsize_t dims3[] = {SPACE3_DIM1, SPACE3_DIM2};
+ hsize_t start[SPACE1_RANK]; /* Starting location of hyperslab */
+ hsize_t stride[SPACE1_RANK]; /* Stride of hyperslab */
+ hsize_t count[SPACE1_RANK]; /* Element count of hyperslab */
+ hsize_t block[SPACE1_RANK]; /* Block size of hyperslab */
+ hssize_t offset[SPACE1_RANK]; /* Offset of selection */
+ uint8_t *wbuf, /* buffer to write to disk */
+ *rbuf, /* buffer read from disk */
+ *tbuf, /* temporary buffer pointer */
+ *tbuf2; /* temporary buffer pointer */
+ int i, j; /* Counters */
+ herr_t ret; /* Generic return value */
+ htri_t valid; /* Generic boolean return value */
+ H5S_class_t ext_type; /* Extent type */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing Hyperslab Selection Functions with Offsets\n"));
+
+ /* Allocate write & read buffers */
+ wbuf = (uint8_t *)HDmalloc(sizeof(uint8_t) * SPACE2_DIM1 * SPACE2_DIM2);
+ CHECK_PTR(wbuf, "HDmalloc");
+ rbuf = (uint8_t *)HDcalloc(sizeof(uint8_t), (size_t)(SPACE3_DIM1 * SPACE3_DIM2));
+ CHECK_PTR(rbuf, "HDcalloc");
+
+ /* Initialize write buffer */
+ for (i = 0, tbuf = wbuf; i < SPACE2_DIM1; i++)
+ for (j = 0; j < SPACE2_DIM2; j++)
+ *tbuf++ = (uint8_t)((i * SPACE2_DIM2) + j);
+
+ /* Create file */
+ fid1 = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(fid1, FAIL, "H5Fcreate");
+
+ /* Create dataspace for dataset */
+ sid1 = H5Screate_simple(SPACE1_RANK, dims1, NULL);
+ CHECK(sid1, FAIL, "H5Screate_simple");
+
+ /* Create dataspace for writing buffer */
+ sid2 = H5Screate_simple(SPACE2_RANK, dims2, NULL);
+ CHECK(sid2, FAIL, "H5Screate_simple");
+
+ /* Verify extent type */
+ ext_type = H5Sget_simple_extent_type(sid1);
+ VERIFY(ext_type, H5S_SIMPLE, "H5Sget_simple_extent_type");
+
+ /* Select 2x15x13 hyperslab for disk dataset */
+ start[0] = 1;
+ start[1] = 0;
+ start[2] = 0;
+ stride[0] = 1;
+ stride[1] = 1;
+ stride[2] = 1;
+ count[0] = 2;
+ count[1] = 15;
+ count[2] = 13;
+ block[0] = 1;
+ block[1] = 1;
+ block[2] = 1;
+ ret = H5Sselect_hyperslab(sid1, H5S_SELECT_SET, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* Check a valid offset */
+ offset[0] = -1;
+ offset[1] = 0;
+ offset[2] = 0;
+ ret = H5Soffset_simple(sid1, offset);
+ CHECK(ret, FAIL, "H5Soffset_simple");
+ valid = H5Sselect_valid(sid1);
+ VERIFY(valid, TRUE, "H5Sselect_valid");
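+
+ /* The selection starts at row 1 in dimension 0, so shifting it by -1
+ * still fits within the extent; the offset of 10 applied next is
+ * expected to push it past the end of the dataspace, which is why
+ * H5Sselect_valid() should return FALSE below.
+ */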
+
+ /* Check an invalid offset */
+ offset[0] = 10;
+ offset[1] = 0;
+ offset[2] = 0;
+ ret = H5Soffset_simple(sid1, offset);
+ CHECK(ret, FAIL, "H5Soffset_simple");
+ valid = H5Sselect_valid(sid1);
+ VERIFY(valid, FALSE, "H5Sselect_valid");
+
+ /* Reset offset */
+ offset[0] = 0;
+ offset[1] = 0;
+ offset[2] = 0;
+ ret = H5Soffset_simple(sid1, offset);
+ CHECK(ret, FAIL, "H5Soffset_simple");
+ valid = H5Sselect_valid(sid1);
+ VERIFY(valid, TRUE, "H5Sselect_valid");
+
+ /* Select 15x26 hyperslab for memory dataset */
+ start[0] = 15;
+ start[1] = 0;
+ stride[0] = 1;
+ stride[1] = 1;
+ count[0] = 15;
+ count[1] = 26;
+ block[0] = 1;
+ block[1] = 1;
+ ret = H5Sselect_hyperslab(sid2, H5S_SELECT_SET, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* Choose a valid offset for the memory dataspace */
+ offset[0] = -10;
+ offset[1] = 0;
+ ret = H5Soffset_simple(sid2, offset);
+ CHECK(ret, FAIL, "H5Soffset_simple");
+ valid = H5Sselect_valid(sid2);
+ VERIFY(valid, TRUE, "H5Sselect_valid");
+
+ /* Create a dataset */
+ dataset = H5Dcreate2(fid1, SPACE1_NAME, H5T_NATIVE_UCHAR, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(dataset, FAIL, "H5Dcreate2");
+
+ /* Write selection to disk */
+ ret = H5Dwrite(dataset, H5T_NATIVE_UCHAR, sid2, sid1, H5P_DEFAULT, wbuf);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* Close memory dataspace */
+ ret = H5Sclose(sid2);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Create dataspace for reading buffer */
+ sid2 = H5Screate_simple(SPACE3_RANK, dims3, NULL);
+ CHECK(sid2, FAIL, "H5Screate_simple");
+
+ /* Select 15x26 hyperslab for reading memory dataset */
+ start[0] = 0;
+ start[1] = 0;
+ stride[0] = 1;
+ stride[1] = 1;
+ count[0] = 15;
+ count[1] = 26;
+ block[0] = 1;
+ block[1] = 1;
+ ret = H5Sselect_hyperslab(sid2, H5S_SELECT_SET, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* Read selection from disk */
+ ret = H5Dread(dataset, H5T_NATIVE_UCHAR, sid2, sid1, H5P_DEFAULT, rbuf);
+ CHECK(ret, FAIL, "H5Dread");
+
+ /* Compare data read with data written out */
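+ /* The write placed memory rows 5-19 (start 15 plus an offset of -10)
+ * into the dataset, so row i of the read-back buffer should match
+ * row i + 5 of the write buffer.
+ */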
+ for (i = 0; i < SPACE3_DIM1; i++) {
+ tbuf = wbuf + ((i + 5) * SPACE2_DIM2);
+ tbuf2 = rbuf + (i * SPACE3_DIM2);
+ for (j = 0; j < SPACE3_DIM2; j++, tbuf++, tbuf2++) {
+ if (*tbuf != *tbuf2)
+ TestErrPrintf("%d: hyperslab values don't match!, i=%d, j=%d, *tbuf=%u, *tbuf2=%u\n",
+ __LINE__, i, j, (unsigned)*tbuf, (unsigned)*tbuf2);
+ } /* end for */
+ } /* end for */
+
+ /* Close memory dataspace */
+ ret = H5Sclose(sid2);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close disk dataspace */
+ ret = H5Sclose(sid1);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close Dataset */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close file */
+ ret = H5Fclose(fid1);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Free memory buffers */
+ HDfree(wbuf);
+ HDfree(rbuf);
+} /* test_select_hyper_offset() */
+
+/****************************************************************
+**
+** test_select_hyper_offset2(): Test basic H5S (dataspace) selection code.
+** Tests optimized hyperslab I/O with selection offsets.
+**
+****************************************************************/
+static void
+test_select_hyper_offset2(void)
+{
+ hid_t fid1; /* HDF5 File IDs */
+ hid_t dataset; /* Dataset ID */
+ hid_t sid1, sid2; /* Dataspace ID */
+ hsize_t dims1[] = {SPACE7_DIM1, SPACE7_DIM2};
+ hsize_t dims2[] = {SPACE7_DIM1, SPACE7_DIM2};
+ hsize_t start[SPACE7_RANK]; /* Starting location of hyperslab */
+ hsize_t count[SPACE7_RANK]; /* Element count of hyperslab */
+ hssize_t offset[SPACE7_RANK]; /* Offset of selection */
+ uint8_t *wbuf, /* buffer to write to disk */
+ *rbuf, /* buffer read from disk */
+ *tbuf, /* temporary buffer pointer */
+ *tbuf2; /* temporary buffer pointer */
+ int i, j; /* Counters */
+ herr_t ret; /* Generic return value */
+ htri_t valid; /* Generic boolean return value */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing More Hyperslab Selection Functions with Offsets\n"));
+
+ /* Allocate write & read buffers */
+ wbuf = (uint8_t *)HDmalloc(sizeof(uint8_t) * SPACE7_DIM1 * SPACE7_DIM2);
+ CHECK_PTR(wbuf, "HDmalloc");
+ rbuf = (uint8_t *)HDcalloc(sizeof(uint8_t), (size_t)(SPACE7_DIM1 * SPACE7_DIM2));
+ CHECK_PTR(rbuf, "HDcalloc");
+
+ /* Initialize write buffer */
+ for (i = 0, tbuf = wbuf; i < SPACE7_DIM1; i++)
+ for (j = 0; j < SPACE7_DIM2; j++)
+ *tbuf++ = (uint8_t)((i * SPACE7_DIM2) + j);
+
+ /* Create file */
+ fid1 = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(fid1, FAIL, "H5Fcreate");
+
+ /* Create dataspace for dataset */
+ sid1 = H5Screate_simple(SPACE7_RANK, dims1, NULL);
+ CHECK(sid1, FAIL, "H5Screate_simple");
+
+ /* Create dataspace for writing buffer */
+ sid2 = H5Screate_simple(SPACE7_RANK, dims2, NULL);
+ CHECK(sid2, FAIL, "H5Screate_simple");
+
+ /* Select 4x10 hyperslab for disk dataset */
+ start[0] = 1;
+ start[1] = 0;
+ count[0] = 4;
+ count[1] = 10;
+ ret = H5Sselect_hyperslab(sid1, H5S_SELECT_SET, start, NULL, count, NULL);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* Set offset */
+ offset[0] = 1;
+ offset[1] = 0;
+ ret = H5Soffset_simple(sid1, offset);
+ CHECK(ret, FAIL, "H5Soffset_simple");
+ valid = H5Sselect_valid(sid1);
+ VERIFY(valid, TRUE, "H5Sselect_valid");
+
+ /* Select 4x10 hyperslab for memory dataset */
+ start[0] = 1;
+ start[1] = 0;
+ count[0] = 4;
+ count[1] = 10;
+ ret = H5Sselect_hyperslab(sid2, H5S_SELECT_SET, start, NULL, count, NULL);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* Choose a valid offset for the memory dataspace */
+ offset[0] = 2;
+ offset[1] = 0;
+ ret = H5Soffset_simple(sid2, offset);
+ CHECK(ret, FAIL, "H5Soffset_simple");
+ valid = H5Sselect_valid(sid2);
+ VERIFY(valid, TRUE, "H5Sselect_valid");
+
+ /* Create a dataset */
+ dataset = H5Dcreate2(fid1, SPACE7_NAME, H5T_NATIVE_UCHAR, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(dataset, FAIL, "H5Dcreate2");
+
+ /* Write selection to disk */
+ ret = H5Dwrite(dataset, H5T_NATIVE_UCHAR, sid2, sid1, H5P_DEFAULT, wbuf);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* Read selection from disk */
+ ret = H5Dread(dataset, H5T_NATIVE_UCHAR, sid2, sid1, H5P_DEFAULT, rbuf);
+ CHECK(ret, FAIL, "H5Dread");
+
+ /* Compare data read with data written out */
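+ /* Both transfers used the memory selection at rows 3-6 (start 1 plus an
+ * offset of 2), so rows 3-6 of the write and read buffers are the ones
+ * compared below.
+ */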
+ for (i = 0; i < 4; i++) {
+ tbuf = wbuf + ((i + 3) * SPACE7_DIM2);
+ tbuf2 = rbuf + ((i + 3) * SPACE7_DIM2);
+ for (j = 0; j < SPACE7_DIM2; j++, tbuf++, tbuf2++) {
+ if (*tbuf != *tbuf2)
+ TestErrPrintf("%d: hyperslab values don't match!, i=%d, j=%d, *tbuf=%u, *tbuf2=%u\n",
+ __LINE__, i, j, (unsigned)*tbuf, (unsigned)*tbuf2);
+ } /* end for */
+ } /* end for */
+
+ /* Close memory dataspace */
+ ret = H5Sclose(sid2);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close disk dataspace */
+ ret = H5Sclose(sid1);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close Dataset */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close file */
+ ret = H5Fclose(fid1);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Free memory buffers */
+ HDfree(wbuf);
+ HDfree(rbuf);
+} /* test_select_hyper_offset2() */
+
+/****************************************************************
+**
+** test_select_point_offset(): Test basic H5S (dataspace) selection code.
+** Tests element selections between dataspaces of various sizes
+** and dimensionalities with selection offsets.
+**
+****************************************************************/
+static void
+test_select_point_offset(void)
+{
+ hid_t fid1; /* HDF5 File IDs */
+ hid_t dataset; /* Dataset ID */
+ hid_t sid1, sid2; /* Dataspace ID */
+ hsize_t dims1[] = {SPACE1_DIM1, SPACE1_DIM2, SPACE1_DIM3};
+ hsize_t dims2[] = {SPACE2_DIM1, SPACE2_DIM2};
+ hsize_t dims3[] = {SPACE3_DIM1, SPACE3_DIM2};
+ hsize_t coord1[POINT1_NPOINTS][SPACE1_RANK]; /* Coordinates for point selection */
+ hsize_t coord2[POINT1_NPOINTS][SPACE2_RANK]; /* Coordinates for point selection */
+ hsize_t coord3[POINT1_NPOINTS][SPACE3_RANK]; /* Coordinates for point selection */
+ hssize_t offset[SPACE1_RANK]; /* Offset of selection */
+ uint8_t *wbuf, /* buffer to write to disk */
+ *rbuf, /* buffer read from disk */
+ *tbuf, /* temporary buffer pointer */
+ *tbuf2; /* temporary buffer pointer */
+ int i, j; /* Counters */
+ herr_t ret; /* Generic return value */
+ htri_t valid; /* Generic boolean return value */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing Element Selection Functions with Offsets\n"));
+
+ /* Allocate write & read buffers */
+ wbuf = (uint8_t *)HDmalloc(sizeof(uint8_t) * SPACE2_DIM1 * SPACE2_DIM2);
+ CHECK_PTR(wbuf, "HDmalloc");
+ rbuf = (uint8_t *)HDcalloc(sizeof(uint8_t), (size_t)(SPACE3_DIM1 * SPACE3_DIM2));
+ CHECK_PTR(rbuf, "HDcalloc");
+
+ /* Initialize write buffer */
+ for (i = 0, tbuf = wbuf; i < SPACE2_DIM1; i++)
+ for (j = 0; j < SPACE2_DIM2; j++)
+ *tbuf++ = (uint8_t)((i * SPACE2_DIM2) + j);
+
+ /* Create file */
+ fid1 = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(fid1, FAIL, "H5Fcreate");
+
+ /* Create dataspace for dataset */
+ sid1 = H5Screate_simple(SPACE1_RANK, dims1, NULL);
+ CHECK(sid1, FAIL, "H5Screate_simple");
+
+ /* Create dataspace for write buffer */
+ sid2 = H5Screate_simple(SPACE2_RANK, dims2, NULL);
+ CHECK(sid2, FAIL, "H5Screate_simple");
+
+ /* Select sequence of ten points for disk dataset */
+ coord1[0][0] = 0;
+ coord1[0][1] = 10;
+ coord1[0][2] = 5;
+ coord1[1][0] = 1;
+ coord1[1][1] = 2;
+ coord1[1][2] = 7;
+ coord1[2][0] = 2;
+ coord1[2][1] = 4;
+ coord1[2][2] = 9;
+ coord1[3][0] = 0;
+ coord1[3][1] = 6;
+ coord1[3][2] = 11;
+ coord1[4][0] = 1;
+ coord1[4][1] = 8;
+ coord1[4][2] = 12;
+ coord1[5][0] = 2;
+ coord1[5][1] = 12;
+ coord1[5][2] = 0;
+ coord1[6][0] = 0;
+ coord1[6][1] = 14;
+ coord1[6][2] = 2;
+ coord1[7][0] = 1;
+ coord1[7][1] = 0;
+ coord1[7][2] = 4;
+ coord1[8][0] = 2;
+ coord1[8][1] = 1;
+ coord1[8][2] = 6;
+ coord1[9][0] = 0;
+ coord1[9][1] = 3;
+ coord1[9][2] = 8;
+ ret = H5Sselect_elements(sid1, H5S_SELECT_SET, (size_t)POINT1_NPOINTS, (const hsize_t *)coord1);
+ CHECK(ret, FAIL, "H5Sselect_elements");
+
+ /* Check a valid offset */
+ offset[0] = 0;
+ offset[1] = 0;
+ offset[2] = 1;
+ ret = H5Soffset_simple(sid1, offset);
+ CHECK(ret, FAIL, "H5Soffset_simple");
+ valid = H5Sselect_valid(sid1);
+ VERIFY(valid, TRUE, "H5Sselect_valid");
+
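+ /* Note: H5Soffset_simple succeeds even when the offset pushes the selection outside
+ * the extent; the out-of-bounds condition is only reported by H5Sselect_valid
+ * returning FALSE */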
+ /* Check an invalid offset */
+ offset[0] = 10;
+ offset[1] = 0;
+ offset[2] = 0;
+ ret = H5Soffset_simple(sid1, offset);
+ CHECK(ret, FAIL, "H5Soffset_simple");
+ valid = H5Sselect_valid(sid1);
+ VERIFY(valid, FALSE, "H5Sselect_valid");
+
+ /* Reset offset */
+ offset[0] = 0;
+ offset[1] = 0;
+ offset[2] = 0;
+ ret = H5Soffset_simple(sid1, offset);
+ CHECK(ret, FAIL, "H5Soffset_simple");
+ valid = H5Sselect_valid(sid1);
+ VERIFY(valid, TRUE, "H5Sselect_valid");
+
+ /* Select sequence of ten points for write dataset */
+ coord2[0][0] = 12;
+ coord2[0][1] = 3;
+ coord2[1][0] = 15;
+ coord2[1][1] = 13;
+ coord2[2][0] = 7;
+ coord2[2][1] = 24;
+ coord2[3][0] = 0;
+ coord2[3][1] = 6;
+ coord2[4][0] = 13;
+ coord2[4][1] = 0;
+ coord2[5][0] = 24;
+ coord2[5][1] = 11;
+ coord2[6][0] = 12;
+ coord2[6][1] = 21;
+ coord2[7][0] = 23;
+ coord2[7][1] = 4;
+ coord2[8][0] = 8;
+ coord2[8][1] = 8;
+ coord2[9][0] = 19;
+ coord2[9][1] = 17;
+ ret = H5Sselect_elements(sid2, H5S_SELECT_SET, (size_t)POINT1_NPOINTS, (const hsize_t *)coord2);
+ CHECK(ret, FAIL, "H5Sselect_elements");
+
+ /* Choose a valid offset for the memory dataspace */
+ offset[0] = 5;
+ offset[1] = 1;
+ ret = H5Soffset_simple(sid2, offset);
+ CHECK(ret, FAIL, "H5Soffset_simple");
+ valid = H5Sselect_valid(sid2);
+ VERIFY(valid, TRUE, "H5Sselect_valid");
+
+ /* Create a dataset */
+ dataset = H5Dcreate2(fid1, SPACE1_NAME, H5T_NATIVE_UCHAR, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(dataset, FAIL, "H5Dcreate2");
+
+ /* Write selection to disk */
+ ret = H5Dwrite(dataset, H5T_NATIVE_UCHAR, sid2, sid1, H5P_DEFAULT, wbuf);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* Close memory dataspace */
+ ret = H5Sclose(sid2);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Create dataspace for reading buffer */
+ sid2 = H5Screate_simple(SPACE3_RANK, dims3, NULL);
+ CHECK(sid2, FAIL, "H5Screate_simple");
+
+ /* Select sequence of points for read dataset */
+ coord3[0][0] = 0;
+ coord3[0][1] = 2;
+ coord3[1][0] = 4;
+ coord3[1][1] = 8;
+ coord3[2][0] = 13;
+ coord3[2][1] = 13;
+ coord3[3][0] = 14;
+ coord3[3][1] = 25;
+ coord3[4][0] = 7;
+ coord3[4][1] = 9;
+ coord3[5][0] = 2;
+ coord3[5][1] = 0;
+ coord3[6][0] = 9;
+ coord3[6][1] = 19;
+ coord3[7][0] = 1;
+ coord3[7][1] = 22;
+ coord3[8][0] = 12;
+ coord3[8][1] = 21;
+ coord3[9][0] = 11;
+ coord3[9][1] = 6;
+ ret = H5Sselect_elements(sid2, H5S_SELECT_SET, (size_t)POINT1_NPOINTS, (const hsize_t *)coord3);
+ CHECK(ret, FAIL, "H5Sselect_elements");
+
+ /* Read selection from disk */
+ ret = H5Dread(dataset, H5T_NATIVE_UCHAR, sid2, sid1, H5P_DEFAULT, rbuf);
+ CHECK(ret, FAIL, "H5Dread");
+
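+ /* Note: 'offset' still holds the (5, 1) offset applied to the write-side memory
+ * dataspace, so it is added to coord2 when locating the bytes written from wbuf */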
+ /* Compare data read with data written out */
+ for (i = 0; i < POINT1_NPOINTS; i++) {
+ tbuf = wbuf + ((coord2[i][0] + (hsize_t)offset[0]) * SPACE2_DIM2) + coord2[i][1] + (hsize_t)offset[1];
+ tbuf2 = rbuf + (coord3[i][0] * SPACE3_DIM2) + coord3[i][1];
+ if (*tbuf != *tbuf2)
+ TestErrPrintf("element values don't match!, i=%d\n", i);
+ } /* end for */
+
+ /* Close memory dataspace */
+ ret = H5Sclose(sid2);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close disk dataspace */
+ ret = H5Sclose(sid1);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close Dataset */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close file */
+ ret = H5Fclose(fid1);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Free memory buffers */
+ HDfree(wbuf);
+ HDfree(rbuf);
+} /* test_select_point_offset() */
+
+/****************************************************************
+**
+** test_select_hyper_union(): Test basic H5S (dataspace) selection code.
+** Tests unions of hyperslabs of various sizes and dimensionalities.
+**
+****************************************************************/
+static void
+test_select_hyper_union(void)
+{
+ hid_t fid1; /* HDF5 File IDs */
+ hid_t dataset; /* Dataset ID */
+ hid_t sid1, sid2; /* Dataspace ID */
+ hid_t xfer; /* Dataset Transfer Property List ID */
+ hsize_t dims1[] = {SPACE1_DIM1, SPACE1_DIM2, SPACE1_DIM3};
+ hsize_t dims2[] = {SPACE2_DIM1, SPACE2_DIM2};
+ hsize_t dims3[] = {SPACE3_DIM1, SPACE3_DIM2};
+ hsize_t start[SPACE1_RANK]; /* Starting location of hyperslab */
+ hsize_t stride[SPACE1_RANK]; /* Stride of hyperslab */
+ hsize_t count[SPACE1_RANK]; /* Element count of hyperslab */
+ hsize_t block[SPACE1_RANK]; /* Block size of hyperslab */
+ size_t begin[SPACE2_DIM1] = /* Offset within irregular block */
+ {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* First ten rows start at offset 0 */
+ 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5}; /* Next eighteen rows start at offset 5 */
+ size_t len[SPACE2_DIM1] = /* Len of each row within irregular block */
+ {10, 10, 10, 10, 10, 10, 10, 10, /* First eight rows are 10 long */
+ 20, 20, /* Next two rows are 20 long */
+ 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15}; /* Next sixteen rows are 15 long */
+ uint8_t *wbuf, /* buffer to write to disk */
+ *rbuf, /* buffer read from disk */
+ *tbuf, /* temporary buffer pointer */
+ *tbuf2; /* temporary buffer pointer */
+ int i, j; /* Counters */
+ herr_t ret; /* Generic return value */
+ hssize_t npoints; /* Number of elements in selection */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing Hyperslab Selection Functions with unions of hyperslabs\n"));
+
+ /* Allocate write & read buffers */
+ wbuf = (uint8_t *)HDmalloc(sizeof(uint8_t) * SPACE2_DIM1 * SPACE2_DIM2);
+ CHECK_PTR(wbuf, "HDmalloc");
+ rbuf = (uint8_t *)HDcalloc(sizeof(uint8_t), (size_t)(SPACE3_DIM1 * SPACE3_DIM2));
+ CHECK_PTR(rbuf, "HDcalloc");
+
+ /* Initialize write buffer */
+ for (i = 0, tbuf = wbuf; i < SPACE2_DIM1; i++)
+ for (j = 0; j < SPACE2_DIM2; j++)
+ *tbuf++ = (uint8_t)((i * SPACE2_DIM2) + j);
+
+ /* Create file */
+ fid1 = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(fid1, FAIL, "H5Fcreate");
+
+ /* Test simple case of one block overlapping another */
+ /* Create dataspace for dataset */
+ sid1 = H5Screate_simple(SPACE1_RANK, dims1, NULL);
+ CHECK(sid1, FAIL, "H5Screate_simple");
+
+ /* Create dataspace for writing buffer */
+ sid2 = H5Screate_simple(SPACE2_RANK, dims2, NULL);
+ CHECK(sid2, FAIL, "H5Screate_simple");
+
+ /* Select 2x15x13 hyperslab for disk dataset */
+ start[0] = 1;
+ start[1] = 0;
+ start[2] = 0;
+ stride[0] = 1;
+ stride[1] = 1;
+ stride[2] = 1;
+ count[0] = 2;
+ count[1] = 15;
+ count[2] = 13;
+ block[0] = 1;
+ block[1] = 1;
+ block[2] = 1;
+ ret = H5Sselect_hyperslab(sid1, H5S_SELECT_SET, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ npoints = H5Sget_select_npoints(sid1);
+ VERIFY(npoints, 2 * 15 * 13, "H5Sget_select_npoints");
+
+ /* Select 8x26 hyperslab for memory dataset */
+ start[0] = 15;
+ start[1] = 0;
+ stride[0] = 1;
+ stride[1] = 1;
+ count[0] = 8;
+ count[1] = 26;
+ block[0] = 1;
+ block[1] = 1;
+ ret = H5Sselect_hyperslab(sid2, H5S_SELECT_SET, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
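+ /* Note: H5S_SELECT_OR adds this hyperslab to the existing selection; rows 15-22 and
+ * 22-29 overlap at row 22, so the union covers rows 15-29 (15 rows x 26 columns),
+ * matching the element count verified below */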
+ /* Union overlapping 8x26 hyperslab for memory dataset (to form a 15x26 selection) */
+ start[0] = 22;
+ start[1] = 0;
+ stride[0] = 1;
+ stride[1] = 1;
+ count[0] = 8;
+ count[1] = 26;
+ block[0] = 1;
+ block[1] = 1;
+ ret = H5Sselect_hyperslab(sid2, H5S_SELECT_OR, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ npoints = H5Sget_select_npoints(sid2);
+ VERIFY(npoints, 15 * 26, "H5Sget_select_npoints");
+
+ /* Create a dataset */
+ dataset = H5Dcreate2(fid1, SPACE1_NAME, H5T_NATIVE_UCHAR, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(dataset, FAIL, "H5Dcreate2");
+
+ /* Write selection to disk */
+ ret = H5Dwrite(dataset, H5T_NATIVE_UCHAR, sid2, sid1, H5P_DEFAULT, wbuf);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* Close memory dataspace */
+ ret = H5Sclose(sid2);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Create dataspace for reading buffer */
+ sid2 = H5Screate_simple(SPACE3_RANK, dims3, NULL);
+ CHECK(sid2, FAIL, "H5Screate_simple");
+
+ /* Select 15x26 hyperslab for reading memory dataset */
+ start[0] = 0;
+ start[1] = 0;
+ stride[0] = 1;
+ stride[1] = 1;
+ count[0] = 15;
+ count[1] = 26;
+ block[0] = 1;
+ block[1] = 1;
+ ret = H5Sselect_hyperslab(sid2, H5S_SELECT_SET, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* Read selection from disk */
+ ret = H5Dread(dataset, H5T_NATIVE_UCHAR, sid2, sid1, H5P_DEFAULT, rbuf);
+ CHECK(ret, FAIL, "H5Dread");
+
+ /* Compare data read with data written out */
+ for (i = 0; i < SPACE3_DIM1; i++) {
+ tbuf = wbuf + ((i + 15) * SPACE2_DIM2);
+ tbuf2 = rbuf + (i * SPACE3_DIM2);
+ for (j = 0; j < SPACE3_DIM2; j++, tbuf++, tbuf2++) {
+ if (*tbuf != *tbuf2)
+ TestErrPrintf("%d: hyperslab values don't match!, i=%d, j=%d, *tbuf=%d, *tbuf2=%d\n",
+ __LINE__, i, j, (int)*tbuf, (int)*tbuf2);
+ } /* end for */
+ } /* end for */
+
+ /* Close memory dataspace */
+ ret = H5Sclose(sid2);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close disk dataspace */
+ ret = H5Sclose(sid1);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close Dataset */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Test simple case of several blocks overlapping one another */
+ /* Create dataspace for dataset */
+ sid1 = H5Screate_simple(SPACE1_RANK, dims1, NULL);
+ CHECK(sid1, FAIL, "H5Screate_simple");
+
+ /* Create dataspace for writing buffer */
+ sid2 = H5Screate_simple(SPACE2_RANK, dims2, NULL);
+ CHECK(sid2, FAIL, "H5Screate_simple");
+
+ /* Select 2x15x13 hyperslab for disk dataset */
+ start[0] = 1;
+ start[1] = 0;
+ start[2] = 0;
+ stride[0] = 1;
+ stride[1] = 1;
+ stride[2] = 1;
+ count[0] = 2;
+ count[1] = 15;
+ count[2] = 13;
+ block[0] = 1;
+ block[1] = 1;
+ block[2] = 1;
+ ret = H5Sselect_hyperslab(sid1, H5S_SELECT_SET, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* Select 8x15 hyperslab for memory dataset */
+ start[0] = 15;
+ start[1] = 0;
+ stride[0] = 1;
+ stride[1] = 1;
+ count[0] = 8;
+ count[1] = 15;
+ block[0] = 1;
+ block[1] = 1;
+ ret = H5Sselect_hyperslab(sid2, H5S_SELECT_SET, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* Union overlapping 8x15 hyperslab for memory dataset (to form a 15x15 selection) */
+ start[0] = 22;
+ start[1] = 0;
+ stride[0] = 1;
+ stride[1] = 1;
+ count[0] = 8;
+ count[1] = 15;
+ block[0] = 1;
+ block[1] = 1;
+ ret = H5Sselect_hyperslab(sid2, H5S_SELECT_OR, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* Union overlapping 15x15 hyperslab for memory dataset (to form a 15x26 selection) */
+ start[0] = 15;
+ start[1] = 11;
+ stride[0] = 1;
+ stride[1] = 1;
+ count[0] = 15;
+ count[1] = 15;
+ block[0] = 1;
+ block[1] = 1;
+ ret = H5Sselect_hyperslab(sid2, H5S_SELECT_OR, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ npoints = H5Sget_select_npoints(sid2);
+ VERIFY(npoints, 15 * 26, "H5Sget_select_npoints");
+
+ /* Create a dataset */
+ dataset = H5Dcreate2(fid1, SPACE2_NAME, H5T_NATIVE_UCHAR, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(dataset, FAIL, "H5Dcreate2");
+
+ /* Write selection to disk */
+ ret = H5Dwrite(dataset, H5T_NATIVE_UCHAR, sid2, sid1, H5P_DEFAULT, wbuf);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* Close memory dataspace */
+ ret = H5Sclose(sid2);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Create dataspace for reading buffer */
+ sid2 = H5Screate_simple(SPACE3_RANK, dims3, NULL);
+ CHECK(sid2, FAIL, "H5Screate_simple");
+
+ /* Select 15x26 hyperslab for reading memory dataset */
+ start[0] = 0;
+ start[1] = 0;
+ stride[0] = 1;
+ stride[1] = 1;
+ count[0] = 15;
+ count[1] = 26;
+ block[0] = 1;
+ block[1] = 1;
+ ret = H5Sselect_hyperslab(sid2, H5S_SELECT_SET, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* Read selection from disk */
+ ret = H5Dread(dataset, H5T_NATIVE_UCHAR, sid2, sid1, H5P_DEFAULT, rbuf);
+ CHECK(ret, FAIL, "H5Dread");
+
+ /* Compare data read with data written out */
+ for (i = 0; i < SPACE3_DIM1; i++) {
+ tbuf = wbuf + ((i + 15) * SPACE2_DIM2);
+ tbuf2 = rbuf + (i * SPACE3_DIM2);
+ for (j = 0; j < SPACE3_DIM2; j++, tbuf++, tbuf2++) {
+ if (*tbuf != *tbuf2)
+ TestErrPrintf("%d: hyperslab values don't match!, i=%d, j=%d, *tbuf=%d, *tbuf2=%d\n",
+ __LINE__, i, j, (int)*tbuf, (int)*tbuf2);
+ } /* end for */
+ } /* end for */
+
+ /* Close memory dataspace */
+ ret = H5Sclose(sid2);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close disk dataspace */
+ ret = H5Sclose(sid1);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close Dataset */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Test disjoint case of two non-overlapping blocks */
+ /* Create dataspace for dataset */
+ sid1 = H5Screate_simple(SPACE1_RANK, dims1, NULL);
+ CHECK(sid1, FAIL, "H5Screate_simple");
+
+ /* Create dataspace for writing buffer */
+ sid2 = H5Screate_simple(SPACE2_RANK, dims2, NULL);
+ CHECK(sid2, FAIL, "H5Screate_simple");
+
+ /* Select 2x15x13 hyperslab for disk dataset */
+ start[0] = 1;
+ start[1] = 0;
+ start[2] = 0;
+ stride[0] = 1;
+ stride[1] = 1;
+ stride[2] = 1;
+ count[0] = 2;
+ count[1] = 15;
+ count[2] = 13;
+ block[0] = 1;
+ block[1] = 1;
+ block[2] = 1;
+ ret = H5Sselect_hyperslab(sid1, H5S_SELECT_SET, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* Select 7x26 hyperslab for memory dataset */
+ start[0] = 1;
+ start[1] = 0;
+ stride[0] = 1;
+ stride[1] = 1;
+ count[0] = 7;
+ count[1] = 26;
+ block[0] = 1;
+ block[1] = 1;
+ ret = H5Sselect_hyperslab(sid2, H5S_SELECT_SET, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* Union non-overlapping 8x26 hyperslab for memory dataset (to form a 15x26 disjoint selection) */
+ start[0] = 22;
+ start[1] = 0;
+ stride[0] = 1;
+ stride[1] = 1;
+ count[0] = 8;
+ count[1] = 26;
+ block[0] = 1;
+ block[1] = 1;
+ ret = H5Sselect_hyperslab(sid2, H5S_SELECT_OR, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ npoints = H5Sget_select_npoints(sid2);
+ VERIFY(npoints, 15 * 26, "H5Sget_select_npoints");
+
+ /* Create a dataset */
+ dataset = H5Dcreate2(fid1, SPACE3_NAME, H5T_NATIVE_UCHAR, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(dataset, FAIL, "H5Dcreate2");
+
+ /* Write selection to disk */
+ ret = H5Dwrite(dataset, H5T_NATIVE_UCHAR, sid2, sid1, H5P_DEFAULT, wbuf);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* Close memory dataspace */
+ ret = H5Sclose(sid2);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Create dataspace for reading buffer */
+ sid2 = H5Screate_simple(SPACE3_RANK, dims3, NULL);
+ CHECK(sid2, FAIL, "H5Screate_simple");
+
+ /* Select 15x26 hyperslab for reading memory dataset */
+ start[0] = 0;
+ start[1] = 0;
+ stride[0] = 1;
+ stride[1] = 1;
+ count[0] = 15;
+ count[1] = 26;
+ block[0] = 1;
+ block[1] = 1;
+ ret = H5Sselect_hyperslab(sid2, H5S_SELECT_SET, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* Read selection from disk */
+ ret = H5Dread(dataset, H5T_NATIVE_UCHAR, sid2, sid1, H5P_DEFAULT, rbuf);
+ CHECK(ret, FAIL, "H5Dread");
+
+ /* Compare data read with data written out */
+ for (i = 0; i < SPACE3_DIM1; i++) {
+ /* Jump over gap in middle */
+ if (i < 7)
+ tbuf = wbuf + ((i + 1) * SPACE2_DIM2);
+ else
+ tbuf = wbuf + ((i + 15) * SPACE2_DIM2);
+ tbuf2 = rbuf + (i * SPACE3_DIM2);
+ for (j = 0; j < SPACE3_DIM2; j++, tbuf++, tbuf2++) {
+ if (*tbuf != *tbuf2)
+ TestErrPrintf("%d: hyperslab values don't match!, i=%d, j=%d, *tbuf=%d, *tbuf2=%d\n",
+ __LINE__, i, j, (int)*tbuf, (int)*tbuf2);
+ } /* end for */
+ } /* end for */
+
+ /* Close memory dataspace */
+ ret = H5Sclose(sid2);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close disk dataspace */
+ ret = H5Sclose(sid1);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close Dataset */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Test disjoint case of two non-overlapping blocks with hyperslab caching turned off */
+ /* Create dataspace for dataset */
+ sid1 = H5Screate_simple(SPACE1_RANK, dims1, NULL);
+ CHECK(sid1, FAIL, "H5Screate_simple");
+
+ /* Create dataspace for writing buffer */
+ sid2 = H5Screate_simple(SPACE2_RANK, dims2, NULL);
+ CHECK(sid2, FAIL, "H5Screate_simple");
+
+ /* Select 2x15x13 hyperslab for disk dataset */
+ start[0] = 1;
+ start[1] = 0;
+ start[2] = 0;
+ stride[0] = 1;
+ stride[1] = 1;
+ stride[2] = 1;
+ count[0] = 2;
+ count[1] = 15;
+ count[2] = 13;
+ block[0] = 1;
+ block[1] = 1;
+ block[2] = 1;
+ ret = H5Sselect_hyperslab(sid1, H5S_SELECT_SET, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* Select 7x26 hyperslab for memory dataset */
+ start[0] = 1;
+ start[1] = 0;
+ stride[0] = 1;
+ stride[1] = 1;
+ count[0] = 7;
+ count[1] = 26;
+ block[0] = 1;
+ block[1] = 1;
+ ret = H5Sselect_hyperslab(sid2, H5S_SELECT_SET, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* Union non-overlapping 8x26 hyperslab for memory dataset (to form a 15x26 disjoint selection) */
+ start[0] = 22;
+ start[1] = 0;
+ stride[0] = 1;
+ stride[1] = 1;
+ count[0] = 8;
+ count[1] = 26;
+ block[0] = 1;
+ block[1] = 1;
+ ret = H5Sselect_hyperslab(sid2, H5S_SELECT_OR, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ npoints = H5Sget_select_npoints(sid2);
+ VERIFY(npoints, 15 * 26, "H5Sget_select_npoints");
+
+ /* Create a dataset */
+ dataset = H5Dcreate2(fid1, SPACE4_NAME, H5T_NATIVE_UCHAR, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(dataset, FAIL, "H5Dcreate2");
+
+ xfer = H5Pcreate(H5P_DATASET_XFER);
+ CHECK(xfer, FAIL, "H5Pcreate");
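+ /* Note: the transfer property list is left with default properties here; it is simply
+ * passed to H5Dwrite/H5Dread below */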
+
+ /* Write selection to disk */
+ ret = H5Dwrite(dataset, H5T_NATIVE_UCHAR, sid2, sid1, xfer, wbuf);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* Close memory dataspace */
+ ret = H5Sclose(sid2);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Create dataspace for reading buffer */
+ sid2 = H5Screate_simple(SPACE3_RANK, dims3, NULL);
+ CHECK(sid2, FAIL, "H5Screate_simple");
+
+ /* Select 15x26 hyperslab for reading memory dataset */
+ start[0] = 0;
+ start[1] = 0;
+ stride[0] = 1;
+ stride[1] = 1;
+ count[0] = 15;
+ count[1] = 26;
+ block[0] = 1;
+ block[1] = 1;
+ ret = H5Sselect_hyperslab(sid2, H5S_SELECT_SET, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* Read selection from disk */
+ ret = H5Dread(dataset, H5T_NATIVE_UCHAR, sid2, sid1, xfer, rbuf);
+ CHECK(ret, FAIL, "H5Dread");
+
+ /* Close transfer property list */
+ ret = H5Pclose(xfer);
+ CHECK(ret, FAIL, "H5Pclose");
+
+ /* Compare data read with data written out */
+ for (i = 0; i < SPACE3_DIM1; i++) {
+ /* Jump over gap in middle */
+ if (i < 7)
+ tbuf = wbuf + ((i + 1) * SPACE2_DIM2);
+ else
+ tbuf = wbuf + ((i + 15) * SPACE2_DIM2);
+ tbuf2 = rbuf + (i * SPACE3_DIM2);
+ for (j = 0; j < SPACE3_DIM2; j++, tbuf++, tbuf2++) {
+ if (*tbuf != *tbuf2)
+ TestErrPrintf("%d: hyperslab values don't match!, i=%d, j=%d, *tbuf=%d, *tbuf2=%d\n",
+ __LINE__, i, j, (int)*tbuf, (int)*tbuf2);
+ } /* end for */
+ } /* end for */
+
+ /* Close memory dataspace */
+ ret = H5Sclose(sid2);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close disk dataspace */
+ ret = H5Sclose(sid1);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close Dataset */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Test case of two blocks which overlap corners and must be split */
+ /* Create dataspace for dataset */
+ sid1 = H5Screate_simple(SPACE1_RANK, dims1, NULL);
+ CHECK(sid1, FAIL, "H5Screate_simple");
+
+ /* Create dataspace for writing buffer */
+ sid2 = H5Screate_simple(SPACE2_RANK, dims2, NULL);
+ CHECK(sid2, FAIL, "H5Screate_simple");
+
+ /* Select 2x15x13 hyperslab for disk dataset */
+ start[0] = 1;
+ start[1] = 0;
+ start[2] = 0;
+ stride[0] = 1;
+ stride[1] = 1;
+ stride[2] = 1;
+ count[0] = 2;
+ count[1] = 15;
+ count[2] = 13;
+ block[0] = 1;
+ block[1] = 1;
+ block[2] = 1;
+ ret = H5Sselect_hyperslab(sid1, H5S_SELECT_SET, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* Select 10x10 hyperslab for memory dataset */
+ start[0] = 0;
+ start[1] = 0;
+ stride[0] = 1;
+ stride[1] = 1;
+ count[0] = 10;
+ count[1] = 10;
+ block[0] = 1;
+ block[1] = 1;
+ ret = H5Sselect_hyperslab(sid2, H5S_SELECT_SET, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* Union overlapping 20x15 hyperslab for memory dataset (forming an irregularly shaped region) */
+ start[0] = 8;
+ start[1] = 5;
+ stride[0] = 1;
+ stride[1] = 1;
+ count[0] = 20;
+ count[1] = 15;
+ block[0] = 1;
+ block[1] = 1;
+ ret = H5Sselect_hyperslab(sid2, H5S_SELECT_OR, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ npoints = H5Sget_select_npoints(sid2);
+ VERIFY(npoints, 15 * 26, "H5Sget_select_npoints");
+
+ /* Create a dataset */
+ dataset = H5Dcreate2(fid1, SPACE5_NAME, H5T_NATIVE_UCHAR, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(dataset, FAIL, "H5Dcreate2");
+
+ /* Write selection to disk */
+ ret = H5Dwrite(dataset, H5T_NATIVE_UCHAR, sid2, sid1, H5P_DEFAULT, wbuf);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* Close memory dataspace */
+ ret = H5Sclose(sid2);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Create dataspace for reading buffer */
+ sid2 = H5Screate_simple(SPACE3_RANK, dims3, NULL);
+ CHECK(sid2, FAIL, "H5Screate_simple");
+
+ /* Select 15x26 hyperslab for reading memory dataset */
+ start[0] = 0;
+ start[1] = 0;
+ stride[0] = 1;
+ stride[1] = 1;
+ count[0] = 15;
+ count[1] = 26;
+ block[0] = 1;
+ block[1] = 1;
+ ret = H5Sselect_hyperslab(sid2, H5S_SELECT_SET, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* Read selection from disk */
+ ret = H5Dread(dataset, H5T_NATIVE_UCHAR, sid2, sid1, H5P_DEFAULT, rbuf);
+ CHECK(ret, FAIL, "H5Dread");
+
+ /* Compare data read with data written out */
+ for (i = 0, tbuf2 = rbuf; i < SPACE2_DIM1; i++) {
+ tbuf = wbuf + (i * SPACE2_DIM2) + begin[i];
+ for (j = 0; j < (int)len[i]; j++, tbuf++, tbuf2++) {
+ if (*tbuf != *tbuf2)
+ TestErrPrintf("%d: hyperslab values don't match!, i=%d, j=%d, *tbuf=%d, *tbuf2=%d\n",
+ __LINE__, i, j, (int)*tbuf, (int)*tbuf2);
+ } /* end for */
+ } /* end for */
+
+ /* Close memory dataspace */
+ ret = H5Sclose(sid2);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close disk dataspace */
+ ret = H5Sclose(sid1);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close Dataset */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close file */
+ ret = H5Fclose(fid1);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Free memory buffers */
+ HDfree(wbuf);
+ HDfree(rbuf);
+} /* test_select_hyper_union() */
+
+/****************************************************************
+**
+** test_select_hyper_union_stagger(): Test basic H5S (dataspace) selection code.
+** Tests unions of staggered hyperslabs. (Uses H5Scombine_hyperslab
+** and H5Smodify_select instead of H5Sselect_hyperslab)
+**
+****************************************************************/
+static void
+test_select_hyper_union_stagger(void)
+{
+ hid_t file_id; /* File ID */
+ hid_t dset_id; /* Dataset ID */
+ hid_t dataspace; /* File dataspace ID */
+ hid_t memspace; /* Memory dataspace ID */
+ hid_t tmp_space; /* Temporary dataspace ID */
+ hid_t tmp2_space; /* Another temporary dataspace ID */
+ hsize_t dimsm[2] = {7, 7}; /* Memory array dimensions */
+ hsize_t dimsf[2] = {6, 5}; /* File array dimensions */
+ hsize_t count[2] = {3, 1}; /* 1st Hyperslab size */
+ hsize_t count2[2] = {3, 1}; /* 2nd Hyperslab size */
+ hsize_t count3[2] = {2, 1}; /* 3rd Hyperslab size */
+ hsize_t start[2] = {0, 0}; /* 1st Hyperslab offset */
+ hsize_t start2[2] = {2, 1}; /* 2nd Hyperslab offset */
+ hsize_t start3[2] = {4, 2}; /* 3rd Hyperslab offset */
+ hsize_t count_out[2] = {4, 2}; /* Hyperslab size in memory */
+ hsize_t start_out[2] = {0, 3}; /* Hyperslab offset in memory */
+ int data[6][5]; /* Data to write */
+ int data_out[7][7]; /* Data read in */
+ int input_loc[8][2] = {{0, 0}, {1, 0}, {2, 0}, {2, 1}, {3, 1}, {4, 1}, {4, 2}, {5, 2}};
+ int output_loc[8][2] = {{0, 3}, {0, 4}, {1, 3}, {1, 4}, {2, 3}, {2, 4}, {3, 3}, {3, 4}};
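+ /* input_loc lists the file coordinates covered by the three unioned hyperslabs;
+ * output_loc lists where those elements land within the 4x2 memory hyperslab at (0, 3) */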
+ int dsetrank = 2; /* File Dataset rank */
+ int memrank = 2; /* Memory Dataset rank */
+ int i, j; /* Local counting variables */
+ herr_t error;
+ hsize_t stride[2] = {1, 1};
+ hsize_t block[2] = {1, 1};
+
+ /* Initialize data to write */
+ for (i = 0; i < 6; i++)
+ for (j = 0; j < 5; j++)
+ data[i][j] = j * 10 + i;
+
+ /* Create file */
+ file_id = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(file_id, FAIL, "H5Fcreate");
+
+ /* Create File Dataspace */
+ dataspace = H5Screate_simple(dsetrank, dimsf, NULL);
+ CHECK(dataspace, FAIL, "H5Screate_simple");
+
+ /* Create File Dataset */
+ dset_id =
+ H5Dcreate2(file_id, "IntArray", H5T_NATIVE_INT, dataspace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(dset_id, FAIL, "H5Dcreate2");
+
+ /* Write File Dataset */
+ error = H5Dwrite(dset_id, H5T_NATIVE_INT, dataspace, dataspace, H5P_DEFAULT, data);
+ CHECK(error, FAIL, "H5Dwrite");
+
+ /* Close things */
+ error = H5Sclose(dataspace);
+ CHECK(error, FAIL, "H5Sclose");
+ error = H5Dclose(dset_id);
+ CHECK(error, FAIL, "H5Dclose");
+ error = H5Fclose(file_id);
+ CHECK(error, FAIL, "H5Fclose");
+
+ /* Initialize input buffer */
+ HDmemset(data_out, 0, 7 * 7 * sizeof(int));
+
+ /* Open file */
+ file_id = H5Fopen(FILENAME, H5F_ACC_RDONLY, H5P_DEFAULT);
+ CHECK(file_id, FAIL, "H5Fopen");
+
+ /* Open dataset */
+ dset_id = H5Dopen2(file_id, "IntArray", H5P_DEFAULT);
+ CHECK(dset_id, FAIL, "H5Dopen2");
+
+ /* Get the dataspace */
+ dataspace = H5Dget_space(dset_id);
+ CHECK(dataspace, FAIL, "H5Dget_space");
+
+ /* Select the hyperslabs */
+ error = H5Sselect_hyperslab(dataspace, H5S_SELECT_SET, start, stride, count, block);
+ CHECK(error, FAIL, "H5Sselect_hyperslab");
+ tmp_space = H5Scombine_hyperslab(dataspace, H5S_SELECT_OR, start2, stride, count2, block);
+ CHECK(tmp_space, FAIL, "H5Scombine_hyperslab");
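+ /* Note: H5Scombine_hyperslab leaves 'dataspace' untouched and returns a new dataspace
+ * (tmp_space) whose selection is the first hyperslab OR-ed with the second one */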
+
+ /* Copy the file dataspace and select hyperslab */
+ tmp2_space = H5Scopy(dataspace);
+ CHECK(tmp2_space, FAIL, "H5Scopy");
+ error = H5Sselect_hyperslab(tmp2_space, H5S_SELECT_SET, start3, stride, count3, block);
+ CHECK(error, FAIL, "H5Sselect_hyperslab");
+
+ /* Combine the copied dataspace with the temporary dataspace */
+ error = H5Smodify_select(tmp_space, H5S_SELECT_OR, tmp2_space);
+ CHECK(error, FAIL, "H5Smodify_select");
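+ /* tmp_space now selects the union of all three staggered hyperslabs and serves as the
+ * file dataspace for the read below */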
+
+ /* Create Memory Dataspace */
+ memspace = H5Screate_simple(memrank, dimsm, NULL);
+ CHECK(memspace, FAIL, "H5Screate_simple");
+
+ /* Select hyperslab in memory */
+ error = H5Sselect_hyperslab(memspace, H5S_SELECT_SET, start_out, stride, count_out, block);
+ CHECK(error, FAIL, "H5Sselect_hyperslab");
+
+ /* Read File Dataset */
+ error = H5Dread(dset_id, H5T_NATIVE_INT, memspace, tmp_space, H5P_DEFAULT, data_out);
+ CHECK(error, FAIL, "H5Dread");
+
+ /* Verify input data */
+ for (i = 0; i < 8; i++) {
+ if (data[input_loc[i][0]][input_loc[i][1]] != data_out[output_loc[i][0]][output_loc[i][1]]) {
+ HDprintf("input data #%d is wrong!\n", i);
+ HDprintf("input_loc=[%d][%d]\n", input_loc[i][0], input_loc[i][1]);
+ HDprintf("output_loc=[%d][%d]\n", output_loc[i][0], output_loc[i][1]);
+ HDprintf("data=%d\n", data[input_loc[i][0]][input_loc[i][1]]);
+ TestErrPrintf("data_out=%d\n", data_out[output_loc[i][0]][output_loc[i][1]]);
+ } /* end if */
+ } /* end for */
+
+ /* Close things */
+ error = H5Sclose(tmp2_space);
+ CHECK(error, FAIL, "H5Sclose");
+ error = H5Sclose(tmp_space);
+ CHECK(error, FAIL, "H5Sclose");
+ error = H5Sclose(dataspace);
+ CHECK(error, FAIL, "H5Sclose");
+ error = H5Sclose(memspace);
+ CHECK(error, FAIL, "H5Sclose");
+ error = H5Dclose(dset_id);
+ CHECK(error, FAIL, "H5Dclose");
+ error = H5Fclose(file_id);
+ CHECK(error, FAIL, "H5Fclose");
+} /* test_select_hyper_union_stagger() */
+
+/****************************************************************
+**
+** test_select_hyper_union_3d(): Test basic H5S (dataspace) selection code.
+** Tests unions of hyperslabs in 3-D (Uses H5Scombine_hyperslab
+** and H5Scombine_select instead of H5Sselect_hyperslab)
+**
+****************************************************************/
+static void
+test_select_hyper_union_3d(void)
+{
+ hid_t fid1; /* HDF5 File IDs */
+ hid_t dataset; /* Dataset ID */
+ hid_t sid1, sid2; /* Dataspace ID */
+ hid_t tmp_space; /* Temporary Dataspace ID */
+ hid_t tmp2_space; /* Another temporary Dataspace ID */
+ hsize_t dims1[] = {SPACE1_DIM1, SPACE1_DIM2, SPACE1_DIM3};
+ hsize_t dims2[] = {SPACE4_DIM1, SPACE4_DIM2, SPACE4_DIM3};
+ hsize_t dims3[] = {SPACE3_DIM1, SPACE3_DIM2};
+ hsize_t start[SPACE1_RANK]; /* Starting location of hyperslab */
+ hsize_t stride[SPACE1_RANK]; /* Stride of hyperslab */
+ hsize_t count[SPACE1_RANK]; /* Element count of hyperslab */
+ hsize_t block[SPACE1_RANK]; /* Block size of hyperslab */
+ struct row_list {
+ size_t z;
+ size_t y;
+ size_t x;
+ size_t l;
+ } rows[] = {
+ /* Array of x,y,z coordinates & length for each row written from memory */
+ {0, 0, 0, 6}, /* 1st face of 3-D object */
+ {0, 1, 0, 6}, {0, 2, 0, 6}, {0, 3, 0, 6}, {0, 4, 0, 6}, {1, 0, 0, 6}, /* 2nd face of 3-D object */
+ {1, 1, 0, 6}, {1, 2, 0, 6}, {1, 3, 0, 6}, {1, 4, 0, 6}, {2, 0, 0, 6}, /* 3rd face of 3-D object */
+ {2, 1, 0, 10}, {2, 2, 0, 10}, {2, 3, 0, 10}, {2, 4, 0, 10}, {2, 5, 2, 8},
+ {2, 6, 2, 8}, {3, 0, 0, 6}, /* 4th face of 3-D object */
+ {3, 1, 0, 10}, {3, 2, 0, 10}, {3, 3, 0, 10}, {3, 4, 0, 10}, {3, 5, 2, 8},
+ {3, 6, 2, 8}, {4, 0, 0, 6}, /* 5th face of 3-D object */
+ {4, 1, 0, 10}, {4, 2, 0, 10}, {4, 3, 0, 10}, {4, 4, 0, 10}, {4, 5, 2, 8},
+ {4, 6, 2, 8}, {5, 1, 2, 8}, /* 6th face of 3-D object */
+ {5, 2, 2, 8}, {5, 3, 2, 8}, {5, 4, 2, 8}, {5, 5, 2, 8}, {5, 6, 2, 8},
+ {6, 1, 2, 8}, /* 7th face of 3-D object */
+ {6, 2, 2, 8}, {6, 3, 2, 8}, {6, 4, 2, 8}, {6, 5, 2, 8}, {6, 6, 2, 8},
+ {7, 1, 2, 8}, /* 8th face of 3-D object */
+ {7, 2, 2, 8}, {7, 3, 2, 8}, {7, 4, 2, 8}, {7, 5, 2, 8}, {7, 6, 2, 8}};
+ uint8_t *wbuf, /* buffer to write to disk */
+ *rbuf, /* buffer read from disk */
+ *tbuf, /* temporary buffer pointer */
+ *tbuf2; /* temporary buffer pointer */
+ int i, j, k; /* Counters */
+ herr_t ret; /* Generic return value */
+ hsize_t npoints; /* Number of elements in selection */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing Hyperslab Selection Functions with unions of 3-D hyperslabs\n"));
+
+ /* Allocate write & read buffers */
+ wbuf = (uint8_t *)HDmalloc(sizeof(uint8_t) * SPACE4_DIM1 * SPACE4_DIM2 * SPACE4_DIM3);
+ CHECK_PTR(wbuf, "HDmalloc");
+ rbuf = (uint8_t *)HDcalloc(sizeof(uint8_t), SPACE3_DIM1 * SPACE3_DIM2);
+ CHECK_PTR(rbuf, "HDcalloc");
+
+ /* Initialize write buffer */
+ for (i = 0, tbuf = wbuf; i < SPACE4_DIM1; i++)
+ for (j = 0; j < SPACE4_DIM2; j++)
+ for (k = 0; k < SPACE4_DIM3; k++)
+ *tbuf++ = (uint8_t)((((i * SPACE4_DIM2) + j) * SPACE4_DIM3) + k);
+
+ /* Create file */
+ fid1 = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(fid1, FAIL, "H5Fcreate");
+
+ /* Test case of two blocks which overlap corners and must be split */
+ /* Create dataspace for dataset on disk */
+ sid1 = H5Screate_simple(SPACE1_RANK, dims1, NULL);
+ CHECK(sid1, FAIL, "H5Screate_simple");
+
+ /* Create dataspace for writing buffer */
+ sid2 = H5Screate_simple(SPACE4_RANK, dims2, NULL);
+ CHECK(sid2, FAIL, "H5Screate_simple");
+
+ /* Select 2x15x13 hyperslab for disk dataset */
+ start[0] = 1;
+ start[1] = 0;
+ start[2] = 0;
+ stride[0] = 1;
+ stride[1] = 1;
+ stride[2] = 1;
+ count[0] = 2;
+ count[1] = 15;
+ count[2] = 13;
+ block[0] = 1;
+ block[1] = 1;
+ block[2] = 1;
+ ret = H5Sselect_hyperslab(sid1, H5S_SELECT_SET, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* Select 5x5x6 hyperslab for memory dataset */
+ start[0] = 0;
+ start[1] = 0;
+ start[2] = 0;
+ stride[0] = 1;
+ stride[1] = 1;
+ stride[2] = 1;
+ count[0] = 5;
+ count[1] = 5;
+ count[2] = 6;
+ block[0] = 1;
+ block[1] = 1;
+ block[2] = 1;
+ ret = H5Sselect_hyperslab(sid2, H5S_SELECT_SET, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* Union overlapping 6x6x8 hyperslab for memory dataset (forming an irregularly shaped region) */
+ start[0] = 2;
+ start[1] = 1;
+ start[2] = 2;
+ stride[0] = 1;
+ stride[1] = 1;
+ stride[2] = 1;
+ count[0] = 6;
+ count[1] = 6;
+ count[2] = 8;
+ block[0] = 1;
+ block[1] = 1;
+ block[2] = 1;
+ tmp_space = H5Scombine_hyperslab(sid2, H5S_SELECT_SET, start, stride, count, block);
+ CHECK(tmp_space, FAIL, "H5Scombine_hyperslab");
+
+ /* Combine dataspaces and create new dataspace */
+ tmp2_space = H5Scombine_select(sid2, H5S_SELECT_OR, tmp_space);
+ CHECK(tmp2_space, FAIL, "H5Scombine_select");
+
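+ /* The 5x5x6 block (150 elements) and the 6x6x8 block (288 elements) overlap in
+ * 3x4x4 = 48 elements, so the combined selection holds 150 + 288 - 48 = 390 = 15*26
+ * elements */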
+ npoints = (hsize_t)H5Sget_select_npoints(tmp2_space);
+ VERIFY(npoints, 15 * 26, "H5Sget_select_npoints");
+
+ /* Create a dataset */
+ dataset = H5Dcreate2(fid1, SPACE1_NAME, H5T_NATIVE_UCHAR, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(dataset, FAIL, "H5Dcreate2");
+
+ /* Write selection to disk */
+ ret = H5Dwrite(dataset, H5T_NATIVE_UCHAR, tmp2_space, sid1, H5P_DEFAULT, wbuf);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* Close temporary dataspaces */
+ ret = H5Sclose(tmp_space);
+ CHECK(ret, FAIL, "H5Sclose");
+ ret = H5Sclose(tmp2_space);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close memory dataspace */
+ ret = H5Sclose(sid2);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Create dataspace for reading buffer */
+ sid2 = H5Screate_simple(SPACE3_RANK, dims3, NULL);
+ CHECK(sid2, FAIL, "H5Screate_simple");
+
+ /* Select 15x26 hyperslab for reading memory dataset */
+ start[0] = 0;
+ start[1] = 0;
+ stride[0] = 1;
+ stride[1] = 1;
+ count[0] = 15;
+ count[1] = 26;
+ block[0] = 1;
+ block[1] = 1;
+ ret = H5Sselect_hyperslab(sid2, H5S_SELECT_SET, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* Read selection from disk */
+ ret = H5Dread(dataset, H5T_NATIVE_UCHAR, sid2, sid1, H5P_DEFAULT, rbuf);
+ CHECK(ret, FAIL, "H5Dread");
+
+ /* Compare data read with data written out */
+ for (i = 0, tbuf2 = rbuf; i < (int)(sizeof(rows) / sizeof(struct row_list)); i++) {
+ tbuf = wbuf + (rows[i].z * SPACE4_DIM3 * SPACE4_DIM2) + (rows[i].y * SPACE4_DIM3) + rows[i].x;
+ for (j = 0; j < (int)rows[i].l; j++, tbuf++, tbuf2++) {
+ if (*tbuf != *tbuf2)
+ TestErrPrintf("%d: hyperslab values don't match!, i=%d, j=%d, *tbuf=%d, *tbuf2=%d\n",
+ __LINE__, i, j, (int)*tbuf, (int)*tbuf2);
+ } /* end for */
+ } /* end for */
+
+ /* Close memory dataspace */
+ ret = H5Sclose(sid2);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close disk dataspace */
+ ret = H5Sclose(sid1);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close Dataset */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close file */
+ ret = H5Fclose(fid1);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Free memory buffers */
+ HDfree(wbuf);
+ HDfree(rbuf);
+} /* test_select_hyper_union_3d() */
+
+/****************************************************************
+**
+** test_select_hyper_valid_combination(): Tests invalid and valid
+** combinations of selections on dataspaces for H5Scombine_select
+** and H5Smodify_select.
+**
+****************************************************************/
+static void
+test_select_hyper_valid_combination(void)
+{
+ hid_t single_pt_sid; /* Dataspace ID with single point selection */
+ hid_t single_hyper_sid; /* Dataspace ID with single block hyperslab selection */
+ hid_t regular_hyper_sid; /* Dataspace ID with regular hyperslab selection */
+ hid_t non_existent_sid = -1; /* A non-existent space id */
+ hid_t tmp_sid; /* Temporary dataspace ID */
+ hsize_t dims2D[] = {SPACE9_DIM1, SPACE9_DIM2};
+ hsize_t dims3D[] = {SPACE4_DIM1, SPACE4_DIM2, SPACE4_DIM3};
+
+ hsize_t coord1[1][SPACE2_RANK]; /* Coordinates for single point selection */
+ hsize_t start[SPACE4_RANK]; /* Hyperslab start */
+ hsize_t stride[SPACE4_RANK]; /* Hyperslab stride */
+ hsize_t count[SPACE4_RANK]; /* Hyperslab block count */
+ hsize_t block[SPACE4_RANK]; /* Hyperslab block size */
+ herr_t ret; /* Generic return value */
+
+ /* Output message about test being performed */
+ MESSAGE(6, ("Testing Selection Combination Validity\n"));
+ HDassert(SPACE9_DIM2 >= POINT1_NPOINTS);
+
+ /* Create dataspace for single point selection */
+ single_pt_sid = H5Screate_simple(SPACE9_RANK, dims2D, NULL);
+ CHECK(single_pt_sid, FAIL, "H5Screate_simple");
+
+ /* Select a single point for the single point selection */
+ coord1[0][0] = 2;
+ coord1[0][1] = 2;
+ ret = H5Sselect_elements(single_pt_sid, H5S_SELECT_SET, (size_t)1, (const hsize_t *)coord1);
+ CHECK(ret, FAIL, "H5Sselect_elements");
+
+ /* Create dataspace for single hyperslab selection */
+ single_hyper_sid = H5Screate_simple(SPACE9_RANK, dims2D, NULL);
+ CHECK(single_hyper_sid, FAIL, "H5Screate_simple");
+
+ /* Select 10x10 hyperslab for single hyperslab selection */
+ start[0] = 1;
+ start[1] = 1;
+ stride[0] = 1;
+ stride[1] = 1;
+ count[0] = 1;
+ count[1] = 1;
+ block[0] = (SPACE9_DIM1 - 2);
+ block[1] = (SPACE9_DIM2 - 2);
+ ret = H5Sselect_hyperslab(single_hyper_sid, H5S_SELECT_SET, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* Create dataspace for regular hyperslab selection */
+ regular_hyper_sid = H5Screate_simple(SPACE4_RANK, dims3D, NULL);
+ CHECK(regular_hyper_sid, FAIL, "H5Screate_simple");
+
+ /* Select regular, strided hyperslab selection */
+ start[0] = 2;
+ start[1] = 2;
+ start[2] = 2;
+ stride[0] = 2;
+ stride[1] = 2;
+ stride[2] = 2;
+ count[0] = 5;
+ count[1] = 2;
+ count[2] = 5;
+ block[0] = 1;
+ block[1] = 1;
+ block[2] = 1;
+ ret = H5Sselect_hyperslab(regular_hyper_sid, H5S_SELECT_SET, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* Test all the selections created */
+
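+ /* Each combination below is expected to fail: H5Scombine_select and H5Smodify_select
+ * require hyperslab selections on dataspaces of matching rank, a combining operator
+ * (not H5S_SELECT_SET), and valid dataspace IDs. H5E_BEGIN_TRY/H5E_END_TRY suppresses
+ * the expected error output */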
+ /* Test the invalid combinations between point and hyperslab */
+ H5E_BEGIN_TRY
+ {
+ tmp_sid = H5Scombine_select(single_pt_sid, H5S_SELECT_AND, single_hyper_sid);
+ }
+ H5E_END_TRY;
+ VERIFY(tmp_sid, FAIL, "H5Scombine_select");
+
+ H5E_BEGIN_TRY
+ {
+ tmp_sid = H5Smodify_select(single_pt_sid, H5S_SELECT_AND, single_hyper_sid);
+ }
+ H5E_END_TRY;
+ VERIFY(tmp_sid, FAIL, "H5Smodify_select");
+
+ /* Test the invalid combination of two hyperslab selections with different dimensionality */
+ H5E_BEGIN_TRY
+ {
+ tmp_sid = H5Scombine_select(single_hyper_sid, H5S_SELECT_AND, regular_hyper_sid);
+ }
+ H5E_END_TRY;
+ VERIFY(tmp_sid, FAIL, "H5Scombine_select");
+
+ H5E_BEGIN_TRY
+ {
+ tmp_sid = H5Smodify_select(single_hyper_sid, H5S_SELECT_AND, regular_hyper_sid);
+ }
+ H5E_END_TRY;
+ VERIFY(tmp_sid, FAIL, "H5Smodify_select");
+
+ /* Test invalid operation inputs to the two functions */
+ H5E_BEGIN_TRY
+ {
+ tmp_sid = H5Scombine_select(single_hyper_sid, H5S_SELECT_SET, single_hyper_sid);
+ }
+ H5E_END_TRY;
+ VERIFY(tmp_sid, FAIL, "H5Scombine_select");
+
+ H5E_BEGIN_TRY
+ {
+ tmp_sid = H5Smodify_select(single_hyper_sid, H5S_SELECT_SET, single_hyper_sid);
+ }
+ H5E_END_TRY;
+ VERIFY(tmp_sid, FAIL, "H5Smodify_select");
+
+ /* Test inputs in case of non-existent space ids */
+ H5E_BEGIN_TRY
+ {
+ tmp_sid = H5Scombine_select(single_hyper_sid, H5S_SELECT_AND, non_existent_sid);
+ }
+ H5E_END_TRY;
+ VERIFY(tmp_sid, FAIL, "H5Scombine_select");
+
+ H5E_BEGIN_TRY
+ {
+ tmp_sid = H5Smodify_select(single_hyper_sid, H5S_SELECT_AND, non_existent_sid);
+ }
+ H5E_END_TRY;
+ VERIFY(tmp_sid, FAIL, "H5Smodify_select");
+
+ /* Close dataspaces */
+ ret = H5Sclose(single_pt_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+ ret = H5Sclose(single_hyper_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+ ret = H5Sclose(regular_hyper_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+} /* test_select_hyper_valid_combination() */
+
+/****************************************************************
+**
+** test_select_hyper_and_2d(): Test basic H5S (dataspace) selection code.
+** Tests 'and' of hyperslabs in 2-D
+**
+****************************************************************/
+static void
+test_select_hyper_and_2d(void)
+{
+ hid_t fid1; /* HDF5 File IDs */
+ hid_t dataset; /* Dataset ID */
+ hid_t sid1, sid2; /* Dataspace ID */
+ hsize_t dims1[] = {SPACE2_DIM1, SPACE2_DIM2};
+ hsize_t dims2[] = {SPACE2A_DIM1};
+ hsize_t start[SPACE2_RANK]; /* Starting location of hyperslab */
+ hsize_t stride[SPACE2_RANK]; /* Stride of hyperslab */
+ hsize_t count[SPACE2_RANK]; /* Element count of hyperslab */
+ hsize_t block[SPACE2_RANK]; /* Block size of hyperslab */
+ uint8_t *wbuf, /* buffer to write to disk */
+ *rbuf, /* buffer read from disk */
+ *tbuf, /* temporary buffer pointer */
+ *tbuf2; /* temporary buffer pointer */
+ int i, j; /* Counters */
+ herr_t ret; /* Generic return value */
+ hssize_t npoints; /* Number of elements in selection */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing Hyperslab Selection Functions with intersection of 2-D hyperslabs\n"));
+
+ /* Allocate write & read buffers */
+ wbuf = (uint8_t *)HDmalloc(sizeof(uint8_t) * SPACE2_DIM1 * SPACE2_DIM2);
+ CHECK_PTR(wbuf, "HDmalloc");
+ rbuf = (uint8_t *)HDcalloc(sizeof(uint8_t), (size_t)(SPACE2_DIM1 * SPACE2_DIM2));
+ CHECK_PTR(rbuf, "HDcalloc");
+
+ /* Initialize write buffer */
+ for (i = 0, tbuf = wbuf; i < SPACE2_DIM1; i++)
+ for (j = 0; j < SPACE2_DIM2; j++)
+ *tbuf++ = (uint8_t)((i * SPACE2_DIM2) + j);
+
+ /* Create file */
+ fid1 = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(fid1, FAIL, "H5Fcreate");
+
+ /* Create dataspace for dataset on disk */
+ sid1 = H5Screate_simple(SPACE2_RANK, dims1, NULL);
+ CHECK(sid1, FAIL, "H5Screate_simple");
+
+ /* Create dataspace for writing buffer */
+ sid2 = H5Screate_simple(SPACE2A_RANK, dims2, NULL);
+ CHECK(sid2, FAIL, "H5Screate_simple");
+
+ /* Select 10x10 hyperslab for disk dataset */
+ start[0] = 0;
+ start[1] = 0;
+ stride[0] = 1;
+ stride[1] = 1;
+ count[0] = 10;
+ count[1] = 10;
+ block[0] = 1;
+ block[1] = 1;
+ ret = H5Sselect_hyperslab(sid1, H5S_SELECT_SET, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
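+ /* Note: H5S_SELECT_AND keeps only the intersection of the current 10x10 block
+ * (rows/columns 0-9) and the new one (rows/columns 5-14): the 5x5 region at
+ * rows/columns 5-9, as verified below */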
+ /* Intersect overlapping 10x10 hyperslab */
+ start[0] = 5;
+ start[1] = 5;
+ stride[0] = 1;
+ stride[1] = 1;
+ count[0] = 10;
+ count[1] = 10;
+ block[0] = 1;
+ block[1] = 1;
+ ret = H5Sselect_hyperslab(sid1, H5S_SELECT_AND, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ npoints = H5Sget_select_npoints(sid1);
+ VERIFY(npoints, 5 * 5, "H5Sget_select_npoints");
+
+ /* Select 25-element hyperslab for memory dataset */
+ start[0] = 0;
+ stride[0] = 1;
+ count[0] = 25;
+ block[0] = 1;
+ ret = H5Sselect_hyperslab(sid2, H5S_SELECT_SET, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ npoints = H5Sget_select_npoints(sid2);
+ VERIFY(npoints, 5 * 5, "H5Sget_select_npoints");
+
+ /* Create a dataset */
+ dataset = H5Dcreate2(fid1, SPACE2_NAME, H5T_NATIVE_UCHAR, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(dataset, FAIL, "H5Dcreate2");
+
+ /* Write selection to disk */
+ ret = H5Dwrite(dataset, H5T_NATIVE_UCHAR, sid2, sid1, H5P_DEFAULT, wbuf);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* Read entire dataset from disk */
+ ret = H5Dread(dataset, H5T_NATIVE_UCHAR, H5S_ALL, H5S_ALL, H5P_DEFAULT, rbuf);
+ CHECK(ret, FAIL, "H5Dread");
+
+ /* Compare data read with data written out */
+ for (i = 0, tbuf = rbuf, tbuf2 = wbuf; i < SPACE2_DIM1; i++)
+ for (j = 0; j < SPACE2_DIM2; j++, tbuf++) {
+ if ((i >= 5 && i <= 9) && (j >= 5 && j <= 9)) {
+ if (*tbuf != *tbuf2)
+ HDprintf("%d: hyperslab values don't match!, i=%d, j=%d, *tbuf=%d, *tbuf2=%d\n", __LINE__,
+ i, j, (int)*tbuf, (int)*tbuf2);
+ tbuf2++;
+ } /* end if */
+ else {
+ if (*tbuf != 0)
+ HDprintf("%d: hyperslab element has wrong value!, i=%d, j=%d, *tbuf=%d\n", __LINE__, i, j,
+ (int)*tbuf);
+ } /* end else */
+ } /* end for */
+
+ /* Close memory dataspace */
+ ret = H5Sclose(sid2);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close disk dataspace */
+ ret = H5Sclose(sid1);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close Dataset */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close file */
+ ret = H5Fclose(fid1);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Free memory buffers */
+ HDfree(wbuf);
+ HDfree(rbuf);
+} /* test_select_hyper_and_2d() */
+
+/****************************************************************
+**
+** test_select_hyper_xor_2d(): Test basic H5S (dataspace) selection code.
+** Tests 'xor' of hyperslabs in 2-D
+**
+****************************************************************/
+static void
+test_select_hyper_xor_2d(void)
+{
+ hid_t fid1; /* HDF5 File IDs */
+ hid_t dataset; /* Dataset ID */
+ hid_t sid1, sid2; /* Dataspace ID */
+ hsize_t dims1[] = {SPACE2_DIM1, SPACE2_DIM2};
+ hsize_t dims2[] = {SPACE2A_DIM1};
+ hsize_t start[SPACE2_RANK]; /* Starting location of hyperslab */
+ hsize_t stride[SPACE2_RANK]; /* Stride of hyperslab */
+ hsize_t count[SPACE2_RANK]; /* Element count of hyperslab */
+ hsize_t block[SPACE2_RANK]; /* Block size of hyperslab */
+ uint8_t *wbuf, /* buffer to write to disk */
+ *rbuf, /* buffer read from disk */
+ *tbuf, /* temporary buffer pointer */
+ *tbuf2; /* temporary buffer pointer */
+ int i, j; /* Counters */
+ herr_t ret; /* Generic return value */
+ hssize_t npoints; /* Number of elements in selection */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing Hyperslab Selection Functions with XOR of 2-D hyperslabs\n"));
+
+ /* Allocate write & read buffers */
+ wbuf = (uint8_t *)HDmalloc(sizeof(uint8_t) * SPACE2_DIM1 * SPACE2_DIM2);
+ CHECK_PTR(wbuf, "HDmalloc");
+ rbuf = (uint8_t *)HDcalloc(sizeof(uint8_t), (size_t)(SPACE2_DIM1 * SPACE2_DIM2));
+ CHECK_PTR(rbuf, "HDcalloc");
+
+ /* Initialize write buffer */
+ for (i = 0, tbuf = wbuf; i < SPACE2_DIM1; i++)
+ for (j = 0; j < SPACE2_DIM2; j++)
+ *tbuf++ = (uint8_t)((i * SPACE2_DIM2) + j);
+
+ /* Create file */
+ fid1 = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(fid1, FAIL, "H5Fcreate");
+
+ /* Create dataspace for dataset on disk */
+ sid1 = H5Screate_simple(SPACE2_RANK, dims1, NULL);
+ CHECK(sid1, FAIL, "H5Screate_simple");
+
+ /* Create dataspace for writing buffer */
+ sid2 = H5Screate_simple(SPACE2A_RANK, dims2, NULL);
+ CHECK(sid2, FAIL, "H5Screate_simple");
+
+ /* Select 10x10 hyperslab for disk dataset */
+ start[0] = 0;
+ start[1] = 0;
+ stride[0] = 1;
+ stride[1] = 1;
+ count[0] = 10;
+ count[1] = 10;
+ block[0] = 1;
+ block[1] = 1;
+ ret = H5Sselect_hyperslab(sid1, H5S_SELECT_SET, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
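+ /* Note: H5S_SELECT_XOR keeps the elements in exactly one of the two 10x10 blocks;
+ * each block has 100 elements and they share 25, leaving 2*100 - 2*25 = 150 elements,
+ * as verified below */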
+ /* Exclusive-OR the current selection with an overlapping 10x10 hyperslab */
+ start[0] = 5;
+ start[1] = 5;
+ stride[0] = 1;
+ stride[1] = 1;
+ count[0] = 10;
+ count[1] = 10;
+ block[0] = 1;
+ block[1] = 1;
+ ret = H5Sselect_hyperslab(sid1, H5S_SELECT_XOR, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ npoints = H5Sget_select_npoints(sid1);
+ VERIFY(npoints, 150, "H5Sget_select_npoints");
+
+ /* Select 150-element hyperslab for memory dataset */
+ start[0] = 0;
+ stride[0] = 1;
+ count[0] = 150;
+ block[0] = 1;
+ ret = H5Sselect_hyperslab(sid2, H5S_SELECT_SET, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ npoints = H5Sget_select_npoints(sid2);
+ VERIFY(npoints, 150, "H5Sget_select_npoints");
+
+ /* Create a dataset */
+ dataset = H5Dcreate2(fid1, SPACE2_NAME, H5T_NATIVE_UCHAR, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(dataset, FAIL, "H5Dcreate2");
+
+ /* Write selection to disk */
+ ret = H5Dwrite(dataset, H5T_NATIVE_UCHAR, sid2, sid1, H5P_DEFAULT, wbuf);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* Read entire dataset from disk */
+ ret = H5Dread(dataset, H5T_NATIVE_UCHAR, H5S_ALL, H5S_ALL, H5P_DEFAULT, rbuf);
+ CHECK(ret, FAIL, "H5Dread");
+
+ /* Compare data read with data written out */
+ for (i = 0, tbuf = rbuf, tbuf2 = wbuf; i < SPACE2_DIM1; i++)
+ for (j = 0; j < SPACE2_DIM2; j++, tbuf++) {
+ if (((i >= 0 && i <= 4) && (j >= 0 && j <= 9)) ||
+ ((i >= 5 && i <= 9) && ((j >= 0 && j <= 4) || (j >= 10 && j <= 14))) ||
+ ((i >= 10 && i <= 14) && (j >= 5 && j <= 14))) {
+ if (*tbuf != *tbuf2)
+ HDprintf("%d: hyperslab values don't match!, i=%d, j=%d, *tbuf=%d, *tbuf2=%d\n", __LINE__,
+ i, j, (int)*tbuf, (int)*tbuf2);
+ tbuf2++;
+ } /* end if */
+ else {
+ if (*tbuf != 0)
+ HDprintf("%d: hyperslab element has wrong value!, i=%d, j=%d, *tbuf=%d\n", __LINE__, i, j,
+ (int)*tbuf);
+ } /* end else */
+ } /* end for */
+
+ /* Close memory dataspace */
+ ret = H5Sclose(sid2);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close disk dataspace */
+ ret = H5Sclose(sid1);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close Dataset */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close file */
+ ret = H5Fclose(fid1);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Free memory buffers */
+ HDfree(wbuf);
+ HDfree(rbuf);
+} /* test_select_hyper_xor_2d() */
+
+/****************************************************************
+**
+** test_select_hyper_notb_2d(): Test basic H5S (dataspace) selection code.
+** Tests 'notb' of hyperslabs in 2-D
+**
+****************************************************************/
+static void
+test_select_hyper_notb_2d(void)
+{
+ hid_t fid1; /* HDF5 File IDs */
+ hid_t dataset; /* Dataset ID */
+ hid_t sid1, sid2; /* Dataspace ID */
+ hsize_t dims1[] = {SPACE2_DIM1, SPACE2_DIM2};
+ hsize_t dims2[] = {SPACE2A_DIM1};
+ hsize_t start[SPACE2_RANK]; /* Starting location of hyperslab */
+ hsize_t stride[SPACE2_RANK]; /* Stride of hyperslab */
+ hsize_t count[SPACE2_RANK]; /* Element count of hyperslab */
+ hsize_t block[SPACE2_RANK]; /* Block size of hyperslab */
+ uint8_t *wbuf, /* buffer to write to disk */
+ *rbuf, /* buffer read from disk */
+ *tbuf, /* temporary buffer pointer */
+ *tbuf2; /* temporary buffer pointer */
+ int i, j; /* Counters */
+ herr_t ret; /* Generic return value */
+ hssize_t npoints; /* Number of elements in selection */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing Hyperslab Selection Functions with NOTB of 2-D hyperslabs\n"));
+
+ /* Allocate write & read buffers */
+ wbuf = (uint8_t *)HDmalloc(sizeof(uint8_t) * SPACE2_DIM1 * SPACE2_DIM2);
+ CHECK_PTR(wbuf, "HDmalloc");
+ rbuf = (uint8_t *)HDcalloc(sizeof(uint8_t), (size_t)(SPACE2_DIM1 * SPACE2_DIM2));
+ CHECK_PTR(rbuf, "HDcalloc");
+
+ /* Initialize write buffer */
+ for (i = 0, tbuf = wbuf; i < SPACE2_DIM1; i++)
+ for (j = 0; j < SPACE2_DIM2; j++)
+ *tbuf++ = (uint8_t)((i * SPACE2_DIM2) + j);
+
+ /* Create file */
+ fid1 = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(fid1, FAIL, "H5Fcreate");
+
+ /* Create dataspace for dataset on disk */
+ sid1 = H5Screate_simple(SPACE2_RANK, dims1, NULL);
+ CHECK(sid1, FAIL, "H5Screate_simple");
+
+ /* Create dataspace for writing buffer */
+ sid2 = H5Screate_simple(SPACE2A_RANK, dims2, NULL);
+ CHECK(sid2, FAIL, "H5Screate_simple");
+
+ /* Select 10x10 hyperslab for disk dataset */
+ start[0] = 0;
+ start[1] = 0;
+ stride[0] = 1;
+ stride[1] = 1;
+ count[0] = 10;
+ count[1] = 10;
+ block[0] = 1;
+ block[1] = 1;
+ ret = H5Sselect_hyperslab(sid1, H5S_SELECT_SET, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
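+ /* Note: H5S_SELECT_NOTB keeps the elements of the current selection that are not in
+ * the new hyperslab: 100 - 25 = 75 elements, as verified below */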
+ /* Remove overlapping 10x10 hyperslab from the current selection (NOTB) */
+ start[0] = 5;
+ start[1] = 5;
+ stride[0] = 1;
+ stride[1] = 1;
+ count[0] = 10;
+ count[1] = 10;
+ block[0] = 1;
+ block[1] = 1;
+ ret = H5Sselect_hyperslab(sid1, H5S_SELECT_NOTB, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ npoints = H5Sget_select_npoints(sid1);
+ VERIFY(npoints, 75, "H5Sget_select_npoints");
+
+ /* Select 75-element hyperslab for memory dataset */
+ start[0] = 0;
+ stride[0] = 1;
+ count[0] = 75;
+ block[0] = 1;
+ ret = H5Sselect_hyperslab(sid2, H5S_SELECT_SET, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ npoints = H5Sget_select_npoints(sid2);
+ VERIFY(npoints, 75, "H5Sget_select_npoints");
+
+ /* Create a dataset */
+ dataset = H5Dcreate2(fid1, SPACE2_NAME, H5T_NATIVE_UCHAR, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(dataset, FAIL, "H5Dcreate2");
+
+ /* Write selection to disk */
+ ret = H5Dwrite(dataset, H5T_NATIVE_UCHAR, sid2, sid1, H5P_DEFAULT, wbuf);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* Read entire dataset from disk */
+ ret = H5Dread(dataset, H5T_NATIVE_UCHAR, H5S_ALL, H5S_ALL, H5P_DEFAULT, rbuf);
+ CHECK(ret, FAIL, "H5Dread");
+
+ /* Compare data read with data written out */
+ for (i = 0, tbuf = rbuf, tbuf2 = wbuf; i < SPACE2_DIM1; i++)
+ for (j = 0; j < SPACE2_DIM2; j++, tbuf++) {
+ if (((i >= 0 && i <= 4) && (j >= 0 && j <= 9)) || ((i >= 5 && i <= 9) && (j >= 0 && j <= 4))) {
+ if (*tbuf != *tbuf2)
+ HDprintf("%d: hyperslab values don't match!, i=%d, j=%d, *tbuf=%d, *tbuf2=%d\n", __LINE__,
+ i, j, (int)*tbuf, (int)*tbuf2);
+ tbuf2++;
+ } /* end if */
+ else {
+ if (*tbuf != 0)
+ HDprintf("%d: hyperslab element has wrong value!, i=%d, j=%d, *tbuf=%d\n", __LINE__, i, j,
+ (int)*tbuf);
+ } /* end else */
+ } /* end for */
+
+ /* Close memory dataspace */
+ ret = H5Sclose(sid2);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close disk dataspace */
+ ret = H5Sclose(sid1);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close Dataset */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close file */
+ ret = H5Fclose(fid1);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Free memory buffers */
+ HDfree(wbuf);
+ HDfree(rbuf);
+} /* test_select_hyper_notb_2d() */
+
+/****************************************************************
+**
+** test_select_hyper_nota_2d(): Test basic H5S (dataspace) selection code.
+** Tests 'nota' of hyperslabs in 2-D
+**
+****************************************************************/
+static void
+test_select_hyper_nota_2d(void)
+{
+ hid_t fid1; /* HDF5 File IDs */
+ hid_t dataset; /* Dataset ID */
+ hid_t sid1, sid2; /* Dataspace ID */
+ hsize_t dims1[] = {SPACE2_DIM1, SPACE2_DIM2};
+ hsize_t dims2[] = {SPACE2A_DIM1};
+ hsize_t start[SPACE2_RANK]; /* Starting location of hyperslab */
+ hsize_t stride[SPACE2_RANK]; /* Stride of hyperslab */
+ hsize_t count[SPACE2_RANK]; /* Element count of hyperslab */
+ hsize_t block[SPACE2_RANK]; /* Block size of hyperslab */
+ uint8_t *wbuf, /* buffer to write to disk */
+ *rbuf, /* buffer read from disk */
+ *tbuf, /* temporary buffer pointer */
+ *tbuf2; /* temporary buffer pointer */
+ int i, j; /* Counters */
+ herr_t ret; /* Generic return value */
+ hssize_t npoints; /* Number of elements in selection */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing Hyperslab Selection Functions with NOTA of 2-D hyperslabs\n"));
+
+ /* Allocate write & read buffers */
+ wbuf = (uint8_t *)HDmalloc(sizeof(uint8_t) * SPACE2_DIM1 * SPACE2_DIM2);
+ CHECK_PTR(wbuf, "HDmalloc");
+ rbuf = (uint8_t *)HDcalloc(sizeof(uint8_t), (size_t)(SPACE2_DIM1 * SPACE2_DIM2));
+ CHECK_PTR(rbuf, "HDcalloc");
+
+ /* Initialize write buffer */
+ for (i = 0, tbuf = wbuf; i < SPACE2_DIM1; i++)
+ for (j = 0; j < SPACE2_DIM2; j++)
+ *tbuf++ = (uint8_t)((i * SPACE2_DIM2) + j);
+
+ /* Create file */
+ fid1 = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(fid1, FAIL, "H5Fcreate");
+
+ /* Create dataspace for dataset on disk */
+ sid1 = H5Screate_simple(SPACE2_RANK, dims1, NULL);
+ CHECK(sid1, FAIL, "H5Screate_simple");
+
+ /* Create dataspace for writing buffer */
+ sid2 = H5Screate_simple(SPACE2A_RANK, dims2, NULL);
+ CHECK(sid2, FAIL, "H5Screate_simple");
+
+ /* Select 10x10 hyperslab for disk dataset */
+ start[0] = 0;
+ start[1] = 0;
+ stride[0] = 1;
+ stride[1] = 1;
+ count[0] = 10;
+ count[1] = 10;
+ block[0] = 1;
+ block[1] = 1;
+ ret = H5Sselect_hyperslab(sid1, H5S_SELECT_SET, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* Select an overlapping 10x10 hyperslab and remove the current selection from it ('nota') */
+ start[0] = 5;
+ start[1] = 5;
+ stride[0] = 1;
+ stride[1] = 1;
+ count[0] = 10;
+ count[1] = 10;
+ block[0] = 1;
+ block[1] = 1;
+ ret = H5Sselect_hyperslab(sid1, H5S_SELECT_NOTA, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
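+ /* 'nota' keeps the part of the new hyperslab outside the original selection, */
+ /* so 100 elements minus the 25-element overlap leaves 75 elements selected */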
+ npoints = H5Sget_select_npoints(sid1);
+ VERIFY(npoints, 75, "H5Sget_select_npoints");
+
+ /* Select 75-element hyperslab for memory dataset */
+ start[0] = 0;
+ stride[0] = 1;
+ count[0] = 75;
+ block[0] = 1;
+ ret = H5Sselect_hyperslab(sid2, H5S_SELECT_SET, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ npoints = H5Sget_select_npoints(sid2);
+ VERIFY(npoints, 75, "H5Sget_select_npoints");
+
+ /* Create a dataset */
+ dataset = H5Dcreate2(fid1, SPACE2_NAME, H5T_NATIVE_UCHAR, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(dataset, FAIL, "H5Dcreate2");
+
+ /* Write selection to disk */
+ ret = H5Dwrite(dataset, H5T_NATIVE_UCHAR, sid2, sid1, H5P_DEFAULT, wbuf);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* Read entire dataset from disk */
+ ret = H5Dread(dataset, H5T_NATIVE_UCHAR, H5S_ALL, H5S_ALL, H5P_DEFAULT, rbuf);
+ CHECK(ret, FAIL, "H5Dread");
+
+ /* Compare data read with data written out */
+ for (i = 0, tbuf = rbuf, tbuf2 = wbuf; i < SPACE2_DIM1; i++)
+ for (j = 0; j < SPACE2_DIM2; j++, tbuf++) {
+ if (((i >= 10 && i <= 14) && (j >= 5 && j <= 14)) ||
+ ((i >= 5 && i <= 9) && (j >= 10 && j <= 14))) {
+ if (*tbuf != *tbuf2)
+ TestErrPrintf("%d: hyperslab values don't match!, i=%d, j=%d, *tbuf=%d, *tbuf2=%d\n",
+ __LINE__, i, j, (int)*tbuf, (int)*tbuf2);
+ tbuf2++;
+ } /* end if */
+ else {
+ if (*tbuf != 0)
+ TestErrPrintf("%d: hyperslab element has wrong value!, i=%d, j=%d, *tbuf=%d\n", __LINE__,
+ i, j, (int)*tbuf);
+ } /* end else */
+ } /* end for */
+
+ /* Close memory dataspace */
+ ret = H5Sclose(sid2);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close disk dataspace */
+ ret = H5Sclose(sid1);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close Dataset */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close file */
+ ret = H5Fclose(fid1);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Free memory buffers */
+ HDfree(wbuf);
+ HDfree(rbuf);
+} /* test_select_hyper_nota_2d() */
+
+/****************************************************************
+**
+** test_select_hyper_iter2(): Iterator for checking hyperslab iteration
+**
+****************************************************************/
+static herr_t
+test_select_hyper_iter2(void *_elem, hid_t H5_ATTR_UNUSED type_id, unsigned ndim, const hsize_t *point,
+ void *_operator_data)
+{
+ int *tbuf = (int *)_elem, /* temporary buffer pointer */
+ **tbuf2 = (int **)_operator_data; /* temporary buffer handle */
+ unsigned u; /* Local counting variable */
+
+ if (*tbuf != **tbuf2) {
+ TestErrPrintf("Error in hyperslab iteration!\n");
+ HDprintf("location: { ");
+ for (u = 0; u < ndim; u++) {
+ HDprintf("%2d", (int)point[u]);
+ if (u < (ndim - 1))
+ HDprintf(", ");
+ } /* end for */
+ HDprintf("}\n");
+ HDprintf("*tbuf=%d, **tbuf2=%d\n", *tbuf, **tbuf2);
+ return (-1);
+ } /* end if */
+ else {
+ (*tbuf2)++;
+ return (0);
+ }
+} /* end test_select_hyper_iter2() */
+
+/****************************************************************
+**
+** test_select_hyper_union_random_5d(): Test basic H5S (dataspace) selection code.
+** Tests random unions of 5-D hyperslabs
+**
+****************************************************************/
+static void
+test_select_hyper_union_random_5d(hid_t read_plist)
+{
+ hid_t fid1; /* HDF5 File IDs */
+ hid_t dataset; /* Dataset ID */
+ hid_t sid1, sid2; /* Dataspace ID */
+ hsize_t dims1[] = {SPACE5_DIM1, SPACE5_DIM2, SPACE5_DIM3, SPACE5_DIM4, SPACE5_DIM5};
+ hsize_t dims2[] = {SPACE6_DIM1};
+ hsize_t start[SPACE5_RANK]; /* Starting location of hyperslab */
+ hsize_t count[SPACE5_RANK]; /* Element count of hyperslab */
+ int *wbuf, /* buffer to write to disk */
+ *rbuf, /* buffer read from disk */
+ *tbuf; /* temporary buffer pointer */
+ int i, j, k, l, m; /* Counters */
+ herr_t ret; /* Generic return value */
+ hssize_t npoints, /* Number of elements in file selection */
+ npoints2; /* Number of elements in memory selection */
+ unsigned seed; /* Random number seed for each test */
+ unsigned test_num; /* Count of tests being executed */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing Hyperslab Selection Functions with random unions of 5-D hyperslabs\n"));
+
+ /* Allocate write & read buffers */
+ wbuf = (int *)HDmalloc(sizeof(int) * SPACE5_DIM1 * SPACE5_DIM2 * SPACE5_DIM3 * SPACE5_DIM4 * SPACE5_DIM5);
+ CHECK_PTR(wbuf, "HDmalloc");
+ rbuf = (int *)HDcalloc(sizeof(int),
+ (size_t)(SPACE5_DIM1 * SPACE5_DIM2 * SPACE5_DIM3 * SPACE5_DIM4 * SPACE5_DIM5));
+ CHECK_PTR(rbuf, "HDcalloc");
+
+ /* Initialize write buffer */
+ for (i = 0, tbuf = wbuf; i < SPACE5_DIM1; i++)
+ for (j = 0; j < SPACE5_DIM2; j++)
+ for (k = 0; k < SPACE5_DIM3; k++)
+ for (l = 0; l < SPACE5_DIM4; l++)
+ for (m = 0; m < SPACE5_DIM5; m++)
+ *tbuf++ = (int)(((((((i * SPACE5_DIM2) + j) * SPACE5_DIM3) + k) * SPACE5_DIM4) + l) *
+ SPACE5_DIM5) +
+ m;
+
+ /* Create file */
+ fid1 = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(fid1, FAIL, "H5Fcreate");
+
+ /* Create dataspace for dataset on disk */
+ sid1 = H5Screate_simple(SPACE5_RANK, dims1, NULL);
+ CHECK(sid1, FAIL, "H5Screate_simple");
+
+ /* Create a dataset */
+ dataset = H5Dcreate2(fid1, SPACE5_NAME, H5T_NATIVE_INT, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(dataset, FAIL, "H5Dcreate2");
+
+ /* Write entire dataset to disk */
+ ret = H5Dwrite(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, wbuf);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* Create dataspace for reading buffer */
+ sid2 = H5Screate_simple(SPACE6_RANK, dims2, NULL);
+ CHECK(sid2, FAIL, "H5Screate_simple");
+
+ /* Get initial random # seed */
+ seed = (unsigned)HDtime(NULL) + (unsigned)HDclock();
+
+ /* Crunch through a bunch of random hyperslab reads from the file dataset */
+ for (test_num = 0; test_num < NRAND_HYPER; test_num++) {
+ /* Save random # seed for later use */
+ /* (Used in case of errors, to regenerate the hyperslab sequence) */
+ seed += (unsigned)HDclock();
+ HDsrandom(seed);
+
+ for (i = 0; i < NHYPERSLABS; i++) {
+ /* Select random hyperslab location & size for selection */
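+ /* (start[j] is drawn from [0, dims1[j] - 1] and count[j] from */
+ /* [1, dims1[j] - start[j]], so each slab stays within the extent) */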
+ for (j = 0; j < SPACE5_RANK; j++) {
+ start[j] = ((hsize_t)HDrandom() % dims1[j]);
+ count[j] = (((hsize_t)HDrandom() % (dims1[j] - start[j])) + 1);
+ } /* end for */
+
+ /* Select hyperslab */
+ ret = H5Sselect_hyperslab(sid1, (i == 0 ? H5S_SELECT_SET : H5S_SELECT_OR), start, NULL, count,
+ NULL);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+ if (ret < 0) {
+ TestErrPrintf("Random hyperslabs for seed %u failed!\n", seed);
+ break;
+ } /* end if */
+ } /* end for */
+
+ /* Get the number of elements selected */
+ npoints = H5Sget_select_npoints(sid1);
+ CHECK(npoints, 0, "H5Sget_select_npoints");
+
+ /* Select linear 1-D hyperslab for memory dataset */
+ start[0] = 0;
+ count[0] = (hsize_t)npoints;
+ ret = H5Sselect_hyperslab(sid2, H5S_SELECT_SET, start, NULL, count, NULL);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ npoints2 = H5Sget_select_npoints(sid2);
+ VERIFY(npoints, npoints2, "H5Sget_select_npoints");
+
+ /* Read selection from disk */
+ ret = H5Dread(dataset, H5T_NATIVE_INT, sid2, sid1, read_plist, rbuf);
+ CHECK(ret, FAIL, "H5Dread");
+ if (ret < 0) {
+ TestErrPrintf("Random hyperslabs for seed %u failed!\n", seed);
+ break;
+ } /* end if */
+
+ /* Compare data read with data written out */
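+ /* (H5Diterate() visits wbuf in the file selection's order, which matches */
+ /* the order elements were packed into rbuf, so tbuf can advance linearly) */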
+ tbuf = rbuf;
+ ret = H5Diterate(wbuf, H5T_NATIVE_INT, sid1, test_select_hyper_iter2, &tbuf);
+ if (ret < 0) {
+ TestErrPrintf("Random hyperslabs for seed %u failed!\n", seed);
+ break;
+ } /* end if */
+
+ /* Set the read buffer back to all zeroes */
+ HDmemset(rbuf, 0, sizeof(int) * (size_t)SPACE6_DIM1);
+ } /* end for */
+
+ /* Close memory dataspace */
+ ret = H5Sclose(sid2);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close disk dataspace */
+ ret = H5Sclose(sid1);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close Dataset */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close file */
+ ret = H5Fclose(fid1);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Free memory buffers */
+ HDfree(wbuf);
+ HDfree(rbuf);
+} /* test_select_hyper_union_random_5d() */
+
+/****************************************************************
+**
+** test_select_hyper_chunk(): Test basic H5S (dataspace) selection code.
+** Tests large hyperslab selection in chunked dataset
+**
+****************************************************************/
+static void
+test_select_hyper_chunk(hid_t fapl_plist, hid_t xfer_plist)
+{
+ hsize_t dimsf[3]; /* dataset dimensions */
+ hsize_t chunk_dimsf[3] = {CHUNK_X, CHUNK_Y, CHUNK_Z}; /* chunk sizes */
+ short *data; /* data to write */
+ short *tmpdata; /* temporary pointer into the write data */
+
+ /*
+ * Data and output buffer initialization.
+ */
+ hid_t file, dataset; /* handles */
+ hid_t dataspace;
+ hid_t memspace;
+ hid_t plist;
+ hsize_t dimsm[3]; /* memory space dimensions */
+ hsize_t dims_out[3]; /* dataset dimensions */
+ herr_t status;
+
+ short *data_out; /* output buffer */
+ short *tmpdata_out; /* temporary pointer into the output buffer */
+
+ hsize_t count[3]; /* size of the hyperslab in the file */
+ hsize_t offset[3]; /* hyperslab offset in the file */
+ hsize_t count_out[3]; /* size of the hyperslab in memory */
+ hsize_t offset_out[3]; /* hyperslab offset in memory */
+ int i, j, k, status_n, rank;
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing Hyperslab I/O on Large Chunks\n"));
+
+ /* Allocate the transfer buffers */
+ data = (short *)HDmalloc(sizeof(short) * X * Y * Z);
+ CHECK_PTR(data, "HDmalloc");
+ data_out = (short *)HDcalloc((size_t)(NX * NY * NZ), sizeof(short));
+ CHECK_PTR(data_out, "HDcalloc");
+
+ /*
+ * Data buffer initialization.
+ */
+ tmpdata = data;
+ for (j = 0; j < X; j++)
+ for (i = 0; i < Y; i++)
+ for (k = 0; k < Z; k++)
+ *tmpdata++ = (short)((k + 1) % 256);
+
+ /*
+ * Create a new file using H5F_ACC_TRUNC access,
+ * the default file creation properties, and the default file
+ * access properties.
+ */
+ file = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_plist);
+ CHECK(file, FAIL, "H5Fcreate");
+
+ /*
+ * Describe the size of the array and create the dataspace for fixed
+ * size dataset.
+ */
+ dimsf[0] = X;
+ dimsf[1] = Y;
+ dimsf[2] = Z;
+ dataspace = H5Screate_simple(RANK_F, dimsf, NULL);
+ CHECK(dataspace, FAIL, "H5Screate_simple");
+
+ /*
+ * Create a new dataset within the file using defined dataspace and
+ * chunking properties.
+ */
+ plist = H5Pcreate(H5P_DATASET_CREATE);
+ CHECK(plist, FAIL, "H5Pcreate");
+ status = H5Pset_chunk(plist, RANK_F, chunk_dimsf);
+ CHECK(status, FAIL, "H5Pset_chunk");
+ dataset = H5Dcreate2(file, DATASETNAME, H5T_NATIVE_UCHAR, dataspace, H5P_DEFAULT, plist, H5P_DEFAULT);
+ CHECK(dataset, FAIL, "H5Dcreate2");
+
+ /*
+ * Define hyperslab in the dataset.
+ */
+ offset[0] = 0;
+ offset[1] = 0;
+ offset[2] = 0;
+ count[0] = NX_SUB;
+ count[1] = NY_SUB;
+ count[2] = NZ_SUB;
+ status = H5Sselect_hyperslab(dataspace, H5S_SELECT_SET, offset, NULL, count, NULL);
+ CHECK(status, FAIL, "H5Sselect_hyperslab");
+
+ /*
+ * Define the memory dataspace.
+ */
+ dimsm[0] = NX;
+ dimsm[1] = NY;
+ dimsm[2] = NZ;
+ memspace = H5Screate_simple(RANK_M, dimsm, NULL);
+ CHECK(memspace, FAIL, "H5Screate_simple");
+
+ /*
+ * Define memory hyperslab.
+ */
+ offset_out[0] = 0;
+ offset_out[1] = 0;
+ offset_out[2] = 0;
+ count_out[0] = NX_SUB;
+ count_out[1] = NY_SUB;
+ count_out[2] = NZ_SUB;
+ status = H5Sselect_hyperslab(memspace, H5S_SELECT_SET, offset_out, NULL, count_out, NULL);
+ CHECK(status, FAIL, "H5Sselect_hyperslab");
+
+ /*
+ * Write the data to the dataset using hyperslabs
+ */
+ status = H5Dwrite(dataset, H5T_NATIVE_SHORT, memspace, dataspace, xfer_plist, data);
+ CHECK(status, FAIL, "H5Dwrite");
+
+ /*
+ * Close/release resources.
+ */
+ status = H5Pclose(plist);
+ CHECK(status, FAIL, "H5Pclose");
+ status = H5Sclose(dataspace);
+ CHECK(status, FAIL, "H5Sclose");
+ status = H5Sclose(memspace);
+ CHECK(status, FAIL, "H5Sclose");
+ status = H5Dclose(dataset);
+ CHECK(status, FAIL, "H5Dclose");
+ status = H5Fclose(file);
+ CHECK(status, FAIL, "H5Fclose");
+
+ /*************************************************************
+
+ This reads the hyperslab back from the file just
+ created into a matching 3-dimensional hyperslab of
+ the in-memory array.
+
+ ************************************************************/
+
+ /*
+ * Open the file and the dataset.
+ */
+ file = H5Fopen(FILENAME, H5F_ACC_RDONLY, fapl_plist);
+ CHECK(file, FAIL, "H5Fopen");
+ dataset = H5Dopen2(file, DATASETNAME, H5P_DEFAULT);
+ CHECK(dataset, FAIL, "H5Dopen2");
+
+ dataspace = H5Dget_space(dataset); /* dataspace handle */
+ CHECK(dataspace, FAIL, "H5Dget_space");
+ rank = H5Sget_simple_extent_ndims(dataspace);
+ VERIFY(rank, 3, "H5Sget_simple_extent_ndims");
+ status_n = H5Sget_simple_extent_dims(dataspace, dims_out, NULL);
+ CHECK(status_n, FAIL, "H5Sget_simple_extent_dims");
+ VERIFY(dims_out[0], dimsf[0], "Dataset dimensions");
+ VERIFY(dims_out[1], dimsf[1], "Dataset dimensions");
+ VERIFY(dims_out[2], dimsf[2], "Dataset dimensions");
+
+ /*
+ * Define hyperslab in the dataset.
+ */
+ offset[0] = 0;
+ offset[1] = 0;
+ offset[2] = 0;
+ count[0] = NX_SUB;
+ count[1] = NY_SUB;
+ count[2] = NZ_SUB;
+ status = H5Sselect_hyperslab(dataspace, H5S_SELECT_SET, offset, NULL, count, NULL);
+ CHECK(status, FAIL, "H5Sselect_hyperslab");
+
+ /*
+ * Define the memory dataspace.
+ */
+ dimsm[0] = NX;
+ dimsm[1] = NY;
+ dimsm[2] = NZ;
+ memspace = H5Screate_simple(RANK_M, dimsm, NULL);
+ CHECK(memspace, FAIL, "H5Screate_simple");
+
+ /*
+ * Define memory hyperslab.
+ */
+ offset_out[0] = 0;
+ offset_out[1] = 0;
+ offset_out[2] = 0;
+ count_out[0] = NX_SUB;
+ count_out[1] = NY_SUB;
+ count_out[2] = NZ_SUB;
+ status = H5Sselect_hyperslab(memspace, H5S_SELECT_SET, offset_out, NULL, count_out, NULL);
+ CHECK(status, FAIL, "H5Sselect_hyperslab");
+
+ /*
+ * Read data from hyperslab in the file into the hyperslab in
+ * memory and display.
+ */
+ status = H5Dread(dataset, H5T_NATIVE_SHORT, memspace, dataspace, xfer_plist, data_out);
+ CHECK(status, FAIL, "H5Dread");
+
+ /* Compare data written with data read in */
+ tmpdata = data;
+ tmpdata_out = data_out;
+ for (j = 0; j < X; j++)
+ for (i = 0; i < Y; i++)
+ for (k = 0; k < Z; k++, tmpdata++, tmpdata_out++) {
+ if (*tmpdata != *tmpdata_out)
+ TestErrPrintf("Line %d: Error! j=%d, i=%d, k=%d, *tmpdata=%x, *tmpdata_out=%x\n",
+ __LINE__, j, i, k, (unsigned)*tmpdata, (unsigned)*tmpdata_out);
+ } /* end for */
+
+ /*
+ * Close and release resources.
+ */
+ status = H5Dclose(dataset);
+ CHECK(status, FAIL, "H5Dclose");
+ status = H5Sclose(dataspace);
+ CHECK(status, FAIL, "H5Sclose");
+ status = H5Sclose(memspace);
+ CHECK(status, FAIL, "H5Sclose");
+ status = H5Fclose(file);
+ CHECK(status, FAIL, "H5Fclose");
+ HDfree(data);
+ HDfree(data_out);
+} /* test_select_hyper_chunk() */
+
+/****************************************************************
+**
+** test_select_point_chunk(): Test basic H5S (dataspace) selection code.
+** Tests combinations of hyperslab and point selections on
+** chunked datasets.
+**
+****************************************************************/
+static void
+test_select_point_chunk(void)
+{
+ hsize_t dimsf[SPACE7_RANK]; /* dataset dimensions */
+ hsize_t chunk_dimsf[SPACE7_RANK] = {SPACE7_CHUNK_DIM1, SPACE7_CHUNK_DIM2}; /* chunk sizes */
+ unsigned *data; /* data to write */
+ unsigned *tmpdata; /* temporary pointer into the write data */
+
+ /*
+ * Data and output buffer initialization.
+ */
+ hid_t file, dataset; /* handles */
+ hid_t dataspace;
+ hid_t pnt1_space; /* Dataspace to hold 1st point selection */
+ hid_t pnt2_space; /* Dataspace to hold 2nd point selection */
+ hid_t hyp1_space; /* Dataspace to hold 1st hyperslab selection */
+ hid_t hyp2_space; /* Dataspace to hold 2nd hyperslab selection */
+ hid_t dcpl;
+ herr_t ret; /* Generic return value */
+
+ unsigned *data_out; /* output buffer */
+
+ hsize_t start[SPACE7_RANK]; /* hyperslab offset */
+ hsize_t count[SPACE7_RANK]; /* size of the hyperslab */
+ hsize_t points[SPACE7_NPOINTS][SPACE7_RANK]; /* points for selection */
+ unsigned i, j; /* Local index variables */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing Point Selections on Chunked Datasets\n"));
+
+ /* Allocate the transfer buffers */
+ data = (unsigned *)HDmalloc(sizeof(unsigned) * SPACE7_DIM1 * SPACE7_DIM2);
+ CHECK_PTR(data, "HDmalloc");
+ data_out = (unsigned *)HDcalloc((size_t)(SPACE7_DIM1 * SPACE7_DIM2), sizeof(unsigned));
+ CHECK_PTR(data_out, "HDcalloc");
+
+ /*
+ * Data buffer initialization.
+ */
+ tmpdata = data;
+ for (i = 0; i < SPACE7_DIM1; i++)
+ for (j = 0; j < SPACE7_DIM2; j++)
+ *tmpdata++ = ((i * SPACE7_DIM2) + j) % 256;
+
+ /*
+ * Create a new file using H5F_ACC_TRUNC access,
+ * the default file creation properties and file
+ * access properties.
+ */
+ file = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(file, FAIL, "H5Fcreate");
+
+ /* Create file dataspace */
+ dimsf[0] = SPACE7_DIM1;
+ dimsf[1] = SPACE7_DIM2;
+ dataspace = H5Screate_simple(SPACE7_RANK, dimsf, NULL);
+ CHECK(dataspace, FAIL, "H5Screate_simple");
+
+ /*
+ * Create a new dataset within the file using defined dataspace and
+ * chunking properties.
+ */
+ dcpl = H5Pcreate(H5P_DATASET_CREATE);
+ CHECK(dcpl, FAIL, "H5Pcreate");
+ ret = H5Pset_chunk(dcpl, SPACE7_RANK, chunk_dimsf);
+ CHECK(ret, FAIL, "H5Pset_chunk");
+ dataset = H5Dcreate2(file, DATASETNAME, H5T_NATIVE_UCHAR, dataspace, H5P_DEFAULT, dcpl, H5P_DEFAULT);
+ CHECK(dataset, FAIL, "H5Dcreate2");
+
+ /* Create 1st point selection */
+ pnt1_space = H5Scopy(dataspace);
+ CHECK(pnt1_space, FAIL, "H5Scopy");
+
+ points[0][0] = 3;
+ points[0][1] = 3;
+ points[1][0] = 3;
+ points[1][1] = 8;
+ points[2][0] = 8;
+ points[2][1] = 3;
+ points[3][0] = 8;
+ points[3][1] = 8;
+ points[4][0] = 1; /* In same chunk as point #0, but "earlier" in chunk */
+ points[4][1] = 1;
+ points[5][0] = 1; /* In same chunk as point #1, but "earlier" in chunk */
+ points[5][1] = 6;
+ points[6][0] = 6; /* In same chunk as point #2, but "earlier" in chunk */
+ points[6][1] = 1;
+ points[7][0] = 6; /* In same chunk as point #3, but "earlier" in chunk */
+ points[7][1] = 6;
+ ret = H5Sselect_elements(pnt1_space, H5S_SELECT_SET, (size_t)SPACE7_NPOINTS, (const hsize_t *)points);
+ CHECK(ret, FAIL, "H5Sselect_elements");
+
+ /* Create 1st hyperslab selection */
+ hyp1_space = H5Scopy(dataspace);
+ CHECK(hyp1_space, FAIL, "H5Scopy");
+
+ start[0] = 2;
+ start[1] = 2;
+ count[0] = 4;
+ count[1] = 2;
+ ret = H5Sselect_hyperslab(hyp1_space, H5S_SELECT_SET, start, NULL, count, NULL);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* Write out data using 1st point selection for file & hyperslab for memory */
+ ret = H5Dwrite(dataset, H5T_NATIVE_UINT, hyp1_space, pnt1_space, H5P_DEFAULT, data);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* Create 2nd point selection */
+ pnt2_space = H5Scopy(dataspace);
+ CHECK(pnt2_space, FAIL, "H5Scopy");
+
+ points[0][0] = 4;
+ points[0][1] = 4;
+ points[1][0] = 4;
+ points[1][1] = 9;
+ points[2][0] = 9;
+ points[2][1] = 4;
+ points[3][0] = 9;
+ points[3][1] = 9;
+ points[4][0] = 2; /* In same chunk as point #0, but "earlier" in chunk */
+ points[4][1] = 2;
+ points[5][0] = 2; /* In same chunk as point #1, but "earlier" in chunk */
+ points[5][1] = 7;
+ points[6][0] = 7; /* In same chunk as point #2, but "earlier" in chunk */
+ points[6][1] = 2;
+ points[7][0] = 7; /* In same chunk as point #3, but "earlier" in chunk */
+ points[7][1] = 7;
+ ret = H5Sselect_elements(pnt2_space, H5S_SELECT_SET, (size_t)SPACE7_NPOINTS, (const hsize_t *)points);
+ CHECK(ret, FAIL, "H5Sselect_elements");
+
+ /* Create 2nd hyperslab selection */
+ hyp2_space = H5Scopy(dataspace);
+ CHECK(hyp2_space, FAIL, "H5Scopy");
+
+ start[0] = 2;
+ start[1] = 4;
+ count[0] = 4;
+ count[1] = 2;
+ ret = H5Sselect_hyperslab(hyp2_space, H5S_SELECT_SET, start, NULL, count, NULL);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* Write out data using 2nd hyperslab selection for file & point for memory */
+ ret = H5Dwrite(dataset, H5T_NATIVE_UINT, pnt2_space, hyp2_space, H5P_DEFAULT, data);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* Close everything (except selections) */
+ ret = H5Pclose(dcpl);
+ CHECK(ret, FAIL, "H5Pclose");
+ ret = H5Sclose(dataspace);
+ CHECK(ret, FAIL, "H5Sclose");
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+ ret = H5Fclose(file);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Re-open file & dataset */
+ file = H5Fopen(FILENAME, H5F_ACC_RDONLY, H5P_DEFAULT);
+ CHECK(file, FAIL, "H5Fopen");
+ dataset = H5Dopen2(file, DATASETNAME, H5P_DEFAULT);
+ CHECK(dataset, FAIL, "H5Dopen2");
+
+ /* Read data using 1st point selection for file and hyperslab for memory */
+ ret = H5Dread(dataset, H5T_NATIVE_UINT, hyp1_space, pnt1_space, H5P_DEFAULT, data_out);
+ CHECK(ret, FAIL, "H5Dread");
+
+ /* Verify data (later) */
+
+ /* Read data using 2nd hyperslab selection for file and point for memory */
+ ret = H5Dread(dataset, H5T_NATIVE_UINT, pnt2_space, hyp2_space, H5P_DEFAULT, data_out);
+ CHECK(ret, FAIL, "H5Dread");
+
+ /* Verify data (later) */
+
+ /* Close everything (including selections) */
+ ret = H5Sclose(pnt1_space);
+ CHECK(ret, FAIL, "H5Sclose");
+ ret = H5Sclose(pnt2_space);
+ CHECK(ret, FAIL, "H5Sclose");
+ ret = H5Sclose(hyp1_space);
+ CHECK(ret, FAIL, "H5Sclose");
+ ret = H5Sclose(hyp2_space);
+ CHECK(ret, FAIL, "H5Sclose");
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+ ret = H5Fclose(file);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ HDfree(data);
+ HDfree(data_out);
+} /* test_select_point_chunk() */
+
+/****************************************************************
+**
+** test_select_scalar_chunk(): Test basic H5S (dataspace) selection code.
+** Tests using a scalar dataspace (in memory) to access chunked datasets.
+**
+****************************************************************/
+static void
+test_select_scalar_chunk(void)
+{
+ hid_t file_id; /* File ID */
+ hid_t dcpl; /* Dataset creation property list */
+ hid_t dsid; /* Dataset ID */
+ hid_t sid; /* Dataspace ID */
+ hid_t m_sid; /* Memory dataspace */
+ hsize_t dims[] = {2}; /* Dataset dimensions */
+ hsize_t maxdims[] = {H5S_UNLIMITED}; /* Dataset maximum dimensions */
+ hsize_t offset[] = {0}; /* Hyperslab start */
+ hsize_t count[] = {1}; /* Hyperslab count */
+ unsigned data = 2; /* Data to write */
+ herr_t ret;
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing Scalar Dataspaces and Chunked Datasets\n"));
+
+ file_id = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(file_id, FAIL, "H5Fcreate");
+
+ dcpl = H5Pcreate(H5P_DATASET_CREATE);
+ CHECK(dcpl, FAIL, "H5Pcreate");
+
+ dims[0] = 1024U;
+ ret = H5Pset_chunk(dcpl, 1, dims);
+ CHECK(ret, FAIL, "H5Pset_chunk");
+
+ /* Create 1-D dataspace */
+ sid = H5Screate_simple(1, dims, maxdims);
+ CHECK(sid, FAIL, "H5Screate_simple");
+
+ dsid = H5Dcreate2(file_id, "dset", H5T_NATIVE_UINT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT);
+ CHECK(dsid, FAIL, "H5Dcreate2");
+
+ /* Select scalar area (offset 0, count 1) */
+ ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, offset, NULL, count, NULL);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* Create scalar memory dataspace */
+ m_sid = H5Screate(H5S_SCALAR);
+ CHECK(m_sid, FAIL, "H5Screate");
+
+ /* Write out data using scalar dataspace for memory dataspace */
+ ret = H5Dwrite(dsid, H5T_NATIVE_UINT, m_sid, sid, H5P_DEFAULT, &data);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* Close resources */
+ ret = H5Sclose(m_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+ ret = H5Sclose(sid);
+ CHECK(ret, FAIL, "H5Sclose");
+ ret = H5Dclose(dsid);
+ CHECK(ret, FAIL, "H5Dclose");
+ ret = H5Pclose(dcpl);
+ CHECK(ret, FAIL, "H5Pclose");
+ ret = H5Fclose(file_id);
+ CHECK(ret, FAIL, "H5Fclose");
+} /* test_select_scalar_chunk() */
+
+/****************************************************************
+**
+** test_select_valid(): Test basic H5S (dataspace) selection code.
+** Tests selection validity
+**
+****************************************************************/
+static void
+test_select_valid(void)
+{
+ herr_t error;
+ htri_t valid;
+ hid_t main_space, sub_space;
+ hsize_t safe_start[2] = {1, 1};
+ hsize_t safe_count[2] = {1, 1};
+ hsize_t start[2];
+ hsize_t dims[2], maxdims[2], size[2], count[2];
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing Selection Validity\n"));
+
+ MESSAGE(8, ("Case 1 : sub_space is not a valid dataspace\n"));
+ dims[0] = dims[1] = H5S_UNLIMITED;
+
+ H5E_BEGIN_TRY
+ {
+ sub_space = H5Screate_simple(2, dims, NULL);
+ }
+ H5E_END_TRY;
+ VERIFY(sub_space, FAIL, "H5Screate_simple");
+
+ H5E_BEGIN_TRY
+ {
+ valid = H5Sselect_valid(sub_space);
+ }
+ H5E_END_TRY;
+ VERIFY(valid, FAIL, "H5Sselect_valid");
+
+ /* Set arrays and dataspace for the rest of the cases */
+ count[0] = count[1] = 1;
+ dims[0] = dims[1] = maxdims[0] = maxdims[1] = 10;
+
+ main_space = H5Screate_simple(2, dims, maxdims);
+ CHECK(main_space, FAIL, "H5Screate_simple");
+
+ MESSAGE(8, ("Case 2 : sub_space is a valid but closed dataspace\n"));
+ sub_space = H5Scopy(main_space);
+ CHECK(sub_space, FAIL, "H5Scopy");
+
+ error = H5Sclose(sub_space);
+ CHECK(error, FAIL, "H5Sclose");
+
+ H5E_BEGIN_TRY
+ {
+ valid = H5Sselect_valid(sub_space);
+ }
+ H5E_END_TRY;
+ VERIFY(valid, FAIL, "H5Sselect_valid");
+
+ MESSAGE(8, ("Case 3 : in the dimensions\nTry offset (4,4) and size(6,6), the original space is of size "
+ "(10,10)\n"));
+ start[0] = start[1] = 4;
+ size[0] = size[1] = 6;
+
+ sub_space = H5Scopy(main_space);
+ CHECK(sub_space, FAIL, "H5Scopy");
+
+ error = H5Sselect_hyperslab(sub_space, H5S_SELECT_SET, start, size, count, size);
+ CHECK(error, FAIL, "H5Sselect_hyperslab");
+
+ valid = H5Sselect_valid(sub_space);
+ VERIFY(valid, TRUE, "H5Sselect_valid");
+
+ error = H5Sselect_hyperslab(sub_space, H5S_SELECT_OR, safe_start, NULL, safe_count, NULL);
+ CHECK(error, FAIL, "H5Sselect_hyperslab");
+
+ valid = H5Sselect_valid(sub_space);
+ VERIFY(valid, TRUE, "H5Sselect_valid");
+
+ error = H5Sclose(sub_space);
+ CHECK(error, FAIL, "H5Sclose");
+
+ MESSAGE(8, ("Case 4 : exceed dimensions by 1\nTry offset (5,5) and size(6,6), the original space is of "
+ "size (10,10)\n"));
+ start[0] = start[1] = 5;
+ size[0] = size[1] = 6;
+
+ sub_space = H5Scopy(main_space);
+ CHECK(sub_space, FAIL, "H5Scopy");
+
+ error = H5Sselect_hyperslab(sub_space, H5S_SELECT_SET, start, size, count, size);
+ CHECK(error, FAIL, "H5Sselect_hyperslab");
+
+ valid = H5Sselect_valid(sub_space);
+ VERIFY(valid, FALSE, "H5Sselect_valid");
+
+ error = H5Sselect_hyperslab(sub_space, H5S_SELECT_OR, safe_start, NULL, safe_count, NULL);
+ CHECK(error, FAIL, "H5Sselect_hyperslab");
+
+ valid = H5Sselect_valid(sub_space);
+ VERIFY(valid, FALSE, "H5Sselect_valid");
+
+ error = H5Sclose(sub_space);
+ CHECK(error, FAIL, "H5Sclose");
+
+ MESSAGE(8, ("Case 5 : exceed dimensions by 2\nTry offset (6,6) and size(6,6), the original space is of "
+ "size (10,10)\n"));
+ start[0] = start[1] = 6;
+ size[0] = size[1] = 6;
+
+ sub_space = H5Scopy(main_space);
+ CHECK(sub_space, FAIL, "H5Scopy");
+
+ error = H5Sselect_hyperslab(sub_space, H5S_SELECT_SET, start, size, count, size);
+ CHECK(error, FAIL, "H5Sselect_hyperslab");
+
+ valid = H5Sselect_valid(sub_space);
+ VERIFY(valid, FALSE, "H5Sselect_valid");
+
+ error = H5Sselect_hyperslab(sub_space, H5S_SELECT_OR, safe_start, NULL, safe_count, NULL);
+ CHECK(error, FAIL, "H5Sselect_hyperslab");
+
+ valid = H5Sselect_valid(sub_space);
+ VERIFY(valid, FALSE, "H5Sselect_valid");
+
+ error = H5Sclose(sub_space);
+ CHECK(error, FAIL, "H5Sclose");
+ error = H5Sclose(main_space);
+ CHECK(error, FAIL, "H5Sclose");
+} /* test_select_valid() */
+
+/****************************************************************
+**
+** test_select_combine(): Test basic H5S (dataspace) selection code.
+** Tests combining "all" and "none" selections with hyperslab
+** operations.
+**
+****************************************************************/
+static void
+test_select_combine(void)
+{
+ hid_t base_id; /* Base dataspace for test */
+ hid_t all_id; /* Dataspace for "all" selection */
+ hid_t none_id; /* Dataspace for "none" selection */
+ hid_t space1; /* Temporary dataspace #1 */
+ hsize_t start[SPACE7_RANK]; /* Hyperslab start */
+ hsize_t stride[SPACE7_RANK]; /* Hyperslab stride */
+ hsize_t count[SPACE7_RANK]; /* Hyperslab count */
+ hsize_t block[SPACE7_RANK]; /* Hyperslab block */
+ hsize_t dims[SPACE7_RANK] = {SPACE7_DIM1, SPACE7_DIM2}; /* Dimensions of dataspace */
+ H5S_sel_type sel_type; /* Selection type */
+ hssize_t nblocks; /* Number of hyperslab blocks */
+ hsize_t blocks[16][2][SPACE7_RANK]; /* List of blocks */
+ herr_t error;
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing Selection Combinations\n"));
+
+ /* Create dataspace for dataset on disk */
+ base_id = H5Screate_simple(SPACE7_RANK, dims, NULL);
+ CHECK(base_id, FAIL, "H5Screate_simple");
+
+ /* Copy base dataspace and set selection to "all" */
+ all_id = H5Scopy(base_id);
+ CHECK(all_id, FAIL, "H5Scopy");
+ error = H5Sselect_all(all_id);
+ CHECK(error, FAIL, "H5Sselect_all");
+ sel_type = H5Sget_select_type(all_id);
+ VERIFY(sel_type, H5S_SEL_ALL, "H5Sget_select_type");
+
+ /* Copy base dataspace and set selection to "none" */
+ none_id = H5Scopy(base_id);
+ CHECK(none_id, FAIL, "H5Scopy");
+ error = H5Sselect_none(none_id);
+ CHECK(error, FAIL, "H5Sselect_none");
+ sel_type = H5Sget_select_type(none_id);
+ VERIFY(sel_type, H5S_SEL_NONE, "H5Sget_select_type");
+
+ /* Copy "all" selection & space */
+ space1 = H5Scopy(all_id);
+ CHECK(space1, FAIL, "H5Scopy");
+
+ /* 'OR' "all" selection with another hyperslab */
+ start[0] = start[1] = 0;
+ stride[0] = stride[1] = 1;
+ count[0] = count[1] = 1;
+ block[0] = block[1] = 5;
+ error = H5Sselect_hyperslab(space1, H5S_SELECT_OR, start, stride, count, block);
+ CHECK(error, FAIL, "H5Sselect_hyperslab");
+
+ /* Verify that it's still "all" selection */
+ sel_type = H5Sget_select_type(space1);
+ VERIFY(sel_type, H5S_SEL_ALL, "H5Sget_select_type");
+
+ /* Close temporary dataspace */
+ error = H5Sclose(space1);
+ CHECK(error, FAIL, "H5Sclose");
+
+ /* Copy "all" selection & space */
+ space1 = H5Scopy(all_id);
+ CHECK(space1, FAIL, "H5Scopy");
+
+ /* 'AND' "all" selection with another hyperslab */
+ start[0] = start[1] = 0;
+ stride[0] = stride[1] = 1;
+ count[0] = count[1] = 1;
+ block[0] = block[1] = 5;
+ error = H5Sselect_hyperslab(space1, H5S_SELECT_AND, start, stride, count, block);
+ CHECK(error, FAIL, "H5Sselect_hyperslab");
+
+ /* Verify that the new selection is the same as the original block */
+ sel_type = H5Sget_select_type(space1);
+ VERIFY(sel_type, H5S_SEL_HYPERSLABS, "H5Sget_select_type");
+
+ /* Verify that there is only one block */
+ nblocks = H5Sget_select_hyper_nblocks(space1);
+ VERIFY(nblocks, 1, "H5Sget_select_hyper_nblocks");
+
+ /* Retrieve the block defined */
+ HDmemset(blocks, -1, sizeof(blocks)); /* Reset block list */
+ error = H5Sget_select_hyper_blocklist(space1, (hsize_t)0, (hsize_t)nblocks, (hsize_t *)blocks);
+ CHECK(error, FAIL, "H5Sget_select_hyper_blocklist");
+
+ /* Verify that the correct block is defined */
+ VERIFY(blocks[0][0][0], (hsize_t)start[0], "H5Sget_select_hyper_blocklist");
+ VERIFY(blocks[0][0][1], (hsize_t)start[1], "H5Sget_select_hyper_blocklist");
+ VERIFY(blocks[0][1][0], (block[0] - 1), "H5Sget_select_hyper_blocklist");
+ VERIFY(blocks[0][1][1], (block[1] - 1), "H5Sget_select_hyper_blocklist");
+
+ /* Close temporary dataspace */
+ error = H5Sclose(space1);
+ CHECK(error, FAIL, "H5Sclose");
+
+ /* Copy "all" selection & space */
+ space1 = H5Scopy(all_id);
+ CHECK(space1, FAIL, "H5Scopy");
+
+ /* 'XOR' "all" selection with another hyperslab */
+ start[0] = start[1] = 0;
+ stride[0] = stride[1] = 1;
+ count[0] = count[1] = 1;
+ block[0] = block[1] = 5;
+ error = H5Sselect_hyperslab(space1, H5S_SELECT_XOR, start, stride, count, block);
+ CHECK(error, FAIL, "H5Sselect_hyperslab");
+
+ /* Verify that the new selection is an inversion of the original block */
+ sel_type = H5Sget_select_type(space1);
+ VERIFY(sel_type, H5S_SEL_HYPERSLABS, "H5Sget_select_type");
+
+ /* Verify that there are two blocks */
+ nblocks = H5Sget_select_hyper_nblocks(space1);
+ VERIFY(nblocks, 2, "H5Sget_select_hyper_nblocks");
+
+ /* Retrieve the block defined */
+ HDmemset(blocks, -1, sizeof(blocks)); /* Reset block list */
+ error = H5Sget_select_hyper_blocklist(space1, (hsize_t)0, (hsize_t)nblocks, (hsize_t *)blocks);
+ CHECK(error, FAIL, "H5Sget_select_hyper_blocklist");
+
+ /* Verify that the correct block is defined */
+ VERIFY(blocks[0][0][0], 0, "H5Sget_select_hyper_blocklist");
+ VERIFY(blocks[0][0][1], 5, "H5Sget_select_hyper_blocklist");
+ VERIFY(blocks[0][1][0], 4, "H5Sget_select_hyper_blocklist");
+ VERIFY(blocks[0][1][1], 9, "H5Sget_select_hyper_blocklist");
+ VERIFY(blocks[1][0][0], 5, "H5Sget_select_hyper_blocklist");
+ VERIFY(blocks[1][0][1], 0, "H5Sget_select_hyper_blocklist");
+ VERIFY(blocks[1][1][0], 9, "H5Sget_select_hyper_blocklist");
+ VERIFY(blocks[1][1][1], 9, "H5Sget_select_hyper_blocklist");
+
+ /* Close temporary dataspace */
+ error = H5Sclose(space1);
+ CHECK(error, FAIL, "H5Sclose");
+
+ /* Copy "all" selection & space */
+ space1 = H5Scopy(all_id);
+ CHECK(space1, FAIL, "H5Scopy");
+
+ /* 'NOTB' "all" selection with another hyperslab */
+ start[0] = start[1] = 0;
+ stride[0] = stride[1] = 1;
+ count[0] = count[1] = 1;
+ block[0] = block[1] = 5;
+ error = H5Sselect_hyperslab(space1, H5S_SELECT_NOTB, start, stride, count, block);
+ CHECK(error, FAIL, "H5Sselect_hyperslab");
+
+ /* Verify that the new selection is an inversion of the original block */
+ sel_type = H5Sget_select_type(space1);
+ VERIFY(sel_type, H5S_SEL_HYPERSLABS, "H5Sget_select_type");
+
+ /* Verify that there are two blocks */
+ nblocks = H5Sget_select_hyper_nblocks(space1);
+ VERIFY(nblocks, 2, "H5Sget_select_hyper_nblocks");
+
+ /* Retrieve the block defined */
+ HDmemset(blocks, -1, sizeof(blocks)); /* Reset block list */
+ error = H5Sget_select_hyper_blocklist(space1, (hsize_t)0, (hsize_t)nblocks, (hsize_t *)blocks);
+ CHECK(error, FAIL, "H5Sget_select_hyper_blocklist");
+
+ /* Verify that the correct block is defined */
+ VERIFY(blocks[0][0][0], 0, "H5Sget_select_hyper_blocklist");
+ VERIFY(blocks[0][0][1], 5, "H5Sget_select_hyper_blocklist");
+ VERIFY(blocks[0][1][0], 4, "H5Sget_select_hyper_blocklist");
+ VERIFY(blocks[0][1][1], 9, "H5Sget_select_hyper_blocklist");
+ VERIFY(blocks[1][0][0], 5, "H5Sget_select_hyper_blocklist");
+ VERIFY(blocks[1][0][1], 0, "H5Sget_select_hyper_blocklist");
+ VERIFY(blocks[1][1][0], 9, "H5Sget_select_hyper_blocklist");
+ VERIFY(blocks[1][1][1], 9, "H5Sget_select_hyper_blocklist");
+
+ /* Close temporary dataspace */
+ error = H5Sclose(space1);
+ CHECK(error, FAIL, "H5Sclose");
+
+ /* Copy "all" selection & space */
+ space1 = H5Scopy(all_id);
+ CHECK(space1, FAIL, "H5Scopy");
+
+ /* 'NOTA' "all" selection with another hyperslab */
+ start[0] = start[1] = 0;
+ stride[0] = stride[1] = 1;
+ count[0] = count[1] = 1;
+ block[0] = block[1] = 5;
+ error = H5Sselect_hyperslab(space1, H5S_SELECT_NOTA, start, stride, count, block);
+ CHECK(error, FAIL, "H5Sselect_hyperslab");
+
+ /* Verify that the new selection is the "none" selection */
+ sel_type = H5Sget_select_type(space1);
+ VERIFY(sel_type, H5S_SEL_NONE, "H5Sget_select_type");
+
+ /* Close temporary dataspace */
+ error = H5Sclose(space1);
+ CHECK(error, FAIL, "H5Sclose");
+
+ /* Copy "none" selection & space */
+ space1 = H5Scopy(none_id);
+ CHECK(space1, FAIL, "H5Scopy");
+
+ /* 'OR' "none" selection with another hyperslab */
+ start[0] = start[1] = 0;
+ stride[0] = stride[1] = 1;
+ count[0] = count[1] = 1;
+ block[0] = block[1] = 5;
+ error = H5Sselect_hyperslab(space1, H5S_SELECT_OR, start, stride, count, block);
+ CHECK(error, FAIL, "H5Sselect_hyperslab");
+
+ /* Verify that the new selection is the same as the original hyperslab */
+ sel_type = H5Sget_select_type(space1);
+ VERIFY(sel_type, H5S_SEL_HYPERSLABS, "H5Sget_select_type");
+
+ /* Verify that there is only one block */
+ nblocks = H5Sget_select_hyper_nblocks(space1);
+ VERIFY(nblocks, 1, "H5Sget_select_hyper_nblocks");
+
+ /* Retrieve the block defined */
+ HDmemset(blocks, -1, sizeof(blocks)); /* Reset block list */
+ error = H5Sget_select_hyper_blocklist(space1, (hsize_t)0, (hsize_t)nblocks, (hsize_t *)blocks);
+ CHECK(error, FAIL, "H5Sget_select_hyper_blocklist");
+
+ /* Verify that the correct block is defined */
+ VERIFY(blocks[0][0][0], (hsize_t)start[0], "H5Sget_select_hyper_blocklist");
+ VERIFY(blocks[0][0][1], (hsize_t)start[1], "H5Sget_select_hyper_blocklist");
+ VERIFY(blocks[0][1][0], (block[0] - 1), "H5Sget_select_hyper_blocklist");
+ VERIFY(blocks[0][1][1], (block[1] - 1), "H5Sget_select_hyper_blocklist");
+
+ /* Close temporary dataspace */
+ error = H5Sclose(space1);
+ CHECK(error, FAIL, "H5Sclose");
+
+ /* Copy "none" selection & space */
+ space1 = H5Scopy(none_id);
+ CHECK(space1, FAIL, "H5Scopy");
+
+ /* 'AND' "none" selection with another hyperslab */
+ start[0] = start[1] = 0;
+ stride[0] = stride[1] = 1;
+ count[0] = count[1] = 1;
+ block[0] = block[1] = 5;
+ error = H5Sselect_hyperslab(space1, H5S_SELECT_AND, start, stride, count, block);
+ CHECK(error, FAIL, "H5Sselect_hyperslab");
+
+ /* Verify that the new selection is the "none" selection */
+ sel_type = H5Sget_select_type(space1);
+ VERIFY(sel_type, H5S_SEL_NONE, "H5Sget_select_type");
+
+ /* Close temporary dataspace */
+ error = H5Sclose(space1);
+ CHECK(error, FAIL, "H5Sclose");
+
+ /* Copy "none" selection & space */
+ space1 = H5Scopy(none_id);
+ CHECK(space1, FAIL, "H5Scopy");
+
+ /* 'XOR' "none" selection with another hyperslab */
+ start[0] = start[1] = 0;
+ stride[0] = stride[1] = 1;
+ count[0] = count[1] = 1;
+ block[0] = block[1] = 5;
+ error = H5Sselect_hyperslab(space1, H5S_SELECT_XOR, start, stride, count, block);
+ CHECK(error, FAIL, "H5Sselect_hyperslab");
+
+ /* Verify that the new selection is the same as the original hyperslab */
+ sel_type = H5Sget_select_type(space1);
+ VERIFY(sel_type, H5S_SEL_HYPERSLABS, "H5Sget_select_type");
+
+ /* Verify that there is only one block */
+ nblocks = H5Sget_select_hyper_nblocks(space1);
+ VERIFY(nblocks, 1, "H5Sget_select_hyper_nblocks");
+
+ /* Retrieve the block defined */
+ HDmemset(blocks, -1, sizeof(blocks)); /* Reset block list */
+ error = H5Sget_select_hyper_blocklist(space1, (hsize_t)0, (hsize_t)nblocks, (hsize_t *)blocks);
+ CHECK(error, FAIL, "H5Sget_select_hyper_blocklist");
+
+ /* Verify that the correct block is defined */
+ VERIFY(blocks[0][0][0], (hsize_t)start[0], "H5Sget_select_hyper_blocklist");
+ VERIFY(blocks[0][0][1], (hsize_t)start[1], "H5Sget_select_hyper_blocklist");
+ VERIFY(blocks[0][1][0], (block[0] - 1), "H5Sget_select_hyper_blocklist");
+ VERIFY(blocks[0][1][1], (block[1] - 1), "H5Sget_select_hyper_blocklist");
+
+ /* Close temporary dataspace */
+ error = H5Sclose(space1);
+ CHECK(error, FAIL, "H5Sclose");
+
+ /* Copy "none" selection & space */
+ space1 = H5Scopy(none_id);
+ CHECK(space1, FAIL, "H5Scopy");
+
+ /* 'NOTB' "none" selection with another hyperslab */
+ start[0] = start[1] = 0;
+ stride[0] = stride[1] = 1;
+ count[0] = count[1] = 1;
+ block[0] = block[1] = 5;
+ error = H5Sselect_hyperslab(space1, H5S_SELECT_NOTB, start, stride, count, block);
+ CHECK(error, FAIL, "H5Sselect_hyperslab");
+
+ /* Verify that the new selection is the "none" selection */
+ sel_type = H5Sget_select_type(space1);
+ VERIFY(sel_type, H5S_SEL_NONE, "H5Sget_select_type");
+
+ /* Close temporary dataspace */
+ error = H5Sclose(space1);
+ CHECK(error, FAIL, "H5Sclose");
+
+ /* Copy "none" selection & space */
+ space1 = H5Scopy(none_id);
+ CHECK(space1, FAIL, "H5Scopy");
+
+ /* 'NOTA' "none" selection with another hyperslab */
+ start[0] = start[1] = 0;
+ stride[0] = stride[1] = 1;
+ count[0] = count[1] = 1;
+ block[0] = block[1] = 5;
+ error = H5Sselect_hyperslab(space1, H5S_SELECT_NOTA, start, stride, count, block);
+ CHECK(error, FAIL, "H5Sselect_hyperslab");
+
+ /* Verify that the new selection is the same as the original hyperslab */
+ sel_type = H5Sget_select_type(space1);
+ VERIFY(sel_type, H5S_SEL_HYPERSLABS, "H5Sget_select_type");
+
+ /* Verify that there is only one block */
+ nblocks = H5Sget_select_hyper_nblocks(space1);
+ VERIFY(nblocks, 1, "H5Sget_select_hyper_nblocks");
+
+ /* Retrieve the block defined */
+ HDmemset(blocks, -1, sizeof(blocks)); /* Reset block list */
+ error = H5Sget_select_hyper_blocklist(space1, (hsize_t)0, (hsize_t)nblocks, (hsize_t *)blocks);
+ CHECK(error, FAIL, "H5Sget_select_hyper_blocklist");
+
+ /* Verify that the correct block is defined */
+ VERIFY(blocks[0][0][0], (hsize_t)start[0], "H5Sget_select_hyper_blocklist");
+ VERIFY(blocks[0][0][1], (hsize_t)start[1], "H5Sget_select_hyper_blocklist");
+ VERIFY(blocks[0][1][0], (block[0] - 1), "H5Sget_select_hyper_blocklist");
+ VERIFY(blocks[0][1][1], (block[1] - 1), "H5Sget_select_hyper_blocklist");
+
+ /* Close temporary dataspace */
+ error = H5Sclose(space1);
+ CHECK(error, FAIL, "H5Sclose");
+
+ /* Close dataspaces */
+ error = H5Sclose(base_id);
+ CHECK(error, FAIL, "H5Sclose");
+
+ error = H5Sclose(all_id);
+ CHECK(error, FAIL, "H5Sclose");
+
+ error = H5Sclose(none_id);
+ CHECK(error, FAIL, "H5Sclose");
+} /* test_select_combine() */
+
+/*
+ * Typedef for iteration structure used in the fill value tests
+ */
+typedef struct {
+ unsigned short fill_value; /* The fill value to check */
+ size_t curr_coord; /* Current coordinate to examine */
+ hsize_t *coords; /* Pointer to selection's coordinates */
+} fill_iter_info;
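+
+/* Note: the coords array holds consecutive (row, column) pairs in selection- */
+/* iteration order; test_select_hyper_iter3() indexes it as 2 * curr_coord */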
+
+/****************************************************************
+**
+** test_select_hyper_iter3(): Iterator for checking hyperslab iteration
+**
+****************************************************************/
+static herr_t
+test_select_hyper_iter3(void *_elem, hid_t H5_ATTR_UNUSED type_id, unsigned ndim, const hsize_t *point,
+ void *_operator_data)
+{
+ unsigned *tbuf = (unsigned *)_elem; /* temporary buffer pointer */
+ fill_iter_info *iter_info =
+ (fill_iter_info *)_operator_data; /* Get the pointer to the iterator information */
+ hsize_t *coord_ptr; /* Pointer to the coordinate information for a point*/
+
+ /* Check value in current buffer location */
+ if (*tbuf != iter_info->fill_value)
+ return (-1);
+ else {
+ /* Check number of dimensions */
+ if (ndim != SPACE7_RANK)
+ return (-1);
+ else {
+ /* Check Coordinates */
+ coord_ptr = iter_info->coords + (2 * iter_info->curr_coord);
+ iter_info->curr_coord++;
+ if (coord_ptr[0] != point[0])
+ return (-1);
+ else if (coord_ptr[1] != point[1])
+ return (-1);
+ else
+ return (0);
+ } /* end else */
+ } /* end else */
+} /* end test_select_hyper_iter3() */
+
+/****************************************************************
+**
+** test_select_fill_all(): Test basic H5S (dataspace) selection code.
+** Tests filling "all" selections
+**
+****************************************************************/
+static void
+test_select_fill_all(void)
+{
+ hid_t sid1; /* Dataspace ID */
+ hsize_t dims1[] = {SPACE7_DIM1, SPACE7_DIM2};
+ unsigned fill_value; /* Fill value */
+ fill_iter_info iter_info; /* Iterator information structure */
+ hsize_t points[SPACE7_DIM1 * SPACE7_DIM2][SPACE7_RANK]; /* Coordinates of selection */
+ unsigned *wbuf, /* buffer to write to disk */
+ *tbuf; /* temporary buffer pointer */
+ unsigned u, v; /* Counters */
+ herr_t ret; /* Generic return value */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing Filling 'all' Selections\n"));
+
+ /* Allocate memory buffer */
+ wbuf = (unsigned *)HDmalloc(sizeof(unsigned) * SPACE7_DIM1 * SPACE7_DIM2);
+ CHECK_PTR(wbuf, "HDmalloc");
+
+ /* Initialize memory buffer */
+ for (u = 0, tbuf = wbuf; u < SPACE7_DIM1; u++)
+ for (v = 0; v < SPACE7_DIM2; v++)
+ *tbuf++ = (u * SPACE7_DIM2) + v;
+
+ /* Create dataspace for dataset on disk */
+ sid1 = H5Screate_simple(SPACE7_RANK, dims1, NULL);
+ CHECK(sid1, FAIL, "H5Screate_simple");
+
+ /* Space defaults to "all" selection */
+
+ /* Set fill value */
+ fill_value = SPACE7_FILL;
+
+ /* Fill selection in memory */
+ ret = H5Dfill(&fill_value, H5T_NATIVE_UINT, wbuf, H5T_NATIVE_UINT, sid1);
+ CHECK(ret, FAIL, "H5Dfill");
+
+ /* Verify memory buffer the hard way... */
+ for (u = 0, tbuf = wbuf; u < SPACE7_DIM1; u++)
+ for (v = 0; v < SPACE7_DIM2; v++)
+ if (*tbuf != fill_value)
+ TestErrPrintf("Error! v=%d, u=%u, *tbuf=%u, fill_value=%u\n", v, u, *tbuf, fill_value);
+
+ /* Set the coordinates of the selection */
+ for (u = 0; u < SPACE7_DIM1; u++)
+ for (v = 0; v < SPACE7_DIM2; v++) {
+ points[(u * SPACE7_DIM2) + v][0] = u;
+ points[(u * SPACE7_DIM2) + v][1] = v;
+ } /* end for */
+
+ /* Initialize the iterator structure */
+ iter_info.fill_value = SPACE7_FILL;
+ iter_info.curr_coord = 0;
+ iter_info.coords = (hsize_t *)points;
+
+ /* Iterate through selection, verifying correct data */
+ ret = H5Diterate(wbuf, H5T_NATIVE_UINT, sid1, test_select_hyper_iter3, &iter_info);
+ CHECK(ret, FAIL, "H5Diterate");
+
+ /* Close dataspace */
+ ret = H5Sclose(sid1);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Free memory buffers */
+ HDfree(wbuf);
+} /* test_select_fill_all() */
+
+/****************************************************************
+**
+** test_select_fill_point(): Test basic H5S (dataspace) selection code.
+** Tests filling "point" selections
+**
+****************************************************************/
+static void
+test_select_fill_point(hssize_t *offset)
+{
+ hid_t sid1; /* Dataspace ID */
+ hsize_t dims1[] = {SPACE7_DIM1, SPACE7_DIM2};
+ hssize_t real_offset[SPACE7_RANK]; /* Actual offset to use */
+ hsize_t points[5][SPACE7_RANK] = {{2, 4}, {3, 8}, {8, 4}, {7, 5}, {7, 7}};
+ size_t num_points = 5; /* Number of points selected */
+ int fill_value; /* Fill value */
+ fill_iter_info iter_info; /* Iterator information structure */
+ unsigned *wbuf, /* buffer to write to disk */
+ *tbuf; /* temporary buffer pointer */
+ unsigned u, v, w; /* Counters */
+ herr_t ret; /* Generic return value */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing Filling 'point' Selections\n"));
+
+ /* Allocate memory buffer */
+ wbuf = (unsigned *)HDmalloc(sizeof(unsigned) * SPACE7_DIM1 * SPACE7_DIM2);
+ CHECK_PTR(wbuf, "HDmalloc");
+
+ /* Initialize memory buffer */
+ for (u = 0, tbuf = wbuf; u < SPACE7_DIM1; u++)
+ for (v = 0; v < SPACE7_DIM2; v++)
+ *tbuf++ = (unsigned short)(u * SPACE7_DIM2) + v;
+
+ /* Create dataspace for dataset on disk */
+ sid1 = H5Screate_simple(SPACE7_RANK, dims1, NULL);
+ CHECK(sid1, FAIL, "H5Screate_simple");
+
+ /* Select "point" selection */
+ ret = H5Sselect_elements(sid1, H5S_SELECT_SET, num_points, (const hsize_t *)points);
+ CHECK(ret, FAIL, "H5Sselect_elements");
+
+ if (offset != NULL) {
+ HDmemcpy(real_offset, offset, SPACE7_RANK * sizeof(hssize_t));
+
+ /* Set offset, if provided */
+ ret = H5Soffset_simple(sid1, real_offset);
+ CHECK(ret, FAIL, "H5Soffset_simple");
+ } /* end if */
+ else
+ HDmemset(real_offset, 0, SPACE7_RANK * sizeof(hssize_t));
+
+ /* Set fill value */
+ fill_value = SPACE7_FILL;
+
+ /* Fill selection in memory */
+ ret = H5Dfill(&fill_value, H5T_NATIVE_INT, wbuf, H5T_NATIVE_UINT, sid1);
+ CHECK(ret, FAIL, "H5Dfill");
+
+ /* Verify memory buffer the hard way... */
+ for (u = 0, tbuf = wbuf; u < SPACE7_DIM1; u++)
+ for (v = 0; v < SPACE7_DIM2; v++, tbuf++) {
+ for (w = 0; w < (unsigned)num_points; w++) {
+ if (u == (unsigned)(points[w][0] + (hsize_t)real_offset[0]) &&
+ v == (unsigned)(points[w][1] + (hsize_t)real_offset[1])) {
+ if (*tbuf != (unsigned)fill_value)
+ TestErrPrintf("Error! v=%u, u=%u, *tbuf=%u, fill_value=%u\n", v, u, *tbuf,
+ (unsigned)fill_value);
+ break;
+ } /* end if */
+ } /* end for */
+ if (w == (unsigned)num_points && *tbuf != ((u * SPACE7_DIM2) + v))
+ TestErrPrintf("Error! v=%d, u=%d, *tbuf=%u, should be: %u\n", v, u, *tbuf,
+ ((u * SPACE7_DIM2) + v));
+ } /* end for */
+
+ /* Initialize the iterator structure */
+ iter_info.fill_value = SPACE7_FILL;
+ iter_info.curr_coord = 0;
+ iter_info.coords = (hsize_t *)points;
+
+ /* Add in the offset */
+ for (u = 0; u < (unsigned)num_points; u++) {
+ points[u][0] = (hsize_t)((hssize_t)points[u][0] + real_offset[0]);
+ points[u][1] = (hsize_t)((hssize_t)points[u][1] + real_offset[1]);
+ } /* end for */
+
+ /* Iterate through selection, verifying correct data */
+ ret = H5Diterate(wbuf, H5T_NATIVE_UINT, sid1, test_select_hyper_iter3, &iter_info);
+ CHECK(ret, FAIL, "H5Diterate");
+
+ /* Close dataspace */
+ ret = H5Sclose(sid1);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Free memory buffers */
+ HDfree(wbuf);
+} /* test_select_fill_point() */
+
+/****************************************************************
+**
+** test_select_fill_hyper_simple(): Test basic H5S (dataspace) selection code.
+** Tests filling "simple" (i.e. one block) hyperslab selections
+**
+****************************************************************/
+static void
+test_select_fill_hyper_simple(hssize_t *offset)
+{
+ hid_t sid1; /* Dataspace ID */
+ hsize_t dims1[] = {SPACE7_DIM1, SPACE7_DIM2};
+ hssize_t real_offset[SPACE7_RANK]; /* Actual offset to use */
+ hsize_t start[SPACE7_RANK]; /* Hyperslab start */
+ hsize_t count[SPACE7_RANK]; /* Hyperslab block size */
+ size_t num_points; /* Number of points in selection */
+ hsize_t points[16][SPACE7_RANK]; /* Coordinates selected */
+ int fill_value; /* Fill value */
+ fill_iter_info iter_info; /* Iterator information structure */
+ unsigned *wbuf, /* buffer to write to disk */
+ *tbuf; /* temporary buffer pointer */
+ unsigned u, v; /* Counters */
+ herr_t ret; /* Generic return value */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing Filling Simple 'hyperslab' Selections\n"));
+
+ /* Allocate memory buffer */
+ wbuf = (unsigned *)HDmalloc(sizeof(unsigned) * SPACE7_DIM1 * SPACE7_DIM2);
+ CHECK_PTR(wbuf, "HDmalloc");
+
+ /* Initialize memory buffer */
+ for (u = 0, tbuf = wbuf; u < SPACE7_DIM1; u++)
+ for (v = 0; v < SPACE7_DIM2; v++)
+ *tbuf++ = (unsigned short)(u * SPACE7_DIM2) + v;
+
+ /* Create dataspace for dataset on disk */
+ sid1 = H5Screate_simple(SPACE7_RANK, dims1, NULL);
+ CHECK(sid1, FAIL, "H5Screate_simple");
+
+ /* Select "hyperslab" selection */
+ start[0] = 3;
+ start[1] = 3;
+ count[0] = 4;
+ count[1] = 4;
+ ret = H5Sselect_hyperslab(sid1, H5S_SELECT_SET, start, NULL, count, NULL);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ if (offset != NULL) {
+ HDmemcpy(real_offset, offset, SPACE7_RANK * sizeof(hssize_t));
+
+ /* Set offset, if provided */
+ ret = H5Soffset_simple(sid1, real_offset);
+ CHECK(ret, FAIL, "H5Soffset_simple");
+ } /* end if */
+ else
+ HDmemset(real_offset, 0, SPACE7_RANK * sizeof(hssize_t));
+
+ /* Set fill value */
+ fill_value = SPACE7_FILL;
+
+ /* Fill selection in memory */
+ ret = H5Dfill(&fill_value, H5T_NATIVE_INT, wbuf, H5T_NATIVE_UINT, sid1);
+ CHECK(ret, FAIL, "H5Dfill");
+
+ /* Verify memory buffer the hard way... */
+ for (u = 0, tbuf = wbuf; u < SPACE7_DIM1; u++)
+ for (v = 0; v < SPACE7_DIM2; v++, tbuf++) {
+ if ((u >= (unsigned)((hssize_t)start[0] + real_offset[0]) &&
+ u < (unsigned)((hssize_t)(start[0] + count[0]) + real_offset[0])) &&
+ (v >= (unsigned)((hssize_t)start[1] + real_offset[1]) &&
+ v < (unsigned)((hssize_t)(start[1] + count[1]) + real_offset[1]))) {
+ if (*tbuf != (unsigned)fill_value)
+ TestErrPrintf("Error! v=%u, u=%u, *tbuf=%u, fill_value=%u\n", v, u, *tbuf,
+ (unsigned)fill_value);
+ } /* end if */
+ else {
+ if (*tbuf != ((unsigned)(u * SPACE7_DIM2) + v))
+ TestErrPrintf("Error! v=%u, u=%u, *tbuf=%u, should be: %u\n", v, u, *tbuf,
+ ((u * SPACE7_DIM2) + v));
+ } /* end else */
+ } /* end for */
+
+ /* Initialize the iterator structure */
+ iter_info.fill_value = SPACE7_FILL;
+ iter_info.curr_coord = 0;
+ iter_info.coords = (hsize_t *)points;
+
+ /* Set the coordinates of the selection (with the offset) */
+ for (u = 0, num_points = 0; u < (unsigned)count[0]; u++)
+ for (v = 0; v < (unsigned)count[1]; v++, num_points++) {
+ points[num_points][0] = (hsize_t)((hssize_t)(u + start[0]) + real_offset[0]);
+ points[num_points][1] = (hsize_t)((hssize_t)(v + start[1]) + real_offset[1]);
+ } /* end for */
+
+ /* Iterate through selection, verifying correct data */
+ ret = H5Diterate(wbuf, H5T_NATIVE_UINT, sid1, test_select_hyper_iter3, &iter_info);
+ CHECK(ret, FAIL, "H5Diterate");
+
+ /* Close dataspace */
+ ret = H5Sclose(sid1);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Free memory buffers */
+ HDfree(wbuf);
+} /* test_select_fill_hyper_simple() */
+
+/****************************************************************
+**
+** test_select_fill_hyper_regular(): Test basic H5S (dataspace) selection code.
+** Tests filling "regular" (i.e. strided block) hyperslab selections
+**
+****************************************************************/
+static void
+test_select_fill_hyper_regular(hssize_t *offset)
+{
+ hid_t sid1; /* Dataspace ID */
+ hsize_t dims1[] = {SPACE7_DIM1, SPACE7_DIM2};
+ hssize_t real_offset[SPACE7_RANK]; /* Actual offset to use */
+ hsize_t start[SPACE7_RANK]; /* Hyperslab start */
+ hsize_t stride[SPACE7_RANK]; /* Hyperslab stride size */
+ hsize_t count[SPACE7_RANK]; /* Hyperslab block count */
+ hsize_t block[SPACE7_RANK]; /* Hyperslab block size */
+ hsize_t points[16][SPACE7_RANK] = {
+ {2, 2}, {2, 3}, {2, 6}, {2, 7}, {3, 2}, {3, 3}, {3, 6}, {3, 7},
+ {6, 2}, {6, 3}, {6, 6}, {6, 7}, {7, 2}, {7, 3}, {7, 6}, {7, 7},
+ };
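+ /* (these 16 points are exactly the elements covered by the strided */
+ /* selection below: 2x2 blocks starting at (2,2), (2,6), (6,2) and (6,6)) */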
+ size_t num_points = 16; /* Number of points selected */
+ int fill_value; /* Fill value */
+ fill_iter_info iter_info; /* Iterator information structure */
+ unsigned *wbuf, /* buffer to write to disk */
+ *tbuf; /* temporary buffer pointer */
+ unsigned u, v, w; /* Counters */
+ herr_t ret; /* Generic return value */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing Filling Regular 'hyperslab' Selections\n"));
+
+ /* Allocate memory buffer */
+ wbuf = (unsigned *)HDmalloc(sizeof(unsigned) * SPACE7_DIM1 * SPACE7_DIM2);
+ CHECK_PTR(wbuf, "HDmalloc");
+
+ /* Initialize memory buffer */
+ for (u = 0, tbuf = wbuf; u < SPACE7_DIM1; u++)
+ for (v = 0; v < SPACE7_DIM2; v++)
+ *tbuf++ = (u * SPACE7_DIM2) + v;
+
+ /* Create dataspace for dataset on disk */
+ sid1 = H5Screate_simple(SPACE7_RANK, dims1, NULL);
+ CHECK(sid1, FAIL, "H5Screate_simple");
+
+ /* Select "hyperslab" selection */
+ start[0] = 2;
+ start[1] = 2;
+ stride[0] = 4;
+ stride[1] = 4;
+ count[0] = 2;
+ count[1] = 2;
+ block[0] = 2;
+ block[1] = 2;
+ ret = H5Sselect_hyperslab(sid1, H5S_SELECT_SET, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ if (offset != NULL) {
+ HDmemcpy(real_offset, offset, SPACE7_RANK * sizeof(hssize_t));
+
+ /* Set offset, if provided */
+ ret = H5Soffset_simple(sid1, real_offset);
+ CHECK(ret, FAIL, "H5Soffset_simple");
+ } /* end if */
+ else
+ HDmemset(real_offset, 0, SPACE7_RANK * sizeof(hssize_t));
+
+ /* Set fill value */
+ fill_value = SPACE7_FILL;
+
+ /* Fill selection in memory */
+ ret = H5Dfill(&fill_value, H5T_NATIVE_INT, wbuf, H5T_NATIVE_UINT, sid1);
+ CHECK(ret, FAIL, "H5Dfill");
+
+ /* Verify memory buffer the hard way... */
+ for (u = 0, tbuf = wbuf; u < SPACE7_DIM1; u++)
+ for (v = 0; v < SPACE7_DIM2; v++, tbuf++) {
+ for (w = 0; w < (unsigned)num_points; w++) {
+ if (u == (unsigned)((hssize_t)points[w][0] + real_offset[0]) &&
+ v == (unsigned)((hssize_t)points[w][1] + real_offset[1])) {
+ if (*tbuf != (unsigned)fill_value)
+ TestErrPrintf("Error! v=%u, u=%u, *tbuf=%u, fill_value=%u\n", v, u, *tbuf,
+ (unsigned)fill_value);
+ break;
+ } /* end if */
+ } /* end for */
+ if (w == (unsigned)num_points && *tbuf != ((u * SPACE7_DIM2) + v))
+                TestErrPrintf("Error! v=%u, u=%u, *tbuf=%u, should be: %u\n", v, u, *tbuf,
+                              ((u * SPACE7_DIM2) + v));
+ } /* end for */
+
+ /* Initialize the iterator structure */
+ iter_info.fill_value = SPACE7_FILL;
+ iter_info.curr_coord = 0;
+ iter_info.coords = (hsize_t *)points;
+
+ /* Add in the offset */
+ for (u = 0; u < (unsigned)num_points; u++) {
+ points[u][0] = (hsize_t)((hssize_t)points[u][0] + real_offset[0]);
+ points[u][1] = (hsize_t)((hssize_t)points[u][1] + real_offset[1]);
+ } /* end for */
+
+ /* Iterate through selection, verifying correct data */
+ ret = H5Diterate(wbuf, H5T_NATIVE_UINT, sid1, test_select_hyper_iter3, &iter_info);
+ CHECK(ret, FAIL, "H5Diterate");
+
+ /* Close dataspace */
+ ret = H5Sclose(sid1);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Free memory buffers */
+ HDfree(wbuf);
+} /* test_select_fill_hyper_regular() */
+
+/****************************************************************
+**
+** test_select_fill_hyper_irregular(): Test basic H5S (dataspace) selection code.
+** Tests filling "irregular" (i.e. combined blocks) hyperslab selections
+**
+****************************************************************/
+static void
+test_select_fill_hyper_irregular(hssize_t *offset)
+{
+ hid_t sid1; /* Dataspace ID */
+ hsize_t dims1[] = {SPACE7_DIM1, SPACE7_DIM2};
+ hssize_t real_offset[SPACE7_RANK]; /* Actual offset to use */
+ hsize_t start[SPACE7_RANK]; /* Hyperslab start */
+ hsize_t count[SPACE7_RANK]; /* Hyperslab block count */
+ hsize_t points[32][SPACE7_RANK] = {
+        /* Yes, some of these are duplicated... */
+ {2, 2}, {2, 3}, {2, 4}, {2, 5}, {3, 2}, {3, 3}, {3, 4}, {3, 5}, {4, 2}, {4, 3}, {4, 4},
+ {4, 5}, {5, 2}, {5, 3}, {5, 4}, {5, 5}, {4, 4}, {4, 5}, {4, 6}, {4, 7}, {5, 4}, {5, 5},
+ {5, 6}, {5, 7}, {6, 4}, {6, 5}, {6, 6}, {6, 7}, {7, 4}, {7, 5}, {7, 6}, {7, 7},
+ };
+ hsize_t iter_points[28][SPACE7_RANK] = {
+ /* Coordinates, as iterated through */
+ {2, 2}, {2, 3}, {2, 4}, {2, 5}, {3, 2}, {3, 3}, {3, 4}, {3, 5}, {4, 2}, {4, 3},
+ {4, 4}, {4, 5}, {4, 6}, {4, 7}, {5, 2}, {5, 3}, {5, 4}, {5, 5}, {5, 6}, {5, 7},
+ {6, 4}, {6, 5}, {6, 6}, {6, 7}, {7, 4}, {7, 5}, {7, 6}, {7, 7},
+ };
+ size_t num_points = 32; /* Number of points selected */
+ size_t num_iter_points = 28; /* Number of resulting points */
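+    /* (the two OR'd 4x4 blocks below overlap in a 2x2 region, so 16 + 16 - 4 = 28 unique elements) */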
+ int fill_value; /* Fill value */
+ fill_iter_info iter_info; /* Iterator information structure */
+ unsigned *wbuf, /* buffer to write to disk */
+ *tbuf; /* temporary buffer pointer */
+ unsigned u, v, w; /* Counters */
+ herr_t ret; /* Generic return value */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing Filling Irregular 'hyperslab' Selections\n"));
+
+ /* Allocate memory buffer */
+ wbuf = (unsigned *)HDmalloc(sizeof(unsigned) * SPACE7_DIM1 * SPACE7_DIM2);
+ CHECK_PTR(wbuf, "HDmalloc");
+
+ /* Initialize memory buffer */
+ for (u = 0, tbuf = wbuf; u < SPACE7_DIM1; u++)
+ for (v = 0; v < SPACE7_DIM2; v++)
+ *tbuf++ = (u * SPACE7_DIM2) + v;
+
+ /* Create dataspace for dataset on disk */
+ sid1 = H5Screate_simple(SPACE7_RANK, dims1, NULL);
+ CHECK(sid1, FAIL, "H5Screate_simple");
+
+ /* Select first "hyperslab" selection */
+ start[0] = 2;
+ start[1] = 2;
+ count[0] = 4;
+ count[1] = 4;
+ ret = H5Sselect_hyperslab(sid1, H5S_SELECT_SET, start, NULL, count, NULL);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* Combine with second "hyperslab" selection */
+ start[0] = 4;
+ start[1] = 4;
+ count[0] = 4;
+ count[1] = 4;
+ ret = H5Sselect_hyperslab(sid1, H5S_SELECT_OR, start, NULL, count, NULL);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ if (offset != NULL) {
+ HDmemcpy(real_offset, offset, SPACE7_RANK * sizeof(hssize_t));
+
+ /* Set offset, if provided */
+ ret = H5Soffset_simple(sid1, real_offset);
+ CHECK(ret, FAIL, "H5Soffset_simple");
+ } /* end if */
+ else
+ HDmemset(real_offset, 0, SPACE7_RANK * sizeof(hssize_t));
+
+ /* Set fill value */
+ fill_value = SPACE7_FILL;
+
+ /* Fill selection in memory */
+ ret = H5Dfill(&fill_value, H5T_NATIVE_INT, wbuf, H5T_NATIVE_UINT, sid1);
+ CHECK(ret, FAIL, "H5Dfill");
+
+ /* Verify memory buffer the hard way... */
+ for (u = 0, tbuf = wbuf; u < SPACE7_DIM1; u++)
+ for (v = 0; v < SPACE7_DIM2; v++, tbuf++) {
+ for (w = 0; w < (unsigned)num_points; w++) {
+ if (u == (unsigned)((hssize_t)points[w][0] + real_offset[0]) &&
+ v == (unsigned)((hssize_t)points[w][1] + real_offset[1])) {
+ if (*tbuf != (unsigned)fill_value)
+ TestErrPrintf("Error! v=%u, u=%u, *tbuf=%u, fill_value=%u\n", v, u, *tbuf,
+ (unsigned)fill_value);
+ break;
+ } /* end if */
+ } /* end for */
+ if (w == (unsigned)num_points && *tbuf != ((u * SPACE7_DIM2) + v))
+ TestErrPrintf("Error! v=%u, u=%u, *tbuf=%u, should be: %u\n", v, u, *tbuf,
+ ((u * SPACE7_DIM2) + v));
+ } /* end for */
+
+ /* Initialize the iterator structure */
+ iter_info.fill_value = SPACE7_FILL;
+ iter_info.curr_coord = 0;
+ iter_info.coords = (hsize_t *)iter_points;
+
+ /* Add in the offset */
+ for (u = 0; u < (unsigned)num_iter_points; u++) {
+ iter_points[u][0] = (hsize_t)((hssize_t)iter_points[u][0] + real_offset[0]);
+ iter_points[u][1] = (hsize_t)((hssize_t)iter_points[u][1] + real_offset[1]);
+ } /* end for */
+
+ /* Iterate through selection, verifying correct data */
+ ret = H5Diterate(wbuf, H5T_NATIVE_UINT, sid1, test_select_hyper_iter3, &iter_info);
+ CHECK(ret, FAIL, "H5Diterate");
+
+ /* Close dataspace */
+ ret = H5Sclose(sid1);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Free memory buffers */
+ HDfree(wbuf);
+} /* test_select_fill_hyper_irregular() */
+
+/****************************************************************
+**
+** test_select_none(): Test basic H5S (dataspace) selection code.
+** Tests I/O on 0-sized ("none") selections
+**
+****************************************************************/
+static void
+test_select_none(void)
+{
+ hid_t fid1; /* HDF5 File IDs */
+ hid_t dataset; /* Dataset ID */
+ hid_t sid1, sid2; /* Dataspace ID */
+ hsize_t dims1[] = {SPACE7_DIM1, SPACE7_DIM2};
+ hsize_t dims2[] = {SPACE7_DIM1, SPACE7_DIM2};
+ uint8_t *wbuf, /* buffer to write to disk */
+ *rbuf, /* buffer to read from disk */
+ *tbuf; /* temporary buffer pointer */
+ int i, j; /* Counters */
+ herr_t ret; /* Generic return value */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing I/O on 0-sized Selections\n"));
+
+ /* Allocate write & read buffers */
+ wbuf = (uint8_t *)HDmalloc(sizeof(uint8_t) * SPACE7_DIM1 * SPACE7_DIM2);
+ CHECK_PTR(wbuf, "HDmalloc");
+ rbuf = (uint8_t *)HDcalloc(sizeof(uint8_t), SPACE7_DIM1 * SPACE7_DIM2);
+ CHECK_PTR(rbuf, "HDcalloc");
+
+ /* Initialize write buffer */
+ for (i = 0, tbuf = wbuf; i < SPACE7_DIM1; i++)
+ for (j = 0; j < SPACE7_DIM2; j++)
+ *tbuf++ = (uint8_t)((i * SPACE7_DIM2) + j);
+
+ /* Create file */
+ fid1 = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(fid1, FAIL, "H5Fcreate");
+
+ /* Create dataspace for dataset */
+ sid1 = H5Screate_simple(SPACE7_RANK, dims1, NULL);
+ CHECK(sid1, FAIL, "H5Screate_simple");
+
+ /* Create dataspace for writing buffer */
+ sid2 = H5Screate_simple(SPACE7_RANK, dims2, NULL);
+ CHECK(sid2, FAIL, "H5Screate_simple");
+
+ /* Create a dataset */
+ dataset = H5Dcreate2(fid1, "Dataset1", H5T_NATIVE_UCHAR, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(dataset, FAIL, "H5Dcreate2");
+
+ /* Make "none" selection in both disk and memory datasets */
+ ret = H5Sselect_none(sid1);
+ CHECK(ret, FAIL, "H5Sselect_none");
+
+ ret = H5Sselect_none(sid2);
+ CHECK(ret, FAIL, "H5Sselect_none");
+
+ /* Attempt to read "nothing" from disk (before space is allocated) */
+ ret = H5Dread(dataset, H5T_NATIVE_UCHAR, sid2, sid1, H5P_DEFAULT, rbuf);
+ CHECK(ret, FAIL, "H5Dread");
+
+ /* Write "nothing" to disk */
+ ret = H5Dwrite(dataset, H5T_NATIVE_UCHAR, sid2, sid1, H5P_DEFAULT, wbuf);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* Write "nothing" to disk (with a datatype conversion :-) */
+ ret = H5Dwrite(dataset, H5T_NATIVE_INT, sid2, sid1, H5P_DEFAULT, wbuf);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* Write "nothing" to disk (with NULL buffer argument) */
+ ret = H5Dwrite(dataset, H5T_NATIVE_INT, sid2, sid1, H5P_DEFAULT, NULL);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* Read "nothing" from disk (with NULL buffer argument) */
+ ret = H5Dread(dataset, H5T_NATIVE_INT, sid2, sid1, H5P_DEFAULT, NULL);
+ CHECK(ret, FAIL, "H5Dread");
+
+ /* Close memory dataspace */
+ ret = H5Sclose(sid2);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close disk dataspace */
+ ret = H5Sclose(sid1);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close Dataset */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close file */
+ ret = H5Fclose(fid1);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Free memory buffers */
+ HDfree(wbuf);
+ HDfree(rbuf);
+} /* test_select_none() */
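+
+/* Editorial sketch (illustration only; guarded out so it is not built): an application-side
+ * sanity check that a "none" selection really selects zero elements before issuing the
+ * no-op reads/writes exercised above.  The helper name is an assumption for illustration;
+ * it is not part of this test.
+ */
+#if 0
+static void
+example_check_none_selection(hid_t sid)
+{
+    /* A "none" selection reports zero selected elements */
+    hssize_t nelem = H5Sget_select_npoints(sid);
+
+    if (nelem != 0)
+        TestErrPrintf("Expected an empty selection, but %lld element(s) selected\n", (long long)nelem);
+}
+#endif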
+
+/****************************************************************
+**
+** test_scalar_select(): Test basic H5S (dataspace) selection code.
+** Tests selections on scalar dataspaces
+**
+****************************************************************/
+static void
+test_scalar_select(void)
+{
+ hid_t fid1; /* HDF5 File IDs */
+ hid_t dataset; /* Dataset ID */
+ hid_t sid1, sid2; /* Dataspace ID */
+ hsize_t dims2[] = {SPACE7_DIM1, SPACE7_DIM2};
+ hsize_t coord1[SPACE7_RANK]; /* Coordinates for point selection */
+ hsize_t start[SPACE7_RANK]; /* Hyperslab start */
+ hsize_t count[SPACE7_RANK]; /* Hyperslab block count */
+ uint8_t *wbuf_uint8, /* buffer to write to disk */
+ rval_uint8, /* value read back in */
+ *tbuf_uint8; /* temporary buffer pointer */
+ unsigned short *wbuf_ushort, /* another buffer to write to disk */
+ rval_ushort, /* value read back in */
+ *tbuf_ushort; /* temporary buffer pointer */
+ int i, j; /* Counters */
+ herr_t ret; /* Generic return value */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing I/O on Selections in Scalar Dataspaces\n"));
+
+ /* Allocate write & read buffers */
+ wbuf_uint8 = (uint8_t *)HDmalloc(sizeof(uint8_t) * SPACE7_DIM1 * SPACE7_DIM2);
+ CHECK_PTR(wbuf_uint8, "HDmalloc");
+ wbuf_ushort = (unsigned short *)HDmalloc(sizeof(unsigned short) * SPACE7_DIM1 * SPACE7_DIM2);
+ CHECK_PTR(wbuf_ushort, "HDmalloc");
+
+ /* Initialize write buffers */
+ for (i = 0, tbuf_uint8 = wbuf_uint8, tbuf_ushort = wbuf_ushort; i < SPACE7_DIM1; i++)
+ for (j = 0; j < SPACE7_DIM2; j++) {
+ *tbuf_uint8++ = (uint8_t)((i * SPACE7_DIM2) + j);
+ *tbuf_ushort++ = (unsigned short)((j * SPACE7_DIM2) + i);
+ } /* end for */
+
+ /* Create file */
+ fid1 = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(fid1, FAIL, "H5Fcreate");
+
+ /* Create dataspace for dataset */
+ sid1 = H5Screate(H5S_SCALAR);
+    CHECK(sid1, FAIL, "H5Screate");
+
+ /* Create dataspace for writing buffer */
+ sid2 = H5Screate_simple(SPACE7_RANK, dims2, NULL);
+ CHECK(sid2, FAIL, "H5Screate_simple");
+
+ /* Create a dataset */
+ dataset = H5Dcreate2(fid1, "Dataset1", H5T_NATIVE_UCHAR, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(dataset, FAIL, "H5Dcreate2");
+
+ /* Select one element in memory with a point selection */
+ coord1[0] = 0;
+ coord1[1] = 2;
+ ret = H5Sselect_elements(sid2, H5S_SELECT_SET, (size_t)1, (const hsize_t *)&coord1);
+ CHECK(ret, FAIL, "H5Sselect_elements");
+
+ /* Write single point to disk */
+ ret = H5Dwrite(dataset, H5T_NATIVE_UCHAR, sid2, sid1, H5P_DEFAULT, wbuf_uint8);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* Read scalar element from disk */
+ ret = H5Dread(dataset, H5T_NATIVE_UCHAR, sid1, sid1, H5P_DEFAULT, &rval_uint8);
+ CHECK(ret, FAIL, "H5Dread");
+
+ /* Check value read back in */
+ if (rval_uint8 != *(wbuf_uint8 + 2))
+ TestErrPrintf("Error! rval=%u, should be: *(wbuf+2)=%u\n", (unsigned)rval_uint8,
+ (unsigned)*(wbuf_uint8 + 2));
+
+ /* Write single point to disk (with a datatype conversion) */
+ ret = H5Dwrite(dataset, H5T_NATIVE_USHORT, sid2, sid1, H5P_DEFAULT, wbuf_ushort);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* Read scalar element from disk */
+ ret = H5Dread(dataset, H5T_NATIVE_USHORT, sid1, sid1, H5P_DEFAULT, &rval_ushort);
+ CHECK(ret, FAIL, "H5Dread");
+
+ /* Check value read back in */
+ if (rval_ushort != *(wbuf_ushort + 2))
+ TestErrPrintf("Error! rval=%u, should be: *(wbuf+2)=%u\n", (unsigned)rval_ushort,
+ (unsigned)*(wbuf_ushort + 2));
+
+ /* Select one element in memory with a hyperslab selection */
+ start[0] = 4;
+ start[1] = 3;
+ count[0] = 1;
+ count[1] = 1;
+ ret = H5Sselect_hyperslab(sid2, H5S_SELECT_SET, start, NULL, count, NULL);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* Write single hyperslab element to disk */
+ ret = H5Dwrite(dataset, H5T_NATIVE_UCHAR, sid2, sid1, H5P_DEFAULT, wbuf_uint8);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* Read scalar element from disk */
+ ret = H5Dread(dataset, H5T_NATIVE_UCHAR, sid1, sid1, H5P_DEFAULT, &rval_uint8);
+ CHECK(ret, FAIL, "H5Dread");
+
+ /* Check value read back in */
+ if (rval_uint8 != *(wbuf_uint8 + (SPACE7_DIM2 * 4) + 3))
+ TestErrPrintf("Error! rval=%u, should be: *(wbuf+(SPACE7_DIM2*4)+3)=%u\n", (unsigned)rval_uint8,
+ (unsigned)*(wbuf_uint8 + (SPACE7_DIM2 * 4) + 3));
+
+ /* Write single hyperslab element to disk (with a datatype conversion) */
+ ret = H5Dwrite(dataset, H5T_NATIVE_USHORT, sid2, sid1, H5P_DEFAULT, wbuf_ushort);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* Read scalar element from disk */
+ ret = H5Dread(dataset, H5T_NATIVE_USHORT, sid1, sid1, H5P_DEFAULT, &rval_ushort);
+ CHECK(ret, FAIL, "H5Dread");
+
+ /* Check value read back in */
+ if (rval_ushort != *(wbuf_ushort + (SPACE7_DIM2 * 4) + 3))
+ TestErrPrintf("Error! rval=%u, should be: *(wbuf+(SPACE7_DIM2*4)+3)=%u\n", (unsigned)rval_ushort,
+ (unsigned)*(wbuf_ushort + (SPACE7_DIM2 * 4) + 3));
+
+ /* Select no elements in memory & file with "none" selections */
+ ret = H5Sselect_none(sid1);
+ CHECK(ret, FAIL, "H5Sselect_none");
+
+ ret = H5Sselect_none(sid2);
+ CHECK(ret, FAIL, "H5Sselect_none");
+
+ /* Write no data to disk */
+ ret = H5Dwrite(dataset, H5T_NATIVE_UCHAR, sid2, sid1, H5P_DEFAULT, wbuf_uint8);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* Write no data to disk (with a datatype conversion) */
+ ret = H5Dwrite(dataset, H5T_NATIVE_USHORT, sid2, sid1, H5P_DEFAULT, wbuf_ushort);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* Close memory dataspace */
+ ret = H5Sclose(sid2);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close disk dataspace */
+ ret = H5Sclose(sid1);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close Dataset */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close file */
+ ret = H5Fclose(fid1);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Free memory buffers */
+ HDfree(wbuf_uint8);
+ HDfree(wbuf_ushort);
+} /* test_scalar_select() */
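+
+/* Editorial sketch (illustration only; guarded out so it is not built): the single-element
+ * memory selections used above pair with the scalar file dataspace because they compare as
+ * the "same shape" as a scalar selection -- test_shape_same() below verifies this pairing
+ * explicitly.  An isolated check of that property (illustrative helper name, not part of
+ * the test):
+ */
+#if 0
+static void
+example_scalar_shape_check(hid_t one_elem_mem_sid)
+{
+    hid_t  scalar_sid = H5Screate(H5S_SCALAR);
+    htri_t same       = H5Sselect_shape_same(one_elem_mem_sid, scalar_sid);
+
+    /* A selection containing exactly one element has the same shape as a scalar dataspace */
+    VERIFY(same, TRUE, "H5Sselect_shape_same");
+
+    H5Sclose(scalar_sid);
+}
+#endif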
+
+/****************************************************************
+**
+** test_scalar_select2(): Tests selections on scalar dataspaces;
+** verifies that H5Sselect_hyperslab and H5Sselect_elements fail for
+** a scalar dataspace.
+**
+****************************************************************/
+static void
+test_scalar_select2(void)
+{
+ hid_t sid; /* Dataspace ID */
+ hsize_t coord1[1]; /* Coordinates for point selection */
+ hsize_t start[1]; /* Hyperslab start */
+ hsize_t count[1]; /* Hyperslab block count */
+ herr_t ret; /* Generic return value */
+
+ /* Output message about test being performed */
+ MESSAGE(6, ("Testing Selections in Scalar Dataspaces\n"));
+
+ /* Create dataspace for dataset */
+ sid = H5Screate(H5S_SCALAR);
+    CHECK(sid, FAIL, "H5Screate");
+
+ /* Select one element in memory with a point selection */
+ coord1[0] = 0;
+ H5E_BEGIN_TRY
+ {
+ ret = H5Sselect_elements(sid, H5S_SELECT_SET, (size_t)1, (const hsize_t *)&coord1);
+ }
+ H5E_END_TRY;
+ VERIFY(ret, FAIL, "H5Sselect_elements");
+
+ /* Select one element in memory with a hyperslab selection */
+ start[0] = 0;
+ count[0] = 0;
+ H5E_BEGIN_TRY
+ {
+ ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, start, NULL, count, NULL);
+ }
+ H5E_END_TRY;
+ VERIFY(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* Select no elements in memory & file with "none" selection */
+ ret = H5Sselect_none(sid);
+ CHECK(ret, FAIL, "H5Sselect_none");
+
+ /* Select all elements in memory & file with "all" selection */
+ ret = H5Sselect_all(sid);
+ CHECK(ret, FAIL, "H5Sselect_all");
+
+ /* Close disk dataspace */
+ ret = H5Sclose(sid);
+ CHECK(ret, FAIL, "H5Sclose");
+} /* test_scalar_select2() */
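+
+/* Editorial sketch (illustration only; guarded out so it is not built): code that needs to
+ * avoid the failures exercised above can detect a scalar dataspace up front with
+ * H5Sget_simple_extent_type() and skip point/hyperslab selection calls on it.  The helper
+ * name is an assumption for illustration; it is not part of this test.
+ */
+#if 0
+static int
+example_supports_subsetting(hid_t sid)
+{
+    /* Point and hyperslab selections are only valid on simple (non-scalar, non-null) dataspaces */
+    return H5Sget_simple_extent_type(sid) == H5S_SIMPLE;
+}
+#endif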
+
+/****************************************************************
+**
+** test_scalar_select3(): Test basic H5S (dataspace) selection code.
+** Tests selections on scalar dataspaces in memory
+**
+****************************************************************/
+static void
+test_scalar_select3(void)
+{
+ hid_t fid1; /* HDF5 File IDs */
+ hid_t dataset; /* Dataset ID */
+ hid_t sid1, sid2; /* Dataspace ID */
+ hsize_t dims2[] = {SPACE7_DIM1, SPACE7_DIM2};
+ hsize_t coord1[SPACE7_RANK]; /* Coordinates for point selection */
+ hsize_t start[SPACE7_RANK]; /* Hyperslab start */
+ hsize_t count[SPACE7_RANK]; /* Hyperslab block count */
+ uint8_t wval_uint8, /* Value written out */
+ rval_uint8; /* Value read in */
+ unsigned short wval_ushort, /* Another value written out */
+ rval_ushort; /* Another value read in */
+ herr_t ret; /* Generic return value */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing I/O on Selections in Scalar Dataspaces in Memory\n"));
+
+ /* Create file */
+ fid1 = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(fid1, FAIL, "H5Fcreate");
+
+ /* Create dataspace for dataset */
+ sid1 = H5Screate_simple(SPACE7_RANK, dims2, NULL);
+ CHECK(sid1, FAIL, "H5Screate_simple");
+
+ /* Create dataspace for writing buffer */
+ sid2 = H5Screate(H5S_SCALAR);
+    CHECK(sid2, FAIL, "H5Screate");
+
+ /* Create a dataset */
+ dataset = H5Dcreate2(fid1, "Dataset1", H5T_NATIVE_UCHAR, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(dataset, FAIL, "H5Dcreate2");
+
+ /* Select one element in file with a point selection */
+ coord1[0] = 0;
+ coord1[1] = 2;
+ ret = H5Sselect_elements(sid1, H5S_SELECT_SET, (size_t)1, (const hsize_t *)&coord1);
+ CHECK(ret, FAIL, "H5Sselect_elements");
+
+ /* Write single point to disk */
+ wval_uint8 = 12;
+ ret = H5Dwrite(dataset, H5T_NATIVE_UCHAR, sid2, sid1, H5P_DEFAULT, &wval_uint8);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* Read scalar element from disk */
+ rval_uint8 = 0;
+ ret = H5Dread(dataset, H5T_NATIVE_UCHAR, sid2, sid1, H5P_DEFAULT, &rval_uint8);
+ CHECK(ret, FAIL, "H5Dread");
+
+ /* Check value read back in */
+ if (rval_uint8 != wval_uint8)
+ TestErrPrintf("%u: Error! rval=%u, should be: wval=%u\n", (unsigned)__LINE__, (unsigned)rval_uint8,
+ (unsigned)wval_uint8);
+
+ /* Write single point to disk (with a datatype conversion) */
+ wval_ushort = 23;
+ ret = H5Dwrite(dataset, H5T_NATIVE_USHORT, sid2, sid1, H5P_DEFAULT, &wval_ushort);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* Read scalar element from disk */
+ rval_ushort = 0;
+ ret = H5Dread(dataset, H5T_NATIVE_USHORT, sid2, sid1, H5P_DEFAULT, &rval_ushort);
+ CHECK(ret, FAIL, "H5Dread");
+
+ /* Check value read back in */
+ if (rval_ushort != wval_ushort)
+ TestErrPrintf("%u: Error! rval=%u, should be: wval=%u\n", (unsigned)__LINE__, (unsigned)rval_ushort,
+ (unsigned)wval_ushort);
+
+ /* Select one element in file with a hyperslab selection */
+ start[0] = 4;
+ start[1] = 3;
+ count[0] = 1;
+ count[1] = 1;
+ ret = H5Sselect_hyperslab(sid1, H5S_SELECT_SET, start, NULL, count, NULL);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* Write single hyperslab element to disk */
+ wval_uint8 = 92;
+ ret = H5Dwrite(dataset, H5T_NATIVE_UCHAR, sid2, sid1, H5P_DEFAULT, &wval_uint8);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* Read scalar element from disk */
+ rval_uint8 = 0;
+ ret = H5Dread(dataset, H5T_NATIVE_UCHAR, sid2, sid1, H5P_DEFAULT, &rval_uint8);
+ CHECK(ret, FAIL, "H5Dread");
+
+ /* Check value read back in */
+ if (rval_uint8 != wval_uint8)
+ TestErrPrintf("%u: Error! rval=%u, should be: wval=%u\n", (unsigned)__LINE__, (unsigned)rval_uint8,
+ (unsigned)wval_uint8);
+
+ /* Write single hyperslab element to disk (with a datatype conversion) */
+ wval_ushort = 107;
+ ret = H5Dwrite(dataset, H5T_NATIVE_USHORT, sid2, sid1, H5P_DEFAULT, &wval_ushort);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* Read scalar element from disk */
+ rval_ushort = 0;
+ ret = H5Dread(dataset, H5T_NATIVE_USHORT, sid2, sid1, H5P_DEFAULT, &rval_ushort);
+ CHECK(ret, FAIL, "H5Dread");
+
+ /* Check value read back in */
+ if (rval_ushort != wval_ushort)
+ TestErrPrintf("%u: Error! rval=%u, should be: wval=%u\n", (unsigned)__LINE__, (unsigned)rval_ushort,
+ (unsigned)wval_ushort);
+
+ /* Select no elements in memory & file with "none" selections */
+ ret = H5Sselect_none(sid1);
+ CHECK(ret, FAIL, "H5Sselect_none");
+
+ ret = H5Sselect_none(sid2);
+ CHECK(ret, FAIL, "H5Sselect_none");
+
+ /* Write no data to disk */
+ ret = H5Dwrite(dataset, H5T_NATIVE_UCHAR, sid2, sid1, H5P_DEFAULT, &wval_uint8);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* Write no data to disk (with a datatype conversion) */
+ ret = H5Dwrite(dataset, H5T_NATIVE_USHORT, sid2, sid1, H5P_DEFAULT, &wval_ushort);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* Close memory dataspace */
+ ret = H5Sclose(sid2);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close disk dataspace */
+ ret = H5Sclose(sid1);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close Dataset */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close file */
+ ret = H5Fclose(fid1);
+ CHECK(ret, FAIL, "H5Fclose");
+} /* test_scalar_select3() */
+
+/****************************************************************
+**
+** test_shape_same(): Tests selections on dataspace, verify that
+** "shape same" routine is working correctly.
+**
+****************************************************************/
+static void
+test_shape_same(void)
+{
+ hid_t all_sid; /* Dataspace ID with "all" selection */
+ hid_t none_sid; /* Dataspace ID with "none" selection */
+ hid_t single_pt_sid; /* Dataspace ID with single point selection */
+ hid_t mult_pt_sid; /* Dataspace ID with multiple point selection */
+ hid_t single_hyper_sid; /* Dataspace ID with single block hyperslab selection */
+ hid_t single_hyper_all_sid; /* Dataspace ID with single block hyperslab
+ * selection that is the entire dataspace
+ */
+ hid_t single_hyper_pt_sid; /* Dataspace ID with single block hyperslab
+ * selection that is the same as the single
+ * point selection
+ */
+ hid_t regular_hyper_sid; /* Dataspace ID with regular hyperslab selection */
+ hid_t irreg_hyper_sid; /* Dataspace ID with irregular hyperslab selection */
+ hid_t none_hyper_sid; /* Dataspace ID with "no hyperslabs" selection */
+ hid_t scalar_all_sid; /* ID for scalar dataspace with "all" selection */
+ hid_t scalar_none_sid; /* ID for scalar dataspace with "none" selection */
+ hid_t tmp_sid; /* Temporary dataspace ID */
+ hsize_t dims[] = {SPACE9_DIM1, SPACE9_DIM2};
+    hsize_t coord1[1][SPACE9_RANK];               /* Coordinates for single point selection */
+ hsize_t coord2[SPACE9_DIM2][SPACE9_RANK]; /* Coordinates for multiple point selection */
+ hsize_t start[SPACE9_RANK]; /* Hyperslab start */
+ hsize_t stride[SPACE9_RANK]; /* Hyperslab stride */
+ hsize_t count[SPACE9_RANK]; /* Hyperslab block count */
+ hsize_t block[SPACE9_RANK]; /* Hyperslab block size */
+ unsigned u, v; /* Local index variables */
+ htri_t check; /* Shape comparison return value */
+ herr_t ret; /* Generic return value */
+
+ /* Output message about test being performed */
+ MESSAGE(6, ("Testing Same Shape Comparisons\n"));
+ HDassert(SPACE9_DIM2 >= POINT1_NPOINTS);
+
+ /* Create dataspace for "all" selection */
+ all_sid = H5Screate_simple(SPACE9_RANK, dims, NULL);
+ CHECK(all_sid, FAIL, "H5Screate_simple");
+
+ /* Select entire extent for dataspace */
+ ret = H5Sselect_all(all_sid);
+ CHECK(ret, FAIL, "H5Sselect_all");
+
+ /* Create dataspace for "none" selection */
+ none_sid = H5Screate_simple(SPACE9_RANK, dims, NULL);
+ CHECK(none_sid, FAIL, "H5Screate_simple");
+
+ /* Un-Select entire extent for dataspace */
+ ret = H5Sselect_none(none_sid);
+ CHECK(ret, FAIL, "H5Sselect_none");
+
+ /* Create dataspace for single point selection */
+ single_pt_sid = H5Screate_simple(SPACE9_RANK, dims, NULL);
+ CHECK(single_pt_sid, FAIL, "H5Screate_simple");
+
+    /* Select single point for single point selection */
+ coord1[0][0] = 2;
+ coord1[0][1] = 2;
+ ret = H5Sselect_elements(single_pt_sid, H5S_SELECT_SET, (size_t)1, (const hsize_t *)coord1);
+ CHECK(ret, FAIL, "H5Sselect_elements");
+
+ /* Create dataspace for multiple point selection */
+ mult_pt_sid = H5Screate_simple(SPACE9_RANK, dims, NULL);
+ CHECK(mult_pt_sid, FAIL, "H5Screate_simple");
+
+ /* Select sequence of ten points for multiple point selection */
+ coord2[0][0] = 2;
+ coord2[0][1] = 2;
+ coord2[1][0] = 7;
+ coord2[1][1] = 2;
+ coord2[2][0] = 1;
+ coord2[2][1] = 4;
+ coord2[3][0] = 2;
+ coord2[3][1] = 6;
+ coord2[4][0] = 0;
+ coord2[4][1] = 8;
+ coord2[5][0] = 3;
+ coord2[5][1] = 2;
+ coord2[6][0] = 4;
+ coord2[6][1] = 4;
+ coord2[7][0] = 1;
+ coord2[7][1] = 0;
+ coord2[8][0] = 5;
+ coord2[8][1] = 1;
+ coord2[9][0] = 9;
+ coord2[9][1] = 3;
+ ret = H5Sselect_elements(mult_pt_sid, H5S_SELECT_SET, (size_t)POINT1_NPOINTS, (const hsize_t *)coord2);
+ CHECK(ret, FAIL, "H5Sselect_elements");
+
+ /* Create dataspace for single hyperslab selection */
+ single_hyper_sid = H5Screate_simple(SPACE9_RANK, dims, NULL);
+ CHECK(single_hyper_sid, FAIL, "H5Screate_simple");
+
+ /* Select 10x10 hyperslab for single hyperslab selection */
+ start[0] = 1;
+ start[1] = 1;
+ stride[0] = 1;
+ stride[1] = 1;
+ count[0] = 1;
+ count[1] = 1;
+ block[0] = (SPACE9_DIM1 - 2);
+ block[1] = (SPACE9_DIM2 - 2);
+ ret = H5Sselect_hyperslab(single_hyper_sid, H5S_SELECT_SET, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* Create dataspace for single hyperslab selection with entire extent selected */
+ single_hyper_all_sid = H5Screate_simple(SPACE9_RANK, dims, NULL);
+ CHECK(single_hyper_all_sid, FAIL, "H5Screate_simple");
+
+ /* Select entire extent for hyperslab selection */
+ start[0] = 0;
+ start[1] = 0;
+ stride[0] = 1;
+ stride[1] = 1;
+ count[0] = 1;
+ count[1] = 1;
+ block[0] = SPACE9_DIM1;
+ block[1] = SPACE9_DIM2;
+ ret = H5Sselect_hyperslab(single_hyper_all_sid, H5S_SELECT_SET, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* Create dataspace for single hyperslab selection with single point selected */
+ single_hyper_pt_sid = H5Screate_simple(SPACE9_RANK, dims, NULL);
+ CHECK(single_hyper_pt_sid, FAIL, "H5Screate_simple");
+
+    /* Select single element (1x1 block) for hyperslab selection */
+ start[0] = 2;
+ start[1] = 2;
+ stride[0] = 1;
+ stride[1] = 1;
+ count[0] = 1;
+ count[1] = 1;
+ block[0] = 1;
+ block[1] = 1;
+ ret = H5Sselect_hyperslab(single_hyper_pt_sid, H5S_SELECT_SET, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* Create dataspace for regular hyperslab selection */
+ regular_hyper_sid = H5Screate_simple(SPACE9_RANK, dims, NULL);
+ CHECK(regular_hyper_sid, FAIL, "H5Screate_simple");
+
+ /* Select regular, strided hyperslab selection */
+ start[0] = 2;
+ start[1] = 2;
+ stride[0] = 2;
+ stride[1] = 2;
+ count[0] = 5;
+ count[1] = 2;
+ block[0] = 1;
+ block[1] = 1;
+ ret = H5Sselect_hyperslab(regular_hyper_sid, H5S_SELECT_SET, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* Create dataspace for irregular hyperslab selection */
+ irreg_hyper_sid = H5Screate_simple(SPACE9_RANK, dims, NULL);
+ CHECK(irreg_hyper_sid, FAIL, "H5Screate_simple");
+
+ /* Create irregular hyperslab selection by OR'ing two blocks together */
+ start[0] = 2;
+ start[1] = 2;
+ stride[0] = 1;
+ stride[1] = 1;
+ count[0] = 1;
+ count[1] = 1;
+ block[0] = 1;
+ block[1] = 1;
+ ret = H5Sselect_hyperslab(irreg_hyper_sid, H5S_SELECT_SET, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ start[0] = 4;
+ start[1] = 4;
+ stride[0] = 1;
+ stride[1] = 1;
+ count[0] = 1;
+ count[1] = 1;
+ block[0] = 3;
+ block[1] = 3;
+ ret = H5Sselect_hyperslab(irreg_hyper_sid, H5S_SELECT_OR, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* Create dataspace for "no" hyperslab selection */
+ none_hyper_sid = H5Screate_simple(SPACE9_RANK, dims, NULL);
+ CHECK(none_hyper_sid, FAIL, "H5Screate_simple");
+
+ /* Create "no" hyperslab selection by XOR'ing same blocks together */
+ start[0] = 2;
+ start[1] = 2;
+ stride[0] = 1;
+ stride[1] = 1;
+ count[0] = 1;
+ count[1] = 1;
+ block[0] = 1;
+ block[1] = 1;
+ ret = H5Sselect_hyperslab(none_hyper_sid, H5S_SELECT_SET, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ ret = H5Sselect_hyperslab(none_hyper_sid, H5S_SELECT_XOR, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* Create scalar dataspace for "all" selection */
+ scalar_all_sid = H5Screate(H5S_SCALAR);
+ CHECK(scalar_all_sid, FAIL, "H5Screate");
+
+ /* Create scalar dataspace for "none" selection */
+ scalar_none_sid = H5Screate(H5S_SCALAR);
+ CHECK(scalar_none_sid, FAIL, "H5Screate");
+
+ /* Un-Select entire extent for dataspace */
+ ret = H5Sselect_none(scalar_none_sid);
+ CHECK(ret, FAIL, "H5Sselect_none");
+
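+    /* Summary of the "same shape" pairs verified below: the "all" selection matches the
+     * full-extent hyperslab; the "none" selection matches both the XOR'd-away hyperslab and
+     * the scalar "none" selection; and the single point selection matches the 1x1 hyperslab
+     * and the scalar "all" selection.  Every other pairing of the pre-built selections is
+     * expected to differ; additional matching selections are constructed on the fly below.
+     */
+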
+ /* Compare "all" selection to all the selections created */
+ /* Compare against itself */
+ check = H5Sselect_shape_same(all_sid, all_sid);
+ VERIFY(check, TRUE, "H5Sselect_shape_same");
+
+ /* Compare against copy of itself */
+ tmp_sid = H5Scopy(all_sid);
+ CHECK(tmp_sid, FAIL, "H5Scopy");
+
+ check = H5Sselect_shape_same(all_sid, tmp_sid);
+ VERIFY(check, TRUE, "H5Sselect_shape_same");
+
+ ret = H5Sclose(tmp_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Compare against "none" selection */
+ check = H5Sselect_shape_same(all_sid, none_sid);
+ VERIFY(check, FALSE, "H5Sselect_shape_same");
+
+ /* Compare against single point selection */
+ check = H5Sselect_shape_same(all_sid, single_pt_sid);
+ VERIFY(check, FALSE, "H5Sselect_shape_same");
+
+ /* Compare against multiple point selection */
+ check = H5Sselect_shape_same(all_sid, mult_pt_sid);
+ VERIFY(check, FALSE, "H5Sselect_shape_same");
+
+ /* Compare against "plain" single hyperslab selection */
+ check = H5Sselect_shape_same(all_sid, single_hyper_sid);
+ VERIFY(check, FALSE, "H5Sselect_shape_same");
+
+ /* Compare against "all" single hyperslab selection */
+ check = H5Sselect_shape_same(all_sid, single_hyper_all_sid);
+ VERIFY(check, TRUE, "H5Sselect_shape_same");
+
+ /* Compare against "single point" single hyperslab selection */
+ check = H5Sselect_shape_same(all_sid, single_hyper_pt_sid);
+ VERIFY(check, FALSE, "H5Sselect_shape_same");
+
+ /* Compare against regular, strided hyperslab selection */
+ check = H5Sselect_shape_same(all_sid, regular_hyper_sid);
+ VERIFY(check, FALSE, "H5Sselect_shape_same");
+
+ /* Compare against irregular hyperslab selection */
+ check = H5Sselect_shape_same(all_sid, irreg_hyper_sid);
+ VERIFY(check, FALSE, "H5Sselect_shape_same");
+
+ /* Compare against "no" hyperslab selection */
+ check = H5Sselect_shape_same(all_sid, none_hyper_sid);
+ VERIFY(check, FALSE, "H5Sselect_shape_same");
+
+ /* Compare against scalar "all" hyperslab selection */
+ check = H5Sselect_shape_same(all_sid, scalar_all_sid);
+ VERIFY(check, FALSE, "H5Sselect_shape_same");
+
+ /* Compare against scalar "none" hyperslab selection */
+ check = H5Sselect_shape_same(all_sid, scalar_none_sid);
+ VERIFY(check, FALSE, "H5Sselect_shape_same");
+
+ /* Compare "none" selection to all the selections created */
+ /* Compare against itself */
+ check = H5Sselect_shape_same(none_sid, none_sid);
+ VERIFY(check, TRUE, "H5Sselect_shape_same");
+
+ /* Compare against copy of itself */
+ tmp_sid = H5Scopy(none_sid);
+ CHECK(tmp_sid, FAIL, "H5Scopy");
+
+ check = H5Sselect_shape_same(none_sid, tmp_sid);
+ VERIFY(check, TRUE, "H5Sselect_shape_same");
+
+ ret = H5Sclose(tmp_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Compare against "all" selection */
+ check = H5Sselect_shape_same(none_sid, all_sid);
+ VERIFY(check, FALSE, "H5Sselect_shape_same");
+
+ /* Compare against single point selection */
+ check = H5Sselect_shape_same(none_sid, single_pt_sid);
+ VERIFY(check, FALSE, "H5Sselect_shape_same");
+
+ /* Compare against multiple point selection */
+ check = H5Sselect_shape_same(none_sid, mult_pt_sid);
+ VERIFY(check, FALSE, "H5Sselect_shape_same");
+
+ /* Compare against "plain" single hyperslab selection */
+ check = H5Sselect_shape_same(none_sid, single_hyper_sid);
+ VERIFY(check, FALSE, "H5Sselect_shape_same");
+
+ /* Compare against "all" single hyperslab selection */
+ check = H5Sselect_shape_same(none_sid, single_hyper_all_sid);
+ VERIFY(check, FALSE, "H5Sselect_shape_same");
+
+ /* Compare against "single point" single hyperslab selection */
+ check = H5Sselect_shape_same(none_sid, single_hyper_pt_sid);
+ VERIFY(check, FALSE, "H5Sselect_shape_same");
+
+ /* Compare against regular, strided hyperslab selection */
+ check = H5Sselect_shape_same(none_sid, regular_hyper_sid);
+ VERIFY(check, FALSE, "H5Sselect_shape_same");
+
+ /* Compare against irregular hyperslab selection */
+ check = H5Sselect_shape_same(none_sid, irreg_hyper_sid);
+ VERIFY(check, FALSE, "H5Sselect_shape_same");
+
+ /* Compare against "no" hyperslab selection */
+ check = H5Sselect_shape_same(none_sid, none_hyper_sid);
+ VERIFY(check, TRUE, "H5Sselect_shape_same");
+
+ /* Compare against scalar "all" hyperslab selection */
+ check = H5Sselect_shape_same(none_sid, scalar_all_sid);
+ VERIFY(check, FALSE, "H5Sselect_shape_same");
+
+ /* Compare against scalar "none" hyperslab selection */
+ check = H5Sselect_shape_same(none_sid, scalar_none_sid);
+ VERIFY(check, TRUE, "H5Sselect_shape_same");
+
+ /* Compare single point selection to all the selections created */
+ /* Compare against itself */
+ check = H5Sselect_shape_same(single_pt_sid, single_pt_sid);
+ VERIFY(check, TRUE, "H5Sselect_shape_same");
+
+ /* Compare against copy of itself */
+ tmp_sid = H5Scopy(single_pt_sid);
+ CHECK(tmp_sid, FAIL, "H5Scopy");
+
+ check = H5Sselect_shape_same(single_pt_sid, tmp_sid);
+ VERIFY(check, TRUE, "H5Sselect_shape_same");
+
+ ret = H5Sclose(tmp_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Compare against "all" selection */
+ check = H5Sselect_shape_same(single_pt_sid, all_sid);
+ VERIFY(check, FALSE, "H5Sselect_shape_same");
+
+ /* Compare against "none" selection */
+ check = H5Sselect_shape_same(single_pt_sid, none_sid);
+ VERIFY(check, FALSE, "H5Sselect_shape_same");
+
+ /* Compare against multiple point selection */
+ check = H5Sselect_shape_same(single_pt_sid, mult_pt_sid);
+ VERIFY(check, FALSE, "H5Sselect_shape_same");
+
+ /* Compare against "plain" single hyperslab selection */
+ check = H5Sselect_shape_same(single_pt_sid, single_hyper_sid);
+ VERIFY(check, FALSE, "H5Sselect_shape_same");
+
+ /* Compare against "all" single hyperslab selection */
+ check = H5Sselect_shape_same(single_pt_sid, single_hyper_all_sid);
+ VERIFY(check, FALSE, "H5Sselect_shape_same");
+
+ /* Compare against "single point" single hyperslab selection */
+ check = H5Sselect_shape_same(single_pt_sid, single_hyper_pt_sid);
+ VERIFY(check, TRUE, "H5Sselect_shape_same");
+
+ /* Compare against regular, strided hyperslab selection */
+ check = H5Sselect_shape_same(single_pt_sid, regular_hyper_sid);
+ VERIFY(check, FALSE, "H5Sselect_shape_same");
+
+ /* Compare against irregular hyperslab selection */
+ check = H5Sselect_shape_same(single_pt_sid, irreg_hyper_sid);
+ VERIFY(check, FALSE, "H5Sselect_shape_same");
+
+ /* Compare against "no" hyperslab selection */
+ check = H5Sselect_shape_same(single_pt_sid, none_hyper_sid);
+ VERIFY(check, FALSE, "H5Sselect_shape_same");
+
+ /* Compare against scalar "all" hyperslab selection */
+ check = H5Sselect_shape_same(single_pt_sid, scalar_all_sid);
+ VERIFY(check, TRUE, "H5Sselect_shape_same");
+
+ /* Compare against scalar "none" hyperslab selection */
+ check = H5Sselect_shape_same(single_pt_sid, scalar_none_sid);
+ VERIFY(check, FALSE, "H5Sselect_shape_same");
+
+ /* Compare multiple point selection to all the selections created */
+ /* Compare against itself */
+ check = H5Sselect_shape_same(mult_pt_sid, mult_pt_sid);
+ VERIFY(check, TRUE, "H5Sselect_shape_same");
+
+ /* Compare against copy of itself */
+ tmp_sid = H5Scopy(mult_pt_sid);
+ CHECK(tmp_sid, FAIL, "H5Scopy");
+
+ check = H5Sselect_shape_same(mult_pt_sid, tmp_sid);
+ VERIFY(check, TRUE, "H5Sselect_shape_same");
+
+ ret = H5Sclose(tmp_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Compare against "all" selection */
+ check = H5Sselect_shape_same(mult_pt_sid, all_sid);
+ VERIFY(check, FALSE, "H5Sselect_shape_same");
+
+ /* Compare against "none" selection */
+ check = H5Sselect_shape_same(mult_pt_sid, none_sid);
+ VERIFY(check, FALSE, "H5Sselect_shape_same");
+
+ /* Compare against single point selection */
+ check = H5Sselect_shape_same(mult_pt_sid, single_pt_sid);
+ VERIFY(check, FALSE, "H5Sselect_shape_same");
+
+ /* Compare against "plain" single hyperslab selection */
+ check = H5Sselect_shape_same(mult_pt_sid, single_hyper_sid);
+ VERIFY(check, FALSE, "H5Sselect_shape_same");
+
+ /* Compare against "all" single hyperslab selection */
+ check = H5Sselect_shape_same(mult_pt_sid, single_hyper_all_sid);
+ VERIFY(check, FALSE, "H5Sselect_shape_same");
+
+ /* Compare against "single point" single hyperslab selection */
+ check = H5Sselect_shape_same(mult_pt_sid, single_hyper_pt_sid);
+ VERIFY(check, FALSE, "H5Sselect_shape_same");
+
+ /* Compare against regular, strided hyperslab selection */
+ check = H5Sselect_shape_same(mult_pt_sid, regular_hyper_sid);
+ VERIFY(check, FALSE, "H5Sselect_shape_same");
+
+ /* Compare against irregular hyperslab selection */
+ check = H5Sselect_shape_same(mult_pt_sid, irreg_hyper_sid);
+ VERIFY(check, FALSE, "H5Sselect_shape_same");
+
+ /* Compare against "no" hyperslab selection */
+ check = H5Sselect_shape_same(mult_pt_sid, none_hyper_sid);
+ VERIFY(check, FALSE, "H5Sselect_shape_same");
+
+ /* Compare against scalar "all" hyperslab selection */
+ check = H5Sselect_shape_same(mult_pt_sid, scalar_all_sid);
+ VERIFY(check, FALSE, "H5Sselect_shape_same");
+
+ /* Compare against scalar "none" hyperslab selection */
+ check = H5Sselect_shape_same(mult_pt_sid, scalar_none_sid);
+ VERIFY(check, FALSE, "H5Sselect_shape_same");
+
+ /* Compare single "normal" hyperslab selection to all the selections created */
+ /* Compare against itself */
+ check = H5Sselect_shape_same(single_hyper_sid, single_hyper_sid);
+ VERIFY(check, TRUE, "H5Sselect_shape_same");
+
+ /* Compare against copy of itself */
+ tmp_sid = H5Scopy(single_hyper_sid);
+ CHECK(tmp_sid, FAIL, "H5Scopy");
+
+ check = H5Sselect_shape_same(single_hyper_sid, tmp_sid);
+ VERIFY(check, TRUE, "H5Sselect_shape_same");
+
+ ret = H5Sclose(tmp_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Compare against "all" selection */
+ check = H5Sselect_shape_same(single_hyper_sid, all_sid);
+ VERIFY(check, FALSE, "H5Sselect_shape_same");
+
+ /* Compare against "none" selection */
+ check = H5Sselect_shape_same(single_hyper_sid, none_sid);
+ VERIFY(check, FALSE, "H5Sselect_shape_same");
+
+ /* Compare against single point selection */
+ check = H5Sselect_shape_same(single_hyper_sid, single_pt_sid);
+ VERIFY(check, FALSE, "H5Sselect_shape_same");
+
+ /* Compare against multiple point selection */
+ check = H5Sselect_shape_same(single_hyper_sid, mult_pt_sid);
+ VERIFY(check, FALSE, "H5Sselect_shape_same");
+
+ /* Compare against "all" single hyperslab selection */
+ check = H5Sselect_shape_same(single_hyper_sid, single_hyper_all_sid);
+ VERIFY(check, FALSE, "H5Sselect_shape_same");
+
+ /* Compare against "single point" single hyperslab selection */
+ check = H5Sselect_shape_same(single_hyper_sid, single_hyper_pt_sid);
+ VERIFY(check, FALSE, "H5Sselect_shape_same");
+
+ /* Compare against regular, strided hyperslab selection */
+ check = H5Sselect_shape_same(single_hyper_sid, regular_hyper_sid);
+ VERIFY(check, FALSE, "H5Sselect_shape_same");
+
+ /* Compare against irregular hyperslab selection */
+ check = H5Sselect_shape_same(single_hyper_sid, irreg_hyper_sid);
+ VERIFY(check, FALSE, "H5Sselect_shape_same");
+
+ /* Compare against "no" hyperslab selection */
+ check = H5Sselect_shape_same(single_hyper_sid, none_hyper_sid);
+ VERIFY(check, FALSE, "H5Sselect_shape_same");
+
+#ifdef NOT_YET
+ /* In theory, these two selections are the same shape, but the
+ * H5Sselect_shape_same() routine is just not this sophisticated yet and it
+ * would take too much effort to make this work. The worst case is that the
+ * non-optimized chunk mapping routines will be invoked instead of the more
+ * optimized routines, so this only hurts performance, not correctness
+ */
+ /* Construct point selection which matches "plain" hyperslab selection */
+ /* Create dataspace for point selection */
+ tmp_sid = H5Screate_simple(SPACE9_RANK, dims, NULL);
+ CHECK(tmp_sid, FAIL, "H5Screate_simple");
+
+ /* Select sequence of points for point selection */
+ for (u = 1; u < (SPACE9_DIM1 - 1); u++) {
+ for (v = 1; v < (SPACE9_DIM2 - 1); v++) {
+ coord2[v - 1][0] = u;
+ coord2[v - 1][1] = v;
+ } /* end for */
+
+ ret = H5Sselect_elements(tmp_sid, H5S_SELECT_APPEND, (SPACE9_DIM2 - 2), coord2);
+ CHECK(ret, FAIL, "H5Sselect_elements");
+ } /* end for */
+
+ /* Compare against hyperslab selection */
+ check = H5Sselect_shape_same(single_hyper_sid, tmp_sid);
+ VERIFY(check, TRUE, "H5Sselect_shape_same");
+
+ ret = H5Sclose(tmp_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+#endif /* NOT_YET */
+
+ /* Construct hyperslab selection which matches "plain" hyperslab selection */
+ /* Create dataspace for hyperslab selection */
+ tmp_sid = H5Screate_simple(SPACE9_RANK, dims, NULL);
+ CHECK(tmp_sid, FAIL, "H5Screate_simple");
+
+ /* Un-select entire extent */
+ ret = H5Sselect_none(tmp_sid);
+ CHECK(ret, FAIL, "H5Sselect_none");
+
+ /* Select sequence of rows for hyperslab selection */
+ for (u = 1; u < (SPACE9_DIM1 - 1); u++) {
+ start[0] = u;
+ start[1] = 1;
+ stride[0] = 1;
+ stride[1] = 1;
+ count[0] = 1;
+ count[1] = 1;
+ block[0] = 1;
+ block[1] = (SPACE9_DIM2 - 2);
+ ret = H5Sselect_hyperslab(tmp_sid, H5S_SELECT_OR, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+ } /* end for */
+
+ /* Compare against hyperslab selection */
+ check = H5Sselect_shape_same(single_hyper_sid, tmp_sid);
+ VERIFY(check, TRUE, "H5Sselect_shape_same");
+
+ ret = H5Sclose(tmp_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Compare against scalar "all" hyperslab selection */
+ check = H5Sselect_shape_same(single_hyper_sid, scalar_all_sid);
+ VERIFY(check, FALSE, "H5Sselect_shape_same");
+
+ /* Compare against scalar "none" hyperslab selection */
+ check = H5Sselect_shape_same(single_hyper_sid, scalar_none_sid);
+ VERIFY(check, FALSE, "H5Sselect_shape_same");
+
+ /* Compare single "all" hyperslab selection to all the selections created */
+ /* Compare against itself */
+ check = H5Sselect_shape_same(single_hyper_all_sid, single_hyper_all_sid);
+ VERIFY(check, TRUE, "H5Sselect_shape_same");
+
+ /* Compare against copy of itself */
+ tmp_sid = H5Scopy(single_hyper_all_sid);
+ CHECK(tmp_sid, FAIL, "H5Scopy");
+
+ check = H5Sselect_shape_same(single_hyper_all_sid, tmp_sid);
+ VERIFY(check, TRUE, "H5Sselect_shape_same");
+
+ ret = H5Sclose(tmp_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Compare against "all" selection */
+ check = H5Sselect_shape_same(single_hyper_all_sid, all_sid);
+ VERIFY(check, TRUE, "H5Sselect_shape_same");
+
+ /* Compare against "none" selection */
+ check = H5Sselect_shape_same(single_hyper_all_sid, none_sid);
+ VERIFY(check, FALSE, "H5Sselect_shape_same");
+
+ /* Compare against single point selection */
+ check = H5Sselect_shape_same(single_hyper_all_sid, single_pt_sid);
+ VERIFY(check, FALSE, "H5Sselect_shape_same");
+
+ /* Compare against multiple point selection */
+ check = H5Sselect_shape_same(single_hyper_all_sid, mult_pt_sid);
+ VERIFY(check, FALSE, "H5Sselect_shape_same");
+
+ /* Compare against "plain" single hyperslab selection */
+ check = H5Sselect_shape_same(single_hyper_all_sid, single_hyper_sid);
+ VERIFY(check, FALSE, "H5Sselect_shape_same");
+
+ /* Compare against "single point" single hyperslab selection */
+ check = H5Sselect_shape_same(single_hyper_all_sid, single_hyper_pt_sid);
+ VERIFY(check, FALSE, "H5Sselect_shape_same");
+
+ /* Compare against regular, strided hyperslab selection */
+ check = H5Sselect_shape_same(single_hyper_all_sid, regular_hyper_sid);
+ VERIFY(check, FALSE, "H5Sselect_shape_same");
+
+ /* Compare against irregular hyperslab selection */
+ check = H5Sselect_shape_same(single_hyper_all_sid, irreg_hyper_sid);
+ VERIFY(check, FALSE, "H5Sselect_shape_same");
+
+ /* Compare against "no" hyperslab selection */
+ check = H5Sselect_shape_same(single_hyper_all_sid, none_hyper_sid);
+ VERIFY(check, FALSE, "H5Sselect_shape_same");
+
+#ifdef NOT_YET
+ /* In theory, these two selections are the same shape, but the
+ * H5S_select_shape_same() routine is just not this sophisticated yet and it
+ * would take too much effort to make this work. The worst case is that the
+ * non-optimized chunk mapping routines will be invoked instead of the more
+ * optimized routines, so this only hurts performance, not correctness
+ */
+ /* Construct point selection which matches "all" hyperslab selection */
+ /* Create dataspace for point selection */
+ tmp_sid = H5Screate_simple(SPACE9_RANK, dims, NULL);
+ CHECK(tmp_sid, FAIL, "H5Screate_simple");
+
+ /* Select sequence of points for point selection */
+ for (u = 0; u < SPACE9_DIM1; u++) {
+ for (v = 0; v < SPACE9_DIM2; v++) {
+ coord2[v][0] = u;
+ coord2[v][1] = v;
+ } /* end for */
+ ret = H5Sselect_elements(tmp_sid, H5S_SELECT_APPEND, SPACE9_DIM2, coord2);
+ CHECK(ret, FAIL, "H5Sselect_elements");
+ } /* end for */
+
+ /* Compare against hyperslab selection */
+ check = H5Sselect_shape_same(single_hyper_all_sid, tmp_sid);
+ VERIFY(check, TRUE, "H5Sselect_shape_same");
+
+ ret = H5Sclose(tmp_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+#endif /* NOT_YET */
+
+ /* Construct hyperslab selection which matches "all" hyperslab selection */
+ /* Create dataspace for hyperslab selection */
+ tmp_sid = H5Screate_simple(SPACE9_RANK, dims, NULL);
+ CHECK(tmp_sid, FAIL, "H5Screate_simple");
+
+ /* Un-select entire extent */
+ ret = H5Sselect_none(tmp_sid);
+ CHECK(ret, FAIL, "H5Sselect_none");
+
+ /* Select sequence of rows for hyperslab selection */
+    for (u = 0; u < SPACE9_DIM1; u++) {
+ start[0] = u;
+ start[1] = 0;
+ stride[0] = 1;
+ stride[1] = 1;
+ count[0] = 1;
+ count[1] = 1;
+ block[0] = 1;
+ block[1] = SPACE9_DIM2;
+ ret = H5Sselect_hyperslab(tmp_sid, H5S_SELECT_OR, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+ } /* end for */
+
+ /* Compare against hyperslab selection */
+ check = H5Sselect_shape_same(single_hyper_all_sid, tmp_sid);
+ VERIFY(check, TRUE, "H5Sselect_shape_same");
+
+ ret = H5Sclose(tmp_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Compare against scalar "all" hyperslab selection */
+ check = H5Sselect_shape_same(single_hyper_all_sid, scalar_all_sid);
+ VERIFY(check, FALSE, "H5Sselect_shape_same");
+
+ /* Compare against scalar "none" hyperslab selection */
+ check = H5Sselect_shape_same(single_hyper_all_sid, scalar_none_sid);
+ VERIFY(check, FALSE, "H5Sselect_shape_same");
+
+ /* Compare single "point" hyperslab selection to all the selections created */
+ /* Compare against itself */
+ check = H5Sselect_shape_same(single_hyper_pt_sid, single_hyper_pt_sid);
+ VERIFY(check, TRUE, "H5Sselect_shape_same");
+
+ /* Compare against copy of itself */
+ tmp_sid = H5Scopy(single_hyper_pt_sid);
+ CHECK(tmp_sid, FAIL, "H5Scopy");
+
+ check = H5Sselect_shape_same(single_hyper_pt_sid, tmp_sid);
+ VERIFY(check, TRUE, "H5Sselect_shape_same");
+
+ ret = H5Sclose(tmp_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Compare against "all" selection */
+ check = H5Sselect_shape_same(single_hyper_pt_sid, all_sid);
+ VERIFY(check, FALSE, "H5Sselect_shape_same");
+
+ /* Compare against "none" selection */
+ check = H5Sselect_shape_same(single_hyper_pt_sid, none_sid);
+ VERIFY(check, FALSE, "H5Sselect_shape_same");
+
+ /* Compare against single point selection */
+ check = H5Sselect_shape_same(single_hyper_pt_sid, single_pt_sid);
+ VERIFY(check, TRUE, "H5Sselect_shape_same");
+
+ /* Compare against multiple point selection */
+ check = H5Sselect_shape_same(single_hyper_pt_sid, mult_pt_sid);
+ VERIFY(check, FALSE, "H5Sselect_shape_same");
+
+ /* Compare against "plain" single hyperslab selection */
+ check = H5Sselect_shape_same(single_hyper_pt_sid, single_hyper_sid);
+ VERIFY(check, FALSE, "H5Sselect_shape_same");
+
+ /* Compare against "all" single hyperslab selection */
+ check = H5Sselect_shape_same(single_hyper_pt_sid, single_hyper_all_sid);
+ VERIFY(check, FALSE, "H5Sselect_shape_same");
+
+ /* Compare against regular, strided hyperslab selection */
+ check = H5Sselect_shape_same(single_hyper_pt_sid, regular_hyper_sid);
+ VERIFY(check, FALSE, "H5Sselect_shape_same");
+
+ /* Compare against irregular hyperslab selection */
+ check = H5Sselect_shape_same(single_hyper_pt_sid, irreg_hyper_sid);
+ VERIFY(check, FALSE, "H5Sselect_shape_same");
+
+ /* Compare against "no" hyperslab selection */
+ check = H5Sselect_shape_same(single_hyper_pt_sid, none_hyper_sid);
+ VERIFY(check, FALSE, "H5Sselect_shape_same");
+
+ /* Compare against scalar "all" hyperslab selection */
+ check = H5Sselect_shape_same(single_hyper_pt_sid, scalar_all_sid);
+ VERIFY(check, TRUE, "H5Sselect_shape_same");
+
+ /* Compare against scalar "none" hyperslab selection */
+ check = H5Sselect_shape_same(single_hyper_pt_sid, scalar_none_sid);
+ VERIFY(check, FALSE, "H5Sselect_shape_same");
+
+ /* Compare regular, strided hyperslab selection to all the selections created */
+ /* Compare against itself */
+ check = H5Sselect_shape_same(regular_hyper_sid, regular_hyper_sid);
+ VERIFY(check, TRUE, "H5Sselect_shape_same");
+
+ /* Compare against copy of itself */
+ tmp_sid = H5Scopy(regular_hyper_sid);
+ CHECK(tmp_sid, FAIL, "H5Scopy");
+
+ check = H5Sselect_shape_same(regular_hyper_sid, tmp_sid);
+ VERIFY(check, TRUE, "H5Sselect_shape_same");
+
+ ret = H5Sclose(tmp_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Compare against "all" selection */
+ check = H5Sselect_shape_same(regular_hyper_sid, all_sid);
+ VERIFY(check, FALSE, "H5Sselect_shape_same");
+
+ /* Compare against "none" selection */
+ check = H5Sselect_shape_same(regular_hyper_sid, none_sid);
+ VERIFY(check, FALSE, "H5Sselect_shape_same");
+
+ /* Compare against single point selection */
+ check = H5Sselect_shape_same(regular_hyper_sid, single_pt_sid);
+ VERIFY(check, FALSE, "H5Sselect_shape_same");
+
+ /* Compare against multiple point selection */
+ check = H5Sselect_shape_same(regular_hyper_sid, mult_pt_sid);
+ VERIFY(check, FALSE, "H5Sselect_shape_same");
+
+ /* Compare against "plain" single hyperslab selection */
+ check = H5Sselect_shape_same(regular_hyper_sid, single_hyper_sid);
+ VERIFY(check, FALSE, "H5Sselect_shape_same");
+
+ /* Compare against "all" single hyperslab selection */
+ check = H5Sselect_shape_same(regular_hyper_sid, single_hyper_all_sid);
+ VERIFY(check, FALSE, "H5Sselect_shape_same");
+
+ /* Compare against "single point" single hyperslab selection */
+ check = H5Sselect_shape_same(regular_hyper_sid, single_hyper_pt_sid);
+ VERIFY(check, FALSE, "H5Sselect_shape_same");
+
+ /* Compare against irregular hyperslab selection */
+ check = H5Sselect_shape_same(regular_hyper_sid, irreg_hyper_sid);
+ VERIFY(check, FALSE, "H5Sselect_shape_same");
+
+ /* Compare against "no" hyperslab selection */
+ check = H5Sselect_shape_same(regular_hyper_sid, none_hyper_sid);
+ VERIFY(check, FALSE, "H5Sselect_shape_same");
+
+ /* Construct point selection which matches regular, strided hyperslab selection */
+ /* Create dataspace for point selection */
+ tmp_sid = H5Screate_simple(SPACE9_RANK, dims, NULL);
+ CHECK(tmp_sid, FAIL, "H5Screate_simple");
+
+ /* Select sequence of points for point selection */
+ for (u = 2; u < 11; u += 2) {
+ for (v = 0; v < 2; v++) {
+ coord2[v][0] = u;
+ coord2[v][1] = (v * 2) + 2;
+ } /* end for */
+ ret = H5Sselect_elements(tmp_sid, H5S_SELECT_APPEND, (size_t)2, (const hsize_t *)coord2);
+ CHECK(ret, FAIL, "H5Sselect_elements");
+ } /* end for */
+
+ /* Compare against hyperslab selection */
+ check = H5Sselect_shape_same(regular_hyper_sid, tmp_sid);
+ VERIFY(check, TRUE, "H5Sselect_shape_same");
+
+ ret = H5Sclose(tmp_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Construct hyperslab selection which matches regular, strided hyperslab selection */
+ /* Create dataspace for hyperslab selection */
+ tmp_sid = H5Screate_simple(SPACE9_RANK, dims, NULL);
+ CHECK(tmp_sid, FAIL, "H5Screate_simple");
+
+ /* Un-select entire extent */
+ ret = H5Sselect_none(tmp_sid);
+ CHECK(ret, FAIL, "H5Sselect_none");
+
+ /* Select sequence of rows for hyperslab selection */
+ for (u = 2; u < 11; u += 2) {
+ start[0] = u;
+ start[1] = 3;
+ stride[0] = 1;
+ stride[1] = 2;
+ count[0] = 1;
+ count[1] = 2;
+ block[0] = 1;
+ block[1] = 1;
+ ret = H5Sselect_hyperslab(tmp_sid, H5S_SELECT_OR, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+ } /* end for */
+
+ /* Compare against hyperslab selection */
+ check = H5Sselect_shape_same(regular_hyper_sid, tmp_sid);
+ VERIFY(check, TRUE, "H5Sselect_shape_same");
+
+ ret = H5Sclose(tmp_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Construct regular hyperslab selection with an offset which matches regular, strided hyperslab selection
+ */
+ /* Create dataspace for hyperslab selection */
+ tmp_sid = H5Screate_simple(SPACE9_RANK, dims, NULL);
+ CHECK(tmp_sid, FAIL, "H5Screate_simple");
+
+ /* Select regular, strided hyperslab selection at an offset */
+ start[0] = 1;
+ start[1] = 1;
+ stride[0] = 2;
+ stride[1] = 2;
+ count[0] = 5;
+ count[1] = 2;
+ block[0] = 1;
+ block[1] = 1;
+ ret = H5Sselect_hyperslab(tmp_sid, H5S_SELECT_SET, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* Compare against hyperslab selection */
+ check = H5Sselect_shape_same(regular_hyper_sid, tmp_sid);
+ VERIFY(check, TRUE, "H5Sselect_shape_same");
+
+ ret = H5Sclose(tmp_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Compare against scalar "all" hyperslab selection */
+ check = H5Sselect_shape_same(regular_hyper_sid, scalar_all_sid);
+ VERIFY(check, FALSE, "H5Sselect_shape_same");
+
+ /* Compare against scalar "none" hyperslab selection */
+ check = H5Sselect_shape_same(regular_hyper_sid, scalar_none_sid);
+ VERIFY(check, FALSE, "H5Sselect_shape_same");
+
+ /* Compare irregular hyperslab selection to all the selections created */
+ /* Compare against itself */
+ check = H5Sselect_shape_same(irreg_hyper_sid, irreg_hyper_sid);
+ VERIFY(check, TRUE, "H5Sselect_shape_same");
+
+ /* Compare against copy of itself */
+ tmp_sid = H5Scopy(irreg_hyper_sid);
+ CHECK(tmp_sid, FAIL, "H5Scopy");
+
+ check = H5Sselect_shape_same(irreg_hyper_sid, tmp_sid);
+ VERIFY(check, TRUE, "H5Sselect_shape_same");
+
+ ret = H5Sclose(tmp_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Compare against "all" selection */
+ check = H5Sselect_shape_same(irreg_hyper_sid, all_sid);
+ VERIFY(check, FALSE, "H5Sselect_shape_same");
+
+ /* Compare against "none" selection */
+ check = H5Sselect_shape_same(irreg_hyper_sid, none_sid);
+ VERIFY(check, FALSE, "H5Sselect_shape_same");
+
+ /* Compare against single point selection */
+ check = H5Sselect_shape_same(irreg_hyper_sid, single_pt_sid);
+ VERIFY(check, FALSE, "H5Sselect_shape_same");
+
+ /* Compare against multiple point selection */
+ check = H5Sselect_shape_same(irreg_hyper_sid, mult_pt_sid);
+ VERIFY(check, FALSE, "H5Sselect_shape_same");
+
+ /* Compare against "plain" single hyperslab selection */
+ check = H5Sselect_shape_same(irreg_hyper_sid, single_hyper_sid);
+ VERIFY(check, FALSE, "H5Sselect_shape_same");
+
+ /* Compare against "all" single hyperslab selection */
+ check = H5Sselect_shape_same(irreg_hyper_sid, single_hyper_all_sid);
+ VERIFY(check, FALSE, "H5Sselect_shape_same");
+
+ /* Compare against "single point" single hyperslab selection */
+ check = H5Sselect_shape_same(irreg_hyper_sid, single_hyper_pt_sid);
+ VERIFY(check, FALSE, "H5Sselect_shape_same");
+
+ /* Compare against regular, strided hyperslab selection */
+ check = H5Sselect_shape_same(irreg_hyper_sid, regular_hyper_sid);
+ VERIFY(check, FALSE, "H5Sselect_shape_same");
+
+ /* Compare against "no" hyperslab selection */
+ check = H5Sselect_shape_same(irreg_hyper_sid, none_hyper_sid);
+ VERIFY(check, FALSE, "H5Sselect_shape_same");
+
+ /* Construct hyperslab selection which matches irregular hyperslab selection */
+ /* Create dataspace for hyperslab selection */
+ tmp_sid = H5Screate_simple(SPACE9_RANK, dims, NULL);
+ CHECK(tmp_sid, FAIL, "H5Screate_simple");
+
+ start[0] = 2;
+ start[1] = 2;
+ stride[0] = 1;
+ stride[1] = 1;
+ count[0] = 1;
+ count[1] = 1;
+ block[0] = 1;
+ block[1] = 1;
+ ret = H5Sselect_hyperslab(tmp_sid, H5S_SELECT_SET, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* Select sequence of columns for hyperslab selection */
+ for (u = 0; u < 3; u++) {
+ start[0] = 4;
+ start[1] = u + 4;
+ stride[0] = 1;
+ stride[1] = 1;
+ count[0] = 1;
+ count[1] = 1;
+ block[0] = 3;
+ block[1] = 1;
+ ret = H5Sselect_hyperslab(tmp_sid, H5S_SELECT_OR, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+ } /* end for */
+
+ /* Compare against hyperslab selection */
+ check = H5Sselect_shape_same(irreg_hyper_sid, tmp_sid);
+ VERIFY(check, TRUE, "H5Sselect_shape_same");
+
+ ret = H5Sclose(tmp_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Compare against scalar "all" hyperslab selection */
+ check = H5Sselect_shape_same(irreg_hyper_sid, scalar_all_sid);
+ VERIFY(check, FALSE, "H5Sselect_shape_same");
+
+ /* Compare against scalar "none" hyperslab selection */
+ check = H5Sselect_shape_same(irreg_hyper_sid, scalar_none_sid);
+ VERIFY(check, FALSE, "H5Sselect_shape_same");
+
+ /* Compare scalar "all" dataspace with all selections created */
+
+ /* Compare against itself */
+ check = H5Sselect_shape_same(scalar_all_sid, scalar_all_sid);
+ VERIFY(check, TRUE, "H5Sselect_shape_same");
+
+ /* Compare against copy of itself */
+ tmp_sid = H5Scopy(scalar_all_sid);
+ CHECK(tmp_sid, FAIL, "H5Scopy");
+
+ check = H5Sselect_shape_same(scalar_all_sid, tmp_sid);
+ VERIFY(check, TRUE, "H5Sselect_shape_same");
+
+ ret = H5Sclose(tmp_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Compare against "all" selection */
+ check = H5Sselect_shape_same(scalar_all_sid, all_sid);
+ VERIFY(check, FALSE, "H5Sselect_shape_same");
+
+ /* Compare against "none" selection */
+ check = H5Sselect_shape_same(scalar_all_sid, none_sid);
+ VERIFY(check, FALSE, "H5Sselect_shape_same");
+
+ /* Compare against single point selection */
+ check = H5Sselect_shape_same(scalar_all_sid, single_pt_sid);
+ VERIFY(check, TRUE, "H5Sselect_shape_same");
+
+ /* Compare against multiple point selection */
+ check = H5Sselect_shape_same(scalar_all_sid, mult_pt_sid);
+ VERIFY(check, FALSE, "H5Sselect_shape_same");
+
+ /* Compare against "plain" single hyperslab selection */
+ check = H5Sselect_shape_same(scalar_all_sid, single_hyper_sid);
+ VERIFY(check, FALSE, "H5Sselect_shape_same");
+
+ /* Compare against "all" single hyperslab selection */
+ check = H5Sselect_shape_same(scalar_all_sid, single_hyper_all_sid);
+ VERIFY(check, FALSE, "H5Sselect_shape_same");
+
+ /* Compare against "single point" single hyperslab selection */
+ check = H5Sselect_shape_same(scalar_all_sid, single_hyper_pt_sid);
+ VERIFY(check, TRUE, "H5Sselect_shape_same");
+
+ /* Compare against regular, strided hyperslab selection */
+ check = H5Sselect_shape_same(scalar_all_sid, regular_hyper_sid);
+ VERIFY(check, FALSE, "H5Sselect_shape_same");
+
+ /* Compare against irregular hyperslab selection */
+ check = H5Sselect_shape_same(scalar_all_sid, irreg_hyper_sid);
+ VERIFY(check, FALSE, "H5Sselect_shape_same");
+
+ /* Compare against "no" hyperslab selection */
+ check = H5Sselect_shape_same(scalar_all_sid, none_hyper_sid);
+ VERIFY(check, FALSE, "H5Sselect_shape_same");
+
+ /* Compare against scalar "none" hyperslab selection */
+ check = H5Sselect_shape_same(scalar_all_sid, scalar_none_sid);
+ VERIFY(check, FALSE, "H5Sselect_shape_same");
+
+ /* Compare scalar "none" dataspace with all selections created */
+
+ /* Compare against itself */
+ check = H5Sselect_shape_same(scalar_none_sid, scalar_none_sid);
+ VERIFY(check, TRUE, "H5Sselect_shape_same");
+
+ /* Compare against copy of itself */
+ tmp_sid = H5Scopy(scalar_none_sid);
+ CHECK(tmp_sid, FAIL, "H5Scopy");
+
+ check = H5Sselect_shape_same(scalar_none_sid, tmp_sid);
+ VERIFY(check, TRUE, "H5Sselect_shape_same");
+
+ ret = H5Sclose(tmp_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Compare against "all" selection */
+ check = H5Sselect_shape_same(scalar_none_sid, all_sid);
+ VERIFY(check, FALSE, "H5Sselect_shape_same");
+
+ /* Compare against "none" selection */
+ check = H5Sselect_shape_same(scalar_none_sid, none_sid);
+ VERIFY(check, TRUE, "H5Sselect_shape_same");
+
+ /* Compare against single point selection */
+ check = H5Sselect_shape_same(scalar_none_sid, single_pt_sid);
+ VERIFY(check, FALSE, "H5Sselect_shape_same");
+
+ /* Compare against multiple point selection */
+ check = H5Sselect_shape_same(scalar_none_sid, mult_pt_sid);
+ VERIFY(check, FALSE, "H5Sselect_shape_same");
+
+ /* Compare against "plain" single hyperslab selection */
+ check = H5Sselect_shape_same(scalar_none_sid, single_hyper_sid);
+ VERIFY(check, FALSE, "H5Sselect_shape_same");
+
+ /* Compare against "all" single hyperslab selection */
+ check = H5Sselect_shape_same(scalar_none_sid, single_hyper_all_sid);
+ VERIFY(check, FALSE, "H5Sselect_shape_same");
+
+ /* Compare against "single point" single hyperslab selection */
+ check = H5Sselect_shape_same(scalar_none_sid, single_hyper_pt_sid);
+ VERIFY(check, FALSE, "H5Sselect_shape_same");
+
+ /* Compare against regular, strided hyperslab selection */
+ check = H5Sselect_shape_same(scalar_none_sid, regular_hyper_sid);
+ VERIFY(check, FALSE, "H5Sselect_shape_same");
+
+ /* Compare against irregular hyperslab selection */
+ check = H5Sselect_shape_same(scalar_none_sid, irreg_hyper_sid);
+ VERIFY(check, FALSE, "H5Sselect_shape_same");
+
+ /* Compare against "no" hyperslab selection */
+ check = H5Sselect_shape_same(scalar_none_sid, none_hyper_sid);
+ VERIFY(check, TRUE, "H5Sselect_shape_same");
+
+ /* Compare against scalar "all" hyperslab selection */
+ check = H5Sselect_shape_same(scalar_none_sid, scalar_all_sid);
+ VERIFY(check, FALSE, "H5Sselect_shape_same");
+
+ /* Close dataspaces */
+ ret = H5Sclose(all_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+ ret = H5Sclose(none_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+ ret = H5Sclose(single_pt_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+ ret = H5Sclose(mult_pt_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+ ret = H5Sclose(single_hyper_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+ ret = H5Sclose(single_hyper_all_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+ ret = H5Sclose(single_hyper_pt_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+ ret = H5Sclose(regular_hyper_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+ ret = H5Sclose(irreg_hyper_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+ ret = H5Sclose(none_hyper_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+ ret = H5Sclose(scalar_all_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+ ret = H5Sclose(scalar_none_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+} /* test_shape_same() */
+
+/****************************************************************
+**
+** test_shape_same_dr__smoke_check_1():
+**
+** Create a square, 2-D dataspace (10 X 10), and select
+** all of it.
+**
+** Similarly, create nine, 3-D dataspaces (10 X 10 X 10),
+** and select (10 X 10 X 1) hyperslabs in each, three with
+** the slab parallel to the xy plane, three parallel to the
+** xz plane, and three parallel to the yz plane.
+**
+** Assuming that z is the fastest changing dimension,
+** H5Sselect_shape_same() should return TRUE when comparing
+** the full 2-D space against any hyperslab parallel to the
+** yz plane in the 3-D space, and FALSE when comparing the
+** full 2-D space against the hyperslabs parallel to the
+** xy or xz planes.
+**
+** Also create two additional 3-D dataspaces (10 X 10 X 10),
+** and select a (10 X 10 X 2) hyperslab parallel to the yz
+** plane in one of them, and two parallel (10 X 10 X 1)
+** hyperslabs parallel to the yz plane in the other.
+** H5Sselect_shape_same() should return FALSE when comparing
+** each to the 2-D selection.
+**
+****************************************************************/
+static void
+test_shape_same_dr__smoke_check_1(void)
+{
+ hid_t small_square_sid;
+ hid_t small_cube_xy_slice_0_sid;
+ hid_t small_cube_xy_slice_1_sid;
+ hid_t small_cube_xy_slice_2_sid;
+ hid_t small_cube_xz_slice_0_sid;
+ hid_t small_cube_xz_slice_1_sid;
+ hid_t small_cube_xz_slice_2_sid;
+ hid_t small_cube_yz_slice_0_sid;
+ hid_t small_cube_yz_slice_1_sid;
+ hid_t small_cube_yz_slice_2_sid;
+ hid_t small_cube_yz_slice_3_sid;
+ hid_t small_cube_yz_slice_4_sid;
+ hsize_t small_cube_dims[] = {10, 10, 10};
+ hsize_t start[3];
+ hsize_t stride[3];
+ hsize_t count[3];
+ hsize_t block[3];
+ htri_t check; /* Shape comparison return value */
+ herr_t ret; /* Generic return value */
+
+ MESSAGE(7, (" Smoke check 1: Slices through a cube.\n"));
+
+ /* Create the 10 x 10 dataspace */
+ small_square_sid = H5Screate_simple(2, small_cube_dims, NULL);
+ CHECK(small_square_sid, FAIL, "H5Screate_simple");
+
+ /* Create the 10 X 10 X 10 dataspaces for the hyperslabs parallel to the xy plane */
+ small_cube_xy_slice_0_sid = H5Screate_simple(3, small_cube_dims, NULL);
+ CHECK(small_cube_xy_slice_0_sid, FAIL, "H5Screate_simple");
+
+ small_cube_xy_slice_1_sid = H5Screate_simple(3, small_cube_dims, NULL);
+ CHECK(small_cube_xy_slice_1_sid, FAIL, "H5Screate_simple");
+
+ small_cube_xy_slice_2_sid = H5Screate_simple(3, small_cube_dims, NULL);
+ CHECK(small_cube_xy_slice_2_sid, FAIL, "H5Screate_simple");
+
+ start[0] = 0; /* x */
+ start[1] = 0; /* y */
+ start[2] = 0; /* z */
+
+ /* stride is a bit silly here, since we are only selecting a single */
+ /* contiguous plane, but include it anyway, with values large enough */
+ /* to ensure that we will only get the single block selected. */
+ stride[0] = 20; /* x */
+ stride[1] = 20; /* y */
+ stride[2] = 20; /* z */
+
+ count[0] = 1; /* x */
+ count[1] = 1; /* y */
+ count[2] = 1; /* z */
+
+ block[0] = 10; /* x */
+ block[1] = 10; /* y */
+ block[2] = 1; /* z */
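+ /* block selects a single full xy plane, one element thick in z */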
+ ret = H5Sselect_hyperslab(small_cube_xy_slice_0_sid, H5S_SELECT_SET, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ start[2] = 5;
+ ret = H5Sselect_hyperslab(small_cube_xy_slice_1_sid, H5S_SELECT_SET, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ start[2] = 9;
+ ret = H5Sselect_hyperslab(small_cube_xy_slice_2_sid, H5S_SELECT_SET, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* Create the 10 X 10 X 10 dataspaces for the hyperslabs parallel to the xz plane */
+ small_cube_xz_slice_0_sid = H5Screate_simple(3, small_cube_dims, NULL);
+ CHECK(small_cube_xz_slice_0_sid, FAIL, "H5Screate_simple");
+
+ small_cube_xz_slice_1_sid = H5Screate_simple(3, small_cube_dims, NULL);
+ CHECK(small_cube_xz_slice_1_sid, FAIL, "H5Screate_simple");
+
+ small_cube_xz_slice_2_sid = H5Screate_simple(3, small_cube_dims, NULL);
+ CHECK(small_cube_xz_slice_2_sid, FAIL, "H5Screate_simple");
+
+ start[0] = 0; /* x */
+ start[1] = 0; /* y */
+ start[2] = 0; /* z */
+
+ /* stride is a bit silly here, since we are only selecting a single */
+ /* contiguous chunk, but include it anyway, with values large enough */
+ /* to ensure that we will only get the single chunk. */
+ stride[0] = 20; /* x */
+ stride[1] = 20; /* y */
+ stride[2] = 20; /* z */
+
+ count[0] = 1; /* x */
+ count[1] = 1; /* y */
+ count[2] = 1; /* z */
+
+ block[0] = 10; /* x */
+ block[1] = 1; /* y */
+ block[2] = 10; /* z */
+ ret = H5Sselect_hyperslab(small_cube_xz_slice_0_sid, H5S_SELECT_SET, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ start[1] = 4;
+ ret = H5Sselect_hyperslab(small_cube_xz_slice_1_sid, H5S_SELECT_SET, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ start[1] = 9;
+ ret = H5Sselect_hyperslab(small_cube_xz_slice_2_sid, H5S_SELECT_SET, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* Create the 10 X 10 X 10 dataspaces for the hyperslabs parallel to the yz plane */
+ small_cube_yz_slice_0_sid = H5Screate_simple(3, small_cube_dims, NULL);
+ CHECK(small_cube_yz_slice_0_sid, FAIL, "H5Screate_simple");
+
+ small_cube_yz_slice_1_sid = H5Screate_simple(3, small_cube_dims, NULL);
+ CHECK(small_cube_yz_slice_1_sid, FAIL, "H5Screate_simple");
+
+ small_cube_yz_slice_2_sid = H5Screate_simple(3, small_cube_dims, NULL);
+ CHECK(small_cube_yz_slice_2_sid, FAIL, "H5Screate_simple");
+
+ small_cube_yz_slice_3_sid = H5Screate_simple(3, small_cube_dims, NULL);
+ CHECK(small_cube_yz_slice_3_sid, FAIL, "H5Screate_simple");
+
+ small_cube_yz_slice_4_sid = H5Screate_simple(3, small_cube_dims, NULL);
+ CHECK(small_cube_yz_slice_4_sid, FAIL, "H5Screate_simple");
+
+ start[0] = 0; /* x */
+ start[1] = 0; /* y */
+ start[2] = 0; /* z */
+
+ /* stride is a bit silly here, since we are only selecting a single */
+ /* contiguous chunk, but include it anyway, with values large enough */
+ /* to ensure that we will only get the single chunk. */
+ stride[0] = 20; /* x */
+ stride[1] = 20; /* y */
+ stride[2] = 20; /* z */
+
+ count[0] = 1; /* x */
+ count[1] = 1; /* y */
+ count[2] = 1; /* z */
+
+ block[0] = 1; /* x */
+ block[1] = 10; /* y */
+ block[2] = 10; /* z */
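+ /* block selects a single full yz plane, one element thick in x -- the
+ * orientation that should match the fully selected 2-D square
+ */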
+
+ ret = H5Sselect_hyperslab(small_cube_yz_slice_0_sid, H5S_SELECT_SET, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ start[0] = 4;
+ ret = H5Sselect_hyperslab(small_cube_yz_slice_1_sid, H5S_SELECT_SET, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ start[0] = 9;
+ ret = H5Sselect_hyperslab(small_cube_yz_slice_2_sid, H5S_SELECT_SET, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
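+ /* yz slice 3 is a single slab two elements thick in x, and yz slice 4
+ * is two disjoint one-element-thick slabs, so neither should compare
+ * as the same shape as the fully selected 2-D square.
+ */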
+ start[0] = 4;
+ block[0] = 2;
+ ret = H5Sselect_hyperslab(small_cube_yz_slice_3_sid, H5S_SELECT_SET, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ start[0] = 3;
+ block[0] = 1;
+ ret = H5Sselect_hyperslab(small_cube_yz_slice_4_sid, H5S_SELECT_SET, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ start[0] = 6;
+ ret = H5Sselect_hyperslab(small_cube_yz_slice_4_sid, H5S_SELECT_OR, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* setup is done -- run the tests: */
+
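+ /* With z as the fastest changing dimension, only the yz slices span
+ * the two fastest changing dimensions, so only they share the
+ * 10 X 10 shape of the fully selected 2-D square.
+ */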
+ /* Compare against "xy" selection */
+ check = H5Sselect_shape_same(small_cube_xy_slice_0_sid, small_square_sid);
+ VERIFY(check, FALSE, "H5Sselect_shape_same");
+
+ check = H5Sselect_shape_same(small_cube_xy_slice_1_sid, small_square_sid);
+ VERIFY(check, FALSE, "H5Sselect_shape_same");
+
+ check = H5Sselect_shape_same(small_cube_xy_slice_2_sid, small_square_sid);
+ VERIFY(check, FALSE, "H5Sselect_shape_same");
+
+ /* Compare against "xz" selection */
+ check = H5Sselect_shape_same(small_cube_xz_slice_0_sid, small_square_sid);
+ VERIFY(check, FALSE, "H5Sselect_shape_same");
+
+ check = H5Sselect_shape_same(small_cube_xz_slice_1_sid, small_square_sid);
+ VERIFY(check, FALSE, "H5Sselect_shape_same");
+
+ check = H5Sselect_shape_same(small_cube_xz_slice_2_sid, small_square_sid);
+ VERIFY(check, FALSE, "H5Sselect_shape_same");
+
+ /* Compare against "yz" selection */
+ check = H5Sselect_shape_same(small_cube_yz_slice_0_sid, small_square_sid);
+ VERIFY(check, TRUE, "H5Sselect_shape_same");
+
+ check = H5Sselect_shape_same(small_cube_yz_slice_1_sid, small_square_sid);
+ VERIFY(check, TRUE, "H5Sselect_shape_same");
+
+ check = H5Sselect_shape_same(small_cube_yz_slice_2_sid, small_square_sid);
+ VERIFY(check, TRUE, "H5Sselect_shape_same");
+
+ check = H5Sselect_shape_same(small_cube_yz_slice_3_sid, small_square_sid);
+ VERIFY(check, FALSE, "H5Sselect_shape_same");
+
+ check = H5Sselect_shape_same(small_cube_yz_slice_4_sid, small_square_sid);
+ VERIFY(check, FALSE, "H5Sselect_shape_same");
+
+ /* Close dataspaces */
+ ret = H5Sclose(small_square_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ ret = H5Sclose(small_cube_xy_slice_0_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ ret = H5Sclose(small_cube_xy_slice_1_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ ret = H5Sclose(small_cube_xy_slice_2_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ ret = H5Sclose(small_cube_xz_slice_0_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ ret = H5Sclose(small_cube_xz_slice_1_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ ret = H5Sclose(small_cube_xz_slice_2_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ ret = H5Sclose(small_cube_yz_slice_0_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ ret = H5Sclose(small_cube_yz_slice_1_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ ret = H5Sclose(small_cube_yz_slice_2_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ ret = H5Sclose(small_cube_yz_slice_3_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ ret = H5Sclose(small_cube_yz_slice_4_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+} /* test_shape_same_dr__smoke_check_1() */
+
+/****************************************************************
+**
+** test_shape_same_dr__smoke_check_2():
+**
+** Create a square, 2-D dataspace (10 X 10), and select
+** a "checker board" hyperslab as follows:
+**
+** * * - - * * - - * *
+** * * - - * * - - * *
+** - - * * - - * * - -
+** - - * * - - * * - -
+** * * - - * * - - * *
+** * * - - * * - - * *
+** - - * * - - * * - -
+** - - * * - - * * - -
+** * * - - * * - - * *
+** * * - - * * - - * *
+**
+** where asterisks indicate selected elements, and dashes
+** indicate unselected elements.
+**
+** Similarly, create nine, 3-D dataspaces (10 X 10 X 10),
+** and select similar (10 X 10 X 1) checker board
+** hyperslabs in each, three with the slab parallel to the xy
+** plane, three parallel to the xz plane, and three parallel
+** to the yz plane.
+**
+** Assuming that z is the fastest changing dimension,
+** H5Sselect_shape_same() should return TRUE when comparing
+** the 2-D space checker board selection against a checker
+** board hyperslab parallel to the yz plane in the 3-D
+** space, and FALSE when comparing the 2-D checkerboard
+** selection against two hyperslabs parallel to the xy
+** or xz planes.
+**
+** Also create an additional 3-D dataspace (10 X 10 X 10),
+** and select a checker board parallel to the yz plane,
+** except that some of its squares lie on a different plane.
+** H5Sselect_shape_same() should return FALSE when
+** comparing this selection to the 2-D selection.
+**
+****************************************************************/
+static void
+test_shape_same_dr__smoke_check_2(void)
+{
+ hid_t small_square_sid;
+ hid_t small_cube_xy_slice_0_sid;
+ hid_t small_cube_xy_slice_1_sid;
+ hid_t small_cube_xy_slice_2_sid;
+ hid_t small_cube_xz_slice_0_sid;
+ hid_t small_cube_xz_slice_1_sid;
+ hid_t small_cube_xz_slice_2_sid;
+ hid_t small_cube_yz_slice_0_sid;
+ hid_t small_cube_yz_slice_1_sid;
+ hid_t small_cube_yz_slice_2_sid;
+ hid_t small_cube_yz_slice_3_sid;
+ hsize_t small_cube_dims[] = {10, 10, 10};
+ hsize_t start[3];
+ hsize_t stride[3];
+ hsize_t count[3];
+ hsize_t block[3];
+ htri_t check; /* Shape comparison return value */
+ herr_t ret; /* Generic return value */
+
+ MESSAGE(7, (" Smoke check 2: Checker board slices through a cube.\n"));
+
+ /* Create the 10 x 10 dataspace */
+ small_square_sid = H5Screate_simple(2, small_cube_dims, NULL);
+ CHECK(small_square_sid, FAIL, "H5Screate_simple");
+
+ start[0] = 0; /* x */
+ start[1] = 0; /* y */
+
+ stride[0] = 4; /* x */
+ stride[1] = 4; /* y */
+
+ count[0] = 3; /* x */
+ count[1] = 3; /* y */
+
+ block[0] = 2; /* x */
+ block[1] = 2; /* y */
+ ret = H5Sselect_hyperslab(small_square_sid, H5S_SELECT_SET, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ start[0] = 2; /* x */
+ start[1] = 2; /* y */
+
+ stride[0] = 4; /* x */
+ stride[1] = 4; /* y */
+
+ count[0] = 2; /* x */
+ count[1] = 2; /* y */
+
+ block[0] = 2; /* x */
+ block[1] = 2; /* y */
+ ret = H5Sselect_hyperslab(small_square_sid, H5S_SELECT_OR, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
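+ /* the SET and OR selections above combine to form the 10 X 10 checker
+ * board pattern shown in the function header comment
+ */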
+
+ /* Create the 10 X 10 X 10 dataspaces for the hyperslabs parallel to the xy plane */
+ small_cube_xy_slice_0_sid = H5Screate_simple(3, small_cube_dims, NULL);
+ CHECK(small_cube_xy_slice_0_sid, FAIL, "H5Screate_simple");
+
+ small_cube_xy_slice_1_sid = H5Screate_simple(3, small_cube_dims, NULL);
+ CHECK(small_cube_xy_slice_1_sid, FAIL, "H5Screate_simple");
+
+ small_cube_xy_slice_2_sid = H5Screate_simple(3, small_cube_dims, NULL);
+ CHECK(small_cube_xy_slice_2_sid, FAIL, "H5Screate_simple");
+
+ start[0] = 0; /* x */
+ start[1] = 0; /* y */
+ start[2] = 0; /* z */
+
+ stride[0] = 4; /* x */
+ stride[1] = 4; /* y */
+ stride[2] = 20; /* z -- large enough that there will only be one slice */
+
+ count[0] = 3; /* x */
+ count[1] = 3; /* y */
+ count[2] = 1; /* z */
+
+ block[0] = 2; /* x */
+ block[1] = 2; /* y */
+ block[2] = 1; /* z */
+ ret = H5Sselect_hyperslab(small_cube_xy_slice_0_sid, H5S_SELECT_SET, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ start[2] = 3;
+ ret = H5Sselect_hyperslab(small_cube_xy_slice_1_sid, H5S_SELECT_SET, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ start[2] = 9;
+ ret = H5Sselect_hyperslab(small_cube_xy_slice_2_sid, H5S_SELECT_SET, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ start[0] = 2; /* x */
+ start[1] = 2; /* y */
+ start[2] = 0; /* z */
+
+ stride[0] = 4; /* x */
+ stride[1] = 4; /* y */
+ stride[2] = 20; /* z -- large enough that there will only be one slice */
+
+ count[0] = 2; /* x */
+ count[1] = 2; /* y */
+ count[2] = 1; /* z */
+
+ block[0] = 2; /* x */
+ block[1] = 2; /* y */
+ block[2] = 1; /* z */
+ ret = H5Sselect_hyperslab(small_cube_xy_slice_0_sid, H5S_SELECT_OR, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ start[2] = 3;
+ ret = H5Sselect_hyperslab(small_cube_xy_slice_1_sid, H5S_SELECT_OR, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ start[2] = 9;
+ ret = H5Sselect_hyperslab(small_cube_xy_slice_2_sid, H5S_SELECT_OR, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* Create the 10 X 10 X 10 dataspaces for the hyperslabs parallel to the xz plane */
+ small_cube_xz_slice_0_sid = H5Screate_simple(3, small_cube_dims, NULL);
+ CHECK(small_cube_xz_slice_0_sid, FAIL, "H5Screate_simple");
+
+ small_cube_xz_slice_1_sid = H5Screate_simple(3, small_cube_dims, NULL);
+ CHECK(small_cube_xz_slice_1_sid, FAIL, "H5Screate_simple");
+
+ small_cube_xz_slice_2_sid = H5Screate_simple(3, small_cube_dims, NULL);
+ CHECK(small_cube_xz_slice_2_sid, FAIL, "H5Screate_simple");
+
+ start[0] = 0; /* x */
+ start[1] = 0; /* y */
+ start[2] = 0; /* z */
+
+ stride[0] = 4; /* x */
+ stride[1] = 20; /* y -- large enough that there will only be one slice */
+ stride[2] = 4; /* z */
+
+ count[0] = 3; /* x */
+ count[1] = 1; /* y */
+ count[2] = 3; /* z */
+
+ block[0] = 2; /* x */
+ block[1] = 1; /* y */
+ block[2] = 2; /* z */
+ ret = H5Sselect_hyperslab(small_cube_xz_slice_0_sid, H5S_SELECT_SET, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ start[1] = 5;
+ ret = H5Sselect_hyperslab(small_cube_xz_slice_1_sid, H5S_SELECT_SET, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ start[1] = 9;
+ ret = H5Sselect_hyperslab(small_cube_xz_slice_2_sid, H5S_SELECT_SET, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ start[0] = 2; /* x */
+ start[1] = 0; /* y */
+ start[2] = 2; /* z */
+
+ stride[0] = 4; /* x */
+ stride[1] = 20; /* y -- large enough that there will only be one slice */
+ stride[2] = 4; /* z */
+
+ count[0] = 2; /* x */
+ count[1] = 1; /* y */
+ count[2] = 2; /* z */
+
+ block[0] = 2; /* x */
+ block[1] = 1; /* y */
+ block[2] = 2; /* z */
+ ret = H5Sselect_hyperslab(small_cube_xz_slice_0_sid, H5S_SELECT_OR, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ start[1] = 5;
+ ret = H5Sselect_hyperslab(small_cube_xz_slice_1_sid, H5S_SELECT_OR, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ start[1] = 9;
+ ret = H5Sselect_hyperslab(small_cube_xz_slice_2_sid, H5S_SELECT_OR, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* Create the 10 X 10 X 10 dataspaces for the hyperslabs parallel to the yz plane */
+ small_cube_yz_slice_0_sid = H5Screate_simple(3, small_cube_dims, NULL);
+ CHECK(small_cube_yz_slice_0_sid, FAIL, "H5Screate_simple");
+
+ small_cube_yz_slice_1_sid = H5Screate_simple(3, small_cube_dims, NULL);
+ CHECK(small_cube_yz_slice_1_sid, FAIL, "H5Screate_simple");
+
+ small_cube_yz_slice_2_sid = H5Screate_simple(3, small_cube_dims, NULL);
+ CHECK(small_cube_yz_slice_2_sid, FAIL, "H5Screate_simple");
+
+ small_cube_yz_slice_3_sid = H5Screate_simple(3, small_cube_dims, NULL);
+ CHECK(small_cube_yz_slice_3_sid, FAIL, "H5Screate_simple");
+
+ start[0] = 0; /* x */
+ start[1] = 0; /* y */
+ start[2] = 0; /* z */
+
+ stride[0] = 20; /* x -- large enough that there will only be one slice */
+ stride[1] = 4; /* y */
+ stride[2] = 4; /* z */
+
+ count[0] = 1; /* x */
+ count[1] = 3; /* y */
+ count[2] = 3; /* z */
+
+ block[0] = 1; /* x */
+ block[1] = 2; /* y */
+ block[2] = 2; /* z */
+ ret = H5Sselect_hyperslab(small_cube_yz_slice_0_sid, H5S_SELECT_SET, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ start[0] = 8;
+ ret = H5Sselect_hyperslab(small_cube_yz_slice_1_sid, H5S_SELECT_SET, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ start[0] = 9;
+ ret = H5Sselect_hyperslab(small_cube_yz_slice_2_sid, H5S_SELECT_SET, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ start[0] = 3;
+ ret = H5Sselect_hyperslab(small_cube_yz_slice_3_sid, H5S_SELECT_SET, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ start[0] = 0; /* x */
+ start[1] = 2; /* y */
+ start[2] = 2; /* z */
+
+ stride[0] = 20; /* x -- large enough that there will only be one slice */
+ stride[1] = 4; /* y */
+ stride[2] = 4; /* z */
+
+ count[0] = 1; /* x */
+ count[1] = 2; /* y */
+ count[2] = 2; /* z */
+
+ block[0] = 1; /* x */
+ block[1] = 2; /* y */
+ block[2] = 2; /* z */
+ ret = H5Sselect_hyperslab(small_cube_yz_slice_0_sid, H5S_SELECT_OR, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ start[0] = 8;
+ ret = H5Sselect_hyperslab(small_cube_yz_slice_1_sid, H5S_SELECT_OR, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ start[0] = 9;
+ ret = H5Sselect_hyperslab(small_cube_yz_slice_2_sid, H5S_SELECT_OR, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ start[0] = 4;
+ /* This test gets the right answer, but the shape same test fails
+ * at an unexpected point. Bring this up with Quincey, as the
+ * oddness does not appear to be related to my code.
+ * -- JRM
+ */
+ ret = H5Sselect_hyperslab(small_cube_yz_slice_3_sid, H5S_SELECT_OR, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
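+ /* yz slice 3 now has half of its checker board squares on the x = 3
+ * plane and half on the x = 4 plane, so it should not be shape-same
+ * with the flat 2-D checker board selection.
+ */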
+
+ /* setup is done -- run the tests: */
+
+ /* Compare against "xy" selection */
+ check = H5Sselect_shape_same(small_cube_xy_slice_0_sid, small_square_sid);
+ VERIFY(check, FALSE, "H5Sselect_shape_same");
+
+ check = H5Sselect_shape_same(small_cube_xy_slice_1_sid, small_square_sid);
+ VERIFY(check, FALSE, "H5Sselect_shape_same");
+
+ check = H5Sselect_shape_same(small_cube_xy_slice_2_sid, small_square_sid);
+ VERIFY(check, FALSE, "H5Sselect_shape_same");
+
+ /* Compare against "xz" selection */
+ check = H5Sselect_shape_same(small_cube_xz_slice_0_sid, small_square_sid);
+ VERIFY(check, FALSE, "H5Sselect_shape_same");
+
+ check = H5Sselect_shape_same(small_cube_xz_slice_1_sid, small_square_sid);
+ VERIFY(check, FALSE, "H5Sselect_shape_same");
+
+ check = H5Sselect_shape_same(small_cube_xz_slice_2_sid, small_square_sid);
+ VERIFY(check, FALSE, "H5Sselect_shape_same");
+
+ /* Compare against "yz" selection */
+ check = H5Sselect_shape_same(small_cube_yz_slice_0_sid, small_square_sid);
+ VERIFY(check, TRUE, "H5Sselect_shape_same");
+
+ check = H5Sselect_shape_same(small_cube_yz_slice_1_sid, small_square_sid);
+ VERIFY(check, TRUE, "H5Sselect_shape_same");
+
+ check = H5Sselect_shape_same(small_cube_yz_slice_2_sid, small_square_sid);
+ VERIFY(check, TRUE, "H5Sselect_shape_same");
+
+ check = H5Sselect_shape_same(small_cube_yz_slice_3_sid, small_square_sid);
+ VERIFY(check, FALSE, "H5Sselect_shape_same");
+
+ /* Close dataspaces */
+ ret = H5Sclose(small_square_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ ret = H5Sclose(small_cube_xy_slice_0_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ ret = H5Sclose(small_cube_xy_slice_1_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ ret = H5Sclose(small_cube_xy_slice_2_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ ret = H5Sclose(small_cube_xz_slice_0_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ ret = H5Sclose(small_cube_xz_slice_1_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ ret = H5Sclose(small_cube_xz_slice_2_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ ret = H5Sclose(small_cube_yz_slice_0_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ ret = H5Sclose(small_cube_yz_slice_1_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ ret = H5Sclose(small_cube_yz_slice_2_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ ret = H5Sclose(small_cube_yz_slice_3_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+} /* test_shape_same_dr__smoke_check_2() */
+
+/****************************************************************
+**
+** test_shape_same_dr__smoke_check_3():
+**
+** Create a square, 2-D dataspace (10 X 10), and select an
+** irregular hyperslab as follows:
+**
+** y
+** 9 - - - - - - - - - -
+** 8 - - - - - - - - - -
+** 7 - - - * * * * - - -
+** 6 - - * * * * * - - -
+** 5 - - * * - - - - - -
+** 4 - - * * - * * - - -
+** 3 - - * * - * * - - -
+** 2 - - - - - - - - - -
+** 1 - - - - - - - - - -
+** 0 - - - - - - - - - -
+** 0 1 2 3 4 5 6 7 8 9 x
+**
+** where asterisks indicate selected elements, and dashes
+** indicate unselected elements.
+**
+** Similarly, create nine, 3-D dataspaces (10 X 10 X 10),
+** and select similar irregular hyperslabs in each, three
+** with the slab parallel to the xy plane, three parallel
+** to the xz plane, and three parallel to the yz plane.
+** Further, translate the irregular slab in 2/3rds of the
+** cases.
+**
+** Assuming that z is the fastest changing dimension,
+** H5Sselect_shape_same() should return TRUE when
+** comparing the 2-D irregular hyperslab selection
+** against the irregular hyperslab selections parallel
+** to the yz plane in the 3-D space, and FALSE when
+** comparing it against the irregular hyperslab
+** selections parallel to the xy or xz planes.
+**
+****************************************************************/
+static void
+test_shape_same_dr__smoke_check_3(void)
+{
+ hid_t small_square_sid;
+ hid_t small_cube_xy_slice_0_sid;
+ hid_t small_cube_xy_slice_1_sid;
+ hid_t small_cube_xy_slice_2_sid;
+ hid_t small_cube_xz_slice_0_sid;
+ hid_t small_cube_xz_slice_1_sid;
+ hid_t small_cube_xz_slice_2_sid;
+ hid_t small_cube_yz_slice_0_sid;
+ hid_t small_cube_yz_slice_1_sid;
+ hid_t small_cube_yz_slice_2_sid;
+ hsize_t small_cube_dims[] = {10, 10, 10};
+ hsize_t start[3];
+ hsize_t stride[3];
+ hsize_t count[3];
+ hsize_t block[3];
+ htri_t check; /* Shape comparison return value */
+ herr_t ret; /* Generic return value */
+
+ MESSAGE(7, (" Smoke check 3: Offset subsets of slices through a cube.\n"));
+
+ /* Create the 10 x 10 dataspace */
+ small_square_sid = H5Screate_simple(2, small_cube_dims, NULL);
+ CHECK(small_square_sid, FAIL, "H5Screate_simple");
+
+ start[0] = 2; /* x */
+ start[1] = 3; /* y */
+
+ stride[0] = 20; /* x */
+ stride[1] = 20; /* y */
+
+ count[0] = 1; /* x */
+ count[1] = 1; /* y */
+
+ block[0] = 2; /* x */
+ block[1] = 4; /* y */
+ ret = H5Sselect_hyperslab(small_square_sid, H5S_SELECT_SET, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ start[0] = 3; /* x */
+ start[1] = 6; /* y */
+
+ stride[0] = 20; /* x */
+ stride[1] = 20; /* y */
+
+ count[0] = 1; /* x */
+ count[1] = 1; /* y */
+
+ block[0] = 4; /* x */
+ block[1] = 2; /* y */
+ ret = H5Sselect_hyperslab(small_square_sid, H5S_SELECT_OR, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ start[0] = 5; /* x */
+ start[1] = 3; /* y */
+
+ stride[0] = 20; /* x */
+ stride[1] = 20; /* y */
+
+ count[0] = 1; /* x */
+ count[1] = 1; /* y */
+
+ block[0] = 2; /* x */
+ block[1] = 2; /* y */
+ ret = H5Sselect_hyperslab(small_square_sid, H5S_SELECT_OR, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
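+ /* the SET and two OR selections above form the irregular 2-D pattern
+ * shown in the function header diagram
+ */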
+
+ /* Create the 10 X 10 X 10 dataspaces for the hyperslabs parallel to the xy plane */
+ small_cube_xy_slice_0_sid = H5Screate_simple(3, small_cube_dims, NULL);
+ CHECK(small_cube_xy_slice_0_sid, FAIL, "H5Screate_simple");
+
+ small_cube_xy_slice_1_sid = H5Screate_simple(3, small_cube_dims, NULL);
+ CHECK(small_cube_xy_slice_1_sid, FAIL, "H5Screate_simple");
+
+ small_cube_xy_slice_2_sid = H5Screate_simple(3, small_cube_dims, NULL);
+ CHECK(small_cube_xy_slice_2_sid, FAIL, "H5Screate_simple");
+
+ start[0] = 2; /* x */
+ start[1] = 3; /* y */
+ start[2] = 5; /* z */
+
+ stride[0] = 20; /* x */
+ stride[1] = 20; /* y */
+ stride[2] = 20; /* z */
+
+ count[0] = 1; /* x */
+ count[1] = 1; /* y */
+ count[2] = 1; /* z */
+
+ block[0] = 2; /* x */
+ block[1] = 4; /* y */
+ block[2] = 1; /* z */
+ ret = H5Sselect_hyperslab(small_cube_xy_slice_0_sid, H5S_SELECT_SET, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* move the starting point to the origin */
+ start[0] -= 1; /* x */
+ start[1] -= 2; /* y */
+ ret = H5Sselect_hyperslab(small_cube_xy_slice_1_sid, H5S_SELECT_SET, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* move the irregular selection to the upper right hand corner */
+ start[0] += 5; /* x */
+ start[1] += 5; /* y */
+ ret = H5Sselect_hyperslab(small_cube_xy_slice_2_sid, H5S_SELECT_SET, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ start[0] = 3; /* x */
+ start[1] = 6; /* y */
+ start[2] = 5; /* z */
+
+ stride[0] = 20; /* x */
+ stride[1] = 20; /* y */
+ stride[2] = 20; /* z */
+
+ count[0] = 1; /* x */
+ count[1] = 1; /* y */
+ count[2] = 1; /* z */
+
+ block[0] = 4; /* x */
+ block[1] = 2; /* y */
+ block[2] = 1; /* z */
+ ret = H5Sselect_hyperslab(small_cube_xy_slice_0_sid, H5S_SELECT_OR, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* move the starting point to the origin */
+ start[0] -= 1; /* x */
+ start[1] -= 2; /* y */
+ ret = H5Sselect_hyperslab(small_cube_xy_slice_1_sid, H5S_SELECT_OR, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* move the irregular selection to the upper right hand corner */
+ start[0] += 5; /* x */
+ start[1] += 5; /* y */
+ ret = H5Sselect_hyperslab(small_cube_xy_slice_2_sid, H5S_SELECT_OR, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ start[0] = 5; /* x */
+ start[1] = 3; /* y */
+ start[2] = 5; /* z */
+
+ stride[0] = 20; /* x */
+ stride[1] = 20; /* y */
+ stride[2] = 20; /* z */
+
+ count[0] = 1; /* x */
+ count[1] = 1; /* y */
+ count[2] = 1; /* z */
+
+ block[0] = 2; /* x */
+ block[1] = 2; /* y */
+ block[2] = 1; /* z */
+ ret = H5Sselect_hyperslab(small_cube_xy_slice_0_sid, H5S_SELECT_OR, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* move the starting point to the origin */
+ start[0] -= 1; /* x */
+ start[1] -= 2; /* y */
+ ret = H5Sselect_hyperslab(small_cube_xy_slice_1_sid, H5S_SELECT_OR, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* move the irregular selection to the upper right hand corner */
+ start[0] += 5; /* x */
+ start[1] += 5; /* y */
+ ret = H5Sselect_hyperslab(small_cube_xy_slice_2_sid, H5S_SELECT_OR, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* Create the 10 X 10 X 10 dataspaces for the hyperslabs parallel to the xz plane */
+ small_cube_xz_slice_0_sid = H5Screate_simple(3, small_cube_dims, NULL);
+ CHECK(small_cube_xz_slice_0_sid, FAIL, "H5Screate_simple");
+
+ small_cube_xz_slice_1_sid = H5Screate_simple(3, small_cube_dims, NULL);
+ CHECK(small_cube_xz_slice_1_sid, FAIL, "H5Screate_simple");
+
+ small_cube_xz_slice_2_sid = H5Screate_simple(3, small_cube_dims, NULL);
+ CHECK(small_cube_xz_slice_2_sid, FAIL, "H5Screate_simple");
+
+ start[0] = 2; /* x */
+ start[1] = 5; /* y */
+ start[2] = 3; /* z */
+
+ stride[0] = 20; /* x */
+ stride[1] = 20; /* y */
+ stride[2] = 20; /* z */
+
+ count[0] = 1; /* x */
+ count[1] = 1; /* y */
+ count[2] = 1; /* z */
+
+ block[0] = 2; /* x */
+ block[1] = 1; /* y */
+ block[2] = 4; /* z */
+ ret = H5Sselect_hyperslab(small_cube_xz_slice_0_sid, H5S_SELECT_SET, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* move the starting point to the origin */
+ start[0] -= 1; /* x */
+ start[2] -= 2; /* z */
+ ret = H5Sselect_hyperslab(small_cube_xz_slice_1_sid, H5S_SELECT_SET, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* move the irregular selection to the upper right hand corner */
+ start[0] += 5; /* x */
+ start[2] += 5; /* z */
+ ret = H5Sselect_hyperslab(small_cube_xz_slice_2_sid, H5S_SELECT_SET, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ start[0] = 3; /* x */
+ start[1] = 5; /* y */
+ start[2] = 6; /* z */
+
+ stride[0] = 20; /* x */
+ stride[1] = 20; /* y */
+ stride[2] = 20; /* z */
+
+ count[0] = 1; /* x */
+ count[1] = 1; /* y */
+ count[2] = 1; /* z */
+
+ block[0] = 4; /* x */
+ block[1] = 1; /* y */
+ block[2] = 2; /* z */
+ ret = H5Sselect_hyperslab(small_cube_xz_slice_0_sid, H5S_SELECT_OR, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* move the starting point to the origin */
+ start[0] -= 1; /* x */
+ start[2] -= 2; /* z */
+ ret = H5Sselect_hyperslab(small_cube_xz_slice_1_sid, H5S_SELECT_OR, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* move the irregular selection to the upper right hand corner */
+ start[0] += 5; /* x */
+ start[2] += 5; /* z */
+ ret = H5Sselect_hyperslab(small_cube_xz_slice_2_sid, H5S_SELECT_OR, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ start[0] = 5; /* x */
+ start[1] = 5; /* y */
+ start[2] = 3; /* z */
+
+ stride[0] = 20; /* x */
+ stride[1] = 20; /* y */
+ stride[2] = 20; /* z */
+
+ count[0] = 1; /* x */
+ count[1] = 1; /* y */
+ count[2] = 1; /* z */
+
+ block[0] = 2; /* x */
+ block[1] = 1; /* y */
+ block[2] = 2; /* z */
+ ret = H5Sselect_hyperslab(small_cube_xz_slice_0_sid, H5S_SELECT_OR, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* move the starting point to the origin */
+ start[0] -= 1; /* x */
+ start[2] -= 2; /* z */
+ ret = H5Sselect_hyperslab(small_cube_xz_slice_1_sid, H5S_SELECT_OR, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* move the irregular selection to the upper right hand corner */
+ start[0] += 5; /* x */
+ start[2] += 5; /* z */
+ ret = H5Sselect_hyperslab(small_cube_xz_slice_2_sid, H5S_SELECT_OR, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* Create the 10 X 10 X 10 dataspaces for the hyperslabs parallel to the yz plane */
+ small_cube_yz_slice_0_sid = H5Screate_simple(3, small_cube_dims, NULL);
+ CHECK(small_cube_yz_slice_0_sid, FAIL, "H5Screate_simple");
+
+ small_cube_yz_slice_1_sid = H5Screate_simple(3, small_cube_dims, NULL);
+ CHECK(small_cube_yz_slice_1_sid, FAIL, "H5Screate_simple");
+
+ small_cube_yz_slice_2_sid = H5Screate_simple(3, small_cube_dims, NULL);
+ CHECK(small_cube_yz_slice_2_sid, FAIL, "H5Screate_simple");
+
+ start[0] = 8; /* x */
+ start[1] = 2; /* y */
+ start[2] = 3; /* z */
+
+ stride[0] = 20; /* x -- large enough that there will only be one slice */
+ stride[1] = 20; /* y */
+ stride[2] = 20; /* z */
+
+ count[0] = 1; /* x */
+ count[1] = 1; /* y */
+ count[2] = 1; /* z */
+
+ block[0] = 1; /* x */
+ block[1] = 2; /* y */
+ block[2] = 4; /* z */
+ ret = H5Sselect_hyperslab(small_cube_yz_slice_0_sid, H5S_SELECT_SET, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* move the starting point to the origin */
+ start[1] -= 1; /* y */
+ start[2] -= 2; /* z */
+ ret = H5Sselect_hyperslab(small_cube_yz_slice_1_sid, H5S_SELECT_SET, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* move the irregular selection to the upper right hand corner */
+ start[0] += 5; /* x */
+ start[2] += 5; /* z */
+ ret = H5Sselect_hyperslab(small_cube_yz_slice_2_sid, H5S_SELECT_SET, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ start[0] = 8; /* x */
+ start[1] = 3; /* y */
+ start[2] = 6; /* z */
+
+ stride[0] = 20; /* x */
+ stride[1] = 20; /* y */
+ stride[2] = 20; /* z */
+
+ count[0] = 1; /* x */
+ count[1] = 1; /* y */
+ count[2] = 1; /* z */
+
+ block[0] = 1; /* x */
+ block[1] = 4; /* y */
+ block[2] = 2; /* z */
+ ret = H5Sselect_hyperslab(small_cube_yz_slice_0_sid, H5S_SELECT_OR, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* move the starting point to the origin */
+ start[1] -= 1; /* y */
+ start[2] -= 2; /* z */
+ ret = H5Sselect_hyperslab(small_cube_yz_slice_1_sid, H5S_SELECT_OR, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* move the irregular selection to the upper right hand corner */
+ start[0] += 5; /* x */
+ start[2] += 5; /* z */
+ ret = H5Sselect_hyperslab(small_cube_yz_slice_2_sid, H5S_SELECT_OR, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ start[0] = 8; /* x */
+ start[1] = 5; /* y */
+ start[2] = 3; /* z */
+
+ stride[0] = 20; /* x */
+ stride[1] = 20; /* y */
+ stride[2] = 20; /* z */
+
+ count[0] = 1; /* x */
+ count[1] = 1; /* y */
+ count[2] = 1; /* z */
+
+ block[0] = 1; /* x */
+ block[1] = 2; /* y */
+ block[2] = 2; /* z */
+ ret = H5Sselect_hyperslab(small_cube_yz_slice_0_sid, H5S_SELECT_OR, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* move the starting point to the origin */
+ start[1] -= 1; /* y */
+ start[2] -= 2; /* z */
+ ret = H5Sselect_hyperslab(small_cube_yz_slice_1_sid, H5S_SELECT_OR, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* move the irregular selection to the upper right hand corner */
+ start[0] += 5; /* x */
+ start[2] += 5; /* z */
+ ret = H5Sselect_hyperslab(small_cube_yz_slice_2_sid, H5S_SELECT_OR, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* setup is done -- run the tests: */
+
+ /* Compare against "xy" selection */
+ check = H5Sselect_shape_same(small_cube_xy_slice_0_sid, small_square_sid);
+ VERIFY(check, FALSE, "H5Sselect_shape_same");
+
+ check = H5Sselect_shape_same(small_cube_xy_slice_1_sid, small_square_sid);
+ VERIFY(check, FALSE, "H5Sselect_shape_same");
+
+ check = H5Sselect_shape_same(small_cube_xy_slice_2_sid, small_square_sid);
+ VERIFY(check, FALSE, "H5Sselect_shape_same");
+
+ /* Compare against "xz" selection */
+ check = H5Sselect_shape_same(small_cube_xz_slice_0_sid, small_square_sid);
+ VERIFY(check, FALSE, "H5Sselect_shape_same");
+
+ check = H5Sselect_shape_same(small_cube_xz_slice_1_sid, small_square_sid);
+ VERIFY(check, FALSE, "H5Sselect_shape_same");
+
+ check = H5Sselect_shape_same(small_cube_xz_slice_2_sid, small_square_sid);
+ VERIFY(check, FALSE, "H5Sselect_shape_same");
+
+ /* Compare against "yz" selection */
+ check = H5Sselect_shape_same(small_cube_yz_slice_0_sid, small_square_sid);
+ VERIFY(check, TRUE, "H5Sselect_shape_same");
+
+ check = H5Sselect_shape_same(small_cube_yz_slice_1_sid, small_square_sid);
+ VERIFY(check, TRUE, "H5Sselect_shape_same");
+
+ check = H5Sselect_shape_same(small_cube_yz_slice_2_sid, small_square_sid);
+ VERIFY(check, TRUE, "H5Sselect_shape_same");
+
+ /* Close dataspaces */
+ ret = H5Sclose(small_square_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ ret = H5Sclose(small_cube_xy_slice_0_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ ret = H5Sclose(small_cube_xy_slice_1_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ ret = H5Sclose(small_cube_xy_slice_2_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ ret = H5Sclose(small_cube_xz_slice_0_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ ret = H5Sclose(small_cube_xz_slice_1_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ ret = H5Sclose(small_cube_xz_slice_2_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ ret = H5Sclose(small_cube_yz_slice_0_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ ret = H5Sclose(small_cube_yz_slice_1_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ ret = H5Sclose(small_cube_yz_slice_2_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+} /* test_shape_same_dr__smoke_check_3() */
+
+/****************************************************************
+**
+** test_shape_same_dr__smoke_check_4():
+**
+** Create a square, 2-D dataspace (10 X 10), and select
+** the entire space.
+**
+** Similarly, create 3-D and 4-D dataspaces:
+**
+** (1 X 10 X 10)
+** (10 X 1 X 10)
+** (10 X 10 X 1)
+** (10 X 10 X 10)
+**
+** (1 X 1 X 10 X 10)
+** (1 X 10 X 1 X 10)
+** (1 X 10 X 10 X 1)
+** (10 X 1 X 1 X 10)
+** (10 X 1 X 10 X 1)
+** (10 X 10 X 1 X 1)
+** (10 X 1 X 10 X 10)
+**
+** And select these entire spaces as well.
+**
+** Compare the 2-D space against all the other spaces
+** with H5Sselect_shape_same(). The (1 X 10 X 10) &
+** (1 X 1 X 10 X 10) should return TRUE. All others
+** should return FALSE.
+**
+****************************************************************/
+static void
+test_shape_same_dr__smoke_check_4(void)
+{
+ hid_t square_sid;
+ hid_t three_d_space_0_sid;
+ hid_t three_d_space_1_sid;
+ hid_t three_d_space_2_sid;
+ hid_t three_d_space_3_sid;
+ hid_t four_d_space_0_sid;
+ hid_t four_d_space_1_sid;
+ hid_t four_d_space_2_sid;
+ hid_t four_d_space_3_sid;
+ hid_t four_d_space_4_sid;
+ hid_t four_d_space_5_sid;
+ hid_t four_d_space_6_sid;
+ hsize_t dims[] = {10, 10, 10, 10};
+ htri_t check; /* Shape comparison return value */
+ herr_t ret; /* Generic return value */
+
+ MESSAGE(7, (" Smoke check 4: Spaces of different dimension but same size.\n"));
+
+ /* Create the 10 x 10 dataspace */
+ square_sid = H5Screate_simple(2, dims, NULL);
+ CHECK(square_sid, FAIL, "H5Screate_simple");
+
+ /* create (1 X 10 X 10) dataspace */
+ dims[0] = 1;
+ dims[1] = 10;
+ dims[2] = 10;
+ three_d_space_0_sid = H5Screate_simple(3, dims, NULL);
+ CHECK(three_d_space_0_sid, FAIL, "H5Screate_simple");
+
+ /* create (10 X 1 X 10) dataspace */
+ dims[0] = 10;
+ dims[1] = 1;
+ dims[2] = 10;
+ three_d_space_1_sid = H5Screate_simple(3, dims, NULL);
+ CHECK(three_d_space_1_sid, FAIL, "H5Screate_simple");
+
+ /* create (10 X 10 X 1) dataspace */
+ dims[0] = 10;
+ dims[1] = 10;
+ dims[2] = 1;
+ three_d_space_2_sid = H5Screate_simple(3, dims, NULL);
+ CHECK(three_d_space_2_sid, FAIL, "H5Screate_simple");
+
+ /* create (10 X 10 X 10) dataspace */
+ dims[0] = 10;
+ dims[1] = 10;
+ dims[2] = 10;
+ three_d_space_3_sid = H5Screate_simple(3, dims, NULL);
+ CHECK(three_d_space_3_sid, FAIL, "H5Screate_simple");
+
+ /* create (1 X 1 X 10 X 10) dataspace */
+ dims[0] = 1;
+ dims[1] = 1;
+ dims[2] = 10;
+ dims[3] = 10;
+ four_d_space_0_sid = H5Screate_simple(4, dims, NULL);
+ CHECK(four_d_space_0_sid, FAIL, "H5Screate_simple");
+
+ /* create (1 X 10 X 1 X 10) dataspace */
+ dims[0] = 1;
+ dims[1] = 10;
+ dims[2] = 1;
+ dims[3] = 10;
+ four_d_space_1_sid = H5Screate_simple(4, dims, NULL);
+ CHECK(four_d_space_1_sid, FAIL, "H5Screate_simple");
+
+ /* create (1 X 10 X 10 X 1) dataspace */
+ dims[0] = 1;
+ dims[1] = 10;
+ dims[2] = 10;
+ dims[3] = 1;
+ four_d_space_2_sid = H5Screate_simple(4, dims, NULL);
+ CHECK(four_d_space_2_sid, FAIL, "H5Screate_simple");
+
+ /* create (10 X 1 X 1 X 10) dataspace */
+ dims[0] = 10;
+ dims[1] = 1;
+ dims[2] = 1;
+ dims[3] = 10;
+ four_d_space_3_sid = H5Screate_simple(4, dims, NULL);
+ CHECK(four_d_space_3_sid, FAIL, "H5Screate_simple");
+
+ /* create (10 X 1 X 10 X 1) dataspace */
+ dims[0] = 10;
+ dims[1] = 1;
+ dims[2] = 10;
+ dims[3] = 1;
+ four_d_space_4_sid = H5Screate_simple(4, dims, NULL);
+ CHECK(four_d_space_4_sid, FAIL, "H5Screate_simple");
+
+ /* create (10 X 10 X 1 X 1) dataspace */
+ dims[0] = 10;
+ dims[1] = 10;
+ dims[2] = 1;
+ dims[3] = 1;
+ four_d_space_5_sid = H5Screate_simple(4, dims, NULL);
+ CHECK(four_d_space_5_sid, FAIL, "H5Screate_simple");
+
+ /* create (10 X 1 X 10 X 10) dataspace */
+ dims[0] = 10;
+ dims[1] = 1;
+ dims[2] = 10;
+ dims[3] = 10;
+ four_d_space_6_sid = H5Screate_simple(4, dims, NULL);
+ CHECK(four_d_space_6_sid, FAIL, "H5Screate_simple");
+
+ /* setup is done -- run the tests: */
+
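+ /* only the spaces whose two fastest changing dimensions are 10 X 10 and
+ * whose remaining dimensions are all 1 -- (1 X 10 X 10) and
+ * (1 X 1 X 10 X 10) -- should compare as the same shape as the 2-D square
+ */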
+ check = H5Sselect_shape_same(three_d_space_0_sid, square_sid);
+ VERIFY(check, TRUE, "H5Sselect_shape_same");
+
+ check = H5Sselect_shape_same(three_d_space_1_sid, square_sid);
+ VERIFY(check, FALSE, "H5Sselect_shape_same");
+
+ check = H5Sselect_shape_same(three_d_space_2_sid, square_sid);
+ VERIFY(check, FALSE, "H5Sselect_shape_same");
+
+ check = H5Sselect_shape_same(three_d_space_3_sid, square_sid);
+ VERIFY(check, FALSE, "H5Sselect_shape_same");
+
+ check = H5Sselect_shape_same(four_d_space_0_sid, square_sid);
+ VERIFY(check, TRUE, "H5Sselect_shape_same");
+
+ check = H5Sselect_shape_same(four_d_space_1_sid, square_sid);
+ VERIFY(check, FALSE, "H5Sselect_shape_same");
+
+ check = H5Sselect_shape_same(four_d_space_2_sid, square_sid);
+ VERIFY(check, FALSE, "H5Sselect_shape_same");
+
+ check = H5Sselect_shape_same(four_d_space_3_sid, square_sid);
+ VERIFY(check, FALSE, "H5Sselect_shape_same");
+
+ check = H5Sselect_shape_same(four_d_space_4_sid, square_sid);
+ VERIFY(check, FALSE, "H5Sselect_shape_same");
+
+ check = H5Sselect_shape_same(four_d_space_5_sid, square_sid);
+ VERIFY(check, FALSE, "H5Sselect_shape_same");
+
+ check = H5Sselect_shape_same(four_d_space_6_sid, square_sid);
+ VERIFY(check, FALSE, "H5Sselect_shape_same");
+
+ /* Close dataspaces */
+ ret = H5Sclose(square_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ ret = H5Sclose(three_d_space_0_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ ret = H5Sclose(three_d_space_1_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ ret = H5Sclose(three_d_space_2_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ ret = H5Sclose(three_d_space_3_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ ret = H5Sclose(four_d_space_0_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ ret = H5Sclose(four_d_space_1_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ ret = H5Sclose(four_d_space_2_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ ret = H5Sclose(four_d_space_3_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ ret = H5Sclose(four_d_space_4_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ ret = H5Sclose(four_d_space_5_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ ret = H5Sclose(four_d_space_6_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+} /* test_shape_same_dr__smoke_check_4() */
+
+/****************************************************************
+**
+** test_shape_same_dr__full_space_vs_slice(): Tests selection
+** of a full n-cube dataspace vs an n-dimensional slice
+** of an m-cube (m > n) in a call to H5Sselect_shape_same().
+** Note that this test does not require the n-cube and the
+** n-dimensional slice to have the same rank (although
+** H5Sselect_shape_same() should always return FALSE if
+** they don't).
+**
+** Per Quincey's suggestion, only test up to 5 dimensional
+** spaces.
+**
+****************************************************************/
+static void
+test_shape_same_dr__full_space_vs_slice(int test_num, int small_rank, int large_rank, int offset,
+ hsize_t edge_size, hbool_t dim_selected[], hbool_t expected_result)
+{
+ char test_desc_0[128];
+ char test_desc_1[256];
+ int i;
+ hid_t n_cube_0_sid; /* the fully selected hyper cube */
+ hid_t n_cube_1_sid; /* the hyper cube in which a slice is selected */
+ hsize_t dims[SS_DR_MAX_RANK];
+ hsize_t start[SS_DR_MAX_RANK];
+ hsize_t *start_ptr;
+ hsize_t stride[SS_DR_MAX_RANK];
+ hsize_t *stride_ptr;
+ hsize_t count[SS_DR_MAX_RANK];
+ hsize_t *count_ptr;
+ hsize_t block[SS_DR_MAX_RANK];
+ hsize_t *block_ptr;
+ htri_t check; /* Shape comparison return value */
+ herr_t ret; /* Generic return value */
+
+ HDassert(0 < small_rank);
+ HDassert(small_rank <= large_rank);
+ HDassert(large_rank <= SS_DR_MAX_RANK);
+ HDassert(0 <= offset);
+ HDassert(offset < large_rank);
+ HDassert(edge_size > 0);
+ HDassert(edge_size <= 1000);
+
+ HDsnprintf(test_desc_0, sizeof(test_desc_0), "\tn-cube slice through m-cube (n <= m) test %d.\n",
+ test_num);
+ MESSAGE(7, ("%s", test_desc_0));
+
+ /* This statement must be updated if SS_DR_MAX_RANK is changed */
+ HDsnprintf(test_desc_1, sizeof(test_desc_1),
+ "\t\tranks: %d/%d offset: %d dim_selected: %d/%d/%d/%d/%d.\n", small_rank, large_rank, offset,
+ (int)dim_selected[0], (int)dim_selected[1], (int)dim_selected[2], (int)dim_selected[3],
+ (int)dim_selected[4]);
+ MESSAGE(7, ("%s", test_desc_1));
+
+ /* copy the edge size into the dims array */
+ for (i = 0; i < SS_DR_MAX_RANK; i++)
+ dims[i] = edge_size;
+
+ /* Create the small n-cube */
+ n_cube_0_sid = H5Screate_simple(small_rank, dims, NULL);
+ CHECK(n_cube_0_sid, FAIL, "H5Screate_simple");
+
+ /* Create the large n-cube */
+ n_cube_1_sid = H5Screate_simple(large_rank, dims, NULL);
+ CHECK(n_cube_1_sid, FAIL, "H5Screate_simple");
+
+ /* set up start, stride, count, and block for the hyperslab selection */
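+ /* In each selected dimension the full edge is used; each unselected
+ * dimension is pinned to the single index given by offset, so the
+ * selection is a slice whose dimensionality equals the number of
+ * selected dimensions.
+ */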
+ for (i = 0; i < SS_DR_MAX_RANK; i++) {
+ stride[i] = 2 * edge_size; /* a bit silly in this case */
+ count[i] = 1;
+ if (dim_selected[i]) {
+ start[i] = 0;
+ block[i] = edge_size;
+ }
+ else {
+ start[i] = (hsize_t)offset;
+ block[i] = 1;
+ }
+ }
+
+ /* since large rank may be less than SS_DR_MAX_RANK, we may not
+ * use the entire start, stride, count, and block arrays. This
+ * is a problem, since it is inconvenient to set up the dim_selected
+ * array to reflect the large rank, and thus if large_rank <
+ * SS_DR_MAX_RANK, we need to hide the lower index entries
+ * from H5Sselect_hyperslab().
+ *
+ * Do this by setting up pointers to the first valid entry in start,
+ * stride, count, and block below, and pass these pointers in
+ * to H5Sselect_hyperslab() instead of the array base addresses.
+ */
+
+ i = SS_DR_MAX_RANK - large_rank;
+ HDassert(i >= 0);
+
+ start_ptr = &(start[i]);
+ stride_ptr = &(stride[i]);
+ count_ptr = &(count[i]);
+ block_ptr = &(block[i]);
+
+ /* select the hyperslab */
+ ret = H5Sselect_hyperslab(n_cube_1_sid, H5S_SELECT_SET, start_ptr, stride_ptr, count_ptr, block_ptr);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* setup is done -- run the test: */
+ check = H5Sselect_shape_same(n_cube_0_sid, n_cube_1_sid);
+ VERIFY(check, expected_result, "H5Sselect_shape_same");
+
+ /* Close dataspaces */
+ ret = H5Sclose(n_cube_0_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ ret = H5Sclose(n_cube_1_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+} /* test_shape_same_dr__full_space_vs_slice() */
+
+/****************************************************************
+**
+** test_shape_same_dr__run_full_space_vs_slice_tests():
+**
+** Run the test_shape_same_dr__full_space_vs_slice() test
+** over a variety of ranks and offsets.
+**
+** At present, we test H5Sselect_shape_same() with
+** fully selected 1-, 2-, 3-, and 4-cubes as one parameter, and
+** 1-, 2-, 3-, and 4-dimensional slices through an n-cube of rank
+** no more than 5 (and at least the rank of the slice).
+** We stop at rank 5, as Quincey suggested that it would be
+** sufficient.
+**
+** All the n-cubes will have lengths of the same size, so
+** H5Sselect_shape_same() should return true iff:
+**
+** 1) the rank of the fully selected n-cube equals the
+** number of dimensions selected in the slice through the
+** m-cube (m >= n).
+**
+** 2) The dimensions selected in the slice through the m-cube
+** are the dimensions with the most quickly changing
+** indices.
+**
+****************************************************************/
+static void
+test_shape_same_dr__run_full_space_vs_slice_tests(void)
+{
+ hbool_t dim_selected[5];
+ hbool_t expected_result;
+ int i, j;
+ int v, w, x, y, z;
+ int test_num = 0;
+ int small_rank;
+ int large_rank;
+ hsize_t edge_size = 10;
+
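+ /* The nested do-while loops below try both TRUE and FALSE for each of
+ * the large_rank fastest changing entries of dim_selected (slower
+ * entries stay FALSE), and each combination is run at three offsets:
+ * 0, large_rank / 2, and large_rank - 1.
+ */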
+ for (large_rank = 1; large_rank <= 5; large_rank++) {
+ for (small_rank = 1; small_rank <= large_rank; small_rank++) {
+ v = 0;
+ do {
+ if (v == 0)
+ dim_selected[0] = FALSE;
+ else
+ dim_selected[0] = TRUE;
+
+ w = 0;
+ do {
+ if (w == 0)
+ dim_selected[1] = FALSE;
+ else
+ dim_selected[1] = TRUE;
+
+ x = 0;
+ do {
+ if (x == 0)
+ dim_selected[2] = FALSE;
+ else
+ dim_selected[2] = TRUE;
+
+ y = 0;
+ do {
+ if (y == 0)
+ dim_selected[3] = FALSE;
+ else
+ dim_selected[3] = TRUE;
+
+ z = 0;
+ do {
+ if (z == 0)
+ dim_selected[4] = FALSE;
+ else
+ dim_selected[4] = TRUE;
+
+ /* compute the expected result: */
+ i = 0;
+ j = 4;
+ expected_result = TRUE;
+ while ((i < small_rank) && expected_result) {
+ if (!dim_selected[j])
+ expected_result = FALSE;
+ i++;
+ j--;
+ }
+
+ while ((i < large_rank) && expected_result) {
+ if (dim_selected[j])
+ expected_result = FALSE;
+ i++;
+ j--;
+ }
+
+ /* everything is set up -- run the tests */
+
+ test_shape_same_dr__full_space_vs_slice(test_num++, small_rank, large_rank, 0,
+ edge_size, dim_selected,
+ expected_result);
+
+ test_shape_same_dr__full_space_vs_slice(test_num++, small_rank, large_rank,
+ large_rank / 2, edge_size,
+ dim_selected, expected_result);
+
+ test_shape_same_dr__full_space_vs_slice(test_num++, small_rank, large_rank,
+ large_rank - 1, edge_size,
+ dim_selected, expected_result);
+
+ z++;
+ } while ((z < 2) && (large_rank >= 1));
+
+ y++;
+ } while ((y < 2) && (large_rank >= 2));
+
+ x++;
+ } while ((x < 2) && (large_rank >= 3));
+
+ w++;
+ } while ((w < 2) && (large_rank >= 4));
+
+ v++;
+ } while ((v < 2) && (large_rank >= 5));
+ } /* end for */
+ } /* end for */
+} /* test_shape_same_dr__run_full_space_vs_slice_tests() */
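+
+/****************************************************************
+**
+** test_shape_same_dr__full_space_vs_slice_example():
+**
+** A minimal, self-contained sketch of the "full space vs
+** slice" condition exercised above -- illustrative only, and
+** not invoked by test_shape_same_dr(). It assumes nothing
+** beyond the public H5S calls already used in this file: a
+** fully selected 10 x 10 2-cube should compare as the same
+** shape as a 10 x 10 slice that spans the two most quickly
+** changing dimensions of a 10 x 10 x 10 3-cube.
+**
+****************************************************************/
+static void
+test_shape_same_dr__full_space_vs_slice_example(void)
+{
+    hid_t   full_sid, slice_sid;
+    hsize_t dims_2[2] = {10, 10};
+    hsize_t dims_3[3] = {10, 10, 10};
+    hsize_t start[3]  = {5, 0, 0};   /* fix the slowest changing dimension */
+    hsize_t count[3]  = {1, 1, 1};
+    hsize_t block[3]  = {1, 10, 10}; /* a 10 x 10 slice */
+    htri_t  check;
+    herr_t  ret;
+
+    /* fully selected 2-cube (H5Screate_simple() selects all by default) */
+    full_sid = H5Screate_simple(2, dims_2, NULL);
+    CHECK(full_sid, FAIL, "H5Screate_simple");
+
+    /* a 2-D slice through the fastest changing dimensions of a 3-cube */
+    slice_sid = H5Screate_simple(3, dims_3, NULL);
+    CHECK(slice_sid, FAIL, "H5Screate_simple");
+    ret = H5Sselect_hyperslab(slice_sid, H5S_SELECT_SET, start, NULL, count, block);
+    CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+    /* per the condition stated above, the shapes should compare as the same */
+    check = H5Sselect_shape_same(full_sid, slice_sid);
+    VERIFY(check, TRUE, "H5Sselect_shape_same");
+
+    ret = H5Sclose(full_sid);
+    CHECK(ret, FAIL, "H5Sclose");
+    ret = H5Sclose(slice_sid);
+    CHECK(ret, FAIL, "H5Sclose");
+} /* test_shape_same_dr__full_space_vs_slice_example() */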
+
+/****************************************************************
+**
+** test_shape_same_dr__checkerboard(): Tests selection of a
+** "checker board" subset of a full n-cube dataspace vs
+** a "checker board" n-dimensional slice of an m-cube (m > n).
+** in a call to H5Sselect_shape_same().
+**
+** Note that this test does not require the n-cube and the
+** n-dimensional slice to have the same rank (although
+** H5Sselect_shape_same() should always return FALSE if
+** they don't).
+**
+** Per Quincey's suggestion, only test up to 5 dimensional
+** spaces.
+**
+****************************************************************/
+static void
+test_shape_same_dr__checkerboard(int test_num, int small_rank, int large_rank, int offset, hsize_t edge_size,
+ hsize_t checker_size, hbool_t dim_selected[], hbool_t expected_result)
+{
+ char test_desc_0[128];
+ char test_desc_1[256];
+ int i;
+ int dims_selected = 0;
+ hid_t n_cube_0_sid; /* the checker board selected
+ * hyper cube
+ */
+ hid_t n_cube_1_sid; /* the hyper cube in which a
+ * checkerboard slice is selected
+ */
+ hsize_t dims[SS_DR_MAX_RANK];
+ hsize_t base_start[2];
+ hsize_t start[SS_DR_MAX_RANK];
+ hsize_t *start_ptr;
+ hsize_t base_stride[2];
+ hsize_t stride[SS_DR_MAX_RANK];
+ hsize_t *stride_ptr;
+ hsize_t base_count[2];
+ hsize_t count[SS_DR_MAX_RANK];
+ hsize_t *count_ptr;
+ hsize_t base_block[2];
+ hsize_t block[SS_DR_MAX_RANK];
+ hsize_t *block_ptr;
+ htri_t check; /* Shape comparison return value */
+ herr_t ret; /* Generic return value */
+
+ HDassert(0 < small_rank);
+ HDassert(small_rank <= large_rank);
+ HDassert(large_rank <= SS_DR_MAX_RANK);
+ HDassert(0 < checker_size);
+ HDassert(checker_size <= edge_size);
+ HDassert(edge_size <= 1000);
+ HDassert(0 <= offset);
+ HDassert(offset < (int)edge_size);
+
+ for (i = SS_DR_MAX_RANK - large_rank; i < SS_DR_MAX_RANK; i++)
+ if (dim_selected[i] == TRUE)
+ dims_selected++;
+
+ HDassert(dims_selected >= 0);
+ HDassert(dims_selected <= large_rank);
+
+ HDsnprintf(test_desc_0, sizeof(test_desc_0),
+ "\tcheckerboard n-cube slice through m-cube (n <= m) test %d.\n", test_num);
+ MESSAGE(7, ("%s", test_desc_0));
+
+ /* This statement must be updated if SS_DR_MAX_RANK is changed */
+ HDsnprintf(test_desc_1, sizeof(test_desc_1),
+ "\tranks: %d/%d edge/chkr size: %d/%d offset: %d dim_selected: %d/%d/%d/%d/%d:%d.\n",
+ small_rank, large_rank, (int)edge_size, (int)checker_size, offset, (int)dim_selected[0],
+ (int)dim_selected[1], (int)dim_selected[2], (int)dim_selected[3], (int)dim_selected[4],
+ dims_selected);
+ MESSAGE(7, ("%s", test_desc_1));
+
+ /* copy the edge size into the dims array */
+ for (i = 0; i < SS_DR_MAX_RANK; i++)
+ dims[i] = edge_size;
+
+ /* Create the small n-cube */
+ n_cube_0_sid = H5Screate_simple(small_rank, dims, NULL);
+ CHECK(n_cube_0_sid, FAIL, "H5Screate_simple");
+
+ /* Select a "checkerboard" pattern in the small n-cube.
+ *
+ * In the 1-D case, the "checkerboard" would look like this:
+ *
+ * * * - - * * - - * *
+ *
+ * and in the 2-D case, it would look like this:
+ *
+ * * * - - * * - - * *
+ * * * - - * * - - * *
+ * - - * * - - * * - -
+ * - - * * - - * * - -
+ * * * - - * * - - * *
+ * * * - - * * - - * *
+ * - - * * - - * * - -
+ * - - * * - - * * - -
+ * * * - - * * - - * *
+ * * * - - * * - - * *
+ *
+ * In both cases, asterisks indicate selected elements,
+ * and dashes indicate unselected elements.
+ *
+ * 3-D and 4-D ascii art is somewhat painful, so I'll
+ * leave those selections to your imagination. :-)
+ *
+ * Note that, since the edge_size and checker_size are
+ * parameters that are passed in, the selection need
+ * not look exactly like the selection shown above.
+ * At present, the function allows checker sizes that
+ * are not even divisors of the edge size -- thus
+ * something like the following is also possible:
+ *
+ * * * * - - - * * * -
+ * * * * - - - * * * -
+ * * * * - - - * * * -
+ * - - - * * * - - - *
+ * - - - * * * - - - *
+ * - - - * * * - - - *
+ * * * * - - - * * * -
+ * * * * - - - * * * -
+ * * * * - - - * * * -
+ * - - - * * * - - - *
+ *
+ * As the above pattern can't be selected in one
+ * call to H5Sselect_hyperslab(), and since the
+ * values in the start, stride, count, and block
+ * arrays will be repeated over all entries in
+ * the selected space case, and over all selected
+ * dimensions in the selected hyperslab case, we
+ * compute these values first and store them in
+ * the base_start, base_stride, base_count,
+ * and base_block arrays.
+ */
+
+ base_start[0] = 0;
+ base_start[1] = checker_size;
+
+ base_stride[0] = 2 * checker_size;
+ base_stride[1] = 2 * checker_size;
+
+ /* Note that the following computation depends on the C99
+ * requirement that integer division discard any fraction
+ * (truncation towards zero) to function correctly. As we
+ * now require C99, this shouldn't be a problem, but noting
+ * it may save us some pain if we are ever obliged to support
+ * pre-C99 compilers again.
+ */
+
+ base_count[0] = edge_size / (checker_size * 2);
+ if ((edge_size % (checker_size * 2)) > 0)
+ base_count[0]++;
+
+ base_count[1] = (edge_size - checker_size) / (checker_size * 2);
+ if (((edge_size - checker_size) % (checker_size * 2)) > 0)
+ base_count[1]++;
+
+ base_block[0] = checker_size;
+ base_block[1] = checker_size;
+
+ /* now setup start, stride, count, and block arrays for
+ * the first call to H5Sselect_hyperslab().
+ */
+ for (i = 0; i < SS_DR_MAX_RANK; i++) {
+ start[i] = base_start[0];
+ stride[i] = base_stride[0];
+ count[i] = base_count[0];
+ block[i] = base_block[0];
+ } /* end for */
+
+ ret = H5Sselect_hyperslab(n_cube_0_sid, H5S_SELECT_SET, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* if small_rank == 1, or if edge_size == checker_size, we
+ * are done, as either there is no added dimension in which
+ * to place offset selected "checkers", or the hyperslab is
+ * completely occupied by one "checker".
+ *
+ * Otherwise, set up start, stride, count and block, and
+ * make the additional selection.
+ */
+
+ if ((small_rank > 1) && (checker_size < edge_size)) {
+ for (i = 0; i < SS_DR_MAX_RANK; i++) {
+ start[i] = base_start[1];
+ stride[i] = base_stride[1];
+ count[i] = base_count[1];
+ block[i] = base_block[1];
+ } /* end for */
+
+ ret = H5Sselect_hyperslab(n_cube_0_sid, H5S_SELECT_OR, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+ } /* end if */
+
+ /* Weirdness alert:
+ *
+ * Somehow, it seems that selections can extend beyond the
+ * boundaries of the target dataspace -- hence the following
+ * code to manually clip the selection back to the dataspace
+ * proper.
+ */
+ for (i = 0; i < SS_DR_MAX_RANK; i++) {
+ start[i] = 0;
+ stride[i] = edge_size;
+ count[i] = 1;
+ block[i] = edge_size;
+ } /* end for */
+
+ ret = H5Sselect_hyperslab(n_cube_0_sid, H5S_SELECT_AND, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* Create the large n-cube */
+ n_cube_1_sid = H5Screate_simple(large_rank, dims, NULL);
+ CHECK(n_cube_1_sid, FAIL, "H5Screate_simple");
+
+ /* Now select the checkerboard selection in the (possibly larger) n-cube.
+ *
+ * Since we have already calculated the base start, stride, count,
+ * and block, re-use the values in setting up start, stride, count,
+ * and block.
+ */
+ for (i = 0; i < SS_DR_MAX_RANK; i++) {
+ if (dim_selected[i]) {
+ start[i] = base_start[0];
+ stride[i] = base_stride[0];
+ count[i] = base_count[0];
+ block[i] = base_block[0];
+ } /* end if */
+ else {
+ start[i] = (hsize_t)offset;
+ stride[i] = (hsize_t)(2 * edge_size);
+ count[i] = 1;
+ block[i] = 1;
+ } /* end else */
+ } /* end for */
+
+ /* Since large rank may be less than SS_DR_MAX_RANK, we may not
+ * use the entire start, stride, count, and block arrays. This
+ * is a problem, since it is inconvenient to set up the dim_selected
+ * array to reflect the large rank, and thus if large_rank <
+ * SS_DR_MAX_RANK, we need to hide the lower index entries
+ * from H5Sselect_hyperslab().
+ *
+ * Do this by setting up pointers to the first valid entry in start,
+ * stride, count, and block below, and pass these pointers in
+ * to H5Sselect_hyperslab() instead of the array base addresses.
+ */
+
+ i = SS_DR_MAX_RANK - large_rank;
+ HDassert(i >= 0);
+
+ start_ptr = &(start[i]);
+ stride_ptr = &(stride[i]);
+ count_ptr = &(count[i]);
+ block_ptr = &(block[i]);
+
+ /* select the hyperslab */
+ ret = H5Sselect_hyperslab(n_cube_1_sid, H5S_SELECT_SET, start_ptr, stride_ptr, count_ptr, block_ptr);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* As before, if the number of dimensions selected is less than or
+ * equal to 1, or if edge_size == checker_size, we are done, as
+ * either there is no added dimension in which to place offset selected
+ * "checkers", or the hyperslab is completely occupied by one
+ * "checker".
+ *
+ * Otherwise, set up start, stride, count and block, and
+ * make the additional selection.
+ */
+ if ((dims_selected > 1) && (checker_size < edge_size)) {
+ for (i = 0; i < SS_DR_MAX_RANK; i++) {
+ if (dim_selected[i]) {
+ start[i] = base_start[1];
+ stride[i] = base_stride[1];
+ count[i] = base_count[1];
+ block[i] = base_block[1];
+ } /* end if */
+ else {
+ start[i] = (hsize_t)offset;
+ stride[i] = (hsize_t)(2 * edge_size);
+ count[i] = 1;
+ block[i] = 1;
+ } /* end else */
+ } /* end for */
+
+ ret = H5Sselect_hyperslab(n_cube_1_sid, H5S_SELECT_OR, start_ptr, stride_ptr, count_ptr, block_ptr);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+ } /* end if */
+
+ /* Weirdness alert:
+ *
+ * Again, it seems that selections can extend beyond the
+ * boundaries of the target dataspace -- hence the following
+ * code to manually clip the selection back to the dataspace
+ * proper.
+ */
+ for (i = 0; i < SS_DR_MAX_RANK; i++) {
+ start[i] = 0;
+ stride[i] = edge_size;
+ count[i] = 1;
+ block[i] = edge_size;
+ } /* end for */
+
+ ret = H5Sselect_hyperslab(n_cube_1_sid, H5S_SELECT_AND, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* setup is done -- run the test: */
+ check = H5Sselect_shape_same(n_cube_0_sid, n_cube_1_sid);
+ VERIFY(check, expected_result, "H5Sselect_shape_same");
+
+ /* Close dataspaces */
+ ret = H5Sclose(n_cube_0_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ ret = H5Sclose(n_cube_1_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+} /* test_shape_same_dr__checkerboard() */
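+
+/****************************************************************
+**
+** test_shape_same_dr__checkerboard_example():
+**
+** A minimal sketch of the checkerboard construction described
+** above -- illustrative only, and not invoked by
+** test_shape_same_dr(). It builds the 10 x 10 / 2 x 2
+** checkerboard from the ASCII art in
+** test_shape_same_dr__checkerboard() with the same SET / OR /
+** AND sequence of H5Sselect_hyperslab() calls, and checks the
+** selected element count (36 "even" + 16 "odd" = 52 elements).
+**
+****************************************************************/
+static void
+test_shape_same_dr__checkerboard_example(void)
+{
+    hid_t    sid;
+    hsize_t  dims[2]   = {10, 10};
+    hsize_t  start[2]  = {0, 0};
+    hsize_t  stride[2] = {4, 4};     /* 2 * checker_size */
+    hsize_t  count[2]  = {3, 3};     /* ceil(10 / 4)      */
+    hsize_t  block[2]  = {2, 2};     /* checker_size      */
+    hssize_t npoints;
+    herr_t   ret;
+
+    sid = H5Screate_simple(2, dims, NULL);
+    CHECK(sid, FAIL, "H5Screate_simple");
+
+    /* the "even" checkers: rows/cols 0-1, 4-5, 8-9 */
+    ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, start, stride, count, block);
+    CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+    /* the "odd" checkers: rows/cols 2-3, 6-7 */
+    start[0] = start[1] = 2;
+    count[0] = count[1] = 2;         /* (10 - 2) / 4 */
+    ret = H5Sselect_hyperslab(sid, H5S_SELECT_OR, start, stride, count, block);
+    CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+    /* clip back to the dataspace, as the test above does (a no-op here,
+     * since no checker extends past the edge)
+     */
+    start[0] = start[1] = 0;
+    stride[0] = stride[1] = 10;
+    count[0] = count[1] = 1;
+    block[0] = block[1] = 10;
+    ret = H5Sselect_hyperslab(sid, H5S_SELECT_AND, start, stride, count, block);
+    CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+    /* 6 x 6 + 4 x 4 = 52 selected elements */
+    npoints = H5Sget_select_npoints(sid);
+    VERIFY(npoints, (hssize_t)52, "H5Sget_select_npoints");
+
+    ret = H5Sclose(sid);
+    CHECK(ret, FAIL, "H5Sclose");
+} /* test_shape_same_dr__checkerboard_example() */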
+
+/****************************************************************
+**
+** test_shape_same_dr__run_checkerboard_tests():
+**
+** In this set of tests, we test H5Sselect_shape_same()
+** with a "checkerboard" selection of 1, 2, 3, and 4 cubes as
+** one parameter, and 1, 2, 3, and 4 dimensional checkerboard
+** slices through an n-cube of rank no more than 5 (and at
+** least the rank of the slice).
+**
+** All the n-cubes have edges of the same length, so
+** H5Sselect_shape_same() should return true iff:
+**
+** 1) the rank of the n cube equals the number of dimensions
+** selected in the checker board slice through the m-cube
+** (m >= n).
+**
+** 2) The dimensions selected in the checkerboard slice
+** through the m-cube are the dimensions with the most
+** quickly changing indices.
+**
+****************************************************************/
+static void
+test_shape_same_dr__run_checkerboard_tests(void)
+{
+ hbool_t dim_selected[5];
+ hbool_t expected_result;
+ int i, j;
+ int v, w, x, y, z;
+ int test_num = 0;
+ int small_rank;
+ int large_rank;
+
+ for (large_rank = 1; large_rank <= 5; large_rank++) {
+ for (small_rank = 1; small_rank <= large_rank; small_rank++) {
+ v = 0;
+ do {
+ if (v == 0)
+ dim_selected[0] = FALSE;
+ else
+ dim_selected[0] = TRUE;
+
+ w = 0;
+ do {
+ if (w == 0)
+ dim_selected[1] = FALSE;
+ else
+ dim_selected[1] = TRUE;
+
+ x = 0;
+ do {
+ if (x == 0)
+ dim_selected[2] = FALSE;
+ else
+ dim_selected[2] = TRUE;
+
+ y = 0;
+ do {
+ if (y == 0)
+ dim_selected[3] = FALSE;
+ else
+ dim_selected[3] = TRUE;
+
+ z = 0;
+ do {
+ if (z == 0)
+ dim_selected[4] = FALSE;
+ else
+ dim_selected[4] = TRUE;
+
+ /* compute the expected result: */
+ i = 0;
+ j = 4;
+ expected_result = TRUE;
+ while ((i < small_rank) && expected_result) {
+ if (!dim_selected[j])
+ expected_result = FALSE;
+ i++;
+ j--;
+ } /* end while */
+
+ while ((i < large_rank) && expected_result) {
+ if (dim_selected[j])
+ expected_result = FALSE;
+ i++;
+ j--;
+ } /* end while */
+
+ /* everything is set up -- run the tests */
+
+ /* run test with edge size 16, checker
+ * size 1, and a variety of offsets
+ */
+ test_shape_same_dr__checkerboard(test_num++, small_rank, large_rank,
+ /* offset */ 0,
+ /* edge_size */ 16,
+ /* checker_size */ 1, dim_selected,
+ expected_result);
+
+ test_shape_same_dr__checkerboard(test_num++, small_rank, large_rank,
+ /* offset */ 5,
+ /* edge_size */ 16,
+ /* checker_size */ 1, dim_selected,
+ expected_result);
+
+ test_shape_same_dr__checkerboard(test_num++, small_rank, large_rank,
+ /* offset */ 15,
+ /* edge_size */ 16,
+ /* checker_size */ 1, dim_selected,
+ expected_result);
+
+ /* run test with edge size 10, checker
+ * size 2, and a variety of offsets
+ */
+ test_shape_same_dr__checkerboard(test_num++, small_rank, large_rank,
+ /* offset */ 0,
+ /* edge_size */ 10,
+ /* checker_size */ 2, dim_selected,
+ expected_result);
+
+ test_shape_same_dr__checkerboard(test_num++, small_rank, large_rank,
+ /* offset */ 5,
+ /* edge_size */ 10,
+ /* checker_size */ 2, dim_selected,
+ expected_result);
+
+ test_shape_same_dr__checkerboard(test_num++, small_rank, large_rank,
+ /* offset */ 9,
+ /* edge_size */ 10,
+ /* checker_size */ 2, dim_selected,
+ expected_result);
+
+ /* run test with edge size 10, checker
+ * size 3, and a variety of offsets
+ */
+ test_shape_same_dr__checkerboard(test_num++, small_rank, large_rank,
+ /* offset */ 0,
+ /* edge_size */ 10,
+ /* checker_size */ 3, dim_selected,
+ expected_result);
+
+ test_shape_same_dr__checkerboard(test_num++, small_rank, large_rank,
+ /* offset */ 5,
+ /* edge_size */ 10,
+ /* checker_size */ 3, dim_selected,
+ expected_result);
+
+ test_shape_same_dr__checkerboard(test_num++, small_rank, large_rank,
+ /* offset */ 9,
+ /* edge_size */ 10,
+ /* checker_size */ 3, dim_selected,
+ expected_result);
+
+ /* run test with edge size 8, checker
+ * size 8, and a variety of offsets
+ */
+ test_shape_same_dr__checkerboard(test_num++, small_rank, large_rank,
+ /* offset */ 0,
+ /* edge_size */ 8,
+ /* checker_size */ 8, dim_selected,
+ expected_result);
+
+ test_shape_same_dr__checkerboard(test_num++, small_rank, large_rank,
+ /* offset */ 4,
+ /* edge_size */ 8,
+ /* checker_size */ 8, dim_selected,
+ expected_result);
+
+ test_shape_same_dr__checkerboard(test_num++, small_rank, large_rank,
+ /* offset */ 7,
+ /* edge_size */ 8,
+ /* checker_size */ 8, dim_selected,
+ expected_result);
+
+ z++;
+ } while ((z < 2) && (large_rank >= 1));
+
+ y++;
+ } while ((y < 2) && (large_rank >= 2));
+
+ x++;
+ } while ((x < 2) && (large_rank >= 3));
+
+ w++;
+ } while ((w < 2) && (large_rank >= 4));
+
+ v++;
+ } while ((v < 2) && (large_rank >= 5));
+ } /* end for */
+ } /* end for */
+} /* test_shape_same_dr__run_checkerboard_tests() */
+
+/****************************************************************
+**
+** test_shape_same_dr__irregular():
+**
+** Tests selection of an "irregular" subset of a full
+** n-cube dataspace vs an identical "irregular" subset
+** of an n-dimensional slice of an m-cube (m > n)
+** in a call to H5Sselect_shape_same().
+**
+** Note that this test does not require the n-cube and the
+** n-dimensional slice to have the same rank (although
+** H5Sselect_shape_same() should always return FALSE if
+** they don't).
+**
+****************************************************************/
+static void
+test_shape_same_dr__irregular(int test_num, int small_rank, int large_rank, int pattern_offset,
+ int slice_offset, hbool_t dim_selected[], hbool_t expected_result)
+{
+ char test_desc_0[128];
+ char test_desc_1[256];
+ int edge_size = 10;
+ int i;
+ int j;
+ int k;
+ int dims_selected = 0;
+ hid_t n_cube_0_sid; /* the hyper cube containing
+ * an irregular selection
+ */
+ hid_t n_cube_1_sid; /* the hyper cube in which a
+ * slice contains an irregular
+ * selection.
+ */
+ hsize_t dims[SS_DR_MAX_RANK];
+ hsize_t start_0[SS_DR_MAX_RANK] = {2, 2, 2, 2, 5};
+ hsize_t stride_0[SS_DR_MAX_RANK] = {10, 10, 10, 10, 10};
+ hsize_t count_0[SS_DR_MAX_RANK] = {1, 1, 1, 1, 1};
+ hsize_t block_0[SS_DR_MAX_RANK] = {2, 2, 2, 2, 3};
+
+ hsize_t start_1[SS_DR_MAX_RANK] = {2, 2, 2, 5, 2};
+ hsize_t stride_1[SS_DR_MAX_RANK] = {10, 10, 10, 10, 10};
+ hsize_t count_1[SS_DR_MAX_RANK] = {1, 1, 1, 1, 1};
+ hsize_t block_1[SS_DR_MAX_RANK] = {2, 2, 2, 3, 2};
+
+ hsize_t start_2[SS_DR_MAX_RANK] = {2, 2, 5, 2, 2};
+ hsize_t stride_2[SS_DR_MAX_RANK] = {10, 10, 10, 10, 10};
+ hsize_t count_2[SS_DR_MAX_RANK] = {1, 1, 1, 1, 1};
+ hsize_t block_2[SS_DR_MAX_RANK] = {2, 2, 3, 2, 2};
+
+ hsize_t start_3[SS_DR_MAX_RANK] = {2, 5, 2, 2, 2};
+ hsize_t stride_3[SS_DR_MAX_RANK] = {10, 10, 10, 10, 10};
+ hsize_t count_3[SS_DR_MAX_RANK] = {1, 1, 1, 1, 1};
+ hsize_t block_3[SS_DR_MAX_RANK] = {2, 3, 2, 2, 2};
+
+ hsize_t start_4[SS_DR_MAX_RANK] = {5, 2, 2, 2, 2};
+ hsize_t stride_4[SS_DR_MAX_RANK] = {10, 10, 10, 10, 10};
+ hsize_t count_4[SS_DR_MAX_RANK] = {1, 1, 1, 1, 1};
+ hsize_t block_4[SS_DR_MAX_RANK] = {3, 2, 2, 2, 2};
+
+ hsize_t clip_start[SS_DR_MAX_RANK] = {0, 0, 0, 0, 0};
+ hsize_t clip_stride[SS_DR_MAX_RANK] = {10, 10, 10, 10, 10};
+ hsize_t clip_count[SS_DR_MAX_RANK] = {1, 1, 1, 1, 1};
+ hsize_t clip_block[SS_DR_MAX_RANK] = {10, 10, 10, 10, 10};
+
+ hsize_t *(starts[SS_DR_MAX_RANK]) = {start_0, start_1, start_2, start_3, start_4};
+ hsize_t *(strides[SS_DR_MAX_RANK]) = {stride_0, stride_1, stride_2, stride_3, stride_4};
+ hsize_t *(counts[SS_DR_MAX_RANK]) = {count_0, count_1, count_2, count_3, count_4};
+ hsize_t *(blocks[SS_DR_MAX_RANK]) = {block_0, block_1, block_2, block_3, block_4};
+
+ hsize_t start[SS_DR_MAX_RANK];
+ hsize_t *start_ptr;
+ hsize_t stride[SS_DR_MAX_RANK];
+ hsize_t *stride_ptr;
+ hsize_t count[SS_DR_MAX_RANK];
+ hsize_t *count_ptr;
+ hsize_t block[SS_DR_MAX_RANK];
+ hsize_t *block_ptr;
+ htri_t check; /* Shape comparison return value */
+ herr_t ret; /* Generic return value */
+
+ HDassert(0 < small_rank);
+ HDassert(small_rank <= large_rank);
+ HDassert(large_rank <= SS_DR_MAX_RANK);
+ HDassert(9 <= edge_size);
+ HDassert(edge_size <= 1000);
+ HDassert(0 <= slice_offset);
+ HDassert(slice_offset < edge_size);
+ HDassert(-2 <= pattern_offset);
+ HDassert(pattern_offset <= 2);
+
+ for (i = SS_DR_MAX_RANK - large_rank; i < SS_DR_MAX_RANK; i++)
+ if (dim_selected[i] == TRUE)
+ dims_selected++;
+
+ HDassert(dims_selected >= 0);
+ HDassert(dims_selected <= large_rank);
+
+ HDsnprintf(test_desc_0, sizeof(test_desc_0),
+ "\tirregular sub set of n-cube slice through m-cube (n <= m) test %d.\n", test_num);
+ MESSAGE(7, ("%s", test_desc_0));
+
+ /* This statement must be updated if SS_DR_MAX_RANK is changed */
+ HDsnprintf(test_desc_1, sizeof(test_desc_1),
+ "\tranks: %d/%d edge: %d s/p offset: %d/%d dim_selected: %d/%d/%d/%d/%d:%d.\n", small_rank,
+ large_rank, edge_size, slice_offset, pattern_offset, (int)dim_selected[0],
+ (int)dim_selected[1], (int)dim_selected[2], (int)dim_selected[3], (int)dim_selected[4],
+ dims_selected);
+ MESSAGE(7, ("%s", test_desc_1));
+
+ /* copy the edge size into the dims array */
+ for (i = 0; i < SS_DR_MAX_RANK; i++)
+ dims[i] = (hsize_t)edge_size;
+
+ /* Create the small n-cube */
+ n_cube_0_sid = H5Screate_simple(small_rank, dims, NULL);
+ CHECK(n_cube_0_sid, FAIL, "H5Screate_simple");
+
+ /* Select an "irregular" pattern in the small n-cube. This
+ * pattern can be though of a set of four 3 x 2 x 2 X 2
+ * four dimensional prisims, each parallel to one of the
+ * axies and none of them intersecting with the other.
+ *
+ * In the lesser dimensional cases, this 4D pattern is
+ * projected onto the lower dimensional space.
+ *
+ * In the 1-D case, the projection of the pattern looks
+ * like this:
+ *
+ * - - * * - * * * - -
+ * 0 1 2 3 4 5 6 7 8 9 x
+ *
+ * and in the 2-D case, it would look like this:
+ *
+ *
+ * y
+ * 9 - - - - - - - - - -
+ * 8 - - - - - - - - - -
+ * 7 - - * * - - - - - -
+ * 6 - - * * - - - - - -
+ * 5 - - * * - - - - - -
+ * 4 - - - - - - - - - -
+ * 3 - - * * - * * * - -
+ * 2 - - * * - * * * - -
+ * 1 - - - - - - - - - -
+ * 0 - - - - - - - - - -
+ * 0 1 2 3 4 5 6 7 8 9 x
+ *
+ * In both cases, asterisks indicate selected elements,
+ * and dashes indicate unselected elements.
+ *
+ * Note that in this case, since the edge size is fixed,
+ * the pattern does not change. However, we do use the
+ * displacement parameter to allow it to be moved around
+ * within the n-cube or hyperslab.
+ */
+
+ /* first, ensure that the small n-cube has no selection */
+ ret = H5Sselect_none(n_cube_0_sid);
+ CHECK(ret, FAIL, "H5Sselect_none");
+
+ /* now, select the irregular pattern */
+ for (i = 0; i < SS_DR_MAX_RANK; i++) {
+ ret = H5Sselect_hyperslab(n_cube_0_sid, H5S_SELECT_OR, starts[i], strides[i], counts[i], blocks[i]);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+ } /* end for */
+
+ /* finally, clip the selection to ensure that it lies fully
+ * within the n-cube.
+ */
+ ret = H5Sselect_hyperslab(n_cube_0_sid, H5S_SELECT_AND, clip_start, clip_stride, clip_count, clip_block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* Create the large n-cube */
+ n_cube_1_sid = H5Screate_simple(large_rank, dims, NULL);
+ CHECK(n_cube_1_sid, FAIL, "H5Screate_simple");
+
+ /* Ensure that the large n-cube has no selection */
+ ret = H5Sselect_none(n_cube_1_sid);
+ CHECK(ret, FAIL, "H5Sselect_none");
+
+ /* Since large rank may be less than SS_DR_MAX_RANK, we may not
+ * use the entire start, stride, count, and block arrays. This
+ * is a problem, since it is inconvenient to set up the dim_selected
+ * array to reflect the large rank, and thus if large_rank <
+ * SS_DR_MAX_RANK, we need to hide the lower index entries
+ * from H5Sselect_hyperslab().
+ *
+ * Do this by setting up pointers to the first valid entry in start,
+ * stride, count, and block below, and pass these pointers in
+ * to H5Sselect_hyperslab() instead of the array base addresses.
+ */
+
+ i = SS_DR_MAX_RANK - large_rank;
+ HDassert(i >= 0);
+
+ start_ptr = &(start[i]);
+ stride_ptr = &(stride[i]);
+ count_ptr = &(count[i]);
+ block_ptr = &(block[i]);
+
+ /* Now select the irregular selection in the (possibly larger) n-cube.
+ *
+ * Basic idea is to project the pattern used in the smaller n-cube
+ * onto the dimensions selected in the larger n-cube, with the displacement
+ * specified.
+ */
+ for (i = 0; i < SS_DR_MAX_RANK; i++) {
+ j = 0;
+ for (k = 0; k < SS_DR_MAX_RANK; k++) {
+ if (dim_selected[k]) {
+ start[k] = (starts[i])[j] + (hsize_t)pattern_offset;
+ stride[k] = (strides[i])[j];
+ count[k] = (counts[i])[j];
+ block[k] = (blocks[i])[j];
+ j++;
+ } /* end if */
+ else {
+ start[k] = (hsize_t)slice_offset;
+ stride[k] = (hsize_t)(2 * edge_size);
+ count[k] = 1;
+ block[k] = 1;
+ } /* end else */
+ } /* end for */
+
+ /* select the hyperslab */
+ ret = H5Sselect_hyperslab(n_cube_1_sid, H5S_SELECT_OR, start_ptr, stride_ptr, count_ptr, block_ptr);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+ } /* end for */
+
+ /* it is possible that the selection extends beyond the dataspace.
+ * clip the selection to ensure that it doesn't.
+ */
+ ret = H5Sselect_hyperslab(n_cube_1_sid, H5S_SELECT_AND, clip_start, clip_stride, clip_count, clip_block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* setup is done -- run the test: */
+ check = H5Sselect_shape_same(n_cube_0_sid, n_cube_1_sid);
+ VERIFY(check, expected_result, "H5Sselect_shape_same");
+
+ /* Close dataspaces */
+ ret = H5Sclose(n_cube_0_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ ret = H5Sclose(n_cube_1_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+} /* test_shape_same_dr__irregular() */
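+
+/****************************************************************
+**
+** test_shape_same_dr__irregular_1d_example():
+**
+** A minimal sketch of the 1-D projection of the "irregular"
+** pattern described in test_shape_same_dr__irregular() --
+** illustrative only, and not invoked by test_shape_same_dr():
+**
+**      - - * * - * * * - -
+**      0 1 2 3 4 5 6 7 8 9
+**
+** Two hyperslabs are OR-ed together and the selected element
+** count (5) is checked.
+**
+****************************************************************/
+static void
+test_shape_same_dr__irregular_1d_example(void)
+{
+    hid_t    sid;
+    hsize_t  dims[1]  = {10};
+    hsize_t  start[1] = {2};
+    hsize_t  count[1] = {1};
+    hsize_t  block[1] = {2};         /* elements 2-3 */
+    hssize_t npoints;
+    herr_t   ret;
+
+    sid = H5Screate_simple(1, dims, NULL);
+    CHECK(sid, FAIL, "H5Screate_simple");
+
+    ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, start, NULL, count, block);
+    CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+    start[0] = 5;                    /* elements 5-7 */
+    block[0] = 3;
+    ret = H5Sselect_hyperslab(sid, H5S_SELECT_OR, start, NULL, count, block);
+    CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+    /* elements {2, 3, 5, 6, 7} are selected */
+    npoints = H5Sget_select_npoints(sid);
+    VERIFY(npoints, (hssize_t)5, "H5Sget_select_npoints");
+
+    ret = H5Sclose(sid);
+    CHECK(ret, FAIL, "H5Sclose");
+} /* test_shape_same_dr__irregular_1d_example() */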
+
+/****************************************************************
+**
+** test_shape_same_dr__run_irregular_tests():
+**
+** In this set of tests, we test H5Sselect_shape_same()
+** with an "irregular" subselection of 1, 2, 3, and 4 cubes as
+** one parameter, and irregular subselections of 1, 2, 3,
+** and 4 dimensional slices through an n-cube of rank no more
+** than 5 (and at least the rank of the slice) as the other.
+** Note that the "irregular" selection may be offset between
+** the n-cube and the slice.
+**
+** All the irregular selections will be identical (modulo rank)
+** so H5Sselect_shape_same() should return true iff:
+**
+** 1) the rank of the n cube equals the number of dimensions
+** selected in the irregular slice through the m-cube
+** (m >= n).
+**
+** 2) The dimensions selected in the irregular slice
+** through the m-cube are the dimensions with the most
+** quickly changing indices.
+**
+****************************************************************/
+static void
+test_shape_same_dr__run_irregular_tests(void)
+{
+ hbool_t dim_selected[5];
+ hbool_t expected_result;
+ int i, j;
+ int v, w, x, y, z;
+ int test_num = 0;
+ int small_rank;
+ int large_rank;
+
+ for (large_rank = 1; large_rank <= 5; large_rank++) {
+ for (small_rank = 1; small_rank <= large_rank; small_rank++) {
+ v = 0;
+ do {
+ if (v == 0)
+ dim_selected[0] = FALSE;
+ else
+ dim_selected[0] = TRUE;
+
+ w = 0;
+ do {
+ if (w == 0)
+ dim_selected[1] = FALSE;
+ else
+ dim_selected[1] = TRUE;
+
+ x = 0;
+ do {
+ if (x == 0)
+ dim_selected[2] = FALSE;
+ else
+ dim_selected[2] = TRUE;
+
+ y = 0;
+ do {
+ if (y == 0)
+ dim_selected[3] = FALSE;
+ else
+ dim_selected[3] = TRUE;
+
+ z = 0;
+ do {
+ if (z == 0)
+ dim_selected[4] = FALSE;
+ else
+ dim_selected[4] = TRUE;
+
+ /* compute the expected result: */
+ i = 0;
+ j = 4;
+ expected_result = TRUE;
+ while ((i < small_rank) && expected_result) {
+ if (!dim_selected[j])
+ expected_result = FALSE;
+ i++;
+ j--;
+ } /* end while */
+
+ while ((i < large_rank) && expected_result) {
+ if (dim_selected[j])
+ expected_result = FALSE;
+ i++;
+ j--;
+ } /* end while */
+
+ /* everything is set up -- run the tests */
+
+ test_shape_same_dr__irregular(test_num++, small_rank, large_rank,
+ /* pattern_offset */ -2,
+ /* slice_offset */ 0, dim_selected,
+ expected_result);
+
+ test_shape_same_dr__irregular(test_num++, small_rank, large_rank,
+ /* pattern_offset */ -2,
+ /* slice_offset */ 4, dim_selected,
+ expected_result);
+
+ test_shape_same_dr__irregular(test_num++, small_rank, large_rank,
+ /* pattern_offset */ -2,
+ /* slice_offset */ 9, dim_selected,
+ expected_result);
+
+ test_shape_same_dr__irregular(test_num++, small_rank, large_rank,
+ /* pattern_offset */ 0,
+ /* slice_offset */ 0, dim_selected,
+ expected_result);
+
+ test_shape_same_dr__irregular(test_num++, small_rank, large_rank,
+ /* pattern_offset */ 0,
+ /* slice_offset */ 6, dim_selected,
+ expected_result);
+
+ test_shape_same_dr__irregular(test_num++, small_rank, large_rank,
+ /* pattern_offset */ 0,
+ /* slice_offset */ 9, dim_selected,
+ expected_result);
+
+ test_shape_same_dr__irregular(test_num++, small_rank, large_rank,
+ /* pattern_offset */ 2,
+ /* slice_offset */ 0, dim_selected,
+ expected_result);
+
+ test_shape_same_dr__irregular(test_num++, small_rank, large_rank,
+ /* pattern_offset */ 2,
+ /* slice_offset */ 5, dim_selected,
+ expected_result);
+
+ test_shape_same_dr__irregular(test_num++, small_rank, large_rank,
+ /* pattern_offset */ 2,
+ /* slice_offset */ 9, dim_selected,
+ expected_result);
+
+ z++;
+ } while ((z < 2) && (large_rank >= 1));
+
+ y++;
+ } while ((y < 2) && (large_rank >= 2));
+
+ x++;
+ } while ((x < 2) && (large_rank >= 3));
+
+ w++;
+ } while ((w < 2) && (large_rank >= 4));
+
+ v++;
+ } while ((v < 2) && (large_rank >= 5));
+ } /* end for */
+ } /* end for */
+} /* test_shape_same_dr__run_irregular_tests() */
+
+/****************************************************************
+**
+** test_shape_same_dr(): Tests selections on dataspace with
+** different ranks, to verify that "shape same" routine
+** is now handling this case correctly.
+**
+****************************************************************/
+static void
+test_shape_same_dr(void)
+{
+ /* Output message about test being performed */
+ MESSAGE(6, ("Testing Same Shape/Different Rank Comparisons\n"));
+
+ /* first run some smoke checks */
+ test_shape_same_dr__smoke_check_1();
+ test_shape_same_dr__smoke_check_2();
+ test_shape_same_dr__smoke_check_3();
+ test_shape_same_dr__smoke_check_4();
+
+ /* now run more intensive tests. */
+ test_shape_same_dr__run_full_space_vs_slice_tests();
+ test_shape_same_dr__run_checkerboard_tests();
+ test_shape_same_dr__run_irregular_tests();
+} /* test_shape_same_dr() */
+
+/****************************************************************
+**
+** test_space_rebuild(): Tests the selection rebuild routine.
+** We test whether a selection in span-tree form can be rebuilt
+** into a regular selection.
+**
+**
+****************************************************************/
+static void
+test_space_rebuild(void)
+{
+ /* regular space IDs in span-tree form */
+ hid_t sid_reg1, sid_reg2, sid_reg3, sid_reg4, sid_reg5;
+
+ /* Original regular Space IDs */
+ hid_t sid_reg_ori1, sid_reg_ori2, sid_reg_ori3, sid_reg_ori4, sid_reg_ori5;
+
+ /* Irregular space IDs */
+ hid_t sid_irreg1, sid_irreg2, sid_irreg3, sid_irreg4, sid_irreg5;
+
+ /* rebuild status state */
+#if 0
+ H5S_diminfo_valid_t rebuild_stat1, rebuild_stat2;
+ htri_t rebuild_check;
+#endif
+ herr_t ret;
+
+ /* dimensions of rank 1 to rank 5 */
+ hsize_t dims1[] = {SPACERE1_DIM0};
+ hsize_t dims2[] = {SPACERE2_DIM0, SPACERE2_DIM1};
+ hsize_t dims3[] = {SPACERE3_DIM0, SPACERE3_DIM1, SPACERE3_DIM2};
+ hsize_t dims4[] = {SPACERE4_DIM0, SPACERE4_DIM1, SPACERE4_DIM2, SPACERE4_DIM3};
+ hsize_t dims5[] = {SPACERE5_DIM0, SPACERE5_DIM1, SPACERE5_DIM2, SPACERE5_DIM3, SPACERE5_DIM4};
+
+ /* The start of the hyperslab */
+ hsize_t start1[SPACERE1_RANK], start2[SPACERE2_RANK], start3[SPACERE3_RANK], start4[SPACERE4_RANK],
+ start5[SPACERE5_RANK];
+
+ /* The stride of the hyperslab */
+ hsize_t stride1[SPACERE1_RANK], stride2[SPACERE2_RANK], stride3[SPACERE3_RANK], stride4[SPACERE4_RANK],
+ stride5[SPACERE5_RANK];
+
+ /* The number of blocks for the hyperslab */
+ hsize_t count1[SPACERE1_RANK], count2[SPACERE2_RANK], count3[SPACERE3_RANK], count4[SPACERE4_RANK],
+ count5[SPACERE5_RANK];
+
+ /* The size of each block for the hyperslab */
+ hsize_t block1[SPACERE1_RANK], block2[SPACERE2_RANK], block3[SPACERE3_RANK], block4[SPACERE4_RANK],
+ block5[SPACERE5_RANK];
+
+ /* Declarations for special test of rebuild */
+ hid_t sid_spec;
+
+ /* Output message about test being performed */
+ MESSAGE(6, ("Testing functionality to rebuild regular hyperslab selection\n"));
+
+ MESSAGE(7, ("Testing functionality to rebuild 1-D hyperslab selection\n"));
+
+ /* Create 1-D dataspace */
+ sid_reg1 = H5Screate_simple(SPACERE1_RANK, dims1, NULL);
+ sid_reg_ori1 = H5Screate_simple(SPACERE1_RANK, dims1, NULL);
+
+ /* Build up the original one dimensional regular selection */
+ start1[0] = 1;
+ count1[0] = 3;
+ stride1[0] = 5;
+ block1[0] = 4;
+ ret = H5Sselect_hyperslab(sid_reg_ori1, H5S_SELECT_SET, start1, stride1, count1, block1);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* Build up a one-dimensional regular selection with H5S_SELECT_OR;
+ inside HDF5, it will initially be treated as an irregular selection. */
+
+ start1[0] = 1;
+ count1[0] = 2;
+ stride1[0] = 5;
+ block1[0] = 4;
+ ret = H5Sselect_hyperslab(sid_reg1, H5S_SELECT_SET, start1, stride1, count1, block1);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ start1[0] = 11;
+ count1[0] = 1;
+ stride1[0] = 5;
+ block1[0] = 4;
+ ret = H5Sselect_hyperslab(sid_reg1, H5S_SELECT_OR, start1, stride1, count1, block1);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+#if 0
+ ret = H5S__get_rebuild_status_test(sid_reg1, &rebuild_stat1, &rebuild_stat2);
+ CHECK(ret, FAIL, "H5S__get_rebuild_status_test");
+ /* In this case, rebuild_stat1 and rebuild_stat2 should be
+ * H5S_DIMINFO_VALID_YES. */
+ if (rebuild_stat1 != H5S_DIMINFO_VALID_YES) {
+ ret = FAIL;
+ CHECK(ret, FAIL, "H5S_hyper_rebuild");
+ }
+ if (rebuild_stat2 != H5S_DIMINFO_VALID_YES) {
+ ret = FAIL;
+ CHECK(ret, FAIL, "H5S_hyper_rebuild");
+ }
+ if (ret != FAIL) {
+ /* In this case, rebuild_check should be TRUE. */
+ rebuild_check = H5Sselect_shape_same(sid_reg1, sid_reg_ori1);
+ CHECK(rebuild_check, FALSE, "H5Sselect_shape_same");
+ }
+#endif
+ /* For irregular hyperslab */
+ sid_irreg1 = H5Screate_simple(SPACERE1_RANK, dims1, NULL);
+
+ /* Build up a one-dimensional irregular selection with H5S_SELECT_OR */
+ start1[0] = 1;
+ count1[0] = 2;
+ stride1[0] = 5;
+ block1[0] = 4;
+ ret = H5Sselect_hyperslab(sid_irreg1, H5S_SELECT_SET, start1, stride1, count1, block1);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ start1[0] = 12; /* Just one position switch */
+ count1[0] = 1;
+ stride1[0] = 5;
+ block1[0] = 4;
+ ret = H5Sselect_hyperslab(sid_irreg1, H5S_SELECT_OR, start1, stride1, count1, block1);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+#if 0
+ ret = H5S__get_rebuild_status_test(sid_irreg1, &rebuild_stat1, &rebuild_stat2);
+ CHECK(ret, FAIL, "H5S__get_rebuild_status_test");
+ /* In this case, rebuild_stat1 should be H5S_DIMINFO_VALID_NO and
+ * rebuild_stat2 should be H5S_DIMINFO_VALID_IMPOSSIBLE. */
+ if (rebuild_stat1 != H5S_DIMINFO_VALID_NO) {
+ ret = FAIL;
+ CHECK(ret, FAIL, "H5S_hyper_rebuild");
+ }
+ if (rebuild_stat2 != H5S_DIMINFO_VALID_IMPOSSIBLE) {
+ ret = FAIL;
+ CHECK(ret, FAIL, "H5S_hyper_rebuild");
+ }
+ /* No need to do shape comparison */
+#endif
+
+ MESSAGE(7, ("Testing functionality to rebuild 2-D hyperslab selection\n"));
+ /* Create 2-D dataspace */
+ sid_reg2 = H5Screate_simple(SPACERE2_RANK, dims2, NULL);
+ sid_reg_ori2 = H5Screate_simple(SPACERE2_RANK, dims2, NULL);
+
+ /* Build up the original two dimensional regular selection */
+ start2[0] = 2;
+ count2[0] = 2;
+ stride2[0] = 7;
+ block2[0] = 5;
+ start2[1] = 1;
+ count2[1] = 3;
+ stride2[1] = 3;
+ block2[1] = 2;
+
+ ret = H5Sselect_hyperslab(sid_reg_ori2, H5S_SELECT_SET, start2, stride2, count2, block2);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* Build up a two-dimensional regular selection with H5S_SELECT_OR; inside HDF5,
+ it will initially be treated as an irregular selection. */
+
+ start2[1] = 1;
+ count2[1] = 2;
+ stride2[1] = 3;
+ block2[1] = 2;
+
+ ret = H5Sselect_hyperslab(sid_reg2, H5S_SELECT_SET, start2, stride2, count2, block2);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ start2[1] = 7; /* 7 = start(1) + count(2) * stride(3) */
+ count2[1] = 1;
+ stride2[1] = 3;
+ block2[1] = 2;
+
+ ret = H5Sselect_hyperslab(sid_reg2, H5S_SELECT_OR, start2, stride2, count2, block2);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+#if 0
+ ret = H5S__get_rebuild_status_test(sid_reg2, &rebuild_stat1, &rebuild_stat2);
+ CHECK(ret, FAIL, "H5S__get_rebuild_status_test");
+ /* In this case, rebuild_stat1 and rebuild_stat2 should be
+ * H5S_DIMINFO_VALID_YES. */
+ if (rebuild_stat1 != H5S_DIMINFO_VALID_YES) {
+ ret = FAIL;
+ CHECK(ret, FAIL, "H5S_hyper_rebuild");
+ }
+ if (rebuild_stat2 != H5S_DIMINFO_VALID_YES) {
+ ret = FAIL;
+ CHECK(ret, FAIL, "H5S_hyper_rebuild");
+ } /* end if */
+ if (ret != FAIL) {
+ /* In this case, rebuild_check should be TRUE. */
+ rebuild_check = H5Sselect_shape_same(sid_reg2, sid_reg_ori2);
+ CHECK(rebuild_check, FALSE, "H5Sselect_shape_same");
+ }
+#endif
+ /* 2-D irregular case */
+ sid_irreg2 = H5Screate_simple(SPACERE2_RANK, dims2, NULL);
+ /* Build up a two-dimensional irregular selection with H5S_SELECT_OR */
+
+ start2[0] = 2;
+ count2[0] = 2;
+ stride2[0] = 7;
+ block2[0] = 5;
+ start2[1] = 1;
+ count2[1] = 1;
+ stride2[1] = 3;
+ block2[1] = 2;
+ ret = H5Sselect_hyperslab(sid_irreg2, H5S_SELECT_SET, start2, stride2, count2, block2);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ start2[1] = 4;
+ count2[1] = 2;
+ stride2[1] = 4;
+ block2[1] = 3; /* Just add one element for the block */
+
+ ret = H5Sselect_hyperslab(sid_irreg2, H5S_SELECT_OR, start2, stride2, count2, block2);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+#if 0
+ ret = H5S__get_rebuild_status_test(sid_irreg2, &rebuild_stat1, &rebuild_stat2);
+ CHECK(ret, FAIL, "H5S__get_rebuild_status_test");
+ /* In this case, rebuild_stat1 should be H5S_DIMINFO_VALID_NO and
+ * rebuild_stat2 should be H5S_DIMINFO_VALID_IMPOSSIBLE. */
+ if (rebuild_stat1 != H5S_DIMINFO_VALID_NO) {
+ ret = FAIL;
+ CHECK(ret, FAIL, "H5S_hyper_rebuild");
+ }
+ if (rebuild_stat2 != H5S_DIMINFO_VALID_IMPOSSIBLE) {
+ ret = FAIL;
+ CHECK(ret, FAIL, "H5S_hyper_rebuild");
+ }
+ /* No need to do shape comparison */
+#endif
+
+ MESSAGE(7, ("Testing functionality to rebuild 3-D hyperslab selection\n"));
+
+ /* Create 3-D dataspace */
+ sid_reg3 = H5Screate_simple(SPACERE3_RANK, dims3, NULL);
+ sid_reg_ori3 = H5Screate_simple(SPACERE3_RANK, dims3, NULL);
+
+ /* Build up the original three dimensional regular selection */
+ start3[0] = 2;
+ count3[0] = 2;
+ stride3[0] = 3;
+ block3[0] = 2;
+ start3[1] = 1;
+ count3[1] = 3;
+ stride3[1] = 3;
+ block3[1] = 2;
+
+ start3[2] = 1;
+ count3[2] = 2;
+ stride3[2] = 4;
+ block3[2] = 2;
+
+ ret = H5Sselect_hyperslab(sid_reg_ori3, H5S_SELECT_SET, start3, stride3, count3, block3);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* Build up a three-dimensional regular selection with H5S_SELECT_OR; inside HDF5,
+ it will initially be treated as an irregular selection. */
+ start3[2] = 1;
+ count3[2] = 1;
+ stride3[2] = 4;
+ block3[2] = 2;
+
+ ret = H5Sselect_hyperslab(sid_reg3, H5S_SELECT_SET, start3, stride3, count3, block3);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ start3[2] = 5;
+ count3[2] = 1;
+ stride3[2] = 4;
+ block3[2] = 2;
+
+ ret = H5Sselect_hyperslab(sid_reg3, H5S_SELECT_OR, start3, stride3, count3, block3);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+#if 0
+ ret = H5S__get_rebuild_status_test(sid_reg3, &rebuild_stat1, &rebuild_stat2);
+ CHECK(ret, FAIL, "H5S__get_rebuild_status_test");
+ /* In this case, rebuild_stat1 and rebuild_stat2 should be
+ * H5S_DIMINFO_VALID_YES. */
+ if (rebuild_stat1 != H5S_DIMINFO_VALID_YES) {
+ ret = FAIL;
+ CHECK(ret, FAIL, "H5S_hyper_rebuild");
+ }
+ if (rebuild_stat2 != H5S_DIMINFO_VALID_YES) {
+ ret = FAIL;
+ CHECK(ret, FAIL, "H5S_hyper_rebuild");
+ }
+ if (ret != FAIL) {
+ /* In this case, rebuild_check should be TRUE. */
+ rebuild_check = H5Sselect_shape_same(sid_reg3, sid_reg_ori3);
+ CHECK(rebuild_check, FALSE, "H5Sselect_shape_same");
+ }
+#endif
+
+ sid_irreg3 = H5Screate_simple(SPACERE3_RANK, dims3, NULL);
+
+ /* Build up a three-dimensional irregular selection with H5S_SELECT_OR */
+ start3[0] = 2;
+ count3[0] = 2;
+ stride3[0] = 3;
+ block3[0] = 2;
+ start3[1] = 1;
+ count3[1] = 3;
+ stride3[1] = 3;
+ block3[1] = 2;
+
+ start3[2] = 1;
+ count3[2] = 2;
+ stride3[2] = 2;
+ block3[2] = 1;
+
+ ret = H5Sselect_hyperslab(sid_irreg3, H5S_SELECT_SET, start3, stride3, count3, block3);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ start3[2] = 3;
+ count3[2] = 2;
+ stride3[2] = 3; /* Just add one element for the stride */
+ block3[2] = 1;
+
+ ret = H5Sselect_hyperslab(sid_irreg3, H5S_SELECT_OR, start3, stride3, count3, block3);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+#if 0
+ ret = H5S__get_rebuild_status_test(sid_irreg3, &rebuild_stat1, &rebuild_stat2);
+ CHECK(ret, FAIL, "H5S__get_rebuild_status_test");
+ /* In this case, rebuild_stat1 should be H5S_DIMINFO_VALID_NO and
+ * rebuild_stat2 should be H5S_DIMINFO_VALID_IMPOSSIBLE. */
+ if (rebuild_stat1 != H5S_DIMINFO_VALID_NO) {
+ ret = FAIL;
+ CHECK(ret, FAIL, "H5S_hyper_rebuild");
+ }
+ if (rebuild_stat2 != H5S_DIMINFO_VALID_IMPOSSIBLE) {
+ ret = FAIL;
+ CHECK(ret, FAIL, "H5S_hyper_rebuild");
+ }
+ /* No need to do shape comparison */
+#endif
+
+ MESSAGE(7, ("Testing functionality to rebuild 4-D hyperslab selection\n"));
+
+ /* Create 4-D dataspace */
+ sid_reg4 = H5Screate_simple(SPACERE4_RANK, dims4, NULL);
+ sid_reg_ori4 = H5Screate_simple(SPACERE4_RANK, dims4, NULL);
+
+ /* Build up the original four dimensional regular selection */
+ start4[0] = 2;
+ count4[0] = 2;
+ stride4[0] = 3;
+ block4[0] = 2;
+
+ start4[1] = 1;
+ count4[1] = 3;
+ stride4[1] = 3;
+ block4[1] = 2;
+
+ start4[2] = 1;
+ count4[2] = 2;
+ stride4[2] = 4;
+ block4[2] = 2;
+
+ start4[3] = 1;
+ count4[3] = 2;
+ stride4[3] = 4;
+ block4[3] = 2;
+
+ ret = H5Sselect_hyperslab(sid_reg_ori4, H5S_SELECT_SET, start4, stride4, count4, block4);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* Build up a four-dimensional regular selection with H5S_SELECT_OR; inside HDF5,
+ it will initially be treated as an irregular selection. */
+ start4[3] = 1;
+ count4[3] = 1;
+ stride4[3] = 4;
+ block4[3] = 2;
+
+ ret = H5Sselect_hyperslab(sid_reg4, H5S_SELECT_SET, start4, stride4, count4, block4);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ start4[3] = 5;
+ count4[3] = 1;
+ stride4[3] = 4;
+ block4[3] = 2;
+
+ ret = H5Sselect_hyperslab(sid_reg4, H5S_SELECT_OR, start4, stride4, count4, block4);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+#if 0
+ ret = H5S__get_rebuild_status_test(sid_reg4, &rebuild_stat1, &rebuild_stat2);
+ CHECK(ret, FAIL, "H5S__get_rebuild_status_test");
+ /* In this case, rebuild_stat1 and rebuild_stat2 should be
+ * H5S_DIMINFO_VALID_YES. */
+ if (rebuild_stat1 != H5S_DIMINFO_VALID_YES) {
+ ret = FAIL;
+ CHECK(ret, FAIL, "H5S_hyper_rebuild");
+ }
+ if (rebuild_stat2 != H5S_DIMINFO_VALID_YES) {
+ ret = FAIL;
+ CHECK(ret, FAIL, "H5S_hyper_rebuild");
+ }
+ if (ret != FAIL) {
+ /* In this case, rebuild_check should be TRUE. */
+ rebuild_check = H5Sselect_shape_same(sid_reg4, sid_reg_ori4);
+ CHECK(rebuild_check, FALSE, "H5Sselect_shape_same");
+ }
+#endif
+
+ /* Testing irregular selection */
+ sid_irreg4 = H5Screate_simple(SPACERE4_RANK, dims4, NULL);
+
+ /* Build up a four-dimensional irregular selection with H5S_SELECT_OR */
+ start4[0] = 2;
+ count4[0] = 2;
+ stride4[0] = 3;
+ block4[0] = 2;
+ start4[1] = 1;
+ count4[1] = 3;
+ stride4[1] = 3;
+ block4[1] = 2;
+
+ start4[2] = 1;
+ count4[2] = 1;
+ stride4[2] = 4;
+ block4[2] = 2;
+
+ start4[3] = 1;
+ count4[3] = 2;
+ stride4[3] = 4;
+ block4[3] = 2; /* sub-block is one element difference */
+
+ ret = H5Sselect_hyperslab(sid_irreg4, H5S_SELECT_SET, start4, stride4, count4, block4);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ start4[2] = 5;
+ count4[2] = 1;
+ stride4[2] = 4;
+ block4[2] = 2;
+
+ start4[3] = 1;
+ count4[3] = 2;
+ stride4[3] = 4;
+ block4[3] = 3; /* sub-block is one element difference */
+
+ ret = H5Sselect_hyperslab(sid_irreg4, H5S_SELECT_OR, start4, stride4, count4, block4);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+#if 0
+ ret = H5S__get_rebuild_status_test(sid_irreg4, &rebuild_stat1, &rebuild_stat2);
+ CHECK(ret, FAIL, "H5S__get_rebuild_status_test");
+ /* In this case, rebuild_stat1 should be H5S_DIMINFO_VALID_NO and
+ * rebuild_stat2 should be H5S_DIMINFO_VALID_IMPOSSIBLE. */
+ if (rebuild_stat1 != H5S_DIMINFO_VALID_NO) {
+ ret = FAIL;
+ CHECK(ret, FAIL, "H5S_hyper_rebuild");
+ }
+ if (rebuild_stat2 != H5S_DIMINFO_VALID_IMPOSSIBLE) {
+ ret = FAIL;
+ CHECK(ret, FAIL, "H5S_hyper_rebuild");
+ }
+ /* No need to do shape comparison */
+#endif
+
+ MESSAGE(7, ("Testing functionality to rebuild 5-D hyperslab selection\n"));
+
+ /* Create 5-D dataspace */
+ sid_reg5 = H5Screate_simple(SPACERE5_RANK, dims5, NULL);
+ sid_reg_ori5 = H5Screate_simple(SPACERE5_RANK, dims5, NULL);
+
+ /* Build up the original five dimensional regular selection */
+ start5[0] = 2;
+ count5[0] = 2;
+ stride5[0] = 3;
+ block5[0] = 2;
+
+ start5[1] = 1;
+ count5[1] = 3;
+ stride5[1] = 3;
+ block5[1] = 2;
+
+ start5[2] = 1;
+ count5[2] = 2;
+ stride5[2] = 4;
+ block5[2] = 2;
+
+ start5[3] = 1;
+ count5[3] = 2;
+ stride5[3] = 4;
+ block5[3] = 2;
+
+ start5[4] = 1;
+ count5[4] = 2;
+ stride5[4] = 4;
+ block5[4] = 2;
+
+ ret = H5Sselect_hyperslab(sid_reg_ori5, H5S_SELECT_SET, start5, stride5, count5, block5);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* Build up a five-dimensional regular selection with H5S_SELECT_OR; inside HDF5,
+ it will initially be treated as an irregular selection. */
+ start5[4] = 1;
+ count5[4] = 1;
+ stride5[4] = 4;
+ block5[4] = 2;
+
+ ret = H5Sselect_hyperslab(sid_reg5, H5S_SELECT_SET, start5, stride5, count5, block5);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ start5[4] = 5;
+ count5[4] = 1;
+ stride5[4] = 4;
+ block5[4] = 2;
+
+ ret = H5Sselect_hyperslab(sid_reg5, H5S_SELECT_OR, start5, stride5, count5, block5);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+#if 0
+ ret = H5S__get_rebuild_status_test(sid_reg5, &rebuild_stat1, &rebuild_stat2);
+ CHECK(ret, FAIL, "H5S__get_rebuild_status_test");
+ /* In this case, rebuild_stat1 and rebuild_stat2 should be
+ * H5S_DIMINFO_VALID_YES. */
+ if (rebuild_stat1 != H5S_DIMINFO_VALID_YES) {
+ ret = FAIL;
+ CHECK(ret, FAIL, "H5S_hyper_rebuild");
+ }
+ if (rebuild_stat2 != H5S_DIMINFO_VALID_YES) {
+ ret = FAIL;
+ CHECK(ret, FAIL, "H5S_hyper_rebuild");
+ }
+ if (ret != FAIL) {
+ /* In this case, rebuild_check should be TRUE. */
+ rebuild_check = H5Sselect_shape_same(sid_reg5, sid_reg_ori5);
+ CHECK(rebuild_check, FALSE, "H5Sselect_shape_same");
+ }
+#endif
+
+ sid_irreg5 = H5Screate_simple(SPACERE5_RANK, dims5, NULL);
+
+ /* Build up a five-dimensional irregular selection with H5S_SELECT_OR */
+ start5[0] = 2;
+ count5[0] = 2;
+ stride5[0] = 3;
+ block5[0] = 2;
+
+ start5[1] = 1;
+ count5[1] = 3;
+ stride5[1] = 3;
+ block5[1] = 2;
+
+ start5[2] = 1;
+ count5[2] = 2;
+ stride5[2] = 4;
+ block5[2] = 2;
+
+ start5[3] = 1;
+ count5[3] = 1;
+ stride5[3] = 4;
+ block5[3] = 2;
+
+ start5[4] = 2; /* One element difference */
+ count5[4] = 1;
+ stride5[4] = 4;
+ block5[4] = 2;
+
+ ret = H5Sselect_hyperslab(sid_irreg5, H5S_SELECT_SET, start5, stride5, count5, block5);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ start5[3] = 5;
+ count5[3] = 1;
+ stride5[3] = 4;
+ block5[3] = 2;
+
+ start5[4] = 1; /* One element difference */
+ count5[4] = 2;
+ stride5[4] = 4;
+ block5[4] = 2;
+
+ ret = H5Sselect_hyperslab(sid_irreg5, H5S_SELECT_OR, start5, stride5, count5, block5);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+#if 0
+ ret = H5S__get_rebuild_status_test(sid_irreg5, &rebuild_stat1, &rebuild_stat2);
+ CHECK(ret, FAIL, "H5S__get_rebuild_status_test");
+ /* In this case, rebuild_stat1 should be H5S_DIMINFO_VALID_NO and
+ * rebuild_stat2 should be H5S_DIMINFO_VALID_IMPOSSIBLE. */
+ if (rebuild_stat1 != H5S_DIMINFO_VALID_NO) {
+ ret = FAIL;
+ CHECK(ret, FAIL, "H5S_hyper_rebuild");
+ }
+ if (rebuild_stat2 != H5S_DIMINFO_VALID_IMPOSSIBLE) {
+ ret = FAIL;
+ CHECK(ret, FAIL, "H5S_hyper_rebuild");
+ }
+ /* No need to do shape comparison */
+#endif
+
+ /* We use the 5-D case to exercise a special sequence for the rebuild
+ routine: the selection starts out regular, is made irregular, and
+ then is made regular again. */
+ sid_spec = H5Screate_simple(SPACERE5_RANK, dims5, NULL);
+
+ /* Build up the original five dimensional regular selection */
+ start5[0] = 2;
+ count5[0] = 2;
+ stride5[0] = 3;
+ block5[0] = 2;
+
+ start5[1] = 1;
+ count5[1] = 3;
+ stride5[1] = 3;
+ block5[1] = 2;
+
+ start5[2] = 1;
+ count5[2] = 2;
+ stride5[2] = 4;
+ block5[2] = 2;
+
+ start5[3] = 1;
+ count5[3] = 2;
+ stride5[3] = 4;
+ block5[3] = 2;
+
+ start5[4] = 1;
+ count5[4] = 1;
+ stride5[4] = 4;
+ block5[4] = 2;
+
+ ret = H5Sselect_hyperslab(sid_spec, H5S_SELECT_SET, start5, stride5, count5, block5);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+#if 0
+ ret = H5S__get_rebuild_status_test(sid_spec, &rebuild_stat1, &rebuild_stat2);
+ CHECK(ret, FAIL, "H5S__get_rebuild_status_test");
+ /* In this case, rebuild_stat1 and rebuild_stat2 should both be
+ * H5S_DIMINFO_VALID_YES. */
+ if (rebuild_stat1 != H5S_DIMINFO_VALID_YES) {
+ ret = FAIL;
+ CHECK(ret, FAIL, "H5S_hyper_rebuild");
+ }
+ if (rebuild_stat2 != H5S_DIMINFO_VALID_YES) {
+ ret = FAIL;
+ CHECK(ret, FAIL, "H5S_hyper_rebuild");
+ }
+ /* No need to do shape comparison */
+#endif
+
+ /* Add some selections to make it truly irregular */
+ start5[3] = 1;
+ count5[3] = 1;
+ stride5[3] = 4;
+ block5[3] = 2;
+
+ start5[4] = 5;
+ count5[4] = 1;
+ stride5[4] = 4;
+ block5[4] = 2;
+
+ ret = H5Sselect_hyperslab(sid_spec, H5S_SELECT_OR, start5, stride5, count5, block5);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+#if 0
+ ret = H5S__get_rebuild_status_test(sid_spec, &rebuild_stat1, &rebuild_stat2);
+ CHECK(ret, FAIL, "H5S__get_rebuild_status_test");
+ /* In this case, rebuild_stat1 should be H5S_DIMINFO_VALID_NO and
+ * rebuild_stat2 should be H5S_DIMINFO_VALID_IMPOSSIBLE. */
+ if (rebuild_stat1 != H5S_DIMINFO_VALID_NO) {
+ ret = FAIL;
+ CHECK(ret, FAIL, "H5S_hyper_rebuild");
+ }
+ if (rebuild_stat2 != H5S_DIMINFO_VALID_IMPOSSIBLE) {
+ ret = FAIL;
+ CHECK(ret, FAIL, "H5S_hyper_rebuild");
+ }
+ /* No need to do shape comparison */
+#endif
+
+ /* Add more selections to make it regular again */
+ start5[3] = 5;
+ count5[3] = 1;
+ stride5[3] = 4;
+ block5[3] = 2;
+
+ start5[4] = 5;
+ count5[4] = 1;
+ stride5[4] = 4;
+ block5[4] = 2;
+
+ ret = H5Sselect_hyperslab(sid_spec, H5S_SELECT_OR, start5, stride5, count5, block5);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+#if 0
+ ret = H5S__get_rebuild_status_test(sid_spec, &rebuild_stat1, &rebuild_stat2);
+ CHECK(ret, FAIL, "H5S__get_rebuild_status_test");
+ /* In this case, rebuild_stat1 should be H5S_DIMINFO_VALID_NO and
+ * rebuild_stat2 should be H5S_DIMINFO_VALID_YES. */
+ if (rebuild_stat1 != H5S_DIMINFO_VALID_NO) {
+ ret = FAIL;
+ CHECK(ret, FAIL, "H5S_hyper_rebuild");
+ }
+ if (rebuild_stat2 != H5S_DIMINFO_VALID_YES) {
+ ret = FAIL;
+ CHECK(ret, FAIL, "H5S_hyper_rebuild");
+ }
+ /* No need to do shape comparison */
+#endif
+
+ /* Close all of the dataspaces */
+ ret = H5Sclose(sid_reg1);
+ CHECK(ret, FAIL, "H5Sclose");
+ ret = H5Sclose(sid_irreg1);
+ CHECK(ret, FAIL, "H5Sclose");
+ ret = H5Sclose(sid_reg_ori1);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ ret = H5Sclose(sid_reg2);
+ CHECK(ret, FAIL, "H5Sclose");
+ ret = H5Sclose(sid_irreg2);
+ CHECK(ret, FAIL, "H5Sclose");
+ ret = H5Sclose(sid_reg_ori2);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ ret = H5Sclose(sid_reg3);
+ CHECK(ret, FAIL, "H5Sclose");
+ ret = H5Sclose(sid_irreg3);
+ CHECK(ret, FAIL, "H5Sclose");
+ ret = H5Sclose(sid_reg_ori3);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ ret = H5Sclose(sid_reg4);
+ CHECK(ret, FAIL, "H5Sclose");
+ ret = H5Sclose(sid_irreg4);
+ CHECK(ret, FAIL, "H5Sclose");
+ ret = H5Sclose(sid_reg_ori4);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ ret = H5Sclose(sid_reg5);
+ CHECK(ret, FAIL, "H5Sclose");
+ ret = H5Sclose(sid_irreg5);
+ CHECK(ret, FAIL, "H5Sclose");
+ ret = H5Sclose(sid_reg_ori5);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ ret = H5Sclose(sid_spec);
+ CHECK(ret, FAIL, "H5Sclose");
+}
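+
+/****************************************************************
+**
+** test_space_rebuild_example():
+**
+** A minimal sketch of the 1-D rebuild scenario above --
+** illustrative only, and not invoked by the test list. The
+** regular selection of three 4-element blocks (start 1,
+** stride 5) is built once in a single call and once from two
+** OR-ed pieces, which HDF5 initially stores as a span tree.
+** Both cover the same 12 elements, so they compare as the
+** same shape whether or not the span tree has been rebuilt
+** into regular (diminfo) form internally.
+**
+****************************************************************/
+static void
+test_space_rebuild_example(void)
+{
+    hid_t   sid_single, sid_pieces;
+    hsize_t dims[1]   = {SPACERE1_DIM0};
+    hsize_t start[1]  = {1};
+    hsize_t stride[1] = {5};
+    hsize_t count[1]  = {3};
+    hsize_t block[1]  = {4};
+    htri_t  check;
+    herr_t  ret;
+
+    /* the regular selection, built in one call */
+    sid_single = H5Screate_simple(SPACERE1_RANK, dims, NULL);
+    CHECK(sid_single, FAIL, "H5Screate_simple");
+    ret = H5Sselect_hyperslab(sid_single, H5S_SELECT_SET, start, stride, count, block);
+    CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+    /* the same selection, built from two OR-ed pieces */
+    sid_pieces = H5Screate_simple(SPACERE1_RANK, dims, NULL);
+    CHECK(sid_pieces, FAIL, "H5Screate_simple");
+    count[0] = 2;
+    ret = H5Sselect_hyperslab(sid_pieces, H5S_SELECT_SET, start, stride, count, block);
+    CHECK(ret, FAIL, "H5Sselect_hyperslab");
+    start[0] = 11;
+    count[0] = 1;
+    ret = H5Sselect_hyperslab(sid_pieces, H5S_SELECT_OR, start, stride, count, block);
+    CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+    /* same elements, so the shapes compare as the same */
+    check = H5Sselect_shape_same(sid_single, sid_pieces);
+    VERIFY(check, TRUE, "H5Sselect_shape_same");
+
+    ret = H5Sclose(sid_single);
+    CHECK(ret, FAIL, "H5Sclose");
+    ret = H5Sclose(sid_pieces);
+    CHECK(ret, FAIL, "H5Sclose");
+} /* test_space_rebuild_example() */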
+
+/****************************************************************
+**
+** test_space_update_diminfo(): Tests the selection diminfo update
+** routine. We test whether regular selections can be quickly
+** updated when the selection is modified.
+**
+**
+****************************************************************/
+static void
+test_space_update_diminfo(void)
+{
+ hid_t space_id; /* Dataspace id */
+#if 0
+ H5S_diminfo_valid_t diminfo_valid; /* Diminfo status */
+ H5S_diminfo_valid_t rebuild_status; /* Diminfo status after rebuild */
+#endif
+ H5S_sel_type sel_type; /* Selection type */
+ herr_t ret; /* Return value */
+
+ /* dimensions of rank 1 to rank 5 */
+ hsize_t dims1[] = {SPACEUD1_DIM0};
+ hsize_t dims3[] = {SPACEUD3_DIM0, SPACEUD3_DIM1, SPACEUD3_DIM2};
+
+ /* The start of the hyperslab */
+ hsize_t start1[1], start3[3];
+
+ /* The stride of the hyperslab */
+ hsize_t stride1[1], stride3[3];
+
+ /* The number of blocks for the hyperslab */
+ hsize_t count1[1], count3[3];
+
+ /* The size of each block for the hyperslab */
+ hsize_t block1[1], block3[3];
+
+ /* Output message about test being performed */
+ MESSAGE(6, ("Testing functionality to update hyperslab dimension info\n"));
+
+ MESSAGE(7, ("Testing functionality to update 1-D hyperslab dimension info\n"));
+
+ /*
+ * Test adding regularly spaced distinct blocks
+ */
+
+ /* Create 1-D dataspace */
+ space_id = H5Screate_simple(1, dims1, NULL);
+
+ /* Create single block */
+ start1[0] = 3;
+ count1[0] = 1;
+ block1[0] = 2;
+ ret = H5Sselect_hyperslab(space_id, H5S_SELECT_SET, start1, NULL, count1, block1);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+#if 0
+ /* diminfo_valid should be YES */
+ ret = H5S__get_diminfo_status_test(space_id, &diminfo_valid);
+ CHECK(ret, FAIL, "H5S__get_diminfo_status_test");
+ if (diminfo_valid != H5S_DIMINFO_VALID_YES) {
+ ret = FAIL;
+ CHECK(ret, FAIL, "H5S_hyper_update_diminfo");
+ } /* end if */
+#endif
+ /* Add block after first, with OR */
+ start1[0] = 6;
+ count1[0] = 1;
+ block1[0] = 2;
+ ret = H5Sselect_hyperslab(space_id, H5S_SELECT_OR, start1, NULL, count1, block1);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+#if 0
+ /* diminfo_valid should be YES */
+ ret = H5S__get_diminfo_status_test(space_id, &diminfo_valid);
+ CHECK(ret, FAIL, "H5S__get_diminfo_status_test");
+ if (diminfo_valid != H5S_DIMINFO_VALID_YES) {
+ ret = FAIL;
+ CHECK(ret, FAIL, "H5S_hyper_update_diminfo");
+ } /* end if */
+#endif
+ /* Add block before first, this time with XOR */
+ start1[0] = 0;
+ count1[0] = 1;
+ block1[0] = 2;
+ ret = H5Sselect_hyperslab(space_id, H5S_SELECT_XOR, start1, NULL, count1, block1);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+#if 0
+ /* diminfo_valid should be YES */
+ ret = H5S__get_diminfo_status_test(space_id, &diminfo_valid);
+ CHECK(ret, FAIL, "H5S__get_diminfo_status_test");
+ if (diminfo_valid != H5S_DIMINFO_VALID_YES) {
+ ret = FAIL;
+ CHECK(ret, FAIL, "H5S_hyper_update_diminfo");
+ } /* end if */
+#endif
+ /* Add two blocks after current block */
+ start1[0] = 9;
+ stride1[0] = 3;
+ count1[0] = 2;
+ block1[0] = 2;
+ ret = H5Sselect_hyperslab(space_id, H5S_SELECT_OR, start1, stride1, count1, block1);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+#if 0
+ /* diminfo_valid should be YES */
+ ret = H5S__get_diminfo_status_test(space_id, &diminfo_valid);
+ CHECK(ret, FAIL, "H5S__get_diminfo_status_test");
+ if (diminfo_valid != H5S_DIMINFO_VALID_YES) {
+ ret = FAIL;
+ CHECK(ret, FAIL, "H5S_hyper_update_diminfo");
+ } /* end if */
+#endif
+ /* Add two blocks overlapping current block, with OR */
+ start1[0] = 9;
+ stride1[0] = 3;
+ count1[0] = 2;
+ block1[0] = 2;
+ ret = H5Sselect_hyperslab(space_id, H5S_SELECT_OR, start1, stride1, count1, block1);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+#if 0
+ /* diminfo_valid should be YES */
+ ret = H5S__get_diminfo_status_test(space_id, &diminfo_valid);
+ CHECK(ret, FAIL, "H5S__get_diminfo_status_test");
+ if (diminfo_valid != H5S_DIMINFO_VALID_YES) {
+ ret = FAIL;
+ CHECK(ret, FAIL, "H5S_hyper_update_diminfo");
+ } /* end if */
+#endif
+ /* Add two blocks partially overlapping current block, with OR */
+ start1[0] = 12;
+ stride1[0] = 3;
+ count1[0] = 2;
+ block1[0] = 2;
+ ret = H5Sselect_hyperslab(space_id, H5S_SELECT_OR, start1, stride1, count1, block1);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+#if 0
+ /* diminfo_valid should be YES */
+ ret = H5S__get_diminfo_status_test(space_id, &diminfo_valid);
+ CHECK(ret, FAIL, "H5S__get_diminfo_status_test");
+ if (diminfo_valid != H5S_DIMINFO_VALID_YES) {
+ ret = FAIL;
+ CHECK(ret, FAIL, "H5S_hyper_update_diminfo");
+ } /* end if */
+#endif
+ /* Add two blocks partially overlapping current block, with XOR */
+ start1[0] = 15;
+ stride1[0] = 3;
+ count1[0] = 2;
+ block1[0] = 2;
+ ret = H5Sselect_hyperslab(space_id, H5S_SELECT_XOR, start1, stride1, count1, block1);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+#if 0
+ /* diminfo_valid should be NO, after rebuild it should be IMPOSSIBLE */
+ ret = H5S__get_rebuild_status_test(space_id, &diminfo_valid, &rebuild_status);
+ CHECK(ret, FAIL, "H5S__get_diminfo_status_test");
+ if (diminfo_valid != H5S_DIMINFO_VALID_NO) {
+ ret = FAIL;
+ CHECK(ret, FAIL, "H5S_hyper_update_diminfo");
+ } /* end if */
+ if (rebuild_status != H5S_DIMINFO_VALID_IMPOSSIBLE) {
+ ret = FAIL;
+ CHECK(ret, FAIL, "H5S_hyper_rebuild");
+ } /* end if */
+#endif
+
+ /* Fill in missing block */
+ start1[0] = 15;
+ count1[0] = 1;
+ block1[0] = 2;
+ ret = H5Sselect_hyperslab(space_id, H5S_SELECT_XOR, start1, NULL, count1, block1);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+#if 0
+ /* diminfo_valid should be NO, after rebuild it should be YES */
+ ret = H5S__get_rebuild_status_test(space_id, &diminfo_valid, &rebuild_status);
+ CHECK(ret, FAIL, "H5S__get_rebuild_status_test");
+ if (diminfo_valid != H5S_DIMINFO_VALID_NO) {
+ ret = FAIL;
+ CHECK(ret, FAIL, "H5S_hyper_update_diminfo");
+ } /* end if */
+ if (rebuild_status != H5S_DIMINFO_VALID_YES) {
+ ret = FAIL;
+ CHECK(ret, FAIL, "H5S_hyper_rebuild");
+ } /* end if */
+#endif
+ /*
+ * Test adding contiguous blocks
+ */
+
+ /* Create single block */
+ start1[0] = 3;
+ count1[0] = 1;
+ block1[0] = 2;
+ ret = H5Sselect_hyperslab(space_id, H5S_SELECT_SET, start1, NULL, count1, block1);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+#if 0
+ /* diminfo_valid should be YES */
+ ret = H5S__get_diminfo_status_test(space_id, &diminfo_valid);
+ CHECK(ret, FAIL, "H5S__get_diminfo_status_test");
+ if (diminfo_valid != H5S_DIMINFO_VALID_YES) {
+ ret = FAIL;
+ CHECK(ret, FAIL, "H5S_hyper_update_diminfo");
+ } /* end if */
+#endif
+ /* Add block immediately after first, with OR */
+ start1[0] = 5;
+ count1[0] = 1;
+ block1[0] = 2;
+ ret = H5Sselect_hyperslab(space_id, H5S_SELECT_OR, start1, NULL, count1, block1);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+#if 0
+ /* diminfo_valid should be YES */
+ ret = H5S__get_diminfo_status_test(space_id, &diminfo_valid);
+ CHECK(ret, FAIL, "H5S__get_diminfo_status_test");
+ if (diminfo_valid != H5S_DIMINFO_VALID_YES) {
+ ret = FAIL;
+ CHECK(ret, FAIL, "H5S_hyper_update_diminfo");
+ } /* end if */
+#endif
+ /* Add block immediately before first, with OR */
+ start1[0] = 1;
+ count1[0] = 1;
+ block1[0] = 2;
+ ret = H5Sselect_hyperslab(space_id, H5S_SELECT_OR, start1, NULL, count1, block1);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+#if 0
+ /* diminfo_valid should be YES */
+ ret = H5S__get_diminfo_status_test(space_id, &diminfo_valid);
+ CHECK(ret, FAIL, "H5S__get_diminfo_status_test");
+ if (diminfo_valid != H5S_DIMINFO_VALID_YES) {
+ ret = FAIL;
+ CHECK(ret, FAIL, "H5S_hyper_update_diminfo");
+ } /* end if */
+#endif
+ /* Add differently sized block immediately after current, with OR */
+ start1[0] = 7;
+ count1[0] = 1;
+ block1[0] = 7;
+ ret = H5Sselect_hyperslab(space_id, H5S_SELECT_OR, start1, NULL, count1, block1);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+#if 0
+ /* diminfo_valid should be YES */
+ ret = H5S__get_diminfo_status_test(space_id, &diminfo_valid);
+ CHECK(ret, FAIL, "H5S__get_diminfo_status_test");
+ if (diminfo_valid != H5S_DIMINFO_VALID_YES) {
+ ret = FAIL;
+ CHECK(ret, FAIL, "H5S_hyper_update_diminfo");
+ } /* end if */
+#endif
+ /*
+ * Test adding overlapping blocks
+ */
+
+ /* Create single block */
+ start1[0] = 3;
+ count1[0] = 1;
+ block1[0] = 2;
+ ret = H5Sselect_hyperslab(space_id, H5S_SELECT_SET, start1, NULL, count1, block1);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+#if 0
+ /* diminfo_valid should be YES */
+ ret = H5S__get_diminfo_status_test(space_id, &diminfo_valid);
+ CHECK(ret, FAIL, "H5S__get_diminfo_status_test");
+ if (diminfo_valid != H5S_DIMINFO_VALID_YES) {
+ ret = FAIL;
+ CHECK(ret, FAIL, "H5S_hyper_update_diminfo");
+ } /* end if */
+#endif
+ /* Add block completely overlapping first, with OR */
+ start1[0] = 3;
+ count1[0] = 1;
+ block1[0] = 2;
+ ret = H5Sselect_hyperslab(space_id, H5S_SELECT_OR, start1, NULL, count1, block1);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+#if 0
+ /* diminfo_valid should be YES */
+ ret = H5S__get_diminfo_status_test(space_id, &diminfo_valid);
+ CHECK(ret, FAIL, "H5S__get_diminfo_status_test");
+ if (diminfo_valid != H5S_DIMINFO_VALID_YES) {
+ ret = FAIL;
+ CHECK(ret, FAIL, "H5S_hyper_update_diminfo");
+ } /* end if */
+#endif
+ /* Add block partially overlapping first, with OR */
+ start1[0] = 4;
+ count1[0] = 1;
+ block1[0] = 2;
+ ret = H5Sselect_hyperslab(space_id, H5S_SELECT_OR, start1, NULL, count1, block1);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+#if 0
+ /* diminfo_valid should be YES */
+ ret = H5S__get_diminfo_status_test(space_id, &diminfo_valid);
+ CHECK(ret, FAIL, "H5S__get_diminfo_status_test");
+ if (diminfo_valid != H5S_DIMINFO_VALID_YES) {
+ ret = FAIL;
+ CHECK(ret, FAIL, "H5S_hyper_update_diminfo");
+ } /* end if */
+#endif
+ /* Add block completely enclosing current, with OR */
+ start1[0] = 2;
+ count1[0] = 1;
+ block1[0] = 5;
+ ret = H5Sselect_hyperslab(space_id, H5S_SELECT_OR, start1, NULL, count1, block1);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+#if 0
+ /* diminfo_valid should be YES */
+ ret = H5S__get_diminfo_status_test(space_id, &diminfo_valid);
+ CHECK(ret, FAIL, "H5S__get_diminfo_status_test");
+ if (diminfo_valid != H5S_DIMINFO_VALID_YES) {
+ ret = FAIL;
+ CHECK(ret, FAIL, "H5S_hyper_update_diminfo");
+ } /* end if */
+#endif
+ /* Add block completely enclosed by current, with OR */
+ start1[0] = 3;
+ count1[0] = 1;
+ block1[0] = 2;
+ ret = H5Sselect_hyperslab(space_id, H5S_SELECT_OR, start1, NULL, count1, block1);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+#if 0
+ /* diminfo_valid should be YES */
+ ret = H5S__get_diminfo_status_test(space_id, &diminfo_valid);
+ CHECK(ret, FAIL, "H5S__get_diminfo_status_test");
+ if (diminfo_valid != H5S_DIMINFO_VALID_YES) {
+ ret = FAIL;
+ CHECK(ret, FAIL, "H5S_hyper_update_diminfo");
+ } /* end if */
+#endif
+ /* Add equally sized block partially overlapping current, with XOR */
+ start1[0] = 3;
+ count1[0] = 1;
+ block1[0] = 5;
+ ret = H5Sselect_hyperslab(space_id, H5S_SELECT_XOR, start1, NULL, count1, block1);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+#if 0
+ /* diminfo_valid should be YES */
+ ret = H5S__get_diminfo_status_test(space_id, &diminfo_valid);
+ CHECK(ret, FAIL, "H5S__get_diminfo_status_test");
+ if (diminfo_valid != H5S_DIMINFO_VALID_YES) {
+ ret = FAIL;
+ CHECK(ret, FAIL, "H5S_hyper_update_diminfo");
+ } /* end if */
+#endif
+ /* Fill in hole in block */
+ start1[0] = 3;
+ count1[0] = 1;
+ block1[0] = 4;
+ ret = H5Sselect_hyperslab(space_id, H5S_SELECT_OR, start1, NULL, count1, block1);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+#if 0
+ /* diminfo_valid should be NO, after rebuild it should be YES */
+ ret = H5S__get_rebuild_status_test(space_id, &diminfo_valid, &rebuild_status);
+ CHECK(ret, FAIL, "H5S__get_rebuild_status_test");
+ if (diminfo_valid != H5S_DIMINFO_VALID_NO) {
+ ret = FAIL;
+ CHECK(ret, FAIL, "H5S_hyper_update_diminfo");
+ } /* end if */
+ if (rebuild_status != H5S_DIMINFO_VALID_YES) {
+ ret = FAIL;
+ CHECK(ret, FAIL, "H5S_hyper_rebuild");
+ } /* end if */
+#endif
+
+ /* Add differently sized block partially overlapping current, with XOR */
+ start1[0] = 4;
+ count1[0] = 1;
+ block1[0] = 5;
+ ret = H5Sselect_hyperslab(space_id, H5S_SELECT_XOR, start1, NULL, count1, block1);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+#if 0
+ /* diminfo_valid should be NO */
+ ret = H5S__get_diminfo_status_test(space_id, &diminfo_valid);
+ CHECK(ret, FAIL, "H5S__get_diminfo_status_test");
+ if (diminfo_valid != H5S_DIMINFO_VALID_NO) {
+ ret = FAIL;
+ CHECK(ret, FAIL, "H5S_hyper_update_diminfo");
+ } /* end if */
+#endif
+ /* Fill in hole in block */
+ start1[0] = 4;
+ count1[0] = 1;
+ block1[0] = 4;
+ ret = H5Sselect_hyperslab(space_id, H5S_SELECT_OR, start1, NULL, count1, block1);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+#if 0
+ /* diminfo_valid should be NO, after rebuild it should be YES */
+ ret = H5S__get_rebuild_status_test(space_id, &diminfo_valid, &rebuild_status);
+ CHECK(ret, FAIL, "H5S__get_rebuild_status_test");
+ if (diminfo_valid != H5S_DIMINFO_VALID_NO) {
+ ret = FAIL;
+ CHECK(ret, FAIL, "H5S_hyper_update_diminfo");
+ } /* end if */
+ if (rebuild_status != H5S_DIMINFO_VALID_YES) {
+ ret = FAIL;
+ CHECK(ret, FAIL, "H5S_hyper_rebuild");
+ } /* end if */
+#endif
+
+ /* Add block completely overlapping current, with XOR */
+ start1[0] = 2;
+ count1[0] = 1;
+ block1[0] = 7;
+ ret = H5Sselect_hyperslab(space_id, H5S_SELECT_XOR, start1, NULL, count1, block1);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
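+ /* XOR with a block identical to the current selection (elements 2-8) should remove everything, leaving a 'none' selection */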
+ sel_type = H5Sget_select_type(space_id);
+ VERIFY(sel_type, H5S_SEL_NONE, "H5Sget_select_type");
+
+ /*
+ * Test various conditions that break the fast algorithm
+ */
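+ /* (i.e., additions after which the selection can no longer be described by the regular dimension info) */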
+
+ /* Create multiple blocks */
+ start1[0] = 3;
+ stride1[0] = 3;
+ count1[0] = 2;
+ block1[0] = 2;
+ ret = H5Sselect_hyperslab(space_id, H5S_SELECT_SET, start1, stride1, count1, block1);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+#if 0
+ /* diminfo_valid should be YES */
+ ret = H5S__get_diminfo_status_test(space_id, &diminfo_valid);
+ CHECK(ret, FAIL, "H5S__get_diminfo_status_test");
+ if (diminfo_valid != H5S_DIMINFO_VALID_YES) {
+ ret = FAIL;
+ CHECK(ret, FAIL, "H5S_hyper_update_diminfo");
+ } /* end if */
+#endif
+ /* Create single block with start out of phase */
+ start1[0] = 8;
+ count1[0] = 1;
+ block1[0] = 2;
+ ret = H5Sselect_hyperslab(space_id, H5S_SELECT_OR, start1, NULL, count1, block1);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+#if 0
+ /* diminfo_valid should be NO */
+ ret = H5S__get_diminfo_status_test(space_id, &diminfo_valid);
+ CHECK(ret, FAIL, "H5S__get_diminfo_status_test");
+ if (diminfo_valid != H5S_DIMINFO_VALID_NO) {
+ ret = FAIL;
+ CHECK(ret, FAIL, "H5S_hyper_update_diminfo");
+ } /* end if */
+#endif
+ /* Create multiple blocks */
+ start1[0] = 3;
+ stride1[0] = 3;
+ count1[0] = 2;
+ block1[0] = 2;
+ ret = H5Sselect_hyperslab(space_id, H5S_SELECT_SET, start1, stride1, count1, block1);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+#if 0
+ /* diminfo_valid should be YES */
+ ret = H5S__get_diminfo_status_test(space_id, &diminfo_valid);
+ CHECK(ret, FAIL, "H5S__get_diminfo_status_test");
+ if (diminfo_valid != H5S_DIMINFO_VALID_YES) {
+ ret = FAIL;
+ CHECK(ret, FAIL, "H5S_hyper_update_diminfo");
+ } /* end if */
+#endif
+ /* Create multiple blocks with start out of phase */
+ start1[0] = 8;
+ stride1[0] = 3;
+ count1[0] = 2;
+ block1[0] = 2;
+ ret = H5Sselect_hyperslab(space_id, H5S_SELECT_OR, start1, stride1, count1, block1);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+#if 0
+ /* diminfo_valid should be NO */
+ ret = H5S__get_diminfo_status_test(space_id, &diminfo_valid);
+ CHECK(ret, FAIL, "H5S__get_diminfo_status_test");
+ if (diminfo_valid != H5S_DIMINFO_VALID_NO) {
+ ret = FAIL;
+ CHECK(ret, FAIL, "H5S_hyper_update_diminfo");
+ } /* end if */
+#endif
+ /* Create multiple blocks */
+ start1[0] = 3;
+ stride1[0] = 3;
+ count1[0] = 2;
+ block1[0] = 2;
+ ret = H5Sselect_hyperslab(space_id, H5S_SELECT_SET, start1, stride1, count1, block1);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+#if 0
+ /* diminfo_valid should be YES */
+ ret = H5S__get_diminfo_status_test(space_id, &diminfo_valid);
+ CHECK(ret, FAIL, "H5S__get_diminfo_status_test");
+ if (diminfo_valid != H5S_DIMINFO_VALID_YES) {
+ ret = FAIL;
+ CHECK(ret, FAIL, "H5S_hyper_update_diminfo");
+ } /* end if */
+#endif
+ /* Create multiple blocks with wrong stride */
+ start1[0] = 9;
+ stride1[0] = 4;
+ count1[0] = 2;
+ block1[0] = 2;
+ ret = H5Sselect_hyperslab(space_id, H5S_SELECT_OR, start1, stride1, count1, block1);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+#if 0
+ /* diminfo_valid should be NO */
+ ret = H5S__get_diminfo_status_test(space_id, &diminfo_valid);
+ CHECK(ret, FAIL, "H5S__get_diminfo_status_test");
+ if (diminfo_valid != H5S_DIMINFO_VALID_NO) {
+ ret = FAIL;
+ CHECK(ret, FAIL, "H5S_hyper_update_diminfo");
+ } /* end if */
+#endif
+ /* Create single block */
+ start1[0] = 3;
+ count1[0] = 1;
+ block1[0] = 2;
+ ret = H5Sselect_hyperslab(space_id, H5S_SELECT_SET, start1, NULL, count1, block1);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+#if 0
+ /* diminfo_valid should be YES */
+ ret = H5S__get_diminfo_status_test(space_id, &diminfo_valid);
+ CHECK(ret, FAIL, "H5S__get_diminfo_status_test");
+ if (diminfo_valid != H5S_DIMINFO_VALID_YES) {
+ ret = FAIL;
+ CHECK(ret, FAIL, "H5S_hyper_update_diminfo");
+ } /* end if */
+#endif
+ /* Create single block with wrong size */
+ start1[0] = 6;
+ count1[0] = 1;
+ block1[0] = 1;
+ ret = H5Sselect_hyperslab(space_id, H5S_SELECT_OR, start1, NULL, count1, block1);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+#if 0
+ /* diminfo_valid should be NO */
+ ret = H5S__get_diminfo_status_test(space_id, &diminfo_valid);
+ CHECK(ret, FAIL, "H5S__get_diminfo_status_test");
+ if (diminfo_valid != H5S_DIMINFO_VALID_NO) {
+ ret = FAIL;
+ CHECK(ret, FAIL, "H5S_hyper_update_diminfo");
+ } /* end if */
+#endif
+ /* Create single block */
+ start1[0] = 3;
+ count1[0] = 1;
+ block1[0] = 2;
+ ret = H5Sselect_hyperslab(space_id, H5S_SELECT_SET, start1, NULL, count1, block1);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+#if 0
+ /* diminfo_valid should be YES */
+ ret = H5S__get_diminfo_status_test(space_id, &diminfo_valid);
+ CHECK(ret, FAIL, "H5S__get_diminfo_status_test");
+ if (diminfo_valid != H5S_DIMINFO_VALID_YES) {
+ ret = FAIL;
+ CHECK(ret, FAIL, "H5S_hyper_update_diminfo");
+ } /* end if */
+#endif
+ /* Create multiple blocks with wrong size */
+ start1[0] = 6;
+ stride1[0] = 3;
+ count1[0] = 2;
+ block1[0] = 1;
+ ret = H5Sselect_hyperslab(space_id, H5S_SELECT_OR, start1, stride1, count1, block1);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+#if 0
+ /* diminfo_valid should be NO */
+ ret = H5S__get_diminfo_status_test(space_id, &diminfo_valid);
+ CHECK(ret, FAIL, "H5S__get_diminfo_status_test");
+ if (diminfo_valid != H5S_DIMINFO_VALID_NO) {
+ ret = FAIL;
+ CHECK(ret, FAIL, "H5S_hyper_update_diminfo");
+ } /* end if */
+#endif
+ /* Create multiple blocks */
+ start1[0] = 3;
+ stride1[0] = 3;
+ count1[0] = 2;
+ block1[0] = 2;
+ ret = H5Sselect_hyperslab(space_id, H5S_SELECT_SET, start1, stride1, count1, block1);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+#if 0
+ /* diminfo_valid should be YES */
+ ret = H5S__get_diminfo_status_test(space_id, &diminfo_valid);
+ CHECK(ret, FAIL, "H5S__get_diminfo_status_test");
+ if (diminfo_valid != H5S_DIMINFO_VALID_YES) {
+ ret = FAIL;
+ CHECK(ret, FAIL, "H5S_hyper_update_diminfo");
+ } /* end if */
+#endif
+ /* Create single block with wrong size */
+ start1[0] = 9;
+ count1[0] = 1;
+ block1[0] = 1;
+ ret = H5Sselect_hyperslab(space_id, H5S_SELECT_OR, start1, NULL, count1, block1);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+#if 0
+ /* diminfo_valid should be NO */
+ ret = H5S__get_diminfo_status_test(space_id, &diminfo_valid);
+ CHECK(ret, FAIL, "H5S__get_diminfo_status_test");
+ if (diminfo_valid != H5S_DIMINFO_VALID_NO) {
+ ret = FAIL;
+ CHECK(ret, FAIL, "H5S_hyper_update_diminfo");
+ } /* end if */
+#endif
+ /* Create multiple blocks */
+ start1[0] = 3;
+ stride1[0] = 3;
+ count1[0] = 2;
+ block1[0] = 2;
+ ret = H5Sselect_hyperslab(space_id, H5S_SELECT_SET, start1, stride1, count1, block1);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+#if 0
+ /* diminfo_valid should be YES */
+ ret = H5S__get_diminfo_status_test(space_id, &diminfo_valid);
+ CHECK(ret, FAIL, "H5S__get_diminfo_status_test");
+ if (diminfo_valid != H5S_DIMINFO_VALID_YES) {
+ ret = FAIL;
+ CHECK(ret, FAIL, "H5S_hyper_update_diminfo");
+ } /* end if */
+#endif
+ /* Create multiple blocks with wrong size */
+ start1[0] = 9;
+ stride1[0] = 3;
+ count1[0] = 2;
+ block1[0] = 1;
+ ret = H5Sselect_hyperslab(space_id, H5S_SELECT_OR, start1, stride1, count1, block1);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+#if 0
+ /* diminfo_valid should be NO */
+ ret = H5S__get_diminfo_status_test(space_id, &diminfo_valid);
+ CHECK(ret, FAIL, "H5S__get_diminfo_status_test");
+ if (diminfo_valid != H5S_DIMINFO_VALID_NO) {
+ ret = FAIL;
+ CHECK(ret, FAIL, "H5S_hyper_update_diminfo");
+ } /* end if */
+#endif
+ ret = H5Sclose(space_id);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ MESSAGE(7, ("Testing functionality to update 3-D hyperslab dimension info\n"));
+
+ /* Create 3-D dataspace */
+ space_id = H5Screate_simple(3, dims3, NULL);
+ CHECK(space_id, FAIL, "H5Screate_simple");
+
+ /* Create multiple blocks */
+ start3[0] = 0;
+ start3[1] = 1;
+ start3[2] = 2;
+ stride3[0] = 2;
+ stride3[1] = 3;
+ stride3[2] = 4;
+ count3[0] = 4;
+ count3[1] = 3;
+ count3[2] = 2;
+ block3[0] = 1;
+ block3[1] = 2;
+ block3[2] = 3;
+ ret = H5Sselect_hyperslab(space_id, H5S_SELECT_SET, start3, stride3, count3, block3);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+#if 0
+ /* diminfo_valid should be YES */
+ ret = H5S__get_diminfo_status_test(space_id, &diminfo_valid);
+ CHECK(ret, FAIL, "H5S__get_diminfo_status_test");
+ if (diminfo_valid != H5S_DIMINFO_VALID_YES) {
+ ret = FAIL;
+ CHECK(ret, FAIL, "H5S_hyper_update_diminfo");
+ } /* end if */
+#endif
+ /* Add blocks with same values in all dimensions */
+ ret = H5Sselect_hyperslab(space_id, H5S_SELECT_OR, start3, stride3, count3, block3);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+#if 0
+ /* diminfo_valid should be YES */
+ ret = H5S__get_diminfo_status_test(space_id, &diminfo_valid);
+ CHECK(ret, FAIL, "H5S__get_diminfo_status_test");
+ if (diminfo_valid != H5S_DIMINFO_VALID_YES) {
+ ret = FAIL;
+ CHECK(ret, FAIL, "H5S_hyper_update_diminfo");
+ } /* end if */
+#endif
+ /* Add blocks with same values in two dimensions */
+ start3[0] = 8;
+ stride3[0] = 1;
+ count3[0] = 1;
+ block3[0] = 1;
+ ret = H5Sselect_hyperslab(space_id, H5S_SELECT_OR, start3, stride3, count3, block3);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+#if 0
+ /* diminfo_valid should be YES */
+ ret = H5S__get_diminfo_status_test(space_id, &diminfo_valid);
+ CHECK(ret, FAIL, "H5S__get_diminfo_status_test");
+ if (diminfo_valid != H5S_DIMINFO_VALID_YES) {
+ ret = FAIL;
+ CHECK(ret, FAIL, "H5S_hyper_update_diminfo");
+ } /* end if */
+#endif
+ /* Create multiple blocks */
+ start3[0] = 0;
+ start3[1] = 1;
+ start3[2] = 2;
+ stride3[0] = 2;
+ stride3[1] = 3;
+ stride3[2] = 4;
+ count3[0] = 4;
+ count3[1] = 3;
+ count3[2] = 2;
+ block3[0] = 1;
+ block3[1] = 2;
+ block3[2] = 3;
+ ret = H5Sselect_hyperslab(space_id, H5S_SELECT_SET, start3, stride3, count3, block3);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+#if 0
+ /* diminfo_valid should be YES */
+ ret = H5S__get_diminfo_status_test(space_id, &diminfo_valid);
+ CHECK(ret, FAIL, "H5S__get_diminfo_status_test");
+ if (diminfo_valid != H5S_DIMINFO_VALID_YES) {
+ ret = FAIL;
+ CHECK(ret, FAIL, "H5S_hyper_update_diminfo");
+ } /* end if */
+#endif
+ /* Add blocks with same values in one dimension */
+ start3[0] = 8;
+ start3[1] = 10;
+ stride3[0] = 1;
+ stride3[1] = 1;
+ count3[0] = 1;
+ count3[1] = 1;
+ block3[0] = 1;
+ block3[1] = 2;
+ ret = H5Sselect_hyperslab(space_id, H5S_SELECT_OR, start3, stride3, count3, block3);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+#if 0
+ /* diminfo_valid should be NO */
+ ret = H5S__get_diminfo_status_test(space_id, &diminfo_valid);
+ CHECK(ret, FAIL, "H5S__get_diminfo_status_test");
+ if (diminfo_valid != H5S_DIMINFO_VALID_NO) {
+ ret = FAIL;
+ CHECK(ret, FAIL, "H5S_hyper_update_diminfo");
+ } /* end if */
+#endif
+ /* Create multiple blocks */
+ start3[0] = 0;
+ start3[1] = 1;
+ start3[2] = 2;
+ stride3[0] = 2;
+ stride3[1] = 3;
+ stride3[2] = 4;
+ count3[0] = 4;
+ count3[1] = 3;
+ count3[2] = 2;
+ block3[0] = 1;
+ block3[1] = 2;
+ block3[2] = 3;
+ ret = H5Sselect_hyperslab(space_id, H5S_SELECT_SET, start3, stride3, count3, block3);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+#if 0
+ /* diminfo_valid should be YES */
+ ret = H5S__get_diminfo_status_test(space_id, &diminfo_valid);
+ CHECK(ret, FAIL, "H5S__get_diminfo_status_test");
+ if (diminfo_valid != H5S_DIMINFO_VALID_YES) {
+ ret = FAIL;
+ CHECK(ret, FAIL, "H5S_hyper_update_diminfo");
+ } /* end if */
+#endif
+ /* Add blocks with same values in no dimensions */
+ start3[0] = 8;
+ start3[1] = 10;
+ start3[2] = 10;
+ stride3[0] = 1;
+ stride3[1] = 1;
+ stride3[2] = 1;
+ count3[0] = 1;
+ count3[1] = 1;
+ count3[2] = 1;
+ block3[0] = 1;
+ block3[1] = 2;
+ block3[2] = 3;
+ ret = H5Sselect_hyperslab(space_id, H5S_SELECT_OR, start3, stride3, count3, block3);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+#if 0
+ /* diminfo_valid should be NO */
+ ret = H5S__get_diminfo_status_test(space_id, &diminfo_valid);
+ CHECK(ret, FAIL, "H5S__get_diminfo_status_test");
+ if (diminfo_valid != H5S_DIMINFO_VALID_NO) {
+ ret = FAIL;
+ CHECK(ret, FAIL, "H5S_hyper_update_diminfo");
+ } /* end if */
+#endif
+ ret = H5Sclose(space_id);
+ CHECK(ret, FAIL, "H5Sclose");
+} /* end test_space_update_diminfo() */
+
+/****************************************************************
+**
+** test_select_hyper_chunk_offset(): Tests selections on dataspace,
+** verifying that offsets for hyperslab selections are working in
+** chunked datasets.
+**
+****************************************************************/
+#if 0
+static void
+test_select_hyper_chunk_offset(void)
+{
+ hid_t fid; /* File ID */
+ hid_t sid; /* Dataspace ID */
+ hid_t msid; /* Memory dataspace ID */
+ hid_t did; /* Dataset ID */
+ const hsize_t mem_dims[1] = {SPACE10_DIM1}; /* Dataspace dimensions for memory */
+ const hsize_t dims[1] = {0}; /* Dataspace initial dimensions */
+ const hsize_t maxdims[1] = {H5S_UNLIMITED}; /* Dataspace max dims */
+ int *wbuf; /* Buffer for writing data */
+ int *rbuf; /* Buffer for reading data */
+ hid_t dcpl; /* Dataset creation property list ID */
+ hsize_t chunks[1] = {SPACE10_CHUNK_SIZE}; /* Chunk size */
+ hsize_t start[1] = {0}; /* The start of the hyperslab */
+ hsize_t count[1] = {SPACE10_CHUNK_SIZE}; /* The size of the hyperslab */
+ int i, j; /* Local index */
+ herr_t ret; /* Generic return value */
+
+ /* Output message about test being performed */
+ MESSAGE(6, ("Testing hyperslab selections using offsets in chunked datasets\n"));
+
+ /* Allocate buffers */
+ wbuf = (int *)HDmalloc(sizeof(int) * SPACE10_DIM1);
+ CHECK_PTR(wbuf, "HDmalloc");
+ rbuf = (int *)HDcalloc(sizeof(int), SPACE10_DIM1);
+ CHECK_PTR(rbuf, "HDcalloc");
+
+ /* Initialize the write buffer */
+ for (i = 0; i < SPACE10_DIM1; i++)
+ wbuf[i] = i;
+
+ /* Create file */
+ fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(fid, FAIL, "H5Fcreate");
+
+ /* Create a dataset creation property list */
+ dcpl = H5Pcreate(H5P_DATASET_CREATE);
+ CHECK(dcpl, FAIL, "H5Pcreate");
+
+ /* Set to chunked storage layout */
+ ret = H5Pset_layout(dcpl, H5D_CHUNKED);
+ CHECK(ret, FAIL, "H5Pset_layout");
+
+ /* Set the chunk size */
+ ret = H5Pset_chunk(dcpl, 1, chunks);
+ CHECK(ret, FAIL, "H5Pset_chunk");
+
+ /* Create dataspace for memory */
+ msid = H5Screate_simple(1, mem_dims, NULL);
+ CHECK(msid, FAIL, "H5Screate_simple");
+
+ /* Select the correct chunk in the memory dataspace */
+ ret = H5Sselect_hyperslab(msid, H5S_SELECT_SET, start, NULL, count, NULL);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* Create dataspace for dataset */
+ sid = H5Screate_simple(1, dims, maxdims);
+ CHECK(sid, FAIL, "H5Screate_simple");
+
+ /* Create the dataset */
+ did = H5Dcreate2(fid, "fooData", H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT);
+ CHECK(did, FAIL, "H5Dcreate2");
+
+ /* Close the dataspace */
+ ret = H5Sclose(sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close the dataset creation property list */
+ ret = H5Pclose(dcpl);
+ CHECK(ret, FAIL, "H5Pclose");
+
+ /* Loop over writing out each chunk */
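+ /* Each pass extends the dataset by one chunk, then uses dataspace offsets to write a chunk-sized slice of wbuf (working backward from its end) into the newly added region */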
+ for (i = SPACE10_CHUNK_SIZE; i <= SPACE10_DIM1; i += SPACE10_CHUNK_SIZE) {
+ hssize_t offset[1]; /* Offset of selection */
+ hid_t fsid; /* File dataspace ID */
+ hsize_t size[1]; /* The size to extend the dataset to */
+
+ /* Extend the dataset */
+ size[0] = (hsize_t)i; /* The size to extend the dataset to */
+ ret = H5Dset_extent(did, size);
+ CHECK(ret, FAIL, "H5Dset_extent");
+
+ /* Get the (extended) dataspace from the dataset */
+ fsid = H5Dget_space(did);
+ CHECK(fsid, FAIL, "H5Dget_space");
+
+ /* Select the correct chunk in the dataset */
+ ret = H5Sselect_hyperslab(fsid, H5S_SELECT_SET, start, NULL, count, NULL);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* Set the selection offset for the file dataspace */
+ offset[0] = i - SPACE10_CHUNK_SIZE;
+ ret = H5Soffset_simple(fsid, offset);
+ CHECK(ret, FAIL, "H5Soffset_simple");
+
+ /* Set the selection offset for the memory dataspace */
+ offset[0] = SPACE10_DIM1 - i;
+ ret = H5Soffset_simple(msid, offset);
+ CHECK(ret, FAIL, "H5Soffset_simple");
+
+ /* Write the data to the chunk */
+ ret = H5Dwrite(did, H5T_NATIVE_INT, msid, fsid, H5P_DEFAULT, wbuf);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* Close the file dataspace copy */
+ ret = H5Sclose(fsid);
+ CHECK(ret, FAIL, "H5Sclose");
+ }
+
+ /* Read the data back in */
+ ret = H5Dread(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, rbuf);
+ CHECK(ret, FAIL, "H5Dread");
+
+ /* Verify the information read in */
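+ /* Chunks were written from the end of wbuf backward, so the wbuf slice starting at i should appear in the file at offset (SPACE10_DIM1 - i) - SPACE10_CHUNK_SIZE */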
+ for (i = 0; i < SPACE10_DIM1; i += SPACE10_CHUNK_SIZE)
+ for (j = 0; j < SPACE10_CHUNK_SIZE; j++)
+ if (wbuf[i + j] != rbuf[((SPACE10_DIM1 - i) - SPACE10_CHUNK_SIZE) + j])
+ TestErrPrintf("Line: %d - Error! i=%d, j=%d, rbuf=%d, wbuf=%d\n", __LINE__, i, j,
+ rbuf[((SPACE10_DIM1 - i) - SPACE10_CHUNK_SIZE) + j], wbuf[i + j]);
+
+ /* Check with 'OR'ed set of hyperslab selections, which makes certain the
+ * hyperslab spanlist code gets tested. -QAK
+ */
+
+ /* Re-initialize the write buffer */
+ for (i = 0; i < SPACE10_DIM1; i++)
+ wbuf[i] = i * 2;
+
+ /* Change the selected region in the memory dataspace */
+ start[0] = 0;
+ count[0] = SPACE10_CHUNK_SIZE / 3;
+ ret = H5Sselect_hyperslab(msid, H5S_SELECT_SET, start, NULL, count, NULL);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+ start[0] = (2 * SPACE10_CHUNK_SIZE) / 3;
+ ret = H5Sselect_hyperslab(msid, H5S_SELECT_OR, start, NULL, count, NULL);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* Loop over writing out each chunk */
+ for (i = SPACE10_CHUNK_SIZE; i <= SPACE10_DIM1; i += SPACE10_CHUNK_SIZE) {
+ hssize_t offset[1]; /* Offset of selection */
+ hid_t fsid; /* File dataspace ID */
+ hsize_t size[1]; /* The size to extend the dataset to */
+
+ /* Extend the dataset */
+ size[0] = (hsize_t)i; /* The size to extend the dataset to */
+ ret = H5Dset_extent(did, size);
+ CHECK(ret, FAIL, "H5Dset_extent");
+
+ /* Get the (extended) dataspace from the dataset */
+ fsid = H5Dget_space(did);
+ CHECK(fsid, FAIL, "H5Dget_space");
+
+ /* Select the correct region in the dataset */
+ start[0] = 0;
+ ret = H5Sselect_hyperslab(fsid, H5S_SELECT_SET, start, NULL, count, NULL);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+ start[0] = (2 * SPACE10_CHUNK_SIZE) / 3;
+ ret = H5Sselect_hyperslab(fsid, H5S_SELECT_OR, start, NULL, count, NULL);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* Set the selection offset for the file dataspace */
+ offset[0] = i - SPACE10_CHUNK_SIZE;
+ ret = H5Soffset_simple(fsid, offset);
+ CHECK(ret, FAIL, "H5Soffset_simple");
+
+ /* Set the selection offset for the memory dataspace */
+ offset[0] = SPACE10_DIM1 - i;
+ ret = H5Soffset_simple(msid, offset);
+ CHECK(ret, FAIL, "H5Soffset_simple");
+
+ /* Write the data to the chunk */
+ ret = H5Dwrite(did, H5T_NATIVE_INT, msid, fsid, H5P_DEFAULT, wbuf);
+ CHECK(ret, FAIL, "H5Soffset_simple");
+
+ /* Close the file dataspace copy */
+ ret = H5Sclose(fsid);
+ CHECK(ret, FAIL, "H5Sclose");
+ }
+
+ /* Read the data back in */
+ ret = H5Dread(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, rbuf);
+ CHECK(ret, FAIL, "H5Soffset_simple");
+
+ /* Verify the information read in */
+ for (i = 0; i < SPACE10_DIM1; i += SPACE10_CHUNK_SIZE)
+ for (j = 0; j < SPACE10_CHUNK_SIZE; j++)
+ /* We're not writing out the "middle" of each chunk, so don't check that */
+ if (j < (SPACE10_CHUNK_SIZE / 3) || j >= ((2 * SPACE10_CHUNK_SIZE) / 3))
+ if (wbuf[i + j] != rbuf[((SPACE10_DIM1 - i) - SPACE10_CHUNK_SIZE) + j])
+ TestErrPrintf("Line: %d - Error! i=%d, j=%d, rbuf=%d, wbuf=%d\n", __LINE__, i, j,
+ rbuf[((SPACE10_DIM1 - i) - SPACE10_CHUNK_SIZE) + j], wbuf[i + j]);
+
+ /* Close the memory dataspace */
+ ret = H5Sclose(msid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close the dataset */
+ ret = H5Dclose(did);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close the file */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Free the buffers */
+ HDfree(wbuf);
+ HDfree(rbuf);
+} /* test_select_hyper_chunk_offset() */
+#endif
+/****************************************************************
+**
+** test_select_hyper_chunk_offset2(): Tests selections on dataspace;
+** another test to verify that offsets for hyperslab selections are
+** working in chunked datasets.
+**
+****************************************************************/
+#if 0
+static void
+test_select_hyper_chunk_offset2(void)
+{
+ hid_t file, dataset; /* handles */
+ hid_t dataspace;
+ hid_t memspace;
+ hid_t dcpl; /* Dataset creation property list */
+ herr_t status;
+ unsigned data_out[SPACE12_DIM0]; /* output buffer */
+ unsigned data_in[SPACE12_CHUNK_DIM0]; /* input buffer */
+ hsize_t dims[SPACE12_RANK] = {SPACE12_DIM0}; /* Dimension size */
+ hsize_t chunk_dims[SPACE12_RANK] = {SPACE12_CHUNK_DIM0}; /* Chunk size */
+ hsize_t start[SPACE12_RANK]; /* Start of hyperslab */
+ hsize_t count[SPACE12_RANK]; /* Size of hyperslab */
+ hssize_t offset[SPACE12_RANK]; /* hyperslab offset in the file */
+ unsigned u, v; /* Local index variables */
+
+ /* Output message about test being performed */
+ MESSAGE(6, ("Testing more hyperslab selections using offsets in chunked datasets\n"));
+
+ /* Initialize data to write out */
+ for (u = 0; u < SPACE12_DIM0; u++)
+ data_out[u] = u;
+
+ /* Create the file */
+ file = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(file, FAIL, "H5Fcreate");
+
+ /* Create dataspace */
+ dataspace = H5Screate_simple(SPACE12_RANK, dims, NULL);
+ CHECK(dataspace, FAIL, "H5Screate_simple");
+
+ /* Create dataset creation property list */
+ dcpl = H5Pcreate(H5P_DATASET_CREATE);
+ CHECK(dcpl, FAIL, "H5Pcreate");
+
+ /* Set chunk sizes */
+ status = H5Pset_chunk(dcpl, SPACE12_RANK, chunk_dims);
+ CHECK(status, FAIL, "H5Pset_chunk");
+
+ /* Create dataset */
+ dataset = H5Dcreate2(file, DATASETNAME, H5T_NATIVE_UINT, dataspace, H5P_DEFAULT, dcpl, H5P_DEFAULT);
+ CHECK(dataset, FAIL, "H5Dcreate2");
+
+ /* Close DCPL */
+ status = H5Pclose(dcpl);
+ CHECK(status, FAIL, "H5Pclose");
+
+ /* Write out entire dataset */
+ status = H5Dwrite(dataset, H5T_NATIVE_UINT, H5S_ALL, H5S_ALL, H5P_DEFAULT, data_out);
+ CHECK(status, FAIL, "H5Dclose");
+
+ /* Create memory dataspace (same size as a chunk) */
+ memspace = H5Screate_simple(SPACE12_RANK, chunk_dims, NULL);
+ CHECK(memspace, FAIL, "H5Screate_simple");
+
+ /*
+ * Define hyperslab in the file dataspace.
+ */
+ start[0] = 0;
+ count[0] = SPACE12_CHUNK_DIM0;
+ status = H5Sselect_hyperslab(dataspace, H5S_SELECT_SET, start, NULL, count, NULL);
+ CHECK(status, FAIL, "H5Sselect_hyperslab");
+
+ /* Loop through retrieving data from file, checking it against data written */
+ for (u = 0; u < SPACE12_DIM0; u += SPACE12_CHUNK_DIM0) {
+ /* Set the offset of the file selection */
+ offset[0] = u;
+ status = H5Soffset_simple(dataspace, offset);
+ CHECK(status, FAIL, "H5Soffset_simple");
+
+ /* Read in buffer of data */
+ status = H5Dread(dataset, H5T_NATIVE_UINT, memspace, dataspace, H5P_DEFAULT, data_in);
+ CHECK(status, FAIL, "H5Dread");
+
+ /* Check data read in */
+ for (v = 0; v < SPACE12_CHUNK_DIM0; v++)
+ if (data_out[u + v] != data_in[v])
+ TestErrPrintf("Error! data_out[%u]=%u, data_in[%u]=%u\n", (unsigned)(u + v), data_out[u + v],
+ v, data_in[v]);
+ } /* end for */
+
+ status = H5Dclose(dataset);
+ CHECK(status, FAIL, "H5Dclose");
+
+ status = H5Sclose(dataspace);
+ CHECK(status, FAIL, "H5Sclose");
+
+ status = H5Sclose(memspace);
+ CHECK(status, FAIL, "H5Sclose");
+
+ status = H5Fclose(file);
+ CHECK(status, FAIL, "H5Fclose");
+} /* test_select_hyper_chunk_offset2() */
+#endif
+/****************************************************************
+**
+** test_select_bounds(): Tests selection bounds on dataspaces,
+** both with and without offsets.
+**
+****************************************************************/
+static void
+test_select_bounds(void)
+{
+ hid_t sid; /* Dataspace ID */
+ const hsize_t dims[SPACE11_RANK] = {SPACE11_DIM1, SPACE11_DIM2}; /* Dataspace dimensions */
+ hsize_t coord[SPACE11_NPOINTS][SPACE11_RANK]; /* Coordinates for point selection */
+ hsize_t start[SPACE11_RANK]; /* The start of the hyperslab */
+ hsize_t stride[SPACE11_RANK]; /* The stride between block starts for the hyperslab */
+ hsize_t count[SPACE11_RANK]; /* The number of blocks for the hyperslab */
+ hsize_t block[SPACE11_RANK]; /* The size of each block for the hyperslab */
+ hssize_t offset[SPACE11_RANK]; /* Offset amount for selection */
+ hsize_t low_bounds[SPACE11_RANK]; /* The low bounds for the selection */
+ hsize_t high_bounds[SPACE11_RANK]; /* The high bounds for the selection */
+ herr_t ret; /* Generic return value */
+
+ /* Output message about test being performed */
+ MESSAGE(6, ("Testing selection bounds\n"));
+
+ /* Create dataspace */
+ sid = H5Screate_simple(SPACE11_RANK, dims, NULL);
+ CHECK(sid, FAIL, "H5Screate_simple");
+
+ /* Get bounds for 'all' selection */
+ ret = H5Sget_select_bounds(sid, low_bounds, high_bounds);
+ CHECK(ret, FAIL, "H5Sget_select_bounds");
+ VERIFY(low_bounds[0], 0, "H5Sget_select_bounds");
+ VERIFY(low_bounds[1], 0, "H5Sget_select_bounds");
+ VERIFY(high_bounds[0], SPACE11_DIM1 - 1, "H5Sget_select_bounds");
+ VERIFY(high_bounds[1], SPACE11_DIM2 - 1, "H5Sget_select_bounds");
+
+ /* Set offset for selection */
+ offset[0] = 1;
+ offset[1] = 1;
+ ret = H5Soffset_simple(sid, offset);
+ CHECK(ret, FAIL, "H5Soffset_simple");
+
+ /* Get bounds for 'all' selection with offset (which should be ignored) */
+ ret = H5Sget_select_bounds(sid, low_bounds, high_bounds);
+ CHECK(ret, FAIL, "H5Sget_select_bounds");
+ VERIFY(low_bounds[0], 0, "H5Sget_select_bounds");
+ VERIFY(low_bounds[1], 0, "H5Sget_select_bounds");
+ VERIFY(high_bounds[0], SPACE11_DIM1 - 1, "H5Sget_select_bounds");
+ VERIFY(high_bounds[1], SPACE11_DIM2 - 1, "H5Sget_select_bounds");
+
+ /* Reset offset for selection */
+ offset[0] = 0;
+ offset[1] = 0;
+ ret = H5Soffset_simple(sid, offset);
+ CHECK(ret, FAIL, "H5Soffset_simple");
+
+ /* Set 'none' selection */
+ ret = H5Sselect_none(sid);
+ CHECK(ret, FAIL, "H5Sselect_none");
+
+ /* Get bounds for 'none' selection */
+ H5E_BEGIN_TRY
+ {
+ ret = H5Sget_select_bounds(sid, low_bounds, high_bounds);
+ }
+ H5E_END_TRY;
+ VERIFY(ret, FAIL, "H5Sget_select_bounds");
+
+ /* Set point selection */
+ coord[0][0] = 3;
+ coord[0][1] = 3;
+ coord[1][0] = 3;
+ coord[1][1] = 96;
+ coord[2][0] = 96;
+ coord[2][1] = 3;
+ coord[3][0] = 96;
+ coord[3][1] = 96;
+ ret = H5Sselect_elements(sid, H5S_SELECT_SET, (size_t)SPACE11_NPOINTS, (const hsize_t *)coord);
+ CHECK(ret, FAIL, "H5Sselect_elements");
+
+ /* Get bounds for point selection */
+ ret = H5Sget_select_bounds(sid, low_bounds, high_bounds);
+ CHECK(ret, FAIL, "H5Sget_select_bounds");
+ VERIFY(low_bounds[0], 3, "H5Sget_select_bounds");
+ VERIFY(low_bounds[1], 3, "H5Sget_select_bounds");
+ VERIFY(high_bounds[0], SPACE11_DIM1 - 4, "H5Sget_select_bounds");
+ VERIFY(high_bounds[1], SPACE11_DIM2 - 4, "H5Sget_select_bounds");
+
+ /* Set bad offset for selection */
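+ /* (this offset would shift some selected points outside the dataspace extent, e.g. 3 + (-5) < 0) */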
+ offset[0] = 5;
+ offset[1] = -5;
+ ret = H5Soffset_simple(sid, offset);
+ CHECK(ret, FAIL, "H5Soffset_simple");
+
+ /* Get bounds for hyperslab selection with negative offset */
+ H5E_BEGIN_TRY
+ {
+ ret = H5Sget_select_bounds(sid, low_bounds, high_bounds);
+ }
+ H5E_END_TRY;
+ VERIFY(ret, FAIL, "H5Sget_select_bounds");
+
+ /* Set valid offset for selection */
+ offset[0] = 2;
+ offset[1] = -2;
+ ret = H5Soffset_simple(sid, offset);
+ CHECK(ret, FAIL, "H5Soffset_simple");
+
+ /* Get bounds for point selection with offset */
+ ret = H5Sget_select_bounds(sid, low_bounds, high_bounds);
+ CHECK(ret, FAIL, "H5Sget_select_bounds");
+ VERIFY(low_bounds[0], 5, "H5Sget_select_bounds");
+ VERIFY(low_bounds[1], 1, "H5Sget_select_bounds");
+ VERIFY(high_bounds[0], SPACE11_DIM1 - 2, "H5Sget_select_bounds");
+ VERIFY(high_bounds[1], SPACE11_DIM2 - 6, "H5Sget_select_bounds");
+
+ /* Reset offset for selection */
+ offset[0] = 0;
+ offset[1] = 0;
+ ret = H5Soffset_simple(sid, offset);
+ CHECK(ret, FAIL, "H5Soffset_simple");
+
+ /* Set "regular" hyperslab selection */
+ start[0] = 2;
+ start[1] = 2;
+ stride[0] = 10;
+ stride[1] = 10;
+ count[0] = 4;
+ count[1] = 4;
+ block[0] = 5;
+ block[1] = 5;
+ ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* Get bounds for hyperslab selection */
+ ret = H5Sget_select_bounds(sid, low_bounds, high_bounds);
+ CHECK(ret, FAIL, "H5Sget_select_bounds");
+ VERIFY(low_bounds[0], 2, "H5Sget_select_bounds");
+ VERIFY(low_bounds[1], 2, "H5Sget_select_bounds");
+ VERIFY(high_bounds[0], 36, "H5Sget_select_bounds");
+ VERIFY(high_bounds[1], 36, "H5Sget_select_bounds");
+
+ /* Set bad offset for selection */
+ offset[0] = 5;
+ offset[1] = -5;
+ ret = H5Soffset_simple(sid, offset);
+ CHECK(ret, FAIL, "H5Soffset_simple");
+
+ /* Get bounds for hyperslab selection with negative offset */
+ H5E_BEGIN_TRY
+ {
+ ret = H5Sget_select_bounds(sid, low_bounds, high_bounds);
+ }
+ H5E_END_TRY;
+ VERIFY(ret, FAIL, "H5Sget_select_bounds");
+
+ /* Set valid offset for selection */
+ offset[0] = 5;
+ offset[1] = -2;
+ ret = H5Soffset_simple(sid, offset);
+ CHECK(ret, FAIL, "H5Soffset_simple");
+
+ /* Get bounds for hyperslab selection with offset */
+ ret = H5Sget_select_bounds(sid, low_bounds, high_bounds);
+ CHECK(ret, FAIL, "H5Sget_select_bounds");
+ VERIFY(low_bounds[0], 7, "H5Sget_select_bounds");
+ VERIFY(low_bounds[1], 0, "H5Sget_select_bounds");
+ VERIFY(high_bounds[0], 41, "H5Sget_select_bounds");
+ VERIFY(high_bounds[1], 34, "H5Sget_select_bounds");
+
+ /* Reset offset for selection */
+ offset[0] = 0;
+ offset[1] = 0;
+ ret = H5Soffset_simple(sid, offset);
+ CHECK(ret, FAIL, "H5Soffset_simple");
+
+ /* Make "irregular" hyperslab selection */
+ start[0] = 20;
+ start[1] = 20;
+ stride[0] = 20;
+ stride[1] = 20;
+ count[0] = 2;
+ count[1] = 2;
+ block[0] = 10;
+ block[1] = 10;
+ ret = H5Sselect_hyperslab(sid, H5S_SELECT_OR, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* Get bounds for hyperslab selection */
+ ret = H5Sget_select_bounds(sid, low_bounds, high_bounds);
+ CHECK(ret, FAIL, "H5Sget_select_bounds");
+ VERIFY(low_bounds[0], 2, "H5Sget_select_bounds");
+ VERIFY(low_bounds[1], 2, "H5Sget_select_bounds");
+ VERIFY(high_bounds[0], 49, "H5Sget_select_bounds");
+ VERIFY(high_bounds[1], 49, "H5Sget_select_bounds");
+
+ /* Set bad offset for selection */
+ offset[0] = 5;
+ offset[1] = -5;
+ ret = H5Soffset_simple(sid, offset);
+ CHECK(ret, FAIL, "H5Soffset_simple");
+
+ /* Get bounds for hyperslab selection with negative offset */
+ H5E_BEGIN_TRY
+ {
+ ret = H5Sget_select_bounds(sid, low_bounds, high_bounds);
+ }
+ H5E_END_TRY;
+ VERIFY(ret, FAIL, "H5Sget_select_bounds");
+
+ /* Set valid offset for selection */
+ offset[0] = 5;
+ offset[1] = -2;
+ ret = H5Soffset_simple(sid, offset);
+ CHECK(ret, FAIL, "H5Soffset_simple");
+
+ /* Get bounds for hyperslab selection with offset */
+ ret = H5Sget_select_bounds(sid, low_bounds, high_bounds);
+ CHECK(ret, FAIL, "H5Sget_select_bounds");
+ VERIFY(low_bounds[0], 7, "H5Sget_select_bounds");
+ VERIFY(low_bounds[1], 0, "H5Sget_select_bounds");
+ VERIFY(high_bounds[0], 54, "H5Sget_select_bounds");
+ VERIFY(high_bounds[1], 47, "H5Sget_select_bounds");
+
+ /* Reset offset for selection */
+ offset[0] = 0;
+ offset[1] = 0;
+ ret = H5Soffset_simple(sid, offset);
+ CHECK(ret, FAIL, "H5Soffset_simple");
+
+ /* Close the dataspace */
+ ret = H5Sclose(sid);
+ CHECK(ret, FAIL, "H5Sclose");
+} /* test_select_bounds() */
+
+/****************************************************************
+**
+** test_hyper_regular(): Tests query operations on regular hyperslabs
+**
+****************************************************************/
+static void
+test_hyper_regular(void)
+{
+ hid_t sid; /* Dataspace ID */
+ const hsize_t dims[SPACE13_RANK] = {SPACE13_DIM1, SPACE13_DIM2, SPACE13_DIM3}; /* Dataspace dimensions */
+ hsize_t coord[SPACE13_NPOINTS][SPACE13_RANK]; /* Coordinates for point selection */
+ hsize_t start[SPACE13_RANK]; /* The start of the hyperslab */
+ hsize_t stride[SPACE13_RANK]; /* The stride between block starts for the hyperslab */
+ hsize_t count[SPACE13_RANK]; /* The number of blocks for the hyperslab */
+ hsize_t block[SPACE13_RANK]; /* The size of each block for the hyperslab */
+ hsize_t t_start[SPACE13_RANK]; /* Temporary start of the hyperslab */
+ hsize_t t_count[SPACE13_RANK]; /* Temporary number of blocks for the hyperslab */
+ hsize_t q_start[SPACE13_RANK]; /* The queried start of the hyperslab */
+ hsize_t q_stride[SPACE13_RANK]; /* The queried stride between block starts for the hyperslab */
+ hsize_t q_count[SPACE13_RANK]; /* The queried number of blocks for the hyperslab */
+ hsize_t q_block[SPACE13_RANK]; /* The queried size of each block for the hyperslab */
+ htri_t is_regular; /* Whether a hyperslab selection is regular */
+ unsigned u; /* Local index variable */
+ herr_t ret; /* Generic return value */
+
+ /* Output message about test being performed */
+ MESSAGE(6, ("Testing queries on regular hyperslabs\n"));
+
+ /* Create dataspace */
+ sid = H5Screate_simple(SPACE13_RANK, dims, NULL);
+ CHECK(sid, FAIL, "H5Screate_simple");
+
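+ /* H5Sis_regular_hyperslab() and H5Sget_regular_hyperslab() are only meaningful for hyperslab selections, so they should fail for 'all', 'none', and point selections */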
+ /* Query if 'all' selection is regular hyperslab (should fail) */
+ H5E_BEGIN_TRY
+ {
+ is_regular = H5Sis_regular_hyperslab(sid);
+ }
+ H5E_END_TRY;
+ VERIFY(is_regular, FAIL, "H5Sis_regular_hyperslab");
+
+ /* Query regular hyperslab selection info (should fail) */
+ H5E_BEGIN_TRY
+ {
+ ret = H5Sget_regular_hyperslab(sid, q_start, q_stride, q_count, q_block);
+ }
+ H5E_END_TRY;
+ VERIFY(ret, FAIL, "H5Sget_regular_hyperslab");
+
+ /* Set 'none' selection */
+ ret = H5Sselect_none(sid);
+ CHECK(ret, FAIL, "H5Sselect_none");
+
+ /* Query if 'none' selection is regular hyperslab (should fail) */
+ H5E_BEGIN_TRY
+ {
+ is_regular = H5Sis_regular_hyperslab(sid);
+ }
+ H5E_END_TRY;
+ VERIFY(is_regular, FAIL, "H5Sis_regular_hyperslab");
+
+ /* Query regular hyperslab selection info (should fail) */
+ H5E_BEGIN_TRY
+ {
+ ret = H5Sget_regular_hyperslab(sid, q_start, q_stride, q_count, q_block);
+ }
+ H5E_END_TRY;
+ VERIFY(ret, FAIL, "H5Sget_regular_hyperslab");
+
+ /* Set point selection */
+ coord[0][0] = 3;
+ coord[0][1] = 3;
+ coord[0][2] = 3;
+ coord[1][0] = 3;
+ coord[1][1] = 48;
+ coord[1][2] = 48;
+ coord[2][0] = 48;
+ coord[2][1] = 3;
+ coord[2][2] = 3;
+ coord[3][0] = 48;
+ coord[3][1] = 48;
+ coord[3][2] = 48;
+ ret = H5Sselect_elements(sid, H5S_SELECT_SET, (size_t)SPACE13_NPOINTS, (const hsize_t *)coord);
+ CHECK(ret, FAIL, "H5Sselect_elements");
+
+ /* Query if 'point' selection is regular hyperslab (should fail) */
+ H5E_BEGIN_TRY
+ {
+ is_regular = H5Sis_regular_hyperslab(sid);
+ }
+ H5E_END_TRY;
+ VERIFY(is_regular, FAIL, "H5Sis_regular_hyperslab");
+
+ /* Query regular hyperslab selection info (should fail) */
+ H5E_BEGIN_TRY
+ {
+ ret = H5Sget_regular_hyperslab(sid, q_start, q_stride, q_count, q_block);
+ }
+ H5E_END_TRY;
+ VERIFY(ret, FAIL, "H5Sget_regular_hyperslab");
+
+ /* Set "regular" hyperslab selection */
+ start[0] = 2;
+ start[1] = 2;
+ start[2] = 2;
+ stride[0] = 5;
+ stride[1] = 5;
+ stride[2] = 5;
+ count[0] = 3;
+ count[1] = 3;
+ count[2] = 3;
+ block[0] = 4;
+ block[1] = 4;
+ block[2] = 4;
+ ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* Query if 'hyperslab' selection is regular hyperslab (should be TRUE) */
+ is_regular = H5Sis_regular_hyperslab(sid);
+ VERIFY(is_regular, TRUE, "H5Sis_regular_hyperslab");
+
+ /* Retrieve the hyperslab parameters */
+ ret = H5Sget_regular_hyperslab(sid, q_start, q_stride, q_count, q_block);
+ CHECK(ret, FAIL, "H5Sget_regular_hyperslab");
+
+ /* Verify the hyperslab parameters */
+ for (u = 0; u < SPACE13_RANK; u++) {
+ if (start[u] != q_start[u])
+ ERROR("H5Sget_regular_hyperslab, start");
+ if (stride[u] != q_stride[u])
+ ERROR("H5Sget_regular_hyperslab, stride");
+ if (count[u] != q_count[u])
+ ERROR("H5Sget_regular_hyperslab, count");
+ if (block[u] != q_block[u])
+ ERROR("H5Sget_regular_hyperslab, block");
+ } /* end for */
+
+ /* 'OR' in another point */
+ t_start[0] = 0;
+ t_start[1] = 0;
+ t_start[2] = 0;
+ t_count[0] = 1;
+ t_count[1] = 1;
+ t_count[2] = 1;
+ ret = H5Sselect_hyperslab(sid, H5S_SELECT_OR, t_start, NULL, t_count, NULL);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* Query if 'hyperslab' selection is regular hyperslab (should be FALSE) */
+ is_regular = H5Sis_regular_hyperslab(sid);
+ VERIFY(is_regular, FALSE, "H5Sis_regular_hyperslab");
+
+ /* Query regular hyperslab selection info (should fail) */
+ H5E_BEGIN_TRY
+ {
+ ret = H5Sget_regular_hyperslab(sid, q_start, q_stride, q_count, q_block);
+ }
+ H5E_END_TRY;
+ VERIFY(ret, FAIL, "H5Sget_regular_hyperslab");
+
+ /* 'XOR' in the point again, to remove it, which should make it regular again */
+ t_start[0] = 0;
+ t_start[1] = 0;
+ t_start[2] = 0;
+ t_count[0] = 1;
+ t_count[1] = 1;
+ t_count[2] = 1;
+ ret = H5Sselect_hyperslab(sid, H5S_SELECT_XOR, t_start, NULL, t_count, NULL);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* Query if 'hyperslab' selection is regular hyperslab (should be TRUE) */
+ is_regular = H5Sis_regular_hyperslab(sid);
+ VERIFY(is_regular, TRUE, "H5Sis_regular_hyperslab");
+
+ /* Retrieve the hyperslab parameters */
+ ret = H5Sget_regular_hyperslab(sid, q_start, q_stride, q_count, q_block);
+ CHECK(ret, FAIL, "H5Sget_regular_hyperslab");
+
+ /* Verify the hyperslab parameters */
+ for (u = 0; u < SPACE13_RANK; u++) {
+ if (start[u] != q_start[u])
+ ERROR("H5Sget_regular_hyperslab, start");
+ if (stride[u] != q_stride[u])
+ ERROR("H5Sget_regular_hyperslab, stride");
+ if (count[u] != q_count[u])
+ ERROR("H5Sget_regular_hyperslab, count");
+ if (block[u] != q_block[u])
+ ERROR("H5Sget_regular_hyperslab, block");
+ } /* end for */
+
+ /* Close the dataspace */
+ ret = H5Sclose(sid);
+ CHECK(ret, FAIL, "H5Sclose");
+} /* test_hyper_regular() */
+
+/****************************************************************
+**
+** test_hyper_unlim(): Tests unlimited hyperslab selections
+**
+****************************************************************/
+static void
+test_hyper_unlim_check(hid_t sid, hsize_t *dims, hssize_t endpoints, hssize_t enblocks, hsize_t *eblock1,
+ hsize_t *eblock2)
+{
+ hid_t lim_sid;
+ hsize_t start[3];
+ H5S_sel_type sel_type;
+ hssize_t npoints;
+ hssize_t nblocks;
+ hsize_t blocklist[12];
+ herr_t ret;
+
+ HDassert(enblocks <= 2);
+
+ /* Copy sid to lim_sid */
+ lim_sid = H5Scopy(sid);
+ CHECK(lim_sid, FAIL, "H5Scopy");
+
+ /* "And" lim_sid with dims to create limited selection */
+ HDmemset(start, 0, sizeof(start));
+ ret = H5Sselect_hyperslab(lim_sid, H5S_SELECT_AND, start, NULL, dims, NULL);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* Check number of elements */
+ npoints = H5Sget_select_npoints(lim_sid);
+ CHECK(npoints, FAIL, "H5Sget_select_npoints");
+ VERIFY(npoints, endpoints, "H5Sget_select_npoints");
+
+ /* Get selection type */
+ sel_type = H5Sget_select_type(lim_sid);
+ CHECK(sel_type, H5S_SEL_ERROR, "H5Sget_select_type");
+
+ /* Only examine blocks for hyperslab selection */
+ if (sel_type == H5S_SEL_HYPERSLABS) {
+ /* Get number of blocks */
+ nblocks = H5Sget_select_hyper_nblocks(lim_sid);
+ CHECK(nblocks, FAIL, "H5Sget_select_hyper_nblocks");
+ VERIFY(nblocks, enblocks, "H5Sget_select_hyper_nblocks");
+
+ if (nblocks > 0) {
+ /* Get blocklist */
+ ret = H5Sget_select_hyper_blocklist(lim_sid, (hsize_t)0, (hsize_t)nblocks, blocklist);
+ CHECK(ret, FAIL, "H5Sget_select_hyper_blocklist");
+
+ /* Verify blocklist */
+ if (nblocks == (hssize_t)1) {
+ if (HDmemcmp(blocklist, eblock1, 6 * sizeof(eblock1[0])) != 0)
+ ERROR("H5Sget_select_hyper_blocklist");
+ } /* end if */
+ else {
+ HDassert(nblocks == (hssize_t)2);
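+ /* Accept the two expected blocks in either order */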
+ if (HDmemcmp(blocklist, eblock1, 6 * sizeof(eblock1[0])) != 0) {
+ if (HDmemcmp(blocklist, eblock2, 6 * sizeof(eblock2[0])) != 0)
+ ERROR("H5Sget_select_hyper_blocklist");
+ if (HDmemcmp(&blocklist[6], eblock1, 6 * sizeof(eblock1[0])) != 0)
+ ERROR("H5Sget_select_hyper_blocklist");
+ } /* end if */
+ else if (HDmemcmp(&blocklist[6], eblock2, 6 * sizeof(eblock2[0])) != 0)
+ ERROR("H5Sget_select_hyper_blocklist");
+ } /* end else */
+ } /* end if */
+ } /* end if */
+ else if (sel_type != H5S_SEL_NONE)
+ ERROR("H5Sget_select_type");
+
+ /* Close the limited dataspace */
+ ret = H5Sclose(lim_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+} /* end test_hyper_unlim_check() */
+
+static void
+test_hyper_unlim(void)
+{
+ hid_t sid;
+ hsize_t dims[3] = {4, 4, 7};
+ hsize_t mdims[3] = {4, H5S_UNLIMITED, 7};
+ hsize_t start[3] = {1, 2, 1};
+ hsize_t stride[3] = {1, 1, 3};
+ hsize_t count[3] = {1, 1, 2};
+ hsize_t block[3] = {2, H5S_UNLIMITED, 2};
+ hsize_t start2[3];
+ hsize_t count2[3];
+ hsize_t eblock1[6] = {1, 2, 1, 2, 3, 2};
+ hsize_t eblock2[6] = {1, 2, 4, 2, 3, 5};
+ hssize_t offset[3] = {0, -1, 0};
+ hssize_t ssize_out;
+ herr_t ret;
+
+ /* Output message about test being performed */
+ MESSAGE(6, ("Testing unlimited hyperslab selections\n"));
+
+ /* Create dataspace */
+ sid = H5Screate_simple(3, dims, mdims);
+ CHECK(sid, FAIL, "H5Screate_simple");
+
+ /* Select unlimited hyperslab */
+ ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* Check with unlimited dimension clipped to 4 */
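+ /* (two 2x2x2 blocks remain once the unlimited dimension is clipped to rows 2-3) */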
+ test_hyper_unlim_check(sid, dims, (hssize_t)16, (hssize_t)2, eblock1, eblock2);
+
+ /* Check with unlimited dimension clipped to 3 */
+ dims[1] = 3;
+ eblock1[4] = 2;
+ eblock2[4] = 2;
+ test_hyper_unlim_check(sid, dims, (hssize_t)8, (hssize_t)2, eblock1, eblock2);
+
+ /* Check with unlimited dimension clipped to 2 */
+ dims[1] = 2;
+ test_hyper_unlim_check(sid, dims, (hssize_t)0, (hssize_t)0, eblock1, eblock2);
+
+ /* Check with unlimited dimension clipped to 1 */
+ dims[1] = 1;
+ test_hyper_unlim_check(sid, dims, (hssize_t)0, (hssize_t)0, eblock1, eblock2);
+
+ /* Check with unlimited dimension clipped to 7 */
+ dims[1] = 7;
+ eblock1[4] = 6;
+ eblock2[4] = 6;
+ test_hyper_unlim_check(sid, dims, (hssize_t)40, (hssize_t)2, eblock1, eblock2);
+
+ /* Set offset of selection */
+ ret = H5Soffset_simple(sid, offset);
+ CHECK(ret, FAIL, "H5Soffset_simple");
+
+ /* Check with adjusted offset (should not affect result) */
+ test_hyper_unlim_check(sid, dims, (hssize_t)40, (hssize_t)2, eblock1, eblock2);
+
+ /* Reset offset of selection */
+ offset[1] = (hssize_t)0;
+ ret = H5Soffset_simple(sid, offset);
+ CHECK(ret, FAIL, "H5Soffset_simple");
+
+ /*
+ * Now try with multiple blocks in unlimited dimension
+ */
+ stride[1] = 3;
+ stride[2] = 1;
+ count[1] = H5S_UNLIMITED;
+ count[2] = 1;
+ block[1] = 2;
+
+ /* Select unlimited hyperslab */
+ ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* Check with new selection */
+ eblock1[1] = 2;
+ eblock1[4] = 3;
+ eblock2[1] = 5;
+ eblock2[2] = 1;
+ eblock2[4] = 6;
+ eblock2[5] = 2;
+ test_hyper_unlim_check(sid, dims, (hssize_t)16, (hssize_t)2, eblock1, eblock2);
+
+ /* Check with unlimited dimension clipped to 3 */
+ dims[1] = 3;
+ eblock1[4] = 2;
+ test_hyper_unlim_check(sid, dims, (hssize_t)4, (hssize_t)1, eblock1, eblock2);
+
+ /* Check with unlimited dimension clipped to 4 */
+ dims[1] = 4;
+ eblock1[4] = 3;
+ test_hyper_unlim_check(sid, dims, (hssize_t)8, (hssize_t)1, eblock1, eblock2);
+
+ /* Check with unlimited dimension clipped to 5 */
+ dims[1] = 5;
+ eblock1[4] = 3;
+ test_hyper_unlim_check(sid, dims, (hssize_t)8, (hssize_t)1, eblock1, eblock2);
+
+ /* Check with unlimited dimension clipped to 6 */
+ dims[1] = 6;
+ eblock1[4] = 3;
+ eblock2[4] = 5;
+ test_hyper_unlim_check(sid, dims, (hssize_t)12, (hssize_t)2, eblock1, eblock2);
+
+ /* Set offset of selection */
+ offset[1] = (hssize_t)-1;
+ ret = H5Soffset_simple(sid, offset);
+ CHECK(ret, FAIL, "H5Soffset_simple");
+
+ /* Check with adjusted offset (should not affect result) */
+ test_hyper_unlim_check(sid, dims, (hssize_t)12, (hssize_t)2, eblock1, eblock2);
+
+ /* Set offset of selection */
+ offset[1] = (hssize_t)3;
+ ret = H5Soffset_simple(sid, offset);
+ CHECK(ret, FAIL, "H5Soffset_simple");
+
+ /* Check with adjusted offset (should not affect result) */
+ test_hyper_unlim_check(sid, dims, (hssize_t)12, (hssize_t)2, eblock1, eblock2);
+
+ /* Reset offset of selection */
+ offset[1] = (hssize_t)0;
+ ret = H5Soffset_simple(sid, offset);
+ CHECK(ret, FAIL, "H5Soffset_simple");
+
+ /*
+ * Now try invalid operations
+ */
+ H5E_BEGIN_TRY
+ {
+ /* Try multiple unlimited dimensions */
+ start[0] = 1;
+ start[1] = 2;
+ start[2] = 1;
+ stride[0] = 1;
+ stride[1] = 3;
+ stride[2] = 3;
+ count[0] = 1;
+ count[1] = H5S_UNLIMITED;
+ count[2] = H5S_UNLIMITED;
+ block[0] = 2;
+ block[1] = 2;
+ block[2] = 2;
+ ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, start, stride, count, block);
+ VERIFY(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* Try unlimited count and block */
+ count[2] = 2;
+ block[1] = H5S_UNLIMITED;
+ ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, start, stride, count, block);
+ VERIFY(ret, FAIL, "H5Sselect_hyperslab");
+ }
+ H5E_END_TRY
+
+ /* Try operations with two unlimited selections */
+ block[1] = 2;
+ ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+ H5E_BEGIN_TRY
+ {
+ ret = H5Sselect_hyperslab(sid, H5S_SELECT_OR, start, NULL, count, NULL);
+ VERIFY(ret, FAIL, "H5Sselect_hyperslab");
+ ret = H5Sselect_hyperslab(sid, H5S_SELECT_AND, start, NULL, count, NULL);
+ VERIFY(ret, FAIL, "H5Sselect_hyperslab");
+ ret = H5Sselect_hyperslab(sid, H5S_SELECT_XOR, start, NULL, count, NULL);
+ VERIFY(ret, FAIL, "H5Sselect_hyperslab");
+ ret = H5Sselect_hyperslab(sid, H5S_SELECT_NOTB, start, NULL, count, NULL);
+ VERIFY(ret, FAIL, "H5Sselect_hyperslab");
+ ret = H5Sselect_hyperslab(sid, H5S_SELECT_NOTA, start, NULL, count, NULL);
+ VERIFY(ret, FAIL, "H5Sselect_hyperslab");
+ }
+ H5E_END_TRY
+
+ /* Try invalid combination operations */
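+ /* (each of these combinations would produce an unlimited selection, which is not supported) */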
+ H5E_BEGIN_TRY
+ {
+ ret = H5Sselect_hyperslab(sid, H5S_SELECT_OR, start, NULL, block, NULL);
+ VERIFY(ret, FAIL, "H5Sselect_hyperslab");
+ ret = H5Sselect_hyperslab(sid, H5S_SELECT_XOR, start, NULL, block, NULL);
+ VERIFY(ret, FAIL, "H5Sselect_hyperslab");
+ ret = H5Sselect_hyperslab(sid, H5S_SELECT_NOTB, start, NULL, block, NULL);
+ VERIFY(ret, FAIL, "H5Sselect_hyperslab");
+ }
+ H5E_END_TRY
+ ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, start, NULL, block, NULL);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+ H5E_BEGIN_TRY
+ {
+ ret = H5Sselect_hyperslab(sid, H5S_SELECT_OR, start, stride, count, block);
+ VERIFY(ret, FAIL, "H5Sselect_hyperslab");
+ ret = H5Sselect_hyperslab(sid, H5S_SELECT_XOR, start, stride, count, block);
+ VERIFY(ret, FAIL, "H5Sselect_hyperslab");
+ ret = H5Sselect_hyperslab(sid, H5S_SELECT_NOTA, start, stride, count, block);
+ VERIFY(ret, FAIL, "H5Sselect_hyperslab");
+ }
+ H5E_END_TRY
+
+ /*
+ * Now test valid combination operations
+ */
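+ /* (each of these combinations produces a finite selection, so they should succeed) */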
+ /* unlim AND non-unlim */
+ count[0] = 1;
+ count[1] = H5S_UNLIMITED;
+ count[2] = 2;
+ block[0] = 2;
+ block[1] = 2;
+ block[2] = 2;
+ ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+ start2[0] = 2;
+ start2[1] = 2;
+ start2[2] = 0;
+ count2[0] = 5;
+ count2[1] = 4;
+ count2[2] = 2;
+ ret = H5Sselect_hyperslab(sid, H5S_SELECT_AND, start2, NULL, count2, NULL);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+ eblock1[0] = 2;
+ eblock1[3] = 2;
+ eblock1[1] = 2;
+ eblock1[4] = 3;
+ eblock1[2] = 1;
+ eblock1[5] = 1;
+ eblock2[0] = 2;
+ eblock2[3] = 2;
+ eblock2[1] = 5;
+ eblock2[4] = 5;
+ eblock2[2] = 1;
+ eblock2[5] = 1;
+ dims[0] = 50;
+ dims[1] = 50;
+ dims[2] = 50;
+ test_hyper_unlim_check(sid, dims, (hssize_t)3, (hssize_t)2, eblock1, eblock2);
+
+ /* unlim NOTA non-unlim */
+ count[0] = 1;
+ count[1] = H5S_UNLIMITED;
+ count[2] = 2;
+ block[0] = 2;
+ block[1] = 2;
+ block[2] = 2;
+ ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+ start2[0] = 1;
+ start2[1] = 5;
+ start2[2] = 2;
+ count2[0] = 2;
+ count2[1] = 2;
+ count2[2] = 6;
+ ret = H5Sselect_hyperslab(sid, H5S_SELECT_NOTA, start2, NULL, count2, NULL);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+ eblock1[0] = 1;
+ eblock1[3] = 2;
+ eblock1[1] = 5;
+ eblock1[4] = 6;
+ eblock1[2] = 3;
+ eblock1[5] = 3;
+ eblock2[0] = 1;
+ eblock2[3] = 2;
+ eblock2[1] = 5;
+ eblock2[4] = 6;
+ eblock2[2] = 6;
+ eblock2[5] = 7;
+ dims[0] = 50;
+ dims[1] = 50;
+ dims[2] = 50;
+ test_hyper_unlim_check(sid, dims, (hssize_t)12, (hssize_t)2, eblock1, eblock2);
+
+ /* non-unlim AND unlim */
+ start2[0] = 2;
+ start2[1] = 2;
+ start2[2] = 0;
+ count2[0] = 5;
+ count2[1] = 4;
+ count2[2] = 2;
+ ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, start2, NULL, count2, NULL);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+ count[0] = 1;
+ count[1] = H5S_UNLIMITED;
+ count[2] = 2;
+ block[0] = 2;
+ block[1] = 2;
+ block[2] = 2;
+ ret = H5Sselect_hyperslab(sid, H5S_SELECT_AND, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+ eblock1[0] = 2;
+ eblock1[3] = 2;
+ eblock1[1] = 2;
+ eblock1[4] = 3;
+ eblock1[2] = 1;
+ eblock1[5] = 1;
+ eblock2[0] = 2;
+ eblock2[3] = 2;
+ eblock2[1] = 5;
+ eblock2[4] = 5;
+ eblock2[2] = 1;
+ eblock2[5] = 1;
+ dims[0] = 50;
+ dims[1] = 50;
+ dims[2] = 50;
+ test_hyper_unlim_check(sid, dims, (hssize_t)3, (hssize_t)2, eblock1, eblock2);
+
+ /* non-unlim NOTB unlim */
+ start2[0] = 1;
+ start2[1] = 5;
+ start2[2] = 2;
+ count2[0] = 2;
+ count2[1] = 2;
+ count2[2] = 6;
+ ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, start2, NULL, count2, NULL);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+ count[0] = 1;
+ count[1] = H5S_UNLIMITED;
+ count[2] = 2;
+ block[0] = 2;
+ block[1] = 2;
+ block[2] = 2;
+ ret = H5Sselect_hyperslab(sid, H5S_SELECT_NOTB, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+ eblock1[0] = 1;
+ eblock1[3] = 2;
+ eblock1[1] = 5;
+ eblock1[4] = 6;
+ eblock1[2] = 3;
+ eblock1[5] = 3;
+ eblock2[0] = 1;
+ eblock2[3] = 2;
+ eblock2[1] = 5;
+ eblock2[4] = 6;
+ eblock2[2] = 6;
+ eblock2[5] = 7;
+ dims[0] = 50;
+ dims[1] = 50;
+ dims[2] = 50;
+ test_hyper_unlim_check(sid, dims, (hssize_t)12, (hssize_t)2, eblock1, eblock2);
+
+ /* Test H5Sget_select_npoints() */
+ ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+ ssize_out = H5Sget_select_npoints(sid);
+ VERIFY(ssize_out, (hssize_t)H5S_UNLIMITED, "H5Sget_select_npoints");
+
+ /* Test H5Sget_select_hyper_nblocks() */
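+ /* For an unlimited selection the block count is reported as H5S_UNLIMITED; the call is wrapped in H5E_BEGIN_TRY to suppress any error it pushes */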
+ H5E_BEGIN_TRY
+ {
+ ssize_out = H5Sget_select_hyper_nblocks(sid);
+ }
+ H5E_END_TRY;
+ VERIFY(ssize_out, (hssize_t)H5S_UNLIMITED, "H5Sget_select_hyper_nblocks");
+
+ /* Test H5Sget_select_bounds() */
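+ /* The upper bound in the unlimited dimension should be reported as H5S_UNLIMITED */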
+ ret = H5Sget_select_bounds(sid, start2, count2);
+ CHECK(ret, FAIL, "H5Sget_select_bounds");
+ VERIFY(start2[0], start[0], "H5Sget_select_bounds");
+ VERIFY(start2[1], start[1], "H5Sget_select_bounds");
+ VERIFY(start2[2], start[2], "H5Sget_select_bounds");
+ VERIFY(count2[0], (long)(start[0] + (stride[0] * (count[0] - 1)) + block[0] - 1), "H5Sget_select_bounds");
+ VERIFY(count2[1], H5S_UNLIMITED, "H5Sget_select_bounds");
+ VERIFY(count2[2], (long)(start[2] + (stride[2] * (count[2] - 1)) + block[2] - 1), "H5Sget_select_bounds");
+
+ /* Close the dataspace */
+ ret = H5Sclose(sid);
+ CHECK(ret, FAIL, "H5Sclose");
+} /* end test_hyper_unlim() */
+
+/****************************************************************
+**
+** test_internal_consistency(): Tests selections on a dataspace, then
+** verifies that the internal states of the selections' data structures
+** are consistent.
+**
+****************************************************************/
+static void
+test_internal_consistency(void)
+{
+ hid_t all_sid; /* Dataspace ID with "all" selection */
+ hid_t none_sid; /* Dataspace ID with "none" selection */
+ hid_t single_pt_sid; /* Dataspace ID with single point selection */
+ hid_t mult_pt_sid; /* Dataspace ID with multiple point selection */
+ hid_t single_hyper_sid; /* Dataspace ID with single block hyperslab selection */
+ hid_t single_hyper_all_sid; /* Dataspace ID with single block hyperslab
+ * selection that is the entire dataspace
+ */
+ hid_t single_hyper_pt_sid; /* Dataspace ID with single block hyperslab
+ * selection that is the same as the single
+ * point selection
+ */
+ hid_t regular_hyper_sid; /* Dataspace ID with regular hyperslab selection */
+ hid_t irreg_hyper_sid; /* Dataspace ID with irregular hyperslab selection */
+ hid_t none_hyper_sid; /* Dataspace ID with "no hyperslabs" selection */
+ hid_t scalar_all_sid; /* ID for scalar dataspace with "all" selection */
+ hid_t scalar_none_sid; /* ID for scalar dataspace with "none" selection */
+ hid_t tmp_sid; /* Temporary dataspace ID */
+ hsize_t dims[] = {SPACE9_DIM1, SPACE9_DIM2};
+ hsize_t coord1[1][SPACE2_RANK]; /* Coordinates for single point selection */
+ hsize_t coord2[SPACE9_DIM2][SPACE9_RANK]; /* Coordinates for multiple point selection */
+ hsize_t start[SPACE9_RANK]; /* Hyperslab start */
+ hsize_t stride[SPACE9_RANK]; /* Hyperslab stride */
+ hsize_t count[SPACE9_RANK]; /* Hyperslab block count */
+ hsize_t block[SPACE9_RANK]; /* Hyperslab block size */
+#if 0
+ htri_t check; /* Consistency check return value */
+#endif
+ herr_t ret; /* Generic return value */
+
+ /* Output message about test being performed */
+ MESSAGE(6, ("Testing Consistency of Internal States\n"));
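+ /* coord2 below is dimensioned by SPACE9_DIM2, so it must be able to hold POINT1_NPOINTS entries */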
+ HDassert(SPACE9_DIM2 >= POINT1_NPOINTS);
+
+ /* Create dataspace for "all" selection */
+ all_sid = H5Screate_simple(SPACE9_RANK, dims, NULL);
+ CHECK(all_sid, FAIL, "H5Screate_simple");
+
+ /* Select entire extent for dataspace */
+ ret = H5Sselect_all(all_sid);
+ CHECK(ret, FAIL, "H5Sselect_all");
+
+ /* Create dataspace for "none" selection */
+ none_sid = H5Screate_simple(SPACE9_RANK, dims, NULL);
+ CHECK(none_sid, FAIL, "H5Screate_simple");
+
+ /* Un-Select entire extent for dataspace */
+ ret = H5Sselect_none(none_sid);
+ CHECK(ret, FAIL, "H5Sselect_none");
+
+ /* Create dataspace for single point selection */
+ single_pt_sid = H5Screate_simple(SPACE9_RANK, dims, NULL);
+ CHECK(single_pt_sid, FAIL, "H5Screate_simple");
+
+ /* Select single point for the single point selection */
+ coord1[0][0] = 2;
+ coord1[0][1] = 2;
+ ret = H5Sselect_elements(single_pt_sid, H5S_SELECT_SET, (size_t)1, (const hsize_t *)coord1);
+ CHECK(ret, FAIL, "H5Sselect_elements");
+
+ /* Create dataspace for multiple point selection */
+ mult_pt_sid = H5Screate_simple(SPACE9_RANK, dims, NULL);
+ CHECK(mult_pt_sid, FAIL, "H5Screate_simple");
+
+ /* Select sequence of ten points for multiple point selection */
+ coord2[0][0] = 2;
+ coord2[0][1] = 2;
+ coord2[1][0] = 7;
+ coord2[1][1] = 2;
+ coord2[2][0] = 1;
+ coord2[2][1] = 4;
+ coord2[3][0] = 2;
+ coord2[3][1] = 6;
+ coord2[4][0] = 0;
+ coord2[4][1] = 8;
+ coord2[5][0] = 3;
+ coord2[5][1] = 2;
+ coord2[6][0] = 4;
+ coord2[6][1] = 4;
+ coord2[7][0] = 1;
+ coord2[7][1] = 0;
+ coord2[8][0] = 5;
+ coord2[8][1] = 1;
+ coord2[9][0] = 9;
+ coord2[9][1] = 3;
+ ret = H5Sselect_elements(mult_pt_sid, H5S_SELECT_SET, (size_t)POINT1_NPOINTS, (const hsize_t *)coord2);
+ CHECK(ret, FAIL, "H5Sselect_elements");
+
+ /* Create dataspace for single hyperslab selection */
+ single_hyper_sid = H5Screate_simple(SPACE9_RANK, dims, NULL);
+ CHECK(single_hyper_sid, FAIL, "H5Screate_simple");
+
+ /* Select 10x10 hyperslab for single hyperslab selection */
+ start[0] = 1;
+ start[1] = 1;
+ stride[0] = 1;
+ stride[1] = 1;
+ count[0] = 1;
+ count[1] = 1;
+ block[0] = (SPACE9_DIM1 - 2);
+ block[1] = (SPACE9_DIM2 - 2);
+ ret = H5Sselect_hyperslab(single_hyper_sid, H5S_SELECT_SET, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* Create dataspace for single hyperslab selection with entire extent selected */
+ single_hyper_all_sid = H5Screate_simple(SPACE9_RANK, dims, NULL);
+ CHECK(single_hyper_all_sid, FAIL, "H5Screate_simple");
+
+ /* Select entire extent for hyperslab selection */
+ start[0] = 0;
+ start[1] = 0;
+ stride[0] = 1;
+ stride[1] = 1;
+ count[0] = 1;
+ count[1] = 1;
+ block[0] = SPACE9_DIM1;
+ block[1] = SPACE9_DIM2;
+ ret = H5Sselect_hyperslab(single_hyper_all_sid, H5S_SELECT_SET, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* Create dataspace for single hyperslab selection with single point selected */
+ single_hyper_pt_sid = H5Screate_simple(SPACE9_RANK, dims, NULL);
+ CHECK(single_hyper_pt_sid, FAIL, "H5Screate_simple");
+
+ /* Select single point for hyperslab selection (same as the single point selection) */
+ start[0] = 2;
+ start[1] = 2;
+ stride[0] = 1;
+ stride[1] = 1;
+ count[0] = 1;
+ count[1] = 1;
+ block[0] = 1;
+ block[1] = 1;
+ ret = H5Sselect_hyperslab(single_hyper_pt_sid, H5S_SELECT_SET, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* Create dataspace for regular hyperslab selection */
+ regular_hyper_sid = H5Screate_simple(SPACE9_RANK, dims, NULL);
+ CHECK(regular_hyper_sid, FAIL, "H5Screate_simple");
+
+ /* Select regular, strided hyperslab selection */
+ start[0] = 2;
+ start[1] = 2;
+ stride[0] = 2;
+ stride[1] = 2;
+ count[0] = 5;
+ count[1] = 2;
+ block[0] = 1;
+ block[1] = 1;
+ ret = H5Sselect_hyperslab(regular_hyper_sid, H5S_SELECT_SET, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* Create dataspace for irregular hyperslab selection */
+ irreg_hyper_sid = H5Screate_simple(SPACE9_RANK, dims, NULL);
+ CHECK(irreg_hyper_sid, FAIL, "H5Screate_simple");
+
+ /* Create irregular hyperslab selection by OR'ing two blocks together */
+ start[0] = 2;
+ start[1] = 2;
+ stride[0] = 1;
+ stride[1] = 1;
+ count[0] = 1;
+ count[1] = 1;
+ block[0] = 1;
+ block[1] = 1;
+ ret = H5Sselect_hyperslab(irreg_hyper_sid, H5S_SELECT_SET, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ start[0] = 4;
+ start[1] = 4;
+ stride[0] = 1;
+ stride[1] = 1;
+ count[0] = 1;
+ count[1] = 1;
+ block[0] = 3;
+ block[1] = 3;
+ ret = H5Sselect_hyperslab(irreg_hyper_sid, H5S_SELECT_OR, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* Create dataspace for "no" hyperslab selection */
+ none_hyper_sid = H5Screate_simple(SPACE9_RANK, dims, NULL);
+ CHECK(none_hyper_sid, FAIL, "H5Screate_simple");
+
+ /* Create "no" hyperslab selection by XOR'ing same blocks together */
+ start[0] = 2;
+ start[1] = 2;
+ stride[0] = 1;
+ stride[1] = 1;
+ count[0] = 1;
+ count[1] = 1;
+ block[0] = 1;
+ block[1] = 1;
+ ret = H5Sselect_hyperslab(none_hyper_sid, H5S_SELECT_SET, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ ret = H5Sselect_hyperslab(none_hyper_sid, H5S_SELECT_XOR, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* Create scalar dataspace for "all" selection */
+ scalar_all_sid = H5Screate(H5S_SCALAR);
+ CHECK(scalar_all_sid, FAIL, "H5Screate");
+
+ /* Create scalar dataspace for "none" selection */
+ scalar_none_sid = H5Screate(H5S_SCALAR);
+ CHECK(scalar_none_sid, FAIL, "H5Screate");
+
+ /* Un-Select entire extent for dataspace */
+ ret = H5Sselect_none(scalar_none_sid);
+ CHECK(ret, FAIL, "H5Sselect_none");
+
+ /* Test all the selections created */
+
+ /* Test a copy of the "all" selection */
+ tmp_sid = H5Scopy(all_sid);
+ CHECK(tmp_sid, FAIL, "H5Scopy");
+#if 0
+ check = H5S__internal_consistency_test(tmp_sid);
+ VERIFY(check, TRUE, "H5S__internal_consistency_test");
+#endif
+ ret = H5Sclose(tmp_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+#if 0
+ /* Test "none" selection */
+ check = H5S__internal_consistency_test(none_sid);
+ VERIFY(check, TRUE, "H5S__internal_consistency_test");
+
+ /* Test single point selection */
+ check = H5S__internal_consistency_test(single_pt_sid);
+ VERIFY(check, TRUE, "H5S__internal_consistency_test");
+
+ /* Test multiple point selection */
+ check = H5S__internal_consistency_test(mult_pt_sid);
+ VERIFY(check, TRUE, "H5S__internal_consistency_test");
+
+ /* Test "plain" single hyperslab selection */
+ check = H5S__internal_consistency_test(single_hyper_sid);
+ VERIFY(check, TRUE, "H5S__internal_consistency_test");
+
+ /* Test "all" single hyperslab selection */
+ check = H5S__internal_consistency_test(single_hyper_all_sid);
+ VERIFY(check, TRUE, "H5S__internal_consistency_test");
+
+ /* Test "single point" single hyperslab selection */
+ check = H5S__internal_consistency_test(single_hyper_pt_sid);
+ VERIFY(check, TRUE, "H5S__internal_consistency_test");
+
+ /* Test regular, strided hyperslab selection */
+ check = H5S__internal_consistency_test(regular_hyper_sid);
+ VERIFY(check, TRUE, "H5S__internal_consistency_test");
+
+ /* Test irregular hyperslab selection */
+ check = H5S__internal_consistency_test(irreg_hyper_sid);
+ VERIFY(check, TRUE, "H5S__internal_consistency_test");
+
+ /* Test "no" hyperslab selection */
+ check = H5S__internal_consistency_test(none_hyper_sid);
+ VERIFY(check, TRUE, "H5S__internal_consistency_test");
+
+ /* Test scalar "all" hyperslab selection */
+ check = H5S__internal_consistency_test(scalar_all_sid);
+ VERIFY(check, TRUE, "H5S__internal_consistency_test");
+
+ /* Test scalar "none" hyperslab selection */
+ check = H5S__internal_consistency_test(scalar_none_sid);
+ VERIFY(check, TRUE, "H5S__internal_consistency_test");
+#endif
+
+ /* Close dataspaces */
+ ret = H5Sclose(all_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+ ret = H5Sclose(none_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+ ret = H5Sclose(single_pt_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+ ret = H5Sclose(mult_pt_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+ ret = H5Sclose(single_hyper_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+ ret = H5Sclose(single_hyper_all_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+ ret = H5Sclose(single_hyper_pt_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+ ret = H5Sclose(regular_hyper_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+ ret = H5Sclose(irreg_hyper_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+ ret = H5Sclose(none_hyper_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+ ret = H5Sclose(scalar_all_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+ ret = H5Sclose(scalar_none_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+} /* test_internal_consistency() */
+
+/****************************************************************
+**
+** test_irreg_io(): Tests unusual selections on datasets, to stress the
+** new hyperslab code.
+**
+****************************************************************/
+static void
+test_irreg_io(void)
+{
+ hid_t fid; /* File ID */
+ hid_t did; /* Dataset ID */
+ hid_t dcpl_id; /* Dataset creation property list ID */
+ hid_t sid; /* File dataspace ID */
+ hid_t mem_sid; /* Memory dataspace ID */
+ hsize_t dims[] = {6, 12}; /* Dataspace dimensions */
+ hsize_t chunk_dims[] = {2, 2}; /* Chunk dimensions */
+ hsize_t mem_dims[] = {32}; /* Memory dataspace dimensions */
+ hsize_t start[2]; /* Hyperslab start */
+ hsize_t stride[2]; /* Hyperslab stride */
+ hsize_t count[2]; /* Hyperslab block count */
+ hsize_t block[2]; /* Hyperslab block size */
+ unsigned char wbuf[72]; /* Write buffer */
+ unsigned char rbuf[32]; /* Read buffer */
+ unsigned u; /* Local index variable */
+ herr_t ret; /* Generic return value */
+
+ /* Output message about test being performed */
+ MESSAGE(6, ("Testing Irregular Hyperslab I/O\n"));
+
+ /* Create file */
+ fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(fid, FAIL, "H5Fcreate");
+
+ /* Create dataspace for dataset */
+ sid = H5Screate_simple(2, dims, NULL);
+ CHECK(sid, FAIL, "H5Screate_simple");
+
+ /* Set chunk dimensions for dataset */
+ dcpl_id = H5Pcreate(H5P_DATASET_CREATE);
+ CHECK(dcpl_id, FAIL, "H5Pcreate");
+ ret = H5Pset_chunk(dcpl_id, 2, chunk_dims);
+ CHECK(ret, FAIL, "H5Pset_chunk");
+
+ /* Create a dataset */
+ did = H5Dcreate2(fid, SPACE1_NAME, H5T_NATIVE_UCHAR, sid, H5P_DEFAULT, dcpl_id, H5P_DEFAULT);
+ CHECK(did, FAIL, "H5Dcreate2");
+
+ /* Initialize the write buffer */
+ for (u = 0; u < 72; u++)
+ wbuf[u] = (unsigned char)u;
+
+ /* Write entire dataset to disk */
+ ret = H5Dwrite(did, H5T_NATIVE_UCHAR, H5S_ALL, H5S_ALL, H5P_DEFAULT, wbuf);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* Close the DCPL */
+ ret = H5Pclose(dcpl_id);
+ CHECK(ret, FAIL, "H5Pclose");
+
+ /* Create dataspace for memory selection */
+ mem_sid = H5Screate_simple(1, mem_dims, NULL);
+ CHECK(mem_sid, FAIL, "H5Screate_simple");
+
+ /* Select 'L'-shaped region within dataset */
+ start[0] = 0;
+ start[1] = 10;
+ stride[0] = 1;
+ stride[1] = 1;
+ count[0] = 4;
+ count[1] = 2;
+ block[0] = 1;
+ block[1] = 1;
+ ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ start[0] = 4;
+ start[1] = 0;
+ stride[0] = 1;
+ stride[1] = 1;
+ count[0] = 2;
+ count[1] = 12;
+ block[0] = 1;
+ block[1] = 1;
+ ret = H5Sselect_hyperslab(sid, H5S_SELECT_OR, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
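+ /* The two OR'd blocks cover 4x2 + 2x12 = 32 elements, matching the 32-element 1-D memory dataspace */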
+
+ /* Reset the buffer */
+ HDmemset(rbuf, 0, sizeof(rbuf));
+
+ /* Read selection from disk */
+ ret = H5Dread(did, H5T_NATIVE_UCHAR, mem_sid, sid, H5P_DEFAULT, rbuf);
+ CHECK(ret, FAIL, "H5Dread");
+
+ /* Close everything */
+ ret = H5Sclose(mem_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+ ret = H5Sclose(sid);
+ CHECK(ret, FAIL, "H5Sclose");
+ ret = H5Dclose(did);
+ CHECK(ret, FAIL, "H5Dclose");
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+} /* test_irreg_io() */
+
+/****************************************************************
+**
+** test_sel_iter(): Test selection iterator API routines.
+**
+****************************************************************/
+static void
+test_sel_iter(void)
+{
+ hid_t sid; /* Dataspace ID */
+ hid_t iter_id; /* Dataspace selection iterator ID */
+ hsize_t dims1[] = {6, 12}; /* 2-D Dataspace dimensions */
+ hsize_t coord1[POINT1_NPOINTS][2]; /* Coordinates for point selection */
+ hsize_t start[2]; /* Hyperslab start */
+ hsize_t stride[2]; /* Hyperslab stride */
+ hsize_t count[2]; /* Hyperslab block count */
+ hsize_t block[2]; /* Hyperslab block size */
+ size_t nseq; /* # of sequences retrieved */
+ size_t nbytes; /* # of bytes retrieved */
+ hsize_t off[SEL_ITER_MAX_SEQ]; /* Offsets for retrieved sequences */
+ size_t len[SEL_ITER_MAX_SEQ]; /* Lengths for retrieved sequences */
+ H5S_sel_type sel_type; /* Selection type */
+ unsigned sel_share; /* Whether to share selection with dataspace */
+ unsigned sel_iter_flags; /* Flags for selection iterator creation */
+ herr_t ret; /* Generic return value */
+
+ /* Output message about test being performed */
+ MESSAGE(6, ("Testing Dataspace Selection Iterators\n"));
+
+ /* Create dataspace */
+ sid = H5Screate_simple(2, dims1, NULL);
+ CHECK(sid, FAIL, "H5Screate_simple");
+
+ /* Try creating selection iterator object with bad parameters */
+ H5E_BEGIN_TRY
+ { /* Bad dataspace ID */
+ iter_id = H5Ssel_iter_create(H5I_INVALID_HID, (size_t)1, (unsigned)0);
+ }
+ H5E_END_TRY;
+ VERIFY(iter_id, FAIL, "H5Ssel_iter_create");
+ H5E_BEGIN_TRY
+ { /* Bad element size */
+ iter_id = H5Ssel_iter_create(sid, (size_t)0, (unsigned)0);
+ }
+ H5E_END_TRY;
+ VERIFY(iter_id, FAIL, "H5Ssel_iter_create");
+ H5E_BEGIN_TRY
+ { /* Bad flag(s) */
+ iter_id = H5Ssel_iter_create(sid, (size_t)1, (unsigned)0xffff);
+ }
+ H5E_END_TRY;
+ VERIFY(iter_id, FAIL, "H5Ssel_iter_create");
+
+ /* Try closing selection iterator, with bad parameters */
+ H5E_BEGIN_TRY
+ { /* Invalid ID */
+ ret = H5Ssel_iter_close(H5I_INVALID_HID);
+ }
+ H5E_END_TRY;
+ VERIFY(ret, FAIL, "H5Ssel_iter_close");
+ H5E_BEGIN_TRY
+ { /* Not a selection iterator ID */
+ ret = H5Ssel_iter_close(sid);
+ }
+ H5E_END_TRY;
+ VERIFY(ret, FAIL, "H5Ssel_iter_close");
+
+ /* Try with no selection sharing, and with sharing */
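+ /* H5S_SEL_ITER_SHARE_WITH_DATASPACE makes the iterator use the dataspace's selection directly instead of copying it */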
+ for (sel_share = 0; sel_share < 2; sel_share++) {
+ /* Set selection iterator sharing flags */
+ if (sel_share)
+ sel_iter_flags = H5S_SEL_ITER_SHARE_WITH_DATASPACE;
+ else
+ sel_iter_flags = 0;
+
+ /* Create selection iterator object */
+ iter_id = H5Ssel_iter_create(sid, (size_t)1, (unsigned)sel_iter_flags);
+ CHECK(iter_id, FAIL, "H5Ssel_iter_create");
+
+ /* Close selection iterator */
+ ret = H5Ssel_iter_close(iter_id);
+ CHECK(ret, FAIL, "H5Ssel_iter_close");
+
+ /* Try closing selection iterator twice */
+ H5E_BEGIN_TRY
+ { /* Invalid ID */
+ ret = H5Ssel_iter_close(iter_id);
+ }
+ H5E_END_TRY;
+ VERIFY(ret, FAIL, "H5Ssel_iter_close");
+
+ /* Create selection iterator object */
+ iter_id = H5Ssel_iter_create(sid, (size_t)1, (unsigned)sel_iter_flags);
+ CHECK(iter_id, FAIL, "H5Ssel_iter_create");
+
+ /* Try resetting selection iterator with bad parameters */
+ H5E_BEGIN_TRY
+ {
+ ret = H5Ssel_iter_reset(H5I_INVALID_HID, sid);
+ }
+ H5E_END_TRY;
+ VERIFY(ret, FAIL, "H5Ssel_iter_reset");
+ H5E_BEGIN_TRY
+ {
+ ret = H5Ssel_iter_reset(iter_id, H5I_INVALID_HID);
+ }
+ H5E_END_TRY;
+ VERIFY(ret, FAIL, "H5Ssel_iter_reset");
+
+ /* Try retrieving sequences, with bad parameters */
+ H5E_BEGIN_TRY
+ { /* Invalid ID */
+ ret = H5Ssel_iter_get_seq_list(H5I_INVALID_HID, (size_t)1, (size_t)1, &nseq, &nbytes, off, len);
+ }
+ H5E_END_TRY;
+ VERIFY(ret, FAIL, "H5Ssel_iter_get_seq_list");
+ H5E_BEGIN_TRY
+ { /* Invalid nseq pointer */
+ ret = H5Ssel_iter_get_seq_list(iter_id, (size_t)1, (size_t)1, NULL, &nbytes, off, len);
+ }
+ H5E_END_TRY;
+ VERIFY(ret, FAIL, "H5Ssel_iter_get_seq_list");
+ H5E_BEGIN_TRY
+ { /* Invalid nbytes pointer */
+ ret = H5Ssel_iter_get_seq_list(iter_id, (size_t)1, (size_t)1, &nseq, NULL, off, len);
+ }
+ H5E_END_TRY;
+ VERIFY(ret, FAIL, "H5Ssel_iter_get_seq_list");
+ H5E_BEGIN_TRY
+ { /* Invalid offset array */
+ ret = H5Ssel_iter_get_seq_list(iter_id, (size_t)1, (size_t)1, &nseq, &nbytes, NULL, len);
+ }
+ H5E_END_TRY;
+ VERIFY(ret, FAIL, "H5Ssel_iter_get_seq_list");
+ H5E_BEGIN_TRY
+ { /* Invalid length array */
+ ret = H5Ssel_iter_get_seq_list(iter_id, (size_t)1, (size_t)1, &nseq, &nbytes, off, NULL);
+ }
+ H5E_END_TRY;
+ VERIFY(ret, FAIL, "H5Ssel_iter_get_seq_list");
+
+ /* Close selection iterator */
+ ret = H5Ssel_iter_close(iter_id);
+ CHECK(ret, FAIL, "H5Ssel_iter_close");
+
+ /* Test iterators on various basic selection types */
+ for (sel_type = H5S_SEL_NONE; sel_type <= H5S_SEL_ALL; sel_type = (H5S_sel_type)(sel_type + 1)) {
+ switch (sel_type) {
+ case H5S_SEL_NONE: /* "None" selection */
+ ret = H5Sselect_none(sid);
+ CHECK(ret, FAIL, "H5Sselect_none");
+ break;
+
+ case H5S_SEL_POINTS: /* Point selection */
+ /* Select sequence of ten points */
+ coord1[0][0] = 0;
+ coord1[0][1] = 9;
+ coord1[1][0] = 1;
+ coord1[1][1] = 2;
+ coord1[2][0] = 2;
+ coord1[2][1] = 4;
+ coord1[3][0] = 0;
+ coord1[3][1] = 6;
+ coord1[4][0] = 1;
+ coord1[4][1] = 8;
+ coord1[5][0] = 2;
+ coord1[5][1] = 10;
+ coord1[6][0] = 0;
+ coord1[6][1] = 11;
+ coord1[7][0] = 1;
+ coord1[7][1] = 4;
+ coord1[8][0] = 2;
+ coord1[8][1] = 1;
+ coord1[9][0] = 0;
+ coord1[9][1] = 3;
+ ret = H5Sselect_elements(sid, H5S_SELECT_SET, (size_t)POINT1_NPOINTS,
+ (const hsize_t *)coord1);
+ CHECK(ret, FAIL, "H5Sselect_elements");
+ break;
+
+ case H5S_SEL_HYPERSLABS: /* Hyperslab selection */
+ /* Select regular hyperslab */
+ start[0] = 3;
+ start[1] = 0;
+ stride[0] = 2;
+ stride[1] = 2;
+ count[0] = 2;
+ count[1] = 5;
+ block[0] = 1;
+ block[1] = 1;
+ ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+ break;
+
+ case H5S_SEL_ALL: /* "All" selection */
+ ret = H5Sselect_all(sid);
+ CHECK(ret, FAIL, "H5Sselect_all");
+ break;
+
+ case H5S_SEL_ERROR:
+ case H5S_SEL_N:
+ default:
+ HDassert(0 && "Can't occur");
+ break;
+ } /* end switch */
+
+ /* Create selection iterator object */
+ iter_id = H5Ssel_iter_create(sid, (size_t)1, (unsigned)sel_iter_flags);
+ CHECK(iter_id, FAIL, "H5Ssel_iter_create");
+
+ /* Try retrieving no sequences, with 0 for maxseq & maxbytes */
+ ret = H5Ssel_iter_get_seq_list(iter_id, (size_t)0, (size_t)1, &nseq, &nbytes, off, len);
+ CHECK(ret, FAIL, "H5Ssel_iter_get_seq_list");
+ VERIFY(nseq, 0, "H5Ssel_iter_get_seq_list");
+ VERIFY(nbytes, 0, "H5Ssel_iter_get_seq_list");
+ ret = H5Ssel_iter_get_seq_list(iter_id, (size_t)1, (size_t)0, &nseq, &nbytes, off, len);
+ CHECK(ret, FAIL, "H5Ssel_iter_get_seq_list");
+ VERIFY(nseq, 0, "H5Ssel_iter_get_seq_list");
+ VERIFY(nbytes, 0, "H5Ssel_iter_get_seq_list");
+
+ /* Try retrieving all sequences */
+ ret = H5Ssel_iter_get_seq_list(iter_id, (size_t)SEL_ITER_MAX_SEQ, (size_t)(1024 * 1024), &nseq,
+ &nbytes, off, len);
+ CHECK(ret, FAIL, "H5Ssel_iter_get_seq_list");
+
+ /* Check results from retrieving sequence list */
+ switch (sel_type) {
+ case H5S_SEL_NONE: /* "None" selection */
+ VERIFY(nseq, 0, "H5Ssel_iter_get_seq_list");
+ VERIFY(nbytes, 0, "H5Ssel_iter_get_seq_list");
+ break;
+
+ case H5S_SEL_POINTS: /* Point selection */
+ VERIFY(nseq, 10, "H5Ssel_iter_get_seq_list");
+ VERIFY(nbytes, 10, "H5Ssel_iter_get_seq_list");
+ break;
+
+ case H5S_SEL_HYPERSLABS: /* Hyperslab selection */
+ VERIFY(nseq, 10, "H5Ssel_iter_get_seq_list");
+ VERIFY(nbytes, 10, "H5Ssel_iter_get_seq_list");
+ break;
+
+ case H5S_SEL_ALL: /* "All" selection */
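+ /* The "all" selection of the 6x12 dataspace of 1-byte elements is one contiguous 72-byte sequence */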
+ VERIFY(nseq, 1, "H5Ssel_iter_get_seq_list");
+ VERIFY(nbytes, 72, "H5Ssel_iter_get_seq_list");
+ break;
+
+ case H5S_SEL_ERROR:
+ case H5S_SEL_N:
+ default:
+ HDassert(0 && "Can't occur");
+ break;
+ } /* end switch */
+
+ /* Close selection iterator */
+ ret = H5Ssel_iter_close(iter_id);
+ CHECK(ret, FAIL, "H5Ssel_iter_close");
+ } /* end for */
+
+ /* Create selection iterator object */
+ iter_id = H5Ssel_iter_create(sid, (size_t)1, (unsigned)sel_iter_flags);
+ CHECK(iter_id, FAIL, "H5Ssel_iter_create");
+
+ /* Test iterators on various basic selection types using
+ * H5Ssel_iter_reset instead of creating multiple iterators */
+ for (sel_type = H5S_SEL_NONE; sel_type <= H5S_SEL_ALL; sel_type = (H5S_sel_type)(sel_type + 1)) {
+ switch (sel_type) {
+ case H5S_SEL_NONE: /* "None" selection */
+ ret = H5Sselect_none(sid);
+ CHECK(ret, FAIL, "H5Sselect_none");
+ break;
+
+ case H5S_SEL_POINTS: /* Point selection */
+ /* Select sequence of ten points */
+ coord1[0][0] = 0;
+ coord1[0][1] = 9;
+ coord1[1][0] = 1;
+ coord1[1][1] = 2;
+ coord1[2][0] = 2;
+ coord1[2][1] = 4;
+ coord1[3][0] = 0;
+ coord1[3][1] = 6;
+ coord1[4][0] = 1;
+ coord1[4][1] = 8;
+ coord1[5][0] = 2;
+ coord1[5][1] = 10;
+ coord1[6][0] = 0;
+ coord1[6][1] = 11;
+ coord1[7][0] = 1;
+ coord1[7][1] = 4;
+ coord1[8][0] = 2;
+ coord1[8][1] = 1;
+ coord1[9][0] = 0;
+ coord1[9][1] = 3;
+ ret = H5Sselect_elements(sid, H5S_SELECT_SET, (size_t)POINT1_NPOINTS,
+ (const hsize_t *)coord1);
+ CHECK(ret, FAIL, "H5Sselect_elements");
+ break;
+
+ case H5S_SEL_HYPERSLABS: /* Hyperslab selection */
+ /* Select regular hyperslab */
+ start[0] = 3;
+ start[1] = 0;
+ stride[0] = 2;
+ stride[1] = 2;
+ count[0] = 2;
+ count[1] = 5;
+ block[0] = 1;
+ block[1] = 1;
+ ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+ break;
+
+ case H5S_SEL_ALL: /* "All" selection */
+ ret = H5Sselect_all(sid);
+ CHECK(ret, FAIL, "H5Sselect_all");
+ break;
+
+ case H5S_SEL_ERROR:
+ case H5S_SEL_N:
+ default:
+ HDassert(0 && "Can't occur");
+ break;
+ } /* end switch */
+
+ /* Try retrieving no sequences, with 0 for maxseq & maxbytes */
+ ret = H5Ssel_iter_get_seq_list(iter_id, (size_t)0, (size_t)1, &nseq, &nbytes, off, len);
+ CHECK(ret, FAIL, "H5Ssel_iter_get_seq_list");
+ VERIFY(nseq, 0, "H5Ssel_iter_get_seq_list");
+ VERIFY(nbytes, 0, "H5Ssel_iter_get_seq_list");
+ ret = H5Ssel_iter_get_seq_list(iter_id, (size_t)1, (size_t)0, &nseq, &nbytes, off, len);
+ CHECK(ret, FAIL, "H5Ssel_iter_get_seq_list");
+ VERIFY(nseq, 0, "H5Ssel_iter_get_seq_list");
+ VERIFY(nbytes, 0, "H5Ssel_iter_get_seq_list");
+
+ /* Reset iterator */
+ ret = H5Ssel_iter_reset(iter_id, sid);
+ CHECK(ret, FAIL, "H5Ssel_iter_reset");
+
+ /* Try retrieving all sequences */
+ ret = H5Ssel_iter_get_seq_list(iter_id, (size_t)SEL_ITER_MAX_SEQ, (size_t)(1024 * 1024), &nseq,
+ &nbytes, off, len);
+ CHECK(ret, FAIL, "H5Ssel_iter_get_seq_list");
+
+ /* Check results from retrieving sequence list */
+ switch (sel_type) {
+ case H5S_SEL_NONE: /* "None" selection */
+ VERIFY(nseq, 0, "H5Ssel_iter_get_seq_list");
+ VERIFY(nbytes, 0, "H5Ssel_iter_get_seq_list");
+ break;
+
+ case H5S_SEL_POINTS: /* Point selection */
+ VERIFY(nseq, 10, "H5Ssel_iter_get_seq_list");
+ VERIFY(nbytes, 10, "H5Ssel_iter_get_seq_list");
+ break;
+
+ case H5S_SEL_HYPERSLABS: /* Hyperslab selection */
+ VERIFY(nseq, 10, "H5Ssel_iter_get_seq_list");
+ VERIFY(nbytes, 10, "H5Ssel_iter_get_seq_list");
+ break;
+
+ case H5S_SEL_ALL: /* "All" selection */
+ VERIFY(nseq, 1, "H5Ssel_iter_get_seq_list");
+ VERIFY(nbytes, 72, "H5Ssel_iter_get_seq_list");
+ break;
+
+ case H5S_SEL_ERROR:
+ case H5S_SEL_N:
+ default:
+ HDassert(0 && "Can't occur");
+ break;
+ } /* end switch */
+
+ /* Reset iterator */
+ ret = H5Ssel_iter_reset(iter_id, sid);
+ CHECK(ret, FAIL, "H5Ssel_iter_reset");
+
+ /* Try retrieving all sequences again */
+ ret = H5Ssel_iter_get_seq_list(iter_id, (size_t)SEL_ITER_MAX_SEQ, (size_t)(1024 * 1024), &nseq,
+ &nbytes, off, len);
+ CHECK(ret, FAIL, "H5Ssel_iter_get_seq_list");
+
+ /* Check results from retrieving sequence list */
+ switch (sel_type) {
+ case H5S_SEL_NONE: /* "None" selection */
+ VERIFY(nseq, 0, "H5Ssel_iter_get_seq_list");
+ VERIFY(nbytes, 0, "H5Ssel_iter_get_seq_list");
+ break;
+
+ case H5S_SEL_POINTS: /* Point selection */
+ VERIFY(nseq, 10, "H5Ssel_iter_get_seq_list");
+ VERIFY(nbytes, 10, "H5Ssel_iter_get_seq_list");
+ break;
+
+ case H5S_SEL_HYPERSLABS: /* Hyperslab selection */
+ VERIFY(nseq, 10, "H5Ssel_iter_get_seq_list");
+ VERIFY(nbytes, 10, "H5Ssel_iter_get_seq_list");
+ break;
+
+ case H5S_SEL_ALL: /* "All" selection */
+ VERIFY(nseq, 1, "H5Ssel_iter_get_seq_list");
+ VERIFY(nbytes, 72, "H5Ssel_iter_get_seq_list");
+ break;
+
+ case H5S_SEL_ERROR:
+ case H5S_SEL_N:
+ default:
+ HDassert(0 && "Can't occur");
+ break;
+ } /* end switch */
+
+ /* Reset iterator */
+ ret = H5Ssel_iter_reset(iter_id, sid);
+ CHECK(ret, FAIL, "H5Ssel_iter_reset");
+ } /* end for */
+
+ /* Close selection iterator */
+ ret = H5Ssel_iter_close(iter_id);
+ CHECK(ret, FAIL, "H5Ssel_iter_close");
+
+ /* Point selection which will merge into smaller # of sequences */
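+ /* Points that are adjacent in row-major order ((0,9)-(0,11) and (1,4)-(1,6)) coalesce, so the 10 points yield 6 sequences totaling 10 bytes */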
+ coord1[0][0] = 0;
+ coord1[0][1] = 9;
+ coord1[1][0] = 0;
+ coord1[1][1] = 10;
+ coord1[2][0] = 0;
+ coord1[2][1] = 11;
+ coord1[3][0] = 0;
+ coord1[3][1] = 6;
+ coord1[4][0] = 1;
+ coord1[4][1] = 8;
+ coord1[5][0] = 2;
+ coord1[5][1] = 10;
+ coord1[6][0] = 0;
+ coord1[6][1] = 11;
+ coord1[7][0] = 1;
+ coord1[7][1] = 4;
+ coord1[8][0] = 1;
+ coord1[8][1] = 5;
+ coord1[9][0] = 1;
+ coord1[9][1] = 6;
+ ret = H5Sselect_elements(sid, H5S_SELECT_SET, (size_t)POINT1_NPOINTS, (const hsize_t *)coord1);
+ CHECK(ret, FAIL, "H5Sselect_elements");
+
+ /* Create selection iterator object */
+ iter_id = H5Ssel_iter_create(sid, (size_t)1, (unsigned)sel_iter_flags);
+ CHECK(iter_id, FAIL, "H5Ssel_iter_create");
+
+ /* Try retrieving all sequences */
+ ret = H5Ssel_iter_get_seq_list(iter_id, (size_t)SEL_ITER_MAX_SEQ, (size_t)(1024 * 1024), &nseq,
+ &nbytes, off, len);
+ CHECK(ret, FAIL, "H5Ssel_iter_get_seq_list");
+ VERIFY(nseq, 6, "H5Ssel_iter_get_seq_list");
+ VERIFY(nbytes, 10, "H5Ssel_iter_get_seq_list");
+
+ /* Reset iterator */
+ ret = H5Ssel_iter_reset(iter_id, sid);
+ CHECK(ret, FAIL, "H5Ssel_iter_reset");
+
+ /* Try retrieving all sequences again */
+ ret = H5Ssel_iter_get_seq_list(iter_id, (size_t)SEL_ITER_MAX_SEQ, (size_t)(1024 * 1024), &nseq,
+ &nbytes, off, len);
+ CHECK(ret, FAIL, "H5Ssel_iter_get_seq_list");
+ VERIFY(nseq, 6, "H5Ssel_iter_get_seq_list");
+ VERIFY(nbytes, 10, "H5Ssel_iter_get_seq_list");
+
+ /* Close selection iterator */
+ ret = H5Ssel_iter_close(iter_id);
+ CHECK(ret, FAIL, "H5Ssel_iter_close");
+
+ /* Select irregular hyperslab, which will merge into smaller # of sequences */
+ start[0] = 3;
+ start[1] = 0;
+ stride[0] = 2;
+ stride[1] = 2;
+ count[0] = 2;
+ count[1] = 5;
+ block[0] = 1;
+ block[1] = 1;
+ ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ start[0] = 3;
+ start[1] = 3;
+ stride[0] = 2;
+ stride[1] = 2;
+ count[0] = 2;
+ count[1] = 5;
+ block[0] = 1;
+ block[1] = 1;
+ ret = H5Sselect_hyperslab(sid, H5S_SELECT_OR, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
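+ /* Within each selected row the two strided slabs interleave into runs at columns {0}, {2-9}, and {11}: 3 sequences per row, 6 total, 20 bytes */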
+
+ /* Create selection iterator object */
+ iter_id = H5Ssel_iter_create(sid, (size_t)1, (unsigned)sel_iter_flags);
+ CHECK(iter_id, FAIL, "H5Ssel_iter_create");
+
+ /* Try retrieving all sequences */
+ ret = H5Ssel_iter_get_seq_list(iter_id, (size_t)SEL_ITER_MAX_SEQ, (size_t)(1024 * 1024), &nseq,
+ &nbytes, off, len);
+ CHECK(ret, FAIL, "H5Ssel_iter_get_seq_list");
+ VERIFY(nseq, 6, "H5Ssel_iter_get_seq_list");
+ VERIFY(nbytes, 20, "H5Ssel_iter_get_seq_list");
+
+ /* Reset iterator */
+ ret = H5Ssel_iter_reset(iter_id, sid);
+ CHECK(ret, FAIL, "H5Ssel_iter_reset");
+
+ /* Try retrieving all sequences again */
+ ret = H5Ssel_iter_get_seq_list(iter_id, (size_t)SEL_ITER_MAX_SEQ, (size_t)(1024 * 1024), &nseq,
+ &nbytes, off, len);
+ CHECK(ret, FAIL, "H5Ssel_iter_get_seq_list");
+ VERIFY(nseq, 6, "H5Ssel_iter_get_seq_list");
+ VERIFY(nbytes, 20, "H5Ssel_iter_get_seq_list");
+
+ /* Close selection iterator */
+ ret = H5Ssel_iter_close(iter_id);
+ CHECK(ret, FAIL, "H5Ssel_iter_close");
+
+ } /* end for */
+
+ /* Close dataspace */
+ ret = H5Sclose(sid);
+ CHECK(ret, FAIL, "H5Sclose");
+} /* test_sel_iter() */
+
+/****************************************************************
+**
+** test_select_intersect_block(): Tests selections on a dataspace and
+** verifies that the "intersect block" routine works correctly.
+**
+****************************************************************/
+static void
+test_select_intersect_block(void)
+{
+ hid_t sid; /* Dataspace ID */
+ hsize_t dims1[] = {6, 12}; /* 2-D Dataspace dimensions */
+ hsize_t block_start[] = {1, 3}; /* Start offset for block */
+ hsize_t block_end[] = {2, 5}; /* End offset for block */
+ hsize_t block_end2[] = {0, 5}; /* Bad end offset for block */
+ hsize_t block_end3[] = {2, 2}; /* Another bad end offset for block */
+ hsize_t block_end4[] = {1, 3}; /* End offset that makes a single element block */
+ hsize_t coord[10][2]; /* Coordinates for point selection */
+ hsize_t start[2]; /* Starting location of hyperslab */
+ hsize_t stride[2]; /* Stride of hyperslab */
+ hsize_t count[2]; /* Element count of hyperslab */
+ hsize_t block[2]; /* Block size of hyperslab */
+ htri_t status; /* Intersection status */
+ herr_t ret; /* Generic return value */
+
+ /* Output message about test being performed */
+ MESSAGE(6, ("Testing Dataspace Selection Block Intersection\n"));
+
+ /* Create dataspace */
+ sid = H5Screate_simple(2, dims1, NULL);
+ CHECK(sid, FAIL, "H5Screate_simple");
+
+ /* Try intersection calls with bad parameters */
+ H5E_BEGIN_TRY
+ { /* Bad dataspace ID */
+ status = H5Sselect_intersect_block(H5I_INVALID_HID, block_start, block_end);
+ }
+ H5E_END_TRY;
+ VERIFY(status, FAIL, "H5Sselect_intersect_block");
+ H5E_BEGIN_TRY
+ { /* Bad start pointer */
+ status = H5Sselect_intersect_block(sid, NULL, block_end);
+ }
+ H5E_END_TRY;
+ VERIFY(status, FAIL, "H5Sselect_intersect_block");
+ H5E_BEGIN_TRY
+ { /* Bad end pointer */
+ status = H5Sselect_intersect_block(sid, block_start, NULL);
+ }
+ H5E_END_TRY;
+ VERIFY(status, FAIL, "H5Sselect_intersect_block");
+ H5E_BEGIN_TRY
+ { /* Invalid block */
+ status = H5Sselect_intersect_block(sid, block_start, block_end2);
+ }
+ H5E_END_TRY;
+ VERIFY(status, FAIL, "H5Sselect_intersect_block");
+ H5E_BEGIN_TRY
+ { /* Another invalid block */
+ status = H5Sselect_intersect_block(sid, block_start, block_end3);
+ }
+ H5E_END_TRY;
+ VERIFY(status, FAIL, "H5Sselect_intersect_block");
+
+ /* Set selection to 'none' */
+ ret = H5Sselect_none(sid);
+ CHECK(ret, FAIL, "H5Sselect_none");
+
+ /* Test block intersection with 'none' selection (always false) */
+ status = H5Sselect_intersect_block(sid, block_start, block_end);
+ VERIFY(status, FALSE, "H5Sselect_intersect_block");
+
+ /* Set selection to 'all' */
+ ret = H5Sselect_all(sid);
+ CHECK(ret, FAIL, "H5Sselect_all");
+
+ /* Test block intersection with 'all' selection (always true) */
+ status = H5Sselect_intersect_block(sid, block_start, block_end);
+ VERIFY(status, TRUE, "H5Sselect_intersect_block");
+
+ /* Select sequence of ten points */
+ coord[0][0] = 0;
+ coord[0][1] = 10;
+ coord[1][0] = 1;
+ coord[1][1] = 2;
+ coord[2][0] = 2;
+ coord[2][1] = 4;
+ coord[3][0] = 0;
+ coord[3][1] = 6;
+ coord[4][0] = 1;
+ coord[4][1] = 8;
+ coord[5][0] = 2;
+ coord[5][1] = 11;
+ coord[6][0] = 0;
+ coord[6][1] = 4;
+ coord[7][0] = 1;
+ coord[7][1] = 0;
+ coord[8][0] = 2;
+ coord[8][1] = 1;
+ coord[9][0] = 0;
+ coord[9][1] = 3;
+ ret = H5Sselect_elements(sid, H5S_SELECT_SET, (size_t)10, (const hsize_t *)coord);
+ CHECK(ret, FAIL, "H5Sselect_elements");
+
+ /* Test block intersection with 'point' selection */
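+ /* Point (2,4) lies inside the (1,3)-(2,5) block, but no point matches the single-element block at (1,3) */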
+ status = H5Sselect_intersect_block(sid, block_start, block_end);
+ VERIFY(status, TRUE, "H5Sselect_intersect_block");
+ status = H5Sselect_intersect_block(sid, block_start, block_end4);
+ VERIFY(status, FALSE, "H5Sselect_intersect_block");
+
+ /* Select single 4x6 hyperslab block at (2,1) */
+ start[0] = 2;
+ start[1] = 1;
+ stride[0] = 1;
+ stride[1] = 1;
+ count[0] = 4;
+ count[1] = 6;
+ block[0] = 1;
+ block[1] = 1;
+ ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* Test block intersection with single 'hyperslab' selection */
+ status = H5Sselect_intersect_block(sid, block_start, block_end);
+ VERIFY(status, TRUE, "H5Sselect_intersect_block");
+ status = H5Sselect_intersect_block(sid, block_start, block_end4);
+ VERIFY(status, FALSE, "H5Sselect_intersect_block");
+
+ /* 'OR' another hyperslab block in, making an irregular hyperslab selection */
+ start[0] = 3;
+ start[1] = 2;
+ stride[0] = 1;
+ stride[1] = 1;
+ count[0] = 4;
+ count[1] = 6;
+ block[0] = 1;
+ block[1] = 1;
+ ret = H5Sselect_hyperslab(sid, H5S_SELECT_OR, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* Test block intersection with 'hyperslab' selection */
+ status = H5Sselect_intersect_block(sid, block_start, block_end);
+ VERIFY(status, TRUE, "H5Sselect_intersect_block");
+ status = H5Sselect_intersect_block(sid, block_start, block_end4);
+ VERIFY(status, FALSE, "H5Sselect_intersect_block");
+
+ /* Select regular, strided hyperslab selection */
+ start[0] = 2;
+ start[1] = 1;
+ stride[0] = 2;
+ stride[1] = 2;
+ count[0] = 2;
+ count[1] = 4;
+ block[0] = 1;
+ block[1] = 1;
+ ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, start, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* Test block intersection with regular, strided 'hyperslab' selection */
+ status = H5Sselect_intersect_block(sid, block_start, block_end);
+ VERIFY(status, TRUE, "H5Sselect_intersect_block");
+ status = H5Sselect_intersect_block(sid, block_start, block_end4);
+ VERIFY(status, FALSE, "H5Sselect_intersect_block");
+
+ /* Close dataspace */
+ ret = H5Sclose(sid);
+ CHECK(ret, FAIL, "H5Sclose");
+} /* test_select_intersect_block() */
+
+/****************************************************************
+**
+** test_hyper_io_1d():
+** Test to verify that every selected 10th element in the 1-d file
+** dataspace is read correctly into the 1-d contiguous memory space.
+** This is modeled after the test scenario described in HDFFV-10585,
+** which demonstrated the hyperslab slowness. A fix to speed up
+** performance is in place to handle the special case of reading a 1-d
+** disjoint file dataspace into a 1-d single-block contiguous memory space.
+**
+****************************************************************/
+static void
+test_hyper_io_1d(void)
+{
+ hid_t fid; /* File ID */
+ hid_t did; /* Dataset ID */
+ hid_t sid, mid; /* Dataspace IDs */
+ hid_t dcpl; /* Dataset creation property list ID */
+ hsize_t dims[1], maxdims[1], dimsm[1]; /* Dataset dimension sizes */
+ hsize_t chunk_dims[1]; /* Chunk dimension size */
+ hsize_t offset[1]; /* Starting offset for hyperslab */
+ hsize_t stride[1]; /* Distance between blocks in the hyperslab selection */
+ hsize_t count[1]; /* # of blocks in the hyperslab selection */
+ hsize_t block[1]; /* Size of block in the hyperslab selection */
+ unsigned int wdata[CHUNKSZ]; /* Data to be written */
+ unsigned int rdata[NUM_ELEMENTS / 10]; /* Data to be read */
+ herr_t ret; /* Generic return value */
+ unsigned i; /* Local index variable */
+
+ /* Output message about test being performed */
+ MESSAGE(6, ("Testing Hyperslab I/O for 1-d single block memory space\n"));
+
+ for (i = 0; i < CHUNKSZ; i++)
+ wdata[i] = i;
+
+ /* Create the file */
+ fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(fid, H5I_INVALID_HID, "H5Fcreate");
+
+ /* Create file dataspace */
+ dims[0] = CHUNKSZ;
+ maxdims[0] = H5S_UNLIMITED;
+ sid = H5Screate_simple(RANK, dims, maxdims);
+ CHECK(sid, H5I_INVALID_HID, "H5Screate_simple");
+
+ /* Create memory dataspace */
+ dimsm[0] = CHUNKSZ;
+ mid = H5Screate_simple(RANK, dimsm, NULL);
+ CHECK(mid, H5I_INVALID_HID, "H5Screate_simple");
+
+ /* Set up to create a chunked dataset */
+ dcpl = H5Pcreate(H5P_DATASET_CREATE);
+ CHECK(dcpl, H5I_INVALID_HID, "H5Pcreate");
+
+ chunk_dims[0] = CHUNKSZ;
+ ret = H5Pset_chunk(dcpl, RANK, chunk_dims);
+ CHECK(ret, FAIL, "H5Pset_chunk");
+
+ /* Create a chunked dataset */
+ did = H5Dcreate2(fid, DNAME, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT);
+ CHECK(did, H5I_INVALID_HID, "H5Dcreate2");
+
+ /* Set up hyperslab selection for file dataspace */
+ offset[0] = 0;
+ stride[0] = 1;
+ count[0] = 1;
+ block[0] = CHUNKSZ;
+
+ /* Write to each chunk in the dataset */
+ for (i = 0; i < NUMCHUNKS; i++) {
+ /* Set the hyperslab selection */
+ ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, offset, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* Write to the dataset */
+ ret = H5Dwrite(did, H5T_NATIVE_INT, mid, sid, H5P_DEFAULT, wdata);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* Extend the dataset's dataspace */
+ if (i < (NUMCHUNKS - 1)) {
+ offset[0] = offset[0] + CHUNKSZ;
+ dims[0] = dims[0] + CHUNKSZ;
+ ret = H5Dset_extent(did, dims);
+ CHECK(ret, FAIL, "H5Dset_extent");
+
+ /* Get the dataset's current dataspace */
+ sid = H5Dget_space(did);
+ CHECK(sid, H5I_INVALID_HID, "H5Dget_space");
+ }
+ }
+
+ /* Closing */
+ ret = H5Sclose(sid);
+ CHECK(ret, FAIL, "H5Sclose");
+ ret = H5Sclose(mid);
+ CHECK(ret, FAIL, "H5Sclose");
+ ret = H5Dclose(did);
+ CHECK(ret, FAIL, "H5Dclose");
+ ret = H5Pclose(dcpl);
+ CHECK(ret, FAIL, "H5Pclose");
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Open the file */
+ fid = H5Fopen(FILENAME, H5F_ACC_RDONLY, H5P_DEFAULT);
+ CHECK(fid, H5I_INVALID_HID, "H5Fopen");
+
+ /* Open the dataset */
+ did = H5Dopen2(fid, DNAME, H5P_DEFAULT);
+ CHECK(did, H5I_INVALID_HID, "H5Dopen2");
+
+ /* Set up to read every 10th element in file dataspace */
+ offset[0] = 1;
+ stride[0] = 10;
+ count[0] = NUM_ELEMENTS / 10;
+ block[0] = 1;
+
+ /* Get the dataset's dataspace */
+ sid = H5Dget_space(did);
+ CHECK(sid, H5I_INVALID_HID, "H5Dget_space");
+ ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, offset, stride, count, block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* Set up contiguous memory dataspace for the selected elements */
+ dimsm[0] = count[0];
+ mid = H5Screate_simple(RANK, dimsm, NULL);
+ CHECK(mid, H5I_INVALID_HID, "H5Screate_simple");
+
+ /* Read all the selected 10th elements in the dataset into "rdata" */
+ ret = H5Dread(did, H5T_NATIVE_INT, mid, sid, H5P_DEFAULT, rdata);
+ CHECK(ret, FAIL, "H5Dread");
+
+ /* Verify data read is correct */
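+ /* Every 10th element starting at offset 1 lands on the repeating per-chunk data pattern, so the values alternate between 1 and 11 */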
+ for (i = 0; i < 6; i += 2) {
+ VERIFY(rdata[i], 1, "H5Dread\n");
+ VERIFY(rdata[i + 1], 11, "H5Dread\n");
+ }
+
+ /* Closing */
+ ret = H5Sclose(mid);
+ CHECK(ret, FAIL, "H5Sclose");
+ ret = H5Sclose(sid);
+ CHECK(ret, FAIL, "H5Sclose");
+ ret = H5Dclose(did);
+ CHECK(ret, FAIL, "H5Dclose");
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+
+} /* test_hyper_io_1d() */
+
+/****************************************************************
+**
+** test_h5s_set_extent_none:
+** Test to verify the behavior of dataspace code when passed
+** a dataspace modified by H5Sset_extent_none().
+**
+****************************************************************/
+static void
+test_h5s_set_extent_none(void)
+{
+ hid_t sid = H5I_INVALID_HID;
+ hid_t dst_sid = H5I_INVALID_HID;
+ hid_t null_sid = H5I_INVALID_HID;
+ int rank = 1;
+ hsize_t current_dims = 123;
+ H5S_class_t cls;
+ int out_rank;
+ hsize_t out_dims;
+ hsize_t out_maxdims;
+ hssize_t out_points;
+ htri_t equal;
+ herr_t ret;
+
+ /* Specific values here don't matter as we're just going to reset */
+ sid = H5Screate_simple(rank, &current_dims, NULL);
+ CHECK(sid, H5I_INVALID_HID, "H5Screate_simple");
+
+ /* Dataspace class will be H5S_NULL after this.
+ * In versions prior to 1.10.7 / 1.12.1 this would produce a
+ * dataspace with the internal H5S_NO_CLASS class.
+ */
+ ret = H5Sset_extent_none(sid);
+ CHECK(ret, FAIL, "H5Sset_extent_none");
+ cls = H5Sget_simple_extent_type(sid);
+ VERIFY(cls, H5S_NULL, "H5Sget_simple_extent_type");
+
+ /* Extent getters should generate normal results and not segfault.
+ */
+ out_rank = H5Sget_simple_extent_dims(sid, &out_dims, &out_maxdims);
+ VERIFY(out_rank, 0, "H5Sget_simple_extent_dims");
+ out_rank = H5Sget_simple_extent_ndims(sid);
+ VERIFY(out_rank, 0, "H5Sget_simple_extent_ndims");
+ out_points = H5Sget_simple_extent_npoints(sid);
+ VERIFY(out_points, 0, "H5Sget_simple_extent_npoints");
+
+ /* Check that copying the new (non-)extent works.
+ */
+ dst_sid = H5Screate_simple(rank, &current_dims, NULL);
+ CHECK(dst_sid, H5I_INVALID_HID, "H5Screate_simple");
+ ret = H5Sextent_copy(dst_sid, sid);
+ CHECK(ret, FAIL, "H5Sextent_copy");
+
+ /* Check that H5Sset_extent_none() produces the same extent as
+ * H5Screate(H5S_NULL).
+ */
+ null_sid = H5Screate(H5S_NULL);
+ CHECK(null_sid, H5I_INVALID_HID, "H5Screate");
+ equal = H5Sextent_equal(sid, null_sid);
+ VERIFY(equal, TRUE, "H5Sextent_equal");
+
+ /* Close */
+ ret = H5Sclose(sid);
+ CHECK(ret, FAIL, "H5Sclose");
+ ret = H5Sclose(dst_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+ ret = H5Sclose(null_sid);
+ CHECK(ret, FAIL, "H5Sclose");
+
+} /* test_h5s_set_extent_none() */
+
+/****************************************************************
+**
+** test_select(): Main H5S selection testing routine.
+**
+****************************************************************/
+void
+test_select(void)
+{
+ hid_t plist_id; /* Property list for reading random hyperslabs */
+ hid_t fapl; /* File access property list */
+ int mdc_nelmts; /* Metadata number of elements */
+ size_t rdcc_nelmts; /* Raw data number of elements */
+ size_t rdcc_nbytes; /* Raw data number of bytes */
+ double rdcc_w0; /* Raw data write percentage */
+ hssize_t offset[SPACE7_RANK] = {1, 1}; /* Offset for testing selection offsets */
+ const char *env_h5_drvr; /* File Driver value from environment */
+ herr_t ret; /* Generic return value */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing Selections\n"));
+
+ /* Get the VFD to use */
+ env_h5_drvr = HDgetenv(HDF5_DRIVER);
+ if (env_h5_drvr == NULL)
+ env_h5_drvr = "nomatch";
+
+ /* Create a dataset transfer property list */
+ plist_id = H5Pcreate(H5P_DATASET_XFER);
+ CHECK(plist_id, FAIL, "H5Pcreate");
+
+ /* test I/O with a very small buffer for reads */
+ ret = H5Pset_buffer(plist_id, (size_t)59, NULL, NULL);
+ CHECK(ret, FAIL, "H5Pset_buffer");
+
+ /* These next tests use the same file */
+ test_select_hyper(H5P_DEFAULT); /* Test basic H5S hyperslab selection code */
+ test_select_hyper(plist_id); /* Test basic H5S hyperslab selection code */
+ test_select_point(H5P_DEFAULT); /* Test basic H5S element selection code, also tests appending to existing
+ element selections */
+ test_select_point(plist_id); /* Test basic H5S element selection code, also tests appending to existing
+ element selections */
+ test_select_all(H5P_DEFAULT); /* Test basic all & none selection code */
+ test_select_all(plist_id); /* Test basic all & none selection code */
+ test_select_all_hyper(H5P_DEFAULT); /* Test basic all & none selection code */
+ test_select_all_hyper(plist_id); /* Test basic all & none selection code */
+
+ /* These next tests use the same file */
+ test_select_combo(); /* Test combined hyperslab & element selection code */
+ test_select_hyper_stride(H5P_DEFAULT); /* Test strided hyperslab selection code */
+ test_select_hyper_stride(plist_id); /* Test strided hyperslab selection code */
+ test_select_hyper_contig(H5T_STD_U16LE, H5P_DEFAULT); /* Test contiguous hyperslab selection code */
+ test_select_hyper_contig(H5T_STD_U16LE, plist_id); /* Test contiguous hyperslab selection code */
+ test_select_hyper_contig(H5T_STD_U16BE, H5P_DEFAULT); /* Test contiguous hyperslab selection code */
+ test_select_hyper_contig(H5T_STD_U16BE, plist_id); /* Test contiguous hyperslab selection code */
+ test_select_hyper_contig2(H5T_STD_U16LE,
+ H5P_DEFAULT); /* Test more contiguous hyperslab selection cases */
+ test_select_hyper_contig2(H5T_STD_U16LE, plist_id); /* Test more contiguous hyperslab selection cases */
+ test_select_hyper_contig2(H5T_STD_U16BE,
+ H5P_DEFAULT); /* Test more contiguous hyperslab selection cases */
+ test_select_hyper_contig2(H5T_STD_U16BE, plist_id); /* Test more contiguous hyperslab selection cases */
+ test_select_hyper_contig3(H5T_STD_U16LE,
+ H5P_DEFAULT); /* Test yet more contiguous hyperslab selection cases */
+ test_select_hyper_contig3(H5T_STD_U16LE,
+ plist_id); /* Test yet more contiguous hyperslab selection cases */
+ test_select_hyper_contig3(H5T_STD_U16BE,
+ H5P_DEFAULT); /* Test yet more contiguous hyperslab selection cases */
+ test_select_hyper_contig3(H5T_STD_U16BE,
+ plist_id); /* Test yet more contiguous hyperslab selection cases */
+#if 0
+ test_select_hyper_contig_dr(H5T_STD_U16LE, H5P_DEFAULT);
+ test_select_hyper_contig_dr(H5T_STD_U16LE, plist_id);
+ test_select_hyper_contig_dr(H5T_STD_U16BE, H5P_DEFAULT);
+ test_select_hyper_contig_dr(H5T_STD_U16BE, plist_id);
+#else
+ HDprintf("** SKIPPED a test due to file creation issues\n");
+#endif
+#if 0
+ test_select_hyper_checker_board_dr(H5T_STD_U16LE, H5P_DEFAULT);
+ test_select_hyper_checker_board_dr(H5T_STD_U16LE, plist_id);
+ test_select_hyper_checker_board_dr(H5T_STD_U16BE, H5P_DEFAULT);
+ test_select_hyper_checker_board_dr(H5T_STD_U16BE, plist_id);
+#else
+ HDprintf("** SKIPPED a test due to assertion in HDF5\n");
+#endif
+ test_select_hyper_copy(); /* Test hyperslab selection copying code */
+ test_select_point_copy(); /* Test point selection copying code */
+ test_select_hyper_offset(); /* Test selection offset code with hyperslabs */
+ test_select_hyper_offset2(); /* Test more selection offset code with hyperslabs */
+ test_select_point_offset(); /* Test selection offset code with elements */
+ test_select_hyper_union(); /* Test hyperslab union code */
+
+ /* Fancy hyperslab API tests */
+ test_select_hyper_union_stagger(); /* Test hyperslab union code for staggered slabs */
+ test_select_hyper_union_3d(); /* Test hyperslab union code for 3-D dataset */
+ test_select_hyper_valid_combination(); /* Test different input combinations */
+
+ /* The following tests are currently broken with the Direct VFD */
+ if (HDstrcmp(env_h5_drvr, "direct") != 0) {
+ test_select_hyper_and_2d(); /* Test hyperslab intersection (AND) code for 2-D dataset */
+ test_select_hyper_xor_2d(); /* Test hyperslab XOR code for 2-D dataset */
+ test_select_hyper_notb_2d(); /* Test hyperslab NOTB code for 2-D dataset */
+ test_select_hyper_nota_2d(); /* Test hyperslab NOTA code for 2-D dataset */
+ }
+
+ /* test the random hyperslab I/O with the default property list for reading */
+ test_select_hyper_union_random_5d(H5P_DEFAULT); /* Test hyperslab union code for random 5-D hyperslabs */
+
+ /* test random hyperslab I/O with a small buffer for reads */
+ test_select_hyper_union_random_5d(plist_id); /* Test hyperslab union code for random 5-D hyperslabs */
+
+ /* Create a file access property list */
+ fapl = H5Pcreate(H5P_FILE_ACCESS);
+ CHECK(fapl, FAIL, "H5Pcreate");
+
+ /* Get the default file access properties for caching */
+ ret = H5Pget_cache(fapl, &mdc_nelmts, &rdcc_nelmts, &rdcc_nbytes, &rdcc_w0);
+ CHECK(ret, FAIL, "H5Pget_cache");
+
+ /* Increase the size of the raw data cache */
+ rdcc_nbytes = 10 * 1024 * 1024;
+
+ /* Set the file access properties for caching */
+ ret = H5Pset_cache(fapl, mdc_nelmts, rdcc_nelmts, rdcc_nbytes, rdcc_w0);
+ CHECK(ret, FAIL, "H5Pset_cache");
+
+ /* Test reading in a large hyperslab with a chunked dataset */
+ test_select_hyper_chunk(fapl, H5P_DEFAULT);
+
+ /* Test reading in a large hyperslab with a chunked dataset a small amount at a time */
+ test_select_hyper_chunk(fapl, plist_id);
+
+ /* Close file access property list */
+ ret = H5Pclose(fapl);
+ CHECK(ret, FAIL, "H5Pclose");
+
+ /* Close dataset transfer property list */
+ ret = H5Pclose(plist_id);
+ CHECK(ret, FAIL, "H5Pclose");
+
+ /* More tests for checking validity of selections */
+ test_select_valid();
+
+ /* Tests for combining "all" and "none" selections with hyperslabs */
+ test_select_combine();
+
+ /* Test filling selections */
+ /* (Also tests iterating through each selection) */
+ test_select_fill_all();
+ test_select_fill_point(NULL);
+ test_select_fill_point(offset);
+ test_select_fill_hyper_simple(NULL);
+ test_select_fill_hyper_simple(offset);
+ test_select_fill_hyper_regular(NULL);
+ test_select_fill_hyper_regular(offset);
+ test_select_fill_hyper_irregular(NULL);
+ test_select_fill_hyper_irregular(offset);
+
+ /* Test 0-sized selections */
+ test_select_none();
+
+ /* Test selections on scalar dataspaces */
+ test_scalar_select();
+ test_scalar_select2();
+ test_scalar_select3();
+
+ /* Test "same shape" routine */
+ test_shape_same();
+
+ /* Test "same shape" routine for selections of different rank */
+ test_shape_same_dr();
+
+ /* Test "re-build" routine */
+ test_space_rebuild();
+
+ /* Test "update diminfo" routine */
+ test_space_update_diminfo();
+
+ /* Test point selections in chunked datasets */
+ test_select_point_chunk();
+
+ /* Test scalar dataspaces in chunked datasets */
+ test_select_scalar_chunk();
+#if 0
+ /* Test using selection offset on hyperslab in chunked dataset */
+ test_select_hyper_chunk_offset();
+ test_select_hyper_chunk_offset2();
+#else
+ HDprintf("** SKIPPED a test due to assertion in HDF5\n");
+#endif
+
+ /* Test selection bounds with & without offsets */
+ test_select_bounds();
+
+ /* Test 'regular' hyperslab query routines */
+ test_hyper_regular();
+
+ /* Test unlimited hyperslab selections */
+ test_hyper_unlim();
+
+ /* Test the consistency of internal data structures of selection */
+ test_internal_consistency();
+
+ /* Test irregular selection I/O */
+ test_irreg_io();
+
+ /* Test selection iterators */
+ test_sel_iter();
+
+ /* Test selection intersection with block */
+ test_select_intersect_block();
+
+ /* Test reading of 1-d disjoint file space to 1-d single block memory space */
+ test_hyper_io_1d();
+
+ /* Test H5Sset_extent_none() functionality after we updated it to set
+ * the class to H5S_NULL instead of H5S_NO_CLASS.
+ */
+ test_h5s_set_extent_none();
+
+} /* test_select() */
+
+/*-------------------------------------------------------------------------
+ * Function: cleanup_select
+ *
+ * Purpose: Cleanup temporary test files
+ *
+ * Return: none
+ *
+ * Programmer: Albert Cheng
+ * July 2, 1998
+ *
+ *-------------------------------------------------------------------------
+ */
+void
+cleanup_select(void)
+{
+ H5Fdelete(FILENAME, H5P_DEFAULT);
+}
diff --git a/test/API/ttime.c b/test/API/ttime.c
new file mode 100644
index 0000000..74128fd
--- /dev/null
+++ b/test/API/ttime.c
@@ -0,0 +1,231 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://www.hdfgroup.org/licenses. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/***********************************************************
+ *
+ * Test program: ttime
+ *
+ * Test the Time Datatype functionality
+ *
+ *************************************************************/
+
+#include "testhdf5.h"
+
+#define DATAFILE "ttime.h5"
+#ifdef NOT_YET
+#define DATASETNAME "Dataset"
+#endif /* NOT_YET */
+
+/****************************************************************
+**
+** test_time_commit(): Test committing time datatypes to a file
+**
+****************************************************************/
+static void
+test_time_commit(void)
+{
+ hid_t file_id, tid; /* identifiers */
+ herr_t status;
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing Committing Time Datatypes\n"));
+
+ /* Create a new file using default properties. */
+ file_id = H5Fcreate(DATAFILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(file_id, FAIL, "H5Fcreate");
+
+ tid = H5Tcopy(H5T_UNIX_D32LE);
+ CHECK(tid, FAIL, "H5Tcopy");
+ status = H5Tcommit2(file_id, "Committed D32LE type", tid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(status, FAIL, "H5Tcommit2");
+ status = H5Tclose(tid);
+ CHECK(status, FAIL, "H5Tclose");
+
+ tid = H5Tcopy(H5T_UNIX_D32BE);
+ CHECK(tid, FAIL, "H5Tcopy");
+ status = H5Tcommit2(file_id, "Committed D32BE type", tid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(status, FAIL, "H5Tcommit2");
+ status = H5Tclose(tid);
+ CHECK(status, FAIL, "H5Tclose");
+
+ tid = H5Tcopy(H5T_UNIX_D64LE);
+ CHECK(tid, FAIL, "H5Tcopy");
+ status = H5Tcommit2(file_id, "Committed D64LE type", tid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(status, FAIL, "H5Tcommit2");
+ status = H5Tclose(tid);
+ CHECK(status, FAIL, "H5Tclose");
+
+ tid = H5Tcopy(H5T_UNIX_D64BE);
+ CHECK(tid, FAIL, "H5Tcopy");
+ status = H5Tcommit2(file_id, "Committed D64BE type", tid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(status, FAIL, "H5Tcommit2");
+ status = H5Tclose(tid);
+ CHECK(status, FAIL, "H5Tclose");
+
+ /* Close the file. */
+ status = H5Fclose(file_id);
+ CHECK(status, FAIL, "H5Fclose");
+
+ file_id = H5Fopen(DATAFILE, H5F_ACC_RDWR, H5P_DEFAULT);
+ CHECK(file_id, FAIL, "H5Fopen");
+
+ tid = H5Topen2(file_id, "Committed D32LE type", H5P_DEFAULT);
+ CHECK(tid, FAIL, "H5Topen2");
+
+ if (!H5Tequal(tid, H5T_UNIX_D32LE))
+ TestErrPrintf("H5T_UNIX_D32LE datatype not found\n");
+
+ status = H5Tclose(tid);
+ CHECK(status, FAIL, "H5Tclose");
+
+ tid = H5Topen2(file_id, "Committed D32BE type", H5P_DEFAULT);
+ CHECK(tid, FAIL, "H5Topen2");
+
+ if (!H5Tequal(tid, H5T_UNIX_D32BE))
+ TestErrPrintf("H5T_UNIX_D32BE datatype not found\n");
+
+ status = H5Tclose(tid);
+ CHECK(status, FAIL, "H5Tclose");
+
+ tid = H5Topen2(file_id, "Committed D64LE type", H5P_DEFAULT);
+ CHECK(tid, FAIL, "H5Topen2");
+
+ if (!H5Tequal(tid, H5T_UNIX_D64LE))
+ TestErrPrintf("H5T_UNIX_D64LE datatype not found");
+
+ status = H5Tclose(tid);
+ CHECK(status, FAIL, "H5Tclose");
+
+ tid = H5Topen2(file_id, "Committed D64BE type", H5P_DEFAULT);
+ CHECK(tid, FAIL, "H5Topen2");
+
+ if (!H5Tequal(tid, H5T_UNIX_D64BE))
+ TestErrPrintf("H5T_UNIX_D64BE datatype not found");
+
+ status = H5Tclose(tid);
+ CHECK(status, FAIL, "H5Tclose");
+
+ status = H5Fclose(file_id);
+ CHECK(status, FAIL, "H5Fclose");
+}
+
+#ifdef NOT_YET
+/****************************************************************
+**
+** test_time_io(): Test writing time data to a dataset
+**
+****************************************************************/
+static void
+test_time_io(void)
+{
+ hid_t fid; /* File identifier */
+ hid_t dsid; /* Dataset identifier */
+ hid_t tid; /* Datatype identifier */
+ hid_t sid; /* Dataspace identifier */
+ time_t timenow, timethen; /* Times */
+ herr_t status;
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing Committing Time Datatypes\n"));
+
+ /* Create a new file using default properties. */
+ fid = H5Fcreate(DATAFILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(fid, FAIL, "H5Fcreate");
+
+ /* Create a scalar dataspace */
+ sid = H5Screate(H5S_SCALAR);
+ CHECK(sid, FAIL, "H5Screate");
+
+ /* Create a dataset with a time datatype */
+ dsid = H5Dcreate2(fid, DATASETNAME, H5T_UNIX_D32LE, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(dsid, FAIL, "H5Dcreate2");
+
+ /* Initialize time data value */
+ timenow = HDtime(NULL);
+
+ /* Write time to dataset */
+ status = H5Dwrite(dsid, H5T_UNIX_D32LE, H5S_ALL, H5S_ALL, H5P_DEFAULT, &timenow);
+ CHECK(status, FAIL, "H5Dwrite");
+
+ /* Close objects */
+ status = H5Dclose(dsid);
+ CHECK(status, FAIL, "H5Dclose");
+
+ status = H5Sclose(sid);
+ CHECK(status, FAIL, "H5Sclose");
+
+ status = H5Fclose(fid);
+ CHECK(status, FAIL, "H5Fclose");
+
+ /* Open file and dataset, read time back and print it in calendar format */
+ fid = H5Fopen(DATAFILE, H5F_ACC_RDWR, H5P_DEFAULT);
+ CHECK(fid, FAIL, "H5Fopen");
+
+ dsid = H5Dopen2(fid, DATASETNAME, H5P_DEFAULT);
+ CHECK(dsid, FAIL, "H5Dopen2");
+
+ tid = H5Dget_type(dsid);
+ CHECK(tid, FAIL, "H5Dget_type");
+ if (H5Tget_class(tid) == H5T_TIME)
+ HDfprintf(stderr, "datatype class is H5T_TIME\n");
+ status = H5Tclose(tid);
+ CHECK(status, FAIL, "H5Tclose");
+
+ status = H5Dread(dsid, H5T_UNIX_D32LE, H5S_ALL, H5S_ALL, H5P_DEFAULT, &timethen);
+ CHECK(status, FAIL, "H5Dread");
+ HDfprintf(stderr, "time written was: %s\n", HDctime(&timethen));
+
+ status = H5Dclose(dsid);
+ CHECK(status, FAIL, "H5Dclose");
+
+ status = H5Fclose(fid);
+ CHECK(status, FAIL, "H5Fclose");
+}
+#endif /* NOT_YET */
+
+/****************************************************************
+**
+** test_time(): Main time datatype testing routine.
+**
+****************************************************************/
+void
+test_time(void)
+{
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing Time Datatypes\n"));
+
+ test_time_commit(); /* Test committing time datatypes to a file */
+#ifdef NOT_YET
+ test_time_io(); /* Test writing time data to a dataset */
+#endif /* NOT_YET */
+
+} /* test_time() */
+
+/*-------------------------------------------------------------------------
+ * Function: cleanup_time
+ *
+ * Purpose: Cleanup temporary test files
+ *
+ * Return: none
+ *
+ * Programmer: Quincey Koziol
+ * October 19, 2000
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+void
+cleanup_time(void)
+{
+ H5Fdelete(DATAFILE, H5P_DEFAULT);
+}
diff --git a/test/API/tunicode.c b/test/API/tunicode.c
new file mode 100644
index 0000000..fa59456
--- /dev/null
+++ b/test/API/tunicode.c
@@ -0,0 +1,867 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://www.hdfgroup.org/licenses. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/* Unicode test */
+#include "testhdf5.h"
+
+#define NUM_CHARS 16
+#define MAX_STRING_LENGTH ((NUM_CHARS * 4) + 1) /* Max length in bytes */
+#define MAX_PATH_LENGTH (MAX_STRING_LENGTH + 20) /* Max length in bytes */
+#define MAX_CODE_POINT 0x200000
+#define FILENAME "unicode.h5"
+/* A buffer to hold two copies of the UTF-8 string */
+#define LONG_BUF_SIZE (2 * MAX_STRING_LENGTH + 4)
+
+#define DSET1_NAME "fl_string_dataset"
+#define DSET3_NAME "dataset3"
+#define DSET4_NAME "dataset4"
+#define VL_DSET1_NAME "vl_dset_1"
+#define GROUP1_NAME "group1"
+#define GROUP2_NAME "group2"
+#define GROUP3_NAME "group3"
+#define GROUP4_NAME "group4"
+
+#define RANK 1
+#define COMP_INT_VAL 7
+#define COMP_FLOAT_VAL (-42.0F)
+#define COMP_DOUBLE_VAL 42.0
+
+/* Test function prototypes */
+void test_fl_string(hid_t fid, const char *string);
+void test_strpad(hid_t fid, const char *string);
+void test_vl_string(hid_t fid, const char *string);
+void test_objnames(hid_t fid, const char *string);
+void test_attrname(hid_t fid, const char *string);
+void test_compound(hid_t fid, const char *string);
+void test_enum(hid_t fid, const char *string);
+void test_opaque(hid_t fid, const char *string);
+
+/* Utility function prototypes */
+static hid_t mkstr(size_t len, H5T_str_t strpad);
+unsigned int write_char(unsigned int c, char *test_string, unsigned int cur_pos);
+void dump_string(const char *string);
+
+/*
+ * test_fl_string
+ * Tests that UTF-8 can be used for fixed-length string data.
+ * Writes the string to a dataset and reads it back again.
+ */
+void
+test_fl_string(hid_t fid, const char *string)
+{
+ hid_t dtype_id, space_id, dset_id;
+ hsize_t dims = 1;
+ char read_buf[MAX_STRING_LENGTH];
+ H5T_cset_t cset;
+ herr_t ret;
+
+ /* Create the datatype, ensure that the character set behaves
+ * correctly (it should default to ASCII and can be set to UTF8)
+ */
+ dtype_id = H5Tcopy(H5T_C_S1);
+ CHECK(dtype_id, FAIL, "H5Tcopy");
+ ret = H5Tset_size(dtype_id, (size_t)MAX_STRING_LENGTH);
+ CHECK(ret, FAIL, "H5Tset_size");
+ cset = H5Tget_cset(dtype_id);
+ VERIFY(cset, H5T_CSET_ASCII, "H5Tget_cset");
+ ret = H5Tset_cset(dtype_id, H5T_CSET_UTF8);
+ CHECK(ret, FAIL, "H5Tset_cset");
+ cset = H5Tget_cset(dtype_id);
+ VERIFY(cset, H5T_CSET_UTF8, "H5Tget_cset");
+
+ /* Create dataspace for a dataset */
+ space_id = H5Screate_simple(RANK, &dims, NULL);
+ CHECK(space_id, FAIL, "H5Screate_simple");
+
+ /* Create a dataset */
+ dset_id = H5Dcreate2(fid, DSET1_NAME, dtype_id, space_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(dset_id, FAIL, "H5Dcreate2");
+
+ /* Write UTF-8 string to dataset */
+ ret = H5Dwrite(dset_id, dtype_id, H5S_ALL, H5S_ALL, H5P_DEFAULT, string);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* Read string back and make sure it is unchanged */
+ ret = H5Dread(dset_id, dtype_id, H5S_ALL, H5S_ALL, H5P_DEFAULT, read_buf);
+ CHECK(ret, FAIL, "H5Dread");
+
+ VERIFY(HDstrcmp(string, read_buf), 0, "strcmp");
+
+ /* Close all */
+ ret = H5Dclose(dset_id);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ ret = H5Tclose(dtype_id);
+ CHECK(ret, FAIL, "H5Tclose");
+ ret = H5Sclose(space_id);
+ CHECK(ret, FAIL, "H5Sclose");
+}
+
+/*
+ * test_strpad
+ * Tests string padding for a UTF-8 string.
+ * Converts strings to shorter and then longer strings.
+ * Borrows heavily from dtypes.c, but is more complicated because
+ * the string is randomly generated.
+ */
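+/* The padding modes exercised below: H5T_STR_NULLTERM strings are
+ * NUL-terminated like C strings, H5T_STR_NULLPAD strings are padded with NUL
+ * bytes but need not be terminated, and H5T_STR_SPACEPAD strings are padded
+ * with spaces (Fortran-style). */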
+void
+test_strpad(hid_t H5_ATTR_UNUSED fid, const char *string)
+{
+ /* buf is used to hold the data that H5Tconvert operates on. */
+ char buf[LONG_BUF_SIZE];
+
+ /* cmpbuf holds the output that H5Tconvert should produce,
+ * to compare against the actual output. */
+ char cmpbuf[LONG_BUF_SIZE];
+
+ /* new_string is a slightly modified version of the UTF-8
+ * string to make the tests run more smoothly. */
+ char new_string[MAX_STRING_LENGTH + 2];
+
+ size_t length; /* Length of new_string in bytes */
+ size_t small_len; /* Size of the small datatype */
+ size_t big_len; /* Size of the larger datatype */
+ hid_t src_type, dst_type;
+ herr_t ret;
+
+ /* The following tests are simpler if the UTF-8 string contains
+ * the right number of bytes (even or odd, depending on the test).
+ * We create a 'new_string' whose length is convenient by prepending
+ * an 'x' to 'string' when necessary. */
+ length = HDstrlen(string);
+ if (length % 2 != 1) {
+ HDstrcpy(new_string, "x");
+ HDstrcat(new_string, string);
+ length++;
+ }
+ else {
+ HDstrcpy(new_string, string);
+ }
+
+ /* Convert a null-terminated string to a shorter and longer null
+ * terminated string. */
+
+ /* Create a src_type that holds the UTF-8 string and its final NULL */
+ big_len = length + 1; /* +1 byte for final NULL */
+ HDassert((2 * big_len) <= sizeof(cmpbuf));
+ src_type = mkstr(big_len, H5T_STR_NULLTERM);
+ CHECK(src_type, FAIL, "mkstr");
+ /* Create a dst_type that holds half of the UTF-8 string and a final
+ * NULL */
+ small_len = (length + 1) / 2;
+ dst_type = mkstr(small_len, H5T_STR_NULLTERM);
+ CHECK(dst_type, FAIL, "mkstr");
+
+ /* Fill the buffer with two copies of the UTF-8 string, each with a
+ * terminating NULL. It will look like "abcdefg\0abcdefg\0". */
+ HDstrncpy(buf, new_string, big_len);
+ HDstrncpy(&buf[big_len], new_string, big_len);
+
+ ret = H5Tconvert(src_type, dst_type, (size_t)2, buf, NULL, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Tconvert");
+
+ /* After conversion, the buffer should look like
+ * "abc\0abc\0abcdefg\0". Note that this is just what the bytes look
+ * like; UTF-8 characters may well have been truncated.
+ * To check that the conversion worked properly, we'll build this
+ * string manually. */
+ HDstrncpy(cmpbuf, new_string, small_len - 1);
+ cmpbuf[small_len - 1] = '\0';
+ HDstrncpy(&cmpbuf[small_len], new_string, small_len - 1);
+ cmpbuf[2 * small_len - 1] = '\0';
+ HDstrcpy(&cmpbuf[2 * small_len], new_string);
+
+ VERIFY(HDmemcmp(buf, cmpbuf, 2 * big_len), 0, "HDmemcmp");
+
+ /* Now convert from smaller datatype to bigger datatype. This should
+ * leave our buffer looking like: "abc\0\0\0\0\0abc\0\0\0\0\0" */
+ ret = H5Tconvert(dst_type, src_type, (size_t)2, buf, NULL, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Tconvert");
+
+ /* First fill the buffer with NULLs */
+ HDmemset(cmpbuf, '\0', (size_t)LONG_BUF_SIZE);
+ /* Copy in the characters */
+ HDstrncpy(cmpbuf, new_string, small_len - 1);
+ HDstrncpy(&cmpbuf[big_len], new_string, small_len - 1);
+
+ VERIFY(HDmemcmp(buf, cmpbuf, 2 * big_len), 0, "HDmemcmp");
+
+ ret = H5Tclose(src_type);
+ CHECK(ret, FAIL, "H5Tclose");
+ ret = H5Tclose(dst_type);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ /* Now test null padding. Null-padded strings do *not* need
+ * terminating NULLs, so the sizes of the datatypes are slightly
+ * different and we want a string with an even number of characters. */
+ length = HDstrlen(string);
+ if (length % 2 != 0) {
+ HDstrcpy(new_string, "x");
+ HDstrcat(new_string, string);
+ length++;
+ }
+ else {
+ HDstrcpy(new_string, string);
+ }
+
+ /* Create a src_type that holds the UTF-8 string */
+ big_len = length;
+ HDassert((2 * big_len) <= sizeof(cmpbuf));
+ src_type = mkstr(big_len, H5T_STR_NULLPAD);
+ CHECK(src_type, FAIL, "mkstr");
+ /* Create a dst_type that holds half of the UTF-8 string */
+ small_len = length / 2;
+ dst_type = mkstr(small_len, H5T_STR_NULLPAD);
+ CHECK(dst_type, FAIL, "mkstr");
+
+ /* Fill the buffer with two copies of the UTF-8 string.
+ * It will look like "abcdefghabcdefgh". */
+ HDstrncpy(buf, new_string, big_len);
+ HDstrncpy(&buf[big_len], new_string, big_len);
+
+ ret = H5Tconvert(src_type, dst_type, (size_t)2, buf, NULL, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Tconvert");
+
+ /* After conversion, the buffer should look like
+ * "abcdabcdabcdefgh". Note that this is just what the bytes look
+ * like; UTF-8 characters may well have been truncated.
+ * To check that the conversion worked properly, we'll build this
+ * string manually. */
+ HDstrncpy(cmpbuf, new_string, small_len);
+ HDstrncpy(&cmpbuf[small_len], new_string, small_len);
+ HDstrncpy(&cmpbuf[2 * small_len], new_string, big_len);
+
+ VERIFY(HDmemcmp(buf, cmpbuf, 2 * big_len), 0, "HDmemcmp");
+
+ /* Now convert from smaller datatype to bigger datatype. This should
+ * leave our buffer looking like: "abcd\0\0\0\0abcd\0\0\0\0" */
+ ret = H5Tconvert(dst_type, src_type, (size_t)2, buf, NULL, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Tconvert");
+
+ /* First fill the buffer with NULLs */
+ HDmemset(cmpbuf, '\0', (size_t)LONG_BUF_SIZE);
+ /* Copy in the characters */
+ HDstrncpy(cmpbuf, new_string, small_len);
+ HDstrncpy(&cmpbuf[big_len], new_string, small_len);
+
+ VERIFY(HDmemcmp(buf, cmpbuf, 2 * big_len), 0, "HDmemcmp");
+
+ ret = H5Tclose(src_type);
+ CHECK(ret, FAIL, "H5Tclose");
+ ret = H5Tclose(dst_type);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ /* Test space padding. This is very similar to null-padding; we can
+ use the same values of length, small_len, and big_len. */
+
+ src_type = mkstr(big_len, H5T_STR_SPACEPAD);
+ CHECK(src_type, FAIL, "mkstr");
+ dst_type = mkstr(small_len, H5T_STR_SPACEPAD);
+ CHECK(src_type, FAIL, "mkstr");
+
+ /* Fill the buffer with two copies of the UTF-8 string.
+ * It will look like "abcdefghabcdefgh". */
+ HDstrcpy(buf, new_string);
+ HDstrcpy(&buf[big_len], new_string);
+
+ ret = H5Tconvert(src_type, dst_type, (size_t)2, buf, NULL, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Tconvert");
+
+ /* After conversion, the buffer should look like
+ * "abcdabcdabcdefgh". Note that this is just what the bytes look
+ * like; UTF-8 characters may have been truncated.
+ * To check that the conversion worked properly, we'll build this
+ * string manually. */
+ HDstrncpy(cmpbuf, new_string, small_len);
+ HDstrncpy(&cmpbuf[small_len], new_string, small_len);
+ HDstrncpy(&cmpbuf[2 * small_len], new_string, big_len);
+
+ VERIFY(HDmemcmp(buf, cmpbuf, 2 * big_len), 0, "HDmemcmp");
+
+ /* Now convert from smaller datatype to bigger datatype. This should
+ * leave our buffer looking like: "abcd abcd " */
+ ret = H5Tconvert(dst_type, src_type, (size_t)2, buf, NULL, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Tconvert");
+
+ /* First fill the buffer with spaces */
+ HDmemset(cmpbuf, ' ', (size_t)LONG_BUF_SIZE);
+ /* Copy in the characters */
+ HDstrncpy(cmpbuf, new_string, small_len);
+ HDstrncpy(&cmpbuf[big_len], new_string, small_len);
+
+ VERIFY(HDmemcmp(buf, cmpbuf, 2 * big_len), 0, "HDmemcmp");
+
+ ret = H5Tclose(src_type);
+ CHECK(ret, FAIL, "H5Tclose");
+ ret = H5Tclose(dst_type);
+ CHECK(ret, FAIL, "H5Tclose");
+}
+
+/*
+ * test_vl_string
+ * Tests variable-length string datatype with UTF-8 strings.
+ */
+void
+test_vl_string(hid_t fid, const char *string)
+{
+ hid_t type_id, space_id, dset_id;
+ hsize_t dims = 1;
+ hsize_t size; /* Number of bytes used */
+ char *read_buf[1];
+ herr_t ret;
+
+ /* Create dataspace for datasets */
+ space_id = H5Screate_simple(RANK, &dims, NULL);
+ CHECK(space_id, FAIL, "H5Screate_simple");
+
+ /* Create a datatype to refer to */
+ type_id = H5Tcopy(H5T_C_S1);
+ CHECK(type_id, FAIL, "H5Tcopy");
+ ret = H5Tset_size(type_id, H5T_VARIABLE);
+ CHECK(ret, FAIL, "H5Tset_size");
+
+ /* Create a dataset */
+ dset_id = H5Dcreate2(fid, VL_DSET1_NAME, type_id, space_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(dset_id, FAIL, "H5Dcreate2");
+
+ /* Write dataset to disk */
+ ret = H5Dwrite(dset_id, type_id, H5S_ALL, H5S_ALL, H5P_DEFAULT, &string);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* Make certain the correct amount of memory will be used */
+ ret = H5Dvlen_get_buf_size(dset_id, type_id, space_id, &size);
+ CHECK(ret, FAIL, "H5Dvlen_get_buf_size");
+ VERIFY(size, (hsize_t)HDstrlen(string) + 1, "H5Dvlen_get_buf_size");
+
+ /* Read dataset from disk */
+ ret = H5Dread(dset_id, type_id, H5S_ALL, H5S_ALL, H5P_DEFAULT, read_buf);
+ CHECK(ret, FAIL, "H5Dread");
+
+ /* Compare data read in */
+ VERIFY(HDstrcmp(string, read_buf[0]), 0, "strcmp");
+
+ /* Reclaim the read VL data */
+ ret = H5Treclaim(type_id, space_id, H5P_DEFAULT, read_buf);
+ CHECK(ret, FAIL, "H5Treclaim");
+
+ /* Close all */
+ ret = H5Dclose(dset_id);
+ CHECK(ret, FAIL, "H5Dclose");
+ ret = H5Tclose(type_id);
+ CHECK(ret, FAIL, "H5Tclose");
+ ret = H5Sclose(space_id);
+ CHECK(ret, FAIL, "H5Sclose");
+}
+
+/*
+ * test_objnames
+ * Tests that UTF-8 can be used for object names in the file.
+ * Tests groups, datasets, named datatypes, and soft links.
+ * Note that this test doesn't actually mark the names as being
+ * in UTF-8. At the time this test was written, that feature
+ * didn't exist in HDF5, and when the character encoding property
+ *              was added to links, it didn't change how they were stored in
+ *              the file. -JML 2/2/2006
+ */
+void
+test_objnames(hid_t fid, const char *string)
+{
+ hid_t grp_id, grp1_id, grp2_id, grp3_id;
+ hid_t type_id, dset_id, space_id;
+#if 0
+ char read_buf[MAX_STRING_LENGTH];
+#endif
+ char path_buf[MAX_PATH_LENGTH];
+ hsize_t dims = 1;
+#if 0
+ hobj_ref_t obj_ref;
+ ssize_t size;
+#endif
+ herr_t ret;
+
+ /* Create a group with a UTF-8 name */
+ grp_id = H5Gcreate2(fid, string, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(grp_id, FAIL, "H5Gcreate2");
+#if 0
+    /* Set a comment on the group to test that we can access the group.
+     * Also test that UTF-8 comments can be read.
+     */
+ ret = H5Oset_comment_by_name(fid, string, string, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Oset_comment_by_name");
+ size = H5Oget_comment_by_name(fid, string, read_buf, (size_t)MAX_STRING_LENGTH, H5P_DEFAULT);
+ CHECK(size, FAIL, "H5Oget_comment_by_name");
+#endif
+ ret = H5Gclose(grp_id);
+ CHECK(ret, FAIL, "H5Gclose");
+#if 0
+ VERIFY(HDstrcmp(string, read_buf), 0, "strcmp");
+#endif
+ /* Create a new dataset with a UTF-8 name */
+ grp1_id = H5Gcreate2(fid, GROUP1_NAME, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(grp1_id, FAIL, "H5Gcreate2");
+
+ space_id = H5Screate_simple(RANK, &dims, NULL);
+ CHECK(space_id, FAIL, "H5Screate_simple");
+ dset_id = H5Dcreate2(grp1_id, string, H5T_NATIVE_INT, space_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(dset_id, FAIL, "H5Dcreate2");
+
+ /* Make sure that dataset can be opened again */
+ ret = H5Dclose(dset_id);
+ CHECK(ret, FAIL, "H5Dclose");
+ ret = H5Sclose(space_id);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ dset_id = H5Dopen2(grp1_id, string, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Dopen2");
+ ret = H5Dclose(dset_id);
+ CHECK(ret, FAIL, "H5Dclose");
+ ret = H5Gclose(grp1_id);
+ CHECK(ret, FAIL, "H5Gclose");
+
+ /* Do the same for a named datatype */
+ grp2_id = H5Gcreate2(fid, GROUP2_NAME, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(grp2_id, FAIL, "H5Gcreate2");
+
+ type_id = H5Tcreate(H5T_OPAQUE, (size_t)1);
+ CHECK(type_id, FAIL, "H5Tcreate");
+ ret = H5Tcommit2(grp2_id, string, type_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(type_id, FAIL, "H5Tcommit2");
+ ret = H5Tclose(type_id);
+ CHECK(type_id, FAIL, "H5Tclose");
+
+ type_id = H5Topen2(grp2_id, string, H5P_DEFAULT);
+ CHECK(type_id, FAIL, "H5Topen2");
+ ret = H5Tclose(type_id);
+ CHECK(type_id, FAIL, "H5Tclose");
+
+ /* Don't close the group -- use it to test that object references
+ * can refer to objects named in UTF-8 */
+#if 0
+ space_id = H5Screate_simple(RANK, &dims, NULL);
+ CHECK(space_id, FAIL, "H5Screate_simple");
+ dset_id =
+ H5Dcreate2(grp2_id, DSET3_NAME, H5T_STD_REF_OBJ, space_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Dcreate2");
+
+ /* Create reference to named datatype */
+ ret = H5Rcreate(&obj_ref, grp2_id, string, H5R_OBJECT, (hid_t)-1);
+ CHECK(ret, FAIL, "H5Rcreate");
+ /* Write selection and read it back*/
+ ret = H5Dwrite(dset_id, H5T_STD_REF_OBJ, H5S_ALL, H5S_ALL, H5P_DEFAULT, &obj_ref);
+ CHECK(ret, FAIL, "H5Dwrite");
+ ret = H5Dread(dset_id, H5T_STD_REF_OBJ, H5S_ALL, H5S_ALL, H5P_DEFAULT, &obj_ref);
+ CHECK(ret, FAIL, "H5Dread");
+
+ /* Ensure that we can open named datatype using object reference */
+ type_id = H5Rdereference2(dset_id, H5P_DEFAULT, H5R_OBJECT, &obj_ref);
+ CHECK(type_id, FAIL, "H5Rdereference2");
+ ret = H5Tcommitted(type_id);
+ VERIFY(ret, 1, "H5Tcommitted");
+
+ ret = H5Tclose(type_id);
+ CHECK(type_id, FAIL, "H5Tclose");
+ ret = H5Dclose(dset_id);
+ CHECK(ret, FAIL, "H5Dclose");
+ ret = H5Sclose(space_id);
+ CHECK(ret, FAIL, "H5Sclose");
+#endif
+ ret = H5Gclose(grp2_id);
+ CHECK(ret, FAIL, "H5Gclose");
+
+    /* Create "group3". Build a hard link from group3 to group2, which has
+     * a datatype with the UTF-8 name. Create a second hard link in group3
+     * pointing through the first link to the datatype. Give the second
+     * link a name in UTF-8. Ensure that the link works. */
+
+ grp3_id = H5Gcreate2(fid, GROUP3_NAME, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(grp3_id, FAIL, "H5Gcreate2");
+
+ ret = H5Lcreate_hard(fid, GROUP2_NAME, grp3_id, GROUP2_NAME, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Lcreate_hard");
+ HDstrcpy(path_buf, GROUP2_NAME);
+ HDstrcat(path_buf, "/");
+ HDstrcat(path_buf, string);
+ ret = H5Lcreate_hard(grp3_id, path_buf, H5L_SAME_LOC, string, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Lcreate_hard");
+
+    /* Open named datatype using the UTF-8-named hard link */
+ type_id = H5Topen2(grp3_id, string, H5P_DEFAULT);
+ CHECK(type_id, FAIL, "H5Topen2");
+
+ ret = H5Tclose(type_id);
+ CHECK(type_id, FAIL, "H5Tclose");
+ ret = H5Gclose(grp3_id);
+ CHECK(ret, FAIL, "H5Gclose");
+}
+
+/*
+ * test_attrname
+ * Test that attributes can deal with UTF-8 strings
+ */
+void
+test_attrname(hid_t fid, const char *string)
+{
+ hid_t group_id, attr_id;
+ hid_t dtype_id, space_id;
+ hsize_t dims = 1;
+ char read_buf[MAX_STRING_LENGTH];
+ ssize_t size;
+ herr_t ret;
+
+ /* Create a new group and give it an attribute whose
+ * name and value are UTF-8 strings.
+ */
+ group_id = H5Gcreate2(fid, GROUP4_NAME, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(group_id, FAIL, "H5Gcreate2");
+
+ space_id = H5Screate_simple(RANK, &dims, NULL);
+ CHECK(space_id, FAIL, "H5Screate_simple");
+ dtype_id = H5Tcopy(H5T_C_S1);
+ CHECK(dtype_id, FAIL, "H5Tcopy");
+ ret = H5Tset_size(dtype_id, (size_t)MAX_STRING_LENGTH);
+ CHECK(ret, FAIL, "H5Tset_size");
+
+ /* Create the attribute and check that its name is correct */
+ attr_id = H5Acreate2(group_id, string, dtype_id, space_id, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(attr_id, FAIL, "H5Acreate2");
+ size = H5Aget_name(attr_id, (size_t)MAX_STRING_LENGTH, read_buf);
+ CHECK(size, FAIL, "H5Aget_name");
+ ret = HDstrcmp(read_buf, string);
+ VERIFY(ret, 0, "strcmp");
+ read_buf[0] = '\0';
+
+ /* Try writing and reading from the attribute */
+ ret = H5Awrite(attr_id, dtype_id, string);
+ CHECK(ret, FAIL, "H5Awrite");
+ ret = H5Aread(attr_id, dtype_id, read_buf);
+ CHECK(ret, FAIL, "H5Aread");
+ ret = HDstrcmp(read_buf, string);
+ VERIFY(ret, 0, "strcmp");
+
+ /* Clean up */
+ ret = H5Aclose(attr_id);
+ CHECK(ret, FAIL, "H5Aclose");
+ ret = H5Tclose(dtype_id);
+ CHECK(ret, FAIL, "H5Tclose");
+ ret = H5Sclose(space_id);
+ CHECK(ret, FAIL, "H5Sclose");
+ ret = H5Gclose(group_id);
+ CHECK(ret, FAIL, "H5Gclose");
+}
+
+/*
+ * test_compound
+ * Test that compound datatypes can have UTF-8 field names.
+ */
+void
+test_compound(hid_t fid, const char *string)
+{
+ /* Define two compound structures, s1_t and s2_t.
+ * s2_t is a subset of s1_t, with two out of three
+ * fields.
+ * This is stolen from the h5_compound example.
+ */
+ typedef struct s1_t {
+ int a;
+ double c;
+ float b;
+ } s1_t;
+ typedef struct s2_t {
+ double c;
+ int a;
+ } s2_t;
+ /* Actual variable declarations */
+ s1_t s1;
+ s2_t s2;
+ hid_t s1_tid, s2_tid;
+ hid_t space_id, dset_id;
+ hsize_t dim = 1;
+ char *readbuf;
+ herr_t ret;
+
+ /* Initialize compound data */
+ HDmemset(&s1, 0, sizeof(s1_t)); /* To make purify happy */
+ s1.a = COMP_INT_VAL;
+ s1.c = COMP_DOUBLE_VAL;
+ s1.b = COMP_FLOAT_VAL;
+
+ /* Create compound datatypes using UTF-8 field name */
+ s1_tid = H5Tcreate(H5T_COMPOUND, sizeof(s1_t));
+ CHECK(s1_tid, FAIL, "H5Tcreate");
+ ret = H5Tinsert(s1_tid, string, HOFFSET(s1_t, a), H5T_NATIVE_INT);
+ CHECK(ret, FAIL, "H5Tinsert");
+
+ /* Check that the field name was stored correctly */
+ readbuf = H5Tget_member_name(s1_tid, 0);
+ ret = HDstrcmp(readbuf, string);
+ VERIFY(ret, 0, "strcmp");
+ H5free_memory(readbuf);
+
+ /* Add the other fields to the datatype */
+ ret = H5Tinsert(s1_tid, "c_name", HOFFSET(s1_t, c), H5T_NATIVE_DOUBLE);
+ CHECK(ret, FAIL, "H5Tinsert");
+ ret = H5Tinsert(s1_tid, "b_name", HOFFSET(s1_t, b), H5T_NATIVE_FLOAT);
+ CHECK(ret, FAIL, "H5Tinsert");
+
+ /* Create second datatype, with only two fields. */
+ s2_tid = H5Tcreate(H5T_COMPOUND, sizeof(s2_t));
+ CHECK(s2_tid, FAIL, "H5Tcreate");
+ ret = H5Tinsert(s2_tid, "c_name", HOFFSET(s2_t, c), H5T_NATIVE_DOUBLE);
+ CHECK(ret, FAIL, "H5Tinsert");
+ ret = H5Tinsert(s2_tid, string, HOFFSET(s2_t, a), H5T_NATIVE_INT);
+ CHECK(ret, FAIL, "H5Tinsert");
+
+ /* Create the dataspace and dataset. */
+ space_id = H5Screate_simple(1, &dim, NULL);
+ CHECK(space_id, FAIL, "H5Screate_simple");
+ dset_id = H5Dcreate2(fid, DSET4_NAME, s1_tid, space_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(dset_id, FAIL, "H5Dcreate2");
+
+ /* Write data to the dataset. */
+ ret = H5Dwrite(dset_id, s1_tid, H5S_ALL, H5S_ALL, H5P_DEFAULT, &s1);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* Ensure that data can be read back by field name into s2 struct */
+ ret = H5Dread(dset_id, s2_tid, H5S_ALL, H5S_ALL, H5P_DEFAULT, &s2);
+ CHECK(ret, FAIL, "H5Dread");
+
+ VERIFY(s2.a, COMP_INT_VAL, "H5Dread");
+ VERIFY(s2.c, COMP_DOUBLE_VAL, "H5Dread");
+
+ /* Clean up */
+ ret = H5Tclose(s1_tid);
+ CHECK(ret, FAIL, "H5Tclose");
+ ret = H5Tclose(s2_tid);
+ CHECK(ret, FAIL, "H5Tclose");
+ ret = H5Sclose(space_id);
+ CHECK(ret, FAIL, "H5Sclose");
+ ret = H5Dclose(dset_id);
+ CHECK(ret, FAIL, "H5Dclose");
+}
+
+/*
+ * test_enum
+ * Test that enumerated datatypes can have UTF-8 member names.
+ */
+void
+test_enum(hid_t H5_ATTR_UNUSED fid, const char *string)
+{
+ /* Define an enumerated type */
+ typedef enum { E1_RED, E1_GREEN, E1_BLUE, E1_WHITE } c_e1;
+ /* Variable declarations */
+ c_e1 val;
+ herr_t ret;
+ hid_t type_id;
+ char readbuf[MAX_STRING_LENGTH];
+
+ /* Create an enumerated datatype in HDF5 with a UTF-8 member name*/
+ type_id = H5Tcreate(H5T_ENUM, sizeof(c_e1));
+ CHECK(type_id, FAIL, "H5Tcreate");
+ val = E1_RED;
+ ret = H5Tenum_insert(type_id, "RED", &val);
+ CHECK(ret, FAIL, "H5Tenum_insert");
+ val = E1_GREEN;
+ ret = H5Tenum_insert(type_id, "GREEN", &val);
+ CHECK(ret, FAIL, "H5Tenum_insert");
+ val = E1_BLUE;
+ ret = H5Tenum_insert(type_id, "BLUE", &val);
+ CHECK(ret, FAIL, "H5Tenum_insert");
+ val = E1_WHITE;
+ ret = H5Tenum_insert(type_id, string, &val);
+ CHECK(ret, FAIL, "H5Tenum_insert");
+
+ /* Ensure that UTF-8 member name gives the right value and vice versa. */
+ ret = H5Tenum_valueof(type_id, string, &val);
+ CHECK(ret, FAIL, "H5Tenum_valueof");
+ VERIFY(val, E1_WHITE, "H5Tenum_valueof");
+ ret = H5Tenum_nameof(type_id, &val, readbuf, (size_t)MAX_STRING_LENGTH);
+ CHECK(ret, FAIL, "H5Tenum_nameof");
+ ret = HDstrcmp(readbuf, string);
+ VERIFY(ret, 0, "strcmp");
+
+ /* Close the datatype */
+ ret = H5Tclose(type_id);
+ CHECK(ret, FAIL, "H5Tclose");
+}
+
+/*
+ * test_opaque
+ * Test comments on opaque datatypes
+ */
+void
+test_opaque(hid_t H5_ATTR_UNUSED fid, const char *string)
+{
+ hid_t type_id;
+ char *read_buf;
+ herr_t ret;
+
+ /* Create an opaque type and give it a UTF-8 tag */
+ type_id = H5Tcreate(H5T_OPAQUE, (size_t)4);
+ CHECK(type_id, FAIL, "H5Tcreate");
+ ret = H5Tset_tag(type_id, string);
+ CHECK(ret, FAIL, "H5Tset_tag");
+
+ /* Read the tag back. */
+ read_buf = H5Tget_tag(type_id);
+ ret = HDstrcmp(read_buf, string);
+ VERIFY(ret, 0, "H5Tget_tag");
+ H5free_memory(read_buf);
+
+ ret = H5Tclose(type_id);
+ CHECK(ret, FAIL, "H5Tclose");
+}
+
+/*********************/
+/* Utility functions */
+/*********************/
+
+/* mkstr
+ * Borrowed from dtypes.c.
+ * Creates a new string data type. Used in string padding tests */
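+/* For example, mkstr(8, H5T_STR_NULLTERM) returns an 8-byte, NUL-terminated
+ * C-string datatype; the caller is responsible for H5Tclose()'ing it. */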
+static hid_t
+mkstr(size_t len, H5T_str_t strpad)
+{
+ hid_t t;
+ if ((t = H5Tcopy(H5T_C_S1)) < 0)
+ return -1;
+ if (H5Tset_size(t, len) < 0)
+ return -1;
+ if (H5Tset_strpad(t, strpad) < 0)
+ return -1;
+ return t;
+}
+
+/* write_char
+ * Append a unicode code point c to test_string in UTF-8 encoding.
+ * Return the new end of the string.
+ */
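+/* For instance, 'A' (U+0041) encodes as the single byte 0x41, U+00E9 as
+ * 0xC3 0xA9, U+20AC as 0xE2 0x82 0xAC, and U+1F600 as 0xF0 0x9F 0x98 0x80,
+ * matching the 1-, 2-, 3- and 4-byte branches below. */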
+unsigned int
+write_char(unsigned int c, char *test_string, unsigned int cur_pos)
+{
+ if (c < 0x80) {
+ test_string[cur_pos] = (char)c;
+ cur_pos++;
+ }
+ else if (c < 0x800) {
+ test_string[cur_pos] = (char)(0xC0 | c >> 6);
+ test_string[cur_pos + 1] = (char)(0x80 | (c & 0x3F));
+ cur_pos += 2;
+ }
+ else if (c < 0x10000) {
+ test_string[cur_pos] = (char)(0xE0 | c >> 12);
+ test_string[cur_pos + 1] = (char)(0x80 | (c >> 6 & 0x3F));
+ test_string[cur_pos + 2] = (char)(0x80 | (c & 0x3F));
+ cur_pos += 3;
+ }
+ else if (c < 0x200000) {
+ test_string[cur_pos] = (char)(0xF0 | c >> 18);
+ test_string[cur_pos + 1] = (char)(0x80 | (c >> 12 & 0x3F));
+ test_string[cur_pos + 2] = (char)(0x80 | (c >> 6 & 0x3F));
+ test_string[cur_pos + 3] = (char)(0x80 | (c & 0x3F));
+ cur_pos += 4;
+ }
+
+ return cur_pos;
+}
+
+/* dump_string
+ * Print a string both as text (which will look like garbage) and as hex.
+ * The text display is not guaranteed to be accurate--certain characters
+ * could confuse printf (e.g., '\n'). */
+void
+dump_string(const char *string)
+{
+ size_t length;
+ size_t x;
+
+ HDprintf("The string was:\n %s", string);
+ HDprintf("Or in hex:\n");
+
+ length = HDstrlen(string);
+
+ for (x = 0; x < length; x++)
+ HDprintf("%x ", string[x] & (0x000000FF));
+
+ HDprintf("\n");
+}
+
+/* Main test.
+ * Create a string of random Unicode characters, then run each test with
+ * that string.
+ */
+void
+test_unicode(void)
+{
+ char test_string[MAX_STRING_LENGTH];
+ unsigned int cur_pos = 0; /* Current position in test_string */
+ unsigned int unicode_point; /* Unicode code point for a single character */
+ hid_t fid; /* ID of file */
+ int x; /* Temporary variable */
+ herr_t ret; /* Generic return value */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing UTF-8 Encoding\n"));
+
+ /* Create a random string with length NUM_CHARS */
+ HDsrandom((unsigned)HDtime(NULL));
+
+ HDmemset(test_string, 0, sizeof(test_string));
+ for (x = 0; x < NUM_CHARS; x++) {
+ /* We need to avoid unprintable characters (codes 0-31) and the
+ * . and / characters, since they aren't allowed in path names.
+ */
+ unicode_point = (unsigned)(HDrandom() % (MAX_CODE_POINT - 32)) + 32;
+ if (unicode_point != 46 && unicode_point != 47)
+ cur_pos = write_char(unicode_point, test_string, cur_pos);
+ }
+
+ /* Avoid unlikely case of the null string */
+ if (cur_pos == 0) {
+ test_string[cur_pos] = 'Q';
+ cur_pos++;
+ }
+ test_string[cur_pos] = '\0';
+
+ /* Create file */
+ fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(fid, FAIL, "H5Fcreate");
+
+ test_fl_string(fid, test_string);
+ test_strpad(fid, "abcdefgh");
+ test_strpad(fid, test_string);
+ test_vl_string(fid, test_string);
+ test_objnames(fid, test_string);
+ test_attrname(fid, test_string);
+ test_compound(fid, test_string);
+ test_enum(fid, test_string);
+ test_opaque(fid, test_string);
+
+ /* Close file */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* This function could be useful in debugging if certain strings
+ * create errors.
+ */
+#ifdef DEBUG
+ dump_string(test_string);
+#endif /* DEBUG */
+}
+
+/* cleanup_unicode(void)
+ * Delete the file this test created.
+ */
+void
+cleanup_unicode(void)
+{
+ H5Fdelete(FILENAME, H5P_DEFAULT);
+}
diff --git a/test/API/tvlstr.c b/test/API/tvlstr.c
new file mode 100644
index 0000000..b05ff66
--- /dev/null
+++ b/test/API/tvlstr.c
@@ -0,0 +1,1013 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://www.hdfgroup.org/licenses. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/***********************************************************
+ *
+ * Test program: tvlstr
+ *
+ * Test the Variable-Length String functionality
+ *
+ *************************************************************/
+
+#include "testhdf5.h"
+
+#define DATAFILE "tvlstr.h5"
+#define DATAFILE2 "tvlstr2.h5"
+#define DATAFILE3 "sel2el.h5"
+
+#define DATASET "1Darray"
+
+/* 1-D dataset with fixed dimensions */
+#define SPACE1_RANK 1
+#define SPACE1_DIM1 4
+#define NUMP 4
+
+#define VLSTR_TYPE "vl_string_type"
+
+/* Definitions for the VL re-writing test */
+#define REWRITE_NDATASETS 32
+
+/* String for testing attributes */
+static const char *string_att = "This is the string for the attribute";
+static char *string_att_write = NULL;
+
+void *test_vlstr_alloc_custom(size_t size, void *info);
+void test_vlstr_free_custom(void *mem, void *info);
+
+/****************************************************************
+**
+** test_vlstr_alloc_custom(): Test VL datatype custom memory
+** allocation routines. This routine just uses malloc to
+** allocate the memory and increments the amount of memory
+** allocated.
+**
+****************************************************************/
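+/* Each block handed out below is laid out as a size_t byte count (plus
+ * alignment padding) followed by the caller's data; test_vlstr_free_custom()
+ * steps back past that prefix to recover the count and decrement *mem_used. */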
+void *
+test_vlstr_alloc_custom(size_t size, void *info)
+{
+ void *ret_value = NULL; /* Pointer to return */
+ size_t *mem_used = (size_t *)info; /* Get the pointer to the memory used */
+ size_t extra; /* Extra space needed */
+
+ /*
+ * This weird contortion is required on the DEC Alpha to keep the
+ * alignment correct - QAK
+ */
+ extra = MAX(sizeof(void *), sizeof(size_t));
+
+    if ((ret_value = HDmalloc(extra + size)) != NULL) {
+        *(size_t *)ret_value = size;
+        *mem_used += size;
+        /* Hand back the space just past the bookkeeping prefix */
+        ret_value = ((unsigned char *)ret_value) + extra;
+    } /* end if */
+    return (ret_value);
+}
+
+/****************************************************************
+**
+** test_vlstr_free_custom(): Test VL datatype custom memory
+** allocation routines. This routine just uses free to
+** release the memory and decrements the amount of memory
+** allocated.
+**
+****************************************************************/
+void
+test_vlstr_free_custom(void *_mem, void *info)
+{
+ unsigned char *mem;
+ size_t *mem_used = (size_t *)info; /* Get the pointer to the memory used */
+ size_t extra; /* Extra space needed */
+
+ /*
+ * This weird contortion is required on the DEC Alpha to keep the
+ * alignment correct - QAK
+ */
+ extra = MAX(sizeof(void *), sizeof(size_t));
+
+ if (_mem != NULL) {
+ mem = ((unsigned char *)_mem) - extra;
+ *mem_used -= *(size_t *)((void *)mem);
+ HDfree(mem);
+ } /* end if */
+}
+
+/****************************************************************
+**
+** test_vlstrings_basic(): Test basic VL string code.
+** Tests simple VL string I/O
+**
+****************************************************************/
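+/* With H5Tset_size(tid1, H5T_VARIABLE) each element of the memory buffer is a
+ * char * rather than a fixed-size character array, so wdata/rdata below are
+ * arrays of string pointers and the read buffers must be reclaimed with
+ * H5Treclaim(). */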
+static void
+test_vlstrings_basic(void)
+{
+ /* Information to write */
+ const char *wdata[SPACE1_DIM1] = {
+ "Four score and seven years ago our forefathers brought forth on this continent a new nation,",
+ "conceived in liberty and dedicated to the proposition that all men are created equal.",
+ "Now we are engaged in a great civil war,",
+ "testing whether that nation or any nation so conceived and so dedicated can long endure."};
+
+ char *rdata[SPACE1_DIM1]; /* Information read in */
+ char *wdata2;
+ hid_t dataspace, dataset2;
+ hid_t fid1; /* HDF5 File IDs */
+ hid_t dataset; /* Dataset ID */
+ hid_t sid1; /* Dataspace ID */
+ hid_t tid1; /* Datatype ID */
+ hid_t xfer_pid; /* Dataset transfer property list ID */
+ hsize_t dims1[] = {SPACE1_DIM1};
+ hsize_t size; /* Number of bytes which will be used */
+ unsigned i; /* counting variable */
+ size_t str_used; /* String data in memory */
+ size_t mem_used = 0; /* Memory used during allocation */
+ herr_t ret; /* Generic return value */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing Basic VL String Functionality\n"));
+
+ /* Create file */
+ fid1 = H5Fcreate(DATAFILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(fid1, FAIL, "H5Fcreate");
+
+ /* Create dataspace for datasets */
+ sid1 = H5Screate_simple(SPACE1_RANK, dims1, NULL);
+ CHECK(sid1, FAIL, "H5Screate_simple");
+
+ /* Create a datatype to refer to */
+ tid1 = H5Tcopy(H5T_C_S1);
+ CHECK(tid1, FAIL, "H5Tcopy");
+
+ ret = H5Tset_size(tid1, H5T_VARIABLE);
+ CHECK(ret, FAIL, "H5Tset_size");
+
+ /* Create a dataset */
+ dataset = H5Dcreate2(fid1, "Dataset1", tid1, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(dataset, FAIL, "H5Dcreate2");
+
+ /* Write dataset to disk */
+ ret = H5Dwrite(dataset, tid1, H5S_ALL, H5S_ALL, H5P_DEFAULT, wdata);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+    dataspace = H5Screate(H5S_SCALAR);
+    CHECK(dataspace, FAIL, "H5Screate");
+
+ dataset2 = H5Dcreate2(fid1, "Dataset2", tid1, dataspace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(dataset, FAIL, "H5Dcreate2");
+
+ wdata2 = (char *)HDcalloc((size_t)65534, sizeof(char));
+ HDmemset(wdata2, 'A', (size_t)65533);
+
+ ret = H5Dwrite(dataset2, tid1, H5S_ALL, H5S_ALL, H5P_DEFAULT, &wdata2);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ H5Sclose(dataspace);
+ H5Dclose(dataset2);
+ HDfree(wdata2);
+
+ /* Change to the custom memory allocation routines for reading VL string */
+ xfer_pid = H5Pcreate(H5P_DATASET_XFER);
+ CHECK(xfer_pid, FAIL, "H5Pcreate");
+
+ ret = H5Pset_vlen_mem_manager(xfer_pid, test_vlstr_alloc_custom, &mem_used, test_vlstr_free_custom,
+ &mem_used);
+ CHECK(ret, FAIL, "H5Pset_vlen_mem_manager");
+
+ /* Make certain the correct amount of memory will be used */
+ ret = H5Dvlen_get_buf_size(dataset, tid1, sid1, &size);
+ CHECK(ret, FAIL, "H5Dvlen_get_buf_size");
+
+ /* Count the actual number of bytes used by the strings */
+ for (i = 0, str_used = 0; i < SPACE1_DIM1; i++)
+ str_used += HDstrlen(wdata[i]) + 1;
+
+ /* Compare against the strings actually written */
+ VERIFY(size, (hsize_t)str_used, "H5Dvlen_get_buf_size");
+
+ /* Read dataset from disk */
+ ret = H5Dread(dataset, tid1, H5S_ALL, H5S_ALL, xfer_pid, rdata);
+ CHECK(ret, FAIL, "H5Dread");
+
+ /* Make certain the correct amount of memory has been used */
+ VERIFY(mem_used, str_used, "H5Dread");
+
+ /* Compare data read in */
+ for (i = 0; i < SPACE1_DIM1; i++) {
+ if (HDstrlen(wdata[i]) != HDstrlen(rdata[i])) {
+ TestErrPrintf("VL data length don't match!, strlen(wdata[%d])=%d, strlen(rdata[%d])=%d\n", (int)i,
+ (int)HDstrlen(wdata[i]), (int)i, (int)HDstrlen(rdata[i]));
+ continue;
+ } /* end if */
+ if (HDstrcmp(wdata[i], rdata[i]) != 0) {
+ TestErrPrintf("VL data values don't match!, wdata[%d]=%s, rdata[%d]=%s\n", (int)i, wdata[i],
+ (int)i, rdata[i]);
+ continue;
+ } /* end if */
+ } /* end for */
+
+ /* Reclaim the read VL data */
+ ret = H5Treclaim(tid1, sid1, xfer_pid, rdata);
+ CHECK(ret, FAIL, "H5Treclaim");
+
+ /* Make certain the VL memory has been freed */
+ VERIFY(mem_used, 0, "H5Treclaim");
+
+ /* Close Dataset */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close datatype */
+ ret = H5Tclose(tid1);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ /* Close disk dataspace */
+ ret = H5Sclose(sid1);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close dataset transfer property list */
+ ret = H5Pclose(xfer_pid);
+ CHECK(ret, FAIL, "H5Pclose");
+
+ /* Close file */
+ ret = H5Fclose(fid1);
+ CHECK(ret, FAIL, "H5Fclose");
+
+} /* end test_vlstrings_basic() */
+
+/****************************************************************
+**
+** test_vlstrings_special(): Test VL string code for special
+** string cases, nil and zero-sized.
+**
+****************************************************************/
+static void
+test_vlstrings_special(void)
+{
+ const char *wdata[SPACE1_DIM1] = {"", "two", "three", "\0"};
+ const char *wdata2[SPACE1_DIM1] = {NULL, NULL, NULL, NULL};
+ char *rdata[SPACE1_DIM1]; /* Information read in */
+ char *fill; /* Fill value */
+ hid_t fid1; /* HDF5 File IDs */
+ hid_t dataset; /* Dataset ID */
+ hid_t sid1; /* Dataspace ID */
+ hid_t tid1; /* Datatype ID */
+ hid_t dcpl; /* Dataset creation property list ID */
+ hsize_t dims1[] = {SPACE1_DIM1};
+ unsigned i; /* counting variable */
+ herr_t ret; /* Generic return value */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing Special VL Strings\n"));
+
+ /* Create file */
+ fid1 = H5Fcreate(DATAFILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(fid1, FAIL, "H5Fcreate");
+
+ /* Create dataspace for datasets */
+ sid1 = H5Screate_simple(SPACE1_RANK, dims1, NULL);
+ CHECK(sid1, FAIL, "H5Screate_simple");
+
+ /* Create a datatype to refer to */
+ tid1 = H5Tcopy(H5T_C_S1);
+ CHECK(tid1, FAIL, "H5Tcopy");
+
+ ret = H5Tset_size(tid1, H5T_VARIABLE);
+ CHECK(ret, FAIL, "H5Tset_size");
+
+ /* Create a dataset */
+ dataset = H5Dcreate2(fid1, "Dataset3", tid1, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(dataset, FAIL, "H5Dcreate2");
+
+ /* Read from dataset before writing data */
+ ret = H5Dread(dataset, tid1, H5S_ALL, H5S_ALL, H5P_DEFAULT, rdata);
+ CHECK(ret, FAIL, "H5Dread");
+
+ /* Check data read in */
+ for (i = 0; i < SPACE1_DIM1; i++)
+ if (rdata[i] != NULL)
+ TestErrPrintf("VL doesn't match!, rdata[%d]=%s\n", (int)i, rdata[i]);
+
+ /* Write dataset to disk */
+ ret = H5Dwrite(dataset, tid1, H5S_ALL, H5S_ALL, H5P_DEFAULT, wdata);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* Read dataset from disk */
+ ret = H5Dread(dataset, tid1, H5S_ALL, H5S_ALL, H5P_DEFAULT, rdata);
+ CHECK(ret, FAIL, "H5Dread");
+
+ /* Compare data read in */
+ for (i = 0; i < SPACE1_DIM1; i++) {
+ if (HDstrlen(wdata[i]) != HDstrlen(rdata[i])) {
+ TestErrPrintf("VL data length don't match!, strlen(wdata[%d])=%d, strlen(rdata[%d])=%d\n", (int)i,
+ (int)HDstrlen(wdata[i]), (int)i, (int)HDstrlen(rdata[i]));
+ continue;
+ } /* end if */
+ if ((wdata[i] == NULL && rdata[i] != NULL) || (rdata[i] == NULL && wdata[i] != NULL)) {
+ TestErrPrintf("VL data values don't match!\n");
+ continue;
+ } /* end if */
+ if (HDstrcmp(wdata[i], rdata[i]) != 0) {
+ TestErrPrintf("VL data values don't match!, wdata[%d]=%s, rdata[%d]=%s\n", (int)i, wdata[i],
+ (int)i, rdata[i]);
+ continue;
+ } /* end if */
+ } /* end for */
+
+ /* Reclaim the read VL data */
+ ret = H5Treclaim(tid1, sid1, H5P_DEFAULT, rdata);
+ CHECK(ret, FAIL, "H5Treclaim");
+
+ /* Close Dataset */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Create another dataset to test nil strings */
+ dcpl = H5Pcreate(H5P_DATASET_CREATE);
+ CHECK(dcpl, FAIL, "H5Pcreate");
+
+ /* Set the fill value for the second dataset */
+ fill = NULL;
+ ret = H5Pset_fill_value(dcpl, tid1, &fill);
+ CHECK(ret, FAIL, "H5Pset_fill_value");
+
+ dataset = H5Dcreate2(fid1, "Dataset4", tid1, sid1, H5P_DEFAULT, dcpl, H5P_DEFAULT);
+ CHECK(dataset, FAIL, "H5Dcreate2");
+
+ /* Close dataset creation property list */
+ ret = H5Pclose(dcpl);
+ CHECK(ret, FAIL, "H5Pclose");
+
+ /* Read from dataset before writing data */
+ ret = H5Dread(dataset, tid1, H5S_ALL, H5S_ALL, H5P_DEFAULT, rdata);
+ CHECK(ret, FAIL, "H5Dread");
+
+ /* Check data read in */
+ for (i = 0; i < SPACE1_DIM1; i++)
+ if (rdata[i] != NULL)
+ TestErrPrintf("VL doesn't match!, rdata[%d]=%s\n", (int)i, rdata[i]);
+
+ /* Try to write nil strings to disk. */
+ ret = H5Dwrite(dataset, tid1, H5S_ALL, H5S_ALL, H5P_DEFAULT, wdata2);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* Read nil strings back from disk */
+ ret = H5Dread(dataset, tid1, H5S_ALL, H5S_ALL, H5P_DEFAULT, rdata);
+ CHECK(ret, FAIL, "H5Dread");
+
+ /* Check data read in */
+ for (i = 0; i < SPACE1_DIM1; i++)
+ if (rdata[i] != NULL)
+ TestErrPrintf("VL doesn't match!, rdata[%d]=%s\n", (int)i, rdata[i]);
+
+ /* Close Dataset */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close datatype */
+ ret = H5Tclose(tid1);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ /* Close disk dataspace */
+ ret = H5Sclose(sid1);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close file */
+ ret = H5Fclose(fid1);
+ CHECK(ret, FAIL, "H5Fclose");
+}
+
+/****************************************************************
+**
+** test_vlstring_type(): Test VL string type.
+** Tests if VL string is treated as string.
+**
+****************************************************************/
+static void
+test_vlstring_type(void)
+{
+ hid_t fid; /* HDF5 File IDs */
+ hid_t tid_vlstr;
+ H5T_cset_t cset;
+ H5T_str_t pad;
+ htri_t vl_str; /* Whether string is VL */
+ herr_t ret;
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing VL String type\n"));
+
+ /* Open file */
+ fid = H5Fopen(DATAFILE, H5F_ACC_RDWR, H5P_DEFAULT);
+ CHECK(fid, FAIL, "H5Fopen");
+
+ /* Create a datatype to refer to */
+ tid_vlstr = H5Tcopy(H5T_C_S1);
+ CHECK(tid_vlstr, FAIL, "H5Tcopy");
+
+ /* Change padding and verify it */
+ ret = H5Tset_strpad(tid_vlstr, H5T_STR_NULLPAD);
+ CHECK(ret, FAIL, "H5Tset_strpad");
+ pad = H5Tget_strpad(tid_vlstr);
+ VERIFY(pad, H5T_STR_NULLPAD, "H5Tget_strpad");
+
+ /* Convert to variable-length string */
+ ret = H5Tset_size(tid_vlstr, H5T_VARIABLE);
+ CHECK(ret, FAIL, "H5Tset_size");
+
+ /* Check if datatype is VL string */
+ ret = H5Tget_class(tid_vlstr);
+ VERIFY(ret, H5T_STRING, "H5Tget_class");
+ ret = H5Tis_variable_str(tid_vlstr);
+ VERIFY(ret, TRUE, "H5Tis_variable_str");
+
+ /* Verify that the class detects as a string */
+ vl_str = H5Tdetect_class(tid_vlstr, H5T_STRING);
+ CHECK(vl_str, FAIL, "H5Tdetect_class");
+ VERIFY(vl_str, TRUE, "H5Tdetect_class");
+
+ /* Check default character set and padding */
+ cset = H5Tget_cset(tid_vlstr);
+ VERIFY(cset, H5T_CSET_ASCII, "H5Tget_cset");
+ pad = H5Tget_strpad(tid_vlstr);
+ VERIFY(pad, H5T_STR_NULLPAD, "H5Tget_strpad");
+
+ /* Commit variable-length string datatype to storage */
+ ret = H5Tcommit2(fid, VLSTR_TYPE, tid_vlstr, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Tcommit2");
+
+ /* Close datatype */
+ ret = H5Tclose(tid_vlstr);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ tid_vlstr = H5Topen2(fid, VLSTR_TYPE, H5P_DEFAULT);
+ CHECK(tid_vlstr, FAIL, "H5Topen2");
+
+ ret = H5Tclose(tid_vlstr);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ fid = H5Fopen(DATAFILE, H5F_ACC_RDWR, H5P_DEFAULT);
+ CHECK(fid, FAIL, "H5Fopen");
+
+ /* Open the variable-length string datatype just created */
+ tid_vlstr = H5Topen2(fid, VLSTR_TYPE, H5P_DEFAULT);
+ CHECK(tid_vlstr, FAIL, "H5Topen2");
+
+ /* Verify character set and padding */
+ cset = H5Tget_cset(tid_vlstr);
+ VERIFY(cset, H5T_CSET_ASCII, "H5Tget_cset");
+ pad = H5Tget_strpad(tid_vlstr);
+ VERIFY(pad, H5T_STR_NULLPAD, "H5Tget_strpad");
+
+ /* Close datatype and file */
+ ret = H5Tclose(tid_vlstr);
+ CHECK(ret, FAIL, "H5Tclose");
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+
+} /* end test_vlstring_type() */
+
+/****************************************************************
+**
+** test_compact_vlstring(): Test code for storing VL strings in
+** compact datasets.
+**
+****************************************************************/
+static void
+test_compact_vlstring(void)
+{
+ const char *wdata[SPACE1_DIM1] = {"one", "two", "three", "four"};
+ char *rdata[SPACE1_DIM1]; /* Information read in */
+ hid_t fid1; /* HDF5 File IDs */
+ hid_t dataset; /* Dataset ID */
+ hid_t sid1; /* Dataspace ID */
+ hid_t tid1; /* Datatype ID */
+ hid_t plist; /* Dataset creation property list */
+ hsize_t dims1[] = {SPACE1_DIM1};
+ unsigned i; /* counting variable */
+ herr_t ret; /* Generic return value */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing VL Strings in compact dataset\n"));
+
+ /* Create file */
+ fid1 = H5Fcreate(DATAFILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(fid1, FAIL, "H5Fcreate");
+
+ /* Create dataspace for datasets */
+ sid1 = H5Screate_simple(SPACE1_RANK, dims1, NULL);
+ CHECK(sid1, FAIL, "H5Screate_simple");
+
+ /* Create a datatype to refer to */
+ tid1 = H5Tcopy(H5T_C_S1);
+ CHECK(tid1, FAIL, "H5Tcopy");
+
+ ret = H5Tset_size(tid1, H5T_VARIABLE);
+ CHECK(ret, FAIL, "H5Tset_size");
+
+ plist = H5Pcreate(H5P_DATASET_CREATE);
+ CHECK(plist, FAIL, "H5Pcreate");
+
+ ret = H5Pset_layout(plist, H5D_COMPACT);
+ CHECK(ret, FAIL, "H5Pset_layout");
+
+ /* Create a dataset */
+ dataset = H5Dcreate2(fid1, "Dataset5", tid1, sid1, H5P_DEFAULT, plist, H5P_DEFAULT);
+ CHECK(dataset, FAIL, "H5Dcreate2");
+
+ /* Write dataset to disk */
+ ret = H5Dwrite(dataset, tid1, H5S_ALL, H5S_ALL, H5P_DEFAULT, wdata);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* Read dataset from disk */
+ ret = H5Dread(dataset, tid1, H5S_ALL, H5S_ALL, H5P_DEFAULT, rdata);
+ CHECK(ret, FAIL, "H5Dread");
+
+ /* Compare data read in */
+ for (i = 0; i < SPACE1_DIM1; i++) {
+ if (HDstrlen(wdata[i]) != HDstrlen(rdata[i])) {
+ TestErrPrintf("VL data length don't match!, strlen(wdata[%d])=%d, strlen(rdata[%d])=%d\n", (int)i,
+ (int)HDstrlen(wdata[i]), (int)i, (int)HDstrlen(rdata[i]));
+ continue;
+ } /* end if */
+ if (HDstrcmp(wdata[i], rdata[i]) != 0) {
+ TestErrPrintf("VL data values don't match!, wdata[%d]=%s, rdata[%d]=%s\n", (int)i, wdata[i],
+ (int)i, rdata[i]);
+ continue;
+ } /* end if */
+ } /* end for */
+
+ /* Reclaim the read VL data */
+ ret = H5Treclaim(tid1, sid1, H5P_DEFAULT, rdata);
+ CHECK(ret, FAIL, "H5Treclaim");
+
+ /* Close Dataset */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close datatype */
+ ret = H5Tclose(tid1);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ /* Close disk dataspace */
+ ret = H5Sclose(sid1);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close dataset create property list */
+ ret = H5Pclose(plist);
+ CHECK(ret, FAIL, "H5Pclose");
+
+ /* Close file */
+ ret = H5Fclose(fid1);
+ CHECK(ret, FAIL, "H5Fclose");
+} /* end test_compact_vlstring() */
+
+/****************************************************************
+**
+** test_write_vl_string_attribute(): Test basic VL string code.
+** Tests writing VL strings as attributes
+**
+****************************************************************/
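+/* Because the attribute uses a variable-length string datatype, H5Awrite()
+ * and H5Aread() below are passed the address of the char * (e.g. &string_att)
+ * rather than the character data itself. */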
+static void
+test_write_vl_string_attribute(void)
+{
+ hid_t file, root, dataspace, att;
+ hid_t type;
+ herr_t ret;
+ char *string_att_check = NULL;
+
+ /* Open the file */
+ file = H5Fopen(DATAFILE, H5F_ACC_RDWR, H5P_DEFAULT);
+ CHECK(file, FAIL, "H5Fopen");
+
+ /* Create a datatype to refer to. */
+ type = H5Tcopy(H5T_C_S1);
+ CHECK(type, FAIL, "H5Tcopy");
+
+ ret = H5Tset_size(type, H5T_VARIABLE);
+ CHECK(ret, FAIL, "H5Tset_size");
+
+ root = H5Gopen2(file, "/", H5P_DEFAULT);
+ CHECK(root, FAIL, "H5Gopen2");
+
+ dataspace = H5Screate(H5S_SCALAR);
+ CHECK(dataspace, FAIL, "H5Screate");
+
+ /* Test creating a "normal" sized string attribute */
+ att = H5Acreate2(root, "test_scalar", type, dataspace, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(att, FAIL, "H5Acreate2");
+
+ ret = H5Awrite(att, type, &string_att);
+ CHECK(ret, FAIL, "H5Awrite");
+
+ ret = H5Aread(att, type, &string_att_check);
+ CHECK(ret, FAIL, "H5Aread");
+
+ if (HDstrcmp(string_att_check, string_att) != 0)
+ TestErrPrintf("VL string attributes don't match!, string_att=%s, string_att_check=%s\n", string_att,
+ string_att_check);
+
+ H5free_memory(string_att_check);
+ string_att_check = NULL;
+
+ ret = H5Aclose(att);
+ CHECK(ret, FAIL, "HAclose");
+
+ /* Test creating a "large" sized string attribute */
+ att = H5Acreate2(root, "test_scalar_large", type, dataspace, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(att, FAIL, "H5Acreate2");
+
+ string_att_write = (char *)HDcalloc((size_t)8192, sizeof(char));
+ HDmemset(string_att_write, 'A', (size_t)8191);
+
+ ret = H5Awrite(att, type, &string_att_write);
+ CHECK(ret, FAIL, "H5Awrite");
+
+ ret = H5Aread(att, type, &string_att_check);
+ CHECK(ret, FAIL, "H5Aread");
+
+ if (HDstrcmp(string_att_check, string_att_write) != 0)
+ TestErrPrintf("VL string attributes don't match!, string_att_write=%s, string_att_check=%s\n",
+ string_att_write, string_att_check);
+
+ H5free_memory(string_att_check);
+ string_att_check = NULL;
+
+ /* The attribute string written is freed below, in the test_read_vl_string_attribute() test */
+ /* HDfree(string_att_write); */
+
+ ret = H5Aclose(att);
+ CHECK(ret, FAIL, "HAclose");
+
+ ret = H5Gclose(root);
+ CHECK(ret, FAIL, "H5Gclose");
+
+ ret = H5Tclose(type);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ ret = H5Sclose(dataspace);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ ret = H5Fclose(file);
+ CHECK(ret, FAIL, "H5Fclose");
+}
+
+/****************************************************************
+**
+** test_read_vl_string_attribute(): Test basic VL string code.
+** Tests reading VL strings from attributes
+**
+****************************************************************/
+static void
+test_read_vl_string_attribute(void)
+{
+ hid_t file, root, att;
+ hid_t type;
+ herr_t ret;
+ char *string_att_check = NULL;
+
+ /* Open file */
+ file = H5Fopen(DATAFILE, H5F_ACC_RDONLY, H5P_DEFAULT);
+ CHECK(file, FAIL, "H5Fopen");
+
+ /* Create a datatype to refer to. */
+ type = H5Tcopy(H5T_C_S1);
+ CHECK(type, FAIL, "H5Tcopy");
+
+ ret = H5Tset_size(type, H5T_VARIABLE);
+ CHECK(ret, FAIL, "H5Tset_size");
+
+ root = H5Gopen2(file, "/", H5P_DEFAULT);
+ CHECK(root, FAIL, "H5Gopen2");
+
+ /* Test reading "normal" sized string attribute */
+ att = H5Aopen(root, "test_scalar", H5P_DEFAULT);
+ CHECK(att, FAIL, "H5Aopen");
+
+ ret = H5Aread(att, type, &string_att_check);
+ CHECK(ret, FAIL, "H5Aread");
+
+ if (HDstrcmp(string_att_check, string_att) != 0)
+ TestErrPrintf("VL string attributes don't match!, string_att=%s, string_att_check=%s\n", string_att,
+ string_att_check);
+
+ H5free_memory(string_att_check);
+ string_att_check = NULL;
+
+ ret = H5Aclose(att);
+ CHECK(ret, FAIL, "HAclose");
+
+ /* Test reading "large" sized string attribute */
+ att = H5Aopen(root, "test_scalar_large", H5P_DEFAULT);
+ CHECK(att, FAIL, "H5Aopen");
+
+ if (string_att_write) {
+ ret = H5Aread(att, type, &string_att_check);
+ CHECK(ret, FAIL, "H5Aread");
+
+ if (HDstrcmp(string_att_check, string_att_write) != 0)
+ TestErrPrintf("VL string attributes don't match!, string_att_write=%s, string_att_check=%s\n",
+ string_att_write, string_att_check);
+
+ H5free_memory(string_att_check);
+ string_att_check = NULL;
+ }
+
+ /* Free string allocated in test_write_vl_string_attribute */
+ if (string_att_write)
+ HDfree(string_att_write);
+
+ ret = H5Aclose(att);
+ CHECK(ret, FAIL, "HAclose");
+
+ ret = H5Tclose(type);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ ret = H5Gclose(root);
+ CHECK(ret, FAIL, "H5Gclose");
+
+ ret = H5Fclose(file);
+ CHECK(ret, FAIL, "H5Fclose");
+}
+
+/* Helper routine for test_vl_rewrite() */
+static void
+write_scalar_dset(hid_t file, hid_t type, hid_t space, char *name, char *data)
+{
+ hid_t dset;
+ herr_t ret;
+
+ dset = H5Dcreate2(file, name, type, space, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(dset, FAIL, "H5Dcreate2");
+
+ ret = H5Dwrite(dset, type, space, space, H5P_DEFAULT, &data);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ ret = H5Dclose(dset);
+ CHECK(ret, FAIL, "H5Dclose");
+}
+
+/* Helper routine for test_vl_rewrite() */
+static void
+read_scalar_dset(hid_t file, hid_t type, hid_t space, char *name, char *data)
+{
+ hid_t dset;
+ herr_t ret;
+ char *data_read;
+
+ dset = H5Dopen2(file, name, H5P_DEFAULT);
+ CHECK(dset, FAIL, "H5Dopen2");
+
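+ /* The library allocates the buffer for data_read; it is released below with H5Treclaim */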
+ ret = H5Dread(dset, type, space, space, H5P_DEFAULT, &data_read);
+ CHECK(ret, FAIL, "H5Dread");
+
+ ret = H5Dclose(dset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ if (HDstrcmp(data, data_read) != 0)
+ TestErrPrintf("Expected %s for dataset %s but read %s\n", data, name, data_read);
+
+ ret = H5Treclaim(type, space, H5P_DEFAULT, &data_read);
+ CHECK(ret, FAIL, "H5Treclaim");
+}
+
+/****************************************************************
+**
+** test_vl_rewrite(): Test basic VL string code.
+** Tests I/O on VL strings when lots of objects in the file
+** have been linked/unlinked.
+**
+****************************************************************/
+static void
+test_vl_rewrite(void)
+{
+ hid_t file1, file2; /* File IDs */
+ hid_t type; /* VL string datatype ID */
+ hid_t space; /* Scalar dataspace */
+ char name[256]; /* Buffer for names & data */
+ int i; /* Local index variable */
+ herr_t ret; /* Generic return value */
+
+ /* Create the VL string datatype */
+ type = H5Tcopy(H5T_C_S1);
+ CHECK(type, FAIL, "H5Tcopy");
+
+ ret = H5Tset_size(type, H5T_VARIABLE);
+ CHECK(ret, FAIL, "H5Tset_size");
+
+ /* Create the scalar dataspace */
+ space = H5Screate(H5S_SCALAR);
+ CHECK(space, FAIL, "H5Screate");
+
+ /* Open the files */
+ file1 = H5Fcreate(DATAFILE, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(file1, FAIL, "H5Fcreate");
+
+ file2 = H5Fcreate(DATAFILE2, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(file1, FAIL, "H5Fcreate");
+
+ /* Create in file 1 */
+ for (i = 0; i < REWRITE_NDATASETS; i++) {
+ HDsnprintf(name, sizeof(name), "/set_%d", i);
+ write_scalar_dset(file1, type, space, name, name);
+ }
+
+ /* Effectively copy data from file 1 to 2 */
+ for (i = 0; i < REWRITE_NDATASETS; i++) {
+ HDsnprintf(name, sizeof(name), "/set_%d", i);
+ read_scalar_dset(file1, type, space, name, name);
+ write_scalar_dset(file2, type, space, name, name);
+ }
+
+ /* Read back from file 2 */
+ for (i = 0; i < REWRITE_NDATASETS; i++) {
+ HDsnprintf(name, sizeof(name), "/set_%d", i);
+ read_scalar_dset(file2, type, space, name, name);
+ } /* end for */
+
+ /* Remove from file 2. */
+ for (i = 0; i < REWRITE_NDATASETS; i++) {
+ HDsnprintf(name, sizeof(name), "/set_%d", i);
+ ret = H5Ldelete(file2, name, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Ldelete");
+ } /* end for */
+
+ /* Effectively copy from file 1 to file 2 */
+ for (i = 0; i < REWRITE_NDATASETS; i++) {
+ HDsnprintf(name, sizeof(name), "/set_%d", i);
+ read_scalar_dset(file1, type, space, name, name);
+ write_scalar_dset(file2, type, space, name, name);
+ } /* end for */
+
+ /* Close everything */
+ ret = H5Tclose(type);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ ret = H5Sclose(space);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ ret = H5Fclose(file1);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ ret = H5Fclose(file2);
+ CHECK(ret, FAIL, "H5Fclose");
+} /* end test_vl_rewrite() */
+
+/****************************************************************
+**
+** test_write_same_element():
+** Tests writing to the same element of a VL string dataset
+** using H5Sselect_elements.
+**
+****************************************************************/
+static void
+test_write_same_element(void)
+{
+#ifndef NO_WRITE_SAME_ELEMENT_TWICE
+ hid_t file1, dataset1;
+ hid_t mspace, fspace, dtype;
+ hsize_t fdim[] = {SPACE1_DIM1};
+ const char *wdata[SPACE1_DIM1] = {"Parting", "is such a", "sweet", "sorrow."};
+ const char *val[SPACE1_DIM1] = {"But", "reuniting", "is a", "great joy"};
+ hsize_t marray[] = {NUMP};
+ hsize_t coord[SPACE1_RANK][NUMP];
+ herr_t ret;
+#endif
+
+ MESSAGE(
+ 5,
+ ("Testing writing to same element of VL string dataset twice - SKIPPED for now due to no support\n"));
+#ifndef NO_WRITE_SAME_ELEMENT_TWICE
+ file1 = H5Fcreate(DATAFILE3, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(file1, FAIL, "H5Fcreate");
+
+ dtype = H5Tcopy(H5T_C_S1);
+ CHECK(dtype, FAIL, "H5Tcopy");
+
+ ret = H5Tset_size(dtype, H5T_VARIABLE);
+ CHECK(ret, FAIL, "H5Tset_size");
+
+ fspace = H5Screate_simple(SPACE1_RANK, fdim, NULL);
+ CHECK(fspace, FAIL, "H5Screate_simple");
+
+ dataset1 = H5Dcreate2(file1, DATASET, dtype, fspace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(dataset1, FAIL, "H5Dcreate");
+
+ ret = H5Dwrite(dataset1, dtype, H5S_ALL, H5S_ALL, H5P_DEFAULT, wdata);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ ret = H5Dclose(dataset1);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ ret = H5Tclose(dtype);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ ret = H5Sclose(fspace);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ ret = H5Fclose(file1);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /*
+ * Open the file. Select the same points, write values to those point locations.
+ */
+ file1 = H5Fopen(DATAFILE3, H5F_ACC_RDWR, H5P_DEFAULT);
+ CHECK(file1, FAIL, "H5Fopen");
+
+ dataset1 = H5Dopen2(file1, DATASET, H5P_DEFAULT);
+ CHECK(dataset1, FAIL, "H5Dopen");
+
+ fspace = H5Dget_space(dataset1);
+ CHECK(fspace, FAIL, "H5Dget_space");
+
+ dtype = H5Dget_type(dataset1);
+ CHECK(dtype, FAIL, "H5Dget_type");
+
+ mspace = H5Screate_simple(1, marray, NULL);
+ CHECK(mspace, FAIL, "H5Screate_simple");
+
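+ /* Elements 0 and 2 are each selected twice, so the same file elements are written more than once */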
+ coord[0][0] = 0;
+ coord[0][1] = 2;
+ coord[0][2] = 2;
+ coord[0][3] = 0;
+
+ ret = H5Sselect_elements(fspace, H5S_SELECT_SET, NUMP, (const hsize_t *)&coord);
+ CHECK(ret, FAIL, "H5Sselect_elements");
+
+ ret = H5Dwrite(dataset1, dtype, mspace, fspace, H5P_DEFAULT, val);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ ret = H5Tclose(dtype);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ ret = H5Dclose(dataset1);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ ret = H5Sclose(fspace);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ ret = H5Sclose(mspace);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ ret = H5Fclose(file1);
+ CHECK(ret, FAIL, "H5Fclose");
+#endif
+} /* test_write_same_element */
+
+/****************************************************************
+**
+** test_vlstrings(): Main VL string testing routine.
+**
+****************************************************************/
+void
+test_vlstrings(void)
+{
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing Variable-Length Strings\n"));
+
+ /* These tests use the same file */
+ /* Test basic VL string datatype */
+ test_vlstrings_basic();
+ test_vlstrings_special();
+ test_vlstring_type();
+ test_compact_vlstring();
+
+ /* Test using VL strings in attributes */
+ test_write_vl_string_attribute();
+ test_read_vl_string_attribute();
+
+ /* Test writing VL datasets in files with lots of unlinking */
+ test_vl_rewrite();
+ /* Test writing to the same element more than once using H5Sselect_elements */
+ test_write_same_element();
+} /* test_vlstrings() */
+
+/*-------------------------------------------------------------------------
+ * Function: cleanup_vlstrings
+ *
+ * Purpose: Cleanup temporary test files
+ *
+ * Return: none
+ *
+ * Programmer: Quincey Koziol
+ * September 10, 1999
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+void
+cleanup_vlstrings(void)
+{
+ H5Fdelete(DATAFILE, H5P_DEFAULT);
+ H5Fdelete(DATAFILE2, H5P_DEFAULT);
+ H5Fdelete(DATAFILE3, H5P_DEFAULT);
+}
diff --git a/test/API/tvltypes.c b/test/API/tvltypes.c
new file mode 100644
index 0000000..eca534b
--- /dev/null
+++ b/test/API/tvltypes.c
@@ -0,0 +1,3268 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://www.hdfgroup.org/licenses. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/***********************************************************
+ *
+ * Test program: tvltypes
+ *
+ * Test the Variable-Length Datatype functionality
+ *
+ *************************************************************/
+
+#include "testhdf5.h"
+
+/* #include "H5Dprivate.h" */
+
+#define FILENAME "tvltypes.h5"
+
+/* 1-D dataset with fixed dimensions */
+#define SPACE1_RANK 1
+#define SPACE1_DIM1 4
+
+/* 1-D dataset with fixed dimensions */
+#define SPACE3_RANK 1
+#define SPACE3_DIM1 128
+#define L1_INCM 16
+#define L2_INCM 8
+#define L3_INCM 3
+
+/* Default temporary buffer size - Pulled from H5Dprivate.h */
+#define H5D_TEMP_BUF_SIZE (1024 * 1024)
+
+/* 1-D dataset with fixed dimensions */
+#define SPACE4_RANK 1
+#define SPACE4_DIM_SMALL 128
+#define SPACE4_DIM_LARGE (H5D_TEMP_BUF_SIZE / 64)
+
+void *test_vltypes_alloc_custom(size_t size, void *info);
+void test_vltypes_free_custom(void *mem, void *info);
+
+/****************************************************************
+**
+** test_vltypes_alloc_custom(): Test VL datatype custom memory
+** allocation routines. This routine just uses malloc to
+** allocate the memory and increments the amount of memory
+** allocated.
+**
+****************************************************************/
+void *
+test_vltypes_alloc_custom(size_t size, void *mem_used)
+{
+ void *ret_value; /* Pointer to return */
+ const size_t extra = MAX(sizeof(void *), sizeof(size_t)); /* Extra space needed */
+ /* (This weird contortion is required on the
+ * DEC Alpha to keep the alignment correct - QAK)
+ */
+
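+ /* Layout of each block handed out: the first 'extra' bytes store the requested size
+ * (as a size_t), and the pointer returned to the library is offset past that
+ * bookkeeping header.
+ */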
+ if ((ret_value = HDmalloc(extra + size)) != NULL) {
+ *(size_t *)ret_value = size;
+ *(size_t *)mem_used += size;
+ ret_value = ((unsigned char *)ret_value) + extra;
+ } /* end if */
+
+ return (ret_value);
+}
+
+/****************************************************************
+**
+** test_vltypes_free_custom(): Test VL datatype custom memory
+** allocation routines. This routine just uses free to
+** release the memory and decrements the amount of memory
+** allocated.
+**
+****************************************************************/
+void
+test_vltypes_free_custom(void *_mem, void *mem_used)
+{
+ if (_mem) {
+ const size_t extra = MAX(sizeof(void *), sizeof(size_t)); /* Extra space needed */
+ /* (This weird contortion is required
+ * on the DEC Alpha to keep the
+ * alignment correct - QAK)
+ */
+ unsigned char *mem = ((unsigned char *)_mem) - extra; /* Pointer to actual block allocated */
+
+ *(size_t *)mem_used -= *(size_t *)((void *)mem);
+ HDfree(mem);
+ } /* end if */
+}
+
+/****************************************************************
+**
+** test_vltypes_dataset_create(): Tests that creating a dataset
+** with a VL datatype fails when the fill value write time is
+** set to H5D_FILL_TIME_NEVER.
+**
+****************************************************************/
+static void
+test_vltypes_dataset_create(void)
+{
+ hid_t fid1; /* HDF5 File IDs */
+ hid_t dcpl; /* Dataset creation property list */
+ hid_t dataset; /* Dataset ID */
+ hsize_t dims1[] = {SPACE1_DIM1};
+ hid_t sid1; /* Dataspace ID */
+ hid_t tid1; /* Datatype ID */
+ herr_t ret; /* Generic return value */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing Dataset of VL Datatype Functionality\n"));
+
+ /* Create file */
+ fid1 = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(fid1, FAIL, "H5Fcreate");
+
+ /* Create dataspace for datasets */
+ sid1 = H5Screate_simple(SPACE1_RANK, dims1, NULL);
+ CHECK(sid1, FAIL, "H5Screate_simple");
+
+ /* Create a datatype to refer to */
+ tid1 = H5Tvlen_create(H5T_NATIVE_UINT);
+ CHECK(tid1, FAIL, "H5Tvlen_create");
+
+ /* Create dataset property list */
+ dcpl = H5Pcreate(H5P_DATASET_CREATE);
+ CHECK(dcpl, FAIL, "H5Pcreate");
+
+ /* Set fill value writing time to be NEVER */
+ ret = H5Pset_fill_time(dcpl, H5D_FILL_TIME_NEVER);
+ CHECK(ret, FAIL, "H5Pset_fill_time");
+
+ /* Create a dataset; this should fail, since fill time NEVER is not allowed for VL datatypes */
+ H5E_BEGIN_TRY
+ {
+ dataset = H5Dcreate2(fid1, "Dataset1", tid1, sid1, H5P_DEFAULT, dcpl, H5P_DEFAULT);
+ }
+ H5E_END_TRY;
+ VERIFY(dataset, FAIL, "H5Dcreate2");
+
+ /* Close datatype */
+ ret = H5Tclose(tid1);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ /* Close disk dataspace */
+ ret = H5Sclose(sid1);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close dataset creation property list */
+ ret = H5Pclose(dcpl);
+ CHECK(ret, FAIL, "H5Pclose");
+
+ /* Close file */
+ ret = H5Fclose(fid1);
+ CHECK(ret, FAIL, "H5Fclose");
+}
+
+/****************************************************************
+**
+** test_vltypes_funcs(): Test some type functions that are and
+** aren't supposed to work with VL type.
+**
+****************************************************************/
+static void
+test_vltypes_funcs(void)
+{
+ hid_t type; /* Datatype ID */
+ size_t size;
+ H5T_pad_t inpad;
+ H5T_norm_t norm;
+ H5T_cset_t cset;
+ H5T_str_t strpad;
+ herr_t ret; /* Generic return value */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing some type functions for VL\n"));
+
+ /* Create a datatype to refer to */
+ type = H5Tvlen_create(H5T_IEEE_F32BE);
+ CHECK(type, FAIL, "H5Tvlen_create");
+
+ size = H5Tget_precision(type);
+ CHECK(size, 0, "H5Tget_precision");
+
+ size = H5Tget_size(type);
+ CHECK(size, 0, "H5Tget_size");
+
+ size = H5Tget_ebias(type);
+ CHECK(size, 0, "H5Tget_ebias");
+
+ ret = H5Tset_pad(type, H5T_PAD_ZERO, H5T_PAD_ONE);
+ CHECK(ret, FAIL, "H5Tset_pad");
+
+ inpad = H5Tget_inpad(type);
+ CHECK(inpad, FAIL, "H5Tget_inpad");
+
+ norm = H5Tget_norm(type);
+ CHECK(norm, FAIL, "H5Tget_norm");
+
+ ret = H5Tset_offset(type, (size_t)16);
+ CHECK(ret, FAIL, "H5Tset_offset");
+
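+ /* Character set and string padding queries apply only to string datatypes, so they should fail for this VL sequence type */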
+ H5E_BEGIN_TRY
+ {
+ cset = H5Tget_cset(type);
+ }
+ H5E_END_TRY;
+ VERIFY(cset, FAIL, "H5Tget_cset");
+
+ H5E_BEGIN_TRY
+ {
+ strpad = H5Tget_strpad(type);
+ }
+ H5E_END_TRY;
+ VERIFY(strpad, FAIL, "H5Tget_strpad");
+
+ /* Close datatype */
+ ret = H5Tclose(type);
+ CHECK(ret, FAIL, "H5Tclose");
+}
+
+/****************************************************************
+**
+** test_vltypes_vlen_atomic(): Test basic VL datatype code.
+** Tests VL datatypes of atomic datatypes
+**
+****************************************************************/
+static void
+test_vltypes_vlen_atomic(void)
+{
+ hvl_t wdata[SPACE1_DIM1]; /* Information to write */
+ hvl_t wdata2[SPACE1_DIM1]; /* Information to write */
+ hvl_t rdata[SPACE1_DIM1]; /* Information read in */
+ hvl_t fill; /* Fill value */
+ hid_t fid1; /* HDF5 File IDs */
+ hid_t dataset; /* Dataset ID */
+ hid_t sid1; /* Dataspace ID */
+ hid_t sid2; /* ID of bad dataspace (no extent set) */
+ hid_t tid1; /* Datatype ID */
+ hid_t dcpl_pid; /* Dataset creation property list ID */
+ hid_t xfer_pid; /* Dataset transfer property list ID */
+ hsize_t dims1[] = {SPACE1_DIM1};
+ hsize_t size; /* Number of bytes which will be used */
+ unsigned i, j; /* counting variables */
+ size_t mem_used = 0; /* Memory used during allocation */
+ herr_t ret; /* Generic return value */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing Basic Atomic VL Datatype Functionality\n"));
+
+ /* Allocate and initialize VL data to write */
+ for (i = 0; i < SPACE1_DIM1; i++) {
+ wdata[i].p = HDmalloc((i + 1) * sizeof(unsigned int));
+ wdata[i].len = i + 1;
+ for (j = 0; j < (i + 1); j++)
+ ((unsigned int *)wdata[i].p)[j] = i * 10 + j;
+
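+ /* wdata2 holds "nil" VL sequences: NULL pointer and zero length */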
+ wdata2[i].p = NULL;
+ wdata2[i].len = 0;
+ } /* end for */
+
+ /* Create file */
+ fid1 = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(fid1, FAIL, "H5Fcreate");
+
+ /* Create dataspace for datasets */
+ sid1 = H5Screate_simple(SPACE1_RANK, dims1, NULL);
+ CHECK(sid1, FAIL, "H5Screate_simple");
+
+ /* Create a datatype to refer to */
+ tid1 = H5Tvlen_create(H5T_NATIVE_UINT);
+ CHECK(tid1, FAIL, "H5Tvlen_create");
+
+ /* Create a dataset */
+ dataset = H5Dcreate2(fid1, "Dataset1", tid1, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(dataset, FAIL, "H5Dcreate2");
+
+ /* Read from dataset before writing data */
+ ret = H5Dread(dataset, tid1, H5S_ALL, H5S_ALL, H5P_DEFAULT, rdata);
+ CHECK(ret, FAIL, "H5Dread");
+
+ /* Check data read in */
+ for (i = 0; i < SPACE1_DIM1; i++)
+ if (rdata[i].len != 0 || rdata[i].p != NULL)
+ TestErrPrintf("VL doesn't match!, rdata[%d].len=%u, rdata[%d].p=%p\n", (int)i,
+ (unsigned)rdata[i].len, (int)i, rdata[i].p);
+
+ /* Write "nil" data to disk */
+ ret = H5Dwrite(dataset, tid1, H5S_ALL, H5S_ALL, H5P_DEFAULT, wdata2);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* Read from dataset with "nil" data */
+ ret = H5Dread(dataset, tid1, H5S_ALL, H5S_ALL, H5P_DEFAULT, rdata);
+ CHECK(ret, FAIL, "H5Dread");
+
+ /* Check data read in */
+ for (i = 0; i < SPACE1_DIM1; i++)
+ if (rdata[i].len != 0 || rdata[i].p != NULL)
+ TestErrPrintf("VL doesn't match!, rdata[%d].len=%u, rdata[%d].p=%p\n", (int)i,
+ (unsigned)rdata[i].len, (int)i, rdata[i].p);
+
+ /* Write dataset to disk */
+ ret = H5Dwrite(dataset, tid1, H5S_ALL, H5S_ALL, H5P_DEFAULT, wdata);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* Close Dataset */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Create second dataset, with fill value */
+ dcpl_pid = H5Pcreate(H5P_DATASET_CREATE);
+ CHECK(dcpl_pid, FAIL, "H5Pcreate");
+
+ /* Set the fill value for the second dataset */
+ fill.p = NULL;
+ fill.len = 0;
+ ret = H5Pset_fill_value(dcpl_pid, tid1, &fill);
+ CHECK(ret, FAIL, "H5Pset_fill_value");
+
+ /* Create a second dataset */
+ dataset = H5Dcreate2(fid1, "Dataset2", tid1, sid1, H5P_DEFAULT, dcpl_pid, H5P_DEFAULT);
+ CHECK(dataset, FAIL, "H5Dcreate2");
+
+ /* Close dataset creation property list */
+ ret = H5Pclose(dcpl_pid);
+ CHECK(ret, FAIL, "H5Pclose");
+
+ /* Read from dataset before writing data */
+ ret = H5Dread(dataset, tid1, H5S_ALL, H5S_ALL, H5P_DEFAULT, rdata);
+ CHECK(ret, FAIL, "H5Dread");
+
+ /* Check data read in */
+ for (i = 0; i < SPACE1_DIM1; i++)
+ if (rdata[i].len != 0 || rdata[i].p != NULL)
+ TestErrPrintf("VL doesn't match!, rdata[%d].len=%u, rdata[%d].p=%p\n", (int)i,
+ (unsigned)rdata[i].len, (int)i, rdata[i].p);
+
+ /* Write "nil" data to disk */
+ ret = H5Dwrite(dataset, tid1, H5S_ALL, H5S_ALL, H5P_DEFAULT, wdata2);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* Read from dataset with "nil" data */
+ ret = H5Dread(dataset, tid1, H5S_ALL, H5S_ALL, H5P_DEFAULT, rdata);
+ CHECK(ret, FAIL, "H5Dread");
+
+ /* Check data read in */
+ for (i = 0; i < SPACE1_DIM1; i++)
+ if (rdata[i].len != 0 || rdata[i].p != NULL)
+ TestErrPrintf("VL doesn't match!, rdata[%d].len=%u, rdata[%d].p=%p\n", (int)i,
+ (unsigned)rdata[i].len, (int)i, rdata[i].p);
+
+ /* Write data to disk */
+ ret = H5Dwrite(dataset, tid1, H5S_ALL, H5S_ALL, H5P_DEFAULT, wdata);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* Close Dataset */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close datatype */
+ ret = H5Tclose(tid1);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ /* Close disk dataspace */
+ ret = H5Sclose(sid1);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close file */
+ ret = H5Fclose(fid1);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Open the file for data checking */
+ fid1 = H5Fopen(FILENAME, H5F_ACC_RDONLY, H5P_DEFAULT);
+ CHECK(fid1, FAIL, "H5Fopen");
+
+ /* Open a dataset */
+ dataset = H5Dopen2(fid1, "Dataset1", H5P_DEFAULT);
+ CHECK(dataset, FAIL, "H5Dopen2");
+
+ /* Get dataspace for datasets */
+ sid1 = H5Dget_space(dataset);
+ CHECK(sid1, FAIL, "H5Dget_space");
+
+ /* Get datatype for dataset */
+ tid1 = H5Dget_type(dataset);
+ CHECK(tid1, FAIL, "H5Dget_type");
+
+ /* Change to the custom memory allocation routines for reading VL data */
+ xfer_pid = H5Pcreate(H5P_DATASET_XFER);
+ CHECK(xfer_pid, FAIL, "H5Pcreate");
+
+ ret = H5Pset_vlen_mem_manager(xfer_pid, test_vltypes_alloc_custom, &mem_used, test_vltypes_free_custom,
+ &mem_used);
+ CHECK(ret, FAIL, "H5Pset_vlen_mem_manager");
+
+ /* Make certain the correct amount of memory will be used */
+ ret = H5Dvlen_get_buf_size(dataset, tid1, sid1, &size);
+ CHECK(ret, FAIL, "H5Dvlen_get_buf_size");
+
+ /* 10 elements allocated = 1 + 2 + 3 + 4 elements for each array position */
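+ /* (SPACE1_DIM1 * (SPACE1_DIM1 + 1)) / 2 is the closed form of the sum 1 + 2 + ... + SPACE1_DIM1 */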
+ VERIFY(size, ((SPACE1_DIM1 * (SPACE1_DIM1 + 1)) / 2) * sizeof(unsigned int), "H5Dvlen_get_buf_size");
+
+ /* Read dataset from disk */
+ ret = H5Dread(dataset, tid1, H5S_ALL, H5S_ALL, xfer_pid, rdata);
+ CHECK(ret, FAIL, "H5Dread");
+
+ /* Make certain the correct amount of memory has been used */
+ /* 10 elements allocated = 1 + 2 + 3 + 4 elements for each array position */
+ VERIFY(mem_used, ((SPACE1_DIM1 * (SPACE1_DIM1 + 1)) / 2) * sizeof(unsigned int), "H5Dread");
+
+ /* Compare data read in */
+ for (i = 0; i < SPACE1_DIM1; i++) {
+ if (wdata[i].len != rdata[i].len) {
+ TestErrPrintf("%d: VL data lengths don't match!, wdata[%d].len=%d, rdata[%d].len=%d\n", __LINE__,
+ (int)i, (int)wdata[i].len, (int)i, (int)rdata[i].len);
+ continue;
+ } /* end if */
+ for (j = 0; j < rdata[i].len; j++) {
+ if (((unsigned int *)wdata[i].p)[j] != ((unsigned int *)rdata[i].p)[j]) {
+ TestErrPrintf("VL data values don't match!, wdata[%d].p[%d]=%d, rdata[%d].p[%d]=%d\n", (int)i,
+ (int)j, (int)((unsigned int *)wdata[i].p)[j], (int)i, (int)j,
+ (int)((unsigned int *)rdata[i].p)[j]);
+ continue;
+ } /* end if */
+ } /* end for */
+ } /* end for */
+
+ /* Reclaim the read VL data */
+ ret = H5Treclaim(tid1, sid1, xfer_pid, rdata);
+ CHECK(ret, FAIL, "H5Treclaim");
+
+ /* Make certain the VL memory has been freed */
+ VERIFY(mem_used, 0, "H5Treclaim");
+
+ /* Close Dataset */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close datatype */
+ ret = H5Tclose(tid1);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ /* Close disk dataspace */
+ ret = H5Sclose(sid1);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close dataset transfer property list */
+ ret = H5Pclose(xfer_pid);
+ CHECK(ret, FAIL, "H5Pclose");
+
+ /* Open second dataset */
+ dataset = H5Dopen2(fid1, "Dataset2", H5P_DEFAULT);
+ CHECK(dataset, FAIL, "H5Dopen2");
+
+ /* Get dataspace for datasets */
+ sid1 = H5Dget_space(dataset);
+ CHECK(sid1, FAIL, "H5Dget_space");
+
+ /* Get datatype for dataset */
+ tid1 = H5Dget_type(dataset);
+ CHECK(tid1, FAIL, "H5Dget_type");
+
+ /* Create a "bad" dataspace with no extent set */
+ sid2 = H5Screate(H5S_SIMPLE);
+ CHECK(sid2, FAIL, "H5Screate");
+
+ /* Change to the custom memory allocation routines for reading VL data */
+ xfer_pid = H5Pcreate(H5P_DATASET_XFER);
+ CHECK(xfer_pid, FAIL, "H5Pcreate");
+
+ ret = H5Pset_vlen_mem_manager(xfer_pid, test_vltypes_alloc_custom, &mem_used, test_vltypes_free_custom,
+ &mem_used);
+ CHECK(ret, FAIL, "H5Pset_vlen_mem_manager");
+
+ /* Make certain the correct amount of memory will be used */
+ ret = H5Dvlen_get_buf_size(dataset, tid1, sid1, &size);
+ CHECK(ret, FAIL, "H5Dvlen_get_buf_size");
+
+ /* 10 elements allocated = 1 + 2 + 3 + 4 elements for each array position */
+ VERIFY(size, ((SPACE1_DIM1 * (SPACE1_DIM1 + 1)) / 2) * sizeof(unsigned int), "H5Dvlen_get_buf_size");
+
+ /* Try to call H5Dvlen_get_buf_size with the bad dataspace; this should fail */
+ H5E_BEGIN_TRY
+ {
+ ret = H5Dvlen_get_buf_size(dataset, tid1, sid2, &size);
+ }
+ H5E_END_TRY
+ VERIFY(ret, FAIL, "H5Dvlen_get_buf_size");
+
+ /* Read dataset from disk */
+ ret = H5Dread(dataset, tid1, H5S_ALL, H5S_ALL, xfer_pid, rdata);
+ CHECK(ret, FAIL, "H5Dread");
+
+ /* Make certain the correct amount of memory has been used */
+ /* 10 elements allocated = 1 + 2 + 3 + 4 elements for each array position */
+ VERIFY(mem_used, ((SPACE1_DIM1 * (SPACE1_DIM1 + 1)) / 2) * sizeof(unsigned int), "H5Dread");
+
+ /* Compare data read in */
+ for (i = 0; i < SPACE1_DIM1; i++) {
+ if (wdata[i].len != rdata[i].len) {
+ TestErrPrintf("%d: VL data lengths don't match!, wdata[%d].len=%d, rdata[%d].len=%d\n", __LINE__,
+ (int)i, (int)wdata[i].len, (int)i, (int)rdata[i].len);
+ continue;
+ } /* end if */
+ for (j = 0; j < rdata[i].len; j++) {
+ if (((unsigned int *)wdata[i].p)[j] != ((unsigned int *)rdata[i].p)[j]) {
+ TestErrPrintf("VL data values don't match!, wdata[%d].p[%d]=%d, rdata[%d].p[%d]=%d\n", (int)i,
+ (int)j, (int)((unsigned int *)wdata[i].p)[j], (int)i, (int)j,
+ (int)((unsigned int *)rdata[i].p)[j]);
+ continue;
+ } /* end if */
+ } /* end for */
+ } /* end for */
+
+ /* Try to reclaim read data using "bad" dataspace with no extent
+ * Should fail */
+ H5E_BEGIN_TRY
+ {
+ ret = H5Treclaim(tid1, sid2, xfer_pid, rdata);
+ }
+ H5E_END_TRY
+ VERIFY(ret, FAIL, "H5Treclaim");
+
+ /* Reclaim the read VL data */
+ ret = H5Treclaim(tid1, sid1, xfer_pid, rdata);
+ CHECK(ret, FAIL, "H5Treclaim");
+
+ /* Make certain the VL memory has been freed */
+ VERIFY(mem_used, 0, "H5Treclaim");
+
+ /* Close Dataset */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Reclaim the write VL data */
+ ret = H5Treclaim(tid1, sid1, H5P_DEFAULT, wdata);
+ CHECK(ret, FAIL, "H5Treclaim");
+
+ /* Close datatype */
+ ret = H5Tclose(tid1);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ /* Close disk dataspace */
+ ret = H5Sclose(sid1);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close dataset transfer property list */
+ ret = H5Pclose(xfer_pid);
+ CHECK(ret, FAIL, "H5Pclose");
+
+ /* Close file */
+ ret = H5Fclose(fid1);
+ CHECK(ret, FAIL, "H5Fclose");
+
+} /* end test_vltypes_vlen_atomic() */
+
+/****************************************************************
+**
+** rewrite_vltypes_vlen_atomic(): Check for memory leaks in basic VL datatype code.
+** Checks for memory leaks with VL datatypes of atomic datatypes.
+**
+****************************************************************/
+static void
+rewrite_vltypes_vlen_atomic(void)
+{
+ hvl_t wdata[SPACE1_DIM1]; /* Information to write */
+ hvl_t rdata[SPACE1_DIM1]; /* Information read in */
+ hid_t fid1; /* HDF5 File IDs */
+ hid_t dataset; /* Dataset ID */
+ hid_t sid1; /* Dataspace ID */
+ hid_t tid1; /* Datatype ID */
+ hid_t xfer_pid; /* Dataset transfer property list ID */
+ hsize_t size; /* Number of bytes which will be used */
+ unsigned i, j; /* counting variables */
+ size_t mem_used = 0; /* Memory used during allocation */
+ unsigned increment = 4;
+ herr_t ret; /* Generic return value */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Check Memory Leak for Basic Atomic VL Datatype Functionality\n"));
+
+ /* Allocate and initialize VL data to write */
+ for (i = 0; i < SPACE1_DIM1; i++) {
+ wdata[i].p = HDmalloc((i + increment) * sizeof(unsigned int));
+ wdata[i].len = i + increment;
+ for (j = 0; j < (i + increment); j++)
+ ((unsigned int *)wdata[i].p)[j] = i * 20 + j;
+ } /* end for */
+
+ /* Open file created in test_vltypes_vlen_atomic() */
+ fid1 = H5Fopen(FILENAME, H5F_ACC_RDWR, H5P_DEFAULT);
+ CHECK(fid1, FAIL, "H5Fopen");
+
+ /* Open the dataset created in test_vltypes_vlen_atomic() */
+ dataset = H5Dopen2(fid1, "Dataset1", H5P_DEFAULT);
+ CHECK(dataset, FAIL, "H5Dopen2");
+
+ /* Get dataspace for dataset */
+ sid1 = H5Dget_space(dataset);
+ CHECK(sid1, FAIL, "H5Dget_space");
+
+ /* Get datatype for dataset */
+ tid1 = H5Dget_type(dataset);
+ CHECK(tid1, FAIL, "H5Dget_type");
+
+ /* Write dataset to disk */
+ ret = H5Dwrite(dataset, tid1, H5S_ALL, H5S_ALL, H5P_DEFAULT, wdata);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* Close Dataset */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close datatype */
+ ret = H5Tclose(tid1);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ /* Close disk dataspace */
+ ret = H5Sclose(sid1);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close file */
+ ret = H5Fclose(fid1);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Open the file for data checking */
+ fid1 = H5Fopen(FILENAME, H5F_ACC_RDONLY, H5P_DEFAULT);
+ CHECK(fid1, FAIL, "H5Fopen");
+
+ /* Open a dataset */
+ dataset = H5Dopen2(fid1, "Dataset1", H5P_DEFAULT);
+ CHECK(dataset, FAIL, "H5Dopen2");
+
+ /* Get dataspace for datasets */
+ sid1 = H5Dget_space(dataset);
+ CHECK(sid1, FAIL, "H5Dget_space");
+
+ /* Get datatype for dataset */
+ tid1 = H5Dget_type(dataset);
+ CHECK(tid1, FAIL, "H5Dget_type");
+
+ /* Change to the custom memory allocation routines for reading VL data */
+ xfer_pid = H5Pcreate(H5P_DATASET_XFER);
+ CHECK(xfer_pid, FAIL, "H5Pcreate");
+
+ ret = H5Pset_vlen_mem_manager(xfer_pid, test_vltypes_alloc_custom, &mem_used, test_vltypes_free_custom,
+ &mem_used);
+ CHECK(ret, FAIL, "H5Pset_vlen_mem_manager");
+
+ /* Make certain the correct amount of memory will be used */
+ ret = H5Dvlen_get_buf_size(dataset, tid1, sid1, &size);
+ CHECK(ret, FAIL, "H5Dvlen_get_buf_size");
+
+ /* 22 elements allocated = 4+5+6+7 elements for each array position */
+ VERIFY(size, 22 * sizeof(unsigned int), "H5Dvlen_get_buf_size");
+
+ /* Read dataset from disk */
+ ret = H5Dread(dataset, tid1, H5S_ALL, H5S_ALL, xfer_pid, rdata);
+ CHECK(ret, FAIL, "H5Dread");
+
+ /* Make certain the correct amount of memory has been used */
+ /* 22 elements allocated = 4+5+6+7 elements for each array position */
+ VERIFY(mem_used, 22 * sizeof(unsigned int), "H5Dread");
+
+ /* Compare data read in */
+ for (i = 0; i < SPACE1_DIM1; i++) {
+ if (wdata[i].len != rdata[i].len) {
+ TestErrPrintf("%d: VL data lengths don't match!, wdata[%d].len=%d, rdata[%d].len=%d\n", __LINE__,
+ (int)i, (int)wdata[i].len, (int)i, (int)rdata[i].len);
+ continue;
+ } /* end if */
+ for (j = 0; j < rdata[i].len; j++) {
+ if (((unsigned int *)wdata[i].p)[j] != ((unsigned int *)rdata[i].p)[j]) {
+ TestErrPrintf("VL data values don't match!, wdata[%d].p[%d]=%d, rdata[%d].p[%d]=%d\n", (int)i,
+ (int)j, (int)((unsigned int *)wdata[i].p)[j], (int)i, (int)j,
+ (int)((unsigned int *)rdata[i].p)[j]);
+ continue;
+ } /* end if */
+ } /* end for */
+ } /* end for */
+
+ /* Reclaim the read VL data */
+ ret = H5Treclaim(tid1, sid1, xfer_pid, rdata);
+ CHECK(ret, FAIL, "H5Treclaim");
+
+ /* Make certain the VL memory has been freed */
+ VERIFY(mem_used, 0, "H5Treclaim");
+
+ /* Reclaim the write VL data */
+ ret = H5Treclaim(tid1, sid1, H5P_DEFAULT, wdata);
+ CHECK(ret, FAIL, "H5Treclaim");
+
+ /* Close Dataset */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close datatype */
+ ret = H5Tclose(tid1);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ /* Close disk dataspace */
+ ret = H5Sclose(sid1);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close dataset transfer property list */
+ ret = H5Pclose(xfer_pid);
+ CHECK(ret, FAIL, "H5Pclose");
+
+ /* Close file */
+ ret = H5Fclose(fid1);
+ CHECK(ret, FAIL, "H5Fclose");
+
+} /* end rewrite_vltypes_vlen_atomic() */
+
+/****************************************************************
+**
+** test_vltypes_vlen_compound(): Test basic VL datatype code.
+** Test VL datatypes of compound datatypes
+**
+****************************************************************/
+static void
+test_vltypes_vlen_compound(void)
+{
+ typedef struct { /* Struct that the VL sequences are composed of */
+ int i;
+ float f;
+ } s1;
+ hvl_t wdata[SPACE1_DIM1]; /* Information to write */
+ hvl_t rdata[SPACE1_DIM1]; /* Information read in */
+ hid_t fid1; /* HDF5 File IDs */
+ hid_t dataset; /* Dataset ID */
+ hid_t sid1; /* Dataspace ID */
+ hid_t tid1, tid2; /* Datatype IDs */
+ hid_t xfer_pid; /* Dataset transfer property list ID */
+ hsize_t dims1[] = {SPACE1_DIM1};
+ hsize_t size; /* Number of bytes which will be used */
+ unsigned i, j; /* counting variables */
+ size_t mem_used = 0; /* Memory used during allocation */
+ herr_t ret; /* Generic return value */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing Basic Compound VL Datatype Functionality\n"));
+
+ /* Allocate and initialize VL data to write */
+ for (i = 0; i < SPACE1_DIM1; i++) {
+ wdata[i].p = HDmalloc((i + 1) * sizeof(s1));
+ wdata[i].len = i + 1;
+ for (j = 0; j < (i + 1); j++) {
+ ((s1 *)wdata[i].p)[j].i = (int)(i * 10 + j);
+ ((s1 *)wdata[i].p)[j].f = (float)(i * 20 + j) / 3.0F;
+ } /* end for */
+ } /* end for */
+
+ /* Create file */
+ fid1 = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(fid1, FAIL, "H5Fcreate");
+
+ /* Create dataspace for datasets */
+ sid1 = H5Screate_simple(SPACE1_RANK, dims1, NULL);
+ CHECK(sid1, FAIL, "H5Screate_simple");
+
+ /* Create the base compound type */
+ tid2 = H5Tcreate(H5T_COMPOUND, sizeof(s1));
+ CHECK(tid2, FAIL, "H5Tcreate");
+
+ /* Insert fields */
+ ret = H5Tinsert(tid2, "i", HOFFSET(s1, i), H5T_NATIVE_INT);
+ CHECK(ret, FAIL, "H5Tinsert");
+ ret = H5Tinsert(tid2, "f", HOFFSET(s1, f), H5T_NATIVE_FLOAT);
+ CHECK(ret, FAIL, "H5Tinsert");
+
+ /* Create a datatype to refer to */
+ tid1 = H5Tvlen_create(tid2);
+ CHECK(tid1, FAIL, "H5Tvlen_create");
+
+ /* Create a dataset */
+ dataset = H5Dcreate2(fid1, "Dataset1", tid1, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(dataset, FAIL, "H5Dcreate2");
+
+ /* Write dataset to disk */
+ ret = H5Dwrite(dataset, tid1, H5S_ALL, H5S_ALL, H5P_DEFAULT, wdata);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* Change to the custom memory allocation routines for reading VL data */
+ xfer_pid = H5Pcreate(H5P_DATASET_XFER);
+ CHECK(xfer_pid, FAIL, "H5Pcreate");
+
+ ret = H5Pset_vlen_mem_manager(xfer_pid, test_vltypes_alloc_custom, &mem_used, test_vltypes_free_custom,
+ &mem_used);
+ CHECK(ret, FAIL, "H5Pset_vlen_mem_manager");
+
+ /* Make certain the correct amount of memory will be used */
+ ret = H5Dvlen_get_buf_size(dataset, tid1, sid1, &size);
+ CHECK(ret, FAIL, "H5Dvlen_get_buf_size");
+
+ /* 10 elements allocated = 1 + 2 + 3 + 4 elements for each array position */
+ VERIFY(size, ((SPACE1_DIM1 * (SPACE1_DIM1 + 1)) / 2) * sizeof(s1), "H5Dvlen_get_buf_size");
+
+ /* Read dataset from disk */
+ ret = H5Dread(dataset, tid1, H5S_ALL, H5S_ALL, xfer_pid, rdata);
+ CHECK(ret, FAIL, "H5Dread");
+
+ /* Make certain the correct amount of memory has been used */
+ /* 10 elements allocated = 1 + 2 + 3 + 4 elements for each array position */
+ VERIFY(mem_used, ((SPACE1_DIM1 * (SPACE1_DIM1 + 1)) / 2) * sizeof(s1), "H5Dread");
+
+ /* Compare data read in */
+ for (i = 0; i < SPACE1_DIM1; i++) {
+ if (wdata[i].len != rdata[i].len) {
+ TestErrPrintf("%d: VL data length don't match!, wdata[%d].len=%d, rdata[%d].len=%d\n", __LINE__,
+ (int)i, (int)wdata[i].len, (int)i, (int)rdata[i].len);
+ continue;
+ } /* end if */
+ for (j = 0; j < rdata[i].len; j++) {
+ if (((s1 *)wdata[i].p)[j].i != ((s1 *)rdata[i].p)[j].i) {
+ TestErrPrintf("VL data values don't match!, wdata[%d].p[%d].i=%d, rdata[%d].p[%d].i=%d\n",
+ (int)i, (int)j, (int)((s1 *)wdata[i].p)[j].i, (int)i, (int)j,
+ (int)((s1 *)rdata[i].p)[j].i);
+ continue;
+ } /* end if */
+ if (!H5_FLT_ABS_EQUAL(((s1 *)wdata[i].p)[j].f, ((s1 *)rdata[i].p)[j].f)) {
+ TestErrPrintf("VL data values don't match!, wdata[%d].p[%d].f=%f, rdata[%d].p[%d].f=%f\n",
+ (int)i, (int)j, (double)((s1 *)wdata[i].p)[j].f, (int)i, (int)j,
+ (double)((s1 *)rdata[i].p)[j].f);
+ continue;
+ } /* end if */
+ } /* end for */
+ } /* end for */
+
+ /* Reclaim the VL data */
+ ret = H5Treclaim(tid1, sid1, xfer_pid, rdata);
+ CHECK(ret, FAIL, "H5Treclaim");
+
+ /* Make certain the VL memory has been freed */
+ VERIFY(mem_used, 0, "H5Treclaim");
+
+ /* Reclaim the write VL data */
+ ret = H5Treclaim(tid1, sid1, H5P_DEFAULT, wdata);
+ CHECK(ret, FAIL, "H5Treclaim");
+
+ /* Close Dataset */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close datatype */
+ ret = H5Tclose(tid1);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ /* Close datatype */
+ ret = H5Tclose(tid2);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ /* Close disk dataspace */
+ ret = H5Sclose(sid1);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close dataset transfer property list */
+ ret = H5Pclose(xfer_pid);
+ CHECK(ret, FAIL, "H5Pclose");
+
+ /* Close file */
+ ret = H5Fclose(fid1);
+ CHECK(ret, FAIL, "H5Fclose");
+
+} /* end test_vltypes_vlen_compound() */
+
+/****************************************************************
+**
+** rewrite_vltypes_vlen_compound(): Check for memory leaks in basic VL datatype code.
+** Checks for memory leaks with VL datatypes of compound datatypes.
+**
+****************************************************************/
+static void
+rewrite_vltypes_vlen_compound(void)
+{
+ typedef struct { /* Struct that the VL sequences are composed of */
+ int i;
+ float f;
+ } s1;
+ hvl_t wdata[SPACE1_DIM1]; /* Information to write */
+ hvl_t rdata[SPACE1_DIM1]; /* Information read in */
+ hid_t fid1; /* HDF5 File IDs */
+ hid_t dataset; /* Dataset ID */
+ hid_t sid1; /* Dataspace ID */
+ hid_t tid1, tid2; /* Datatype IDs */
+ hid_t xfer_pid; /* Dataset transfer property list ID */
+ hsize_t size; /* Number of bytes which will be used */
+ unsigned i, j; /* counting variables */
+ size_t mem_used = 0; /* Memory used during allocation */
+ unsigned increment = 4;
+ herr_t ret; /* Generic return value */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Check Memory Leak for Basic Compound VL Datatype Functionality\n"));
+
+ /* Allocate and initialize VL data to write */
+ for (i = 0; i < SPACE1_DIM1; i++) {
+ wdata[i].p = HDmalloc((i + increment) * sizeof(s1));
+ wdata[i].len = i + increment;
+ for (j = 0; j < (i + increment); j++) {
+ ((s1 *)wdata[i].p)[j].i = (int)(i * 40 + j);
+ ((s1 *)wdata[i].p)[j].f = (float)(i * 60 + j) / 3.0F;
+ } /* end for */
+ } /* end for */
+
+ /* Open the file created in test_vltypes_vlen_compound() */
+ fid1 = H5Fopen(FILENAME, H5F_ACC_RDWR, H5P_DEFAULT);
+ CHECK(fid1, FAIL, "H5Fopen");
+
+ /* Create the base compound type */
+ tid2 = H5Tcreate(H5T_COMPOUND, sizeof(s1));
+ CHECK(tid2, FAIL, "H5Tcreate");
+
+ ret = H5Tinsert(tid2, "i", HOFFSET(s1, i), H5T_NATIVE_INT);
+ CHECK(ret, FAIL, "H5Tinsert");
+ ret = H5Tinsert(tid2, "f", HOFFSET(s1, f), H5T_NATIVE_FLOAT);
+ CHECK(ret, FAIL, "H5Tinsert");
+
+ /* Create a datatype to refer to */
+ tid1 = H5Tvlen_create(tid2);
+ CHECK(tid1, FAIL, "H5Tvlen_create");
+
+ /* Open the dataset created in test_vltypes_vlen_compound() */
+ dataset = H5Dopen2(fid1, "Dataset1", H5P_DEFAULT);
+ CHECK(dataset, FAIL, "H5Dopen2");
+
+ /* Get dataspace for dataset */
+ sid1 = H5Dget_space(dataset);
+ CHECK(sid1, FAIL, "H5Dget_space");
+
+ /* Write dataset to disk */
+ ret = H5Dwrite(dataset, tid1, H5S_ALL, H5S_ALL, H5P_DEFAULT, wdata);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* Change to the custom memory allocation routines for reading VL data */
+ xfer_pid = H5Pcreate(H5P_DATASET_XFER);
+ CHECK(xfer_pid, FAIL, "H5Pcreate");
+
+ ret = H5Pset_vlen_mem_manager(xfer_pid, test_vltypes_alloc_custom, &mem_used, test_vltypes_free_custom,
+ &mem_used);
+ CHECK(ret, FAIL, "H5Pset_vlen_mem_manager");
+
+ /* Make certain the correct amount of memory will be used */
+ ret = H5Dvlen_get_buf_size(dataset, tid1, sid1, &size);
+ CHECK(ret, FAIL, "H5Dvlen_get_buf_size");
+
+ /* 22 elements allocated = 4 + 5 + 6 + 7 elements for each array position */
+ VERIFY(size, 22 * sizeof(s1), "H5Dvlen_get_buf_size");
+
+ /* Read dataset from disk */
+ ret = H5Dread(dataset, tid1, H5S_ALL, H5S_ALL, xfer_pid, rdata);
+ CHECK(ret, FAIL, "H5Dread");
+
+ /* Make certain the correct amount of memory has been used */
+ /* 22 elements allocated = 4 + 5 + 6 + 7 elements for each array position */
+ VERIFY(mem_used, 22 * sizeof(s1), "H5Dread");
+
+ /* Compare data read in */
+ for (i = 0; i < SPACE1_DIM1; i++) {
+ if (wdata[i].len != rdata[i].len) {
+ TestErrPrintf("%d: VL data length don't match!, wdata[%d].len=%d, rdata[%d].len=%d\n", __LINE__,
+ (int)i, (int)wdata[i].len, (int)i, (int)rdata[i].len);
+ continue;
+ } /* end if */
+ for (j = 0; j < rdata[i].len; j++) {
+ if (((s1 *)wdata[i].p)[j].i != ((s1 *)rdata[i].p)[j].i) {
+ TestErrPrintf("VL data values don't match!, wdata[%d].p[%d].i=%d, rdata[%d].p[%d].i=%d\n",
+ (int)i, (int)j, (int)((s1 *)wdata[i].p)[j].i, (int)i, (int)j,
+ (int)((s1 *)rdata[i].p)[j].i);
+ continue;
+ } /* end if */
+ if (!H5_FLT_ABS_EQUAL(((s1 *)wdata[i].p)[j].f, ((s1 *)rdata[i].p)[j].f)) {
+ TestErrPrintf("VL data values don't match!, wdata[%d].p[%d].f=%f, rdata[%d].p[%d].f=%f\n",
+ (int)i, (int)j, (double)((s1 *)wdata[i].p)[j].f, (int)i, (int)j,
+ (double)((s1 *)rdata[i].p)[j].f);
+ continue;
+ } /* end if */
+ } /* end for */
+ } /* end for */
+
+ /* Reclaim the VL data */
+ ret = H5Treclaim(tid1, sid1, xfer_pid, rdata);
+ CHECK(ret, FAIL, "H5Treclaim");
+
+ /* Make certain the VL memory has been freed */
+ VERIFY(mem_used, 0, "H5Treclaim");
+
+ /* Reclaim the write VL data */
+ ret = H5Treclaim(tid1, sid1, H5P_DEFAULT, wdata);
+ CHECK(ret, FAIL, "H5Treclaim");
+
+ /* Close Dataset */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close datatype */
+ ret = H5Tclose(tid1);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ /* Close datatype */
+ ret = H5Tclose(tid2);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ /* Close disk dataspace */
+ ret = H5Sclose(sid1);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close dataset transfer property list */
+ ret = H5Pclose(xfer_pid);
+ CHECK(ret, FAIL, "H5Pclose");
+
+ /* Close file */
+ ret = H5Fclose(fid1);
+ CHECK(ret, FAIL, "H5Fclose");
+
+} /* end rewrite_vltypes_vlen_compound() */
+
+/****************************************************************
+**
+** test_vltypes_compound_vlen_vlen(): Test basic VL datatype code.
+** Tests compound datatypes with VL datatypes of VL datatypes.
+**
+****************************************************************/
+static void
+test_vltypes_compound_vlen_vlen(void)
+{
+ typedef struct { /* Struct that the compound type is composed of */
+ int i;
+ float f;
+ hvl_t v;
+ } s1;
+ s1 *wdata; /* data to write */
+ s1 *rdata; /* data to read */
+ hid_t fid1; /* HDF5 File IDs */
+ hid_t dataset; /* Dataset ID */
+ hid_t sid1; /* Dataspace ID */
+ hid_t tid1, tid2, tid3; /* Datatype IDs */
+ hsize_t dims1[] = {SPACE3_DIM1};
+ unsigned i, j, k; /* counting variables */
+ hvl_t *t1, *t2; /* Temporary pointer to VL information */
+ herr_t ret; /* Generic return value */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing Compound Datatypes with VL Atomic Datatype Component Functionality\n"));
+
+ /* Allocate and initialize VL data to write */
+ wdata = (s1 *)HDmalloc(sizeof(s1) * SPACE3_DIM1);
+ CHECK_PTR(wdata, "HDmalloc");
+ rdata = (s1 *)HDmalloc(sizeof(s1) * SPACE3_DIM1);
+ CHECK_PTR(rdata, "HDmalloc");
+ for (i = 0; i < SPACE3_DIM1; i++) {
+ wdata[i].i = (int)(i * 10);
+ wdata[i].f = (float)(i * 20) / 3.0F;
+ wdata[i].v.p = HDmalloc((i + L1_INCM) * sizeof(hvl_t));
+ wdata[i].v.len = i + L1_INCM;
+ for (t1 = (hvl_t *)((wdata[i].v).p), j = 0; j < (i + L1_INCM); j++, t1++) {
+ t1->p = HDmalloc((j + L2_INCM) * sizeof(unsigned int));
+ t1->len = j + L2_INCM;
+ for (k = 0; k < j + L2_INCM; k++)
+ ((unsigned int *)t1->p)[k] = i * 100 + j * 10 + k;
+ } /* end for */
+ } /* end for */
+
+ /* Create file */
+ fid1 = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(fid1, FAIL, "H5Fcreate");
+
+ /* Create dataspace for datasets */
+ sid1 = H5Screate_simple(SPACE3_RANK, dims1, NULL);
+ CHECK(sid1, FAIL, "H5Screate_simple");
+
+ /* Create a VL datatype to refer to */
+ tid3 = H5Tvlen_create(H5T_NATIVE_UINT);
+ CHECK(tid3, FAIL, "H5Tvlen_create");
+
+ /* Create a VL datatype to refer to */
+ tid1 = H5Tvlen_create(tid3);
+ CHECK(tid1, FAIL, "H5Tvlen_create");
+
+ /* Create the base compound type */
+ tid2 = H5Tcreate(H5T_COMPOUND, sizeof(s1));
+ CHECK(tid2, FAIL, "H5Tcreate");
+
+ /* Insert fields */
+ ret = H5Tinsert(tid2, "i", HOFFSET(s1, i), H5T_NATIVE_INT);
+ CHECK(ret, FAIL, "H5Tinsert");
+ ret = H5Tinsert(tid2, "f", HOFFSET(s1, f), H5T_NATIVE_FLOAT);
+ CHECK(ret, FAIL, "H5Tinsert");
+ ret = H5Tinsert(tid2, "v", HOFFSET(s1, v), tid1);
+ CHECK(ret, FAIL, "H5Tinsert");
+
+ /* Create a dataset */
+ dataset = H5Dcreate2(fid1, "Dataset1", tid2, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(dataset, FAIL, "H5Dcreate2");
+
+ /* Write dataset to disk */
+ ret = H5Dwrite(dataset, tid2, H5S_ALL, H5S_ALL, H5P_DEFAULT, wdata);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* Close Dataset */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close file */
+ ret = H5Fclose(fid1);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Open file */
+ fid1 = H5Fopen(FILENAME, H5F_ACC_RDONLY, H5P_DEFAULT);
+ CHECK(fid1, FAIL, "H5Fopen");
+
+ /* Open a dataset */
+ dataset = H5Dopen2(fid1, "Dataset1", H5P_DEFAULT);
+ CHECK(dataset, FAIL, "H5Dopen2");
+
+ /* Read dataset from disk */
+ ret = H5Dread(dataset, tid2, H5S_ALL, H5S_ALL, H5P_DEFAULT, rdata);
+ CHECK(ret, FAIL, "H5Dread");
+
+ /* Compare data read in */
+ for (i = 0; i < SPACE3_DIM1; i++) {
+ if (wdata[i].i != rdata[i].i) {
+ TestErrPrintf("Integer components don't match!, wdata[%d].i=%d, rdata[%d].i=%d\n", (int)i,
+ (int)wdata[i].i, (int)i, (int)rdata[i].i);
+ continue;
+ } /* end if */
+ if (!H5_FLT_ABS_EQUAL(wdata[i].f, rdata[i].f)) {
+ TestErrPrintf("Float components don't match!, wdata[%d].f=%f, rdata[%d].f=%f\n", (int)i,
+ (double)wdata[i].f, (int)i, (double)rdata[i].f);
+ continue;
+ } /* end if */
+
+ if (wdata[i].v.len != rdata[i].v.len) {
+ TestErrPrintf("%d: VL data length don't match!, wdata[%d].v.len=%d, rdata[%d].v.len=%d\n",
+ __LINE__, (int)i, (int)wdata[i].v.len, (int)i, (int)rdata[i].v.len);
+ continue;
+ } /* end if */
+
+ for (t1 = (hvl_t *)(wdata[i].v.p), t2 = (hvl_t *)(rdata[i].v.p), j = 0; j < rdata[i].v.len;
+ j++, t1++, t2++) {
+ if (t1->len != t2->len) {
+ TestErrPrintf("%d: VL data length don't match!, i=%d, j=%d, t1->len=%d, t2->len=%d\n",
+ __LINE__, (int)i, (int)j, (int)t1->len, (int)t2->len);
+ continue;
+ } /* end if */
+ for (k = 0; k < t2->len; k++) {
+ if (((unsigned int *)t1->p)[k] != ((unsigned int *)t2->p)[k]) {
+ TestErrPrintf("VL data values don't match!, t1->p[%d]=%d, t2->p[%d]=%d\n", (int)k,
+ (int)((unsigned int *)t1->p)[k], (int)k, (int)((unsigned int *)t2->p)[k]);
+ continue;
+ } /* end if */
+ } /* end for */
+ } /* end for */
+ } /* end for */
+
+ /* Reclaim the VL data */
+ ret = H5Treclaim(tid2, sid1, H5P_DEFAULT, rdata);
+ CHECK(ret, FAIL, "H5Treclaim");
+
+ /* Reclaim the write VL data */
+ ret = H5Treclaim(tid2, sid1, H5P_DEFAULT, wdata);
+ CHECK(ret, FAIL, "H5Treclaim");
+
+ /* Close Dataset */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close disk dataspace */
+ ret = H5Sclose(sid1);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close datatype */
+ ret = H5Tclose(tid2);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ /* Close datatype */
+ ret = H5Tclose(tid1);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ /* Close datatype */
+ ret = H5Tclose(tid3);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ /* Close file */
+ ret = H5Fclose(fid1);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Release buffers */
+ HDfree(wdata);
+ HDfree(rdata);
+} /* end test_vltypes_compound_vlen_vlen() */
+
+/****************************************************************
+**
+** test_vltypes_compound_vlstr(): Test VL datatype code.
+** Tests VL datatypes of compound datatypes with VL string.
+** The dataset is chunked and extendible, and the data is
+** rewritten with shorter VL data.
+**
+****************************************************************/
+static void
+test_vltypes_compound_vlstr(void)
+{
+ typedef enum { red, blue, green } e1;
+ typedef struct {
+ char *string;
+ e1 color;
+ } s2;
+ typedef struct { /* Struct that the compound type is composed of */
+ hvl_t v;
+ } s1;
+ s1 wdata[SPACE1_DIM1]; /* data to write */
+ s1 wdata2[SPACE1_DIM1]; /* data to write */
+ s1 rdata[SPACE1_DIM1]; /* data to read */
+ s1 rdata2[SPACE1_DIM1]; /* data to read */
+ char str[64] = "a\0";
+ hid_t fid1; /* HDF5 File IDs */
+ hid_t dataset, dset2; /* Dataset ID */
+ hid_t sid1, sid2, filespace, filespace2; /* Dataspace ID */
+ hid_t tid1, tid2, tid3, tid4, tid5; /* Datatype IDs */
+ hid_t cparms;
+ hsize_t dims1[] = {SPACE1_DIM1};
+ hsize_t chunk_dims[] = {SPACE1_DIM1 / 2};
+ hsize_t maxdims[] = {H5S_UNLIMITED};
+ hsize_t size[] = {SPACE1_DIM1};
+ hsize_t offset[] = {0};
+ unsigned i, j; /* counting variables */
+ s2 *t1, *t2; /* Temporary pointer to VL information */
+ int val;
+ herr_t ret; /* Generic return value */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing VL Datatype of Compound Datatype with VL String Functionality\n"));
+
+ /* Allocate and initialize VL data to write */
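+ /* str is never reset, so each successive element gets a string one 'm' longer than the last */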
+ for (i = 0; i < SPACE1_DIM1; i++) {
+ wdata[i].v.p = (s2 *)HDmalloc((i + L3_INCM) * sizeof(s2));
+ wdata[i].v.len = i + L3_INCM;
+ for (t1 = (s2 *)((wdata[i].v).p), j = 0; j < (i + L3_INCM); j++, t1++) {
+ HDstrcat(str, "m");
+ t1->string = (char *)HDmalloc(HDstrlen(str) * sizeof(char) + 1);
+ HDstrcpy(t1->string, str);
+ /*t1->color = red;*/
+ t1->color = blue;
+ }
+ } /* end for */
+
+ /* Create file */
+ fid1 = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(fid1, FAIL, "H5Fcreate");
+
+ /* Create dataspace for datasets */
+ sid1 = H5Screate_simple(SPACE1_RANK, dims1, maxdims);
+ CHECK(sid1, FAIL, "H5Screate_simple");
+
+ /* Create a VL string type*/
+ tid4 = H5Tcopy(H5T_C_S1);
+ CHECK(tid4, FAIL, "H5Tcopy");
+ ret = H5Tset_size(tid4, H5T_VARIABLE);
+ CHECK(ret, FAIL, "H5Tset_size");
+
+ /* Create an enum type */
+ tid3 = H5Tenum_create(H5T_STD_I32LE);
+ val = 0;
+ ret = H5Tenum_insert(tid3, "RED", &val);
+ CHECK(ret, FAIL, "H5Tenum_insert");
+ val = 1;
+ ret = H5Tenum_insert(tid3, "BLUE", &val);
+ CHECK(ret, FAIL, "H5Tenum_insert");
+ val = 2;
+ ret = H5Tenum_insert(tid3, "GREEN", &val);
+ CHECK(ret, FAIL, "H5Tenum_insert");
+
+ /* Create the first layer compound type */
+ tid5 = H5Tcreate(H5T_COMPOUND, sizeof(s2));
+ CHECK(tid5, FAIL, "H5Tcreate");
+ /* Insert fields */
+ ret = H5Tinsert(tid5, "string", HOFFSET(s2, string), tid4);
+ CHECK(ret, FAIL, "H5Tinsert");
+ /* Insert fields */
+ ret = H5Tinsert(tid5, "enumerate", HOFFSET(s2, color), tid3);
+ CHECK(ret, FAIL, "H5Tinsert");
+
+ /* Create a VL datatype of first layer compound type */
+ tid1 = H5Tvlen_create(tid5);
+ CHECK(tid1, FAIL, "H5Tvlen_create");
+
+ /* Create the base compound type */
+ tid2 = H5Tcreate(H5T_COMPOUND, sizeof(s1));
+ CHECK(tid2, FAIL, "H5Tcreate");
+
+ /* Insert fields */
+ ret = H5Tinsert(tid2, "v", HOFFSET(s1, v), tid1);
+ CHECK(ret, FAIL, "H5Tinsert");
+
+ /* Modify dataset creation properties, i.e. enable chunking */
+ cparms = H5Pcreate(H5P_DATASET_CREATE);
+ ret = H5Pset_chunk(cparms, SPACE1_RANK, chunk_dims);
+ CHECK(ret, FAIL, "H5Pset_chunk");
+
+ /* Create a dataset */
+ dataset = H5Dcreate2(fid1, "Dataset1", tid2, sid1, H5P_DEFAULT, cparms, H5P_DEFAULT);
+ CHECK(dataset, FAIL, "H5Dcreate2");
+
+ /* Extend the dataset. This call ensures that the dataset has SPACE1_DIM1 (4) elements. */
+ ret = H5Dset_extent(dataset, size);
+ CHECK(ret, FAIL, "H5Dset_extent");
+
+ /* Select a hyperslab */
+ filespace = H5Dget_space(dataset);
+ ret = H5Sselect_hyperslab(filespace, H5S_SELECT_SET, offset, NULL, dims1, NULL);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* Write dataset to disk */
+ ret = H5Dwrite(dataset, tid2, sid1, filespace, H5P_DEFAULT, wdata);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ ret = H5Fflush(fid1, H5F_SCOPE_GLOBAL);
+ CHECK(ret, FAIL, "H5Fflush");
+
+ /* Close Dataset */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close disk dataspace */
+ ret = H5Sclose(filespace);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close datatype */
+ ret = H5Tclose(tid4);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ /* Close datatype */
+ ret = H5Tclose(tid5);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ /* Close datatype */
+ ret = H5Tclose(tid3);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ /* Close datatype */
+ ret = H5Tclose(tid2);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ /* Close datatype */
+ ret = H5Tclose(tid1);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ /* Close Property list */
+ ret = H5Pclose(cparms);
+ CHECK(ret, FAIL, "H5Pclose");
+
+ /* Close file */
+ ret = H5Fclose(fid1);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Open file */
+ fid1 = H5Fopen(FILENAME, H5F_ACC_RDWR, H5P_DEFAULT);
+ CHECK(fid1, FAIL, "H5Fopen");
+
+ /* Open the dataset */
+ dset2 = H5Dopen2(fid1, "Dataset1", H5P_DEFAULT);
+ CHECK(dset2, FAIL, "H5Dopen2");
+
+ /* Get the data type */
+ tid2 = H5Dget_type(dset2);
+ CHECK(tid2, FAIL, "H5Dget_type");
+
+ /* Read dataset from disk */
+ ret = H5Dread(dset2, tid2, H5S_ALL, H5S_ALL, H5P_DEFAULT, rdata);
+ CHECK(ret, FAIL, "H5Dread");
+
+ /* Compare data read in */
+ for (i = 0; i < SPACE1_DIM1; i++) {
+ if (wdata[i].v.len != rdata[i].v.len) {
+ TestErrPrintf("%d: VL data length don't match!, wdata[%d].v.len=%d, rdata[%d].v.len=%d\n",
+ __LINE__, (int)i, (int)wdata[i].v.len, (int)i, (int)rdata[i].v.len);
+ continue;
+ } /* end if */
+
+ for (t1 = (s2 *)(wdata[i].v.p), t2 = (s2 *)(rdata[i].v.p), j = 0; j < rdata[i].v.len;
+ j++, t1++, t2++) {
+ if (HDstrcmp(t1->string, t2->string) != 0) {
+ TestErrPrintf("VL data values don't match!, t1->string=%s, t2->string=%s\n", t1->string,
+ t2->string);
+ continue;
+ } /* end if */
+ if (t1->color != t2->color) {
+ TestErrPrintf("VL data values don't match!, t1->color=%d, t2->color=%d\n", t1->color,
+ t2->color);
+ continue;
+ } /* end if */
+ } /* end for */
+ } /* end for */
+
+ /* Reclaim the VL data */
+ ret = H5Treclaim(tid2, sid1, H5P_DEFAULT, rdata);
+ CHECK(ret, FAIL, "H5Treclaim");
+
+ /* Reclaim the write VL data */
+ ret = H5Treclaim(tid2, sid1, H5P_DEFAULT, wdata);
+ CHECK(ret, FAIL, "H5Treclaim");
+
+ /* Re-initialize the write buffer with shorter VL sequences (length i + 1 instead of i + L3_INCM) */
+ HDstrcpy(str, "bbbbbbbb\0");
+ for (i = 0; i < SPACE1_DIM1; i++) {
+ wdata2[i].v.p = (s2 *)HDmalloc((i + 1) * sizeof(s2));
+ wdata2[i].v.len = i + 1;
+ for (t1 = (s2 *)(wdata2[i].v).p, j = 0; j < i + 1; j++, t1++) {
+ HDstrcat(str, "pp");
+ t1->string = (char *)HDmalloc(HDstrlen(str) * sizeof(char) + 1);
+ HDstrcpy(t1->string, str);
+ t1->color = green;
+ }
+ } /* end for */
+
+ /* Select a hyperslab */
+ filespace2 = H5Dget_space(dset2);
+ ret = H5Sselect_hyperslab(filespace2, H5S_SELECT_SET, offset, NULL, dims1, NULL);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ /* Create dataspace for datasets */
+ sid2 = H5Screate_simple(SPACE1_RANK, dims1, NULL);
+ CHECK(sid1, FAIL, "H5Screate_simple");
+
+ /* Write dataset to disk */
+ ret = H5Dwrite(dset2, tid2, sid2, filespace2, H5P_DEFAULT, &wdata2);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* Read dataset from disk */
+ ret = H5Dread(dset2, tid2, H5S_ALL, H5S_ALL, H5P_DEFAULT, rdata2);
+ CHECK(ret, FAIL, "H5Dread");
+
+ /* Compare data read in */
+ for (i = 0; i < SPACE1_DIM1; i++) {
+ if (wdata2[i].v.len != rdata2[i].v.len) {
+ TestErrPrintf("%d: VL data length don't match!, wdata2[%d].v.len=%d, rdata2[%d].v.len=%d\n",
+ __LINE__, (int)i, (int)wdata2[i].v.len, (int)i, (int)rdata2[i].v.len);
+ continue;
+ } /* end if */
+
+ for (t1 = (s2 *)(wdata2[i].v.p), t2 = (s2 *)(rdata2[i].v.p), j = 0; j < rdata2[i].v.len;
+ j++, t1++, t2++) {
+ if (HDstrcmp(t1->string, t2->string) != 0) {
+ TestErrPrintf("VL data values don't match!, t1->string=%s, t2->string=%s\n", t1->string,
+ t2->string);
+ continue;
+ } /* end if */
+ if (t1->color != t2->color) {
+ TestErrPrintf("VL data values don't match!, t1->color=%d, t2->color=%d\n", t1->color,
+ t2->color);
+ continue;
+ } /* end if */
+ } /* end for */
+ } /* end for */
+
+ /* Reclaim the write VL data */
+ ret = H5Treclaim(tid2, sid1, H5P_DEFAULT, wdata2);
+ CHECK(ret, FAIL, "H5Treclaim");
+
+ /* Reclaim the VL data */
+ ret = H5Treclaim(tid2, sid1, H5P_DEFAULT, rdata2);
+ CHECK(ret, FAIL, "H5Treclaim");
+
+ ret = H5Dclose(dset2);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close disk dataspace */
+ ret = H5Sclose(sid1);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close disk dataspace */
+ ret = H5Sclose(sid2);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close disk dataspace */
+ ret = H5Sclose(filespace2);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close datatype */
+ ret = H5Tclose(tid2);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ /* Close file */
+ ret = H5Fclose(fid1);
+ CHECK(ret, FAIL, "H5Fclose");
+} /* end test_vltypes_compound_vlstr() */
+
+/****************************************************************
+**
+** test_vltypes_compound_vlen_atomic(): Test basic VL datatype code.
+** Tests compound datatypes with VL datatypes of atomic datatypes.
+**
+****************************************************************/
+static void
+test_vltypes_compound_vlen_atomic(void)
+{
+ typedef struct { /* Struct that the VL sequences are composed of */
+ int i;
+ float f;
+ hvl_t v;
+ } s1;
+ s1 wdata[SPACE1_DIM1]; /* Information to write */
+ s1 rdata[SPACE1_DIM1]; /* Information read in */
+ s1 fill; /* Fill value */
+ hid_t fid1; /* HDF5 File IDs */
+ hid_t dataset; /* Dataset ID */
+ hid_t sid1; /* Dataspace ID */
+ hid_t tid1, tid2; /* Datatype IDs */
+ hid_t xfer_pid; /* Dataset transfer property list ID */
+ hid_t dcpl_pid; /* Dataset creation property list ID */
+ hsize_t dims1[] = {SPACE1_DIM1};
+ hsize_t size; /* Number of bytes which will be used */
+ unsigned i, j; /* counting variables */
+ size_t mem_used = 0; /* Memory used during allocation */
+ herr_t ret; /* Generic return value */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing Compound Datatypes with VL Atomic Datatype Component Functionality\n"));
+
+ /* Allocate and initialize VL data to write */
+ for (i = 0; i < SPACE1_DIM1; i++) {
+ wdata[i].i = (int)(i * 10);
+ wdata[i].f = (float)(i * 20) / 3.0F;
+ wdata[i].v.p = HDmalloc((i + 1) * sizeof(unsigned int));
+ wdata[i].v.len = i + 1;
+ for (j = 0; j < (i + 1); j++)
+ ((unsigned int *)wdata[i].v.p)[j] = i * 10 + j;
+ } /* end for */
+
+ /* Create file */
+ fid1 = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(fid1, FAIL, "H5Fcreate");
+
+ /* Create dataspace for datasets */
+ sid1 = H5Screate_simple(SPACE1_RANK, dims1, NULL);
+ CHECK(sid1, FAIL, "H5Screate_simple");
+
+ /* Create a VL datatype to refer to */
+ tid1 = H5Tvlen_create(H5T_NATIVE_UINT);
+ CHECK(tid1, FAIL, "H5Tvlen_create");
+
+ /* Create the base compound type */
+ tid2 = H5Tcreate(H5T_COMPOUND, sizeof(s1));
+ CHECK(tid2, FAIL, "H5Tcreate");
+
+ /* Insert fields */
+ ret = H5Tinsert(tid2, "i", HOFFSET(s1, i), H5T_NATIVE_INT);
+ CHECK(ret, FAIL, "H5Tinsert");
+ ret = H5Tinsert(tid2, "f", HOFFSET(s1, f), H5T_NATIVE_FLOAT);
+ CHECK(ret, FAIL, "H5Tinsert");
+ ret = H5Tinsert(tid2, "v", HOFFSET(s1, v), tid1);
+ CHECK(ret, FAIL, "H5Tinsert");
+
+ /* Create a dataset */
+ dataset = H5Dcreate2(fid1, "Dataset1", tid2, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(dataset, FAIL, "H5Dcreate2");
+
+ /* Write dataset to disk */
+ ret = H5Dwrite(dataset, tid2, H5S_ALL, H5S_ALL, H5P_DEFAULT, wdata);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* Change to the custom memory allocation routines for reading VL data */
+ xfer_pid = H5Pcreate(H5P_DATASET_XFER);
+ CHECK(xfer_pid, FAIL, "H5Pcreate");
+
+ ret = H5Pset_vlen_mem_manager(xfer_pid, test_vltypes_alloc_custom, &mem_used, test_vltypes_free_custom,
+ &mem_used);
+ CHECK(ret, FAIL, "H5Pset_vlen_mem_manager");
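+ /* The custom allocator callbacks (test_vltypes_alloc_custom and
+  * test_vltypes_free_custom) track the net number of bytes currently
+  * allocated in mem_used, which the VERIFY calls below use to check the
+  * size of the read buffer and to confirm that H5Treclaim frees it all. */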
+
+ /* Make certain the correct amount of memory will be used */
+ ret = H5Dvlen_get_buf_size(dataset, tid2, sid1, &size);
+ CHECK(ret, FAIL, "H5Dvlen_get_buf_size");
+
+ /* 10 elements allocated = 1 + 2 + 3 + 4 elements for each array position */
+ VERIFY(size, ((SPACE1_DIM1 * (SPACE1_DIM1 + 1)) / 2) * sizeof(unsigned int), "H5Dvlen_get_buf_size");
+
+ /* Read dataset from disk */
+ ret = H5Dread(dataset, tid2, H5S_ALL, H5S_ALL, xfer_pid, rdata);
+ CHECK(ret, FAIL, "H5Dread");
+
+ /* Make certain the correct amount of memory has been used */
+ /* 10 elements allocated = 1 + 2 + 3 + 4 elements for each array position */
+ VERIFY(mem_used, ((SPACE1_DIM1 * (SPACE1_DIM1 + 1)) / 2) * sizeof(unsigned int), "H5Dread");
+
+ /* Compare data read in */
+ for (i = 0; i < SPACE1_DIM1; i++) {
+ if (wdata[i].i != rdata[i].i) {
+ TestErrPrintf("Integer components don't match!, wdata[%d].i=%d, rdata[%d].i=%d\n", (int)i,
+ (int)wdata[i].i, (int)i, (int)rdata[i].i);
+ continue;
+ } /* end if */
+ if (!H5_FLT_ABS_EQUAL(wdata[i].f, rdata[i].f)) {
+ TestErrPrintf("Float components don't match!, wdata[%d].f=%f, rdata[%d].f=%f\n", (int)i,
+ (double)wdata[i].f, (int)i, (double)rdata[i].f);
+ continue;
+ } /* end if */
+ if (wdata[i].v.len != rdata[i].v.len) {
+ TestErrPrintf("%d: VL data length don't match!, wdata[%d].v.len=%d, rdata[%d].v.len=%d\n",
+ __LINE__, (int)i, (int)wdata[i].v.len, (int)i, (int)rdata[i].v.len);
+ continue;
+ } /* end if */
+ for (j = 0; j < rdata[i].v.len; j++) {
+ if (((unsigned int *)wdata[i].v.p)[j] != ((unsigned int *)rdata[i].v.p)[j]) {
+ TestErrPrintf("VL data values don't match!, wdata[%d].v.p[%d]=%d, rdata[%d].v.p[%d]=%d\n",
+ (int)i, (int)j, (int)((unsigned int *)wdata[i].v.p)[j], (int)i, (int)j,
+ (int)((unsigned int *)rdata[i].v.p)[j]);
+ continue;
+ } /* end if */
+ } /* end for */
+ } /* end for */
+
+ /* Reclaim the VL data */
+ ret = H5Treclaim(tid2, sid1, xfer_pid, rdata);
+ CHECK(ret, FAIL, "H5Treclaim");
+
+ /* Make certain the VL memory has been freed */
+ VERIFY(mem_used, 0, "H5Treclaim");
+
+ /* Close Dataset */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Create a second dataset, with a fill value */
+ dcpl_pid = H5Pcreate(H5P_DATASET_CREATE);
+ CHECK(dcpl_pid, FAIL, "H5Pcreate");
+
+ /* Set the fill value for the second dataset */
+ HDmemset(&fill, 0, sizeof(s1));
+ ret = H5Pset_fill_value(dcpl_pid, tid2, &fill);
+ CHECK(ret, FAIL, "H5Pset_fill_value");
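+ /* Note: dcpl_pid carries the all-zero fill value, so the read performed
+  * below before any data is written is expected to return zeroed elements
+  * with NULL VL pointers. */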
+
+ dataset = H5Dcreate2(fid1, "Dataset2", tid2, sid1, H5P_DEFAULT, dcpl_pid, H5P_DEFAULT);
+ CHECK(dataset, FAIL, "H5Dcreate2");
+
+ /* Close dataset creation property list */
+ ret = H5Pclose(dcpl_pid);
+ CHECK(ret, FAIL, "H5Pclose");
+
+ /* Read from dataset before writing data */
+ ret = H5Dread(dataset, tid2, H5S_ALL, H5S_ALL, H5P_DEFAULT, rdata);
+ CHECK(ret, FAIL, "H5Dread");
+
+ /* Check data read in */
+ for (i = 0; i < SPACE1_DIM1; i++)
+ if (rdata[i].i != 0 || !H5_FLT_ABS_EQUAL(rdata[i].f, 0.0F) || rdata[i].v.len != 0 ||
+ rdata[i].v.p != NULL)
+ TestErrPrintf(
+ "VL doesn't match!, rdata[%d].i=%d, rdata[%d].f=%f, rdata[%d].v.len=%u, rdata[%d].v.p=%p\n",
+ (int)i, rdata[i].i, (int)i, (double)rdata[i].f, (int)i, (unsigned)rdata[i].v.len, (int)i,
+ rdata[i].v.p);
+
+ /* Write dataset to disk */
+ ret = H5Dwrite(dataset, tid2, H5S_ALL, H5S_ALL, H5P_DEFAULT, wdata);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* Read dataset from disk */
+ ret = H5Dread(dataset, tid2, H5S_ALL, H5S_ALL, H5P_DEFAULT, rdata);
+ CHECK(ret, FAIL, "H5Dread");
+
+ /* Compare data read in */
+ for (i = 0; i < SPACE1_DIM1; i++) {
+ if (wdata[i].i != rdata[i].i) {
+ TestErrPrintf("Integer components don't match!, wdata[%d].i=%d, rdata[%d].i=%d\n", (int)i,
+ (int)wdata[i].i, (int)i, (int)rdata[i].i);
+ continue;
+ } /* end if */
+ if (!H5_FLT_ABS_EQUAL(wdata[i].f, rdata[i].f)) {
+ TestErrPrintf("Float components don't match!, wdata[%d].f=%f, rdata[%d].f=%f\n", (int)i,
+ (double)wdata[i].f, (int)i, (double)rdata[i].f);
+ continue;
+ } /* end if */
+ if (wdata[i].v.len != rdata[i].v.len) {
+ TestErrPrintf("%d: VL data length don't match!, wdata[%d].v.len=%d, rdata[%d].v.len=%d\n",
+ __LINE__, (int)i, (int)wdata[i].v.len, (int)i, (int)rdata[i].v.len);
+ continue;
+ } /* end if */
+ for (j = 0; j < rdata[i].v.len; j++) {
+ if (((unsigned int *)wdata[i].v.p)[j] != ((unsigned int *)rdata[i].v.p)[j]) {
+ TestErrPrintf("VL data values don't match!, wdata[%d].v.p[%d]=%d, rdata[%d].v.p[%d]=%d\n",
+ (int)i, (int)j, (int)((unsigned int *)wdata[i].v.p)[j], (int)i, (int)j,
+ (int)((unsigned int *)rdata[i].v.p)[j]);
+ continue;
+ } /* end if */
+ } /* end for */
+ } /* end for */
+
+ /* Reclaim the VL data */
+ ret = H5Treclaim(tid2, sid1, H5P_DEFAULT, rdata);
+ CHECK(ret, FAIL, "H5Treclaim");
+
+ /* Close Dataset */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Reclaim the write VL data */
+ ret = H5Treclaim(tid2, sid1, H5P_DEFAULT, wdata);
+ CHECK(ret, FAIL, "H5Treclaim");
+
+ /* Close datatype */
+ ret = H5Tclose(tid2);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ /* Close datatype */
+ ret = H5Tclose(tid1);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ /* Close disk dataspace */
+ ret = H5Sclose(sid1);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close dataset transfer property list */
+ ret = H5Pclose(xfer_pid);
+ CHECK(ret, FAIL, "H5Pclose");
+
+ /* Close file */
+ ret = H5Fclose(fid1);
+ CHECK(ret, FAIL, "H5Fclose");
+
+} /* end test_vltypes_compound_vlen_atomic() */
+
+/****************************************************************
+**
+** rewrite_vltypes_compound_vlen_atomic(): Check for memory leaks in
+** basic VL datatype code when rewriting compound datatypes with VL
+** datatypes of atomic datatypes.
+**
+****************************************************************/
+static void
+rewrite_vltypes_compound_vlen_atomic(void)
+{
+ typedef struct { /* Struct that the VL sequences are composed of */
+ int i;
+ float f;
+ hvl_t v;
+ } s1;
+ s1 wdata[SPACE1_DIM1]; /* Information to write */
+ s1 rdata[SPACE1_DIM1]; /* Information read in */
+ hid_t fid1; /* HDF5 File IDs */
+ hid_t dataset; /* Dataset ID */
+ hid_t sid1; /* Dataspace ID */
+ hid_t tid1, tid2; /* Datatype IDs */
+ hid_t xfer_pid; /* Dataset transfer property list ID */
+ hsize_t size; /* Number of bytes which will be used */
+ unsigned i, j; /* counting variables */
+ size_t mem_used = 0; /* Memory used during allocation */
+ unsigned increment = 4;
+ herr_t ret; /* Generic return value */
+
+ /* Output message about test being performed */
+ MESSAGE(5,
+ ("Checking memory leak for compound datatype with VL Atomic Datatype Component Functionality\n"));
+
+ /* Allocate and initialize VL data to write */
+ for (i = 0; i < SPACE1_DIM1; i++) {
+ wdata[i].i = (int)(i * 40);
+ wdata[i].f = (float)(i * 50) / 3.0F;
+ wdata[i].v.p = HDmalloc((i + increment) * sizeof(unsigned int));
+ wdata[i].v.len = i + increment;
+ for (j = 0; j < (i + increment); j++)
+ ((unsigned int *)wdata[i].v.p)[j] = i * 60 + j;
+ } /* end for */
+
+ /* Open file */
+ fid1 = H5Fopen(FILENAME, H5F_ACC_RDWR, H5P_DEFAULT);
+ CHECK(fid1, FAIL, "H5Fopen");
+
+ /* Create a VL datatype to refer to */
+ tid1 = H5Tvlen_create(H5T_NATIVE_UINT);
+ CHECK(tid1, FAIL, "H5Tvlen_create");
+
+ /* Create the base compound type */
+ tid2 = H5Tcreate(H5T_COMPOUND, sizeof(s1));
+ CHECK(tid2, FAIL, "H5Tcreate");
+
+ /* Insert fields */
+ ret = H5Tinsert(tid2, "i", HOFFSET(s1, i), H5T_NATIVE_INT);
+ CHECK(ret, FAIL, "H5Tinsert");
+ ret = H5Tinsert(tid2, "f", HOFFSET(s1, f), H5T_NATIVE_FLOAT);
+ CHECK(ret, FAIL, "H5Tinsert");
+ ret = H5Tinsert(tid2, "v", HOFFSET(s1, v), tid1);
+ CHECK(ret, FAIL, "H5Tinsert");
+
+ /* Open the dataset */
+ dataset = H5Dopen2(fid1, "Dataset1", H5P_DEFAULT);
+ CHECK(dataset, FAIL, "H5Dopen2");
+
+ /* Get the dataspace for the dataset */
+ sid1 = H5Dget_space(dataset);
+ CHECK(sid1, FAIL, "H5Dget_space");
+
+ /* Write dataset to disk */
+ ret = H5Dwrite(dataset, tid2, H5S_ALL, H5S_ALL, H5P_DEFAULT, wdata);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* Change to the custom memory allocation routines for reading VL data */
+ xfer_pid = H5Pcreate(H5P_DATASET_XFER);
+ CHECK(xfer_pid, FAIL, "H5Pcreate");
+
+ ret = H5Pset_vlen_mem_manager(xfer_pid, test_vltypes_alloc_custom, &mem_used, test_vltypes_free_custom,
+ &mem_used);
+ CHECK(ret, FAIL, "H5Pset_vlen_mem_manager");
+
+ /* Make certain the correct amount of memory will be used */
+ ret = H5Dvlen_get_buf_size(dataset, tid2, sid1, &size);
+ CHECK(ret, FAIL, "H5Dvlen_get_buf_size");
+
+ /* 22 elements allocated = 4+5+6+7 elements for each array position */
+ VERIFY(size, 22 * sizeof(unsigned int), "H5Dvlen_get_buf_size");
+
+ /* Read dataset from disk */
+ ret = H5Dread(dataset, tid2, H5S_ALL, H5S_ALL, xfer_pid, rdata);
+ CHECK(ret, FAIL, "H5Dread");
+
+ /* Make certain the correct amount of memory has been used */
+ /* 22 elements allocated = 4+5+6+7 elements for each array position */
+ VERIFY(mem_used, 22 * sizeof(unsigned int), "H5Dread");
+
+ /* Compare data read in */
+ for (i = 0; i < SPACE1_DIM1; i++) {
+ if (wdata[i].i != rdata[i].i) {
+ TestErrPrintf("Integer components don't match!, wdata[%d].i=%d, rdata[%d].i=%d\n", (int)i,
+ (int)wdata[i].i, (int)i, (int)rdata[i].i);
+ continue;
+ } /* end if */
+ if (!H5_FLT_ABS_EQUAL(wdata[i].f, rdata[i].f)) {
+ TestErrPrintf("Float components don't match!, wdata[%d].f=%f, rdata[%d].f=%f\n", (int)i,
+ (double)wdata[i].f, (int)i, (double)rdata[i].f);
+ continue;
+ } /* end if */
+ if (wdata[i].v.len != rdata[i].v.len) {
+ TestErrPrintf("%d: VL data length don't match!, wdata[%d].v.len=%d, rdata[%d].v.len=%d\n",
+ __LINE__, (int)i, (int)wdata[i].v.len, (int)i, (int)rdata[i].v.len);
+ continue;
+ } /* end if */
+ for (j = 0; j < rdata[i].v.len; j++) {
+ if (((unsigned int *)wdata[i].v.p)[j] != ((unsigned int *)rdata[i].v.p)[j]) {
+ TestErrPrintf("VL data values don't match!, wdata[%d].v.p[%d]=%d, rdata[%d].v.p[%d]=%d\n",
+ (int)i, (int)j, (int)((unsigned int *)wdata[i].v.p)[j], (int)i, (int)j,
+ (int)((unsigned int *)rdata[i].v.p)[j]);
+ continue;
+ } /* end if */
+ } /* end for */
+ } /* end for */
+
+ /* Reclaim the VL data */
+ ret = H5Treclaim(tid2, sid1, xfer_pid, rdata);
+ CHECK(ret, FAIL, "H5Treclaim");
+
+ /* Make certain the VL memory has been freed */
+ VERIFY(mem_used, 0, "H5Treclaim");
+
+ /* Reclaim the write VL data */
+ ret = H5Treclaim(tid2, sid1, H5P_DEFAULT, wdata);
+ CHECK(ret, FAIL, "H5Treclaim");
+
+ /* Close Dataset */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close datatype */
+ ret = H5Tclose(tid2);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ /* Close datatype */
+ ret = H5Tclose(tid1);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ /* Close disk dataspace */
+ ret = H5Sclose(sid1);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close dataset transfer property list */
+ ret = H5Pclose(xfer_pid);
+ CHECK(ret, FAIL, "H5Pclose");
+
+ /* Close file */
+ ret = H5Fclose(fid1);
+ CHECK(ret, FAIL, "H5Fclose");
+
+} /* end rewrite_vltypes_compound_vlen_atomic() */
+
+/****************************************************************
+**
+** vlen_size_func(): Helper for the nested VL tests. Computes the
+** expected number of atomic elements in the nested VL data, i.e.
+** the sum of the first n triangular numbers (1 + 3 + 6 + ...).
+**
+****************************************************************/
+static size_t
+vlen_size_func(unsigned long n)
+{
+ size_t u = 1;
+ size_t tmp = 1;
+ size_t result = 1;
+
+ while (u < n) {
+ u++;
+ tmp += u;
+ result += tmp;
+ }
+ return (result);
+}
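+/* For example, with SPACE1_DIM1 == 4 (as the "1 + 2 + 3 + 4" comments in this
+ * file indicate), vlen_size_func(4) returns 1 + 3 + 6 + 10 = 20, the total
+ * number of unsigned ints stored across the nested sequences. */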
+
+/****************************************************************
+**
+** test_vltypes_vlen_vlen_atomic(): Test basic VL datatype code.
+** Tests VL datatype with VL datatypes of atomic datatypes.
+**
+****************************************************************/
+static void
+test_vltypes_vlen_vlen_atomic(void)
+{
+ hvl_t wdata[SPACE1_DIM1]; /* Information to write */
+ hvl_t rdata[SPACE1_DIM1]; /* Information read in */
+ hvl_t *t1, *t2; /* Temporary pointer to VL information */
+ hid_t fid1; /* HDF5 File IDs */
+ hid_t dataset; /* Dataset ID */
+ hid_t sid1; /* Dataspace ID */
+ hid_t tid1, tid2; /* Datatype IDs */
+ hid_t xfer_pid; /* Dataset transfer property list ID */
+ hsize_t dims1[] = {SPACE1_DIM1};
+ hsize_t size; /* Number of bytes which will be used */
+ unsigned i, j, k; /* counting variables */
+ size_t mem_used = 0; /* Memory used during allocation */
+ herr_t ret; /* Generic return value */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing VL Datatypes with VL Atomic Datatype Component Functionality\n"));
+
+ /* Allocate and initialize VL data to write */
+ for (i = 0; i < SPACE1_DIM1; i++) {
+ wdata[i].p = HDmalloc((i + 1) * sizeof(hvl_t));
+ if (wdata[i].p == NULL) {
+ TestErrPrintf("Cannot allocate memory for VL data! i=%u\n", i);
+ return;
+ } /* end if */
+ wdata[i].len = i + 1;
+ for (t1 = (hvl_t *)(wdata[i].p), j = 0; j < (i + 1); j++, t1++) {
+ t1->p = HDmalloc((j + 1) * sizeof(unsigned int));
+ if (t1->p == NULL) {
+ TestErrPrintf("Cannot allocate memory for VL data! i=%u, j=%u\n", i, j);
+ return;
+ } /* end if */
+ t1->len = j + 1;
+ for (k = 0; k < (j + 1); k++)
+ ((unsigned int *)t1->p)[k] = i * 100 + j * 10 + k;
+ } /* end for */
+ } /* end for */
+
+ /* Create file */
+ fid1 = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(fid1, FAIL, "H5Fcreate");
+
+ /* Create dataspace for datasets */
+ sid1 = H5Screate_simple(SPACE1_RANK, dims1, NULL);
+ CHECK(sid1, FAIL, "H5Screate_simple");
+
+ /* Create a VL datatype to refer to */
+ tid1 = H5Tvlen_create(H5T_NATIVE_UINT);
+ CHECK(tid1, FAIL, "H5Tvlen_create");
+
+ /* Create the base VL type */
+ tid2 = H5Tvlen_create(tid1);
+ CHECK(tid2, FAIL, "H5Tvlen_create");
+
+ /* Create a dataset */
+ dataset = H5Dcreate2(fid1, "Dataset1", tid2, sid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(dataset, FAIL, "H5Dcreate2");
+
+ /* Write dataset to disk */
+ ret = H5Dwrite(dataset, tid2, H5S_ALL, H5S_ALL, H5P_DEFAULT, wdata);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* Close Dataset */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close datatype */
+ ret = H5Tclose(tid2);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ /* Close datatype */
+ ret = H5Tclose(tid1);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ /* Close disk dataspace */
+ ret = H5Sclose(sid1);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close file */
+ ret = H5Fclose(fid1);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Open file */
+ fid1 = H5Fopen(FILENAME, H5F_ACC_RDONLY, H5P_DEFAULT);
+ CHECK(fid1, FAIL, "H5Fopen");
+
+ /* Create dataspace for datasets */
+ sid1 = H5Screate_simple(SPACE1_RANK, dims1, NULL);
+ CHECK(sid1, FAIL, "H5Screate_simple");
+
+ /* Create a VL datatype to refer to */
+ tid1 = H5Tvlen_create(H5T_NATIVE_UINT);
+ CHECK(tid1, FAIL, "H5Tvlen_create");
+
+ /* Create the base VL type */
+ tid2 = H5Tvlen_create(tid1);
+ CHECK(tid2, FAIL, "H5Tvlen_create");
+
+ /* Open a dataset */
+ dataset = H5Dopen2(fid1, "Dataset1", H5P_DEFAULT);
+ CHECK(dataset, FAIL, "H5Dopen2");
+
+ /* Change to the custom memory allocation routines for reading VL data */
+ xfer_pid = H5Pcreate(H5P_DATASET_XFER);
+ CHECK(xfer_pid, FAIL, "H5Pcreate");
+
+ ret = H5Pset_vlen_mem_manager(xfer_pid, test_vltypes_alloc_custom, &mem_used, test_vltypes_free_custom,
+ &mem_used);
+ CHECK(ret, FAIL, "H5Pset_vlen_mem_manager");
+
+ /* Make certain the correct amount of memory was used */
+ ret = H5Dvlen_get_buf_size(dataset, tid2, sid1, &size);
+ CHECK(ret, FAIL, "H5Dvlen_get_buf_size");
+
+ /* 10 hvl_t elements allocated = 1 + 2 + 3 + 4 elements for each array position */
+ /* 20 unsigned int elements allocated = 1 + 3 + 6 + 10 elements */
+ VERIFY(size,
+ (hsize_t)(((SPACE1_DIM1 * (SPACE1_DIM1 + 1)) / 2) * sizeof(hvl_t) +
+ vlen_size_func((unsigned long)SPACE1_DIM1) * sizeof(unsigned int)),
+ "H5Dvlen_get_buf_size");
+
+ /* Read dataset from disk */
+ ret = H5Dread(dataset, tid2, H5S_ALL, H5S_ALL, xfer_pid, rdata);
+ CHECK(ret, FAIL, "H5Dread");
+
+ /* Make certain the correct amount of memory has been used */
+ /* 10 hvl_t elements allocated = 1 + 2 + 3 + 4 elements for each array position */
+ /* 20 unsigned int elements allocated = 1 + 3 + 6 + 10 elements */
+ VERIFY(mem_used,
+ (size_t)(((SPACE1_DIM1 * (SPACE1_DIM1 + 1)) / 2) * sizeof(hvl_t) +
+ vlen_size_func((unsigned long)SPACE1_DIM1) * sizeof(unsigned int)),
+ "H5Dread");
+
+ /* Compare data read in */
+ for (i = 0; i < SPACE1_DIM1; i++) {
+ if (wdata[i].len != rdata[i].len) {
+ TestErrPrintf("%d: VL data length don't match!, wdata[%d].len=%d, rdata[%d].len=%d\n", __LINE__,
+ (int)i, (int)wdata[i].len, (int)i, (int)rdata[i].len);
+ continue;
+ } /* end if */
+ for (t1 = (hvl_t *)wdata[i].p, t2 = (hvl_t *)(rdata[i].p), j = 0; j < rdata[i].len; j++, t1++, t2++) {
+ if (t1->len != t2->len) {
+ TestErrPrintf("%d: VL data length don't match!, i=%d, j=%d, t1->len=%d, t2->len=%d\n",
+ __LINE__, (int)i, (int)j, (int)t1->len, (int)t2->len);
+ continue;
+ } /* end if */
+ for (k = 0; k < t2->len; k++) {
+ if (((unsigned int *)t1->p)[k] != ((unsigned int *)t2->p)[k]) {
+ TestErrPrintf("VL data values don't match!, t1->p[%d]=%d, t2->p[%d]=%d\n", (int)k,
+ (int)((unsigned int *)t1->p)[k], (int)k, (int)((unsigned int *)t2->p)[k]);
+ continue;
+ } /* end if */
+ } /* end for */
+ } /* end for */
+ } /* end for */
+
+ /* Reclaim all the (nested) VL data */
+ ret = H5Treclaim(tid2, sid1, xfer_pid, rdata);
+ CHECK(ret, FAIL, "H5Treclaim");
+
+ /* Make certain the VL memory has been freed */
+ VERIFY(mem_used, 0, "H5Treclaim");
+
+ /* Reclaim the write VL data */
+ ret = H5Treclaim(tid2, sid1, H5P_DEFAULT, wdata);
+ CHECK(ret, FAIL, "H5Treclaim");
+
+ /* Close Dataset */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close datatype */
+ ret = H5Tclose(tid2);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ /* Close datatype */
+ ret = H5Tclose(tid1);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ /* Close disk dataspace */
+ ret = H5Sclose(sid1);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close dataset transfer property list */
+ ret = H5Pclose(xfer_pid);
+ CHECK(ret, FAIL, "H5Pclose");
+
+ /* Close file */
+ ret = H5Fclose(fid1);
+ CHECK(ret, FAIL, "H5Fclose");
+
+} /* end test_vltypes_vlen_vlen_atomic() */
+
+/****************************************************************
+**
+** rewrite_longer_vltypes_vlen_vlen_atomic(): Check for memory leaks
+** when overwriting nested VL data with longer sequences.
+**
+****************************************************************/
+static void
+rewrite_longer_vltypes_vlen_vlen_atomic(void)
+{
+ hvl_t wdata[SPACE1_DIM1]; /* Information to write */
+ hvl_t rdata[SPACE1_DIM1]; /* Information read in */
+ hvl_t *t1, *t2; /* Temporary pointer to VL information */
+ hid_t fid1; /* HDF5 File IDs */
+ hid_t dataset; /* Dataset ID */
+ hid_t sid1; /* Dataspace ID */
+ hid_t tid2; /* Datatype IDs */
+ hid_t xfer_pid; /* Dataset transfer property list ID */
+ hsize_t size; /* Number of bytes which will be used */
+ unsigned i, j, k; /* counting variables */
+ size_t mem_used = 0; /* Memory used during allocation */
+ unsigned increment = 1;
+ herr_t ret; /* Generic return value */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Check memory leak for VL Datatypes with VL Atomic Datatype Component Functionality\n"));
+
+ /* Allocate and initialize VL data to write */
+ for (i = 0; i < SPACE1_DIM1; i++) {
+ wdata[i].p = HDmalloc((i + increment) * sizeof(hvl_t));
+ if (wdata[i].p == NULL) {
+ TestErrPrintf("Cannot allocate memory for VL data! i=%u\n", i);
+ return;
+ } /* end if */
+ wdata[i].len = i + increment;
+ for (t1 = (hvl_t *)(wdata[i].p), j = 0; j < (i + increment); j++, t1++) {
+ t1->p = HDmalloc((j + 1) * sizeof(unsigned int));
+ if (t1->p == NULL) {
+ TestErrPrintf("Cannot allocate memory for VL data! i=%u, j=%u\n", i, j);
+ return;
+ } /* end if */
+ t1->len = j + 1;
+ for (k = 0; k < (j + 1); k++)
+ ((unsigned int *)t1->p)[k] = i * 1000 + j * 100 + k * 10;
+ } /* end for */
+ } /* end for */
+
+ /* Open file */
+ fid1 = H5Fopen(FILENAME, H5F_ACC_RDWR, H5P_DEFAULT);
+ CHECK(fid1, FAIL, "H5Fopen");
+
+ /* Open the dataset */
+ dataset = H5Dopen2(fid1, "Dataset1", H5P_DEFAULT);
+ CHECK(dataset, FAIL, "H5Dopen2");
+
+ /* Get dataspace for datasets */
+ sid1 = H5Dget_space(dataset);
+ CHECK(sid1, FAIL, "H5Dget_space");
+
+ /* Open datatype of the dataset */
+ tid2 = H5Dget_type(dataset);
+ CHECK(tid2, FAIL, "H5Dget_type");
+
+ /* Write dataset to disk */
+ ret = H5Dwrite(dataset, tid2, H5S_ALL, H5S_ALL, H5P_DEFAULT, wdata);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* Close Dataset */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close datatype */
+ ret = H5Tclose(tid2);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ /* Close disk dataspace */
+ ret = H5Sclose(sid1);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close file */
+ ret = H5Fclose(fid1);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Open the file for data checking */
+ fid1 = H5Fopen(FILENAME, H5F_ACC_RDONLY, H5P_DEFAULT);
+ CHECK(fid1, FAIL, "H5Fopen");
+
+ /* Open a dataset */
+ dataset = H5Dopen2(fid1, "Dataset1", H5P_DEFAULT);
+ CHECK(dataset, FAIL, "H5Dopen2");
+
+ /* Get dataspace for datasets */
+ sid1 = H5Dget_space(dataset);
+ CHECK(sid1, FAIL, "H5Dget_space");
+
+ /* Get datatype for dataset */
+ tid2 = H5Dget_type(dataset);
+ CHECK(tid2, FAIL, "H5Dget_type");
+
+ /* Change to the custom memory allocation routines for reading VL data */
+ xfer_pid = H5Pcreate(H5P_DATASET_XFER);
+ CHECK(xfer_pid, FAIL, "H5Pcreate");
+
+ ret = H5Pset_vlen_mem_manager(xfer_pid, test_vltypes_alloc_custom, &mem_used, test_vltypes_free_custom,
+ &mem_used);
+ CHECK(ret, FAIL, "H5Pset_vlen_mem_manager");
+
+ /* Make certain the correct amount of memory was used */
+ ret = H5Dvlen_get_buf_size(dataset, tid2, sid1, &size);
+ CHECK(ret, FAIL, "H5Dvlen_get_buf_size");
+
+ /* 18 hvl_t elements allocated = 3 + 4 + 5 + 6 elements for each array position */
+ /* 52 unsigned int elements allocated = 6 + 10 + 15 + 21 elements */
+ /*VERIFY(size, 18 * sizeof(hvl_t) + 52 * sizeof(unsigned int), "H5Dvlen_get_buf_size");*/
+
+ /* Read dataset from disk */
+ ret = H5Dread(dataset, tid2, H5S_ALL, H5S_ALL, xfer_pid, rdata);
+ CHECK(ret, FAIL, "H5Dread");
+
+ /* Make certain the correct amount of memory has been used */
+ /* 18 hvl_t elements allocated = 3 + 4 + 5 + 6 elements for each array position */
+ /* 52 unsigned int elements allocated = 6 + 10 + 15 + 21 elements */
+ /*VERIFY(mem_used, 18 * sizeof(hvl_t) + 52 * sizeof(unsigned int), "H5Dread");*/
+
+ /* Compare data read in */
+ for (i = 0; i < SPACE1_DIM1; i++) {
+ if (wdata[i].len != rdata[i].len) {
+ TestErrPrintf("%d: VL data length don't match!, wdata[%d].len=%d, rdata[%d].len=%d\n", __LINE__,
+ (int)i, (int)wdata[i].len, (int)i, (int)rdata[i].len);
+ continue;
+ } /* end if */
+ for (t1 = (hvl_t *)(wdata[i].p), t2 = (hvl_t *)(rdata[i].p), j = 0; j < rdata[i].len;
+ j++, t1++, t2++) {
+ if (t1->len != t2->len) {
+ TestErrPrintf("%d: VL data length don't match!, i=%d, j=%d, t1->len=%d, t2->len=%d\n",
+ __LINE__, (int)i, (int)j, (int)t1->len, (int)t2->len);
+ continue;
+ } /* end if */
+ for (k = 0; k < t2->len; k++) {
+ if (((unsigned int *)t1->p)[k] != ((unsigned int *)t2->p)[k]) {
+ TestErrPrintf("VL data values don't match!, t1->p[%d]=%d, t2->p[%d]=%d\n", (int)k,
+ (int)((unsigned int *)t1->p)[k], (int)k, (int)((unsigned int *)t2->p)[k]);
+ continue;
+ } /* end if */
+ } /* end for */
+ } /* end for */
+ } /* end for */
+
+ /* Reclaim all the (nested) VL data */
+ ret = H5Treclaim(tid2, sid1, xfer_pid, rdata);
+ CHECK(ret, FAIL, "H5Treclaim");
+
+ /* Make certain the VL memory has been freed */
+ VERIFY(mem_used, 0, "H5Treclaim");
+
+ /* Reclaim the write VL data */
+ ret = H5Treclaim(tid2, sid1, H5P_DEFAULT, wdata);
+ CHECK(ret, FAIL, "H5Treclaim");
+
+ /* Close Dataset */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close datatype */
+ ret = H5Tclose(tid2);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ /* Close disk dataspace */
+ ret = H5Sclose(sid1);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close dataset transfer property list */
+ ret = H5Pclose(xfer_pid);
+ CHECK(ret, FAIL, "H5Pclose");
+
+ /* Close file */
+ ret = H5Fclose(fid1);
+ CHECK(ret, FAIL, "H5Fclose");
+
+} /* end rewrite_longer_vltypes_vlen_vlen_atomic() */
+
+/****************************************************************
+**
+** rewrite_shorter_vltypes_vlen_vlen_atomic(): Check for memory leaks
+** when overwriting nested VL data with shorter sequences.
+**
+****************************************************************/
+static void
+rewrite_shorter_vltypes_vlen_vlen_atomic(void)
+{
+ hvl_t wdata[SPACE1_DIM1]; /* Information to write */
+ hvl_t rdata[SPACE1_DIM1]; /* Information read in */
+ hvl_t *t1, *t2; /* Temporary pointer to VL information */
+ hid_t fid1; /* HDF5 File IDs */
+ hid_t dataset; /* Dataset ID */
+ hid_t sid1; /* Dataspace ID */
+ hid_t tid2; /* Datatype IDs */
+ hid_t xfer_pid; /* Dataset transfer property list ID */
+ hsize_t size; /* Number of bytes which will be used */
+ unsigned i, j, k; /* counting variables */
+ size_t mem_used = 0; /* Memory used during allocation */
+ unsigned increment = 1;
+ herr_t ret; /* Generic return value */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Check memory leak for VL Datatypes with VL Atomic Datatype Component Functionality\n"));
+
+ /* Allocate and initialize VL data to write */
+ for (i = 0; i < SPACE1_DIM1; i++) {
+ wdata[i].p = HDmalloc((i + increment) * sizeof(hvl_t));
+ if (wdata[i].p == NULL) {
+ TestErrPrintf("Cannot allocate memory for VL data! i=%u\n", i);
+ return;
+ } /* end if */
+ wdata[i].len = i + increment;
+ for (t1 = (hvl_t *)(wdata[i].p), j = 0; j < (i + increment); j++, t1++) {
+ t1->p = HDmalloc((j + 1) * sizeof(unsigned int));
+ if (t1->p == NULL) {
+ TestErrPrintf("Cannot allocate memory for VL data! i=%u, j=%u\n", i, j);
+ return;
+ } /* end if */
+ t1->len = j + 1;
+ for (k = 0; k < (j + 1); k++)
+ ((unsigned int *)t1->p)[k] = i * 100000 + j * 1000 + k * 10;
+ } /* end for */
+ } /* end for */
+
+ /* Open file */
+ fid1 = H5Fopen(FILENAME, H5F_ACC_RDWR, H5P_DEFAULT);
+ CHECK(fid1, FAIL, "H5Fopen");
+
+ /* Open the dataset */
+ dataset = H5Dopen2(fid1, "Dataset1", H5P_DEFAULT);
+ CHECK(dataset, FAIL, "H5Dopen2");
+
+ /* Get dataspace for datasets */
+ sid1 = H5Dget_space(dataset);
+ CHECK(sid1, FAIL, "H5Dget_space");
+
+ /* Open datatype of the dataset */
+ tid2 = H5Dget_type(dataset);
+ CHECK(tid2, FAIL, "H5Dget_type");
+
+ /* Write dataset to disk */
+ ret = H5Dwrite(dataset, tid2, H5S_ALL, H5S_ALL, H5P_DEFAULT, wdata);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* Close Dataset */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close datatype */
+ ret = H5Tclose(tid2);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ /* Close disk dataspace */
+ ret = H5Sclose(sid1);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close file */
+ ret = H5Fclose(fid1);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Open the file for data checking */
+ fid1 = H5Fopen(FILENAME, H5F_ACC_RDONLY, H5P_DEFAULT);
+ CHECK(fid1, FAIL, "H5Fopen");
+
+ /* Open a dataset */
+ dataset = H5Dopen2(fid1, "Dataset1", H5P_DEFAULT);
+ CHECK(dataset, FAIL, "H5Dopen2");
+
+ /* Get dataspace for datasets */
+ sid1 = H5Dget_space(dataset);
+ CHECK(sid1, FAIL, "H5Dget_space");
+
+ /* Get datatype for dataset */
+ tid2 = H5Dget_type(dataset);
+ CHECK(tid2, FAIL, "H5Dget_type");
+
+ /* Change to the custom memory allocation routines for reading VL data */
+ xfer_pid = H5Pcreate(H5P_DATASET_XFER);
+ CHECK(xfer_pid, FAIL, "H5Pcreate");
+
+ ret = H5Pset_vlen_mem_manager(xfer_pid, test_vltypes_alloc_custom, &mem_used, test_vltypes_free_custom,
+ &mem_used);
+ CHECK(ret, FAIL, "H5Pset_vlen_mem_manager");
+
+ /* Make certain the correct amount of memory was used */
+ ret = H5Dvlen_get_buf_size(dataset, tid2, sid1, &size);
+ CHECK(ret, FAIL, "H5Dvlen_get_buf_size");
+
+ /* 10 hvl_t elements allocated = 1 + 2 + 3 + 4 elements for each array position */
+ /* 20 unsigned int elements allocated = 1 + 3 + 6 + 10 elements */
+ VERIFY(size,
+ (hsize_t)(((SPACE1_DIM1 * (SPACE1_DIM1 + 1)) / 2) * sizeof(hvl_t) +
+ vlen_size_func((unsigned long)SPACE1_DIM1) * sizeof(unsigned int)),
+ "H5Dvlen_get_buf_size");
+
+ /* Read dataset from disk */
+ ret = H5Dread(dataset, tid2, H5S_ALL, H5S_ALL, xfer_pid, rdata);
+ CHECK(ret, FAIL, "H5Dread");
+
+ /* Make certain the correct amount of memory has been used */
+ /* 10 hvl_t elements allocated = 1 + 2 + 3 + 4 elements for each array position */
+ /* 20 unsigned int elements allocated = 1 + 3 + 6 + 10 elements */
+ VERIFY(mem_used,
+ (size_t)(((SPACE1_DIM1 * (SPACE1_DIM1 + 1)) / 2) * sizeof(hvl_t) +
+ vlen_size_func((unsigned long)SPACE1_DIM1) * sizeof(unsigned int)),
+ "H5Dread");
+
+ /* Compare data read in */
+ for (i = 0; i < SPACE1_DIM1; i++) {
+ if (wdata[i].len != rdata[i].len) {
+ TestErrPrintf("%d: VL data length don't match!, wdata[%d].len=%d, rdata[%d].len=%d\n", __LINE__,
+ (int)i, (int)wdata[i].len, (int)i, (int)rdata[i].len);
+ continue;
+ } /* end if */
+ for (t1 = (hvl_t *)(wdata[i].p), t2 = (hvl_t *)(rdata[i].p), j = 0; j < rdata[i].len;
+ j++, t1++, t2++) {
+ if (t1->len != t2->len) {
+ TestErrPrintf("%d: VL data length don't match!, i=%d, j=%d, t1->len=%d, t2->len=%d\n",
+ __LINE__, (int)i, (int)j, (int)t1->len, (int)t2->len);
+ continue;
+ } /* end if */
+ for (k = 0; k < t2->len; k++) {
+ if (((unsigned int *)t1->p)[k] != ((unsigned int *)t2->p)[k]) {
+ TestErrPrintf("VL data values don't match!, t1->p[%d]=%d, t2->p[%d]=%d\n", (int)k,
+ (int)((unsigned int *)t1->p)[k], (int)k, (int)((unsigned int *)t2->p)[k]);
+ continue;
+ } /* end if */
+ } /* end for */
+ } /* end for */
+ } /* end for */
+
+ /* Reclaim all the (nested) VL data */
+ ret = H5Treclaim(tid2, sid1, xfer_pid, rdata);
+ CHECK(ret, FAIL, "H5Treclaim");
+
+ /* Make certain the VL memory has been freed */
+ VERIFY(mem_used, 0, "H5Treclaim");
+
+ /* Reclaim the write VL data */
+ ret = H5Treclaim(tid2, sid1, H5P_DEFAULT, wdata);
+ CHECK(ret, FAIL, "H5Treclaim");
+
+ /* Close Dataset */
+ ret = H5Dclose(dataset);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close datatype */
+ ret = H5Tclose(tid2);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ /* Close disk dataspace */
+ ret = H5Sclose(sid1);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ /* Close dataset transfer property list */
+ ret = H5Pclose(xfer_pid);
+ CHECK(ret, FAIL, "H5Pclose");
+
+ /* Close file */
+ ret = H5Fclose(fid1);
+ CHECK(ret, FAIL, "H5Fclose");
+
+} /* end rewrite_shorter_vltypes_vlen_vlen_atomic() */
+
+/****************************************************************
+**
+** test_vltypes_fill_value(): Test fill values for VL data.
+** One dataset is created without space allocated; another is
+** created with space allocated and the fill value written.
+**
+****************************************************************/
+static void
+test_vltypes_fill_value(void)
+{
+ typedef struct dtype1_struct {
+ unsigned int gui;
+ unsigned int pgui;
+ const char *str_id;
+ const char *str_name;
+ const char *str_desc;
+ const char *str_orig;
+ const char *str_stat;
+ unsigned int ver;
+ double val;
+ double ma;
+ double mi;
+ const char *str_form;
+ const char *str_unit;
+ } dtype1_struct;
+
+ herr_t ret;
+ hid_t file_id;
+ hid_t dtype1_id = -1;
+ hid_t str_id = -1;
+ hid_t small_dspace_id; /* Dataspace ID for small datasets */
+ hid_t large_dspace_id; /* Dataspace ID for large datasets */
+ hid_t small_select_dspace_id; /* Dataspace ID for selection in small datasets */
+ hid_t large_select_dspace_id; /* Dataspace ID for selection in large datasets */
+ hid_t dset_dspace_id = -1; /* Dataspace ID for a particular dataset */
+ hid_t dset_select_dspace_id = -1; /* Dataspace ID for selection in a particular dataset */
+ hid_t scalar_dspace_id; /* Dataspace ID for scalar dataspace */
+ hid_t single_dspace_id; /* Dataspace ID for single element selection */
+ hsize_t single_offset[] = {2}; /* Offset of single element selection */
+ hsize_t single_block[] = {1}; /* Block size of single element selection */
+ hsize_t select_offset[] = {0}; /* Offset of non-contiguous element selection */
+ hsize_t select_stride[] = {2}; /* Stride size of non-contiguous element selection */
+ hsize_t small_select_count[] = {SPACE4_DIM_SMALL /
+ 2}; /* Count of small non-contiguous element selection */
+ hsize_t large_select_count[] = {SPACE4_DIM_LARGE /
+ 2}; /* Count of large non-contiguous element selection */
+ hsize_t select_block[] = {1}; /* Block size of non-contiguous element selection */
+ hid_t dcpl_id, xfer_pid;
+ hid_t dset_id;
+ hsize_t small_dims[] = {SPACE4_DIM_SMALL};
+ hsize_t large_dims[] = {SPACE4_DIM_LARGE};
+ size_t dset_elmts = 0; /* Number of elements in a particular dataset */
+ const dtype1_struct fill1 = {1, 2, "foobar", "", NULL, "\0", "dead",
+ 3, 4.0, 100.0, 1.0, "liquid", "meter"};
+ const dtype1_struct wdata = {3, 4, "", NULL, "\0", "foo", "two", 6, 8.0, 200.0, 2.0, "solid", "yard"};
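+ /* The fill value and write value deliberately mix non-empty strings, empty
+  * strings, NULL pointers, and "\0" among their string fields so that the
+  * comparison loops below can tell fill-value elements, written elements,
+  * and untouched (zeroed) elements apart. */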
+ dtype1_struct *rbuf = NULL; /* Buffer for reading data */
+ size_t mem_used = 0; /* Memory used during allocation */
+ H5D_layout_t layout; /* Dataset storage layout */
+ char dset_name1[64], dset_name2[64]; /* Dataset names */
+ unsigned i;
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Check fill value for VL data\n"));
+
+ /* Create a string datatype */
+ str_id = H5Tcopy(H5T_C_S1);
+ CHECK(str_id, FAIL, "H5Tcopy");
+ ret = H5Tset_size(str_id, H5T_VARIABLE);
+ CHECK(ret, FAIL, "H5Tset_size");
+
+ /* Create a compound data type */
+ dtype1_id = H5Tcreate(H5T_COMPOUND, sizeof(struct dtype1_struct));
+ CHECK(dtype1_id, FAIL, "H5Tcreate");
+
+ ret = H5Tinsert(dtype1_id, "guid", HOFFSET(struct dtype1_struct, gui), H5T_NATIVE_UINT);
+ CHECK(ret, FAIL, "H5Tinsert");
+
+ ret = H5Tinsert(dtype1_id, "pguid", HOFFSET(struct dtype1_struct, pgui), H5T_NATIVE_UINT);
+ CHECK(ret, FAIL, "H5Tinsert");
+
+ ret = H5Tinsert(dtype1_id, "str_id", HOFFSET(dtype1_struct, str_id), str_id);
+ CHECK(ret, FAIL, "H5Tinsert");
+
+ ret = H5Tinsert(dtype1_id, "str_name", HOFFSET(dtype1_struct, str_name), str_id);
+ CHECK(ret, FAIL, "H5Tinsert");
+
+ ret = H5Tinsert(dtype1_id, "str_desc", HOFFSET(dtype1_struct, str_desc), str_id);
+ CHECK(ret, FAIL, "H5Tinsert");
+
+ ret = H5Tinsert(dtype1_id, "str_orig", HOFFSET(dtype1_struct, str_orig), str_id);
+ CHECK(ret, FAIL, "H5Tinsert");
+
+ ret = H5Tinsert(dtype1_id, "str_stat", HOFFSET(dtype1_struct, str_stat), str_id);
+ CHECK(ret, FAIL, "H5Tinsert");
+
+ ret = H5Tinsert(dtype1_id, "ver", HOFFSET(struct dtype1_struct, ver), H5T_NATIVE_UINT);
+ CHECK(ret, FAIL, "H5Tinsert");
+
+ ret = H5Tinsert(dtype1_id, "val", HOFFSET(struct dtype1_struct, val), H5T_NATIVE_DOUBLE);
+ CHECK(ret, FAIL, "H5Tinsert");
+
+ ret = H5Tinsert(dtype1_id, "ma", HOFFSET(struct dtype1_struct, ma), H5T_NATIVE_DOUBLE);
+ CHECK(ret, FAIL, "H5Tinsert");
+
+ ret = H5Tinsert(dtype1_id, "mi", HOFFSET(struct dtype1_struct, mi), H5T_NATIVE_DOUBLE);
+ CHECK(ret, FAIL, "H5Tinsert");
+
+ ret = H5Tinsert(dtype1_id, "str_form", HOFFSET(dtype1_struct, str_form), str_id);
+ CHECK(ret, FAIL, "H5Tinsert");
+
+ ret = H5Tinsert(dtype1_id, "str_unit", HOFFSET(dtype1_struct, str_unit), str_id);
+ CHECK(ret, FAIL, "H5Tinsert");
+
+ /* Close string datatype */
+ ret = H5Tclose(str_id);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ /* Allocate space for the buffer to read data */
+ rbuf = (dtype1_struct *)HDmalloc(SPACE4_DIM_LARGE * sizeof(dtype1_struct));
+ CHECK_PTR(rbuf, "HDmalloc");
+
+ /* Create the small & large dataspaces to use */
+ small_dspace_id = H5Screate_simple(SPACE4_RANK, small_dims, NULL);
+ CHECK(small_dspace_id, FAIL, "H5Screate_simple");
+
+ large_dspace_id = H5Screate_simple(SPACE4_RANK, large_dims, NULL);
+ CHECK(large_dspace_id, FAIL, "H5Screate_simple");
+
+ /* Create small & large dataspaces w/non-contiguous selections */
+ small_select_dspace_id = H5Scopy(small_dspace_id);
+ CHECK(small_select_dspace_id, FAIL, "H5Scopy");
+
+ ret = H5Sselect_hyperslab(small_select_dspace_id, H5S_SELECT_SET, select_offset, select_stride,
+ small_select_count, select_block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
+
+ large_select_dspace_id = H5Scopy(large_dspace_id);
+ CHECK(large_select_dspace_id, FAIL, "H5Scopy");
+
+ ret = H5Sselect_hyperslab(large_select_dspace_id, H5S_SELECT_SET, select_offset, select_stride,
+ large_select_count, select_block);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
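+ /* Both selections use offset 0, stride 2, and block 1, so they cover every
+  * other element of the dataset; the comparison loops below use
+  * (i % 2) == select_offset[0] to decide which elements should hold data. */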
+
+ /* Create a scalar dataspace */
+ scalar_dspace_id = H5Screate(H5S_SCALAR);
+ CHECK(scalar_dspace_id, FAIL, "H5Screate");
+
+ /* Create dataset create property list and set the fill value */
+ dcpl_id = H5Pcreate(H5P_DATASET_CREATE);
+ CHECK(dcpl_id, FAIL, "H5Pcreate");
+
+ ret = H5Pset_fill_value(dcpl_id, dtype1_id, &fill1);
+ CHECK(ret, FAIL, "H5Pset_fill_value");
+
+ /* Create the file */
+ file_id = H5Fcreate(FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(file_id, FAIL, "H5Fcreate");
+
+ /* Create datasets with different storage layouts */
+ for (layout = H5D_COMPACT; layout <= H5D_CHUNKED; layout++) {
+ unsigned compress_loop; /* # of times to run loop, for testing compressed chunked dataset */
+ unsigned test_loop; /* Loop over datasets */
+
+#ifdef H5_HAVE_FILTER_DEFLATE
+ if (layout == H5D_CHUNKED)
+ compress_loop = 2;
+ else
+#endif /* H5_HAVE_FILTER_DEFLATE */
+ compress_loop = 1;
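+ /* Without the deflate filter only the plain chunked case is exercised;
+  * with it, the chunked layout is tested both uncompressed and compressed. */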
+
+ /* Loop over dataset operations */
+ for (test_loop = 0; test_loop < compress_loop; test_loop++) {
+ hid_t tmp_dcpl_id; /* Temporary copy of the dataset creation property list */
+
+ /* Make a copy of the dataset creation property list */
+ tmp_dcpl_id = H5Pcopy(dcpl_id);
+ CHECK(tmp_dcpl_id, FAIL, "H5Pcopy");
+
+ /* Layout specific actions */
+ switch (layout) {
+ case H5D_COMPACT:
+ HDstrcpy(dset_name1, "dataset1-compact");
+ HDstrcpy(dset_name2, "dataset2-compact");
+ dset_dspace_id = small_dspace_id;
+ ret = H5Pset_layout(tmp_dcpl_id, H5D_COMPACT);
+ CHECK(ret, FAIL, "H5Pset_layout");
+ break;
+
+ case H5D_CONTIGUOUS:
+ HDstrcpy(dset_name1, "dataset1-contig");
+ HDstrcpy(dset_name2, "dataset2-contig");
+ dset_dspace_id = large_dspace_id;
+ break;
+
+ case H5D_CHUNKED: {
+ hsize_t chunk_dims[1] = {SPACE4_DIM_LARGE / 4};
+
+ dset_dspace_id = large_dspace_id;
+ ret = H5Pset_chunk(tmp_dcpl_id, 1, chunk_dims);
+ CHECK(ret, FAIL, "H5Pset_chunk");
+#ifdef H5_HAVE_FILTER_DEFLATE
+ if (test_loop == 1) {
+ HDstrcpy(dset_name1, "dataset1-chunked-compressed");
+ HDstrcpy(dset_name2, "dataset2-chunked-compressed");
+ ret = H5Pset_deflate(tmp_dcpl_id, 3);
+ CHECK(ret, FAIL, "H5Pset_deflate");
+ } /* end if */
+ else {
+#endif /* H5_HAVE_FILTER_DEFLATE */
+ HDstrcpy(dset_name1, "dataset1-chunked");
+ HDstrcpy(dset_name2, "dataset2-chunked");
+#ifdef H5_HAVE_FILTER_DEFLATE
+ } /* end else */
+#endif /* H5_HAVE_FILTER_DEFLATE */
+ } break;
+
+ case H5D_VIRTUAL:
+ HDassert(0 && "Invalid layout type!");
+ break;
+
+ case H5D_LAYOUT_ERROR:
+ case H5D_NLAYOUTS:
+ default:
+ HDassert(0 && "Unknown layout type!");
+ break;
+ } /* end switch */
+
+ /* Create first data set with default setting - no space is allocated */
+ dset_id = H5Dcreate2(file_id, dset_name1, dtype1_id, dset_dspace_id, H5P_DEFAULT, tmp_dcpl_id,
+ H5P_DEFAULT);
+ CHECK(dset_id, FAIL, "H5Dcreate2");
+
+ ret = H5Dclose(dset_id);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Create a second data set with space allocated and fill value written */
+ ret = H5Pset_fill_time(tmp_dcpl_id, H5D_FILL_TIME_IFSET);
+ CHECK(ret, FAIL, "H5Pset_fill_time");
+
+ ret = H5Pset_alloc_time(tmp_dcpl_id, H5D_ALLOC_TIME_EARLY);
+ CHECK(ret, FAIL, "H5Pset_alloc_time");
+
+ dset_id = H5Dcreate2(file_id, dset_name2, dtype1_id, dset_dspace_id, H5P_DEFAULT, tmp_dcpl_id,
+ H5P_DEFAULT);
+ CHECK(dset_id, FAIL, "H5Dcreate2");
+
+ ret = H5Dclose(dset_id);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close temporary DCPL */
+ ret = H5Pclose(tmp_dcpl_id);
+ CHECK(ret, FAIL, "H5Pclose");
+ } /* end for */
+ } /* end for */
+
+ ret = H5Fclose(file_id);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ ret = H5Pclose(dcpl_id);
+ CHECK(ret, FAIL, "H5Pclose");
+
+ /* Change to the custom memory allocation routines for reading VL data */
+ xfer_pid = H5Pcreate(H5P_DATASET_XFER);
+ CHECK(xfer_pid, FAIL, "H5Pcreate");
+
+ ret = H5Pset_vlen_mem_manager(xfer_pid, test_vltypes_alloc_custom, &mem_used, test_vltypes_free_custom,
+ &mem_used);
+ CHECK(ret, FAIL, "H5Pset_vlen_mem_manager");
+
+ /* Open the file to check data set value */
+ file_id = H5Fopen(FILENAME, H5F_ACC_RDONLY, H5P_DEFAULT);
+ CHECK(file_id, FAIL, "H5Fopen");
+
+ /* Read empty datasets with different storage layouts */
+ for (layout = H5D_COMPACT; layout <= H5D_CHUNKED; layout++) {
+ unsigned compress_loop; /* # of times to run loop, for testing compressed chunked dataset */
+ unsigned test_loop; /* Loop over datasets */
+
+#ifdef H5_HAVE_FILTER_DEFLATE
+ if (layout == H5D_CHUNKED)
+ compress_loop = 2;
+ else
+#endif /* H5_HAVE_FILTER_DEFLATE */
+ compress_loop = 1;
+
+ /* Loop over dataset operations */
+ for (test_loop = 0; test_loop < compress_loop; test_loop++) {
+
+ /* Layout specific actions */
+ switch (layout) {
+ case H5D_COMPACT:
+ HDstrcpy(dset_name1, "dataset1-compact");
+ HDstrcpy(dset_name2, "dataset2-compact");
+ dset_dspace_id = small_dspace_id;
+ dset_select_dspace_id = small_select_dspace_id;
+ dset_elmts = SPACE4_DIM_SMALL;
+ break;
+
+ case H5D_CONTIGUOUS:
+ HDstrcpy(dset_name1, "dataset1-contig");
+ HDstrcpy(dset_name2, "dataset2-contig");
+ dset_dspace_id = large_dspace_id;
+ dset_select_dspace_id = large_select_dspace_id;
+ dset_elmts = SPACE4_DIM_LARGE;
+ break;
+
+ case H5D_CHUNKED:
+#ifdef H5_HAVE_FILTER_DEFLATE
+ if (test_loop == 1) {
+ HDstrcpy(dset_name1, "dataset1-chunked-compressed");
+ HDstrcpy(dset_name2, "dataset2-chunked-compressed");
+ } /* end if */
+ else {
+#endif /* H5_HAVE_FILTER_DEFLATE */
+ HDstrcpy(dset_name1, "dataset1-chunked");
+ HDstrcpy(dset_name2, "dataset2-chunked");
+#ifdef H5_HAVE_FILTER_DEFLATE
+ } /* end else */
+#endif /* H5_HAVE_FILTER_DEFLATE */
+ dset_dspace_id = large_dspace_id;
+ dset_select_dspace_id = large_select_dspace_id;
+ dset_elmts = SPACE4_DIM_LARGE;
+ break;
+
+ case H5D_VIRTUAL:
+ HDassert(0 && "Invalid layout type!");
+ break;
+
+ case H5D_LAYOUT_ERROR:
+ case H5D_NLAYOUTS:
+ default:
+ HDassert(0 && "Unknown layout type!");
+ break;
+ } /* end switch */
+
+ /* Open first data set */
+ dset_id = H5Dopen2(file_id, dset_name1, H5P_DEFAULT);
+ CHECK(dset_id, FAIL, "H5Dopen2");
+
+ /* Read in the entire 'empty' dataset of fill value */
+ ret = H5Dread(dset_id, dtype1_id, dset_dspace_id, dset_dspace_id, xfer_pid, rbuf);
+ CHECK(ret, FAIL, "H5Dread");
+
+ /* Compare data read in */
+ for (i = 0; i < dset_elmts; i++) {
+ if (HDstrcmp(rbuf[i].str_id, "foobar") != 0 || HDstrcmp(rbuf[i].str_name, "") != 0 ||
+ rbuf[i].str_desc || HDstrcmp(rbuf[i].str_orig, "\0") != 0 ||
+ HDstrcmp(rbuf[i].str_stat, "dead") != 0 || HDstrcmp(rbuf[i].str_form, "liquid") != 0 ||
+ HDstrcmp(rbuf[i].str_unit, "meter") != 0) {
+ TestErrPrintf("%d: VL data doesn't match!, index(i) = %d\n", __LINE__, (int)i);
+ continue;
+ } /* end if */
+ } /* end for */
+
+ /* Release the space */
+ ret = H5Treclaim(dtype1_id, dset_dspace_id, xfer_pid, rbuf);
+ CHECK(ret, FAIL, "H5Treclaim");
+
+ /* Clear the read buffer */
+ HDmemset(rbuf, 0, dset_elmts * sizeof(dtype1_struct));
+
+ /* Read in non-contiguous selection from 'empty' dataset of fill value */
+ ret = H5Dread(dset_id, dtype1_id, dset_select_dspace_id, dset_select_dspace_id, xfer_pid, rbuf);
+ CHECK(ret, FAIL, "H5Dread");
+
+ /* Compare data read in */
+ for (i = 0; i < dset_elmts; i++) {
+ if ((i % 2) == select_offset[0]) {
+ if (HDstrcmp(rbuf[i].str_id, "foobar") != 0 || HDstrcmp(rbuf[i].str_name, "") != 0 ||
+ rbuf[i].str_desc || HDstrcmp(rbuf[i].str_orig, "\0") != 0 ||
+ HDstrcmp(rbuf[i].str_stat, "dead") != 0 ||
+ HDstrcmp(rbuf[i].str_form, "liquid") != 0 ||
+ HDstrcmp(rbuf[i].str_unit, "meter") != 0) {
+ TestErrPrintf("%d: VL data doesn't match!, index(i) = %d\n", __LINE__, (int)i);
+ continue;
+ } /* end if */
+ } /* end if */
+ else {
+ if (rbuf[i].str_id || rbuf[i].str_name || rbuf[i].str_desc || rbuf[i].str_orig ||
+ rbuf[i].str_stat || rbuf[i].str_form || rbuf[i].str_unit) {
+ TestErrPrintf("%d: VL data doesn't match!, index(i) = %d\n", __LINE__, (int)i);
+ continue;
+ } /* end if */
+ } /* end else */
+ } /* end for */
+
+ /* Release the space */
+ ret = H5Treclaim(dtype1_id, dset_dspace_id, xfer_pid, rbuf);
+ CHECK(ret, FAIL, "H5Treclaim");
+
+ ret = H5Dclose(dset_id);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Open the second data set to check the value of data */
+ dset_id = H5Dopen2(file_id, dset_name2, H5P_DEFAULT);
+ CHECK(dset_id, FAIL, "H5Dopen2");
+
+ /* Read in the entire 'empty' dataset of fill value */
+ ret = H5Dread(dset_id, dtype1_id, dset_dspace_id, dset_dspace_id, xfer_pid, rbuf);
+ CHECK(ret, FAIL, "H5Dread");
+
+ /* Compare data read in */
+ for (i = 0; i < dset_elmts; i++) {
+ if (HDstrcmp(rbuf[i].str_id, "foobar") != 0 || HDstrcmp(rbuf[i].str_name, "") != 0 ||
+ rbuf[i].str_desc || HDstrcmp(rbuf[i].str_orig, "\0") != 0 ||
+ HDstrcmp(rbuf[i].str_stat, "dead") != 0 || HDstrcmp(rbuf[i].str_form, "liquid") != 0 ||
+ HDstrcmp(rbuf[i].str_unit, "meter") != 0) {
+ TestErrPrintf("%d: VL data doesn't match!, index(i)=%d\n", __LINE__, (int)i);
+ continue;
+ } /* end if */
+ } /* end for */
+
+ /* Release the space */
+ ret = H5Treclaim(dtype1_id, dset_dspace_id, xfer_pid, rbuf);
+ CHECK(ret, FAIL, "H5Treclaim");
+
+ /* Clear the read buffer */
+ HDmemset(rbuf, 0, dset_elmts * sizeof(dtype1_struct));
+
+ /* Read in non-contiguous selection from 'empty' dataset of fill value */
+ ret = H5Dread(dset_id, dtype1_id, dset_select_dspace_id, dset_select_dspace_id, xfer_pid, rbuf);
+ CHECK(ret, FAIL, "H5Dread");
+
+ /* Compare data read in */
+ for (i = 0; i < dset_elmts; i++) {
+ if ((i % 2) == select_offset[0]) {
+ if (HDstrcmp(rbuf[i].str_id, "foobar") != 0 || HDstrcmp(rbuf[i].str_name, "") != 0 ||
+ rbuf[i].str_desc || HDstrcmp(rbuf[i].str_orig, "\0") != 0 ||
+ HDstrcmp(rbuf[i].str_stat, "dead") != 0 ||
+ HDstrcmp(rbuf[i].str_form, "liquid") != 0 ||
+ HDstrcmp(rbuf[i].str_unit, "meter") != 0) {
+ TestErrPrintf("%d: VL data doesn't match!, index(i) = %d\n", __LINE__, (int)i);
+ continue;
+ } /* end if */
+ } /* end if */
+ else {
+ if (rbuf[i].str_id || rbuf[i].str_name || rbuf[i].str_desc || rbuf[i].str_orig ||
+ rbuf[i].str_stat || rbuf[i].str_form || rbuf[i].str_unit) {
+ TestErrPrintf("%d: VL data doesn't match!, index(i) = %d\n", __LINE__, (int)i);
+ continue;
+ } /* end if */
+ } /* end else */
+ } /* end for */
+
+ /* Release the space */
+ ret = H5Treclaim(dtype1_id, dset_select_dspace_id, xfer_pid, rbuf);
+ CHECK(ret, FAIL, "H5Treclaim");
+
+ ret = H5Dclose(dset_id);
+ CHECK(ret, FAIL, "H5Dclose");
+ } /* end for */
+ } /* end for */
+
+ ret = H5Fclose(file_id);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Open the file to check data set value */
+ file_id = H5Fopen(FILENAME, H5F_ACC_RDWR, H5P_DEFAULT);
+ CHECK(file_id, FAIL, "H5Fopen");
+
+ /* Write one element & fill values to datasets with different storage layouts */
+ for (layout = H5D_COMPACT; layout <= H5D_CHUNKED; layout++) {
+ unsigned compress_loop; /* # of times to run loop, for testing compressed chunked dataset */
+ unsigned test_loop; /* Loop over datasets */
+
+#ifdef H5_HAVE_FILTER_DEFLATE
+ if (layout == H5D_CHUNKED)
+ compress_loop = 2;
+ else
+#endif /* H5_HAVE_FILTER_DEFLATE */
+ compress_loop = 1;
+
+ /* Loop over dataset operations */
+ for (test_loop = 0; test_loop < compress_loop; test_loop++) {
+
+ /* Layout specific actions */
+ switch (layout) {
+ case H5D_COMPACT:
+ HDstrcpy(dset_name1, "dataset1-compact");
+ HDstrcpy(dset_name2, "dataset2-compact");
+ dset_dspace_id = small_dspace_id;
+ dset_select_dspace_id = small_select_dspace_id;
+ dset_elmts = SPACE4_DIM_SMALL;
+ break;
+
+ case H5D_CONTIGUOUS:
+ HDstrcpy(dset_name1, "dataset1-contig");
+ HDstrcpy(dset_name2, "dataset2-contig");
+ dset_dspace_id = large_dspace_id;
+ dset_select_dspace_id = large_select_dspace_id;
+ dset_elmts = SPACE4_DIM_LARGE;
+ break;
+
+ case H5D_CHUNKED:
+#ifdef H5_HAVE_FILTER_DEFLATE
+ if (test_loop == 1) {
+ HDstrcpy(dset_name1, "dataset1-chunked-compressed");
+ HDstrcpy(dset_name2, "dataset2-chunked-compressed");
+ } /* end if */
+ else {
+#endif /* H5_HAVE_FILTER_DEFLATE */
+ HDstrcpy(dset_name1, "dataset1-chunked");
+ HDstrcpy(dset_name2, "dataset2-chunked");
+#ifdef H5_HAVE_FILTER_DEFLATE
+ } /* end else */
+#endif /* H5_HAVE_FILTER_DEFLATE */
+ dset_dspace_id = large_dspace_id;
+ dset_select_dspace_id = large_select_dspace_id;
+ dset_elmts = SPACE4_DIM_LARGE;
+ break;
+
+ case H5D_VIRTUAL:
+ HDassert(0 && "Invalid layout type!");
+ break;
+
+ case H5D_LAYOUT_ERROR:
+ case H5D_NLAYOUTS:
+ default:
+ HDassert(0 && "Unknown layout type!");
+ break;
+ } /* end switch */
+
+ /* Copy the dataset's dataspace */
+ single_dspace_id = H5Scopy(dset_dspace_id);
+ CHECK(single_dspace_id, FAIL, "H5Scopy");
+
+ /* Set a single element in the dataspace */
+ ret = H5Sselect_hyperslab(single_dspace_id, H5S_SELECT_SET, single_offset, NULL, single_block,
+ NULL);
+ CHECK(ret, FAIL, "H5Sselect_hyperslab");
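+ /* Pairing the scalar memory dataspace with this one-element file selection
+  * (offset 2, block 1) means each H5Dwrite below overwrites exactly one
+  * compound element of the dataset with wdata. */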
+
+ /* Open first data set */
+ dset_id = H5Dopen2(file_id, dset_name1, H5P_DEFAULT);
+ CHECK(dset_id, FAIL, "H5Dopen2");
+
+ /* Write one element in the dataset */
+ ret = H5Dwrite(dset_id, dtype1_id, scalar_dspace_id, single_dspace_id, xfer_pid, &wdata);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ ret = H5Dread(dset_id, dtype1_id, dset_dspace_id, dset_dspace_id, xfer_pid, rbuf);
+ CHECK(ret, FAIL, "H5Dread");
+
+ /* Compare data read in */
+ for (i = 0; i < dset_elmts; i++) {
+ if (i == single_offset[0]) {
+ if (HDstrcmp(rbuf[i].str_id, wdata.str_id) != 0 || rbuf[i].str_name ||
+ HDstrcmp(rbuf[i].str_desc, wdata.str_desc) != 0 ||
+ HDstrcmp(rbuf[i].str_orig, wdata.str_orig) != 0 ||
+ HDstrcmp(rbuf[i].str_stat, wdata.str_stat) != 0 ||
+ HDstrcmp(rbuf[i].str_form, wdata.str_form) != 0 ||
+ HDstrcmp(rbuf[i].str_unit, wdata.str_unit) != 0) {
+ TestErrPrintf("%d: VL data doesn't match!, index(i)=%d\n", __LINE__, (int)i);
+ continue;
+ } /* end if */
+ } /* end if */
+ else {
+ if (HDstrcmp(rbuf[i].str_id, "foobar") != 0 || HDstrcmp(rbuf[i].str_name, "") != 0 ||
+ rbuf[i].str_desc || HDstrcmp(rbuf[i].str_orig, "\0") != 0 ||
+ HDstrcmp(rbuf[i].str_stat, "dead") != 0 ||
+ HDstrcmp(rbuf[i].str_form, "liquid") != 0 ||
+ HDstrcmp(rbuf[i].str_unit, "meter") != 0) {
+ TestErrPrintf("%d: VL data doesn't match!, index(i)=%d\n", __LINE__, (int)i);
+ continue;
+ } /* end if */
+ } /* end if */
+ } /* end for */
+
+ /* Release the space */
+ ret = H5Treclaim(dtype1_id, dset_dspace_id, xfer_pid, rbuf);
+ CHECK(ret, FAIL, "H5Treclaim");
+
+ /* Clear the read buffer */
+ HDmemset(rbuf, 0, dset_elmts * sizeof(dtype1_struct));
+
+ /* Read in non-contiguous selection from dataset */
+ ret = H5Dread(dset_id, dtype1_id, dset_select_dspace_id, dset_select_dspace_id, xfer_pid, rbuf);
+ CHECK(ret, FAIL, "H5Dread");
+
+ /* Compare data read in */
+ for (i = 0; i < dset_elmts; i++) {
+ if (i == single_offset[0]) {
+ if (HDstrcmp(rbuf[i].str_id, wdata.str_id) != 0 || rbuf[i].str_name ||
+ HDstrcmp(rbuf[i].str_desc, wdata.str_desc) != 0 ||
+ HDstrcmp(rbuf[i].str_orig, wdata.str_orig) != 0 ||
+ HDstrcmp(rbuf[i].str_stat, wdata.str_stat) != 0 ||
+ HDstrcmp(rbuf[i].str_form, wdata.str_form) != 0 ||
+ HDstrcmp(rbuf[i].str_unit, wdata.str_unit) != 0) {
+ TestErrPrintf("%d: VL data doesn't match!, index(i)=%d\n", __LINE__, (int)i);
+ continue;
+ } /* end if */
+ } /* end if */
+ else {
+ if ((i % 2) == select_offset[0]) {
+ if (HDstrcmp(rbuf[i].str_id, "foobar") != 0 || HDstrcmp(rbuf[i].str_name, "") != 0 ||
+ rbuf[i].str_desc || HDstrcmp(rbuf[i].str_orig, "\0") != 0 ||
+ HDstrcmp(rbuf[i].str_stat, "dead") != 0 ||
+ HDstrcmp(rbuf[i].str_form, "liquid") != 0 ||
+ HDstrcmp(rbuf[i].str_unit, "meter") != 0) {
+ TestErrPrintf("%d: VL data doesn't match!, index(i) = %d\n", __LINE__, (int)i);
+ continue;
+ } /* end if */
+ } /* end if */
+ else {
+ if (rbuf[i].str_id || rbuf[i].str_name || rbuf[i].str_desc || rbuf[i].str_orig ||
+ rbuf[i].str_stat || rbuf[i].str_form || rbuf[i].str_unit) {
+ TestErrPrintf("%d: VL data doesn't match!, index(i) = %d\n", __LINE__, (int)i);
+ continue;
+ } /* end if */
+ } /* end else */
+ } /* end else */
+ } /* end for */
+
+ /* Release the space */
+ ret = H5Treclaim(dtype1_id, dset_select_dspace_id, xfer_pid, rbuf);
+ CHECK(ret, FAIL, "H5Treclaim");
+
+ ret = H5Dclose(dset_id);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Open the second data set to check the value of data */
+ dset_id = H5Dopen2(file_id, dset_name2, H5P_DEFAULT);
+ CHECK(dset_id, FAIL, "H5Dopen2");
+
+ /* Write one element in the dataset */
+ ret = H5Dwrite(dset_id, dtype1_id, scalar_dspace_id, single_dspace_id, xfer_pid, &wdata);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ ret = H5Dread(dset_id, dtype1_id, dset_dspace_id, dset_dspace_id, xfer_pid, rbuf);
+ CHECK(ret, FAIL, "H5Dread");
+
+ /* Compare data read in */
+ for (i = 0; i < dset_elmts; i++) {
+ if (i == single_offset[0]) {
+ if (HDstrcmp(rbuf[i].str_id, wdata.str_id) != 0 || rbuf[i].str_name ||
+ HDstrcmp(rbuf[i].str_desc, wdata.str_desc) != 0 ||
+ HDstrcmp(rbuf[i].str_orig, wdata.str_orig) != 0 ||
+ HDstrcmp(rbuf[i].str_stat, wdata.str_stat) != 0 ||
+ HDstrcmp(rbuf[i].str_form, wdata.str_form) != 0 ||
+ HDstrcmp(rbuf[i].str_unit, wdata.str_unit) != 0) {
+ TestErrPrintf("%d: VL data doesn't match!, index(i)=%d\n", __LINE__, (int)i);
+ continue;
+ } /* end if */
+ } /* end if */
+ else {
+ if (HDstrcmp(rbuf[i].str_id, "foobar") != 0 || HDstrcmp(rbuf[i].str_name, "") != 0 ||
+ rbuf[i].str_desc || HDstrcmp(rbuf[i].str_orig, "\0") != 0 ||
+ HDstrcmp(rbuf[i].str_stat, "dead") != 0 ||
+ HDstrcmp(rbuf[i].str_form, "liquid") != 0 ||
+ HDstrcmp(rbuf[i].str_unit, "meter") != 0) {
+ TestErrPrintf("%d: VL data doesn't match!, index(i)=%d\n", __LINE__, (int)i);
+ continue;
+ } /* end if */
+ } /* end if */
+ } /* end for */
+
+ /* Release the space */
+ ret = H5Treclaim(dtype1_id, dset_dspace_id, xfer_pid, rbuf);
+ CHECK(ret, FAIL, "H5Treclaim");
+
+ /* Clear the read buffer */
+ HDmemset(rbuf, 0, dset_elmts * sizeof(dtype1_struct));
+
+ /* Read in non-contiguous selection from dataset */
+ ret = H5Dread(dset_id, dtype1_id, dset_select_dspace_id, dset_select_dspace_id, xfer_pid, rbuf);
+ CHECK(ret, FAIL, "H5Dread");
+
+ /* Compare data read in */
+ for (i = 0; i < dset_elmts; i++) {
+ if (i == single_offset[0]) {
+ if (HDstrcmp(rbuf[i].str_id, wdata.str_id) != 0 || rbuf[i].str_name ||
+ HDstrcmp(rbuf[i].str_desc, wdata.str_desc) != 0 ||
+ HDstrcmp(rbuf[i].str_orig, wdata.str_orig) != 0 ||
+ HDstrcmp(rbuf[i].str_stat, wdata.str_stat) != 0 ||
+ HDstrcmp(rbuf[i].str_form, wdata.str_form) != 0 ||
+ HDstrcmp(rbuf[i].str_unit, wdata.str_unit) != 0) {
+ TestErrPrintf("%d: VL data doesn't match!, index(i)=%d\n", __LINE__, (int)i);
+ continue;
+ } /* end if */
+ } /* end if */
+ else {
+ if ((i % 2) == select_offset[0]) {
+ if (HDstrcmp(rbuf[i].str_id, "foobar") != 0 || HDstrcmp(rbuf[i].str_name, "") != 0 ||
+ rbuf[i].str_desc || HDstrcmp(rbuf[i].str_orig, "\0") != 0 ||
+ HDstrcmp(rbuf[i].str_stat, "dead") != 0 ||
+ HDstrcmp(rbuf[i].str_form, "liquid") != 0 ||
+ HDstrcmp(rbuf[i].str_unit, "meter") != 0) {
+ TestErrPrintf("%d: VL data doesn't match!, index(i) = %d\n", __LINE__, (int)i);
+ continue;
+ } /* end if */
+ } /* end if */
+ else {
+ if (rbuf[i].str_id || rbuf[i].str_name || rbuf[i].str_desc || rbuf[i].str_orig ||
+ rbuf[i].str_stat || rbuf[i].str_form || rbuf[i].str_unit) {
+ TestErrPrintf("%d: VL data doesn't match!, index(i) = %d\n", __LINE__, (int)i);
+ continue;
+ } /* end if */
+ } /* end else */
+ } /* end else */
+ } /* end for */
+
+ /* Release the space */
+ ret = H5Treclaim(dtype1_id, dset_select_dspace_id, xfer_pid, rbuf);
+ CHECK(ret, FAIL, "H5Treclaim");
+
+ ret = H5Dclose(dset_id);
+ CHECK(ret, FAIL, "H5Dclose");
+
+ /* Close the dataspace for the writes */
+ ret = H5Sclose(single_dspace_id);
+ CHECK(ret, FAIL, "H5Sclose");
+ } /* end for */
+ } /* end for */
+
+ ret = H5Fclose(file_id);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Clean up rest of IDs */
+ ret = H5Pclose(xfer_pid);
+ CHECK(ret, FAIL, "H5Pclose");
+
+ ret = H5Sclose(small_dspace_id);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ ret = H5Sclose(large_dspace_id);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ ret = H5Sclose(small_select_dspace_id);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ ret = H5Sclose(large_select_dspace_id);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ ret = H5Sclose(scalar_dspace_id);
+ CHECK(ret, FAIL, "H5Sclose");
+
+ ret = H5Tclose(dtype1_id);
+ CHECK(ret, FAIL, "H5Tclose");
+
+ /* Release buffer */
+ HDfree(rbuf);
+} /* end test_vltypes_fill_value() */
+
+/****************************************************************
+**
+** test_vltypes(): Main VL datatype testing routine.
+**
+****************************************************************/
+void
+test_vltypes(void)
+{
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing Variable-Length Datatypes\n"));
+
+ /* These next tests use the same file */
+ test_vltypes_dataset_create(); /* Check a dataset of VL datatype when the fill
+ * value won't be rewritten to it */
+ test_vltypes_funcs(); /* Test functions with VL types */
+ test_vltypes_vlen_atomic(); /* Test VL atomic datatypes */
+ rewrite_vltypes_vlen_atomic(); /* Check VL memory leak */
+ test_vltypes_vlen_compound(); /* Test VL compound datatypes */
+ rewrite_vltypes_vlen_compound(); /* Check VL memory leak */
+ test_vltypes_compound_vlen_atomic(); /* Test compound datatypes with VL atomic components */
+ rewrite_vltypes_compound_vlen_atomic(); /* Check VL memory leak */
+ test_vltypes_vlen_vlen_atomic(); /* Test VL datatype with VL atomic components */
+ rewrite_longer_vltypes_vlen_vlen_atomic(); /* Overwrite with VL data of longer sequences */
+ rewrite_shorter_vltypes_vlen_vlen_atomic(); /* Overwrite with VL data of shorter sequences */
+ test_vltypes_compound_vlen_vlen(); /* Test compound datatypes with nested VL components */
+ test_vltypes_compound_vlstr(); /* Test rewriting nested VL string data */
+ test_vltypes_fill_value(); /* Test fill value for VL data */
+} /* test_vltypes() */
+
+/*-------------------------------------------------------------------------
+ * Function: cleanup_vltypes
+ *
+ * Purpose: Cleanup temporary test files
+ *
+ * Return: none
+ *
+ * Programmer: Quincey Koziol
+ * June 8, 1999
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+void
+cleanup_vltypes(void)
+{
+ H5Fdelete(FILENAME, H5P_DEFAULT);
+}
diff --git a/test/CMakeLists.txt b/test/CMakeLists.txt
index c3365b7..d52beb0 100644
--- a/test/CMakeLists.txt
+++ b/test/CMakeLists.txt
@@ -395,6 +395,24 @@ if (HDF5_BUILD_UTILS)
set (H5_TESTS ${H5_TESTS} mirror_vfd)
endif ()
+set (HDF5_API_TESTS
+ attribute
+ dataset
+ datatype
+ file
+ group
+ link
+ misc
+ object
+)
+
+if (HDF5_TEST_API_ENABLE_ASYNC)
+ set (HDF5_API_TESTS
+ ${HDF5_API_TESTS}
+ async
+ )
+endif ()
+
macro (ADD_H5_EXE file)
add_executable (${file} ${HDF5_TEST_SOURCE_DIR}/${file}.c)
target_include_directories (${file} PRIVATE "${HDF5_SRC_INCLUDE_DIRS};${HDF5_SRC_BINARY_DIR};${HDF5_TEST_BINARY_DIR};$<$<BOOL:${HDF5_ENABLE_PARALLEL}>:${MPI_C_INCLUDE_DIRS}>")
@@ -879,6 +897,8 @@ if (HDF5_ENABLE_FORMATTERS)
clang_format (HDF5_TEST_use_disable_mdc_flushes_FORMAT use_disable_mdc_flushes)
endif ()
+add_subdirectory (API)
+
if (HDF5_TEST_SERIAL)
include (CMakeTests.cmake)
endif ()
diff --git a/test/h5test.c b/test/h5test.c
index 1797df9..856de4b 100644
--- a/test/h5test.c
+++ b/test/h5test.c
@@ -115,6 +115,13 @@ const char *LIBVER_NAMES[] = {"earliest", /* H5F_LIBVER_EARLIEST = 0 */
/* Previous error reporting function */
static H5E_auto2_t err_func = NULL;
+/* Global variables for testing */
+size_t n_tests_run_g = 0;
+size_t n_tests_passed_g = 0;
+size_t n_tests_failed_g = 0;
+size_t n_tests_skipped_g = 0;
+uint64_t vol_cap_flags_g = H5VL_CAP_FLAG_NONE;
+
static herr_t h5_errors(hid_t estack, void *client_data);
static char *h5_fixname_real(const char *base_name, hid_t fapl, const char *_suffix, char *fullname,
size_t size, hbool_t nest_printf, hbool_t subst_for_superblock);
diff --git a/test/h5test.h b/test/h5test.h
index ea7ab4d..b2c2cda 100644
--- a/test/h5test.h
+++ b/test/h5test.h
@@ -106,21 +106,25 @@ H5TEST_DLLVAR MPI_Info h5_io_info_g; /* MPI INFO object for IO */
do { \
HDprintf("Testing %-62s", WHAT); \
HDfflush(stdout); \
+ n_tests_run_g++; \
} while (0)
#define TESTING_2(WHAT) \
do { \
HDprintf(" Testing %-60s", WHAT); \
HDfflush(stdout); \
+ n_tests_run_g++; \
} while (0)
#define PASSED() \
do { \
HDputs(" PASSED"); \
HDfflush(stdout); \
+ n_tests_passed_g++; \
} while (0)
#define H5_FAILED() \
do { \
HDputs("*FAILED*"); \
HDfflush(stdout); \
+ n_tests_failed_g++; \
} while (0)
#define H5_WARNING() \
do { \
@@ -131,6 +135,7 @@ H5TEST_DLLVAR MPI_Info h5_io_info_g; /* MPI INFO object for IO */
do { \
HDputs(" -SKIP-"); \
HDfflush(stdout); \
+ n_tests_skipped_g++; \
} while (0)
#define PUTS_ERROR(s) \
do { \
@@ -164,6 +169,66 @@ H5TEST_DLLVAR MPI_Info h5_io_info_g; /* MPI INFO object for IO */
goto error; \
} while (0)
+/*
+ * Testing macros used for multi-part tests.
+ */
+#define TESTING_MULTIPART(WHAT) \
+ do { \
+ HDprintf("Testing %-62s", WHAT); \
+ HDputs(""); \
+ HDfflush(stdout); \
+ } while (0)
+
+/*
+ * Begin and end an entire section of multi-part tests. By placing all the
+ * parts of a test between these macros, skipping to the 'error' cleanup
+ * section of a test is deferred until all parts have finished.
+ */
+#define BEGIN_MULTIPART \
+ { \
+ int part_nerrors = 0;
+
+#define END_MULTIPART \
+ if (part_nerrors > 0) \
+ goto error; \
+ }
+
+/*
+ * Begin, end and handle errors within a single part of a multi-part test.
+ * The PART_END macro creates a goto label based on the given "part name".
+ * When a failure occurs in the current part, the PART_ERROR macro uses
+ * this label to skip to the next part of the multi-part test. The PART_ERROR
+ * macro also increments the error count so that the END_MULTIPART macro
+ * knows to skip to the test's 'error' label once all test parts have finished.
+ */
+#define PART_BEGIN(part_name) {
+#define PART_END(part_name) \
+ } \
+ part_##part_name##_end:
+#define PART_ERROR(part_name) \
+ do { \
+ n_tests_failed_g++; \
+ part_nerrors++; \
+ goto part_##part_name##_end; \
+ } while (0)
+#define PART_TEST_ERROR(part_name) \
+ do { \
+ H5_FAILED(); \
+ AT(); \
+ part_nerrors++; \
+ goto part_##part_name##_end; \
+ } while (0)
+
+/*
+ * Simply skips to the goto label for this test part and moves on to the
+ * next test part. Useful for when a test part needs to be skipped for
+ * some reason or is currently unimplemented and empty.
+ */
+#define PART_EMPTY(part_name) \
+ do { \
+ goto part_##part_name##_end; \
+ } while (0)
+
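+/*
+ * A minimal usage sketch of the multi-part macros above, assuming a
+ * hypothetical test with two parts named part_a and part_b, a hypothetical
+ * helper do_part_a(), and an enclosing test function that provides the
+ * usual 'error:' cleanup label:
+ *
+ *     TESTING_MULTIPART("some multi-part test");
+ *
+ *     BEGIN_MULTIPART
+ *     {
+ *         PART_BEGIN(part_a)
+ *         {
+ *             TESTING_2("first part");
+ *             if (do_part_a() < 0)
+ *                 PART_TEST_ERROR(part_a);
+ *             PASSED();
+ *         }
+ *         PART_END(part_a);
+ *
+ *         PART_BEGIN(part_b)
+ *         {
+ *             TESTING_2("second part");
+ *             SKIPPED();
+ *             PART_EMPTY(part_b);
+ *         }
+ *         PART_END(part_b);
+ *     }
+ *     END_MULTIPART;
+ *
+ * A failure in part_a jumps to the goto label generated by PART_END(part_a),
+ * so part_b still runs; END_MULTIPART then branches to the test's 'error'
+ * label because part_nerrors is non-zero.
+ */
+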
/* Number of seconds to wait before killing a test (requires alarm(2)) */
#define H5_ALARM_SEC 1200 /* default is 20 minutes */
@@ -285,7 +350,12 @@ H5TEST_DLL char *getenv_all(MPI_Comm comm, int root, const char *name);
#endif
/* Extern global variables */
-H5TEST_DLLVAR int TestVerbosity;
+H5TEST_DLLVAR int TestVerbosity;
+H5TEST_DLLVAR size_t n_tests_run_g;
+H5TEST_DLLVAR size_t n_tests_passed_g;
+H5TEST_DLLVAR size_t n_tests_failed_g;
+H5TEST_DLLVAR size_t n_tests_skipped_g;
+H5TEST_DLLVAR uint64_t vol_cap_flags_g;
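+
+/*
+ * A sketch of how vol_cap_flags_g is meant to be used (fapl_id below is just
+ * an illustrative file access property list): a test program populates the
+ * flags once via H5Pget_vol_cap_flags() and individual tests then check the
+ * relevant H5VL_CAP_FLAG_* bits before exercising an API, skipping when the
+ * active VOL connector lacks support:
+ *
+ *     if (H5Pget_vol_cap_flags(fapl_id, &vol_cap_flags_g) < 0)
+ *         TEST_ERROR;
+ *
+ *     if (!(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) {
+ *         SKIPPED();
+ *         HDprintf("    basic dataset API isn't supported by this connector\n");
+ *         return 0;
+ *     }
+ */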
H5TEST_DLL void h5_send_message(const char *file, const char *arg1, const char *arg2);
H5TEST_DLL herr_t h5_wait_message(const char *file);
diff --git a/test/vol.c b/test/vol.c
index 29bbb06..6bcae6b 100644
--- a/test/vol.c
+++ b/test/vol.c
@@ -2076,11 +2076,12 @@ test_async_vol_props(void)
hid_t fapl_id = H5I_INVALID_HID;
hid_t vol_id = H5I_INVALID_HID;
H5VL_pass_through_info_t passthru_info;
- uint64_t cap_flags = H5VL_CAP_FLAG_NONE;
char *conn_env_str = NULL;
TESTING("Async VOL props");
+ vol_cap_flags_g = H5VL_CAP_FLAG_NONE;
+
/* Retrieve the file access property for testing */
fapl_id = h5_fileaccess();
@@ -2104,11 +2105,11 @@ test_async_vol_props(void)
/* Test query w/default VOL, which should indicate no async, since native connector
* doesn't support async.
*/
- if (H5Pget_vol_cap_flags(fapl_id, &cap_flags) < 0)
+ if (H5Pget_vol_cap_flags(fapl_id, &vol_cap_flags_g) < 0)
FAIL_STACK_ERROR;
- if ((cap_flags & H5VL_CAP_FLAG_ASYNC) > 0)
+ if ((vol_cap_flags_g & H5VL_CAP_FLAG_ASYNC) > 0)
TEST_ERROR;
- if ((cap_flags & H5VL_CAP_FLAG_NATIVE_FILES) == 0)
+ if ((vol_cap_flags_g & H5VL_CAP_FLAG_NATIVE_FILES) == 0)
TEST_ERROR;
/* Close FAPL */
@@ -2129,12 +2130,12 @@ test_async_vol_props(void)
fapl_id = h5_fileaccess();
/* Test query w/fake async VOL, which should succeed */
- cap_flags = H5VL_CAP_FLAG_NONE;
- if (H5Pget_vol_cap_flags(fapl_id, &cap_flags) < 0)
+ vol_cap_flags_g = H5VL_CAP_FLAG_NONE;
+ if (H5Pget_vol_cap_flags(fapl_id, &vol_cap_flags_g) < 0)
FAIL_STACK_ERROR;
- if ((cap_flags & H5VL_CAP_FLAG_ASYNC) == 0)
+ if ((vol_cap_flags_g & H5VL_CAP_FLAG_ASYNC) == 0)
TEST_ERROR;
- if ((cap_flags & H5VL_CAP_FLAG_NATIVE_FILES) > 0)
+ if ((vol_cap_flags_g & H5VL_CAP_FLAG_NATIVE_FILES) > 0)
TEST_ERROR;
/* Reset environment variable & re-init default connector */
@@ -2155,12 +2156,12 @@ test_async_vol_props(void)
FAIL_STACK_ERROR;
/* Test query w/fake async VOL, which should succeed */
- cap_flags = H5VL_CAP_FLAG_NONE;
- if (H5Pget_vol_cap_flags(fapl_id, &cap_flags) < 0)
+ vol_cap_flags_g = H5VL_CAP_FLAG_NONE;
+ if (H5Pget_vol_cap_flags(fapl_id, &vol_cap_flags_g) < 0)
FAIL_STACK_ERROR;
- if ((cap_flags & H5VL_CAP_FLAG_ASYNC) == 0)
+ if ((vol_cap_flags_g & H5VL_CAP_FLAG_ASYNC) == 0)
TEST_ERROR;
- if ((cap_flags & H5VL_CAP_FLAG_NATIVE_FILES) > 0)
+ if ((vol_cap_flags_g & H5VL_CAP_FLAG_NATIVE_FILES) > 0)
TEST_ERROR;
/* Stack the [internal] passthrough VOL connector on top of the fake async connector */
@@ -2170,12 +2171,12 @@ test_async_vol_props(void)
FAIL_STACK_ERROR;
/* Test query w/passthru -> fake async VOL, which should succeed */
- cap_flags = H5VL_CAP_FLAG_NONE;
- if (H5Pget_vol_cap_flags(fapl_id, &cap_flags) < 0)
+ vol_cap_flags_g = H5VL_CAP_FLAG_NONE;
+ if (H5Pget_vol_cap_flags(fapl_id, &vol_cap_flags_g) < 0)
FAIL_STACK_ERROR;
- if ((cap_flags & H5VL_CAP_FLAG_ASYNC) == 0)
+ if ((vol_cap_flags_g & H5VL_CAP_FLAG_ASYNC) == 0)
TEST_ERROR;
- if ((cap_flags & H5VL_CAP_FLAG_NATIVE_FILES) > 0)
+ if ((vol_cap_flags_g & H5VL_CAP_FLAG_NATIVE_FILES) > 0)
TEST_ERROR;
/* Unregister the fake async VOL ID */
@@ -2224,14 +2225,15 @@ error:
static herr_t
test_vol_cap_flags(void)
{
- hid_t fapl_id = H5I_INVALID_HID;
- hid_t vol_id = H5I_INVALID_HID;
- uint64_t vol_cap_flags = H5VL_CAP_FLAG_NONE;
- char *vol_env = NULL;
+ hid_t fapl_id = H5I_INVALID_HID;
+ hid_t vol_id = H5I_INVALID_HID;
+ char *vol_env = NULL;
H5VL_pass_through_info_t passthru_info;
TESTING("VOL capability flags");
+ vol_cap_flags_g = H5VL_CAP_FLAG_NONE;
+
/* Register a fake VOL */
if ((vol_id = H5VLregister_connector(&fake_vol_g, H5P_DEFAULT)) < 0)
TEST_ERROR;
@@ -2243,13 +2245,13 @@ test_vol_cap_flags(void)
TEST_ERROR;
/* Verify the correctness of the VOL capacity flags */
- if (H5Pget_vol_cap_flags(fapl_id, &vol_cap_flags) < 0)
+ if (H5Pget_vol_cap_flags(fapl_id, &vol_cap_flags_g) < 0)
TEST_ERROR;
- if (!(vol_cap_flags & H5VL_CAP_FLAG_FILE_BASIC))
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC))
TEST_ERROR;
- if (vol_cap_flags & H5VL_CAP_FLAG_ATTR_BASIC)
+ if (vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC)
TEST_ERROR;
/* If using the native VOL by default, check flags again with H5P_DEFAULT */
@@ -2263,12 +2265,12 @@ test_vol_cap_flags(void)
if (NULL == (cls = H5I_object(connector_id)))
TEST_ERROR;
- vol_cap_flags = H5VL_CAP_FLAG_NONE;
+ vol_cap_flags_g = H5VL_CAP_FLAG_NONE;
- if (H5Pget_vol_cap_flags(H5P_DEFAULT, &vol_cap_flags) < 0)
+ if (H5Pget_vol_cap_flags(H5P_DEFAULT, &vol_cap_flags_g) < 0)
TEST_ERROR;
- if (vol_cap_flags != cls->cap_flags)
+ if (vol_cap_flags_g != cls->cap_flags)
TEST_ERROR;
if (H5VLclose(connector_id) < 0)
@@ -2283,15 +2285,15 @@ test_vol_cap_flags(void)
FAIL_STACK_ERROR;
/* Verify the correctness of the VOL capacity flags */
- vol_cap_flags = H5VL_CAP_FLAG_NONE;
+ vol_cap_flags_g = H5VL_CAP_FLAG_NONE;
- if (H5Pget_vol_cap_flags(fapl_id, &vol_cap_flags) < 0)
+ if (H5Pget_vol_cap_flags(fapl_id, &vol_cap_flags_g) < 0)
TEST_ERROR;
- if (!(vol_cap_flags & H5VL_CAP_FLAG_FILE_BASIC))
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC))
TEST_ERROR;
- if (vol_cap_flags & H5VL_CAP_FLAG_ATTR_BASIC)
+ if (vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC)
TEST_ERROR;
if (H5Pclose(fapl_id) < 0)
diff --git a/testpar/API/CMakeLists.txt b/testpar/API/CMakeLists.txt
new file mode 100644
index 0000000..5eb69c4
--- /dev/null
+++ b/testpar/API/CMakeLists.txt
@@ -0,0 +1,279 @@
+# Copyright by The HDF Group.
+# All rights reserved.
+#
+# This file is part of HDF5. The full HDF5 copyright notice, including
+# terms governing use, modification, and redistribution, is contained in
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://www.hdfgroup.org/licenses.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
+#
+
+#------------------------------------------------------------------------------
+# Set module path
+#------------------------------------------------------------------------------
+set(HDF5_TEST_API_CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/CMake")
+set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} ${HDF5_TEST_API_CMAKE_MODULE_PATH})
+
+#------------------------------------------------------------------------------
+# Setup for API tests
+#------------------------------------------------------------------------------
+
+# Ported HDF5 tests
+set (HDF5_API_PAR_TESTS_EXTRA
+ t_bigio
+ t_pshutdown
+ t_shapesame
+ testphdf5
+)
+
+# List of files generated by the HDF5 API tests that
+# should be cleaned up in case the tests fail to remove
+# them
+set (HDF5_API_PAR_TESTS_FILES
+ H5_api_test_parallel.h5
+ H5_api_async_test_parallel.h5
+ H5_api_async_test_parallel_0.h5
+ H5_api_async_test_parallel_1.h5
+ H5_api_async_test_parallel_2.h5
+ H5_api_async_test_parallel_3.h5
+ H5_api_async_test_parallel_4.h5
+ test_file_parallel.h5
+ split_comm_file.h5
+)
+
+#-----------------------------------------------------------------------------
+# Build the main API test executable
+#-----------------------------------------------------------------------------
+foreach (api_test ${HDF5_API_TESTS})
+ set (HDF5_API_PAR_TEST_SRCS
+ ${HDF5_API_PAR_TEST_SRCS}
+ ${CMAKE_CURRENT_SOURCE_DIR}/H5_api_${api_test}_test_parallel.c
+ )
+endforeach ()
+
+set (HDF5_API_PAR_TEST_SRCS
+ ${HDF5_API_PAR_TEST_SRCS}
+ ${CMAKE_CURRENT_SOURCE_DIR}/H5_api_test_parallel.c
+ ${HDF5_TEST_API_SRC_DIR}/H5_api_test_util.c
+)
+
+add_executable (h5_api_test_parallel ${HDF5_API_PAR_TEST_SRCS})
+target_include_directories (
+ h5_api_test_parallel
+ PRIVATE
+ "${HDF5_SRC_INCLUDE_DIRS}"
+ "${HDF5_TEST_PAR_DIR}"
+ "${HDF5_TEST_API_SRC_DIR}"
+ "${HDF5_TEST_API_PAR_SRC_DIR}"
+ "${HDF5_SRC_BINARY_DIR}"
+ "${HDF5_TEST_BINARY_DIR}"
+ "${HDF5_TEST_API_SRC_DIR}"
+ "$<$<BOOL:${HDF5_ENABLE_PARALLEL}>:${MPI_C_INCLUDE_DIRS}>"
+)
+target_compile_options (
+ h5_api_test_parallel
+ PRIVATE
+ "${HDF5_CMAKE_C_FLAGS}"
+)
+target_compile_definitions (
+ h5_api_test_parallel
+ PRIVATE
+ $<$<CONFIG:Developer>:${HDF5_DEVELOPER_DEFS}>
+)
+if (NOT BUILD_SHARED_LIBS)
+ TARGET_C_PROPERTIES (h5_api_test_parallel STATIC)
+ target_link_libraries (
+ h5_api_test_parallel
+ PRIVATE
+ ${HDF5_TEST_LIB_TARGET}
+ ${HDF5_LIB_TARGET}
+ "$<$<BOOL:${HDF5_ENABLE_PARALLEL}>:MPI::MPI_C>"
+ )
+else ()
+ TARGET_C_PROPERTIES (h5_api_test_parallel SHARED)
+ target_link_libraries (
+ h5_api_test_parallel
+ PRIVATE
+ ${HDF5_TEST_LIBSH_TARGET}
+ ${HDF5_LIBSH_TARGET}
+ "$<$<BOOL:${HDF5_ENABLE_PARALLEL}>:MPI::MPI_C>"
+ )
+endif ()
+set_target_properties (
+ h5_api_test_parallel
+ PROPERTIES
+ FOLDER test/par/API
+)
+# Add Target to clang-format
+if (HDF5_ENABLE_FORMATTERS)
+ clang_format (HDF5_TEST_h5_api_test_parallel_FORMAT h5_api_test_parallel)
+endif ()
+
+#-----------------------------------------------------------------------------
+# Build the ported HDF5 test executables
+#-----------------------------------------------------------------------------
+foreach (api_test_extra ${HDF5_API_PAR_TESTS_EXTRA})
+ unset (HDF5_API_PAR_TEST_EXTRA_SRCS)
+
+ set (HDF5_API_PAR_TEST_EXTRA_SRCS
+ ${HDF5_API_PAR_TEST_EXTRA_SRCS}
+ ${CMAKE_CURRENT_SOURCE_DIR}/${api_test_extra}.c
+ )
+
+ if (${api_test_extra} STREQUAL "testphdf5")
+ set (HDF5_API_PAR_TEST_EXTRA_SRCS
+ ${HDF5_API_PAR_TEST_EXTRA_SRCS}
+ ${CMAKE_CURRENT_SOURCE_DIR}/t_ph5basic.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/t_file.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/t_dset.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/t_mdset.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/t_coll_chunk.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/t_span_tree.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/t_prop.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/t_file_image.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/t_coll_md_read.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/t_chunk_alloc.c
+ ${CMAKE_CURRENT_SOURCE_DIR}/t_filter_read.c
+ )
+ endif ()
+
+ add_executable (h5_api_test_parallel_${api_test_extra} ${HDF5_API_PAR_TEST_EXTRA_SRCS})
+ target_include_directories (
+ h5_api_test_parallel_${api_test_extra}
+ PRIVATE
+ "${HDF5_SRC_INCLUDE_DIRS}"
+ "${HDF5_TEST_PAR_DIR}"
+ "${HDF5_TEST_API_SRC_DIR}"
+ "${HDF5_TEST_API_PAR_SRC_DIR}"
+ "${HDF5_SRC_BINARY_DIR}"
+ "${HDF5_TEST_BINARY_DIR}"
+ "$<$<BOOL:${HDF5_ENABLE_PARALLEL}>:${MPI_C_INCLUDE_DIRS}>"
+ )
+ target_compile_options (
+ h5_api_test_parallel_${api_test_extra}
+ PRIVATE
+ "${HDF5_CMAKE_C_FLAGS}"
+ )
+ target_compile_definitions (
+ h5_api_test_parallel_${api_test_extra}
+ PRIVATE
+ $<$<CONFIG:Developer>:${HDF5_DEVELOPER_DEFS}>
+ )
+ if (NOT BUILD_SHARED_LIBS)
+ TARGET_C_PROPERTIES (h5_api_test_parallel_${api_test_extra} STATIC)
+ target_link_libraries (
+ h5_api_test_parallel_${api_test_extra}
+ PRIVATE
+ ${HDF5_TEST_LIB_TARGET}
+ ${HDF5_LIB_TARGET}
+ "$<$<BOOL:${HDF5_ENABLE_PARALLEL}>:MPI::MPI_C>"
+ )
+ else ()
+ TARGET_C_PROPERTIES (h5_api_test_parallel_${api_test_extra} SHARED)
+ target_link_libraries (
+ h5_api_test_parallel_${api_test_extra}
+ PRIVATE
+ ${HDF5_TEST_LIBSH_TARGET}
+ ${HDF5_LIBSH_TARGET}
+ "$<$<BOOL:${HDF5_ENABLE_PARALLEL}>:MPI::MPI_C>"
+ )
+ endif ()
+ set_target_properties (
+ h5_api_test_parallel_${api_test_extra}
+ PROPERTIES
+ FOLDER test/par/API
+ )
+ # Add Target to clang-format
+ if (HDF5_ENABLE_FORMATTERS)
+ clang_format (HDF5_TEST_h5_api_test_parallel_${api_test_extra}_FORMAT h5_api_test_parallel_${api_test_extra})
+ endif ()
+endforeach ()
+
+#-----------------------------------------------------------------------------
+# Add tests if HDF5 parallel testing is enabled
+#-----------------------------------------------------------------------------
+if (HDF5_TEST_PARALLEL)
+ if (HDF5_TEST_API_ENABLE_DRIVER)
+ if ("${HDF5_TEST_API_SERVER}" STREQUAL "")
+ message (FATAL_ERROR "Please set HDF5_TEST_API_SERVER to point to a server executable for the test driver program.")
+ endif ()
+
+ # Driver options
+ if (HDF5_TEST_API_SERVER_ALLOW_ERRORS)
+ set (HDF5_TEST_API_DRIVER_EXTRA_FLAGS --allow-server-errors)
+ endif ()
+ if (HDF5_TEST_API_CLIENT_HELPER)
+ set (HDF5_TEST_API_DRIVER_EXTRA_FLAGS ${HDF5_TEST_API_DRIVER_EXTRA_FLAGS}
+ --client-helper ${HDF5_TEST_API_CLIENT_HELPER}
+ )
+ endif ()
+ if (HDF5_TEST_API_CLIENT_INIT)
+ set (HDF5_TEST_API_DRIVER_EXTRA_FLAGS ${HDF5_TEST_API_DRIVER_EXTRA_FLAGS}
+ --client-init ${HDF5_TEST_API_CLIENT_INIT}
+ )
+ endif ()
+
+ set(last_api_test "")
+ foreach (api_test ${HDF5_API_TESTS})
+ add_test (
+ NAME "h5_api_test_parallel_${api_test}"
+ COMMAND $<TARGET_FILE:h5_api_test_driver>
+ --server ${HDF5_TEST_API_SERVER}
+ --client $<TARGET_FILE:h5_api_test_parallel> "${api_test}"
+ --serial
+ ${HDF5_TEST_API_DRIVER_EXTRA_FLAGS}
+ )
+
+ set_tests_properties("h5_api_test_parallel_${api_test}" PROPERTIES DEPENDS "${last_api_test}")
+
+ set(last_api_test "h5_api_test_parallel_${api_test}")
+ endforeach ()
+
+ foreach (hdf5_test ${HDF5_API_PAR_TESTS_EXTRA})
+ add_test (
+ NAME "h5_api_test_parallel_${hdf5_test}"
+ COMMAND $<TARGET_FILE:h5_api_test_driver>
+ --server ${HDF5_TEST_API_SERVER}
+ --client $<TARGET_FILE:h5_api_test_parallel_${hdf5_test}>
+ --serial
+ ${HDF5_TEST_API_DRIVER_EXTRA_FLAGS}
+ )
+ endforeach ()
+
+ # Hook external tests to same test suite
+ foreach (ext_api_test ${HDF5_API_EXT_PARALLEL_TESTS})
+ add_test (
+ NAME "h5_api_ext_test_parallel_${ext_api_test}"
+ COMMAND $<TARGET_FILE:h5_api_test_driver>
+ --server ${HDF5_TEST_API_SERVER}
+ --client $<TARGET_FILE:${ext_api_test}>
+ --serial
+ ${HDF5_TEST_API_DRIVER_EXTRA_FLAGS}
+ )
+ endforeach ()
+ else ()
+ set(last_api_test "")
+ foreach (api_test ${HDF5_API_TESTS})
+ add_test (
+ NAME "h5_api_test_parallel_${api_test}"
+ COMMAND ${MPIEXEC} ${MPIEXEC_NUMPROC_FLAG} ${MPIEXEC_MAX_NUMPROCS}
+ ${MPIEXEC_PREFLAGS} $<TARGET_FILE:h5_api_test_parallel> "${api_test}"
+ ${MPIEXEC_POSTFLAGS}
+ )
+
+ set_tests_properties("h5_api_test_parallel_${api_test}" PROPERTIES DEPENDS "${last_api_test}")
+
+ set(last_api_test "h5_api_test_parallel_${api_test}")
+ endforeach ()
+
+ foreach (hdf5_test ${HDF5_API_PAR_TESTS_EXTRA})
+ add_test (
+ NAME "h5_api_test_parallel_${hdf5_test}"
+ COMMAND ${MPIEXEC} ${MPIEXEC_NUMPROC_FLAG} ${MPIEXEC_MAX_NUMPROCS}
+ ${MPIEXEC_PREFLAGS} $<TARGET_FILE:h5_api_test_parallel_${hdf5_test}>
+ ${MPIEXEC_POSTFLAGS}
+ )
+ endforeach ()
+ endif ()
+endif ()
diff --git a/testpar/API/H5_api_async_test_parallel.c b/testpar/API/H5_api_async_test_parallel.c
new file mode 100644
index 0000000..dcb5e8d
--- /dev/null
+++ b/testpar/API/H5_api_async_test_parallel.c
@@ -0,0 +1,3668 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+#include "H5_api_async_test_parallel.h"
+
+#ifdef H5ESpublic_H
+
+static int test_one_dataset_io(void);
+static int test_multi_dataset_io(void);
+static int test_multi_file_dataset_io(void);
+static int test_multi_file_grp_dset_io(void);
+static int test_set_extent(void);
+static int test_attribute_exists(void);
+static int test_attribute_io(void);
+static int test_attribute_io_tconv(void);
+static int test_attribute_io_compound(void);
+static int test_group(void);
+static int test_link(void);
+static int test_ocopy_orefresh(void);
+static int test_file_reopen(void);
+
+/*
+ * The array of parallel async tests to be performed.
+ */
+static int (*par_async_tests[])(void) = {
+ test_one_dataset_io,
+ test_multi_dataset_io,
+ test_multi_file_dataset_io,
+ test_multi_file_grp_dset_io,
+ test_set_extent,
+ test_attribute_exists,
+ test_attribute_io,
+ test_attribute_io_tconv,
+ test_attribute_io_compound,
+ test_group,
+ test_link,
+ test_ocopy_orefresh,
+ test_file_reopen,
+};
+
+hbool_t coll_metadata_read = TRUE;
+
+/* Highest "printf" file created (starting at 0) */
+int max_printf_file = -1;
+
+/*
+ * Create a file and a dataset. Each rank writes to a portion
+ * of the dataset and reads it back to verify the data.
+ */
+#define ONE_DATASET_IO_TEST_SPACE_RANK 2
+static int
+test_one_dataset_io(void)
+{
+ hsize_t *dims = NULL;
+ hsize_t start[ONE_DATASET_IO_TEST_SPACE_RANK];
+ hsize_t stride[ONE_DATASET_IO_TEST_SPACE_RANK];
+ hsize_t count[ONE_DATASET_IO_TEST_SPACE_RANK];
+ hsize_t block[ONE_DATASET_IO_TEST_SPACE_RANK];
+ hbool_t op_failed = false;
+ hbool_t is_native_vol = false;
+ size_t i, data_size, num_in_progress;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t fapl_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t space_id = H5I_INVALID_HID;
+ hid_t mspace_id = H5I_INVALID_HID;
+ hid_t es_id = H5I_INVALID_HID;
+ int *write_buf = NULL;
+ int *read_buf = NULL;
+
+ TESTING_MULTIPART("single dataset I/O");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_FLUSH_REFRESH)) {
+ if (MAINPROCESS) {
+ SKIPPED();
+ HDprintf(
+ " API functions for basic file, dataset, or flush aren't supported with this connector\n");
+ }
+
+ return 0;
+ }
+
+ TESTING_2("test setup");
+
+ if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, coll_metadata_read)) < 0)
+ TEST_ERROR;
+
+ /* Create dataspace */
+ if (generate_random_parallel_dimensions(ONE_DATASET_IO_TEST_SPACE_RANK, &dims) < 0)
+ TEST_ERROR;
+
+ if ((space_id = H5Screate_simple(ONE_DATASET_IO_TEST_SPACE_RANK, dims, NULL)) < 0)
+ TEST_ERROR;
+
+ /* Create event stack */
+ if ((es_id = H5EScreate()) < 0)
+ TEST_ERROR;
+
+ /* Create file asynchronously */
+ if ((file_id = H5Fcreate_async(PAR_ASYNC_API_TEST_FILE, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id, es_id)) < 0)
+ TEST_ERROR;
+
+ /* Find out if the native connector is used */
+ if (H5VLobject_is_native(file_id, &is_native_vol) < 0)
+ TEST_ERROR;
+
+ /* Create the dataset asynchronously */
+ if ((dset_id = H5Dcreate_async(file_id, "dset", H5T_NATIVE_INT, space_id, H5P_DEFAULT, H5P_DEFAULT,
+ H5P_DEFAULT, es_id)) < 0)
+ TEST_ERROR;
+
+ /* Calculate size of data buffers - first dimension is skipped in calculation */
+ for (i = 1, data_size = 1; i < ONE_DATASET_IO_TEST_SPACE_RANK; i++)
+ data_size *= dims[i];
+ data_size *= sizeof(int);
+
+ if (NULL == (write_buf = HDmalloc(data_size))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for dataset write\n");
+ TEST_ERROR;
+ }
+
+ if (NULL == (read_buf = HDmalloc(data_size))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for dataset read\n");
+ TEST_ERROR;
+ }
+
+ /* Select this rank's portion of the dataspace */
+ for (i = 0; i < ONE_DATASET_IO_TEST_SPACE_RANK; i++) {
+ if (i == 0) {
+ start[i] = (hsize_t)mpi_rank;
+ block[i] = 1;
+ }
+ else {
+ start[i] = 0;
+ block[i] = dims[i];
+ }
+
+ stride[i] = 1;
+ count[i] = 1;
+ }
+
+ if (H5Sselect_hyperslab(space_id, H5S_SELECT_SET, start, stride, count, block) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't select hyperslab for dataset write\n");
+ goto error;
+ }
+
+ /* Setup memory space for write_buf */
+ {
+ hsize_t mdims[] = {data_size / sizeof(int)};
+
+ if ((mspace_id = H5Screate_simple(1, mdims, NULL)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create memory dataspace\n");
+ goto error;
+ }
+ }
+
+ PASSED();
+
+ BEGIN_MULTIPART
+ {
+ PART_BEGIN(single_dset_eswait)
+ {
+ TESTING_2("synchronization using H5ESwait()");
+
+ /* Initialize write_buf */
+ for (i = 0; i < data_size / sizeof(int); i++)
+ ((int *)write_buf)[i] = mpi_rank;
+
+ /* Write the dataset asynchronously */
+ if (H5Dwrite_async(dset_id, H5T_NATIVE_INT, mspace_id, space_id, H5P_DEFAULT, write_buf, es_id) <
+ 0)
+ PART_TEST_ERROR(single_dset_eswait);
+
+ /* Wait for the event stack to complete */
+ if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0)
+ PART_TEST_ERROR(single_dset_eswait);
+ if (op_failed)
+ PART_TEST_ERROR(single_dset_eswait);
+
+ /* Read the dataset asynchronously */
+ if (H5Dread_async(dset_id, H5T_NATIVE_INT, mspace_id, space_id, H5P_DEFAULT, read_buf, es_id) < 0)
+ PART_TEST_ERROR(single_dset_eswait);
+
+ /* Wait for the event stack to complete */
+ if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0)
+ PART_TEST_ERROR(single_dset_eswait);
+ if (op_failed)
+ PART_TEST_ERROR(single_dset_eswait);
+
+ /* Verify the read data */
+ for (i = 0; i < data_size / sizeof(int); i++)
+ if (write_buf[i] != read_buf[i]) {
+ H5_FAILED();
+ HDprintf(" data verification failed\n");
+ PART_ERROR(single_dset_eswait);
+ } /* end if */
+
+ PASSED();
+ }
+ PART_END(single_dset_eswait);
+
+ PART_BEGIN(single_dset_dclose)
+ {
+ TESTING_2("synchronization using H5Dclose()");
+
+ /* Initialize write_buf */
+ for (i = 0; i < data_size / sizeof(int); i++)
+ ((int *)write_buf)[i] = (int)i;
+
+ /* Write the dataset asynchronously */
+ if (H5Dwrite_async(dset_id, H5T_NATIVE_INT, mspace_id, space_id, H5P_DEFAULT, write_buf, es_id) <
+ 0)
+ PART_TEST_ERROR(single_dset_dclose);
+
+ /* Close the dataset synchronously */
+ if (H5Dclose(dset_id) < 0)
+ PART_TEST_ERROR(single_dset_dclose);
+
+ /* Re-open the dataset asynchronously */
+ if ((dset_id = H5Dopen_async(file_id, "dset", H5P_DEFAULT, es_id)) < 0)
+ PART_TEST_ERROR(single_dset_dclose);
+
+ /* Read the dataset asynchronously */
+ if (H5Dread_async(dset_id, H5T_NATIVE_INT, mspace_id, space_id, H5P_DEFAULT, read_buf, es_id) < 0)
+ PART_TEST_ERROR(single_dset_dclose);
+
+ /* Close the dataset synchronously */
+ if (H5Dclose(dset_id) < 0)
+ PART_TEST_ERROR(single_dset_dclose);
+
+ /* Verify the read data */
+ for (i = 0; i < data_size / sizeof(int); i++)
+ if (write_buf[i] != read_buf[i]) {
+ H5_FAILED();
+ HDprintf(" data verification failed\n");
+ PART_ERROR(single_dset_dclose);
+ } /* end if */
+
+ /* Re-open the dataset asynchronously */
+ if ((dset_id = H5Dopen_async(file_id, "dset", H5P_DEFAULT, es_id)) < 0)
+ PART_TEST_ERROR(single_dset_dclose);
+
+ PASSED();
+ }
+ PART_END(single_dset_dclose);
+
+ PART_BEGIN(single_dset_dflush)
+ {
+ TESTING_2("synchronization using H5Oflush_async()");
+
+ /* Initialize write_buf */
+ for (i = 0; i < data_size / sizeof(int); i++)
+ ((int *)write_buf)[i] = 10 * (int)i;
+
+ /* Write the dataset asynchronously */
+ if (H5Dwrite_async(dset_id, H5T_NATIVE_INT, mspace_id, space_id, H5P_DEFAULT, write_buf, es_id) <
+ 0)
+ PART_TEST_ERROR(single_dset_dflush);
+
+ /* Flush the dataset asynchronously. This will effectively work as a
+ * barrier, guaranteeing the read takes place after the write. Skip this
+ * call because it isn't supported with the native VOL connector in parallel. */
+ if (!is_native_vol && H5Oflush_async(dset_id, es_id) < 0)
+ PART_TEST_ERROR(single_dset_dflush);
+
+ /* Read the dataset asynchronously */
+ if (H5Dread_async(dset_id, H5T_NATIVE_INT, mspace_id, space_id, H5P_DEFAULT, read_buf, es_id) < 0)
+ PART_TEST_ERROR(single_dset_dflush);
+
+ /* Wait for the event stack to complete */
+ if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0)
+ PART_TEST_ERROR(single_dset_dflush);
+ if (op_failed)
+ PART_TEST_ERROR(single_dset_dflush);
+
+ /* Verify the read data */
+ for (i = 0; i < data_size / sizeof(int); i++)
+ if (write_buf[i] != read_buf[i]) {
+ H5_FAILED();
+ HDprintf(" data verification failed\n");
+ PART_ERROR(single_dset_dflush);
+ } /* end if */
+
+ PASSED();
+ }
+ PART_END(single_dset_dflush);
+
+ PART_BEGIN(single_dset_fclose)
+ {
+ TESTING_2("synchronization using H5Fclose()");
+
+ /* Initialize write_buf */
+ for (i = 0; i < data_size / sizeof(int); i++)
+ ((int *)write_buf)[i] = (int)i + 5;
+
+ /* Write the dataset asynchronously */
+ if (H5Dwrite_async(dset_id, H5T_NATIVE_INT, mspace_id, space_id, H5P_DEFAULT, write_buf, es_id) <
+ 0)
+ PART_TEST_ERROR(single_dset_fclose);
+
+ /* Close the dataset asynchronously */
+ if (H5Dclose_async(dset_id, es_id) < 0)
+ PART_TEST_ERROR(single_dset_fclose);
+
+ /* Close the file synchronously */
+ if (H5Fclose(file_id) < 0)
+ PART_TEST_ERROR(single_dset_fclose);
+
+ /* Reopen the file asynchronously. */
+ if ((file_id = H5Fopen_async(PAR_ASYNC_API_TEST_FILE, H5F_ACC_RDONLY, fapl_id, es_id)) < 0)
+ PART_TEST_ERROR(single_dset_fclose);
+
+ /* Re-open the dataset asynchronously */
+ if ((dset_id = H5Dopen_async(file_id, "dset", H5P_DEFAULT, es_id)) < 0)
+ PART_TEST_ERROR(single_dset_fclose);
+
+ /* Read the dataset asynchronously */
+ if (H5Dread_async(dset_id, H5T_NATIVE_INT, mspace_id, space_id, H5P_DEFAULT, read_buf, es_id) < 0)
+ PART_TEST_ERROR(single_dset_fclose);
+
+ /* Close the dataset asynchronously */
+ if (H5Dclose_async(dset_id, es_id) < 0)
+ PART_TEST_ERROR(single_dset_fclose);
+
+ /* Close the file synchronously */
+ if (H5Fclose(file_id) < 0)
+ PART_TEST_ERROR(single_dset_fclose);
+
+ /* Verify the read data */
+ for (i = 0; i < data_size / sizeof(int); i++)
+ if (write_buf[i] != read_buf[i]) {
+ H5_FAILED();
+ HDprintf(" data verification failed\n");
+ PART_ERROR(single_dset_fclose);
+ } /* end if */
+
+ PASSED();
+ }
+ PART_END(single_dset_fclose);
+ }
+ END_MULTIPART;
+
+ TESTING_2("test cleanup");
+
+ /* Wait for the event stack to complete */
+ if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0)
+ TEST_ERROR;
+ if (op_failed)
+ TEST_ERROR;
+
+ if (read_buf) {
+ HDfree(read_buf);
+ read_buf = NULL;
+ }
+
+ if (write_buf) {
+ HDfree(write_buf);
+ write_buf = NULL;
+ }
+
+ if (dims) {
+ HDfree(dims);
+ dims = NULL;
+ }
+
+ if (H5Pclose(fapl_id) < 0)
+ TEST_ERROR;
+ if (H5Sclose(space_id) < 0)
+ TEST_ERROR;
+ if (H5Sclose(mspace_id) < 0)
+ TEST_ERROR;
+ if (H5ESclose(es_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ if (read_buf)
+ HDfree(read_buf);
+ if (write_buf)
+ HDfree(write_buf);
+ if (dims)
+ HDfree(dims);
+ H5Sclose(space_id);
+ H5Sclose(mspace_id);
+ H5Dclose(dset_id);
+ H5Pclose(fapl_id);
+ H5Fclose(file_id);
+ H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed);
+ H5ESclose(es_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+#undef ONE_DATASET_IO_TEST_SPACE_RANK
+
+/*
+ * Create file and multiple datasets. Each rank writes to a
+ * portion of each dataset and reads back its portion of
+ * each dataset.
+ */
+#define MULTI_DATASET_IO_TEST_SPACE_RANK 2
+#define MULTI_DATASET_IO_TEST_NDSETS 5
+static int
+test_multi_dataset_io(void)
+{
+ hsize_t *dims = NULL;
+ hsize_t start[MULTI_DATASET_IO_TEST_SPACE_RANK];
+ hsize_t stride[MULTI_DATASET_IO_TEST_SPACE_RANK];
+ hsize_t count[MULTI_DATASET_IO_TEST_SPACE_RANK];
+ hsize_t block[MULTI_DATASET_IO_TEST_SPACE_RANK];
+ hbool_t op_failed;
+ size_t i, j, data_size, num_in_progress;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t fapl_id = H5I_INVALID_HID;
+ hid_t dset_id[MULTI_DATASET_IO_TEST_NDSETS] = {H5I_INVALID_HID, H5I_INVALID_HID, H5I_INVALID_HID,
+ H5I_INVALID_HID, H5I_INVALID_HID};
+ hid_t space_id = H5I_INVALID_HID;
+ hid_t mspace_id = H5I_INVALID_HID;
+ hid_t es_id = H5I_INVALID_HID;
+ char dset_name[32];
+ int *write_buf = NULL;
+ int *read_buf = NULL;
+
+ TESTING_MULTIPART("multi dataset I/O");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_FLUSH_REFRESH)) {
+ if (MAINPROCESS) {
+ SKIPPED();
+ HDprintf(
+ " API functions for basic file, dataset, or flush aren't supported with this connector\n");
+ }
+
+ return 0;
+ }
+
+ TESTING_2("test setup");
+
+ if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, coll_metadata_read)) < 0)
+ TEST_ERROR;
+
+ /* Generate random dataspace dimensions */
+ if (generate_random_parallel_dimensions(MULTI_DATASET_IO_TEST_SPACE_RANK, &dims) < 0)
+ TEST_ERROR;
+
+ /* Create dataspace */
+ if ((space_id = H5Screate_simple(MULTI_DATASET_IO_TEST_SPACE_RANK, dims, NULL)) < 0)
+ TEST_ERROR;
+
+ /* Create event stack */
+ if ((es_id = H5EScreate()) < 0)
+ TEST_ERROR;
+
+ /* Create file asynchronously */
+ if ((file_id = H5Fcreate_async(PAR_ASYNC_API_TEST_FILE, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id, es_id)) < 0)
+ TEST_ERROR;
+
+ /* Calculate size of data buffers - first dimension is skipped in calculation */
+ for (i = 1, data_size = 1; i < MULTI_DATASET_IO_TEST_SPACE_RANK; i++)
+ data_size *= dims[i];
+ data_size *= sizeof(int);
+ data_size *= MULTI_DATASET_IO_TEST_NDSETS;
+
+ if (NULL == (write_buf = HDmalloc(data_size))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for dataset write\n");
+ TEST_ERROR;
+ }
+
+ if (NULL == (read_buf = HDmalloc(data_size))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for dataset read\n");
+ TEST_ERROR;
+ }
+
+ /* Select this rank's portion of the dataspace */
+ for (i = 0; i < MULTI_DATASET_IO_TEST_SPACE_RANK; i++) {
+ if (i == 0) {
+ start[i] = (hsize_t)mpi_rank;
+ block[i] = 1;
+ }
+ else {
+ start[i] = 0;
+ block[i] = dims[i];
+ }
+
+ stride[i] = 1;
+ count[i] = 1;
+ }
+
+ if (H5Sselect_hyperslab(space_id, H5S_SELECT_SET, start, stride, count, block) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't select hyperslab for dataset write\n");
+ goto error;
+ }
+
+ /* Setup memory space for write_buf */
+ {
+ hsize_t mdims[] = {data_size / MULTI_DATASET_IO_TEST_NDSETS / sizeof(int)};
+
+ if ((mspace_id = H5Screate_simple(1, mdims, NULL)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create memory dataspace\n");
+ goto error;
+ }
+ }
+
+ PASSED();
+
+ BEGIN_MULTIPART
+ {
+ PART_BEGIN(multi_dset_open)
+ {
+ size_t buf_start_idx;
+
+ TESTING_2("keeping datasets open");
+
+ /* Loop over datasets */
+ for (i = 0; i < MULTI_DATASET_IO_TEST_NDSETS; i++) {
+ size_t buf_end_idx;
+
+ /* Set dataset name */
+ sprintf(dset_name, "dset%d", (int)i);
+
+ /* Create the dataset asynchronously */
+ if ((dset_id[i] = H5Dcreate_async(file_id, dset_name, H5T_NATIVE_INT, space_id, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT, es_id)) < 0)
+ PART_TEST_ERROR(multi_dset_open);
+
+ /* Initialize write_buf. Must use a new slice of write_buf for
+ * each dset since we can't overwrite the buffers until I/O is done. */
+ buf_start_idx = i * (data_size / MULTI_DATASET_IO_TEST_NDSETS / sizeof(int));
+ buf_end_idx = buf_start_idx + (data_size / MULTI_DATASET_IO_TEST_NDSETS / sizeof(int));
+ for (j = buf_start_idx; j < buf_end_idx; j++)
+ ((int *)write_buf)[j] = mpi_rank;
+
+ /* Write the dataset asynchronously */
+ if (H5Dwrite_async(dset_id[i], H5T_NATIVE_INT, mspace_id, space_id, H5P_DEFAULT,
+ &write_buf[buf_start_idx], es_id) < 0)
+ PART_TEST_ERROR(multi_dset_open);
+ } /* end for */
+
+ /* Flush the file asynchronously. This will effectively work as a
+ * barrier, guaranteeing the read takes place after the write. */
+ if (H5Fflush_async(file_id, H5F_SCOPE_LOCAL, es_id) < 0)
+ PART_TEST_ERROR(multi_dset_open);
+
+ /* Loop over datasets */
+ for (i = 0; i < MULTI_DATASET_IO_TEST_NDSETS; i++) {
+ buf_start_idx = i * (data_size / MULTI_DATASET_IO_TEST_NDSETS / sizeof(int));
+
+ /* Read the dataset asynchronously */
+ if (H5Dread_async(dset_id[i], H5T_NATIVE_INT, mspace_id, space_id, H5P_DEFAULT,
+ &read_buf[buf_start_idx], es_id) < 0)
+ PART_TEST_ERROR(multi_dset_open);
+ } /* end for */
+
+ /* Wait for the event stack to complete */
+ if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0)
+ PART_TEST_ERROR(multi_dset_open);
+ if (op_failed)
+ PART_TEST_ERROR(multi_dset_open);
+
+ /* Verify the read data */
+ for (i = 0; i < data_size / sizeof(int); i++)
+ if (write_buf[i] != read_buf[i]) {
+ H5_FAILED();
+ HDprintf(" data verification failed\n");
+ PART_ERROR(multi_dset_open);
+ } /* end if */
+
+ /* Close the datasets */
+ for (i = 0; i < MULTI_DATASET_IO_TEST_NDSETS; i++)
+ if (H5Dclose(dset_id[i]) < 0)
+ PART_TEST_ERROR(multi_dset_open);
+
+ PASSED();
+ }
+ PART_END(multi_dset_open);
+
+ PART_BEGIN(multi_dset_close)
+ {
+ size_t buf_start_idx;
+
+ TESTING_2("closing datasets between I/O");
+
+ /* Loop over datasets */
+ for (i = 0; i < MULTI_DATASET_IO_TEST_NDSETS; i++) {
+ size_t buf_end_idx;
+
+ /* Set dataset name */
+ sprintf(dset_name, "dset%d", (int)i);
+
+ /* Open the dataset asynchronously */
+ if ((dset_id[0] = H5Dopen_async(file_id, dset_name, H5P_DEFAULT, es_id)) < 0)
+ PART_TEST_ERROR(multi_dset_close);
+
+ /* Initialize write_buf. */
+ buf_start_idx = i * (data_size / MULTI_DATASET_IO_TEST_NDSETS / sizeof(int));
+ buf_end_idx = buf_start_idx + (data_size / MULTI_DATASET_IO_TEST_NDSETS / sizeof(int));
+ for (j = buf_start_idx; j < buf_end_idx; j++)
+ ((int *)write_buf)[j] = mpi_rank * 10;
+
+ /* Write the dataset asynchronously */
+ if (H5Dwrite_async(dset_id[0], H5T_NATIVE_INT, mspace_id, space_id, H5P_DEFAULT,
+ &write_buf[buf_start_idx], es_id) < 0)
+ PART_TEST_ERROR(multi_dset_close);
+
+ /* Close the dataset asynchronously */
+ if (H5Dclose_async(dset_id[0], es_id) < 0)
+ PART_TEST_ERROR(multi_dset_close);
+ } /* end for */
+
+ /* Flush the file asynchronously. This will effectively work as a
+ * barrier, guaranteeing the read takes place after the write. */
+ if (H5Fflush_async(file_id, H5F_SCOPE_LOCAL, es_id) < 0)
+ PART_TEST_ERROR(multi_dset_close);
+
+ /* Loop over datasets */
+ for (i = 0; i < MULTI_DATASET_IO_TEST_NDSETS; i++) {
+ /* Set dataset name */
+ sprintf(dset_name, "dset%d", (int)i);
+
+ /* Open the dataset asynchronously */
+ if ((dset_id[0] = H5Dopen_async(file_id, dset_name, H5P_DEFAULT, es_id)) < 0)
+ PART_TEST_ERROR(multi_dset_close);
+
+ /* Read the dataset asynchronously */
+ buf_start_idx = i * (data_size / MULTI_DATASET_IO_TEST_NDSETS / sizeof(int));
+ if (H5Dread_async(dset_id[0], H5T_NATIVE_INT, mspace_id, space_id, H5P_DEFAULT,
+ &read_buf[buf_start_idx], es_id) < 0)
+ PART_TEST_ERROR(multi_dset_close);
+
+ /* Close the dataset asynchronously */
+ if (H5Dclose_async(dset_id[0], es_id) < 0)
+ PART_TEST_ERROR(multi_dset_close);
+ } /* end for */
+
+ /* Wait for the event stack to complete */
+ if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0)
+ PART_TEST_ERROR(multi_dset_close);
+ if (op_failed)
+ PART_TEST_ERROR(multi_dset_close);
+
+ /* Verify the read data */
+ for (i = 0; i < data_size / sizeof(int); i++)
+ if (write_buf[i] != read_buf[i]) {
+ H5_FAILED();
+ HDprintf(" data verification failed\n");
+ PART_ERROR(multi_dset_close);
+ } /* end if */
+
+ PASSED();
+ }
+ PART_END(multi_dset_close);
+ }
+ END_MULTIPART;
+
+ TESTING_2("test cleanup");
+
+ /* Wait for the event stack to complete */
+ if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0)
+ TEST_ERROR;
+ if (op_failed)
+ TEST_ERROR;
+
+ if (read_buf) {
+ HDfree(read_buf);
+ read_buf = NULL;
+ }
+
+ if (write_buf) {
+ HDfree(write_buf);
+ write_buf = NULL;
+ }
+
+ if (dims) {
+ HDfree(dims);
+ dims = NULL;
+ }
+
+ if (H5Pclose(fapl_id) < 0)
+ TEST_ERROR;
+ if (H5Sclose(space_id) < 0)
+ TEST_ERROR;
+ if (H5Sclose(mspace_id) < 0)
+ TEST_ERROR;
+ if (H5ESclose(es_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ if (read_buf)
+ HDfree(read_buf);
+ if (write_buf)
+ HDfree(write_buf);
+ if (dims)
+ HDfree(dims);
+ H5Sclose(space_id);
+ H5Sclose(mspace_id);
+ for (i = 0; i < MULTI_DATASET_IO_TEST_NDSETS; i++)
+ H5Dclose(dset_id[i]);
+ H5Pclose(fapl_id);
+ H5Fclose(file_id);
+ H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed);
+ H5ESclose(es_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+#undef MULTI_DATASET_IO_TEST_SPACE_RANK
+#undef MULTI_DATASET_IO_TEST_NDSETS
+
+/*
+ * Create multiple files, each with a single dataset. Each rank writes
+ * to a portion of each dataset and reads from a portion of each dataset.
+ */
+#define MULTI_FILE_DATASET_IO_TEST_SPACE_RANK 2
+#define MULTI_FILE_DATASET_IO_TEST_NFILES 5
+static int
+test_multi_file_dataset_io(void)
+{
+ hsize_t *dims = NULL;
+ hsize_t start[MULTI_FILE_DATASET_IO_TEST_SPACE_RANK];
+ hsize_t stride[MULTI_FILE_DATASET_IO_TEST_SPACE_RANK];
+ hsize_t count[MULTI_FILE_DATASET_IO_TEST_SPACE_RANK];
+ hsize_t block[MULTI_FILE_DATASET_IO_TEST_SPACE_RANK];
+ hbool_t op_failed = false;
+ hbool_t is_native_vol = false;
+ size_t i, j, data_size, num_in_progress;
+ hid_t fapl_id = H5I_INVALID_HID;
+ hid_t file_id[MULTI_FILE_DATASET_IO_TEST_NFILES] = {H5I_INVALID_HID, H5I_INVALID_HID, H5I_INVALID_HID,
+ H5I_INVALID_HID, H5I_INVALID_HID};
+ hid_t dset_id[MULTI_FILE_DATASET_IO_TEST_NFILES] = {H5I_INVALID_HID, H5I_INVALID_HID, H5I_INVALID_HID,
+ H5I_INVALID_HID, H5I_INVALID_HID};
+ hid_t space_id = H5I_INVALID_HID;
+ hid_t mspace_id = H5I_INVALID_HID;
+ hid_t es_id = H5I_INVALID_HID;
+ char file_name[32];
+ int *write_buf = NULL;
+ int *read_buf = NULL;
+
+ TESTING_MULTIPART("multi file dataset I/O");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_FLUSH_REFRESH)) {
+ if (MAINPROCESS) {
+ SKIPPED();
+ HDprintf(
+ " API functions for basic file, dataset, or flush aren't supported with this connector\n");
+ }
+
+ return 0;
+ }
+
+ TESTING_2("test setup");
+
+ if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, coll_metadata_read)) < 0)
+ TEST_ERROR;
+
+ /* Generate random dataspace dimensions */
+ if (generate_random_parallel_dimensions(MULTI_FILE_DATASET_IO_TEST_SPACE_RANK, &dims) < 0)
+ TEST_ERROR;
+
+ /* Create dataspace */
+ if ((space_id = H5Screate_simple(MULTI_FILE_DATASET_IO_TEST_SPACE_RANK, dims, NULL)) < 0)
+ TEST_ERROR;
+
+ /* Create event stack */
+ if ((es_id = H5EScreate()) < 0)
+ TEST_ERROR;
+
+ /* Calculate size of data buffers - first dimension is skipped in calculation */
+ for (i = 1, data_size = 1; i < MULTI_FILE_DATASET_IO_TEST_SPACE_RANK; i++)
+ data_size *= dims[i];
+ data_size *= sizeof(int);
+ data_size *= MULTI_FILE_DATASET_IO_TEST_NFILES;
+
+ if (NULL == (write_buf = HDmalloc(data_size))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for dataset write\n");
+ TEST_ERROR;
+ }
+
+ if (NULL == (read_buf = HDmalloc(data_size))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for dataset read\n");
+ TEST_ERROR;
+ }
+
+ /* Select this rank's portion of the dataspace */
+ for (i = 0; i < MULTI_FILE_DATASET_IO_TEST_SPACE_RANK; i++) {
+ if (i == 0) {
+ start[i] = (hsize_t)mpi_rank;
+ block[i] = 1;
+ }
+ else {
+ start[i] = 0;
+ block[i] = dims[i];
+ }
+
+ stride[i] = 1;
+ count[i] = 1;
+ }
+
+ if (H5Sselect_hyperslab(space_id, H5S_SELECT_SET, start, stride, count, block) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't select hyperslab for dataset write\n");
+ goto error;
+ }
+
+ /* Setup memory space for write_buf */
+ {
+ hsize_t mdims[] = {data_size / MULTI_FILE_DATASET_IO_TEST_NFILES / sizeof(int)};
+
+ if ((mspace_id = H5Screate_simple(1, mdims, NULL)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create memory dataspace\n");
+ goto error;
+ }
+ }
+
+ PASSED();
+
+ BEGIN_MULTIPART
+ {
+ PART_BEGIN(multi_file_dset_open)
+ {
+ size_t buf_start_idx;
+
+ TESTING_2("keeping files and datasets open");
+
+ /* Loop over files */
+ for (i = 0; i < MULTI_FILE_DATASET_IO_TEST_NFILES; i++) {
+ size_t buf_end_idx;
+
+ /* Set file name */
+ sprintf(file_name, PAR_ASYNC_API_TEST_FILE_PRINTF, (int)i);
+
+ /* Create file asynchronously */
+ if ((file_id[i] = H5Fcreate_async(file_name, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id, es_id)) < 0)
+ PART_TEST_ERROR(multi_file_dset_open);
+ if ((int)i > max_printf_file)
+ max_printf_file = (int)i;
+
+ /* Create the dataset asynchronously */
+ if ((dset_id[i] = H5Dcreate_async(file_id[i], "dset", H5T_NATIVE_INT, space_id, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT, es_id)) < 0)
+ PART_TEST_ERROR(multi_file_dset_open);
+
+ /* Initialize write_buf. Must use a new slice of write_buf for
+ * each dset since we can't overwrite the buffers until I/O is done. */
+ buf_start_idx = i * (data_size / MULTI_FILE_DATASET_IO_TEST_NFILES / sizeof(int));
+ buf_end_idx = buf_start_idx + (data_size / MULTI_FILE_DATASET_IO_TEST_NFILES / sizeof(int));
+ for (j = buf_start_idx; j < buf_end_idx; j++)
+ ((int *)write_buf)[j] = mpi_rank;
+
+ /* Write the dataset asynchronously */
+ if (H5Dwrite_async(dset_id[i], H5T_NATIVE_INT, mspace_id, space_id, H5P_DEFAULT,
+ &write_buf[buf_start_idx], es_id) < 0)
+ PART_TEST_ERROR(multi_file_dset_open);
+ } /* end for */
+
+ /* Find out if the native connector is used */
+ if (H5VLobject_is_native(file_id[0], &is_native_vol) < 0)
+ PART_TEST_ERROR(multi_file_dset_open);
+
+ /* Loop over files */
+ for (i = 0; i < MULTI_FILE_DATASET_IO_TEST_NFILES; i++) {
+ /* Flush the dataset asynchronously. This will effectively work as a
+ * barrier, guaranteeing the read takes place after the write. Skip this
+ * call because it isn't supported with the native VOL connector in parallel. */
+ if (!is_native_vol && H5Oflush_async(dset_id[i], es_id) < 0)
+ PART_TEST_ERROR(multi_file_dset_open);
+
+ /* Read the dataset asynchronously */
+ buf_start_idx = i * (data_size / MULTI_FILE_DATASET_IO_TEST_NFILES / sizeof(int));
+ if (H5Dread_async(dset_id[i], H5T_NATIVE_INT, mspace_id, space_id, H5P_DEFAULT,
+ &read_buf[buf_start_idx], es_id) < 0)
+ PART_TEST_ERROR(multi_file_dset_open);
+ } /* end for */
+
+ /* Wait for the event stack to complete */
+ if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0)
+ PART_TEST_ERROR(multi_file_dset_open);
+ if (op_failed)
+ PART_TEST_ERROR(multi_file_dset_open);
+
+ /* Verify the read data */
+ for (i = 0; i < data_size / sizeof(int); i++)
+ if (write_buf[i] != read_buf[i]) {
+ H5_FAILED();
+ HDprintf(" data verification failed\n");
+ PART_ERROR(multi_file_dset_open);
+ } /* end if */
+
+ /* Close the datasets */
+ for (i = 0; i < MULTI_FILE_DATASET_IO_TEST_NFILES; i++)
+ if (H5Dclose(dset_id[i]) < 0)
+ PART_TEST_ERROR(multi_file_dset_open);
+
+ PASSED();
+ }
+ PART_END(multi_file_dset_open);
+
+ PART_BEGIN(multi_file_dset_dclose)
+ {
+ size_t buf_start_idx;
+
+ TESTING_2("closing datasets between I/O");
+
+ /* Loop over files */
+ for (i = 0; i < MULTI_FILE_DATASET_IO_TEST_NFILES; i++) {
+ size_t buf_end_idx;
+
+ /* Open the dataset asynchronously */
+ if ((dset_id[0] = H5Dopen_async(file_id[i], "dset", H5P_DEFAULT, es_id)) < 0)
+ PART_TEST_ERROR(multi_file_dset_dclose);
+
+ /* Initialize write_buf. */
+ buf_start_idx = i * (data_size / MULTI_FILE_DATASET_IO_TEST_NFILES / sizeof(int));
+ buf_end_idx = buf_start_idx + (data_size / MULTI_FILE_DATASET_IO_TEST_NFILES / sizeof(int));
+ for (j = buf_start_idx; j < buf_end_idx; j++)
+ ((int *)write_buf)[j] = mpi_rank * 10;
+
+ /* Write the dataset asynchronously */
+ if (H5Dwrite_async(dset_id[0], H5T_NATIVE_INT, mspace_id, space_id, H5P_DEFAULT,
+ &write_buf[buf_start_idx], es_id) < 0)
+ PART_TEST_ERROR(multi_file_dset_dclose);
+
+ /* Close the dataset asynchronously */
+ if (H5Dclose_async(dset_id[0], es_id) < 0)
+ PART_TEST_ERROR(multi_file_dset_dclose);
+ } /* end for */
+
+ /* Loop over files */
+ for (i = 0; i < MULTI_FILE_DATASET_IO_TEST_NFILES; i++) {
+ /* Flush the file asynchronously. This will effectively work as a
+ * barrier, guaranteeing the read takes place after the write. */
+ if (H5Fflush_async(file_id[i], H5F_SCOPE_LOCAL, es_id) < 0)
+ PART_TEST_ERROR(multi_file_dset_dclose);
+
+ /* Open the dataset asynchronously */
+ if ((dset_id[0] = H5Dopen_async(file_id[i], "dset", H5P_DEFAULT, es_id)) < 0)
+ PART_TEST_ERROR(multi_file_dset_dclose);
+
+ /* Read the dataset asynchronously */
+ buf_start_idx = i * (data_size / MULTI_FILE_DATASET_IO_TEST_NFILES / sizeof(int));
+ if (H5Dread_async(dset_id[0], H5T_NATIVE_INT, mspace_id, space_id, H5P_DEFAULT,
+ &read_buf[buf_start_idx], es_id) < 0)
+ PART_TEST_ERROR(multi_file_dset_dclose);
+
+ /* Close the dataset asynchronously */
+ if (H5Dclose_async(dset_id[0], es_id) < 0)
+ PART_TEST_ERROR(multi_file_dset_dclose);
+ } /* end for */
+
+ /* Wait for the event stack to complete */
+ if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0)
+ PART_TEST_ERROR(multi_file_dset_dclose);
+ if (op_failed)
+ PART_TEST_ERROR(multi_file_dset_dclose);
+
+ /* Verify the read data */
+ for (i = 0; i < data_size / sizeof(int); i++)
+ if (write_buf[i] != read_buf[i]) {
+ H5_FAILED();
+ HDprintf(" data verification failed\n");
+ PART_ERROR(multi_file_dset_dclose);
+ } /* end if */
+
+ /* Close the files */
+ for (i = 0; i < MULTI_FILE_DATASET_IO_TEST_NFILES; i++)
+ if (H5Fclose(file_id[i]) < 0)
+ PART_TEST_ERROR(multi_file_dset_dclose);
+
+ PASSED();
+ }
+ PART_END(multi_file_dset_dclose);
+
+ PART_BEGIN(multi_file_dset_fclose)
+ {
+ size_t buf_start_idx;
+
+ TESTING_2("closing files between I/O");
+
+ /* Loop over files */
+ for (i = 0; i < MULTI_FILE_DATASET_IO_TEST_NFILES; i++) {
+ size_t buf_end_idx;
+
+ /* Set file name */
+ sprintf(file_name, PAR_ASYNC_API_TEST_FILE_PRINTF, (int)i);
+
+ /* Open the file asynchronously */
+ if ((file_id[0] = H5Fopen_async(file_name, H5F_ACC_RDWR, fapl_id, es_id)) < 0)
+ PART_TEST_ERROR(multi_file_dset_fclose);
+
+ /* Open the dataset asynchronously */
+ if ((dset_id[0] = H5Dopen_async(file_id[0], "dset", H5P_DEFAULT, es_id)) < 0)
+ PART_TEST_ERROR(multi_file_dset_fclose);
+
+ /* Initialize write_buf. */
+ buf_start_idx = i * (data_size / MULTI_FILE_DATASET_IO_TEST_NFILES / sizeof(int));
+ buf_end_idx = buf_start_idx + (data_size / MULTI_FILE_DATASET_IO_TEST_NFILES / sizeof(int));
+ for (j = buf_start_idx; j < buf_end_idx; j++)
+ ((int *)write_buf)[j] = mpi_rank + 5;
+
+ /* Write the dataset asynchronously */
+ if (H5Dwrite_async(dset_id[0], H5T_NATIVE_INT, mspace_id, space_id, H5P_DEFAULT,
+ &write_buf[buf_start_idx], es_id) < 0)
+ PART_TEST_ERROR(multi_file_dset_fclose);
+
+ /* Close the dataset asynchronously */
+ if (H5Dclose_async(dset_id[0], es_id) < 0)
+ PART_TEST_ERROR(multi_file_dset_fclose);
+
+ /* Close the file asynchronously */
+ if (H5Fclose_async(file_id[0], es_id) < 0)
+ PART_TEST_ERROR(multi_file_dset_fclose);
+ } /* end for */
+
+ /* Wait for the event stack to complete */
+ if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0)
+ PART_TEST_ERROR(multi_file_dset_fclose);
+ if (op_failed)
+ PART_TEST_ERROR(multi_file_dset_fclose);
+
+ /* Loop over files */
+ for (i = 0; i < MULTI_FILE_DATASET_IO_TEST_NFILES; i++) {
+ /* Set file name */
+ sprintf(file_name, PAR_ASYNC_API_TEST_FILE_PRINTF, (int)i);
+
+ /* Open the file asynchronously */
+ if ((file_id[0] = H5Fopen_async(file_name, H5F_ACC_RDONLY, fapl_id, es_id)) < 0)
+ PART_TEST_ERROR(multi_file_dset_fclose);
+
+ /* Open the dataset asynchronously */
+ if ((dset_id[0] = H5Dopen_async(file_id[0], "dset", H5P_DEFAULT, es_id)) < 0)
+ PART_TEST_ERROR(multi_file_dset_fclose);
+
+ /* Read the dataset asynchronously */
+ buf_start_idx = i * (data_size / MULTI_FILE_DATASET_IO_TEST_NFILES / sizeof(int));
+ if (H5Dread_async(dset_id[0], H5T_NATIVE_INT, mspace_id, space_id, H5P_DEFAULT,
+ &read_buf[buf_start_idx], es_id) < 0)
+ PART_TEST_ERROR(multi_file_dset_fclose);
+
+ /* Close the dataset asynchronously */
+ if (H5Dclose_async(dset_id[0], es_id) < 0)
+ PART_TEST_ERROR(multi_file_dset_fclose);
+
+ /* Close the file asynchronously */
+ if (H5Fclose_async(file_id[0], es_id) < 0)
+ PART_TEST_ERROR(multi_file_dset_fclose);
+ } /* end for */
+
+ /* Wait for the event stack to complete */
+ if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0)
+ PART_TEST_ERROR(multi_file_dset_fclose);
+ if (op_failed)
+ PART_TEST_ERROR(multi_file_dset_fclose);
+
+ /* Verify the read data */
+ for (i = 0; i < data_size / sizeof(int); i++)
+ if (write_buf[i] != read_buf[i]) {
+ H5_FAILED();
+ HDprintf(" data verification failed\n");
+ PART_ERROR(multi_file_dset_fclose);
+ } /* end if */
+
+ PASSED();
+ }
+ PART_END(multi_file_dset_fclose);
+ }
+ END_MULTIPART;
+
+ TESTING_2("test cleanup");
+
+ /* Wait for the event stack to complete */
+ if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0)
+ TEST_ERROR;
+ if (op_failed)
+ TEST_ERROR;
+
+ if (read_buf) {
+ HDfree(read_buf);
+ read_buf = NULL;
+ }
+
+ if (write_buf) {
+ HDfree(write_buf);
+ write_buf = NULL;
+ }
+
+ if (dims) {
+ HDfree(dims);
+ dims = NULL;
+ }
+
+ if (H5Pclose(fapl_id) < 0)
+ TEST_ERROR;
+ if (H5Sclose(space_id) < 0)
+ TEST_ERROR;
+ if (H5Sclose(mspace_id) < 0)
+ TEST_ERROR;
+ if (H5ESclose(es_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ if (read_buf)
+ HDfree(read_buf);
+ if (write_buf)
+ HDfree(write_buf);
+ if (dims)
+ HDfree(dims);
+ H5Sclose(space_id);
+ H5Sclose(mspace_id);
+ for (i = 0; i < MULTI_FILE_DATASET_IO_TEST_NFILES; i++) {
+ H5Dclose(dset_id[i]);
+ H5Fclose(file_id[i]);
+ }
+ H5Pclose(fapl_id);
+ H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed);
+ H5ESclose(es_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+#undef MULTI_FILE_DATASET_IO_TEST_SPACE_RANK
+#undef MULTI_FILE_DATASET_IO_TEST_NFILES
+
+/*
+ * Create multiple files, each with a single group and dataset. Each rank
+ * writes to a portion of each dataset and reads from a portion of each dataset.
+ */
+#define MULTI_FILE_GRP_DSET_IO_TEST_SPACE_RANK 2
+#define MULTI_FILE_GRP_DSET_IO_TEST_NFILES 5
+static int
+test_multi_file_grp_dset_io(void)
+{
+ hsize_t *dims = NULL;
+ hsize_t start[MULTI_FILE_GRP_DSET_IO_TEST_SPACE_RANK];
+ hsize_t stride[MULTI_FILE_GRP_DSET_IO_TEST_SPACE_RANK];
+ hsize_t count[MULTI_FILE_GRP_DSET_IO_TEST_SPACE_RANK];
+ hsize_t block[MULTI_FILE_GRP_DSET_IO_TEST_SPACE_RANK];
+ hbool_t op_failed;
+ size_t i, j, data_size, num_in_progress;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t fapl_id = H5I_INVALID_HID;
+ hid_t grp_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t space_id = H5I_INVALID_HID;
+ hid_t mspace_id = H5I_INVALID_HID;
+ hid_t es_id = H5I_INVALID_HID;
+ char file_name[32];
+ int *write_buf = NULL;
+ int *read_buf = NULL;
+
+ TESTING_MULTIPART("multi file dataset I/O with groups")
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) {
+ if (MAINPROCESS) {
+ SKIPPED();
+ HDprintf(
+ " API functions for basic file, group, or dataset aren't supported with this connector\n");
+ }
+
+ return 0;
+ }
+
+ TESTING_2("test setup");
+
+ if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, coll_metadata_read)) < 0)
+ TEST_ERROR;
+
+ /* Create dataspace */
+ if (generate_random_parallel_dimensions(MULTI_FILE_GRP_DSET_IO_TEST_SPACE_RANK, &dims) < 0)
+ TEST_ERROR;
+
+ /* Create dataspace */
+ if ((space_id = H5Screate_simple(MULTI_FILE_GRP_DSET_IO_TEST_SPACE_RANK, dims, NULL)) < 0)
+ TEST_ERROR;
+
+ /* Create event stack */
+ if ((es_id = H5EScreate()) < 0)
+ TEST_ERROR;
+
+ /* Calculate size of data buffers - first dimension is skipped in calculation */
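+ /* (Each rank only handles a single "row" of the first dimension, and the buffer holds one such
+ * slice per file.) */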
+ for (i = 1, data_size = 1; i < MULTI_FILE_GRP_DSET_IO_TEST_SPACE_RANK; i++)
+ data_size *= dims[i];
+ data_size *= sizeof(int);
+ data_size *= MULTI_FILE_GRP_DSET_IO_TEST_NFILES;
+
+ if (NULL == (write_buf = HDmalloc(data_size))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for dataset write\n");
+ TEST_ERROR;
+ }
+
+ if (NULL == (read_buf = HDmalloc(data_size))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for dataset read\n");
+ TEST_ERROR;
+ }
+
+ /* Select this rank's portion of the dataspace */
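+ /* First dimension: this rank claims the single row at index mpi_rank; remaining dimensions: the
+ * full extent is selected. */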
+ for (i = 0; i < MULTI_FILE_GRP_DSET_IO_TEST_SPACE_RANK; i++) {
+ if (i == 0) {
+ start[i] = (hsize_t)mpi_rank;
+ block[i] = 1;
+ }
+ else {
+ start[i] = 0;
+ block[i] = dims[i];
+ }
+
+ stride[i] = 1;
+ count[i] = 1;
+ }
+
+ if (H5Sselect_hyperslab(space_id, H5S_SELECT_SET, start, stride, count, block) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't select hyperslab for dataset write\n");
+ goto error;
+ }
+
+ /* Setup memory space for write_buf */
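+ /* (a 1-D space sized to a single file's slice of the buffer, in int elements) */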
+ {
+ hsize_t mdims[] = {data_size / MULTI_FILE_GRP_DSET_IO_TEST_NFILES / sizeof(int)};
+
+ if ((mspace_id = H5Screate_simple(1, mdims, NULL)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create memory dataspace\n");
+ goto error;
+ }
+ }
+
+ PASSED();
+
+ BEGIN_MULTIPART
+ {
+ PART_BEGIN(multi_file_grp_dset_no_kick)
+ {
+ size_t buf_start_idx;
+
+ TESTING_2("without intermediate calls to H5ESwait()");
+
+ /* Loop over files */
+ for (i = 0; i < MULTI_FILE_GRP_DSET_IO_TEST_NFILES; i++) {
+ size_t buf_end_idx;
+
+ /* Set file name */
+ sprintf(file_name, PAR_ASYNC_API_TEST_FILE_PRINTF, (int)i);
+
+ /* Create file asynchronously */
+ if ((file_id = H5Fcreate_async(file_name, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id, es_id)) < 0)
+ PART_TEST_ERROR(multi_file_grp_dset_no_kick);
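+ /* Track the highest file index created; the printf-style test files are presumably removed by
+ * this test program's cleanup code. */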
+ if ((int)i > max_printf_file)
+ max_printf_file = (int)i;
+
+ /* Create the group asynchronously */
+ if ((grp_id = H5Gcreate_async(file_id, "grp", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT, es_id)) <
+ 0)
+ PART_TEST_ERROR(multi_file_grp_dset_no_kick);
+
+ /* Create the dataset asynchronously */
+ if ((dset_id = H5Dcreate_async(grp_id, "dset", H5T_NATIVE_INT, space_id, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT, es_id)) < 0)
+ PART_TEST_ERROR(multi_file_grp_dset_no_kick);
+
+ /* Initialize write_buf. Must use a new slice of write_buf for
+ * each dset since we can't overwrite the buffers until I/O is done. */
+ buf_start_idx = i * (data_size / MULTI_FILE_GRP_DSET_IO_TEST_NFILES / sizeof(int));
+ buf_end_idx = buf_start_idx + (data_size / MULTI_FILE_GRP_DSET_IO_TEST_NFILES / sizeof(int));
+ for (j = buf_start_idx; j < buf_end_idx; j++)
+ ((int *)write_buf)[j] = mpi_rank;
+
+ /* Write the dataset asynchronously */
+ if (H5Dwrite_async(dset_id, H5T_NATIVE_INT, mspace_id, space_id, H5P_DEFAULT,
+ &write_buf[buf_start_idx], es_id) < 0)
+ PART_TEST_ERROR(multi_file_grp_dset_no_kick);
+
+ /* Close the dataset asynchronously */
+ if (H5Dclose_async(dset_id, es_id) < 0)
+ PART_TEST_ERROR(multi_file_grp_dset_no_kick);
+
+ /* Close the group asynchronously */
+ if (H5Gclose_async(grp_id, es_id) < 0)
+ PART_TEST_ERROR(multi_file_grp_dset_no_kick);
+
+ /* Close the file asynchronously */
+ if (H5Fclose_async(file_id, es_id) < 0)
+ PART_TEST_ERROR(multi_file_grp_dset_no_kick);
+ } /* end for */
+
+ /* Wait for the event stack to complete */
+ if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0)
+ PART_TEST_ERROR(multi_file_grp_dset_no_kick);
+ if (op_failed)
+ PART_TEST_ERROR(multi_file_grp_dset_no_kick);
+
+ /* Loop over files */
+ for (i = 0; i < MULTI_FILE_GRP_DSET_IO_TEST_NFILES; i++) {
+ /* Set file name */
+ sprintf(file_name, PAR_ASYNC_API_TEST_FILE_PRINTF, (int)i);
+
+ /* Open the file asynchronously */
+ if ((file_id = H5Fopen_async(file_name, H5F_ACC_RDONLY, fapl_id, es_id)) < 0)
+ PART_TEST_ERROR(multi_file_grp_dset_no_kick);
+
+ /* Open the group asynchronously */
+ if ((grp_id = H5Gopen_async(file_id, "grp", H5P_DEFAULT, es_id)) < 0)
+ PART_TEST_ERROR(multi_file_grp_dset_no_kick);
+
+ /* Open the dataset asynchronously */
+ if ((dset_id = H5Dopen_async(grp_id, "dset", H5P_DEFAULT, es_id)) < 0)
+ PART_TEST_ERROR(multi_file_grp_dset_no_kick);
+
+ /* Read the dataset asynchronously */
+ buf_start_idx = i * (data_size / MULTI_FILE_GRP_DSET_IO_TEST_NFILES / sizeof(int));
+ if (H5Dread_async(dset_id, H5T_NATIVE_INT, mspace_id, space_id, H5P_DEFAULT,
+ &read_buf[buf_start_idx], es_id) < 0)
+ PART_TEST_ERROR(multi_file_grp_dset_no_kick);
+
+ /* Close the dataset asynchronously */
+ if (H5Dclose_async(dset_id, es_id) < 0)
+ PART_TEST_ERROR(multi_file_grp_dset_no_kick);
+
+ /* Close the group asynchronously */
+ if (H5Gclose_async(grp_id, es_id) < 0)
+ PART_TEST_ERROR(multi_file_grp_dset_no_kick);
+
+ /* Close the file asynchronously */
+ if (H5Fclose_async(file_id, es_id) < 0)
+ PART_TEST_ERROR(multi_file_grp_dset_no_kick);
+ } /* end for */
+
+ /* Wait for the event stack to complete */
+ if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0)
+ PART_TEST_ERROR(multi_file_grp_dset_no_kick);
+ if (op_failed)
+ PART_TEST_ERROR(multi_file_grp_dset_no_kick);
+
+ /* Verify the read data */
+ for (i = 0; i < data_size / sizeof(int); i++)
+ if (write_buf[i] != read_buf[i]) {
+ H5_FAILED();
+ HDprintf(" data verification failed\n");
+ PART_ERROR(multi_file_grp_dset_no_kick);
+ } /* end if */
+
+ PASSED();
+ }
+ PART_END(multi_file_grp_dset_no_kick);
+
+ PART_BEGIN(multi_file_grp_dset_kick)
+ {
+ size_t buf_start_idx;
+
+ TESTING_2("with intermediate calls to H5ESwait() (0 timeout)");
+
+ /* Loop over files */
+ for (i = 0; i < MULTI_FILE_GRP_DSET_IO_TEST_NFILES; i++) {
+ size_t buf_end_idx;
+
+ /* Set file name */
+ sprintf(file_name, PAR_ASYNC_API_TEST_FILE_PRINTF, (int)i);
+
+ /* Create file asynchronously */
+ if ((file_id = H5Fcreate_async(file_name, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id, es_id)) < 0)
+ PART_TEST_ERROR(multi_file_grp_dset_kick);
+ if ((int)i > max_printf_file)
+ max_printf_file = (int)i;
+
+ /* Create the group asynchronously */
+ if ((grp_id = H5Gcreate_async(file_id, "grp", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT, es_id)) <
+ 0)
+ PART_TEST_ERROR(multi_file_grp_dset_kick);
+
+ /* Create the dataset asynchronously */
+ if ((dset_id = H5Dcreate_async(grp_id, "dset", H5T_NATIVE_INT, space_id, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT, es_id)) < 0)
+ PART_TEST_ERROR(multi_file_grp_dset_kick);
+
+ /* Initialize write_buf. Must use a new slice of write_buf for
+ * each dset since we can't overwrite the buffers until I/O is done. */
+ buf_start_idx = i * (data_size / MULTI_FILE_GRP_DSET_IO_TEST_NFILES / sizeof(int));
+ buf_end_idx = buf_start_idx + (data_size / MULTI_FILE_GRP_DSET_IO_TEST_NFILES / sizeof(int));
+ for (j = buf_start_idx; j < buf_end_idx; j++)
+ ((int *)write_buf)[j] = mpi_rank;
+
+ /* Write the dataset asynchronously */
+ if (H5Dwrite_async(dset_id, H5T_NATIVE_INT, mspace_id, space_id, H5P_DEFAULT,
+ &write_buf[buf_start_idx], es_id) < 0)
+ PART_TEST_ERROR(multi_file_grp_dset_kick);
+
+ /* Close the dataset asynchronously */
+ if (H5Dclose_async(dset_id, es_id) < 0)
+ PART_TEST_ERROR(multi_file_grp_dset_kick);
+
+ /* Close the group asynchronously */
+ if (H5Gclose_async(grp_id, es_id) < 0)
+ PART_TEST_ERROR(multi_file_grp_dset_kick);
+
+ /* Close the file asynchronously */
+ if (H5Fclose_async(file_id, es_id) < 0)
+ PART_TEST_ERROR(multi_file_grp_dset_kick);
+
+ /* Kick the event stack to make progress */
+ if (H5ESwait(es_id, 0, &num_in_progress, &op_failed) < 0)
+ PART_TEST_ERROR(multi_file_grp_dset_kick);
+ if (op_failed)
+ PART_TEST_ERROR(multi_file_grp_dset_kick);
+ } /* end for */
+
+ /* Wait for the event stack to complete */
+ if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0)
+ PART_TEST_ERROR(multi_file_grp_dset_kick);
+ if (op_failed)
+ PART_TEST_ERROR(multi_file_grp_dset_kick);
+
+ /* Loop over files */
+ for (i = 0; i < MULTI_FILE_GRP_DSET_IO_TEST_NFILES; i++) {
+ /* Set file name */
+ sprintf(file_name, PAR_ASYNC_API_TEST_FILE_PRINTF, (int)i);
+
+ /* Open the file asynchronously */
+ if ((file_id = H5Fopen_async(file_name, H5F_ACC_RDONLY, fapl_id, es_id)) < 0)
+ PART_TEST_ERROR(multi_file_grp_dset_kick);
+
+ /* Open the group asynchronously */
+ if ((grp_id = H5Gopen_async(file_id, "grp", H5P_DEFAULT, es_id)) < 0)
+ PART_TEST_ERROR(multi_file_grp_dset_kick);
+
+ /* Open the dataset asynchronously */
+ if ((dset_id = H5Dopen_async(grp_id, "dset", H5P_DEFAULT, es_id)) < 0)
+ PART_TEST_ERROR(multi_file_grp_dset_kick);
+
+ /* Read the dataset asynchronously */
+ buf_start_idx = i * (data_size / MULTI_FILE_GRP_DSET_IO_TEST_NFILES / sizeof(int));
+ if (H5Dread_async(dset_id, H5T_NATIVE_INT, mspace_id, space_id, H5P_DEFAULT,
+ &read_buf[buf_start_idx], es_id) < 0)
+ PART_TEST_ERROR(multi_file_grp_dset_kick);
+
+ /* Close the dataset asynchronously */
+ if (H5Dclose_async(dset_id, es_id) < 0)
+ PART_TEST_ERROR(multi_file_grp_dset_kick);
+
+ /* Close the group asynchronously */
+ if (H5Gclose_async(grp_id, es_id) < 0)
+ PART_TEST_ERROR(multi_file_grp_dset_kick);
+
+ /* Close the file asynchronously */
+ if (H5Fclose_async(file_id, es_id) < 0)
+ PART_TEST_ERROR(multi_file_grp_dset_kick);
+
+ /* Kick the event stack to make progress */
+ if (H5ESwait(es_id, 0, &num_in_progress, &op_failed) < 0)
+ PART_TEST_ERROR(multi_file_grp_dset_kick);
+ if (op_failed)
+ PART_TEST_ERROR(multi_file_grp_dset_kick);
+ } /* end for */
+
+ /* Wait for the event stack to complete */
+ if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0)
+ PART_TEST_ERROR(multi_file_grp_dset_kick);
+ if (op_failed)
+ PART_TEST_ERROR(multi_file_grp_dset_kick);
+
+ /* Verify the read data */
+ for (i = 0; i < data_size / sizeof(int); i++)
+ if (write_buf[i] != read_buf[i]) {
+ H5_FAILED();
+ HDprintf(" data verification failed\n");
+ PART_ERROR(multi_file_grp_dset_kick);
+ } /* end if */
+
+ PASSED();
+ }
+ PART_END(multi_file_grp_dset_kick);
+ }
+ END_MULTIPART;
+
+ TESTING_2("test cleanup");
+
+ /* Wait for the event stack to complete */
+ if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0)
+ TEST_ERROR;
+ if (op_failed)
+ TEST_ERROR;
+
+ if (read_buf) {
+ HDfree(read_buf);
+ read_buf = NULL;
+ }
+
+ if (write_buf) {
+ HDfree(write_buf);
+ write_buf = NULL;
+ }
+
+ if (dims) {
+ HDfree(dims);
+ dims = NULL;
+ }
+
+ if (H5Pclose(fapl_id) < 0)
+ TEST_ERROR;
+ if (H5Sclose(space_id) < 0)
+ TEST_ERROR;
+ if (H5Sclose(mspace_id) < 0)
+ TEST_ERROR;
+ if (H5ESclose(es_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ if (read_buf)
+ HDfree(read_buf);
+ if (write_buf)
+ HDfree(write_buf);
+ if (dims)
+ HDfree(dims);
+ H5Sclose(space_id);
+ H5Sclose(mspace_id);
+ H5Dclose(dset_id);
+ H5Gclose(grp_id);
+ H5Fclose(file_id);
+ H5Pclose(fapl_id);
+ H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed);
+ H5ESclose(es_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+#undef MULTI_FILE_GRP_DSET_IO_TEST_SPACE_RANK
+#undef MULTI_FILE_GRP_DSET_IO_TEST_NFILES
+
+/*
+ * Creates a single file and dataset, then each rank writes to a portion
+ * of the dataset. Next, the dataset is repeatedly extended in the first
+ * dimension by 1 "row" per MPI rank and partially written to by each rank.
+ * Finally, each rank reads from a portion of the dataset.
+ */
+#define SET_EXTENT_TEST_SPACE_RANK 2
+#define SET_EXTENT_TEST_NUM_EXTENDS 6
+static int
+test_set_extent(void)
+{
+ hsize_t *dims = NULL;
+ hsize_t *maxdims = NULL;
+ hsize_t *cdims = NULL;
+ hsize_t start[SET_EXTENT_TEST_SPACE_RANK];
+ hsize_t stride[SET_EXTENT_TEST_SPACE_RANK];
+ hsize_t count[SET_EXTENT_TEST_SPACE_RANK];
+ hsize_t block[SET_EXTENT_TEST_SPACE_RANK];
+ hbool_t op_failed = false;
+ hbool_t is_native_vol = false;
+ size_t i, j, data_size, num_in_progress;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t fapl_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t dcpl_id = H5I_INVALID_HID;
+ hid_t space_id = H5I_INVALID_HID;
+ hid_t space_id_out = H5I_INVALID_HID;
+ hid_t mspace_id = H5I_INVALID_HID;
+ hid_t es_id = H5I_INVALID_HID;
+ htri_t tri_ret;
+ int *write_buf = NULL;
+ int *read_buf = NULL;
+
+ TESTING("extending dataset");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_FLUSH_REFRESH) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
+ if (MAINPROCESS) {
+ SKIPPED();
+ HDprintf(" API functions for basic file, dataset, dataset more, or flush aren't supported "
+ "with this connector\n");
+ }
+
+ return 0;
+ }
+
+ if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, coll_metadata_read)) < 0)
+ TEST_ERROR;
+
+ /* Create dataspace */
+ if (generate_random_parallel_dimensions(SET_EXTENT_TEST_SPACE_RANK, &dims) < 0)
+ TEST_ERROR;
+
+ if (NULL == (maxdims = HDmalloc(SET_EXTENT_TEST_SPACE_RANK * sizeof(hsize_t)))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate max dataspace dimension buffer\n");
+ TEST_ERROR;
+ }
+
+ if (NULL == (cdims = HDmalloc(SET_EXTENT_TEST_SPACE_RANK * sizeof(hsize_t)))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate chunk dimension buffer\n");
+ TEST_ERROR;
+ }
+
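+ /* The first dimension may grow by up to SET_EXTENT_TEST_NUM_EXTENDS * mpi_size rows; chunk
+ * dimensions are half the initial extent in each dimension (minimum 1). */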
+ for (i = 0; i < SET_EXTENT_TEST_SPACE_RANK; i++) {
+ maxdims[i] = (i == 0) ? dims[i] + (hsize_t)(SET_EXTENT_TEST_NUM_EXTENDS * mpi_size) : dims[i];
+ cdims[i] = (dims[i] == 1) ? 1 : dims[i] / 2;
+ }
+
+ /* Create file dataspace */
+ if ((space_id = H5Screate_simple(SET_EXTENT_TEST_SPACE_RANK, dims, maxdims)) < 0)
+ TEST_ERROR;
+
+ /* Create DCPL */
+ if ((dcpl_id = H5Pcreate(H5P_DATASET_CREATE)) < 0)
+ TEST_ERROR;
+
+ /* Set chunking */
+ if (H5Pset_chunk(dcpl_id, SET_EXTENT_TEST_SPACE_RANK, cdims) < 0)
+ TEST_ERROR;
+
+ /* Create event stack */
+ if ((es_id = H5EScreate()) < 0)
+ TEST_ERROR;
+
+ /* Create file asynchronously */
+ if ((file_id = H5Fcreate_async(PAR_ASYNC_API_TEST_FILE, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id, es_id)) < 0)
+ TEST_ERROR;
+
+ /* Find out if the native connector is used */
+ if (H5VLobject_is_native(file_id, &is_native_vol) < 0)
+ TEST_ERROR;
+
+ /* Create the dataset asynchronously */
+ if ((dset_id = H5Dcreate_async(file_id, "dset", H5T_NATIVE_INT, space_id, H5P_DEFAULT, dcpl_id,
+ H5P_DEFAULT, es_id)) < 0)
+ TEST_ERROR;
+
+ /* Calculate size of data buffers - first dimension is skipped in calculation */
+ for (i = 1, data_size = 1; i < SET_EXTENT_TEST_SPACE_RANK; i++)
+ data_size *= dims[i];
+ data_size *= sizeof(int);
+ data_size *= SET_EXTENT_TEST_NUM_EXTENDS;
+
+ if (NULL == (write_buf = HDmalloc(data_size))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for dataset write\n");
+ TEST_ERROR;
+ }
+
+ if (NULL == (read_buf = HDmalloc(data_size))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for dataset read\n");
+ TEST_ERROR;
+ }
+
+ /* Select this rank's portion of the dataspace */
+ for (i = 0; i < SET_EXTENT_TEST_SPACE_RANK; i++) {
+ if (i == 0) {
+ start[i] = (hsize_t)mpi_rank;
+ block[i] = 1;
+ }
+ else {
+ start[i] = 0;
+ block[i] = dims[i];
+ }
+
+ stride[i] = 1;
+ count[i] = 1;
+ }
+
+ if (H5Sselect_hyperslab(space_id, H5S_SELECT_SET, start, stride, count, block) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't select hyperslab for dataset write\n");
+ goto error;
+ }
+
+ /* Setup memory space for write_buf */
+ {
+ hsize_t mdims[] = {data_size / SET_EXTENT_TEST_NUM_EXTENDS / sizeof(int)};
+
+ if ((mspace_id = H5Screate_simple(1, mdims, NULL)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create memory dataspace\n");
+ goto error;
+ }
+ }
+
+ /* Initialize write_buf */
+ for (i = 0; i < data_size / sizeof(int); i++)
+ ((int *)write_buf)[i] = mpi_rank;
+
+ /* Repeatedly extend the dataset in the first dimension, growing it by 1 "row" per
+ * MPI rank on each iteration after the first (the first pass writes to the initial
+ * extent). Each rank claims one of the new "rows" for I/O in an interleaved fashion. */
+ for (i = 0; i < SET_EXTENT_TEST_NUM_EXTENDS; i++) {
+ /* No need to extend on the first iteration */
+ if (i) {
+ /* Extend dataspace */
+ dims[0] += (hsize_t)mpi_size;
+ if (H5Sset_extent_simple(space_id, SET_EXTENT_TEST_SPACE_RANK, dims, maxdims) < 0)
+ TEST_ERROR;
+
+ /* Extend dataset asynchronously */
+ if (H5Dset_extent_async(dset_id, dims, es_id) < 0)
+ TEST_ERROR;
+
+ /* Select hyperslab in file space to match new region */
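+ /* Rows are interleaved across ranks: rank r selects rows r, r + mpi_size, r + 2 * mpi_size, ...,
+ * with the count growing by one on each extension so every previously claimed row stays selected. */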
+ for (j = 0; j < SET_EXTENT_TEST_SPACE_RANK; j++) {
+ if (j == 0) {
+ start[j] = (hsize_t)mpi_rank;
+ block[j] = 1;
+ stride[j] = (hsize_t)mpi_size;
+ count[j] = i + 1;
+ }
+ else {
+ start[j] = 0;
+ block[j] = dims[j];
+ stride[j] = 1;
+ count[j] = 1;
+ }
+ }
+
+ if (H5Sselect_hyperslab(space_id, H5S_SELECT_SET, start, stride, count, block) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't select hyperslab for dataset write\n");
+ goto error;
+ }
+
+ /* Adjust memory dataspace to match as well */
+ {
+ hsize_t mdims[] = {(i + 1) * (data_size / SET_EXTENT_TEST_NUM_EXTENDS / sizeof(int))};
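+ /* The memory space now covers (i + 1) slices of write_buf, one per row this rank has claimed. */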
+
+ if (H5Sset_extent_simple(mspace_id, 1, mdims, NULL) < 0)
+ TEST_ERROR;
+
+ if (H5Sselect_all(mspace_id) < 0)
+ TEST_ERROR;
+ }
+ } /* end if */
+
+ /* Get dataset dataspace */
+ if ((space_id_out = H5Dget_space_async(dset_id, es_id)) < 0)
+ TEST_ERROR;
+
+ /* Verify extent is correct */
+ if ((tri_ret = H5Sextent_equal(space_id, space_id_out)) < 0)
+ TEST_ERROR;
+ if (!tri_ret)
+ FAIL_PUTS_ERROR(" dataspaces are not equal\n");
+
+ /* Close output dataspace */
+ if (H5Sclose(space_id_out) < 0)
+ TEST_ERROR;
+
+ /* Write the dataset slice asynchronously */
+ if (H5Dwrite_async(dset_id, H5T_NATIVE_INT, mspace_id, space_id, H5P_DEFAULT, write_buf, es_id) < 0)
+ TEST_ERROR;
+ }
+
+ /* Flush the dataset asynchronously. This will effectively work as a
+ * barrier, guaranteeing the read takes place after the write. Skip this
+ * function because it isn't supported for the native vol in parallel. */
+ if (!is_native_vol && H5Oflush_async(dset_id, es_id) < 0)
+ TEST_ERROR;
+
+ /* Read the entire dataset asynchronously */
+ if (H5Dread_async(dset_id, H5T_NATIVE_INT, mspace_id, space_id, H5P_DEFAULT, read_buf, es_id) < 0)
+ TEST_ERROR;
+
+ /* Wait for the event stack to complete */
+ if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0)
+ TEST_ERROR;
+ if (op_failed)
+ TEST_ERROR;
+
+ /* Verify the read data */
+ for (i = 0; i < data_size / sizeof(int); i++)
+ if (write_buf[i] != read_buf[i]) {
+ H5_FAILED();
+ HDprintf(" data verification failed, expected %d but got %d\n", write_buf[i], read_buf[i]);
+ goto error;
+ } /* end if */
+
+ /* Close dataset asynchronously */
+ if (H5Dclose_async(dset_id, es_id) < 0)
+ TEST_ERROR;
+
+ /* Open dataset asynchronously */
+ if ((dset_id = H5Dopen_async(file_id, "dset", H5P_DEFAULT, es_id)) < 0)
+ TEST_ERROR;
+
+ /* Get dataset dataspace asynchronously */
+ if ((space_id_out = H5Dget_space_async(dset_id, es_id)) < 0)
+ TEST_ERROR;
+
+ /* Verify the extents match */
+ if ((tri_ret = H5Sextent_equal(space_id, space_id_out)) < 0)
+ TEST_ERROR;
+ if (!tri_ret)
+ FAIL_PUTS_ERROR(" dataspaces are not equal\n");
+
+ if (read_buf) {
+ HDfree(read_buf);
+ read_buf = NULL;
+ }
+
+ if (write_buf) {
+ HDfree(write_buf);
+ write_buf = NULL;
+ }
+
+ if (cdims) {
+ HDfree(cdims);
+ cdims = NULL;
+ }
+
+ if (maxdims) {
+ HDfree(maxdims);
+ maxdims = NULL;
+ }
+
+ if (dims) {
+ HDfree(dims);
+ dims = NULL;
+ }
+
+ /* Wait for the event stack to complete */
+ if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0)
+ TEST_ERROR;
+ if (op_failed)
+ TEST_ERROR;
+
+ if (H5Dclose(dset_id) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+ if (H5Pclose(fapl_id) < 0)
+ TEST_ERROR;
+ if (H5Sclose(space_id) < 0)
+ TEST_ERROR;
+ if (H5Sclose(mspace_id) < 0)
+ TEST_ERROR;
+ if (H5Pclose(dcpl_id) < 0)
+ TEST_ERROR;
+ if (H5ESclose(es_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ if (read_buf)
+ HDfree(read_buf);
+ if (write_buf)
+ HDfree(write_buf);
+ if (cdims)
+ HDfree(cdims);
+ if (maxdims)
+ HDfree(maxdims);
+ if (dims)
+ HDfree(dims);
+ H5Sclose(space_id);
+ H5Sclose(mspace_id);
+ H5Sclose(space_id_out);
+ H5Dclose(dset_id);
+ H5Pclose(dcpl_id);
+ H5Fclose(file_id);
+ H5Pclose(fapl_id);
+ H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed);
+ H5ESclose(es_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+#undef SET_EXTENT_TEST_SPACE_RANK
+#undef SET_EXTENT_TEST_NUM_EXTENDS
+
+/*
+ * Creates an attribute on a dataset. All ranks check whether
+ * the attribute exists both before and after it is created.
+ */
+#define ATTRIBUTE_EXISTS_TEST_SPACE_RANK 2
+static int
+test_attribute_exists(void)
+{
+ hsize_t *dims = NULL;
+ hbool_t op_failed = false;
+ hbool_t is_native_vol = false;
+ size_t num_in_progress;
+ hbool_t exists1 = false;
+ hbool_t exists2 = false;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t fapl_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t attr_id = H5I_INVALID_HID;
+ hid_t space_id = H5I_INVALID_HID;
+ hid_t es_id = H5I_INVALID_HID;
+
+ TESTING("H5Aexists()");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_FLUSH_REFRESH) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC)) {
+ if (MAINPROCESS) {
+ SKIPPED();
+ HDprintf(" API functions for basic file, dataset, dataset more, attribute, or flush aren't "
+ "supported with this connector\n");
+ }
+
+ return 0;
+ }
+
+ if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, coll_metadata_read)) < 0)
+ TEST_ERROR;
+
+ /* Create dataspace */
+ if (generate_random_parallel_dimensions(ATTRIBUTE_EXISTS_TEST_SPACE_RANK, &dims) < 0)
+ TEST_ERROR;
+
+ /* Create dataspace */
+ if ((space_id = H5Screate_simple(ATTRIBUTE_EXISTS_TEST_SPACE_RANK, dims, NULL)) < 0)
+ TEST_ERROR;
+
+ /* Create event stack */
+ if ((es_id = H5EScreate()) < 0)
+ TEST_ERROR;
+
+ /* Open file asynchronously */
+ if ((file_id = H5Fopen_async(PAR_ASYNC_API_TEST_FILE, H5F_ACC_RDWR, fapl_id, es_id)) < 0)
+ TEST_ERROR;
+
+ /* Find out if the native connector is used */
+ if (H5VLobject_is_native(file_id, &is_native_vol) < 0)
+ TEST_ERROR;
+
+ /* Create the dataset asynchronously */
+ if ((dset_id = H5Dcreate_async(file_id, "attr_exists_dset", H5T_NATIVE_INT, space_id, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT, es_id)) < 0)
+ TEST_ERROR;
+
+ /* Check if the attribute exists asynchronously */
+ if (H5Aexists_async(dset_id, "attr", &exists1, es_id) < 0)
+ TEST_ERROR;
+
+ /* Flush the dataset asynchronously. This will effectively work as a
+ * barrier, guaranteeing the create takes place after the existence check.
+ * Skip this function because it isn't supported for the native vol in parallel.
+ */
+ if (!is_native_vol && H5Oflush_async(dset_id, es_id) < 0)
+ TEST_ERROR;
+
+ /* Create the attribute asynchronously */
+ if ((attr_id =
+ H5Acreate_async(dset_id, "attr", H5T_NATIVE_INT, space_id, H5P_DEFAULT, H5P_DEFAULT, es_id)) < 0)
+ TEST_ERROR;
+
+ /* Flush the dataset asynchronously. This will effectively work as a
+ * barrier, guaranteeing the existence check takes place after the create.
+ * Skip this function because it isn't supported for the native vol in parallel.
+ */
+ if (!is_native_vol && H5Oflush_async(dset_id, es_id) < 0)
+ TEST_ERROR;
+
+ /* Check if the attribute exists asynchronously */
+ if (H5Aexists_async(dset_id, "attr", &exists2, es_id) < 0)
+ TEST_ERROR;
+
+ /* Wait for the event stack to complete */
+ if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0)
+ TEST_ERROR;
+ if (op_failed)
+ TEST_ERROR;
+
+ /* Check if H5Aexists returned the correct values */
+ if (exists1)
+ FAIL_PUTS_ERROR(" H5Aexists returned TRUE for an attribute that should not exist")
+ if (!exists2)
+ FAIL_PUTS_ERROR(" H5Aexists returned FALSE for an attribute that should exist")
+
+ /* Close */
+ if (H5Aclose_async(attr_id, es_id) < 0)
+ TEST_ERROR;
+ if (H5Dclose_async(dset_id, es_id) < 0)
+ TEST_ERROR;
+ if (H5Fclose_async(file_id, es_id) < 0)
+ TEST_ERROR;
+ if (H5Pclose(fapl_id) < 0)
+ TEST_ERROR;
+ if (H5Sclose(space_id) < 0)
+ TEST_ERROR;
+
+ /* Wait for the event stack to complete */
+ if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0)
+ TEST_ERROR;
+ if (op_failed)
+ TEST_ERROR;
+
+ if (dims) {
+ HDfree(dims);
+ dims = NULL;
+ }
+
+ if (H5ESclose(es_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ if (dims)
+ HDfree(dims);
+ H5Sclose(space_id);
+ H5Aclose(attr_id);
+ H5Dclose(dset_id);
+ H5Pclose(fapl_id);
+ H5Fclose(file_id);
+ H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed);
+ H5ESclose(es_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+#undef ATTRIBUTE_EXISTS_TEST_SPACE_RANK
+
+/*
+ * Opens a file, then creates a dataset and attribute. Each rank writes to
+ * the attribute. Then, each rank reads the attribute and
+ * verifies the data is correct.
+ */
+#define ATTRIBUTE_IO_TEST_SPACE_RANK 2
+static int
+test_attribute_io(void)
+{
+ hsize_t *dims = NULL;
+ hbool_t op_failed = false;
+ hbool_t is_native_vol = false;
+ size_t num_in_progress;
+ size_t i, data_size;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t fapl_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t attr_id = H5I_INVALID_HID;
+ hid_t space_id = H5I_INVALID_HID;
+ hid_t es_id = H5I_INVALID_HID;
+ int *write_buf = NULL;
+ int *read_buf = NULL;
+
+ TESTING("attribute I/O");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_FLUSH_REFRESH) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC)) {
+ if (MAINPROCESS) {
+ SKIPPED();
+ HDprintf(" API functions for basic file, dataset, dataset more, attribute, or flush aren't "
+ "supported with this connector\n");
+ }
+
+ return 0;
+ }
+
+ if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, coll_metadata_read)) < 0)
+ TEST_ERROR;
+
+ /* Create dataspace */
+ if (generate_random_parallel_dimensions(ATTRIBUTE_IO_TEST_SPACE_RANK, &dims) < 0)
+ TEST_ERROR;
+
+ /* Create dataspace */
+ if ((space_id = H5Screate_simple(ATTRIBUTE_IO_TEST_SPACE_RANK, dims, NULL)) < 0)
+ TEST_ERROR;
+
+ /* Create event stack */
+ if ((es_id = H5EScreate()) < 0)
+ TEST_ERROR;
+
+ /* Open file asynchronously */
+ if ((file_id = H5Fopen_async(PAR_ASYNC_API_TEST_FILE, H5F_ACC_RDWR, fapl_id, es_id)) < 0)
+ TEST_ERROR;
+
+ /* Find out if the native connector is used */
+ if (H5VLobject_is_native(file_id, &is_native_vol) < 0)
+ TEST_ERROR;
+
+ /* Create the dataset asynchronously */
+ if ((dset_id = H5Dcreate_async(file_id, "attr_dset", H5T_NATIVE_INT, space_id, H5P_DEFAULT, H5P_DEFAULT,
+ H5P_DEFAULT, es_id)) < 0)
+ TEST_ERROR;
+
+ /* Create the attribute asynchronously */
+ if ((attr_id =
+ H5Acreate_async(dset_id, "attr", H5T_NATIVE_INT, space_id, H5P_DEFAULT, H5P_DEFAULT, es_id)) < 0)
+ TEST_ERROR;
+
+ /* Calculate size of data buffers */
+ for (i = 0, data_size = 1; i < ATTRIBUTE_IO_TEST_SPACE_RANK; i++)
+ data_size *= dims[i];
+ data_size *= sizeof(int);
+
+ if (NULL == (write_buf = HDmalloc(data_size))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for attribute write\n");
+ TEST_ERROR;
+ }
+
+ if (NULL == (read_buf = HDmalloc(data_size))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for attribute read\n");
+ TEST_ERROR;
+ }
+
+ /* Initialize write_buf. */
+ for (i = 0; i < data_size / sizeof(int); i++)
+ write_buf[i] = 10 * (int)i;
+
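+ /* Note that, unlike the dataset tests, every rank writes and reads the full attribute; attribute
+ * I/O is not broken up into per-rank selections. */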
+ /* Write the attribute asynchronously */
+ if (H5Awrite_async(attr_id, H5T_NATIVE_INT, write_buf, es_id) < 0)
+ TEST_ERROR;
+
+ /* Flush the dataset asynchronously. This will effectively work as a
+ * barrier, guaranteeing the read takes place after the write.
+ * Skip this function because it isn't supported for the native vol in parallel.
+ */
+ if (!is_native_vol && H5Oflush_async(dset_id, es_id) < 0)
+ TEST_ERROR;
+
+ /* Read the attribute asynchronously */
+ if (H5Aread_async(attr_id, H5T_NATIVE_INT, read_buf, es_id) < 0)
+ TEST_ERROR;
+
+ /* Wait for the event stack to complete */
+ if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0)
+ TEST_ERROR;
+ if (op_failed)
+ TEST_ERROR;
+
+ /* Verify the read data */
+ for (i = 0; i < data_size / sizeof(int); i++)
+ if (write_buf[i] != read_buf[i]) {
+ H5_FAILED();
+ HDprintf(" data verification failed\n");
+ goto error;
+ } /* end if */
+
+ /* Close the attribute asynchronously */
+ if (H5Aclose_async(attr_id, es_id) < 0)
+ TEST_ERROR;
+
+ /* Open the attribute asynchronously */
+ if ((attr_id = H5Aopen_async(dset_id, "attr", H5P_DEFAULT, es_id)) < 0)
+ TEST_ERROR;
+
+ /* Read the attribute asynchronously */
+ if (H5Aread_async(attr_id, H5T_NATIVE_INT, read_buf, es_id) < 0)
+ TEST_ERROR;
+
+ /* Wait for the event stack to complete */
+ if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0)
+ TEST_ERROR;
+ if (op_failed)
+ TEST_ERROR;
+
+ /* Verify the read data */
+ for (i = 0; i < data_size / sizeof(int); i++)
+ if (write_buf[i] != read_buf[i]) {
+ H5_FAILED();
+ HDprintf(" data verification failed\n");
+ goto error;
+ } /* end if */
+
+ /* Close out of order to see if it trips things up */
+ if (H5Dclose_async(dset_id, es_id) < 0)
+ TEST_ERROR;
+ if (H5Aclose_async(attr_id, es_id) < 0)
+ TEST_ERROR;
+ if (H5Fclose_async(file_id, es_id) < 0)
+ TEST_ERROR;
+ if (H5Sclose(space_id) < 0)
+ TEST_ERROR;
+ if (H5Pclose(fapl_id) < 0)
+ TEST_ERROR;
+
+ /* Wait for the event stack to complete */
+ if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0)
+ TEST_ERROR;
+ if (op_failed)
+ TEST_ERROR;
+
+ if (read_buf) {
+ HDfree(read_buf);
+ read_buf = NULL;
+ }
+
+ if (write_buf) {
+ HDfree(write_buf);
+ write_buf = NULL;
+ }
+
+ if (dims) {
+ HDfree(dims);
+ dims = NULL;
+ }
+
+ if (H5ESclose(es_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ if (read_buf)
+ HDfree(read_buf);
+ if (write_buf)
+ HDfree(write_buf);
+ if (dims)
+ HDfree(dims);
+ H5Sclose(space_id);
+ H5Aclose(attr_id);
+ H5Dclose(dset_id);
+ H5Pclose(fapl_id);
+ H5Fclose(file_id);
+ H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed);
+ H5ESclose(es_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * Opens a file and creates an attribute on an existing dataset in
+ * parallel. Each rank writes to the attribute with datatype conversion
+ * involved, then reads back the attribute and verifies the data is correct.
+ */
+#define ATTRIBUTE_IO_TCONV_TEST_SPACE_RANK 2
+static int
+test_attribute_io_tconv(void)
+{
+ hsize_t *dims = NULL;
+ hbool_t op_failed;
+ size_t num_in_progress;
+ size_t i, data_size;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t fapl_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t attr_id = H5I_INVALID_HID;
+ hid_t space_id = H5I_INVALID_HID;
+ hid_t es_id = H5I_INVALID_HID;
+ int *write_buf = NULL;
+ int *read_buf = NULL;
+
+ TESTING("attribute I/O with type conversion");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_FLUSH_REFRESH) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC)) {
+ if (MAINPROCESS) {
+ SKIPPED();
+ HDprintf(" API functions for basic file, attribute, or flush aren't supported with this "
+ "connector\n");
+ }
+
+ return 0;
+ }
+
+ if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, coll_metadata_read)) < 0)
+ TEST_ERROR;
+
+ /* Create dataspace */
+ if (generate_random_parallel_dimensions(ATTRIBUTE_IO_TCONV_TEST_SPACE_RANK, &dims) < 0)
+ TEST_ERROR;
+
+ /* Create dataspace */
+ if ((space_id = H5Screate_simple(ATTRIBUTE_IO_TCONV_TEST_SPACE_RANK, dims, NULL)) < 0)
+ TEST_ERROR;
+
+ /* Create event stack */
+ if ((es_id = H5EScreate()) < 0)
+ TEST_ERROR;
+
+ /* Open file asynchronously */
+ if ((file_id = H5Fopen_async(PAR_ASYNC_API_TEST_FILE, H5F_ACC_RDWR, fapl_id, es_id)) < 0)
+ TEST_ERROR;
+
+ /* Create the attribute asynchronously by name */
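+ /* The attribute's file datatype is H5T_STD_U16BE while the H5Awrite/H5Aread calls below use
+ * H5T_NATIVE_INT, so a datatype conversion is involved in both directions. */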
+ if ((attr_id = H5Acreate_by_name_async(file_id, "attr_dset", "attr_tconv", H5T_STD_U16BE, space_id,
+ H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT, es_id)) < 0)
+ TEST_ERROR;
+
+ /* Calculate size of data buffers */
+ for (i = 0, data_size = 1; i < ATTRIBUTE_IO_TCONV_TEST_SPACE_RANK; i++)
+ data_size *= dims[i];
+ data_size *= sizeof(int);
+
+ if (NULL == (write_buf = HDmalloc(data_size))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for attribute write\n");
+ TEST_ERROR;
+ }
+
+ if (NULL == (read_buf = HDmalloc(data_size))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for attribute read\n");
+ TEST_ERROR;
+ }
+
+ /* Initialize write_buf. */
+ for (i = 0; i < data_size / sizeof(int); i++)
+ write_buf[i] = 10 * (int)i;
+
+ /* Write the attribute asynchronously */
+ if (H5Awrite_async(attr_id, H5T_NATIVE_INT, write_buf, es_id) < 0)
+ TEST_ERROR;
+
+ /* Flush the file asynchronously. This will effectively work as a
+ * barrier, guaranteeing the read takes place after the write. */
+ if (H5Fflush_async(file_id, H5F_SCOPE_LOCAL, es_id) < 0)
+ TEST_ERROR;
+
+ /* Read the attribute asynchronously */
+ if (H5Aread_async(attr_id, H5T_NATIVE_INT, read_buf, es_id) < 0)
+ TEST_ERROR;
+
+ /* Wait for the event stack to complete */
+ if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0)
+ TEST_ERROR;
+ if (op_failed)
+ TEST_ERROR;
+
+ /* Verify the read data */
+ for (i = 0; i < data_size / sizeof(int); i++)
+ if (write_buf[i] != read_buf[i]) {
+ H5_FAILED();
+ HDprintf(" data verification failed\n");
+ goto error;
+ } /* end if */
+
+ /* Close the attribute asynchronously */
+ if (H5Aclose_async(attr_id, es_id) < 0)
+ TEST_ERROR;
+
+ /* Open the attribute asynchronously */
+ if ((attr_id =
+ H5Aopen_by_name_async(file_id, "attr_dset", "attr_tconv", H5P_DEFAULT, H5P_DEFAULT, es_id)) < 0)
+ TEST_ERROR;
+
+ /* Read the attribute asynchronously */
+ if (H5Aread_async(attr_id, H5T_NATIVE_INT, read_buf, es_id) < 0)
+ TEST_ERROR;
+
+ /* Wait for the event stack to complete */
+ if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0)
+ TEST_ERROR;
+ if (op_failed)
+ TEST_ERROR;
+
+ /* Verify the read data */
+ for (i = 0; i < data_size / sizeof(int); i++)
+ if (write_buf[i] != read_buf[i]) {
+ H5_FAILED();
+ HDprintf(" data verification failed\n");
+ goto error;
+ } /* end if */
+
+ /* Close */
+ if (H5Aclose_async(attr_id, es_id) < 0)
+ TEST_ERROR;
+ if (H5Fclose_async(file_id, es_id) < 0)
+ TEST_ERROR;
+ if (H5Sclose(space_id) < 0)
+ TEST_ERROR;
+
+ /* Wait for the event stack to complete */
+ if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0)
+ TEST_ERROR;
+ if (op_failed)
+ TEST_ERROR;
+
+ if (read_buf) {
+ HDfree(read_buf);
+ read_buf = NULL;
+ }
+
+ if (write_buf) {
+ HDfree(write_buf);
+ write_buf = NULL;
+ }
+
+ if (dims) {
+ HDfree(dims);
+ dims = NULL;
+ }
+
+ if (H5Pclose(fapl_id) < 0)
+ TEST_ERROR;
+ if (H5ESclose(es_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ if (read_buf)
+ HDfree(read_buf);
+ if (write_buf)
+ HDfree(write_buf);
+ if (dims)
+ HDfree(dims);
+ H5Sclose(space_id);
+ H5Aclose(attr_id);
+ H5Dclose(dset_id);
+ H5Pclose(fapl_id);
+ H5Fclose(file_id);
+ H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed);
+ H5ESclose(es_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * Opens a file and creates an attribute with a compound datatype on an
+ * existing dataset in parallel. Each rank writes to the attribute, then
+ * reads back the attribute and verifies the data is correct.
+ */
+typedef struct tattr_cmpd_t {
+ int a;
+ int b;
+} tattr_cmpd_t;
+
+#define ATTRIBUTE_IO_COMPOUND_TEST_SPACE_RANK 2
+static int
+test_attribute_io_compound(void)
+{
+ hsize_t *dims = NULL;
+ hbool_t op_failed;
+ size_t num_in_progress;
+ size_t i, data_size;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t fapl_id = H5I_INVALID_HID;
+ hid_t attr_id = H5I_INVALID_HID;
+ hid_t space_id = H5I_INVALID_HID;
+ hid_t mtype_id = H5I_INVALID_HID;
+ hid_t ftype_id = H5I_INVALID_HID;
+ hid_t mtypea_id = H5I_INVALID_HID;
+ hid_t mtypeb_id = H5I_INVALID_HID;
+ hid_t es_id = H5I_INVALID_HID;
+ tattr_cmpd_t *write_buf = NULL;
+ tattr_cmpd_t *read_buf = NULL;
+ tattr_cmpd_t *fbuf = NULL;
+
+ TESTING("attribute I/O with compound type conversion");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_FLUSH_REFRESH) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC)) {
+ if (MAINPROCESS) {
+ SKIPPED();
+ HDprintf(" API functions for basic file, dataset, dataset more, attribute, or flush aren't "
+ "supported with this connector\n");
+ }
+
+ return 0;
+ }
+
+ if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, coll_metadata_read)) < 0)
+ TEST_ERROR;
+
+ /* Create dataspace */
+ if (generate_random_parallel_dimensions(ATTRIBUTE_IO_COMPOUND_TEST_SPACE_RANK, &dims) < 0)
+ TEST_ERROR;
+
+ /* Create datatype */
+ if ((mtype_id = H5Tcreate(H5T_COMPOUND, sizeof(tattr_cmpd_t))) < 0)
+ TEST_ERROR;
+ if (H5Tinsert(mtype_id, "a_name", HOFFSET(tattr_cmpd_t, a), H5T_NATIVE_INT) < 0)
+ TEST_ERROR;
+ if (H5Tinsert(mtype_id, "b_name", HOFFSET(tattr_cmpd_t, b), H5T_NATIVE_INT) < 0)
+ TEST_ERROR;
+
+ if ((mtypea_id = H5Tcreate(H5T_COMPOUND, sizeof(tattr_cmpd_t))) < 0)
+ TEST_ERROR;
+ if (H5Tinsert(mtypea_id, "a_name", HOFFSET(tattr_cmpd_t, a), H5T_NATIVE_INT) < 0)
+ TEST_ERROR;
+
+ if ((mtypeb_id = H5Tcreate(H5T_COMPOUND, sizeof(tattr_cmpd_t))) < 0)
+ TEST_ERROR;
+ if (H5Tinsert(mtypeb_id, "b_name", HOFFSET(tattr_cmpd_t, b), H5T_NATIVE_INT) < 0)
+ TEST_ERROR;
+
+ if ((ftype_id = H5Tcreate(H5T_COMPOUND, 2 + 8)) < 0)
+ TEST_ERROR;
+ if (H5Tinsert(ftype_id, "a_name", 0, H5T_STD_U16BE) < 0)
+ TEST_ERROR;
+ if (H5Tinsert(ftype_id, "b_name", 2, H5T_STD_I64LE) < 0)
+ TEST_ERROR;
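+ /* The file type is a packed 10-byte compound (2-byte big-endian unsigned 'a_name' followed by an
+ * 8-byte little-endian signed 'b_name'), so I/O with the native memory types above involves
+ * offset, size, and byte-order conversion. */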
+
+ /* Create dataspace */
+ if ((space_id = H5Screate_simple(ATTRIBUTE_IO_COMPOUND_TEST_SPACE_RANK, dims, NULL)) < 0)
+ TEST_ERROR;
+
+ /* Create event stack */
+ if ((es_id = H5EScreate()) < 0)
+ TEST_ERROR;
+
+ /* Open file asynchronously */
+ if ((file_id = H5Fopen_async(PAR_ASYNC_API_TEST_FILE, H5F_ACC_RDWR, fapl_id, es_id)) < 0)
+ TEST_ERROR;
+
+ /* Create the attribute asynchronously by name */
+ if ((attr_id = H5Acreate_by_name_async(file_id, "attr_dset", "attr_cmpd", ftype_id, space_id, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT, es_id)) < 0)
+ TEST_ERROR;
+
+ /* Calculate size of data buffers */
+ for (i = 0, data_size = 1; i < ATTRIBUTE_IO_COMPOUND_TEST_SPACE_RANK; i++)
+ data_size *= dims[i];
+ data_size *= sizeof(tattr_cmpd_t);
+
+ if (NULL == (write_buf = HDmalloc(data_size))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for attribute write\n");
+ TEST_ERROR;
+ }
+
+ if (NULL == (read_buf = HDmalloc(data_size))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for attribute read\n");
+ TEST_ERROR;
+ }
+
+ if (NULL == (fbuf = HDmalloc(data_size))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for attribute read verification\n");
+ TEST_ERROR;
+ }
+
+ /* Initialize write_buf. */
+ for (i = 0; i < data_size / sizeof(tattr_cmpd_t); i++) {
+ write_buf[i].a = 10 * (int)i;
+ write_buf[i].b = (10 * (int)i) + 1;
+ }
+
+ /* Write the attribute asynchronously */
+ if (H5Awrite_async(attr_id, mtype_id, write_buf, es_id) < 0)
+ TEST_ERROR;
+
+ /* Update fbuf */
+ for (i = 0; i < data_size / sizeof(tattr_cmpd_t); i++) {
+ fbuf[i].a = write_buf[i].a;
+ fbuf[i].b = write_buf[i].b;
+ }
+
+ /* Flush the file asynchronously. This will effectively work as a
+ * barrier, guaranteeing the read takes place after the write. */
+ if (H5Fflush_async(file_id, H5F_SCOPE_LOCAL, es_id) < 0)
+ TEST_ERROR;
+
+ /* Read the attribute asynchronously */
+ if (H5Aread_async(attr_id, mtype_id, read_buf, es_id) < 0)
+ TEST_ERROR;
+
+ /* Wait for the event stack to complete */
+ if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0)
+ TEST_ERROR;
+ if (op_failed)
+ TEST_ERROR;
+
+ /* Verify the read data */
+ for (i = 0; i < data_size / sizeof(tattr_cmpd_t); i++) {
+ if (read_buf[i].a != fbuf[i].a) {
+ H5_FAILED();
+ HDprintf(" data verification failed for field 'a'\n");
+ goto error;
+ } /* end if */
+ if (read_buf[i].b != fbuf[i].b) {
+ H5_FAILED();
+ HDprintf(" data verification failed for field 'b'\n");
+ goto error;
+ } /* end if */
+ }
+
+ /* Clear the read buffer */
+ for (i = 0; i < data_size / sizeof(tattr_cmpd_t); i++) {
+ read_buf[i].a = -2;
+ read_buf[i].b = -2;
+ }
+
+ /* Read the attribute asynchronously (element a only) */
+ if (H5Aread_async(attr_id, mtypea_id, read_buf, es_id) < 0)
+ TEST_ERROR;
+
+ /* Wait for the event stack to complete */
+ if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0)
+ TEST_ERROR;
+ if (op_failed)
+ TEST_ERROR;
+
+ /* Verify the read data */
+ for (i = 0; i < data_size / sizeof(tattr_cmpd_t); i++) {
+ if (read_buf[i].a != fbuf[i].a) {
+ H5_FAILED();
+ HDprintf(" data verification failed for field 'a'\n");
+ goto error;
+ } /* end if */
+ if (read_buf[i].b != -2) {
+ H5_FAILED();
+ HDprintf(" data verification failed for field 'b'\n");
+ goto error;
+ } /* end if */
+ }
+
+ /* Clear the read buffer */
+ for (i = 0; i < data_size / sizeof(tattr_cmpd_t); i++) {
+ read_buf[i].a = -2;
+ read_buf[i].b = -2;
+ }
+
+ /* Read the attribute asynchronously (element b only) */
+ if (H5Aread_async(attr_id, mtypeb_id, read_buf, es_id) < 0)
+ TEST_ERROR;
+
+ /* Wait for the event stack to complete */
+ if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0)
+ TEST_ERROR;
+ if (op_failed)
+ TEST_ERROR;
+
+ /* Verify the read data */
+ for (i = 0; i < data_size / sizeof(tattr_cmpd_t); i++) {
+ if (read_buf[i].a != -2) {
+ H5_FAILED();
+ HDprintf(" data verification failed for field 'a'\n");
+ goto error;
+ } /* end if */
+ if (read_buf[i].b != fbuf[i].b) {
+ H5_FAILED();
+ HDprintf(" data verification failed for field 'b'\n");
+ goto error;
+ } /* end if */
+ }
+
+ if (MPI_SUCCESS != MPI_Barrier(MPI_COMM_WORLD)) {
+ H5_FAILED();
+ HDprintf(" MPI_Barrier failed\n");
+ goto error;
+ }
+
+ /* Update write_buf */
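+ /* Bump every element by a constant (the particular value appears arbitrary) so this round of data
+ * is distinguishable from what was written previously. */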
+ for (i = 0; i < data_size / sizeof(tattr_cmpd_t); i++) {
+ write_buf[i].a += 2 * 6 * 10;
+ write_buf[i].b += 2 * 6 * 10;
+ }
+
+ /* Write the attribute asynchronously (element a only) */
+ if (H5Awrite_async(attr_id, mtypea_id, write_buf, es_id) < 0)
+ TEST_ERROR;
+
+ /* Update fbuf */
+ for (i = 0; i < data_size / sizeof(tattr_cmpd_t); i++) {
+ fbuf[i].a = write_buf[i].a;
+ }
+
+ /* Flush the file asynchronously. This will effectively work as a
+ * barrier, guaranteeing the read takes place after the write. */
+ if (H5Fflush_async(file_id, H5F_SCOPE_LOCAL, es_id) < 0)
+ TEST_ERROR;
+
+ /* Clear the read buffer */
+ for (i = 0; i < data_size / sizeof(tattr_cmpd_t); i++) {
+ read_buf[i].a = -2;
+ read_buf[i].b = -2;
+ }
+
+ /* Read the attribute asynchronously */
+ if (H5Aread_async(attr_id, mtype_id, read_buf, es_id) < 0)
+ TEST_ERROR;
+
+ /* Wait for the event stack to complete */
+ if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0)
+ TEST_ERROR;
+ if (op_failed)
+ TEST_ERROR;
+
+ /* Verify the read data */
+ for (i = 0; i < data_size / sizeof(tattr_cmpd_t); i++) {
+ if (read_buf[i].a != fbuf[i].a) {
+ H5_FAILED();
+ HDprintf(" data verification failed for field 'a'\n");
+ goto error;
+ } /* end if */
+ if (read_buf[i].b != fbuf[i].b) {
+ H5_FAILED();
+ HDprintf(" data verification failed for field 'b'\n");
+ goto error;
+ } /* end if */
+ }
+
+ if (MPI_SUCCESS != MPI_Barrier(MPI_COMM_WORLD)) {
+ H5_FAILED();
+ HDprintf(" MPI_Barrier failed\n");
+ goto error;
+ }
+
+ /* Update write_buf */
+ for (i = 0; i < data_size / sizeof(tattr_cmpd_t); i++) {
+ write_buf[i].a += 2 * 6 * 10;
+ write_buf[i].b += 2 * 6 * 10;
+ }
+
+ /* Write the attribute asynchronously (element b only) */
+ if (H5Awrite_async(attr_id, mtypeb_id, write_buf, es_id) < 0)
+ TEST_ERROR;
+
+ /* Update fbuf */
+ for (i = 0; i < data_size / sizeof(tattr_cmpd_t); i++) {
+ fbuf[i].b = write_buf[i].b;
+ }
+
+ /* Flush the file asynchronously. This will effectively work as a
+ * barrier, guaranteeing the read takes place after the write. */
+ if (H5Fflush_async(file_id, H5F_SCOPE_LOCAL, es_id) < 0)
+ TEST_ERROR;
+
+ /* Clear the read buffer */
+ for (i = 0; i < data_size / sizeof(tattr_cmpd_t); i++) {
+ read_buf[i].a = -2;
+ read_buf[i].b = -2;
+ }
+
+ /* Read the attribute asynchronously */
+ if (H5Aread_async(attr_id, mtype_id, read_buf, es_id) < 0)
+ TEST_ERROR;
+
+ /* Wait for the event stack to complete */
+ if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0)
+ TEST_ERROR;
+ if (op_failed)
+ TEST_ERROR;
+
+ /* Verify the read data */
+ for (i = 0; i < data_size / sizeof(tattr_cmpd_t); i++) {
+ if (read_buf[i].a != fbuf[i].a) {
+ H5_FAILED();
+ HDprintf(" data verification failed for field 'a'\n");
+ goto error;
+ } /* end if */
+ if (read_buf[i].b != fbuf[i].b) {
+ H5_FAILED();
+ HDprintf(" data verification failed for field 'b'\n");
+ goto error;
+ } /* end if */
+ }
+
+ /* Close */
+ if (H5Aclose_async(attr_id, es_id) < 0)
+ TEST_ERROR;
+ if (H5Fclose_async(file_id, es_id) < 0)
+ TEST_ERROR;
+ if (H5Sclose(space_id) < 0)
+ TEST_ERROR;
+ if (H5Tclose(mtype_id) < 0)
+ TEST_ERROR;
+ if (H5Tclose(ftype_id) < 0)
+ TEST_ERROR;
+ if (H5Tclose(mtypea_id) < 0)
+ TEST_ERROR;
+ if (H5Tclose(mtypeb_id) < 0)
+ TEST_ERROR;
+
+ /* Wait for the event stack to complete */
+ if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0)
+ TEST_ERROR;
+ if (op_failed)
+ TEST_ERROR;
+
+ if (read_buf) {
+ HDfree(read_buf);
+ read_buf = NULL;
+ }
+
+ if (write_buf) {
+ HDfree(write_buf);
+ write_buf = NULL;
+ }
+
+ if (fbuf) {
+ HDfree(fbuf);
+ fbuf = NULL;
+ }
+
+ if (dims) {
+ HDfree(dims);
+ dims = NULL;
+ }
+
+ if (H5Pclose(fapl_id) < 0)
+ TEST_ERROR;
+ if (H5ESclose(es_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ if (read_buf)
+ HDfree(read_buf);
+ if (write_buf)
+ HDfree(write_buf);
+ if (fbuf)
+ HDfree(fbuf);
+ if (dims)
+ HDfree(dims);
+ H5Sclose(space_id);
+ H5Tclose(mtype_id);
+ H5Tclose(ftype_id);
+ H5Tclose(mtypea_id);
+ H5Tclose(mtypeb_id);
+ H5Aclose(attr_id);
+ H5Pclose(fapl_id);
+ H5Fclose(file_id);
+ H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed);
+ H5ESclose(es_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * Tests async group interfaces in parallel
+ */
+static int
+test_group(void)
+{
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t fapl_id = H5I_INVALID_HID;
+ hid_t parent_group_id = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t subgroup_id = H5I_INVALID_HID;
+ hid_t gcpl_id = H5I_INVALID_HID;
+ hid_t es_id = H5I_INVALID_HID;
+ H5G_info_t info1;
+ H5G_info_t info2;
+ H5G_info_t info3;
+ size_t num_in_progress;
+ hbool_t op_failed;
+
+ TESTING("group operations");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_MORE) || !(vol_cap_flags_g & H5VL_CAP_FLAG_FLUSH_REFRESH) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_CREATION_ORDER)) {
+ if (MAINPROCESS) {
+ SKIPPED();
+ HDprintf(" API functions for basic file, group, group more, creation order, or flush aren't "
+ "supported with this connector\n");
+ }
+
+ return 0;
+ }
+
+ if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, coll_metadata_read)) < 0)
+ TEST_ERROR;
+
+ /* Create GCPL */
+ if ((gcpl_id = H5Pcreate(H5P_GROUP_CREATE)) < 0)
+ TEST_ERROR;
+
+ /* Track creation order */
+ if (H5Pset_link_creation_order(gcpl_id, H5P_CRT_ORDER_TRACKED | H5P_CRT_ORDER_INDEXED) < 0)
+ TEST_ERROR;
+
+ /* Create event stack */
+ if ((es_id = H5EScreate()) < 0)
+ TEST_ERROR;
+
+ /* Open file asynchronously */
+ if ((file_id = H5Fopen_async(PAR_ASYNC_API_TEST_FILE, H5F_ACC_RDWR, fapl_id, es_id)) < 0)
+ TEST_ERROR;
+
+ /* Create the parent group asynchronously */
+ if ((parent_group_id =
+ H5Gcreate_async(file_id, "group_parent", H5P_DEFAULT, gcpl_id, H5P_DEFAULT, es_id)) < 0)
+ TEST_ERROR;
+
+ /* Create 3 subgroups asynchronously, the first with no sub-subgroups, the
+ * second with 1, and the third with 2 */
+ if ((group_id =
+ H5Gcreate_async(parent_group_id, "group1", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT, es_id)) < 0)
+ TEST_ERROR;
+ if (H5Gclose_async(group_id, es_id) < 0)
+ TEST_ERROR;
+
+ if ((group_id =
+ H5Gcreate_async(parent_group_id, "group2", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT, es_id)) < 0)
+ TEST_ERROR;
+ if ((subgroup_id = H5Gcreate_async(group_id, "subgroup1", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT, es_id)) <
+ 0)
+ TEST_ERROR;
+ if (H5Gclose_async(subgroup_id, es_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose_async(group_id, es_id) < 0)
+ TEST_ERROR;
+
+ if ((group_id =
+ H5Gcreate_async(parent_group_id, "group3", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT, es_id)) < 0)
+ TEST_ERROR;
+ if ((subgroup_id = H5Gcreate_async(group_id, "subgroup1", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT, es_id)) <
+ 0)
+ TEST_ERROR;
+ if (H5Gclose_async(subgroup_id, es_id) < 0)
+ TEST_ERROR;
+ if ((subgroup_id = H5Gcreate_async(group_id, "subgroup2", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT, es_id)) <
+ 0)
+ TEST_ERROR;
+ if (H5Gclose_async(subgroup_id, es_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose_async(group_id, es_id) < 0)
+ TEST_ERROR;
+
+ /* Flush the file asynchronously. This will effectively work as a barrier,
+ * guaranteeing the group info queries below take place after the group creations. */
+ if (H5Fflush_async(file_id, H5F_SCOPE_LOCAL, es_id) < 0)
+ TEST_ERROR;
+
+ /* Test H5Gget_info_async */
+ /* Open group1 asynchronously */
+ if ((group_id = H5Gopen_async(parent_group_id, "group1", H5P_DEFAULT, es_id)) < 0)
+ TEST_ERROR;
+
+ /* Get info */
+ if (H5Gget_info_async(group_id, &info1, es_id) < 0)
+ TEST_ERROR;
+
+ /* Test H5Gget_info_by_idx_async */
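+ /* With creation order tracked on the parent group, index 1 in increasing creation order is "group2". */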
+ if (H5Gget_info_by_idx_async(parent_group_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, 1, &info2,
+ H5P_DEFAULT, es_id) < 0)
+ TEST_ERROR;
+
+ /* Test H5Gget_info_by_name_async */
+ if (H5Gget_info_by_name_async(parent_group_id, "group3", &info3, H5P_DEFAULT, es_id) < 0)
+ TEST_ERROR;
+
+ /* Wait for the event stack to complete */
+ if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0)
+ TEST_ERROR;
+ if (op_failed)
+ TEST_ERROR;
+
+ /* Verify group infos */
+ if (info1.nlinks != 0)
+ FAIL_PUTS_ERROR(" incorrect number of links")
+ if (info2.nlinks != 1)
+ FAIL_PUTS_ERROR(" incorrect number of links")
+ if (info3.nlinks != 2)
+ FAIL_PUTS_ERROR(" incorrect number of links")
+
+ /* Close */
+ if (H5Gclose_async(group_id, es_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose_async(parent_group_id, es_id) < 0)
+ TEST_ERROR;
+ if (H5Fclose_async(file_id, es_id) < 0)
+ TEST_ERROR;
+ if (H5Pclose(fapl_id) < 0)
+ TEST_ERROR;
+ if (H5Pclose(gcpl_id) < 0)
+ TEST_ERROR;
+
+ /* Wait for the event stack to complete */
+ if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0)
+ TEST_ERROR;
+ if (op_failed)
+ TEST_ERROR;
+
+ if (H5ESclose(es_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Gclose(subgroup_id);
+ H5Gclose(group_id);
+ H5Gclose(parent_group_id);
+ H5Fclose(file_id);
+ H5Pclose(fapl_id);
+ H5Pclose(gcpl_id);
+ H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed);
+ H5ESclose(es_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * Tests async link interfaces in parallel
+ */
+static int
+test_link(void)
+{
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t fapl_id = H5I_INVALID_HID;
+ hid_t parent_group_id = H5I_INVALID_HID;
+ hid_t group_id = H5I_INVALID_HID;
+ hid_t gcpl_id = H5I_INVALID_HID;
+ hid_t es_id = H5I_INVALID_HID;
+ hbool_t existsh1;
+ hbool_t existsh2;
+ hbool_t existsh3;
+ hbool_t existss1;
+ hbool_t existss2;
+ hbool_t existss3;
+ size_t num_in_progress;
+ hbool_t op_failed = false;
+ hbool_t is_native_vol = false;
+
+ TESTING("link operations");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_LINK_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_HARD_LINKS) || !(vol_cap_flags_g & H5VL_CAP_FLAG_SOFT_LINKS) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_FLUSH_REFRESH) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_CREATION_ORDER)) {
+ if (MAINPROCESS) {
+ SKIPPED();
+ HDprintf(" API functions for basic file, link, hard link, soft link, flush, or creation order "
+ "aren't supported with this connector\n");
+ }
+
+ return 0;
+ }
+
+ if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, coll_metadata_read)) < 0)
+ TEST_ERROR;
+
+ /* Create GCPL */
+ if ((gcpl_id = H5Pcreate(H5P_GROUP_CREATE)) < 0)
+ TEST_ERROR;
+
+ /* Track creation order */
+ if (H5Pset_link_creation_order(gcpl_id, H5P_CRT_ORDER_TRACKED | H5P_CRT_ORDER_INDEXED) < 0)
+ TEST_ERROR;
+
+ /* Create event stack */
+ if ((es_id = H5EScreate()) < 0)
+ TEST_ERROR;
+
+ /* Open file asynchronously */
+ if ((file_id = H5Fopen_async(PAR_ASYNC_API_TEST_FILE, H5F_ACC_RDWR, fapl_id, es_id)) < 0)
+ TEST_ERROR;
+
+ /* Find out if the native connector is used */
+ if (H5VLobject_is_native(file_id, &is_native_vol) < 0)
+ TEST_ERROR;
+
+ /* Create the parent group asynchronously */
+ if ((parent_group_id =
+ H5Gcreate_async(file_id, "link_parent", H5P_DEFAULT, gcpl_id, H5P_DEFAULT, es_id)) < 0)
+ TEST_ERROR;
+
+ /* Create subgroup asynchronously. */
+ if ((group_id = H5Gcreate_async(parent_group_id, "group", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT, es_id)) <
+ 0)
+ TEST_ERROR;
+ if (H5Gclose_async(group_id, es_id) < 0)
+ TEST_ERROR;
+
+ /* Flush the parent group asynchronously. This will effectively work as a
+ * barrier, guaranteeing the link to the subgroup is visible to later tasks.
+ * Skip this function for the native vol because it isn't supported in parallel.
+ */
+ if (!is_native_vol && H5Oflush_async(parent_group_id, es_id) < 0)
+ TEST_ERROR;
+
+ /* Create hard link asynchronously */
+ if (H5Lcreate_hard_async(parent_group_id, "group", parent_group_id, "hard_link", H5P_DEFAULT, H5P_DEFAULT,
+ es_id) < 0)
+ TEST_ERROR;
+
+ /* Flush the parent group asynchronously. This will effectively work as a
+ * barrier, guaranteeing the soft link create takes place after the hard
+ * link create. Skip this function for the native vol because it isn't supported in parallel.
+ */
+ if (!is_native_vol && H5Oflush_async(parent_group_id, es_id) < 0)
+ TEST_ERROR;
+
+ /* Create soft link asynchronously */
+ if (H5Lcreate_soft_async("/link_parent/group", parent_group_id, "soft_link", H5P_DEFAULT, H5P_DEFAULT,
+ es_id) < 0)
+ TEST_ERROR;
+
+ /* Flush the parent group asynchronously. This will effectively work as a
+ * barrier, guaranteeing the read takes place after the writes.
+ * Skip this function for the native vol because it isn't supported in parallel.
+ */
+ if (!is_native_vol && H5Oflush_async(parent_group_id, es_id) < 0)
+ TEST_ERROR;
+
+ /* Wait for the event stack to complete */
+ if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0)
+ TEST_ERROR;
+ if (op_failed)
+ TEST_ERROR;
+
+ if (MPI_SUCCESS != MPI_Barrier(MPI_COMM_WORLD)) {
+ H5_FAILED();
+ HDprintf(" MPI_Barrier failed\n");
+ goto error;
+ }
+
+ /* Check if hard link exists */
+ if (H5Lexists_async(parent_group_id, "hard_link", &existsh1, H5P_DEFAULT, es_id) < 0)
+ TEST_ERROR;
+
+ /* Check if soft link exists */
+ if (H5Lexists_async(parent_group_id, "soft_link", &existss1, H5P_DEFAULT, es_id) < 0)
+ TEST_ERROR;
+
+ /* Flush the parent group asynchronously. This will effectively work as a
+ * barrier, guaranteeing the delete takes place after the reads.
+ * Skip this function for the native vol because it isn't supported in parallel.
+ */
+ if (!is_native_vol && H5Oflush_async(parent_group_id, es_id) < 0)
+ TEST_ERROR;
+
+ /* Delete soft link by index */
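+ /* With creation-order indexing and increasing iteration order, index 2 is the
+ * third link created in the parent group ("group" = 0, "hard_link" = 1,
+ * "soft_link" = 2), so this targets the soft link. */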
+ if (H5Ldelete_by_idx_async(parent_group_id, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, 2, H5P_DEFAULT, es_id) <
+ 0)
+ TEST_ERROR;
+
+ /* Flush the parent group asynchronously. This will effectively work as a
+ * barrier, guaranteeing the read takes place after the delete.
+ * Skip this function for the native vol because it isn't supported in parallel.
+ */
+ if (!is_native_vol && H5Oflush_async(parent_group_id, es_id) < 0)
+ TEST_ERROR;
+
+ /* Wait for the event stack to complete */
+ if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0)
+ TEST_ERROR;
+ if (op_failed)
+ TEST_ERROR;
+
+ if (MPI_SUCCESS != MPI_Barrier(MPI_COMM_WORLD)) {
+ H5_FAILED();
+ HDprintf(" MPI_Barrier failed\n");
+ goto error;
+ }
+
+ /* Check if hard link exists */
+ if (H5Lexists_async(parent_group_id, "hard_link", &existsh2, H5P_DEFAULT, es_id) < 0)
+ TEST_ERROR;
+
+ /* Check if soft link exists */
+ if (H5Lexists_async(parent_group_id, "soft_link", &existss2, H5P_DEFAULT, es_id) < 0)
+ TEST_ERROR;
+
+ /* Flush the parent group asynchronously. This will effectively work as a
+ * barrier, guaranteeing the delete takes place after the reads.
+ * Skip this function for the native vol because it isn't supported in parallel.
+ */
+ if (!is_native_vol && H5Oflush_async(parent_group_id, es_id) < 0)
+ TEST_ERROR;
+
+ /* Delete hard link */
+ if (H5Ldelete_async(parent_group_id, "hard_link", H5P_DEFAULT, es_id) < 0)
+ TEST_ERROR;
+
+ /* Flush the parent group asynchronously. This will effectively work as a
+ * barrier, guaranteeing the read takes place after the delete.
+ * Skip this function for the native vol because it isn't supported in parallel.
+ */
+ if (!is_native_vol && H5Oflush_async(parent_group_id, es_id) < 0)
+ TEST_ERROR;
+
+ /* Wait for the event stack to complete */
+ if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0)
+ TEST_ERROR;
+ if (op_failed)
+ TEST_ERROR;
+
+ if (MPI_SUCCESS != MPI_Barrier(MPI_COMM_WORLD)) {
+ H5_FAILED();
+ HDprintf(" MPI_Barrier failed\n");
+ goto error;
+ }
+
+ /* Check if hard link exists */
+ if (H5Lexists_async(parent_group_id, "hard_link", &existsh3, H5P_DEFAULT, es_id) < 0)
+ TEST_ERROR;
+
+ /* Check if soft link exists */
+ if (H5Lexists_async(parent_group_id, "soft_link", &existss3, H5P_DEFAULT, es_id) < 0)
+ TEST_ERROR;
+
+ /* Wait for the event stack to complete */
+ if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0)
+ TEST_ERROR;
+ if (op_failed)
+ TEST_ERROR;
+
+ /* Check if existence returns were correct */
+ if (!existsh1)
+ FAIL_PUTS_ERROR(" link exists returned FALSE for link that should exist")
+ if (!existss1)
+ FAIL_PUTS_ERROR(" link exists returned FALSE for link that should exist")
+ if (!existsh2)
+ FAIL_PUTS_ERROR(" link exists returned FALSE for link that should exist")
+ if (existss2)
+ FAIL_PUTS_ERROR(" link exists returned TRUE for link that should not exist")
+ if (existsh3)
+ FAIL_PUTS_ERROR(" link exists returned TRUE for link that should not exist")
+ if (existss3)
+ FAIL_PUTS_ERROR(" link exists returned TRUE for link that should not exist")
+
+ /* Close */
+ if (H5Gclose_async(parent_group_id, es_id) < 0)
+ TEST_ERROR;
+ if (H5Fclose_async(file_id, es_id) < 0)
+ TEST_ERROR;
+ if (H5Pclose(fapl_id) < 0)
+ TEST_ERROR;
+ if (H5Pclose(gcpl_id) < 0)
+ TEST_ERROR;
+
+ /* Wait for the event stack to complete */
+ if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0)
+ TEST_ERROR;
+ if (op_failed)
+ TEST_ERROR;
+
+ if (H5ESclose(es_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Gclose(group_id);
+ H5Gclose(parent_group_id);
+ H5Fclose(file_id);
+ H5Pclose(fapl_id);
+ H5Pclose(gcpl_id);
+ H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed);
+ H5ESclose(es_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * Tests H5Ocopy_async and H5Orefresh in parallel
+ */
+#define OCOPY_REFRESH_TEST_SPACE_RANK 2
+static int
+test_ocopy_orefresh(void)
+{
+ hsize_t *dims = NULL;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t fapl_id = H5I_INVALID_HID;
+ hid_t parent_group_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t space_id = H5I_INVALID_HID;
+ hid_t es_id = H5I_INVALID_HID;
+ size_t num_in_progress;
+ hbool_t op_failed = false;
+ hbool_t is_native_vol = false;
+
+ TESTING("H5Ocopy() and H5Orefresh()");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_OBJECT_MORE) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_FLUSH_REFRESH)) {
+ if (MAINPROCESS) {
+ SKIPPED();
+ HDprintf(" API functions for basic file, group, dataset, object more, flush, or refresh "
+ "aren't supported with this connector\n");
+ }
+
+ return 0;
+ }
+
+ if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, coll_metadata_read)) < 0)
+ TEST_ERROR;
+
+ /* Create dataspace */
+ if (generate_random_parallel_dimensions(OCOPY_REFRESH_TEST_SPACE_RANK, &dims) < 0)
+ TEST_ERROR;
+
+ /* Create dataspace */
+ if ((space_id = H5Screate_simple(OCOPY_REFRESH_TEST_SPACE_RANK, dims, NULL)) < 0)
+ TEST_ERROR;
+
+ /* Create event stack */
+ if ((es_id = H5EScreate()) < 0)
+ TEST_ERROR;
+
+ /* Open file asynchronously */
+ if ((file_id = H5Fopen_async(PAR_ASYNC_API_TEST_FILE, H5F_ACC_RDWR, fapl_id, es_id)) < 0)
+ TEST_ERROR;
+
+ /* Find out if the native connector is used */
+ if (H5VLobject_is_native(file_id, &is_native_vol) < 0)
+ TEST_ERROR;
+
+ /* Create the parent group asynchronously */
+ if ((parent_group_id =
+ H5Gcreate_async(file_id, "ocopy_parent", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT, es_id)) < 0)
+ TEST_ERROR;
+
+ /* Create dataset asynchronously. */
+ if ((dset_id = H5Dcreate_async(parent_group_id, "dset", H5T_NATIVE_INT, space_id, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT, es_id)) < 0)
+ TEST_ERROR;
+ if (H5Dclose_async(dset_id, es_id) < 0)
+ TEST_ERROR;
+
+ /* Flush the parent group asynchronously. This will effectively work as a
+ * barrier, guaranteeing the copy takes place after dataset create.
+ * Skip this function for the native vol because it isn't supported in parallel.
+ */
+ if (!is_native_vol && H5Oflush_async(parent_group_id, es_id) < 0)
+ TEST_ERROR;
+
+ /* Copy dataset */
+ if (H5Ocopy_async(parent_group_id, "dset", parent_group_id, "copied_dset", H5P_DEFAULT, H5P_DEFAULT,
+ es_id) < 0)
+ TEST_ERROR;
+
+ /* Flush the parent group asynchronously. This will effectively work as a
+ * barrier, guaranteeing the dataset open takes place after the copy.
+ * Skip this function for the native vol because it isn't supported in parallel.
+ */
+ if (!is_native_vol && H5Oflush_async(parent_group_id, es_id) < 0)
+ TEST_ERROR;
+
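+ /* The explicit wait and barrier below appear to be needed only when metadata
+ * reads are independent, so that every rank observes the completed copy before
+ * opening the copied dataset on its own; with collective metadata reads the
+ * subsequent open is presumably synchronized across ranks already. */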
+ if (!coll_metadata_read) {
+ /* Wait for the event stack to complete */
+ if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0)
+ TEST_ERROR;
+ if (op_failed)
+ TEST_ERROR;
+
+ if (MPI_SUCCESS != MPI_Barrier(MPI_COMM_WORLD)) {
+ H5_FAILED();
+ HDprintf(" MPI_Barrier failed\n");
+ goto error;
+ }
+ }
+
+ /* Open the copied dataset asynchronously */
+ if ((dset_id = H5Dopen_async(parent_group_id, "copied_dset", H5P_DEFAULT, es_id)) < 0)
+ TEST_ERROR;
+
+ /* Refresh the copied dataset */
+ if (H5Orefresh(dset_id) < 0)
+ TEST_ERROR;
+
+ /* Wait for the event stack to complete */
+ if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0)
+ TEST_ERROR;
+ if (op_failed)
+ TEST_ERROR;
+
+ /* Close */
+ if (H5Dclose_async(dset_id, es_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose_async(parent_group_id, es_id) < 0)
+ TEST_ERROR;
+ if (H5Fclose_async(file_id, es_id) < 0)
+ TEST_ERROR;
+
+ /* Wait for the event stack to complete */
+ if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0)
+ TEST_ERROR;
+ if (op_failed)
+ TEST_ERROR;
+
+ if (dims) {
+ HDfree(dims);
+ dims = NULL;
+ }
+
+ if (H5Pclose(fapl_id) < 0)
+ TEST_ERROR;
+ if (H5ESclose(es_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ if (dims)
+ HDfree(dims);
+ H5Sclose(space_id);
+ H5Dclose(dset_id);
+ H5Gclose(parent_group_id);
+ H5Pclose(fapl_id);
+ H5Fclose(file_id);
+ H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed);
+ H5ESclose(es_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+#undef OCOPY_REFRESH_TEST_SPACE_RANK
+
+/*
+ * Tests H5Freopen_async in parallel
+ */
+static int
+test_file_reopen(void)
+{
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t fapl_id = H5I_INVALID_HID;
+ hid_t reopened_file_id = H5I_INVALID_HID;
+ hid_t es_id = H5I_INVALID_HID;
+ size_t num_in_progress;
+ hbool_t op_failed;
+
+ TESTING("H5Freopen()");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_MORE)) {
+ if (MAINPROCESS) {
+ SKIPPED();
+ HDprintf(" API functions for basic file or file more aren't supported with this connector\n");
+ }
+
+ return 0;
+ }
+
+ if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, coll_metadata_read)) < 0)
+ TEST_ERROR;
+
+ /* Create event stack */
+ if ((es_id = H5EScreate()) < 0)
+ TEST_ERROR;
+
+ /* Open file asynchronously */
+ if ((file_id = H5Fopen_async(PAR_ASYNC_API_TEST_FILE, H5F_ACC_RDWR, fapl_id, es_id)) < 0)
+ TEST_ERROR;
+
+ /* Reopen file asynchronously */
+ if ((reopened_file_id = H5Freopen_async(file_id, es_id)) < 0)
+ TEST_ERROR;
+
+ /* Wait for the event stack to complete */
+ if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0)
+ TEST_ERROR;
+ if (op_failed)
+ TEST_ERROR;
+
+ /* Close */
+ if (H5Fclose_async(reopened_file_id, es_id) < 0)
+ TEST_ERROR;
+ if (H5Fclose_async(file_id, es_id) < 0)
+ TEST_ERROR;
+
+ /* Wait for the event stack to complete */
+ if (H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed) < 0)
+ TEST_ERROR;
+ if (op_failed)
+ TEST_ERROR;
+
+ if (H5Pclose(fapl_id) < 0)
+ TEST_ERROR;
+ if (H5ESclose(es_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Fclose(reopened_file_id);
+ H5Fclose(file_id);
+ H5Pclose(fapl_id);
+ H5ESwait(es_id, H5_API_TEST_WAIT_FOREVER, &num_in_progress, &op_failed);
+ H5ESclose(es_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * Cleanup temporary test files
+ */
+static void
+cleanup_files(void)
+{
+ char file_name[64];
+ int i;
+
+ if (MAINPROCESS) {
+ H5Fdelete(PAR_ASYNC_API_TEST_FILE, H5P_DEFAULT);
+ for (i = 0; i <= max_printf_file; i++) {
+ HDsnprintf(file_name, 64, PAR_ASYNC_API_TEST_FILE_PRINTF, i);
+ H5Fdelete(file_name, H5P_DEFAULT);
+ } /* end for */
+ }
+}
+
+int
+H5_api_async_test_parallel(void)
+{
+ size_t i;
+ int nerrors;
+
+ if (MAINPROCESS) {
+ HDprintf("**********************************************\n");
+ HDprintf("* *\n");
+ HDprintf("* API Parallel Async Tests *\n");
+ HDprintf("* *\n");
+ HDprintf("**********************************************\n\n");
+ }
+
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_ASYNC)) {
+ if (MAINPROCESS) {
+ SKIPPED();
+ HDprintf(" Async APIs aren't supported with this connector\n");
+ }
+
+ return 0;
+ }
+
+ for (i = 0, nerrors = 0; i < ARRAY_LENGTH(par_async_tests); i++) {
+ nerrors += (*par_async_tests[i])() ? 1 : 0;
+
+ if (MPI_SUCCESS != MPI_Barrier(MPI_COMM_WORLD)) {
+ if (MAINPROCESS)
+ HDprintf(" MPI_Barrier() failed!\n");
+ }
+ }
+
+ if (MAINPROCESS) {
+ HDprintf("\n");
+ HDprintf("Cleaning up testing files\n");
+ }
+
+ cleanup_files();
+
+ if (MAINPROCESS) {
+ HDprintf("\n * Re-testing with independent metadata reads *\n");
+ }
+
+ coll_metadata_read = FALSE;
+
+ for (i = 0; i < ARRAY_LENGTH(par_async_tests); i++) {
+ nerrors += (*par_async_tests[i])() ? 1 : 0;
+
+ if (MPI_SUCCESS != MPI_Barrier(MPI_COMM_WORLD)) {
+ if (MAINPROCESS)
+ HDprintf(" MPI_Barrier() failed!\n");
+ }
+ }
+
+ if (MAINPROCESS) {
+ HDprintf("\n");
+ HDprintf("Cleaning up testing files\n");
+ }
+
+ cleanup_files();
+
+ return nerrors;
+}
+
+#else /* H5ESpublic_H */
+
+int
+H5_api_async_test_parallel(void)
+{
+ if (MAINPROCESS) {
+ HDprintf("**********************************************\n");
+ HDprintf("* *\n");
+ HDprintf("* API Parallel Async Tests *\n");
+ HDprintf("* *\n");
+ HDprintf("**********************************************\n\n");
+ }
+
+ HDprintf("SKIPPED due to no async support in HDF5 library\n");
+
+ return 0;
+}
+
+#endif /* H5ESpublic_H */
diff --git a/testpar/API/H5_api_async_test_parallel.h b/testpar/API/H5_api_async_test_parallel.h
new file mode 100644
index 0000000..9e4340c
--- /dev/null
+++ b/testpar/API/H5_api_async_test_parallel.h
@@ -0,0 +1,29 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+#ifndef H5_API_ASYNC_TEST_PARALLEL_H_
+#define H5_API_ASYNC_TEST_PARALLEL_H_
+
+#include "H5_api_test_parallel.h"
+
+int H5_api_async_test_parallel(void);
+
+/********************************************************
+ * *
+ * API parallel async test defines *
+ * *
+ ********************************************************/
+
+#define PAR_ASYNC_API_TEST_FILE "H5_api_async_test_parallel.h5"
+#define PAR_ASYNC_API_TEST_FILE_PRINTF "H5_api_async_test_parallel_%d.h5"
+
+#endif /* H5_API_ASYNC_TEST_PARALLEL_H_ */
diff --git a/testpar/API/H5_api_attribute_test_parallel.c b/testpar/API/H5_api_attribute_test_parallel.c
new file mode 100644
index 0000000..cffbfcd
--- /dev/null
+++ b/testpar/API/H5_api_attribute_test_parallel.c
@@ -0,0 +1,47 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+#include "H5_api_attribute_test_parallel.h"
+
+/*
+ * The array of parallel attribute tests to be performed.
+ */
+static int (*par_attribute_tests[])(void) = {NULL};
+
+int
+H5_api_attribute_test_parallel(void)
+{
+ size_t i;
+ int nerrors;
+
+ if (MAINPROCESS) {
+ HDprintf("**********************************************\n");
+ HDprintf("* *\n");
+ HDprintf("* API Parallel Attribute Tests *\n");
+ HDprintf("* *\n");
+ HDprintf("**********************************************\n\n");
+ }
+
+ for (i = 0, nerrors = 0; i < ARRAY_LENGTH(par_attribute_tests); i++) {
+ /* nerrors += (*par_attribute_tests[i])() ? 1 : 0; */
+
+ if (MPI_SUCCESS != MPI_Barrier(MPI_COMM_WORLD)) {
+ if (MAINPROCESS)
+ HDprintf(" MPI_Barrier() failed!\n");
+ }
+ }
+
+ if (MAINPROCESS)
+ HDprintf("\n");
+
+ return nerrors;
+}
diff --git a/testpar/API/H5_api_attribute_test_parallel.h b/testpar/API/H5_api_attribute_test_parallel.h
new file mode 100644
index 0000000..81802ae
--- /dev/null
+++ b/testpar/API/H5_api_attribute_test_parallel.h
@@ -0,0 +1,20 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+#ifndef H5_API_ATTRIBUTE_TEST_PARALLEL_H_
+#define H5_API_ATTRIBUTE_TEST_PARALLEL_H_
+
+#include "H5_api_test_parallel.h"
+
+int H5_api_attribute_test_parallel(void);
+
+#endif /* H5_API_ATTRIBUTE_TEST_PARALLEL_H_ */
diff --git a/testpar/API/H5_api_dataset_test_parallel.c b/testpar/API/H5_api_dataset_test_parallel.c
new file mode 100644
index 0000000..fd02a7f
--- /dev/null
+++ b/testpar/API/H5_api_dataset_test_parallel.c
@@ -0,0 +1,8149 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/*
+ * XXX: Better documentation for each test about how the selections get
+ * split up among MPI ranks.
+ */
+#include "H5_api_dataset_test_parallel.h"
+
+static int test_write_dataset_data_verification(void);
+static int test_write_dataset_independent(void);
+static int test_write_dataset_one_proc_0_selection(void);
+static int test_write_dataset_one_proc_none_selection(void);
+static int test_write_dataset_one_proc_all_selection(void);
+static int test_write_dataset_hyper_file_all_mem(void);
+static int test_write_dataset_all_file_hyper_mem(void);
+static int test_write_dataset_point_file_all_mem(void);
+static int test_write_dataset_all_file_point_mem(void);
+static int test_write_dataset_hyper_file_point_mem(void);
+static int test_write_dataset_point_file_hyper_mem(void);
+static int test_read_dataset_one_proc_0_selection(void);
+static int test_read_dataset_one_proc_none_selection(void);
+static int test_read_dataset_one_proc_all_selection(void);
+static int test_read_dataset_hyper_file_all_mem(void);
+static int test_read_dataset_all_file_hyper_mem(void);
+static int test_read_dataset_point_file_all_mem(void);
+static int test_read_dataset_all_file_point_mem(void);
+static int test_read_dataset_hyper_file_point_mem(void);
+static int test_read_dataset_point_file_hyper_mem(void);
+
+/*
+ * Chunking tests
+ */
+static int test_write_multi_chunk_dataset_same_shape_read(void);
+static int test_write_multi_chunk_dataset_diff_shape_read(void);
+static int test_overwrite_multi_chunk_dataset_same_shape_read(void);
+static int test_overwrite_multi_chunk_dataset_diff_shape_read(void);
+
+/*
+ * The array of parallel dataset tests to be performed.
+ */
+static int (*par_dataset_tests[])(void) = {
+ test_write_dataset_data_verification,
+ test_write_dataset_independent,
+ test_write_dataset_one_proc_0_selection,
+ test_write_dataset_one_proc_none_selection,
+ test_write_dataset_one_proc_all_selection,
+ test_write_dataset_hyper_file_all_mem,
+ test_write_dataset_all_file_hyper_mem,
+ test_write_dataset_point_file_all_mem,
+ test_write_dataset_all_file_point_mem,
+ test_write_dataset_hyper_file_point_mem,
+ test_write_dataset_point_file_hyper_mem,
+ test_read_dataset_one_proc_0_selection,
+ test_read_dataset_one_proc_none_selection,
+ test_read_dataset_one_proc_all_selection,
+ test_read_dataset_hyper_file_all_mem,
+ test_read_dataset_all_file_hyper_mem,
+ test_read_dataset_point_file_all_mem,
+ test_read_dataset_all_file_point_mem,
+ test_read_dataset_hyper_file_point_mem,
+ test_read_dataset_point_file_hyper_mem,
+ test_write_multi_chunk_dataset_same_shape_read,
+ test_write_multi_chunk_dataset_diff_shape_read,
+ test_overwrite_multi_chunk_dataset_same_shape_read,
+ test_overwrite_multi_chunk_dataset_diff_shape_read,
+};
+
+/*
+ * A test to ensure that data is read back correctly from
+ * a dataset after it has been written in parallel. The test
+ * covers simple examples of using H5S_ALL selections,
+ * hyperslab selections and point selections.
+ */
+#define DATASET_WRITE_DATA_VERIFY_TEST_SPACE_RANK 3
+#define DATASET_WRITE_DATA_VERIFY_TEST_NUM_POINTS 10
+#define DATASET_WRITE_DATA_VERIFY_TEST_DSET_DTYPE H5T_NATIVE_INT
+#define DATASET_WRITE_DATA_VERIFY_TEST_DTYPE_SIZE sizeof(int)
+#define DATASET_WRITE_DATA_VERIFY_TEST_GROUP_NAME "dataset_write_data_verification_test"
+#define DATASET_WRITE_DATA_VERIFY_TEST_DSET_NAME1 "dataset_write_data_verification_all"
+#define DATASET_WRITE_DATA_VERIFY_TEST_DSET_NAME2 "dataset_write_data_verification_hyperslab"
+#define DATASET_WRITE_DATA_VERIFY_TEST_DSET_NAME3 "dataset_write_data_verification_points"
+static int
+test_write_dataset_data_verification(void)
+{
+ hssize_t space_npoints;
+ hsize_t *dims = NULL;
+ hsize_t start[DATASET_WRITE_DATA_VERIFY_TEST_SPACE_RANK];
+ hsize_t stride[DATASET_WRITE_DATA_VERIFY_TEST_SPACE_RANK];
+ hsize_t count[DATASET_WRITE_DATA_VERIFY_TEST_SPACE_RANK];
+ hsize_t block[DATASET_WRITE_DATA_VERIFY_TEST_SPACE_RANK];
+ hsize_t *points = NULL;
+ size_t i, data_size;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t fapl_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t fspace_id = H5I_INVALID_HID;
+ hid_t mspace_id = H5I_INVALID_HID;
+ void *write_buf = NULL;
+ void *read_buf = NULL;
+
+ TESTING_MULTIPART("verification of dataset data using H5Dwrite then H5Dread");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
+ SKIPPED();
+ HDprintf(
+ " API functions for basic file, group, or dataset aren't supported with this connector\n");
+ return 0;
+ }
+
+ TESTING_2("test setup");
+
+ if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, TRUE)) < 0)
+ TEST_ERROR;
+
+ if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, fapl_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_parallel_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, DATASET_WRITE_DATA_VERIFY_TEST_GROUP_NAME, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container sub-group '%s'\n", DATASET_WRITE_DATA_VERIFY_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if (generate_random_parallel_dimensions(DATASET_WRITE_DATA_VERIFY_TEST_SPACE_RANK, &dims) < 0)
+ TEST_ERROR;
+
+ if ((fspace_id = H5Screate_simple(DATASET_WRITE_DATA_VERIFY_TEST_SPACE_RANK, dims, NULL)) < 0)
+ TEST_ERROR;
+
+ if ((dset_id = H5Dcreate2(group_id, DATASET_WRITE_DATA_VERIFY_TEST_DSET_NAME1,
+ DATASET_WRITE_DATA_VERIFY_TEST_DSET_DTYPE, fspace_id, H5P_DEFAULT, H5P_DEFAULT,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create dataset '%s'\n", DATASET_WRITE_DATA_VERIFY_TEST_DSET_NAME1);
+ goto error;
+ }
+ H5E_BEGIN_TRY
+ {
+ H5Dclose(dset_id);
+ }
+ H5E_END_TRY;
+ dset_id = H5I_INVALID_HID;
+
+ if ((dset_id = H5Dcreate2(group_id, DATASET_WRITE_DATA_VERIFY_TEST_DSET_NAME2,
+ DATASET_WRITE_DATA_VERIFY_TEST_DSET_DTYPE, fspace_id, H5P_DEFAULT, H5P_DEFAULT,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create dataset '%s'\n", DATASET_WRITE_DATA_VERIFY_TEST_DSET_NAME2);
+ goto error;
+ }
+ H5E_BEGIN_TRY
+ {
+ H5Dclose(dset_id);
+ }
+ H5E_END_TRY;
+ dset_id = H5I_INVALID_HID;
+
+ if ((dset_id = H5Dcreate2(group_id, DATASET_WRITE_DATA_VERIFY_TEST_DSET_NAME3,
+ DATASET_WRITE_DATA_VERIFY_TEST_DSET_DTYPE, fspace_id, H5P_DEFAULT, H5P_DEFAULT,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create dataset '%s'\n", DATASET_WRITE_DATA_VERIFY_TEST_DSET_NAME3);
+ goto error;
+ }
+ H5E_BEGIN_TRY
+ {
+ H5Dclose(dset_id);
+ }
+ H5E_END_TRY;
+ dset_id = H5I_INVALID_HID;
+
+ PASSED();
+
+ BEGIN_MULTIPART
+ {
+ PART_BEGIN(H5Dwrite_all_read)
+ {
+ hbool_t op_failed = FALSE;
+
+ TESTING_2("H5Dwrite using H5S_ALL then H5Dread");
+
+ if ((dset_id = H5Dopen2(group_id, DATASET_WRITE_DATA_VERIFY_TEST_DSET_NAME1, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open dataset '%s'\n", DATASET_WRITE_DATA_VERIFY_TEST_DSET_NAME1);
+ PART_ERROR(H5Dwrite_all_read);
+ }
+
+ /*
+ * Write data to dataset on rank 0 only. All ranks will read the data back.
+ */
+ if (MAINPROCESS) {
+ for (i = 0, data_size = 1; i < DATASET_WRITE_DATA_VERIFY_TEST_SPACE_RANK; i++)
+ data_size *= dims[i];
+ data_size *= DATASET_WRITE_DATA_VERIFY_TEST_DTYPE_SIZE;
+
+ if (NULL != (write_buf = HDmalloc(data_size))) {
+ for (i = 0; i < data_size / DATASET_WRITE_DATA_VERIFY_TEST_DTYPE_SIZE; i++)
+ ((int *)write_buf)[i] = (int)i;
+
+ if (H5Dwrite(dset_id, DATASET_WRITE_DATA_VERIFY_TEST_DSET_DTYPE, H5S_ALL, H5S_ALL,
+ H5P_DEFAULT, write_buf) < 0)
+ op_failed = TRUE;
+ }
+ else
+ op_failed = TRUE;
+
+ if (write_buf) {
+ HDfree(write_buf);
+ write_buf = NULL;
+ }
+ }
+
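+ /* Combine the per-rank failure flag so that every rank learns whether the
+ * write performed on rank 0 succeeded. */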
+ if (MPI_SUCCESS !=
+ MPI_Allreduce(MPI_IN_PLACE, &op_failed, 1, MPI_C_BOOL, MPI_LOR, MPI_COMM_WORLD)) {
+ H5_FAILED();
+ HDprintf(" couldn't determine if dataset write on rank 0 succeeded\n");
+ PART_ERROR(H5Dwrite_all_read);
+ }
+
+ if (op_failed == TRUE) {
+ H5_FAILED();
+ HDprintf(" dataset write on rank 0 failed!\n");
+ PART_ERROR(H5Dwrite_all_read);
+ }
+
+ if (fspace_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(fspace_id);
+ }
+ H5E_END_TRY;
+ fspace_id = H5I_INVALID_HID;
+ }
+ if (dset_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Dclose(dset_id);
+ }
+ H5E_END_TRY;
+ dset_id = H5I_INVALID_HID;
+ }
+
+ /*
+ * Close and re-open the file to ensure that the data gets written.
+ */
+ if (H5Gclose(group_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close test's container group\n");
+ PART_ERROR(H5Dwrite_all_read);
+ }
+ if (H5Gclose(container_group) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close container group\n");
+ PART_ERROR(H5Dwrite_all_read);
+ }
+ if (H5Fclose(file_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close file for data flushing\n");
+ PART_ERROR(H5Dwrite_all_read);
+ }
+ if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, fapl_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't re-open file '%s'\n", H5_api_test_parallel_filename);
+ PART_ERROR(H5Dwrite_all_read);
+ }
+ if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME);
+ PART_ERROR(H5Dwrite_all_read);
+ }
+ if ((group_id =
+ H5Gopen2(container_group, DATASET_WRITE_DATA_VERIFY_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container sub-group '%s'\n",
+ DATASET_WRITE_DATA_VERIFY_TEST_GROUP_NAME);
+ PART_ERROR(H5Dwrite_all_read);
+ }
+
+ if ((dset_id = H5Dopen2(group_id, DATASET_WRITE_DATA_VERIFY_TEST_DSET_NAME1, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open dataset '%s'\n", DATASET_WRITE_DATA_VERIFY_TEST_DSET_NAME1);
+ PART_ERROR(H5Dwrite_all_read);
+ }
+
+ if ((fspace_id = H5Dget_space(dset_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get dataset dataspace\n");
+ PART_ERROR(H5Dwrite_all_read);
+ }
+
+ if ((space_npoints = H5Sget_simple_extent_npoints(fspace_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get dataspace num points\n");
+ PART_ERROR(H5Dwrite_all_read);
+ }
+
+ if (NULL ==
+ (read_buf = HDmalloc((hsize_t)space_npoints * DATASET_WRITE_DATA_VERIFY_TEST_DTYPE_SIZE))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for dataset read\n");
+ PART_ERROR(H5Dwrite_all_read);
+ }
+
+ if (H5Dread(dset_id, DATASET_WRITE_DATA_VERIFY_TEST_DSET_DTYPE, H5S_ALL, H5S_ALL, H5P_DEFAULT,
+ read_buf) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't read from dataset '%s'\n", DATASET_WRITE_DATA_VERIFY_TEST_DSET_NAME1);
+ PART_ERROR(H5Dwrite_all_read);
+ }
+
+ for (i = 0; i < (hsize_t)space_npoints; i++)
+ if (((int *)read_buf)[i] != (int)i) {
+ H5_FAILED();
+ HDprintf(" H5S_ALL selection data verification failed\n");
+ PART_ERROR(H5Dwrite_all_read);
+ }
+
+ if (read_buf) {
+ HDfree(read_buf);
+ read_buf = NULL;
+ }
+
+ PASSED();
+ }
+ PART_END(H5Dwrite_all_read);
+
+ if (write_buf) {
+ HDfree(write_buf);
+ write_buf = NULL;
+ }
+ if (read_buf) {
+ HDfree(read_buf);
+ read_buf = NULL;
+ }
+ if (fspace_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(fspace_id);
+ }
+ H5E_END_TRY;
+ fspace_id = H5I_INVALID_HID;
+ }
+ if (dset_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Dclose(dset_id);
+ }
+ H5E_END_TRY;
+ dset_id = H5I_INVALID_HID;
+ }
+
+ PART_BEGIN(H5Dwrite_hyperslab_read)
+ {
+ TESTING_2("H5Dwrite using hyperslab selection then H5Dread");
+
+ for (i = 1, data_size = 1; i < DATASET_WRITE_DATA_VERIFY_TEST_SPACE_RANK; i++)
+ data_size *= dims[i];
+ data_size *= DATASET_WRITE_DATA_VERIFY_TEST_DTYPE_SIZE;
+
+ if (NULL == (write_buf = HDmalloc(data_size))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for dataset write\n");
+ PART_ERROR(H5Dwrite_hyperslab_read);
+ }
+
+ for (i = 0; i < data_size / DATASET_WRITE_DATA_VERIFY_TEST_DTYPE_SIZE; i++)
+ ((int *)write_buf)[i] = mpi_rank;
+
+ /* Each MPI rank writes to a single row along the first dimension,
+ * covering the entirety of the remaining dimensions. The combined
+ * selections from all MPI ranks span the first dimension.
+ */
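+ /* For example, with 4 MPI ranks and dims = {4, M, K} (dims[0] appears to be
+ * sized by the number of ranks here), rank 2 selects start = {2, 0, 0} and
+ * block = {1, M, K}, with stride and count of 1 in every dimension. */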
+ for (i = 0; i < DATASET_WRITE_DATA_VERIFY_TEST_SPACE_RANK; i++) {
+ if (i == 0) {
+ start[i] = (hsize_t)mpi_rank;
+ block[i] = 1;
+ }
+ else {
+ start[i] = 0;
+ block[i] = dims[i];
+ }
+
+ stride[i] = 1;
+ count[i] = 1;
+ }
+
+ if ((dset_id = H5Dopen2(group_id, DATASET_WRITE_DATA_VERIFY_TEST_DSET_NAME2, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open dataset '%s'\n", DATASET_WRITE_DATA_VERIFY_TEST_DSET_NAME2);
+ PART_ERROR(H5Dwrite_hyperslab_read);
+ }
+
+ if ((fspace_id = H5Dget_space(dset_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get dataset dataspace\n");
+ PART_ERROR(H5Dwrite_hyperslab_read);
+ }
+
+ if (H5Sselect_hyperslab(fspace_id, H5S_SELECT_SET, start, stride, count, block) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't select hyperslab for dataset write\n");
+ PART_ERROR(H5Dwrite_hyperslab_read);
+ }
+
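+ /* Describe the memory buffer as a flat 1-D dataspace with the same number of
+ * elements as the file selection. */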
+ {
+ hsize_t mdims[] = {data_size / DATASET_WRITE_DATA_VERIFY_TEST_DTYPE_SIZE};
+
+ if ((mspace_id = H5Screate_simple(1, mdims, NULL)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create memory dataspace\n");
+ PART_ERROR(H5Dwrite_hyperslab_read);
+ }
+ }
+
+ if (H5Dwrite(dset_id, DATASET_WRITE_DATA_VERIFY_TEST_DSET_DTYPE, mspace_id, fspace_id,
+ H5P_DEFAULT, write_buf) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't write to dataset '%s'\n", DATASET_WRITE_DATA_VERIFY_TEST_DSET_NAME2);
+ PART_ERROR(H5Dwrite_hyperslab_read);
+ }
+
+ if (write_buf) {
+ HDfree(write_buf);
+ write_buf = NULL;
+ }
+ if (mspace_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(mspace_id);
+ }
+ H5E_END_TRY;
+ mspace_id = H5I_INVALID_HID;
+ }
+ if (fspace_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(fspace_id);
+ }
+ H5E_END_TRY;
+ fspace_id = H5I_INVALID_HID;
+ }
+ if (dset_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Dclose(dset_id);
+ }
+ H5E_END_TRY;
+ dset_id = H5I_INVALID_HID;
+ }
+
+ /*
+ * Close and re-open the file to ensure that the data gets written.
+ */
+ if (H5Gclose(group_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close test's container group\n");
+ PART_ERROR(H5Dwrite_hyperslab_read);
+ }
+ if (H5Gclose(container_group) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close container group\n");
+ PART_ERROR(H5Dwrite_hyperslab_read);
+ }
+ if (H5Fclose(file_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close file for data flushing\n");
+ PART_ERROR(H5Dwrite_hyperslab_read);
+ }
+ if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, fapl_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't re-open file '%s'\n", H5_api_test_parallel_filename);
+ PART_ERROR(H5Dwrite_hyperslab_read);
+ }
+ if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME);
+ PART_ERROR(H5Dwrite_hyperslab_read);
+ }
+ if ((group_id =
+ H5Gopen2(container_group, DATASET_WRITE_DATA_VERIFY_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container sub-group '%s'\n",
+ DATASET_WRITE_DATA_VERIFY_TEST_GROUP_NAME);
+ PART_ERROR(H5Dwrite_hyperslab_read);
+ }
+
+ if ((dset_id = H5Dopen2(group_id, DATASET_WRITE_DATA_VERIFY_TEST_DSET_NAME2, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open dataset '%s'\n", DATASET_WRITE_DATA_VERIFY_TEST_DSET_NAME2);
+ PART_ERROR(H5Dwrite_hyperslab_read);
+ }
+
+ if ((fspace_id = H5Dget_space(dset_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get dataset dataspace\n");
+ PART_ERROR(H5Dwrite_hyperslab_read);
+ }
+
+ if ((space_npoints = H5Sget_simple_extent_npoints(fspace_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get dataspace num points\n");
+ PART_ERROR(H5Dwrite_hyperslab_read);
+ }
+
+ if (NULL ==
+ (read_buf = HDmalloc((hsize_t)space_npoints * DATASET_WRITE_DATA_VERIFY_TEST_DTYPE_SIZE))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for dataset read\n");
+ PART_ERROR(H5Dwrite_hyperslab_read);
+ }
+
+ if (H5Dread(dset_id, DATASET_WRITE_DATA_VERIFY_TEST_DSET_DTYPE, H5S_ALL, H5S_ALL, H5P_DEFAULT,
+ read_buf) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't read from dataset '%s'\n", DATASET_WRITE_DATA_VERIFY_TEST_DSET_NAME2);
+ PART_ERROR(H5Dwrite_hyperslab_read);
+ }
+
+ for (i = 0; i < (size_t)mpi_size; i++) {
+ size_t j;
+
+ for (j = 0; j < data_size / DATASET_WRITE_DATA_VERIFY_TEST_DTYPE_SIZE; j++) {
+ if (((int *)
+ read_buf)[j + (i * (data_size / DATASET_WRITE_DATA_VERIFY_TEST_DTYPE_SIZE))] !=
+ (int)i) {
+ H5_FAILED();
+ HDprintf(" hyperslab selection data verification failed\n");
+ PART_ERROR(H5Dwrite_hyperslab_read);
+ }
+ }
+ }
+
+ if (read_buf) {
+ HDfree(read_buf);
+ read_buf = NULL;
+ }
+
+ PASSED();
+ }
+ PART_END(H5Dwrite_hyperslab_read);
+
+ if (write_buf) {
+ HDfree(write_buf);
+ write_buf = NULL;
+ }
+ if (read_buf) {
+ HDfree(read_buf);
+ read_buf = NULL;
+ }
+ if (fspace_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(fspace_id);
+ }
+ H5E_END_TRY;
+ fspace_id = H5I_INVALID_HID;
+ }
+ if (mspace_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(mspace_id);
+ }
+ H5E_END_TRY;
+ mspace_id = H5I_INVALID_HID;
+ }
+ if (dset_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Dclose(dset_id);
+ }
+ H5E_END_TRY;
+ dset_id = H5I_INVALID_HID;
+ }
+
+ PART_BEGIN(H5Dwrite_point_sel_read)
+ {
+ TESTING_2("H5Dwrite using point selection then H5Dread");
+
+ for (i = 1, data_size = 1; i < DATASET_WRITE_DATA_VERIFY_TEST_SPACE_RANK; i++)
+ data_size *= dims[i];
+ data_size *= DATASET_WRITE_DATA_VERIFY_TEST_DTYPE_SIZE;
+
+ if (NULL == (write_buf = HDmalloc(data_size))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for dataset write\n");
+ PART_ERROR(H5Dwrite_point_sel_read);
+ }
+
+ /* Use different data than the previous test to ensure that the data actually changed. */
+ for (i = 0; i < data_size / DATASET_WRITE_DATA_VERIFY_TEST_DTYPE_SIZE; i++)
+ ((int *)write_buf)[i] = mpi_size - mpi_rank;
+
+ if (NULL == (points = HDmalloc(DATASET_WRITE_DATA_VERIFY_TEST_SPACE_RANK *
+ (data_size / DATASET_WRITE_DATA_VERIFY_TEST_DTYPE_SIZE) *
+ sizeof(hsize_t)))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for point selection\n");
+ PART_ERROR(H5Dwrite_point_sel_read);
+ }
+
+ /* Each MPI rank writes to a single row along the first dimension,
+ * covering the entirety of the remaining dimensions. The combined
+ * selections from all MPI ranks span the first dimension.
+ */
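+ /* For this rank-3 dataspace, buffer element i maps to the point
+ * (mpi_rank, i / dims[2], i % dims[2]), i.e. the rank's slice of the first
+ * dimension enumerated in row-major order. */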
+ for (i = 0; i < data_size / DATASET_WRITE_DATA_VERIFY_TEST_DTYPE_SIZE; i++) {
+ size_t j;
+
+ for (j = 0; j < DATASET_WRITE_DATA_VERIFY_TEST_SPACE_RANK; j++) {
+ size_t idx = (i * DATASET_WRITE_DATA_VERIFY_TEST_SPACE_RANK) + j;
+
+ if (j == 0)
+ points[idx] = (hsize_t)mpi_rank;
+ else if (j != DATASET_WRITE_DATA_VERIFY_TEST_SPACE_RANK - 1)
+ points[idx] = i / dims[j + 1];
+ else
+ points[idx] = i % dims[j];
+ }
+ }
+
+ if ((dset_id = H5Dopen2(group_id, DATASET_WRITE_DATA_VERIFY_TEST_DSET_NAME3, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open dataset '%s'\n", DATASET_WRITE_DATA_VERIFY_TEST_DSET_NAME3);
+ PART_ERROR(H5Dwrite_point_sel_read);
+ }
+
+ if ((fspace_id = H5Dget_space(dset_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get dataset dataspace\n");
+ PART_ERROR(H5Dwrite_point_sel_read);
+ }
+
+ if (H5Sselect_elements(fspace_id, H5S_SELECT_SET,
+ data_size / DATASET_WRITE_DATA_VERIFY_TEST_DTYPE_SIZE, points) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't select elements in dataspace\n");
+ PART_ERROR(H5Dwrite_point_sel_read);
+ }
+
+ {
+ hsize_t mdims[] = {data_size / DATASET_WRITE_DATA_VERIFY_TEST_DTYPE_SIZE};
+
+ if ((mspace_id = H5Screate_simple(1, mdims, NULL)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create memory dataspace\n");
+ PART_ERROR(H5Dwrite_point_sel_read);
+ }
+ }
+
+ if (H5Dwrite(dset_id, DATASET_WRITE_DATA_VERIFY_TEST_DSET_DTYPE, mspace_id, fspace_id,
+ H5P_DEFAULT, write_buf) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't write to dataset '%s'\n", DATASET_WRITE_DATA_VERIFY_TEST_DSET_NAME3);
+ PART_ERROR(H5Dwrite_point_sel_read);
+ }
+
+ if (write_buf) {
+ HDfree(write_buf);
+ write_buf = NULL;
+ }
+ if (mspace_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(mspace_id);
+ }
+ H5E_END_TRY;
+ mspace_id = H5I_INVALID_HID;
+ }
+ if (fspace_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(fspace_id);
+ }
+ H5E_END_TRY;
+ fspace_id = H5I_INVALID_HID;
+ }
+ if (dset_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Dclose(dset_id);
+ }
+ H5E_END_TRY;
+ dset_id = H5I_INVALID_HID;
+ }
+
+ /*
+ * Close and re-open the file to ensure that the data gets written.
+ */
+ if (H5Gclose(group_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close test's container group\n");
+ PART_ERROR(H5Dwrite_point_sel_read);
+ }
+ if (H5Gclose(container_group) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close container group\n");
+ PART_ERROR(H5Dwrite_point_sel_read);
+ }
+ if (H5Fclose(file_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close file for data flushing\n");
+ PART_ERROR(H5Dwrite_point_sel_read);
+ }
+ if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, fapl_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't re-open file '%s'\n", H5_api_test_parallel_filename);
+ PART_ERROR(H5Dwrite_point_sel_read);
+ }
+ if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME);
+ PART_ERROR(H5Dwrite_point_sel_read);
+ }
+ if ((group_id =
+ H5Gopen2(container_group, DATASET_WRITE_DATA_VERIFY_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container sub-group '%s'\n",
+ DATASET_WRITE_DATA_VERIFY_TEST_GROUP_NAME);
+ PART_ERROR(H5Dwrite_point_sel_read);
+ }
+
+ if ((dset_id = H5Dopen2(group_id, DATASET_WRITE_DATA_VERIFY_TEST_DSET_NAME3, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open dataset '%s'\n", DATASET_WRITE_DATA_VERIFY_TEST_DSET_NAME3);
+ PART_ERROR(H5Dwrite_point_sel_read);
+ }
+
+ if ((fspace_id = H5Dget_space(dset_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get dataset dataspace\n");
+ PART_ERROR(H5Dwrite_point_sel_read);
+ }
+
+ if ((space_npoints = H5Sget_simple_extent_npoints(fspace_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get dataspace num points\n");
+ PART_ERROR(H5Dwrite_point_sel_read);
+ }
+
+ if (NULL ==
+ (read_buf = HDmalloc((hsize_t)space_npoints * DATASET_WRITE_DATA_VERIFY_TEST_DTYPE_SIZE))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for dataset read\n");
+ PART_ERROR(H5Dwrite_point_sel_read);
+ }
+
+ if (H5Dread(dset_id, DATASET_WRITE_DATA_VERIFY_TEST_DSET_DTYPE, H5S_ALL, H5S_ALL, H5P_DEFAULT,
+ read_buf) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't read from dataset '%s'\n", DATASET_WRITE_DATA_VERIFY_TEST_DSET_NAME3);
+ PART_ERROR(H5Dwrite_point_sel_read);
+ }
+
+ for (i = 0; i < (size_t)mpi_size; i++) {
+ size_t j;
+
+ for (j = 0; j < data_size / DATASET_WRITE_DATA_VERIFY_TEST_DTYPE_SIZE; j++) {
+ if (((int *)
+ read_buf)[j + (i * (data_size / DATASET_WRITE_DATA_VERIFY_TEST_DTYPE_SIZE))] !=
+ (mpi_size - (int)i)) {
+ H5_FAILED();
+ HDprintf(" point selection data verification failed\n");
+ PART_ERROR(H5Dwrite_point_sel_read);
+ }
+ }
+ }
+
+ if (read_buf) {
+ HDfree(read_buf);
+ read_buf = NULL;
+ }
+
+ PASSED();
+ }
+ PART_END(H5Dwrite_point_sel_read);
+
+ if (write_buf) {
+ HDfree(write_buf);
+ write_buf = NULL;
+ }
+ if (read_buf) {
+ HDfree(read_buf);
+ read_buf = NULL;
+ }
+ if (points) {
+ HDfree(points);
+ points = NULL;
+ }
+ if (fspace_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(fspace_id);
+ }
+ H5E_END_TRY;
+ fspace_id = H5I_INVALID_HID;
+ }
+ if (mspace_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(mspace_id);
+ }
+ H5E_END_TRY;
+ mspace_id = H5I_INVALID_HID;
+ }
+ if (dset_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Dclose(dset_id);
+ }
+ H5E_END_TRY;
+ dset_id = H5I_INVALID_HID;
+ }
+ }
+ END_MULTIPART;
+
+ TESTING_2("test cleanup");
+
+ if (read_buf) {
+ HDfree(read_buf);
+ read_buf = NULL;
+ }
+
+ if (write_buf) {
+ HDfree(write_buf);
+ write_buf = NULL;
+ }
+
+ if (points) {
+ HDfree(points);
+ points = NULL;
+ }
+
+ if (dims) {
+ HDfree(dims);
+ dims = NULL;
+ }
+
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Pclose(fapl_id) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ if (read_buf)
+ HDfree(read_buf);
+ if (write_buf)
+ HDfree(write_buf);
+ if (points)
+ HDfree(points);
+ if (dims)
+ HDfree(dims);
+ H5Sclose(mspace_id);
+ H5Sclose(fspace_id);
+ H5Dclose(dset_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Pclose(fapl_id);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to ensure that independent dataset writes function
+ * as expected. First, two datasets are created in the file.
+ * Then, the even MPI ranks first write to dataset 1, followed
+ * by dataset 2. The odd MPI ranks first write to dataset 2,
+ * followed by dataset 1. After this, the data is read back from
+ * each dataset and verified.
+ */
+#define DATASET_INDEPENDENT_WRITE_TEST_SPACE_RANK 3
+#define DATASET_INDEPENDENT_WRITE_TEST_DSET_DTYPE H5T_NATIVE_INT
+#define DATASET_INDEPENDENT_WRITE_TEST_DTYPE_SIZE sizeof(int)
+#define DATASET_INDEPENDENT_WRITE_TEST_GROUP_NAME "independent_dataset_write_test"
+#define DATASET_INDEPENDENT_WRITE_TEST_DSET_NAME1 "dset1"
+#define DATASET_INDEPENDENT_WRITE_TEST_DSET_NAME2 "dset2"
+static int
+test_write_dataset_independent(void)
+{
+ hssize_t space_npoints;
+ hsize_t *dims = NULL;
+ hsize_t start[DATASET_INDEPENDENT_WRITE_TEST_SPACE_RANK];
+ hsize_t stride[DATASET_INDEPENDENT_WRITE_TEST_SPACE_RANK];
+ hsize_t count[DATASET_INDEPENDENT_WRITE_TEST_SPACE_RANK];
+ hsize_t block[DATASET_INDEPENDENT_WRITE_TEST_SPACE_RANK];
+ size_t i, data_size;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t fapl_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t dset_id1 = H5I_INVALID_HID, dset_id2 = H5I_INVALID_HID;
+ hid_t fspace_id = H5I_INVALID_HID;
+ hid_t mspace_id = H5I_INVALID_HID;
+ void *write_buf = NULL;
+ void *read_buf = NULL;
+
+ TESTING("independent writing to different datasets by different ranks");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
+ SKIPPED();
+ HDprintf(
+ " API functions for basic file, group, or dataset aren't supported with this connector\n");
+ return 0;
+ }
+
+ if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, TRUE)) < 0)
+ TEST_ERROR;
+
+ if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, fapl_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_parallel_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, DATASET_INDEPENDENT_WRITE_TEST_GROUP_NAME, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container sub-group '%s'\n", DATASET_INDEPENDENT_WRITE_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ /*
+ * Set up dimensions of overall datasets and slabs local
+ * to the MPI rank.
+ */
+ if (generate_random_parallel_dimensions(DATASET_INDEPENDENT_WRITE_TEST_SPACE_RANK, &dims) < 0)
+ TEST_ERROR;
+
+ if ((fspace_id = H5Screate_simple(DATASET_INDEPENDENT_WRITE_TEST_SPACE_RANK, dims, NULL)) < 0)
+ TEST_ERROR;
+
+ /* create a dataset collectively */
+ if ((dset_id1 = H5Dcreate2(group_id, DATASET_INDEPENDENT_WRITE_TEST_DSET_NAME1,
+ DATASET_INDEPENDENT_WRITE_TEST_DSET_DTYPE, fspace_id, H5P_DEFAULT, H5P_DEFAULT,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to create first dataset\n");
+ goto error;
+ }
+ if ((dset_id2 = H5Dcreate2(group_id, DATASET_INDEPENDENT_WRITE_TEST_DSET_NAME2,
+ DATASET_INDEPENDENT_WRITE_TEST_DSET_DTYPE, fspace_id, H5P_DEFAULT, H5P_DEFAULT,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to create second dataset\n");
+ goto error;
+ }
+
+ for (i = 1, data_size = 1; i < DATASET_INDEPENDENT_WRITE_TEST_SPACE_RANK; i++)
+ data_size *= dims[i];
+ data_size *= DATASET_INDEPENDENT_WRITE_TEST_DTYPE_SIZE;
+
+ if (NULL == (write_buf = HDmalloc(data_size))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for dataset write\n");
+ goto error;
+ }
+
+ for (i = 0; i < data_size / DATASET_INDEPENDENT_WRITE_TEST_DTYPE_SIZE; i++)
+ ((int *)write_buf)[i] = mpi_rank;
+
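+ /* Each rank selects its own row along the first dimension of both datasets,
+ * covering the remaining dimensions in full. */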
+ for (i = 0; i < DATASET_INDEPENDENT_WRITE_TEST_SPACE_RANK; i++) {
+ if (i == 0) {
+ start[i] = (hsize_t)mpi_rank;
+ block[i] = 1;
+ }
+ else {
+ start[i] = 0;
+ block[i] = dims[i];
+ }
+
+ stride[i] = 1;
+ count[i] = 1;
+ }
+
+ if (H5Sselect_hyperslab(fspace_id, H5S_SELECT_SET, start, stride, count, block) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't select hyperslab for dataset write\n");
+ goto error;
+ }
+
+ {
+ hsize_t mdims[] = {data_size / DATASET_INDEPENDENT_WRITE_TEST_DTYPE_SIZE};
+
+ if ((mspace_id = H5Screate_simple(1, mdims, NULL)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create memory dataspace\n");
+ goto error;
+ }
+ }
+
+ /*
+ * To test that writes can be issued in independent orders across processes,
+ * all even-numbered processes write to dataset1 first, then dataset2.
+ * All odd-numbered processes write to dataset2 first, then dataset1.
+ */
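+ /* Since each rank's hyperslab selection is a disjoint row of the first
+ * dimension, the two orderings touch non-overlapping regions and both
+ * datasets end up with identical contents. */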
+ BEGIN_INDEPENDENT_OP(dset_write)
+ {
+ if (mpi_rank % 2 == 0) {
+ if (H5Dwrite(dset_id1, DATASET_INDEPENDENT_WRITE_TEST_DSET_DTYPE, mspace_id, fspace_id,
+ H5P_DEFAULT, write_buf) < 0) {
+ H5_FAILED();
+ HDprintf(" even ranks failed to write to dataset 1\n");
+ INDEPENDENT_OP_ERROR(dset_write);
+ }
+ if (H5Dwrite(dset_id2, DATASET_INDEPENDENT_WRITE_TEST_DSET_DTYPE, mspace_id, fspace_id,
+ H5P_DEFAULT, write_buf) < 0) {
+ H5_FAILED();
+ HDprintf(" even ranks failed to write to dataset 2\n");
+ INDEPENDENT_OP_ERROR(dset_write);
+ }
+ }
+ else {
+ if (H5Dwrite(dset_id2, DATASET_INDEPENDENT_WRITE_TEST_DSET_DTYPE, mspace_id, fspace_id,
+ H5P_DEFAULT, write_buf) < 0) {
+ H5_FAILED();
+ HDprintf(" odd ranks failed to write to dataset 2\n");
+ INDEPENDENT_OP_ERROR(dset_write);
+ }
+ if (H5Dwrite(dset_id1, DATASET_INDEPENDENT_WRITE_TEST_DSET_DTYPE, mspace_id, fspace_id,
+ H5P_DEFAULT, write_buf) < 0) {
+ H5_FAILED();
+ HDprintf(" odd ranks failed to write to dataset 1\n");
+ INDEPENDENT_OP_ERROR(dset_write);
+ }
+ }
+ }
+ END_INDEPENDENT_OP(dset_write);
+
+ if (write_buf) {
+ HDfree(write_buf);
+ write_buf = NULL;
+ }
+
+ H5Sclose(mspace_id);
+ mspace_id = H5I_INVALID_HID;
+ H5Sclose(fspace_id);
+ fspace_id = H5I_INVALID_HID;
+ H5Dclose(dset_id1);
+ dset_id1 = H5I_INVALID_HID;
+ H5Dclose(dset_id2);
+ dset_id2 = H5I_INVALID_HID;
+
+ /*
+ * Close and re-open the file to ensure that the data gets written.
+ */
+ if (H5Gclose(group_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close test's container group\n");
+ goto error;
+ }
+ if (H5Gclose(container_group) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close container group\n");
+ goto error;
+ }
+ if (H5Fclose(file_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close file for data flushing\n");
+ goto error;
+ }
+ if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, fapl_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't re-open file '%s'\n", H5_api_test_parallel_filename);
+ goto error;
+ }
+ if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME);
+ goto error;
+ }
+ if ((group_id = H5Gopen2(container_group, DATASET_INDEPENDENT_WRITE_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container sub-group '%s'\n", DATASET_INDEPENDENT_WRITE_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((dset_id1 = H5Dopen2(group_id, DATASET_INDEPENDENT_WRITE_TEST_DSET_NAME1, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open dataset '%s'\n", DATASET_INDEPENDENT_WRITE_TEST_DSET_NAME1);
+ goto error;
+ }
+ if ((dset_id2 = H5Dopen2(group_id, DATASET_INDEPENDENT_WRITE_TEST_DSET_NAME2, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open dataset '%s'\n", DATASET_INDEPENDENT_WRITE_TEST_DSET_NAME2);
+ goto error;
+ }
+
+ /*
+ * Verify that data has been written correctly.
+ */
+ if ((fspace_id = H5Dget_space(dset_id1)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get dataset dataspace\n");
+ goto error;
+ }
+
+ if ((space_npoints = H5Sget_simple_extent_npoints(fspace_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get dataspace num points\n");
+ goto error;
+ }
+
+ if (NULL == (read_buf = HDmalloc((hsize_t)space_npoints * DATASET_INDEPENDENT_WRITE_TEST_DTYPE_SIZE))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for dataset read\n");
+ goto error;
+ }
+
+ if (H5Dread(dset_id1, DATASET_INDEPENDENT_WRITE_TEST_DSET_DTYPE, H5S_ALL, H5S_ALL, H5P_DEFAULT,
+ read_buf) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't read from dataset '%s'\n", DATASET_INDEPENDENT_WRITE_TEST_DSET_NAME1);
+ goto error;
+ }
+
+ for (i = 0; i < (size_t)mpi_size; i++) {
+ size_t j;
+
+ for (j = 0; j < data_size / DATASET_INDEPENDENT_WRITE_TEST_DTYPE_SIZE; j++) {
+ if (((int *)read_buf)[j + (i * (data_size / DATASET_INDEPENDENT_WRITE_TEST_DTYPE_SIZE))] !=
+ (int)i) {
+ H5_FAILED();
+ HDprintf(" dataset 1 data verification failed\n");
+ goto error;
+ }
+ }
+ }
+
+ if (H5Dread(dset_id2, DATASET_INDEPENDENT_WRITE_TEST_DSET_DTYPE, H5S_ALL, H5S_ALL, H5P_DEFAULT,
+ read_buf) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't read from dataset '%s'\n", DATASET_INDEPENDENT_WRITE_TEST_DSET_NAME2);
+ goto error;
+ }
+
+ for (i = 0; i < (size_t)mpi_size; i++) {
+ size_t j;
+
+ for (j = 0; j < data_size / DATASET_INDEPENDENT_WRITE_TEST_DTYPE_SIZE; j++) {
+ if (((int *)read_buf)[j + (i * (data_size / DATASET_INDEPENDENT_WRITE_TEST_DTYPE_SIZE))] !=
+ (int)i) {
+ H5_FAILED();
+ HDprintf(" dataset 2 data verification failed\n");
+ goto error;
+ }
+ }
+ }
+
+ if (read_buf) {
+ HDfree(read_buf);
+ read_buf = NULL;
+ }
+
+ if (dims) {
+ HDfree(dims);
+ dims = NULL;
+ }
+
+ if (H5Sclose(fspace_id) < 0)
+ TEST_ERROR;
+ if (H5Dclose(dset_id1) < 0)
+ TEST_ERROR;
+ if (H5Dclose(dset_id2) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Pclose(fapl_id) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ if (read_buf)
+ HDfree(read_buf);
+ if (write_buf)
+ HDfree(write_buf);
+ if (dims)
+ HDfree(dims);
+ H5Sclose(mspace_id);
+ H5Sclose(fspace_id);
+ H5Dclose(dset_id1);
+ H5Dclose(dset_id2);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Pclose(fapl_id);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to ensure that a dataset can be written to by having
+ * one of the MPI ranks select 0 rows in a hyperslab selection.
+ */
+#define DATASET_WRITE_ONE_PROC_0_SEL_TEST_SPACE_RANK 2
+#define DATASET_WRITE_ONE_PROC_0_SEL_TEST_DSET_DTYPE H5T_NATIVE_INT
+#define DATASET_WRITE_ONE_PROC_0_SEL_TEST_DTYPE_SIZE sizeof(int)
+#define DATASET_WRITE_ONE_PROC_0_SEL_TEST_GROUP_NAME "one_rank_0_sel_write_test"
+#define DATASET_WRITE_ONE_PROC_0_SEL_TEST_DSET_NAME "one_rank_0_sel_dset"
+static int
+test_write_dataset_one_proc_0_selection(void)
+{
+ hssize_t space_npoints;
+ hsize_t *dims = NULL;
+ hsize_t start[DATASET_WRITE_ONE_PROC_0_SEL_TEST_SPACE_RANK];
+ hsize_t stride[DATASET_WRITE_ONE_PROC_0_SEL_TEST_SPACE_RANK];
+ hsize_t count[DATASET_WRITE_ONE_PROC_0_SEL_TEST_SPACE_RANK];
+ hsize_t block[DATASET_WRITE_ONE_PROC_0_SEL_TEST_SPACE_RANK];
+ size_t i, data_size;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t fapl_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t fspace_id = H5I_INVALID_HID;
+ hid_t mspace_id = H5I_INVALID_HID;
+ void *write_buf = NULL;
+ void *read_buf = NULL;
+
+ TESTING("write to dataset with one rank selecting 0 rows");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
+ SKIPPED();
+ HDprintf(
+ " API functions for basic file, group, or dataset aren't supported with this connector\n");
+ return 0;
+ }
+
+ if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, TRUE)) < 0)
+ TEST_ERROR;
+
+ if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, fapl_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_parallel_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, DATASET_WRITE_ONE_PROC_0_SEL_TEST_GROUP_NAME, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container sub-group '%s'\n",
+ DATASET_WRITE_ONE_PROC_0_SEL_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if (generate_random_parallel_dimensions(DATASET_WRITE_ONE_PROC_0_SEL_TEST_SPACE_RANK, &dims) < 0)
+ TEST_ERROR;
+
+ if ((fspace_id = H5Screate_simple(DATASET_WRITE_ONE_PROC_0_SEL_TEST_SPACE_RANK, dims, NULL)) < 0)
+ TEST_ERROR;
+
+ if ((dset_id = H5Dcreate2(group_id, DATASET_WRITE_ONE_PROC_0_SEL_TEST_DSET_NAME,
+ DATASET_WRITE_ONE_PROC_0_SEL_TEST_DSET_DTYPE, fspace_id, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create dataset '%s'\n", DATASET_WRITE_ONE_PROC_0_SEL_TEST_DSET_NAME);
+ goto error;
+ }
+
+ for (i = 1, data_size = 1; i < DATASET_WRITE_ONE_PROC_0_SEL_TEST_SPACE_RANK; i++)
+ data_size *= dims[i];
+ data_size *= DATASET_WRITE_ONE_PROC_0_SEL_TEST_DTYPE_SIZE;
+
+ BEGIN_INDEPENDENT_OP(write_buf_alloc)
+ {
+ if (!MAINPROCESS) {
+ if (NULL == (write_buf = HDmalloc(data_size))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for dataset write\n");
+ INDEPENDENT_OP_ERROR(write_buf_alloc);
+ }
+
+ for (i = 0; i < data_size / DATASET_WRITE_ONE_PROC_0_SEL_TEST_DTYPE_SIZE; i++)
+ ((int *)write_buf)[i] = mpi_rank;
+ }
+ }
+ END_INDEPENDENT_OP(write_buf_alloc);
+
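+ /* Each rank selects the row at index 'mpi_rank'; the main process selects 0 rows by
+ * using zero-sized count and block values */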
+ for (i = 0; i < DATASET_WRITE_ONE_PROC_0_SEL_TEST_SPACE_RANK; i++) {
+ if (i == 0) {
+ start[i] = (hsize_t)mpi_rank;
+ block[i] = MAINPROCESS ? 0 : 1;
+ }
+ else {
+ start[i] = 0;
+ block[i] = MAINPROCESS ? 0 : dims[i];
+ }
+
+ stride[i] = 1;
+ count[i] = MAINPROCESS ? 0 : 1;
+ }
+
+ if (H5Sselect_hyperslab(fspace_id, H5S_SELECT_SET, start, stride, count, block) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't select hyperslab for dataset write\n");
+ goto error;
+ }
+
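+ /* Size the 1-D memory dataspace to match this rank's selection (zero elements on the main process) */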
+ {
+ hsize_t mdims[] = {data_size / DATASET_WRITE_ONE_PROC_0_SEL_TEST_DTYPE_SIZE};
+
+ if (MAINPROCESS)
+ mdims[0] = 0;
+
+ if ((mspace_id = H5Screate_simple(1, mdims, NULL)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create memory dataspace\n");
+ goto error;
+ }
+ }
+
+ BEGIN_INDEPENDENT_OP(dset_write)
+ {
+ if (H5Dwrite(dset_id, DATASET_WRITE_ONE_PROC_0_SEL_TEST_DSET_DTYPE, mspace_id, fspace_id, H5P_DEFAULT,
+ write_buf) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't write to dataset '%s'\n", DATASET_WRITE_ONE_PROC_0_SEL_TEST_DSET_NAME);
+ INDEPENDENT_OP_ERROR(dset_write);
+ }
+ }
+ END_INDEPENDENT_OP(dset_write);
+
+ if (write_buf) {
+ HDfree(write_buf);
+ write_buf = NULL;
+ }
+ if (mspace_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(mspace_id);
+ }
+ H5E_END_TRY;
+ mspace_id = H5I_INVALID_HID;
+ }
+ if (fspace_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(fspace_id);
+ }
+ H5E_END_TRY;
+ fspace_id = H5I_INVALID_HID;
+ }
+ if (dset_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Dclose(dset_id);
+ }
+ H5E_END_TRY;
+ dset_id = H5I_INVALID_HID;
+ }
+
+ /*
+ * Close and re-open the file to ensure that the data gets written.
+ */
+ if (H5Gclose(group_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close test's container group\n");
+ goto error;
+ }
+ if (H5Gclose(container_group) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close container group\n");
+ goto error;
+ }
+ if (H5Fclose(file_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close file for data flushing\n");
+ goto error;
+ }
+ if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, fapl_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't re-open file '%s'\n", H5_api_test_parallel_filename);
+ goto error;
+ }
+ if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME);
+ goto error;
+ }
+ if ((group_id = H5Gopen2(container_group, DATASET_WRITE_ONE_PROC_0_SEL_TEST_GROUP_NAME, H5P_DEFAULT)) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container sub-group '%s'\n",
+ DATASET_WRITE_ONE_PROC_0_SEL_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((dset_id = H5Dopen2(group_id, DATASET_WRITE_ONE_PROC_0_SEL_TEST_DSET_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open dataset '%s'\n", DATASET_WRITE_ONE_PROC_0_SEL_TEST_DSET_NAME);
+ goto error;
+ }
+
+ if ((fspace_id = H5Dget_space(dset_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get dataset dataspace\n");
+ goto error;
+ }
+
+ if ((space_npoints = H5Sget_simple_extent_npoints(fspace_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get dataspace num points\n");
+ goto error;
+ }
+
+ if (NULL ==
+ (read_buf = HDmalloc((hsize_t)space_npoints * DATASET_WRITE_ONE_PROC_0_SEL_TEST_DTYPE_SIZE))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for dataset read\n");
+ goto error;
+ }
+
+ if (H5Dread(dset_id, DATASET_WRITE_ONE_PROC_0_SEL_TEST_DSET_DTYPE, H5S_ALL, H5S_ALL, H5P_DEFAULT,
+ read_buf) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't read from dataset '%s'\n", DATASET_WRITE_ONE_PROC_0_SEL_TEST_DSET_NAME);
+ goto error;
+ }
+
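+ /* Verify each rank's row of the dataset; skip rank 0's row, since that rank selected no elements */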
+ for (i = 0; i < (size_t)mpi_size; i++) {
+ size_t j;
+
+ if (i != 0) {
+ for (j = 0; j < data_size / DATASET_WRITE_ONE_PROC_0_SEL_TEST_DTYPE_SIZE; j++) {
+ if (((int *)read_buf)[j + (i * (data_size / DATASET_WRITE_ONE_PROC_0_SEL_TEST_DTYPE_SIZE))] !=
+ (int)i) {
+ H5_FAILED();
+ HDprintf(" data verification failed\n");
+ goto error;
+ }
+ }
+ }
+ }
+
+ if (read_buf) {
+ HDfree(read_buf);
+ read_buf = NULL;
+ }
+
+ if (dims) {
+ HDfree(dims);
+ dims = NULL;
+ }
+
+ if (H5Sclose(fspace_id) < 0)
+ TEST_ERROR;
+ if (H5Dclose(dset_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Pclose(fapl_id) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ if (read_buf)
+ HDfree(read_buf);
+ if (write_buf)
+ HDfree(write_buf);
+ if (dims)
+ HDfree(dims);
+ H5Sclose(mspace_id);
+ H5Sclose(fspace_id);
+ H5Dclose(dset_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Pclose(fapl_id);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to ensure that a dataset can be written to by having
+ * one of the MPI ranks call H5Sselect_none.
+ */
+#define DATASET_WRITE_ONE_PROC_NONE_SEL_TEST_SPACE_RANK 2
+#define DATASET_WRITE_ONE_PROC_NONE_SEL_TEST_DSET_DTYPE H5T_NATIVE_INT
+#define DATASET_WRITE_ONE_PROC_NONE_SEL_TEST_DTYPE_SIZE sizeof(int)
+#define DATASET_WRITE_ONE_PROC_NONE_SEL_TEST_GROUP_NAME "one_rank_none_sel_write_test"
+#define DATASET_WRITE_ONE_PROC_NONE_SEL_TEST_DSET_NAME "one_rank_none_sel_dset"
+static int
+test_write_dataset_one_proc_none_selection(void)
+{
+ hssize_t space_npoints;
+ hsize_t *dims = NULL;
+ hsize_t start[DATASET_WRITE_ONE_PROC_NONE_SEL_TEST_SPACE_RANK];
+ hsize_t stride[DATASET_WRITE_ONE_PROC_NONE_SEL_TEST_SPACE_RANK];
+ hsize_t count[DATASET_WRITE_ONE_PROC_NONE_SEL_TEST_SPACE_RANK];
+ hsize_t block[DATASET_WRITE_ONE_PROC_NONE_SEL_TEST_SPACE_RANK];
+ size_t i, data_size;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t fapl_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t fspace_id = H5I_INVALID_HID;
+ hid_t mspace_id = H5I_INVALID_HID;
+ void *write_buf = NULL;
+ void *read_buf = NULL;
+
+ TESTING("write to dataset with one rank using 'none' selection");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
+ SKIPPED();
+ HDprintf(
+ " API functions for basic file, group, or dataset aren't supported with this connector\n");
+ return 0;
+ }
+
+ if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, TRUE)) < 0)
+ TEST_ERROR;
+
+ if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, fapl_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_parallel_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, DATASET_WRITE_ONE_PROC_NONE_SEL_TEST_GROUP_NAME, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container sub-group '%s'\n",
+ DATASET_WRITE_ONE_PROC_NONE_SEL_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if (generate_random_parallel_dimensions(DATASET_WRITE_ONE_PROC_NONE_SEL_TEST_SPACE_RANK, &dims) < 0)
+ TEST_ERROR;
+
+ if ((fspace_id = H5Screate_simple(DATASET_WRITE_ONE_PROC_NONE_SEL_TEST_SPACE_RANK, dims, NULL)) < 0)
+ TEST_ERROR;
+
+ if ((dset_id = H5Dcreate2(group_id, DATASET_WRITE_ONE_PROC_NONE_SEL_TEST_DSET_NAME,
+ DATASET_WRITE_ONE_PROC_NONE_SEL_TEST_DSET_DTYPE, fspace_id, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create dataset '%s'\n", DATASET_WRITE_ONE_PROC_NONE_SEL_TEST_DSET_NAME);
+ goto error;
+ }
+
+ for (i = 1, data_size = 1; i < DATASET_WRITE_ONE_PROC_NONE_SEL_TEST_SPACE_RANK; i++)
+ data_size *= dims[i];
+ data_size *= DATASET_WRITE_ONE_PROC_NONE_SEL_TEST_DTYPE_SIZE;
+
+ BEGIN_INDEPENDENT_OP(write_buf_alloc)
+ {
+ if (!MAINPROCESS) {
+ if (NULL == (write_buf = HDmalloc(data_size))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for dataset write\n");
+ INDEPENDENT_OP_ERROR(write_buf_alloc);
+ }
+
+ for (i = 0; i < data_size / DATASET_WRITE_ONE_PROC_NONE_SEL_TEST_DTYPE_SIZE; i++)
+ ((int *)write_buf)[i] = mpi_rank;
+ }
+ }
+ END_INDEPENDENT_OP(write_buf_alloc);
+
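+ /* Set up a one-row hyperslab per rank; the main process overrides this below with a 'none' selection */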
+ for (i = 0; i < DATASET_WRITE_ONE_PROC_NONE_SEL_TEST_SPACE_RANK; i++) {
+ if (i == 0) {
+ start[i] = (hsize_t)mpi_rank;
+ block[i] = 1;
+ }
+ else {
+ start[i] = 0;
+ block[i] = dims[i];
+ }
+
+ stride[i] = 1;
+ count[i] = 1;
+ }
+
+ BEGIN_INDEPENDENT_OP(set_space_sel)
+ {
+ if (MAINPROCESS) {
+ if (H5Sselect_none(fspace_id) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't set 'none' selection for dataset write\n");
+ INDEPENDENT_OP_ERROR(set_space_sel);
+ }
+ }
+ else {
+ if (H5Sselect_hyperslab(fspace_id, H5S_SELECT_SET, start, stride, count, block) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't select hyperslab for dataset write\n");
+ INDEPENDENT_OP_ERROR(set_space_sel);
+ }
+ }
+ }
+ END_INDEPENDENT_OP(set_space_sel);
+
+ {
+ hsize_t mdims[] = {data_size / DATASET_WRITE_ONE_PROC_NONE_SEL_TEST_DTYPE_SIZE};
+
+ if (MAINPROCESS)
+ mdims[0] = 0;
+
+ if ((mspace_id = H5Screate_simple(1, mdims, NULL)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create memory dataspace\n");
+ goto error;
+ }
+ }
+
+ BEGIN_INDEPENDENT_OP(dset_write)
+ {
+ if (H5Dwrite(dset_id, DATASET_WRITE_ONE_PROC_NONE_SEL_TEST_DSET_DTYPE, mspace_id, fspace_id,
+ H5P_DEFAULT, write_buf) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't write to dataset '%s'\n", DATASET_WRITE_ONE_PROC_NONE_SEL_TEST_DSET_NAME);
+ INDEPENDENT_OP_ERROR(dset_write);
+ }
+ }
+ END_INDEPENDENT_OP(dset_write);
+
+ if (write_buf) {
+ HDfree(write_buf);
+ write_buf = NULL;
+ }
+ if (mspace_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(mspace_id);
+ }
+ H5E_END_TRY;
+ mspace_id = H5I_INVALID_HID;
+ }
+ if (fspace_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(fspace_id);
+ }
+ H5E_END_TRY;
+ fspace_id = H5I_INVALID_HID;
+ }
+ if (dset_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Dclose(dset_id);
+ }
+ H5E_END_TRY;
+ dset_id = H5I_INVALID_HID;
+ }
+
+ /*
+ * Close and re-open the file to ensure that the data gets written.
+ */
+ if (H5Gclose(group_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close test's container group\n");
+ goto error;
+ }
+ if (H5Gclose(container_group) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close container group\n");
+ goto error;
+ }
+ if (H5Fclose(file_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close file for data flushing\n");
+ goto error;
+ }
+ if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, fapl_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't re-open file '%s'\n", H5_api_test_parallel_filename);
+ goto error;
+ }
+ if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME);
+ goto error;
+ }
+ if ((group_id = H5Gopen2(container_group, DATASET_WRITE_ONE_PROC_NONE_SEL_TEST_GROUP_NAME, H5P_DEFAULT)) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container sub-group '%s'\n",
+ DATASET_WRITE_ONE_PROC_NONE_SEL_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((dset_id = H5Dopen2(group_id, DATASET_WRITE_ONE_PROC_NONE_SEL_TEST_DSET_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open dataset '%s'\n", DATASET_WRITE_ONE_PROC_NONE_SEL_TEST_DSET_NAME);
+ goto error;
+ }
+
+ if ((fspace_id = H5Dget_space(dset_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get dataset dataspace\n");
+ goto error;
+ }
+
+ if ((space_npoints = H5Sget_simple_extent_npoints(fspace_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get dataspace num points\n");
+ goto error;
+ }
+
+ if (NULL ==
+ (read_buf = HDmalloc((hsize_t)space_npoints * DATASET_WRITE_ONE_PROC_NONE_SEL_TEST_DTYPE_SIZE))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for dataset read\n");
+ goto error;
+ }
+
+ if (H5Dread(dset_id, DATASET_WRITE_ONE_PROC_NONE_SEL_TEST_DSET_DTYPE, H5S_ALL, H5S_ALL, H5P_DEFAULT,
+ read_buf) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't read from dataset '%s'\n", DATASET_WRITE_ONE_PROC_NONE_SEL_TEST_DSET_NAME);
+ goto error;
+ }
+
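+ /* Skip verification of rank 0's row, since that rank made a 'none' selection */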
+ for (i = 0; i < (size_t)mpi_size; i++) {
+ size_t j;
+
+ if (i != 0) {
+ for (j = 0; j < data_size / DATASET_WRITE_ONE_PROC_NONE_SEL_TEST_DTYPE_SIZE; j++) {
+ if (((int *)
+ read_buf)[j + (i * (data_size / DATASET_WRITE_ONE_PROC_NONE_SEL_TEST_DTYPE_SIZE))] !=
+ (int)i) {
+ H5_FAILED();
+ HDprintf(" data verification failed\n");
+ goto error;
+ }
+ }
+ }
+ }
+
+ if (read_buf) {
+ HDfree(read_buf);
+ read_buf = NULL;
+ }
+
+ if (dims) {
+ HDfree(dims);
+ dims = NULL;
+ }
+
+ if (H5Sclose(fspace_id) < 0)
+ TEST_ERROR;
+ if (H5Dclose(dset_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Pclose(fapl_id) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ if (read_buf)
+ HDfree(read_buf);
+ if (write_buf)
+ HDfree(write_buf);
+ if (dims)
+ HDfree(dims);
+ H5Sclose(mspace_id);
+ H5Sclose(fspace_id);
+ H5Dclose(dset_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Pclose(fapl_id);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to ensure that a dataset can be written to by having
+ * one of the MPI ranks use an ALL selection, while the other
+ * ranks write nothing.
+ */
+#define DATASET_WRITE_ONE_PROC_ALL_SEL_TEST_SPACE_RANK 2
+#define DATASET_WRITE_ONE_PROC_ALL_SEL_TEST_DSET_DTYPE H5T_NATIVE_INT
+#define DATASET_WRITE_ONE_PROC_ALL_SEL_TEST_DTYPE_SIZE sizeof(int)
+#define DATASET_WRITE_ONE_PROC_ALL_SEL_TEST_GROUP_NAME "one_rank_all_sel_write_test"
+#define DATASET_WRITE_ONE_PROC_ALL_SEL_TEST_DSET_NAME "one_rank_all_sel_dset"
+static int
+test_write_dataset_one_proc_all_selection(void)
+{
+ hssize_t space_npoints;
+ hsize_t *dims = NULL;
+ size_t i, data_size;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t fapl_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t fspace_id = H5I_INVALID_HID;
+ hid_t mspace_id = H5I_INVALID_HID;
+ void *write_buf = NULL;
+ void *read_buf = NULL;
+
+ TESTING("write to dataset with one rank using all selection; others none selection");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
+ SKIPPED();
+ HDprintf(
+ " API functions for basic file, group, or dataset aren't supported with this connector\n");
+ return 0;
+ }
+
+ if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, TRUE)) < 0)
+ TEST_ERROR;
+
+ if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, fapl_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_parallel_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, DATASET_WRITE_ONE_PROC_ALL_SEL_TEST_GROUP_NAME, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container sub-group '%s'\n",
+ DATASET_WRITE_ONE_PROC_ALL_SEL_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if (generate_random_parallel_dimensions(DATASET_WRITE_ONE_PROC_ALL_SEL_TEST_SPACE_RANK, &dims) < 0)
+ TEST_ERROR;
+
+ if ((fspace_id = H5Screate_simple(DATASET_WRITE_ONE_PROC_ALL_SEL_TEST_SPACE_RANK, dims, NULL)) < 0)
+ TEST_ERROR;
+
+ if ((dset_id = H5Dcreate2(group_id, DATASET_WRITE_ONE_PROC_ALL_SEL_TEST_DSET_NAME,
+ DATASET_WRITE_ONE_PROC_ALL_SEL_TEST_DSET_DTYPE, fspace_id, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create dataset '%s'\n", DATASET_WRITE_ONE_PROC_ALL_SEL_TEST_DSET_NAME);
+ goto error;
+ }
+
+ for (i = 0, data_size = 1; i < DATASET_WRITE_ONE_PROC_ALL_SEL_TEST_SPACE_RANK; i++)
+ data_size *= dims[i];
+ data_size *= DATASET_WRITE_ONE_PROC_ALL_SEL_TEST_DTYPE_SIZE;
+
+ BEGIN_INDEPENDENT_OP(write_buf_alloc)
+ {
+ if (MAINPROCESS) {
+ if (NULL == (write_buf = HDmalloc(data_size))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for dataset write\n");
+ INDEPENDENT_OP_ERROR(write_buf_alloc);
+ }
+
+ for (i = 0; i < data_size / DATASET_WRITE_ONE_PROC_ALL_SEL_TEST_DTYPE_SIZE; i++)
+ ((int *)write_buf)[i] = (int)i;
+ }
+ }
+ END_INDEPENDENT_OP(write_buf_alloc);
+
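+ /* Rank 0 selects the entire file dataspace, while every other rank selects nothing */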
+ BEGIN_INDEPENDENT_OP(set_space_sel)
+ {
+ if (MAINPROCESS) {
+ if (H5Sselect_all(fspace_id) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't set 'all' selection for dataset write\n");
+ INDEPENDENT_OP_ERROR(set_space_sel);
+ }
+ }
+ else {
+ if (H5Sselect_none(fspace_id) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't set 'none' selection for dataset write\n");
+ INDEPENDENT_OP_ERROR(set_space_sel);
+ }
+ }
+ }
+ END_INDEPENDENT_OP(set_space_sel);
+
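+ /* Only rank 0 supplies data, so the other ranks use a zero-sized memory dataspace */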
+ {
+ hsize_t mdims[] = {data_size / DATASET_WRITE_ONE_PROC_ALL_SEL_TEST_DTYPE_SIZE};
+
+ if (!MAINPROCESS)
+ mdims[0] = 0;
+
+ if ((mspace_id = H5Screate_simple(1, mdims, NULL)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create memory dataspace\n");
+ goto error;
+ }
+ }
+
+ BEGIN_INDEPENDENT_OP(dset_write)
+ {
+ if (H5Dwrite(dset_id, DATASET_WRITE_ONE_PROC_ALL_SEL_TEST_DSET_DTYPE, mspace_id, fspace_id,
+ H5P_DEFAULT, write_buf) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't write to dataset '%s'\n", DATASET_WRITE_ONE_PROC_ALL_SEL_TEST_DSET_NAME);
+ INDEPENDENT_OP_ERROR(dset_write);
+ }
+ }
+ END_INDEPENDENT_OP(dset_write);
+
+ if (write_buf) {
+ HDfree(write_buf);
+ write_buf = NULL;
+ }
+ if (mspace_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(mspace_id);
+ }
+ H5E_END_TRY;
+ mspace_id = H5I_INVALID_HID;
+ }
+ if (fspace_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(fspace_id);
+ }
+ H5E_END_TRY;
+ fspace_id = H5I_INVALID_HID;
+ }
+ if (dset_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Dclose(dset_id);
+ }
+ H5E_END_TRY;
+ dset_id = H5I_INVALID_HID;
+ }
+
+ /*
+ * Close and re-open the file to ensure that the data gets written.
+ */
+ if (H5Gclose(group_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close test's container group\n");
+ goto error;
+ }
+ if (H5Gclose(container_group) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close container group\n");
+ goto error;
+ }
+ if (H5Fclose(file_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close file for data flushing\n");
+ goto error;
+ }
+ if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, fapl_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't re-open file '%s'\n", H5_api_test_parallel_filename);
+ goto error;
+ }
+ if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME);
+ goto error;
+ }
+ if ((group_id = H5Gopen2(container_group, DATASET_WRITE_ONE_PROC_ALL_SEL_TEST_GROUP_NAME, H5P_DEFAULT)) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container sub-group '%s'\n",
+ DATASET_WRITE_ONE_PROC_ALL_SEL_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((dset_id = H5Dopen2(group_id, DATASET_WRITE_ONE_PROC_ALL_SEL_TEST_DSET_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open dataset '%s'\n", DATASET_WRITE_ONE_PROC_ALL_SEL_TEST_DSET_NAME);
+ goto error;
+ }
+
+ if ((fspace_id = H5Dget_space(dset_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get dataset dataspace\n");
+ goto error;
+ }
+
+ if ((space_npoints = H5Sget_simple_extent_npoints(fspace_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get dataspace num points\n");
+ goto error;
+ }
+
+ if (NULL ==
+ (read_buf = HDmalloc((hsize_t)space_npoints * DATASET_WRITE_ONE_PROC_ALL_SEL_TEST_DTYPE_SIZE))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for dataset read\n");
+ goto error;
+ }
+
+ if (H5Dread(dset_id, DATASET_WRITE_ONE_PROC_ALL_SEL_TEST_DSET_DTYPE, H5S_ALL, H5S_ALL, H5P_DEFAULT,
+ read_buf) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't read from dataset '%s'\n", DATASET_WRITE_ONE_PROC_ALL_SEL_TEST_DSET_NAME);
+ goto error;
+ }
+
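+ /* Verify the full dataset contents written by rank 0 */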
+ for (i = 0; i < data_size / DATASET_WRITE_ONE_PROC_ALL_SEL_TEST_DTYPE_SIZE; i++) {
+ if (((int *)read_buf)[i] != (int)i) {
+ H5_FAILED();
+ HDprintf(" data verification failed\n");
+ goto error;
+ }
+ }
+
+ if (read_buf) {
+ HDfree(read_buf);
+ read_buf = NULL;
+ }
+
+ if (dims) {
+ HDfree(dims);
+ dims = NULL;
+ }
+
+ if (H5Sclose(fspace_id) < 0)
+ TEST_ERROR;
+ if (H5Dclose(dset_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Pclose(fapl_id) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ if (read_buf)
+ HDfree(read_buf);
+ if (write_buf)
+ HDfree(write_buf);
+ if (dims)
+ HDfree(dims);
+ H5Sclose(mspace_id);
+ H5Sclose(fspace_id);
+ H5Dclose(dset_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Pclose(fapl_id);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to ensure that a dataset can be written to by having
+ * a hyperslab selection in the file dataspace and an all selection
+ * in the memory dataspace.
+ *
+ * XXX: Currently pulls from invalid memory locations.
+ */
+#define DATASET_WRITE_HYPER_FILE_ALL_MEM_TEST_SPACE_RANK 2
+#define DATASET_WRITE_HYPER_FILE_ALL_MEM_TEST_DSET_DTYPE H5T_NATIVE_INT
+#define DATASET_WRITE_HYPER_FILE_ALL_MEM_TEST_DTYPE_SIZE sizeof(int)
+#define DATASET_WRITE_HYPER_FILE_ALL_MEM_TEST_GROUP_NAME "hyper_sel_file_all_sel_mem_write_test"
+#define DATASET_WRITE_HYPER_FILE_ALL_MEM_TEST_DSET_NAME "hyper_sel_file_all_sel_mem_dset"
+static int
+test_write_dataset_hyper_file_all_mem(void)
+{
+#ifdef BROKEN
+ hssize_t space_npoints;
+ hsize_t *dims = NULL;
+ hsize_t start[DATASET_WRITE_HYPER_FILE_ALL_MEM_TEST_SPACE_RANK];
+ hsize_t stride[DATASET_WRITE_HYPER_FILE_ALL_MEM_TEST_SPACE_RANK];
+ hsize_t count[DATASET_WRITE_HYPER_FILE_ALL_MEM_TEST_SPACE_RANK];
+ hsize_t block[DATASET_WRITE_HYPER_FILE_ALL_MEM_TEST_SPACE_RANK];
+ size_t i, data_size;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t fapl_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t fspace_id = H5I_INVALID_HID;
+ hid_t mspace_id = H5I_INVALID_HID;
+ void *write_buf = NULL;
+ void *read_buf = NULL;
+#endif
+
+ TESTING("write to dataset with hyperslab sel. for file space; all sel. for memory");
+
+#ifdef BROKEN
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) {
+ SKIPPED();
+ HDprintf(
+ " API functions for basic file, group, or dataset aren't supported with this connector\n");
+ return 0;
+ }
+
+ if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, TRUE)) < 0)
+ TEST_ERROR;
+
+ if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, fapl_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_parallel_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, DATASET_WRITE_HYPER_FILE_ALL_MEM_TEST_GROUP_NAME, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container sub-group '%s'\n",
+ DATASET_WRITE_HYPER_FILE_ALL_MEM_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if (generate_random_parallel_dimensions(DATASET_WRITE_HYPER_FILE_ALL_MEM_TEST_SPACE_RANK, &dims) < 0)
+ TEST_ERROR;
+
+ if ((fspace_id = H5Screate_simple(DATASET_WRITE_HYPER_FILE_ALL_MEM_TEST_SPACE_RANK, dims, NULL)) < 0)
+ TEST_ERROR;
+
+ if ((dset_id = H5Dcreate2(group_id, DATASET_WRITE_HYPER_FILE_ALL_MEM_TEST_DSET_NAME,
+ DATASET_WRITE_HYPER_FILE_ALL_MEM_TEST_DSET_DTYPE, fspace_id, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create dataset '%s'\n", DATASET_WRITE_HYPER_FILE_ALL_MEM_TEST_DSET_NAME);
+ goto error;
+ }
+
+ for (i = 1, data_size = 1; i < DATASET_WRITE_HYPER_FILE_ALL_MEM_TEST_SPACE_RANK; i++)
+ data_size *= dims[i];
+ data_size *= DATASET_WRITE_HYPER_FILE_ALL_MEM_TEST_DTYPE_SIZE;
+
+ if (NULL == (write_buf = HDmalloc(data_size))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for dataset write\n");
+ goto error;
+ }
+
+ for (i = 0; i < data_size / DATASET_WRITE_HYPER_FILE_ALL_MEM_TEST_DTYPE_SIZE; i++)
+ ((int *)write_buf)[i] = mpi_rank;
+
+ for (i = 0; i < DATASET_WRITE_HYPER_FILE_ALL_MEM_TEST_SPACE_RANK; i++) {
+ if (i == 0) {
+ start[i] = (hsize_t)mpi_rank;
+ block[i] = 1;
+ }
+ else {
+ start[i] = 0;
+ block[i] = dims[i];
+ }
+
+ stride[i] = 1;
+ count[i] = 1;
+ }
+
+ if (H5Sselect_hyperslab(fspace_id, H5S_SELECT_SET, start, stride, count, block) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't select hyperslab for dataset write\n");
+ goto error;
+ }
+
+ if (H5Dwrite(dset_id, DATASET_WRITE_HYPER_FILE_ALL_MEM_TEST_DSET_DTYPE, H5S_ALL, fspace_id, H5P_DEFAULT,
+ write_buf) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't write to dataset '%s'\n", DATASET_WRITE_HYPER_FILE_ALL_MEM_TEST_DSET_NAME);
+ goto error;
+ }
+
+ if (write_buf) {
+ HDfree(write_buf);
+ write_buf = NULL;
+ }
+ if (fspace_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(fspace_id);
+ }
+ H5E_END_TRY;
+ fspace_id = H5I_INVALID_HID;
+ }
+ if (dset_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Dclose(dset_id);
+ }
+ H5E_END_TRY;
+ dset_id = H5I_INVALID_HID;
+ }
+
+ /*
+ * Close and re-open the file to ensure that the data gets written.
+ */
+ if (H5Gclose(group_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close test's container group\n");
+ goto error;
+ }
+ if (H5Gclose(container_group) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close container group\n");
+ goto error;
+ }
+ if (H5Fclose(file_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close file for data flushing\n");
+ goto error;
+ }
+ if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, fapl_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't re-open file '%s'\n", H5_api_test_parallel_filename);
+ goto error;
+ }
+ if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME);
+ goto error;
+ }
+ if ((group_id =
+ H5Gopen2(container_group, DATASET_WRITE_HYPER_FILE_ALL_MEM_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container sub-group '%s'\n",
+ DATASET_WRITE_HYPER_FILE_ALL_MEM_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((dset_id = H5Dopen2(group_id, DATASET_WRITE_HYPER_FILE_ALL_MEM_TEST_DSET_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open dataset '%s'\n", DATASET_WRITE_HYPER_FILE_ALL_MEM_TEST_DSET_NAME);
+ goto error;
+ }
+
+ if ((fspace_id = H5Dget_space(dset_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get dataset dataspace\n");
+ goto error;
+ }
+
+ if ((space_npoints = H5Sget_simple_extent_npoints(fspace_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get dataspace num points\n");
+ goto error;
+ }
+
+ if (NULL ==
+ (read_buf = HDmalloc((hsize_t)space_npoints * DATASET_WRITE_HYPER_FILE_ALL_MEM_TEST_DTYPE_SIZE))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for dataset read\n");
+ goto error;
+ }
+
+ if (H5Dread(dset_id, DATASET_WRITE_HYPER_FILE_ALL_MEM_TEST_DSET_DTYPE, H5S_ALL, H5S_ALL, H5P_DEFAULT,
+ read_buf) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't read from dataset '%s'\n", DATASET_WRITE_HYPER_FILE_ALL_MEM_TEST_DSET_NAME);
+ goto error;
+ }
+
+ for (i = 0; i < (size_t)mpi_size; i++) {
+ size_t j;
+
+ for (j = 0; j < data_size / DATASET_WRITE_HYPER_FILE_ALL_MEM_TEST_DTYPE_SIZE; j++) {
+ if (((int *)read_buf)[j + (i * (data_size / DATASET_WRITE_HYPER_FILE_ALL_MEM_TEST_DTYPE_SIZE))] !=
+ (int)i) {
+ H5_FAILED();
+ HDprintf(" data verification failed\n");
+ goto error;
+ }
+ }
+ }
+
+ if (read_buf) {
+ HDfree(read_buf);
+ read_buf = NULL;
+ }
+
+ if (dims) {
+ HDfree(dims);
+ dims = NULL;
+ }
+
+ if (H5Sclose(fspace_id) < 0)
+ TEST_ERROR;
+ if (H5Dclose(dset_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Pclose(fapl_id) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+#else
+ SKIPPED();
+#endif
+
+ return 0;
+
+#ifdef BROKEN
+error:
+ H5E_BEGIN_TRY
+ {
+ if (read_buf)
+ HDfree(read_buf);
+ if (write_buf)
+ HDfree(write_buf);
+ if (dims)
+ HDfree(dims);
+ H5Sclose(mspace_id);
+ H5Sclose(fspace_id);
+ H5Dclose(dset_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Pclose(fapl_id);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+#endif
+}
+
+/*
+ * A test to ensure that a dataset can be written to by having
+ * an all selection in the file dataspace and a hyperslab
+ * selection in the memory dataspace.
+ */
+#define DATASET_WRITE_ALL_FILE_HYPER_MEM_TEST_SPACE_RANK 2
+#define DATASET_WRITE_ALL_FILE_HYPER_MEM_TEST_DSET_DTYPE H5T_NATIVE_INT
+#define DATASET_WRITE_ALL_FILE_HYPER_MEM_TEST_DTYPE_SIZE sizeof(int)
+#define DATASET_WRITE_ALL_FILE_HYPER_MEM_TEST_GROUP_NAME "all_sel_file_hyper_sel_mem_write_test"
+#define DATASET_WRITE_ALL_FILE_HYPER_MEM_TEST_DSET_NAME "all_sel_file_hyper_sel_mem_dset"
+static int
+test_write_dataset_all_file_hyper_mem(void)
+{
+ hssize_t space_npoints;
+ hsize_t *dims = NULL;
+ size_t i, data_size;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t fapl_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t fspace_id = H5I_INVALID_HID;
+ hid_t mspace_id = H5I_INVALID_HID;
+ void *write_buf = NULL;
+ void *read_buf = NULL;
+
+ TESTING("write to dataset with all sel. for file space; hyperslab sel. for memory");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
+ SKIPPED();
+ HDprintf(
+ " API functions for basic file, group, or dataset aren't supported with this connector\n");
+ return 0;
+ }
+
+ if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, TRUE)) < 0)
+ TEST_ERROR;
+
+ if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, fapl_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_parallel_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, DATASET_WRITE_ALL_FILE_HYPER_MEM_TEST_GROUP_NAME, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container sub-group '%s'\n",
+ DATASET_WRITE_ALL_FILE_HYPER_MEM_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if (generate_random_parallel_dimensions(DATASET_WRITE_ALL_FILE_HYPER_MEM_TEST_SPACE_RANK, &dims) < 0)
+ TEST_ERROR;
+
+ if ((fspace_id = H5Screate_simple(DATASET_WRITE_ALL_FILE_HYPER_MEM_TEST_SPACE_RANK, dims, NULL)) < 0)
+ TEST_ERROR;
+
+ if ((dset_id = H5Dcreate2(group_id, DATASET_WRITE_ALL_FILE_HYPER_MEM_TEST_DSET_NAME,
+ DATASET_WRITE_ALL_FILE_HYPER_MEM_TEST_DSET_DTYPE, fspace_id, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create dataset '%s'\n", DATASET_WRITE_ALL_FILE_HYPER_MEM_TEST_DSET_NAME);
+ goto error;
+ }
+
+ for (i = 0, data_size = 1; i < DATASET_WRITE_ALL_FILE_HYPER_MEM_TEST_SPACE_RANK; i++)
+ data_size *= dims[i];
+ data_size *= DATASET_WRITE_ALL_FILE_HYPER_MEM_TEST_DTYPE_SIZE;
+
+ BEGIN_INDEPENDENT_OP(write_buf_alloc)
+ {
+ if (MAINPROCESS) {
+ /*
+ * Allocate twice the amount of memory needed and leave "holes" in the memory
+ * buffer in order to prove that the mapping from hyperslab selection <-> all
+ * selection works correctly.
+ */
+ if (NULL == (write_buf = HDmalloc(2 * data_size))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for dataset write\n");
+ INDEPENDENT_OP_ERROR(write_buf_alloc);
+ }
+
+ for (i = 0; i < 2 * (data_size / DATASET_WRITE_ALL_FILE_HYPER_MEM_TEST_DTYPE_SIZE); i++) {
+ /* Write actual data to even indices */
+ if (i % 2 == 0)
+ ((int *)write_buf)[i] = (int)((i / 2) + (i % 2));
+ else
+ ((int *)write_buf)[i] = 0;
+ }
+ }
+ }
+ END_INDEPENDENT_OP(write_buf_alloc);
+
+ /*
+ * Only have rank 0 perform the dataset write; writing the entire dataset from every rank
+ * could strain system resources, and the outcome would be undefined since the ranks'
+ * writes would overlap.
+ */
+ BEGIN_INDEPENDENT_OP(dset_write)
+ {
+ if (MAINPROCESS) {
+ hsize_t start[1] = {0};
+ hsize_t stride[1] = {2};
+ hsize_t count[1] = {data_size / DATASET_WRITE_ALL_FILE_HYPER_MEM_TEST_DTYPE_SIZE};
+ hsize_t block[1] = {1};
+ hsize_t mdims[] = {2 * (data_size / DATASET_WRITE_ALL_FILE_HYPER_MEM_TEST_DTYPE_SIZE)};
+
+ if ((mspace_id = H5Screate_simple(1, mdims, NULL)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create memory dataspace\n");
+ INDEPENDENT_OP_ERROR(dset_write);
+ }
+
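+ /* Select every other element of the doubled memory buffer (stride 2) so that only the
+ * real data at even indices is written */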
+ if (H5Sselect_hyperslab(mspace_id, H5S_SELECT_SET, start, stride, count, block) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't select hyperslab for dataset write\n");
+ INDEPENDENT_OP_ERROR(dset_write);
+ }
+
+ if (H5Dwrite(dset_id, DATASET_WRITE_ALL_FILE_HYPER_MEM_TEST_DSET_DTYPE, mspace_id, H5S_ALL,
+ H5P_DEFAULT, write_buf) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't write to dataset '%s'\n",
+ DATASET_WRITE_ALL_FILE_HYPER_MEM_TEST_DSET_NAME);
+ INDEPENDENT_OP_ERROR(dset_write);
+ }
+ }
+ }
+ END_INDEPENDENT_OP(dset_write);
+
+ if (write_buf) {
+ HDfree(write_buf);
+ write_buf = NULL;
+ }
+ if (mspace_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(mspace_id);
+ }
+ H5E_END_TRY;
+ mspace_id = H5I_INVALID_HID;
+ }
+ if (fspace_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(fspace_id);
+ }
+ H5E_END_TRY;
+ fspace_id = H5I_INVALID_HID;
+ }
+ if (dset_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Dclose(dset_id);
+ }
+ H5E_END_TRY;
+ dset_id = H5I_INVALID_HID;
+ }
+
+ /*
+ * Close and re-open the file to ensure that the data gets written.
+ */
+ if (H5Gclose(group_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close test's container group\n");
+ goto error;
+ }
+ if (H5Gclose(container_group) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close container group\n");
+ goto error;
+ }
+ if (H5Fclose(file_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close file for data flushing\n");
+ goto error;
+ }
+ if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, fapl_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't re-open file '%s'\n", H5_api_test_parallel_filename);
+ goto error;
+ }
+ if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME);
+ goto error;
+ }
+ if ((group_id =
+ H5Gopen2(container_group, DATASET_WRITE_ALL_FILE_HYPER_MEM_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container sub-group '%s'\n",
+ DATASET_WRITE_ALL_FILE_HYPER_MEM_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((dset_id = H5Dopen2(group_id, DATASET_WRITE_ALL_FILE_HYPER_MEM_TEST_DSET_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open dataset '%s'\n", DATASET_WRITE_ALL_FILE_HYPER_MEM_TEST_DSET_NAME);
+ goto error;
+ }
+
+ if ((fspace_id = H5Dget_space(dset_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get dataset dataspace\n");
+ goto error;
+ }
+
+ if ((space_npoints = H5Sget_simple_extent_npoints(fspace_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get dataspace num points\n");
+ goto error;
+ }
+
+ if (NULL ==
+ (read_buf = HDmalloc((hsize_t)space_npoints * DATASET_WRITE_ALL_FILE_HYPER_MEM_TEST_DTYPE_SIZE))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for dataset read\n");
+ goto error;
+ }
+
+ if (H5Dread(dset_id, DATASET_WRITE_ALL_FILE_HYPER_MEM_TEST_DSET_DTYPE, H5S_ALL, H5S_ALL, H5P_DEFAULT,
+ read_buf) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't read from dataset '%s'\n", DATASET_WRITE_ALL_FILE_HYPER_MEM_TEST_DSET_NAME);
+ goto error;
+ }
+
+ for (i = 0; i < data_size / DATASET_WRITE_ALL_FILE_HYPER_MEM_TEST_DTYPE_SIZE; i++) {
+ if (((int *)read_buf)[i] != (int)i) {
+ H5_FAILED();
+ HDprintf(" data verification failed\n");
+ goto error;
+ }
+ }
+
+ if (read_buf) {
+ HDfree(read_buf);
+ read_buf = NULL;
+ }
+
+ if (dims) {
+ HDfree(dims);
+ dims = NULL;
+ }
+
+ if (H5Sclose(fspace_id) < 0)
+ TEST_ERROR;
+ if (H5Dclose(dset_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Pclose(fapl_id) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ if (read_buf)
+ HDfree(read_buf);
+ if (write_buf)
+ HDfree(write_buf);
+ if (dims)
+ HDfree(dims);
+ H5Sclose(mspace_id);
+ H5Sclose(fspace_id);
+ H5Dclose(dset_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Pclose(fapl_id);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to ensure that a dataset can be written to by having
+ * a point selection in the file dataspace and an all selection
+ * in the memory dataspace.
+ */
+static int
+test_write_dataset_point_file_all_mem(void)
+{
+ TESTING("write to dataset with point sel. for file space; all sel. for memory");
+
+ SKIPPED();
+
+ return 0;
+}
+
+/*
+ * A test to ensure that a dataset can be written to by having
+ * an all selection in the file dataspace and a point selection
+ * in the memory dataspace.
+ */
+#define DATASET_WRITE_ALL_FILE_POINT_MEM_TEST_SPACE_RANK 2
+#define DATASET_WRITE_ALL_FILE_POINT_MEM_TEST_DSET_DTYPE H5T_NATIVE_INT
+#define DATASET_WRITE_ALL_FILE_POINT_MEM_TEST_DTYPE_SIZE sizeof(int)
+#define DATASET_WRITE_ALL_FILE_POINT_MEM_TEST_GROUP_NAME "all_sel_file_point_sel_mem_write_test"
+#define DATASET_WRITE_ALL_FILE_POINT_MEM_TEST_DSET_NAME "all_sel_file_point_sel_mem_dset"
+static int
+test_write_dataset_all_file_point_mem(void)
+{
+ hssize_t space_npoints;
+ hsize_t *points = NULL;
+ hsize_t *dims = NULL;
+ size_t i, data_size;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t fapl_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t fspace_id = H5I_INVALID_HID;
+ hid_t mspace_id = H5I_INVALID_HID;
+ void *write_buf = NULL;
+ void *read_buf = NULL;
+
+ TESTING("write to dataset with all sel. for file space; point sel. for memory");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) {
+ SKIPPED();
+ HDprintf(
+ " API functions for basic file, group, or dataset aren't supported with this connector\n");
+ return 0;
+ }
+
+ if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, TRUE)) < 0)
+ TEST_ERROR;
+
+ if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, fapl_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_parallel_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, DATASET_WRITE_ALL_FILE_POINT_MEM_TEST_GROUP_NAME, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container sub-group '%s'\n",
+ DATASET_WRITE_ALL_FILE_POINT_MEM_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if (generate_random_parallel_dimensions(DATASET_WRITE_ALL_FILE_POINT_MEM_TEST_SPACE_RANK, &dims) < 0)
+ TEST_ERROR;
+
+ if ((fspace_id = H5Screate_simple(DATASET_WRITE_ALL_FILE_POINT_MEM_TEST_SPACE_RANK, dims, NULL)) < 0)
+ TEST_ERROR;
+
+ if ((dset_id = H5Dcreate2(group_id, DATASET_WRITE_ALL_FILE_POINT_MEM_TEST_DSET_NAME,
+ DATASET_WRITE_ALL_FILE_POINT_MEM_TEST_DSET_DTYPE, fspace_id, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create dataset '%s'\n", DATASET_WRITE_ALL_FILE_POINT_MEM_TEST_DSET_NAME);
+ goto error;
+ }
+
+ for (i = 0, data_size = 1; i < DATASET_WRITE_ALL_FILE_POINT_MEM_TEST_SPACE_RANK; i++)
+ data_size *= dims[i];
+ data_size *= DATASET_WRITE_ALL_FILE_POINT_MEM_TEST_DTYPE_SIZE;
+
+ BEGIN_INDEPENDENT_OP(write_buf_alloc)
+ {
+ if (MAINPROCESS) {
+ /*
+ * Allocate twice the amount of memory needed and leave "holes" in the memory
+ * buffer in order to prove that the mapping from point selection <-> all
+ * selection works correctly.
+ */
+ if (NULL == (write_buf = HDmalloc(2 * data_size))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for dataset write\n");
+ INDEPENDENT_OP_ERROR(write_buf_alloc);
+ }
+
+ for (i = 0; i < 2 * (data_size / DATASET_WRITE_ALL_FILE_POINT_MEM_TEST_DTYPE_SIZE); i++) {
+ /* Write actual data to even indices */
+ if (i % 2 == 0)
+ ((int *)write_buf)[i] = (int)((i / 2) + (i % 2));
+ else
+ ((int *)write_buf)[i] = 0;
+ }
+ }
+ }
+ END_INDEPENDENT_OP(write_buf_alloc);
+
+ /*
+ * Only have rank 0 perform the dataset write; writing the entire dataset from every rank
+ * could strain system resources, and the outcome would be undefined since the ranks'
+ * writes would overlap.
+ */
+ BEGIN_INDEPENDENT_OP(dset_write)
+ {
+ if (MAINPROCESS) {
+ hsize_t mdims[] = {2 * (data_size / DATASET_WRITE_ALL_FILE_POINT_MEM_TEST_DTYPE_SIZE)};
+ int j;
+
+ if ((mspace_id = H5Screate_simple(1, mdims, NULL)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create memory dataspace\n");
+ INDEPENDENT_OP_ERROR(dset_write);
+ }
+
+ if (NULL == (points = HDmalloc((data_size / DATASET_WRITE_ALL_FILE_POINT_MEM_TEST_DTYPE_SIZE) *
+ sizeof(hsize_t)))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for point selection\n");
+ INDEPENDENT_OP_ERROR(dset_write);
+ }
+
+ /* Select every other point in the 1-dimensional memory dataspace */
+ for (i = 0, j = 0; i < 2 * (data_size / DATASET_WRITE_ALL_FILE_POINT_MEM_TEST_DTYPE_SIZE); i++) {
+ if (i % 2 == 0)
+ points[j++] = (hsize_t)i;
+ }
+
+ if (H5Sselect_elements(mspace_id, H5S_SELECT_SET,
+ data_size / DATASET_WRITE_ALL_FILE_POINT_MEM_TEST_DTYPE_SIZE,
+ points) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't set point selection for dataset write\n");
+ INDEPENDENT_OP_ERROR(dset_write);
+ }
+
+ if (H5Dwrite(dset_id, DATASET_WRITE_ALL_FILE_POINT_MEM_TEST_DSET_DTYPE, mspace_id, H5S_ALL,
+ H5P_DEFAULT, write_buf) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't write to dataset '%s'\n",
+ DATASET_WRITE_ALL_FILE_POINT_MEM_TEST_DSET_NAME);
+ INDEPENDENT_OP_ERROR(dset_write);
+ }
+ }
+ }
+ END_INDEPENDENT_OP(dset_write);
+
+ if (write_buf) {
+ HDfree(write_buf);
+ write_buf = NULL;
+ }
+ if (points) {
+ HDfree(points);
+ points = NULL;
+ }
+ if (mspace_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(mspace_id);
+ }
+ H5E_END_TRY;
+ mspace_id = H5I_INVALID_HID;
+ }
+ if (fspace_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(fspace_id);
+ }
+ H5E_END_TRY;
+ fspace_id = H5I_INVALID_HID;
+ }
+ if (dset_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Dclose(dset_id);
+ }
+ H5E_END_TRY;
+ dset_id = H5I_INVALID_HID;
+ }
+
+ /*
+ * Close and re-open the file to ensure that the data gets written.
+ */
+ if (H5Gclose(group_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close test's container group\n");
+ goto error;
+ }
+ if (H5Gclose(container_group) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close container group\n");
+ goto error;
+ }
+ if (H5Fclose(file_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close file for data flushing\n");
+ goto error;
+ }
+ if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, fapl_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't re-open file '%s'\n", H5_api_test_parallel_filename);
+ goto error;
+ }
+ if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME);
+ goto error;
+ }
+ if ((group_id =
+ H5Gopen2(container_group, DATASET_WRITE_ALL_FILE_POINT_MEM_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container sub-group '%s'\n",
+ DATASET_WRITE_ALL_FILE_POINT_MEM_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((dset_id = H5Dopen2(group_id, DATASET_WRITE_ALL_FILE_POINT_MEM_TEST_DSET_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open dataset '%s'\n", DATASET_WRITE_ALL_FILE_POINT_MEM_TEST_DSET_NAME);
+ goto error;
+ }
+
+ if ((fspace_id = H5Dget_space(dset_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get dataset dataspace\n");
+ goto error;
+ }
+
+ if ((space_npoints = H5Sget_simple_extent_npoints(fspace_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get dataspace num points\n");
+ goto error;
+ }
+
+ if (NULL ==
+ (read_buf = HDmalloc((hsize_t)space_npoints * DATASET_WRITE_ALL_FILE_POINT_MEM_TEST_DTYPE_SIZE))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for dataset read\n");
+ goto error;
+ }
+
+ if (H5Dread(dset_id, DATASET_WRITE_ALL_FILE_POINT_MEM_TEST_DSET_DTYPE, H5S_ALL, H5S_ALL, H5P_DEFAULT,
+ read_buf) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't read from dataset '%s'\n", DATASET_WRITE_ALL_FILE_POINT_MEM_TEST_DSET_NAME);
+ goto error;
+ }
+
+ for (i = 0; i < data_size / DATASET_WRITE_ALL_FILE_POINT_MEM_TEST_DTYPE_SIZE; i++) {
+ if (((int *)read_buf)[i] != (int)i) {
+ H5_FAILED();
+ HDprintf(" data verification failed\n");
+ goto error;
+ }
+ }
+
+ if (read_buf) {
+ HDfree(read_buf);
+ read_buf = NULL;
+ }
+
+ if (dims) {
+ HDfree(dims);
+ dims = NULL;
+ }
+
+ if (H5Sclose(fspace_id) < 0)
+ TEST_ERROR;
+ if (H5Dclose(dset_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Pclose(fapl_id) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ if (read_buf)
+ HDfree(read_buf);
+ if (write_buf)
+ HDfree(write_buf);
+ if (points)
+ HDfree(points);
+ if (dims)
+ HDfree(dims);
+ H5Sclose(mspace_id);
+ H5Sclose(fspace_id);
+ H5Dclose(dset_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Pclose(fapl_id);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to ensure that a dataset can be written to by having
+ * a hyperslab selection in the file dataspace and a point
+ * selection in the memory dataspace.
+ */
+#define DATASET_WRITE_HYPER_FILE_POINT_MEM_TEST_SPACE_RANK 2
+#define DATASET_WRITE_HYPER_FILE_POINT_MEM_TEST_DSET_DTYPE H5T_NATIVE_INT
+#define DATASET_WRITE_HYPER_FILE_POINT_MEM_TEST_DTYPE_SIZE sizeof(int)
+#define DATASET_WRITE_HYPER_FILE_POINT_MEM_TEST_GROUP_NAME "hyper_sel_file_point_sel_mem_write_test"
+#define DATASET_WRITE_HYPER_FILE_POINT_MEM_TEST_DSET_NAME "hyper_sel_file_point_sel_mem_dset"
+static int
+test_write_dataset_hyper_file_point_mem(void)
+{
+ hssize_t space_npoints;
+ hsize_t *dims = NULL;
+ hsize_t *points = NULL;
+ hsize_t start[DATASET_WRITE_HYPER_FILE_POINT_MEM_TEST_SPACE_RANK];
+ hsize_t stride[DATASET_WRITE_HYPER_FILE_POINT_MEM_TEST_SPACE_RANK];
+ hsize_t count[DATASET_WRITE_HYPER_FILE_POINT_MEM_TEST_SPACE_RANK];
+ hsize_t block[DATASET_WRITE_HYPER_FILE_POINT_MEM_TEST_SPACE_RANK];
+ size_t i, data_size;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t fapl_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t fspace_id = H5I_INVALID_HID;
+ hid_t mspace_id = H5I_INVALID_HID;
+ void *write_buf = NULL;
+ void *read_buf = NULL;
+
+ TESTING("write to dataset with hyperslab sel. for file space; point sel. for memory");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
+ SKIPPED();
+ HDprintf(
+ " API functions for basic file, group, or dataset aren't supported with this connector\n");
+ return 0;
+ }
+
+ if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, TRUE)) < 0)
+ TEST_ERROR;
+
+ if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, fapl_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_parallel_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, DATASET_WRITE_HYPER_FILE_POINT_MEM_TEST_GROUP_NAME,
+ H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container sub-group '%s'\n",
+ DATASET_WRITE_HYPER_FILE_POINT_MEM_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if (generate_random_parallel_dimensions(DATASET_WRITE_HYPER_FILE_POINT_MEM_TEST_SPACE_RANK, &dims) < 0)
+ TEST_ERROR;
+
+ if ((fspace_id = H5Screate_simple(DATASET_WRITE_HYPER_FILE_POINT_MEM_TEST_SPACE_RANK, dims, NULL)) < 0)
+ TEST_ERROR;
+
+ if ((dset_id = H5Dcreate2(group_id, DATASET_WRITE_HYPER_FILE_POINT_MEM_TEST_DSET_NAME,
+ DATASET_WRITE_HYPER_FILE_POINT_MEM_TEST_DSET_DTYPE, fspace_id, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create dataset '%s'\n", DATASET_WRITE_HYPER_FILE_POINT_MEM_TEST_DSET_NAME);
+ goto error;
+ }
+
+ for (i = 1, data_size = 1; i < DATASET_WRITE_HYPER_FILE_POINT_MEM_TEST_SPACE_RANK; i++)
+ data_size *= dims[i];
+ data_size *= DATASET_WRITE_HYPER_FILE_POINT_MEM_TEST_DTYPE_SIZE;
+
+ /*
+ * Allocate twice the amount of memory needed and leave "holes" in the memory
+ * buffer in order to prove that the mapping from point selection <-> hyperslab
+ * selection works correctly.
+ */
+ if (NULL == (write_buf = HDmalloc(2 * data_size))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for dataset write\n");
+ goto error;
+ }
+
+ for (i = 0; i < 2 * (data_size / DATASET_WRITE_HYPER_FILE_POINT_MEM_TEST_DTYPE_SIZE); i++) {
+ /* Write actual data to even indices */
+ if (i % 2 == 0)
+ ((int *)write_buf)[i] = mpi_rank;
+ else
+ ((int *)write_buf)[i] = 0;
+ }
+
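+ /* Each rank selects its own row of the file dataspace */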
+ for (i = 0; i < DATASET_WRITE_HYPER_FILE_POINT_MEM_TEST_SPACE_RANK; i++) {
+ if (i == 0) {
+ start[i] = (hsize_t)mpi_rank;
+ block[i] = 1;
+ }
+ else {
+ start[i] = 0;
+ block[i] = dims[i];
+ }
+
+ stride[i] = 1;
+ count[i] = 1;
+ }
+
+ if (H5Sselect_hyperslab(fspace_id, H5S_SELECT_SET, start, stride, count, block) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't select hyperslab for dataset write\n");
+ goto error;
+ }
+
+ {
+ hsize_t mdims[] = {2 * (data_size / DATASET_WRITE_HYPER_FILE_POINT_MEM_TEST_DTYPE_SIZE)};
+ int j;
+
+ if ((mspace_id = H5Screate_simple(1, mdims, NULL)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create memory dataspace\n");
+ goto error;
+ }
+
+ if (NULL == (points = HDmalloc((data_size / DATASET_WRITE_HYPER_FILE_POINT_MEM_TEST_DTYPE_SIZE) *
+ sizeof(hsize_t)))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for point selection\n");
+ goto error;
+ }
+
+ /* Select every other point in the 1-dimensional memory dataspace */
+ for (i = 0, j = 0; i < 2 * (data_size / DATASET_WRITE_HYPER_FILE_POINT_MEM_TEST_DTYPE_SIZE); i++) {
+ if (i % 2 == 0)
+ points[j++] = (hsize_t)i;
+ }
+
+ if (H5Sselect_elements(mspace_id, H5S_SELECT_SET,
+ data_size / DATASET_WRITE_HYPER_FILE_POINT_MEM_TEST_DTYPE_SIZE, points) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't set point selection for dataset write\n");
+ goto error;
+ }
+ }
+
+ if (H5Dwrite(dset_id, DATASET_WRITE_HYPER_FILE_POINT_MEM_TEST_DSET_DTYPE, mspace_id, fspace_id,
+ H5P_DEFAULT, write_buf) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't write to dataset '%s'\n", DATASET_WRITE_HYPER_FILE_POINT_MEM_TEST_DSET_NAME);
+ goto error;
+ }
+
+ if (write_buf) {
+ HDfree(write_buf);
+ write_buf = NULL;
+ }
+ if (points) {
+ HDfree(points);
+ points = NULL;
+ }
+ if (mspace_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(mspace_id);
+ }
+ H5E_END_TRY;
+ mspace_id = H5I_INVALID_HID;
+ }
+ if (fspace_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(fspace_id);
+ }
+ H5E_END_TRY;
+ fspace_id = H5I_INVALID_HID;
+ }
+ if (dset_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Dclose(dset_id);
+ }
+ H5E_END_TRY;
+ dset_id = H5I_INVALID_HID;
+ }
+
+ /*
+ * Close and re-open the file to ensure that the data gets written.
+ */
+ if (H5Gclose(group_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close test's container group\n");
+ goto error;
+ }
+ if (H5Gclose(container_group) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close container group\n");
+ goto error;
+ }
+ if (H5Fclose(file_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close file for data flushing\n");
+ goto error;
+ }
+ if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, fapl_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't re-open file '%s'\n", H5_api_test_parallel_filename);
+ goto error;
+ }
+ if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME);
+ goto error;
+ }
+ if ((group_id = H5Gopen2(container_group, DATASET_WRITE_HYPER_FILE_POINT_MEM_TEST_GROUP_NAME,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container sub-group '%s'\n",
+ DATASET_WRITE_HYPER_FILE_POINT_MEM_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((dset_id = H5Dopen2(group_id, DATASET_WRITE_HYPER_FILE_POINT_MEM_TEST_DSET_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open dataset '%s'\n", DATASET_WRITE_HYPER_FILE_POINT_MEM_TEST_DSET_NAME);
+ goto error;
+ }
+
+ if ((fspace_id = H5Dget_space(dset_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get dataset dataspace\n");
+ goto error;
+ }
+
+ if ((space_npoints = H5Sget_simple_extent_npoints(fspace_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get dataspace num points\n");
+ goto error;
+ }
+
+ if (NULL ==
+ (read_buf = HDmalloc((hsize_t)space_npoints * DATASET_WRITE_HYPER_FILE_POINT_MEM_TEST_DTYPE_SIZE))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for dataset read\n");
+ goto error;
+ }
+
+ if (H5Dread(dset_id, DATASET_WRITE_HYPER_FILE_POINT_MEM_TEST_DSET_DTYPE, H5S_ALL, H5S_ALL, H5P_DEFAULT,
+ read_buf) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't read from dataset '%s'\n", DATASET_WRITE_HYPER_FILE_POINT_MEM_TEST_DSET_NAME);
+ goto error;
+ }
+
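+ /* Each row of the dataset should contain the value of the MPI rank that wrote it */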
+ for (i = 0; i < (size_t)mpi_size; i++) {
+ size_t j;
+
+ for (j = 0; j < data_size / DATASET_WRITE_HYPER_FILE_POINT_MEM_TEST_DTYPE_SIZE; j++) {
+ if (((int *)
+ read_buf)[j + (i * (data_size / DATASET_WRITE_HYPER_FILE_POINT_MEM_TEST_DTYPE_SIZE))] !=
+ (int)i) {
+ H5_FAILED();
+ HDprintf(" data verification failed\n");
+ goto error;
+ }
+ }
+ }
+
+ if (read_buf) {
+ HDfree(read_buf);
+ read_buf = NULL;
+ }
+
+ if (dims) {
+ HDfree(dims);
+ dims = NULL;
+ }
+
+ if (H5Sclose(fspace_id) < 0)
+ TEST_ERROR;
+ if (H5Dclose(dset_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Pclose(fapl_id) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ if (read_buf)
+ HDfree(read_buf);
+ if (write_buf)
+ HDfree(write_buf);
+ if (points)
+ HDfree(points);
+ if (dims)
+ HDfree(dims);
+ H5Sclose(mspace_id);
+ H5Sclose(fspace_id);
+ H5Dclose(dset_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Pclose(fapl_id);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to ensure that a dataset can be written to by having
+ * a point selection in the file dataspace and a hyperslab
+ * selection in the memory dataspace.
+ */
+#define DATASET_WRITE_POINT_FILE_HYPER_MEM_TEST_SPACE_RANK 2
+#define DATASET_WRITE_POINT_FILE_HYPER_MEM_TEST_DSET_DTYPE H5T_NATIVE_INT
+#define DATASET_WRITE_POINT_FILE_HYPER_MEM_TEST_DTYPE_SIZE sizeof(int)
+#define DATASET_WRITE_POINT_FILE_HYPER_MEM_TEST_GROUP_NAME "point_sel_file_hyper_sel_mem_write_test"
+#define DATASET_WRITE_POINT_FILE_HYPER_MEM_TEST_DSET_NAME "point_sel_file_hyper_sel_mem_dset"
+static int
+test_write_dataset_point_file_hyper_mem(void)
+{
+ hssize_t space_npoints;
+ hsize_t *dims = NULL;
+ hsize_t *points = NULL;
+ size_t i, data_size;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t fapl_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t fspace_id = H5I_INVALID_HID;
+ hid_t mspace_id = H5I_INVALID_HID;
+ void *write_buf = NULL;
+ void *read_buf = NULL;
+
+ TESTING("write to dataset with point sel. for file space; hyperslab sel. for memory");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
+ SKIPPED();
+ HDprintf(
+ " API functions for basic file, group, or dataset aren't supported with this connector\n");
+ return 0;
+ }
+
+ if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, TRUE)) < 0)
+ TEST_ERROR;
+
+ if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, fapl_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_parallel_filename);
+ goto error;
+ }
+
+ if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((group_id = H5Gcreate2(container_group, DATASET_WRITE_POINT_FILE_HYPER_MEM_TEST_GROUP_NAME,
+ H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container sub-group '%s'\n",
+ DATASET_WRITE_POINT_FILE_HYPER_MEM_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if (generate_random_parallel_dimensions(DATASET_WRITE_POINT_FILE_HYPER_MEM_TEST_SPACE_RANK, &dims) < 0)
+ TEST_ERROR;
+
+ if ((fspace_id = H5Screate_simple(DATASET_WRITE_POINT_FILE_HYPER_MEM_TEST_SPACE_RANK, dims, NULL)) < 0)
+ TEST_ERROR;
+
+ if ((dset_id = H5Dcreate2(group_id, DATASET_WRITE_POINT_FILE_HYPER_MEM_TEST_DSET_NAME,
+ DATASET_WRITE_POINT_FILE_HYPER_MEM_TEST_DSET_DTYPE, fspace_id, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create dataset '%s'\n", DATASET_WRITE_POINT_FILE_HYPER_MEM_TEST_DSET_NAME);
+ goto error;
+ }
+
+ for (i = 1, data_size = 1; i < DATASET_WRITE_POINT_FILE_HYPER_MEM_TEST_SPACE_RANK; i++)
+ data_size *= dims[i];
+ data_size *= DATASET_WRITE_POINT_FILE_HYPER_MEM_TEST_DTYPE_SIZE;
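+ /* data_size deliberately skips dims[0]: it is the amount of data each
+ * rank writes -- a single "row" of the outermost dimension. */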
+
+ /*
+ * Allocate twice the amount of memory needed and leave "holes" in the memory
+ * buffer in order to prove that the mapping from hyperslab selection <-> point
+ * selection works correctly.
+ */
+ if (NULL == (write_buf = HDmalloc(2 * data_size))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for dataset write\n");
+ goto error;
+ }
+
+ for (i = 0; i < 2 * (data_size / DATASET_WRITE_POINT_FILE_HYPER_MEM_TEST_DTYPE_SIZE); i++) {
+ /* Write actual data to even indices */
+ if (i % 2 == 0)
+ ((int *)write_buf)[i] = mpi_rank;
+ else
+ ((int *)write_buf)[i] = 0;
+ }
+
+ if (NULL == (points = HDmalloc((data_size / DATASET_WRITE_POINT_FILE_HYPER_MEM_TEST_DTYPE_SIZE) *
+ DATASET_WRITE_POINT_FILE_HYPER_MEM_TEST_SPACE_RANK * sizeof(hsize_t)))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for point selection\n");
+ goto error;
+ }
+
+ for (i = 0; i < data_size / DATASET_WRITE_POINT_FILE_HYPER_MEM_TEST_DTYPE_SIZE; i++) {
+ size_t j;
+
+ for (j = 0; j < DATASET_WRITE_POINT_FILE_HYPER_MEM_TEST_SPACE_RANK; j++) {
+ size_t idx = (i * (size_t)DATASET_WRITE_POINT_FILE_HYPER_MEM_TEST_SPACE_RANK) + j;
+
+ if (j == 0)
+ points[idx] = (hsize_t)mpi_rank;
+ else if (j != (size_t)DATASET_WRITE_POINT_FILE_HYPER_MEM_TEST_SPACE_RANK - 1)
+ points[idx] = i / dims[j + 1];
+ else
+ points[idx] = i % dims[j];
+ }
+ }
+
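+ /*
+ * For the 2-dimensional dataspace used here, the coordinates above are
+ * (mpi_rank, 0), (mpi_rank, 1), ..., i.e. each rank selects every
+ * element of its own row in the file dataspace.
+ */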
+ if (H5Sselect_elements(fspace_id, H5S_SELECT_SET,
+ data_size / DATASET_WRITE_POINT_FILE_HYPER_MEM_TEST_DTYPE_SIZE, points) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't set point selection for dataset write\n");
+ goto error;
+ }
+
+ {
+ hsize_t start[1] = {0};
+ hsize_t stride[1] = {2};
+ hsize_t count[1] = {data_size / DATASET_WRITE_POINT_FILE_HYPER_MEM_TEST_DTYPE_SIZE};
+ hsize_t block[1] = {1};
+ hsize_t mdims[] = {2 * (data_size / DATASET_WRITE_POINT_FILE_HYPER_MEM_TEST_DTYPE_SIZE)};
+
+ if ((mspace_id = H5Screate_simple(1, mdims, NULL)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create memory dataspace\n");
+ goto error;
+ }
+
+ if (H5Sselect_hyperslab(mspace_id, H5S_SELECT_SET, start, stride, count, block) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't set hyperslab selection for dataset write\n");
+ goto error;
+ }
+ }
+
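+ /*
+ * The stride-2 hyperslab selects memory indices 0, 2, 4, ... -- exactly
+ * the even indices that were filled with real data above -- so the
+ * odd-index "holes" are skipped by the write below.
+ */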
+ if (H5Dwrite(dset_id, DATASET_WRITE_POINT_FILE_HYPER_MEM_TEST_DSET_DTYPE, mspace_id, fspace_id,
+ H5P_DEFAULT, write_buf) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't write to dataset '%s'\n", DATASET_WRITE_POINT_FILE_HYPER_MEM_TEST_DSET_NAME);
+ goto error;
+ }
+
+ if (write_buf) {
+ HDfree(write_buf);
+ write_buf = NULL;
+ }
+ if (points) {
+ HDfree(points);
+ points = NULL;
+ }
+ if (mspace_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(mspace_id);
+ }
+ H5E_END_TRY;
+ mspace_id = H5I_INVALID_HID;
+ }
+ if (fspace_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(fspace_id);
+ }
+ H5E_END_TRY;
+ fspace_id = H5I_INVALID_HID;
+ }
+ if (dset_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Dclose(dset_id);
+ }
+ H5E_END_TRY;
+ dset_id = H5I_INVALID_HID;
+ }
+
+ /*
+ * Close and re-open the file to ensure that the data gets written.
+ */
+ if (H5Gclose(group_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close test's container group\n");
+ goto error;
+ }
+ if (H5Gclose(container_group) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close container group\n");
+ goto error;
+ }
+ if (H5Fclose(file_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close file for data flushing\n");
+ goto error;
+ }
+ if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, fapl_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't re-open file '%s'\n", H5_api_test_parallel_filename);
+ goto error;
+ }
+ if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME);
+ goto error;
+ }
+ if ((group_id = H5Gopen2(container_group, DATASET_WRITE_POINT_FILE_HYPER_MEM_TEST_GROUP_NAME,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container sub-group '%s'\n",
+ DATASET_WRITE_POINT_FILE_HYPER_MEM_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((dset_id = H5Dopen2(group_id, DATASET_WRITE_POINT_FILE_HYPER_MEM_TEST_DSET_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open dataset '%s'\n", DATASET_WRITE_POINT_FILE_HYPER_MEM_TEST_DSET_NAME);
+ goto error;
+ }
+
+ if ((fspace_id = H5Dget_space(dset_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get dataset dataspace\n");
+ goto error;
+ }
+
+ if ((space_npoints = H5Sget_simple_extent_npoints(fspace_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get dataspace num points\n");
+ goto error;
+ }
+
+ if (NULL ==
+ (read_buf = HDmalloc((hsize_t)space_npoints * DATASET_WRITE_POINT_FILE_HYPER_MEM_TEST_DTYPE_SIZE))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for dataset read\n");
+ goto error;
+ }
+
+ if (H5Dread(dset_id, DATASET_WRITE_POINT_FILE_HYPER_MEM_TEST_DSET_DTYPE, H5S_ALL, H5S_ALL, H5P_DEFAULT,
+ read_buf) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't read from dataset '%s'\n", DATASET_WRITE_POINT_FILE_HYPER_MEM_TEST_DSET_NAME);
+ goto error;
+ }
+
+ for (i = 0; i < (size_t)mpi_size; i++) {
+ size_t j;
+
+ for (j = 0; j < data_size / DATASET_WRITE_POINT_FILE_HYPER_MEM_TEST_DTYPE_SIZE; j++) {
+ if (((int *)
+ read_buf)[j + (i * (data_size / DATASET_WRITE_POINT_FILE_HYPER_MEM_TEST_DTYPE_SIZE))] !=
+ (int)i) {
+ H5_FAILED();
+ HDprintf(" data verification failed\n");
+ goto error;
+ }
+ }
+ }
+
+ if (read_buf) {
+ HDfree(read_buf);
+ read_buf = NULL;
+ }
+
+ if (dims) {
+ HDfree(dims);
+ dims = NULL;
+ }
+
+ if (H5Sclose(fspace_id) < 0)
+ TEST_ERROR;
+ if (H5Dclose(dset_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Pclose(fapl_id) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ if (read_buf)
+ HDfree(read_buf);
+ if (write_buf)
+ HDfree(write_buf);
+ if (points)
+ HDfree(points);
+ if (dims)
+ HDfree(dims);
+ H5Sclose(mspace_id);
+ H5Sclose(fspace_id);
+ H5Dclose(dset_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Pclose(fapl_id);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to ensure that a dataset can be read from by having
+ * one of the MPI ranks select 0 rows in a hyperslab selection.
+ */
+#define DATASET_READ_ONE_PROC_0_SEL_TEST_SPACE_RANK 2
+#define DATASET_READ_ONE_PROC_0_SEL_TEST_DSET_DTYPE H5T_NATIVE_INT
+#define DATASET_READ_ONE_PROC_0_SEL_TEST_DTYPE_SIZE sizeof(int)
+#define DATASET_READ_ONE_PROC_0_SEL_TEST_GROUP_NAME "one_rank_0_sel_read_test"
+#define DATASET_READ_ONE_PROC_0_SEL_TEST_DSET_NAME "one_rank_0_sel_dset"
+static int
+test_read_dataset_one_proc_0_selection(void)
+{
+ hssize_t space_npoints;
+ hsize_t *dims = NULL;
+ hsize_t start[DATASET_READ_ONE_PROC_0_SEL_TEST_SPACE_RANK];
+ hsize_t stride[DATASET_READ_ONE_PROC_0_SEL_TEST_SPACE_RANK];
+ hsize_t count[DATASET_READ_ONE_PROC_0_SEL_TEST_SPACE_RANK];
+ hsize_t block[DATASET_READ_ONE_PROC_0_SEL_TEST_SPACE_RANK];
+ size_t i, data_size, read_buf_size;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t fapl_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t fspace_id = H5I_INVALID_HID;
+ hid_t mspace_id = H5I_INVALID_HID;
+ void *write_buf = NULL;
+ void *read_buf = NULL;
+
+ TESTING("read from dataset with one rank selecting 0 rows");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
+ SKIPPED();
+ HDprintf(
+ " API functions for basic file, group, or dataset aren't supported with this connector\n");
+ return 0;
+ }
+
+ if (generate_random_parallel_dimensions(DATASET_READ_ONE_PROC_0_SEL_TEST_SPACE_RANK, &dims) < 0)
+ TEST_ERROR;
+
+ /*
+ * Have rank 0 create the dataset and completely fill it with data.
+ */
+ BEGIN_INDEPENDENT_OP(dset_create)
+ {
+ if (MAINPROCESS) {
+ if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_parallel_filename);
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME);
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ if ((group_id = H5Gcreate2(container_group, DATASET_READ_ONE_PROC_0_SEL_TEST_GROUP_NAME,
+ H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container sub-group '%s'\n",
+ DATASET_READ_ONE_PROC_0_SEL_TEST_GROUP_NAME);
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ if ((fspace_id = H5Screate_simple(DATASET_READ_ONE_PROC_0_SEL_TEST_SPACE_RANK, dims, NULL)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to create file dataspace for dataset\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ if ((dset_id = H5Dcreate2(group_id, DATASET_READ_ONE_PROC_0_SEL_TEST_DSET_NAME,
+ DATASET_READ_ONE_PROC_0_SEL_TEST_DSET_DTYPE, fspace_id, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create dataset '%s'\n", DATASET_READ_ONE_PROC_0_SEL_TEST_DSET_NAME);
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ for (i = 0, data_size = 1; i < DATASET_READ_ONE_PROC_0_SEL_TEST_SPACE_RANK; i++)
+ data_size *= dims[i];
+ data_size *= DATASET_READ_ONE_PROC_0_SEL_TEST_DTYPE_SIZE;
+
+ if (NULL == (write_buf = HDmalloc(data_size))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for dataset write\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ for (i = 0; i < (size_t)mpi_size; i++) {
+ size_t j;
+ size_t elem_per_proc = (data_size / DATASET_READ_ONE_PROC_0_SEL_TEST_DTYPE_SIZE) / dims[0];
+
+ for (j = 0; j < elem_per_proc; j++) {
+ size_t idx = (i * elem_per_proc) + j;
+
+ ((int *)write_buf)[idx] = (int)i;
+ }
+ }
+
+ {
+ hsize_t mdims[] = {data_size / DATASET_READ_ONE_PROC_0_SEL_TEST_DTYPE_SIZE};
+
+ if ((mspace_id = H5Screate_simple(1, mdims, NULL)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create memory dataspace\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+ }
+
+ if (H5Dwrite(dset_id, DATASET_READ_ONE_PROC_0_SEL_TEST_DSET_DTYPE, mspace_id, H5S_ALL,
+ H5P_DEFAULT, write_buf) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't write to dataset '%s'\n", DATASET_READ_ONE_PROC_0_SEL_TEST_DSET_NAME);
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ if (write_buf) {
+ HDfree(write_buf);
+ write_buf = NULL;
+ }
+ if (mspace_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(mspace_id);
+ }
+ H5E_END_TRY;
+ mspace_id = H5I_INVALID_HID;
+ }
+ if (fspace_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(fspace_id);
+ }
+ H5E_END_TRY;
+ fspace_id = H5I_INVALID_HID;
+ }
+ if (dset_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Dclose(dset_id);
+ }
+ H5E_END_TRY;
+ dset_id = H5I_INVALID_HID;
+ }
+
+ /*
+ * Close and re-open the file to ensure that the data gets written.
+ */
+ if (H5Gclose(group_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close test's container group\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+ if (H5Gclose(container_group) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close container group\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+ if (H5Fclose(file_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close file for data flushing\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+ }
+ }
+ END_INDEPENDENT_OP(dset_create);
+
+ /*
+ * Re-open file on all ranks.
+ */
+ if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, TRUE)) < 0)
+ TEST_ERROR;
+ if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, fapl_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't re-open file '%s'\n", H5_api_test_parallel_filename);
+ goto error;
+ }
+ if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME);
+ goto error;
+ }
+ if ((group_id = H5Gopen2(container_group, DATASET_READ_ONE_PROC_0_SEL_TEST_GROUP_NAME, H5P_DEFAULT)) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container sub-group '%s'\n", DATASET_READ_ONE_PROC_0_SEL_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((dset_id = H5Dopen2(group_id, DATASET_READ_ONE_PROC_0_SEL_TEST_DSET_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open dataset '%s'\n", DATASET_READ_ONE_PROC_0_SEL_TEST_DSET_NAME);
+ goto error;
+ }
+
+ if ((fspace_id = H5Dget_space(dset_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get dataset dataspace\n");
+ goto error;
+ }
+
+ if ((space_npoints = H5Sget_simple_extent_npoints(fspace_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get dataspace num points\n");
+ goto error;
+ }
+
+ BEGIN_INDEPENDENT_OP(read_buf_alloc)
+ {
+ if (!MAINPROCESS) {
+ read_buf_size =
+ ((size_t)(space_npoints / mpi_size) * DATASET_READ_ONE_PROC_0_SEL_TEST_DTYPE_SIZE);
+
+ if (NULL == (read_buf = HDmalloc(read_buf_size))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for dataset read\n");
+ INDEPENDENT_OP_ERROR(read_buf_alloc);
+ }
+ }
+ }
+ END_INDEPENDENT_OP(read_buf_alloc);
+
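+ /*
+ * Note that rank 0 never allocates read_buf: its file and memory
+ * selections below are empty, so the read transfers no elements on
+ * that rank.
+ */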
+ {
+ hsize_t mdims[] = {(hsize_t)space_npoints / (hsize_t)mpi_size};
+
+ if (MAINPROCESS)
+ mdims[0] = 0;
+
+ if ((mspace_id = H5Screate_simple(1, mdims, NULL)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create memory dataspace\n");
+ goto error;
+ }
+ }
+
+ for (i = 0; i < DATASET_READ_ONE_PROC_0_SEL_TEST_SPACE_RANK; i++) {
+ if (i == 0) {
+ start[i] = (hsize_t)mpi_rank;
+ block[i] = MAINPROCESS ? 0 : 1;
+ }
+ else {
+ start[i] = 0;
+ block[i] = MAINPROCESS ? 0 : dims[i];
+ }
+
+ stride[i] = 1;
+ count[i] = MAINPROCESS ? 0 : 1;
+ }
+
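+ /*
+ * Rank 0 ends up with a zero-sized selection (count and block of 0 in
+ * every dimension), while each other rank selects its own single row of
+ * the dataset.
+ */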
+ if (H5Sselect_hyperslab(fspace_id, H5S_SELECT_SET, start, stride, count, block) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't select hyperslab for dataset read\n");
+ goto error;
+ }
+
+ BEGIN_INDEPENDENT_OP(dset_read)
+ {
+ if (H5Dread(dset_id, DATASET_READ_ONE_PROC_0_SEL_TEST_DSET_DTYPE, mspace_id, fspace_id, H5P_DEFAULT,
+ read_buf) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't read from dataset '%s'\n", DATASET_READ_ONE_PROC_0_SEL_TEST_DSET_NAME);
+ INDEPENDENT_OP_ERROR(dset_read);
+ }
+ }
+ END_INDEPENDENT_OP(dset_read);
+
+ BEGIN_INDEPENDENT_OP(data_verify)
+ {
+ if (!MAINPROCESS) {
+ for (i = 0; i < (size_t)space_npoints / (size_t)mpi_size; i++) {
+ if (((int *)read_buf)[i] != mpi_rank) {
+ H5_FAILED();
+ HDprintf(" data verification failed\n");
+ INDEPENDENT_OP_ERROR(data_verify);
+ }
+ }
+ }
+ }
+ END_INDEPENDENT_OP(data_verify);
+
+ if (read_buf) {
+ HDfree(read_buf);
+ read_buf = NULL;
+ }
+
+ if (dims) {
+ HDfree(dims);
+ dims = NULL;
+ }
+
+ if (H5Sclose(mspace_id) < 0)
+ TEST_ERROR;
+ if (H5Sclose(fspace_id) < 0)
+ TEST_ERROR;
+ if (H5Dclose(dset_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Pclose(fapl_id) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ if (read_buf)
+ HDfree(read_buf);
+ if (write_buf)
+ HDfree(write_buf);
+ if (dims)
+ HDfree(dims);
+ H5Sclose(mspace_id);
+ H5Sclose(fspace_id);
+ H5Dclose(dset_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Pclose(fapl_id);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to ensure that a dataset can be read from by having
+ * one of the MPI ranks call H5Sselect_none.
+ */
+#define DATASET_READ_ONE_PROC_NONE_SEL_TEST_SPACE_RANK 2
+#define DATASET_READ_ONE_PROC_NONE_SEL_TEST_DSET_DTYPE H5T_NATIVE_INT
+#define DATASET_READ_ONE_PROC_NONE_SEL_TEST_DTYPE_SIZE sizeof(int)
+#define DATASET_READ_ONE_PROC_NONE_SEL_TEST_GROUP_NAME "one_rank_none_sel_read_test"
+#define DATASET_READ_ONE_PROC_NONE_SEL_TEST_DSET_NAME "one_rank_none_sel_dset"
+static int
+test_read_dataset_one_proc_none_selection(void)
+{
+ hssize_t space_npoints;
+ hsize_t *dims = NULL;
+ hsize_t start[DATASET_READ_ONE_PROC_NONE_SEL_TEST_SPACE_RANK];
+ hsize_t stride[DATASET_READ_ONE_PROC_NONE_SEL_TEST_SPACE_RANK];
+ hsize_t count[DATASET_READ_ONE_PROC_NONE_SEL_TEST_SPACE_RANK];
+ hsize_t block[DATASET_READ_ONE_PROC_NONE_SEL_TEST_SPACE_RANK];
+ size_t i, data_size, read_buf_size;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t fapl_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t fspace_id = H5I_INVALID_HID;
+ hid_t mspace_id = H5I_INVALID_HID;
+ void *write_buf = NULL;
+ void *read_buf = NULL;
+
+ TESTING("read from dataset with one rank using 'none' selection");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
+ SKIPPED();
+ HDprintf(
+ " API functions for basic file, group, or dataset aren't supported with this connector\n");
+ return 0;
+ }
+
+ if (generate_random_parallel_dimensions(DATASET_READ_ONE_PROC_NONE_SEL_TEST_SPACE_RANK, &dims) < 0)
+ TEST_ERROR;
+
+ /*
+ * Have rank 0 create the dataset and completely fill it with data.
+ */
+ BEGIN_INDEPENDENT_OP(dset_create)
+ {
+ if (MAINPROCESS) {
+ if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_parallel_filename);
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME);
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ if ((group_id = H5Gcreate2(container_group, DATASET_READ_ONE_PROC_NONE_SEL_TEST_GROUP_NAME,
+ H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container sub-group '%s'\n",
+ DATASET_READ_ONE_PROC_NONE_SEL_TEST_GROUP_NAME);
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ if ((fspace_id = H5Screate_simple(DATASET_READ_ONE_PROC_NONE_SEL_TEST_SPACE_RANK, dims, NULL)) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" failed to create file dataspace for dataset\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ if ((dset_id = H5Dcreate2(group_id, DATASET_READ_ONE_PROC_NONE_SEL_TEST_DSET_NAME,
+ DATASET_READ_ONE_PROC_NONE_SEL_TEST_DSET_DTYPE, fspace_id, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create dataset '%s'\n", DATASET_READ_ONE_PROC_NONE_SEL_TEST_DSET_NAME);
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ for (i = 0, data_size = 1; i < DATASET_READ_ONE_PROC_NONE_SEL_TEST_SPACE_RANK; i++)
+ data_size *= dims[i];
+ data_size *= DATASET_READ_ONE_PROC_NONE_SEL_TEST_DTYPE_SIZE;
+
+ if (NULL == (write_buf = HDmalloc(data_size))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for dataset write\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ for (i = 0; i < (size_t)mpi_size; i++) {
+ size_t j;
+ size_t elem_per_proc = (data_size / DATASET_READ_ONE_PROC_NONE_SEL_TEST_DTYPE_SIZE) / dims[0];
+
+ for (j = 0; j < elem_per_proc; j++) {
+ size_t idx = (i * elem_per_proc) + j;
+
+ ((int *)write_buf)[idx] = (int)i;
+ }
+ }
+
+ {
+ hsize_t mdims[] = {data_size / DATASET_READ_ONE_PROC_NONE_SEL_TEST_DTYPE_SIZE};
+
+ if ((mspace_id = H5Screate_simple(1, mdims, NULL)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create memory dataspace\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+ }
+
+ if (H5Dwrite(dset_id, DATASET_READ_ONE_PROC_NONE_SEL_TEST_DSET_DTYPE, mspace_id, H5S_ALL,
+ H5P_DEFAULT, write_buf) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't write to dataset '%s'\n",
+ DATASET_READ_ONE_PROC_NONE_SEL_TEST_DSET_NAME);
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ if (write_buf) {
+ HDfree(write_buf);
+ write_buf = NULL;
+ }
+ if (mspace_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(mspace_id);
+ }
+ H5E_END_TRY;
+ mspace_id = H5I_INVALID_HID;
+ }
+ if (fspace_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(fspace_id);
+ }
+ H5E_END_TRY;
+ fspace_id = H5I_INVALID_HID;
+ }
+ if (dset_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Dclose(dset_id);
+ }
+ H5E_END_TRY;
+ dset_id = H5I_INVALID_HID;
+ }
+
+ /*
+ * Close and re-open the file to ensure that the data gets written.
+ */
+ if (H5Gclose(group_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close test's container group\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+ if (H5Gclose(container_group) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close container group\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+ if (H5Fclose(file_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close file for data flushing\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+ }
+ }
+ END_INDEPENDENT_OP(dset_create);
+
+ /*
+ * Re-open file on all ranks.
+ */
+ if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, TRUE)) < 0)
+ TEST_ERROR;
+ if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, fapl_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't re-open file '%s'\n", H5_api_test_parallel_filename);
+ goto error;
+ }
+ if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME);
+ goto error;
+ }
+ if ((group_id = H5Gopen2(container_group, DATASET_READ_ONE_PROC_NONE_SEL_TEST_GROUP_NAME, H5P_DEFAULT)) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container sub-group '%s'\n",
+ DATASET_READ_ONE_PROC_NONE_SEL_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((dset_id = H5Dopen2(group_id, DATASET_READ_ONE_PROC_NONE_SEL_TEST_DSET_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open dataset '%s'\n", DATASET_READ_ONE_PROC_NONE_SEL_TEST_DSET_NAME);
+ goto error;
+ }
+
+ if ((fspace_id = H5Dget_space(dset_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get dataset dataspace\n");
+ goto error;
+ }
+
+ if ((space_npoints = H5Sget_simple_extent_npoints(fspace_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get dataspace num points\n");
+ goto error;
+ }
+
+ BEGIN_INDEPENDENT_OP(read_buf_alloc)
+ {
+ if (!MAINPROCESS) {
+ read_buf_size =
+ ((size_t)(space_npoints / mpi_size) * DATASET_READ_ONE_PROC_NONE_SEL_TEST_DTYPE_SIZE);
+
+ if (NULL == (read_buf = HDmalloc(read_buf_size))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for dataset read\n");
+ INDEPENDENT_OP_ERROR(read_buf_alloc);
+ }
+ }
+ }
+ END_INDEPENDENT_OP(read_buf_alloc);
+
+ {
+ hsize_t mdims[] = {(hsize_t)space_npoints / (hsize_t)mpi_size};
+
+ if (MAINPROCESS)
+ mdims[0] = 0;
+
+ if ((mspace_id = H5Screate_simple(1, mdims, NULL)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create memory dataspace\n");
+ goto error;
+ }
+ }
+
+ for (i = 0; i < DATASET_READ_ONE_PROC_NONE_SEL_TEST_SPACE_RANK; i++) {
+ if (i == 0) {
+ start[i] = (hsize_t)mpi_rank;
+ block[i] = 1;
+ }
+ else {
+ start[i] = 0;
+ block[i] = dims[i];
+ }
+
+ stride[i] = 1;
+ count[i] = 1;
+ }
+
+ BEGIN_INDEPENDENT_OP(set_space_sel)
+ {
+ if (MAINPROCESS) {
+ if (H5Sselect_none(fspace_id) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't set 'none' selection for dataset read\n");
+ INDEPENDENT_OP_ERROR(set_space_sel);
+ }
+ }
+ else {
+ if (H5Sselect_hyperslab(fspace_id, H5S_SELECT_SET, start, stride, count, block) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't select hyperslab for dataset read\n");
+ INDEPENDENT_OP_ERROR(set_space_sel);
+ }
+ }
+ }
+ END_INDEPENDENT_OP(set_space_sel);
+
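+ /*
+ * Rank 0 reads no elements because of its 'none' selection (and its
+ * zero-sized memory dataspace); every other rank reads back its own row
+ * of the dataset.
+ */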
+ BEGIN_INDEPENDENT_OP(dset_read)
+ {
+ if (H5Dread(dset_id, DATASET_READ_ONE_PROC_NONE_SEL_TEST_DSET_DTYPE, mspace_id, fspace_id,
+ H5P_DEFAULT, read_buf) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't read from dataset '%s'\n", DATASET_READ_ONE_PROC_NONE_SEL_TEST_DSET_NAME);
+ INDEPENDENT_OP_ERROR(dset_read);
+ }
+ }
+ END_INDEPENDENT_OP(dset_read);
+
+ BEGIN_INDEPENDENT_OP(data_verify)
+ {
+ if (!MAINPROCESS) {
+ for (i = 0; i < (size_t)space_npoints / (size_t)mpi_size; i++) {
+ if (((int *)read_buf)[i] != mpi_rank) {
+ H5_FAILED();
+ HDprintf(" data verification failed\n");
+ INDEPENDENT_OP_ERROR(data_verify);
+ }
+ }
+ }
+ }
+ END_INDEPENDENT_OP(data_verify);
+
+ if (read_buf) {
+ HDfree(read_buf);
+ read_buf = NULL;
+ }
+
+ if (dims) {
+ HDfree(dims);
+ dims = NULL;
+ }
+
+ if (H5Sclose(mspace_id) < 0)
+ TEST_ERROR;
+ if (H5Sclose(fspace_id) < 0)
+ TEST_ERROR;
+ if (H5Dclose(dset_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Pclose(fapl_id) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ if (read_buf)
+ HDfree(read_buf);
+ if (write_buf)
+ HDfree(write_buf);
+ if (dims)
+ HDfree(dims);
+ H5Sclose(mspace_id);
+ H5Sclose(fspace_id);
+ H5Dclose(dset_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Pclose(fapl_id);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to ensure that a dataset can be read from by having
+ * one of the MPI ranks use an ALL selection, while the other
+ * ranks read nothing.
+ */
+#define DATASET_READ_ONE_PROC_ALL_SEL_TEST_SPACE_RANK 2
+#define DATASET_READ_ONE_PROC_ALL_SEL_TEST_DSET_DTYPE H5T_NATIVE_INT
+#define DATASET_READ_ONE_PROC_ALL_SEL_TEST_DTYPE_SIZE sizeof(int)
+#define DATASET_READ_ONE_PROC_ALL_SEL_TEST_GROUP_NAME "one_rank_all_sel_read_test"
+#define DATASET_READ_ONE_PROC_ALL_SEL_TEST_DSET_NAME "one_rank_all_sel_dset"
+static int
+test_read_dataset_one_proc_all_selection(void)
+{
+ hssize_t space_npoints;
+ hsize_t *dims = NULL;
+ size_t i, data_size, read_buf_size;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t fapl_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t fspace_id = H5I_INVALID_HID;
+ hid_t mspace_id = H5I_INVALID_HID;
+ void *write_buf = NULL;
+ void *read_buf = NULL;
+
+ TESTING("read from dataset with one rank using all selection; others none selection");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
+ SKIPPED();
+ HDprintf(
+ " API functions for basic file, group, or dataset aren't supported with this connector\n");
+ return 0;
+ }
+
+ if (generate_random_parallel_dimensions(DATASET_READ_ONE_PROC_ALL_SEL_TEST_SPACE_RANK, &dims) < 0)
+ TEST_ERROR;
+
+ /*
+ * Have rank 0 create the dataset and completely fill it with data.
+ */
+ BEGIN_INDEPENDENT_OP(dset_create)
+ {
+ if (MAINPROCESS) {
+ if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_parallel_filename);
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME);
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ if ((group_id = H5Gcreate2(container_group, DATASET_READ_ONE_PROC_ALL_SEL_TEST_GROUP_NAME,
+ H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container sub-group '%s'\n",
+ DATASET_READ_ONE_PROC_ALL_SEL_TEST_GROUP_NAME);
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ if ((fspace_id = H5Screate_simple(DATASET_READ_ONE_PROC_ALL_SEL_TEST_SPACE_RANK, dims, NULL)) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" failed to create file dataspace for dataset\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ if ((dset_id = H5Dcreate2(group_id, DATASET_READ_ONE_PROC_ALL_SEL_TEST_DSET_NAME,
+ DATASET_READ_ONE_PROC_ALL_SEL_TEST_DSET_DTYPE, fspace_id, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create dataset '%s'\n", DATASET_READ_ONE_PROC_ALL_SEL_TEST_DSET_NAME);
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ for (i = 0, data_size = 1; i < DATASET_READ_ONE_PROC_ALL_SEL_TEST_SPACE_RANK; i++)
+ data_size *= dims[i];
+ data_size *= DATASET_READ_ONE_PROC_ALL_SEL_TEST_DTYPE_SIZE;
+
+ if (NULL == (write_buf = HDmalloc(data_size))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for dataset write\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ for (i = 0; i < (size_t)mpi_size; i++) {
+ size_t j;
+ size_t elem_per_proc = (data_size / DATASET_READ_ONE_PROC_ALL_SEL_TEST_DTYPE_SIZE) / dims[0];
+
+ for (j = 0; j < elem_per_proc; j++) {
+ size_t idx = (i * elem_per_proc) + j;
+
+ ((int *)write_buf)[idx] = (int)i;
+ }
+ }
+
+ {
+ hsize_t mdims[] = {data_size / DATASET_READ_ONE_PROC_ALL_SEL_TEST_DTYPE_SIZE};
+
+ if ((mspace_id = H5Screate_simple(1, mdims, NULL)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create memory dataspace\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+ }
+
+ if (H5Dwrite(dset_id, DATASET_READ_ONE_PROC_ALL_SEL_TEST_DSET_DTYPE, mspace_id, H5S_ALL,
+ H5P_DEFAULT, write_buf) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't write to dataset '%s'\n",
+ DATASET_READ_ONE_PROC_ALL_SEL_TEST_DSET_NAME);
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ if (write_buf) {
+ HDfree(write_buf);
+ write_buf = NULL;
+ }
+ if (mspace_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(mspace_id);
+ }
+ H5E_END_TRY;
+ mspace_id = H5I_INVALID_HID;
+ }
+ if (fspace_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(fspace_id);
+ }
+ H5E_END_TRY;
+ fspace_id = H5I_INVALID_HID;
+ }
+ if (dset_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Dclose(dset_id);
+ }
+ H5E_END_TRY;
+ dset_id = H5I_INVALID_HID;
+ }
+
+ /*
+ * Close and re-open the file to ensure that the data gets written.
+ */
+ if (H5Gclose(group_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close test's container group\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+ if (H5Gclose(container_group) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close container group\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+ if (H5Fclose(file_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close file for data flushing\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+ }
+ }
+ END_INDEPENDENT_OP(dset_create);
+
+ /*
+ * Re-open file on all ranks.
+ */
+ if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, TRUE)) < 0)
+ TEST_ERROR;
+ if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, fapl_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't re-open file '%s'\n", H5_api_test_parallel_filename);
+ goto error;
+ }
+ if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME);
+ goto error;
+ }
+ if ((group_id = H5Gopen2(container_group, DATASET_READ_ONE_PROC_ALL_SEL_TEST_GROUP_NAME, H5P_DEFAULT)) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container sub-group '%s'\n",
+ DATASET_READ_ONE_PROC_ALL_SEL_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((dset_id = H5Dopen2(group_id, DATASET_READ_ONE_PROC_ALL_SEL_TEST_DSET_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open dataset '%s'\n", DATASET_READ_ONE_PROC_ALL_SEL_TEST_DSET_NAME);
+ goto error;
+ }
+
+ if ((fspace_id = H5Dget_space(dset_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get dataset dataspace\n");
+ goto error;
+ }
+
+ if ((space_npoints = H5Sget_simple_extent_npoints(fspace_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get dataspace num points\n");
+ goto error;
+ }
+
+ BEGIN_INDEPENDENT_OP(read_buf_alloc)
+ {
+ if (MAINPROCESS) {
+ read_buf_size = (size_t)space_npoints * DATASET_READ_ONE_PROC_ALL_SEL_TEST_DTYPE_SIZE;
+
+ if (NULL == (read_buf = HDmalloc(read_buf_size))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for dataset read\n");
+ INDEPENDENT_OP_ERROR(read_buf_alloc);
+ }
+ }
+ }
+ END_INDEPENDENT_OP(read_buf_alloc);
+
+ {
+ hsize_t mdims[] = {(hsize_t)space_npoints};
+
+ if (!MAINPROCESS)
+ mdims[0] = 0;
+
+ if ((mspace_id = H5Screate_simple(1, mdims, NULL)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create memory dataspace\n");
+ goto error;
+ }
+ }
+
+ BEGIN_INDEPENDENT_OP(set_space_sel)
+ {
+ if (MAINPROCESS) {
+ if (H5Sselect_all(fspace_id) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't set 'all' selection for dataset read\n");
+ INDEPENDENT_OP_ERROR(set_space_sel);
+ }
+ }
+ else {
+ if (H5Sselect_none(fspace_id) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't set 'none' selection for dataset read\n");
+ INDEPENDENT_OP_ERROR(set_space_sel);
+ }
+ }
+ }
+ END_INDEPENDENT_OP(set_space_sel);
+
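+ /*
+ * Only rank 0 reads anything: it pairs an 'all' selection in the file
+ * with a full-sized memory dataspace, while the other ranks pair a
+ * 'none' selection with a zero-sized memory dataspace.
+ */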
+ BEGIN_INDEPENDENT_OP(dset_read)
+ {
+ if (H5Dread(dset_id, DATASET_READ_ONE_PROC_ALL_SEL_TEST_DSET_DTYPE, mspace_id, fspace_id, H5P_DEFAULT,
+ read_buf) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't read from dataset '%s'\n", DATASET_READ_ONE_PROC_ALL_SEL_TEST_DSET_NAME);
+ INDEPENDENT_OP_ERROR(dset_read);
+ }
+ }
+ END_INDEPENDENT_OP(dset_read);
+
+ BEGIN_INDEPENDENT_OP(data_verify)
+ {
+ if (MAINPROCESS) {
+ for (i = 0; i < (size_t)mpi_size; i++) {
+ size_t j;
+ size_t elem_per_proc = (size_t)(space_npoints / mpi_size);
+
+ for (j = 0; j < elem_per_proc; j++) {
+ int idx = (int)((i * elem_per_proc) + j);
+
+ if (((int *)read_buf)[idx] != (int)i) {
+ H5_FAILED();
+ HDprintf(" data verification failed\n");
+ INDEPENDENT_OP_ERROR(data_verify);
+ }
+ }
+ }
+ }
+ }
+ END_INDEPENDENT_OP(data_verify);
+
+ if (read_buf) {
+ HDfree(read_buf);
+ read_buf = NULL;
+ }
+
+ if (dims) {
+ HDfree(dims);
+ dims = NULL;
+ }
+
+ if (H5Sclose(mspace_id) < 0)
+ TEST_ERROR;
+ if (H5Sclose(fspace_id) < 0)
+ TEST_ERROR;
+ if (H5Dclose(dset_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Pclose(fapl_id) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ if (read_buf)
+ HDfree(read_buf);
+ if (write_buf)
+ HDfree(write_buf);
+ if (dims)
+ HDfree(dims);
+ H5Sclose(mspace_id);
+ H5Sclose(fspace_id);
+ H5Dclose(dset_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Pclose(fapl_id);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to ensure that a dataset can be read from by having
+ * a hyperslab selection in the file dataspace and an all
+ * selection in the memory dataspace.
+ */
+static int
+test_read_dataset_hyper_file_all_mem(void)
+{
+ TESTING("read from dataset with hyperslab sel. for file space; all sel. for memory");
+
+ SKIPPED();
+
+ return 0;
+}
+
+/*
+ * A test to ensure that a dataset can be read from by having
+ * an all selection in the file dataspace and a hyperslab
+ * selection in the memory dataspace.
+ */
+#define DATASET_READ_ALL_FILE_HYPER_MEM_TEST_SPACE_RANK 2
+#define DATASET_READ_ALL_FILE_HYPER_MEM_TEST_DSET_DTYPE H5T_NATIVE_INT
+#define DATASET_READ_ALL_FILE_HYPER_MEM_TEST_DTYPE_SIZE sizeof(int)
+#define DATASET_READ_ALL_FILE_HYPER_MEM_TEST_GROUP_NAME "all_sel_file_hyper_sel_mem_read_test"
+#define DATASET_READ_ALL_FILE_HYPER_MEM_TEST_DSET_NAME "all_sel_file_hyper_sel_mem_dset"
+static int
+test_read_dataset_all_file_hyper_mem(void)
+{
+ hssize_t space_npoints;
+ hsize_t *dims = NULL;
+ size_t i, data_size, read_buf_size;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t fapl_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t fspace_id = H5I_INVALID_HID;
+ hid_t mspace_id = H5I_INVALID_HID;
+ void *write_buf = NULL;
+ void *read_buf = NULL;
+
+ TESTING("read from dataset with all sel. for file space; hyperslab sel. for memory");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
+ SKIPPED();
+ HDprintf(
+ " API functions for basic file, group, or dataset aren't supported with this connector\n");
+ return 0;
+ }
+
+ if (generate_random_parallel_dimensions(DATASET_READ_ALL_FILE_HYPER_MEM_TEST_SPACE_RANK, &dims) < 0)
+ TEST_ERROR;
+
+ /*
+ * Have rank 0 create the dataset and completely fill it with data.
+ */
+ BEGIN_INDEPENDENT_OP(dset_create)
+ {
+ if (MAINPROCESS) {
+ if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_parallel_filename);
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME);
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ if ((group_id = H5Gcreate2(container_group, DATASET_READ_ALL_FILE_HYPER_MEM_TEST_GROUP_NAME,
+ H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container sub-group '%s'\n",
+ DATASET_READ_ALL_FILE_HYPER_MEM_TEST_GROUP_NAME);
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ if ((fspace_id = H5Screate_simple(DATASET_READ_ALL_FILE_HYPER_MEM_TEST_SPACE_RANK, dims, NULL)) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" failed to create file dataspace for dataset\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ if ((dset_id = H5Dcreate2(group_id, DATASET_READ_ALL_FILE_HYPER_MEM_TEST_DSET_NAME,
+ DATASET_READ_ALL_FILE_HYPER_MEM_TEST_DSET_DTYPE, fspace_id, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create dataset '%s'\n",
+ DATASET_READ_ALL_FILE_HYPER_MEM_TEST_DSET_NAME);
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ for (i = 0, data_size = 1; i < DATASET_READ_ALL_FILE_HYPER_MEM_TEST_SPACE_RANK; i++)
+ data_size *= dims[i];
+ data_size *= DATASET_READ_ALL_FILE_HYPER_MEM_TEST_DTYPE_SIZE;
+
+ if (NULL == (write_buf = HDmalloc(data_size))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for dataset write\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ for (i = 0; i < (size_t)mpi_size; i++) {
+ size_t j;
+ size_t elem_per_proc =
+ (data_size / DATASET_READ_ALL_FILE_HYPER_MEM_TEST_DTYPE_SIZE) / dims[0];
+
+ for (j = 0; j < elem_per_proc; j++) {
+ size_t idx = (i * elem_per_proc) + j;
+
+ ((int *)write_buf)[idx] = (int)i;
+ }
+ }
+
+ {
+ hsize_t mdims[] = {data_size / DATASET_READ_ALL_FILE_HYPER_MEM_TEST_DTYPE_SIZE};
+
+ if ((mspace_id = H5Screate_simple(1, mdims, NULL)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create memory dataspace\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+ }
+
+ if (H5Dwrite(dset_id, DATASET_READ_ALL_FILE_HYPER_MEM_TEST_DSET_DTYPE, mspace_id, H5S_ALL,
+ H5P_DEFAULT, write_buf) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't write to dataset '%s'\n",
+ DATASET_READ_ALL_FILE_HYPER_MEM_TEST_DSET_NAME);
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ if (write_buf) {
+ HDfree(write_buf);
+ write_buf = NULL;
+ }
+ if (mspace_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(mspace_id);
+ }
+ H5E_END_TRY;
+ mspace_id = H5I_INVALID_HID;
+ }
+ if (fspace_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(fspace_id);
+ }
+ H5E_END_TRY;
+ fspace_id = H5I_INVALID_HID;
+ }
+ if (dset_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Dclose(dset_id);
+ }
+ H5E_END_TRY;
+ dset_id = H5I_INVALID_HID;
+ }
+
+ /*
+ * Close and re-open the file to ensure that the data gets written.
+ */
+ if (H5Gclose(group_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close test's container group\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+ if (H5Gclose(container_group) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close container group\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+ if (H5Fclose(file_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close file for data flushing\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+ }
+ }
+ END_INDEPENDENT_OP(dset_create);
+
+ /*
+ * Re-open file on all ranks.
+ */
+ if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, TRUE)) < 0)
+ TEST_ERROR;
+ if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, fapl_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't re-open file '%s'\n", H5_api_test_parallel_filename);
+ goto error;
+ }
+ if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME);
+ goto error;
+ }
+ if ((group_id = H5Gopen2(container_group, DATASET_READ_ALL_FILE_HYPER_MEM_TEST_GROUP_NAME, H5P_DEFAULT)) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container sub-group '%s'\n",
+ DATASET_READ_ALL_FILE_HYPER_MEM_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((dset_id = H5Dopen2(group_id, DATASET_READ_ALL_FILE_HYPER_MEM_TEST_DSET_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open dataset '%s'\n", DATASET_READ_ALL_FILE_HYPER_MEM_TEST_DSET_NAME);
+ goto error;
+ }
+
+ if ((fspace_id = H5Dget_space(dset_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get dataset dataspace\n");
+ goto error;
+ }
+
+ if ((space_npoints = H5Sget_simple_extent_npoints(fspace_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get dataspace num points\n");
+ goto error;
+ }
+
+ /*
+ * Only rank 0 performs the dataset read, since reading the entire dataset
+ * on every rank could put unnecessary strain on system resources.
+ */
+ BEGIN_INDEPENDENT_OP(dset_read)
+ {
+ if (MAINPROCESS) {
+ hsize_t start[1] = {0};
+ hsize_t stride[1] = {2};
+ hsize_t count[1] = {(hsize_t)space_npoints};
+ hsize_t block[1] = {1};
+ hsize_t mdims[] = {(hsize_t)(2 * space_npoints)};
+
+ /*
+ * Allocate twice the amount of memory needed and leave "holes" in the memory
+ * buffer in order to prove that the mapping from all selection <-> hyperslab
+ * selection works correctly.
+ */
+ read_buf_size = (size_t)(2 * space_npoints) * DATASET_READ_ALL_FILE_HYPER_MEM_TEST_DTYPE_SIZE;
+ if (NULL == (read_buf = HDcalloc(1, read_buf_size))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for dataset read\n");
+ INDEPENDENT_OP_ERROR(dset_read);
+ }
+
+ if ((mspace_id = H5Screate_simple(1, mdims, NULL)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create memory dataspace\n");
+ INDEPENDENT_OP_ERROR(dset_read);
+ }
+
+ if (H5Sselect_hyperslab(mspace_id, H5S_SELECT_SET, start, stride, count, block) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't select hyperslab for dataset read\n");
+ INDEPENDENT_OP_ERROR(dset_read);
+ }
+
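+ /*
+ * The read below scatters the dataset into the even indices of
+ * read_buf; the odd indices keep the zero values from HDcalloc and are
+ * checked afterwards to confirm the "holes" were left untouched.
+ */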
+ if (H5Dread(dset_id, DATASET_READ_ALL_FILE_HYPER_MEM_TEST_DSET_DTYPE, mspace_id, H5S_ALL,
+ H5P_DEFAULT, read_buf) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't read from dataset '%s'\n",
+ DATASET_READ_ALL_FILE_HYPER_MEM_TEST_DSET_NAME);
+ INDEPENDENT_OP_ERROR(dset_read);
+ }
+
+ for (i = 0; i < (size_t)mpi_size; i++) {
+ size_t j;
+ size_t elem_per_proc = (size_t)(space_npoints / mpi_size);
+
+ for (j = 0; j < 2 * elem_per_proc; j++) {
+ size_t idx = (i * 2 * elem_per_proc) + j;
+
+ if (j % 2 == 0) {
+ if (((int *)read_buf)[idx] != (int)i) {
+ H5_FAILED();
+ HDprintf(" data verification failed\n");
+ INDEPENDENT_OP_ERROR(dset_read);
+ }
+ }
+ else {
+ if (((int *)read_buf)[idx] != 0) {
+ H5_FAILED();
+ HDprintf(" data verification failed\n");
+ INDEPENDENT_OP_ERROR(dset_read);
+ }
+ }
+ }
+ }
+ }
+ }
+ END_INDEPENDENT_OP(dset_read);
+
+ if (read_buf) {
+ HDfree(read_buf);
+ read_buf = NULL;
+ }
+
+ if (dims) {
+ HDfree(dims);
+ dims = NULL;
+ }
+
+ if (H5Sclose(fspace_id) < 0)
+ TEST_ERROR;
+ if (H5Dclose(dset_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Pclose(fapl_id) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ if (read_buf)
+ HDfree(read_buf);
+ if (write_buf)
+ HDfree(write_buf);
+ if (dims)
+ HDfree(dims);
+ H5Sclose(mspace_id);
+ H5Sclose(fspace_id);
+ H5Dclose(dset_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Pclose(fapl_id);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to ensure that a dataset can be read from by having
+ * a point selection in the file dataspace and an all selection
+ * in the memory dataspace.
+ */
+static int
+test_read_dataset_point_file_all_mem(void)
+{
+ TESTING("read from dataset with point sel. for file space; all sel. for memory");
+
+ SKIPPED();
+
+ return 0;
+}
+
+/*
+ * A test to ensure that a dataset can be read from by having
+ * an all selection in the file dataspace and a point selection
+ * in the memory dataspace.
+ */
+#define DATASET_READ_ALL_FILE_POINT_MEM_TEST_SPACE_RANK 2
+#define DATASET_READ_ALL_FILE_POINT_MEM_TEST_DSET_DTYPE H5T_NATIVE_INT
+#define DATASET_READ_ALL_FILE_POINT_MEM_TEST_DTYPE_SIZE sizeof(int)
+#define DATASET_READ_ALL_FILE_POINT_MEM_TEST_GROUP_NAME "all_sel_file_point_sel_mem_read_test"
+#define DATASET_READ_ALL_FILE_POINT_MEM_TEST_DSET_NAME "all_sel_file_point_sel_mem_dset"
+static int
+test_read_dataset_all_file_point_mem(void)
+{
+ hssize_t space_npoints;
+ hsize_t *points = NULL;
+ hsize_t *dims = NULL;
+ size_t i, data_size, read_buf_size;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t fapl_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t fspace_id = H5I_INVALID_HID;
+ hid_t mspace_id = H5I_INVALID_HID;
+ void *write_buf = NULL;
+ void *read_buf = NULL;
+
+ TESTING("read from dataset with all sel. for file space; point sel. for memory");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
+ SKIPPED();
+ HDprintf(
+ " API functions for basic file, group, or dataset aren't supported with this connector\n");
+ return 0;
+ }
+
+ if (generate_random_parallel_dimensions(DATASET_READ_ALL_FILE_POINT_MEM_TEST_SPACE_RANK, &dims) < 0)
+ TEST_ERROR;
+
+ /*
+ * Have rank 0 create the dataset and completely fill it with data.
+ */
+ BEGIN_INDEPENDENT_OP(dset_create)
+ {
+ if (MAINPROCESS) {
+ if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_parallel_filename);
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME);
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ if ((group_id = H5Gcreate2(container_group, DATASET_READ_ALL_FILE_POINT_MEM_TEST_GROUP_NAME,
+ H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container sub-group '%s'\n",
+ DATASET_READ_ALL_FILE_POINT_MEM_TEST_GROUP_NAME);
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ if ((fspace_id = H5Screate_simple(DATASET_READ_ALL_FILE_POINT_MEM_TEST_SPACE_RANK, dims, NULL)) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" failed to create file dataspace for dataset\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ if ((dset_id = H5Dcreate2(group_id, DATASET_READ_ALL_FILE_POINT_MEM_TEST_DSET_NAME,
+ DATASET_READ_ALL_FILE_POINT_MEM_TEST_DSET_DTYPE, fspace_id, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create dataset '%s'\n",
+ DATASET_READ_ALL_FILE_POINT_MEM_TEST_DSET_NAME);
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ for (i = 0, data_size = 1; i < DATASET_READ_ALL_FILE_POINT_MEM_TEST_SPACE_RANK; i++)
+ data_size *= dims[i];
+ data_size *= DATASET_READ_ALL_FILE_POINT_MEM_TEST_DTYPE_SIZE;
+
+ if (NULL == (write_buf = HDmalloc(data_size))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for dataset write\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ for (i = 0; i < (size_t)mpi_size; i++) {
+ size_t j;
+ size_t elem_per_proc =
+ (data_size / DATASET_READ_ALL_FILE_POINT_MEM_TEST_DTYPE_SIZE) / dims[0];
+
+ for (j = 0; j < elem_per_proc; j++) {
+ size_t idx = (i * elem_per_proc) + j;
+
+ ((int *)write_buf)[idx] = (int)i;
+ }
+ }
+
+ {
+ hsize_t mdims[] = {data_size / DATASET_READ_ALL_FILE_POINT_MEM_TEST_DTYPE_SIZE};
+
+ if ((mspace_id = H5Screate_simple(1, mdims, NULL)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create memory dataspace\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+ }
+
+ if (H5Dwrite(dset_id, DATASET_READ_ALL_FILE_POINT_MEM_TEST_DSET_DTYPE, mspace_id, H5S_ALL,
+ H5P_DEFAULT, write_buf) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't write to dataset '%s'\n",
+ DATASET_READ_ALL_FILE_POINT_MEM_TEST_DSET_NAME);
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ if (write_buf) {
+ HDfree(write_buf);
+ write_buf = NULL;
+ }
+ if (mspace_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(mspace_id);
+ }
+ H5E_END_TRY;
+ mspace_id = H5I_INVALID_HID;
+ }
+ if (fspace_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(fspace_id);
+ }
+ H5E_END_TRY;
+ fspace_id = H5I_INVALID_HID;
+ }
+ if (dset_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Dclose(dset_id);
+ }
+ H5E_END_TRY;
+ dset_id = H5I_INVALID_HID;
+ }
+
+ /*
+ * Close and re-open the file to ensure that the data gets written.
+ */
+ if (H5Gclose(group_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close test's container group\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+ if (H5Gclose(container_group) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close container group\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+ if (H5Fclose(file_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close file for data flushing\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+ }
+ }
+ END_INDEPENDENT_OP(dset_create);
+
+ /*
+ * Re-open file on all ranks.
+ */
+ if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, TRUE)) < 0)
+ TEST_ERROR;
+ if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, fapl_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't re-open file '%s'\n", H5_api_test_parallel_filename);
+ goto error;
+ }
+ if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME);
+ goto error;
+ }
+ if ((group_id = H5Gopen2(container_group, DATASET_READ_ALL_FILE_POINT_MEM_TEST_GROUP_NAME, H5P_DEFAULT)) <
+ 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container sub-group '%s'\n",
+ DATASET_READ_ALL_FILE_POINT_MEM_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((dset_id = H5Dopen2(group_id, DATASET_READ_ALL_FILE_POINT_MEM_TEST_DSET_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open dataset '%s'\n", DATASET_READ_ALL_FILE_POINT_MEM_TEST_DSET_NAME);
+ goto error;
+ }
+
+ if ((fspace_id = H5Dget_space(dset_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get dataset dataspace\n");
+ goto error;
+ }
+
+ if ((space_npoints = H5Sget_simple_extent_npoints(fspace_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get dataspace num points\n");
+ goto error;
+ }
+
+ /*
+ * Only rank 0 performs the dataset read, since reading the entire dataset
+ * on every rank could put unnecessary strain on system resources.
+ */
+ BEGIN_INDEPENDENT_OP(dset_read)
+ {
+ if (MAINPROCESS) {
+ hsize_t mdims[] = {(hsize_t)(2 * space_npoints)};
+ size_t j;
+
+ /*
+ * Allocate twice the amount of memory needed and leave "holes" in the memory
+ * buffer in order to prove that the mapping from all selection <-> point
+ * selection works correctly.
+ */
+ read_buf_size = (size_t)(2 * space_npoints) * DATASET_READ_ALL_FILE_POINT_MEM_TEST_DTYPE_SIZE;
+ if (NULL == (read_buf = HDcalloc(1, read_buf_size))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for dataset read\n");
+ INDEPENDENT_OP_ERROR(dset_read);
+ }
+
+ if ((mspace_id = H5Screate_simple(1, mdims, NULL)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create memory dataspace\n");
+ INDEPENDENT_OP_ERROR(dset_read);
+ }
+
+ if (NULL == (points = HDmalloc((size_t)space_npoints * sizeof(hsize_t)))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for point selection\n");
+ INDEPENDENT_OP_ERROR(dset_read);
+ }
+
+ /* Select every other point in the 1-dimensional memory dataspace */
+ for (i = 0; i < (size_t)space_npoints; i++)
+ points[i] = (hsize_t)(2 * i);
+
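+ /*
+ * These even memory offsets line up with the zero-initialized "holes":
+ * dataset elements land at indices 0, 2, 4, ... of read_buf while the
+ * odd indices remain 0 and are verified below.
+ */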
+ if (H5Sselect_elements(mspace_id, H5S_SELECT_SET, (size_t)space_npoints, points) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't set point selection for dataset read\n");
+ INDEPENDENT_OP_ERROR(dset_read);
+ }
+
+ if (H5Dread(dset_id, DATASET_READ_ALL_FILE_POINT_MEM_TEST_DSET_DTYPE, mspace_id, H5S_ALL,
+ H5P_DEFAULT, read_buf) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't read from dataset '%s'\n",
+ DATASET_READ_ALL_FILE_POINT_MEM_TEST_DSET_NAME);
+ INDEPENDENT_OP_ERROR(dset_read);
+ }
+
+ for (i = 0; i < (size_t)mpi_size; i++) {
+ size_t elem_per_proc = (size_t)(space_npoints / mpi_size);
+
+ for (j = 0; j < 2 * elem_per_proc; j++) {
+ size_t idx = (i * 2 * elem_per_proc) + j;
+
+ if (j % 2 == 0) {
+ if (((int *)read_buf)[idx] != (int)i) {
+ H5_FAILED();
+ HDprintf(" data verification failed\n");
+ INDEPENDENT_OP_ERROR(dset_read);
+ }
+ }
+ else {
+ if (((int *)read_buf)[idx] != 0) {
+ H5_FAILED();
+ HDprintf(" data verification failed\n");
+ INDEPENDENT_OP_ERROR(dset_read);
+ }
+ }
+ }
+ }
+ }
+ }
+ END_INDEPENDENT_OP(dset_read);
+
+ if (read_buf) {
+ HDfree(read_buf);
+ read_buf = NULL;
+ }
+
+ if (points) {
+ HDfree(points);
+ points = NULL;
+ }
+
+ if (dims) {
+ HDfree(dims);
+ dims = NULL;
+ }
+
+ if (H5Sclose(fspace_id) < 0)
+ TEST_ERROR;
+ if (H5Dclose(dset_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Pclose(fapl_id) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ if (read_buf)
+ HDfree(read_buf);
+ if (write_buf)
+ HDfree(write_buf);
+ if (points)
+ HDfree(points);
+ if (dims)
+ HDfree(dims);
+ H5Sclose(mspace_id);
+ H5Sclose(fspace_id);
+ H5Dclose(dset_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Pclose(fapl_id);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to ensure that a dataset can be read from by having
+ * a hyperslab selection in the file dataspace and a point
+ * selection in the memory dataspace.
+ */
+#define DATASET_READ_HYPER_FILE_POINT_MEM_TEST_SPACE_RANK 2
+#define DATASET_READ_HYPER_FILE_POINT_MEM_TEST_DSET_DTYPE H5T_NATIVE_INT
+#define DATASET_READ_HYPER_FILE_POINT_MEM_TEST_DTYPE_SIZE sizeof(int)
+#define DATASET_READ_HYPER_FILE_POINT_MEM_TEST_GROUP_NAME "hyper_sel_file_point_sel_mem_read_test"
+#define DATASET_READ_HYPER_FILE_POINT_MEM_TEST_DSET_NAME "hyper_sel_file_point_sel_mem_dset"
+static int
+test_read_dataset_hyper_file_point_mem(void)
+{
+ hssize_t space_npoints;
+ hsize_t *dims = NULL;
+ hsize_t *points = NULL;
+ hsize_t start[DATASET_READ_HYPER_FILE_POINT_MEM_TEST_SPACE_RANK];
+ hsize_t stride[DATASET_READ_HYPER_FILE_POINT_MEM_TEST_SPACE_RANK];
+ hsize_t count[DATASET_READ_HYPER_FILE_POINT_MEM_TEST_SPACE_RANK];
+ hsize_t block[DATASET_READ_HYPER_FILE_POINT_MEM_TEST_SPACE_RANK];
+ size_t i, data_size, read_buf_size;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t fapl_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t fspace_id = H5I_INVALID_HID;
+ hid_t mspace_id = H5I_INVALID_HID;
+ void *write_buf = NULL;
+ void *read_buf = NULL;
+
+ TESTING("read from dataset with hyperslab sel. for file space; point sel. for memory");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
+ SKIPPED();
+ HDprintf(
+ " API functions for basic file, group, or dataset aren't supported with this connector\n");
+ return 0;
+ }
+
+ if (generate_random_parallel_dimensions(DATASET_READ_HYPER_FILE_POINT_MEM_TEST_SPACE_RANK, &dims) < 0)
+ TEST_ERROR;
+
+ /*
+ * Have rank 0 create the dataset and completely fill it with data.
+ */
+ BEGIN_INDEPENDENT_OP(dset_create)
+ {
+ if (MAINPROCESS) {
+ if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_parallel_filename);
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME);
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ if ((group_id = H5Gcreate2(container_group, DATASET_READ_HYPER_FILE_POINT_MEM_TEST_GROUP_NAME,
+ H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container sub-group '%s'\n",
+ DATASET_READ_HYPER_FILE_POINT_MEM_TEST_GROUP_NAME);
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ if ((fspace_id =
+ H5Screate_simple(DATASET_READ_HYPER_FILE_POINT_MEM_TEST_SPACE_RANK, dims, NULL)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to create file dataspace for dataset\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ if ((dset_id = H5Dcreate2(group_id, DATASET_READ_HYPER_FILE_POINT_MEM_TEST_DSET_NAME,
+ DATASET_READ_HYPER_FILE_POINT_MEM_TEST_DSET_DTYPE, fspace_id,
+ H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create dataset '%s'\n",
+ DATASET_READ_HYPER_FILE_POINT_MEM_TEST_DSET_NAME);
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ for (i = 0, data_size = 1; i < DATASET_READ_HYPER_FILE_POINT_MEM_TEST_SPACE_RANK; i++)
+ data_size *= dims[i];
+ data_size *= DATASET_READ_HYPER_FILE_POINT_MEM_TEST_DTYPE_SIZE;
+
+ if (NULL == (write_buf = HDmalloc(data_size))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for dataset write\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ for (i = 0; i < (size_t)mpi_size; i++) {
+ size_t j;
+ size_t elem_per_proc =
+ (data_size / DATASET_READ_HYPER_FILE_POINT_MEM_TEST_DTYPE_SIZE) / dims[0];
+
+ for (j = 0; j < elem_per_proc; j++) {
+ size_t idx = (i * elem_per_proc) + j;
+
+ ((int *)write_buf)[idx] = (int)i;
+ }
+ }
+
+ {
+ hsize_t mdims[] = {data_size / DATASET_READ_HYPER_FILE_POINT_MEM_TEST_DTYPE_SIZE};
+
+ if ((mspace_id = H5Screate_simple(1, mdims, NULL)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create memory dataspace\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+ }
+
+ if (H5Dwrite(dset_id, DATASET_READ_HYPER_FILE_POINT_MEM_TEST_DSET_DTYPE, mspace_id, H5S_ALL,
+ H5P_DEFAULT, write_buf) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't write to dataset '%s'\n",
+ DATASET_READ_HYPER_FILE_POINT_MEM_TEST_DSET_NAME);
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ if (write_buf) {
+ HDfree(write_buf);
+ write_buf = NULL;
+ }
+ if (mspace_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(mspace_id);
+ }
+ H5E_END_TRY;
+ mspace_id = H5I_INVALID_HID;
+ }
+ if (fspace_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(fspace_id);
+ }
+ H5E_END_TRY;
+ fspace_id = H5I_INVALID_HID;
+ }
+ if (dset_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Dclose(dset_id);
+ }
+ H5E_END_TRY;
+ dset_id = H5I_INVALID_HID;
+ }
+
+ /*
+ * Close and re-open the file to ensure that the data gets written.
+ */
+ if (H5Gclose(group_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close test's container group\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+ if (H5Gclose(container_group) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close container group\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+ if (H5Fclose(file_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close file for data flushing\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+ }
+ }
+ END_INDEPENDENT_OP(dset_create);
+
+ /*
+ * Re-open file on all ranks.
+ */
+ if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, TRUE)) < 0)
+ TEST_ERROR;
+ if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, fapl_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't re-open file '%s'\n", H5_api_test_parallel_filename);
+ goto error;
+ }
+ if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME);
+ goto error;
+ }
+ if ((group_id =
+ H5Gopen2(container_group, DATASET_READ_HYPER_FILE_POINT_MEM_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container sub-group '%s'\n",
+ DATASET_READ_HYPER_FILE_POINT_MEM_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((dset_id = H5Dopen2(group_id, DATASET_READ_HYPER_FILE_POINT_MEM_TEST_DSET_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open dataset '%s'\n", DATASET_READ_HYPER_FILE_POINT_MEM_TEST_DSET_NAME);
+ goto error;
+ }
+
+ if ((fspace_id = H5Dget_space(dset_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get dataset dataspace\n");
+ goto error;
+ }
+
+ if ((space_npoints = H5Sget_simple_extent_npoints(fspace_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get dataspace num points\n");
+ goto error;
+ }
+
+ /*
+ * Allocate twice the amount of memory needed and leave "holes" in the memory
+ * buffer in order to prove that the mapping from the hyperslab selection in the
+ * file dataspace to the point selection in the memory dataspace works correctly.
+ */
+ read_buf_size = (2 * (size_t)(space_npoints / mpi_size) * DATASET_READ_HYPER_FILE_POINT_MEM_TEST_DTYPE_SIZE);
+ if (NULL == (read_buf = HDcalloc(1, read_buf_size))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for dataset read\n");
+ goto error;
+ }
+
+ for (i = 0; i < DATASET_READ_HYPER_FILE_POINT_MEM_TEST_SPACE_RANK; i++) {
+ if (i == 0) {
+ start[i] = (hsize_t)mpi_rank;
+ block[i] = 1;
+ }
+ else {
+ start[i] = 0;
+ block[i] = dims[i];
+ }
+
+ stride[i] = 1;
+ count[i] = 1;
+ }
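+ /*
+ * For the 2-dimensional dataspace used here, this works out to start = {mpi_rank, 0} with
+ * block = {1, dims[1]}, i.e. each rank selects only its own row of the dataset.
+ */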
+
+ if (H5Sselect_hyperslab(fspace_id, H5S_SELECT_SET, start, stride, count, block) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't select hyperslab for dataset read\n");
+ goto error;
+ }
+
+ {
+ hsize_t mdims[] = {(hsize_t)(2 * (space_npoints / mpi_size))};
+ size_t j;
+
+ if ((mspace_id = H5Screate_simple(1, mdims, NULL)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create memory dataspace\n");
+ goto error;
+ }
+
+ if (NULL == (points = HDmalloc((size_t)(space_npoints / mpi_size) * sizeof(hsize_t)))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for point selection\n");
+ goto error;
+ }
+
+ /* Select every other point in the 1-dimensional memory dataspace */
+ for (i = 0, j = 0; i < (size_t)(2 * (space_npoints / mpi_size)); i++) {
+ if (i % 2 == 0)
+ points[j++] = (hsize_t)i;
+ }
+
+ if (H5Sselect_elements(mspace_id, H5S_SELECT_SET, (size_t)(space_npoints / mpi_size), points) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't set point selection for dataset read\n");
+ goto error;
+ }
+ }
+
+ if (H5Dread(dset_id, DATASET_READ_HYPER_FILE_POINT_MEM_TEST_DSET_DTYPE, mspace_id, fspace_id, H5P_DEFAULT,
+ read_buf) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't read from dataset '%s'\n", DATASET_READ_HYPER_FILE_POINT_MEM_TEST_DSET_NAME);
+ goto error;
+ }
+
+ for (i = 0; i < (size_t)(2 * (space_npoints / mpi_size)); i++) {
+ if (i % 2 == 0) {
+ if (((int *)read_buf)[i] != (int)mpi_rank) {
+ H5_FAILED();
+ HDprintf(" data verification failed\n");
+ goto error;
+ }
+ }
+ else {
+ if (((int *)read_buf)[i] != 0) {
+ H5_FAILED();
+ HDprintf(" data verification failed\n");
+ goto error;
+ }
+ }
+ }
+
+ if (read_buf) {
+ HDfree(read_buf);
+ read_buf = NULL;
+ }
+
+ if (points) {
+ HDfree(points);
+ points = NULL;
+ }
+
+ if (dims) {
+ HDfree(dims);
+ dims = NULL;
+ }
+
+ if (H5Sclose(fspace_id) < 0)
+ TEST_ERROR;
+ if (H5Dclose(dset_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Pclose(fapl_id) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ if (read_buf)
+ HDfree(read_buf);
+ if (write_buf)
+ HDfree(write_buf);
+ if (points)
+ HDfree(points);
+ if (dims)
+ HDfree(dims);
+ H5Sclose(mspace_id);
+ H5Sclose(fspace_id);
+ H5Dclose(dset_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Pclose(fapl_id);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to ensure that a dataset can be read from by having
+ * a point selection in the file dataspace and a hyperslab
+ * selection in the memory dataspace.
+ */
+#define DATASET_READ_POINT_FILE_HYPER_MEM_TEST_SPACE_RANK 2
+#define DATASET_READ_POINT_FILE_HYPER_MEM_TEST_DSET_DTYPE H5T_NATIVE_INT
+#define DATASET_READ_POINT_FILE_HYPER_MEM_TEST_DTYPE_SIZE sizeof(int)
+#define DATASET_READ_POINT_FILE_HYPER_MEM_TEST_GROUP_NAME "point_sel_file_hyper_sel_mem_read_test"
+#define DATASET_READ_POINT_FILE_HYPER_MEM_TEST_DSET_NAME "point_sel_file_hyper_sel_mem_dset"
+static int
+test_read_dataset_point_file_hyper_mem(void)
+{
+ hssize_t space_npoints;
+ hsize_t *dims = NULL;
+ hsize_t *points = NULL;
+ size_t i, data_size, read_buf_size;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t fapl_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t fspace_id = H5I_INVALID_HID;
+ hid_t mspace_id = H5I_INVALID_HID;
+ void *write_buf = NULL;
+ void *read_buf = NULL;
+
+ TESTING("read from dataset with point sel. for file space; hyperslab sel. for memory");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
+ SKIPPED();
+ HDprintf(
+ " API functions for basic file, group, or dataset aren't supported with this connector\n");
+ return 0;
+ }
+
+ if (generate_random_parallel_dimensions(DATASET_READ_POINT_FILE_HYPER_MEM_TEST_SPACE_RANK, &dims) < 0)
+ TEST_ERROR;
+
+ /*
+ * Have rank 0 create the dataset and completely fill it with data.
+ */
+ BEGIN_INDEPENDENT_OP(dset_create)
+ {
+ if (MAINPROCESS) {
+ if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_parallel_filename);
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME);
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ if ((group_id = H5Gcreate2(container_group, DATASET_READ_POINT_FILE_HYPER_MEM_TEST_GROUP_NAME,
+ H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container sub-group '%s'\n",
+ DATASET_READ_POINT_FILE_HYPER_MEM_TEST_GROUP_NAME);
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ if ((fspace_id =
+ H5Screate_simple(DATASET_READ_POINT_FILE_HYPER_MEM_TEST_SPACE_RANK, dims, NULL)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to create file dataspace for dataset\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ if ((dset_id = H5Dcreate2(group_id, DATASET_READ_POINT_FILE_HYPER_MEM_TEST_DSET_NAME,
+ DATASET_READ_POINT_FILE_HYPER_MEM_TEST_DSET_DTYPE, fspace_id,
+ H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create dataset '%s'\n",
+ DATASET_READ_POINT_FILE_HYPER_MEM_TEST_DSET_NAME);
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ for (i = 0, data_size = 1; i < DATASET_READ_POINT_FILE_HYPER_MEM_TEST_SPACE_RANK; i++)
+ data_size *= dims[i];
+ data_size *= DATASET_READ_POINT_FILE_HYPER_MEM_TEST_DTYPE_SIZE;
+
+ if (NULL == (write_buf = HDmalloc(data_size))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for dataset write\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ for (i = 0; i < (size_t)mpi_size; i++) {
+ size_t j;
+ size_t elem_per_proc =
+ (data_size / DATASET_READ_POINT_FILE_HYPER_MEM_TEST_DTYPE_SIZE) / dims[0];
+
+ for (j = 0; j < elem_per_proc; j++) {
+ size_t idx = (i * elem_per_proc) + j;
+
+ ((int *)write_buf)[idx] = (int)i;
+ }
+ }
+
+ {
+ hsize_t mdims[] = {data_size / DATASET_READ_POINT_FILE_HYPER_MEM_TEST_DTYPE_SIZE};
+
+ if ((mspace_id = H5Screate_simple(1, mdims, NULL)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create memory dataspace\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+ }
+
+ if (H5Dwrite(dset_id, DATASET_READ_POINT_FILE_HYPER_MEM_TEST_DSET_DTYPE, mspace_id, H5S_ALL,
+ H5P_DEFAULT, write_buf) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't write to dataset '%s'\n",
+ DATASET_READ_POINT_FILE_HYPER_MEM_TEST_DSET_NAME);
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ if (write_buf) {
+ HDfree(write_buf);
+ write_buf = NULL;
+ }
+ if (mspace_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(mspace_id);
+ }
+ H5E_END_TRY;
+ mspace_id = H5I_INVALID_HID;
+ }
+ if (fspace_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(fspace_id);
+ }
+ H5E_END_TRY;
+ fspace_id = H5I_INVALID_HID;
+ }
+ if (dset_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Dclose(dset_id);
+ }
+ H5E_END_TRY;
+ dset_id = H5I_INVALID_HID;
+ }
+
+ /*
+ * Close and re-open the file to ensure that the data gets written.
+ */
+ if (H5Gclose(group_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close test's container group\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+ if (H5Gclose(container_group) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close container group\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+ if (H5Fclose(file_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close file for data flushing\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+ }
+ }
+ END_INDEPENDENT_OP(dset_create);
+
+ /*
+ * Re-open file on all ranks.
+ */
+ if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, TRUE)) < 0)
+ TEST_ERROR;
+ if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, fapl_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't re-open file '%s'\n", H5_api_test_parallel_filename);
+ goto error;
+ }
+ if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME);
+ goto error;
+ }
+ if ((group_id =
+ H5Gopen2(container_group, DATASET_READ_POINT_FILE_HYPER_MEM_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container sub-group '%s'\n",
+ DATASET_READ_POINT_FILE_HYPER_MEM_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((dset_id = H5Dopen2(group_id, DATASET_READ_POINT_FILE_HYPER_MEM_TEST_DSET_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open dataset '%s'\n", DATASET_READ_POINT_FILE_HYPER_MEM_TEST_DSET_NAME);
+ goto error;
+ }
+
+ if ((fspace_id = H5Dget_space(dset_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get dataset dataspace\n");
+ goto error;
+ }
+
+ if ((space_npoints = H5Sget_simple_extent_npoints(fspace_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get dataspace num points\n");
+ goto error;
+ }
+
+ /*
+ * Allocate twice the amount of memory needed and leave "holes" in the memory
+ * buffer in order to prove that the mapping from the point selection in the
+ * file dataspace to the hyperslab selection in the memory dataspace works correctly.
+ */
+ read_buf_size =
+ (2 * (size_t)(space_npoints / mpi_size) * DATASET_READ_POINT_FILE_HYPER_MEM_TEST_DTYPE_SIZE);
+ if (NULL == (read_buf = HDcalloc(1, read_buf_size))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for dataset read\n");
+ goto error;
+ }
+
+ if (NULL == (points = HDmalloc((size_t)((space_npoints / mpi_size) *
+ DATASET_READ_POINT_FILE_HYPER_MEM_TEST_SPACE_RANK) *
+ sizeof(hsize_t)))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for point selection\n");
+ goto error;
+ }
+
+ for (i = 0; i < (size_t)(space_npoints / mpi_size); i++) {
+ size_t j;
+
+ for (j = 0; j < DATASET_READ_POINT_FILE_HYPER_MEM_TEST_SPACE_RANK; j++) {
+ size_t idx = (i * DATASET_READ_POINT_FILE_HYPER_MEM_TEST_SPACE_RANK) + j;
+
+ if (j == 0)
+ points[idx] = (hsize_t)mpi_rank;
+ else if (j != DATASET_READ_POINT_FILE_HYPER_MEM_TEST_SPACE_RANK - 1)
+ points[idx] = i / dims[j + 1];
+ else
+ points[idx] = i % dims[j];
+ }
+ }
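+ /*
+ * For example, with the rank-2 dataspace used here the coordinate pairs generated above are
+ * (mpi_rank, 0), (mpi_rank, 1), ..., (mpi_rank, dims[1] - 1), so the point selection covers
+ * exactly this rank's row of the dataset.
+ */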
+
+ if (H5Sselect_elements(fspace_id, H5S_SELECT_SET, (size_t)(space_npoints / mpi_size), points) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't set point selection for dataset read\n");
+ goto error;
+ }
+
+ {
+ hsize_t start[1] = {0};
+ hsize_t stride[1] = {2};
+ hsize_t count[1] = {(hsize_t)(space_npoints / mpi_size)};
+ hsize_t block[1] = {1};
+ hsize_t mdims[] = {(hsize_t)(2 * (space_npoints / mpi_size))};
+
+ if ((mspace_id = H5Screate_simple(1, mdims, NULL)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create memory dataspace\n");
+ goto error;
+ }
+
+ if (H5Sselect_hyperslab(mspace_id, H5S_SELECT_SET, start, stride, count, block) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't set hyperslab selection for dataset write\n");
+ goto error;
+ }
+ }
+
+ if (H5Dread(dset_id, DATASET_READ_POINT_FILE_HYPER_MEM_TEST_DSET_DTYPE, mspace_id, fspace_id, H5P_DEFAULT,
+ read_buf) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't read from dataset '%s'\n", DATASET_READ_POINT_FILE_HYPER_MEM_TEST_DSET_NAME);
+ goto error;
+ }
+
+ for (i = 0; i < (size_t)(2 * (space_npoints / mpi_size)); i++) {
+ if (i % 2 == 0) {
+ if (((int *)read_buf)[i] != (int)mpi_rank) {
+ H5_FAILED();
+ HDprintf(" data verification failed\n");
+ goto error;
+ }
+ }
+ else {
+ if (((int *)read_buf)[i] != 0) {
+ H5_FAILED();
+ HDprintf(" data verification failed\n");
+ goto error;
+ }
+ }
+ }
+
+ if (read_buf) {
+ HDfree(read_buf);
+ read_buf = NULL;
+ }
+
+ if (points) {
+ HDfree(points);
+ points = NULL;
+ }
+
+ if (dims) {
+ HDfree(dims);
+ dims = NULL;
+ }
+
+ if (H5Sclose(mspace_id) < 0)
+ TEST_ERROR;
+ if (H5Sclose(fspace_id) < 0)
+ TEST_ERROR;
+ if (H5Dclose(dset_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Pclose(fapl_id) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ if (read_buf)
+ HDfree(read_buf);
+ if (write_buf)
+ HDfree(write_buf);
+ if (points)
+ HDfree(points);
+ if (dims)
+ HDfree(dims);
+ H5Sclose(mspace_id);
+ H5Sclose(fspace_id);
+ H5Dclose(dset_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Pclose(fapl_id);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to check that a dataset composed of multiple chunks
+ * can be written and read correctly. When reading back the
+ * chunks of the dataset, the file dataspace and memory dataspace
+ * used are the same shape. The dataset's first dimension grows
+ * with the number of MPI ranks, while the other dimensions are fixed.
+ */
+#define DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_FIXED_CHUNK_DIMSIZE \
+ 100 /* Should evenly divide the fixed dimension size */
+#define DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_FIXED_DIMSIZE 1000
+#define DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK 2
+#define DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_DTYPESIZE sizeof(int)
+#define DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_DTYPE H5T_NATIVE_INT
+#define DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_GROUP_NAME \
+ "multi_chunk_dataset_write_same_space_read_test"
+#define DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_NAME "multi_chunk_dataset"
+static int
+test_write_multi_chunk_dataset_same_shape_read(void)
+{
+ hsize_t *dims = NULL;
+ hsize_t *chunk_dims = NULL;
+ hsize_t retrieved_chunk_dims[DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK];
+ hsize_t start[DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK];
+ hsize_t count[DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK];
+ size_t i, data_size, chunk_size, n_chunks_per_rank;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t fapl_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t dcpl_id = H5I_INVALID_HID;
+ hid_t fspace_id = H5I_INVALID_HID;
+ hid_t mspace_id = H5I_INVALID_HID;
+ void *write_buf = NULL;
+ int read_buf[1][DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_FIXED_CHUNK_DIMSIZE];
+
+ TESTING("write to dataset with multiple chunks using same shaped dataspaces");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_GET_PLIST)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file, group, dataset, or getting property list aren't "
+ "supported with this connector\n");
+ return 0;
+ }
+
+ if (NULL ==
+ (dims = HDmalloc(DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK * sizeof(hsize_t)))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for dataset dimensionality\n");
+ goto error;
+ }
+
+ if (NULL == (chunk_dims = HDmalloc(DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK *
+ sizeof(hsize_t)))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for dataset dimensionality\n");
+ goto error;
+ }
+
+ for (i = 0; i < DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK; i++) {
+ if (i == 0) {
+ dims[i] = (hsize_t)mpi_size;
+ chunk_dims[i] = 1;
+ }
+ else {
+ dims[i] = DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_FIXED_DIMSIZE;
+ chunk_dims[i] = DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_FIXED_CHUNK_DIMSIZE;
+ }
+ }
+
+ for (i = 0, chunk_size = 1; i < DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK; i++)
+ chunk_size *= chunk_dims[i];
+ chunk_size *= DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_DTYPESIZE;
+
+ for (i = 0, data_size = 1; i < DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK; i++)
+ data_size *= dims[i];
+ data_size *= DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_DTYPESIZE;
+
+ /*
+ * Have rank 0 create the dataset and completely fill it with data.
+ */
+ BEGIN_INDEPENDENT_OP(dset_create)
+ {
+ if (MAINPROCESS) {
+ if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_parallel_filename);
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME);
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ if ((group_id =
+ H5Gcreate2(container_group, DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_GROUP_NAME,
+ H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container sub-group '%s'\n",
+ DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_GROUP_NAME);
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ if ((fspace_id = H5Screate_simple(DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK,
+ dims, NULL)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to create file dataspace for dataset\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ if ((dcpl_id = H5Pcreate(H5P_DATASET_CREATE)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to create DCPL\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ if (H5Pset_chunk(dcpl_id, DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK,
+ chunk_dims) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to set chunking on DCPL\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ if ((dset_id = H5Dcreate2(group_id, DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_NAME,
+ DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_DTYPE, fspace_id,
+ H5P_DEFAULT, dcpl_id, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create dataset '%s'\n",
+ DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_NAME);
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ /*
+ * See if a copy of the DCPL reports the correct chunking.
+ */
+ if (H5Pclose(dcpl_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close DCPL\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ if ((dcpl_id = H5Dget_create_plist(dset_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve copy of DCPL\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ HDmemset(retrieved_chunk_dims, 0, sizeof(retrieved_chunk_dims));
+ if (H5Pget_chunk(dcpl_id, DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK,
+ retrieved_chunk_dims) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve chunking info\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ for (i = 0; i < DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK; i++) {
+ if (chunk_dims[i] != retrieved_chunk_dims[i]) {
+ H5_FAILED();
+ HDprintf(" chunk dimensionality retrieved from DCPL didn't match originally specified "
+ "dimensionality\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+ }
+
+ if (NULL == (write_buf = HDmalloc(data_size))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for dataset write\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ /*
+ * Ensure that each underlying chunk contains the values
+ *
+ * chunk_index .. (chunk_nelemts - 1) + chunk_index.
+ *
+ * That is to say, for a chunk size of 10 x 10, chunk 0
+ * contains the values
+ *
+ * 0 .. 99
+ *
+ * while the next chunk contains the values
+ *
+ * 1 .. 100
+ *
+ * and so on.
+ */
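+ /*
+ * Worked example (a sketch; assumes mpi_size >= 3 so the element exists): with
+ * dims = {mpi_size, 1000} and chunk_dims = {1, 100}, element i = 2345 sits at
+ * (row 2, column 345), i.e. in chunk index 2 * 10 + 3 = 23 at offset 45 within
+ * that chunk. The loop below computes base = 2345 % 100 = 45 and
+ * tot_adjust = (2345 / 1000) * 10 + (345 / 100) = 23, giving the value 68.
+ */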
+ for (i = 0; i < data_size / DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_DTYPESIZE; i++) {
+ size_t j;
+ size_t base;
+ size_t tot_adjust;
+
+ /*
+ * Calculate a starting base value by taking the index value mod
+ * the size of a chunk in each dimension.
+ */
+ for (j = 0, base = i; j < DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK; j++)
+ if (chunk_dims[j] > 1 && base >= chunk_dims[j])
+ base %= chunk_dims[j];
+
+ /*
+ * Calculate the adjustment in each dimension.
+ */
+ for (j = 0, tot_adjust = 0;
+ j < DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK; j++) {
+ if (j == (DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK - 1))
+ tot_adjust += (i % dims[j]) / chunk_dims[j];
+ else {
+ size_t k;
+ size_t n_faster_elemts;
+
+ /*
+ * Calculate the number of elements in faster dimensions.
+ */
+ for (k = j + 1, n_faster_elemts = 1;
+ k < DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK; k++)
+ n_faster_elemts *= dims[k];
+
+ tot_adjust +=
+ (((i / n_faster_elemts) / chunk_dims[j]) * (dims[j + 1] / chunk_dims[j + 1])) +
+ (((i / n_faster_elemts) % chunk_dims[j]) * chunk_dims[j + 1]);
+ }
+ }
+
+ ((int *)write_buf)[i] = (int)(base + tot_adjust);
+ }
+
+ /*
+ * Write every chunk in the dataset.
+ */
+ if (H5Dwrite(dset_id, DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_DTYPE, H5S_ALL, H5S_ALL,
+ H5P_DEFAULT, write_buf) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't write to dataset '%s'\n",
+ DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_NAME);
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ if (write_buf) {
+ HDfree(write_buf);
+ write_buf = NULL;
+ }
+
+ if (fspace_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(fspace_id);
+ }
+ H5E_END_TRY;
+ fspace_id = H5I_INVALID_HID;
+ }
+ if (dcpl_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Pclose(dcpl_id);
+ }
+ H5E_END_TRY;
+ dcpl_id = H5I_INVALID_HID;
+ }
+ if (dset_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Dclose(dset_id);
+ }
+ H5E_END_TRY;
+ dset_id = H5I_INVALID_HID;
+ }
+
+ /*
+ * Close and re-open the file to ensure that the data gets written.
+ */
+ if (H5Gclose(group_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close test's container group\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+ if (H5Gclose(container_group) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close container group\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+ if (H5Fclose(file_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close file for data flushing\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+ }
+ }
+ END_INDEPENDENT_OP(dset_create);
+
+ /*
+ * Re-open file on all ranks.
+ */
+ if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, TRUE)) < 0)
+ TEST_ERROR;
+ if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, fapl_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't re-open file '%s'\n", H5_api_test_parallel_filename);
+ goto error;
+ }
+ if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME);
+ goto error;
+ }
+ if ((group_id = H5Gopen2(container_group, DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_GROUP_NAME,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container sub-group '%s'\n",
+ DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((dset_id =
+ H5Dopen2(group_id, DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open dataset '%s'\n",
+ DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_NAME);
+ goto error;
+ }
+
+ if ((fspace_id = H5Dget_space(dset_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get dataset dataspace\n");
+ goto error;
+ }
+
+ /*
+ * Create 2-dimensional memory dataspace for read buffer.
+ */
+ {
+ hsize_t mdims[] = {chunk_dims[0], chunk_dims[1]};
+
+ if ((mspace_id = H5Screate_simple(2, mdims, NULL)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to create memory dataspace\n");
+ goto error;
+ }
+ }
+
+ for (i = 0; i < DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK; i++) {
+ count[i] = chunk_dims[i];
+ }
+
+ /*
+ * Each rank reads its respective chunks in the dataset, checking the data for each one.
+ */
+ if (MAINPROCESS)
+ HDprintf("\n");
+ for (i = 0, n_chunks_per_rank = (data_size / (size_t)mpi_size) / chunk_size; i < n_chunks_per_rank; i++) {
+ size_t j, k;
+
+ if (MPI_SUCCESS != MPI_Barrier(MPI_COMM_WORLD)) {
+ H5_FAILED();
+ HDprintf(" MPI_Barrier failed\n");
+ goto error;
+ }
+
+ if (MAINPROCESS)
+ HDprintf("\r All ranks reading chunk %zu", i);
+
+ for (j = 0; j < DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK; j++) {
+ if (j == 0)
+ start[j] = (hsize_t)mpi_rank;
+ else if (j == (DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK - 1))
+ /* Fastest changing dimension */
+ start[j] = (i * chunk_dims[j]) % dims[j];
+ else
+ start[j] = ((i * chunk_dims[j + 1]) / dims[j + 1]) * (chunk_dims[j]);
+ }
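+ /*
+ * For reference: with the fixed layout used here (dims = {mpi_size, 1000},
+ * chunk_dims = {1, 100}), this reduces to start = {mpi_rank, i * 100}, so each
+ * rank walks the 10 chunks of its own row.
+ */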
+
+ /*
+ * Adjust file dataspace selection for next chunk.
+ */
+ if (H5Sselect_hyperslab(fspace_id, H5S_SELECT_SET, start, NULL, count, NULL) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to set hyperslab selection\n");
+ goto error;
+ }
+
+ for (j = 0; j < chunk_dims[0]; j++)
+ for (k = 0; k < chunk_dims[1]; k++)
+ read_buf[j][k] = 0;
+
+ if (H5Dread(dset_id, DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_DTYPE, mspace_id, fspace_id,
+ H5P_DEFAULT, read_buf) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't read from dataset '%s'\n",
+ DATASET_MULTI_CHUNK_WRITE_SAME_SPACE_READ_TEST_DSET_NAME);
+ goto error;
+ }
+
+ for (j = 0; j < chunk_dims[0]; j++) {
+ for (k = 0; k < chunk_dims[1]; k++) {
+ size_t val =
+ ((j * chunk_dims[1]) + k + i) +
+ ((hsize_t)mpi_rank * n_chunks_per_rank); /* Additional value offset for each rank */
+ if (read_buf[j][k] != (int)val) {
+ H5_FAILED();
+ HDprintf(" data verification failed for chunk %lld\n", (long long)i);
+ goto error;
+ }
+ }
+ }
+ }
+
+ if (chunk_dims) {
+ HDfree(chunk_dims);
+ chunk_dims = NULL;
+ }
+
+ if (dims) {
+ HDfree(dims);
+ dims = NULL;
+ }
+
+ if (H5Sclose(mspace_id) < 0)
+ TEST_ERROR;
+ if (H5Sclose(fspace_id) < 0)
+ TEST_ERROR;
+ if (H5Dclose(dset_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Pclose(fapl_id) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ if (write_buf)
+ HDfree(write_buf);
+ if (chunk_dims)
+ HDfree(chunk_dims);
+ if (dims)
+ HDfree(dims);
+ H5Pclose(dcpl_id);
+ H5Sclose(mspace_id);
+ H5Sclose(fspace_id);
+ H5Dclose(dset_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Pclose(fapl_id);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to check that a dataset composed of multiple chunks
+ * can be written and read correctly. When reading back the
+ * chunks of the dataset, the file dataspace and memory dataspace
+ * used are differently shaped. The dataset's first dimension grows
+ * with the number of MPI ranks, while the other dimensions are fixed.
+ */
+#define DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_FIXED_CHUNK_DIMSIZE \
+ 100 /* Should evenly divide the fixed dimension size */
+#define DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_READ_BUF_DIMSIZE \
+ (DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_FIXED_CHUNK_DIMSIZE / 10)
+#define DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_FIXED_DIMSIZE 1000
+#define DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK 2
+#define DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_DTYPESIZE sizeof(int)
+#define DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_DTYPE H5T_NATIVE_INT
+#define DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_GROUP_NAME \
+ "multi_chunk_dataset_write_diff_space_read_test"
+#define DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_NAME "multi_chunk_dataset"
+static int
+test_write_multi_chunk_dataset_diff_shape_read(void)
+{
+ hsize_t *dims = NULL;
+ hsize_t *chunk_dims = NULL;
+ hsize_t retrieved_chunk_dims[DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK];
+ hsize_t start[DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK];
+ hsize_t count[DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK];
+ size_t i, data_size, chunk_size, n_chunks_per_rank;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t fapl_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t dcpl_id = H5I_INVALID_HID;
+ hid_t fspace_id = H5I_INVALID_HID;
+ hid_t mspace_id = H5I_INVALID_HID;
+ void *write_buf = NULL;
+ int read_buf[DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_READ_BUF_DIMSIZE]
+ [DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_READ_BUF_DIMSIZE];
+
+ TESTING("write to dataset with multiple chunks using differently shaped dataspaces");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_GET_PLIST)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file, group, dataset, or getting property list aren't "
+ "supported with this connector\n");
+ return 0;
+ }
+
+ if (NULL ==
+ (dims = HDmalloc(DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK * sizeof(hsize_t)))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for dataset dimensionality\n");
+ goto error;
+ }
+
+ if (NULL == (chunk_dims = HDmalloc(DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK *
+ sizeof(hsize_t)))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for dataset dimensionality\n");
+ goto error;
+ }
+
+ for (i = 0; i < DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK; i++) {
+ if (i == 0) {
+ dims[i] = (hsize_t)mpi_size;
+ chunk_dims[i] = 1;
+ }
+ else {
+ dims[i] = DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_FIXED_DIMSIZE;
+ chunk_dims[i] = DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_FIXED_CHUNK_DIMSIZE;
+ }
+ }
+
+ for (i = 0, chunk_size = 1; i < DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK; i++)
+ chunk_size *= chunk_dims[i];
+ chunk_size *= DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_DTYPESIZE;
+
+ for (i = 0, data_size = 1; i < DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK; i++)
+ data_size *= dims[i];
+ data_size *= DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_DTYPESIZE;
+
+ /*
+ * Have rank 0 create the dataset and completely fill it with data.
+ */
+ BEGIN_INDEPENDENT_OP(dset_create)
+ {
+ if (MAINPROCESS) {
+ if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_parallel_filename);
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME);
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ if ((group_id =
+ H5Gcreate2(container_group, DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_GROUP_NAME,
+ H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container sub-group '%s'\n",
+ DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_GROUP_NAME);
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ if ((fspace_id = H5Screate_simple(DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK,
+ dims, NULL)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to create file dataspace for dataset\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ if ((dcpl_id = H5Pcreate(H5P_DATASET_CREATE)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to create DCPL\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ if (H5Pset_chunk(dcpl_id, DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK,
+ chunk_dims) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to set chunking on DCPL\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ if ((dset_id = H5Dcreate2(group_id, DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_NAME,
+ DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_DTYPE, fspace_id,
+ H5P_DEFAULT, dcpl_id, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create dataset '%s'\n",
+ DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_NAME);
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ /*
+ * See if a copy of the DCPL reports the correct chunking.
+ */
+ if (H5Pclose(dcpl_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close DCPL\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ if ((dcpl_id = H5Dget_create_plist(dset_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve copy of DCPL\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ HDmemset(retrieved_chunk_dims, 0, sizeof(retrieved_chunk_dims));
+ if (H5Pget_chunk(dcpl_id, DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK,
+ retrieved_chunk_dims) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve chunking info\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ for (i = 0; i < DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK; i++) {
+ if (chunk_dims[i] != retrieved_chunk_dims[i]) {
+ H5_FAILED();
+ HDprintf(" chunk dimensionality retrieved from DCPL didn't match originally specified "
+ "dimensionality\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+ }
+
+ if (NULL == (write_buf = HDmalloc(data_size))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for dataset write\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ /*
+ * Ensure that each underlying chunk contains the values
+ *
+ * chunk_index .. (chunk_nelemts - 1) + chunk_index.
+ *
+ * That is to say, for a chunk size of 10 x 10, chunk 0
+ * contains the values
+ *
+ * 0 .. 99
+ *
+ * while the next chunk contains the values
+ *
+ * 1 .. 100
+ *
+ * and so on.
+ */
+ for (i = 0; i < data_size / DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_DTYPESIZE; i++) {
+ size_t j;
+ size_t base;
+ size_t tot_adjust;
+
+ /*
+ * Calculate a starting base value by taking the index value mod
+ * the size of a chunk in each dimension.
+ */
+ for (j = 0, base = i; j < DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK; j++)
+ if (chunk_dims[j] > 1 && base >= chunk_dims[j])
+ base %= chunk_dims[j];
+
+ /*
+ * Calculate the adjustment in each dimension.
+ */
+ for (j = 0, tot_adjust = 0;
+ j < DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK; j++) {
+ if (j == (DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK - 1))
+ tot_adjust += (i % dims[j]) / chunk_dims[j];
+ else {
+ size_t k;
+ size_t n_faster_elemts;
+
+ /*
+ * Calculate the number of elements in faster dimensions.
+ */
+ for (k = j + 1, n_faster_elemts = 1;
+ k < DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK; k++)
+ n_faster_elemts *= dims[k];
+
+ tot_adjust +=
+ (((i / n_faster_elemts) / chunk_dims[j]) * (dims[j + 1] / chunk_dims[j + 1])) +
+ (((i / n_faster_elemts) % chunk_dims[j]) * chunk_dims[j + 1]);
+ }
+ }
+
+ ((int *)write_buf)[i] = (int)(base + tot_adjust);
+ }
+
+ /*
+ * Write every chunk in the dataset.
+ */
+ if (H5Dwrite(dset_id, DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_DTYPE, H5S_ALL, H5S_ALL,
+ H5P_DEFAULT, write_buf) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't write to dataset '%s'\n",
+ DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_NAME);
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ if (write_buf) {
+ HDfree(write_buf);
+ write_buf = NULL;
+ }
+
+ if (fspace_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(fspace_id);
+ }
+ H5E_END_TRY;
+ fspace_id = H5I_INVALID_HID;
+ }
+ if (dcpl_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Pclose(dcpl_id);
+ }
+ H5E_END_TRY;
+ dcpl_id = H5I_INVALID_HID;
+ }
+ if (dset_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Dclose(dset_id);
+ }
+ H5E_END_TRY;
+ dset_id = H5I_INVALID_HID;
+ }
+
+ /*
+ * Close and re-open the file to ensure that the data gets written.
+ */
+ if (H5Gclose(group_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close test's container group\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+ if (H5Gclose(container_group) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close container group\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+ if (H5Fclose(file_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close file for data flushing\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+ }
+ }
+ END_INDEPENDENT_OP(dset_create);
+
+ /*
+ * Re-open file on all ranks.
+ */
+ if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, TRUE)) < 0)
+ TEST_ERROR;
+ if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, fapl_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't re-open file '%s'\n", H5_api_test_parallel_filename);
+ goto error;
+ }
+ if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME);
+ goto error;
+ }
+ if ((group_id = H5Gopen2(container_group, DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_GROUP_NAME,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container sub-group '%s'\n",
+ DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ if ((dset_id =
+ H5Dopen2(group_id, DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open dataset '%s'\n",
+ DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_NAME);
+ goto error;
+ }
+
+ if ((fspace_id = H5Dget_space(dset_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get dataset dataspace\n");
+ goto error;
+ }
+
+ /*
+ * Create memory dataspace for read buffer.
+ */
+ {
+ hsize_t mdims[] = {DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_READ_BUF_DIMSIZE,
+ DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_READ_BUF_DIMSIZE};
+
+ if ((mspace_id = H5Screate_simple(DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK,
+ mdims, NULL)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to create memory dataspace\n");
+ goto error;
+ }
+ }
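+ /*
+ * Note: the 10 x 10 memory dataspace holds the same number of elements (100) as each
+ * 1 x 100 file chunk, so the reads below exercise same-sized selections with different
+ * shapes, which is the point of this test.
+ */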
+
+ for (i = 0; i < DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK; i++) {
+ count[i] = chunk_dims[i];
+ }
+
+ /*
+ * Each rank reads its respective chunks in the dataset, checking the data for each one.
+ */
+ if (MAINPROCESS)
+ HDprintf("\n");
+ for (i = 0, n_chunks_per_rank = (data_size / (size_t)mpi_size) / chunk_size; i < n_chunks_per_rank; i++) {
+ size_t j, k;
+
+ if (MPI_SUCCESS != MPI_Barrier(MPI_COMM_WORLD)) {
+ H5_FAILED();
+ HDprintf(" MPI_Barrier failed\n");
+ goto error;
+ }
+
+ if (MAINPROCESS)
+ HDprintf("\r All ranks reading chunk %zu", i);
+
+ for (j = 0; j < DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK; j++) {
+ if (j == 0)
+ start[j] = (hsize_t)mpi_rank;
+ else if (j == (DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK - 1))
+ /* Fastest changing dimension */
+ start[j] = (i * chunk_dims[j]) % dims[j];
+ else
+ start[j] = ((i * chunk_dims[j + 1]) / dims[j + 1]) * (chunk_dims[j]);
+ }
+
+ /*
+ * Adjust file dataspace selection for next chunk.
+ */
+ if (H5Sselect_hyperslab(fspace_id, H5S_SELECT_SET, start, NULL, count, NULL) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to set hyperslab selection\n");
+ goto error;
+ }
+
+ for (j = 0; j < DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_READ_BUF_DIMSIZE; j++)
+ for (k = 0; k < DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_READ_BUF_DIMSIZE; k++)
+ read_buf[j][k] = 0;
+
+ if (H5Dread(dset_id, DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_DTYPE, mspace_id, fspace_id,
+ H5P_DEFAULT, read_buf) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't read from dataset '%s'\n",
+ DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_DSET_NAME);
+ goto error;
+ }
+
+ for (j = 0; j < DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_READ_BUF_DIMSIZE; j++) {
+ for (k = 0; k < DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_READ_BUF_DIMSIZE; k++) {
+ size_t val = ((j * DATASET_MULTI_CHUNK_WRITE_DIFF_SPACE_READ_TEST_READ_BUF_DIMSIZE) + k + i) +
+ ((hsize_t)mpi_rank * n_chunks_per_rank);
+
+ if (read_buf[j][k] != (int)val) {
+ H5_FAILED();
+ HDprintf(" data verification failed for chunk %lld\n", (long long)i);
+ goto error;
+ }
+ }
+ }
+ }
+
+ if (chunk_dims) {
+ HDfree(chunk_dims);
+ chunk_dims = NULL;
+ }
+
+ if (dims) {
+ HDfree(dims);
+ dims = NULL;
+ }
+
+ if (H5Sclose(mspace_id) < 0)
+ TEST_ERROR;
+ if (H5Sclose(fspace_id) < 0)
+ TEST_ERROR;
+ if (H5Dclose(dset_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Pclose(fapl_id) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ if (write_buf)
+ HDfree(write_buf);
+ if (chunk_dims)
+ HDfree(chunk_dims);
+ if (dims)
+ HDfree(dims);
+ H5Pclose(dcpl_id);
+ H5Sclose(mspace_id);
+ H5Sclose(fspace_id);
+ H5Dclose(dset_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Pclose(fapl_id);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to check that a dataset composed of multiple chunks
+ * can be written and read correctly several times in a row.
+ * When reading back the chunks of the dataset, the file
+ * dataspace and memory dataspace used are the same shape.
+ * The dataset's first dimension grows with the number of MPI
+ * ranks, while the other dimensions are fixed.
+ */
+#define DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_FIXED_CHUNK_DIMSIZE \
+ 100 /* Should evenly divide the fixed dimension size */
+#define DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_FIXED_DIMSIZE 1000
+#define DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK 2
+#define DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_DTYPESIZE sizeof(int)
+#define DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_DTYPE H5T_NATIVE_INT
+#define DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_GROUP_NAME \
+ "multi_chunk_dataset_same_space_overwrite_test"
+#define DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_NAME "multi_chunk_dataset"
+#define DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_NITERS 10
+static int
+test_overwrite_multi_chunk_dataset_same_shape_read(void)
+{
+ hsize_t *dims = NULL;
+ hsize_t *chunk_dims = NULL;
+ hsize_t retrieved_chunk_dims[DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK];
+ hsize_t start[DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK];
+ hsize_t count[DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK];
+ size_t i, data_size, chunk_size, n_chunks_per_rank;
+ size_t niter;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t fapl_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t dcpl_id = H5I_INVALID_HID;
+ hid_t fspace_id = H5I_INVALID_HID;
+ hid_t mspace_id = H5I_INVALID_HID;
+ void *write_buf = NULL;
+ int read_buf[1][DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_FIXED_CHUNK_DIMSIZE];
+
+ TESTING("several overwrites to dataset with multiple chunks using same shaped dataspaces");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_GET_PLIST)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file, group, dataset, or getting property list aren't "
+ "supported with this connector\n");
+ return 0;
+ }
+
+ if (NULL == (dims = HDmalloc(DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK *
+ sizeof(hsize_t)))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for dataset dimensionality\n");
+ goto error;
+ }
+
+ if (NULL == (chunk_dims = HDmalloc(DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK *
+ sizeof(hsize_t)))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for dataset dimensionality\n");
+ goto error;
+ }
+
+ for (i = 0; i < DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK; i++) {
+ if (i == 0) {
+ dims[i] = (hsize_t)mpi_size;
+ chunk_dims[i] = 1;
+ }
+ else {
+ dims[i] = DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_FIXED_DIMSIZE;
+ chunk_dims[i] = DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_FIXED_CHUNK_DIMSIZE;
+ }
+ }
+
+ for (i = 0, chunk_size = 1; i < DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK; i++)
+ chunk_size *= chunk_dims[i];
+ chunk_size *= DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_DTYPESIZE;
+
+ for (i = 0, data_size = 1; i < DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK; i++)
+ data_size *= dims[i];
+ data_size *= DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_DTYPESIZE;
+
+ /*
+ * Have rank 0 create the dataset, but don't fill it with data yet.
+ */
+ BEGIN_INDEPENDENT_OP(dset_create)
+ {
+ if (MAINPROCESS) {
+ if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_parallel_filename);
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME);
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ if ((group_id = H5Gcreate2(container_group,
+ DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_GROUP_NAME,
+ H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container sub-group '%s'\n",
+ DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_GROUP_NAME);
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ if ((fspace_id = H5Screate_simple(
+ DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK, dims, NULL)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to create file dataspace for dataset\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ if ((dcpl_id = H5Pcreate(H5P_DATASET_CREATE)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to create DCPL\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ if (H5Pset_chunk(dcpl_id, DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK,
+ chunk_dims) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to set chunking on DCPL\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ /* Set dataset space allocation time to Early to ensure all chunk-related metadata is available to
+ * all other processes when they open the dataset */
+ if (H5Pset_alloc_time(dcpl_id, H5D_ALLOC_TIME_EARLY) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to set allocation time on DCPL\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ if ((dset_id = H5Dcreate2(group_id, DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_NAME,
+ DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_DTYPE,
+ fspace_id, H5P_DEFAULT, dcpl_id, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create dataset '%s'\n",
+ DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_NAME);
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ /*
+ * See if a copy of the DCPL reports the correct chunking.
+ */
+ if (H5Pclose(dcpl_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close DCPL\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ if ((dcpl_id = H5Dget_create_plist(dset_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve copy of DCPL\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ HDmemset(retrieved_chunk_dims, 0, sizeof(retrieved_chunk_dims));
+ if (H5Pget_chunk(dcpl_id, DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK,
+ retrieved_chunk_dims) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve chunking info\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ for (i = 0; i < DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK; i++) {
+ if (chunk_dims[i] != retrieved_chunk_dims[i]) {
+ H5_FAILED();
+ HDprintf(" chunk dimensionality retrieved from DCPL didn't match originally specified "
+ "dimensionality\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+ }
+
+ if (NULL == (write_buf = HDmalloc(data_size))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for dataset write\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ if (fspace_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(fspace_id);
+ }
+ H5E_END_TRY;
+ fspace_id = H5I_INVALID_HID;
+ }
+ if (dcpl_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Pclose(dcpl_id);
+ }
+ H5E_END_TRY;
+ dcpl_id = H5I_INVALID_HID;
+ }
+ if (dset_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Dclose(dset_id);
+ }
+ H5E_END_TRY;
+ dset_id = H5I_INVALID_HID;
+ }
+
+ /*
+ * Close and re-open the file on all ranks.
+ */
+ if (H5Gclose(group_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close test's container group\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+ if (H5Gclose(container_group) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close container group\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+ if (H5Fclose(file_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close file for data flushing\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+ }
+ }
+ END_INDEPENDENT_OP(dset_create);
+
+ /*
+ * Re-open file on all ranks.
+ */
+ if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, TRUE)) < 0)
+ TEST_ERROR;
+ if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, fapl_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't re-open file '%s'\n", H5_api_test_parallel_filename);
+ goto error;
+ }
+ if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME);
+ goto error;
+ }
+ if ((group_id = H5Gopen2(container_group, DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_GROUP_NAME,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container sub-group '%s'\n",
+ DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ /*
+ * Create 2-dimensional memory dataspace for read buffer.
+ */
+ {
+ hsize_t mdims[] = {chunk_dims[0], chunk_dims[1]};
+
+ if ((mspace_id = H5Screate_simple(2, mdims, NULL)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to create memory dataspace\n");
+ goto error;
+ }
+ }
+
+ for (i = 0; i < DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK; i++) {
+ count[i] = chunk_dims[i];
+ }
+
+ if (MAINPROCESS)
+ HDprintf("\n");
+ for (niter = 0; niter < DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_NITERS; niter++) {
+ if ((dset_id = H5Dopen2(group_id, DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_NAME,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open dataset '%s'\n",
+ DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_NAME);
+ goto error;
+ }
+
+ BEGIN_INDEPENDENT_OP(dset_write)
+ {
+ if (MAINPROCESS) {
+ memset(write_buf, 0, data_size);
+
+ /*
+ * Ensure that each underlying chunk contains the values
+ *
+ * chunk_index .. (chunk_nelemts - 1) + chunk_index.
+ *
+ * That is to say, for a chunk size of 10 x 10, chunk 0
+ * contains the values
+ *
+ * 0 .. 99
+ *
+ * while the next chunk contains the values
+ *
+ * 1 .. 100
+ *
+ * and so on. On each iteration, we add 1 to the previous
+ * values.
+ */
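+                /*
+                 * Small worked example with assumed sizes (not the actual
+                 * chunk sizes used in this test): a 2 x 4 dataset split into
+                 * 2 x 2 chunks gets the row-major buffer
+                 *
+                 *     { 0, 1, 1, 2, 2, 3, 3, 4 }
+                 *
+                 * on the first pass (niter == 0); chunk 0 holds 0..3, chunk 1
+                 * holds 1..4, and every value grows by 1 on later iterations.
+                 */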
+ for (i = 0; i < data_size / DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_DTYPESIZE;
+ i++) {
+ size_t j;
+ size_t base;
+ size_t tot_adjust;
+
+ /*
+ * Calculate a starting base value by taking the index value mod
+ * the size of a chunk in each dimension.
+ */
+ for (j = 0, base = i;
+ j < DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK; j++)
+ if (chunk_dims[j] > 1 && base >= chunk_dims[j])
+ base %= chunk_dims[j];
+
+ /*
+ * Calculate the adjustment in each dimension.
+ */
+ for (j = 0, tot_adjust = 0;
+ j < DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK; j++) {
+ if (j == (DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK - 1))
+ tot_adjust += (i % dims[j]) / chunk_dims[j];
+ else {
+ size_t k;
+ size_t n_faster_elemts;
+
+ /*
+ * Calculate the number of elements in faster dimensions.
+ */
+ for (k = j + 1, n_faster_elemts = 1;
+ k < DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK; k++)
+ n_faster_elemts *= dims[k];
+
+ tot_adjust += (((i / n_faster_elemts) / chunk_dims[j]) *
+ (dims[j + 1] / chunk_dims[j + 1])) +
+ (((i / n_faster_elemts) % chunk_dims[j]) * chunk_dims[j + 1]);
+ }
+ }
+
+ ((int *)write_buf)[i] = (int)(base + tot_adjust + niter);
+ }
+
+ /*
+ * Write every chunk in the dataset.
+ */
+ if (H5Dwrite(dset_id, DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_DTYPE, H5S_ALL,
+ H5S_ALL, H5P_DEFAULT, write_buf) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't write to dataset '%s'\n",
+ DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_NAME);
+ INDEPENDENT_OP_ERROR(dset_write);
+ }
+ }
+ }
+ END_INDEPENDENT_OP(dset_write);
+
+ if (dset_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Dclose(dset_id);
+ }
+ H5E_END_TRY;
+ dset_id = H5I_INVALID_HID;
+ }
+
+ if (MPI_SUCCESS != MPI_Barrier(MPI_COMM_WORLD)) {
+ H5_FAILED();
+ HDprintf(" MPI_Barrier failed\n");
+ goto error;
+ }
+
+ if ((dset_id = H5Dopen2(group_id, DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_NAME,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open dataset '%s'\n",
+ DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_NAME);
+ goto error;
+ }
+
+ if ((fspace_id = H5Dget_space(dset_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get dataset dataspace\n");
+ goto error;
+ }
+
+ /*
+         * Each rank reads its respective chunks in the dataset, checking the data for each one.
+ */
+ for (i = 0, n_chunks_per_rank = (data_size / (size_t)mpi_size) / chunk_size; i < n_chunks_per_rank;
+ i++) {
+ size_t j, k;
+
+ if (MAINPROCESS)
+ HDprintf("\r All ranks reading chunk %zu", i);
+
+ for (j = 0; j < DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK; j++) {
+ if (j == 0)
+ start[j] = (hsize_t)mpi_rank;
+ else if (j == (DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_SPACE_RANK - 1))
+ /* Fastest changing dimension */
+ start[j] = (i * chunk_dims[j]) % dims[j];
+ else
+ start[j] = ((i * chunk_dims[j + 1]) / dims[j + 1]) * (chunk_dims[j]);
+ }
+
+ /*
+ * Adjust file dataspace selection for next chunk.
+ */
+ if (H5Sselect_hyperslab(fspace_id, H5S_SELECT_SET, start, NULL, count, NULL) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to set hyperslab selection\n");
+ goto error;
+ }
+
+ for (j = 0; j < chunk_dims[0]; j++)
+ for (k = 0; k < chunk_dims[1]; k++)
+ read_buf[j][k] = 0;
+
+ if (H5Dread(dset_id, DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_DTYPE, mspace_id,
+ fspace_id, H5P_DEFAULT, read_buf) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't read from dataset '%s'\n",
+ DATASET_MULTI_CHUNK_OVERWRITE_SAME_SPACE_READ_TEST_DSET_NAME);
+ goto error;
+ }
+
+ for (j = 0; j < chunk_dims[0]; j++) {
+ for (k = 0; k < chunk_dims[1]; k++) {
+ size_t val =
+ ((j * chunk_dims[0]) + k + i) +
+ ((hsize_t)mpi_rank * n_chunks_per_rank) /* Additional value offset for each rank */
+ + niter;
+ if (read_buf[j][k] != (int)val) {
+ H5_FAILED();
+ HDprintf(" data verification failed for chunk %lld\n", (long long)i);
+ goto error;
+ }
+ }
+ }
+ }
+
+ if (fspace_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(fspace_id);
+ }
+ H5E_END_TRY;
+ fspace_id = H5I_INVALID_HID;
+ }
+ if (dset_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Dclose(dset_id);
+ }
+ H5E_END_TRY;
+ dset_id = H5I_INVALID_HID;
+ }
+
+ if (MPI_SUCCESS != MPI_Barrier(MPI_COMM_WORLD)) {
+ H5_FAILED();
+ HDprintf(" MPI_Barrier failed\n");
+ goto error;
+ }
+ }
+
+ if (chunk_dims) {
+ HDfree(chunk_dims);
+ chunk_dims = NULL;
+ }
+
+ if (dims) {
+ HDfree(dims);
+ dims = NULL;
+ }
+
+ if (write_buf) {
+ HDfree(write_buf);
+ write_buf = NULL;
+ }
+
+ if (H5Sclose(mspace_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Pclose(fapl_id) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ if (write_buf)
+ HDfree(write_buf);
+ if (chunk_dims)
+ HDfree(chunk_dims);
+ if (dims)
+ HDfree(dims);
+ H5Pclose(dcpl_id);
+ H5Sclose(mspace_id);
+ H5Sclose(fspace_id);
+ H5Dclose(dset_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Pclose(fapl_id);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to check that a dataset composed of multiple chunks
+ * can be written and read correctly several times in a row.
+ * When reading back the chunks of the dataset, the file
+ * dataspace and memory dataspace used are differently shaped.
+ * The dataset's first dimension grows with the number of MPI
+ * ranks, while the other dimensions are fixed.
+ */
+#define DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_FIXED_CHUNK_DIMSIZE \
+ 100 /* Should be an even divisor of fixed dimension size */
+#define DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_READ_BUF_DIMSIZE \
+ (DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_FIXED_CHUNK_DIMSIZE / 10)
+#define DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_FIXED_DIMSIZE 1000
+#define DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK 2
+#define DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_DTYPESIZE sizeof(int)
+#define DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_DTYPE H5T_NATIVE_INT
+#define DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_GROUP_NAME \
+ "multi_chunk_dataset_diff_space_overwrite_test"
+#define DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_NAME "multi_chunk_dataset"
+#define DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_NITERS 10
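+/*
+ * With the sizes above, each chunk is a 1 x 100 strip of the file dataspace,
+ * while the read buffer and its memory dataspace are 10 x 10; every read thus
+ * moves 100 elements through a differently shaped selection.
+ */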
+static int
+test_overwrite_multi_chunk_dataset_diff_shape_read(void)
+{
+ hsize_t *dims = NULL;
+ hsize_t *chunk_dims = NULL;
+    hsize_t  retrieved_chunk_dims[DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK];
+    hsize_t  start[DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK];
+    hsize_t  count[DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK];
+ size_t i, data_size, chunk_size, n_chunks_per_rank;
+ size_t niter;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t fapl_id = H5I_INVALID_HID;
+ hid_t container_group = H5I_INVALID_HID, group_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t dcpl_id = H5I_INVALID_HID;
+ hid_t fspace_id = H5I_INVALID_HID;
+ hid_t mspace_id = H5I_INVALID_HID;
+ void *write_buf = NULL;
+ int read_buf[DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_READ_BUF_DIMSIZE]
+ [DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_READ_BUF_DIMSIZE];
+
+ TESTING("several overwrites to dataset with multiple chunks using differently shaped dataspaces");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_GET_PLIST)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file, group, dataset, or getting property list aren't "
+ "supported with this connector\n");
+ return 0;
+ }
+
+ if (NULL == (dims = HDmalloc(DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK *
+ sizeof(hsize_t)))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for dataset dimensionality\n");
+ goto error;
+ }
+
+ if (NULL == (chunk_dims = HDmalloc(DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK *
+ sizeof(hsize_t)))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for dataset dimensionality\n");
+ goto error;
+ }
+
+ for (i = 0; i < DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK; i++) {
+ if (i == 0) {
+ dims[i] = (hsize_t)mpi_size;
+ chunk_dims[i] = 1;
+ }
+ else {
+ dims[i] = DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_FIXED_DIMSIZE;
+ chunk_dims[i] = DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_FIXED_CHUNK_DIMSIZE;
+ }
+ }
+
+ for (i = 0, chunk_size = 1; i < DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK; i++)
+ chunk_size *= chunk_dims[i];
+ chunk_size *= DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_DTYPESIZE;
+
+ for (i = 0, data_size = 1; i < DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK; i++)
+ data_size *= dims[i];
+ data_size *= DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_DTYPESIZE;
+
+ /*
+ * Have rank 0 create the dataset, but don't fill it with data yet.
+ */
+ BEGIN_INDEPENDENT_OP(dset_create)
+ {
+ if (MAINPROCESS) {
+ if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open file '%s'\n", H5_api_test_parallel_filename);
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME);
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ if ((group_id = H5Gcreate2(container_group,
+ DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_GROUP_NAME,
+ H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create container sub-group '%s'\n",
+ DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_GROUP_NAME);
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ if ((fspace_id = H5Screate_simple(
+ DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK, dims, NULL)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to create file dataspace for dataset\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ if ((dcpl_id = H5Pcreate(H5P_DATASET_CREATE)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to create DCPL\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ if (H5Pset_chunk(dcpl_id, DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK,
+ chunk_dims) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to set chunking on DCPL\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ /* Set dataset space allocation time to Early to ensure all chunk-related metadata is available to
+ * all other processes when they open the dataset */
+ if (H5Pset_alloc_time(dcpl_id, H5D_ALLOC_TIME_EARLY) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to set allocation time on DCPL\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ if ((dset_id = H5Dcreate2(group_id, DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_NAME,
+ DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_DTYPE,
+ fspace_id, H5P_DEFAULT, dcpl_id, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create dataset '%s'\n",
+ DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_NAME);
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ /*
+ * See if a copy of the DCPL reports the correct chunking.
+ */
+ if (H5Pclose(dcpl_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close DCPL\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ if ((dcpl_id = H5Dget_create_plist(dset_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve copy of DCPL\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ memset(retrieved_chunk_dims, 0, sizeof(retrieved_chunk_dims));
+ if (H5Pget_chunk(dcpl_id, DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK,
+ retrieved_chunk_dims) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to retrieve chunking info\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ for (i = 0; i < DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK; i++) {
+ if (chunk_dims[i] != retrieved_chunk_dims[i]) {
+ H5_FAILED();
+ HDprintf(" chunk dimensionality retrieved from DCPL didn't match originally specified "
+ "dimensionality\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+ }
+
+ if (NULL == (write_buf = HDmalloc(data_size))) {
+ H5_FAILED();
+ HDprintf(" couldn't allocate buffer for dataset write\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+
+ if (fspace_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(fspace_id);
+ }
+ H5E_END_TRY;
+ fspace_id = H5I_INVALID_HID;
+ }
+ if (dcpl_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Pclose(dcpl_id);
+ }
+ H5E_END_TRY;
+ dcpl_id = H5I_INVALID_HID;
+ }
+ if (dset_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Dclose(dset_id);
+ }
+ H5E_END_TRY;
+ dset_id = H5I_INVALID_HID;
+ }
+
+ /*
+ * Close and re-open the file on all ranks.
+ */
+ if (H5Gclose(group_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close test's container group\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+ if (H5Gclose(container_group) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close container group\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+ if (H5Fclose(file_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close file for data flushing\n");
+ INDEPENDENT_OP_ERROR(dset_create);
+ }
+ }
+ }
+ END_INDEPENDENT_OP(dset_create);
+
+ /*
+ * Re-open file on all ranks.
+ */
+ if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, TRUE)) < 0)
+ TEST_ERROR;
+ if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, fapl_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't re-open file '%s'\n", H5_api_test_parallel_filename);
+ goto error;
+ }
+ if ((container_group = H5Gopen2(file_id, DATASET_TEST_GROUP_NAME, H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container group '%s'\n", DATASET_TEST_GROUP_NAME);
+ goto error;
+ }
+ if ((group_id = H5Gopen2(container_group, DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_GROUP_NAME,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open container sub-group '%s'\n",
+ DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_GROUP_NAME);
+ goto error;
+ }
+
+ /*
+ * Create memory dataspace for read buffer.
+ */
+ {
+ hsize_t mdims[] = {DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_READ_BUF_DIMSIZE,
+ DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_READ_BUF_DIMSIZE};
+
+ if ((mspace_id = H5Screate_simple(DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK,
+ mdims, NULL)) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to create memory dataspace\n");
+ goto error;
+ }
+ }
+
+ for (i = 0; i < DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK; i++) {
+ count[i] = chunk_dims[i];
+ }
+
+ if (MAINPROCESS)
+ HDprintf("\n");
+ for (niter = 0; niter < DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_NITERS; niter++) {
+ if ((dset_id = H5Dopen2(group_id, DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_NAME,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open dataset '%s'\n",
+ DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_NAME);
+ goto error;
+ }
+
+ BEGIN_INDEPENDENT_OP(dset_write)
+ {
+ if (MAINPROCESS) {
+ memset(write_buf, 0, data_size);
+
+ /*
+ * Ensure that each underlying chunk contains the values
+ *
+ * chunk_index .. (chunk_nelemts - 1) + chunk_index.
+ *
+ * That is to say, for a chunk size of 10 x 10, chunk 0
+ * contains the values
+ *
+ * 0 .. 99
+ *
+ * while the next chunk contains the values
+ *
+ * 1 .. 100
+ *
+ * and so on. On each iteration, we add 1 to the previous
+ * values.
+ */
+ for (i = 0; i < data_size / DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_DTYPESIZE;
+ i++) {
+ size_t j;
+ size_t base;
+ size_t tot_adjust;
+
+ /*
+ * Calculate a starting base value by taking the index value mod
+ * the size of a chunk in each dimension.
+ */
+ for (j = 0, base = i;
+ j < DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK; j++)
+ if (chunk_dims[j] > 1 && base >= chunk_dims[j])
+ base %= chunk_dims[j];
+
+ /*
+ * Calculate the adjustment in each dimension.
+ */
+ for (j = 0, tot_adjust = 0;
+ j < DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK; j++) {
+ if (j == (DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK - 1))
+ tot_adjust += (i % dims[j]) / chunk_dims[j];
+ else {
+ size_t k;
+ size_t n_faster_elemts;
+
+ /*
+ * Calculate the number of elements in faster dimensions.
+ */
+ for (k = j + 1, n_faster_elemts = 1;
+ k < DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK; k++)
+ n_faster_elemts *= dims[k];
+
+ tot_adjust += (((i / n_faster_elemts) / chunk_dims[j]) *
+ (dims[j + 1] / chunk_dims[j + 1])) +
+ (((i / n_faster_elemts) % chunk_dims[j]) * chunk_dims[j + 1]);
+ }
+ }
+
+ ((int *)write_buf)[i] = (int)(base + tot_adjust + niter);
+ }
+
+ /*
+ * Write every chunk in the dataset.
+ */
+ if (H5Dwrite(dset_id, DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_DTYPE, H5S_ALL,
+ H5S_ALL, H5P_DEFAULT, write_buf) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't write to dataset '%s'\n",
+ DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_NAME);
+ INDEPENDENT_OP_ERROR(dset_write);
+ }
+ }
+ }
+ END_INDEPENDENT_OP(dset_write);
+
+ if (dset_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Dclose(dset_id);
+ }
+ H5E_END_TRY;
+ dset_id = H5I_INVALID_HID;
+ }
+
+ if (MPI_SUCCESS != MPI_Barrier(MPI_COMM_WORLD)) {
+ H5_FAILED();
+ HDprintf(" MPI_Barrier failed\n");
+ goto error;
+ }
+
+ if ((dset_id = H5Dopen2(group_id, DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_NAME,
+ H5P_DEFAULT)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't open dataset '%s'\n",
+ DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_NAME);
+ goto error;
+ }
+
+ if ((fspace_id = H5Dget_space(dset_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't get dataset dataspace\n");
+ goto error;
+ }
+
+ /*
+         * Each rank reads its respective chunks in the dataset, checking the data for each one.
+ */
+ for (i = 0, n_chunks_per_rank = (data_size / (size_t)mpi_size) / chunk_size; i < n_chunks_per_rank;
+ i++) {
+ size_t j, k;
+
+ if (MAINPROCESS)
+ HDprintf("\r All ranks reading chunk %zu", i);
+
+ for (j = 0; j < DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK; j++) {
+ if (j == 0)
+ start[j] = (hsize_t)mpi_rank;
+ else if (j == (DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_SPACE_RANK - 1))
+ /* Fastest changing dimension */
+ start[j] = (i * chunk_dims[j]) % dims[j];
+ else
+ start[j] = ((i * chunk_dims[j + 1]) / dims[j + 1]) * (chunk_dims[j]);
+ }
+
+ /*
+ * Adjust file dataspace selection for next chunk.
+ */
+ if (H5Sselect_hyperslab(fspace_id, H5S_SELECT_SET, start, NULL, count, NULL) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to set hyperslab selection\n");
+ goto error;
+ }
+
+ for (j = 0; j < DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_READ_BUF_DIMSIZE; j++)
+ for (k = 0; k < DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_READ_BUF_DIMSIZE; k++)
+ read_buf[j][k] = 0;
+
+ if (H5Dread(dset_id, DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_DTYPE, mspace_id,
+ fspace_id, H5P_DEFAULT, read_buf) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't read from dataset '%s'\n",
+ DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_DSET_NAME);
+ goto error;
+ }
+
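+            /*
+             * Expected value breakdown (derived from the write pattern above):
+             * the element's flat offset within the chunk, (j * read buffer dim
+             * size) + k, plus the chunk index i, plus this rank's base offset
+             * (mpi_rank * n_chunks_per_rank), plus the iteration count niter.
+             */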
+ for (j = 0; j < DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_READ_BUF_DIMSIZE; j++) {
+ for (k = 0; k < DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_READ_BUF_DIMSIZE; k++) {
+ size_t val =
+ ((j * DATASET_MULTI_CHUNK_OVERWRITE_DIFF_SPACE_READ_TEST_READ_BUF_DIMSIZE) + k + i) +
+ ((hsize_t)mpi_rank * n_chunks_per_rank) + niter;
+
+ if (read_buf[j][k] != (int)val) {
+ H5_FAILED();
+ HDprintf(" data verification failed for chunk %lld\n", (long long)i);
+ goto error;
+ }
+ }
+ }
+ }
+
+ if (fspace_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Sclose(fspace_id);
+ }
+ H5E_END_TRY;
+ fspace_id = H5I_INVALID_HID;
+ }
+ if (dset_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Dclose(dset_id);
+ }
+ H5E_END_TRY;
+ dset_id = H5I_INVALID_HID;
+ }
+
+ if (MPI_SUCCESS != MPI_Barrier(MPI_COMM_WORLD)) {
+ H5_FAILED();
+ HDprintf(" MPI_Barrier failed\n");
+ goto error;
+ }
+ }
+
+ if (chunk_dims) {
+ HDfree(chunk_dims);
+ chunk_dims = NULL;
+ }
+
+ if (dims) {
+ HDfree(dims);
+ dims = NULL;
+ }
+
+ if (write_buf) {
+ HDfree(write_buf);
+ write_buf = NULL;
+ }
+
+ if (H5Sclose(mspace_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(group_id) < 0)
+ TEST_ERROR;
+ if (H5Gclose(container_group) < 0)
+ TEST_ERROR;
+ if (H5Pclose(fapl_id) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ if (write_buf)
+ HDfree(write_buf);
+ if (chunk_dims)
+ HDfree(chunk_dims);
+ if (dims)
+ HDfree(dims);
+ H5Pclose(dcpl_id);
+ H5Sclose(mspace_id);
+ H5Sclose(fspace_id);
+ H5Dclose(dset_id);
+ H5Gclose(group_id);
+ H5Gclose(container_group);
+ H5Pclose(fapl_id);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+int
+H5_api_dataset_test_parallel(void)
+{
+ size_t i;
+ int nerrors;
+
+ if (MAINPROCESS) {
+ HDprintf("**********************************************\n");
+ HDprintf("* *\n");
+ HDprintf("* API Parallel Dataset Tests *\n");
+ HDprintf("* *\n");
+ HDprintf("**********************************************\n\n");
+ }
+
+ for (i = 0, nerrors = 0; i < ARRAY_LENGTH(par_dataset_tests); i++) {
+ nerrors += (*par_dataset_tests[i])() ? 1 : 0;
+
+ if (MPI_SUCCESS != MPI_Barrier(MPI_COMM_WORLD)) {
+ if (MAINPROCESS)
+ HDprintf(" MPI_Barrier() failed!\n");
+ }
+ }
+
+ if (MAINPROCESS)
+ HDprintf("\n");
+
+ return nerrors;
+}
diff --git a/testpar/API/H5_api_dataset_test_parallel.h b/testpar/API/H5_api_dataset_test_parallel.h
new file mode 100644
index 0000000..1e2cbd0
--- /dev/null
+++ b/testpar/API/H5_api_dataset_test_parallel.h
@@ -0,0 +1,20 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+#ifndef H5_API_DATASET_TEST_PARALLEL_H_
+#define H5_API_DATASET_TEST_PARALLEL_H_
+
+#include "H5_api_test_parallel.h"
+
+int H5_api_dataset_test_parallel(void);
+
+#endif /* H5_API_DATASET_TEST_PARALLEL_H_ */
diff --git a/testpar/API/H5_api_datatype_test_parallel.c b/testpar/API/H5_api_datatype_test_parallel.c
new file mode 100644
index 0000000..7d090c0
--- /dev/null
+++ b/testpar/API/H5_api_datatype_test_parallel.c
@@ -0,0 +1,47 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+#include "H5_api_datatype_test_parallel.h"
+
+/*
+ * The array of parallel datatype tests to be performed.
+ */
+static int (*par_datatype_tests[])(void) = {NULL};
+
+int
+H5_api_datatype_test_parallel(void)
+{
+ size_t i;
+ int nerrors;
+
+ if (MAINPROCESS) {
+ HDprintf("**********************************************\n");
+ HDprintf("* *\n");
+ HDprintf("* API Parallel Datatype Tests *\n");
+ HDprintf("* *\n");
+ HDprintf("**********************************************\n\n");
+ }
+
+ for (i = 0, nerrors = 0; i < ARRAY_LENGTH(par_datatype_tests); i++) {
+ /* nerrors += (*par_datatype_tests[i])() ? 1 : 0; */
+
+ if (MPI_SUCCESS != MPI_Barrier(MPI_COMM_WORLD)) {
+ if (MAINPROCESS)
+ HDprintf(" MPI_Barrier() failed!\n");
+ }
+ }
+
+ if (MAINPROCESS)
+ HDprintf("\n");
+
+ return nerrors;
+}
diff --git a/testpar/API/H5_api_datatype_test_parallel.h b/testpar/API/H5_api_datatype_test_parallel.h
new file mode 100644
index 0000000..0a2ba50
--- /dev/null
+++ b/testpar/API/H5_api_datatype_test_parallel.h
@@ -0,0 +1,20 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+#ifndef H5_API_DATATYPE_TEST_PARALLEL_H_
+#define H5_API_DATATYPE_TEST_PARALLEL_H_
+
+#include "H5_api_test_parallel.h"
+
+int H5_api_datatype_test_parallel(void);
+
+#endif /* H5_API_DATATYPE_TEST_PARALLEL_H_ */
diff --git a/testpar/API/H5_api_file_test_parallel.c b/testpar/API/H5_api_file_test_parallel.c
new file mode 100644
index 0000000..20fb2ba
--- /dev/null
+++ b/testpar/API/H5_api_file_test_parallel.c
@@ -0,0 +1,367 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+#include "H5_api_file_test_parallel.h"
+
+static int test_create_file(void);
+static int test_open_file(void);
+static int test_split_comm_file_access(void);
+
+/*
+ * The array of parallel file tests to be performed.
+ */
+static int (*par_file_tests[])(void) = {
+ test_create_file,
+ test_open_file,
+ test_split_comm_file_access,
+};
+
+/*
+ * A test to ensure that a file can be created in parallel.
+ */
+#define FILE_CREATE_TEST_FILENAME "test_file_parallel.h5"
+static int
+test_create_file(void)
+{
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t fapl_id = H5I_INVALID_HID;
+
+ TESTING("H5Fcreate");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file aren't supported with this connector\n");
+ return 0;
+ }
+
+ if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, TRUE)) < 0)
+ TEST_ERROR;
+
+ if ((file_id = H5Fcreate(FILE_CREATE_TEST_FILENAME, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create file '%s'\n", FILE_CREATE_TEST_FILENAME);
+ goto error;
+ }
+
+ if (H5Pclose(fapl_id) < 0)
+ TEST_ERROR;
+ if (H5Fclose(file_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Pclose(fapl_id);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * A test to ensure that a file can be opened in parallel.
+ */
+static int
+test_open_file(void)
+{
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t fapl_id = H5I_INVALID_HID;
+
+ TESTING_MULTIPART("H5Fopen");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file aren't supported with this connector\n");
+ return 0;
+ }
+
+ TESTING_2("test setup");
+
+ if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, TRUE)) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ BEGIN_MULTIPART
+ {
+ PART_BEGIN(H5Fopen_rdonly)
+ {
+ TESTING_2("H5Fopen in read-only mode");
+
+ if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDONLY, fapl_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" unable to open file '%s' in read-only mode\n", H5_api_test_parallel_filename);
+ PART_ERROR(H5Fopen_rdonly);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Fopen_rdonly);
+
+ if (file_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+ file_id = H5I_INVALID_HID;
+ }
+
+ PART_BEGIN(H5Fopen_rdwrite)
+ {
+ TESTING_2("H5Fopen in read-write mode");
+
+ if ((file_id = H5Fopen(H5_api_test_parallel_filename, H5F_ACC_RDWR, fapl_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" unable to open file '%s' in read-write mode\n", H5_api_test_parallel_filename);
+ PART_ERROR(H5Fopen_rdwrite);
+ }
+
+ PASSED();
+ }
+ PART_END(H5Fopen_rdwrite);
+
+ if (file_id >= 0) {
+ H5E_BEGIN_TRY
+ {
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+ file_id = H5I_INVALID_HID;
+ }
+
+ /*
+ * XXX: SWMR open flags
+ */
+ }
+ END_MULTIPART;
+
+ TESTING_2("test cleanup");
+
+ if (H5Pclose(fapl_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Pclose(fapl_id);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * Tests file access by a communicator other than MPI_COMM_WORLD.
+ *
+ * Splits MPI_COMM_WORLD into two groups, where one (even_comm) contains
+ * the original processes of even ranks and the other (odd_comm) contains
+ * the original processes of odd ranks. Processes in even_comm create a
+ * file using even_comm, then close it. Processes in odd_comm just do a
+ * barrier using odd_comm. Then all processes do a barrier using
+ * MPI_COMM_WORLD. If the file creation and close do not perform the
+ * correct collective action according to the communicator argument, the
+ * processes will eventually deadlock due to mismatched MPI_Barrier calls.
+ */
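+/*
+ * Note: MPI_Comm_split(comm, color, key, &newcomm) places every process that
+ * passes the same color value into the same new communicator and orders them
+ * by key; below, the color is (mpi_rank % 2) and the key is the process's
+ * original rank in MPI_COMM_WORLD.
+ */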
+#define SPLIT_FILE_COMM_TEST_FILE_NAME "split_comm_file.h5"
+static int
+test_split_comm_file_access(void)
+{
+ MPI_Comm comm;
+ MPI_Info info = MPI_INFO_NULL;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t fapl_id = H5I_INVALID_HID;
+ int is_old;
+ int newrank;
+ int err_occurred = 0;
+
+ TESTING("file access with a split communicator");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC)) {
+ SKIPPED();
+ HDprintf(" API functions for basic file aren't supported with this connector\n");
+ return 0;
+ }
+
+ /* set up MPI parameters */
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+ is_old = mpi_rank % 2;
+ if (MPI_SUCCESS != MPI_Comm_split(MPI_COMM_WORLD, is_old, mpi_rank, &comm)) {
+ H5_FAILED();
+ HDprintf(" failed to split communicator!\n");
+ goto error;
+ }
+ MPI_Comm_rank(comm, &newrank);
+
+ if (is_old) {
+ /* odd-rank processes */
+ if (MPI_SUCCESS != MPI_Barrier(comm)) {
+ err_occurred = 1;
+ goto access_end;
+ }
+ }
+ else {
+ /* even-rank processes */
+ int sub_mpi_rank; /* rank in the sub-comm */
+
+ MPI_Comm_rank(comm, &sub_mpi_rank);
+
+ /* setup file access template */
+ if ((fapl_id = create_mpi_fapl(comm, info, TRUE)) < 0) {
+ err_occurred = 1;
+ goto access_end;
+ }
+
+ /* create the file collectively */
+ if ((file_id = H5Fcreate(SPLIT_FILE_COMM_TEST_FILE_NAME, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id)) < 0) {
+ H5_FAILED();
+ HDprintf(" couldn't create file '%s'\n", SPLIT_FILE_COMM_TEST_FILE_NAME);
+ err_occurred = 1;
+ goto access_end;
+ }
+
+ /* close the file */
+ if (H5Fclose(file_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to close file '%s'\n", SPLIT_FILE_COMM_TEST_FILE_NAME);
+ err_occurred = 1;
+ goto access_end;
+ }
+
+ /* delete the test file */
+ if (H5Fdelete(SPLIT_FILE_COMM_TEST_FILE_NAME, fapl_id) < 0) {
+ H5_FAILED();
+ HDprintf(" failed to delete file '%s'\n", SPLIT_FILE_COMM_TEST_FILE_NAME);
+ err_occurred = 1;
+ goto access_end;
+ }
+
+ /* Release file-access template */
+ if (H5Pclose(fapl_id) < 0) {
+ err_occurred = 1;
+ goto access_end;
+ }
+ }
+access_end:
+
+ /* Get the collective results about whether an error occurred */
+ if (MPI_SUCCESS != MPI_Allreduce(MPI_IN_PLACE, &err_occurred, 1, MPI_INT, MPI_LOR, MPI_COMM_WORLD)) {
+ H5_FAILED();
+ HDprintf(" MPI_Allreduce failed\n");
+ goto error;
+ }
+
+ if (err_occurred) {
+ H5_FAILED();
+        HDprintf("    an error occurred on one or more ranks during split-communicator file access - "
+                 "collectively failing\n");
+ goto error;
+ }
+
+ if (MPI_SUCCESS != MPI_Comm_free(&comm)) {
+ H5_FAILED();
+ HDprintf(" MPI_Comm_free failed\n");
+ goto error;
+ }
+
+ if (MPI_SUCCESS != MPI_Barrier(MPI_COMM_WORLD)) {
+ H5_FAILED();
+ HDprintf(" MPI_Barrier on MPI_COMM_WORLD failed\n");
+ goto error;
+ }
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Pclose(fapl_id);
+ H5Fclose(file_id);
+ }
+ H5E_END_TRY;
+
+ return 1;
+}
+
+/*
+ * Cleanup temporary test files
+ */
+static void
+cleanup_files(void)
+{
+ hid_t fapl_id = H5I_INVALID_HID;
+
+ if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, TRUE)) < 0) {
+ if (MAINPROCESS)
+ HDprintf(" failed to create FAPL for deleting test files\n");
+ return;
+ }
+
+ H5Fdelete(FILE_CREATE_TEST_FILENAME, fapl_id);
+
+ /* The below file is deleted as part of the test */
+ /* H5Fdelete(SPLIT_FILE_COMM_TEST_FILE_NAME, H5P_DEFAULT); */
+
+ if (H5Pclose(fapl_id) < 0) {
+ if (MAINPROCESS)
+ HDprintf(" failed to close FAPL used for deleting test files\n");
+ return;
+ }
+}
+
+int
+H5_api_file_test_parallel(void)
+{
+ size_t i;
+ int nerrors;
+
+ if (MAINPROCESS) {
+ HDprintf("**********************************************\n");
+ HDprintf("* *\n");
+ HDprintf("* API Parallel File Tests *\n");
+ HDprintf("* *\n");
+ HDprintf("**********************************************\n\n");
+ }
+
+ for (i = 0, nerrors = 0; i < ARRAY_LENGTH(par_file_tests); i++) {
+ nerrors += (*par_file_tests[i])() ? 1 : 0;
+
+ if (MPI_SUCCESS != MPI_Barrier(MPI_COMM_WORLD)) {
+ if (MAINPROCESS)
+ HDprintf(" MPI_Barrier() failed!\n");
+ }
+ }
+
+ if (MAINPROCESS) {
+ HDprintf("\n");
+ HDprintf("Cleaning up testing files\n");
+ }
+
+ cleanup_files();
+
+ return nerrors;
+}
diff --git a/testpar/API/H5_api_file_test_parallel.h b/testpar/API/H5_api_file_test_parallel.h
new file mode 100644
index 0000000..aac9800
--- /dev/null
+++ b/testpar/API/H5_api_file_test_parallel.h
@@ -0,0 +1,20 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+#ifndef H5_API_FILE_TEST_PARALLEL_H_
+#define H5_API_FILE_TEST_PARALLEL_H_
+
+#include "H5_api_test_parallel.h"
+
+int H5_api_file_test_parallel(void);
+
+#endif /* H5_API_FILE_TEST_PARALLEL_H_ */
diff --git a/testpar/API/H5_api_group_test_parallel.c b/testpar/API/H5_api_group_test_parallel.c
new file mode 100644
index 0000000..d6d8f18
--- /dev/null
+++ b/testpar/API/H5_api_group_test_parallel.c
@@ -0,0 +1,47 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+#include "H5_api_group_test_parallel.h"
+
+/*
+ * The array of parallel group tests to be performed.
+ */
+static int (*par_group_tests[])(void) = {NULL};
+
+int
+H5_api_group_test_parallel(void)
+{
+ size_t i;
+ int nerrors;
+
+ if (MAINPROCESS) {
+ HDprintf("**********************************************\n");
+ HDprintf("* *\n");
+ HDprintf("* API Parallel Group Tests *\n");
+ HDprintf("* *\n");
+ HDprintf("**********************************************\n\n");
+ }
+
+ for (i = 0, nerrors = 0; i < ARRAY_LENGTH(par_group_tests); i++) {
+ /* nerrors += (*par_group_tests[i])() ? 1 : 0; */
+
+ if (MPI_SUCCESS != MPI_Barrier(MPI_COMM_WORLD)) {
+ if (MAINPROCESS)
+ HDprintf(" MPI_Barrier() failed!\n");
+ }
+ }
+
+ if (MAINPROCESS)
+ HDprintf("\n");
+
+ return nerrors;
+}
diff --git a/testpar/API/H5_api_group_test_parallel.h b/testpar/API/H5_api_group_test_parallel.h
new file mode 100644
index 0000000..87dd24f
--- /dev/null
+++ b/testpar/API/H5_api_group_test_parallel.h
@@ -0,0 +1,20 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+#ifndef H5_API_GROUP_TEST_PARALLEL_H_
+#define H5_API_GROUP_TEST_PARALLEL_H_
+
+#include "H5_api_test_parallel.h"
+
+int H5_api_group_test_parallel(void);
+
+#endif /* H5_API_GROUP_TEST_PARALLEL_H_ */
diff --git a/testpar/API/H5_api_link_test_parallel.c b/testpar/API/H5_api_link_test_parallel.c
new file mode 100644
index 0000000..fb865a0
--- /dev/null
+++ b/testpar/API/H5_api_link_test_parallel.c
@@ -0,0 +1,47 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+#include "H5_api_link_test_parallel.h"
+
+/*
+ * The array of parallel link tests to be performed.
+ */
+static int (*par_link_tests[])(void) = {NULL};
+
+int
+H5_api_link_test_parallel(void)
+{
+ size_t i;
+ int nerrors;
+
+ if (MAINPROCESS) {
+ HDprintf("**********************************************\n");
+ HDprintf("* *\n");
+ HDprintf("* API Parallel Link Tests *\n");
+ HDprintf("* *\n");
+ HDprintf("**********************************************\n\n");
+ }
+
+ for (i = 0, nerrors = 0; i < ARRAY_LENGTH(par_link_tests); i++) {
+ /* nerrors += (*par_link_tests[i])() ? 1 : 0; */
+
+ if (MPI_SUCCESS != MPI_Barrier(MPI_COMM_WORLD)) {
+ if (MAINPROCESS)
+ HDprintf(" MPI_Barrier() failed!\n");
+ }
+ }
+
+ if (MAINPROCESS)
+ HDprintf("\n");
+
+ return nerrors;
+}
diff --git a/testpar/API/H5_api_link_test_parallel.h b/testpar/API/H5_api_link_test_parallel.h
new file mode 100644
index 0000000..dbf0fc7
--- /dev/null
+++ b/testpar/API/H5_api_link_test_parallel.h
@@ -0,0 +1,20 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+#ifndef H5_API_LINK_TEST_PARALLEL_H_
+#define H5_API_LINK_TEST_PARALLEL_H_
+
+#include "H5_api_test_parallel.h"
+
+int H5_api_link_test_parallel(void);
+
+#endif /* H5_API_LINK_TEST_PARALLEL_H_ */
diff --git a/testpar/API/H5_api_misc_test_parallel.c b/testpar/API/H5_api_misc_test_parallel.c
new file mode 100644
index 0000000..0dc85eb
--- /dev/null
+++ b/testpar/API/H5_api_misc_test_parallel.c
@@ -0,0 +1,47 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+#include "H5_api_misc_test_parallel.h"
+
+/*
+ * The array of parallel miscellaneous tests to be performed.
+ */
+static int (*par_misc_tests[])(void) = {NULL};
+
+int
+H5_api_misc_test_parallel(void)
+{
+ size_t i;
+ int nerrors;
+
+ if (MAINPROCESS) {
+ HDprintf("**********************************************\n");
+ HDprintf("* *\n");
+ HDprintf("* API Parallel Miscellaneous Tests *\n");
+ HDprintf("* *\n");
+ HDprintf("**********************************************\n\n");
+ }
+
+ for (i = 0, nerrors = 0; i < ARRAY_LENGTH(par_misc_tests); i++) {
+ /* nerrors += (*par_misc_tests[i])() ? 1 : 0; */
+
+ if (MPI_SUCCESS != MPI_Barrier(MPI_COMM_WORLD)) {
+ if (MAINPROCESS)
+ HDprintf(" MPI_Barrier() failed!\n");
+ }
+ }
+
+ if (MAINPROCESS)
+ HDprintf("\n");
+
+ return nerrors;
+}
diff --git a/testpar/API/H5_api_misc_test_parallel.h b/testpar/API/H5_api_misc_test_parallel.h
new file mode 100644
index 0000000..84553a9
--- /dev/null
+++ b/testpar/API/H5_api_misc_test_parallel.h
@@ -0,0 +1,20 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+#ifndef H5_API_MISC_TEST_PARALLEL_H_
+#define H5_API_MISC_TEST_PARALLEL_H_
+
+#include "H5_api_test_parallel.h"
+
+int H5_api_misc_test_parallel(void);
+
+#endif /* H5_API_MISC_TEST_PARALLEL_H_ */
diff --git a/testpar/API/H5_api_object_test_parallel.c b/testpar/API/H5_api_object_test_parallel.c
new file mode 100644
index 0000000..a264eb2
--- /dev/null
+++ b/testpar/API/H5_api_object_test_parallel.c
@@ -0,0 +1,47 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+#include "H5_api_object_test_parallel.h"
+
+/*
+ * The array of parallel object tests to be performed.
+ */
+static int (*par_object_tests[])(void) = {NULL};
+
+int
+H5_api_object_test_parallel(void)
+{
+ size_t i;
+ int nerrors;
+
+ if (MAINPROCESS) {
+ HDprintf("**********************************************\n");
+ HDprintf("* *\n");
+ HDprintf("* API Parallel Object Tests *\n");
+ HDprintf("* *\n");
+ HDprintf("**********************************************\n\n");
+ }
+
+ for (i = 0, nerrors = 0; i < ARRAY_LENGTH(par_object_tests); i++) {
+ /* nerrors += (*par_object_tests[i])() ? 1 : 0; */
+
+ if (MPI_SUCCESS != MPI_Barrier(MPI_COMM_WORLD)) {
+ if (MAINPROCESS)
+ HDprintf(" MPI_Barrier() failed!\n");
+ }
+ }
+
+ if (MAINPROCESS)
+ HDprintf("\n");
+
+ return nerrors;
+}
diff --git a/testpar/API/H5_api_object_test_parallel.h b/testpar/API/H5_api_object_test_parallel.h
new file mode 100644
index 0000000..6a8569f
--- /dev/null
+++ b/testpar/API/H5_api_object_test_parallel.h
@@ -0,0 +1,20 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+#ifndef H5_API_OBJECT_TEST_PARALLEL_H_
+#define H5_API_OBJECT_TEST_PARALLEL_H_
+
+#include "H5_api_test_parallel.h"
+
+int H5_api_object_test_parallel(void);
+
+#endif /* H5_API_OBJECT_TEST_PARALLEL_H_ */
diff --git a/testpar/API/H5_api_test_parallel.c b/testpar/API/H5_api_test_parallel.c
new file mode 100644
index 0000000..45fa4ec
--- /dev/null
+++ b/testpar/API/H5_api_test_parallel.c
@@ -0,0 +1,338 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+#include "H5_api_test_util.h"
+#include "H5_api_test_parallel.h"
+
+#include "H5_api_attribute_test_parallel.h"
+#include "H5_api_dataset_test_parallel.h"
+#include "H5_api_datatype_test_parallel.h"
+#include "H5_api_file_test_parallel.h"
+#include "H5_api_group_test_parallel.h"
+#include "H5_api_link_test_parallel.h"
+#include "H5_api_misc_test_parallel.h"
+#include "H5_api_object_test_parallel.h"
+#ifdef H5_API_TEST_HAVE_ASYNC
+#include "H5_api_async_test_parallel.h"
+#endif
+
+char H5_api_test_parallel_filename[H5_API_TEST_FILENAME_MAX_LENGTH];
+
+const char *test_path_prefix;
+
+size_t n_tests_run_g;
+size_t n_tests_passed_g;
+size_t n_tests_failed_g;
+size_t n_tests_skipped_g;
+
+int mpi_size;
+int mpi_rank;
+
+/* X-macro to define the following for each test:
+ * - enum type
+ * - name
+ * - test function
+ * - enabled by default
+ */
+#ifdef H5_API_TEST_HAVE_ASYNC
+#define H5_API_PARALLEL_TESTS \
+ X(H5_API_TEST_NULL, "", NULL, 0) \
+ X(H5_API_TEST_FILE, "file", H5_api_file_test_parallel, 1) \
+ X(H5_API_TEST_GROUP, "group", H5_api_group_test_parallel, 1) \
+ X(H5_API_TEST_DATASET, "dataset", H5_api_dataset_test_parallel, 1) \
+ X(H5_API_TEST_DATATYPE, "datatype", H5_api_datatype_test_parallel, 1) \
+ X(H5_API_TEST_ATTRIBUTE, "attribute", H5_api_attribute_test_parallel, 1) \
+ X(H5_API_TEST_LINK, "link", H5_api_link_test_parallel, 1) \
+ X(H5_API_TEST_OBJECT, "object", H5_api_object_test_parallel, 1) \
+ X(H5_API_TEST_MISC, "misc", H5_api_misc_test_parallel, 1) \
+ X(H5_API_TEST_ASYNC, "async", H5_api_async_test_parallel, 1) \
+ X(H5_API_TEST_MAX, "", NULL, 0)
+#else
+#define H5_API_PARALLEL_TESTS \
+ X(H5_API_TEST_NULL, "", NULL, 0) \
+ X(H5_API_TEST_FILE, "file", H5_api_file_test_parallel, 1) \
+ X(H5_API_TEST_GROUP, "group", H5_api_group_test_parallel, 1) \
+ X(H5_API_TEST_DATASET, "dataset", H5_api_dataset_test_parallel, 1) \
+ X(H5_API_TEST_DATATYPE, "datatype", H5_api_datatype_test_parallel, 1) \
+ X(H5_API_TEST_ATTRIBUTE, "attribute", H5_api_attribute_test_parallel, 1) \
+ X(H5_API_TEST_LINK, "link", H5_api_link_test_parallel, 1) \
+ X(H5_API_TEST_OBJECT, "object", H5_api_object_test_parallel, 1) \
+ X(H5_API_TEST_MISC, "misc", H5_api_misc_test_parallel, 1) \
+ X(H5_API_TEST_MAX, "", NULL, 0)
+#endif
+
+#define X(a, b, c, d) a,
+enum H5_api_test_type { H5_API_PARALLEL_TESTS };
+#undef X
+#define X(a, b, c, d) b,
+static const char *const H5_api_test_name[] = {H5_API_PARALLEL_TESTS};
+#undef X
+#define X(a, b, c, d) c,
+static int (*H5_api_test_func[])(void) = {H5_API_PARALLEL_TESTS};
+#undef X
+#define X(a, b, c, d) d,
+static int H5_api_test_enabled[] = {H5_API_PARALLEL_TESTS};
+#undef X
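+
+/*
+ * For reference, expanding the table above with the name column (non-async
+ * build) gives:
+ *
+ *   H5_api_test_name[] = { "", "file", "group", "dataset", "datatype",
+ *                          "attribute", "link", "object", "misc", "" };
+ *
+ * and these are the names accepted on the command line to run a single set
+ * of tests.
+ */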
+
+static enum H5_api_test_type
+H5_api_test_name_to_type(const char *test_name)
+{
+ enum H5_api_test_type i = 0;
+
+ while (strcmp(H5_api_test_name[i], test_name) && i != H5_API_TEST_MAX)
+ i++;
+
+ return ((i == H5_API_TEST_MAX) ? H5_API_TEST_NULL : i);
+}
+
+static void
+H5_api_test_run(void)
+{
+ enum H5_api_test_type i;
+
+ for (i = H5_API_TEST_FILE; i < H5_API_TEST_MAX; i++)
+ if (H5_api_test_enabled[i])
+ (void)H5_api_test_func[i]();
+}
+
+hid_t
+create_mpi_fapl(MPI_Comm comm, MPI_Info info, hbool_t coll_md_read)
+{
+ hid_t ret_pl = H5I_INVALID_HID;
+
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ if ((ret_pl = H5Pcreate(H5P_FILE_ACCESS)) < 0)
+ goto error;
+
+ if (H5Pset_fapl_mpio(ret_pl, comm, info) < 0)
+ goto error;
+ if (H5Pset_all_coll_metadata_ops(ret_pl, coll_md_read) < 0)
+ goto error;
+ if (H5Pset_coll_metadata_write(ret_pl, TRUE) < 0)
+ goto error;
+
+ return ret_pl;
+
+error:
+ return H5I_INVALID_HID;
+} /* end create_mpi_fapl() */
+
+/*
+ * Generates random dimensions for a dataspace. The first dimension
+ * is always `mpi_size` to allow for convenient subsetting; the rest
+ * of the dimensions are randomized.
+ */
+int
+generate_random_parallel_dimensions(int space_rank, hsize_t **dims_out)
+{
+ hsize_t *dims = NULL;
+ size_t i;
+
+ if (space_rank <= 0)
+ goto error;
+
+ if (NULL == (dims = HDmalloc((size_t)space_rank * sizeof(hsize_t))))
+ goto error;
+ if (MAINPROCESS) {
+ for (i = 0; i < (size_t)space_rank; i++) {
+ if (i == 0)
+ dims[i] = (hsize_t)mpi_size;
+ else
+ dims[i] = (hsize_t)((rand() % MAX_DIM_SIZE) + 1);
+ }
+ }
+
+ /*
+ * Ensure that the dataset dimensions are uniform across ranks.
+ */
+ if (MPI_SUCCESS != MPI_Bcast(dims, space_rank, MPI_UNSIGNED_LONG_LONG, 0, MPI_COMM_WORLD))
+ goto error;
+
+ *dims_out = dims;
+
+ return 0;
+
+error:
+ if (dims)
+ HDfree(dims);
+
+ return -1;
+}
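+
+/*
+ * Typical usage (sketch; `space_id` is illustrative):
+ *
+ *   hsize_t *dims = NULL;
+ *
+ *   if (generate_random_parallel_dimensions(3, &dims) < 0)
+ *       TEST_ERROR;
+ *   if ((space_id = H5Screate_simple(3, dims, NULL)) < 0)
+ *       TEST_ERROR;
+ *   ...
+ *   HDfree(dims);
+ */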
+
+int
+main(int argc, char **argv)
+{
+ const char *vol_connector_name;
+ unsigned seed;
+ hid_t fapl_id = H5I_INVALID_HID;
+
+ MPI_Init(&argc, &argv);
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+
+    /* Simple argument checking; TODO: improve this later */
+ if (argc > 1) {
+ enum H5_api_test_type i = H5_api_test_name_to_type(argv[1]);
+ if (i != H5_API_TEST_NULL) {
+ /* Run only specific API test */
+ memset(H5_api_test_enabled, 0, sizeof(H5_api_test_enabled));
+ H5_api_test_enabled[i] = 1;
+ }
+ }
+
+ /*
+ * Make sure that HDF5 is initialized on all MPI ranks before proceeding.
+ * This is important for certain VOL connectors which may require a
+ * collective initialization.
+ */
+ H5open();
+
+ n_tests_run_g = 0;
+ n_tests_passed_g = 0;
+ n_tests_failed_g = 0;
+ n_tests_skipped_g = 0;
+
+ if (MAINPROCESS) {
+ seed = (unsigned)HDtime(NULL);
+ }
+
+ if (mpi_size > 1) {
+ if (MPI_SUCCESS != MPI_Bcast(&seed, 1, MPI_UNSIGNED, 0, MPI_COMM_WORLD)) {
+ if (MAINPROCESS)
+ HDprintf("Couldn't broadcast test seed\n");
+ goto error;
+ }
+ }
+
+ srand(seed);
+
+ if (NULL == (test_path_prefix = HDgetenv(HDF5_API_TEST_PATH_PREFIX)))
+ test_path_prefix = "";
+
+ HDsnprintf(H5_api_test_parallel_filename, H5_API_TEST_FILENAME_MAX_LENGTH, "%s%s", test_path_prefix,
+ PARALLEL_TEST_FILE_NAME);
+
+ if (NULL == (vol_connector_name = HDgetenv(HDF5_VOL_CONNECTOR))) {
+ if (MAINPROCESS)
+ HDprintf("No VOL connector selected; using native VOL connector\n");
+ vol_connector_name = "native";
+ }
+
+ if (MAINPROCESS) {
+ HDprintf("Running parallel API tests with VOL connector '%s'\n\n", vol_connector_name);
+ HDprintf("Test parameters:\n");
+ HDprintf(" - Test file name: '%s'\n", H5_api_test_parallel_filename);
+ HDprintf(" - Number of MPI ranks: %d\n", mpi_size);
+ HDprintf(" - Test seed: %u\n", seed);
+ HDprintf("\n\n");
+ }
+
+ /* Retrieve the VOL cap flags - work around an HDF5
+ * library issue by creating a FAPL
+ */
+ BEGIN_INDEPENDENT_OP(get_capability_flags)
+ {
+ if ((fapl_id = create_mpi_fapl(MPI_COMM_WORLD, MPI_INFO_NULL, FALSE)) < 0) {
+ if (MAINPROCESS)
+ HDfprintf(stderr, "Unable to create FAPL\n");
+ INDEPENDENT_OP_ERROR(get_capability_flags);
+ }
+
+ vol_cap_flags_g = H5VL_CAP_FLAG_NONE;
+ if (H5Pget_vol_cap_flags(fapl_id, &vol_cap_flags_g) < 0) {
+ if (MAINPROCESS)
+ HDfprintf(stderr, "Unable to retrieve VOL connector capability flags\n");
+ INDEPENDENT_OP_ERROR(get_capability_flags);
+ }
+ }
+ END_INDEPENDENT_OP(get_capability_flags);
+
+ /*
+ * Create the file that will be used for all of the tests,
+ * except for those which test file creation.
+ */
+ BEGIN_INDEPENDENT_OP(create_test_container)
+ {
+ if (MAINPROCESS) {
+ if (create_test_container(H5_api_test_parallel_filename, vol_cap_flags_g) < 0) {
+ HDprintf(" failed to create testing container file '%s'\n", H5_api_test_parallel_filename);
+ INDEPENDENT_OP_ERROR(create_test_container);
+ }
+ }
+ }
+ END_INDEPENDENT_OP(create_test_container);
+
+ /* Run all the tests that are enabled */
+ H5_api_test_run();
+
+ if (MAINPROCESS)
+ HDprintf("Cleaning up testing files\n");
+ H5Fdelete(H5_api_test_parallel_filename, fapl_id);
+
+ if (n_tests_run_g > 0) {
+ if (MAINPROCESS)
+ HDprintf("The below statistics are minimum values due to the possibility of some ranks failing a "
+ "test while others pass:\n");
+
+ if (MPI_SUCCESS != MPI_Allreduce(MPI_IN_PLACE, &n_tests_passed_g, 1, MPI_UNSIGNED_LONG_LONG, MPI_MIN,
+ MPI_COMM_WORLD)) {
+ if (MAINPROCESS)
+ HDprintf(" failed to collect consensus about the minimum number of tests that passed -- "
+ "reporting rank 0's (possibly inaccurate) value\n");
+ }
+
+ if (MAINPROCESS)
+ HDprintf("%s%zu/%zu (%.2f%%) API tests passed across all ranks with VOL connector '%s'\n",
+ n_tests_passed_g > 0 ? "At least " : "", n_tests_passed_g, n_tests_run_g,
+ ((double)n_tests_passed_g / (double)n_tests_run_g * 100.0), vol_connector_name);
+
+ if (MPI_SUCCESS != MPI_Allreduce(MPI_IN_PLACE, &n_tests_failed_g, 1, MPI_UNSIGNED_LONG_LONG, MPI_MIN,
+ MPI_COMM_WORLD)) {
+ if (MAINPROCESS)
+ HDprintf(" failed to collect consensus about the minimum number of tests that failed -- "
+ "reporting rank 0's (possibly inaccurate) value\n");
+ }
+
+ if (MAINPROCESS) {
+ HDprintf("%s%zu/%zu (%.2f%%) API tests did not pass across all ranks with VOL connector '%s'\n",
+ n_tests_failed_g > 0 ? "At least " : "", n_tests_failed_g, n_tests_run_g,
+ ((double)n_tests_failed_g / (double)n_tests_run_g * 100.0), vol_connector_name);
+
+ HDprintf("%zu/%zu (%.2f%%) API tests were skipped with VOL connector '%s'\n", n_tests_skipped_g,
+ n_tests_run_g, ((double)n_tests_skipped_g / (double)n_tests_run_g * 100.0),
+ vol_connector_name);
+ }
+ }
+
+ if (fapl_id >= 0 && H5Pclose(fapl_id) < 0) {
+ if (MAINPROCESS)
+ HDprintf(" failed to close MPI FAPL\n");
+ }
+
+ H5close();
+
+ MPI_Finalize();
+
+ HDexit(EXIT_SUCCESS);
+
+error:
+ H5E_BEGIN_TRY
+ {
+ H5Pclose(fapl_id);
+ }
+ H5E_END_TRY;
+
+ MPI_Finalize();
+
+ HDexit(EXIT_FAILURE);
+}
diff --git a/testpar/API/H5_api_test_parallel.h b/testpar/API/H5_api_test_parallel.h
new file mode 100644
index 0000000..6df83e8
--- /dev/null
+++ b/testpar/API/H5_api_test_parallel.h
@@ -0,0 +1,188 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+#ifndef H5_API_TEST_PARALLEL_H
+#define H5_API_TEST_PARALLEL_H
+
+#include <mpi.h>
+
+#include "testpar.h"
+
+#include "H5_api_test.h"
+
+/* Define H5VL_VERSION if not already defined */
+#ifndef H5VL_VERSION
+#define H5VL_VERSION 0
+#endif
+
+/* Define macro to wait forever depending on version */
+#if H5VL_VERSION >= 2
+#define H5_API_TEST_WAIT_FOREVER H5ES_WAIT_FOREVER
+#else
+#define H5_API_TEST_WAIT_FOREVER UINT64_MAX
+#endif
+
+#define PARALLEL_TEST_FILE_NAME "H5_api_test_parallel.h5"
+extern char H5_api_test_parallel_filename[];
+
+#undef TESTING
+#undef TESTING_2
+#undef PASSED
+#undef H5_FAILED
+#undef H5_WARNING
+#undef SKIPPED
+#undef PUTS_ERROR
+#undef TEST_ERROR
+#undef STACK_ERROR
+#undef FAIL_STACK_ERROR
+#undef FAIL_PUTS_ERROR
+#undef TESTING_MULTIPART
+
+#define TESTING(WHAT) \
+ { \
+ if (MAINPROCESS) { \
+ printf("Testing %-62s", WHAT); \
+ fflush(stdout); \
+ } \
+ n_tests_run_g++; \
+ }
+#define TESTING_2(WHAT) \
+ { \
+ if (MAINPROCESS) { \
+ printf(" Testing %-60s", WHAT); \
+ fflush(stdout); \
+ } \
+ n_tests_run_g++; \
+ }
+#define PASSED() \
+ { \
+ if (MAINPROCESS) { \
+ puts(" PASSED"); \
+ fflush(stdout); \
+ } \
+ n_tests_passed_g++; \
+ }
+#define H5_FAILED() \
+ { \
+ if (MAINPROCESS) { \
+ puts("*FAILED*"); \
+ fflush(stdout); \
+ } \
+ n_tests_failed_g++; \
+ }
+#define H5_WARNING() \
+ { \
+ if (MAINPROCESS) { \
+ puts("*WARNING*"); \
+ fflush(stdout); \
+ } \
+ }
+#define SKIPPED() \
+ { \
+ if (MAINPROCESS) { \
+ puts(" -SKIP-"); \
+ fflush(stdout); \
+ } \
+ n_tests_skipped_g++; \
+ }
+#define PUTS_ERROR(s) \
+ { \
+ if (MAINPROCESS) { \
+ puts(s); \
+ AT(); \
+ } \
+ goto error; \
+ }
+#define TEST_ERROR \
+ { \
+ H5_FAILED(); \
+ if (MAINPROCESS) { \
+ AT(); \
+ } \
+ goto error; \
+ }
+#define STACK_ERROR \
+ { \
+ if (MAINPROCESS) { \
+ H5Eprint2(H5E_DEFAULT, stdout); \
+ } \
+ goto error; \
+ }
+#define FAIL_STACK_ERROR \
+ { \
+ H5_FAILED(); \
+ if (MAINPROCESS) { \
+ AT(); \
+ H5Eprint2(H5E_DEFAULT, stdout); \
+ } \
+ goto error; \
+ }
+#define FAIL_PUTS_ERROR(s) \
+ { \
+ H5_FAILED(); \
+ if (MAINPROCESS) { \
+ AT(); \
+ puts(s); \
+ } \
+ goto error; \
+ }
+#define TESTING_MULTIPART(WHAT) \
+ { \
+ if (MAINPROCESS) { \
+ printf("Testing %-62s", WHAT); \
+ HDputs(""); \
+ fflush(stdout); \
+ } \
+ }
+
+/*
+ * Macros to surround an action that will be performed non-collectively. Once the
+ * operation has completed, a consensus will be formed by all ranks on whether the
+ * operation failed.
+ */
+#define BEGIN_INDEPENDENT_OP(op_name) \
+ { \
+ hbool_t ind_op_failed = FALSE; \
+ \
+ {
+
+#define END_INDEPENDENT_OP(op_name) \
+ } \
+ \
+ op_##op_name##_end : if (MPI_SUCCESS != MPI_Allreduce(MPI_IN_PLACE, &ind_op_failed, 1, MPI_C_BOOL, \
+ MPI_LOR, MPI_COMM_WORLD)) \
+ { \
+ if (MAINPROCESS) \
+ HDprintf( \
+ " failed to collect consensus about whether non-collective operation was successful\n"); \
+ goto error; \
+ } \
+ \
+ if (ind_op_failed) { \
+ if (MAINPROCESS) \
+ HDprintf(" failure detected during non-collective operation - all other ranks will now fail " \
+ "too\n"); \
+ goto error; \
+ } \
+ }
+
+#define INDEPENDENT_OP_ERROR(op_name) \
+ ind_op_failed = TRUE; \
+ goto op_##op_name##_end;
+
+hid_t create_mpi_fapl(MPI_Comm comm, MPI_Info info, hbool_t coll_md_read);
+int generate_random_parallel_dimensions(int space_rank, hsize_t **dims_out);
+
+extern int mpi_size;
+extern int mpi_rank;
+
+#endif
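
For reference, a minimal sketch (not part of the patch) of how the independent-op macros above are intended to be used, modeled on the existing callers in H5_api_test_parallel.c; the wrapped H5Fdelete call, the file name, and the function name are hypothetical:

    static int
    delete_file_on_rank_zero(void)
    {
        BEGIN_INDEPENDENT_OP(delete_file)
        {
            if (MAINPROCESS) {
                /* Non-collective call performed by rank 0 only */
                if (H5Fdelete("some_test_file.h5", H5P_DEFAULT) < 0) {
                    HDprintf("    couldn't delete file\n");
                    INDEPENDENT_OP_ERROR(delete_file);
                }
            }
        }
        END_INDEPENDENT_OP(delete_file);

        return 0;

    error:
        /* Reached on every rank once the MPI_Allreduce consensus reports a failure */
        return -1;
    }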
diff --git a/testpar/API/t_bigio.c b/testpar/API/t_bigio.c
new file mode 100644
index 0000000..3e18c8f
--- /dev/null
+++ b/testpar/API/t_bigio.c
@@ -0,0 +1,1942 @@
+
+#include "hdf5.h"
+#include "testphdf5.h"
+
+#if 0
+#include "H5Dprivate.h" /* For Chunk tests */
+#endif
+
+/* FILENAME and filenames must have the same number of names */
+const char *FILENAME[3] = {"bigio_test.h5", "single_rank_independent_io.h5", NULL};
+
+/* Constants definitions */
+#define MAX_ERR_REPORT 10 /* Maximum number of errors reported */
+
+/* Define some handy debugging shorthands, routines, ... */
+/* debugging tools */
+
+#define MAIN_PROCESS (mpi_rank_g == 0) /* define process 0 as main process */
+
+/* Constants definitions */
+#define RANK 2
+
+#define IN_ORDER 1
+#define OUT_OF_ORDER 2
+
+#define DATASET1 "DSET1"
+#define DATASET2 "DSET2"
+#define DATASET3 "DSET3"
+#define DATASET4 "DSET4"
+#define DXFER_COLLECTIVE_IO 0x1 /* Collective IO*/
+#define DXFER_INDEPENDENT_IO 0x2 /* Independent IO collectively */
+#define DXFER_BIGCOUNT (1 << 29)
+
+#define HYPER 1
+#define POINT 2
+#define ALL 3
+
+/* Dataset data type. Values can be easily dumped for debugging. */
+typedef hsize_t B_DATATYPE;
+
+int facc_type = FACC_MPIO; /*Test file access type */
+int dxfer_coll_type = DXFER_COLLECTIVE_IO;
+size_t bigcount = (size_t) /* DXFER_BIGCOUNT */ 1310720;
+int nerrors = 0;
+static int mpi_size_g, mpi_rank_g;
+
+hsize_t space_dim1 = SPACE_DIM1 * 256; // 4096
+hsize_t space_dim2 = SPACE_DIM2;
+
+static void coll_chunktest(const char *filename, int chunk_factor, int select_factor, int api_option,
+ int file_selection, int mem_selection, int mode);
+
+/*
+ * Setup the coordinates for point selection.
+ */
+static void
+set_coords(hsize_t start[], hsize_t count[], hsize_t stride[], hsize_t block[], size_t num_points,
+ hsize_t coords[], int order)
+{
+ hsize_t i, j, k = 0, m, n, s1, s2;
+
+ if (OUT_OF_ORDER == order)
+ k = (num_points * RANK) - 1;
+ else if (IN_ORDER == order)
+ k = 0;
+
+ s1 = start[0];
+ s2 = start[1];
+
+ for (i = 0; i < count[0]; i++)
+ for (j = 0; j < count[1]; j++)
+ for (m = 0; m < block[0]; m++)
+ for (n = 0; n < block[1]; n++)
+ if (OUT_OF_ORDER == order) {
+ coords[k--] = s2 + (stride[1] * j) + n;
+ coords[k--] = s1 + (stride[0] * i) + m;
+ }
+ else if (IN_ORDER == order) {
+ coords[k++] = s1 + stride[0] * i + m;
+ coords[k++] = s2 + stride[1] * j + n;
+ }
+}
+
+/*
+ * Fill the dataset with trivial data for testing.
+ * Assume dimension rank is 2 and data is stored contiguously.
+ */
+static void
+fill_datasets(hsize_t start[], hsize_t block[], B_DATATYPE *dataset)
+{
+ B_DATATYPE *dataptr = dataset;
+ hsize_t i, j;
+
+ /* put some trivial data in the data_array */
+ for (i = 0; i < block[0]; i++) {
+ for (j = 0; j < block[1]; j++) {
+ *dataptr = (B_DATATYPE)((i + start[0]) * 100 + (j + start[1] + 1));
+ dataptr++;
+ }
+ }
+}
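
As a concrete illustration (not part of the patch), fill_datasets() above produces the value pattern row-index * 100 + column-index + 1; a hypothetical 2 x 3 block starting at row 4, column 0 would therefore hold the values shown in this small standalone sketch:

    #include <stdio.h>

    int
    main(void)
    {
        unsigned long long start[2] = {4, 0}, block[2] = {2, 3}, buf[6];
        size_t             k = 0;

        /* Same formula as fill_datasets(): (i + start[0]) * 100 + (j + start[1] + 1) */
        for (unsigned long long i = 0; i < block[0]; i++)
            for (unsigned long long j = 0; j < block[1]; j++)
                buf[k++] = (i + start[0]) * 100 + (j + start[1] + 1);

        /* Prints: 401 402 403 501 502 503 */
        for (k = 0; k < 6; k++)
            printf("%llu ", buf[k]);
        printf("\n");
        return 0;
    }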
+
+/*
+ * Setup the coordinates for point selection.
+ */
+void
+point_set(hsize_t start[], hsize_t count[], hsize_t stride[], hsize_t block[], size_t num_points,
+ hsize_t coords[], int order)
+{
+ hsize_t i, j, k = 0, m, n, s1, s2;
+
+ HDcompile_assert(RANK == 2);
+
+ if (OUT_OF_ORDER == order)
+ k = (num_points * RANK) - 1;
+ else if (IN_ORDER == order)
+ k = 0;
+
+ s1 = start[0];
+ s2 = start[1];
+
+ for (i = 0; i < count[0]; i++)
+ for (j = 0; j < count[1]; j++)
+ for (m = 0; m < block[0]; m++)
+ for (n = 0; n < block[1]; n++)
+ if (OUT_OF_ORDER == order) {
+ coords[k--] = s2 + (stride[1] * j) + n;
+ coords[k--] = s1 + (stride[0] * i) + m;
+ }
+ else if (IN_ORDER == order) {
+ coords[k++] = s1 + stride[0] * i + m;
+ coords[k++] = s2 + stride[1] * j + n;
+ }
+
+ if (VERBOSE_MED) {
+ HDprintf("start[]=(%" PRIuHSIZE ", %" PRIuHSIZE "), "
+ "count[]=(%" PRIuHSIZE ", %" PRIuHSIZE "), "
+ "stride[]=(%" PRIuHSIZE ", %" PRIuHSIZE "), "
+ "block[]=(%" PRIuHSIZE ", %" PRIuHSIZE "), "
+ "total datapoints=%" PRIuHSIZE "\n",
+ start[0], start[1], count[0], count[1], stride[0], stride[1], block[0], block[1],
+ block[0] * block[1] * count[0] * count[1]);
+ k = 0;
+ for (i = 0; i < num_points; i++) {
+ HDprintf("(%d, %d)\n", (int)coords[k], (int)coords[k + 1]);
+ k += 2;
+ }
+ }
+}
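
A small sketch (not part of the patch) of the coordinate layout the IN_ORDER / OUT_OF_ORDER branches above produce, for a hypothetical single 1 x 2 block of points at row 0, columns 0 and 1:

    #include <stdio.h>

    int
    main(void)
    {
        unsigned long long in_order[4], out_of_order[4];
        size_t             k_in = 0, k_out = 3; /* 2 points x rank 2, last index */

        for (unsigned long long n = 0; n < 2; n++) { /* block[1] == 2; all other dims are 1 */
            in_order[k_in++] = 0; /* row */
            in_order[k_in++] = n; /* column */

            out_of_order[k_out--] = n; /* column is written first while walking backwards */
            out_of_order[k_out--] = 0; /* row */
        }

        /* IN_ORDER:     0 0 0 1  -> points (0,0), (0,1)
         * OUT_OF_ORDER: 0 1 0 0  -> points (0,1), (0,0): point order reversed, pairs intact */
        for (size_t k = 0; k < 4; k++)
            printf("%llu %llu\n", in_order[k], out_of_order[k]);
        return 0;
    }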
+
+/*
+ * Print the content of the dataset.
+ */
+static void
+dataset_print(hsize_t start[], hsize_t block[], B_DATATYPE *dataset)
+{
+ B_DATATYPE *dataptr = dataset;
+ hsize_t i, j;
+
+ /* print the column heading */
+ HDprintf("%-8s", "Cols:");
+ for (j = 0; j < block[1]; j++) {
+ HDprintf("%3" PRIuHSIZE " ", start[1] + j);
+ }
+ HDprintf("\n");
+
+ /* print the slab data */
+ for (i = 0; i < block[0]; i++) {
+ HDprintf("Row %2" PRIuHSIZE ": ", i + start[0]);
+ for (j = 0; j < block[1]; j++) {
+ HDprintf("%" PRIuHSIZE " ", *dataptr++);
+ }
+ HDprintf("\n");
+ }
+}
+
+/*
+ * Verify the content of the dataset against the expected original data.
+ */
+static int
+verify_data(hsize_t start[], hsize_t count[], hsize_t stride[], hsize_t block[], B_DATATYPE *dataset,
+ B_DATATYPE *original)
+{
+ hsize_t i, j;
+ int vrfyerrs;
+
+ /* print it if VERBOSE_MED */
+ if (VERBOSE_MED) {
+ HDprintf("verify_data dumping:::\n");
+ HDprintf("start(%" PRIuHSIZE ", %" PRIuHSIZE "), "
+ "count(%" PRIuHSIZE ", %" PRIuHSIZE "), "
+ "stride(%" PRIuHSIZE ", %" PRIuHSIZE "), "
+ "block(%" PRIuHSIZE ", %" PRIuHSIZE ")\n",
+ start[0], start[1], count[0], count[1], stride[0], stride[1], block[0], block[1]);
+ HDprintf("original values:\n");
+ dataset_print(start, block, original);
+ HDprintf("compared values:\n");
+ dataset_print(start, block, dataset);
+ }
+
+ vrfyerrs = 0;
+ for (i = 0; i < block[0]; i++) {
+ for (j = 0; j < block[1]; j++) {
+            if (*dataset != *original) {
+                if (vrfyerrs++ < MAX_ERR_REPORT || VERBOSE_MED) {
+                    HDprintf("Dataset Verify failed at [%" PRIuHSIZE "][%" PRIuHSIZE "]"
+                             "(row %" PRIuHSIZE ", col %" PRIuHSIZE "): "
+                             "expect %" PRIuHSIZE ", got %" PRIuHSIZE "\n",
+                             i, j, i + start[0], j + start[1], *(original), *(dataset));
+                }
+            }
+            /* advance both pointers on every element so matching values are compared too */
+            dataset++;
+            original++;
+ }
+ }
+ if (vrfyerrs > MAX_ERR_REPORT && !VERBOSE_MED)
+ HDprintf("[more errors ...]\n");
+ if (vrfyerrs)
+ HDprintf("%d errors found in verify_data\n", vrfyerrs);
+ return (vrfyerrs);
+}
+
+/* Set up the selection */
+static void
+ccslab_set(int mpi_rank, int mpi_size, hsize_t start[], hsize_t count[], hsize_t stride[], hsize_t block[],
+ int mode)
+{
+
+ switch (mode) {
+
+ case BYROW_CONT:
+            /* Each process takes a slab of rows. */
+ block[0] = 1;
+ block[1] = 1;
+ stride[0] = 1;
+ stride[1] = 1;
+ count[0] = space_dim1;
+ count[1] = space_dim2;
+ start[0] = (hsize_t)mpi_rank * count[0];
+ start[1] = 0;
+
+ break;
+
+ case BYROW_DISCONT:
+ /* Each process takes several disjoint blocks. */
+ block[0] = 1;
+ block[1] = 1;
+ stride[0] = 3;
+ stride[1] = 3;
+ count[0] = space_dim1 / (stride[0] * block[0]);
+ count[1] = (space_dim2) / (stride[1] * block[1]);
+ start[0] = space_dim1 * (hsize_t)mpi_rank;
+ start[1] = 0;
+
+ break;
+
+ case BYROW_SELECTNONE:
+            /* Each process takes a slab of rows; there is
+               no selection for the last process(es). */
+ block[0] = 1;
+ block[1] = 1;
+ stride[0] = 1;
+ stride[1] = 1;
+ count[0] = ((mpi_rank >= MAX(1, (mpi_size - 2))) ? 0 : space_dim1);
+ count[1] = space_dim2;
+ start[0] = (hsize_t)mpi_rank * count[0];
+ start[1] = 0;
+
+ break;
+
+ case BYROW_SELECTUNBALANCE:
+            /* The first one-third of the processes select only the top
+               half of the domain; the rest select the bottom half. */
+
+ block[0] = 1;
+ count[0] = 2;
+ stride[0] = (hsize_t)(space_dim1 * (hsize_t)mpi_size / 4 + 1);
+ block[1] = space_dim2;
+ count[1] = 1;
+ start[1] = 0;
+ stride[1] = 1;
+ if ((mpi_rank * 3) < (mpi_size * 2))
+ start[0] = (hsize_t)mpi_rank;
+ else
+ start[0] = 1 + space_dim1 * (hsize_t)mpi_size / 2 + (hsize_t)(mpi_rank - 2 * mpi_size / 3);
+ break;
+
+ case BYROW_SELECTINCHUNK:
+ /* Each process will only select one chunk */
+
+ block[0] = 1;
+ count[0] = 1;
+ start[0] = (hsize_t)mpi_rank * space_dim1;
+ stride[0] = 1;
+ block[1] = space_dim2;
+ count[1] = 1;
+ stride[1] = 1;
+ start[1] = 0;
+
+ break;
+
+ default:
+ /* Unknown mode. Set it to cover the whole dataset. */
+ block[0] = space_dim1 * (hsize_t)mpi_size;
+ block[1] = space_dim2;
+ stride[0] = block[0];
+ stride[1] = block[1];
+ count[0] = 1;
+ count[1] = 1;
+ start[0] = 0;
+ start[1] = 0;
+
+ break;
+ }
+ if (VERBOSE_MED) {
+ HDprintf("start[]=(%lu,%lu), count[]=(%lu,%lu), stride[]=(%lu,%lu), block[]=(%lu,%lu), total "
+ "datapoints=%lu\n",
+ (unsigned long)start[0], (unsigned long)start[1], (unsigned long)count[0],
+ (unsigned long)count[1], (unsigned long)stride[0], (unsigned long)stride[1],
+ (unsigned long)block[0], (unsigned long)block[1],
+ (unsigned long)(block[0] * block[1] * count[0] * count[1]));
+ }
+}
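
For a concrete feel of the BYROW_CONT case above (not part of the patch), here is the slab a hypothetical rank 2 would get with space_dim1 = 8 and space_dim2 = 3; block and stride are 1 in both dimensions:

    #include <stdio.h>

    int
    main(void)
    {
        unsigned long long space_dim1 = 8, space_dim2 = 3, mpi_rank = 2;
        unsigned long long count[2], start[2];

        count[0] = space_dim1;          /* 8 rows per rank  */
        count[1] = space_dim2;          /* all 3 columns    */
        start[0] = mpi_rank * count[0]; /* rank 2 starts at row 16 */
        start[1] = 0;

        /* Prints: start=(16,0) count=(8,3) -> rank 2 owns rows 16..23 */
        printf("start=(%llu,%llu) count=(%llu,%llu)\n", start[0], start[1], count[0], count[1]);
        return 0;
    }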
+
+/*
+ * Fill the dataset with trivial data for testing.
+ * Assume dimension rank is 2.
+ */
+static void
+ccdataset_fill(hsize_t start[], hsize_t stride[], hsize_t count[], hsize_t block[], DATATYPE *dataset,
+ int mem_selection)
+{
+ DATATYPE *dataptr = dataset;
+ DATATYPE *tmptr;
+ hsize_t i, j, k1, k2, k = 0;
+ /* put some trivial data in the data_array */
+ tmptr = dataptr;
+
+    /* assign the disjoint block (two-dimensional) data array values
+       through the pointer */
+
+ for (k1 = 0; k1 < count[0]; k1++) {
+ for (i = 0; i < block[0]; i++) {
+ for (k2 = 0; k2 < count[1]; k2++) {
+ for (j = 0; j < block[1]; j++) {
+
+ if (ALL != mem_selection) {
+ dataptr = tmptr + ((start[0] + k1 * stride[0] + i) * space_dim2 + start[1] +
+ k2 * stride[1] + j);
+ }
+ else {
+ dataptr = tmptr + k;
+ k++;
+ }
+
+ *dataptr = (DATATYPE)(k1 + k2 + i + j);
+ }
+ }
+ }
+ }
+}
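
A minimal sketch (not part of the patch) of the linear-index arithmetic ccdataset_fill() uses for non-ALL memory selections, with hypothetical values space_dim2 = 3, start = (2,0), stride = (3,3), block = (1,1), and the element at k1 = 1, k2 = i = j = 0:

    #include <stdio.h>

    int
    main(void)
    {
        unsigned long long space_dim2 = 3, start0 = 2, start1 = 0, stride0 = 3, stride1 = 3;
        unsigned long long k1 = 1, k2 = 0, i = 0, j = 0;

        /* row = start0 + k1*stride0 + i = 5, col = start1 + k2*stride1 + j = 0 */
        unsigned long long idx = (start0 + k1 * stride0 + i) * space_dim2 + start1 + k2 * stride1 + j;

        printf("linear index = %llu\n", idx); /* 15, i.e. row 5 times 3 columns, plus column 0 */
        return 0;
    }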
+
+/*
+ * Print the first block of the content of the dataset.
+ */
+static void
+ccdataset_print(hsize_t start[], hsize_t block[], DATATYPE *dataset)
+
+{
+ DATATYPE *dataptr = dataset;
+ hsize_t i, j;
+
+ /* print the column heading */
+ HDprintf("Print only the first block of the dataset\n");
+ HDprintf("%-8s", "Cols:");
+ for (j = 0; j < block[1]; j++) {
+ HDprintf("%3lu ", (unsigned long)(start[1] + j));
+ }
+ HDprintf("\n");
+
+ /* print the slab data */
+ for (i = 0; i < block[0]; i++) {
+ HDprintf("Row %2lu: ", (unsigned long)(i + start[0]));
+ for (j = 0; j < block[1]; j++) {
+ HDprintf("%03d ", *dataptr++);
+ }
+ HDprintf("\n");
+ }
+}
+
+/*
+ * Verify the content of the dataset against the expected original data.
+ */
+static int
+ccdataset_vrfy(hsize_t start[], hsize_t count[], hsize_t stride[], hsize_t block[], DATATYPE *dataset,
+ DATATYPE *original, int mem_selection)
+{
+ hsize_t i, j, k1, k2, k = 0;
+ int vrfyerrs;
+ DATATYPE *dataptr, *oriptr;
+
+ /* print it if VERBOSE_MED */
+ if (VERBOSE_MED) {
+ HDprintf("dataset_vrfy dumping:::\n");
+ HDprintf("start(%lu, %lu), count(%lu, %lu), stride(%lu, %lu), block(%lu, %lu)\n",
+ (unsigned long)start[0], (unsigned long)start[1], (unsigned long)count[0],
+ (unsigned long)count[1], (unsigned long)stride[0], (unsigned long)stride[1],
+ (unsigned long)block[0], (unsigned long)block[1]);
+ HDprintf("original values:\n");
+ ccdataset_print(start, block, original);
+ HDprintf("compared values:\n");
+ ccdataset_print(start, block, dataset);
+ }
+
+ vrfyerrs = 0;
+
+ for (k1 = 0; k1 < count[0]; k1++) {
+ for (i = 0; i < block[0]; i++) {
+ for (k2 = 0; k2 < count[1]; k2++) {
+ for (j = 0; j < block[1]; j++) {
+ if (ALL != mem_selection) {
+ dataptr = dataset + ((start[0] + k1 * stride[0] + i) * space_dim2 + start[1] +
+ k2 * stride[1] + j);
+ oriptr = original + ((start[0] + k1 * stride[0] + i) * space_dim2 + start[1] +
+ k2 * stride[1] + j);
+ }
+ else {
+ dataptr = dataset + k;
+ oriptr = original + k;
+ k++;
+ }
+ if (*dataptr != *oriptr) {
+ if (vrfyerrs++ < MAX_ERR_REPORT || VERBOSE_MED) {
+ HDprintf("Dataset Verify failed at [%lu][%lu]: expect %d, got %d\n",
+ (unsigned long)i, (unsigned long)j, *(oriptr), *(dataptr));
+ }
+ }
+ }
+ }
+ }
+ }
+ if (vrfyerrs > MAX_ERR_REPORT && !VERBOSE_MED)
+ HDprintf("[more errors ...]\n");
+ if (vrfyerrs)
+ HDprintf("%d errors found in ccdataset_vrfy\n", vrfyerrs);
+ return (vrfyerrs);
+}
+
+/*
+ * Example of using the parallel HDF5 library to create four datasets
+ * in one HDF5 file with collective parallel access support.
+ * The datasets are of sizes (number-of-mpi-processes x dim0) x dim1.
+ * Each process controls only a slab of size dim0 x dim1 within each
+ * dataset. [Note: not so yet. Datasets are of sizes dim0 x dim1 and
+ * each process controls a hyperslab within.]
+ */
+
+static void
+dataset_big_write(void)
+{
+
+ hid_t xfer_plist; /* Dataset transfer properties list */
+ hid_t sid; /* Dataspace ID */
+ hid_t file_dataspace; /* File dataspace ID */
+ hid_t mem_dataspace; /* memory dataspace ID */
+ hid_t dataset;
+ hsize_t dims[RANK]; /* dataset dim sizes */
+ hsize_t start[RANK]; /* for hyperslab setting */
+ hsize_t count[RANK], stride[RANK]; /* for hyperslab setting */
+ hsize_t block[RANK]; /* for hyperslab setting */
+ hsize_t *coords = NULL;
+ herr_t ret; /* Generic return value */
+ hid_t fid; /* HDF5 file ID */
+ hid_t acc_tpl; /* File access templates */
+ size_t num_points;
+ B_DATATYPE *wdata;
+
+ /* allocate memory for data buffer */
+ wdata = (B_DATATYPE *)HDmalloc(bigcount * sizeof(B_DATATYPE));
+ VRFY_G((wdata != NULL), "wdata malloc succeeded");
+
+ /* setup file access template */
+ acc_tpl = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY_G((acc_tpl >= 0), "H5P_FILE_ACCESS");
+ H5Pset_fapl_mpio(acc_tpl, MPI_COMM_WORLD, MPI_INFO_NULL);
+
+ /* create the file collectively */
+ fid = H5Fcreate(FILENAME[0], H5F_ACC_TRUNC, H5P_DEFAULT, acc_tpl);
+ VRFY_G((fid >= 0), "H5Fcreate succeeded");
+
+ /* Release file-access template */
+ ret = H5Pclose(acc_tpl);
+ VRFY_G((ret >= 0), "");
+
+    /* Each process takes a slab of rows. */
+ if (mpi_rank_g == 0)
+ HDprintf("\nTesting Dataset1 write by ROW\n");
+ /* Create a large dataset */
+ dims[0] = bigcount;
+ dims[1] = (hsize_t)mpi_size_g;
+
+ sid = H5Screate_simple(RANK, dims, NULL);
+ VRFY_G((sid >= 0), "H5Screate_simple succeeded");
+ dataset = H5Dcreate2(fid, DATASET1, H5T_NATIVE_LLONG, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ VRFY_G((dataset >= 0), "H5Dcreate2 succeeded");
+ H5Sclose(sid);
+
+ block[0] = dims[0] / (hsize_t)mpi_size_g;
+ block[1] = dims[1];
+ stride[0] = block[0];
+ stride[1] = block[1];
+ count[0] = 1;
+ count[1] = 1;
+ start[0] = (hsize_t)mpi_rank_g * block[0];
+ start[1] = 0;
+
+ /* create a file dataspace independently */
+ file_dataspace = H5Dget_space(dataset);
+ VRFY_G((file_dataspace >= 0), "H5Dget_space succeeded");
+ ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY_G((ret >= 0), "H5Sset_hyperslab succeeded");
+
+ /* create a memory dataspace independently */
+ mem_dataspace = H5Screate_simple(RANK, block, NULL);
+ VRFY_G((mem_dataspace >= 0), "");
+
+ /* fill the local slab with some trivial data */
+ fill_datasets(start, block, wdata);
+ MESG("data_array initialized");
+ if (VERBOSE_MED) {
+ MESG("data_array created");
+ dataset_print(start, block, wdata);
+ }
+
+ /* set up the collective transfer properties list */
+ xfer_plist = H5Pcreate(H5P_DATASET_XFER);
+ VRFY_G((xfer_plist >= 0), "H5Pcreate xfer succeeded");
+ ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
+ VRFY_G((ret >= 0), "H5Pset_dxpl_mpio succeeded");
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY_G((ret >= 0), "set independent IO collectively succeeded");
+ }
+
+ ret = H5Dwrite(dataset, H5T_NATIVE_LLONG, mem_dataspace, file_dataspace, xfer_plist, wdata);
+ VRFY_G((ret >= 0), "H5Dwrite dataset1 succeeded");
+
+ /* release all temporary handles. */
+ H5Sclose(file_dataspace);
+ H5Sclose(mem_dataspace);
+ H5Pclose(xfer_plist);
+
+ ret = H5Dclose(dataset);
+ VRFY_G((ret >= 0), "H5Dclose1 succeeded");
+
+    /* Each process takes a slab of columns. */
+ if (mpi_rank_g == 0)
+ HDprintf("\nTesting Dataset2 write by COL\n");
+ /* Create a large dataset */
+ dims[0] = bigcount;
+ dims[1] = (hsize_t)mpi_size_g;
+
+ sid = H5Screate_simple(RANK, dims, NULL);
+ VRFY_G((sid >= 0), "H5Screate_simple succeeded");
+ dataset = H5Dcreate2(fid, DATASET2, H5T_NATIVE_LLONG, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ VRFY_G((dataset >= 0), "H5Dcreate2 succeeded");
+ H5Sclose(sid);
+
+ block[0] = dims[0];
+ block[1] = dims[1] / (hsize_t)mpi_size_g;
+ stride[0] = block[0];
+ stride[1] = block[1];
+ count[0] = 1;
+ count[1] = 1;
+ start[0] = 0;
+ start[1] = (hsize_t)mpi_rank_g * block[1];
+
+ /* create a file dataspace independently */
+ file_dataspace = H5Dget_space(dataset);
+ VRFY_G((file_dataspace >= 0), "H5Dget_space succeeded");
+ ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY_G((ret >= 0), "H5Sset_hyperslab succeeded");
+
+ /* create a memory dataspace independently */
+ mem_dataspace = H5Screate_simple(RANK, block, NULL);
+ VRFY_G((mem_dataspace >= 0), "");
+
+ /* fill the local slab with some trivial data */
+ fill_datasets(start, block, wdata);
+ MESG("data_array initialized");
+ if (VERBOSE_MED) {
+ MESG("data_array created");
+ dataset_print(start, block, wdata);
+ }
+
+ /* set up the collective transfer properties list */
+ xfer_plist = H5Pcreate(H5P_DATASET_XFER);
+ VRFY_G((xfer_plist >= 0), "H5Pcreate xfer succeeded");
+ ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
+ VRFY_G((ret >= 0), "H5Pset_dxpl_mpio succeeded");
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY_G((ret >= 0), "set independent IO collectively succeeded");
+ }
+
+ ret = H5Dwrite(dataset, H5T_NATIVE_LLONG, mem_dataspace, file_dataspace, xfer_plist, wdata);
+ VRFY_G((ret >= 0), "H5Dwrite dataset1 succeeded");
+
+ /* release all temporary handles. */
+ H5Sclose(file_dataspace);
+ H5Sclose(mem_dataspace);
+ H5Pclose(xfer_plist);
+
+ ret = H5Dclose(dataset);
+ VRFY_G((ret >= 0), "H5Dclose1 succeeded");
+
+ /* ALL selection */
+ if (mpi_rank_g == 0)
+ HDprintf("\nTesting Dataset3 write select ALL proc 0, NONE others\n");
+ /* Create a large dataset */
+ dims[0] = bigcount;
+ dims[1] = 1;
+
+ sid = H5Screate_simple(RANK, dims, NULL);
+ VRFY_G((sid >= 0), "H5Screate_simple succeeded");
+ dataset = H5Dcreate2(fid, DATASET3, H5T_NATIVE_LLONG, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ VRFY_G((dataset >= 0), "H5Dcreate2 succeeded");
+ H5Sclose(sid);
+
+ /* create a file dataspace independently */
+ file_dataspace = H5Dget_space(dataset);
+ VRFY_G((file_dataspace >= 0), "H5Dget_space succeeded");
+ if (mpi_rank_g == 0) {
+ ret = H5Sselect_all(file_dataspace);
+ VRFY_G((ret >= 0), "H5Sset_all succeeded");
+ }
+ else {
+ ret = H5Sselect_none(file_dataspace);
+ VRFY_G((ret >= 0), "H5Sset_none succeeded");
+ }
+
+ /* create a memory dataspace independently */
+ mem_dataspace = H5Screate_simple(RANK, dims, NULL);
+ VRFY_G((mem_dataspace >= 0), "");
+ if (mpi_rank_g != 0) {
+ ret = H5Sselect_none(mem_dataspace);
+ VRFY_G((ret >= 0), "H5Sset_none succeeded");
+ }
+
+ /* set up the collective transfer properties list */
+ xfer_plist = H5Pcreate(H5P_DATASET_XFER);
+ VRFY_G((xfer_plist >= 0), "H5Pcreate xfer succeeded");
+ ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
+ VRFY_G((ret >= 0), "H5Pset_dxpl_mpio succeeded");
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY_G((ret >= 0), "set independent IO collectively succeeded");
+ }
+
+ /* fill the local slab with some trivial data */
+ fill_datasets(start, dims, wdata);
+ MESG("data_array initialized");
+ if (VERBOSE_MED) {
+ MESG("data_array created");
+ }
+
+ ret = H5Dwrite(dataset, H5T_NATIVE_LLONG, mem_dataspace, file_dataspace, xfer_plist, wdata);
+ VRFY_G((ret >= 0), "H5Dwrite dataset1 succeeded");
+
+ /* release all temporary handles. */
+ H5Sclose(file_dataspace);
+ H5Sclose(mem_dataspace);
+ H5Pclose(xfer_plist);
+
+ ret = H5Dclose(dataset);
+ VRFY_G((ret >= 0), "H5Dclose1 succeeded");
+
+ /* Point selection */
+ if (mpi_rank_g == 0)
+ HDprintf("\nTesting Dataset4 write point selection\n");
+ /* Create a large dataset */
+ dims[0] = bigcount;
+ dims[1] = (hsize_t)(mpi_size_g * 4);
+
+ sid = H5Screate_simple(RANK, dims, NULL);
+ VRFY_G((sid >= 0), "H5Screate_simple succeeded");
+ dataset = H5Dcreate2(fid, DATASET4, H5T_NATIVE_LLONG, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ VRFY_G((dataset >= 0), "H5Dcreate2 succeeded");
+ H5Sclose(sid);
+
+ block[0] = dims[0] / 2;
+ block[1] = 2;
+ stride[0] = dims[0] / 2;
+ stride[1] = 2;
+ count[0] = 1;
+ count[1] = 1;
+ start[0] = 0;
+ start[1] = dims[1] / (hsize_t)mpi_size_g * (hsize_t)mpi_rank_g;
+
+ num_points = bigcount;
+
+ coords = (hsize_t *)HDmalloc(num_points * RANK * sizeof(hsize_t));
+ VRFY_G((coords != NULL), "coords malloc succeeded");
+
+ set_coords(start, count, stride, block, num_points, coords, IN_ORDER);
+ /* create a file dataspace */
+ file_dataspace = H5Dget_space(dataset);
+ VRFY_G((file_dataspace >= 0), "H5Dget_space succeeded");
+ ret = H5Sselect_elements(file_dataspace, H5S_SELECT_SET, num_points, coords);
+ VRFY_G((ret >= 0), "H5Sselect_elements succeeded");
+
+ if (coords)
+ free(coords);
+
+ fill_datasets(start, block, wdata);
+ MESG("data_array initialized");
+ if (VERBOSE_MED) {
+ MESG("data_array created");
+ dataset_print(start, block, wdata);
+ }
+
+ /* create a memory dataspace */
+ /* Warning: H5Screate_simple requires an array of hsize_t elements
+     * even if we pass only a single value. Attempting anything else
+     * appears to cause problems with 32-bit compilers.
+ */
+ mem_dataspace = H5Screate_simple(1, dims, NULL);
+ VRFY_G((mem_dataspace >= 0), "");
+
+ /* set up the collective transfer properties list */
+ xfer_plist = H5Pcreate(H5P_DATASET_XFER);
+ VRFY_G((xfer_plist >= 0), "H5Pcreate xfer succeeded");
+ ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
+ VRFY_G((ret >= 0), "H5Pset_dxpl_mpio succeeded");
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY_G((ret >= 0), "set independent IO collectively succeeded");
+ }
+
+ ret = H5Dwrite(dataset, H5T_NATIVE_LLONG, mem_dataspace, file_dataspace, xfer_plist, wdata);
+ VRFY_G((ret >= 0), "H5Dwrite dataset1 succeeded");
+
+ /* release all temporary handles. */
+ H5Sclose(file_dataspace);
+ H5Sclose(mem_dataspace);
+ H5Pclose(xfer_plist);
+
+ ret = H5Dclose(dataset);
+ VRFY_G((ret >= 0), "H5Dclose1 succeeded");
+
+ HDfree(wdata);
+ H5Fclose(fid);
+}
+
+/*
+ * Example of using the parallel HDF5 library to read four datasets
+ * in one HDF5 file with collective parallel access support.
+ * The datasets are of sizes (number-of-mpi-processes x dim0) x dim1.
+ * Each process controls only a slab of size dim0 x dim1 within each
+ * dataset. [Note: not so yet. Datasets are of sizes dim0 x dim1 and
+ * each process controls a hyperslab within.]
+ */
+
+static void
+dataset_big_read(void)
+{
+ hid_t fid; /* HDF5 file ID */
+ hid_t acc_tpl; /* File access templates */
+ hid_t xfer_plist; /* Dataset transfer properties list */
+ hid_t file_dataspace; /* File dataspace ID */
+ hid_t mem_dataspace; /* memory dataspace ID */
+ hid_t dataset;
+ B_DATATYPE *rdata = NULL; /* data buffer */
+ B_DATATYPE *wdata = NULL; /* expected data buffer */
+ hsize_t dims[RANK]; /* dataset dim sizes */
+ hsize_t start[RANK]; /* for hyperslab setting */
+ hsize_t count[RANK], stride[RANK]; /* for hyperslab setting */
+ hsize_t block[RANK]; /* for hyperslab setting */
+ size_t num_points;
+ hsize_t *coords = NULL;
+ herr_t ret; /* Generic return value */
+
+ /* allocate memory for data buffer */
+ rdata = (B_DATATYPE *)HDmalloc(bigcount * sizeof(B_DATATYPE));
+ VRFY_G((rdata != NULL), "rdata malloc succeeded");
+ wdata = (B_DATATYPE *)HDmalloc(bigcount * sizeof(B_DATATYPE));
+ VRFY_G((wdata != NULL), "wdata malloc succeeded");
+
+ HDmemset(rdata, 0, bigcount * sizeof(B_DATATYPE));
+
+ /* setup file access template */
+ acc_tpl = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY_G((acc_tpl >= 0), "H5P_FILE_ACCESS");
+ H5Pset_fapl_mpio(acc_tpl, MPI_COMM_WORLD, MPI_INFO_NULL);
+
+ /* open the file collectively */
+ fid = H5Fopen(FILENAME[0], H5F_ACC_RDONLY, acc_tpl);
+ VRFY_G((fid >= 0), "H5Fopen succeeded");
+
+ /* Release file-access template */
+ ret = H5Pclose(acc_tpl);
+ VRFY_G((ret >= 0), "");
+
+ if (mpi_rank_g == 0)
+ HDprintf("\nRead Testing Dataset1 by COL\n");
+
+ dataset = H5Dopen2(fid, DATASET1, H5P_DEFAULT);
+ VRFY_G((dataset >= 0), "H5Dopen2 succeeded");
+
+ dims[0] = bigcount;
+ dims[1] = (hsize_t)mpi_size_g;
+    /* Each process takes a slab of columns. */
+ block[0] = dims[0];
+ block[1] = dims[1] / (hsize_t)mpi_size_g;
+ stride[0] = block[0];
+ stride[1] = block[1];
+ count[0] = 1;
+ count[1] = 1;
+ start[0] = 0;
+ start[1] = (hsize_t)mpi_rank_g * block[1];
+
+ /* create a file dataspace independently */
+ file_dataspace = H5Dget_space(dataset);
+ VRFY_G((file_dataspace >= 0), "H5Dget_space succeeded");
+ ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY_G((ret >= 0), "H5Sset_hyperslab succeeded");
+
+ /* create a memory dataspace independently */
+ mem_dataspace = H5Screate_simple(RANK, block, NULL);
+ VRFY_G((mem_dataspace >= 0), "");
+
+ /* fill dataset with test data */
+ fill_datasets(start, block, wdata);
+ MESG("data_array initialized");
+ if (VERBOSE_MED) {
+ MESG("data_array created");
+ }
+
+ /* set up the collective transfer properties list */
+ xfer_plist = H5Pcreate(H5P_DATASET_XFER);
+ VRFY_G((xfer_plist >= 0), "");
+ ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
+ VRFY_G((ret >= 0), "H5Pcreate xfer succeeded");
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY_G((ret >= 0), "set independent IO collectively succeeded");
+ }
+
+ /* read data collectively */
+ ret = H5Dread(dataset, H5T_NATIVE_LLONG, mem_dataspace, file_dataspace, xfer_plist, rdata);
+ VRFY_G((ret >= 0), "H5Dread dataset1 succeeded");
+
+ /* verify the read data with original expected data */
+ ret = verify_data(start, count, stride, block, rdata, wdata);
+ if (ret) {
+ HDfprintf(stderr, "verify failed\n");
+ exit(1);
+ }
+
+ /* release all temporary handles. */
+ H5Sclose(file_dataspace);
+ H5Sclose(mem_dataspace);
+ H5Pclose(xfer_plist);
+ ret = H5Dclose(dataset);
+ VRFY_G((ret >= 0), "H5Dclose1 succeeded");
+
+ if (mpi_rank_g == 0)
+ HDprintf("\nRead Testing Dataset2 by ROW\n");
+ HDmemset(rdata, 0, bigcount * sizeof(B_DATATYPE));
+ dataset = H5Dopen2(fid, DATASET2, H5P_DEFAULT);
+ VRFY_G((dataset >= 0), "H5Dopen2 succeeded");
+
+ dims[0] = bigcount;
+ dims[1] = (hsize_t)mpi_size_g;
+    /* Each process takes a slab of rows. */
+ block[0] = dims[0] / (hsize_t)mpi_size_g;
+ block[1] = dims[1];
+ stride[0] = block[0];
+ stride[1] = block[1];
+ count[0] = 1;
+ count[1] = 1;
+ start[0] = (hsize_t)mpi_rank_g * block[0];
+ start[1] = 0;
+
+ /* create a file dataspace independently */
+ file_dataspace = H5Dget_space(dataset);
+ VRFY_G((file_dataspace >= 0), "H5Dget_space succeeded");
+ ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY_G((ret >= 0), "H5Sset_hyperslab succeeded");
+
+ /* create a memory dataspace independently */
+ mem_dataspace = H5Screate_simple(RANK, block, NULL);
+ VRFY_G((mem_dataspace >= 0), "");
+
+ /* fill dataset with test data */
+ fill_datasets(start, block, wdata);
+ MESG("data_array initialized");
+ if (VERBOSE_MED) {
+ MESG("data_array created");
+ }
+
+ /* set up the collective transfer properties list */
+ xfer_plist = H5Pcreate(H5P_DATASET_XFER);
+ VRFY_G((xfer_plist >= 0), "");
+ ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
+ VRFY_G((ret >= 0), "H5Pcreate xfer succeeded");
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY_G((ret >= 0), "set independent IO collectively succeeded");
+ }
+
+ /* read data collectively */
+ ret = H5Dread(dataset, H5T_NATIVE_LLONG, mem_dataspace, file_dataspace, xfer_plist, rdata);
+ VRFY_G((ret >= 0), "H5Dread dataset2 succeeded");
+
+ /* verify the read data with original expected data */
+ ret = verify_data(start, count, stride, block, rdata, wdata);
+ if (ret) {
+ HDfprintf(stderr, "verify failed\n");
+ exit(1);
+ }
+
+ /* release all temporary handles. */
+ H5Sclose(file_dataspace);
+ H5Sclose(mem_dataspace);
+ H5Pclose(xfer_plist);
+ ret = H5Dclose(dataset);
+ VRFY_G((ret >= 0), "H5Dclose1 succeeded");
+
+ if (mpi_rank_g == 0)
+ HDprintf("\nRead Testing Dataset3 read select ALL proc 0, NONE others\n");
+ HDmemset(rdata, 0, bigcount * sizeof(B_DATATYPE));
+ dataset = H5Dopen2(fid, DATASET3, H5P_DEFAULT);
+ VRFY_G((dataset >= 0), "H5Dopen2 succeeded");
+
+ dims[0] = bigcount;
+ dims[1] = 1;
+
+ /* create a file dataspace independently */
+ file_dataspace = H5Dget_space(dataset);
+ VRFY_G((file_dataspace >= 0), "H5Dget_space succeeded");
+ if (mpi_rank_g == 0) {
+ ret = H5Sselect_all(file_dataspace);
+ VRFY_G((ret >= 0), "H5Sset_all succeeded");
+ }
+ else {
+ ret = H5Sselect_none(file_dataspace);
+ VRFY_G((ret >= 0), "H5Sset_none succeeded");
+ }
+
+ /* create a memory dataspace independently */
+ mem_dataspace = H5Screate_simple(RANK, dims, NULL);
+ VRFY_G((mem_dataspace >= 0), "");
+ if (mpi_rank_g != 0) {
+ ret = H5Sselect_none(mem_dataspace);
+ VRFY_G((ret >= 0), "H5Sset_none succeeded");
+ }
+
+ /* fill dataset with test data */
+ fill_datasets(start, dims, wdata);
+ MESG("data_array initialized");
+ if (VERBOSE_MED) {
+ MESG("data_array created");
+ }
+
+ /* set up the collective transfer properties list */
+ xfer_plist = H5Pcreate(H5P_DATASET_XFER);
+ VRFY_G((xfer_plist >= 0), "");
+ ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
+ VRFY_G((ret >= 0), "H5Pcreate xfer succeeded");
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY_G((ret >= 0), "set independent IO collectively succeeded");
+ }
+
+ /* read data collectively */
+ ret = H5Dread(dataset, H5T_NATIVE_LLONG, mem_dataspace, file_dataspace, xfer_plist, rdata);
+ VRFY_G((ret >= 0), "H5Dread dataset3 succeeded");
+
+ if (mpi_rank_g == 0) {
+ /* verify the read data with original expected data */
+ ret = verify_data(start, count, stride, block, rdata, wdata);
+ if (ret) {
+ HDfprintf(stderr, "verify failed\n");
+ exit(1);
+ }
+ }
+
+ /* release all temporary handles. */
+ H5Sclose(file_dataspace);
+ H5Sclose(mem_dataspace);
+ H5Pclose(xfer_plist);
+ ret = H5Dclose(dataset);
+ VRFY_G((ret >= 0), "H5Dclose1 succeeded");
+
+ if (mpi_rank_g == 0)
+ HDprintf("\nRead Testing Dataset4 with Point selection\n");
+ dataset = H5Dopen2(fid, DATASET4, H5P_DEFAULT);
+ VRFY_G((dataset >= 0), "H5Dopen2 succeeded");
+
+ dims[0] = bigcount;
+ dims[1] = (hsize_t)(mpi_size_g * 4);
+
+ block[0] = dims[0] / 2;
+ block[1] = 2;
+ stride[0] = dims[0] / 2;
+ stride[1] = 2;
+ count[0] = 1;
+ count[1] = 1;
+ start[0] = 0;
+ start[1] = dims[1] / (hsize_t)mpi_size_g * (hsize_t)mpi_rank_g;
+
+ fill_datasets(start, block, wdata);
+ MESG("data_array initialized");
+ if (VERBOSE_MED) {
+ MESG("data_array created");
+ dataset_print(start, block, wdata);
+ }
+
+ num_points = bigcount;
+
+ coords = (hsize_t *)HDmalloc(num_points * RANK * sizeof(hsize_t));
+ VRFY_G((coords != NULL), "coords malloc succeeded");
+
+ set_coords(start, count, stride, block, num_points, coords, IN_ORDER);
+ /* create a file dataspace */
+ file_dataspace = H5Dget_space(dataset);
+ VRFY_G((file_dataspace >= 0), "H5Dget_space succeeded");
+ ret = H5Sselect_elements(file_dataspace, H5S_SELECT_SET, num_points, coords);
+ VRFY_G((ret >= 0), "H5Sselect_elements succeeded");
+
+ if (coords)
+ HDfree(coords);
+
+ /* create a memory dataspace */
+ /* Warning: H5Screate_simple requires an array of hsize_t elements
+     * even if we pass only a single value. Attempting anything else
+     * appears to cause problems with 32-bit compilers.
+ */
+ mem_dataspace = H5Screate_simple(1, dims, NULL);
+ VRFY_G((mem_dataspace >= 0), "");
+
+ /* set up the collective transfer properties list */
+ xfer_plist = H5Pcreate(H5P_DATASET_XFER);
+ VRFY_G((xfer_plist >= 0), "");
+ ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
+ VRFY_G((ret >= 0), "H5Pcreate xfer succeeded");
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY_G((ret >= 0), "set independent IO collectively succeeded");
+ }
+
+ /* read data collectively */
+ ret = H5Dread(dataset, H5T_NATIVE_LLONG, mem_dataspace, file_dataspace, xfer_plist, rdata);
+ VRFY_G((ret >= 0), "H5Dread dataset1 succeeded");
+
+ ret = verify_data(start, count, stride, block, rdata, wdata);
+ if (ret) {
+ HDfprintf(stderr, "verify failed\n");
+ exit(1);
+ }
+
+ /* release all temporary handles. */
+ H5Sclose(file_dataspace);
+ H5Sclose(mem_dataspace);
+ H5Pclose(xfer_plist);
+ ret = H5Dclose(dataset);
+ VRFY_G((ret >= 0), "H5Dclose1 succeeded");
+
+ HDfree(wdata);
+ HDfree(rdata);
+
+ wdata = NULL;
+ rdata = NULL;
+ /* We never wrote Dataset5 in the write section, so we can't
+ * expect to read it...
+ */
+ file_dataspace = -1;
+ mem_dataspace = -1;
+ xfer_plist = -1;
+ dataset = -1;
+
+ /* release all temporary handles. */
+ if (file_dataspace != -1)
+ H5Sclose(file_dataspace);
+ if (mem_dataspace != -1)
+ H5Sclose(mem_dataspace);
+ if (xfer_plist != -1)
+ H5Pclose(xfer_plist);
+ if (dataset != -1) {
+ ret = H5Dclose(dataset);
+ VRFY_G((ret >= 0), "H5Dclose1 succeeded");
+ }
+ H5Fclose(fid);
+
+ /* release data buffers */
+ if (rdata)
+ HDfree(rdata);
+ if (wdata)
+ HDfree(wdata);
+
+} /* dataset_big_read */
+
+static void
+single_rank_independent_io(void)
+{
+ if (mpi_rank_g == 0)
+ HDprintf("single_rank_independent_io\n");
+
+ if (MAIN_PROCESS) {
+ hsize_t dims[1];
+ hid_t file_id = -1;
+ hid_t fapl_id = -1;
+ hid_t dset_id = -1;
+ hid_t fspace_id = -1;
+ herr_t ret;
+ int *data = NULL;
+ uint64_t i;
+
+ fapl_id = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY_G((fapl_id >= 0), "H5P_FILE_ACCESS");
+
+ H5Pset_fapl_mpio(fapl_id, MPI_COMM_SELF, MPI_INFO_NULL);
+ file_id = H5Fcreate(FILENAME[1], H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id);
+ VRFY_G((file_id >= 0), "H5Dcreate2 succeeded");
+
+ /*
+ * Calculate the number of elements needed to exceed
+ * MPI's INT_MAX limitation
+ */
+ dims[0] = (INT_MAX / sizeof(int)) + 10;
+
+ fspace_id = H5Screate_simple(1, dims, NULL);
+ VRFY_G((fspace_id >= 0), "H5Screate_simple fspace_id succeeded");
+
+ /*
+ * Create and write to a >2GB dataset from a single rank.
+ */
+ dset_id = H5Dcreate2(file_id, "test_dset", H5T_NATIVE_INT, fspace_id, H5P_DEFAULT, H5P_DEFAULT,
+ H5P_DEFAULT);
+
+ VRFY_G((dset_id >= 0), "H5Dcreate2 succeeded");
+
+        data = malloc(dims[0] * sizeof(int));
+        VRFY_G((data != NULL), "data malloc succeeded");
+
+ /* Initialize data */
+ for (i = 0; i < dims[0]; i++)
+ data[i] = (int)(i % (uint64_t)DXFER_BIGCOUNT);
+
+ /* Write data */
+ ret = H5Dwrite(dset_id, H5T_NATIVE_INT, H5S_BLOCK, fspace_id, H5P_DEFAULT, data);
+ VRFY_G((ret >= 0), "H5Dwrite succeeded");
+
+ /* Wipe buffer */
+ HDmemset(data, 0, dims[0] * sizeof(int));
+
+ /* Read data back */
+ ret = H5Dread(dset_id, H5T_NATIVE_INT, H5S_BLOCK, fspace_id, H5P_DEFAULT, data);
+ VRFY_G((ret >= 0), "H5Dread succeeded");
+
+ /* Verify data */
+ for (i = 0; i < dims[0]; i++)
+ if (data[i] != (int)(i % (uint64_t)DXFER_BIGCOUNT)) {
+ HDfprintf(stderr, "verify failed\n");
+ exit(1);
+ }
+
+ free(data);
+ H5Sclose(fspace_id);
+ H5Dclose(dset_id);
+ H5Fclose(file_id);
+
+ H5Fdelete(FILENAME[1], fapl_id);
+
+ H5Pclose(fapl_id);
+ }
+ MPI_Barrier(MPI_COMM_WORLD);
+}
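
The size arithmetic behind the dataspace above, shown as a standalone sketch (not part of the patch) and assuming a 4-byte int:

    #include <limits.h>
    #include <stdint.h>
    #include <stdio.h>

    int
    main(void)
    {
        uint64_t nelems = (uint64_t)(INT_MAX / sizeof(int)) + 10; /* 536,870,921 with 4-byte int */
        uint64_t nbytes = nelems * sizeof(int);                   /* 2,147,483,684 > INT_MAX (2,147,483,647) */

        /* The dataset is therefore just over 2 GiB, exceeding MPI's INT_MAX byte-count limit */
        printf("elements = %llu, bytes = %llu\n", (unsigned long long)nelems, (unsigned long long)nbytes);
        return 0;
    }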
+
+/*
+ * Create the appropriate File access property list
+ */
+hid_t
+create_faccess_plist(MPI_Comm comm, MPI_Info info, int l_facc_type)
+{
+ hid_t ret_pl = -1;
+ herr_t ret; /* generic return value */
+ int mpi_rank; /* mpi variables */
+
+ /* need the rank for error checking macros */
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ ret_pl = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY_G((ret_pl >= 0), "H5P_FILE_ACCESS");
+
+ if (l_facc_type == FACC_DEFAULT)
+ return (ret_pl);
+
+ if (l_facc_type == FACC_MPIO) {
+ /* set Parallel access with communicator */
+ ret = H5Pset_fapl_mpio(ret_pl, comm, info);
+ VRFY_G((ret >= 0), "");
+ ret = H5Pset_all_coll_metadata_ops(ret_pl, TRUE);
+ VRFY_G((ret >= 0), "");
+ ret = H5Pset_coll_metadata_write(ret_pl, TRUE);
+ VRFY_G((ret >= 0), "");
+ return (ret_pl);
+ }
+
+ if (l_facc_type == (FACC_MPIO | FACC_SPLIT)) {
+ hid_t mpio_pl;
+
+ mpio_pl = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY_G((mpio_pl >= 0), "");
+ /* set Parallel access with communicator */
+ ret = H5Pset_fapl_mpio(mpio_pl, comm, info);
+ VRFY_G((ret >= 0), "");
+
+ /* setup file access template */
+ ret_pl = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY_G((ret_pl >= 0), "");
+ /* set Parallel access with communicator */
+ ret = H5Pset_fapl_split(ret_pl, ".meta", mpio_pl, ".raw", mpio_pl);
+ VRFY_G((ret >= 0), "H5Pset_fapl_split succeeded");
+ H5Pclose(mpio_pl);
+ return (ret_pl);
+ }
+
+ /* unknown file access types */
+ return (ret_pl);
+}
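
A short usage sketch (not part of the patch) of create_faccess_plist() on its FACC_MPIO path, mirroring how coll_chunktest() below opens its test file; the helper name and file name are hypothetical, and FACC_MPIO is assumed to come from the parallel test headers:

    #include "hdf5.h"
    #include <mpi.h>

    /* Hypothetical helper: create a file through an MPI-IO FAPL obtained from
     * create_faccess_plist() and release everything again. */
    static void
    create_and_close_collectively(const char *name)
    {
        hid_t acc_plist = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, FACC_MPIO);
        hid_t file      = H5Fcreate(name, H5F_ACC_TRUNC, H5P_DEFAULT, acc_plist);

        H5Pclose(acc_plist); /* the FAPL can be released once the file is open */
        /* ... collective dataset I/O on `file` would go here ... */
        H5Fclose(file);
    }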
+
+/*-------------------------------------------------------------------------
+ * Function: coll_chunk1
+ *
+ * Purpose: Wrapper to test the collective chunk IO for regular JOINT
+ selection with a single chunk
+ *
+ * Return: Success: 0
+ *
+ * Failure: -1
+ *
+ * Programmer: Unknown
+ * July 12th, 2004
+ *
+ *-------------------------------------------------------------------------
+ */
+
+/* ------------------------------------------------------------------------
+ * Descriptions for the selection: One big singular selection inside one chunk
+ * Two dimensions,
+ *
+ * dim1 = space_dim1(5760)*mpi_size
+ * dim2 = space_dim2(3)
+ * chunk_dim1 = dim1
+ * chunk_dim2 = dim2
+ * block = 1 for all dimensions
+ * stride = 1 for all dimensions
+ * count0 = space_dim1(5760)
+ * count1 = space_dim2(3)
+ * start0 = mpi_rank*space_dim1
+ * start1 = 0
+ * ------------------------------------------------------------------------
+ */
+
+void
+coll_chunk1(void)
+{
+ const char *filename = FILENAME[0];
+ if (mpi_rank_g == 0)
+ HDprintf("coll_chunk1\n");
+
+ coll_chunktest(filename, 1, BYROW_CONT, API_NONE, HYPER, HYPER, OUT_OF_ORDER);
+ coll_chunktest(filename, 1, BYROW_CONT, API_NONE, HYPER, POINT, OUT_OF_ORDER);
+ coll_chunktest(filename, 1, BYROW_CONT, API_NONE, POINT, ALL, OUT_OF_ORDER);
+ coll_chunktest(filename, 1, BYROW_CONT, API_NONE, POINT, POINT, OUT_OF_ORDER);
+ coll_chunktest(filename, 1, BYROW_CONT, API_NONE, POINT, HYPER, OUT_OF_ORDER);
+
+ coll_chunktest(filename, 1, BYROW_CONT, API_NONE, POINT, ALL, IN_ORDER);
+ coll_chunktest(filename, 1, BYROW_CONT, API_NONE, POINT, POINT, IN_ORDER);
+ coll_chunktest(filename, 1, BYROW_CONT, API_NONE, POINT, HYPER, IN_ORDER);
+}
+
+/*-------------------------------------------------------------------------
+ * Function: coll_chunk2
+ *
+ * Purpose: Wrapper to test the collective chunk IO for regular DISJOINT
+ selection with a single chunk
+ *
+ * Return: Success: 0
+ *
+ * Failure: -1
+ *
+ * Programmer: Unknown
+ * July 12th, 2004
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+
+/* ------------------------------------------------------------------------
+ * Descriptions for the selection: many disjoint selections inside one chunk
+ * Two dimensions,
+ *
+ * dim1 = space_dim1*mpi_size(5760)
+ * dim2 = space_dim2(3)
+ * chunk_dim1 = dim1
+ * chunk_dim2 = dim2
+ * block = 1 for all dimensions
+ * stride = 3 for all dimensions
+ * count0 = space_dim1/stride0(5760/3)
+ * count1 = space_dim2/stride(3/3 = 1)
+ * start0 = mpi_rank*space_dim1
+ * start1 = 0
+ *
+ * ------------------------------------------------------------------------
+ */
+void
+coll_chunk2(void)
+{
+ const char *filename = FILENAME[0];
+ if (mpi_rank_g == 0)
+ HDprintf("coll_chunk2\n");
+
+ coll_chunktest(filename, 1, BYROW_DISCONT, API_NONE, HYPER, HYPER, OUT_OF_ORDER);
+ coll_chunktest(filename, 1, BYROW_DISCONT, API_NONE, HYPER, POINT, OUT_OF_ORDER);
+ coll_chunktest(filename, 1, BYROW_DISCONT, API_NONE, POINT, ALL, OUT_OF_ORDER);
+ coll_chunktest(filename, 1, BYROW_DISCONT, API_NONE, POINT, POINT, OUT_OF_ORDER);
+ coll_chunktest(filename, 1, BYROW_DISCONT, API_NONE, POINT, HYPER, OUT_OF_ORDER);
+
+ coll_chunktest(filename, 1, BYROW_DISCONT, API_NONE, POINT, ALL, IN_ORDER);
+ coll_chunktest(filename, 1, BYROW_DISCONT, API_NONE, POINT, POINT, IN_ORDER);
+ coll_chunktest(filename, 1, BYROW_DISCONT, API_NONE, POINT, HYPER, IN_ORDER);
+}
+
+/*-------------------------------------------------------------------------
+ * Function: coll_chunk3
+ *
+ * Purpose: Wrapper to test the collective chunk IO for regular JOINT
+                selection with at least 2*mpi_size chunks
+ *
+ * Return: Success: 0
+ *
+ * Failure: -1
+ *
+ * Programmer: Unknown
+ * July 12th, 2004
+ *
+ *-------------------------------------------------------------------------
+ */
+
+/* ------------------------------------------------------------------------
+ * Descriptions for the selection: one singular selection across many chunks
+ * Two dimensions, Num of chunks = 2* mpi_size
+ *
+ * dim1 = space_dim1*mpi_size
+ * dim2 = space_dim2(3)
+ * chunk_dim1 = space_dim1
+ * chunk_dim2 = dim2/2
+ * block = 1 for all dimensions
+ * stride = 1 for all dimensions
+ * count0 = space_dim1
+ * count1 = space_dim2(3)
+ * start0 = mpi_rank*space_dim1
+ * start1 = 0
+ *
+ * ------------------------------------------------------------------------
+ */
+
+void
+coll_chunk3(void)
+{
+ const char *filename = FILENAME[0];
+ if (mpi_rank_g == 0)
+ HDprintf("coll_chunk3\n");
+
+ coll_chunktest(filename, mpi_size_g, BYROW_CONT, API_NONE, HYPER, HYPER, OUT_OF_ORDER);
+ coll_chunktest(filename, mpi_size_g, BYROW_CONT, API_NONE, HYPER, POINT, OUT_OF_ORDER);
+ coll_chunktest(filename, mpi_size_g, BYROW_CONT, API_NONE, POINT, ALL, OUT_OF_ORDER);
+ coll_chunktest(filename, mpi_size_g, BYROW_CONT, API_NONE, POINT, POINT, OUT_OF_ORDER);
+ coll_chunktest(filename, mpi_size_g, BYROW_CONT, API_NONE, POINT, HYPER, OUT_OF_ORDER);
+
+ coll_chunktest(filename, mpi_size_g, BYROW_CONT, API_NONE, POINT, ALL, IN_ORDER);
+ coll_chunktest(filename, mpi_size_g, BYROW_CONT, API_NONE, POINT, POINT, IN_ORDER);
+ coll_chunktest(filename, mpi_size_g, BYROW_CONT, API_NONE, POINT, HYPER, IN_ORDER);
+}
+
+//-------------------------------------------------------------------------
+// Borrowed/Modified (slightly) from t_coll_chunk.c
+/*-------------------------------------------------------------------------
+ * Function: coll_chunktest
+ *
+ * Purpose: The real testing routine for regular selections with collective
+                chunked storage, testing both write and read.
+                If anything fails, it may be in either the read or the write;
+                there is no separate test for each.
+ *
+ * Return: Success: 0
+ *
+ * Failure: -1
+ *
+ * Programmer: Unknown
+ * July 12th, 2004
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static void
+coll_chunktest(const char *filename, int chunk_factor, int select_factor, int api_option, int file_selection,
+ int mem_selection, int mode)
+{
+ hid_t file, dataset, file_dataspace, mem_dataspace;
+ hid_t acc_plist, xfer_plist, crp_plist;
+
+ hsize_t dims[RANK], chunk_dims[RANK];
+ int *data_array1 = NULL;
+ int *data_origin1 = NULL;
+
+ hsize_t start[RANK], count[RANK], stride[RANK], block[RANK];
+
+#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
+ unsigned prop_value;
+#endif /* H5_HAVE_INSTRUMENTED_LIBRARY */
+
+ herr_t status;
+ MPI_Comm comm = MPI_COMM_WORLD;
+ MPI_Info info = MPI_INFO_NULL;
+
+ size_t num_points; /* for point selection */
+ hsize_t *coords = NULL; /* for point selection */
+
+ /* Create the data space */
+
+ acc_plist = create_faccess_plist(comm, info, facc_type);
+ VRFY_G((acc_plist >= 0), "");
+
+ file = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, acc_plist);
+ VRFY_G((file >= 0), "H5Fcreate succeeded");
+
+ status = H5Pclose(acc_plist);
+ VRFY_G((status >= 0), "");
+
+ /* setup dimensionality object */
+ dims[0] = space_dim1 * (hsize_t)mpi_size_g;
+ dims[1] = space_dim2;
+
+ /* allocate memory for data buffer */
+ data_array1 = (int *)HDmalloc(dims[0] * dims[1] * sizeof(int));
+ VRFY_G((data_array1 != NULL), "data_array1 malloc succeeded");
+
+ /* set up dimensions of the slab this process accesses */
+ ccslab_set(mpi_rank_g, mpi_size_g, start, count, stride, block, select_factor);
+
+ /* set up the coords array selection */
+ num_points = block[0] * block[1] * count[0] * count[1];
+ coords = (hsize_t *)HDmalloc(num_points * RANK * sizeof(hsize_t));
+ VRFY_G((coords != NULL), "coords malloc succeeded");
+ point_set(start, count, stride, block, num_points, coords, mode);
+
+ /* Warning: H5Screate_simple requires an array of hsize_t elements
+     * even if we pass only a single value. Attempting anything else
+     * appears to cause problems with 32-bit compilers.
+ */
+ file_dataspace = H5Screate_simple(2, dims, NULL);
+ VRFY_G((file_dataspace >= 0), "file dataspace created succeeded");
+
+ if (ALL != mem_selection) {
+ mem_dataspace = H5Screate_simple(2, dims, NULL);
+ VRFY_G((mem_dataspace >= 0), "mem dataspace created succeeded");
+ }
+ else {
+ /* Putting the warning about H5Screate_simple (above) into practice... */
+ hsize_t dsdims[1] = {num_points};
+ mem_dataspace = H5Screate_simple(1, dsdims, NULL);
+ VRFY_G((mem_dataspace >= 0), "mem_dataspace create succeeded");
+ }
+
+ crp_plist = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY_G((crp_plist >= 0), "");
+
+ /* Set up chunk information. */
+ chunk_dims[0] = dims[0] / (hsize_t)chunk_factor;
+
+ /* to decrease the testing time, maintain bigger chunk size */
+ (chunk_factor == 1) ? (chunk_dims[1] = space_dim2) : (chunk_dims[1] = space_dim2 / 2);
+ status = H5Pset_chunk(crp_plist, 2, chunk_dims);
+ VRFY_G((status >= 0), "chunk creation property list succeeded");
+
+ dataset = H5Dcreate2(file, DSET_COLLECTIVE_CHUNK_NAME, H5T_NATIVE_INT, file_dataspace, H5P_DEFAULT,
+ crp_plist, H5P_DEFAULT);
+ VRFY_G((dataset >= 0), "dataset created succeeded");
+
+ status = H5Pclose(crp_plist);
+ VRFY_G((status >= 0), "");
+
+    /* put some trivial data in the data array */
+ ccdataset_fill(start, stride, count, block, data_array1, mem_selection);
+
+ MESG("data_array initialized");
+
+ switch (file_selection) {
+ case HYPER:
+ status = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY_G((status >= 0), "hyperslab selection succeeded");
+ break;
+
+ case POINT:
+ if (num_points) {
+ status = H5Sselect_elements(file_dataspace, H5S_SELECT_SET, num_points, coords);
+ VRFY_G((status >= 0), "Element selection succeeded");
+ }
+ else {
+ status = H5Sselect_none(file_dataspace);
+ VRFY_G((status >= 0), "none selection succeeded");
+ }
+ break;
+
+ case ALL:
+ status = H5Sselect_all(file_dataspace);
+ VRFY_G((status >= 0), "H5Sselect_all succeeded");
+ break;
+ }
+
+ switch (mem_selection) {
+ case HYPER:
+ status = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY_G((status >= 0), "hyperslab selection succeeded");
+ break;
+
+ case POINT:
+ if (num_points) {
+ status = H5Sselect_elements(mem_dataspace, H5S_SELECT_SET, num_points, coords);
+ VRFY_G((status >= 0), "Element selection succeeded");
+ }
+ else {
+ status = H5Sselect_none(mem_dataspace);
+ VRFY_G((status >= 0), "none selection succeeded");
+ }
+ break;
+
+ case ALL:
+ status = H5Sselect_all(mem_dataspace);
+ VRFY_G((status >= 0), "H5Sselect_all succeeded");
+ break;
+ }
+
+ /* set up the collective transfer property list */
+ xfer_plist = H5Pcreate(H5P_DATASET_XFER);
+ VRFY_G((xfer_plist >= 0), "");
+
+ status = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
+ VRFY_G((status >= 0), "MPIO collective transfer property succeeded");
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ status = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY_G((status >= 0), "set independent IO collectively succeeded");
+ }
+
+ switch (api_option) {
+ case API_LINK_HARD:
+ status = H5Pset_dxpl_mpio_chunk_opt(xfer_plist, H5FD_MPIO_CHUNK_ONE_IO);
+ VRFY_G((status >= 0), "collective chunk optimization succeeded");
+ break;
+
+ case API_MULTI_HARD:
+ status = H5Pset_dxpl_mpio_chunk_opt(xfer_plist, H5FD_MPIO_CHUNK_MULTI_IO);
+ VRFY_G((status >= 0), "collective chunk optimization succeeded ");
+ break;
+
+ case API_LINK_TRUE:
+ status = H5Pset_dxpl_mpio_chunk_opt_num(xfer_plist, 2);
+ VRFY_G((status >= 0), "collective chunk optimization set chunk number succeeded");
+ break;
+
+ case API_LINK_FALSE:
+ status = H5Pset_dxpl_mpio_chunk_opt_num(xfer_plist, 6);
+ VRFY_G((status >= 0), "collective chunk optimization set chunk number succeeded");
+ break;
+
+ case API_MULTI_COLL:
+ status = H5Pset_dxpl_mpio_chunk_opt_num(xfer_plist, 8); /* make sure it is using multi-chunk IO */
+ VRFY_G((status >= 0), "collective chunk optimization set chunk number succeeded");
+ status = H5Pset_dxpl_mpio_chunk_opt_ratio(xfer_plist, 50);
+ VRFY_G((status >= 0), "collective chunk optimization set chunk ratio succeeded");
+ break;
+
+ case API_MULTI_IND:
+ status = H5Pset_dxpl_mpio_chunk_opt_num(xfer_plist, 8); /* make sure it is using multi-chunk IO */
+ VRFY_G((status >= 0), "collective chunk optimization set chunk number succeeded");
+ status = H5Pset_dxpl_mpio_chunk_opt_ratio(xfer_plist, 100);
+ VRFY_G((status >= 0), "collective chunk optimization set chunk ratio succeeded");
+ break;
+
+ default:;
+ }
+
+#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
+ if (facc_type == FACC_MPIO) {
+ switch (api_option) {
+ case API_LINK_HARD:
+ prop_value = H5D_XFER_COLL_CHUNK_DEF;
+ status = H5Pinsert2(xfer_plist, H5D_XFER_COLL_CHUNK_LINK_HARD_NAME, H5D_XFER_COLL_CHUNK_SIZE,
+ &prop_value, NULL, NULL, NULL, NULL, NULL, NULL);
+ VRFY_G((status >= 0), "testing property list inserted succeeded");
+ break;
+
+ case API_MULTI_HARD:
+ prop_value = H5D_XFER_COLL_CHUNK_DEF;
+ status = H5Pinsert2(xfer_plist, H5D_XFER_COLL_CHUNK_MULTI_HARD_NAME, H5D_XFER_COLL_CHUNK_SIZE,
+ &prop_value, NULL, NULL, NULL, NULL, NULL, NULL);
+ VRFY_G((status >= 0), "testing property list inserted succeeded");
+ break;
+
+ case API_LINK_TRUE:
+ prop_value = H5D_XFER_COLL_CHUNK_DEF;
+ status =
+ H5Pinsert2(xfer_plist, H5D_XFER_COLL_CHUNK_LINK_NUM_TRUE_NAME, H5D_XFER_COLL_CHUNK_SIZE,
+ &prop_value, NULL, NULL, NULL, NULL, NULL, NULL);
+ VRFY_G((status >= 0), "testing property list inserted succeeded");
+ break;
+
+ case API_LINK_FALSE:
+ prop_value = H5D_XFER_COLL_CHUNK_DEF;
+ status =
+ H5Pinsert2(xfer_plist, H5D_XFER_COLL_CHUNK_LINK_NUM_FALSE_NAME, H5D_XFER_COLL_CHUNK_SIZE,
+ &prop_value, NULL, NULL, NULL, NULL, NULL, NULL);
+ VRFY_G((status >= 0), "testing property list inserted succeeded");
+ break;
+
+ case API_MULTI_COLL:
+ prop_value = H5D_XFER_COLL_CHUNK_DEF;
+ status =
+ H5Pinsert2(xfer_plist, H5D_XFER_COLL_CHUNK_MULTI_RATIO_COLL_NAME,
+ H5D_XFER_COLL_CHUNK_SIZE, &prop_value, NULL, NULL, NULL, NULL, NULL, NULL);
+ VRFY_G((status >= 0), "testing property list inserted succeeded");
+ break;
+
+ case API_MULTI_IND:
+ prop_value = H5D_XFER_COLL_CHUNK_DEF;
+ status =
+ H5Pinsert2(xfer_plist, H5D_XFER_COLL_CHUNK_MULTI_RATIO_IND_NAME, H5D_XFER_COLL_CHUNK_SIZE,
+ &prop_value, NULL, NULL, NULL, NULL, NULL, NULL);
+ VRFY_G((status >= 0), "testing property list inserted succeeded");
+ break;
+
+ default:;
+ }
+ }
+#endif
+
+ /* write data collectively */
+ status = H5Dwrite(dataset, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
+ VRFY_G((status >= 0), "dataset write succeeded");
+
+#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
+ if (facc_type == FACC_MPIO) {
+ switch (api_option) {
+ case API_LINK_HARD:
+ status = H5Pget(xfer_plist, H5D_XFER_COLL_CHUNK_LINK_HARD_NAME, &prop_value);
+ VRFY_G((status >= 0), "testing property list get succeeded");
+ VRFY_G((prop_value == 0), "API to set LINK COLLECTIVE IO directly succeeded");
+ break;
+
+ case API_MULTI_HARD:
+ status = H5Pget(xfer_plist, H5D_XFER_COLL_CHUNK_MULTI_HARD_NAME, &prop_value);
+ VRFY_G((status >= 0), "testing property list get succeeded");
+ VRFY_G((prop_value == 0), "API to set MULTI-CHUNK COLLECTIVE IO optimization succeeded");
+ break;
+
+ case API_LINK_TRUE:
+ status = H5Pget(xfer_plist, H5D_XFER_COLL_CHUNK_LINK_NUM_TRUE_NAME, &prop_value);
+ VRFY_G((status >= 0), "testing property list get succeeded");
+ VRFY_G((prop_value == 0), "API to set LINK COLLECTIVE IO succeeded");
+ break;
+
+ case API_LINK_FALSE:
+ status = H5Pget(xfer_plist, H5D_XFER_COLL_CHUNK_LINK_NUM_FALSE_NAME, &prop_value);
+ VRFY_G((status >= 0), "testing property list get succeeded");
+ VRFY_G((prop_value == 0), "API to set LINK IO transferring to multi-chunk IO succeeded");
+ break;
+
+ case API_MULTI_COLL:
+ status = H5Pget(xfer_plist, H5D_XFER_COLL_CHUNK_MULTI_RATIO_COLL_NAME, &prop_value);
+ VRFY_G((status >= 0), "testing property list get succeeded");
+ VRFY_G((prop_value == 0), "API to set MULTI-CHUNK COLLECTIVE IO with optimization succeeded");
+ break;
+
+ case API_MULTI_IND:
+ status = H5Pget(xfer_plist, H5D_XFER_COLL_CHUNK_MULTI_RATIO_IND_NAME, &prop_value);
+ VRFY_G((status >= 0), "testing property list get succeeded");
+ VRFY_G((prop_value == 0),
+ "API to set MULTI-CHUNK IO transferring to independent IO succeeded");
+ break;
+
+ default:;
+ }
+ }
+#endif
+
+ status = H5Dclose(dataset);
+ VRFY_G((status >= 0), "");
+
+ status = H5Pclose(xfer_plist);
+ VRFY_G((status >= 0), "property list closed");
+
+ status = H5Sclose(file_dataspace);
+ VRFY_G((status >= 0), "");
+
+ status = H5Sclose(mem_dataspace);
+ VRFY_G((status >= 0), "");
+
+ status = H5Fclose(file);
+ VRFY_G((status >= 0), "");
+
+ if (data_array1)
+ HDfree(data_array1);
+
+ /* Use collective read to verify the correctness of collective write. */
+
+ /* allocate memory for data buffer */
+ data_array1 = (int *)HDmalloc(dims[0] * dims[1] * sizeof(int));
+ VRFY_G((data_array1 != NULL), "data_array1 malloc succeeded");
+
+ /* allocate memory for data buffer */
+ data_origin1 = (int *)HDmalloc(dims[0] * dims[1] * sizeof(int));
+ VRFY_G((data_origin1 != NULL), "data_origin1 malloc succeeded");
+
+ acc_plist = create_faccess_plist(comm, info, facc_type);
+ VRFY_G((acc_plist >= 0), "MPIO creation property list succeeded");
+
+ file = H5Fopen(FILENAME[0], H5F_ACC_RDONLY, acc_plist);
+    VRFY_G((file >= 0), "H5Fopen succeeded");
+
+ status = H5Pclose(acc_plist);
+ VRFY_G((status >= 0), "");
+
+ /* open the collective dataset*/
+ dataset = H5Dopen2(file, DSET_COLLECTIVE_CHUNK_NAME, H5P_DEFAULT);
+ VRFY_G((dataset >= 0), "");
+
+ /* set up dimensions of the slab this process accesses */
+ ccslab_set(mpi_rank_g, mpi_size_g, start, count, stride, block, select_factor);
+
+ /* obtain the file and mem dataspace*/
+ file_dataspace = H5Dget_space(dataset);
+ VRFY_G((file_dataspace >= 0), "");
+
+ if (ALL != mem_selection) {
+ mem_dataspace = H5Dget_space(dataset);
+ VRFY_G((mem_dataspace >= 0), "");
+ }
+ else {
+ /* Warning: H5Screate_simple requires an array of hsize_t elements
+         * even if we pass only a single value. Attempting anything else
+         * appears to cause problems with 32-bit compilers.
+ */
+ hsize_t dsdims[1] = {num_points};
+ mem_dataspace = H5Screate_simple(1, dsdims, NULL);
+ VRFY_G((mem_dataspace >= 0), "mem_dataspace create succeeded");
+ }
+
+ switch (file_selection) {
+ case HYPER:
+ status = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY_G((status >= 0), "hyperslab selection succeeded");
+ break;
+
+ case POINT:
+ if (num_points) {
+ status = H5Sselect_elements(file_dataspace, H5S_SELECT_SET, num_points, coords);
+ VRFY_G((status >= 0), "Element selection succeeded");
+ }
+ else {
+ status = H5Sselect_none(file_dataspace);
+ VRFY_G((status >= 0), "none selection succeeded");
+ }
+ break;
+
+ case ALL:
+ status = H5Sselect_all(file_dataspace);
+ VRFY_G((status >= 0), "H5Sselect_all succeeded");
+ break;
+ }
+
+ switch (mem_selection) {
+ case HYPER:
+ status = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY_G((status >= 0), "hyperslab selection succeeded");
+ break;
+
+ case POINT:
+ if (num_points) {
+ status = H5Sselect_elements(mem_dataspace, H5S_SELECT_SET, num_points, coords);
+ VRFY_G((status >= 0), "Element selection succeeded");
+ }
+ else {
+ status = H5Sselect_none(mem_dataspace);
+ VRFY_G((status >= 0), "none selection succeeded");
+ }
+ break;
+
+ case ALL:
+ status = H5Sselect_all(mem_dataspace);
+ VRFY_G((status >= 0), "H5Sselect_all succeeded");
+ break;
+ }
+
+    /* fill the expected-data buffer with test data */
+ ccdataset_fill(start, stride, count, block, data_origin1, mem_selection);
+ xfer_plist = H5Pcreate(H5P_DATASET_XFER);
+ VRFY_G((xfer_plist >= 0), "");
+
+ status = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
+ VRFY_G((status >= 0), "MPIO collective transfer property succeeded");
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ status = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY_G((status >= 0), "set independent IO collectively succeeded");
+ }
+
+ status = H5Dread(dataset, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
+ VRFY_G((status >= 0), "dataset read succeeded");
+
+ /* verify the read data with original expected data */
+ status = ccdataset_vrfy(start, count, stride, block, data_array1, data_origin1, mem_selection);
+ if (status)
+ nerrors++;
+
+ status = H5Pclose(xfer_plist);
+ VRFY_G((status >= 0), "property list closed");
+
+ /* close dataset collectively */
+ status = H5Dclose(dataset);
+ VRFY_G((status >= 0), "H5Dclose");
+
+ /* release all IDs created */
+ status = H5Sclose(file_dataspace);
+ VRFY_G((status >= 0), "H5Sclose");
+
+ status = H5Sclose(mem_dataspace);
+ VRFY_G((status >= 0), "H5Sclose");
+
+ /* close the file collectively */
+ status = H5Fclose(file);
+ VRFY_G((status >= 0), "H5Fclose");
+
+ /* release data buffers */
+ if (coords)
+ HDfree(coords);
+ if (data_array1)
+ HDfree(data_array1);
+ if (data_origin1)
+ HDfree(data_origin1);
+}
+
+int
+main(int argc, char **argv)
+{
+ hid_t acc_plist = H5I_INVALID_HID;
+
+ MPI_Init(&argc, &argv);
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size_g);
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank_g);
+
+    /* Attempt to turn off atexit post-processing so that, if errors occur
+     * during the test and the process is aborted, it does not hang in the
+     * atexit post-processing, where it may try to make MPI calls that no
+     * longer work at that point.
+     */
+ if (H5dont_atexit() < 0)
+ HDprintf("Failed to turn off atexit processing. Continue.\n");
+
+ /* set alarm. */
+ /* TestAlarmOn(); */
+
+ acc_plist = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type);
+
+ /* Get the capability flag of the VOL connector being used */
+ if (H5Pget_vol_cap_flags(acc_plist, &vol_cap_flags_g) < 0) {
+ if (MAIN_PROCESS)
+ HDprintf("Failed to get the capability flag of the VOL connector being used\n");
+
+ MPI_Finalize();
+ return 0;
+ }
+
+ /* Make sure the connector supports the API functions being tested. This test only
+ * uses a few API functions, such as H5Fcreate/open/close/delete, H5Dcreate/write/read/close,
+ * and H5Dget_space. */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
+ if (MAIN_PROCESS)
+ HDprintf(
+                "API functions for basic file, basic dataset, or dataset more aren't supported with this connector\n");
+
+ MPI_Finalize();
+ return 0;
+ }
+
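+    /* Run each test, synchronizing all ranks between tests */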
+ dataset_big_write();
+ MPI_Barrier(MPI_COMM_WORLD);
+
+ dataset_big_read();
+ MPI_Barrier(MPI_COMM_WORLD);
+
+ coll_chunk1();
+ MPI_Barrier(MPI_COMM_WORLD);
+ coll_chunk2();
+ MPI_Barrier(MPI_COMM_WORLD);
+ coll_chunk3();
+ MPI_Barrier(MPI_COMM_WORLD);
+
+ single_rank_independent_io();
+
+ /* turn off alarm */
+ /* TestAlarmOff(); */
+
+ if (mpi_rank_g == 0) {
+ hid_t fapl_id = H5Pcreate(H5P_FILE_ACCESS);
+
+ H5Pset_fapl_mpio(fapl_id, MPI_COMM_SELF, MPI_INFO_NULL);
+
+ H5E_BEGIN_TRY
+ {
+ H5Fdelete(FILENAME[0], fapl_id);
+ H5Fdelete(FILENAME[1], fapl_id);
+ }
+ H5E_END_TRY;
+
+ H5Pclose(fapl_id);
+ }
+
+ H5Pclose(acc_plist);
+
+ /* close HDF5 library */
+ H5close();
+
+ MPI_Finalize();
+
+ return 0;
+}
diff --git a/testpar/API/t_chunk_alloc.c b/testpar/API/t_chunk_alloc.c
new file mode 100644
index 0000000..dd78225
--- /dev/null
+++ b/testpar/API/t_chunk_alloc.c
@@ -0,0 +1,512 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://www.hdfgroup.org/licenses. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/*
+ * This verifies whether the storage space allocation methods are compatible
+ * between serial and parallel modes.
+ *
+ * Created by: Christian Chilan and Albert Cheng
+ * Date: 2006/05/25
+ */
+
+#include "hdf5.h"
+#include "testphdf5.h"
+static int mpi_size, mpi_rank;
+
+#define DSET_NAME "ExtendibleArray"
+#define CHUNK_SIZE 1000 /* #elements per chunk */
+#define CHUNK_FACTOR 200 /* default dataset size in terms of chunks */
+#define CLOSE 1
+#define NO_CLOSE 0
+
+#if 0
+static MPI_Offset
+get_filesize(const char *filename)
+{
+ int mpierr;
+ MPI_File fd;
+ MPI_Offset filesize;
+
+ mpierr = MPI_File_open(MPI_COMM_SELF, filename, MPI_MODE_RDONLY, MPI_INFO_NULL, &fd);
+ VRFY((mpierr == MPI_SUCCESS), "");
+
+ mpierr = MPI_File_get_size(fd, &filesize);
+ VRFY((mpierr == MPI_SUCCESS), "");
+
+ mpierr = MPI_File_close(&fd);
+ VRFY((mpierr == MPI_SUCCESS), "");
+
+ return (filesize);
+}
+#endif
+
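+/* Write patterns used when creating/verifying the dataset: write nothing,
+ * write only the second-to-last chunk, or write every chunk.
+ */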
+typedef enum write_pattern { none, sec_last, all } write_type;
+
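+/* Parallel access types: all ranks write every chunk in an interleaved
+ * fashion, only open the dataset, or only extend the dataset.
+ */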
+typedef enum access_ { write_all, open_only, extend_only } access_type;
+
+/*
+ * This creates a dataset serially with chunks, each of CHUNK_SIZE
+ * elements. The allocation time is set to H5D_ALLOC_TIME_EARLY. Another
+ * routine will open this in parallel for the extension test.
+ */
+static void
+create_chunked_dataset(const char *filename, int chunk_factor, write_type write_pattern)
+{
+ hid_t file_id, dataset; /* handles */
+ hid_t dataspace, memspace;
+ hid_t cparms;
+ hsize_t dims[1];
+ hsize_t maxdims[1] = {H5S_UNLIMITED};
+
+ hsize_t chunk_dims[1] = {CHUNK_SIZE};
+ hsize_t count[1];
+ hsize_t stride[1];
+ hsize_t block[1];
+ hsize_t offset[1]; /* Selection offset within dataspace */
+ /* Variables used in reading data back */
+ char buffer[CHUNK_SIZE];
+ long nchunks;
+ herr_t hrc;
+#if 0
+ MPI_Offset filesize, /* actual file size */
+ est_filesize; /* estimated file size */
+#endif
+ /* set up MPI parameters */
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ /* Only MAINPROCESS should create the file. Others just wait. */
+ if (MAINPROCESS) {
+ nchunks = chunk_factor * mpi_size;
+ dims[0] = (hsize_t)(nchunks * CHUNK_SIZE);
+ /* Create the data space with unlimited dimensions. */
+ dataspace = H5Screate_simple(1, dims, maxdims);
+ VRFY((dataspace >= 0), "");
+
+ memspace = H5Screate_simple(1, chunk_dims, NULL);
+ VRFY((memspace >= 0), "");
+
+        /* Create a new file. If the file exists, its contents will be overwritten. */
+ file_id = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ VRFY((file_id >= 0), "H5Fcreate");
+
+ /* Modify dataset creation properties, i.e. enable chunking */
+ cparms = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY((cparms >= 0), "");
+
+ hrc = H5Pset_alloc_time(cparms, H5D_ALLOC_TIME_EARLY);
+ VRFY((hrc >= 0), "");
+
+ hrc = H5Pset_chunk(cparms, 1, chunk_dims);
+ VRFY((hrc >= 0), "");
+
+ /* Create a new dataset within the file using cparms creation properties. */
+ dataset =
+ H5Dcreate2(file_id, DSET_NAME, H5T_NATIVE_UCHAR, dataspace, H5P_DEFAULT, cparms, H5P_DEFAULT);
+ VRFY((dataset >= 0), "");
+
+ if (write_pattern == sec_last) {
+ HDmemset(buffer, 100, CHUNK_SIZE);
+
+ count[0] = 1;
+ stride[0] = 1;
+ block[0] = chunk_dims[0];
+ offset[0] = (hsize_t)(nchunks - 2) * chunk_dims[0];
+
+ hrc = H5Sselect_hyperslab(dataspace, H5S_SELECT_SET, offset, stride, count, block);
+ VRFY((hrc >= 0), "");
+
+ /* Write sec_last chunk */
+ hrc = H5Dwrite(dataset, H5T_NATIVE_UCHAR, memspace, dataspace, H5P_DEFAULT, buffer);
+ VRFY((hrc >= 0), "H5Dwrite");
+ } /* end if */
+
+ /* Close resources */
+ hrc = H5Dclose(dataset);
+ VRFY((hrc >= 0), "");
+ dataset = -1;
+
+ hrc = H5Sclose(dataspace);
+ VRFY((hrc >= 0), "");
+
+ hrc = H5Sclose(memspace);
+ VRFY((hrc >= 0), "");
+
+ hrc = H5Pclose(cparms);
+ VRFY((hrc >= 0), "");
+
+ hrc = H5Fclose(file_id);
+ VRFY((hrc >= 0), "");
+ file_id = -1;
+
+#if 0
+ /* verify file size */
+ filesize = get_filesize(filename);
+ est_filesize = (MPI_Offset)nchunks * (MPI_Offset)CHUNK_SIZE * (MPI_Offset)sizeof(unsigned char);
+ VRFY((filesize >= est_filesize), "file size check");
+#endif
+ }
+
+ /* Make sure all processes are done before exiting this routine. Otherwise,
+     * other tests may start and change the test data file while some processes
+     * of this test are still accessing it.
+ */
+
+ MPI_Barrier(MPI_COMM_WORLD);
+}
+
+/*
+ * This routine performs one of three types of parallel access, depending on
+ * 'action': it writes the entire dataset, extends it to nchunks*CHUNK_SIZE,
+ * or only opens it. At the end, it verifies that the size of the dataset is
+ * consistent with the argument 'chunk_factor'.
+ */
+static void
+parallel_access_dataset(const char *filename, int chunk_factor, access_type action, hid_t *file_id,
+ hid_t *dataset)
+{
+ /* HDF5 gubbins */
+    hid_t memspace, dataspace;  /* memory and file dataspace identifiers */
+ hid_t access_plist; /* HDF5 ID for file access property list */
+ herr_t hrc; /* HDF5 return code */
+ hsize_t size[1];
+
+ hsize_t chunk_dims[1] = {CHUNK_SIZE};
+ hsize_t count[1];
+ hsize_t stride[1];
+ hsize_t block[1];
+ hsize_t offset[1]; /* Selection offset within dataspace */
+ hsize_t dims[1];
+ hsize_t maxdims[1];
+
+ /* Variables used in reading data back */
+ char buffer[CHUNK_SIZE];
+ int i;
+ long nchunks;
+#if 0
+ /* MPI Gubbins */
+ MPI_Offset filesize, /* actual file size */
+ est_filesize; /* estimated file size */
+#endif
+
+ /* Initialize MPI */
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ nchunks = chunk_factor * mpi_size;
+
+ /* Set up MPIO file access property lists */
+ access_plist = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((access_plist >= 0), "");
+
+ hrc = H5Pset_fapl_mpio(access_plist, MPI_COMM_WORLD, MPI_INFO_NULL);
+ VRFY((hrc >= 0), "");
+
+ /* Open the file */
+ if (*file_id < 0) {
+ *file_id = H5Fopen(filename, H5F_ACC_RDWR, access_plist);
+ VRFY((*file_id >= 0), "");
+ }
+
+ /* Open dataset*/
+ if (*dataset < 0) {
+ *dataset = H5Dopen2(*file_id, DSET_NAME, H5P_DEFAULT);
+ VRFY((*dataset >= 0), "");
+ }
+
+ /* Make sure all processes are done before continuing. Otherwise, one
+ * process could change the dataset extent before another finishes opening
+ * it, resulting in only some of the processes calling H5Dset_extent(). */
+ MPI_Barrier(MPI_COMM_WORLD);
+
+ memspace = H5Screate_simple(1, chunk_dims, NULL);
+ VRFY((memspace >= 0), "");
+
+ dataspace = H5Dget_space(*dataset);
+ VRFY((dataspace >= 0), "");
+
+ size[0] = (hsize_t)nchunks * CHUNK_SIZE;
+
+ switch (action) {
+
+ /* all chunks are written by all the processes in an interleaved way*/
+ case write_all:
+
+ HDmemset(buffer, mpi_rank + 1, CHUNK_SIZE);
+ count[0] = 1;
+ stride[0] = 1;
+ block[0] = chunk_dims[0];
+ for (i = 0; i < nchunks / mpi_size; i++) {
+ offset[0] = (hsize_t)(i * mpi_size + mpi_rank) * chunk_dims[0];
+
+ hrc = H5Sselect_hyperslab(dataspace, H5S_SELECT_SET, offset, stride, count, block);
+ VRFY((hrc >= 0), "");
+
+ /* Write the buffer out */
+ hrc = H5Dwrite(*dataset, H5T_NATIVE_UCHAR, memspace, dataspace, H5P_DEFAULT, buffer);
+ VRFY((hrc >= 0), "H5Dwrite");
+ }
+
+ break;
+
+ /* only extends the dataset */
+ case extend_only:
+ /* check if new size is larger than old size */
+ hrc = H5Sget_simple_extent_dims(dataspace, dims, maxdims);
+ VRFY((hrc >= 0), "");
+
+ /* Extend dataset*/
+ if (size[0] > dims[0]) {
+ hrc = H5Dset_extent(*dataset, size);
+ VRFY((hrc >= 0), "");
+ }
+ break;
+
+ /* only opens the *dataset */
+ case open_only:
+ break;
+ default:
+ HDassert(0);
+ }
+
+ /* Close up */
+ hrc = H5Dclose(*dataset);
+ VRFY((hrc >= 0), "");
+ *dataset = -1;
+
+ hrc = H5Sclose(dataspace);
+ VRFY((hrc >= 0), "");
+
+ hrc = H5Sclose(memspace);
+ VRFY((hrc >= 0), "");
+
+ hrc = H5Fclose(*file_id);
+ VRFY((hrc >= 0), "");
+ *file_id = -1;
+
+#if 0
+ /* verify file size */
+ filesize = get_filesize(filename);
+ est_filesize = (MPI_Offset)nchunks * (MPI_Offset)CHUNK_SIZE * (MPI_Offset)sizeof(unsigned char);
+ VRFY((filesize >= est_filesize), "file size check");
+#endif
+
+ /* Can close some plists */
+ hrc = H5Pclose(access_plist);
+ VRFY((hrc >= 0), "");
+
+ /* Make sure all processes are done before exiting this routine. Otherwise,
+     * other tests may start and change the test data file while some processes
+     * of this test are still accessing it.
+ */
+ MPI_Barrier(MPI_COMM_WORLD);
+}
+
+/*
+ * This routine verifies the data written in the dataset. It handles one of
+ * three cases according to the value of the parameter `write_pattern':
+ * 1. the dataset has not been written: the correct fill values are returned;
+ * 2. only a small part of the dataset has been written: the correct fill
+ *    values are still returned for the rest;
+ * 3. the whole dataset has been written in an interleaved pattern: the
+ *    values read back match what was written.
+ */
+static void
+verify_data(const char *filename, int chunk_factor, write_type write_pattern, int vclose, hid_t *file_id,
+ hid_t *dataset)
+{
+ /* HDF5 gubbins */
+    hid_t dataspace, memspace;  /* memory and file dataspace identifiers */
+ hid_t access_plist; /* HDF5 ID for file access property list */
+ herr_t hrc; /* HDF5 return code */
+
+ hsize_t chunk_dims[1] = {CHUNK_SIZE};
+ hsize_t count[1];
+ hsize_t stride[1];
+ hsize_t block[1];
+ hsize_t offset[1]; /* Selection offset within dataspace */
+ /* Variables used in reading data back */
+ char buffer[CHUNK_SIZE];
+ int value, i;
+ int index_l;
+ long nchunks;
+ /* Initialize MPI */
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ nchunks = chunk_factor * mpi_size;
+
+ /* Set up MPIO file access property lists */
+ access_plist = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((access_plist >= 0), "");
+
+ hrc = H5Pset_fapl_mpio(access_plist, MPI_COMM_WORLD, MPI_INFO_NULL);
+ VRFY((hrc >= 0), "");
+
+ /* Open the file */
+ if (*file_id < 0) {
+ *file_id = H5Fopen(filename, H5F_ACC_RDWR, access_plist);
+ VRFY((*file_id >= 0), "");
+ }
+
+ /* Open dataset*/
+ if (*dataset < 0) {
+ *dataset = H5Dopen2(*file_id, DSET_NAME, H5P_DEFAULT);
+ VRFY((*dataset >= 0), "");
+ }
+
+ memspace = H5Screate_simple(1, chunk_dims, NULL);
+ VRFY((memspace >= 0), "");
+
+ dataspace = H5Dget_space(*dataset);
+ VRFY((dataspace >= 0), "");
+
+ /* all processes check all chunks. */
+ count[0] = 1;
+ stride[0] = 1;
+ block[0] = chunk_dims[0];
+ for (i = 0; i < nchunks; i++) {
+ /* reset buffer values */
+ HDmemset(buffer, -1, CHUNK_SIZE);
+
+ offset[0] = (hsize_t)i * chunk_dims[0];
+
+ hrc = H5Sselect_hyperslab(dataspace, H5S_SELECT_SET, offset, stride, count, block);
+ VRFY((hrc >= 0), "");
+
+ /* Read the chunk */
+ hrc = H5Dread(*dataset, H5T_NATIVE_UCHAR, memspace, dataspace, H5P_DEFAULT, buffer);
+ VRFY((hrc >= 0), "H5Dread");
+
+        /* set the expected value according to the write pattern */
+ switch (write_pattern) {
+ case all:
+ value = i % mpi_size + 1;
+ break;
+ case none:
+ value = 0;
+ break;
+ case sec_last:
+ if (i == nchunks - 2)
+ value = 100;
+ else
+ value = 0;
+ break;
+ default:
+ HDassert(0);
+ }
+
+ /* verify content of the chunk */
+ for (index_l = 0; index_l < CHUNK_SIZE; index_l++)
+ VRFY((buffer[index_l] == value), "data verification");
+ }
+
+ hrc = H5Sclose(dataspace);
+ VRFY((hrc >= 0), "");
+
+ hrc = H5Sclose(memspace);
+ VRFY((hrc >= 0), "");
+
+ /* Can close some plists */
+ hrc = H5Pclose(access_plist);
+ VRFY((hrc >= 0), "");
+
+ /* Close up */
+ if (vclose) {
+ hrc = H5Dclose(*dataset);
+ VRFY((hrc >= 0), "");
+ *dataset = -1;
+
+ hrc = H5Fclose(*file_id);
+ VRFY((hrc >= 0), "");
+ *file_id = -1;
+ }
+
+ /* Make sure all processes are done before exiting this routine. Otherwise,
+     * other tests may start and change the test data file while some processes
+     * of this test are still accessing it.
+ */
+ MPI_Barrier(MPI_COMM_WORLD);
+}
+
+/*
+ * Test the following scenarios:
+ * Case 1:
+ * Sequentially create a file and dataset with H5D_ALLOC_TIME_EARLY and a large
+ * size, no write, close, reopen in parallel, and read to verify that all
+ * chunks return the fill value.
+ * Case 2:
+ * Sequentially create a file and dataset with H5D_ALLOC_TIME_EARLY but a small
+ * size, no write, close, reopen in parallel, extend to a large size, close,
+ * then reopen in parallel and read to verify that all chunks return the fill
+ * value.
+ * Case 3:
+ * Sequentially create a file and dataset with H5D_ALLOC_TIME_EARLY and a large
+ * size, write just a small part of the dataset (the second-to-last chunk),
+ * close, then reopen in parallel and read to verify that everything returns
+ * the fill value except the small portion that was written. Without closing,
+ * write all parts of the dataset in an interleaved pattern, close, reopen,
+ * and read to verify that all data are as written.
+ */
+void
+test_chunk_alloc(void)
+{
+ const char *filename;
+ hid_t file_id, dataset;
+
+ file_id = dataset = -1;
+
+ /* Initialize MPI */
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(" API functions for basic file, dataset, or dataset more aren't supported with this "
+ "connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
+
+ filename = (const char *)PARATESTFILE /* GetTestParameters() */;
+ if (VERBOSE_MED)
+ HDprintf("Extend Chunked allocation test on file %s\n", filename);
+
+ /* Case 1 */
+ /* Create chunked dataset without writing anything.*/
+ create_chunked_dataset(filename, CHUNK_FACTOR, none);
+ /* reopen dataset in parallel and check for file size */
+ parallel_access_dataset(filename, CHUNK_FACTOR, open_only, &file_id, &dataset);
+ /* reopen dataset in parallel, read and verify the data */
+ verify_data(filename, CHUNK_FACTOR, none, CLOSE, &file_id, &dataset);
+
+ /* Case 2 */
+ /* Create chunked dataset without writing anything */
+ create_chunked_dataset(filename, 20, none);
+ /* reopen dataset in parallel and only extend it */
+ parallel_access_dataset(filename, CHUNK_FACTOR, extend_only, &file_id, &dataset);
+ /* reopen dataset in parallel, read and verify the data */
+ verify_data(filename, CHUNK_FACTOR, none, CLOSE, &file_id, &dataset);
+
+ /* Case 3 */
+ /* Create chunked dataset and write in the second to last chunk */
+ create_chunked_dataset(filename, CHUNK_FACTOR, sec_last);
+ /* Reopen dataset in parallel, read and verify the data. The file and dataset are not closed*/
+ verify_data(filename, CHUNK_FACTOR, sec_last, NO_CLOSE, &file_id, &dataset);
+    /* All processes write all the chunks in an interleaved way */
+ parallel_access_dataset(filename, CHUNK_FACTOR, write_all, &file_id, &dataset);
+ /* reopen dataset in parallel, read and verify the data */
+ verify_data(filename, CHUNK_FACTOR, all, CLOSE, &file_id, &dataset);
+}
diff --git a/testpar/API/t_coll_chunk.c b/testpar/API/t_coll_chunk.c
new file mode 100644
index 0000000..57ee605
--- /dev/null
+++ b/testpar/API/t_coll_chunk.c
@@ -0,0 +1,1417 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://www.hdfgroup.org/licenses. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+#include "hdf5.h"
+#include "testphdf5.h"
+
+#define HYPER 1
+#define POINT 2
+#define ALL 3
+
+/* some commonly used routines for collective chunk IO tests*/
+
+static void ccslab_set(int mpi_rank, int mpi_size, hsize_t start[], hsize_t count[], hsize_t stride[],
+ hsize_t block[], int mode);
+
+static void ccdataset_fill(hsize_t start[], hsize_t count[], hsize_t stride[], hsize_t block[],
+ DATATYPE *dataset, int mem_selection);
+
+static void ccdataset_print(hsize_t start[], hsize_t block[], DATATYPE *dataset);
+
+static int ccdataset_vrfy(hsize_t start[], hsize_t count[], hsize_t stride[], hsize_t block[],
+ DATATYPE *dataset, DATATYPE *original, int mem_selection);
+
+static void coll_chunktest(const char *filename, int chunk_factor, int select_factor, int api_option,
+ int file_selection, int mem_selection, int mode);
+
+/*-------------------------------------------------------------------------
+ * Function: coll_chunk1
+ *
+ * Purpose: Wrapper to test the collective chunk IO for regular JOINT
+ selection with a single chunk
+ *
+ * Return: Success: 0
+ *
+ * Failure: -1
+ *
+ * Programmer: Unknown
+ * July 12th, 2004
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+
+/* ------------------------------------------------------------------------
+ * Descriptions for the selection: One big singular selection inside one chunk
+ * Two dimensions,
+ *
+ * dim1 = SPACE_DIM1(5760)*mpi_size
+ * dim2 = SPACE_DIM2(3)
+ * chunk_dim1 = dim1
+ * chunk_dim2 = dim2
+ * block = 1 for all dimensions
+ * stride = 1 for all dimensions
+ * count0 = SPACE_DIM1(5760)
+ * count1 = SPACE_DIM2(3)
+ * start0 = mpi_rank*SPACE_DIM1
+ * start1 = 0
+ * ------------------------------------------------------------------------
+ */
+
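+/* Illustrative sketch (not part of the test): under the BYROW_CONT layout
+ * described above, each rank's hyperslab parameters could be expected to
+ * look roughly like the following; the actual values are computed by
+ * ccslab_set().
+ *
+ *     start[0]  = (hsize_t)mpi_rank * SPACE_DIM1;   start[1]  = 0;
+ *     count[0]  = SPACE_DIM1;                       count[1]  = SPACE_DIM2;
+ *     stride[0] = 1;                                stride[1] = 1;
+ *     block[0]  = 1;                                block[1]  = 1;
+ */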
+void
+coll_chunk1(void)
+{
+ const char *filename = PARATESTFILE /* GetTestParameters() */;
+ int mpi_rank;
+
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(" API functions for basic file, dataset or dataset more aren't supported with this "
+ "connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
+
+ coll_chunktest(filename, 1, BYROW_CONT, API_NONE, HYPER, HYPER, OUT_OF_ORDER);
+ coll_chunktest(filename, 1, BYROW_CONT, API_NONE, HYPER, POINT, OUT_OF_ORDER);
+ coll_chunktest(filename, 1, BYROW_CONT, API_NONE, POINT, ALL, OUT_OF_ORDER);
+ coll_chunktest(filename, 1, BYROW_CONT, API_NONE, POINT, POINT, OUT_OF_ORDER);
+ coll_chunktest(filename, 1, BYROW_CONT, API_NONE, POINT, HYPER, OUT_OF_ORDER);
+
+ coll_chunktest(filename, 1, BYROW_CONT, API_NONE, POINT, ALL, IN_ORDER);
+ coll_chunktest(filename, 1, BYROW_CONT, API_NONE, POINT, POINT, IN_ORDER);
+ coll_chunktest(filename, 1, BYROW_CONT, API_NONE, POINT, HYPER, IN_ORDER);
+}
+
+/*-------------------------------------------------------------------------
+ * Function: coll_chunk2
+ *
+ * Purpose: Wrapper to test the collective chunk IO for regular DISJOINT
+ selection with a single chunk
+ *
+ * Return: Success: 0
+ *
+ * Failure: -1
+ *
+ * Programmer: Unknown
+ * July 12th, 2004
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+
+/* ------------------------------------------------------------------------
+ * Descriptions for the selection: many disjoint selections inside one chunk
+ * Two dimensions,
+ *
+ * dim1 = SPACE_DIM1*mpi_size(5760)
+ * dim2 = SPACE_DIM2(3)
+ * chunk_dim1 = dim1
+ * chunk_dim2 = dim2
+ * block = 1 for all dimensions
+ * stride = 3 for all dimensions
+ * count0 = SPACE_DIM1/stride0(5760/3)
+ * count1 = SPACE_DIM2/stride(3/3 = 1)
+ * start0 = mpi_rank*SPACE_DIM1
+ * start1 = 0
+ *
+ * ------------------------------------------------------------------------
+ */
+void
+coll_chunk2(void)
+{
+ const char *filename = PARATESTFILE /* GetTestParameters() */;
+ int mpi_rank;
+
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(" API functions for basic file, dataset or dataset more aren't supported with this "
+ "connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
+
+ coll_chunktest(filename, 1, BYROW_DISCONT, API_NONE, HYPER, HYPER, OUT_OF_ORDER);
+ coll_chunktest(filename, 1, BYROW_DISCONT, API_NONE, HYPER, POINT, OUT_OF_ORDER);
+ coll_chunktest(filename, 1, BYROW_DISCONT, API_NONE, POINT, ALL, OUT_OF_ORDER);
+ coll_chunktest(filename, 1, BYROW_DISCONT, API_NONE, POINT, POINT, OUT_OF_ORDER);
+ coll_chunktest(filename, 1, BYROW_DISCONT, API_NONE, POINT, HYPER, OUT_OF_ORDER);
+
+ coll_chunktest(filename, 1, BYROW_DISCONT, API_NONE, POINT, ALL, IN_ORDER);
+ coll_chunktest(filename, 1, BYROW_DISCONT, API_NONE, POINT, POINT, IN_ORDER);
+ coll_chunktest(filename, 1, BYROW_DISCONT, API_NONE, POINT, HYPER, IN_ORDER);
+}
+
+/*-------------------------------------------------------------------------
+ * Function: coll_chunk3
+ *
+ * Purpose: Wrapper to test the collective chunk IO for regular JOINT
+                selection with at least 2*mpi_size chunks
+ *
+ * Return: Success: 0
+ *
+ * Failure: -1
+ *
+ * Programmer: Unknown
+ * July 12th, 2004
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+
+/* ------------------------------------------------------------------------
+ * Descriptions for the selection: one singular selection across many chunks
+ * Two dimensions, Num of chunks = 2* mpi_size
+ *
+ * dim1 = SPACE_DIM1*mpi_size
+ * dim2 = SPACE_DIM2(3)
+ * chunk_dim1 = SPACE_DIM1
+ * chunk_dim2 = dim2/2
+ * block = 1 for all dimensions
+ * stride = 1 for all dimensions
+ * count0 = SPACE_DIM1
+ * count1 = SPACE_DIM2(3)
+ * start0 = mpi_rank*SPACE_DIM1
+ * start1 = 0
+ *
+ * ------------------------------------------------------------------------
+ */
+
+void
+coll_chunk3(void)
+{
+ const char *filename = PARATESTFILE /* GetTestParameters() */;
+ int mpi_size;
+ int mpi_rank;
+
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(" API functions for basic file, dataset or dataset more aren't supported with this "
+ "connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
+
+ coll_chunktest(filename, mpi_size, BYROW_CONT, API_NONE, HYPER, HYPER, OUT_OF_ORDER);
+ coll_chunktest(filename, mpi_size, BYROW_CONT, API_NONE, HYPER, POINT, OUT_OF_ORDER);
+ coll_chunktest(filename, mpi_size, BYROW_CONT, API_NONE, POINT, ALL, OUT_OF_ORDER);
+ coll_chunktest(filename, mpi_size, BYROW_CONT, API_NONE, POINT, POINT, OUT_OF_ORDER);
+ coll_chunktest(filename, mpi_size, BYROW_CONT, API_NONE, POINT, HYPER, OUT_OF_ORDER);
+
+ coll_chunktest(filename, mpi_size, BYROW_CONT, API_NONE, POINT, ALL, IN_ORDER);
+ coll_chunktest(filename, mpi_size, BYROW_CONT, API_NONE, POINT, POINT, IN_ORDER);
+ coll_chunktest(filename, mpi_size, BYROW_CONT, API_NONE, POINT, HYPER, IN_ORDER);
+}
+
+/*-------------------------------------------------------------------------
+ * Function: coll_chunk4
+ *
+ * Purpose: Wrapper to test the collective chunk IO for regular JOINT
+                selection with at least 2*mpi_size chunks
+ *
+ * Return: Success: 0
+ *
+ * Failure: -1
+ *
+ * Programmer: Unknown
+ * July 12th, 2004
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+
+/* ------------------------------------------------------------------------
+ * Descriptions for the selection: one singular selection across many chunks
+ * Two dimensions, Num of chunks = 2* mpi_size
+ *
+ * dim1 = SPACE_DIM1*mpi_size
+ * dim2 = SPACE_DIM2
+ * chunk_dim1 = dim1
+ * chunk_dim2 = dim2
+ * block = 1 for all dimensions
+ * stride = 1 for all dimensions
+ * count0 = SPACE_DIM1
+ * count1 = SPACE_DIM2(3)
+ * start0 = mpi_rank*SPACE_DIM1
+ * start1 = 0
+ *
+ * ------------------------------------------------------------------------
+ */
+
+void
+coll_chunk4(void)
+{
+ const char *filename = PARATESTFILE /* GetTestParameters() */;
+ int mpi_rank;
+
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(" API functions for basic file, dataset or dataset more aren't supported with this "
+ "connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
+
+ coll_chunktest(filename, 1, BYROW_SELECTNONE, API_NONE, HYPER, HYPER, OUT_OF_ORDER);
+ coll_chunktest(filename, 1, BYROW_SELECTNONE, API_NONE, HYPER, POINT, OUT_OF_ORDER);
+ coll_chunktest(filename, 1, BYROW_SELECTNONE, API_NONE, POINT, ALL, OUT_OF_ORDER);
+ coll_chunktest(filename, 1, BYROW_SELECTNONE, API_NONE, POINT, POINT, OUT_OF_ORDER);
+ coll_chunktest(filename, 1, BYROW_SELECTNONE, API_NONE, POINT, HYPER, OUT_OF_ORDER);
+
+ coll_chunktest(filename, 1, BYROW_SELECTNONE, API_NONE, POINT, ALL, IN_ORDER);
+ coll_chunktest(filename, 1, BYROW_SELECTNONE, API_NONE, POINT, POINT, IN_ORDER);
+ coll_chunktest(filename, 1, BYROW_SELECTNONE, API_NONE, POINT, HYPER, IN_ORDER);
+}
+
+/*-------------------------------------------------------------------------
+ * Function:    coll_chunk5
+ *
+ * Purpose: Wrapper to test the collective chunk IO for regular JOINT
+                selection with at least 2*mpi_size chunks
+ *
+ * Return: Success: 0
+ *
+ * Failure: -1
+ *
+ * Programmer: Unknown
+ * July 12th, 2004
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+
+/* ------------------------------------------------------------------------
+ * Descriptions for the selection: one singular selection across many chunks
+ * Two dimensions, Num of chunks = 2* mpi_size
+ *
+ * dim1 = SPACE_DIM1*mpi_size
+ * dim2 = SPACE_DIM2
+ * chunk_dim1 = dim1
+ * chunk_dim2 = dim2
+ * block = 1 for all dimensions
+ * stride = 1 for all dimensions
+ * count0 = SPACE_DIM1
+ * count1 = SPACE_DIM2(3)
+ * start0 = mpi_rank*SPACE_DIM1
+ * start1 = 0
+ *
+ * ------------------------------------------------------------------------
+ */
+
+void
+coll_chunk5(void)
+{
+ const char *filename = PARATESTFILE /* GetTestParameters() */;
+ int mpi_rank;
+
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(" API functions for basic file, dataset or dataset more aren't supported with this "
+ "connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
+
+ coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_HARD, HYPER, HYPER, OUT_OF_ORDER);
+ coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_HARD, HYPER, POINT, OUT_OF_ORDER);
+ coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_HARD, POINT, ALL, OUT_OF_ORDER);
+ coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_HARD, POINT, POINT, OUT_OF_ORDER);
+ coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_HARD, POINT, HYPER, OUT_OF_ORDER);
+
+ coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_HARD, POINT, ALL, IN_ORDER);
+ coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_HARD, POINT, POINT, IN_ORDER);
+ coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_HARD, POINT, HYPER, IN_ORDER);
+}
+
+/*-------------------------------------------------------------------------
+ * Function: coll_chunk6
+ *
+ * Purpose:     Test a direct request for multi-chunk I/O.
+ *              Wrapper to test the collective chunk IO for regular JOINT
+ *              selection with at least 2*mpi_size chunks.
+ *
+ * Return: Success: 0
+ *
+ * Failure: -1
+ *
+ * Programmer: Unknown
+ * July 12th, 2004
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+
+/* ------------------------------------------------------------------------
+ * Descriptions for the selection: one singular selection across many chunks
+ * Two dimensions, Num of chunks = 2* mpi_size
+ *
+ * dim1 = SPACE_DIM1*mpi_size
+ * dim2 = SPACE_DIM2
+ * chunk_dim1 = dim1
+ * chunk_dim2 = dim2
+ * block = 1 for all dimensions
+ * stride = 1 for all dimensions
+ * count0 = SPACE_DIM1
+ * count1 = SPACE_DIM2(3)
+ * start0 = mpi_rank*SPACE_DIM1
+ * start1 = 0
+ *
+ * ------------------------------------------------------------------------
+ */
+
+void
+coll_chunk6(void)
+{
+ const char *filename = PARATESTFILE /* GetTestParameters() */;
+ int mpi_rank;
+
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(" API functions for basic file, dataset or dataset more aren't supported with this "
+ "connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
+
+ coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_MULTI_HARD, HYPER, HYPER, OUT_OF_ORDER);
+ coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_MULTI_HARD, HYPER, POINT, OUT_OF_ORDER);
+ coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_MULTI_HARD, POINT, ALL, OUT_OF_ORDER);
+ coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_MULTI_HARD, POINT, POINT, OUT_OF_ORDER);
+ coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_MULTI_HARD, POINT, HYPER, OUT_OF_ORDER);
+
+ coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_MULTI_HARD, POINT, ALL, IN_ORDER);
+ coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_MULTI_HARD, POINT, POINT, IN_ORDER);
+ coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_MULTI_HARD, POINT, HYPER, IN_ORDER);
+}
+
+/*-------------------------------------------------------------------------
+ * Function: coll_chunk7
+ *
+ * Purpose: Wrapper to test the collective chunk IO for regular JOINT
+                selection with at least 2*mpi_size chunks
+ *
+ * Return: Success: 0
+ *
+ * Failure: -1
+ *
+ * Programmer: Unknown
+ * July 12th, 2004
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+
+/* ------------------------------------------------------------------------
+ * Descriptions for the selection: one singular selection across many chunks
+ * Two dimensions, Num of chunks = 2* mpi_size
+ *
+ * dim1 = SPACE_DIM1*mpi_size
+ * dim2 = SPACE_DIM2
+ * chunk_dim1 = dim1
+ * chunk_dim2 = dim2
+ * block = 1 for all dimensions
+ * stride = 1 for all dimensions
+ * count0 = SPACE_DIM1
+ * count1 = SPACE_DIM2(3)
+ * start0 = mpi_rank*SPACE_DIM1
+ * start1 = 0
+ *
+ * ------------------------------------------------------------------------
+ */
+
+void
+coll_chunk7(void)
+{
+ const char *filename = PARATESTFILE /* GetTestParameters() */;
+ int mpi_rank;
+
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(" API functions for basic file, dataset or dataset more aren't supported with this "
+ "connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
+
+ coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_TRUE, HYPER, HYPER, OUT_OF_ORDER);
+ coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_TRUE, HYPER, POINT, OUT_OF_ORDER);
+ coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_TRUE, POINT, ALL, OUT_OF_ORDER);
+ coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_TRUE, POINT, POINT, OUT_OF_ORDER);
+ coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_TRUE, POINT, HYPER, OUT_OF_ORDER);
+
+ coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_TRUE, POINT, ALL, IN_ORDER);
+ coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_TRUE, POINT, POINT, IN_ORDER);
+ coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_TRUE, POINT, HYPER, IN_ORDER);
+}
+
+/*-------------------------------------------------------------------------
+ * Function: coll_chunk8
+ *
+ * Purpose: Wrapper to test the collective chunk IO for regular JOINT
+                selection with at least 2*mpi_size chunks
+ *
+ * Return: Success: 0
+ *
+ * Failure: -1
+ *
+ * Programmer: Unknown
+ * July 12th, 2004
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+
+/* ------------------------------------------------------------------------
+ * Descriptions for the selection: one singular selection across many chunks
+ * Two dimensions, Num of chunks = 2* mpi_size
+ *
+ * dim1 = SPACE_DIM1*mpi_size
+ * dim2 = SPACE_DIM2
+ * chunk_dim1 = dim1
+ * chunk_dim2 = dim2
+ * block = 1 for all dimensions
+ * stride = 1 for all dimensions
+ * count0 = SPACE_DIM1
+ * count1 = SPACE_DIM2(3)
+ * start0 = mpi_rank*SPACE_DIM1
+ * start1 = 0
+ *
+ * ------------------------------------------------------------------------
+ */
+
+void
+coll_chunk8(void)
+{
+ const char *filename = PARATESTFILE /* GetTestParameters() */;
+ int mpi_rank;
+
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(" API functions for basic file, dataset or dataset more aren't supported with this "
+ "connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
+
+ coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_FALSE, HYPER, HYPER, OUT_OF_ORDER);
+ coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_FALSE, HYPER, POINT, OUT_OF_ORDER);
+ coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_FALSE, POINT, ALL, OUT_OF_ORDER);
+ coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_FALSE, POINT, POINT, OUT_OF_ORDER);
+ coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_FALSE, POINT, HYPER, OUT_OF_ORDER);
+
+ coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_FALSE, POINT, ALL, IN_ORDER);
+ coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_FALSE, POINT, POINT, IN_ORDER);
+ coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_LINK_FALSE, POINT, HYPER, IN_ORDER);
+}
+
+/*-------------------------------------------------------------------------
+ * Function: coll_chunk9
+ *
+ * Purpose: Wrapper to test the collective chunk IO for regular JOINT
+                selection with at least 2*mpi_size chunks
+ *
+ * Return: Success: 0
+ *
+ * Failure: -1
+ *
+ * Programmer: Unknown
+ * July 12th, 2004
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+
+/* ------------------------------------------------------------------------
+ * Descriptions for the selection: one singular selection across many chunks
+ * Two dimensions, Num of chunks = 2* mpi_size
+ *
+ * dim1 = SPACE_DIM1*mpi_size
+ * dim2 = SPACE_DIM2
+ * chunk_dim1 = dim1
+ * chunk_dim2 = dim2
+ * block = 1 for all dimensions
+ * stride = 1 for all dimensions
+ * count0 = SPACE_DIM1
+ * count1 = SPACE_DIM2(3)
+ * start0 = mpi_rank*SPACE_DIM1
+ * start1 = 0
+ *
+ * ------------------------------------------------------------------------
+ */
+
+void
+coll_chunk9(void)
+{
+ const char *filename = PARATESTFILE /* GetTestParameters() */;
+ int mpi_rank;
+
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(" API functions for basic file, dataset or dataset more aren't supported with this "
+ "connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
+
+ coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_MULTI_COLL, HYPER, HYPER, OUT_OF_ORDER);
+ coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_MULTI_COLL, HYPER, POINT, OUT_OF_ORDER);
+ coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_MULTI_COLL, POINT, ALL, OUT_OF_ORDER);
+ coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_MULTI_COLL, POINT, POINT, OUT_OF_ORDER);
+ coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_MULTI_COLL, POINT, HYPER, OUT_OF_ORDER);
+
+ coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_MULTI_COLL, POINT, ALL, IN_ORDER);
+ coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_MULTI_COLL, POINT, POINT, IN_ORDER);
+ coll_chunktest(filename, 4, BYROW_SELECTUNBALANCE, API_MULTI_COLL, POINT, HYPER, IN_ORDER);
+}
+
+/*-------------------------------------------------------------------------
+ * Function: coll_chunk10
+ *
+ * Purpose: Wrapper to test the collective chunk IO for regular JOINT
+                selection with at least 2*mpi_size chunks
+ *
+ * Return: Success: 0
+ *
+ * Failure: -1
+ *
+ * Programmer: Unknown
+ * July 12th, 2004
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+
+/* ------------------------------------------------------------------------
+ * Descriptions for the selection: one singular selection across many chunks
+ * Two dimensions, Num of chunks = 2* mpi_size
+ *
+ * dim1 = SPACE_DIM1*mpi_size
+ * dim2 = SPACE_DIM2
+ * chunk_dim1 = dim1
+ * chunk_dim2 = dim2
+ * block = 1 for all dimensions
+ * stride = 1 for all dimensions
+ * count0 = SPACE_DIM1
+ * count1 = SPACE_DIM2(3)
+ * start0 = mpi_rank*SPACE_DIM1
+ * start1 = 0
+ *
+ * ------------------------------------------------------------------------
+ */
+
+void
+coll_chunk10(void)
+{
+ const char *filename = PARATESTFILE /* GetTestParameters() */;
+ int mpi_rank;
+
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(" API functions for basic file, dataset or dataset more aren't supported with this "
+ "connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
+
+ coll_chunktest(filename, 4, BYROW_SELECTINCHUNK, API_MULTI_IND, HYPER, HYPER, OUT_OF_ORDER);
+ coll_chunktest(filename, 4, BYROW_SELECTINCHUNK, API_MULTI_IND, HYPER, POINT, OUT_OF_ORDER);
+ coll_chunktest(filename, 4, BYROW_SELECTINCHUNK, API_MULTI_IND, POINT, ALL, OUT_OF_ORDER);
+ coll_chunktest(filename, 4, BYROW_SELECTINCHUNK, API_MULTI_IND, POINT, POINT, OUT_OF_ORDER);
+ coll_chunktest(filename, 4, BYROW_SELECTINCHUNK, API_MULTI_IND, POINT, HYPER, OUT_OF_ORDER);
+
+ coll_chunktest(filename, 4, BYROW_SELECTINCHUNK, API_MULTI_IND, POINT, ALL, IN_ORDER);
+ coll_chunktest(filename, 4, BYROW_SELECTINCHUNK, API_MULTI_IND, POINT, POINT, IN_ORDER);
+ coll_chunktest(filename, 4, BYROW_SELECTINCHUNK, API_MULTI_IND, POINT, HYPER, IN_ORDER);
+}
+
+/*-------------------------------------------------------------------------
+ * Function: coll_chunktest
+ *
+ * Purpose:     The real testing routine for regular selections with
+                collective chunked storage. Both write and read are tested;
+                if anything fails, it may be either the read or the write,
+                as there is no separate test for each.
+ *
+ * Return: Success: 0
+ *
+ * Failure: -1
+ *
+ * Modifications:
+ *   Remove invalid temporary property checks for API_LINK_HARD and
+ * API_LINK_TRUE cases.
+ * Programmer: Jonathan Kim
+ * Date: 2012-10-10
+ *
+ * Programmer: Unknown
+ * July 12th, 2004
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+
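+/* Overall flow of coll_chunktest(): create the file and a chunked dataset,
+ * select the per-rank region (hyperslab, point, or all) in both the file and
+ * memory dataspaces, set up a collective (or forced-independent) transfer
+ * property list plus any chunk-optimization hints implied by api_option,
+ * write the data collectively, then reopen the file read-only and read the
+ * data back collectively, verifying it against the expected values.
+ */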
+static void
+coll_chunktest(const char *filename, int chunk_factor, int select_factor, int api_option, int file_selection,
+ int mem_selection, int mode)
+{
+ hid_t file, dataset, file_dataspace, mem_dataspace;
+ hid_t acc_plist, xfer_plist, crp_plist;
+
+ hsize_t dims[RANK], chunk_dims[RANK];
+ int *data_array1 = NULL;
+ int *data_origin1 = NULL;
+
+ hsize_t start[RANK], count[RANK], stride[RANK], block[RANK];
+
+#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
+ unsigned prop_value;
+#endif /* H5_HAVE_INSTRUMENTED_LIBRARY */
+
+ int mpi_size, mpi_rank;
+
+ herr_t status;
+ MPI_Comm comm = MPI_COMM_WORLD;
+ MPI_Info info = MPI_INFO_NULL;
+
+ size_t num_points; /* for point selection */
+ hsize_t *coords = NULL; /* for point selection */
+ hsize_t current_dims; /* for point selection */
+
+ /* set up MPI parameters */
+ MPI_Comm_size(comm, &mpi_size);
+ MPI_Comm_rank(comm, &mpi_rank);
+
+ /* Create the data space */
+
+ acc_plist = create_faccess_plist(comm, info, facc_type);
+ VRFY((acc_plist >= 0), "");
+
+ file = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, acc_plist);
+ VRFY((file >= 0), "H5Fcreate succeeded");
+
+ status = H5Pclose(acc_plist);
+ VRFY((status >= 0), "");
+
+ /* setup dimensionality object */
+ dims[0] = (hsize_t)(SPACE_DIM1 * mpi_size);
+ dims[1] = SPACE_DIM2;
+
+ /* allocate memory for data buffer */
+ data_array1 = (int *)HDmalloc(dims[0] * dims[1] * sizeof(int));
+ VRFY((data_array1 != NULL), "data_array1 malloc succeeded");
+
+ /* set up dimensions of the slab this process accesses */
+ ccslab_set(mpi_rank, mpi_size, start, count, stride, block, select_factor);
+
+ /* set up the coords array selection */
+ num_points = block[0] * block[1] * count[0] * count[1];
+ coords = (hsize_t *)HDmalloc(num_points * RANK * sizeof(hsize_t));
+ VRFY((coords != NULL), "coords malloc succeeded");
+ point_set(start, count, stride, block, num_points, coords, mode);
+
+ file_dataspace = H5Screate_simple(2, dims, NULL);
+    VRFY((file_dataspace >= 0), "file dataspace creation succeeded");
+
+ if (ALL != mem_selection) {
+ mem_dataspace = H5Screate_simple(2, dims, NULL);
+        VRFY((mem_dataspace >= 0), "mem dataspace creation succeeded");
+ }
+ else {
+ current_dims = num_points;
+ mem_dataspace = H5Screate_simple(1, &current_dims, NULL);
+ VRFY((mem_dataspace >= 0), "mem_dataspace create succeeded");
+ }
+
+ crp_plist = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY((crp_plist >= 0), "");
+
+ /* Set up chunk information. */
+ chunk_dims[0] = dims[0] / (hsize_t)chunk_factor;
+
+    /* to decrease the testing time, keep a bigger chunk size */
+    chunk_dims[1] = (chunk_factor == 1) ? SPACE_DIM2 : (SPACE_DIM2 / 2);
+ status = H5Pset_chunk(crp_plist, 2, chunk_dims);
+ VRFY((status >= 0), "chunk creation property list succeeded");
+
+ dataset = H5Dcreate2(file, DSET_COLLECTIVE_CHUNK_NAME, H5T_NATIVE_INT, file_dataspace, H5P_DEFAULT,
+ crp_plist, H5P_DEFAULT);
+    VRFY((dataset >= 0), "dataset creation succeeded");
+
+ status = H5Pclose(crp_plist);
+ VRFY((status >= 0), "");
+
+    /* put some trivial data in the data array */
+ ccdataset_fill(start, stride, count, block, data_array1, mem_selection);
+
+ MESG("data_array initialized");
+
+ switch (file_selection) {
+ case HYPER:
+ status = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((status >= 0), "hyperslab selection succeeded");
+ break;
+
+ case POINT:
+ if (num_points) {
+ status = H5Sselect_elements(file_dataspace, H5S_SELECT_SET, num_points, coords);
+ VRFY((status >= 0), "Element selection succeeded");
+ }
+ else {
+ status = H5Sselect_none(file_dataspace);
+ VRFY((status >= 0), "none selection succeeded");
+ }
+ break;
+
+ case ALL:
+ status = H5Sselect_all(file_dataspace);
+ VRFY((status >= 0), "H5Sselect_all succeeded");
+ break;
+ }
+
+ switch (mem_selection) {
+ case HYPER:
+ status = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((status >= 0), "hyperslab selection succeeded");
+ break;
+
+ case POINT:
+ if (num_points) {
+ status = H5Sselect_elements(mem_dataspace, H5S_SELECT_SET, num_points, coords);
+ VRFY((status >= 0), "Element selection succeeded");
+ }
+ else {
+ status = H5Sselect_none(mem_dataspace);
+ VRFY((status >= 0), "none selection succeeded");
+ }
+ break;
+
+ case ALL:
+ status = H5Sselect_all(mem_dataspace);
+ VRFY((status >= 0), "H5Sselect_all succeeded");
+ break;
+ }
+
+ /* set up the collective transfer property list */
+ xfer_plist = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((xfer_plist >= 0), "");
+
+ status = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
+ VRFY((status >= 0), "MPIO collective transfer property succeeded");
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ status = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((status >= 0), "set independent IO collectively succeeded");
+ }
+
+ switch (api_option) {
+ case API_LINK_HARD:
+ status = H5Pset_dxpl_mpio_chunk_opt(xfer_plist, H5FD_MPIO_CHUNK_ONE_IO);
+ VRFY((status >= 0), "collective chunk optimization succeeded");
+ break;
+
+ case API_MULTI_HARD:
+ status = H5Pset_dxpl_mpio_chunk_opt(xfer_plist, H5FD_MPIO_CHUNK_MULTI_IO);
+ VRFY((status >= 0), "collective chunk optimization succeeded ");
+ break;
+
+ case API_LINK_TRUE:
+ status = H5Pset_dxpl_mpio_chunk_opt_num(xfer_plist, 2);
+ VRFY((status >= 0), "collective chunk optimization set chunk number succeeded");
+ break;
+
+ case API_LINK_FALSE:
+ status = H5Pset_dxpl_mpio_chunk_opt_num(xfer_plist, 6);
+ VRFY((status >= 0), "collective chunk optimization set chunk number succeeded");
+ break;
+
+ case API_MULTI_COLL:
+ status = H5Pset_dxpl_mpio_chunk_opt_num(xfer_plist, 8); /* make sure it is using multi-chunk IO */
+ VRFY((status >= 0), "collective chunk optimization set chunk number succeeded");
+ status = H5Pset_dxpl_mpio_chunk_opt_ratio(xfer_plist, 50);
+ VRFY((status >= 0), "collective chunk optimization set chunk ratio succeeded");
+ break;
+
+ case API_MULTI_IND:
+ status = H5Pset_dxpl_mpio_chunk_opt_num(xfer_plist, 8); /* make sure it is using multi-chunk IO */
+ VRFY((status >= 0), "collective chunk optimization set chunk number succeeded");
+ status = H5Pset_dxpl_mpio_chunk_opt_ratio(xfer_plist, 100);
+ VRFY((status >= 0), "collective chunk optimization set chunk ratio succeeded");
+ break;
+
+ default:;
+ }
+
+#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
+ if (facc_type == FACC_MPIO) {
+ switch (api_option) {
+ case API_LINK_HARD:
+ prop_value = H5D_XFER_COLL_CHUNK_DEF;
+ status = H5Pinsert2(xfer_plist, H5D_XFER_COLL_CHUNK_LINK_HARD_NAME, H5D_XFER_COLL_CHUNK_SIZE,
+ &prop_value, NULL, NULL, NULL, NULL, NULL, NULL);
+ VRFY((status >= 0), "testing property list inserted succeeded");
+ break;
+
+ case API_MULTI_HARD:
+ prop_value = H5D_XFER_COLL_CHUNK_DEF;
+ status = H5Pinsert2(xfer_plist, H5D_XFER_COLL_CHUNK_MULTI_HARD_NAME, H5D_XFER_COLL_CHUNK_SIZE,
+ &prop_value, NULL, NULL, NULL, NULL, NULL, NULL);
+ VRFY((status >= 0), "testing property list inserted succeeded");
+ break;
+
+ case API_LINK_TRUE:
+ prop_value = H5D_XFER_COLL_CHUNK_DEF;
+ status =
+ H5Pinsert2(xfer_plist, H5D_XFER_COLL_CHUNK_LINK_NUM_TRUE_NAME, H5D_XFER_COLL_CHUNK_SIZE,
+ &prop_value, NULL, NULL, NULL, NULL, NULL, NULL);
+ VRFY((status >= 0), "testing property list inserted succeeded");
+ break;
+
+ case API_LINK_FALSE:
+ prop_value = H5D_XFER_COLL_CHUNK_DEF;
+ status =
+ H5Pinsert2(xfer_plist, H5D_XFER_COLL_CHUNK_LINK_NUM_FALSE_NAME, H5D_XFER_COLL_CHUNK_SIZE,
+ &prop_value, NULL, NULL, NULL, NULL, NULL, NULL);
+ VRFY((status >= 0), "testing property list inserted succeeded");
+ break;
+
+ case API_MULTI_COLL:
+ prop_value = H5D_XFER_COLL_CHUNK_DEF;
+ status =
+ H5Pinsert2(xfer_plist, H5D_XFER_COLL_CHUNK_MULTI_RATIO_COLL_NAME,
+ H5D_XFER_COLL_CHUNK_SIZE, &prop_value, NULL, NULL, NULL, NULL, NULL, NULL);
+ VRFY((status >= 0), "testing property list inserted succeeded");
+ break;
+
+ case API_MULTI_IND:
+ prop_value = H5D_XFER_COLL_CHUNK_DEF;
+ status =
+ H5Pinsert2(xfer_plist, H5D_XFER_COLL_CHUNK_MULTI_RATIO_IND_NAME, H5D_XFER_COLL_CHUNK_SIZE,
+ &prop_value, NULL, NULL, NULL, NULL, NULL, NULL);
+ VRFY((status >= 0), "testing property list inserted succeeded");
+ break;
+
+ default:;
+ }
+ }
+#endif
+
+ /* write data collectively */
+ status = H5Dwrite(dataset, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
+ VRFY((status >= 0), "dataset write succeeded");
+
+#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
+ /* Only check chunk optimization mode if selection I/O is not being used -
+ * selection I/O bypasses this IO mode decision - it's effectively always
+     * multi-chunk currently */
+ if (facc_type == FACC_MPIO && /* !H5_use_selection_io_g */ TRUE) {
+ switch (api_option) {
+ case API_LINK_HARD:
+ status = H5Pget(xfer_plist, H5D_XFER_COLL_CHUNK_LINK_HARD_NAME, &prop_value);
+ VRFY((status >= 0), "testing property list get succeeded");
+ VRFY((prop_value == 0), "API to set LINK COLLECTIVE IO directly succeeded");
+ break;
+
+ case API_MULTI_HARD:
+ status = H5Pget(xfer_plist, H5D_XFER_COLL_CHUNK_MULTI_HARD_NAME, &prop_value);
+ VRFY((status >= 0), "testing property list get succeeded");
+ VRFY((prop_value == 0), "API to set MULTI-CHUNK COLLECTIVE IO optimization succeeded");
+ break;
+
+ case API_LINK_TRUE:
+ status = H5Pget(xfer_plist, H5D_XFER_COLL_CHUNK_LINK_NUM_TRUE_NAME, &prop_value);
+ VRFY((status >= 0), "testing property list get succeeded");
+ VRFY((prop_value == 0), "API to set LINK COLLECTIVE IO succeeded");
+ break;
+
+ case API_LINK_FALSE:
+ status = H5Pget(xfer_plist, H5D_XFER_COLL_CHUNK_LINK_NUM_FALSE_NAME, &prop_value);
+ VRFY((status >= 0), "testing property list get succeeded");
+ VRFY((prop_value == 0), "API to set LINK IO transferring to multi-chunk IO succeeded");
+ break;
+
+ case API_MULTI_COLL:
+ status = H5Pget(xfer_plist, H5D_XFER_COLL_CHUNK_MULTI_RATIO_COLL_NAME, &prop_value);
+ VRFY((status >= 0), "testing property list get succeeded");
+ VRFY((prop_value == 0), "API to set MULTI-CHUNK COLLECTIVE IO with optimization succeeded");
+ break;
+
+ case API_MULTI_IND:
+ status = H5Pget(xfer_plist, H5D_XFER_COLL_CHUNK_MULTI_RATIO_IND_NAME, &prop_value);
+ VRFY((status >= 0), "testing property list get succeeded");
+ VRFY((prop_value == 0),
+ "API to set MULTI-CHUNK IO transferring to independent IO succeeded");
+ break;
+
+ default:;
+ }
+ }
+#endif
+
+ status = H5Dclose(dataset);
+ VRFY((status >= 0), "");
+
+ status = H5Pclose(xfer_plist);
+ VRFY((status >= 0), "property list closed");
+
+ status = H5Sclose(file_dataspace);
+ VRFY((status >= 0), "");
+
+ status = H5Sclose(mem_dataspace);
+ VRFY((status >= 0), "");
+
+ status = H5Fclose(file);
+ VRFY((status >= 0), "");
+
+ if (data_array1)
+ HDfree(data_array1);
+
+ /* Use collective read to verify the correctness of collective write. */
+
+ /* allocate memory for data buffer */
+ data_array1 = (int *)HDmalloc(dims[0] * dims[1] * sizeof(int));
+ VRFY((data_array1 != NULL), "data_array1 malloc succeeded");
+
+ /* allocate memory for data buffer */
+ data_origin1 = (int *)HDmalloc(dims[0] * dims[1] * sizeof(int));
+ VRFY((data_origin1 != NULL), "data_origin1 malloc succeeded");
+
+ acc_plist = create_faccess_plist(comm, info, facc_type);
+ VRFY((acc_plist >= 0), "MPIO creation property list succeeded");
+
+ file = H5Fopen(filename, H5F_ACC_RDONLY, acc_plist);
+ VRFY((file >= 0), "H5Fcreate succeeded");
+
+ status = H5Pclose(acc_plist);
+ VRFY((status >= 0), "");
+
+    /* open the collective dataset */
+ dataset = H5Dopen2(file, DSET_COLLECTIVE_CHUNK_NAME, H5P_DEFAULT);
+ VRFY((dataset >= 0), "");
+
+ /* set up dimensions of the slab this process accesses */
+ ccslab_set(mpi_rank, mpi_size, start, count, stride, block, select_factor);
+
+    /* obtain the file and mem dataspace */
+ file_dataspace = H5Dget_space(dataset);
+ VRFY((file_dataspace >= 0), "");
+
+ if (ALL != mem_selection) {
+ mem_dataspace = H5Dget_space(dataset);
+ VRFY((mem_dataspace >= 0), "");
+ }
+ else {
+ current_dims = num_points;
+ mem_dataspace = H5Screate_simple(1, &current_dims, NULL);
+ VRFY((mem_dataspace >= 0), "mem_dataspace create succeeded");
+ }
+
+ switch (file_selection) {
+ case HYPER:
+ status = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((status >= 0), "hyperslab selection succeeded");
+ break;
+
+ case POINT:
+ if (num_points) {
+ status = H5Sselect_elements(file_dataspace, H5S_SELECT_SET, num_points, coords);
+ VRFY((status >= 0), "Element selection succeeded");
+ }
+ else {
+ status = H5Sselect_none(file_dataspace);
+ VRFY((status >= 0), "none selection succeeded");
+ }
+ break;
+
+ case ALL:
+ status = H5Sselect_all(file_dataspace);
+ VRFY((status >= 0), "H5Sselect_all succeeded");
+ break;
+ }
+
+ switch (mem_selection) {
+ case HYPER:
+ status = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((status >= 0), "hyperslab selection succeeded");
+ break;
+
+ case POINT:
+ if (num_points) {
+ status = H5Sselect_elements(mem_dataspace, H5S_SELECT_SET, num_points, coords);
+ VRFY((status >= 0), "Element selection succeeded");
+ }
+ else {
+ status = H5Sselect_none(mem_dataspace);
+ VRFY((status >= 0), "none selection succeeded");
+ }
+ break;
+
+ case ALL:
+ status = H5Sselect_all(mem_dataspace);
+ VRFY((status >= 0), "H5Sselect_all succeeded");
+ break;
+ }
+
+    /* fill the expected-data buffer for verification */
+ ccdataset_fill(start, stride, count, block, data_origin1, mem_selection);
+ xfer_plist = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((xfer_plist >= 0), "");
+
+ status = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
+ VRFY((status >= 0), "MPIO collective transfer property succeeded");
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ status = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((status >= 0), "set independent IO collectively succeeded");
+ }
+
+ status = H5Dread(dataset, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
+ VRFY((status >= 0), "dataset read succeeded");
+
+ /* verify the read data with original expected data */
+ status = ccdataset_vrfy(start, count, stride, block, data_array1, data_origin1, mem_selection);
+ if (status)
+ nerrors++;
+
+ status = H5Pclose(xfer_plist);
+ VRFY((status >= 0), "property list closed");
+
+ /* close dataset collectively */
+ status = H5Dclose(dataset);
+ VRFY((status >= 0), "H5Dclose");
+
+ /* release all IDs created */
+ status = H5Sclose(file_dataspace);
+ VRFY((status >= 0), "H5Sclose");
+
+ status = H5Sclose(mem_dataspace);
+ VRFY((status >= 0), "H5Sclose");
+
+ /* close the file collectively */
+ status = H5Fclose(file);
+ VRFY((status >= 0), "H5Fclose");
+
+ /* release data buffers */
+ if (coords)
+ HDfree(coords);
+ if (data_array1)
+ HDfree(data_array1);
+ if (data_origin1)
+ HDfree(data_origin1);
+}
+
+/* Set up the selection */
+static void
+ccslab_set(int mpi_rank, int mpi_size, hsize_t start[], hsize_t count[], hsize_t stride[], hsize_t block[],
+ int mode)
+{
+
+ switch (mode) {
+
+ case BYROW_CONT:
+            /* Each process takes a slab of rows. */
+ block[0] = 1;
+ block[1] = 1;
+ stride[0] = 1;
+ stride[1] = 1;
+ count[0] = SPACE_DIM1;
+ count[1] = SPACE_DIM2;
+ start[0] = (hsize_t)mpi_rank * count[0];
+ start[1] = 0;
+
+ break;
+
+ case BYROW_DISCONT:
+ /* Each process takes several disjoint blocks. */
+ block[0] = 1;
+ block[1] = 1;
+ stride[0] = 3;
+ stride[1] = 3;
+ count[0] = SPACE_DIM1 / (stride[0] * block[0]);
+ count[1] = (SPACE_DIM2) / (stride[1] * block[1]);
+ start[0] = (hsize_t)SPACE_DIM1 * (hsize_t)mpi_rank;
+ start[1] = 0;
+
+ break;
+
+ case BYROW_SELECTNONE:
+            /* Each process takes a slab of rows; there is
+               no selection for the last process. */
+ block[0] = 1;
+ block[1] = 1;
+ stride[0] = 1;
+ stride[1] = 1;
+ count[0] = ((mpi_rank >= MAX(1, (mpi_size - 2))) ? 0 : SPACE_DIM1);
+ count[1] = SPACE_DIM2;
+ start[0] = (hsize_t)mpi_rank * count[0];
+ start[1] = 0;
+
+ break;
+
+ case BYROW_SELECTUNBALANCE:
+            /* The first one-third of the processes select only the top
+               half of the domain; the rest select the bottom half of the
+               domain. */
+
+ block[0] = 1;
+ count[0] = 2;
+ stride[0] = (hsize_t)SPACE_DIM1 * (hsize_t)mpi_size / 4 + 1;
+ block[1] = SPACE_DIM2;
+ count[1] = 1;
+ start[1] = 0;
+ stride[1] = 1;
+ if ((mpi_rank * 3) < (mpi_size * 2))
+ start[0] = (hsize_t)mpi_rank;
+ else
+ start[0] = (hsize_t)(1 + SPACE_DIM1 * mpi_size / 2 + (mpi_rank - 2 * mpi_size / 3));
+ break;
+
+ case BYROW_SELECTINCHUNK:
+ /* Each process will only select one chunk */
+
+ block[0] = 1;
+ count[0] = 1;
+ start[0] = (hsize_t)(mpi_rank * SPACE_DIM1);
+ stride[0] = 1;
+ block[1] = SPACE_DIM2;
+ count[1] = 1;
+ stride[1] = 1;
+ start[1] = 0;
+
+ break;
+
+ default:
+ /* Unknown mode. Set it to cover the whole dataset. */
+ block[0] = (hsize_t)SPACE_DIM1 * (hsize_t)mpi_size;
+ block[1] = SPACE_DIM2;
+ stride[0] = block[0];
+ stride[1] = block[1];
+ count[0] = 1;
+ count[1] = 1;
+ start[0] = 0;
+ start[1] = 0;
+
+ break;
+ }
+ if (VERBOSE_MED) {
+ HDprintf("start[]=(%lu,%lu), count[]=(%lu,%lu), stride[]=(%lu,%lu), block[]=(%lu,%lu), total "
+ "datapoints=%lu\n",
+ (unsigned long)start[0], (unsigned long)start[1], (unsigned long)count[0],
+ (unsigned long)count[1], (unsigned long)stride[0], (unsigned long)stride[1],
+ (unsigned long)block[0], (unsigned long)block[1],
+ (unsigned long)(block[0] * block[1] * count[0] * count[1]));
+ }
+}
+
+/*
+ * Fill the dataset with trivial data for testing.
+ * Assume dimension rank is 2.
+ */
+static void
+ccdataset_fill(hsize_t start[], hsize_t stride[], hsize_t count[], hsize_t block[], DATATYPE *dataset,
+ int mem_selection)
+{
+ DATATYPE *dataptr = dataset;
+ DATATYPE *tmptr;
+ hsize_t i, j, k1, k2, k = 0;
+ /* put some trivial data in the data_array */
+ tmptr = dataptr;
+
+    /* assign values to the disjoint block (two-dimensional) data array
+       through the pointer */
+
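+    /* For an ALL memory selection the buffer is a packed 1-D array, so values
+     * are written out sequentially; otherwise each element's 2-D offset is
+     * computed from the hyperslab start/stride/count/block parameters. */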
+ for (k1 = 0; k1 < count[0]; k1++) {
+ for (i = 0; i < block[0]; i++) {
+ for (k2 = 0; k2 < count[1]; k2++) {
+ for (j = 0; j < block[1]; j++) {
+
+ if (ALL != mem_selection) {
+ dataptr = tmptr + ((start[0] + k1 * stride[0] + i) * SPACE_DIM2 + start[1] +
+ k2 * stride[1] + j);
+ }
+ else {
+ dataptr = tmptr + k;
+ k++;
+ }
+
+ *dataptr = (DATATYPE)(k1 + k2 + i + j);
+ }
+ }
+ }
+ }
+}
+
+/*
+ * Print the first block of the content of the dataset.
+ */
+static void
+ccdataset_print(hsize_t start[], hsize_t block[], DATATYPE *dataset)
+
+{
+ DATATYPE *dataptr = dataset;
+ hsize_t i, j;
+
+ /* print the column heading */
+ HDprintf("Print only the first block of the dataset\n");
+ HDprintf("%-8s", "Cols:");
+ for (j = 0; j < block[1]; j++) {
+ HDprintf("%3lu ", (unsigned long)(start[1] + j));
+ }
+ HDprintf("\n");
+
+ /* print the slab data */
+ for (i = 0; i < block[0]; i++) {
+ HDprintf("Row %2lu: ", (unsigned long)(i + start[0]));
+ for (j = 0; j < block[1]; j++) {
+ HDprintf("%03d ", *dataptr++);
+ }
+ HDprintf("\n");
+ }
+}
+
+/*
+ * Verify the content of the dataset against the expected original data.
+ */
+static int
+ccdataset_vrfy(hsize_t start[], hsize_t count[], hsize_t stride[], hsize_t block[], DATATYPE *dataset,
+ DATATYPE *original, int mem_selection)
+{
+ hsize_t i, j, k1, k2, k = 0;
+ int vrfyerrs;
+ DATATYPE *dataptr, *oriptr;
+
+ /* print it if VERBOSE_MED */
+ if (VERBOSE_MED) {
+ HDprintf("dataset_vrfy dumping:::\n");
+ HDprintf("start(%lu, %lu), count(%lu, %lu), stride(%lu, %lu), block(%lu, %lu)\n",
+ (unsigned long)start[0], (unsigned long)start[1], (unsigned long)count[0],
+ (unsigned long)count[1], (unsigned long)stride[0], (unsigned long)stride[1],
+ (unsigned long)block[0], (unsigned long)block[1]);
+ HDprintf("original values:\n");
+ ccdataset_print(start, block, original);
+ HDprintf("compared values:\n");
+ ccdataset_print(start, block, dataset);
+ }
+
+ vrfyerrs = 0;
+
+ for (k1 = 0; k1 < count[0]; k1++) {
+ for (i = 0; i < block[0]; i++) {
+ for (k2 = 0; k2 < count[1]; k2++) {
+ for (j = 0; j < block[1]; j++) {
+ if (ALL != mem_selection) {
+ dataptr = dataset + ((start[0] + k1 * stride[0] + i) * SPACE_DIM2 + start[1] +
+ k2 * stride[1] + j);
+ oriptr = original + ((start[0] + k1 * stride[0] + i) * SPACE_DIM2 + start[1] +
+ k2 * stride[1] + j);
+ }
+ else {
+ dataptr = dataset + k;
+ oriptr = original + k;
+ k++;
+ }
+ if (*dataptr != *oriptr) {
+ if (vrfyerrs++ < MAX_ERR_REPORT || VERBOSE_MED) {
+ HDprintf("Dataset Verify failed at [%lu][%lu]: expect %d, got %d\n",
+ (unsigned long)i, (unsigned long)j, *(oriptr), *(dataptr));
+ }
+ }
+ }
+ }
+ }
+ }
+ if (vrfyerrs > MAX_ERR_REPORT && !VERBOSE_MED)
+ HDprintf("[more errors ...]\n");
+ if (vrfyerrs)
+ HDprintf("%d errors found in ccdataset_vrfy\n", vrfyerrs);
+ return (vrfyerrs);
+}
diff --git a/testpar/API/t_coll_md_read.c b/testpar/API/t_coll_md_read.c
new file mode 100644
index 0000000..f6f99bf
--- /dev/null
+++ b/testpar/API/t_coll_md_read.c
@@ -0,0 +1,654 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://www.hdfgroup.org/licenses. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/*
+ * A test suite to test HDF5's collective metadata read and write capabilities,
+ * as enabled by making a call to H5Pset_all_coll_metadata_ops() and/or
+ * H5Pset_coll_metadata_write().
+ */
+
+#include "hdf5.h"
+#include "testphdf5.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+/*
+ * Define the non-participating process as the "last"
+ * rank to avoid any weirdness potentially caused by
+ * an if (mpi_rank == 0) check.
+ */
+#define PARTIAL_NO_SELECTION_NO_SEL_PROCESS (mpi_rank == mpi_size - 1)
+#define PARTIAL_NO_SELECTION_DATASET_NAME "partial_no_selection_dset"
+#define PARTIAL_NO_SELECTION_DATASET_NDIMS 2
+#define PARTIAL_NO_SELECTION_Y_DIM_SCALE 5
+#define PARTIAL_NO_SELECTION_X_DIM_SCALE 5
+
+#define MULTI_CHUNK_IO_ADDRMAP_ISSUE_DIMS 2
+
+#define LINK_CHUNK_IO_SORT_CHUNK_ISSUE_COLL_THRESH_NUM 10000
+#define LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DATASET_NAME "linked_chunk_io_sort_chunk_issue"
+#define LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DIMS 1
+
+#define COLL_GHEAP_WRITE_ATTR_NELEMS 10
+#define COLL_GHEAP_WRITE_ATTR_NAME "coll_gheap_write_attr"
+#define COLL_GHEAP_WRITE_ATTR_DIMS 1
+
+/*
+ * A test for issue HDFFV-10501. A parallel hang was reported which occurred
+ * in linked-chunk I/O when collective metadata reads are enabled and some ranks
+ * do not have any selection in a dataset's dataspace, while others do. The ranks
+ * which have no selection during the read/write operation called H5D__chunk_addrmap()
+ * to retrieve the lowest chunk address, since we require that the read/write be done
+ * in strictly non-decreasing order of chunk address. For version 1 and 2 B-trees,
+ * this caused the non-participating ranks to issue a collective MPI_Bcast() call
+ * which the other ranks did not issue, thus causing a hang.
+ *
+ * However, since these ranks are not actually reading/writing anything, this call
+ * can simply be removed and the address used for the read/write can be set to an
+ * arbitrary number (0 was chosen).
+ */
+void
+test_partial_no_selection_coll_md_read(void)
+{
+ const char *filename;
+ hsize_t *dataset_dims = NULL;
+ hsize_t max_dataset_dims[PARTIAL_NO_SELECTION_DATASET_NDIMS];
+ hsize_t sel_dims[1];
+ hsize_t chunk_dims[PARTIAL_NO_SELECTION_DATASET_NDIMS] = {PARTIAL_NO_SELECTION_Y_DIM_SCALE,
+ PARTIAL_NO_SELECTION_X_DIM_SCALE};
+ hsize_t start[PARTIAL_NO_SELECTION_DATASET_NDIMS];
+ hsize_t stride[PARTIAL_NO_SELECTION_DATASET_NDIMS];
+ hsize_t count[PARTIAL_NO_SELECTION_DATASET_NDIMS];
+ hsize_t block[PARTIAL_NO_SELECTION_DATASET_NDIMS];
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t fapl_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t dcpl_id = H5I_INVALID_HID;
+ hid_t dxpl_id = H5I_INVALID_HID;
+ hid_t fspace_id = H5I_INVALID_HID;
+ hid_t mspace_id = H5I_INVALID_HID;
+ int mpi_rank, mpi_size;
+ void *data = NULL;
+ void *read_buf = NULL;
+
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_FLUSH_REFRESH)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(" API functions for basic file, dataset or file flush aren't supported with this "
+ "connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
+
+ filename = PARATESTFILE /* GetTestParameters() */;
+
+ fapl_id = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type);
+ VRFY((fapl_id >= 0), "create_faccess_plist succeeded");
+
+ /*
+ * Even though the testphdf5 framework currently sets collective metadata reads
+ * on the FAPL, we call it here just to be sure this is futureproof, since
+ * demonstrating this issue relies upon it.
+ */
+ VRFY((H5Pset_all_coll_metadata_ops(fapl_id, true) >= 0), "Set collective metadata reads succeeded");
+
+ file_id = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id);
+ VRFY((file_id >= 0), "H5Fcreate succeeded");
+
+ dataset_dims = HDmalloc(PARTIAL_NO_SELECTION_DATASET_NDIMS * sizeof(*dataset_dims));
+ VRFY((dataset_dims != NULL), "malloc succeeded");
+
+ dataset_dims[0] = (hsize_t)PARTIAL_NO_SELECTION_Y_DIM_SCALE * (hsize_t)mpi_size;
+ dataset_dims[1] = (hsize_t)PARTIAL_NO_SELECTION_X_DIM_SCALE * (hsize_t)mpi_size;
+ max_dataset_dims[0] = H5S_UNLIMITED;
+ max_dataset_dims[1] = H5S_UNLIMITED;
+
+ fspace_id = H5Screate_simple(PARTIAL_NO_SELECTION_DATASET_NDIMS, dataset_dims, max_dataset_dims);
+ VRFY((fspace_id >= 0), "H5Screate_simple succeeded");
+
+ /*
+ * Set up chunking on the dataset in order to reproduce the problem.
+ */
+ dcpl_id = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY((dcpl_id >= 0), "H5Pcreate succeeded");
+
+ VRFY((H5Pset_chunk(dcpl_id, PARTIAL_NO_SELECTION_DATASET_NDIMS, chunk_dims) >= 0),
+ "H5Pset_chunk succeeded");
+
+ dset_id = H5Dcreate2(file_id, PARTIAL_NO_SELECTION_DATASET_NAME, H5T_NATIVE_INT, fspace_id, H5P_DEFAULT,
+ dcpl_id, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "H5Dcreate2 succeeded");
+
+ /*
+ * Setup hyperslab selection to split the dataset among the ranks.
+ *
+ * The ranks will write rows across the dataset.
+ */
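+    /* Each rank selects a PARTIAL_NO_SELECTION_Y_DIM_SCALE-row band spanning
+     * the full width of the dataset (one row of chunks per rank). */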
+ start[0] = (hsize_t)PARTIAL_NO_SELECTION_Y_DIM_SCALE * (hsize_t)mpi_rank;
+ start[1] = 0;
+ stride[0] = PARTIAL_NO_SELECTION_Y_DIM_SCALE;
+ stride[1] = PARTIAL_NO_SELECTION_X_DIM_SCALE;
+ count[0] = 1;
+ count[1] = (hsize_t)mpi_size;
+ block[0] = PARTIAL_NO_SELECTION_Y_DIM_SCALE;
+ block[1] = PARTIAL_NO_SELECTION_X_DIM_SCALE;
+
+ VRFY((H5Sselect_hyperslab(fspace_id, H5S_SELECT_SET, start, stride, count, block) >= 0),
+ "H5Sselect_hyperslab succeeded");
+
+ sel_dims[0] = count[1] * (PARTIAL_NO_SELECTION_Y_DIM_SCALE * PARTIAL_NO_SELECTION_X_DIM_SCALE);
+
+ mspace_id = H5Screate_simple(1, sel_dims, NULL);
+ VRFY((mspace_id >= 0), "H5Screate_simple succeeded");
+
+ data = HDcalloc(1, count[1] * (PARTIAL_NO_SELECTION_Y_DIM_SCALE * PARTIAL_NO_SELECTION_X_DIM_SCALE) *
+ sizeof(int));
+ VRFY((data != NULL), "calloc succeeded");
+
+ dxpl_id = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((dxpl_id >= 0), "H5Pcreate succeeded");
+
+ /*
+ * Enable collective access for the data transfer.
+ */
+ VRFY((H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE) >= 0), "H5Pset_dxpl_mpio succeeded");
+
+ VRFY((H5Dwrite(dset_id, H5T_NATIVE_INT, mspace_id, fspace_id, dxpl_id, data) >= 0), "H5Dwrite succeeded");
+
+ VRFY((H5Fflush(file_id, H5F_SCOPE_GLOBAL) >= 0), "H5Fflush succeeded");
+
+ /*
+ * Ensure that linked-chunk I/O is performed since this is
+ * the particular code path where the issue lies and we don't
+ * want the library doing multi-chunk I/O behind our backs.
+ */
+ VRFY((H5Pset_dxpl_mpio_chunk_opt(dxpl_id, H5FD_MPIO_CHUNK_ONE_IO) >= 0),
+ "H5Pset_dxpl_mpio_chunk_opt succeeded");
+
+ read_buf = HDmalloc(count[1] * (PARTIAL_NO_SELECTION_Y_DIM_SCALE * PARTIAL_NO_SELECTION_X_DIM_SCALE) *
+ sizeof(int));
+ VRFY((read_buf != NULL), "malloc succeeded");
+
+ /*
+ * Make sure to call H5Sselect_none() on the non-participating process.
+ */
+ if (PARTIAL_NO_SELECTION_NO_SEL_PROCESS) {
+ VRFY((H5Sselect_none(fspace_id) >= 0), "H5Sselect_none succeeded");
+ VRFY((H5Sselect_none(mspace_id) >= 0), "H5Sselect_none succeeded");
+ }
+
+ /*
+ * Finally have each rank read their section of data back from the dataset.
+ */
+ VRFY((H5Dread(dset_id, H5T_NATIVE_INT, mspace_id, fspace_id, dxpl_id, read_buf) >= 0),
+ "H5Dread succeeded");
+
+ /*
+ * Check data integrity just to be sure.
+ */
+ if (!PARTIAL_NO_SELECTION_NO_SEL_PROCESS) {
+ VRFY((!HDmemcmp(data, read_buf,
+ count[1] * (PARTIAL_NO_SELECTION_Y_DIM_SCALE * PARTIAL_NO_SELECTION_X_DIM_SCALE) *
+ sizeof(int))),
+ "memcmp succeeded");
+ }
+
+ if (dataset_dims) {
+ HDfree(dataset_dims);
+ dataset_dims = NULL;
+ }
+
+ if (data) {
+ HDfree(data);
+ data = NULL;
+ }
+
+ if (read_buf) {
+ HDfree(read_buf);
+ read_buf = NULL;
+ }
+
+ VRFY((H5Sclose(fspace_id) >= 0), "H5Sclose succeeded");
+ VRFY((H5Sclose(mspace_id) >= 0), "H5Sclose succeeded");
+ VRFY((H5Pclose(dcpl_id) >= 0), "H5Pclose succeeded");
+ VRFY((H5Pclose(dxpl_id) >= 0), "H5Pclose succeeded");
+ VRFY((H5Dclose(dset_id) >= 0), "H5Dclose succeeded");
+ VRFY((H5Pclose(fapl_id) >= 0), "H5Pclose succeeded");
+ VRFY((H5Fclose(file_id) >= 0), "H5Fclose succeeded");
+}
+
+/*
+ * A test for HDFFV-10562 which attempts to verify that using multi-chunk
+ * I/O with collective metadata reads enabled doesn't cause issues due to
+ * collective metadata reads being made only by process 0 in H5D__chunk_addrmap().
+ *
+ * Failure in this test may either cause a hang, or, due to how the MPI calls
+ * pertaining to this issue might mistakenly match up, may cause an MPI error
+ * message similar to:
+ *
+ * #008: H5Dmpio.c line 2546 in H5D__obtain_mpio_mode(): MPI_BCast failed
+ * major: Internal error (too specific to document in detail)
+ * minor: Some MPI function failed
+ * #009: H5Dmpio.c line 2546 in H5D__obtain_mpio_mode(): Message truncated, error stack:
+ *PMPI_Bcast(1600)..................: MPI_Bcast(buf=0x1df98e0, count=18, MPI_BYTE, root=0, comm=0x84000006)
+ *failed MPIR_Bcast_impl(1452).............: MPIR_Bcast(1476)..................:
+ *MPIR_Bcast_intra(1249)............:
+ *MPIR_SMP_Bcast(1088)..............:
+ *MPIR_Bcast_binomial(239)..........:
+ *MPIDI_CH3U_Receive_data_found(131): Message from rank 0 and tag 2 truncated; 2616 bytes received but buffer
+ *size is 18 major: Internal error (too specific to document in detail) minor: MPI Error String
+ *
+ */
+void
+test_multi_chunk_io_addrmap_issue(void)
+{
+ const char *filename;
+ hsize_t start[MULTI_CHUNK_IO_ADDRMAP_ISSUE_DIMS];
+ hsize_t stride[MULTI_CHUNK_IO_ADDRMAP_ISSUE_DIMS];
+ hsize_t count[MULTI_CHUNK_IO_ADDRMAP_ISSUE_DIMS];
+ hsize_t block[MULTI_CHUNK_IO_ADDRMAP_ISSUE_DIMS];
+ hsize_t dims[MULTI_CHUNK_IO_ADDRMAP_ISSUE_DIMS] = {10, 5};
+ hsize_t chunk_dims[MULTI_CHUNK_IO_ADDRMAP_ISSUE_DIMS] = {5, 5};
+ hsize_t max_dims[MULTI_CHUNK_IO_ADDRMAP_ISSUE_DIMS] = {H5S_UNLIMITED, H5S_UNLIMITED};
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t fapl_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t dcpl_id = H5I_INVALID_HID;
+ hid_t dxpl_id = H5I_INVALID_HID;
+ hid_t space_id = H5I_INVALID_HID;
+ void *read_buf = NULL;
+ int mpi_rank;
+ int data[5][5] = {{0, 1, 2, 3, 4}, {0, 1, 2, 3, 4}, {0, 1, 2, 3, 4}, {0, 1, 2, 3, 4}, {0, 1, 2, 3, 4}};
+
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_FLUSH_REFRESH)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(" API functions for basic file, dataset or file flush aren't supported with this "
+ "connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
+
+ filename = PARATESTFILE /* GetTestParameters() */;
+
+ fapl_id = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type);
+ VRFY((fapl_id >= 0), "create_faccess_plist succeeded");
+
+ /*
+ * Even though the testphdf5 framework currently sets collective metadata reads
+ * on the FAPL, we call it here just to be sure this is futureproof, since
+ * demonstrating this issue relies upon it.
+ */
+ VRFY((H5Pset_all_coll_metadata_ops(fapl_id, true) >= 0), "Set collective metadata reads succeeded");
+
+ file_id = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id);
+ VRFY((file_id >= 0), "H5Fcreate succeeded");
+
+ space_id = H5Screate_simple(MULTI_CHUNK_IO_ADDRMAP_ISSUE_DIMS, dims, max_dims);
+ VRFY((space_id >= 0), "H5Screate_simple succeeded");
+
+ dcpl_id = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY((dcpl_id >= 0), "H5Pcreate succeeded");
+
+ VRFY((H5Pset_chunk(dcpl_id, MULTI_CHUNK_IO_ADDRMAP_ISSUE_DIMS, chunk_dims) >= 0),
+ "H5Pset_chunk succeeded");
+
+ dset_id = H5Dcreate2(file_id, "dset", H5T_NATIVE_INT, space_id, H5P_DEFAULT, dcpl_id, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "H5Dcreate2 succeeded");
+
+ dxpl_id = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((dxpl_id >= 0), "H5Pcreate succeeded");
+
+ VRFY((H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE) >= 0), "H5Pset_dxpl_mpio succeeded");
+ VRFY((H5Pset_dxpl_mpio_chunk_opt(dxpl_id, H5FD_MPIO_CHUNK_MULTI_IO) >= 0),
+ "H5Pset_dxpl_mpio_chunk_opt succeeded");
+
+ start[1] = 0;
+ stride[0] = stride[1] = 1;
+ count[0] = count[1] = 5;
+ block[0] = block[1] = 1;
+
+ if (mpi_rank == 0)
+ start[0] = 0;
+ else
+ start[0] = 5;
+
+ VRFY((H5Sselect_hyperslab(space_id, H5S_SELECT_SET, start, stride, count, block) >= 0),
+ "H5Sselect_hyperslab succeeded");
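+    /* Only rank 0 keeps its selection; every other rank clears it so that
+     * multi-chunk I/O runs with non-participating ranks, the situation
+     * described in the comment above this test. */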
+ if (mpi_rank != 0)
+ VRFY((H5Sselect_none(space_id) >= 0), "H5Sselect_none succeeded");
+
+ VRFY((H5Dwrite(dset_id, H5T_NATIVE_INT, H5S_ALL, space_id, dxpl_id, data) >= 0), "H5Dwrite succeeded");
+
+ VRFY((H5Fflush(file_id, H5F_SCOPE_GLOBAL) >= 0), "H5Fflush succeeded");
+
+ read_buf = HDmalloc(50 * sizeof(int));
+ VRFY((read_buf != NULL), "malloc succeeded");
+
+ VRFY((H5Dread(dset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, dxpl_id, read_buf) >= 0), "H5Dread succeeded");
+
+ if (read_buf) {
+ HDfree(read_buf);
+ read_buf = NULL;
+ }
+
+ VRFY((H5Sclose(space_id) >= 0), "H5Sclose succeeded");
+ VRFY((H5Pclose(dcpl_id) >= 0), "H5Pclose succeeded");
+ VRFY((H5Pclose(dxpl_id) >= 0), "H5Pclose succeeded");
+ VRFY((H5Dclose(dset_id) >= 0), "H5Dclose succeeded");
+ VRFY((H5Pclose(fapl_id) >= 0), "H5Pclose succeeded");
+ VRFY((H5Fclose(file_id) >= 0), "H5Fclose succeeded");
+}
+
+/*
+ * A test for HDFFV-10562 which attempts to verify that using linked-chunk
+ * I/O with collective metadata reads enabled doesn't cause issues due to
+ * collective metadata reads being made only by process 0 in H5D__sort_chunk().
+ *
+ * NOTE: Due to the way that the threshold value which pertains to this test
+ * is currently calculated within HDF5, the following two conditions must be
+ * true to trigger the issue:
+ *
+ * Condition 1: A certain threshold ratio must be met in order to have HDF5
+ * obtain all chunk addresses collectively inside H5D__sort_chunk(). This is
+ * given by the following:
+ *
+ * (sum_chunk * 100) / (dataset_nchunks * mpi_size) >= 30%
+ *
+ * where:
+ * * `sum_chunk` is the combined sum of the number of chunks selected in
+ * the dataset by all ranks (chunks selected by more than one rank count
+ * individually toward the sum for each rank selecting that chunk)
+ * * `dataset_nchunks` is the number of chunks in the dataset (selected
+ * or not)
+ * * `mpi_size` is the size of the MPI Communicator
+ *
+ * Condition 2: `sum_chunk` divided by `mpi_size` must exceed or equal a certain
+ * threshold (as of this writing, 10000).
+ *
+ * To satisfy both these conditions, we #define a macro,
+ * LINK_CHUNK_IO_SORT_CHUNK_ISSUE_COLL_THRESH_NUM, which corresponds to the
+ * value of the H5D_ALL_CHUNK_ADDR_THRES_COL_NUM macro in H5Dmpio.c (the
+ * 10000 threshold from condition 2). We then create a dataset of that many
+ * chunks and have each MPI rank write to and read from a piece of every single
+ * chunk in the dataset. This ensures chunk utilization is the max possible
+ * and exceeds our 30% target ratio, while always exactly matching the numeric
+ * chunk threshold value of condition 2.
+ *
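+ * As a concrete check: each of the mpi_size ranks selects one element in every
+ * one of the 10000 chunks, so sum_chunk = 10000 * mpi_size while
+ * dataset_nchunks = 10000. Condition 1 then gives
+ * (10000 * mpi_size * 100) / (10000 * mpi_size) = 100% >= 30%, and condition 2
+ * gives sum_chunk / mpi_size = 10000, exactly meeting the threshold.
+ *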
+ * Failure in this test may either cause a hang, or, due to how the MPI calls
+ * pertaining to this issue might mistakenly match up, may cause an MPI error
+ * message similar to:
+ *
+ * #008: H5Dmpio.c line 2338 in H5D__sort_chunk(): MPI_BCast failed
+ * major: Internal error (too specific to document in detail)
+ * minor: Some MPI function failed
+ * #009: H5Dmpio.c line 2338 in H5D__sort_chunk(): Other MPI error, error stack:
+ *PMPI_Bcast(1600)........: MPI_Bcast(buf=0x7eae610, count=320000, MPI_BYTE, root=0, comm=0x84000006) failed
+ *MPIR_Bcast_impl(1452)...:
+ *MPIR_Bcast(1476)........:
+ *MPIR_Bcast_intra(1249)..:
+ *MPIR_SMP_Bcast(1088)....:
+ *MPIR_Bcast_binomial(250): message sizes do not match across processes in the collective routine: Received
+ *2096 but expected 320000 major: Internal error (too specific to document in detail) minor: MPI Error String
+ */
+void
+test_link_chunk_io_sort_chunk_issue(void)
+{
+ const char *filename;
+ hsize_t dataset_dims[LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DIMS];
+ hsize_t sel_dims[LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DIMS];
+ hsize_t chunk_dims[LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DIMS];
+ hsize_t start[LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DIMS];
+ hsize_t stride[LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DIMS];
+ hsize_t count[LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DIMS];
+ hsize_t block[LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DIMS];
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t fapl_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t dcpl_id = H5I_INVALID_HID;
+ hid_t dxpl_id = H5I_INVALID_HID;
+ hid_t fspace_id = H5I_INVALID_HID;
+ hid_t mspace_id = H5I_INVALID_HID;
+ int mpi_rank, mpi_size;
+ void *data = NULL;
+ void *read_buf = NULL;
+
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_FLUSH_REFRESH)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(" API functions for basic file, dataset or file flush aren't supported with this "
+ "connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
+
+ filename = PARATESTFILE /* GetTestParameters() */;
+
+ fapl_id = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type);
+ VRFY((fapl_id >= 0), "create_faccess_plist succeeded");
+
+ /*
+ * Even though the testphdf5 framework currently sets collective metadata reads
+ * on the FAPL, we call it here just to be sure this is futureproof, since
+ * demonstrating this issue relies upon it.
+ */
+ VRFY((H5Pset_all_coll_metadata_ops(fapl_id, true) >= 0), "Set collective metadata reads succeeded");
+
+ file_id = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id);
+ VRFY((file_id >= 0), "H5Fcreate succeeded");
+
+ /*
+ * Create a one-dimensional dataset of exactly LINK_CHUNK_IO_SORT_CHUNK_ISSUE_COLL_THRESH_NUM
+ * chunks, where every rank writes to a piece of every single chunk to keep utilization high.
+ */
+ dataset_dims[0] = (hsize_t)mpi_size * (hsize_t)LINK_CHUNK_IO_SORT_CHUNK_ISSUE_COLL_THRESH_NUM;
+
+ fspace_id = H5Screate_simple(LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DIMS, dataset_dims, NULL);
+ VRFY((fspace_id >= 0), "H5Screate_simple succeeded");
+
+ /*
+ * Set up chunking on the dataset in order to reproduce the problem.
+ */
+ dcpl_id = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY((dcpl_id >= 0), "H5Pcreate succeeded");
+
+ /* Chunk size is equal to MPI size since each rank writes to a piece of every chunk */
+ chunk_dims[0] = (hsize_t)mpi_size;
+
+ VRFY((H5Pset_chunk(dcpl_id, LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DIMS, chunk_dims) >= 0),
+ "H5Pset_chunk succeeded");
+
+ dset_id = H5Dcreate2(file_id, LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DATASET_NAME, H5T_NATIVE_INT, fspace_id,
+ H5P_DEFAULT, dcpl_id, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "H5Dcreate2 succeeded");
+
+ /*
+ * Setup hyperslab selection to split the dataset among the ranks.
+ */
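+    /* With start = mpi_rank, stride = mpi_size, block = 1 and count equal to
+     * the number of chunks, each rank selects exactly one element in every
+     * chunk of the dataset. */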
+ start[0] = (hsize_t)mpi_rank;
+ stride[0] = (hsize_t)mpi_size;
+ count[0] = LINK_CHUNK_IO_SORT_CHUNK_ISSUE_COLL_THRESH_NUM;
+ block[0] = 1;
+
+ VRFY((H5Sselect_hyperslab(fspace_id, H5S_SELECT_SET, start, stride, count, block) >= 0),
+ "H5Sselect_hyperslab succeeded");
+
+ sel_dims[0] = count[0];
+
+ mspace_id = H5Screate_simple(1, sel_dims, NULL);
+ VRFY((mspace_id >= 0), "H5Screate_simple succeeded");
+
+ data = HDcalloc(1, count[0] * sizeof(int));
+ VRFY((data != NULL), "calloc succeeded");
+
+ dxpl_id = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((dxpl_id >= 0), "H5Pcreate succeeded");
+
+ /*
+ * Enable collective access for the data transfer.
+ */
+ VRFY((H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE) >= 0), "H5Pset_dxpl_mpio succeeded");
+
+ VRFY((H5Dwrite(dset_id, H5T_NATIVE_INT, mspace_id, fspace_id, dxpl_id, data) >= 0), "H5Dwrite succeeded");
+
+ VRFY((H5Fflush(file_id, H5F_SCOPE_GLOBAL) >= 0), "H5Fflush succeeded");
+
+ /*
+ * Ensure that linked-chunk I/O is performed since this is
+ * the particular code path where the issue lies and we don't
+ * want the library doing multi-chunk I/O behind our backs.
+ */
+ VRFY((H5Pset_dxpl_mpio_chunk_opt(dxpl_id, H5FD_MPIO_CHUNK_ONE_IO) >= 0),
+ "H5Pset_dxpl_mpio_chunk_opt succeeded");
+
+ read_buf = HDmalloc(count[0] * sizeof(int));
+ VRFY((read_buf != NULL), "malloc succeeded");
+
+ VRFY((H5Sselect_hyperslab(fspace_id, H5S_SELECT_SET, start, stride, count, block) >= 0),
+ "H5Sselect_hyperslab succeeded");
+
+ sel_dims[0] = count[0];
+
+ VRFY((H5Sclose(mspace_id) >= 0), "H5Sclose succeeded");
+
+ mspace_id = H5Screate_simple(1, sel_dims, NULL);
+ VRFY((mspace_id >= 0), "H5Screate_simple succeeded");
+
+ /*
+ * Finally have each rank read their section of data back from the dataset.
+ */
+ VRFY((H5Dread(dset_id, H5T_NATIVE_INT, mspace_id, fspace_id, dxpl_id, read_buf) >= 0),
+ "H5Dread succeeded");
+
+ if (data) {
+ HDfree(data);
+ data = NULL;
+ }
+
+ if (read_buf) {
+ HDfree(read_buf);
+ read_buf = NULL;
+ }
+
+ VRFY((H5Sclose(fspace_id) >= 0), "H5Sclose succeeded");
+ VRFY((H5Sclose(mspace_id) >= 0), "H5Sclose succeeded");
+ VRFY((H5Pclose(dcpl_id) >= 0), "H5Pclose succeeded");
+ VRFY((H5Pclose(dxpl_id) >= 0), "H5Pclose succeeded");
+ VRFY((H5Dclose(dset_id) >= 0), "H5Dclose succeeded");
+ VRFY((H5Pclose(fapl_id) >= 0), "H5Pclose succeeded");
+ VRFY((H5Fclose(file_id) >= 0), "H5Fclose succeeded");
+}
+
+/*
+ * A test for GitHub issue #2433 which causes a collective metadata write
+ * of global heap data. This test is meant to ensure that global heap data
+ * gets correctly mapped as raw data during a collective metadata write
+ * using vector I/O.
+ *
+ * An assertion exists in the library that should be triggered if global
+ * heap data is not correctly mapped as raw data.
+ */
+void
+test_collective_global_heap_write(void)
+{
+ const char *filename;
+ hsize_t attr_dims[COLL_GHEAP_WRITE_ATTR_DIMS];
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t fapl_id = H5I_INVALID_HID;
+ hid_t attr_id = H5I_INVALID_HID;
+ hid_t vl_type = H5I_INVALID_HID;
+ hid_t fspace_id = H5I_INVALID_HID;
+ hvl_t vl_data;
+ int mpi_rank, mpi_size;
+ int data_buf[COLL_GHEAP_WRITE_ATTR_NELEMS];
+
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_FLUSH_REFRESH)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(" API functions for basic file, dataset or file flush aren't supported with this "
+ "connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
+
+ filename = PARATESTFILE /* GetTestParameters() */;
+
+ fapl_id = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type);
+ VRFY((fapl_id >= 0), "create_faccess_plist succeeded");
+
+ /*
+ * Even though the testphdf5 framework currently sets collective metadata
+ * writes on the FAPL, we call it here just to be sure this is futureproof,
+ * since demonstrating this issue relies upon it.
+ */
+ VRFY((H5Pset_coll_metadata_write(fapl_id, true) >= 0), "Set collective metadata writes succeeded");
+
+ file_id = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id);
+ VRFY((file_id >= 0), "H5Fcreate succeeded");
+
+ attr_dims[0] = 1;
+
+ fspace_id = H5Screate_simple(COLL_GHEAP_WRITE_ATTR_DIMS, attr_dims, NULL);
+ VRFY((fspace_id >= 0), "H5Screate_simple succeeded");
+
+ vl_type = H5Tvlen_create(H5T_NATIVE_INT);
+ VRFY((vl_type >= 0), "H5Tvlen_create succeeded");
+
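+    /* Each rank prepares the same COLL_GHEAP_WRITE_ATTR_NELEMS-element
+     * variable-length value; its data is stored in the file's global heap,
+     * which is the piece the collective metadata write must map as raw data. */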
+ vl_data.len = COLL_GHEAP_WRITE_ATTR_NELEMS;
+ vl_data.p = data_buf;
+
+ /*
+ * Create a variable-length attribute that will get written to the global heap
+ */
+ attr_id = H5Acreate2(file_id, COLL_GHEAP_WRITE_ATTR_NAME, vl_type, fspace_id, H5P_DEFAULT, H5P_DEFAULT);
+ VRFY((attr_id >= 0), "H5Acreate2 succeeded");
+
+ for (size_t i = 0; i < COLL_GHEAP_WRITE_ATTR_NELEMS; i++)
+ data_buf[i] = (int)i;
+
+ VRFY((H5Awrite(attr_id, vl_type, &vl_data) >= 0), "H5Awrite succeeded");
+
+ VRFY((H5Sclose(fspace_id) >= 0), "H5Sclose succeeded");
+ VRFY((H5Tclose(vl_type) >= 0), "H5Sclose succeeded");
+ VRFY((H5Aclose(attr_id) >= 0), "H5Aclose succeeded");
+ VRFY((H5Pclose(fapl_id) >= 0), "H5Pclose succeeded");
+ VRFY((H5Fclose(file_id) >= 0), "H5Fclose succeeded");
+}
diff --git a/testpar/API/t_dset.c b/testpar/API/t_dset.c
new file mode 100644
index 0000000..d005243
--- /dev/null
+++ b/testpar/API/t_dset.c
@@ -0,0 +1,4335 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://www.hdfgroup.org/licenses. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/*
+ * Parallel tests for datasets
+ */
+
+/*
+ * Example of using the parallel HDF5 library to access datasets.
+ *
+ * This program contains three major parts. Part 1 tests fixed dimension
+ * datasets, for both independent and collective transfer modes.
+ * Part 2 tests extendible datasets, for independent transfer mode
+ * only.
+ * Part 3 tests extendible datasets, for collective transfer mode
+ * only.
+ */
+
+#include "hdf5.h"
+#include "testphdf5.h"
+
+/*
+ * The following are various utility routines used by the tests.
+ */
+
+/*
+ * Setup the dimensions of the hyperslab.
+ * Two modes--by rows or by columns.
+ * Assume dimension rank is 2.
+ * BYROW divide into slabs of rows
+ * BYCOL divide into blocks of columns
+ * ZROW same as BYROW except process 0 gets 0 rows
+ * ZCOL same as BYCOL except process 0 gets 0 columns
+ */
+static void
+slab_set(int mpi_rank, int mpi_size, hsize_t start[], hsize_t count[], hsize_t stride[], hsize_t block[],
+ int mode)
+{
+ switch (mode) {
+ case BYROW:
+            /* Each process takes a slab of rows. */
+ block[0] = (hsize_t)(dim0 / mpi_size);
+ block[1] = (hsize_t)dim1;
+ stride[0] = block[0];
+ stride[1] = block[1];
+ count[0] = 1;
+ count[1] = 1;
+ start[0] = (hsize_t)mpi_rank * block[0];
+ start[1] = 0;
+ if (VERBOSE_MED)
+ HDprintf("slab_set BYROW\n");
+ break;
+ case BYCOL:
+ /* Each process takes a block of columns. */
+ block[0] = (hsize_t)dim0;
+ block[1] = (hsize_t)(dim1 / mpi_size);
+ stride[0] = block[0];
+ stride[1] = block[1];
+ count[0] = 1;
+ count[1] = 1;
+ start[0] = 0;
+ start[1] = (hsize_t)mpi_rank * block[1];
+ if (VERBOSE_MED)
+ HDprintf("slab_set BYCOL\n");
+ break;
+ case ZROW:
+            /* Similar to BYROW except process 0 gets 0 rows */
+ block[0] = (hsize_t)(mpi_rank ? dim0 / mpi_size : 0);
+ block[1] = (hsize_t)dim1;
+ stride[0] = (mpi_rank ? block[0] : 1); /* avoid setting stride to 0 */
+ stride[1] = block[1];
+ count[0] = 1;
+ count[1] = 1;
+ start[0] = (mpi_rank ? (hsize_t)mpi_rank * block[0] : 0);
+ start[1] = 0;
+ if (VERBOSE_MED)
+ HDprintf("slab_set ZROW\n");
+ break;
+ case ZCOL:
+            /* Similar to BYCOL except process 0 gets 0 columns */
+ block[0] = (hsize_t)dim0;
+ block[1] = (hsize_t)(mpi_rank ? dim1 / mpi_size : 0);
+ stride[0] = block[0];
+ stride[1] = (hsize_t)(mpi_rank ? block[1] : 1); /* avoid setting stride to 0 */
+ count[0] = 1;
+ count[1] = 1;
+ start[0] = 0;
+ start[1] = (mpi_rank ? (hsize_t)mpi_rank * block[1] : 0);
+ if (VERBOSE_MED)
+ HDprintf("slab_set ZCOL\n");
+ break;
+ default:
+ /* Unknown mode. Set it to cover the whole dataset. */
+ HDprintf("unknown slab_set mode (%d)\n", mode);
+ block[0] = (hsize_t)dim0;
+ block[1] = (hsize_t)dim1;
+ stride[0] = block[0];
+ stride[1] = block[1];
+ count[0] = 1;
+ count[1] = 1;
+ start[0] = 0;
+ start[1] = 0;
+ if (VERBOSE_MED)
+ HDprintf("slab_set wholeset\n");
+ break;
+ }
+ if (VERBOSE_MED) {
+ HDprintf("start[]=(%lu,%lu), count[]=(%lu,%lu), stride[]=(%lu,%lu), block[]=(%lu,%lu), total "
+ "datapoints=%lu\n",
+ (unsigned long)start[0], (unsigned long)start[1], (unsigned long)count[0],
+ (unsigned long)count[1], (unsigned long)stride[0], (unsigned long)stride[1],
+ (unsigned long)block[0], (unsigned long)block[1],
+ (unsigned long)(block[0] * block[1] * count[0] * count[1]));
+ }
+}
+
+/*
+ * Setup the coordinates for point selection.
+ */
+void
+point_set(hsize_t start[], hsize_t count[], hsize_t stride[], hsize_t block[], size_t num_points,
+ hsize_t coords[], int order)
+{
+ hsize_t i, j, k = 0, m, n, s1, s2;
+
+ HDcompile_assert(RANK == 2);
+
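+    /* IN_ORDER stores the (row, column) coordinate pairs front to back;
+     * OUT_OF_ORDER fills the coordinate array from the back, so the points
+     * come out in reverse order. */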
+ if (OUT_OF_ORDER == order)
+ k = (num_points * RANK) - 1;
+ else if (IN_ORDER == order)
+ k = 0;
+
+ s1 = start[0];
+ s2 = start[1];
+
+ for (i = 0; i < count[0]; i++)
+ for (j = 0; j < count[1]; j++)
+ for (m = 0; m < block[0]; m++)
+ for (n = 0; n < block[1]; n++)
+ if (OUT_OF_ORDER == order) {
+ coords[k--] = s2 + (stride[1] * j) + n;
+ coords[k--] = s1 + (stride[0] * i) + m;
+ }
+ else if (IN_ORDER == order) {
+ coords[k++] = s1 + stride[0] * i + m;
+ coords[k++] = s2 + stride[1] * j + n;
+ }
+
+ if (VERBOSE_MED) {
+ HDprintf("start[]=(%lu, %lu), count[]=(%lu, %lu), stride[]=(%lu, %lu), block[]=(%lu, %lu), total "
+ "datapoints=%lu\n",
+ (unsigned long)start[0], (unsigned long)start[1], (unsigned long)count[0],
+ (unsigned long)count[1], (unsigned long)stride[0], (unsigned long)stride[1],
+ (unsigned long)block[0], (unsigned long)block[1],
+ (unsigned long)(block[0] * block[1] * count[0] * count[1]));
+ k = 0;
+ for (i = 0; i < num_points; i++) {
+ HDprintf("(%d, %d)\n", (int)coords[k], (int)coords[k + 1]);
+ k += 2;
+ }
+ }
+}
+
+/*
+ * Fill the dataset with trivial data for testing.
+ * Assume dimension rank is 2 and data is stored contiguous.
+ */
+static void
+dataset_fill(hsize_t start[], hsize_t block[], DATATYPE *dataset)
+{
+ DATATYPE *dataptr = dataset;
+ hsize_t i, j;
+
+ /* put some trivial data in the data_array */
+ for (i = 0; i < block[0]; i++) {
+ for (j = 0; j < block[1]; j++) {
+ *dataptr = (DATATYPE)((i + start[0]) * 100 + (j + start[1] + 1));
+ dataptr++;
+ }
+ }
+}
+
+/*
+ * Print the content of the dataset.
+ */
+static void
+dataset_print(hsize_t start[], hsize_t block[], DATATYPE *dataset)
+{
+ DATATYPE *dataptr = dataset;
+ hsize_t i, j;
+
+ /* print the column heading */
+ HDprintf("%-8s", "Cols:");
+ for (j = 0; j < block[1]; j++) {
+ HDprintf("%3lu ", (unsigned long)(start[1] + j));
+ }
+ HDprintf("\n");
+
+ /* print the slab data */
+ for (i = 0; i < block[0]; i++) {
+ HDprintf("Row %2lu: ", (unsigned long)(i + start[0]));
+ for (j = 0; j < block[1]; j++) {
+ HDprintf("%03d ", *dataptr++);
+ }
+ HDprintf("\n");
+ }
+}
+
+/*
+ * Verify the content of the dataset against the expected original data.
+ */
+int
+dataset_vrfy(hsize_t start[], hsize_t count[], hsize_t stride[], hsize_t block[], DATATYPE *dataset,
+ DATATYPE *original)
+{
+ hsize_t i, j;
+ int vrfyerrs;
+
+ /* print it if VERBOSE_MED */
+ if (VERBOSE_MED) {
+ HDprintf("dataset_vrfy dumping:::\n");
+ HDprintf("start(%lu, %lu), count(%lu, %lu), stride(%lu, %lu), block(%lu, %lu)\n",
+ (unsigned long)start[0], (unsigned long)start[1], (unsigned long)count[0],
+ (unsigned long)count[1], (unsigned long)stride[0], (unsigned long)stride[1],
+ (unsigned long)block[0], (unsigned long)block[1]);
+ HDprintf("original values:\n");
+ dataset_print(start, block, original);
+ HDprintf("compared values:\n");
+ dataset_print(start, block, dataset);
+ }
+
+ vrfyerrs = 0;
+ for (i = 0; i < block[0]; i++) {
+ for (j = 0; j < block[1]; j++) {
+            if (*dataset != *original) {
+                if (vrfyerrs++ < MAX_ERR_REPORT || VERBOSE_MED) {
+                    HDprintf("Dataset Verify failed at [%lu][%lu](row %lu, col %lu): expect %d, got %d\n",
+                             (unsigned long)i, (unsigned long)j, (unsigned long)(i + start[0]),
+                             (unsigned long)(j + start[1]), *(original), *(dataset));
+                }
+            }
+            /* advance to the next element pair whether or not this one matched */
+            dataset++;
+            original++;
+ }
+ }
+ if (vrfyerrs > MAX_ERR_REPORT && !VERBOSE_MED)
+ HDprintf("[more errors ...]\n");
+ if (vrfyerrs)
+ HDprintf("%d errors found in dataset_vrfy\n", vrfyerrs);
+ return (vrfyerrs);
+}
+
+/*
+ * Part 1.a--Independent read/write for fixed dimension datasets.
+ */
+
+/*
+ * Example of using the parallel HDF5 library to create two datasets
+ * in one HDF5 file with parallel MPIO access support.
+ * The Datasets are of sizes (number-of-mpi-processes x dim0) x dim1.
+ * Each process controls only a slab of size dim0 x dim1 within each
+ * dataset.
+ */
+
+void
+dataset_writeInd(void)
+{
+ hid_t fid; /* HDF5 file ID */
+ hid_t acc_tpl; /* File access templates */
+ hid_t sid; /* Dataspace ID */
+ hid_t file_dataspace; /* File dataspace ID */
+ hid_t mem_dataspace; /* memory dataspace ID */
+ hid_t dataset1, dataset2; /* Dataset ID */
+ hsize_t dims[RANK]; /* dataset dim sizes */
+ DATATYPE *data_array1 = NULL; /* data buffer */
+ const char *filename;
+
+ hsize_t start[RANK]; /* for hyperslab setting */
+ hsize_t count[RANK], stride[RANK]; /* for hyperslab setting */
+ hsize_t block[RANK]; /* for hyperslab setting */
+
+ herr_t ret; /* Generic return value */
+ int mpi_size, mpi_rank;
+
+ MPI_Comm comm = MPI_COMM_WORLD;
+ MPI_Info info = MPI_INFO_NULL;
+
+ filename = PARATESTFILE /* GetTestParameters() */;
+ if (VERBOSE_MED)
+ HDprintf("Independent write test on file %s\n", filename);
+
+ /* set up MPI parameters */
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(" API functions for basic file, basic dataset, or more aren't supported with this "
+ "connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
+
+ /* allocate memory for data buffer */
+ data_array1 = (DATATYPE *)HDmalloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE));
+ VRFY((data_array1 != NULL), "data_array1 HDmalloc succeeded");
+
+ /* ----------------------------------------
+ * CREATE AN HDF5 FILE WITH PARALLEL ACCESS
+ * ---------------------------------------*/
+ /* setup file access template */
+ acc_tpl = create_faccess_plist(comm, info, facc_type);
+ VRFY((acc_tpl >= 0), "");
+
+ /* create the file collectively */
+ fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, acc_tpl);
+ VRFY((fid >= 0), "H5Fcreate succeeded");
+
+ /* Release file-access template */
+ ret = H5Pclose(acc_tpl);
+ VRFY((ret >= 0), "");
+
+ /* ---------------------------------------------
+ * Define the dimensions of the overall datasets
+ * and the slabs local to the MPI process.
+ * ------------------------------------------- */
+ /* setup dimensionality object */
+ dims[0] = (hsize_t)dim0;
+ dims[1] = (hsize_t)dim1;
+ sid = H5Screate_simple(RANK, dims, NULL);
+ VRFY((sid >= 0), "H5Screate_simple succeeded");
+
+ /* create a dataset collectively */
+ dataset1 = H5Dcreate2(fid, DATASETNAME1, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ VRFY((dataset1 >= 0), "H5Dcreate2 succeeded");
+
+ /* create another dataset collectively */
+ dataset2 = H5Dcreate2(fid, DATASETNAME2, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ VRFY((dataset2 >= 0), "H5Dcreate2 succeeded");
+
+ /*
+     * To test the independent ordering of writes between processes, all
+     * even-numbered processes write to dataset1 first, then dataset2;
+     * all odd-numbered processes write to dataset2 first, then dataset1.
+ */
+
+ /* set up dimensions of the slab this process accesses */
+ slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW);
+
+ /* put some trivial data in the data_array */
+ dataset_fill(start, block, data_array1);
+ MESG("data_array initialized");
+
+ /* create a file dataspace independently */
+ file_dataspace = H5Dget_space(dataset1);
+ VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
+ ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
+
+ /* create a memory dataspace independently */
+ mem_dataspace = H5Screate_simple(RANK, block, NULL);
+ VRFY((mem_dataspace >= 0), "");
+
+ /* write data independently */
+ ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, data_array1);
+ VRFY((ret >= 0), "H5Dwrite dataset1 succeeded");
+ /* write data independently */
+ ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, data_array1);
+ VRFY((ret >= 0), "H5Dwrite dataset2 succeeded");
+
+ /* setup dimensions again to write with zero rows for process 0 */
+ if (VERBOSE_MED)
+ HDprintf("writeInd by some with zero row\n");
+ slab_set(mpi_rank, mpi_size, start, count, stride, block, ZROW);
+ ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
+ /* need to make mem_dataspace to match for process 0 */
+ if (MAINPROCESS) {
+ ret = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "H5Sset_hyperslab mem_dataspace succeeded");
+ }
+ MESG("writeInd by some with zero row");
+ if ((mpi_rank / 2) * 2 != mpi_rank) {
+ ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, data_array1);
+ VRFY((ret >= 0), "H5Dwrite dataset1 by ZROW succeeded");
+ }
+#ifdef BARRIER_CHECKS
+ MPI_Barrier(MPI_COMM_WORLD);
+#endif /* BARRIER_CHECKS */
+
+ /* release dataspace ID */
+ H5Sclose(file_dataspace);
+
+ /* close dataset collectively */
+ ret = H5Dclose(dataset1);
+ VRFY((ret >= 0), "H5Dclose1 succeeded");
+ ret = H5Dclose(dataset2);
+ VRFY((ret >= 0), "H5Dclose2 succeeded");
+
+ /* release all IDs created */
+ H5Sclose(sid);
+
+ /* close the file collectively */
+ H5Fclose(fid);
+
+ /* release data buffers */
+ if (data_array1)
+ HDfree(data_array1);
+}
+
+/* Example of using the parallel HDF5 library to read a dataset */
+void
+dataset_readInd(void)
+{
+ hid_t fid; /* HDF5 file ID */
+ hid_t acc_tpl; /* File access templates */
+ hid_t file_dataspace; /* File dataspace ID */
+ hid_t mem_dataspace; /* memory dataspace ID */
+ hid_t dataset1, dataset2; /* Dataset ID */
+ DATATYPE *data_array1 = NULL; /* data buffer */
+ DATATYPE *data_origin1 = NULL; /* expected data buffer */
+ const char *filename;
+
+ hsize_t start[RANK]; /* for hyperslab setting */
+ hsize_t count[RANK], stride[RANK]; /* for hyperslab setting */
+ hsize_t block[RANK]; /* for hyperslab setting */
+
+ herr_t ret; /* Generic return value */
+ int mpi_size, mpi_rank;
+
+ MPI_Comm comm = MPI_COMM_WORLD;
+ MPI_Info info = MPI_INFO_NULL;
+
+ filename = PARATESTFILE /* GetTestParameters() */;
+ if (VERBOSE_MED)
+ HDprintf("Independent read test on file %s\n", filename);
+
+ /* set up MPI parameters */
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(" API functions for basic file, basic dataset, or more aren't supported with this "
+ "connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
+
+ /* allocate memory for data buffer */
+ data_array1 = (DATATYPE *)HDmalloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE));
+ VRFY((data_array1 != NULL), "data_array1 HDmalloc succeeded");
+ data_origin1 = (DATATYPE *)HDmalloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE));
+ VRFY((data_origin1 != NULL), "data_origin1 HDmalloc succeeded");
+
+ /* setup file access template */
+ acc_tpl = create_faccess_plist(comm, info, facc_type);
+ VRFY((acc_tpl >= 0), "");
+
+ /* open the file collectively */
+ fid = H5Fopen(filename, H5F_ACC_RDONLY, acc_tpl);
+ VRFY((fid >= 0), "");
+
+ /* Release file-access template */
+ ret = H5Pclose(acc_tpl);
+ VRFY((ret >= 0), "");
+
+ /* open the dataset1 collectively */
+ dataset1 = H5Dopen2(fid, DATASETNAME1, H5P_DEFAULT);
+ VRFY((dataset1 >= 0), "");
+
+ /* open another dataset collectively */
+ dataset2 = H5Dopen2(fid, DATASETNAME1, H5P_DEFAULT);
+ VRFY((dataset2 >= 0), "");
+
+ /* set up dimensions of the slab this process accesses */
+ slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW);
+
+ /* create a file dataspace independently */
+ file_dataspace = H5Dget_space(dataset1);
+ VRFY((file_dataspace >= 0), "");
+ ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "");
+
+ /* create a memory dataspace independently */
+ mem_dataspace = H5Screate_simple(RANK, block, NULL);
+ VRFY((mem_dataspace >= 0), "");
+
+    /* fill the expected-data buffer for verification */
+ dataset_fill(start, block, data_origin1);
+
+ /* read data independently */
+ ret = H5Dread(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, data_array1);
+ VRFY((ret >= 0), "");
+
+ /* verify the read data with original expected data */
+ ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1);
+ if (ret)
+ nerrors++;
+
+ /* read data independently */
+ ret = H5Dread(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, data_array1);
+ VRFY((ret >= 0), "");
+
+ /* verify the read data with original expected data */
+ ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1);
+ if (ret)
+ nerrors++;
+
+ /* close dataset collectively */
+ ret = H5Dclose(dataset1);
+ VRFY((ret >= 0), "");
+ ret = H5Dclose(dataset2);
+ VRFY((ret >= 0), "");
+
+ /* release all IDs created */
+ H5Sclose(file_dataspace);
+
+ /* close the file collectively */
+ H5Fclose(fid);
+
+ /* release data buffers */
+ if (data_array1)
+ HDfree(data_array1);
+ if (data_origin1)
+ HDfree(data_origin1);
+}
+
+/*
+ * Part 1.b--Collective read/write for fixed dimension datasets.
+ */
+
+/*
+ * Example of using the parallel HDF5 library to create two datasets
+ * in one HDF5 file with collective parallel access support.
+ * The Datasets are of sizes (number-of-mpi-processes x dim0) x dim1.
+ * Each process controls only a slab of size dim0 x dim1 within each
+ * dataset. [Note: not so yet. Datasets are of sizes dim0xdim1 and
+ * each process controls a hyperslab within.]
+ */
+
+void
+dataset_writeAll(void)
+{
+ hid_t fid; /* HDF5 file ID */
+ hid_t acc_tpl; /* File access templates */
+ hid_t xfer_plist; /* Dataset transfer properties list */
+ hid_t sid; /* Dataspace ID */
+ hid_t file_dataspace; /* File dataspace ID */
+ hid_t mem_dataspace; /* memory dataspace ID */
+ hid_t dataset1, dataset2, dataset3, dataset4; /* Dataset ID */
+ hid_t dataset5, dataset6, dataset7; /* Dataset ID */
+ hid_t datatype; /* Datatype ID */
+ hsize_t dims[RANK]; /* dataset dim sizes */
+ DATATYPE *data_array1 = NULL; /* data buffer */
+ const char *filename;
+
+ hsize_t start[RANK]; /* for hyperslab setting */
+ hsize_t count[RANK], stride[RANK]; /* for hyperslab setting */
+ hsize_t block[RANK]; /* for hyperslab setting */
+
+ size_t num_points; /* for point selection */
+ hsize_t *coords = NULL; /* for point selection */
+ hsize_t current_dims; /* for point selection */
+
+ herr_t ret; /* Generic return value */
+ int mpi_size, mpi_rank;
+
+ MPI_Comm comm = MPI_COMM_WORLD;
+ MPI_Info info = MPI_INFO_NULL;
+
+ filename = PARATESTFILE /* GetTestParameters() */;
+ if (VERBOSE_MED)
+ HDprintf("Collective write test on file %s\n", filename);
+
+ /* set up MPI parameters */
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(" API functions for basic file, basic dataset, or more aren't supported with this "
+ "connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
+
+ /* set up the coords array selection */
+ num_points = (size_t)dim1;
+ coords = (hsize_t *)HDmalloc((size_t)dim1 * (size_t)RANK * sizeof(hsize_t));
+ VRFY((coords != NULL), "coords malloc succeeded");
+
+ /* allocate memory for data buffer */
+ data_array1 = (DATATYPE *)HDmalloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE));
+ VRFY((data_array1 != NULL), "data_array1 HDmalloc succeeded");
+
+ /* -------------------
+ * START AN HDF5 FILE
+ * -------------------*/
+ /* setup file access template */
+ acc_tpl = create_faccess_plist(comm, info, facc_type);
+ VRFY((acc_tpl >= 0), "");
+
+ /* create the file collectively */
+ fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, acc_tpl);
+ VRFY((fid >= 0), "H5Fcreate succeeded");
+
+ /* Release file-access template */
+ ret = H5Pclose(acc_tpl);
+ VRFY((ret >= 0), "");
+
+ /* --------------------------
+ * Define the dimensions of the overall datasets
+ * and create the dataset
+ * ------------------------- */
+ /* setup 2-D dimensionality object */
+ dims[0] = (hsize_t)dim0;
+ dims[1] = (hsize_t)dim1;
+ sid = H5Screate_simple(RANK, dims, NULL);
+ VRFY((sid >= 0), "H5Screate_simple succeeded");
+
+ /* create a dataset collectively */
+ dataset1 = H5Dcreate2(fid, DATASETNAME1, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ VRFY((dataset1 >= 0), "H5Dcreate2 succeeded");
+
+ /* create another dataset collectively */
+ datatype = H5Tcopy(H5T_NATIVE_INT);
+ ret = H5Tset_order(datatype, H5T_ORDER_LE);
+ VRFY((ret >= 0), "H5Tset_order succeeded");
+
+ dataset2 = H5Dcreate2(fid, DATASETNAME2, datatype, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ VRFY((dataset2 >= 0), "H5Dcreate2 2 succeeded");
+
+ /* create a third dataset collectively */
+ dataset3 = H5Dcreate2(fid, DATASETNAME3, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ VRFY((dataset3 >= 0), "H5Dcreate2 succeeded");
+
+ dataset5 = H5Dcreate2(fid, DATASETNAME7, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ VRFY((dataset5 >= 0), "H5Dcreate2 succeeded");
+ dataset6 = H5Dcreate2(fid, DATASETNAME8, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ VRFY((dataset6 >= 0), "H5Dcreate2 succeeded");
+ dataset7 = H5Dcreate2(fid, DATASETNAME9, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ VRFY((dataset7 >= 0), "H5Dcreate2 succeeded");
+
+ /* release 2-D space ID created */
+ H5Sclose(sid);
+
+ /* setup scalar dimensionality object */
+ sid = H5Screate(H5S_SCALAR);
+ VRFY((sid >= 0), "H5Screate succeeded");
+
+ /* create a fourth dataset collectively */
+ dataset4 = H5Dcreate2(fid, DATASETNAME4, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ VRFY((dataset4 >= 0), "H5Dcreate2 succeeded");
+
+ /* release scalar space ID created */
+ H5Sclose(sid);
+
+ /*
+ * Set up dimensions of the slab this process accesses.
+ */
+
+ /* Dataset1: each process takes a block of rows. */
+ slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW);
+
+ /* create a file dataspace independently */
+ file_dataspace = H5Dget_space(dataset1);
+ VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
+ ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
+
+ /* create a memory dataspace independently */
+ mem_dataspace = H5Screate_simple(RANK, block, NULL);
+ VRFY((mem_dataspace >= 0), "");
+
+ /* fill the local slab with some trivial data */
+ dataset_fill(start, block, data_array1);
+ MESG("data_array initialized");
+ if (VERBOSE_MED) {
+ MESG("data_array created");
+ dataset_print(start, block, data_array1);
+ }
+
+ /* set up the collective transfer properties list */
+ xfer_plist = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((xfer_plist >= 0), "H5Pcreate xfer succeeded");
+ ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
+ VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret >= 0), "set independent IO collectively succeeded");
+ }
+
+ /* write data collectively */
+ MESG("writeAll by Row");
+ ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
+ VRFY((ret >= 0), "H5Dwrite dataset1 succeeded");
+
+ /* setup dimensions again to writeAll with zero rows for process 0 */
+ if (VERBOSE_MED)
+ HDprintf("writeAll by some with zero row\n");
+ slab_set(mpi_rank, mpi_size, start, count, stride, block, ZROW);
+ ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
+ /* need to make mem_dataspace to match for process 0 */
+ if (MAINPROCESS) {
+ ret = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "H5Sset_hyperslab mem_dataspace succeeded");
+ }
+ MESG("writeAll by some with zero row");
+ ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
+ VRFY((ret >= 0), "H5Dwrite dataset1 by ZROW succeeded");
+
+ /* release all temporary handles. */
+ /* Could have used them for dataset2 but it is cleaner */
+ /* to create them again.*/
+ H5Sclose(file_dataspace);
+ H5Sclose(mem_dataspace);
+ H5Pclose(xfer_plist);
+
+ /* Dataset2: each process takes a block of columns. */
+ slab_set(mpi_rank, mpi_size, start, count, stride, block, BYCOL);
+
+ /* put some trivial data in the data_array */
+ dataset_fill(start, block, data_array1);
+ MESG("data_array initialized");
+ if (VERBOSE_MED) {
+ MESG("data_array created");
+ dataset_print(start, block, data_array1);
+ }
+
+ /* create a file dataspace independently */
+ file_dataspace = H5Dget_space(dataset1);
+ VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
+ ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
+
+ /* create a memory dataspace independently */
+ mem_dataspace = H5Screate_simple(RANK, block, NULL);
+ VRFY((mem_dataspace >= 0), "");
+
+ /* fill the local slab with some trivial data */
+ dataset_fill(start, block, data_array1);
+ MESG("data_array initialized");
+ if (VERBOSE_MED) {
+ MESG("data_array created");
+ dataset_print(start, block, data_array1);
+ }
+
+ /* set up the collective transfer properties list */
+ xfer_plist = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((xfer_plist >= 0), "");
+ ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
+ VRFY((ret >= 0), "H5Pcreate xfer succeeded");
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret >= 0), "set independent IO collectively succeeded");
+ }
+
+ /* write data collectively */
+ ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
+ VRFY((ret >= 0), "H5Dwrite dataset2 succeeded");
+
+ /* setup dimensions again to writeAll with zero columns for process 0 */
+ if (VERBOSE_MED)
+ HDprintf("writeAll by some with zero col\n");
+ slab_set(mpi_rank, mpi_size, start, count, stride, block, ZCOL);
+ ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
+ /* need to make mem_dataspace to match for process 0 */
+ if (MAINPROCESS) {
+ ret = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "H5Sset_hyperslab mem_dataspace succeeded");
+ }
+ MESG("writeAll by some with zero col");
+ ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
+ VRFY((ret >= 0), "H5Dwrite dataset1 by ZCOL succeeded");
+
+ /* release all temporary handles. */
+ /* Could have used them for dataset3 but it is cleaner */
+ /* to create them again.*/
+ H5Sclose(file_dataspace);
+ H5Sclose(mem_dataspace);
+ H5Pclose(xfer_plist);
+
+ /* Dataset3: each process takes a block of rows, except process zero uses "none" selection. */
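+ /* (Collective I/O still requires every rank to participate in the
+ * H5Dwrite() call; a rank with a "none" selection simply contributes
+ * zero elements.) */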
+ slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW);
+
+ /* create a file dataspace independently */
+ file_dataspace = H5Dget_space(dataset3);
+ VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
+ if (MAINPROCESS) {
+ ret = H5Sselect_none(file_dataspace);
+ VRFY((ret >= 0), "H5Sselect_none file_dataspace succeeded");
+ } /* end if */
+ else {
+ ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "H5Sselect_hyperslab succeeded");
+ } /* end else */
+
+ /* create a memory dataspace independently */
+ mem_dataspace = H5Screate_simple(RANK, block, NULL);
+ VRFY((mem_dataspace >= 0), "");
+ if (MAINPROCESS) {
+ ret = H5Sselect_none(mem_dataspace);
+ VRFY((ret >= 0), "H5Sselect_none mem_dataspace succeeded");
+ } /* end if */
+
+ /* fill the local slab with some trivial data */
+ dataset_fill(start, block, data_array1);
+ MESG("data_array initialized");
+ if (VERBOSE_MED) {
+ MESG("data_array created");
+ dataset_print(start, block, data_array1);
+ } /* end if */
+
+ /* set up the collective transfer properties list */
+ xfer_plist = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((xfer_plist >= 0), "");
+ ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
+ VRFY((ret >= 0), "H5Pcreate xfer succeeded");
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret >= 0), "set independent IO collectively succeeded");
+ }
+
+ /* write data collectively */
+ MESG("writeAll with none");
+ ret = H5Dwrite(dataset3, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
+ VRFY((ret >= 0), "H5Dwrite dataset3 succeeded");
+
+ /* write data collectively (with datatype conversion) */
+ MESG("writeAll with none");
+ ret = H5Dwrite(dataset3, H5T_NATIVE_UCHAR, mem_dataspace, file_dataspace, xfer_plist, data_array1);
+ VRFY((ret >= 0), "H5Dwrite dataset3 succeeded");
+
+ /* release all temporary handles. */
+ /* Could have used them for dataset4 but it is cleaner */
+ /* to create them again.*/
+ H5Sclose(file_dataspace);
+ H5Sclose(mem_dataspace);
+ H5Pclose(xfer_plist);
+
+ /* Dataset4: process zero uses a "none" selection while all other processes use the "all" selection. */
+ /* Additionally, these selections are in a scalar dataspace */
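+ /* (A scalar dataspace contains exactly one element, so the "all"
+ * selection below covers a single value and the "none" selection is
+ * empty.) */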
+
+ /* create a file dataspace independently */
+ file_dataspace = H5Dget_space(dataset4);
+ VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
+ if (MAINPROCESS) {
+ ret = H5Sselect_none(file_dataspace);
+ VRFY((ret >= 0), "H5Sselect_all file_dataspace succeeded");
+ } /* end if */
+ else {
+ ret = H5Sselect_all(file_dataspace);
+ VRFY((ret >= 0), "H5Sselect_none succeeded");
+ } /* end else */
+
+ /* create a memory dataspace independently */
+ mem_dataspace = H5Screate(H5S_SCALAR);
+ VRFY((mem_dataspace >= 0), "");
+ if (MAINPROCESS) {
+ ret = H5Sselect_none(mem_dataspace);
+ VRFY((ret >= 0), "H5Sselect_all mem_dataspace succeeded");
+ } /* end if */
+ else {
+ ret = H5Sselect_all(mem_dataspace);
+ VRFY((ret >= 0), "H5Sselect_none succeeded");
+ } /* end else */
+
+ /* fill the local slab with some trivial data */
+ dataset_fill(start, block, data_array1);
+ MESG("data_array initialized");
+ if (VERBOSE_MED) {
+ MESG("data_array created");
+ dataset_print(start, block, data_array1);
+ } /* end if */
+
+ /* set up the collective transfer properties list */
+ xfer_plist = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((xfer_plist >= 0), "");
+ ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
+ VRFY((ret >= 0), "H5Pcreate xfer succeeded");
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret >= 0), "set independent IO collectively succeeded");
+ }
+
+ /* write data collectively */
+ MESG("writeAll with scalar dataspace");
+ ret = H5Dwrite(dataset4, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
+ VRFY((ret >= 0), "H5Dwrite dataset4 succeeded");
+
+ /* write data collectively (with datatype conversion) */
+ MESG("writeAll with scalar dataspace");
+ ret = H5Dwrite(dataset4, H5T_NATIVE_UCHAR, mem_dataspace, file_dataspace, xfer_plist, data_array1);
+ VRFY((ret >= 0), "H5Dwrite dataset4 succeeded");
+
+ /* release all temporary handles. */
+ H5Sclose(file_dataspace);
+ H5Sclose(mem_dataspace);
+ H5Pclose(xfer_plist);
+
+ if (data_array1)
+ HDfree(data_array1);
+ data_array1 = (DATATYPE *)HDmalloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE));
+ VRFY((data_array1 != NULL), "data_array1 malloc succeeded");
+
+ block[0] = 1;
+ block[1] = (hsize_t)dim1;
+ stride[0] = 1;
+ stride[1] = (hsize_t)dim1;
+ count[0] = 1;
+ count[1] = 1;
+ start[0] = (hsize_t)(dim0 / mpi_size * mpi_rank);
+ start[1] = 0;
+
+ dataset_fill(start, block, data_array1);
+ MESG("data_array initialized");
+ if (VERBOSE_MED) {
+ MESG("data_array created");
+ dataset_print(start, block, data_array1);
+ }
+
+ /* Dataset5: point selection in File - Hyperslab selection in Memory*/
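+ /* (H5Sselect_elements() takes `coords` as num_points x rank coordinates
+ * flattened into one array, i.e. (row, column) pairs here; point_set()
+ * presumably fills it in that layout, with OUT_OF_ORDER only changing
+ * the order of the points.) */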
+ /* create a file dataspace independently */
+ point_set(start, count, stride, block, num_points, coords, OUT_OF_ORDER);
+ file_dataspace = H5Dget_space(dataset5);
+ VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
+ ret = H5Sselect_elements(file_dataspace, H5S_SELECT_SET, num_points, coords);
+ VRFY((ret >= 0), "H5Sselect_elements succeeded");
+
+ start[0] = 0;
+ start[1] = 0;
+ mem_dataspace = H5Dget_space(dataset5);
+ VRFY((mem_dataspace >= 0), "H5Dget_space succeeded");
+ ret = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
+
+ /* set up the collective transfer properties list */
+ xfer_plist = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((xfer_plist >= 0), "");
+ ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
+ VRFY((ret >= 0), "H5Pcreate xfer succeeded");
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret >= 0), "set independent IO collectively succeeded");
+ }
+
+ /* write data collectively */
+ ret = H5Dwrite(dataset5, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
+ VRFY((ret >= 0), "H5Dwrite dataset5 succeeded");
+
+ /* release all temporary handles. */
+ H5Sclose(file_dataspace);
+ H5Sclose(mem_dataspace);
+ H5Pclose(xfer_plist);
+
+ /* Dataset6: point selection in File - Point selection in Memory*/
+ /* create a file dataspace independently */
+ start[0] = (hsize_t)(dim0 / mpi_size * mpi_rank);
+ start[1] = 0;
+ point_set(start, count, stride, block, num_points, coords, OUT_OF_ORDER);
+ file_dataspace = H5Dget_space(dataset6);
+ VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
+ ret = H5Sselect_elements(file_dataspace, H5S_SELECT_SET, num_points, coords);
+ VRFY((ret >= 0), "H5Sselect_elements succeeded");
+
+ start[0] = 0;
+ start[1] = 0;
+ point_set(start, count, stride, block, num_points, coords, IN_ORDER);
+ mem_dataspace = H5Dget_space(dataset6);
+ VRFY((mem_dataspace >= 0), "H5Dget_space succeeded");
+ ret = H5Sselect_elements(mem_dataspace, H5S_SELECT_SET, num_points, coords);
+ VRFY((ret >= 0), "H5Sselect_elements succeeded");
+
+ /* set up the collective transfer properties list */
+ xfer_plist = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((xfer_plist >= 0), "");
+ ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
+ VRFY((ret >= 0), "H5Pcreate xfer succeeded");
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret >= 0), "set independent IO collectively succeeded");
+ }
+
+ /* write data collectively */
+ ret = H5Dwrite(dataset6, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
+ VRFY((ret >= 0), "H5Dwrite dataset6 succeeded");
+
+ /* release all temporary handles. */
+ H5Sclose(file_dataspace);
+ H5Sclose(mem_dataspace);
+ H5Pclose(xfer_plist);
+
+ /* Dataset7: point selection in File - All selection in Memory*/
+ /* create a file dataspace independently */
+ start[0] = (hsize_t)(dim0 / mpi_size * mpi_rank);
+ start[1] = 0;
+ point_set(start, count, stride, block, num_points, coords, IN_ORDER);
+ file_dataspace = H5Dget_space(dataset7);
+ VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
+ ret = H5Sselect_elements(file_dataspace, H5S_SELECT_SET, num_points, coords);
+ VRFY((ret >= 0), "H5Sselect_elements succeeded");
+
+ current_dims = num_points;
+ mem_dataspace = H5Screate_simple(1, &current_dims, NULL);
+ VRFY((mem_dataspace >= 0), "mem_dataspace create succeeded");
+
+ ret = H5Sselect_all(mem_dataspace);
+ VRFY((ret >= 0), "H5Sselect_all succeeded");
+
+ /* set up the collective transfer properties list */
+ xfer_plist = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((xfer_plist >= 0), "");
+ ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
+ VRFY((ret >= 0), "H5Pcreate xfer succeeded");
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret >= 0), "set independent IO collectively succeeded");
+ }
+
+ /* write data collectively */
+ ret = H5Dwrite(dataset7, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
+ VRFY((ret >= 0), "H5Dwrite dataset7 succeeded");
+
+ /* release all temporary handles. */
+ H5Sclose(file_dataspace);
+ H5Sclose(mem_dataspace);
+ H5Pclose(xfer_plist);
+
+ /*
+ * All writes completed. Close datasets collectively
+ */
+ ret = H5Dclose(dataset1);
+ VRFY((ret >= 0), "H5Dclose1 succeeded");
+ ret = H5Dclose(dataset2);
+ VRFY((ret >= 0), "H5Dclose2 succeeded");
+ ret = H5Dclose(dataset3);
+ VRFY((ret >= 0), "H5Dclose3 succeeded");
+ ret = H5Dclose(dataset4);
+ VRFY((ret >= 0), "H5Dclose4 succeeded");
+ ret = H5Dclose(dataset5);
+ VRFY((ret >= 0), "H5Dclose5 succeeded");
+ ret = H5Dclose(dataset6);
+ VRFY((ret >= 0), "H5Dclose6 succeeded");
+ ret = H5Dclose(dataset7);
+ VRFY((ret >= 0), "H5Dclose7 succeeded");
+
+ /* close the file collectively */
+ H5Fclose(fid);
+
+ /* release data buffers */
+ if (coords)
+ HDfree(coords);
+ if (data_array1)
+ HDfree(data_array1);
+}
+
+/*
+ * Example of using the parallel HDF5 library to read two datasets
+ * in one HDF5 file with collective parallel access support.
+ * The Datasets are of sizes (number-of-mpi-processes x dim0) x dim1.
+ * Each process controls only a slab of size dim0 x dim1 within each
+ * dataset. [Note: not so yet; the datasets are currently of size
+ * dim0 x dim1 and each process controls a hyperslab within them.]
+ */
+
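+/*
+ * The read side mirrors the write pattern above: each rank selects its
+ * portion of the file dataspace, reads with a collective transfer
+ * property list, and checks the result against a locally regenerated
+ * expected buffer. A minimal sketch, assuming the same handles as in
+ * the write sketch plus the slab parameters and an `expected` buffer:
+ *
+ *     H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);
+ *     H5Dread(dset, H5T_NATIVE_INT, mem_space, file_space, dxpl, buf);
+ *     ret = dataset_vrfy(start, count, stride, block, buf, expected);
+ *     if (ret)
+ *         nerrors++;
+ */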
+void
+dataset_readAll(void)
+{
+ hid_t fid; /* HDF5 file ID */
+ hid_t acc_tpl; /* File access templates */
+ hid_t xfer_plist; /* Dataset transfer properties list */
+ hid_t file_dataspace; /* File dataspace ID */
+ hid_t mem_dataspace; /* memory dataspace ID */
+ hid_t dataset1, dataset2, dataset5, dataset6, dataset7; /* Dataset ID */
+ DATATYPE *data_array1 = NULL; /* data buffer */
+ DATATYPE *data_origin1 = NULL; /* expected data buffer */
+ const char *filename;
+
+ hsize_t start[RANK]; /* for hyperslab setting */
+ hsize_t count[RANK], stride[RANK]; /* for hyperslab setting */
+ hsize_t block[RANK]; /* for hyperslab setting */
+
+ size_t num_points; /* for point selection */
+ hsize_t *coords = NULL; /* for point selection */
+ int i, j, k;
+
+ herr_t ret; /* Generic return value */
+ int mpi_size, mpi_rank;
+
+ MPI_Comm comm = MPI_COMM_WORLD;
+ MPI_Info info = MPI_INFO_NULL;
+
+ filename = PARATESTFILE /* GetTestParameters() */;
+ if (VERBOSE_MED)
+ HDprintf("Collective read test on file %s\n", filename);
+
+ /* set up MPI parameters */
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(" API functions for basic file, basic dataset, or more aren't supported with this "
+ "connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
+
+ /* set up the coords array selection */
+ num_points = (size_t)dim1;
+ coords = (hsize_t *)HDmalloc((size_t)dim0 * (size_t)dim1 * RANK * sizeof(hsize_t));
+ VRFY((coords != NULL), "coords malloc succeeded");
+
+ /* allocate memory for data buffer */
+ data_array1 = (DATATYPE *)HDmalloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE));
+ VRFY((data_array1 != NULL), "data_array1 HDmalloc succeeded");
+ data_origin1 = (DATATYPE *)HDmalloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE));
+ VRFY((data_origin1 != NULL), "data_origin1 HDmalloc succeeded");
+
+ /* -------------------
+ * OPEN AN HDF5 FILE
+ * -------------------*/
+ /* setup file access template */
+ acc_tpl = create_faccess_plist(comm, info, facc_type);
+ VRFY((acc_tpl >= 0), "");
+
+ /* open the file collectively */
+ fid = H5Fopen(filename, H5F_ACC_RDONLY, acc_tpl);
+ VRFY((fid >= 0), "H5Fopen succeeded");
+
+ /* Release file-access template */
+ ret = H5Pclose(acc_tpl);
+ VRFY((ret >= 0), "");
+
+ /* --------------------------
+ * Open the datasets in it
+ * ------------------------- */
+ /* open the dataset1 collectively */
+ dataset1 = H5Dopen2(fid, DATASETNAME1, H5P_DEFAULT);
+ VRFY((dataset1 >= 0), "H5Dopen2 succeeded");
+
+ /* open another dataset collectively */
+ dataset2 = H5Dopen2(fid, DATASETNAME2, H5P_DEFAULT);
+ VRFY((dataset2 >= 0), "H5Dopen2 2 succeeded");
+
+ /* open another dataset collectively */
+ dataset5 = H5Dopen2(fid, DATASETNAME7, H5P_DEFAULT);
+ VRFY((dataset5 >= 0), "H5Dopen2 5 succeeded");
+ dataset6 = H5Dopen2(fid, DATASETNAME8, H5P_DEFAULT);
+ VRFY((dataset6 >= 0), "H5Dopen2 6 succeeded");
+ dataset7 = H5Dopen2(fid, DATASETNAME9, H5P_DEFAULT);
+ VRFY((dataset7 >= 0), "H5Dopen2 7 succeeded");
+
+ /*
+ * Set up dimensions of the slab this process accesses.
+ */
+
+ /* Dataset1: each process takes a block of columns. */
+ slab_set(mpi_rank, mpi_size, start, count, stride, block, BYCOL);
+
+ /* create a file dataspace independently */
+ file_dataspace = H5Dget_space(dataset1);
+ VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
+ ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
+
+ /* create a memory dataspace independently */
+ mem_dataspace = H5Screate_simple(RANK, block, NULL);
+ VRFY((mem_dataspace >= 0), "");
+
+ /* fill dataset with test data */
+ dataset_fill(start, block, data_origin1);
+ MESG("data_array initialized");
+ if (VERBOSE_MED) {
+ MESG("data_array created");
+ dataset_print(start, block, data_origin1);
+ }
+
+ /* set up the collective transfer properties list */
+ xfer_plist = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((xfer_plist >= 0), "");
+ ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
+ VRFY((ret >= 0), "H5Pcreate xfer succeeded");
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret >= 0), "set independent IO collectively succeeded");
+ }
+
+ /* read data collectively */
+ ret = H5Dread(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
+ VRFY((ret >= 0), "H5Dread dataset1 succeeded");
+
+ /* verify the read data with original expected data */
+ ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1);
+ if (ret)
+ nerrors++;
+
+ /* setup dimensions again to readAll with zero columns for process 0 */
+ if (VERBOSE_MED)
+ HDprintf("readAll by some with zero col\n");
+ slab_set(mpi_rank, mpi_size, start, count, stride, block, ZCOL);
+ ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
+ /* need to make mem_dataspace to match for process 0 */
+ if (MAINPROCESS) {
+ ret = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "H5Sset_hyperslab mem_dataspace succeeded");
+ }
+ MESG("readAll by some with zero col");
+ ret = H5Dread(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
+ VRFY((ret >= 0), "H5Dread dataset1 by ZCOL succeeded");
+
+ /* verify the read data with original expected data */
+ ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1);
+ if (ret)
+ nerrors++;
+
+ /* release all temporary handles. */
+ /* Could have used them for dataset2 but it is cleaner */
+ /* to create them again.*/
+ H5Sclose(file_dataspace);
+ H5Sclose(mem_dataspace);
+ H5Pclose(xfer_plist);
+
+ /* Dataset2: each process takes a block of rows. */
+ slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW);
+
+ /* create a file dataspace independently */
+ file_dataspace = H5Dget_space(dataset1);
+ VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
+ ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
+
+ /* create a memory dataspace independently */
+ mem_dataspace = H5Screate_simple(RANK, block, NULL);
+ VRFY((mem_dataspace >= 0), "");
+
+ /* fill dataset with test data */
+ dataset_fill(start, block, data_origin1);
+ MESG("data_array initialized");
+ if (VERBOSE_MED) {
+ MESG("data_array created");
+ dataset_print(start, block, data_origin1);
+ }
+
+ /* set up the collective transfer properties list */
+ xfer_plist = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((xfer_plist >= 0), "");
+ ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
+ VRFY((ret >= 0), "H5Pcreate xfer succeeded");
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret >= 0), "set independent IO collectively succeeded");
+ }
+
+ /* read data collectively */
+ ret = H5Dread(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
+ VRFY((ret >= 0), "H5Dread dataset2 succeeded");
+
+ /* verify the read data with original expected data */
+ ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1);
+ if (ret)
+ nerrors++;
+
+ /* setup dimensions again to readAll with zero rows for process 0 */
+ if (VERBOSE_MED)
+ HDprintf("readAll by some with zero row\n");
+ slab_set(mpi_rank, mpi_size, start, count, stride, block, ZROW);
+ ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
+ /* need to make mem_dataspace to match for process 0 */
+ if (MAINPROCESS) {
+ ret = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "H5Sset_hyperslab mem_dataspace succeeded");
+ }
+ MESG("readAll by some with zero row");
+ ret = H5Dread(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
+ VRFY((ret >= 0), "H5Dread dataset1 by ZROW succeeded");
+
+ /* verify the read data with original expected data */
+ ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1);
+ if (ret)
+ nerrors++;
+
+ /* release all temporary handles. */
+ H5Sclose(file_dataspace);
+ H5Sclose(mem_dataspace);
+ H5Pclose(xfer_plist);
+
+ if (data_array1)
+ HDfree(data_array1);
+ if (data_origin1)
+ HDfree(data_origin1);
+ data_array1 = (DATATYPE *)HDmalloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE));
+ VRFY((data_array1 != NULL), "data_array1 malloc succeeded");
+ data_origin1 = (DATATYPE *)HDmalloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE));
+ VRFY((data_origin1 != NULL), "data_origin1 malloc succeeded");
+
+ block[0] = 1;
+ block[1] = (hsize_t)dim1;
+ stride[0] = 1;
+ stride[1] = (hsize_t)dim1;
+ count[0] = 1;
+ count[1] = 1;
+ start[0] = (hsize_t)(dim0 / mpi_size * mpi_rank);
+ start[1] = 0;
+
+ dataset_fill(start, block, data_origin1);
+ MESG("data_array initialized");
+ if (VERBOSE_MED) {
+ MESG("data_array created");
+ dataset_print(start, block, data_origin1);
+ }
+
+ /* Dataset5: point selection in memory - Hyperslab selection in file*/
+ /* create a file dataspace independently */
+ file_dataspace = H5Dget_space(dataset5);
+ VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
+ ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
+
+ start[0] = 0;
+ start[1] = 0;
+ point_set(start, count, stride, block, num_points, coords, OUT_OF_ORDER);
+ mem_dataspace = H5Dget_space(dataset5);
+ VRFY((mem_dataspace >= 0), "H5Dget_space succeeded");
+ ret = H5Sselect_elements(mem_dataspace, H5S_SELECT_SET, num_points, coords);
+ VRFY((ret >= 0), "H5Sselect_elements succeeded");
+
+ /* set up the collective transfer properties list */
+ xfer_plist = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((xfer_plist >= 0), "");
+ ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
+ VRFY((ret >= 0), "H5Pcreate xfer succeeded");
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret >= 0), "set independent IO collectively succeeded");
+ }
+
+ /* read data collectively */
+ ret = H5Dread(dataset5, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
+ VRFY((ret >= 0), "H5Dread dataset5 succeeded");
+
+ ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1);
+ if (ret)
+ nerrors++;
+
+ /* release all temporary handles. */
+ H5Sclose(file_dataspace);
+ H5Sclose(mem_dataspace);
+ H5Pclose(xfer_plist);
+
+ if (data_array1)
+ HDfree(data_array1);
+ data_array1 = (DATATYPE *)HDmalloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE));
+ VRFY((data_array1 != NULL), "data_array1 malloc succeeded");
+
+ /* Dataset6: point selection in File - Point selection in Memory*/
+ /* create a file dataspace independently */
+ start[0] = (hsize_t)(dim0 / mpi_size * mpi_rank);
+ start[1] = 0;
+ point_set(start, count, stride, block, num_points, coords, IN_ORDER);
+ file_dataspace = H5Dget_space(dataset6);
+ VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
+ ret = H5Sselect_elements(file_dataspace, H5S_SELECT_SET, num_points, coords);
+ VRFY((ret >= 0), "H5Sselect_elements succeeded");
+
+ start[0] = 0;
+ start[1] = 0;
+ point_set(start, count, stride, block, num_points, coords, OUT_OF_ORDER);
+ mem_dataspace = H5Dget_space(dataset6);
+ VRFY((mem_dataspace >= 0), "H5Dget_space succeeded");
+ ret = H5Sselect_elements(mem_dataspace, H5S_SELECT_SET, num_points, coords);
+ VRFY((ret >= 0), "H5Sselect_elements succeeded");
+
+ /* set up the collective transfer properties list */
+ xfer_plist = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((xfer_plist >= 0), "");
+ ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
+ VRFY((ret >= 0), "H5Pcreate xfer succeeded");
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret >= 0), "set independent IO collectively succeeded");
+ }
+
+ /* read data collectively */
+ ret = H5Dread(dataset6, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
+ VRFY((ret >= 0), "H5Dread dataset6 succeeded");
+
+ ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1);
+ if (ret)
+ nerrors++;
+
+ /* release all temporary handles. */
+ H5Sclose(file_dataspace);
+ H5Sclose(mem_dataspace);
+ H5Pclose(xfer_plist);
+
+ if (data_array1)
+ HDfree(data_array1);
+ data_array1 = (DATATYPE *)HDmalloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE));
+ VRFY((data_array1 != NULL), "data_array1 malloc succeeded");
+
+ /* Dataset7: point selection in memory - All selection in file*/
+ /* create a file dataspace independently */
+ file_dataspace = H5Dget_space(dataset7);
+ VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
+ ret = H5Sselect_all(file_dataspace);
+ VRFY((ret >= 0), "H5Sselect_all succeeded");
+
+ num_points = (size_t)(dim0 * dim1);
+ k = 0;
+ for (i = 0; i < dim0; i++) {
+ for (j = 0; j < dim1; j++) {
+ coords[k++] = (hsize_t)i;
+ coords[k++] = (hsize_t)j;
+ }
+ }
+ mem_dataspace = H5Dget_space(dataset7);
+ VRFY((mem_dataspace >= 0), "H5Dget_space succeeded");
+ ret = H5Sselect_elements(mem_dataspace, H5S_SELECT_SET, num_points, coords);
+ VRFY((ret >= 0), "H5Sselect_elements succeeded");
+
+ /* set up the collective transfer properties list */
+ xfer_plist = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((xfer_plist >= 0), "");
+ ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
+ VRFY((ret >= 0), "H5Pcreate xfer succeeded");
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret >= 0), "set independent IO collectively succeeded");
+ }
+
+ /* read data collectively */
+ ret = H5Dread(dataset7, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
+ VRFY((ret >= 0), "H5Dread dataset7 succeeded");
+
+ start[0] = (hsize_t)(dim0 / mpi_size * mpi_rank);
+ start[1] = 0;
+ ret = dataset_vrfy(start, count, stride, block, data_array1 + (dim0 / mpi_size * dim1 * mpi_rank),
+ data_origin1);
+ if (ret)
+ nerrors++;
+
+ /* release all temporary handles. */
+ H5Sclose(file_dataspace);
+ H5Sclose(mem_dataspace);
+ H5Pclose(xfer_plist);
+
+ /*
+ * All reads completed. Close datasets collectively
+ */
+ ret = H5Dclose(dataset1);
+ VRFY((ret >= 0), "H5Dclose1 succeeded");
+ ret = H5Dclose(dataset2);
+ VRFY((ret >= 0), "H5Dclose2 succeeded");
+ ret = H5Dclose(dataset5);
+ VRFY((ret >= 0), "H5Dclose5 succeeded");
+ ret = H5Dclose(dataset6);
+ VRFY((ret >= 0), "H5Dclose6 succeeded");
+ ret = H5Dclose(dataset7);
+ VRFY((ret >= 0), "H5Dclose7 succeeded");
+
+ /* close the file collectively */
+ H5Fclose(fid);
+
+ /* release data buffers */
+ if (coords)
+ HDfree(coords);
+ if (data_array1)
+ HDfree(data_array1);
+ if (data_origin1)
+ HDfree(data_origin1);
+}
+
+/*
+ * Part 2--Independent read/write for extendible datasets.
+ */
+
+/*
+ * Example of using the parallel HDF5 library to create two extendible
+ * datasets in one HDF5 file with independent parallel MPIO access support.
+ * The Datasets are of sizes (number-of-mpi-processes x dim0) x dim1.
+ * Each process controls only a slab of size dim0 x dim1 within each
+ * dataset.
+ */
+
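+/*
+ * An extendible dataset needs chunked storage and unlimited maximum
+ * dimensions; it is created at its initial size and grown later with
+ * H5Dset_extent(). A minimal sketch (the chunk and extent sizes here
+ * are illustrative only):
+ *
+ *     hsize_t cur[2]      = {0, 0};
+ *     hsize_t maxd[2]     = {H5S_UNLIMITED, H5S_UNLIMITED};
+ *     hsize_t chnk[2]     = {8, 8};
+ *     hsize_t new_dims[2] = {8, 8};
+ *     hid_t dcpl = H5Pcreate(H5P_DATASET_CREATE);
+ *     H5Pset_chunk(dcpl, 2, chnk);
+ *     hid_t space = H5Screate_simple(2, cur, maxd);
+ *     hid_t dset  = H5Dcreate2(fid, "name", H5T_NATIVE_INT, space,
+ *                              H5P_DEFAULT, dcpl, H5P_DEFAULT);
+ *     H5Dset_extent(dset, new_dims);
+ */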
+void
+extend_writeInd(void)
+{
+ hid_t fid; /* HDF5 file ID */
+ hid_t acc_tpl; /* File access templates */
+ hid_t sid; /* Dataspace ID */
+ hid_t file_dataspace; /* File dataspace ID */
+ hid_t mem_dataspace; /* memory dataspace ID */
+ hid_t dataset1, dataset2; /* Dataset ID */
+ const char *filename;
+ hsize_t dims[RANK]; /* dataset dim sizes */
+ hsize_t max_dims[RANK] = {H5S_UNLIMITED, H5S_UNLIMITED}; /* dataset maximum dim sizes */
+ DATATYPE *data_array1 = NULL; /* data buffer */
+ hsize_t chunk_dims[RANK]; /* chunk sizes */
+ hid_t dataset_pl; /* dataset create prop. list */
+
+ hsize_t start[RANK]; /* for hyperslab setting */
+ hsize_t count[RANK]; /* for hyperslab setting */
+ hsize_t stride[RANK]; /* for hyperslab setting */
+ hsize_t block[RANK]; /* for hyperslab setting */
+
+ herr_t ret; /* Generic return value */
+ int mpi_size, mpi_rank;
+
+ MPI_Comm comm = MPI_COMM_WORLD;
+ MPI_Info info = MPI_INFO_NULL;
+
+ filename = PARATESTFILE /* GetTestParameters() */;
+ if (VERBOSE_MED)
+ HDprintf("Extend independent write test on file %s\n", filename);
+
+ /* set up MPI parameters */
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(" API functions for basic file, basic dataset, or more aren't supported with this "
+ "connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
+
+ /* setup chunk-size. Make sure sizes are > 0 */
+ chunk_dims[0] = (hsize_t)chunkdim0;
+ chunk_dims[1] = (hsize_t)chunkdim1;
+
+ /* allocate memory for data buffer */
+ data_array1 = (DATATYPE *)HDmalloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE));
+ VRFY((data_array1 != NULL), "data_array1 HDmalloc succeeded");
+
+ /* -------------------
+ * START AN HDF5 FILE
+ * -------------------*/
+ /* setup file access template */
+ acc_tpl = create_faccess_plist(comm, info, facc_type);
+ VRFY((acc_tpl >= 0), "");
+
+ /* Reduce the number of metadata cache slots, so that there are cache
+ * collisions during the raw data I/O on the chunked dataset. This stresses
+ * the metadata cache and tests for cache bugs. -QAK
+ */
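+ /* (Only mdc_nelmts is shrunk here; the raw-data chunk cache settings
+ * returned by H5Pget_cache() are passed back to H5Pset_cache()
+ * unchanged.) */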
+ {
+ int mdc_nelmts;
+ size_t rdcc_nelmts;
+ size_t rdcc_nbytes;
+ double rdcc_w0;
+
+ ret = H5Pget_cache(acc_tpl, &mdc_nelmts, &rdcc_nelmts, &rdcc_nbytes, &rdcc_w0);
+ VRFY((ret >= 0), "H5Pget_cache succeeded");
+ mdc_nelmts = 4;
+ ret = H5Pset_cache(acc_tpl, mdc_nelmts, rdcc_nelmts, rdcc_nbytes, rdcc_w0);
+ VRFY((ret >= 0), "H5Pset_cache succeeded");
+ }
+
+ /* create the file collectively */
+ fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, acc_tpl);
+ VRFY((fid >= 0), "H5Fcreate succeeded");
+
+ /* Release file-access template */
+ ret = H5Pclose(acc_tpl);
+ VRFY((ret >= 0), "");
+
+ /* --------------------------------------------------------------
+ * Define the dimensions of the overall datasets and create them.
+ * ------------------------------------------------------------- */
+
+ /* set up dataset storage chunk sizes and creation property list */
+ if (VERBOSE_MED)
+ HDprintf("chunks[]=%lu,%lu\n", (unsigned long)chunk_dims[0], (unsigned long)chunk_dims[1]);
+ dataset_pl = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY((dataset_pl >= 0), "H5Pcreate succeeded");
+ ret = H5Pset_chunk(dataset_pl, RANK, chunk_dims);
+ VRFY((ret >= 0), "H5Pset_chunk succeeded");
+
+ /* setup dimensionality object */
+ /* start out with no rows, extend it later. */
+ dims[0] = dims[1] = 0;
+ sid = H5Screate_simple(RANK, dims, max_dims);
+ VRFY((sid >= 0), "H5Screate_simple succeeded");
+
+ /* create an extendible dataset collectively */
+ dataset1 = H5Dcreate2(fid, DATASETNAME1, H5T_NATIVE_INT, sid, H5P_DEFAULT, dataset_pl, H5P_DEFAULT);
+ VRFY((dataset1 >= 0), "H5Dcreate2 succeeded");
+
+ /* create another extendible dataset collectively */
+ dataset2 = H5Dcreate2(fid, DATASETNAME2, H5T_NATIVE_INT, sid, H5P_DEFAULT, dataset_pl, H5P_DEFAULT);
+ VRFY((dataset2 >= 0), "H5Dcreate2 succeeded");
+
+ /* release resource */
+ H5Sclose(sid);
+ H5Pclose(dataset_pl);
+
+ /* -------------------------
+ * Test writing to dataset1
+ * -------------------------*/
+ /* set up dimensions of the slab this process accesses */
+ slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW);
+
+ /* put some trivial data in the data_array */
+ dataset_fill(start, block, data_array1);
+ MESG("data_array initialized");
+ if (VERBOSE_MED) {
+ MESG("data_array created");
+ dataset_print(start, block, data_array1);
+ }
+
+ /* create a memory dataspace independently */
+ mem_dataspace = H5Screate_simple(RANK, block, NULL);
+ VRFY((mem_dataspace >= 0), "");
+
+ /* Extend its current dim sizes before writing */
+ dims[0] = (hsize_t)dim0;
+ dims[1] = (hsize_t)dim1;
+ ret = H5Dset_extent(dataset1, dims);
+ VRFY((ret >= 0), "H5Dset_extent succeeded");
+
+ /* create a file dataspace independently */
+ file_dataspace = H5Dget_space(dataset1);
+ VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
+ ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
+
+ /* write data independently */
+ ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, data_array1);
+ VRFY((ret >= 0), "H5Dwrite succeeded");
+
+ /* release resource */
+ H5Sclose(file_dataspace);
+ H5Sclose(mem_dataspace);
+
+ /* -------------------------
+ * Test writing to dataset2
+ * -------------------------*/
+ /* set up dimensions of the slab this process accesses */
+ slab_set(mpi_rank, mpi_size, start, count, stride, block, BYCOL);
+
+ /* put some trivial data in the data_array */
+ dataset_fill(start, block, data_array1);
+ MESG("data_array initialized");
+ if (VERBOSE_MED) {
+ MESG("data_array created");
+ dataset_print(start, block, data_array1);
+ }
+
+ /* create a memory dataspace independently */
+ mem_dataspace = H5Screate_simple(RANK, block, NULL);
+ VRFY((mem_dataspace >= 0), "");
+
+ /* Try write to dataset2 beyond its current dim sizes. Should fail. */
+
+ /* create a file dataspace independently */
+ file_dataspace = H5Dget_space(dataset2);
+ VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
+ ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
+
+ /* write data independently. Should fail. */
+ H5E_BEGIN_TRY
+ {
+ ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, data_array1);
+ }
+ H5E_END_TRY
+ VRFY((ret < 0), "H5Dwrite failed as expected");
+
+ H5Sclose(file_dataspace);
+
+ /* Extend dataset2 and try again. Should succeed. */
+ dims[0] = (hsize_t)dim0;
+ dims[1] = (hsize_t)dim1;
+ ret = H5Dset_extent(dataset2, dims);
+ VRFY((ret >= 0), "H5Dset_extent succeeded");
+
+ /* create a file dataspace independently */
+ file_dataspace = H5Dget_space(dataset2);
+ VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
+ ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
+
+ /* write data independently */
+ ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, data_array1);
+ VRFY((ret >= 0), "H5Dwrite succeeded");
+
+ /* release resource */
+ ret = H5Sclose(file_dataspace);
+ VRFY((ret >= 0), "H5Sclose succeeded");
+ ret = H5Sclose(mem_dataspace);
+ VRFY((ret >= 0), "H5Sclose succeeded");
+
+ /* close dataset collectively */
+ ret = H5Dclose(dataset1);
+ VRFY((ret >= 0), "H5Dclose1 succeeded");
+ ret = H5Dclose(dataset2);
+ VRFY((ret >= 0), "H5Dclose2 succeeded");
+
+ /* close the file collectively */
+ H5Fclose(fid);
+
+ /* release data buffers */
+ if (data_array1)
+ HDfree(data_array1);
+}
+
+/*
+ * Example of using the parallel HDF5 library to create an extendible dataset
+ * and perform I/O on it in a way that verifies that the chunk cache is
+ * bypassed for parallel I/O.
+ */
+
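+/*
+ * After H5Dset_extent() any previously obtained file dataspace handle
+ * describes the old extent, so the test below closes it and re-fetches
+ * the dataspace with H5Dget_space() before selecting the newly added
+ * region. Sketch:
+ *
+ *     H5Dset_extent(dset, &new_size);
+ *     H5Sclose(old_file_space);
+ *     hid_t file_space = H5Dget_space(dset);
+ */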
+void
+extend_writeInd2(void)
+{
+ const char *filename;
+ hid_t fid; /* HDF5 file ID */
+ hid_t fapl; /* File access templates */
+ hid_t fs; /* File dataspace ID */
+ hid_t ms; /* Memory dataspace ID */
+ hid_t dataset; /* Dataset ID */
+ hsize_t orig_size = 10; /* Original dataset dim size */
+ hsize_t new_size = 20; /* Extended dataset dim size */
+ hsize_t one = 1;
+ hsize_t max_size = H5S_UNLIMITED; /* dataset maximum dim size */
+ hsize_t chunk_size = 16384; /* chunk size */
+ hid_t dcpl; /* dataset create prop. list */
+ int written[10], /* Data to write */
+ retrieved[10]; /* Data read in */
+ int mpi_size, mpi_rank; /* MPI settings */
+ int i; /* Local index variable */
+ herr_t ret; /* Generic return value */
+
+ filename = PARATESTFILE /* GetTestParameters() */;
+ if (VERBOSE_MED)
+ HDprintf("Extend independent write test #2 on file %s\n", filename);
+
+ /* set up MPI parameters */
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(" API functions for basic file, basic dataset, or more aren't supported with this "
+ "connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
+
+ /* -------------------
+ * START AN HDF5 FILE
+ * -------------------*/
+ /* setup file access template */
+ fapl = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type);
+ VRFY((fapl >= 0), "create_faccess_plist succeeded");
+
+ /* create the file collectively */
+ fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
+ VRFY((fid >= 0), "H5Fcreate succeeded");
+
+ /* Release file-access template */
+ ret = H5Pclose(fapl);
+ VRFY((ret >= 0), "H5Pclose succeeded");
+
+ /* --------------------------------------------------------------
+ * Define the dimensions of the overall datasets and create them.
+ * ------------------------------------------------------------- */
+
+ /* set up dataset storage chunk sizes and creation property list */
+ dcpl = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY((dcpl >= 0), "H5Pcreate succeeded");
+ ret = H5Pset_chunk(dcpl, 1, &chunk_size);
+ VRFY((ret >= 0), "H5Pset_chunk succeeded");
+
+ /* setup dimensionality object */
+ fs = H5Screate_simple(1, &orig_size, &max_size);
+ VRFY((fs >= 0), "H5Screate_simple succeeded");
+
+ /* create an extendible dataset collectively */
+ dataset = H5Dcreate2(fid, DATASETNAME1, H5T_NATIVE_INT, fs, H5P_DEFAULT, dcpl, H5P_DEFAULT);
+ VRFY((dataset >= 0), "H5Dcreat2e succeeded");
+
+ /* release resource */
+ ret = H5Pclose(dcpl);
+ VRFY((ret >= 0), "H5Pclose succeeded");
+
+ /* -------------------------
+ * Test writing to dataset
+ * -------------------------*/
+ /* create a memory dataspace independently */
+ ms = H5Screate_simple(1, &orig_size, &max_size);
+ VRFY((ms >= 0), "H5Screate_simple succeeded");
+
+ /* put some trivial data in the data_array */
+ for (i = 0; i < (int)orig_size; i++)
+ written[i] = i;
+ MESG("data array initialized");
+ if (VERBOSE_MED) {
+ MESG("writing at offset zero: ");
+ for (i = 0; i < (int)orig_size; i++)
+ HDprintf("%s%d", i ? ", " : "", written[i]);
+ HDprintf("\n");
+ }
+ ret = H5Dwrite(dataset, H5T_NATIVE_INT, ms, fs, H5P_DEFAULT, written);
+ VRFY((ret >= 0), "H5Dwrite succeeded");
+
+ /* -------------------------
+ * Read initial data from dataset.
+ * -------------------------*/
+ ret = H5Dread(dataset, H5T_NATIVE_INT, ms, fs, H5P_DEFAULT, retrieved);
+ VRFY((ret >= 0), "H5Dread succeeded");
+ for (i = 0; i < (int)orig_size; i++)
+ if (written[i] != retrieved[i]) {
+ HDprintf("Line #%d: written!=retrieved: written[%d]=%d, retrieved[%d]=%d\n", __LINE__, i,
+ written[i], i, retrieved[i]);
+ nerrors++;
+ }
+ if (VERBOSE_MED) {
+ MESG("read at offset zero: ");
+ for (i = 0; i < (int)orig_size; i++)
+ HDprintf("%s%d", i ? ", " : "", retrieved[i]);
+ HDprintf("\n");
+ }
+
+ /* -------------------------
+ * Extend the dataset & retrieve new dataspace
+ * -------------------------*/
+ ret = H5Dset_extent(dataset, &new_size);
+ VRFY((ret >= 0), "H5Dset_extent succeeded");
+ ret = H5Sclose(fs);
+ VRFY((ret >= 0), "H5Sclose succeeded");
+ fs = H5Dget_space(dataset);
+ VRFY((fs >= 0), "H5Dget_space succeeded");
+
+ /* -------------------------
+ * Write to the second half of the dataset
+ * -------------------------*/
+ for (i = 0; i < (int)orig_size; i++)
+ written[i] = (int)orig_size + i;
+ MESG("data array re-initialized");
+ if (VERBOSE_MED) {
+ MESG("writing at offset 10: ");
+ for (i = 0; i < (int)orig_size; i++)
+ HDprintf("%s%d", i ? ", " : "", written[i]);
+ HDprintf("\n");
+ }
+ ret = H5Sselect_hyperslab(fs, H5S_SELECT_SET, &orig_size, NULL, &one, &orig_size);
+ VRFY((ret >= 0), "H5Sselect_hyperslab succeeded");
+ ret = H5Dwrite(dataset, H5T_NATIVE_INT, ms, fs, H5P_DEFAULT, written);
+ VRFY((ret >= 0), "H5Dwrite succeeded");
+
+ /* -------------------------
+ * Read the new data
+ * -------------------------*/
+ ret = H5Dread(dataset, H5T_NATIVE_INT, ms, fs, H5P_DEFAULT, retrieved);
+ VRFY((ret >= 0), "H5Dread succeeded");
+ for (i = 0; i < (int)orig_size; i++)
+ if (written[i] != retrieved[i]) {
+ HDprintf("Line #%d: written!=retrieved: written[%d]=%d, retrieved[%d]=%d\n", __LINE__, i,
+ written[i], i, retrieved[i]);
+ nerrors++;
+ }
+ if (VERBOSE_MED) {
+ MESG("read at offset 10: ");
+ for (i = 0; i < (int)orig_size; i++)
+ HDprintf("%s%d", i ? ", " : "", retrieved[i]);
+ HDprintf("\n");
+ }
+
+ /* Close dataset collectively */
+ ret = H5Dclose(dataset);
+ VRFY((ret >= 0), "H5Dclose succeeded");
+
+ /* Close the file collectively */
+ ret = H5Fclose(fid);
+ VRFY((ret >= 0), "H5Fclose succeeded");
+}
+
+/* Example of using the parallel HDF5 library to read an extendible dataset */
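+/*
+ * The test also verifies that extending a dataset in a file opened with
+ * H5F_ACC_RDONLY fails; expected failures are wrapped in
+ * H5E_BEGIN_TRY / H5E_END_TRY so the default error stack stays quiet:
+ *
+ *     H5E_BEGIN_TRY
+ *     {
+ *         ret = H5Dset_extent(dset, dims);
+ *     }
+ *     H5E_END_TRY
+ *     VRFY((ret < 0), "H5Dset_extent failed as expected");
+ */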
+void
+extend_readInd(void)
+{
+ hid_t fid; /* HDF5 file ID */
+ hid_t acc_tpl; /* File access templates */
+ hid_t file_dataspace; /* File dataspace ID */
+ hid_t mem_dataspace; /* memory dataspace ID */
+ hid_t dataset1, dataset2; /* Dataset ID */
+ hsize_t dims[RANK]; /* dataset dim sizes */
+ DATATYPE *data_array1 = NULL; /* data buffer */
+ DATATYPE *data_array2 = NULL; /* data buffer */
+ DATATYPE *data_origin1 = NULL; /* expected data buffer */
+ const char *filename;
+
+ hsize_t start[RANK]; /* for hyperslab setting */
+ hsize_t count[RANK], stride[RANK]; /* for hyperslab setting */
+ hsize_t block[RANK]; /* for hyperslab setting */
+
+ herr_t ret; /* Generic return value */
+ int mpi_size, mpi_rank;
+
+ MPI_Comm comm = MPI_COMM_WORLD;
+ MPI_Info info = MPI_INFO_NULL;
+
+ filename = PARATESTFILE /* GetTestParameters() */;
+ if (VERBOSE_MED)
+ HDprintf("Extend independent read test on file %s\n", filename);
+
+ /* set up MPI parameters */
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(" API functions for basic file, basic dataset, or more aren't supported with this "
+ "connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
+
+ /* allocate memory for data buffer */
+ data_array1 = (DATATYPE *)HDmalloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE));
+ VRFY((data_array1 != NULL), "data_array1 HDmalloc succeeded");
+ data_array2 = (DATATYPE *)HDmalloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE));
+ VRFY((data_array2 != NULL), "data_array2 HDmalloc succeeded");
+ data_origin1 = (DATATYPE *)HDmalloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE));
+ VRFY((data_origin1 != NULL), "data_origin1 HDmalloc succeeded");
+
+ /* -------------------
+ * OPEN AN HDF5 FILE
+ * -------------------*/
+ /* setup file access template */
+ acc_tpl = create_faccess_plist(comm, info, facc_type);
+ VRFY((acc_tpl >= 0), "");
+
+ /* open the file collectively */
+ fid = H5Fopen(filename, H5F_ACC_RDONLY, acc_tpl);
+ VRFY((fid >= 0), "");
+
+ /* Release file-access template */
+ ret = H5Pclose(acc_tpl);
+ VRFY((ret >= 0), "");
+
+ /* open the dataset1 collectively */
+ dataset1 = H5Dopen2(fid, DATASETNAME1, H5P_DEFAULT);
+ VRFY((dataset1 >= 0), "");
+
+ /* open a second handle to the same dataset collectively */
+ dataset2 = H5Dopen2(fid, DATASETNAME1, H5P_DEFAULT);
+ VRFY((dataset2 >= 0), "");
+
+ /* Try extend dataset1 which is open RDONLY. Should fail. */
+
+ file_dataspace = H5Dget_space(dataset1);
+ VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
+ ret = H5Sget_simple_extent_dims(file_dataspace, dims, NULL);
+ VRFY((ret > 0), "H5Sget_simple_extent_dims succeeded");
+ dims[0]++;
+ H5E_BEGIN_TRY
+ {
+ ret = H5Dset_extent(dataset1, dims);
+ }
+ H5E_END_TRY
+ VRFY((ret < 0), "H5Dset_extent failed as expected");
+
+ H5Sclose(file_dataspace);
+
+ /* Read dataset1 using BYROW pattern */
+ /* set up dimensions of the slab this process accesses */
+ slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW);
+
+ /* create a file dataspace independently */
+ file_dataspace = H5Dget_space(dataset1);
+ VRFY((file_dataspace >= 0), "");
+ ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "");
+
+ /* create a memory dataspace independently */
+ mem_dataspace = H5Screate_simple(RANK, block, NULL);
+ VRFY((mem_dataspace >= 0), "");
+
+ /* fill dataset with test data */
+ dataset_fill(start, block, data_origin1);
+ if (VERBOSE_MED) {
+ MESG("data_array created");
+ dataset_print(start, block, data_origin1);
+ }
+
+ /* read data independently */
+ ret = H5Dread(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, data_array1);
+ VRFY((ret >= 0), "H5Dread succeeded");
+
+ /* verify the read data with original expected data */
+ ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1);
+ VRFY((ret == 0), "dataset1 read verified correct");
+ if (ret)
+ nerrors++;
+
+ H5Sclose(mem_dataspace);
+ H5Sclose(file_dataspace);
+
+ /* Read dataset2 using BYCOL pattern */
+ /* set up dimensions of the slab this process accesses */
+ slab_set(mpi_rank, mpi_size, start, count, stride, block, BYCOL);
+
+ /* create a file dataspace independently */
+ file_dataspace = H5Dget_space(dataset2);
+ VRFY((file_dataspace >= 0), "");
+ ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "");
+
+ /* create a memory dataspace independently */
+ mem_dataspace = H5Screate_simple(RANK, block, NULL);
+ VRFY((mem_dataspace >= 0), "");
+
+ /* fill dataset with test data */
+ dataset_fill(start, block, data_origin1);
+ if (VERBOSE_MED) {
+ MESG("data_array created");
+ dataset_print(start, block, data_origin1);
+ }
+
+ /* read data independently */
+ ret = H5Dread(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, data_array1);
+ VRFY((ret >= 0), "H5Dread succeeded");
+
+ /* verify the read data with original expected data */
+ ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1);
+ VRFY((ret == 0), "dataset2 read verified correct");
+ if (ret)
+ nerrors++;
+
+ H5Sclose(mem_dataspace);
+ H5Sclose(file_dataspace);
+
+ /* close dataset collectively */
+ ret = H5Dclose(dataset1);
+ VRFY((ret >= 0), "");
+ ret = H5Dclose(dataset2);
+ VRFY((ret >= 0), "");
+
+ /* close the file collectively */
+ H5Fclose(fid);
+
+ /* release data buffers */
+ if (data_array1)
+ HDfree(data_array1);
+ if (data_array2)
+ HDfree(data_array2);
+ if (data_origin1)
+ HDfree(data_origin1);
+}
+
+/*
+ * Part 3--Collective read/write for extendible datasets.
+ */
+
+/*
+ * Example of using the parallel HDF5 library to create two extendible
+ * datasets in one HDF5 file with collective parallel MPIO access support.
+ * The Datasets are of sizes (number-of-mpi-processes x dim0) x dim1.
+ * Each process controls only a slab of size dim0 x dim1 within each
+ * dataset.
+ */
+
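+/*
+ * As in extend_writeInd(), the file-access property list below shrinks
+ * the metadata cache to provoke cache collisions during chunked raw-data
+ * I/O; the difference is that the writes here are performed collectively
+ * through a dataset transfer property list set to H5FD_MPIO_COLLECTIVE.
+ */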
+void
+extend_writeAll(void)
+{
+ hid_t fid; /* HDF5 file ID */
+ hid_t acc_tpl; /* File access templates */
+ hid_t xfer_plist; /* Dataset transfer properties list */
+ hid_t sid; /* Dataspace ID */
+ hid_t file_dataspace; /* File dataspace ID */
+ hid_t mem_dataspace; /* memory dataspace ID */
+ hid_t dataset1, dataset2; /* Dataset ID */
+ const char *filename;
+ hsize_t dims[RANK]; /* dataset dim sizes */
+ hsize_t max_dims[RANK] = {H5S_UNLIMITED, H5S_UNLIMITED}; /* dataset maximum dim sizes */
+ DATATYPE *data_array1 = NULL; /* data buffer */
+ hsize_t chunk_dims[RANK]; /* chunk sizes */
+ hid_t dataset_pl; /* dataset create prop. list */
+
+ hsize_t start[RANK]; /* for hyperslab setting */
+ hsize_t count[RANK]; /* for hyperslab setting */
+ hsize_t stride[RANK]; /* for hyperslab setting */
+ hsize_t block[RANK]; /* for hyperslab setting */
+
+ herr_t ret; /* Generic return value */
+ int mpi_size, mpi_rank;
+
+ MPI_Comm comm = MPI_COMM_WORLD;
+ MPI_Info info = MPI_INFO_NULL;
+
+ filename = PARATESTFILE /* GetTestParameters() */;
+ if (VERBOSE_MED)
+ HDprintf("Extend writeAll test on file %s\n", filename);
+
+ /* set up MPI parameters */
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(" API functions for basic file, basic dataset, or more aren't supported with this "
+ "connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
+
+ /* setup chunk-size. Make sure sizes are > 0 */
+ chunk_dims[0] = (hsize_t)chunkdim0;
+ chunk_dims[1] = (hsize_t)chunkdim1;
+
+ /* allocate memory for data buffer */
+ data_array1 = (DATATYPE *)HDmalloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE));
+ VRFY((data_array1 != NULL), "data_array1 HDmalloc succeeded");
+
+ /* -------------------
+ * START AN HDF5 FILE
+ * -------------------*/
+ /* setup file access template */
+ acc_tpl = create_faccess_plist(comm, info, facc_type);
+ VRFY((acc_tpl >= 0), "");
+
+ /* Reduce the number of metadata cache slots, so that there are cache
+ * collisions during the raw data I/O on the chunked dataset. This stresses
+ * the metadata cache and tests for cache bugs. -QAK
+ */
+ {
+ int mdc_nelmts;
+ size_t rdcc_nelmts;
+ size_t rdcc_nbytes;
+ double rdcc_w0;
+
+ ret = H5Pget_cache(acc_tpl, &mdc_nelmts, &rdcc_nelmts, &rdcc_nbytes, &rdcc_w0);
+ VRFY((ret >= 0), "H5Pget_cache succeeded");
+ mdc_nelmts = 4;
+ ret = H5Pset_cache(acc_tpl, mdc_nelmts, rdcc_nelmts, rdcc_nbytes, rdcc_w0);
+ VRFY((ret >= 0), "H5Pset_cache succeeded");
+ }
+
+ /* create the file collectively */
+ fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, acc_tpl);
+ VRFY((fid >= 0), "H5Fcreate succeeded");
+
+ /* Release file-access template */
+ ret = H5Pclose(acc_tpl);
+ VRFY((ret >= 0), "");
+
+ /* --------------------------------------------------------------
+ * Define the dimensions of the overall datasets and create them.
+ * ------------------------------------------------------------- */
+
+ /* set up dataset storage chunk sizes and creation property list */
+ if (VERBOSE_MED)
+ HDprintf("chunks[]=%lu,%lu\n", (unsigned long)chunk_dims[0], (unsigned long)chunk_dims[1]);
+ dataset_pl = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY((dataset_pl >= 0), "H5Pcreate succeeded");
+ ret = H5Pset_chunk(dataset_pl, RANK, chunk_dims);
+ VRFY((ret >= 0), "H5Pset_chunk succeeded");
+
+ /* setup dimensionality object */
+ /* start out with no rows, extend it later. */
+ dims[0] = dims[1] = 0;
+ sid = H5Screate_simple(RANK, dims, max_dims);
+ VRFY((sid >= 0), "H5Screate_simple succeeded");
+
+ /* create an extendible dataset collectively */
+ dataset1 = H5Dcreate2(fid, DATASETNAME1, H5T_NATIVE_INT, sid, H5P_DEFAULT, dataset_pl, H5P_DEFAULT);
+ VRFY((dataset1 >= 0), "H5Dcreate2 succeeded");
+
+ /* create another extendible dataset collectively */
+ dataset2 = H5Dcreate2(fid, DATASETNAME2, H5T_NATIVE_INT, sid, H5P_DEFAULT, dataset_pl, H5P_DEFAULT);
+ VRFY((dataset2 >= 0), "H5Dcreate2 succeeded");
+
+ /* release resource */
+ H5Sclose(sid);
+ H5Pclose(dataset_pl);
+
+ /* -------------------------
+ * Test writing to dataset1
+ * -------------------------*/
+ /* set up dimensions of the slab this process accesses */
+ slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW);
+
+ /* put some trivial data in the data_array */
+ dataset_fill(start, block, data_array1);
+ MESG("data_array initialized");
+ if (VERBOSE_MED) {
+ MESG("data_array created");
+ dataset_print(start, block, data_array1);
+ }
+
+ /* create a memory dataspace independently */
+ mem_dataspace = H5Screate_simple(RANK, block, NULL);
+ VRFY((mem_dataspace >= 0), "");
+
+ /* Extend its current dim sizes before writing */
+ dims[0] = (hsize_t)dim0;
+ dims[1] = (hsize_t)dim1;
+ ret = H5Dset_extent(dataset1, dims);
+ VRFY((ret >= 0), "H5Dset_extent succeeded");
+
+ /* create a file dataspace independently */
+ file_dataspace = H5Dget_space(dataset1);
+ VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
+ ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "H5Sselect_hyperslab succeeded");
+
+ /* set up the collective transfer properties list */
+ xfer_plist = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((xfer_plist >= 0), "H5Pcreate xfer succeeded");
+ ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
+ VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret >= 0), "set independent IO collectively succeeded");
+ }
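+
+ /* Note: H5FD_MPIO_INDIVIDUAL_IO keeps the collective call interface while
+ * letting each process perform its I/O independently. */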
+
+ /* write data collectively */
+ ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
+ VRFY((ret >= 0), "H5Dwrite succeeded");
+
+ /* release resource */
+ H5Sclose(file_dataspace);
+ H5Sclose(mem_dataspace);
+ H5Pclose(xfer_plist);
+
+ /* -------------------------
+ * Test writing to dataset2
+ * -------------------------*/
+ /* set up dimensions of the slab this process accesses */
+ slab_set(mpi_rank, mpi_size, start, count, stride, block, BYCOL);
+
+ /* put some trivial data in the data_array */
+ dataset_fill(start, block, data_array1);
+ MESG("data_array initialized");
+ if (VERBOSE_MED) {
+ MESG("data_array created");
+ dataset_print(start, block, data_array1);
+ }
+
+ /* create a memory dataspace independently */
+ mem_dataspace = H5Screate_simple(RANK, block, NULL);
+ VRFY((mem_dataspace >= 0), "");
+
+ /* set up the collective transfer properties list */
+ xfer_plist = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((xfer_plist >= 0), "H5Pcreate xfer succeeded");
+ ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
+ VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret >= 0), "set independent IO collectively succeeded");
+ }
+
+ /* Try writing to dataset2 beyond its current dim sizes. This should fail. */
+
+ /* create a file dataspace independently */
+ file_dataspace = H5Dget_space(dataset2);
+ VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
+ ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "H5Sselect_hyperslab succeeded");
+
+ /* write data collectively. Should fail because the selection extends beyond dataset2's current extent. */
+ H5E_BEGIN_TRY
+ {
+ ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
+ }
+ H5E_END_TRY
+ VRFY((ret < 0), "H5Dwrite failed as expected");
+
+ H5Sclose(file_dataspace);
+
+ /* Extend dataset2 and try again. Should succeed. */
+ dims[0] = (hsize_t)dim0;
+ dims[1] = (hsize_t)dim1;
+ ret = H5Dset_extent(dataset2, dims);
+ VRFY((ret >= 0), "H5Dset_extent succeeded");
+
+ /* create a file dataspace independently */
+ file_dataspace = H5Dget_space(dataset2);
+ VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
+ ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "H5Sselect_hyperslab succeeded");
+
+ /* write data collectively */
+ ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
+ VRFY((ret >= 0), "H5Dwrite succeeded");
+
+ /* release resource */
+ ret = H5Sclose(file_dataspace);
+ VRFY((ret >= 0), "H5Sclose succeeded");
+ ret = H5Sclose(mem_dataspace);
+ VRFY((ret >= 0), "H5Sclose succeeded");
+ ret = H5Pclose(xfer_plist);
+ VRFY((ret >= 0), "H5Pclose succeeded");
+
+ /* close dataset collectively */
+ ret = H5Dclose(dataset1);
+ VRFY((ret >= 0), "H5Dclose1 succeeded");
+ ret = H5Dclose(dataset2);
+ VRFY((ret >= 0), "H5Dclose2 succeeded");
+
+ /* close the file collectively */
+ H5Fclose(fid);
+
+ /* release data buffers */
+ if (data_array1)
+ HDfree(data_array1);
+}
+
+/* Example of using the parallel HDF5 library to read an extendible dataset */
+void
+extend_readAll(void)
+{
+ hid_t fid; /* HDF5 file ID */
+ hid_t acc_tpl; /* File access templates */
+ hid_t xfer_plist; /* Dataset transfer properties list */
+ hid_t file_dataspace; /* File dataspace ID */
+ hid_t mem_dataspace; /* memory dataspace ID */
+ hid_t dataset1, dataset2; /* Dataset ID */
+ const char *filename;
+ hsize_t dims[RANK]; /* dataset dim sizes */
+ DATATYPE *data_array1 = NULL; /* data buffer */
+ DATATYPE *data_array2 = NULL; /* data buffer */
+ DATATYPE *data_origin1 = NULL; /* expected data buffer */
+
+ hsize_t start[RANK]; /* for hyperslab setting */
+ hsize_t count[RANK], stride[RANK]; /* for hyperslab setting */
+ hsize_t block[RANK]; /* for hyperslab setting */
+
+ herr_t ret; /* Generic return value */
+ int mpi_size, mpi_rank;
+
+ MPI_Comm comm = MPI_COMM_WORLD;
+ MPI_Info info = MPI_INFO_NULL;
+
+ filename = PARATESTFILE /* GetTestParameters() */;
+ if (VERBOSE_MED)
+ HDprintf("Extend readAll test on file %s\n", filename);
+
+ /* set up MPI parameters */
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(" API functions for basic file, basic dataset, or more aren't supported with this "
+ "connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
+
+ /* allocate memory for data buffer */
+ data_array1 = (DATATYPE *)HDmalloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE));
+ VRFY((data_array1 != NULL), "data_array1 HDmalloc succeeded");
+ data_array2 = (DATATYPE *)HDmalloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE));
+ VRFY((data_array2 != NULL), "data_array2 HDmalloc succeeded");
+ data_origin1 = (DATATYPE *)HDmalloc((size_t)dim0 * (size_t)dim1 * sizeof(DATATYPE));
+ VRFY((data_origin1 != NULL), "data_origin1 HDmalloc succeeded");
+
+ /* -------------------
+ * OPEN AN HDF5 FILE
+ * -------------------*/
+ /* setup file access template */
+ acc_tpl = create_faccess_plist(comm, info, facc_type);
+ VRFY((acc_tpl >= 0), "");
+
+ /* open the file collectively */
+ fid = H5Fopen(filename, H5F_ACC_RDONLY, acc_tpl);
+ VRFY((fid >= 0), "");
+
+ /* Release file-access template */
+ ret = H5Pclose(acc_tpl);
+ VRFY((ret >= 0), "");
+
+ /* open the dataset1 collectively */
+ dataset1 = H5Dopen2(fid, DATASETNAME1, H5P_DEFAULT);
+ VRFY((dataset1 >= 0), "");
+
+ /* open another dataset collectively */
+ dataset2 = H5Dopen2(fid, DATASETNAME2, H5P_DEFAULT);
+ VRFY((dataset2 >= 0), "");
+
+ /* Try to extend dataset1, which is open RDONLY. This should fail. */
+
+ file_dataspace = H5Dget_space(dataset1);
+ VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
+ ret = H5Sget_simple_extent_dims(file_dataspace, dims, NULL);
+ VRFY((ret > 0), "H5Sget_simple_extent_dims succeeded");
+ dims[0]++;
+ H5E_BEGIN_TRY
+ {
+ ret = H5Dset_extent(dataset1, dims);
+ }
+ H5E_END_TRY
+ VRFY((ret < 0), "H5Dset_extent failed as expected");
+
+ H5Sclose(file_dataspace);
+
+ /* Read dataset1 using BYROW pattern */
+ /* set up dimensions of the slab this process accesses */
+ slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW);
+
+ /* create a file dataspace independently */
+ file_dataspace = H5Dget_space(dataset1);
+ VRFY((file_dataspace >= 0), "");
+ ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "");
+
+ /* create a memory dataspace independently */
+ mem_dataspace = H5Screate_simple(RANK, block, NULL);
+ VRFY((mem_dataspace >= 0), "");
+
+ /* fill dataset with test data */
+ dataset_fill(start, block, data_origin1);
+ if (VERBOSE_MED) {
+ MESG("data_origin1 created");
+ dataset_print(start, block, data_origin1);
+ }
+
+ /* set up the collective transfer properties list */
+ xfer_plist = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((xfer_plist >= 0), "H5Pcreate xfer succeeded");
+ ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
+ VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret >= 0), "set independent IO collectively succeeded");
+ }
+
+ /* read data collectively */
+ ret = H5Dread(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
+ VRFY((ret >= 0), "H5Dread succeeded");
+
+ /* verify the read data with original expected data */
+ ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1);
+ VRFY((ret == 0), "dataset1 read verified correct");
+ if (ret)
+ nerrors++;
+
+ H5Sclose(mem_dataspace);
+ H5Sclose(file_dataspace);
+ H5Pclose(xfer_plist);
+
+ /* Read dataset2 using BYCOL pattern */
+ /* set up dimensions of the slab this process accesses */
+ slab_set(mpi_rank, mpi_size, start, count, stride, block, BYCOL);
+
+ /* create a file dataspace independently */
+ file_dataspace = H5Dget_space(dataset2);
+ VRFY((file_dataspace >= 0), "");
+ ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "");
+
+ /* create a memory dataspace independently */
+ mem_dataspace = H5Screate_simple(RANK, block, NULL);
+ VRFY((mem_dataspace >= 0), "");
+
+ /* fill dataset with test data */
+ dataset_fill(start, block, data_origin1);
+ if (VERBOSE_MED) {
+ MESG("data_origin1 created");
+ dataset_print(start, block, data_origin1);
+ }
+
+ /* set up the collective transfer properties list */
+ xfer_plist = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((xfer_plist >= 0), "H5Pcreate xfer succeeded");
+ ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
+ VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret >= 0), "set independent IO collectively succeeded");
+ }
+
+ /* read data collectively */
+ ret = H5Dread(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_array1);
+ VRFY((ret >= 0), "H5Dread succeeded");
+
+ /* verify the read data with original expected data */
+ ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1);
+ VRFY((ret == 0), "dataset2 read verified correct");
+ if (ret)
+ nerrors++;
+
+ H5Sclose(mem_dataspace);
+ H5Sclose(file_dataspace);
+ H5Pclose(xfer_plist);
+
+ /* close dataset collectively */
+ ret = H5Dclose(dataset1);
+ VRFY((ret >= 0), "");
+ ret = H5Dclose(dataset2);
+ VRFY((ret >= 0), "");
+
+ /* close the file collectively */
+ H5Fclose(fid);
+
+ /* release data buffers */
+ if (data_array1)
+ HDfree(data_array1);
+ if (data_array2)
+ HDfree(data_array2);
+ if (data_origin1)
+ HDfree(data_origin1);
+}
+
+#ifdef H5_HAVE_FILTER_DEFLATE
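+/*
+ * Return the portion of `filename` after the first ':' when one is present
+ * (e.g. a driver prefix such as "family:"); otherwise return `filename`
+ * unchanged. Used below so that process zero can create the test file
+ * serially with default file-access properties.
+ */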
+static const char *
+h5_rmprefix(const char *filename)
+{
+ const char *ret_ptr;
+
+ if ((ret_ptr = HDstrstr(filename, ":")) == NULL)
+ ret_ptr = filename;
+ else
+ ret_ptr++;
+
+ return (ret_ptr);
+}
+
+/*
+ * Example of using the parallel HDF5 library to read a compressed
+ * dataset in an HDF5 file with collective parallel access support.
+ */
+void
+compress_readAll(void)
+{
+ hid_t fid; /* HDF5 file ID */
+ hid_t acc_tpl; /* File access templates */
+ hid_t dcpl; /* Dataset creation property list */
+ hid_t xfer_plist; /* Dataset transfer properties list */
+ hid_t dataspace; /* Dataspace ID */
+ hid_t dataset; /* Dataset ID */
+ int rank = 1; /* Dataspace rank */
+ hsize_t dim = (hsize_t)dim0; /* Dataspace dimensions */
+ unsigned u; /* Local index variable */
+ unsigned chunk_opts; /* Chunk options */
+ unsigned disable_partial_chunk_filters; /* Whether filters are disabled on partial chunks */
+ DATATYPE *data_read = NULL; /* data buffer */
+ DATATYPE *data_orig = NULL; /* expected data buffer */
+ const char *filename;
+ MPI_Comm comm = MPI_COMM_WORLD;
+ MPI_Info info = MPI_INFO_NULL;
+ int mpi_size, mpi_rank;
+ herr_t ret; /* Generic return value */
+
+ filename = PARATESTFILE /* GetTestParameters() */;
+ if (VERBOSE_MED)
+ HDprintf("Collective chunked dataset read test on file %s\n", filename);
+
+ /* Retrieve MPI parameters */
+ MPI_Comm_size(comm, &mpi_size);
+ MPI_Comm_rank(comm, &mpi_rank);
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(" API functions for basic file or dataset aren't supported with this connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
+
+ /* Allocate data buffer */
+ data_orig = (DATATYPE *)HDmalloc((size_t)dim * sizeof(DATATYPE));
+ VRFY((data_orig != NULL), "data_orig HDmalloc succeeded");
+ data_read = (DATATYPE *)HDmalloc((size_t)dim * sizeof(DATATYPE));
+ VRFY((data_read != NULL), "data_read HDmalloc succeeded");
+
+ /* Initialize data buffers */
+ for (u = 0; u < dim; u++)
+ data_orig[u] = (DATATYPE)u;
+
+ /* Run test both with and without filters disabled on partial chunks */
+ for (disable_partial_chunk_filters = 0; disable_partial_chunk_filters <= 1;
+ disable_partial_chunk_filters++) {
+ /* Process zero creates the file with a compressed, chunked dataset */
+ if (mpi_rank == 0) {
+ hsize_t chunk_dim; /* Chunk dimensions */
+
+ /* Create the file */
+ fid = H5Fcreate(h5_rmprefix(filename), H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ VRFY((fid > 0), "H5Fcreate succeeded");
+
+ /* Create property list for chunking and compression */
+ dcpl = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY((dcpl > 0), "H5Pcreate succeeded");
+
+ ret = H5Pset_layout(dcpl, H5D_CHUNKED);
+ VRFY((ret >= 0), "H5Pset_layout succeeded");
+
+ /* Use eight chunks */
+ chunk_dim = dim / 8;
+ ret = H5Pset_chunk(dcpl, rank, &chunk_dim);
+ VRFY((ret >= 0), "H5Pset_chunk succeeded");
+
+ /* Set chunk options appropriately */
+ if (disable_partial_chunk_filters) {
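+ /* H5D_CHUNK_DONT_FILTER_PARTIAL_CHUNKS disables applying the filter
+ * pipeline (here, deflate) to partial edge chunks */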
+ ret = H5Pget_chunk_opts(dcpl, &chunk_opts);
+ VRFY((ret >= 0), "H5Pget_chunk_opts succeeded");
+
+ chunk_opts |= H5D_CHUNK_DONT_FILTER_PARTIAL_CHUNKS;
+
+ ret = H5Pset_chunk_opts(dcpl, chunk_opts);
+ VRFY((ret >= 0), "H5Pset_chunk_opts succeeded");
+ } /* end if */
+
+ ret = H5Pset_deflate(dcpl, 9);
+ VRFY((ret >= 0), "H5Pset_deflate succeeded");
+
+ /* Create dataspace */
+ dataspace = H5Screate_simple(rank, &dim, NULL);
+ VRFY((dataspace > 0), "H5Screate_simple succeeded");
+
+ /* Create dataset */
+ dataset =
+ H5Dcreate2(fid, "compressed_data", H5T_NATIVE_INT, dataspace, H5P_DEFAULT, dcpl, H5P_DEFAULT);
+ VRFY((dataset > 0), "H5Dcreate2 succeeded");
+
+ /* Write compressed data */
+ ret = H5Dwrite(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, data_orig);
+ VRFY((ret >= 0), "H5Dwrite succeeded");
+
+ /* Close objects */
+ ret = H5Pclose(dcpl);
+ VRFY((ret >= 0), "H5Pclose succeeded");
+ ret = H5Sclose(dataspace);
+ VRFY((ret >= 0), "H5Sclose succeeded");
+ ret = H5Dclose(dataset);
+ VRFY((ret >= 0), "H5Dclose succeeded");
+ ret = H5Fclose(fid);
+ VRFY((ret >= 0), "H5Fclose succeeded");
+ }
+
+ /* Wait for file to be created */
+ MPI_Barrier(comm);
+
+ /* -------------------
+ * OPEN AN HDF5 FILE
+ * -------------------*/
+
+ /* setup file access template */
+ acc_tpl = create_faccess_plist(comm, info, facc_type);
+ VRFY((acc_tpl >= 0), "");
+
+ /* open the file collectively */
+ fid = H5Fopen(filename, H5F_ACC_RDWR, acc_tpl);
+ VRFY((fid > 0), "H5Fopen succeeded");
+
+ /* Release file-access template */
+ ret = H5Pclose(acc_tpl);
+ VRFY((ret >= 0), "H5Pclose succeeded");
+
+ /* Open dataset with compressed chunks */
+ dataset = H5Dopen2(fid, "compressed_data", H5P_DEFAULT);
+ VRFY((dataset > 0), "H5Dopen2 succeeded");
+
+ /* Try reading & writing data */
+ if (dataset > 0) {
+ /* Create dataset transfer property list */
+ xfer_plist = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((xfer_plist > 0), "H5Pcreate succeeded");
+
+ ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
+ VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret >= 0), "set independent IO collectively succeeded");
+ }
+
+ /* Try reading the data */
+ ret = H5Dread(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, xfer_plist, data_read);
+ VRFY((ret >= 0), "H5Dread succeeded");
+
+ /* Verify data read */
+ for (u = 0; u < dim; u++)
+ if (data_orig[u] != data_read[u]) {
+ HDprintf("Line #%d: written!=retrieved: data_orig[%u]=%d, data_read[%u]=%d\n", __LINE__,
+ (unsigned)u, data_orig[u], (unsigned)u, data_read[u]);
+ nerrors++;
+ }
+
+#ifdef H5_HAVE_PARALLEL_FILTERED_WRITES
+ ret = H5Dwrite(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, xfer_plist, data_read);
+ VRFY((ret >= 0), "H5Dwrite succeeded");
+#endif
+
+ ret = H5Pclose(xfer_plist);
+ VRFY((ret >= 0), "H5Pclose succeeded");
+ ret = H5Dclose(dataset);
+ VRFY((ret >= 0), "H5Dclose succeeded");
+ } /* end if */
+
+ /* Close file */
+ ret = H5Fclose(fid);
+ VRFY((ret >= 0), "H5Fclose succeeded");
+ } /* end for */
+
+ /* release data buffers */
+ if (data_read)
+ HDfree(data_read);
+ if (data_orig)
+ HDfree(data_orig);
+}
+#endif /* H5_HAVE_FILTER_DEFLATE */
+
+/*
+ * Part 4--Non-selection for chunked dataset
+ */
+
+/*
+ * Example of using the parallel HDF5 library to create chunked
+ * datasets in one HDF5 file with collective and independent parallel
+ * MPIO access support. The datasets are of size dim0 x dim1.
+ * Each process controls only a slab of size (dim0 / number-of-mpi-processes) x dim1
+ * within each dataset, with the exception that one process selects no elements.
+ */
+
+void
+none_selection_chunk(void)
+{
+ hid_t fid; /* HDF5 file ID */
+ hid_t acc_tpl; /* File access templates */
+ hid_t xfer_plist; /* Dataset transfer properties list */
+ hid_t sid; /* Dataspace ID */
+ hid_t file_dataspace; /* File dataspace ID */
+ hid_t mem_dataspace; /* memory dataspace ID */
+ hid_t dataset1, dataset2; /* Dataset ID */
+ const char *filename;
+ hsize_t dims[RANK]; /* dataset dim sizes */
+ DATATYPE *data_origin = NULL; /* data buffer */
+ DATATYPE *data_array = NULL; /* data buffer */
+ hsize_t chunk_dims[RANK]; /* chunk sizes */
+ hid_t dataset_pl; /* dataset create prop. list */
+
+ hsize_t start[RANK]; /* for hyperslab setting */
+ hsize_t count[RANK]; /* for hyperslab setting */
+ hsize_t stride[RANK]; /* for hyperslab setting */
+ hsize_t block[RANK]; /* for hyperslab setting */
+ hsize_t mstart[RANK]; /* for data buffer in memory */
+
+ herr_t ret; /* Generic return value */
+ int mpi_size, mpi_rank;
+
+ MPI_Comm comm = MPI_COMM_WORLD;
+ MPI_Info info = MPI_INFO_NULL;
+
+ filename = PARATESTFILE /* GetTestParameters() */;
+ if (VERBOSE_MED)
+ HDprintf("None-selection chunked dataset test on file %s\n", filename);
+
+ /* set up MPI parameters */
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(" API functions for basic file or dataset aren't supported with this connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
+
+ /* setup chunk-size. Make sure sizes are > 0 */
+ chunk_dims[0] = (hsize_t)chunkdim0;
+ chunk_dims[1] = (hsize_t)chunkdim1;
+
+ /* -------------------
+ * START AN HDF5 FILE
+ * -------------------*/
+ /* setup file access template */
+ acc_tpl = create_faccess_plist(comm, info, facc_type);
+ VRFY((acc_tpl >= 0), "");
+
+ /* create the file collectively */
+ fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, acc_tpl);
+ VRFY((fid >= 0), "H5Fcreate succeeded");
+
+ /* Release file-access template */
+ ret = H5Pclose(acc_tpl);
+ VRFY((ret >= 0), "");
+
+ /* --------------------------------------------------------------
+ * Define the dimensions of the overall datasets and create them.
+ * ------------------------------------------------------------- */
+
+ /* set up dataset storage chunk sizes and creation property list */
+ if (VERBOSE_MED)
+ HDprintf("chunks[]=%lu,%lu\n", (unsigned long)chunk_dims[0], (unsigned long)chunk_dims[1]);
+ dataset_pl = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY((dataset_pl >= 0), "H5Pcreate succeeded");
+ ret = H5Pset_chunk(dataset_pl, RANK, chunk_dims);
+ VRFY((ret >= 0), "H5Pset_chunk succeeded");
+
+ /* setup dimensionality object */
+ dims[0] = (hsize_t)dim0;
+ dims[1] = (hsize_t)dim1;
+ sid = H5Screate_simple(RANK, dims, NULL);
+ VRFY((sid >= 0), "H5Screate_simple succeeded");
+
+ /* create an extendible dataset collectively */
+ dataset1 = H5Dcreate2(fid, DATASETNAME1, H5T_NATIVE_INT, sid, H5P_DEFAULT, dataset_pl, H5P_DEFAULT);
+ VRFY((dataset1 >= 0), "H5Dcreate2 succeeded");
+
+ /* create another extendible dataset collectively */
+ dataset2 = H5Dcreate2(fid, DATASETNAME2, H5T_NATIVE_INT, sid, H5P_DEFAULT, dataset_pl, H5P_DEFAULT);
+ VRFY((dataset2 >= 0), "H5Dcreate2 succeeded");
+
+ /* release resource */
+ H5Sclose(sid);
+ H5Pclose(dataset_pl);
+
+ /* -------------------------
+ * Test collective writing to dataset1
+ * -------------------------*/
+ /* set up dimensions of the slab this process accesses */
+ slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW);
+
+ /* allocate memory for data buffer. Only allocate enough buffer for
+ * each processor's data. */
+ if (mpi_rank) {
+ data_origin = (DATATYPE *)HDmalloc(block[0] * block[1] * sizeof(DATATYPE));
+ VRFY((data_origin != NULL), "data_origin HDmalloc succeeded");
+
+ data_array = (DATATYPE *)HDmalloc(block[0] * block[1] * sizeof(DATATYPE));
+ VRFY((data_array != NULL), "data_array HDmalloc succeeded");
+
+ /* put some trivial data in the data_array */
+ mstart[0] = mstart[1] = 0;
+ dataset_fill(mstart, block, data_origin);
+ MESG("data_array initialized");
+ if (VERBOSE_MED) {
+ MESG("data_array created");
+ dataset_print(mstart, block, data_origin);
+ }
+ }
+
+ /* create a memory dataspace independently */
+ mem_dataspace = H5Screate_simple(RANK, block, NULL);
+ VRFY((mem_dataspace >= 0), "");
+
+ /* Process 0 has no selection */
+ if (!mpi_rank) {
+ ret = H5Sselect_none(mem_dataspace);
+ VRFY((ret >= 0), "H5Sselect_none succeeded");
+ }
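+
+ /* Rank 0 still participates in the collective write below, but with an
+ * empty selection it transfers no elements. */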
+
+ /* create a file dataspace independently */
+ file_dataspace = H5Dget_space(dataset1);
+ VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
+ ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "H5Sselect_hyperslab succeeded");
+
+ /* Process 0 has no selection */
+ if (!mpi_rank) {
+ ret = H5Sselect_none(file_dataspace);
+ VRFY((ret >= 0), "H5Sselect_none succeeded");
+ }
+
+ /* set up the collective transfer properties list */
+ xfer_plist = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((xfer_plist >= 0), "H5Pcreate xfer succeeded");
+ ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
+ VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
+
+ /* write data collectively */
+ ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_origin);
+ VRFY((ret >= 0), "H5Dwrite succeeded");
+
+ /* read data independently */
+ ret = H5Dread(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, data_array);
+ VRFY((ret >= 0), "");
+
+ /* verify the read data with original expected data */
+ if (mpi_rank) {
+ ret = dataset_vrfy(mstart, count, stride, block, data_array, data_origin);
+ if (ret)
+ nerrors++;
+ }
+
+ /* -------------------------
+ * Test independent writing to dataset2
+ * -------------------------*/
+ ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_INDEPENDENT);
+ VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
+
+ /* write data independently */
+ ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, xfer_plist, data_origin);
+ VRFY((ret >= 0), "H5Dwrite succeeded");
+
+ /* read data independently */
+ ret = H5Dread(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, data_array);
+ VRFY((ret >= 0), "");
+
+ /* verify the read data with original expected data */
+ if (mpi_rank) {
+ ret = dataset_vrfy(mstart, count, stride, block, data_array, data_origin);
+ if (ret)
+ nerrors++;
+ }
+
+ /* release resource */
+ ret = H5Sclose(file_dataspace);
+ VRFY((ret >= 0), "H5Sclose succeeded");
+ ret = H5Sclose(mem_dataspace);
+ VRFY((ret >= 0), "H5Sclose succeeded");
+ ret = H5Pclose(xfer_plist);
+ VRFY((ret >= 0), "H5Pclose succeeded");
+
+ /* close dataset collectively */
+ ret = H5Dclose(dataset1);
+ VRFY((ret >= 0), "H5Dclose1 succeeded");
+ ret = H5Dclose(dataset2);
+ VRFY((ret >= 0), "H5Dclose2 succeeded");
+
+ /* close the file collectively */
+ H5Fclose(fid);
+
+ /* release data buffers */
+ if (data_origin)
+ HDfree(data_origin);
+ if (data_array)
+ HDfree(data_array);
+}
+
+/* Function: test_actual_io_mode
+ *
+ * Purpose: Tests one specific case of collective I/O and checks that the
+ * actual_chunk_opt_mode and actual_io_mode properties in the DXPL
+ * have the correct values.
+ *
+ * Input: selection_mode: changes the way processes select data from the space, as well
+ * as some dxpl flags to get collective I/O to break in different ways.
+ *
+ * The relevant I/O function and expected response for each mode:
+ * TEST_ACTUAL_IO_MULTI_CHUNK_IND:
+ * H5D_mpi_chunk_collective_io, each process reports independent I/O
+ *
+ * TEST_ACTUAL_IO_MULTI_CHUNK_COL:
+ * H5D_mpi_chunk_collective_io, each process reports collective I/O
+ *
+ * TEST_ACTUAL_IO_MULTI_CHUNK_MIX:
+ * H5D_mpi_chunk_collective_io, each process reports mixed I/O
+ *
+ * TEST_ACTUAL_IO_MULTI_CHUNK_MIX_DISAGREE:
+ * H5D_mpi_chunk_collective_io, processes disagree. The root reports
+ * collective, the rest report independent I/O
+ *
+ * TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_IND:
+ * Same test as TEST_ACTUAL_IO_MULTI_CHUNK_IND, but goes directly to
+ * multi-chunk I/O without the num-threshold calculation.
+ * TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_COL:
+ * Same test as TEST_ACTUAL_IO_MULTI_CHUNK_COL, but goes directly to
+ * multi-chunk I/O without the num-threshold calculation.
+ *
+ * TEST_ACTUAL_IO_LINK_CHUNK:
+ * H5D_link_chunk_collective_io, processes report linked chunk I/O
+ *
+ * TEST_ACTUAL_IO_CONTIGUOUS:
+ * H5D__contig_collective_write or H5D__contig_collective_read
+ * each process reports contiguous collective I/O
+ *
+ * TEST_ACTUAL_IO_NO_COLLECTIVE:
+ * Simple independent I/O. This tests that the defaults are properly set.
+ *
+ * TEST_ACTUAL_IO_RESET:
+ * Performs collective and then independent I/O with the same dxpl to
+ * make sure the property is correctly reset to the default on each use.
+ * Specifically, this test runs TEST_ACTUAL_IO_MULTI_CHUNK_MIX_DISAGREE
+ * (The most complex case that works on all builds) and then performs
+ * an independent read and write with the same dxpls.
+ *
+ * Note: DIRECT_MULTI_CHUNK_MIX and DIRECT_MULTI_CHUNK_MIX_DISAGREE
+ * are not needed as they are covered by the MULTI_CHUNK_MIX and
+ * MULTI_CHUNK_MIX_DISAGREE cases. The _DIRECT_ cases only test the
+ * pathway to multi-chunk I/O via H5FD_MPIO_CHUNK_MULTI_IO instead of the num-threshold.
+ *
+ * Modification:
+ * - Refactored to remove the multi-chunk-without-optimization test and
+ * updated for testing direct to multi-chunk I/O
+ * Programmer: Jonathan Kim
+ * Date: 2012-10-10
+ *
+ *
+ * Programmer: Jacob Gruber
+ * Date: 2011-04-06
+ */
+static void
+test_actual_io_mode(int selection_mode)
+{
+ H5D_mpio_actual_chunk_opt_mode_t actual_chunk_opt_mode_write = H5D_MPIO_NO_CHUNK_OPTIMIZATION;
+ H5D_mpio_actual_chunk_opt_mode_t actual_chunk_opt_mode_read = H5D_MPIO_NO_CHUNK_OPTIMIZATION;
+ H5D_mpio_actual_chunk_opt_mode_t actual_chunk_opt_mode_expected = H5D_MPIO_NO_CHUNK_OPTIMIZATION;
+ H5D_mpio_actual_io_mode_t actual_io_mode_write = H5D_MPIO_NO_COLLECTIVE;
+ H5D_mpio_actual_io_mode_t actual_io_mode_read = H5D_MPIO_NO_COLLECTIVE;
+ H5D_mpio_actual_io_mode_t actual_io_mode_expected = H5D_MPIO_NO_COLLECTIVE;
+ const char *filename;
+ const char *test_name;
+ hbool_t direct_multi_chunk_io;
+ hbool_t multi_chunk_io;
+ hbool_t is_chunked;
+ hbool_t is_collective;
+ int mpi_size = -1;
+ int mpi_rank = -1;
+ int length;
+ int *buffer;
+ int i;
+ MPI_Comm mpi_comm = MPI_COMM_NULL;
+ MPI_Info mpi_info = MPI_INFO_NULL;
+ hid_t fid = -1;
+ hid_t sid = -1;
+ hid_t dataset = -1;
+ hid_t data_type = H5T_NATIVE_INT;
+ hid_t fapl = -1;
+ hid_t mem_space = -1;
+ hid_t file_space = -1;
+ hid_t dcpl = -1;
+ hid_t dxpl_write = -1;
+ hid_t dxpl_read = -1;
+ hsize_t dims[RANK];
+ hsize_t chunk_dims[RANK];
+ hsize_t start[RANK];
+ hsize_t stride[RANK];
+ hsize_t count[RANK];
+ hsize_t block[RANK];
+ char message[256];
+ herr_t ret;
+
+ /* Set up some flags to make some future if statements slightly more readable */
+ direct_multi_chunk_io = (selection_mode == TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_IND ||
+ selection_mode == TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_COL);
+
+ /* Note: RESET performs the same tests as MULTI_CHUNK_MIX_DISAGREE and then
+ * tests independent I/O
+ */
+ multi_chunk_io =
+ (selection_mode == TEST_ACTUAL_IO_MULTI_CHUNK_IND ||
+ selection_mode == TEST_ACTUAL_IO_MULTI_CHUNK_COL ||
+ selection_mode == TEST_ACTUAL_IO_MULTI_CHUNK_MIX ||
+ selection_mode == TEST_ACTUAL_IO_MULTI_CHUNK_MIX_DISAGREE || selection_mode == TEST_ACTUAL_IO_RESET);
+
+ is_chunked =
+ (selection_mode != TEST_ACTUAL_IO_CONTIGUOUS && selection_mode != TEST_ACTUAL_IO_NO_COLLECTIVE);
+
+ is_collective = selection_mode != TEST_ACTUAL_IO_NO_COLLECTIVE;
+
+ /* Set up MPI parameters */
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(" API functions for basic file or dataset aren't supported with this connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
+
+ MPI_Barrier(MPI_COMM_WORLD);
+
+ HDassert(mpi_size >= 1);
+
+ mpi_comm = MPI_COMM_WORLD;
+ mpi_info = MPI_INFO_NULL;
+
+ filename = (const char *)PARATESTFILE /* GetTestParameters() */;
+ HDassert(filename != NULL);
+
+ /* Setup the file access template */
+ fapl = create_faccess_plist(mpi_comm, mpi_info, facc_type);
+ VRFY((fapl >= 0), "create_faccess_plist() succeeded");
+
+ /* Create the file */
+ fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
+ VRFY((fid >= 0), "H5Fcreate succeeded");
+
+ /* Create the basic Space */
+ dims[0] = (hsize_t)dim0;
+ dims[1] = (hsize_t)dim1;
+ sid = H5Screate_simple(RANK, dims, NULL);
+ VRFY((sid >= 0), "H5Screate_simple succeeded");
+
+ /* Create the dataset creation plist */
+ dcpl = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY((dcpl >= 0), "dataset creation plist created successfully");
+
+ /* If we are not testing contiguous datasets */
+ if (is_chunked) {
+ /* Set up chunk information. */
+ chunk_dims[0] = dims[0] / (hsize_t)mpi_size;
+ chunk_dims[1] = dims[1];
+ ret = H5Pset_chunk(dcpl, 2, chunk_dims);
+ VRFY((ret >= 0), "chunk creation property list succeeded");
+ }
+
+ /* Create the dataset */
+ dataset = H5Dcreate2(fid, "actual_io", data_type, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT);
+ VRFY((dataset >= 0), "H5Dcreate2() dataset succeeded");
+
+ /* Create the file dataspace */
+ file_space = H5Dget_space(dataset);
+ VRFY((file_space >= 0), "H5Dget_space succeeded");
+
+ /* Choose a selection method based on the type of I/O we want to occur,
+ * and also set up some selection-dependent test info. */
+ switch (selection_mode) {
+
+ /* Independent I/O with optimization */
+ case TEST_ACTUAL_IO_MULTI_CHUNK_IND:
+ case TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_IND:
+ /* Since the dataset is chunked by row and each process selects a row,
+ * each process writes to a different chunk. This forces all I/O to be
+ * independent.
+ */
+ slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW);
+
+ test_name = "Multi Chunk - Independent";
+ actual_chunk_opt_mode_expected = H5D_MPIO_MULTI_CHUNK;
+ actual_io_mode_expected = H5D_MPIO_CHUNK_INDEPENDENT;
+ break;
+
+ /* Collective I/O with optimization */
+ case TEST_ACTUAL_IO_MULTI_CHUNK_COL:
+ case TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_COL:
+ /* The dataset is chunked by rows, so each process takes a column which
+ * spans all chunks. Since the processes write non-overlapping regular
+ * selections to each chunk, the operation is purely collective.
+ */
+ slab_set(mpi_rank, mpi_size, start, count, stride, block, BYCOL);
+
+ test_name = "Multi Chunk - Collective";
+ actual_chunk_opt_mode_expected = H5D_MPIO_MULTI_CHUNK;
+ if (mpi_size > 1)
+ actual_io_mode_expected = H5D_MPIO_CHUNK_COLLECTIVE;
+ else
+ actual_io_mode_expected = H5D_MPIO_CHUNK_INDEPENDENT;
+ break;
+
+ /* Mixed I/O with optimization */
+ case TEST_ACTUAL_IO_MULTI_CHUNK_MIX:
+ /* A chunk will be assigned collective I/O only if it is selected by each
+ * process. To get mixed I/O, have the root select all chunks and each
+ * subsequent process selects the first and nth chunk. The first chunk,
+ * accessed by all, will be assigned collective I/O while each other chunk
+ * will be accessed only by the root and the nth process and will be
+ * assigned independent I/O. Each process will access one chunk collectively
+ * and at least one chunk independently, reporting mixed I/O.
+ */
+
+ if (mpi_rank == 0) {
+ /* Select the first column */
+ slab_set(mpi_rank, mpi_size, start, count, stride, block, BYCOL);
+ }
+ else {
+ /* Select the first and the nth chunk in the nth column */
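+ /* Chunks span full rows of height dim0/mpi_size, so this selection
+ * places one block in the first chunk and one in chunk number mpi_rank
+ * within this process's column band. */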
+ block[0] = (hsize_t)(dim0 / mpi_size);
+ block[1] = (hsize_t)(dim1 / mpi_size);
+ count[0] = 2;
+ count[1] = 1;
+ stride[0] = (hsize_t)mpi_rank * block[0];
+ stride[1] = 1;
+ start[0] = 0;
+ start[1] = (hsize_t)mpi_rank * block[1];
+ }
+
+ test_name = "Multi Chunk - Mixed";
+ actual_chunk_opt_mode_expected = H5D_MPIO_MULTI_CHUNK;
+ actual_io_mode_expected = H5D_MPIO_CHUNK_MIXED;
+ break;
+
+ /* RESET tests that the properties are properly reset to defaults each time I/O is
+ * performed. To achieve this, we have RESET perform collective I/O (which would change
+ * the values from the defaults) followed by independent I/O (which should report the
+ * default values). RESET doesn't need to have a unique selection, so we reuse
+ * MULTI_CHUNK_MIX_DISAGREE, which was chosen because it is a complex case that works
+ * on all builds. The independent section of RESET can be found at the end of this function.
+ */
+ case TEST_ACTUAL_IO_RESET:
+
+ /* Mixed I/O with optimization and internal disagreement */
+ case TEST_ACTUAL_IO_MULTI_CHUNK_MIX_DISAGREE:
+ /* A chunk will be assigned collective I/O only if it is selected by each
+ * process. To get mixed I/O with disagreement, assign process n to the
+ * first chunk and the nth chunk. The first chunk, selected by all, is
+ * assigned collective I/O, while the other chunks are assigned independent I/O.
+ * Since the root process will only access the first chunk, it will report
+ * collective I/O. The subsequent processes will access the first chunk
+ * collectively, and their other chunk independently, reporting mixed I/O.
+ */
+
+ if (mpi_rank == 0) {
+ /* Select the first chunk in the first column */
+ slab_set(mpi_rank, mpi_size, start, count, stride, block, BYCOL);
+ block[0] = block[0] / (hsize_t)mpi_size;
+ }
+ else {
+ /* Select the first and the nth chunk in the nth column */
+ block[0] = (hsize_t)(dim0 / mpi_size);
+ block[1] = (hsize_t)(dim1 / mpi_size);
+ count[0] = 2;
+ count[1] = 1;
+ stride[0] = (hsize_t)mpi_rank * block[0];
+ stride[1] = 1;
+ start[0] = 0;
+ start[1] = (hsize_t)mpi_rank * block[1];
+ }
+
+ /* If the testname was not already set by the RESET case */
+ if (selection_mode == TEST_ACTUAL_IO_RESET)
+ test_name = "RESET";
+ else
+ test_name = "Multi Chunk - Mixed (Disagreement)";
+
+ actual_chunk_opt_mode_expected = H5D_MPIO_MULTI_CHUNK;
+ if (mpi_size > 1) {
+ if (mpi_rank == 0)
+ actual_io_mode_expected = H5D_MPIO_CHUNK_COLLECTIVE;
+ else
+ actual_io_mode_expected = H5D_MPIO_CHUNK_MIXED;
+ }
+ else
+ actual_io_mode_expected = H5D_MPIO_CHUNK_INDEPENDENT;
+
+ break;
+
+ /* Linked Chunk I/O */
+ case TEST_ACTUAL_IO_LINK_CHUNK:
+ /* Nothing special; link chunk I/O is forced in the dxpl settings. */
+ slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW);
+
+ test_name = "Link Chunk";
+ actual_chunk_opt_mode_expected = H5D_MPIO_LINK_CHUNK;
+ actual_io_mode_expected = H5D_MPIO_CHUNK_COLLECTIVE;
+ break;
+
+ /* Contiguous Dataset */
+ case TEST_ACTUAL_IO_CONTIGUOUS:
+ /* A non overlapping, regular selection in a contiguous dataset leads to
+ * collective I/O */
+ slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW);
+
+ test_name = "Contiguous";
+ actual_chunk_opt_mode_expected = H5D_MPIO_NO_CHUNK_OPTIMIZATION;
+ actual_io_mode_expected = H5D_MPIO_CONTIGUOUS_COLLECTIVE;
+ break;
+
+ case TEST_ACTUAL_IO_NO_COLLECTIVE:
+ slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW);
+
+ test_name = "Independent";
+ actual_chunk_opt_mode_expected = H5D_MPIO_NO_CHUNK_OPTIMIZATION;
+ actual_io_mode_expected = H5D_MPIO_NO_COLLECTIVE;
+ break;
+
+ default:
+ test_name = "Undefined Selection Mode";
+ actual_chunk_opt_mode_expected = H5D_MPIO_NO_CHUNK_OPTIMIZATION;
+ actual_io_mode_expected = H5D_MPIO_NO_COLLECTIVE;
+ break;
+ }
+
+ ret = H5Sselect_hyperslab(file_space, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "H5Sselect_hyperslab succeeded");
+
+ /* Create a memory dataspace mirroring the dataset and select the same hyperslab
+ * as in the file space.
+ */
+ mem_space = H5Screate_simple(RANK, dims, NULL);
+ VRFY((mem_space >= 0), "mem_space created");
+
+ ret = H5Sselect_hyperslab(mem_space, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "H5Sselect_hyperslab succeeded");
+
+ /* Size the buffer for the entire dataset; the memory dataspace mirrors the
+ * dataset and only the hyperslab selection is actually transferred */
+ length = dim0 * dim1;
+
+ /* Allocate and initialize the buffer */
+ buffer = (int *)HDmalloc(sizeof(int) * (size_t)length);
+ VRFY((buffer != NULL), "HDmalloc of buffer succeeded");
+ for (i = 0; i < length; i++)
+ buffer[i] = i;
+
+ /* Set up the dxpl for the write */
+ dxpl_write = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((dxpl_write >= 0), "H5Pcreate(H5P_DATASET_XFER) succeeded");
+
+ /* Set collective I/O properties in the dxpl. */
+ if (is_collective) {
+ /* Request collective I/O */
+ ret = H5Pset_dxpl_mpio(dxpl_write, H5FD_MPIO_COLLECTIVE);
+ VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
+
+ /* Set the threshold number of processes per chunk to twice mpi_size.
+ * This will prevent the threshold from ever being met, thus forcing
+ * multi-chunk I/O instead of link-chunk I/O. This exercises the default,
+ * threshold-based path to multi-chunk I/O.
+ */
+ if (multi_chunk_io) {
+ /* force multi-chunk-io by threshold */
+ ret = H5Pset_dxpl_mpio_chunk_opt_num(dxpl_write, (unsigned)mpi_size * 2);
+ VRFY((ret >= 0), "H5Pset_dxpl_mpio_chunk_opt_num succeeded");
+
+ /* set this to manipulate the testing scenario for assigning processes
+ * to chunks */
+ ret = H5Pset_dxpl_mpio_chunk_opt_ratio(dxpl_write, (unsigned)99);
+ VRFY((ret >= 0), "H5Pset_dxpl_mpio_chunk_opt_ratio succeeded");
+ }
+
+ /* Go directly to multi-chunk I/O without the threshold calculation. */
+ if (direct_multi_chunk_io) {
+ /* set multi-chunk I/O directly via the property */
+ ret = H5Pset_dxpl_mpio_chunk_opt(dxpl_write, H5FD_MPIO_CHUNK_MULTI_IO);
+ VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
+ }
+ }
+
+ /* Make a copy of the dxpl to test the read operation */
+ dxpl_read = H5Pcopy(dxpl_write);
+ VRFY((dxpl_read >= 0), "H5Pcopy succeeded");
+
+ /* Write */
+ ret = H5Dwrite(dataset, data_type, mem_space, file_space, dxpl_write, buffer);
+ if (ret < 0)
+ H5Eprint2(H5E_DEFAULT, stdout);
+ VRFY((ret >= 0), "H5Dwrite() dataset multichunk write succeeded");
+
+ /* Retrieve Actual io values */
+ ret = H5Pget_mpio_actual_io_mode(dxpl_write, &actual_io_mode_write);
+ VRFY((ret >= 0), "retrieving actual io mode succeeded");
+
+ ret = H5Pget_mpio_actual_chunk_opt_mode(dxpl_write, &actual_chunk_opt_mode_write);
+ VRFY((ret >= 0), "retrieving actual chunk opt mode succeeded");
+
+ /* Read */
+ ret = H5Dread(dataset, data_type, mem_space, file_space, dxpl_read, buffer);
+ if (ret < 0)
+ H5Eprint2(H5E_DEFAULT, stdout);
+ VRFY((ret >= 0), "H5Dread() dataset multichunk read succeeded");
+
+ /* Retrieve Actual io values */
+ ret = H5Pget_mpio_actual_io_mode(dxpl_read, &actual_io_mode_read);
+ VRFY((ret >= 0), "retrieving actual io mode succeeded");
+
+ ret = H5Pget_mpio_actual_chunk_opt_mode(dxpl_read, &actual_chunk_opt_mode_read);
+ VRFY((ret >= 0), "retrieving actual chunk opt mode succeeded");
+
+ /* Check write vs read */
+ VRFY((actual_io_mode_read == actual_io_mode_write),
+ "reading and writing are the same for actual_io_mode");
+ VRFY((actual_chunk_opt_mode_read == actual_chunk_opt_mode_write),
+ "reading and writing are the same for actual_chunk_opt_mode");
+
+ /* Test values */
+ if (actual_chunk_opt_mode_expected != (H5D_mpio_actual_chunk_opt_mode_t)-1 &&
+ actual_io_mode_expected != (H5D_mpio_actual_io_mode_t)-1) {
+ HDsnprintf(message, sizeof(message), "Actual Chunk Opt Mode has the correct value for %s.\n",
+ test_name);
+ VRFY((actual_chunk_opt_mode_write == actual_chunk_opt_mode_expected), message);
+ HDsnprintf(message, sizeof(message), "Actual IO Mode has the correct value for %s.\n", test_name);
+ VRFY((actual_io_mode_write == actual_io_mode_expected), message);
+ }
+ else {
+ HDfprintf(stderr, "%s %d -> (%d,%d)\n", test_name, mpi_rank, actual_chunk_opt_mode_write,
+ actual_io_mode_write);
+ }
+
+ /* To test that the property is successfully reset to the default, we perform some
+ * independent I/O after the collective I/O
+ */
+ if (selection_mode == TEST_ACTUAL_IO_RESET) {
+ if (mpi_rank == 0) {
+ /* Switch to independent io */
+ ret = H5Pset_dxpl_mpio(dxpl_write, H5FD_MPIO_INDEPENDENT);
+ VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
+ ret = H5Pset_dxpl_mpio(dxpl_read, H5FD_MPIO_INDEPENDENT);
+ VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
+
+ /* Write */
+ ret = H5Dwrite(dataset, data_type, H5S_ALL, H5S_ALL, dxpl_write, buffer);
+ VRFY((ret >= 0), "H5Dwrite() dataset multichunk write succeeded");
+
+ /* Check Properties */
+ ret = H5Pget_mpio_actual_io_mode(dxpl_write, &actual_io_mode_write);
+ VRFY((ret >= 0), "retrieving actual io mode succeeded");
+ ret = H5Pget_mpio_actual_chunk_opt_mode(dxpl_write, &actual_chunk_opt_mode_write);
+ VRFY((ret >= 0), "retrieving actual chunk opt mode succeeded");
+
+ VRFY(actual_chunk_opt_mode_write == H5D_MPIO_NO_CHUNK_OPTIMIZATION,
+ "actual_chunk_opt_mode has correct value for reset write (independent)");
+ VRFY(actual_io_mode_write == H5D_MPIO_NO_COLLECTIVE,
+ "actual_io_mode has correct value for reset write (independent)");
+
+ /* Read */
+ ret = H5Dread(dataset, data_type, H5S_ALL, H5S_ALL, dxpl_read, buffer);
+ VRFY((ret >= 0), "H5Dread() dataset multichunk read succeeded");
+
+ /* Check Properties */
+ ret = H5Pget_mpio_actual_io_mode(dxpl_read, &actual_io_mode_read);
+ VRFY((ret >= 0), "retrieving actual io mode succeeded");
+ ret = H5Pget_mpio_actual_chunk_opt_mode(dxpl_read, &actual_chunk_opt_mode_read);
+ VRFY((ret >= 0), "retrieving actual chunk opt mode succeeded");
+
+ VRFY(actual_chunk_opt_mode_read == H5D_MPIO_NO_CHUNK_OPTIMIZATION,
+ "actual_chunk_opt_mode has correct value for reset read (independent)");
+ VRFY(actual_io_mode_read == H5D_MPIO_NO_COLLECTIVE,
+ "actual_io_mode has correct value for reset read (independent)");
+ }
+ }
+
+ /* Release some resources */
+ ret = H5Sclose(sid);
+ VRFY((ret >= 0), "H5Sclose succeeded");
+ ret = H5Pclose(fapl);
+ VRFY((ret >= 0), "H5Pclose succeeded");
+ ret = H5Pclose(dcpl);
+ VRFY((ret >= 0), "H5Pclose succeeded");
+ ret = H5Pclose(dxpl_write);
+ VRFY((ret >= 0), "H5Pclose succeeded");
+ ret = H5Pclose(dxpl_read);
+ VRFY((ret >= 0), "H5Pclose succeeded");
+ ret = H5Dclose(dataset);
+ VRFY((ret >= 0), "H5Dclose succeeded");
+ ret = H5Sclose(mem_space);
+ VRFY((ret >= 0), "H5Sclose succeeded");
+ ret = H5Sclose(file_space);
+ VRFY((ret >= 0), "H5Sclose succeeded");
+ ret = H5Fclose(fid);
+ VRFY((ret >= 0), "H5Fclose succeeded");
+ HDfree(buffer);
+ return;
+}
+
+/* Function: actual_io_mode_tests
+ *
+ * Purpose: Tests all possible cases of the actual_io_mode property.
+ *
+ * Programmer: Jacob Gruber
+ * Date: 2011-04-06
+ */
+void
+actual_io_mode_tests(void)
+{
+ int mpi_size = -1;
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+
+ /* Only run these tests if selection I/O is not being used - selection I/O
+ * bypasses this IO mode decision - it's effectively always multi chunk
+ * currently */
+ if (/* !H5_use_selection_io_g */ TRUE) {
+ test_actual_io_mode(TEST_ACTUAL_IO_NO_COLLECTIVE);
+
+ /*
+ * Test multi-chunk-io via proc_num threshold
+ */
+ test_actual_io_mode(TEST_ACTUAL_IO_MULTI_CHUNK_IND);
+ test_actual_io_mode(TEST_ACTUAL_IO_MULTI_CHUNK_COL);
+
+ /* The Multi Chunk Mixed test requires at least three processes. */
+ if (mpi_size > 2)
+ test_actual_io_mode(TEST_ACTUAL_IO_MULTI_CHUNK_MIX);
+ else
+ HDfprintf(stdout, "Multi Chunk Mixed test requires 3 processes minimum\n");
+
+ test_actual_io_mode(TEST_ACTUAL_IO_MULTI_CHUNK_MIX_DISAGREE);
+
+ /*
+ * Test multi-chunk-io via setting direct property
+ */
+ test_actual_io_mode(TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_IND);
+ test_actual_io_mode(TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_COL);
+
+ test_actual_io_mode(TEST_ACTUAL_IO_LINK_CHUNK);
+ test_actual_io_mode(TEST_ACTUAL_IO_CONTIGUOUS);
+
+ test_actual_io_mode(TEST_ACTUAL_IO_RESET);
+ }
+
+ return;
+}
+
+/*
+ * Function: test_no_collective_cause_mode
+ *
+ * Purpose:
+ * tests cases for broken collective I/O and checks that the
+ * H5Pget_mpio_no_collective_cause properties in the DXPL have the correct values.
+ *
+ * Input:
+ * selection_mode: various mode to cause broken collective I/O
+ * Note: Originally, each TEST case was meant to be used alone.
+ * After some discussion, this was updated so that multiple TEST cases
+ * can be combined with '|'. However, there is no error checking for
+ * combined test cases, so the tester is responsible for supplying a
+ * valid combination of TESTs when needed.
+ *
+ *
+ * TEST_COLLECTIVE:
+ * Test for regular collective I/O without cause of breaking.
+ * Just to test normal behavior.
+ *
+ * TEST_SET_INDEPENDENT:
+ * Test for Independent I/O as the cause of breaking collective I/O.
+ *
+ * TEST_DATATYPE_CONVERSION:
+ * Test for Data Type Conversion as the cause of breaking collective I/O.
+ *
+ * TEST_DATA_TRANSFORMS:
+ * Test for Data Transform feature as the cause of breaking collective I/O.
+ *
+ * TEST_NOT_SIMPLE_OR_SCALAR_DATASPACES:
+ * Test for NULL dataspace as the cause of breaking collective I/O.
+ *
+ * TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_COMPACT:
+ * Test for Compact layout as the cause of breaking collective I/O.
+ *
+ * TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_EXTERNAL:
+ * Test for External-File storage as the cause of breaking collective I/O.
+ *
+ * Programmer: Jonathan Kim
+ * Date: Aug, 2012
+ */
+#ifdef LATER
+#define DSET_NOCOLCAUSE "nocolcause"
+#endif
+#define FILE_EXTERNAL "nocolcause_extern.data"
+static void
+test_no_collective_cause_mode(int selection_mode)
+{
+ uint32_t no_collective_cause_local_write = 0;
+ uint32_t no_collective_cause_local_read = 0;
+ uint32_t no_collective_cause_local_expected = 0;
+ uint32_t no_collective_cause_global_write = 0;
+ uint32_t no_collective_cause_global_read = 0;
+ uint32_t no_collective_cause_global_expected = 0;
+
+ const char *filename;
+ const char *test_name;
+ hbool_t is_chunked = 1;
+ hbool_t is_independent = 0;
+ int mpi_size = -1;
+ int mpi_rank = -1;
+ int length;
+ int *buffer;
+ int i;
+ MPI_Comm mpi_comm;
+ MPI_Info mpi_info;
+ hid_t fid = -1;
+ hid_t sid = -1;
+ hid_t dataset = -1;
+ hid_t data_type = H5T_NATIVE_INT;
+ hid_t fapl = -1;
+ hid_t dcpl = -1;
+ hid_t dxpl_write = -1;
+ hid_t dxpl_read = -1;
+ hsize_t dims[RANK];
+ hid_t mem_space = -1;
+ hid_t file_space = -1;
+ hsize_t chunk_dims[RANK];
+ herr_t ret;
+ /* set to global value as default */
+ int l_facc_type = facc_type;
+ char message[256];
+
+ /* Set up MPI parameters */
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ MPI_Barrier(MPI_COMM_WORLD);
+
+ HDassert(mpi_size >= 1);
+
+ mpi_comm = MPI_COMM_WORLD;
+ mpi_info = MPI_INFO_NULL;
+
+ /* Create the dataset creation plist */
+ dcpl = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY((dcpl >= 0), "dataset creation plist created successfully");
+
+ if (selection_mode & TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_COMPACT) {
+ ret = H5Pset_layout(dcpl, H5D_COMPACT);
+ VRFY((ret >= 0), "set COMPACT layout succeeded");
+ is_chunked = 0;
+ }
+
+ if (selection_mode & TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_EXTERNAL) {
+ ret = H5Pset_external(dcpl, FILE_EXTERNAL, (off_t)0, H5F_UNLIMITED);
+ VRFY((ret >= 0), "set EXTERNAL file layout succeeded");
+ is_chunked = 0;
+ }
+
+ if (selection_mode & TEST_NOT_SIMPLE_OR_SCALAR_DATASPACES) {
+ sid = H5Screate(H5S_NULL);
+ VRFY((sid >= 0), "H5Screate succeeded");
+ is_chunked = 0;
+ }
+ else {
+ /* Create the basic Space */
+ /* if this is a compact dataset, create a small dataspace that does not exceed 64K */
+ if (selection_mode & TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_COMPACT) {
+ dims[0] = ROW_FACTOR * 6;
+ dims[1] = COL_FACTOR * 6;
+ }
+ else {
+ dims[0] = (hsize_t)dim0;
+ dims[1] = (hsize_t)dim1;
+ }
+ sid = H5Screate_simple(RANK, dims, NULL);
+ VRFY((sid >= 0), "H5Screate_simple succeeded");
+ }
+
+ filename = (const char *)PARATESTFILE /* GetTestParameters() */;
+ HDassert(filename != NULL);
+
+ /* Setup the file access template */
+ fapl = create_faccess_plist(mpi_comm, mpi_info, l_facc_type);
+ VRFY((fapl >= 0), "create_faccess_plist() succeeded");
+
+ /* Create the file */
+ fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
+
+ VRFY((fid >= 0), "H5Fcreate succeeded");
+
+ /* If we are not testing contiguous datasets */
+ if (is_chunked) {
+ /* Set up chunk information. */
+ chunk_dims[0] = dims[0] / (hsize_t)mpi_size;
+ chunk_dims[1] = dims[1];
+ ret = H5Pset_chunk(dcpl, 2, chunk_dims);
+ VRFY((ret >= 0), "chunk creation property list succeeded");
+ }
+
+ /* Create the dataset */
+ dataset = H5Dcreate2(fid, "nocolcause", data_type, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT);
+ VRFY((dataset >= 0), "H5Dcreate2() dataset succeeded");
+
+ /*
+ * Set expected causes and some tweaks based on the type of test
+ */
+ if (selection_mode & TEST_DATATYPE_CONVERSION) {
+ test_name = "Broken Collective I/O - Datatype Conversion";
+ no_collective_cause_local_expected |= H5D_MPIO_DATATYPE_CONVERSION;
+ no_collective_cause_global_expected |= H5D_MPIO_DATATYPE_CONVERSION;
+ /* set different sign to trigger type conversion */
+ data_type = H5T_NATIVE_UINT;
+ }
+
+ if (selection_mode & TEST_DATA_TRANSFORMS) {
+ test_name = "Broken Collective I/O - DATA Transforms";
+ no_collective_cause_local_expected |= H5D_MPIO_DATA_TRANSFORMS;
+ no_collective_cause_global_expected |= H5D_MPIO_DATA_TRANSFORMS;
+ }
+
+ if (selection_mode & TEST_NOT_SIMPLE_OR_SCALAR_DATASPACES) {
+ test_name = "Broken Collective I/O - No Simple or Scalar DataSpace";
+ no_collective_cause_local_expected |= H5D_MPIO_NOT_SIMPLE_OR_SCALAR_DATASPACES;
+ no_collective_cause_global_expected |= H5D_MPIO_NOT_SIMPLE_OR_SCALAR_DATASPACES;
+ }
+
+ if (selection_mode & TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_COMPACT ||
+ selection_mode & TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_EXTERNAL) {
+ test_name = "Broken Collective I/O - No CONTI or CHUNKED Dataset";
+ no_collective_cause_local_expected |= H5D_MPIO_NOT_CONTIGUOUS_OR_CHUNKED_DATASET;
+ no_collective_cause_global_expected |= H5D_MPIO_NOT_CONTIGUOUS_OR_CHUNKED_DATASET;
+ }
+
+ if (selection_mode & TEST_COLLECTIVE) {
+ test_name = "Broken Collective I/O - Not Broken";
+ no_collective_cause_local_expected = H5D_MPIO_COLLECTIVE;
+ no_collective_cause_global_expected = H5D_MPIO_COLLECTIVE;
+ }
+
+ if (selection_mode & TEST_SET_INDEPENDENT) {
+ test_name = "Broken Collective I/O - Independent";
+ no_collective_cause_local_expected = H5D_MPIO_SET_INDEPENDENT;
+ no_collective_cause_global_expected = H5D_MPIO_SET_INDEPENDENT;
+ /* switch to independent io */
+ is_independent = 1;
+ }
+
+ /* use the entire dataspace (H5S_ALL) for the NULL-dataspace and external-storage cases */
+ if (selection_mode & TEST_NOT_SIMPLE_OR_SCALAR_DATASPACES ||
+ selection_mode & TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_EXTERNAL) {
+ file_space = H5S_ALL;
+ mem_space = H5S_ALL;
+ }
+ else {
+ /* Get the file dataspace */
+ file_space = H5Dget_space(dataset);
+ VRFY((file_space >= 0), "H5Dget_space succeeded");
+
+ /* Create the memory dataspace */
+ mem_space = H5Screate_simple(RANK, dims, NULL);
+ VRFY((mem_space >= 0), "mem_space created");
+ }
+
+ /* Get the number of elements in the selection */
+ length = (int)(dims[0] * dims[1]);
+
+ /* Allocate and initialize the buffer */
+ buffer = (int *)HDmalloc(sizeof(int) * (size_t)length);
+ VRFY((buffer != NULL), "HDmalloc of buffer succeeded");
+ for (i = 0; i < length; i++)
+ buffer[i] = i;
+
+ /* Set up the dxpl for the write */
+ dxpl_write = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((dxpl_write >= 0), "H5Pcreate(H5P_DATASET_XFER) succeeded");
+
+ if (is_independent) {
+ /* Set Independent I/O */
+ ret = H5Pset_dxpl_mpio(dxpl_write, H5FD_MPIO_INDEPENDENT);
+ VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
+ }
+ else {
+ /* Set Collective I/O */
+ ret = H5Pset_dxpl_mpio(dxpl_write, H5FD_MPIO_COLLECTIVE);
+ VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
+ }
+
+ if (selection_mode & TEST_DATA_TRANSFORMS) {
+ ret = H5Pset_data_transform(dxpl_write, "x+1");
+ VRFY((ret >= 0), "H5Pset_data_transform succeeded");
+ }
+
+ /*---------------------
+ * Test Write access
+ *---------------------*/
+
+ /* Write */
+ ret = H5Dwrite(dataset, data_type, mem_space, file_space, dxpl_write, buffer);
+ if (ret < 0)
+ H5Eprint2(H5E_DEFAULT, stdout);
+ VRFY((ret >= 0), "H5Dwrite() dataset multichunk write succeeded");
+
+ /* Get the cause of broken collective I/O */
+ ret = H5Pget_mpio_no_collective_cause(dxpl_write, &no_collective_cause_local_write,
+ &no_collective_cause_global_write);
+ VRFY((ret >= 0), "retrieving no collective cause succeeded");
+
+ /*---------------------
+ * Test Read access
+ *---------------------*/
+
+ /* Make a copy of the dxpl to test the read operation */
+ dxpl_read = H5Pcopy(dxpl_write);
+ VRFY((dxpl_read >= 0), "H5Pcopy succeeded");
+
+ /* Read */
+ ret = H5Dread(dataset, data_type, mem_space, file_space, dxpl_read, buffer);
+
+ if (ret < 0)
+ H5Eprint2(H5E_DEFAULT, stdout);
+ VRFY((ret >= 0), "H5Dread() dataset multichunk read succeeded");
+
+ /* Get the cause of broken collective I/O */
+ ret = H5Pget_mpio_no_collective_cause(dxpl_read, &no_collective_cause_local_read,
+ &no_collective_cause_global_read);
+ VRFY((ret >= 0), "retrieving no collective cause succeeded");
+
+ /* Check write vs read */
+ VRFY((no_collective_cause_local_read == no_collective_cause_local_write),
+ "reading and writing are the same for local cause of Broken Collective I/O");
+ VRFY((no_collective_cause_global_read == no_collective_cause_global_write),
+ "reading and writing are the same for global cause of Broken Collective I/O");
+
+ /* Test values */
+ HDmemset(message, 0, sizeof(message));
+ HDsnprintf(message, sizeof(message),
+ "Local cause of Broken Collective I/O has the correct value for %s.\n", test_name);
+ VRFY((no_collective_cause_local_write == no_collective_cause_local_expected), message);
+ HDmemset(message, 0, sizeof(message));
+ HDsnprintf(message, sizeof(message),
+ "Global cause of Broken Collective I/O has the correct value for %s.\n", test_name);
+ VRFY((no_collective_cause_global_write == no_collective_cause_global_expected), message);
+
+ /* Release some resources */
+ if (sid)
+ H5Sclose(sid);
+ if (dcpl)
+ H5Pclose(dcpl);
+ if (dxpl_write)
+ H5Pclose(dxpl_write);
+ if (dxpl_read)
+ H5Pclose(dxpl_read);
+ if (dataset)
+ H5Dclose(dataset);
+ if (mem_space)
+ H5Sclose(mem_space);
+ if (file_space)
+ H5Sclose(file_space);
+ if (fid)
+ H5Fclose(fid);
+ HDfree(buffer);
+
+ /* clean up external file */
+ if (selection_mode & TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_EXTERNAL)
+ H5Fdelete(FILE_EXTERNAL, fapl);
+
+ if (fapl)
+ H5Pclose(fapl);
+
+ return;
+}
+
+/* Function: no_collective_cause_tests
+ *
+ * Purpose: Test cases for broken collective I/O.
+ *
+ * Programmer: Jonathan Kim
+ * Date: Aug, 2012
+ */
+void
+no_collective_cause_tests(void)
+{
+ /*
+ * Test individual cause
+ */
+ test_no_collective_cause_mode(TEST_COLLECTIVE);
+ test_no_collective_cause_mode(TEST_SET_INDEPENDENT);
+ test_no_collective_cause_mode(TEST_DATATYPE_CONVERSION);
+ test_no_collective_cause_mode(TEST_DATA_TRANSFORMS);
+ test_no_collective_cause_mode(TEST_NOT_SIMPLE_OR_SCALAR_DATASPACES);
+ test_no_collective_cause_mode(TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_COMPACT);
+ test_no_collective_cause_mode(TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_EXTERNAL);
+
+ /*
+ * Test combined causes
+ */
+ test_no_collective_cause_mode(TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_EXTERNAL | TEST_DATATYPE_CONVERSION);
+ test_no_collective_cause_mode(TEST_DATATYPE_CONVERSION | TEST_DATA_TRANSFORMS);
+ test_no_collective_cause_mode(TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_EXTERNAL | TEST_DATATYPE_CONVERSION |
+ TEST_DATA_TRANSFORMS);
+
+ return;
+}
+
+/*
+ * Test consistency semantics of atomic mode
+ */
+
+/*
+ * Example of using the parallel HDF5 library to create a dataset,
+ * where process 0 writes and the other processes read at the same
+ * time. If atomic mode is set correctly, the other processes should
+ * read either the old values in the dataset or the new ones, never a
+ * mix of the two.
+ */
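+
+/*
+ * For reference, a minimal sketch (disabled) of the atomic-mode call sequence
+ * exercised below. The helper name is illustrative only; `fid` is assumed to
+ * be a file opened with the MPI-IO VFD and error handling is omitted.
+ */
+#if 0
+static void
+atomicity_usage_sketch(hid_t fid)
+{
+ hbool_t flag = FALSE;
+
+ /* Enable MPI atomicity on the parallel file */
+ H5Fset_mpi_atomicity(fid, TRUE);
+
+ /* Confirm the setting took effect */
+ H5Fget_mpi_atomicity(fid, &flag);
+ HDassert(flag == TRUE);
+
+ /* With atomicity enabled, a concurrent read on another rank observes either
+ * all or none of a conflicting write; an MPI_Barrier() after the write
+ * guarantees that later reads see it in full. */
+}
+#endif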
+
+void
+dataset_atomicity(void)
+{
+ hid_t fid; /* HDF5 file ID */
+ hid_t acc_tpl; /* File access templates */
+ hid_t sid; /* Dataspace ID */
+ hid_t dataset1; /* Dataset IDs */
+ hsize_t dims[RANK]; /* dataset dim sizes */
+ int *write_buf = NULL; /* data buffer */
+ int *read_buf = NULL; /* data buffer */
+ int buf_size;
+ hid_t dataset2;
+ hid_t file_dataspace; /* File dataspace ID */
+ hid_t mem_dataspace; /* Memory dataspace ID */
+ hsize_t start[RANK];
+ hsize_t stride[RANK];
+ hsize_t count[RANK];
+ hsize_t block[RANK];
+ const char *filename;
+ herr_t ret; /* Generic return value */
+ int mpi_size, mpi_rank;
+ int i, j, k;
+ hbool_t atomicity = FALSE;
+ MPI_Comm comm = MPI_COMM_WORLD;
+ MPI_Info info = MPI_INFO_NULL;
+
+ dim0 = 64;
+ dim1 = 32;
+ filename = PARATESTFILE /* GetTestParameters() */;
+ if (facc_type != FACC_MPIO) {
+ HDprintf("Atomicity tests will not work without the MPIO VFD\n");
+ return;
+ }
+ if (VERBOSE_MED)
+ HDprintf("atomic writes to file %s\n", filename);
+
+ /* set up MPI parameters */
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(" API functions for basic file, basic dataset, or more aren't supported with this "
+ "connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
+
+ buf_size = dim0 * dim1;
+ /* allocate memory for data buffer */
+ write_buf = (int *)HDcalloc((size_t)buf_size, sizeof(int));
+ VRFY((write_buf != NULL), "write_buf HDcalloc succeeded");
+ /* allocate memory for data buffer */
+ read_buf = (int *)HDcalloc((size_t)buf_size, sizeof(int));
+ VRFY((read_buf != NULL), "read_buf HDcalloc succeeded");
+
+ /* setup file access template */
+ acc_tpl = create_faccess_plist(comm, info, facc_type);
+ VRFY((acc_tpl >= 0), "");
+
+ /* create the file collectively */
+ fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, acc_tpl);
+ VRFY((fid >= 0), "H5Fcreate succeeded");
+
+ /* Release file-access template */
+ ret = H5Pclose(acc_tpl);
+ VRFY((ret >= 0), "H5Pclose succeeded");
+
+ /* setup dimensionality object */
+ dims[0] = (hsize_t)dim0;
+ dims[1] = (hsize_t)dim1;
+ sid = H5Screate_simple(RANK, dims, NULL);
+ VRFY((sid >= 0), "H5Screate_simple succeeded");
+
+ /* create datasets */
+ dataset1 = H5Dcreate2(fid, DATASETNAME5, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ VRFY((dataset1 >= 0), "H5Dcreate2 succeeded");
+
+ dataset2 = H5Dcreate2(fid, DATASETNAME6, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ VRFY((dataset2 >= 0), "H5Dcreate2 succeeded");
+
+ /* initialize datasets to 0s */
+ if (0 == mpi_rank) {
+ ret = H5Dwrite(dataset1, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, write_buf);
+ VRFY((ret >= 0), "H5Dwrite dataset1 succeeded");
+
+ ret = H5Dwrite(dataset2, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, write_buf);
+ VRFY((ret >= 0), "H5Dwrite dataset2 succeeded");
+ }
+
+ ret = H5Dclose(dataset1);
+ VRFY((ret >= 0), "H5Dclose succeeded");
+ ret = H5Dclose(dataset2);
+ VRFY((ret >= 0), "H5Dclose succeeded");
+ ret = H5Sclose(sid);
+ VRFY((ret >= 0), "H5Sclose succeeded");
+ ret = H5Fclose(fid);
+ VRFY((ret >= 0), "H5Fclose succeeded");
+
+ MPI_Barrier(comm);
+
+ /* make sure setting atomicity fails on a serial file ID */
+ /* file locking allows only one file open (serial) for writing */
+ if (MAINPROCESS) {
+ fid = H5Fopen(filename, H5F_ACC_RDWR, H5P_DEFAULT);
+ VRFY((fid >= 0), "H5Fopen succeeded");
+
+ /* should fail */
+ H5E_BEGIN_TRY
+ {
+ ret = H5Fset_mpi_atomicity(fid, TRUE);
+ }
+ H5E_END_TRY
+ VRFY((ret == FAIL), "H5Fset_mpi_atomicity failed");
+
+ ret = H5Fclose(fid);
+ VRFY((ret >= 0), "H5Fclose succeeded");
+ }
+
+ MPI_Barrier(comm);
+
+ /* setup file access template */
+ acc_tpl = create_faccess_plist(comm, info, facc_type);
+ VRFY((acc_tpl >= 0), "");
+
+ /* open the file collectively */
+ fid = H5Fopen(filename, H5F_ACC_RDWR, acc_tpl);
+ VRFY((fid >= 0), "H5Fopen succeeded");
+
+ /* Release file-access template */
+ ret = H5Pclose(acc_tpl);
+ VRFY((ret >= 0), "H5Pclose succeeded");
+
+ ret = H5Fset_mpi_atomicity(fid, TRUE);
+ VRFY((ret >= 0), "H5Fset_mpi_atomicity succeeded");
+
+ /* open dataset1 (contiguous case) */
+ dataset1 = H5Dopen2(fid, DATASETNAME5, H5P_DEFAULT);
+ VRFY((dataset1 >= 0), "H5Dopen2 succeeded");
+
+ if (0 == mpi_rank) {
+ for (i = 0; i < buf_size; i++) {
+ write_buf[i] = 5;
+ }
+ }
+ else {
+ for (i = 0; i < buf_size; i++) {
+ read_buf[i] = 8;
+ }
+ }
+
+ /* check that the atomicity flag is set */
+ ret = H5Fget_mpi_atomicity(fid, &atomicity);
+ VRFY((ret >= 0), "atomcity get failed");
+ VRFY((atomicity == TRUE), "atomcity set failed");
+
+ MPI_Barrier(comm);
+
+ /* Process 0 writes contiguously to the entire dataset */
+ if (0 == mpi_rank) {
+ ret = H5Dwrite(dataset1, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, write_buf);
+ VRFY((ret >= 0), "H5Dwrite dataset1 succeeded");
+ }
+ /* The other processes read the entire dataset */
+ else {
+ ret = H5Dread(dataset1, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, read_buf);
+ VRFY((ret >= 0), "H5Dwrite() dataset multichunk write succeeded");
+ }
+
+ if (VERBOSE_MED) {
+ i = 0;
+ j = 0;
+ k = 0;
+ for (i = 0; i < dim0; i++) {
+ HDprintf("\n");
+ for (j = 0; j < dim1; j++)
+ HDprintf("%d ", read_buf[k++]);
+ }
+ }
+
+ /* The processes that read the dataset must either read all values
+ as 0 (read happened before process 0 wrote to dataset 1), or 5
+ (read happened after process 0 wrote to dataset 1) */
+ if (0 != mpi_rank) {
+ int compare = read_buf[0];
+
+ VRFY((compare == 0 || compare == 5),
+ "Atomicity Test Failed: value read should be 0 or 5\n");
+ for (i = 1; i < buf_size; i++) {
+ if (read_buf[i] != compare) {
+ HDprintf("Atomicity Test Failed Process %d: read_buf[%d] is %d, should be %d\n", mpi_rank, i,
+ read_buf[i], compare);
+ nerrors++;
+ }
+ }
+ }
+
+ ret = H5Dclose(dataset1);
+ VRFY((ret >= 0), "H5D close succeeded");
+
+ /* release data buffers */
+ if (write_buf)
+ HDfree(write_buf);
+ if (read_buf)
+ HDfree(read_buf);
+
+ /* open dataset2 (non-contiguous case) */
+ dataset2 = H5Dopen2(fid, DATASETNAME6, H5P_DEFAULT);
+ VRFY((dataset2 >= 0), "H5Dopen2 succeeded");
+
+ /* allocate memory for data buffer */
+ write_buf = (int *)HDcalloc((size_t)buf_size, sizeof(int));
+ VRFY((write_buf != NULL), "write_buf HDcalloc succeeded");
+ /* allocate memory for data buffer */
+ read_buf = (int *)HDcalloc((size_t)buf_size, sizeof(int));
+ VRFY((read_buf != NULL), "read_buf HDcalloc succeeded");
+
+ for (i = 0; i < buf_size; i++) {
+ write_buf[i] = 5;
+ }
+ for (i = 0; i < buf_size; i++) {
+ read_buf[i] = 8;
+ }
+
+ atomicity = FALSE;
+ /* check that the atomicity flag is set */
+ ret = H5Fget_mpi_atomicity(fid, &atomicity);
+ VRFY((ret >= 0), "atomcity get failed");
+ VRFY((atomicity == TRUE), "atomcity set failed");
+
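+ /* Build a regular, non-contiguous hyperslab: count[0] x count[1] blocks of
+ * block[0] x block[1] elements each, separated by the stride. The same
+ * selection is used for both the file and memory dataspaces. */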
+ block[0] = (hsize_t)(dim0 / mpi_size - 1);
+ block[1] = (hsize_t)(dim1 / mpi_size - 1);
+ stride[0] = block[0] + 1;
+ stride[1] = block[1] + 1;
+ count[0] = (hsize_t)mpi_size;
+ count[1] = (hsize_t)mpi_size;
+ start[0] = 0;
+ start[1] = 0;
+
+ /* create a file dataspace */
+ file_dataspace = H5Dget_space(dataset2);
+ VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
+ ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
+
+ /* create a memory dataspace */
+ mem_dataspace = H5Screate_simple(RANK, dims, NULL);
+ VRFY((mem_dataspace >= 0), "");
+
+ ret = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
+
+ MPI_Barrier(comm);
+
+ /* Process 0 writes to the dataset */
+ if (0 == mpi_rank) {
+ ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, write_buf);
+ VRFY((ret >= 0), "H5Dwrite dataset2 succeeded");
+ }
+ /* All processes wait for the write to finish. This works because
+ atomicity is set to true */
+ MPI_Barrier(comm);
+ /* The other processes read the entire dataset */
+ if (0 != mpi_rank) {
+ ret = H5Dread(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, H5P_DEFAULT, read_buf);
+ VRFY((ret >= 0), "H5Dread dataset2 succeeded");
+ }
+
+ if (VERBOSE_MED) {
+ if (mpi_rank == 1) {
+ i = 0;
+ j = 0;
+ k = 0;
+ for (i = 0; i < dim0; i++) {
+ HDprintf("\n");
+ for (j = 0; j < dim1; j++)
+ HDprintf("%d ", read_buf[k++]);
+ }
+ HDprintf("\n");
+ }
+ }
+
+ /* The processes that read the dataset must read all values as 5,
+ since the barrier guarantees the read happened after process 0
+ wrote to dataset 2 */
+ if (0 != mpi_rank) {
+ int compare;
+ i = 0;
+ j = 0;
+ k = 0;
+
+ compare = 5;
+
+ for (i = 0; i < dim0; i++) {
+ if (i >= mpi_rank * ((int)block[0] + 1)) {
+ break;
+ }
+ if ((i + 1) % ((int)block[0] + 1) == 0) {
+ k += dim1;
+ continue;
+ }
+ for (j = 0; j < dim1; j++) {
+ if (j >= mpi_rank * ((int)block[1] + 1)) {
+ k += dim1 - mpi_rank * ((int)block[1] + 1);
+ break;
+ }
+ if ((j + 1) % ((int)block[1] + 1) == 0) {
+ k++;
+ continue;
+ }
+ else if (compare != read_buf[k]) {
+ HDprintf("Atomicity Test Failed Process %d: read_buf[%d] is %d, should be %d\n", mpi_rank,
+ k, read_buf[k], compare);
+ nerrors++;
+ }
+ k++;
+ }
+ }
+ }
+
+ ret = H5Dclose(dataset2);
+ VRFY((ret >= 0), "H5Dclose succeeded");
+ ret = H5Sclose(file_dataspace);
+ VRFY((ret >= 0), "H5Sclose succeeded");
+ ret = H5Sclose(mem_dataspace);
+ VRFY((ret >= 0), "H5Sclose succeeded");
+
+ /* release data buffers */
+ if (write_buf)
+ HDfree(write_buf);
+ if (read_buf)
+ HDfree(read_buf);
+
+ ret = H5Fclose(fid);
+ VRFY((ret >= 0), "H5Fclose succeeded");
+}
+
+/* Function: test_dense_attr
+ *
+ * Purpose: Test cases for writing dense attributes in parallel
+ *
+ * Programmer: Quincey Koziol
+ * Date: April, 2013
+ */
+void
+test_dense_attr(void)
+{
+ int mpi_size, mpi_rank;
+ hid_t fpid, fid;
+ hid_t gid, gpid;
+ hid_t atFileSpace, atid;
+ hsize_t atDims[1] = {10000};
+ herr_t status;
+ const char *filename;
+
+ /* set up MPI parameters */
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(" API functions for basic file, group, dataset, or attribute aren't supported with "
+ "this connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
+
+ /* get filename */
+ filename = (const char *)PARATESTFILE /* GetTestParameters() */;
+ HDassert(filename != NULL);
+
+ fpid = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((fpid > 0), "H5Pcreate succeeded");
+ status = H5Pset_libver_bounds(fpid, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST);
+ VRFY((status >= 0), "H5Pset_libver_bounds succeeded");
+ status = H5Pset_fapl_mpio(fpid, MPI_COMM_WORLD, MPI_INFO_NULL);
+ VRFY((status >= 0), "H5Pset_fapl_mpio succeeded");
+ fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fpid);
+ VRFY((fid > 0), "H5Fcreate succeeded");
+ status = H5Pclose(fpid);
+ VRFY((status >= 0), "H5Pclose succeeded");
+
+ gpid = H5Pcreate(H5P_GROUP_CREATE);
+ VRFY((gpid > 0), "H5Pcreate succeeded");
+ status = H5Pset_attr_phase_change(gpid, 0, 0);
+ VRFY((status >= 0), "H5Pset_attr_phase_change succeeded");
+ gid = H5Gcreate2(fid, "foo", H5P_DEFAULT, gpid, H5P_DEFAULT);
+ VRFY((gid > 0), "H5Gcreate2 succeeded");
+ status = H5Pclose(gpid);
+ VRFY((status >= 0), "H5Pclose succeeded");
+
+ atFileSpace = H5Screate_simple(1, atDims, NULL);
+ VRFY((atFileSpace > 0), "H5Screate_simple succeeded");
+ atid = H5Acreate2(gid, "bar", H5T_STD_U64LE, atFileSpace, H5P_DEFAULT, H5P_DEFAULT);
+ VRFY((atid > 0), "H5Acreate succeeded");
+ status = H5Sclose(atFileSpace);
+ VRFY((status >= 0), "H5Sclose succeeded");
+
+ status = H5Aclose(atid);
+ VRFY((status >= 0), "H5Aclose succeeded");
+
+ status = H5Gclose(gid);
+ VRFY((status >= 0), "H5Gclose succeeded");
+ status = H5Fclose(fid);
+ VRFY((status >= 0), "H5Fclose succeeded");
+
+ return;
+}
diff --git a/testpar/API/t_file.c b/testpar/API/t_file.c
new file mode 100644
index 0000000..936454a
--- /dev/null
+++ b/testpar/API/t_file.c
@@ -0,0 +1,1032 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://www.hdfgroup.org/licenses. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/*
+ * Parallel tests for file operations
+ */
+
+#include "hdf5.h"
+#include "testphdf5.h"
+
+#if 0
+#include "H5CXprivate.h" /* API Contexts */
+#include "H5Iprivate.h"
+#include "H5PBprivate.h"
+
+/*
+ * This file needs to access private information from the H5F package.
+ */
+#define H5AC_FRIEND /*suppress error about including H5ACpkg */
+#include "H5ACpkg.h"
+#define H5C_FRIEND /*suppress error about including H5Cpkg */
+#include "H5Cpkg.h"
+#define H5F_FRIEND /*suppress error about including H5Fpkg */
+#define H5F_TESTING
+#include "H5Fpkg.h"
+#define H5MF_FRIEND /*suppress error about including H5MFpkg */
+#include "H5MFpkg.h"
+#endif
+
+#define NUM_DSETS 5
+
+int mpi_size, mpi_rank;
+
+#if 0
+static int create_file(const char *filename, hid_t fcpl, hid_t fapl, int metadata_write_strategy);
+static int open_file(const char *filename, hid_t fapl, int metadata_write_strategy, hsize_t page_size,
+ size_t page_buffer_size);
+#endif
+
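+/*
+ * For reference, a minimal sketch (disabled) of the communicator split used
+ * below: ranks with the same "color" land in the same sub-communicator and
+ * the "key" orders ranks within it. The helper name is illustrative only.
+ */
+#if 0
+static void
+split_comm_sketch(void)
+{
+ MPI_Comm sub_comm = MPI_COMM_NULL;
+ int rank, color;
+
+ MPI_Comm_rank(MPI_COMM_WORLD, &rank);
+ color = rank % 2; /* 0 = even ranks, 1 = odd ranks */
+ MPI_Comm_split(MPI_COMM_WORLD, color, rank, &sub_comm);
+
+ /* Collective HDF5 calls made with a fapl built on sub_comm involve only
+ * the ranks in that half of COMM_WORLD. */
+
+ MPI_Comm_free(&sub_comm);
+}
+#endif
+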
+/*
+ * Test file access by a communicator other than COMM_WORLD.
+ * Split COMM_WORLD into two: one (even_comm) contains the original
+ * processes of even ranks, the other (odd_comm) contains the original
+ * processes of odd ranks. Processes in even_comm create a file and then
+ * close it, using even_comm. Processes in odd_comm just do a barrier
+ * using odd_comm. Then they all do a barrier using COMM_WORLD.
+ * If the file creation and close do not perform the correct collective
+ * actions according to the communicator argument, the processes will
+ * freeze up sooner or later due to mismatched barriers.
+ */
+void
+test_split_comm_access(void)
+{
+ MPI_Comm comm;
+ MPI_Info info = MPI_INFO_NULL;
+ int is_old, mrc;
+ int newrank, newprocs;
+ hid_t fid; /* file IDs */
+ hid_t acc_tpl; /* File access properties */
+ herr_t ret; /* generic return value */
+ const char *filename;
+
+ filename = (const char *)PARATESTFILE /* GetTestParameters()*/;
+ if (VERBOSE_MED)
+ HDprintf("Split Communicator access test on file %s\n", filename);
+
+ /* set up MPI parameters */
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(" API functions for basic file aren't supported with this connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
+
+ is_old = mpi_rank % 2;
+ mrc = MPI_Comm_split(MPI_COMM_WORLD, is_old, mpi_rank, &comm);
+ VRFY((mrc == MPI_SUCCESS), "");
+ MPI_Comm_size(comm, &newprocs);
+ MPI_Comm_rank(comm, &newrank);
+
+ if (is_old) {
+ /* odd-rank processes */
+ mrc = MPI_Barrier(comm);
+ VRFY((mrc == MPI_SUCCESS), "");
+ }
+ else {
+ /* even-rank processes */
+ int sub_mpi_rank; /* rank in the sub-comm */
+ MPI_Comm_rank(comm, &sub_mpi_rank);
+
+ /* setup file access template */
+ acc_tpl = create_faccess_plist(comm, info, facc_type);
+ VRFY((acc_tpl >= 0), "");
+
+ /* create the file collectively */
+ fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, acc_tpl);
+ VRFY((fid >= 0), "H5Fcreate succeeded");
+
+ /* close the file */
+ ret = H5Fclose(fid);
+ VRFY((ret >= 0), "");
+
+ /* delete the test file */
+ ret = H5Fdelete(filename, acc_tpl);
+ VRFY((ret >= 0), "H5Fdelete succeeded");
+
+ /* Release file-access template */
+ ret = H5Pclose(acc_tpl);
+ VRFY((ret >= 0), "");
+ }
+ mrc = MPI_Comm_free(&comm);
+ VRFY((mrc == MPI_SUCCESS), "MPI_Comm_free succeeded");
+ mrc = MPI_Barrier(MPI_COMM_WORLD);
+ VRFY((mrc == MPI_SUCCESS), "final MPI_Barrier succeeded");
+}
+
+#if 0
+void
+test_page_buffer_access(void)
+{
+ hid_t file_id = -1; /* File ID */
+ hid_t fcpl, fapl;
+ size_t page_count = 0;
+ int i, num_elements = 200;
+ haddr_t raw_addr, meta_addr;
+ int *data;
+ H5F_t *f = NULL;
+ herr_t ret; /* generic return value */
+ const char *filename;
+ hbool_t api_ctx_pushed = FALSE; /* Whether API context pushed */
+
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+
+ filename = (const char *)GetTestParameters();
+
+ if (VERBOSE_MED)
+ HDprintf("Page Buffer Usage in Parallel %s\n", filename);
+
+ fapl = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type);
+ VRFY((fapl >= 0), "create_faccess_plist succeeded");
+ fcpl = H5Pcreate(H5P_FILE_CREATE);
+ VRFY((fcpl >= 0), "");
+
+ ret = H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_PAGE, 1, (hsize_t)0);
+ VRFY((ret == 0), "");
+ ret = H5Pset_file_space_page_size(fcpl, sizeof(int) * 128);
+ VRFY((ret == 0), "");
+ ret = H5Pset_page_buffer_size(fapl, sizeof(int) * 100000, 0, 0);
+ VRFY((ret == 0), "");
+
+ /* This should fail because collective metadata writes are not supported with page buffering */
+ H5E_BEGIN_TRY
+ {
+ file_id = H5Fcreate(filename, H5F_ACC_TRUNC, fcpl, fapl);
+ }
+ H5E_END_TRY;
+ VRFY((file_id < 0), "H5Fcreate failed");
+
+ /* disable collective metadata writes for page buffering to work */
+ ret = H5Pset_coll_metadata_write(fapl, FALSE);
+ VRFY((ret >= 0), "");
+
+ ret = create_file(filename, fcpl, fapl, H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED);
+ VRFY((ret == 0), "");
+ ret = open_file(filename, fapl, H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED, sizeof(int) * 100,
+ sizeof(int) * 100000);
+ VRFY((ret == 0), "");
+
+ ret = create_file(filename, fcpl, fapl, H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY);
+ VRFY((ret == 0), "");
+ ret = open_file(filename, fapl, H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY, sizeof(int) * 100,
+ sizeof(int) * 100000);
+ VRFY((ret == 0), "");
+
+ ret = H5Pset_file_space_page_size(fcpl, sizeof(int) * 100);
+ VRFY((ret == 0), "");
+
+ data = (int *)HDmalloc(sizeof(int) * (size_t)num_elements);
+
+ /* initialize all the elements to have a value of -1 */
+ for (i = 0; i < num_elements; i++)
+ data[i] = -1;
+ if (MAINPROCESS) {
+ hid_t fapl_self = H5I_INVALID_HID;
+ fapl_self = create_faccess_plist(MPI_COMM_SELF, MPI_INFO_NULL, facc_type);
+
+ ret = H5Pset_page_buffer_size(fapl_self, sizeof(int) * 1000, 0, 0);
+ VRFY((ret == 0), "");
+ /* collective metadata writes do not work with page buffering */
+ ret = H5Pset_coll_metadata_write(fapl_self, FALSE);
+ VRFY((ret >= 0), "");
+
+ file_id = H5Fcreate(filename, H5F_ACC_TRUNC, fcpl, fapl_self);
+ VRFY((file_id >= 0), "");
+
+ /* Push API context */
+ ret = H5CX_push();
+ VRFY((ret == 0), "H5CX_push()");
+ api_ctx_pushed = TRUE;
+
+ /* Get a pointer to the internal file object */
+ f = (H5F_t *)H5I_object(file_id);
+
+ VRFY((f->shared->page_buf != NULL), "Page Buffer created with 1 process");
+
+ /* allocate space for 200 raw elements */
+ raw_addr = H5MF_alloc(f, H5FD_MEM_DRAW, sizeof(int) * (size_t)num_elements);
+ VRFY((raw_addr != HADDR_UNDEF), "");
+
+ /* allocate space for 200 metadata elements */
+ meta_addr = H5MF_alloc(f, H5FD_MEM_SUPER, sizeof(int) * (size_t)num_elements);
+ VRFY((meta_addr != HADDR_UNDEF), "");
+
+ page_count = 0;
+
+ ret = H5F_block_write(f, H5FD_MEM_SUPER, meta_addr, sizeof(int) * (size_t)num_elements, data);
+ VRFY((ret == 0), "");
+ ret = H5F_block_write(f, H5FD_MEM_SUPER, meta_addr, sizeof(int) * (size_t)num_elements, data);
+ ret = H5F_block_write(f, H5FD_MEM_DRAW, raw_addr, sizeof(int) * (size_t)num_elements, data);
+ VRFY((ret == 0), "");
+
+ VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB");
+
+ /* update the first 50 elements */
+ for (i = 0; i < 50; i++)
+ data[i] = i;
+ ret = H5F_block_write(f, H5FD_MEM_DRAW, raw_addr, sizeof(int) * 50, data);
+ H5Eprint2(H5E_DEFAULT, stderr);
+ VRFY((ret == 0), "");
+ ret = H5F_block_write(f, H5FD_MEM_SUPER, meta_addr, sizeof(int) * 50, data);
+ VRFY((ret == 0), "");
+ page_count += 2;
+ VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB");
+
+ /* update the second 50 elements */
+ for (i = 0; i < 50; i++)
+ data[i] = i + 50;
+ ret = H5F_block_write(f, H5FD_MEM_DRAW, raw_addr + (sizeof(int) * 50), sizeof(int) * 50, data);
+ VRFY((ret == 0), "");
+ ret = H5F_block_write(f, H5FD_MEM_SUPER, meta_addr + (sizeof(int) * 50), sizeof(int) * 50, data);
+ VRFY((ret == 0), "");
+ VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB");
+
+ /* update 100 - 200 */
+ for (i = 0; i < 100; i++)
+ data[i] = i + 100;
+ ret = H5F_block_write(f, H5FD_MEM_DRAW, raw_addr + (sizeof(int) * 100), sizeof(int) * 100, data);
+ VRFY((ret == 0), "");
+ ret = H5F_block_write(f, H5FD_MEM_SUPER, meta_addr + (sizeof(int) * 100), sizeof(int) * 100, data);
+ VRFY((ret == 0), "");
+ VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB");
+
+ ret = H5PB_flush(f->shared);
+ VRFY((ret == 0), "");
+
+ /* read elements 0 - 200 */
+ ret = H5F_block_read(f, H5FD_MEM_DRAW, raw_addr, sizeof(int) * 200, data);
+ VRFY((ret == 0), "");
+ VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB");
+ for (i = 0; i < 200; i++)
+ VRFY((data[i] == i), "Read different values than written");
+ ret = H5F_block_read(f, H5FD_MEM_SUPER, meta_addr, sizeof(int) * 200, data);
+ VRFY((ret == 0), "");
+ VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB");
+ for (i = 0; i < 200; i++)
+ VRFY((data[i] == i), "Read different values than written");
+
+ /* read elements 0 - 50 */
+ ret = H5F_block_read(f, H5FD_MEM_DRAW, raw_addr, sizeof(int) * 50, data);
+ VRFY((ret == 0), "");
+ VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB");
+ for (i = 0; i < 50; i++)
+ VRFY((data[i] == i), "Read different values than written");
+ ret = H5F_block_read(f, H5FD_MEM_SUPER, meta_addr, sizeof(int) * 50, data);
+ VRFY((ret == 0), "");
+ VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB");
+ for (i = 0; i < 50; i++)
+ VRFY((data[i] == i), "Read different values than written");
+
+ /* close the file */
+ ret = H5Fclose(file_id);
+ VRFY((ret >= 0), "H5Fclose succeeded");
+ ret = H5Pclose(fapl_self);
+ VRFY((ret >= 0), "H5Pclose succeeded");
+
+ /* Pop API context */
+ if (api_ctx_pushed) {
+ ret = H5CX_pop(FALSE);
+ VRFY((ret == 0), "H5CX_pop()");
+ api_ctx_pushed = FALSE;
+ }
+ }
+
+ MPI_Barrier(MPI_COMM_WORLD);
+
+ if (mpi_size > 1) {
+ ret = H5Pset_page_buffer_size(fapl, sizeof(int) * 1000, 0, 0);
+ VRFY((ret == 0), "");
+ /* collective metadata writes do not work with page buffering */
+ ret = H5Pset_coll_metadata_write(fapl, FALSE);
+ VRFY((ret >= 0), "");
+
+ file_id = H5Fcreate(filename, H5F_ACC_TRUNC, fcpl, fapl);
+ VRFY((file_id >= 0), "");
+
+ /* Push API context */
+ ret = H5CX_push();
+ VRFY((ret == 0), "H5CX_push()");
+ api_ctx_pushed = TRUE;
+
+ /* Get a pointer to the internal file object */
+ f = (H5F_t *)H5I_object(file_id);
+
+ VRFY((f->shared->page_buf != NULL), "Page Buffer created with 1 process");
+
+ /* allocate space for 200 raw elements */
+ raw_addr = H5MF_alloc(f, H5FD_MEM_DRAW, sizeof(int) * (size_t)num_elements);
+ VRFY((raw_addr != HADDR_UNDEF), "");
+ /* allocate space for 200 metadata elements */
+ meta_addr = H5MF_alloc(f, H5FD_MEM_SUPER, sizeof(int) * (size_t)num_elements);
+ VRFY((meta_addr != HADDR_UNDEF), "");
+
+ page_count = 0;
+
+ ret = H5F_block_write(f, H5FD_MEM_SUPER, meta_addr, sizeof(int) * (size_t)num_elements, data);
+ VRFY((ret == 0), "");
+ ret = H5F_block_write(f, H5FD_MEM_DRAW, raw_addr, sizeof(int) * (size_t)num_elements, data);
+ VRFY((ret == 0), "");
+
+ VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB");
+
+ /* update the first 50 elements */
+ for (i = 0; i < 50; i++)
+ data[i] = i;
+ ret = H5F_block_write(f, H5FD_MEM_DRAW, raw_addr, sizeof(int) * 50, data);
+ VRFY((ret == 0), "");
+ ret = H5F_block_write(f, H5FD_MEM_SUPER, meta_addr, sizeof(int) * 50, data);
+ VRFY((ret == 0), "");
+ VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB");
+
+ /* update the second 50 elements */
+ for (i = 0; i < 50; i++)
+ data[i] = i + 50;
+ ret = H5F_block_write(f, H5FD_MEM_DRAW, raw_addr + (sizeof(int) * 50), sizeof(int) * 50, data);
+ VRFY((ret == 0), "");
+ ret = H5F_block_write(f, H5FD_MEM_SUPER, meta_addr + (sizeof(int) * 50), sizeof(int) * 50, data);
+ VRFY((ret == 0), "");
+ VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB");
+
+ /* update 100 - 200 */
+ for (i = 0; i < 100; i++)
+ data[i] = i + 100;
+ ret = H5F_block_write(f, H5FD_MEM_DRAW, raw_addr + (sizeof(int) * 100), sizeof(int) * 100, data);
+ VRFY((ret == 0), "");
+ ret = H5F_block_write(f, H5FD_MEM_SUPER, meta_addr + (sizeof(int) * 100), sizeof(int) * 100, data);
+ VRFY((ret == 0), "");
+ VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB");
+
+ ret = H5Fflush(file_id, H5F_SCOPE_GLOBAL);
+ VRFY((ret == 0), "");
+
+ /* read elements 0 - 200 */
+ ret = H5F_block_read(f, H5FD_MEM_DRAW, raw_addr, sizeof(int) * 200, data);
+ VRFY((ret == 0), "");
+ VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB");
+ for (i = 0; i < 200; i++)
+ VRFY((data[i] == i), "Read different values than written");
+ ret = H5F_block_read(f, H5FD_MEM_SUPER, meta_addr, sizeof(int) * 200, data);
+ VRFY((ret == 0), "");
+ VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB");
+ for (i = 0; i < 200; i++)
+ VRFY((data[i] == i), "Read different values than written");
+
+ /* read elements 0 - 50 */
+ ret = H5F_block_read(f, H5FD_MEM_DRAW, raw_addr, sizeof(int) * 50, data);
+ VRFY((ret == 0), "");
+ VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB");
+ for (i = 0; i < 50; i++)
+ VRFY((data[i] == i), "Read different values than written");
+ ret = H5F_block_read(f, H5FD_MEM_SUPER, meta_addr, sizeof(int) * 50, data);
+ VRFY((ret == 0), "");
+ page_count += 1;
+ VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB");
+ for (i = 0; i < 50; i++)
+ VRFY((data[i] == i), "Read different values than written");
+
+ MPI_Barrier(MPI_COMM_WORLD);
+ /* reset the first 50 elements to -1*/
+ for (i = 0; i < 50; i++)
+ data[i] = -1;
+ ret = H5F_block_write(f, H5FD_MEM_DRAW, raw_addr, sizeof(int) * 50, data);
+ VRFY((ret == 0), "");
+ VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB");
+ ret = H5F_block_write(f, H5FD_MEM_SUPER, meta_addr, sizeof(int) * 50, data);
+ VRFY((ret == 0), "");
+ VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB");
+
+ /* read elements 0 - 50 */
+ ret = H5F_block_read(f, H5FD_MEM_DRAW, raw_addr, sizeof(int) * 50, data);
+ VRFY((ret == 0), "");
+ VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB");
+ for (i = 0; i < 50; i++)
+ VRFY((data[i] == -1), "Read different values than written");
+ ret = H5F_block_read(f, H5FD_MEM_SUPER, meta_addr, sizeof(int) * 50, data);
+ VRFY((ret == 0), "");
+ VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB");
+ for (i = 0; i < 50; i++)
+ VRFY((data[i] == -1), "Read different values than written");
+
+ /* close the file */
+ ret = H5Fclose(file_id);
+ VRFY((ret >= 0), "H5Fclose succeeded");
+ }
+
+ ret = H5Pclose(fapl);
+ VRFY((ret >= 0), "H5Pclose succeeded");
+ ret = H5Pclose(fcpl);
+ VRFY((ret >= 0), "H5Pclose succeeded");
+
+ /* Pop API context */
+ if (api_ctx_pushed) {
+ ret = H5CX_pop(FALSE);
+ VRFY((ret == 0), "H5CX_pop()");
+ api_ctx_pushed = FALSE;
+ }
+
+ HDfree(data);
+ data = NULL;
+ MPI_Barrier(MPI_COMM_WORLD);
+}
+
+static int
+create_file(const char *filename, hid_t fcpl, hid_t fapl, int metadata_write_strategy)
+{
+ hid_t file_id, dset_id, grp_id;
+ hid_t sid, mem_dataspace;
+ hsize_t start[RANK];
+ hsize_t count[RANK];
+ hsize_t stride[RANK];
+ hsize_t block[RANK];
+ DATATYPE *data_array = NULL;
+ hsize_t dims[RANK], i;
+ hsize_t num_elements;
+ int k;
+ char dset_name[20];
+ H5F_t *f = NULL;
+ H5C_t *cache_ptr = NULL;
+ H5AC_cache_config_t config;
+ hbool_t api_ctx_pushed = FALSE; /* Whether API context pushed */
+ herr_t ret;
+
+ file_id = H5Fcreate(filename, H5F_ACC_TRUNC, fcpl, fapl);
+ VRFY((file_id >= 0), "");
+
+ ret = H5Fflush(file_id, H5F_SCOPE_GLOBAL);
+ VRFY((ret == 0), "");
+
+ /* Push API context */
+ ret = H5CX_push();
+ VRFY((ret == 0), "H5CX_push()");
+ api_ctx_pushed = TRUE;
+
+ f = (H5F_t *)H5I_object(file_id);
+ VRFY((f != NULL), "");
+
+ cache_ptr = f->shared->cache;
+ VRFY((cache_ptr->magic == H5C__H5C_T_MAGIC), "");
+
+ cache_ptr->ignore_tags = TRUE;
+ H5C_stats__reset(cache_ptr);
+ config.version = H5AC__CURR_CACHE_CONFIG_VERSION;
+
+ ret = H5AC_get_cache_auto_resize_config(cache_ptr, &config);
+ VRFY((ret == 0), "");
+
+ config.metadata_write_strategy = metadata_write_strategy;
+
+ ret = H5AC_set_cache_auto_resize_config(cache_ptr, &config);
+ VRFY((ret == 0), "");
+
+ grp_id = H5Gcreate2(file_id, "GROUP", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ VRFY((grp_id >= 0), "");
+
+ dims[0] = (hsize_t)(ROW_FACTOR * mpi_size);
+ dims[1] = (hsize_t)(COL_FACTOR * mpi_size);
+ sid = H5Screate_simple(RANK, dims, NULL);
+ VRFY((sid >= 0), "H5Screate_simple succeeded");
+
+ /* Each process takes a slabs of rows. */
+ block[0] = dims[0] / (hsize_t)mpi_size;
+ block[1] = dims[1];
+ stride[0] = block[0];
+ stride[1] = block[1];
+ count[0] = 1;
+ count[1] = 1;
+ start[0] = (hsize_t)mpi_rank * block[0];
+ start[1] = 0;
+
+ num_elements = block[0] * block[1];
+ /* allocate memory for data buffer */
+ data_array = (DATATYPE *)HDmalloc(num_elements * sizeof(DATATYPE));
+ VRFY((data_array != NULL), "data_array HDmalloc succeeded");
+ /* put some trivial data in the data_array */
+ for (i = 0; i < num_elements; i++)
+ data_array[i] = mpi_rank + 1;
+
+ ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
+
+ /* create a memory dataspace independently */
+ mem_dataspace = H5Screate_simple(1, &num_elements, NULL);
+ VRFY((mem_dataspace >= 0), "");
+
+ for (k = 0; k < NUM_DSETS; k++) {
+ HDsnprintf(dset_name, sizeof(dset_name), "D1dset%d", k);
+ dset_id = H5Dcreate2(grp_id, dset_name, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "");
+ ret = H5Dclose(dset_id);
+ VRFY((ret == 0), "");
+
+ HDsnprintf(dset_name, sizeof(dset_name), "D2dset%d", k);
+ dset_id = H5Dcreate2(grp_id, dset_name, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "");
+ ret = H5Dclose(dset_id);
+ VRFY((ret == 0), "");
+
+ HDsnprintf(dset_name, sizeof(dset_name), "D3dset%d", k);
+ dset_id = H5Dcreate2(grp_id, dset_name, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "");
+ ret = H5Dclose(dset_id);
+ VRFY((ret == 0), "");
+
+ HDsnprintf(dset_name, sizeof(dset_name), "dset%d", k);
+ dset_id = H5Dcreate2(grp_id, dset_name, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "");
+
+ ret = H5Dwrite(dset_id, H5T_NATIVE_INT, mem_dataspace, sid, H5P_DEFAULT, data_array);
+ VRFY((ret == 0), "");
+
+ ret = H5Dclose(dset_id);
+ VRFY((ret == 0), "");
+
+ HDmemset(data_array, 0, num_elements * sizeof(DATATYPE));
+ dset_id = H5Dopen2(grp_id, dset_name, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "");
+
+ ret = H5Dread(dset_id, H5T_NATIVE_INT, mem_dataspace, sid, H5P_DEFAULT, data_array);
+ VRFY((ret == 0), "");
+
+ ret = H5Dclose(dset_id);
+ VRFY((ret == 0), "");
+
+ for (i = 0; i < num_elements; i++)
+ VRFY((data_array[i] == mpi_rank + 1), "Dataset Verify failed");
+
+ HDsnprintf(dset_name, sizeof(dset_name), "D1dset%d", k);
+ ret = H5Ldelete(grp_id, dset_name, H5P_DEFAULT);
+ VRFY((ret == 0), "");
+ HDsnprintf(dset_name, sizeof(dset_name), "D2dset%d", k);
+ ret = H5Ldelete(grp_id, dset_name, H5P_DEFAULT);
+ VRFY((ret == 0), "");
+ HDsnprintf(dset_name, sizeof(dset_name), "D3dset%d", k);
+ ret = H5Ldelete(grp_id, dset_name, H5P_DEFAULT);
+ VRFY((ret == 0), "");
+ }
+
+ ret = H5Gclose(grp_id);
+ VRFY((ret == 0), "");
+ ret = H5Fclose(file_id);
+ VRFY((ret == 0), "");
+ ret = H5Sclose(sid);
+ VRFY((ret == 0), "");
+ ret = H5Sclose(mem_dataspace);
+ VRFY((ret == 0), "");
+
+ /* Pop API context */
+ if (api_ctx_pushed) {
+ ret = H5CX_pop(FALSE);
+ VRFY((ret == 0), "H5CX_pop()");
+ api_ctx_pushed = FALSE;
+ }
+
+ MPI_Barrier(MPI_COMM_WORLD);
+ HDfree(data_array);
+ return 0;
+} /* create_file */
+
+static int
+open_file(const char *filename, hid_t fapl, int metadata_write_strategy, hsize_t page_size,
+ size_t page_buffer_size)
+{
+ hid_t file_id, dset_id, grp_id, grp_id2;
+ hid_t sid, mem_dataspace;
+ DATATYPE *data_array = NULL;
+ hsize_t dims[RANK];
+ hsize_t start[RANK];
+ hsize_t count[RANK];
+ hsize_t stride[RANK];
+ hsize_t block[RANK];
+ int i, k, ndims;
+ hsize_t num_elements;
+ char dset_name[20];
+ H5F_t *f = NULL;
+ H5C_t *cache_ptr = NULL;
+ H5AC_cache_config_t config;
+ hbool_t api_ctx_pushed = FALSE; /* Whether API context pushed */
+ herr_t ret;
+
+ config.version = H5AC__CURR_CACHE_CONFIG_VERSION;
+ ret = H5Pget_mdc_config(fapl, &config);
+ VRFY((ret == 0), "");
+
+ config.metadata_write_strategy = metadata_write_strategy;
+
+ ret = H5Pset_mdc_config(fapl, &config);
+ VRFY((ret == 0), "");
+
+ file_id = H5Fopen(filename, H5F_ACC_RDWR, fapl);
+ H5Eprint2(H5E_DEFAULT, stderr);
+ VRFY((file_id >= 0), "");
+
+ /* Push API context */
+ ret = H5CX_push();
+ VRFY((ret == 0), "H5CX_push()");
+ api_ctx_pushed = TRUE;
+
+ ret = H5Fflush(file_id, H5F_SCOPE_GLOBAL);
+ VRFY((ret == 0), "");
+
+ f = (H5F_t *)H5I_object(file_id);
+ VRFY((f != NULL), "");
+
+ cache_ptr = f->shared->cache;
+ VRFY((cache_ptr->magic == H5C__H5C_T_MAGIC), "");
+
+ MPI_Barrier(MPI_COMM_WORLD);
+
+ VRFY((f->shared->page_buf != NULL), "");
+ VRFY((f->shared->page_buf->page_size == page_size), "");
+ VRFY((f->shared->page_buf->max_size == page_buffer_size), "");
+
+ grp_id = H5Gopen2(file_id, "GROUP", H5P_DEFAULT);
+ VRFY((grp_id >= 0), "");
+
+ dims[0] = (hsize_t)(ROW_FACTOR * mpi_size);
+ dims[1] = (hsize_t)(COL_FACTOR * mpi_size);
+
+ /* Each process takes a slabs of rows. */
+ block[0] = dims[0] / (hsize_t)mpi_size;
+ block[1] = dims[1];
+ stride[0] = block[0];
+ stride[1] = block[1];
+ count[0] = 1;
+ count[1] = 1;
+ start[0] = (hsize_t)mpi_rank * block[0];
+ start[1] = 0;
+
+ num_elements = block[0] * block[1];
+ /* allocate memory for data buffer */
+ data_array = (DATATYPE *)HDmalloc(num_elements * sizeof(DATATYPE));
+ VRFY((data_array != NULL), "data_array HDmalloc succeeded");
+
+ /* create a memory dataspace independently */
+ mem_dataspace = H5Screate_simple(1, &num_elements, NULL);
+ VRFY((mem_dataspace >= 0), "");
+
+ for (k = 0; k < NUM_DSETS; k++) {
+ HDsnprintf(dset_name, sizeof(dset_name), "dset%d", k);
+ dset_id = H5Dopen2(grp_id, dset_name, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "");
+
+ sid = H5Dget_space(dset_id);
+ VRFY((dset_id >= 0), "H5Dget_space succeeded");
+
+ ndims = H5Sget_simple_extent_dims(sid, dims, NULL);
+ VRFY((ndims == 2), "H5Sget_simple_extent_dims succeeded");
+ VRFY(dims[0] == (hsize_t)(ROW_FACTOR * mpi_size), "Wrong dataset dimensions");
+ VRFY(dims[1] == (hsize_t)(COL_FACTOR * mpi_size), "Wrong dataset dimensions");
+
+ ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
+
+ ret = H5Dread(dset_id, H5T_NATIVE_INT, mem_dataspace, sid, H5P_DEFAULT, data_array);
+ VRFY((ret >= 0), "");
+
+ ret = H5Dclose(dset_id);
+ VRFY((ret >= 0), "");
+ ret = H5Sclose(sid);
+ VRFY((ret == 0), "");
+
+ for (i = 0; i < (int)num_elements; i++)
+ VRFY((data_array[i] == mpi_rank + 1), "Dataset Verify failed");
+ }
+
+ grp_id2 = H5Gcreate2(file_id, "GROUP/GROUP2", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ VRFY((grp_id2 >= 0), "");
+ ret = H5Gclose(grp_id2);
+ VRFY((ret == 0), "");
+
+ ret = H5Fflush(file_id, H5F_SCOPE_GLOBAL);
+ VRFY((ret == 0), "");
+
+ MPI_Barrier(MPI_COMM_WORLD);
+ /* Walk the metadata cache hash table and expunge every clean entry that
+ * is neither pinned nor protected.
+ */
+ for (i = 0; i < H5C__HASH_TABLE_LEN; i++) {
+ H5C_cache_entry_t *entry_ptr = NULL;
+
+ entry_ptr = cache_ptr->index[i];
+
+ while (entry_ptr != NULL) {
+ HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(entry_ptr->is_dirty == FALSE);
+
+ if (!entry_ptr->is_pinned && !entry_ptr->is_protected) {
+ ret = H5AC_expunge_entry(f, entry_ptr->type, entry_ptr->addr, 0);
+ VRFY((ret == 0), "");
+ }
+
+ entry_ptr = entry_ptr->ht_next;
+ }
+ }
+ MPI_Barrier(MPI_COMM_WORLD);
+
+ grp_id2 = H5Gopen2(file_id, "GROUP/GROUP2", H5P_DEFAULT);
+ H5Eprint2(H5E_DEFAULT, stderr);
+ VRFY((grp_id2 >= 0), "");
+ ret = H5Gclose(grp_id2);
+ H5Eprint2(H5E_DEFAULT, stderr);
+ VRFY((ret == 0), "");
+
+ ret = H5Gclose(grp_id);
+ VRFY((ret == 0), "");
+ ret = H5Fclose(file_id);
+ VRFY((ret == 0), "");
+ ret = H5Sclose(mem_dataspace);
+ VRFY((ret == 0), "");
+
+ /* Pop API context */
+ if (api_ctx_pushed) {
+ ret = H5CX_pop(FALSE);
+ VRFY((ret == 0), "H5CX_pop()");
+ api_ctx_pushed = FALSE;
+ }
+
+ HDfree(data_array);
+
+ return nerrors;
+}
+#endif
+
+/*
+ * NOTE: See HDFFV-10894 and add tests later to verify MPI-specific properties in the
+ * incoming fapl that could conflict with the existing values in H5F_shared_t on
+ * multiple opens of the same file.
+ */
+void
+test_file_properties(void)
+{
+ hid_t fid = H5I_INVALID_HID; /* HDF5 file ID */
+ hid_t fapl_id = H5I_INVALID_HID; /* File access plist */
+ hid_t fapl_copy_id = H5I_INVALID_HID; /* File access plist */
+ hbool_t is_coll;
+ htri_t are_equal;
+ const char *filename;
+ MPI_Comm comm = MPI_COMM_WORLD;
+ MPI_Info info = MPI_INFO_NULL;
+ MPI_Comm comm_out = MPI_COMM_NULL;
+ MPI_Info info_out = MPI_INFO_NULL;
+ herr_t ret; /* Generic return value */
+ int mpi_ret; /* MPI return value */
+ int cmp; /* Compare value */
+
+ /* set up MPI parameters */
+ mpi_ret = MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+ VRFY((mpi_ret >= 0), "MPI_Comm_size succeeded");
+ mpi_ret = MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+ VRFY((mpi_ret >= 0), "MPI_Comm_rank succeeded");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(" API functions for basic file aren't supported with this connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
+
+ filename = (const char *)PARATESTFILE /* GetTestParameters() */;
+
+ mpi_ret = MPI_Info_create(&info);
+ VRFY((mpi_ret >= 0), "MPI_Info_create succeeded");
+ mpi_ret = MPI_Info_set(info, "hdf_info_prop1", "xyz");
+ VRFY((mpi_ret == MPI_SUCCESS), "MPI_Info_set");
+
+ /* setup file access plist */
+ fapl_id = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((fapl_id != H5I_INVALID_HID), "H5Pcreate");
+ ret = H5Pset_fapl_mpio(fapl_id, comm, info);
+ VRFY((ret >= 0), "H5Pset_fapl_mpio");
+
+ /* Check getting and setting MPI properties
+ * (for use in VOL connectors, not the MPI-I/O VFD)
+ */
+ ret = H5Pset_mpi_params(fapl_id, comm, info);
+ VRFY((ret >= 0), "H5Pset_mpi_params succeeded");
+ ret = H5Pget_mpi_params(fapl_id, &comm_out, &info_out);
+ VRFY((ret >= 0), "H5Pget_mpi_params succeeded");
+
+ /* Check the communicator */
+ VRFY((comm != comm_out), "Communicators should not be bitwise identical");
+ cmp = MPI_UNEQUAL;
+ mpi_ret = MPI_Comm_compare(comm, comm_out, &cmp);
+ VRFY((ret >= 0), "MPI_Comm_compare succeeded");
+ VRFY((cmp == MPI_CONGRUENT), "Communicators should be congruent via MPI_Comm_compare");
+
+ /* Check the info object */
+ VRFY((info != info_out), "Info objects should not be bitwise identical");
+
+ /* Free the obtained comm and info object */
+ mpi_ret = MPI_Comm_free(&comm_out);
+ VRFY((mpi_ret >= 0), "MPI_Comm_free succeeded");
+ mpi_ret = MPI_Info_free(&info_out);
+ VRFY((mpi_ret >= 0), "MPI_Info_free succeeded");
+
+ /* Copy the fapl and ensure it's equal to the original */
+ fapl_copy_id = H5Pcopy(fapl_id);
+ VRFY((fapl_copy_id != H5I_INVALID_HID), "H5Pcopy");
+ are_equal = H5Pequal(fapl_id, fapl_copy_id);
+ VRFY((TRUE == are_equal), "H5Pequal");
+
+ /* Add a property to the copy and ensure it's different now */
+ mpi_ret = MPI_Info_set(info, "hdf_info_prop2", "abc");
+ VRFY((mpi_ret == MPI_SUCCESS), "MPI_Info_set");
+ ret = H5Pset_mpi_params(fapl_copy_id, comm, info);
+ VRFY((ret >= 0), "H5Pset_mpi_params succeeded");
+ are_equal = H5Pequal(fapl_id, fapl_copy_id);
+ VRFY((FALSE == are_equal), "H5Pequal");
+
+ /* Add a property with the same key but a different value to the original
+ * and ensure they are still different.
+ */
+ mpi_ret = MPI_Info_set(info, "hdf_info_prop2", "ijk");
+ VRFY((mpi_ret == MPI_SUCCESS), "MPI_Info_set");
+ ret = H5Pset_mpi_params(fapl_id, comm, info);
+ VRFY((ret >= 0), "H5Pset_mpi_params succeeded");
+ are_equal = H5Pequal(fapl_id, fapl_copy_id);
+ VRFY((FALSE == are_equal), "H5Pequal");
+
+ /* Set the second property in the original to the same
+ * value as the copy and ensure they are the same now.
+ */
+ mpi_ret = MPI_Info_set(info, "hdf_info_prop2", "abc");
+ VRFY((mpi_ret == MPI_SUCCESS), "MPI_Info_set");
+ ret = H5Pset_mpi_params(fapl_id, comm, info);
+ VRFY((ret >= 0), "H5Pset_mpi_params succeeded");
+ are_equal = H5Pequal(fapl_id, fapl_copy_id);
+ VRFY((TRUE == are_equal), "H5Pequal");
+
+ /* create the file */
+ fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id);
+ VRFY((fid != H5I_INVALID_HID), "H5Fcreate succeeded");
+
+ /* verify settings for file access properties */
+
+ /* Collective metadata writes */
+ ret = H5Pget_coll_metadata_write(fapl_id, &is_coll);
+ VRFY((ret >= 0), "H5Pget_coll_metadata_write succeeded");
+ VRFY((is_coll == FALSE), "Incorrect property setting for coll metadata writes");
+
+ /* Collective metadata read API calling requirement */
+ ret = H5Pget_all_coll_metadata_ops(fapl_id, &is_coll);
+ VRFY((ret >= 0), "H5Pget_all_coll_metadata_ops succeeded");
+ VRFY((is_coll == FALSE), "Incorrect property setting for coll metadata API calls requirement");
+
+ ret = H5Fclose(fid);
+ VRFY((ret >= 0), "H5Fclose succeeded");
+
+ /* Open the file with the MPI-IO driver */
+ ret = H5Pset_fapl_mpio(fapl_id, comm, info);
+ VRFY((ret >= 0), "H5Pset_fapl_mpio failed");
+ fid = H5Fopen(filename, H5F_ACC_RDWR, fapl_id);
+ VRFY((fid != H5I_INVALID_HID), "H5Fcreate succeeded");
+
+ /* verify settings for file access properties */
+
+ /* Collective metadata writes */
+ ret = H5Pget_coll_metadata_write(fapl_id, &is_coll);
+ VRFY((ret >= 0), "H5Pget_coll_metadata_write succeeded");
+ VRFY((is_coll == FALSE), "Incorrect property setting for coll metadata writes");
+
+ /* Collective metadata read API calling requirement */
+ ret = H5Pget_all_coll_metadata_ops(fapl_id, &is_coll);
+ VRFY((ret >= 0), "H5Pget_all_coll_metadata_ops succeeded");
+ VRFY((is_coll == FALSE), "Incorrect property setting for coll metadata API calls requirement");
+
+ ret = H5Fclose(fid);
+ VRFY((ret >= 0), "H5Fclose succeeded");
+
+ /* Open the file with the MPI-IO driver w/ collective settings */
+ ret = H5Pset_fapl_mpio(fapl_id, comm, info);
+ VRFY((ret >= 0), "H5Pset_fapl_mpio succeeded");
+ /* Collective metadata writes */
+ ret = H5Pset_coll_metadata_write(fapl_id, TRUE);
+ VRFY((ret >= 0), "H5Pset_coll_metadata_write succeeded");
+ /* Collective metadata read API calling requirement */
+ ret = H5Pset_all_coll_metadata_ops(fapl_id, TRUE);
+ VRFY((ret >= 0), "H5Pset_all_coll_metadata_ops succeeded");
+ fid = H5Fopen(filename, H5F_ACC_RDWR, fapl_id);
+ VRFY((fid != H5I_INVALID_HID), "H5Fopen succeeded");
+
+ /* verify settings for file access properties */
+
+ /* Collective metadata writes */
+ ret = H5Pget_coll_metadata_write(fapl_id, &is_coll);
+ VRFY((ret >= 0), "H5Pget_coll_metadata_write succeeded");
+ VRFY((is_coll == TRUE), "Incorrect property setting for coll metadata writes");
+
+ /* Collective metadata read API calling requirement */
+ ret = H5Pget_all_coll_metadata_ops(fapl_id, &is_coll);
+ VRFY((ret >= 0), "H5Pget_all_coll_metadata_ops succeeded");
+ VRFY((is_coll == TRUE), "Incorrect property setting for coll metadata API calls requirement");
+
+ /* close fapl and retrieve it from file */
+ ret = H5Pclose(fapl_id);
+ VRFY((ret >= 0), "H5Pclose succeeded");
+ fapl_id = H5I_INVALID_HID;
+
+ fapl_id = H5Fget_access_plist(fid);
+ VRFY((fapl_id != H5I_INVALID_HID), "H5P_FILE_ACCESS");
+
+ /* verify settings for file access properties */
+
+ /* Collective metadata writes */
+ ret = H5Pget_coll_metadata_write(fapl_id, &is_coll);
+ VRFY((ret >= 0), "H5Pget_coll_metadata_write succeeded");
+ VRFY((is_coll == TRUE), "Incorrect property setting for coll metadata writes");
+
+ /* Collective metadata read API calling requirement */
+ ret = H5Pget_all_coll_metadata_ops(fapl_id, &is_coll);
+ VRFY((ret >= 0), "H5Pget_all_coll_metadata_ops succeeded");
+ VRFY((is_coll == TRUE), "Incorrect property setting for coll metadata API calls requirement");
+
+ /* close file */
+ ret = H5Fclose(fid);
+ VRFY((ret >= 0), "H5Fclose succeeded");
+
+ /* Release file-access plist */
+ ret = H5Pclose(fapl_id);
+ VRFY((ret >= 0), "H5Pclose succeeded");
+ ret = H5Pclose(fapl_copy_id);
+ VRFY((ret >= 0), "H5Pclose succeeded");
+
+ /* Free the MPI info object */
+ mpi_ret = MPI_Info_free(&info);
+ VRFY((mpi_ret >= 0), "MPI_Info_free succeeded");
+
+} /* end test_file_properties() */
+
+void
+test_delete(void)
+{
+ hid_t fid = H5I_INVALID_HID; /* HDF5 file ID */
+ hid_t fapl_id = H5I_INVALID_HID; /* File access plist */
+ const char *filename = NULL;
+ MPI_Comm comm = MPI_COMM_WORLD;
+ MPI_Info info = MPI_INFO_NULL;
+ htri_t is_hdf5 = FAIL; /* Whether a file is an HDF5 file */
+ herr_t ret; /* Generic return value */
+
+ filename = (const char *)PARATESTFILE /* GetTestParameters() */;
+
+ /* set up MPI parameters */
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ /* setup file access plist */
+ fapl_id = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((fapl_id != H5I_INVALID_HID), "H5Pcreate");
+ ret = H5Pset_fapl_mpio(fapl_id, comm, info);
+ VRFY((SUCCEED == ret), "H5Pset_fapl_mpio");
+
+ /* create the file */
+ fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id);
+ VRFY((fid != H5I_INVALID_HID), "H5Fcreate");
+
+ /* close the file */
+ ret = H5Fclose(fid);
+ VRFY((SUCCEED == ret), "H5Fclose");
+
+ /* Verify that the file is an HDF5 file */
+ is_hdf5 = H5Fis_accessible(filename, fapl_id);
+ VRFY((TRUE == is_hdf5), "H5Fis_accessible");
+
+ /* Delete the file */
+ ret = H5Fdelete(filename, fapl_id);
+ VRFY((SUCCEED == ret), "H5Fdelete");
+
+ /* Verify that the file is NO LONGER an HDF5 file */
+ /* This should fail since there is no file */
+ H5E_BEGIN_TRY
+ {
+ is_hdf5 = H5Fis_accessible(filename, fapl_id);
+ }
+ H5E_END_TRY;
+ VRFY((is_hdf5 != SUCCEED), "H5Fis_accessible");
+
+ /* Release file-access plist */
+ ret = H5Pclose(fapl_id);
+ VRFY((SUCCEED == ret), "H5Pclose");
+
+} /* end test_delete() */
diff --git a/testpar/API/t_file_image.c b/testpar/API/t_file_image.c
new file mode 100644
index 0000000..4f4fa96
--- /dev/null
+++ b/testpar/API/t_file_image.c
@@ -0,0 +1,371 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://www.hdfgroup.org/licenses. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/*
+ * Parallel tests for file image operations
+ */
+
+#include "hdf5.h"
+#include "testphdf5.h"
+
+/* file_image_daisy_chain_test
+ *
+ * Process zero:
+ *
+ * 1) Creates a core file with an integer vector data set of
+ * length n (= mpi_size),
+ *
+ * 2) Initializes the vector to zero in location 0, and to -1
+ * everywhere else.
+ *
+ * 3) Flushes the core file, and gets an image of it. Closes
+ * the core file.
+ *
+ * 4) Sends the image to process 1.
+ *
+ * 5) Awaits receipt on a file image from process n-1.
+ *
+ * 6) opens the image received from process n-1, verifies that
+ * it contains a vector of length equal to mpi_size, and
+ * that the vector contains (0, 1, 2, ... n-1)
+ *
+ * 7) closes the core file and exits.
+ *
+ * Process i (0 < i < n)
+ *
+ * 1) Await receipt of file image from process (i - 1).
+ *
+ * 2) Open the image with the core file driver, verify that it
+ * contains a vector v of length n, and that v[j] == j for
+ * 0 <= j < i, and that v[j] == -1 for i <= j < n
+ *
+ * 3) Set v[i] = i in the core file.
+ *
+ * 4) Flush the core file and send it to process (i + 1) % n.
+ *
+ * 5) close the core file and exit.
+ *
+ * Test fails on a hang (if an image is not received), or on invalid data.
+ *
+ * JRM -- 11/28/11
+ */
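+
+/*
+ * For reference, a minimal sketch (disabled) of the file-image round trip each
+ * rank performs below: query the image size, copy the image into a
+ * caller-allocated buffer, then install the buffer in a core-VFD fapl before
+ * reopening. The helper name and locals are illustrative only; error handling
+ * is omitted.
+ */
+#if 0
+static void
+file_image_round_trip_sketch(hid_t file_id, const char *name)
+{
+ ssize_t len;
+ void *buf;
+ hid_t fapl, fid;
+
+ /* Producer side: size query first, then fill the buffer */
+ len = H5Fget_file_image(file_id, NULL, (size_t)0);
+ buf = HDmalloc((size_t)len);
+ H5Fget_file_image(file_id, buf, (size_t)len);
+
+ /* Consumer side: install the image before H5Fopen() */
+ fapl = H5Pcreate(H5P_FILE_ACCESS);
+ H5Pset_fapl_core(fapl, (size_t)(64 * 1024), FALSE);
+ H5Pset_file_image(fapl, buf, (size_t)len);
+ fid = H5Fopen(name, H5F_ACC_RDWR, fapl);
+
+ H5Fclose(fid);
+ H5Pclose(fapl);
+ HDfree(buf);
+}
+#endif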
+void
+file_image_daisy_chain_test(void)
+{
+ char file_name[1024] = "\0";
+ int mpi_size, mpi_rank;
+ int mpi_result;
+ int i;
+ int space_ndims;
+ MPI_Status rcvstat;
+ int *vector_ptr = NULL;
+ hid_t fapl_id = -1;
+ hid_t file_id; /* file IDs */
+ hid_t dset_id = -1;
+ hid_t dset_type_id = -1;
+ hid_t space_id = -1;
+ herr_t err;
+ hsize_t dims[1];
+ void *image_ptr = NULL;
+ ssize_t bytes_read;
+ ssize_t image_len;
+ hbool_t vector_ok = TRUE;
+ htri_t tri_result;
+
+ /* set up MPI parameters */
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ /* setup file name */
+ HDsnprintf(file_name, 1024, "file_image_daisy_chain_test_%05d.h5", (int)mpi_rank);
+
+ if (mpi_rank == 0) {
+
+ /* 1) Creates a core file with an integer vector data set
+ * of length mpi_size,
+ */
+ fapl_id = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((fapl_id >= 0), "creating fapl");
+
+ err = H5Pset_fapl_core(fapl_id, (size_t)(64 * 1024), FALSE);
+ VRFY((err >= 0), "setting core file driver in fapl.");
+
+ file_id = H5Fcreate(file_name, 0, H5P_DEFAULT, fapl_id);
+ VRFY((file_id >= 0), "created core file");
+
+ dims[0] = (hsize_t)mpi_size;
+ space_id = H5Screate_simple(1, dims, dims);
+ VRFY((space_id >= 0), "created data space");
+
+ dset_id = H5Dcreate2(file_id, "v", H5T_NATIVE_INT, space_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "created data set");
+
+ /* 2) Initialize the vector to zero in location 0, and
+ * to -1 everywhere else.
+ */
+
+ vector_ptr = (int *)HDmalloc((size_t)(mpi_size) * sizeof(int));
+ VRFY((vector_ptr != NULL), "allocated in memory representation of vector");
+
+ vector_ptr[0] = 0;
+ for (i = 1; i < mpi_size; i++)
+ vector_ptr[i] = -1;
+
+ err = H5Dwrite(dset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, (void *)vector_ptr);
+ VRFY((err >= 0), "wrote initial data to vector.");
+
+ HDfree(vector_ptr);
+ vector_ptr = NULL;
+
+ /* 3) Flush the core file, and get an image of it. Close
+ * the core file.
+ */
+ err = H5Fflush(file_id, H5F_SCOPE_GLOBAL);
+ VRFY((err >= 0), "flushed core file.");
+
+ image_len = H5Fget_file_image(file_id, NULL, (size_t)0);
+ VRFY((image_len > 0), "got image file size");
+
+ image_ptr = (void *)HDmalloc((size_t)image_len);
+ VRFY(image_ptr != NULL, "allocated file image buffer.");
+
+ bytes_read = H5Fget_file_image(file_id, image_ptr, (size_t)image_len);
+ VRFY(bytes_read == image_len, "wrote file into image buffer");
+
+ err = H5Sclose(space_id);
+ VRFY((err >= 0), "closed data space.");
+
+ err = H5Dclose(dset_id);
+ VRFY((err >= 0), "closed data set.");
+
+ err = H5Fclose(file_id);
+ VRFY((err >= 0), "closed core file(1).");
+
+ err = H5Pclose(fapl_id);
+ VRFY((err >= 0), "closed fapl(1).");
+
+ /* 4) Send the image to process 1. */
+
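+        /* The image travels as two messages: the length first, so the
+         * receiver can allocate a buffer of the right size, then the raw
+         * image bytes.
+         */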
+ mpi_result = MPI_Ssend((void *)(&image_len), (int)sizeof(ssize_t), MPI_BYTE, 1, 0, MPI_COMM_WORLD);
+ VRFY((mpi_result == MPI_SUCCESS), "sent image size to process 1");
+
+ mpi_result = MPI_Ssend((void *)image_ptr, (int)image_len, MPI_BYTE, 1, 0, MPI_COMM_WORLD);
+ VRFY((mpi_result == MPI_SUCCESS), "sent image to process 1");
+
+ HDfree(image_ptr);
+ image_ptr = NULL;
+ image_len = 0;
+
+        /* 5) Await receipt of a file image from process n-1. */
+
+ mpi_result = MPI_Recv((void *)(&image_len), (int)sizeof(ssize_t), MPI_BYTE, mpi_size - 1, 0,
+ MPI_COMM_WORLD, &rcvstat);
+ VRFY((mpi_result == MPI_SUCCESS), "received image len from process n-1");
+
+ image_ptr = (void *)HDmalloc((size_t)image_len);
+ VRFY(image_ptr != NULL, "allocated file image receive buffer.");
+
+ mpi_result =
+ MPI_Recv((void *)image_ptr, (int)image_len, MPI_BYTE, mpi_size - 1, 0, MPI_COMM_WORLD, &rcvstat);
+ VRFY((mpi_result == MPI_SUCCESS), "received file image from process n-1");
+
+ /* 6) open the image received from process n-1, verify that
+ * it contains a vector of length equal to mpi_size, and
+ * that the vector contains (0, 1, 2, ... n-1).
+ */
+ fapl_id = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((fapl_id >= 0), "creating fapl");
+
+ err = H5Pset_fapl_core(fapl_id, (size_t)(64 * 1024), FALSE);
+ VRFY((err >= 0), "setting core file driver in fapl.");
+
+ err = H5Pset_file_image(fapl_id, image_ptr, (size_t)image_len);
+ VRFY((err >= 0), "set file image in fapl.");
+
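+        /* The fapl now carries both the core VFD (no backing store) and the
+         * received image, so this open works from the in-memory buffer
+         * rather than reading anything from disk.
+         */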
+ file_id = H5Fopen(file_name, H5F_ACC_RDWR, fapl_id);
+ VRFY((file_id >= 0), "opened received file image file");
+
+ dset_id = H5Dopen2(file_id, "v", H5P_DEFAULT);
+ VRFY((dset_id >= 0), "opened data set");
+
+ dset_type_id = H5Dget_type(dset_id);
+ VRFY((dset_type_id >= 0), "obtained data set type");
+
+ tri_result = H5Tequal(dset_type_id, H5T_NATIVE_INT);
+ VRFY((tri_result == TRUE), "verified data set type");
+
+ space_id = H5Dget_space(dset_id);
+ VRFY((space_id >= 0), "opened data space");
+
+ space_ndims = H5Sget_simple_extent_ndims(space_id);
+ VRFY((space_ndims == 1), "verified data space num dims(1)");
+
+ space_ndims = H5Sget_simple_extent_dims(space_id, dims, NULL);
+ VRFY((space_ndims == 1), "verified data space num dims(2)");
+ VRFY((dims[0] == (hsize_t)mpi_size), "verified data space dims");
+
+ vector_ptr = (int *)HDmalloc((size_t)(mpi_size) * sizeof(int));
+ VRFY((vector_ptr != NULL), "allocated in memory rep of vector");
+
+ err = H5Dread(dset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, (void *)vector_ptr);
+ VRFY((err >= 0), "read received vector.");
+
+ vector_ok = TRUE;
+ for (i = 0; i < mpi_size; i++)
+ if (vector_ptr[i] != i)
+ vector_ok = FALSE;
+ VRFY((vector_ok), "verified received vector.");
+
+ HDfree(vector_ptr);
+ vector_ptr = NULL;
+
+        /* 7) Close the core file and exit. */
+
+ err = H5Sclose(space_id);
+ VRFY((err >= 0), "closed data space.");
+
+ err = H5Dclose(dset_id);
+ VRFY((err >= 0), "closed data set.");
+
+ err = H5Fclose(file_id);
+ VRFY((err >= 0), "closed core file(1).");
+
+ err = H5Pclose(fapl_id);
+ VRFY((err >= 0), "closed fapl(1).");
+
+ HDfree(image_ptr);
+ image_ptr = NULL;
+ image_len = 0;
+ }
+ else {
+ /* 1) Await receipt of file image from process (i - 1). */
+
+ mpi_result = MPI_Recv((void *)(&image_len), (int)sizeof(ssize_t), MPI_BYTE, mpi_rank - 1, 0,
+ MPI_COMM_WORLD, &rcvstat);
+ VRFY((mpi_result == MPI_SUCCESS), "received image size from process mpi_rank-1");
+
+ image_ptr = (void *)HDmalloc((size_t)image_len);
+ VRFY(image_ptr != NULL, "allocated file image receive buffer.");
+
+ mpi_result =
+ MPI_Recv((void *)image_ptr, (int)image_len, MPI_BYTE, mpi_rank - 1, 0, MPI_COMM_WORLD, &rcvstat);
+ VRFY((mpi_result == MPI_SUCCESS), "received file image from process mpi_rank-1");
+
+ /* 2) Open the image with the core file driver, verify that it
+         * contains a vector v of length n, and that v[j] = j for
+         * 0 <= j < i, and that v[j] == -1 for i <= j < n
+ */
+ fapl_id = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((fapl_id >= 0), "creating fapl");
+
+ err = H5Pset_fapl_core(fapl_id, (size_t)(64 * 1024), FALSE);
+ VRFY((err >= 0), "setting core file driver in fapl.");
+
+ err = H5Pset_file_image(fapl_id, image_ptr, (size_t)image_len);
+ VRFY((err >= 0), "set file image in fapl.");
+
+ file_id = H5Fopen(file_name, H5F_ACC_RDWR, fapl_id);
+        H5Eprint2(H5E_DEFAULT, stderr);
+ VRFY((file_id >= 0), "opened received file image file");
+
+ dset_id = H5Dopen2(file_id, "v", H5P_DEFAULT);
+ VRFY((dset_id >= 0), "opened data set");
+
+ dset_type_id = H5Dget_type(dset_id);
+ VRFY((dset_type_id >= 0), "obtained data set type");
+
+ tri_result = H5Tequal(dset_type_id, H5T_NATIVE_INT);
+ VRFY((tri_result == TRUE), "verified data set type");
+
+ space_id = H5Dget_space(dset_id);
+ VRFY((space_id >= 0), "opened data space");
+
+ space_ndims = H5Sget_simple_extent_ndims(space_id);
+ VRFY((space_ndims == 1), "verified data space num dims(1)");
+
+ space_ndims = H5Sget_simple_extent_dims(space_id, dims, NULL);
+ VRFY((space_ndims == 1), "verified data space num dims(2)");
+ VRFY((dims[0] == (hsize_t)mpi_size), "verified data space dims");
+
+ vector_ptr = (int *)HDmalloc((size_t)(mpi_size) * sizeof(int));
+ VRFY((vector_ptr != NULL), "allocated in memory rep of vector");
+
+ err = H5Dread(dset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, (void *)vector_ptr);
+ VRFY((err >= 0), "read received vector.");
+
+ vector_ok = TRUE;
+ for (i = 0; i < mpi_size; i++) {
+ if (i < mpi_rank) {
+ if (vector_ptr[i] != i)
+ vector_ok = FALSE;
+ }
+ else {
+ if (vector_ptr[i] != -1)
+ vector_ok = FALSE;
+ }
+ }
+ VRFY((vector_ok), "verified received vector.");
+
+ /* 3) Set v[i] = i in the core file. */
+
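+        /* For simplicity the whole vector is rewritten; only element
+         * [mpi_rank] actually changes relative to what was just read.
+         */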
+ vector_ptr[mpi_rank] = mpi_rank;
+
+ err = H5Dwrite(dset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, (void *)vector_ptr);
+ VRFY((err >= 0), "wrote modified data to vector.");
+
+ HDfree(vector_ptr);
+ vector_ptr = NULL;
+
+ /* 4) Flush the core file and send it to process (mpi_rank + 1) % n. */
+
+ err = H5Fflush(file_id, H5F_SCOPE_GLOBAL);
+ VRFY((err >= 0), "flushed core file.");
+
+ image_len = H5Fget_file_image(file_id, NULL, (size_t)0);
+ VRFY((image_len > 0), "got (possibly modified) image file len");
+
+ image_ptr = (void *)HDrealloc((void *)image_ptr, (size_t)image_len);
+ VRFY(image_ptr != NULL, "re-allocated file image buffer.");
+
+ bytes_read = H5Fget_file_image(file_id, image_ptr, (size_t)image_len);
+ VRFY(bytes_read == image_len, "wrote file into image buffer");
+
+ mpi_result = MPI_Ssend((void *)(&image_len), (int)sizeof(ssize_t), MPI_BYTE,
+ (mpi_rank + 1) % mpi_size, 0, MPI_COMM_WORLD);
+ VRFY((mpi_result == MPI_SUCCESS), "sent image size to process (mpi_rank + 1) % mpi_size");
+
+ mpi_result = MPI_Ssend((void *)image_ptr, (int)image_len, MPI_BYTE, (mpi_rank + 1) % mpi_size, 0,
+ MPI_COMM_WORLD);
+ VRFY((mpi_result == MPI_SUCCESS), "sent image to process (mpi_rank + 1) % mpi_size");
+
+ HDfree(image_ptr);
+ image_ptr = NULL;
+ image_len = 0;
+
+ /* 5) close the core file and exit. */
+
+ err = H5Sclose(space_id);
+ VRFY((err >= 0), "closed data space.");
+
+ err = H5Dclose(dset_id);
+ VRFY((err >= 0), "closed data set.");
+
+ err = H5Fclose(file_id);
+ VRFY((err >= 0), "closed core file(1).");
+
+ err = H5Pclose(fapl_id);
+ VRFY((err >= 0), "closed fapl(1).");
+ }
+
+ return;
+
+} /* file_image_daisy_chain_test() */
diff --git a/testpar/API/t_filter_read.c b/testpar/API/t_filter_read.c
new file mode 100644
index 0000000..f32c21b
--- /dev/null
+++ b/testpar/API/t_filter_read.c
@@ -0,0 +1,564 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://www.hdfgroup.org/licenses. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/*
+ * This verifies the correctness of parallel reading of a dataset that has been
+ * written serially using filters.
+ *
+ * Created by: Christian Chilan
+ * Date: 2007/05/15
+ */
+
+#include "hdf5.h"
+#include "testphdf5.h"
+
+#ifdef H5_HAVE_SZLIB_H
+#include "szlib.h"
+#endif
+
+static int mpi_size, mpi_rank;
+
+/* Chunk sizes */
+#define CHUNK_DIM1 7
+#define CHUNK_DIM2 27
+
+/* Sizes of the vertical hyperslabs. Total dataset size is
+ {HS_DIM1, HS_DIM2 * mpi_size } */
+#define HS_DIM1 200
+#define HS_DIM2 100
+
+const char *
+h5_rmprefix(const char *filename)
+{
+ const char *ret_ptr;
+
+ if ((ret_ptr = HDstrstr(filename, ":")) == NULL)
+ ret_ptr = filename;
+ else
+ ret_ptr++;
+
+ return (ret_ptr);
+}
+
+#ifdef H5_HAVE_FILTER_SZIP
+
+/*-------------------------------------------------------------------------
+ * Function: h5_szip_can_encode
+ *
+ * Purpose: Retrieve the filter config flags for szip, tell if
+ * encoder is available.
+ *
+ * Return: 1: decode+encode is enabled
+ * 0: only decode is enabled
+ * -1: other
+ *
+ * Programmer:
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+int
+h5_szip_can_encode(void)
+{
+ unsigned int filter_config_flags;
+
+ H5Zget_filter_info(H5Z_FILTER_SZIP, &filter_config_flags);
+ if ((filter_config_flags & (H5Z_FILTER_CONFIG_ENCODE_ENABLED | H5Z_FILTER_CONFIG_DECODE_ENABLED)) == 0) {
+ /* filter present but neither encode nor decode is supported (???) */
+ return -1;
+ }
+ else if ((filter_config_flags & (H5Z_FILTER_CONFIG_ENCODE_ENABLED | H5Z_FILTER_CONFIG_DECODE_ENABLED)) ==
+ H5Z_FILTER_CONFIG_DECODE_ENABLED) {
+ /* decoder only: read but not write */
+ return 0;
+ }
+ else if ((filter_config_flags & (H5Z_FILTER_CONFIG_ENCODE_ENABLED | H5Z_FILTER_CONFIG_DECODE_ENABLED)) ==
+ H5Z_FILTER_CONFIG_ENCODE_ENABLED) {
+ /* encoder only: write but not read (???) */
+ return -1;
+ }
+ else if ((filter_config_flags & (H5Z_FILTER_CONFIG_ENCODE_ENABLED | H5Z_FILTER_CONFIG_DECODE_ENABLED)) ==
+ (H5Z_FILTER_CONFIG_ENCODE_ENABLED | H5Z_FILTER_CONFIG_DECODE_ENABLED)) {
+ return 1;
+ }
+ return (-1);
+}
+#endif /* H5_HAVE_FILTER_SZIP */
+
+/*-------------------------------------------------------------------------
+ * Function: filter_read_internal
+ *
+ * Purpose: Tests parallel reading of a 2D dataset written serially using
+ * filters. During the parallel reading phase, the dataset is
+ * divided evenly among the processors in vertical hyperslabs.
+ *
+ * Programmer: Christian Chilan
+ * Tuesday, May 15, 2007
+ *
+ *-------------------------------------------------------------------------
+ */
+static void
+filter_read_internal(const char *filename, hid_t dcpl, hsize_t *dset_size)
+{
+ hid_t file, dataset; /* HDF5 IDs */
+ hid_t access_plist; /* Access property list ID */
+ hid_t sid, memspace; /* Dataspace IDs */
+ hsize_t size[2]; /* Dataspace dimensions */
+ hsize_t hs_offset[2]; /* Hyperslab offset */
+ hsize_t hs_size[2]; /* Hyperslab size */
+ size_t i, j; /* Local index variables */
+ char name[32] = "dataset";
+ herr_t hrc; /* Error status */
+ int *points = NULL; /* Writing buffer for entire dataset */
+ int *check = NULL; /* Reading buffer for selected hyperslab */
+
+ (void)dset_size; /* silence compiler */
+
+ /* set up MPI parameters */
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
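+    /* Each rank reads a vertical hyperslab of HS_DIM1 x HS_DIM2 elements,
+     * starting at column mpi_rank * HS_DIM2 of the full dataset.
+     */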
+ /* set sizes for dataset and hyperslabs */
+ hs_size[0] = size[0] = HS_DIM1;
+ hs_size[1] = HS_DIM2;
+
+ size[1] = hs_size[1] * (hsize_t)mpi_size;
+
+ hs_offset[0] = 0;
+ hs_offset[1] = hs_size[1] * (hsize_t)mpi_rank;
+
+ /* Create the data space */
+ sid = H5Screate_simple(2, size, NULL);
+ VRFY(sid >= 0, "H5Screate_simple");
+
+ /* Create buffers */
+ points = (int *)HDmalloc(size[0] * size[1] * sizeof(int));
+ VRFY(points != NULL, "HDmalloc");
+
+ check = (int *)HDmalloc(hs_size[0] * hs_size[1] * sizeof(int));
+ VRFY(check != NULL, "HDmalloc");
+
+ /* Initialize writing buffer with random data */
+ for (i = 0; i < size[0]; i++)
+ for (j = 0; j < size[1]; j++)
+ points[i * size[1] + j] = (int)(i + j + 7);
+
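+    /* H5Pall_filters_avail() returns TRUE only if every filter referenced by
+     * the dataset creation property list is available in this build.
+     */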
+ VRFY(H5Pall_filters_avail(dcpl), "Incorrect filter availability");
+
+ /* Serial write phase */
+ if (MAINPROCESS) {
+
+ file = H5Fcreate(h5_rmprefix(filename), H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ VRFY(file >= 0, "H5Fcreate");
+
+ /* Create the dataset */
+ dataset = H5Dcreate2(file, name, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT);
+ VRFY(dataset >= 0, "H5Dcreate2");
+
+ hrc = H5Dwrite(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, points);
+ VRFY(hrc >= 0, "H5Dwrite");
+#if 0
+ *dset_size = H5Dget_storage_size(dataset);
+ VRFY(*dset_size > 0, "H5Dget_storage_size");
+#endif
+
+ hrc = H5Dclose(dataset);
+ VRFY(hrc >= 0, "H5Dclose");
+
+ hrc = H5Fclose(file);
+ VRFY(hrc >= 0, "H5Fclose");
+ }
+
+ MPI_Barrier(MPI_COMM_WORLD);
+
+ /* Parallel read phase */
+ /* Set up MPIO file access property lists */
+ access_plist = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((access_plist >= 0), "H5Pcreate");
+
+ hrc = H5Pset_fapl_mpio(access_plist, MPI_COMM_WORLD, MPI_INFO_NULL);
+ VRFY((hrc >= 0), "H5Pset_fapl_mpio");
+
+ /* Open the file */
+ file = H5Fopen(filename, H5F_ACC_RDWR, access_plist);
+ VRFY((file >= 0), "H5Fopen");
+
+ dataset = H5Dopen2(file, name, H5P_DEFAULT);
+ VRFY((dataset >= 0), "H5Dopen2");
+
+ hrc = H5Sselect_hyperslab(sid, H5S_SELECT_SET, hs_offset, NULL, hs_size, NULL);
+ VRFY(hrc >= 0, "H5Sselect_hyperslab");
+
+ memspace = H5Screate_simple(2, hs_size, NULL);
+ VRFY(memspace >= 0, "H5Screate_simple");
+
+ hrc = H5Dread(dataset, H5T_NATIVE_INT, memspace, sid, H5P_DEFAULT, check);
+ VRFY(hrc >= 0, "H5Dread");
+
+ /* Check that the values read are the same as the values written */
+ for (i = 0; i < hs_size[0]; i++) {
+ for (j = 0; j < hs_size[1]; j++) {
+ if (points[i * size[1] + (size_t)hs_offset[1] + j] != check[i * hs_size[1] + j]) {
+ HDfprintf(stderr, " Read different values than written.\n");
+ HDfprintf(stderr, " At index %lu,%lu\n", (unsigned long)(i),
+ (unsigned long)(hs_offset[1] + j));
+ HDfprintf(stderr, " At original: %d\n",
+ (int)points[i * size[1] + (size_t)hs_offset[1] + j]);
+ HDfprintf(stderr, " At returned: %d\n", (int)check[i * hs_size[1] + j]);
+ VRFY(FALSE, "");
+ }
+ }
+ }
+#if 0
+ /* Get the storage size of the dataset */
+ *dset_size = H5Dget_storage_size(dataset);
+ VRFY(*dset_size != 0, "H5Dget_storage_size");
+#endif
+
+ /* Clean up objects used for this test */
+ hrc = H5Dclose(dataset);
+ VRFY(hrc >= 0, "H5Dclose");
+
+ hrc = H5Sclose(sid);
+ VRFY(hrc >= 0, "H5Sclose");
+
+ hrc = H5Sclose(memspace);
+ VRFY(hrc >= 0, "H5Sclose");
+
+ hrc = H5Pclose(access_plist);
+ VRFY(hrc >= 0, "H5Pclose");
+
+ hrc = H5Fclose(file);
+ VRFY(hrc >= 0, "H5Fclose");
+
+ HDfree(points);
+ HDfree(check);
+
+ MPI_Barrier(MPI_COMM_WORLD);
+}
+
+/*-------------------------------------------------------------------------
+ * Function: test_filter_read
+ *
+ * Purpose: Tests parallel reading of datasets written serially using
+ * several (combinations of) filters.
+ *
+ * Programmer: Christian Chilan
+ * Tuesday, May 15, 2007
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+
+void
+test_filter_read(void)
+{
+ hid_t dc; /* HDF5 IDs */
+ const hsize_t chunk_size[2] = {CHUNK_DIM1, CHUNK_DIM2}; /* Chunk dimensions */
+#if 0
+ hsize_t null_size; /* Size of dataset without filters */
+#endif
+ unsigned chunk_opts; /* Chunk options */
+ unsigned disable_partial_chunk_filters; /* Whether filters are disabled on partial chunks */
+ herr_t hrc;
+ const char *filename;
+#ifdef H5_HAVE_FILTER_FLETCHER32
+ hsize_t fletcher32_size; /* Size of dataset with Fletcher32 checksum */
+#endif
+
+#ifdef H5_HAVE_FILTER_DEFLATE
+ hsize_t deflate_size; /* Size of dataset with deflate filter */
+#endif /* H5_HAVE_FILTER_DEFLATE */
+
+#ifdef H5_HAVE_FILTER_SZIP
+ hsize_t szip_size; /* Size of dataset with szip filter */
+ unsigned szip_options_mask = H5_SZIP_NN_OPTION_MASK;
+ unsigned szip_pixels_per_block = 4;
+#endif /* H5_HAVE_FILTER_SZIP */
+
+#if 0
+ hsize_t shuffle_size; /* Size of dataset with shuffle filter */
+#endif
+
+#if (defined H5_HAVE_FILTER_DEFLATE || defined H5_HAVE_FILTER_SZIP)
+ hsize_t combo_size; /* Size of dataset with multiple filters */
+#endif /* H5_HAVE_FILTER_DEFLATE || H5_HAVE_FILTER_SZIP */
+
+ filename = PARATESTFILE /* GetTestParameters() */;
+
+ if (VERBOSE_MED)
+ HDprintf("Parallel reading of dataset written with filters %s\n", filename);
+
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_FILTERS)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(
+ " API functions for basic file, dataset or filter aren't supported with this connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
+
+ /*----------------------------------------------------------
+ * STEP 0: Test without filters.
+ *----------------------------------------------------------
+ */
+ dc = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY(dc >= 0, "H5Pcreate");
+
+ hrc = H5Pset_chunk(dc, 2, chunk_size);
+ VRFY(hrc >= 0, "H5Pset_chunk");
+
+ filter_read_internal(filename, dc, /* &null_size */ NULL);
+
+ /* Clean up objects used for this test */
+ hrc = H5Pclose(dc);
+ VRFY(hrc >= 0, "H5Pclose");
+
+ /* Run steps 1-3 both with and without filters disabled on partial chunks */
+ for (disable_partial_chunk_filters = 0; disable_partial_chunk_filters <= 1;
+ disable_partial_chunk_filters++) {
+ /* Set chunk options appropriately */
+ dc = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY(dc >= 0, "H5Pcreate");
+
+ hrc = H5Pset_chunk(dc, 2, chunk_size);
+        VRFY(hrc >= 0, "H5Pset_chunk");
+
+ hrc = H5Pget_chunk_opts(dc, &chunk_opts);
+ VRFY(hrc >= 0, "H5Pget_chunk_opts");
+
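+        /* H5D_CHUNK_DONT_FILTER_PARTIAL_CHUNKS makes the library skip the
+         * filter pipeline for edge chunks that are only partially covered by
+         * the dataset extent.
+         */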
+ if (disable_partial_chunk_filters)
+ chunk_opts |= H5D_CHUNK_DONT_FILTER_PARTIAL_CHUNKS;
+
+ hrc = H5Pclose(dc);
+ VRFY(hrc >= 0, "H5Pclose");
+
+ /*----------------------------------------------------------
+ * STEP 1: Test Fletcher32 Checksum by itself.
+ *----------------------------------------------------------
+ */
+#ifdef H5_HAVE_FILTER_FLETCHER32
+
+ dc = H5Pcreate(H5P_DATASET_CREATE);
+        VRFY(dc >= 0, "H5Pcreate");
+
+        hrc = H5Pset_chunk(dc, 2, chunk_size);
+        VRFY(hrc >= 0, "H5Pset_chunk");
+
+ hrc = H5Pset_chunk_opts(dc, chunk_opts);
+ VRFY(hrc >= 0, "H5Pset_chunk_opts");
+
+ hrc = H5Pset_filter(dc, H5Z_FILTER_FLETCHER32, 0, 0, NULL);
+ VRFY(hrc >= 0, "H5Pset_filter");
+
+ filter_read_internal(filename, dc, &fletcher32_size);
+#if 0
+        /* null_size is only computed when the storage-size checks are enabled */
+        VRFY(fletcher32_size > null_size, "Size after checksumming is incorrect.");
+#endif
+
+ /* Clean up objects used for this test */
+ hrc = H5Pclose(dc);
+ VRFY(hrc >= 0, "H5Pclose");
+
+#endif /* H5_HAVE_FILTER_FLETCHER32 */
+
+ /*----------------------------------------------------------
+ * STEP 2: Test deflation by itself.
+ *----------------------------------------------------------
+ */
+#ifdef H5_HAVE_FILTER_DEFLATE
+
+ dc = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY(dc >= 0, "H5Pcreate");
+
+ hrc = H5Pset_chunk(dc, 2, chunk_size);
+ VRFY(hrc >= 0, "H5Pset_chunk");
+
+ hrc = H5Pset_chunk_opts(dc, chunk_opts);
+ VRFY(hrc >= 0, "H5Pset_chunk_opts");
+
+ hrc = H5Pset_deflate(dc, 6);
+ VRFY(hrc >= 0, "H5Pset_deflate");
+
+ filter_read_internal(filename, dc, &deflate_size);
+
+ /* Clean up objects used for this test */
+ hrc = H5Pclose(dc);
+ VRFY(hrc >= 0, "H5Pclose");
+
+#endif /* H5_HAVE_FILTER_DEFLATE */
+
+ /*----------------------------------------------------------
+ * STEP 3: Test szip compression by itself.
+ *----------------------------------------------------------
+ */
+#ifdef H5_HAVE_FILTER_SZIP
+ if (h5_szip_can_encode() == 1) {
+ dc = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY(dc >= 0, "H5Pcreate");
+
+ hrc = H5Pset_chunk(dc, 2, chunk_size);
+ VRFY(hrc >= 0, "H5Pset_chunk");
+
+ hrc = H5Pset_chunk_opts(dc, chunk_opts);
+ VRFY(hrc >= 0, "H5Pset_chunk_opts");
+
+ hrc = H5Pset_szip(dc, szip_options_mask, szip_pixels_per_block);
+ VRFY(hrc >= 0, "H5Pset_szip");
+
+ filter_read_internal(filename, dc, &szip_size);
+
+ /* Clean up objects used for this test */
+ hrc = H5Pclose(dc);
+ VRFY(hrc >= 0, "H5Pclose");
+ }
+#endif /* H5_HAVE_FILTER_SZIP */
+ } /* end for */
+
+ /*----------------------------------------------------------
+ * STEP 4: Test shuffling by itself.
+ *----------------------------------------------------------
+ */
+
+ dc = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY(dc >= 0, "H5Pcreate");
+
+ hrc = H5Pset_chunk(dc, 2, chunk_size);
+ VRFY(hrc >= 0, "H5Pset_chunk");
+
+ hrc = H5Pset_shuffle(dc);
+ VRFY(hrc >= 0, "H5Pset_shuffle");
+
+ filter_read_internal(filename, dc, /* &shuffle_size */ NULL);
+#if 0
+ VRFY(shuffle_size == null_size, "Shuffled size not the same as uncompressed size.");
+#endif
+
+ /* Clean up objects used for this test */
+ hrc = H5Pclose(dc);
+ VRFY(hrc >= 0, "H5Pclose");
+
+ /*----------------------------------------------------------
+ * STEP 5: Test shuffle + deflate + checksum in any order.
+ *----------------------------------------------------------
+ */
+#ifdef H5_HAVE_FILTER_DEFLATE
+ /* Testing shuffle+deflate+checksum filters (checksum first) */
+ dc = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY(dc >= 0, "H5Pcreate");
+
+ hrc = H5Pset_chunk(dc, 2, chunk_size);
+ VRFY(hrc >= 0, "H5Pset_chunk");
+
+ hrc = H5Pset_fletcher32(dc);
+ VRFY(hrc >= 0, "H5Pset_fletcher32");
+
+ hrc = H5Pset_shuffle(dc);
+ VRFY(hrc >= 0, "H5Pset_shuffle");
+
+ hrc = H5Pset_deflate(dc, 6);
+ VRFY(hrc >= 0, "H5Pset_deflate");
+
+ filter_read_internal(filename, dc, &combo_size);
+
+ /* Clean up objects used for this test */
+ hrc = H5Pclose(dc);
+ VRFY(hrc >= 0, "H5Pclose");
+
+ /* Testing shuffle+deflate+checksum filters (checksum last) */
+ dc = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY(dc >= 0, "H5Pcreate");
+
+ hrc = H5Pset_chunk(dc, 2, chunk_size);
+ VRFY(hrc >= 0, "H5Pset_chunk");
+
+ hrc = H5Pset_shuffle(dc);
+ VRFY(hrc >= 0, "H5Pset_shuffle");
+
+ hrc = H5Pset_deflate(dc, 6);
+ VRFY(hrc >= 0, "H5Pset_deflate");
+
+ hrc = H5Pset_fletcher32(dc);
+ VRFY(hrc >= 0, "H5Pset_fletcher32");
+
+ filter_read_internal(filename, dc, &combo_size);
+
+ /* Clean up objects used for this test */
+ hrc = H5Pclose(dc);
+ VRFY(hrc >= 0, "H5Pclose");
+
+#endif /* H5_HAVE_FILTER_DEFLATE */
+
+ /*----------------------------------------------------------
+ * STEP 6: Test shuffle + szip + checksum in any order.
+ *----------------------------------------------------------
+ */
+#ifdef H5_HAVE_FILTER_SZIP
+
+ /* Testing shuffle+szip(with encoder)+checksum filters(checksum first) */
+ dc = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY(dc >= 0, "H5Pcreate");
+
+ hrc = H5Pset_chunk(dc, 2, chunk_size);
+ VRFY(hrc >= 0, "H5Pset_chunk");
+
+ hrc = H5Pset_fletcher32(dc);
+ VRFY(hrc >= 0, "H5Pset_fletcher32");
+
+ hrc = H5Pset_shuffle(dc);
+ VRFY(hrc >= 0, "H5Pset_shuffle");
+
+ /* Make sure encoding is enabled */
+ if (h5_szip_can_encode() == 1) {
+ hrc = H5Pset_szip(dc, szip_options_mask, szip_pixels_per_block);
+ VRFY(hrc >= 0, "H5Pset_szip");
+
+ filter_read_internal(filename, dc, &combo_size);
+ }
+
+ /* Clean up objects used for this test */
+ hrc = H5Pclose(dc);
+ VRFY(hrc >= 0, "H5Pclose");
+
+ /* Testing shuffle+szip(with encoder)+checksum filters(checksum last) */
+ /* Make sure encoding is enabled */
+ if (h5_szip_can_encode() == 1) {
+ dc = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY(dc >= 0, "H5Pcreate");
+
+ hrc = H5Pset_chunk(dc, 2, chunk_size);
+ VRFY(hrc >= 0, "H5Pset_chunk");
+
+ hrc = H5Pset_shuffle(dc);
+ VRFY(hrc >= 0, "H5Pset_shuffle");
+
+ hrc = H5Pset_szip(dc, szip_options_mask, szip_pixels_per_block);
+ VRFY(hrc >= 0, "H5Pset_szip");
+
+ hrc = H5Pset_fletcher32(dc);
+ VRFY(hrc >= 0, "H5Pset_fletcher32");
+
+ filter_read_internal(filename, dc, &combo_size);
+
+ /* Clean up objects used for this test */
+ hrc = H5Pclose(dc);
+ VRFY(hrc >= 0, "H5Pclose");
+ }
+
+#endif /* H5_HAVE_FILTER_SZIP */
+}
diff --git a/testpar/API/t_mdset.c b/testpar/API/t_mdset.c
new file mode 100644
index 0000000..e11818f
--- /dev/null
+++ b/testpar/API/t_mdset.c
@@ -0,0 +1,2814 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://www.hdfgroup.org/licenses. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+#include "hdf5.h"
+#include "testphdf5.h"
+
+#if 0
+#include "H5Dprivate.h"
+#include "H5private.h"
+#endif
+
+#define DIM 2
+#define SIZE 32
+#define NDATASET 4
+#define GROUP_DEPTH 32
+enum obj_type { is_group, is_dset };
+
+static int get_size(void);
+static void write_dataset(hid_t, hid_t, hid_t);
+static int read_dataset(hid_t, hid_t, hid_t);
+static void create_group_recursive(hid_t, hid_t, hid_t, int);
+static void recursive_read_group(hid_t, hid_t, hid_t, int);
+static void group_dataset_read(hid_t fid, int mpi_rank, int m);
+static void write_attribute(hid_t, int, int);
+static int read_attribute(hid_t, int, int);
+static int check_value(DATATYPE *, DATATYPE *, int);
+static void get_slab(hsize_t[], hsize_t[], hsize_t[], hsize_t[], int);
+
+/*
+ * The size value computed by this function is used extensively in
+ * configuring tests for the current number of processes.
+ *
+ * This function was created as part of an effort to allow the
+ * test functions in this file to run on an arbitrary number of
+ * processors.
+ * JRM - 8/11/04
+ */
+
+static int
+get_size(void)
+{
+ int mpi_rank;
+ int mpi_size;
+ int size = SIZE;
+
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); /* needed for VRFY */
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+
+ if (mpi_size > size) {
+ if ((mpi_size % 2) == 0) {
+ size = mpi_size;
+ }
+ else {
+ size = mpi_size + 1;
+ }
+ }
+
+ VRFY((mpi_size <= size), "mpi_size <= size");
+ VRFY(((size % 2) == 0), "size isn't even");
+
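+    /* e.g. with 32 or fewer ranks this returns SIZE (32); with 33 ranks it
+     * returns 34; with 48 ranks, 48. The result is always even and at
+     * least mpi_size.
+     */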
+ return (size);
+
+} /* get_size() */
+
+/*
+ * Example of using PHDF5 to create a zero-sized dataset.
+ *
+ */
+void
+zero_dim_dset(void)
+{
+ int mpi_size, mpi_rank;
+ const char *filename;
+ hid_t fid, plist, dcpl, dsid, sid;
+ hsize_t dim, chunk_dim;
+ herr_t ret;
+ int data[1];
+
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(" API functions for basic file or dataset aren't supported with this connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
+
+ filename = PARATESTFILE /* GetTestParameters() */;
+
+ plist = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type);
+ VRFY((plist >= 0), "create_faccess_plist succeeded");
+
+ fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, plist);
+ VRFY((fid >= 0), "H5Fcreate succeeded");
+ ret = H5Pclose(plist);
+ VRFY((ret >= 0), "H5Pclose succeeded");
+
+ dcpl = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY((dcpl >= 0), "failed H5Pcreate");
+
+ /* Set 1 chunk size */
+ chunk_dim = 1;
+ ret = H5Pset_chunk(dcpl, 1, &chunk_dim);
+ VRFY((ret >= 0), "failed H5Pset_chunk");
+
+ /* Create 1D dataspace with 0 dim size */
+ dim = 0;
+ sid = H5Screate_simple(1, &dim, NULL);
+ VRFY((sid >= 0), "failed H5Screate_simple");
+
+ /* Create chunked dataset */
+ dsid = H5Dcreate2(fid, "dset", H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT);
+ VRFY((dsid >= 0), "failed H5Dcreate2");
+
+ /* write 0 elements from dataset */
+    /* Write 0 elements to the dataset */
+ VRFY((ret >= 0), "failed H5Dwrite");
+
+ /* Read 0 elements from dataset */
+ ret = H5Dread(dsid, H5T_NATIVE_INT, sid, sid, H5P_DEFAULT, data);
+ VRFY((ret >= 0), "failed H5Dread");
+
+ H5Pclose(dcpl);
+ H5Dclose(dsid);
+ H5Sclose(sid);
+ H5Fclose(fid);
+}
+
+/*
+ * Example of using PHDF5 to create ndatasets datasets. Each process writes
+ * a slab of the array to the file.
+ */
+void
+multiple_dset_write(void)
+{
+ int i, j, n, mpi_size, mpi_rank, size;
+ hid_t iof, plist, dataset, memspace, filespace;
+ hid_t dcpl; /* Dataset creation property list */
+ hsize_t chunk_origin[DIM];
+ hsize_t chunk_dims[DIM], file_dims[DIM];
+ hsize_t count[DIM] = {1, 1};
+ double *outme = NULL;
+ double fill = 1.0; /* Fill value */
+ char dname[100];
+ herr_t ret;
+#if 0
+ const H5Ptest_param_t *pt;
+#endif
+ char *filename;
+ int ndatasets;
+
+#if 0
+ pt = GetTestParameters();
+#endif
+ /* filename = pt->name; */ filename = PARATESTFILE;
+ /* ndatasets = pt->count; */ ndatasets = NDATASETS;
+
+ size = get_size();
+
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(" API functions for basic file or dataset aren't supported with this connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
+
+ outme = HDmalloc((size_t)size * (size_t)size * sizeof(double));
+ VRFY((outme != NULL), "HDmalloc succeeded for outme");
+
+ plist = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type);
+ VRFY((plist >= 0), "create_faccess_plist succeeded");
+ iof = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, plist);
+ VRFY((iof >= 0), "H5Fcreate succeeded");
+ ret = H5Pclose(plist);
+ VRFY((ret >= 0), "H5Pclose succeeded");
+
+ /* decide the hyperslab according to process number. */
+ get_slab(chunk_origin, chunk_dims, count, file_dims, size);
+
+ memspace = H5Screate_simple(DIM, chunk_dims, NULL);
+ filespace = H5Screate_simple(DIM, file_dims, NULL);
+ ret = H5Sselect_hyperslab(filespace, H5S_SELECT_SET, chunk_origin, chunk_dims, count, chunk_dims);
+ VRFY((ret >= 0), "mdata hyperslab selection");
+
+ /* Create a dataset creation property list */
+ dcpl = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY((dcpl >= 0), "dataset creation property list succeeded");
+
+ ret = H5Pset_fill_value(dcpl, H5T_NATIVE_DOUBLE, &fill);
+ VRFY((ret >= 0), "set fill-value succeeded");
+
+ for (n = 0; n < ndatasets; n++) {
+ HDsnprintf(dname, sizeof(dname), "dataset %d", n);
+ dataset = H5Dcreate2(iof, dname, H5T_NATIVE_DOUBLE, filespace, H5P_DEFAULT, dcpl, H5P_DEFAULT);
+ VRFY((dataset > 0), dname);
+
+ /* calculate data to write */
+ for (i = 0; i < size; i++)
+ for (j = 0; j < size; j++)
+ outme[(i * size) + j] = n * 1000 + mpi_rank;
+
+ H5Dwrite(dataset, H5T_NATIVE_DOUBLE, memspace, filespace, H5P_DEFAULT, outme);
+
+ H5Dclose(dataset);
+#ifdef BARRIER_CHECKS
+ if (!((n + 1) % 10)) {
+ HDprintf("created %d datasets\n", n + 1);
+ MPI_Barrier(MPI_COMM_WORLD);
+ }
+#endif /* BARRIER_CHECKS */
+ }
+
+ H5Sclose(filespace);
+ H5Sclose(memspace);
+ H5Pclose(dcpl);
+ H5Fclose(iof);
+
+ HDfree(outme);
+}
+
+/* Example of using PHDF5 to create, write, and read a compact dataset.
+ */
+void
+compact_dataset(void)
+{
+ int i, j, mpi_size, mpi_rank, size, err_num = 0;
+ hid_t iof, plist, dcpl, dxpl, dataset, filespace;
+ hsize_t file_dims[DIM];
+ double *outme;
+ double *inme;
+ char dname[] = "dataset";
+ herr_t ret;
+ const char *filename;
+#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
+ hbool_t prop_value;
+#endif
+
+ size = get_size();
+
+ for (i = 0; i < DIM; i++)
+ file_dims[i] = (hsize_t)size;
+
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(" API functions for basic file or dataset aren't supported with this connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
+
+ outme = HDmalloc((size_t)((size_t)size * (size_t)size * sizeof(double)));
+ VRFY((outme != NULL), "HDmalloc succeeded for outme");
+
+ inme = HDmalloc((size_t)size * (size_t)size * sizeof(double));
+    VRFY((inme != NULL), "HDmalloc succeeded for inme");
+
+ filename = PARATESTFILE /* GetTestParameters() */;
+ VRFY((mpi_size <= size), "mpi_size <= size");
+
+ plist = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type);
+ iof = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, plist);
+
+ /* Define data space */
+ filespace = H5Screate_simple(DIM, file_dims, NULL);
+
+ /* Create a compact dataset */
+ dcpl = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY((dcpl >= 0), "dataset creation property list succeeded");
+ ret = H5Pset_layout(dcpl, H5D_COMPACT);
+    VRFY((ret >= 0), "set property list for compact dataset");
+ ret = H5Pset_alloc_time(dcpl, H5D_ALLOC_TIME_EARLY);
+ VRFY((ret >= 0), "set space allocation time for compact dataset");
+
+ dataset = H5Dcreate2(iof, dname, H5T_NATIVE_DOUBLE, filespace, H5P_DEFAULT, dcpl, H5P_DEFAULT);
+ VRFY((dataset >= 0), "H5Dcreate2 succeeded");
+
+ /* set up the collective transfer properties list */
+ dxpl = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((dxpl >= 0), "");
+ ret = H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);
+ VRFY((ret >= 0), "H5Pcreate xfer succeeded");
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(dxpl, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret >= 0), "set independent IO collectively succeeded");
+ }
+
+ /* Recalculate data to write. Each process writes the same data. */
+ for (i = 0; i < size; i++)
+ for (j = 0; j < size; j++)
+ outme[(i * size) + j] = (i + j) * 1000;
+
+ ret = H5Dwrite(dataset, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, dxpl, outme);
+ VRFY((ret >= 0), "H5Dwrite succeeded");
+
+ H5Pclose(dcpl);
+ H5Pclose(plist);
+ H5Dclose(dataset);
+ H5Sclose(filespace);
+ H5Fclose(iof);
+
+ /* Open the file and dataset, read and compare the data. */
+ plist = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type);
+ iof = H5Fopen(filename, H5F_ACC_RDONLY, plist);
+ VRFY((iof >= 0), "H5Fopen succeeded");
+
+ /* set up the collective transfer properties list */
+ dxpl = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((dxpl >= 0), "");
+ ret = H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);
+ VRFY((ret >= 0), "H5Pcreate xfer succeeded");
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(dxpl, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret >= 0), "set independent IO collectively succeeded");
+ }
+
+ dataset = H5Dopen2(iof, dname, H5P_DEFAULT);
+ VRFY((dataset >= 0), "H5Dopen2 succeeded");
+
+#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
+ prop_value = H5D_XFER_COLL_RANK0_BCAST_DEF;
+ ret = H5Pinsert2(dxpl, H5D_XFER_COLL_RANK0_BCAST_NAME, H5D_XFER_COLL_RANK0_BCAST_SIZE, &prop_value, NULL,
+ NULL, NULL, NULL, NULL, NULL);
+ VRFY((ret >= 0), "H5Pinsert2() succeeded");
+#endif /* H5_HAVE_INSTRUMENTED_LIBRARY */
+
+ ret = H5Dread(dataset, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, dxpl, inme);
+ VRFY((ret >= 0), "H5Dread succeeded");
+
+#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
+ prop_value = FALSE;
+ ret = H5Pget(dxpl, H5D_XFER_COLL_RANK0_BCAST_NAME, &prop_value);
+ VRFY((ret >= 0), "H5Pget succeeded");
+ VRFY((prop_value == FALSE && dxfer_coll_type == DXFER_COLLECTIVE_IO),
+ "rank 0 Bcast optimization was performed for a compact dataset");
+#endif /* H5_HAVE_INSTRUMENTED_LIBRARY */
+
+ /* Verify data value */
+ for (i = 0; i < size; i++)
+ for (j = 0; j < size; j++)
+ if (!H5_DBL_ABS_EQUAL(inme[(i * size) + j], outme[(i * size) + j]))
+ if (err_num++ < MAX_ERR_REPORT || VERBOSE_MED)
+ HDprintf("Dataset Verify failed at [%d][%d]: expect %f, got %f\n", i, j,
+ outme[(i * size) + j], inme[(i * size) + j]);
+
+ H5Pclose(plist);
+ H5Pclose(dxpl);
+ H5Dclose(dataset);
+ H5Fclose(iof);
+ HDfree(inme);
+ HDfree(outme);
+}
+
+/*
+ * Example of using PHDF5 to create, write, and read a dataset and an
+ * attribute with a null dataspace.
+ */
+void
+null_dataset(void)
+{
+ int mpi_size, mpi_rank;
+ hid_t iof, plist, dxpl, dataset, attr, sid;
+ unsigned uval = 2; /* Buffer for writing to dataset */
+ int val = 1; /* Buffer for writing to attribute */
+ hssize_t nelem;
+ char dname[] = "dataset";
+ char attr_name[] = "attribute";
+ herr_t ret;
+ const char *filename;
+
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(" API functions for basic file, dataset, or attribute aren't supported with this "
+ "connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
+
+ filename = PARATESTFILE /* GetTestParameters() */;
+
+ plist = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type);
+ iof = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, plist);
+
+ /* Define data space */
+ sid = H5Screate(H5S_NULL);
+
+ /* Check that the null dataspace actually has 0 elements */
+ nelem = H5Sget_simple_extent_npoints(sid);
+ VRFY((nelem == 0), "H5Sget_simple_extent_npoints");
+
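+    /* With a null dataspace there are no elements to transfer, so the writes
+     * and reads below should leave the user buffers untouched.
+     */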
+    /* Create a dataset with the null dataspace */
+ dataset = H5Dcreate2(iof, dname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ VRFY((dataset >= 0), "H5Dcreate2 succeeded");
+
+ /* set up the collective transfer properties list */
+ dxpl = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((dxpl >= 0), "");
+ ret = H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);
+ VRFY((ret >= 0), "H5Pcreate xfer succeeded");
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(dxpl, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret >= 0), "set independent IO collectively succeeded");
+ }
+
+ /* Write "nothing" to the dataset(with type conversion) */
+ ret = H5Dwrite(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, dxpl, &uval);
+ VRFY((ret >= 0), "H5Dwrite succeeded");
+
+    /* Create an attribute for the dataset */
+ attr = H5Acreate2(dataset, attr_name, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT);
+ VRFY((attr >= 0), "H5Acreate2");
+
+ /* Write "nothing" to the attribute(with type conversion) */
+ ret = H5Awrite(attr, H5T_NATIVE_INT, &val);
+ VRFY((ret >= 0), "H5Awrite");
+
+ H5Aclose(attr);
+ H5Dclose(dataset);
+ H5Pclose(plist);
+ H5Sclose(sid);
+ H5Fclose(iof);
+
+ /* Open the file and dataset, read and compare the data. */
+ plist = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type);
+ iof = H5Fopen(filename, H5F_ACC_RDONLY, plist);
+ VRFY((iof >= 0), "H5Fopen succeeded");
+
+ /* set up the collective transfer properties list */
+ dxpl = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((dxpl >= 0), "");
+ ret = H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);
+ VRFY((ret >= 0), "H5Pcreate xfer succeeded");
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(dxpl, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret >= 0), "set independent IO collectively succeeded");
+ }
+
+ dataset = H5Dopen2(iof, dname, H5P_DEFAULT);
+ VRFY((dataset >= 0), "H5Dopen2 succeeded");
+
+ /* Try reading from the dataset(make certain our buffer is unmodified) */
+ ret = H5Dread(dataset, H5T_NATIVE_UINT, H5S_ALL, H5S_ALL, dxpl, &uval);
+ VRFY((ret >= 0), "H5Dread");
+ VRFY((uval == 2), "H5Dread");
+
+ /* Open the attribute for the dataset */
+ attr = H5Aopen(dataset, attr_name, H5P_DEFAULT);
+ VRFY((attr >= 0), "H5Aopen");
+
+    /* Try reading from the attribute (make certain our buffer is unmodified) */
+    ret = H5Aread(attr, H5T_NATIVE_INT, &val);
+ VRFY((ret >= 0), "H5Aread");
+ VRFY((val == 1), "H5Aread");
+
+ H5Pclose(plist);
+ H5Pclose(dxpl);
+ H5Aclose(attr);
+ H5Dclose(dataset);
+ H5Fclose(iof);
+}
+
+/* Example of using PHDF5 to create "large" datasets. (>2GB, >4GB, >8GB)
+ * Actual data is _not_ written to these datasets. Dataspaces are exact
+ * sizes(2GB, 4GB, etc.), but the metadata for the file pushes the file over
+ * the boundary of interest.
+ */
+void
+big_dataset(void)
+{
+ int mpi_size, mpi_rank; /* MPI info */
+ hid_t iof, /* File ID */
+ fapl, /* File access property list ID */
+ dataset, /* Dataset ID */
+ filespace; /* Dataset's dataspace ID */
+ hsize_t file_dims[4]; /* Dimensions of dataspace */
+ char dname[] = "dataset"; /* Name of dataset */
+#if 0
+ MPI_Offset file_size; /* Size of file on disk */
+#endif
+ herr_t ret; /* Generic return value */
+ const char *filename;
+
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(" API functions for basic file or dataset aren't supported with this connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
+
+ /* Verify MPI_Offset can handle larger than 2GB sizes */
+ VRFY((sizeof(MPI_Offset) > 4), "sizeof(MPI_Offset)>4");
+
+ filename = PARATESTFILE /* GetTestParameters() */;
+
+ fapl = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type);
+ VRFY((fapl >= 0), "create_faccess_plist succeeded");
+
+ /*
+ * Create >2GB HDF5 file
+ */
+ iof = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
+ VRFY((iof >= 0), "H5Fcreate succeeded");
+
+ /* Define dataspace for 2GB dataspace */
+ file_dims[0] = 2;
+ file_dims[1] = 1024;
+ file_dims[2] = 1024;
+ file_dims[3] = 1024;
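+    /* 2 * 1024^3 one-byte elements: exactly 2 GiB of raw data */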
+ filespace = H5Screate_simple(4, file_dims, NULL);
+ VRFY((filespace >= 0), "H5Screate_simple succeeded");
+
+ dataset = H5Dcreate2(iof, dname, H5T_NATIVE_UCHAR, filespace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ VRFY((dataset >= 0), "H5Dcreate2 succeeded");
+
+ /* Close all file objects */
+ ret = H5Dclose(dataset);
+ VRFY((ret >= 0), "H5Dclose succeeded");
+ ret = H5Sclose(filespace);
+ VRFY((ret >= 0), "H5Sclose succeeded");
+ ret = H5Fclose(iof);
+ VRFY((ret >= 0), "H5Fclose succeeded");
+
+#if 0
+ /* Check that file of the correct size was created */
+ file_size = h5_get_file_size(filename, fapl);
+ VRFY((file_size == 2147485696ULL), "File is correct size(~2GB)");
+#endif
+
+ /*
+ * Create >4GB HDF5 file
+ */
+ iof = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
+ VRFY((iof >= 0), "H5Fcreate succeeded");
+
+ /* Define dataspace for 4GB dataspace */
+ file_dims[0] = 4;
+ file_dims[1] = 1024;
+ file_dims[2] = 1024;
+ file_dims[3] = 1024;
+ filespace = H5Screate_simple(4, file_dims, NULL);
+ VRFY((filespace >= 0), "H5Screate_simple succeeded");
+
+ dataset = H5Dcreate2(iof, dname, H5T_NATIVE_UCHAR, filespace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ VRFY((dataset >= 0), "H5Dcreate2 succeeded");
+
+ /* Close all file objects */
+ ret = H5Dclose(dataset);
+ VRFY((ret >= 0), "H5Dclose succeeded");
+ ret = H5Sclose(filespace);
+ VRFY((ret >= 0), "H5Sclose succeeded");
+ ret = H5Fclose(iof);
+ VRFY((ret >= 0), "H5Fclose succeeded");
+#if 0
+ /* Check that file of the correct size was created */
+ file_size = h5_get_file_size(filename, fapl);
+ VRFY((file_size == 4294969344ULL), "File is correct size(~4GB)");
+#endif
+
+ /*
+ * Create >8GB HDF5 file
+ */
+ iof = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
+ VRFY((iof >= 0), "H5Fcreate succeeded");
+
+ /* Define dataspace for 8GB dataspace */
+ file_dims[0] = 8;
+ file_dims[1] = 1024;
+ file_dims[2] = 1024;
+ file_dims[3] = 1024;
+ filespace = H5Screate_simple(4, file_dims, NULL);
+ VRFY((filespace >= 0), "H5Screate_simple succeeded");
+
+ dataset = H5Dcreate2(iof, dname, H5T_NATIVE_UCHAR, filespace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ VRFY((dataset >= 0), "H5Dcreate2 succeeded");
+
+ /* Close all file objects */
+ ret = H5Dclose(dataset);
+ VRFY((ret >= 0), "H5Dclose succeeded");
+ ret = H5Sclose(filespace);
+ VRFY((ret >= 0), "H5Sclose succeeded");
+ ret = H5Fclose(iof);
+ VRFY((ret >= 0), "H5Fclose succeeded");
+#if 0
+ /* Check that file of the correct size was created */
+ file_size = h5_get_file_size(filename, fapl);
+ VRFY((file_size == 8589936640ULL), "File is correct size(~8GB)");
+#endif
+
+ /* Close fapl */
+ ret = H5Pclose(fapl);
+ VRFY((ret >= 0), "H5Pclose succeeded");
+}
+
+/* Example of using PHDF5 to read a partially written dataset. The dataset does
+ * not have actual data written to the entire raw data area and relies on the
+ * default fill value of zeros to work correctly.
+ */
+void
+dataset_fillvalue(void)
+{
+ int mpi_size, mpi_rank; /* MPI info */
+ int err_num; /* Number of errors */
+ hid_t iof, /* File ID */
+ fapl, /* File access property list ID */
+ dxpl, /* Data transfer property list ID */
+ dataset, /* Dataset ID */
+ memspace, /* Memory dataspace ID */
+ filespace; /* Dataset's dataspace ID */
+ char dname[] = "dataset"; /* Name of dataset */
+ hsize_t dset_dims[4] = {0, 6, 7, 8};
+ hsize_t req_start[4] = {0, 0, 0, 0};
+ hsize_t req_count[4] = {1, 6, 7, 8};
+ hsize_t dset_size; /* Dataset size */
+ int *rdata, *wdata; /* Buffers for data to read and write */
+ int *twdata, *trdata; /* Temporary pointer into buffer */
+ int acc, i, ii, j, k, l; /* Local index variables */
+ herr_t ret; /* Generic return value */
+ const char *filename;
+#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
+ hbool_t prop_value;
+#endif
+
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(" API functions for basic file or dataset aren't supported with this connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
+
+ filename = PARATESTFILE /* GetTestParameters() */;
+
+ /* Set the dataset dimension to be one row more than number of processes */
+ /* and calculate the actual dataset size. */
+ dset_dims[0] = (hsize_t)(mpi_size + 1);
+ dset_size = dset_dims[0] * dset_dims[1] * dset_dims[2] * dset_dims[3];
+
+ /* Allocate space for the buffers */
+ rdata = HDmalloc((size_t)(dset_size * sizeof(int)));
+    VRFY((rdata != NULL), "HDmalloc succeeded for read buffer");
+ wdata = HDmalloc((size_t)(dset_size * sizeof(int)));
+ VRFY((wdata != NULL), "HDmalloc succeeded for write buffer");
+
+ fapl = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type);
+ VRFY((fapl >= 0), "create_faccess_plist succeeded");
+
+ /*
+ * Create HDF5 file
+ */
+ iof = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
+ VRFY((iof >= 0), "H5Fcreate succeeded");
+
+ filespace = H5Screate_simple(4, dset_dims, NULL);
+ VRFY((filespace >= 0), "File H5Screate_simple succeeded");
+
+ dataset = H5Dcreate2(iof, dname, H5T_NATIVE_INT, filespace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ VRFY((dataset >= 0), "H5Dcreate2 succeeded");
+
+ memspace = H5Screate_simple(4, dset_dims, NULL);
+ VRFY((memspace >= 0), "Memory H5Screate_simple succeeded");
+
+ /*
+ * Read dataset before any data is written.
+ */
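+    /* Nothing has been written yet, so every element read back should be the
+     * default fill value of 0.
+     */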
+
+ /* Create DXPL for I/O */
+ dxpl = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((dxpl >= 0), "H5Pcreate succeeded");
+
+#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
+ prop_value = H5D_XFER_COLL_RANK0_BCAST_DEF;
+ ret = H5Pinsert2(dxpl, H5D_XFER_COLL_RANK0_BCAST_NAME, H5D_XFER_COLL_RANK0_BCAST_SIZE, &prop_value, NULL,
+ NULL, NULL, NULL, NULL, NULL);
+ VRFY((ret >= 0), "testing property list inserted succeeded");
+#endif /* H5_HAVE_INSTRUMENTED_LIBRARY */
+
+ for (ii = 0; ii < 2; ii++) {
+
+ if (ii == 0)
+ ret = H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_INDEPENDENT);
+ else
+ ret = H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);
+ VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
+
+ /* set entire read buffer with the constant 2 */
+ HDmemset(rdata, 2, (size_t)(dset_size * sizeof(int)));
+
+ /* Read the entire dataset back */
+ ret = H5Dread(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, dxpl, rdata);
+ VRFY((ret >= 0), "H5Dread succeeded");
+
+#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
+ prop_value = FALSE;
+ ret = H5Pget(dxpl, H5D_XFER_COLL_RANK0_BCAST_NAME, &prop_value);
+ VRFY((ret >= 0), "testing property list get succeeded");
+ if (ii == 0)
+ VRFY((prop_value == FALSE), "correctly handled rank 0 Bcast");
+ else
+ VRFY((prop_value == TRUE), "correctly handled rank 0 Bcast");
+#endif /* H5_HAVE_INSTRUMENTED_LIBRARY */
+
+ /* Verify all data read are the fill value 0 */
+ trdata = rdata;
+ err_num = 0;
+ for (i = 0; i < (int)dset_dims[0]; i++)
+ for (j = 0; j < (int)dset_dims[1]; j++)
+ for (k = 0; k < (int)dset_dims[2]; k++)
+ for (l = 0; l < (int)dset_dims[3]; l++, trdata++)
+ if (*trdata != 0)
+ if (err_num++ < MAX_ERR_REPORT || VERBOSE_MED)
+ HDprintf(
+ "Rank %d: Dataset Verify failed at [%d][%d][%d][%d]: expect 0, got %d\n",
+ mpi_rank, i, j, k, l, *trdata);
+ if (err_num > MAX_ERR_REPORT && !VERBOSE_MED)
+ HDprintf("Rank %d: [more errors ...]\n", mpi_rank);
+ if (err_num) {
+ HDprintf("Rank %d: %d errors found in check_value\n", mpi_rank, err_num);
+ nerrors++;
+ }
+ }
+
+ /* Barrier to ensure all processes have completed the above test. */
+ MPI_Barrier(MPI_COMM_WORLD);
+
+ /*
+     * Each process writes 1 row of data. Thus the last row is not written.
+ */
+ /* Create hyperslabs in memory and file dataspaces */
+ req_start[0] = (hsize_t)mpi_rank;
+ ret = H5Sselect_hyperslab(filespace, H5S_SELECT_SET, req_start, NULL, req_count, NULL);
+    VRFY((ret >= 0), "H5Sselect_hyperslab succeeded on file dataspace");
+ ret = H5Sselect_hyperslab(memspace, H5S_SELECT_SET, req_start, NULL, req_count, NULL);
+ VRFY((ret >= 0), "H5Sselect_hyperslab succeeded on memory dataspace");
+
+ ret = H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);
+ VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(dxpl, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret >= 0), "set independent IO collectively succeeded");
+ }
+
+ /* Fill write buffer with some values */
+ twdata = wdata;
+ for (i = 0, acc = 0; i < (int)dset_dims[0]; i++)
+ for (j = 0; j < (int)dset_dims[1]; j++)
+ for (k = 0; k < (int)dset_dims[2]; k++)
+ for (l = 0; l < (int)dset_dims[3]; l++)
+ *twdata++ = acc++;
+
+ /* Collectively write a hyperslab of data to the dataset */
+ ret = H5Dwrite(dataset, H5T_NATIVE_INT, memspace, filespace, dxpl, wdata);
+ VRFY((ret >= 0), "H5Dwrite succeeded");
+
+ /* Barrier here, to allow processes to sync */
+ MPI_Barrier(MPI_COMM_WORLD);
+
+ /*
+ * Read dataset after partial write.
+ */
+
+#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
+ prop_value = H5D_XFER_COLL_RANK0_BCAST_DEF;
+ ret = H5Pset(dxpl, H5D_XFER_COLL_RANK0_BCAST_NAME, &prop_value);
+ VRFY((ret >= 0), " H5Pset succeeded");
+#endif /* H5_HAVE_INSTRUMENTED_LIBRARY */
+
+ for (ii = 0; ii < 2; ii++) {
+
+ if (ii == 0)
+ ret = H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_INDEPENDENT);
+ else
+ ret = H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);
+ VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
+
+ /* set entire read buffer with the constant 2 */
+ HDmemset(rdata, 2, (size_t)(dset_size * sizeof(int)));
+
+ /* Read the entire dataset back */
+ ret = H5Dread(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, dxpl, rdata);
+ VRFY((ret >= 0), "H5Dread succeeded");
+
+#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
+ prop_value = FALSE;
+ ret = H5Pget(dxpl, H5D_XFER_COLL_RANK0_BCAST_NAME, &prop_value);
+ VRFY((ret >= 0), "testing property list get succeeded");
+ if (ii == 0)
+ VRFY((prop_value == FALSE), "correctly handled rank 0 Bcast");
+ else
+ VRFY((prop_value == TRUE), "correctly handled rank 0 Bcast");
+#endif /* H5_HAVE_INSTRUMENTED_LIBRARY */
+
+ /* Verify correct data read */
+ twdata = wdata;
+ trdata = rdata;
+ err_num = 0;
+ for (i = 0; i < (int)dset_dims[0]; i++)
+ for (j = 0; j < (int)dset_dims[1]; j++)
+ for (k = 0; k < (int)dset_dims[2]; k++)
+ for (l = 0; l < (int)dset_dims[3]; l++, twdata++, trdata++)
+ if (i < mpi_size) {
+ if (*twdata != *trdata)
+ if (err_num++ < MAX_ERR_REPORT || VERBOSE_MED)
+ HDprintf("Dataset Verify failed at [%d][%d][%d][%d]: expect %d, got %d\n",
+ i, j, k, l, *twdata, *trdata);
+ } /* end if */
+ else {
+ if (*trdata != 0)
+ if (err_num++ < MAX_ERR_REPORT || VERBOSE_MED)
+ HDprintf("Dataset Verify failed at [%d][%d][%d][%d]: expect 0, got %d\n",
+ i, j, k, l, *trdata);
+ } /* end else */
+ if (err_num > MAX_ERR_REPORT && !VERBOSE_MED)
+ HDprintf("[more errors ...]\n");
+ if (err_num) {
+ HDprintf("%d errors found in check_value\n", err_num);
+ nerrors++;
+ }
+ }
+
+ /* Close all file objects */
+ ret = H5Dclose(dataset);
+ VRFY((ret >= 0), "H5Dclose succeeded");
+ ret = H5Sclose(filespace);
+ VRFY((ret >= 0), "H5Sclose succeeded");
+ ret = H5Fclose(iof);
+ VRFY((ret >= 0), "H5Fclose succeeded");
+
+ /* Close memory dataspace */
+ ret = H5Sclose(memspace);
+ VRFY((ret >= 0), "H5Sclose succeeded");
+
+ /* Close dxpl */
+ ret = H5Pclose(dxpl);
+ VRFY((ret >= 0), "H5Pclose succeeded");
+
+ /* Close fapl */
+ ret = H5Pclose(fapl);
+ VRFY((ret >= 0), "H5Pclose succeeded");
+
+ /* free the buffers */
+ HDfree(rdata);
+ HDfree(wdata);
+}
+
+/* Combined the cngrpw and ingrpr tests because ingrpr reads the file created by cngrpw. */
+void
+collective_group_write_independent_group_read(void)
+{
+ collective_group_write();
+ independent_group_read();
+}
+
+/* Write multiple groups with a chunked dataset in each group collectively.
+ * These groups and datasets are for testing independent read later.
+ */
+void
+collective_group_write(void)
+{
+ int mpi_rank, mpi_size, size;
+ int i, j, m;
+ char gname[64], dname[32];
+ hid_t fid, gid, did, plist, dcpl, memspace, filespace;
+ DATATYPE *outme = NULL;
+ hsize_t chunk_origin[DIM];
+ hsize_t chunk_dims[DIM], file_dims[DIM], count[DIM];
+ hsize_t chunk_size[2]; /* Chunk dimensions - computed shortly */
+ herr_t ret1, ret2;
+#if 0
+ const H5Ptest_param_t *pt;
+#endif
+ char *filename;
+ int ngroups;
+
+#if 0
+ pt = GetTestParameters();
+#endif
+ /* filename = pt->name; */ filename = PARATESTFILE;
+ /* ngroups = pt->count; */ ngroups = NGROUPS;
+
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(
+ " API functions for basic file, group, or dataset aren't supported with this connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
+
+ size = get_size();
+
+ chunk_size[0] = (hsize_t)(size / 2);
+ chunk_size[1] = (hsize_t)(size / 2);
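+    /* Each chunk covers half of the dataset extent in each dimension, so
+     * every dataset below is stored as a 2 x 2 array of chunks.
+     */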
+
+ outme = HDmalloc((size_t)size * (size_t)size * sizeof(DATATYPE));
+ VRFY((outme != NULL), "HDmalloc succeeded for outme");
+
+ plist = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type);
+ fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, plist);
+ VRFY((fid >= 0), "H5Fcreate");
+ H5Pclose(plist);
+
+ /* decide the hyperslab according to process number. */
+ get_slab(chunk_origin, chunk_dims, count, file_dims, size);
+
+ /* select hyperslab in memory and file spaces. These two operations are
+ * identical since the datasets are the same. */
+ memspace = H5Screate_simple(DIM, file_dims, NULL);
+ ret1 = H5Sselect_hyperslab(memspace, H5S_SELECT_SET, chunk_origin, chunk_dims, count, chunk_dims);
+ filespace = H5Screate_simple(DIM, file_dims, NULL);
+ ret2 = H5Sselect_hyperslab(filespace, H5S_SELECT_SET, chunk_origin, chunk_dims, count, chunk_dims);
+ VRFY((memspace >= 0), "memspace");
+ VRFY((filespace >= 0), "filespace");
+ VRFY((ret1 == 0), "mgroup memspace selection");
+ VRFY((ret2 == 0), "mgroup filespace selection");
+
+ dcpl = H5Pcreate(H5P_DATASET_CREATE);
+ ret1 = H5Pset_chunk(dcpl, 2, chunk_size);
+ VRFY((dcpl >= 0), "dataset creation property");
+ VRFY((ret1 == 0), "set chunk for dataset creation property");
+
+ /* creates ngroups groups under the root group, writes chunked
+ * datasets in parallel. */
+ for (m = 0; m < ngroups; m++) {
+ HDsnprintf(gname, sizeof(gname), "group%d", m);
+ gid = H5Gcreate2(fid, gname, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ VRFY((gid > 0), gname);
+
+ HDsnprintf(dname, sizeof(dname), "dataset%d", m);
+ did = H5Dcreate2(gid, dname, H5T_NATIVE_INT, filespace, H5P_DEFAULT, dcpl, H5P_DEFAULT);
+ VRFY((did > 0), dname);
+
+ for (i = 0; i < size; i++)
+ for (j = 0; j < size; j++)
+ outme[(i * size) + j] = (i + j) * 1000 + mpi_rank;
+
+ ret1 = H5Dwrite(did, H5T_NATIVE_INT, memspace, filespace, H5P_DEFAULT, outme);
+ VRFY((ret1 == 0), "H5Dwrite");
+
+ ret1 = H5Dclose(did);
+ VRFY((ret1 == 0), "H5Dclose");
+
+ ret1 = H5Gclose(gid);
+ VRFY((ret1 == 0), "H5Gclose");
+
+#ifdef BARRIER_CHECKS
+ if (!((m + 1) % 10)) {
+ HDprintf("created %d groups\n", m + 1);
+ MPI_Barrier(MPI_COMM_WORLD);
+ }
+#endif /* BARRIER_CHECKS */
+ }
+
+ H5Pclose(dcpl);
+ H5Sclose(filespace);
+ H5Sclose(memspace);
+
+ ret1 = H5Fclose(fid);
+ VRFY((ret1 == 0), "H5Fclose");
+
+ HDfree(outme);
+}
+
+/* Let two sets of processes open and read different groups and chunked
+ * datasets independently.
+ */
+void
+independent_group_read(void)
+{
+ int mpi_rank, m;
+ hid_t plist, fid;
+#if 0
+ const H5Ptest_param_t *pt;
+#endif
+ char *filename;
+ int ngroups;
+ herr_t ret;
+
+#if 0
+ pt = GetTestParameters();
+#endif
+ /* filename = pt->name; */ filename = PARATESTFILE;
+ /* ngroups = pt->count; */ ngroups = NGROUPS;
+
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ plist = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type);
+ H5Pset_all_coll_metadata_ops(plist, FALSE);
+
+ fid = H5Fopen(filename, H5F_ACC_RDONLY, plist);
+ VRFY((fid > 0), "H5Fopen");
+ H5Pclose(plist);
+
+ /* Open groups and read datasets. Even-numbered processes read every other
+ * group starting from the last group; odd-numbered processes read every
+ * other group starting from the first group. */
+ if (mpi_rank % 2 == 0) {
+ for (m = ngroups - 1; m >= 0; m -= 2)
+ group_dataset_read(fid, mpi_rank, m);
+ }
+ else {
+ for (m = 0; m < ngroups; m += 2)
+ group_dataset_read(fid, mpi_rank, m);
+ }
+
+ ret = H5Fclose(fid);
+ VRFY((ret == 0), "H5Fclose");
+}
+
+/* Open and read datasets and compare data
+ */
+static void
+group_dataset_read(hid_t fid, int mpi_rank, int m)
+{
+ int ret, i, j, size;
+ char gname[64], dname[32];
+ hid_t gid, did;
+ DATATYPE *outdata = NULL;
+ DATATYPE *indata = NULL;
+
+ size = get_size();
+
+ indata = (DATATYPE *)HDmalloc((size_t)size * (size_t)size * sizeof(DATATYPE));
+ VRFY((indata != NULL), "HDmalloc succeeded for indata");
+
+ outdata = (DATATYPE *)HDmalloc((size_t)size * (size_t)size * sizeof(DATATYPE));
+ VRFY((outdata != NULL), "HDmalloc succeeded for outdata");
+
+ /* open every group under root group. */
+ HDsnprintf(gname, sizeof(gname), "group%d", m);
+ gid = H5Gopen2(fid, gname, H5P_DEFAULT);
+ VRFY((gid > 0), gname);
+
+ /* check the data. */
+ HDsnprintf(dname, sizeof(dname), "dataset%d", m);
+ did = H5Dopen2(gid, dname, H5P_DEFAULT);
+ VRFY((did > 0), dname);
+
+ H5Dread(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, indata);
+
+ /* this is the original value */
+ for (i = 0; i < size; i++)
+ for (j = 0; j < size; j++)
+ outdata[(i * size) + j] = (i + j) * 1000 + mpi_rank;
+
+ /* compare the original value(outdata) to the value in file(indata).*/
+ ret = check_value(indata, outdata, size);
+ VRFY((ret == 0), "check the data");
+
+ ret = H5Dclose(did);
+ VRFY((ret == 0), "H5Dclose");
+ ret = H5Gclose(gid);
+ VRFY((ret == 0), "H5Gclose");
+
+ HDfree(indata);
+ HDfree(outdata);
+}
+
+/*
+ * Example of using PHDF5 to create multiple groups. Under the root group,
+ * it creates ngroups groups. Under the first group just created, it
+ * recursively creates subgroups of depth GROUP_DEPTH. In each created
+ * group, it generates NDATASET datasets. Each process writes a hyperslab
+ * of an array into the file. The structure is as follows:
+ *
+ * root group
+ * |
+ * ---------------------------- ... ... ------------------------
+ * | | | ... ... | |
+ * group0*+' group1*+' group2*+' ... ... group ngroups*+'
+ * |
+ * 1st_child_group*'
+ * |
+ * 2nd_child_group*'
+ * |
+ * :
+ * :
+ * |
+ * GROUP_DEPTHth_child_group*'
+ *
+ * * means the group has dataset(s).
+ * + means the group has attribute(s).
+ * ' means the datasets in the groups have attribute(s).
+ *
+ */
+void
+multiple_group_write(void)
+{
+ int mpi_rank, mpi_size, size;
+ int m;
+ char gname[64];
+ hid_t fid, gid, plist, memspace, filespace;
+ hsize_t chunk_origin[DIM];
+ hsize_t chunk_dims[DIM], file_dims[DIM], count[DIM];
+ herr_t ret;
+#if 0
+ const H5Ptest_param_t *pt;
+#endif
+ char *filename;
+ int ngroups;
+
+#if 0
+ pt = GetTestParameters();
+#endif
+ /* filename = pt->name; */ filename = PARATESTFILE;
+ /* ngroups = pt->count; */ ngroups = NGROUPS;
+
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(" API functions for basic file, group, dataset, or attribute aren't supported with "
+ "this connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
+
+ size = get_size();
+
+ plist = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type);
+ fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, plist);
+ H5Pclose(plist);
+
+ /* decide the hyperslab according to process number. */
+ get_slab(chunk_origin, chunk_dims, count, file_dims, size);
+
+ /* select hyperslab in memory and file spaces. These two operations are
+ * identical since the datasets are the same. */
+ memspace = H5Screate_simple(DIM, file_dims, NULL);
+ VRFY((memspace >= 0), "memspace");
+ ret = H5Sselect_hyperslab(memspace, H5S_SELECT_SET, chunk_origin, chunk_dims, count, chunk_dims);
+ VRFY((ret >= 0), "mgroup memspace selection");
+
+ filespace = H5Screate_simple(DIM, file_dims, NULL);
+ VRFY((filespace >= 0), "filespace");
+ ret = H5Sselect_hyperslab(filespace, H5S_SELECT_SET, chunk_origin, chunk_dims, count, chunk_dims);
+ VRFY((ret >= 0), "mgroup filespace selection");
+
+ /* creates ngroups groups under the root group, writes datasets in
+ * parallel. */
+ for (m = 0; m < ngroups; m++) {
+ HDsnprintf(gname, sizeof(gname), "group%d", m);
+ gid = H5Gcreate2(fid, gname, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ VRFY((gid > 0), gname);
+
+ /* create attribute for these groups. */
+ write_attribute(gid, is_group, m);
+
+ if (m != 0)
+ write_dataset(memspace, filespace, gid);
+
+ H5Gclose(gid);
+
+#ifdef BARRIER_CHECKS
+ if (!((m + 1) % 10)) {
+ HDprintf("created %d groups\n", m + 1);
+ MPI_Barrier(MPI_COMM_WORLD);
+ }
+#endif /* BARRIER_CHECKS */
+ }
+
+ /* recursively creates subgroups under the first group. */
+ gid = H5Gopen2(fid, "group0", H5P_DEFAULT);
+ create_group_recursive(memspace, filespace, gid, 0);
+ ret = H5Gclose(gid);
+ VRFY((ret >= 0), "H5Gclose");
+
+ ret = H5Sclose(filespace);
+ VRFY((ret >= 0), "H5Sclose");
+ ret = H5Sclose(memspace);
+ VRFY((ret >= 0), "H5Sclose");
+ ret = H5Fclose(fid);
+ VRFY((ret >= 0), "H5Fclose");
+}
+
+/*
+ * In a group, creates NDATASET datasets. Each process writes a hyperslab
+ * of a data array to the file.
+ */
+static void
+write_dataset(hid_t memspace, hid_t filespace, hid_t gid)
+{
+ int i, j, n, size;
+ int mpi_rank, mpi_size;
+ char dname[32];
+ DATATYPE *outme = NULL;
+ hid_t did;
+
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+
+ size = get_size();
+
+ outme = HDmalloc((size_t)size * (size_t)size * sizeof(double));
+ VRFY((outme != NULL), "HDmalloc succeeded for outme");
+
+ for (n = 0; n < NDATASET; n++) {
+ HDsnprintf(dname, sizeof(dname), "dataset%d", n);
+ did = H5Dcreate2(gid, dname, H5T_NATIVE_INT, filespace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ VRFY((did > 0), dname);
+
+ for (i = 0; i < size; i++)
+ for (j = 0; j < size; j++)
+ outme[(i * size) + j] = n * 1000 + mpi_rank;
+
+ H5Dwrite(did, H5T_NATIVE_INT, memspace, filespace, H5P_DEFAULT, outme);
+
+ /* create attribute for these datasets.*/
+ write_attribute(did, is_dset, n);
+
+ H5Dclose(did);
+ }
+ HDfree(outme);
+}
+
+/*
+ * Creates subgroups of depth GROUP_DEPTH recursively. Also writes datasets
+ * in parallel in each group.
+ */
+static void
+create_group_recursive(hid_t memspace, hid_t filespace, hid_t gid, int counter)
+{
+ hid_t child_gid;
+ int mpi_rank;
+ char gname[64];
+
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+#ifdef BARRIER_CHECKS
+ if (!((counter + 1) % 10)) {
+ HDprintf("created %dth child groups\n", counter + 1);
+ MPI_Barrier(MPI_COMM_WORLD);
+ }
+#endif /* BARRIER_CHECKS */
+
+ HDsnprintf(gname, sizeof(gname), "%dth_child_group", counter + 1);
+ child_gid = H5Gcreate2(gid, gname, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ VRFY((child_gid > 0), gname);
+
+ /* write datasets in parallel. */
+ write_dataset(memspace, filespace, gid);
+
+ if (counter < GROUP_DEPTH)
+ create_group_recursive(memspace, filespace, child_gid, counter + 1);
+
+ H5Gclose(child_gid);
+}
+
+/*
+ * This function verifies the data from the multiple group testing. It opens
+ * every dataset in every group and checks their correctness.
+ */
+void
+multiple_group_read(void)
+{
+ int mpi_rank, mpi_size, error_num, size;
+ int m;
+ char gname[64];
+ hid_t plist, fid, gid, memspace, filespace;
+ hsize_t chunk_origin[DIM];
+ hsize_t chunk_dims[DIM], file_dims[DIM], count[DIM];
+#if 0
+ const H5Ptest_param_t *pt;
+#endif
+ char *filename;
+ int ngroups;
+
+#if 0
+ pt = GetTestParameters();
+#endif
+ /* filename = pt->name; */ filename = PARATESTFILE;
+ /* ngroups = pt->count; */ ngroups = NGROUPS;
+
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(" API functions for basic file, group, dataset, or attribute aren't supported with "
+ "this connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
+
+ size = get_size();
+
+ plist = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type);
+ fid = H5Fopen(filename, H5F_ACC_RDONLY, plist);
+ H5Pclose(plist);
+
+ /* decide hyperslab for each process */
+ get_slab(chunk_origin, chunk_dims, count, file_dims, size);
+
+ /* select hyperslab for memory and file space */
+ memspace = H5Screate_simple(DIM, file_dims, NULL);
+ H5Sselect_hyperslab(memspace, H5S_SELECT_SET, chunk_origin, chunk_dims, count, chunk_dims);
+ filespace = H5Screate_simple(DIM, file_dims, NULL);
+ H5Sselect_hyperslab(filespace, H5S_SELECT_SET, chunk_origin, chunk_dims, count, chunk_dims);
+
+ /* open every group under root group. */
+ for (m = 0; m < ngroups; m++) {
+ HDsnprintf(gname, sizeof(gname), "group%d", m);
+ gid = H5Gopen2(fid, gname, H5P_DEFAULT);
+ VRFY((gid > 0), gname);
+
+ /* check the data. */
+ if (m != 0)
+ if ((error_num = read_dataset(memspace, filespace, gid)) > 0)
+ nerrors += error_num;
+
+ /* check attribute.*/
+ error_num = 0;
+ if ((error_num = read_attribute(gid, is_group, m)) > 0)
+ nerrors += error_num;
+
+ H5Gclose(gid);
+
+#ifdef BARRIER_CHECKS
+ if (!((m + 1) % 10))
+ MPI_Barrier(MPI_COMM_WORLD);
+#endif /* BARRIER_CHECKS */
+ }
+
+ /* open all the groups in vertical direction. */
+ gid = H5Gopen2(fid, "group0", H5P_DEFAULT);
+ VRFY((gid > 0), "group0");
+ recursive_read_group(memspace, filespace, gid, 0);
+ H5Gclose(gid);
+
+ H5Sclose(filespace);
+ H5Sclose(memspace);
+ H5Fclose(fid);
+}
+
+/*
+ * This function opens all the datasets in a given group, checks the data
+ * with check_value(), and checks the attributes with read_attribute().
+ */
+static int
+read_dataset(hid_t memspace, hid_t filespace, hid_t gid)
+{
+ int i, j, n, mpi_rank, mpi_size, size, attr_errors = 0, vrfy_errors = 0;
+ char dname[32];
+ DATATYPE *outdata = NULL, *indata = NULL;
+ hid_t did;
+
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+
+ size = get_size();
+
+ indata = (DATATYPE *)HDmalloc((size_t)size * (size_t)size * sizeof(DATATYPE));
+ VRFY((indata != NULL), "HDmalloc succeeded for indata");
+
+ outdata = (DATATYPE *)HDmalloc((size_t)size * (size_t)size * sizeof(DATATYPE));
+ VRFY((outdata != NULL), "HDmalloc succeeded for outdata");
+
+ for (n = 0; n < NDATASET; n++) {
+ HDsnprintf(dname, sizeof(dname), "dataset%d", n);
+ did = H5Dopen2(gid, dname, H5P_DEFAULT);
+ VRFY((did > 0), dname);
+
+ H5Dread(did, H5T_NATIVE_INT, memspace, filespace, H5P_DEFAULT, indata);
+
+ /* this is the original value */
+ for (i = 0; i < size; i++)
+ for (j = 0; j < size; j++) {
+ *outdata = n * 1000 + mpi_rank;
+ outdata++;
+ }
+ outdata -= size * size;
+
+ /* compare the original value(outdata) to the value in file(indata).*/
+ vrfy_errors = check_value(indata, outdata, size);
+
+ /* check attribute.*/
+ if ((attr_errors = read_attribute(did, is_dset, n)) > 0)
+ vrfy_errors += attr_errors;
+
+ H5Dclose(did);
+ }
+
+ HDfree(indata);
+ HDfree(outdata);
+
+ return vrfy_errors;
+}
+
+/*
+ * This recursive function opens all the groups in the vertical direction and
+ * checks the data.
+ */
+static void
+recursive_read_group(hid_t memspace, hid_t filespace, hid_t gid, int counter)
+{
+ hid_t child_gid;
+ int mpi_rank, err_num = 0;
+ char gname[64];
+
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+#ifdef BARRIER_CHECKS
+ if ((counter + 1) % 10)
+ MPI_Barrier(MPI_COMM_WORLD);
+#endif /* BARRIER_CHECKS */
+
+ if ((err_num = read_dataset(memspace, filespace, gid)))
+ nerrors += err_num;
+
+ if (counter < GROUP_DEPTH) {
+ HDsnprintf(gname, sizeof(gname), "%dth_child_group", counter + 1);
+ child_gid = H5Gopen2(gid, gname, H5P_DEFAULT);
+ VRFY((child_gid > 0), gname);
+ recursive_read_group(memspace, filespace, child_gid, counter + 1);
+ H5Gclose(child_gid);
+ }
+}
+
+/* Create and write an attribute for a group or a dataset. For groups, the
+ * attribute is a scalar datum; for datasets, it is a one-dimensional array.
+ */
+static void
+write_attribute(hid_t obj_id, int this_type, int num)
+{
+ hid_t sid, aid;
+ hsize_t dspace_dims[1] = {8};
+ int i, mpi_rank, attr_data[8], dspace_rank = 1;
+ char attr_name[32];
+
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ if (this_type == is_group) {
+ HDsnprintf(attr_name, sizeof(attr_name), "Group Attribute %d", num);
+ sid = H5Screate(H5S_SCALAR);
+ aid = H5Acreate2(obj_id, attr_name, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT);
+ H5Awrite(aid, H5T_NATIVE_INT, &num);
+ H5Aclose(aid);
+ H5Sclose(sid);
+ } /* end if */
+ else if (this_type == is_dset) {
+ HDsnprintf(attr_name, sizeof(attr_name), "Dataset Attribute %d", num);
+ for (i = 0; i < 8; i++)
+ attr_data[i] = i;
+ sid = H5Screate_simple(dspace_rank, dspace_dims, NULL);
+ aid = H5Acreate2(obj_id, attr_name, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT);
+ H5Awrite(aid, H5T_NATIVE_INT, attr_data);
+ H5Aclose(aid);
+ H5Sclose(sid);
+ } /* end else-if */
+}
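+
+/* Editorial note: read_attribute() below expects exactly these values (the
+ * group attribute holds the group index 'num'; the dataset attribute holds
+ * {0, 1, ..., 7}), so any change here must be mirrored there.
+ */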
+
+/* Read and verify attribute for group or dataset. */
+static int
+read_attribute(hid_t obj_id, int this_type, int num)
+{
+ hid_t aid;
+ hsize_t group_block[2] = {1, 1}, dset_block[2] = {1, 8};
+ int i, mpi_rank, in_num, in_data[8], out_data[8], vrfy_errors = 0;
+ char attr_name[32];
+
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ if (this_type == is_group) {
+ HDsnprintf(attr_name, sizeof(attr_name), "Group Attribute %d", num);
+ aid = H5Aopen(obj_id, attr_name, H5P_DEFAULT);
+ H5Aread(aid, H5T_NATIVE_INT, &in_num);
+ vrfy_errors = dataset_vrfy(NULL, NULL, NULL, group_block, &in_num, &num);
+ H5Aclose(aid);
+ }
+ else if (this_type == is_dset) {
+ HDsnprintf(attr_name, sizeof(attr_name), "Dataset Attribute %d", num);
+ for (i = 0; i < 8; i++)
+ out_data[i] = i;
+ aid = H5Aopen(obj_id, attr_name, H5P_DEFAULT);
+ H5Aread(aid, H5T_NATIVE_INT, in_data);
+ vrfy_errors = dataset_vrfy(NULL, NULL, NULL, dset_block, in_data, out_data);
+ H5Aclose(aid);
+ }
+
+ return vrfy_errors;
+}
+
+/* This function compares the original data with the read-in data, but only
+ * for the hyperslab portion owned by this process (determined by process ID).
+ */
+static int
+check_value(DATATYPE *indata, DATATYPE *outdata, int size)
+{
+ int mpi_rank, mpi_size, err_num = 0;
+ hsize_t i, j;
+ hsize_t chunk_origin[DIM];
+ hsize_t chunk_dims[DIM], count[DIM];
+
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+
+ get_slab(chunk_origin, chunk_dims, count, NULL, size);
+
+ indata += chunk_origin[0] * (hsize_t)size;
+ outdata += chunk_origin[0] * (hsize_t)size;
+ for (i = chunk_origin[0]; i < (chunk_origin[0] + chunk_dims[0]); i++)
+ for (j = chunk_origin[1]; j < (chunk_origin[1] + chunk_dims[1]); j++) {
+ if (*indata != *outdata)
+ if (err_num++ < MAX_ERR_REPORT || VERBOSE_MED)
+ HDprintf("Dataset Verify failed at [%lu][%lu](row %lu, col%lu): expect %d, got %d\n",
+ (unsigned long)i, (unsigned long)j, (unsigned long)i, (unsigned long)j, *outdata,
+ *indata);
+ }
+ if (err_num > MAX_ERR_REPORT && !VERBOSE_MED)
+ HDprintf("[more errors ...]\n");
+ if (err_num)
+ HDprintf("%d errors found in check_value\n", err_num);
+ return err_num;
+}
+
+/* Decide which portion of the dataset's data chunk this process owns,
+ * based on its process ID.
+ */
+
+static void
+get_slab(hsize_t chunk_origin[], hsize_t chunk_dims[], hsize_t count[], hsize_t file_dims[], int size)
+{
+ int mpi_rank, mpi_size;
+
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+
+ if (chunk_origin != NULL) {
+ chunk_origin[0] = (hsize_t)mpi_rank * (hsize_t)(size / mpi_size);
+ chunk_origin[1] = 0;
+ }
+ if (chunk_dims != NULL) {
+ chunk_dims[0] = (hsize_t)(size / mpi_size);
+ chunk_dims[1] = (hsize_t)size;
+ }
+ if (file_dims != NULL)
+ file_dims[0] = file_dims[1] = (hsize_t)size;
+ if (count != NULL)
+ count[0] = count[1] = 1;
+}
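+
+/* Illustrative example (editorial note, not part of the test): with the slab
+ * decomposition above, each MPI rank owns a contiguous block of rows of the
+ * size x size dataset. Assuming mpi_size = 4 and size = 8:
+ *
+ * rank 0: chunk_origin = {0, 0}, chunk_dims = {2, 8} (rows 0-1)
+ * rank 1: chunk_origin = {2, 0}, chunk_dims = {2, 8} (rows 2-3)
+ * rank 2: chunk_origin = {4, 0}, chunk_dims = {2, 8} (rows 4-5)
+ * rank 3: chunk_origin = {6, 0}, chunk_dims = {2, 8} (rows 6-7)
+ *
+ * with count = {1, 1} and file_dims = {8, 8}, so the per-rank hyperslab
+ * selections used by the write and verify routines above tile the full
+ * dataset without overlap (assuming mpi_size evenly divides size).
+ */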
+
+/*
+ * This function is based on bug demonstration code provided by Thomas
+ * Guignon(thomas.guignon@ifp.fr), and is intended to verify the
+ * correctness of my fix for that bug.
+ *
+ * In essence, the bug appeared when at least one process attempted to
+ * write a point selection -- for which collective I/O is not supported,
+ * and at least one other attempted to write some other type of selection
+ * for which collective I/O is supported.
+ *
+ * Since the processes did not compare notes before performing the I/O,
+ * some would attempt collective I/O while others performed independent
+ * I/O. A hang resulted.
+ *
+ * This function reproduces this situation. At present the test hangs
+ * on failure.
+ * JRM - 9/13/04
+ */
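+
+/* In outline (editorial sketch, not a separate test), the problematic
+ * pattern reproduced by io_mode_confusion() below is:
+ *
+ * if (mpi_rank == 0)
+ * H5Sselect_elements(filespace, H5S_SELECT_SET, N, coord);
+ * else
+ * H5Sselect_none(filespace);
+ * H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE);
+ * H5Dwrite(dset_id, H5T_NATIVE_INT, memspace, filespace, plist_id, data);
+ *
+ * i.e. a collective H5Dwrite() in which rank 0 supplies a point selection
+ * (for which collective I/O was not supported) while the other ranks supply
+ * empty selections (for which it was).
+ */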
+
+#define N 4
+
+void
+io_mode_confusion(void)
+{
+ /*
+ * HDF5 API definitions
+ */
+
+ const int rank = 1;
+ const char *dataset_name = "IntArray";
+
+ hid_t file_id, dset_id; /* file and dataset identifiers */
+ hid_t filespace, memspace; /* file and memory dataspace */
+ /* identifiers */
+ hsize_t dimsf[1]; /* dataset dimensions */
+ int data[N] = {1}; /* data buffer to write */
+ hsize_t coord[N] = {0L, 1L, 2L, 3L};
+ hid_t plist_id; /* property list identifier */
+ herr_t status;
+
+ /*
+ * MPI variables
+ */
+
+ int mpi_size, mpi_rank;
+
+ /*
+ * test bed related variables
+ */
+
+ const char *fcn_name = "io_mode_confusion";
+ const hbool_t verbose = FALSE;
+#if 0
+ const H5Ptest_param_t *pt;
+#endif
+ char *filename;
+
+#if 0
+ pt = GetTestParameters();
+#endif
+ /* filename = pt->name; */ filename = PARATESTFILE;
+
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(" API functions for basic file, dataset, or dataset more aren't supported with this "
+ "connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
+
+ /*
+ * Set up file access property list with parallel I/O access
+ */
+
+ if (verbose)
+ HDfprintf(stdout, "%0d:%s: Setting up property list.\n", mpi_rank, fcn_name);
+
+ plist_id = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((plist_id != -1), "H5Pcreate() failed");
+
+ status = H5Pset_fapl_mpio(plist_id, MPI_COMM_WORLD, MPI_INFO_NULL);
+ VRFY((status >= 0), "H5Pset_fapl_mpio() failed");
+
+ /*
+ * Create a new file collectively and release property list identifier.
+ */
+
+ if (verbose)
+ HDfprintf(stdout, "%0d:%s: Creating new file.\n", mpi_rank, fcn_name);
+
+ file_id = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, plist_id);
+ VRFY((file_id >= 0), "H5Fcreate() failed");
+
+ status = H5Pclose(plist_id);
+ VRFY((status >= 0), "H5Pclose() failed");
+
+ /*
+ * Create the dataspace for the dataset.
+ */
+
+ if (verbose)
+ HDfprintf(stdout, "%0d:%s: Creating the dataspace for the dataset.\n", mpi_rank, fcn_name);
+
+ dimsf[0] = N;
+ filespace = H5Screate_simple(rank, dimsf, NULL);
+ VRFY((filespace >= 0), "H5Screate_simple() failed.");
+
+ /*
+ * Create the dataset with default properties and close filespace.
+ */
+
+ if (verbose)
+ HDfprintf(stdout, "%0d:%s: Creating the dataset, and closing filespace.\n", mpi_rank, fcn_name);
+
+ dset_id =
+ H5Dcreate2(file_id, dataset_name, H5T_NATIVE_INT, filespace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "H5Dcreate2() failed");
+
+ status = H5Sclose(filespace);
+ VRFY((status >= 0), "H5Sclose() failed");
+
+ if (verbose)
+ HDfprintf(stdout, "%0d:%s: Calling H5Screate_simple().\n", mpi_rank, fcn_name);
+
+ memspace = H5Screate_simple(rank, dimsf, NULL);
+ VRFY((memspace >= 0), "H5Screate_simple() failed.");
+
+ if (mpi_rank == 0) {
+ if (verbose)
+ HDfprintf(stdout, "%0d:%s: Calling H5Sselect_all(memspace).\n", mpi_rank, fcn_name);
+
+ status = H5Sselect_all(memspace);
+ VRFY((status >= 0), "H5Sselect_all() failed");
+ }
+ else {
+ if (verbose)
+ HDfprintf(stdout, "%0d:%s: Calling H5Sselect_none(memspace).\n", mpi_rank, fcn_name);
+
+ status = H5Sselect_none(memspace);
+ VRFY((status >= 0), "H5Sselect_none() failed");
+ }
+
+ if (verbose)
+ HDfprintf(stdout, "%0d:%s: Calling MPI_Barrier().\n", mpi_rank, fcn_name);
+
+ MPI_Barrier(MPI_COMM_WORLD);
+
+ if (verbose)
+ HDfprintf(stdout, "%0d:%s: Calling H5Dget_space().\n", mpi_rank, fcn_name);
+
+ filespace = H5Dget_space(dset_id);
+ VRFY((filespace >= 0), "H5Dget_space() failed");
+
+ /* select all */
+ if (mpi_rank == 0) {
+ if (verbose)
+ HDfprintf(stdout, "%0d:%s: Calling H5Sselect_elements() -- set up hang?\n", mpi_rank, fcn_name);
+
+ status = H5Sselect_elements(filespace, H5S_SELECT_SET, N, (const hsize_t *)&coord);
+ VRFY((status >= 0), "H5Sselect_elements() failed");
+ }
+ else { /* select nothing */
+ if (verbose)
+ HDfprintf(stdout, "%0d:%s: Calling H5Sselect_none().\n", mpi_rank, fcn_name);
+
+ status = H5Sselect_none(filespace);
+ VRFY((status >= 0), "H5Sselect_none() failed");
+ }
+
+ if (verbose)
+ HDfprintf(stdout, "%0d:%s: Calling MPI_Barrier().\n", mpi_rank, fcn_name);
+
+ MPI_Barrier(MPI_COMM_WORLD);
+
+ if (verbose)
+ HDfprintf(stdout, "%0d:%s: Calling H5Pcreate().\n", mpi_rank, fcn_name);
+
+ plist_id = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((plist_id != -1), "H5Pcreate() failed");
+
+ if (verbose)
+ HDfprintf(stdout, "%0d:%s: Calling H5Pset_dxpl_mpio().\n", mpi_rank, fcn_name);
+
+ status = H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE);
+ VRFY((status >= 0), "H5Pset_dxpl_mpio() failed");
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ status = H5Pset_dxpl_mpio_collective_opt(plist_id, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((status >= 0), "set independent IO collectively succeeded");
+ }
+
+ if (verbose)
+ HDfprintf(stdout, "%0d:%s: Calling H5Dwrite() -- hang here?.\n", mpi_rank, fcn_name);
+
+ status = H5Dwrite(dset_id, H5T_NATIVE_INT, memspace, filespace, plist_id, data);
+
+ if (verbose)
+ HDfprintf(stdout, "%0d:%s: Returned from H5Dwrite(), status=%d.\n", mpi_rank, fcn_name, status);
+ VRFY((status >= 0), "H5Dwrite() failed");
+
+ /*
+ * Close/release resources.
+ */
+
+ if (verbose)
+ HDfprintf(stdout, "%0d:%s: Cleaning up from test.\n", mpi_rank, fcn_name);
+
+ status = H5Dclose(dset_id);
+ VRFY((status >= 0), "H5Dclose() failed");
+
+ status = H5Sclose(filespace);
+ VRFY((status >= 0), "H5Dclose() failed");
+
+ status = H5Sclose(memspace);
+ VRFY((status >= 0), "H5Sclose() failed");
+
+ status = H5Pclose(plist_id);
+ VRFY((status >= 0), "H5Pclose() failed");
+
+ status = H5Fclose(file_id);
+ VRFY((status >= 0), "H5Fclose() failed");
+
+ if (verbose)
+ HDfprintf(stdout, "%0d:%s: Done.\n", mpi_rank, fcn_name);
+
+ return;
+
+} /* io_mode_confusion() */
+
+#undef N
+
+/*
+ * At present, the object header code maintains an image of its on disk
+ * representation, which is updated as necessary instead of generated on
+ * request.
+ *
+ * Prior to the fix that this test is designed to verify, the image of the
+ * on disk representation was only updated on flush -- not when the object
+ * header was marked clean.
+ *
+ * This worked perfectly well as long as all writes of a given object
+ * header were written from a single process. However, with the implementation
+ * of round robin metadata writes in parallel HDF5, this is no longer
+ * the case -- it is possible for a given object header to be flushed from
+ * several different processes, with the object header simply being marked
+ * clean in all other processes on each flush. This resulted in NULL or
+ * out of date object header information being written to disk.
+ *
+ * To repair this, I modified the object header code to update its
+ * on disk image both on flush and when marked clean.
+ *
+ * This test is directed at verifying that the fix performs as expected.
+ *
+ * The test functions by creating an HDF5 file with several small datasets,
+ * and then flushing the file. This should result in at least one of
+ * the associated object headers being flushed by a process other than
+ * process 0.
+ *
+ * Then for each data set, add an attribute and flush the file again.
+ *
+ * Close the file and re-open it.
+ *
+ * Open each of the data sets in turn. If all opens are successful,
+ * the test passes. Otherwise the test fails.
+ *
+ * Note that this test will probably become irrelevant shortly, when we
+ * land the journaling modifications on the trunk -- at which point all
+ * cache clients will have to construct on disk images on demand.
+ *
+ * JRM -- 10/13/10
+ */
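+
+/* Step outline (editorial note), as implemented by
+ * rr_obj_hdr_flush_confusion_writer() and rr_obj_hdr_flush_confusion_reader()
+ * below:
+ *
+ * Step 1: create NUM_DATA_SETS datasets and write data, then H5Fflush.
+ * Step 2: add a small attribute to each dataset, then H5Fflush.
+ * Step 3: add a large attribute to each dataset, then H5Fflush.
+ * Step 4: overwrite the large attributes with different values, then H5Fflush.
+ * Step 5: close all objects and the file.
+ *
+ * After each step the writers tell the readers (over MPI_COMM_WORLD) to
+ * re-open the file and verify everything written so far.
+ */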
+
+#define NUM_DATA_SETS 4
+#define LOCAL_DATA_SIZE 4
+#define LARGE_ATTR_SIZE 256
+/* Since all even and odd processes are split into writer and reader comm
+ * respectively, process 0 and 1 in COMM_WORLD become the root process of
+ * the writer and reader comm respectively.
+ */
+#define Writer_Root 0
+#define Reader_Root 1
+#define Reader_wait(mpi_err, xsteps) mpi_err = MPI_Bcast(&xsteps, 1, MPI_INT, Writer_Root, MPI_COMM_WORLD)
+#define Reader_result(mpi_err, xsteps_done) \
+ mpi_err = MPI_Bcast(&xsteps_done, 1, MPI_INT, Reader_Root, MPI_COMM_WORLD)
+#define Reader_check(mpi_err, xsteps, xsteps_done) \
+ { \
+ Reader_wait(mpi_err, xsteps); \
+ Reader_result(mpi_err, xsteps_done); \
+ }
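+
+/* Handshake sketch (editorial note): after each step, the writer side calls
+ *
+ * steps++;
+ * Reader_check(mrc, steps, steps_done);
+ *
+ * which broadcasts the current step count from Writer_Root and then waits
+ * for the verification result broadcast from Reader_Root. The reader side
+ * mirrors this with
+ *
+ * Reader_wait(mrc, steps);
+ * (verify the file up to 'steps')
+ * Reader_result(mrc, steps_done);
+ *
+ * A step count of 0 broadcast by the writers tells the readers to end.
+ */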
+
+/* object names used by both rr_obj_hdr_flush_confusion and
+ * rr_obj_hdr_flush_confusion_reader.
+ */
+const char *dataset_name[NUM_DATA_SETS] = {"dataset_0", "dataset_1", "dataset_2", "dataset_3"};
+const char *att_name[NUM_DATA_SETS] = {"attribute_0", "attribute_1", "attribute_2", "attribute_3"};
+const char *lg_att_name[NUM_DATA_SETS] = {"large_attribute_0", "large_attribute_1", "large_attribute_2",
+ "large_attribute_3"};
+
+void
+rr_obj_hdr_flush_confusion(void)
+{
+ /* MPI variables */
+ /* private communicator size and rank */
+ int mpi_size;
+ int mpi_rank;
+ int mrc; /* mpi error code */
+ int is_reader; /* 1 for reader process; 0 for writer process. */
+ MPI_Comm comm;
+
+ /* test bed related variables */
+ const char *fcn_name = "rr_obj_hdr_flush_confusion";
+ const hbool_t verbose = FALSE;
+
+ /* Create two new private communicators from MPI_COMM_WORLD.
+ * Even- and odd-ranked processes go to the writer and reader
+ * communicators, respectively.
+ */
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_FLUSH_REFRESH) || !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_ATTR_MORE)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(" API functions for basic file, dataset, attribute, dataset more, attribute more, or "
+ "file flush aren't supported with this connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
+
+ HDassert(mpi_size > 2);
+
+ is_reader = mpi_rank % 2;
+ mrc = MPI_Comm_split(MPI_COMM_WORLD, is_reader, mpi_rank, &comm);
+ VRFY((mrc == MPI_SUCCESS), "MPI_Comm_split");
+
+ /* The reader processes branch off to do reading
+ * while the writer processes continue to do writing.
+ * Whenever the writers finish one writing step, including an H5Fflush,
+ * they inform the readers, via MPI_COMM_WORLD, to verify.
+ * They wait for the result from the readers before doing the next
+ * step. When all steps are done, they inform the readers to end.
+ */
+ if (is_reader)
+ rr_obj_hdr_flush_confusion_reader(comm);
+ else
+ rr_obj_hdr_flush_confusion_writer(comm);
+
+ MPI_Comm_free(&comm);
+ if (verbose)
+ HDfprintf(stdout, "%0d:%s: Done.\n", mpi_rank, fcn_name);
+
+ return;
+
+} /* rr_obj_hdr_flush_confusion() */
+
+void
+rr_obj_hdr_flush_confusion_writer(MPI_Comm comm)
+{
+ int i;
+ int j;
+ hid_t file_id = -1;
+ hid_t fapl_id = -1;
+ hid_t dxpl_id = -1;
+ hid_t att_id[NUM_DATA_SETS];
+ hid_t att_space[NUM_DATA_SETS];
+ hid_t lg_att_id[NUM_DATA_SETS];
+ hid_t lg_att_space[NUM_DATA_SETS];
+ hid_t disk_space[NUM_DATA_SETS];
+ hid_t mem_space[NUM_DATA_SETS];
+ hid_t dataset[NUM_DATA_SETS];
+ hsize_t att_size[1];
+ hsize_t lg_att_size[1];
+ hsize_t disk_count[1];
+ hsize_t disk_size[1];
+ hsize_t disk_start[1];
+ hsize_t mem_count[1];
+ hsize_t mem_size[1];
+ hsize_t mem_start[1];
+ herr_t err;
+ double data[LOCAL_DATA_SIZE];
+ double att[LOCAL_DATA_SIZE];
+ double lg_att[LARGE_ATTR_SIZE];
+
+ /* MPI variables */
+ /* world communication size and rank */
+ int mpi_world_size;
+ int mpi_world_rank;
+ /* private communicator size and rank */
+ int mpi_size;
+ int mpi_rank;
+ int mrc; /* mpi error code */
+ /* steps to verify and have been verified */
+ int steps = 0;
+ int steps_done = 0;
+
+ /* test bed related variables */
+ const char *fcn_name = "rr_obj_hdr_flush_confusion_writer";
+ const hbool_t verbose = FALSE;
+#if 0
+ const H5Ptest_param_t *pt;
+#endif
+ char *filename;
+
+ /*
+ * setup test bed related variables:
+ */
+
+#if 0
+ pt = (const H5Ptest_param_t *)GetTestParameters();
+#endif
+ /* filename = pt->name; */ filename = PARATESTFILE;
+
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_world_rank);
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_world_size);
+ MPI_Comm_rank(comm, &mpi_rank);
+ MPI_Comm_size(comm, &mpi_size);
+
+ /*
+ * Set up file access property list with parallel I/O access
+ */
+
+ if (verbose)
+ HDfprintf(stdout, "%0d:%s: Setting up property list.\n", mpi_rank, fcn_name);
+
+ fapl_id = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((fapl_id != -1), "H5Pcreate(H5P_FILE_ACCESS) failed");
+
+ err = H5Pset_fapl_mpio(fapl_id, comm, MPI_INFO_NULL);
+ VRFY((err >= 0), "H5Pset_fapl_mpio() failed");
+
+ /*
+ * Create a new file collectively and release property list identifier.
+ */
+
+ if (verbose)
+ HDfprintf(stdout, "%0d:%s: Creating new file \"%s\".\n", mpi_rank, fcn_name, filename);
+
+ file_id = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id);
+ VRFY((file_id >= 0), "H5Fcreate() failed");
+
+ err = H5Pclose(fapl_id);
+ VRFY((err >= 0), "H5Pclose(fapl_id) failed");
+
+ /*
+ * Step 1: create the data sets and write data.
+ */
+
+ if (verbose)
+ HDfprintf(stdout, "%0d:%s: Creating the datasets.\n", mpi_rank, fcn_name);
+
+ disk_size[0] = (hsize_t)(LOCAL_DATA_SIZE * mpi_size);
+ mem_size[0] = (hsize_t)(LOCAL_DATA_SIZE);
+
+ for (i = 0; i < NUM_DATA_SETS; i++) {
+
+ disk_space[i] = H5Screate_simple(1, disk_size, NULL);
+ VRFY((disk_space[i] >= 0), "H5Screate_simple(1) failed.\n");
+
+ dataset[i] = H5Dcreate2(file_id, dataset_name[i], H5T_NATIVE_DOUBLE, disk_space[i], H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT);
+
+ VRFY((dataset[i] >= 0), "H5Dcreate(1) failed.\n");
+ }
+
+ /*
+ * setup data transfer property list
+ */
+
+ if (verbose)
+ HDfprintf(stdout, "%0d:%s: Setting up dxpl.\n", mpi_rank, fcn_name);
+
+ dxpl_id = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((dxpl_id != -1), "H5Pcreate(H5P_DATASET_XFER) failed.\n");
+
+ err = H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE);
+ VRFY((err >= 0), "H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE) failed.\n");
+
+ /*
+ * write data to the data sets
+ */
+
+ if (verbose)
+ HDfprintf(stdout, "%0d:%s: Writing datasets.\n", mpi_rank, fcn_name);
+
+ disk_count[0] = (hsize_t)(LOCAL_DATA_SIZE);
+ disk_start[0] = (hsize_t)(LOCAL_DATA_SIZE * mpi_rank);
+ mem_count[0] = (hsize_t)(LOCAL_DATA_SIZE);
+ mem_start[0] = (hsize_t)(0);
+
+ for (j = 0; j < LOCAL_DATA_SIZE; j++) {
+ data[j] = (double)(mpi_rank + 1);
+ }
+
+ for (i = 0; i < NUM_DATA_SETS; i++) {
+ err = H5Sselect_hyperslab(disk_space[i], H5S_SELECT_SET, disk_start, NULL, disk_count, NULL);
+ VRFY((err >= 0), "H5Sselect_hyperslab(1) failed.\n");
+ mem_space[i] = H5Screate_simple(1, mem_size, NULL);
+ VRFY((mem_space[i] >= 0), "H5Screate_simple(2) failed.\n");
+ err = H5Sselect_hyperslab(mem_space[i], H5S_SELECT_SET, mem_start, NULL, mem_count, NULL);
+ VRFY((err >= 0), "H5Sselect_hyperslab(2) failed.\n");
+ err = H5Dwrite(dataset[i], H5T_NATIVE_DOUBLE, mem_space[i], disk_space[i], dxpl_id, data);
+ VRFY((err >= 0), "H5Dwrite(1) failed.\n");
+ for (j = 0; j < LOCAL_DATA_SIZE; j++)
+ data[j] *= 10.0;
+ }
+
+ /*
+ * close the data spaces
+ */
+
+ if (verbose)
+ HDfprintf(stdout, "%0d:%s: closing dataspaces.\n", mpi_rank, fcn_name);
+
+ for (i = 0; i < NUM_DATA_SETS; i++) {
+ err = H5Sclose(disk_space[i]);
+ VRFY((err >= 0), "H5Sclose(disk_space[i]) failed.\n");
+ err = H5Sclose(mem_space[i]);
+ VRFY((err >= 0), "H5Sclose(mem_space[i]) failed.\n");
+ }
+
+ /* End of Step 1: create the data sets and write data. */
+
+ /*
+ * flush the metadata cache
+ */
+
+ if (verbose)
+ HDfprintf(stdout, "%0d:%s: flushing metadata cache.\n", mpi_rank, fcn_name);
+ err = H5Fflush(file_id, H5F_SCOPE_GLOBAL);
+ VRFY((err >= 0), "H5Fflush(1) failed.\n");
+
+ /* Tell the reader to check the file up to steps. */
+ steps++;
+ Reader_check(mrc, steps, steps_done);
+ VRFY((MPI_SUCCESS == mrc), "Reader_check failed");
+
+ /*
+ * Step 2: write attributes to each dataset
+ */
+
+ if (verbose)
+ HDfprintf(stdout, "%0d:%s: writing attributes.\n", mpi_rank, fcn_name);
+
+ att_size[0] = (hsize_t)(LOCAL_DATA_SIZE);
+ for (j = 0; j < LOCAL_DATA_SIZE; j++) {
+ att[j] = (double)(j + 1);
+ }
+
+ for (i = 0; i < NUM_DATA_SETS; i++) {
+ att_space[i] = H5Screate_simple(1, att_size, NULL);
+ VRFY((att_space[i] >= 0), "H5Screate_simple(3) failed.\n");
+ att_id[i] =
+ H5Acreate2(dataset[i], att_name[i], H5T_NATIVE_DOUBLE, att_space[i], H5P_DEFAULT, H5P_DEFAULT);
+ VRFY((att_id[i] >= 0), "H5Acreate(1) failed.\n");
+ err = H5Awrite(att_id[i], H5T_NATIVE_DOUBLE, att);
+ VRFY((err >= 0), "H5Awrite(1) failed.\n");
+ for (j = 0; j < LOCAL_DATA_SIZE; j++) {
+ att[j] /= 10.0;
+ }
+ }
+
+ /*
+ * close attribute IDs and spaces
+ */
+
+ if (verbose)
+ HDfprintf(stdout, "%0d:%s: closing attr ids and spaces .\n", mpi_rank, fcn_name);
+
+ for (i = 0; i < NUM_DATA_SETS; i++) {
+ err = H5Sclose(att_space[i]);
+ VRFY((err >= 0), "H5Sclose(att_space[i]) failed.\n");
+ err = H5Aclose(att_id[i]);
+ VRFY((err >= 0), "H5Aclose(att_id[i]) failed.\n");
+ }
+
+ /* End of Step 2: write attributes to each dataset */
+
+ /*
+ * flush the metadata cache again
+ */
+
+ if (verbose)
+ HDfprintf(stdout, "%0d:%s: flushing metadata cache.\n", mpi_rank, fcn_name);
+ err = H5Fflush(file_id, H5F_SCOPE_GLOBAL);
+ VRFY((err >= 0), "H5Fflush(2) failed.\n");
+
+ /* Tell the reader to check the file up to steps. */
+ steps++;
+ Reader_check(mrc, steps, steps_done);
+ VRFY((MPI_SUCCESS == mrc), "Reader_check failed");
+
+ /*
+ * Step 3: write large attributes to each dataset
+ */
+
+ if (verbose)
+ HDfprintf(stdout, "%0d:%s: writing large attributes.\n", mpi_rank, fcn_name);
+
+ lg_att_size[0] = (hsize_t)(LARGE_ATTR_SIZE);
+
+ for (j = 0; j < LARGE_ATTR_SIZE; j++) {
+ lg_att[j] = (double)(j + 1);
+ }
+
+ for (i = 0; i < NUM_DATA_SETS; i++) {
+ lg_att_space[i] = H5Screate_simple(1, lg_att_size, NULL);
+ VRFY((lg_att_space[i] >= 0), "H5Screate_simple(4) failed.\n");
+ lg_att_id[i] = H5Acreate2(dataset[i], lg_att_name[i], H5T_NATIVE_DOUBLE, lg_att_space[i], H5P_DEFAULT,
+ H5P_DEFAULT);
+ VRFY((lg_att_id[i] >= 0), "H5Acreate(2) failed.\n");
+ err = H5Awrite(lg_att_id[i], H5T_NATIVE_DOUBLE, lg_att);
+ VRFY((err >= 0), "H5Awrite(2) failed.\n");
+ for (j = 0; j < LARGE_ATTR_SIZE; j++) {
+ lg_att[j] /= 10.0;
+ }
+ }
+
+ /* End of Step 3: write large attributes to each dataset */
+
+ /*
+ * flush the metadata cache yet again to clean the object headers.
+ *
+ * This is an attempt to create a situation where we have dirty
+ * object header continuation chunks, but clean object headers
+ * to verify a speculative bug fix -- it doesn't seem to work,
+ * but I will leave the code in anyway, as the object header
+ * code is going to change a lot in the near future.
+ */
+
+ if (verbose)
+ HDfprintf(stdout, "%0d:%s: flushing metadata cache.\n", mpi_rank, fcn_name);
+ err = H5Fflush(file_id, H5F_SCOPE_GLOBAL);
+ VRFY((err >= 0), "H5Fflush(3) failed.\n");
+
+ /* Tell the reader to check the file up to steps. */
+ steps++;
+ Reader_check(mrc, steps, steps_done);
+ VRFY((MPI_SUCCESS == mrc), "Reader_check failed");
+
+ /*
+ * Step 4: write different large attributes to each dataset
+ */
+
+ if (verbose)
+ HDfprintf(stdout, "%0d:%s: writing different large attributes.\n", mpi_rank, fcn_name);
+
+ for (j = 0; j < LARGE_ATTR_SIZE; j++) {
+ lg_att[j] = (double)(j + 2);
+ }
+
+ for (i = 0; i < NUM_DATA_SETS; i++) {
+ err = H5Awrite(lg_att_id[i], H5T_NATIVE_DOUBLE, lg_att);
+ VRFY((err >= 0), "H5Awrite(2) failed.\n");
+ for (j = 0; j < LARGE_ATTR_SIZE; j++) {
+ lg_att[j] /= 10.0;
+ }
+ }
+
+ /* End of Step 4: write different large attributes to each dataset */
+
+ /*
+ * flush the metadata cache again
+ */
+ if (verbose)
+ HDfprintf(stdout, "%0d:%s: flushing metadata cache.\n", mpi_rank, fcn_name);
+ err = H5Fflush(file_id, H5F_SCOPE_GLOBAL);
+ VRFY((err >= 0), "H5Fflush(3) failed.\n");
+
+ /* Tell the reader to check the file up to steps. */
+ steps++;
+ Reader_check(mrc, steps, steps_done);
+ VRFY((MPI_SUCCESS == mrc), "Reader_check failed");
+
+ /* Step 5: Close all objects and the file */
+
+ /*
+ * close large attribute IDs and spaces
+ */
+
+ if (verbose)
+ HDfprintf(stdout, "%0d:%s: closing large attr ids and spaces .\n", mpi_rank, fcn_name);
+
+ for (i = 0; i < NUM_DATA_SETS; i++) {
+
+ err = H5Sclose(lg_att_space[i]);
+ VRFY((err >= 0), "H5Sclose(lg_att_space[i]) failed.\n");
+ err = H5Aclose(lg_att_id[i]);
+ VRFY((err >= 0), "H5Aclose(lg_att_id[i]) failed.\n");
+ }
+
+ /*
+ * close the data sets
+ */
+
+ if (verbose)
+ HDfprintf(stdout, "%0d:%s: closing datasets .\n", mpi_rank, fcn_name);
+
+ for (i = 0; i < NUM_DATA_SETS; i++) {
+ err = H5Dclose(dataset[i]);
+ VRFY((err >= 0), "H5Dclose(dataset[i])1 failed.\n");
+ }
+
+ /*
+ * close the data transfer property list.
+ */
+
+ if (verbose)
+ HDfprintf(stdout, "%0d:%s: closing dxpl .\n", mpi_rank, fcn_name);
+
+ err = H5Pclose(dxpl_id);
+ VRFY((err >= 0), "H5Pclose(dxpl_id) failed.\n");
+
+ /*
+ * Close file.
+ */
+
+ if (verbose)
+ HDfprintf(stdout, "%0d:%s: closing file.\n", mpi_rank, fcn_name);
+
+ err = H5Fclose(file_id);
+ VRFY((err >= 0), "H5Fclose(1) failed");
+
+ /* End of Step 5: Close all objects and the file */
+ /* Tell the reader to check the file up to steps. */
+ steps++;
+ Reader_check(mrc, steps, steps_done);
+ VRFY((MPI_SUCCESS == mrc), "Reader_check failed");
+
+ /* All done. Inform reader to end. */
+ steps = 0;
+ Reader_check(mrc, steps, steps_done);
+ VRFY((MPI_SUCCESS == mrc), "Reader_check failed");
+
+ if (verbose)
+ HDfprintf(stdout, "%0d:%s: Done.\n", mpi_rank, fcn_name);
+
+ return;
+
+} /* rr_obj_hdr_flush_confusion_writer() */
+
+void
+rr_obj_hdr_flush_confusion_reader(MPI_Comm comm)
+{
+ int i;
+ int j;
+ hid_t file_id = -1;
+ hid_t fapl_id = -1;
+ hid_t dxpl_id = -1;
+ hid_t lg_att_id[NUM_DATA_SETS];
+ hid_t lg_att_type[NUM_DATA_SETS];
+ hid_t disk_space[NUM_DATA_SETS];
+ hid_t mem_space[NUM_DATA_SETS];
+ hid_t dataset[NUM_DATA_SETS];
+ hsize_t disk_count[1];
+ hsize_t disk_start[1];
+ hsize_t mem_count[1];
+ hsize_t mem_size[1];
+ hsize_t mem_start[1];
+ herr_t err;
+ htri_t tri_err;
+ double data[LOCAL_DATA_SIZE];
+ double data_read[LOCAL_DATA_SIZE];
+ double att[LOCAL_DATA_SIZE];
+ double att_read[LOCAL_DATA_SIZE];
+ double lg_att[LARGE_ATTR_SIZE];
+ double lg_att_read[LARGE_ATTR_SIZE];
+
+ /* MPI variables */
+ /* world communication size and rank */
+ int mpi_world_size;
+ int mpi_world_rank;
+ /* private communicator size and rank */
+ int mpi_size;
+ int mpi_rank;
+ int mrc; /* mpi error code */
+ int steps = -1; /* How far (steps) to verify the file */
+ int steps_done = -1; /* How far (steps) have been verified */
+
+ /* test bed related variables */
+ const char *fcn_name = "rr_obj_hdr_flush_confusion_reader";
+ const hbool_t verbose = FALSE;
+#if 0
+ const H5Ptest_param_t *pt;
+#endif
+ char *filename;
+
+ /*
+ * setup test bed related variables:
+ */
+
+#if 0
+ pt = (const H5Ptest_param_t *)GetTestParameters();
+#endif
+ /* filename = pt->name; */ filename = PARATESTFILE;
+
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_world_rank);
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_world_size);
+ MPI_Comm_rank(comm, &mpi_rank);
+ MPI_Comm_size(comm, &mpi_size);
+
+ /* Repeatedly re-open the file and verify its contents until it is */
+ /* told to end (when steps=0). */
+ while (steps_done != 0) {
+ Reader_wait(mrc, steps);
+ VRFY((mrc >= 0), "Reader_wait failed");
+ steps_done = 0;
+
+ if (steps > 0) {
+ /*
+ * Set up file access property list with parallel I/O access
+ */
+
+ if (verbose)
+ HDfprintf(stdout, "%0d:%s: Setting up property list.\n", mpi_rank, fcn_name);
+
+ fapl_id = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((fapl_id != -1), "H5Pcreate(H5P_FILE_ACCESS) failed");
+ err = H5Pset_fapl_mpio(fapl_id, comm, MPI_INFO_NULL);
+ VRFY((err >= 0), "H5Pset_fapl_mpio() failed");
+
+ /*
+ * Create a new file collectively and release property list identifier.
+ */
+
+ if (verbose)
+ HDfprintf(stdout, "%0d:%s: Re-open file \"%s\".\n", mpi_rank, fcn_name, filename);
+
+ file_id = H5Fopen(filename, H5F_ACC_RDONLY, fapl_id);
+ VRFY((file_id >= 0), "H5Fopen() failed");
+ err = H5Pclose(fapl_id);
+ VRFY((err >= 0), "H5Pclose(fapl_id) failed");
+
+#if 1
+ if (steps >= 1) {
+ /*=====================================================*
+ * Step 1: open the data sets and read data.
+ *=====================================================*/
+
+ if (verbose)
+ HDfprintf(stdout, "%0d:%s: opening the datasets.\n", mpi_rank, fcn_name);
+
+ for (i = 0; i < NUM_DATA_SETS; i++) {
+ dataset[i] = -1;
+ }
+
+ for (i = 0; i < NUM_DATA_SETS; i++) {
+ dataset[i] = H5Dopen2(file_id, dataset_name[i], H5P_DEFAULT);
+ VRFY((dataset[i] >= 0), "H5Dopen(1) failed.\n");
+ disk_space[i] = H5Dget_space(dataset[i]);
+ VRFY((disk_space[i] >= 0), "H5Dget_space failed.\n");
+ }
+
+ /*
+ * setup data transfer property list
+ */
+
+ if (verbose)
+ HDfprintf(stdout, "%0d:%s: Setting up dxpl.\n", mpi_rank, fcn_name);
+
+ dxpl_id = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((dxpl_id != -1), "H5Pcreate(H5P_DATASET_XFER) failed.\n");
+ err = H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE);
+ VRFY((err >= 0), "H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE) failed.\n");
+
+ /*
+ * read data from the data sets
+ */
+
+ if (verbose)
+ HDfprintf(stdout, "%0d:%s: Reading datasets.\n", mpi_rank, fcn_name);
+
+ disk_count[0] = (hsize_t)(LOCAL_DATA_SIZE);
+ disk_start[0] = (hsize_t)(LOCAL_DATA_SIZE * mpi_rank);
+
+ mem_size[0] = (hsize_t)(LOCAL_DATA_SIZE);
+
+ mem_count[0] = (hsize_t)(LOCAL_DATA_SIZE);
+ mem_start[0] = (hsize_t)(0);
+
+ /* set up expected data for verification */
+ for (j = 0; j < LOCAL_DATA_SIZE; j++) {
+ data[j] = (double)(mpi_rank + 1);
+ }
+
+ for (i = 0; i < NUM_DATA_SETS; i++) {
+ err = H5Sselect_hyperslab(disk_space[i], H5S_SELECT_SET, disk_start, NULL, disk_count,
+ NULL);
+ VRFY((err >= 0), "H5Sselect_hyperslab(1) failed.\n");
+ mem_space[i] = H5Screate_simple(1, mem_size, NULL);
+ VRFY((mem_space[i] >= 0), "H5Screate_simple(2) failed.\n");
+ err = H5Sselect_hyperslab(mem_space[i], H5S_SELECT_SET, mem_start, NULL, mem_count, NULL);
+ VRFY((err >= 0), "H5Sselect_hyperslab(2) failed.\n");
+ err = H5Dread(dataset[i], H5T_NATIVE_DOUBLE, mem_space[i], disk_space[i], dxpl_id,
+ data_read);
+ VRFY((err >= 0), "H5Dread(1) failed.\n");
+
+ /* compare read data with expected data */
+ for (j = 0; j < LOCAL_DATA_SIZE; j++)
+ if (!H5_DBL_ABS_EQUAL(data_read[j], data[j])) {
+ HDfprintf(stdout,
+ "%0d:%s: Reading datasets value failed in "
+ "Dataset %d, at position %d: expect %f, got %f.\n",
+ mpi_rank, fcn_name, i, j, data[j], data_read[j]);
+ nerrors++;
+ }
+ for (j = 0; j < LOCAL_DATA_SIZE; j++)
+ data[j] *= 10.0;
+ }
+
+ /*
+ * close the data spaces
+ */
+
+ if (verbose)
+ HDfprintf(stdout, "%0d:%s: closing dataspaces.\n", mpi_rank, fcn_name);
+
+ for (i = 0; i < NUM_DATA_SETS; i++) {
+ err = H5Sclose(disk_space[i]);
+ VRFY((err >= 0), "H5Sclose(disk_space[i]) failed.\n");
+ err = H5Sclose(mem_space[i]);
+ VRFY((err >= 0), "H5Sclose(mem_space[i]) failed.\n");
+ }
+ steps_done++;
+ }
+ /* End of Step 1: open the data sets and read data. */
+#endif
+
+#if 1
+ /*=====================================================*
+ * Step 2: reading attributes from each dataset
+ *=====================================================*/
+
+ if (steps >= 2) {
+ if (verbose)
+ HDfprintf(stdout, "%0d:%s: reading attributes.\n", mpi_rank, fcn_name);
+
+ for (j = 0; j < LOCAL_DATA_SIZE; j++) {
+ att[j] = (double)(j + 1);
+ }
+
+ for (i = 0; i < NUM_DATA_SETS; i++) {
+ hid_t att_id, att_type;
+
+ att_id = H5Aopen(dataset[i], att_name[i], H5P_DEFAULT);
+ VRFY((att_id >= 0), "H5Aopen failed.\n");
+ att_type = H5Aget_type(att_id);
+ VRFY((att_type >= 0), "H5Aget_type failed.\n");
+ tri_err = H5Tequal(att_type, H5T_NATIVE_DOUBLE);
+ VRFY((tri_err >= 0), "H5Tequal failed.\n");
+ if (tri_err == 0) {
+ HDfprintf(stdout, "%0d:%s: Mismatched Attribute type of Dataset %d.\n", mpi_rank,
+ fcn_name, i);
+ nerrors++;
+ }
+ else {
+ /* should verify attribute size before H5Aread */
+ err = H5Aread(att_id, H5T_NATIVE_DOUBLE, att_read);
+ VRFY((err >= 0), "H5Aread failed.\n");
+ /* compare read attribute data with expected data */
+ for (j = 0; j < LOCAL_DATA_SIZE; j++)
+ if (!H5_DBL_ABS_EQUAL(att_read[j], att[j])) {
+ HDfprintf(stdout,
+ "%0d:%s: Mismatched attribute data read in Dataset %d, at position "
+ "%d: expect %f, got %f.\n",
+ mpi_rank, fcn_name, i, j, att[j], att_read[j]);
+ nerrors++;
+ }
+ for (j = 0; j < LOCAL_DATA_SIZE; j++) {
+ att[j] /= 10.0;
+ }
+ }
+ err = H5Aclose(att_id);
+ VRFY((err >= 0), "H5Aclose failed.\n");
+ }
+ steps_done++;
+ }
+ /* End of Step 2: reading attributes from each dataset */
+#endif
+
+#if 1
+ /*=====================================================*
+ * Step 3 or 4: read large attributes from each dataset.
+ * Step 4 has different attribute value from step 3.
+ *=====================================================*/
+
+ if (steps >= 3) {
+ if (verbose)
+ HDfprintf(stdout, "%0d:%s: reading large attributes.\n", mpi_rank, fcn_name);
+
+ for (j = 0; j < LARGE_ATTR_SIZE; j++) {
+ lg_att[j] = (steps == 3) ? (double)(j + 1) : (double)(j + 2);
+ }
+
+ for (i = 0; i < NUM_DATA_SETS; i++) {
+ lg_att_id[i] = H5Aopen(dataset[i], lg_att_name[i], H5P_DEFAULT);
+ VRFY((lg_att_id[i] >= 0), "H5Aopen(2) failed.\n");
+ lg_att_type[i] = H5Aget_type(lg_att_id[i]);
+ VRFY((err >= 0), "H5Aget_type failed.\n");
+ tri_err = H5Tequal(lg_att_type[i], H5T_NATIVE_DOUBLE);
+ VRFY((tri_err >= 0), "H5Tequal failed.\n");
+ if (tri_err == 0) {
+ HDfprintf(stdout, "%0d:%s: Mismatched Large attribute type of Dataset %d.\n",
+ mpi_rank, fcn_name, i);
+ nerrors++;
+ }
+ else {
+ /* should verify large attribute size before H5Aread */
+ err = H5Aread(lg_att_id[i], H5T_NATIVE_DOUBLE, lg_att_read);
+ VRFY((err >= 0), "H5Aread failed.\n");
+ /* compare read attribute data with expected data */
+ for (j = 0; j < LARGE_ATTR_SIZE; j++)
+ if (!H5_DBL_ABS_EQUAL(lg_att_read[j], lg_att[j])) {
+ HDfprintf(stdout,
+ "%0d:%s: Mismatched large attribute data read in Dataset %d, at "
+ "position %d: expect %f, got %f.\n",
+ mpi_rank, fcn_name, i, j, lg_att[j], lg_att_read[j]);
+ nerrors++;
+ }
+ for (j = 0; j < LARGE_ATTR_SIZE; j++) {
+
+ lg_att[j] /= 10.0;
+ }
+ }
+ err = H5Tclose(lg_att_type[i]);
+ VRFY((err >= 0), "H5Tclose failed.\n");
+ err = H5Aclose(lg_att_id[i]);
+ VRFY((err >= 0), "H5Aclose failed.\n");
+ }
+ /* Both step 3 and 4 use this same read checking code. */
+ steps_done = (steps == 3) ? 3 : 4;
+ }
+
+ /* End of Step 3 or 4: read large attributes from each dataset */
+#endif
+
+ /*=====================================================*
+ * Step 5: read all objects from the file
+ *=====================================================*/
+ if (steps >= 5) {
+ /* nothing extra to verify. The file is closed normally. */
+ /* Just increment steps_done */
+ steps_done++;
+ }
+
+ /*
+ * Close the data sets
+ */
+
+ if (verbose)
+ HDfprintf(stdout, "%0d:%s: closing datasets again.\n", mpi_rank, fcn_name);
+
+ for (i = 0; i < NUM_DATA_SETS; i++) {
+ if (dataset[i] >= 0) {
+ err = H5Dclose(dataset[i]);
+ VRFY((err >= 0), "H5Dclose(dataset[i])1 failed.\n");
+ }
+ }
+
+ /*
+ * close the data transfer property list.
+ */
+
+ if (verbose)
+ HDfprintf(stdout, "%0d:%s: closing dxpl .\n", mpi_rank, fcn_name);
+
+ err = H5Pclose(dxpl_id);
+ VRFY((err >= 0), "H5Pclose(dxpl_id) failed.\n");
+
+ /*
+ * Close the file
+ */
+ if (verbose)
+ HDfprintf(stdout, "%0d:%s: closing file again.\n", mpi_rank, fcn_name);
+ err = H5Fclose(file_id);
+ VRFY((err >= 0), "H5Fclose(1) failed");
+
+ } /* end if (steps > 0) */
+ Reader_result(mrc, steps_done);
+ } /* end while (steps_done != 0) */
+
+ if (verbose)
+ HDfprintf(stdout, "%0d:%s: Done.\n", mpi_rank, fcn_name);
+
+ return;
+} /* rr_obj_hdr_flush_confusion_reader() */
+
+#undef NUM_DATA_SETS
+#undef LOCAL_DATA_SIZE
+#undef LARGE_ATTR_SIZE
+#undef Reader_check
+#undef Reader_wait
+#undef Reader_result
+#undef Writer_Root
+#undef Reader_Root
+
+/*
+ * Test creating a chunked dataset in parallel in a file with an alignment
+ * set, using an alignment threshold large enough to avoid aligning the
+ * chunks but small enough that the raw data aggregator would be aligned if
+ * it were treated as an object that must be aligned by the library.
+ */
+#define CHUNK_SIZE 72
+#define NCHUNKS 32
+#define AGGR_SIZE 2048
+#define EXTRA_ALIGN 100
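+
+/* Sketch (editorial note) of the alignment setup that is disabled under
+ * "#if 0" in this API-test port of the test:
+ *
+ * file_size = h5_get_file_size(filename, fapl_id);
+ * align = (hsize_t)file_size + CHUNK_SIZE + EXTRA_ALIGN;
+ * H5Pset_alignment(fapl_id, CHUNK_SIZE + 1, align);
+ *
+ * The alignment threshold (CHUNK_SIZE + 1) is just above the chunk size, so
+ * individual chunks are never aligned, while the alignment value leaves room
+ * for one chunk between the original EOF and the aligned location of the raw
+ * data aggregator.
+ */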
+
+void
+chunk_align_bug_1(void)
+{
+ int mpi_rank;
+ hid_t file_id, dset_id, fapl_id, dcpl_id, space_id;
+ hsize_t dims = CHUNK_SIZE * NCHUNKS, cdims = CHUNK_SIZE;
+#if 0
+ h5_stat_size_t file_size;
+ hsize_t align;
+#endif
+ herr_t ret;
+ const char *filename;
+
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(" API functions for basic file or dataset aren't supported with this connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
+
+ filename = (const char *)PARATESTFILE /* GetTestParameters() */;
+
+ /* Create file without alignment */
+ fapl_id = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type);
+ VRFY((fapl_id >= 0), "create_faccess_plist succeeded");
+ file_id = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id);
+ VRFY((file_id >= 0), "H5Fcreate succeeded");
+
+ /* Close file */
+ ret = H5Fclose(file_id);
+ VRFY((ret >= 0), "H5Fclose succeeded");
+#if 0
+ /* Get file size */
+ file_size = h5_get_file_size(filename, fapl_id);
+ VRFY((file_size >= 0), "h5_get_file_size succeeded");
+
+ /* Calculate alignment value, set to allow a chunk to squeak in between the
+ * original EOF and the aligned location of the aggregator. Add some space
+ * for the dataset metadata */
+ align = (hsize_t)file_size + CHUNK_SIZE + EXTRA_ALIGN;
+#endif
+
+ /* Set aggregator size and alignment, disable metadata aggregator */
+ HDassert(AGGR_SIZE > CHUNK_SIZE);
+ ret = H5Pset_small_data_block_size(fapl_id, AGGR_SIZE);
+ VRFY((ret >= 0), "H5Pset_small_data_block_size succeeded");
+ ret = H5Pset_meta_block_size(fapl_id, 0);
+ VRFY((ret >= 0), "H5Pset_meta_block_size succeeded");
+#if 0
+ ret = H5Pset_alignment(fapl_id, CHUNK_SIZE + 1, align);
+ VRFY((ret >= 0), "H5Pset_small_data_block_size succeeded");
+#endif
+
+ /* Reopen file with new settings */
+ file_id = H5Fopen(filename, H5F_ACC_RDWR, fapl_id);
+ VRFY((file_id >= 0), "H5Fopen succeeded");
+
+ /* Create dataset */
+ space_id = H5Screate_simple(1, &dims, NULL);
+ VRFY((space_id >= 0), "H5Screate_simple succeeded");
+ dcpl_id = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY((dcpl_id >= 0), "H5Pcreate succeeded");
+ ret = H5Pset_chunk(dcpl_id, 1, &cdims);
+ VRFY((ret >= 0), "H5Pset_chunk succeeded");
+ dset_id = H5Dcreate2(file_id, "dset", H5T_NATIVE_CHAR, space_id, H5P_DEFAULT, dcpl_id, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "H5Dcreate2 succeeded");
+
+ /* Close ids */
+ ret = H5Dclose(dset_id);
+ VRFY((ret >= 0), "H5Dclose succeeded");
+ ret = H5Sclose(space_id);
+ VRFY((ret >= 0), "H5Sclose succeeded");
+ ret = H5Pclose(dcpl_id);
+ VRFY((ret >= 0), "H5Pclose succeeded");
+ ret = H5Pclose(fapl_id);
+ VRFY((ret >= 0), "H5Pclose succeeded");
+
+ /* Close file */
+ ret = H5Fclose(file_id);
+ VRFY((ret >= 0), "H5Fclose succeeded");
+
+ return;
+} /* end chunk_align_bug_1() */
+
+/*=============================================================================
+ * End of t_mdset.c
+ *===========================================================================*/
diff --git a/testpar/API/t_ph5basic.c b/testpar/API/t_ph5basic.c
new file mode 100644
index 0000000..1639aff
--- /dev/null
+++ b/testpar/API/t_ph5basic.c
@@ -0,0 +1,192 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://www.hdfgroup.org/licenses. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/*
+ * Test parallel HDF5 basic components
+ */
+
+#include "hdf5.h"
+#include "testphdf5.h"
+
+/*-------------------------------------------------------------------------
+ * Function: test_fapl_mpio_dup
+ *
+ * Purpose: Test whether the fapl_mpio property list keeps duplicates of the
+ * communicator and INFO objects given when it is set, and returns
+ * duplicates of those components when H5Pget_fapl_mpio is called.
+ *
+ * Return: Success: None
+ * Failure: Abort
+ *
+ * Programmer: Albert Cheng
+ * January 9, 2003
+ *
+ *-------------------------------------------------------------------------
+ */
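+/* In outline (editorial sketch), the behavior being verified is: set the
+ * fapl with a communicator and INFO object, free the originals, and then
+ * retrieve working duplicates from the fapl:
+ *
+ * H5Pset_fapl_mpio(acc_pl, comm, info);
+ * MPI_Comm_free(&comm);
+ * MPI_Info_free(&info);
+ * H5Pget_fapl_mpio(acc_pl, &comm_tmp, &info_tmp);
+ *
+ * comm_tmp and info_tmp are themselves duplicates; they remain usable after
+ * the originals are freed and must be freed by the caller.
+ */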
+void
+test_fapl_mpio_dup(void)
+{
+ int mpi_size, mpi_rank;
+ MPI_Comm comm, comm_tmp;
+ int mpi_size_old, mpi_rank_old;
+ int mpi_size_tmp, mpi_rank_tmp;
+ MPI_Info info = MPI_INFO_NULL;
+ MPI_Info info_tmp = MPI_INFO_NULL;
+ int mrc; /* MPI return value */
+ hid_t acc_pl; /* File access properties */
+ herr_t ret; /* HDF5 return value */
+ int nkeys, nkeys_tmp;
+
+ if (VERBOSE_MED)
+ HDprintf("Verify fapl_mpio duplicates communicator and INFO objects\n");
+
+ /* set up MPI parameters */
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+ if (VERBOSE_MED)
+ HDprintf("rank/size of MPI_COMM_WORLD are %d/%d\n", mpi_rank, mpi_size);
+
+ /* Create a new communicator that has the same processes as MPI_COMM_WORLD.
+ * Use MPI_Comm_split because it is simpler than MPI_Comm_create
+ */
+ mrc = MPI_Comm_split(MPI_COMM_WORLD, 0, 0, &comm);
+ VRFY((mrc == MPI_SUCCESS), "MPI_Comm_split");
+ MPI_Comm_size(comm, &mpi_size_old);
+ MPI_Comm_rank(comm, &mpi_rank_old);
+ if (VERBOSE_MED)
+ HDprintf("rank/size of comm are %d/%d\n", mpi_rank_old, mpi_size_old);
+
+ /* create a new INFO object with some trivial information. */
+ mrc = MPI_Info_create(&info);
+ VRFY((mrc == MPI_SUCCESS), "MPI_Info_create");
+ mrc = MPI_Info_set(info, "hdf_info_name", "XYZ");
+ VRFY((mrc == MPI_SUCCESS), "MPI_Info_set");
+ if (MPI_INFO_NULL != info) {
+ mrc = MPI_Info_get_nkeys(info, &nkeys);
+ VRFY((mrc == MPI_SUCCESS), "MPI_Info_get_nkeys");
+ }
+#if 0
+ if (VERBOSE_MED)
+ h5_dump_info_object(info);
+#endif
+
+ acc_pl = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((acc_pl >= 0), "H5P_FILE_ACCESS");
+
+ ret = H5Pset_fapl_mpio(acc_pl, comm, info);
+ VRFY((ret >= 0), "");
+
+ /* Case 1:
+ * Free the created communicator and INFO object.
+ * Check if the access property list is still valid and can return
+ * valid communicator and INFO object.
+ */
+ mrc = MPI_Comm_free(&comm);
+ VRFY((mrc == MPI_SUCCESS), "MPI_Comm_free");
+ if (MPI_INFO_NULL != info) {
+ mrc = MPI_Info_free(&info);
+ VRFY((mrc == MPI_SUCCESS), "MPI_Info_free");
+ }
+
+ ret = H5Pget_fapl_mpio(acc_pl, &comm_tmp, &info_tmp);
+ VRFY((ret >= 0), "H5Pget_fapl_mpio");
+ MPI_Comm_size(comm_tmp, &mpi_size_tmp);
+ MPI_Comm_rank(comm_tmp, &mpi_rank_tmp);
+ if (VERBOSE_MED)
+ HDprintf("After H5Pget_fapl_mpio: rank/size of comm are %d/%d\n", mpi_rank_tmp, mpi_size_tmp);
+ VRFY((mpi_size_tmp == mpi_size), "MPI_Comm_size");
+ VRFY((mpi_rank_tmp == mpi_rank), "MPI_Comm_rank");
+ if (MPI_INFO_NULL != info_tmp) {
+ mrc = MPI_Info_get_nkeys(info_tmp, &nkeys_tmp);
+ VRFY((mrc == MPI_SUCCESS), "MPI_Info_get_nkeys");
+ VRFY((nkeys_tmp == nkeys), "new and old nkeys equal");
+ }
+#if 0
+ if (VERBOSE_MED)
+ h5_dump_info_object(info_tmp);
+#endif
+
+ /* Case 2:
+ * Free the retrieved communicator and INFO object.
+ * Check if the access property list is still valid and can return
+ * valid communicator and INFO object.
+ * Also verify the NULL argument option.
+ */
+ mrc = MPI_Comm_free(&comm_tmp);
+ VRFY((mrc == MPI_SUCCESS), "MPI_Comm_free");
+ if (MPI_INFO_NULL != info_tmp) {
+ mrc = MPI_Info_free(&info_tmp);
+ VRFY((mrc == MPI_SUCCESS), "MPI_Info_free");
+ }
+
+ /* check NULL argument options. */
+ ret = H5Pget_fapl_mpio(acc_pl, &comm_tmp, NULL);
+ VRFY((ret >= 0), "H5Pget_fapl_mpio Comm only");
+ mrc = MPI_Comm_free(&comm_tmp);
+ VRFY((mrc == MPI_SUCCESS), "MPI_Comm_free");
+
+ ret = H5Pget_fapl_mpio(acc_pl, NULL, &info_tmp);
+ VRFY((ret >= 0), "H5Pget_fapl_mpio Info only");
+ if (MPI_INFO_NULL != info_tmp) {
+ mrc = MPI_Info_free(&info_tmp);
+ VRFY((mrc == MPI_SUCCESS), "MPI_Info_free");
+ }
+
+ ret = H5Pget_fapl_mpio(acc_pl, NULL, NULL);
+ VRFY((ret >= 0), "H5Pget_fapl_mpio neither");
+
+ /* now get both and check validity too. */
+ /* Do not free the returned objects which are used in the next case. */
+ ret = H5Pget_fapl_mpio(acc_pl, &comm_tmp, &info_tmp);
+ VRFY((ret >= 0), "H5Pget_fapl_mpio");
+ MPI_Comm_size(comm_tmp, &mpi_size_tmp);
+ MPI_Comm_rank(comm_tmp, &mpi_rank_tmp);
+ if (VERBOSE_MED)
+ HDprintf("After second H5Pget_fapl_mpio: rank/size of comm are %d/%d\n", mpi_rank_tmp, mpi_size_tmp);
+ VRFY((mpi_size_tmp == mpi_size), "MPI_Comm_size");
+ VRFY((mpi_rank_tmp == mpi_rank), "MPI_Comm_rank");
+ if (MPI_INFO_NULL != info_tmp) {
+ mrc = MPI_Info_get_nkeys(info_tmp, &nkeys_tmp);
+ VRFY((mrc == MPI_SUCCESS), "MPI_Info_get_nkeys");
+ VRFY((nkeys_tmp == nkeys), "new and old nkeys equal");
+ }
+#if 0
+ if (VERBOSE_MED)
+ h5_dump_info_object(info_tmp);
+#endif
+
+ /* Case 3:
+ * Close the property list and verify the retrieved communicator and INFO
+ * object are still valid.
+ */
+ H5Pclose(acc_pl);
+ MPI_Comm_size(comm_tmp, &mpi_size_tmp);
+ MPI_Comm_rank(comm_tmp, &mpi_rank_tmp);
+ if (VERBOSE_MED)
+ HDprintf("After Property list closed: rank/size of comm are %d/%d\n", mpi_rank_tmp, mpi_size_tmp);
+ if (MPI_INFO_NULL != info_tmp) {
+ mrc = MPI_Info_get_nkeys(info_tmp, &nkeys_tmp);
+ VRFY((mrc == MPI_SUCCESS), "MPI_Info_get_nkeys");
+ }
+#if 0
+ if (VERBOSE_MED)
+ h5_dump_info_object(info_tmp);
+#endif
+
+ /* clean up */
+ mrc = MPI_Comm_free(&comm_tmp);
+ VRFY((mrc == MPI_SUCCESS), "MPI_Comm_free");
+ if (MPI_INFO_NULL != info_tmp) {
+ mrc = MPI_Info_free(&info_tmp);
+ VRFY((mrc == MPI_SUCCESS), "MPI_Info_free");
+ }
+} /* end test_fapl_mpio_dup() */
diff --git a/testpar/API/t_prop.c b/testpar/API/t_prop.c
new file mode 100644
index 0000000..3659501
--- /dev/null
+++ b/testpar/API/t_prop.c
@@ -0,0 +1,646 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://www.hdfgroup.org/licenses. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/*
+ * Parallel tests for encoding/decoding plists sent between processes
+ */
+
+#include "hdf5.h"
+#include "testphdf5.h"
+
+#if 0
+#include "H5ACprivate.h"
+#include "H5Pprivate.h"
+#endif
+
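+/*
+ * Encode/decode round trip exercised by test_encode_decode() below
+ * (illustrative only; buffer and variable names are placeholders):
+ *
+ *     H5Pencode2(plist, NULL, &size, H5P_DEFAULT);    query encoded size
+ *     buf = HDmalloc(size);
+ *     H5Pencode2(plist, buf, &size, H5P_DEFAULT);     encode into buf
+ *     ... ship buf to another rank with MPI ...
+ *     decoded = H5Pdecode(buf);                       decode on receiver
+ *     H5Pequal(plist, decoded)                        expected to be TRUE
+ */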
+static int
+test_encode_decode(hid_t orig_pl, int mpi_rank, int recv_proc)
+{
+ MPI_Request req[2];
+ MPI_Status status;
+ hid_t pl; /* Decoded property list */
+ size_t buf_size = 0;
+ void *sbuf = NULL;
+ herr_t ret; /* Generic return value */
+
+ if (mpi_rank == 0) {
+ int send_size = 0;
+
+ /* first call to encode returns only the size of the buffer needed */
+ ret = H5Pencode2(orig_pl, NULL, &buf_size, H5P_DEFAULT);
+ VRFY((ret >= 0), "H5Pencode succeeded");
+
+ sbuf = (uint8_t *)HDmalloc(buf_size);
+
+ ret = H5Pencode2(orig_pl, sbuf, &buf_size, H5P_DEFAULT);
+ VRFY((ret >= 0), "H5Pencode succeeded");
+
+ /* this is a temp fix to send this size_t */
+ send_size = (int)buf_size;
+
+ MPI_Isend(&send_size, 1, MPI_INT, recv_proc, 123, MPI_COMM_WORLD, &req[0]);
+ MPI_Isend(sbuf, send_size, MPI_BYTE, recv_proc, 124, MPI_COMM_WORLD, &req[1]);
+ } /* end if */
+
+ if (mpi_rank == recv_proc) {
+ int recv_size;
+ void *rbuf;
+
+ MPI_Recv(&recv_size, 1, MPI_INT, 0, 123, MPI_COMM_WORLD, &status);
+ VRFY((recv_size >= 0), "MPI_Recv succeeded");
+ buf_size = (size_t)recv_size;
+ rbuf = (uint8_t *)HDmalloc(buf_size);
+ MPI_Recv(rbuf, recv_size, MPI_BYTE, 0, 124, MPI_COMM_WORLD, &status);
+
+ pl = H5Pdecode(rbuf);
+ VRFY((pl >= 0), "H5Pdecode succeeded");
+
+ VRFY(H5Pequal(orig_pl, pl), "Property List Equal Succeeded");
+
+ ret = H5Pclose(pl);
+ VRFY((ret >= 0), "H5Pclose succeeded");
+
+ if (NULL != rbuf)
+ HDfree(rbuf);
+ } /* end if */
+
+ if (0 == mpi_rank) {
+ /* gcc 11 complains about passing MPI_STATUSES_IGNORE as an MPI_Status
+ * array. See the discussion here:
+ *
+ * https://github.com/pmodels/mpich/issues/5687
+ */
+ /* H5_GCC_DIAG_OFF("stringop-overflow") */
+ MPI_Waitall(2, req, MPI_STATUSES_IGNORE);
+ /* H5_GCC_DIAG_ON("stringop-overflow") */
+ }
+
+ if (NULL != sbuf)
+ HDfree(sbuf);
+
+ MPI_Barrier(MPI_COMM_WORLD);
+ return 0;
+}
+
+void
+test_plist_ed(void)
+{
+ hid_t dcpl; /* dataset create prop. list */
+ hid_t dapl; /* dataset access prop. list */
+ hid_t dxpl; /* dataset transfer prop. list */
+ hid_t gcpl; /* group create prop. list */
+ hid_t lcpl; /* link create prop. list */
+ hid_t lapl; /* link access prop. list */
+ hid_t ocpypl; /* object copy prop. list */
+ hid_t ocpl; /* object create prop. list */
+ hid_t fapl; /* file access prop. list */
+ hid_t fcpl; /* file create prop. list */
+ hid_t strcpl; /* string create prop. list */
+ hid_t acpl; /* attribute create prop. list */
+
+ int mpi_size, mpi_rank, recv_proc;
+
+ hsize_t chunk_size = 16384; /* chunk size */
+ double fill = 2.7; /* Fill value */
+ size_t nslots = 521 * 2;
+ size_t nbytes = 1048576 * 10;
+ double w0 = 0.5;
+ unsigned max_compact;
+ unsigned min_dense;
+ hsize_t max_size[1]; /*data space maximum size */
+ const char *c_to_f = "x+32";
+ H5AC_cache_config_t my_cache_config = {H5AC__CURR_CACHE_CONFIG_VERSION,
+ TRUE,
+ FALSE,
+ FALSE,
+ "temp",
+ TRUE,
+ FALSE,
+ (2 * 2048 * 1024),
+ 0.3,
+ (64 * 1024 * 1024),
+ (4 * 1024 * 1024),
+ 60000,
+ H5C_incr__threshold,
+ 0.8,
+ 3.0,
+ TRUE,
+ (8 * 1024 * 1024),
+ H5C_flash_incr__add_space,
+ 2.0,
+ 0.25,
+ H5C_decr__age_out_with_threshold,
+ 0.997,
+ 0.8,
+ TRUE,
+ (3 * 1024 * 1024),
+ 3,
+ FALSE,
+ 0.2,
+ (256 * 2048),
+ 1 /* H5AC__DEFAULT_METADATA_WRITE_STRATEGY */};
+
+ herr_t ret; /* Generic return value */
+
+ if (VERBOSE_MED)
+ HDprintf("Encode/Decode DCPLs\n");
+
+ /* set up MPI parameters */
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ if (mpi_size == 1)
+ recv_proc = 0;
+ else
+ recv_proc = 1;
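+ /* Rank 0 always encodes and sends; the decode side runs on rank 1 when
+ * more than one rank is available, otherwise on rank 0 itself. */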
+
+ dcpl = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY((dcpl >= 0), "H5Pcreate succeeded");
+
+ ret = H5Pset_chunk(dcpl, 1, &chunk_size);
+ VRFY((ret >= 0), "H5Pset_chunk succeeded");
+
+ ret = H5Pset_alloc_time(dcpl, H5D_ALLOC_TIME_LATE);
+ VRFY((ret >= 0), "H5Pset_alloc_time succeeded");
+
+ ret = H5Pset_fill_value(dcpl, H5T_NATIVE_DOUBLE, &fill);
+ VRFY((ret >= 0), "set fill-value succeeded");
+
+ max_size[0] = 100;
+ ret = H5Pset_external(dcpl, "ext1.data", (off_t)0, (hsize_t)(max_size[0] * sizeof(int) / 4));
+ VRFY((ret >= 0), "set external succeeded");
+ ret = H5Pset_external(dcpl, "ext2.data", (off_t)0, (hsize_t)(max_size[0] * sizeof(int) / 4));
+ VRFY((ret >= 0), "set external succeeded");
+ ret = H5Pset_external(dcpl, "ext3.data", (off_t)0, (hsize_t)(max_size[0] * sizeof(int) / 4));
+ VRFY((ret >= 0), "set external succeeded");
+ ret = H5Pset_external(dcpl, "ext4.data", (off_t)0, (hsize_t)(max_size[0] * sizeof(int) / 4));
+ VRFY((ret >= 0), "set external succeeded");
+
+ ret = test_encode_decode(dcpl, mpi_rank, recv_proc);
+ VRFY((ret >= 0), "test_encode_decode succeeded");
+
+ ret = H5Pclose(dcpl);
+ VRFY((ret >= 0), "H5Pclose succeeded");
+
+ /******* ENCODE/DECODE DAPLS *****/
+ dapl = H5Pcreate(H5P_DATASET_ACCESS);
+ VRFY((dapl >= 0), "H5Pcreate succeeded");
+
+ ret = H5Pset_chunk_cache(dapl, nslots, nbytes, w0);
+ VRFY((ret >= 0), "H5Pset_chunk_cache succeeded");
+
+ ret = test_encode_decode(dapl, mpi_rank, recv_proc);
+ VRFY((ret >= 0), "test_encode_decode succeeded");
+
+ ret = H5Pclose(dapl);
+ VRFY((ret >= 0), "H5Pclose succeeded");
+
+ /******* ENCODE/DECODE OCPLS *****/
+ ocpl = H5Pcreate(H5P_OBJECT_CREATE);
+ VRFY((ocpl >= 0), "H5Pcreate succeeded");
+
+ ret = H5Pset_attr_creation_order(ocpl, (H5P_CRT_ORDER_TRACKED | H5P_CRT_ORDER_INDEXED));
+ VRFY((ret >= 0), "H5Pset_attr_creation_order succeeded");
+
+ ret = H5Pset_attr_phase_change(ocpl, 110, 105);
+ VRFY((ret >= 0), "H5Pset_attr_phase_change succeeded");
+
+ ret = H5Pset_filter(ocpl, H5Z_FILTER_FLETCHER32, 0, (size_t)0, NULL);
+ VRFY((ret >= 0), "H5Pset_filter succeeded");
+
+ ret = test_encode_decode(ocpl, mpi_rank, recv_proc);
+ VRFY((ret >= 0), "test_encode_decode succeeded");
+
+ ret = H5Pclose(ocpl);
+ VRFY((ret >= 0), "H5Pclose succeeded");
+
+ /******* ENCODE/DECODE DXPLS *****/
+ dxpl = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((dxpl >= 0), "H5Pcreate succeeded");
+
+ ret = H5Pset_btree_ratios(dxpl, 0.2, 0.6, 0.2);
+ VRFY((ret >= 0), "H5Pset_btree_ratios succeeded");
+
+ ret = H5Pset_hyper_vector_size(dxpl, 5);
+ VRFY((ret >= 0), "H5Pset_hyper_vector_size succeeded");
+
+ ret = H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);
+ VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
+
+ ret = H5Pset_dxpl_mpio_collective_opt(dxpl, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret >= 0), "H5Pset_dxpl_mpio_collective_opt succeeded");
+
+ ret = H5Pset_dxpl_mpio_chunk_opt(dxpl, H5FD_MPIO_CHUNK_MULTI_IO);
+ VRFY((ret >= 0), "H5Pset_dxpl_mpio_chunk_opt succeeded");
+
+ ret = H5Pset_dxpl_mpio_chunk_opt_ratio(dxpl, 30);
+ VRFY((ret >= 0), "H5Pset_dxpl_mpio_chunk_opt_ratio succeeded");
+
+ ret = H5Pset_dxpl_mpio_chunk_opt_num(dxpl, 40);
+ VRFY((ret >= 0), "H5Pset_dxpl_mpio_chunk_opt_num succeeded");
+
+ ret = H5Pset_edc_check(dxpl, H5Z_DISABLE_EDC);
+ VRFY((ret >= 0), "H5Pset_edc_check succeeded");
+
+ ret = H5Pset_data_transform(dxpl, c_to_f);
+ VRFY((ret >= 0), "H5Pset_data_transform succeeded");
+
+ ret = test_encode_decode(dxpl, mpi_rank, recv_proc);
+ VRFY((ret >= 0), "test_encode_decode succeeded");
+
+ ret = H5Pclose(dxpl);
+ VRFY((ret >= 0), "H5Pclose succeeded");
+
+ /******* ENCODE/DECODE GCPLS *****/
+ gcpl = H5Pcreate(H5P_GROUP_CREATE);
+ VRFY((gcpl >= 0), "H5Pcreate succeeded");
+
+ ret = H5Pset_local_heap_size_hint(gcpl, 256);
+ VRFY((ret >= 0), "H5Pset_local_heap_size_hint succeeded");
+
+ ret = H5Pset_link_phase_change(gcpl, 2, 2);
+ VRFY((ret >= 0), "H5Pset_link_phase_change succeeded");
+
+ /* Query the group creation properties */
+ ret = H5Pget_link_phase_change(gcpl, &max_compact, &min_dense);
+ VRFY((ret >= 0), "H5Pget_est_link_info succeeded");
+
+ ret = H5Pset_est_link_info(gcpl, 3, 9);
+ VRFY((ret >= 0), "H5Pset_est_link_info succeeded");
+
+ ret = H5Pset_link_creation_order(gcpl, (H5P_CRT_ORDER_TRACKED | H5P_CRT_ORDER_INDEXED));
+ VRFY((ret >= 0), "H5Pset_link_creation_order succeeded");
+
+ ret = test_encode_decode(gcpl, mpi_rank, recv_proc);
+ VRFY((ret >= 0), "test_encode_decode succeeded");
+
+ ret = H5Pclose(gcpl);
+ VRFY((ret >= 0), "H5Pclose succeeded");
+
+ /******* ENCODE/DECODE LCPLS *****/
+ lcpl = H5Pcreate(H5P_LINK_CREATE);
+ VRFY((lcpl >= 0), "H5Pcreate succeeded");
+
+ ret = H5Pset_create_intermediate_group(lcpl, TRUE);
+ VRFY((ret >= 0), "H5Pset_create_intermediate_group succeeded");
+
+ ret = test_encode_decode(lcpl, mpi_rank, recv_proc);
+ VRFY((ret >= 0), "test_encode_decode succeeded");
+
+ ret = H5Pclose(lcpl);
+ VRFY((ret >= 0), "H5Pclose succeeded");
+
+ /******* ENCODE/DECODE LAPLS *****/
+ lapl = H5Pcreate(H5P_LINK_ACCESS);
+ VRFY((lapl >= 0), "H5Pcreate succeeded");
+
+ ret = H5Pset_nlinks(lapl, (size_t)134);
+ VRFY((ret >= 0), "H5Pset_nlinks succeeded");
+
+ ret = H5Pset_elink_acc_flags(lapl, H5F_ACC_RDONLY);
+ VRFY((ret >= 0), "H5Pset_elink_acc_flags succeeded");
+
+ ret = H5Pset_elink_prefix(lapl, "/tmpasodiasod");
+ VRFY((ret >= 0), "H5Pset_nlinks succeeded");
+
+ /* Create FAPL for the elink FAPL */
+ fapl = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((fapl >= 0), "H5Pcreate succeeded");
+ ret = H5Pset_alignment(fapl, 2, 1024);
+ VRFY((ret >= 0), "H5Pset_alignment succeeded");
+
+ ret = H5Pset_elink_fapl(lapl, fapl);
+ VRFY((ret >= 0), "H5Pset_elink_fapl succeeded");
+
+ /* Close the elink's FAPL */
+ ret = H5Pclose(fapl);
+ VRFY((ret >= 0), "H5Pclose succeeded");
+
+ ret = test_encode_decode(lapl, mpi_rank, recv_proc);
+ VRFY((ret >= 0), "test_encode_decode succeeded");
+
+ ret = H5Pclose(lapl);
+ VRFY((ret >= 0), "H5Pclose succeeded");
+
+ /******* ENCODE/DECODE OCPYPLS *****/
+ ocpypl = H5Pcreate(H5P_OBJECT_COPY);
+ VRFY((ocpypl >= 0), "H5Pcreate succeeded");
+
+ ret = H5Pset_copy_object(ocpypl, H5O_COPY_EXPAND_EXT_LINK_FLAG);
+ VRFY((ret >= 0), "H5Pset_copy_object succeeded");
+
+ ret = H5Padd_merge_committed_dtype_path(ocpypl, "foo");
+ VRFY((ret >= 0), "H5Padd_merge_committed_dtype_path succeeded");
+
+ ret = H5Padd_merge_committed_dtype_path(ocpypl, "bar");
+ VRFY((ret >= 0), "H5Padd_merge_committed_dtype_path succeeded");
+
+ ret = test_encode_decode(ocpypl, mpi_rank, recv_proc);
+ VRFY((ret >= 0), "test_encode_decode succeeded");
+
+ ret = H5Pclose(ocpypl);
+ VRFY((ret >= 0), "H5Pclose succeeded");
+
+ /******* ENCODE/DECODE FAPLS *****/
+ fapl = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((fapl >= 0), "H5Pcreate succeeded");
+
+ ret = H5Pset_family_offset(fapl, 1024);
+ VRFY((ret >= 0), "H5Pset_family_offset succeeded");
+
+ ret = H5Pset_meta_block_size(fapl, 2098452);
+ VRFY((ret >= 0), "H5Pset_meta_block_size succeeded");
+
+ ret = H5Pset_sieve_buf_size(fapl, 1048576);
+ VRFY((ret >= 0), "H5Pset_sieve_buf_size succeeded");
+
+ ret = H5Pset_alignment(fapl, 2, 1024);
+ VRFY((ret >= 0), "H5Pset_alignment succeeded");
+
+ ret = H5Pset_cache(fapl, 1024, 128, 10485760, 0.3);
+ VRFY((ret >= 0), "H5Pset_cache succeeded");
+
+ ret = H5Pset_elink_file_cache_size(fapl, 10485760);
+ VRFY((ret >= 0), "H5Pset_elink_file_cache_size succeeded");
+
+ ret = H5Pset_gc_references(fapl, 1);
+ VRFY((ret >= 0), "H5Pset_gc_references succeeded");
+
+ ret = H5Pset_small_data_block_size(fapl, 2048);
+ VRFY((ret >= 0), "H5Pset_small_data_block_size succeeded");
+
+ ret = H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST);
+ VRFY((ret >= 0), "H5Pset_libver_bounds succeeded");
+
+ ret = H5Pset_fclose_degree(fapl, H5F_CLOSE_WEAK);
+ VRFY((ret >= 0), "H5Pset_fclose_degree succeeded");
+
+ ret = H5Pset_multi_type(fapl, H5FD_MEM_GHEAP);
+ VRFY((ret >= 0), "H5Pset_multi_type succeeded");
+
+ ret = H5Pset_mdc_config(fapl, &my_cache_config);
+ VRFY((ret >= 0), "H5Pset_mdc_config succeeded");
+
+ ret = test_encode_decode(fapl, mpi_rank, recv_proc);
+ VRFY((ret >= 0), "test_encode_decode succeeded");
+
+ ret = H5Pclose(fapl);
+ VRFY((ret >= 0), "H5Pclose succeeded");
+
+ /******* ENCODE/DECODE FCPLS *****/
+ fcpl = H5Pcreate(H5P_FILE_CREATE);
+ VRFY((fcpl >= 0), "H5Pcreate succeeded");
+
+ ret = H5Pset_userblock(fcpl, 1024);
+ VRFY((ret >= 0), "H5Pset_userblock succeeded");
+
+ ret = H5Pset_istore_k(fcpl, 3);
+ VRFY((ret >= 0), "H5Pset_istore_k succeeded");
+
+ ret = H5Pset_sym_k(fcpl, 4, 5);
+ VRFY((ret >= 0), "H5Pset_sym_k succeeded");
+
+ ret = H5Pset_shared_mesg_nindexes(fcpl, 8);
+ VRFY((ret >= 0), "H5Pset_shared_mesg_nindexes succeeded");
+
+ ret = H5Pset_shared_mesg_index(fcpl, 1, H5O_SHMESG_SDSPACE_FLAG, 32);
+ VRFY((ret >= 0), "H5Pset_shared_mesg_index succeeded");
+
+ ret = H5Pset_shared_mesg_phase_change(fcpl, 60, 20);
+ VRFY((ret >= 0), "H5Pset_shared_mesg_phase_change succeeded");
+
+ ret = H5Pset_sizes(fcpl, 8, 4);
+ VRFY((ret >= 0), "H5Pset_sizes succeeded");
+
+ ret = test_encode_decode(fcpl, mpi_rank, recv_proc);
+ VRFY((ret >= 0), "test_encode_decode succeeded");
+
+ ret = H5Pclose(fcpl);
+ VRFY((ret >= 0), "H5Pclose succeeded");
+
+ /******* ENCODE/DECODE STRCPLS *****/
+ strcpl = H5Pcreate(H5P_STRING_CREATE);
+ VRFY((strcpl >= 0), "H5Pcreate succeeded");
+
+ ret = H5Pset_char_encoding(strcpl, H5T_CSET_UTF8);
+ VRFY((ret >= 0), "H5Pset_char_encoding succeeded");
+
+ ret = test_encode_decode(strcpl, mpi_rank, recv_proc);
+ VRFY((ret >= 0), "test_encode_decode succeeded");
+
+ ret = H5Pclose(strcpl);
+ VRFY((ret >= 0), "H5Pclose succeeded");
+
+ /******* ENCODE/DECODE ACPLS *****/
+ acpl = H5Pcreate(H5P_ATTRIBUTE_CREATE);
+ VRFY((acpl >= 0), "H5Pcreate succeeded");
+
+ ret = H5Pset_char_encoding(acpl, H5T_CSET_UTF8);
+ VRFY((ret >= 0), "H5Pset_char_encoding succeeded");
+
+ ret = test_encode_decode(acpl, mpi_rank, recv_proc);
+ VRFY((ret >= 0), "test_encode_decode succeeded");
+
+ ret = H5Pclose(acpl);
+ VRFY((ret >= 0), "H5Pclose succeeded");
+}
+
+#if 0
+void
+external_links(void)
+{
+ hid_t lcpl = H5I_INVALID_HID; /* link create prop. list */
+ hid_t lapl = H5I_INVALID_HID; /* link access prop. list */
+ hid_t fapl = H5I_INVALID_HID; /* file access prop. list */
+ hid_t gapl = H5I_INVALID_HID; /* group access prop. list */
+ hid_t fid = H5I_INVALID_HID; /* file id */
+ hid_t group = H5I_INVALID_HID; /* group id */
+ int mpi_size, mpi_rank;
+
+ MPI_Comm comm;
+ int doIO;
+ int i, mrc;
+
+ herr_t ret; /* Generic return value */
+ htri_t tri_status; /* tri return value */
+
+ const char *filename = "HDF5test.h5";
+ const char *filename_ext = "HDF5test_ext.h5";
+ const char *group_path = "/Base/Block/Step";
+ const char *link_name = "link"; /* external link */
+ char link_path[50];
+
+ if (VERBOSE_MED)
+ HDprintf("Check external links\n");
+
+ /* set up MPI parameters */
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ /* Check that MPI communicator access properties are passed to
+ externally linked files */
+
+ if (mpi_rank == 0) {
+
+ lcpl = H5Pcreate(H5P_LINK_CREATE);
+ VRFY((lcpl >= 0), "H5Pcreate succeeded");
+
+ ret = H5Pset_create_intermediate_group(lcpl, 1);
+ VRFY((ret >= 0), "H5Pset_create_intermediate_group succeeded");
+
+ /* Create file to serve as target for external link.*/
+ fid = H5Fcreate(filename_ext, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ VRFY((fid >= 0), "H5Fcreate succeeded");
+
+ group = H5Gcreate2(fid, group_path, lcpl, H5P_DEFAULT, H5P_DEFAULT);
+ VRFY((group >= 0), "H5Gcreate succeeded");
+
+ ret = H5Gclose(group);
+ VRFY((ret >= 0), "H5Gclose succeeded");
+
+ ret = H5Fclose(fid);
+ VRFY((ret >= 0), "H5Fclose succeeded");
+
+ fapl = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((fapl >= 0), "H5Pcreate succeeded");
+
+ /* Create a new file using the file access property list. */
+ fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
+ VRFY((fid >= 0), "H5Fcreate succeeded");
+
+ ret = H5Pclose(fapl);
+ VRFY((ret >= 0), "H5Pclose succeeded");
+
+ group = H5Gcreate2(fid, group_path, lcpl, H5P_DEFAULT, H5P_DEFAULT);
+ VRFY((group >= 0), "H5Gcreate succeeded");
+
+ /* Create external links to the target files. */
+ ret = H5Lcreate_external(filename_ext, group_path, group, link_name, H5P_DEFAULT, H5P_DEFAULT);
+ VRFY((ret >= 0), "H5Lcreate_external succeeded");
+
+ /* Close and release resources. */
+ ret = H5Pclose(lcpl);
+ VRFY((ret >= 0), "H5Pclose succeeded");
+ ret = H5Gclose(group);
+ VRFY((ret >= 0), "H5Gclose succeeded");
+ ret = H5Fclose(fid);
+ VRFY((ret >= 0), "H5Fclose succeeded");
+ }
+
+ MPI_Barrier(MPI_COMM_WORLD);
+
+ /*
+ * For the first case, use all the processes. For the second case
+ * use a sub-communicator to verify the correct communicator is
+ * being used for the externally linked files.
+ * There is no way to determine if MPI info is being used for the
+ * externally linked files.
+ */
+
+ for (i = 0; i < 2; i++) {
+
+ comm = MPI_COMM_WORLD;
+
+ if (i == 0)
+ doIO = 1;
+ else {
+ doIO = mpi_rank % 2;
+ mrc = MPI_Comm_split(MPI_COMM_WORLD, doIO, mpi_rank, &comm);
+ VRFY((mrc == MPI_SUCCESS), "");
+ }
+
+ if (doIO) {
+ fapl = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((fapl >= 0), "H5Pcreate succeeded");
+ ret = H5Pset_fapl_mpio(fapl, comm, MPI_INFO_NULL);
+ VRFY((ret >= 0), "H5Pset_fapl_mpio succeeded");
+
+ fid = H5Fopen(filename, H5F_ACC_RDWR, fapl);
+ VRFY((fid >= 0), "H5Fopen succeeded");
+
+ /* test opening a group that is reached through an external link; the
+ externally linked file should inherit the source file's access properties */
+ HDsnprintf(link_path, sizeof(link_path), "%s%s%s", group_path, "/", link_name);
+ group = H5Gopen2(fid, link_path, H5P_DEFAULT);
+ VRFY((group >= 0), "H5Gopen succeeded");
+ ret = H5Gclose(group);
+ VRFY((ret >= 0), "H5Gclose succeeded");
+
+ /* test opening a group behind the external link by setting the elink
+ FAPL on a group access property list */
+ gapl = H5Pcreate(H5P_GROUP_ACCESS);
+ VRFY((gapl >= 0), "H5Pcreate succeeded");
+
+ ret = H5Pset_elink_fapl(gapl, fapl);
+ VRFY((ret >= 0), "H5Pset_elink_fapl succeeded");
+
+ group = H5Gopen2(fid, link_path, gapl);
+ VRFY((group >= 0), "H5Gopen succeeded");
+
+ ret = H5Gclose(group);
+ VRFY((ret >= 0), "H5Gclose succeeded");
+
+ ret = H5Pclose(gapl);
+ VRFY((ret >= 0), "H5Pclose succeeded");
+
+ /* test link APIs */
+ lapl = H5Pcreate(H5P_LINK_ACCESS);
+ VRFY((lapl >= 0), "H5Pcreate succeeded");
+
+ ret = H5Pset_elink_fapl(lapl, fapl);
+ VRFY((ret >= 0), "H5Pset_elink_fapl succeeded");
+
+ tri_status = H5Lexists(fid, link_path, H5P_DEFAULT);
+ VRFY((tri_status == TRUE), "H5Lexists succeeded");
+
+ tri_status = H5Lexists(fid, link_path, lapl);
+ VRFY((tri_status == TRUE), "H5Lexists succeeded");
+
+ group = H5Oopen(fid, link_path, H5P_DEFAULT);
+ VRFY((group >= 0), "H5Oopen succeeded");
+
+ ret = H5Oclose(group);
+ VRFY((ret >= 0), "H5Oclose succeeded");
+
+ group = H5Oopen(fid, link_path, lapl);
+ VRFY((group >= 0), "H5Oopen succeeded");
+
+ ret = H5Oclose(group);
+ VRFY((ret >= 0), "H5Oclose succeeded");
+
+ ret = H5Pclose(lapl);
+ VRFY((ret >= 0), "H5Pclose succeeded");
+
+ /* close the remaining resources */
+
+ ret = H5Pclose(fapl);
+ VRFY((ret >= 0), "H5Pclose succeeded");
+
+ ret = H5Fclose(fid);
+ VRFY((ret >= 0), "H5Fclose succeeded");
+ }
+
+ if (comm != MPI_COMM_WORLD) {
+ mrc = MPI_Comm_free(&comm);
+ VRFY((mrc == MPI_SUCCESS), "MPI_Comm_free succeeded");
+ }
+ }
+
+ MPI_Barrier(MPI_COMM_WORLD);
+
+ /* delete the test files */
+ if (mpi_rank == 0) {
+ MPI_File_delete(filename, MPI_INFO_NULL);
+ MPI_File_delete(filename_ext, MPI_INFO_NULL);
+ }
+}
+#endif
diff --git a/testpar/API/t_pshutdown.c b/testpar/API/t_pshutdown.c
new file mode 100644
index 0000000..48a8005
--- /dev/null
+++ b/testpar/API/t_pshutdown.c
@@ -0,0 +1,150 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://www.hdfgroup.org/licenses. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/*
+ * Programmer: Mohamad Chaarawi
+ * February 2015
+ *
+ * Purpose: This test creates a file and a bunch of objects in the
+ * file and then calls MPI_Finalize without closing anything. The
+ * library should exercise the attribute destroy callback attached to
+ * MPI_COMM_SELF and terminate the HDF5 library, closing all open
+ * objects. The t_prestart test will read back the file and make sure
+ * all created objects are there.
+ */
+
+#include "hdf5.h"
+#include "testphdf5.h"
+
+int nerrors = 0; /* errors count */
+
+const char *FILENAME[] = {"shutdown.h5", NULL};
+
+int
+main(int argc, char **argv)
+{
+ hid_t file_id, dset_id, grp_id;
+ hid_t fapl, sid, mem_dataspace;
+ hsize_t dims[RANK], i;
+ herr_t ret;
+#if 0
+ char filename[1024];
+#endif
+ int mpi_size, mpi_rank;
+ MPI_Comm comm = MPI_COMM_WORLD;
+ MPI_Info info = MPI_INFO_NULL;
+ hsize_t start[RANK];
+ hsize_t count[RANK];
+ hsize_t stride[RANK];
+ hsize_t block[RANK];
+ DATATYPE *data_array = NULL; /* data buffer */
+
+ MPI_Init(&argc, &argv);
+ MPI_Comm_size(comm, &mpi_size);
+ MPI_Comm_rank(comm, &mpi_rank);
+
+ if (MAINPROCESS) {
+ printf("Testing %-62s", "proper shutdown of HDF5 library");
+ fflush(stdout);
+ }
+
+ /* Set up file access property list with parallel I/O access */
+ fapl = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((fapl >= 0), "H5Pcreate succeeded");
+
+ /* Get the capability flag of the VOL connector being used */
+ ret = H5Pget_vol_cap_flags(fapl, &vol_cap_flags_g);
+ VRFY((ret >= 0), "H5Pget_vol_cap_flags succeeded");
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_GROUP_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ HDprintf(
+ " API functions for basic file, group, or dataset aren't supported with this connector\n");
+ fflush(stdout);
+ }
+
+ MPI_Finalize();
+ return 0;
+ }
+
+ ret = H5Pset_fapl_mpio(fapl, comm, info);
+ VRFY((ret >= 0), "");
+
+#if 0
+ h5_fixname(FILENAME[0], fapl, filename, sizeof filename);
+#endif
+ file_id = H5Fcreate(FILENAME[0], H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
+ VRFY((file_id >= 0), "H5Fcreate succeeded");
+ grp_id = H5Gcreate2(file_id, "Group", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ VRFY((grp_id >= 0), "H5Gcreate succeeded");
+
+ dims[0] = (hsize_t)ROW_FACTOR * (hsize_t)mpi_size;
+ dims[1] = (hsize_t)COL_FACTOR * (hsize_t)mpi_size;
+ sid = H5Screate_simple(RANK, dims, NULL);
+ VRFY((sid >= 0), "H5Screate_simple succeeded");
+
+ dset_id = H5Dcreate2(grp_id, "Dataset", H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "H5Dcreate succeeded");
+
+ /* allocate memory for data buffer */
+ data_array = (DATATYPE *)HDmalloc(dims[0] * dims[1] * sizeof(DATATYPE));
+ VRFY((data_array != NULL), "data_array HDmalloc succeeded");
+
+ /* Each process takes a slab of rows. */
+ block[0] = dims[0] / (hsize_t)mpi_size;
+ block[1] = dims[1];
+ stride[0] = block[0];
+ stride[1] = block[1];
+ count[0] = 1;
+ count[1] = 1;
+ start[0] = (hsize_t)mpi_rank * block[0];
+ start[1] = 0;
+
+ /* put some trivial data in the data_array */
+ for (i = 0; i < dims[0] * dims[1]; i++)
+ data_array[i] = mpi_rank + 1;
+
+ ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
+
+ /* create a memory dataspace independently */
+ mem_dataspace = H5Screate_simple(RANK, block, NULL);
+ VRFY((mem_dataspace >= 0), "");
+
+ /* write data independently */
+ ret = H5Dwrite(dset_id, H5T_NATIVE_INT, mem_dataspace, sid, H5P_DEFAULT, data_array);
+ VRFY((ret >= 0), "H5Dwrite succeeded");
+
+ /* release data buffers */
+ if (data_array)
+ HDfree(data_array);
+
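+ /* Note: file_id, grp_id, dset_id, sid, mem_dataspace, and fapl are
+ * intentionally left open here; the point of this test is that the
+ * library shuts down cleanly when MPI_Finalize() is called anyway. */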
+ MPI_Finalize();
+
+ /* nerrors += GetTestNumErrs(); */
+
+ if (MAINPROCESS) {
+ if (0 == nerrors) {
+ puts(" PASSED");
+ fflush(stdout);
+ }
+ else {
+ puts("*FAILED*");
+ fflush(stdout);
+ }
+ }
+
+ return (nerrors != 0);
+}
diff --git a/testpar/API/t_shapesame.c b/testpar/API/t_shapesame.c
new file mode 100644
index 0000000..340e89e
--- /dev/null
+++ b/testpar/API/t_shapesame.c
@@ -0,0 +1,4516 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://www.hdfgroup.org/licenses. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/*
+ This program will test independent and collective reads and writes between
+ selections of different rank that nonetheless are deemed as having the
+ same shape by H5Sselect_shape_same().
+ */
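+/*
+ * For example (illustrative only), a 1 x 1 x 4 x 4 hyperslab selection in a
+ * rank-4 dataspace and a 4 x 4 selection in a rank-2 dataspace are regarded
+ * as having the same shape by H5Sselect_shape_same(), and I/O between them
+ * is expected to work.
+ */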
+
+#define H5S_FRIEND /*suppress error about including H5Spkg */
+
+/* Define this macro to indicate that the testing APIs should be available */
+#define H5S_TESTING
+
+#if 0
+#include "H5Spkg.h" /* Dataspaces */
+#endif
+
+#include "hdf5.h"
+#include "testphdf5.h"
+
+/* FILENAME and filenames must have the same number of names.
+ * Use PARATESTFILE in general and use a separate filename only if the file
+ * created in one test is accessed by a different test.
+ * filenames[0] is reserved as the file name for PARATESTFILE.
+ */
+#define NFILENAME 2
+const char *FILENAME[NFILENAME] = {"ShapeSameTest.h5", NULL};
+char filenames[NFILENAME][PATH_MAX];
+hid_t fapl; /* file access property list */
+
+/* On Lustre (and perhaps other parallel file systems?), we have severe
+ * slow downs if two or more processes attempt to access the same file system
+ * block. To minimize this problem, we set alignment in the shape same tests
+ * to the default Lustre block size -- which greatly reduces contention in
+ * the chunked dataset case.
+ */
+
+#define SHAPE_SAME_TEST_ALIGNMENT ((hsize_t)(4 * 1024 * 1024))
+
+#define PAR_SS_DR_MAX_RANK 5 /* must update code if this changes */
+
+struct hs_dr_pio_test_vars_t {
+ int mpi_size;
+ int mpi_rank;
+ MPI_Comm mpi_comm;
+ MPI_Info mpi_info;
+ int test_num;
+ int edge_size;
+ int checker_edge_size;
+ int chunk_edge_size;
+ int small_rank;
+ int large_rank;
+ hid_t dset_type;
+ uint32_t *small_ds_buf_0;
+ uint32_t *small_ds_buf_1;
+ uint32_t *small_ds_buf_2;
+ uint32_t *small_ds_slice_buf;
+ uint32_t *large_ds_buf_0;
+ uint32_t *large_ds_buf_1;
+ uint32_t *large_ds_buf_2;
+ uint32_t *large_ds_slice_buf;
+ int small_ds_offset;
+ int large_ds_offset;
+ hid_t fid; /* HDF5 file ID */
+ hid_t xfer_plist;
+ hid_t full_mem_small_ds_sid;
+ hid_t full_file_small_ds_sid;
+ hid_t mem_small_ds_sid;
+ hid_t file_small_ds_sid_0;
+ hid_t file_small_ds_sid_1;
+ hid_t small_ds_slice_sid;
+ hid_t full_mem_large_ds_sid;
+ hid_t full_file_large_ds_sid;
+ hid_t mem_large_ds_sid;
+ hid_t file_large_ds_sid_0;
+ hid_t file_large_ds_sid_1;
+ hid_t file_large_ds_process_slice_sid;
+ hid_t mem_large_ds_process_slice_sid;
+ hid_t large_ds_slice_sid;
+ hid_t small_dataset; /* Dataset ID */
+ hid_t large_dataset; /* Dataset ID */
+ size_t small_ds_size;
+ size_t small_ds_slice_size;
+ size_t large_ds_size;
+ size_t large_ds_slice_size;
+ hsize_t dims[PAR_SS_DR_MAX_RANK];
+ hsize_t chunk_dims[PAR_SS_DR_MAX_RANK];
+ hsize_t start[PAR_SS_DR_MAX_RANK];
+ hsize_t stride[PAR_SS_DR_MAX_RANK];
+ hsize_t count[PAR_SS_DR_MAX_RANK];
+ hsize_t block[PAR_SS_DR_MAX_RANK];
+ hsize_t *start_ptr;
+ hsize_t *stride_ptr;
+ hsize_t *count_ptr;
+ hsize_t *block_ptr;
+ int skips;
+ int max_skips;
+ int64_t total_tests;
+ int64_t tests_run;
+ int64_t tests_skipped;
+};
+
+/*-------------------------------------------------------------------------
+ * Function: hs_dr_pio_test__setup()
+ *
+ * Purpose: Do setup for tests of I/O to/from hyperslab selections of
+ * different rank in the parallel case.
+ *
+ * Return: void
+ *
+ * Programmer: JRM -- 8/9/11
+ *
+ *-------------------------------------------------------------------------
+ */
+
+#define CONTIG_HS_DR_PIO_TEST__SETUP__DEBUG 0
+
+static void
+hs_dr_pio_test__setup(const int test_num, const int edge_size, const int checker_edge_size,
+ const int chunk_edge_size, const int small_rank, const int large_rank,
+ const hbool_t use_collective_io, const hid_t dset_type, const int express_test,
+ struct hs_dr_pio_test_vars_t *tv_ptr)
+{
+#if CONTIG_HS_DR_PIO_TEST__SETUP__DEBUG
+ const char *fcnName = "hs_dr_pio_test__setup()";
+#endif /* CONTIG_HS_DR_PIO_TEST__SETUP__DEBUG */
+ const char *filename;
+ hbool_t mis_match = FALSE;
+ int i;
+ int mrc;
+ int mpi_rank; /* needed by the VRFY macro */
+ uint32_t expected_value;
+ uint32_t *ptr_0;
+ uint32_t *ptr_1;
+ hid_t acc_tpl; /* File access templates */
+ hid_t small_ds_dcpl_id = H5P_DEFAULT;
+ hid_t large_ds_dcpl_id = H5P_DEFAULT;
+ herr_t ret; /* Generic return value */
+
+ HDassert(edge_size >= 6);
+ HDassert(edge_size >= chunk_edge_size);
+ HDassert((chunk_edge_size == 0) || (chunk_edge_size >= 3));
+ HDassert(1 < small_rank);
+ HDassert(small_rank < large_rank);
+ HDassert(large_rank <= PAR_SS_DR_MAX_RANK);
+
+ tv_ptr->test_num = test_num;
+ tv_ptr->edge_size = edge_size;
+ tv_ptr->checker_edge_size = checker_edge_size;
+ tv_ptr->chunk_edge_size = chunk_edge_size;
+ tv_ptr->small_rank = small_rank;
+ tv_ptr->large_rank = large_rank;
+ tv_ptr->dset_type = dset_type;
+
+ MPI_Comm_size(MPI_COMM_WORLD, &(tv_ptr->mpi_size));
+ MPI_Comm_rank(MPI_COMM_WORLD, &(tv_ptr->mpi_rank));
+ /* the VRFY() macro needs the local variable mpi_rank -- set it up now */
+ mpi_rank = tv_ptr->mpi_rank;
+
+ HDassert(tv_ptr->mpi_size >= 1);
+
+ tv_ptr->mpi_comm = MPI_COMM_WORLD;
+ tv_ptr->mpi_info = MPI_INFO_NULL;
+
+ for (i = 0; i < tv_ptr->small_rank - 1; i++) {
+ tv_ptr->small_ds_size *= (size_t)(tv_ptr->edge_size);
+ tv_ptr->small_ds_slice_size *= (size_t)(tv_ptr->edge_size);
+ }
+ tv_ptr->small_ds_size *= (size_t)(tv_ptr->mpi_size + 1);
+
+ /* used by checker board tests only */
+ tv_ptr->small_ds_offset = PAR_SS_DR_MAX_RANK - tv_ptr->small_rank;
+
+ HDassert(0 < tv_ptr->small_ds_offset);
+ HDassert(tv_ptr->small_ds_offset < PAR_SS_DR_MAX_RANK);
+
+ for (i = 0; i < tv_ptr->large_rank - 1; i++) {
+
+ tv_ptr->large_ds_size *= (size_t)(tv_ptr->edge_size);
+ tv_ptr->large_ds_slice_size *= (size_t)(tv_ptr->edge_size);
+ }
+ tv_ptr->large_ds_size *= (size_t)(tv_ptr->mpi_size + 1);
+
+ /* used by checker board tests only */
+ tv_ptr->large_ds_offset = PAR_SS_DR_MAX_RANK - tv_ptr->large_rank;
+
+ HDassert(0 <= tv_ptr->large_ds_offset);
+ HDassert(tv_ptr->large_ds_offset < PAR_SS_DR_MAX_RANK);
+
+ /* set up the start, stride, count, and block pointers */
+ /* used by contiguous tests only */
+ tv_ptr->start_ptr = &(tv_ptr->start[PAR_SS_DR_MAX_RANK - tv_ptr->large_rank]);
+ tv_ptr->stride_ptr = &(tv_ptr->stride[PAR_SS_DR_MAX_RANK - tv_ptr->large_rank]);
+ tv_ptr->count_ptr = &(tv_ptr->count[PAR_SS_DR_MAX_RANK - tv_ptr->large_rank]);
+ tv_ptr->block_ptr = &(tv_ptr->block[PAR_SS_DR_MAX_RANK - tv_ptr->large_rank]);
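+ /* These pointers reference the trailing large_rank entries of the
+ * PAR_SS_DR_MAX_RANK element arrays, so selections of rank large_rank
+ * reuse the same start/stride/count/block storage. */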
+
+ /* Allocate buffers */
+ tv_ptr->small_ds_buf_0 = (uint32_t *)HDmalloc(sizeof(uint32_t) * tv_ptr->small_ds_size);
+ VRFY((tv_ptr->small_ds_buf_0 != NULL), "malloc of small_ds_buf_0 succeeded");
+
+ tv_ptr->small_ds_buf_1 = (uint32_t *)HDmalloc(sizeof(uint32_t) * tv_ptr->small_ds_size);
+ VRFY((tv_ptr->small_ds_buf_1 != NULL), "malloc of small_ds_buf_1 succeeded");
+
+ tv_ptr->small_ds_buf_2 = (uint32_t *)HDmalloc(sizeof(uint32_t) * tv_ptr->small_ds_size);
+ VRFY((tv_ptr->small_ds_buf_2 != NULL), "malloc of small_ds_buf_2 succeeded");
+
+ tv_ptr->small_ds_slice_buf = (uint32_t *)HDmalloc(sizeof(uint32_t) * tv_ptr->small_ds_slice_size);
+ VRFY((tv_ptr->small_ds_slice_buf != NULL), "malloc of small_ds_slice_buf succeeded");
+
+ tv_ptr->large_ds_buf_0 = (uint32_t *)HDmalloc(sizeof(uint32_t) * tv_ptr->large_ds_size);
+ VRFY((tv_ptr->large_ds_buf_0 != NULL), "malloc of large_ds_buf_0 succeeded");
+
+ tv_ptr->large_ds_buf_1 = (uint32_t *)HDmalloc(sizeof(uint32_t) * tv_ptr->large_ds_size);
+ VRFY((tv_ptr->large_ds_buf_1 != NULL), "malloc of large_ds_buf_1 succeeded");
+
+ tv_ptr->large_ds_buf_2 = (uint32_t *)HDmalloc(sizeof(uint32_t) * tv_ptr->large_ds_size);
+ VRFY((tv_ptr->large_ds_buf_2 != NULL), "malloc of large_ds_buf_2 succeeded");
+
+ tv_ptr->large_ds_slice_buf = (uint32_t *)HDmalloc(sizeof(uint32_t) * tv_ptr->large_ds_slice_size);
+ VRFY((tv_ptr->large_ds_slice_buf != NULL), "malloc of large_ds_slice_buf succeeded");
+
+ /* initialize the buffers */
+
+ ptr_0 = tv_ptr->small_ds_buf_0;
+ for (i = 0; i < (int)(tv_ptr->small_ds_size); i++)
+ *ptr_0++ = (uint32_t)i;
+ HDmemset(tv_ptr->small_ds_buf_1, 0, sizeof(uint32_t) * tv_ptr->small_ds_size);
+ HDmemset(tv_ptr->small_ds_buf_2, 0, sizeof(uint32_t) * tv_ptr->small_ds_size);
+
+ HDmemset(tv_ptr->small_ds_slice_buf, 0, sizeof(uint32_t) * tv_ptr->small_ds_slice_size);
+
+ ptr_0 = tv_ptr->large_ds_buf_0;
+ for (i = 0; i < (int)(tv_ptr->large_ds_size); i++)
+ *ptr_0++ = (uint32_t)i;
+ HDmemset(tv_ptr->large_ds_buf_1, 0, sizeof(uint32_t) * tv_ptr->large_ds_size);
+ HDmemset(tv_ptr->large_ds_buf_2, 0, sizeof(uint32_t) * tv_ptr->large_ds_size);
+
+ HDmemset(tv_ptr->large_ds_slice_buf, 0, sizeof(uint32_t) * tv_ptr->large_ds_slice_size);
+
+ filename = filenames[0]; /* (const char *)GetTestParameters(); */
+ HDassert(filename != NULL);
+#if CONTIG_HS_DR_PIO_TEST__SETUP__DEBUG
+ if (MAINPROCESS) {
+
+ HDfprintf(stdout, "%d: test num = %d.\n", tv_ptr->mpi_rank, tv_ptr->test_num);
+ HDfprintf(stdout, "%d: mpi_size = %d.\n", tv_ptr->mpi_rank, tv_ptr->mpi_size);
+ HDfprintf(stdout, "%d: small/large rank = %d/%d, use_collective_io = %d.\n", tv_ptr->mpi_rank,
+ tv_ptr->small_rank, tv_ptr->large_rank, (int)use_collective_io);
+ HDfprintf(stdout, "%d: edge_size = %d, chunk_edge_size = %d.\n", tv_ptr->mpi_rank, tv_ptr->edge_size,
+ tv_ptr->chunk_edge_size);
+ HDfprintf(stdout, "%d: checker_edge_size = %d.\n", tv_ptr->mpi_rank, tv_ptr->checker_edge_size);
+ HDfprintf(stdout, "%d: small_ds_size = %d, large_ds_size = %d.\n", tv_ptr->mpi_rank,
+ (int)(tv_ptr->small_ds_size), (int)(tv_ptr->large_ds_size));
+ HDfprintf(stdout, "%d: filename = %s.\n", tv_ptr->mpi_rank, filename);
+ }
+#endif /* CONTIG_HS_DR_PIO_TEST__SETUP__DEBUG */
+ /* ----------------------------------------
+ * CREATE AN HDF5 FILE WITH PARALLEL ACCESS
+ * ---------------------------------------*/
+ /* setup file access template */
+ acc_tpl = create_faccess_plist(tv_ptr->mpi_comm, tv_ptr->mpi_info, facc_type);
+ VRFY((acc_tpl >= 0), "create_faccess_plist() succeeded");
+
+ /* set the alignment -- need it large so that we aren't always hitting the
+ * same file system block. Do this only if express_test is greater
+ * than zero.
+ */
+ if (express_test > 0) {
+
+ ret = H5Pset_alignment(acc_tpl, (hsize_t)0, SHAPE_SAME_TEST_ALIGNMENT);
+ VRFY((ret != FAIL), "H5Pset_alignment() succeeded");
+ }
+
+ /* create the file collectively */
+ tv_ptr->fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, acc_tpl);
+ VRFY((tv_ptr->fid >= 0), "H5Fcreate succeeded");
+
+ MESG("File opened.");
+
+ /* Release file-access template */
+ ret = H5Pclose(acc_tpl);
+ VRFY((ret >= 0), "H5Pclose(acc_tpl) succeeded");
+
+ /* setup dims: */
+ tv_ptr->dims[0] = (hsize_t)(tv_ptr->mpi_size + 1);
+ tv_ptr->dims[1] = tv_ptr->dims[2] = tv_ptr->dims[3] = tv_ptr->dims[4] = (hsize_t)(tv_ptr->edge_size);
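+ /* dims[0] provides one slice per MPI rank plus one extra slice (claimed
+ * by the main process below); the remaining dimensions are edge_size on
+ * a side. */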
+
+ /* Create small ds dataspaces */
+ tv_ptr->full_mem_small_ds_sid = H5Screate_simple(tv_ptr->small_rank, tv_ptr->dims, NULL);
+ VRFY((tv_ptr->full_mem_small_ds_sid != 0), "H5Screate_simple() full_mem_small_ds_sid succeeded");
+
+ tv_ptr->full_file_small_ds_sid = H5Screate_simple(tv_ptr->small_rank, tv_ptr->dims, NULL);
+ VRFY((tv_ptr->full_file_small_ds_sid != 0), "H5Screate_simple() full_file_small_ds_sid succeeded");
+
+ tv_ptr->mem_small_ds_sid = H5Screate_simple(tv_ptr->small_rank, tv_ptr->dims, NULL);
+ VRFY((tv_ptr->mem_small_ds_sid != 0), "H5Screate_simple() mem_small_ds_sid succeeded");
+
+ tv_ptr->file_small_ds_sid_0 = H5Screate_simple(tv_ptr->small_rank, tv_ptr->dims, NULL);
+ VRFY((tv_ptr->file_small_ds_sid_0 != 0), "H5Screate_simple() file_small_ds_sid_0 succeeded");
+
+ /* used by checker board tests only */
+ tv_ptr->file_small_ds_sid_1 = H5Screate_simple(tv_ptr->small_rank, tv_ptr->dims, NULL);
+ VRFY((tv_ptr->file_small_ds_sid_1 != 0), "H5Screate_simple() file_small_ds_sid_1 succeeded");
+
+ tv_ptr->small_ds_slice_sid = H5Screate_simple(tv_ptr->small_rank - 1, &(tv_ptr->dims[1]), NULL);
+ VRFY((tv_ptr->small_ds_slice_sid != 0), "H5Screate_simple() small_ds_slice_sid succeeded");
+
+ /* Create large ds dataspaces */
+ tv_ptr->full_mem_large_ds_sid = H5Screate_simple(tv_ptr->large_rank, tv_ptr->dims, NULL);
+ VRFY((tv_ptr->full_mem_large_ds_sid != 0), "H5Screate_simple() full_mem_large_ds_sid succeeded");
+
+ tv_ptr->full_file_large_ds_sid = H5Screate_simple(tv_ptr->large_rank, tv_ptr->dims, NULL);
+ VRFY((tv_ptr->full_file_large_ds_sid != FAIL), "H5Screate_simple() full_file_large_ds_sid succeeded");
+
+ tv_ptr->mem_large_ds_sid = H5Screate_simple(tv_ptr->large_rank, tv_ptr->dims, NULL);
+ VRFY((tv_ptr->mem_large_ds_sid != FAIL), "H5Screate_simple() mem_large_ds_sid succeeded");
+
+ tv_ptr->file_large_ds_sid_0 = H5Screate_simple(tv_ptr->large_rank, tv_ptr->dims, NULL);
+ VRFY((tv_ptr->file_large_ds_sid_0 != FAIL), "H5Screate_simple() file_large_ds_sid_0 succeeded");
+
+ /* used by checker board tests only */
+ tv_ptr->file_large_ds_sid_1 = H5Screate_simple(tv_ptr->large_rank, tv_ptr->dims, NULL);
+ VRFY((tv_ptr->file_large_ds_sid_1 != FAIL), "H5Screate_simple() file_large_ds_sid_1 succeeded");
+
+ tv_ptr->mem_large_ds_process_slice_sid = H5Screate_simple(tv_ptr->large_rank, tv_ptr->dims, NULL);
+ VRFY((tv_ptr->mem_large_ds_process_slice_sid != FAIL),
+ "H5Screate_simple() mem_large_ds_process_slice_sid succeeded");
+
+ tv_ptr->file_large_ds_process_slice_sid = H5Screate_simple(tv_ptr->large_rank, tv_ptr->dims, NULL);
+ VRFY((tv_ptr->file_large_ds_process_slice_sid != FAIL),
+ "H5Screate_simple() file_large_ds_process_slice_sid succeeded");
+
+ tv_ptr->large_ds_slice_sid = H5Screate_simple(tv_ptr->large_rank - 1, &(tv_ptr->dims[1]), NULL);
+ VRFY((tv_ptr->large_ds_slice_sid != 0), "H5Screate_simple() large_ds_slice_sid succeeded");
+
+ /* if chunk edge size is greater than zero, set up the small and
+ * large data set creation property lists to specify chunked
+ * datasets.
+ */
+ if (tv_ptr->chunk_edge_size > 0) {
+
+ /* Under Lustre (and perhaps other parallel file systems?) we get
+ * locking delays when two or more processes attempt to access the
+ * same file system block.
+ *
+ * To minimize this problem, I have changed chunk_dims[0]
+ * from (mpi_size + 1) to just 1 when any sort of express test is
+ * selected. Given the structure of the test, and assuming we
+ * set the alignment large enough, this avoids the contention
+ * issue by seeing to it that each chunk is only accessed by one
+ * process.
+ *
+ * One can argue as to whether this is a good thing to do in our
+ * tests, but for now it is necessary if we want the test to complete
+ * in a reasonable amount of time.
+ *
+ * JRM -- 9/16/10
+ */
+
+ tv_ptr->chunk_dims[0] = 1;
+
+ tv_ptr->chunk_dims[1] = tv_ptr->chunk_dims[2] = tv_ptr->chunk_dims[3] = tv_ptr->chunk_dims[4] =
+ (hsize_t)(tv_ptr->chunk_edge_size);
+
+ small_ds_dcpl_id = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY((ret != FAIL), "H5Pcreate() small_ds_dcpl_id succeeded");
+
+ ret = H5Pset_layout(small_ds_dcpl_id, H5D_CHUNKED);
+ VRFY((ret != FAIL), "H5Pset_layout() small_ds_dcpl_id succeeded");
+
+ ret = H5Pset_chunk(small_ds_dcpl_id, tv_ptr->small_rank, tv_ptr->chunk_dims);
+ VRFY((ret != FAIL), "H5Pset_chunk() small_ds_dcpl_id succeeded");
+
+ large_ds_dcpl_id = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY((ret != FAIL), "H5Pcreate() large_ds_dcpl_id succeeded");
+
+ ret = H5Pset_layout(large_ds_dcpl_id, H5D_CHUNKED);
+ VRFY((ret != FAIL), "H5Pset_layout() large_ds_dcpl_id succeeded");
+
+ ret = H5Pset_chunk(large_ds_dcpl_id, tv_ptr->large_rank, tv_ptr->chunk_dims);
+ VRFY((ret != FAIL), "H5Pset_chunk() large_ds_dcpl_id succeeded");
+ }
+
+ /* create the small dataset */
+ tv_ptr->small_dataset =
+ H5Dcreate2(tv_ptr->fid, "small_dataset", tv_ptr->dset_type, tv_ptr->file_small_ds_sid_0, H5P_DEFAULT,
+ small_ds_dcpl_id, H5P_DEFAULT);
+ VRFY((ret != FAIL), "H5Dcreate2() small_dataset succeeded");
+
+ /* create the large dataset */
+ tv_ptr->large_dataset =
+ H5Dcreate2(tv_ptr->fid, "large_dataset", tv_ptr->dset_type, tv_ptr->file_large_ds_sid_0, H5P_DEFAULT,
+ large_ds_dcpl_id, H5P_DEFAULT);
+ VRFY((ret != FAIL), "H5Dcreate2() large_dataset succeeded");
+
+ /* setup xfer property list */
+ tv_ptr->xfer_plist = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((tv_ptr->xfer_plist >= 0), "H5Pcreate(H5P_DATASET_XFER) succeeded");
+
+ if (use_collective_io) {
+ ret = H5Pset_dxpl_mpio(tv_ptr->xfer_plist, H5FD_MPIO_COLLECTIVE);
+ VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
+ }
+
+ /* setup selection to write initial data to the small and large data sets */
+ tv_ptr->start[0] = (hsize_t)(tv_ptr->mpi_rank);
+ tv_ptr->stride[0] = (hsize_t)(2 * (tv_ptr->mpi_size + 1));
+ tv_ptr->count[0] = 1;
+ tv_ptr->block[0] = 1;
+
+ for (i = 1; i < tv_ptr->large_rank; i++) {
+
+ tv_ptr->start[i] = 0;
+ tv_ptr->stride[i] = (hsize_t)(2 * tv_ptr->edge_size);
+ tv_ptr->count[i] = 1;
+ tv_ptr->block[i] = (hsize_t)(tv_ptr->edge_size);
+ }
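+ /* Each process thus selects a single slice of thickness 1 along dimension
+ * 0, located at index mpi_rank, and spanning edge_size in each remaining
+ * dimension. */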
+
+ /* setup selections for writing initial data to the small data set */
+ ret = H5Sselect_hyperslab(tv_ptr->mem_small_ds_sid, H5S_SELECT_SET, tv_ptr->start, tv_ptr->stride,
+ tv_ptr->count, tv_ptr->block);
+ VRFY((ret >= 0), "H5Sselect_hyperslab(mem_small_ds_sid, set) succeeded");
+
+ ret = H5Sselect_hyperslab(tv_ptr->file_small_ds_sid_0, H5S_SELECT_SET, tv_ptr->start, tv_ptr->stride,
+ tv_ptr->count, tv_ptr->block);
+ VRFY((ret >= 0), "H5Sselect_hyperslab(file_small_ds_sid_0, set) succeeded");
+
+ if (MAINPROCESS) { /* add an additional slice to the selections */
+
+ tv_ptr->start[0] = (hsize_t)(tv_ptr->mpi_size);
+
+ ret = H5Sselect_hyperslab(tv_ptr->mem_small_ds_sid, H5S_SELECT_OR, tv_ptr->start, tv_ptr->stride,
+ tv_ptr->count, tv_ptr->block);
+ VRFY((ret >= 0), "H5Sselect_hyperslab(mem_small_ds_sid, or) succeeded");
+
+ ret = H5Sselect_hyperslab(tv_ptr->file_small_ds_sid_0, H5S_SELECT_OR, tv_ptr->start, tv_ptr->stride,
+ tv_ptr->count, tv_ptr->block);
+ VRFY((ret >= 0), "H5Sselect_hyperslab(file_small_ds_sid_0, or) succeeded");
+ }
+
+ /* write the initial value of the small data set to file */
+ ret = H5Dwrite(tv_ptr->small_dataset, tv_ptr->dset_type, tv_ptr->mem_small_ds_sid,
+ tv_ptr->file_small_ds_sid_0, tv_ptr->xfer_plist, tv_ptr->small_ds_buf_0);
+
+ VRFY((ret >= 0), "H5Dwrite() small_dataset initial write succeeded");
+
+ /* sync with the other processes before checking data */
+ mrc = MPI_Barrier(MPI_COMM_WORLD);
+ VRFY((mrc == MPI_SUCCESS), "Sync after small dataset writes");
+
+ /* read the small data set back to verify that it contains the
+ * expected data. Note that each process reads in the entire
+ * data set and verifies it.
+ */
+ ret = H5Dread(tv_ptr->small_dataset, H5T_NATIVE_UINT32, tv_ptr->full_mem_small_ds_sid,
+ tv_ptr->full_file_small_ds_sid, tv_ptr->xfer_plist, tv_ptr->small_ds_buf_1);
+ VRFY((ret >= 0), "H5Dread() small_dataset initial read succeeded");
+
+ /* verify that the correct data was written to the small data set */
+ expected_value = 0;
+ mis_match = FALSE;
+ ptr_1 = tv_ptr->small_ds_buf_1;
+
+ i = 0;
+ for (i = 0; i < (int)(tv_ptr->small_ds_size); i++) {
+
+ if (*ptr_1 != expected_value) {
+
+ mis_match = TRUE;
+ }
+ ptr_1++;
+ expected_value++;
+ }
+ VRFY((mis_match == FALSE), "small ds init data good.");
+
+ /* setup selections for writing initial data to the large data set */
+
+ tv_ptr->start[0] = (hsize_t)(tv_ptr->mpi_rank);
+
+ ret = H5Sselect_hyperslab(tv_ptr->mem_large_ds_sid, H5S_SELECT_SET, tv_ptr->start, tv_ptr->stride,
+ tv_ptr->count, tv_ptr->block);
+ VRFY((ret >= 0), "H5Sselect_hyperslab(mem_large_ds_sid, set) succeeded");
+
+ ret = H5Sselect_hyperslab(tv_ptr->file_large_ds_sid_0, H5S_SELECT_SET, tv_ptr->start, tv_ptr->stride,
+ tv_ptr->count, tv_ptr->block);
+ VRFY((ret >= 0), "H5Sselect_hyperslab(file_large_ds_sid_0, set) succeeded");
+
+ /* In passing, setup the process slice dataspaces as well */
+
+ ret = H5Sselect_hyperslab(tv_ptr->mem_large_ds_process_slice_sid, H5S_SELECT_SET, tv_ptr->start,
+ tv_ptr->stride, tv_ptr->count, tv_ptr->block);
+ VRFY((ret >= 0), "H5Sselect_hyperslab(mem_large_ds_process_slice_sid, set) succeeded");
+
+ ret = H5Sselect_hyperslab(tv_ptr->file_large_ds_process_slice_sid, H5S_SELECT_SET, tv_ptr->start,
+ tv_ptr->stride, tv_ptr->count, tv_ptr->block);
+ VRFY((ret >= 0), "H5Sselect_hyperslab(file_large_ds_process_slice_sid, set) succeeded");
+
+ if (MAINPROCESS) { /* add an additional slice to the selections */
+
+ tv_ptr->start[0] = (hsize_t)(tv_ptr->mpi_size);
+
+ ret = H5Sselect_hyperslab(tv_ptr->mem_large_ds_sid, H5S_SELECT_OR, tv_ptr->start, tv_ptr->stride,
+ tv_ptr->count, tv_ptr->block);
+ VRFY((ret >= 0), "H5Sselect_hyperslab(mem_large_ds_sid, or) succeeded");
+
+ ret = H5Sselect_hyperslab(tv_ptr->file_large_ds_sid_0, H5S_SELECT_OR, tv_ptr->start, tv_ptr->stride,
+ tv_ptr->count, tv_ptr->block);
+ VRFY((ret >= 0), "H5Sselect_hyperslab(file_large_ds_sid_0, or) succeeded");
+ }
+
+ /* write the initial value of the large data set to file */
+ ret = H5Dwrite(tv_ptr->large_dataset, tv_ptr->dset_type, tv_ptr->mem_large_ds_sid,
+ tv_ptr->file_large_ds_sid_0, tv_ptr->xfer_plist, tv_ptr->large_ds_buf_0);
+ if (ret < 0)
+ H5Eprint2(H5E_DEFAULT, stderr);
+ VRFY((ret >= 0), "H5Dwrite() large_dataset initial write succeeded");
+
+ /* sync with the other processes before checking data */
+ mrc = MPI_Barrier(MPI_COMM_WORLD);
+ VRFY((mrc == MPI_SUCCESS), "Sync after large dataset writes");
+
+ /* read the large data set back to verify that it contains the
+ * expected data. Note that each process reads in the entire
+ * data set.
+ */
+ ret = H5Dread(tv_ptr->large_dataset, H5T_NATIVE_UINT32, tv_ptr->full_mem_large_ds_sid,
+ tv_ptr->full_file_large_ds_sid, tv_ptr->xfer_plist, tv_ptr->large_ds_buf_1);
+ VRFY((ret >= 0), "H5Dread() large_dataset initial read succeeded");
+
+ /* verify that the correct data was written to the large data set */
+ expected_value = 0;
+ mis_match = FALSE;
+ ptr_1 = tv_ptr->large_ds_buf_1;
+
+ i = 0;
+ for (i = 0; i < (int)(tv_ptr->large_ds_size); i++) {
+
+ if (*ptr_1 != expected_value) {
+
+ mis_match = TRUE;
+ }
+ ptr_1++;
+ expected_value++;
+ }
+ VRFY((mis_match == FALSE), "large ds init data good.");
+
+ /* sync with the other processes before changing data */
+ mrc = MPI_Barrier(MPI_COMM_WORLD);
+ VRFY((mrc == MPI_SUCCESS), "Sync initial values check");
+
+ return;
+
+} /* hs_dr_pio_test__setup() */
+
+/*-------------------------------------------------------------------------
+ * Function: hs_dr_pio_test__takedown()
+ *
+ * Purpose: Do takedown after tests of I/O to/from hyperslab selections
+ * of different rank in the parallel case.
+ *
+ * Return: void
+ *
+ * Programmer: JRM -- 9/18/09
+ *
+ *-------------------------------------------------------------------------
+ */
+
+#define HS_DR_PIO_TEST__TAKEDOWN__DEBUG 0
+
+static void
+hs_dr_pio_test__takedown(struct hs_dr_pio_test_vars_t *tv_ptr)
+{
+#if HS_DR_PIO_TEST__TAKEDOWN__DEBUG
+ const char *fcnName = "hs_dr_pio_test__takedown()";
+#endif /* HS_DR_PIO_TEST__TAKEDOWN__DEBUG */
+ int mpi_rank; /* needed by the VRFY macro */
+ herr_t ret; /* Generic return value */
+
+ /* initialize the local copy of mpi_rank */
+ mpi_rank = tv_ptr->mpi_rank;
+
+ /* Close property lists */
+ if (tv_ptr->xfer_plist != H5P_DEFAULT) {
+ ret = H5Pclose(tv_ptr->xfer_plist);
+ VRFY((ret != FAIL), "H5Pclose(xfer_plist) succeeded");
+ }
+
+ /* Close dataspaces */
+ ret = H5Sclose(tv_ptr->full_mem_small_ds_sid);
+ VRFY((ret != FAIL), "H5Sclose(full_mem_small_ds_sid) succeeded");
+
+ ret = H5Sclose(tv_ptr->full_file_small_ds_sid);
+ VRFY((ret != FAIL), "H5Sclose(full_file_small_ds_sid) succeeded");
+
+ ret = H5Sclose(tv_ptr->mem_small_ds_sid);
+ VRFY((ret != FAIL), "H5Sclose(mem_small_ds_sid) succeeded");
+
+ ret = H5Sclose(tv_ptr->file_small_ds_sid_0);
+ VRFY((ret != FAIL), "H5Sclose(file_small_ds_sid_0) succeeded");
+
+ ret = H5Sclose(tv_ptr->file_small_ds_sid_1);
+ VRFY((ret != FAIL), "H5Sclose(file_small_ds_sid_1) succeeded");
+
+ ret = H5Sclose(tv_ptr->small_ds_slice_sid);
+ VRFY((ret != FAIL), "H5Sclose(small_ds_slice_sid) succeeded");
+
+ ret = H5Sclose(tv_ptr->full_mem_large_ds_sid);
+ VRFY((ret != FAIL), "H5Sclose(full_mem_large_ds_sid) succeeded");
+
+ ret = H5Sclose(tv_ptr->full_file_large_ds_sid);
+ VRFY((ret != FAIL), "H5Sclose(full_file_large_ds_sid) succeeded");
+
+ ret = H5Sclose(tv_ptr->mem_large_ds_sid);
+ VRFY((ret != FAIL), "H5Sclose(mem_large_ds_sid) succeeded");
+
+ ret = H5Sclose(tv_ptr->file_large_ds_sid_0);
+ VRFY((ret != FAIL), "H5Sclose(file_large_ds_sid_0) succeeded");
+
+ ret = H5Sclose(tv_ptr->file_large_ds_sid_1);
+ VRFY((ret != FAIL), "H5Sclose(file_large_ds_sid_1) succeeded");
+
+ ret = H5Sclose(tv_ptr->mem_large_ds_process_slice_sid);
+ VRFY((ret != FAIL), "H5Sclose(mem_large_ds_process_slice_sid) succeeded");
+
+ ret = H5Sclose(tv_ptr->file_large_ds_process_slice_sid);
+ VRFY((ret != FAIL), "H5Sclose(file_large_ds_process_slice_sid) succeeded");
+
+ ret = H5Sclose(tv_ptr->large_ds_slice_sid);
+ VRFY((ret != FAIL), "H5Sclose(large_ds_slice_sid) succeeded");
+
+ /* Close Datasets */
+ ret = H5Dclose(tv_ptr->small_dataset);
+ VRFY((ret != FAIL), "H5Dclose(small_dataset) succeeded");
+
+ ret = H5Dclose(tv_ptr->large_dataset);
+ VRFY((ret != FAIL), "H5Dclose(large_dataset) succeeded");
+
+ /* close the file collectively */
+ MESG("about to close file.");
+ ret = H5Fclose(tv_ptr->fid);
+ VRFY((ret != FAIL), "file close succeeded");
+
+ /* Free memory buffers */
+
+ if (tv_ptr->small_ds_buf_0 != NULL)
+ HDfree(tv_ptr->small_ds_buf_0);
+ if (tv_ptr->small_ds_buf_1 != NULL)
+ HDfree(tv_ptr->small_ds_buf_1);
+ if (tv_ptr->small_ds_buf_2 != NULL)
+ HDfree(tv_ptr->small_ds_buf_2);
+ if (tv_ptr->small_ds_slice_buf != NULL)
+ HDfree(tv_ptr->small_ds_slice_buf);
+
+ if (tv_ptr->large_ds_buf_0 != NULL)
+ HDfree(tv_ptr->large_ds_buf_0);
+ if (tv_ptr->large_ds_buf_1 != NULL)
+ HDfree(tv_ptr->large_ds_buf_1);
+ if (tv_ptr->large_ds_buf_2 != NULL)
+ HDfree(tv_ptr->large_ds_buf_2);
+ if (tv_ptr->large_ds_slice_buf != NULL)
+ HDfree(tv_ptr->large_ds_slice_buf);
+
+ return;
+
+} /* hs_dr_pio_test__takedown() */
+
+/*-------------------------------------------------------------------------
+ * Function: contig_hs_dr_pio_test__d2m_l2s()
+ *
+ * Purpose: Part one of a series of tests of I/O to/from hyperslab
+ * selections of different rank in the parallel case.
+ *
+ * Verify that we can read from disk correctly using
+ * selections of different rank that H5Sselect_shape_same()
+ * views as being of the same shape.
+ *
+ * In this function, we test this by reading (small_rank - 1)-dimensional
+ * slices from the on-disk large cube, and verifying that the
+ * data read is correct. Verify that H5Sselect_shape_same()
+ * returns true on the memory and file selections.
+ *
+ * Return: void
+ *
+ * Programmer: JRM -- 9/10/11
+ *
+ *-------------------------------------------------------------------------
+ */
+
+#define CONTIG_HS_DR_PIO_TEST__D2M_L2S__DEBUG 0
+
+static void
+contig_hs_dr_pio_test__d2m_l2s(struct hs_dr_pio_test_vars_t *tv_ptr)
+{
+#if CONTIG_HS_DR_PIO_TEST__D2M_L2S__DEBUG
+ const char *fcnName = "contig_hs_dr_pio_test__run_test()";
+#endif /* CONTIG_HS_DR_PIO_TEST__D2M_L2S__DEBUG */
+ hbool_t mis_match = FALSE;
+ int i, j, k, l;
+ size_t n;
+ int mpi_rank; /* needed by the VRFY macro */
+ uint32_t expected_value;
+ uint32_t *ptr_1;
+ htri_t check; /* Shape comparison return value */
+ herr_t ret; /* Generic return value */
+
+ /* initialize the local copy of mpi_rank */
+ mpi_rank = tv_ptr->mpi_rank;
+
+ /* We have already done a H5Sselect_all() on the dataspace
+ * small_ds_slice_sid in the initialization phase, so no need to
+ * call H5Sselect_all() again.
+ */
+
+ /* set up start, stride, count, and block -- note that we will
+ * change start[] so as to read slices of the large cube.
+ */
+ for (i = 0; i < PAR_SS_DR_MAX_RANK; i++) {
+
+ tv_ptr->start[i] = 0;
+ tv_ptr->stride[i] = (hsize_t)(2 * tv_ptr->edge_size);
+ tv_ptr->count[i] = 1;
+ if ((PAR_SS_DR_MAX_RANK - i) > (tv_ptr->small_rank - 1)) {
+
+ tv_ptr->block[i] = 1;
+ }
+ else {
+
+ tv_ptr->block[i] = (hsize_t)(tv_ptr->edge_size);
+ }
+ }
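+
+ /* For example (hypothetical numbers): with PAR_SS_DR_MAX_RANK == 5,
+ * small_rank == 3 and edge_size == 10, the loop above yields
+ * block = {1, 1, 1, 10, 10} -- i.e. a single (small_rank - 1)-dimensional
+ * slice through the large cube, positioned by start[].
+ */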
+
+ /* zero out the buffer we will be reading into */
+ HDmemset(tv_ptr->small_ds_slice_buf, 0, sizeof(uint32_t) * tv_ptr->small_ds_slice_size);
+
+#if CONTIG_HS_DR_PIO_TEST__D2M_L2S__DEBUG
+ HDfprintf(stdout, "%s reading slices from big cube on disk into small cube slice.\n", fcnName);
+#endif /* CONTIG_HS_DR_PIO_TEST__D2M_L2S__DEBUG */
+
+ /* in serial versions of this test, we loop through all the dimensions
+ * of the large data set. However, in the parallel version, each
+ * process only works with that slice of the large cube indicated
+ * by its rank -- hence we set the most slowly changing index to
+ * mpi_rank, and don't iterate over it.
+ */
+
+ if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 0) {
+
+ i = tv_ptr->mpi_rank;
+ }
+ else {
+
+ i = 0;
+ }
+
+ /* since large_rank is at most PAR_SS_DR_MAX_RANK, no need to
+ * loop over it -- either we are setting i to mpi_rank, or
+ * we are setting it to zero. It will not change during the
+ * test.
+ */
+
+ if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 1) {
+
+ j = tv_ptr->mpi_rank;
+ }
+ else {
+
+ j = 0;
+ }
+
+ do {
+ if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 2) {
+
+ k = tv_ptr->mpi_rank;
+ }
+ else {
+
+ k = 0;
+ }
+
+ do {
+ /* since small rank >= 2 and large_rank > small_rank, we
+ * have large_rank >= 3. Since PAR_SS_DR_MAX_RANK == 5
+ * (barring major re-organization), this gives us:
+ *
+ * (PAR_SS_DR_MAX_RANK - large_rank) <= 2
+ *
+ * so no need to repeat the test in the outer loops --
+ * just set l = 0.
+ */
+
+ l = 0;
+ do {
+ if ((tv_ptr->skips)++ < tv_ptr->max_skips) { /* skip the test */
+
+ (tv_ptr->tests_skipped)++;
+ }
+ else { /* run the test */
+
+ tv_ptr->skips = 0; /* reset the skips counter */
+
+ /* we know that small_rank - 1 >= 1 and that
+ * large_rank > small_rank by the assertions at the head
+ * of this function. Thus no need for another inner loop.
+ */
+ tv_ptr->start[0] = (hsize_t)i;
+ tv_ptr->start[1] = (hsize_t)j;
+ tv_ptr->start[2] = (hsize_t)k;
+ tv_ptr->start[3] = (hsize_t)l;
+ tv_ptr->start[4] = 0;
+
+ ret = H5Sselect_hyperslab(tv_ptr->file_large_ds_sid_0, H5S_SELECT_SET, tv_ptr->start_ptr,
+ tv_ptr->stride_ptr, tv_ptr->count_ptr, tv_ptr->block_ptr);
+ VRFY((ret != FAIL), "H5Sselect_hyperslab(file_large_cube_sid) succeeded");
+
+ /* verify that H5Sselect_shape_same() reports the two
+ * selections as having the same shape.
+ */
+ check = H5Sselect_shape_same(tv_ptr->small_ds_slice_sid, tv_ptr->file_large_ds_sid_0);
+ VRFY((check == TRUE), "H5Sselect_shape_same passed");
+
+ /* Read selection from disk */
+#if CONTIG_HS_DR_PIO_TEST__D2M_L2S__DEBUG
+ HDfprintf(stdout, "%s:%d: start = %d %d %d %d %d.\n", fcnName, (int)(tv_ptr->mpi_rank),
+ (int)(tv_ptr->start[0]), (int)(tv_ptr->start[1]), (int)(tv_ptr->start[2]),
+ (int)(tv_ptr->start[3]), (int)(tv_ptr->start[4]));
+ HDfprintf(stdout, "%s slice/file extent dims = %d/%d.\n", fcnName,
+ H5Sget_simple_extent_ndims(tv_ptr->small_ds_slice_sid),
+ H5Sget_simple_extent_ndims(tv_ptr->file_large_ds_sid_0));
+#endif /* CONTIG_HS_DR_PIO_TEST__D2M_L2S__DEBUG */
+ ret =
+ H5Dread(tv_ptr->large_dataset, H5T_NATIVE_UINT32, tv_ptr->small_ds_slice_sid,
+ tv_ptr->file_large_ds_sid_0, tv_ptr->xfer_plist, tv_ptr->small_ds_slice_buf);
+ VRFY((ret >= 0), "H5Dread() slice from large ds succeeded.");
+
+ /* verify that expected data is retrieved */
+
+ mis_match = FALSE;
+ ptr_1 = tv_ptr->small_ds_slice_buf;
+ expected_value =
+ (uint32_t)((i * tv_ptr->edge_size * tv_ptr->edge_size * tv_ptr->edge_size *
+ tv_ptr->edge_size) +
+ (j * tv_ptr->edge_size * tv_ptr->edge_size * tv_ptr->edge_size) +
+ (k * tv_ptr->edge_size * tv_ptr->edge_size) + (l * tv_ptr->edge_size));
+
+ for (n = 0; n < tv_ptr->small_ds_slice_size; n++) {
+
+ if (*ptr_1 != expected_value) {
+
+ mis_match = TRUE;
+ }
+
+ *ptr_1 = 0; /* zero data for next use */
+
+ ptr_1++;
+ expected_value++;
+ }
+
+ VRFY((mis_match == FALSE), "small slice read from large ds data good.");
+
+ (tv_ptr->tests_run)++;
+ }
+
+ l++;
+
+ (tv_ptr->total_tests)++;
+
+ } while ((tv_ptr->large_rank > 2) && ((tv_ptr->small_rank - 1) <= 1) && (l < tv_ptr->edge_size));
+ k++;
+ } while ((tv_ptr->large_rank > 3) && ((tv_ptr->small_rank - 1) <= 2) && (k < tv_ptr->edge_size));
+ j++;
+ } while ((tv_ptr->large_rank > 4) && ((tv_ptr->small_rank - 1) <= 3) && (j < tv_ptr->edge_size));
+
+ return;
+
+} /* contig_hs_dr_pio_test__d2m_l2s() */
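+
+/* A minimal serial sketch (not part of this test; the dataset name "cube" and
+ * the wrapper function are hypothetical) of the core pattern exercised above:
+ * a rank-2 memory dataspace is paired with a rank-3 file hyperslab of shape
+ * 1 x N x N, H5Sselect_shape_same() accepts the pair, and H5Dread() moves the
+ * slice. Assumes the HDF5 headers already included by this file; error
+ * checking is elided for brevity.
+ */
+#if 0 /* illustrative sketch only -- not compiled */
+static void
+shape_same_read_sketch(hid_t fid)
+{
+ hsize_t cube_dims[3] = {4, 4, 4};
+ hsize_t slice_dims[2] = {4, 4};
+ hsize_t start[3] = {2, 0, 0}; /* read plane i == 2 of the cube */
+ hsize_t count[3] = {1, 4, 4};
+ uint32_t slice_buf[16];
+ hid_t file_sid = H5Screate_simple(3, cube_dims, NULL);
+ hid_t mem_sid = H5Screate_simple(2, slice_dims, NULL); /* selects all by default */
+ hid_t dset = H5Dcreate2(fid, "cube", H5T_NATIVE_UINT32, file_sid, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT);
+
+ /* select a single 2-D plane in the 3-D file dataspace */
+ H5Sselect_hyperslab(file_sid, H5S_SELECT_SET, start, NULL, count, NULL);
+
+ /* different ranks, same shape -- the property the tests above rely on */
+ HDassert(H5Sselect_shape_same(mem_sid, file_sid) == TRUE);
+
+ /* a freshly created dataset reads back as fill values; the real test
+ * writes known data first and then verifies it here.
+ */
+ H5Dread(dset, H5T_NATIVE_UINT32, mem_sid, file_sid, H5P_DEFAULT, slice_buf);
+
+ H5Dclose(dset);
+ H5Sclose(mem_sid);
+ H5Sclose(file_sid);
+}
+#endif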
+
+/*-------------------------------------------------------------------------
+ * Function: contig_hs_dr_pio_test__d2m_s2l()
+ *
+ * Purpose: Part two of a series of tests of I/O to/from hyperslab
+ * selections of different rank in the parallel case.
+ *
+ * Verify that we can read from disk correctly using
+ * selections of different rank that H5Sselect_shape_same()
+ * views as being of the same shape.
+ *
+ * In this function, we test this by reading slices of the
+ * on disk small data set into slices through the in memory
+ * large data set, and verify that the correct data (and
+ * only the correct data) is read.
+ *
+ * Return: void
+ *
+ * Programmer: JRM -- 8/10/11
+ *
+ *-------------------------------------------------------------------------
+ */
+
+#define CONTIG_HS_DR_PIO_TEST__D2M_S2L__DEBUG 0
+
+static void
+contig_hs_dr_pio_test__d2m_s2l(struct hs_dr_pio_test_vars_t *tv_ptr)
+{
+#if CONTIG_HS_DR_PIO_TEST__D2M_S2L__DEBUG
+ const char *fcnName = "contig_hs_dr_pio_test__d2m_s2l()";
+#endif /* CONTIG_HS_DR_PIO_TEST__D2M_S2L__DEBUG */
+ hbool_t mis_match = FALSE;
+ int i, j, k, l;
+ size_t n;
+ int mpi_rank; /* needed by the VRFY macro */
+ size_t start_index;
+ size_t stop_index;
+ uint32_t expected_value;
+ uint32_t *ptr_1;
+ htri_t check; /* Shape comparison return value */
+ herr_t ret; /* Generic return value */
+
+ /* initialize the local copy of mpi_rank */
+ mpi_rank = tv_ptr->mpi_rank;
+
+ /* Read slices of the on disk small data set into slices
+ * through the in memory large data set, and verify that the correct
+ * data (and only the correct data) is read.
+ */
+
+ tv_ptr->start[0] = (hsize_t)(tv_ptr->mpi_rank);
+ tv_ptr->stride[0] = (hsize_t)(2 * (tv_ptr->mpi_size + 1));
+ tv_ptr->count[0] = 1;
+ tv_ptr->block[0] = 1;
+
+ for (i = 1; i < tv_ptr->large_rank; i++) {
+
+ tv_ptr->start[i] = 0;
+ tv_ptr->stride[i] = (hsize_t)(2 * tv_ptr->edge_size);
+ tv_ptr->count[i] = 1;
+ tv_ptr->block[i] = (hsize_t)(tv_ptr->edge_size);
+ }
+
+ ret = H5Sselect_hyperslab(tv_ptr->file_small_ds_sid_0, H5S_SELECT_SET, tv_ptr->start, tv_ptr->stride,
+ tv_ptr->count, tv_ptr->block);
+ VRFY((ret >= 0), "H5Sselect_hyperslab(file_small_ds_sid_0, set) succeeded");
+
+#if CONTIG_HS_DR_PIO_TEST__D2M_S2L__DEBUG
+ HDfprintf(stdout, "%s reading slices of on disk small data set into slices of big data set.\n", fcnName);
+#endif /* CONTIG_HS_DR_PIO_TEST__D2M_S2L__DEBUG */
+
+ /* zero out the in memory large ds */
+ HDmemset(tv_ptr->large_ds_buf_1, 0, sizeof(uint32_t) * tv_ptr->large_ds_size);
+
+ /* set up start, stride, count, and block -- note that we will
+ * change start[] so as to read slices of the large cube.
+ */
+ for (i = 0; i < PAR_SS_DR_MAX_RANK; i++) {
+
+ tv_ptr->start[i] = 0;
+ tv_ptr->stride[i] = (hsize_t)(2 * tv_ptr->edge_size);
+ tv_ptr->count[i] = 1;
+ if ((PAR_SS_DR_MAX_RANK - i) > (tv_ptr->small_rank - 1)) {
+
+ tv_ptr->block[i] = 1;
+ }
+ else {
+
+ tv_ptr->block[i] = (hsize_t)(tv_ptr->edge_size);
+ }
+ }
+
+ /* in serial versions of this test, we loop through all the dimensions
+ * of the large data set that don't appear in the small data set.
+ *
+ * However, in the parallel version, each process only works with that
+ * slice of the large (and small) data set indicated by its rank -- hence
+ * we set the most slowly changing index to mpi_rank, and don't iterate
+ * over it.
+ */
+
+ if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 0) {
+
+ i = tv_ptr->mpi_rank;
+ }
+ else {
+
+ i = 0;
+ }
+
+ /* since large_rank is at most PAR_SS_DR_MAX_RANK, no need to
+ * loop over it -- either we are setting i to mpi_rank, or
+ * we are setting it to zero. It will not change during the
+ * test.
+ */
+
+ if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 1) {
+
+ j = tv_ptr->mpi_rank;
+ }
+ else {
+
+ j = 0;
+ }
+
+ do {
+ if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 2) {
+
+ k = tv_ptr->mpi_rank;
+ }
+ else {
+
+ k = 0;
+ }
+
+ do {
+ /* since small rank >= 2 and large_rank > small_rank, we
+ * have large_rank >= 3. Since PAR_SS_DR_MAX_RANK == 5
+ * (barring major re-organization), this gives us:
+ *
+ * (PAR_SS_DR_MAX_RANK - large_rank) <= 2
+ *
+ * so no need to repeat the test in the outer loops --
+ * just set l = 0.
+ */
+
+ l = 0;
+ do {
+ if ((tv_ptr->skips)++ < tv_ptr->max_skips) { /* skip the test */
+
+ (tv_ptr->tests_skipped)++;
+ }
+ else { /* run the test */
+
+ tv_ptr->skips = 0; /* reset the skips counter */
+
+ /* we know that small_rank >= 1 and that large_rank > small_rank
+ * by the assertions at the head of this function. Thus no
+ * need for another inner loop.
+ */
+ tv_ptr->start[0] = (hsize_t)i;
+ tv_ptr->start[1] = (hsize_t)j;
+ tv_ptr->start[2] = (hsize_t)k;
+ tv_ptr->start[3] = (hsize_t)l;
+ tv_ptr->start[4] = 0;
+
+ ret = H5Sselect_hyperslab(tv_ptr->mem_large_ds_sid, H5S_SELECT_SET, tv_ptr->start_ptr,
+ tv_ptr->stride_ptr, tv_ptr->count_ptr, tv_ptr->block_ptr);
+ VRFY((ret != FAIL), "H5Sselect_hyperslab(mem_large_ds_sid) succeeded");
+
+ /* verify that H5Sselect_shape_same() reports the two
+ * selections as having the same shape.
+ */
+ check = H5Sselect_shape_same(tv_ptr->file_small_ds_sid_0, tv_ptr->mem_large_ds_sid);
+ VRFY((check == TRUE), "H5Sselect_shape_same passed");
+
+ /* Read selection from disk */
+#if CONTIG_HS_DR_PIO_TEST__D2M_S2L__DEBUG
+ HDfprintf(stdout, "%s:%d: start = %d %d %d %d %d.\n", fcnName, (int)(tv_ptr->mpi_rank),
+ (int)(tv_ptr->start[0]), (int)(tv_ptr->start[1]), (int)(tv_ptr->start[2]),
+ (int)(tv_ptr->start[3]), (int)(tv_ptr->start[4]));
+ HDfprintf(stdout, "%s:%d: mem/file extent dims = %d/%d.\n", fcnName, tv_ptr->mpi_rank,
+ H5Sget_simple_extent_ndims(tv_ptr->mem_large_ds_sid),
+ H5Sget_simple_extent_ndims(tv_ptr->file_small_ds_sid_0));
+#endif /* CONTIG_HS_DR_PIO_TEST__D2M_S2L__DEBUG */
+ ret = H5Dread(tv_ptr->small_dataset, H5T_NATIVE_UINT32, tv_ptr->mem_large_ds_sid,
+ tv_ptr->file_small_ds_sid_0, tv_ptr->xfer_plist, tv_ptr->large_ds_buf_1);
+ VRFY((ret >= 0), "H5Dread() slice from small ds succeeded.");
+
+ /* verify that the expected data and only the
+ * expected data was read.
+ */
+ ptr_1 = tv_ptr->large_ds_buf_1;
+ expected_value = (uint32_t)((size_t)(tv_ptr->mpi_rank) * tv_ptr->small_ds_slice_size);
+ start_index =
+ (size_t)((i * tv_ptr->edge_size * tv_ptr->edge_size * tv_ptr->edge_size *
+ tv_ptr->edge_size) +
+ (j * tv_ptr->edge_size * tv_ptr->edge_size * tv_ptr->edge_size) +
+ (k * tv_ptr->edge_size * tv_ptr->edge_size) + (l * tv_ptr->edge_size));
+ stop_index = start_index + tv_ptr->small_ds_slice_size - 1;
+
+ HDassert(start_index < stop_index);
+ HDassert(stop_index <= tv_ptr->large_ds_size);
+
+ for (n = 0; n < tv_ptr->large_ds_size; n++) {
+
+ if ((n >= start_index) && (n <= stop_index)) {
+
+ if (*ptr_1 != expected_value) {
+
+ mis_match = TRUE;
+ }
+ expected_value++;
+ }
+ else {
+
+ if (*ptr_1 != 0) {
+
+ mis_match = TRUE;
+ }
+ }
+ /* zero out the value for the next pass */
+ *ptr_1 = 0;
+
+ ptr_1++;
+ }
+
+ VRFY((mis_match == FALSE), "small slice read from large ds data good.");
+
+ (tv_ptr->tests_run)++;
+ }
+
+ l++;
+
+ (tv_ptr->total_tests)++;
+
+ } while ((tv_ptr->large_rank > 2) && ((tv_ptr->small_rank - 1) <= 1) && (l < tv_ptr->edge_size));
+ k++;
+ } while ((tv_ptr->large_rank > 3) && ((tv_ptr->small_rank - 1) <= 2) && (k < tv_ptr->edge_size));
+ j++;
+ } while ((tv_ptr->large_rank > 4) && ((tv_ptr->small_rank - 1) <= 3) && (j < tv_ptr->edge_size));
+
+ return;
+
+} /* contig_hs_dr_pio_test__d2m_s2l() */
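+
+/* A small sketch (hypothetical helper; not called by the test) of the
+ * verification idiom used above: every element in [start_index, stop_index]
+ * must hold consecutive expected values, every other element must still be
+ * zero, and the buffer is re-zeroed as it is scanned so it can be reused.
+ */
+#if 0 /* illustrative sketch only -- not compiled */
+static hbool_t
+window_is_good(uint32_t *buf, size_t buf_size, size_t start_index, size_t stop_index,
+ uint32_t first_expected)
+{
+ hbool_t good = TRUE;
+ uint32_t expected = first_expected;
+ size_t n;
+
+ for (n = 0; n < buf_size; n++) {
+ if ((n >= start_index) && (n <= stop_index)) {
+ if (buf[n] != expected++)
+ good = FALSE;
+ }
+ else if (buf[n] != 0)
+ good = FALSE;
+
+ buf[n] = 0; /* re-zero for the next pass */
+ }
+
+ return good;
+}
+#endif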
+
+/*-------------------------------------------------------------------------
+ * Function: contig_hs_dr_pio_test__m2d_l2s()
+ *
+ * Purpose: Part three of a series of tests of I/O to/from hyperslab
+ * selections of different rank in the parallel case.
+ *
+ * Verify that we can write from memory to file using
+ * selections of different rank that H5Sselect_shape_same()
+ * views as being of the same shape.
+ *
+ * Do this by writing small_rank - 1 dimensional slices from
+ * the in memory large data set to the on disk small cube
+ * dataset. After each write, read the slice of the small
+ * dataset back from disk, and verify that it contains
+ * the expected data. Verify that H5Sselect_shape_same()
+ * returns true on the memory and file selections.
+ *
+ * Return: void
+ *
+ * Programmer: JRM -- 8/10/11
+ *
+ *-------------------------------------------------------------------------
+ */
+
+#define CONTIG_HS_DR_PIO_TEST__M2D_L2S__DEBUG 0
+
+static void
+contig_hs_dr_pio_test__m2d_l2s(struct hs_dr_pio_test_vars_t *tv_ptr)
+{
+#if CONTIG_HS_DR_PIO_TEST__M2D_L2S__DEBUG
+ const char *fcnName = "contig_hs_dr_pio_test__m2d_l2s()";
+#endif /* CONTIG_HS_DR_PIO_TEST__M2D_L2S__DEBUG */
+ hbool_t mis_match = FALSE;
+ int i, j, k, l;
+ size_t n;
+ int mpi_rank; /* needed by the VRFY macro */
+ size_t start_index;
+ size_t stop_index;
+ uint32_t expected_value;
+ uint32_t *ptr_1;
+ htri_t check; /* Shape comparison return value */
+ herr_t ret; /* Generic return value */
+
+ /* initialize the local copy of mpi_rank */
+ mpi_rank = tv_ptr->mpi_rank;
+
+ /* now we go in the opposite direction, verifying that we can write
+ * from memory to file using selections of different rank that
+ * H5Sselect_shape_same() views as being of the same shape.
+ *
+ * Start by writing small_rank - 1 dimensional slices from the in memory large
+ * data set to the on disk small cube dataset. After each write, read the
+ * slice of the small dataset back from disk, and verify that it contains
+ * the expected data. Verify that H5Sselect_shape_same() returns true on
+ * the memory and file selections.
+ */
+
+ tv_ptr->start[0] = (hsize_t)(tv_ptr->mpi_rank);
+ tv_ptr->stride[0] = (hsize_t)(2 * (tv_ptr->mpi_size + 1));
+ tv_ptr->count[0] = 1;
+ tv_ptr->block[0] = 1;
+
+ for (i = 1; i < tv_ptr->large_rank; i++) {
+
+ tv_ptr->start[i] = 0;
+ tv_ptr->stride[i] = (hsize_t)(2 * tv_ptr->edge_size);
+ tv_ptr->count[i] = 1;
+ tv_ptr->block[i] = (hsize_t)(tv_ptr->edge_size);
+ }
+
+ ret = H5Sselect_hyperslab(tv_ptr->file_small_ds_sid_0, H5S_SELECT_SET, tv_ptr->start, tv_ptr->stride,
+ tv_ptr->count, tv_ptr->block);
+ VRFY((ret >= 0), "H5Sselect_hyperslab(file_small_ds_sid_0, set) succeeded");
+
+ ret = H5Sselect_hyperslab(tv_ptr->mem_small_ds_sid, H5S_SELECT_SET, tv_ptr->start, tv_ptr->stride,
+ tv_ptr->count, tv_ptr->block);
+ VRFY((ret >= 0), "H5Sselect_hyperslab(mem_small_ds_sid, set) succeeded");
+
+ /* set up start, stride, count, and block -- note that we will
+ * change start[] so as to read slices of the large cube.
+ */
+ for (i = 0; i < PAR_SS_DR_MAX_RANK; i++) {
+
+ tv_ptr->start[i] = 0;
+ tv_ptr->stride[i] = (hsize_t)(2 * tv_ptr->edge_size);
+ tv_ptr->count[i] = 1;
+ if ((PAR_SS_DR_MAX_RANK - i) > (tv_ptr->small_rank - 1)) {
+
+ tv_ptr->block[i] = 1;
+ }
+ else {
+
+ tv_ptr->block[i] = (hsize_t)(tv_ptr->edge_size);
+ }
+ }
+
+ /* zero out the in memory small ds */
+ HDmemset(tv_ptr->small_ds_buf_1, 0, sizeof(uint32_t) * tv_ptr->small_ds_size);
+
+#if CONTIG_HS_DR_PIO_TEST__M2D_L2S__DEBUG
+ HDfprintf(stdout, "%s writing slices from big ds to slices of small ds on disk.\n", fcnName);
+#endif /* CONTIG_HS_DR_PIO_TEST__M2D_L2S__DEBUG */
+
+ /* in serial versions of this test, we loop through all the dimensions
+ * of the large data set that don't appear in the small data set.
+ *
+ * However, in the parallel version, each process only works with that
+ * slice of the large (and small) data set indicated by its rank -- hence
+ * we set the most slowly changing index to mpi_rank, and don't iterate
+ * over it.
+ */
+
+ if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 0) {
+
+ i = tv_ptr->mpi_rank;
+ }
+ else {
+
+ i = 0;
+ }
+
+ /* since large_rank is at most PAR_SS_DR_MAX_RANK, no need to
+ * loop over it -- either we are setting i to mpi_rank, or
+ * we are setting it to zero. It will not change during the
+ * test.
+ */
+
+ if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 1) {
+
+ j = tv_ptr->mpi_rank;
+ }
+ else {
+
+ j = 0;
+ }
+
+ j = 0;
+ do {
+ if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 2) {
+
+ k = tv_ptr->mpi_rank;
+ }
+ else {
+
+ k = 0;
+ }
+
+ do {
+ /* since small rank >= 2 and large_rank > small_rank, we
+ * have large_rank >= 3. Since PAR_SS_DR_MAX_RANK == 5
+ * (barring major re-organization), this gives us:
+ *
+ * (PAR_SS_DR_MAX_RANK - large_rank) <= 2
+ *
+ * so no need to repeat the test in the outer loops --
+ * just set l = 0.
+ */
+
+ l = 0;
+ do {
+ if ((tv_ptr->skips)++ < tv_ptr->max_skips) { /* skip the test */
+
+ (tv_ptr->tests_skipped)++;
+ }
+ else { /* run the test */
+
+ tv_ptr->skips = 0; /* reset the skips counter */
+
+ /* we know that small_rank >= 1 and that large_rank > small_rank
+ * by the assertions at the head of this function. Thus no
+ * need for another inner loop.
+ */
+
+ /* zero out this rank's slice of the on disk small data set */
+ ret = H5Dwrite(tv_ptr->small_dataset, H5T_NATIVE_UINT32, tv_ptr->mem_small_ds_sid,
+ tv_ptr->file_small_ds_sid_0, tv_ptr->xfer_plist, tv_ptr->small_ds_buf_2);
+ VRFY((ret >= 0), "H5Dwrite() zero slice to small ds succeeded.");
+
+ /* select the portion of the in memory large cube from which we
+ * are going to write data.
+ */
+ tv_ptr->start[0] = (hsize_t)i;
+ tv_ptr->start[1] = (hsize_t)j;
+ tv_ptr->start[2] = (hsize_t)k;
+ tv_ptr->start[3] = (hsize_t)l;
+ tv_ptr->start[4] = 0;
+
+ ret = H5Sselect_hyperslab(tv_ptr->mem_large_ds_sid, H5S_SELECT_SET, tv_ptr->start_ptr,
+ tv_ptr->stride_ptr, tv_ptr->count_ptr, tv_ptr->block_ptr);
+ VRFY((ret >= 0), "H5Sselect_hyperslab() mem_large_ds_sid succeeded.");
+
+ /* verify that H5Sselect_shape_same() reports the in
+ * memory slice through the cube selection and the
+ * on disk full square selections as having the same shape.
+ */
+ check = H5Sselect_shape_same(tv_ptr->file_small_ds_sid_0, tv_ptr->mem_large_ds_sid);
+ VRFY((check == TRUE), "H5Sselect_shape_same passed.");
+
+ /* write the slice from the in memory large data set to the
+ * slice of the on disk small dataset. */
+#if CONTIG_HS_DR_PIO_TEST__M2D_L2S__DEBUG
+ HDfprintf(stdout, "%s:%d: start = %d %d %d %d %d.\n", fcnName, (int)(tv_ptr->mpi_rank),
+ (int)(tv_ptr->start[0]), (int)(tv_ptr->start[1]), (int)(tv_ptr->start[2]),
+ (int)(tv_ptr->start[3]), (int)(tv_ptr->start[4]));
+ HDfprintf(stdout, "%s:%d: mem/file extent dims = %d/%d.\n", fcnName, tv_ptr->mpi_rank,
+ H5Sget_simple_extent_ndims(tv_ptr->mem_large_ds_sid),
+ H5Sget_simple_extent_ndims(tv_ptr->file_small_ds_sid_0));
+#endif /* CONTIG_HS_DR_PIO_TEST__M2D_L2S__DEBUG */
+ ret = H5Dwrite(tv_ptr->small_dataset, H5T_NATIVE_UINT32, tv_ptr->mem_large_ds_sid,
+ tv_ptr->file_small_ds_sid_0, tv_ptr->xfer_plist, tv_ptr->large_ds_buf_0);
+ VRFY((ret >= 0), "H5Dwrite() slice to large ds succeeded.");
+
+ /* read the on disk square into memory */
+ ret = H5Dread(tv_ptr->small_dataset, H5T_NATIVE_UINT32, tv_ptr->mem_small_ds_sid,
+ tv_ptr->file_small_ds_sid_0, tv_ptr->xfer_plist, tv_ptr->small_ds_buf_1);
+ VRFY((ret >= 0), "H5Dread() slice from small ds succeeded.");
+
+ /* verify that expected data is retrieved */
+
+ mis_match = FALSE;
+ ptr_1 = tv_ptr->small_ds_buf_1;
+
+ expected_value =
+ (uint32_t)((i * tv_ptr->edge_size * tv_ptr->edge_size * tv_ptr->edge_size *
+ tv_ptr->edge_size) +
+ (j * tv_ptr->edge_size * tv_ptr->edge_size * tv_ptr->edge_size) +
+ (k * tv_ptr->edge_size * tv_ptr->edge_size) + (l * tv_ptr->edge_size));
+
+ start_index = (size_t)(tv_ptr->mpi_rank) * tv_ptr->small_ds_slice_size;
+ stop_index = start_index + tv_ptr->small_ds_slice_size - 1;
+
+ HDassert(start_index < stop_index);
+ HDassert(stop_index <= tv_ptr->small_ds_size);
+
+ for (n = 0; n < tv_ptr->small_ds_size; n++) {
+
+ if ((n >= start_index) && (n <= stop_index)) {
+
+ if (*ptr_1 != expected_value) {
+
+ mis_match = TRUE;
+ }
+ expected_value++;
+ }
+ else {
+
+ if (*ptr_1 != 0) {
+
+ mis_match = TRUE;
+ }
+ }
+ /* zero out the value for the next pass */
+ *ptr_1 = 0;
+
+ ptr_1++;
+ }
+
+ VRFY((mis_match == FALSE), "small slice write from large ds data good.");
+
+ (tv_ptr->tests_run)++;
+ }
+
+ l++;
+
+ (tv_ptr->total_tests)++;
+
+ } while ((tv_ptr->large_rank > 2) && ((tv_ptr->small_rank - 1) <= 1) && (l < tv_ptr->edge_size));
+ k++;
+ } while ((tv_ptr->large_rank > 3) && ((tv_ptr->small_rank - 1) <= 2) && (k < tv_ptr->edge_size));
+ j++;
+ } while ((tv_ptr->large_rank > 4) && ((tv_ptr->small_rank - 1) <= 3) && (j < tv_ptr->edge_size));
+
+ return;
+
+} /* contig_hs_dr_pio_test__m2d_l2s() */
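+
+/* A compact sketch (hypothetical helper; the hid_t arguments stand for a setup
+ * like the one in this file) of the reverse direction used above: a higher
+ * rank memory selection is written to a lower rank file dataset that
+ * H5Sselect_shape_same() views as the same shape, and the region is read
+ * straight back for verification.
+ */
+#if 0 /* illustrative sketch only -- not compiled */
+static void
+write_then_readback_sketch(hid_t small_dset, hid_t mem_large_sid, hid_t mem_small_sid,
+ hid_t file_small_sid, hid_t dxpl, const uint32_t *in_buf, uint32_t *out_buf)
+{
+ /* the two selections differ in rank but not in shape */
+ HDassert(H5Sselect_shape_same(mem_large_sid, file_small_sid) == TRUE);
+
+ /* write the in-memory slice of the large cube onto the small dataset ... */
+ H5Dwrite(small_dset, H5T_NATIVE_UINT32, mem_large_sid, file_small_sid, dxpl, in_buf);
+
+ /* ... then read the same file region back through a same-rank memory selection */
+ H5Dread(small_dset, H5T_NATIVE_UINT32, mem_small_sid, file_small_sid, dxpl, out_buf);
+}
+#endif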
+
+/*-------------------------------------------------------------------------
+ * Function: contig_hs_dr_pio_test__m2d_s2l()
+ *
+ * Purpose: Part four of a series of tests of I/O to/from hyperslab
+ * selections of different rank in the parallel case.
+ *
+ * Verify that we can write from memory to file using
+ * selections of different rank that H5Sselect_shape_same()
+ * views as being of the same shape.
+ *
+ * Do this by writing the contents of the process's slice of
+ * the in memory small data set to slices of the on disk
+ * large data set. After each write, read the process's
+ * slice of the large data set back into memory, and verify
+ * that it contains the expected data.
+ *
+ * Verify that H5Sselect_shape_same() returns true on the
+ * memory and file selections.
+ *
+ * Return: void
+ *
+ * Programmer: JRM -- 8/10/11
+ *
+ *-------------------------------------------------------------------------
+ */
+
+#define CONTIG_HS_DR_PIO_TEST__M2D_S2L__DEBUG 0
+
+static void
+contig_hs_dr_pio_test__m2d_s2l(struct hs_dr_pio_test_vars_t *tv_ptr)
+{
+#if CONTIG_HS_DR_PIO_TEST__M2D_S2L__DEBUG
+ const char *fcnName = "contig_hs_dr_pio_test__m2d_s2l()";
+#endif /* CONTIG_HS_DR_PIO_TEST__M2D_S2L__DEBUG */
+ hbool_t mis_match = FALSE;
+ int i, j, k, l;
+ size_t n;
+ int mpi_rank; /* needed by the VRFY macro */
+ size_t start_index;
+ size_t stop_index;
+ uint32_t expected_value;
+ uint32_t *ptr_1;
+ htri_t check; /* Shape comparison return value */
+ herr_t ret; /* Generic return value */
+
+ /* initialize the local copy of mpi_rank */
+ mpi_rank = tv_ptr->mpi_rank;
+
+ /* Now write the contents of the process's slice of the in memory
+ * small data set to slices of the on disk large data set. After
+ * each write, read the process's slice of the large data set back
+ * into memory, and verify that it contains the expected data.
+ * Verify that H5Sselect_shape_same() returns true on the memory
+ * and file selections.
+ */
+
+ /* select the slice of the in memory small data set associated with
+ * the process's mpi rank.
+ */
+ tv_ptr->start[0] = (hsize_t)(tv_ptr->mpi_rank);
+ tv_ptr->stride[0] = (hsize_t)(2 * (tv_ptr->mpi_size + 1));
+ tv_ptr->count[0] = 1;
+ tv_ptr->block[0] = 1;
+
+ for (i = 1; i < tv_ptr->large_rank; i++) {
+
+ tv_ptr->start[i] = 0;
+ tv_ptr->stride[i] = (hsize_t)(2 * tv_ptr->edge_size);
+ tv_ptr->count[i] = 1;
+ tv_ptr->block[i] = (hsize_t)(tv_ptr->edge_size);
+ }
+
+ ret = H5Sselect_hyperslab(tv_ptr->mem_small_ds_sid, H5S_SELECT_SET, tv_ptr->start, tv_ptr->stride,
+ tv_ptr->count, tv_ptr->block);
+ VRFY((ret >= 0), "H5Sselect_hyperslab(mem_small_ds_sid, set) succeeded");
+
+ /* set up start, stride, count, and block -- note that we will
+ * change start[] so as to write slices of the small data set to
+ * slices of the large data set.
+ */
+ for (i = 0; i < PAR_SS_DR_MAX_RANK; i++) {
+
+ tv_ptr->start[i] = 0;
+ tv_ptr->stride[i] = (hsize_t)(2 * tv_ptr->edge_size);
+ tv_ptr->count[i] = 1;
+ if ((PAR_SS_DR_MAX_RANK - i) > (tv_ptr->small_rank - 1)) {
+
+ tv_ptr->block[i] = 1;
+ }
+ else {
+
+ tv_ptr->block[i] = (hsize_t)(tv_ptr->edge_size);
+ }
+ }
+
+ /* zero out the in memory large ds */
+ HDmemset(tv_ptr->large_ds_buf_1, 0, sizeof(uint32_t) * tv_ptr->large_ds_size);
+
+#if CONTIG_HS_DR_PIO_TEST__M2D_S2L__DEBUG
+ HDfprintf(stdout, "%s writing process slices of small ds to slices of large ds on disk.\n", fcnName);
+#endif /* CONTIG_HS_DR_PIO_TEST__M2D_S2L__DEBUG */
+
+ if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 0) {
+
+ i = tv_ptr->mpi_rank;
+ }
+ else {
+
+ i = 0;
+ }
+
+ /* since large_rank is at most PAR_SS_DR_MAX_RANK, no need to
+ * loop over it -- either we are setting i to mpi_rank, or
+ * we are setting it to zero. It will not change during the
+ * test.
+ */
+
+ if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 1) {
+
+ j = tv_ptr->mpi_rank;
+ }
+ else {
+
+ j = 0;
+ }
+
+ do {
+ if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 2) {
+
+ k = tv_ptr->mpi_rank;
+ }
+ else {
+
+ k = 0;
+ }
+
+ do {
+ /* since small rank >= 2 and large_rank > small_rank, we
+ * have large_rank >= 3. Since PAR_SS_DR_MAX_RANK == 5
+ * (barring major re-organization), this gives us:
+ *
+ * (PAR_SS_DR_MAX_RANK - large_rank) <= 2
+ *
+ * so no need to repeat the test in the outer loops --
+ * just set l = 0.
+ */
+
+ l = 0;
+ do {
+ if ((tv_ptr->skips)++ < tv_ptr->max_skips) { /* skip the test */
+
+ (tv_ptr->tests_skipped)++;
+
+#if CONTIG_HS_DR_PIO_TEST__M2D_S2L__DEBUG
+ tv_ptr->start[0] = (hsize_t)i;
+ tv_ptr->start[1] = (hsize_t)j;
+ tv_ptr->start[2] = (hsize_t)k;
+ tv_ptr->start[3] = (hsize_t)l;
+ tv_ptr->start[4] = 0;
+
+ HDfprintf(stdout, "%s:%d: skipping test with start = %d %d %d %d %d.\n", fcnName,
+ (int)(tv_ptr->mpi_rank), (int)(tv_ptr->start[0]), (int)(tv_ptr->start[1]),
+ (int)(tv_ptr->start[2]), (int)(tv_ptr->start[3]), (int)(tv_ptr->start[4]));
+ HDfprintf(stdout, "%s:%d: mem/file extent dims = %d/%d.\n", fcnName, tv_ptr->mpi_rank,
+ H5Sget_simple_extent_ndims(tv_ptr->mem_small_ds_sid),
+ H5Sget_simple_extent_ndims(tv_ptr->file_large_ds_sid_0));
+#endif /* CONTIG_HS_DR_PIO_TEST__M2D_S2L__DEBUG */
+ }
+ else { /* run the test */
+
+ tv_ptr->skips = 0; /* reset the skips counter */
+
+ /* we know that small_rank >= 1 and that large_rank > small_rank
+ * by the assertions at the head of this function. Thus no
+ * need for another inner loop.
+ */
+
+ /* Zero out this process's slice of the on disk large data set.
+ * Note that this will leave one slice with its original data
+ * as there is one more slice than processes.
+ */
+ ret = H5Dwrite(tv_ptr->large_dataset, H5T_NATIVE_UINT32, tv_ptr->large_ds_slice_sid,
+ tv_ptr->file_large_ds_process_slice_sid, tv_ptr->xfer_plist,
+ tv_ptr->large_ds_buf_2);
+ VRFY((ret != FAIL), "H5Dwrite() to zero large ds succeeded");
+
+ /* select the portion of the in memory large cube to which we
+ * are going to write data.
+ */
+ tv_ptr->start[0] = (hsize_t)i;
+ tv_ptr->start[1] = (hsize_t)j;
+ tv_ptr->start[2] = (hsize_t)k;
+ tv_ptr->start[3] = (hsize_t)l;
+ tv_ptr->start[4] = 0;
+
+ ret = H5Sselect_hyperslab(tv_ptr->file_large_ds_sid_0, H5S_SELECT_SET, tv_ptr->start_ptr,
+ tv_ptr->stride_ptr, tv_ptr->count_ptr, tv_ptr->block_ptr);
+ VRFY((ret != FAIL), "H5Sselect_hyperslab() target large ds slice succeeded");
+
+ /* verify that H5Sselect_shape_same() reports the in
+ * memory small data set slice selection and the
+ * on disk slice through the large data set selection
+ * as having the same shape.
+ */
+ check = H5Sselect_shape_same(tv_ptr->mem_small_ds_sid, tv_ptr->file_large_ds_sid_0);
+ VRFY((check == TRUE), "H5Sselect_shape_same passed");
+
+ /* write the small data set slice from memory to the
+ * target slice of the disk data set
+ */
+#if CONTIG_HS_DR_PIO_TEST__M2D_S2L__DEBUG
+ HDfprintf(stdout, "%s:%d: start = %d %d %d %d %d.\n", fcnName, (int)(tv_ptr->mpi_rank),
+ (int)(tv_ptr->start[0]), (int)(tv_ptr->start[1]), (int)(tv_ptr->start[2]),
+ (int)(tv_ptr->start[3]), (int)(tv_ptr->start[4]));
+ HDfprintf(stdout, "%s:%d: mem/file extent dims = %d/%d.\n", fcnName, tv_ptr->mpi_rank,
+ H5Sget_simple_extent_ndims(tv_ptr->mem_small_ds_sid),
+ H5Sget_simple_extent_ndims(tv_ptr->file_large_ds_sid_0));
+#endif /* CONTIG_HS_DR_PIO_TEST__M2D_S2L__DEBUG */
+ ret = H5Dwrite(tv_ptr->large_dataset, H5T_NATIVE_UINT32, tv_ptr->mem_small_ds_sid,
+ tv_ptr->file_large_ds_sid_0, tv_ptr->xfer_plist, tv_ptr->small_ds_buf_0);
+ VRFY((ret != FAIL), "H5Dwrite of small ds slice to large ds succeeded");
+
+ /* read this process's slice of the on disk large
+ * data set into memory.
+ */
+
+ ret = H5Dread(
+ tv_ptr->large_dataset, H5T_NATIVE_UINT32, tv_ptr->mem_large_ds_process_slice_sid,
+ tv_ptr->file_large_ds_process_slice_sid, tv_ptr->xfer_plist, tv_ptr->large_ds_buf_1);
+ VRFY((ret != FAIL), "H5Dread() of process slice of large ds succeeded");
+
+ /* verify that the expected data and only the
+ * expected data was read.
+ */
+ ptr_1 = tv_ptr->large_ds_buf_1;
+ expected_value = (uint32_t)((size_t)(tv_ptr->mpi_rank) * tv_ptr->small_ds_slice_size);
+
+ start_index =
+ (size_t)((i * tv_ptr->edge_size * tv_ptr->edge_size * tv_ptr->edge_size *
+ tv_ptr->edge_size) +
+ (j * tv_ptr->edge_size * tv_ptr->edge_size * tv_ptr->edge_size) +
+ (k * tv_ptr->edge_size * tv_ptr->edge_size) + (l * tv_ptr->edge_size));
+ stop_index = start_index + tv_ptr->small_ds_slice_size - 1;
+
+ HDassert(start_index < stop_index);
+ HDassert(stop_index < tv_ptr->large_ds_size);
+
+ for (n = 0; n < tv_ptr->large_ds_size; n++) {
+
+ if ((n >= start_index) && (n <= stop_index)) {
+
+ if (*ptr_1 != expected_value) {
+
+ mis_match = TRUE;
+ }
+
+ expected_value++;
+ }
+ else {
+
+ if (*ptr_1 != 0) {
+
+ mis_match = TRUE;
+ }
+ }
+ /* zero out buffer for next test */
+ *ptr_1 = 0;
+ ptr_1++;
+ }
+
+ VRFY((mis_match == FALSE), "small ds slice write to large ds slice data good.");
+
+ (tv_ptr->tests_run)++;
+ }
+
+ l++;
+
+ (tv_ptr->total_tests)++;
+
+ } while ((tv_ptr->large_rank > 2) && ((tv_ptr->small_rank - 1) <= 1) && (l < tv_ptr->edge_size));
+ k++;
+ } while ((tv_ptr->large_rank > 3) && ((tv_ptr->small_rank - 1) <= 2) && (k < tv_ptr->edge_size));
+ j++;
+ } while ((tv_ptr->large_rank > 4) && ((tv_ptr->small_rank - 1) <= 3) && (j < tv_ptr->edge_size));
+
+ return;
+
+} /* contig_hs_dr_pio_test__m2d_s2l() */
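+
+/* A minimal parallel sketch (hypothetical, standalone program) of the harness
+ * pattern used throughout this file: open a file with the MPI-IO driver, have
+ * every rank select its own row of a (mpi_size x N) dataset, and write it
+ * collectively. The file name "sketch.h5" and dataset name "rows" are made
+ * up; error checking is elided for brevity.
+ */
+#if 0 /* illustrative sketch only -- not compiled */
+#include <mpi.h>
+#include "hdf5.h"
+
+#define SKETCH_DIM 8
+
+int
+main(int argc, char **argv)
+{
+ int mpi_rank, mpi_size, i;
+ uint32_t row[SKETCH_DIM];
+ hsize_t dims[2], start[2], count[2];
+ hsize_t row_dim = SKETCH_DIM;
+ hid_t fapl, dxpl, fid, file_sid, mem_sid, dset;
+
+ MPI_Init(&argc, &argv);
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+
+ fapl = H5Pcreate(H5P_FILE_ACCESS);
+ H5Pset_fapl_mpio(fapl, MPI_COMM_WORLD, MPI_INFO_NULL);
+ fid = H5Fcreate("sketch.h5", H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
+
+ dims[0] = (hsize_t)mpi_size;
+ dims[1] = SKETCH_DIM;
+ file_sid = H5Screate_simple(2, dims, NULL);
+ dset = H5Dcreate2(fid, "rows", H5T_NATIVE_UINT32, file_sid, H5P_DEFAULT, H5P_DEFAULT,
+ H5P_DEFAULT);
+
+ /* each rank selects the single row indexed by its rank */
+ start[0] = (hsize_t)mpi_rank;
+ start[1] = 0;
+ count[0] = 1;
+ count[1] = SKETCH_DIM;
+ H5Sselect_hyperslab(file_sid, H5S_SELECT_SET, start, NULL, count, NULL);
+
+ mem_sid = H5Screate_simple(1, &row_dim, NULL); /* rank 1, same shape as the row */
+
+ dxpl = H5Pcreate(H5P_DATASET_XFER);
+ H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);
+
+ for (i = 0; i < SKETCH_DIM; i++)
+ row[i] = (uint32_t)(mpi_rank * SKETCH_DIM + i);
+
+ H5Dwrite(dset, H5T_NATIVE_UINT32, mem_sid, file_sid, dxpl, row);
+
+ H5Pclose(dxpl);
+ H5Dclose(dset);
+ H5Sclose(mem_sid);
+ H5Sclose(file_sid);
+ H5Fclose(fid);
+ H5Pclose(fapl);
+ MPI_Finalize();
+ return 0;
+}
+#endif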
+
+/*-------------------------------------------------------------------------
+ * Function: contig_hs_dr_pio_test__run_test()
+ *
+ * Purpose: Test I/O to/from hyperslab selections of different rank in
+ * the parallel case.
+ *
+ * Return: void
+ *
+ * Programmer: JRM -- 9/18/09
+ *
+ *-------------------------------------------------------------------------
+ */
+
+#define CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG 0
+
+static void
+contig_hs_dr_pio_test__run_test(const int test_num, const int edge_size, const int chunk_edge_size,
+ const int small_rank, const int large_rank, const hbool_t use_collective_io,
+ const hid_t dset_type, int express_test, int *skips_ptr, int max_skips,
+ int64_t *total_tests_ptr, int64_t *tests_run_ptr, int64_t *tests_skipped_ptr,
+ int mpi_rank)
+{
+#if CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG
+ const char *fcnName = "contig_hs_dr_pio_test__run_test()";
+#endif /* CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG */
+ struct hs_dr_pio_test_vars_t test_vars = {
+ /* int mpi_size = */ -1,
+ /* int mpi_rank = */ -1,
+ /* MPI_Comm mpi_comm = */ MPI_COMM_NULL,
+ /* MPI_Inf mpi_info = */ MPI_INFO_NULL,
+ /* int test_num = */ -1,
+ /* int edge_size = */ -1,
+ /* int checker_edge_size = */ -1,
+ /* int chunk_edge_size = */ -1,
+ /* int small_rank = */ -1,
+ /* int large_rank = */ -1,
+ /* hid_t dset_type = */ -1,
+ /* uint32_t * small_ds_buf_0 = */ NULL,
+ /* uint32_t * small_ds_buf_1 = */ NULL,
+ /* uint32_t * small_ds_buf_2 = */ NULL,
+ /* uint32_t * small_ds_slice_buf = */ NULL,
+ /* uint32_t * large_ds_buf_0 = */ NULL,
+ /* uint32_t * large_ds_buf_1 = */ NULL,
+ /* uint32_t * large_ds_buf_2 = */ NULL,
+ /* uint32_t * large_ds_slice_buf = */ NULL,
+ /* int small_ds_offset = */ -1,
+ /* int large_ds_offset = */ -1,
+ /* hid_t fid = */ -1, /* HDF5 file ID */
+ /* hid_t xfer_plist = */ H5P_DEFAULT,
+ /* hid_t full_mem_small_ds_sid = */ -1,
+ /* hid_t full_file_small_ds_sid = */ -1,
+ /* hid_t mem_small_ds_sid = */ -1,
+ /* hid_t file_small_ds_sid_0 = */ -1,
+ /* hid_t file_small_ds_sid_1 = */ -1,
+ /* hid_t small_ds_slice_sid = */ -1,
+ /* hid_t full_mem_large_ds_sid = */ -1,
+ /* hid_t full_file_large_ds_sid = */ -1,
+ /* hid_t mem_large_ds_sid = */ -1,
+ /* hid_t file_large_ds_sid_0 = */ -1,
+ /* hid_t file_large_ds_sid_1 = */ -1,
+ /* hid_t file_large_ds_process_slice_sid = */ -1,
+ /* hid_t mem_large_ds_process_slice_sid = */ -1,
+ /* hid_t large_ds_slice_sid = */ -1,
+ /* hid_t small_dataset = */ -1, /* Dataset ID */
+ /* hid_t large_dataset = */ -1, /* Dataset ID */
+ /* size_t small_ds_size = */ 1,
+ /* size_t small_ds_slice_size = */ 1,
+ /* size_t large_ds_size = */ 1,
+ /* size_t large_ds_slice_size = */ 1,
+ /* hsize_t dims[PAR_SS_DR_MAX_RANK] = */ {0, 0, 0, 0, 0},
+ /* hsize_t chunk_dims[PAR_SS_DR_MAX_RANK] = */ {0, 0, 0, 0, 0},
+ /* hsize_t start[PAR_SS_DR_MAX_RANK] = */ {0, 0, 0, 0, 0},
+ /* hsize_t stride[PAR_SS_DR_MAX_RANK] = */ {0, 0, 0, 0, 0},
+ /* hsize_t count[PAR_SS_DR_MAX_RANK] = */ {0, 0, 0, 0, 0},
+ /* hsize_t block[PAR_SS_DR_MAX_RANK] = */ {0, 0, 0, 0, 0},
+ /* hsize_t * start_ptr = */ NULL,
+ /* hsize_t * stride_ptr = */ NULL,
+ /* hsize_t * count_ptr = */ NULL,
+ /* hsize_t * block_ptr = */ NULL,
+ /* int skips = */ 0,
+ /* int max_skips = */ 0,
+ /* int64_t total_tests = */ 0,
+ /* int64_t tests_run = */ 0,
+ /* int64_t tests_skipped = */ 0};
+ struct hs_dr_pio_test_vars_t *tv_ptr = &test_vars;
+
+ if (MAINPROCESS)
+ printf("\r - running test #%lld: small rank = %d, large rank = %d", (long long)(test_num + 1),
+ small_rank, large_rank);
+
+ hs_dr_pio_test__setup(test_num, edge_size, -1, chunk_edge_size, small_rank, large_rank, use_collective_io,
+ dset_type, express_test, tv_ptr);
+
+ /* initialize skips & max_skips */
+ tv_ptr->skips = *skips_ptr;
+ tv_ptr->max_skips = max_skips;
+
+#if CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG
+ if (MAINPROCESS) {
+ HDfprintf(stdout, "test %d: small rank = %d, large rank = %d.\n", test_num, small_rank, large_rank);
+ HDfprintf(stdout, "test %d: Initialization complete.\n", test_num);
+ }
+#endif /* CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG */
+
+ /* first, verify that we can read from disk correctly using selections
+ * of different rank that H5Sselect_shape_same() views as being of the
+ * same shape.
+ *
+ * Start by reading small_rank - 1 dimensional slice from the on disk
+ * large cube, and verifying that the data read is correct. Verify that
+ * H5Sselect_shape_same() returns true on the memory and file selections.
+ */
+
+#if CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG
+ if (MAINPROCESS) {
+ HDfprintf(stdout, "test %d: running contig_hs_dr_pio_test__d2m_l2s.\n", test_num);
+ }
+#endif /* CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG */
+ contig_hs_dr_pio_test__d2m_l2s(tv_ptr);
+
+ /* Second, read slices of the on disk small data set into slices
+ * through the in memory large data set, and verify that the correct
+ * data (and only the correct data) is read.
+ */
+
+#if CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG
+ if (MAINPROCESS) {
+ HDfprintf(stdout, "test %d: running contig_hs_dr_pio_test__d2m_s2l.\n", test_num);
+ }
+#endif /* CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG */
+ contig_hs_dr_pio_test__d2m_s2l(tv_ptr);
+
+ /* now we go in the opposite direction, verifying that we can write
+ * from memory to file using selections of different rank that
+ * H5Sselect_shape_same() views as being of the same shape.
+ *
+ * Start by writing small_rank - 1 D slices from the in memory large data
+ * set to the on disk small cube dataset. After each write, read the
+ * slice of the small dataset back from disk, and verify that it contains
+ * the expected data. Verify that H5Sselect_shape_same() returns true on
+ * the memory and file selections.
+ */
+
+#if CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG
+ if (MAINPROCESS) {
+ HDfprintf(stdout, "test %d: running contig_hs_dr_pio_test__m2d_l2s.\n", test_num);
+ }
+#endif /* CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG */
+ contig_hs_dr_pio_test__m2d_l2s(tv_ptr);
+
+ /* Now write the contents of the process's slice of the in memory
+ * small data set to slices of the on disk large data set. After
+ * each write, read the process's slice of the large data set back
+ * into memory, and verify that it contains the expected data.
+ * Verify that H5Sselect_shape_same() returns true on the memory
+ * and file selections.
+ */
+
+#if CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG
+ if (MAINPROCESS) {
+ HDfprintf(stdout, "test %d: running contig_hs_dr_pio_test__m2d_s2l.\n", test_num);
+ }
+#endif /* CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG */
+ contig_hs_dr_pio_test__m2d_s2l(tv_ptr);
+
+#if CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG
+ if (MAINPROCESS) {
+ HDfprintf(stdout, "test %d: Subtests complete -- tests run/skipped/total = %lld/%lld/%lld.\n",
+ test_num, (long long)(tv_ptr->tests_run), (long long)(tv_ptr->tests_skipped),
+ (long long)(tv_ptr->total_tests));
+ }
+#endif /* CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG */
+
+ hs_dr_pio_test__takedown(tv_ptr);
+
+#if CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG
+ if (MAINPROCESS) {
+ HDfprintf(stdout, "test %d: Takedown complete.\n", test_num);
+ }
+#endif /* CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG */
+
+ *skips_ptr = tv_ptr->skips;
+ *total_tests_ptr += tv_ptr->total_tests;
+ *tests_run_ptr += tv_ptr->tests_run;
+ *tests_skipped_ptr += tv_ptr->tests_skipped;
+
+ return;
+
+} /* contig_hs_dr_pio_test__run_test() */
+
+/*-------------------------------------------------------------------------
+ * Function: contig_hs_dr_pio_test(ShapeSameTestMethods sstest_type)
+ *
+ * Purpose: Test I/O to/from hyperslab selections of different rank in
+ * the parallel case.
+ *
+ * Return: void
+ *
+ * Programmer: JRM -- 9/18/09
+ *
+ *-------------------------------------------------------------------------
+ */
+
+#define CONTIG_HS_DR_PIO_TEST__DEBUG 0
+
+static void
+contig_hs_dr_pio_test(ShapeSameTestMethods sstest_type)
+{
+ int express_test;
+ int local_express_test;
+ int mpi_rank = -1;
+ int mpi_size;
+ int test_num = 0;
+ int edge_size;
+ int chunk_edge_size = 0;
+ int small_rank;
+ int large_rank;
+ int mpi_result;
+ int skips = 0;
+ int max_skips = 0;
+ /* The following table lists the number of sub-tests skipped between
+ * each test that is actually executed as a function of the express
+ * test level. Note that any value in excess of 4880 will cause all
+ * sub tests to be skipped.
+ */
+ int max_skips_tbl[4] = {0, 4, 64, 1024};
+ hid_t dset_type = H5T_NATIVE_UINT;
+ int64_t total_tests = 0;
+ int64_t tests_run = 0;
+ int64_t tests_skipped = 0;
+
+ HDcompile_assert(sizeof(uint32_t) == sizeof(unsigned));
+
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ edge_size = (mpi_size > 6 ? mpi_size : 6);
+
+ local_express_test = EXPRESS_MODE; /* GetTestExpress(); */
+
+ mpi_result = MPI_Allreduce((void *)&local_express_test, (void *)&express_test, 1, MPI_INT, MPI_MAX,
+ MPI_COMM_WORLD);
+
+ VRFY((mpi_result == MPI_SUCCESS), "MPI_Allreduce(0) succeeded");
+
+ if (local_express_test < 0) {
+ max_skips = max_skips_tbl[0];
+ }
+ else if (local_express_test > 3) {
+ max_skips = max_skips_tbl[3];
+ }
+ else {
+ max_skips = max_skips_tbl[local_express_test];
+ }
+
+ for (large_rank = 3; large_rank <= PAR_SS_DR_MAX_RANK; large_rank++) {
+
+ for (small_rank = 2; small_rank < large_rank; small_rank++) {
+
+ switch (sstest_type) {
+ case IND_CONTIG:
+ /* contiguous data set, independent I/O */
+ chunk_edge_size = 0;
+
+ contig_hs_dr_pio_test__run_test(
+ test_num, edge_size, chunk_edge_size, small_rank, large_rank, FALSE, dset_type,
+ express_test, &skips, max_skips, &total_tests, &tests_run, &tests_skipped, mpi_rank);
+ test_num++;
+ break;
+ /* end of case IND_CONTIG */
+
+ case COL_CONTIG:
+ /* contiguous data set, collective I/O */
+ chunk_edge_size = 0;
+
+ contig_hs_dr_pio_test__run_test(
+ test_num, edge_size, chunk_edge_size, small_rank, large_rank, TRUE, dset_type,
+ express_test, &skips, max_skips, &total_tests, &tests_run, &tests_skipped, mpi_rank);
+ test_num++;
+ break;
+ /* end of case COL_CONTIG */
+
+ case IND_CHUNKED:
+ /* chunked data set, independent I/O */
+ chunk_edge_size = 5;
+
+ contig_hs_dr_pio_test__run_test(
+ test_num, edge_size, chunk_edge_size, small_rank, large_rank, FALSE, dset_type,
+ express_test, &skips, max_skips, &total_tests, &tests_run, &tests_skipped, mpi_rank);
+ test_num++;
+ break;
+ /* end of case IND_CHUNKED */
+
+ case COL_CHUNKED:
+ /* chunked data set, collective I/O */
+ chunk_edge_size = 5;
+
+ contig_hs_dr_pio_test__run_test(
+ test_num, edge_size, chunk_edge_size, small_rank, large_rank, TRUE, dset_type,
+ express_test, &skips, max_skips, &total_tests, &tests_run, &tests_skipped, mpi_rank);
+ test_num++;
+ break;
+ /* end of case COL_CHUNKED */
+
+ default:
+ VRFY((FALSE), "unknown test type");
+ break;
+
+ } /* end of switch(sstest_type) */
+#if CONTIG_HS_DR_PIO_TEST__DEBUG
+ if ((MAINPROCESS) && (tests_skipped > 0)) {
+ HDfprintf(stdout, " run/skipped/total = %lld/%lld/%lld.\n", tests_run, tests_skipped,
+ total_tests);
+ }
+#endif /* CONTIG_HS_DR_PIO_TEST__DEBUG */
+ }
+ }
+
+ if (MAINPROCESS) {
+ if (tests_skipped > 0) {
+ HDfprintf(stdout, " %" PRId64 " of %" PRId64 " subtests skipped to expedite testing.\n",
+ tests_skipped, total_tests);
+ }
+ else
+ HDprintf("\n");
+ }
+
+ return;
+
+} /* contig_hs_dr_pio_test() */
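+
+/* Sketch of the express-test thinning used above (hypothetical names): with a
+ * skip budget of max_skips, only every (max_skips + 1)-th sub-test actually
+ * runs; the counter carries over between calls so the thinning is uniform
+ * across the whole test matrix.
+ */
+#if 0 /* illustrative sketch only -- not compiled */
+static void
+maybe_run_subtest(int *skips_ptr, int max_skips, int64_t *run_ptr, int64_t *skipped_ptr)
+{
+ if ((*skips_ptr)++ < max_skips) {
+ (*skipped_ptr)++; /* thinned out by the express-test level */
+ }
+ else {
+ *skips_ptr = 0; /* reset the budget ... */
+ (*run_ptr)++; /* ... and run the sub-test here */
+ }
+}
+#endif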
+
+/****************************************************************
+**
+** ckrbrd_hs_dr_pio_test__slct_ckrbrd():
+** Given a dataspace of tgt_rank, and dimensions:
+**
+** (mpi_size + 1), edge_size, ... , edge_size
+**
+** an edge_size, and a checker_edge_size, select a checker
+** board selection of a sel_rank (sel_rank < tgt_rank)
+** dimensional slice through the dataspace parallel to the
+** sel_rank fastest changing indices, with origin (in the
+** higher indices) as indicated by the start array.
+**
+** Note that this function, like all its relatives, is
+** hard coded to presume a maximum dataspace rank of 5.
+** While this maximum is declared as a constant, increasing
+** it will require extensive coding in addition to changing
+** the value of the constant.
+**
+** JRM -- 10/8/09
+**
+****************************************************************/
+
+#define CKRBRD_HS_DR_PIO_TEST__SELECT_CHECKER_BOARD__DEBUG 0
+
+static void
+ckrbrd_hs_dr_pio_test__slct_ckrbrd(const int mpi_rank, const hid_t tgt_sid, const int tgt_rank,
+ const int edge_size, const int checker_edge_size, const int sel_rank,
+ hsize_t sel_start[])
+{
+#if CKRBRD_HS_DR_PIO_TEST__SELECT_CHECKER_BOARD__DEBUG
+ const char *fcnName = "ckrbrd_hs_dr_pio_test__slct_ckrbrd():";
+#endif
+ hbool_t first_selection = TRUE;
+ int i, j, k, l, m;
+ int n_cube_offset;
+ int sel_offset;
+ const int test_max_rank = PAR_SS_DR_MAX_RANK; /* must update code if */
+ /* this changes */
+ hsize_t base_count;
+ hsize_t offset_count;
+ hsize_t start[PAR_SS_DR_MAX_RANK];
+ hsize_t stride[PAR_SS_DR_MAX_RANK];
+ hsize_t count[PAR_SS_DR_MAX_RANK];
+ hsize_t block[PAR_SS_DR_MAX_RANK];
+ herr_t ret; /* Generic return value */
+
+ HDassert(edge_size >= 6);
+ HDassert(0 < checker_edge_size);
+ HDassert(checker_edge_size <= edge_size);
+ HDassert(0 < sel_rank);
+ HDassert(sel_rank <= tgt_rank);
+ HDassert(tgt_rank <= test_max_rank);
+ HDassert(test_max_rank <= PAR_SS_DR_MAX_RANK);
+
+ sel_offset = test_max_rank - sel_rank;
+ HDassert(sel_offset >= 0);
+
+ n_cube_offset = test_max_rank - tgt_rank;
+ HDassert(n_cube_offset >= 0);
+ HDassert(n_cube_offset <= sel_offset);
+
+#if CKRBRD_HS_DR_PIO_TEST__SELECT_CHECKER_BOARD__DEBUG
+ HDfprintf(stdout, "%s:%d: edge_size/checker_edge_size = %d/%d\n", fcnName, mpi_rank, edge_size,
+ checker_edge_size);
+ HDfprintf(stdout, "%s:%d: sel_rank/sel_offset = %d/%d.\n", fcnName, mpi_rank, sel_rank, sel_offset);
+ HDfprintf(stdout, "%s:%d: tgt_rank/n_cube_offset = %d/%d.\n", fcnName, mpi_rank, tgt_rank, n_cube_offset);
+#endif /* CKRBRD_HS_DR_PIO_TEST__SELECT_CHECKER_BOARD__DEBUG */
+
+ /* First, compute the base count (which assumes start == 0
+ * for the associated offset) and offset_count (which
+ * assumes start == checker_edge_size for the associated
+ * offset).
+ *
+ * Note that the following computation depends on the C99
+ * requirement that integer division discard any fraction
+ * (truncation towards zero) to function correctly. As we
+ * now require C99, this shouldn't be a problem, but noting
+ * it may save us some pain if we are ever obliged to support
+ * pre-C99 compilers again.
+ */
+
+ base_count = (hsize_t)(edge_size / (checker_edge_size * 2));
+
+ if ((edge_size % (checker_edge_size * 2)) > 0) {
+
+ base_count++;
+ }
+
+ offset_count = (hsize_t)((edge_size - checker_edge_size) / (checker_edge_size * 2));
+
+ if (((edge_size - checker_edge_size) % (checker_edge_size * 2)) > 0) {
+
+ offset_count++;
+ }
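+
+ /* For example (hypothetical numbers): with edge_size == 10 and
+ * checker_edge_size == 3, base_count = 10 / 6 = 1, then 2 since 10 % 6 > 0;
+ * offset_count = (10 - 3) / 6 = 1, then 2 since 7 % 6 > 0.
+ */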
+
+ /* Now set up the stride and block arrays, and portions of the start
+ * and count arrays that will not be altered during the selection of
+ * the checker board.
+ */
+ i = 0;
+ while (i < n_cube_offset) {
+
+ /* these values should never be used */
+ start[i] = 0;
+ stride[i] = 0;
+ count[i] = 0;
+ block[i] = 0;
+
+ i++;
+ }
+
+ while (i < sel_offset) {
+
+ start[i] = sel_start[i];
+ stride[i] = (hsize_t)(2 * edge_size);
+ count[i] = 1;
+ block[i] = 1;
+
+ i++;
+ }
+
+ while (i < test_max_rank) {
+
+ stride[i] = (hsize_t)(2 * checker_edge_size);
+ block[i] = (hsize_t)checker_edge_size;
+
+ i++;
+ }
+
+ i = 0;
+ do {
+ if (0 >= sel_offset) {
+
+ if (i == 0) {
+
+ start[0] = 0;
+ count[0] = base_count;
+ }
+ else {
+
+ start[0] = (hsize_t)checker_edge_size;
+ count[0] = offset_count;
+ }
+ }
+
+ j = 0;
+ do {
+ if (1 >= sel_offset) {
+
+ if (j == 0) {
+
+ start[1] = 0;
+ count[1] = base_count;
+ }
+ else {
+
+ start[1] = (hsize_t)checker_edge_size;
+ count[1] = offset_count;
+ }
+ }
+
+ k = 0;
+ do {
+ if (2 >= sel_offset) {
+
+ if (k == 0) {
+
+ start[2] = 0;
+ count[2] = base_count;
+ }
+ else {
+
+ start[2] = (hsize_t)checker_edge_size;
+ count[2] = offset_count;
+ }
+ }
+
+ l = 0;
+ do {
+ if (3 >= sel_offset) {
+
+ if (l == 0) {
+
+ start[3] = 0;
+ count[3] = base_count;
+ }
+ else {
+
+ start[3] = (hsize_t)checker_edge_size;
+ count[3] = offset_count;
+ }
+ }
+
+ m = 0;
+ do {
+ if (4 >= sel_offset) {
+
+ if (m == 0) {
+
+ start[4] = 0;
+ count[4] = base_count;
+ }
+ else {
+
+ start[4] = (hsize_t)checker_edge_size;
+ count[4] = offset_count;
+ }
+ }
+
+ if (((i + j + k + l + m) % 2) == 0) {
+
+#if CKRBRD_HS_DR_PIO_TEST__SELECT_CHECKER_BOARD__DEBUG
+ HDfprintf(stdout, "%s%d: *** first_selection = %d ***\n", fcnName, mpi_rank,
+ (int)first_selection);
+ HDfprintf(stdout, "%s:%d: i/j/k/l/m = %d/%d/%d/%d/%d\n", fcnName, mpi_rank, i, j,
+ k, l, m);
+ HDfprintf(stdout, "%s:%d: start = %d %d %d %d %d.\n", fcnName, mpi_rank,
+ (int)start[0], (int)start[1], (int)start[2], (int)start[3],
+ (int)start[4]);
+ HDfprintf(stdout, "%s:%d: stride = %d %d %d %d %d.\n", fcnName, mpi_rank,
+ (int)stride[0], (int)stride[1], (int)stride[2], (int)stride[3],
+ (int)stride[4]);
+ HDfprintf(stdout, "%s:%d: count = %d %d %d %d %d.\n", fcnName, mpi_rank,
+ (int)count[0], (int)count[1], (int)count[2], (int)count[3],
+ (int)count[4]);
+ HDfprintf(stdout, "%s:%d: block = %d %d %d %d %d.\n", fcnName, mpi_rank,
+ (int)block[0], (int)block[1], (int)block[2], (int)block[3],
+ (int)block[4]);
+ HDfprintf(stdout, "%s:%d: n-cube extent dims = %d.\n", fcnName, mpi_rank,
+ H5Sget_simple_extent_ndims(tgt_sid));
+ HDfprintf(stdout, "%s:%d: selection rank = %d.\n", fcnName, mpi_rank, sel_rank);
+#endif
+
+ if (first_selection) {
+
+ first_selection = FALSE;
+
+ ret = H5Sselect_hyperslab(tgt_sid, H5S_SELECT_SET, &(start[n_cube_offset]),
+ &(stride[n_cube_offset]), &(count[n_cube_offset]),
+ &(block[n_cube_offset]));
+
+ VRFY((ret != FAIL), "H5Sselect_hyperslab(SET) succeeded");
+ }
+ else {
+
+ ret = H5Sselect_hyperslab(tgt_sid, H5S_SELECT_OR, &(start[n_cube_offset]),
+ &(stride[n_cube_offset]), &(count[n_cube_offset]),
+ &(block[n_cube_offset]));
+
+ VRFY((ret != FAIL), "H5Sselect_hyperslab(OR) succeeded");
+ }
+ }
+
+ m++;
+
+ } while ((m <= 1) && (4 >= sel_offset));
+
+ l++;
+
+ } while ((l <= 1) && (3 >= sel_offset));
+
+ k++;
+
+ } while ((k <= 1) && (2 >= sel_offset));
+
+ j++;
+
+ } while ((j <= 1) && (1 >= sel_offset));
+
+ i++;
+
+ } while ((i <= 1) && (0 >= sel_offset));
+
+#if CKRBRD_HS_DR_PIO_TEST__SELECT_CHECKER_BOARD__DEBUG
+ HDfprintf(stdout, "%s%d: H5Sget_select_npoints(tgt_sid) = %d.\n", fcnName, mpi_rank,
+ (int)H5Sget_select_npoints(tgt_sid));
+#endif /* CKRBRD_HS_DR_PIO_TEST__SELECT_CHECKER_BOARD__DEBUG */
+
+ /* Clip the selection back to the dataspace proper. */
+
+ for (i = 0; i < test_max_rank; i++) {
+
+ start[i] = 0;
+ stride[i] = (hsize_t)edge_size;
+ count[i] = 1;
+ block[i] = (hsize_t)edge_size;
+ }
+
+ ret = H5Sselect_hyperslab(tgt_sid, H5S_SELECT_AND, start, stride, count, block);
+
+ VRFY((ret != FAIL), "H5Sselect_hyperslab(AND) succeeded");
+
+#if CKRBRD_HS_DR_PIO_TEST__SELECT_CHECKER_BOARD__DEBUG
+ HDfprintf(stdout, "%s%d: H5Sget_select_npoints(tgt_sid) = %d.\n", fcnName, mpi_rank,
+ (int)H5Sget_select_npoints(tgt_sid));
+ HDfprintf(stdout, "%s%d: done.\n", fcnName, mpi_rank);
+#endif /* CKRBRD_HS_DR_PIO_TEST__SELECT_CHECKER_BOARD__DEBUG */
+
+ return;
+
+} /* ckrbrd_hs_dr_pio_test__slct_ckrbrd() */
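+
+/* A 2-D sketch (hypothetical, serial) of the selection built above: the
+ * even-parity checkers are laid down with H5S_SELECT_SET / H5S_SELECT_OR and
+ * the result is clipped back to the extent with H5S_SELECT_AND, exactly as the
+ * general routine does for up to PAR_SS_DR_MAX_RANK dimensions.
+ */
+#if 0 /* illustrative sketch only -- not compiled */
+static void
+checkerboard_2d_sketch(hid_t sid, hsize_t edge, hsize_t checker)
+{
+ hsize_t stride[2] = {2 * checker, 2 * checker};
+ hsize_t block[2] = {checker, checker};
+ hsize_t base = (edge + 2 * checker - 1) / (2 * checker); /* ceil(edge / 2c) */
+ hsize_t offset = (edge - checker + 2 * checker - 1) / (2 * checker); /* ceil((edge - c) / 2c) */
+ hsize_t start[2], count[2];
+
+ /* checkers whose (i + j) parity is even: (0, 0) ... */
+ start[0] = start[1] = 0;
+ count[0] = count[1] = base;
+ H5Sselect_hyperslab(sid, H5S_SELECT_SET, start, stride, count, block);
+
+ /* ... and (1, 1) */
+ start[0] = start[1] = checker;
+ count[0] = count[1] = offset;
+ H5Sselect_hyperslab(sid, H5S_SELECT_OR, start, stride, count, block);
+
+ /* clip any checkers that overhang the dataspace back to the extent */
+ start[0] = start[1] = 0;
+ count[0] = count[1] = 1;
+ block[0] = block[1] = edge;
+ stride[0] = stride[1] = edge;
+ H5Sselect_hyperslab(sid, H5S_SELECT_AND, start, stride, count, block);
+}
+#endif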
+
+/****************************************************************
+**
+** ckrbrd_hs_dr_pio_test__verify_data():
+**
+** Examine the supplied buffer to see if it contains the
+** expected data. Return TRUE if it does, and FALSE
+** otherwise.
+**
+** The supplied buffer is presumed to be this process's slice
+** of the target data set. Each such slice will be an
+** n-cube of rank (rank - 1) and the supplied edge_size with
+** origin (mpi_rank, 0, ... , 0) in the target data set.
+**
+** Further, the buffer is presumed to be the result of reading
+** or writing a checker board selection of an m (1 <= m <
+** rank) dimensional slice through this process's slice
+** of the target data set. Also, this slice must be parallel
+** to the fastest changing indices.
+**
+** It is further presumed that the buffer was zeroed before
+** the read/write, and that the full target data set (i.e.
+** the buffer/data set for all processes) was initialized
+** with the natural numbers listed in order from the origin
+** along the fastest changing axis.
+**
+** Thus for a 20x10x10 dataset, the value stored in location
+** (x, y, z) (assuming that z is the fastest changing index
+** and x the slowest) is assumed to be:
+**
+** (10 * 10 * x) + (10 * y) + z
+**
+** Further, supposing that this is process 10, this process's
+** slice of the dataset would be a 10 x 10 2-cube with origin
+** (10, 0, 0) in the data set, and would be initialized (prior
+** to the checkerboard selection) as follows:
+**
+** 1000, 1001, 1002, ... 1008, 1009
+** 1010, 1011, 1012, ... 1018, 1019
+** . . . . .
+** . . . . .
+** . . . . .
+** 1090, 1091, 1092, ... 1098, 1099
+**
+** In the case of a read from the process's slice of another
+** data set of different rank, the values expected will have
+** to be adjusted accordingly. This is done via the
+** first_expected_val parameter.
+**
+** Finally, the function presumes that the first element
+** of the buffer resides at the origin of either
+** a selected or an unselected checker. (Translation:
+** if partial checkers appear in the buffer, they will
+** intersect the edges of the n-cube opposite the origin.)
+**
+****************************************************************/
+
+#define CKRBRD_HS_DR_PIO_TEST__VERIFY_DATA__DEBUG 0
+
+static hbool_t
+ckrbrd_hs_dr_pio_test__verify_data(uint32_t *buf_ptr, const int rank, const int edge_size,
+ const int checker_edge_size, uint32_t first_expected_val,
+ hbool_t buf_starts_in_checker)
+{
+#if CKRBRD_HS_DR_PIO_TEST__VERIFY_DATA__DEBUG
+ const char *fcnName = "ckrbrd_hs_dr_pio_test__verify_data():";
+#endif
+ hbool_t good_data = TRUE;
+ hbool_t in_checker;
+ hbool_t start_in_checker[5];
+ uint32_t expected_value;
+ uint32_t *val_ptr;
+ int i, j, k, l, m; /* to track position in n-cube */
+ int v, w, x, y, z; /* to track position in checker */
+ const int test_max_rank = 5; /* code changes needed if this is increased */
+
+ HDassert(buf_ptr != NULL);
+ HDassert(0 < rank);
+ HDassert(rank <= test_max_rank);
+ HDassert(edge_size >= 6);
+ HDassert(0 < checker_edge_size);
+ HDassert(checker_edge_size <= edge_size);
+ HDassert(test_max_rank <= PAR_SS_DR_MAX_RANK);
+
+#if CKRBRD_HS_DR_PIO_TEST__VERIFY_DATA__DEBUG
+ {
+ int mpi_rank;
+
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+ HDfprintf(stdout, "%s mpi_rank = %d.\n", fcnName, mpi_rank);
+ HDfprintf(stdout, "%s rank = %d.\n", fcnName, rank);
+ HDfprintf(stdout, "%s edge_size = %d.\n", fcnName, edge_size);
+ HDfprintf(stdout, "%s checker_edge_size = %d.\n", fcnName, checker_edge_size);
+ HDfprintf(stdout, "%s first_expected_val = %d.\n", fcnName, (int)first_expected_val);
+ HDfprintf(stdout, "%s starts_in_checker = %d.\n", fcnName, (int)buf_starts_in_checker);
+}
+#endif
+
+val_ptr = buf_ptr;
+expected_value = first_expected_val;
+
+i = 0;
+v = 0;
+start_in_checker[0] = buf_starts_in_checker;
+do {
+ if (v >= checker_edge_size) {
+
+ start_in_checker[0] = !start_in_checker[0];
+ v = 0;
+ }
+
+ j = 0;
+ w = 0;
+ start_in_checker[1] = start_in_checker[0];
+ do {
+ if (w >= checker_edge_size) {
+
+ start_in_checker[1] = !start_in_checker[1];
+ w = 0;
+ }
+
+ k = 0;
+ x = 0;
+ start_in_checker[2] = start_in_checker[1];
+ do {
+ if (x >= checker_edge_size) {
+
+ start_in_checker[2] = !start_in_checker[2];
+ x = 0;
+ }
+
+ l = 0;
+ y = 0;
+ start_in_checker[3] = start_in_checker[2];
+ do {
+ if (y >= checker_edge_size) {
+
+ start_in_checker[3] = !start_in_checker[3];
+ y = 0;
+ }
+
+ m = 0;
+ z = 0;
+#if CKRBRD_HS_DR_PIO_TEST__VERIFY_DATA__DEBUG
+ HDfprintf(stdout, "%d, %d, %d, %d, %d:", i, j, k, l, m);
+#endif
+ in_checker = start_in_checker[3];
+ do {
+#if CKRBRD_HS_DR_PIO_TEST__VERIFY_DATA__DEBUG
+ HDfprintf(stdout, " %d", (int)(*val_ptr));
+#endif
+ if (z >= checker_edge_size) {
+
+ in_checker = !in_checker;
+ z = 0;
+ }
+
+ if (in_checker) {
+
+ if (*val_ptr != expected_value) {
+
+ good_data = FALSE;
+ }
+
+ /* zero out buffer for re-use */
+ *val_ptr = 0;
+ }
+ else if (*val_ptr != 0) {
+
+ good_data = FALSE;
+
+ /* zero out buffer for re-use */
+ *val_ptr = 0;
+ }
+
+ val_ptr++;
+ expected_value++;
+ m++;
+ z++;
+
+ } while ((rank >= (test_max_rank - 4)) && (m < edge_size));
+#if CKRBRD_HS_DR_PIO_TEST__VERIFY_DATA__DEBUG
+ HDfprintf(stdout, "\n");
+#endif
+ l++;
+ y++;
+ } while ((rank >= (test_max_rank - 3)) && (l < edge_size));
+ k++;
+ x++;
+ } while ((rank >= (test_max_rank - 2)) && (k < edge_size));
+ j++;
+ w++;
+ } while ((rank >= (test_max_rank - 1)) && (j < edge_size));
+ i++;
+ v++;
+} while ((rank >= test_max_rank) && (i < edge_size));
+
+return (good_data);
+
+} /* ckrbrd_hs_dr_pio_test__verify_data() */
+
+/*-------------------------------------------------------------------------
+ * Function: ckrbrd_hs_dr_pio_test__d2m_l2s()
+ *
+ * Purpose: Part one of a series of tests of I/O to/from hyperslab
+ * selections of different rank in the parallel case.
+ *
+ * Verify that we can read from disk correctly using checker
+ * board selections of different rank that
+ * H5Sselect_shape_same() views as being of the same shape.
+ *
+ * In this function, we test this by reading small_rank - 1
+ * checker board slices from the on disk large cube, and
+ * verifying that the data read is correct. Verify that
+ * H5Sselect_shape_same() returns true on the memory and
+ * file selections.
+ *
+ * Return: void
+ *
+ * Programmer: JRM -- 9/15/11
+ *
+ *-------------------------------------------------------------------------
+ */
+
+#define CHECKER_BOARD_HS_DR_PIO_TEST__D2M_L2S__DEBUG 0
+
+static void
+ckrbrd_hs_dr_pio_test__d2m_l2s(struct hs_dr_pio_test_vars_t *tv_ptr)
+{
+#if CHECKER_BOARD_HS_DR_PIO_TEST__D2M_L2S__DEBUG
+ const char *fcnName = "ckrbrd_hs_dr_pio_test__d2m_l2s()";
+ uint32_t *ptr_0;
+#endif /* CHECKER_BOARD_HS_DR_PIO_TEST__D2M_L2S__DEBUG */
+ hbool_t data_ok = FALSE;
+ int i, j, k, l;
+ uint32_t expected_value;
+ int mpi_rank; /* needed by VRFY */
+ hsize_t sel_start[PAR_SS_DR_MAX_RANK];
+ htri_t check; /* Shape comparison return value */
+ herr_t ret; /* Generic return value */
+
+ /* initialize the local copy of mpi_rank */
+ mpi_rank = tv_ptr->mpi_rank;
+
+ /* first, verify that we can read from disk correctly using selections
+ * of different rank that H5Sselect_shape_same() views as being of the
+ * same shape.
+ *
+ * Start by reading a (small_rank - 1)-D checker board slice from this
+ * process's slice of the on disk large data set, and verifying that the
+ * data read is correct. Verify that H5Sselect_shape_same() returns
+ * true on the memory and file selections.
+ *
+ * The first step is to set up the needed checker board selection in the
+ * in memory small cube.
+ */
+
+ sel_start[0] = sel_start[1] = sel_start[2] = sel_start[3] = sel_start[4] = 0;
+ sel_start[tv_ptr->small_ds_offset] = (hsize_t)(tv_ptr->mpi_rank);
+
+ ckrbrd_hs_dr_pio_test__slct_ckrbrd(tv_ptr->mpi_rank, tv_ptr->small_ds_slice_sid, tv_ptr->small_rank - 1,
+ tv_ptr->edge_size, tv_ptr->checker_edge_size, tv_ptr->small_rank - 1,
+ sel_start);
+
+ /* zero out the buffer we will be reading into */
+ HDmemset(tv_ptr->small_ds_slice_buf, 0, sizeof(uint32_t) * tv_ptr->small_ds_slice_size);
+
+#if CHECKER_BOARD_HS_DR_PIO_TEST__D2M_L2S__DEBUG
+ HDfprintf(stdout, "%s:%d: initial small_ds_slice_buf = ", fcnName, tv_ptr->mpi_rank);
+ ptr_0 = tv_ptr->small_ds_slice_buf;
+ for (i = 0; i < (int)(tv_ptr->small_ds_slice_size); i++) {
+ HDfprintf(stdout, "%d ", (int)(*ptr_0));
+ ptr_0++;
+ }
+ HDfprintf(stdout, "\n");
+#endif /* CHECKER_BOARD_HS_DR_PIO_TEST__D2M_L2S__DEBUG */
+
+ /* set up start, stride, count, and block -- note that we will
+ * change start[] so as to read slices of the large cube.
+ */
+ for (i = 0; i < PAR_SS_DR_MAX_RANK; i++) {
+
+ tv_ptr->start[i] = 0;
+ tv_ptr->stride[i] = (hsize_t)(2 * tv_ptr->edge_size);
+ tv_ptr->count[i] = 1;
+ if ((PAR_SS_DR_MAX_RANK - i) > (tv_ptr->small_rank - 1)) {
+
+ tv_ptr->block[i] = 1;
+ }
+ else {
+
+ tv_ptr->block[i] = (hsize_t)(tv_ptr->edge_size);
+ }
+ }
+
+#if CHECKER_BOARD_HS_DR_PIO_TEST__D2M_L2S__DEBUG
+ HDfprintf(stdout, "%s:%d: reading slice from big ds on disk into small ds slice.\n", fcnName,
+ tv_ptr->mpi_rank);
+#endif /* CHECKER_BOARD_HS_DR_PIO_TEST__D2M_L2S__DEBUG */
+ /* in serial versions of this test, we loop through all the dimensions
+ * of the large data set. However, in the parallel version, each
+ * process only works with that slice of the large cube indicated
+ * by its rank -- hence we set the most slowly changing index to
+ * mpi_rank, and don't iterate over it.
+ */
+
+ if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 0) {
+
+ i = tv_ptr->mpi_rank;
+ }
+ else {
+
+ i = 0;
+ }
+
+ /* since large_rank is at most PAR_SS_DR_MAX_RANK, no need to
+ * loop over it -- either we are setting i to mpi_rank, or
+ * we are setting it to zero. It will not change during the
+ * test.
+ */
+
+ if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 1) {
+
+ j = tv_ptr->mpi_rank;
+ }
+ else {
+
+ j = 0;
+ }
+
+ do {
+ if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 2) {
+
+ k = tv_ptr->mpi_rank;
+ }
+ else {
+
+ k = 0;
+ }
+
+ do {
+ /* since small_rank >= 2 and large_rank > small_rank, we
+ * have large_rank >= 3. Since PAR_SS_DR_MAX_RANK == 5
+ * (barring major re-organization), this gives us:
+ *
+ * (PAR_SS_DR_MAX_RANK - large_rank) <= 2
+ *
+ * so no need to repeat the test in the outer loops --
+ * just set l = 0.
+ */
+
+ l = 0;
+ do {
+ if ((tv_ptr->skips)++ < tv_ptr->max_skips) { /* skip the test */
+
+ (tv_ptr->tests_skipped)++;
+ }
+ else { /* run the test */
+
+ tv_ptr->skips = 0; /* reset the skips counter */
+
+ /* we know that small_rank - 1 >= 1 and that
+ * large_rank > small_rank by the assertions at the head
+ * of this function. Thus no need for another inner loop.
+ */
+ tv_ptr->start[0] = (hsize_t)i;
+ tv_ptr->start[1] = (hsize_t)j;
+ tv_ptr->start[2] = (hsize_t)k;
+ tv_ptr->start[3] = (hsize_t)l;
+ tv_ptr->start[4] = 0;
+
+ HDassert((tv_ptr->start[0] == 0) || (0 < tv_ptr->small_ds_offset + 1));
+ HDassert((tv_ptr->start[1] == 0) || (1 < tv_ptr->small_ds_offset + 1));
+ HDassert((tv_ptr->start[2] == 0) || (2 < tv_ptr->small_ds_offset + 1));
+ HDassert((tv_ptr->start[3] == 0) || (3 < tv_ptr->small_ds_offset + 1));
+ HDassert((tv_ptr->start[4] == 0) || (4 < tv_ptr->small_ds_offset + 1));
+
+ ckrbrd_hs_dr_pio_test__slct_ckrbrd(
+ tv_ptr->mpi_rank, tv_ptr->file_large_ds_sid_0, tv_ptr->large_rank, tv_ptr->edge_size,
+ tv_ptr->checker_edge_size, tv_ptr->small_rank - 1, tv_ptr->start);
+
+ /* verify that H5Sselect_shape_same() reports the two
+ * selections as having the same shape.
+ */
+ check = H5Sselect_shape_same(tv_ptr->small_ds_slice_sid, tv_ptr->file_large_ds_sid_0);
+ VRFY((check == TRUE), "H5Sselect_shape_same passed");
+
+ /* Read selection from disk */
+#if CHECKER_BOARD_HS_DR_PIO_TEST__D2M_L2S__DEBUG
+ HDfprintf(stdout, "%s:%d: start = %d %d %d %d %d.\n", fcnName, tv_ptr->mpi_rank,
+ tv_ptr->start[0], tv_ptr->start[1], tv_ptr->start[2], tv_ptr->start[3],
+ tv_ptr->start[4]);
+ HDfprintf(stdout, "%s slice/file extent dims = %d/%d.\n", fcnName,
+ H5Sget_simple_extent_ndims(tv_ptr->small_ds_slice_sid),
+ H5Sget_simple_extent_ndims(tv_ptr->file_large_ds_sid_0));
+#endif /* CHECKER_BOARD_HS_DR_PIO_TEST__D2M_L2S__DEBUG */
+
+ ret =
+ H5Dread(tv_ptr->large_dataset, H5T_NATIVE_UINT32, tv_ptr->small_ds_slice_sid,
+ tv_ptr->file_large_ds_sid_0, tv_ptr->xfer_plist, tv_ptr->small_ds_slice_buf);
+ VRFY((ret >= 0), "H5Dread() slice from large ds succeeded.");
+
+#if CHECKER_BOARD_HS_DR_PIO_TEST__D2M_L2S__DEBUG
+ HDfprintf(stdout, "%s:%d: H5Dread() returns.\n", fcnName, tv_ptr->mpi_rank);
+#endif /* CHECKER_BOARD_HS_DR_PIO_TEST__D2M_L2S__DEBUG */
+
+ /* verify that expected data is retrieved */
+
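+ /* The large data set is loaded with sequentially increasing values at
+ * setup time, so the value expected at the start of the selected slice
+ * can be computed directly from (i, j, k, l) and edge_size; verify_data()
+ * then checks the checker board pattern starting from that value.
+ */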
+ expected_value =
+ (uint32_t)((i * tv_ptr->edge_size * tv_ptr->edge_size * tv_ptr->edge_size *
+ tv_ptr->edge_size) +
+ (j * tv_ptr->edge_size * tv_ptr->edge_size * tv_ptr->edge_size) +
+ (k * tv_ptr->edge_size * tv_ptr->edge_size) + (l * tv_ptr->edge_size));
+
+ data_ok = ckrbrd_hs_dr_pio_test__verify_data(
+ tv_ptr->small_ds_slice_buf, tv_ptr->small_rank - 1, tv_ptr->edge_size,
+ tv_ptr->checker_edge_size, expected_value, (hbool_t)TRUE);
+
+ VRFY((data_ok == TRUE), "small slice read from large ds data good.");
+
+ (tv_ptr->tests_run)++;
+ }
+
+ l++;
+
+ (tv_ptr->total_tests)++;
+
+ } while ((tv_ptr->large_rank > 2) && ((tv_ptr->small_rank - 1) <= 1) && (l < tv_ptr->edge_size));
+ k++;
+ } while ((tv_ptr->large_rank > 3) && ((tv_ptr->small_rank - 1) <= 2) && (k < tv_ptr->edge_size));
+ j++;
+ } while ((tv_ptr->large_rank > 4) && ((tv_ptr->small_rank - 1) <= 3) && (j < tv_ptr->edge_size));
+
+ return;
+
+} /* ckrbrd_hs_dr_pio_test__d2m_l2s() */
+
+/*-------------------------------------------------------------------------
+ * Function: ckrbrd_hs_dr_pio_test__d2m_s2l()
+ *
+ * Purpose: Part two of a series of tests of I/O to/from hyperslab
+ * selections of different rank in the parallel case.
+ *
+ * Verify that we can read from disk correctly using
+ * selections of different rank that H5Sselect_shape_same()
+ * views as being of the same shape.
+ *
+ * In this function, we test this by reading checker board
+ * slices of the on disk small data set into slices through
+ * the in memory large data set, and verify that the correct
+ * data (and only the correct data) is read.
+ *
+ * Return: void
+ *
+ * Programmer: JRM -- 8/15/11
+ *
+ *-------------------------------------------------------------------------
+ */
+
+#define CHECKER_BOARD_HS_DR_PIO_TEST__D2M_S2L__DEBUG 0
+
+static void
+ckrbrd_hs_dr_pio_test__d2m_s2l(struct hs_dr_pio_test_vars_t *tv_ptr)
+{
+#if CHECKER_BOARD_HS_DR_PIO_TEST__D2M_S2L__DEBUG
+ const char *fcnName = "ckrbrd_hs_dr_pio_test__d2m_s2l()";
+#endif /* CHECKER_BOARD_HS_DR_PIO_TEST__D2M_S2L__DEBUG */
+ hbool_t data_ok = FALSE;
+ int i, j, k, l;
+ size_t u;
+ size_t start_index;
+ size_t stop_index;
+ uint32_t expected_value;
+ uint32_t *ptr_1;
+ int mpi_rank; /* needed by VRFY */
+ hsize_t sel_start[PAR_SS_DR_MAX_RANK];
+ htri_t check; /* Shape comparison return value */
+ herr_t ret; /* Generic return value */
+
+ /* initialize the local copy of mpi_rank */
+ mpi_rank = tv_ptr->mpi_rank;
+
+ /* similarly, read slices of the on disk small data set into slices
+ * through the in memory large data set, and verify that the correct
+ * data (and only the correct data) is read.
+ */
+
+ sel_start[0] = sel_start[1] = sel_start[2] = sel_start[3] = sel_start[4] = 0;
+ sel_start[tv_ptr->small_ds_offset] = (hsize_t)(tv_ptr->mpi_rank);
+
+ ckrbrd_hs_dr_pio_test__slct_ckrbrd(tv_ptr->mpi_rank, tv_ptr->file_small_ds_sid_0, tv_ptr->small_rank,
+ tv_ptr->edge_size, tv_ptr->checker_edge_size, tv_ptr->small_rank - 1,
+ sel_start);
+
+#if CHECKER_BOARD_HS_DR_PIO_TEST__D2M_S2L__DEBUG
+ HDfprintf(stdout, "%s reading slices of on disk small data set into slices of big data set.\n", fcnName);
+#endif /* CHECKER_BOARD_HS_DR_PIO_TEST__D2M_S2L__DEBUG */
+
+ /* zero out the buffer we will be reading into */
+ HDmemset(tv_ptr->large_ds_buf_1, 0, sizeof(uint32_t) * tv_ptr->large_ds_size);
+
+ /* set up start, stride, count, and block -- note that we will
+ * change start[] so as to read the slice of the small data set
+ * into different slices of the process slice of the large data
+ * set.
+ */
+ for (i = 0; i < PAR_SS_DR_MAX_RANK; i++) {
+
+ tv_ptr->start[i] = 0;
+ tv_ptr->stride[i] = (hsize_t)(2 * tv_ptr->edge_size);
+ tv_ptr->count[i] = 1;
+ if ((PAR_SS_DR_MAX_RANK - i) > (tv_ptr->small_rank - 1)) {
+
+ tv_ptr->block[i] = 1;
+ }
+ else {
+
+ tv_ptr->block[i] = (hsize_t)(tv_ptr->edge_size);
+ }
+ }
+
+ /* in serial versions of this test, we loop through all the dimensions
+ * of the large data set that don't appear in the small data set.
+ *
+ * However, in the parallel version, each process only works with that
+ * slice of the large (and small) data set indicated by its rank -- hence
+ * we set the most slowly changing index to mpi_rank, and don't iterate
+ * over it.
+ */
+
+ if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 0) {
+
+ i = tv_ptr->mpi_rank;
+ }
+ else {
+
+ i = 0;
+ }
+
+ /* since large_rank is at most PAR_SS_DR_MAX_RANK, no need to
+ * loop over it -- either we are setting i to mpi_rank, or
+ * we are setting it to zero. It will not change during the
+ * test.
+ */
+
+ if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 1) {
+
+ j = tv_ptr->mpi_rank;
+ }
+ else {
+
+ j = 0;
+ }
+
+ do {
+ if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 2) {
+
+ k = tv_ptr->mpi_rank;
+ }
+ else {
+
+ k = 0;
+ }
+
+ do {
+ /* since small_rank >= 2 and large_rank > small_rank, we
+ * have large_rank >= 3. Since PAR_SS_DR_MAX_RANK == 5
+ * (barring major re-organization), this gives us:
+ *
+ * (PAR_SS_DR_MAX_RANK - large_rank) <= 2
+ *
+ * so no need to repeat the test in the outer loops --
+ * just set l = 0.
+ */
+
+ l = 0;
+ do {
+ if ((tv_ptr->skips)++ < tv_ptr->max_skips) { /* skip the test */
+
+ (tv_ptr->tests_skipped)++;
+ }
+ else { /* run the test */
+
+ tv_ptr->skips = 0; /* reset the skips counter */
+
+ /* we know that small_rank >= 1 and that large_rank > small_rank
+ * by the assertions at the head of this function. Thus no
+ * need for another inner loop.
+ */
+ tv_ptr->start[0] = (hsize_t)i;
+ tv_ptr->start[1] = (hsize_t)j;
+ tv_ptr->start[2] = (hsize_t)k;
+ tv_ptr->start[3] = (hsize_t)l;
+ tv_ptr->start[4] = 0;
+
+ HDassert((tv_ptr->start[0] == 0) || (0 < tv_ptr->small_ds_offset + 1));
+ HDassert((tv_ptr->start[1] == 0) || (1 < tv_ptr->small_ds_offset + 1));
+ HDassert((tv_ptr->start[2] == 0) || (2 < tv_ptr->small_ds_offset + 1));
+ HDassert((tv_ptr->start[3] == 0) || (3 < tv_ptr->small_ds_offset + 1));
+ HDassert((tv_ptr->start[4] == 0) || (4 < tv_ptr->small_ds_offset + 1));
+
+ ckrbrd_hs_dr_pio_test__slct_ckrbrd(
+ tv_ptr->mpi_rank, tv_ptr->mem_large_ds_sid, tv_ptr->large_rank, tv_ptr->edge_size,
+ tv_ptr->checker_edge_size, tv_ptr->small_rank - 1, tv_ptr->start);
+
+ /* verify that H5Sselect_shape_same() reports the two
+ * selections as having the same shape.
+ */
+ check = H5Sselect_shape_same(tv_ptr->file_small_ds_sid_0, tv_ptr->mem_large_ds_sid);
+ VRFY((check == TRUE), "H5Sselect_shape_same passed");
+
+ /* Read selection from disk */
+#if CHECKER_BOARD_HS_DR_PIO_TEST__D2M_S2L__DEBUG
+ HDfprintf(stdout, "%s:%d: start = %d %d %d %d %d.\n", fcnName, tv_ptr->mpi_rank,
+ tv_ptr->start[0], tv_ptr->start[1], tv_ptr->start[2], tv_ptr->start[3],
+ tv_ptr->start[4]);
+ HDfprintf(stdout, "%s:%d: mem/file extent dims = %d/%d.\n", fcnName, tv_ptr->mpi_rank,
+ H5Sget_simple_extent_ndims(tv_ptr->large_ds_slice_sid),
+ H5Sget_simple_extent_ndims(tv_ptr->file_small_ds_sid_0));
+#endif /* CHECKER_BOARD_HS_DR_PIO_TEST__D2M_S2L__DEBUG */
+ ret = H5Dread(tv_ptr->small_dataset, H5T_NATIVE_UINT32, tv_ptr->mem_large_ds_sid,
+ tv_ptr->file_small_ds_sid_0, tv_ptr->xfer_plist, tv_ptr->large_ds_buf_1);
+ VRFY((ret >= 0), "H5Dread() slice from small ds succeeded.");
+
+ /* verify that the expected data and only the
+ * expected data was read.
+ */
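+ /* The large buffer is checked in three pieces: everything before
+ * start_index must still be zero, the small_ds_slice_size elements
+ * beginning at start_index must hold the checker board just read from
+ * this process's slice of the small data set, and everything after
+ * stop_index must still be zero.
+ */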
+ data_ok = TRUE;
+ ptr_1 = tv_ptr->large_ds_buf_1;
+ expected_value = (uint32_t)((size_t)(tv_ptr->mpi_rank) * tv_ptr->small_ds_slice_size);
+ start_index =
+ (size_t)((i * tv_ptr->edge_size * tv_ptr->edge_size * tv_ptr->edge_size *
+ tv_ptr->edge_size) +
+ (j * tv_ptr->edge_size * tv_ptr->edge_size * tv_ptr->edge_size) +
+ (k * tv_ptr->edge_size * tv_ptr->edge_size) + (l * tv_ptr->edge_size));
+ stop_index = start_index + tv_ptr->small_ds_slice_size - 1;
+
+#if CHECKER_BOARD_HS_DR_PIO_TEST__D2M_S2L__DEBUG
+ {
+ int m, n;
+
+ HDfprintf(stdout, "%s:%d: expected_value = %d.\n", fcnName, tv_ptr->mpi_rank,
+ expected_value);
+ HDfprintf(stdout, "%s:%d: start/stop index = %d/%d.\n", fcnName, tv_ptr->mpi_rank,
+ start_index, stop_index);
+ n = 0;
+ for (m = 0; (unsigned)m < tv_ptr->large_ds_size; m++) {
+ HDfprintf(stdout, "%d ", (int)(*ptr_1));
+ ptr_1++;
+ n++;
+ if (n >= tv_ptr->edge_size) {
+ HDfprintf(stdout, "\n");
+ n = 0;
+ }
+ }
+ HDfprintf(stdout, "\n");
+ ptr_1 = tv_ptr->large_ds_buf_1;
+ }
+#endif /* CHECKER_BOARD_HS_DR_PIO_TEST__D2M_S2L__DEBUG */
+
+ HDassert(start_index < stop_index);
+ HDassert(stop_index <= tv_ptr->large_ds_size);
+
+ for (u = 0; u < start_index; u++) {
+
+ if (*ptr_1 != 0) {
+
+ data_ok = FALSE;
+ }
+
+ /* zero out the value for the next pass */
+ *ptr_1 = 0;
+
+ ptr_1++;
+ }
+
+ VRFY((data_ok == TRUE), "slice read from small to large ds data good(1).");
+
+ data_ok = ckrbrd_hs_dr_pio_test__verify_data(ptr_1, tv_ptr->small_rank - 1,
+ tv_ptr->edge_size, tv_ptr->checker_edge_size,
+ expected_value, (hbool_t)TRUE);
+
+ VRFY((data_ok == TRUE), "slice read from small to large ds data good(2).");
+
+ ptr_1 = tv_ptr->large_ds_buf_1 + stop_index + 1;
+
+ for (u = stop_index + 1; u < tv_ptr->large_ds_size; u++) {
+
+ if (*ptr_1 != 0) {
+
+ data_ok = FALSE;
+ }
+
+ /* zero out the value for the next pass */
+ *ptr_1 = 0;
+
+ ptr_1++;
+ }
+
+ VRFY((data_ok == TRUE), "slice read from small to large ds data good(3).");
+
+ (tv_ptr->tests_run)++;
+ }
+
+ l++;
+
+ (tv_ptr->total_tests)++;
+
+ } while ((tv_ptr->large_rank > 2) && ((tv_ptr->small_rank - 1) <= 1) && (l < tv_ptr->edge_size));
+ k++;
+ } while ((tv_ptr->large_rank > 3) && ((tv_ptr->small_rank - 1) <= 2) && (k < tv_ptr->edge_size));
+ j++;
+ } while ((tv_ptr->large_rank > 4) && ((tv_ptr->small_rank - 1) <= 3) && (j < tv_ptr->edge_size));
+
+ return;
+
+} /* ckrbrd_hs_dr_pio_test__d2m_s2l() */
+
+/*-------------------------------------------------------------------------
+ * Function: ckrbrd_hs_dr_pio_test__m2d_l2s()
+ *
+ * Purpose: Part three of a series of tests of I/O to/from checker
+ * board hyperslab selections of different rank in the
+ * parallel case.
+ *
+ * Verify that we can write from memory to file using checker
+ * board selections of different rank that
+ * H5Sselect_shape_same() views as being of the same shape.
+ *
+ * Do this by writing (small_rank - 1)-dimensional checker
+ * board slices from the in memory large data set to the on
+ * disk small cube dataset. After each write, read the
+ * slice of the small dataset back from disk, and verify
+ * that it contains the expected data. Verify that
+ * H5Sselect_shape_same() returns true on the memory and
+ * file selections.
+ *
+ * Return: void
+ *
+ * Programmer: JRM -- 8/15/11
+ *
+ *-------------------------------------------------------------------------
+ */
+
+#define CHECKER_BOARD_HS_DR_PIO_TEST__M2D_L2S__DEBUG 0
+
+static void
+ckrbrd_hs_dr_pio_test__m2d_l2s(struct hs_dr_pio_test_vars_t *tv_ptr)
+{
+#if CHECKER_BOARD_HS_DR_PIO_TEST__M2D_L2S__DEBUG
+ const char *fcnName = "ckrbrd_hs_dr_pio_test__m2d_l2s()";
+#endif /* CHECKER_BOARD_HS_DR_PIO_TEST__M2D_L2S__DEBUG */
+ hbool_t data_ok = FALSE;
+ int i, j, k, l;
+ size_t u;
+ size_t start_index;
+ size_t stop_index;
+ uint32_t expected_value;
+ uint32_t *ptr_1;
+ int mpi_rank; /* needed by VRFY */
+ hsize_t sel_start[PAR_SS_DR_MAX_RANK];
+ htri_t check; /* Shape comparison return value */
+ herr_t ret; /* Generic return value */
+
+ /* initialize the local copy of mpi_rank */
+ mpi_rank = tv_ptr->mpi_rank;
+
+ /* now we go in the opposite direction, verifying that we can write
+ * from memory to file using selections of different rank that
+ * H5Sselect_shape_same() views as being of the same shape.
+ *
+ * Start by writing (small_rank - 1)-D slices from the in memory large data
+ * set to the on disk small dataset. After each write, read the slice of
+ * the small dataset back from disk, and verify that it contains the
+ * expected data. Verify that H5Sselect_shape_same() returns true on
+ * the memory and file selections.
+ */
+
+ tv_ptr->start[0] = (hsize_t)(tv_ptr->mpi_rank);
+ tv_ptr->stride[0] = (hsize_t)(2 * (tv_ptr->mpi_size + 1));
+ tv_ptr->count[0] = 1;
+ tv_ptr->block[0] = 1;
+
+ for (i = 1; i < tv_ptr->large_rank; i++) {
+
+ tv_ptr->start[i] = 0;
+ tv_ptr->stride[i] = (hsize_t)(2 * tv_ptr->edge_size);
+ tv_ptr->count[i] = 1;
+ tv_ptr->block[i] = (hsize_t)(tv_ptr->edge_size);
+ }
+
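+ /* Select this process's slice of the small data set on both the file and
+ * memory dataspaces: a single index (mpi_rank) in dimension 0 and the full
+ * extent in the remaining dimensions.  These selections are used below both
+ * to zero the slice on disk and to read it back for verification.
+ */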
+ ret = H5Sselect_hyperslab(tv_ptr->file_small_ds_sid_0, H5S_SELECT_SET, tv_ptr->start, tv_ptr->stride,
+ tv_ptr->count, tv_ptr->block);
+ VRFY((ret >= 0), "H5Sselect_hyperslab(file_small_ds_sid_0, set) succeeded");
+
+ ret = H5Sselect_hyperslab(tv_ptr->mem_small_ds_sid, H5S_SELECT_SET, tv_ptr->start, tv_ptr->stride,
+ tv_ptr->count, tv_ptr->block);
+ VRFY((ret >= 0), "H5Sselect_hyperslab(mem_small_ds_sid, set) succeeded");
+
+ sel_start[0] = sel_start[1] = sel_start[2] = sel_start[3] = sel_start[4] = 0;
+ sel_start[tv_ptr->small_ds_offset] = (hsize_t)(tv_ptr->mpi_rank);
+
+ ckrbrd_hs_dr_pio_test__slct_ckrbrd(tv_ptr->mpi_rank, tv_ptr->file_small_ds_sid_1, tv_ptr->small_rank,
+ tv_ptr->edge_size, tv_ptr->checker_edge_size, tv_ptr->small_rank - 1,
+ sel_start);
+
+ /* set up start, stride, count, and block -- note that we will
+ * change start[] so as to read slices of the large cube.
+ */
+ for (i = 0; i < PAR_SS_DR_MAX_RANK; i++) {
+
+ tv_ptr->start[i] = 0;
+ tv_ptr->stride[i] = (hsize_t)(2 * tv_ptr->edge_size);
+ tv_ptr->count[i] = 1;
+ if ((PAR_SS_DR_MAX_RANK - i) > (tv_ptr->small_rank - 1)) {
+
+ tv_ptr->block[i] = 1;
+ }
+ else {
+
+ tv_ptr->block[i] = (hsize_t)(tv_ptr->edge_size);
+ }
+ }
+
+ /* zero out the in memory small ds */
+ HDmemset(tv_ptr->small_ds_buf_1, 0, sizeof(uint32_t) * tv_ptr->small_ds_size);
+
+#if CHECKER_BOARD_HS_DR_PIO_TEST__M2D_L2S__DEBUG
+ HDfprintf(stdout,
+ "%s writing checker boards selections of slices from big ds to slices of small ds on disk.\n",
+ fcnName);
+#endif /* CHECKER_BOARD_HS_DR_PIO_TEST__M2D_L2S__DEBUG */
+
+ /* in serial versions of this test, we loop through all the dimensions
+ * of the large data set that don't appear in the small data set.
+ *
+ * However, in the parallel version, each process only works with that
+ * slice of the large (and small) data set indicated by its rank -- hence
+ * we set the most slowly changing index to mpi_rank, and don't iterate
+ * over it.
+ */
+
+ if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 0) {
+
+ i = tv_ptr->mpi_rank;
+ }
+ else {
+
+ i = 0;
+ }
+
+ /* since large_rank is at most PAR_SS_DR_MAX_RANK, no need to
+ * loop over it -- either we are setting i to mpi_rank, or
+ * we are setting it to zero. It will not change during the
+ * test.
+ */
+
+ if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 1) {
+
+ j = tv_ptr->mpi_rank;
+ }
+ else {
+
+ j = 0;
+ }
+
+ do {
+ if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 2) {
+
+ k = tv_ptr->mpi_rank;
+ }
+ else {
+
+ k = 0;
+ }
+
+ do {
+ /* since small_rank >= 2 and large_rank > small_rank, we
+ * have large_rank >= 3. Since PAR_SS_DR_MAX_RANK == 5
+ * (barring major re-organization), this gives us:
+ *
+ * (PAR_SS_DR_MAX_RANK - large_rank) <= 2
+ *
+ * so no need to repeat the test in the outer loops --
+ * just set l = 0.
+ */
+
+ l = 0;
+ do {
+ if ((tv_ptr->skips)++ < tv_ptr->max_skips) { /* skip the test */
+
+ (tv_ptr->tests_skipped)++;
+ }
+ else { /* run the test */
+
+ tv_ptr->skips = 0; /* reset the skips counter */
+
+ /* we know that small_rank >= 1 and that large_rank > small_rank
+ * by the assertions at the head of this function. Thus no
+ * need for another inner loop.
+ */
+
+ /* zero out this rank's slice of the on disk small data set */
+ ret = H5Dwrite(tv_ptr->small_dataset, H5T_NATIVE_UINT32, tv_ptr->mem_small_ds_sid,
+ tv_ptr->file_small_ds_sid_0, tv_ptr->xfer_plist, tv_ptr->small_ds_buf_2);
+ VRFY((ret >= 0), "H5Dwrite() zero slice to small ds succeeded.");
+
+ /* select the portion of the in memory large cube from which we
+ * are going to write data.
+ */
+ tv_ptr->start[0] = (hsize_t)i;
+ tv_ptr->start[1] = (hsize_t)j;
+ tv_ptr->start[2] = (hsize_t)k;
+ tv_ptr->start[3] = (hsize_t)l;
+ tv_ptr->start[4] = 0;
+
+ HDassert((tv_ptr->start[0] == 0) || (0 < tv_ptr->small_ds_offset + 1));
+ HDassert((tv_ptr->start[1] == 0) || (1 < tv_ptr->small_ds_offset + 1));
+ HDassert((tv_ptr->start[2] == 0) || (2 < tv_ptr->small_ds_offset + 1));
+ HDassert((tv_ptr->start[3] == 0) || (3 < tv_ptr->small_ds_offset + 1));
+ HDassert((tv_ptr->start[4] == 0) || (4 < tv_ptr->small_ds_offset + 1));
+
+ ckrbrd_hs_dr_pio_test__slct_ckrbrd(
+ tv_ptr->mpi_rank, tv_ptr->mem_large_ds_sid, tv_ptr->large_rank, tv_ptr->edge_size,
+ tv_ptr->checker_edge_size, tv_ptr->small_rank - 1, tv_ptr->start);
+
+ /* verify that H5Sselect_shape_same() reports the in
+ * memory checkerboard selection of the slice through the
+ * large dataset and the checkerboard selection of the process
+ * slice of the small data set as having the same shape.
+ */
+ check = H5Sselect_shape_same(tv_ptr->file_small_ds_sid_1, tv_ptr->mem_large_ds_sid);
+ VRFY((check == TRUE), "H5Sselect_shape_same passed.");
+
+ /* write the checker board selection of the slice from the in
+ * memory large data set to the slice of the on disk small
+ * dataset.
+ */
+#if CHECKER_BOARD_HS_DR_PIO_TEST__M2D_L2S__DEBUG
+ HDfprintf(stdout, "%s:%d: start = %d %d %d %d %d.\n", fcnName, tv_ptr->mpi_rank,
+ tv_ptr->start[0], tv_ptr->start[1], tv_ptr->start[2], tv_ptr->start[3],
+ tv_ptr->start[4]);
+ HDfprintf(stdout, "%s:%d: mem/file extent dims = %d/%d.\n", fcnName, tv_ptr->mpi_rank,
+ H5Sget_simple_extent_ndims(tv_ptr->mem_large_ds_sid),
+ H5Sget_simple_extent_ndims(tv_ptr->file_small_ds_sid_1));
+#endif /* CHECKER_BOARD_HS_DR_PIO_TEST__M2D_L2S__DEBUG */
+ ret = H5Dwrite(tv_ptr->small_dataset, H5T_NATIVE_UINT32, tv_ptr->mem_large_ds_sid,
+ tv_ptr->file_small_ds_sid_1, tv_ptr->xfer_plist, tv_ptr->large_ds_buf_0);
+ VRFY((ret >= 0), "H5Dwrite() slice to small ds succeeded.");
+
+ /* read the on disk process slice of the small dataset into memory */
+ ret = H5Dread(tv_ptr->small_dataset, H5T_NATIVE_UINT32, tv_ptr->mem_small_ds_sid,
+ tv_ptr->file_small_ds_sid_0, tv_ptr->xfer_plist, tv_ptr->small_ds_buf_1);
+ VRFY((ret >= 0), "H5Dread() slice from small ds succeeded.");
+
+ /* verify that expected data is retrieved */
+
+ expected_value =
+ (uint32_t)((i * tv_ptr->edge_size * tv_ptr->edge_size * tv_ptr->edge_size *
+ tv_ptr->edge_size) +
+ (j * tv_ptr->edge_size * tv_ptr->edge_size * tv_ptr->edge_size) +
+ (k * tv_ptr->edge_size * tv_ptr->edge_size) + (l * tv_ptr->edge_size));
+
+ start_index = (size_t)(tv_ptr->mpi_rank) * tv_ptr->small_ds_slice_size;
+ stop_index = start_index + tv_ptr->small_ds_slice_size - 1;
+
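+ /* This process's slice of the small data set occupies the
+ * small_ds_slice_size elements starting at offset
+ * mpi_rank * small_ds_slice_size in small_ds_buf_1.  Everything outside
+ * [start_index, stop_index] must still be zero, and the slice itself must
+ * hold the checker board just written from the large data set.
+ */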
+ HDassert(start_index < stop_index);
+ HDassert(stop_index <= tv_ptr->small_ds_size);
+
+ data_ok = TRUE;
+
+ ptr_1 = tv_ptr->small_ds_buf_1;
+ for (u = 0; u < start_index; u++, ptr_1++) {
+
+ if (*ptr_1 != 0) {
+
+ data_ok = FALSE;
+ *ptr_1 = 0;
+ }
+ }
+
+ data_ok &= ckrbrd_hs_dr_pio_test__verify_data(
+ tv_ptr->small_ds_buf_1 + start_index, tv_ptr->small_rank - 1, tv_ptr->edge_size,
+ tv_ptr->checker_edge_size, expected_value, (hbool_t)TRUE);
+
+ ptr_1 = tv_ptr->small_ds_buf_1 + stop_index + 1;
+ for (u = stop_index + 1; u < tv_ptr->small_ds_size; u++, ptr_1++) {
+
+ if (*ptr_1 != 0) {
+
+ data_ok = FALSE;
+ *ptr_1 = 0;
+ }
+ }
+
+ VRFY((data_ok == TRUE), "large ds slice write to small ds slice data good.");
+
+ (tv_ptr->tests_run)++;
+ }
+
+ l++;
+
+ (tv_ptr->total_tests)++;
+
+ } while ((tv_ptr->large_rank > 2) && ((tv_ptr->small_rank - 1) <= 1) && (l < tv_ptr->edge_size));
+ k++;
+ } while ((tv_ptr->large_rank > 3) && ((tv_ptr->small_rank - 1) <= 2) && (k < tv_ptr->edge_size));
+ j++;
+ } while ((tv_ptr->large_rank > 4) && ((tv_ptr->small_rank - 1) <= 3) && (j < tv_ptr->edge_size));
+
+ return;
+
+} /* ckrbrd_hs_dr_pio_test__m2d_l2s() */
+
+/*-------------------------------------------------------------------------
+ * Function: ckrbrd_hs_dr_pio_test__m2d_s2l()
+ *
+ * Purpose: Part four of a series of tests of I/O to/from checker
+ * board hyperslab selections of different rank in the parallel case.
+ *
+ * Verify that we can write from memory to file using
+ * selections of different rank that H5Sselect_shape_same()
+ * views as being of the same shape.
+ *
+ * Do this by writing checker board selections of the contents
+ * of the process's slice of the in memory small data set to
+ * slices of the on disk large data set. After each write,
+ * read the process's slice of the large data set back into
+ * memory, and verify that it contains the expected data.
+ *
+ * Verify that H5Sselect_shape_same() returns true on the
+ * memory and file selections.
+ *
+ * Return: void
+ *
+ * Programmer: JRM -- 8/15/11
+ *
+ *-------------------------------------------------------------------------
+ */
+
+#define CHECKER_BOARD_HS_DR_PIO_TEST__M2D_S2L__DEBUG 0
+
+static void
+ckrbrd_hs_dr_pio_test__m2d_s2l(struct hs_dr_pio_test_vars_t *tv_ptr)
+{
+#if CHECKER_BOARD_HS_DR_PIO_TEST__M2D_S2L__DEBUG
+ const char *fcnName = "ckrbrd_hs_dr_pio_test__m2d_s2l()";
+#endif /* CHECKER_BOARD_HS_DR_PIO_TEST__M2D_S2L__DEBUG */
+ hbool_t data_ok = FALSE;
+ int i, j, k, l;
+ size_t u;
+ size_t start_index;
+ size_t stop_index;
+ uint32_t expected_value;
+ uint32_t *ptr_1;
+ int mpi_rank; /* needed by VRFY */
+ hsize_t sel_start[PAR_SS_DR_MAX_RANK];
+ htri_t check; /* Shape comparison return value */
+ herr_t ret; /* Generic return value */
+
+ /* initialize the local copy of mpi_rank */
+ mpi_rank = tv_ptr->mpi_rank;
+
+ /* Now write the contents of the process's slice of the in memory
+ * small data set to slices of the on disk large data set. After
+ * each write, read the process's slice of the large data set back
+ * into memory, and verify that it contains the expected data.
+ * Verify that H5Sselect_shape_same() returns true on the memory
+ * and file selections.
+ */
+
+ tv_ptr->start[0] = (hsize_t)(tv_ptr->mpi_rank);
+ tv_ptr->stride[0] = (hsize_t)(2 * (tv_ptr->mpi_size + 1));
+ tv_ptr->count[0] = 1;
+ tv_ptr->block[0] = 1;
+
+ for (i = 1; i < tv_ptr->large_rank; i++) {
+
+ tv_ptr->start[i] = 0;
+ tv_ptr->stride[i] = (hsize_t)(2 * tv_ptr->edge_size);
+ tv_ptr->count[i] = 1;
+ tv_ptr->block[i] = (hsize_t)(tv_ptr->edge_size);
+ }
+
+ ret = H5Sselect_hyperslab(tv_ptr->file_large_ds_sid_0, H5S_SELECT_SET, tv_ptr->start, tv_ptr->stride,
+ tv_ptr->count, tv_ptr->block);
+ VRFY((ret >= 0), "H5Sselect_hyperslab(file_large_ds_sid_0, set) succeeded");
+
+ ret = H5Sselect_hyperslab(tv_ptr->mem_large_ds_sid, H5S_SELECT_SET, tv_ptr->start, tv_ptr->stride,
+ tv_ptr->count, tv_ptr->block);
+ VRFY((ret >= 0), "H5Sselect_hyperslab(tv_ptr->mem_large_ds_sid, set) succeeded");
+
+ /* setup a checkerboard selection of the slice of the in memory small
+ * data set associated with the process's mpi rank.
+ */
+
+ sel_start[0] = sel_start[1] = sel_start[2] = sel_start[3] = sel_start[4] = 0;
+ sel_start[tv_ptr->small_ds_offset] = (hsize_t)(tv_ptr->mpi_rank);
+
+ ckrbrd_hs_dr_pio_test__slct_ckrbrd(tv_ptr->mpi_rank, tv_ptr->mem_small_ds_sid, tv_ptr->small_rank,
+ tv_ptr->edge_size, tv_ptr->checker_edge_size, tv_ptr->small_rank - 1,
+ sel_start);
+
+ /* set up start, stride, count, and block -- note that we will
+ * change start[] so as to write checkerboard selections of slices
+ * of the small data set to slices of the large data set.
+ */
+ for (i = 0; i < PAR_SS_DR_MAX_RANK; i++) {
+
+ tv_ptr->start[i] = 0;
+ tv_ptr->stride[i] = (hsize_t)(2 * tv_ptr->edge_size);
+ tv_ptr->count[i] = 1;
+ if ((PAR_SS_DR_MAX_RANK - i) > (tv_ptr->small_rank - 1)) {
+
+ tv_ptr->block[i] = 1;
+ }
+ else {
+
+ tv_ptr->block[i] = (hsize_t)(tv_ptr->edge_size);
+ }
+ }
+
+ /* zero out the in memory large ds */
+ HDmemset(tv_ptr->large_ds_buf_1, 0, sizeof(uint32_t) * tv_ptr->large_ds_size);
+
+#if CHECKER_BOARD_HS_DR_PIO_TEST__M2D_S2L__DEBUG
+ HDfprintf(stdout,
+ "%s writing process checkerboard selections of slices of small ds to process slices of large "
+ "ds on disk.\n",
+ fcnName);
+#endif /* CHECKER_BOARD_HS_DR_PIO_TEST__M2D_S2L__DEBUG */
+
+ if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 0) {
+
+ i = tv_ptr->mpi_rank;
+ }
+ else {
+
+ i = 0;
+ }
+
+ /* since large_rank is at most PAR_SS_DR_MAX_RANK, no need to
+ * loop over it -- either we are setting i to mpi_rank, or
+ * we are setting it to zero. It will not change during the
+ * test.
+ */
+
+ if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 1) {
+
+ j = tv_ptr->mpi_rank;
+ }
+ else {
+
+ j = 0;
+ }
+
+ do {
+ if (PAR_SS_DR_MAX_RANK - tv_ptr->large_rank == 2) {
+
+ k = tv_ptr->mpi_rank;
+ }
+ else {
+
+ k = 0;
+ }
+
+ do {
+ /* since small_rank >= 2 and large_rank > small_rank, we
+ * have large_rank >= 3. Since PAR_SS_DR_MAX_RANK == 5
+ * (barring major re-organization), this gives us:
+ *
+ * (PAR_SS_DR_MAX_RANK - large_rank) <= 2
+ *
+ * so no need to repeat the test in the outer loops --
+ * just set l = 0.
+ */
+
+ l = 0;
+ do {
+ if ((tv_ptr->skips)++ < tv_ptr->max_skips) { /* skip the test */
+
+ (tv_ptr->tests_skipped)++;
+ }
+ else { /* run the test */
+
+ tv_ptr->skips = 0; /* reset the skips counter */
+
+ /* we know that small_rank >= 1 and that large_rank > small_rank
+ * by the assertions at the head of this function. Thus no
+ * need for another inner loop.
+ */
+
+ /* Zero out this process's slice of the on disk large data set.
+ * Note that this will leave one slice with its original data
+ * as there is one more slice than processes.
+ */
+ ret = H5Dwrite(tv_ptr->large_dataset, H5T_NATIVE_UINT32, tv_ptr->mem_large_ds_sid,
+ tv_ptr->file_large_ds_sid_0, tv_ptr->xfer_plist, tv_ptr->large_ds_buf_2);
+ VRFY((ret != FAIL), "H5Dwrite() to zero large ds succeeded");
+
+ /* select the portion of the on disk large data set to which we
+ * are going to write data.
+ */
+ tv_ptr->start[0] = (hsize_t)i;
+ tv_ptr->start[1] = (hsize_t)j;
+ tv_ptr->start[2] = (hsize_t)k;
+ tv_ptr->start[3] = (hsize_t)l;
+ tv_ptr->start[4] = 0;
+
+ HDassert((tv_ptr->start[0] == 0) || (0 < tv_ptr->small_ds_offset + 1));
+ HDassert((tv_ptr->start[1] == 0) || (1 < tv_ptr->small_ds_offset + 1));
+ HDassert((tv_ptr->start[2] == 0) || (2 < tv_ptr->small_ds_offset + 1));
+ HDassert((tv_ptr->start[3] == 0) || (3 < tv_ptr->small_ds_offset + 1));
+ HDassert((tv_ptr->start[4] == 0) || (4 < tv_ptr->small_ds_offset + 1));
+
+ ckrbrd_hs_dr_pio_test__slct_ckrbrd(
+ tv_ptr->mpi_rank, tv_ptr->file_large_ds_sid_1, tv_ptr->large_rank, tv_ptr->edge_size,
+ tv_ptr->checker_edge_size, tv_ptr->small_rank - 1, tv_ptr->start);
+
+ /* verify that H5Sselect_shape_same() reports the in
+ * memory small data set slice selection and the
+ * on disk slice through the large data set selection
+ * as having the same shape.
+ */
+ check = H5Sselect_shape_same(tv_ptr->mem_small_ds_sid, tv_ptr->file_large_ds_sid_1);
+ VRFY((check == TRUE), "H5Sselect_shape_same passed");
+
+ /* write the small data set slice from memory to the
+ * target slice of the disk data set
+ */
+#if CHECKER_BOARD_HS_DR_PIO_TEST__M2D_S2L__DEBUG
+ HDfprintf(stdout, "%s:%d: start = %d %d %d %d %d.\n", fcnName, tv_ptr->mpi_rank,
+ tv_ptr->start[0], tv_ptr->start[1], tv_ptr->start[2], tv_ptr->start[3],
+ tv_ptr->start[4]);
+ HDfprintf(stdout, "%s:%d: mem/file extent dims = %d/%d.\n", fcnName, tv_ptr->mpi_rank,
+ H5Sget_simple_extent_ndims(tv_ptr->mem_small_ds_sid),
+ H5Sget_simple_extent_ndims(tv_ptr->file_large_ds_sid_1));
+#endif /* CHECKER_BOARD_HS_DR_PIO_TEST__M2D_S2L__DEBUG */
+ ret = H5Dwrite(tv_ptr->large_dataset, H5T_NATIVE_UINT32, tv_ptr->mem_small_ds_sid,
+ tv_ptr->file_large_ds_sid_1, tv_ptr->xfer_plist, tv_ptr->small_ds_buf_0);
+ VRFY((ret != FAIL), "H5Dwrite of small ds slice to large ds succeeded");
+
+ /* read this process's slice of the on disk large
+ * data set into memory.
+ */
+
+ ret = H5Dread(tv_ptr->large_dataset, H5T_NATIVE_UINT32, tv_ptr->mem_large_ds_sid,
+ tv_ptr->file_large_ds_sid_0, tv_ptr->xfer_plist, tv_ptr->large_ds_buf_1);
+ VRFY((ret != FAIL), "H5Dread() of process slice of large ds succeeded");
+
+ /* verify that the expected data and only the
+ * expected data was read.
+ */
+ expected_value = (uint32_t)((size_t)(tv_ptr->mpi_rank) * tv_ptr->small_ds_slice_size);
+
+ start_index =
+ (size_t)((i * tv_ptr->edge_size * tv_ptr->edge_size * tv_ptr->edge_size *
+ tv_ptr->edge_size) +
+ (j * tv_ptr->edge_size * tv_ptr->edge_size * tv_ptr->edge_size) +
+ (k * tv_ptr->edge_size * tv_ptr->edge_size) + (l * tv_ptr->edge_size));
+ stop_index = start_index + tv_ptr->small_ds_slice_size - 1;
+
+ HDassert(start_index < stop_index);
+ HDassert(stop_index < tv_ptr->large_ds_size);
+
+ data_ok = TRUE;
+
+ ptr_1 = tv_ptr->large_ds_buf_1;
+ for (u = 0; u < start_index; u++, ptr_1++) {
+
+ if (*ptr_1 != 0) {
+
+ data_ok = FALSE;
+ *ptr_1 = 0;
+ }
+ }
+
+ data_ok &= ckrbrd_hs_dr_pio_test__verify_data(
+ tv_ptr->large_ds_buf_1 + start_index, tv_ptr->small_rank - 1, tv_ptr->edge_size,
+ tv_ptr->checker_edge_size, expected_value, (hbool_t)TRUE);
+
+ ptr_1 = tv_ptr->large_ds_buf_1 + stop_index + 1;
+ for (u = stop_index + 1; u < tv_ptr->large_ds_size; u++, ptr_1++) {
+
+ if (*ptr_1 != 0) {
+
+ data_ok = FALSE;
+ *ptr_1 = 0;
+ }
+ }
+
+ VRFY((data_ok == TRUE), "small ds cb slice write to large ds slice data good.");
+
+ (tv_ptr->tests_run)++;
+ }
+
+ l++;
+
+ (tv_ptr->total_tests)++;
+
+ } while ((tv_ptr->large_rank > 2) && ((tv_ptr->small_rank - 1) <= 1) && (l < tv_ptr->edge_size));
+ k++;
+ } while ((tv_ptr->large_rank > 3) && ((tv_ptr->small_rank - 1) <= 2) && (k < tv_ptr->edge_size));
+ j++;
+ } while ((tv_ptr->large_rank > 4) && ((tv_ptr->small_rank - 1) <= 3) && (j < tv_ptr->edge_size));
+
+ return;
+
+} /* ckrbrd_hs_dr_pio_test__m2d_s2l() */
+
+/*-------------------------------------------------------------------------
+ * Function: ckrbrd_hs_dr_pio_test__run_test()
+ *
+ * Purpose: Test I/O to/from checkerboard selections of hyperslabs of
+ * different rank in the parallel case.
+ *
+ * Return: void
+ *
+ * Programmer: JRM -- 10/10/09
+ *
+ *-------------------------------------------------------------------------
+ */
+
+#define CKRBRD_HS_DR_PIO_TEST__RUN_TEST__DEBUG 0
+
+static void
+ckrbrd_hs_dr_pio_test__run_test(const int test_num, const int edge_size, const int checker_edge_size,
+ const int chunk_edge_size, const int small_rank, const int large_rank,
+ const hbool_t use_collective_io, const hid_t dset_type,
+ const int express_test, int *skips_ptr, int max_skips,
+ int64_t *total_tests_ptr, int64_t *tests_run_ptr, int64_t *tests_skipped_ptr,
+ int mpi_rank)
+
+{
+#if CKRBRD_HS_DR_PIO_TEST__RUN_TEST__DEBUG
+ const char *fcnName = "ckrbrd_hs_dr_pio_test__run_test()";
+#endif /* CKRBRD_HS_DR_PIO_TEST__RUN_TEST__DEBUG */
+ struct hs_dr_pio_test_vars_t test_vars = {
+ /* int mpi_size = */ -1,
+ /* int mpi_rank = */ -1,
+ /* MPI_Comm mpi_comm = */ MPI_COMM_NULL,
+ /* MPI_Info mpi_info = */ MPI_INFO_NULL,
+ /* int test_num = */ -1,
+ /* int edge_size = */ -1,
+ /* int checker_edge_size = */ -1,
+ /* int chunk_edge_size = */ -1,
+ /* int small_rank = */ -1,
+ /* int large_rank = */ -1,
+ /* hid_t dset_type = */ -1,
+ /* uint32_t * small_ds_buf_0 = */ NULL,
+ /* uint32_t * small_ds_buf_1 = */ NULL,
+ /* uint32_t * small_ds_buf_2 = */ NULL,
+ /* uint32_t * small_ds_slice_buf = */ NULL,
+ /* uint32_t * large_ds_buf_0 = */ NULL,
+ /* uint32_t * large_ds_buf_1 = */ NULL,
+ /* uint32_t * large_ds_buf_2 = */ NULL,
+ /* uint32_t * large_ds_slice_buf = */ NULL,
+ /* int small_ds_offset = */ -1,
+ /* int large_ds_offset = */ -1,
+ /* hid_t fid = */ -1, /* HDF5 file ID */
+ /* hid_t xfer_plist = */ H5P_DEFAULT,
+ /* hid_t full_mem_small_ds_sid = */ -1,
+ /* hid_t full_file_small_ds_sid = */ -1,
+ /* hid_t mem_small_ds_sid = */ -1,
+ /* hid_t file_small_ds_sid_0 = */ -1,
+ /* hid_t file_small_ds_sid_1 = */ -1,
+ /* hid_t small_ds_slice_sid = */ -1,
+ /* hid_t full_mem_large_ds_sid = */ -1,
+ /* hid_t full_file_large_ds_sid = */ -1,
+ /* hid_t mem_large_ds_sid = */ -1,
+ /* hid_t file_large_ds_sid_0 = */ -1,
+ /* hid_t file_large_ds_sid_1 = */ -1,
+ /* hid_t file_large_ds_process_slice_sid = */ -1,
+ /* hid_t mem_large_ds_process_slice_sid = */ -1,
+ /* hid_t large_ds_slice_sid = */ -1,
+ /* hid_t small_dataset = */ -1, /* Dataset ID */
+ /* hid_t large_dataset = */ -1, /* Dataset ID */
+ /* size_t small_ds_size = */ 1,
+ /* size_t small_ds_slice_size = */ 1,
+ /* size_t large_ds_size = */ 1,
+ /* size_t large_ds_slice_size = */ 1,
+ /* hsize_t dims[PAR_SS_DR_MAX_RANK] = */ {0, 0, 0, 0, 0},
+ /* hsize_t chunk_dims[PAR_SS_DR_MAX_RANK] = */ {0, 0, 0, 0, 0},
+ /* hsize_t start[PAR_SS_DR_MAX_RANK] = */ {0, 0, 0, 0, 0},
+ /* hsize_t stride[PAR_SS_DR_MAX_RANK] = */ {0, 0, 0, 0, 0},
+ /* hsize_t count[PAR_SS_DR_MAX_RANK] = */ {0, 0, 0, 0, 0},
+ /* hsize_t block[PAR_SS_DR_MAX_RANK] = */ {0, 0, 0, 0, 0},
+ /* hsize_t * start_ptr = */ NULL,
+ /* hsize_t * stride_ptr = */ NULL,
+ /* hsize_t * count_ptr = */ NULL,
+ /* hsize_t * block_ptr = */ NULL,
+ /* int skips = */ 0,
+ /* int max_skips = */ 0,
+ /* int64_t total_tests = */ 0,
+ /* int64_t tests_run = */ 0,
+ /* int64_t tests_skipped = */ 0};
+ struct hs_dr_pio_test_vars_t *tv_ptr = &test_vars;
+
+ if (MAINPROCESS)
+ printf("\r - running test #%lld: small rank = %d, large rank = %d", (long long)(test_num + 1),
+ small_rank, large_rank);
+
+ hs_dr_pio_test__setup(test_num, edge_size, checker_edge_size, chunk_edge_size, small_rank, large_rank,
+ use_collective_io, dset_type, express_test, tv_ptr);
+
+ /* initialize skips & max_skips */
+ tv_ptr->skips = *skips_ptr;
+ tv_ptr->max_skips = max_skips;
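+ /* With express testing enabled, only one in every (max_skips + 1)
+ * sub-tests is actually executed: the skips counter is incremented for
+ * each sub-test and the sub-test runs only once the counter exceeds
+ * max_skips, at which point it is reset.  The counter is carried across
+ * calls via *skips_ptr.
+ */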
+
+#if CKRBRD_HS_DR_PIO_TEST__RUN_TEST__DEBUG
+ if (MAINPROCESS) {
+ HDfprintf(stdout, "test %d: small rank = %d, large rank = %d.\n", test_num, small_rank, large_rank);
+ HDfprintf(stdout, "test %d: Initialization complete.\n", test_num);
+ }
+#endif /* CKRBRD_HS_DR_PIO_TEST__RUN_TEST__DEBUG */
+
+ /* first, verify that we can read from disk correctly using selections
+ * of different rank that H5Sselect_shape_same() views as being of the
+ * same shape.
+ *
+ * Start by reading a (small_rank - 1)-D slice from this process's slice
+ * of the on disk large data set, and verifying that the data read is
+ * correct. Verify that H5Sselect_shape_same() returns true on the
+ * memory and file selections.
+ *
+ * The first step is to set up the needed checker board selection in the
+ * in memory small cube.
+ */
+
+ ckrbrd_hs_dr_pio_test__d2m_l2s(tv_ptr);
+
+ /* similarly, read slices of the on disk small data set into slices
+ * through the in memory large data set, and verify that the correct
+ * data (and only the correct data) is read.
+ */
+
+ ckrbrd_hs_dr_pio_test__d2m_s2l(tv_ptr);
+
+ /* now we go in the opposite direction, verifying that we can write
+ * from memory to file using selections of different rank that
+ * H5Sselect_shape_same() views as being of the same shape.
+ *
+ * Start by writing (small_rank - 1)-D slices from the in memory large data
+ * set to the on disk small dataset. After each write, read the slice of
+ * the small dataset back from disk, and verify that it contains the
+ * expected data. Verify that H5Sselect_shape_same() returns true on
+ * the memory and file selections.
+ */
+
+ ckrbrd_hs_dr_pio_test__m2d_l2s(tv_ptr);
+
+ /* Now write the contents of the process's slice of the in memory
+ * small data set to slices of the on disk large data set. After
+ * each write, read the process's slice of the large data set back
+ * into memory, and verify that it contains the expected data.
+ * Verify that H5Sselect_shape_same() returns true on the memory
+ * and file selections.
+ */
+
+ ckrbrd_hs_dr_pio_test__m2d_s2l(tv_ptr);
+
+#if CKRBRD_HS_DR_PIO_TEST__RUN_TEST__DEBUG
+ if (MAINPROCESS) {
+ HDfprintf(stdout, "test %d: Subtests complete -- tests run/skipped/total = %lld/%lld/%lld.\n",
+ test_num, (long long)(tv_ptr->tests_run), (long long)(tv_ptr->tests_skipped),
+ (long long)(tv_ptr->total_tests));
+ }
+#endif /* CKRBRD_HS_DR_PIO_TEST__RUN_TEST__DEBUG */
+
+ hs_dr_pio_test__takedown(tv_ptr);
+
+#if CKRBRD_HS_DR_PIO_TEST__RUN_TEST__DEBUG
+ if (MAINPROCESS) {
+ HDfprintf(stdout, "test %d: Takedown complete.\n", test_num);
+ }
+#endif /* CKRBRD_HS_DR_PIO_TEST__RUN_TEST__DEBUG */
+
+ *skips_ptr = tv_ptr->skips;
+ *total_tests_ptr += tv_ptr->total_tests;
+ *tests_run_ptr += tv_ptr->tests_run;
+ *tests_skipped_ptr += tv_ptr->tests_skipped;
+
+ return;
+
+} /* ckrbrd_hs_dr_pio_test__run_test() */
+
+/*-------------------------------------------------------------------------
+ * Function: ckrbrd_hs_dr_pio_test()
+ *
+ * Purpose: Test I/O to/from hyperslab selections of different rank in
+ * the parallel case.
+ *
+ * Return: void
+ *
+ * Programmer: JRM -- 9/18/09
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static void
+ckrbrd_hs_dr_pio_test(ShapeSameTestMethods sstest_type)
+{
+ int express_test;
+ int local_express_test;
+ int mpi_size = -1;
+ int mpi_rank = -1;
+ int test_num = 0;
+ int edge_size;
+ int checker_edge_size = 3;
+ int chunk_edge_size = 0;
+ int small_rank = 3;
+ int large_rank = 4;
+ int mpi_result;
+ hid_t dset_type = H5T_NATIVE_UINT;
+ int skips = 0;
+ int max_skips = 0;
+ /* The following table lists the number of sub-tests skipped between
+ * each test that is actually executed, as a function of the express
+ * test level. Note that any value in excess of 4880 will cause all
+ * sub-tests to be skipped.
+ */
+ int max_skips_tbl[4] = {0, 4, 64, 1024};
+ int64_t total_tests = 0;
+ int64_t tests_run = 0;
+ int64_t tests_skipped = 0;
+
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
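+ /* The cube edge must be at least 6 -- ckrbrd_hs_dr_pio_test__verify_data()
+ * asserts this -- and is scaled up with the number of ranks for larger runs.
+ */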
+ edge_size = (mpi_size > 6 ? mpi_size : 6);
+
+ local_express_test = EXPRESS_MODE; /* GetTestExpress(); */
+
+ HDcompile_assert(sizeof(uint32_t) == sizeof(unsigned));
+
+ mpi_result = MPI_Allreduce((void *)&local_express_test, (void *)&express_test, 1, MPI_INT, MPI_MAX,
+ MPI_COMM_WORLD);
+
+ VRFY((mpi_result == MPI_SUCCESS), "MPI_Allreduce(0) succeeded");
+
+ if (local_express_test < 0) {
+ max_skips = max_skips_tbl[0];
+ }
+ else if (local_express_test > 3) {
+ max_skips = max_skips_tbl[3];
+ }
+ else {
+ max_skips = max_skips_tbl[local_express_test];
+ }
+
+#if 0
+ {
+ int DebugWait = 1;
+
+ while (DebugWait) ;
+ }
+#endif
+
+ for (large_rank = 3; large_rank <= PAR_SS_DR_MAX_RANK; large_rank++) {
+
+ for (small_rank = 2; small_rank < large_rank; small_rank++) {
+ switch (sstest_type) {
+ case IND_CONTIG:
+ /* contiguous data set, independent I/O */
+ chunk_edge_size = 0;
+ ckrbrd_hs_dr_pio_test__run_test(test_num, edge_size, checker_edge_size, chunk_edge_size,
+ small_rank, large_rank, FALSE, dset_type, express_test,
+ &skips, max_skips, &total_tests, &tests_run,
+ &tests_skipped, mpi_rank);
+ test_num++;
+ break;
+ /* end of case IND_CONTIG */
+
+ case COL_CONTIG:
+ /* contiguous data set, collective I/O */
+ chunk_edge_size = 0;
+ ckrbrd_hs_dr_pio_test__run_test(test_num, edge_size, checker_edge_size, chunk_edge_size,
+ small_rank, large_rank, TRUE, dset_type, express_test,
+ &skips, max_skips, &total_tests, &tests_run,
+ &tests_skipped, mpi_rank);
+ test_num++;
+ break;
+ /* end of case COL_CONTIG */
+
+ case IND_CHUNKED:
+ /* chunked data set, independent I/O */
+ chunk_edge_size = 5;
+ ckrbrd_hs_dr_pio_test__run_test(test_num, edge_size, checker_edge_size, chunk_edge_size,
+ small_rank, large_rank, FALSE, dset_type, express_test,
+ &skips, max_skips, &total_tests, &tests_run,
+ &tests_skipped, mpi_rank);
+ test_num++;
+ break;
+ /* end of case IND_CHUNKED */
+
+ case COL_CHUNKED:
+ /* chunked data set, collective I/O */
+ chunk_edge_size = 5;
+ ckrbrd_hs_dr_pio_test__run_test(test_num, edge_size, checker_edge_size, chunk_edge_size,
+ small_rank, large_rank, TRUE, dset_type, express_test,
+ &skips, max_skips, &total_tests, &tests_run,
+ &tests_skipped, mpi_rank);
+ test_num++;
+ break;
+ /* end of case COL_CHUNKED */
+
+ default:
+ VRFY((FALSE), "unknown test type");
+ break;
+
+ } /* end of switch(sstest_type) */
+#if CONTIG_HS_DR_PIO_TEST__DEBUG
+ if ((MAINPROCESS) && (tests_skipped > 0)) {
+ HDfprintf(stdout, " run/skipped/total = %" PRId64 "/%" PRId64 "/%" PRId64 ".\n",
+ tests_run, tests_skipped, total_tests);
+ }
+#endif /* CONTIG_HS_DR_PIO_TEST__DEBUG */
+ }
+ }
+
+ if (MAINPROCESS) {
+ if (tests_skipped > 0) {
+ HDfprintf(stdout, " %" PRId64 " of %" PRId64 " subtests skipped to expedite testing.\n",
+ tests_skipped, total_tests);
+ }
+ else
+ HDprintf("\n");
+ }
+
+ return;
+
+} /* ckrbrd_hs_dr_pio_test() */
+
+/* Main Body. Here for now; may have to move it to a separate file later. */
+
+/*
+ * Main driver of the Parallel HDF5 tests
+ */
+
+#include "testphdf5.h"
+
+#ifndef PATH_MAX
+#define PATH_MAX 512
+#endif /* !PATH_MAX */
+
+/* global variables */
+int dim0;
+int dim1;
+int chunkdim0;
+int chunkdim1;
+int nerrors = 0; /* error count */
+int ndatasets = 300; /* number of datasets to create*/
+int ngroups = 512; /* number of groups to create in root
+ * group. */
+int facc_type = FACC_MPIO; /*Test file access type */
+int dxfer_coll_type = DXFER_COLLECTIVE_IO;
+
+H5E_auto2_t old_func; /* previous error handler */
+void *old_client_data; /* previous error handler arg.*/
+
+/* other option flags */
+
+#ifdef USE_PAUSE
+/* pause the process for a moment to allow debugger to attach if desired. */
+/* Will pause more if greenlight file is not present but will eventually */
+/* continue. */
+#include <sys/types.h>
+#include <sys/stat.h>
+
+void
+pause_proc(void)
+{
+
+ int pid;
+ h5_stat_t statbuf;
+ char greenlight[] = "go";
+ int maxloop = 10;
+ int loops = 0;
+ int time_int = 10;
+
+ /* mpi variables */
+ int mpi_size, mpi_rank;
+ int mpi_namelen;
+ char mpi_name[MPI_MAX_PROCESSOR_NAME];
+
+ pid = getpid();
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+ MPI_Get_processor_name(mpi_name, &mpi_namelen);
+
+ if (MAINPROCESS)
+ while ((HDstat(greenlight, &statbuf) == -1) && loops < maxloop) {
+ if (!loops++) {
+ HDprintf("Proc %d (%*s, %d): to debug, attach %d\n", mpi_rank, mpi_namelen, mpi_name, pid,
+ pid);
+ }
+ HDprintf("waiting(%ds) for file %s ...\n", time_int, greenlight);
+ fflush(stdout);
+ HDsleep(time_int);
+ }
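+ /* ranks other than 0 skip the polling loop above and simply wait here
+ * until rank 0 either sees the greenlight file or gives up after maxloop
+ * tries.
+ */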
+ MPI_Barrier(MPI_COMM_WORLD);
+}
+
+/* Use the Profile feature of MPI to call the pause_proc() */
+int
+MPI_Init(int *argc, char ***argv)
+{
+ int ret_code;
+ ret_code = PMPI_Init(argc, argv);
+ pause_proc();
+ return (ret_code);
+}
+#endif /* USE_PAUSE */
+
+/*
+ * Show command usage
+ */
+static void
+usage(void)
+{
+ HDprintf(" [-r] [-w] [-m<n_datasets>] [-n<n_groups>] "
+ "[-o] [-f <prefix>] [-d <dim0> <dim1>]\n");
+ HDprintf("\t-m<n_datasets>"
+ "\tset number of datasets for the multiple dataset test\n");
+ HDprintf("\t-n<n_groups>"
+ "\tset number of groups for the multiple group test\n");
+#if 0
+ HDprintf("\t-f <prefix>\tfilename prefix\n");
+#endif
+ HDprintf("\t-2\t\tuse Split-file together with MPIO\n");
+ HDprintf("\t-d <factor0> <factor1>\tdataset dimensions factors. Defaults (%d,%d)\n", ROW_FACTOR,
+ COL_FACTOR);
+ HDprintf("\t-c <dim0> <dim1>\tdataset chunk dimensions. Defaults (dim0/10,dim1/10)\n");
+ HDprintf("\n");
+}
+
+/*
+ * parse the command line options
+ */
+static int
+parse_options(int argc, char **argv)
+{
+ int mpi_size, mpi_rank; /* mpi variables */
+
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ /* setup default chunk-size. Make sure sizes are > 0 */
+
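+ /* (dim + 9) / 10 is integer ceiling division by 10, so each default chunk
+ * dimension is roughly a tenth of the dataset dimension and never zero for
+ * positive dims.
+ */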
+ chunkdim0 = (dim0 + 9) / 10;
+ chunkdim1 = (dim1 + 9) / 10;
+
+ while (--argc) {
+ if (**(++argv) != '-') {
+ break;
+ }
+ else {
+ switch (*(*argv + 1)) {
+ case 'm':
+ ndatasets = atoi((*argv + 1) + 1);
+ if (ndatasets < 0) {
+ nerrors++;
+ return (1);
+ }
+ break;
+ case 'n':
+ ngroups = atoi((*argv + 1) + 1);
+ if (ngroups < 0) {
+ nerrors++;
+ return (1);
+ }
+ break;
+#if 0
+ case 'f': if (--argc < 1) {
+ nerrors++;
+ return(1);
+ }
+ if (**(++argv) == '-') {
+ nerrors++;
+ return(1);
+ }
+ paraprefix = *argv;
+ break;
+#endif
+ case 'i': /* use independent I/O with file set view instead of collective I/O */
+ dxfer_coll_type = DXFER_INDEPENDENT_IO;
+ break;
+ case '2': /* Use the split-file driver with MPIO access */
+ /* Can use $HDF5_METAPREFIX to define the */
+ /* meta-file-prefix. */
+ facc_type = FACC_MPIO | FACC_SPLIT;
+ break;
+ case 'd': /* dimension sizes */
+ if (--argc < 2) {
+ nerrors++;
+ return (1);
+ }
+ dim0 = atoi(*(++argv)) * mpi_size;
+ argc--;
+ dim1 = atoi(*(++argv)) * mpi_size;
+ /* set default chunkdim sizes too */
+ chunkdim0 = (dim0 + 9) / 10;
+ chunkdim1 = (dim1 + 9) / 10;
+ break;
+ case 'c': /* chunk dimensions */
+ if (--argc < 2) {
+ nerrors++;
+ return (1);
+ }
+ chunkdim0 = atoi(*(++argv));
+ argc--;
+ chunkdim1 = atoi(*(++argv));
+ break;
+ case 'h': /* print help message--return with nerrors set */
+ return (1);
+ default:
+ HDprintf("Illegal option(%s)\n", *argv);
+ nerrors++;
+ return (1);
+ }
+ }
+ } /*while*/
+
+ /* check validity of dimension and chunk sizes */
+ if (dim0 <= 0 || dim1 <= 0) {
+ HDprintf("Illegal dim sizes (%d, %d)\n", dim0, dim1);
+ nerrors++;
+ return (1);
+ }
+ if (chunkdim0 <= 0 || chunkdim1 <= 0) {
+ HDprintf("Illegal chunkdim sizes (%d, %d)\n", chunkdim0, chunkdim1);
+ nerrors++;
+ return (1);
+ }
+
+ /* Make sure datasets can be divided into equal portions by the processes */
+ if ((dim0 % mpi_size) || (dim1 % mpi_size)) {
+ if (MAINPROCESS)
+ HDprintf("dim0(%d) and dim1(%d) must be multiples of processes(%d)\n", dim0, dim1, mpi_size);
+ nerrors++;
+ return (1);
+ }
+
+ /* compose the test filenames */
+ {
+ int i, n;
+
+ n = sizeof(FILENAME) / sizeof(FILENAME[0]) - 1; /* exclude the NULL */
+
+ for (i = 0; i < n; i++)
+ strncpy(filenames[i], FILENAME[i], PATH_MAX);
+#if 0 /* no support for VFDs right now */
+ if (h5_fixname(FILENAME[i], fapl, filenames[i], PATH_MAX) == NULL) {
+ HDprintf("h5_fixname failed\n");
+ nerrors++;
+ return (1);
+ }
+#endif
+ if (MAINPROCESS) {
+ HDprintf("Test filenames are:\n");
+ for (i = 0; i < n; i++)
+ HDprintf(" %s\n", filenames[i]);
+ }
+ }
+
+ return (0);
+}
+
+/*
+ * Create the appropriate File access property list
+ */
+hid_t
+create_faccess_plist(MPI_Comm comm, MPI_Info info, int l_facc_type)
+{
+ hid_t ret_pl = -1;
+ herr_t ret; /* generic return value */
+ int mpi_rank; /* mpi variables */
+
+ /* need the rank for error checking macros */
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ ret_pl = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((ret_pl >= 0), "H5P_FILE_ACCESS");
+
+ if (l_facc_type == FACC_DEFAULT)
+ return (ret_pl);
+
+ if (l_facc_type == FACC_MPIO) {
+ /* set Parallel access with communicator */
+ ret = H5Pset_fapl_mpio(ret_pl, comm, info);
+ VRFY((ret >= 0), "");
+ ret = H5Pset_all_coll_metadata_ops(ret_pl, TRUE);
+ VRFY((ret >= 0), "");
+ ret = H5Pset_coll_metadata_write(ret_pl, TRUE);
+ VRFY((ret >= 0), "");
+ return (ret_pl);
+ }
+
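+ /* FACC_MPIO | FACC_SPLIT: use the split file driver, with the same MPI-IO
+ * file access property list driving both the metadata (.meta) and raw data
+ * (.raw) halves of the file.
+ */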
+ if (l_facc_type == (FACC_MPIO | FACC_SPLIT)) {
+ hid_t mpio_pl;
+
+ mpio_pl = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((mpio_pl >= 0), "");
+ /* set Parallel access with communicator */
+ ret = H5Pset_fapl_mpio(mpio_pl, comm, info);
+ VRFY((ret >= 0), "");
+
+ /* setup file access template */
+ ret_pl = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((ret_pl >= 0), "");
+ /* set Parallel access with communicator */
+ ret = H5Pset_fapl_split(ret_pl, ".meta", mpio_pl, ".raw", mpio_pl);
+ VRFY((ret >= 0), "H5Pset_fapl_split succeeded");
+ H5Pclose(mpio_pl);
+ return (ret_pl);
+ }
+
+ /* unknown file access types */
+ return (ret_pl);
+}
+
+/* Shape Same test using contiguous hyperslab using independent IO on contiguous datasets */
+static void
+sscontig1(void)
+{
+ contig_hs_dr_pio_test(IND_CONTIG);
+}
+
+/* Shape Same test using contiguous hyperslab using collective IO on contiguous datasets */
+static void
+sscontig2(void)
+{
+ contig_hs_dr_pio_test(COL_CONTIG);
+}
+
+/* Shape Same test using contiguous hyperslab using independent IO on chunked datasets */
+static void
+sscontig3(void)
+{
+ contig_hs_dr_pio_test(IND_CHUNKED);
+}
+
+/* Shape Same test using contiguous hyperslab using collective IO on chunked datasets */
+static void
+sscontig4(void)
+{
+ contig_hs_dr_pio_test(COL_CHUNKED);
+}
+
+/* Shape Same test using checker hyperslab using independent IO on contiguous datasets */
+static void
+sschecker1(void)
+{
+ ckrbrd_hs_dr_pio_test(IND_CONTIG);
+}
+
+/* Shape Same test using checker hyperslab using collective IO on contiguous datasets */
+static void
+sschecker2(void)
+{
+ ckrbrd_hs_dr_pio_test(COL_CONTIG);
+}
+
+/* Shape Same test using checker hyperslab using independent IO on chunked datasets */
+static void
+sschecker3(void)
+{
+ ckrbrd_hs_dr_pio_test(IND_CHUNKED);
+}
+
+/* Shape Same test using checker hyperslab using collective IO on chunked datasets */
+static void
+sschecker4(void)
+{
+ ckrbrd_hs_dr_pio_test(COL_CHUNKED);
+}
+
+int
+main(int argc, char **argv)
+{
+ int mpi_size, mpi_rank; /* mpi variables */
+
+#ifndef H5_HAVE_WIN32_API
+ /* Un-buffer the stdout and stderr */
+ HDsetbuf(stderr, NULL);
+ HDsetbuf(stdout, NULL);
+#endif
+
+ MPI_Init(&argc, &argv);
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
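+ /* scale the default dataset dimensions with the number of ranks --
+ * parse_options() requires dim0 and dim1 to be multiples of mpi_size so
+ * the data divides evenly across processes.
+ */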
+ dim0 = ROW_FACTOR * mpi_size;
+ dim1 = COL_FACTOR * mpi_size;
+
+ if (MAINPROCESS) {
+ HDprintf("===================================\n");
+ HDprintf("Shape Same Tests Start\n");
+ HDprintf(" express_test = %d.\n", EXPRESS_MODE /* GetTestExpress() */);
+ HDprintf("===================================\n");
+ }
+
+ /* Attempt to turn off atexit post processing so that in case errors
+ * happen during the test and the process is aborted, it will not hang
+ * in the atexit post processing, in which it may try to make MPI
+ * calls. By then, MPI calls may not work.
+ */
+ if (H5dont_atexit() < 0) {
+ if (MAINPROCESS)
+ HDprintf("%d: Failed to turn off atexit processing. Continue.\n", mpi_rank);
+ }
+ H5open();
+ /* h5_show_hostname(); */
+
+ fapl = H5Pcreate(H5P_FILE_ACCESS);
+
+ /* Get the capability flag of the VOL connector being used */
+ if (H5Pget_vol_cap_flags(fapl, &vol_cap_flags_g) < 0) {
+ if (MAINPROCESS)
+ HDprintf("Failed to get the capability flag of the VOL connector being used\n");
+
+ MPI_Finalize();
+ return 0;
+ }
+
+ /* Make sure the connector supports the API functions being tested. This test only
+ * uses a few API functions, such as H5Fcreate/close/delete, H5Dcreate/write/read/close, etc.
+ */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) {
+ if (MAINPROCESS)
+ HDprintf("API functions for basic file and dataset aren't supported with this connector\n");
+
+ MPI_Finalize();
+ return 0;
+ }
+
+#if 0
+ HDmemset(filenames, 0, sizeof(filenames));
+ for (int i = 0; i < NFILENAME; i++) {
+ if (NULL == (filenames[i] = HDmalloc(PATH_MAX))) {
+ HDprintf("couldn't allocate filename array\n");
+ MPI_Abort(MPI_COMM_WORLD, -1);
+ }
+ }
+#endif
+
+ /* Initialize testing framework */
+ /* TestInit(argv[0], usage, parse_options); */
+
+ if (parse_options(argc, argv)) {
+ usage();
+ return 1;
+ }
+
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO && MAINPROCESS) {
+ HDprintf("===================================\n"
+ " Using Independent I/O with file set view to replace collective I/O \n"
+ "===================================\n");
+ }
+
+ /* Shape Same tests using contiguous hyperslab */
+#if 0
+ AddTest("sscontig1", sscontig1, NULL,
+ "Cntg hslab, ind IO, cntg dsets", filenames[0]);
+ AddTest("sscontig2", sscontig2, NULL,
+ "Cntg hslab, col IO, cntg dsets", filenames[0]);
+ AddTest("sscontig3", sscontig3, NULL,
+ "Cntg hslab, ind IO, chnk dsets", filenames[0]);
+ AddTest("sscontig4", sscontig4, NULL,
+ "Cntg hslab, col IO, chnk dsets", filenames[0]);
+#endif
+ if (MAINPROCESS) {
+ printf("Cntg hslab, ind IO, cntg dsets\n");
+ fflush(stdout);
+ }
+ sscontig1();
+ if (MAINPROCESS) {
+ printf("Cntg hslab, col IO, cntg dsets\n");
+ fflush(stdout);
+ }
+ sscontig2();
+ if (MAINPROCESS) {
+ printf("Cntg hslab, ind IO, chnk dsets\n");
+ fflush(stdout);
+ }
+ sscontig3();
+ if (MAINPROCESS) {
+ printf("Cntg hslab, col IO, chnk dsets\n");
+ fflush(stdout);
+ }
+ sscontig4();
+
+ /* Shape Same tests using checker board hyperslab */
+#if 0
+ AddTest("sschecker1", sschecker1, NULL,
+ "Check hslab, ind IO, cntg dsets", filenames[0]);
+ AddTest("sschecker2", sschecker2, NULL,
+ "Check hslab, col IO, cntg dsets", filenames[0]);
+ AddTest("sschecker3", sschecker3, NULL,
+ "Check hslab, ind IO, chnk dsets", filenames[0]);
+ AddTest("sschecker4", sschecker4, NULL,
+ "Check hslab, col IO, chnk dsets", filenames[0]);
+#endif
+ if (MAINPROCESS) {
+ printf("Check hslab, ind IO, cntg dsets\n");
+ fflush(stdout);
+ }
+ sschecker1();
+ if (MAINPROCESS) {
+ printf("Check hslab, col IO, cntg dsets\n");
+ fflush(stdout);
+ }
+ sschecker2();
+ if (MAINPROCESS) {
+ printf("Check hslab, ind IO, chnk dsets\n");
+ fflush(stdout);
+ }
+ sschecker3();
+ if (MAINPROCESS) {
+ printf("Check hslab, col IO, chnk dsets\n");
+ fflush(stdout);
+ }
+ sschecker4();
+
+ /* Display testing information */
+ /* TestInfo(argv[0]); */
+
+ /* setup file access property list */
+ H5Pset_fapl_mpio(fapl, MPI_COMM_WORLD, MPI_INFO_NULL);
+
+ /* Parse command line arguments */
+ /* TestParseCmdLine(argc, argv); */
+
+ /* Perform requested testing */
+ /* PerformTests(); */
+
+ /* make sure all processes are finished before final report, cleanup
+ * and exit.
+ */
+ MPI_Barrier(MPI_COMM_WORLD);
+
+ /* Display test summary, if requested */
+ /* if (MAINPROCESS && GetTestSummary())
+ TestSummary(); */
+
+ /* Clean up test files */
+ /* h5_clean_files(FILENAME, fapl); */
+ H5Fdelete(FILENAME[0], fapl);
+ H5Pclose(fapl);
+
+ /* nerrors += GetTestNumErrs(); */
+
+ /* Gather errors from all processes */
+ {
+ int temp;
+ MPI_Allreduce(&nerrors, &temp, 1, MPI_INT, MPI_MAX, MPI_COMM_WORLD);
+ nerrors = temp;
+ }
+
+ if (MAINPROCESS) { /* only process 0 reports */
+ HDprintf("===================================\n");
+ if (nerrors)
+ HDprintf("***Shape Same tests detected %d errors***\n", nerrors);
+ else
+ HDprintf("Shape Same tests finished successfully\n");
+ HDprintf("===================================\n");
+ }
+
+#if 0
+ for (int i = 0; i < NFILENAME; i++) {
+ HDfree(filenames[i]);
+ filenames[i] = NULL;
+ }
+#endif
+
+ /* close HDF5 library */
+ H5close();
+
+ /* Release test infrastructure */
+ /* TestShutdown(); */
+
+ MPI_Finalize();
+
+ /* cannot just return (nerrors) because the exit code is limited to 1 byte */
+ return (nerrors != 0);
+}
diff --git a/testpar/API/t_span_tree.c b/testpar/API/t_span_tree.c
new file mode 100644
index 0000000..5aafb0b
--- /dev/null
+++ b/testpar/API/t_span_tree.c
@@ -0,0 +1,2622 @@
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://www.hdfgroup.org/licenses. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/*
+ This program tests irregular hyperslab selections with collective write and read.
+ Collective write and read are verified against independent IO output:
+
+ 1) Write two datasets with the same hyperslab selection settings,
+ one in independent mode,
+ one in collective mode.
+ 2) Read both datasets back with the same hyperslab selection settings:
+ 1. independent read of the independent output,
+ independent read of the collective output;
+ compare the results.
+ If the results are the same, collective write succeeds.
+ 2. collective read of the independent output,
+ independent read of the independent output;
+ compare the results.
+ If the results are the same, collective read succeeds.
+
+ */
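+
+/*
+ * A minimal sketch of the verification pattern described above (illustrative
+ * only, not part of the test; dset_ind, dset_col, mspace, fspace, coll_dxpl,
+ * buf, out_ind, out_col, and nelem are placeholder names):
+ *
+ *     H5Dwrite(dset_ind, H5T_NATIVE_INT, mspace, fspace, H5P_DEFAULT, buf);    independent write
+ *     H5Dwrite(dset_col, H5T_NATIVE_INT, mspace, fspace, coll_dxpl, buf);      collective write
+ *
+ *     H5Dread(dset_ind, H5T_NATIVE_INT, mspace, fspace, H5P_DEFAULT, out_ind);
+ *     H5Dread(dset_col, H5T_NATIVE_INT, mspace, fspace, H5P_DEFAULT, out_col);
+ *
+ *     for (i = 0; i < nelem; i++)
+ *         if (out_ind[i] != out_col[i])
+ *             nerrors++;
+ *
+ * Matching buffers imply the collective write worked; the collective-read
+ * check below follows the same pattern with the read done through a
+ * collective transfer property list instead.
+ */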
+
+#include "hdf5.h"
+#if 0
+#include "H5private.h"
+#endif
+#include "testphdf5.h"
+
+#define LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG 0
+
+static void coll_write_test(int chunk_factor);
+static void coll_read_test(void);
+
+/*-------------------------------------------------------------------------
+ * Function: coll_irregular_cont_write
+ *
+ * Purpose: Wrapper to test collective irregular hyperslab write in
+ * contiguous storage
+ *
+ * Return: Success: 0
+ *
+ * Failure: -1
+ *
+ * Programmer: Unknown
+ * Dec 2nd, 2004
+ *
+ *-------------------------------------------------------------------------
+ */
+void
+coll_irregular_cont_write(void)
+{
+ int mpi_rank;
+
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(" API functions for basic file dataset, or dataset more aren't supported with this "
+ "connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
+
+ coll_write_test(0);
+}
+
+/*-------------------------------------------------------------------------
+ * Function: coll_irregular_cont_read
+ *
+ * Purpose: Wrapper to test collective irregular hyperslab read in
+ * contiguous storage
+ *
+ * Return: Success: 0
+ *
+ * Failure: -1
+ *
+ * Programmer: Unknown
+ * Dec 2nd, 2004
+ *
+ *-------------------------------------------------------------------------
+ */
+void
+coll_irregular_cont_read(void)
+{
+ int mpi_rank;
+
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(" API functions for basic file dataset, or dataset more aren't supported with this "
+ "connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
+
+ coll_read_test();
+}
+
+/*-------------------------------------------------------------------------
+ * Function: coll_irregular_simple_chunk_write
+ *
+ * Purpose: Wrapper to test collective irregular hyperslab write in
+ * chunk storage (1 chunk)
+ *
+ * Return: Success: 0
+ *
+ * Failure: -1
+ *
+ * Programmer: Unknown
+ * Dec 2nd, 2004
+ *
+ *-------------------------------------------------------------------------
+ */
+void
+coll_irregular_simple_chunk_write(void)
+{
+ int mpi_rank;
+
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(" API functions for basic file dataset, or dataset more aren't supported with this "
+ "connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
+
+ coll_write_test(1);
+}
+
+/*-------------------------------------------------------------------------
+ * Function: coll_irregular_simple_chunk_read
+ *
+ * Purpose: Wrapper to test collective irregular hyperslab read in chunk
+ * storage (1 chunk)
+ *
+ * Return: Success: 0
+ *
+ * Failure: -1
+ *
+ * Programmer: Unknown
+ * Dec 2nd, 2004
+ *
+ *-------------------------------------------------------------------------
+ */
+void
+coll_irregular_simple_chunk_read(void)
+{
+ int mpi_rank;
+
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(" API functions for basic file dataset, or dataset more aren't supported with this "
+ "connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
+
+ coll_read_test();
+}
+
+/*-------------------------------------------------------------------------
+ * Function: coll_irregular_complex_chunk_write
+ *
+ * Purpose: Wrapper to test collective irregular hyperslab write in chunk
+ * storage (4 chunks)
+ *
+ * Return: Success: 0
+ *
+ * Failure: -1
+ *
+ * Programmer: Unknown
+ * Dec 2nd, 2004
+ *
+ *-------------------------------------------------------------------------
+ */
+void
+coll_irregular_complex_chunk_write(void)
+{
+ int mpi_rank;
+
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(" API functions for basic file dataset, or dataset more aren't supported with this "
+ "connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
+
+ coll_write_test(4);
+}
+
+/*-------------------------------------------------------------------------
+ * Function: coll_irregular_complex_chunk_read
+ *
+ * Purpose: Wrapper to test collective irregular hyperslab read in chunk
+ * storage (4 chunks)
+ *
+ * Return: Success: 0
+ *
+ * Failure: -1
+ *
+ * Programmer: Unknown
+ * Dec 2nd, 2004
+ *
+ *-------------------------------------------------------------------------
+ */
+void
+coll_irregular_complex_chunk_read(void)
+{
+ int mpi_rank;
+
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC) ||
+ !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_MORE)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(" API functions for basic file dataset, or dataset more aren't supported with this "
+ "connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
+
+ coll_read_test();
+}
+
+/*-------------------------------------------------------------------------
+ * Function: coll_write_test
+ *
+ * Purpose: To test collective irregular hyperslab write in chunked
+ * storage
+ * Input: number of chunks in each dimension;
+ * if the number is 0, contiguous storage is used
+ * Return: Success: 0
+ *
+ * Failure: -1
+ *
+ * Programmer: Unknown
+ * Dec 2nd, 2004
+ *
+ *-------------------------------------------------------------------------
+ */
+void
+coll_write_test(int chunk_factor)
+{
+
+ const char *filename;
+ hid_t facc_plist, dxfer_plist, dcrt_plist;
+ hid_t file, datasetc, dataseti; /* File and dataset identifiers */
+ hid_t mspaceid1, mspaceid, fspaceid, fspaceid1; /* Dataspace identifiers */
+
+ hsize_t mdim1[1]; /* Dimension size of the first dataset (in memory) */
+ hsize_t fsdim[2]; /* Dimension sizes of the dataset (on disk) */
+ hsize_t mdim[2]; /* Dimension sizes of the dataset in memory when we
+ * read selection from the dataset on the disk
+ */
+
+ hsize_t start[2]; /* Start of hyperslab */
+ hsize_t stride[2]; /* Stride of hyperslab */
+ hsize_t count[2]; /* Block count */
+ hsize_t block[2]; /* Block sizes */
+ hsize_t chunk_dims[2];
+
+ herr_t ret;
+ int i;
+ int fillvalue = 0; /* Fill value for the dataset */
+
+ int *matrix_out = NULL;
+ int *matrix_out1 = NULL; /* Buffer to read from the dataset */
+ int *vector = NULL;
+
+ int mpi_size, mpi_rank;
+
+ MPI_Comm comm = MPI_COMM_WORLD;
+ MPI_Info info = MPI_INFO_NULL;
+
+ /*set up MPI parameters */
+ MPI_Comm_size(comm, &mpi_size);
+ MPI_Comm_rank(comm, &mpi_rank);
+
+ /* Obtain file name */
+ filename = PARATESTFILE /* GetTestParameters() */;
+
+ /*
+ * Buffers' initialization.
+ */
+
+ mdim1[0] = (hsize_t)(MSPACE1_DIM * mpi_size);
+ mdim[0] = MSPACE_DIM1;
+ mdim[1] = (hsize_t)(MSPACE_DIM2 * mpi_size);
+ fsdim[0] = FSPACE_DIM1;
+ fsdim[1] = (hsize_t)(FSPACE_DIM2 * mpi_size);
+
+ vector = (int *)HDmalloc(sizeof(int) * (size_t)mdim1[0] * (size_t)mpi_size);
+ matrix_out = (int *)HDmalloc(sizeof(int) * (size_t)mdim[0] * (size_t)mdim[1] * (size_t)mpi_size);
+ matrix_out1 = (int *)HDmalloc(sizeof(int) * (size_t)mdim[0] * (size_t)mdim[1] * (size_t)mpi_size);
+
+ HDmemset(vector, 0, sizeof(int) * (size_t)mdim1[0] * (size_t)mpi_size);
+ vector[0] = vector[MSPACE1_DIM * mpi_size - 1] = -1;
+ for (i = 1; i < MSPACE1_DIM * mpi_size - 1; i++)
+ vector[i] = (int)i;
+
+ /* Grab file access property list */
+ facc_plist = create_faccess_plist(comm, info, facc_type);
+ VRFY((facc_plist >= 0), "");
+
+ /*
+ * Create a file.
+ */
+ file = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, facc_plist);
+ VRFY((file >= 0), "H5Fcreate succeeded");
+
+ /*
+ * Create property list for a dataset and set up fill values.
+ */
+ dcrt_plist = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY((dcrt_plist >= 0), "");
+
+ ret = H5Pset_fill_value(dcrt_plist, H5T_NATIVE_INT, &fillvalue);
+ VRFY((ret >= 0), "Fill value creation property list succeeded");
+
+ if (chunk_factor != 0) {
+ chunk_dims[0] = fsdim[0] / (hsize_t)chunk_factor;
+ chunk_dims[1] = fsdim[1] / (hsize_t)chunk_factor;
+ ret = H5Pset_chunk(dcrt_plist, 2, chunk_dims);
+ VRFY((ret >= 0), "chunk creation property list succeeded");
+ }
+
+ /*
+ *
+ * Create dataspace for the first dataset on disk.
+ * dim1 = 9
+ * dim2 = 3600
+ *
+ *
+ */
+ fspaceid = H5Screate_simple(FSPACE_RANK, fsdim, NULL);
+ VRFY((fspaceid >= 0), "file dataspace created succeeded");
+
+ /*
+ * Create dataset in the file. Notice that creation
+ * property list dcrt_plist is used.
+ */
+ datasetc =
+ H5Dcreate2(file, "collect_write", H5T_NATIVE_INT, fspaceid, H5P_DEFAULT, dcrt_plist, H5P_DEFAULT);
+ VRFY((datasetc >= 0), "dataset created succeeded");
+
+ dataseti =
+ H5Dcreate2(file, "independ_write", H5T_NATIVE_INT, fspaceid, H5P_DEFAULT, dcrt_plist, H5P_DEFAULT);
+ VRFY((dataseti >= 0), "dataset created succeeded");
+
+ /* The First selection for FILE
+ *
+ * block (3,2)
+ * stride(4,3)
+ * count (1,768/mpi_size)
+ * start (0,1+768*3*mpi_rank/mpi_size)
+ *
+ */
+
+ start[0] = FHSTART0;
+ start[1] = (hsize_t)(FHSTART1 + mpi_rank * FHSTRIDE1 * FHCOUNT1);
+ stride[0] = FHSTRIDE0;
+ stride[1] = FHSTRIDE1;
+ count[0] = FHCOUNT0;
+ count[1] = FHCOUNT1;
+ block[0] = FHBLOCK0;
+ block[1] = FHBLOCK1;
+
+ ret = H5Sselect_hyperslab(fspaceid, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "hyperslab selection succeeded");
+
+ /* The Second selection for FILE
+ *
+ * block (3,768)
+ * stride (1,1)
+ * count (1,1)
+ * start (4,768*mpi_rank/mpi_size)
+ *
+ */
+
+ start[0] = SHSTART0;
+ start[1] = (hsize_t)(SHSTART1 + SHCOUNT1 * SHBLOCK1 * mpi_rank);
+ stride[0] = SHSTRIDE0;
+ stride[1] = SHSTRIDE1;
+ count[0] = SHCOUNT0;
+ count[1] = SHCOUNT1;
+ block[0] = SHBLOCK0;
+ block[1] = SHBLOCK1;
+
+ ret = H5Sselect_hyperslab(fspaceid, H5S_SELECT_OR, start, stride, count, block);
+ VRFY((ret >= 0), "hyperslab selection succeeded");
+
+ /*
+ * Create dataspace for the first dataset in the memory
+ * dim1 = 27000
+ *
+ */
+ mspaceid1 = H5Screate_simple(MSPACE1_RANK, mdim1, NULL);
+ VRFY((mspaceid1 >= 0), "memory dataspace created succeeded");
+
+ /*
+ * Memory space is 1-D, this is a good test to check
+ * whether a span-tree derived datatype needs to be built.
+ * block 1
+ * stride 1
+ * count 6912/mpi_size
+ * start 1
+ *
+ */
+ start[0] = MHSTART0;
+ stride[0] = MHSTRIDE0;
+ count[0] = MHCOUNT0;
+ block[0] = MHBLOCK0;
+
+ ret = H5Sselect_hyperslab(mspaceid1, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "hyperslab selection succeeded");
+
+ /* independent write */
+ ret = H5Dwrite(dataseti, H5T_NATIVE_INT, mspaceid1, fspaceid, H5P_DEFAULT, vector);
+ VRFY((ret >= 0), "dataset independent write succeed");
+
+ dxfer_plist = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((dxfer_plist >= 0), "");
+
+ ret = H5Pset_dxpl_mpio(dxfer_plist, H5FD_MPIO_COLLECTIVE);
+ VRFY((ret >= 0), "MPIO data transfer property list succeed");
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(dxfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret >= 0), "set independent IO collectively succeeded");
+ }
+
+ /* collective write */
+ ret = H5Dwrite(datasetc, H5T_NATIVE_INT, mspaceid1, fspaceid, dxfer_plist, vector);
+ VRFY((ret >= 0), "dataset collective write succeed");
+
+ ret = H5Sclose(mspaceid1);
+ VRFY((ret >= 0), "");
+
+ ret = H5Sclose(fspaceid);
+ VRFY((ret >= 0), "");
+
+ /*
+ * Close dataset.
+ */
+ ret = H5Dclose(datasetc);
+ VRFY((ret >= 0), "");
+
+ ret = H5Dclose(dataseti);
+ VRFY((ret >= 0), "");
+
+ /*
+ * Close the file.
+ */
+ ret = H5Fclose(file);
+ VRFY((ret >= 0), "");
+ /*
+ * Close property list
+ */
+
+ ret = H5Pclose(facc_plist);
+ VRFY((ret >= 0), "");
+ ret = H5Pclose(dxfer_plist);
+ VRFY((ret >= 0), "");
+ ret = H5Pclose(dcrt_plist);
+ VRFY((ret >= 0), "");
+
+ /*
+ * Open the file.
+ */
+
+ /***
+
+ For testing collective hyperslab selection write:
+ in this test, we use an independent read to check
+ the correctness of the collective write against the
+ independent write.
+
+ In order to test this feature thoroughly, we choose
+ a different selection set for reading the data out.
+
+
+ ***/
+
+ /* Obtain file access property list with MPI-IO driver */
+ facc_plist = create_faccess_plist(comm, info, facc_type);
+ VRFY((facc_plist >= 0), "");
+
+ file = H5Fopen(filename, H5F_ACC_RDONLY, facc_plist);
+ VRFY((file >= 0), "H5Fopen succeeded");
+
+ /*
+ * Open the dataset.
+ */
+ datasetc = H5Dopen2(file, "collect_write", H5P_DEFAULT);
+ VRFY((datasetc >= 0), "H5Dopen2 succeeded");
+
+ dataseti = H5Dopen2(file, "independ_write", H5P_DEFAULT);
+ VRFY((dataseti >= 0), "H5Dopen2 succeeded");
+
+ /*
+ * Get dataspace of the open dataset.
+ */
+ fspaceid = H5Dget_space(datasetc);
+ VRFY((fspaceid >= 0), "file dataspace obtained succeeded");
+
+ fspaceid1 = H5Dget_space(dataseti);
+ VRFY((fspaceid1 >= 0), "file dataspace obtained succeeded");
+
+ /* The First selection for FILE to read
+ *
+ * block (1,1)
+ * stride (1,1)
+ * count (3,768/mpi_size)
+ * start (1,2+768*mpi_rank/mpi_size)
+ *
+ */
+ start[0] = RFFHSTART0;
+ start[1] = (hsize_t)(RFFHSTART1 + mpi_rank * RFFHCOUNT1);
+ block[0] = RFFHBLOCK0;
+ block[1] = RFFHBLOCK1;
+ stride[0] = RFFHSTRIDE0;
+ stride[1] = RFFHSTRIDE1;
+ count[0] = RFFHCOUNT0;
+ count[1] = RFFHCOUNT1;
+
+ /* The first selection of the dataset generated by collective write */
+ ret = H5Sselect_hyperslab(fspaceid, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "hyperslab selection succeeded");
+
+ /* The first selection of the dataset generated by independent write */
+ ret = H5Sselect_hyperslab(fspaceid1, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "hyperslab selection succeeded");
+
+ /* The Second selection for FILE to read
+ *
+ * block (1,1)
+ * stride (1,1)
+ * count (3,1536/mpi_size)
+ * start (2,4+1536*mpi_rank/mpi_size)
+ *
+ */
+
+ start[0] = RFSHSTART0;
+ start[1] = (hsize_t)(RFSHSTART1 + RFSHCOUNT1 * mpi_rank);
+ block[0] = RFSHBLOCK0;
+ block[1] = RFSHBLOCK1;
+ stride[0] = RFSHSTRIDE0;
+ stride[1] = RFSHSTRIDE0;
+ count[0] = RFSHCOUNT0;
+ count[1] = RFSHCOUNT1;
+
+ /* The second selection of the dataset generated by collective write */
+ ret = H5Sselect_hyperslab(fspaceid, H5S_SELECT_OR, start, stride, count, block);
+ VRFY((ret >= 0), "hyperslab selection succeeded");
+
+ /* The second selection of the dataset generated by independent write */
+ ret = H5Sselect_hyperslab(fspaceid1, H5S_SELECT_OR, start, stride, count, block);
+ VRFY((ret >= 0), "hyperslab selection succeeded");
+
+ /*
+ * Create memory dataspace.
+ * rank = 2
+ * mdim1 = 9
+ * mdim2 = 3600
+ *
+ */
+ mspaceid = H5Screate_simple(MSPACE_RANK, mdim, NULL);
+
+ /*
+ * Select two hyperslabs in memory. The hyperslabs have the same
+ * size and shape as the selected hyperslabs for the file dataspace;
+ * only the starting point is different.
+ * The first selection
+ * block (1,1)
+ * stride (1,1)
+ * count (3,768/mpi_size)
+ * start (0,768*mpi_rank/mpi_size)
+ *
+ */
+
+ start[0] = RMFHSTART0;
+ start[1] = (hsize_t)(RMFHSTART1 + mpi_rank * RMFHCOUNT1);
+ block[0] = RMFHBLOCK0;
+ block[1] = RMFHBLOCK1;
+ stride[0] = RMFHSTRIDE0;
+ stride[1] = RMFHSTRIDE1;
+ count[0] = RMFHCOUNT0;
+ count[1] = RMFHCOUNT1;
+
+ ret = H5Sselect_hyperslab(mspaceid, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "hyperslab selection succeeded");
+
+ /*
+ * Select two hyperslabs in memory. The hyperslabs have the same
+ * size and shape as the selected hyperslabs for the file dataspace;
+ * only the starting point is different.
+ * The second selection
+ * block (1,1)
+ * stride(1,1)
+ * count (3,1536/mpi_size)
+ * start (1,2+1536*mpi_rank/mpi_size)
+ *
+ */
+ start[0] = RMSHSTART0;
+ start[1] = (hsize_t)(RMSHSTART1 + mpi_rank * RMSHCOUNT1);
+ block[0] = RMSHBLOCK0;
+ block[1] = RMSHBLOCK1;
+ stride[0] = RMSHSTRIDE0;
+ stride[1] = RMSHSTRIDE1;
+ count[0] = RMSHCOUNT0;
+ count[1] = RMSHCOUNT1;
+
+ ret = H5Sselect_hyperslab(mspaceid, H5S_SELECT_OR, start, stride, count, block);
+ VRFY((ret >= 0), "hyperslab selection succeeded");
+
+ /*
+ * Initialize data buffer.
+ */
+
+ HDmemset(matrix_out, 0, sizeof(int) * (size_t)MSPACE_DIM1 * (size_t)MSPACE_DIM2 * (size_t)mpi_size);
+ HDmemset(matrix_out1, 0, sizeof(int) * (size_t)MSPACE_DIM1 * (size_t)MSPACE_DIM2 * (size_t)mpi_size);
+ /*
+ * Read data back to the buffer matrix_out.
+ */
+
+ ret = H5Dread(datasetc, H5T_NATIVE_INT, mspaceid, fspaceid, H5P_DEFAULT, matrix_out);
+ VRFY((ret >= 0), "H5D independent read succeed");
+
+ ret = H5Dread(dataseti, H5T_NATIVE_INT, mspaceid, fspaceid, H5P_DEFAULT, matrix_out1);
+ VRFY((ret >= 0), "H5D independent read succeed");
+
+ ret = 0;
+
+ for (i = 0; i < MSPACE_DIM1 * MSPACE_DIM2 * mpi_size; i++) {
+ if (matrix_out[i] != matrix_out1[i])
+ ret = -1;
+ if (ret < 0)
+ break;
+ }
+
+ VRFY((ret >= 0), "H5D irregular collective write succeed");
+
+ /*
+ * Close memory file and memory dataspaces.
+ */
+ ret = H5Sclose(mspaceid);
+ VRFY((ret >= 0), "");
+ ret = H5Sclose(fspaceid);
+ VRFY((ret >= 0), "");
+
+ /*
+ * Close dataset.
+ */
+ ret = H5Dclose(dataseti);
+ VRFY((ret >= 0), "");
+
+ ret = H5Dclose(datasetc);
+ VRFY((ret >= 0), "");
+
+ /*
+ * Close property list
+ */
+
+ ret = H5Pclose(facc_plist);
+ VRFY((ret >= 0), "");
+
+ /*
+ * Close the file.
+ */
+ ret = H5Fclose(file);
+ VRFY((ret >= 0), "");
+
+ if (vector)
+ HDfree(vector);
+ if (matrix_out)
+ HDfree(matrix_out);
+ if (matrix_out1)
+ HDfree(matrix_out1);
+
+ return;
+}
+
+/*-------------------------------------------------------------------------
+ * Function: coll_read_test
+ *
+ * Purpose: To test collective irregular hyperslab read in chunked
+ * or contiguous storage, reading back a dataset written
+ * by coll_write_test
+ * Return: Success: 0
+ *
+ * Failure: -1
+ *
+ * Programmer: Unknown
+ * Dec 2nd, 2004
+ *
+ *-------------------------------------------------------------------------
+ */
+static void
+coll_read_test(void)
+{
+
+ const char *filename;
+ hid_t facc_plist, dxfer_plist;
+ hid_t file, dataseti; /* File and dataset identifiers */
+ hid_t mspaceid, fspaceid1; /* Dataspace identifiers */
+
+ /* Dimension sizes of the dataset (on disk) */
+ hsize_t mdim[2]; /* Dimension sizes of the dataset in memory when we
+ * read selection from the dataset on the disk
+ */
+
+ hsize_t start[2]; /* Start of hyperslab */
+ hsize_t stride[2]; /* Stride of hyperslab */
+ hsize_t count[2]; /* Block count */
+ hsize_t block[2]; /* Block sizes */
+ herr_t ret;
+
+ int i;
+
+ int *matrix_out;
+ int *matrix_out1; /* Buffer to read from the dataset */
+
+ int mpi_size, mpi_rank;
+
+ MPI_Comm comm = MPI_COMM_WORLD;
+ MPI_Info info = MPI_INFO_NULL;
+
+ /*set up MPI parameters */
+ MPI_Comm_size(comm, &mpi_size);
+ MPI_Comm_rank(comm, &mpi_rank);
+
+ /* Obtain file name */
+ filename = PARATESTFILE /* GetTestParameters() */;
+
+ /* Initialize the buffer */
+
+ mdim[0] = MSPACE_DIM1;
+ mdim[1] = (hsize_t)(MSPACE_DIM2 * mpi_size);
+ matrix_out = (int *)HDmalloc(sizeof(int) * (size_t)MSPACE_DIM1 * (size_t)MSPACE_DIM2 * (size_t)mpi_size);
+ matrix_out1 = (int *)HDmalloc(sizeof(int) * (size_t)MSPACE_DIM1 * (size_t)MSPACE_DIM2 * (size_t)mpi_size);
+
+ /*** For testing collective hyperslab selection read ***/
+
+ /* Obtain file access property list */
+ facc_plist = create_faccess_plist(comm, info, facc_type);
+ VRFY((facc_plist >= 0), "");
+
+ /*
+ * Open the file.
+ */
+ file = H5Fopen(filename, H5F_ACC_RDONLY, facc_plist);
+ VRFY((file >= 0), "H5Fopen succeeded");
+
+ /*
+ * Open the dataset.
+ */
+ dataseti = H5Dopen2(file, "independ_write", H5P_DEFAULT);
+ VRFY((dataseti >= 0), "H5Dopen2 succeeded");
+
+ /*
+ * Get dataspace of the open dataset.
+ */
+ fspaceid1 = H5Dget_space(dataseti);
+ VRFY((fspaceid1 >= 0), "file dataspace obtained succeeded");
+
+ /* The First selection for FILE to read
+ *
+ * block (1,1)
+ * stride (1,1)
+ * count (3,768/mpi_size)
+ * start (1,2+768*mpi_rank/mpi_size)
+ *
+ */
+ start[0] = RFFHSTART0;
+ start[1] = (hsize_t)(RFFHSTART1 + mpi_rank * RFFHCOUNT1);
+ block[0] = RFFHBLOCK0;
+ block[1] = RFFHBLOCK1;
+ stride[0] = RFFHSTRIDE0;
+ stride[1] = RFFHSTRIDE1;
+ count[0] = RFFHCOUNT0;
+ count[1] = RFFHCOUNT1;
+
+ ret = H5Sselect_hyperslab(fspaceid1, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "hyperslab selection succeeded");
+
+ /* The Second selection for FILE to read
+ *
+ * block (1,1)
+ * stride (1,1)
+ * count (3,1536/mpi_size)
+ * start (2,4+1536*mpi_rank/mpi_size)
+ *
+ */
+ start[0] = RFSHSTART0;
+ start[1] = (hsize_t)(RFSHSTART1 + RFSHCOUNT1 * mpi_rank);
+ block[0] = RFSHBLOCK0;
+ block[1] = RFSHBLOCK1;
+ stride[0] = RFSHSTRIDE0;
+ stride[1] = RFSHSTRIDE0;
+ count[0] = RFSHCOUNT0;
+ count[1] = RFSHCOUNT1;
+
+ ret = H5Sselect_hyperslab(fspaceid1, H5S_SELECT_OR, start, stride, count, block);
+ VRFY((ret >= 0), "hyperslab selection succeeded");
+
+ /*
+ * Create memory dataspace.
+ */
+ mspaceid = H5Screate_simple(MSPACE_RANK, mdim, NULL);
+
+ /*
+ * Select two hyperslabs in memory. The hyperslabs have the same
+ * size and shape as the selected hyperslabs for the file dataspace.
+ * Only the starting point is different.
+ * The first selection
+ * block (1,1)
+ * stride (1,1)
+ * count (3,768/mpi_size)
+ * start (0,768*mpi_rank/mpi_size)
+ *
+ */
+
+ start[0] = RMFHSTART0;
+ start[1] = (hsize_t)(RMFHSTART1 + mpi_rank * RMFHCOUNT1);
+ block[0] = RMFHBLOCK0;
+ block[1] = RMFHBLOCK1;
+ stride[0] = RMFHSTRIDE0;
+ stride[1] = RMFHSTRIDE1;
+ count[0] = RMFHCOUNT0;
+ count[1] = RMFHCOUNT1;
+ ret = H5Sselect_hyperslab(mspaceid, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "hyperslab selection succeeded");
+
+ /*
+ * Select two hyperslabs in memory. The hyperslabs have the same
+ * size and shape as the selected hyperslabs for the file dataspace;
+ * only the starting point is different.
+ * The second selection
+ * block (1,1)
+ * stride(1,1)
+ * count (3,1536/mpi_size)
+ * start (1,2+1536*mpi_rank/mpi_size)
+ *
+ */
+ start[0] = RMSHSTART0;
+ start[1] = (hsize_t)(RMSHSTART1 + mpi_rank * RMSHCOUNT1);
+ block[0] = RMSHBLOCK0;
+ block[1] = RMSHBLOCK1;
+ stride[0] = RMSHSTRIDE0;
+ stride[1] = RMSHSTRIDE1;
+ count[0] = RMSHCOUNT0;
+ count[1] = RMSHCOUNT1;
+ ret = H5Sselect_hyperslab(mspaceid, H5S_SELECT_OR, start, stride, count, block);
+ VRFY((ret >= 0), "hyperslab selection succeeded");
+
+ /*
+ * Initialize data buffer.
+ */
+
+ HDmemset(matrix_out, 0, sizeof(int) * (size_t)MSPACE_DIM1 * (size_t)MSPACE_DIM2 * (size_t)mpi_size);
+ HDmemset(matrix_out1, 0, sizeof(int) * (size_t)MSPACE_DIM1 * (size_t)MSPACE_DIM2 * (size_t)mpi_size);
+
+ /*
+ * Read data back to the buffer matrix_out.
+ */
+
+ dxfer_plist = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((dxfer_plist >= 0), "");
+
+ ret = H5Pset_dxpl_mpio(dxfer_plist, H5FD_MPIO_COLLECTIVE);
+ VRFY((ret >= 0), "MPIO data transfer property list succeed");
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(dxfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret >= 0), "set independent IO collectively succeeded");
+ }
+
+ /* Collective read */
+ ret = H5Dread(dataseti, H5T_NATIVE_INT, mspaceid, fspaceid1, dxfer_plist, matrix_out);
+ VRFY((ret >= 0), "H5D collecive read succeed");
+
+ ret = H5Pclose(dxfer_plist);
+ VRFY((ret >= 0), "");
+
+ /* Independent read */
+ ret = H5Dread(dataseti, H5T_NATIVE_INT, mspaceid, fspaceid1, H5P_DEFAULT, matrix_out1);
+ VRFY((ret >= 0), "H5D independent read succeed");
+
+ ret = 0;
+ for (i = 0; i < MSPACE_DIM1 * MSPACE_DIM2 * mpi_size; i++) {
+ if (matrix_out[i] != matrix_out1[i])
+ ret = -1;
+ if (ret < 0)
+ break;
+ }
+ VRFY((ret >= 0), "H5D contiguous irregular collective read succeed");
+
+ /*
+ * Free read buffers.
+ */
+ HDfree(matrix_out);
+ HDfree(matrix_out1);
+
+ /*
+ * Close memory file and memory dataspaces.
+ */
+ ret = H5Sclose(mspaceid);
+ VRFY((ret >= 0), "");
+ ret = H5Sclose(fspaceid1);
+ VRFY((ret >= 0), "");
+
+ /*
+ * Close dataset.
+ */
+ ret = H5Dclose(dataseti);
+ VRFY((ret >= 0), "");
+
+ /*
+ * Close property list
+ */
+ ret = H5Pclose(facc_plist);
+ VRFY((ret >= 0), "");
+
+ /*
+ * Close the file.
+ */
+ ret = H5Fclose(file);
+ VRFY((ret >= 0), "");
+
+ return;
+}
+
+/****************************************************************
+**
+** lower_dim_size_comp_test__select_checker_board():
+**
+** Given a dataspace of tgt_rank with dimensions:
+**
+** (mpi_size + 1), edge_size, ... , edge_size
+**
+** and a checker_edge_size, select a checker
+** board selection of a sel_rank (sel_rank < tgt_rank)
+** dimensional slice through the dataspace parallel to the
+** sel_rank fastest changing indices, with origin (in the
+** higher indices) as indicated by the start array.
+**
+** Note that this function is hard-coded to presume a
+** maximum dataspace rank of 5.
+**
+** While this maximum is declared as a constant, increasing
+** it will require extensive coding in addition to changing
+** the value of the constant.
+**
+** JRM -- 11/11/09
+**
+****************************************************************/
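+
+/*
+ * Illustrative 2-D sketch of the selection pattern built by the function
+ * below (assumptions: a square dataspace sid of side edge and a checker
+ * edge ce; these names are placeholders and the snippet is not part of
+ * the test):
+ *
+ *     hsize_t start[2], count[2];
+ *     hsize_t stride[2] = {2 * ce, 2 * ce};
+ *     hsize_t block[2]  = {ce, ce};
+ *
+ *     start[0] = start[1] = 0;
+ *     count[0] = count[1] = (edge + (2 * ce) - 1) / (2 * ce);
+ *     H5Sselect_hyperslab(sid, H5S_SELECT_SET, start, stride, count, block);
+ *
+ *     start[0] = start[1] = ce;
+ *     count[0] = count[1] = ((edge - ce) + (2 * ce) - 1) / (2 * ce);
+ *     H5Sselect_hyperslab(sid, H5S_SELECT_OR, start, stride, count, block);
+ *
+ * The SET call picks the checkers whose row and column indices are both
+ * even, the OR call adds those where both are odd, and a final
+ * H5S_SELECT_AND against the full extent clips any overhang, just as the
+ * 5-D code below does.
+ */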
+
+#define LDSCT_DS_RANK 5
+#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
+#define LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK 0
+#endif
+
+#define LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG 0
+
+static void
+lower_dim_size_comp_test__select_checker_board(const int mpi_rank, const hid_t tgt_sid, const int tgt_rank,
+ const hsize_t dims[LDSCT_DS_RANK], const int checker_edge_size,
+ const int sel_rank, hsize_t sel_start[])
+{
+#if LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG
+ const char *fcnName = "lower_dim_size_comp_test__select_checker_board():";
+#endif
+ hbool_t first_selection = TRUE;
+ int i, j, k, l, m;
+ int ds_offset;
+ int sel_offset;
+ const int test_max_rank = LDSCT_DS_RANK; /* must update code if */
+ /* this changes */
+ hsize_t base_count;
+ hsize_t offset_count;
+ hsize_t start[LDSCT_DS_RANK];
+ hsize_t stride[LDSCT_DS_RANK];
+ hsize_t count[LDSCT_DS_RANK];
+ hsize_t block[LDSCT_DS_RANK];
+ herr_t ret; /* Generic return value */
+
+#if LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG
+ if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) {
+ HDfprintf(stdout, "%s:%d: dims/checker_edge_size = %d %d %d %d %d / %d\n", fcnName, mpi_rank,
+ (int)dims[0], (int)dims[1], (int)dims[2], (int)dims[3], (int)dims[4], checker_edge_size);
+ }
+#endif /* LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG */
+
+ HDassert(0 < checker_edge_size);
+ HDassert(0 < sel_rank);
+ HDassert(sel_rank <= tgt_rank);
+ HDassert(tgt_rank <= test_max_rank);
+ HDassert(test_max_rank <= LDSCT_DS_RANK);
+
+ sel_offset = test_max_rank - sel_rank;
+ HDassert(sel_offset >= 0);
+
+ ds_offset = test_max_rank - tgt_rank;
+ HDassert(ds_offset >= 0);
+ HDassert(ds_offset <= sel_offset);
+
+ HDassert((hsize_t)checker_edge_size <= dims[sel_offset]);
+ HDassert(dims[sel_offset] == 10);
+
+#if LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG
+ if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) {
+ HDfprintf(stdout, "%s:%d: sel_rank/sel_offset = %d/%d.\n", fcnName, mpi_rank, sel_rank, sel_offset);
+ HDfprintf(stdout, "%s:%d: tgt_rank/ds_offset = %d/%d.\n", fcnName, mpi_rank, tgt_rank, ds_offset);
+ }
+#endif /* LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG */
+
+ /* First, compute the base count (which assumes start == 0
+ * for the associated offset) and offset_count (which
+ * assumes start == checker_edge_size for the associated
+ * offset).
+ *
+ * Note that the following computation depends on the C99
+ * requirement that integer division discard any fraction
+ * (truncation towards zero) to function correctly. As we
+ * now require C99, this shouldn't be a problem, but noting
+ * it may save us some pain if we are ever obliged to support
+ * pre-C99 compilers again.
+ */
+
+ base_count = dims[sel_offset] / (hsize_t)(checker_edge_size * 2);
+
+ if ((dims[sel_rank] % (hsize_t)(checker_edge_size * 2)) > 0) {
+
+ base_count++;
+ }
+
+ offset_count =
+ (hsize_t)((dims[sel_offset] - (hsize_t)checker_edge_size) / ((hsize_t)(checker_edge_size * 2)));
+
+ if (((dims[sel_rank] - (hsize_t)checker_edge_size) % ((hsize_t)(checker_edge_size * 2))) > 0) {
+
+ offset_count++;
+ }
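+
+ /* Worked example (illustrative only): the two counts above are in effect
+ * ceiling divisions. With dims[sel_offset] = 10, as asserted above, and a
+ * hypothetical checker_edge_size of 3, the divisor is 2 * 3 = 6, so
+ * base_count = ceil(10 / 6) = 2 and offset_count = ceil((10 - 3) / 6) =
+ * ceil(7 / 6) = 2.
+ */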
+
+#if LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG
+ if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) {
+ HDfprintf(stdout, "%s:%d: base_count/offset_count = %d/%d.\n", fcnName, mpi_rank, base_count,
+ offset_count);
+ }
+#endif /* LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG */
+
+ /* Now set up the stride and block arrays, and portions of the start
+ * and count arrays that will not be altered during the selection of
+ * the checker board.
+ */
+ i = 0;
+ while (i < ds_offset) {
+
+ /* these values should never be used */
+ start[i] = 0;
+ stride[i] = 0;
+ count[i] = 0;
+ block[i] = 0;
+
+ i++;
+ }
+
+ while (i < sel_offset) {
+
+ start[i] = sel_start[i];
+ stride[i] = 2 * dims[i];
+ count[i] = 1;
+ block[i] = 1;
+
+ i++;
+ }
+
+ while (i < test_max_rank) {
+
+ stride[i] = (hsize_t)(2 * checker_edge_size);
+ block[i] = (hsize_t)checker_edge_size;
+
+ i++;
+ }
+
+ i = 0;
+ do {
+ if (0 >= sel_offset) {
+
+ if (i == 0) {
+
+ start[0] = 0;
+ count[0] = base_count;
+ }
+ else {
+
+ start[0] = (hsize_t)checker_edge_size;
+ count[0] = offset_count;
+ }
+ }
+
+ j = 0;
+ do {
+ if (1 >= sel_offset) {
+
+ if (j == 0) {
+
+ start[1] = 0;
+ count[1] = base_count;
+ }
+ else {
+
+ start[1] = (hsize_t)checker_edge_size;
+ count[1] = offset_count;
+ }
+ }
+
+ k = 0;
+ do {
+ if (2 >= sel_offset) {
+
+ if (k == 0) {
+
+ start[2] = 0;
+ count[2] = base_count;
+ }
+ else {
+
+ start[2] = (hsize_t)checker_edge_size;
+ count[2] = offset_count;
+ }
+ }
+
+ l = 0;
+ do {
+ if (3 >= sel_offset) {
+
+ if (l == 0) {
+
+ start[3] = 0;
+ count[3] = base_count;
+ }
+ else {
+
+ start[3] = (hsize_t)checker_edge_size;
+ count[3] = offset_count;
+ }
+ }
+
+ m = 0;
+ do {
+ if (4 >= sel_offset) {
+
+ if (m == 0) {
+
+ start[4] = 0;
+ count[4] = base_count;
+ }
+ else {
+
+ start[4] = (hsize_t)checker_edge_size;
+ count[4] = offset_count;
+ }
+ }
+
+ if (((i + j + k + l + m) % 2) == 0) {
+
+#if LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG
+ if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) {
+
+ HDfprintf(stdout, "%s%d: *** first_selection = %d ***\n", fcnName, mpi_rank,
+ (int)first_selection);
+ HDfprintf(stdout, "%s:%d: i/j/k/l/m = %d/%d/%d/%d/%d\n", fcnName, mpi_rank, i,
+ j, k, l, m);
+ HDfprintf(stdout, "%s:%d: start = %d %d %d %d %d.\n", fcnName, mpi_rank,
+ (int)start[0], (int)start[1], (int)start[2], (int)start[3],
+ (int)start[4]);
+ HDfprintf(stdout, "%s:%d: stride = %d %d %d %d %d.\n", fcnName, mpi_rank,
+ (int)stride[0], (int)stride[1], (int)stride[2], (int)stride[3],
+ (int)stride[4]);
+ HDfprintf(stdout, "%s:%d: count = %d %d %d %d %d.\n", fcnName, mpi_rank,
+ (int)count[0], (int)count[1], (int)count[2], (int)count[3],
+ (int)count[4]);
+ HDfprintf(stdout, "%s:%d: block = %d %d %d %d %d.\n", fcnName, mpi_rank,
+ (int)block[0], (int)block[1], (int)block[2], (int)block[3],
+ (int)block[4]);
+ HDfprintf(stdout, "%s:%d: n-cube extent dims = %d.\n", fcnName, mpi_rank,
+ H5Sget_simple_extent_ndims(tgt_sid));
+ HDfprintf(stdout, "%s:%d: selection rank = %d.\n", fcnName, mpi_rank,
+ sel_rank);
+ }
+#endif
+
+ if (first_selection) {
+
+ first_selection = FALSE;
+
+ ret = H5Sselect_hyperslab(tgt_sid, H5S_SELECT_SET, &(start[ds_offset]),
+ &(stride[ds_offset]), &(count[ds_offset]),
+ &(block[ds_offset]));
+
+ VRFY((ret != FAIL), "H5Sselect_hyperslab(SET) succeeded");
+ }
+ else {
+
+ ret = H5Sselect_hyperslab(tgt_sid, H5S_SELECT_OR, &(start[ds_offset]),
+ &(stride[ds_offset]), &(count[ds_offset]),
+ &(block[ds_offset]));
+
+ VRFY((ret != FAIL), "H5Sselect_hyperslab(OR) succeeded");
+ }
+ }
+
+ m++;
+
+ } while ((m <= 1) && (4 >= sel_offset));
+
+ l++;
+
+ } while ((l <= 1) && (3 >= sel_offset));
+
+ k++;
+
+ } while ((k <= 1) && (2 >= sel_offset));
+
+ j++;
+
+ } while ((j <= 1) && (1 >= sel_offset));
+
+ i++;
+
+ } while ((i <= 1) && (0 >= sel_offset));
+
+#if LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG
+ if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) {
+ HDfprintf(stdout, "%s%d: H5Sget_select_npoints(tgt_sid) = %d.\n", fcnName, mpi_rank,
+ (int)H5Sget_select_npoints(tgt_sid));
+ }
+#endif /* LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG */
+
+ /* Clip the selection back to the dataspace proper. */
+
+ for (i = 0; i < test_max_rank; i++) {
+
+ start[i] = 0;
+ stride[i] = dims[i];
+ count[i] = 1;
+ block[i] = dims[i];
+ }
+
+ ret = H5Sselect_hyperslab(tgt_sid, H5S_SELECT_AND, start, stride, count, block);
+
+ VRFY((ret != FAIL), "H5Sselect_hyperslab(AND) succeeded");
+
+#if LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG
+ if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) {
+ HDfprintf(stdout, "%s%d: H5Sget_select_npoints(tgt_sid) = %d.\n", fcnName, mpi_rank,
+ (int)H5Sget_select_npoints(tgt_sid));
+ HDfprintf(stdout, "%s%d: done.\n", fcnName, mpi_rank);
+ }
+#endif /* LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG */
+
+ return;
+
+} /* lower_dim_size_comp_test__select_checker_board() */
+
+/****************************************************************
+**
+** lower_dim_size_comp_test__verify_data():
+**
+** Examine the supplied buffer to see if it contains the
+** expected data. Return TRUE if it does, and FALSE
+** otherwise.
+**
+** The supplied buffer is presumed to be this process's slice
+** of the target data set. Each such slice will be an
+** n-cube of rank (rank -1) and the supplied edge_size with
+** origin (mpi_rank, 0, ... , 0) in the target data set.
+**
+** Further, the buffer is presumed to be the result of reading
+** or writing a checker board selection of an m (1 <= m <
+** rank) dimensional slice through this process's slice
+** of the target data set. Also, this slice must be parallel
+** to the fastest changing indices.
+**
+** It is further presumed that the buffer was zeroed before
+** the read/write, and that the full target data set (i.e.
+** the buffer/data set for all processes) was initialized
+** with the natural numbers listed in order from the origin
+** along the fastest changing axis.
+**
+** Thus for a 20x10x10 dataset, the value stored in location
+** (x, y, z) (assuming that z is the fastest changing index
+** and x the slowest) is assumed to be:
+**
+** (10 * 10 * x) + (10 * y) + z
+**
+** Further, supposing that this is process 10, this process's
+** slice of the dataset would be a 10 x 10 2-cube with origin
+** (10, 0, 0) in the data set, and would be initialized (prior
+** to the checkerboard selection) as follows:
+**
+** 1000, 1001, 1002, ... 1008, 1009
+** 1010, 1011, 1012, ... 1018, 1019
+** . . . . .
+** . . . . .
+** . . . . .
+** 1090, 1091, 1092, ... 1098, 1099
+**
+** In the case of a read from the process's slice of another
+** data set of different rank, the values expected will have
+** to be adjusted accordingly. This is done via the
+** first_expected_val parameter.
+**
+** Finally, the function presumes that the first element
+** of the buffer resides at the origin of either
+** a selected or an unselected checker. (Translation:
+** if partial checkers appear in the buffer, they will
+** intersect the edges of the n-cube opposite the origin.)
+**
+****************************************************************/
+
+#define LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG 0
+
+static hbool_t
+lower_dim_size_comp_test__verify_data(uint32_t *buf_ptr,
+#if LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG
+ const int mpi_rank,
+#endif /* LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG */
+ const int rank, const int edge_size, const int checker_edge_size,
+ uint32_t first_expected_val, hbool_t buf_starts_in_checker)
+{
+#if LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG
+ const char *fcnName = "lower_dim_size_comp_test__verify_data():";
+#endif
+ hbool_t good_data = TRUE;
+ hbool_t in_checker;
+ hbool_t start_in_checker[5];
+ uint32_t expected_value;
+ uint32_t *val_ptr;
+ int i, j, k, l, m; /* to track position in n-cube */
+ int v, w, x, y, z; /* to track position in checker */
+ const int test_max_rank = 5; /* code changes needed if this is increased */
+
+ HDassert(buf_ptr != NULL);
+ HDassert(0 < rank);
+ HDassert(rank <= test_max_rank);
+ HDassert(edge_size >= 6);
+ HDassert(0 < checker_edge_size);
+ HDassert(checker_edge_size <= edge_size);
+ HDassert(test_max_rank <= LDSCT_DS_RANK);
+
+#if LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG
+ if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) {
+ HDfprintf(stdout, "%s mpi_rank = %d.\n", fcnName, mpi_rank);
+ HDfprintf(stdout, "%s rank = %d.\n", fcnName, rank);
+ HDfprintf(stdout, "%s edge_size = %d.\n", fcnName, edge_size);
+ HDfprintf(stdout, "%s checker_edge_size = %d.\n", fcnName, checker_edge_size);
+ HDfprintf(stdout, "%s first_expected_val = %d.\n", fcnName, (int)first_expected_val);
+ HDfprintf(stdout, "%s starts_in_checker = %d.\n", fcnName, (int)buf_starts_in_checker);
+ }
+#endif
+
+ val_ptr = buf_ptr;
+ expected_value = first_expected_val;
+
+ i = 0;
+ v = 0;
+ start_in_checker[0] = buf_starts_in_checker;
+ do {
+ if (v >= checker_edge_size) {
+
+ start_in_checker[0] = !start_in_checker[0];
+ v = 0;
+ }
+
+ j = 0;
+ w = 0;
+ start_in_checker[1] = start_in_checker[0];
+ do {
+ if (w >= checker_edge_size) {
+
+ start_in_checker[1] = !start_in_checker[1];
+ w = 0;
+ }
+
+ k = 0;
+ x = 0;
+ start_in_checker[2] = start_in_checker[1];
+ do {
+ if (x >= checker_edge_size) {
+
+ start_in_checker[2] = !start_in_checker[2];
+ x = 0;
+ }
+
+ l = 0;
+ y = 0;
+ start_in_checker[3] = start_in_checker[2];
+ do {
+ if (y >= checker_edge_size) {
+
+ start_in_checker[3] = !start_in_checker[3];
+ y = 0;
+ }
+
+ m = 0;
+ z = 0;
+#if LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG
+ if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) {
+ HDfprintf(stdout, "%d, %d, %d, %d, %d:", i, j, k, l, m);
+ }
+#endif
+ in_checker = start_in_checker[3];
+ do {
+#if LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG
+ if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) {
+ HDfprintf(stdout, " %d", (int)(*val_ptr));
+ }
+#endif
+ if (z >= checker_edge_size) {
+
+ in_checker = !in_checker;
+ z = 0;
+ }
+
+ if (in_checker) {
+
+ if (*val_ptr != expected_value) {
+
+ good_data = FALSE;
+ }
+
+ /* zero out buffer for re-use */
+ *val_ptr = 0;
+ }
+ else if (*val_ptr != 0) {
+
+ good_data = FALSE;
+
+ /* zero out buffer for re-use */
+ *val_ptr = 0;
+ }
+
+ val_ptr++;
+ expected_value++;
+ m++;
+ z++;
+
+ } while ((rank >= (test_max_rank - 4)) && (m < edge_size));
+#if LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG
+ if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) {
+ HDfprintf(stdout, "\n");
+ }
+#endif
+ l++;
+ y++;
+ } while ((rank >= (test_max_rank - 3)) && (l < edge_size));
+ k++;
+ x++;
+ } while ((rank >= (test_max_rank - 2)) && (k < edge_size));
+ j++;
+ w++;
+ } while ((rank >= (test_max_rank - 1)) && (j < edge_size));
+ i++;
+ v++;
+ } while ((rank >= test_max_rank) && (i < edge_size));
+
+ return (good_data);
+
+} /* lower_dim_size_comp_test__verify_data() */
+
+/*-------------------------------------------------------------------------
+ * Function: lower_dim_size_comp_test__run_test()
+ *
+ * Purpose: Verify that a bug in the computation of the size of the
+ * lower dimensions of a dataspace in H5S_obtain_datatype()
+ * has been corrected.
+ *
+ * Return: void
+ *
+ * Programmer: JRM -- 11/11/09
+ *
+ *-------------------------------------------------------------------------
+ */
+
+#define LDSCT_DS_RANK 5
+
+static void
+lower_dim_size_comp_test__run_test(const int chunk_edge_size, const hbool_t use_collective_io,
+ const hid_t dset_type)
+{
+#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
+ const char *fcnName = "lower_dim_size_comp_test__run_test()";
+ int rank;
+ hsize_t dims[32];
+ hsize_t max_dims[32];
+#endif /* LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG */
+ const char *filename;
+ hbool_t data_ok = FALSE;
+ hbool_t mis_match = FALSE;
+ int i;
+ int start_index;
+ int stop_index;
+ int mrc;
+ int mpi_rank;
+ int mpi_size;
+ MPI_Comm mpi_comm = MPI_COMM_NULL;
+ MPI_Info mpi_info = MPI_INFO_NULL;
+ hid_t fid; /* HDF5 file ID */
+ hid_t acc_tpl; /* File access templates */
+ hid_t xfer_plist = H5P_DEFAULT;
+ size_t small_ds_size;
+ size_t small_ds_slice_size;
+ size_t large_ds_size;
+#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
+ size_t large_ds_slice_size;
+#endif
+ uint32_t expected_value;
+ uint32_t *small_ds_buf_0 = NULL;
+ uint32_t *small_ds_buf_1 = NULL;
+ uint32_t *large_ds_buf_0 = NULL;
+ uint32_t *large_ds_buf_1 = NULL;
+ uint32_t *ptr_0;
+ uint32_t *ptr_1;
+ hsize_t small_chunk_dims[LDSCT_DS_RANK];
+ hsize_t large_chunk_dims[LDSCT_DS_RANK];
+ hsize_t small_dims[LDSCT_DS_RANK];
+ hsize_t large_dims[LDSCT_DS_RANK];
+ hsize_t start[LDSCT_DS_RANK];
+ hsize_t stride[LDSCT_DS_RANK];
+ hsize_t count[LDSCT_DS_RANK];
+ hsize_t block[LDSCT_DS_RANK];
+ hsize_t small_sel_start[LDSCT_DS_RANK];
+ hsize_t large_sel_start[LDSCT_DS_RANK];
+ hid_t full_mem_small_ds_sid;
+ hid_t full_file_small_ds_sid;
+ hid_t mem_small_ds_sid;
+ hid_t file_small_ds_sid;
+ hid_t full_mem_large_ds_sid;
+ hid_t full_file_large_ds_sid;
+ hid_t mem_large_ds_sid;
+ hid_t file_large_ds_sid;
+ hid_t small_ds_dcpl_id = H5P_DEFAULT;
+ hid_t large_ds_dcpl_id = H5P_DEFAULT;
+ hid_t small_dataset; /* Dataset ID */
+ hid_t large_dataset; /* Dataset ID */
+ htri_t check; /* Shape comparison return value */
+ herr_t ret; /* Generic return value */
+
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ HDassert(mpi_size >= 1);
+
+ mpi_comm = MPI_COMM_WORLD;
+ mpi_info = MPI_INFO_NULL;
+
+#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
+ if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) {
+ HDfprintf(stdout, "%s:%d: chunk_edge_size = %d.\n", fcnName, mpi_rank, (int)chunk_edge_size);
+ HDfprintf(stdout, "%s:%d: use_collective_io = %d.\n", fcnName, mpi_rank, (int)use_collective_io);
+ }
+#endif /* LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG */
+
+ small_ds_size = (size_t)((mpi_size + 1) * 1 * 1 * 10 * 10);
+ small_ds_slice_size = (size_t)(1 * 1 * 10 * 10);
+ large_ds_size = (size_t)((mpi_size + 1) * 10 * 10 * 10 * 10);
+
+#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
+ large_ds_slice_size = (size_t)(10 * 10 * 10 * 10);
+
+ if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) {
+ HDfprintf(stdout, "%s:%d: small ds size / slice size = %d / %d.\n", fcnName, mpi_rank,
+ (int)small_ds_size, (int)small_ds_slice_size);
+ HDfprintf(stdout, "%s:%d: large ds size / slice size = %d / %d.\n", fcnName, mpi_rank,
+ (int)large_ds_size, (int)large_ds_slice_size);
+ }
+#endif /* LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG */
+
+ /* Allocate buffers */
+ small_ds_buf_0 = (uint32_t *)HDmalloc(sizeof(uint32_t) * small_ds_size);
+ VRFY((small_ds_buf_0 != NULL), "malloc of small_ds_buf_0 succeeded");
+
+ small_ds_buf_1 = (uint32_t *)HDmalloc(sizeof(uint32_t) * small_ds_size);
+ VRFY((small_ds_buf_1 != NULL), "malloc of small_ds_buf_1 succeeded");
+
+ large_ds_buf_0 = (uint32_t *)HDmalloc(sizeof(uint32_t) * large_ds_size);
+ VRFY((large_ds_buf_0 != NULL), "malloc of large_ds_buf_0 succeeded");
+
+ large_ds_buf_1 = (uint32_t *)HDmalloc(sizeof(uint32_t) * large_ds_size);
+ VRFY((large_ds_buf_1 != NULL), "malloc of large_ds_buf_1 succeeded");
+
+ /* initialize the buffers */
+
+ ptr_0 = small_ds_buf_0;
+ ptr_1 = small_ds_buf_1;
+
+ for (i = 0; i < (int)small_ds_size; i++) {
+
+ *ptr_0 = (uint32_t)i;
+ *ptr_1 = 0;
+
+ ptr_0++;
+ ptr_1++;
+ }
+
+ ptr_0 = large_ds_buf_0;
+ ptr_1 = large_ds_buf_1;
+
+ for (i = 0; i < (int)large_ds_size; i++) {
+
+ *ptr_0 = (uint32_t)i;
+ *ptr_1 = 0;
+
+ ptr_0++;
+ ptr_1++;
+ }
+
+ /* get the file name */
+
+ filename = (const char *)PARATESTFILE /* GetTestParameters() */;
+ HDassert(filename != NULL);
+
+ /* ----------------------------------------
+ * CREATE AN HDF5 FILE WITH PARALLEL ACCESS
+ * ---------------------------------------*/
+ /* setup file access template */
+ acc_tpl = create_faccess_plist(mpi_comm, mpi_info, facc_type);
+ VRFY((acc_tpl >= 0), "create_faccess_plist() succeeded");
+
+ /* create the file collectively */
+ fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, acc_tpl);
+ VRFY((fid >= 0), "H5Fcreate succeeded");
+
+ MESG("File opened.");
+
+ /* Release file-access template */
+ ret = H5Pclose(acc_tpl);
+ VRFY((ret >= 0), "H5Pclose(acc_tpl) succeeded");
+
+ /* setup dims: */
+ small_dims[0] = (hsize_t)(mpi_size + 1);
+ small_dims[1] = 1;
+ small_dims[2] = 1;
+ small_dims[3] = 10;
+ small_dims[4] = 10;
+
+ large_dims[0] = (hsize_t)(mpi_size + 1);
+ large_dims[1] = 10;
+ large_dims[2] = 10;
+ large_dims[3] = 10;
+ large_dims[4] = 10;
+
+#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
+ if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) {
+ HDfprintf(stdout, "%s:%d: small_dims[] = %d %d %d %d %d\n", fcnName, mpi_rank, (int)small_dims[0],
+ (int)small_dims[1], (int)small_dims[2], (int)small_dims[3], (int)small_dims[4]);
+ HDfprintf(stdout, "%s:%d: large_dims[] = %d %d %d %d %d\n", fcnName, mpi_rank, (int)large_dims[0],
+ (int)large_dims[1], (int)large_dims[2], (int)large_dims[3], (int)large_dims[4]);
+ }
+#endif
+
+ /* create dataspaces */
+
+ full_mem_small_ds_sid = H5Screate_simple(5, small_dims, NULL);
+ VRFY((full_mem_small_ds_sid != 0), "H5Screate_simple() full_mem_small_ds_sid succeeded");
+
+ full_file_small_ds_sid = H5Screate_simple(5, small_dims, NULL);
+ VRFY((full_file_small_ds_sid != 0), "H5Screate_simple() full_file_small_ds_sid succeeded");
+
+ mem_small_ds_sid = H5Screate_simple(5, small_dims, NULL);
+ VRFY((mem_small_ds_sid != 0), "H5Screate_simple() mem_small_ds_sid succeeded");
+
+ file_small_ds_sid = H5Screate_simple(5, small_dims, NULL);
+ VRFY((file_small_ds_sid != 0), "H5Screate_simple() file_small_ds_sid succeeded");
+
+ full_mem_large_ds_sid = H5Screate_simple(5, large_dims, NULL);
+ VRFY((full_mem_large_ds_sid != 0), "H5Screate_simple() full_mem_large_ds_sid succeeded");
+
+ full_file_large_ds_sid = H5Screate_simple(5, large_dims, NULL);
+ VRFY((full_file_large_ds_sid != 0), "H5Screate_simple() full_file_large_ds_sid succeeded");
+
+ mem_large_ds_sid = H5Screate_simple(5, large_dims, NULL);
+ VRFY((mem_large_ds_sid != 0), "H5Screate_simple() mem_large_ds_sid succeeded");
+
+ file_large_ds_sid = H5Screate_simple(5, large_dims, NULL);
+ VRFY((file_large_ds_sid != 0), "H5Screate_simple() file_large_ds_sid succeeded");
+
+ /* Select the entire extent of the full small ds dataspaces */
+ ret = H5Sselect_all(full_mem_small_ds_sid);
+ VRFY((ret != FAIL), "H5Sselect_all(full_mem_small_ds_sid) succeeded");
+
+ ret = H5Sselect_all(full_file_small_ds_sid);
+ VRFY((ret != FAIL), "H5Sselect_all(full_file_small_ds_sid) succeeded");
+
+ /* Select the entire extent of the full large ds dataspaces */
+ ret = H5Sselect_all(full_mem_large_ds_sid);
+ VRFY((ret != FAIL), "H5Sselect_all(full_mem_large_ds_sid) succeeded");
+
+ ret = H5Sselect_all(full_file_large_ds_sid);
+ VRFY((ret != FAIL), "H5Sselect_all(full_file_large_ds_sid) succeeded");
+
+ /* if chunk edge size is greater than zero, set up the small and
+ * large data set creation property lists to specify chunked
+ * datasets.
+ */
+ if (chunk_edge_size > 0) {
+
+ small_chunk_dims[0] = (hsize_t)(1);
+ small_chunk_dims[1] = small_chunk_dims[2] = (hsize_t)1;
+ small_chunk_dims[3] = small_chunk_dims[4] = (hsize_t)chunk_edge_size;
+
+#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
+ if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) {
+ HDfprintf(stdout, "%s:%d: small chunk dims[] = %d %d %d %d %d\n", fcnName, mpi_rank,
+ (int)small_chunk_dims[0], (int)small_chunk_dims[1], (int)small_chunk_dims[2],
+ (int)small_chunk_dims[3], (int)small_chunk_dims[4]);
+ }
+#endif
+
+ small_ds_dcpl_id = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY((ret != FAIL), "H5Pcreate() small_ds_dcpl_id succeeded");
+
+ ret = H5Pset_layout(small_ds_dcpl_id, H5D_CHUNKED);
+ VRFY((ret != FAIL), "H5Pset_layout() small_ds_dcpl_id succeeded");
+
+ ret = H5Pset_chunk(small_ds_dcpl_id, 5, small_chunk_dims);
+ VRFY((ret != FAIL), "H5Pset_chunk() small_ds_dcpl_id succeeded");
+
+ large_chunk_dims[0] = (hsize_t)(1);
+ large_chunk_dims[1] = large_chunk_dims[2] = large_chunk_dims[3] = large_chunk_dims[4] =
+ (hsize_t)chunk_edge_size;
+
+#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
+ if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) {
+ HDfprintf(stdout, "%s:%d: large chunk dims[] = %d %d %d %d %d\n", fcnName, mpi_rank,
+ (int)large_chunk_dims[0], (int)large_chunk_dims[1], (int)large_chunk_dims[2],
+ (int)large_chunk_dims[3], (int)large_chunk_dims[4]);
+ }
+#endif
+
+ large_ds_dcpl_id = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY((ret != FAIL), "H5Pcreate() large_ds_dcpl_id succeeded");
+
+ ret = H5Pset_layout(large_ds_dcpl_id, H5D_CHUNKED);
+ VRFY((ret != FAIL), "H5Pset_layout() large_ds_dcpl_id succeeded");
+
+ ret = H5Pset_chunk(large_ds_dcpl_id, 5, large_chunk_dims);
+ VRFY((ret != FAIL), "H5Pset_chunk() large_ds_dcpl_id succeeded");
+ }
+
+ /* create the small dataset */
+ small_dataset = H5Dcreate2(fid, "small_dataset", dset_type, file_small_ds_sid, H5P_DEFAULT,
+ small_ds_dcpl_id, H5P_DEFAULT);
+ VRFY((ret >= 0), "H5Dcreate2() small_dataset succeeded");
+
+ /* create the large dataset */
+ large_dataset = H5Dcreate2(fid, "large_dataset", dset_type, file_large_ds_sid, H5P_DEFAULT,
+ large_ds_dcpl_id, H5P_DEFAULT);
+ VRFY((ret >= 0), "H5Dcreate2() large_dataset succeeded");
+
+#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
+ if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) {
+ HDfprintf(stdout, "%s:%d: small/large ds id = %d / %d.\n", fcnName, mpi_rank, (int)small_dataset,
+ (int)large_dataset);
+ }
+#endif /* LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG */
+
+ /* setup xfer property list */
+ xfer_plist = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((xfer_plist >= 0), "H5Pcreate(H5P_DATASET_XFER) succeeded");
+
+ ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
+ VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
+
+ if (!use_collective_io) {
+
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret >= 0), "H5Pset_dxpl_mpio_collective_opt() succeeded");
+ }
+
+ /* setup selection to write initial data to the small data sets */
+ start[0] = (hsize_t)(mpi_rank + 1);
+ start[1] = start[2] = start[3] = start[4] = 0;
+
+ stride[0] = (hsize_t)(2 * (mpi_size + 1));
+ stride[1] = stride[2] = 2;
+ stride[3] = stride[4] = 2 * 10;
+
+ count[0] = count[1] = count[2] = count[3] = count[4] = 1;
+
+ block[0] = block[1] = block[2] = 1;
+ block[3] = block[4] = 10;
+
+#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
+ if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) {
+ HDfprintf(stdout, "%s:%d: settings for small data set initialization.\n", fcnName, mpi_rank);
+ HDfprintf(stdout, "%s:%d: start[] = %d %d %d %d %d\n", fcnName, mpi_rank, (int)start[0],
+ (int)start[1], (int)start[2], (int)start[3], (int)start[4]);
+ HDfprintf(stdout, "%s:%d: stride[] = %d %d %d %d %d\n", fcnName, mpi_rank, (int)stride[0],
+ (int)stride[1], (int)stride[2], (int)stride[3], (int)stride[4]);
+ HDfprintf(stdout, "%s:%d: count[] = %d %d %d %d %d\n", fcnName, mpi_rank, (int)count[0],
+ (int)count[1], (int)count[2], (int)count[3], (int)count[4]);
+ HDfprintf(stdout, "%s:%d: block[] = %d %d %d %d %d\n", fcnName, mpi_rank, (int)block[0],
+ (int)block[1], (int)block[2], (int)block[3], (int)block[4]);
+ }
+#endif /* LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG */
+
+ /* setup selections for writing initial data to the small data set */
+ ret = H5Sselect_hyperslab(mem_small_ds_sid, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "H5Sselect_hyperslab(mem_small_ds_sid, set) succeeded");
+
+ ret = H5Sselect_hyperslab(file_small_ds_sid, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "H5Sselect_hyperslab(file_small_ds_sid, set) succeeded");
+
+ if (MAINPROCESS) { /* add an additional slice to the selections */
+
+ start[0] = 0;
+
+#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
+ if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) {
+ HDfprintf(stdout, "%s:%d: added settings for main process.\n", fcnName, mpi_rank);
+ HDfprintf(stdout, "%s:%d: start[] = %d %d %d %d %d\n", fcnName, mpi_rank, (int)start[0],
+ (int)start[1], (int)start[2], (int)start[3], (int)start[4]);
+ HDfprintf(stdout, "%s:%d: stride[] = %d %d %d %d %d\n", fcnName, mpi_rank, (int)stride[0],
+ (int)stride[1], (int)stride[2], (int)stride[3], (int)stride[4]);
+ HDfprintf(stdout, "%s:%d: count[] = %d %d %d %d %d\n", fcnName, mpi_rank, (int)count[0],
+ (int)count[1], (int)count[2], (int)count[3], (int)count[4]);
+ HDfprintf(stdout, "%s:%d: block[] = %d %d %d %d %d\n", fcnName, mpi_rank, (int)block[0],
+ (int)block[1], (int)block[2], (int)block[3], (int)block[4]);
+ }
+#endif /* LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG */
+
+ ret = H5Sselect_hyperslab(mem_small_ds_sid, H5S_SELECT_OR, start, stride, count, block);
+ VRFY((ret >= 0), "H5Sselect_hyperslab(mem_small_ds_sid, or) succeeded");
+
+ ret = H5Sselect_hyperslab(file_small_ds_sid, H5S_SELECT_OR, start, stride, count, block);
+ VRFY((ret >= 0), "H5Sselect_hyperslab(file_small_ds_sid, or) succeeded");
+ }
+
+ check = H5Sselect_valid(mem_small_ds_sid);
+ VRFY((check == TRUE), "H5Sselect_valid(mem_small_ds_sid) returns TRUE");
+
+ check = H5Sselect_valid(file_small_ds_sid);
+ VRFY((check == TRUE), "H5Sselect_valid(file_small_ds_sid) returns TRUE");
+
+ /* write the initial value of the small data set to file */
+#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
+ if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) {
+ HDfprintf(stdout, "%s:%d: writing init value of small ds to file.\n", fcnName, mpi_rank);
+ }
+#endif /* LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG */
+ ret = H5Dwrite(small_dataset, dset_type, mem_small_ds_sid, file_small_ds_sid, xfer_plist, small_ds_buf_0);
+ VRFY((ret >= 0), "H5Dwrite() small_dataset initial write succeeded");
+
+ /* sync with the other processes before reading data */
+ mrc = MPI_Barrier(MPI_COMM_WORLD);
+ VRFY((mrc == MPI_SUCCESS), "Sync after small dataset writes");
+
+ /* read the small data set back to verify that it contains the
+ * expected data. Note that each process reads in the entire
+ * data set and verifies it.
+ */
+ ret = H5Dread(small_dataset, H5T_NATIVE_UINT32, full_mem_small_ds_sid, full_file_small_ds_sid, xfer_plist,
+ small_ds_buf_1);
+ VRFY((ret >= 0), "H5Dread() small_dataset initial read succeeded");
+
+ /* sync with the other processes before checking data */
+ mrc = MPI_Barrier(MPI_COMM_WORLD);
+ VRFY((mrc == MPI_SUCCESS), "Sync after small dataset writes");
+
+ /* verify that the correct data was written to the small data set,
+ * and reset the buffer to zero in passing.
+ */
+ expected_value = 0;
+ mis_match = FALSE;
+ ptr_1 = small_ds_buf_1;
+
+ for (i = 0; i < (int)small_ds_size; i++) {
+
+ if (*ptr_1 != expected_value) {
+
+ mis_match = TRUE;
+ }
+
+ *ptr_1 = (uint32_t)0;
+
+ ptr_1++;
+ expected_value++;
+ }
+ VRFY((mis_match == FALSE), "small ds init data good.");
+
+ /* setup selections for writing initial data to the large data set */
+ start[0] = (hsize_t)(mpi_rank + 1);
+ start[1] = start[2] = start[3] = start[4] = (hsize_t)0;
+
+ stride[0] = (hsize_t)(2 * (mpi_size + 1));
+ stride[1] = stride[2] = stride[3] = stride[4] = (hsize_t)(2 * 10);
+
+ count[0] = count[1] = count[2] = count[3] = count[4] = (hsize_t)1;
+
+ block[0] = (hsize_t)1;
+ block[1] = block[2] = block[3] = block[4] = (hsize_t)10;
+
+#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
+ if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) {
+ HDfprintf(stdout, "%s:%d: settings for large data set initialization.\n", fcnName, mpi_rank);
+ HDfprintf(stdout, "%s:%d: start[] = %d %d %d %d %d\n", fcnName, mpi_rank, (int)start[0],
+ (int)start[1], (int)start[2], (int)start[3], (int)start[4]);
+ HDfprintf(stdout, "%s:%d: stride[] = %d %d %d %d %d\n", fcnName, mpi_rank, (int)stride[0],
+ (int)stride[1], (int)stride[2], (int)stride[3], (int)stride[4]);
+ HDfprintf(stdout, "%s:%d: count[] = %d %d %d %d %d\n", fcnName, mpi_rank, (int)count[0],
+ (int)count[1], (int)count[2], (int)count[3], (int)count[4]);
+ HDfprintf(stdout, "%s:%d: block[] = %d %d %d %d %d\n", fcnName, mpi_rank, (int)block[0],
+ (int)block[1], (int)block[2], (int)block[3], (int)block[4]);
+ }
+#endif /* LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG */
+
+ ret = H5Sselect_hyperslab(mem_large_ds_sid, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "H5Sselect_hyperslab(mem_large_ds_sid, set) succeeded");
+
+ ret = H5Sselect_hyperslab(file_large_ds_sid, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "H5Sselect_hyperslab(file_large_ds_sid, set) succeeded");
+
+#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
+ if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) {
+ HDfprintf(stdout, "%s%d: H5Sget_select_npoints(mem_large_ds_sid) = %d.\n", fcnName, mpi_rank,
+ (int)H5Sget_select_npoints(mem_large_ds_sid));
+ HDfprintf(stdout, "%s%d: H5Sget_select_npoints(file_large_ds_sid) = %d.\n", fcnName, mpi_rank,
+ (int)H5Sget_select_npoints(file_large_ds_sid));
+ }
+#endif /* LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG */
+
+ if (MAINPROCESS) { /* add an additional slice to the selections */
+
+ start[0] = (hsize_t)0;
+
+#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
+ if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) {
+ HDfprintf(stdout, "%s:%d: added settings for main process.\n", fcnName, mpi_rank);
+ HDfprintf(stdout, "%s:%d: start[] = %d %d %d %d %d\n", fcnName, mpi_rank, (int)start[0],
+ (int)start[1], (int)start[2], (int)start[3], (int)start[4]);
+ HDfprintf(stdout, "%s:%d: stride[] = %d %d %d %d %d\n", fcnName, mpi_rank, (int)stride[0],
+ (int)stride[1], (int)stride[2], (int)stride[3], (int)stride[4]);
+ HDfprintf(stdout, "%s:%d: count[] = %d %d %d %d %d\n", fcnName, mpi_rank, (int)count[0],
+ (int)count[1], (int)count[2], (int)count[3], (int)count[4]);
+ HDfprintf(stdout, "%s:%d: block[] = %d %d %d %d %d\n", fcnName, mpi_rank, (int)block[0],
+ (int)block[1], (int)block[2], (int)block[3], (int)block[4]);
+ }
+#endif /* LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG */
+
+ ret = H5Sselect_hyperslab(mem_large_ds_sid, H5S_SELECT_OR, start, stride, count, block);
+ VRFY((ret >= 0), "H5Sselect_hyperslab(mem_large_ds_sid, or) succeeded");
+
+ ret = H5Sselect_hyperslab(file_large_ds_sid, H5S_SELECT_OR, start, stride, count, block);
+ VRFY((ret >= 0), "H5Sselect_hyperslab(file_large_ds_sid, or) succeeded");
+
+#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
+ if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) {
+ HDfprintf(stdout, "%s%d: H5Sget_select_npoints(mem_large_ds_sid) = %d.\n", fcnName, mpi_rank,
+ (int)H5Sget_select_npoints(mem_large_ds_sid));
+ HDfprintf(stdout, "%s%d: H5Sget_select_npoints(file_large_ds_sid) = %d.\n", fcnName, mpi_rank,
+ (int)H5Sget_select_npoints(file_large_ds_sid));
+ }
+#endif /* LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG */
+ }
+
+ /* try clipping the selection back to the large dataspace proper */
+ start[0] = start[1] = start[2] = start[3] = start[4] = (hsize_t)0;
+
+ stride[0] = (hsize_t)(2 * (mpi_size + 1));
+ stride[1] = stride[2] = stride[3] = stride[4] = (hsize_t)(2 * 10);
+
+ count[0] = count[1] = count[2] = count[3] = count[4] = (hsize_t)1;
+
+ block[0] = (hsize_t)(mpi_size + 1);
+ block[1] = block[2] = block[3] = block[4] = (hsize_t)10;
+
+ ret = H5Sselect_hyperslab(mem_large_ds_sid, H5S_SELECT_AND, start, stride, count, block);
+ VRFY((ret != FAIL), "H5Sselect_hyperslab(mem_large_ds_sid, and) succeeded");
+
+ ret = H5Sselect_hyperslab(file_large_ds_sid, H5S_SELECT_AND, start, stride, count, block);
+ VRFY((ret != FAIL), "H5Sselect_hyperslab(file_large_ds_sid, and) succeeded");
+
+#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
+ if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) {
+
+ rank = H5Sget_simple_extent_dims(mem_large_ds_sid, dims, max_dims);
+ HDfprintf(stdout, "%s:%d: mem_large_ds_sid dims[%d] = %d %d %d %d %d\n", fcnName, mpi_rank, rank,
+ (int)dims[0], (int)dims[1], (int)dims[2], (int)dims[3], (int)dims[4]);
+
+ rank = H5Sget_simple_extent_dims(file_large_ds_sid, dims, max_dims);
+ HDfprintf(stdout, "%s:%d: file_large_ds_sid dims[%d] = %d %d %d %d %d\n", fcnName, mpi_rank, rank,
+ (int)dims[0], (int)dims[1], (int)dims[2], (int)dims[3], (int)dims[4]);
+ }
+#endif /* LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG */
+
+ check = H5Sselect_valid(mem_large_ds_sid);
+ VRFY((check == TRUE), "H5Sselect_valid(mem_large_ds_sid) returns TRUE");
+
+ check = H5Sselect_valid(file_large_ds_sid);
+ VRFY((check == TRUE), "H5Sselect_valid(file_large_ds_sid) returns TRUE");
+
+ /* write the initial value of the large data set to file */
+#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
+ if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) {
+ HDfprintf(stdout, "%s:%d: writing init value of large ds to file.\n", fcnName, mpi_rank);
+ HDfprintf(stdout, "%s:%d: large_dataset = %d.\n", fcnName, mpi_rank, (int)large_dataset);
+ HDfprintf(stdout, "%s:%d: mem_large_ds_sid = %d, file_large_ds_sid = %d.\n", fcnName, mpi_rank,
+ (int)mem_large_ds_sid, (int)file_large_ds_sid);
+ }
+#endif /* LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG */
+
+ ret = H5Dwrite(large_dataset, dset_type, mem_large_ds_sid, file_large_ds_sid, xfer_plist, large_ds_buf_0);
+
+ if (ret < 0)
+ H5Eprint2(H5E_DEFAULT, stderr);
+ VRFY((ret >= 0), "H5Dwrite() large_dataset initial write succeeded");
+
+ /* sync with the other processes before checking data */
+ mrc = MPI_Barrier(MPI_COMM_WORLD);
+ VRFY((mrc == MPI_SUCCESS), "Sync after large dataset writes");
+
+ /* read the large data set back to verify that it contains the
+ * expected data. Note that each process reads in the entire
+ * data set.
+ */
+ ret = H5Dread(large_dataset, H5T_NATIVE_UINT32, full_mem_large_ds_sid, full_file_large_ds_sid, xfer_plist,
+ large_ds_buf_1);
+ VRFY((ret >= 0), "H5Dread() large_dataset initial read succeeded");
+
+ /* verify that the correct data was written to the large data set.
+ * in passing, reset the buffer to zeros
+ */
+ expected_value = 0;
+ mis_match = FALSE;
+ ptr_1 = large_ds_buf_1;
+
+ for (i = 0; i < (int)large_ds_size; i++) {
+
+ if (*ptr_1 != expected_value) {
+
+ mis_match = TRUE;
+ }
+
+ *ptr_1 = (uint32_t)0;
+
+ ptr_1++;
+ expected_value++;
+ }
+ VRFY((mis_match == FALSE), "large ds init data good.");
+
+ /***********************************/
+ /***** INITIALIZATION COMPLETE *****/
+ /***********************************/
+
+ /* read a checkerboard selection of the process slice of the
+ * small on disk data set into the process slice of the large
+ * in memory data set, and verify the data read.
+ */
+
+ small_sel_start[0] = (hsize_t)(mpi_rank + 1);
+ small_sel_start[1] = small_sel_start[2] = small_sel_start[3] = small_sel_start[4] = 0;
+
+ lower_dim_size_comp_test__select_checker_board(mpi_rank, file_small_ds_sid,
+ /* tgt_rank = */ 5, small_dims,
+ /* checker_edge_size = */ 3,
+ /* sel_rank */ 2, small_sel_start);
+
+ expected_value =
+ (uint32_t)((small_sel_start[0] * small_dims[1] * small_dims[2] * small_dims[3] * small_dims[4]) +
+ (small_sel_start[1] * small_dims[2] * small_dims[3] * small_dims[4]) +
+ (small_sel_start[2] * small_dims[3] * small_dims[4]) +
+ (small_sel_start[3] * small_dims[4]) + (small_sel_start[4]));
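+ /* Note: the expression above is the row-major linearization of
+ * small_sel_start[] -- i.e. the flattened offset of the first selected
+ * element in the small data set, which (given the sequential values
+ * written during initialization) is also the value stored there.
+ */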
+
+ large_sel_start[0] = (hsize_t)(mpi_rank + 1);
+ large_sel_start[1] = 5;
+ large_sel_start[2] = large_sel_start[3] = large_sel_start[4] = 0;
+
+ lower_dim_size_comp_test__select_checker_board(mpi_rank, mem_large_ds_sid,
+ /* tgt_rank = */ 5, large_dims,
+ /* checker_edge_size = */ 3,
+ /* sel_rank = */ 2, large_sel_start);
+
+ /* verify that H5Sselect_shape_same() reports the two
+ * selections as having the same shape.
+ */
+ check = H5Sselect_shape_same(mem_large_ds_sid, file_small_ds_sid);
+ VRFY((check == TRUE), "H5Sselect_shape_same passed (1)");
+
+ ret = H5Dread(small_dataset, H5T_NATIVE_UINT32, mem_large_ds_sid, file_small_ds_sid, xfer_plist,
+ large_ds_buf_1);
+
+ VRFY((ret >= 0), "H5Sread() slice from small ds succeeded.");
+
+#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
+ if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) {
+ HDfprintf(stdout, "%s:%d: H5Dread() returns.\n", fcnName, mpi_rank);
+ }
+#endif /* LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG */
+
+ /* verify that expected data is retrieved */
+
+ data_ok = TRUE;
+
+ start_index = (int)((large_sel_start[0] * large_dims[1] * large_dims[2] * large_dims[3] * large_dims[4]) +
+ (large_sel_start[1] * large_dims[2] * large_dims[3] * large_dims[4]) +
+ (large_sel_start[2] * large_dims[3] * large_dims[4]) +
+ (large_sel_start[3] * large_dims[4]) + (large_sel_start[4]));
+
+ stop_index = start_index + (int)small_ds_slice_size;
+
+ HDassert(0 <= start_index);
+ HDassert(start_index < stop_index);
+ HDassert(stop_index <= (int)large_ds_size);
+
+ ptr_1 = large_ds_buf_1;
+
+ for (i = 0; i < start_index; i++) {
+
+ if (*ptr_1 != (uint32_t)0) {
+
+ data_ok = FALSE;
+ *ptr_1 = (uint32_t)0;
+ }
+
+ ptr_1++;
+ }
+
+ VRFY((data_ok == TRUE), "slice read from small ds data good(1).");
+
+ data_ok = lower_dim_size_comp_test__verify_data(ptr_1,
+#if LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG
+ mpi_rank,
+#endif /* LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG */
+ /* rank */ 2,
+ /* edge_size */ 10,
+ /* checker_edge_size */ 3, expected_value,
+ /* buf_starts_in_checker */ TRUE);
+
+ VRFY((data_ok == TRUE), "slice read from small ds data good(2).");
+
+ data_ok = TRUE;
+
+ ptr_1 += small_ds_slice_size;
+
+ for (i = stop_index; i < (int)large_ds_size; i++) {
+
+ if (*ptr_1 != (uint32_t)0) {
+
+ data_ok = FALSE;
+ *ptr_1 = (uint32_t)0;
+ }
+
+ ptr_1++;
+ }
+
+ VRFY((data_ok == TRUE), "slice read from small ds data good(3).");
+
+ /* read a checkerboard selection of a slice of the process slice of
+ * the large on disk data set into the process slice of the small
+ * in memory data set, and verify the data read.
+ */
+
+ small_sel_start[0] = (hsize_t)(mpi_rank + 1);
+ small_sel_start[1] = small_sel_start[2] = small_sel_start[3] = small_sel_start[4] = 0;
+
+ lower_dim_size_comp_test__select_checker_board(mpi_rank, mem_small_ds_sid,
+ /* tgt_rank = */ 5, small_dims,
+ /* checker_edge_size = */ 3,
+ /* sel_rank */ 2, small_sel_start);
+
+ large_sel_start[0] = (hsize_t)(mpi_rank + 1);
+ large_sel_start[1] = 5;
+ large_sel_start[2] = large_sel_start[3] = large_sel_start[4] = 0;
+
+ lower_dim_size_comp_test__select_checker_board(mpi_rank, file_large_ds_sid,
+ /* tgt_rank = */ 5, large_dims,
+ /* checker_edge_size = */ 3,
+ /* sel_rank = */ 2, large_sel_start);
+
+ /* verify that H5Sselect_shape_same() reports the two
+ * selections as having the same shape.
+ */
+ check = H5Sselect_shape_same(mem_small_ds_sid, file_large_ds_sid);
+ VRFY((check == TRUE), "H5Sselect_shape_same passed (2)");
+
+ ret = H5Dread(large_dataset, H5T_NATIVE_UINT32, mem_small_ds_sid, file_large_ds_sid, xfer_plist,
+ small_ds_buf_1);
+
+ VRFY((ret >= 0), "H5Sread() slice from large ds succeeded.");
+
+#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
+ if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) {
+ HDfprintf(stdout, "%s:%d: H5Dread() returns.\n", fcnName, mpi_rank);
+ }
+#endif /* LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG */
+
+ /* verify that expected data is retrieved */
+
+ data_ok = TRUE;
+
+ expected_value =
+ (uint32_t)((large_sel_start[0] * large_dims[1] * large_dims[2] * large_dims[3] * large_dims[4]) +
+ (large_sel_start[1] * large_dims[2] * large_dims[3] * large_dims[4]) +
+ (large_sel_start[2] * large_dims[3] * large_dims[4]) +
+ (large_sel_start[3] * large_dims[4]) + (large_sel_start[4]));
+
+ start_index = (int)(mpi_rank + 1) * (int)small_ds_slice_size;
+
+ stop_index = start_index + (int)small_ds_slice_size;
+
+ HDassert(0 <= start_index);
+ HDassert(start_index < stop_index);
+ HDassert(stop_index <= (int)small_ds_size);
+
+ ptr_1 = small_ds_buf_1;
+
+ for (i = 0; i < start_index; i++) {
+
+ if (*ptr_1 != (uint32_t)0) {
+
+ data_ok = FALSE;
+ *ptr_1 = (uint32_t)0;
+ }
+
+ ptr_1++;
+ }
+
+ VRFY((data_ok == TRUE), "slice read from large ds data good(1).");
+
+ data_ok = lower_dim_size_comp_test__verify_data(ptr_1,
+#if LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG
+ mpi_rank,
+#endif /* LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG */
+ /* rank */ 2,
+ /* edge_size */ 10,
+ /* checker_edge_size */ 3, expected_value,
+ /* buf_starts_in_checker */ TRUE);
+
+ VRFY((data_ok == TRUE), "slice read from large ds data good(2).");
+
+ data_ok = TRUE;
+
+ ptr_1 += small_ds_slice_size;
+
+ for (i = stop_index; i < (int)small_ds_size; i++) {
+
+ if (*ptr_1 != (uint32_t)0) {
+
+#if LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG
+ if (mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK) {
+ HDfprintf(stdout, "%s:%d: unexpected value at index %d: %d.\n", fcnName, mpi_rank, (int)i,
+ (int)(*ptr_1));
+ }
+#endif /* LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG */
+
+ data_ok = FALSE;
+ *ptr_1 = (uint32_t)0;
+ }
+
+ ptr_1++;
+ }
+
+ VRFY((data_ok == TRUE), "slice read from large ds data good(3).");
+
+ /* Close dataspaces */
+ ret = H5Sclose(full_mem_small_ds_sid);
+ VRFY((ret != FAIL), "H5Sclose(full_mem_small_ds_sid) succeeded");
+
+ ret = H5Sclose(full_file_small_ds_sid);
+ VRFY((ret != FAIL), "H5Sclose(full_file_small_ds_sid) succeeded");
+
+ ret = H5Sclose(mem_small_ds_sid);
+ VRFY((ret != FAIL), "H5Sclose(mem_small_ds_sid) succeeded");
+
+ ret = H5Sclose(file_small_ds_sid);
+ VRFY((ret != FAIL), "H5Sclose(file_small_ds_sid) succeeded");
+
+ ret = H5Sclose(full_mem_large_ds_sid);
+ VRFY((ret != FAIL), "H5Sclose(full_mem_large_ds_sid) succeeded");
+
+ ret = H5Sclose(full_file_large_ds_sid);
+ VRFY((ret != FAIL), "H5Sclose(full_file_large_ds_sid) succeeded");
+
+ ret = H5Sclose(mem_large_ds_sid);
+ VRFY((ret != FAIL), "H5Sclose(mem_large_ds_sid) succeeded");
+
+ ret = H5Sclose(file_large_ds_sid);
+ VRFY((ret != FAIL), "H5Sclose(file_large_ds_sid) succeeded");
+
+ /* Close Datasets */
+ ret = H5Dclose(small_dataset);
+ VRFY((ret != FAIL), "H5Dclose(small_dataset) succeeded");
+
+ ret = H5Dclose(large_dataset);
+ VRFY((ret != FAIL), "H5Dclose(large_dataset) succeeded");
+
+ /* close the file collectively */
+ MESG("about to close file.");
+ ret = H5Fclose(fid);
+ VRFY((ret != FAIL), "file close succeeded");
+
+ /* Free memory buffers */
+ if (small_ds_buf_0 != NULL)
+ HDfree(small_ds_buf_0);
+ if (small_ds_buf_1 != NULL)
+ HDfree(small_ds_buf_1);
+
+ if (large_ds_buf_0 != NULL)
+ HDfree(large_ds_buf_0);
+ if (large_ds_buf_1 != NULL)
+ HDfree(large_ds_buf_1);
+
+ return;
+
+} /* lower_dim_size_comp_test__run_test() */
+
+/*-------------------------------------------------------------------------
+ * Function: lower_dim_size_comp_test()
+ *
+ * Purpose: Test to see if an error in the computation of the size
+ * of the lower dimensions in H5S_obtain_datatype() has
+ * been corrected.
+ *
+ * Return: void
+ *
+ * Programmer: JRM -- 11/11/09
+ *
+ *-------------------------------------------------------------------------
+ */
+
+void
+lower_dim_size_comp_test(void)
+{
+ /* const char *fcnName = "lower_dim_size_comp_test()"; */
+ int chunk_edge_size = 0;
+ int use_collective_io;
+ int mpi_rank;
+
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(" API functions for basic file or dataset aren't supported with this connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
+
+ HDcompile_assert(sizeof(uint32_t) == sizeof(unsigned));
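+
+ /* Run the core test for both independent and collective I/O, and with
+ * chunk_edge_size values of 0 and 5. (The chunked vs. contiguous layout
+ * decision is made inside lower_dim_size_comp_test__run_test() based on
+ * chunk_edge_size; presumably 0 leaves the datasets contiguous while a
+ * positive value selects chunked storage with that chunk edge size.)
+ */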
+ for (use_collective_io = 0; use_collective_io <= 1; use_collective_io++) {
+ chunk_edge_size = 0;
+ lower_dim_size_comp_test__run_test(chunk_edge_size, (hbool_t)use_collective_io, H5T_NATIVE_UINT);
+
+ chunk_edge_size = 5;
+ lower_dim_size_comp_test__run_test(chunk_edge_size, (hbool_t)use_collective_io, H5T_NATIVE_UINT);
+ } /* end for */
+
+ return;
+} /* lower_dim_size_comp_test() */
+
+/*-------------------------------------------------------------------------
+ * Function: link_chunk_collective_io_test()
+ *
+ * Purpose: Test to verify that an error in MPI type management in
+ * H5D_link_chunk_collective_io() has been corrected.
+ * In this bug, we used to free MPI types regardless of
+ * whether they were basic or derived.
+ *
+ * This test is based on a bug report kindly provided by
+ * Rob Latham of the MPICH team and ANL.
+ *
+ * The basic thrust of the test is to cause a process
+ * to participate in a collective I/O in which it:
+ *
+ * 1) Reads or writes exactly one chunk,
+ *
+ * 2) Has no in memory buffer for any other chunk.
+ *
+ * The test differs from Rob Latham's bug report in
+ * that it runs with an arbitrary number of processes
+ * and uses a 1-dimensional dataset.
+ *
+ * Return: void
+ *
+ * Programmer: JRM -- 12/16/09
+ *
+ *-------------------------------------------------------------------------
+ */
+
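+/* Sketch of the layout used below (a summary of the code, not taken from
+ * the original bug report): the file dataset is 1-D with
+ * mpi_size * LINK_CHUNK_COLLECTIVE_IO_TEST_CHUNK_SIZE doubles and a chunk
+ * size of LINK_CHUNK_COLLECTIVE_IO_TEST_CHUNK_SIZE, so each rank's
+ * hyperslab (start = rank * chunk size, count = 1, block = chunk size)
+ * maps onto exactly one chunk, while its memory buffers hold only that
+ * one chunk's worth of data.
+ */
+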
+#define LINK_CHUNK_COLLECTIVE_IO_TEST_CHUNK_SIZE 16
+
+void
+link_chunk_collective_io_test(void)
+{
+ /* const char *fcnName = "link_chunk_collective_io_test()"; */
+ const char *filename;
+ hbool_t mis_match = FALSE;
+ int i;
+ int mrc;
+ int mpi_rank;
+ int mpi_size;
+ MPI_Comm mpi_comm = MPI_COMM_WORLD;
+ MPI_Info mpi_info = MPI_INFO_NULL;
+ hsize_t count[1] = {1};
+ hsize_t stride[1] = {2 * LINK_CHUNK_COLLECTIVE_IO_TEST_CHUNK_SIZE};
+ hsize_t block[1] = {LINK_CHUNK_COLLECTIVE_IO_TEST_CHUNK_SIZE};
+ hsize_t start[1];
+ hsize_t dims[1];
+ hsize_t chunk_dims[1] = {LINK_CHUNK_COLLECTIVE_IO_TEST_CHUNK_SIZE};
+ herr_t ret; /* Generic return value */
+ hid_t file_id;
+ hid_t acc_tpl;
+ hid_t dset_id;
+ hid_t file_ds_sid;
+ hid_t write_mem_ds_sid;
+ hid_t read_mem_ds_sid;
+ hid_t ds_dcpl_id;
+ hid_t xfer_plist;
+ double diff;
+ double expected_value;
+ double local_data_written[LINK_CHUNK_COLLECTIVE_IO_TEST_CHUNK_SIZE];
+ double local_data_read[LINK_CHUNK_COLLECTIVE_IO_TEST_CHUNK_SIZE];
+
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ /* Make sure the connector supports the API functions being tested */
+ if (!(vol_cap_flags_g & H5VL_CAP_FLAG_FILE_BASIC) || !(vol_cap_flags_g & H5VL_CAP_FLAG_DATASET_BASIC)) {
+ if (MAINPROCESS) {
+ puts("SKIPPED");
+ printf(" API functions for basic file or dataset aren't supported with this connector\n");
+ fflush(stdout);
+ }
+
+ return;
+ }
+
+ HDassert(mpi_size > 0);
+
+ /* get the file name */
+ filename = (const char *)PARATESTFILE /* GetTestParameters() */;
+ HDassert(filename != NULL);
+
+ /* setup file access template */
+ acc_tpl = create_faccess_plist(mpi_comm, mpi_info, facc_type);
+ VRFY((acc_tpl >= 0), "create_faccess_plist() succeeded");
+
+ /* create the file collectively */
+ file_id = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, acc_tpl);
+ VRFY((file_id >= 0), "H5Fcreate succeeded");
+
+ MESG("File opened.");
+
+ /* Release file-access template */
+ ret = H5Pclose(acc_tpl);
+ VRFY((ret >= 0), "H5Pclose(acc_tpl) succeeded");
+
+ /* setup dims */
+ dims[0] = ((hsize_t)mpi_size) * ((hsize_t)(LINK_CHUNK_COLLECTIVE_IO_TEST_CHUNK_SIZE));
+
+ /* setup mem and file dataspaces */
+ write_mem_ds_sid = H5Screate_simple(1, chunk_dims, NULL);
+ VRFY((write_mem_ds_sid >= 0), "H5Screate_simple() write_mem_ds_sid succeeded");
+
+ read_mem_ds_sid = H5Screate_simple(1, chunk_dims, NULL);
+ VRFY((read_mem_ds_sid >= 0), "H5Screate_simple() read_mem_ds_sid succeeded");
+
+ file_ds_sid = H5Screate_simple(1, dims, NULL);
+ VRFY((file_ds_sid >= 0), "H5Screate_simple() file_ds_sid succeeded");
+
+ /* setup data set creation property list */
+ ds_dcpl_id = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY((ds_dcpl_id != FAIL), "H5Pcreate() ds_dcpl_id succeeded");
+
+ ret = H5Pset_layout(ds_dcpl_id, H5D_CHUNKED);
+ VRFY((ret != FAIL), "H5Pset_layout() ds_dcpl_id succeeded");
+
+ ret = H5Pset_chunk(ds_dcpl_id, 1, chunk_dims);
+ VRFY((ret != FAIL), "H5Pset_chunk() small_ds_dcpl_id succeeded");
+
+ /* create the data set */
+ dset_id =
+ H5Dcreate2(file_id, "dataset", H5T_NATIVE_DOUBLE, file_ds_sid, H5P_DEFAULT, ds_dcpl_id, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "H5Dcreate2() dataset succeeded");
+
+ /* close the dataset creation property list */
+ ret = H5Pclose(ds_dcpl_id);
+ VRFY((ret >= 0), "H5Pclose(ds_dcpl_id) succeeded");
+
+ /* setup local data */
+ expected_value = (double)(LINK_CHUNK_COLLECTIVE_IO_TEST_CHUNK_SIZE) * (double)(mpi_rank);
+ for (i = 0; i < LINK_CHUNK_COLLECTIVE_IO_TEST_CHUNK_SIZE; i++) {
+
+ local_data_written[i] = expected_value;
+ local_data_read[i] = 0.0;
+ expected_value += 1.0;
+ }
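+ /* After this loop, rank r (r = mpi_rank) holds the values
+ * r * CHUNK_SIZE .. (r + 1) * CHUNK_SIZE - 1 as doubles, where CHUNK_SIZE
+ * is LINK_CHUNK_COLLECTIVE_IO_TEST_CHUNK_SIZE, so once every rank has
+ * written its chunk the file dataset contains 0 .. mpi_size * CHUNK_SIZE - 1
+ * in order.
+ */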
+
+ /* select the file and mem spaces */
+ start[0] = (hsize_t)(mpi_rank * LINK_CHUNK_COLLECTIVE_IO_TEST_CHUNK_SIZE);
+ ret = H5Sselect_hyperslab(file_ds_sid, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "H5Sselect_hyperslab(file_ds_sid, set) succeeded");
+
+ ret = H5Sselect_all(write_mem_ds_sid);
+ VRFY((ret != FAIL), "H5Sselect_all(mem_ds_sid) succeeded");
+
+ /* Note that we use NO SELECTION on the read memory dataspace */
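+ /* (A dataspace created with H5Screate_simple() has all of its elements
+ * selected by default, so the read below still transfers the full
+ * chunk-sized memory buffer even though no H5Sselect_*() call is made
+ * on read_mem_ds_sid.)
+ */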
+
+ /* setup xfer property list */
+ xfer_plist = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((xfer_plist >= 0), "H5Pcreate(H5P_DATASET_XFER) succeeded");
+
+ ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
+ VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
+
+ /* write the data set */
+ ret = H5Dwrite(dset_id, H5T_NATIVE_DOUBLE, write_mem_ds_sid, file_ds_sid, xfer_plist, local_data_written);
+
+ VRFY((ret >= 0), "H5Dwrite() dataset initial write succeeded");
+
+ /* sync with the other processes before checking data */
+ mrc = MPI_Barrier(MPI_COMM_WORLD);
+ VRFY((mrc == MPI_SUCCESS), "Sync after dataset write");
+
+ /* read this processes slice of the dataset back in */
+ ret = H5Dread(dset_id, H5T_NATIVE_DOUBLE, read_mem_ds_sid, file_ds_sid, xfer_plist, local_data_read);
+ VRFY((ret >= 0), "H5Dread() dataset read succeeded");
+
+ /* close the xfer property list */
+ ret = H5Pclose(xfer_plist);
+ VRFY((ret >= 0), "H5Pclose(xfer_plist) succeeded");
+
+ /* verify the data */
+ mis_match = FALSE;
+ for (i = 0; i < LINK_CHUNK_COLLECTIVE_IO_TEST_CHUNK_SIZE; i++) {
+
+ diff = local_data_written[i] - local_data_read[i];
+ diff = fabs(diff);
+
+ if (diff >= 0.001) {
+
+ mis_match = TRUE;
+ }
+ }
+ VRFY((mis_match == FALSE), "dataset data good.");
+
+ /* Close dataspaces */
+ ret = H5Sclose(write_mem_ds_sid);
+ VRFY((ret != FAIL), "H5Sclose(write_mem_ds_sid) succeeded");
+
+ ret = H5Sclose(read_mem_ds_sid);
+ VRFY((ret != FAIL), "H5Sclose(read_mem_ds_sid) succeeded");
+
+ ret = H5Sclose(file_ds_sid);
+ VRFY((ret != FAIL), "H5Sclose(file_ds_sid) succeeded");
+
+ /* Close Dataset */
+ ret = H5Dclose(dset_id);
+ VRFY((ret != FAIL), "H5Dclose(dset_id) succeeded");
+
+ /* close the file collectively */
+ ret = H5Fclose(file_id);
+ VRFY((ret != FAIL), "file close succeeded");
+
+ return;
+
+} /* link_chunk_collective_io_test() */
diff --git a/testpar/API/testphdf5.c b/testpar/API/testphdf5.c
new file mode 100644
index 0000000..ec5dae2
--- /dev/null
+++ b/testpar/API/testphdf5.c
@@ -0,0 +1,1007 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://www.hdfgroup.org/licenses. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/*
+ * Main driver of the Parallel HDF5 tests
+ */
+
+#include "hdf5.h"
+#include "testphdf5.h"
+
+#ifndef PATH_MAX
+#define PATH_MAX 512
+#endif /* !PATH_MAX */
+
+/* global variables */
+int dim0;
+int dim1;
+int chunkdim0;
+int chunkdim1;
+int nerrors = 0; /* errors count */
+int ndatasets = 300; /* number of datasets to create*/
+int ngroups = 512; /* number of groups to create in root
+ * group. */
+int facc_type = FACC_MPIO; /*Test file access type */
+int dxfer_coll_type = DXFER_COLLECTIVE_IO;
+
+H5E_auto2_t old_func; /* previous error handler */
+void *old_client_data; /* previous error handler arg.*/
+
+/* other option flags */
+
+/* FILENAME and filenames must have the same number of names.
+ * Use PARATESTFILE in general and use a separate filename only if the file
+ * created in one test is accessed by a different test.
+ * filenames[0] is reserved as the file name for PARATESTFILE.
+ */
+#define NFILENAME 2
+/* #define PARATESTFILE filenames[0] */
+const char *FILENAME[NFILENAME] = {"ParaTest.h5", NULL};
+char filenames[NFILENAME][PATH_MAX];
+hid_t fapl; /* file access property list */
+
+#ifdef USE_PAUSE
+/* pause the process for a moment to allow debugger to attach if desired. */
+/* Will pause more if greenlight file is not present but will eventually */
+/* continue. */
+#include <sys/types.h>
+#include <sys/stat.h>
+
+void
+pause_proc(void)
+{
+
+ int pid;
+ h5_stat_t statbuf;
+ char greenlight[] = "go";
+ int maxloop = 10;
+ int loops = 0;
+ int time_int = 10;
+
+ /* mpi variables */
+ int mpi_size, mpi_rank;
+ int mpi_namelen;
+ char mpi_name[MPI_MAX_PROCESSOR_NAME];
+
+ pid = getpid();
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+ MPI_Get_processor_name(mpi_name, &mpi_namelen);
+
+ if (MAINPROCESS)
+ while ((HDstat(greenlight, &statbuf) == -1) && loops < maxloop) {
+ if (!loops++) {
+ HDprintf("Proc %d (%*s, %d): to debug, attach %d\n", mpi_rank, mpi_namelen, mpi_name, pid,
+ pid);
+ }
+ HDprintf("waiting(%ds) for file %s ...\n", time_int, greenlight);
+ HDfflush(stdout);
+ HDsleep(time_int);
+ }
+ MPI_Barrier(MPI_COMM_WORLD);
+}
+
+/* Use the Profile feature of MPI to call the pause_proc() */
+int
+MPI_Init(int *argc, char ***argv)
+{
+ int ret_code;
+ ret_code = PMPI_Init(argc, argv);
+ pause_proc();
+ return (ret_code);
+}
+#endif /* USE_PAUSE */
+
+/*
+ * Show command usage
+ */
+static void
+usage(void)
+{
+ HDprintf(" [-r] [-w] [-m<n_datasets>] [-n<n_groups>] "
+ "[-o] [-f <prefix>] [-d <dim0> <dim1>]\n");
+ HDprintf("\t-m<n_datasets>"
+ "\tset number of datasets for the multiple dataset test\n");
+ HDprintf("\t-n<n_groups>"
+ "\tset number of groups for the multiple group test\n");
+#if 0
+ HDprintf("\t-f <prefix>\tfilename prefix\n");
+#endif
+ HDprintf("\t-2\t\tuse Split-file together with MPIO\n");
+ HDprintf("\t-d <factor0> <factor1>\tdataset dimensions factors. Defaults (%d,%d)\n", ROW_FACTOR,
+ COL_FACTOR);
+ HDprintf("\t-c <dim0> <dim1>\tdataset chunk dimensions. Defaults (dim0/10,dim1/10)\n");
+ HDprintf("\n");
+}
+
+/*
+ * parse the command line options
+ */
+static int
+parse_options(int argc, char **argv)
+{
+ int mpi_size, mpi_rank; /* mpi variables */
+
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ /* setup default chunk-size. Make sure sizes are > 0 */
+
+ chunkdim0 = (dim0 + 9) / 10;
+ chunkdim1 = (dim1 + 9) / 10;
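+
+ /* ((dim + 9) / 10) is ceil(dim / 10) for dim > 0, so each default chunk
+ * dimension is about one tenth of the corresponding dataset dimension,
+ * rounded up, and is never zero as long as dim >= 1. */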
+
+ while (--argc) {
+ if (**(++argv) != '-') {
+ break;
+ }
+ else {
+ switch (*(*argv + 1)) {
+ case 'm':
+ ndatasets = atoi((*argv + 1) + 1);
+ if (ndatasets < 0) {
+ nerrors++;
+ return (1);
+ }
+ break;
+ case 'n':
+ ngroups = atoi((*argv + 1) + 1);
+ if (ngroups < 0) {
+ nerrors++;
+ return (1);
+ }
+ break;
+#if 0
+ case 'f': if (--argc < 1) {
+ nerrors++;
+ return(1);
+ }
+ if (**(++argv) == '-') {
+ nerrors++;
+ return(1);
+ }
+ paraprefix = *argv;
+ break;
+#endif
+ case 'i': /* Collective MPI-IO access with independent IO */
+ dxfer_coll_type = DXFER_INDEPENDENT_IO;
+ break;
+ case '2': /* Use the split-file driver with MPIO access */
+ /* Can use $HDF5_METAPREFIX to define the */
+ /* meta-file-prefix. */
+ facc_type = FACC_MPIO | FACC_SPLIT;
+ break;
+ case 'd': /* dimension sizes */
+ if (--argc < 2) {
+ nerrors++;
+ return (1);
+ }
+ dim0 = atoi(*(++argv)) * mpi_size;
+ argc--;
+ dim1 = atoi(*(++argv)) * mpi_size;
+ /* set default chunkdim sizes too */
+ chunkdim0 = (dim0 + 9) / 10;
+ chunkdim1 = (dim1 + 9) / 10;
+ break;
+ case 'c': /* chunk dimensions */
+ if (--argc < 2) {
+ nerrors++;
+ return (1);
+ }
+ chunkdim0 = atoi(*(++argv));
+ argc--;
+ chunkdim1 = atoi(*(++argv));
+ break;
+ case 'h': /* print help message--return with nerrors set */
+ return (1);
+ default:
+ HDprintf("Illegal option(%s)\n", *argv);
+ nerrors++;
+ return (1);
+ }
+ }
+ } /*while*/
+
+ /* check validity of dimension and chunk sizes */
+ if (dim0 <= 0 || dim1 <= 0) {
+ HDprintf("Illegal dim sizes (%d, %d)\n", dim0, dim1);
+ nerrors++;
+ return (1);
+ }
+ if (chunkdim0 <= 0 || chunkdim1 <= 0) {
+ HDprintf("Illegal chunkdim sizes (%d, %d)\n", chunkdim0, chunkdim1);
+ nerrors++;
+ return (1);
+ }
+
+ /* Make sure datasets can be divided into equal portions by the processes */
+ if ((dim0 % mpi_size) || (dim1 % mpi_size)) {
+ if (MAINPROCESS)
+ HDprintf("dim0(%d) and dim1(%d) must be multiples of processes(%d)\n", dim0, dim1, mpi_size);
+ nerrors++;
+ return (1);
+ }
+
+ /* compose the test filenames */
+ {
+ int i, n;
+
+ n = sizeof(FILENAME) / sizeof(FILENAME[0]) - 1; /* exclude the NULL */
+
+ for (i = 0; i < n; i++)
+ strncpy(filenames[i], FILENAME[i], PATH_MAX);
+#if 0 /* no support for VFDs right now */
+ if (h5_fixname(FILENAME[i], fapl, filenames[i], PATH_MAX) == NULL) {
+ HDprintf("h5_fixname failed\n");
+ nerrors++;
+ return (1);
+ }
+#endif
+ if (MAINPROCESS) {
+ HDprintf("Test filenames are:\n");
+ for (i = 0; i < n; i++)
+ HDprintf(" %s\n", filenames[i]);
+ }
+ }
+
+ return (0);
+}
+
+/*
+ * Create the appropriate File access property list
+ */
+hid_t
+create_faccess_plist(MPI_Comm comm, MPI_Info info, int l_facc_type)
+{
+ hid_t ret_pl = -1;
+ herr_t ret; /* generic return value */
+ int mpi_rank; /* mpi variables */
+
+ /* need the rank for error checking macros */
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ ret_pl = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((ret_pl >= 0), "H5P_FILE_ACCESS");
+
+ if (l_facc_type == FACC_DEFAULT)
+ return (ret_pl);
+
+ if (l_facc_type == FACC_MPIO) {
+ /* set Parallel access with communicator */
+ ret = H5Pset_fapl_mpio(ret_pl, comm, info);
+ VRFY((ret >= 0), "");
+ ret = H5Pset_all_coll_metadata_ops(ret_pl, TRUE);
+ VRFY((ret >= 0), "");
+ ret = H5Pset_coll_metadata_write(ret_pl, TRUE);
+ VRFY((ret >= 0), "");
+ return (ret_pl);
+ }
+
+ if (l_facc_type == (FACC_MPIO | FACC_SPLIT)) {
+ hid_t mpio_pl;
+
+ mpio_pl = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((mpio_pl >= 0), "");
+ /* set Parallel access with communicator */
+ ret = H5Pset_fapl_mpio(mpio_pl, comm, info);
+ VRFY((ret >= 0), "");
+
+ /* setup file access template */
+ ret_pl = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((ret_pl >= 0), "");
+ /* set Parallel access with communicator */
+ ret = H5Pset_fapl_split(ret_pl, ".meta", mpio_pl, ".raw", mpio_pl);
+ VRFY((ret >= 0), "H5Pset_fapl_split succeeded");
+ H5Pclose(mpio_pl);
+ return (ret_pl);
+ }
+
+ /* unknown file access types */
+ return (ret_pl);
+}
+
+int
+main(int argc, char **argv)
+{
+ int mpi_size, mpi_rank; /* mpi variables */
+ herr_t ret;
+
+#if 0
+ H5Ptest_param_t ndsets_params, ngroups_params;
+ H5Ptest_param_t collngroups_params;
+ H5Ptest_param_t io_mode_confusion_params;
+ H5Ptest_param_t rr_obj_flush_confusion_params;
+#endif
+
+#ifndef H5_HAVE_WIN32_API
+ /* Un-buffer the stdout and stderr */
+ HDsetbuf(stderr, NULL);
+ HDsetbuf(stdout, NULL);
+#endif
+
+ MPI_Init(&argc, &argv);
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ dim0 = ROW_FACTOR * mpi_size;
+ dim1 = COL_FACTOR * mpi_size;
+
+ if (MAINPROCESS) {
+ HDprintf("===================================\n");
+ HDprintf("PHDF5 TESTS START\n");
+ HDprintf("===================================\n");
+ }
+
+ /* Attempt to turn off atexit post-processing so that, if errors occur
+ * during the test and the process is aborted, it does not hang in the
+ * atexit post-processing, where it may try to make MPI calls that no
+ * longer work by that point.
+ */
+ if (H5dont_atexit() < 0) {
+ HDprintf("Failed to turn off atexit processing. Continue.\n");
+ };
+ H5open();
+ /* h5_show_hostname(); */
+
+#if 0
+ HDmemset(filenames, 0, sizeof(filenames));
+ for (int i = 0; i < NFILENAME; i++) {
+ if (NULL == (filenames[i] = HDmalloc(PATH_MAX))) {
+ HDprintf("couldn't allocate filename array\n");
+ MPI_Abort(MPI_COMM_WORLD, -1);
+ }
+ }
+#endif
+
+ /* Set up file access property list with parallel I/O access */
+ fapl = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((fapl >= 0), "H5Pcreate succeeded");
+
+ vol_cap_flags_g = H5VL_CAP_FLAG_NONE;
+
+ /* Get the capability flag of the VOL connector being used */
+ ret = H5Pget_vol_cap_flags(fapl, &vol_cap_flags_g);
+ VRFY((ret >= 0), "H5Pget_vol_cap_flags succeeded");
+
+ /* Initialize testing framework */
+ /* TestInit(argv[0], usage, parse_options); */
+
+ if (parse_options(argc, argv)) {
+ usage();
+ return 1;
+ }
+
+ /* Tests are generally arranged from least to most complexity... */
+#if 0
+ AddTest("mpiodup", test_fapl_mpio_dup, NULL,
+ "fapl_mpio duplicate", NULL);
+#endif
+
+ if (MAINPROCESS) {
+ printf("fapl_mpio duplicate\n");
+ fflush(stdout);
+ }
+ test_fapl_mpio_dup();
+
+#if 0
+ AddTest("split", test_split_comm_access, NULL,
+ "dataset using split communicators", PARATESTFILE);
+ AddTest("props", test_file_properties, NULL,
+ "Coll Metadata file property settings", PARATESTFILE);
+#endif
+
+ if (MAINPROCESS) {
+ printf("dataset using split communicators\n");
+ fflush(stdout);
+ }
+ test_split_comm_access();
+
+ if (MAINPROCESS) {
+ printf("Coll Metadata file property settings\n");
+ fflush(stdout);
+ }
+ test_file_properties();
+
+#if 0
+ AddTest("idsetw", dataset_writeInd, NULL,
+ "dataset independent write", PARATESTFILE);
+ AddTest("idsetr", dataset_readInd, NULL,
+ "dataset independent read", PARATESTFILE);
+#endif
+
+ if (MAINPROCESS) {
+ printf("dataset independent write\n");
+ fflush(stdout);
+ }
+ dataset_writeInd();
+ if (MAINPROCESS) {
+ printf("dataset independent read\n");
+ fflush(stdout);
+ }
+ dataset_readInd();
+
+#if 0
+ AddTest("cdsetw", dataset_writeAll, NULL,
+ "dataset collective write", PARATESTFILE);
+ AddTest("cdsetr", dataset_readAll, NULL,
+ "dataset collective read", PARATESTFILE);
+#endif
+
+ if (MAINPROCESS) {
+ printf("dataset collective write\n");
+ fflush(stdout);
+ }
+ dataset_writeAll();
+ if (MAINPROCESS) {
+ printf("dataset collective read\n");
+ fflush(stdout);
+ }
+ dataset_readAll();
+
+#if 0
+ AddTest("eidsetw", extend_writeInd, NULL,
+ "extendible dataset independent write", PARATESTFILE);
+ AddTest("eidsetr", extend_readInd, NULL,
+ "extendible dataset independent read", PARATESTFILE);
+ AddTest("ecdsetw", extend_writeAll, NULL,
+ "extendible dataset collective write", PARATESTFILE);
+ AddTest("ecdsetr", extend_readAll, NULL,
+ "extendible dataset collective read", PARATESTFILE);
+ AddTest("eidsetw2", extend_writeInd2, NULL,
+ "extendible dataset independent write #2", PARATESTFILE);
+ AddTest("selnone", none_selection_chunk, NULL,
+ "chunked dataset with none-selection", PARATESTFILE);
+ AddTest("calloc", test_chunk_alloc, NULL,
+ "parallel extend Chunked allocation on serial file", PARATESTFILE);
+ AddTest("fltread", test_filter_read, NULL,
+ "parallel read of dataset written serially with filters", PARATESTFILE);
+#endif
+
+ if (MAINPROCESS) {
+ printf("extendible dataset independent write\n");
+ fflush(stdout);
+ }
+ extend_writeInd();
+ if (MAINPROCESS) {
+ printf("extendible dataset independent read\n");
+ fflush(stdout);
+ }
+ extend_readInd();
+ if (MAINPROCESS) {
+ printf("extendible dataset collective write\n");
+ fflush(stdout);
+ }
+ extend_writeAll();
+ if (MAINPROCESS) {
+ printf("extendible dataset collective read\n");
+ fflush(stdout);
+ }
+ extend_readAll();
+ if (MAINPROCESS) {
+ printf("extendible dataset independent write #2\n");
+ fflush(stdout);
+ }
+ extend_writeInd2();
+ if (MAINPROCESS) {
+ printf("chunked dataset with none-selection\n");
+ fflush(stdout);
+ }
+ none_selection_chunk();
+ if (MAINPROCESS) {
+ printf("parallel extend Chunked allocation on serial file\n");
+ fflush(stdout);
+ }
+ test_chunk_alloc();
+ if (MAINPROCESS) {
+ printf("parallel read of dataset written serially with filters\n");
+ fflush(stdout);
+ }
+ test_filter_read();
+
+#ifdef H5_HAVE_FILTER_DEFLATE
+#if 0
+ AddTest("cmpdsetr", compress_readAll, NULL,
+ "compressed dataset collective read", PARATESTFILE);
+#endif
+
+ if (MAINPROCESS) {
+ printf("compressed dataset collective read\n");
+ fflush(stdout);
+ }
+ compress_readAll();
+#endif /* H5_HAVE_FILTER_DEFLATE */
+
+#if 0
+ AddTest("zerodsetr", zero_dim_dset, NULL,
+ "zero dim dset", PARATESTFILE);
+#endif
+
+ if (MAINPROCESS) {
+ printf("zero dim dset\n");
+ fflush(stdout);
+ }
+ zero_dim_dset();
+
+#if 0
+ ndsets_params.name = PARATESTFILE;
+ ndsets_params.count = ndatasets;
+ AddTest("ndsetw", multiple_dset_write, NULL,
+ "multiple datasets write", &ndsets_params);
+#endif
+
+ if (MAINPROCESS) {
+ printf("multiple datasets write\n");
+ fflush(stdout);
+ }
+ multiple_dset_write();
+
+#if 0
+ ngroups_params.name = PARATESTFILE;
+ ngroups_params.count = ngroups;
+ AddTest("ngrpw", multiple_group_write, NULL,
+ "multiple groups write", &ngroups_params);
+ AddTest("ngrpr", multiple_group_read, NULL,
+ "multiple groups read", &ngroups_params);
+#endif
+
+ if (MAINPROCESS) {
+ printf("multiple groups write\n");
+ fflush(stdout);
+ }
+ multiple_group_write();
+ if (MAINPROCESS) {
+ printf("multiple groups read\n");
+ fflush(stdout);
+ }
+ multiple_group_read();
+
+#if 0
+ AddTest("compact", compact_dataset, NULL,
+ "compact dataset test", PARATESTFILE);
+#endif
+
+ if (MAINPROCESS) {
+ printf("compact dataset test\n");
+ fflush(stdout);
+ }
+ compact_dataset();
+
+#if 0
+ collngroups_params.name = PARATESTFILE;
+ collngroups_params.count = ngroups;
+ /* combined cngrpw and ingrpr tests because ingrpr reads file created by cngrpw. */
+ AddTest("cngrpw-ingrpr", collective_group_write_independent_group_read, NULL,
+ "collective grp/dset write - independent grp/dset read",
+ &collngroups_params);
+#ifndef H5_HAVE_WIN32_API
+ AddTest("bigdset", big_dataset, NULL,
+ "big dataset test", PARATESTFILE);
+#else
+ HDprintf("big dataset test will be skipped on Windows (JIRA HDDFV-8064)\n");
+#endif
+#endif
+
+ if (MAINPROCESS) {
+ printf("collective grp/dset write - independent grp/dset read\n");
+ fflush(stdout);
+ }
+ collective_group_write_independent_group_read();
+ if (MAINPROCESS) {
+ printf("big dataset test\n");
+ fflush(stdout);
+ }
+ big_dataset();
+
+#if 0
+ AddTest("fill", dataset_fillvalue, NULL,
+ "dataset fill value", PARATESTFILE);
+#endif
+
+ if (MAINPROCESS) {
+ printf("dataset fill value\n");
+ fflush(stdout);
+ }
+ dataset_fillvalue();
+
+#if 0
+ AddTest("cchunk1",
+ coll_chunk1,NULL, "simple collective chunk io",PARATESTFILE);
+ AddTest("cchunk2",
+ coll_chunk2,NULL, "noncontiguous collective chunk io",PARATESTFILE);
+ AddTest("cchunk3",
+ coll_chunk3,NULL, "multi-chunk collective chunk io",PARATESTFILE);
+ AddTest("cchunk4",
+ coll_chunk4,NULL, "collective chunk io with partial non-selection ",PARATESTFILE);
+#endif
+
+ if (MAINPROCESS) {
+ printf("simple collective chunk io\n");
+ fflush(stdout);
+ }
+ coll_chunk1();
+ if (MAINPROCESS) {
+ printf("noncontiguous collective chunk io\n");
+ fflush(stdout);
+ }
+ coll_chunk2();
+ if (MAINPROCESS) {
+ printf("multi-chunk collective chunk io\n");
+ fflush(stdout);
+ }
+ coll_chunk3();
+ if (MAINPROCESS) {
+ printf("collective chunk io with partial non-selection\n");
+ fflush(stdout);
+ }
+ coll_chunk4();
+
+ if ((mpi_size < 3) && MAINPROCESS) {
+ HDprintf("Collective chunk IO optimization APIs ");
+ HDprintf("needs at least 3 processes to participate\n");
+ HDprintf("Collective chunk IO API tests will be skipped \n");
+ }
+
+#if 0
+ AddTest((mpi_size <3)? "-cchunk5":"cchunk5" ,
+ coll_chunk5,NULL,
+ "linked chunk collective IO without optimization",PARATESTFILE);
+ AddTest((mpi_size < 3)? "-cchunk6" : "cchunk6",
+ coll_chunk6,NULL,
+ "multi-chunk collective IO with direct request",PARATESTFILE);
+ AddTest((mpi_size < 3)? "-cchunk7" : "cchunk7",
+ coll_chunk7,NULL,
+ "linked chunk collective IO with optimization",PARATESTFILE);
+ AddTest((mpi_size < 3)? "-cchunk8" : "cchunk8",
+ coll_chunk8,NULL,
+ "linked chunk collective IO transferring to multi-chunk",PARATESTFILE);
+ AddTest((mpi_size < 3)? "-cchunk9" : "cchunk9",
+ coll_chunk9,NULL,
+ "multiple chunk collective IO with optimization",PARATESTFILE);
+ AddTest((mpi_size < 3)? "-cchunk10" : "cchunk10",
+ coll_chunk10,NULL,
+ "multiple chunk collective IO transferring to independent IO",PARATESTFILE);
+#endif
+
+ if (mpi_size >= 3) {
+ if (MAINPROCESS) {
+ printf("linked chunk collective IO without optimization\n");
+ fflush(stdout);
+ }
+ coll_chunk5();
+ if (MAINPROCESS) {
+ printf("multi-chunk collective IO with direct request\n");
+ fflush(stdout);
+ }
+ coll_chunk6();
+ if (MAINPROCESS) {
+ printf("linked chunk collective IO with optimization\n");
+ fflush(stdout);
+ }
+ coll_chunk7();
+ if (MAINPROCESS) {
+ printf("linked chunk collective IO transferring to multi-chunk\n");
+ fflush(stdout);
+ }
+ coll_chunk8();
+ if (MAINPROCESS) {
+ printf("multiple chunk collective IO with optimization\n");
+ fflush(stdout);
+ }
+ coll_chunk9();
+ if (MAINPROCESS) {
+ printf("multiple chunk collective IO transferring to independent IO\n");
+ fflush(stdout);
+ }
+ coll_chunk10();
+ }
+
+#if 0
+ /* irregular collective IO tests*/
+ AddTest("ccontw",
+ coll_irregular_cont_write,NULL,
+ "collective irregular contiguous write",PARATESTFILE);
+ AddTest("ccontr",
+ coll_irregular_cont_read,NULL,
+ "collective irregular contiguous read",PARATESTFILE);
+ AddTest("cschunkw",
+ coll_irregular_simple_chunk_write,NULL,
+ "collective irregular simple chunk write",PARATESTFILE);
+ AddTest("cschunkr",
+ coll_irregular_simple_chunk_read,NULL,
+ "collective irregular simple chunk read",PARATESTFILE);
+ AddTest("ccchunkw",
+ coll_irregular_complex_chunk_write,NULL,
+ "collective irregular complex chunk write",PARATESTFILE);
+ AddTest("ccchunkr",
+ coll_irregular_complex_chunk_read,NULL,
+ "collective irregular complex chunk read",PARATESTFILE);
+#endif
+
+ if (MAINPROCESS) {
+ printf("collective irregular contiguous write\n");
+ fflush(stdout);
+ }
+ coll_irregular_cont_write();
+ if (MAINPROCESS) {
+ printf("collective irregular contiguous read\n");
+ fflush(stdout);
+ }
+ coll_irregular_cont_read();
+ if (MAINPROCESS) {
+ printf("collective irregular simple chunk write\n");
+ fflush(stdout);
+ }
+ coll_irregular_simple_chunk_write();
+ if (MAINPROCESS) {
+ printf("collective irregular simple chunk read\n");
+ fflush(stdout);
+ }
+ coll_irregular_simple_chunk_read();
+ if (MAINPROCESS) {
+ printf("collective irregular complex chunk write\n");
+ fflush(stdout);
+ }
+ coll_irregular_complex_chunk_write();
+ if (MAINPROCESS) {
+ printf("collective irregular complex chunk read\n");
+ fflush(stdout);
+ }
+ coll_irregular_complex_chunk_read();
+
+#if 0
+ AddTest("null", null_dataset, NULL,
+ "null dataset test", PARATESTFILE);
+#endif
+
+ if (MAINPROCESS) {
+ printf("null dataset test\n");
+ fflush(stdout);
+ }
+ null_dataset();
+
+#if 0
+ io_mode_confusion_params.name = PARATESTFILE;
+ io_mode_confusion_params.count = 0; /* value not used */
+
+ AddTest("I/Omodeconf", io_mode_confusion, NULL,
+ "I/O mode confusion test",
+ &io_mode_confusion_params);
+#endif
+
+ if (MAINPROCESS) {
+ printf("I/O mode confusion test\n");
+ fflush(stdout);
+ }
+ io_mode_confusion();
+
+ if ((mpi_size < 3) && MAINPROCESS) {
+ HDprintf("rr_obj_hdr_flush_confusion test needs at least 3 processes.\n");
+ HDprintf("rr_obj_hdr_flush_confusion test will be skipped \n");
+ }
+
+ if (mpi_size > 2) {
+#if 0
+ rr_obj_flush_confusion_params.name = PARATESTFILE;
+ rr_obj_flush_confusion_params.count = 0; /* value not used */
+ AddTest("rrobjflushconf", rr_obj_hdr_flush_confusion, NULL,
+ "round robin object header flush confusion test",
+ &rr_obj_flush_confusion_params);
+#endif
+
+ if (MAINPROCESS) {
+ printf("round robin object header flush confusion test\n");
+ fflush(stdout);
+ }
+ rr_obj_hdr_flush_confusion();
+ }
+
+#if 0
+ AddTest("alnbg1",
+ chunk_align_bug_1, NULL,
+ "Chunk allocation with alignment bug.",
+ PARATESTFILE);
+
+ AddTest("tldsc",
+ lower_dim_size_comp_test, NULL,
+ "test lower dim size comp in span tree to mpi derived type",
+ PARATESTFILE);
+
+ AddTest("lccio",
+ link_chunk_collective_io_test, NULL,
+ "test mpi derived type management",
+ PARATESTFILE);
+
+ AddTest("actualio", actual_io_mode_tests, NULL,
+ "test actual io mode proprerty",
+ PARATESTFILE);
+
+ AddTest("nocolcause", no_collective_cause_tests, NULL,
+ "test cause for broken collective io",
+ PARATESTFILE);
+
+ AddTest("edpl", test_plist_ed, NULL,
+ "encode/decode Property Lists", NULL);
+#endif
+
+ if (MAINPROCESS) {
+ printf("Chunk allocation with alignment bug\n");
+ fflush(stdout);
+ }
+ chunk_align_bug_1();
+ if (MAINPROCESS) {
+ printf("test lower dim size comp in span tree to mpi derived type\n");
+ fflush(stdout);
+ }
+ lower_dim_size_comp_test();
+ if (MAINPROCESS) {
+ printf("test mpi derived type management\n");
+ fflush(stdout);
+ }
+ link_chunk_collective_io_test();
+ if (MAINPROCESS) {
+ printf("test actual io mode property - SKIPPED currently due to native-specific testing\n");
+ fflush(stdout);
+ }
+ /* actual_io_mode_tests(); */
+ if (MAINPROCESS) {
+ printf("test cause for broken collective io - SKIPPED currently due to native-specific testing\n");
+ fflush(stdout);
+ }
+ /* no_collective_cause_tests(); */
+ if (MAINPROCESS) {
+ printf("encode/decode Property Lists\n");
+ fflush(stdout);
+ }
+ test_plist_ed();
+
+ if ((mpi_size < 2) && MAINPROCESS) {
+ HDprintf("File Image Ops daisy chain test needs at least 2 processes.\n");
+ HDprintf("File Image Ops daisy chain test will be skipped \n");
+ }
+
+#if 0
+ AddTest((mpi_size < 2)? "-fiodc" : "fiodc", file_image_daisy_chain_test, NULL,
+ "file image ops daisy chain", NULL);
+#endif
+
+ if (mpi_size >= 2) {
+ if (MAINPROCESS) {
+ printf("file image ops daisy chain - SKIPPED currently due to native-specific testing\n");
+ fflush(stdout);
+ }
+ /* file_image_daisy_chain_test(); */
+ }
+
+ if ((mpi_size < 2) && MAINPROCESS) {
+ HDprintf("Atomicity tests need at least 2 processes to participate\n");
+ HDprintf("8 is more recommended.. Atomicity tests will be skipped \n");
+ }
+ else if (facc_type != FACC_MPIO && MAINPROCESS) {
+ HDprintf("Atomicity tests will not work with a non MPIO VFD\n");
+ }
+ else if (mpi_size >= 2 && facc_type == FACC_MPIO) {
+#if 0
+ AddTest("atomicity", dataset_atomicity, NULL,
+ "dataset atomic updates", PARATESTFILE);
+#endif
+
+ if (MAINPROCESS) {
+ printf("dataset atomic updates - SKIPPED currently due to native-specific testing\n");
+ fflush(stdout);
+ }
+ /* dataset_atomicity(); */
+ }
+
+#if 0
+ AddTest("denseattr", test_dense_attr, NULL,
+ "Store Dense Attributes", PARATESTFILE);
+#endif
+
+ if (MAINPROCESS) {
+ printf("Store Dense Attributes\n");
+ fflush(stdout);
+ }
+ test_dense_attr();
+
+#if 0
+ AddTest("noselcollmdread", test_partial_no_selection_coll_md_read, NULL,
+ "Collective Metadata read with some ranks having no selection", PARATESTFILE);
+ AddTest("MC_coll_MD_read", test_multi_chunk_io_addrmap_issue, NULL,
+ "Collective MD read with multi chunk I/O (H5D__chunk_addrmap)", PARATESTFILE);
+ AddTest("LC_coll_MD_read", test_link_chunk_io_sort_chunk_issue, NULL,
+ "Collective MD read with link chunk I/O (H5D__sort_chunk)", PARATESTFILE);
+#endif
+
+ if (MAINPROCESS) {
+ printf("Collective Metadata read with some ranks having no selection\n");
+ fflush(stdout);
+ }
+ test_partial_no_selection_coll_md_read();
+ if (MAINPROCESS) {
+ printf("Collective MD read with multi chunk I/O\n");
+ fflush(stdout);
+ }
+ test_multi_chunk_io_addrmap_issue();
+ if (MAINPROCESS) {
+ printf("Collective MD read with link chunk I/O\n");
+ fflush(stdout);
+ }
+ test_link_chunk_io_sort_chunk_issue();
+
+ /* Display testing information */
+ /* TestInfo(argv[0]); */
+
+ /* setup file access property list */
+ H5Pset_fapl_mpio(fapl, MPI_COMM_WORLD, MPI_INFO_NULL);
+
+ /* Parse command line arguments */
+ /* TestParseCmdLine(argc, argv); */
+
+ if (dxfer_coll_type == DXFER_INDEPENDENT_IO && MAINPROCESS) {
+ HDprintf("===================================\n"
+ " Using Independent I/O with file set view to replace collective I/O \n"
+ "===================================\n");
+ }
+
+ /* Perform requested testing */
+ /* PerformTests(); */
+
+ /* make sure all processes are finished before final report, cleanup
+ * and exit.
+ */
+ MPI_Barrier(MPI_COMM_WORLD);
+
+ /* Display test summary, if requested */
+ /* if (MAINPROCESS && GetTestSummary())
+ TestSummary(); */
+
+ /* Clean up test files */
+ /* h5_clean_files(FILENAME, fapl); */
+ H5Fdelete(FILENAME[0], fapl);
+ H5Pclose(fapl);
+
+ /* nerrors += GetTestNumErrs(); */
+
+ /* Gather errors from all processes */
+ {
+ int temp;
+ MPI_Allreduce(&nerrors, &temp, 1, MPI_INT, MPI_MAX, MPI_COMM_WORLD);
+ nerrors = temp;
+ }
+
+ if (MAINPROCESS) { /* only process 0 reports */
+ HDprintf("===================================\n");
+ if (nerrors)
+ HDprintf("***PHDF5 tests detected %d errors***\n", nerrors);
+ else
+ HDprintf("PHDF5 tests finished successfully\n");
+ HDprintf("===================================\n");
+ }
+
+#if 0
+ for (int i = 0; i < NFILENAME; i++) {
+ HDfree(filenames[i]);
+ filenames[i] = NULL;
+ }
+#endif
+
+ /* close HDF5 library */
+ H5close();
+
+ /* Release test infrastructure */
+ /* TestShutdown(); */
+
+ /* MPI_Finalize must be called AFTER H5close which may use MPI calls */
+ MPI_Finalize();
+
+ /* cannot just return (nerrors) because exit code is limited to 1 byte */
+ return (nerrors != 0);
+}
diff --git a/testpar/API/testphdf5.h b/testpar/API/testphdf5.h
new file mode 100644
index 0000000..27d53e2
--- /dev/null
+++ b/testpar/API/testphdf5.h
@@ -0,0 +1,343 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://www.hdfgroup.org/licenses. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/* common definitions used by all parallel hdf5 test programs. */
+
+#ifndef PHDF5TEST_H
+#define PHDF5TEST_H
+
+#include "H5private.h"
+#include "testpar.h"
+#include "H5_api_tests_disabled.h"
+
+/*
+ * Define parameters for various tests since we do not have access to
+ * passing parameters to tests via the testphdf5 test framework.
+ */
+#define PARATESTFILE "ParaTest.h5"
+#define NDATASETS 300
+#define NGROUPS 256
+
+/* Disable express testing by default */
+#define EXPRESS_MODE 0
+
+enum H5TEST_COLL_CHUNK_API {
+ API_NONE = 0,
+ API_LINK_HARD,
+ API_MULTI_HARD,
+ API_LINK_TRUE,
+ API_LINK_FALSE,
+ API_MULTI_COLL,
+ API_MULTI_IND
+};
+
+#ifndef FALSE
+#define FALSE 0
+#endif
+
+#ifndef TRUE
+#define TRUE 1
+#endif
+
+/* Constants definitions */
+#define DIM0 600 /* Default dataset sizes. */
+#define DIM1 1200 /* Values are from a monitor pixel sizes */
+#define ROW_FACTOR 8 /* Nominal row factor for dataset size */
+#define COL_FACTOR 16 /* Nominal column factor for dataset size */
+#define RANK 2
+#define DATASETNAME1 "Data1"
+#define DATASETNAME2 "Data2"
+#define DATASETNAME3 "Data3"
+#define DATASETNAME4 "Data4"
+#define DATASETNAME5 "Data5"
+#define DATASETNAME6 "Data6"
+#define DATASETNAME7 "Data7"
+#define DATASETNAME8 "Data8"
+#define DATASETNAME9 "Data9"
+
+/* point selection order */
+#define IN_ORDER 1
+#define OUT_OF_ORDER 2
+
+/* Hyperslab layout styles */
+#define BYROW 1 /* divide into slabs of rows */
+#define BYCOL 2 /* divide into blocks of columns */
+#define ZROW 3 /* same as BYROW except process 0 gets 0 rows */
+#define ZCOL 4 /* same as BYCOL except process 0 gets 0 columns */
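/*
 * Illustration (not part of the patch): a sketch of how these layout styles
 * are typically translated into a per-process hyperslab over a dim0 x dim1
 * dataset split across mpi_size ranks.  dim0/dim1 are the shared globals
 * declared further below; hsize_t comes from hdf5.h; the function name is
 * illustrative.  ZROW/ZCOL follow the same shapes but give rank 0 an empty
 * selection.
 */
static void
slab_layout_sketch(int mpi_rank, int mpi_size, int layout, hsize_t start[2], hsize_t block[2])
{
    if (layout == BYROW) { /* contiguous slabs of whole rows */
        block[0] = (hsize_t)(dim0 / mpi_size);
        block[1] = (hsize_t)dim1;
        start[0] = (hsize_t)mpi_rank * block[0];
        start[1] = 0;
    }
    else { /* BYCOL: blocks of whole columns */
        block[0] = (hsize_t)dim0;
        block[1] = (hsize_t)(dim1 / mpi_size);
        start[0] = 0;
        start[1] = (hsize_t)mpi_rank * block[1];
    }
}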
+
+/* File_Access_type bits */
+#define FACC_DEFAULT 0x0 /* default */
+#define FACC_MPIO 0x1 /* MPIO */
+#define FACC_SPLIT 0x2 /* Split File */
+
+#define DXFER_COLLECTIVE_IO 0x1 /* Collective IO */
+#define DXFER_INDEPENDENT_IO 0x2 /* Independent IO collectively */
+/* Constants for collective chunk definitions */
+#define SPACE_DIM1 24
+#define SPACE_DIM2 4
+#define BYROW_CONT 1
+#define BYROW_DISCONT 2
+#define BYROW_SELECTNONE 3
+#define BYROW_SELECTUNBALANCE 4
+#define BYROW_SELECTINCHUNK 5
+
+#define DIMO_NUM_CHUNK 4
+#define DIM1_NUM_CHUNK 2
+#define LINK_TRUE_NUM_CHUNK 2
+#define LINK_FALSE_NUM_CHUNK 6
+#define MULTI_TRUE_PERCENT 50
+#define LINK_TRUE_CHUNK_NAME "h5_link_chunk_true"
+#define LINK_FALSE_CHUNK_NAME "h5_link_chunk_false"
+#define LINK_HARD_CHUNK_NAME "h5_link_chunk_hard"
+#define MULTI_HARD_CHUNK_NAME "h5_multi_chunk_hard"
+#define MULTI_COLL_CHUNK_NAME "h5_multi_chunk_coll"
+#define MULTI_INDP_CHUNK_NAME "h5_multi_chunk_indp"
+
+#define DSET_COLLECTIVE_CHUNK_NAME "coll_chunk_name"
+
+/* Constants for MPI derived datatype generated from span tree */
+
+#define MSPACE1_RANK 1 /* Rank of the first dataset in memory */
+#define MSPACE1_DIM 27000 /* Dataset size in memory */
+#define FSPACE_RANK 2 /* Dataset rank as it is stored in the file */
+#define FSPACE_DIM1 9 /* Dimension sizes of the dataset as it is stored in the file */
+#define FSPACE_DIM2 3600
+/* We will read the dataset back from the file into the dataset in memory with these dataspace parameters. */
+#define MSPACE_RANK 2
+#define MSPACE_DIM1 9
+#define MSPACE_DIM2 3600
+#define FHCOUNT0 1 /* Count of the first dimension of the first hyperslab selection*/
+#define FHCOUNT1 768 /* Count of the second dimension of the first hyperslab selection*/
+#define FHSTRIDE0 4 /* Stride of the first dimension of the first hyperslab selection*/
+#define FHSTRIDE1 3 /* Stride of the second dimension of the first hyperslab selection*/
+#define FHBLOCK0 3 /* Block of the first dimension of the first hyperslab selection*/
+#define FHBLOCK1 2 /* Block of the second dimension of the first hyperslab selection*/
+#define FHSTART0 0 /* start of the first dimension of the first hyperslab selection*/
+#define FHSTART1 1 /* start of the second dimension of the first hyperslab selection*/
+
+#define SHCOUNT0 1 /* Count of the first dimension of the first hyperslab selection*/
+#define SHCOUNT1 1 /* Count of the second dimension of the first hyperslab selection*/
+#define SHSTRIDE0 1 /* Stride of the first dimension of the first hyperslab selection*/
+#define SHSTRIDE1 1 /* Stride of the second dimension of the first hyperslab selection*/
+#define SHBLOCK0 3 /* Block of the first dimension of the first hyperslab selection*/
+#define SHBLOCK1 768 /* Block of the second dimension of the first hyperslab selection*/
+#define SHSTART0 4 /* start of the first dimension of the first hyperslab selection*/
+#define SHSTART1 0 /* start of the second dimension of the first hyperslab selection*/
+
+#define MHCOUNT0 6912 /* Count of the first dimension of the first hyperslab selection*/
+#define MHSTRIDE0 1 /* Stride of the first dimension of the first hyperslab selection*/
+#define MHBLOCK0 1 /* Block of the first dimension of the first hyperslab selection*/
+#define MHSTART0 1 /* start of the first dimension of the first hyperslab selection*/
+
+#define RFFHCOUNT0 3 /* Count of the first dimension of the first hyperslab selection*/
+#define RFFHCOUNT1 768 /* Count of the second dimension of the first hyperslab selection*/
+#define RFFHSTRIDE0 1 /* Stride of the first dimension of the first hyperslab selection*/
+#define RFFHSTRIDE1 1 /* Stride of the second dimension of the first hyperslab selection*/
+#define RFFHBLOCK0 1 /* Block of the first dimension of the first hyperslab selection*/
+#define RFFHBLOCK1 1 /* Block of the second dimension of the first hyperslab selection*/
+#define RFFHSTART0 1 /* start of the first dimension of the first hyperslab selection*/
+#define RFFHSTART1 2 /* start of the second dimension of the first hyperslab selection*/
+
+#define RFSHCOUNT0 3 /* Count of the first dimension of the first hyperslab selection*/
+#define RFSHCOUNT1 1536 /* Count of the second dimension of the first hyperslab selection*/
+#define RFSHSTRIDE0 1 /* Stride of the first dimension of the first hyperslab selection*/
+#define RFSHSTRIDE1 1 /* Stride of the second dimension of the first hyperslab selection*/
+#define RFSHBLOCK0 1 /* Block of the first dimension of the first hyperslab selection*/
+#define RFSHBLOCK1 1 /* Block of the second dimension of the first hyperslab selection*/
+#define RFSHSTART0 2 /* start of the first dimension of the first hyperslab selection*/
+#define RFSHSTART1 4 /* start of the second dimension of the first hyperslab selection*/
+
+#define RMFHCOUNT0 3 /* Count of the first dimension of the first hyperslab selection*/
+#define RMFHCOUNT1 768 /* Count of the second dimension of the first hyperslab selection*/
+#define RMFHSTRIDE0 1 /* Stride of the first dimension of the first hyperslab selection*/
+#define RMFHSTRIDE1 1 /* Stride of the second dimension of the first hyperslab selection*/
+#define RMFHBLOCK0 1 /* Block of the first dimension of the first hyperslab selection*/
+#define RMFHBLOCK1 1 /* Block of the second dimension of the first hyperslab selection*/
+#define RMFHSTART0 0 /* start of the first dimension of the first hyperslab selection*/
+#define RMFHSTART1 0 /* start of the second dimension of the first hyperslab selection*/
+
+#define RMSHCOUNT0 3 /* Count of the first dimension of the first hyperslab selection*/
+#define RMSHCOUNT1 1536 /* Count of the second dimension of the first hyperslab selection*/
+#define RMSHSTRIDE0 1 /* Stride of the first dimension of the first hyperslab selection*/
+#define RMSHSTRIDE1 1 /* Stride of the second dimension of the first hyperslab selection*/
+#define RMSHBLOCK0 1 /* Block of the first dimension of the first hyperslab selection*/
+#define RMSHBLOCK1 1 /* Block of the second dimension of the first hyperslab selection*/
+#define RMSHSTART0 1 /* start of the first dimension of the first hyperslab selection*/
+#define RMSHSTART1 2 /* start of the second dimension of the first hyperslab selection*/
+
+#define NPOINTS \
+ 4 /* Number of points that will be selected \
+ and overwritten */
+
+/* Definitions of the selection modes for the actual I/O mode tests. */
+#define TEST_ACTUAL_IO_NO_COLLECTIVE 0
+#define TEST_ACTUAL_IO_RESET 1
+#define TEST_ACTUAL_IO_MULTI_CHUNK_IND 2
+#define TEST_ACTUAL_IO_MULTI_CHUNK_COL 3
+#define TEST_ACTUAL_IO_MULTI_CHUNK_MIX 4
+#define TEST_ACTUAL_IO_MULTI_CHUNK_MIX_DISAGREE 5
+#define TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_IND 6
+#define TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_COL 7
+#define TEST_ACTUAL_IO_LINK_CHUNK 8
+#define TEST_ACTUAL_IO_CONTIGUOUS 9
+
+/* Definitions of the selection mode for the no_collective_cause_tests function. */
+#define TEST_COLLECTIVE 0x001
+#define TEST_SET_INDEPENDENT 0x002
+#define TEST_DATATYPE_CONVERSION 0x004
+#define TEST_DATA_TRANSFORMS 0x008
+#define TEST_NOT_SIMPLE_OR_SCALAR_DATASPACES 0x010
+#define TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_COMPACT 0x020
+#define TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_EXTERNAL 0x040
+
+/* Don't erase these lines; they are kept here for debugging purposes */
+/*
+#define MSPACE1_RANK 1
+#define MSPACE1_DIM 50
+#define MSPACE2_RANK 1
+#define MSPACE2_DIM 4
+#define FSPACE_RANK 2
+#define FSPACE_DIM1 8
+#define FSPACE_DIM2 12
+#define MSPACE_RANK 2
+#define MSPACE_DIM1 8
+#define MSPACE_DIM2 9
+#define NPOINTS 4
+*/ /* end of debugging macro */
+
+#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
+/* Collective chunk instrumentation properties */
+#define H5D_XFER_COLL_CHUNK_LINK_HARD_NAME "coll_chunk_link_hard"
+#define H5D_XFER_COLL_CHUNK_MULTI_HARD_NAME "coll_chunk_multi_hard"
+#define H5D_XFER_COLL_CHUNK_LINK_NUM_TRUE_NAME "coll_chunk_link_true"
+#define H5D_XFER_COLL_CHUNK_LINK_NUM_FALSE_NAME "coll_chunk_link_false"
+#define H5D_XFER_COLL_CHUNK_MULTI_RATIO_COLL_NAME "coll_chunk_multi_coll"
+#define H5D_XFER_COLL_CHUNK_MULTI_RATIO_IND_NAME "coll_chunk_multi_ind"
+
+/* Definitions for all collective chunk instrumentation properties */
+#define H5D_XFER_COLL_CHUNK_SIZE sizeof(unsigned)
+#define H5D_XFER_COLL_CHUNK_DEF 1
+
+/* General collective I/O instrumentation properties */
+#define H5D_XFER_COLL_RANK0_BCAST_NAME "coll_rank0_bcast"
+
+/* Definitions for general collective I/O instrumentation properties */
+#define H5D_XFER_COLL_RANK0_BCAST_SIZE sizeof(hbool_t)
+#define H5D_XFER_COLL_RANK0_BCAST_DEF FALSE
+#endif /* H5_HAVE_INSTRUMENTED_LIBRARY */
+
+/* type definitions */
+typedef struct H5Ptest_param_t /* holds extra test parameters */
+{
+ char *name;
+ int count;
+} H5Ptest_param_t;
+
+/* Dataset data type. Ints can easily be octal dumped. */
+typedef int DATATYPE;
+
+/* Shape Same Tests Definitions */
+typedef enum {
+ IND_CONTIG, /* Independent IO on contiguous datasets */
+ COL_CONTIG, /* Collective IO on contiguous datasets */
+ IND_CHUNKED, /* Independent IO on chunked datasets */
+ COL_CHUNKED /* Collective IO on chunked datasets */
+} ShapeSameTestMethods;
+
+/* Shared global variables */
+extern int dim0, dim1; /* Dataset dimensions */
+extern int chunkdim0, chunkdim1; /* Chunk dimensions */
+extern int nerrors; /* Error count */
+extern H5E_auto2_t old_func; /* Previous error handler */
+extern void *old_client_data; /* Previous error handler argument */
+extern int facc_type; /* Test file access type */
+extern int dxfer_coll_type;
+
+/* Test program prototypes */
+void test_plist_ed(void);
+#if 0
+void external_links(void);
+#endif
+void zero_dim_dset(void);
+void test_file_properties(void);
+void test_delete(void);
+void multiple_dset_write(void);
+void multiple_group_write(void);
+void multiple_group_read(void);
+void collective_group_write_independent_group_read(void);
+void collective_group_write(void);
+void independent_group_read(void);
+void test_fapl_mpio_dup(void);
+void test_split_comm_access(void);
+void test_page_buffer_access(void);
+void dataset_atomicity(void);
+void dataset_writeInd(void);
+void dataset_writeAll(void);
+void extend_writeInd(void);
+void extend_writeInd2(void);
+void extend_writeAll(void);
+void dataset_readInd(void);
+void dataset_readAll(void);
+void extend_readInd(void);
+void extend_readAll(void);
+void none_selection_chunk(void);
+void actual_io_mode_tests(void);
+void no_collective_cause_tests(void);
+void test_chunk_alloc(void);
+void test_filter_read(void);
+void compact_dataset(void);
+void null_dataset(void);
+void big_dataset(void);
+void dataset_fillvalue(void);
+void coll_chunk1(void);
+void coll_chunk2(void);
+void coll_chunk3(void);
+void coll_chunk4(void);
+void coll_chunk5(void);
+void coll_chunk6(void);
+void coll_chunk7(void);
+void coll_chunk8(void);
+void coll_chunk9(void);
+void coll_chunk10(void);
+void coll_irregular_cont_read(void);
+void coll_irregular_cont_write(void);
+void coll_irregular_simple_chunk_read(void);
+void coll_irregular_simple_chunk_write(void);
+void coll_irregular_complex_chunk_read(void);
+void coll_irregular_complex_chunk_write(void);
+void io_mode_confusion(void);
+void rr_obj_hdr_flush_confusion(void);
+void rr_obj_hdr_flush_confusion_reader(MPI_Comm comm);
+void rr_obj_hdr_flush_confusion_writer(MPI_Comm comm);
+void chunk_align_bug_1(void);
+void lower_dim_size_comp_test(void);
+void link_chunk_collective_io_test(void);
+void contig_hyperslab_dr_pio_test(ShapeSameTestMethods sstest_type);
+void checker_board_hyperslab_dr_pio_test(ShapeSameTestMethods sstest_type);
+void file_image_daisy_chain_test(void);
+#ifdef H5_HAVE_FILTER_DEFLATE
+void compress_readAll(void);
+#endif /* H5_HAVE_FILTER_DEFLATE */
+void test_dense_attr(void);
+void test_partial_no_selection_coll_md_read(void);
+void test_multi_chunk_io_addrmap_issue(void);
+void test_link_chunk_io_sort_chunk_issue(void);
+void test_collective_global_heap_write(void);
+
+/* commonly used prototypes */
+hid_t create_faccess_plist(MPI_Comm comm, MPI_Info info, int l_facc_type);
+MPI_Offset h5_mpi_get_file_size(const char *filename, MPI_Comm comm, MPI_Info info);
+int dataset_vrfy(hsize_t start[], hsize_t count[], hsize_t stride[], hsize_t block[], DATATYPE *dataset,
+ DATATYPE *original);
+void point_set(hsize_t start[], hsize_t count[], hsize_t stride[], hsize_t block[], size_t num_points,
+ hsize_t coords[], int order);
+#endif /* PHDF5TEST_H */
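For context, a test listed in the prototypes above would typically obtain its file access property list from create_faccess_plist() and report failures through the shared nerrors counter. The skeleton below is a hypothetical sketch, not a test taken from this patch; the function name and body are illustrative only.

#include "testphdf5.h"

void
example_parallel_test_sketch(void) /* hypothetical; real tests are declared above */
{
    hid_t fapl = H5I_INVALID_HID;
    hid_t fid  = H5I_INVALID_HID;

    /* Build an MPI-IO (or split-file) access list according to facc_type */
    if ((fapl = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type)) < 0) {
        nerrors++;
        return;
    }

    /* All ranks create/open the shared test file collectively */
    if ((fid = H5Fcreate(PARATESTFILE, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0)
        nerrors++;

    /* ... dataspace/dataset creation and collective I/O would go here ... */

    if (fid >= 0)
        H5Fclose(fid);
    H5Pclose(fapl);
}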
diff --git a/testpar/CMakeLists.txt b/testpar/CMakeLists.txt
index 3a44fca..d34b800 100644
--- a/testpar/CMakeLists.txt
+++ b/testpar/CMakeLists.txt
@@ -104,10 +104,30 @@ set (H5P_TESTS
t_vfd
)
+set (HDF5_API_TESTS
+ attribute
+ dataset
+ datatype
+ file
+ group
+ link
+ misc
+ object
+)
+
+if (HDF5_TEST_API_ENABLE_ASYNC)
+ set (HDF5_API_TESTS
+ ${HDF5_API_TESTS}
+ async
+ )
+endif ()
+
foreach (h5_testp ${H5P_TESTS})
ADD_H5P_EXE(${h5_testp})
endforeach ()
+add_subdirectory (API)
+
if (HDF5_TEST_PARALLEL)
include (CMakeTests.cmake)
endif ()