From 830bf561e1236308cdffe0c519c6e779ec0929e3 Mon Sep 17 00:00:00 2001
From: Allen Byrne <50328838+byrnHDF@users.noreply.github.com>
Date: Tue, 26 Sep 2023 08:46:15 -0500
Subject: Merge CMake,doxygen changes from develop,1.10 branches (#3578)
* Merge CMake,doxygen changes from develop,1.10 branches
* revert incorrect option in AT TS build
* Use variable for ignore-eol usage.
* Add last_test depends logic
* Just print status on CMake below 3.14
---
.github/workflows/main.yml | 89 ++-
.github/workflows/netcdf.yml | 3 +
CMakeFilters.cmake | 2 +-
CMakeInstallation.cmake | 8 +-
config/cmake/fileCompareTest.cmake | 4 +-
config/cmake/hdf5-config.cmake.in | 13 +-
config/cmake/patch.xml | 11 -
config/cmake/patch.xml.in | 11 +
config/cmake/runTest.cmake | 4 +
config/cmake/scripts/HDF5options.cmake | 6 +-
config/sanitizer/README.md | 2 +-
config/toolchain/build32.cmake | 2 +-
config/toolchain/clang.cmake | 16 +-
config/toolchain/crayle.cmake | 10 +-
config/toolchain/gcc.cmake | 12 +-
config/toolchain/mingw64.cmake | 2 +-
config/toolchain/pgi.cmake | 12 +-
doxygen/dox/ExamplesAPI.dox | 1010 ++++++++++++++++++++++++++++++++
doxygen/dox/GettingStarted.dox | 4 +-
doxygen/dox/IntroHDF5.dox | 2 +-
doxygen/dox/IntroParExamples.dox | 569 ++++++++++++++++++
doxygen/dox/IntroParHDF5.dox | 271 +++++++++
doxygen/dox/LearnBasics1.dox | 2 +-
doxygen/img/pchunk_figa.gif | Bin 0 -> 2754 bytes
doxygen/img/pchunk_figb.gif | Bin 0 -> 2094 bytes
doxygen/img/pchunk_figc.gif | Bin 0 -> 3194 bytes
doxygen/img/pchunk_figd.gif | Bin 0 -> 2984 bytes
doxygen/img/pcont_hy_figa.gif | Bin 0 -> 3201 bytes
doxygen/img/pcont_hy_figb.gif | Bin 0 -> 2450 bytes
doxygen/img/pcont_hy_figc.gif | Bin 0 -> 3694 bytes
doxygen/img/pcont_hy_figd.gif | Bin 0 -> 2723 bytes
doxygen/img/ppatt_figa.gif | Bin 0 -> 2359 bytes
doxygen/img/ppatt_figb.gif | Bin 0 -> 2431 bytes
doxygen/img/ppatt_figc.gif | Bin 0 -> 2616 bytes
doxygen/img/ppatt_figd.gif | Bin 0 -> 2505 bytes
doxygen/img/preg_figa.gif | Bin 0 -> 2359 bytes
doxygen/img/preg_figb.gif | Bin 0 -> 2033 bytes
doxygen/img/preg_figc.gif | Bin 0 -> 3242 bytes
doxygen/img/preg_figd.gif | Bin 0 -> 2367 bytes
examples/CMakeTests.cmake | 4 +-
java/examples/datasets/CMakeLists.txt | 2 +-
tools/test/h5diff/CMakeTests.cmake | 4 +-
tools/test/h5dump/CMakeTests.cmake | 8 +-
tools/test/misc/CMakeTestsClear.cmake | 8 +
44 files changed, 2014 insertions(+), 77 deletions(-)
delete mode 100644 config/cmake/patch.xml
create mode 100644 config/cmake/patch.xml.in
create mode 100644 doxygen/dox/ExamplesAPI.dox
create mode 100644 doxygen/dox/IntroParExamples.dox
create mode 100644 doxygen/dox/IntroParHDF5.dox
create mode 100644 doxygen/img/pchunk_figa.gif
create mode 100644 doxygen/img/pchunk_figb.gif
create mode 100644 doxygen/img/pchunk_figc.gif
create mode 100644 doxygen/img/pchunk_figd.gif
create mode 100644 doxygen/img/pcont_hy_figa.gif
create mode 100644 doxygen/img/pcont_hy_figb.gif
create mode 100644 doxygen/img/pcont_hy_figc.gif
create mode 100644 doxygen/img/pcont_hy_figd.gif
create mode 100644 doxygen/img/ppatt_figa.gif
create mode 100644 doxygen/img/ppatt_figb.gif
create mode 100644 doxygen/img/ppatt_figc.gif
create mode 100644 doxygen/img/ppatt_figd.gif
create mode 100644 doxygen/img/preg_figa.gif
create mode 100644 doxygen/img/preg_figb.gif
create mode 100644 doxygen/img/preg_figc.gif
create mode 100644 doxygen/img/preg_figd.gif
diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml
index e87b627..10b3a9c 100644
--- a/.github/workflows/main.yml
+++ b/.github/workflows/main.yml
@@ -336,30 +336,31 @@ jobs:
- name: Dump matrix context
run: echo '${{ toJSON(matrix) }}'
- - name: Install CMake Dependencies (Linux)
- run: sudo apt-get install ninja-build doxygen graphviz
- if: matrix.os == 'ubuntu-latest'
-
- - name: Install Autotools Dependencies (Linux, serial)
+ # Only CMake needs ninja-build, but we just install it unilaterally
+ # libssl, etc. are needed for the ros3 VFD
+ - name: Install Linux Dependencies
run: |
sudo apt update
- sudo apt install automake autoconf libtool libtool-bin
- sudo apt install gcc-12 g++-12 gfortran-12
+ sudo apt-get install ninja-build doxygen graphviz
sudo apt install libssl3 libssl-dev libcurl4 libcurl4-openssl-dev
+ sudo apt install gcc-12 g++-12 gfortran-12
echo "CC=gcc-12" >> $GITHUB_ENV
echo "CXX=g++-12" >> $GITHUB_ENV
echo "FC=gfortran-12" >> $GITHUB_ENV
+ if: matrix.os == 'ubuntu-latest'
+
+ # CMake gets libaec from fetchcontent
+ - name: Install Autotools Dependencies (Linux)
+ run: |
+ sudo apt install automake autoconf libtool libtool-bin
sudo apt install libaec0 libaec-dev
- if: (matrix.generator == 'autogen') && (matrix.parallel != 'enable')
+ if: (matrix.generator == 'autogen')
- name: Install Autotools Dependencies (Linux, parallel)
run: |
- sudo apt update
- sudo apt install automake autoconf libtool libtool-bin
sudo apt install openmpi-bin openmpi-common mpi-default-dev
echo "CC=mpicc" >> $GITHUB_ENV
echo "FC=mpif90" >> $GITHUB_ENV
- sudo apt install libaec0 libaec-dev
if: (matrix.generator == 'autogen') && (matrix.parallel == 'enable')
- name: Install Dependencies (Windows)
@@ -390,7 +391,19 @@ jobs:
sh ./autogen.sh
mkdir "${{ runner.workspace }}/build"
cd "${{ runner.workspace }}/build"
- ${{ matrix.flags }} $GITHUB_WORKSPACE/configure --enable-build-mode=${{ matrix.build_mode.autotools }} --${{ matrix.deprec_sym }}-deprecated-symbols --with-default-api-version=${{ matrix.default_api }} --enable-shared --${{ matrix.parallel }}-parallel --${{ matrix.cpp }}-cxx --${{ matrix.fortran }}-fortran --${{ matrix.java }}-java --${{ matrix.mirror_vfd }}-mirror-vfd --${{ matrix.direct_vfd }}-direct-vfd --${{ matrix.ros3_vfd }}-ros3-vfd --with-szlib=${{ matrix.szip }}
+ ${{ matrix.flags }} $GITHUB_WORKSPACE/configure \
+ --enable-build-mode=${{ matrix.build_mode.autotools }} \
+ --${{ matrix.deprec_sym }}-deprecated-symbols \
+ --with-default-api-version=${{ matrix.default_api }} \
+ --enable-shared \
+ --${{ matrix.parallel }}-parallel \
+ --${{ matrix.cpp }}-cxx \
+ --${{ matrix.fortran }}-fortran \
+ --${{ matrix.java }}-java \
+ --${{ matrix.mirror_vfd }}-mirror-vfd \
+ --${{ matrix.direct_vfd }}-direct-vfd \
+ --${{ matrix.ros3_vfd }}-ros3-vfd \
+ --with-szlib=${{ matrix.szip }}
shell: bash
if: (matrix.generator == 'autogen') && ! (matrix.thread_safety.enabled)
@@ -399,7 +412,15 @@ jobs:
sh ./autogen.sh
mkdir "${{ runner.workspace }}/build"
cd "${{ runner.workspace }}/build"
- ${{ matrix.flags }} $GITHUB_WORKSPACE/configure --enable-build-mode=${{ matrix.build_mode.autotools }} --enable-shared --enable-threadsafe --disable-hl --${{ matrix.mirror_vfd }}-mirror-vfd --${{ matrix.direct_vfd }}-direct-vfd --${{ matrix.ros3_vfd }}-ros3-vfd --with-szlib=${{ matrix.szip }}
+ ${{ matrix.flags }} $GITHUB_WORKSPACE/configure \
+ --enable-build-mode=${{ matrix.build_mode.autotools }} \
+ --enable-shared \
+ --enable-threadsafe \
+ --disable-hl \
+ --${{ matrix.mirror_vfd }}-mirror-vfd \
+ --${{ matrix.direct_vfd }}-direct-vfd \
+ --${{ matrix.ros3_vfd }}-ros3-vfd \
+ --with-szlib=${{ matrix.szip }}
shell: bash
if: (matrix.generator == 'autogen') && (matrix.thread_safety.enabled)
@@ -411,7 +432,25 @@ jobs:
run: |
mkdir "${{ runner.workspace }}/build"
cd "${{ runner.workspace }}/build"
- cmake -C $GITHUB_WORKSPACE/config/cmake/cacheinit.cmake ${{ matrix.generator }} -DCMAKE_BUILD_TYPE=${{ matrix.build_mode.cmake }} -DCMAKE_TOOLCHAIN_FILE=${{ matrix.toolchain }} -DBUILD_SHARED_LIBS=ON -DHDF5_ENABLE_ALL_WARNINGS=ON -DHDF5_ENABLE_PARALLEL:BOOL=${{ matrix.parallel }} -DHDF5_BUILD_CPP_LIB:BOOL=${{ matrix.cpp }} -DHDF5_BUILD_FORTRAN=${{ matrix.fortran }} -DHDF5_BUILD_JAVA=${{ matrix.java }} -DHDF5_BUILD_DOC=${{ matrix.docs }} -DBUILD_SZIP_WITH_FETCHCONTENT=${{ matrix.libaecfc }} -DLIBAEC_USE_LOCALCONTENT=${{ matrix.localaec }} -DBUILD_ZLIB_WITH_FETCHCONTENT=${{ matrix.zlibfc }} -DZLIB_USE_LOCALCONTENT=${{ matrix.localzlib }} -DHDF5_ENABLE_MIRROR_VFD:BOOL=${{ matrix.mirror_vfd }} -DHDF5_ENABLE_DIRECT_VFD:BOOL=${{ matrix.direct_vfd }} -DHDF5_ENABLE_ROS3_VFD:BOOL=${{ matrix.ros3_vfd }} $GITHUB_WORKSPACE
+ cmake -C $GITHUB_WORKSPACE/config/cmake/cacheinit.cmake \
+ ${{ matrix.generator }} \
+ -DCMAKE_BUILD_TYPE=${{ matrix.build_mode.cmake }} \
+ -DCMAKE_TOOLCHAIN_FILE=${{ matrix.toolchain }} \
+ -DBUILD_SHARED_LIBS=ON \
+ -DHDF5_ENABLE_ALL_WARNINGS=ON \
+ -DHDF5_ENABLE_PARALLEL:BOOL=${{ matrix.parallel }} \
+ -DHDF5_BUILD_CPP_LIB:BOOL=${{ matrix.cpp }} \
+ -DHDF5_BUILD_FORTRAN=${{ matrix.fortran }} \
+ -DHDF5_BUILD_JAVA=${{ matrix.java }} \
+ -DHDF5_BUILD_DOC=${{ matrix.docs }} \
+ -DBUILD_SZIP_WITH_FETCHCONTENT=${{ matrix.libaecfc }} \
+ -DLIBAEC_USE_LOCALCONTENT=${{ matrix.localaec }} \
+ -DBUILD_ZLIB_WITH_FETCHCONTENT=${{ matrix.zlibfc }} \
+ -DZLIB_USE_LOCALCONTENT=${{ matrix.localzlib }} \
+ -DHDF5_ENABLE_MIRROR_VFD:BOOL=${{ matrix.mirror_vfd }} \
+ -DHDF5_ENABLE_DIRECT_VFD:BOOL=${{ matrix.direct_vfd }} \
+ -DHDF5_ENABLE_ROS3_VFD:BOOL=${{ matrix.ros3_vfd }} \
+ $GITHUB_WORKSPACE
shell: bash
if: (matrix.generator != 'autogen') && ! (matrix.thread_safety.enabled)
@@ -420,7 +459,27 @@ jobs:
run: |
mkdir "${{ runner.workspace }}/build"
cd "${{ runner.workspace }}/build"
- cmake -C $GITHUB_WORKSPACE/config/cmake/cacheinit.cmake ${{ matrix.generator }} -DCMAKE_BUILD_TYPE=${{ matrix.build_mode.cmake }} -DCMAKE_TOOLCHAIN_FILE=${{ matrix.toolchain }} -DBUILD_SHARED_LIBS=ON -DHDF5_ENABLE_ALL_WARNINGS=ON -DHDF5_ENABLE_THREADSAFE:BOOL=ON -DHDF5_ENABLE_PARALLEL:BOOL=${{ matrix.parallel }} -DHDF5_BUILD_CPP_LIB:BOOL=OFF -DHDF5_BUILD_FORTRAN:BOOL=OFF -DHDF5_BUILD_JAVA:BOOL=OFF -DHDF5_BUILD_HL_LIB:BOOL=OFF -DHDF5_BUILD_DOC=OFF -DBUILD_SZIP_WITH_FETCHCONTENT=${{ matrix.libaecfc }} -DLIBAEC_USE_LOCALCONTENT=${{ matrix.localaec }} -DBUILD_ZLIB_WITH_FETCHCONTENT=${{ matrix.zlibfc }} -DZLIB_USE_LOCALCONTENT=${{ matrix.localzlib }} -DHDF5_ENABLE_MIRROR_VFD:BOOL=${{ matrix.mirror_vfd }} -DHDF5_ENABLE_DIRECT_VFD:BOOL=${{ matrix.direct_vfd }} -DHDF5_ENABLE_ROS3_VFD:BOOL=${{ matrix.ros3_vfd }} $GITHUB_WORKSPACE
+ cmake -C $GITHUB_WORKSPACE/config/cmake/cacheinit.cmake \
+ ${{ matrix.generator }} \
+ -DCMAKE_BUILD_TYPE=${{ matrix.build_mode.cmake }} \
+ -DCMAKE_TOOLCHAIN_FILE=${{ matrix.toolchain }} \
+ -DBUILD_SHARED_LIBS=ON \
+ -DHDF5_ENABLE_ALL_WARNINGS=ON \
+ -DHDF5_ENABLE_THREADSAFE:BOOL=ON \
+ -DHDF5_ENABLE_PARALLEL:BOOL=${{ matrix.parallel }} \
+ -DHDF5_BUILD_CPP_LIB:BOOL=OFF \
+ -DHDF5_BUILD_FORTRAN:BOOL=OFF \
+ -DHDF5_BUILD_JAVA:BOOL=OFF \
+ -DHDF5_BUILD_HL_LIB:BOOL=OFF \
+ -DHDF5_BUILD_DOC=OFF \
+ -DBUILD_SZIP_WITH_FETCHCONTENT=${{ matrix.libaecfc }} \
+ -DLIBAEC_USE_LOCALCONTENT=${{ matrix.localaec }} \
+ -DBUILD_ZLIB_WITH_FETCHCONTENT=${{ matrix.zlibfc }} \
+ -DZLIB_USE_LOCALCONTENT=${{ matrix.localzlib }} \
+ -DHDF5_ENABLE_MIRROR_VFD:BOOL=${{ matrix.mirror_vfd }} \
+ -DHDF5_ENABLE_DIRECT_VFD:BOOL=${{ matrix.direct_vfd }} \
+ -DHDF5_ENABLE_ROS3_VFD:BOOL=${{ matrix.ros3_vfd }} \
+ $GITHUB_WORKSPACE
shell: bash
if: (matrix.generator != 'autogen') && (matrix.thread_safety.enabled)
diff --git a/.github/workflows/netcdf.yml b/.github/workflows/netcdf.yml
index 0ec7541..5b1ebf2 100644
--- a/.github/workflows/netcdf.yml
+++ b/.github/workflows/netcdf.yml
@@ -14,6 +14,9 @@ on:
- 'COPYING**'
- '**.md'
+permissions:
+ contents: read
+
# Using concurrency to cancel any in-progress job or run
concurrency:
group: ${{ github.workflow }}-${{ github.sha || github.event.pull_request.number }}
diff --git a/CMakeFilters.cmake b/CMakeFilters.cmake
index 200634e..3a1a0de 100644
--- a/CMakeFilters.cmake
+++ b/CMakeFilters.cmake
@@ -9,7 +9,7 @@
# If you do not have access to either file, you may request a copy from
# help@hdfgroup.org.
#
-option (USE_LIBAEC "Use AEC library as SZip Filter" OFF)
+option (USE_LIBAEC "Use AEC library as SZip Filter" ON)
option (USE_LIBAEC_STATIC "Use static AEC library " OFF)
option (ZLIB_USE_EXTERNAL "Use External Library Building for ZLIB" 0)
option (SZIP_USE_EXTERNAL "Use External Library Building for SZIP" 0)
diff --git a/CMakeInstallation.cmake b/CMakeInstallation.cmake
index 1fb7714..3aa7981 100644
--- a/CMakeInstallation.cmake
+++ b/CMakeInstallation.cmake
@@ -390,7 +390,13 @@ if (NOT HDF5_EXTERNALLY_CONFIGURED AND NOT HDF5_NO_PACKAGES)
set(CPACK_WIX_PROPERTY_ARPURLINFOABOUT "${HDF5_PACKAGE_URL}")
set(CPACK_WIX_PROPERTY_ARPHELPLINK "${HDF5_PACKAGE_BUGREPORT}")
if (BUILD_SHARED_LIBS)
- set(CPACK_WIX_PATCH_FILE "${HDF_RESOURCES_DIR}/patch.xml")
+ if (${HDF_CFG_NAME} MATCHES "Debug" OR ${HDF_CFG_NAME} MATCHES "Developer")
+ set (WIX_CMP_NAME "${HDF5_LIB_NAME}${CMAKE_DEBUG_POSTFIX}")
+ else ()
+ set (WIX_CMP_NAME "${HDF5_LIB_NAME}")
+ endif ()
+ configure_file (${HDF_RESOURCES_DIR}/patch.xml.in ${HDF5_BINARY_DIR}/patch.xml @ONLY)
+ set(CPACK_WIX_PATCH_FILE "${HDF5_BINARY_DIR}/patch.xml")
endif ()
elseif (APPLE)
list (APPEND CPACK_GENERATOR "STGZ")
diff --git a/config/cmake/fileCompareTest.cmake b/config/cmake/fileCompareTest.cmake
index 4a8dc09..f4c46f6 100644
--- a/config/cmake/fileCompareTest.cmake
+++ b/config/cmake/fileCompareTest.cmake
@@ -59,7 +59,7 @@ if (TEST_STRINGS STREQUAL "YES")
endif ()
else ()
if (CMAKE_VERSION VERSION_LESS "3.14.0")
- message (FATAL_ERROR "CANNOT get file size, file command SIZE not supported")
+ message (STATUS "CANNOT get file size, file command SIZE not supported")
else ()
file (SIZE ${TEST_FOLDER}/${TEST_ONEFILE} TEST_ONE_SIZE)
file (SIZE ${TEST_FOLDER}/${TEST_TWOFILE} TEST_TWO_SIZE)
@@ -74,7 +74,7 @@ else ()
elseif (TEST_FUNCTION MATCHES "LTEQ")
if (TEST_ONE_SIZE LESS_EQUAL TEST_TWO_SIZE)
if (CMAKE_VERSION VERSION_GREATER_EQUAL "3.15.0")
- message (VERBOSES "Passed: The size of ${TEST_FOLDER}/${TEST_ONEFILE} was less or equal ${TEST_FOLDER}/${TEST_TWOFILE}")
+ message (VERBOSE "Passed: The size of ${TEST_FOLDER}/${TEST_ONEFILE} was less or equal ${TEST_FOLDER}/${TEST_TWOFILE}")
endif ()
else ()
message (FATAL_ERROR "The size of ${TEST_FOLDER}/${TEST_ONEFILE} was NOT less or equal ${TEST_FOLDER}/${TEST_TWOFILE}")
diff --git a/config/cmake/hdf5-config.cmake.in b/config/cmake/hdf5-config.cmake.in
index 699db89..496d260 100644
--- a/config/cmake/hdf5-config.cmake.in
+++ b/config/cmake/hdf5-config.cmake.in
@@ -44,6 +44,7 @@ set (${HDF5_PACKAGE_NAME}_ENABLE_PLUGIN_SUPPORT @HDF5_ENABLE_PLUGIN_SUPPORT@)
set (${HDF5_PACKAGE_NAME}_ENABLE_Z_LIB_SUPPORT @HDF5_ENABLE_Z_LIB_SUPPORT@)
set (${HDF5_PACKAGE_NAME}_ENABLE_SZIP_SUPPORT @HDF5_ENABLE_SZIP_SUPPORT@)
set (${HDF5_PACKAGE_NAME}_ENABLE_SZIP_ENCODING @HDF5_ENABLE_SZIP_ENCODING@)
+set (${HDF5_PACKAGE_NAME}_ENABLE_ROS3_VFD @HDF5_ENABLE_ROS3_VFD@)
set (${HDF5_PACKAGE_NAME}_BUILD_SHARED_LIBS @H5_ENABLE_SHARED_LIB@)
set (${HDF5_PACKAGE_NAME}_BUILD_STATIC_LIBS @H5_ENABLE_STATIC_LIB@)
set (${HDF5_PACKAGE_NAME}_PACKAGE_EXTLIBS @HDF5_PACKAGE_EXTLIBS@)
@@ -51,7 +52,8 @@ set (${HDF5_PACKAGE_NAME}_EXPORT_LIBRARIES @HDF5_LIBRARIES_TO_EXPORT@)
set (${HDF5_PACKAGE_NAME}_ARCHITECTURE "@CMAKE_GENERATOR_ARCHITECTURE@")
set (${HDF5_PACKAGE_NAME}_TOOLSET "@CMAKE_GENERATOR_TOOLSET@")
set (${HDF5_PACKAGE_NAME}_DEFAULT_API_VERSION "@DEFAULT_API_VERSION@")
-set (${HDF5_PACKAGE_NAME}_PARALLEL_FILTERED_WRITES "@PARALLEL_FILTERED_WRITES@")
+set (${HDF5_PACKAGE_NAME}_PARALLEL_FILTERED_WRITES @PARALLEL_FILTERED_WRITES@)
+set (${HDF5_PACKAGE_NAME}_INSTALL_MOD_FORTRAN "@HDF5_INSTALL_MOD_FORTRAN@")
#-----------------------------------------------------------------------------
# Dependencies
@@ -67,6 +69,11 @@ if (${HDF5_PACKAGE_NAME}_ENABLE_PARALLEL)
find_package(MPI QUIET REQUIRED)
endif ()
+if (${HDF5_PACKAGE_NAME}_ENABLE_THREADSAFE)
+ set(THREADS_PREFER_PTHREAD_FLAG ON)
+ find_package(Threads QUIET REQUIRED)
+endif ()
+
if (${HDF5_PACKAGE_NAME}_BUILD_JAVA)
set (${HDF5_PACKAGE_NAME}_JAVA_INCLUDE_DIRS
@PACKAGE_CURRENT_BUILD_DIR@/lib/jarhdf5-@HDF5_VERSION_STRING@.jar
@@ -143,14 +150,14 @@ foreach (comp IN LISTS ${HDF5_PACKAGE_NAME}_FIND_COMPONENTS)
list (REMOVE_ITEM ${HDF5_PACKAGE_NAME}_FIND_COMPONENTS ${comp})
set (${HDF5_PACKAGE_NAME}_LIB_TYPE ${${HDF5_PACKAGE_NAME}_LIB_TYPE} ${comp})
- if (${HDF5_PACKAGE_NAME}_BUILD_FORTRAN)
+ if (${HDF5_PACKAGE_NAME}_BUILD_FORTRAN AND ${HDF5_PACKAGE_NAME}_INSTALL_MOD_FORTRAN STREQUAL "SHARED")
set (${HDF5_PACKAGE_NAME}_INCLUDE_DIR_FORTRAN "@PACKAGE_INCLUDE_INSTALL_DIR@/shared")
endif ()
elseif (comp STREQUAL "static")
list (REMOVE_ITEM ${HDF5_PACKAGE_NAME}_FIND_COMPONENTS ${comp})
set (${HDF5_PACKAGE_NAME}_LIB_TYPE ${${HDF5_PACKAGE_NAME}_LIB_TYPE} ${comp})
- if (${HDF5_PACKAGE_NAME}_BUILD_FORTRAN)
+ if (${HDF5_PACKAGE_NAME}_BUILD_FORTRAN AND ${HDF5_PACKAGE_NAME}_INSTALL_MOD_FORTRAN STREQUAL "STATIC")
set (${HDF5_PACKAGE_NAME}_INCLUDE_DIR_FORTRAN "@PACKAGE_INCLUDE_INSTALL_DIR@/static")
endif ()
endif ()
diff --git a/config/cmake/patch.xml b/config/cmake/patch.xml
deleted file mode 100644
index 1bdff3e..0000000
--- a/config/cmake/patch.xml
+++ /dev/null
@@ -1,11 +0,0 @@
-
-
-
-
-
diff --git a/config/cmake/patch.xml.in b/config/cmake/patch.xml.in
new file mode 100644
index 0000000..d6843e1
--- /dev/null
+++ b/config/cmake/patch.xml.in
@@ -0,0 +1,11 @@
+
+
+
+
+
diff --git a/config/cmake/runTest.cmake b/config/cmake/runTest.cmake
index e26b8ea..b8abe92 100644
--- a/config/cmake/runTest.cmake
+++ b/config/cmake/runTest.cmake
@@ -122,6 +122,10 @@ if (NOT TEST_RESULT EQUAL TEST_EXPECT)
file (READ ${TEST_FOLDER}/${TEST_OUTPUT} TEST_STREAM)
message (STATUS "Output :\n${TEST_STREAM}")
endif ()
+ if (EXISTS "${TEST_FOLDER}/${TEST_OUTPUT}.err")
+ file (READ ${TEST_FOLDER}/${TEST_OUTPUT}.err TEST_STREAM)
+ message (STATUS "Error Output :\n${TEST_STREAM}")
+ endif ()
endif ()
message (FATAL_ERROR "Failed: Test program ${TEST_PROGRAM} exited != ${TEST_EXPECT}.\n${TEST_ERROR}")
endif ()
diff --git a/config/cmake/scripts/HDF5options.cmake b/config/cmake/scripts/HDF5options.cmake
index 5267212..92bfd37 100644
--- a/config/cmake/scripts/HDF5options.cmake
+++ b/config/cmake/scripts/HDF5options.cmake
@@ -69,9 +69,9 @@ set (ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DHDF5_ALLOW_EXTERNAL_SUPPORT:STRIN
### disable using ext zlib
#set (ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DHDF5_ENABLE_Z_LIB_SUPPORT:BOOL=OFF")
-### enable using ext szip
-#set (ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DHDF5_ENABLE_SZIP_SUPPORT:BOOL=ON")
-#set (ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DHDF5_ENABLE_SZIP_ENCODING:BOOL=ON")
+### disable using ext szip
+#set (ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DHDF5_ENABLE_SZIP_SUPPORT:BOOL=OFF")
+#set (ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DHDF5_ENABLE_SZIP_ENCODING:BOOL=OFF")
#### package examples ####
#set (ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DHDF5_PACK_EXAMPLES:BOOL=ON -DHDF5_EXAMPLES_COMPRESSED:STRING=HDF5Examples-2.0.3-Source.tar.gz -DHDF5_EXAMPLES_COMPRESSED_DIR:PATH=${CTEST_SCRIPT_DIRECTORY}")
diff --git a/config/sanitizer/README.md b/config/sanitizer/README.md
index 308f9c3..b33c100 100644
--- a/config/sanitizer/README.md
+++ b/config/sanitizer/README.md
@@ -304,4 +304,4 @@ file(GLOB_RECURSE CMAKE_FILES
)
cmake_format(TARGET_NAME ${CMAKE_FILES})
-```
\ No newline at end of file
+```
diff --git a/config/toolchain/build32.cmake b/config/toolchain/build32.cmake
index a2566c3..f636ea8 100644
--- a/config/toolchain/build32.cmake
+++ b/config/toolchain/build32.cmake
@@ -42,7 +42,7 @@ elseif(MINGW)
set (CMAKE_CROSSCOMPILING_EMULATOR wine)
include_directories(/usr/${TOOLCHAIN_PREFIX}/include)
- set(CMAKE_WINDOWS_EXPORT_ALL_SYMBOLS On CACHE BOOL "Export windows symbols")
+ set (CMAKE_WINDOWS_EXPORT_ALL_SYMBOLS On CACHE BOOL "Export windows symbols")
else ()
set (CMAKE_SYSTEM_NAME Linux)
diff --git a/config/toolchain/clang.cmake b/config/toolchain/clang.cmake
index af176aa..2d35641 100644
--- a/config/toolchain/clang.cmake
+++ b/config/toolchain/clang.cmake
@@ -1,16 +1,16 @@
# Uncomment the following to use cross-compiling
-#set(CMAKE_SYSTEM_NAME Linux)
+#set (CMAKE_SYSTEM_NAME Linux)
-set(CMAKE_COMPILER_VENDOR "clang")
+set (CMAKE_COMPILER_VENDOR "clang")
if(WIN32)
- set(CMAKE_C_COMPILER clang-cl)
- set(CMAKE_CXX_COMPILER clang-cl)
+ set (CMAKE_C_COMPILER clang-cl)
+ set (CMAKE_CXX_COMPILER clang-cl)
else()
- set(CMAKE_C_COMPILER clang)
- set(CMAKE_CXX_COMPILER clang++)
+ set (CMAKE_C_COMPILER clang)
+ set (CMAKE_CXX_COMPILER clang++)
endif()
-set(CMAKE_EXPORT_COMPILE_COMMANDS ON)
+set (CMAKE_EXPORT_COMPILE_COMMANDS ON)
# the following is used if cross-compiling
-set(CMAKE_CROSSCOMPILING_EMULATOR "")
+set (CMAKE_CROSSCOMPILING_EMULATOR "")
diff --git a/config/toolchain/crayle.cmake b/config/toolchain/crayle.cmake
index bf7cf69..02df8ff 100644
--- a/config/toolchain/crayle.cmake
+++ b/config/toolchain/crayle.cmake
@@ -1,10 +1,10 @@
# The following line will use cross-compiling
-set(CMAKE_SYSTEM_NAME Linux)
+set (CMAKE_SYSTEM_NAME Linux)
-set(CMAKE_COMPILER_VENDOR "CrayLinuxEnvironment")
+set (CMAKE_COMPILER_VENDOR "CrayLinuxEnvironment")
-set(CMAKE_C_COMPILER cc)
-set(CMAKE_Fortran_COMPILER ftn)
+set (CMAKE_C_COMPILER cc)
+set (CMAKE_Fortran_COMPILER ftn)
# the following is used if cross-compiling
-set(CMAKE_CROSSCOMPILING_EMULATOR "")
+set (CMAKE_CROSSCOMPILING_EMULATOR "")
diff --git a/config/toolchain/gcc.cmake b/config/toolchain/gcc.cmake
index c41d0ca..f0771ed 100644
--- a/config/toolchain/gcc.cmake
+++ b/config/toolchain/gcc.cmake
@@ -1,11 +1,11 @@
# Uncomment the following line and the correct system name to use cross-compiling
-#set(CMAKE_SYSTEM_NAME Linux)
+#set (CMAKE_SYSTEM_NAME Linux)
-set(CMAKE_COMPILER_VENDOR "GCC")
+set (CMAKE_COMPILER_VENDOR "GCC")
-set(CMAKE_C_COMPILER cc)
-set(CMAKE_CXX_COMPILER c++)
-set(CMAKE_Fortran_COMPILER gfortran)
+set (CMAKE_C_COMPILER cc)
+set (CMAKE_CXX_COMPILER c++)
+set (CMAKE_Fortran_COMPILER gfortran)
# the following is used if cross-compiling
-set(CMAKE_CROSSCOMPILING_EMULATOR "")
+set (CMAKE_CROSSCOMPILING_EMULATOR "")
diff --git a/config/toolchain/mingw64.cmake b/config/toolchain/mingw64.cmake
index 1830488..1b13891 100644
--- a/config/toolchain/mingw64.cmake
+++ b/config/toolchain/mingw64.cmake
@@ -11,4 +11,4 @@ set (CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY)
set (CMAKE_CROSSCOMPILING_EMULATOR wine64)
include_directories(/usr/${TOOLCHAIN_PREFIX}/include)
-set(CMAKE_WINDOWS_EXPORT_ALL_SYMBOLS On CACHE BOOL "Export windows symbols")
+set (CMAKE_WINDOWS_EXPORT_ALL_SYMBOLS On CACHE BOOL "Export windows symbols")
diff --git a/config/toolchain/pgi.cmake b/config/toolchain/pgi.cmake
index ec58cbb..ff2f048 100644
--- a/config/toolchain/pgi.cmake
+++ b/config/toolchain/pgi.cmake
@@ -1,11 +1,11 @@
# Uncomment the following to use cross-compiling
-#set(CMAKE_SYSTEM_NAME Linux)
+#set (CMAKE_SYSTEM_NAME Linux)
-set(CMAKE_COMPILER_VENDOR "PGI")
+set (CMAKE_COMPILER_VENDOR "PGI")
-set(CMAKE_C_COMPILER pgcc)
-set(CMAKE_CXX_COMPILER pgc++)
-set(CMAKE_Fortran_COMPILER pgf90)
+set (CMAKE_C_COMPILER pgcc)
+set (CMAKE_CXX_COMPILER pgc++)
+set (CMAKE_Fortran_COMPILER pgf90)
# the following is used if cross-compiling
-set(CMAKE_CROSSCOMPILING_EMULATOR "")
+set (CMAKE_CROSSCOMPILING_EMULATOR "")
diff --git a/doxygen/dox/ExamplesAPI.dox b/doxygen/dox/ExamplesAPI.dox
new file mode 100644
index 0000000..8f88c4e
--- /dev/null
+++ b/doxygen/dox/ExamplesAPI.dox
@@ -0,0 +1,1010 @@
+/** @page ExAPI Examples by API
+
+Navigate back: \ref index "Main" / \ref GettingStarted
+
+
+\section sec_exapi_desc Examples Description
+The C, FORTRAN and Java examples below point to the examples in the hdf5-examples github repository. Examples for older versions of HDF5
+are handled by setting the appropriate USE_API_xxx definition. HDF5-1.6 examples are in a "16"-named subdirectory.
+
+The Java examples are in the HDF5-1.10 source code, and the Java Object package examples are in the HDFView source.
+Please note that you must comment out the "package" statement at the top when downloading a Java Object example individually.
+
+The MATLAB and Python examples were generously provided by a user and are not tested.
+
+Languages are C, Fortran, Java (JHI5), Java Object Package, Python (High Level), and Python (Low Level APIs).
+
+\subsection sec_exapi_dsets Datasets
+
+
+
+\subsection sec_exapi_grps Groups
+
+
+\subsection sec_exapi_dtypes Datatypes
+
+
+\subsection sec_exapi_filts Filters
+
+
+\subsection sec_exapi_java Java General
+
+
+
+\subsection sec_exapi_par Parallel
+
+
+Feature |
+Languages |
+HDF5 File |
+Output |
+
+
+Creating and Accessing a File |
+
+C
+FORTRAN
+ MATLAB PyHigh PyLow
+ |
+ph5_.h5 |
+ph5_.tst |
+
+
+Creating and Accessing a Dataset |
+
+C
+FORTRAN
+ MATLAB PyHigh PyLow
+ |
+ph5_.h5 |
+ph5_.tst |
+
+
+Writing and Reading Contiguous Hyperslabs |
+
+C
+FORTRAN
+ MATLAB PyHigh PyLow
+ |
+ph5_.h5 |
+ph5_.tst |
+
+
+Writing and Reading Regularly Spaced Data Hyperslabs |
+
+C
+FORTRAN
+ MATLAB PyHigh PyLow
+ |
+ph5_.h5 |
+ph5_.tst |
+
+
+Writing and Reading Pattern Hyperslabs |
+
+C
+FORTRAN
+ MATLAB PyHigh PyLow
+ |
+ph5_.h5 |
+ph5_.tst |
+
+
+Writing and Reading Chunk Hyperslabs |
+
+C
+FORTRAN
+ MATLAB PyHigh PyLow
+ |
+ph5_.h5 |
+ph5_.tst |
+
+
+Using the Subfiling VFD to Write a File Striped Across Multiple Subfiles |
+
+C
+ FORTRAN MATLAB PyHigh PyLow
+ |
+ph5_.h5 |
+ph5_.tst |
+
+
+Write to Datasets with Filters Applied |
+
+C
+ FORTRAN MATLAB PyHigh PyLow
+ |
+ph5_.h5 |
+ph5_.tst |
+
+
+Collectively Write Datasets with Filters and Not All Ranks have Data |
+
+C
+ FORTRAN MATLAB PyHigh PyLow
+ |
+ph5_.h5 |
+ph5_.tst |
+
+
+
+
+
+Navigate back: \ref index "Main" / \ref GettingStarted
+
+*/
diff --git a/doxygen/dox/GettingStarted.dox b/doxygen/dox/GettingStarted.dox
index 29c5033..87f3566 100644
--- a/doxygen/dox/GettingStarted.dox
+++ b/doxygen/dox/GettingStarted.dox
@@ -50,10 +50,10 @@ Parallel HDF5, and the HDF5-1.10 VDS and SWMR new features:
-Introduction to Parallel HDF5
+\ref IntroParHDF5
|
-A brief introduction to Parallel HDF5. If you are new to HDF5 please see the @ref LearnBasics topic first.
+A brief introduction to Parallel HDF5. If you are new to HDF5 please see the @ref LearnBasics topic first.
|
diff --git a/doxygen/dox/IntroHDF5.dox b/doxygen/dox/IntroHDF5.dox
index 3ca7d00..2c25659 100644
--- a/doxygen/dox/IntroHDF5.dox
+++ b/doxygen/dox/IntroHDF5.dox
@@ -607,7 +607,7 @@ on the HDF-EOS Tools and Information Center pag
\section secHDF5Examples Examples
\li \ref LBExamples
-\li Examples by API
+\li \ref ExAPI
\li Examples in the Source Code
\li Other Examples
diff --git a/doxygen/dox/IntroParExamples.dox b/doxygen/dox/IntroParExamples.dox
new file mode 100644
index 0000000..3929106
--- /dev/null
+++ b/doxygen/dox/IntroParExamples.dox
@@ -0,0 +1,569 @@
+/** @page IntroParContHyperslab Writing by Contiguous Hyperslab
+
+Navigate back: \ref index "Main" / \ref GettingStarted / \ref IntroParHDF5
+
+
+This example shows how to write a contiguous buffer in memory to a contiguous hyperslab in a file. In this case,
+each parallel process writes a contiguous hyperslab to the file.
+
+In the C example (figure a), each hyperslab in memory consists of an equal number of consecutive rows. In the FORTRAN
+90 example (figure b), each hyperslab in memory consists of
+an equal number of consecutive columns. This reflects the difference in the storage order for C and FORTRAN 90.
+
+
+Figure a C Example |
+Figure b Fortran Example |
+
+
+\image html pcont_hy_figa.gif
+ |
+
+\image html pcont_hy_figb.gif
+ |
+
+
+
+\section secIntroParContHyperslabC Writing a Contiguous Hyperslab in C
+In this example, you have a dataset of 8 (rows) x 5 (columns) and each process writes an equal number
+of rows to the dataset. The dataset hyperslab is defined as follows:
+\code
+ count [0] = dimsf [0] / number_processes
+ count [1] = dimsf [1]
+\endcode
+where,
+\code
+ dimsf [0] is the number of rows in the dataset
+ dimsf [1] is the number of columns in the dataset
+\endcode
+The offset for the hyperslab is different for each process:
+\code
+ offset [0] = k * count[0]
+ offset [1] = 0
+\endcode
+where,
+\code
+ "k" is the process id number
+ count [0] is the number of rows written in each hyperslab
+ offset [1] = 0 indicates to start at the beginning of the row
+\endcode
+
+The number of processes that you could use would be 1, 2, 4, or 8. The number of rows that would be written by each slab is as follows:
+
+
+Processes |
+Size of count[0](\# of rows) |
+
+1 | 8 |
+
+2 | 4 |
+
+4 | 2 |
+
+8 | 1 |
+
+
+
+If using 4 processes, then process 1 would look like:
+
+
+
+\image html pcont_hy_figc.gif
+ |
+
+
+
+The code would look like the following:
+\code
+ 71 /*
+ 72 * Each process defines dataset in memory and writes it to the hyperslab
+ 73 * in the file.
+ 74 */
+ 75 count[0] = dimsf[0]/mpi_size;
+ 76 count[1] = dimsf[1];
+ 77 offset[0] = mpi_rank * count[0];
+ 78 offset[1] = 0;
+ 79 memspace = H5Screate_simple(RANK, count, NULL);
+ 80
+ 81 /*
+ 82 * Select hyperslab in the file.
+ 83 */
+ 84 filespace = H5Dget_space(dset_id);
+ 85 H5Sselect_hyperslab(filespace, H5S_SELECT_SET, offset, NULL, count, NULL);
+\endcode
+
+Below is the example program:
+
+
+If using this example with 4 processes, then,
+\li Process 0 writes "10"s to the file.
+\li Process 1 writes "11"s.
+\li Process 2 writes "12"s.
+\li Process 3 writes "13"s.
+
+The following is the output from h5dump for the HDF5 file created by this example using 4 processes:
+\code
+HDF5 "SDS_row.h5" {
+GROUP "/" {
+ DATASET "IntArray" {
+ DATATYPE H5T_STD_I32BE
+ DATASPACE SIMPLE { ( 8, 5 ) / ( 8, 5 ) }
+ DATA {
+ 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10,
+ 11, 11, 11, 11, 11,
+ 11, 11, 11, 11, 11,
+ 12, 12, 12, 12, 12,
+ 12, 12, 12, 12, 12,
+ 13, 13, 13, 13, 13,
+ 13, 13, 13, 13, 13
+ }
+ }
+}
+}
+\endcode
+
+
+\section secIntroParContHyperslabFort Writing a Contiguous Hyperslab in Fortran
+In this example you have a dataset of 5 (rows) x 8 (columns). Since a contiguous hyperslab in Fortran 90
+consists of consecutive columns, each process will be writing an equal number of columns to the dataset.
+
+You would define the size of the hyperslab to write to the dataset as follows:
+\code
+ count(1) = dimsf(1)
+ count(2) = dimsf(2) / number_of_processes
+\endcode
+
+where,
+\code
+ dimsf(1) is the number of rows in the dataset
+ dimsf(2) is the number of columns
+\endcode
+
+The offset for the hyperslab dimension would be different for each process:
+\code
+ offset (1) = 0
+ offset (2) = k * count (2)
+\endcode
+
+where,
+\code
+ offset (1) = 0 indicates to start at the beginning of the column
+ "k" is the process id number
+ count(2) is the number of columns to be written by each hyperslab
+\endcode
+
+The number of processes that could be used in this example is 1, 2, 4, or 8. The number of
+columns that could be written by each slab is as follows:
+
+
+Processes |
+Size of count (2)(\# of columns) |
+
+1 | 8 |
+
+2 | 4 |
+
+4 | 2 |
+
+8 | 1 |
+
+
+
+If using 4 processes, the offset and count parameters for Process 1 would look like:
+
+
+
+\image html pcont_hy_figd.gif
+ |
+
+
+
+The code would look like the following:
+\code
+ 69 ! Each process defines dataset in memory and writes it to the hyperslab
+ 70 ! in the file.
+ 71 !
+ 72 count(1) = dimsf(1)
+ 73 count(2) = dimsf(2)/mpi_size
+ 74 offset(1) = 0
+ 75 offset(2) = mpi_rank * count(2)
+ 76 CALL h5screate_simple_f(rank, count, memspace, error)
+ 77 !
+ 78 ! Select hyperslab in the file.
+ 79 !
+ 80 CALL h5dget_space_f(dset_id, filespace, error)
+ 81 CALL h5sselect_hyperslab_f (filespace, H5S_SELECT_SET_F, offset, count, error)
+\endcode
+
+Below is the F90 example program which illustrates how to write contiguous hyperslabs by column in Parallel HDF5:
+
+
+If you run this program with 4 processes and look at the output with h5dump you will notice that the output is
+much like the output shown above for the C example. This is because h5dump is written in C. The data would be
+displayed in columns if it was printed using Fortran 90 code.
+
+
+Navigate back: \ref index "Main" / \ref GettingStarted / \ref IntroParHDF5
+
+@page IntroParRegularSpaced Writing by Regularly Spaced Data
+
+Navigate back: \ref index "Main" / \ref GettingStarted / \ref IntroParHDF5
+
+
+In this case, each process writes data from a contiguous buffer into disconnected locations in the file, using a regular pattern.
+
+In C it is done by selecting a hyperslab in a file that consists of regularly spaced columns. In F90, it is done by selecting a
+hyperslab in a file that consists of regularly spaced rows.
+
+
+Figure a C Example |
+Figure b Fortran Example |
+
+
+\image html preg_figa.gif
+ |
+
+\image html preg_figb.gif
+ |
+
+
+
+\section secIntroParRegularSpacedC Writing Regularly Spaced Columns in C
+In this example, you have two processes that write to the same dataset, each writing to
+every other column in the dataset. For each process the hyperslab in the file is set up as follows:
+\code
+ 89 count[0] = 1;
+ 90 count[1] = dimsm[1];
+ 91 offset[0] = 0;
+ 92 offset[1] = mpi_rank;
+ 93 stride[0] = 1;
+ 94 stride[1] = 2;
+ 95 block[0] = dimsf[0];
+ 96 block[1] = 1;
+\endcode
+
+The stride is 2 for dimension 1 to indicate that every other position along this
+dimension will be written to. A stride of 1 indicates that every position along a dimension will be written to.
+
+For two processes, the mpi_rank will be either 0 or 1. Therefore:
+\li Process 0 writes to even columns (0, 2, 4...)
+\li Process 1 writes to odd columns (1, 3, 5...)
+
+The block size allows each process to write a column of data to every other position in the dataset.
+
+
+
+
+\image html preg_figc.gif
+ |
+
+
+
+Below is an example program for writing hyperslabs by column in Parallel HDF5:
+
+
+The following is the output from h5dump for the HDF5 file created by this example:
+\code
+HDF5 "SDS_col.h5" {
+GROUP "/" {
+ DATASET "IntArray" {
+ DATATYPE H5T_STD_I32BE
+ DATASPACE SIMPLE { ( 8, 6 ) / ( 8, 6 ) }
+ DATA {
+ 1, 2, 10, 20, 100, 200,
+ 1, 2, 10, 20, 100, 200,
+ 1, 2, 10, 20, 100, 200,
+ 1, 2, 10, 20, 100, 200,
+ 1, 2, 10, 20, 100, 200,
+ 1, 2, 10, 20, 100, 200,
+ 1, 2, 10, 20, 100, 200,
+ 1, 2, 10, 20, 100, 200
+ }
+ }
+}
+}
+\endcode
+
+
+\section secIntroParRegularSpacedFort Writing Regularly Spaced Rows in Fortran
+In this example, you have two processes that write to the same dataset, each writing to every
+other row in the dataset. For each process the hyperslab in the file is set up as follows:
+
+
+You would define the size of the hyperslab to write to the dataset as follows:
+\code
+ 83 ! Each process defines dataset in memory and writes it to
+ 84 ! the hyperslab in the file.
+ 85 !
+ 86 count(1) = dimsm(1)
+ 87 count(2) = 1
+ 88 offset(1) = mpi_rank
+ 89 offset(2) = 0
+ 90 stride(1) = 2
+ 91 stride(2) = 1
+ 92 block(1) = 1
+ 93 block(2) = dimsf(2)
+\endcode
+
+The stride is 2 for dimension 1 to indicate that every other position along this dimension will
+be written to. A stride of 1 indicates that every position along a dimension will be written to.
+
+For two processes, the mpi_rank will be either 0 or 1. Therefore:
+\li Process 0 writes to even rows (0, 2, 4 ...)
+\li Process 1 writes to odd rows (1, 3, 5 ...)
+
+The block size allows each process to write a row of data to every other position in the dataset,
+rather than just a point of data.
+
+The following shows the data written by Process 1 to the file:
+
+
+
+\image html preg_figd.gif
+ |
+
+
+
+Below is the example program for writing hyperslabs by row in Parallel HDF5:
+
+
+The output for h5dump on the file created by this program will look like the output shown above for the C example. This is
+because h5dump is written in C. The data would be displayed in rows if it were printed using Fortran 90 code.
+
+
+Navigate back: \ref index "Main" / \ref GettingStarted / \ref IntroParHDF5
+
+@page IntroParPattern Writing by Pattern
+
+Navigate back: \ref index "Main" / \ref GettingStarted / \ref IntroParHDF5
+
+
+This is another example of writing data into disconnected locations in a file. Each process writes data from the contiguous
+buffer into regularly scattered locations in the file.
+
+Each process defines a hyperslab in the file as described below and writes data to it. The C and Fortran 90 examples below
+result in the same data layout in the file.
+
+
+
+Figure a C Example |
+Figure b Fortran Example |
+
+
+\image html ppatt_figa.gif
+ |
+
+\image html ppatt_figb.gif
+ |
+
+
+
+The C and Fortran 90 examples use four processes to write the pattern shown above. Each process defines a hyperslab by:
+\li Specifying a stride of 2 for each dimension, which indicates that you wish to write to every other position along a dimension.
+\li Specifying a different offset for each process:
+
+
+C | Process 0 | Process 1 | Process 2 | Process 3 |
+
+offset[0] = 0 | offset[0] = 1 | offset[0] = 0 | offset[0] = 1 |
+
+offset[1] = 0 | offset[1] = 0 | offset[1] = 1 | offset[1] = 1 |
+
+Fortran | Process 0 | Process 1 | Process 2 | Process 3 |
+
+offset(1) = 0 | offset(1) = 0 | offset(1) = 1 | offset(1) = 1 |
+
+offset(2) = 0 | offset(2) = 1 | offset(2) = 0 | offset(2) = 1 |
+
+
+\li Specifying the size of the slab to write. The count is the number of positions along a dimension to write to. If writing a 4 x 2 slab,
+then the count would be:
+
+
+C | Fortran |
+
+count[0] = 4 | count(1) = 2 |
+
+count[1] = 2 | count(2) = 4 |
+
+
+
+For example, the offset, count, and stride parameters for Process 2 would look like:
+
+
+Figure a C Example |
+Figure b Fortran Example |
+
+
+\image html ppatt_figc.gif
+ |
+
+\image html ppatt_figd.gif
+ |
+
+
+
+Below are example programs for writing hyperslabs by pattern in Parallel HDF5:
+
+
+The following is the output from h5dump for the HDF5 file created in this example:
+\code
+HDF5 "SDS_pat.h5" {
+GROUP "/" {
+ DATASET "IntArray" {
+ DATATYPE H5T_STD_I32BE
+ DATASPACE SIMPLE { ( 8, 4 ) / ( 8, 4 ) }
+ DATA {
+ 1, 3, 1, 3,
+ 2, 4, 2, 4,
+ 1, 3, 1, 3,
+ 2, 4, 2, 4,
+ 1, 3, 1, 3,
+ 2, 4, 2, 4,
+ 1, 3, 1, 3,
+ 2, 4, 2, 4
+ }
+ }
+}
+}
+\endcode
+The h5dump utility is written in C so the output is in C order.
+
+
+
+Navigate back: \ref index "Main" / \ref GettingStarted / \ref IntroParHDF5
+
+@page IntroParChunk Writing by Chunk
+
+Navigate back: \ref index "Main" / \ref GettingStarted / \ref IntroParHDF5
+
+
+In this example each process writes a "chunk" of data to a dataset. The C and Fortran 90
+examples result in the same data layout in the file.
+
+
+
+Figure a C Example |
+Figure b Fortran Example |
+
+
+\image html pchunk_figa.gif
+ |
+
+\image html pchunk_figb.gif
+ |
+
+
+
+For this example, four processes are used, and a 4 x 2 chunk is written to the dataset by each process.
+
+To do this, you would:
+\li Use the block parameter to specify a chunk of size 4 x 2 (or 2 x 4 for Fortran).
+\li Use a different offset (start) for each process, based on the chunk size:
+
+
+C | Process 0 | Process 1 | Process 2 | Process 3 |
+
+offset[0] = 0 | offset[0] = 0 | offset[0] = 4 | offset[0] = 4 |
+
+offset[1] = 0 | offset[1] = 2 | offset[1] = 0 | offset[1] = 2 |
+
+Fortran | Process 0 | Process 1 | Process 2 | Process 3 |
+
+offset(1) = 0 | offset(1) = 2 | offset(1) = 0 | offset(1) = 2 |
+
+offset(2) = 0 | offset(2) = 0 | offset(2) = 4 | offset(2) = 4 |
+
+
+
+For example, the offset and block parameters for Process 2 would look like:
+
+
+Figure a C Example |
+Figure b Fortran Example |
+
+
+\image html pchunk_figc.gif
+ |
+
+\image html pchunk_figd.gif
+ |
+
+
+
+Below are example programs for writing hyperslabs by chunk in Parallel HDF5:
+
+
+The following is the output from h5dump for the HDF5 file created in this example:
+\code
+HDF5 "SDS_chnk.h5" {
+GROUP "/" {
+ DATASET "IntArray" {
+ DATATYPE H5T_STD_I32BE
+ DATASPACE SIMPLE { ( 8, 4 ) / ( 8, 4 ) }
+ DATA {
+ 1, 1, 2, 2,
+ 1, 1, 2, 2,
+ 1, 1, 2, 2,
+ 1, 1, 2, 2,
+ 3, 3, 4, 4,
+ 3, 3, 4, 4,
+ 3, 3, 4, 4,
+ 3, 3, 4, 4
+ }
+ }
+}
+}
+\endcode
+The h5dump utility is written in C so the output is in C order.
+
+
+Navigate back: \ref index "Main" / \ref GettingStarted / \ref IntroParHDF5
+
+*/
diff --git a/doxygen/dox/IntroParHDF5.dox b/doxygen/dox/IntroParHDF5.dox
new file mode 100644
index 0000000..1f04e96
--- /dev/null
+++ b/doxygen/dox/IntroParHDF5.dox
@@ -0,0 +1,271 @@
+/** @page IntroParHDF5 A Brief Introduction to Parallel HDF5
+
+Navigate back: \ref index "Main" / \ref GettingStarted
+
+
+If you are new to HDF5 please see the @ref LearnBasics topic first.
+
+\section sec_pintro_overview Overview of Parallel HDF5 (PHDF5) Design
+There were several requirements that we had for Parallel HDF5 (PHDF5). These were:
+\li Parallel HDF5 files had to be compatible with serial HDF5 files and sharable
+between different serial and parallel platforms.
+\li Parallel HDF5 had to be designed to have a single file image to all processes,
+rather than having one file per process. Having one file per process can cause expensive
+post processing, and the files are not usable by different processes.
+\li A standard parallel I/O interface had to be portable to different platforms.
+
+With these requirements of HDF5 our initial target was to support MPI programming, but not
+for shared memory programming. We had done some experimentation with thread-safe support
+for Pthreads and for OpenMP, and decided not to use these.
+
+Implementation requirements were to:
+\li Not use Threads, since they were not commonly supported in 1998 when we were looking at this.
+\li Not have a reserved process, as this might interfere with parallel algorithms.
+\li Not spawn any processes, as this is not even commonly supported now.
+
+The following shows the Parallel HDF5 implementation layers.
+
+
+\subsection subsec_pintro_prog Parallel Programming with HDF5
+This tutorial assumes that you are somewhat familiar with parallel programming with MPI (Message Passing Interface).
+
+If you are not familiar with parallel programming, here is a tutorial that may be of interest:
+Tutorial on HDF5 I/O tuning at NERSC
+
+Some of the terms that you must understand in this tutorial are:
+
+-
+MPI Communicator
+Allows a group of processes to communicate with each other.
+
+Following are the MPI routines for initializing MPI and the communicator and finalizing a session with MPI:
+
+
+C |
+Fortran |
+Description |
+
+
+MPI_Init |
+MPI_INIT |
+Initialize MPI (MPI_COMM_WORLD usually) |
+
+
+MPI_Comm_size |
+MPI_COMM_SIZE |
+Define how many processes are contained in the communicator |
+
+
+MPI_Comm_rank |
+MPI_COMM_RANK |
+Define the process ID number within the communicator (from 0 to n-1) |
+
+
+MPI_Finalize |
+MPI_FINALIZE |
+Exiting MPI |
+
+
+
+-
+Collective
+MPI defines this to mean all processes of the communicator must participate in the right order.
+
+
+
+Parallel HDF5 opens a parallel file with a communicator. It returns a file handle to be used for future access to the file.
+
+All processes are required to participate in the collective Parallel HDF5 API. Different files can be opened using different communicators.
+
+Examples of what you can do with the Parallel HDF5 collective API:
+\li File Operation: Create, open and close a file
+\li Object Creation: Create, open, and close a dataset
+\li Object Structure: Extend a dataset (increase dimension sizes)
+\li Dataset Operations: Write to or read from a dataset
+(Array data transfer can be collective or independent.)
+
+Once a file is opened by the processes of a communicator:
+\li All parts of the file are accessible by all processes.
+\li All objects in the file are accessible by all processes.
+\li Multiple processes write to the same dataset.
+\li Each process writes to an individual dataset.
+
+Please refer to the Supported Configuration Features Summary in the release notes for the current release
+of HDF5 for an up-to-date list of the platforms that we support Parallel HDF5 on.
+
+
+\subsection subsec_pintro_create_file Creating and Accessing a File with PHDF5
+The programming model for creating and accessing a file is as follows:
+
+- Set up an access template object to control the file access mechanism.
+- Open the file.
+- Close the file.
+
+
+Each process of the MPI communicator creates an access template and sets it up with MPI parallel
+access information. This is done with the #H5Pcreate call to obtain the file access property list
+and the #H5Pset_fapl_mpio call to set up parallel I/O access.
+
+Following is example code for creating an access template in HDF5:
+C
+\code
+ 23 MPI_Comm comm = MPI_COMM_WORLD;
+ 24 MPI_Info info = MPI_INFO_NULL;
+ 25
+ 26 /*
+ 27 * Initialize MPI
+ 28 */
+ 29 MPI_Init(&argc, &argv);
+ 30 MPI_Comm_size(comm, &mpi_size);
+ 31 MPI_Comm_rank(comm, &mpi_rank);
+ 32
+ 33 /*
+ 34 * Set up file access property list with parallel I/O access
+ 35 */
+ 36 plist_id = H5Pcreate(H5P_FILE_ACCESS); 37 H5Pset_fapl_mpio(plist_id, comm, info);
+\endcode
+
+Fortran
+\code
+ 23 comm = MPI_COMM_WORLD
+ 24 info = MPI_INFO_NULL
+ 25
+ 26 CALL MPI_INIT(mpierror)
+ 27 CALL MPI_COMM_SIZE(comm, mpi_size, mpierror)
+ 28 CALL MPI_COMM_RANK(comm, mpi_rank, mpierror)
+ 29 !
+ 30 ! Initialize FORTRAN interface
+ 31 !
+ 32 CALL h5open_f(error)
+ 33
+ 34 !
+ 35 ! Setup file access property list with parallel I/O access.
+ 36 !
+ 37 CALL h5pcreate_f(H5P_FILE_ACCESS_F, plist_id, error) 38 CALL h5pset_fapl_mpio_f(plist_id, comm, info, error)
+\endcode
+
+The following example programs create an HDF5 file using Parallel HDF5:
+C: file_create.c
+F90: file_create.F90
+
+
+\subsection subsec_pintro_create_dset Creating and Accessing a Dataset with PHDF5
+The programming model for creating and accessing a dataset is as follows:
+
+-
+Create or open a Parallel HDF5 file with a collective call to:
+#H5Dcreate
+#H5Dopen
+
+-
+Obtain a copy of the file transfer property list and set it to use collective or independent I/O.
+
+-
+Do this by first passing a data transfer property list class type to: #H5Pcreate
+
+-
+Then set the data transfer mode to either use independent I/O access or to use collective I/O, with a call to: #H5Pset_dxpl_mpio
+
+Following are the parameters required by this call:
+C
+\code
+ herr_t H5Pset_dxpl_mpio (hid_t dxpl_id, H5FD_mpio_xfer_t xfer_mode )
+ dxpl_id IN: Data transfer property list identifier
+ xfer_mode IN: Transfer mode:
+ H5FD_MPIO_INDEPENDENT - use independent I/O access
+ (default)
+ H5FD_MPIO_COLLECTIVE - use collective I/O access
+\endcode
+
+Fortran
+\code
+ h5pset_dxpl_mpi_f (prp_id, data_xfer_mode, hdferr)
+ prp_id IN: Property List Identifier (INTEGER (HID_T))
+ data_xfer_mode IN: Data transfer mode (INTEGER)
+ H5FD_MPIO_INDEPENDENT_F (0)
+ H5FD_MPIO_COLLECTIVE_F (1)
+ hdferr IN: Error code (INTEGER)
+\endcode
+
+-
+Access the dataset with the defined transfer property list.
+All processes that have opened a dataset may do collective I/O. Each process may do an independent
+and arbitrary number of data I/O access calls, using:
+#H5Dwrite
+#H5Dread
+
+If a dataset is unlimited, you can extend it with a collective call to: #H5Dextend
+
+
+
+
+
+The following code demonstrates a collective write using Parallel HDF5:
+C
+\code
+ 95 /*
+ 96 * Create property list for collective dataset write.
+ 97 */
+ 98 plist_id = H5Pcreate (H5P_DATASET_XFER); 99 H5Pset_dxpl_mpio (plist_id, H5FD_MPIO_COLLECTIVE);
+ 100
+ 101 status = H5Dwrite (dset_id, H5T_NATIVE_INT, memspace, filespace,
+ 102 plist_id, data);
+\endcode
+
+Fortran
+\code
+ 108 ! Create property list for collective dataset write
+ 109 !
+ 110 CALL h5pcreate_f (H5P_DATASET_XFER_F, plist_id, error) 111 CALL h5pset_dxpl_mpio_f (plist_id, H5FD_MPIO_COLLECTIVE_F, error)
+ 112
+ 113 !
+ 114 ! Write the dataset collectively.
+ 115 !
+ 116 CALL h5dwrite_f (dset_id, H5T_NATIVE_INTEGER, data, dimsfi, error, &
+ 117 file_space_id = filespace, mem_space_id = memspace, xfer_prp = plist_id)
+\endcode
+
+The following example programs create an HDF5 dataset using Parallel HDF5:
+C: dataset.c
+F90: dataset.F90
+
+
+\subsubsection subsec_pintro_hyperslabs Hyperslabs
+The programming model for writing and reading hyperslabs is:
+\li Each process defines the memory and file hyperslabs.
+\li Each process executes a partial write/read call which is either collective or independent.
+
+The memory and file hyperslabs in the first step are defined with the #H5Sselect_hyperslab.
+
+The start (or offset), count, stride, and block parameters define the portion of the dataset
+to write to. By changing the values of these parameters you can write hyperslabs with Parallel
+HDF5 by contiguous hyperslab, by regularly spaced data in a column/row, by patterns, and by chunks:
+
+
+
+
+\li @subpage IntroParContHyperslab
+ |
+
+
+
+\li @subpage IntroParRegularSpaced
+ |
+
+
+
+\li @subpage IntroParPattern
+ |
+
+
+
+\li @subpage IntroParChunk
+ |
+
+
+
+
+
+Navigate back: \ref index "Main" / \ref GettingStarted
+
+*/
diff --git a/doxygen/dox/LearnBasics1.dox b/doxygen/dox/LearnBasics1.dox
index a9b6d0e..53c8e0a 100644
--- a/doxygen/dox/LearnBasics1.dox
+++ b/doxygen/dox/LearnBasics1.dox
@@ -642,7 +642,7 @@ See the programming example for an illustration of the use of these calls.
\subsection subsecLBDsetCreateContent File Contents
The contents of the file dset.h5 (dsetf.h5 for FORTRAN) are shown below:
-Contents of dset.h5 ( dsetf.h5)
+Contents of dset.h5 (dsetf.h5)
\image html imgLBDsetCreate.gif
diff --git a/doxygen/img/pchunk_figa.gif b/doxygen/img/pchunk_figa.gif
new file mode 100644
index 0000000..90b49c0
Binary files /dev/null and b/doxygen/img/pchunk_figa.gif differ
diff --git a/doxygen/img/pchunk_figb.gif b/doxygen/img/pchunk_figb.gif
new file mode 100644
index 0000000..c825fc3
Binary files /dev/null and b/doxygen/img/pchunk_figb.gif differ
diff --git a/doxygen/img/pchunk_figc.gif b/doxygen/img/pchunk_figc.gif
new file mode 100644
index 0000000..9975a87
Binary files /dev/null and b/doxygen/img/pchunk_figc.gif differ
diff --git a/doxygen/img/pchunk_figd.gif b/doxygen/img/pchunk_figd.gif
new file mode 100644
index 0000000..45da389
Binary files /dev/null and b/doxygen/img/pchunk_figd.gif differ
diff --git a/doxygen/img/pcont_hy_figa.gif b/doxygen/img/pcont_hy_figa.gif
new file mode 100644
index 0000000..1417d17
Binary files /dev/null and b/doxygen/img/pcont_hy_figa.gif differ
diff --git a/doxygen/img/pcont_hy_figb.gif b/doxygen/img/pcont_hy_figb.gif
new file mode 100644
index 0000000..a3b637b
Binary files /dev/null and b/doxygen/img/pcont_hy_figb.gif differ
diff --git a/doxygen/img/pcont_hy_figc.gif b/doxygen/img/pcont_hy_figc.gif
new file mode 100644
index 0000000..91bab7d
Binary files /dev/null and b/doxygen/img/pcont_hy_figc.gif differ
diff --git a/doxygen/img/pcont_hy_figd.gif b/doxygen/img/pcont_hy_figd.gif
new file mode 100644
index 0000000..2836b4f
Binary files /dev/null and b/doxygen/img/pcont_hy_figd.gif differ
diff --git a/doxygen/img/ppatt_figa.gif b/doxygen/img/ppatt_figa.gif
new file mode 100644
index 0000000..5c86c93
Binary files /dev/null and b/doxygen/img/ppatt_figa.gif differ
diff --git a/doxygen/img/ppatt_figb.gif b/doxygen/img/ppatt_figb.gif
new file mode 100644
index 0000000..fe4e350
Binary files /dev/null and b/doxygen/img/ppatt_figb.gif differ
diff --git a/doxygen/img/ppatt_figc.gif b/doxygen/img/ppatt_figc.gif
new file mode 100644
index 0000000..aca8ef9
Binary files /dev/null and b/doxygen/img/ppatt_figc.gif differ
diff --git a/doxygen/img/ppatt_figd.gif b/doxygen/img/ppatt_figd.gif
new file mode 100644
index 0000000..e6c55c0
Binary files /dev/null and b/doxygen/img/ppatt_figd.gif differ
diff --git a/doxygen/img/preg_figa.gif b/doxygen/img/preg_figa.gif
new file mode 100644
index 0000000..0929bf4
Binary files /dev/null and b/doxygen/img/preg_figa.gif differ
diff --git a/doxygen/img/preg_figb.gif b/doxygen/img/preg_figb.gif
new file mode 100644
index 0000000..33e57fc
Binary files /dev/null and b/doxygen/img/preg_figb.gif differ
diff --git a/doxygen/img/preg_figc.gif b/doxygen/img/preg_figc.gif
new file mode 100644
index 0000000..a4f98ff
Binary files /dev/null and b/doxygen/img/preg_figc.gif differ
diff --git a/doxygen/img/preg_figd.gif b/doxygen/img/preg_figd.gif
new file mode 100644
index 0000000..fe345fb
Binary files /dev/null and b/doxygen/img/preg_figd.gif differ
diff --git a/examples/CMakeTests.cmake b/examples/CMakeTests.cmake
index 30f73c4..e2cd826 100644
--- a/examples/CMakeTests.cmake
+++ b/examples/CMakeTests.cmake
@@ -117,8 +117,8 @@ if (H5_HAVE_PARALLEL AND HDF5_TEST_PARALLEL AND NOT WIN32)
add_test (NAME MPI_TEST_EXAMPLES-${parallel_example} COMMAND ${MPIEXEC_EXECUTABLE} ${MPIEXEC_NUMPROC_FLAG} ${NUMPROCS} ${MPIEXEC_PREFLAGS} $ ${MPIEXEC_POSTFLAGS})
else ()
add_test (NAME MPI_TEST_EXAMPLES-${parallel_example} COMMAND "${CMAKE_COMMAND}"
- -D "TEST_PROGRAM=${MPIEXEC_EXECUTABLE};${MPIEXEC_NUMPROC_FLAG};${NUMPROCS};${MPIEXEC_PREFLAGS};$;${MPIEXEC_POSTFLAGS}"
- -D "TEST_ARGS:STRING="
+ -D "TEST_PROGRAM=${MPIEXEC_EXECUTABLE}"
+ -D "TEST_ARGS:STRING=${MPIEXEC_NUMPROC_FLAG};${NUMPROCS};${MPIEXEC_PREFLAGS};$;${MPIEXEC_POSTFLAGS}"
-D "TEST_EXPECT=0"
-D "TEST_SKIP_COMPARE=TRUE"
-D "TEST_OUTPUT=${parallel_example}.out"
diff --git a/java/examples/datasets/CMakeLists.txt b/java/examples/datasets/CMakeLists.txt
index 6ed03ca..7542e8e 100644
--- a/java/examples/datasets/CMakeLists.txt
+++ b/java/examples/datasets/CMakeLists.txt
@@ -80,7 +80,7 @@ endforeach ()
if (BUILD_TESTING AND HDF5_TEST_EXAMPLES AND HDF5_TEST_SERIAL)
get_property (target_name TARGET ${HDF5_JAVA_JNI_LIB_TARGET} PROPERTY OUTPUT_NAME)
- set (CMD_ARGS "-Dhdf.hdf5lib.H5.loadLibraryName=${target_name}$<$:${CMAKE_DEBUG_POSTFIX}>;")
+ set (CMD_ARGS "-Dhdf.hdf5lib.H5.loadLibraryName=${target_name}$<$,$>:${CMAKE_DEBUG_POSTFIX}>;")
set (last_test "")
foreach (example ${HDF_JAVA_EXAMPLES})
diff --git a/tools/test/h5diff/CMakeTests.cmake b/tools/test/h5diff/CMakeTests.cmake
index 7e437af..ccb9380 100644
--- a/tools/test/h5diff/CMakeTests.cmake
+++ b/tools/test/h5diff/CMakeTests.cmake
@@ -422,8 +422,8 @@
add_test (
NAME MPI_TEST_H5DIFF-${resultfile}
COMMAND "${CMAKE_COMMAND}"
- -D "TEST_PROGRAM=${MPIEXEC_EXECUTABLE};${MPIEXEC_NUMPROC_FLAG};${MPIEXEC_MAX_NUMPROCS};${MPIEXEC_PREFLAGS};$;${MPIEXEC_POSTFLAGS}"
- -D "TEST_ARGS:STRING=${ARGN}"
+ -D "TEST_PROGRAM=${MPIEXEC_EXECUTABLE}"
+ -D "TEST_ARGS:STRING=${MPIEXEC_NUMPROC_FLAG};${MPIEXEC_MAX_NUMPROCS};${MPIEXEC_PREFLAGS};$;${MPIEXEC_POSTFLAGS};${ARGN}"
-D "TEST_FOLDER=${PROJECT_BINARY_DIR}/PAR/testfiles"
-D "TEST_OUTPUT=${resultfile}.out"
-D "TEST_EXPECT=0"
diff --git a/tools/test/h5dump/CMakeTests.cmake b/tools/test/h5dump/CMakeTests.cmake
index 0ae7bbd..cdc3be8 100644
--- a/tools/test/h5dump/CMakeTests.cmake
+++ b/tools/test/h5dump/CMakeTests.cmake
@@ -580,7 +580,7 @@
)
add_test (
NAME H5DUMP-${resultfile}-output-cmp
- COMMAND ${CMAKE_COMMAND} -E compare_files --ignore-eol ${resultfile}.txt ${resultfile}.exp
+ COMMAND ${CMAKE_COMMAND} -E compare_files ${CMAKE_IGNORE_EOL} ${resultfile}.txt ${resultfile}.exp
)
set_tests_properties (H5DUMP-${resultfile}-output-cmp PROPERTIES
DEPENDS H5DUMP-${resultfile}
@@ -645,7 +645,7 @@
)
add_test (
NAME H5DUMP-${resultfile}-output-cmp
- COMMAND ${CMAKE_COMMAND} -E compare_files --ignore-eol ${resultfile}.txt ${resultfile}.exp
+ COMMAND ${CMAKE_COMMAND} -E compare_files ${CMAKE_IGNORE_EOL} ${resultfile}.txt ${resultfile}.exp
)
set_tests_properties (H5DUMP-${resultfile}-output-cmp PROPERTIES
DEPENDS H5DUMP-${resultfile}
@@ -653,7 +653,7 @@
)
add_test (
NAME H5DUMP-${resultfile}-output-cmp-ddl
- COMMAND ${CMAKE_COMMAND} -E compare_files --ignore-eol ${ddlfile}.txt ${ddlfile}.exp
+ COMMAND ${CMAKE_COMMAND} -E compare_files ${CMAKE_IGNORE_EOL} ${ddlfile}.txt ${ddlfile}.exp
)
set_tests_properties (H5DUMP-${resultfile}-output-cmp-ddl PROPERTIES
DEPENDS H5DUMP-${resultfile}-output-cmp
@@ -699,7 +699,7 @@
)
add_test (
NAME H5DUMP-output-cmp-${resultfile}
- COMMAND ${CMAKE_COMMAND} -E compare_files --ignore-eol ${resultfile}.txt ${resultfile}.exp
+ COMMAND ${CMAKE_COMMAND} -E compare_files ${CMAKE_IGNORE_EOL} ${resultfile}.txt ${resultfile}.exp
)
set_tests_properties (H5DUMP-output-cmp-${resultfile} PROPERTIES
DEPENDS H5DUMP-output-${resultfile}
diff --git a/tools/test/misc/CMakeTestsClear.cmake b/tools/test/misc/CMakeTestsClear.cmake
index 5e307aa..a554972 100644
--- a/tools/test/misc/CMakeTestsClear.cmake
+++ b/tools/test/misc/CMakeTestsClear.cmake
@@ -99,6 +99,10 @@
-D "TEST_REFERENCE=${resultfile}.ddl"
-P "${HDF_RESOURCES_DIR}/runTest.cmake"
)
+ if (last_test)
+ set_tests_properties (H5CLEAR_CMP-${testname} PROPERTIES DEPENDS ${last_test})
+ endif ()
+ set (last_test "H5CLEAR_CMP-${testname}")
endif ()
endmacro ()
@@ -117,6 +121,10 @@
-D "TEST_ERRREF=${resultfile}.err"
-P "${HDF_RESOURCES_DIR}/runTest.cmake"
)
+ if (last_test)
+ set_tests_properties (H5CLEAR_CMP-${testname} PROPERTIES DEPENDS ${last_test})
+ endif ()
+ set (last_test "H5CLEAR_CMP-${testname}")
endif ()
endmacro ()
--
cgit v0.12
|