Diffstat (limited to 'fortran/testpar')
-rw-r--r--  fortran/testpar/CMakeLists.txt | 110
-rw-r--r--  fortran/testpar/CMakeTests.cmake | 19
-rw-r--r--  fortran/testpar/Makefile.am | 23
-rw-r--r--  fortran/testpar/Makefile.in | 904
-rw-r--r--  fortran/testpar/hyper.F90 (renamed from fortran/testpar/hyper.f90) | 215
-rw-r--r--  fortran/testpar/mdset.F90 (renamed from fortran/testpar/mdset.f90) | 47
-rw-r--r--  fortran/testpar/multidsetrw.F90 | 233
-rw-r--r--  fortran/testpar/ptest.F90 | 115
-rw-r--r--  fortran/testpar/ptest.f90 | 135
-rw-r--r--  fortran/testpar/subfiling.F90 | 401
10 files changed, 1035 insertions, 1167 deletions
diff --git a/fortran/testpar/CMakeLists.txt b/fortran/testpar/CMakeLists.txt
index b112db3..58ef95d 100644
--- a/fortran/testpar/CMakeLists.txt
+++ b/fortran/testpar/CMakeLists.txt
@@ -1,33 +1,103 @@
-cmake_minimum_required (VERSION 2.8.6)
-PROJECT (HDF5_FORTRAN_TESTPAR C CXX Fortran)
+cmake_minimum_required (VERSION 3.18)
+project (HDF5_FORTRAN_TESTPAR C Fortran)
#-----------------------------------------------------------------------------
# Setup include Directories
#-----------------------------------------------------------------------------
-INCLUDE_DIRECTORIES (${CMAKE_Fortran_MODULE_DIRECTORY} ${HDF5_F90_BINARY_DIR} ${HDF5_F90_SRC_DIR}/src)
+set (TESTPAR_INCLUDES ${MPI_Fortran_INCLUDE_DIRS} ${HDF5_F90_BINARY_DIR} ${HDF5_F90_SRC_DIR}/src)
+if (NOT BUILD_SHARED_LIBS)
+ set (TESTPAR_INCLUDES ${TESTPAR_INCLUDES} ${CMAKE_Fortran_MODULE_DIRECTORY}/static)
+else ()
+ set (TESTPAR_INCLUDES ${TESTPAR_INCLUDES} ${CMAKE_Fortran_MODULE_DIRECTORY}/shared)
+endif ()
#-----------------------------------------------------------------------------
# Add Tests
#-----------------------------------------------------------------------------
#-- Adding test for parallel_test
-ADD_EXECUTABLE (parallel_test
- ptest.f90
- hyper.f90
- mdset.f90
+add_executable (parallel_test
+ ptest.F90
+ hyper.F90
+ mdset.F90
+ multidsetrw.F90
)
-TARGET_NAMING (parallel_test ${LIB_TYPE})
-TARGET_FORTRAN_WIN_PROPERTIES (parallel_test "")
-TARGET_LINK_LIBRARIES (parallel_test
- ${HDF5_F90_TEST_LIB_TARGET}
- ${HDF5_F90_LIB_TARGET}
- ${HDF5_LIB_TARGET}
- ${MPI_Fortran_LIBRARIES}
+target_include_directories (parallel_test
+ PRIVATE ${TESTPAR_INCLUDES}
)
-IF (WIN32 AND MSVC)
- TARGET_LINK_LIBRARIES (parallel_test "ws2_32.lib")
-ENDIF (WIN32 AND MSVC)
-SET_TARGET_PROPERTIES (parallel_test PROPERTIES LINKER_LANGUAGE Fortran)
-SET_TARGET_PROPERTIES (parallel_test PROPERTIES FOLDER test/fortran)
+target_compile_options(parallel_test
+ PRIVATE
+ "${HDF5_CMAKE_Fortran_FLAGS}"
+ $<$<STREQUAL:"x${CMAKE_Fortran_SIMULATE_ID}","xMSVC">:${WIN_COMPILE_FLAGS}>
+)
+if (NOT BUILD_SHARED_LIBS)
+ target_link_libraries (parallel_test
+ PRIVATE
+ ${HDF5_F90_TEST_LIB_TARGET} ${HDF5_F90_LIB_TARGET} ${HDF5_LIB_TARGET} ${LINK_Fortran_LIBS}
+ $<$<STREQUAL:"x${CMAKE_Fortran_SIMULATE_ID}","xMSVC">:"ws2_32.lib">
+ )
+ set_target_properties (parallel_test PROPERTIES
+ FOLDER test/fortran
+ LINKER_LANGUAGE Fortran
+ Fortran_MODULE_DIRECTORY ${CMAKE_Fortran_MODULE_DIRECTORY}/static
+ )
+else ()
+ target_link_libraries (parallel_test
+ PRIVATE
+ ${HDF5_F90_TEST_LIBSH_TARGET} ${HDF5_F90_LIBSH_TARGET} ${HDF5_LIBSH_TARGET} ${LINK_Fortran_LIBS}
+ $<$<STREQUAL:"x${CMAKE_Fortran_SIMULATE_ID}","xMSVC">:"ws2_32.lib">
+ )
+ set_target_properties (parallel_test PROPERTIES
+ FOLDER test/fortran
+ LINKER_LANGUAGE Fortran
+ Fortran_MODULE_DIRECTORY ${CMAKE_Fortran_MODULE_DIRECTORY}/shared
+ )
+endif ()
+
+if(MSVC)
+ set_property(TARGET parallel_test PROPERTY LINK_FLAGS "/SUBSYSTEM:CONSOLE ${WIN_LINK_FLAGS}")
+endif()
+
+#-- Adding test for subfiling_test
+add_executable (subfiling_test
+ subfiling.F90
+)
+target_include_directories (subfiling_test
+ PRIVATE ${TESTPAR_INCLUDES}
+)
+target_compile_options(subfiling_test
+ PRIVATE
+ "${HDF5_CMAKE_Fortran_FLAGS}"
+ $<$<STREQUAL:"x${CMAKE_Fortran_SIMULATE_ID}","xMSVC">:${WIN_COMPILE_FLAGS}>
+)
+if (NOT BUILD_SHARED_LIBS)
+ target_link_libraries (subfiling_test
+ PRIVATE
+ ${HDF5_F90_TEST_LIB_TARGET} ${HDF5_F90_LIB_TARGET} ${HDF5_LIB_TARGET} ${LINK_Fortran_LIBS}
+ $<$<STREQUAL:"x${CMAKE_Fortran_SIMULATE_ID}","xMSVC">:"ws2_32.lib">
+ )
+ set_target_properties (subfiling_test PROPERTIES
+ FOLDER test/fortran
+ LINKER_LANGUAGE Fortran
+ Fortran_MODULE_DIRECTORY ${CMAKE_Fortran_MODULE_DIRECTORY}/static
+ )
+else ()
+ target_link_libraries (subfiling_test
+ PRIVATE
+ ${HDF5_F90_TEST_LIBSH_TARGET} ${HDF5_F90_LIBSH_TARGET} ${HDF5_LIBSH_TARGET} ${LINK_Fortran_LIBS}
+ $<$<STREQUAL:"x${CMAKE_Fortran_SIMULATE_ID}","xMSVC">:"ws2_32.lib">
+ )
+ set_target_properties (subfiling_test PROPERTIES
+ FOLDER test/fortran
+ LINKER_LANGUAGE Fortran
+ Fortran_MODULE_DIRECTORY ${CMAKE_Fortran_MODULE_DIRECTORY}/shared
+ )
+endif ()
+
+if(MSVC)
+ set_property(TARGET subfiling_test PROPERTY LINK_FLAGS "/SUBSYSTEM:CONSOLE ${WIN_LINK_FLAGS}")
+endif()
-ADD_TEST (NAME parallel_test COMMAND ${MPIEXEC} ${MPIEXEC_PREFLAGS} ${MPIEXEC_NUMPROC_FLAG} ${MPIEXEC_MAX_NUMPROCS} ${MPIEXEC_POSTFLAGS} $<TARGET_FILE:parallel_test>)
+if (HDF5_TEST_FORTRAN AND HDF5_TEST_PARALLEL)
+ include (CMakeTests.cmake)
+endif ()
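
This file is only reached when the top-level build enables parallel Fortran testing. As a minimal sketch of a configure/build/test sequence: HDF5_TEST_FORTRAN and HDF5_TEST_PARALLEL are the gates used above, while the wrapper names and the HDF5_BUILD_FORTRAN / HDF5_ENABLE_PARALLEL options are assumptions from the top-level HDF5 CMake build.

    # hypothetical out-of-source build; wrapper names and option defaults may differ
    CC=mpicc FC=mpif90 cmake -DHDF5_BUILD_FORTRAN=ON -DHDF5_ENABLE_PARALLEL=ON \
          -DHDF5_TEST_FORTRAN=ON -DHDF5_TEST_PARALLEL=ON /path/to/hdf5
    cmake --build .
    ctest -R MPI_TEST_FORT --output-on-failure
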
diff --git a/fortran/testpar/CMakeTests.cmake b/fortran/testpar/CMakeTests.cmake
new file mode 100644
index 0000000..1d893d5
--- /dev/null
+++ b/fortran/testpar/CMakeTests.cmake
@@ -0,0 +1,19 @@
+#
+# Copyright by The HDF Group.
+# All rights reserved.
+#
+# This file is part of HDF5. The full HDF5 copyright notice, including
+# terms governing use, modification, and redistribution, is contained in
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://www.hdfgroup.org/licenses.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
+#
+
+##############################################################################
+##############################################################################
+### T E S T I N G ###
+##############################################################################
+##############################################################################
+add_test (NAME MPI_TEST_FORT_parallel_test COMMAND ${MPIEXEC_EXECUTABLE} ${MPIEXEC_NUMPROC_FLAG} ${MPIEXEC_MAX_NUMPROCS} ${MPIEXEC_PREFLAGS} $<TARGET_FILE:parallel_test> ${MPIEXEC_POSTFLAGS})
+add_test (NAME MPI_TEST_FORT_subfiling_test COMMAND ${MPIEXEC_EXECUTABLE} ${MPIEXEC_NUMPROC_FLAG} ${MPIEXEC_MAX_NUMPROCS} ${MPIEXEC_PREFLAGS} $<TARGET_FILE:subfiling_test> ${MPIEXEC_POSTFLAGS})
\ No newline at end of file
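
At test time each add_test line collapses into a single launcher invocation built from the FindMPI variables. As a rough sketch, assuming MPIEXEC_EXECUTABLE=mpiexec, MPIEXEC_NUMPROC_FLAG=-n, MPIEXEC_MAX_NUMPROCS=6, and empty pre/post flags, the registered commands would be roughly:

    # illustrative expansion; the process count and output path are examples, not defaults
    mpiexec -n 6 <build-dir>/bin/parallel_test
    mpiexec -n 6 <build-dir>/bin/subfiling_test
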
diff --git a/fortran/testpar/Makefile.am b/fortran/testpar/Makefile.am
index 2e85f3b..b1cefbc 100644
--- a/fortran/testpar/Makefile.am
+++ b/fortran/testpar/Makefile.am
@@ -1,16 +1,13 @@
#
# Copyright by The HDF Group.
-# Copyright by the Board of Trustees of the University of Illinois.
# All rights reserved.
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://www.hdfgroup.org/licenses.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
##
## Makefile.am
## Run automake to generate a Makefile.in from this file.
@@ -21,7 +18,8 @@
include $(top_srcdir)/config/commence.am
# Include files
-AM_FCFLAGS+=-I$(top_srcdir)/fortran/src -I$(top_srcdir)/fortran/test $(F9XMODFLAG)$(top_builddir)/fortran/src
+AM_FCFLAGS+=-I$(top_srcdir)/fortran/src -I$(top_srcdir)/fortran/test $(F9XMODFLAG)$(top_builddir)/fortran/src \
+ $(F9XMODFLAG)$(top_builddir)/fortran/test
# Some Fortran compilers can't build shared libraries, so sometimes we
# want to build a shared C library and a static Fortran library. If so,
@@ -34,14 +32,15 @@ else
endif
# These are our main targets
-TEST_PROG_PARA=parallel_test
+TEST_PROG_PARA=parallel_test subfiling_test
check_PROGRAMS=$(TEST_PROG_PARA)
# Temporary files
-CHECK_CLEANFILES+=parf[12].h5
+CHECK_CLEANFILES+=parf[12].h5 subf.h5*
# Test source files
-parallel_test_SOURCES=ptest.f90 hyper.f90 mdset.f90
+parallel_test_SOURCES=ptest.F90 hyper.F90 mdset.F90 multidsetrw.F90
+subfiling_test_SOURCES=subfiling.F90
# The tests depend on several libraries.
LDADD=$(LIBH5FTEST) $(LIBH5TEST) $(LIBH5F) $(LIBHDF5)
@@ -49,4 +48,4 @@ LDADD=$(LIBH5FTEST) $(LIBH5TEST) $(LIBH5F) $(LIBHDF5)
# Mark this directory as part of the Fortran API
FORTRAN_API=yes
-include $(top_srcdir)/config/conclude.am
+include $(top_srcdir)/config/conclude_fc.am
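
In the Autotools build, the programs listed in TEST_PROG_PARA are executed through the parallel check harness (the check-p target pulled in from the config/conclude*.am fragments). A minimal sketch, assuming a scratch area for the parallel test files:

    # HDF5_PARAPREFIX redirects parallel test files away from the current
    # directory; the path shown is an example, not a default
    cd fortran/testpar
    HDF5_PARAPREFIX=/scratch/$USER make check-p
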
diff --git a/fortran/testpar/Makefile.in b/fortran/testpar/Makefile.in
deleted file mode 100644
index 272fc45..0000000
--- a/fortran/testpar/Makefile.in
+++ /dev/null
@@ -1,904 +0,0 @@
-# Makefile.in generated by automake 1.11.1 from Makefile.am.
-# @configure_input@
-
-# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002,
-# 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation,
-# Inc.
-# This Makefile.in is free software; the Free Software Foundation
-# gives unlimited permission to copy and/or distribute it,
-# with or without modifications, as long as this notice is preserved.
-
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
-# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
-# PARTICULAR PURPOSE.
-
-@SET_MAKE@
-
-#
-# Copyright by The HDF Group.
-# Copyright by the Board of Trustees of the University of Illinois.
-# All rights reserved.
-#
-# This file is part of HDF5. The full HDF5 copyright notice, including
-# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
-#
-# HDF5 Fortran Parallel Library Test Makefile(.in)
-#
-VPATH = @srcdir@
-pkgdatadir = $(datadir)/@PACKAGE@
-pkgincludedir = $(includedir)/@PACKAGE@
-pkglibdir = $(libdir)/@PACKAGE@
-pkglibexecdir = $(libexecdir)/@PACKAGE@
-am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
-install_sh_DATA = $(install_sh) -c -m 644
-install_sh_PROGRAM = $(install_sh) -c
-install_sh_SCRIPT = $(install_sh) -c
-INSTALL_HEADER = $(INSTALL_DATA)
-transform = $(program_transform_name)
-NORMAL_INSTALL = :
-PRE_INSTALL = :
-POST_INSTALL = :
-NORMAL_UNINSTALL = :
-PRE_UNINSTALL = :
-POST_UNINSTALL = :
-build_triplet = @build@
-host_triplet = @host@
-DIST_COMMON = $(srcdir)/Makefile.am $(srcdir)/Makefile.in \
- $(top_srcdir)/config/commence.am \
- $(top_srcdir)/config/conclude.am
-
-# Some Fortran compilers can't build shared libraries, so sometimes we
-# want to build a shared C library and a static Fortran library. If so,
-# pass the -static flag to the library linker.
-# (Actually, we historically have bad luck combining shared libraries with
-# parallel code. But you're welcome to try...)
-@FORTRAN_SHARED_CONDITIONAL_FALSE@am__append_1 = -static
-check_PROGRAMS = $(am__EXEEXT_1)
-TESTS = $(check_PROGRAMS)
-subdir = fortran/testpar
-ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
-am__aclocal_m4_deps = $(top_srcdir)/configure.in
-am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
- $(ACLOCAL_M4)
-mkinstalldirs = $(SHELL) $(top_srcdir)/bin/mkinstalldirs
-CONFIG_HEADER = $(top_builddir)/src/H5config.h
-CONFIG_CLEAN_FILES =
-CONFIG_CLEAN_VPATH_FILES =
-am__EXEEXT_1 = parallel_test$(EXEEXT)
-am_parallel_test_OBJECTS = ptest.$(OBJEXT) hyper.$(OBJEXT) \
- mdset.$(OBJEXT)
-parallel_test_OBJECTS = $(am_parallel_test_OBJECTS)
-parallel_test_LDADD = $(LDADD)
-parallel_test_DEPENDENCIES = $(LIBH5FTEST) $(LIBH5TEST) $(LIBH5F) \
- $(LIBHDF5)
-AM_V_lt = $(am__v_lt_$(V))
-am__v_lt_ = $(am__v_lt_$(AM_DEFAULT_VERBOSITY))
-am__v_lt_0 = --silent
-DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir)/src
-FCCOMPILE = $(FC) $(AM_FCFLAGS) $(FCFLAGS)
-LTFCCOMPILE = $(LIBTOOL) $(AM_V_lt) --tag=FC $(AM_LIBTOOLFLAGS) \
- $(LIBTOOLFLAGS) --mode=compile $(FC) $(AM_FCFLAGS) $(FCFLAGS)
-AM_V_FC = $(am__v_FC_$(V))
-am__v_FC_ = $(am__v_FC_$(AM_DEFAULT_VERBOSITY))
-am__v_FC_0 = @echo " FC " $@;
-AM_V_at = $(am__v_at_$(V))
-am__v_at_ = $(am__v_at_$(AM_DEFAULT_VERBOSITY))
-am__v_at_0 = @
-FCLD = $(FC)
-FCLINK = $(LIBTOOL) $(AM_V_lt) --tag=FC $(AM_LIBTOOLFLAGS) \
- $(LIBTOOLFLAGS) --mode=link $(FCLD) $(AM_FCFLAGS) $(FCFLAGS) \
- $(AM_LDFLAGS) $(LDFLAGS) -o $@
-AM_V_FCLD = $(am__v_FCLD_$(V))
-am__v_FCLD_ = $(am__v_FCLD_$(AM_DEFAULT_VERBOSITY))
-am__v_FCLD_0 = @echo " FCLD " $@;
-AM_V_GEN = $(am__v_GEN_$(V))
-am__v_GEN_ = $(am__v_GEN_$(AM_DEFAULT_VERBOSITY))
-am__v_GEN_0 = @echo " GEN " $@;
-SOURCES = $(parallel_test_SOURCES)
-DIST_SOURCES = $(parallel_test_SOURCES)
-ETAGS = etags
-CTAGS = ctags
-am__tty_colors = \
-red=; grn=; lgn=; blu=; std=
-DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
-ACLOCAL = /home1/packages/automake/automake-1.9.6/bin/aclocal-1.9 -I /afs/ncsa/projects/hdf/packages/libtool_1.5.14/Linux_2.4/share/aclocal
-ADD_PARALLEL_FILES = @ADD_PARALLEL_FILES@
-AMTAR = @AMTAR@
-
-# H5_CFLAGS holds flags that should be used when building hdf5,
-# but which should not be exported to h5cc for building other programs.
-# AM_CFLAGS is an automake construct which should be used by Makefiles
-# instead of CFLAGS, as CFLAGS is reserved solely for the user to define.
-# This applies to FCFLAGS, CXXFLAGS, CPPFLAGS, and LDFLAGS as well.
-AM_CFLAGS = @AM_CFLAGS@ @H5_CFLAGS@
-AM_CPPFLAGS = @AM_CPPFLAGS@ @H5_CPPFLAGS@
-AM_CXXFLAGS = @AM_CXXFLAGS@ @H5_CXXFLAGS@
-AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
-
-# Include files
-AM_FCFLAGS = @AM_FCFLAGS@ @H5_FCFLAGS@ -I$(top_srcdir)/fortran/src \
- -I$(top_srcdir)/fortran/test \
- $(F9XMODFLAG)$(top_builddir)/fortran/src
-AM_LDFLAGS = @AM_LDFLAGS@ @H5_LDFLAGS@ $(am__append_1)
-AM_MAKEFLAGS = @AM_MAKEFLAGS@
-AR = @AR@
-AS = @AS@
-
-# Set the paths for AFS installs of autotools for Linux machines
-# Ideally, these tools should never be needed during the build.
-AUTOCONF = /home1/packages/autoconf/autoconf-2.60/bin/autoconf
-AUTOHEADER = /home1/packages/autoconf/autoconf-2.60/bin/autoheader
-AUTOMAKE = /home1/packages/automake/automake-1.9.6/bin/automake-1.9
-AWK = @AWK@
-BYTESEX = @BYTESEX@
-CC = @CC@
-CCDEPMODE = @CCDEPMODE@
-CC_VERSION = @CC_VERSION@
-CFLAGS = @CFLAGS@
-CLEARFILEBUF = @CLEARFILEBUF@
-CODESTACK = @CODESTACK@
-CONFIG_DATE = @CONFIG_DATE@
-CONFIG_MODE = @CONFIG_MODE@
-CONFIG_USER = @CONFIG_USER@
-CPP = @CPP@
-CPPFLAGS = @CPPFLAGS@
-CXX = @CXX@
-CXXCPP = @CXXCPP@
-CXXDEPMODE = @CXXDEPMODE@
-CXXFLAGS = @CXXFLAGS@
-CXX_VERSION = @CXX_VERSION@
-CYGPATH_W = @CYGPATH_W@
-DEBUG_PKG = @DEBUG_PKG@
-DEFAULT_API_VERSION = @DEFAULT_API_VERSION@
-DEFS = @DEFS@
-DEPDIR = @DEPDIR@
-DEPRECATED_SYMBOLS = @DEPRECATED_SYMBOLS@
-DIRECT_VFD = @DIRECT_VFD@
-DLLTOOL = @DLLTOOL@
-DSYMUTIL = @DSYMUTIL@
-DUMPBIN = @DUMPBIN@
-DYNAMIC_DIRS = @DYNAMIC_DIRS@
-ECHO_C = @ECHO_C@
-ECHO_N = @ECHO_N@
-ECHO_T = @ECHO_T@
-EGREP = @EGREP@
-EXEEXT = @EXEEXT@
-EXTERNAL_FILTERS = @EXTERNAL_FILTERS@
-
-# Make sure that these variables are exported to the Makefiles
-F9XMODEXT = @F9XMODEXT@
-F9XMODFLAG = @F9XMODFLAG@
-F9XSUFFIXFLAG = @F9XSUFFIXFLAG@
-FC = @FC@
-FC2003 = @FC2003@
-FCFLAGS = @FCFLAGS@
-FCFLAGS_f90 = @FCFLAGS_f90@
-FCLIBS = @FCLIBS@
-FC_VERSION = @FC_VERSION@
-FGREP = @FGREP@
-FILTERS = @FILTERS@
-FSEARCH_DIRS = @FSEARCH_DIRS@
-GPFS = @GPFS@
-GREP = @GREP@
-H5_CFLAGS = @H5_CFLAGS@
-H5_CPPFLAGS = @H5_CPPFLAGS@
-H5_CXXFLAGS = @H5_CXXFLAGS@
-H5_CXX_SHARED = @H5_CXX_SHARED@
-H5_FCFLAGS = @H5_FCFLAGS@
-H5_FORTRAN_SHARED = @H5_FORTRAN_SHARED@
-H5_LDFLAGS = @H5_LDFLAGS@
-H5_LONE_COLON = @H5_LONE_COLON@
-H5_VERSION = @H5_VERSION@
-HADDR_T = @HADDR_T@
-HAVE_DMALLOC = @HAVE_DMALLOC@
-HAVE_FORTRAN_2003 = @HAVE_FORTRAN_2003@
-HDF5_HL = @HDF5_HL@
-HDF5_INTERFACES = @HDF5_INTERFACES@
-HDF_CXX = @HDF_CXX@
-HDF_FORTRAN = @HDF_FORTRAN@
-HDF_FORTRAN2003 = @HDF_FORTRAN2003@
-HID_T = @HID_T@
-HL = @HL@
-HL_FOR = @HL_FOR@
-HSIZE_T = @HSIZE_T@
-HSSIZE_T = @HSSIZE_T@
-INSTALL = @INSTALL@
-INSTALL_DATA = @INSTALL_DATA@
-INSTALL_PROGRAM = @INSTALL_PROGRAM@
-INSTALL_SCRIPT = @INSTALL_SCRIPT@
-INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
-INSTRUMENT = @INSTRUMENT@
-INSTRUMENT_LIBRARY = @INSTRUMENT_LIBRARY@
-LARGEFILE = @LARGEFILE@
-LD = @LD@
-LDFLAGS = @LDFLAGS@
-LIBOBJS = @LIBOBJS@
-LIBS = @LIBS@
-LIBTOOL = @LIBTOOL@
-LIPO = @LIPO@
-LL_PATH = @LL_PATH@
-LN_S = @LN_S@
-LTLIBOBJS = @LTLIBOBJS@
-LT_STATIC_EXEC = @LT_STATIC_EXEC@
-MAINT = @MAINT@
-MAKEINFO = @MAKEINFO@
-MANIFEST_TOOL = @MANIFEST_TOOL@
-MKDIR_P = @MKDIR_P@
-MPE = @MPE@
-MPI_GET_SIZE = @MPI_GET_SIZE@
-NM = @NM@
-NMEDIT = @NMEDIT@
-OBJDUMP = @OBJDUMP@
-OBJECT_NAMELEN_DEFAULT_F = @OBJECT_NAMELEN_DEFAULT_F@
-OBJEXT = @OBJEXT@
-OTOOL = @OTOOL@
-OTOOL64 = @OTOOL64@
-PACKAGE = @PACKAGE@
-PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
-PACKAGE_NAME = @PACKAGE_NAME@
-PACKAGE_STRING = @PACKAGE_STRING@
-PACKAGE_TARNAME = @PACKAGE_TARNAME@
-PACKAGE_URL = @PACKAGE_URL@
-PACKAGE_VERSION = @PACKAGE_VERSION@
-PARALLEL = @PARALLEL@
-PATH_SEPARATOR = @PATH_SEPARATOR@
-PERL = @PERL@
-PTHREAD = @PTHREAD@
-RANLIB = @RANLIB@
-ROOT = @ROOT@
-RUNPARALLEL = @RUNPARALLEL@
-RUNSERIAL = @RUNSERIAL@
-R_INTEGER = @R_INTEGER@
-R_LARGE = @R_LARGE@
-SEARCH = @SEARCH@
-SED = @SED@
-SETX = @SETX@
-SET_MAKE = @SET_MAKE@
-SHELL = @SHELL@
-SIZE_T = @SIZE_T@
-STATIC_EXEC = @STATIC_EXEC@
-STATIC_SHARED = @STATIC_SHARED@
-STRICT_FORMAT_CHECKS = @STRICT_FORMAT_CHECKS@
-STRIP = @STRIP@
-TESTPARALLEL = @TESTPARALLEL@
-THREADSAFE = @THREADSAFE@
-TIME = @TIME@
-TR = @TR@
-TRACE_API = @TRACE_API@
-UNAME_INFO = @UNAME_INFO@
-USE_FILTER_DEFLATE = @USE_FILTER_DEFLATE@
-USE_FILTER_FLETCHER32 = @USE_FILTER_FLETCHER32@
-USE_FILTER_NBIT = @USE_FILTER_NBIT@
-USE_FILTER_SCALEOFFSET = @USE_FILTER_SCALEOFFSET@
-USE_FILTER_SHUFFLE = @USE_FILTER_SHUFFLE@
-USE_FILTER_SZIP = @USE_FILTER_SZIP@
-USINGMEMCHECKER = @USINGMEMCHECKER@
-VERSION = @VERSION@
-WORDS_BIGENDIAN = @WORDS_BIGENDIAN@
-abs_builddir = @abs_builddir@
-abs_srcdir = @abs_srcdir@
-abs_top_builddir = @abs_top_builddir@
-abs_top_srcdir = @abs_top_srcdir@
-ac_ct_AR = @ac_ct_AR@
-ac_ct_CC = @ac_ct_CC@
-ac_ct_CXX = @ac_ct_CXX@
-ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
-ac_ct_FC = @ac_ct_FC@
-am__include = @am__include@
-am__leading_dot = @am__leading_dot@
-am__quote = @am__quote@
-am__tar = @am__tar@
-am__untar = @am__untar@
-bindir = @bindir@
-build = @build@
-build_alias = @build_alias@
-build_cpu = @build_cpu@
-build_os = @build_os@
-build_vendor = @build_vendor@
-builddir = @builddir@
-datadir = @datadir@
-datarootdir = @datarootdir@
-
-# Install directories that automake doesn't know about
-docdir = $(exec_prefix)/doc
-dvidir = @dvidir@
-enable_shared = @enable_shared@
-enable_static = @enable_static@
-exec_prefix = @exec_prefix@
-host = @host@
-host_alias = @host_alias@
-host_cpu = @host_cpu@
-host_os = @host_os@
-host_vendor = @host_vendor@
-htmldir = @htmldir@
-includedir = @includedir@
-infodir = @infodir@
-install_sh = @install_sh@
-libdir = @libdir@
-libexecdir = @libexecdir@
-localedir = @localedir@
-localstatedir = @localstatedir@
-mandir = @mandir@
-mkdir_p = @mkdir_p@
-oldincludedir = @oldincludedir@
-pdfdir = @pdfdir@
-prefix = @prefix@
-program_transform_name = @program_transform_name@
-psdir = @psdir@
-sbindir = @sbindir@
-sharedstatedir = @sharedstatedir@
-srcdir = @srcdir@
-sysconfdir = @sysconfdir@
-target_alias = @target_alias@
-top_build_prefix = @top_build_prefix@
-top_builddir = @top_builddir@
-top_srcdir = @top_srcdir@
-
-# Shell commands used in Makefiles
-RM = rm -f
-CP = cp
-
-# Some machines need a command to run executables; this is that command
-# so that our tests will run.
-# We use RUNEXEC instead of RUNSERIAL directly because it may be that
-# some tests need to be run with a different command. Older versions
-# of the makefiles used the command
-# $(LIBTOOL) --mode=execute
-# in some directories, for instance.
-RUNEXEC = $(RUNSERIAL)
-
-# Libraries to link to while building
-LIBHDF5 = $(top_builddir)/src/libhdf5.la
-LIBH5TEST = $(top_builddir)/test/libh5test.la
-LIBH5F = $(top_builddir)/fortran/src/libhdf5_fortran.la
-LIBH5FTEST = $(top_builddir)/fortran/test/libh5test_fortran.la
-LIBH5CPP = $(top_builddir)/c++/src/libhdf5_cpp.la
-LIBH5TOOLS = $(top_builddir)/tools/lib/libh5tools.la
-LIBH5_HL = $(top_builddir)/hl/src/libhdf5_hl.la
-LIBH5F_HL = $(top_builddir)/hl/fortran/src/libhdf5hl_fortran.la
-LIBH5CPP_HL = $(top_builddir)/hl/c++/src/libhdf5_hl_cpp.la
-
-# Note that in svn revision 19400 the '/' after DESTDIR in H5* variables below
-# has been removed. According to the official description of DESTDIR by Gnu at
-# http://www.gnu.org/prep/standards/html_node/DESTDIR.html, DESTDIR is
-# prepended to the normal and complete install path that it precedes for the
-# purpose of installing in a temporary directory which is useful for building
-# rpms and other packages. The '/' after ${DESTDIR} will be followed by another
-# '/' at the beginning of the normal install path. When DESTDIR is empty the
-# path then begins with '//', which is incorrect and causes problems at least for
-# Cygwin.
-
-# Scripts used to build examples
-# If only shared libraries have been installed, have h5cc build examples with
-# shared libraries instead of static libraries
-H5CC = ${DESTDIR}$(bindir)/h5cc
-H5CC_PP = ${DESTDIR}$(bindir)/h5pcc
-H5FC = ${DESTDIR}$(bindir)/h5fc
-H5FC_PP = ${DESTDIR}$(bindir)/h5pfc
-H5CPP = ${DESTDIR}$(bindir)/h5c++
-ACLOCAL_AMFLAGS = "-I m4"
-
-# The trace script; this is used on source files from the C library to
-# insert tracing macros.
-TRACE = perl $(top_srcdir)/bin/trace
-
-# .chkexe files are used to mark tests that have run successfully.
-# .chklog files are output from those tests.
-# *.clog are from the MPE option.
-
-# Temporary files
-CHECK_CLEANFILES = *.chkexe *.chklog *.clog parf[12].h5
-
-# These are our main targets
-TEST_PROG_PARA = parallel_test
-
-# Test source files
-parallel_test_SOURCES = ptest.f90 hyper.f90 mdset.f90
-
-# The tests depend on several libraries.
-LDADD = $(LIBH5FTEST) $(LIBH5TEST) $(LIBH5F) $(LIBHDF5)
-
-# Mark this directory as part of the Fortran API
-FORTRAN_API = yes
-
-# Automake needs to be taught how to build lib, progs, and tests targets.
-# These will be filled in automatically for the most part (e.g.,
-# lib_LIBRARIES are built for lib target), but EXTRA_LIB, EXTRA_PROG, and
-# EXTRA_TEST variables are supplied to allow the user to force targets to
-# be built at certain times.
-LIB = $(lib_LIBRARIES) $(lib_LTLIBRARIES) $(noinst_LIBRARIES) \
- $(noinst_LTLIBRARIES) $(check_LIBRARIES) $(check_LTLIBRARIES) $(EXTRA_LIB)
-
-PROGS = $(bin_PROGRAMS) $(bin_SCRIPTS) $(noinst_PROGRAMS) $(noinst_SCRIPTS) \
- $(EXTRA_PROG)
-
-TEST_PROG_CHKEXE = $(TEST_PROG:=.chkexe_)
-TEST_PROG_PARA_CHKEXE = $(TEST_PROG_PARA:=.chkexe_)
-TEST_SCRIPT_CHKSH = $(TEST_SCRIPT:=.chkexe_)
-TEST_SCRIPT_PARA_CHKSH = $(TEST_SCRIPT_PARA:=.chkexe_)
-all: all-am
-
-.SUFFIXES:
-.SUFFIXES: .f90 .lo .o .obj
-$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am $(top_srcdir)/config/commence.am $(top_srcdir)/config/conclude.am $(am__configure_deps)
- @for dep in $?; do \
- case '$(am__configure_deps)' in \
- *$$dep*) \
- ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
- && { if test -f $@; then exit 0; else break; fi; }; \
- exit 1;; \
- esac; \
- done; \
- echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign fortran/testpar/Makefile'; \
- $(am__cd) $(top_srcdir) && \
- $(AUTOMAKE) --foreign fortran/testpar/Makefile
-.PRECIOUS: Makefile
-Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
- @case '$?' in \
- *config.status*) \
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
- *) \
- echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
- cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
- esac;
-
-$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-
-$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
- cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
-$(am__aclocal_m4_deps):
-
-clean-checkPROGRAMS:
- @list='$(check_PROGRAMS)'; test -n "$$list" || exit 0; \
- echo " rm -f" $$list; \
- rm -f $$list || exit $$?; \
- test -n "$(EXEEXT)" || exit 0; \
- list=`for p in $$list; do echo "$$p"; done | sed 's/$(EXEEXT)$$//'`; \
- echo " rm -f" $$list; \
- rm -f $$list
-parallel_test$(EXEEXT): $(parallel_test_OBJECTS) $(parallel_test_DEPENDENCIES)
- @rm -f parallel_test$(EXEEXT)
- $(AM_V_FCLD)$(FCLINK) $(parallel_test_OBJECTS) $(parallel_test_LDADD) $(LIBS)
-
-mostlyclean-compile:
- -rm -f *.$(OBJEXT)
-
-distclean-compile:
- -rm -f *.tab.c
-
-.f90.o:
- $(AM_V_FC) @AM_BACKSLASH@
- $(FCCOMPILE) -c -o $@ $(FCFLAGS_f90) $<
-
-.f90.obj:
- $(AM_V_FC) @AM_BACKSLASH@
- $(FCCOMPILE) -c -o $@ $(FCFLAGS_f90) `$(CYGPATH_W) '$<'`
-
-.f90.lo:
- $(AM_V_FC) @AM_BACKSLASH@
- $(LTFCCOMPILE) -c -o $@ $(FCFLAGS_f90) $<
-
-mostlyclean-libtool:
- -rm -f *.lo
-
-clean-libtool:
- -rm -rf .libs _libs
-
-ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES)
- list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \
- unique=`for i in $$list; do \
- if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
- done | \
- $(AWK) '{ files[$$0] = 1; nonempty = 1; } \
- END { if (nonempty) { for (i in files) print i; }; }'`; \
- mkid -fID $$unique
-tags: TAGS
-
-TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \
- $(TAGS_FILES) $(LISP)
- set x; \
- here=`pwd`; \
- list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \
- unique=`for i in $$list; do \
- if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
- done | \
- $(AWK) '{ files[$$0] = 1; nonempty = 1; } \
- END { if (nonempty) { for (i in files) print i; }; }'`; \
- shift; \
- if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \
- test -n "$$unique" || unique=$$empty_fix; \
- if test $$# -gt 0; then \
- $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
- "$$@" $$unique; \
- else \
- $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
- $$unique; \
- fi; \
- fi
-ctags: CTAGS
-CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \
- $(TAGS_FILES) $(LISP)
- list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \
- unique=`for i in $$list; do \
- if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
- done | \
- $(AWK) '{ files[$$0] = 1; nonempty = 1; } \
- END { if (nonempty) { for (i in files) print i; }; }'`; \
- test -z "$(CTAGS_ARGS)$$unique" \
- || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \
- $$unique
-
-GTAGS:
- here=`$(am__cd) $(top_builddir) && pwd` \
- && $(am__cd) $(top_srcdir) \
- && gtags -i $(GTAGS_ARGS) "$$here"
-
-distclean-tags:
- -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags
-
-distdir: $(DISTFILES)
- @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
- topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
- list='$(DISTFILES)'; \
- dist_files=`for file in $$list; do echo $$file; done | \
- sed -e "s|^$$srcdirstrip/||;t" \
- -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
- case $$dist_files in \
- */*) $(MKDIR_P) `echo "$$dist_files" | \
- sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
- sort -u` ;; \
- esac; \
- for file in $$dist_files; do \
- if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
- if test -d $$d/$$file; then \
- dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
- if test -d "$(distdir)/$$file"; then \
- find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
- fi; \
- if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
- cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
- find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
- fi; \
- cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
- else \
- test -f "$(distdir)/$$file" \
- || cp -p $$d/$$file "$(distdir)/$$file" \
- || exit 1; \
- fi; \
- done
-check-am: all-am
- $(MAKE) $(AM_MAKEFLAGS) $(check_PROGRAMS)
- $(MAKE) $(AM_MAKEFLAGS) check-TESTS
-check: check-am
-all-am: Makefile all-local
-installdirs:
-install: install-am
-install-exec: install-exec-am
-install-data: install-data-am
-uninstall: uninstall-am
-
-install-am: all-am
- @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
-
-installcheck: installcheck-am
-install-strip:
- $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
- install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
- `test -z '$(STRIP)' || \
- echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install
-mostlyclean-generic:
-
-clean-generic:
-
-distclean-generic:
- -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
- -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
-
-maintainer-clean-generic:
- @echo "This command is intended for maintainers to use"
- @echo "it deletes files that may require special tools to rebuild."
-clean: clean-am
-
-clean-am: clean-checkPROGRAMS clean-generic clean-libtool \
- mostlyclean-am
-
-distclean: distclean-am
- -rm -f Makefile
-distclean-am: clean-am distclean-compile distclean-generic \
- distclean-tags
-
-dvi: dvi-am
-
-dvi-am:
-
-html: html-am
-
-html-am:
-
-info: info-am
-
-info-am:
-
-install-data-am:
-
-install-dvi: install-dvi-am
-
-install-dvi-am:
-
-install-exec-am:
-
-install-html: install-html-am
-
-install-html-am:
-
-install-info: install-info-am
-
-install-info-am:
-
-install-man:
-
-install-pdf: install-pdf-am
-
-install-pdf-am:
-
-install-ps: install-ps-am
-
-install-ps-am:
-
-installcheck-am:
-
-maintainer-clean: maintainer-clean-am
- -rm -f Makefile
-maintainer-clean-am: distclean-am maintainer-clean-generic
-
-mostlyclean: mostlyclean-am
-
-mostlyclean-am: mostlyclean-compile mostlyclean-generic \
- mostlyclean-libtool mostlyclean-local
-
-pdf: pdf-am
-
-pdf-am:
-
-ps: ps-am
-
-ps-am:
-
-uninstall-am:
-
-.MAKE: check-am install-am install-strip
-
-.PHONY: CTAGS GTAGS all all-am all-local check check-TESTS check-am \
- clean clean-checkPROGRAMS clean-generic clean-libtool ctags \
- distclean distclean-compile distclean-generic \
- distclean-libtool distclean-tags distdir dvi dvi-am html \
- html-am info info-am install install-am install-data \
- install-data-am install-dvi install-dvi-am install-exec \
- install-exec-am install-html install-html-am install-info \
- install-info-am install-man install-pdf install-pdf-am \
- install-ps install-ps-am install-strip installcheck \
- installcheck-am installdirs maintainer-clean \
- maintainer-clean-generic mostlyclean mostlyclean-compile \
- mostlyclean-generic mostlyclean-libtool mostlyclean-local pdf \
- pdf-am ps ps-am tags uninstall uninstall-am
-
-
-# List all build rules defined by HDF5 Makefiles as "PHONY" targets here.
-# This tells the Makefiles that these targets are not files to be built but
-# commands that should be executed even if a file with the same name already
-# exists.
-.PHONY: build-check-clean build-check-p build-check-s build-lib build-progs \
- build-tests check-clean check-install check-p check-s check-vfd \
- install-doc lib progs tests uninstall-doc _exec_check-s _test help
-
-help:
- @$(top_srcdir)/bin/makehelp
-
-# lib/progs/tests targets recurse into subdirectories. build-* targets
-# build files in this directory.
-build-lib: $(LIB)
-build-progs: $(LIB) $(PROGS)
-build-tests: $(LIB) $(PROGS) $(TESTS)
-
-# General rule for recursive building targets.
-# BUILT_SOURCES contain targets that need to be built before anything else
-# in the directory (e.g., for Fortran type detection)
-lib progs tests check-s check-p :: $(BUILT_SOURCES)
- @$(MAKE) $(AM_MAKEFLAGS) build-$@ || exit 1;
- @for d in X $(SUBDIRS); do \
- if test $$d != X && test $$d != .; then \
- (set -x; cd $$d && $(MAKE) $(AM_MAKEFLAGS) $@) || exit 1; \
- fi; \
- done
-
-# General rule for recursive cleaning targets. Like the rule above,
-# but doesn't require building BUILT_SOURCES.
-check-clean ::
- @$(MAKE) $(AM_MAKEFLAGS) build-$@ || exit 1;
- @for d in X $(SUBDIRS); do \
- if test $$d != X && test $$d != .; then \
- (set -x; cd $$d && $(MAKE) $(AM_MAKEFLAGS) $@) || exit 1; \
- fi; \
- done
-
-# Tell Automake to build tests when the user types `make all' (this is
-# not its default behavior). Also build EXTRA_LIB and EXTRA_PROG since
-# Automake won't build them automatically, either.
-all-local: $(EXTRA_LIB) $(EXTRA_PROG) $(TESTS)
-
-# make install-doc doesn't do anything outside of doc directory, but
-# Makefiles should recognize it.
-# UPDATE: docs no longer reside in this build tree, so this target
-# is depreciated.
-install-doc uninstall-doc:
- @echo "Nothing to be done."
-
-# clean up files generated by tests so they can be re-run.
-build-check-clean:
- $(RM) -rf $(CHECK_CLEANFILES)
-
-# run check-clean whenever mostlyclean is run
-mostlyclean-local: build-check-clean
-
-# check-install is just a synonym for installcheck
-check-install: installcheck
-
-# Run each test in order, passing $(TEST_FLAGS) to the program.
-# Since tests are done in a shell loop, "make -i" does apply inside it.
-# Set HDF5_Make_Ignore to a non-blank string to ignore errors inside the loop.
-# The timestamps give a rough idea how much time the tests use.
-#
-# Note that targets in TESTS (defined above) will be built when the user
-# types 'make tests' or 'make check', but only programs in TEST_PROG,
-# TEST_PROG_PARA, or TEST_SCRIPT will actually be executed.
-check-TESTS: test
-
-test _test:
- @$(MAKE) build-check-s
- @$(MAKE) build-check-p
-
-# Actual execution of check-s.
-build-check-s: $(LIB) $(PROGS) $(TESTS)
- @if test -n "$(TEST_PROG)$(TEST_SCRIPT)"; then \
- echo "===Serial tests in `echo ${PWD} | sed -e s:.*/::` begin `date`==="; \
- fi
- @$(MAKE) $(AM_MAKEFLAGS) _exec_check-s
- @if test -n "$(TEST_PROG)$(TEST_SCRIPT)"; then \
- echo "===Serial tests in `echo ${PWD} | sed -e s:.*/::` ended `date`===";\
- fi
-
-_exec_check-s: $(TEST_PROG_CHKEXE) $(TEST_SCRIPT_CHKSH)
-
-# The dummy.chkexe here prevents the target from being
-# empty if there are no tests in the current directory.
-# $${log} is the log file.
-# $${tname} is the name of test.
-$(TEST_PROG_CHKEXE) $(TEST_PROG_PARA_CHKEXE) dummy.chkexe_:
- @if test "X$@" != "X.chkexe_" && test "X$@" != "Xdummy.chkexe_"; then \
- tname=$(@:.chkexe_=)$(EXEEXT);\
- log=$(@:.chkexe_=.chklog); \
- echo "============================"; \
- if $(top_srcdir)/bin/newer $(@:.chkexe_=.chkexe) $${tname}; then \
- echo "No need to test $${tname} again."; \
- else \
- echo "============================" > $${log}; \
- if test "X$(FORTRAN_API)" = "Xyes"; then \
- echo "Fortran API: Testing $(HDF5_DRIVER) $${tname} $(TEST_FLAGS)"; \
- echo "Fortran API: $(HDF5_DRIVER) $${tname} $(TEST_FLAGS) Test Log" >> $${log}; \
- elif test "X$(CXX_API)" = "Xyes"; then \
- echo "C++ API: Testing $(HDF5_DRIVER) $${tname} $(TEST_FLAGS)"; \
- echo "C++ API: $(HDF5_DRIVER) $${tname} $(TEST_FLAGS) Test Log" >> $${log};\
- else \
- echo "Testing $(HDF5_DRIVER) $${tname} $(TEST_FLAGS)"; \
- echo "$(HDF5_DRIVER) $${tname} $(TEST_FLAGS) Test Log" >> $${log}; \
- fi; \
- echo "============================" >> $${log}; \
- srcdir="$(srcdir)" \
- $(TIME) $(RUNEXEC) ./$${tname} $(TEST_FLAGS) >> $${log} 2>&1 \
- && touch $(@:.chkexe_=.chkexe) || \
- (test $$HDF5_Make_Ignore && echo "*** Error ignored") || \
- (cat $${log} && false) || exit 1; \
- echo "" >> $${log}; \
- echo "Finished testing $${tname} $(TEST_FLAGS)" >> $${log}; \
- echo "============================" >> $${log}; \
- echo "Finished testing $${tname} $(TEST_FLAGS)"; \
- cat $${log}; \
- fi; \
- fi
-
-# The dummysh.chkexe here prevents the target from being
-# empty if there are no tests in the current directory.
-# $${log} is the log file.
-# $${tname} is the name of test.
-$(TEST_SCRIPT_CHKSH) $(TEST_SCRIPT_PARA_CHKSH) dummysh.chkexe_:
- @if test "X$@" != "X.chkexe_" && test "X$@" != "Xdummysh.chkexe_"; then \
- cmd=$(@:.chkexe_=);\
- tname=`basename $$cmd`;\
- chkname=`basename $(@:.chkexe_=.chkexe)`;\
- log=`basename $(@:.chkexe_=.chklog)`; \
- echo "============================"; \
- if $(top_srcdir)/bin/newer $${chkname} $$cmd $(SCRIPT_DEPEND); then \
- echo "No need to test $${tname} again."; \
- else \
- echo "============================" > $${log}; \
- if test "X$(FORTRAN_API)" = "Xyes"; then \
- echo "Fortran API: Testing $${tname} $(TEST_FLAGS)"; \
- echo "Fortran API: $${tname} $(TEST_FLAGS) Test Log" >> $${log}; \
- elif test "X$(CXX_API)" = "Xyes"; then \
- echo "C++ API: Testing $${tname} $(TEST_FLAGS)"; \
- echo "C++ API: $${tname} $(TEST_FLAGS) Test Log" >> $${log}; \
- else \
- echo "Testing $${tname} $(TEST_FLAGS)"; \
- echo "$${tname} $(TEST_FLAGS) Test Log" >> $${log}; \
- fi; \
- echo "============================" >> $${log}; \
- RUNSERIAL="$(RUNSERIAL)" RUNPARALLEL="$(RUNPARALLEL)" \
- srcdir="$(srcdir)" \
- $(TIME) $(SHELL) $$cmd $(TEST_FLAGS) >> $${log} 2>&1 \
- && touch $${chkname} || \
- (test $$HDF5_Make_Ignore && echo "*** Error ignored") || \
- (cat $${log} && false) || exit 1; \
- echo "" >> $${log}; \
- echo "Finished testing $${tname} $(TEST_FLAGS)" >> $${log}; \
- echo "============================" >> $${log}; \
- echo "Finished testing $${tname} $(TEST_FLAGS)"; \
- cat $${log}; \
- fi; \
- echo "============================"; \
- fi
-
-# Actual execution of check-p.
-build-check-p: $(LIB) $(PROGS) $(TESTS)
- @if test -n "$(TEST_PROG_PARA)$(TEST_SCRIPT_PARA)"; then \
- echo "===Parallel tests in `echo ${PWD} | sed -e s:.*/::` begin `date`==="; \
- fi
- @if test -n "$(TEST_PROG_PARA)"; then \
- echo "**** Hint ****"; \
- echo "Parallel test files reside in the current directory" \
- "by default."; \
- echo "Set HDF5_PARAPREFIX to use another directory. E.g.,"; \
- echo " HDF5_PARAPREFIX=/PFS/user/me"; \
- echo " export HDF5_PARAPREFIX"; \
- echo " make check"; \
- echo "**** end of Hint ****"; \
- fi
- @for test in $(TEST_PROG_PARA) dummy; do \
- if test $$test != dummy; then \
- $(MAKE) $(AM_MAKEFLAGS) $$test.chkexe_ \
- RUNEXEC="$(RUNPARALLEL)" || exit 1; \
- fi; \
- done
- @for test in $(TEST_SCRIPT_PARA) dummy; do \
- if test $$test != dummy; then \
- $(MAKE) $(AM_MAKEFLAGS) $$test.chkexe_ || exit 1; \
- fi; \
- done
- @if test -n "$(TEST_PROG_PARA)$(TEST_SCRIPT_PARA)"; then \
- echo "===Parallel tests in `echo ${PWD} | sed -e s:.*/::` ended `date`===";\
- fi
-
-# Run test with different Virtual File Driver
-check-vfd: $(LIB) $(PROGS) $(TESTS)
- @for vfd in $(VFD_LIST) dummy; do \
- if test $$vfd != dummy; then \
- echo "============================"; \
- echo "Testing Virtual File Driver $$vfd"; \
- echo "============================"; \
- $(MAKE) $(AM_MAKEFLAGS) check-clean || exit 1; \
- HDF5_DRIVER=$$vfd $(MAKE) $(AM_MAKEFLAGS) check || exit 1; \
- fi; \
- done
-
-# Tell versions [3.59,3.63) of GNU make to not export all variables.
-# Otherwise a system limit (for SysV at least) may be exceeded.
-.NOEXPORT:
diff --git a/fortran/testpar/hyper.f90 b/fortran/testpar/hyper.F90
index 1d65ae1..8051b38 100644
--- a/fortran/testpar/hyper.f90
+++ b/fortran/testpar/hyper.F90
@@ -1,27 +1,26 @@
! * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
! Copyright by The HDF Group. *
-! Copyright by the Board of Trustees of the University of Illinois. *
! All rights reserved. *
! *
! This file is part of HDF5. The full HDF5 copyright notice, including *
! terms governing use, modification, and redistribution, is contained in *
-! the files COPYING and Copyright.html. COPYING can be found at the root *
-! of the source code distribution tree; Copyright.html can be found at the *
-! root level of an installed copy of the electronic HDF5 document set and *
-! is linked from the top-level documents page. It can also be found at *
-! http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
-! access to either file, you may request a copy from help@hdfgroup.org. *
+! the COPYING file, which can be found at the root of the source code *
+! distribution tree, or in https://www.hdfgroup.org/licenses. *
+! If you do not have access to either file, you may request a copy from *
+! help@hdfgroup.org. *
! * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
-!//////////////////////////////////////////////////////////
+!
! writes/reads dataset by hyperslabs
-!//////////////////////////////////////////////////////////
+!
SUBROUTINE hyper(length,do_collective,do_chunk, mpi_size, mpi_rank, nerrors)
- USE hdf5
+ USE HDF5
+ USE MPI
+ USE TH5_MISC
+
IMPLICIT NONE
- INCLUDE 'mpif.h'
INTEGER, INTENT(in) :: length ! array length
LOGICAL, INTENT(in) :: do_collective ! use collective I/O
@@ -29,7 +28,6 @@ SUBROUTINE hyper(length,do_collective,do_chunk, mpi_size, mpi_rank, nerrors)
INTEGER, INTENT(in) :: mpi_size ! number of processes in the group of communicator
INTEGER, INTENT(in) :: mpi_rank ! rank of the calling process in the communicator
INTEGER, INTENT(inout) :: nerrors ! number of errors
- INTEGER :: mpierror ! MPI hdferror flag
INTEGER :: hdferror ! HDF hdferror flag
INTEGER(hsize_t), DIMENSION(1) :: dims ! dataset dimensions
INTEGER(hsize_t), DIMENSION(1) :: cdims ! chunk dimensions
@@ -50,14 +48,16 @@ SUBROUTINE hyper(length,do_collective,do_chunk, mpi_size, mpi_rank, nerrors)
INTEGER :: icount ! number of elements in array
CHARACTER(len=80) :: filename ! filename
INTEGER :: i
-
- !//////////////////////////////////////////////////////////
+ INTEGER :: actual_io_mode ! The type of I/O performed by this process
+ LOGICAL :: is_coll
+ LOGICAL :: is_coll_true = .TRUE.
+ !
! initialize the array data between the processes (3)
! for the 12 size array we get
! p0 = 1,2,3,4
! p1 = 5,6,7,8
! p2 = 9,10,11,12
- !//////////////////////////////////////////////////////////
+ !
ALLOCATE(wbuf(0:length-1),stat=hdferror)
IF (hdferror /= 0) THEN
@@ -79,17 +79,16 @@ SUBROUTINE hyper(length,do_collective,do_chunk, mpi_size, mpi_rank, nerrors)
wbuf(i) = i
ENDDO
- !//////////////////////////////////////////////////////////
+ !
! HDF5 I/O
- !//////////////////////////////////////////////////////////
+ !
dims(1) = length
cdims(1) = length/mpi_size ! define chunks as the number of processes
- !//////////////////////////////////////////////////////////
+ !
! setup file access property list with parallel I/O access
- !//////////////////////////////////////////////////////////
-
+ !
CALL h5pcreate_f(H5P_FILE_ACCESS_F, fapl_id, hdferror)
CALL check("h5pcreate_f", hdferror, nerrors)
@@ -104,14 +103,67 @@ SUBROUTINE hyper(length,do_collective,do_chunk, mpi_size, mpi_rank, nerrors)
nerrors = nerrors + 1
ENDIF
- !//////////////////////////////////////////////////////////
+ !
! create the file collectively
- !//////////////////////////////////////////////////////////
-
+ !
CALL h5_fixname_f("parf1", filename, fapl_id, hdferror)
- CALL h5fcreate_f(filename, H5F_ACC_TRUNC_F, file_id, hdferror, access_prp = fapl_id)
- CALL check("h5fcreate_f", hdferror, nerrors)
+ IF(do_collective)THEN
+ ! verify settings for file access properties
+
+ ! Collective metadata writes
+ CALL h5pget_coll_metadata_write_f(fapl_id, is_coll, hdferror)
+ CALL check("h5pget_coll_metadata_write_f", hdferror, nerrors)
+ IF(is_coll .NEQV. .FALSE.)THEN
+ PRINT*, "Incorrect property setting for coll metadata writes"
+ nerrors = nerrors + 1
+ ENDIF
+
+ ! Collective metadata read API calling requirement
+ CALL h5pget_all_coll_metadata_ops_f(fapl_id, is_coll, hdferror)
+ CALL check("h5pget_all_coll_metadata_ops_f", hdferror, nerrors)
+ IF(is_coll .NEQV. .FALSE.)THEN
+ PRINT*, "Incorrect property setting for coll metadata API calls requirement"
+ nerrors = nerrors + 1
+ ENDIF
+
+ ! Collective metadata writes
+ CALL h5pset_coll_metadata_write_f(fapl_id, .TRUE., hdferror)
+ CALL check("h5pset_coll_metadata_write_f", hdferror, nerrors)
+ ! Collective metadata READ API calling requirement
+ CALL h5pset_all_coll_metadata_ops_f(fapl_id, is_coll_true, hdferror)
+ CALL check("h5pset_all_coll_metadata_ops_f", hdferror, nerrors)
+
+ CALL h5fcreate_f(filename, H5F_ACC_TRUNC_F, file_id, hdferror, access_prp = fapl_id)
+ CALL check("h5fcreate_f", hdferror, nerrors)
+
+ ! close fapl and retrieve it from file
+ CALL h5pclose_f(fapl_id, hdferror)
+ CALL check("h5pclose_f", hdferror, nerrors)
+ CALL h5fget_access_plist_f(file_id, fapl_id, hdferror)
+ CALL check("h5fget_access_plist_f", hdferror, nerrors)
+
+ ! verify settings for file access properties
+
+ ! Collective metadata writes
+ CALL h5pget_coll_metadata_write_f(fapl_id, is_coll, hdferror)
+ CALL check("h5pget_coll_metadata_write_f", hdferror, nerrors)
+ IF(is_coll .NEQV. .TRUE.)THEN
+ PRINT*, "Incorrect property setting for coll metadata writes"
+ nerrors = nerrors + 1
+ ENDIF
+
+ ! Collective metadata read API calling requirement
+ CALL h5pget_all_coll_metadata_ops_f(fapl_id, is_coll, hdferror)
+ CALL check("h5pget_all_coll_metadata_ops_f", hdferror, nerrors)
+ IF(is_coll .NEQV. .TRUE.)THEN
+ PRINT*, "Incorrect property setting for coll metadata API calls requirement"
+ nerrors = nerrors + 1
+ ENDIF
+ ELSE
+ CALL h5fcreate_f(filename, H5F_ACC_TRUNC_F, file_id, hdferror, access_prp = fapl_id)
+ CALL check("h5fcreate_f", hdferror, nerrors)
+ ENDIF
CALL h5screate_simple_f(1, dims, fspace_id, hdferror)
CALL check("h5screate_simple_f", hdferror, nerrors)
@@ -119,9 +171,9 @@ SUBROUTINE hyper(length,do_collective,do_chunk, mpi_size, mpi_rank, nerrors)
CALL h5screate_simple_f(1, dims, mspace_id, hdferror)
CALL check("h5screate_simple_f", hdferror, nerrors)
- !//////////////////////////////////////////////////////////
+ !
! modify dataset creation properties to enable chunking
- !//////////////////////////////////////////////////////////
+ !
CALL h5pcreate_f(H5P_DATASET_CREATE_F, dcpl_id, hdferror)
CALL check("h5pcreate_f", hdferror, nerrors)
@@ -131,38 +183,38 @@ SUBROUTINE hyper(length,do_collective,do_chunk, mpi_size, mpi_rank, nerrors)
CALL check("h5pset_chunk_f", hdferror, nerrors)
ENDIF
- !//////////////////////////////////////////////////////////
+ !
! create the dataset
- !//////////////////////////////////////////////////////////
+ !
CALL h5dcreate_f(file_id, "dset", H5T_NATIVE_INTEGER, fspace_id, dset_id, hdferror, dcpl_id)
CALL check("h5dcreate_f", hdferror, nerrors)
- !//////////////////////////////////////////////////////////
+ !
! define hyperslab
- !//////////////////////////////////////////////////////////
+ !
counti(1) = icount
start(1) = istart
- !//////////////////////////////////////////////////////////
+ !
! select hyperslab in memory
- !//////////////////////////////////////////////////////////
+ !
CALL h5sselect_hyperslab_f(mspace_id, H5S_SELECT_SET_F, start, counti, hdferror)
CALL check("h5sselect_hyperslab_f", hdferror, nerrors)
- !//////////////////////////////////////////////////////////
+ !
! select hyperslab in the file
- !//////////////////////////////////////////////////////////
+ !
CALL h5sselect_hyperslab_f(fspace_id, H5S_SELECT_SET_F, start, counti, hdferror)
CALL check("h5sselect_hyperslab_f", hdferror, nerrors)
- !//////////////////////////////////////////////////////////
+ !
! create a property list for collective dataset write
- !//////////////////////////////////////////////////////////
+ !
CALL h5pcreate_f(H5P_DATASET_XFER_F, dxpl_id, hdferror)
CALL check("h5pcreate_f", hdferror, nerrors)
@@ -172,17 +224,39 @@ SUBROUTINE hyper(length,do_collective,do_chunk, mpi_size, mpi_rank, nerrors)
CALL check("h5pset_dxpl_mpio_f", hdferror, nerrors)
ENDIF
- !//////////////////////////////////////////////////////////
+ !
! write dataset
- !//////////////////////////////////////////////////////////
+ !
CALL h5dwrite_f(dset_id,H5T_NATIVE_INTEGER,wbuf,dims,hdferror,file_space_id=fspace_id,mem_space_id=mspace_id,xfer_prp=dxpl_id)
CALL check("h5dwrite_f", hdferror, nerrors)
- !//////////////////////////////////////////////////////////
+ ! Check h5pget_mpio_actual_io_mode_f function
+ CALL h5pget_mpio_actual_io_mode_f(dxpl_id, actual_io_mode, hdferror)
+ CALL check("h5pget_mpio_actual_io_mode_f", hdferror, nerrors)
+
+! MSB -- TODO FIX: skipping for now since multi-dataset
+! has no specific path for contiguous collective
+!
+! IF(do_collective.AND.do_chunk)THEN
+! IF(actual_io_mode.NE.H5D_MPIO_CHUNK_COLLECTIVE_F)THEN
+! CALL check("h5pget_mpio_actual_io_mode_f", -1, nerrors)
+! ENDIF
+! ELSEIF(.NOT.do_collective)THEN
+! IF(actual_io_mode.NE.H5D_MPIO_NO_COLLECTIVE_F)THEN
+! CALL check("h5pget_mpio_actual_io_mode_f", -1, nerrors)
+! ENDIF
+! ELSEIF( do_collective.AND.(.NOT.do_chunk))THEN
+! IF(actual_io_mode.NE.H5D_MPIO_CONTIG_COLLECTIVE_F)THEN
+! CALL check("h5pget_mpio_actual_io_mode_f", -1, nerrors)
+! ENDIF
+! ENDIF
+! MSB
+
+ !
! close HDF5 I/O
- !//////////////////////////////////////////////////////////
+ !
CALL h5pclose_f(fapl_id, hdferror)
CALL check("h5pclose_f", hdferror, nerrors)
@@ -205,86 +279,85 @@ SUBROUTINE hyper(length,do_collective,do_chunk, mpi_size, mpi_rank, nerrors)
CALL h5fclose_f(file_id, hdferror)
CALL check("h5fclose_f", hdferror, nerrors)
- !//////////////////////////////////////////////////////////
+ !
! reopen file with read access
- !//////////////////////////////////////////////////////////
+ !
CALL h5pcreate_f(H5P_FILE_ACCESS_F, fapl_id, hdferror)
CALL check("h5pcreate_f", hdferror, nerrors)
CALL h5pset_fapl_mpio_f(fapl_id, MPI_COMM_WORLD, MPI_INFO_NULL, hdferror)
- CALL check("h5pcreate_f", hdferror, nerrors)
+ CALL check("h5pset_fapl_mpio_f", hdferror, nerrors)
CALL h5fopen_f(filename, H5F_ACC_RDWR_F, file_id, hdferror, access_prp = fapl_id)
- CALL check("h5pcreate_f", hdferror, nerrors)
+ CALL check("h5fopen_f", hdferror, nerrors)
CALL h5screate_simple_f(1, dims, fspace_id, hdferror)
- CALL check("h5pcreate_f", hdferror, nerrors)
+ CALL check("h5screate_simple_f", hdferror, nerrors)
CALL h5screate_simple_f(1, dims, mspace_id, hdferror)
- CALL check("h5pcreate_f", hdferror, nerrors)
+ CALL check(" h5screate_simple_f", hdferror, nerrors)
CALL h5dopen_f(file_id, "dset", dset_id, hdferror)
- CALL check("h5pcreate_f", hdferror, nerrors)
+ CALL check("h5dopen_f", hdferror, nerrors)
- !//////////////////////////////////////////////////////////
+ !
! select hyperslab in memory
- !//////////////////////////////////////////////////////////
+ !
CALL h5sselect_hyperslab_f(mspace_id, H5S_SELECT_SET_F, start, counti, hdferror)
- CALL check("h5pcreate_f", hdferror, nerrors)
+ CALL check("h5sselect_hyperslab_f", hdferror, nerrors)
- !//////////////////////////////////////////////////////////
+ !
! select hyperslab in the file
- !//////////////////////////////////////////////////////////
+ !
CALL h5sselect_hyperslab_f(fspace_id, H5S_SELECT_SET_F, start, counti, hdferror)
- CALL check("h5pcreate_f", hdferror, nerrors)
+ CALL check("h5sselect_hyperslab_f", hdferror, nerrors)
- !//////////////////////////////////////////////////////////
+ !
! create a property list for collective dataset read
- !//////////////////////////////////////////////////////////
+ !
CALL h5pcreate_f(H5P_DATASET_XFER_F, dxpl_id, hdferror)
CALL check("h5pcreate_f", hdferror, nerrors)
-
IF (do_collective) THEN
CALL h5pset_dxpl_mpio_f(dxpl_id, H5FD_MPIO_COLLECTIVE_F, hdferror)
- CALL check("h5pcreate_f", hdferror, nerrors)
+ CALL check("h5pset_dxpl_mpio_f", hdferror, nerrors)
ENDIF
- !//////////////////////////////////////////////////////////
+ !
! read dataset
- !//////////////////////////////////////////////////////////
+ !
CALL h5dread_f(dset_id,H5T_NATIVE_INTEGER,rbuf,dims,hdferror,file_space_id=fspace_id,mem_space_id=mspace_id,xfer_prp=dxpl_id)
- CALL check("h5pcreate_f", hdferror, nerrors)
+ CALL check("h5dread_f", hdferror, nerrors)
- !//////////////////////////////////////////////////////////
+ !
! close HDF5 I/O
- !//////////////////////////////////////////////////////////
+ !
CALL h5pclose_f(fapl_id, hdferror)
- CALL check("h5pcreate_f", hdferror, nerrors)
+ CALL check("h5pclose_f", hdferror, nerrors)
CALL h5pclose_f(dxpl_id, hdferror)
- CALL check("h5pcreate_f", hdferror, nerrors)
+ CALL check("h5pclose_f", hdferror, nerrors)
CALL h5sclose_f(fspace_id, hdferror)
- CALL check("h5pcreate_f", hdferror, nerrors)
+ CALL check("h5sclose_f", hdferror, nerrors)
CALL h5sclose_f(mspace_id, hdferror)
- CALL check("h5pcreate_f", hdferror, nerrors)
+ CALL check("h5sclose_f", hdferror, nerrors)
CALL h5dclose_f(dset_id, hdferror)
- CALL check("h5pcreate_f", hdferror, nerrors)
+ CALL check("h5dclose_f", hdferror, nerrors)
CALL h5fclose_f(file_id, hdferror)
- CALL check("h5pcreate_f", hdferror, nerrors)
+ CALL check("h5fclose_f", hdferror, nerrors)
- !//////////////////////////////////////////////////////////
+ !
! compare read and write data. each process compares a subset of the array
- !//////////////////////////////////////////////////////////
+ !
DO i = istart, iend-1
IF( wbuf(i) /= rbuf(i)) THEN
diff --git a/fortran/testpar/mdset.f90 b/fortran/testpar/mdset.F90
index 9d14a50..6757f4d 100644
--- a/fortran/testpar/mdset.f90
+++ b/fortran/testpar/mdset.F90
@@ -1,16 +1,13 @@
! * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
! Copyright by The HDF Group. *
-! Copyright by the Board of Trustees of the University of Illinois. *
! All rights reserved. *
! *
! This file is part of HDF5. The full HDF5 copyright notice, including *
! terms governing use, modification, and redistribution, is contained in *
-! the files COPYING and Copyright.html. COPYING can be found at the root *
-! of the source code distribution tree; Copyright.html can be found at the *
-! root level of an installed copy of the electronic HDF5 document set and *
-! is linked from the top-level documents page. It can also be found at *
-! http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
-! access to either file, you may request a copy from help@hdfgroup.org. *
+! the COPYING file, which can be found at the root of the source code *
+! distribution tree, or in https://www.hdfgroup.org/licenses. *
+! If you do not have access to either file, you may request a copy from *
+! help@hdfgroup.org. *
! * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
@@ -19,9 +16,11 @@
!//////////////////////////////////////////////////////////
SUBROUTINE multiple_dset_write(length, do_collective, do_chunk, mpi_size, mpi_rank, nerrors)
- USE hdf5
+ USE HDF5
+ USE MPI
+ USE TH5_MISC
+
IMPLICIT NONE
- INCLUDE 'mpif.h'
INTEGER, INTENT(in) :: length ! array length
LOGICAL, INTENT(in) :: do_collective ! use collective I/O
@@ -29,7 +28,6 @@ SUBROUTINE multiple_dset_write(length, do_collective, do_chunk, mpi_size, mpi_ra
INTEGER, INTENT(in) :: mpi_size ! number of processes in the group of communicator
INTEGER, INTENT(in) :: mpi_rank ! rank of the calling process in the communicator
INTEGER, INTENT(inout) :: nerrors ! number of errors
- INTEGER :: mpierror ! MPI hdferror flag
INTEGER :: hdferror ! HDF hdferror flag
INTEGER(hsize_t), DIMENSION(1) :: dims ! dataset dimensions
INTEGER(hsize_t), DIMENSION(1) :: cdims ! chunk dimensions
@@ -218,30 +216,30 @@ SUBROUTINE multiple_dset_write(length, do_collective, do_chunk, mpi_size, mpi_ra
CALL check("h5pcreate_f", hdferror, nerrors)
CALL h5pset_fapl_mpio_f(fapl_id, MPI_COMM_WORLD, MPI_INFO_NULL, hdferror)
- CALL check("h5pcreate_f", hdferror, nerrors)
+ CALL check("h5pset_fapl_mpio_f", hdferror, nerrors)
CALL h5fopen_f(filename, H5F_ACC_RDWR_F, file_id, hdferror, access_prp = fapl_id)
- CALL check("h5pcreate_f", hdferror, nerrors)
+ CALL check("h5fopen_f", hdferror, nerrors)
CALL h5screate_simple_f(1, dims, fspace_id, hdferror)
- CALL check("h5pcreate_f", hdferror, nerrors)
+ CALL check("h5screate_simple_f", hdferror, nerrors)
CALL h5screate_simple_f(1, dims, mspace_id, hdferror)
- CALL check("h5pcreate_f", hdferror, nerrors)
+ CALL check("h5screate_simple_f", hdferror, nerrors)
!//////////////////////////////////////////////////////////
! select hyperslab in memory
!//////////////////////////////////////////////////////////
CALL h5sselect_hyperslab_f(mspace_id, H5S_SELECT_SET_F, start, counti, hdferror)
- CALL check("h5pcreate_f", hdferror, nerrors)
+ CALL check("h5sselect_hyperslab_f", hdferror, nerrors)
!//////////////////////////////////////////////////////////
! select hyperslab in the file
!//////////////////////////////////////////////////////////
CALL h5sselect_hyperslab_f(fspace_id, H5S_SELECT_SET_F, start, counti, hdferror)
- CALL check("h5pcreate_f", hdferror, nerrors)
+ CALL check("h5sselect_hyperslab_f", hdferror, nerrors)
!//////////////////////////////////////////////////////////
! create a property list for collective dataset read
@@ -252,7 +250,7 @@ SUBROUTINE multiple_dset_write(length, do_collective, do_chunk, mpi_size, mpi_ra
IF (do_collective) THEN
CALL h5pset_dxpl_mpio_f(dxpl_id, H5FD_MPIO_COLLECTIVE_F, hdferror)
- CALL check("h5pcreate_f", hdferror, nerrors)
+ CALL check("h5pset_dxpl_mpio_f", hdferror, nerrors)
ENDIF
!//////////////////////////////////////////////////////////
@@ -266,11 +264,11 @@ SUBROUTINE multiple_dset_write(length, do_collective, do_chunk, mpi_size, mpi_ra
! create this dataset
CALL h5dopen_f(file_id, dsetname, dset_id, hdferror)
- CALL check("h5pcreate_f", hdferror, nerrors)
+ CALL check("h5dopen_f", hdferror, nerrors)
! read this dataset
CALL h5dread_f(dset_id,H5T_NATIVE_INTEGER,rbuf,dims,hdferror,file_space_id=fspace_id,mem_space_id=mspace_id,xfer_prp=dxpl_id)
- CALL check("h5pcreate_f", hdferror, nerrors)
+ CALL check("h5dread_f", hdferror, nerrors)
! close this dataset
CALL h5dclose_f(dset_id, hdferror)
@@ -296,20 +294,19 @@ SUBROUTINE multiple_dset_write(length, do_collective, do_chunk, mpi_size, mpi_ra
!//////////////////////////////////////////////////////////
CALL h5pclose_f(fapl_id, hdferror)
- CALL check("h5pcreate_f", hdferror, nerrors)
+ CALL check("h5pclose_f", hdferror, nerrors)
CALL h5pclose_f(dxpl_id, hdferror)
- CALL check("h5pcreate_f", hdferror, nerrors)
+ CALL check("h5pclose_f", hdferror, nerrors)
CALL h5sclose_f(fspace_id, hdferror)
- CALL check("h5pcreate_f", hdferror, nerrors)
+ CALL check("h5sclose_f", hdferror, nerrors)
CALL h5sclose_f(mspace_id, hdferror)
- CALL check("h5pcreate_f", hdferror, nerrors)
+ CALL check("h5sclose_f", hdferror, nerrors)
CALL h5fclose_f(file_id, hdferror)
- CALL check("h5pcreate_f", hdferror, nerrors)
-
+ CALL check("h5fclose_f", hdferror, nerrors)
DEALLOCATE(wbuf)
DEALLOCATE(rbuf)
diff --git a/fortran/testpar/multidsetrw.F90 b/fortran/testpar/multidsetrw.F90
new file mode 100644
index 0000000..e39900c
--- /dev/null
+++ b/fortran/testpar/multidsetrw.F90
@@ -0,0 +1,233 @@
+! * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+! Copyright by The HDF Group. *
+! All rights reserved. *
+! *
+! This file is part of HDF5. The full HDF5 copyright notice, including *
+! terms governing use, modification, and redistribution, is contained in *
+! the COPYING file, which can be found at the root of the source code *
+! distribution tree, or in https://www.hdfgroup.org/licenses. *
+! If you do not have access to either file, you may request a copy from *
+! help@hdfgroup.org. *
+! * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+
+!
+! writes/reads datasets by hyperslabs using the multi-dataset routines h5dread_multi
+! and h5dwrite_multi
+!
+
+SUBROUTINE pmultiple_dset_hyper_rw(do_collective, do_chunk, mpi_size, mpi_rank, nerrors)
+
+ USE iso_c_binding
+ USE TH5_MISC
+ USE hdf5
+ USE mpi
+ IMPLICIT NONE
+
+  LOGICAL, INTENT(in) :: do_collective ! use collective I/O
+ LOGICAL, INTENT(in) :: do_chunk ! use chunking
+ INTEGER, INTENT(in) :: mpi_size ! number of processes in the group of communicator
+ INTEGER, INTENT(in) :: mpi_rank ! rank of the calling process in the communicator
+ INTEGER, INTENT(inout) :: nerrors ! number of errors
+ CHARACTER(LEN=80):: dsetname ! Dataset name
+ INTEGER(hsize_t), DIMENSION(1:2) :: cdims ! chunk dimensions
+
+ INTEGER(HID_T) :: file_id ! File identifier
+ INTEGER(HID_T) :: filespace ! Dataspace identifier in file
+ INTEGER(HID_T) :: memspace ! Dataspace identifier in memory
+ INTEGER(HID_T) :: plist_id ! Property list identifier
+ INTEGER(HID_T) :: dcpl_id ! Dataset creation property list
+ INTEGER(HSIZE_T), DIMENSION(1:2) :: dimsf ! Dataset dimensions.
+
+ INTEGER(HSIZE_T), DIMENSION(1:2) :: count
+ INTEGER(HSSIZE_T), DIMENSION(1:2) :: offset
+ INTEGER, ALLOCATABLE, DIMENSION(:,:,:), TARGET :: DATA ! Data to write
+  INTEGER, ALLOCATABLE, DIMENSION(:,:,:), TARGET :: rDATA ! Data to read
+ INTEGER, PARAMETER :: rank = 2 ! Dataset rank
+ INTEGER :: i
+ INTEGER(HSIZE_T) :: ii, jj, kk, istart
+ INTEGER :: error ! Error flags
+
+ INTEGER(SIZE_T), PARAMETER :: ndsets = 5
+ INTEGER(HID_T), DIMENSION(1:ndsets) :: dset_id
+ INTEGER(HID_T), DIMENSION(1:ndsets) :: mem_type_id
+ INTEGER(HID_T), DIMENSION(1:ndsets) :: mem_space_id
+ INTEGER(HID_T), DIMENSION(1:ndsets) :: file_space_id
+ TYPE(C_PTR), DIMENSION(1:ndsets) :: buf_md
+ INTEGER(SIZE_T) :: obj_count
+ INTEGER :: data_xfer_mode
+
+ dimsf(1) = 5_hsize_t
+ dimsf(2) = INT(mpi_size, hsize_t)*8_hsize_t
+
+ !
+ ! Setup file access property list with parallel I/O access.
+ !
+ CALL h5pcreate_f(H5P_FILE_ACCESS_F, plist_id, error)
+ CALL check("h5pcreate_f", error, nerrors)
+ CALL h5pset_fapl_mpio_f(plist_id, MPI_COMM_WORLD, MPI_INFO_NULL, error)
+ CALL check("h5pset_fapl_mpio_f", error, nerrors)
+ !
+ ! Create the file collectively.
+ !
+ CALL h5fcreate_f("parf2.h5", H5F_ACC_TRUNC_F, file_id, error, access_prp = plist_id)
+ CALL check("h5fcreate_f", error, nerrors)
+ CALL h5pclose_f(plist_id, error)
+ CALL check("h5pclose_f", error, nerrors)
+ !
+ ! Create the data space for the dataset.
+ !
+ CALL h5screate_simple_f(rank, dimsf, filespace, error)
+ CALL check("h5screate_simple_f", error, nerrors)
+ !
+ ! Each process defines dataset in memory and writes it to the hyperslab
+ ! in the file.
+ !
+ count(1) = dimsf(1)
+ count(2) = dimsf(2)/mpi_size
+ offset(1) = 0
+ offset(2) = mpi_rank * count(2)
+ CALL h5screate_simple_f(rank, count, memspace, error)
+ CALL check("h5screate_simple_f", error, nerrors)
+
+ !
+ ! Modify dataset creation properties to enable chunking
+ !
+
+ CALL h5pcreate_f(H5P_DATASET_CREATE_F, dcpl_id, error)
+ CALL check("h5pcreate_f", error, nerrors)
+
+ IF (do_chunk) THEN
+ cdims(1) = dimsf(1)
+ cdims(2) = dimsf(2)/mpi_size/2
+ CALL h5pset_chunk_f(dcpl_id, 2, cdims, error)
+ CALL check("h5pset_chunk_f", error, nerrors)
+ ENDIF
+ !
+ ! Select hyperslab in the file.
+ !
+ CALL h5sselect_hyperslab_f(filespace, H5S_SELECT_SET_F, offset, count, error)
+ CALL check("h5sselect_hyperslab_f", error, nerrors)
+ !
+ ! Initialize data buffer
+ !
+ ALLOCATE ( DATA(COUNT(1),COUNT(2), ndsets))
+ ALLOCATE ( rdata(COUNT(1),COUNT(2), ndsets))
+
+ ! Create property list for collective dataset write
+ !
+ CALL h5pcreate_f(H5P_DATASET_XFER_F, plist_id, error)
+ CALL check("h5pcreate_f", error, nerrors)
+ IF(do_collective)THEN
+ CALL h5pset_dxpl_mpio_f(plist_id, H5FD_MPIO_COLLECTIVE_F, error)
+ CALL check("h5pset_dxpl_mpio_f", error, nerrors)
+ ELSE
+ CALL h5pset_dxpl_mpio_f(plist_id, H5FD_MPIO_INDEPENDENT_F, error)
+ CALL check("h5pset_dxpl_mpio_f", error, nerrors)
+ ENDIF
+
+ !
+ ! Create the dataset with default properties.
+ !
+ mem_type_id(1:ndsets) = H5T_NATIVE_INTEGER
+ mem_space_id(1:ndsets) = memspace
+ file_space_id(1:ndsets)= filespace
+
+ DO ii = 1, ndsets
+ ! Create the data
+ DO kk = 1, COUNT(1)
+ DO jj = 1, COUNT(2)
+ istart = (kk-1)*dimsf(2) + mpi_rank*COUNT(2)
+ DATA(kk,jj,ii) = INT((istart + jj)*10**(ii-1))
+ ENDDO
+ ENDDO
+       ! Point to the data
+ buf_md(ii) = C_LOC(DATA(1,1,ii))
+
+       ! write the dataset name into the character variable "dsetname"
+ WRITE(dsetname,'("dataset ",I0)') ii
+ ! create the dataset
+ CALL h5dcreate_f(file_id, dsetname, H5T_NATIVE_INTEGER, filespace, dset_id(ii), error, dcpl_id)
+ CALL check("h5dcreate_f", error, nerrors)
+ ENDDO
+
+ !
+ ! Write the dataset collectively.
+ !
+ CALL h5dwrite_multi_f(ndsets, dset_id, mem_type_id, mem_space_id, file_space_id, buf_md, error, plist_id)
+ CALL check("h5dwrite_multi_f", error, nerrors)
+
+ CALL h5pget_dxpl_mpio_f(plist_id, data_xfer_mode, error)
+ CALL check("h5pget_dxpl_mpio_f", error, nerrors)
+
+ IF(do_collective)THEN
+ IF(data_xfer_mode.NE.H5FD_MPIO_COLLECTIVE_F)THEN
+ nerrors = nerrors + 1
+ ENDIF
+ ENDIF
+
+ DO i = 1, ndsets
+ ! Point to the read buffer
+ buf_md(i) = C_LOC(rdata(1,1,i))
+ ENDDO
+
+ CALL H5Dread_multi_f(ndsets, dset_id, mem_type_id, mem_space_id, file_space_id, buf_md, error, plist_id)
+ CALL check("h5dread_multi_f", error, nerrors)
+
+ CALL h5pget_dxpl_mpio_f(plist_id, data_xfer_mode, error)
+ CALL check("h5pget_dxpl_mpio_f", error, nerrors)
+
+ IF(do_collective)THEN
+ IF(data_xfer_mode.NE.H5FD_MPIO_COLLECTIVE_F)THEN
+ nerrors = nerrors + 1
+ ENDIF
+ ENDIF
+
+ DO i = 1, ndsets
+ ! Close all the datasets
+ CALL h5dclose_f(dset_id(i), error)
+ CALL check("h5dclose_f", error, nerrors)
+ ENDDO
+
+  ! compare the read and write data buffers
+  DO ii = 1, ndsets
+     ! Compare the data element by element
+ DO kk = 1, COUNT(1)
+ DO jj = 1, COUNT(2)
+ IF(rDATA(kk,jj,ii).NE.DATA(kk,jj,ii))THEN
+ nerrors = nerrors + 1
+ ENDIF
+ ENDDO
+ ENDDO
+ ENDDO
+ !
+ ! Deallocate data buffer.
+ !
+ DEALLOCATE(data, rdata)
+
+ !
+ ! Close dataspaces.
+ !
+ CALL h5sclose_f(filespace, error)
+ CALL check("h5sclose_f", error, nerrors)
+ CALL h5sclose_f(memspace, error)
+ CALL check("h5sclose_f", error, nerrors)
+ !
+ ! Close the dataset and property list.
+ !
+ CALL h5pclose_f(dcpl_id, error)
+ CALL check("h5pclose_f", error, nerrors)
+ CALL h5pclose_f(plist_id, error)
+ CALL check("h5pclose_f", error, nerrors)
+
+ CALL h5fget_obj_count_f(file_id, H5F_OBJ_ALL_F, obj_count, error)
+ IF(obj_count.NE.1)THEN
+ nerrors = nerrors + 1
+ END IF
+
+ !
+ ! Close the file.
+ !
+ CALL h5fclose_f(file_id, error)
+ CALL check("h5fclose_f", error, nerrors)
+
+END SUBROUTINE pmultiple_dset_hyper_rw
diff --git a/fortran/testpar/ptest.F90 b/fortran/testpar/ptest.F90
new file mode 100644
index 0000000..2974933
--- /dev/null
+++ b/fortran/testpar/ptest.F90
@@ -0,0 +1,115 @@
+! * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+! Copyright by The HDF Group. *
+! All rights reserved. *
+! *
+! This file is part of HDF5. The full HDF5 copyright notice, including *
+! terms governing use, modification, and redistribution, is contained in *
+! the COPYING file, which can be found at the root of the source code *
+! distribution tree, or in https://www.hdfgroup.org/licenses. *
+! If you do not have access to either file, you may request a copy from *
+! help@hdfgroup.org. *
+! * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+
+!
+! main program for parallel HDF5 Fortran tests
+!
+
+PROGRAM parallel_test
+ USE HDF5
+ USE MPI
+ USE TH5_MISC
+
+ IMPLICIT NONE
+
+  INTEGER :: mpierror ! MPI error flag
+  INTEGER :: hdferror ! HDF error flag
+ INTEGER :: ret_total_error = 0 ! number of errors in subroutine
+ INTEGER :: total_error = 0 ! sum of the number of errors
+ INTEGER :: mpi_size ! number of processes in the group of communicator
+ INTEGER :: mpi_rank ! rank of the calling process in the communicator
+ INTEGER :: length = 12000 ! length of array
+ INTEGER :: i,j, sum
+ ! use collective MPI I/O
+ LOGICAL, DIMENSION(1:2) :: do_collective = (/.FALSE.,.TRUE./)
+ CHARACTER(LEN=11), DIMENSION(1:2) :: chr_collective =(/"independent", "collective "/)
+ ! use chunking
+ LOGICAL, DIMENSION(1:2) :: do_chunk = (/.FALSE.,.TRUE./)
+ CHARACTER(LEN=10), DIMENSION(1:2) :: chr_chunk =(/"contiguous", "chunk "/)
+
+ !
+ ! initialize MPI
+ !
+ CALL mpi_init(mpierror)
+ IF (mpierror .NE. MPI_SUCCESS) THEN
+ WRITE(*,*) "MPI_INIT *FAILED*"
+ ENDIF
+ CALL mpi_comm_rank( MPI_COMM_WORLD, mpi_rank, mpierror )
+ IF (mpierror .NE. MPI_SUCCESS) THEN
+ WRITE(*,*) "MPI_COMM_RANK *FAILED* Process = ", mpi_rank
+ ENDIF
+ CALL mpi_comm_size( MPI_COMM_WORLD, mpi_size, mpierror )
+ IF (mpierror .NE. MPI_SUCCESS) THEN
+ WRITE(*,*) "MPI_COMM_SIZE *FAILED* Process = ", mpi_rank
+ ENDIF
+ !
+ ! initialize the HDF5 fortran interface
+ !
+ CALL h5open_f(hdferror)
+ !
+ ! test write/read dataset by hyperslabs (contiguous/chunk) with independent/collective MPI I/O
+ !
+ DO i = 1, 2
+ DO j = 1, 2
+ ret_total_error = 0
+ CALL hyper(length, do_collective(j), do_chunk(i), mpi_size, mpi_rank, ret_total_error)
+ IF(mpi_rank==0) CALL write_test_status(ret_total_error, &
+ "Writing/reading dataset by hyperslabs ("//TRIM(chr_chunk(i))//" layout, "//TRIM(chr_collective(j))//" MPI I/O)", &
+ total_error)
+ ENDDO
+ ENDDO
+
+ !
+ ! test write/read several datasets (independent MPI I/O)
+ !
+ ret_total_error = 0
+ CALL multiple_dset_write(length, do_collective(1), do_chunk(1), mpi_size, mpi_rank, ret_total_error)
+ IF(mpi_rank==0) CALL write_test_status(ret_total_error, &
+ 'Writing/reading several datasets (contiguous layout, independent MPI I/O)', total_error)
+ !
+ ! test write/read multiple hyperslab datasets
+ !
+ DO i = 1, 2
+ DO j = 1, 2
+ ret_total_error = 0
+ CALL pmultiple_dset_hyper_rw(do_collective(j), do_chunk(i), mpi_size, mpi_rank, ret_total_error)
+ IF(mpi_rank==0) CALL write_test_status(ret_total_error, &
+ "Writing/reading multiple datasets by hyperslab ("//TRIM(chr_chunk(i))//" layout, "&
+ //TRIM(chr_collective(j))//" MPI I/O)", total_error)
+ ENDDO
+ ENDDO
+ !
+ ! close HDF5 interface
+ !
+ CALL h5close_f(hdferror)
+
+ CALL MPI_ALLREDUCE(total_error, sum, 1, MPI_INTEGER, MPI_SUM, MPI_COMM_WORLD, mpierror)
+
+ !
+ ! close MPI
+ !
+  IF (sum == 0) THEN
+ CALL mpi_finalize(mpierror)
+ IF (mpierror .NE. MPI_SUCCESS) THEN
+ WRITE(*,*) "MPI_FINALIZE *FAILED* Process = ", mpi_rank
+ ENDIF
+ ELSE
+ WRITE(*,*) 'Errors detected in process ', mpi_rank
+ CALL mpi_abort(MPI_COMM_WORLD, 1, mpierror)
+ IF (mpierror .NE. MPI_SUCCESS) THEN
+ WRITE(*,*) "MPI_ABORT *FAILED* Process = ", mpi_rank
+ ENDIF
+ ENDIF
+ !
+ ! end main program
+ !
+END PROGRAM parallel_test
diff --git a/fortran/testpar/ptest.f90 b/fortran/testpar/ptest.f90
deleted file mode 100644
index e474668..0000000
--- a/fortran/testpar/ptest.f90
+++ /dev/null
@@ -1,135 +0,0 @@
-! * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
-! Copyright by The HDF Group. *
-! Copyright by the Board of Trustees of the University of Illinois. *
-! All rights reserved. *
-! *
-! This file is part of HDF5. The full HDF5 copyright notice, including *
-! terms governing use, modification, and redistribution, is contained in *
-! the files COPYING and Copyright.html. COPYING can be found at the root *
-! of the source code distribution tree; Copyright.html can be found at the *
-! root level of an installed copy of the electronic HDF5 document set and *
-! is linked from the top-level documents page. It can also be found at *
-! http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
-! access to either file, you may request a copy from help@hdfgroup.org. *
-! * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
-
-!//////////////////////////////////////////////////////////
-! main program for parallel HDF5 Fortran tests
-!//////////////////////////////////////////////////////////
-
-PROGRAM parallel_test
- USE hdf5
- IMPLICIT NONE
- INCLUDE 'mpif.h'
-
- INTEGER :: mpierror ! MPI hdferror flag
- INTEGER :: hdferror ! HDF hdferror flag
- LOGICAL :: do_collective ! use collective MPI I/O
- LOGICAL :: do_chunk ! use chunking
- INTEGER :: nerrors = 0 ! number of errors
- INTEGER :: mpi_size ! number of processes in the group of communicator
- INTEGER :: mpi_rank ! rank of the calling process in the communicator
- INTEGER :: length = 12000 ! length of array
-
- !//////////////////////////////////////////////////////////
- ! initialize MPI
- !//////////////////////////////////////////////////////////
-
- CALL mpi_init(mpierror)
- IF (mpierror .NE. MPI_SUCCESS) THEN
- WRITE(*,*) "MPI_INIT *FAILED*"
- ENDIF
- CALL mpi_comm_rank( MPI_COMM_WORLD, mpi_rank, mpierror )
- IF (mpierror .NE. MPI_SUCCESS) THEN
- WRITE(*,*) "MPI_COMM_RANK *FAILED* Process = ", mpi_rank
- ENDIF
- CALL mpi_comm_size( MPI_COMM_WORLD, mpi_size, mpierror )
- IF (mpierror .NE. MPI_SUCCESS) THEN
- WRITE(*,*) "MPI_COMM_SIZE *FAILED* Process = ", mpi_rank
- ENDIF
- !//////////////////////////////////////////////////////////
- ! initialize the HDF5 fortran interface
- !//////////////////////////////////////////////////////////
-
- CALL h5open_f(hdferror)
-
- !//////////////////////////////////////////////////////////
- ! test write/read dataset by hyperslabs with independent MPI I/O
- !//////////////////////////////////////////////////////////
-
- IF (mpi_rank == 0) WRITE(*,*) 'Writing/reading dataset by hyperslabs (contiguous layout, independent MPI I/O)'
-
- do_collective = .FALSE.
- do_chunk = .FALSE.
- CALL hyper(length, do_collective, do_chunk, mpi_size, mpi_rank, nerrors)
-
- !//////////////////////////////////////////////////////////
- ! test write/read dataset by hyperslabs with collective MPI I/O
- !//////////////////////////////////////////////////////////
-
- IF (mpi_rank == 0) WRITE(*,*) 'Writing/reading dataset by hyperslabs (contiguous layout, collective MPI I/O)'
-
- do_collective = .TRUE.
- do_chunk = .FALSE.
- CALL hyper(length, do_collective, do_chunk, mpi_size, mpi_rank, nerrors)
-
- !//////////////////////////////////////////////////////////
- ! test write/read dataset by hyperslabs with independent MPI I/O
- !//////////////////////////////////////////////////////////
-
- IF (mpi_rank == 0) WRITE(*,*) 'Writing/reading dataset by hyperslabs (chunk layout, independent MPI I/O)'
-
- do_collective = .FALSE.
- do_chunk = .TRUE.
- CALL hyper(length, do_collective, do_chunk, mpi_size, mpi_rank, nerrors)
-
- !//////////////////////////////////////////////////////////
- ! test write/read dataset by hyperslabs with collective MPI I/O
- !//////////////////////////////////////////////////////////
-
- IF (mpi_rank == 0) WRITE(*,*) 'Writing/reading dataset by hyperslabs (chunk layout, collective MPI I/O)'
-
- do_collective = .TRUE.
- do_chunk = .TRUE.
- CALL hyper(length, do_collective, do_chunk, mpi_size, mpi_rank, nerrors)
-
- !//////////////////////////////////////////////////////////
- ! test write/read several datasets (independent MPI I/O)
- !//////////////////////////////////////////////////////////
-
- IF (mpi_rank == 0) WRITE(*,*) 'Writing/reading several datasets (contiguous layout, independent MPI I/O)'
-
- do_collective = .FALSE.
- do_chunk = .FALSE.
- CALL multiple_dset_write(length, do_collective, do_chunk, mpi_size, mpi_rank, nerrors)
-
-
- !//////////////////////////////////////////////////////////
- ! close HDF5 interface
- !//////////////////////////////////////////////////////////
-
- CALL h5close_f(hdferror)
-
- !//////////////////////////////////////////////////////////
- ! close MPI
- !//////////////////////////////////////////////////////////
-
- IF (nerrors == 0) THEN
- CALL mpi_finalize(mpierror)
- IF (mpierror .NE. MPI_SUCCESS) THEN
- WRITE(*,*) "MPI_FINALIZE *FAILED* Process = ", mpi_rank
- ENDIF
- ELSE
- WRITE(*,*) 'Errors detected in process ', mpi_rank
- CALL mpi_abort(MPI_COMM_WORLD, 1, mpierror)
- IF (mpierror .NE. MPI_SUCCESS) THEN
- WRITE(*,*) "MPI_ABORT *FAILED* Process = ", mpi_rank
- ENDIF
- ENDIF
-
- !//////////////////////////////////////////////////////////
- ! end main program
- !//////////////////////////////////////////////////////////
-
-END PROGRAM parallel_test
-
diff --git a/fortran/testpar/subfiling.F90 b/fortran/testpar/subfiling.F90
new file mode 100644
index 0000000..18614b6
--- /dev/null
+++ b/fortran/testpar/subfiling.F90
@@ -0,0 +1,401 @@
+! * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+! Copyright by The HDF Group. *
+! All rights reserved. *
+! *
+! This file is part of HDF5. The full HDF5 copyright notice, including *
+! terms governing use, modification, and redistribution, is contained in *
+! the COPYING file, which can be found at the root of the source code *
+! distribution tree, or in https://www.hdfgroup.org/licenses. *
+! If you do not have access to either file, you may request a copy from *
+! help@hdfgroup.org. *
+! * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+
+!
+! main program for subfiling HDF5 Fortran tests
+!
+
+#include <H5config_f.inc>
+
+PROGRAM subfiling_test
+ USE, INTRINSIC :: ISO_C_BINDING, ONLY : C_INT64_T
+ USE HDF5
+ USE MPI
+ USE TH5_MISC
+
+ IMPLICIT NONE
+
+ INTEGER :: total_error = 0 ! sum of the number of errors
+  INTEGER :: mpierror ! MPI error flag
+ INTEGER :: mpi_rank ! rank of the calling process in the communicator
+
+#ifdef H5_HAVE_SUBFILING_VFD
+
+ CHARACTER(LEN=7), PARAMETER :: filename = "subf.h5"
+
+  INTEGER :: hdferror ! HDF error flag
+ INTEGER :: mpi_size, mpi_size_ret ! number of processes in the group of communicator
+ INTEGER :: required, provided
+ LOGICAL :: file_exists
+
+ INTEGER(HID_T) :: fapl_id
+ INTEGER(HID_T) :: file_id
+ INTEGER :: comm, comm_ret
+ INTEGER :: info, info_ret
+ CHARACTER(LEN=3) :: info_val
+ CHARACTER(LEN=180) :: subfname
+ INTEGER :: i, sum
+  INTEGER(C_INT64_T) :: inode
+ TYPE(H5FD_subfiling_config_t) :: vfd_config
+ TYPE(H5FD_ioc_config_t) :: vfd_config_ioc
+ LOGICAL :: flag
+
+ INTEGER :: nerrors = 0
+
+ INTEGER(HID_T) :: driver_id
+
+ CHARACTER(len=8) :: hex1, hex2
+
+ !
+ ! initialize MPI
+ !
+ required = MPI_THREAD_MULTIPLE
+ CALL mpi_init_thread(required, provided, mpierror)
+ IF (mpierror .NE. MPI_SUCCESS) THEN
+ WRITE(*,*) "MPI_INIT_THREAD *FAILED*"
+ nerrors = nerrors + 1
+ ENDIF
+ IF (provided .NE. required) THEN
+ WRITE(*,*) "MPI doesn't support MPI_Init_thread with MPI_THREAD_MULTIPLE *FAILED*"
+ nerrors = nerrors + 1
+ ENDIF
+ CALL mpi_comm_rank( MPI_COMM_WORLD, mpi_rank, mpierror )
+ IF (mpierror .NE. MPI_SUCCESS) THEN
+ WRITE(*,*) "MPI_COMM_RANK *FAILED* Process = ", mpi_rank
+ nerrors = nerrors + 1
+ ENDIF
+ CALL mpi_comm_size( MPI_COMM_WORLD, mpi_size, mpierror )
+ IF (mpierror .NE. MPI_SUCCESS) THEN
+ WRITE(*,*) "MPI_COMM_SIZE *FAILED* Process = ", mpi_rank
+ nerrors = nerrors + 1
+ ENDIF
+
+ IF(nerrors.NE.0)THEN
+     IF(mpi_rank==0) CALL write_test_status(nerrors, &
+          'Testing mpi_init_thread initialization', total_error)
+ CALL MPI_Barrier(MPI_COMM_WORLD, mpierror)
+ CALL mpi_abort(MPI_COMM_WORLD, 1, mpierror)
+ ENDIF
+
+ !
+ ! initialize the HDF5 fortran interface
+ !
+ CALL h5open_f(hdferror)
+
+ ! ***********************************
+ ! Test H5Pset/get_mpi_params_f APIs
+ ! ***********************************
+ nerrors = 0
+ IF(mpi_size.GT.2)THEN
+
+ IF (mpi_rank.LE.1)THEN
+ CALL MPI_Comm_split(MPI_COMM_WORLD, 1, mpi_rank, comm, mpierror)
+ ELSE
+ CALL MPI_Comm_split(MPI_COMM_WORLD, 0, mpi_rank, comm, mpierror)
+ ENDIF
+
+ CALL MPI_Info_create(info, mpierror)
+ CALL MPI_Info_set( info, "foo", "bar", mpierror)
+
+ IF (mpi_rank.LE.1)THEN
+
+ CALL h5pcreate_f(H5P_FILE_ACCESS_F, fapl_id, hdferror)
+ CALL check("h5pcreate_f", hdferror, nerrors)
+
+ CALL H5Pset_mpi_params_f(fapl_id, comm, info, hdferror)
+ CALL check("H5Pset_mpi_params_f", hdferror, nerrors)
+
+ CALL H5Pget_mpi_params_f(fapl_id, comm_ret, info_ret, hdferror)
+ CALL check("H5Pget_mpi_params_f", hdferror, nerrors)
+
+ CALL mpi_comm_size(comm_ret, mpi_size_ret, mpierror)
+
+ IF(mpi_size_ret.NE.2)THEN
+ IF(mpi_rank.EQ.0) &
+ WRITE(*,*) "Failed H5Pset_mpi_params_f and H5Pget_mpi_params_f sequence"
+ nerrors = nerrors + 1
+ ENDIF
+
+ CALL mpi_info_get(info_ret,"foo", 3, info_val, flag, mpierror)
+ IF(flag .EQV. .TRUE.)THEN
+ IF(info_val.NE."bar")THEN
+ IF(mpi_rank.EQ.0) &
+ WRITE(*,*) "Failed H5Pset_mpi_params_f and H5Pget_mpi_params_f sequence"
+ nerrors = nerrors + 1
+ ENDIF
+ ELSE
+ IF(mpi_rank.EQ.0) &
+ WRITE(*,*) "Failed to find info value with mpi_info_get"
+ nerrors = nerrors + 1
+ ENDIF
+ CALL h5pclose_f(fapl_id, hdferror)
+ ENDIF
+
+ CALL MPI_Comm_free(comm, mpierror)
+ CALL MPI_Info_free(info, mpierror)
+
+ ENDIF
+
+ CALL MPI_REDUCE(nerrors, sum, 1, MPI_INTEGER, MPI_SUM, 0, MPI_COMM_WORLD, mpierror)
+ IF(mpi_rank==0) CALL write_test_status(sum, &
+ 'Testing H5Pset/get_mpi_params_f', total_error)
+
+ ! *********************************************************
+ ! Setup file access property list with subfiling I/O access
+ ! *********************************************************
+
+ nerrors = 0
+ CALL h5pcreate_f(H5P_FILE_ACCESS_F, fapl_id, hdferror)
+ CALL check("h5pcreate_f", hdferror, nerrors)
+
+ CALL H5Pset_mpi_params_f(fapl_id, MPI_COMM_WORLD, MPI_INFO_NULL, hdferror)
+ CALL check("H5Pset_mpi_params_f", hdferror, nerrors)
+
+ CALL H5Pget_mpi_params_f(fapl_id, comm, info, hdferror)
+ CALL check("H5Pset_mpi_params_f", hdferror, nerrors)
+
+ CALL mpi_comm_size(comm, mpi_size_ret, mpierror)
+ IF(mpi_size_ret.NE.mpi_size)THEN
+ IF(mpi_rank.EQ.0) &
+ WRITE(*,*) "Failed H5Pset_mpi_params_f and H5Pget_mpi_params_f sequence"
+ nerrors = nerrors + 1
+ ENDIF
+
+ IF(mpi_rank==0) CALL write_test_status(nerrors, &
+ 'Testing H5Pset/get_mpi_params_f with defaults ', total_error)
+
+  ! Verify that no new enum parameters have been added in C without being updated in Fortran
+ IF( IOC_SELECTION_OPTIONS_F .NE. 4)THEN
+ IF(mpi_rank.EQ.0) &
+ WRITE(*,*) "Mismatch between Fortran and C H5FD_subfiling_ioc_select_t definitions"
+ nerrors = nerrors + 1
+ ENDIF
+
+ IF(mpi_rank==0) CALL write_test_status(nerrors, &
+ 'Testing Subfiling FD is registered', total_error)
+
+ ! *********************************************************
+ ! Check the default subfiling parameters
+ ! *********************************************************
+ nerrors = 0
+ CALL h5pset_fapl_subfiling_f(fapl_id, hdferror)
+ CALL check("h5pset_fapl_subfiling_f", hdferror, nerrors)
+
+ CALL h5pget_driver_f(fapl_id, driver_id, hdferror)
+ CALL check("h5pget_driver_f", hdferror, nerrors)
+
+ IF( driver_id .NE. H5FD_SUBFILING_F) THEN
+ WRITE(*,*) "Wrong file driver type returned"
+ nerrors = nerrors + 1
+ ENDIF
+
+ ! *********************************************************
+ ! Check the default parameters for subfiling and ioc
+ ! *********************************************************
+
+ CALL h5pget_fapl_subfiling_f(fapl_id, vfd_config, hdferror)
+ CALL check("h5pget_fapl_subfiling_f", hdferror, nerrors)
+
+ CALL h5pset_fapl_ioc_f(vfd_config%ioc_fapl_id, hdferror)
+ CALL check("h5pset_fapl_ioc_f", hdferror, nerrors)
+
+ CALL h5pget_fapl_ioc_f(vfd_config%ioc_fapl_id, vfd_config_ioc, hdferror)
+ CALL check("h5pget_fapl_ioc_f", hdferror, nerrors)
+
+ WRITE(hex1,'(z8)') H5FD_SUBFILING_FAPL_MAGIC_F
+ WRITE(hex2,'(z8)') vfd_config%magic
+
+ IF(hex1 .NE. hex2 .OR. &
+ vfd_config%version .NE. H5FD_SUBFILING_CURR_FAPL_VERSION_F .OR. &
+ .NOT.vfd_config%require_ioc .OR. &
+ vfd_config%shared_cfg%ioc_selection .NE. SELECT_IOC_ONE_PER_NODE_F .OR. &
+ vfd_config%shared_cfg%stripe_size .NE. H5FD_SUBFILING_DEFAULT_STRIPE_SIZE_F .OR. &
+ vfd_config%shared_cfg%stripe_count .NE. H5FD_SUBFILING_DEFAULT_STRIPE_COUNT_F &
+ )THEN
+ IF(mpi_rank.EQ.0) &
+ WRITE(*,*) "Failed h5pget_fapl_subfiling_f"
+ nerrors = nerrors + 1
+ ENDIF
+
+ IF(mpi_rank==0) CALL write_test_status(nerrors, &
+ 'Testing H5Pset/get_fapl_subfiling_f with defaults', total_error)
+
+ WRITE(hex1,'(z8)') H5FD_IOC_FAPL_MAGIC_F
+ WRITE(hex2,'(z8)') vfd_config_ioc%magic
+
+ nerrors = 0
+ IF(hex1 .NE. hex2 .OR. &
+ vfd_config_ioc%version .NE. H5FD_IOC_CURR_FAPL_VERSION_F .OR. &
+ vfd_config_ioc%thread_pool_size .NE. H5FD_IOC_DEFAULT_THREAD_POOL_SIZE_F &
+ )THEN
+ IF(mpi_rank.EQ.0) &
+ WRITE(*,*) "Failed h5pget_fapl_ioc_f"
+ nerrors = nerrors + 1
+ ENDIF
+
+ IF(mpi_rank==0) CALL write_test_status(nerrors, &
+ 'Testing H5Pset/get_fapl_ioc_f with defaults', total_error)
+
+ ! *********************************************************
+ ! Testing creating a file with subfiling, default settings
+ ! *********************************************************
+
+ CALL h5fcreate_f(filename, H5F_ACC_TRUNC_F, file_id, hdferror, access_prp = fapl_id)
+ CALL check("h5fcreate_f", hdferror, nerrors)
+
+ CALL h5fclose_f(file_id, hdferror)
+ CALL check("h5fclose_f", hdferror, nerrors)
+
+ IF(mpi_rank==0) CALL write_test_status(nerrors, &
+ 'Testing H5Fcreate with subfiling with default settings', total_error)
+
+ ! *********************************************************
+ ! Testing creating a file with subfiling, modified settings
+ ! *********************************************************
+
+ ! Testing modifying defaults for subfiling FD
+
+ vfd_config%magic = H5FD_SUBFILING_FAPL_MAGIC_F
+ vfd_config%version = H5FD_SUBFILING_CURR_FAPL_VERSION_F
+ vfd_config%require_ioc = .TRUE.
+ vfd_config%shared_cfg%ioc_selection = SELECT_IOC_ONE_PER_NODE_F
+ vfd_config%shared_cfg%stripe_size = 16*1024*1024
+ vfd_config%shared_cfg%stripe_count = 3
+
+ nerrors = 0
+ CALL h5pset_fapl_subfiling_f(fapl_id, hdferror, vfd_config)
+ CALL check("h5pset_fapl_ioc_f", hdferror, nerrors)
+
+ CALL h5pget_fapl_subfiling_f(fapl_id, vfd_config, hdferror)
+ CALL check("h5pget_fapl_ioc_f", hdferror, nerrors)
+
+ WRITE(hex1,'(z8)') H5FD_SUBFILING_FAPL_MAGIC_F
+ WRITE(hex2,'(z8)') vfd_config%magic
+
+ IF(hex1 .NE. hex2 .OR. &
+ vfd_config%version .NE. H5FD_SUBFILING_CURR_FAPL_VERSION_F .OR. &
+ .NOT.vfd_config%require_ioc .OR. &
+ vfd_config%shared_cfg%ioc_selection .NE. SELECT_IOC_ONE_PER_NODE_F .OR. &
+ vfd_config%shared_cfg%stripe_size .NE. 16*1024*1024 .OR. &
+ vfd_config%shared_cfg%stripe_count .NE. 3 &
+ )THEN
+ IF(mpi_rank.EQ.0) &
+ WRITE(*,*) "Failed h5pget_fapl_subfiling_f"
+ nerrors = nerrors + 1
+ ENDIF
+
+ IF(mpi_rank==0) CALL write_test_status(nerrors, &
+ 'Testing H5Pset/get_fapl_subfiling_f with custom settings', total_error)
+
+ vfd_config_ioc%magic = H5FD_IOC_FAPL_MAGIC_F
+ vfd_config_ioc%version = H5FD_IOC_CURR_FAPL_VERSION_F
+ vfd_config_ioc%thread_pool_size = 2
+
+ nerrors = 0
+ CALL h5pset_fapl_ioc_f(vfd_config%ioc_fapl_id, hdferror, vfd_config_ioc)
+ CALL check("h5pset_fapl_ioc_f", hdferror, nerrors)
+
+ CALL h5pget_fapl_ioc_f(vfd_config%ioc_fapl_id, vfd_config_ioc, hdferror)
+ CALL check("h5pget_fapl_ioc_f", hdferror, nerrors)
+
+ IF(& !vfd_config_ioc%magic .NE. H5FD_IOC_FAPL_MAGIC_F .OR. &
+ vfd_config_ioc%version .NE. H5FD_IOC_CURR_FAPL_VERSION_F .OR. &
+ vfd_config_ioc%thread_pool_size .NE. 2 &
+ )THEN
+ IF(mpi_rank.EQ.0) &
+ WRITE(*,*) "Failed h5pget_fapl_ioc_f"
+ nerrors = nerrors + 1
+ ENDIF
+ IF(mpi_rank==0) CALL write_test_status(nerrors, &
+ 'Testing H5Pset/get_fapl_ioc_f with custom settings', total_error)
+
+ ! *********************************************************
+ ! Testing creating a file with subfiling, custom settings
+ ! *********************************************************
+
+ CALL h5fcreate_f(filename, H5F_ACC_TRUNC_F, file_id, hdferror, access_prp = fapl_id)
+ CALL check("h5pcreate_f", hdferror, nerrors)
+
+ CALL h5fclose_f(file_id, hdferror)
+ CALL check("h5fclose_f", hdferror, nerrors)
+
+ IF(mpi_rank.EQ.0)THEN
+ INQUIRE(FILE=filename, EXIST=file_exists)
+ IF(.NOT.file_exists)THEN
+ WRITE(*,"(A,A)") "Failed to find the stub subfile ",TRIM(filename)
+ nerrors = nerrors + 1
+ ENDIF
+
+ CALL EXECUTE_COMMAND_LINE("stat --format='%i' "//filename//" >> tmp_inode", EXITSTAT=i)
+ IF(i.ne.0)THEN
+ WRITE(*,"(A,A)") "Failed to stat the stub subfile ",TRIM(filename)
+ nerrors = nerrors + 1
+ ENDIF
+
+ OPEN(11,FILE="tmp_inode")
+ READ(11,*) inode
+ CLOSE(11,STATUS="delete")
+
+ DO i = 1, vfd_config%shared_cfg%stripe_count
+ WRITE(subfname,'(A,".subfile_",I0,"_",I0,"_of_",I0)') filename,inode,i,vfd_config%shared_cfg%stripe_count
+ INQUIRE(FILE=subfname, EXIST=file_exists)
+ IF(.NOT.file_exists)THEN
+ WRITE(*,"(A,A)") "Failed to create the subfile ",TRIM(subfname)
+ nerrors = nerrors + 1
+ ENDIF
+ ENDDO
+
+ ENDIF
+
+ CALL h5pclose_f(fapl_id, hdferror)
+ CALL check("h5pclose_f", hdferror, nerrors)
+
+ IF(mpi_rank==0) CALL write_test_status(nerrors, &
+ 'Testing H5Fcreate with subfiling with custom settings', total_error)
+
+ !
+ ! close HDF5 interface
+ !
+ CALL h5close_f(hdferror)
+
+ CALL MPI_ALLREDUCE(total_error, sum, 1, MPI_INTEGER, MPI_SUM, MPI_COMM_WORLD, mpierror)
+
+ !
+ ! close MPI
+ !
+ IF (sum == 0) THEN
+ CALL mpi_finalize(mpierror)
+ IF (mpierror .NE. MPI_SUCCESS) THEN
+ WRITE(*,*) "MPI_FINALIZE *FAILED* Process = ", mpi_rank
+ ENDIF
+ ELSE
+ WRITE(*,*) 'Errors detected in process ', mpi_rank
+ CALL mpi_abort(MPI_COMM_WORLD, 1, mpierror)
+ IF (mpierror .NE. MPI_SUCCESS) THEN
+ WRITE(*,*) "MPI_ABORT *FAILED* Process = ", mpi_rank
+ ENDIF
+ ENDIF
+ !
+ ! end main program
+ !
+
+#else
+
+ CALL mpi_init(mpierror)
+ CALL mpi_comm_rank(MPI_COMM_WORLD, mpi_rank, mpierror)
+ IF(mpi_rank==0) CALL write_test_status( -1, &
+ 'Subfiling not enabled', total_error)
+ CALL mpi_finalize(mpierror)
+
+#endif
+
+END PROGRAM subfiling_test