From 49fe497201bcefcd9beb01fe8fc2175974b73799 Mon Sep 17 00:00:00 2001 From: Jerome Soumagne Date: Mon, 9 Dec 2019 12:16:52 -0600 Subject: Revert "Merge branch 'hdf5_1_12' of https://bitbucket.hdfgroup.org/scm/hdffv/hdf5 into hdf5_1_12" This reverts commit 9f9336a5bd541752f472bab4c93da8de89f862cd, reversing changes made to 437a1919e7ba60fe75a33a466d264183a0255319. --- CMakeLists.txt | 17 +- MANIFEST | 1 - bin/checkapi | 4 +- bin/checkposix | 3 +- bin/debug-ohdr | 2 +- bin/dependencies | 4 +- bin/distdep | 5 +- bin/errors | 3 +- bin/iostats | 2 +- bin/make_err | 3 +- bin/make_overflow | 3 +- bin/make_vers | 7 +- bin/runbkgprog | 3 +- bin/trace | 3 +- c++/examples/Makefile.am | 4 +- c++/examples/run-c++-ex.sh.in | 27 +- config/cmake/H5pubconf.h.in | 3 - config/cmake/scripts/HDF5config.cmake | 4 +- config/commence.am | 2 +- config/gnu-flags | 98 +- config/netbsd | 56 - configure.ac | 42 +- examples/Makefile.am | 4 +- examples/run-c-ex.sh.in | 29 +- fortran/examples/Makefile.am | 4 +- fortran/examples/run-fortran-ex.sh.in | 27 +- hl/c++/examples/Makefile.am | 4 +- hl/c++/examples/run-hlc++-ex.sh.in | 28 +- hl/examples/Makefile.am | 4 +- hl/examples/run-hlc-ex.sh.in | 27 +- hl/fortran/examples/Makefile.am | 4 +- hl/fortran/examples/run-hlfortran-ex.sh.in | 4 +- hl/src/H5DS.c | 4 - hl/test/H5srcdir_str.h.in | 2 +- java/src/hdf/hdf5lib/HDF5Constants.java | 3 - java/src/jni/h5Constants.c | 2 - src/H5Aint.c | 1 - src/H5Cdbg.c | 5 +- src/H5Cpkg.h | 2 +- src/H5Dchunk.c | 23 +- src/H5Dlayout.c | 1 - src/H5F.c | 2 +- src/H5FDlog.c | 4 +- src/H5FSsection.c | 10 +- src/H5Fpublic.h | 3 +- src/H5Fsuper.c | 1 - src/H5Fsuper_cache.c | 26 +- src/H5HFcache.c | 8 +- src/H5HFtiny.c | 2 +- src/H5HGcache.c | 8 +- src/H5Ofill.c | 1 - src/H5Ofsinfo.c | 1 - src/H5Oint.c | 2 - src/H5Opline.c | 1 - src/H5Pmapl.c | 3 +- src/H5Rpublic.h | 13 +- src/H5S.c | 1 - src/H5SL.c | 4 +- src/H5SM.c | 4 +- src/H5Shyper.c | 1 - src/H5Spoint.c | 1 - src/H5Sselect.c | 2 +- src/H5T.c | 3 +- src/H5Tprivate.h | 2 +- src/H5VLcallback.c | 32 +- src/H5VLconnector.h | 8 +- src/H5VLpassthru.c | 2 +- src/H5VLprivate.h | 4 +- src/H5private.h | 21 +- src/H5trace.c | 6 +- src/libhdf5.settings.in | 2 +- test/CMakeLists.txt | 2 - test/H5srcdir.c | 61 - test/H5srcdir.h | 39 +- test/H5srcdir_str.h.in | 2 +- test/Makefile.am | 8 +- test/accum.c | 8 +- test/cache.c | 2 +- test/cache_common.c | 2 +- test/cache_tagging.c | 2 +- test/chunk_info.c | 1 - test/del_many_dense_attrs.c | 2 +- test/dsets.c | 152 +- test/dt_arith.c | 34 +- test/fillval.c | 5 - test/h5test.c | 3 +- test/null_vol_connector.c | 6 - test/swmr_sparse_reader.c | 4 +- test/tfile.c | 1 - test/tid.c | 25 +- test/trefer.c | 20 +- test/tvlstr.c | 4 +- test/vds_swmr.h | 24 +- test/vds_swmr_common.c | 36 - testpar/CMakeLists.txt | 1 - testpar/Makefile.am | 2 +- testpar/t_2Gio.c | 4974 ----------------------- tools/lib/h5diff.c | 3 - tools/lib/h5diff_array.c | 4 - tools/lib/h5tools_utils.c | 2 +- tools/src/h5import/h5import.c | 2 +- tools/src/h5repack/h5repack_main.c | 17 +- tools/test/h5jam/testh5jam.sh.in | 4 +- tools/test/h5repack/testfiles/h5repack-help.txt | 3 +- 104 files changed, 275 insertions(+), 5832 deletions(-) delete mode 100644 config/netbsd delete mode 100644 test/H5srcdir.c delete mode 100644 test/vds_swmr_common.c delete mode 100644 testpar/t_2Gio.c diff --git a/CMakeLists.txt b/CMakeLists.txt index 7ae0833..38ec775 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -646,9 +646,9 @@ if (H5_HAVE_PARALLEL) endif () endif () -#option (DEFAULT_API_VERSION "Enable v1.14 API (v16, 
v18, v110, v112, v114)" "v114") -set (DEFAULT_API_VERSION "v114" CACHE STRING "Enable v1.14 API (v16, v18, v110, v112, v114)") -set_property (CACHE DEFAULT_API_VERSION PROPERTY STRINGS v16 v18 v110 v112 v114) +#option (DEFAULT_API_VERSION "Enable v1.12 API (v16, v18, v110, v112)" "v112") +set (DEFAULT_API_VERSION "v112" CACHE STRING "Enable v1.12 API (v16, v18, v110, v112)") +set_property (CACHE DEFAULT_API_VERSION PROPERTY STRINGS v16 v18 v110 v112) #----------------------------------------------------------------------------- # Option to use 1.6.x API #----------------------------------------------------------------------------- @@ -685,17 +685,6 @@ if (DEFAULT_API_VERSION MATCHES "v112") endif () #----------------------------------------------------------------------------- -# Option to use 1.14.x API -#----------------------------------------------------------------------------- -if (NOT DEFAULT_API_VERSION) - set (DEFAULT_API_VERSION "v114") -endif () -set (H5_USE_114_API_DEFAULT 0) -if (DEFAULT_API_VERSION MATCHES "v114") - set (H5_USE_114_API_DEFAULT 1) -endif () - -#----------------------------------------------------------------------------- # Include user macros #----------------------------------------------------------------------------- include (UserMacros.cmake) diff --git a/MANIFEST b/MANIFEST index 86a47a5..e1f7d9c 100644 --- a/MANIFEST +++ b/MANIFEST @@ -136,7 +136,6 @@ ./config/linux-gnulibc2 ./config/lt_vers.am ./config/Makefile.am.blank -./config/netbsd ./config/pgi-fflags ./config/pgi-flags ./config/solaris diff --git a/bin/checkapi b/bin/checkapi index f5dcacc..6882dea 100755 --- a/bin/checkapi +++ b/bin/checkapi @@ -1,4 +1,4 @@ -#!/usr/bin/env perl +#!/usr/bin/perl -w # # Copyright by The HDF Group. # Copyright by the Board of Trustees of the University of Illinois. @@ -13,8 +13,6 @@ # require 5.003; -use warnings; - # Purpose: insures that API functions aren't called internally. # Usage: checkapi H5*.c my $filename = ""; diff --git a/bin/checkposix b/bin/checkposix index 233d15c..30128e3 100755 --- a/bin/checkposix +++ b/bin/checkposix @@ -1,6 +1,5 @@ -#!/usr/bin/env perl +#!/usr/bin/perl -w require 5.003; -use warnings; # # Copyright by The HDF Group. diff --git a/bin/debug-ohdr b/bin/debug-ohdr index 1363456..5b0a4b3 100755 --- a/bin/debug-ohdr +++ b/bin/debug-ohdr @@ -1,4 +1,4 @@ -#!/usr/bin/env perl +#!/usr/bin/perl # # Copyright by The HDF Group. # Copyright by the Board of Trustees of the University of Illinois. diff --git a/bin/dependencies b/bin/dependencies index 367351a..82247da 100755 --- a/bin/dependencies +++ b/bin/dependencies @@ -1,4 +1,4 @@ -#!/usr/bin/env perl +#!/usr/bin/perl -w # # Copyright by The HDF Group. # Copyright by the Board of Trustees of the University of Illinois. @@ -11,8 +11,6 @@ # If you do not have access to either file, you may request a copy from # help@hdfgroup.org. # -use warnings; - my $depend_file; my $new_depend_file; my $srcdir; diff --git a/bin/distdep b/bin/distdep index cd310e0..4643700 100755 --- a/bin/distdep +++ b/bin/distdep @@ -1,7 +1,4 @@ -#!/bin/sh -#! -*-perl-*- -eval 'exec perl -p -x -S $0 ${1+"$@"}' - if 0; +#!/usr/bin/perl -p # # Copyright by The HDF Group. # Copyright by the Board of Trustees of the University of Illinois. 
diff --git a/bin/errors b/bin/errors index 107bb9c..3c99fdc 100755 --- a/bin/errors +++ b/bin/errors @@ -1,6 +1,5 @@ -#!/usr/bin/env perl +#!/usr/local/bin/perl -w require 5.003; -use warnings; use Text::Tabs; # NOTE: THE FORMAT OF HRETURN_ERROR AND HGOTO_ERROR MACROS HAS diff --git a/bin/iostats b/bin/iostats index e389992..f054b9c 100755 --- a/bin/iostats +++ b/bin/iostats @@ -1,4 +1,4 @@ -#!/usr/bin/env perl +#!/usr/bin/perl # # Copyright by The HDF Group. # Copyright by the Board of Trustees of the University of Illinois. diff --git a/bin/make_err b/bin/make_err index 7f38591..623c1b6 100755 --- a/bin/make_err +++ b/bin/make_err @@ -1,7 +1,6 @@ -#!/usr/bin/env perl +#!/usr/bin/perl -w require 5.003; $indent=4; -use warnings; # # Copyright by The HDF Group. diff --git a/bin/make_overflow b/bin/make_overflow index cee0126..ccd640e 100755 --- a/bin/make_overflow +++ b/bin/make_overflow @@ -1,7 +1,6 @@ -#!/usr/bin/env perl +#!/usr/bin/perl -w require 5.003; use strict; -use warnings; # Global settings diff --git a/bin/make_vers b/bin/make_vers index c6d2c04..4de2dbd 100755 --- a/bin/make_vers +++ b/bin/make_vers @@ -1,6 +1,5 @@ -#!/usr/bin/env perl +#!/usr/bin/perl -w require 5.003; -use warnings; # Global settings # (The max_idx parameter is the only thing that needs to be changed when adding @@ -8,8 +7,8 @@ use warnings; # is added (like support for 1.4, etc), the min_sup_idx parameter will # need to be decremented. - QAK) -# Max. library "index" (0 = v1.0, 1 = 1.2, 2 = 1.4, 3 = 1.6, 4 = 1.8, 5 = 1.10, 6 = 1.12, 7 = 1.14, etc) -$max_idx = 7; +# Max. library "index" (0 = v1.0, 1 = 1.2, 2 = 1.4, 3 = 1.6, 4 = 1.8, 5 = 1.10, 6 = 1.12, etc) +$max_idx = 6; # Min. supported previous library version "index" (0 = v1.0, 1 = 1.2, etc) $min_sup_idx = 3; diff --git a/bin/runbkgprog b/bin/runbkgprog index f04ea89..69fa2d0 100755 --- a/bin/runbkgprog +++ b/bin/runbkgprog @@ -1,6 +1,5 @@ -#!/usr/bin/env perl +#!/usr/bin/perl -w require 5.003; -use warnings; $indent=4; # diff --git a/bin/trace b/bin/trace index 34bcd3a..3cae0a4 100755 --- a/bin/trace +++ b/bin/trace @@ -1,4 +1,4 @@ -#!/usr/bin/env perl +#!/usr/bin/perl -w ## # Copyright by The HDF Group. # Copyright by the Board of Trustees of the University of Illinois. @@ -12,7 +12,6 @@ # help@hdfgroup.org. ## require 5.003; -use warnings; $Source = ""; ############################################################################## diff --git a/c++/examples/Makefile.am b/c++/examples/Makefile.am index 0648504..51ab8e3 100644 --- a/c++/examples/Makefile.am +++ b/c++/examples/Makefile.am @@ -49,8 +49,8 @@ CXX_API=yes # Where to install examples # Note: no '/' after DESTDIR. Explanation in commence.am -EXAMPLEDIR=$(examplesdir)/c++ -EXAMPLETOPDIR=$(examplesdir) +EXAMPLEDIR=${DESTDIR}$(exec_prefix)/share/hdf5_examples/c++ +EXAMPLETOPDIR=${DESTDIR}$(exec_prefix)/share/hdf5_examples # How to build programs using h5c++ $(EXTRA_PROG): $(H5CPP) diff --git a/c++/examples/run-c++-ex.sh.in b/c++/examples/run-c++-ex.sh.in index 03e1eac..d975924 100644 --- a/c++/examples/run-c++-ex.sh.in +++ b/c++/examples/run-c++-ex.sh.in @@ -18,7 +18,7 @@ # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # This script will compile and run the c++ examples from source files # -# installed in @examplesdir@/c++ using h5c++. The # +# installed in .../share/hdf5_examples/c++ using h5c++. The # # order for running programs with RunTest in the MAIN section below is taken # # from the Makefile. 
The order is important since some of the test programs # # use data files created by earlier test programs. Any future additions should # @@ -30,32 +30,9 @@ EXIT_SUCCESS=0 EXIT_FAILURE=1 -# -# Try to derive the path to the installation $prefix established -# by ./configure relative to the examples directory established by -# ./configure. If successful, set `prefix_relto_examplesdir` to the -# relative path. Otherwise, set `prefix_relto_examplesdir` to the -# absolute installation $prefix. -# -# This script uses the value of `prefix` in the user's environment, if -# it is set, below. The content of $() is evaluated in a sub-shell, so -# if `prefix` is set in the user's environment, the shell statements in -# $() won't clobbered it. -# -prefix_relto_examplesdir=$( -prefix=@prefix@ -examplesdir=@examplesdir@ -if [ ${examplesdir##${prefix}/} != ${examplesdir} ]; then - echo $(echo ${examplesdir##${prefix}/} | \ - sed 's,[^/][^/]*,..,g') -else - echo $prefix -fi -) - # Where the tool is installed. # default is relative path to installed location of the tools -prefix="${prefix:-../${prefix_relto_examplesdir}}" +prefix="${prefix:-../../..}" AR="@AR@" RANLIB="@RANLIB@" H5TOOL="h5c++" # The tool name diff --git a/config/cmake/H5pubconf.h.in b/config/cmake/H5pubconf.h.in index 0836168..120c023 100644 --- a/config/cmake/H5pubconf.h.in +++ b/config/cmake/H5pubconf.h.in @@ -720,9 +720,6 @@ /* Define using v1.12 public API symbols by default */ #cmakedefine H5_USE_112_API_DEFAULT @H5_USE_112_API_DEFAULT@ -/* Define using v1.14 public API symbols by default */ -#cmakedefine H5_USE_114_API_DEFAULT @H5_USE_114_API_DEFAULT@ - /* Define if a memory checking tool will be used on the library, to cause library to be very picky about memory operations and also disable the internal free list manager code. */ diff --git a/config/cmake/scripts/HDF5config.cmake b/config/cmake/scripts/HDF5config.cmake index 7b61cdf..7c215c7 100644 --- a/config/cmake/scripts/HDF5config.cmake +++ b/config/cmake/scripts/HDF5config.cmake @@ -42,9 +42,9 @@ set (CTEST_SOURCE_VERSEXT "-alpha0") ############################################################################## # handle input parameters to script. #BUILD_GENERATOR - which CMake generator to use, required -#INSTALLDIR - HDF5-1.13.0 root folder +#INSTALLDIR - HDF5-1.10.0 root folder #CTEST_CONFIGURATION_TYPE - Release, Debug, RelWithDebInfo -#CTEST_SOURCE_NAME - name of source folder; HDF5-1.13.0 +#CTEST_SOURCE_NAME - name of source folder; HDF5-1.10.0 #MODEL - CDash group name #HPC - run alternate configurations for HPC machines; sbatch, bsub, raybsub, qsub #MPI - enable MPI diff --git a/config/commence.am b/config/commence.am index 830c494..a16eee5 100644 --- a/config/commence.am +++ b/config/commence.am @@ -70,7 +70,7 @@ H5CPP=${DESTDIR}$(bindir)/h5c++ # instead of CFLAGS, as CFLAGS is reserved solely for the user to define. # This applies to FCFLAGS, CXXFLAGS, CPPFLAGS, and LDFLAGS as well. -AM_CFLAGS=@AM_CFLAGS@ @H5_CFLAGS@ @H5_ECFLAGS@ +AM_CFLAGS=@AM_CFLAGS@ @H5_CFLAGS@ AM_FCFLAGS=@AM_FCFLAGS@ @H5_FCFLAGS@ AM_CXXFLAGS=@AM_CXXFLAGS@ @H5_CXXFLAGS@ AM_CPPFLAGS=@AM_CPPFLAGS@ @H5_CPPFLAGS@ diff --git a/config/gnu-flags b/config/gnu-flags index bc120a8..6355ccf 100644 --- a/config/gnu-flags +++ b/config/gnu-flags @@ -168,44 +168,12 @@ if test "X-gcc" = "X-$cc_vendor"; then # NOTE: Disable the -Wformat-nonliteral from -Wformat=2 here and re-add # it to the developer flags. 
# - H5_CFLAGS="$H5_CFLAGS -pedantic -Wall -Wextra" - H5_ECFLAGS="$H5_ECFLAGS -Werror=bad-function-cast" - H5_ECFLAGS="$H5_ECFLAGS -Werror=cast-align" - H5_CFLAGS="$H5_CFLAGS -Wcast-qual -Wconversion" - H5_ECFLAGS="$H5_ECFLAGS -Werror=declaration-after-statement" - DEVELOPER_WARNING_CFLAGS="$DEVELOPER_WARNING_CFLAGS -Wdisabled-optimization" - H5_CFLAGS="$H5_CFLAGS -Wfloat-equal" - H5_CFLAGS="$H5_CFLAGS -Wformat=2 -Wno-format-nonliteral -Winit-self -Winvalid-pch" - H5_ECFLAGS="$H5_ECFLAGS -Werror=missing-declarations" - H5_CFLAGS="$H5_CFLAGS -Wmissing-include-dirs" - H5_ECFLAGS="$H5_ECFLAGS -Werror=missing-prototypes" - H5_ECFLAGS="$H5_ECFLAGS -Werror=nested-externs" - H5_ECFLAGS="$H5_ECFLAGS -Werror=old-style-definition" - H5_ECFLAGS="$H5_ECFLAGS -Werror=packed" - H5_ECFLAGS="$H5_ECFLAGS -Werror=redundant-decls" - H5_ECFLAGS="$H5_ECFLAGS -Werror=shadow" - H5_ECFLAGS="$H5_ECFLAGS -Werror=strict-prototypes" - DEVELOPER_WARNING_CFLAGS="$DEVELOPER_WARNING_CFLAGS -Wswitch-default" - DEVELOPER_WARNING_CFLAGS="$DEVELOPER_WARNING_CFLAGS -Wswitch-enum" - H5_CFLAGS="$H5_CFLAGS -Wundef" - DEVELOPER_WARNING_CFLAGS="$DEVELOPER_WARNING_CFLAGS -Wunused-macros" - DEVELOPER_WARNING_CFLAGS="$DEVELOPER_WARNING_CFLAGS -Wunsafe-loop-optimizations" - H5_CFLAGS="$H5_CFLAGS -Wwrite-strings" - - # - # HDF5 code should not trigger the following warnings under any - # circumstances, so ask the compiler to treat them as errors: - # -# H5_ECFLAGS="$H5_ECFLAGS -Werror=discarded-qualifiers" - H5_ECFLAGS="$H5_ECFLAGS -Werror=implicit-function-declaration" - H5_CFLAGS="$H5_CFLAGS -Wmaybe-uninitialized" - H5_ECFLAGS="$H5_ECFLAGS -Werror=pointer-sign" - H5_ECFLAGS="$H5_ECFLAGS -Werror=pointer-to-int-cast" - H5_ECFLAGS="$H5_ECFLAGS -Werror=switch" - H5_ECFLAGS="$H5_ECFLAGS -Werror=unused-but-set-variable" - H5_ECFLAGS="$H5_ECFLAGS -Werror=unused-function" - H5_ECFLAGS="$H5_ECFLAGS -Werror=unused-parameter" - H5_ECFLAGS="$H5_ECFLAGS -Werror=unused-variable" + H5_CFLAGS="$H5_CFLAGS -pedantic -Wall -Wextra -Wbad-function-cast -Wc++-compat -Wcast-align" + H5_CFLAGS="$H5_CFLAGS -Wcast-qual -Wconversion -Wdeclaration-after-statement -Wdisabled-optimization -Wfloat-equal" + H5_CFLAGS="$H5_CFLAGS -Wformat=2 -Wno-format-nonliteral -Winit-self -Winvalid-pch -Wmissing-declarations -Wmissing-include-dirs" + H5_CFLAGS="$H5_CFLAGS -Wmissing-prototypes -Wnested-externs -Wold-style-definition -Wpacked" + H5_CFLAGS="$H5_CFLAGS -Wredundant-decls -Wshadow -Wstrict-prototypes -Wswitch-enum -Wswitch-default" + H5_CFLAGS="$H5_CFLAGS -Wundef -Wunused-macros -Wunsafe-loop-optimizations -Wwrite-strings" ###################### # Developer warnings # @@ -254,34 +222,17 @@ if test "X-gcc" = "X-$cc_vendor"; then # gcc 4.3 if test $cc_vers_major -ge 5 -o $cc_vers_major -eq 4 -a $cc_vers_minor -ge 3; then - H5_CFLAGS="$H5_CFLAGS -Wlogical-op" - # - # Lots of noise, questionable benefit: - # - DEVELOPER_WARNING_CFLAGS="$DEVELOPER_WARNING_CFLAGS -Wlarger-than=2560" - # + H5_CFLAGS="$H5_CFLAGS -Wlogical-op -Wlarger-than=2560" fi # gcc 4.4 if test $cc_vers_major -ge 5 -o $cc_vers_major -eq 4 -a $cc_vers_minor -ge 4; then - H5_CFLAGS="$H5_CFLAGS -Wsync-nand -Wpacked-bitfield-compat" - # - # Lots of noise, questionable benefit: - # - DEVELOPER_WARNING_CFLAGS="$DEVELOPER_WARNING_CFLAGS -Wframe-larger-than=16384" - # + H5_CFLAGS="$H5_CFLAGS -Wsync-nand -Wframe-larger-than=16384 -Wpacked-bitfield-compat" fi # gcc 4.5 if test $cc_vers_major -ge 5 -o $cc_vers_major -eq 4 -a $cc_vers_minor -ge 5; then - H5_CFLAGS="$H5_CFLAGS -Wstrict-overflow=5 
-Wunsuffixed-float-constants" - # - # -Wjump-misses-init makes lots of noise for a questionable benefit. - # Can jumping over an initialization in C cause any harm, if - # the variable is never *used* before it has been initialized? - # - DEVELOPER_WARNING_CFLAGS="$DEVELOPER_WARNING_CFLAGS -Wjump-misses-init" - # + H5_CFLAGS="$H5_CFLAGS -Wstrict-overflow=5 -Wjump-misses-init -Wunsuffixed-float-constants" fi # gcc 4.6 @@ -293,17 +244,7 @@ if test "X-gcc" = "X-$cc_vendor"; then # gcc 4.7 if test $cc_vers_major -ge 5 -o $cc_vers_major -eq 4 -a $cc_vers_minor -ge 7; then - # - # It's not clear that -Wvector-operation-performance warnings are - # actionable. - # - # -Wstack-usage=8192 warnings need better justification; - # if justifiable, should be enabled on a branch and swept up there - # before burdening the whole development team. - # - DEVELOPER_WARNING_CFLAGS="$DEVELOPER_WARNING_CFLAGS -Wstack-usage=8192" - DEVELOPER_WARNING_CFLAGS="$DEVELOPER_WARNING_CFLAGS -Wvector-operation-performance" - + H5_CFLAGS="$H5_CFLAGS -Wstack-usage=8192 -Wvector-operation-performance" DEVELOPER_WARNING_CFLAGS="$DEVELOPER_WARNING_CFLAGS -Wsuggest-attribute=pure -Wsuggest-attribute=noreturn" NO_DEVELOPER_WARNING_CFLAGS="$NO_DEVELOPER_WARNING_CFLAGS -Wno-suggest-attribute=pure -Wno-suggest-attribute=noreturn" fi @@ -322,21 +263,11 @@ if test "X-gcc" = "X-$cc_vendor"; then # gcc 5 if test $cc_vers_major -ge 5; then H5_CFLAGS="$H5_CFLAGS -Warray-bounds=2 -Wc99-c11-compat" - H5_ECFLAGS="$H5_ECFLAGS -Werror=incompatible-pointer-types" fi # gcc 6 if test $cc_vers_major -ge 6; then - H5_CFLAGS="$H5_CFLAGS -Wunused-const-variable -Whsa -Wnormalized" - # - # Unacceptably noisy on HDF5 right now. - # - DEVELOPER_WARNING_CFLAGS="$DEVELOPER_WARNING_CFLAGS -Wnull-dereference" - # - # Careful! -Wduplicated-cond, combined with HDF5's heavy use of - # macros, can make a lot of noise. - # - DEVELOPER_WARNING_CFLAGS="$DEVELOPER_WARNING_CFLAGS -Wduplicated-cond" + H5_CFLAGS="$H5_CFLAGS -Wnull-dereference -Wunused-const-variable -Wduplicated-cond -Whsa -Wnormalized" fi # gcc 7 @@ -347,13 +278,6 @@ if test "X-gcc" = "X-$cc_vendor"; then # gcc 8 if test $cc_vers_major -ge 8; then - # For GCC 8, promote maybe-initialized warnings to an error. GCC 8 - # reports 0 maybe-uninitialized warnings where earlier versions - # make many false reports. GCC 8 seems to analyze calls to static - # in order to detect initializations that occur there. It's possible - # that GCC 8 only performs that analysis at -O3, though. - H5_ECFLAGS="$H5_ECFLAGS -Werror=maybe-uninitialized" - H5_ECFLAGS="$H5_ECFLAGS -Werror=cast-function-type" DEVELOPER_WARNING_CFLAGS="$DEVELOPER_WARNING_CFLAGS -Wstringop-overflow=4 -Wsuggest-attribute=cold -Wsuggest-attribute=malloc" NO_DEVELOPER_WARNING_CFLAGS="$NO_DEVELOPER_WARNING_CFLAGS -Wno-suggest-attribute=cold -Wno-suggest-attribute=malloc" H5_CFLAGS="$H5_CFLAGS -Wattribute-alias -Wcast-align=strict -Wshift-overflow=2" diff --git a/config/netbsd b/config/netbsd deleted file mode 100644 index 9a9348b..0000000 --- a/config/netbsd +++ /dev/null @@ -1,56 +0,0 @@ -# -*- shell-script -*- -# -# Copyright by The HDF Group. -# Copyright by the Board of Trustees of the University of Illinois. -# All rights reserved. -# -# This file is part of HDF5. The full HDF5 copyright notice, including -# terms governing use, modification, and redistribution, is contained in -# the COPYING file, which can be found at the root of the source code -# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. 
-# If you do not have access to either file, you may request a copy from -# help@hdfgroup.org. - - -# This file is part of the HDF5 build script. It is processed shortly -# after configure starts and defines, among other things, flags for -# the various compile modes. -# -# See BlankForm in this directory for details. - -# The default compiler is `gcc' -if test "X-" = "X-$CC"; then - CC=gcc - CC_BASENAME=gcc -fi - -# Figure out C compiler flags -. $srcdir/config/gnu-flags - -# Figure out Intel C compiler flags -. $srcdir/config/intel-flags - -# The default Fortran 90 compiler -if test "X-" = "X-$FC"; then - case $CC_BASENAME in - gcc*|pgcc*) - FC=gfortran - FC_BASENAME=gfortran - ;; - icc*) - FC=ifort - FC_BASENAME=ifort - ;; - mpicc*) - FC=mpif90 - FC_BASENAME=mpif90 - ;; - esac -fi - -# Figure out FORTRAN compiler flags -. $srcdir/config/gnu-fflags - -# Figure out Intel F90 compiler flags -. $srcdir/config/intel-fflags - diff --git a/configure.ac b/configure.ac index edb6de3..9898005 100644 --- a/configure.ac +++ b/configure.ac @@ -103,11 +103,7 @@ AC_SUBST([AR_FLAGS]) ## H5_CFLAGS (and company) are for CFLAGS that should be used on HDF5, but ## not exported to h5cc (or h5fc, etc.) -## -## H5_ECFLAGS is for warnings that should be treated as errors. -## AC_SUBST([H5_CFLAGS]) -AC_SUBST([H5_ECFLAGS]) AC_SUBST([H5_CPPFLAGS]) AC_SUBST([H5_FCFLAGS]) AC_SUBST([H5_CXXFLAGS]) @@ -236,9 +232,6 @@ case $host_os in freebsd*) host_os_novers=freebsd ;; - netbsd*) - host_os_novers=netbsd - ;; solaris*) host_os_novers=solaris ;; @@ -2971,28 +2964,6 @@ fi AC_CACHE_SAVE ## ---------------------------------------------------------------------- -## Use custom examples path. -## -AC_MSG_CHECKING([for custom examples path definition]) -AC_ARG_WITH([examplesdir], - [AS_HELP_STRING([--with-examplesdir=location], - [Specify path for examples - [default="DATAROOTDIR/hdf5_examples"]])],, - withval="${datarootdir}/hdf5_examples") - -if test "X$withval" = "X"; then - AC_MSG_RESULT([default]) - examplesdir="${datarootdir}/hdf5_examples" -else - AC_MSG_RESULT([$withval]) - examplesdir=$withval -fi - -AC_SUBST([examplesdir]) -AC_DEFINE_UNQUOTED([EXAMPLESDIR], ["$examplesdir"], - [Define the examples directory]) - -## ---------------------------------------------------------------------- ## Enable custom plugin default path for library. It requires SHARED support. ## AC_MSG_CHECKING([for custom plugin default path definition]) @@ -3419,10 +3390,10 @@ esac AC_SUBST([DEFAULT_API_VERSION]) AC_MSG_CHECKING([which version of public symbols to use by default]) AC_ARG_WITH([default-api-version], - [AS_HELP_STRING([--with-default-api-version=(v16|v18|v110|v112|v114)], + [AS_HELP_STRING([--with-default-api-version=(v16|v18|v110|v112)], [Specify default release version of public symbols - [default=v114]])],, - [withval=v114]) + [default=v112]])],, + [withval=v112]) if test "X$withval" = "Xv16"; then AC_MSG_RESULT([v16]) @@ -3444,11 +3415,6 @@ elif test "X$withval" = "Xv112"; then DEFAULT_API_VERSION=v112 AC_DEFINE([USE_112_API_DEFAULT], [1], [Define using v1.12 public API symbols by default]) -elif test "X$withval" = "Xv114"; then - AC_MSG_RESULT([v114]) - DEFAULT_API_VERSION=v114 - AC_DEFINE([USE_114_API_DEFAULT], [1], - [Define using v1.14 public API symbols by default]) else AC_MSG_ERROR([invalid version of public symbols given]) fi @@ -3458,7 +3424,7 @@ fi ## if the user insists on doing this via the --enable-unsupported configure ## flag, we'll let them. 
if test "X${ALLOW_UNSUPPORTED}" != "Xyes"; then - if test "X${DEFAULT_API_VERSION}" != "Xv114" -a "X${DEPRECATED_SYMBOLS}" = "Xno" ; then + if test "X${DEFAULT_API_VERSION}" != "Xv112" -a "X${DEPRECATED_SYMBOLS}" = "Xno" ; then AC_MSG_ERROR([Removing old public API symbols not allowed when using them as default public API symbols. Use --enable-unsupported to override this error.]) fi fi diff --git a/examples/Makefile.am b/examples/Makefile.am index 5b428cd..131842c 100644 --- a/examples/Makefile.am +++ b/examples/Makefile.am @@ -86,8 +86,8 @@ CHECK_CLEANFILES+=$(EXTLINK_DIRS) # Example directory # Note: no '/' after DESTDIR. Explanation in commence.am -EXAMPLEDIR=$(examplesdir)/c -EXAMPLETOPDIR=$(examplesdir) +EXAMPLEDIR=${DESTDIR}$(exec_prefix)/share/hdf5_examples/c +EXAMPLETOPDIR=${DESTDIR}$(exec_prefix)/share/hdf5_examples # List dependencies for each program. Normally, automake would take # care of this for us, but if we tell automake about the programs it diff --git a/examples/run-c-ex.sh.in b/examples/run-c-ex.sh.in index 90d5c6a..a70117f 100644 --- a/examples/run-c-ex.sh.in +++ b/examples/run-c-ex.sh.in @@ -18,7 +18,7 @@ # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # This script will compile and run the c examples from source files installed # -# in @examplesdir@/c using h5cc or h5pc. The order for running # +# in .../share/hdf5_examples/c using h5cc or h5pc. The order for running # # programs with RunTest in the MAIN section below is taken from the Makefile. # # The order is important since some of the test programs use data files created # # by earlier test programs. Any future additions should be placed accordingly. # @@ -28,33 +28,10 @@ # Initializations EXIT_SUCCESS=0 EXIT_FAILURE=1 - -# -# Try to derive the path to the installation $prefix established -# by ./configure relative to the examples directory established by -# ./configure. If successful, set `prefix_relto_examplesdir` to the -# relative path. Otherwise, set `prefix_relto_examplesdir` to the -# absolute installation $prefix. -# -# This script uses the value of `prefix` in the user's environment, if -# it is set, below. The content of $() is evaluated in a sub-shell, so -# if `prefix` is set in the user's environment, the shell statements in -# $() won't clobbered it. -# -prefix_relto_examplesdir=$( -prefix=@prefix@ -examplesdir=@examplesdir@ -if [ ${examplesdir##${prefix}/} != ${examplesdir} ]; then - echo $(echo ${examplesdir##${prefix}/} | \ - sed 's,[^/][^/]*,..,g') -else - echo $prefix -fi -) - + # Where the tool is installed. # default is relative path to installed location of the tools -prefix="${prefix:-../${prefix_relto_examplesdir}}" +prefix="${prefix:-../../..}" PARALLEL=@PARALLEL@ # Am I in parallel mode? AR="@AR@" RANLIB="@RANLIB@" diff --git a/fortran/examples/Makefile.am b/fortran/examples/Makefile.am index fb510bc..6bf2edb 100644 --- a/fortran/examples/Makefile.am +++ b/fortran/examples/Makefile.am @@ -74,8 +74,8 @@ endif # Tell automake how to install examples # Note: no '/' after DESTDIR. Explanation in commence.am -EXAMPLEDIR=$(examplesdir)/fortran -EXAMPLETOPDIR=$(examplesdir) +EXAMPLEDIR=${DESTDIR}$(exec_prefix)/share/hdf5_examples/fortran +EXAMPLETOPDIR=${DESTDIR}$(exec_prefix)/share/hdf5_examples # List dependencies for each example. 
Normally, automake would take # care of this for us, but if we tell automake about the programs it diff --git a/fortran/examples/run-fortran-ex.sh.in b/fortran/examples/run-fortran-ex.sh.in index 81e54ea..aa17f89 100644 --- a/fortran/examples/run-fortran-ex.sh.in +++ b/fortran/examples/run-fortran-ex.sh.in @@ -18,7 +18,7 @@ # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # This script will compile and run the fortran examples from source files # -# installed in @examplesdir@/fortran using h5fc or h5pfc. The # +# installed in .../share/hdf5_examples/fortran using h5fc or h5pfc. The # # order for running programs with RunTest in the MAIN section below is taken # # from the Makefile. The order is important since some of the test programs # # use data files created by earlier test programs. Any future additions should # @@ -30,32 +30,9 @@ EXIT_SUCCESS=0 EXIT_FAILURE=1 -# -# Try to derive the path to the installation $prefix established -# by ./configure relative to the examples directory established by -# ./configure. If successful, set `prefix_relto_examplesdir` to the -# relative path. Otherwise, set `prefix_relto_examplesdir` to the -# absolute installation $prefix. -# -# This script uses the value of `prefix` in the user's environment, if -# it is set, below. The content of $() is evaluated in a sub-shell, so -# if `prefix` is set in the user's environment, the shell statements in -# $() won't clobbered it. -# -prefix_relto_examplesdir=$( -prefix=@prefix@ -examplesdir=@examplesdir@ -if [ ${examplesdir##${prefix}/} != ${examplesdir} ]; then - echo $(echo ${examplesdir##${prefix}/} | \ - sed 's,[^/][^/]*,..,g') -else - echo $prefix -fi -) - # Where the tool is installed. # default is relative path to installed location of the tools -prefix="${prefix:-../${prefix_relto_examplesdir}}" +prefix="${prefix:-../../..}" PARALLEL=@PARALLEL@ # Am I in parallel mode? AR="@AR@" RANLIB="@RANLIB@" diff --git a/hl/c++/examples/Makefile.am b/hl/c++/examples/Makefile.am index 592e8da..ce719f5 100644 --- a/hl/c++/examples/Makefile.am +++ b/hl/c++/examples/Makefile.am @@ -33,8 +33,8 @@ CXX_API=yes # Where to install examples # Note: no '/' after DESTDIR. Explanation in commence.am -EXAMPLEDIR=$(examplesdir)/hl/c++ -EXAMPLETOPDIR=$(examplesdir)/hl +EXAMPLEDIR=${DESTDIR}$(exec_prefix)/share/hdf5_examples/hl/c++ +EXAMPLETOPDIR=${DESTDIR}$(exec_prefix)/share/hdf5_examples/hl # How to build programs using h5c++ $(EXTRA_PROG): $(H5CPP) diff --git a/hl/c++/examples/run-hlc++-ex.sh.in b/hl/c++/examples/run-hlc++-ex.sh.in index 43831f5..eb688a1 100644 --- a/hl/c++/examples/run-hlc++-ex.sh.in +++ b/hl/c++/examples/run-hlc++-ex.sh.in @@ -18,7 +18,7 @@ # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # This script will compile and run the c++ examples from source files # -# installed in @examplesdir@/hl/c++ using h5c++. The # +# installed in .../share/hdf5_examples/hl/c++ using h5c++. The # # order for running programs with RunTest in the MAIN section below is taken # # from the Makefile. The order is important since some of the test programs # # use data files created by earlier test programs. Any future additions should # @@ -29,33 +29,9 @@ # Initializations EXIT_SUCCESS=0 EXIT_FAILURE=1 - -# -# Try to derive the path to the installation $prefix established -# by ./configure relative to the examples directory established by -# ./configure. If successful, set `prefix_relto_examplesdir` to the -# relative path. 
Otherwise, set `prefix_relto_examplesdir` to the -# absolute installation $prefix. -# -# This script uses the value of `prefix` in the user's environment, if -# it is set, below. The content of $() is evaluated in a sub-shell, so -# if `prefix` is set in the user's environment, the shell statements in -# $() won't clobbered it. -# -prefix_relto_examplesdir=$( -prefix=@prefix@ -examplesdir=@examplesdir@ -if [ ${examplesdir##${prefix}/} != ${examplesdir} ]; then - echo $(echo ${examplesdir##${prefix}/} | \ - sed 's,[^/][^/]*,..,g') -else - echo $prefix -fi -) - # Where the tool is installed. # default is relative path to installed location of the tools -prefix="${prefix:-../../${prefix_relto_examplesdir}}" +prefix="${prefix:-../../../..}" AR="@AR@" RANLIB="@RANLIB@" H5TOOL="h5c++" # The tool name diff --git a/hl/examples/Makefile.am b/hl/examples/Makefile.am index cc2d671..29e1a48 100644 --- a/hl/examples/Makefile.am +++ b/hl/examples/Makefile.am @@ -25,8 +25,8 @@ endif # Example directory # Note: no '/' after DESTDIR. Explanation in commence.am -EXAMPLEDIR=$(examplesdir)/hl/c -EXAMPLETOPDIR=$(examplesdir)/hl +EXAMPLEDIR=${DESTDIR}$(exec_prefix)/share/hdf5_examples/hl/c +EXAMPLETOPDIR=${DESTDIR}$(exec_prefix)/share/hdf5_examples/hl INSTALL_SCRIPT_FILES = run-hlc-ex.sh INSTALL_TOP_SCRIPT_FILES = run-hl-ex.sh diff --git a/hl/examples/run-hlc-ex.sh.in b/hl/examples/run-hlc-ex.sh.in index e6d0cc9..f51b165 100644 --- a/hl/examples/run-hlc-ex.sh.in +++ b/hl/examples/run-hlc-ex.sh.in @@ -18,7 +18,7 @@ # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # This script will compile and run the c examples from source files installed # -# in @examplesdir@/hl/c using h5cc or h5pc. The order for running # +# in .../share/hdf5_examples/hl/c using h5cc or h5pc. The order for running # # programs with RunTest in the MAIN section below is taken from the Makefile. # # The order is important since some of the test programs use data files created # # by earlier test programs. Any future additions should be placed accordingly. # @@ -29,32 +29,9 @@ EXIT_SUCCESS=0 EXIT_FAILURE=1 -# -# Try to derive the path to the installation $prefix established -# by ./configure relative to the examples directory established by -# ./configure. If successful, set `prefix_relto_examplesdir` to the -# relative path. Otherwise, set `prefix_relto_examplesdir` to the -# absolute installation $prefix. -# -# This script uses the value of `prefix` in the user's environment, if -# it is set, below. The content of $() is evaluated in a sub-shell, so -# if `prefix` is set in the user's environment, the shell statements in -# $() won't clobbered it. -# -prefix_relto_examplesdir=$( -prefix=@prefix@ -examplesdir=@examplesdir@ -if [ ${examplesdir##${prefix}/} != ${examplesdir} ]; then - echo $(echo ${examplesdir##${prefix}/} | \ - sed 's,[^/][^/]*,..,g') -else - echo $prefix -fi -) - # Where the tool is installed. # default is relative path to installed location of the tools -prefix="${prefix:-../../${prefix_relto_examplesdir}}" +prefix="${prefix:-../../../..}" PARALLEL=@PARALLEL@ # Am I in parallel mode? AR="@AR@" RANLIB="@RANLIB@" diff --git a/hl/fortran/examples/Makefile.am b/hl/fortran/examples/Makefile.am index b81cc6f..d383f9a 100644 --- a/hl/fortran/examples/Makefile.am +++ b/hl/fortran/examples/Makefile.am @@ -51,8 +51,8 @@ endif # Tell automake how to install examples # Note: no '/' after DESTDIR. 
Explanation in commence.am -EXAMPLEDIR=$(examplesdir)/hl/fortran -EXAMPLETOPDIR=$(examplesdir)/hl +EXAMPLEDIR=${DESTDIR}$(exec_prefix)/share/hdf5_examples/hl/fortran +EXAMPLETOPDIR=${DESTDIR}$(exec_prefix)/share/hdf5_examples/hl # List dependencies for each example. Normally, automake would take # care of this for us, but if we tell automake about the programs it diff --git a/hl/fortran/examples/run-hlfortran-ex.sh.in b/hl/fortran/examples/run-hlfortran-ex.sh.in index d7de8e3..5f12ef0 100644 --- a/hl/fortran/examples/run-hlfortran-ex.sh.in +++ b/hl/fortran/examples/run-hlfortran-ex.sh.in @@ -18,7 +18,7 @@ # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # This script will compile and run the fortran examples from source files # -# installed in @examplesdir@/hl/fortran using h5fc or h5pfc. The # +# installed in .../share/hdf5_examples/hl/fortran using h5fc or h5pfc. The # # order for running programs with RunTest in the MAIN section below is taken # # from the Makefile. The order is important since some of the test programs # # use data files created by earlier test programs. Any future additions should # @@ -32,7 +32,7 @@ EXIT_FAILURE=1 # Where the tool is installed. # default is relative path to installed location of the tools -prefix="${prefix:-@prefix@}" +prefix="${prefix:-../../../..}" PARALLEL=@PARALLEL@ # Am I in parallel mode? AR="@AR@" RANLIB="@RANLIB@" diff --git a/hl/src/H5DS.c b/hl/src/H5DS.c index ce61028..b24f887 100644 --- a/hl/src/H5DS.c +++ b/hl/src/H5DS.c @@ -1434,10 +1434,6 @@ herr_t H5DSset_label(hid_t did, unsigned int idx, const char *label) char ** buf; /* discarding the 'const' qualifier in the free */ char const ** const_buf; /* buf calls */ } u; - - u.buf = NULL; - u.const_buf = NULL; - /*------------------------------------------------------------------------- * parameter checking *------------------------------------------------------------------------- diff --git a/hl/test/H5srcdir_str.h.in b/hl/test/H5srcdir_str.h.in index ba30a88..bab1df3 100644 --- a/hl/test/H5srcdir_str.h.in +++ b/hl/test/H5srcdir_str.h.in @@ -16,5 +16,5 @@ */ /* Set the 'srcdir' path from configure time */ -#define config_srcdir "@srcdir@" +static const char *config_srcdir = "@srcdir@"; diff --git a/java/src/hdf/hdf5lib/HDF5Constants.java b/java/src/hdf/hdf5lib/HDF5Constants.java index f5be38d..2e80f2e 100644 --- a/java/src/hdf/hdf5lib/HDF5Constants.java +++ b/java/src/hdf/hdf5lib/HDF5Constants.java @@ -218,7 +218,6 @@ public class HDF5Constants { public static final int H5F_LIBVER_V18 = H5F_LIBVER_V18(); public static final int H5F_LIBVER_V110 = H5F_LIBVER_V110(); public static final int H5F_LIBVER_V112 = H5F_LIBVER_V112(); - public static final int H5F_LIBVER_V114 = H5F_LIBVER_V114(); public static final int H5F_LIBVER_NBOUNDS = H5F_LIBVER_NBOUNDS(); public static final int H5F_LIBVER_LATEST = H5F_LIBVER_LATEST(); public static final int H5F_OBJ_ALL = H5F_OBJ_ALL(); @@ -1060,8 +1059,6 @@ public class HDF5Constants { private static native final int H5F_LIBVER_V112(); - private static native final int H5F_LIBVER_V114(); - private static native final int H5F_LIBVER_NBOUNDS(); private static native final int H5F_LIBVER_LATEST(); diff --git a/java/src/jni/h5Constants.c b/java/src/jni/h5Constants.c index 69adebd..9f52b3c 100644 --- a/java/src/jni/h5Constants.c +++ b/java/src/jni/h5Constants.c @@ -405,8 +405,6 @@ Java_hdf_hdf5lib_HDF5Constants_H5F_1LIBVER_1V110(JNIEnv *env, jclass cls){return JNIEXPORT jint JNICALL 
Java_hdf_hdf5lib_HDF5Constants_H5F_1LIBVER_1V112(JNIEnv *env, jclass cls){return H5F_LIBVER_V112;} JNIEXPORT jint JNICALL -Java_hdf_hdf5lib_HDF5Constants_H5F_1LIBVER_1V114(JNIEnv *env, jclass cls){return H5F_LIBVER_V114;} -JNIEXPORT jint JNICALL Java_hdf_hdf5lib_HDF5Constants_H5F_1LIBVER_1NBOUNDS(JNIEnv *env, jclass cls){return H5F_LIBVER_NBOUNDS;} JNIEXPORT jint JNICALL Java_hdf_hdf5lib_HDF5Constants_H5F_1LIBVER_1LATEST(JNIEnv *env, jclass cls){return H5F_LIBVER_LATEST;} diff --git a/src/H5Aint.c b/src/H5Aint.c index 436fced..67f914f 100644 --- a/src/H5Aint.c +++ b/src/H5Aint.c @@ -109,7 +109,6 @@ const unsigned H5O_attr_ver_bounds[] = { H5O_ATTR_VERSION_1, /* H5F_LIBVER_EARLIEST */ H5O_ATTR_VERSION_3, /* H5F_LIBVER_V18 */ H5O_ATTR_VERSION_3, /* H5F_LIBVER_V110 */ - H5O_ATTR_VERSION_3, /* H5F_LIBVER_V112 */ H5O_ATTR_VERSION_LATEST /* H5F_LIBVER_LATEST */ }; diff --git a/src/H5Cdbg.c b/src/H5Cdbg.c index cb1d0e2..1f55e86 100644 --- a/src/H5Cdbg.c +++ b/src/H5Cdbg.c @@ -319,8 +319,9 @@ H5C_dump_cache_skip_list(H5C_t * cache_ptr, char * calling_fcn) (int)(entry_ptr->is_dirty), entry_ptr->type->name); - HDfprintf(stdout, " node_ptr = %p, item = %p\n", - node_ptr, H5SL_item(node_ptr)); + HDfprintf(stdout, " node_ptr = 0x%llx, item = %p\n", + (unsigned long long)node_ptr, + H5SL_item(node_ptr)); /* increment node_ptr before we delete its target */ node_ptr = H5SL_next(node_ptr); diff --git a/src/H5Cpkg.h b/src/H5Cpkg.h index b8648f0..9156c0d 100644 --- a/src/H5Cpkg.h +++ b/src/H5Cpkg.h @@ -4694,7 +4694,7 @@ struct H5C_t { uint32_t num_last_entries; #if H5C_DO_SANITY_CHECKS int32_t slist_len_increase; - int64_t slist_size_increase; + ssize_t slist_size_increase; #endif /* H5C_DO_SANITY_CHECKS */ /* Fields for maintaining list of tagged entries */ diff --git a/src/H5Dchunk.c b/src/H5Dchunk.c index 46f356b..381ca4a 100644 --- a/src/H5Dchunk.c +++ b/src/H5Dchunk.c @@ -284,8 +284,7 @@ static int H5D__chunk_format_convert_cb(const H5D_chunk_rec_t *chunk_rec, void * static herr_t H5D__chunk_set_info_real(H5O_layout_chunk_t *layout, unsigned ndims, const hsize_t *curr_dims, const hsize_t *max_dims); static void *H5D__chunk_mem_alloc(size_t size, const H5O_pline_t *pline); -static void *H5D__chunk_mem_xfree(void *chk, void *pline); -static void H5D__chunk_mem_xfree_wrapper(void *chk, void *pline); +static void *H5D__chunk_mem_xfree(void *chk, const void *pline); static void *H5D__chunk_mem_realloc(void *chk, size_t size, const H5O_pline_t *pline); static herr_t H5D__chunk_cinfo_cache_reset(H5D_chunk_cached_t *last); @@ -1104,8 +1103,11 @@ H5D__chunk_io_init(const H5D_io_info_t *io_info, const H5D_type_info_t *type_inf H5S_t *tmp_mspace = NULL; /* Temporary memory dataspace */ hssize_t old_offset[H5O_LAYOUT_NDIMS]; /* Old selection offset */ htri_t file_space_normalized = FALSE; /* File dataspace was normalized */ + H5T_t *file_type = NULL; /* Temporary copy of file datatype for iteration */ + hbool_t iter_init = FALSE; /* Selection iteration info has been initialized */ unsigned f_ndims; /* The number of dimensions of the file's dataspace */ int sm_ndims; /* The number of dimensions of the memory buffer's dataspace (signed) */ + char bogus; /* "bogus" buffer to pass to selection iterator */ unsigned u; /* Local index variable */ herr_t ret_value = SUCCEED; /* Return value */ @@ -1429,7 +1431,7 @@ H5D__chunk_mem_alloc(size_t size, const H5O_pline_t *pline) *------------------------------------------------------------------------- */ static void * -H5D__chunk_mem_xfree(void *chk, void *_pline) 
+H5D__chunk_mem_xfree(void *chk, const void *_pline) { const H5O_pline_t *pline = (const H5O_pline_t *)_pline; @@ -1445,17 +1447,6 @@ H5D__chunk_mem_xfree(void *chk, void *_pline) FUNC_LEAVE_NOAPI(NULL) } /* H5D__chunk_mem_xfree() */ -/* H5D__chunk_mem_xfree_wrapper() safely adapts the type of - * H5D__chunk_mem_xfree() to an H5MM_free_t callback, without making - * compilers warn. It is used with H5D__chunk_mem_xfree_wrapper(), for - * example. - */ -static void -H5D__chunk_mem_xfree_wrapper(void *chk, void *_pline) -{ - (void)H5D__chunk_mem_xfree(chk, _pline); -} - /*------------------------------------------------------------------------- * Function: H5D__chunk_mem_realloc @@ -4459,7 +4450,7 @@ H5D__chunk_allocate(const H5D_io_info_t *io_info, hbool_t full_overwrite, hsize_ /* (delay allocating fill buffer for VL datatypes until refilling) */ /* (casting away const OK - QAK) */ if(H5D__fill_init(&fb_info, NULL, (H5MM_allocate_t)H5D__chunk_mem_alloc, - (void *)pline, H5D__chunk_mem_xfree_wrapper, (void *)pline, + (void *)pline, (H5MM_free_t)H5D__chunk_mem_xfree, (void *)pline, &dset->shared->dcpl_cache.fill, dset->shared->type, dset->shared->type_id, (size_t)0, orig_chunk_size) < 0) HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "can't initialize fill buffer info") @@ -7161,6 +7152,7 @@ H5D__get_num_chunks(const H5D_t *dset, const H5S_t H5_ATTR_UNUSED *space, hsize_ hsize_t num_chunks = 0; /* Number of written chunks */ H5D_rdcc_ent_t *ent; /* Cache entry */ const H5D_rdcc_t *rdcc = NULL; /* Raw data chunk cache */ + const H5O_layout_t *layout; /* Dataset layout */ herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_PACKAGE_TAG(dset->oloc.addr) @@ -7170,6 +7162,7 @@ H5D__get_num_chunks(const H5D_t *dset, const H5S_t H5_ATTR_UNUSED *space, hsize_ HDassert(space); HDassert(nchunks); + layout = &(dset->shared->layout); /* Dataset layout */ rdcc = &(dset->shared->cache.chunk); /* raw data chunk cache */ HDassert(rdcc); diff --git a/src/H5Dlayout.c b/src/H5Dlayout.c index b882578..c71cdc4 100644 --- a/src/H5Dlayout.c +++ b/src/H5Dlayout.c @@ -51,7 +51,6 @@ const unsigned H5O_layout_ver_bounds[] = { H5O_LAYOUT_VERSION_1, /* H5F_LIBVER_EARLIEST */ H5O_LAYOUT_VERSION_3, /* H5F_LIBVER_V18 */ /* H5O_LAYOUT_VERSION_DEFAULT */ H5O_LAYOUT_VERSION_4, /* H5F_LIBVER_V110 */ - H5O_LAYOUT_VERSION_4, /* H5F_LIBVER_V112 */ H5O_LAYOUT_VERSION_LATEST /* H5F_LIBVER_LATEST */ }; diff --git a/src/H5F.c b/src/H5F.c index b395ccc..35176fc 100644 --- a/src/H5F.c +++ b/src/H5F.c @@ -768,7 +768,7 @@ H5Fopen(const char *filename, unsigned flags, hid_t fapl_id) /* Get the file object */ if(NULL == (vol_obj = H5VL_vol_object(ret_value))) - HGOTO_ERROR(H5E_FILE, H5E_CANTGET, H5I_INVALID_HID, "invalid object identifier") + HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, H5I_INVALID_HID, "invalid object identifier") /* Make the post open callback */ if(H5VL_file_specific(vol_obj, H5VL_FILE_POST_OPEN, H5P_DATASET_XFER_DEFAULT, H5_REQUEST_NULL) < 0) diff --git a/src/H5FDlog.c b/src/H5FDlog.c index 1c7d549..ac5667f 100644 --- a/src/H5FDlog.c +++ b/src/H5FDlog.c @@ -489,8 +489,8 @@ H5FD_log_open(const char *name, unsigned flags, hid_t fapl_id, haddr_t maxaddr) #endif #ifdef H5_HAVE_GETTIMEOFDAY struct timeval timeval_start; - struct timeval open_timeval_diff = {0, 0}; - struct timeval stat_timeval_diff = {0, 0}; + struct timeval open_timeval_diff; + struct timeval stat_timeval_diff; #endif /* H5_HAVE_GETTIMEOFDAY */ h5_stat_t sb; H5FD_t *ret_value = NULL; /* Return value */ diff --git a/src/H5FSsection.c b/src/H5FSsection.c index 
cf4a587..df67bd9 100644 --- a/src/H5FSsection.c +++ b/src/H5FSsection.c @@ -371,10 +371,10 @@ HDfprintf(stderr, "%s: fspace->alloc_sect_size = %Hu, fspace->sect_size = %Hu\n" /* Check if section info lock count dropped to zero */ if(fspace->sinfo_lock_count == 0) { hbool_t release_sinfo_space = FALSE; /* Flag to indicate section info space in file should be released */ - hbool_t closing_or_flushing = f->shared->closing; /* Is closing or flushing in progress */ + hbool_t flush_in_progress = FALSE; /* Is flushing in progress */ - /* Check whether cache-flush is in progress if closing is not. */ - if(!closing_or_flushing && H5AC_get_cache_flush_in_progress(f->shared->cache, &closing_or_flushing) < 0) + /* Check whether cache is flush_in_progress */ + if(H5AC_get_cache_flush_in_progress(f->shared->cache, &flush_in_progress) < 0) HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't get flush_in_progress") /* Check if we actually protected the section info */ @@ -390,7 +390,7 @@ HDfprintf(stderr, "%s: fspace->alloc_sect_size = %Hu, fspace->sect_size = %Hu\n" cache_flags |= H5AC__DIRTIED_FLAG; /* On file close or flushing, does not allow section info to shrink in size */ - if(closing_or_flushing) { + if(f->shared->closing || flush_in_progress) { if(fspace->sect_size > fspace->alloc_sect_size) cache_flags |= H5AC__DELETED_FLAG | H5AC__TAKE_OWNERSHIP_FLAG; else @@ -441,7 +441,7 @@ HDfprintf(stderr, "%s: Relinquishing section info ownership\n", FUNC); /* Set flag to release section info space in file */ /* On file close or flushing, only need to release section info with size bigger than previous section */ - if(closing_or_flushing) { + if(f->shared->closing || flush_in_progress) { if(fspace->sect_size > fspace->alloc_sect_size) release_sinfo_space = TRUE; else diff --git a/src/H5Fpublic.h b/src/H5Fpublic.h index 02568c9..49c8b4b 100644 --- a/src/H5Fpublic.h +++ b/src/H5Fpublic.h @@ -180,11 +180,10 @@ typedef enum H5F_libver_t { H5F_LIBVER_V18 = 1, /* Use the latest v18 format for storing objects */ H5F_LIBVER_V110 = 2, /* Use the latest v110 format for storing objects */ H5F_LIBVER_V112 = 3, /* Use the latest v112 format for storing objects */ - H5F_LIBVER_V114 = 4, /* Use the latest v114 format for storing objects */ H5F_LIBVER_NBOUNDS } H5F_libver_t; -#define H5F_LIBVER_LATEST H5F_LIBVER_V114 +#define H5F_LIBVER_LATEST H5F_LIBVER_V112 /* File space handling strategy */ typedef enum H5F_fspace_strategy_t { diff --git a/src/H5Fsuper.c b/src/H5Fsuper.c index f8e8f3f..1f3b08c 100644 --- a/src/H5Fsuper.c +++ b/src/H5Fsuper.c @@ -77,7 +77,6 @@ static const unsigned HDF5_superblock_ver_bounds[] = { HDF5_SUPERBLOCK_VERSION_DEF, /* H5F_LIBVER_EARLIEST */ HDF5_SUPERBLOCK_VERSION_2, /* H5F_LIBVER_V18 */ HDF5_SUPERBLOCK_VERSION_3, /* H5F_LIBVER_V110 */ - HDF5_SUPERBLOCK_VERSION_3, /* H5F_LIBVER_V112 */ HDF5_SUPERBLOCK_VERSION_LATEST /* H5F_LIBVER_LATEST */ }; diff --git a/src/H5Fsuper_cache.c b/src/H5Fsuper_cache.c index ce216a2..125d6cf 100644 --- a/src/H5Fsuper_cache.c +++ b/src/H5Fsuper_cache.c @@ -347,9 +347,9 @@ static herr_t H5F__cache_superblock_get_final_load_size(const void *_image, size_t image_len, void *_udata, size_t *actual_len) { - const uint8_t *image = _image; /* Pointer into raw data buffer */ + const uint8_t *image = (const uint8_t *)_image; /* Pointer into raw data buffer */ H5F_superblock_cache_ud_t *udata = (H5F_superblock_cache_ud_t *)_udata; /* User data */ - H5F_super_t sblock; /* Temporary file superblock */ + H5F_super_t sblock; /* Temporary file superblock */ htri_t ret_value = 
SUCCEED; /* Return value */ FUNC_ENTER_STATIC @@ -361,15 +361,6 @@ H5F__cache_superblock_get_final_load_size(const void *_image, size_t image_len, HDassert(*actual_len == image_len); HDassert(image_len >= H5F_SUPERBLOCK_FIXED_SIZE + 6); - /* Initialize because GCC 5.5 does not realize that - * H5F__superblock_prefix_decode() initializes it. - * - * TBD condition on compiler version. - */ - sblock.super_vers = 0; - sblock.sizeof_addr = 0; - sblock.sizeof_size = 0; - /* Deserialize the file superblock's prefix */ if(H5F__superblock_prefix_decode(&sblock, &image, udata, TRUE) < 0) HGOTO_ERROR(H5E_FILE, H5E_CANTDECODE, FAIL, "can't decode file superblock prefix") @@ -402,7 +393,7 @@ done: static htri_t H5F__cache_superblock_verify_chksum(const void *_image, size_t len, void *_udata) { - const uint8_t *image = _image; /* Pointer into raw data buffer */ + const uint8_t *image = (const uint8_t *)_image; /* Pointer into raw data buffer */ H5F_superblock_cache_ud_t *udata = (H5F_superblock_cache_ud_t *)_udata; /* User data */ uint32_t stored_chksum; /* Stored metadata checksum value */ uint32_t computed_chksum; /* Computed metadata checksum value */ @@ -448,7 +439,7 @@ H5F__cache_superblock_deserialize(const void *_image, size_t len, void *_udata, { H5F_super_t *sblock = NULL; /* File's superblock */ H5F_superblock_cache_ud_t *udata = (H5F_superblock_cache_ud_t *)_udata; /* User data */ - const uint8_t *image = _image; /* Pointer into raw data buffer */ + const uint8_t *image = (const uint8_t *)_image; /* Pointer into raw data buffer */ H5F_super_t *ret_value = NULL; /* Return value */ FUNC_ENTER_STATIC @@ -673,7 +664,7 @@ H5F__cache_superblock_serialize(const H5F_t *f, void *_image, size_t H5_ATTR_UNU void *_thing) { H5F_super_t *sblock = (H5F_super_t *)_thing; /* Pointer to the object */ - uint8_t *image = _image; /* Pointer into raw data buffer */ + uint8_t *image = (uint8_t *)_image; /* Pointer into raw data buffer */ haddr_t rel_eof; /* Relative EOF for file */ herr_t ret_value = SUCCEED; /* Return value */ @@ -879,7 +870,7 @@ static herr_t H5F__cache_drvrinfo_get_final_load_size(const void *_image, size_t image_len, void *_udata, size_t *actual_len) { - const uint8_t *image = _image; /* Pointer into raw data buffer */ + const uint8_t *image = (const uint8_t *)_image; /* Pointer into raw data buffer */ H5F_drvrinfo_cache_ud_t *udata = (H5F_drvrinfo_cache_ud_t *)_udata; /* User data */ H5O_drvinfo_t drvrinfo; /* Driver info */ herr_t ret_value = SUCCEED; /* Return value */ @@ -893,7 +884,6 @@ H5F__cache_drvrinfo_get_final_load_size(const void *_image, size_t image_len, HDassert(*actual_len == image_len); HDassert(image_len == H5F_DRVINFOBLOCK_HDR_SIZE); - drvrinfo.len = 0; /* Deserialize the file driver info's prefix */ if(H5F__drvrinfo_prefix_decode(&drvrinfo, NULL, &image, udata, TRUE) < 0) HGOTO_ERROR(H5E_FILE, H5E_CANTDECODE, FAIL, "can't decode file driver info prefix") @@ -926,7 +916,7 @@ H5F__cache_drvrinfo_deserialize(const void *_image, size_t len, void *_udata, { H5O_drvinfo_t *drvinfo = NULL; /* Driver info */ H5F_drvrinfo_cache_ud_t *udata = (H5F_drvrinfo_cache_ud_t *)_udata; /* User data */ - const uint8_t *image = _image; /* Pointer into raw data buffer */ + const uint8_t *image = (const uint8_t *)_image; /* Pointer into raw data buffer */ char drv_name[9]; /* Name of driver */ H5O_drvinfo_t *ret_value = NULL; /* Return value */ @@ -1020,7 +1010,7 @@ H5F__cache_drvrinfo_serialize(const H5F_t *f, void *_image, size_t len, void *_thing) { H5O_drvinfo_t *drvinfo = (H5O_drvinfo_t 
*)_thing; /* Pointer to the object */ - uint8_t *image = _image; /* Pointer into raw data buffer */ + uint8_t *image = (uint8_t *)_image; /* Pointer into raw data buffer */ uint8_t *dbuf; /* Pointer to beginning of driver info */ herr_t ret_value = SUCCEED; /* Return value */ diff --git a/src/H5HFcache.c b/src/H5HFcache.c index ab448ef..2d1c1f2 100644 --- a/src/H5HFcache.c +++ b/src/H5HFcache.c @@ -406,7 +406,7 @@ static herr_t H5HF__cache_hdr_get_final_load_size(const void *_image, size_t image_len, void *_udata, size_t *actual_len) { - H5HF_hdr_t hdr; /* Temporary fractal heap header */ + H5HF_hdr_t hdr; /* Temporary fractal heap header */ const uint8_t *image = (const uint8_t *)_image; /* Pointer into into supplied image */ H5HF_hdr_cache_ud_t *udata = (H5HF_hdr_cache_ud_t *)_udata; /* User data for callback */ herr_t ret_value = SUCCEED; /* Return value */ @@ -419,12 +419,6 @@ H5HF__cache_hdr_get_final_load_size(const void *_image, size_t image_len, HDassert(actual_len); HDassert(*actual_len == image_len); - /* Initialize because GCC 5.5 does not realize that the - * H5HF__hdr_prefix_decode() call is sufficient to initialize. - * GCC 8 is clever enough to see that the variable is initialized. - * TBD condition on compiler version. - */ - hdr.filter_len = 0; /* Deserialize the fractal heap header's prefix */ if(H5HF__hdr_prefix_decode(&hdr, &image) < 0) HGOTO_ERROR(H5E_HEAP, H5E_CANTDECODE, FAIL, "can't decode fractal heap header prefix") diff --git a/src/H5HFtiny.c b/src/H5HFtiny.c index 79d790b..0c27180 100644 --- a/src/H5HFtiny.c +++ b/src/H5HFtiny.c @@ -377,7 +377,7 @@ done: herr_t H5HF_tiny_remove(H5HF_hdr_t *hdr, const uint8_t *id) { - size_t enc_obj_size = 0; /* Encoded object size */ + size_t enc_obj_size; /* Encoded object size */ herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_NOAPI_NOINIT diff --git a/src/H5HGcache.c b/src/H5HGcache.c index 9f6e73f..29e88df 100644 --- a/src/H5HGcache.c +++ b/src/H5HGcache.c @@ -205,7 +205,7 @@ static herr_t H5HG__cache_heap_get_final_load_size(const void *image, size_t image_len, void *udata, size_t *actual_len) { - H5HG_heap_t heap; /* Global heap */ + H5HG_heap_t heap; /* Global heap */ herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_STATIC @@ -217,12 +217,6 @@ H5HG__cache_heap_get_final_load_size(const void *image, size_t image_len, HDassert(*actual_len == image_len); HDassert(image_len == H5HG_MINSIZE); - /* Initialize because GCC 5.5 cannot see that - * H5HG__hdr_deserialize() initializes. - * - * TBD condition on compiler version. 
- */ - heap.size = 0; /* Deserialize the heap's header */ if(H5HG__hdr_deserialize(&heap, (const uint8_t *)image, (const H5F_t *)udata) < 0) HGOTO_ERROR(H5E_HEAP, H5E_CANTDECODE, FAIL, "can't decode global heap prefix") diff --git a/src/H5Ofill.c b/src/H5Ofill.c index 0cc58a6..36a993f 100644 --- a/src/H5Ofill.c +++ b/src/H5Ofill.c @@ -158,7 +158,6 @@ const unsigned H5O_fill_ver_bounds[] = { H5O_FILL_VERSION_1, /* H5F_LIBVER_EARLIEST */ H5O_FILL_VERSION_3, /* H5F_LIBVER_V18 */ H5O_FILL_VERSION_3, /* H5F_LIBVER_V110 */ - H5O_FILL_VERSION_3, /* H5F_LIBVER_V112 */ H5O_FILL_VERSION_LATEST /* H5F_LIBVER_LATEST */ }; diff --git a/src/H5Ofsinfo.c b/src/H5Ofsinfo.c index 5d66022..37165eb 100644 --- a/src/H5Ofsinfo.c +++ b/src/H5Ofsinfo.c @@ -71,7 +71,6 @@ static const unsigned H5O_fsinfo_ver_bounds[] = { H5O_INVALID_VERSION, /* H5F_LIBVER_EARLIEST */ H5O_INVALID_VERSION, /* H5F_LIBVER_V18 */ H5O_FSINFO_VERSION_1, /* H5F_LIBVER_V110 */ - H5O_FSINFO_VERSION_1, /* H5F_LIBVER_V112 */ H5O_FSINFO_VERSION_LATEST /* H5F_LIBVER_LATEST */ }; #define N_FSINFO_VERSION_BOUNDS H5F_LIBVER_NBOUNDS diff --git a/src/H5Oint.c b/src/H5Oint.c index 0029976..de64b49 100644 --- a/src/H5Oint.c +++ b/src/H5Oint.c @@ -31,7 +31,6 @@ /* Headers */ /***********/ #include "H5private.h" /* Generic Functions */ -#include "H5CXprivate.h" /* API contexts */ #include "H5Eprivate.h" /* Error handling */ #include "H5Fprivate.h" /* File access */ #include "H5FLprivate.h" /* Free lists */ @@ -132,7 +131,6 @@ const unsigned H5O_obj_ver_bounds[] = { H5O_VERSION_1, /* H5F_LIBVER_EARLIEST */ H5O_VERSION_2, /* H5F_LIBVER_V18 */ H5O_VERSION_2, /* H5F_LIBVER_V110 */ - H5O_VERSION_2, /* H5F_LIBVER_V112 */ H5O_VERSION_LATEST /* H5F_LIBVER_LATEST */ }; diff --git a/src/H5Opline.c b/src/H5Opline.c index 05744e3..40df939 100644 --- a/src/H5Opline.c +++ b/src/H5Opline.c @@ -94,7 +94,6 @@ const unsigned H5O_pline_ver_bounds[] = { H5O_PLINE_VERSION_1, /* H5F_LIBVER_EARLIEST */ H5O_PLINE_VERSION_2, /* H5F_LIBVER_V18 */ H5O_PLINE_VERSION_2, /* H5F_LIBVER_V110 */ - H5O_PLINE_VERSION_2, /* H5F_LIBVER_V112 */ H5O_PLINE_VERSION_LATEST /* H5F_LIBVER_LATEST */ }; diff --git a/src/H5Pmapl.c b/src/H5Pmapl.c index 59da91a..fe5be0f 100644 --- a/src/H5Pmapl.c +++ b/src/H5Pmapl.c @@ -138,7 +138,6 @@ done: } /* end H5P__macc_reg_prop() */ -#ifdef H5_HAVE_MAP_API /*------------------------------------------------------------------------- * Function: H5Pset_map_iterate_hints * @@ -215,4 +214,4 @@ H5Pget_map_iterate_hints(hid_t mapl_id, size_t *key_prefetch_size, size_t *key_a done: FUNC_LEAVE_API(ret_value) } /* end H5Pget_map_iterate_hints() */ -#endif /* H5_HAVE_MAP_API */ + diff --git a/src/H5Rpublic.h b/src/H5Rpublic.h index 3577ef9..585cb85 100644 --- a/src/H5Rpublic.h +++ b/src/H5Rpublic.h @@ -70,18 +70,7 @@ typedef haddr_t hobj_ref_t; * machine (8 bytes currently) plus an int. * Note! This type can only be used with the "native" HDF5 VOL connector. */ -typedef struct { - unsigned char content[H5R_DSET_REG_REF_BUF_SIZE]; -} hdset_reg_ref_t; - -/** - * Opaque reference type. The same reference type is used for object, - * dataset region and attribute references. This is the type that - * should always be used with the current reference API. - */ -typedef struct { - unsigned char content[H5R_REF_BUF_SIZE]; -} H5R_ref_t; +typedef unsigned char hdset_reg_ref_t[H5R_DSET_REG_REF_BUF_SIZE]; /** * Opaque reference type. 
The same reference type is used for object, diff --git a/src/H5S.c b/src/H5S.c index ddfd064..9eda3ae 100644 --- a/src/H5S.c +++ b/src/H5S.c @@ -68,7 +68,6 @@ const unsigned H5O_sdspace_ver_bounds[] = { H5O_SDSPACE_VERSION_1, /* H5F_LIBVER_EARLIEST */ H5O_SDSPACE_VERSION_2, /* H5F_LIBVER_V18 */ H5O_SDSPACE_VERSION_2, /* H5F_LIBVER_V110 */ - H5O_SDSPACE_VERSION_2, /* H5F_LIBVER_V112 */ H5O_SDSPACE_VERSION_LATEST /* H5F_LIBVER_LATEST */ }; diff --git a/src/H5SL.c b/src/H5SL.c index ec06395..5f00fb8 100644 --- a/src/H5SL.c +++ b/src/H5SL.c @@ -1394,7 +1394,7 @@ H5SL_search(H5SL_t *slist, const void *key) { H5SL_node_t *x; /* Current node to examine */ uint32_t hashval = 0; /* Hash value for key */ - void *ret_value = NULL; /* Return value */ + void *ret_value; /* Return value */ FUNC_ENTER_NOAPI_NOINIT_NOERR @@ -1695,7 +1695,7 @@ H5SL_find(H5SL_t *slist, const void *key) { H5SL_node_t *x; /* Current node to examine */ uint32_t hashval = 0; /* Hash value for key */ - H5SL_node_t *ret_value = NULL; /* Return value */ + H5SL_node_t *ret_value; /* Return value */ FUNC_ENTER_NOAPI_NOINIT_NOERR diff --git a/src/H5SM.c b/src/H5SM.c index 91ef98b..290e575 100644 --- a/src/H5SM.c +++ b/src/H5SM.c @@ -313,7 +313,7 @@ ssize_t H5SM_get_index(const H5SM_master_table_t *table, unsigned type_id) { size_t x; - unsigned type_flag = 0; + unsigned type_flag; ssize_t ret_value = FAIL; FUNC_ENTER_NOAPI_NOINIT @@ -353,7 +353,7 @@ htri_t H5SM_type_shared(H5F_t *f, unsigned type_id) { H5SM_master_table_t *table = NULL; /* Shared object master table */ - unsigned type_flag = 0; /* Flag corresponding to message type */ + unsigned type_flag; /* Flag corresponding to message type */ size_t u; /* Local index variable */ htri_t ret_value = FALSE; /* Return value */ diff --git a/src/H5Shyper.c b/src/H5Shyper.c index c4d5052..42c0043 100644 --- a/src/H5Shyper.c +++ b/src/H5Shyper.c @@ -252,7 +252,6 @@ const unsigned H5O_sds_hyper_ver_bounds[] = { H5S_HYPER_VERSION_1, /* H5F_LIBVER_EARLIEST */ H5S_HYPER_VERSION_1, /* H5F_LIBVER_V18 */ H5S_HYPER_VERSION_2, /* H5F_LIBVER_V110 */ - H5S_HYPER_VERSION_3, /* H5F_LIBVER_V112 */ H5S_HYPER_VERSION_3 /* H5F_LIBVER_LATEST */ }; diff --git a/src/H5Spoint.c b/src/H5Spoint.c index 721211e..5e2bc8b 100644 --- a/src/H5Spoint.c +++ b/src/H5Spoint.c @@ -140,7 +140,6 @@ const unsigned H5O_sds_point_ver_bounds[] = { H5S_POINT_VERSION_1, /* H5F_LIBVER_EARLIEST */ H5S_POINT_VERSION_1, /* H5F_LIBVER_V18 */ H5S_POINT_VERSION_1, /* H5F_LIBVER_V110 */ - H5S_POINT_VERSION_2, /* H5F_LIBVER_V112 */ H5S_POINT_VERSION_2 /* H5F_LIBVER_LATEST */ }; diff --git a/src/H5Sselect.c b/src/H5Sselect.c index 664f4b5..eafb369 100644 --- a/src/H5Sselect.c +++ b/src/H5Sselect.c @@ -3043,7 +3043,7 @@ H5Ssel_iter_get_seq_list(hid_t sel_iter_id, size_t maxseq, size_t maxbytes, HGOTO_ERROR(H5E_DATASPACE, H5E_BADVALUE, FAIL, "length array pointer is NULL") /* Get the sequences of bytes */ - if(maxseq > 0 && maxbytes > 0 && sel_iter->elmt_left > 0) { + if(maxseq > 0 && maxbytes > 0) { if(H5S_SELECT_ITER_GET_SEQ_LIST(sel_iter, maxseq, maxbytes, nseq, nbytes, off, len) < 0) HGOTO_ERROR(H5E_DATASPACE, H5E_CANTGET, FAIL, "sequence length generation failed") } /* end if */ diff --git a/src/H5T.c b/src/H5T.c index 23c7ff6..daf5434 100644 --- a/src/H5T.c +++ b/src/H5T.c @@ -533,7 +533,6 @@ const unsigned H5O_dtype_ver_bounds[] = { H5O_DTYPE_VERSION_1, /* H5F_LIBVER_EARLIEST */ H5O_DTYPE_VERSION_3, /* H5F_LIBVER_V18 */ H5O_DTYPE_VERSION_3, /* H5F_LIBVER_V110 */ - H5O_DTYPE_VERSION_3, /* H5F_LIBVER_V112 */ H5O_DTYPE_VERSION_LATEST 
/* H5F_LIBVER_LATEST */ }; @@ -3321,7 +3320,7 @@ done: *------------------------------------------------------------------------- */ H5T_t * -H5T_copy(const H5T_t *old_dt, H5T_copy_t method) +H5T_copy(H5T_t *old_dt, H5T_copy_t method) { H5T_t *new_dt = NULL, *tmp = NULL; H5T_shared_t *reopened_fo = NULL; diff --git a/src/H5Tprivate.h b/src/H5Tprivate.h index 86bc174..d8e98af 100644 --- a/src/H5Tprivate.h +++ b/src/H5Tprivate.h @@ -109,7 +109,7 @@ H5_DLLVAR H5T_order_t H5T_native_order_g; /* Private functions */ H5_DLL herr_t H5T_init(void); -H5_DLL H5T_t *H5T_copy(const H5T_t *old_dt, H5T_copy_t method); +H5_DLL H5T_t *H5T_copy(H5T_t *old_dt, H5T_copy_t method); H5_DLL herr_t H5T_lock(H5T_t *dt, hbool_t immutable); H5_DLL herr_t H5T_close(H5T_t *dt); H5_DLL herr_t H5T_close_real(H5T_t *dt); diff --git a/src/H5VLcallback.c b/src/H5VLcallback.c index 77df207..3f24ce2 100644 --- a/src/H5VLcallback.c +++ b/src/H5VLcallback.c @@ -82,10 +82,10 @@ static void *H5VL__dataset_open(void *obj, const H5VL_loc_params_t *loc_params, const H5VL_class_t *cls, const char *name, hid_t dapl_id, hid_t dxpl_id, void **req); static herr_t H5VL__dataset_read(void *dset, const H5VL_class_t *cls, - hid_t mem_type_id, hid_t mem_space_id, hid_t file_space_id, hid_t dxpl_id, + hid_t mem_type_id, hid_t mem_space_id, hid_t file_space_id, hid_t plist_id, void *buf, void **req); static herr_t H5VL__dataset_write(void *obj, const H5VL_class_t *cls, - hid_t mem_type_id, hid_t mem_space_id, hid_t file_space_id, hid_t dxpl_id, + hid_t mem_type_id, hid_t mem_space_id, hid_t file_space_id, hid_t plist_id, const void *buf, void **req); static herr_t H5VL__dataset_get(void *obj, const H5VL_class_t *cls, H5VL_dataset_get_t get_type, hid_t dxpl_id, void **req, va_list arguments); @@ -2015,7 +2015,7 @@ done: */ static herr_t H5VL__dataset_read(void *obj, const H5VL_class_t *cls, hid_t mem_type_id, - hid_t mem_space_id, hid_t file_space_id, hid_t dxpl_id, void *buf, + hid_t mem_space_id, hid_t file_space_id, hid_t plist_id, void *buf, void **req) { herr_t ret_value = SUCCEED; /* Return value */ @@ -2027,7 +2027,7 @@ H5VL__dataset_read(void *obj, const H5VL_class_t *cls, hid_t mem_type_id, HGOTO_ERROR(H5E_VOL, H5E_UNSUPPORTED, FAIL, "VOL connector has no 'dataset read' method") /* Call the corresponding VOL callback */ - if((cls->dataset_cls.read)(obj, mem_type_id, mem_space_id, file_space_id, dxpl_id, buf, req) < 0) + if((cls->dataset_cls.read)(obj, mem_type_id, mem_space_id, file_space_id, plist_id, buf, req) < 0) HGOTO_ERROR(H5E_VOL, H5E_READERROR, FAIL, "dataset read failed") done: @@ -2047,7 +2047,7 @@ done: */ herr_t H5VL_dataset_read(const H5VL_object_t *vol_obj, hid_t mem_type_id, - hid_t mem_space_id, hid_t file_space_id, hid_t dxpl_id, void *buf, + hid_t mem_space_id, hid_t file_space_id, hid_t plist_id, void *buf, void **req) { hbool_t vol_wrapper_set = FALSE; /* Whether the VOL object wrapping context was set up */ @@ -2061,7 +2061,7 @@ H5VL_dataset_read(const H5VL_object_t *vol_obj, hid_t mem_type_id, vol_wrapper_set = TRUE; /* Call the corresponding internal VOL routine */ - if(H5VL__dataset_read(vol_obj->data, vol_obj->connector->cls, mem_type_id, mem_space_id, file_space_id, dxpl_id, buf, req) < 0) + if(H5VL__dataset_read(vol_obj->data, vol_obj->connector->cls, mem_type_id, mem_space_id, file_space_id, plist_id, buf, req) < 0) HGOTO_ERROR(H5E_VOL, H5E_READERROR, FAIL, "dataset read failed") done: @@ -2085,14 +2085,14 @@ done: */ herr_t H5VLdataset_read(void *obj, hid_t connector_id, hid_t mem_type_id, hid_t 
mem_space_id, - hid_t file_space_id, hid_t dxpl_id, void *buf, void **req) + hid_t file_space_id, hid_t plist_id, void *buf, void **req) { H5VL_class_t *cls; /* VOL connector's class struct */ herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_API_NOINIT H5TRACE8("e", "*xiiiii*x**x", obj, connector_id, mem_type_id, mem_space_id, - file_space_id, dxpl_id, buf, req); + file_space_id, plist_id, buf, req); /* Check args and get class pointer */ if(NULL == obj) @@ -2101,7 +2101,7 @@ H5VLdataset_read(void *obj, hid_t connector_id, hid_t mem_type_id, hid_t mem_spa HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a VOL connector ID") /* Call the corresponding internal VOL routine */ - if(H5VL__dataset_read(obj, cls, mem_type_id, mem_space_id, file_space_id, dxpl_id, buf, req) < 0) + if(H5VL__dataset_read(obj, cls, mem_type_id, mem_space_id, file_space_id, plist_id, buf, req) < 0) HGOTO_ERROR(H5E_VOL, H5E_CANTINIT, FAIL, "unable to read dataset") done: @@ -2121,7 +2121,7 @@ done: */ static herr_t H5VL__dataset_write(void *obj, const H5VL_class_t *cls, hid_t mem_type_id, - hid_t mem_space_id, hid_t file_space_id, hid_t dxpl_id, const void *buf, + hid_t mem_space_id, hid_t file_space_id, hid_t plist_id, const void *buf, void **req) { herr_t ret_value = SUCCEED; /* Return value */ @@ -2133,7 +2133,7 @@ H5VL__dataset_write(void *obj, const H5VL_class_t *cls, hid_t mem_type_id, HGOTO_ERROR(H5E_VOL, H5E_UNSUPPORTED, FAIL, "VOL connector has no 'dataset write' method") /* Call the corresponding VOL callback */ - if((cls->dataset_cls.write)(obj, mem_type_id, mem_space_id, file_space_id, dxpl_id, buf, req) < 0) + if((cls->dataset_cls.write)(obj, mem_type_id, mem_space_id, file_space_id, plist_id, buf, req) < 0) HGOTO_ERROR(H5E_VOL, H5E_WRITEERROR, FAIL, "dataset write failed") done: @@ -2153,7 +2153,7 @@ done: */ herr_t H5VL_dataset_write(const H5VL_object_t *vol_obj, hid_t mem_type_id, - hid_t mem_space_id, hid_t file_space_id, hid_t dxpl_id, const void *buf, + hid_t mem_space_id, hid_t file_space_id, hid_t plist_id, const void *buf, void **req) { hbool_t vol_wrapper_set = FALSE; /* Whether the VOL object wrapping context was set up */ @@ -2167,7 +2167,7 @@ H5VL_dataset_write(const H5VL_object_t *vol_obj, hid_t mem_type_id, vol_wrapper_set = TRUE; /* Call the corresponding internal VOL routine */ - if(H5VL__dataset_write(vol_obj->data, vol_obj->connector->cls, mem_type_id, mem_space_id, file_space_id, dxpl_id, buf, req) < 0) + if(H5VL__dataset_write(vol_obj->data, vol_obj->connector->cls, mem_type_id, mem_space_id, file_space_id, plist_id, buf, req) < 0) HGOTO_ERROR(H5E_VOL, H5E_WRITEERROR, FAIL, "dataset write failed") done: @@ -2191,14 +2191,14 @@ done: */ herr_t H5VLdataset_write(void *obj, hid_t connector_id, hid_t mem_type_id, hid_t mem_space_id, - hid_t file_space_id, hid_t dxpl_id, const void *buf, void **req) + hid_t file_space_id, hid_t plist_id, const void *buf, void **req) { H5VL_class_t *cls; /* VOL connector's class struct */ herr_t ret_value = SUCCEED; /* Return value */ FUNC_ENTER_API_NOINIT H5TRACE8("e", "*xiiiii*x**x", obj, connector_id, mem_type_id, mem_space_id, - file_space_id, dxpl_id, buf, req); + file_space_id, plist_id, buf, req); /* Check args and get class pointer */ if(NULL == obj) @@ -2207,7 +2207,7 @@ H5VLdataset_write(void *obj, hid_t connector_id, hid_t mem_type_id, hid_t mem_sp HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a VOL connector ID") /* Call the corresponding internal VOL routine */ - if(H5VL__dataset_write(obj, cls, mem_type_id, mem_space_id, file_space_id, 
dxpl_id, buf, req) < 0) + if(H5VL__dataset_write(obj, cls, mem_type_id, mem_space_id, file_space_id, plist_id, buf, req) < 0) HGOTO_ERROR(H5E_VOL, H5E_CANTINIT, FAIL, "unable to write dataset") done: diff --git a/src/H5VLconnector.h b/src/H5VLconnector.h index 4fe08f0..2dd8fbe 100644 --- a/src/H5VLconnector.h +++ b/src/H5VLconnector.h @@ -287,9 +287,9 @@ typedef struct H5VL_dataset_class_t { void *(*open)(void *obj, const H5VL_loc_params_t *loc_params, const char *name, hid_t dapl_id, hid_t dxpl_id, void **req); herr_t (*read)(void *dset, hid_t mem_type_id, hid_t mem_space_id, hid_t file_space_id, - hid_t dxpl_id, void * buf, void **req); + hid_t xfer_plist_id, void * buf, void **req); herr_t (*write)(void *dset, hid_t mem_type_id, hid_t mem_space_id, hid_t file_space_id, - hid_t dxpl_id, const void * buf, void **req); + hid_t xfer_plist_id, const void * buf, void **req); herr_t (*get)(void *obj, H5VL_dataset_get_t get_type, hid_t dxpl_id, void **req, va_list arguments); herr_t (*specific)(void *obj, H5VL_dataset_specific_t specific_type, hid_t dxpl_id, void **req, va_list arguments); @@ -341,10 +341,10 @@ typedef struct H5VL_link_class_t { hid_t lcpl_id, hid_t lapl_id, hid_t dxpl_id, void **req, va_list arguments); herr_t (*copy)(void *src_obj, const H5VL_loc_params_t *loc_params1, void *dst_obj, const H5VL_loc_params_t *loc_params2, - hid_t lcpl_id, hid_t lapl_id, hid_t dxpl_id, void **req); + hid_t lcpl, hid_t lapl, hid_t dxpl_id, void **req); herr_t (*move)(void *src_obj, const H5VL_loc_params_t *loc_params1, void *dst_obj, const H5VL_loc_params_t *loc_params2, - hid_t lcpl_id, hid_t lapl_id, hid_t dxpl_id, void **req); + hid_t lcpl, hid_t lapl, hid_t dxpl_id, void **req); herr_t (*get)(void *obj, const H5VL_loc_params_t *loc_params, H5VL_link_get_t get_type, hid_t dxpl_id, void **req, va_list arguments); herr_t (*specific)(void *obj, const H5VL_loc_params_t *loc_params, H5VL_link_specific_t specific_type, diff --git a/src/H5VLpassthru.c b/src/H5VLpassthru.c index d9a207f..d8181bb 100644 --- a/src/H5VLpassthru.c +++ b/src/H5VLpassthru.c @@ -2580,7 +2580,7 @@ H5VL_pass_through_request_notify(void *obj, H5VL_request_notify_t cb, void *ctx) herr_t ret_value; #ifdef ENABLE_PASSTHRU_LOGGING - printf("------- PASS THROUGH VOL REQUEST Notify\n"); + printf("------- PASS THROUGH VOL REQUEST Wait\n"); #endif ret_value = H5VLrequest_notify(o->under_object, o->under_vol_id, cb, ctx); diff --git a/src/H5VLprivate.h b/src/H5VLprivate.h index c572b79..ca474a7 100644 --- a/src/H5VLprivate.h +++ b/src/H5VLprivate.h @@ -140,8 +140,8 @@ H5_DLL herr_t H5VL_attr_close(const H5VL_object_t *vol_obj, hid_t dxpl_id, void /* Dataset functions */ H5_DLL void *H5VL_dataset_create(const H5VL_object_t *vol_obj, const H5VL_loc_params_t *loc_params, const char *name, hid_t lcpl_id, hid_t type_id, hid_t space_id, hid_t dcpl_id, hid_t dapl_id, hid_t dxpl_id, void **req); H5_DLL void *H5VL_dataset_open(const H5VL_object_t *vol_obj, const H5VL_loc_params_t *loc_params, const char *name, hid_t dapl_id, hid_t dxpl_id, void **req); -H5_DLL herr_t H5VL_dataset_read(const H5VL_object_t *vol_obj, hid_t mem_type_id, hid_t mem_space_id, hid_t file_space_id, hid_t dxpl_id, void *buf, void **req); -H5_DLL herr_t H5VL_dataset_write(const H5VL_object_t *vol_obj, hid_t mem_type_id, hid_t mem_space_id, hid_t file_space_id, hid_t dxpl_id, const void *buf, void **req); +H5_DLL herr_t H5VL_dataset_read(const H5VL_object_t *vol_obj, hid_t mem_type_id, hid_t mem_space_id, hid_t file_space_id, hid_t plist_id, void *buf, void **req); +H5_DLL 
herr_t H5VL_dataset_write(const H5VL_object_t *vol_obj, hid_t mem_type_id, hid_t mem_space_id, hid_t file_space_id, hid_t plist_id, const void *buf, void **req); H5_DLL herr_t H5VL_dataset_get(const H5VL_object_t *vol_obj, H5VL_dataset_get_t get_type, hid_t dxpl_id, void **req, ...); H5_DLL herr_t H5VL_dataset_specific(const H5VL_object_t *cls, H5VL_dataset_specific_t specific_type, hid_t dxpl_id, void **req, ...); H5_DLL herr_t H5VL_dataset_optional(const H5VL_object_t *vol_obj, hid_t dxpl_id, void **req, ...); diff --git a/src/H5private.h b/src/H5private.h index 2b35bd4..0bfc91b 100644 --- a/src/H5private.h +++ b/src/H5private.h @@ -345,14 +345,6 @@ #define FAIL (-1) #define UFAIL (unsigned)(-1) -/* The HDF5 library uses the symbol `ERR` frequently. So do - * header files for libraries such as curses(3), terminfo(3), etc. - * Remove its definition here to avoid clashes with HDF5. - */ -#ifdef ERR -#undef ERR -#endif - /* number of members in an array */ #ifndef NELMTS # define NELMTS(X) (sizeof(X)/sizeof(X[0])) @@ -1653,18 +1645,9 @@ extern char *strdup(const char *s); /* Assign a variable to one of a different size (think safer dst = (dsttype)src"). * The code generated by the macro checks for overflows. - * - * Use w##x##y##z instead of H5_GLUE4(w, x, y, z) because srctype - * or dsttype on some systems (e.g., NetBSD 8 and earlier) may - * supply some standard types using a macro---e.g., - * #define uint8_t __uint8_t. The preprocessor will expand the - * macros before it evaluates H5_GLUE4(), and that will generate - * an unexpected name such as ASSIGN___uint8_t_TO___uint16_t. - * The preprocessor does not expand macros in w##x##y##z, so - * that will always generate the expected name. */ #define H5_CHECKED_ASSIGN(dst, dsttype, src, srctype) \ - ASSIGN_##srctype##_TO_##dsttype(dst,dsttype,src,srctype)\ + H5_GLUE4(ASSIGN_,srctype,_TO_,dsttype)(dst,dsttype,src,srctype)\ #else /* NDEBUG */ #define H5_CHECKED_ASSIGN(dst, dsttype, src, srctype) \ @@ -2688,7 +2671,7 @@ H5_DLL herr_t H5_combine_path(const char *path1, const char *path2, char **ful #ifdef H5_HAVE_PARALLEL /* Generic MPI functions */ H5_DLL hsize_t H5_mpi_set_bigio_count(hsize_t new_count); -H5_DLL hsize_t H5_mpi_get_bigio_count(void); +H5_DLL hsize_t H5_mpi_get_bigio_count(); H5_DLL herr_t H5_mpi_comm_dup(MPI_Comm comm, MPI_Comm *comm_new); H5_DLL herr_t H5_mpi_info_dup(MPI_Info info, MPI_Info *info_new); H5_DLL herr_t H5_mpi_comm_free(MPI_Comm *comm); diff --git a/src/H5trace.c b/src/H5trace.c index 79dfbc8..2f40686 100644 --- a/src/H5trace.c +++ b/src/H5trace.c @@ -1059,11 +1059,7 @@ H5_trace(const double *returning, const char *func, const char *type, ...) 
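Note on the src/H5private.h hunk above: the deleted comment explained why H5_CHECKED_ASSIGN had pasted tokens directly instead of going through H5_GLUE4. A macro argument that is an operand of ## is pasted verbatim, while an argument passed through an extra macro level is macro-expanded first, which matters when a platform (the comment cites NetBSD 8 and earlier) defines type names such as uint8_t as macros. A minimal sketch of that preprocessor behavior, using made-up names (my_u8, GLUE2, PASTE_*) rather than the HDF5 macros:

#include <stdio.h>

/* Pretend a platform header spells the type name as a macro, the way
 * older NetBSD does for uint8_t. */
typedef unsigned char impl_u8;
#define my_u8 impl_u8

#define GLUE2(a, b)       a##b

/* t is an operand of ## here, so the argument is pasted verbatim. */
#define PASTE_DIRECT(t)   width_of_##t

/* t is not next to ## here, so it is macro-expanded before GLUE2 pastes. */
#define PASTE_VIA_GLUE(t) GLUE2(width_of_, t)

static const int width_of_my_u8   = 8;   /* reached by PASTE_DIRECT   */
static const int width_of_impl_u8 = 8;   /* reached by PASTE_VIA_GLUE */

int
main(void)
{
    printf("direct: %d  via glue: %d\n",
           PASTE_DIRECT(my_u8), PASTE_VIA_GLUE(my_u8));
    return 0;
}

The ASSIGN___uint8_t_TO___uint16_t name cited in the deleted comment is the second case in action.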
break; case H5F_LIBVER_V112: - HDfprintf(out, "H5F_LIBVER_V112"); - break; - - case H5F_LIBVER_V114: - HDcompile_assert(H5F_LIBVER_LATEST == H5F_LIBVER_V114); + HDcompile_assert(H5F_LIBVER_LATEST == H5F_LIBVER_V112); HDfprintf(out, "H5F_LIBVER_LATEST"); break; diff --git a/src/libhdf5.settings.in b/src/libhdf5.settings.in index 0c2be75..1591bed 100644 --- a/src/libhdf5.settings.in +++ b/src/libhdf5.settings.in @@ -39,7 +39,7 @@ Languages: H5_CPPFLAGS: @H5_CPPFLAGS@ AM_CPPFLAGS: @AM_CPPFLAGS@ C Flags: @CFLAGS@ - H5 C Flags: @H5_CFLAGS@ @H5_ECFLAGS@ + H5 C Flags: @H5_CFLAGS@ AM C Flags: @AM_CFLAGS@ Shared C Library: @enable_shared@ Static C Library: @enable_static@ diff --git a/test/CMakeLists.txt b/test/CMakeLists.txt index d3f8ed8..c7a945e 100644 --- a/test/CMakeLists.txt +++ b/test/CMakeLists.txt @@ -16,11 +16,9 @@ set (TEST_LIB_SOURCES ${HDF5_TEST_SOURCE_DIR}/cache_common.c ${HDF5_TEST_SOURCE_DIR}/external_common.c ${HDF5_TEST_SOURCE_DIR}/swmr_common.c - ${HDF5_TEST_SOURCE_DIR}/vds_swmr_common.c ) set (TEST_LIB_HEADERS - ${HDF5_TEST_SOURCE_DIR}/H5srcdir.h ${HDF5_TEST_SOURCE_DIR}/h5test.h ${HDF5_TEST_SOURCE_DIR}/cache_common.h ${HDF5_TEST_SOURCE_DIR}/external_common.h diff --git a/test/H5srcdir.c b/test/H5srcdir.c deleted file mode 100644 index 8268d2c..0000000 --- a/test/H5srcdir.c +++ /dev/null @@ -1,61 +0,0 @@ -/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * - * Copyright by The HDF Group. * - * Copyright by the Board of Trustees of the University of Illinois. * - * All rights reserved. * - * * - * This file is part of HDF5. The full HDF5 copyright notice, including * - * terms governing use, modification, and redistribution, is contained in * - * the COPYING file, which can be found at the root of the source code * - * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. * - * If you do not have access to either file, you may request a copy from * - * help@hdfgroup.org. 
* - * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ - -#include "H5private.h" -#include "H5srcdir.h" - -/* Buffer to construct path in and return pointer to */ -char srcdir_path[1024] = ""; - -/* Buffer to construct file in and return pointer to */ -char srcdir_testpath[1024] = ""; - -/* Just return the srcdir path */ -const char * -H5_get_srcdir(void) -{ - const char *srcdir = HDgetenv("srcdir"); - - /* Check for using the srcdir from configure time */ - if(NULL == srcdir) - srcdir = config_srcdir; - - /* Build path to all test files */ - if((HDstrlen(srcdir) + 2) < sizeof(srcdir_path)) { - HDsnprintf(srcdir_path, sizeof(srcdir_path), "%s/", srcdir); - return(srcdir_path); - } /* end if */ - else - return(NULL); -} /* end H5_get_srcdir() */ - -/* Append the test file name to the srcdir path and return the whole string */ -const char * -H5_get_srcdir_filename(const char *filename) -{ - const char *srcdir = H5_get_srcdir(); - - /* Check for error */ - if(NULL == srcdir) - return(NULL); - else { - /* Build path to test file */ - if((HDstrlen(srcdir) + HDstrlen(filename) + 1) < sizeof(srcdir_testpath)) { - HDsnprintf(srcdir_testpath, sizeof(srcdir_testpath), "%s%s", srcdir, filename); - return(srcdir_testpath); - } /* end if */ - else - return(NULL); - } /* end else */ -} /* end H5_get_srcdir_filename() */ - diff --git a/test/H5srcdir.h b/test/H5srcdir.h index d0a4bf7..32fe8c9 100644 --- a/test/H5srcdir.h +++ b/test/H5srcdir.h @@ -24,16 +24,47 @@ #include "H5srcdir_str.h" /* Buffer to construct path in and return pointer to */ -extern char srcdir_path[1024]; +static char srcdir_path[1024] = ""; /* Buffer to construct file in and return pointer to */ -extern char srcdir_testpath[1024]; +static char srcdir_testpath[1024] = ""; /* Just return the srcdir path */ -const char *H5_get_srcdir(void); +static const char * +H5_get_srcdir(void) +{ + const char *srcdir = HDgetenv("srcdir"); + + /* Check for using the srcdir from configure time */ + if(NULL == srcdir) + srcdir = config_srcdir; + + /* Build path to all test files */ + if((HDstrlen(srcdir) + 2) < sizeof(srcdir_path)) { + HDsnprintf(srcdir_path, sizeof(srcdir_path), "%s/", srcdir); + return(srcdir_path); + } /* end if */ + else + return(NULL); +} /* end H5_get_srcdir() */ /* Append the test file name to the srcdir path and return the whole string */ -const char *H5_get_srcdir_filename(const char *); +static const char *H5_get_srcdir_filename(const char *filename) +{ + const char *srcdir = H5_get_srcdir(); + /* Check for error */ + if(NULL == srcdir) + return(NULL); + else { + /* Build path to test file */ + if((HDstrlen(srcdir) + HDstrlen(filename) + 1) < sizeof(srcdir_testpath)) { + HDsnprintf(srcdir_testpath, sizeof(srcdir_testpath), "%s%s", srcdir, filename); + return(srcdir_testpath); + } /* end if */ + else + return(NULL); + } /* end else */ +} /* end H5_get_srcdir_filename() */ #endif /* _H5SRCDIR_H */ diff --git a/test/H5srcdir_str.h.in b/test/H5srcdir_str.h.in index ba30a88..bab1df3 100644 --- a/test/H5srcdir_str.h.in +++ b/test/H5srcdir_str.h.in @@ -16,5 +16,5 @@ */ /* Set the 'srcdir' path from configure time */ -#define config_srcdir "@srcdir@" +static const char *config_srcdir = "@srcdir@"; diff --git a/test/Makefile.am b/test/Makefile.am index d66200d..57080aa 100644 --- a/test/Makefile.am +++ b/test/Makefile.am @@ -135,7 +135,7 @@ else noinst_LTLIBRARIES=libh5test.la endif -libh5test_la_SOURCES=h5test.c testframe.c cache_common.c swmr_common.c external_common.c H5srcdir.c 
+libh5test_la_SOURCES=h5test.c testframe.c cache_common.c swmr_common.c external_common.c # Use libhd5test.la to compile all of the tests LDADD=libh5test.la $(LIBHDF5) @@ -145,12 +145,6 @@ ttsafe_SOURCES=ttsafe.c ttsafe_dcreate.c ttsafe_error.c ttsafe_cancel.c \ ttsafe_acreate.c cache_image_SOURCES=cache_image.c genall5.c -#filter_plugin_SOURCES=filter_plugin.c H5srcdir.c - -vds_swmr_gen_SOURCES=vds_swmr_gen.c vds_swmr_common.c -vds_swmr_writer_SOURCES=vds_swmr_writer.c vds_swmr_common.c -vds_swmr_reader_SOURCES=vds_swmr_reader.c vds_swmr_common.c - VFD_LIST = sec2 stdio core core_paged split multi family if DIRECT_VFD_CONDITIONAL VFD_LIST += direct diff --git a/test/accum.c b/test/accum.c index f7d02fd..91acf0f 100644 --- a/test/accum.c +++ b/test/accum.c @@ -1838,6 +1838,7 @@ test_swmr_write_big(hbool_t newest_format) pid_t pid; /* Process ID */ #endif /* H5_HAVE_UNISTD_H */ int status; /* Status returned from child process */ + char *new_argv[] = {NULL}; char *driver = NULL; /* VFD string (from env variable) */ hbool_t api_ctx_pushed = FALSE; /* Whether API context pushed */ @@ -1967,13 +1968,6 @@ test_swmr_write_big(hbool_t newest_format) FAIL_STACK_ERROR; } else if(0 == pid) { /* Child process */ - /* By convention, argv[0] tells the name of program invoked. - * - * execv on NetBSD 8 will actually return EFAULT if there is a - * NULL at argv[0], so we follow the convention unconditionally. - */ - char swmr_reader[] = SWMR_READER; - char * const new_argv[] = {swmr_reader, NULL}; /* Run the reader */ status = HDexecv(SWMR_READER, new_argv); HDprintf("errno from execv = %s\n", strerror(errno)); diff --git a/test/cache.c b/test/cache.c index e59dc3e..1a726fa 100644 --- a/test/cache.c +++ b/test/cache.c @@ -16926,7 +16926,7 @@ check_move_entry_errs(unsigned paged) { herr_t result; H5F_t * file_ptr = NULL; - H5C_t * cache_ptr; + H5C_t * cache_ptr = NULL; test_entry_t * entry_ptr = NULL; test_entry_t * entry_0_0_ptr; test_entry_t * entry_0_1_ptr; diff --git a/test/cache_common.c b/test/cache_common.c index 740523e..24962bc 100644 --- a/test/cache_common.c +++ b/test/cache_common.c @@ -5522,7 +5522,7 @@ col_major_scan_backward(H5F_t * file_ptr, int mile_stone = 1; int32_t type; int32_t idx; - int32_t local_max_index[NUMBER_OF_ENTRY_TYPES] = {0}; + int32_t local_max_index[NUMBER_OF_ENTRY_TYPES]; if ( verbose ) HDfprintf(stdout, "%s: entering.\n", FUNC); diff --git a/test/cache_tagging.c b/test/cache_tagging.c index c3921ea..7ce4e88 100644 --- a/test/cache_tagging.c +++ b/test/cache_tagging.c @@ -528,7 +528,7 @@ check_file_open_tags(hid_t fcpl, int type) hid_t fid = -1; /* File Identifier */ int verbose = FALSE; /* verbose file outout */ hid_t fapl = -1; /* File access prop list */ - haddr_t root_tag = HADDR_UNDEF; /* Root Group Tag */ + haddr_t root_tag; /* Root Group Tag */ haddr_t sbe_tag; /* Sblock Extension Tag */ /* Testing Macro */ diff --git a/test/chunk_info.c b/test/chunk_info.c index 057991c..dd4dac6 100644 --- a/test/chunk_info.c +++ b/test/chunk_info.c @@ -53,7 +53,6 @@ const char *FILENAME[] = { "tchunk_info_v18", "tchunk_info_v110", "tchunk_info_v112", - "tchunk_info_v114", NULL }; diff --git a/test/del_many_dense_attrs.c b/test/del_many_dense_attrs.c index ce85d1b..ada7a6f 100644 --- a/test/del_many_dense_attrs.c +++ b/test/del_many_dense_attrs.c @@ -70,7 +70,7 @@ main(void) hid_t fapl = -1; /* File access property lists */ hid_t gcpl = -1; /* Group creation property list */ char aname[50]; /* Name of attribute */ - const char *basename="attr";/* Name prefix for attribute */ + 
char *basename="attr"; /* Name prefix for attribute */ char filename[100]; /* File name */ int i; /* Local index variable */ diff --git a/test/dsets.c b/test/dsets.c index aa84833..21d5431 100644 --- a/test/dsets.c +++ b/test/dsets.c @@ -7022,69 +7022,6 @@ error: return FAIL; } /* end test_missing_chunk() */ -/* Using Euclid's algorithm, find the greatest common divisor (GCD) of - * the two arguments and return it. - * - * The GCD is negative if the arguments have opposite sign. Otherwise, - * it is positive. - * - * If either argument is zero, then the result is undefined. - */ -static long -gcd(const long l0, const long r0) -{ - long magnitude, remainder; - bool negative = ((l0 < 0) != (r0 < 0)); - long l = labs(l0), r = labs(r0); - - do { - if (l < r) { - r = r % l; - remainder = r; - } else /* r <= l */ { - l = l % r; - remainder = l; - } - } while (remainder != 0); - - magnitude = (l == 0) ? r : l; - return negative ? -magnitude : magnitude; -} - -/* Choose a random offset into an array `nelts` elements long, and store - * it at `offsetp`. The offset will be in the range [0, nelts - 1]. - * Also choose a random increment, `inc`, that "generates" all - * indices in [0, nelts - 1] when it is added to itself repeatedly. - * That is, the range of the discrete function `f(i) = (i * inc) - * mod nelts` on the domain [0, nelts - 1] is [0, nelts - 1]. Store - * `inc` at `incp`. - * - * If `nelts <= 0`, results are undefined. - */ -static void -make_random_offset_and_increment(long nelts, long *offsetp, long *incp) -{ - long inc; - /* `maxinc` is chosen so that for any `x` in [0, nelts - 1], - * `x + maxinc` does not overflow a long. - */ - const long maxinc = MIN(nelts - 1, LONG_MAX - nelts); - - HDassert(0 < nelts); - - *offsetp = HDrandom() % nelts; - - /* Choose a random number in [1, nelts - 1]. If its greatest divisor - * in common with `nelts` is 1, then it will "generate" the additive ring - * [0, nelts - 1], so let it be our increment. Otherwise, choose a new - * number. 
- */ - do { - inc = 1 + HDrandom() % maxinc; - } while (gcd(inc, nelts) != 1); - - *incp = inc; -} /*------------------------------------------------------------------------- * Function: test_random_chunks_real @@ -7109,7 +7046,7 @@ test_random_chunks_real(const char *testname, hbool_t early_alloc, hid_t fapl) rbuf[NPOINTS], check2[20][20]; hsize_t coord[NPOINTS][2]; - const hsize_t dsize[2]={100,100}, dmax[2]={H5S_UNLIMITED, H5S_UNLIMITED}, csize[2]={10,10}, nsize[2]={200,200}; + hsize_t dsize[2]={100,100}, dmax[2]={H5S_UNLIMITED, H5S_UNLIMITED}, csize[2]={10,10}, nsize[2]={200,200}; hsize_t fixed_dmax[2] = {1000, 1000}; hsize_t msize[1]={NPOINTS}; const char dname[]="dataset"; @@ -7117,9 +7054,7 @@ test_random_chunks_real(const char *testname, hbool_t early_alloc, hid_t fapl) size_t i, j; H5D_chunk_index_t idx_type; /* Dataset chunk index type */ H5F_libver_t low; /* File format low bound */ - long ofs, inc; - long rows; - long cols; + TESTING(testname); @@ -7153,16 +7088,12 @@ test_random_chunks_real(const char *testname, hbool_t early_alloc, hid_t fapl) for(j=0; jname, (uintmax_t)start[0], (uintmax_t)start[1]); + HDfprintf(stderr, "Symbol = '%s', location = %lld\n", symbol->name, (long long)start); /* Read record from dataset */ record->rec_id = (uint64_t)ULLONG_MAX; @@ -126,7 +126,7 @@ check_dataset(hid_t fid, unsigned verbose, const symbol_info_t *symbol, symbol_t if(record->rec_id != start[1]) { HDfprintf(stderr, "*** ERROR ***\n"); HDfprintf(stderr, "Incorrect record value!\n"); - HDfprintf(stderr, "Symbol = '%s', location = %ju,%ju, record->rec_id = %" PRIu64 "\n", symbol->name, (uintmax_t)start[0], (uintmax_t)start[1], record->rec_id); + HDfprintf(stderr, "Symbol = '%s', location = %lld, record->rec_id = %llu\n", symbol->name, (long long)start, (unsigned long long)record->rec_id); return -1; } /* end if */ diff --git a/test/tfile.c b/test/tfile.c index 4fb2bc9..f6b92eb 100644 --- a/test/tfile.c +++ b/test/tfile.c @@ -5769,7 +5769,6 @@ test_libver_bounds_super_create(hid_t fapl, hid_t fcpl, htri_t is_swmr, htri_t n case H5F_LIBVER_V110: case H5F_LIBVER_V112: - case H5F_LIBVER_V114: ok = (f->shared->sblock->super_vers == HDF5_SUPERBLOCK_VERSION_3); VERIFY(ok, TRUE, "HDF5_superblock_ver_bounds"); break; diff --git a/test/tid.c b/test/tid.c index 7a839d2..d0ae3e4 100644 --- a/test/tid.c +++ b/test/tid.c @@ -19,13 +19,6 @@ #define H5I_FRIEND /*suppress error about including H5Ipkg */ #include "H5Ipkg.h" -static herr_t -free_wrapper(void *p) -{ - HDfree(p); - return SUCCEED; -} - /* Test basic functionality of registering and deleting types and IDs */ static int basic_id_test(void) { @@ -76,7 +69,7 @@ static int basic_id_test(void) goto out; /* Register a type */ - myType = H5Iregister_type((size_t)64, 0, free_wrapper); + myType = H5Iregister_type((size_t)64, 0, (H5I_free_t) free ); CHECK(myType, H5I_BADID, "H5Iregister_type"); if(myType == H5I_BADID) @@ -170,7 +163,7 @@ static int basic_id_test(void) H5E_END_TRY /* Register another type and another object in that type */ - myType = H5Iregister_type((size_t)64, 0, free_wrapper); + myType = H5Iregister_type((size_t)64, 0, (H5I_free_t) free ); CHECK(myType, H5I_BADID, "H5Iregister_type"); if(myType == H5I_BADID) @@ -245,7 +238,7 @@ out: /* A dummy search function for the next test */ -static int test_search_func(void H5_ATTR_UNUSED * ptr1, hid_t H5_ATTR_UNUSED id, void H5_ATTR_UNUSED * ptr2) { return 0; } +static int test_search_func(void H5_ATTR_UNUSED * ptr1, void H5_ATTR_UNUSED * ptr2) { return 0; } /* Ensure that public functions 
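Note on the test/dsets.c hunk above: the deleted gcd() and make_random_offset_and_increment() choose an increment coprime to the element count so that (offset + i * inc) mod nelts visits every index exactly once. A standalone sketch of the same idea, assuming plain rand() instead of HDrandom() and nelts >= 2:

#include <stdio.h>
#include <stdlib.h>

/* Euclid's algorithm for two non-negative values. */
static long
gcd(long a, long b)
{
    while (b != 0) {
        long t = a % b;
        a = b;
        b = t;
    }
    return a;
}

/* Pick a random start offset and an increment coprime to nelts, so that
 * (offset + i * inc) % nelts enumerates every index exactly once.
 * Assumes nelts >= 2. */
static void
pick_offset_and_increment(long nelts, long *offsetp, long *incp)
{
    long inc;

    *offsetp = rand() % nelts;
    do {
        inc = 1 + rand() % (nelts - 1);   /* candidate in [1, nelts - 1] */
    } while (gcd(inc, nelts) != 1);
    *incp = inc;
}

int
main(void)
{
    long nelts = 12, offset, inc, i;

    pick_offset_and_increment(nelts, &offset, &inc);
    printf("offset=%ld inc=%ld order:", offset, inc);
    for (i = 0; i < nelts; i++)
        printf(" %ld", (offset + i * inc) % nelts);
    printf("\n");
    return 0;
}

Because gcd(inc, nelts) == 1, the additive sequence is a full cycle over the nelts indices, which is why the deleted test could touch chunks in a scattered but complete order.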
cannot access "predefined" ID types */ static int id_predefined_test(void ) @@ -271,7 +264,7 @@ static int id_predefined_test(void ) goto out; H5E_BEGIN_TRY - testPtr = H5Isearch(H5I_GENPROP_LST, test_search_func, testObj); + testPtr = H5Isearch(H5I_GENPROP_LST, (H5I_search_func_t) test_search_func, testObj); H5E_END_TRY CHECK_PTR_NULL(testPtr, "H5Isearch"); @@ -499,7 +492,7 @@ static int test_id_type_list(void) H5I_type_t testType; int i; /* Just a counter variable */ - startType = H5Iregister_type((size_t)8, 0, free_wrapper); + startType = H5Iregister_type((size_t)8, 0, (H5I_free_t) free ); CHECK(startType, H5I_BADID, "H5Iregister_type"); if(startType == H5I_BADID) goto out; @@ -514,7 +507,7 @@ static int test_id_type_list(void) /* Create types up to H5I_MAX_NUM_TYPES */ for(i = startType + 1; i < H5I_MAX_NUM_TYPES; i++) { - currentType = H5Iregister_type((size_t)8, 0, free_wrapper); + currentType = H5Iregister_type((size_t)8, 0, (H5I_free_t) free ); CHECK(currentType, H5I_BADID, "H5Iregister_type"); if(currentType == H5I_BADID) goto out; @@ -523,7 +516,7 @@ static int test_id_type_list(void) /* Wrap around to low type ID numbers */ for(i = H5I_NTYPES; i < startType; i++) { - currentType = H5Iregister_type((size_t)8, 0, free_wrapper); + currentType = H5Iregister_type((size_t)8, 0, (H5I_free_t) free ); CHECK(currentType, H5I_BADID, "H5Iregister_type"); if(currentType == H5I_BADID) goto out; @@ -531,7 +524,7 @@ static int test_id_type_list(void) /* There should be no room at the inn for a new ID type*/ H5E_BEGIN_TRY - testType = H5Iregister_type((size_t)8, 0, free_wrapper); + testType = H5Iregister_type((size_t)8, 0, (H5I_free_t) free ); H5E_END_TRY VERIFY(testType, H5I_BADID, "H5Iregister_type"); @@ -540,7 +533,7 @@ static int test_id_type_list(void) /* Now delete a type and try to insert again */ H5Idestroy_type(H5I_NTYPES); - testType = H5Iregister_type((size_t)8, 0, free_wrapper); + testType = H5Iregister_type((size_t)8, 0, (H5I_free_t) free ); VERIFY(testType, H5I_NTYPES, "H5Iregister_type"); if(testType != H5I_NTYPES) diff --git a/test/trefer.c b/test/trefer.c index b722b2b..0fce2a3 100644 --- a/test/trefer.c +++ b/test/trefer.c @@ -137,7 +137,7 @@ test_reference_params(void) CHECK(dataset, H5I_INVALID_HID, "H5Dcreate2"); /* Write selection to disk */ - ret = H5Dwrite(dataset, H5T_NATIVE_UINT, H5S_ALL, H5S_ALL, H5P_DEFAULT, obuf); + ret = H5Dwrite(dataset, H5T_NATIVE_UINT, H5S_ALL, H5S_ALL, H5P_DEFAULT, wbuf); CHECK(ret, FAIL, "H5Dwrite"); /* Close Dataset */ @@ -357,7 +357,7 @@ test_reference_obj(void) CHECK(dataset, H5I_INVALID_HID, "H5Dcreate2"); /* Write selection to disk */ - ret = H5Dwrite(dataset, H5T_NATIVE_UINT, H5S_ALL, H5S_ALL, H5P_DEFAULT, obuf); + ret = H5Dwrite(dataset, H5T_NATIVE_UINT, H5S_ALL, H5S_ALL, H5P_DEFAULT, wbuf); CHECK(ret, FAIL, "H5Dwrite"); /* Close Dataset */ @@ -470,11 +470,11 @@ test_reference_obj(void) VERIFY(ret, SPACE1_DIM1, "H5Sget_simple_extent_npoints"); /* Read from disk */ - ret = H5Dread(dset2, H5T_NATIVE_UINT, H5S_ALL, H5S_ALL, H5P_DEFAULT, ibuf); + ret = H5Dread(dset2, H5T_NATIVE_UINT, H5S_ALL, H5S_ALL, H5P_DEFAULT, tbuf); CHECK(ret, FAIL, "H5Dread"); - for(i = 0; i < SPACE1_DIM1; i++) - VERIFY(ibuf[i], i * 3, "Data"); + for(tu32 = (unsigned *)tbuf, i = 0; i < SPACE1_DIM1; i++, tu32++) + VERIFY(*tu32, (uint32_t)(i*3), "Data"); /* Close dereferenced Dataset */ ret = H5Dclose(dset2); @@ -530,8 +530,7 @@ test_reference_obj(void) /* Free memory buffers */ HDfree(wbuf); HDfree(rbuf); - HDfree(ibuf); - HDfree(obuf); + HDfree(tbuf); } /* 
test_reference_obj() */ /**************************************************************** @@ -1735,7 +1734,6 @@ test_reference_attr(void) CHECK(ret, FAIL, "H5Dread"); /* Open attribute on dataset object */ - attr = H5Ropen_attr((const H5R_ref_t *)&ref_rbuf[0], H5P_DEFAULT, H5P_DEFAULT); CHECK(attr, H5I_INVALID_HID, "H5Ropen_attr"); @@ -2271,9 +2269,9 @@ test_reference_compat_conv(void) ret = H5Sclose(sid3); CHECK(ret, FAIL, "H5Sclose"); - /* Open attribute on group object */ - attr = H5Ropen_attr(&ref_rbuf[2], H5P_DEFAULT, H5P_DEFAULT); - CHECK(attr, H5I_INVALID_HID, "H5Ropen_attr"); + /* Close file */ + ret = H5Fclose(fid1); + CHECK(ret, FAIL, "H5Fclose"); /* Re-open the file */ fid1 = H5Fopen(FILE_REF_COMPAT, H5F_ACC_RDWR, H5P_DEFAULT); diff --git a/test/tvlstr.c b/test/tvlstr.c index cc01084..731270c 100644 --- a/test/tvlstr.c +++ b/test/tvlstr.c @@ -861,12 +861,12 @@ static void test_write_same_element(void) hid_t file1, dataset1; hid_t mspace, fspace, dtype; hsize_t fdim[] = {SPACE1_DIM1}; - const char *val[SPACE1_DIM1] = {"But", "reuniting", "is a", "great joy"}; + char *val[SPACE1_DIM1] = {"But", "reuniting", "is a", "great joy"}; hsize_t marray[] = {NUMP}; hsize_t coord[SPACE1_RANK][NUMP]; herr_t ret; - const char *wdata[SPACE1_DIM1] = {"Parting", "is such a", "sweet", "sorrow."}; + char *wdata[SPACE1_DIM1] = {"Parting", "is such a", "sweet", "sorrow."}; file1 = H5Fcreate(DATAFILE3, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT); CHECK(file1, FAIL, "H5Fcreate"); diff --git a/test/vds_swmr.h b/test/vds_swmr.h index 18d6b35..eb2dcf4 100644 --- a/test/vds_swmr.h +++ b/test/vds_swmr.h @@ -84,17 +84,31 @@ #define N_PLANES_TO_WRITE 25 /* Planes */ -extern hsize_t PLANES[N_SOURCES][RANK]; +static hsize_t PLANES[N_SOURCES][RANK] = { + {1, SM_HEIGHT, WIDTH}, + {1, LG_HEIGHT, WIDTH}, + {1, SM_HEIGHT, WIDTH}, + {1, LG_HEIGHT, WIDTH}, + {1, SM_HEIGHT, WIDTH}, + {1, LG_HEIGHT, WIDTH} +}; /* File names for source datasets */ -extern char FILE_NAMES[N_SOURCES][NAME_LEN]; +static char FILE_NAMES[N_SOURCES][NAME_LEN] = { + {"vds_swmr_src_a.h5"}, + {"vds_swmr_src_b.h5"}, + {"vds_swmr_src_c.h5"}, + {"vds_swmr_src_d.h5"}, + {"vds_swmr_src_e.h5"}, + {"vds_swmr_src_f.h5"} +}; /* VDS file name */ -extern char VDS_FILE_NAME[NAME_LEN]; +static char VDS_FILE_NAME[NAME_LEN] = "vds_swmr.h5"; /* Dataset names */ -extern char SOURCE_DSET_PATH[NAME_LEN]; -extern char VDS_DSET_NAME[NAME_LEN]; +static char SOURCE_DSET_PATH[NAME_LEN] = "/source_dset"; +static char VDS_DSET_NAME[NAME_LEN] = "vds_dset"; /* Fill values */ #endif /* VDS_SWMR_H */ diff --git a/test/vds_swmr_common.c b/test/vds_swmr_common.c deleted file mode 100644 index d2b4bd6..0000000 --- a/test/vds_swmr_common.c +++ /dev/null @@ -1,36 +0,0 @@ -/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * - * Copyright by The HDF Group. * - * Copyright by the Board of Trustees of the University of Illinois. * - * All rights reserved. * - * * - * This file is part of HDF5. The full HDF5 copyright notice, including * - * terms governing use, modification, and redistribution, is contained in * - * the COPYING file, which can be found at the root of the source code * - * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. * - * If you do not have access to either file, you may request a copy from * - * help@hdfgroup.org. 
* - * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ - -#include "vds_swmr.h" - -hsize_t PLANES[N_SOURCES][RANK] = { - {1, SM_HEIGHT, WIDTH}, - {1, LG_HEIGHT, WIDTH}, - {1, SM_HEIGHT, WIDTH}, - {1, LG_HEIGHT, WIDTH}, - {1, SM_HEIGHT, WIDTH}, - {1, LG_HEIGHT, WIDTH} -}; - -char FILE_NAMES[N_SOURCES][NAME_LEN] = { - {"vds_swmr_src_a.h5"}, - {"vds_swmr_src_b.h5"}, - {"vds_swmr_src_c.h5"}, - {"vds_swmr_src_d.h5"}, - {"vds_swmr_src_e.h5"}, - {"vds_swmr_src_f.h5"} -}; - -char VDS_FILE_NAME[NAME_LEN] = "vds_swmr.h5"; -char SOURCE_DSET_PATH[NAME_LEN] = "/source_dset"; -char VDS_DSET_NAME[NAME_LEN] = "vds_dset"; diff --git a/testpar/CMakeLists.txt b/testpar/CMakeLists.txt index 9795c65..51c3420 100644 --- a/testpar/CMakeLists.txt +++ b/testpar/CMakeLists.txt @@ -72,7 +72,6 @@ set (H5P_TESTS t_init_term t_shapesame t_filters_parallel - t_2Gio ) foreach (h5_testp ${H5P_TESTS}) diff --git a/testpar/Makefile.am b/testpar/Makefile.am index 0cdba24..0e7898e 100644 --- a/testpar/Makefile.am +++ b/testpar/Makefile.am @@ -30,7 +30,7 @@ check_SCRIPTS = $(TEST_SCRIPT_PARA) # Test programs. These are our main targets. # -TEST_PROG_PARA=t_mpi t_bigio testphdf5 t_cache t_cache_image t_pread t_pshutdown t_prestart t_init_term t_shapesame t_filters_parallel t_2Gio +TEST_PROG_PARA=t_mpi t_bigio testphdf5 t_cache t_cache_image t_pread t_pshutdown t_prestart t_init_term t_shapesame t_filters_parallel # t_pflush1 and t_pflush2 are used by testpflush.sh check_PROGRAMS = $(TEST_PROG_PARA) t_pflush1 t_pflush2 diff --git a/testpar/t_2Gio.c b/testpar/t_2Gio.c deleted file mode 100644 index d4253d7..0000000 --- a/testpar/t_2Gio.c +++ /dev/null @@ -1,4974 +0,0 @@ -/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * - * Copyright by The HDF Group. * - * All rights reserved. * - * * - * This file is part of HDF5. The full HDF5 copyright notice, including * - * terms governing use, modification, and redistribution, is contained in * - * the COPYING file, which can be found at the root of the source code * - * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. * - * If you do not have access to either file, you may request a copy from * - * help@hdfgroup.org. * - * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ - -/* - * Parallel tests for datasets - */ - -/* - * Example of using the parallel HDF5 library to access datasets. - * - * This program contains three major parts. Part 1 tests fixed dimension - * datasets, for both independent and collective transfer modes. - * Part 2 tests extendible datasets, for independent transfer mode - * only. - * Part 3 tests extendible datasets, for collective transfer mode - * only. - */ - -#include -#include "hdf5.h" -#include "testphdf5.h" - -#include "mpi.h" - - -/* For this test, we don't want to inherit the RANK definition - * from testphdf5.h. We'll define MAX_RANK to accomodate 3D arrays - * and use that definition rather than RANK. - */ -#ifndef MAX_RANK -#define MAX_RANK 2 -#endif - -/* As with RANK vs MAX_RANK, we use BIG_X_FACTOR vs ROW_FACTOR - * and BIG_Y_FACTOR vs COL_FACTOR. We introduce BIG_Z_FACTOR - * for the 3rd dimension. 
- */ - -#ifndef BIG_X_FACTOR -#define BIG_X_FACTOR 1048576 -#endif -#ifndef BIG_Y_FACTOR -#define BIG_Y_FACTOR 32 -#endif -#ifndef BIG_Z_FACTOR -#define BIG_Z_FACTOR 2048 -#endif - -#ifndef PATH_MAX -#define PATH_MAX 512 -#endif /* !PATH_MAX */ - -/* global variables */ -int dim0; -int dim1; -int dim2; -int chunkdim0; -int chunkdim1; -int nerrors = 0; /* errors count */ -int ndatasets = 300; /* number of datasets to create*/ -int ngroups = 512; /* number of groups to create in root - * group. */ -int facc_type = FACC_MPIO; /*Test file access type */ -int dxfer_coll_type = DXFER_COLLECTIVE_IO; - -H5E_auto2_t old_func; /* previous error handler */ -void *old_client_data; /* previous error handler arg.*/ - -#define NFILENAME 3 -#define PARATESTFILE filenames[0] -const char *FILENAME[NFILENAME]={ - "ParaTest", - "Hugefile", - NULL}; -char filenames[NFILENAME][PATH_MAX]; -hid_t fapl; /* file access property list */ -MPI_Comm test_comm = MPI_COMM_WORLD; - -// static int enable_error_stack = 0; /* enable error stack; disable=0 enable=1 */ -// static const char *TestProgName = NULL; -// static void (*TestPrivateUsage)(void) = NULL; -// static int (*TestPrivateParser)(int ac, char *av[]) = NULL; - -/* - * The following are various utility routines used by the tests. - */ - - -/* - * Show command usage - */ -static void -usage(void) -{ - HDprintf(" [-r] [-w] [-m] [-n] " - "[-o] [-f ] [-d ]\n"); - HDprintf("\t-m" - "\tset number of datasets for the multiple dataset test\n"); - HDprintf("\t-n" - "\tset number of groups for the multiple group test\n"); - HDprintf("\t-f \tfilename prefix\n"); - HDprintf("\t-i\tuse Independent IO \n"); - HDprintf("\t-d \tdataset dimensions factors. Defaults (%d,%d)\n", - BIG_X_FACTOR, BIG_Y_FACTOR); - HDprintf("\t-c \tdataset chunk dimensions. Defaults (dim0/10,dim1/10)\n"); - HDprintf("\n"); -} - -/* - * parse the command line options - */ -static int -parse_options(int argc, char **argv) -{ - int mpi_size, mpi_rank; /* mpi variables */ - - MPI_Comm_size(test_comm, &mpi_size); - MPI_Comm_rank(test_comm, &mpi_rank); - - /* setup default chunk-size. 
Make sure sizes are > 0 */ - - chunkdim0 = (dim0+9)/10; - chunkdim1 = (dim1+9)/10; - - while (--argc){ - if (**(++argv) != '-'){ - break; - }else{ - switch(*(*argv+1)){ - case 'm': ndatasets = atoi((*argv+1)+1); - if (ndatasets < 0){ - nerrors++; - return(1); - } - break; - case 'n': ngroups = atoi((*argv+1)+1); - if (ngroups < 0){ - nerrors++; - return(1); - } - break; - case 'f': if (--argc < 1) { - nerrors++; - return(1); - } - if (**(++argv) == '-') { - nerrors++; - return(1); - } - paraprefix = *argv; - break; - case 'i': /* Collective MPI-IO access with independent IO */ - dxfer_coll_type = DXFER_INDEPENDENT_IO; - break; - case 'd': /* dimension sizes */ - if (--argc < 2){ - nerrors++; - return(1); - } - dim0 = atoi(*(++argv))*mpi_size; - argc--; - dim1 = atoi(*(++argv))*mpi_size; - /* set default chunkdim sizes too */ - chunkdim0 = (dim0+9)/10; - chunkdim1 = (dim1+9)/10; - break; - case 'c': /* chunk dimensions */ - if (--argc < 2){ - nerrors++; - return(1); - } - chunkdim0 = atoi(*(++argv)); - argc--; - chunkdim1 = atoi(*(++argv)); - break; - case 'h': /* print help message--return with nerrors set */ - return(1); - default: HDprintf("Illegal option(%s)\n", *argv); - nerrors++; - return(1); - } - } - } /*while*/ - - /* check validity of dimension and chunk sizes */ - if (dim0 <= 0 || dim1 <= 0){ - HDprintf("Illegal dim sizes (%d, %d)\n", dim0, dim1); - nerrors++; - return(1); - } - if (chunkdim0 <= 0 || chunkdim1 <= 0){ - HDprintf("Illegal chunkdim sizes (%d, %d)\n", chunkdim0, chunkdim1); - nerrors++; - return(1); - } - - /* Make sure datasets can be divided into equal portions by the processes */ - if ((dim0 % mpi_size) || (dim1 % mpi_size)){ - if (MAINPROCESS) - HDprintf("dim0(%d) and dim1(%d) must be multiples of processes(%d)\n", - dim0, dim1, mpi_size); - nerrors++; - return(1); - } - - /* compose the test filenames */ - { - int i, n; - - n = sizeof(FILENAME)/sizeof(FILENAME[0]) - 1; /* exclude the NULL */ - - for (i=0; i < n; i++) - if (h5_fixname(FILENAME[i],fapl,filenames[i],sizeof(filenames[i])) - == NULL){ - HDprintf("h5_fixname failed\n"); - nerrors++; - return(1); - } - - if (MAINPROCESS) { - HDprintf("Test filenames are:\n"); - for (i=0; i < n; i++) - HDprintf(" %s\n", filenames[i]); - } - } - - return(0); -} - -/* - * Create the appropriate File access property list - */ -hid_t -create_faccess_plist(MPI_Comm comm, MPI_Info info, int l_facc_type) -{ - hid_t ret_pl = -1; - herr_t ret; /* generic return value */ - int mpi_rank; /* mpi variables */ - - /* need the rank for error checking macros */ - MPI_Comm_rank(test_comm, &mpi_rank); - - ret_pl = H5Pcreate (H5P_FILE_ACCESS); - VRFY((ret_pl >= 0), "H5P_FILE_ACCESS"); - - if (l_facc_type == FACC_DEFAULT) - return (ret_pl); - - if (l_facc_type == FACC_MPIO){ - /* set Parallel access with communicator */ - ret = H5Pset_fapl_mpio(ret_pl, comm, info); - VRFY((ret >= 0), ""); - ret = H5Pset_all_coll_metadata_ops(ret_pl, TRUE); - VRFY((ret >= 0), ""); - ret = H5Pset_coll_metadata_write(ret_pl, TRUE); - VRFY((ret >= 0), ""); - return(ret_pl); - } - - if (l_facc_type == (FACC_MPIO | FACC_SPLIT)){ - hid_t mpio_pl; - - mpio_pl = H5Pcreate (H5P_FILE_ACCESS); - VRFY((mpio_pl >= 0), ""); - /* set Parallel access with communicator */ - ret = H5Pset_fapl_mpio(mpio_pl, comm, info); - VRFY((ret >= 0), ""); - - /* setup file access template */ - ret_pl = H5Pcreate (H5P_FILE_ACCESS); - VRFY((ret_pl >= 0), ""); - /* set Parallel access with communicator */ - ret = H5Pset_fapl_split(ret_pl, ".meta", mpio_pl, ".raw", mpio_pl); - VRFY((ret >= 
0), "H5Pset_fapl_split succeeded"); - H5Pclose(mpio_pl); - return(ret_pl); - } - - /* unknown file access types */ - return (ret_pl); -} - - -/* - * Setup the dimensions of the hyperslab. - * Two modes--by rows or by columns. - * Assume dimension rank is 2. - * BYROW divide into slabs of rows - * BYCOL divide into blocks of columns - * ZROW same as BYROW except process 0 gets 0 rows - * ZCOL same as BYCOL except process 0 gets 0 columns - */ -static void -slab_set(int mpi_rank, int mpi_size, hsize_t start[], hsize_t count[], - hsize_t stride[], hsize_t block[], int mode) -{ - switch (mode) { - case BYROW: - /* Each process takes a slabs of rows. */ - block[0] = dim0 / mpi_size; - block[1] = dim1; - stride[0] = block[0]; - stride[1] = block[1]; - count[0] = 1; - count[1] = 1; - start[0] = mpi_rank * block[0]; - start[1] = 0; - if (VERBOSE_MED) - HDprintf("slab_set BYROW\n"); - break; - case BYCOL: - /* Each process takes a block of columns. */ - block[0] = dim0; - block[1] = dim1 / mpi_size; - stride[0] = block[0]; - stride[1] = block[1]; - count[0] = 1; - count[1] = 1; - start[0] = 0; - start[1] = mpi_rank * block[1]; - if (VERBOSE_MED) - HDprintf("slab_set BYCOL\n"); - break; - case ZROW: - /* Similar to BYROW except process 0 gets 0 row */ - block[0] = (mpi_rank ? dim0 / mpi_size : 0); - block[1] = dim1; - stride[0] = (mpi_rank ? block[0] : 1); /* avoid setting stride to 0 */ - stride[1] = block[1]; - count[0] = 1; - count[1] = 1; - start[0] = (mpi_rank ? mpi_rank * block[0] : 0); - start[1] = 0; - if (VERBOSE_MED) - HDprintf("slab_set ZROW\n"); - break; - case ZCOL: - /* Similar to BYCOL except process 0 gets 0 column */ - block[0] = dim0; - block[1] = (mpi_rank ? dim1 / mpi_size : 0); - stride[0] = block[0]; - stride[1] = (mpi_rank ? block[1] : 1); /* avoid setting stride to 0 */ - count[0] = 1; - count[1] = 1; - start[0] = 0; - start[1] = (mpi_rank ? mpi_rank * block[1] : 0); - if (VERBOSE_MED) - HDprintf("slab_set ZCOL\n"); - break; - default: - /* Unknown mode. Set it to cover the whole dataset. */ - HDprintf("unknown slab_set mode (%d)\n", mode); - block[0] = dim0; - block[1] = dim1; - stride[0] = block[0]; - stride[1] = block[1]; - count[0] = 1; - count[1] = 1; - start[0] = 0; - start[1] = 0; - if (VERBOSE_MED) - HDprintf("slab_set wholeset\n"); - break; - } - if (VERBOSE_MED) { - HDprintf( - "start[]=(%lu,%lu), count[]=(%lu,%lu), stride[]=(%lu,%lu), block[]=(%lu,%lu), total datapoints=%lu\n", - (unsigned long) start[0], (unsigned long) start[1], - (unsigned long) count[0], (unsigned long) count[1], - (unsigned long) stride[0], (unsigned long) stride[1], - (unsigned long) block[0], (unsigned long) block[1], - (unsigned long) (block[0] * block[1] * count[0] * count[1])); - } -} - -/* - * Setup the coordinates for point selection. 
- */ -void point_set(hsize_t start[], - hsize_t count[], - hsize_t stride[], - hsize_t block[], - size_t num_points, - hsize_t coords[], - int order) -{ - hsize_t i,j, k = 0, m ,n, s1 ,s2; - - // HDcompile_assert(MAX_RANK == 3); - HDcompile_assert(MAX_RANK == 2); - - if(OUT_OF_ORDER == order) - k = (num_points * MAX_RANK) - 1; - else if(IN_ORDER == order) - k = 0; - - s1 = start[0]; - s2 = start[1]; - - for(i = 0 ; i < count[0]; i++) - for(j = 0 ; j < count[1]; j++) - for(m = 0 ; m < block[0]; m++) - for(n = 0 ; n < block[1]; n++) - if(OUT_OF_ORDER == order) { - coords[k--] = s2 + (stride[1] * j) + n; - coords[k--] = s1 + (stride[0] * i) + m; - } - else if(IN_ORDER == order) { - coords[k++] = s1 + stride[0] * i + m; - coords[k++] = s2 + stride[1] * j + n; - } - - if(VERBOSE_MED) { - HDprintf("start[]=(%lu, %lu), count[]=(%lu, %lu), stride[]=(%lu, %lu), block[]=(%lu, %lu), total datapoints=%lu\n", - (unsigned long)start[0], (unsigned long)start[1], (unsigned long)count[0], (unsigned long)count[1], - (unsigned long)stride[0], (unsigned long)stride[1], (unsigned long)block[0], (unsigned long)block[1], - (unsigned long)(block[0] * block[1] * count[0] * count[1])); - k = 0; - for(i = 0; i < num_points ; i++) { - HDprintf("(%d, %d)\n", (int)coords[k], (int)coords[k + 1]); - k += 2; - } - } -} - - -/* - * Fill the dataset with trivial data for testing. - * Assume dimension rank is 2 and data is stored contiguous. - */ -static void -dataset_fill(hsize_t start[], hsize_t block[], DATATYPE * dataset) -{ - DATATYPE *dataptr = dataset; - hsize_t i, j; - - /* put some trivial data in the data_array */ - for (i=0; i < block[0]; i++){ - for (j=0; j < block[1]; j++){ - *dataptr = (DATATYPE)((i+start[0])*100 + (j+start[1]+1)); - dataptr++; - } - } -} - - -/* - * Print the content of the dataset. - */ -static void -dataset_print(hsize_t start[], hsize_t block[], DATATYPE * dataset) -{ - DATATYPE *dataptr = dataset; - hsize_t i, j; - - /* print the column heading */ - HDprintf("%-8s", "Cols:"); - for (j=0; j < block[1]; j++){ - HDprintf("%3lu ", (unsigned long)(start[1]+j)); - } - HDprintf("\n"); - - /* print the slab data */ - for (i=0; i < block[0]; i++){ - HDprintf("Row %2lu: ", (unsigned long)(i+start[0])); - for (j=0; j < block[1]; j++){ - HDprintf("%03d ", *dataptr++); - } - HDprintf("\n"); - } -} - - -/* - * Print the content of the dataset. 
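Note on slab_set() above (part of the deleted testpar/t_2Gio.c): it carves the 2-D dataset into one hyperslab per MPI rank. A standalone sketch of just the BYROW case, computing start/count/stride/block with plain C and made-up dimensions instead of MPI and HDF5 calls:

#include <stdio.h>

#define RANK 2

/* BYROW decomposition: rank r owns dim0/mpi_size consecutive rows and all
 * dim1 columns, expressed as a single block per dimension.
 * Assumes dim0 is a multiple of mpi_size, as the deleted test required. */
static void
slab_set_byrow(int mpi_rank, int mpi_size, long dim0, long dim1,
               long start[RANK], long count[RANK],
               long stride[RANK], long block[RANK])
{
    block[0]  = dim0 / mpi_size;   /* rows per rank        */
    block[1]  = dim1;              /* full width           */
    stride[0] = block[0];
    stride[1] = block[1];
    count[0]  = 1;                 /* one block per dim    */
    count[1]  = 1;
    start[0]  = mpi_rank * block[0];
    start[1]  = 0;
}

int
main(void)
{
    long start[RANK], count[RANK], stride[RANK], block[RANK];
    int  size = 4, r;

    for (r = 0; r < size; r++) {
        slab_set_byrow(r, size, 100, 8, start, count, stride, block);
        printf("rank %d: start=(%ld,%ld) block=(%ld,%ld)\n",
               r, start[0], start[1], block[0], block[1]);
    }
    return 0;
}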
- */ -int -dataset_vrfy(hsize_t start[], hsize_t count[], hsize_t stride[], hsize_t block[], DATATYPE *dataset, DATATYPE *original) -{ - hsize_t i, j; - int vrfyerrs; - - /* print it if VERBOSE_MED */ - if(VERBOSE_MED) { - HDprintf("dataset_vrfy dumping:::\n"); - HDprintf("start(%lu, %lu), count(%lu, %lu), stride(%lu, %lu), block(%lu, %lu)\n", - (unsigned long)start[0], (unsigned long)start[1], (unsigned long)count[0], (unsigned long)count[1], - (unsigned long)stride[0], (unsigned long)stride[1], (unsigned long)block[0], (unsigned long)block[1]); - HDprintf("original values:\n"); - dataset_print(start, block, original); - HDprintf("compared values:\n"); - dataset_print(start, block, dataset); - } - - vrfyerrs = 0; - for (i=0; i < block[0]; i++){ - for (j=0; j < block[1]; j++){ - if(*dataset != *original){ - if(vrfyerrs++ < MAX_ERR_REPORT || VERBOSE_MED){ - HDprintf("Dataset Verify failed at [%lu][%lu](row %lu, col %lu): expect %d, got %d\n", - (unsigned long)i, (unsigned long)j, - (unsigned long)(i+start[0]), (unsigned long)(j+start[1]), - *(original), *(dataset)); - } - dataset++; - original++; - } - } - } - if(vrfyerrs > MAX_ERR_REPORT && !VERBOSE_MED) - HDprintf("[more errors ...]\n"); - if(vrfyerrs) - HDprintf("%d errors found in dataset_vrfy\n", vrfyerrs); - return(vrfyerrs); -} - -/* NOTE: This is a memory intensive test and is only run - * with 2 MPI ranks and with $HDF5TestExpress == 0 - * i.e. Exhaustive test run is allowed. Otherwise - * the test is skipped. - * - * Thanks to l.ferraro@cineca.it for the following test:: - * - * This is a simple test case to reproduce a problem - * occurring on LUSTRE filesystem with the creation - * of a 4GB dataset using chunking with parallel HDF5. - * The test works correctly if disabling chunking or - * when the bytes assigned to each process is less - * that 4GB. if equal or more, either hangs or results - * in a PMPI_Waitall error. - * - * $> mpirun -genv I_MPI_EXTRA_FILESYSTEM on - * -genv I_MPI_EXTRA_FILESYSTEM_LIST gpfs - * -n 1 ./h5_mpi_big_dataset.x 1024 1024 1024 - */ - -#define H5FILE_NAME "hugefile.h5" -#define DATASETNAME "dataset" - -int MpioTest2G( MPI_Comm comm ) -{ - /* - * HDF5 APIs definitions - */ - herr_t status; - hid_t file_id, dset_id; /* file and dataset identifiers */ - hid_t plist_id; /* property list identifier */ - hid_t filespace; /* file and memory dataspace identifiers */ - int *data; /* pointer to data buffer to write */ - - hsize_t shape[3] = {1024, 1024, 1152}; - - /* - * MPI variables - */ - int mpi_size, mpi_rank; - MPI_Info info = MPI_INFO_NULL; - - MPI_Comm_size(comm, &mpi_size); - MPI_Comm_rank(comm, &mpi_rank); - - if(mpi_rank == 0) { - HDprintf("Using %d process on dataset shape [%llu, %llu, %llu]\n", - mpi_size, shape[0], shape[1], shape[2]); - } - - /* - * Set up file access property list with parallel I/O access - */ - plist_id = H5Pcreate(H5P_FILE_ACCESS); - VRFY((plist_id >= 0), "H5Pcreate file_access succeeded"); - status = H5Pset_fapl_mpio(plist_id, comm, info); - VRFY((status >= 0), "H5Pset_dxpl_mpio succeeded"); - - /* - * Create a new file collectively and release property list identifier. - */ - file_id = H5Fcreate(H5FILE_NAME, H5F_ACC_TRUNC, H5P_DEFAULT, plist_id); - VRFY((file_id >= 0), "H5Fcreate succeeded"); - - H5Pclose(plist_id); - - /* - * Create the dataspace for the dataset. 
- */ - size_t tot_size_bytes = sizeof(int); - for (int i = 0; i < 3; i++) { - tot_size_bytes *= shape[i]; - } - if(mpi_rank == 0) { - HDprintf("Dataset of %llu bytes\n", tot_size_bytes); - } - filespace = H5Screate_simple(3, shape, NULL); - VRFY((filespace >= 0), "H5Screate_simple succeeded"); - - /* - * Select chunking - */ - hid_t dcpl_id = H5Pcreate (H5P_DATASET_CREATE); - VRFY((dcpl_id >= 0), "H5P_DATASET_CREATE"); - hsize_t chunk[3] = {4, shape[1], shape[2]}; - status = H5Pset_chunk(dcpl_id, 3, chunk); - VRFY((status >= 0), "H5Pset_chunk succeeded"); - - /* - * Create the dataset with default properties and close filespace. - */ - dset_id = H5Dcreate(file_id, DATASETNAME, - H5T_NATIVE_INT, filespace, - H5P_DEFAULT, dcpl_id, H5P_DEFAULT); - VRFY((dset_id >= 0), "H5Dcreate succeeded"); - H5Sclose(filespace); - - /* - * Create property list for collective dataset write. - */ - plist_id = H5Pcreate(H5P_DATASET_XFER); - VRFY((plist_id >= 0), "H5P_DATASET_XFER"); - status = H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE); - VRFY((status >= 0), ""); - - size_t slice_per_process = (shape[0] + mpi_size - 1) / mpi_size; - size_t data_size = slice_per_process * shape[1] * shape[2]; - size_t data_size_bytes = sizeof(int) * data_size; - data = HDmalloc(data_size_bytes); - VRFY((data != NULL), "data HDmalloc succeeded"); - - for (size_t i = 0; i < data_size; i++) { - data[i] = mpi_rank; - } - - hsize_t h5_counts[3] = { slice_per_process, shape[1], shape[2] }; - hsize_t h5_offsets[3] = { mpi_rank * slice_per_process, 0, 0}; - hid_t filedataspace = H5Screate_simple(3, shape, NULL); - VRFY((filedataspace >= 0), "H5Screate_simple succeeded"); - - // fix reminder along first dimension multiple of chunk[0] - if ( h5_offsets[0] + h5_counts[0] > shape[0]) { - h5_counts[0] = shape[0] - h5_offsets[0]; - } - - status = H5Sselect_hyperslab(filedataspace, H5S_SELECT_SET, - h5_offsets, NULL, h5_counts, NULL); - VRFY((status >= 0), "H5Sselect_hyperslab succeeded"); - - hid_t memorydataspace = H5Screate_simple(3, h5_counts, NULL); - VRFY((memorydataspace >= 0), "H5Screate_simple succeeded"); - - status = H5Dwrite(dset_id, H5T_NATIVE_INT, - memorydataspace, filedataspace, plist_id, data); - VRFY((status >= 0), "H5Dwrite succeeded"); - H5Pclose(plist_id); - - /* - * Close/release resources. - */ - H5Sclose(filedataspace); - H5Sclose(memorydataspace); - H5Dclose(dset_id); - H5Fclose(file_id); - - free(data); - HDprintf("Proc %d - MpioTest2G test succeeded\n", mpi_rank, data_size_bytes); - - if (mpi_rank == 0) - HDremove(FILENAME[1]); - return 0; -} - - -/* - * Part 1.a--Independent read/write for fixed dimension datasets. - */ - -/* - * Example of using the parallel HDF5 library to create two datasets - * in one HDF5 files with parallel MPIO access support. - * The Datasets are of sizes (number-of-mpi-processes x dim0) x dim1. - * Each process controls only a slab of size dim0 x dim1 within each - * dataset. 
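Note on MpioTest2G() above: the first dimension is split across ranks with a ceiling division and the last rank's count is clamped so the selection never runs past the dataset extent. A small arithmetic sketch of that sizing only, with made-up shape and rank count and no MPI or HDF5 calls:

#include <stdio.h>
#include <stddef.h>

int
main(void)
{
    const size_t shape[3] = {1024, 1024, 1152};   /* example dataset extent   */
    const int    mpi_size = 3;                    /* example number of ranks  */
    int          rank;

    /* Ceiling division: every rank gets the same nominal slice count. */
    size_t slice_per_process = (shape[0] + (size_t)mpi_size - 1) / (size_t)mpi_size;

    for (rank = 0; rank < mpi_size; rank++) {
        size_t offset = (size_t)rank * slice_per_process;
        size_t count  = slice_per_process;

        /* Clamp the last rank so offset + count never exceeds shape[0]. */
        if (offset + count > shape[0])
            count = shape[0] - offset;

        printf("rank %d: offset=%zu count=%zu bytes=%zu\n",
               rank, offset, count, count * shape[1] * shape[2] * sizeof(int));
    }
    return 0;
}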
- */ - -void -dataset_writeInd(void) -{ - hid_t fid; /* HDF5 file ID */ - hid_t acc_tpl; /* File access templates */ - hid_t sid; /* Dataspace ID */ - hid_t file_dataspace; /* File dataspace ID */ - hid_t mem_dataspace; /* memory dataspace ID */ - hid_t dataset1, dataset2; /* Dataset ID */ - hsize_t dims[MAX_RANK] = {1,}; /* dataset dim sizes */ - hsize_t data_size; - DATATYPE *data_array1 = NULL; /* data buffer */ - const char *filename; - - hsize_t start[MAX_RANK]; /* for hyperslab setting */ - hsize_t count[MAX_RANK]; - hsize_t stride[MAX_RANK]; /* for hyperslab setting */ - hsize_t block[MAX_RANK]; /* for hyperslab setting */ - - herr_t ret; /* Generic return value */ - int mpi_size, mpi_rank; - - MPI_Comm comm = test_comm; - MPI_Info info = MPI_INFO_NULL; - - filename = GetTestParameters(); - if(VERBOSE_MED) - HDprintf("Independent write test on file %s\n", filename); - - /* set up MPI parameters */ - MPI_Comm_size(test_comm,&mpi_size); - MPI_Comm_rank(test_comm,&mpi_rank); - - /* allocate memory for data buffer */ - data_size = sizeof(DATATYPE); - data_size *= (hsize_t)dim0 * (hsize_t)dim1; - data_array1 = (DATATYPE *)HDmalloc(data_size); - VRFY((data_array1 != NULL), "data_array1 HDmalloc succeeded"); - - /* ---------------------------------------- - * CREATE AN HDF5 FILE WITH PARALLEL ACCESS - * ---------------------------------------*/ - /* setup file access template */ - acc_tpl = create_faccess_plist(comm, info, facc_type); - VRFY((acc_tpl >= 0), ""); - - /* create the file collectively */ - fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, acc_tpl); - VRFY((fid >= 0), "H5Fcreate succeeded"); - - /* Release file-access template */ - ret = H5Pclose(acc_tpl); - VRFY((ret >= 0), ""); - - - /* --------------------------------------------- - * Define the dimensions of the overall datasets - * and the slabs local to the MPI process. - * ------------------------------------------- */ - /* setup dimensionality object */ - dims[0] = dim0; - dims[1] = dim1; - sid = H5Screate_simple (MAX_RANK, dims, NULL); - VRFY((sid >= 0), "H5Screate_simple succeeded"); - - - /* create a dataset collectively */ - dataset1 = H5Dcreate2(fid, DATASETNAME1, H5T_NATIVE_INT, sid, - H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - VRFY((dataset1 >= 0), "H5Dcreate2 succeeded"); - - /* create another dataset collectively */ - dataset2 = H5Dcreate2(fid, DATASETNAME2, H5T_NATIVE_INT, sid, - H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - VRFY((dataset2 >= 0), "H5Dcreate2 succeeded"); - - - /* - * To test the independent orders of writes between processes, all - * even number processes write to dataset1 first, then dataset2. - * All odd number processes write to dataset2 first, then dataset1. 
- */ - - /* set up dimensions of the slab this process accesses */ - slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW); - - /* put some trivial data in the data_array */ - dataset_fill(start, block, data_array1); - MESG("data_array initialized"); - - /* create a file dataspace independently */ - file_dataspace = H5Dget_space (dataset1); - VRFY((file_dataspace >= 0), "H5Dget_space succeeded"); - ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block); - VRFY((ret >= 0), "H5Sset_hyperslab succeeded"); - - /* create a memory dataspace independently */ - mem_dataspace = H5Screate_simple (MAX_RANK, block, NULL); - VRFY((mem_dataspace >= 0), ""); - - /* write data independently */ - ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, - H5P_DEFAULT, data_array1); - VRFY((ret >= 0), "H5Dwrite dataset1 succeeded"); - /* write data independently */ - ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, - H5P_DEFAULT, data_array1); - VRFY((ret >= 0), "H5Dwrite dataset2 succeeded"); - - /* setup dimensions again to write with zero rows for process 0 */ - if(VERBOSE_MED) - HDprintf("writeInd by some with zero row\n"); - slab_set(mpi_rank, mpi_size, start, count, stride, block, ZROW); - ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block); - VRFY((ret >= 0), "H5Sset_hyperslab succeeded"); - /* need to make mem_dataspace to match for process 0 */ - if(MAINPROCESS){ - ret = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block); - VRFY((ret >= 0), "H5Sset_hyperslab mem_dataspace succeeded"); - } - MESG("writeInd by some with zero row"); -if((mpi_rank/2)*2 != mpi_rank){ - ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, - H5P_DEFAULT, data_array1); - VRFY((ret >= 0), "H5Dwrite dataset1 by ZROW succeeded"); -} -#ifdef BARRIER_CHECKS -MPI_Barrier(test_comm); -#endif /* BARRIER_CHECKS */ - - /* release dataspace ID */ - H5Sclose(file_dataspace); - - /* close dataset collectively */ - ret = H5Dclose(dataset1); - VRFY((ret >= 0), "H5Dclose1 succeeded"); - ret = H5Dclose(dataset2); - VRFY((ret >= 0), "H5Dclose2 succeeded"); - - /* release all IDs created */ - H5Sclose(sid); - - /* close the file collectively */ - H5Fclose(fid); - - /* release data buffers */ - if(data_array1) HDfree(data_array1); -} - -/* Example of using the parallel HDF5 library to read a dataset */ -void -dataset_readInd(void) -{ - hid_t fid; /* HDF5 file ID */ - hid_t acc_tpl; /* File access templates */ - hid_t file_dataspace; /* File dataspace ID */ - hid_t mem_dataspace; /* memory dataspace ID */ - hid_t dataset1, dataset2; /* Dataset ID */ - DATATYPE *data_array1 = NULL; /* data buffer */ - DATATYPE *data_origin1 = NULL; /* expected data buffer */ - const char *filename; - - hsize_t start[MAX_RANK]; /* for hyperslab setting */ - hsize_t count[MAX_RANK], stride[MAX_RANK]; /* for hyperslab setting */ - hsize_t block[MAX_RANK]; /* for hyperslab setting */ - - herr_t ret; /* Generic return value */ - int mpi_size, mpi_rank; - - MPI_Comm comm = test_comm; - MPI_Info info = MPI_INFO_NULL; - - filename = GetTestParameters(); - if(VERBOSE_MED) - HDprintf("Independent read test on file %s\n", filename); - - /* set up MPI parameters */ - MPI_Comm_size(test_comm,&mpi_size); - MPI_Comm_rank(test_comm,&mpi_rank); - - /* allocate memory for data buffer */ - data_array1 = (DATATYPE *)HDmalloc(dim0*dim1*sizeof(DATATYPE)); - VRFY((data_array1 != NULL), "data_array1 HDmalloc succeeded"); - 
data_origin1 = (DATATYPE *)HDmalloc(dim0*dim1*sizeof(DATATYPE)); - VRFY((data_origin1 != NULL), "data_origin1 HDmalloc succeeded"); - - /* setup file access template */ - acc_tpl = create_faccess_plist(comm, info, facc_type); - VRFY((acc_tpl >= 0), ""); - - /* open the file collectively */ - fid = H5Fopen(filename, H5F_ACC_RDONLY, acc_tpl); - VRFY((fid >= 0), ""); - - /* Release file-access template */ - ret = H5Pclose(acc_tpl); - VRFY((ret >= 0), ""); - - /* open the dataset1 collectively */ - dataset1 = H5Dopen2(fid, DATASETNAME1, H5P_DEFAULT); - VRFY((dataset1 >= 0), ""); - - /* open another dataset collectively */ - dataset2 = H5Dopen2(fid, DATASETNAME1, H5P_DEFAULT); - VRFY((dataset2 >= 0), ""); - - - /* set up dimensions of the slab this process accesses */ - slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW); - - /* create a file dataspace independently */ - file_dataspace = H5Dget_space (dataset1); - VRFY((file_dataspace >= 0), ""); - ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block); - VRFY((ret >= 0), ""); - - /* create a memory dataspace independently */ - mem_dataspace = H5Screate_simple (MAX_RANK, block, NULL); - VRFY((mem_dataspace >= 0), ""); - - /* fill dataset with test data */ - dataset_fill(start, block, data_origin1); - - /* read data independently */ - ret = H5Dread(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, - H5P_DEFAULT, data_array1); - VRFY((ret >= 0), ""); - - /* verify the read data with original expected data */ - ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1); - if(ret) nerrors++; - - /* read data independently */ - ret = H5Dread(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, - H5P_DEFAULT, data_array1); - VRFY((ret >= 0), ""); - - /* verify the read data with original expected data */ - ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1); - if(ret) nerrors++; - - /* close dataset collectively */ - ret = H5Dclose(dataset1); - VRFY((ret >= 0), ""); - ret = H5Dclose(dataset2); - VRFY((ret >= 0), ""); - - /* release all IDs created */ - H5Sclose(file_dataspace); - - /* close the file collectively */ - H5Fclose(fid); - - /* release data buffers */ - if(data_array1) HDfree(data_array1); - if(data_origin1) HDfree(data_origin1); -} - - -/* - * Part 1.b--Collective read/write for fixed dimension datasets. - */ - -/* - * Example of using the parallel HDF5 library to create two datasets - * in one HDF5 file with collective parallel access support. - * The Datasets are of sizes (number-of-mpi-processes x dim0) x dim1. - * Each process controls only a slab of size dim0 x dim1 within each - * dataset. [Note: not so yet. Datasets are of sizes dim0xdim1 and - * each process controls a hyperslab within.] 
- */ - -void -dataset_writeAll(void) -{ - hid_t fid; /* HDF5 file ID */ - hid_t acc_tpl; /* File access templates */ - hid_t xfer_plist; /* Dataset transfer properties list */ - hid_t sid; /* Dataspace ID */ - hid_t file_dataspace; /* File dataspace ID */ - hid_t mem_dataspace; /* memory dataspace ID */ - hid_t dataset1, dataset2, dataset3, dataset4; /* Dataset ID */ - hid_t dataset5, dataset6, dataset7; /* Dataset ID */ - hid_t datatype; /* Datatype ID */ - hsize_t dims[MAX_RANK] = {1,}; /* dataset dim sizes */ - DATATYPE *data_array1 = NULL; /* data buffer */ - const char *filename; - - hsize_t start[MAX_RANK]; /* for hyperslab setting */ - hsize_t count[MAX_RANK]; - hsize_t stride[MAX_RANK]; /* for hyperslab setting */ - hsize_t block[MAX_RANK]; /* for hyperslab setting */ - - size_t num_points; /* for point selection */ - hsize_t *coords = NULL; /* for point selection */ - hsize_t current_dims; /* for point selection */ - - herr_t ret; /* Generic return value */ - int mpi_size, mpi_rank; - - MPI_Comm comm = test_comm; - MPI_Info info = MPI_INFO_NULL; - - filename = GetTestParameters(); - if(VERBOSE_MED) - HDprintf("Collective write test on file %s\n", filename); - - /* set up MPI parameters */ - MPI_Comm_size(test_comm,&mpi_size); - MPI_Comm_rank(test_comm,&mpi_rank); - - /* set up the coords array selection */ - num_points = dim1; - coords = (hsize_t *)HDmalloc(dim1 * MAX_RANK * sizeof(hsize_t)); - VRFY((coords != NULL), "coords malloc succeeded"); - - /* allocate memory for data buffer */ - data_array1 = (DATATYPE *)HDmalloc(dim0*dim1*sizeof(DATATYPE)); - VRFY((data_array1 != NULL), "data_array1 HDmalloc succeeded"); - - /* ------------------- - * START AN HDF5 FILE - * -------------------*/ - /* setup file access template */ - acc_tpl = create_faccess_plist(comm, info, facc_type); - VRFY((acc_tpl >= 0), ""); - - /* create the file collectively */ - fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, acc_tpl); - VRFY((fid >= 0), "H5Fcreate succeeded"); - - /* Release file-access template */ - ret = H5Pclose(acc_tpl); - VRFY((ret >= 0), ""); - - - /* -------------------------- - * Define the dimensions of the overall datasets - * and create the dataset - * ------------------------- */ - /* setup 2-D dimensionality object */ - dims[0] = dim0; - dims[1] = dim1; - sid = H5Screate_simple (MAX_RANK, dims, NULL); - VRFY((sid >= 0), "H5Screate_simple succeeded"); - - - /* create a dataset collectively */ - dataset1 = H5Dcreate2(fid, DATASETNAME1, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - VRFY((dataset1 >= 0), "H5Dcreate2 succeeded"); - - /* create another dataset collectively */ - datatype = H5Tcopy(H5T_NATIVE_INT); - ret = H5Tset_order(datatype, H5T_ORDER_LE); - VRFY((ret >= 0), "H5Tset_order succeeded"); - - dataset2 = H5Dcreate2(fid, DATASETNAME2, datatype, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - VRFY((dataset2 >= 0), "H5Dcreate2 2 succeeded"); - - /* create a third dataset collectively */ - dataset3 = H5Dcreate2(fid, DATASETNAME3, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - VRFY((dataset3 >= 0), "H5Dcreate2 succeeded"); - - dataset5 = H5Dcreate2(fid, DATASETNAME7, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - VRFY((dataset5 >= 0), "H5Dcreate2 succeeded"); - dataset6 = H5Dcreate2(fid, DATASETNAME8, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - VRFY((dataset6 >= 0), "H5Dcreate2 succeeded"); - dataset7 = H5Dcreate2(fid, DATASETNAME9, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - 
VRFY((dataset7 >= 0), "H5Dcreate2 succeeded"); - - /* release 2-D space ID created */ - H5Sclose(sid); - - /* setup scalar dimensionality object */ - sid = H5Screate(H5S_SCALAR); - VRFY((sid >= 0), "H5Screate succeeded"); - - /* create a fourth dataset collectively */ - dataset4 = H5Dcreate2(fid, DATASETNAME4, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - VRFY((dataset4 >= 0), "H5Dcreate2 succeeded"); - - /* release scalar space ID created */ - H5Sclose(sid); - - /* - * Set up dimensions of the slab this process accesses. - */ - - /* Dataset1: each process takes a block of rows. */ - slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW); - - /* create a file dataspace independently */ - file_dataspace = H5Dget_space (dataset1); - VRFY((file_dataspace >= 0), "H5Dget_space succeeded"); - ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block); - VRFY((ret >= 0), "H5Sset_hyperslab succeeded"); - - /* create a memory dataspace independently */ - mem_dataspace = H5Screate_simple (MAX_RANK, block, NULL); - VRFY((mem_dataspace >= 0), ""); - - /* fill the local slab with some trivial data */ - dataset_fill(start, block, data_array1); - MESG("data_array initialized"); - if(VERBOSE_MED){ - MESG("data_array created"); - dataset_print(start, block, data_array1); - } - - /* set up the collective transfer properties list */ - xfer_plist = H5Pcreate (H5P_DATASET_XFER); - VRFY((xfer_plist >= 0), "H5Pcreate xfer succeeded"); - ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE); - VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded"); - if(dxfer_coll_type == DXFER_INDEPENDENT_IO) { - ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO); - VRFY((ret>= 0),"set independent IO collectively succeeded"); - } - - - /* write data collectively */ - MESG("writeAll by Row"); - ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, - xfer_plist, data_array1); - VRFY((ret >= 0), "H5Dwrite dataset1 succeeded"); - - /* setup dimensions again to writeAll with zero rows for process 0 */ - if(VERBOSE_MED) - HDprintf("writeAll by some with zero row\n"); - slab_set(mpi_rank, mpi_size, start, count, stride, block, ZROW); - ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block); - VRFY((ret >= 0), "H5Sset_hyperslab succeeded"); - /* need to make mem_dataspace to match for process 0 */ - if(MAINPROCESS){ - ret = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block); - VRFY((ret >= 0), "H5Sset_hyperslab mem_dataspace succeeded"); - } - MESG("writeAll by some with zero row"); - ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, - xfer_plist, data_array1); - VRFY((ret >= 0), "H5Dwrite dataset1 by ZROW succeeded"); - - /* release all temporary handles. */ - /* Could have used them for dataset2 but it is cleaner */ - /* to create them again.*/ - H5Sclose(file_dataspace); - H5Sclose(mem_dataspace); - H5Pclose(xfer_plist); - - /* Dataset2: each process takes a block of columns. 
*/ - slab_set(mpi_rank, mpi_size, start, count, stride, block, BYCOL); - - /* put some trivial data in the data_array */ - dataset_fill(start, block, data_array1); - MESG("data_array initialized"); - if(VERBOSE_MED){ - MESG("data_array created"); - dataset_print(start, block, data_array1); - } - - /* create a file dataspace independently */ - file_dataspace = H5Dget_space (dataset1); - VRFY((file_dataspace >= 0), "H5Dget_space succeeded"); - ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block); - VRFY((ret >= 0), "H5Sset_hyperslab succeeded"); - - /* create a memory dataspace independently */ - mem_dataspace = H5Screate_simple (MAX_RANK, block, NULL); - VRFY((mem_dataspace >= 0), ""); - - /* fill the local slab with some trivial data */ - dataset_fill(start, block, data_array1); - MESG("data_array initialized"); - if(VERBOSE_MED){ - MESG("data_array created"); - dataset_print(start, block, data_array1); - } - - /* set up the collective transfer properties list */ - xfer_plist = H5Pcreate (H5P_DATASET_XFER); - VRFY((xfer_plist >= 0), ""); - ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE); - VRFY((ret >= 0), "H5Pcreate xfer succeeded"); - if(dxfer_coll_type == DXFER_INDEPENDENT_IO) { - ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO); - VRFY((ret>= 0),"set independent IO collectively succeeded"); - } - - - /* write data independently */ - ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, - xfer_plist, data_array1); - VRFY((ret >= 0), "H5Dwrite dataset2 succeeded"); - - /* setup dimensions again to writeAll with zero columns for process 0 */ - if(VERBOSE_MED) - HDprintf("writeAll by some with zero col\n"); - slab_set(mpi_rank, mpi_size, start, count, stride, block, ZCOL); - ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block); - VRFY((ret >= 0), "H5Sset_hyperslab succeeded"); - /* need to make mem_dataspace to match for process 0 */ - if(MAINPROCESS){ - ret = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block); - VRFY((ret >= 0), "H5Sset_hyperslab mem_dataspace succeeded"); - } - MESG("writeAll by some with zero col"); - ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, - xfer_plist, data_array1); - VRFY((ret >= 0), "H5Dwrite dataset1 by ZCOL succeeded"); - - /* release all temporary handles. */ - /* Could have used them for dataset3 but it is cleaner */ - /* to create them again.*/ - H5Sclose(file_dataspace); - H5Sclose(mem_dataspace); - H5Pclose(xfer_plist); - - - /* Dataset3: each process takes a block of rows, except process zero uses "none" selection. 
*/ - slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW); - - /* create a file dataspace independently */ - file_dataspace = H5Dget_space (dataset3); - VRFY((file_dataspace >= 0), "H5Dget_space succeeded"); - if(MAINPROCESS) { - ret = H5Sselect_none(file_dataspace); - VRFY((ret >= 0), "H5Sselect_none file_dataspace succeeded"); - } /* end if */ - else { - ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block); - VRFY((ret >= 0), "H5Sselect_hyperslab succeeded"); - } /* end else */ - - /* create a memory dataspace independently */ - mem_dataspace = H5Screate_simple (MAX_RANK, block, NULL); - VRFY((mem_dataspace >= 0), ""); - if(MAINPROCESS) { - ret = H5Sselect_none(mem_dataspace); - VRFY((ret >= 0), "H5Sselect_none mem_dataspace succeeded"); - } /* end if */ - - /* fill the local slab with some trivial data */ - dataset_fill(start, block, data_array1); - MESG("data_array initialized"); - if(VERBOSE_MED) { - MESG("data_array created"); - dataset_print(start, block, data_array1); - } /* end if */ - - /* set up the collective transfer properties list */ - xfer_plist = H5Pcreate (H5P_DATASET_XFER); - VRFY((xfer_plist >= 0), ""); - ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE); - VRFY((ret >= 0), "H5Pcreate xfer succeeded"); - if(dxfer_coll_type == DXFER_INDEPENDENT_IO) { - ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO); - VRFY((ret>= 0),"set independent IO collectively succeeded"); - } - - - /* write data collectively */ - MESG("writeAll with none"); - ret = H5Dwrite(dataset3, H5T_NATIVE_INT, mem_dataspace, file_dataspace, - xfer_plist, data_array1); - VRFY((ret >= 0), "H5Dwrite dataset3 succeeded"); - - /* write data collectively (with datatype conversion) */ - MESG("writeAll with none"); - ret = H5Dwrite(dataset3, H5T_NATIVE_UCHAR, mem_dataspace, file_dataspace, - xfer_plist, data_array1); - VRFY((ret >= 0), "H5Dwrite dataset3 succeeded"); - - /* release all temporary handles. */ - /* Could have used them for dataset4 but it is cleaner */ - /* to create them again.*/ - H5Sclose(file_dataspace); - H5Sclose(mem_dataspace); - H5Pclose(xfer_plist); - - /* Dataset4: each process writes no data, except process zero uses "all" selection. 
*/ - /* Additionally, these are in a scalar dataspace */ - - /* create a file dataspace independently */ - file_dataspace = H5Dget_space (dataset4); - VRFY((file_dataspace >= 0), "H5Dget_space succeeded"); - if(MAINPROCESS) { - ret = H5Sselect_none(file_dataspace); - VRFY((ret >= 0), "H5Sselect_all file_dataspace succeeded"); - } /* end if */ - else { - ret = H5Sselect_all(file_dataspace); - VRFY((ret >= 0), "H5Sselect_none succeeded"); - } /* end else */ - - /* create a memory dataspace independently */ - mem_dataspace = H5Screate(H5S_SCALAR); - VRFY((mem_dataspace >= 0), ""); - if(MAINPROCESS) { - ret = H5Sselect_none(mem_dataspace); - VRFY((ret >= 0), "H5Sselect_all mem_dataspace succeeded"); - } /* end if */ - else { - ret = H5Sselect_all(mem_dataspace); - VRFY((ret >= 0), "H5Sselect_none succeeded"); - } /* end else */ - - /* fill the local slab with some trivial data */ - dataset_fill(start, block, data_array1); - MESG("data_array initialized"); - if(VERBOSE_MED) { - MESG("data_array created"); - dataset_print(start, block, data_array1); - } /* end if */ - - /* set up the collective transfer properties list */ - xfer_plist = H5Pcreate (H5P_DATASET_XFER); - VRFY((xfer_plist >= 0), ""); - ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE); - VRFY((ret >= 0), "H5Pcreate xfer succeeded"); - if(dxfer_coll_type == DXFER_INDEPENDENT_IO) { - ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO); - VRFY((ret>= 0),"set independent IO collectively succeeded"); - } - - /* write data collectively */ - MESG("writeAll with scalar dataspace"); - ret = H5Dwrite(dataset4, H5T_NATIVE_INT, mem_dataspace, file_dataspace, - xfer_plist, data_array1); - VRFY((ret >= 0), "H5Dwrite dataset4 succeeded"); - - /* write data collectively (with datatype conversion) */ - MESG("writeAll with scalar dataspace"); - ret = H5Dwrite(dataset4, H5T_NATIVE_UCHAR, mem_dataspace, file_dataspace, - xfer_plist, data_array1); - VRFY((ret >= 0), "H5Dwrite dataset4 succeeded"); - - /* release all temporary handles. 
*/ - H5Sclose(file_dataspace); - H5Sclose(mem_dataspace); - H5Pclose(xfer_plist); - - - if(data_array1) free(data_array1); - data_array1 = (DATATYPE *)HDmalloc(dim0*dim1*sizeof(DATATYPE)); - VRFY((data_array1 != NULL), "data_array1 malloc succeeded"); - - block[0] = 1; - block[1] = dim1; - stride[0] = 1; - stride[1] = dim1; - count[0] = 1; - count[1] = 1; - start[0] = dim0/mpi_size * mpi_rank; - start[1] = 0; - - dataset_fill(start, block, data_array1); - MESG("data_array initialized"); - if(VERBOSE_MED){ - MESG("data_array created"); - dataset_print(start, block, data_array1); - } - - /* Dataset5: point selection in File - Hyperslab selection in Memory*/ - /* create a file dataspace independently */ - point_set (start, count, stride, block, num_points, coords, OUT_OF_ORDER); - file_dataspace = H5Dget_space (dataset5); - VRFY((file_dataspace >= 0), "H5Dget_space succeeded"); - ret = H5Sselect_elements(file_dataspace, H5S_SELECT_SET, num_points, coords); - VRFY((ret >= 0), "H5Sselect_elements succeeded"); - - start[0] = 0; - start[1] = 0; - mem_dataspace = H5Dget_space (dataset5); - VRFY((mem_dataspace >= 0), "H5Dget_space succeeded"); - ret = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block); - VRFY((ret >= 0), "H5Sset_hyperslab succeeded"); - - /* set up the collective transfer properties list */ - xfer_plist = H5Pcreate (H5P_DATASET_XFER); - VRFY((xfer_plist >= 0), ""); - ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE); - VRFY((ret >= 0), "H5Pcreate xfer succeeded"); - if(dxfer_coll_type == DXFER_INDEPENDENT_IO) { - ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO); - VRFY((ret>= 0),"set independent IO collectively succeeded"); - } - - /* write data collectively */ - ret = H5Dwrite(dataset5, H5T_NATIVE_INT, mem_dataspace, file_dataspace, - xfer_plist, data_array1); - VRFY((ret >= 0), "H5Dwrite dataset5 succeeded"); - - /* release all temporary handles. */ - H5Sclose(file_dataspace); - H5Sclose(mem_dataspace); - H5Pclose(xfer_plist); - - /* Dataset6: point selection in File - Point selection in Memory*/ - /* create a file dataspace independently */ - start[0] = dim0/mpi_size * mpi_rank; - start[1] = 0; - point_set (start, count, stride, block, num_points, coords, OUT_OF_ORDER); - file_dataspace = H5Dget_space (dataset6); - VRFY((file_dataspace >= 0), "H5Dget_space succeeded"); - ret = H5Sselect_elements(file_dataspace, H5S_SELECT_SET, num_points, coords); - VRFY((ret >= 0), "H5Sselect_elements succeeded"); - - start[0] = 0; - start[1] = 0; - point_set (start, count, stride, block, num_points, coords, IN_ORDER); - mem_dataspace = H5Dget_space (dataset6); - VRFY((mem_dataspace >= 0), "H5Dget_space succeeded"); - ret = H5Sselect_elements(mem_dataspace, H5S_SELECT_SET, num_points, coords); - VRFY((ret >= 0), "H5Sselect_elements succeeded"); - - /* set up the collective transfer properties list */ - xfer_plist = H5Pcreate (H5P_DATASET_XFER); - VRFY((xfer_plist >= 0), ""); - ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE); - VRFY((ret >= 0), "H5Pcreate xfer succeeded"); - if(dxfer_coll_type == DXFER_INDEPENDENT_IO) { - ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO); - VRFY((ret>= 0),"set independent IO collectively succeeded"); - } - - /* write data collectively */ - ret = H5Dwrite(dataset6, H5T_NATIVE_INT, mem_dataspace, file_dataspace, - xfer_plist, data_array1); - VRFY((ret >= 0), "H5Dwrite dataset6 succeeded"); - - /* release all temporary handles. 
*/
-    H5Sclose(file_dataspace);
-    H5Sclose(mem_dataspace);
-    H5Pclose(xfer_plist);
-
-    /* Dataset7: point selection in File - All selection in Memory*/
-    /* create a file dataspace independently */
-    start[0] = dim0/mpi_size * mpi_rank;
-    start[1] = 0;
-    point_set (start, count, stride, block, num_points, coords, IN_ORDER);
-    file_dataspace = H5Dget_space (dataset7);
-    VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
-    ret = H5Sselect_elements(file_dataspace, H5S_SELECT_SET, num_points, coords);
-    VRFY((ret >= 0), "H5Sselect_elements succeeded");
-
-    current_dims = num_points;
-    mem_dataspace = H5Screate_simple (1, &current_dims, NULL);
-    VRFY((mem_dataspace >= 0), "mem_dataspace create succeeded");
-
-    ret = H5Sselect_all(mem_dataspace);
-    VRFY((ret >= 0), "H5Sselect_all succeeded");
-
-    /* set up the collective transfer properties list */
-    xfer_plist = H5Pcreate (H5P_DATASET_XFER);
-    VRFY((xfer_plist >= 0), "");
-    ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
-    VRFY((ret >= 0), "H5Pcreate xfer succeeded");
-    if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
-        ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
-        VRFY((ret>= 0),"set independent IO collectively succeeded");
-    }
-
-    /* write data collectively */
-    ret = H5Dwrite(dataset7, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
-            xfer_plist, data_array1);
-    VRFY((ret >= 0), "H5Dwrite dataset7 succeeded");
-
-    /* release all temporary handles. */
-    H5Sclose(file_dataspace);
-    H5Sclose(mem_dataspace);
-    H5Pclose(xfer_plist);
-
-    /*
-     * All writes completed.  Close datasets collectively
-     */
-    ret = H5Dclose(dataset1);
-    VRFY((ret >= 0), "H5Dclose1 succeeded");
-    ret = H5Dclose(dataset2);
-    VRFY((ret >= 0), "H5Dclose2 succeeded");
-    ret = H5Dclose(dataset3);
-    VRFY((ret >= 0), "H5Dclose3 succeeded");
-    ret = H5Dclose(dataset4);
-    VRFY((ret >= 0), "H5Dclose4 succeeded");
-    ret = H5Dclose(dataset5);
-    VRFY((ret >= 0), "H5Dclose5 succeeded");
-    ret = H5Dclose(dataset6);
-    VRFY((ret >= 0), "H5Dclose6 succeeded");
-    ret = H5Dclose(dataset7);
-    VRFY((ret >= 0), "H5Dclose7 succeeded");
-
-    /* close the file collectively */
-    H5Fclose(fid);
-
-    /* release data buffers */
-    if(coords) HDfree(coords);
-    if(data_array1) HDfree(data_array1);
-}
-
-/*
- * Example of using the parallel HDF5 library to read two datasets
- * in one HDF5 file with collective parallel access support.
- * The Datasets are of sizes (number-of-mpi-processes x dim0) x dim1.
- * Each process controls only a slab of size dim0 x dim1 within each
- * dataset. [Note: not so yet. Datasets are of sizes dim0xdim1 and
- * each process controls a hyperslab within.]
- */ - -void -dataset_readAll(void) -{ - hid_t fid; /* HDF5 file ID */ - hid_t acc_tpl; /* File access templates */ - hid_t xfer_plist; /* Dataset transfer properties list */ - hid_t file_dataspace; /* File dataspace ID */ - hid_t mem_dataspace; /* memory dataspace ID */ - hid_t dataset1, dataset2, dataset5, dataset6, dataset7; /* Dataset ID */ - DATATYPE *data_array1 = NULL; /* data buffer */ - DATATYPE *data_origin1 = NULL; /* expected data buffer */ - const char *filename; - - hsize_t start[MAX_RANK]; /* for hyperslab setting */ - hsize_t count[MAX_RANK], stride[MAX_RANK]; /* for hyperslab setting */ - hsize_t block[MAX_RANK]; /* for hyperslab setting */ - - size_t num_points; /* for point selection */ - hsize_t *coords = NULL; /* for point selection */ - int i,j,k; - - herr_t ret; /* Generic return value */ - int mpi_size, mpi_rank; - - MPI_Comm comm = test_comm; - MPI_Info info = MPI_INFO_NULL; - - filename = GetTestParameters(); - if(VERBOSE_MED) - HDprintf("Collective read test on file %s\n", filename); - - /* set up MPI parameters */ - MPI_Comm_size(test_comm,&mpi_size); - MPI_Comm_rank(test_comm,&mpi_rank); - - /* set up the coords array selection */ - num_points = dim1; - coords = (hsize_t *)HDmalloc(dim0 * dim1 * MAX_RANK * sizeof(hsize_t)); - VRFY((coords != NULL), "coords malloc succeeded"); - - /* allocate memory for data buffer */ - data_array1 = (DATATYPE *)HDmalloc(dim0*dim1*sizeof(DATATYPE)); - VRFY((data_array1 != NULL), "data_array1 HDmalloc succeeded"); - data_origin1 = (DATATYPE *)HDmalloc(dim0*dim1*sizeof(DATATYPE)); - VRFY((data_origin1 != NULL), "data_origin1 HDmalloc succeeded"); - - /* ------------------- - * OPEN AN HDF5 FILE - * -------------------*/ - /* setup file access template */ - acc_tpl = create_faccess_plist(comm, info, facc_type); - VRFY((acc_tpl >= 0), ""); - - /* open the file collectively */ - fid=H5Fopen(filename,H5F_ACC_RDONLY,acc_tpl); - VRFY((fid >= 0), "H5Fopen succeeded"); - - /* Release file-access template */ - ret = H5Pclose(acc_tpl); - VRFY((ret >= 0), ""); - - - /* -------------------------- - * Open the datasets in it - * ------------------------- */ - /* open the dataset1 collectively */ - dataset1 = H5Dopen2(fid, DATASETNAME1, H5P_DEFAULT); - VRFY((dataset1 >= 0), "H5Dopen2 succeeded"); - - /* open another dataset collectively */ - dataset2 = H5Dopen2(fid, DATASETNAME2, H5P_DEFAULT); - VRFY((dataset2 >= 0), "H5Dopen2 2 succeeded"); - - /* open another dataset collectively */ - dataset5 = H5Dopen2(fid, DATASETNAME7, H5P_DEFAULT); - VRFY((dataset5 >= 0), "H5Dopen2 5 succeeded"); - dataset6 = H5Dopen2(fid, DATASETNAME8, H5P_DEFAULT); - VRFY((dataset6 >= 0), "H5Dopen2 6 succeeded"); - dataset7 = H5Dopen2(fid, DATASETNAME9, H5P_DEFAULT); - VRFY((dataset7 >= 0), "H5Dopen2 7 succeeded"); - - /* - * Set up dimensions of the slab this process accesses. - */ - - /* Dataset1: each process takes a block of columns. 
*/ - slab_set(mpi_rank, mpi_size, start, count, stride, block, BYCOL); - - /* create a file dataspace independently */ - file_dataspace = H5Dget_space (dataset1); - VRFY((file_dataspace >= 0), "H5Dget_space succeeded"); - ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block); - VRFY((ret >= 0), "H5Sset_hyperslab succeeded"); - - /* create a memory dataspace independently */ - mem_dataspace = H5Screate_simple (MAX_RANK, block, NULL); - VRFY((mem_dataspace >= 0), ""); - - /* fill dataset with test data */ - dataset_fill(start, block, data_origin1); - MESG("data_array initialized"); - if(VERBOSE_MED){ - MESG("data_array created"); - dataset_print(start, block, data_origin1); - } - - /* set up the collective transfer properties list */ - xfer_plist = H5Pcreate (H5P_DATASET_XFER); - VRFY((xfer_plist >= 0), ""); - ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE); - VRFY((ret >= 0), "H5Pcreate xfer succeeded"); - if(dxfer_coll_type == DXFER_INDEPENDENT_IO) { - ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO); - VRFY((ret>= 0),"set independent IO collectively succeeded"); - } - - - /* read data collectively */ - ret = H5Dread(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, - xfer_plist, data_array1); - VRFY((ret >= 0), "H5Dread dataset1 succeeded"); - - /* verify the read data with original expected data */ - ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1); - if(ret) nerrors++; - - /* setup dimensions again to readAll with zero columns for process 0 */ - if(VERBOSE_MED) - HDprintf("readAll by some with zero col\n"); - slab_set(mpi_rank, mpi_size, start, count, stride, block, ZCOL); - ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block); - VRFY((ret >= 0), "H5Sset_hyperslab succeeded"); - /* need to make mem_dataspace to match for process 0 */ - if(MAINPROCESS){ - ret = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block); - VRFY((ret >= 0), "H5Sset_hyperslab mem_dataspace succeeded"); - } - MESG("readAll by some with zero col"); - ret = H5Dread(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, - xfer_plist, data_array1); - VRFY((ret >= 0), "H5Dread dataset1 by ZCOL succeeded"); - - /* verify the read data with original expected data */ - ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1); - if(ret) nerrors++; - - /* release all temporary handles. */ - /* Could have used them for dataset2 but it is cleaner */ - /* to create them again.*/ - H5Sclose(file_dataspace); - H5Sclose(mem_dataspace); - H5Pclose(xfer_plist); - - /* Dataset2: each process takes a block of rows. 
*/ - slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW); - - /* create a file dataspace independently */ - file_dataspace = H5Dget_space (dataset1); - VRFY((file_dataspace >= 0), "H5Dget_space succeeded"); - ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block); - VRFY((ret >= 0), "H5Sset_hyperslab succeeded"); - - /* create a memory dataspace independently */ - mem_dataspace = H5Screate_simple (MAX_RANK, block, NULL); - VRFY((mem_dataspace >= 0), ""); - - /* fill dataset with test data */ - dataset_fill(start, block, data_origin1); - MESG("data_array initialized"); - if(VERBOSE_MED){ - MESG("data_array created"); - dataset_print(start, block, data_origin1); - } - - /* set up the collective transfer properties list */ - xfer_plist = H5Pcreate (H5P_DATASET_XFER); - VRFY((xfer_plist >= 0), ""); - ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE); - VRFY((ret >= 0), "H5Pcreate xfer succeeded"); - if(dxfer_coll_type == DXFER_INDEPENDENT_IO) { - ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO); - VRFY((ret>= 0),"set independent IO collectively succeeded"); - } - - - /* read data collectively */ - ret = H5Dread(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, - xfer_plist, data_array1); - VRFY((ret >= 0), "H5Dread dataset2 succeeded"); - - /* verify the read data with original expected data */ - ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1); - if(ret) nerrors++; - - /* setup dimensions again to readAll with zero rows for process 0 */ - if(VERBOSE_MED) - HDprintf("readAll by some with zero row\n"); - slab_set(mpi_rank, mpi_size, start, count, stride, block, ZROW); - ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block); - VRFY((ret >= 0), "H5Sset_hyperslab succeeded"); - /* need to make mem_dataspace to match for process 0 */ - if(MAINPROCESS){ - ret = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block); - VRFY((ret >= 0), "H5Sset_hyperslab mem_dataspace succeeded"); - } - MESG("readAll by some with zero row"); - ret = H5Dread(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, - xfer_plist, data_array1); - VRFY((ret >= 0), "H5Dread dataset1 by ZROW succeeded"); - - /* verify the read data with original expected data */ - ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1); - if(ret) nerrors++; - - /* release all temporary handles. 
*/ - H5Sclose(file_dataspace); - H5Sclose(mem_dataspace); - H5Pclose(xfer_plist); - - if(data_array1) free(data_array1); - if(data_origin1) free(data_origin1); - data_array1 = (DATATYPE *)HDmalloc(dim0*dim1*sizeof(DATATYPE)); - VRFY((data_array1 != NULL), "data_array1 malloc succeeded"); - data_origin1 = (DATATYPE *)HDmalloc(dim0*dim1*sizeof(DATATYPE)); - VRFY((data_origin1 != NULL), "data_origin1 malloc succeeded"); - - block[0] = 1; - block[1] = dim1; - stride[0] = 1; - stride[1] = dim1; - count[0] = 1; - count[1] = 1; - start[0] = dim0/mpi_size * mpi_rank; - start[1] = 0; - - dataset_fill(start, block, data_origin1); - MESG("data_array initialized"); - if(VERBOSE_MED){ - MESG("data_array created"); - dataset_print(start, block, data_origin1); - } - - /* Dataset5: point selection in memory - Hyperslab selection in file*/ - /* create a file dataspace independently */ - file_dataspace = H5Dget_space (dataset5); - VRFY((file_dataspace >= 0), "H5Dget_space succeeded"); - ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block); - VRFY((ret >= 0), "H5Sset_hyperslab succeeded"); - - start[0] = 0; - start[1] = 0; - point_set (start, count, stride, block, num_points, coords, OUT_OF_ORDER); - mem_dataspace = H5Dget_space (dataset5); - VRFY((mem_dataspace >= 0), "H5Dget_space succeeded"); - ret = H5Sselect_elements(mem_dataspace, H5S_SELECT_SET, num_points, coords); - VRFY((ret >= 0), "H5Sselect_elements succeeded"); - - /* set up the collective transfer properties list */ - xfer_plist = H5Pcreate (H5P_DATASET_XFER); - VRFY((xfer_plist >= 0), ""); - ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE); - VRFY((ret >= 0), "H5Pcreate xfer succeeded"); - if(dxfer_coll_type == DXFER_INDEPENDENT_IO) { - ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO); - VRFY((ret>= 0),"set independent IO collectively succeeded"); - } - - /* read data collectively */ - ret = H5Dread(dataset5, H5T_NATIVE_INT, mem_dataspace, file_dataspace, - xfer_plist, data_array1); - VRFY((ret >= 0), "H5Dread dataset5 succeeded"); - - - ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1); - if(ret) nerrors++; - - /* release all temporary handles. 
*/
-    H5Sclose(file_dataspace);
-    H5Sclose(mem_dataspace);
-    H5Pclose(xfer_plist);
-
-    if(data_array1) free(data_array1);
-    data_array1 = (DATATYPE *)HDmalloc(dim0*dim1*sizeof(DATATYPE));
-    VRFY((data_array1 != NULL), "data_array1 malloc succeeded");
-
-    /* Dataset6: point selection in File - Point selection in Memory*/
-    /* create a file dataspace independently */
-    start[0] = dim0/mpi_size * mpi_rank;
-    start[1] = 0;
-    point_set (start, count, stride, block, num_points, coords, IN_ORDER);
-    file_dataspace = H5Dget_space (dataset6);
-    VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
-    ret = H5Sselect_elements(file_dataspace, H5S_SELECT_SET, num_points, coords);
-    VRFY((ret >= 0), "H5Sselect_elements succeeded");
-
-    start[0] = 0;
-    start[1] = 0;
-    point_set (start, count, stride, block, num_points, coords, OUT_OF_ORDER);
-    mem_dataspace = H5Dget_space (dataset6);
-    VRFY((mem_dataspace >= 0), "H5Dget_space succeeded");
-    ret = H5Sselect_elements(mem_dataspace, H5S_SELECT_SET, num_points, coords);
-    VRFY((ret >= 0), "H5Sselect_elements succeeded");
-
-    /* set up the collective transfer properties list */
-    xfer_plist = H5Pcreate (H5P_DATASET_XFER);
-    VRFY((xfer_plist >= 0), "");
-    ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
-    VRFY((ret >= 0), "H5Pcreate xfer succeeded");
-    if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
-        ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
-        VRFY((ret>= 0),"set independent IO collectively succeeded");
-    }
-
-    /* read data collectively */
-    ret = H5Dread(dataset6, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
-            xfer_plist, data_array1);
-    VRFY((ret >= 0), "H5Dread dataset6 succeeded");
-
-    ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1);
-    if(ret) nerrors++;
-
-    /* release all temporary handles. */
-    H5Sclose(file_dataspace);
-    H5Sclose(mem_dataspace);
-    H5Pclose(xfer_plist);
-
-    if(data_array1) free(data_array1);
-    data_array1 = (DATATYPE *)HDmalloc(dim0*dim1*sizeof(DATATYPE));
-    VRFY((data_array1 != NULL), "data_array1 malloc succeeded");
-
-    /* Dataset7: point selection in memory - All selection in file*/
-    /* create a file dataspace independently */
-    file_dataspace = H5Dget_space (dataset7);
-    VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
-    ret = H5Sselect_all(file_dataspace);
-    VRFY((ret >= 0), "H5Sselect_all succeeded");
-
-    num_points = dim0 * dim1;
-    k = 0;
-    for (i=0 ; i<dim0; i++) {
-        for (j=0 ; j<dim1; j++) {
-            coords[k++] = i;
-            coords[k++] = j;
-        }
-    }
-    mem_dataspace = H5Dget_space (dataset7);
-    VRFY((mem_dataspace >= 0), "H5Dget_space succeeded");
-    ret = H5Sselect_elements(mem_dataspace, H5S_SELECT_SET, num_points, coords);
-    VRFY((ret >= 0), "H5Sselect_elements succeeded");
-
-    /* set up the collective transfer properties list */
-    xfer_plist = H5Pcreate (H5P_DATASET_XFER);
-    VRFY((xfer_plist >= 0), "");
-    ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
-    VRFY((ret >= 0), "H5Pcreate xfer succeeded");
-    if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
-        ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
-        VRFY((ret>= 0),"set independent IO collectively succeeded");
-    }
-
-    /* read data collectively */
-    ret = H5Dread(dataset7, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
-            xfer_plist, data_array1);
-    VRFY((ret >= 0), "H5Dread dataset7 succeeded");
-
-    start[0] = dim0/mpi_size * mpi_rank;
-    start[1] = 0;
-    ret = dataset_vrfy(start, count, stride, block, data_array1+(dim0/mpi_size * dim1 * mpi_rank), data_origin1);
-    if(ret) nerrors++;
-
-    /* release all temporary handles. */
-    H5Sclose(file_dataspace);
-    H5Sclose(mem_dataspace);
-    H5Pclose(xfer_plist);
-
-    /*
-     * All reads completed.
Close datasets collectively - */ - ret = H5Dclose(dataset1); - VRFY((ret >= 0), "H5Dclose1 succeeded"); - ret = H5Dclose(dataset2); - VRFY((ret >= 0), "H5Dclose2 succeeded"); - ret = H5Dclose(dataset5); - VRFY((ret >= 0), "H5Dclose5 succeeded"); - ret = H5Dclose(dataset6); - VRFY((ret >= 0), "H5Dclose6 succeeded"); - ret = H5Dclose(dataset7); - VRFY((ret >= 0), "H5Dclose7 succeeded"); - - /* close the file collectively */ - H5Fclose(fid); - - /* release data buffers */ - if(coords) HDfree(coords); - if(data_array1) HDfree(data_array1); - if(data_origin1) HDfree(data_origin1); -} - - -/* - * Part 2--Independent read/write for extendible datasets. - */ - -/* - * Example of using the parallel HDF5 library to create two extendible - * datasets in one HDF5 file with independent parallel MPIO access support. - * The Datasets are of sizes (number-of-mpi-processes x dim0) x dim1. - * Each process controls only a slab of size dim0 x dim1 within each - * dataset. - */ - -void -extend_writeInd(void) -{ - hid_t fid; /* HDF5 file ID */ - hid_t acc_tpl; /* File access templates */ - hid_t sid; /* Dataspace ID */ - hid_t file_dataspace; /* File dataspace ID */ - hid_t mem_dataspace; /* memory dataspace ID */ - hid_t dataset1, dataset2; /* Dataset ID */ - const char *filename; - hsize_t dims[MAX_RANK]; /* dataset dim sizes */ - hsize_t max_dims[MAX_RANK] = - {H5S_UNLIMITED, H5S_UNLIMITED}; /* dataset maximum dim sizes */ - DATATYPE *data_array1 = NULL; /* data buffer */ - hsize_t chunk_dims[MAX_RANK]; /* chunk sizes */ - hid_t dataset_pl; /* dataset create prop. list */ - - hsize_t start[MAX_RANK]; /* for hyperslab setting */ - hsize_t count[MAX_RANK]; /* for hyperslab setting */ - hsize_t stride[MAX_RANK]; /* for hyperslab setting */ - hsize_t block[MAX_RANK]; /* for hyperslab setting */ - - herr_t ret; /* Generic return value */ - int mpi_size, mpi_rank; - - MPI_Comm comm = test_comm; - MPI_Info info = MPI_INFO_NULL; - - filename = GetTestParameters(); - if(VERBOSE_MED) - HDprintf("Extend independent write test on file %s\n", filename); - - /* set up MPI parameters */ - MPI_Comm_size(test_comm,&mpi_size); - MPI_Comm_rank(test_comm,&mpi_rank); - - /* setup chunk-size. Make sure sizes are > 0 */ - chunk_dims[0] = chunkdim0; - chunk_dims[1] = chunkdim1; - - /* allocate memory for data buffer */ - data_array1 = (DATATYPE *)HDmalloc(dim0*dim1*sizeof(DATATYPE)); - VRFY((data_array1 != NULL), "data_array1 HDmalloc succeeded"); - - /* ------------------- - * START AN HDF5 FILE - * -------------------*/ - /* setup file access template */ - acc_tpl = create_faccess_plist(comm, info, facc_type); - VRFY((acc_tpl >= 0), ""); - -/* Reduce the number of metadata cache slots, so that there are cache - * collisions during the raw data I/O on the chunked dataset. This stresses - * the metadata cache and tests for cache bugs. 
-QAK - */ -{ - int mdc_nelmts; - size_t rdcc_nelmts; - size_t rdcc_nbytes; - double rdcc_w0; - - ret = H5Pget_cache(acc_tpl,&mdc_nelmts,&rdcc_nelmts,&rdcc_nbytes,&rdcc_w0); - VRFY((ret >= 0), "H5Pget_cache succeeded"); - mdc_nelmts=4; - ret = H5Pset_cache(acc_tpl,mdc_nelmts,rdcc_nelmts,rdcc_nbytes,rdcc_w0); - VRFY((ret >= 0), "H5Pset_cache succeeded"); -} - - /* create the file collectively */ - fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, acc_tpl); - VRFY((fid >= 0), "H5Fcreate succeeded"); - - /* Release file-access template */ - ret = H5Pclose(acc_tpl); - VRFY((ret >= 0), ""); - - - /* -------------------------------------------------------------- - * Define the dimensions of the overall datasets and create them. - * ------------------------------------------------------------- */ - - /* set up dataset storage chunk sizes and creation property list */ - if(VERBOSE_MED) - HDprintf("chunks[]=%lu,%lu\n", (unsigned long)chunk_dims[0], (unsigned long)chunk_dims[1]); - dataset_pl = H5Pcreate(H5P_DATASET_CREATE); - VRFY((dataset_pl >= 0), "H5Pcreate succeeded"); - ret = H5Pset_chunk(dataset_pl, MAX_RANK, chunk_dims); - VRFY((ret >= 0), "H5Pset_chunk succeeded"); - - /* setup dimensionality object */ - /* start out with no rows, extend it later. */ - dims[0] = dims[1] = 0; - sid = H5Screate_simple (MAX_RANK, dims, max_dims); - VRFY((sid >= 0), "H5Screate_simple succeeded"); - - /* create an extendible dataset collectively */ - dataset1 = H5Dcreate2(fid, DATASETNAME1, H5T_NATIVE_INT, sid, H5P_DEFAULT, dataset_pl, H5P_DEFAULT); - VRFY((dataset1 >= 0), "H5Dcreate2 succeeded"); - - /* create another extendible dataset collectively */ - dataset2 = H5Dcreate2(fid, DATASETNAME2, H5T_NATIVE_INT, sid, H5P_DEFAULT, dataset_pl, H5P_DEFAULT); - VRFY((dataset2 >= 0), "H5Dcreate2 succeeded"); - - /* release resource */ - H5Sclose(sid); - H5Pclose(dataset_pl); - - - - /* ------------------------- - * Test writing to dataset1 - * -------------------------*/ - /* set up dimensions of the slab this process accesses */ - slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW); - - /* put some trivial data in the data_array */ - dataset_fill(start, block, data_array1); - MESG("data_array initialized"); - if(VERBOSE_MED) { - MESG("data_array created"); - dataset_print(start, block, data_array1); - } - - /* create a memory dataspace independently */ - mem_dataspace = H5Screate_simple (MAX_RANK, block, NULL); - VRFY((mem_dataspace >= 0), ""); - - /* Extend its current dim sizes before writing */ - dims[0] = dim0; - dims[1] = dim1; - ret = H5Dset_extent(dataset1, dims); - VRFY((ret >= 0), "H5Dset_extent succeeded"); - - /* create a file dataspace independently */ - file_dataspace = H5Dget_space (dataset1); - VRFY((file_dataspace >= 0), "H5Dget_space succeeded"); - ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block); - VRFY((ret >= 0), "H5Sset_hyperslab succeeded"); - - /* write data independently */ - ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, - H5P_DEFAULT, data_array1); - VRFY((ret >= 0), "H5Dwrite succeeded"); - - /* release resource */ - H5Sclose(file_dataspace); - H5Sclose(mem_dataspace); - - - /* ------------------------- - * Test writing to dataset2 - * -------------------------*/ - /* set up dimensions of the slab this process accesses */ - slab_set(mpi_rank, mpi_size, start, count, stride, block, BYCOL); - - /* put some trivial data in the data_array */ - dataset_fill(start, block, data_array1); - MESG("data_array initialized"); 
- if(VERBOSE_MED){ - MESG("data_array created"); - dataset_print(start, block, data_array1); - } - - /* create a memory dataspace independently */ - mem_dataspace = H5Screate_simple (MAX_RANK, block, NULL); - VRFY((mem_dataspace >= 0), ""); - - /* Try write to dataset2 beyond its current dim sizes. Should fail. */ - /* Temporary turn off auto error reporting */ - H5Eget_auto2(H5E_DEFAULT, &old_func, &old_client_data); - H5Eset_auto2(H5E_DEFAULT, NULL, NULL); - - /* create a file dataspace independently */ - file_dataspace = H5Dget_space (dataset2); - VRFY((file_dataspace >= 0), "H5Dget_space succeeded"); - ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block); - VRFY((ret >= 0), "H5Sset_hyperslab succeeded"); - - /* write data independently. Should fail. */ - ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, - H5P_DEFAULT, data_array1); - VRFY((ret < 0), "H5Dwrite failed as expected"); - - /* restore auto error reporting */ - H5Eset_auto2(H5E_DEFAULT, old_func, old_client_data); - H5Sclose(file_dataspace); - - /* Extend dataset2 and try again. Should succeed. */ - dims[0] = dim0; - dims[1] = dim1; - ret = H5Dset_extent(dataset2, dims); - VRFY((ret >= 0), "H5Dset_extent succeeded"); - - /* create a file dataspace independently */ - file_dataspace = H5Dget_space (dataset2); - VRFY((file_dataspace >= 0), "H5Dget_space succeeded"); - ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block); - VRFY((ret >= 0), "H5Sset_hyperslab succeeded"); - - /* write data independently */ - ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, - H5P_DEFAULT, data_array1); - VRFY((ret >= 0), "H5Dwrite succeeded"); - - /* release resource */ - ret = H5Sclose(file_dataspace); - VRFY((ret >= 0), "H5Sclose succeeded"); - ret = H5Sclose(mem_dataspace); - VRFY((ret >= 0), "H5Sclose succeeded"); - - - /* close dataset collectively */ - ret = H5Dclose(dataset1); - VRFY((ret >= 0), "H5Dclose1 succeeded"); - ret = H5Dclose(dataset2); - VRFY((ret >= 0), "H5Dclose2 succeeded"); - - /* close the file collectively */ - H5Fclose(fid); - - /* release data buffers */ - if(data_array1) HDfree(data_array1); -} - -/* - * Example of using the parallel HDF5 library to create an extendable dataset - * and perform I/O on it in a way that verifies that the chunk cache is - * bypassed for parallel I/O. - */ - -void -extend_writeInd2(void) -{ - const char *filename; - hid_t fid; /* HDF5 file ID */ - hid_t fapl; /* File access templates */ - hid_t fs; /* File dataspace ID */ - hid_t ms; /* Memory dataspace ID */ - hid_t dataset; /* Dataset ID */ - hsize_t orig_size=10; /* Original dataset dim size */ - hsize_t new_size=20; /* Extended dataset dim size */ - hsize_t one=1; - hsize_t max_size = H5S_UNLIMITED; /* dataset maximum dim size */ - hsize_t chunk_size = 16384; /* chunk size */ - hid_t dcpl; /* dataset create prop. 
list */ - int written[10], /* Data to write */ - retrieved[10]; /* Data read in */ - int mpi_size, mpi_rank; /* MPI settings */ - int i; /* Local index variable */ - herr_t ret; /* Generic return value */ - - filename = GetTestParameters(); - if(VERBOSE_MED) - HDprintf("Extend independent write test #2 on file %s\n", filename); - - /* set up MPI parameters */ - MPI_Comm_size(test_comm,&mpi_size); - MPI_Comm_rank(test_comm,&mpi_rank); - - /* ------------------- - * START AN HDF5 FILE - * -------------------*/ - /* setup file access template */ - fapl = create_faccess_plist(test_comm, MPI_INFO_NULL, facc_type); - VRFY((fapl >= 0), "create_faccess_plist succeeded"); - - /* create the file collectively */ - fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl); - VRFY((fid >= 0), "H5Fcreate succeeded"); - - /* Release file-access template */ - ret = H5Pclose(fapl); - VRFY((ret >= 0), "H5Pclose succeeded"); - - - /* -------------------------------------------------------------- - * Define the dimensions of the overall datasets and create them. - * ------------------------------------------------------------- */ - - /* set up dataset storage chunk sizes and creation property list */ - dcpl = H5Pcreate(H5P_DATASET_CREATE); - VRFY((dcpl >= 0), "H5Pcreate succeeded"); - ret = H5Pset_chunk(dcpl, 1, &chunk_size); - VRFY((ret >= 0), "H5Pset_chunk succeeded"); - - /* setup dimensionality object */ - fs = H5Screate_simple (1, &orig_size, &max_size); - VRFY((fs >= 0), "H5Screate_simple succeeded"); - - /* create an extendible dataset collectively */ - dataset = H5Dcreate2(fid, DATASETNAME1, H5T_NATIVE_INT, fs, H5P_DEFAULT, dcpl, H5P_DEFAULT); - VRFY((dataset >= 0), "H5Dcreat2e succeeded"); - - /* release resource */ - ret = H5Pclose(dcpl); - VRFY((ret >= 0), "H5Pclose succeeded"); - - - /* ------------------------- - * Test writing to dataset - * -------------------------*/ - /* create a memory dataspace independently */ - ms = H5Screate_simple(1, &orig_size, &max_size); - VRFY((ms >= 0), "H5Screate_simple succeeded"); - - /* put some trivial data in the data_array */ - for(i = 0; i < (int)orig_size; i++) - written[i] = i; - MESG("data array initialized"); - if(VERBOSE_MED) { - MESG("writing at offset zero: "); - for(i = 0; i < (int)orig_size; i++) - HDprintf("%s%d", i?", ":"", written[i]); - HDprintf("\n"); - } - ret = H5Dwrite(dataset, H5T_NATIVE_INT, ms, fs, H5P_DEFAULT, written); - VRFY((ret >= 0), "H5Dwrite succeeded"); - - /* ------------------------- - * Read initial data from dataset. 
- * -------------------------*/ - ret = H5Dread(dataset, H5T_NATIVE_INT, ms, fs, H5P_DEFAULT, retrieved); - VRFY((ret >= 0), "H5Dread succeeded"); - for (i=0; i<(int)orig_size; i++) - if(written[i]!=retrieved[i]) { - HDprintf("Line #%d: written!=retrieved: written[%d]=%d, retrieved[%d]=%d\n",__LINE__, - i,written[i], i,retrieved[i]); - nerrors++; - } - if(VERBOSE_MED){ - MESG("read at offset zero: "); - for (i=0; i<(int)orig_size; i++) - HDprintf("%s%d", i?", ":"", retrieved[i]); - HDprintf("\n"); - } - - /* ------------------------- - * Extend the dataset & retrieve new dataspace - * -------------------------*/ - ret = H5Dset_extent(dataset, &new_size); - VRFY((ret >= 0), "H5Dset_extent succeeded"); - ret = H5Sclose(fs); - VRFY((ret >= 0), "H5Sclose succeeded"); - fs = H5Dget_space(dataset); - VRFY((fs >= 0), "H5Dget_space succeeded"); - - /* ------------------------- - * Write to the second half of the dataset - * -------------------------*/ - for (i=0; i<(int)orig_size; i++) - written[i] = orig_size + i; - MESG("data array re-initialized"); - if(VERBOSE_MED) { - MESG("writing at offset 10: "); - for (i=0; i<(int)orig_size; i++) - HDprintf("%s%d", i?", ":"", written[i]); - HDprintf("\n"); - } - ret = H5Sselect_hyperslab(fs, H5S_SELECT_SET, &orig_size, NULL, &one, &orig_size); - VRFY((ret >= 0), "H5Sselect_hyperslab succeeded"); - ret = H5Dwrite(dataset, H5T_NATIVE_INT, ms, fs, H5P_DEFAULT, written); - VRFY((ret >= 0), "H5Dwrite succeeded"); - - /* ------------------------- - * Read the new data - * -------------------------*/ - ret = H5Dread(dataset, H5T_NATIVE_INT, ms, fs, H5P_DEFAULT, retrieved); - VRFY((ret >= 0), "H5Dread succeeded"); - for (i=0; i<(int)orig_size; i++) - if(written[i]!=retrieved[i]) { - HDprintf("Line #%d: written!=retrieved: written[%d]=%d, retrieved[%d]=%d\n",__LINE__, - i,written[i], i,retrieved[i]); - nerrors++; - } - if(VERBOSE_MED){ - MESG("read at offset 10: "); - for (i=0; i<(int)orig_size; i++) - HDprintf("%s%d", i?", ":"", retrieved[i]); - HDprintf("\n"); - } - - - /* Close dataset collectively */ - ret = H5Dclose(dataset); - VRFY((ret >= 0), "H5Dclose succeeded"); - - /* Close the file collectively */ - ret = H5Fclose(fid); - VRFY((ret >= 0), "H5Fclose succeeded"); -} - -/* Example of using the parallel HDF5 library to read an extendible dataset */ -void -extend_readInd(void) -{ - hid_t fid; /* HDF5 file ID */ - hid_t acc_tpl; /* File access templates */ - hid_t file_dataspace; /* File dataspace ID */ - hid_t mem_dataspace; /* memory dataspace ID */ - hid_t dataset1, dataset2; /* Dataset ID */ - hsize_t dims[MAX_RANK]; /* dataset dim sizes */ - DATATYPE *data_array1 = NULL; /* data buffer */ - DATATYPE *data_array2 = NULL; /* data buffer */ - DATATYPE *data_origin1 = NULL; /* expected data buffer */ - const char *filename; - - hsize_t start[MAX_RANK]; /* for hyperslab setting */ - hsize_t count[MAX_RANK], stride[MAX_RANK]; /* for hyperslab setting */ - hsize_t block[MAX_RANK]; /* for hyperslab setting */ - - herr_t ret; /* Generic return value */ - int mpi_size, mpi_rank; - - MPI_Comm comm = test_comm; - MPI_Info info = MPI_INFO_NULL; - - filename = GetTestParameters(); - if(VERBOSE_MED) - HDprintf("Extend independent read test on file %s\n", filename); - - /* set up MPI parameters */ - MPI_Comm_size(test_comm,&mpi_size); - MPI_Comm_rank(test_comm,&mpi_rank); - - /* allocate memory for data buffer */ - data_array1 = (DATATYPE *)HDmalloc(dim0*dim1*sizeof(DATATYPE)); - VRFY((data_array1 != NULL), "data_array1 HDmalloc succeeded"); - data_array2 = (DATATYPE 
*)HDmalloc(dim0*dim1*sizeof(DATATYPE)); - VRFY((data_array2 != NULL), "data_array2 HDmalloc succeeded"); - data_origin1 = (DATATYPE *)HDmalloc(dim0*dim1*sizeof(DATATYPE)); - VRFY((data_origin1 != NULL), "data_origin1 HDmalloc succeeded"); - - /* ------------------- - * OPEN AN HDF5 FILE - * -------------------*/ - /* setup file access template */ - acc_tpl = create_faccess_plist(comm, info, facc_type); - VRFY((acc_tpl >= 0), ""); - - /* open the file collectively */ - fid=H5Fopen(filename,H5F_ACC_RDONLY,acc_tpl); - VRFY((fid >= 0), ""); - - /* Release file-access template */ - ret = H5Pclose(acc_tpl); - VRFY((ret >= 0), ""); - - /* open the dataset1 collectively */ - dataset1 = H5Dopen2(fid, DATASETNAME1, H5P_DEFAULT); - VRFY((dataset1 >= 0), ""); - - /* open another dataset collectively */ - dataset2 = H5Dopen2(fid, DATASETNAME1, H5P_DEFAULT); - VRFY((dataset2 >= 0), ""); - - /* Try extend dataset1 which is open RDONLY. Should fail. */ - /* first turn off auto error reporting */ - H5Eget_auto2(H5E_DEFAULT, &old_func, &old_client_data); - H5Eset_auto2(H5E_DEFAULT, NULL, NULL); - - file_dataspace = H5Dget_space (dataset1); - VRFY((file_dataspace >= 0), "H5Dget_space succeeded"); - ret = H5Sget_simple_extent_dims(file_dataspace, dims, NULL); - VRFY((ret > 0), "H5Sget_simple_extent_dims succeeded"); - dims[0]++; - ret = H5Dset_extent(dataset1, dims); - VRFY((ret < 0), "H5Dset_extent failed as expected"); - - /* restore auto error reporting */ - H5Eset_auto2(H5E_DEFAULT, old_func, old_client_data); - H5Sclose(file_dataspace); - - - /* Read dataset1 using BYROW pattern */ - /* set up dimensions of the slab this process accesses */ - slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW); - - /* create a file dataspace independently */ - file_dataspace = H5Dget_space (dataset1); - VRFY((file_dataspace >= 0), ""); - ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block); - VRFY((ret >= 0), ""); - - /* create a memory dataspace independently */ - mem_dataspace = H5Screate_simple (MAX_RANK, block, NULL); - VRFY((mem_dataspace >= 0), ""); - - /* fill dataset with test data */ - dataset_fill(start, block, data_origin1); - if(VERBOSE_MED){ - MESG("data_array created"); - dataset_print(start, block, data_array1); - } - - /* read data independently */ - ret = H5Dread(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, - H5P_DEFAULT, data_array1); - VRFY((ret >= 0), "H5Dread succeeded"); - - /* verify the read data with original expected data */ - ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1); - VRFY((ret == 0), "dataset1 read verified correct"); - if(ret) nerrors++; - - H5Sclose(mem_dataspace); - H5Sclose(file_dataspace); - - - /* Read dataset2 using BYCOL pattern */ - /* set up dimensions of the slab this process accesses */ - slab_set(mpi_rank, mpi_size, start, count, stride, block, BYCOL); - - /* create a file dataspace independently */ - file_dataspace = H5Dget_space (dataset2); - VRFY((file_dataspace >= 0), ""); - ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block); - VRFY((ret >= 0), ""); - - /* create a memory dataspace independently */ - mem_dataspace = H5Screate_simple (MAX_RANK, block, NULL); - VRFY((mem_dataspace >= 0), ""); - - /* fill dataset with test data */ - dataset_fill(start, block, data_origin1); - if(VERBOSE_MED){ - MESG("data_array created"); - dataset_print(start, block, data_array1); - } - - /* read data independently */ - ret = H5Dread(dataset2, H5T_NATIVE_INT, 
mem_dataspace, file_dataspace, - H5P_DEFAULT, data_array1); - VRFY((ret >= 0), "H5Dread succeeded"); - - /* verify the read data with original expected data */ - ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1); - VRFY((ret == 0), "dataset2 read verified correct"); - if(ret) nerrors++; - - H5Sclose(mem_dataspace); - H5Sclose(file_dataspace); - - /* close dataset collectively */ - ret = H5Dclose(dataset1); - VRFY((ret >= 0), ""); - ret = H5Dclose(dataset2); - VRFY((ret >= 0), ""); - - - /* close the file collectively */ - H5Fclose(fid); - - /* release data buffers */ - if(data_array1) HDfree(data_array1); - if(data_array2) HDfree(data_array2); - if(data_origin1) HDfree(data_origin1); -} - -/* - * Part 3--Collective read/write for extendible datasets. - */ - -/* - * Example of using the parallel HDF5 library to create two extendible - * datasets in one HDF5 file with collective parallel MPIO access support. - * The Datasets are of sizes (number-of-mpi-processes x dim0) x dim1. - * Each process controls only a slab of size dim0 x dim1 within each - * dataset. - */ - -void -extend_writeAll(void) -{ - hid_t fid; /* HDF5 file ID */ - hid_t acc_tpl; /* File access templates */ - hid_t xfer_plist; /* Dataset transfer properties list */ - hid_t sid; /* Dataspace ID */ - hid_t file_dataspace; /* File dataspace ID */ - hid_t mem_dataspace; /* memory dataspace ID */ - hid_t dataset1, dataset2; /* Dataset ID */ - const char *filename; - hsize_t dims[MAX_RANK]; /* dataset dim sizes */ - hsize_t max_dims[MAX_RANK] = - {H5S_UNLIMITED, H5S_UNLIMITED}; /* dataset maximum dim sizes */ - DATATYPE *data_array1 = NULL; /* data buffer */ - hsize_t chunk_dims[MAX_RANK]; /* chunk sizes */ - hid_t dataset_pl; /* dataset create prop. list */ - - hsize_t start[MAX_RANK]; /* for hyperslab setting */ - hsize_t count[MAX_RANK]; /* for hyperslab setting */ - hsize_t stride[MAX_RANK]; /* for hyperslab setting */ - hsize_t block[MAX_RANK]; /* for hyperslab setting */ - - herr_t ret; /* Generic return value */ - int mpi_size, mpi_rank; - - MPI_Comm comm = test_comm; - MPI_Info info = MPI_INFO_NULL; - - filename = GetTestParameters(); - if(VERBOSE_MED) - HDprintf("Extend independent write test on file %s\n", filename); - - /* set up MPI parameters */ - MPI_Comm_size(test_comm,&mpi_size); - MPI_Comm_rank(test_comm,&mpi_rank); - - /* setup chunk-size. Make sure sizes are > 0 */ - chunk_dims[0] = chunkdim0; - chunk_dims[1] = chunkdim1; - - /* allocate memory for data buffer */ - data_array1 = (DATATYPE *)HDmalloc(dim0*dim1*sizeof(DATATYPE)); - VRFY((data_array1 != NULL), "data_array1 HDmalloc succeeded"); - - /* ------------------- - * START AN HDF5 FILE - * -------------------*/ - /* setup file access template */ - acc_tpl = create_faccess_plist(comm, info, facc_type); - VRFY((acc_tpl >= 0), ""); - -/* Reduce the number of metadata cache slots, so that there are cache - * collisions during the raw data I/O on the chunked dataset. This stresses - * the metadata cache and tests for cache bugs. 
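Every test in this file obtains its file access property list from the shared create_faccess_plist() helper, whose body is not shown in this excerpt. For the MPI-IO case it reduces to roughly the sketch below; make_mpio_fapl is a hypothetical name and the real helper also covers other driver choices selected by facc_type.

#include "hdf5.h"
#include <mpi.h>

/* Sketch: file access property list using the MPI-IO driver, roughly what
 * the shared helper builds when facc_type selects MPI-IO. */
static hid_t
make_mpio_fapl(MPI_Comm comm, MPI_Info info)
{
    hid_t fapl = H5Pcreate(H5P_FILE_ACCESS);

    if (fapl < 0 || H5Pset_fapl_mpio(fapl, comm, info) < 0)
        return -1;

    /* Every rank must create/open the file with an equivalent fapl */
    return fapl;
}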
-QAK - */ -{ - int mdc_nelmts; - size_t rdcc_nelmts; - size_t rdcc_nbytes; - double rdcc_w0; - - ret = H5Pget_cache(acc_tpl,&mdc_nelmts,&rdcc_nelmts,&rdcc_nbytes,&rdcc_w0); - VRFY((ret >= 0), "H5Pget_cache succeeded"); - mdc_nelmts=4; - ret = H5Pset_cache(acc_tpl,mdc_nelmts,rdcc_nelmts,rdcc_nbytes,rdcc_w0); - VRFY((ret >= 0), "H5Pset_cache succeeded"); -} - - /* create the file collectively */ - fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, acc_tpl); - VRFY((fid >= 0), "H5Fcreate succeeded"); - - /* Release file-access template */ - ret = H5Pclose(acc_tpl); - VRFY((ret >= 0), ""); - - - /* -------------------------------------------------------------- - * Define the dimensions of the overall datasets and create them. - * ------------------------------------------------------------- */ - - /* set up dataset storage chunk sizes and creation property list */ - if(VERBOSE_MED) - HDprintf("chunks[]=%lu,%lu\n", (unsigned long)chunk_dims[0], (unsigned long)chunk_dims[1]); - dataset_pl = H5Pcreate(H5P_DATASET_CREATE); - VRFY((dataset_pl >= 0), "H5Pcreate succeeded"); - ret = H5Pset_chunk(dataset_pl, MAX_RANK, chunk_dims); - VRFY((ret >= 0), "H5Pset_chunk succeeded"); - - /* setup dimensionality object */ - /* start out with no rows, extend it later. */ - dims[0] = dims[1] = 0; - sid = H5Screate_simple (MAX_RANK, dims, max_dims); - VRFY((sid >= 0), "H5Screate_simple succeeded"); - - /* create an extendible dataset collectively */ - dataset1 = H5Dcreate2(fid, DATASETNAME1, H5T_NATIVE_INT, sid, H5P_DEFAULT, dataset_pl, H5P_DEFAULT); - VRFY((dataset1 >= 0), "H5Dcreate2 succeeded"); - - /* create another extendible dataset collectively */ - dataset2 = H5Dcreate2(fid, DATASETNAME2, H5T_NATIVE_INT, sid, H5P_DEFAULT, dataset_pl, H5P_DEFAULT); - VRFY((dataset2 >= 0), "H5Dcreate2 succeeded"); - - /* release resource */ - H5Sclose(sid); - H5Pclose(dataset_pl); - - - - /* ------------------------- - * Test writing to dataset1 - * -------------------------*/ - /* set up dimensions of the slab this process accesses */ - slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW); - - /* put some trivial data in the data_array */ - dataset_fill(start, block, data_array1); - MESG("data_array initialized"); - if(VERBOSE_MED) { - MESG("data_array created"); - dataset_print(start, block, data_array1); - } - - /* create a memory dataspace independently */ - mem_dataspace = H5Screate_simple (MAX_RANK, block, NULL); - VRFY((mem_dataspace >= 0), ""); - - /* Extend its current dim sizes before writing */ - dims[0] = dim0; - dims[1] = dim1; - ret = H5Dset_extent(dataset1, dims); - VRFY((ret >= 0), "H5Dset_extent succeeded"); - - /* create a file dataspace independently */ - file_dataspace = H5Dget_space (dataset1); - VRFY((file_dataspace >= 0), "H5Dget_space succeeded"); - ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block); - VRFY((ret >= 0), "H5Sset_hyperslab succeeded"); - - /* set up the collective transfer properties list */ - xfer_plist = H5Pcreate (H5P_DATASET_XFER); - VRFY((xfer_plist >= 0), "H5Pcreate xfer succeeded"); - ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE); - VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded"); - if(dxfer_coll_type == DXFER_INDEPENDENT_IO) { - ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO); - VRFY((ret>= 0),"set independent IO collectively succeeded"); - } - - - /* write data collectively */ - ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, - xfer_plist, data_array1); - VRFY((ret >= 
0), "H5Dwrite succeeded"); - - /* release resource */ - H5Sclose(file_dataspace); - H5Sclose(mem_dataspace); - H5Pclose(xfer_plist); - - - /* ------------------------- - * Test writing to dataset2 - * -------------------------*/ - /* set up dimensions of the slab this process accesses */ - slab_set(mpi_rank, mpi_size, start, count, stride, block, BYCOL); - - /* put some trivial data in the data_array */ - dataset_fill(start, block, data_array1); - MESG("data_array initialized"); - if(VERBOSE_MED){ - MESG("data_array created"); - dataset_print(start, block, data_array1); - } - - /* create a memory dataspace independently */ - mem_dataspace = H5Screate_simple (MAX_RANK, block, NULL); - VRFY((mem_dataspace >= 0), ""); - - /* set up the collective transfer properties list */ - xfer_plist = H5Pcreate (H5P_DATASET_XFER); - VRFY((xfer_plist >= 0), "H5Pcreate xfer succeeded"); - ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE); - VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded"); - if(dxfer_coll_type == DXFER_INDEPENDENT_IO) { - ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO); - VRFY((ret>= 0),"set independent IO collectively succeeded"); - } - - - /* Try write to dataset2 beyond its current dim sizes. Should fail. */ - /* Temporary turn off auto error reporting */ - H5Eget_auto2(H5E_DEFAULT, &old_func, &old_client_data); - H5Eset_auto2(H5E_DEFAULT, NULL, NULL); - - /* create a file dataspace independently */ - file_dataspace = H5Dget_space (dataset2); - VRFY((file_dataspace >= 0), "H5Dget_space succeeded"); - ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block); - VRFY((ret >= 0), "H5Sset_hyperslab succeeded"); - - /* write data independently. Should fail. */ - ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, - xfer_plist, data_array1); - VRFY((ret < 0), "H5Dwrite failed as expected"); - - /* restore auto error reporting */ - H5Eset_auto2(H5E_DEFAULT, old_func, old_client_data); - H5Sclose(file_dataspace); - - /* Extend dataset2 and try again. Should succeed. 
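The transfer property list set up here is the same in every collective test in this file: request collective MPI-IO, then optionally fall back to individual I/O inside the collective call when the test run asks for it. A condensed sketch, with make_collective_dxpl as a hypothetical helper name:

#include "hdf5.h"

/* Sketch: dataset transfer property list for collective MPI-IO, optionally
 * downgraded to individual I/O within the collective operation. */
static hid_t
make_collective_dxpl(int use_individual_io)
{
    hid_t dxpl = H5Pcreate(H5P_DATASET_XFER);

    H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);
    if (use_individual_io)
        H5Pset_dxpl_mpio_collective_opt(dxpl, H5FD_MPIO_INDIVIDUAL_IO);

    return dxpl;
}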
*/ - dims[0] = dim0; - dims[1] = dim1; - ret = H5Dset_extent(dataset2, dims); - VRFY((ret >= 0), "H5Dset_extent succeeded"); - - /* create a file dataspace independently */ - file_dataspace = H5Dget_space (dataset2); - VRFY((file_dataspace >= 0), "H5Dget_space succeeded"); - ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block); - VRFY((ret >= 0), "H5Sset_hyperslab succeeded"); - - /* write data independently */ - ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, - xfer_plist, data_array1); - VRFY((ret >= 0), "H5Dwrite succeeded"); - - /* release resource */ - ret = H5Sclose(file_dataspace); - VRFY((ret >= 0), "H5Sclose succeeded"); - ret = H5Sclose(mem_dataspace); - VRFY((ret >= 0), "H5Sclose succeeded"); - ret = H5Pclose(xfer_plist); - VRFY((ret >= 0), "H5Pclose succeeded"); - - - /* close dataset collectively */ - ret = H5Dclose(dataset1); - VRFY((ret >= 0), "H5Dclose1 succeeded"); - ret = H5Dclose(dataset2); - VRFY((ret >= 0), "H5Dclose2 succeeded"); - - /* close the file collectively */ - H5Fclose(fid); - - /* release data buffers */ - if(data_array1) HDfree(data_array1); -} - -/* Example of using the parallel HDF5 library to read an extendible dataset */ -void -extend_readAll(void) -{ - hid_t fid; /* HDF5 file ID */ - hid_t acc_tpl; /* File access templates */ - hid_t xfer_plist; /* Dataset transfer properties list */ - hid_t file_dataspace; /* File dataspace ID */ - hid_t mem_dataspace; /* memory dataspace ID */ - hid_t dataset1, dataset2; /* Dataset ID */ - const char *filename; - hsize_t dims[MAX_RANK]; /* dataset dim sizes */ - DATATYPE *data_array1 = NULL; /* data buffer */ - DATATYPE *data_array2 = NULL; /* data buffer */ - DATATYPE *data_origin1 = NULL; /* expected data buffer */ - - hsize_t start[MAX_RANK]; /* for hyperslab setting */ - hsize_t count[MAX_RANK], stride[MAX_RANK]; /* for hyperslab setting */ - hsize_t block[MAX_RANK]; /* for hyperslab setting */ - - herr_t ret; /* Generic return value */ - int mpi_size, mpi_rank; - - MPI_Comm comm = test_comm; - MPI_Info info = MPI_INFO_NULL; - - filename = GetTestParameters(); - if(VERBOSE_MED) - HDprintf("Extend independent read test on file %s\n", filename); - - /* set up MPI parameters */ - MPI_Comm_size(test_comm,&mpi_size); - MPI_Comm_rank(test_comm,&mpi_rank); - - /* allocate memory for data buffer */ - data_array1 = (DATATYPE *)HDmalloc(dim0*dim1*sizeof(DATATYPE)); - VRFY((data_array1 != NULL), "data_array1 HDmalloc succeeded"); - data_array2 = (DATATYPE *)HDmalloc(dim0*dim1*sizeof(DATATYPE)); - VRFY((data_array2 != NULL), "data_array2 HDmalloc succeeded"); - data_origin1 = (DATATYPE *)HDmalloc(dim0*dim1*sizeof(DATATYPE)); - VRFY((data_origin1 != NULL), "data_origin1 HDmalloc succeeded"); - - /* ------------------- - * OPEN AN HDF5 FILE - * -------------------*/ - /* setup file access template */ - acc_tpl = create_faccess_plist(comm, info, facc_type); - VRFY((acc_tpl >= 0), ""); - - /* open the file collectively */ - fid=H5Fopen(filename,H5F_ACC_RDONLY,acc_tpl); - VRFY((fid >= 0), ""); - - /* Release file-access template */ - ret = H5Pclose(acc_tpl); - VRFY((ret >= 0), ""); - - /* open the dataset1 collectively */ - dataset1 = H5Dopen2(fid, DATASETNAME1, H5P_DEFAULT); - VRFY((dataset1 >= 0), ""); - - /* open another dataset collectively */ - dataset2 = H5Dopen2(fid, DATASETNAME1, H5P_DEFAULT); - VRFY((dataset2 >= 0), ""); - - /* Try extend dataset1 which is open RDONLY. Should fail. 
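Both read tests provoke an H5Dset_extent() failure on a read-only dataset while automatic error printing is switched off. The same expected-failure pattern can be written with the library's H5E_BEGIN_TRY / H5E_END_TRY convenience macros; this is a sketch of the idea, not the test's literal code.

#include "hdf5.h"

/* Sketch: provoke an expected failure without dumping an error stack.
 * Equivalent in effect to the H5Eget_auto2()/H5Eset_auto2() sequence above. */
static int
extend_should_fail(hid_t rdonly_dset, const hsize_t *bigger_dims)
{
    herr_t status;

    H5E_BEGIN_TRY {
        status = H5Dset_extent(rdonly_dset, bigger_dims);
    } H5E_END_TRY;

    return (status < 0) ? 0 : -1;   /* the test passes only if the call failed */
}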
*/ - /* first turn off auto error reporting */ - H5Eget_auto2(H5E_DEFAULT, &old_func, &old_client_data); - H5Eset_auto2(H5E_DEFAULT, NULL, NULL); - - file_dataspace = H5Dget_space (dataset1); - VRFY((file_dataspace >= 0), "H5Dget_space succeeded"); - ret = H5Sget_simple_extent_dims(file_dataspace, dims, NULL); - VRFY((ret > 0), "H5Sget_simple_extent_dims succeeded"); - dims[0]++; - ret = H5Dset_extent(dataset1, dims); - VRFY((ret < 0), "H5Dset_extent failed as expected"); - - /* restore auto error reporting */ - H5Eset_auto2(H5E_DEFAULT, old_func, old_client_data); - H5Sclose(file_dataspace); - - - /* Read dataset1 using BYROW pattern */ - /* set up dimensions of the slab this process accesses */ - slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW); - - /* create a file dataspace independently */ - file_dataspace = H5Dget_space (dataset1); - VRFY((file_dataspace >= 0), ""); - ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block); - VRFY((ret >= 0), ""); - - /* create a memory dataspace independently */ - mem_dataspace = H5Screate_simple (MAX_RANK, block, NULL); - VRFY((mem_dataspace >= 0), ""); - - /* fill dataset with test data */ - dataset_fill(start, block, data_origin1); - if(VERBOSE_MED){ - MESG("data_array created"); - dataset_print(start, block, data_array1); - } - - /* set up the collective transfer properties list */ - xfer_plist = H5Pcreate (H5P_DATASET_XFER); - VRFY((xfer_plist >= 0), "H5Pcreate xfer succeeded"); - ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE); - VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded"); - if(dxfer_coll_type == DXFER_INDEPENDENT_IO) { - ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO); - VRFY((ret>= 0),"set independent IO collectively succeeded"); - } - - - /* read data collectively */ - ret = H5Dread(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, - xfer_plist, data_array1); - VRFY((ret >= 0), "H5Dread succeeded"); - - /* verify the read data with original expected data */ - ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1); - VRFY((ret == 0), "dataset1 read verified correct"); - if(ret) nerrors++; - - H5Sclose(mem_dataspace); - H5Sclose(file_dataspace); - H5Pclose(xfer_plist); - - - /* Read dataset2 using BYCOL pattern */ - /* set up dimensions of the slab this process accesses */ - slab_set(mpi_rank, mpi_size, start, count, stride, block, BYCOL); - - /* create a file dataspace independently */ - file_dataspace = H5Dget_space (dataset2); - VRFY((file_dataspace >= 0), ""); - ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block); - VRFY((ret >= 0), ""); - - /* create a memory dataspace independently */ - mem_dataspace = H5Screate_simple (MAX_RANK, block, NULL); - VRFY((mem_dataspace >= 0), ""); - - /* fill dataset with test data */ - dataset_fill(start, block, data_origin1); - if(VERBOSE_MED){ - MESG("data_array created"); - dataset_print(start, block, data_array1); - } - - /* set up the collective transfer properties list */ - xfer_plist = H5Pcreate (H5P_DATASET_XFER); - VRFY((xfer_plist >= 0), "H5Pcreate xfer succeeded"); - ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE); - VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded"); - if(dxfer_coll_type == DXFER_INDEPENDENT_IO) { - ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO); - VRFY((ret>= 0),"set independent IO collectively succeeded"); - } - - - /* read data collectively */ - ret = H5Dread(dataset2, H5T_NATIVE_INT, 
mem_dataspace, file_dataspace, - xfer_plist, data_array1); - VRFY((ret >= 0), "H5Dread succeeded"); - - /* verify the read data with original expected data */ - ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1); - VRFY((ret == 0), "dataset2 read verified correct"); - if(ret) nerrors++; - - H5Sclose(mem_dataspace); - H5Sclose(file_dataspace); - H5Pclose(xfer_plist); - - /* close dataset collectively */ - ret = H5Dclose(dataset1); - VRFY((ret >= 0), ""); - ret = H5Dclose(dataset2); - VRFY((ret >= 0), ""); - - - /* close the file collectively */ - H5Fclose(fid); - - /* release data buffers */ - if(data_array1) HDfree(data_array1); - if(data_array2) HDfree(data_array2); - if(data_origin1) HDfree(data_origin1); -} - -/* - * Example of using the parallel HDF5 library to read a compressed - * dataset in an HDF5 file with collective parallel access support. - */ -#ifdef H5_HAVE_FILTER_DEFLATE -void -compress_readAll(void) -{ - hid_t fid; /* HDF5 file ID */ - hid_t acc_tpl; /* File access templates */ - hid_t dcpl; /* Dataset creation property list */ - hid_t xfer_plist; /* Dataset transfer properties list */ - hid_t dataspace; /* Dataspace ID */ - hid_t dataset; /* Dataset ID */ - int rank=1; /* Dataspace rank */ - hsize_t dim=dim0; /* Dataspace dimensions */ - unsigned u; /* Local index variable */ - unsigned chunk_opts; /* Chunk options */ - unsigned disable_partial_chunk_filters; /* Whether filters are disabled on partial chunks */ - DATATYPE *data_read = NULL; /* data buffer */ - DATATYPE *data_orig = NULL; /* expected data buffer */ - const char *filename; - MPI_Comm comm = test_comm; - MPI_Info info = MPI_INFO_NULL; - int mpi_size, mpi_rank; - herr_t ret; /* Generic return value */ - - filename = GetTestParameters(); - if(VERBOSE_MED) - HDprintf("Collective chunked dataset read test on file %s\n", filename); - - /* Retrieve MPI parameters */ - MPI_Comm_size(comm,&mpi_size); - MPI_Comm_rank(comm,&mpi_rank); - - /* Allocate data buffer */ - data_orig = (DATATYPE *)HDmalloc((size_t)dim*sizeof(DATATYPE)); - VRFY((data_orig != NULL), "data_origin1 HDmalloc succeeded"); - data_read = (DATATYPE *)HDmalloc((size_t)dim*sizeof(DATATYPE)); - VRFY((data_read != NULL), "data_array1 HDmalloc succeeded"); - - /* Initialize data buffers */ - for(u=0; u 0), "H5Fcreate succeeded"); - - /* Create property list for chunking and compression */ - dcpl = H5Pcreate(H5P_DATASET_CREATE); - VRFY((dcpl > 0), "H5Pcreate succeeded"); - - ret = H5Pset_layout(dcpl, H5D_CHUNKED); - VRFY((ret >= 0), "H5Pset_layout succeeded"); - - /* Use eight chunks */ - chunk_dim = dim / 8; - ret = H5Pset_chunk(dcpl, rank, &chunk_dim); - VRFY((ret >= 0), "H5Pset_chunk succeeded"); - - /* Set chunk options appropriately */ - if(disable_partial_chunk_filters) { - ret = H5Pget_chunk_opts(dcpl, &chunk_opts); - VRFY((ret>=0),"H5Pget_chunk_opts succeeded"); - - chunk_opts |= H5D_CHUNK_DONT_FILTER_PARTIAL_CHUNKS; - - ret = H5Pset_chunk_opts(dcpl, chunk_opts); - VRFY((ret>=0),"H5Pset_chunk_opts succeeded"); - } /* end if */ - - ret = H5Pset_deflate(dcpl, 9); - VRFY((ret >= 0), "H5Pset_deflate succeeded"); - - /* Create dataspace */ - dataspace = H5Screate_simple(rank, &dim, NULL); - VRFY((dataspace > 0), "H5Screate_simple succeeded"); - - /* Create dataset */ - dataset = H5Dcreate2(fid, "compressed_data", H5T_NATIVE_INT, dataspace, H5P_DEFAULT, dcpl, H5P_DEFAULT); - VRFY((dataset > 0), "H5Dcreate2 succeeded"); - - /* Write compressed data */ - ret = H5Dwrite(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, 
data_orig); - VRFY((ret >= 0), "H5Dwrite succeeded"); - - /* Close objects */ - ret = H5Pclose(dcpl); - VRFY((ret >= 0), "H5Pclose succeeded"); - ret = H5Sclose(dataspace); - VRFY((ret >= 0), "H5Sclose succeeded"); - ret = H5Dclose(dataset); - VRFY((ret >= 0), "H5Dclose succeeded"); - ret = H5Fclose(fid); - VRFY((ret >= 0), "H5Fclose succeeded"); - } - - /* Wait for file to be created */ - MPI_Barrier(comm); - - /* ------------------- - * OPEN AN HDF5 FILE - * -------------------*/ - - /* setup file access template */ - acc_tpl = create_faccess_plist(comm, info, facc_type); - VRFY((acc_tpl >= 0), ""); - - /* open the file collectively */ - fid=H5Fopen(filename,H5F_ACC_RDWR,acc_tpl); - VRFY((fid > 0), "H5Fopen succeeded"); - - /* Release file-access template */ - ret = H5Pclose(acc_tpl); - VRFY((ret >= 0), "H5Pclose succeeded"); - - - /* Open dataset with compressed chunks */ - dataset = H5Dopen2(fid, "compressed_data", H5P_DEFAULT); - VRFY((dataset > 0), "H5Dopen2 succeeded"); - - /* Try reading & writing data */ - if(dataset>0) { - /* Create dataset transfer property list */ - xfer_plist = H5Pcreate(H5P_DATASET_XFER); - VRFY((xfer_plist > 0), "H5Pcreate succeeded"); - - ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE); - VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded"); - if(dxfer_coll_type == DXFER_INDEPENDENT_IO) { - ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO); - VRFY((ret>= 0),"set independent IO collectively succeeded"); - } - - - /* Try reading the data */ - ret = H5Dread(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, xfer_plist, data_read); - VRFY((ret >= 0), "H5Dread succeeded"); - - /* Verify data read */ - for(u=0; u= 3 - ret = H5Dwrite(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, xfer_plist, data_read); - VRFY((ret >= 0), "H5Dwrite succeeded"); -#endif - - ret = H5Pclose(xfer_plist); - VRFY((ret >= 0), "H5Pclose succeeded"); - ret = H5Dclose(dataset); - VRFY((ret >= 0), "H5Dclose succeeded"); - } /* end if */ - - /* Close file */ - ret = H5Fclose(fid); - VRFY((ret >= 0), "H5Fclose succeeded"); - } /* end for */ - - /* release data buffers */ - if(data_read) HDfree(data_read); - if(data_orig) HDfree(data_orig); -} -#endif /* H5_HAVE_FILTER_DEFLATE */ - -/* - * Part 4--Non-selection for chunked dataset - */ - -/* - * Example of using the parallel HDF5 library to create chunked - * dataset in one HDF5 file with collective and independent parallel - * MPIO access support. The Datasets are of sizes dim0 x dim1. - * Each process controls only a slab of size dim0 x dim1 within the - * dataset with the exception that one processor selects no element. - */ - -void -none_selection_chunk(void) -{ - hid_t fid; /* HDF5 file ID */ - hid_t acc_tpl; /* File access templates */ - hid_t xfer_plist; /* Dataset transfer properties list */ - hid_t sid; /* Dataspace ID */ - hid_t file_dataspace; /* File dataspace ID */ - hid_t mem_dataspace; /* memory dataspace ID */ - hid_t dataset1, dataset2; /* Dataset ID */ - const char *filename; - hsize_t dims[MAX_RANK]; /* dataset dim sizes */ - DATATYPE *data_origin = NULL; /* data buffer */ - DATATYPE *data_array = NULL; /* data buffer */ - hsize_t chunk_dims[MAX_RANK]; /* chunk sizes */ - hid_t dataset_pl; /* dataset create prop. 
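compress_readAll reads back a deflate-compressed dataset collectively; the interesting part of its setup is the dataset creation property list. The sketch below condenses that into a hypothetical helper; the chunked layout, compression level 9, and the optional partial-chunk opt-out follow the code above.

#include "hdf5.h"

/* Sketch: creation property list for a chunked, deflate-compressed dataset,
 * optionally leaving partial edge chunks unfiltered.  Hypothetical helper. */
static hid_t
make_deflate_dcpl(hsize_t chunk_dim, int skip_partial_chunks)
{
    hid_t    dcpl = H5Pcreate(H5P_DATASET_CREATE);
    unsigned opts = 0;

    H5Pset_chunk(dcpl, 1, &chunk_dim);

    if (skip_partial_chunks) {
        H5Pget_chunk_opts(dcpl, &opts);
        opts |= H5D_CHUNK_DONT_FILTER_PARTIAL_CHUNKS;
        H5Pset_chunk_opts(dcpl, opts);
    }

    H5Pset_deflate(dcpl, 9);   /* level 9, as in the test above */
    return dcpl;
}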
list */ - - hsize_t start[MAX_RANK]; /* for hyperslab setting */ - hsize_t count[MAX_RANK]; /* for hyperslab setting */ - hsize_t stride[MAX_RANK]; /* for hyperslab setting */ - hsize_t block[MAX_RANK]; /* for hyperslab setting */ - hsize_t mstart[MAX_RANK]; /* for data buffer in memory */ - - herr_t ret; /* Generic return value */ - int mpi_size, mpi_rank; - - MPI_Comm comm = test_comm; - MPI_Info info = MPI_INFO_NULL; - - filename = GetTestParameters(); - if(VERBOSE_MED) - HDprintf("Extend independent write test on file %s\n", filename); - - /* set up MPI parameters */ - MPI_Comm_size(test_comm,&mpi_size); - MPI_Comm_rank(test_comm,&mpi_rank); - - /* setup chunk-size. Make sure sizes are > 0 */ - chunk_dims[0] = chunkdim0; - chunk_dims[1] = chunkdim1; - - /* ------------------- - * START AN HDF5 FILE - * -------------------*/ - /* setup file access template */ - acc_tpl = create_faccess_plist(comm, info, facc_type); - VRFY((acc_tpl >= 0), ""); - - /* create the file collectively */ - fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, acc_tpl); - VRFY((fid >= 0), "H5Fcreate succeeded"); - - /* Release file-access template */ - ret = H5Pclose(acc_tpl); - VRFY((ret >= 0), ""); - - /* -------------------------------------------------------------- - * Define the dimensions of the overall datasets and create them. - * ------------------------------------------------------------- */ - - /* set up dataset storage chunk sizes and creation property list */ - if(VERBOSE_MED) - HDprintf("chunks[]=%lu,%lu\n", (unsigned long)chunk_dims[0], (unsigned long)chunk_dims[1]); - dataset_pl = H5Pcreate(H5P_DATASET_CREATE); - VRFY((dataset_pl >= 0), "H5Pcreate succeeded"); - ret = H5Pset_chunk(dataset_pl, MAX_RANK, chunk_dims); - VRFY((ret >= 0), "H5Pset_chunk succeeded"); - - /* setup dimensionality object */ - dims[0] = dim0; - dims[1] = dim1; - sid = H5Screate_simple(MAX_RANK, dims, NULL); - VRFY((sid >= 0), "H5Screate_simple succeeded"); - - /* create an extendible dataset collectively */ - dataset1 = H5Dcreate2(fid, DATASETNAME1, H5T_NATIVE_INT, sid, H5P_DEFAULT, dataset_pl, H5P_DEFAULT); - VRFY((dataset1 >= 0), "H5Dcreate2 succeeded"); - - /* create another extendible dataset collectively */ - dataset2 = H5Dcreate2(fid, DATASETNAME2, H5T_NATIVE_INT, sid, H5P_DEFAULT, dataset_pl, H5P_DEFAULT); - VRFY((dataset2 >= 0), "H5Dcreate2 succeeded"); - - /* release resource */ - H5Sclose(sid); - H5Pclose(dataset_pl); - - /* ------------------------- - * Test collective writing to dataset1 - * -------------------------*/ - /* set up dimensions of the slab this process accesses */ - slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW); - - /* allocate memory for data buffer. Only allocate enough buffer for - * each processor's data. 
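In the code that follows, process 0 takes part in the collective transfers without contributing any elements. The essential trick is an empty selection on both the memory and the file dataspace, as in this minimal sketch (the helper name is hypothetical):

#include "hdf5.h"

/* Sketch: a rank with nothing to transfer still calls H5Dwrite() when the
 * transfer is collective; it just passes dataspaces with empty selections. */
static herr_t
collective_write_maybe_empty(hid_t dset, hid_t mem_space, hid_t file_space,
                             hid_t coll_dxpl, const int *buf, int contribute)
{
    if (!contribute) {
        H5Sselect_none(mem_space);   /* no elements in memory ... */
        H5Sselect_none(file_space);  /* ... and none in the file  */
    }

    return H5Dwrite(dset, H5T_NATIVE_INT, mem_space, file_space, coll_dxpl, buf);
}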
*/ - if(mpi_rank) { - data_origin = (DATATYPE *)HDmalloc(block[0]*block[1]*sizeof(DATATYPE)); - VRFY((data_origin != NULL), "data_origin HDmalloc succeeded"); - - data_array = (DATATYPE *)HDmalloc(block[0]*block[1]*sizeof(DATATYPE)); - VRFY((data_array != NULL), "data_array HDmalloc succeeded"); - - /* put some trivial data in the data_array */ - mstart[0] = mstart[1] = 0; - dataset_fill(mstart, block, data_origin); - MESG("data_array initialized"); - if(VERBOSE_MED){ - MESG("data_array created"); - dataset_print(mstart, block, data_origin); - } - } - - /* create a memory dataspace independently */ - mem_dataspace = H5Screate_simple (MAX_RANK, block, NULL); - VRFY((mem_dataspace >= 0), ""); - - /* Process 0 has no selection */ - if(!mpi_rank) { - ret = H5Sselect_none(mem_dataspace); - VRFY((ret >= 0), "H5Sselect_none succeeded"); - } - - /* create a file dataspace independently */ - file_dataspace = H5Dget_space (dataset1); - VRFY((file_dataspace >= 0), "H5Dget_space succeeded"); - ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block); - VRFY((ret >= 0), "H5Sset_hyperslab succeeded"); - - /* Process 0 has no selection */ - if(!mpi_rank) { - ret = H5Sselect_none(file_dataspace); - VRFY((ret >= 0), "H5Sselect_none succeeded"); - } - - /* set up the collective transfer properties list */ - xfer_plist = H5Pcreate (H5P_DATASET_XFER); - VRFY((xfer_plist >= 0), "H5Pcreate xfer succeeded"); - ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE); - VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded"); - - /* write data collectively */ - ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, - xfer_plist, data_origin); - VRFY((ret >= 0), "H5Dwrite succeeded"); - - /* read data independently */ - ret = H5Dread(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace, - H5P_DEFAULT, data_array); - VRFY((ret >= 0), ""); - - /* verify the read data with original expected data */ - if(mpi_rank) { - ret = dataset_vrfy(mstart, count, stride, block, data_array, data_origin); - if(ret) nerrors++; - } - - /* ------------------------- - * Test independent writing to dataset2 - * -------------------------*/ - ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_INDEPENDENT); - VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded"); - - /* write data collectively */ - ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, - xfer_plist, data_origin); - VRFY((ret >= 0), "H5Dwrite succeeded"); - - /* read data independently */ - ret = H5Dread(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, - H5P_DEFAULT, data_array); - VRFY((ret >= 0), ""); - - /* verify the read data with original expected data */ - if(mpi_rank) { - ret = dataset_vrfy(mstart, count, stride, block, data_array, data_origin); - if(ret) nerrors++; - } - - /* release resource */ - ret = H5Sclose(file_dataspace); - VRFY((ret >= 0), "H5Sclose succeeded"); - ret = H5Sclose(mem_dataspace); - VRFY((ret >= 0), "H5Sclose succeeded"); - ret = H5Pclose(xfer_plist); - VRFY((ret >= 0), "H5Pclose succeeded"); - - - /* close dataset collectively */ - ret = H5Dclose(dataset1); - VRFY((ret >= 0), "H5Dclose1 succeeded"); - ret = H5Dclose(dataset2); - VRFY((ret >= 0), "H5Dclose2 succeeded"); - - /* close the file collectively */ - H5Fclose(fid); - - /* release data buffers */ - if(data_origin) HDfree(data_origin); - if(data_array) HDfree(data_array); -} - - -/* Function: test_actual_io_mode - * - * Purpose: tests one specific case of collective I/O and checks that the - * actual_chunk_opt_mode property and the 
actual_io_mode - * properties in the DXPL have the correct values. - * - * Input: selection_mode: changes the way processes select data from the space, as well - * as some dxpl flags to get collective I/O to break in different ways. - * - * The relevant I/O function and expected response for each mode: - * TEST_ACTUAL_IO_MULTI_CHUNK_IND: - * H5D_mpi_chunk_collective_io, each process reports independent I/O - * - * TEST_ACTUAL_IO_MULTI_CHUNK_COL: - * H5D_mpi_chunk_collective_io, each process reports collective I/O - * - * TEST_ACTUAL_IO_MULTI_CHUNK_MIX: - * H5D_mpi_chunk_collective_io, each process reports mixed I/O - * - * TEST_ACTUAL_IO_MULTI_CHUNK_MIX_DISAGREE: - * H5D_mpi_chunk_collective_io, processes disagree. The root reports - * collective, the rest report independent I/O - * - * TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_IND: - * Same test TEST_ACTUAL_IO_MULTI_CHUNK_IND. - * Set directly go to multi-chunk-io without num threshold calc. - * TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_COL: - * Same test TEST_ACTUAL_IO_MULTI_CHUNK_COL. - * Set directly go to multi-chunk-io without num threshold calc. - * - * TEST_ACTUAL_IO_LINK_CHUNK: - * H5D_link_chunk_collective_io, processes report linked chunk I/O - * - * TEST_ACTUAL_IO_CONTIGUOUS: - * H5D__contig_collective_write or H5D__contig_collective_read - * each process reports contiguous collective I/O - * - * TEST_ACTUAL_IO_NO_COLLECTIVE: - * Simple independent I/O. This tests that the defaults are properly set. - * - * TEST_ACTUAL_IO_RESET: - * Perfroms collective and then independent I/O wit hthe same dxpl to - * make sure the peroperty is correctly reset to the default on each use. - * Specifically, this test runs TEST_ACTUAL_IO_MULTI_CHUNK_NO_OPT_MIX_DISAGREE - * (The most complex case that works on all builds) and then performs - * an independent read and write with the same dxpls. - * - * Note: DIRECT_MULTI_CHUNK_MIX and DIRECT_MULTI_CHUNK_MIX_DISAGREE - * is not needed as they are covered by DIRECT_CHUNK_MIX and - * MULTI_CHUNK_MIX_DISAGREE cases. _DIRECT_ cases are only for testing - * path way to multi-chunk-io by H5FD_MPIO_CHUNK_MULTI_IO insted of num-threshold. 
- * - * Modification: - * - Refctore to remove multi-chunk-without-opimization test and update for - * testing direct to multi-chunk-io - * Programmer: Jonathan Kim - * Date: 2012-10-10 - * - * - * Programmer: Jacob Gruber - * Date: 2011-04-06 - */ -static void -test_actual_io_mode(int selection_mode) { - H5D_mpio_actual_chunk_opt_mode_t actual_chunk_opt_mode_write = -1; - H5D_mpio_actual_chunk_opt_mode_t actual_chunk_opt_mode_read = -1; - H5D_mpio_actual_chunk_opt_mode_t actual_chunk_opt_mode_expected = -1; - H5D_mpio_actual_io_mode_t actual_io_mode_write = -1; - H5D_mpio_actual_io_mode_t actual_io_mode_read = -1; - H5D_mpio_actual_io_mode_t actual_io_mode_expected = -1; - const char * filename; - const char * test_name; - hbool_t direct_multi_chunk_io; - hbool_t multi_chunk_io; - hbool_t is_chunked; - hbool_t is_collective; - int mpi_size = -1; - int mpi_rank = -1; - int length; - int * buffer; - int i; - MPI_Comm mpi_comm = MPI_COMM_NULL; - MPI_Info mpi_info = MPI_INFO_NULL; - hid_t fid = -1; - hid_t sid = -1; - hid_t dataset = -1; - hid_t data_type = H5T_NATIVE_INT; - hid_t fapl = -1; - hid_t mem_space = -1; - hid_t file_space = -1; - hid_t dcpl = -1; - hid_t dxpl_write = -1; - hid_t dxpl_read = -1; - hsize_t dims[MAX_RANK]; - hsize_t chunk_dims[MAX_RANK]; - hsize_t start[MAX_RANK]; - hsize_t stride[MAX_RANK]; - hsize_t count[MAX_RANK]; - hsize_t block[MAX_RANK]; - char message[256]; - herr_t ret; - - /* Set up some flags to make some future if statements slightly more readable */ - direct_multi_chunk_io = ( - selection_mode == TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_IND || - selection_mode == TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_COL ); - - /* Note: RESET performs the same tests as MULTI_CHUNK_MIX_DISAGREE and then - * tests independent I/O - */ - multi_chunk_io = ( - selection_mode == TEST_ACTUAL_IO_MULTI_CHUNK_IND || - selection_mode == TEST_ACTUAL_IO_MULTI_CHUNK_COL || - selection_mode == TEST_ACTUAL_IO_MULTI_CHUNK_MIX || - selection_mode == TEST_ACTUAL_IO_MULTI_CHUNK_MIX_DISAGREE || - selection_mode == TEST_ACTUAL_IO_RESET ); - - is_chunked = ( - selection_mode != TEST_ACTUAL_IO_CONTIGUOUS && - selection_mode != TEST_ACTUAL_IO_NO_COLLECTIVE); - - is_collective = selection_mode != TEST_ACTUAL_IO_NO_COLLECTIVE; - - /* Set up MPI parameters */ - MPI_Comm_size(test_comm, &mpi_size); - MPI_Comm_rank(test_comm, &mpi_rank); - - MPI_Barrier(test_comm); - - HDassert(mpi_size >= 1); - - mpi_comm = test_comm; - mpi_info = MPI_INFO_NULL; - - filename = (const char *)GetTestParameters(); - HDassert(filename != NULL); - - /* Setup the file access template */ - fapl = create_faccess_plist(mpi_comm, mpi_info, facc_type); - VRFY((fapl >= 0), "create_faccess_plist() succeeded"); - - /* Create the file */ - fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl); - VRFY((fid >= 0), "H5Fcreate succeeded"); - - /* Create the basic Space */ - dims[0] = dim0; - dims[1] = dim1; - sid = H5Screate_simple (MAX_RANK, dims, NULL); - VRFY((sid >= 0), "H5Screate_simple succeeded"); - - /* Create the dataset creation plist */ - dcpl = H5Pcreate(H5P_DATASET_CREATE); - VRFY((dcpl >= 0), "dataset creation plist created successfully"); - - /* If we are not testing contiguous datasets */ - if(is_chunked) { - /* Set up chunk information. 
*/ - chunk_dims[0] = dims[0]/mpi_size; - chunk_dims[1] = dims[1]; - ret = H5Pset_chunk(dcpl, 2, chunk_dims); - VRFY((ret >= 0),"chunk creation property list succeeded"); - } - - /* Create the dataset */ - dataset = H5Dcreate2(fid, "actual_io", data_type, sid, H5P_DEFAULT, - dcpl, H5P_DEFAULT); - VRFY((dataset >= 0), "H5Dcreate2() dataset succeeded"); - - /* Create the file dataspace */ - file_space = H5Dget_space(dataset); - VRFY((file_space >= 0), "H5Dget_space succeeded"); - - /* Choose a selection method based on the type of I/O we want to occur, - * and also set up some selection-dependeent test info. */ - switch(selection_mode) { - - /* Independent I/O with optimization */ - case TEST_ACTUAL_IO_MULTI_CHUNK_IND: - case TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_IND: - /* Since the dataset is chunked by row and each process selects a row, - * each process writes to a different chunk. This forces all I/O to be - * independent. - */ - slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW); - - test_name = "Multi Chunk - Independent"; - actual_chunk_opt_mode_expected = H5D_MPIO_MULTI_CHUNK; - actual_io_mode_expected = H5D_MPIO_CHUNK_INDEPENDENT; - break; - - /* Collective I/O with optimization */ - case TEST_ACTUAL_IO_MULTI_CHUNK_COL: - case TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_COL: - /* The dataset is chunked by rows, so each process takes a column which - * spans all chunks. Since the processes write non-overlapping regular - * selections to each chunk, the operation is purely collective. - */ - slab_set(mpi_rank, mpi_size, start, count, stride, block, BYCOL); - - test_name = "Multi Chunk - Collective"; - actual_chunk_opt_mode_expected = H5D_MPIO_MULTI_CHUNK; - if(mpi_size > 1) - actual_io_mode_expected = H5D_MPIO_CHUNK_COLLECTIVE; - else - actual_io_mode_expected = H5D_MPIO_CHUNK_INDEPENDENT; - break; - - /* Mixed I/O with optimization */ - case TEST_ACTUAL_IO_MULTI_CHUNK_MIX: - /* A chunk will be assigned collective I/O only if it is selected by each - * process. To get mixed I/O, have the root select all chunks and each - * subsequent process select the first and nth chunk. The first chunk, - * accessed by all, will be assigned collective I/O while each other chunk - * will be accessed only by the root and the nth procecess and will be - * assigned independent I/O. Each process will access one chunk collectively - * and at least one chunk independently, reporting mixed I/O. - */ - - if(mpi_rank == 0) { - /* Select the first column */ - slab_set(mpi_rank, mpi_size, start, count, stride, block, BYCOL); - } else { - /* Select the first and the nth chunk in the nth column */ - block[0] = dim0 / mpi_size; - block[1] = dim1 / mpi_size; - count[0] = 2; - count[1] = 1; - stride[0] = mpi_rank * block[0]; - stride[1] = 1; - start[0] = 0; - start[1] = mpi_rank*block[1]; - } - - test_name = "Multi Chunk - Mixed"; - actual_chunk_opt_mode_expected = H5D_MPIO_MULTI_CHUNK; - actual_io_mode_expected = H5D_MPIO_CHUNK_MIXED; - break; - - /* RESET tests that the properties are properly reset to defaults each time I/O is - * performed. To acheive this, we have RESET perform collective I/O (which would change - * the values from the defaults) followed by independent I/O (which should report the - * default values). RESET doesn't need to have a unique selection, so we reuse - * MULTI_CHUMK_MIX_DISAGREE, which was chosen because it is a complex case that works - * on all builds. The independent section of RESET can be found at the end of this function. 
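The BYROW and BYCOL selections come from the shared slab_set() helper, which is defined outside this excerpt. Roughly, BYROW gives each rank a contiguous band of rows (one chunk per rank with the chunking above) and BYCOL gives each rank a band of columns cutting across every chunk. The sketch below shows only the general shape of those selections, not the helper's exact arithmetic.

#include "hdf5.h"

/* Sketch of the two selection shapes the tests rely on; dims[] is the full
 * dataset extent and the arithmetic is illustrative only. */
static void
sketch_slab(int mpi_rank, int mpi_size, const hsize_t dims[2], int by_row,
            hsize_t start[2], hsize_t count[2], hsize_t block[2])
{
    count[0] = count[1] = 1;
    if (by_row) {               /* BYROW: a band of whole rows per rank */
        block[0] = dims[0] / (hsize_t)mpi_size;
        block[1] = dims[1];
        start[0] = (hsize_t)mpi_rank * block[0];
        start[1] = 0;
    }
    else {                      /* BYCOL: a band of whole columns per rank */
        block[0] = dims[0];
        block[1] = dims[1] / (hsize_t)mpi_size;
        start[0] = 0;
        start[1] = (hsize_t)mpi_rank * block[1];
    }
}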
- */ - case TEST_ACTUAL_IO_RESET: - - /* Mixed I/O with optimization and internal disagreement */ - case TEST_ACTUAL_IO_MULTI_CHUNK_MIX_DISAGREE: - /* A chunk will be assigned collective I/O only if it is selected by each - * process. To get mixed I/O with disagreement, assign process n to the - * first chunk and the nth chunk. The first chunk, selected by all, is - * assgigned collective I/O, while each other process gets independent I/O. - * Since the root process with only access the first chunk, it will report - * collective I/O. The subsequent processes will access the first chunk - * collectively, and their other chunk indpendently, reporting mixed I/O. - */ - - if(mpi_rank == 0) { - /* Select the first chunk in the first column */ - slab_set(mpi_rank, mpi_size, start, count, stride, block, BYCOL); - block[0] = block[0] / mpi_size; - } else { - /* Select the first and the nth chunk in the nth column */ - block[0] = dim0 / mpi_size; - block[1] = dim1 / mpi_size; - count[0] = 2; - count[1] = 1; - stride[0] = mpi_rank * block[0]; - stride[1] = 1; - start[0] = 0; - start[1] = mpi_rank*block[1]; - } - - /* If the testname was not already set by the RESET case */ - if (selection_mode == TEST_ACTUAL_IO_RESET) - test_name = "RESET"; - else - test_name = "Multi Chunk - Mixed (Disagreement)"; - - actual_chunk_opt_mode_expected = H5D_MPIO_MULTI_CHUNK; - if(mpi_size > 1) { - if(mpi_rank == 0) - actual_io_mode_expected = H5D_MPIO_CHUNK_COLLECTIVE; - else - actual_io_mode_expected = H5D_MPIO_CHUNK_MIXED; - } - else - actual_io_mode_expected = H5D_MPIO_CHUNK_INDEPENDENT; - - break; - - /* Linked Chunk I/O */ - case TEST_ACTUAL_IO_LINK_CHUNK: - /* Nothing special; link chunk I/O is forced in the dxpl settings. */ - slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW); - - test_name = "Link Chunk"; - actual_chunk_opt_mode_expected = H5D_MPIO_LINK_CHUNK; - actual_io_mode_expected = H5D_MPIO_CHUNK_COLLECTIVE; - break; - - /* Contiguous Dataset */ - case TEST_ACTUAL_IO_CONTIGUOUS: - /* A non overlapping, regular selection in a contiguous dataset leads to - * collective I/O */ - slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW); - - test_name = "Contiguous"; - actual_chunk_opt_mode_expected = H5D_MPIO_NO_CHUNK_OPTIMIZATION; - actual_io_mode_expected = H5D_MPIO_CONTIGUOUS_COLLECTIVE; - break; - - case TEST_ACTUAL_IO_NO_COLLECTIVE: - slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW); - - test_name = "Independent"; - actual_chunk_opt_mode_expected = H5D_MPIO_NO_CHUNK_OPTIMIZATION; - actual_io_mode_expected = H5D_MPIO_NO_COLLECTIVE; - break; - - default: - test_name = "Undefined Selection Mode"; - actual_chunk_opt_mode_expected = -1; - actual_io_mode_expected = -1; - break; - } - - ret = H5Sselect_hyperslab(file_space, H5S_SELECT_SET, start, stride, count, block); - VRFY((ret >= 0), "H5Sset_hyperslab succeeded"); - - /* Create a memory dataspace mirroring the dataset and select the same hyperslab - * as in the file space. 
- */ - mem_space = H5Screate_simple (MAX_RANK, dims, NULL); - VRFY((mem_space >= 0), "mem_space created"); - - ret = H5Sselect_hyperslab(mem_space, H5S_SELECT_SET, start, stride, count, block); - VRFY((ret >= 0), "H5Sset_hyperslab succeeded"); - - /* Get the number of elements in the selection */ - length = dim0 * dim1; - - /* Allocate and initialize the buffer */ - buffer = (int *)HDmalloc(sizeof(int) * length); - VRFY((buffer != NULL), "HDmalloc of buffer succeeded"); - for(i = 0; i < length; i++) - buffer[i] = i; - - /* Set up the dxpl for the write */ - dxpl_write = H5Pcreate(H5P_DATASET_XFER); - VRFY((dxpl_write >= 0), "H5Pcreate(H5P_DATASET_XFER) succeeded"); - - /* Set collective I/O properties in the dxpl. */ - if(is_collective) { - /* Request collective I/O */ - ret = H5Pset_dxpl_mpio(dxpl_write, H5FD_MPIO_COLLECTIVE); - VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded"); - - /* Set the threshold number of processes per chunk to twice mpi_size. - * This will prevent the threshold from ever being met, thus forcing - * multi chunk io instead of link chunk io. - * This is via deault. - */ - if(multi_chunk_io) { - /* force multi-chunk-io by threshold */ - ret = H5Pset_dxpl_mpio_chunk_opt_num(dxpl_write, (unsigned) mpi_size*2); - VRFY((ret >= 0), "H5Pset_dxpl_mpio_chunk_opt_num succeeded"); - - /* set this to manipulate testing senario about allocating processes - * to chunks */ - ret = H5Pset_dxpl_mpio_chunk_opt_ratio(dxpl_write, (unsigned) 99); - VRFY((ret >= 0), "H5Pset_dxpl_mpio_chunk_opt_ratio succeeded"); - } - - /* Set directly go to multi-chunk-io without threshold calc. */ - if(direct_multi_chunk_io) { - /* set for multi chunk io by property*/ - ret = H5Pset_dxpl_mpio_chunk_opt(dxpl_write, H5FD_MPIO_CHUNK_MULTI_IO); - VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded"); - } - } - - /* Make a copy of the dxpl to test the read operation */ - dxpl_read = H5Pcopy(dxpl_write); - VRFY((dxpl_read >= 0), "H5Pcopy succeeded"); - - /* Write */ - ret = H5Dwrite(dataset, data_type, mem_space, file_space, dxpl_write, buffer); - if(ret < 0) H5Eprint2(H5E_DEFAULT, stdout); - VRFY((ret >= 0), "H5Dwrite() dataset multichunk write succeeded"); - - /* Retreive Actual io valuess */ - ret = H5Pget_mpio_actual_io_mode(dxpl_write, &actual_io_mode_write); - VRFY((ret >= 0), "retriving actual io mode suceeded" ); - - ret = H5Pget_mpio_actual_chunk_opt_mode(dxpl_write, &actual_chunk_opt_mode_write); - VRFY((ret >= 0), "retriving actual chunk opt mode succeeded" ); - - /* Read */ - ret = H5Dread(dataset, data_type, mem_space, file_space, dxpl_read, buffer); - if(ret < 0) H5Eprint2(H5E_DEFAULT, stdout); - VRFY((ret >= 0), "H5Dread() dataset multichunk read succeeded"); - - /* Retreive Actual io values */ - ret = H5Pget_mpio_actual_io_mode(dxpl_read, &actual_io_mode_read); - VRFY((ret >= 0), "retriving actual io mode succeeded" ); - - ret = H5Pget_mpio_actual_chunk_opt_mode(dxpl_read, &actual_chunk_opt_mode_read); - VRFY((ret >= 0), "retriving actual chunk opt mode succeeded" ); - - /* Check write vs read */ - VRFY((actual_io_mode_read == actual_io_mode_write), - "reading and writing are the same for actual_io_mode"); - VRFY((actual_chunk_opt_mode_read == actual_chunk_opt_mode_write), - "reading and writing are the same for actual_chunk_opt_mode"); - - /* Test values */ - if(actual_chunk_opt_mode_expected != (H5D_mpio_actual_chunk_opt_mode_t) -1 && actual_io_mode_expected != (H5D_mpio_actual_io_mode_t) -1) { - HDsprintf(message, "Actual Chunk Opt Mode has the correct value for %s.\n",test_name); - 
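The write/read pair above is bracketed by the two introspection calls this test exists for, H5Pget_mpio_actual_io_mode() and H5Pget_mpio_actual_chunk_opt_mode(), together with the dxpl knobs that push the library into multi-chunk I/O. A condensed sketch of that round trip; the helper name and the threshold value are illustrative.

#include "hdf5.h"

/* Sketch: force multi-chunk collective I/O, write, then ask the dxpl what
 * the library actually did. */
static void
write_and_inspect(hid_t dset, hid_t mspace, hid_t fspace, const int *buf, int mpi_size)
{
    H5D_mpio_actual_io_mode_t        io_mode;
    H5D_mpio_actual_chunk_opt_mode_t opt_mode;
    hid_t dxpl = H5Pcreate(H5P_DATASET_XFER);

    H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);
    /* Either raise the per-chunk process threshold so link-chunk I/O is never
     * chosen ... */
    H5Pset_dxpl_mpio_chunk_opt_num(dxpl, (unsigned)(2 * mpi_size));
    /* ... or jump straight to multi-chunk I/O without the threshold test: */
    /* H5Pset_dxpl_mpio_chunk_opt(dxpl, H5FD_MPIO_CHUNK_MULTI_IO); */

    H5Dwrite(dset, H5T_NATIVE_INT, mspace, fspace, dxpl, buf);

    /* The properties are reset by each I/O call, so query them afterwards */
    H5Pget_mpio_actual_io_mode(dxpl, &io_mode);
    H5Pget_mpio_actual_chunk_opt_mode(dxpl, &opt_mode);

    H5Pclose(dxpl);
}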
VRFY((actual_chunk_opt_mode_write == actual_chunk_opt_mode_expected), message); - HDsprintf(message, "Actual IO Mode has the correct value for %s.\n",test_name); - VRFY((actual_io_mode_write == actual_io_mode_expected), message); - } else { - HDfprintf(stderr, "%s %d -> (%d,%d)\n", test_name, mpi_rank, - actual_chunk_opt_mode_write, actual_io_mode_write); - } - - /* To test that the property is succesfully reset to the default, we perform some - * independent I/O after the collective I/O - */ - if (selection_mode == TEST_ACTUAL_IO_RESET) { - if (mpi_rank == 0) { - /* Switch to independent io */ - ret = H5Pset_dxpl_mpio(dxpl_write, H5FD_MPIO_INDEPENDENT); - VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded"); - ret = H5Pset_dxpl_mpio(dxpl_read, H5FD_MPIO_INDEPENDENT); - VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded"); - - /* Write */ - ret = H5Dwrite(dataset, data_type, H5S_ALL, H5S_ALL, dxpl_write, buffer); - VRFY((ret >= 0), "H5Dwrite() dataset multichunk write succeeded"); - - /* Check Properties */ - ret = H5Pget_mpio_actual_io_mode(dxpl_write, &actual_io_mode_write); - VRFY( (ret >= 0), "retriving actual io mode succeeded" ); - ret = H5Pget_mpio_actual_chunk_opt_mode(dxpl_write, &actual_chunk_opt_mode_write); - VRFY( (ret >= 0), "retriving actual chunk opt mode succeeded" ); - - VRFY(actual_chunk_opt_mode_write == H5D_MPIO_NO_CHUNK_OPTIMIZATION, - "actual_chunk_opt_mode has correct value for reset write (independent)"); - VRFY(actual_io_mode_write == H5D_MPIO_NO_COLLECTIVE, - "actual_io_mode has correct value for reset write (independent)"); - - /* Read */ - ret = H5Dread(dataset, data_type, H5S_ALL, H5S_ALL, dxpl_read, buffer); - VRFY((ret >= 0), "H5Dwrite() dataset multichunk write succeeded"); - - /* Check Properties */ - ret = H5Pget_mpio_actual_io_mode(dxpl_read, &actual_io_mode_read); - VRFY( (ret >= 0), "retriving actual io mode succeeded" ); - ret = H5Pget_mpio_actual_chunk_opt_mode(dxpl_read, &actual_chunk_opt_mode_read); - VRFY( (ret >= 0), "retriving actual chunk opt mode succeeded" ); - - VRFY(actual_chunk_opt_mode_read == H5D_MPIO_NO_CHUNK_OPTIMIZATION, - "actual_chunk_opt_mode has correct value for reset read (independent)"); - VRFY(actual_io_mode_read == H5D_MPIO_NO_COLLECTIVE, - "actual_io_mode has correct value for reset read (independent)"); - } - } - - /* Release some resources */ - ret = H5Sclose(sid); - ret = H5Pclose(fapl); - ret = H5Pclose(dcpl); - ret = H5Pclose(dxpl_write); - ret = H5Pclose(dxpl_read); - ret = H5Dclose(dataset); - ret = H5Sclose(mem_space); - ret = H5Sclose(file_space); - ret = H5Fclose(fid); - HDfree(buffer); - return; -} - - -/* Function: actual_io_mode_tests - * - * Purpose: Tests all possible cases of the actual_io_mode property. - * - * Programmer: Jacob Gruber - * Date: 2011-04-06 - */ -void -actual_io_mode_tests(void) { - int mpi_size = -1; - int mpi_rank = -1; - MPI_Comm_size(test_comm, &mpi_size); - MPI_Comm_size(test_comm, &mpi_rank); - - test_actual_io_mode(TEST_ACTUAL_IO_NO_COLLECTIVE); - - /* - * Test multi-chunk-io via proc_num threshold - */ - test_actual_io_mode(TEST_ACTUAL_IO_MULTI_CHUNK_IND); - test_actual_io_mode(TEST_ACTUAL_IO_MULTI_CHUNK_COL); - - /* The Multi Chunk Mixed test requires atleast three processes. 
*/ - if (mpi_size > 2) - test_actual_io_mode(TEST_ACTUAL_IO_MULTI_CHUNK_MIX); - else - HDfprintf(stdout, "Multi Chunk Mixed test requires 3 proceses minimum\n"); - - test_actual_io_mode(TEST_ACTUAL_IO_MULTI_CHUNK_MIX_DISAGREE); - - /* - * Test multi-chunk-io via setting direct property - */ - test_actual_io_mode(TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_IND); - test_actual_io_mode(TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_COL); - - test_actual_io_mode(TEST_ACTUAL_IO_LINK_CHUNK); - test_actual_io_mode(TEST_ACTUAL_IO_CONTIGUOUS); - - test_actual_io_mode(TEST_ACTUAL_IO_RESET); - return; -} - -/* - * Function: test_no_collective_cause_mode - * - * Purpose: - * tests cases for broken collective I/O and checks that the - * H5Pget_mpio_no_collective_cause properties in the DXPL have the correct values. - * - * Input: - * selection_mode: various mode to cause broken collective I/O - * Note: Originally, each TEST case is supposed to be used alone. - * After some discussion, this is updated to take multiple TEST cases - * with '|'. However there is no error check for any of combined - * test cases, so a tester is responsible to understand and feed - * proper combination of TESTs if needed. - * - * - * TEST_COLLECTIVE: - * Test for regular collective I/O without cause of breaking. - * Just to test normal behavior. - * - * TEST_SET_INDEPENDENT: - * Test for Independent I/O as the cause of breaking collective I/O. - * - * TEST_DATATYPE_CONVERSION: - * Test for Data Type Conversion as the cause of breaking collective I/O. - * - * TEST_DATA_TRANSFORMS: - * Test for Data Transfrom feature as the cause of breaking collective I/O. - * - * TEST_NOT_SIMPLE_OR_SCALAR_DATASPACES: - * Test for NULL dataspace as the cause of breaking collective I/O. - * - * TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_COMPACT: - * Test for Compact layout as the cause of breaking collective I/O. - * - * TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_EXTERNAL: - * Test for Externl-File storage as the cause of breaking collective I/O. - * - * TEST_FILTERS: - * Test for using filter (checksum) as the cause of breaking collective I/O. - * Note: TEST_FILTERS mode will not work until H5Dcreate and H5write is supported for mpio and filter feature. Use test_no_collective_cause_mode_filter() function instead. 
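The test that follows centres on a single query: after an I/O call made with a collective dxpl, H5Pget_mpio_no_collective_cause() reports why (or whether) the library had to fall back to independent I/O, as a local and a global bitmask. A minimal sketch of that query; the helper name is hypothetical.

#include <stdint.h>
#include <stdio.h>
#include "hdf5.h"

/* Sketch: ask a transfer property list why collective I/O was broken.
 * A result of H5D_MPIO_COLLECTIVE means no cause was recorded. */
static void
report_no_collective_cause(hid_t dxpl)
{
    uint32_t local_cause  = 0;
    uint32_t global_cause = 0;

    H5Pget_mpio_no_collective_cause(dxpl, &local_cause, &global_cause);

    if (local_cause & H5D_MPIO_DATATYPE_CONVERSION)
        printf("collective I/O was broken by a datatype conversion\n");
    if (global_cause == H5D_MPIO_COLLECTIVE)
        printf("collective I/O was performed on every rank\n");
}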
- * - * - * Programmer: Jonathan Kim - * Date: Aug, 2012 - */ -#define DSET_NOCOLCAUSE "nocolcause" -#define NELM 2 -#define FILE_EXTERNAL "nocolcause_extern.data" -static void -test_no_collective_cause_mode(int selection_mode) -{ - uint32_t no_collective_cause_local_write = 0; - uint32_t no_collective_cause_local_read = 0; - uint32_t no_collective_cause_local_expected = 0; - uint32_t no_collective_cause_global_write = 0; - uint32_t no_collective_cause_global_read = 0; - uint32_t no_collective_cause_global_expected = 0; - // hsize_t coord[NELM][MAX_RANK]; - - const char * filename; - const char * test_name; - hbool_t is_chunked=1; - hbool_t is_independent=0; - int mpi_size = -1; - int mpi_rank = -1; - int length; - int * buffer; - int i; - MPI_Comm mpi_comm; - MPI_Info mpi_info; - hid_t fid = -1; - hid_t sid = -1; - hid_t dataset = -1; - hid_t data_type = H5T_NATIVE_INT; - hid_t fapl = -1; - hid_t dcpl = -1; - hid_t dxpl_write = -1; - hid_t dxpl_read = -1; - hsize_t dims[MAX_RANK]; - hid_t mem_space = -1; - hid_t file_space = -1; - hsize_t chunk_dims[MAX_RANK]; - herr_t ret; -#ifdef LATER /* fletcher32 */ - H5Z_filter_t filter_info; -#endif /* LATER */ - /* set to global value as default */ - int l_facc_type = facc_type; - char message[256]; - - /* Set up MPI parameters */ - MPI_Comm_size(test_comm, &mpi_size); - MPI_Comm_rank(test_comm, &mpi_rank); - - MPI_Barrier(test_comm); - - HDassert(mpi_size >= 1); - - mpi_comm = test_comm; - mpi_info = MPI_INFO_NULL; - - /* Create the dataset creation plist */ - dcpl = H5Pcreate(H5P_DATASET_CREATE); - VRFY((dcpl >= 0), "dataset creation plist created successfully"); - - if (selection_mode & TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_COMPACT) { - ret = H5Pset_layout (dcpl, H5D_COMPACT); - VRFY((ret >= 0),"set COMPACT layout succeeded"); - is_chunked = 0; - } - - if (selection_mode & TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_EXTERNAL) { - ret = H5Pset_external (dcpl, FILE_EXTERNAL, (off_t) 0, H5F_UNLIMITED); - VRFY((ret >= 0),"set EXTERNAL file layout succeeded"); - is_chunked = 0; - } - -#ifdef LATER /* fletcher32 */ - if (selection_mode & TEST_FILTERS) { - ret = H5Zfilter_avail(H5Z_FILTER_FLETCHER32); - VRFY ((ret >=0 ), "Fletcher32 filter is available.\n"); - - ret = H5Zget_filter_info (H5Z_FILTER_FLETCHER32, &filter_info); - VRFY ( ( (filter_info & H5Z_FILTER_CONFIG_ENCODE_ENABLED) || (filter_info & H5Z_FILTER_CONFIG_DECODE_ENABLED) ) , "Fletcher32 filter encoding and decoding available.\n"); - - ret = H5Pset_fletcher32(dcpl); - VRFY((ret >= 0),"set filter (flecher32) succeeded"); - } -#endif /* LATER */ - - if (selection_mode & TEST_NOT_SIMPLE_OR_SCALAR_DATASPACES) { - sid = H5Screate(H5S_NULL); - VRFY((sid >= 0), "H5Screate_simple succeeded"); - is_chunked = 0; - } - else { - /* Create the basic Space */ - /* if this is a compact dataset, create a small dataspace that does not exceed 64K */ - if (selection_mode & TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_COMPACT) { - dims[0] = BIG_X_FACTOR * 6; - dims[1] = BIG_Y_FACTOR * 6; - } - else { - dims[0] = dim0; - dims[1] = dim1; - } - sid = H5Screate_simple (MAX_RANK, dims, NULL); - VRFY((sid >= 0), "H5Screate_simple succeeded"); - } - - - filename = (const char *)GetTestParameters(); - HDassert(filename != NULL); - - /* Setup the file access template */ - fapl = create_faccess_plist(mpi_comm, mpi_info, l_facc_type); - VRFY((fapl >= 0), "create_faccess_plist() succeeded"); - - /* Create the file */ - fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl); - - VRFY((fid >= 0), "H5Fcreate succeeded"); - - /* 
If we are not testing contiguous datasets */ - if(is_chunked) { - /* Set up chunk information. */ - chunk_dims[0] = dims[0]/mpi_size; - chunk_dims[1] = dims[1]; - ret = H5Pset_chunk(dcpl, 2, chunk_dims); - VRFY((ret >= 0),"chunk creation property list succeeded"); - } - - - /* Create the dataset */ - dataset = H5Dcreate2(fid, "nocolcause", data_type, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); - VRFY((dataset >= 0), "H5Dcreate2() dataset succeeded"); - - - /* - * Set expected causes and some tweaks based on the type of test - */ - if (selection_mode & TEST_DATATYPE_CONVERSION) { - test_name = "Broken Collective I/O - Datatype Conversion"; - no_collective_cause_local_expected |= H5D_MPIO_DATATYPE_CONVERSION; - no_collective_cause_global_expected |= H5D_MPIO_DATATYPE_CONVERSION; - /* set different sign to trigger type conversion */ - data_type = H5T_NATIVE_UINT; - } - - if (selection_mode & TEST_DATA_TRANSFORMS) { - test_name = "Broken Collective I/O - DATA Transfroms"; - no_collective_cause_local_expected |= H5D_MPIO_DATA_TRANSFORMS; - no_collective_cause_global_expected |= H5D_MPIO_DATA_TRANSFORMS; - } - - if (selection_mode & TEST_NOT_SIMPLE_OR_SCALAR_DATASPACES) { - test_name = "Broken Collective I/O - No Simple or Scalar DataSpace"; - no_collective_cause_local_expected |= H5D_MPIO_NOT_SIMPLE_OR_SCALAR_DATASPACES; - no_collective_cause_global_expected |= H5D_MPIO_NOT_SIMPLE_OR_SCALAR_DATASPACES; - } - - if (selection_mode & TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_COMPACT || - selection_mode & TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_EXTERNAL) { - test_name = "Broken Collective I/O - No CONTI or CHUNKED Dataset"; - no_collective_cause_local_expected |= H5D_MPIO_NOT_CONTIGUOUS_OR_CHUNKED_DATASET; - no_collective_cause_global_expected |= H5D_MPIO_NOT_CONTIGUOUS_OR_CHUNKED_DATASET; - } - -#ifdef LATER /* fletcher32 */ - if (selection_mode & TEST_FILTERS) { - test_name = "Broken Collective I/O - Filter is required"; - no_collective_cause_local_expected |= H5D_MPIO_FILTERS; - no_collective_cause_global_expected |= H5D_MPIO_FILTERS; - } -#endif /* LATER */ - - if (selection_mode & TEST_COLLECTIVE) { - test_name = "Broken Collective I/O - Not Broken"; - no_collective_cause_local_expected = H5D_MPIO_COLLECTIVE; - no_collective_cause_global_expected = H5D_MPIO_COLLECTIVE; - } - - if (selection_mode & TEST_SET_INDEPENDENT) { - test_name = "Broken Collective I/O - Independent"; - no_collective_cause_local_expected = H5D_MPIO_SET_INDEPENDENT; - no_collective_cause_global_expected = H5D_MPIO_SET_INDEPENDENT; - /* switch to independent io */ - is_independent = 1; - } - - /* use all spaces for certain tests */ - if (selection_mode & TEST_NOT_SIMPLE_OR_SCALAR_DATASPACES || - selection_mode & TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_EXTERNAL) { - file_space = H5S_ALL; - mem_space = H5S_ALL; - } - else { - /* Get the file dataspace */ - file_space = H5Dget_space(dataset); - VRFY((file_space >= 0), "H5Dget_space succeeded"); - - /* Create the memory dataspace */ - mem_space = H5Screate_simple (MAX_RANK, dims, NULL); - VRFY((mem_space >= 0), "mem_space created"); - } - - /* Get the number of elements in the selection */ - length = dims[0] * dims[1]; - - /* Allocate and initialize the buffer */ - buffer = (int *)HDmalloc(sizeof(int) * length); - VRFY((buffer != NULL), "HDmalloc of buffer succeeded"); - for(i = 0; i < length; i++) - buffer[i] = i; - - /* Set up the dxpl for the write */ - dxpl_write = H5Pcreate(H5P_DATASET_XFER); - VRFY((dxpl_write >= 0), "H5Pcreate(H5P_DATASET_XFER) succeeded"); - - if(is_independent) { 
-        /* Set Independent I/O */
-        ret = H5Pset_dxpl_mpio(dxpl_write, H5FD_MPIO_INDEPENDENT);
-        VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
-    }
-    else {
-        /* Set Collective I/O */
-        ret = H5Pset_dxpl_mpio(dxpl_write, H5FD_MPIO_COLLECTIVE);
-        VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
-
-    }
-
-    if (selection_mode & TEST_DATA_TRANSFORMS) {
-        ret = H5Pset_data_transform (dxpl_write, "x+1");
-        VRFY((ret >= 0), "H5Pset_data_transform succeeded");
-    }
-
-    /*---------------------
-     * Test Write access
-     *---------------------*/
-
-    /* Write */
-    ret = H5Dwrite(dataset, data_type, mem_space, file_space, dxpl_write, buffer);
-    if(ret < 0) H5Eprint2(H5E_DEFAULT, stdout);
-    VRFY((ret >= 0), "H5Dwrite() dataset multichunk write succeeded");
-
-
-    /* Get the cause of broken collective I/O */
-    ret = H5Pget_mpio_no_collective_cause (dxpl_write, &no_collective_cause_local_write, &no_collective_cause_global_write);
-    VRFY((ret >= 0), "retrieving no collective cause succeeded" );
-
-
-    /*---------------------
-     * Test Read access
-     *---------------------*/
-
-    /* Make a copy of the dxpl to test the read operation */
-    dxpl_read = H5Pcopy(dxpl_write);
-    VRFY((dxpl_read >= 0), "H5Pcopy succeeded");
-
-    /* Read */
-    ret = H5Dread(dataset, data_type, mem_space, file_space, dxpl_read, buffer);
-
-    if(ret < 0) H5Eprint2(H5E_DEFAULT, stdout);
-    VRFY((ret >= 0), "H5Dread() dataset multichunk read succeeded");
-
-    /* Get the cause of broken collective I/O */
-    ret = H5Pget_mpio_no_collective_cause (dxpl_read, &no_collective_cause_local_read, &no_collective_cause_global_read);
-    VRFY((ret >= 0), "retrieving no collective cause succeeded" );
-
-    /* Check write vs read */
-    VRFY((no_collective_cause_local_read == no_collective_cause_local_write),
-         "reading and writing are the same for local cause of Broken Collective I/O");
-    VRFY((no_collective_cause_global_read == no_collective_cause_global_write),
-         "reading and writing are the same for global cause of Broken Collective I/O");
-
-    /* Test values */
-    HDmemset (message, 0, sizeof (message));
-    HDsprintf(message, "Local cause of Broken Collective I/O has the correct value for %s.\n",test_name);
-    VRFY((no_collective_cause_local_write == no_collective_cause_local_expected), message);
-    HDmemset (message, 0, sizeof (message));
-    HDsprintf(message, "Global cause of Broken Collective I/O has the correct value for %s.\n",test_name);
-    VRFY((no_collective_cause_global_write == no_collective_cause_global_expected), message);
-
-    /* Release some resources */
-    if (sid)
-        H5Sclose(sid);
-    if (fapl)
-        H5Pclose(fapl);
-    if (dcpl)
-        H5Pclose(dcpl);
-    if (dxpl_write)
-        H5Pclose(dxpl_write);
-    if (dxpl_read)
-        H5Pclose(dxpl_read);
-    if (dataset)
-        H5Dclose(dataset);
-    if (mem_space)
-        H5Sclose(mem_space);
-    if (file_space)
-        H5Sclose(file_space);
-    if (fid)
-        H5Fclose(fid);
-    HDfree(buffer);
-
-    /* clean up external file */
-    if (selection_mode & TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_EXTERNAL)
-        HDremove(FILE_EXTERNAL);
-
-    return;
-}
-
-
-#if 0
-/*
- * Function: test_no_collective_cause_mode_filter
- *
- * Purpose:
- *    Test specific for using a filter as a cause of broken collective I/O and
- *    checks that the H5Pget_mpio_no_collective_cause properties in the DXPL
- *    have the correct values.
- *
- * NOTE:
- *    This is a temporary function.
- *    test_no_collective_cause_mode(TEST_FILTERS) will replace this when
- *    H5Dcreate and H5Dwrite support the mpio + filter feature.
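/*
 * A minimal stand-alone sketch of the diagnostic pattern exercised above:
 * request collective transfer, perform the I/O, then ask the library why it
 * fell back to independent mode. "dset", "mem_space", "file_space" and "buf"
 * are assumed to exist, and a parallel HDF5 build is required; this is an
 * illustration, not code from the reverted file.
 */
#include <stdio.h>
#include <stdint.h>
#include <hdf5.h>

static herr_t diagnose_collective_write(hid_t dset, hid_t mem_space,
                                        hid_t file_space, const int *buf)
{
    uint32_t local_cause = 0, global_cause = 0;
    hid_t    dxpl = H5Pcreate(H5P_DATASET_XFER);
    herr_t   ret;

    if (dxpl < 0)
        return -1;

    H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);   /* ask for collective I/O */
    ret = H5Dwrite(dset, H5T_NATIVE_INT, mem_space, file_space, dxpl, buf);

    /* The cause values are bitmasks of H5D_MPIO_* flags;
     * H5D_MPIO_COLLECTIVE (zero) means the transfer stayed collective. */
    H5Pget_mpio_no_collective_cause(dxpl, &local_cause, &global_cause);
    if (global_cause != H5D_MPIO_COLLECTIVE)
        fprintf(stderr, "collective I/O was broken (cause mask 0x%x)\n",
                (unsigned)global_cause);

    H5Pclose(dxpl);
    return ret;
}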
- * - * Input: - * TEST_FILTERS_READ: - * Test for using filter (checksum) as the cause of breaking collective I/O. - * - * Programmer: Jonathan Kim - * Date: Aug, 2012 - */ -static void -test_no_collective_cause_mode_filter(int selection_mode) -{ - uint32_t no_collective_cause_local_read = 0; - uint32_t no_collective_cause_local_expected = 0; - uint32_t no_collective_cause_global_read = 0; - uint32_t no_collective_cause_global_expected = 0; - - const char * filename; - const char * test_name; - hbool_t is_chunked=1; - int mpi_size = -1; - int mpi_rank = -1; - int length; - int * buffer; - int i; - MPI_Comm mpi_comm = MPI_COMM_NULL; - MPI_Info mpi_info = MPI_INFO_NULL; - hid_t fid = -1; - hid_t sid = -1; - hid_t dataset = -1; - hid_t data_type = H5T_NATIVE_INT; - hid_t fapl_write = -1; - hid_t fapl_read = -1; - hid_t dcpl = -1; - hid_t dxpl = -1; - hsize_t dims[MAX_RANK]; - hid_t mem_space = -1; - hid_t file_space = -1; - hsize_t chunk_dims[MAX_RANK]; - herr_t ret; -#ifdef LATER /* fletcher32 */ - H5Z_filter_t filter_info; -#endif /* LATER */ - char message[256]; - - /* Set up MPI parameters */ - MPI_Comm_size(test_comm, &mpi_size); - MPI_Comm_rank(test_comm, &mpi_rank); - - MPI_Barrier(test_comm); - - HDassert(mpi_size >= 1); - - mpi_comm = test_comm; - mpi_info = MPI_INFO_NULL; - - /* Create the dataset creation plist */ - dcpl = H5Pcreate(H5P_DATASET_CREATE); - VRFY((dcpl >= 0), "dataset creation plist created successfully"); - - if (selection_mode == TEST_FILTERS_READ ) { -#ifdef LATER /* fletcher32 */ - ret = H5Zfilter_avail(H5Z_FILTER_FLETCHER32); - VRFY ((ret >=0 ), "Fletcher32 filter is available.\n"); - - ret = H5Zget_filter_info (H5Z_FILTER_FLETCHER32, (unsigned int *) &filter_info); - VRFY ( ( (filter_info & H5Z_FILTER_CONFIG_ENCODE_ENABLED) || (filter_info & H5Z_FILTER_CONFIG_DECODE_ENABLED) ) , "Fletcher32 filter encoding and decoding available.\n"); - - ret = H5Pset_fletcher32(dcpl); - VRFY((ret >= 0),"set filter (flecher32) succeeded"); -#endif /* LATER */ - } - else { - VRFY(0, "Unexpected mode, only test for TEST_FILTERS_READ."); - } - - /* Create the basic Space */ - dims[0] = dim0; - dims[1] = dim1; - sid = H5Screate_simple (MAX_RANK, dims, NULL); - VRFY((sid >= 0), "H5Screate_simple succeeded"); - - - filename = (const char *)GetTestParameters(); - HDassert(filename != NULL); - - /* Setup the file access template */ - fapl_write = create_faccess_plist(mpi_comm, mpi_info, FACC_DEFAULT); - VRFY((fapl_write >= 0), "create_faccess_plist() succeeded"); - - fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_write); - VRFY((fid >= 0), "H5Fcreate succeeded"); - - /* If we are not testing contiguous datasets */ - if(is_chunked) { - /* Set up chunk information. 
*/ - chunk_dims[0] = dims[0]/mpi_size; - chunk_dims[1] = dims[1]; - ret = H5Pset_chunk(dcpl, 2, chunk_dims); - VRFY((ret >= 0),"chunk creation property list succeeded"); - } - - - /* Create the dataset */ - dataset = H5Dcreate2(fid, DSET_NOCOLCAUSE, data_type, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT); - VRFY((dataset >= 0), "H5Dcreate2() dataset succeeded"); - -#ifdef LATER /* fletcher32 */ - /* Set expected cause */ - test_name = "Broken Collective I/O - Filter is required"; - no_collective_cause_local_expected = H5D_MPIO_FILTERS; - no_collective_cause_global_expected = H5D_MPIO_FILTERS; -#endif /* LATER */ - - /* Get the file dataspace */ - file_space = H5Dget_space(dataset); - VRFY((file_space >= 0), "H5Dget_space succeeded"); - - /* Create the memory dataspace */ - mem_space = H5Screate_simple (MAX_RANK, dims, NULL); - VRFY((mem_space >= 0), "mem_space created"); - - /* Get the number of elements in the selection */ - length = dim0 * dim1; - - /* Allocate and initialize the buffer */ - buffer = (int *)HDmalloc(sizeof(int) * length); - VRFY((buffer != NULL), "HDmalloc of buffer succeeded"); - for(i = 0; i < length; i++) - buffer[i] = i; - - /* Set up the dxpl for the write */ - dxpl = H5Pcreate(H5P_DATASET_XFER); - VRFY((dxpl >= 0), "H5Pcreate(H5P_DATASET_XFER) succeeded"); - - if (selection_mode == TEST_FILTERS_READ) { - /* To test read in collective I/O mode , write in independent mode - * because write fails with mpio + filter */ - ret = H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_INDEPENDENT); - VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded"); - } - else { - /* To test write in collective I/O mode. */ - ret = H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE); - VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded"); - } - - - /* Write */ - ret = H5Dwrite(dataset, data_type, mem_space, file_space, dxpl, buffer); - - if(ret < 0) H5Eprint2(H5E_DEFAULT, stdout); - VRFY((ret >= 0), "H5Dwrite() dataset multichunk write succeeded"); - - - /* Make a copy of the dxpl to test the read operation */ - dxpl = H5Pcopy(dxpl); - VRFY((dxpl >= 0), "H5Pcopy succeeded"); - - if (dataset) - H5Dclose(dataset); - if (fapl_write) - H5Pclose(fapl_write); - if (fid) - H5Fclose(fid); - - - /*--------------------- - * Test Read access - *---------------------*/ - - /* Setup the file access template */ - fapl_read = create_faccess_plist(mpi_comm, mpi_info, facc_type); - VRFY((fapl_read >= 0), "create_faccess_plist() succeeded"); - - fid = H5Fopen (filename, H5F_ACC_RDONLY, fapl_read); - dataset = H5Dopen2 (fid, DSET_NOCOLCAUSE, H5P_DEFAULT); - - /* Set collective I/O properties in the dxpl. 
 */
-    ret = H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);
-    VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
-
-    /* Read */
-    ret = H5Dread(dataset, data_type, mem_space, file_space, dxpl, buffer);
-
-    if(ret < 0) H5Eprint2(H5E_DEFAULT, stdout);
-    VRFY((ret >= 0), "H5Dread() dataset multichunk read succeeded");
-
-    /* Get the cause of broken collective I/O */
-    ret = H5Pget_mpio_no_collective_cause (dxpl, &no_collective_cause_local_read, &no_collective_cause_global_read);
-    VRFY((ret >= 0), "retrieving no collective cause succeeded" );
-
-    /* Test values */
-    HDmemset (message, 0, sizeof (message));
-    HDsprintf(message, "Local cause of Broken Collective I/O has the correct value for %s.\n",test_name);
-    VRFY((no_collective_cause_local_read == (uint32_t)no_collective_cause_local_expected), message);
-    HDmemset (message, 0, sizeof (message));
-    HDsprintf(message, "Global cause of Broken Collective I/O has the correct value for %s.\n",test_name);
-    VRFY((no_collective_cause_global_read == (uint32_t)no_collective_cause_global_expected), message);
-
-    /* Release some resources */
-    if (sid)
-        H5Sclose(sid);
-    if (fapl_read)
-        H5Pclose(fapl_read);
-    if (dcpl)
-        H5Pclose(dcpl);
-    if (dxpl)
-        H5Pclose(dxpl);
-    if (dataset)
-        H5Dclose(dataset);
-    if (mem_space)
-        H5Sclose(mem_space);
-    if (file_space)
-        H5Sclose(file_space);
-    if (fid)
-        H5Fclose(fid);
-    HDfree(buffer);
-    return;
-}
-#endif
-
-/* Function: no_collective_cause_tests
- *
- * Purpose: Test cases for broken collective I/O.
- *
- * Programmer: Jonathan Kim
- * Date: Aug, 2012
- */
-void
-no_collective_cause_tests(void)
-{
-    /*
-     * Test individual cause
-     */
-    test_no_collective_cause_mode (TEST_COLLECTIVE);
-    test_no_collective_cause_mode (TEST_SET_INDEPENDENT);
-    test_no_collective_cause_mode (TEST_DATATYPE_CONVERSION);
-    test_no_collective_cause_mode (TEST_DATA_TRANSFORMS);
-    test_no_collective_cause_mode (TEST_NOT_SIMPLE_OR_SCALAR_DATASPACES);
-    test_no_collective_cause_mode (TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_COMPACT);
-    test_no_collective_cause_mode (TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_EXTERNAL);
-#ifdef LATER /* fletcher32 */
-    /* TODO: use this instead of below TEST_FILTERS_READ when H5Dcreate and
-     * H5Dwrite are ready for the mpio + filter feature.
-     */
-    /* test_no_collective_cause_mode (TEST_FILTERS); */
-    test_no_collective_cause_mode_filter (TEST_FILTERS_READ);
-#endif /* LATER */
-
-    /*
-     * Test combined causes
-     */
-    test_no_collective_cause_mode (TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_EXTERNAL | TEST_DATATYPE_CONVERSION);
-    test_no_collective_cause_mode (TEST_DATATYPE_CONVERSION | TEST_DATA_TRANSFORMS);
-    test_no_collective_cause_mode (TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_EXTERNAL | TEST_DATATYPE_CONVERSION | TEST_DATA_TRANSFORMS);
-
-    return;
-}
-
-/*
- * Test consistency semantics of atomic mode
- */
-
-/*
- * Example of using the parallel HDF5 library to create a dataset,
- * where process 0 writes and the other processes read at the same
- * time. If atomic mode is set correctly, the other processes should
- * read either the old values in the dataset or the new ones.
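/*
 * A distilled sketch of the atomic-mode pattern described above, assuming
 * "fid" was opened through the MPI-IO driver and "dset", "buf" and "comm"
 * already exist; names and the fill value are illustrative, not taken from
 * the dataset_atomicity() test that follows.
 */
#include <mpi.h>
#include <hdf5.h>

static void atomic_write_then_read(hid_t fid, hid_t dset, int *buf,
                                   size_t nelem, int mpi_rank, MPI_Comm comm)
{
    hbool_t flag = 0;

    /* Turn on MPI atomicity so readers never see a half-written dataset */
    if (H5Fset_mpi_atomicity(fid, 1) < 0)
        return;
    H5Fget_mpi_atomicity(fid, &flag);        /* flag should now be true */

    if (mpi_rank == 0) {
        for (size_t i = 0; i < nelem; i++)   /* rank 0 fills and writes */
            buf[i] = 5;
        H5Dwrite(dset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf);
    }
    else {
        /* Readers see either all old values or all new ones, never a mix */
        H5Dread(dset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf);
    }

    MPI_Barrier(comm);                       /* line everyone up afterwards */
}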
- */ - -void -dataset_atomicity(void) -{ - hid_t fid; /* HDF5 file ID */ - hid_t acc_tpl; /* File access templates */ - hid_t sid; /* Dataspace ID */ - hid_t dataset1; /* Dataset IDs */ - hsize_t dims[MAX_RANK]; /* dataset dim sizes */ - int *write_buf = NULL; /* data buffer */ - int *read_buf = NULL; /* data buffer */ - int buf_size; - hid_t dataset2; - hid_t file_dataspace; /* File dataspace ID */ - hid_t mem_dataspace; /* Memory dataspace ID */ - hsize_t start[MAX_RANK]; - hsize_t stride[MAX_RANK]; - hsize_t count[MAX_RANK]; - hsize_t block[MAX_RANK]; - const char *filename; - herr_t ret; /* Generic return value */ - int mpi_size, mpi_rank; - int i, j, k; - hbool_t atomicity = FALSE; - MPI_Comm comm = test_comm; - MPI_Info info = MPI_INFO_NULL; - - dim0 = 64; dim1 = 32; - filename = GetTestParameters(); - if (facc_type != FACC_MPIO) { - HDprintf("Atomicity tests will not work without the MPIO VFD\n"); - return; - } - if(VERBOSE_MED) - HDprintf("atomic writes to file %s\n", filename); - - /* set up MPI parameters */ - MPI_Comm_size(test_comm,&mpi_size); - MPI_Comm_rank(test_comm,&mpi_rank); - - buf_size = dim0 * dim1; - /* allocate memory for data buffer */ - write_buf = (int *)HDcalloc(buf_size, sizeof(int)); - VRFY((write_buf != NULL), "write_buf HDcalloc succeeded"); - /* allocate memory for data buffer */ - read_buf = (int *)HDcalloc(buf_size, sizeof(int)); - VRFY((read_buf != NULL), "read_buf HDcalloc succeeded"); - - /* setup file access template */ - acc_tpl = create_faccess_plist(comm, info, facc_type); - VRFY((acc_tpl >= 0), ""); - - /* create the file collectively */ - fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, acc_tpl); - VRFY((fid >= 0), "H5Fcreate succeeded"); - - /* Release file-access template */ - ret = H5Pclose(acc_tpl); - VRFY((ret >= 0), "H5Pclose succeeded"); - - /* setup dimensionality object */ - dims[0] = dim0; - dims[1] = dim1; - sid = H5Screate_simple (MAX_RANK, dims, NULL); - VRFY((sid >= 0), "H5Screate_simple succeeded"); - - /* create datasets */ - dataset1 = H5Dcreate2(fid, DATASETNAME5, H5T_NATIVE_INT, sid, - H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - VRFY((dataset1 >= 0), "H5Dcreate2 succeeded"); - - dataset2 = H5Dcreate2(fid, DATASETNAME6, H5T_NATIVE_INT, sid, - H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT); - VRFY((dataset2 >= 0), "H5Dcreate2 succeeded"); - - /* initialize datasets to 0s */ - if (0 == mpi_rank) { - ret = H5Dwrite(dataset1, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, - H5P_DEFAULT, write_buf); - VRFY((ret >= 0), "H5Dwrite dataset1 succeeded"); - - ret = H5Dwrite(dataset2, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, - H5P_DEFAULT, write_buf); - VRFY((ret >= 0), "H5Dwrite dataset2 succeeded"); - } - - ret = H5Dclose(dataset1); - VRFY((ret >= 0), "H5Dclose succeeded"); - ret = H5Dclose(dataset2); - VRFY((ret >= 0), "H5Dclose succeeded"); - ret = H5Sclose(sid); - VRFY((ret >= 0), "H5Sclose succeeded"); - ret = H5Fclose(fid); - VRFY((ret >= 0), "H5Fclose succeeded"); - - MPI_Barrier (comm); - - /* make sure setting atomicity fails on a serial file ID */ - /* file locking allows only one file open (serial) for writing */ - if(MAINPROCESS){ - fid=H5Fopen(filename,H5F_ACC_RDWR,H5P_DEFAULT); - VRFY((fid >= 0), "H5Fopen succeeed"); - } - - /* should fail */ - ret = H5Fset_mpi_atomicity(fid , TRUE); - VRFY((ret == FAIL), "H5Fset_mpi_atomicity failed"); - - if(MAINPROCESS){ - ret = H5Fclose(fid); - VRFY((ret >= 0), "H5Fclose succeeded"); - } - - MPI_Barrier (comm); - - /* setup file access template */ - acc_tpl = create_faccess_plist(comm, info, facc_type); - 
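/*
 * H5Fset_mpi_atomicity() is only meaningful on a file handle backed by the
 * MPI-IO driver, which is why the call on the serially opened file above is
 * expected to fail. A stand-alone sketch of that check is shown below;
 * wrapping the expected failure in H5E_BEGIN_TRY/H5E_END_TRY is a common
 * HDF5 test idiom to keep the error stack quiet, not something the code
 * above does, and a parallel build is assumed.
 */
#include <stdio.h>
#include <hdf5.h>

static void check_atomicity_requires_mpio(hid_t serial_fid, hid_t mpio_fid)
{
    herr_t  ret;
    hbool_t flag = 0;

    H5E_BEGIN_TRY {
        ret = H5Fset_mpi_atomicity(serial_fid, 1);  /* should fail: no MPI-IO */
    } H5E_END_TRY;
    if (ret >= 0)
        printf("unexpected: atomicity accepted on a serial file handle\n");

    if (H5Fset_mpi_atomicity(mpio_fid, 1) >= 0 &&   /* enable atomic mode */
        H5Fget_mpi_atomicity(mpio_fid, &flag) >= 0)
        printf("atomic mode is now %s\n", flag ? "on" : "off");
}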
VRFY((acc_tpl >= 0), ""); - - /* open the file collectively */ - fid=H5Fopen(filename,H5F_ACC_RDWR,acc_tpl); - VRFY((fid >= 0), "H5Fopen succeeded"); - - /* Release file-access template */ - ret = H5Pclose(acc_tpl); - VRFY((ret >= 0), "H5Pclose succeeded"); - - ret = H5Fset_mpi_atomicity(fid , TRUE); - VRFY((ret >= 0), "H5Fset_mpi_atomicity succeeded"); - - /* open dataset1 (contiguous case) */ - dataset1 = H5Dopen2(fid, DATASETNAME5, H5P_DEFAULT); - VRFY((dataset1 >= 0), "H5Dopen2 succeeded"); - - if (0 == mpi_rank) { - for (i=0 ; i= 0), "atomcity get failed"); - VRFY((atomicity == TRUE), "atomcity set failed"); - - MPI_Barrier (comm); - - /* Process 0 writes contiguously to the entire dataset */ - if (0 == mpi_rank) { - ret = H5Dwrite(dataset1, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, write_buf); - VRFY((ret >= 0), "H5Dwrite dataset1 succeeded"); - } - /* The other processes read the entire dataset */ - else { - ret = H5Dread(dataset1, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, read_buf); - VRFY((ret >= 0), "H5Dwrite() dataset multichunk write succeeded"); - } - - if(VERBOSE_MED) { - i=0;j=0;k=0; - for (i=0 ; i= 0), "H5D close succeeded"); - - /* release data buffers */ - if(write_buf) HDfree(write_buf); - if(read_buf) HDfree(read_buf); - - /* open dataset2 (non-contiguous case) */ - dataset2 = H5Dopen2(fid, DATASETNAME6, H5P_DEFAULT); - VRFY((dataset2 >= 0), "H5Dopen2 succeeded"); - - /* allocate memory for data buffer */ - write_buf = (int *)HDcalloc(buf_size, sizeof(int)); - VRFY((write_buf != NULL), "write_buf HDcalloc succeeded"); - /* allocate memory for data buffer */ - read_buf = (int *)HDcalloc(buf_size, sizeof(int)); - VRFY((read_buf != NULL), "read_buf HDcalloc succeeded"); - - for (i=0 ; i= 0), "atomcity get failed"); - VRFY((atomicity == TRUE), "atomcity set failed"); - - - block[0] = dim0/mpi_size - 1; - block[1] = dim1/mpi_size - 1; - stride[0] = block[0] + 1; - stride[1] = block[1] + 1; - count[0] = mpi_size; - count[1] = mpi_size; - start[0] = 0; - start[1] = 0; - - /* create a file dataspace */ - file_dataspace = H5Dget_space (dataset2); - VRFY((file_dataspace >= 0), "H5Dget_space succeeded"); - ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block); - VRFY((ret >= 0), "H5Sset_hyperslab succeeded"); - - /* create a memory dataspace */ - mem_dataspace = H5Screate_simple (MAX_RANK, dims, NULL); - VRFY((mem_dataspace >= 0), ""); - - ret = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block); - VRFY((ret >= 0), "H5Sset_hyperslab succeeded"); - - MPI_Barrier (comm); - - /* Process 0 writes to the dataset */ - if (0 == mpi_rank) { - ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, - H5P_DEFAULT, write_buf); - VRFY((ret >= 0), "H5Dwrite dataset2 succeeded"); - } - /* All processes wait for the write to finish. 
This works because - atomicity is set to true */ - MPI_Barrier (comm); - /* The other processes read the entire dataset */ - if (0 != mpi_rank) { - ret = H5Dread(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace, - H5P_DEFAULT, read_buf); - VRFY((ret >= 0), "H5Dread dataset2 succeeded"); - } - - if(VERBOSE_MED) { - if (mpi_rank == 1) { - i=0;j=0;k=0; - for (i=0 ; i= mpi_rank*(block[0]+1)) { - break; - } - if ((i+1)%(block[0]+1)==0) { - k += dim1; - continue; - } - for (j=0 ; j= mpi_rank*(block[1]+1)) { - k += dim1 - mpi_rank*(block[1]+1); - break; - } - if ((j+1)%(block[1]+1)==0) { - k++; - continue; - } - else if (compare != read_buf[k]) { - HDprintf("Atomicity Test Failed Process %d: read_buf[%d] is %d, should be %d\n", mpi_rank, k, read_buf[k], compare); - nerrors++; - } - k ++; - } - } - } - - ret = H5Dclose(dataset2); - VRFY((ret >= 0), "H5Dclose succeeded"); - ret = H5Sclose(file_dataspace); - VRFY((ret >= 0), "H5Sclose succeeded"); - ret = H5Sclose(mem_dataspace); - VRFY((ret >= 0), "H5Sclose succeeded"); - - /* release data buffers */ - if(write_buf) HDfree(write_buf); - if(read_buf) HDfree(read_buf); - - ret = H5Fclose(fid); - VRFY((ret >= 0), "H5Fclose succeeded"); - -} - -/* Function: dense_attr_test - * - * Purpose: Test cases for writing dense attributes in parallel - * - * Programmer: Quincey Koziol - * Date: April, 2013 - */ -void -test_dense_attr(void) -{ - int mpi_size, mpi_rank; - hid_t fpid, fid; - hid_t gid, gpid; - hid_t atFileSpace, atid; - hsize_t atDims[1] = {10000}; - herr_t status; - const char *filename; - - /* get filename */ - filename = (const char *)GetTestParameters(); - HDassert( filename != NULL ); - - /* set up MPI parameters */ - MPI_Comm_size(test_comm,&mpi_size); - MPI_Comm_rank(test_comm,&mpi_rank); - - fpid = H5Pcreate(H5P_FILE_ACCESS); - VRFY((fpid > 0), "H5Pcreate succeeded"); - status = H5Pset_libver_bounds(fpid, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST); - VRFY((status >= 0), "H5Pset_libver_bounds succeeded"); - status = H5Pset_fapl_mpio(fpid, test_comm, MPI_INFO_NULL); - VRFY((status >= 0), "H5Pset_fapl_mpio succeeded"); - fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fpid); - VRFY((fid > 0), "H5Fcreate succeeded"); - status = H5Pclose(fpid); - VRFY((status >= 0), "H5Pclose succeeded"); - - gpid = H5Pcreate(H5P_GROUP_CREATE); - VRFY((gpid > 0), "H5Pcreate succeeded"); - status = H5Pset_attr_phase_change(gpid, 0, 0); - VRFY((status >= 0), "H5Pset_attr_phase_change succeeded"); - gid = H5Gcreate2(fid, "foo", H5P_DEFAULT, gpid, H5P_DEFAULT); - VRFY((gid > 0), "H5Gcreate2 succeeded"); - status = H5Pclose(gpid); - VRFY((status >= 0), "H5Pclose succeeded"); - - atFileSpace = H5Screate_simple(1, atDims, NULL); - VRFY((atFileSpace > 0), "H5Screate_simple succeeded"); - atid = H5Acreate2(gid, "bar", H5T_STD_U64LE, atFileSpace, H5P_DEFAULT, H5P_DEFAULT); - VRFY((atid > 0), "H5Acreate succeeded"); - status = H5Sclose(atFileSpace); - VRFY((status >= 0), "H5Sclose succeeded"); - - status = H5Aclose(atid); - VRFY((status >= 0), "H5Aclose succeeded"); - - status = H5Gclose(gid); - VRFY((status >= 0), "H5Gclose succeeded"); - status = H5Fclose(fid); - VRFY((status >= 0), "H5Fclose succeeded"); - - return; -} - - -int -main(int argc, char **argv) -{ - int express_test; - int mpi_size, mpi_rank; /* mpi variables */ - hsize_t oldsize, newsize = 1048576; - -#ifndef H5_HAVE_WIN32_API - /* Un-buffer the stdout and stderr */ - HDsetbuf(stderr, NULL); - HDsetbuf(stdout, NULL); -#endif - - - MPI_Init(&argc, &argv); - MPI_Comm_size(test_comm, &mpi_size); - 
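/*
 * The test_dense_attr() routine above forces attributes straight into dense
 * storage by setting both phase-change thresholds to zero on the group
 * creation property list (it also sets H5Pset_libver_bounds(...,
 * H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) on the file so the newer storage
 * layout can be used). A sketch of that core setup, separated from the MPI
 * scaffolding; the function name is illustrative:
 */
#include <hdf5.h>

static hid_t create_group_with_dense_attrs(hid_t fid, const char *name)
{
    hid_t gcpl, gid;

    gcpl = H5Pcreate(H5P_GROUP_CREATE);
    /* max_compact = 0, min_dense = 0: never use compact attribute storage */
    if (gcpl < 0 || H5Pset_attr_phase_change(gcpl, 0, 0) < 0)
        return H5I_INVALID_HID;

    gid = H5Gcreate2(fid, name, H5P_DEFAULT, gcpl, H5P_DEFAULT);
    H5Pclose(gcpl);
    return gid;
}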
MPI_Comm_rank(test_comm, &mpi_rank); - - dim0 = BIG_X_FACTOR; - dim1 = BIG_Y_FACTOR; - dim2 = BIG_Z_FACTOR; - - if (MAINPROCESS){ - HDprintf("===================================\n"); - HDprintf("2 GByte IO TESTS START\n"); - HDprintf("2 MPI ranks will run the tests...\n"); - HDprintf("===================================\n"); - h5_show_hostname(); - } - - if (H5dont_atexit() < 0){ - HDprintf("Failed to turn off atexit processing. Continue.\n"); - }; - H5open(); - /* Set the internal transition size to allow use of derived datatypes - * without having to actually read or write large datasets (>2GB). - */ - oldsize = H5_mpi_set_bigio_count(newsize); - - if (mpi_size > 2) { - int rank_color = 0; - if (mpi_rank >= 2) rank_color = 1; - if (MPI_Comm_split(test_comm, rank_color, mpi_rank, &test_comm) != MPI_SUCCESS) { - HDprintf("MPI returned an error. Exiting\n"); - } - } - - /* Initialize testing framework */ - if (mpi_rank < 2) { - TestInit(argv[0], usage, parse_options); - - /* Parse command line arguments */ - TestParseCmdLine(argc, argv); - - AddTest("idsetw", dataset_writeInd, NULL, - "dataset independent write", PARATESTFILE); - - AddTest("idsetr", dataset_readInd, NULL, - "dataset independent read", PARATESTFILE); - - AddTest("cdsetw", dataset_writeAll, NULL, - "dataset collective write", PARATESTFILE); - - AddTest("cdsetr", dataset_readAll, NULL, - "dataset collective read", PARATESTFILE); - - AddTest("eidsetw2", extend_writeInd2, NULL, - "extendible dataset independent write #2", PARATESTFILE); - - AddTest("selnone", none_selection_chunk, NULL, - "chunked dataset with none-selection", PARATESTFILE); - -#ifdef H5_HAVE_FILTER_DEFLATE - AddTest("cmpdsetr", compress_readAll, NULL, - "compressed dataset collective read", PARATESTFILE); -#endif /* H5_HAVE_FILTER_DEFLATE */ - - /* Display testing information */ - if (MAINPROCESS) - TestInfo(argv[0]); - - /* setup file access property list */ - fapl = H5Pcreate (H5P_FILE_ACCESS); - H5Pset_fapl_mpio(fapl, test_comm, MPI_INFO_NULL); - - /* Perform requested testing */ - PerformTests(); - } - - MPI_Barrier(MPI_COMM_WORLD); - - /* Restore the default bigio setting */ - H5_mpi_set_bigio_count(oldsize); - - express_test = GetTestExpress(); - if ((express_test == 0) && (mpi_rank < 2)) { - MpioTest2G(test_comm); - } - - MPI_Barrier(MPI_COMM_WORLD); - - if (mpi_rank == 0) - HDremove(FILENAME[0]); - - H5close(); - if (test_comm != MPI_COMM_WORLD) { - MPI_Comm_free(&test_comm); - } - MPI_Finalize(); - return 0; -} diff --git a/tools/lib/h5diff.c b/tools/lib/h5diff.c index 1b4bc56..c2153e5 100644 --- a/tools/lib/h5diff.c +++ b/tools/lib/h5diff.c @@ -258,8 +258,6 @@ free_exclude_path_list(diff_opt_t *opts) * Parameter: * table_out [OUT] : return the list *------------------------------------------------------------------------*/ -#pragma GCC diagnostic push -#pragma GCC diagnostic warning "-Wunused-but-set-variable" static void build_match_list (const char *objname1, trav_info_t *info1, const char *objname2, trav_info_t *info2, trav_table_t ** table_out, diff_opt_t *opts) @@ -376,7 +374,6 @@ done: *table_out = table; h5difftrace("build_match_list finish\n"); } -#pragma GCC diagnostic pop /*------------------------------------------------------------------------- diff --git a/tools/lib/h5diff_array.c b/tools/lib/h5diff_array.c index 55ad65c..2a45913 100644 --- a/tools/lib/h5diff_array.c +++ b/tools/lib/h5diff_array.c @@ -2245,9 +2245,6 @@ static hsize_t character_compare(char *mem1, char *mem2, hsize_t i, size_t u, 
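/*
 * The two h5diff hunks here drop GCC "diagnostic" pragmas that had scoped a
 * -Wunused-but-set-variable adjustment around individual functions. The
 * general shape of that pattern is sketched below; the function and variable
 * are illustrative only.
 */
#pragma GCC diagnostic push                                  /* save diagnostic state   */
#pragma GCC diagnostic warning "-Wunused-but-set-variable"   /* pin this one to warning */

static void function_with_tolerated_unused_variable(void)
{
    int scratch;     /* set but never read: reported as a warning only */
    scratch = 42;
}

#pragma GCC diagnostic pop                                   /* restore previous state  */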
*------------------------------------------------------------------------- */ -#pragma GCC diagnostic push -#pragma GCC diagnostic warning "-Wunused-but-set-variable" - static hsize_t character_compare_opt(unsigned char *mem1, unsigned char *mem2, hsize_t i, int rank, hsize_t *dims, hsize_t *acc, hsize_t *pos, diff_opt_t *opts, const char *obj1, const char *obj2, int *ph) { @@ -2308,7 +2305,6 @@ static hsize_t character_compare_opt(unsigned char *mem1, unsigned char *mem2, return nfound; } -#pragma GCC diagnostic pop /*------------------------------------------------------------------------- * Function: diff_float diff --git a/tools/lib/h5tools_utils.c b/tools/lib/h5tools_utils.c index 6fc9de4..47ce690 100644 --- a/tools/lib/h5tools_utils.c +++ b/tools/lib/h5tools_utils.c @@ -1279,7 +1279,7 @@ done: int h5tools_set_configured_fapl(hid_t fapl_id, const char vfd_name[], - void *fapl_t_ptr H5_ATTR_UNUSED) + void *fapl_t_ptr) { int ret_value = 1; diff --git a/tools/src/h5import/h5import.c b/tools/src/h5import/h5import.c index 65c2359..1eef5ab 100644 --- a/tools/src/h5import/h5import.c +++ b/tools/src/h5import/h5import.c @@ -1428,7 +1428,7 @@ static int processConfigurationFile(char *infile, struct Input *in) /* Initialize machine endian */ volatile uint32_t ibyte=0x01234567; /* 0 for big endian, 1 for little endian. */ - if ((*((volatile uint8_t*)(&ibyte))) == 0x67) { + if ((*((uint8_t*)(&ibyte))) == 0x67) { if ((kindex = OutputByteOrderStrToInt("LE")) == -1) { (void) HDfprintf(stderr, "%s", err11e); return (-1); diff --git a/tools/src/h5repack/h5repack_main.c b/tools/src/h5repack/h5repack_main.c index 16899a3..df8c7ef 100644 --- a/tools/src/h5repack/h5repack_main.c +++ b/tools/src/h5repack/h5repack_main.c @@ -139,8 +139,7 @@ static void usage(const char *prog) { PRINTVALSTREAM(rawoutstream, " 1: This is H5F_LIBVER_V18 in H5F_libver_t struct\n"); PRINTVALSTREAM(rawoutstream, " 2: This is H5F_LIBVER_V110 in H5F_libver_t struct\n"); PRINTVALSTREAM(rawoutstream, " 3: This is H5F_LIBVER_V112 in H5F_libver_t struct\n"); - PRINTVALSTREAM(rawoutstream, " 4: This is H5F_LIBVER_V114 in H5F_libver_t struct\n"); - PRINTVALSTREAM(rawoutstream, " (H5F_LIBVER_LATEST is aliased to H5F_LIBVER_V114 for this release\n"); + PRINTVALSTREAM(rawoutstream, " (H5F_LIBVER_LATEST is aliased to H5F_LIBVER_V112 for this release\n"); PRINTVALSTREAM(rawoutstream, "\n"); PRINTVALSTREAM(rawoutstream, " FS_STRATEGY is a string indicating the file space strategy used:\n"); PRINTVALSTREAM(rawoutstream, " FSM_AGGR:\n"); @@ -281,7 +280,7 @@ int read_info(const char *filename, pack_opt_t *options) char comp_info[1024]; FILE *fp = NULL; char c; - int i; + int i, rc = 1; int ret_value = EXIT_SUCCESS; if (NULL == (fp = HDfopen(filename, "r"))) { @@ -412,7 +411,7 @@ set_sort_order(const char *form) static int parse_command_line(int argc, const char **argv, pack_opt_t* options) { - int bound, opt; + int opt; int ret_value = 0; /* parse command line options */ @@ -492,21 +491,19 @@ int parse_command_line(int argc, const char **argv, pack_opt_t* options) break; case 'j': - bound = HDatoi(opt_arg); - if (bound < H5F_LIBVER_EARLIEST || bound > H5F_LIBVER_LATEST) { + options->low_bound = (H5F_libver_t)HDatoi(opt_arg); + if (options->low_bound < H5F_LIBVER_EARLIEST || options->low_bound > H5F_LIBVER_LATEST) { error_msg("in parsing low bound\n"); goto done; } - options->low_bound = bound; break; case 'k': - bound = HDatoi(opt_arg); - if (bound < H5F_LIBVER_EARLIEST || bound > H5F_LIBVER_LATEST) { + options->high_bound = 
(H5F_libver_t)HDatoi(opt_arg); + if (options->high_bound < H5F_LIBVER_EARLIEST || options->high_bound > H5F_LIBVER_LATEST) { error_msg("in parsing high bound\n"); goto done; } - options->high_bound = bound; break; case 'c': diff --git a/tools/test/h5jam/testh5jam.sh.in b/tools/test/h5jam/testh5jam.sh.in index bf705b4..3ae180b 100644 --- a/tools/test/h5jam/testh5jam.sh.in +++ b/tools/test/h5jam/testh5jam.sh.in @@ -474,9 +474,9 @@ UNJAMTEST () { # TOOLTEST_OUTPUT() { - if [ "$1" = "JAM" ]; then + if [ "$1" == "JAM" ]; then TOOLCMD=$JAM_BIN/$JAM - elif [ "$1" = "UNJAM" ]; then + elif [ "$1" == "UNJAM" ]; then TOOLCMD=$JAM_BIN/$UNJAM fi shift diff --git a/tools/test/h5repack/testfiles/h5repack-help.txt b/tools/test/h5repack/testfiles/h5repack-help.txt index 130cd72..5c67541 100644 --- a/tools/test/h5repack/testfiles/h5repack-help.txt +++ b/tools/test/h5repack/testfiles/h5repack-help.txt @@ -61,8 +61,7 @@ usage: h5repack [OPTIONS] file1 file2 1: This is H5F_LIBVER_V18 in H5F_libver_t struct 2: This is H5F_LIBVER_V110 in H5F_libver_t struct 3: This is H5F_LIBVER_V112 in H5F_libver_t struct - 4: This is H5F_LIBVER_V114 in H5F_libver_t struct - (H5F_LIBVER_LATEST is aliased to H5F_LIBVER_V114 for this release + (H5F_LIBVER_LATEST is aliased to H5F_LIBVER_V112 for this release FS_STRATEGY is a string indicating the file space strategy used: FSM_AGGR: -- cgit v0.12
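The reverted h5repack hunk above goes back to parsing the -j/-k version bounds by casting HDatoi()'s result directly into options->low_bound / options->high_bound and range-checking it afterwards. A minimal stand-alone sketch of that validation, assuming only that HDatoi() wraps atoi(); the helper name below is illustrative:

    #include <stdlib.h>
    #include <hdf5.h>

    /* Parse a numeric library-version bound, as h5repack's -j/-k options do.
     * Returns 0 on success and stores the bound through "out". */
    static int parse_libver_bound(const char *arg, H5F_libver_t *out)
    {
        int v = atoi(arg);                      /* HDatoi() is a wrapper over atoi() */

        if (v < H5F_LIBVER_EARLIEST || v > H5F_LIBVER_LATEST)
            return -1;                          /* outside the supported range */

        *out = (H5F_libver_t)v;
        return 0;
    }

The parsed value can then be handed to H5Pset_libver_bounds() when the output file's access property list is built.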