author     M. Scot Breitenfeld <brtnfld@hdfgroup.org>    2017-07-27 13:54:44 (GMT)
committer  M. Scot Breitenfeld <brtnfld@hdfgroup.org>    2017-07-27 13:54:44 (GMT)
commit     972beffb59f0f2ec15cd3829b5ae1cbfd8d0793f (patch)
tree       0f9e937a75a3581e6de5364aeb3ca737601efa10
parent     b7f19967d76f50890ff6ef1a7a756dc37c555538 (diff)
parent     634cc5f99d3e46ff615f003087d0a38d9a843898 (diff)
Merge branch 'develop' of ssh://bitbucket.hdfgroup.org:7999/~brtnfld/hdf5_msb into develop
-rw-r--r--  MANIFEST | 4
-rw-r--r--  c++/src/H5IdComponent.cpp | 4
-rw-r--r--  c++/src/H5PropList.cpp | 18
-rw-r--r--  config/cmake_ext_mod/HDFLibMacros.cmake | 10
-rw-r--r--  configure.ac | 39
-rw-r--r--  hl/tools/h5watch/CMakeTests.cmake | 11
-rw-r--r--  java/src/jni/h5Constants.c | 1
-rw-r--r--  m4/aclocal_fc.f90 | 2
-rw-r--r--  m4/aclocal_fc.m4 | 1
-rw-r--r--  release_docs/RELEASE.txt | 45
-rw-r--r--  src/H5.c | 7
-rw-r--r--  src/H5Dchunk.c | 7
-rw-r--r--  src/H5Ddeprec.c | 5
-rw-r--r--  src/H5Dint.c | 14
-rw-r--r--  src/H5FD.c | 279
-rw-r--r--  src/H5FSint.c | 26
-rw-r--r--  src/H5FSprivate.h | 3
-rw-r--r--  src/H5Fprivate.h | 4
-rw-r--r--  src/H5I.c | 4
-rw-r--r--  src/H5Ipublic.h | 36
-rw-r--r--  src/H5MF.c | 2
-rw-r--r--  src/H5MFsection.c | 34
-rw-r--r--  src/H5Oflush.c | 38
-rw-r--r--  src/H5Pfcpl.c | 3
-rw-r--r--  src/H5R.c | 10
-rw-r--r--  src/H5Smpio.c | 504
-rw-r--r--  src/H5VMprivate.h | 6
-rw-r--r--  test/CMakeLists.txt | 1
-rw-r--r--  test/CMakeTests.cmake | 17
-rw-r--r--  test/Makefile.am | 12
-rw-r--r--  test/dsets.c | 97
-rw-r--r--  test/fheap.c | 16
-rw-r--r--  test/filenotclosed.c | 146
-rw-r--r--  test/h5test.c | 100
-rw-r--r--  test/h5test.h | 1
-rw-r--r--  test/plugin.c | 3
-rw-r--r--  test/test_filenotclosed.sh.in | 41
-rw-r--r--  test/tfile.c | 17
-rw-r--r--  test/tmisc.c | 322
-rw-r--r--  testpar/CMakeLists.txt | 1
-rw-r--r--  testpar/Makefile.am | 2
-rw-r--r--  testpar/t_bigio.c | 2153
-rw-r--r--  tools/lib/h5diff.c | 2
-rw-r--r--  tools/lib/h5diff_array.c | 20
-rw-r--r--  tools/lib/h5tools.c | 6
-rw-r--r--  tools/lib/h5tools_dump.c | 6
-rw-r--r--  tools/lib/h5tools_ref.c | 2
-rw-r--r--  tools/lib/h5tools_str.c | 8
-rw-r--r--  tools/lib/h5tools_utils.c | 10
-rw-r--r--  tools/lib/io_timer.c | 7
-rw-r--r--  tools/lib/ph5diff.h | 4
-rw-r--r--  tools/src/h5copy/h5copy.c | 2
-rw-r--r--  tools/src/h5diff/h5diff_common.c | 2
-rw-r--r--  tools/src/h5diff/h5diff_main.c | 3
-rw-r--r--  tools/src/h5diff/ph5diff_main.c | 3
-rw-r--r--  tools/src/h5dump/h5dump.c | 2
-rw-r--r--  tools/src/h5dump/h5dump_ddl.c | 2
-rw-r--r--  tools/src/h5dump/h5dump_xml.c | 2
-rw-r--r--  tools/src/h5repack/h5repack.c | 4
-rw-r--r--  tools/src/h5stat/h5stat.c | 2
-rw-r--r--  tools/src/misc/h5mkgrp.c | 2
-rw-r--r--  tools/src/misc/h5repart.c | 34
-rw-r--r--  tools/test/h5copy/h5copygentest.c | 1
-rw-r--r--  tools/test/h5diff/CMakeTests.cmake | 8
-rw-r--r--  tools/test/h5diff/testfiles/h5diff_vlstr.txt | 16
-rw-r--r--  tools/test/h5diff/testh5diff.sh.in | 5
-rw-r--r--  tools/test/h5dump/h5dumpgentest.c | 1
-rw-r--r--  tools/test/h5import/h5importtest.c | 1
-rw-r--r--  tools/test/h5jam/h5jamgentest.c | 2
-rw-r--r--  tools/test/h5jam/tellub.c | 6
-rw-r--r--  tools/test/h5repack/testh5repack_detect_szip.c | 1
-rw-r--r--  tools/test/misc/repart_test.c | 95
-rw-r--r--  tools/test/misc/talign.c | 3
-rw-r--r--  tools/test/perform/CMakeTests.cmake | 1
74 files changed, 3617 insertions, 692 deletions
diff --git a/MANIFEST b/MANIFEST
index b890352..e5a038d 100644
--- a/MANIFEST
+++ b/MANIFEST
@@ -956,6 +956,7 @@
./test/filespace_1_8.h5
./test/filespace_1_6.h5
./test/freespace.c
+./test/filenotclosed.c
./test/file_image.c
./test/file_image_core_test.h5
./test/fill_old.h5
@@ -1057,6 +1058,7 @@
./test/testcheck_version.sh.in
./test/testerror.sh.in
./test/testlinks_env.sh.in
+./test/test_filenotclosed.sh.in
./test/testflushrefresh.sh.in
./test/testframe.c
./test/testhdf5.c
@@ -1220,6 +1222,7 @@
./testpar/COPYING
./testpar/Makefile.am
+./testpar/t_bigio.c
./testpar/t_cache.c
./testpar/t_cache_image.c
./testpar/t_chunk_alloc.c
@@ -2395,6 +2398,7 @@
./tools/test/h5diff/testfiles/h5diff_udfail.txt
./tools/test/h5diff/testfiles/diff_strings1.h5
./tools/test/h5diff/testfiles/diff_strings2.h5
+./tools/test/h5diff/testfiles/h5diff_vlstr.txt
#vds
./tools/test/h5diff/testfiles/h5diff_v1.txt
./tools/test/h5diff/testfiles/h5diff_v2.txt
diff --git a/c++/src/H5IdComponent.cpp b/c++/src/H5IdComponent.cpp
index f9a08cd..284c6c5 100644
--- a/c++/src/H5IdComponent.cpp
+++ b/c++/src/H5IdComponent.cpp
@@ -174,7 +174,7 @@ H5I_type_t IdComponent::getHDFObjType() const
/// \li \c H5I_DATASPACE
/// \li \c H5I_DATASET
/// \li \c H5I_ATTR
-/// \li \c H5I_REFERENCE
+/// \li \c H5I_REFERENCE (DEPRECATED)
/// \li \c H5I_VFL
/// \li \c H5I_GENPROP_CLS
/// \li \c H5I_GENPROP_LST
@@ -227,7 +227,7 @@ bool IdComponent::isValid(hid_t an_id)
/// \li \c H5I_DATASPACE
/// \li \c H5I_DATASET
/// \li \c H5I_ATTR
-/// \li \c H5I_REFERENCE
+/// \li \c H5I_REFERENCE (DEPRECATED)
/// \li \c H5I_VFL
/// \li \c H5I_GENPROP_CLS
/// \li \c H5I_GENPROP_LST
diff --git a/c++/src/H5PropList.cpp b/c++/src/H5PropList.cpp
index dd7b21a..16a6316 100644
--- a/c++/src/H5PropList.cpp
+++ b/c++/src/H5PropList.cpp
@@ -138,6 +138,24 @@ PropList::PropList(const hid_t plist_id) : IdComponent()
throw PropListIException("PropList constructor", "H5Pcopy failed");
}
break;
+ /* These should really be error cases, but changing that breaks
+ * the stated behavior and causes test failures.
+ * (DER, July 2017)
+ */
+ case H5I_BADID:
+ case H5I_FILE:
+ case H5I_GROUP:
+ case H5I_DATATYPE:
+ case H5I_DATASPACE:
+ case H5I_DATASET:
+ case H5I_ATTR:
+ case H5I_REFERENCE:
+ case H5I_VFL:
+ case H5I_ERROR_CLASS:
+ case H5I_ERROR_MSG:
+ case H5I_ERROR_STACK:
+ case H5I_NTYPES:
+ case H5I_UNINIT:
default:
id = H5P_DEFAULT;
break;
diff --git a/config/cmake_ext_mod/HDFLibMacros.cmake b/config/cmake_ext_mod/HDFLibMacros.cmake
index f2e03d7..54e408b 100644
--- a/config/cmake_ext_mod/HDFLibMacros.cmake
+++ b/config/cmake_ext_mod/HDFLibMacros.cmake
@@ -74,14 +74,14 @@ macro (EXTERNAL_JPEG_LIBRARY compress_type jpeg_pic)
HDF_IMPORT_SET_LIB_OPTIONS (jpeg-static "jpeg" STATIC "")
add_dependencies (JPEG jpeg-static)
set (JPEG_STATIC_LIBRARY "jpeg-static")
- set (JPEG_LIBRARIES ${JPEG_static_LIBRARY})
+ set (JPEG_LIBRARIES ${JPEG_STATIC_LIBRARY})
if (BUILD_SHARED_LIBS)
# Create imported target jpeg-shared
add_library(jpeg-shared SHARED IMPORTED)
HDF_IMPORT_SET_LIB_OPTIONS (jpeg-shared "jpeg" SHARED "")
add_dependencies (JPEG jpeg-shared)
set (JPEG_SHARED_LIBRARY "jpeg-shared")
- set (JPEG_LIBRARIES ${JPEG_LIBRARIES} ${JPEG_shared_LIBRARY})
+ set (JPEG_LIBRARIES ${JPEG_LIBRARIES} ${JPEG_SHARED_LIBRARY})
endif ()
set (JPEG_INCLUDE_DIR_GEN "${BINARY_DIR}")
@@ -167,14 +167,14 @@ macro (EXTERNAL_SZIP_LIBRARY compress_type encoding)
HDF_IMPORT_SET_LIB_OPTIONS (szip-static "szip" STATIC "")
add_dependencies (SZIP szip-static)
set (SZIP_STATIC_LIBRARY "szip-static")
- set (SZIP_LIBRARIES ${SZIP_static_LIBRARY})
+ set (SZIP_LIBRARIES ${SZIP_STATIC_LIBRARY})
if (BUILD_SHARED_LIBS)
# Create imported target szip-shared
add_library(szip-shared SHARED IMPORTED)
HDF_IMPORT_SET_LIB_OPTIONS (szip-shared "szip" SHARED "")
add_dependencies (SZIP szip-shared)
set (SZIP_SHARED_LIBRARY "szip-shared")
- set (SZIP_LIBRARIES ${SZIP_LIBRARIES} ${SZIP_shared_LIBRARY})
+ set (SZIP_LIBRARIES ${SZIP_LIBRARIES} ${SZIP_SHARED_LIBRARY})
endif ()
set (SZIP_INCLUDE_DIR_GEN "${BINARY_DIR}")
@@ -262,7 +262,7 @@ macro (EXTERNAL_ZLIB_LIBRARY compress_type)
HDF_IMPORT_SET_LIB_OPTIONS (zlib-static ${ZLIB_LIB_NAME} STATIC "")
add_dependencies (ZLIB zlib-static)
set (ZLIB_STATIC_LIBRARY "zlib-static")
- set (ZLIB_LIBRARIES ${ZLIB_static_LIBRARY})
+ set (ZLIB_LIBRARIES ${ZLIB_STATIC_LIBRARY})
if (BUILD_SHARED_LIBS)
# Create imported target zlib-shared
add_library(zlib-shared SHARED IMPORTED)
diff --git a/configure.ac b/configure.ac
index 9695fe3..163f6d3 100644
--- a/configure.ac
+++ b/configure.ac
@@ -585,11 +585,11 @@ if test "X$HDF_FORTRAN" = "Xyes"; then
AC_DEFINE([FORTRAN_HAVE_STORAGE_SIZE], [1], [Define if we have Fortran intrinsic STORAGE_SIZE])
fi
- if test "X$HAVE_C_SIZEOF_FORTRAN" = "Xyes"; then
+ if test "X$HAVE_C_SIZEOF_FORTRAN" = "Xyes"; then
AC_DEFINE([FORTRAN_HAVE_C_SIZEOF], [1], [Define if we have Fortran intrinsic C_SIZEOF])
fi
- if test "X$HAVE_SIZEOF_FORTRAN" = "Xyes"; then
+ if test "X$HAVE_SIZEOF_FORTRAN" = "Xyes"; then
AC_DEFINE([FORTRAN_HAVE_SIZEOF], [1], [Define if we have Fortran intrinsic SIZEOF])
fi
@@ -604,7 +604,7 @@ if test "X$HDF_FORTRAN" = "Xyes"; then
## Is C_LONG_DOUBLE different from C_DOUBLE
FORTRAN_C_LONG_DOUBLE_IS_UNIQUE="0"
- if test "X$FORTRAN_HAVE_C_LONG_DOUBLE"; then
+ if test "$FORTRAN_HAVE_C_LONG_DOUBLE" = "1"; then
PAC_PROG_FC_C_LONG_DOUBLE_EQ_C_DOUBLE
if test "X$C_LONG_DOUBLE_IS_UNIQUE_FORTRAN" = "Xyes"; then
FORTRAN_C_LONG_DOUBLE_IS_UNIQUE="1"
@@ -1251,10 +1251,11 @@ AC_ARG_WITH([fnord],
])
## ----------------------------------------------------------------------
-## Is the dmalloc present? It has a header file `dmalloc.h' and a library
-## `-ldmalloc' and their locations might be specified with the `--with-dmalloc'
-## command-line switch. The value is an include path and/or a library path.
-## If the library path is specified then it must be preceded by a comma.
+## Is dmalloc (debug malloc library) requested? It has a header file
+## `dmalloc.h' and a library `-ldmalloc' and their locations might be
+## specified with the `--with-dmalloc' command-line switch. The value
+## is an include path and/or a library path. If the library path is
+## specified then it must be preceded by a comma.
##
AC_SUBST([HAVE_DMALLOC])
@@ -1940,26 +1941,17 @@ AC_DEFINE_UNQUOTED([PRINTF_LL_WIDTH], ["$hdf5_cv_printf_ll"],
[Width for printf() for type `long long' or `__int64', use `ll'])
## ----------------------------------------------------------------------
-## Deprecate old ways of determining debug/production build
-## These can probably be removed in the future (1.10.1?)
+## Remove old ways of determining debug/production build.
+## These were used in 1.8.x and earlier. We should probably keep these checks
+## around to help people migrate to 1.10.x and newer versions.
##
AC_ARG_ENABLE([debug],
- [AS_HELP_STRING([--enable-debug],
- [DEPRECATED: use --enable-build-mode=debug])],
- [DEPRECATED_DEBUG=$enableval])
-
-if test "X-$DEPRECATED_DEBUG" != "X-" ; then
- AC_MSG_ERROR([--enable-debug is deprecated, use --enable-build-mode=debug instead.])
-fi
+ [AS_HELP_STRING([--enable-debug], [DEPRECATED: use --enable-build-mode=debug])],
+ [AC_MSG_ERROR([--enable-debug is deprecated, use --enable-build-mode=debug instead.])])
AC_ARG_ENABLE([production],
- [AS_HELP_STRING([--enable-production],
- [DEPRECATED: use --enable-build-mode=production])],
- [DEPRECATED_PRODUCTION=$enableval])
-
-if test "X-$DEPRECATED_PRODUCTION" != "X-" ; then
- AC_MSG_ERROR([--enable-production is deprecated, use --enable-build-mode=production instead.])
-fi
+ [AS_HELP_STRING([--enable-production], [DEPRECATED: use --enable-build-mode=production])],
+ [AC_MSG_ERROR([--enable-production is deprecated, use --enable-build-mode=production instead.])])
## ----------------------------------------------------------------------
@@ -3361,6 +3353,7 @@ AC_CONFIG_FILES([src/libhdf5.settings
test/H5srcdir_str.h
test/testlibinfo.sh
test/testlinks_env.sh
+ test/test_filenotclosed.sh
test/testswmr.sh
test/test_plugin.sh
test/test_usecases.sh
diff --git a/hl/tools/h5watch/CMakeTests.cmake b/hl/tools/h5watch/CMakeTests.cmake
index 35e7829..0b7b4d4 100644
--- a/hl/tools/h5watch/CMakeTests.cmake
+++ b/hl/tools/h5watch/CMakeTests.cmake
@@ -56,18 +56,11 @@ set (H5WATCH_TEST_FILES
# make test dir
file (MAKE_DIRECTORY "${PROJECT_BINARY_DIR}/testfiles")
-add_custom_target(h5watch-files ALL COMMENT "Copying files needed by h5watch tests")
foreach (h5watch_file ${H5WATCH_TEST_FILES})
- set (dest "${PROJECT_BINARY_DIR}/testfiles/${h5watch_file}")
- #message (STATUS " Copying ${h5watch_file}")
- add_custom_command (
- TARGET h5watch-files
- POST_BUILD
- COMMAND ${CMAKE_COMMAND}
- ARGS -E copy_if_different ${HDF5_HL_TOOLS_DIR}/testfiles/${h5watch_file} ${dest}
- )
+ HDFTEST_COPY_FILE("${HDF5_HL_TOOLS_DIR}/testfiles/${h5watch_file}" "${PROJECT_BINARY_DIR}/testfiles/${h5watch_file}" "H5WATCH_files")
endforeach ()
+add_custom_target(H5WATCH_files ALL COMMENT "Copying files needed by H5WATCH tests" DEPENDS ${H5WATCH_files_list})
##############################################################################
##############################################################################
diff --git a/java/src/jni/h5Constants.c b/java/src/jni/h5Constants.c
index d4511e1..f6f8bfa 100644
--- a/java/src/jni/h5Constants.c
+++ b/java/src/jni/h5Constants.c
@@ -26,6 +26,7 @@ extern "C" {
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wmissing-prototypes"
+#pragma GCC diagnostic ignored "-Wunused-parameter"
JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5_1QUARTER_1HADDR_1MAX(JNIEnv *env, jclass cls) { return (hsize_t)HADDR_MAX/4; }
diff --git a/m4/aclocal_fc.f90 b/m4/aclocal_fc.f90
index 4c58e4e..664a3c6 100644
--- a/m4/aclocal_fc.f90
+++ b/m4/aclocal_fc.f90
@@ -58,7 +58,7 @@ END PROGRAM PROG_FC_HAVE_F2003_REQUIREMENTS
!---- START ----- Check to see C_LONG_DOUBLE is different from C_DOUBLE
MODULE type_mod
USE ISO_C_BINDING
- INTERFACE h5t
+ INTERFACE h5t
MODULE PROCEDURE h5t_c_double
MODULE PROCEDURE h5t_c_long_double
END INTERFACE
diff --git a/m4/aclocal_fc.m4 b/m4/aclocal_fc.m4
index 0bf3cb1..ab45bbc 100644
--- a/m4/aclocal_fc.m4
+++ b/m4/aclocal_fc.m4
@@ -476,4 +476,3 @@ rm -f pac_Cconftest.out
],[])
])
-
diff --git a/release_docs/RELEASE.txt b/release_docs/RELEASE.txt
index 20d58b3..cd29900 100644
--- a/release_docs/RELEASE.txt
+++ b/release_docs/RELEASE.txt
@@ -62,7 +62,29 @@ New Features
Parallel Library:
-----------------
- -
+ - Large MPI-IO transfers
+
+ Previous releases of PHDF5 would fail when attempting to
+ read or write greater than 2GB of data in a single IO operation.
+ This issue stems principally from an MPI API whose definitions
+ utilize 32 bit integers to describe the number of data elements
+ and datatype that MPI should use to effect a data transfer.
+ Historically, HDF5 has invoked MPI-IO with the number of
+ elements in a contiguous buffer represented as the length
+ of that buffer in bytes.
+
+ Resolving the issue and thus enabling larger MPI-IO transfers
+ is accomplished first, by detecting when a user IO request would
+ exceed the 2GB limit as described above. Once a transfer request
+ is identified as requiring special handling, PHDF5 now creates a
+ derived datatype consisting of a vector of fixed sized blocks
+ which is in turn wrapped within a single MPI_Type_struct to
+ contain the vector and any remaining data. The newly created
+ datatype is then used in place of MPI_BYTE and can be used to
+ fulfill the original user request without encountering API
+ errors.
+
+ (RAW – 2017/07/11, HDFFV-8839)
Fortran Library:
----------------
@@ -105,7 +127,15 @@ Bug Fixes since HDF5-1.10.1 release
Configuration
-------------
- -
+ - cmake
+
+ Too many commands for POST_BUILD step caused command line to be
+ too big on windows.
+
+ Changed foreach of copy command to use a custom command with the
+ use of the HDFTEST_COPY_FILE macro.
+
+ (ADB - 2017/07/12, HDFFV-10254)
Performance
-------------
@@ -117,6 +147,15 @@ Bug Fixes since HDF5-1.10.1 release
Tools
-----
+ - h5diff
+
+ h5diff segfaulted on compare of a NULL variable length string.
+
+ Improved h5diff compare of strings by adding a check for
+ NULL strings and setting the lengths to zero.
+
+ (ADB - 2017/07/25, HDFFV-10246)
+
- h5import
h5import crashed trying to import data from a subset of a dataset.
@@ -127,7 +166,7 @@ Bug Fixes since HDF5-1.10.1 release
The import from h5dump function expects the binary files to use native
types (FILE '-b' option) in the binary file.
- (ADB - 2017/06/15, HDFFV-102191)
+ (ADB - 2017/06/15, HDFFV-10219)
- h5repack
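The RELEASE.txt entry above (HDFFV-8839) describes the derived-datatype workaround for MPI's 32-bit count arguments. The sketch below illustrates the general idea only: it is not the library's internal H5S_mpio_create_large_type routine, and the helper name and block size are invented for illustration. A vector of fixed-size blocks is wrapped, together with any leftover bytes, in a single MPI_Type_struct so the whole buffer can be handed to MPI-IO with a count of 1.

    #include <mpi.h>

    /* Illustrative sketch only (not HDF5 source): describe 'total_bytes' of
     * contiguous memory as one derived datatype so an MPI-IO call can pass
     * count = 1 instead of a byte count that overflows a 32-bit int. */
    static int
    build_large_bytes_type(MPI_Offset total_bytes, MPI_Datatype *new_type)
    {
        const MPI_Offset block_len = 1073741824;                      /* 1 GiB, fits in an int  */
        int              nblocks   = (int)(total_bytes / block_len);  /* number of full blocks  */
        int              leftover  = (int)(total_bytes % block_len);  /* remaining bytes        */
        MPI_Datatype     vec_type, types[2];
        int              lens[2];
        MPI_Aint         disps[2];

        /* Vector of fixed-size blocks (stride == block length, so contiguous) */
        if (MPI_SUCCESS != MPI_Type_vector(nblocks, (int)block_len, (int)block_len,
                                           MPI_BYTE, &vec_type))
            return -1;

        /* Wrap the vector and the leftover bytes in a single struct type */
        types[0] = vec_type;  lens[0] = 1;         disps[0] = 0;
        types[1] = MPI_BYTE;  lens[1] = leftover;  disps[1] = (MPI_Aint)nblocks * block_len;
        if (MPI_SUCCESS != MPI_Type_create_struct(2, lens, disps, types, new_type))
            return -1;

        if (MPI_SUCCESS != MPI_Type_commit(new_type))
            return -1;
        MPI_Type_free(&vec_type);   /* constituent no longer needed after commit */
        return 0;
    }

The committed type is then used in place of MPI_BYTE with a count of 1 in the subsequent MPI-IO read or write, which is the substitution the release note describes.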
diff --git a/src/H5.c b/src/H5.c
index 1068fc6..59984dc 100644
--- a/src/H5.c
+++ b/src/H5.c
@@ -29,6 +29,7 @@
#include "H5Pprivate.h" /* Property lists */
#include "H5SLprivate.h" /* Skip lists */
#include "H5Tprivate.h" /* Datatypes */
+#include "H5FSprivate.h" /* File free space */
/****************/
/* Local Macros */
@@ -204,6 +205,10 @@ H5_init_library(void)
* property classes.
* The link interface needs to be initialized so that link property lists
* have their properties registered.
+ * The FS module needs to be initialized as a result of the fix for HDFFV-10160:
+ * It might not be initialized during normal file open.
+ * When the application does not close the file, routines in the module might
+ * be called via H5_term_library() when shutting down the file.
*/
if(H5E_init() < 0)
HGOTO_ERROR(H5E_FUNC, H5E_CANTINIT, FAIL, "unable to initialize error interface")
@@ -217,6 +222,8 @@ H5_init_library(void)
HGOTO_ERROR(H5E_FUNC, H5E_CANTINIT, FAIL, "unable to initialize metadata caching interface")
if(H5L_init() < 0)
HGOTO_ERROR(H5E_FUNC, H5E_CANTINIT, FAIL, "unable to initialize link interface")
+ if(H5FS_init() < 0)
+ HGOTO_ERROR(H5E_FUNC, H5E_CANTINIT, FAIL, "unable to initialize FS interface")
/* Debugging? */
H5_debug_mask("-all");
diff --git a/src/H5Dchunk.c b/src/H5Dchunk.c
index d693466..b7b8b03 100644
--- a/src/H5Dchunk.c
+++ b/src/H5Dchunk.c
@@ -1019,11 +1019,16 @@ H5D__chunk_init(H5F_t *f, hid_t dxpl_id, const H5D_t *dset, hid_t dapl_id)
unsigned u; /* Local index value */
for(u = 0; u < dset->shared->ndims; u++) {
+ hsize_t scaled_power2up; /* Scaled value, rounded to next power of 2 */
+
/* Initial scaled dimension sizes */
rdcc->scaled_dims[u] = dset->shared->curr_dims[u] / dset->shared->layout.u.chunk.dim[u];
+ if( !(scaled_power2up = H5VM_power2up(rdcc->scaled_dims[u])) )
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "unable to get the next power of 2")
+
/* Inital 'power2up' values for scaled dimensions */
- rdcc->scaled_power2up[u] = H5VM_power2up(rdcc->scaled_dims[u]);
+ rdcc->scaled_power2up[u] = scaled_power2up;
/* Number of bits required to encode scaled dimension size */
rdcc->scaled_encode_bits[u] = H5VM_log2_gen(rdcc->scaled_power2up[u]);
diff --git a/src/H5Ddeprec.c b/src/H5Ddeprec.c
index 8d9461c..0807048 100644
--- a/src/H5Ddeprec.c
+++ b/src/H5Ddeprec.c
@@ -329,8 +329,11 @@ H5D__extend(H5D_t *dataset, const hsize_t *size, hid_t dxpl_id)
dataset->shared->cache.chunk.scaled_dims[u] > dataset->shared->cache.chunk.nslots))
update_chunks = TRUE;
+ if( !(scaled_power2up = H5VM_power2up(scaled)) )
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "unable to get the next power of 2")
+
/* Check if the number of bits required to encode the scaled size value changed */
- if(dataset->shared->cache.chunk.scaled_power2up[u] != (scaled_power2up = H5VM_power2up(scaled))) {
+ if(dataset->shared->cache.chunk.scaled_power2up[u] != scaled_power2up) {
/* Update the 'power2up' & 'encode_bits' values for the current dimension */
dataset->shared->cache.chunk.scaled_power2up[u] = scaled_power2up;
dataset->shared->cache.chunk.scaled_encode_bits[u] = H5VM_log2_gen(scaled_power2up);
diff --git a/src/H5Dint.c b/src/H5Dint.c
index 08b3eb8..3b938e2 100644
--- a/src/H5Dint.c
+++ b/src/H5Dint.c
@@ -745,8 +745,13 @@ H5D__cache_dataspace_info(const H5D_t *dset)
dset->shared->ndims = (unsigned)sndims;
/* Compute the inital 'power2up' values */
- for(u = 0; u < dset->shared->ndims; u++)
- dset->shared->curr_power2up[u] = H5VM_power2up(dset->shared->curr_dims[u]);
+ for(u = 0; u < dset->shared->ndims; u++) {
+ hsize_t scaled_power2up; /* Scaled value, rounded to next power of 2 */
+
+ if( !(scaled_power2up = H5VM_power2up(dset->shared->curr_dims[u])) )
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "unable to get the next power of 2")
+ dset->shared->curr_power2up[u] = scaled_power2up;
+ }
done:
FUNC_LEAVE_NOAPI(ret_value)
@@ -2809,8 +2814,11 @@ H5D__set_extent(H5D_t *dset, const hsize_t *size, hid_t dxpl_id)
dset->shared->cache.chunk.scaled_dims[u] > dset->shared->cache.chunk.nslots))
update_chunks = TRUE;
+ if( !(scaled_power2up = H5VM_power2up(scaled)) )
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "unable to get the next power of 2")
+
/* Check if the number of bits required to encode the scaled size value changed */
- if(dset->shared->cache.chunk.scaled_power2up[u] != (scaled_power2up = H5VM_power2up(scaled))) {
+ if(dset->shared->cache.chunk.scaled_power2up[u] != scaled_power2up) {
/* Update the 'power2up' & 'encode_bits' values for the current dimension */
dset->shared->cache.chunk.scaled_power2up[u] = scaled_power2up;
dset->shared->cache.chunk.scaled_encode_bits[u] = H5VM_log2_gen(scaled_power2up);
diff --git a/src/H5FD.c b/src/H5FD.c
index 67cf963..9f62065 100644
--- a/src/H5FD.c
+++ b/src/H5FD.c
@@ -12,14 +12,11 @@
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
- * Programmer: Robb Matzke <matzke@llnl.gov>
- * Monday, July 26, 1999
- *
- * Purpose: The Virtual File Layer as described in documentation.
- * This is the greatest common denominator for all types of
- * storage access whether a file, memory, network, etc. This
- * layer usually just dispatches the request to an actual
- * file driver layer.
+ * Purpose: The Virtual File Layer as described in documentation.
+ * This is the greatest common denominator for all types of
+ * storage access whether a file, memory, network, etc. This
+ * layer usually just dispatches the request to an actual
+ * file driver layer.
*/
/****************/
@@ -112,9 +109,6 @@ static const H5I_class_t H5I_VFL_CLS[1] = {{
* Return: Success: Non-negative
* Failure: Negative
*
- * Programmer: Robb Matzke
- * Monday, July 26, 1999
- *
*-------------------------------------------------------------------------
*/
herr_t
@@ -147,9 +141,6 @@ done:
* otherwise.
* Failure: Never fails.
*
- * Programmer: Robb Matzke
- * Friday, February 19, 1999
- *
*-------------------------------------------------------------------------
*/
int
@@ -189,11 +180,6 @@ H5FD_term_package(void)
*
* Failure: Negative
*
- * Programmer: Robb Matzke
- * Monday, July 26, 1999
- *
- * Modifications:
- *
*-------------------------------------------------------------------------
*/
static herr_t
@@ -221,26 +207,18 @@ done:
/*-------------------------------------------------------------------------
- * Function: H5FDregister
- *
- * Purpose: Registers a new file driver as a member of the virtual file
- * driver class. Certain fields of the class struct are
- * required and that is checked here so it doesn't have to be
- * checked every time the field is accessed.
- *
- * Return: Success: A file driver ID which is good until the
- * library is closed or the driver is
- * unregistered.
+ * Function: H5FDregister
*
- * Failure: A negative value.
+ * Purpose: Registers a new file driver as a member of the virtual file
+ * driver class. Certain fields of the class struct are
+ * required and that is checked here so it doesn't have to be
+ * checked every time the field is accessed.
*
- * Programmer: Robb Matzke
- * Monday, July 26, 1999
+ * Return: Success: A file driver ID which is good until the
+ * library is closed or the driver is
+ * unregistered.
*
- * Modifications:
- * Copied guts of function into H5FD_register
- * Quincey Koziol
- * Friday, January 30, 2004
+ * Failure: A negative value.
*
*-------------------------------------------------------------------------
*/
@@ -255,18 +233,18 @@ H5FDregister(const H5FD_class_t *cls)
/* Check arguments */
if(!cls)
- HGOTO_ERROR(H5E_ARGS, H5E_UNINITIALIZED, FAIL, "null class pointer is disallowed")
+ HGOTO_ERROR(H5E_ARGS, H5E_UNINITIALIZED, FAIL, "null class pointer is disallowed")
if(!cls->open || !cls->close)
- HGOTO_ERROR(H5E_ARGS, H5E_UNINITIALIZED, FAIL, "`open' and/or `close' methods are not defined")
+ HGOTO_ERROR(H5E_ARGS, H5E_UNINITIALIZED, FAIL, "`open' and/or `close' methods are not defined")
if(!cls->get_eoa || !cls->set_eoa)
- HGOTO_ERROR(H5E_ARGS, H5E_UNINITIALIZED, FAIL, "`get_eoa' and/or `set_eoa' methods are not defined")
+ HGOTO_ERROR(H5E_ARGS, H5E_UNINITIALIZED, FAIL, "`get_eoa' and/or `set_eoa' methods are not defined")
if(!cls->get_eof)
- HGOTO_ERROR(H5E_ARGS, H5E_UNINITIALIZED, FAIL, "`get_eof' method is not defined")
+ HGOTO_ERROR(H5E_ARGS, H5E_UNINITIALIZED, FAIL, "`get_eof' method is not defined")
if(!cls->read || !cls->write)
- HGOTO_ERROR(H5E_ARGS, H5E_UNINITIALIZED, FAIL, "`read' and/or `write' method is not defined")
- for (type=H5FD_MEM_DEFAULT; type<H5FD_MEM_NTYPES; H5_INC_ENUM(H5FD_mem_t,type))
- if(cls->fl_map[type]<H5FD_MEM_NOLIST || cls->fl_map[type]>=H5FD_MEM_NTYPES)
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "invalid free-list mapping")
+ HGOTO_ERROR(H5E_ARGS, H5E_UNINITIALIZED, FAIL, "`read' and/or `write' method is not defined")
+ for (type = H5FD_MEM_DEFAULT; type < H5FD_MEM_NTYPES; H5_INC_ENUM(H5FD_mem_t,type))
+ if(cls->fl_map[type] < H5FD_MEM_NOLIST || cls->fl_map[type] >= H5FD_MEM_NTYPES)
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "invalid free-list mapping")
/* Create the new class ID */
if((ret_value=H5FD_register(cls, sizeof(H5FD_class_t), TRUE)) < 0)
@@ -278,29 +256,18 @@ done:
/*-------------------------------------------------------------------------
- * Function: H5FD_register
- *
- * Purpose: Registers a new file driver as a member of the virtual file
- * driver class. Certain fields of the class struct are
- * required and that is checked here so it doesn't have to be
- * checked every time the field is accessed.
- *
- * Return: Success: A file driver ID which is good until the
- * library is closed or the driver is
- * unregistered.
+ * Function: H5FD_register
*
- * Failure: A negative value.
+ * Purpose: Registers a new file driver as a member of the virtual file
+ * driver class. Certain fields of the class struct are
+ * required and that is checked here so it doesn't have to be
+ * checked every time the field is accessed.
*
- * Programmer: Robb Matzke
- * Monday, July 26, 1999
+ * Return: Success: A file driver ID which is good until the
+ * library is closed or the driver is
+ * unregistered.
*
- * Modifications:
- * Broke into public and internal routines & added 'size'
- * parameter to internal routine, which allows us to create
- * sub-classes of H5FD_class_t for internal support (see the
- * MPI drivers, etc.)
- * Quincey Koziol
- * January 30, 2004
+ * Failure: A negative value.
*
*-------------------------------------------------------------------------
*/
@@ -325,7 +292,7 @@ H5FD_register(const void *_cls, size_t size, hbool_t app_ref)
/* Copy the class structure so the caller can reuse or free it */
if(NULL == (saved = (H5FD_class_t *)H5MM_malloc(size)))
- HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed for file driver class struct")
+ HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed for file driver class struct")
HDmemcpy(saved, cls, size);
/* Create the new class ID */
@@ -342,19 +309,16 @@ done:
/*-------------------------------------------------------------------------
- * Function: H5FDunregister
- *
- * Purpose: Removes a driver ID from the library. This in no way affects
- * file access property lists which have been defined to use
- * this driver or files which are already opened under this
- * driver.
+ * Function: H5FDunregister
*
- * Return: Success: Non-negative
+ * Purpose: Removes a driver ID from the library. This in no way affects
+ * file access property lists which have been defined to use
+ * this driver or files which are already opened under this
+ * driver.
*
- * Failure: Negative
+ * Return: Success: Non-negative
*
- * Programmer: Robb Matzke
- * Monday, July 26, 1999
+ * Failure: Negative
*
*-------------------------------------------------------------------------
*/
@@ -394,9 +358,6 @@ done:
*
* Failure: NULL
*
- * Programmer: Robb Matzke
- * Friday, August 20, 1999
- *
*-------------------------------------------------------------------------
*/
H5FD_class_t *
@@ -442,11 +403,6 @@ done:
* Failure: 0 if an error occurs or if the driver has no
* data to store in the superblock.
*
- * Programmer: Robb Matzke
- * Monday, August 16, 1999
- *
- * Modifications:
- *
*-------------------------------------------------------------------------
*/
hsize_t
@@ -480,11 +436,6 @@ done:
*
* Failure: Negative
*
- * Programmer: Robb Matzke
- * Monday, August 16, 1999
- *
- * Modifications:
- *
*-------------------------------------------------------------------------
*/
herr_t
@@ -511,9 +462,6 @@ done:
* Return: Success: Non-negative
* Failure: Negative
*
- * Programmer: Robb Matzke
- * Monday, August 16, 1999
- *
*-------------------------------------------------------------------------
*/
static herr_t
@@ -542,9 +490,6 @@ done:
* Return: Success: Non-negative
* Failure: Negative
*
- * Programmer: Quincey Koziol
- * Friday, July 19, 2013
- *
*-------------------------------------------------------------------------
*/
herr_t
@@ -592,9 +537,6 @@ done:
* Failure: NULL, including when the file has no
* properties.
*
- * Programmer: Robb Matzke
- * Friday, August 13, 1999
- *
*-------------------------------------------------------------------------
*/
void *
@@ -622,9 +564,6 @@ done:
* Return: Success: non-negative
* Failure: negative
*
- * Programmer: Robb Matzke
- * Tuesday, August 3, 1999
- *
*-------------------------------------------------------------------------
*/
herr_t
@@ -706,11 +645,6 @@ done:
*
* Failure: NULL
*
- * Programmer: Robb Matzke
- * Tuesday, July 27, 1999
- *
- * Modifications:
- *
*-------------------------------------------------------------------------
*/
H5FD_t *
@@ -745,16 +679,6 @@ done:
*
* Failure: NULL
*
- * Programmer: Robb Matzke
- * Wednesday, August 4, 1999
- *
- * Modifications:
- *
- * Raymond Lu
- * Tuesday, Oct 23, 2001
- * Changed the file access list to the new generic property
- * list.
- *
*-------------------------------------------------------------------------
*/
H5FD_t *
@@ -861,9 +785,6 @@ done:
* Return: Success: Non-negative
* Failure: Negative
*
- * Programmer: Robb Matzke
- * Tuesday, July 27, 1999
- *
*-------------------------------------------------------------------------
*/
herr_t
@@ -893,9 +814,6 @@ done:
* Return: Success: Non-negative
* Failure: Negative
*
- * Programmer: Robb Matzke
- * Wednesday, August 4, 1999
- *
*-------------------------------------------------------------------------
*/
herr_t
@@ -944,11 +862,6 @@ done:
* comparison callback then the file pointers
* themselves are compared.
*
- * Programmer: Robb Matzke
- * Tuesday, July 27, 1999
- *
- * Modifications:
- *
*-------------------------------------------------------------------------
*/
int
@@ -975,11 +888,6 @@ done:
*
* Failure: Must never fail.
*
- * Programmer: Robb Matzke
- * Wednesday, August 4, 1999
- *
- * Modifications:
- *
*-------------------------------------------------------------------------
*/
int
@@ -1025,11 +933,6 @@ done:
*
* Failure: negative
*
- * Programmer: Quincey Koziol
- * Friday, August 25, 2000
- *
- * Modifications:
- *
*-------------------------------------------------------------------------
*/
int
@@ -1059,9 +962,6 @@ done:
*
* Failure: negative
*
- * Programmer: Quincey Koziol
- * Friday, August 25, 2000
- *
*-------------------------------------------------------------------------
*/
static int
@@ -1118,9 +1018,6 @@ H5FD_query(const H5FD_t *f, unsigned long *flags/*out*/)
*
* Failure: The undefined address HADDR_UNDEF
*
- * Programmer: Robb Matzke
- * Tuesday, July 27, 1999
- *
*-------------------------------------------------------------------------
*/
haddr_t
@@ -1170,11 +1067,6 @@ done:
*
* Failure: Negative
*
- * Programmer: Robb Matzke
- * Wednesday, July 28, 1999
- *
- * Modifications:
- *
*-------------------------------------------------------------------------
*/
herr_t
@@ -1207,16 +1099,13 @@ done:
/*-------------------------------------------------------------------------
- * Function: H5FDget_eoa
- *
- * Purpose: Returns the address of the first byte after the last
- * allocated memory in the file.
+ * Function: H5FDget_eoa
*
- * Return: Success: First byte after allocated memory.
- * Failure: HADDR_UNDEF
+ * Purpose: Returns the address of the first byte after the last
+ * allocated memory in the file.
*
- * Programmer: Robb Matzke
- * Friday, July 30, 1999
+ * Return: Success: First byte after allocated memory.
+ * Failure: HADDR_UNDEF
*
*-------------------------------------------------------------------------
*/
@@ -1230,13 +1119,13 @@ H5FDget_eoa(H5FD_t *file, H5FD_mem_t type)
/* Check args */
if(!file || !file->cls)
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, HADDR_UNDEF, "invalid file pointer")
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, HADDR_UNDEF, "invalid file pointer")
if(type < H5FD_MEM_DEFAULT || type >= H5FD_MEM_NTYPES)
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, HADDR_UNDEF, "invalid file type")
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, HADDR_UNDEF, "invalid file type")
/* The real work */
if(HADDR_UNDEF == (ret_value = H5FD_get_eoa(file, type)))
- HGOTO_ERROR(H5E_VFL, H5E_CANTINIT, HADDR_UNDEF, "file get eoa request failed")
+ HGOTO_ERROR(H5E_VFL, H5E_CANTINIT, HADDR_UNDEF, "file get eoa request failed")
/* (Note compensating for base address subtraction in internal routine) */
ret_value += file->base_addr;
@@ -1265,9 +1154,6 @@ done:
* Return: Success: Non-negative
* Failure: Negative, no side effect
*
- * Programmer: Robb Matzke
- * Friday, July 30, 1999
- *
*-------------------------------------------------------------------------
*/
herr_t
@@ -1280,16 +1166,16 @@ H5FDset_eoa(H5FD_t *file, H5FD_mem_t type, haddr_t addr)
/* Check args */
if(!file || !file->cls)
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "invalid file pointer")
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "invalid file pointer")
if(type < H5FD_MEM_DEFAULT || type >= H5FD_MEM_NTYPES)
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "invalid file type")
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "invalid file type")
if(!H5F_addr_defined(addr) || addr > file->maxaddr)
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "invalid end-of-address value")
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "invalid end-of-address value")
/* The real work */
/* (Note compensating for base address addition in internal routine) */
if(H5FD_set_eoa(file, type, addr - file->base_addr) < 0)
- HGOTO_ERROR(H5E_VFL, H5E_CANTINIT, FAIL, "file set eoa request failed")
+ HGOTO_ERROR(H5E_VFL, H5E_CANTINIT, FAIL, "file set eoa request failed")
done:
FUNC_LEAVE_API(ret_value)
@@ -1315,11 +1201,6 @@ done:
*
* Failure: HADDR_UNDEF
*
- * Programmer: Robb Matzke
- * Thursday, July 29, 1999
- *
- * Modifications:
- *
*-------------------------------------------------------------------------
*/
haddr_t
@@ -1332,11 +1213,11 @@ H5FDget_eof(H5FD_t *file, H5FD_mem_t type)
/* Check arguments */
if(!file || !file->cls)
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, HADDR_UNDEF, "invalid file pointer")
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, HADDR_UNDEF, "invalid file pointer")
/* The real work */
if(HADDR_UNDEF == (ret_value = H5FD_get_eof(file, type)))
- HGOTO_ERROR(H5E_VFL, H5E_CANTINIT, HADDR_UNDEF, "file get eof request failed")
+ HGOTO_ERROR(H5E_VFL, H5E_CANTINIT, HADDR_UNDEF, "file get eof request failed")
/* (Note compensating for base address subtraction in internal routine) */
ret_value += file->base_addr;
@@ -1354,9 +1235,6 @@ done:
* Return: Success: The maximum address allowed in the file.
* Failure: HADDR_UNDEF
*
- * Programmer: Quincey Koziol
- * Thursday, January 3, 2008
- *
*-------------------------------------------------------------------------
*/
haddr_t
@@ -1384,9 +1262,6 @@ done:
* Return: Success: Non-negative
* Failure: Negative
*
- * Programmer: Quincey Koziol
- * Tuesday, January 8, 2008
- *
*-------------------------------------------------------------------------
*/
herr_t
@@ -1412,8 +1287,6 @@ H5FD_get_feature_flags(const H5FD_t *file, unsigned long *feature_flags)
* Return: Success: Non-negative
* Failure: Negative
*
- * Programmer: Vailin Choi; Oct 2013
- *
*-------------------------------------------------------------------------
*/
herr_t
@@ -1438,9 +1311,6 @@ H5FD_set_feature_flags(H5FD_t *file, unsigned long feature_flags)
* Return: Success: Non-negative
* Failure: Negative
*
- * Programmer: Quincey Koziol
- * Thursday, January 17, 2008
- *
*-------------------------------------------------------------------------
*/
herr_t
@@ -1483,9 +1353,6 @@ done:
*
* Failure: Negative. The contents of BUF is undefined.
*
- * Programmer: Robb Matzke
- * Thursday, July 29, 1999
- *
*-------------------------------------------------------------------------
*/
herr_t
@@ -1548,9 +1415,6 @@ done:
*
* Failure: Negative
*
- * Programmer: Robb Matzke
- * Thursday, July 29, 1999
- *
*-------------------------------------------------------------------------
*/
herr_t
@@ -1613,10 +1477,6 @@ done:
* Programmer: Robb Matzke
* Thursday, July 29, 1999
*
- * Modifications:
- * Quincey Koziol, May 20, 2002
- * Added 'closing' parameter
- *
*-------------------------------------------------------------------------
*/
herr_t
@@ -1653,9 +1513,6 @@ done:
* Return: Success: Non-negative
* Failure: Negative
*
- * Programmer: Robb Matzke
- * Wednesday, August 4, 1999
- *
*-------------------------------------------------------------------------
*/
herr_t
@@ -1683,9 +1540,6 @@ done:
* Return: Success: Non-negative
* Failure: Negative
*
- * Programmer: Quincey Koziol
- * Thursday, January 31, 2008
- *
*-------------------------------------------------------------------------
*/
herr_t
@@ -1722,9 +1576,6 @@ done:
* Return: Success: Non-negative
* Failure: Negative
*
- * Programmer: Quincey Koziol
- * Thursday, January 31, 2008
- *
*-------------------------------------------------------------------------
*/
herr_t
@@ -1752,8 +1603,6 @@ done:
* Return: Success: Non-negative
* Failure: Negative
*
- * Programmer: Vailin Choi; March 2015
- *
*-------------------------------------------------------------------------
*/
herr_t
@@ -1786,8 +1635,6 @@ done:
* Return: Success: Non-negative
* Failure: Negative
*
- * Programmer: Vailin Choi; May 2013
- *
*-------------------------------------------------------------------------
*/
herr_t
@@ -1815,8 +1662,6 @@ done:
* Return: Success: Non-negative
* Failure: Negative
*
- * Programmer: Vailin Choi; March 2015
- *
*-------------------------------------------------------------------------
*/
herr_t
@@ -1848,8 +1693,6 @@ done:
* Return: Success: Non-negative
* Failure: Negative
*
- * Programmer: Vailin Choi; May 2013
- *
*-------------------------------------------------------------------------
*/
herr_t
@@ -1878,9 +1721,6 @@ done:
*
* Return: Non-negative on success/Negative on failure
*
- * Programmer: Quincey Koziol <koziol@ncsa.uiuc.edu>
- * March 27, 2002
- *
*-------------------------------------------------------------------------
*/
herr_t
@@ -1909,8 +1749,6 @@ H5FD_get_fileno(const H5FD_t *file, unsigned long *filenum)
* Programmer: Raymond Lu
* Sep. 16, 2002
*
- * Modifications:
- *
*--------------------------------------------------------------------------
*/
herr_t
@@ -1972,9 +1810,6 @@ done:
*
* Return: Non-negative if succeed; negative if fails.
*
- * Programmer: Quincey Koziol
- * Jan. 17, 2008
- *
*--------------------------------------------------------------------------
*/
herr_t
@@ -2000,9 +1835,6 @@ H5FD_set_base_addr(H5FD_t *file, haddr_t base_addr)
* Return: Success: The absolute base address of the file
* Failure: The undefined address (HADDR_UNDEF)
*
- * Programmer: Quincey Koziol
- * Sept. 10, 2009
- *
*--------------------------------------------------------------------------
*/
haddr_t
@@ -2024,8 +1856,6 @@ H5FD_get_base_addr(const H5FD_t *file)
*
* Return: Non-negative if succeed; negative if fails.
*
- * Programmer: Vailin Choi; April 2013
- *
*--------------------------------------------------------------------------
*/
herr_t
@@ -2052,9 +1882,6 @@ H5FD_set_paged_aggr(H5FD_t *file, hbool_t paged)
*
* Return: SUCCEED/FAIL
*
-* Programmer: Jacob Gruber
-* Wednesday, August 17, 2011
-*
*-------------------------------------------------------------------------
*/
herr_t
diff --git a/src/H5FSint.c b/src/H5FSint.c
index 1a41172..4297291 100644
--- a/src/H5FSint.c
+++ b/src/H5FSint.c
@@ -77,6 +77,32 @@
/*******************/
+/*-------------------------------------------------------------------------
+ * Function: H5FS_init
+ *
+ * Purpose: Initialize the interface in case it is unable to initialize
+ * itself soon enough.
+ *
+ * Return: Success: non-negative
+ * Failure: negative
+ *
+ * Programmer: Quincey Koziol
+ * Saturday, March 4, 2000
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5FS_init(void)
+{
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI(FAIL)
+ /* FUNC_ENTER() does all the work */
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5FS_init() */
+
/*-------------------------------------------------------------------------
* Function: H5FS__create_flush_depend
diff --git a/src/H5FSprivate.h b/src/H5FSprivate.h
index c0467a6..247d75c 100644
--- a/src/H5FSprivate.h
+++ b/src/H5FSprivate.h
@@ -175,6 +175,9 @@ H5FL_SEQ_EXTERN(H5FS_section_class_t);
/* Library-private Function Prototypes */
/***************************************/
+/* Package initialization routine */
+H5_DLL herr_t H5FS_init(void);
+
/* Free space manager routines */
H5_DLL H5FS_t *H5FS_create(H5F_t *f, hid_t dxpl_id, haddr_t *fs_addr,
const H5FS_create_t *fs_create, uint16_t nclasses,
diff --git a/src/H5Fprivate.h b/src/H5Fprivate.h
index 6f68a62..eba48de 100644
--- a/src/H5Fprivate.h
+++ b/src/H5Fprivate.h
@@ -549,9 +549,11 @@
#define H5F_FILE_SPACE_PAGE_SIZE_DEF 4096
/* For paged aggregation: minimum value for file space page size */
#define H5F_FILE_SPACE_PAGE_SIZE_MIN 512
+/* For paged aggregation: maximum value for file space page size: 1 gigabyte */
+#define H5F_FILE_SPACE_PAGE_SIZE_MAX 1024*1024*1024
/* For paged aggregation: drop free-space with size <= this threshold for small meta section */
-#define H5F_FILE_SPACE_PGEND_META_THRES 10
+#define H5F_FILE_SPACE_PGEND_META_THRES 0
/* Default for threshold for alignment (can be set via H5Pset_alignment()) */
#define H5F_ALIGN_DEF 1
diff --git a/src/H5I.c b/src/H5I.c
index 42edf58..b8e47a2 100644
--- a/src/H5I.c
+++ b/src/H5I.c
@@ -451,9 +451,9 @@ H5I_nmembers(H5I_type_t type)
FUNC_ENTER_NOAPI(FAIL)
if(type <= H5I_BADID || type >= H5I_next_type)
- HGOTO_ERROR(H5E_ARGS, H5E_BADRANGE, FAIL, "invalid type number")
+ HGOTO_ERROR(H5E_ARGS, H5E_BADRANGE, FAIL, "invalid type number")
if(NULL == (type_ptr = H5I_id_type_list_g[type]) || type_ptr->init_count <= 0)
- HGOTO_DONE(0);
+ HGOTO_DONE(0);
/* Set return value */
H5_CHECKED_ASSIGN(ret_value, int64_t, type_ptr->id_count, uint64_t);
diff --git a/src/H5Ipublic.h b/src/H5Ipublic.h
index 896f82f..c737bbe 100644
--- a/src/H5Ipublic.h
+++ b/src/H5Ipublic.h
@@ -28,26 +28,28 @@
* fail otherwise).
*
* When adding types here, add a section to the 'misc19' test in test/tmisc.c
- * to verify that the H5I{inc|dec|get}_ref() routines work correctly with in.
+ * to verify that the H5I{inc|dec|get}_ref() routines work correctly with it.
*
+ * NOTE: H5I_REFERENCE is not used by the library and has been deprecated
+ * with a tentative removal version of 1.12.0. (DER, July 2017)
*/
typedef enum H5I_type_t {
- H5I_UNINIT = (-2), /*uninitialized type */
- H5I_BADID = (-1), /*invalid Type */
- H5I_FILE = 1, /*type ID for File objects */
- H5I_GROUP, /*type ID for Group objects */
- H5I_DATATYPE, /*type ID for Datatype objects */
- H5I_DATASPACE, /*type ID for Dataspace objects */
- H5I_DATASET, /*type ID for Dataset objects */
- H5I_ATTR, /*type ID for Attribute objects */
- H5I_REFERENCE, /*type ID for Reference objects */
- H5I_VFL, /*type ID for virtual file layer */
- H5I_GENPROP_CLS, /*type ID for generic property list classes */
- H5I_GENPROP_LST, /*type ID for generic property lists */
- H5I_ERROR_CLASS, /*type ID for error classes */
- H5I_ERROR_MSG, /*type ID for error messages */
- H5I_ERROR_STACK, /*type ID for error stacks */
- H5I_NTYPES /*number of library types, MUST BE LAST! */
+ H5I_UNINIT = (-2), /* uninitialized type */
+ H5I_BADID = (-1), /* invalid Type */
+ H5I_FILE = 1, /* type ID for File objects */
+ H5I_GROUP, /* type ID for Group objects */
+ H5I_DATATYPE, /* type ID for Datatype objects */
+ H5I_DATASPACE, /* type ID for Dataspace objects */
+ H5I_DATASET, /* type ID for Dataset objects */
+ H5I_ATTR, /* type ID for Attribute objects */
+ H5I_REFERENCE, /* *DEPRECATED* type ID for Reference objects */
+ H5I_VFL, /* type ID for virtual file layer */
+ H5I_GENPROP_CLS, /* type ID for generic property list classes */
+ H5I_GENPROP_LST, /* type ID for generic property lists */
+ H5I_ERROR_CLASS, /* type ID for error classes */
+ H5I_ERROR_MSG, /* type ID for error messages */
+ H5I_ERROR_STACK, /* type ID for error stacks */
+ H5I_NTYPES /* number of library types, MUST BE LAST! */
} H5I_type_t;
/* Type of atoms to return to users */
diff --git a/src/H5MF.c b/src/H5MF.c
index e54d809..d7af56a 100644
--- a/src/H5MF.c
+++ b/src/H5MF.c
@@ -1519,7 +1519,7 @@ H5MF_try_shrink(H5F_t *f, H5FD_mem_t alloc_type, hid_t dxpl_id, haddr_t addr,
H5AC_ring_t fsm_ring = H5AC_RING_INV; /* Ring of fsm */
H5F_mem_page_t fs_type; /* Free space type */
hbool_t reset_ring = FALSE; /* Whether the ring was set */
- htri_t ret_value = FAIL; /* Return value */
+ htri_t ret_value = FALSE; /* Return value */
FUNC_ENTER_NOAPI_TAG(dxpl_id, H5AC__FREESPACE_TAG, FAIL)
#ifdef H5MF_ALLOC_DEBUG
diff --git a/src/H5MFsection.c b/src/H5MFsection.c
index 02fc2d9..02e3218 100644
--- a/src/H5MFsection.c
+++ b/src/H5MFsection.c
@@ -130,27 +130,27 @@ H5FS_section_class_t H5MF_FSPACE_SECT_CLS_SIMPLE[1] = {{
/* Class info for "small" free space sections */
H5FS_section_class_t H5MF_FSPACE_SECT_CLS_SMALL[1] = {{
/* Class variables */
- H5MF_FSPACE_SECT_SMALL, /* Section type */
- 0, /* Extra serialized size */
- H5FS_CLS_MERGE_SYM | H5FS_CLS_ADJUST_OK, /* Class flags */
- NULL, /* Class private info */
+ H5MF_FSPACE_SECT_SMALL, /* Section type */
+ 0, /* Extra serialized size */
+ H5FS_CLS_MERGE_SYM | H5FS_CLS_ADJUST_OK, /* Class flags */
+ NULL, /* Class private info */
/* Class methods */
- NULL, /* Initialize section class */
- NULL, /* Terminate section class */
+ NULL, /* Initialize section class */
+ NULL, /* Terminate section class */
/* Object methods */
- H5MF_sect_small_add, /* Add section */
- NULL, /* Serialize section */
+ H5MF_sect_small_add, /* Add section */
+ NULL, /* Serialize section */
H5MF_sect_deserialize, /* Deserialize section */
- H5MF_sect_small_can_merge, /* Can sections merge? */
- H5MF_sect_small_merge, /* Merge sections */
- H5MF_sect_small_can_shrink, /* Can section shrink container?*/
- H5MF_sect_small_shrink, /* Shrink container w/section */
- H5MF_sect_free, /* Free section */
- H5MF_sect_valid, /* Check validity of section */
- H5MF_sect_split, /* Split section node for alignment */
- NULL, /* Dump debugging for section */
+ H5MF_sect_small_can_merge, /* Can sections merge? */
+ H5MF_sect_small_merge, /* Merge sections */
+ NULL, /* Can section shrink container?*/
+ NULL, /* Shrink container w/section */
+ H5MF_sect_free, /* Free section */
+ H5MF_sect_valid, /* Check validity of section */
+ H5MF_sect_split, /* Split section node for alignment */
+ NULL, /* Dump debugging for section */
}};
/* Class info for "large" free space sections */
@@ -674,7 +674,7 @@ HDfprintf(stderr, "%s: Entering, section {%a, %Hu}\n", FUNC, (*sect)->sect_info.
HDfprintf(stderr, "%s: section is dropped\n", FUNC);
#endif /* H5MF_ALLOC_DEBUG_MORE */
} /* end if */
- /* Adjust the section if it is not at page end but its size + pgend threshold is at page end */
+ /* Adjust the section if it is not at page end but its size + prem is at page end */
else
if(prem <= H5F_PGEND_META_THRES(udata->f)) {
(*sect)->sect_info.size += prem;
diff --git a/src/H5Oflush.c b/src/H5Oflush.c
index 2d93221..9764f56 100644
--- a/src/H5Oflush.c
+++ b/src/H5Oflush.c
@@ -370,40 +370,40 @@ H5O_refresh_metadata_reopen(hid_t oid, H5G_loc_t *obj_loc, hid_t dxpl_id, hbool_
type = H5I_get_type(oid);
switch(type) {
- case(H5I_GROUP):
+ case H5I_GROUP:
/* Re-open the group */
if(NULL == (object = H5G_open(obj_loc, dxpl_id)))
HGOTO_ERROR(H5E_SYM, H5E_CANTOPENOBJ, FAIL, "unable to open group")
break;
- case(H5I_DATATYPE):
+ case H5I_DATATYPE:
/* Re-open the named datatype */
if(NULL == (object = H5T_open(obj_loc, dxpl_id)))
HGOTO_ERROR(H5E_DATATYPE, H5E_CANTOPENOBJ, FAIL, "unable to open named datatype")
break;
- case(H5I_DATASET):
+ case H5I_DATASET:
/* Re-open the dataset */
if(NULL == (object = H5D_open(obj_loc, H5P_DATASET_ACCESS_DEFAULT, dxpl_id)))
HGOTO_ERROR(H5E_DATASET, H5E_CANTOPENOBJ, FAIL, "unable to open dataset")
- if(!start_swmr) /* No need to handle multiple opens when H5Fstart_swmr_write() */
- if(H5D_mult_refresh_reopen((H5D_t *)object, dxpl_id) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTOPENOBJ, FAIL, "unable to finish refresh for dataset")
+ if(!start_swmr) /* No need to handle multiple opens when H5Fstart_swmr_write() */
+ if(H5D_mult_refresh_reopen((H5D_t *)object, dxpl_id) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTOPENOBJ, FAIL, "unable to finish refresh for dataset")
break;
- case(H5I_UNINIT):
- case(H5I_BADID):
- case(H5I_FILE):
- case(H5I_DATASPACE):
- case(H5I_ATTR):
- case(H5I_REFERENCE):
- case(H5I_VFL):
- case(H5I_GENPROP_CLS):
- case(H5I_GENPROP_LST):
- case(H5I_ERROR_CLASS):
- case(H5I_ERROR_MSG):
- case(H5I_ERROR_STACK):
- case(H5I_NTYPES):
+ case H5I_UNINIT:
+ case H5I_BADID:
+ case H5I_FILE:
+ case H5I_DATASPACE:
+ case H5I_ATTR:
+ case H5I_REFERENCE:
+ case H5I_VFL:
+ case H5I_GENPROP_CLS:
+ case H5I_GENPROP_LST:
+ case H5I_ERROR_CLASS:
+ case H5I_ERROR_MSG:
+ case H5I_ERROR_STACK:
+ case H5I_NTYPES:
default:
HGOTO_ERROR(H5E_ARGS, H5E_CANTRELEASE, FAIL, "not a valid file object ID (dataset, group, or datatype)")
break;
diff --git a/src/H5Pfcpl.c b/src/H5Pfcpl.c
index 5383aae..6b0d2c0 100644
--- a/src/H5Pfcpl.c
+++ b/src/H5Pfcpl.c
@@ -1472,6 +1472,9 @@ H5Pset_file_space_page_size(hid_t plist_id, hsize_t fsp_size)
if(fsp_size < H5F_FILE_SPACE_PAGE_SIZE_MIN)
HGOTO_ERROR(H5E_ATOM, H5E_BADATOM, FAIL, "cannot set file space page size to less than 512")
+ if(fsp_size > H5F_FILE_SPACE_PAGE_SIZE_MAX)
+ HGOTO_ERROR(H5E_ATOM, H5E_BADATOM, FAIL, "cannot set file space page size to more than 1GB")
+
/* Set the value*/
if(H5P_set(plist, H5F_CRT_FILE_SPACE_PAGE_SIZE_NAME, &fsp_size) < 0)
HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "can't set file space page size")
diff --git a/src/H5R.c b/src/H5R.c
index 73c1d55..b000183 100644
--- a/src/H5R.c
+++ b/src/H5R.c
@@ -105,7 +105,7 @@ H5R__init_package(void)
/* Initialize the atom group for the file IDs */
if(H5I_register_type(H5I_REFERENCE_CLS) < 0)
- HGOTO_ERROR(H5E_REFERENCE, H5E_CANTINIT, FAIL, "unable to initialize interface")
+ HGOTO_ERROR(H5E_REFERENCE, H5E_CANTINIT, FAIL, "unable to initialize interface")
/* Mark "top" of interface as initialized, too */
H5R_top_package_initialize_s = TRUE;
@@ -141,10 +141,10 @@ H5R_top_term_package(void)
FUNC_ENTER_NOAPI_NOINIT_NOERR
if(H5R_top_package_initialize_s) {
- if(H5I_nmembers(H5I_REFERENCE) > 0) {
- (void)H5I_clear_type(H5I_REFERENCE, FALSE, FALSE);
+ if(H5I_nmembers(H5I_REFERENCE) > 0) {
+ (void)H5I_clear_type(H5I_REFERENCE, FALSE, FALSE);
n++; /*H5I*/
- } /* end if */
+ } /* end if */
/* Mark closed */
if(0 == n)
@@ -741,7 +741,7 @@ H5Rget_region(hid_t id, H5R_type_t ref_type, const void *ref)
HGOTO_ERROR(H5E_REFERENCE, H5E_CANTCREATE, FAIL, "unable to create dataspace")
/* Atomize */
- if((ret_value = H5I_register (H5I_DATASPACE, space, TRUE)) < 0)
+ if((ret_value = H5I_register(H5I_DATASPACE, space, TRUE)) < 0)
HGOTO_ERROR(H5E_ATOM, H5E_CANTREGISTER, FAIL, "unable to register dataspace atom")
done:
diff --git a/src/H5Smpio.c b/src/H5Smpio.c
index c24c455..46f7a59 100644
--- a/src/H5Smpio.c
+++ b/src/H5Smpio.c
@@ -33,7 +33,7 @@
#include "H5Oprivate.h" /* Object headers */
#include "H5Pprivate.h" /* Property lists */
#include "H5Spkg.h" /* Dataspaces */
-#include "H5VMprivate.h" /* Vector and array functions */
+#include "H5VMprivate.h" /* Vector and array functions */
#ifdef H5_HAVE_PARALLEL
@@ -55,9 +55,42 @@ static herr_t H5S_mpio_span_hyper_type(const H5S_t *space, size_t elmt_size,
MPI_Datatype *new_type, int *count, hbool_t *is_derived_type);
static herr_t H5S_obtain_datatype(const hsize_t down[], H5S_hyper_span_t* span,
const MPI_Datatype *elmt_type, MPI_Datatype *span_type, size_t elmt_size);
+static herr_t H5S_mpio_create_large_type (hsize_t, MPI_Aint, MPI_Datatype , MPI_Datatype *);
+
#define H5S_MPIO_INITIAL_ALLOC_COUNT 256
+#define TWO_GIG_LIMIT 2147483648
+
+#ifndef H5S_MAX_MPI_COUNT
+#define H5S_MAX_MPI_COUNT 536870911 /* (2^29)-1 */
+#endif
+
+static hsize_t bigio_count = H5S_MAX_MPI_COUNT;
+
+/*-------------------------------------------------------------------------
+ * Function: H5S_mpio_set_bigio_count
+ *
+ * Purpose:  Allow us to programmatically change the switch point
+ * when we utilize derived datatypes. This is of
+ * particular interest for allowing nightly testing
+ *
+ * Return: the current/previous value of bigio_count.
+ *
+ * Programmer: Richard Warren, March 10, 2017
+ *
+ *-------------------------------------------------------------------------
+ */
+hsize_t
+H5S_mpio_set_bigio_count(hsize_t new_count)
+{
+ hsize_t orig_count = bigio_count;
+ if ((new_count > 0) && (new_count < TWO_GIG_LIMIT)) {
+ bigio_count = new_count;
+ }
+ return orig_count;
+}
+
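    /* Hedged usage sketch, not part of this patch: bigio_count is the switch
     * point at which the derived-datatype ("big I/O") path is taken, so a
     * parallel test can lower it to exercise that path with small buffers.
     * The threshold value 1000 is illustrative only. */
    static void
    exercise_bigio_path_example(void)
    {
        hsize_t prev = H5S_mpio_set_bigio_count((hsize_t)1000); /* force the large-type path  */

        /* ... perform collective dataset reads/writes here ... */

        H5S_mpio_set_bigio_count(prev);                          /* restore previous threshold */
    }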
/*-------------------------------------------------------------------------
* Function: H5S_mpio_all_type
@@ -72,6 +105,11 @@ static herr_t H5S_obtain_datatype(const hsize_t down[], H5S_hyper_span_t* span,
* *is_derived_type 0 if MPI primitive type, 1 if derived
*
* Programmer: rky 980813
+ * Modifications:
+ * Mohamad Chaarawi
+ * Adding support for large datatypes (beyond the limit of a
+ *              32 bit integer).
+ *
*
*-------------------------------------------------------------------------
*/
@@ -95,11 +133,22 @@ H5S_mpio_all_type(const H5S_t *space, size_t elmt_size,
H5_CHECKED_ASSIGN(nelmts, hsize_t, snelmts, hssize_t);
total_bytes = (hsize_t)elmt_size * nelmts;
-
- /* fill in the return values */
- *new_type = MPI_BYTE;
- H5_CHECKED_ASSIGN(*count, int, total_bytes, hsize_t);
- *is_derived_type = FALSE;
+ /* Verify that the size can be expressed as a 32 bit integer */
+ if(bigio_count >= total_bytes) {
+ /* fill in the return values */
+ *new_type = MPI_BYTE;
+ H5_CHECKED_ASSIGN(*count, int, total_bytes, hsize_t);
+ *is_derived_type = FALSE;
+ }
+ else {
+ /* Create a LARGE derived datatype for this transfer */
+ if (H5S_mpio_create_large_type (total_bytes, 0, MPI_BYTE, new_type) < 0) {
+ HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL,
+ "couldn't create a large datatype from the all selection")
+ }
+ *count = 1;
+ *is_derived_type = TRUE;
+ }
done:
FUNC_LEAVE_NOAPI(ret_value)
@@ -167,27 +216,103 @@ H5S_mpio_create_point_datatype (size_t elmt_size, hsize_t num_points,
HMPI_GOTO_ERROR(FAIL, "MPI_Type_contiguous failed", mpi_code)
elmt_type_created = TRUE;
+    /* Check whether standard or BIGIO processing will be employed */
+ if(bigio_count >= num_points) {
#if MPI_VERSION >= 3
- /* Create an MPI datatype for the whole point selection */
- if(MPI_SUCCESS != (mpi_code = MPI_Type_create_hindexed_block((int)num_points, 1, disp, elmt_type, new_type)))
- HMPI_GOTO_ERROR(FAIL, "MPI_Type_create_indexed_block failed", mpi_code)
+ /* Create an MPI datatype for the whole point selection */
+ if(MPI_SUCCESS != (mpi_code = MPI_Type_create_hindexed_block((int)num_points, 1, disp, elmt_type, new_type)))
+ HMPI_GOTO_ERROR(FAIL, "MPI_Type_create_indexed_block failed", mpi_code)
#else
- /* Allocate block sizes for MPI datatype call */
- if(NULL == (blocks = (int *)H5MM_malloc(sizeof(int) * num_points)))
- HGOTO_ERROR(H5E_DATASPACE, H5E_CANTALLOC, FAIL, "can't allocate array of blocks")
+ /* Allocate block sizes for MPI datatype call */
+ if(NULL == (blocks = (int *)H5MM_malloc(sizeof(int) * num_points)))
+ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTALLOC, FAIL, "can't allocate array of blocks")
- for(u = 0; u < num_points; u++)
- blocks[u] = 1;
+ for(u = 0; u < num_points; u++)
+ blocks[u] = 1;
- /* Create an MPI datatype for the whole point selection */
- if(MPI_SUCCESS != (mpi_code = MPI_Type_create_hindexed((int)num_points, blocks, disp, elmt_type, new_type)))
- HMPI_GOTO_ERROR(FAIL, "MPI_Type_create_indexed_block failed", mpi_code)
+ /* Create an MPI datatype for the whole point selection */
+ if(MPI_SUCCESS != (mpi_code = MPI_Type_create_hindexed((int)num_points, blocks, disp, elmt_type, new_type)))
+ HMPI_GOTO_ERROR(FAIL, "MPI_Type_create_hindexed failed", mpi_code)
#endif
- /* Commit MPI datatype for later use */
- if(MPI_SUCCESS != (mpi_code = MPI_Type_commit(new_type)))
- HMPI_GOTO_ERROR(FAIL, "MPI_Type_commit failed", mpi_code)
+ /* Commit MPI datatype for later use */
+ if(MPI_SUCCESS != (mpi_code = MPI_Type_commit(new_type)))
+ HMPI_GOTO_ERROR(FAIL, "MPI_Type_commit failed", mpi_code)
+ }
+ else {
+ /* use LARGE_DATATYPE::
+ * We'll create an hindexed_block type for every 2G point count and then combine
+ * those and any remaining points into a single large datatype.
+ */
+ int total_types, i;
+ int remaining_points;
+ int num_big_types;
+ hsize_t leftover;
+
+ int *inner_blocks;
+ MPI_Aint *inner_disps;
+ MPI_Datatype *inner_types = NULL;
+
+ /* Calculate how many Big MPI datatypes are needed to represent the buffer */
+ num_big_types = (int)(num_points/bigio_count);
+
+ leftover = (hsize_t)num_points - (hsize_t)num_big_types * (hsize_t)bigio_count;
+ H5_CHECKED_ASSIGN(remaining_points, int, leftover, hsize_t);
+
+ total_types = (int)(remaining_points) ? (num_big_types + 1) : num_big_types;
+
+ /* Allocate array of MPI derived types needed */
+ if(NULL == (inner_types = (MPI_Datatype *)H5MM_malloc((sizeof(MPI_Datatype) * (size_t)total_types))))
+ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTALLOC, FAIL, "can't allocate array of datatypes")
+
+ if(NULL == (inner_blocks = (int *)H5MM_malloc(sizeof(int) * (size_t)total_types)))
+ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTALLOC, FAIL, "can't allocate array of blocks")
+
+ if(NULL == (inner_disps = (MPI_Aint *)H5MM_malloc(sizeof(MPI_Aint) * (size_t)total_types)))
+ HGOTO_ERROR(H5E_DATASPACE, H5E_CANTALLOC, FAIL, "can't allocate array of displacements")
+
+ for(i=0 ; i<num_big_types ; i++) {
+ if(MPI_SUCCESS != (mpi_code = MPI_Type_create_hindexed_block(bigio_count,
+ 1,
+ &disp[i*bigio_count],
+ elmt_type,
+ &inner_types[i]))) {
+ HMPI_GOTO_ERROR(FAIL, "MPI_Type_create_hindexed_block failed", mpi_code);
+ }
+ inner_blocks[i] = 1;
+ inner_disps[i] = 0;
+ }
+
+ if(remaining_points) {
+ if(MPI_SUCCESS != (mpi_code = MPI_Type_create_hindexed_block(remaining_points,
+ 1,
+ &disp[num_big_types*bigio_count],
+ elmt_type,
+ &inner_types[num_big_types]))) {
+ HMPI_GOTO_ERROR(FAIL, "MPI_Type_create_hindexed_block failed", mpi_code);
+ }
+ inner_blocks[num_big_types] = 1;
+ inner_disps[num_big_types] = 0;
+ }
+
+ if(MPI_SUCCESS != (mpi_code = MPI_Type_create_struct(total_types,
+ inner_blocks,
+ inner_disps,
+ inner_types,
+ new_type))) {
+ HMPI_GOTO_ERROR(FAIL, "MPI_Type_create_struct", mpi_code);
+ }
+ for(i=0 ; i<total_types ; i++)
+ MPI_Type_free(&inner_types[i]);
+
+ H5MM_free(inner_types);
+ H5MM_free(inner_blocks);
+ H5MM_free(inner_disps);
+ /* Commit MPI datatype for later use */
+ if(MPI_SUCCESS != (mpi_code = MPI_Type_commit(new_type)))
+ HMPI_GOTO_ERROR(FAIL, "MPI_Type_commit failed", mpi_code)
+ }
done:
if(elmt_type_created)
MPI_Type_free(&elmt_type);
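
To make the splitting arithmetic in the hunk above concrete, here is a small self-contained C sketch (values chosen only for illustration; the 2^31-1 threshold is assumed) mirroring the num_big_types / remaining_points / total_types computation for a point selection:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t num_points  = 5000000000ULL;   /* more than 2^32 points      */
        uint64_t bigio_count = 2147483647ULL;   /* assumed default: 2^31 - 1  */

        uint64_t num_big_types = num_points / bigio_count;                     /* 2          */
        uint64_t leftover      = num_points - num_big_types * bigio_count;     /* 705032706  */
        uint64_t total_types   = leftover ? num_big_types + 1 : num_big_types; /* 3          */

        printf("%llu full chunks + %llu leftover points -> %llu inner types\n",
               (unsigned long long)num_big_types,
               (unsigned long long)leftover,
               (unsigned long long)total_types);
        return 0;
    }
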
@@ -481,7 +606,10 @@ done:
* *is_derived_type 0 if MPI primitive type, 1 if derived
*
* Programmer: rky 980813
- *
+ * Modifications:
+ * Mohamad Chaarawi
+ * Adding support for large datatypes (beyond the limit of a
+ * 32 bit integer).
*-------------------------------------------------------------------------
*/
static herr_t
@@ -636,8 +764,25 @@ H5S_mpio_hyper_type(const H5S_t *space, size_t elmt_size,
HDfprintf(H5DEBUG(S), "d[%d].xtent=%Hu \n", i, d[i].xtent);
}
#endif
- if(MPI_SUCCESS != (mpi_code = MPI_Type_contiguous((int)elmt_size, MPI_BYTE, &inner_type)))
+
+ /* LARGE_DATATYPE::
+ * Check if the number of elements to form the inner type fits into a 32 bit integer.
+ * If yes, then just create the inner type with MPI_Type_contiguous.
+ * Otherwise create a compound datatype by iterating as many times as needed
+ * for the inner type to be created.
+ */
+ if(bigio_count >= elmt_size) {
+ /* Use a single MPI datatype that has a 32 bit size */
+ if(MPI_SUCCESS != (mpi_code = MPI_Type_contiguous((int)elmt_size, MPI_BYTE, &inner_type)))
HMPI_GOTO_ERROR(FAIL, "MPI_Type_contiguous failed", mpi_code)
+ }
+ else {
+ /* Create the compound datatype for this operation (> 2GB) */
+ if (H5S_mpio_create_large_type (elmt_size, 0, MPI_BYTE, &inner_type) < 0) {
+ HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL,
+ "couldn't ccreate a large inner datatype in hyper selection")
+ }
+ }
/*******************************************************
* Construct the type by walking the hyperslab dims
@@ -645,30 +790,93 @@ H5S_mpio_hyper_type(const H5S_t *space, size_t elmt_size,
*******************************************************/
for(i = ((int)rank) - 1; i >= 0; --i) {
#ifdef H5S_DEBUG
- if(H5DEBUG(S))
- HDfprintf(H5DEBUG(S), "%s: Dimension i=%d \n"
- "start=%Hd count=%Hu block=%Hu stride=%Hu, xtent=%Hu max_xtent=%d\n",
- FUNC, i, d[i].start, d[i].count, d[i].block, d[i].strid, d[i].xtent, max_xtent[i]);
+ if(H5DEBUG(S))
+ HDfprintf(H5DEBUG(S), "%s: Dimension i=%d \n"
+ "start=%Hd count=%Hu block=%Hu stride=%Hu, xtent=%Hu max_xtent=%d\n",
+ FUNC, i, d[i].start, d[i].count, d[i].block, d[i].strid, d[i].xtent, max_xtent[i]);
#endif
#ifdef H5S_DEBUG
- if(H5DEBUG(S))
- HDfprintf(H5DEBUG(S), "%s: i=%d Making vector-type \n", FUNC,i);
+ if(H5DEBUG(S))
+ HDfprintf(H5DEBUG(S), "%s: i=%d Making vector-type \n", FUNC,i);
#endif
/****************************************
* Build vector type of the selection.
****************************************/
- mpi_code = MPI_Type_vector((int)(d[i].count), /* count */
- (int)(d[i].block), /* blocklength */
- (int)(d[i].strid), /* stride */
- inner_type, /* old type */
- &outer_type); /* new type */
+ if (bigio_count >= d[i].count &&
+ bigio_count >= d[i].block &&
+ bigio_count >= d[i].strid) {
+
+ /* All the parameters fit into 32 bit integers so create the vector type normally */
+ mpi_code = MPI_Type_vector((int)(d[i].count), /* count */
+ (int)(d[i].block), /* blocklength */
+ (int)(d[i].strid), /* stride */
+ inner_type, /* old type */
+ &outer_type); /* new type */
+
+ MPI_Type_free(&inner_type);
+ if(mpi_code != MPI_SUCCESS)
+ HMPI_GOTO_ERROR(FAIL, "couldn't create MPI vector type", mpi_code)
+ }
+ else {
+ /* Things get a bit more complicated and require LARGE_DATATYPE processing.
+ * There are two MPI datatypes that need to be created:
+ * 1) an internal contiguous block; and
+ * 2) a collection of elements where an element is a contiguous block (1).
+ * Remember that the input arguments to the MPI-IO functions use integer
+ * values to represent element counts. However, more recent MPI
+ * implementations allow us to use constructed datatypes, so that the
+ * total number of bytes in a transfer can be as large as
+ * (2GB-1) * number_of_blocks * the_datatype_extent.
+ */
+
+ MPI_Aint stride_in_bytes, inner_extent;
+ MPI_Datatype block_type;
+
+ /* create a contiguous datatype inner_type x number of BLOCKS.
+ * Again we need to check that the number of BLOCKS can fit into
+ * a 32 bit integer */
+ if (bigio_count < d[i].block) {
+ if (H5S_mpio_create_large_type(d[i].block, 0, inner_type,
+ &block_type) < 0) {
+ HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL,
+ "couldn't ccreate a large block datatype in hyper selection")
+ }
+ }
+ else {
+ if(MPI_SUCCESS != (mpi_code = MPI_Type_contiguous((int)d[i].block,
+ inner_type,
+ &block_type)))
+ HMPI_GOTO_ERROR(FAIL, "MPI_Type_contiguous failed", mpi_code)
+ }
- MPI_Type_free(&inner_type);
- if(mpi_code != MPI_SUCCESS)
- HMPI_GOTO_ERROR(FAIL, "couldn't create MPI vector type", mpi_code)
+ MPI_Type_extent (inner_type, &inner_extent);
+ stride_in_bytes = inner_extent * (MPI_Aint)d[i].strid;
- /****************************************
+ /* If the element count is larger than what a 32 bit integer can hold,
+ * we call the large type creation function to handle that
+ */
+ if (bigio_count < d[i].count) {
+ if (H5S_mpio_create_large_type (d[i].count, stride_in_bytes, block_type,
+ &outer_type) < 0) {
+ HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL,
+ "couldn't create a large outer datatype in hyper selection")
+ }
+ }
+ /* otherwise a regular create_hvector will do */
+ else {
+ mpi_code = MPI_Type_create_hvector((int)d[i].count, /* count */
+ 1, /* blocklength */
+ stride_in_bytes, /* stride in bytes*/
+ block_type, /* old type */
+ &outer_type); /* new type */
+ if(MPI_SUCCESS != mpi_code)
+ HMPI_GOTO_ERROR(FAIL, "MPI_Type_create_hvector failed", mpi_code)
+ }
+ MPI_Type_free(&block_type);
+ MPI_Type_free(&inner_type);
+ }
+ /****************************************
* Then build the dimension type as (start, vector type, xtent).
****************************************/
/* calculate start and extent values of this dimension */
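
The two-level construction used in the hunk above (a contiguous block type, then a collection of such blocks separated by a byte stride) follows a common MPI pattern; a generic sketch with placeholder names (block_len, nblocks, stride_elems and elem_type are illustrative, not identifiers from this patch):

    MPI_Datatype block_type, outer_type;
    MPI_Aint     lb, elem_extent, stride_bytes;

    /* 1) one contiguous block of block_len elements */
    MPI_Type_contiguous(block_len, elem_type, &block_type);

    /* 2) nblocks such blocks, separated by a stride expressed in bytes
     *    (MPI_Type_get_extent is the non-deprecated form of MPI_Type_extent) */
    MPI_Type_get_extent(elem_type, &lb, &elem_extent);
    stride_bytes = elem_extent * (MPI_Aint)stride_elems;
    MPI_Type_create_hvector(nblocks, 1, stride_bytes, block_type, &outer_type);

    MPI_Type_commit(&outer_type);
    MPI_Type_free(&block_type);
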
@@ -752,6 +960,10 @@ done:
*
* Programmer: kyang
*
+ * Modifications:
+ * Mohamad Chaarawi
+ * Adding support for large datatypes (beyond the limit of a
+ * 32 bit integer).
*-------------------------------------------------------------------------
*/
static herr_t
@@ -774,8 +986,17 @@ H5S_mpio_span_hyper_type(const H5S_t *space, size_t elmt_size,
HDassert(space->select.sel_info.hslab->span_lst->head);
/* Create the base type for an element */
- if(MPI_SUCCESS != (mpi_code = MPI_Type_contiguous((int)elmt_size, MPI_BYTE, &elmt_type)))
- HMPI_GOTO_ERROR(FAIL, "MPI_Type_contiguous failed", mpi_code)
+ if (bigio_count >= elmt_size) {
+ if(MPI_SUCCESS != (mpi_code = MPI_Type_contiguous((int)elmt_size, MPI_BYTE, &elmt_type))) {
+ HMPI_GOTO_ERROR(FAIL, "MPI_Type_contiguous failed", mpi_code)
+ }
+ }
+ else {
+ if (H5S_mpio_create_large_type (elmt_size, 0, MPI_BYTE, &elmt_type) < 0) {
+ HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL,
+ "couldn't create a large element datatype in span_hyper selection")
+ }
+ }
elmt_type_is_derived = TRUE;
/* Compute 'down' sizes for each dimension */
@@ -821,14 +1042,15 @@ static herr_t
H5S_obtain_datatype(const hsize_t *down, H5S_hyper_span_t *span,
const MPI_Datatype *elmt_type, MPI_Datatype *span_type, size_t elmt_size)
{
- size_t alloc_count; /* Number of span tree nodes allocated at this level */
- size_t outercount; /* Number of span tree nodes at this level */
+ size_t alloc_count = 0; /* Number of span tree nodes allocated at this level */
+ size_t outercount = 0; /* Number of span tree nodes at this level */
MPI_Datatype *inner_type = NULL;
hbool_t inner_types_freed = FALSE; /* Whether the inner_type MPI datatypes have been freed */
hbool_t span_type_valid = FALSE; /* Whether the span_type MPI datatypes is valid */
+ hbool_t large_block = FALSE; /* Whether the block length is larger than a 32 bit integer */
int *blocklen = NULL;
MPI_Aint *disp = NULL;
- H5S_hyper_span_t *tspan; /* Temporary pointer to span tree node */
+ H5S_hyper_span_t *tspan = NULL; /* Temporary pointer to span tree node */
int mpi_code; /* MPI return status code */
herr_t ret_value = SUCCEED; /* Return value */
@@ -870,14 +1092,70 @@ H5S_obtain_datatype(const hsize_t *down, H5S_hyper_span_t *span,
disp[outercount] = (MPI_Aint)elmt_size * tspan->low;
H5_CHECK_OVERFLOW(tspan->nelem, hsize_t, int)
blocklen[outercount] = (int)tspan->nelem;
-
tspan = tspan->next;
+
+ if (bigio_count < blocklen[outercount]) {
+ large_block = TRUE; /* at least one block type is large, so set this flag to true */
+ }
+
outercount++;
} /* end while */
- if(MPI_SUCCESS != (mpi_code = MPI_Type_create_hindexed((int)outercount, blocklen, disp, *elmt_type, span_type)))
- HMPI_GOTO_ERROR(FAIL, "MPI_Type_create_hindexed failed", mpi_code)
- span_type_valid = TRUE;
+ /* Everything fits into integers, so cast them and use hindexed */
+ if (bigio_count >= outercount && large_block == FALSE) {
+
+ if(MPI_SUCCESS != (mpi_code = MPI_Type_create_hindexed((int)outercount, blocklen, disp, *elmt_type, span_type)))
+ HMPI_GOTO_ERROR(FAIL, "MPI_Type_create_hindexed failed", mpi_code)
+ span_type_valid = TRUE;
+ }
+ else { /* LARGE_DATATYPE:: Something doesn't fit into a 32 bit integer */
+ size_t i;
+
+ for (i=0 ; i<outercount ; i++) {
+ MPI_Datatype temp_type = MPI_DATATYPE_NULL, outer_type = MPI_DATATYPE_NULL;
+ /* create the block type from elmt_type while checking the 32 bit int limit */
+ if (blocklen[i] > bigio_count) {
+ if (H5S_mpio_create_large_type (blocklen[i], 0, *elmt_type, &temp_type) < 0) {
+ HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL,
+ "couldn't create a large element datatype in span_hyper selection")
+ }
+ }
+ else {
+ if(MPI_SUCCESS != (mpi_code = MPI_Type_contiguous((int)blocklen[i],
+ *elmt_type,
+ &temp_type)))
+ HMPI_GOTO_ERROR(FAIL, "MPI_Type_contiguous failed", mpi_code)
+ }
+
+ /* combine the current datatype that is created with this current block type */
+ if (0 == i) { /* first iteration, there is no combined datatype yet */
+ *span_type = temp_type;
+ }
+ else {
+ int bl[2] = {1,1};
+ MPI_Aint ds[2] = {disp[i-1],disp[i]};
+ MPI_Datatype dt[2] = {*span_type, temp_type};
+
+ if (MPI_SUCCESS != (mpi_code = MPI_Type_create_struct (2, /* count */
+ bl, /* blocklength */
+ ds, /* stride in bytes*/
+ dt, /* old type */
+ &outer_type))){ /* new type */
+ HMPI_GOTO_ERROR(FAIL, "MPI_Type_create_struct failed", mpi_code)
+ }
+ *span_type = outer_type;
+ }
+
+ if (outer_type != MPI_DATATYPE_NULL)
+ MPI_Type_free(&outer_type);
+ /* temp_type shouldn't be freed here...
+ * Note that we have simply copied it above (not MPI_Type_dup)
+ * into the 'span_type' argument of the caller.
+ * The caller needs to deal with it there!
+ */
+ }
+ } /* end (LARGE_DATATYPE::) */
+
} /* end if */
else {
size_t u; /* Local index variable */
@@ -1091,5 +1369,139 @@ H5S_mpio_space_type(const H5S_t *space, size_t elmt_size, MPI_Datatype *new_type
done:
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5S_mpio_space_type() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5S_mpio_create_large_type
+ *
+ * Purpose: Create a large datatype of size larger than what a 32 bit integer
+ * can hold.
+ *
+ * Return: non-negative on success, negative on failure.
+ *
+ * *new_type the new datatype created
+ *
+ * Programmer: Mohamad Chaarawi
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t H5S_mpio_create_large_type (hsize_t num_elements,
+ MPI_Aint stride_bytes,
+ MPI_Datatype old_type,
+ MPI_Datatype *new_type)
+{
+ int num_big_types; /* num times the 2G datatype will be repeated */
+ int remaining_bytes; /* the number of bytes left that can be held in an int value */
+ hsize_t leftover;
+ int block_len[2];
+ int mpi_code; /* MPI return code */
+ MPI_Datatype inner_type, outer_type, leftover_type, type[2];
+ MPI_Aint disp[2], old_extent;
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI_NOINIT
+
+ /* Calculate how many Big MPI datatypes are needed to represent the buffer */
+ num_big_types = (int)(num_elements/bigio_count);
+ leftover = num_elements - num_big_types * (hsize_t)bigio_count;
+ H5_CHECKED_ASSIGN(remaining_bytes, int, leftover, hsize_t);
+
+ /* Create a contiguous datatype of size equal to the largest
+ * number that a 32 bit integer can hold times the size of the old type.
+ * If the stride is 0, then the type is contiguous; otherwise use
+ * MPI_Type_create_hvector to create the type with the stride provided.
+ */
+ if (0 == stride_bytes) {
+ if(MPI_SUCCESS != (mpi_code = MPI_Type_contiguous(bigio_count,
+ old_type,
+ &inner_type))) {
+ HMPI_GOTO_ERROR(FAIL, "MPI_Type_contiguous failed", mpi_code)
+ }
+ }
+ else {
+ if(MPI_SUCCESS != (mpi_code = MPI_Type_create_hvector (bigio_count,
+ 1,
+ stride_bytes,
+ old_type,
+ &inner_type))) {
+ HMPI_GOTO_ERROR(FAIL, "MPI_Type_create_hvector failed", mpi_code)
+ }
+ }
+
+ /* Create a contiguous datatype of the buffer (minus the remaining < 2GB part)
+ * If a stride is present, use hvector type
+ */
+ if (0 == stride_bytes) {
+ if(MPI_SUCCESS != (mpi_code = MPI_Type_contiguous(num_big_types,
+ inner_type,
+ &outer_type))) {
+ HMPI_GOTO_ERROR(FAIL, "MPI_Type_contiguous failed", mpi_code)
+ }
+ }
+ else {
+ if(MPI_SUCCESS != (mpi_code = MPI_Type_create_hvector (num_big_types,
+ 1,
+ stride_bytes,
+ inner_type,
+ &outer_type))) {
+ HMPI_GOTO_ERROR(FAIL, "MPI_Type_create_hvector failed", mpi_code)
+ }
+ }
+
+ MPI_Type_free(&inner_type);
+
+ /* If there is a remaining part create a contiguous/vector datatype and then
+ * use a struct datatype to encapsulate everything.
+ */
+ if(remaining_bytes) {
+ if (stride_bytes == 0) {
+ if(MPI_SUCCESS != (mpi_code = MPI_Type_contiguous (remaining_bytes,
+ old_type,
+ &leftover_type))) {
+ HMPI_GOTO_ERROR(FAIL, "MPI_Type_contiguous failed", mpi_code)
+ }
+ }
+ else {
+ if(MPI_SUCCESS != (mpi_code = MPI_Type_create_hvector
+ ((int)(num_elements - (hsize_t)num_big_types*bigio_count),
+ 1,
+ stride_bytes,
+ old_type,
+ &leftover_type))) {
+ HMPI_GOTO_ERROR(FAIL, "MPI_Type_create_hvector failed", mpi_code)
+ }
+ }
+
+ MPI_Type_extent (old_type, &old_extent);
+
+ /* Set up the arguments for the MPI_Type_create_struct constructor */
+ type[0] = outer_type;
+ type[1] = leftover_type;
+ block_len[0] = 1;
+ block_len[1] = 1;
+ disp[0] = 0;
+ disp[1] = (old_extent+stride_bytes)*num_big_types*(MPI_Aint)bigio_count;
+
+ if(MPI_SUCCESS != (mpi_code =
+ MPI_Type_create_struct(2, block_len, disp, type, new_type))) {
+ HMPI_GOTO_ERROR(FAIL, "MPI_Type_create_struct failed", mpi_code)
+ }
+
+ MPI_Type_free(&outer_type);
+ MPI_Type_free(&leftover_type);
+ }
+ else {
+ /* There are no remaining bytes so just set the new type to
+ * the outer type created */
+ *new_type = outer_type;
+ }
+
+ if(MPI_SUCCESS != (mpi_code = MPI_Type_commit(new_type)))
+ HMPI_GOTO_ERROR(FAIL, "MPI_Type_commit failed", mpi_code)
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5S_mpio_create_large_type */
+
#endif /* H5_HAVE_PARALLEL */
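
The reason the hunks above set *count = 1 and *is_derived_type = TRUE for the large case is that the entire selection is then described by a single derived type, so the int element count handed to MPI-IO can never overflow. A generic sketch of how such a type might be consumed (fh, offset and buf are placeholders, not names from this patch):

    MPI_Datatype big_type;   /* e.g. built by H5S_mpio_create_large_type() */
    MPI_Status   status;

    /* One "element" of big_type covers the whole >2 GB transfer */
    MPI_File_write_at_all(fh, offset, buf, 1, big_type, &status);
    MPI_Type_free(&big_type);
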
diff --git a/src/H5VMprivate.h b/src/H5VMprivate.h
index 4d71b29..decac7e 100644
--- a/src/H5VMprivate.h
+++ b/src/H5VMprivate.h
@@ -460,7 +460,11 @@ H5VM_power2up(hsize_t n)
{
hsize_t ret_value = 1; /* Return value */
- while(ret_value < n)
+ /* Returns 0 when n exceeds 2^63 */
+ if(n >= (hsize_t)1 << ((sizeof(hsize_t) * CHAR_BIT) - 1))
+ ret_value = 0;
+
+ while(ret_value && ret_value < n)
ret_value <<= 1;
return(ret_value);
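
The guard added above matters because H5VM_power2up() previously looped forever once n exceeded the largest representable power of two. A standalone illustration of the fixed behaviour (a re-implementation for demonstration only, not the library source):

    #include <stdio.h>
    #include <stdint.h>
    #include <limits.h>

    static uint64_t power2up(uint64_t n)
    {
        uint64_t ret = 1;

        /* The largest power of two in 64 bits is 2^63; anything above it
         * cannot be rounded up, so return 0 instead of shifting forever. */
        if (n >= (uint64_t)1 << (sizeof(uint64_t) * CHAR_BIT - 1))
            return 0;

        while (ret < n)
            ret <<= 1;
        return ret;
    }

    int main(void)
    {
        printf("%llu\n", (unsigned long long)power2up(600));                     /* 1024 */
        printf("%llu\n", (unsigned long long)power2up(((uint64_t)1 << 63) + 5)); /* 0    */
        return 0;
    }
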
diff --git a/test/CMakeLists.txt b/test/CMakeLists.txt
index d7965cb..8522c1d 100644
--- a/test/CMakeLists.txt
+++ b/test/CMakeLists.txt
@@ -373,6 +373,7 @@ set (H5_CHECK_TESTS
atomic_reader
links_env
flushrefresh
+ filenotclosed
)
foreach (test ${H5_CHECK_TESTS})
diff --git a/test/CMakeTests.cmake b/test/CMakeTests.cmake
index fe8f261..5f0d386 100644
--- a/test/CMakeTests.cmake
+++ b/test/CMakeTests.cmake
@@ -570,6 +570,7 @@ set (test_CLEANFILES
flushrefresh_VERIFICATION_CHECKPOINT1
flushrefresh_VERIFICATION_CHECKPOINT2
flushrefresh_VERIFICATION_DONE
+ filenotclosed.h5
atomic_data
accum_swmr_big.h5
ohdr_swmr.h5
@@ -783,6 +784,22 @@ set_tests_properties (H5TEST-cache_image PROPERTIES
WORKING_DIRECTORY ${HDF5_TEST_BINARY_DIR}/H5TEST
)
+#-- Adding test for filenotclosed
+add_test (
+ NAME H5TEST-clear-filenotclosed-objects
+ COMMAND ${CMAKE_COMMAND}
+ -E remove
+ filenotclosed.h5
+ WORKING_DIRECTORY
+ ${HDF5_TEST_BINARY_DIR}/H5TEST
+)
+add_test (NAME H5TEST-filenotclosed COMMAND $<TARGET_FILE:filenotclosed>)
+set_tests_properties (H5TEST-filenotclosed PROPERTIES
+ DEPENDS H5TEST-clear-filenotclosed-objects
+ ENVIRONMENT "srcdir=${HDF5_TEST_BINARY_DIR}/H5TEST"
+ WORKING_DIRECTORY ${HDF5_TEST_BINARY_DIR}/H5TEST
+)
+
#-- Adding test for err_compat
if (HDF5_ENABLE_DEPRECATED_SYMBOLS)
add_test (NAME H5TEST-clear-err_compat-objects
diff --git a/test/Makefile.am b/test/Makefile.am
index 20b63f6..f64b6d8 100644
--- a/test/Makefile.am
+++ b/test/Makefile.am
@@ -25,14 +25,15 @@ AM_CPPFLAGS+=-I$(top_srcdir)/src -I$(top_builddir)/src
# testerror.sh: err_compat, error_test
# testlibinfo.sh:
# testcheck_version.sh: tcheck_version
-# tetlinks_env.sh: links_env
+# testlinks_env.sh: links_env
+# test_filenotclosed.sh: filenotclosed.c
# testflushrefresh.sh: flushrefresh
# test_usecases.sh: use_append_chunk, use_append_mchunks, use_disable_mdc_flushes
# testswmr.sh: swmr*
# testvdsswmr.sh: vds_swmr*
-TEST_SCRIPT = testerror.sh testlibinfo.sh testcheck_version.sh testlinks_env.sh \
+TEST_SCRIPT = testerror.sh testlibinfo.sh testcheck_version.sh testlinks_env.sh test_filenotclosed.sh\
testswmr.sh testvdsswmr.sh testflushrefresh.sh test_usecases.sh
-SCRIPT_DEPEND = error_test$(EXEEXT) err_compat$(EXEEXT) links_env$(EXEEXT) \
+SCRIPT_DEPEND = error_test$(EXEEXT) err_compat$(EXEEXT) links_env$(EXEEXT) test_filenotclosed$(EXEEXT) \
flushrefresh$(EXEEXT) use_append_chunk$(EXEEXT) use_append_mchunks$(EXEEXT) use_disable_mdc_flushes$(EXEEXT) \
swmr_generator$(EXEEXT) swmr_reader$(EXEEXT) swmr_writer$(EXEEXT) \
swmr_remove_reader$(EXEEXT) swmr_remove_writer$(EXEEXT) swmr_addrem_writer$(EXEEXT) \
@@ -65,6 +66,7 @@ TEST_PROG= testhdf5 \
# accum_swmr_reader is used by accum.c.
# atomic_writer and atomic_reader are standalone programs.
# links_env is used by testlinks_env.sh
+# filenotclosed is used by test_filenotclosed.sh
# flushrefresh is used by testflushrefresh.sh.
# use_append_chunk, use_append_mchunks and use_disable_mdc_flushes are used by test_usecases.sh
# swmr_* files (besides swmr.c) are used by testswmr.sh.
@@ -74,7 +76,7 @@ TEST_PROG= testhdf5 \
# and this lets automake keep all its test programs in one place.
check_PROGRAMS=$(TEST_PROG) error_test err_compat tcheck_version \
testmeta accum_swmr_reader atomic_writer atomic_reader \
- links_env flushrefresh use_append_chunk use_append_mchunks use_disable_mdc_flushes \
+ links_env filenotclosed flushrefresh use_append_chunk use_append_mchunks use_disable_mdc_flushes \
swmr_generator swmr_start_write swmr_reader swmr_writer swmr_remove_reader \
swmr_remove_writer swmr_addrem_writer swmr_sparse_reader swmr_sparse_writer \
swmr_check_compat_vfd vds_swmr_gen vds_swmr_reader vds_swmr_writer
@@ -216,6 +218,6 @@ use_disable_mdc_flushes_SOURCES=use_disable_mdc_flushes.c
# Temporary files.
DISTCLEANFILES=testerror.sh testlibinfo.sh testcheck_version.sh testlinks_env.sh test_plugin.sh \
- testswmr.sh testvdsswmr.sh test_usecases.sh testflushrefresh.sh
+ testswmr.sh testvdsswmr.sh test_usecases.sh testflushrefresh.sh test_filenotclosed.sh
include $(top_srcdir)/config/conclude.am
diff --git a/test/dsets.c b/test/dsets.c
index c29d18e..0ca08e4 100644
--- a/test/dsets.c
+++ b/test/dsets.c
@@ -51,14 +51,15 @@ const char *FILENAME[] = {
"copy_dcpl_newfile",/* 13 */
"partial_chunks", /* 14 */
"layout_extend", /* 15 */
- "zero_chunk", /* 16 */
+ "zero_chunk", /* 16 */
"chunk_single", /* 17 */
"swmr_non_latest", /* 18 */
"earray_hdr_fd", /* 19 */
"farray_hdr_fd", /* 20 */
"bt2_hdr_fd", /* 21 */
- "storage_size", /* 22 */
+ "storage_size", /* 22 */
"dls_01_strings", /* 23 */
+ "power2up", /* 24 */
NULL
};
#define FILENAME_BUF_SIZE 1024
@@ -11345,6 +11346,97 @@ error:
/*-------------------------------------------------------------------------
+ * Function: test_power2up
+ *
+ * Purpose: Tests that the H5VM_power2up(n) function does not result in an
+ * infinite loop when input n exceeds 2^63. (HDFFV-10217)
+ * H5VM_power2up() is used to calculate the next power of 2 for
+ * a dataset's scaled dimension sizes.
+ *
+ * Return: Success: 0
+ * Failure: -1
+ *
+ * Programmer: Vailin Choi; June 2017
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+test_power2up(hid_t fapl)
+{
+ char filename[FILENAME_BUF_SIZE];
+ hid_t fid = -1; /* File ID */
+ hid_t dcpl = -1; /* Dataset creation property list */
+ hid_t sid = -1; /* Dataspace ID */
+ hid_t did = -1; /* Dataset ID */
+ hsize_t dims[2]; /* Dataset dimension sizes */
+ hsize_t max_dims[2]; /* Maximum dimension sizes */
+ hsize_t chunk_dims[2]; /* Chunk dimensions */
+ hsize_t ext_dims[2]; /* Extended dimension sizes */
+ herr_t status; /* Error status */
+
+ TESTING("the next power of 2");
+
+ h5_fixname(FILENAME[24], fapl, filename, sizeof filename);
+
+ /* Create file */
+ if((fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0) FAIL_STACK_ERROR
+
+ /* Set dims[1] to ((2^63) -1) */
+ dims[0] = 0;
+ dims[1] = ((hsize_t)1 << ((sizeof(hsize_t) * CHAR_BIT) -1)) - 1;
+ max_dims[0] = max_dims[1] = H5S_UNLIMITED;
+ sid = H5Screate_simple(2, dims, max_dims);
+
+ /* Create dataset creation property list */
+ if((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0)
+ TEST_ERROR
+
+ /* Set chunk size */
+ chunk_dims[0] = chunk_dims[1] = 1;
+ if(H5Pset_chunk(dcpl, 2, chunk_dims) < 0)
+ TEST_ERROR
+
+ /* Create chunked dataset */
+ if((did = H5Dcreate2(fid, "dset", H5T_NATIVE_INT64, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0)
+ TEST_ERROR
+
+ ext_dims[0] = 1;
+ ext_dims[1] = dims[1] + 5;
+
+ /* Extend to (2^63)+ */
+ H5E_BEGIN_TRY {
+ status = H5Dset_extent(did, ext_dims);
+ } H5E_END_TRY;
+ if(status >= 0)
+ TEST_ERROR
+
+ /* Closing */
+ if(H5Dclose(did) < 0)
+ TEST_ERROR
+ if(H5Sclose(sid) < 0)
+ TEST_ERROR
+ if(H5Pclose(dcpl) < 0)
+ TEST_ERROR
+ if(H5Fclose(fid) < 0)
+ TEST_ERROR
+
+ PASSED();
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY {
+ H5Pclose(dcpl);
+ H5Dclose(did);
+ H5Sclose(sid);
+ H5Fclose(fid);
+ } H5E_END_TRY;
+ return -1;
+} /* end test_power2up() */
+
+
+/*-------------------------------------------------------------------------
* Function: test_scatter
*
* Purpose: Tests H5Dscatter with a variety of different selections
@@ -12928,6 +13020,7 @@ main(void)
nerrors += (test_large_chunk_shrink(my_fapl) < 0 ? 1 : 0);
nerrors += (test_zero_dim_dset(my_fapl) < 0 ? 1 : 0);
nerrors += (test_storage_size(my_fapl) < 0 ? 1 : 0);
+ nerrors += (test_power2up(my_fapl) < 0 ? 1 : 0);
nerrors += (test_swmr_non_latest(envval, my_fapl) < 0 ? 1 : 0);
nerrors += (test_earray_hdr_fd(envval, my_fapl) < 0 ? 1 : 0);
diff --git a/test/fheap.c b/test/fheap.c
index 4be6cb9..6c3a8ac 100644
--- a/test/fheap.c
+++ b/test/fheap.c
@@ -16377,6 +16377,16 @@ main(void)
unsigned nerrors = 0; /* Cumulative error count */
unsigned num_pb_fs = 1; /* The number of settings to test for page buffering and file space handling */
int ExpressMode; /* Express testing level */
+ const char *envval; /* Environment variable */
+ hbool_t contig_addr_vfd; /* Whether VFD used has a contiguous address space */
+
+ /* Don't run this test using certain file drivers */
+ envval = HDgetenv("HDF5_DRIVER");
+ if(envval == NULL)
+ envval = "nomatch";
+
+ /* Current VFDs (split and multi) that do not support a contiguous address space */
+ contig_addr_vfd = (hbool_t)(HDstrcmp(envval, "split") && HDstrcmp(envval, "multi"));
/* Reset library */
h5_reset();
@@ -16428,6 +16438,12 @@ main(void)
shared_wobj_g[u] = (unsigned char)u;
for(v = 0; v < num_pb_fs; v++) {
+ /* Skip test when:
+ a) multi/split drivers and
+ b) persisting free-space or using paged aggregation strategy
+ because the library will temporarily fail file creation under the above conditions */
+ if(!contig_addr_vfd && v)
+ break;
if((fcpl = H5Pcopy(def_fcpl)) < 0)
TEST_ERROR
diff --git a/test/filenotclosed.c b/test/filenotclosed.c
new file mode 100644
index 0000000..d1f468a
--- /dev/null
+++ b/test/filenotclosed.c
@@ -0,0 +1,146 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * Copyright by the Board of Trustees of the University of Illinois. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/*
+ * Purpose: Test to verify that the assertion/abort failure is fixed when the
+ * application does not close the file. (See HDFFV-10160)
+ */
+
+
+#include "h5test.h"
+
+#define FILENAME "filenotclosed"
+#define DATASET "dset"
+
+/*-------------------------------------------------------------------------
+ * Function: catch_signal
+ *
+ * Purpose: The signal handler to catch the SIGABRT signal.
+ *
+ * Return: No return
+ *
+ * Programmer: Vailin Choi
+ *
+ *-------------------------------------------------------------------------
+ */
+static void catch_signal(int H5_ATTR_UNUSED signo)
+{
+ HDexit(1);
+} /* catch_signal() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: main
+ *
+ * Purpose: Test to verify the following problem described in HDFFV-10160 is fixed:
+ * "a.out: H5Fint.c:1679: H5F_close: Assertion `f->file_id > 0' failed."
+ *
+ * Return: Success: exit(EXIT_SUCCESS)
+ * Failure: exit(EXIT_FAILURE)
+ *
+ * Programmer: Vailin Choi; June 2017
+ *
+ *-------------------------------------------------------------------------
+ */
+int
+main(void)
+{
+ hid_t fapl = -1; /* File access property lists */
+ hid_t fid = -1; /* File ID */
+ hid_t did = -1; /* Dataset ID */
+ hid_t dcpl = -1; /* Dataset creation property list */
+ hid_t sid = -1; /* Dataspace ID */
+ hsize_t cur_dim[1] = {5}; /* Current dimension sizes */
+ hsize_t max_dim[1] = {H5S_UNLIMITED}; /* Maximum dimension sizes */
+ hsize_t chunk_dim[1] = {10}; /* Chunk dimension sizes */
+ int buf[5] = {1, 2, 3, 4, 5}; /* The data to be written to the dataset */
+ char filename[100]; /* File name */
+ const char *env_h5_drvr; /* File Driver value from environment */
+ hbool_t contig_addr_vfd; /* Contiguous address vfd */
+
+ /* Get the VFD to use */
+ env_h5_drvr = HDgetenv("HDF5_DRIVER");
+ if(env_h5_drvr == NULL)
+ env_h5_drvr = "nomatch";
+
+ /* Skip test when using VFDs that have different address spaces for each
+ * type of metadata allocation.
+ * Further investigation is needed to resolve the test failure with the
+ * split/multi driver. Please see HDFFV-10160.
+ */
+ contig_addr_vfd = (hbool_t)(HDstrcmp(env_h5_drvr, "split") && HDstrcmp(env_h5_drvr, "multi"));
+ if(!contig_addr_vfd) {
+ SKIPPED();
+ puts(" Temporary skipped for a spilt/multi driver");
+ HDexit(EXIT_SUCCESS);
+ }
+
+ h5_reset();
+
+ /* Install a signal handler so the test exits cleanly if SIGABRT is raised */
+ if(HDsignal(SIGABRT, catch_signal) == SIG_ERR)
+ TEST_ERROR
+
+ fapl = h5_fileaccess();
+ h5_fixname(FILENAME, fapl, filename, sizeof(filename));
+
+ /* Set to latest format */
+ if(H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0)
+ TEST_ERROR
+
+ /* Create the file */
+ if((fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0)
+ TEST_ERROR
+
+ /* Create the dcpl and set the chunk size */
+ if((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0)
+ TEST_ERROR
+
+ if(H5Pset_chunk(dcpl, 1, chunk_dim) < 0)
+ TEST_ERROR
+
+ /* Create the dataspace */
+ if((sid = H5Screate_simple(1, cur_dim, max_dim)) < 0)
+ TEST_ERROR
+
+ /* Create the dataset */
+ if((did = H5Dcreate2(fid, "dset", H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0)
+ TEST_ERROR
+
+ /* Write to the dataset */
+ if(H5Dwrite(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf) < 0)
+ TEST_ERROR
+
+ /* Close the dataset */
+ if(H5Dclose(did) < 0)
+ TEST_ERROR
+
+ /* Close the dataspace */
+ if(H5Sclose(sid) < 0)
+ TEST_ERROR
+
+ /* Close the property lists */
+ if(H5Pclose(dcpl) < 0)
+ TEST_ERROR
+ if(H5Pclose(fapl) < 0)
+ TEST_ERROR
+
+ /* The file is not closed. */
+ /* The library will call H5_term_library to shut down the library. */
+
+ HDexit(EXIT_SUCCESS);
+
+error:
+ HDputs("*** TEST FAILED ***");
+ HDexit(EXIT_FAILURE);
+}
diff --git a/test/h5test.c b/test/h5test.c
index 8db4388..af45589 100644
--- a/test/h5test.c
+++ b/test/h5test.c
@@ -104,6 +104,68 @@ static herr_t h5_errors(hid_t estack, void *client_data);
static char * h5_fixname_real(const char *base_name, hid_t fapl, const char *suffix,
char *fullname, size_t size, hbool_t nest_printf);
+
+
+/* A non-usable VFD class and its functions.
+ *
+ * Usable for testing things like ID handling where we shouldn't mess with the real VFDs.
+ */
+static H5FD_t *dummy_vfd_open(const char *name, unsigned flags, hid_t fapl_id, haddr_t maxaddr);
+static H5FD_t *dummy_vfd_open(const char H5_ATTR_UNUSED *name, unsigned H5_ATTR_UNUSED flags, hid_t H5_ATTR_UNUSED fapl_id, haddr_t H5_ATTR_UNUSED maxaddr) { return NULL; }
+
+static herr_t dummy_vfd_close(H5FD_t *_file);
+static herr_t dummy_vfd_close(H5FD_t H5_ATTR_UNUSED *_file) { return FAIL; }
+
+static haddr_t dummy_vfd_get_eoa(const H5FD_t *file, H5FD_mem_t type);
+static haddr_t dummy_vfd_get_eoa(const H5FD_t H5_ATTR_UNUSED *file, H5FD_mem_t H5_ATTR_UNUSED type) { return HADDR_UNDEF; }
+
+static herr_t dummy_vfd_set_eoa(H5FD_t *_file, H5FD_mem_t type, haddr_t addr);
+static herr_t dummy_vfd_set_eoa(H5FD_t H5_ATTR_UNUSED *_file, H5FD_mem_t H5_ATTR_UNUSED type, haddr_t H5_ATTR_UNUSED addr) { return FAIL; }
+
+static haddr_t dummy_vfd_get_eof(const H5FD_t *file, H5FD_mem_t type);
+static haddr_t dummy_vfd_get_eof(const H5FD_t H5_ATTR_UNUSED *file, H5FD_mem_t H5_ATTR_UNUSED type) { return HADDR_UNDEF; }
+
+static herr_t dummy_vfd_read(H5FD_t *_file, H5FD_mem_t type, hid_t fapl_id, haddr_t addr, size_t size, void *buf);
+static herr_t dummy_vfd_read(H5FD_t H5_ATTR_UNUSED *_file, H5FD_mem_t H5_ATTR_UNUSED type, hid_t H5_ATTR_UNUSED fapl_id, haddr_t H5_ATTR_UNUSED addr, size_t H5_ATTR_UNUSED size, void H5_ATTR_UNUSED *buf) { return FAIL; }
+
+static herr_t dummy_vfd_write(H5FD_t *_file, H5FD_mem_t type, hid_t fapl_id, haddr_t addr, size_t size, const void *buf);
+static herr_t dummy_vfd_write(H5FD_t H5_ATTR_UNUSED *_file, H5FD_mem_t H5_ATTR_UNUSED type, hid_t H5_ATTR_UNUSED fapl_id, haddr_t H5_ATTR_UNUSED addr, size_t H5_ATTR_UNUSED size, const void H5_ATTR_UNUSED *buf) { return FAIL; }
+
+static H5FD_class_t dummy_vfd_class_g = {
+ "fake", /* name */
+ 1, /* maxaddr */
+ H5F_CLOSE_WEAK, /* fc_degree */
+ NULL, /* terminate */
+ NULL, /* sb_size */
+ NULL, /* sb_encode */
+ NULL, /* sb_decode */
+ 0, /* fapl_size */
+ NULL, /* fapl_get */
+ NULL, /* fapl_copy */
+ NULL, /* fapl_free */
+ 0, /* dxpl_size */
+ NULL, /* dxpl_copy */
+ NULL, /* dxpl_free */
+ dummy_vfd_open, /* open */
+ dummy_vfd_close, /* close */
+ NULL, /* cmp */
+ NULL, /* query */
+ NULL, /* get_type_map */
+ NULL, /* alloc */
+ NULL, /* free */
+ dummy_vfd_get_eoa, /* get_eoa */
+ dummy_vfd_set_eoa, /* set_eoa */
+ dummy_vfd_get_eof, /* get_eof */
+ NULL, /* get_handle */
+ dummy_vfd_read, /* read */
+ dummy_vfd_write, /* write */
+ NULL, /* flush */
+ NULL, /* truncate */
+ NULL, /* lock */
+ NULL, /* unlock */
+ H5FD_FLMAP_DEFAULT /* fl_map */
+};
+
/*-------------------------------------------------------------------------
* Function: h5_errors
@@ -1828,3 +1890,41 @@ error:
return FAIL;
} /* h5_wait_message() */
+/*-------------------------------------------------------------------------
+ * Function: h5_get_dummy_vfd_class()
+ *
+ * Purpose: Returns a disposable, generally non-functional,
+ * VFD class struct.
+ *
+ * In some of the test code, we need a disposable VFD but
+ * we don't want to mess with the real VFDs and we also
+ * don't have access to the internals of the real VFDs (which
+ * use static globals and functions) to easily duplicate
+ * them (e.g.: for testing VFD ID handling).
+ *
+ * This API call will return a pointer to a VFD class that
+ * can be used to construct a test VFD using H5FDregister().
+ *
+ * Return: Success: A pointer to a VFD class struct
+ * Failure: NULL
+ *
+ *-------------------------------------------------------------------------
+ */
+H5FD_class_t *
+h5_get_dummy_vfd_class(void)
+{
+ H5FD_class_t *vfd_class = NULL;
+
+ if(NULL == (vfd_class = (H5FD_class_t *)HDmalloc(sizeof(H5FD_class_t))))
+ TEST_ERROR;
+
+ HDmemcpy(vfd_class, &dummy_vfd_class_g, sizeof(H5FD_class_t));
+
+ return vfd_class;
+
+error:
+ if(vfd_class)
+ HDfree(vfd_class);
+ return NULL;
+} /* h5_get_dummy_vfd_class */
+
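
A short usage sketch of the new helper (test_misc19 later in this same patch exercises the same sequence; shown here only as a hedged outline):

    H5FD_class_t *cls = h5_get_dummy_vfd_class();
    hid_t         driver_id;

    /* Register the throwaway class to obtain a driver ID for ID-handling tests */
    driver_id = H5FDregister(cls);

    /* ... exercise H5Iget_ref / H5Iinc_ref / H5Idec_ref on driver_id ... */

    H5FDunregister(driver_id);
    HDfree(cls);    /* the caller owns the malloc'ed class struct */
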
diff --git a/test/h5test.h b/test/h5test.h
index 0e23255..ce5c64d 100644
--- a/test/h5test.h
+++ b/test/h5test.h
@@ -136,6 +136,7 @@ H5TEST_DLL h5_stat_size_t h5_get_file_size(const char *filename, hid_t fapl);
H5TEST_DLL int print_func(const char *format, ...);
H5TEST_DLL int h5_make_local_copy(const char *origfilename, const char *local_copy_name);
H5TEST_DLL herr_t h5_verify_cached_stabs(const char *base_name[], hid_t fapl);
+H5TEST_DLL H5FD_class_t *h5_get_dummy_vfd_class(void);
/* Functions that will replace VFD-dependent functions that violate
* the single responsibility principle. Unlike their predecessors,
diff --git a/test/plugin.c b/test/plugin.c
index 1254e9a..3034c0b 100644
--- a/test/plugin.c
+++ b/test/plugin.c
@@ -733,7 +733,10 @@ test_filter_path_apis(void)
if(H5Zfilter_avail(H5Z_FILTER_DYNLIB1) != TRUE) TEST_ERROR
+ TESTING(" initialize");
H5PLsize(&ndx);
+ if(ndx!=2) TEST_ERROR
+ PASSED();
TESTING(" remove");
/* Remove all existing paths*/
diff --git a/test/test_filenotclosed.sh.in b/test/test_filenotclosed.sh.in
new file mode 100644
index 0000000..0b43c5b
--- /dev/null
+++ b/test/test_filenotclosed.sh.in
@@ -0,0 +1,41 @@
+#! /bin/sh
+#
+# Copyright by The HDF Group.
+# Copyright by the Board of Trustees of the University of Illinois.
+# All rights reserved.
+#
+# This file is part of HDF5. The full HDF5 copyright notice, including
+# terms governing use, modification, and redistribution, is contained in
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
+#
+# Test to verify that the assertion/abort failure is fixed when the application
+# does not close the file. (See HDFFV-10160)
+
+srcdir=@srcdir@
+
+nerrors=0
+
+##############################################################################
+##############################################################################
+### T H E T E S T ###
+##############################################################################
+##############################################################################
+
+echo "Testing file not closed assertion/abort failure"
+TEST_NAME=filenotclosed # The test name
+TEST_BIN=`pwd`/$TEST_NAME # The path of the test binary
+#
+# Run the test
+#$RUNSERIAL $TEST_BIN >/dev/null 2>&1
+$RUNSERIAL $TEST_BIN 2>&1
+exitcode=$?
+if [ $exitcode -eq 0 ]; then
+ echo "Test PASSED"
+else
+ nerrors="`expr $nerrors + 1`"
+ echo "***Error encountered***"
+fi
+exit $nerrors
diff --git a/test/tfile.c b/test/tfile.c
index 533bb24..027ad62 100644
--- a/test/tfile.c
+++ b/test/tfile.c
@@ -109,6 +109,7 @@
#define TEST_THRESHOLD10 10 /* Free space section threshold */
#define FSP_SIZE_DEF 4096 /* File space page size default */
#define FSP_SIZE512 512 /* File space page size */
+#define FSP_SIZE1G (1024*1024*1024) /* File space page size - 1 gigabyte */
/* Declaration for test_libver_macros2() */
#define FILE6 "tfile6.h5" /* Test file */
@@ -3581,6 +3582,9 @@ test_filespace_info(const char *env_h5_drvr)
* Setting value less than 512 will return an error;
* --setting file space page size to 0
* --setting file space page size to 511
+ *
+ * File space page size has a maximum size of 1 gigabyte.
+ * Setting value greater than 1 gigabyte will return an error.
*/
/* Create file creation property list template */
fcpl = H5Pcreate(H5P_FILE_CREATE);
@@ -3598,6 +3602,12 @@ test_filespace_info(const char *env_h5_drvr)
} H5E_END_TRY;
VERIFY(ret, FAIL, "H5Pset_file_space_page_size");
+ /* Setting to 1GB+1: should fail */
+ H5E_BEGIN_TRY {
+ ret = H5Pset_file_space_page_size(fcpl, FSP_SIZE1G+1);
+ } H5E_END_TRY;
+ VERIFY(ret, FAIL, "H5Pset_file_space_page_size");
+
/* Setting to 512: should succeed */
ret = H5Pset_file_space_page_size(fcpl, FSP_SIZE512);
CHECK(ret, FAIL, "H5Pset_file_space_page_size");
@@ -3605,6 +3615,13 @@ test_filespace_info(const char *env_h5_drvr)
CHECK(ret, FAIL, "H5Pget_file_space_page_size");
VERIFY(fsp_size, FSP_SIZE512, "H5Pget_file_space_page_size");
+ /* Setting to 1GB: should succeed */
+ ret = H5Pset_file_space_page_size(fcpl, FSP_SIZE1G);
+ CHECK(ret, FAIL, "H5Pset_file_space_page_size");
+ ret = H5Pget_file_space_page_size(fcpl, &fsp_size);
+ CHECK(ret, FAIL, "H5Pget_file_space_page_size");
+ VERIFY(fsp_size, FSP_SIZE1G, "H5Pget_file_space_page_size");
+
/* Close property list */
H5Pclose(fcpl);
diff --git a/test/tmisc.c b/test/tmisc.c
index dc69e18..708ca9b 100644
--- a/test/tmisc.c
+++ b/test/tmisc.c
@@ -29,7 +29,7 @@
#include "hdf5.h"
#include "testhdf5.h"
#include "H5srcdir.h"
-#include "H5Dpkg.h" /* Datasets */
+#include "H5Dpkg.h" /* Datasets */
/* Definitions for misc. test #1 */
#define MISC1_FILE "tmisc1.h5"
@@ -178,17 +178,12 @@ typedef struct
#define MISC13_GROUP1_NAME "Group1"
#define MISC13_GROUP2_NAME "Group2"
#define MISC13_DTYPE_NAME "Datatype"
-#define MISC13_RANK 2
-#define MISC13_DIM1 20
-#define MISC13_DIM2 30
+#define MISC13_RANK 1
+#define MISC13_DIM1 600
#define MISC13_CHUNK_DIM1 10
-#define MISC13_CHUNK_DIM2 15
#define MISC13_USERBLOCK_SIZE 512
#define MISC13_COPY_BUF_SIZE 4096
-unsigned m13_data[MISC13_DIM1][MISC13_DIM2]; /* Data to write to dataset */
-unsigned m13_rdata[MISC13_DIM1][MISC13_DIM2]; /* Data read from dataset */
-
/* Definitions for misc. test #14 */
#define MISC14_FILE "tmisc14.h5"
#define MISC14_DSET1_NAME "Dataset1"
@@ -2083,38 +2078,36 @@ test_misc12(void)
/* Various routines for misc. 13 test */
static void
-init_data(void)
+misc13_init_data(unsigned *original_data)
{
- unsigned u,v; /* Local index variables */
+ unsigned u;
- for(u=0; u<MISC13_DIM1; u++)
- for(v=0; v<MISC13_DIM2; v++)
- m13_data[u][v]=(u*MISC13_DIM2)+v;
+ for(u = 0; u < MISC13_DIM1; u++)
+ original_data[u] = u;
}
-static int
-verify_data(void)
+static hbool_t
+misc13_verify_data_match(const unsigned *original_data, const unsigned *read_data)
{
- unsigned u,v; /* Local index variables */
+ unsigned u;
+
+ for(u = 0; u < MISC13_DIM1; u++)
+ if(original_data[u] != read_data[u])
+ return FALSE;
- for(u=0; u<MISC13_DIM1; u++)
- for(v=0; v<MISC13_DIM2; v++)
- if(m13_data[u][v]!=m13_rdata[u][v])
- return(-1);
- return(0);
+ return TRUE;
}
static void
-create_dataset(hid_t loc_id, const char *name, hid_t dcpl)
+misc13_create_dataset(hid_t loc_id, const char *name, hid_t dcpl, const unsigned *data)
{
- hid_t dsid; /* Dataset ID */
- hid_t sid; /* Dataspace ID */
- hsize_t dims[MISC13_RANK]; /* Dataset dimensions */
- herr_t ret; /* Generic return value */
+ hid_t dsid = -1; /* Dataset ID */
+ hid_t sid = -1; /* Dataspace ID */
+ hsize_t dims[MISC13_RANK]; /* Dataset dimensions */
+ herr_t ret; /* Generic return value */
/* Create dataspace for use with dataset */
dims[0] = MISC13_DIM1;
- dims[1] = MISC13_DIM2;
sid = H5Screate_simple(MISC13_RANK, dims, NULL);
CHECK(sid, FAIL, "H5Screate_simple");
@@ -2123,7 +2116,7 @@ create_dataset(hid_t loc_id, const char *name, hid_t dcpl)
CHECK(dsid, FAIL, "H5Dcreate2");
/* Write some data to dataset */
- ret = H5Dwrite(dsid, H5T_NATIVE_UINT, H5S_ALL, H5S_ALL, H5P_DEFAULT, m13_data);
+ ret = H5Dwrite(dsid, H5T_NATIVE_UINT, H5S_ALL, H5S_ALL, H5P_DEFAULT, data);
CHECK(ret, FAIL, "H5Dwrite");
/* Close the contiguous dataset */
@@ -2133,64 +2126,74 @@ create_dataset(hid_t loc_id, const char *name, hid_t dcpl)
/* Close the dataspace */
ret = H5Sclose(sid);
CHECK(ret, FAIL, "H5Sclose");
-}
+
+} /* end misc13_create_dataset() */
static void
-verify_dataset(hid_t loc_id, const char *name)
+misc13_verify_dataset(hid_t loc_id, const char *name, const unsigned *data)
{
- hid_t dsid; /* Dataset ID */
- herr_t ret; /* Generic return value */
+ unsigned *read_data = NULL; /* Data read back from the dataset */
+ hid_t dsid = -1; /* Dataset ID */
+ herr_t ret; /* Generic return value */
+
+ /* Create a data buffer for the dataset read */
+ read_data = (unsigned *)HDcalloc(MISC13_DIM1, sizeof(unsigned));
+ CHECK(read_data, NULL, "HDcalloc");
/* Open the contiguous dataset in the root group */
dsid = H5Dopen2(loc_id, name, H5P_DEFAULT);
CHECK(dsid, FAIL, "H5Dopen2");
/* Read the data */
- ret = H5Dread(dsid, H5T_NATIVE_UINT, H5S_ALL, H5S_ALL, H5P_DEFAULT, m13_rdata);
+ ret = H5Dread(dsid, H5T_NATIVE_UINT, H5S_ALL, H5S_ALL, H5P_DEFAULT, read_data);
CHECK(ret, FAIL, "H5Dread");
- /* Verify that the data is correct */
- ret=verify_data();
- CHECK(ret, FAIL, "verify_data");
+ /* Verify that the data are correct */
+ ret = misc13_verify_data_match(data, read_data);
+ CHECK(ret, FAIL, "misc13_verify_data_match");
/* Close the contiguous dataset */
ret = H5Dclose(dsid);
CHECK(ret, FAIL, "H5Dclose");
-}
+
+ /* Free the dataset read buffer */
+ HDfree(read_data);
+
+} /* end misc13_verify_dataset() */
static void
-create_hdf_file(const char *name)
+misc13_create_hdf_file(const char *name, const unsigned *data)
{
- hid_t fid; /* File ID */
- hid_t gid,gid2; /* Group IDs */
- hid_t tid; /* Datatype ID */
- hid_t dcpl; /* Dataset creation property list ID */
- hsize_t chunk_dims[MISC13_RANK]; /* Chunk dimensions */
- herr_t ret; /* Generic return value */
+ hid_t fid = -1; /* File ID */
+ hid_t gid1 = -1; /* Group ID (level 1) */
+ hid_t gid2 = -1; /* Group ID (level 2) */
+ hid_t tid = -1; /* Datatype ID */
+ hid_t dcplid = -1; /* Dataset creation property list ID */
+ hsize_t chunk_dims[MISC13_RANK]; /* Chunk dimensions */
+ herr_t ret; /* Generic return value */
/* Create file */
- fid=H5Fcreate(name, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ fid = H5Fcreate(name, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
CHECK(fid, FAIL, "H5Fcreate");
/* Create DCPL for use with datasets */
- dcpl = H5Pcreate(H5P_DATASET_CREATE);
- CHECK(dcpl, FAIL, "H5Pcreate");
+ dcplid = H5Pcreate(H5P_DATASET_CREATE);
+ CHECK(dcplid, FAIL, "H5Pcreate");
/* Set the DCPL to be chunked */
- ret = H5Pset_layout(dcpl, H5D_CHUNKED);
+ ret = H5Pset_layout(dcplid, H5D_CHUNKED);
CHECK(ret, FAIL, "H5Pset_layout");
/* Use chunked storage for this DCPL */
chunk_dims[0] = MISC13_CHUNK_DIM1;
- chunk_dims[1] = MISC13_CHUNK_DIM2;
- ret = H5Pset_chunk(dcpl, MISC13_RANK, chunk_dims);
+ ret = H5Pset_chunk(dcplid, MISC13_RANK, chunk_dims);
CHECK(ret, FAIL, "H5Pset_chunk");
/* Create contiguous dataset in root group */
- create_dataset(fid, MISC13_DSET1_NAME, H5P_DEFAULT);
+ misc13_create_dataset(fid, MISC13_DSET1_NAME, H5P_DEFAULT, data);
/* Create chunked dataset in root group */
- create_dataset(fid, MISC13_DSET2_NAME, dcpl);
+ misc13_create_dataset(fid, MISC13_DSET2_NAME, dcplid, data);
/* Create a datatype to commit to the file */
tid = H5Tcopy(H5T_NATIVE_INT);
@@ -2205,11 +2208,11 @@ create_hdf_file(const char *name)
CHECK(ret, FAIL, "H5Tclose");
/* Create a group in the root group */
- gid = H5Gcreate2(fid, MISC13_GROUP1_NAME, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
- CHECK(gid, FAIL, "H5Gcreate2");
+ gid1 = H5Gcreate2(fid, MISC13_GROUP1_NAME, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(gid1, FAIL, "H5Gcreate2");
/* Create another group in the new group */
- gid2 = H5Gcreate2(gid, MISC13_GROUP2_NAME, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ gid2 = H5Gcreate2(gid1, MISC13_GROUP2_NAME, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
CHECK(gid2, FAIL, "H5Gcreate2");
/* Close the second group */
@@ -2217,17 +2220,17 @@ create_hdf_file(const char *name)
CHECK(ret, FAIL, "H5Gclose");
/* Create contiguous dataset in new group */
- create_dataset(gid, MISC13_DSET1_NAME, H5P_DEFAULT);
+ misc13_create_dataset(gid1, MISC13_DSET1_NAME, H5P_DEFAULT, data);
/* Create chunked dataset in new group */
- create_dataset(gid, MISC13_DSET2_NAME, dcpl);
+ misc13_create_dataset(gid1, MISC13_DSET2_NAME, dcplid, data);
/* Create a datatype to commit to the new group */
tid = H5Tcopy(H5T_NATIVE_INT);
CHECK(tid, FAIL, "H5Tcopy");
/* Create a named datatype in the new group */
- ret = H5Tcommit2(gid, MISC13_DTYPE_NAME, tid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ ret = H5Tcommit2(gid1, MISC13_DTYPE_NAME, tid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
CHECK(ret, FAIL, "H5Tcommit2");
/* Close named datatype */
@@ -2235,25 +2238,26 @@ create_hdf_file(const char *name)
CHECK(ret, FAIL, "H5Tclose");
/* Close the first group */
- ret = H5Gclose(gid);
+ ret = H5Gclose(gid1);
CHECK(ret, FAIL, "H5Gclose");
/* Close the DCPL */
- ret = H5Pclose(dcpl);
+ ret = H5Pclose(dcplid);
CHECK(ret, FAIL, "H5Pclose");
/* Close the file */
ret = H5Fclose(fid);
- HDassert(ret >= 0);
CHECK(ret, FAIL, "H5Fclose");
-}
+
+} /* end misc13_create_hdf_file() */
static void
-insert_user_block(const char *old_name, const char *new_name,const char *str,size_t size)
+misc13_insert_user_block(const char *old_name, const char *new_name, const char *str, size_t size)
{
- FILE *new_fp, *old_fp; /* Pointers to new & old files */
- void *user_block; /* Pointer to user block to write to file */
- void *copy_buf; /* Pointer to buffer for copying data */
+ FILE *new_fp = NULL; /* Pointers to new & old files */
+ FILE *old_fp = NULL;
+ void *user_block = NULL; /* Pointer to user block to write to file */
+ void *copy_buf = NULL; /* Pointer to buffer for copying data */
size_t written; /* Amount of data written to new file */
size_t read_in; /* Amount of data read in from old file */
int ret; /* Generic status value */
@@ -2263,10 +2267,10 @@ insert_user_block(const char *old_name, const char *new_name,const char *str,siz
CHECK(user_block, NULL, "HDcalloc");
/* Copy in the user block data */
- HDmemcpy(user_block,str,strlen(str));
+ HDmemcpy(user_block, str, strlen(str));
/* Open the new file */
- new_fp=HDfopen(new_name,"wb");
+ new_fp = HDfopen(new_name,"wb");
CHECK(new_fp, NULL, "HDfopen");
/* Write the user block to the new file */
@@ -2274,7 +2278,7 @@ insert_user_block(const char *old_name, const char *new_name,const char *str,siz
VERIFY(written, size, "HDfwrite");
/* Open the old file */
- old_fp=HDfopen(old_name,"rb");
+ old_fp = HDfopen(old_name,"rb");
CHECK(old_fp, NULL, "HDfopen");
/* Allocate space for the copy buffer */
@@ -2286,14 +2290,14 @@ insert_user_block(const char *old_name, const char *new_name,const char *str,siz
/* Write the data to the new file */
written = HDfwrite(copy_buf, (size_t)1, read_in, new_fp);
VERIFY(written, read_in, "HDfwrite");
- } /* end while */
+ }
/* Close the old file */
- ret=HDfclose(old_fp);
+ ret = HDfclose(old_fp);
VERIFY(ret, 0, "HDfclose");
/* Close the new file */
- ret=HDfclose(new_fp);
+ ret = HDfclose(new_fp);
VERIFY(ret, 0, "HDfclose");
/* Free the copy buffer */
@@ -2301,81 +2305,84 @@ insert_user_block(const char *old_name, const char *new_name,const char *str,siz
/* Free the user block */
HDfree(user_block);
-}
+
+} /* end misc13_insert_user_block() */
static void
-verify_file(const char *name, hsize_t blk_size, unsigned check_new_data)
+misc13_verify_file(const char *name, const unsigned *data, hsize_t userblock_size,
+ hbool_t check_for_new_dataset)
{
- hid_t fid; /* File ID */
- hid_t gid,gid2; /* Group IDs */
- hid_t tid; /* Datatype ID */
- hid_t fcpl; /* File creation property list ID */
- hsize_t userblock; /* Userblock size retrieved from FCPL */
- herr_t ret; /* Generic return value */
+ hid_t fid = -1; /* File ID */
+ hid_t gid1 = -1; /* Group IDs */
+ hid_t gid2 = -1; /* Group IDs */
+ hid_t tid = -1; /* Datatype ID */
+ hid_t fcplid = -1; /* File creation property list ID */
+ hsize_t ub_size_out; /* Userblock size retrieved from FCPL */
+ herr_t ret; /* Generic return value */
/* Open the file */
- fid=H5Fopen(name, H5F_ACC_RDONLY, H5P_DEFAULT);
+ fid = H5Fopen(name, H5F_ACC_RDONLY, H5P_DEFAULT);
CHECK(fid, FAIL, "H5Fopen");
/* Get the file's FCPL */
- fcpl=H5Fget_create_plist(fid);
- CHECK(fcpl, FAIL, "H5Fget_create_plist");
+ fcplid = H5Fget_create_plist(fid);
+ CHECK(fcplid, FAIL, "H5Fget_create_plist");
/* Get the user block size for the file */
- ret=H5Pget_userblock(fcpl,&userblock);
+ ret = H5Pget_userblock(fcplid, &ub_size_out);
CHECK(ret, FAIL, "H5Pget_userblock");
/* Check the userblock size */
- VERIFY(userblock, blk_size, "H5Pget_userblock");
+ VERIFY(userblock_size, ub_size_out, "H5Pget_userblock");
/* Close the FCPL */
- ret = H5Pclose(fcpl);
+ ret = H5Pclose(fcplid);
CHECK(ret, FAIL, "H5Pclose");
/* Verify the contiguous dataset in the root group */
- verify_dataset(fid,MISC13_DSET1_NAME);
+ misc13_verify_dataset(fid, MISC13_DSET1_NAME, data);
/* Verify the chunked dataset in the root group */
- verify_dataset(fid,MISC13_DSET2_NAME);
+ misc13_verify_dataset(fid, MISC13_DSET2_NAME, data);
/* Verify the "new" contiguous dataset in the root group, if asked */
- if(check_new_data)
- verify_dataset(fid,MISC13_DSET3_NAME);
+ if(check_for_new_dataset)
+ misc13_verify_dataset(fid, MISC13_DSET3_NAME, data);
/* Open the named datatype in the root group */
tid = H5Topen2(fid, MISC13_DTYPE_NAME, H5P_DEFAULT);
CHECK(tid, FAIL, "H5Topen2");
/* Verify the type is correct */
- VERIFY(H5Tequal(tid,H5T_NATIVE_INT), TRUE, "H5Tequal");
+ VERIFY(H5Tequal(tid, H5T_NATIVE_INT), TRUE, "H5Tequal");
/* Close named datatype */
- ret=H5Tclose(tid);
+ ret = H5Tclose(tid);
CHECK(ret, FAIL, "H5Tclose");
/* Open the first group */
- gid = H5Gopen2(fid, MISC13_GROUP1_NAME, H5P_DEFAULT);
- CHECK(gid, FAIL, "H5Gopen2");
+ gid1 = H5Gopen2(fid, MISC13_GROUP1_NAME, H5P_DEFAULT);
+ CHECK(gid1, FAIL, "H5Gopen2");
/* Verify the contiguous dataset in the first group */
- verify_dataset(gid,MISC13_DSET1_NAME);
+ misc13_verify_dataset(gid1, MISC13_DSET1_NAME, data);
/* Verify the chunked dataset in the first group */
- verify_dataset(gid,MISC13_DSET2_NAME);
+ misc13_verify_dataset(gid1, MISC13_DSET2_NAME, data);
/* Open the named datatype in the first group */
- tid = H5Topen2(gid,MISC13_DTYPE_NAME, H5P_DEFAULT);
+ tid = H5Topen2(gid1, MISC13_DTYPE_NAME, H5P_DEFAULT);
CHECK(tid, FAIL, "H5Topen2");
/* Verify the type is correct */
- VERIFY(H5Tequal(tid,H5T_NATIVE_INT), TRUE, "H5Tequal");
+ VERIFY(H5Tequal(tid, H5T_NATIVE_INT), TRUE, "H5Tequal");
/* Close named datatype */
- ret=H5Tclose(tid);
+ ret = H5Tclose(tid);
CHECK(ret, FAIL, "H5Tclose");
/* Open the second group */
- gid2 = H5Gopen2(gid, MISC13_GROUP2_NAME, H5P_DEFAULT);
+ gid2 = H5Gopen2(gid1, MISC13_GROUP2_NAME, H5P_DEFAULT);
CHECK(gid2, FAIL, "H5Gopen2");
/* Close the second group */
@@ -2383,31 +2390,33 @@ verify_file(const char *name, hsize_t blk_size, unsigned check_new_data)
CHECK(ret, FAIL, "H5Gclose");
/* Close the first group */
- ret = H5Gclose(gid);
+ ret = H5Gclose(gid1);
CHECK(ret, FAIL, "H5Gclose");
/* Close the file */
ret = H5Fclose(fid);
CHECK(ret, FAIL, "H5Fclose");
-}
+
+} /* end misc13_verify_file() */
static void
-add_to_new_file(const char *name)
+misc13_add_to_new_file(const char *name, const unsigned *data)
{
- hid_t fid; /* File ID */
+ hid_t fid = -1; /* File ID */
herr_t ret; /* Generic return value */
/* Open the file */
- fid=H5Fopen(name, H5F_ACC_RDWR, H5P_DEFAULT);
+ fid = H5Fopen(name, H5F_ACC_RDWR, H5P_DEFAULT);
CHECK(fid, FAIL, "H5Fopen");
/* Create new contiguous dataset in root group */
- create_dataset(fid, MISC13_DSET3_NAME, H5P_DEFAULT);
+ misc13_create_dataset(fid, MISC13_DSET3_NAME, H5P_DEFAULT, data);
/* Close the file */
ret = H5Fclose(fid);
CHECK(ret, FAIL, "H5Fclose");
-}
+
+} /* end misc13_add_to_new_file() */
/****************************************************************
**
@@ -2418,26 +2427,44 @@ add_to_new_file(const char *name)
static void
test_misc13(void)
{
+ unsigned *data = NULL; /* Data to write to dataset */
+ hsize_t userblock_size; /* Correct size of userblock */
+ hbool_t check_for_new_dataset; /* Whether to check for the post-userblock-creation dataset */
+
+ /* Create a data buffer for the datasets */
+ data = (unsigned *)HDcalloc(MISC13_DIM1, sizeof(unsigned));
+ CHECK(data, NULL, "HDcalloc");
+
/* Initialize data to write */
- init_data();
+ misc13_init_data(data);
/* Create first file, with no user block */
- create_hdf_file(MISC13_FILE_1);
+ misc13_create_hdf_file(MISC13_FILE_1, data);
/* Verify file contents are correct */
- verify_file(MISC13_FILE_1,(hsize_t)0,0);
+ userblock_size = 0;
+ check_for_new_dataset = FALSE;
+ misc13_verify_file(MISC13_FILE_1, data, userblock_size, check_for_new_dataset);
/* Create a new file by inserting a user block in front of the first file */
- insert_user_block(MISC13_FILE_1, MISC13_FILE_2, "Test String", (size_t)MISC13_USERBLOCK_SIZE);
+ misc13_insert_user_block(MISC13_FILE_1, MISC13_FILE_2, "Test String", (size_t)MISC13_USERBLOCK_SIZE);
/* Verify file contents are still correct */
- verify_file(MISC13_FILE_2,(hsize_t)MISC13_USERBLOCK_SIZE,0);
+ userblock_size = MISC13_USERBLOCK_SIZE;
+ check_for_new_dataset = FALSE;
+ misc13_verify_file(MISC13_FILE_2, data, userblock_size, check_for_new_dataset);
/* Make certain we can modify the new file */
- add_to_new_file(MISC13_FILE_2);
+ misc13_add_to_new_file(MISC13_FILE_2, data);
/* Verify file contents are still correct */
- verify_file(MISC13_FILE_2,(hsize_t)MISC13_USERBLOCK_SIZE,1);
+ userblock_size = MISC13_USERBLOCK_SIZE;
+ check_for_new_dataset = TRUE;
+ misc13_verify_file(MISC13_FILE_2, data, userblock_size, check_for_new_dataset);
+
+ /* Free the dataset buffer */
+ HDfree(data);
+
} /* end test_misc13() */
/****************************************************************
@@ -2984,19 +3011,21 @@ test_misc18(void)
static void
test_misc19(void)
{
- hid_t fid; /* File ID */
- hid_t sid; /* 'Space ID */
- hid_t did; /* Dataset ID */
- hid_t tid; /* 'Type ID */
- hid_t aid; /* Attribute ID */
- hid_t plid; /* Property List ID */
- hid_t pcid; /* Property Class ID */
- hid_t gid; /* Group ID */
- hid_t ecid; /* Error Class ID */
- hid_t emid; /* Error Message ID */
- hid_t esid; /* Error Stack ID */
- int rc; /* Reference count */
- herr_t ret; /* Generic return value */
+ hid_t fid = -1; /* File ID */
+ hid_t sid = -1; /* Dataspace ID */
+ hid_t did = -1; /* Dataset ID */
+ hid_t tid = -1; /* Datatype ID */
+ hid_t aid = -1; /* Attribute ID */
+ hid_t plid = -1; /* Property List ID */
+ hid_t pcid = -1; /* Property Class ID */
+ hid_t gid = -1; /* Group ID */
+ hid_t ecid = -1; /* Error Class ID */
+ hid_t emid = -1; /* Error Message ID */
+ hid_t esid = -1; /* Error Stack ID */
+ hid_t vfdid = -1; /* Virtual File Driver ID */
+ H5FD_class_t *vfd_cls = NULL; /* VFD class */
+ int rc; /* Reference count */
+ herr_t ret; /* Generic return value */
/* Check H5I operations on files */
@@ -3406,6 +3435,45 @@ test_misc19(void)
} H5E_END_TRY;
VERIFY(ret, FAIL, "H5Eclose_stack");
+
+/* Check H5I operations on virtual file drivers */
+
+ /* Get a VFD class to register */
+ vfd_cls = h5_get_dummy_vfd_class();
+ CHECK(vfd_cls, NULL, "h5_get_dummy_vfd_class");
+
+ /* Register a virtual file driver */
+ vfdid = H5FDregister(vfd_cls);
+ CHECK(vfdid, FAIL, "H5FDregister");
+
+ /* Check the reference count */
+ rc = H5Iget_ref(vfdid);
+ VERIFY(rc, 1, "H5Iget_ref");
+
+ /* Increment the reference count */
+ rc = H5Iinc_ref(vfdid);
+ VERIFY(rc, 2, "H5Iinc_ref");
+
+ /* Unregister the VFD normally */
+ ret = H5FDunregister(vfdid);
+ CHECK(ret, FAIL, "H5FDunregister");
+
+ /* Check the reference count */
+ rc = H5Iget_ref(vfdid);
+ VERIFY(rc, 1, "H5Iget_ref");
+
+ /* Unregister the VFD by decrementing the reference count */
+ rc = H5Idec_ref(vfdid);
+ VERIFY(rc, 0, "H5Idec_ref");
+
+ /* Try unregistering the VFD again (should fail) */
+ H5E_BEGIN_TRY {
+ ret = H5FDunregister(vfdid);
+ } H5E_END_TRY;
+ VERIFY(ret, FAIL, "H5FDunregister");
+
+ HDfree(vfd_cls);
+
} /* end test_misc19() */
/****************************************************************
diff --git a/testpar/CMakeLists.txt b/testpar/CMakeLists.txt
index 298d326..e994b65 100644
--- a/testpar/CMakeLists.txt
+++ b/testpar/CMakeLists.txt
@@ -43,6 +43,7 @@ ENDMACRO (ADD_H5P_EXE file)
set (H5P_TESTS
t_mpi
+ t_bigio
t_cache
t_pflush1
t_pflush2
diff --git a/testpar/Makefile.am b/testpar/Makefile.am
index b87c1df..7029bd5 100644
--- a/testpar/Makefile.am
+++ b/testpar/Makefile.am
@@ -23,7 +23,7 @@ AM_CPPFLAGS+=-I$(top_srcdir)/src -I$(top_srcdir)/test
# Test programs. These are our main targets.
#
-TEST_PROG_PARA=t_mpi testphdf5 t_cache t_cache_image t_pflush1 t_pflush2 t_pshutdown t_prestart t_init_term t_shapesame
+TEST_PROG_PARA=t_mpi t_bigio testphdf5 t_cache t_cache_image t_pflush1 t_pflush2 t_pshutdown t_prestart t_init_term t_shapesame
check_PROGRAMS = $(TEST_PROG_PARA)
diff --git a/testpar/t_bigio.c b/testpar/t_bigio.c
new file mode 100644
index 0000000..a4a1323
--- /dev/null
+++ b/testpar/t_bigio.c
@@ -0,0 +1,2153 @@
+
+#include "hdf5.h"
+#include "testphdf5.h"
+#include "H5Dprivate.h" /* For Chunk tests */
+
+// int TestVerbosity = VERBO_LO; /* Default Verbosity is Low */
+
+/* Constants definitions */
+#define MAX_ERR_REPORT 10 /* Maximum number of errors reported */
+
+/* Define some handy debugging shorthands, routines, ... */
+/* debugging tools */
+
+#define MAINPROCESS (!mpi_rank) /* define process 0 as main process */
+
+/* Constants definitions */
+#define RANK 2
+
+#define IN_ORDER 1
+#define OUT_OF_ORDER 2
+
+#define DATASET1 "DSET1"
+#define DATASET2 "DSET2"
+#define DATASET3 "DSET3"
+#define DATASET4 "DSET4"
+#define DATASET5 "DSET5"
+#define DXFER_COLLECTIVE_IO 0x1 /* Collective IO*/
+#define DXFER_INDEPENDENT_IO 0x2 /* Independent IO collectively */
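+/* Just over 2^29 elements; with the 8-byte B_DATATYPE below, a full transfer of this many elements exceeds 4 GiB, the "big I/O" case this test exercises */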
+#define DXFER_BIGCOUNT 536870916
+
+#define HYPER 1
+#define POINT 2
+#define ALL 3
+
+/* Dataset data type. Integers can easily be dumped in octal. */
+typedef hsize_t B_DATATYPE;
+
+int facc_type = FACC_MPIO; /*Test file access type */
+int dxfer_coll_type = DXFER_COLLECTIVE_IO;
+size_t bigcount = DXFER_BIGCOUNT;
+char filename[20] = "bigio_test.h5";
+int nerrors = 0;
+int mpi_size, mpi_rank;
+
+hsize_t space_dim1 = SPACE_DIM1 * 256; // 4096
+hsize_t space_dim2 = SPACE_DIM2;
+
+static void coll_chunktest(const char* filename, int chunk_factor, int select_factor,
+ int api_option, int file_selection, int mem_selection, int mode);
+hid_t create_faccess_plist(MPI_Comm comm, MPI_Info info, int l_facc_type);
+
+/*
+ * Setup the coordinates for point selection.
+ */
+static void
+set_coords(hsize_t start[],
+ hsize_t count[],
+ hsize_t stride[],
+ hsize_t block[],
+ size_t num_points,
+ hsize_t coords[],
+ int order)
+{
+ hsize_t i, j, k = 0, m, n, s1, s2;
+
+ if(OUT_OF_ORDER == order)
+ k = (num_points * RANK) - 1;
+ else if(IN_ORDER == order)
+ k = 0;
+
+ s1 = start[0];
+ s2 = start[1];
+
+ for(i = 0 ; i < count[0]; i++)
+ for(j = 0 ; j < count[1]; j++)
+ for(m = 0 ; m < block[0]; m++)
+ for(n = 0 ; n < block[1]; n++)
+ if(OUT_OF_ORDER == order) {
+ coords[k--] = s2 + (stride[1] * j) + n;
+ coords[k--] = s1 + (stride[0] * i) + m;
+ }
+ else if(IN_ORDER == order) {
+ coords[k++] = s1 + stride[0] * i + m;
+ coords[k++] = s2 + stride[1] * j + n;
+ }
+}
+
+/*
+ * Fill the dataset with trivial data for testing.
+ * Assume dimension rank is 2 and data is stored contiguous.
+ */
+static void
+fill_datasets(hsize_t start[], hsize_t block[], B_DATATYPE * dataset)
+{
+ B_DATATYPE *dataptr = dataset;
+ hsize_t i, j;
+
+ /* put some trivial data in the data_array */
+ for (i=0; i < block[0]; i++){
+ for (j=0; j < block[1]; j++){
+ *dataptr = (B_DATATYPE)((i+start[0])*100 + (j+start[1]+1));
+ dataptr++;
+ }
+ }
+}
+
+/*
+ * Setup the coordinates for point selection.
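+ * Same as set_coords() above, but also dumps the coordinate list when VERBOSE_MED is enabled.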
+ */
+void point_set(hsize_t start[],
+ hsize_t count[],
+ hsize_t stride[],
+ hsize_t block[],
+ size_t num_points,
+ hsize_t coords[],
+ int order)
+{
+ hsize_t i, j, k = 0, m, n, s1, s2;
+
+ HDcompile_assert(RANK == 2);
+
+ if(OUT_OF_ORDER == order)
+ k = (num_points * RANK) - 1;
+ else if(IN_ORDER == order)
+ k = 0;
+
+ s1 = start[0];
+ s2 = start[1];
+
+ for(i = 0 ; i < count[0]; i++)
+ for(j = 0 ; j < count[1]; j++)
+ for(m = 0 ; m < block[0]; m++)
+ for(n = 0 ; n < block[1]; n++)
+ if(OUT_OF_ORDER == order) {
+ coords[k--] = s2 + (stride[1] * j) + n;
+ coords[k--] = s1 + (stride[0] * i) + m;
+ }
+ else if(IN_ORDER == order) {
+ coords[k++] = s1 + stride[0] * i + m;
+ coords[k++] = s2 + stride[1] * j + n;
+ }
+
+ if(VERBOSE_MED) {
+ printf("start[]=(%lu, %lu), count[]=(%lu, %lu), stride[]=(%lu, %lu), block[]=(%lu, %lu), total datapoints=%lu\n",
+ (unsigned long)start[0], (unsigned long)start[1], (unsigned long)count[0], (unsigned long)count[1],
+ (unsigned long)stride[0], (unsigned long)stride[1], (unsigned long)block[0], (unsigned long)block[1],
+ (unsigned long)(block[0] * block[1] * count[0] * count[1]));
+ k = 0;
+ for(i = 0; i < num_points ; i++) {
+ printf("(%d, %d)\n", (int)coords[k], (int)coords[k + 1]);
+ k += 2;
+ }
+ }
+}
+
+/*
+ * Print the content of the dataset.
+ */
+static void
+dataset_print(hsize_t start[], hsize_t block[], B_DATATYPE * dataset)
+{
+ B_DATATYPE *dataptr = dataset;
+ hsize_t i, j;
+
+ /* print the column heading */
+ printf("%-8s", "Cols:");
+ for (j=0; j < block[1]; j++){
+ printf("%3lu ", (unsigned long)(start[1]+j));
+ }
+ printf("\n");
+
+ /* print the slab data */
+ for (i=0; i < block[0]; i++){
+ printf("Row %2lu: ", (unsigned long)(i+start[0]));
+ for (j=0; j < block[1]; j++){
+ printf("%llu ", *dataptr++);
+ }
+ printf("\n");
+ }
+}
+
+
+/*
+ * Verify that the dataset read back matches the original data; return the number of mismatches.
+ */
+static int
+verify_data(hsize_t start[], hsize_t count[], hsize_t stride[], hsize_t block[], B_DATATYPE *dataset, B_DATATYPE *original)
+{
+ hsize_t i, j;
+ int vrfyerrs;
+
+ /* print it if VERBOSE_MED */
+ if(VERBOSE_MED) {
+ printf("verify_data dumping:::\n");
+ printf("start(%lu, %lu), count(%lu, %lu), stride(%lu, %lu), block(%lu, %lu)\n",
+ (unsigned long)start[0], (unsigned long)start[1], (unsigned long)count[0], (unsigned long)count[1],
+ (unsigned long)stride[0], (unsigned long)stride[1], (unsigned long)block[0], (unsigned long)block[1]);
+ printf("original values:\n");
+ dataset_print(start, block, original);
+ printf("compared values:\n");
+ dataset_print(start, block, dataset);
+ }
+
+ vrfyerrs = 0;
+ for (i=0; i < block[0]; i++){
+ for (j=0; j < block[1]; j++){
+ if(*dataset != *original){
+ if(vrfyerrs++ < MAX_ERR_REPORT || VERBOSE_MED){
+ printf("Dataset Verify failed at [%lu][%lu](row %lu, col %lu): expect %llu, got %llu\n",
+ (unsigned long)i, (unsigned long)j,
+ (unsigned long)(i+start[0]), (unsigned long)(j+start[1]),
+ *(original), *(dataset));
+ }
+ }
+ dataset++;
+ original++;
+ }
+ }
+ if(vrfyerrs > MAX_ERR_REPORT && !VERBOSE_MED)
+ printf("[more errors ...]\n");
+ if(vrfyerrs)
+ printf("%d errors found in verify_data\n", vrfyerrs);
+ return(vrfyerrs);
+}
+
+/* Set up the selection */
+static void
+ccslab_set(int mpi_rank,
+ int mpi_size,
+ hsize_t start[],
+ hsize_t count[],
+ hsize_t stride[],
+ hsize_t block[],
+ int mode)
+{
+
+ switch (mode){
+
+ case BYROW_CONT:
+ /* Each process takes a slab of rows. */
+ block[0] = 1;
+ block[1] = 1;
+ stride[0] = 1;
+ stride[1] = 1;
+ count[0] = space_dim1;
+ count[1] = space_dim2;
+ start[0] = mpi_rank*count[0];
+ start[1] = 0;
+
+ break;
+
+ case BYROW_DISCONT:
+ /* Each process takes several disjoint blocks. */
+ block[0] = 1;
+ block[1] = 1;
+ stride[0] = 3;
+ stride[1] = 3;
+ count[0] = space_dim1/(stride[0]*block[0]);
+ count[1] = (space_dim2)/(stride[1]*block[1]);
+ start[0] = space_dim1*mpi_rank;
+ start[1] = 0;
+
+ break;
+
+ case BYROW_SELECTNONE:
+ /* Each process takes a slab of rows; there are
+ no selections for the last process(es). */
+ block[0] = 1;
+ block[1] = 1;
+ stride[0] = 1;
+ stride[1] = 1;
+ count[0] = ((mpi_rank >= MAX(1,(mpi_size-2)))?0:space_dim1);
+ count[1] = space_dim2;
+ start[0] = mpi_rank*count[0];
+ start[1] = 0;
+
+ break;
+
+ case BYROW_SELECTUNBALANCE:
+ /* The first two-thirds of the processes select only the top half
+ of the domain; the rest select the bottom half. */
+
+ block[0] = 1;
+ count[0] = 2;
+ stride[0] = space_dim1*mpi_size/4+1;
+ block[1] = space_dim2;
+ count[1] = 1;
+ start[1] = 0;
+ stride[1] = 1;
+ if((mpi_rank *3)<(mpi_size*2)) start[0] = mpi_rank;
+ else start[0] = 1 + space_dim1*mpi_size/2 + (mpi_rank-2*mpi_size/3);
+ break;
+
+ case BYROW_SELECTINCHUNK:
+ /* Each process will only select one chunk */
+
+ block[0] = 1;
+ count[0] = 1;
+ start[0] = mpi_rank*space_dim1;
+ stride[0]= 1;
+ block[1] = space_dim2;
+ count[1] = 1;
+ stride[1]= 1;
+ start[1] = 0;
+
+ break;
+
+ default:
+ /* Unknown mode. Set it to cover the whole dataset. */
+ block[0] = space_dim1*mpi_size;
+ block[1] = space_dim2;
+ stride[0] = block[0];
+ stride[1] = block[1];
+ count[0] = 1;
+ count[1] = 1;
+ start[0] = 0;
+ start[1] = 0;
+
+ break;
+ }
+ if (VERBOSE_MED){
+ printf("start[]=(%lu,%lu), count[]=(%lu,%lu), stride[]=(%lu,%lu), block[]=(%lu,%lu), total datapoints=%lu\n",
+ (unsigned long)start[0], (unsigned long)start[1], (unsigned long)count[0], (unsigned long)count[1],
+ (unsigned long)stride[0], (unsigned long)stride[1], (unsigned long)block[0], (unsigned long)block[1],
+ (unsigned long)(block[0]*block[1]*count[0]*count[1]));
+ }
+}
+
+
+/*
+ * Fill the dataset with trivial data for testing.
+ * Assume dimension rank is 2.
+ */
+static void
+ccdataset_fill(hsize_t start[],
+ hsize_t stride[],
+ hsize_t count[],
+ hsize_t block[],
+ DATATYPE * dataset,
+ int mem_selection)
+{
+ DATATYPE *dataptr = dataset;
+ DATATYPE *tmptr;
+ hsize_t i,j,k1,k2,k=0;
+ /* put some trivial data in the data_array */
+ tmptr = dataptr;
+
+ /* assign values to the disjoint (two-dimensional) blocks of the data
+ array through the pointer */
+
+ for (k1 = 0; k1 < count[0]; k1++) {
+ for(i = 0; i < block[0]; i++) {
+ for(k2 = 0; k2 < count[1]; k2++) {
+ for(j = 0;j < block[1]; j++) {
+
+ if (ALL != mem_selection) {
+ dataptr = tmptr + ((start[0]+k1*stride[0]+i)*space_dim2+
+ start[1]+k2*stride[1]+j);
+ }
+ else {
+ dataptr = tmptr + k;
+ k++;
+ }
+
+ *dataptr = (DATATYPE)(k1+k2+i+j);
+ }
+ }
+ }
+ }
+}
+
+/*
+ * Print the first block of the content of the dataset.
+ */
+static void
+ccdataset_print(hsize_t start[],
+ hsize_t block[],
+ DATATYPE * dataset)
+
+{
+ DATATYPE *dataptr = dataset;
+ hsize_t i, j;
+
+ /* print the column heading */
+ printf("Print only the first block of the dataset\n");
+ printf("%-8s", "Cols:");
+ for (j=0; j < block[1]; j++){
+ printf("%3lu ", (unsigned long)(start[1]+j));
+ }
+ printf("\n");
+
+ /* print the slab data */
+ for (i=0; i < block[0]; i++){
+ printf("Row %2lu: ", (unsigned long)(i+start[0]));
+ for (j=0; j < block[1]; j++){
+ printf("%03d ", *dataptr++);
+ }
+ printf("\n");
+ }
+}
+
+/*
+ * Verify that the dataset read back matches the original data; return the number of mismatches.
+ */
+static int
+ccdataset_vrfy(hsize_t start[],
+ hsize_t count[],
+ hsize_t stride[],
+ hsize_t block[],
+ DATATYPE *dataset,
+ DATATYPE *original,
+ int mem_selection)
+{
+ hsize_t i, j,k1,k2,k=0;
+ int vrfyerrs;
+ DATATYPE *dataptr,*oriptr;
+
+ /* print it if VERBOSE_MED */
+ if (VERBOSE_MED) {
+ printf("dataset_vrfy dumping:::\n");
+ printf("start(%lu, %lu), count(%lu, %lu), stride(%lu, %lu), block(%lu, %lu)\n",
+ (unsigned long)start[0], (unsigned long)start[1], (unsigned long)count[0], (unsigned long)count[1],
+ (unsigned long)stride[0], (unsigned long)stride[1], (unsigned long)block[0], (unsigned long)block[1]);
+ printf("original values:\n");
+ ccdataset_print(start, block, original);
+ printf("compared values:\n");
+ ccdataset_print(start, block, dataset);
+ }
+
+ vrfyerrs = 0;
+
+ for (k1=0;k1<count[0];k1++) {
+ for(i=0;i<block[0];i++) {
+ for(k2=0; k2<count[1];k2++) {
+ for(j=0;j<block[1];j++) {
+ if (ALL != mem_selection) {
+ dataptr = dataset + ((start[0]+k1*stride[0]+i)*space_dim2+
+ start[1]+k2*stride[1]+j);
+ oriptr = original + ((start[0]+k1*stride[0]+i)*space_dim2+
+ start[1]+k2*stride[1]+j);
+ }
+ else {
+ dataptr = dataset + k;
+ oriptr = original + k;
+ k++;
+ }
+ if (*dataptr != *oriptr){
+ if (vrfyerrs++ < MAX_ERR_REPORT || VERBOSE_MED){
+ printf("Dataset Verify failed at [%lu][%lu]: expect %d, got %d\n",
+ (unsigned long)i, (unsigned long)j,
+ *(oriptr), *(dataptr));
+ }
+ }
+ }
+ }
+ }
+ }
+ if (vrfyerrs > MAX_ERR_REPORT && !VERBOSE_MED)
+ printf("[more errors ...]\n");
+ if (vrfyerrs)
+ printf("%d errors found in ccdataset_vrfy\n", vrfyerrs);
+ return(vrfyerrs);
+}
+
+/*
+ * Example of using the parallel HDF5 library to create two datasets
+ * in one HDF5 file with collective parallel access support.
+ * The Datasets are of sizes (number-of-mpi-processes x dim0) x dim1.
+ * Each process controls only a slab of size dim0 x dim1 within each
+ * dataset. [Note: not so yet. Datasets are of sizes dim0xdim1 and
+ * each process controls a hyperslab within.]
+ */
+
+static void
+dataset_big_write(void)
+{
+
+ hid_t xfer_plist; /* Dataset transfer properties list */
+ hid_t sid; /* Dataspace ID */
+ hid_t file_dataspace; /* File dataspace ID */
+ hid_t mem_dataspace; /* memory dataspace ID */
+ hid_t dataset;
+ hid_t datatype; /* Datatype ID */
+ hsize_t dims[RANK]; /* dataset dim sizes */
+ hsize_t start[RANK]; /* for hyperslab setting */
+ hsize_t count[RANK], stride[RANK]; /* for hyperslab setting */
+ hsize_t block[RANK]; /* for hyperslab setting */
+ hsize_t *coords = NULL;
+ int i;
+ herr_t ret; /* Generic return value */
+ hid_t fid; /* HDF5 file ID */
+ hid_t acc_tpl; /* File access templates */
+ hsize_t h;
+ size_t num_points;
+ B_DATATYPE * wdata;
+
+
+ /* allocate memory for data buffer */
+ wdata = (B_DATATYPE *)malloc(bigcount*sizeof(B_DATATYPE));
+ VRFY((wdata != NULL), "wdata malloc succeeded");
+
+ /* setup file access template */
+ acc_tpl = H5Pcreate (H5P_FILE_ACCESS);
+ VRFY((acc_tpl >= 0), "H5P_FILE_ACCESS");
+ H5Pset_fapl_mpio(acc_tpl, MPI_COMM_WORLD, MPI_INFO_NULL);
+
+ /* create the file collectively */
+ fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, acc_tpl);
+ VRFY((fid >= 0), "H5Fcreate succeeded");
+
+ /* Release file-access template */
+ ret = H5Pclose(acc_tpl);
+ VRFY((ret >= 0), "");
+
+
+ /* Each process takes a slab of rows. */
+ printf("\nTesting Dataset1 write by ROW\n");
+ /* Create a large dataset */
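+ /* Dataset1 is bigcount rows by mpi_size columns; each rank writes a contiguous slab of bigcount/mpi_size rows */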
+ dims[0] = bigcount;
+ dims[1] = mpi_size;
+ sid = H5Screate_simple (RANK, dims, NULL);
+ VRFY((sid >= 0), "H5Screate_simple succeeded");
+ dataset = H5Dcreate2(fid, DATASET1, H5T_NATIVE_LLONG, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ VRFY((dataset >= 0), "H5Dcreate2 succeeded");
+ H5Sclose(sid);
+
+ block[0] = dims[0]/mpi_size;
+ block[1] = dims[1];
+ stride[0] = block[0];
+ stride[1] = block[1];
+ count[0] = 1;
+ count[1] = 1;
+ start[0] = mpi_rank*block[0];
+ start[1] = 0;
+
+ /* create a file dataspace independently */
+ file_dataspace = H5Dget_space (dataset);
+ VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
+ ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
+
+ /* create a memory dataspace independently */
+ mem_dataspace = H5Screate_simple (RANK, block, NULL);
+ VRFY((mem_dataspace >= 0), "");
+
+ /* fill the local slab with some trivial data */
+ fill_datasets(start, block, wdata);
+ MESG("data_array initialized");
+ if(VERBOSE_MED){
+ MESG("data_array created");
+ dataset_print(start, block, wdata);
+ }
+
+ /* set up the collective transfer properties list */
+ xfer_plist = H5Pcreate (H5P_DATASET_XFER);
+ VRFY((xfer_plist >= 0), "H5Pcreate xfer succeeded");
+ ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
+ VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
+ if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret>= 0),"set independent IO collectively succeeded");
+ }
+
+ /* write data collectively */
+ MESG("writeAll by Row");
+ {
+ int j,k =0;
+ for (i=0; i < block[0]; i++){
+ for (j=0; j < block[1]; j++){
+ if(k < 10) {
+ printf("%lld ", wdata[k]);
+ k++;
+ }
+ }
+ }
+ printf("\n");
+ }
+
+ ret = H5Dwrite(dataset, H5T_NATIVE_LLONG, mem_dataspace, file_dataspace,
+ xfer_plist, wdata);
+ VRFY((ret >= 0), "H5Dwrite dataset1 succeeded");
+
+ /* release all temporary handles. */
+ H5Sclose(file_dataspace);
+ H5Sclose(mem_dataspace);
+ H5Pclose(xfer_plist);
+
+ ret = H5Dclose(dataset);
+ VRFY((ret >= 0), "H5Dclose1 succeeded");
+
+
+
+ /* Each process takes a slab of columns. */
+ printf("\nTesting Dataset2 write by COL\n");
+ /* Create a large dataset */
+ dims[0] = bigcount;
+ dims[1] = mpi_size;
+ sid = H5Screate_simple (RANK, dims, NULL);
+ VRFY((sid >= 0), "H5Screate_simple succeeded");
+ dataset = H5Dcreate2(fid, DATASET2, H5T_NATIVE_LLONG, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ VRFY((dataset >= 0), "H5Dcreate2 succeeded");
+ H5Sclose(sid);
+
+ block[0] = dims[0];
+ block[1] = dims[1]/mpi_size;
+ stride[0] = block[0];
+ stride[1] = block[1];
+ count[0] = 1;
+ count[1] = 1;
+ start[0] = 0;
+ start[1] = mpi_rank*block[1];
+
+ /* create a file dataspace independently */
+ file_dataspace = H5Dget_space (dataset);
+ VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
+ ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
+
+ /* create a memory dataspace independently */
+ mem_dataspace = H5Screate_simple (RANK, block, NULL);
+ VRFY((mem_dataspace >= 0), "");
+
+ /* fill the local slab with some trivial data */
+ fill_datasets(start, block, wdata);
+ MESG("data_array initialized");
+ if(VERBOSE_MED){
+ MESG("data_array created");
+ dataset_print(start, block, wdata);
+ }
+
+ /* set up the collective transfer properties list */
+ xfer_plist = H5Pcreate (H5P_DATASET_XFER);
+ VRFY((xfer_plist >= 0), "H5Pcreate xfer succeeded");
+ ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
+ VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
+ if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret>= 0),"set independent IO collectively succeeded");
+ }
+
+ /* write data collectively */
+ MESG("writeAll by Col");
+ {
+ int j,k =0;
+ for (i=0; i < block[0]; i++){
+ for (j=0; j < block[1]; j++){
+ if(k < 10) {
+ printf("%lld ", wdata[k]);
+ k++;
+ }
+ }
+ }
+ printf("\n");
+ }
+
+ ret = H5Dwrite(dataset, H5T_NATIVE_LLONG, mem_dataspace, file_dataspace,
+ xfer_plist, wdata);
+ VRFY((ret >= 0), "H5Dwrite dataset1 succeeded");
+
+ /* release all temporary handles. */
+ H5Sclose(file_dataspace);
+ H5Sclose(mem_dataspace);
+ H5Pclose(xfer_plist);
+
+ ret = H5Dclose(dataset);
+ VRFY((ret >= 0), "H5Dclose1 succeeded");
+
+
+
+ /* ALL selection */
+ printf("\nTesting Dataset3 write select ALL proc 0, NONE others\n");
+ /* Create a large dataset */
+ dims[0] = bigcount;
+ dims[1] = 1;
+ sid = H5Screate_simple (RANK, dims, NULL);
+ VRFY((sid >= 0), "H5Screate_simple succeeded");
+ dataset = H5Dcreate2(fid, DATASET3, H5T_NATIVE_LLONG, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ VRFY((dataset >= 0), "H5Dcreate2 succeeded");
+ H5Sclose(sid);
+
+ /* create a file dataspace independently */
+ file_dataspace = H5Dget_space (dataset);
+ VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
+ if(MAINPROCESS) {
+ ret = H5Sselect_all(file_dataspace);
+ VRFY((ret >= 0), "H5Sset_all succeeded");
+ }
+ else {
+ ret = H5Sselect_none(file_dataspace);
+ VRFY((ret >= 0), "H5Sset_none succeeded");
+ }
+
+ /* create a memory dataspace independently */
+ mem_dataspace = H5Screate_simple (RANK, dims, NULL);
+ VRFY((mem_dataspace >= 0), "");
+ if(!MAINPROCESS) {
+ ret = H5Sselect_none(mem_dataspace);
+ VRFY((ret >= 0), "H5Sset_none succeeded");
+ }
+
+ /* set up the collective transfer properties list */
+ xfer_plist = H5Pcreate (H5P_DATASET_XFER);
+ VRFY((xfer_plist >= 0), "H5Pcreate xfer succeeded");
+ ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
+ VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
+ if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret>= 0),"set independent IO collectively succeeded");
+ }
+
+ /* fill the local slab with some trivial data */
+ fill_datasets(start, dims, wdata);
+ MESG("data_array initialized");
+ if(VERBOSE_MED){
+ MESG("data_array created");
+ }
+
+ /* write data collectively */
+ MESG("writeAll by process 0");
+ {
+ int j,k =0;
+ for (i=0; i < block[0]; i++){
+ for (j=0; j < block[1]; j++){
+ if(k < 10) {
+ printf("%lld ", wdata[k]);
+ k++;
+ }
+ }
+ }
+ printf("\n");
+ }
+
+ ret = H5Dwrite(dataset, H5T_NATIVE_LLONG, mem_dataspace, file_dataspace,
+ xfer_plist, wdata);
+ VRFY((ret >= 0), "H5Dwrite dataset1 succeeded");
+
+ /* release all temporary handles. */
+ H5Sclose(file_dataspace);
+ H5Sclose(mem_dataspace);
+ H5Pclose(xfer_plist);
+
+ ret = H5Dclose(dataset);
+ VRFY((ret >= 0), "H5Dclose1 succeeded");
+
+ /* Point selection */
+ printf("\nTesting Dataset4 write point selection\n");
+ /* Create a large dataset */
+ dims[0] = bigcount;
+ dims[1] = mpi_size * 4;
+ sid = H5Screate_simple (RANK, dims, NULL);
+ VRFY((sid >= 0), "H5Screate_simple succeeded");
+ dataset = H5Dcreate2(fid, DATASET4, H5T_NATIVE_LLONG, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ VRFY((dataset >= 0), "H5Dcreate2 succeeded");
+ H5Sclose(sid);
+
+ block[0] = dims[0]/2;
+ block[1] = 2;
+ stride[0] = dims[0]/2;
+ stride[1] = 2;
+ count[0] = 1;
+ count[1] = 1;
+ start[0] = 0;
+ start[1] = dims[1]/mpi_size * mpi_rank;
+
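+ /* Convert the hyperslab parameters above into an explicit point selection, one coordinate pair per selected element */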
+ num_points = bigcount;
+
+ coords = (hsize_t *)malloc(num_points * RANK * sizeof(hsize_t));
+ VRFY((coords != NULL), "coords malloc succeeded");
+
+ set_coords (start, count, stride, block, num_points, coords, IN_ORDER);
+ /* create a file dataspace */
+ file_dataspace = H5Dget_space (dataset);
+ VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
+ ret = H5Sselect_elements(file_dataspace, H5S_SELECT_SET, num_points, coords);
+ VRFY((ret >= 0), "H5Sselect_elements succeeded");
+
+ if(coords) free(coords);
+
+ fill_datasets(start, block, wdata);
+ MESG("data_array initialized");
+ if(VERBOSE_MED){
+ MESG("data_array created");
+ dataset_print(start, block, wdata);
+ }
+
+ /* create a memory dataspace */
+ mem_dataspace = H5Screate_simple (1, &bigcount, NULL);
+ VRFY((mem_dataspace >= 0), "");
+
+ /* set up the collective transfer properties list */
+ xfer_plist = H5Pcreate (H5P_DATASET_XFER);
+ VRFY((xfer_plist >= 0), "H5Pcreate xfer succeeded");
+ ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
+ VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
+ if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret>= 0),"set independent IO collectively succeeded");
+ }
+
+ ret = H5Dwrite(dataset, H5T_NATIVE_LLONG, mem_dataspace, file_dataspace,
+ xfer_plist, wdata);
+ VRFY((ret >= 0), "H5Dwrite dataset1 succeeded");
+
+ /* release all temporary handles. */
+ H5Sclose(file_dataspace);
+ H5Sclose(mem_dataspace);
+ H5Pclose(xfer_plist);
+
+ ret = H5Dclose(dataset);
+ VRFY((ret >= 0), "H5Dclose1 succeeded");
+
+ /* Irregular selection */
+ /* Need larger memory for data buffer */
+ free(wdata);
+#if 0
+ wdata = (B_DATATYPE *)malloc(bigcount*4*sizeof(B_DATATYPE));
+ VRFY((wdata != NULL), "wdata malloc succeeded");
+
+ printf("\nTesting Dataset5 write irregular selection\n");
+ /* Create a large dataset */
+ dims[0] = bigcount/6;
+ dims[1] = mpi_size * 4;
+ sid = H5Screate_simple (RANK, dims, NULL);
+ VRFY((sid >= 0), "H5Screate_simple succeeded");
+ dataset = H5Dcreate2(fid, DATASET5, H5T_NATIVE_LLONG, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ VRFY((dataset >= 0), "H5Dcreate2 succeeded");
+ H5Sclose(sid);
+
+ /* first select 1 column in this process's slice */
+ block[0] = dims[0];
+ block[1] = 1;
+ stride[0] = block[0];
+ stride[1] = block[1];
+ count[0] = 1;
+ count[1] = 1;
+ start[0] = 0;
+ start[1] = mpi_rank * 4;
+
+ /* create a file dataspace */
+ file_dataspace = H5Dget_space (dataset);
+ VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
+
+ dims[1] = 4;
+ /* create a memory dataspace */
+ mem_dataspace = H5Screate_simple (RANK, dims, NULL);
+ VRFY((mem_dataspace >= 0), "");
+
+ ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
+
+ start[1] = 0;
+ ret = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
+
+ /* select every other row in the process's slice and OR it with
+ the column selection to create an irregular selection */
+ for(h=0 ; h<dims[0] ; h+=2) {
+ block[0] = 1;
+ block[1] = 4;
+ stride[0] = block[0];
+ stride[1] = block[1];
+ count[0] = 1;
+ count[1] = 1;
+ start[0] = h;
+ start[1] = mpi_rank * 4;
+
+ ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_OR, start, stride, count, block);
+ VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
+
+ start[1] = 0;
+ ret = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_OR, start, stride, count, block);
+ VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
+ }
+ printf("Setting up for collective transfer\n");
+ /* set up the collective transfer properties list */
+ xfer_plist = H5Pcreate (H5P_DATASET_XFER);
+ VRFY((xfer_plist >= 0), "H5Pcreate xfer succeeded");
+ ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
+ VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
+ if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret>= 0),"set independent IO collectively succeeded");
+ }
+
+ /* fill the local slab with some trivial data */
+ fill_datasets(start, dims, wdata);
+ MESG("data_array initialized");
+ if(VERBOSE_MED){
+ MESG("data_array created");
+ }
+
+ ret = H5Dwrite(dataset, H5T_NATIVE_LLONG, mem_dataspace, file_dataspace,
+ xfer_plist, wdata);
+ VRFY((ret >= 0), "H5Dwrite dataset1 succeeded");
+
+ /* release all temporary handles. */
+ H5Sclose(file_dataspace);
+ H5Sclose(mem_dataspace);
+ H5Pclose(xfer_plist);
+
+ ret = H5Dclose(dataset);
+ VRFY((ret >= 0), "H5Dclose1 succeeded");
+
+ free(wdata);
+#endif
+ H5Fclose(fid);
+}
+
+/*
+ * Example of using the parallel HDF5 library to read two datasets
+ * in one HDF5 file with collective parallel access support.
+ * The Datasets are of sizes (number-of-mpi-processes x dim0) x dim1.
+ * Each process controls only a slab of size dim0 x dim1 within each
+ * dataset. [Note: not so yet. Datasets are of sizes dim0xdim1 and
+ * each process controls a hyperslab within.]
+ */
+
+static void
+dataset_big_read(void)
+{
+ hid_t fid; /* HDF5 file ID */
+ hid_t acc_tpl; /* File access templates */
+ hid_t xfer_plist; /* Dataset transfer properties list */
+ hid_t file_dataspace; /* File dataspace ID */
+ hid_t mem_dataspace; /* memory dataspace ID */
+ hid_t dataset;
+ B_DATATYPE *rdata = NULL; /* data buffer */
+ B_DATATYPE *wdata = NULL; /* expected data buffer */
+ hsize_t dims[RANK]; /* dataset dim sizes */
+ hsize_t start[RANK]; /* for hyperslab setting */
+ hsize_t count[RANK], stride[RANK]; /* for hyperslab setting */
+ hsize_t block[RANK]; /* for hyperslab setting */
+ int i,j,k;
+ hsize_t h;
+ size_t num_points;
+ hsize_t *coords = NULL;
+ herr_t ret; /* Generic return value */
+
+ /* allocate memory for data buffer */
+ rdata = (B_DATATYPE *)malloc(bigcount*sizeof(B_DATATYPE));
+ VRFY((rdata != NULL), "rdata malloc succeeded");
+ wdata = (B_DATATYPE *)malloc(bigcount*sizeof(B_DATATYPE));
+ VRFY((wdata != NULL), "wdata malloc succeeded");
+
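+ /* Clear the read buffer so stale data cannot mask a failed read */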
+ memset(rdata, 0, bigcount*sizeof(B_DATATYPE));
+
+ /* setup file access template */
+ acc_tpl = H5Pcreate (H5P_FILE_ACCESS);
+ VRFY((acc_tpl >= 0), "H5P_FILE_ACCESS");
+ H5Pset_fapl_mpio(acc_tpl, MPI_COMM_WORLD, MPI_INFO_NULL);
+
+ /* open the file collectively */
+ fid=H5Fopen(filename,H5F_ACC_RDONLY,acc_tpl);
+ VRFY((fid >= 0), "H5Fopen succeeded");
+
+ /* Release file-access template */
+ ret = H5Pclose(acc_tpl);
+ VRFY((ret >= 0), "");
+
+
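+ /* Dataset1 was written by row slabs; read it back by column slabs so each rank reads data written by all ranks */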
+ printf("\nRead Testing Dataset1 by COL\n");
+ dataset = H5Dopen2(fid, DATASET1, H5P_DEFAULT);
+ VRFY((dataset >= 0), "H5Dopen2 succeeded");
+
+ dims[0] = bigcount;
+ dims[1] = mpi_size;
+ /* Each process takes a slab of columns. */
+ block[0] = dims[0];
+ block[1] = dims[1]/mpi_size;
+ stride[0] = block[0];
+ stride[1] = block[1];
+ count[0] = 1;
+ count[1] = 1;
+ start[0] = 0;
+ start[1] = mpi_rank*block[1];
+
+ /* create a file dataspace independently */
+ file_dataspace = H5Dget_space (dataset);
+ VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
+ ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
+
+ /* create a memory dataspace independently */
+ mem_dataspace = H5Screate_simple (RANK, block, NULL);
+ VRFY((mem_dataspace >= 0), "");
+
+ /* fill dataset with test data */
+ fill_datasets(start, block, wdata);
+ MESG("data_array initialized");
+ if(VERBOSE_MED){
+ MESG("data_array created");
+ }
+
+ /* set up the collective transfer properties list */
+ xfer_plist = H5Pcreate (H5P_DATASET_XFER);
+ VRFY((xfer_plist >= 0), "");
+ ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
+ VRFY((ret >= 0), "H5Pcreate xfer succeeded");
+ if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret>= 0),"set independent IO collectively succeeded");
+ }
+
+ /* read data collectively */
+ ret = H5Dread(dataset, H5T_NATIVE_LLONG, mem_dataspace, file_dataspace,
+ xfer_plist, rdata);
+ VRFY((ret >= 0), "H5Dread dataset1 succeeded");
+
+ {
+ k = 0;
+ for (i=0; i < block[0]; i++){
+ for (j=0; j < block[1]; j++){
+ if(k < 10) {
+ printf("%lld ", rdata[k]);
+ k++;
+ }
+ }
+ }
+ printf("\n");
+ }
+
+ /* verify the read data with original expected data */
+ ret = verify_data(start, count, stride, block, rdata, wdata);
+ if(ret) {fprintf(stderr, "verify failed\n"); exit(1);}
+
+ /* release all temporary handles. */
+ H5Sclose(file_dataspace);
+ H5Sclose(mem_dataspace);
+ H5Pclose(xfer_plist);
+ ret = H5Dclose(dataset);
+ VRFY((ret >= 0), "H5Dclose1 succeeded");
+
+
+ printf("\nRead Testing Dataset2 by ROW\n");
+ memset(rdata, 0, bigcount*sizeof(B_DATATYPE));
+ dataset = H5Dopen2(fid, DATASET2, H5P_DEFAULT);
+ VRFY((dataset >= 0), "H5Dopen2 succeeded");
+
+ dims[0] = bigcount;
+ dims[1] = mpi_size;
+ /* Each process takes a slab of rows. */
+ block[0] = dims[0]/mpi_size;
+ block[1] = dims[1];
+ stride[0] = block[0];
+ stride[1] = block[1];
+ count[0] = 1;
+ count[1] = 1;
+ start[0] = mpi_rank*block[0];
+ start[1] = 0;
+
+ /* create a file dataspace independently */
+ file_dataspace = H5Dget_space (dataset);
+ VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
+ ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
+
+ /* create a memory dataspace independently */
+ mem_dataspace = H5Screate_simple (RANK, block, NULL);
+ VRFY((mem_dataspace >= 0), "");
+
+ /* fill dataset with test data */
+ fill_datasets(start, block, wdata);
+ MESG("data_array initialized");
+ if(VERBOSE_MED){
+ MESG("data_array created");
+ }
+
+ /* set up the collective transfer properties list */
+ xfer_plist = H5Pcreate (H5P_DATASET_XFER);
+ VRFY((xfer_plist >= 0), "");
+ ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
+ VRFY((ret >= 0), "H5Pcreate xfer succeeded");
+ if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret>= 0),"set independent IO collectively succeeded");
+ }
+
+ /* read data collectively */
+ ret = H5Dread(dataset, H5T_NATIVE_LLONG, mem_dataspace, file_dataspace,
+ xfer_plist, rdata);
+ VRFY((ret >= 0), "H5Dread dataset2 succeeded");
+
+ {
+ k = 0;
+ for (i=0; i < block[0]; i++){
+ for (j=0; j < block[1]; j++){
+ if(k < 10) {
+ printf("%lld ", rdata[k]);
+ k++;
+ }
+ }
+ }
+ printf("\n");
+ }
+
+ /* verify the read data with original expected data */
+ ret = verify_data(start, count, stride, block, rdata, wdata);
+ if(ret) {fprintf(stderr, "verify failed\n"); exit(1);}
+
+ /* release all temporary handles. */
+ H5Sclose(file_dataspace);
+ H5Sclose(mem_dataspace);
+ H5Pclose(xfer_plist);
+ ret = H5Dclose(dataset);
+ VRFY((ret >= 0), "H5Dclose1 succeeded");
+
+
+ printf("\nRead Testing Dataset3 read select ALL proc 0, NONE others\n");
+ memset(rdata, 0, bigcount*sizeof(B_DATATYPE));
+ dataset = H5Dopen2(fid, DATASET3, H5P_DEFAULT);
+ VRFY((dataset >= 0), "H5Dopen2 succeeded");
+
+ dims[0] = bigcount;
+ dims[1] = 1;
+
+ /* create a file dataspace independently */
+ file_dataspace = H5Dget_space (dataset);
+ VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
+ if(MAINPROCESS) {
+ ret = H5Sselect_all(file_dataspace);
+ VRFY((ret >= 0), "H5Sset_all succeeded");
+ }
+ else {
+ ret = H5Sselect_none(file_dataspace);
+ VRFY((ret >= 0), "H5Sset_none succeeded");
+ }
+
+ /* create a memory dataspace independently */
+ mem_dataspace = H5Screate_simple (RANK, dims, NULL);
+ VRFY((mem_dataspace >= 0), "");
+ if(!MAINPROCESS) {
+ ret = H5Sselect_none(mem_dataspace);
+ VRFY((ret >= 0), "H5Sset_none succeeded");
+ }
+
+ /* fill dataset with test data */
+ fill_datasets(start, dims, wdata);
+ MESG("data_array initialized");
+ if(VERBOSE_MED){
+ MESG("data_array created");
+ }
+
+ /* set up the collective transfer properties list */
+ xfer_plist = H5Pcreate (H5P_DATASET_XFER);
+ VRFY((xfer_plist >= 0), "");
+ ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
+ VRFY((ret >= 0), "H5Pcreate xfer succeeded");
+ if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret>= 0),"set independent IO collectively succeeded");
+ }
+
+ /* read data collectively */
+ ret = H5Dread(dataset, H5T_NATIVE_LLONG, mem_dataspace, file_dataspace,
+ xfer_plist, rdata);
+ VRFY((ret >= 0), "H5Dread dataset3 succeeded");
+
+ {
+ k = 0;
+ for (i=0; i < block[0]; i++){
+ for (j=0; j < block[1]; j++){
+ if(k < 10) {
+ printf("%lld ", rdata[k]);
+ k++;
+ }
+ }
+ }
+ printf("\n");
+ }
+
+ if(MAINPROCESS) {
+ /* verify the read data with original expected data */
+ ret = verify_data(start, count, stride, block, rdata, wdata);
+ if(ret) {fprintf(stderr, "verify failed\n"); exit(1);}
+ }
+
+ /* release all temporary handles. */
+ H5Sclose(file_dataspace);
+ H5Sclose(mem_dataspace);
+ H5Pclose(xfer_plist);
+ ret = H5Dclose(dataset);
+ VRFY((ret >= 0), "H5Dclose1 succeeded");
+
+ printf("\nRead Testing Dataset4 with Point selection\n");
+ dataset = H5Dopen2(fid, DATASET4, H5P_DEFAULT);
+ VRFY((dataset >= 0), "H5Dopen2 succeeded");
+
+ dims[0] = bigcount;
+ dims[1] = mpi_size * 4;
+
+ block[0] = dims[0]/2;
+ block[1] = 2;
+ stride[0] = dims[0]/2;
+ stride[1] = 2;
+ count[0] = 1;
+ count[1] = 1;
+ start[0] = 0;
+ start[1] = dims[1]/mpi_size * mpi_rank;
+
+ fill_datasets(start, block, wdata);
+ MESG("data_array initialized");
+ if(VERBOSE_MED){
+ MESG("data_array created");
+ dataset_print(start, block, wdata);
+ }
+
+ num_points = bigcount;
+
+ coords = (hsize_t *)malloc(num_points * RANK * sizeof(hsize_t));
+ VRFY((coords != NULL), "coords malloc succeeded");
+
+ set_coords (start, count, stride, block, num_points, coords, IN_ORDER);
+ /* create a file dataspace */
+ file_dataspace = H5Dget_space (dataset);
+ VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
+ ret = H5Sselect_elements(file_dataspace, H5S_SELECT_SET, num_points, coords);
+ VRFY((ret >= 0), "H5Sselect_elements succeeded");
+
+ if(coords) free(coords);
+
+ /* create a memory dataspace */
+ mem_dataspace = H5Screate_simple (1, &bigcount, NULL);
+ VRFY((mem_dataspace >= 0), "");
+
+ /* set up the collective transfer properties list */
+ xfer_plist = H5Pcreate (H5P_DATASET_XFER);
+ VRFY((xfer_plist >= 0), "");
+ ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
+ VRFY((ret >= 0), "H5Pcreate xfer succeeded");
+ if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret>= 0),"set independent IO collectively succeeded");
+ }
+
+ /* read data collectively */
+ ret = H5Dread(dataset, H5T_NATIVE_LLONG, mem_dataspace, file_dataspace,
+ xfer_plist, rdata);
+ VRFY((ret >= 0), "H5Dread dataset1 succeeded");
+
+ ret = verify_data(start, count, stride, block, rdata, wdata);
+ if(ret) {fprintf(stderr, "verify failed\n"); exit(1);}
+
+ /* release all temporary handles. */
+ H5Sclose(file_dataspace);
+ H5Sclose(mem_dataspace);
+ H5Pclose(xfer_plist);
+ ret = H5Dclose(dataset);
+ VRFY((ret >= 0), "H5Dclose1 succeeded");
+
+ printf("\nRead Testing Dataset5 with Irregular selection\n");
+ /* Need larger memory for data buffer */
+ free(wdata);
+ free(rdata);
+#if 0
+ wdata = (B_DATATYPE *)malloc(bigcount*4*sizeof(B_DATATYPE));
+ VRFY((wdata != NULL), "wdata malloc succeeded");
+ rdata = (B_DATATYPE *)malloc(bigcount*4*sizeof(B_DATATYPE));
+ VRFY((rdata != NULL), "rdata malloc succeeded");
+
+ dataset = H5Dopen2(fid, DATASET5, H5P_DEFAULT);
+ VRFY((dataset >= 0), "H5Dopen2 succeeded");
+
+ dims[0] = bigcount;
+ dims[1] = mpi_size * 4;
+
+ /* first select 1 column in this process's slice */
+ block[0] = dims[0];
+ block[1] = 1;
+ stride[0] = block[0];
+ stride[1] = block[1];
+ count[0] = 1;
+ count[1] = 1;
+ start[0] = 0;
+ start[1] = mpi_rank * 4;
+
+ /* get file dataspace */
+ file_dataspace = H5Dget_space (dataset);
+ VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
+
+ /* create a memory dataspace */
+ dims[1] = 4;
+ mem_dataspace = H5Screate_simple (RANK, dims, NULL);
+ VRFY((mem_dataspace >= 0), "");
+
+ ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
+
+ start[1] = 0;
+ ret = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
+
+ /* select every other row in the process's slice and OR it with
+ the column selection to create an irregular selection */
+ for(h=0 ; h<dims[0] ; h+=2) {
+ block[0] = 1;
+ block[1] = 4;
+ stride[0] = block[0];
+ stride[1] = block[1];
+ count[0] = 1;
+ count[1] = 1;
+ start[0] = h;
+ start[1] = mpi_rank * 4;
+
+ ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_OR, start, stride, count, block);
+ VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
+
+ start[1] = 0;
+ ret = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_OR, start, stride, count, block);
+ VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
+
+ //fprintf(stderr, "%d: %d - %d\n", mpi_rank, (int)h, (int)H5Sget_select_npoints(mem_dataspace));
+ }
+
+ /* set up the collective transfer properties list */
+ xfer_plist = H5Pcreate (H5P_DATASET_XFER);
+ VRFY((xfer_plist >= 0), "");
+ ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
+ VRFY((ret >= 0), "H5Pcreate xfer succeeded");
+ if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret>= 0),"set independent IO collectively succeeded");
+ }
+
+ /* read data collectively */
+ ret = H5Dread(dataset, H5T_NATIVE_LLONG, mem_dataspace, file_dataspace,
+ xfer_plist, rdata);
+ VRFY((ret >= 0), "H5Dread dataset1 succeeded");
+
+ /* fill dataset with test data */
+ fill_datasets(start, dims, wdata);
+ MESG("data_array initialized");
+ if(VERBOSE_MED){
+ MESG("data_array created");
+ }
+
+
+
+ /* verify the read data with original expected data */
+ block[0] = dims[0];
+ block[1] = 1;
+ stride[0] = block[0];
+ stride[1] = block[1];
+ count[0] = 1;
+ count[1] = 1;
+ start[0] = 0;
+ start[1] = 0;
+ ret = verify_data(start, count, stride, block, rdata, wdata);
+ if(ret) {fprintf(stderr, "verify failed\n"); exit(1);}
+
+ for(h=0 ; h<dims[0] ; h+=2) {
+ block[0] = 1;
+ block[1] = 4;
+ stride[0] = block[0];
+ stride[1] = block[1];
+ count[0] = 1;
+ count[1] = 1;
+ start[0] = h;
+ start[1] = 0;
+ ret = verify_data(start, count, stride, block, rdata, wdata);
+ if(ret) {fprintf(stderr, "verify failed\n"); exit(1);}
+ }
+
+ /* release all temporary handles. */
+ H5Sclose(file_dataspace);
+ H5Sclose(mem_dataspace);
+ H5Pclose(xfer_plist);
+ ret = H5Dclose(dataset);
+ VRFY((ret >= 0), "H5Dclose1 succeeded");
+
+ H5Fclose(fid);
+
+ /* release data buffers */
+ if(rdata) free(rdata);
+ if(wdata) free(wdata);
+#endif
+} /* end dataset_big_read() */
+
+
+/*
+ * Create the appropriate File access property list
+ */
+hid_t
+create_faccess_plist(MPI_Comm comm, MPI_Info info, int l_facc_type)
+{
+ hid_t ret_pl = -1;
+ herr_t ret; /* generic return value */
+ int mpi_rank; /* mpi variables */
+
+ /* need the rank for error checking macros */
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ ret_pl = H5Pcreate (H5P_FILE_ACCESS);
+ VRFY((ret_pl >= 0), "H5P_FILE_ACCESS");
+
+ if (l_facc_type == FACC_DEFAULT)
+ return (ret_pl);
+
+ if (l_facc_type == FACC_MPIO){
+ /* set Parallel access with communicator */
+ ret = H5Pset_fapl_mpio(ret_pl, comm, info);
+ VRFY((ret >= 0), "");
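+ /* Request collective metadata reads and writes on top of the MPI-IO driver */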
+ ret = H5Pset_all_coll_metadata_ops(ret_pl, TRUE);
+ VRFY((ret >= 0), "");
+ ret = H5Pset_coll_metadata_write(ret_pl, TRUE);
+ VRFY((ret >= 0), "");
+ return(ret_pl);
+ }
+
+ if (l_facc_type == (FACC_MPIO | FACC_SPLIT)){
+ hid_t mpio_pl;
+
+ mpio_pl = H5Pcreate (H5P_FILE_ACCESS);
+ VRFY((mpio_pl >= 0), "");
+ /* set Parallel access with communicator */
+ ret = H5Pset_fapl_mpio(mpio_pl, comm, info);
+ VRFY((ret >= 0), "");
+
+ /* setup file access template */
+ ret_pl = H5Pcreate (H5P_FILE_ACCESS);
+ VRFY((ret_pl >= 0), "");
+ /* set Parallel access with communicator */
+ ret = H5Pset_fapl_split(ret_pl, ".meta", mpio_pl, ".raw", mpio_pl);
+ VRFY((ret >= 0), "H5Pset_fapl_split succeeded");
+ H5Pclose(mpio_pl);
+ return(ret_pl);
+ }
+
+ /* unknown file access types */
+ return (ret_pl);
+}
+
+
+/*-------------------------------------------------------------------------
+ * Function: coll_chunk1
+ *
+ * Purpose: Wrapper to test the collective chunk IO for regular JOINT
+ selection with a single chunk
+ *
+ * Return: Success: 0
+ *
+ * Failure: -1
+ *
+ * Programmer: Unknown
+ * July 12th, 2004
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+
+/* ------------------------------------------------------------------------
+ * Descriptions for the selection: One big singular selection inside one chunk
+ * Two dimensions,
+ *
+ * dim1 = space_dim1(5760)*mpi_size
+ * dim2 = space_dim2(3)
+ * chunk_dim1 = dim1
+ * chunk_dim2 = dim2
+ * block = 1 for all dimensions
+ * stride = 1 for all dimensions
+ * count0 = space_dim1(5760)
+ * count1 = space_dim2(3)
+ * start0 = mpi_rank*space_dim1
+ * start1 = 0
+ * ------------------------------------------------------------------------
+ */
+
+void
+coll_chunk1(void)
+{
+ if (MAINPROCESS)
+ printf("coll_chunk1\n");
+
+ coll_chunktest(filename, 1, BYROW_CONT, API_NONE, HYPER, HYPER, OUT_OF_ORDER);
+ coll_chunktest(filename, 1, BYROW_CONT, API_NONE, HYPER, POINT, OUT_OF_ORDER);
+ coll_chunktest(filename, 1, BYROW_CONT, API_NONE, POINT, ALL, OUT_OF_ORDER);
+ coll_chunktest(filename, 1, BYROW_CONT, API_NONE, POINT, POINT, OUT_OF_ORDER);
+ coll_chunktest(filename, 1, BYROW_CONT, API_NONE, POINT, HYPER, OUT_OF_ORDER);
+
+ coll_chunktest(filename, 1, BYROW_CONT, API_NONE, POINT, ALL, IN_ORDER);
+ coll_chunktest(filename, 1, BYROW_CONT, API_NONE, POINT, POINT, IN_ORDER);
+ coll_chunktest(filename, 1, BYROW_CONT, API_NONE, POINT, HYPER, IN_ORDER);
+}
+
+
+/*-------------------------------------------------------------------------
+ * Function: coll_chunk2
+ *
+ * Purpose: Wrapper to test the collective chunk IO for regular DISJOINT
+ selection with a single chunk
+ *
+ * Return: Success: 0
+ *
+ * Failure: -1
+ *
+ * Programmer: Unknown
+ * July 12th, 2004
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+
+ /* ------------------------------------------------------------------------
+ * Descriptions for the selection: many disjoint selections inside one chunk
+ * Two dimensions,
+ *
+ * dim1 = space_dim1*mpi_size(5760)
+ * dim2 = space_dim2(3)
+ * chunk_dim1 = dim1
+ * chunk_dim2 = dim2
+ * block = 1 for all dimensions
+ * stride = 3 for all dimensions
+ * count0 = space_dim1/stride0(5760/3)
+ * count1 = space_dim2/stride(3/3 = 1)
+ * start0 = mpi_rank*space_dim1
+ * start1 = 0
+ *
+ * ------------------------------------------------------------------------
+ */
+void
+coll_chunk2(void)
+{
+ if (MAINPROCESS)
+ printf("coll_chunk2\n");
+
+ coll_chunktest(filename, 1, BYROW_DISCONT, API_NONE, HYPER, HYPER, OUT_OF_ORDER);
+ coll_chunktest(filename, 1, BYROW_DISCONT, API_NONE, HYPER, POINT, OUT_OF_ORDER);
+ coll_chunktest(filename, 1, BYROW_DISCONT, API_NONE, POINT, ALL, OUT_OF_ORDER);
+ coll_chunktest(filename, 1, BYROW_DISCONT, API_NONE, POINT, POINT, OUT_OF_ORDER);
+ coll_chunktest(filename, 1, BYROW_DISCONT, API_NONE, POINT, HYPER, OUT_OF_ORDER);
+
+ coll_chunktest(filename, 1, BYROW_DISCONT, API_NONE, POINT, ALL, IN_ORDER);
+ coll_chunktest(filename, 1, BYROW_DISCONT, API_NONE, POINT, POINT, IN_ORDER);
+ coll_chunktest(filename, 1, BYROW_DISCONT, API_NONE, POINT, HYPER, IN_ORDER);
+}
+
+
+/*-------------------------------------------------------------------------
+ * Function: coll_chunk3
+ *
+ * Purpose: Wrapper to test the collective chunk IO for regular JOINT
+ selection with at least number of 2*mpi_size chunks
+ *
+ * Return: Success: 0
+ *
+ * Failure: -1
+ *
+ * Programmer: Unknown
+ * July 12th, 2004
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+
+/* ------------------------------------------------------------------------
+ * Descriptions for the selection: one singular selection across many chunks
+ * Two dimensions, Num of chunks = 2* mpi_size
+ *
+ * dim1 = space_dim1*mpi_size
+ * dim2 = space_dim2(3)
+ * chunk_dim1 = space_dim1
+ * chunk_dim2 = dim2/2
+ * block = 1 for all dimensions
+ * stride = 1 for all dimensions
+ * count0 = space_dim1
+ * count1 = space_dim2(3)
+ * start0 = mpi_rank*space_dim1
+ * start1 = 0
+ *
+ * ------------------------------------------------------------------------
+ */
+
+void
+coll_chunk3(void)
+{
+ if (MAINPROCESS)
+ printf("coll_chunk3\n");
+
+ coll_chunktest(filename, mpi_size, BYROW_CONT, API_NONE, HYPER, HYPER, OUT_OF_ORDER);
+ coll_chunktest(filename, mpi_size, BYROW_CONT, API_NONE, HYPER, POINT, OUT_OF_ORDER);
+ coll_chunktest(filename, mpi_size, BYROW_CONT, API_NONE, POINT, ALL, OUT_OF_ORDER);
+ coll_chunktest(filename, mpi_size, BYROW_CONT, API_NONE, POINT, POINT, OUT_OF_ORDER);
+ coll_chunktest(filename, mpi_size, BYROW_CONT, API_NONE, POINT, HYPER, OUT_OF_ORDER);
+
+ coll_chunktest(filename, mpi_size, BYROW_CONT, API_NONE, POINT, ALL, IN_ORDER);
+ coll_chunktest(filename, mpi_size, BYROW_CONT, API_NONE, POINT, POINT, IN_ORDER);
+ coll_chunktest(filename, mpi_size, BYROW_CONT, API_NONE, POINT, HYPER, IN_ORDER);
+}
+
+
+//-------------------------------------------------------------------------
+// Borrowed/Modified (slightly) from t_coll_chunk.c
+/*-------------------------------------------------------------------------
+ * Function: coll_chunktest
+ *
+ * Purpose: The real testing routine for regular selections with collective
+ chunked storage, testing both write and read.
+ If anything fails, it may be the read or the write; there is no
+ separate test for each.
+ *
+ * Return: Success: 0
+ *
+ * Failure: -1
+ *
+ * Modifications:
+ * Removed invalid temporary property checks for the API_LINK_HARD and
+ * API_LINK_TRUE cases.
+ * Programmer: Jonathan Kim
+ * Date: 2012-10-10
+ *
+ * Programmer: Unknown
+ * July 12th, 2004
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static void
+coll_chunktest(const char* filename,
+ int chunk_factor,
+ int select_factor,
+ int api_option,
+ int file_selection,
+ int mem_selection,
+ int mode)
+{
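+ /* mode selects whether the point-selection coordinates are generated IN_ORDER or OUT_OF_ORDER */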
+ hid_t file, dataset, file_dataspace, mem_dataspace;
+ hid_t acc_plist,xfer_plist,crp_plist;
+
+ hsize_t dims[RANK], chunk_dims[RANK];
+ int* data_array1 = NULL;
+ int* data_origin1 = NULL;
+
+ hsize_t start[RANK],count[RANK],stride[RANK],block[RANK];
+
+#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
+ unsigned prop_value;
+#endif /* H5_HAVE_INSTRUMENTED_LIBRARY */
+
+ herr_t status;
+ MPI_Comm comm = MPI_COMM_WORLD;
+ MPI_Info info = MPI_INFO_NULL;
+
+ size_t num_points; /* for point selection */
+ hsize_t *coords = NULL; /* for point selection */
+ hsize_t current_dims; /* for point selection */
+ int i;
+
+ /* Create the data space */
+
+ acc_plist = create_faccess_plist(comm,info,facc_type);
+ VRFY((acc_plist >= 0),"");
+
+ file = H5Fcreate(filename,H5F_ACC_TRUNC,H5P_DEFAULT,acc_plist);
+ VRFY((file >= 0),"H5Fcreate succeeded");
+
+ status = H5Pclose(acc_plist);
+ VRFY((status >= 0),"");
+
+ /* setup dimensionality object */
+ dims[0] = space_dim1*mpi_size;
+ dims[1] = space_dim2;
+
+ /* allocate memory for data buffer */
+ data_array1 = (int *)HDmalloc(dims[0] * dims[1] * sizeof(int));
+ VRFY((data_array1 != NULL), "data_array1 malloc succeeded");
+
+ /* set up dimensions of the slab this process accesses */
+ ccslab_set(mpi_rank, mpi_size, start, count, stride, block, select_factor);
+
+ /* set up the coords array selection */
+ num_points = block[0] * block[1] * count[0] * count[1];
+ coords = (hsize_t *)HDmalloc(num_points * RANK * sizeof(hsize_t));
+ VRFY((coords != NULL), "coords malloc succeeded");
+ point_set(start, count, stride, block, num_points, coords, mode);
+
+ file_dataspace = H5Screate_simple(2, dims, NULL);
+ VRFY((file_dataspace >= 0), "file dataspace created succeeded");
+
+ if(ALL != mem_selection) {
+ mem_dataspace = H5Screate_simple(2, dims, NULL);
+ VRFY((mem_dataspace >= 0), "mem dataspace created succeeded");
+ }
+ else {
+ current_dims = num_points;
+ mem_dataspace = H5Screate_simple (1, &current_dims, NULL);
+ VRFY((mem_dataspace >= 0), "mem_dataspace create succeeded");
+ }
+
+ crp_plist = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY((crp_plist >= 0),"");
+
+ /* Set up chunk information. */
+ chunk_dims[0] = dims[0]/chunk_factor;
+
+ /* to decrease the testing time, maintain bigger chunk size */
+ (chunk_factor == 1) ? (chunk_dims[1] = space_dim2) : (chunk_dims[1] = space_dim2/2);
+ status = H5Pset_chunk(crp_plist, 2, chunk_dims);
+ VRFY((status >= 0),"chunk creation property list succeeded");
+
+ dataset = H5Dcreate2(file, DSET_COLLECTIVE_CHUNK_NAME, H5T_NATIVE_INT,
+ file_dataspace, H5P_DEFAULT, crp_plist, H5P_DEFAULT);
+ VRFY((dataset >= 0),"dataset created succeeded");
+
+ status = H5Pclose(crp_plist);
+ VRFY((status >= 0), "");
+
+ /*put some trivial data in the data array */
+ ccdataset_fill(start, stride, count,block, data_array1, mem_selection);
+
+ MESG("data_array initialized");
+
+ switch (file_selection) {
+ case HYPER:
+ status = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((status >= 0),"hyperslab selection succeeded");
+ break;
+
+ case POINT:
+ if (num_points) {
+ status = H5Sselect_elements(file_dataspace, H5S_SELECT_SET, num_points, coords);
+ VRFY((status >= 0),"Element selection succeeded");
+ }
+ else {
+ status = H5Sselect_none(file_dataspace);
+ VRFY((status >= 0),"none selection succeeded");
+ }
+ break;
+
+ case ALL:
+ status = H5Sselect_all(file_dataspace);
+ VRFY((status >= 0), "H5Sselect_all succeeded");
+ break;
+ }
+
+ switch (mem_selection) {
+ case HYPER:
+ status = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((status >= 0),"hyperslab selection succeeded");
+ break;
+
+ case POINT:
+ if (num_points) {
+ status = H5Sselect_elements(mem_dataspace, H5S_SELECT_SET, num_points, coords);
+ VRFY((status >= 0),"Element selection succeeded");
+ }
+ else {
+ status = H5Sselect_none(mem_dataspace);
+ VRFY((status >= 0),"none selection succeeded");
+ }
+ break;
+
+ case ALL:
+ status = H5Sselect_all(mem_dataspace);
+ VRFY((status >= 0), "H5Sselect_all succeeded");
+ break;
+ }
+
+ /* set up the collective transfer property list */
+ xfer_plist = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((xfer_plist >= 0), "");
+
+ status = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
+ VRFY((status>= 0),"MPIO collective transfer property succeeded");
+ if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ status = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((status>= 0),"set independent IO collectively succeeded");
+ }
+
+ switch(api_option){
+ case API_LINK_HARD:
+ status = H5Pset_dxpl_mpio_chunk_opt(xfer_plist,H5FD_MPIO_CHUNK_ONE_IO);
+ VRFY((status>= 0),"collective chunk optimization succeeded");
+ break;
+
+ case API_MULTI_HARD:
+ status = H5Pset_dxpl_mpio_chunk_opt(xfer_plist,H5FD_MPIO_CHUNK_MULTI_IO);
+ VRFY((status>= 0),"collective chunk optimization succeeded ");
+ break;
+
+ case API_LINK_TRUE:
+ status = H5Pset_dxpl_mpio_chunk_opt_num(xfer_plist,2);
+ VRFY((status>= 0),"collective chunk optimization set chunk number succeeded");
+ break;
+
+ case API_LINK_FALSE:
+ status = H5Pset_dxpl_mpio_chunk_opt_num(xfer_plist,6);
+ VRFY((status>= 0),"collective chunk optimization set chunk number succeeded");
+ break;
+
+ case API_MULTI_COLL:
+ status = H5Pset_dxpl_mpio_chunk_opt_num(xfer_plist,8);/* make sure it is using multi-chunk IO */
+ VRFY((status>= 0),"collective chunk optimization set chunk number succeeded");
+ status = H5Pset_dxpl_mpio_chunk_opt_ratio(xfer_plist,50);
+ VRFY((status>= 0),"collective chunk optimization set chunk ratio succeeded");
+ break;
+
+ case API_MULTI_IND:
+ status = H5Pset_dxpl_mpio_chunk_opt_num(xfer_plist,8);/* make sure it is using multi-chunk IO */
+ VRFY((status>= 0),"collective chunk optimization set chunk number succeeded");
+ status = H5Pset_dxpl_mpio_chunk_opt_ratio(xfer_plist,100);
+ VRFY((status>= 0),"collective chunk optimization set chunk ratio succeeded");
+ break;
+
+ default:
+ ;
+ }
+
+#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
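+ /* Insert temporary instrumentation properties so the test can check, after the write, which collective-chunk I/O path the library actually took */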
+ if(facc_type == FACC_MPIO) {
+ switch(api_option) {
+ case API_LINK_HARD:
+ prop_value = H5D_XFER_COLL_CHUNK_DEF;
+ status = H5Pinsert2(xfer_plist, H5D_XFER_COLL_CHUNK_LINK_HARD_NAME, H5D_XFER_COLL_CHUNK_SIZE, &prop_value,
+ NULL, NULL, NULL, NULL, NULL, NULL);
+ VRFY((status >= 0),"testing property list inserted succeeded");
+ break;
+
+ case API_MULTI_HARD:
+ prop_value = H5D_XFER_COLL_CHUNK_DEF;
+ status = H5Pinsert2(xfer_plist, H5D_XFER_COLL_CHUNK_MULTI_HARD_NAME, H5D_XFER_COLL_CHUNK_SIZE, &prop_value,
+ NULL, NULL, NULL, NULL, NULL, NULL);
+ VRFY((status >= 0),"testing property list inserted succeeded");
+ break;
+
+ case API_LINK_TRUE:
+ prop_value = H5D_XFER_COLL_CHUNK_DEF;
+ status = H5Pinsert2(xfer_plist, H5D_XFER_COLL_CHUNK_LINK_NUM_TRUE_NAME, H5D_XFER_COLL_CHUNK_SIZE, &prop_value,
+ NULL, NULL, NULL, NULL, NULL, NULL);
+ VRFY((status >= 0),"testing property list inserted succeeded");
+ break;
+
+ case API_LINK_FALSE:
+ prop_value = H5D_XFER_COLL_CHUNK_DEF;
+ status = H5Pinsert2(xfer_plist, H5D_XFER_COLL_CHUNK_LINK_NUM_FALSE_NAME, H5D_XFER_COLL_CHUNK_SIZE, &prop_value,
+ NULL, NULL, NULL, NULL, NULL, NULL);
+ VRFY((status >= 0),"testing property list inserted succeeded");
+ break;
+
+ case API_MULTI_COLL:
+ prop_value = H5D_XFER_COLL_CHUNK_DEF;
+ status = H5Pinsert2(xfer_plist, H5D_XFER_COLL_CHUNK_MULTI_RATIO_COLL_NAME, H5D_XFER_COLL_CHUNK_SIZE, &prop_value,
+ NULL, NULL, NULL, NULL, NULL, NULL);
+ VRFY((status >= 0),"testing property list inserted succeeded");
+ break;
+
+ case API_MULTI_IND:
+ prop_value = H5D_XFER_COLL_CHUNK_DEF;
+ status = H5Pinsert2(xfer_plist, H5D_XFER_COLL_CHUNK_MULTI_RATIO_IND_NAME, H5D_XFER_COLL_CHUNK_SIZE, &prop_value,
+ NULL, NULL, NULL, NULL, NULL, NULL);
+ VRFY((status >= 0),"testing property list inserted succeeded");
+ break;
+
+ default:
+ ;
+ }
+ }
+#endif
+
+ /* write data collectively */
+ status = H5Dwrite(dataset, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
+ xfer_plist, data_array1);
+ VRFY((status >= 0),"dataset write succeeded");
+
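+    /* Read back the instrumented property to verify that the write above
+     * took the expected collective chunk I/O path.
+     */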
+#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
+ if(facc_type == FACC_MPIO) {
+ switch(api_option){
+ case API_LINK_HARD:
+ status = H5Pget(xfer_plist,H5D_XFER_COLL_CHUNK_LINK_HARD_NAME,&prop_value);
+ VRFY((status >= 0),"testing property list get succeeded");
+ VRFY((prop_value == 0),"API to set LINK COLLECTIVE IO directly succeeded");
+ break;
+
+ case API_MULTI_HARD:
+ status = H5Pget(xfer_plist,H5D_XFER_COLL_CHUNK_MULTI_HARD_NAME,&prop_value);
+ VRFY((status >= 0),"testing property list get succeeded");
+ VRFY((prop_value == 0),"API to set MULTI-CHUNK COLLECTIVE IO optimization succeeded");
+ break;
+
+ case API_LINK_TRUE:
+ status = H5Pget(xfer_plist,H5D_XFER_COLL_CHUNK_LINK_NUM_TRUE_NAME,&prop_value);
+ VRFY((status >= 0),"testing property list get succeeded");
+ VRFY((prop_value == 0),"API to set LINK COLLECTIVE IO succeeded");
+ break;
+
+ case API_LINK_FALSE:
+ status = H5Pget(xfer_plist,H5D_XFER_COLL_CHUNK_LINK_NUM_FALSE_NAME,&prop_value);
+ VRFY((status >= 0),"testing property list get succeeded");
+ VRFY((prop_value == 0),"API to set LINK IO transferring to multi-chunk IO succeeded");
+ break;
+
+ case API_MULTI_COLL:
+ status = H5Pget(xfer_plist,H5D_XFER_COLL_CHUNK_MULTI_RATIO_COLL_NAME,&prop_value);
+ VRFY((status >= 0),"testing property list get succeeded");
+ VRFY((prop_value == 0),"API to set MULTI-CHUNK COLLECTIVE IO with optimization succeeded");
+ break;
+
+ case API_MULTI_IND:
+ status = H5Pget(xfer_plist,H5D_XFER_COLL_CHUNK_MULTI_RATIO_IND_NAME,&prop_value);
+ VRFY((status >= 0),"testing property list get succeeded");
+ VRFY((prop_value == 0),"API to set MULTI-CHUNK IO transferring to independent IO succeeded");
+ break;
+
+ default:
+ ;
+ }
+ }
+#endif
+
+ status = H5Dclose(dataset);
+ VRFY((status >= 0),"");
+
+ status = H5Pclose(xfer_plist);
+ VRFY((status >= 0),"property list closed");
+
+ status = H5Sclose(file_dataspace);
+ VRFY((status >= 0),"");
+
+ status = H5Sclose(mem_dataspace);
+ VRFY((status >= 0),"");
+
+
+ status = H5Fclose(file);
+ VRFY((status >= 0),"");
+
+ if (data_array1) HDfree(data_array1);
+
+ /* Use collective read to verify the correctness of collective write. */
+
+    /* allocate memory for the read-data buffer */
+ data_array1 = (int *)HDmalloc(dims[0]*dims[1]*sizeof(int));
+ VRFY((data_array1 != NULL), "data_array1 malloc succeeded");
+
+    /* allocate memory for the expected-data buffer */
+ data_origin1 = (int *)HDmalloc(dims[0]*dims[1]*sizeof(int));
+ VRFY((data_origin1 != NULL), "data_origin1 malloc succeeded");
+
+ acc_plist = create_faccess_plist(comm, info, facc_type);
+ VRFY((acc_plist >= 0),"MPIO creation property list succeeded");
+
+ file = H5Fopen(filename,H5F_ACC_RDONLY,acc_plist);
+    VRFY((file >= 0),"H5Fopen succeeded");
+
+ status = H5Pclose(acc_plist);
+ VRFY((status >= 0),"");
+
+    /* open the collective dataset */
+ dataset = H5Dopen2(file, DSET_COLLECTIVE_CHUNK_NAME, H5P_DEFAULT);
+ VRFY((dataset >= 0), "");
+
+ /* set up dimensions of the slab this process accesses */
+ ccslab_set(mpi_rank, mpi_size, start, count, stride, block, select_factor);
+
+    /* obtain the file and mem dataspace */
+ file_dataspace = H5Dget_space (dataset);
+ VRFY((file_dataspace >= 0), "");
+
+ if (ALL != mem_selection) {
+ mem_dataspace = H5Dget_space (dataset);
+ VRFY((mem_dataspace >= 0), "");
+ }
+ else {
+ current_dims = num_points;
+ mem_dataspace = H5Screate_simple (1, &current_dims, NULL);
+ VRFY((mem_dataspace >= 0), "mem_dataspace create succeeded");
+ }
+
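+    /* Re-create the same file and memory selections that were used for the write */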
+ switch (file_selection) {
+ case HYPER:
+ status = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((status >= 0),"hyperslab selection succeeded");
+ break;
+
+ case POINT:
+ if (num_points) {
+ status = H5Sselect_elements(file_dataspace, H5S_SELECT_SET, num_points, coords);
+ VRFY((status >= 0),"Element selection succeeded");
+ }
+ else {
+ status = H5Sselect_none(file_dataspace);
+ VRFY((status >= 0),"none selection succeeded");
+ }
+ break;
+
+ case ALL:
+ status = H5Sselect_all(file_dataspace);
+ VRFY((status >= 0), "H5Sselect_all succeeded");
+ break;
+ }
+
+ switch (mem_selection) {
+ case HYPER:
+ status = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((status >= 0),"hyperslab selection succeeded");
+ break;
+
+ case POINT:
+ if (num_points) {
+ status = H5Sselect_elements(mem_dataspace, H5S_SELECT_SET, num_points, coords);
+ VRFY((status >= 0),"Element selection succeeded");
+ }
+ else {
+ status = H5Sselect_none(mem_dataspace);
+ VRFY((status >= 0),"none selection succeeded");
+ }
+ break;
+
+ case ALL:
+ status = H5Sselect_all(mem_dataspace);
+ VRFY((status >= 0), "H5Sselect_all succeeded");
+ break;
+ }
+
+    /* fill the expected-data buffer with the test pattern */
+    ccdataset_fill(start, stride, count, block, data_origin1, mem_selection);
+    xfer_plist = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((xfer_plist >= 0),"");
+
+ status = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
+ VRFY((status>= 0),"MPIO collective transfer property succeeded");
+ if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ status = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((status>= 0),"set independent IO collectively succeeded");
+ }
+
+ status = H5Dread(dataset, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
+ xfer_plist, data_array1);
+ VRFY((status >=0),"dataset read succeeded");
+
+ /* verify the read data with original expected data */
+ status = ccdataset_vrfy(start, count, stride, block, data_array1, data_origin1, mem_selection);
+ if (status) nerrors++;
+
+ status = H5Pclose(xfer_plist);
+ VRFY((status >= 0),"property list closed");
+
+ /* close dataset collectively */
+    status = H5Dclose(dataset);
+ VRFY((status >= 0), "H5Dclose");
+
+ /* release all IDs created */
+ status = H5Sclose(file_dataspace);
+ VRFY((status >= 0),"H5Sclose");
+
+ status = H5Sclose(mem_dataspace);
+ VRFY((status >= 0),"H5Sclose");
+
+ /* close the file collectively */
+ status = H5Fclose(file);
+ VRFY((status >= 0),"H5Fclose");
+
+ /* release data buffers */
+ if(coords) HDfree(coords);
+ if(data_array1) HDfree(data_array1);
+ if(data_origin1) HDfree(data_origin1);
+
+}
+
+
+
+/*****************************************************************************
+ *
+ * Function: do_express_test()
+ *
+ * Purpose: Do an MPI_Allreduce to obtain the maximum value returned
+ * by GetTestExpress() across all processes. Return this
+ * value.
+ *
+ *              Environment variables can differ across processes. This
+ *              function ensures that all processes agree on whether to
+ *              do an express test.
+ *
+ * Return: Success: Maximum of the values returned by
+ * GetTestExpress() across all processes.
+ *
+ * Failure: -1
+ *
+ * Programmer: JRM -- 4/25/06
+ *
+ *****************************************************************************/
+static int
+do_express_test(int world_mpi_rank)
+{
+ int express_test;
+ int max_express_test;
+ int result;
+
+ express_test = GetTestExpress();
+
+ result = MPI_Allreduce((void *)&express_test,
+ (void *)&max_express_test,
+ 1,
+ MPI_INT,
+ MPI_MAX,
+ MPI_COMM_WORLD);
+
+ if ( result != MPI_SUCCESS ) {
+ nerrors++;
+ max_express_test = -1;
+ if ( VERBOSE_MED && (world_mpi_rank == 0)) {
+ HDfprintf(stdout, "%d:%s: MPI_Allreduce() failed.\n",
+ world_mpi_rank, FUNC );
+ }
+ }
+
+ return(max_express_test);
+
+} /* do_express_test() */
+
+
+int main(int argc, char **argv)
+{
+ int ExpressMode = 0;
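+    /* Lower the library's big-I/O element threshold so the large-transfer
+     * code paths are exercised without multi-gigabyte buffers; bigcount is
+     * then set to exceed the new threshold.
+     */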
+ hsize_t newsize = 1048576;
+ hsize_t oldsize = H5S_mpio_set_bigio_count(newsize);
+
+ if (newsize != oldsize) {
+ bigcount = newsize * 2;
+ }
+
+ MPI_Init(&argc, &argv);
+ MPI_Comm_size(MPI_COMM_WORLD,&mpi_size);
+ MPI_Comm_rank(MPI_COMM_WORLD,&mpi_rank);
+
+ ExpressMode = do_express_test(mpi_rank);
+
+ dataset_big_write();
+ MPI_Barrier(MPI_COMM_WORLD);
+
+ dataset_big_read();
+ MPI_Barrier(MPI_COMM_WORLD);
+
+ if (ExpressMode > 0) {
+ printf("***Express test mode on. Several tests are skipped\n");
+ }
+ else {
+ coll_chunk1();
+ MPI_Barrier(MPI_COMM_WORLD);
+ coll_chunk2();
+ MPI_Barrier(MPI_COMM_WORLD);
+ coll_chunk3();
+ }
+
+ /* close HDF5 library */
+ H5close();
+
+ MPI_Finalize();
+
+ return 0;
+}
+
diff --git a/tools/lib/h5diff.c b/tools/lib/h5diff.c
index afb36d9..9da5b6b 100644
--- a/tools/lib/h5diff.c
+++ b/tools/lib/h5diff.c
@@ -11,8 +11,6 @@
* help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
-#include <stdlib.h>
-
#include "H5private.h"
#include "h5tools.h"
#include "h5tools_utils.h"
diff --git a/tools/lib/h5diff_array.c b/tools/lib/h5diff_array.c
index 0b1a96e..7236ad1 100644
--- a/tools/lib/h5diff_array.c
+++ b/tools/lib/h5diff_array.c
@@ -673,21 +673,33 @@ static hsize_t diff_datum(void *_mem1,
h5difftrace("diff_datum H5T_STRING variable\n");
/* Get pointer to first string */
s1 = *(char**) mem1;
- size1 = HDstrlen(s1);
+ if(s1)
+ size1 = HDstrlen(s1);
+ else
+ size1 = 0;
/* Get pointer to second string */
s2 = *(char**) mem2;
- size2 = HDstrlen(s2);
+ if(s2)
+ size2 = HDstrlen(s2);
+ else
+ size2 = 0;
}
else if (H5T_STR_NULLTERM == pad) {
h5difftrace("diff_datum H5T_STRING null term\n");
/* Get pointer to first string */
s1 = (char*) mem1;
- size1 = HDstrlen(s1);
+ if(s1)
+ size1 = HDstrlen(s1);
+ else
+ size1 = 0;
if (size1 > size_mtype)
size1 = size_mtype;
/* Get pointer to second string */
s2 = (char*) mem2;
- size2 = HDstrlen(s2);
+ if(s2)
+ size2 = HDstrlen(s2);
+ else
+ size2 = 0;
if (size2 > size_mtype)
size2 = size_mtype;
}
diff --git a/tools/lib/h5tools.c b/tools/lib/h5tools.c
index 3729cac..5031e44 100644
--- a/tools/lib/h5tools.c
+++ b/tools/lib/h5tools.c
@@ -12,16 +12,10 @@
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
- * Programmer: Robb Matzke <matzke@llnl.gov>
- * Thursday, July 23, 1998
- *
* Purpose: A library for displaying the values of a dataset in a human
* readable format.
*/
-#include <stdio.h>
-#include <stdlib.h>
-
#include "h5tools.h"
#include "h5tools_dump.h"
#include "h5tools_ref.h"
diff --git a/tools/lib/h5tools_dump.c b/tools/lib/h5tools_dump.c
index 1a57512..fb79b77 100644
--- a/tools/lib/h5tools_dump.c
+++ b/tools/lib/h5tools_dump.c
@@ -12,16 +12,10 @@
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
- * Programmer: Robb Matzke <matzke@llnl.gov>
- * Thursday, July 23, 1998
- *
* Purpose: A library for displaying the values of a dataset in a human
* readable format.
*/
-#include <stdio.h>
-#include <stdlib.h>
-
#include "h5tools.h"
#include "h5tools_dump.h"
#include "h5tools_ref.h"
diff --git a/tools/lib/h5tools_ref.c b/tools/lib/h5tools_ref.c
index 85850e3..e000080 100644
--- a/tools/lib/h5tools_ref.c
+++ b/tools/lib/h5tools_ref.c
@@ -11,8 +11,6 @@
* help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
-#include <stdio.h>
-#include <stdlib.h>
#include "h5tools_ref.h"
#include "H5private.h"
#include "H5SLprivate.h"
diff --git a/tools/lib/h5tools_str.c b/tools/lib/h5tools_str.c
index fa15785..a66cfe5 100644
--- a/tools/lib/h5tools_str.c
+++ b/tools/lib/h5tools_str.c
@@ -12,16 +12,8 @@
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
- * Programmer: Bill Wendling <wendling@ncsa.uiuc.edu>
- * Monday, 19. February 2001
- *
* Purpose: These are string functions for us to use and abuse.
*/
-#include <stdarg.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-
#include "H5private.h"
#include "h5tools.h" /* for h5tool_format_t structure */
#include "h5tools_ref.h"
diff --git a/tools/lib/h5tools_utils.c b/tools/lib/h5tools_utils.c
index c361e25..08213df 100644
--- a/tools/lib/h5tools_utils.c
+++ b/tools/lib/h5tools_utils.c
@@ -12,20 +12,10 @@
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
- * Programmer: Bill Wendling <wendling@ncsa.uiuc.edu>
- * Tuesday, 6. March 2001
- */
-
-/*
* Portions of this work are derived from _Obfuscated C and Other Mysteries_,
* by Don Libes, copyright (c) 1993 by John Wiley & Sons, Inc.
*/
-#include <ctype.h>
-#include <stdarg.h>
-#include <stdio.h>
-#include <stdlib.h>
-
#include "h5tools.h"
#include "h5tools_utils.h"
#include "H5private.h"
diff --git a/tools/lib/io_timer.c b/tools/lib/io_timer.c
index e3318e9..a6885df 100644
--- a/tools/lib/io_timer.c
+++ b/tools/lib/io_timer.c
@@ -22,16 +22,9 @@
* This is a module of useful timing functions for performance testing.
*/
-#include <stdio.h>
-#include <stdlib.h>
-
#include "H5private.h"
#include "hdf5.h"
-#ifdef H5_HAVE_PARALLEL
-#include <mpi.h>
-#endif
-
#include "io_timer.h"
/*
diff --git a/tools/lib/ph5diff.h b/tools/lib/ph5diff.h
index 9628d45..996a611 100644
--- a/tools/lib/ph5diff.h
+++ b/tools/lib/ph5diff.h
@@ -42,9 +42,5 @@ struct diffs_found
int not_cmp;
};
-#ifdef H5_HAVE_PARALLEL
-#include <mpi.h>
-#endif
-
#endif /* _PH5DIFF_H__ */
diff --git a/tools/src/h5copy/h5copy.c b/tools/src/h5copy/h5copy.c
index 390b93e..3f91fce 100644
--- a/tools/src/h5copy/h5copy.c
+++ b/tools/src/h5copy/h5copy.c
@@ -14,8 +14,6 @@
#include "H5private.h"
#include "h5tools.h"
#include "h5tools_utils.h"
-#include <string.h>
-#include <stdlib.h>
/* Name of tool */
#define PROGRAMNAME "h5copy"
diff --git a/tools/src/h5diff/h5diff_common.c b/tools/src/h5diff/h5diff_common.c
index 0537b9f..1069a31 100644
--- a/tools/src/h5diff/h5diff_common.c
+++ b/tools/src/h5diff/h5diff_common.c
@@ -11,8 +11,6 @@
* help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
-#include <stdlib.h>
-#include <string.h>
#include "H5private.h"
#include "h5diff.h"
#include "h5diff_common.h"
diff --git a/tools/src/h5diff/h5diff_main.c b/tools/src/h5diff/h5diff_main.c
index 92a1610..66ff71e 100644
--- a/tools/src/h5diff/h5diff_main.c
+++ b/tools/src/h5diff/h5diff_main.c
@@ -11,9 +11,6 @@
* help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
-#include <stdlib.h>
-#include <assert.h>
-#include <memory.h>
#include "H5private.h"
#include "h5diff.h"
#include "h5diff_common.h"
diff --git a/tools/src/h5diff/ph5diff_main.c b/tools/src/h5diff/ph5diff_main.c
index bfeb408..192067f 100644
--- a/tools/src/h5diff/ph5diff_main.c
+++ b/tools/src/h5diff/ph5diff_main.c
@@ -11,9 +11,6 @@
* help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
-#include <stdlib.h>
-#include <string.h>
-#include <assert.h>
#include "H5private.h"
#include "h5diff.h"
#include "ph5diff.h"
diff --git a/tools/src/h5dump/h5dump.c b/tools/src/h5dump/h5dump.c
index b53c212..bf2e127 100644
--- a/tools/src/h5dump/h5dump.c
+++ b/tools/src/h5dump/h5dump.c
@@ -10,8 +10,6 @@
* If you do not have access to either file, you may request a copy from *
* help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
-#include <stdio.h>
-#include <stdlib.h>
#include "h5dump.h"
#include "h5dump_ddl.h"
diff --git a/tools/src/h5dump/h5dump_ddl.c b/tools/src/h5dump/h5dump_ddl.c
index 8ce6cd6..0a45840 100644
--- a/tools/src/h5dump/h5dump_ddl.c
+++ b/tools/src/h5dump/h5dump_ddl.c
@@ -10,8 +10,6 @@
* If you do not have access to either file, you may request a copy from *
* help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
-#include <stdio.h>
-#include <stdlib.h>
#include "H5private.h"
#include "h5tools.h"
diff --git a/tools/src/h5dump/h5dump_xml.c b/tools/src/h5dump/h5dump_xml.c
index 1c3978d..bb0fd7a 100644
--- a/tools/src/h5dump/h5dump_xml.c
+++ b/tools/src/h5dump/h5dump_xml.c
@@ -10,8 +10,6 @@
* If you do not have access to either file, you may request a copy from *
* help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
-#include <stdio.h>
-#include <stdlib.h>
#include "H5private.h"
#include "h5tools.h"
diff --git a/tools/src/h5repack/h5repack.c b/tools/src/h5repack/h5repack.c
index bc8527b..4860d9e 100644
--- a/tools/src/h5repack/h5repack.c
+++ b/tools/src/h5repack/h5repack.c
@@ -11,10 +11,6 @@
* help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
-#include <stdlib.h>
-#include <string.h>
-#include <ctype.h>
-
#include "H5private.h"
#include "h5repack.h"
#include "h5tools.h"
diff --git a/tools/src/h5stat/h5stat.c b/tools/src/h5stat/h5stat.c
index 6aee7a8..6f196b4 100644
--- a/tools/src/h5stat/h5stat.c
+++ b/tools/src/h5stat/h5stat.c
@@ -11,8 +11,6 @@
* help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
-#include <stdlib.h>
-#include <string.h>
#include "H5private.h" /* Generic Functions */
#include "h5tools.h"
#include "h5tools_utils.h"
diff --git a/tools/src/misc/h5mkgrp.c b/tools/src/misc/h5mkgrp.c
index 597b6b3..43d6bfe 100644
--- a/tools/src/misc/h5mkgrp.c
+++ b/tools/src/misc/h5mkgrp.c
@@ -15,8 +15,6 @@
#include "H5private.h"
#include "h5tools.h"
#include "h5tools_utils.h"
-#include <string.h>
-#include <stdlib.h>
/* Name of tool */
#define PROGRAMNAME "h5mkgrp"
diff --git a/tools/src/misc/h5repart.c b/tools/src/misc/h5repart.c
index 4432621..cdc554f 100644
--- a/tools/src/misc/h5repart.c
+++ b/tools/src/misc/h5repart.c
@@ -25,39 +25,9 @@
/* See H5private.h for how to include system headers */
#include "hdf5.h"
#include "H5private.h"
-#ifdef H5_STDC_HEADERS
-# include <ctype.h>
-# include <errno.h>
-# include <fcntl.h>
-# include <stdio.h>
-# include <stdlib.h>
-# include <string.h>
-#endif
-
-#ifdef H5_HAVE_UNISTD_H
-# include <sys/types.h>
-# include <unistd.h>
-#endif
-
-#ifdef H5_HAVE_SYS_STAT_H
-# include <sys/stat.h>
-#endif
-
-#ifndef FALSE
-# define FALSE 0
-#endif
-#ifndef TRUE
-# define TRUE 1
-#endif
-# define NAMELEN 4096
-#define GB *1024*1024*1024
-#ifndef MIN
-# define MIN(X,Y) ((X)<(Y)?(X):(Y))
-#endif
-#ifndef MIN3
-# define MIN3(X,Y,Z) MIN(MIN(X,Y),Z)
-#endif
+#define NAMELEN 4096
+#define GB *1024*1024*1024
/*Make these 2 private properties(defined in H5Fprivate.h) available to h5repart.
*The first one updates the member file size in the superblock. The second one
diff --git a/tools/test/h5copy/h5copygentest.c b/tools/test/h5copy/h5copygentest.c
index d4d6a08..35f9132 100644
--- a/tools/test/h5copy/h5copygentest.c
+++ b/tools/test/h5copy/h5copygentest.c
@@ -14,7 +14,6 @@
/*
* Generate the binary hdf5 file for the h5copy tests
*/
-#include <stdlib.h>
#include "hdf5.h"
#include "H5private.h"
diff --git a/tools/test/h5diff/CMakeTests.cmake b/tools/test/h5diff/CMakeTests.cmake
index 72dda6b..02db446 100644
--- a/tools/test/h5diff/CMakeTests.cmake
+++ b/tools/test/h5diff/CMakeTests.cmake
@@ -95,6 +95,8 @@
${HDF5_TOOLS_DIR}/testfiles/vds/5_b.h5
${HDF5_TOOLS_DIR}/testfiles/vds/5_c.h5
${HDF5_TOOLS_DIR}/testfiles/vds/5_vds.h5
+ # tools/testfiles
+ ${HDF5_TOOLS_DIR}/testfiles/tvlstr.h5
)
set (LIST_OTHER_TEST_FILES
@@ -286,6 +288,7 @@
${HDF5_TOOLS_TEST_H5DIFF_SOURCE_DIR}/testfiles/h5diff_v1.txt
${HDF5_TOOLS_TEST_H5DIFF_SOURCE_DIR}/testfiles/h5diff_v2.txt
${HDF5_TOOLS_TEST_H5DIFF_SOURCE_DIR}/testfiles/h5diff_v3.txt
+ ${HDF5_TOOLS_TEST_H5DIFF_SOURCE_DIR}/testfiles/h5diff_vlstr.txt
)
set (LIST_WIN_TEST_FILES
@@ -311,6 +314,8 @@
HDFTEST_COPY_FILE("${h5_tstfiles}" "${PROJECT_BINARY_DIR}/PAR/testfiles/${fname}" "h5diff_files")
endif ()
endforeach ()
+ # copy second version of tvlstr.h5
+ HDFTEST_COPY_FILE("${HDF5_TOOLS_DIR}/testfiles/tvlstr.h5" "${PROJECT_BINARY_DIR}/testfiles/tvlstr2.h5" "h5diff_files")
#
@@ -888,6 +893,8 @@
h5diff_v2.out.err
h5diff_v3.out
h5diff_v3.out.err
+ h5diff_vlstr.out
+ h5diff_vlstr.out.err
)
set_tests_properties (H5DIFF-clearall-objects PROPERTIES WORKING_DIRECTORY "${PROJECT_BINARY_DIR}/testfiles")
if (NOT "${last_test}" STREQUAL "")
@@ -1465,6 +1472,7 @@ ADD_H5_TEST (h5diff_487 1 -v --exclude-path "/group1/dset" h5diff_exclude3-1.h5
# # diff various multiple vlen and fixed strings in a compound type dataset
# ##############################################################################
ADD_H5_TEST (h5diff_530 0 -v ${COMP_VL_STRS_FILE} ${COMP_VL_STRS_FILE} /group /group_copy)
+ADD_H5_TEST (h5diff_vlstr 0 -v tvlstr.h5 tvlstr2.h5)
# ##############################################################################
# # Test container types (array,vlen) with multiple nested compound types
diff --git a/tools/test/h5diff/testfiles/h5diff_vlstr.txt b/tools/test/h5diff/testfiles/h5diff_vlstr.txt
new file mode 100644
index 0000000..67141f1
--- /dev/null
+++ b/tools/test/h5diff/testfiles/h5diff_vlstr.txt
@@ -0,0 +1,16 @@
+
+file1 file2
+---------------------------------------
+ x x /
+ x x /Dataset1
+ x x /vl_string_type
+
+group : </> and </>
+0 differences found
+attribute: <test_scalar of </>> and <test_scalar of </>>
+0 differences found
+dataset: </Dataset1> and </Dataset1>
+0 differences found
+datatype: </vl_string_type> and </vl_string_type>
+0 differences found
+EXIT CODE: 0
diff --git a/tools/test/h5diff/testh5diff.sh.in b/tools/test/h5diff/testh5diff.sh.in
index d769c23..df472d4 100644
--- a/tools/test/h5diff/testh5diff.sh.in
+++ b/tools/test/h5diff/testh5diff.sh.in
@@ -124,6 +124,7 @@ $SRC_H5DIFF_TESTFILES/tmpSingleSiteBethe.reference.h5
$SRC_H5DIFF_TESTFILES/tmpSingleSiteBethe.output.h5
$SRC_H5DIFF_TESTFILES/diff_strings1.h5
$SRC_H5DIFF_TESTFILES/diff_strings2.h5
+$SRC_TOOLS_TESTFILES/tvlstr.h5
"
LIST_HDF5_VDS_TEST_FILES="
@@ -341,6 +342,7 @@ $SRC_H5DIFF_TESTFILES/h5diff_tmp2.txt
$SRC_H5DIFF_TESTFILES/h5diff_v1.txt
$SRC_H5DIFF_TESTFILES/h5diff_v2.txt
$SRC_H5DIFF_TESTFILES/h5diff_v3.txt
+$SRC_H5DIFF_TESTFILES/h5diff_vlstr.txt
"
#
@@ -566,6 +568,8 @@ SKIP() {
##############################################################################
# prepare for test
COPY_TESTFILES_TO_TESTDIR
+# second copy of tvlstr.h5
+$CP -f $SRC_TOOLS_TESTFILES/tvlstr.h5 $TESTDIR/tvlstr2.h5
# ############################################################################
# # Common usage
@@ -1141,6 +1145,7 @@ TOOLTEST h5diff_487.txt -v --exclude-path "/group1/dset" h5diff_exclude3-1.h5 h5
# # diff various multiple vlen and fixed strings in a compound type dataset
# ##############################################################################
TOOLTEST h5diff_530.txt -v h5diff_comp_vl_strs.h5 h5diff_comp_vl_strs.h5 /group /group_copy
+TOOLTEST h5diff_vlstr.txt -v tvlstr.h5 tvlstr2.h5
# ##############################################################################
# # Test container types (array,vlen) with multiple nested compound types
diff --git a/tools/test/h5dump/h5dumpgentest.c b/tools/test/h5dump/h5dumpgentest.c
index 44c4369..2128acb 100644
--- a/tools/test/h5dump/h5dumpgentest.c
+++ b/tools/test/h5dump/h5dumpgentest.c
@@ -20,7 +20,6 @@
* trying it on a new platform, ...), you need to verify the correctness
* of the expected output and update the corresponding *.ddl files.
*/
-#include <limits.h>
#include "hdf5.h"
#include "H5private.h"
diff --git a/tools/test/h5import/h5importtest.c b/tools/test/h5import/h5importtest.c
index 00ae2e7..489bc01 100644
--- a/tools/test/h5import/h5importtest.c
+++ b/tools/test/h5import/h5importtest.c
@@ -11,7 +11,6 @@
* help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
-#include <stdio.h>
#include "H5private.h"
#ifdef H5_HAVE_WIN32_API
diff --git a/tools/test/h5jam/h5jamgentest.c b/tools/test/h5jam/h5jamgentest.c
index d713bb9..9f3d000 100644
--- a/tools/test/h5jam/h5jamgentest.c
+++ b/tools/test/h5jam/h5jamgentest.c
@@ -20,8 +20,6 @@
* trying it on a new platform, ...), you need to verify the correctness
* of the expected output and update the corresponding *.ddl files.
*/
-#include <assert.h>
-#include <limits.h>
#include "hdf5.h"
#include "H5private.h"
diff --git a/tools/test/h5jam/tellub.c b/tools/test/h5jam/tellub.c
index fad14b7..8e4b3ac 100644
--- a/tools/test/h5jam/tellub.c
+++ b/tools/test/h5jam/tellub.c
@@ -11,12 +11,6 @@
* help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
-#include <stdio.h>
-
-#ifdef H5_HAVE_UNISTD_H
-#include <unistd.h>
-#endif
-
#include "hdf5.h"
#include "H5private.h"
#include "h5tools.h"
diff --git a/tools/test/h5repack/testh5repack_detect_szip.c b/tools/test/h5repack/testh5repack_detect_szip.c
index e08d5ab..6e7a24e 100644
--- a/tools/test/h5repack/testh5repack_detect_szip.c
+++ b/tools/test/h5repack/testh5repack_detect_szip.c
@@ -11,7 +11,6 @@
* help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
-#include <stdio.h>
#include "h5repack.h"
#include "h5tools.h"
#include "h5tools_utils.h"
diff --git a/tools/test/misc/repart_test.c b/tools/test/misc/repart_test.c
index 372f46a..4016ee8 100644
--- a/tools/test/misc/repart_test.c
+++ b/tools/test/misc/repart_test.c
@@ -12,19 +12,16 @@
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
- * Programmer: Raymond Lu <slu@ncsa.uiuc.edu>
- * June 1, 2005
- *
* Purpose: This program tests family files after being repartitioned
* by h5repart. It simply tries to reopen the files with
* correct family driver and member size.
*/
#include "hdf5.h"
+#include "H5private.h"
#define KB 1024
#define FAMILY_H5REPART_SIZE1 20000
#define FAMILY_H5REPART_SIZE2 (5*KB)
-#define MAX(a,b) (a>b ? a:b)
const char *FILENAME[] = {
"fst_family%05d.h5",
@@ -42,52 +39,54 @@ herr_t test_sec2_h5repart_opens(void);
*
* Purpose: Tries to reopen family files.
*
- * Return: Success: exit(0)
- *
- * Failure: exit(1)
+ * Return: SUCCEED/FAIL
*
- * Programmer: Raymond Lu
- * June 1, 2005
- *
- * Modifications:
*-------------------------------------------------------------------------
*/
herr_t
test_family_h5repart_opens(void)
{
- hid_t file=(-1), fapl=(-1);
+ hid_t fid = -1;
+ hid_t fapl_id = -1;
/* open 1st file(single member file) with correct family size(20000 byte) */
- if ((fapl=H5Pcreate(H5P_FILE_ACCESS))<0)
+ if ((fapl_id = H5Pcreate(H5P_FILE_ACCESS)) < 0)
goto error;
- if(H5Pset_fapl_family(fapl, (hsize_t)FAMILY_H5REPART_SIZE1, H5P_DEFAULT)<0)
+ if (H5Pset_fapl_family(fapl_id, (hsize_t)FAMILY_H5REPART_SIZE1, H5P_DEFAULT) < 0)
goto error;
- if((file=H5Fopen(FILENAME[0], H5F_ACC_RDWR, fapl))<0)
+ if ((fid = H5Fopen(FILENAME[0], H5F_ACC_RDWR, fapl_id))<0)
goto error;
- if(H5Fclose(file)<0)
+ if (H5Fclose(fid) < 0)
goto error;
/* open 2nd file(multiple member files) with correct family size(5KB) */
- if(H5Pset_fapl_family(fapl, (hsize_t)FAMILY_H5REPART_SIZE2, H5P_DEFAULT)<0)
+ if (H5Pset_fapl_family(fapl_id, (hsize_t)FAMILY_H5REPART_SIZE2, H5P_DEFAULT) < 0)
+ goto error;
+
+ if ((fid = H5Fopen(FILENAME[1], H5F_ACC_RDWR, fapl_id)) < 0)
goto error;
- if((file=H5Fopen(FILENAME[1], H5F_ACC_RDWR, fapl))<0)
+ if (H5Pclose(fapl_id) < 0)
goto error;
- if(H5Fclose(file)<0)
+ if (H5Fclose(fid) < 0)
goto error;
- return 0;
+ return SUCCEED;
error:
H5E_BEGIN_TRY {
- H5Fclose(file);
+ H5Pclose(fapl_id);
+ H5Fclose(fid);
} H5E_END_TRY;
- return -1;
-}
+
+ return FAIL;
+
+} /* end test_family_h5repart_opens() */
+
/*-------------------------------------------------------------------------
@@ -95,36 +94,32 @@ error:
*
* Purpose: Tries to reopen a sec2 file.
*
- * Return: Success: exit(0)
+ * Return: SUCCEED/FAIL
*
- * Failure: exit(1)
- *
- * Programmer: Raymond Lu
- * June 21, 2005
- *
- * Modifications:
*-------------------------------------------------------------------------
*/
herr_t
test_sec2_h5repart_opens(void)
{
- hid_t file=(-1);
+ hid_t fid = -1;
/* open the sec2 file */
- if((file=H5Fopen(FILENAME[2], H5F_ACC_RDWR, H5P_DEFAULT))<0)
+ if ((fid = H5Fopen(FILENAME[2], H5F_ACC_RDWR, H5P_DEFAULT)) < 0)
goto error;
- if(H5Fclose(file)<0)
+ if (H5Fclose(fid) < 0)
goto error;
- return 0;
+ return SUCCEED;
error:
H5E_BEGIN_TRY {
- H5Fclose(file);
+ H5Fclose(fid);
} H5E_END_TRY;
- return -1;
-}
+
+ return FAIL;
+
+} /* end test_sec2_h5repart_opens() */
/*-------------------------------------------------------------------------
@@ -132,32 +127,26 @@ error:
*
* Purpose: Tests h5repart-ed family files
*
- * Return: Success: exit(0)
- *
- * Failure: exit(1)
- *
- * Programmer: Raymond Lu
- * June 1, 2005
- *
- * Modifications:
+ * Return: EXIT_SUCCESS/EXIT_FAILURE
*
*-------------------------------------------------------------------------
*/
int
main(void)
{
- int nerrors=0;
+ int nerrors = 0;
- nerrors += test_family_h5repart_opens()<0 ?1:0;
- nerrors += test_sec2_h5repart_opens()<0 ?1:0;
+ nerrors += test_family_h5repart_opens() < 0 ? 1 : 0;
+ nerrors += test_sec2_h5repart_opens() < 0 ? 1 : 0;
- if (nerrors) goto error;
+ if (nerrors)
+ goto error;
- return 0;
+ HDexit(EXIT_SUCCESS);
error:
nerrors = MAX(1, nerrors);
- printf("***** %d FAMILY FILE TEST%s FAILED! *****\n",
+ HDprintf("***** %d FAMILY FILE TEST%s FAILED! *****\n",
nerrors, 1 == nerrors ? "" : "S");
- return 1;
-}
+ HDexit(EXIT_FAILURE);
+} /* end main() */
diff --git a/tools/test/misc/talign.c b/tools/test/misc/talign.c
index 9a72557..ce866b4 100644
--- a/tools/test/misc/talign.c
+++ b/tools/test/misc/talign.c
@@ -15,9 +15,6 @@
* Small program to illustrate the "misalignment" of members within a compound
* datatype, in a datatype fixed by H5Tget_native_type().
*/
-#include <string.h>
-#include <stdlib.h>
-/*#include <unistd.h> *//* Required for unlink() */
#include "hdf5.h"
#include "H5private.h"
diff --git a/tools/test/perform/CMakeTests.cmake b/tools/test/perform/CMakeTests.cmake
index 2933563..39faa73 100644
--- a/tools/test/perform/CMakeTests.cmake
+++ b/tools/test/perform/CMakeTests.cmake
@@ -146,6 +146,7 @@ else ()
-P "${HDF_RESOURCES_EXT_DIR}/runTest.cmake"
)
endif ()
+set_tests_properties (PERFORM_zip_perf PROPERTIES DEPENDS PERFORM_zip_perf_help)
if (H5_HAVE_PARALLEL)
add_test (NAME PERFORM_h5perf COMMAND ${MPIEXEC} ${MPIEXEC_PREFLAGS} ${MPIEXEC_NUMPROC_FLAG} ${MPIEXEC_MAX_NUMPROCS} ${MPIEXEC_POSTFLAGS} $<TARGET_FILE:h5perf>)