Diffstat (limited to 'testpar')
-rw-r--r--  testpar/CMakeLists.txt         2
-rw-r--r--  testpar/CMakeTests.cmake     119
-rw-r--r--  testpar/CMakeVFDTests.cmake   73
-rw-r--r--  testpar/Makefile.am           12
-rw-r--r--  testpar/Makefile.in           34
-rw-r--r--  testpar/t_bigio.c              4
-rw-r--r--  testpar/t_cache.c            501
-rw-r--r--  testpar/t_coll_md_read.c     336
-rw-r--r--  testpar/t_dset.c               4
-rw-r--r--  testpar/t_filters_parallel.c 359
-rw-r--r--  testpar/t_mdset.c            173
-rw-r--r--  testpar/t_pflush1.c          247
-rw-r--r--  testpar/t_pflush2.c          265
-rw-r--r--  testpar/t_pread.c            422
-rw-r--r--  testpar/testpflush.sh.in      64
-rw-r--r--  testpar/testphdf5.c            5
-rw-r--r--  testpar/testphdf5.h            2
17 files changed, 1821 insertions, 801 deletions
diff --git a/testpar/CMakeLists.txt b/testpar/CMakeLists.txt
index 0b3cbe3..71459c7 100644
--- a/testpar/CMakeLists.txt
+++ b/testpar/CMakeLists.txt
@@ -47,7 +47,7 @@ set (H5P_TESTS
t_mpi
t_bigio
t_cache
- t_cache_image
+ #t_cache_image
t_pflush1
t_pflush2
t_pread
diff --git a/testpar/CMakeTests.cmake b/testpar/CMakeTests.cmake
index 87470f3..12ccc58 100644
--- a/testpar/CMakeTests.cmake
+++ b/testpar/CMakeTests.cmake
@@ -15,59 +15,90 @@
### T E S T I N G ###
##############################################################################
##############################################################################
+# Remove any output file left over from previous test run
+add_test (NAME MPI_TEST-clear-testphdf5-objects
+ COMMAND ${CMAKE_COMMAND}
+ -E remove
+ ParaTest.h5
+ WORKING_DIRECTORY
+ ${HDF5_TEST_PAR_BINARY_DIR}
+)
+set_tests_properties (MPI_TEST-clear-testphdf5-objects PROPERTIES FIXTURES_SETUP par_clear_testphdf5)
-add_test (NAME TEST_PAR_testphdf5 COMMAND ${MPIEXEC_EXECUTABLE} ${MPIEXEC_NUMPROC_FLAG} ${MPIEXEC_MAX_NUMPROCS} ${MPIEXEC_PREFLAGS} $<TARGET_FILE:testphdf5> ${MPIEXEC_POSTFLAGS})
+set (SKIP_testphdf5 "")
+#if (${HDF5_OPENMPI_VERSION_SKIP})
+# set (SKIP_testphdf5 "${SKIP_testphdf5};-x;ecdsetw")
+#endif ()
-foreach (testp ${H5P_TESTS})
- add_test (NAME TEST_PAR_${testp} COMMAND ${MPIEXEC_EXECUTABLE} ${MPIEXEC_NUMPROC_FLAG} ${MPIEXEC_MAX_NUMPROCS} ${MPIEXEC_PREFLAGS} $<TARGET_FILE:${testp}> ${MPIEXEC_POSTFLAGS})
-endforeach ()
+add_test (NAME MPI_TEST_testphdf5 COMMAND ${MPIEXEC_EXECUTABLE} ${MPIEXEC_NUMPROC_FLAG} ${MPIEXEC_MAX_NUMPROCS} ${MPIEXEC_PREFLAGS} $<TARGET_FILE:testphdf5> ${MPIEXEC_POSTFLAGS} ${SKIP_testphdf5})
+set_tests_properties (MPI_TEST_testphdf5 PROPERTIES
+ FIXTURES_REQUIRED par_clear_testphdf5
+ ENVIRONMENT "HDF5_ALARM_SECONDS=3600;srcdir=${HDF5_TEST_PAR_BINARY_DIR}"
+ WORKING_DIRECTORY ${HDF5_TEST_PAR_BINARY_DIR}
+)
+if (NOT "${last_test}" STREQUAL "")
+ set_tests_properties (MPI_TEST_testphdf5 PROPERTIES DEPENDS ${last_test})
+endif ()
+set (last_test "MPI_TEST_testphdf5")
-# The following will only be correct on windows shared
-#set_tests_properties (TEST_PAR_t_pflush1 PROPERTIES WILL_FAIL "true")
-set_property (TEST TEST_PAR_t_pflush1 PROPERTY PASS_REGULAR_EXPRESSION "PASSED")
-set_tests_properties (TEST_PAR_t_pflush2 PROPERTIES DEPENDS TEST_PAR_t_pflush1)
+#if (${HDF5_OPENMPI_VERSION_SKIP})
+# list (REMOVE_ITEM H5P_TESTS t_shapesame)
+#endif ()
-if (HDF5_TEST_VFD)
+set (test_par_CLEANFILES
+ t_cache_image_00.h5
+ t_cache_image_01.h5
+ t_cache_image_02.h5
+ flush.h5
+ noflush.h5
+ reloc_t_pread_data_file.h5
+ reloc_t_pread_group_0_file.h5
+ reloc_t_pread_group_1_file.h5
+ shutdown.h5
+ after_mpi_fin.h5
+ #the following should have been removed by the programs
+ bigio_test.h5
+ CacheTestDummy.h5
+ t_filters_parallel.h5
+ MPItest.h5
+ ShapeSameTest.h5
+)
- set (VFD_LIST
- sec2
- stdio
- core
- split
- multi
- family
- )
+# Remove any output file left over from previous test run
+add_test (NAME MPI_TEST-clear-objects
+ COMMAND ${CMAKE_COMMAND}
+ -E remove
+ ${test_par_CLEANFILES}
+ WORKING_DIRECTORY
+ ${HDF5_TEST_PAR_BINARY_DIR}
+)
+set_tests_properties (MPI_TEST-clear-objects PROPERTIES FIXTURES_SETUP par_clear_objects)
- set (H5P_VFD_TESTS
- t_pflush1
- t_pflush2
+foreach (testp ${H5P_TESTS})
+ add_test (NAME MPI_TEST_${testp} COMMAND ${MPIEXEC_EXECUTABLE} ${MPIEXEC_NUMPROC_FLAG} ${MPIEXEC_MAX_NUMPROCS} ${MPIEXEC_PREFLAGS} $<TARGET_FILE:${testp}> ${MPIEXEC_POSTFLAGS})
+ set_tests_properties (MPI_TEST_${testp} PROPERTIES
+ FIXTURES_REQUIRED par_clear_objects
+ ENVIRONMENT "HDF5_ALARM_SECONDS=3600;srcdir=${HDF5_TEST_PAR_BINARY_DIR}"
+ WORKING_DIRECTORY ${HDF5_TEST_PAR_BINARY_DIR}
)
-
- if (DIRECT_VFD)
- set (VFD_LIST ${VFD_LIST} direct)
+ if (NOT "${last_test}" STREQUAL "")
+ set_tests_properties (MPI_TEST_${testp} PROPERTIES DEPENDS ${last_test})
endif ()
+ set (last_test "MPI_TEST_${testp}")
+endforeach ()
- macro (ADD_VFD_TEST vfdname resultcode)
- if (NOT HDF5_ENABLE_USING_MEMCHECKER)
- foreach (test ${H5P_VFD_TESTS})
- add_test (
- NAME TEST_PAR_VFD-${vfdname}-${test}
- COMMAND "${CMAKE_COMMAND}"
- -D "TEST_PROGRAM=$<TARGET_FILE:${test}>"
- -D "TEST_ARGS:STRING="
- -D "TEST_VFD:STRING=${vfdname}"
- -D "TEST_EXPECT=${resultcode}"
- -D "TEST_OUTPUT=${test}"
- -D "TEST_FOLDER=${PROJECT_BINARY_DIR}"
- -P "${HDF_RESOURCES_DIR}/vfdTest.cmake"
- )
- endforeach ()
- endif ()
- endmacro ()
+# The t_pflush1 test is hard-coded to fail.
+set_tests_properties (MPI_TEST_t_pflush1 PROPERTIES WILL_FAIL "true")
+#set_property (TEST MPI_TEST_t_pflush1 PROPERTY PASS_REGULAR_EXPRESSION "PASSED")
+set_tests_properties (MPI_TEST_t_pflush2 PROPERTIES DEPENDS MPI_TEST_t_pflush1)
+set_tests_properties (MPI_TEST_t_prestart PROPERTIES DEPENDS MPI_TEST_t_pshutdown)
- # Run test with different Virtual File Driver
- foreach (vfd ${VFD_LIST})
- ADD_VFD_TEST (${vfd} 0)
- endforeach ()
+##############################################################################
+##############################################################################
+### V F D T E S T S ###
+##############################################################################
+##############################################################################
+if (HDF5_TEST_VFD)
+ include (CMakeVFDTests.cmake)
endif ()
diff --git a/testpar/CMakeVFDTests.cmake b/testpar/CMakeVFDTests.cmake
new file mode 100644
index 0000000..de16a31
--- /dev/null
+++ b/testpar/CMakeVFDTests.cmake
@@ -0,0 +1,73 @@
+#
+# Copyright by The HDF Group.
+# All rights reserved.
+#
+# This file is part of HDF5. The full HDF5 copyright notice, including
+# terms governing use, modification, and redistribution, is contained in
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
+#
+
+##############################################################################
+##############################################################################
+### T E S T I N G ###
+##############################################################################
+##############################################################################
+ set (VFD_LIST
+ sec2
+ stdio
+ core
+ split
+ multi
+ family
+ )
+
+ set (H5P_VFD_TESTS
+ t_pflush1
+ t_pflush2
+ )
+
+ if (DIRECT_VFD)
+ set (VFD_LIST ${VFD_LIST} direct)
+ endif ()
+
+foreach (vfdtest ${VFD_LIST})
+ file (MAKE_DIRECTORY "${PROJECT_BINARY_DIR}/${vfdtest}")
+endforeach ()
+
+ macro (ADD_VFD_TEST vfdname resultcode)
+ if (NOT HDF5_ENABLE_USING_MEMCHECKER)
+ foreach (test ${H5P_VFD_TESTS})
+ add_test (
+ NAME MPI_TEST_VFD-${vfdname}-${test}-clear-objects
+ COMMAND ${CMAKE_COMMAND}
+ -E remove
+ ${vfdname}-shared/${vfdname}-${test}.out
+ ${vfdname}-shared/${vfdname}-${test}.out.err
+ )
+ add_test (
+ NAME MPI_TEST_VFD-${vfdname}-${test}
+ COMMAND "${CMAKE_COMMAND}"
+ -D "TEST_PROGRAM=$<TARGET_FILE:${test}>"
+ -D "TEST_ARGS:STRING="
+ -D "TEST_VFD:STRING=${vfdname}"
+ -D "TEST_EXPECT=${resultcode}"
+ -D "TEST_OUTPUT=${vfdname}-${test}.out"
+ -D "TEST_FOLDER=${PROJECT_BINARY_DIR}/${vfdname}"
+ -P "${HDF_RESOURCES_DIR}/vfdTest.cmake"
+ )
+ set_tests_properties (MPI_TEST_VFD-${vfdname}-${test} PROPERTIES
+ DEPENDS MPI_TEST_VFD-${vfdname}-${test}-clear-objects
+ ENVIRONMENT "srcdir=${HDF5_TEST_PAR_BINARY_DIR}/${vfdname}"
+ WORKING_DIRECTORY ${HDF5_TEST_PAR_BINARY_DIR}/${vfdname}
+ )
+ endforeach ()
+ endif ()
+ endmacro ()
+
+ # Run test with different Virtual File Driver
+ foreach (vfd ${VFD_LIST})
+ ADD_VFD_TEST (${vfd} 0)
+ endforeach ()
diff --git a/testpar/Makefile.am b/testpar/Makefile.am
index 7262ca6..a11099d 100644
--- a/testpar/Makefile.am
+++ b/testpar/Makefile.am
@@ -21,11 +21,19 @@ include $(top_srcdir)/config/commence.am
AM_CPPFLAGS+=-I$(top_srcdir)/src -I$(top_srcdir)/test
+# Test scripts--
+# testpflush.sh:
+TEST_SCRIPT_PARA = testpflush.sh
+SCRIPT_DEPEND = t_pflush1$(EXEEXT) t_pflush2$(EXEEXT)
+
+check_SCRIPTS = $(TEST_SCRIPT_PARA)
+
# Test programs. These are our main targets.
#
-TEST_PROG_PARA=t_mpi t_bigio testphdf5 t_cache t_cache_image t_pflush1 t_pflush2 t_pread t_pshutdown t_prestart t_init_term t_shapesame t_filters_parallel
+TEST_PROG_PARA=t_mpi t_bigio testphdf5 t_cache t_cache_image t_pread t_pshutdown t_prestart t_init_term t_shapesame t_filters_parallel
-check_PROGRAMS = $(TEST_PROG_PARA)
+# t_pflush1 and t_pflush2 are used by testpflush.sh
+check_PROGRAMS = $(TEST_PROG_PARA) t_pflush1 t_pflush2
testphdf5_SOURCES=testphdf5.c t_dset.c t_file.c t_file_image.c t_mdset.c \
t_ph5basic.c t_coll_chunk.c t_span_tree.c t_chunk_alloc.c t_filter_read.c \
diff --git a/testpar/Makefile.in b/testpar/Makefile.in
index 5688cd7..7afbfad 100644
--- a/testpar/Makefile.in
+++ b/testpar/Makefile.in
@@ -102,7 +102,7 @@ PRE_UNINSTALL = :
POST_UNINSTALL = :
build_triplet = @build@
host_triplet = @host@
-check_PROGRAMS = $(am__EXEEXT_1)
+check_PROGRAMS = $(am__EXEEXT_1) t_pflush1$(EXEEXT) t_pflush2$(EXEEXT)
TESTS =
subdir = testpar
ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
@@ -130,13 +130,12 @@ DIST_COMMON = $(srcdir)/Makefile.am $(am__DIST_COMMON)
mkinstalldirs = $(install_sh) -d
CONFIG_HEADER = $(top_builddir)/src/H5config.h \
$(top_builddir)/fortran/src/H5config_f.inc
-CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_FILES = testpflush.sh
CONFIG_CLEAN_VPATH_FILES =
am__EXEEXT_1 = t_mpi$(EXEEXT) t_bigio$(EXEEXT) testphdf5$(EXEEXT) \
- t_cache$(EXEEXT) t_cache_image$(EXEEXT) t_pflush1$(EXEEXT) \
- t_pflush2$(EXEEXT) t_pread$(EXEEXT) t_pshutdown$(EXEEXT) \
- t_prestart$(EXEEXT) t_init_term$(EXEEXT) t_shapesame$(EXEEXT) \
- t_filters_parallel$(EXEEXT)
+ t_cache$(EXEEXT) t_cache_image$(EXEEXT) t_pread$(EXEEXT) \
+ t_pshutdown$(EXEEXT) t_prestart$(EXEEXT) t_init_term$(EXEEXT) \
+ t_shapesame$(EXEEXT) t_filters_parallel$(EXEEXT)
t_bigio_SOURCES = t_bigio.c
t_bigio_OBJECTS = t_bigio.$(OBJEXT)
t_bigio_LDADD = $(LDADD)
@@ -463,8 +462,9 @@ am__set_b = \
*) \
b='$*';; \
esac
-am__DIST_COMMON = $(srcdir)/Makefile.in $(top_srcdir)/bin/depcomp \
- $(top_srcdir)/bin/test-driver $(top_srcdir)/config/commence.am \
+am__DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/testpflush.sh.in \
+ $(top_srcdir)/bin/depcomp $(top_srcdir)/bin/test-driver \
+ $(top_srcdir)/config/commence.am \
$(top_srcdir)/config/conclude.am COPYING
DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
ACLOCAL = @ACLOCAL@
@@ -608,7 +608,6 @@ MAINT = @MAINT@
MAKEINFO = @MAKEINFO@
MANIFEST_TOOL = @MANIFEST_TOOL@
MEMORYALLOCSANITYCHECK = @MEMORYALLOCSANITYCHECK@
-METADATATRACEFILE = @METADATATRACEFILE@
MKDIR_P = @MKDIR_P@
MPE = @MPE@
NM = @NM@
@@ -642,6 +641,7 @@ PAC_FORTRAN_NUM_INTEGER_KINDS = @PAC_FORTRAN_NUM_INTEGER_KINDS@
PARALLEL = @PARALLEL@
PARALLEL_FILTERED_WRITES = @PARALLEL_FILTERED_WRITES@
PATH_SEPARATOR = @PATH_SEPARATOR@
+PREADWRITE = @PREADWRITE@
PROFILING = @PROFILING@
RANLIB = @RANLIB@
ROOT = @ROOT@
@@ -798,9 +798,15 @@ CHECK_CLEANFILES = *.chkexe *.chklog *.clog *.clog2 MPItest.h5 \
Para*.h5 bigio_test.h5 CacheTestDummy.h5 ShapeSameTest.h5 \
shutdown.h5 after_mpi_fin.h5 go
+# Test scripts--
+# testpflush.sh:
+TEST_SCRIPT_PARA = testpflush.sh
+SCRIPT_DEPEND = t_pflush1$(EXEEXT) t_pflush2$(EXEEXT)
+check_SCRIPTS = $(TEST_SCRIPT_PARA)
+
# Test programs. These are our main targets.
#
-TEST_PROG_PARA = t_mpi t_bigio testphdf5 t_cache t_cache_image t_pflush1 t_pflush2 t_pread t_pshutdown t_prestart t_init_term t_shapesame t_filters_parallel
+TEST_PROG_PARA = t_mpi t_bigio testphdf5 t_cache t_cache_image t_pread t_pshutdown t_prestart t_init_term t_shapesame t_filters_parallel
testphdf5_SOURCES = testphdf5.c t_dset.c t_file.c t_file_image.c t_mdset.c \
t_ph5basic.c t_coll_chunk.c t_span_tree.c t_chunk_alloc.c t_filter_read.c \
t_prop.c t_coll_md_read.c
@@ -863,6 +869,8 @@ $(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
$(am__aclocal_m4_deps):
+testpflush.sh: $(top_builddir)/config.status $(srcdir)/testpflush.sh.in
+ cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@
clean-checkPROGRAMS:
@list='$(check_PROGRAMS)'; test -n "$$list" || exit 0; \
@@ -1157,7 +1165,7 @@ $(TEST_SUITE_LOG): $(TEST_LOGS)
echo "$$col$$br$$std"; \
fi; \
$$success || exit 1
-recheck: all $(check_PROGRAMS)
+recheck: all $(check_PROGRAMS) $(check_SCRIPTS)
@test -z "$(TEST_SUITE_LOG)" || rm -f $(TEST_SUITE_LOG)
@set +e; $(am__set_TESTS_bases); \
bases=`for i in $$bases; do echo $$i; done \
@@ -1214,7 +1222,7 @@ distdir: $(DISTFILES)
fi; \
done
check-am: all-am
- $(MAKE) $(AM_MAKEFLAGS) $(check_PROGRAMS)
+ $(MAKE) $(AM_MAKEFLAGS) $(check_PROGRAMS) $(check_SCRIPTS)
$(MAKE) $(AM_MAKEFLAGS) check-TESTS
check: check-am
all-am: Makefile all-local
@@ -1549,7 +1557,7 @@ build-check-p: $(LIB) $(PROGS) $(chk_TESTS)
echo "**** Hint ****"; \
echo "Parallel test files reside in the current directory" \
"by default."; \
- echo "Set HDF5_PARAPREFIX to use another directory. E.g.,"; \
+ echo "Set HDF5_PARAPREFIX to use another directory. e.g.,"; \
echo " HDF5_PARAPREFIX=/PFS/user/me"; \
echo " export HDF5_PARAPREFIX"; \
echo " make check"; \
diff --git a/testpar/t_bigio.c b/testpar/t_bigio.c
index fdd3488..1d882b8 100644
--- a/testpar/t_bigio.c
+++ b/testpar/t_bigio.c
@@ -671,7 +671,7 @@ dataset_big_write(void)
/* create a memory dataspace independently */
mem_dataspace = H5Screate_simple (RANK, dims, NULL);
VRFY((mem_dataspace >= 0), "");
- if(!mpi_rank == 0) {
+ if(mpi_rank != 0) {
ret = H5Sselect_none(mem_dataspace);
VRFY((ret >= 0), "H5Sset_none succeeded");
}
@@ -980,7 +980,7 @@ dataset_big_read(void)
/* create a memory dataspace independently */
mem_dataspace = H5Screate_simple (RANK, dims, NULL);
VRFY((mem_dataspace >= 0), "");
- if(!mpi_rank == 0) {
+ if(mpi_rank != 0) {
ret = H5Sselect_none(mem_dataspace);
VRFY((ret >= 0), "H5Sset_none succeeded");
}
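Both t_bigio.c hunks replace `if(!mpi_rank == 0)` with `if(mpi_rank != 0)`. Because `!` binds tighter than `==`, the old condition parsed as `(!mpi_rank) == 0`, which happens to agree with `mpi_rank != 0` for every rank; the rewrite states the intent directly and avoids compiler warnings such as GCC/Clang's -Wlogical-not-parentheses. A minimal standalone check of the equivalence:

#include <assert.h>

int main(void)
{
    int mpi_rank;

    /* `!' binds tighter than `==', so `!mpi_rank == 0' parses as
     * `(!mpi_rank) == 0' -- equivalent to `mpi_rank != 0' here,
     * but only by accident of the parse. */
    for (mpi_rank = 0; mpi_rank < 4; mpi_rank++)
        assert((!mpi_rank == 0) == (mpi_rank != 0));

    return 0;
}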
diff --git a/testpar/t_cache.c b/testpar/t_cache.c
index c27fc23..29c6f4a 100644
--- a/testpar/t_cache.c
+++ b/testpar/t_cache.c
@@ -467,9 +467,7 @@ static void lock_and_unlock_random_entry(H5F_t * file_ptr,
static void lock_entry(H5F_t * file_ptr, int32_t idx);
static void mark_entry_dirty(int32_t idx);
static void pin_entry(H5F_t * file_ptr, int32_t idx, hbool_t global, hbool_t dirty);
-#ifdef H5_METADATA_TRACE_FILE
static void pin_protected_entry(int32_t idx, hbool_t global);
-#endif /* H5_METADATA_TRACE_FILE */
static void move_entry(H5F_t * file_ptr, int32_t old_idx, int32_t new_idx);
static hbool_t reset_server_counts(void);
static void resize_entry(int32_t idx, size_t new_size);
@@ -1217,20 +1215,20 @@ setup_derived_types(void)
struct mssg_t sample; /* used to compute displacements */
/* setup the displacements array */
- if ( ( MPI_SUCCESS != MPI_Address(&sample.req, &displs[0]) ) ||
- ( MPI_SUCCESS != MPI_Address(&sample.src, &displs[1]) ) ||
- ( MPI_SUCCESS != MPI_Address(&sample.dest, &displs[2]) ) ||
- ( MPI_SUCCESS != MPI_Address(&sample.mssg_num, &displs[3]) ) ||
- ( MPI_SUCCESS != MPI_Address(&sample.base_addr, &displs[4]) ) ||
- ( MPI_SUCCESS != MPI_Address(&sample.len, &displs[5]) ) ||
- ( MPI_SUCCESS != MPI_Address(&sample.ver, &displs[6]) ) ||
- ( MPI_SUCCESS != MPI_Address(&sample.count, &displs[7]) ) ||
- ( MPI_SUCCESS != MPI_Address(&sample.magic, &displs[8]) ) ) {
+ if ( ( MPI_SUCCESS != MPI_Get_address(&sample.req, &displs[0]) ) ||
+ ( MPI_SUCCESS != MPI_Get_address(&sample.src, &displs[1]) ) ||
+ ( MPI_SUCCESS != MPI_Get_address(&sample.dest, &displs[2]) ) ||
+ ( MPI_SUCCESS != MPI_Get_address(&sample.mssg_num, &displs[3]) ) ||
+ ( MPI_SUCCESS != MPI_Get_address(&sample.base_addr, &displs[4]) ) ||
+ ( MPI_SUCCESS != MPI_Get_address(&sample.len, &displs[5]) ) ||
+ ( MPI_SUCCESS != MPI_Get_address(&sample.ver, &displs[6]) ) ||
+ ( MPI_SUCCESS != MPI_Get_address(&sample.count, &displs[7]) ) ||
+ ( MPI_SUCCESS != MPI_Get_address(&sample.magic, &displs[8]) ) ) {
nerrors++;
success = FALSE;
if ( verbose ) {
- HDfprintf(stdout, "%d:%s: MPI_Address() call failed.\n",
+ HDfprintf(stdout, "%d:%s: MPI_Get_Address() call failed.\n",
world_mpi_rank, FUNC);
}
@@ -1245,7 +1243,7 @@ setup_derived_types(void)
if ( success ) {
- result = MPI_Type_struct(9, block_len, displs, mpi_types, &mpi_mssg_t);
+ result = MPI_Type_create_struct(9, block_len, displs, mpi_types, &mpi_mssg_t);
if ( result != MPI_SUCCESS ) {
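For reference, the calls adopted in this hunk, MPI_Get_address() and MPI_Type_create_struct(), are the MPI-2 replacements for MPI_Address() and MPI_Type_struct(), which were deprecated in MPI-2 and removed in MPI-3. A minimal sketch of the pattern on a hypothetical two-field struct (not the test's actual mssg_t); call it after MPI_Init():

#include <mpi.h>

struct pair { int tag; double val; };

static MPI_Datatype make_pair_type(void)
{
    struct pair  sample;
    MPI_Aint     base, displs[2];
    int          block_len[2] = {1, 1};
    MPI_Datatype types[2]     = {MPI_INT, MPI_DOUBLE};
    MPI_Datatype pair_type;

    /* MPI-2 replacements for MPI_Address()/MPI_Type_struct() */
    MPI_Get_address(&sample,     &base);
    MPI_Get_address(&sample.tag, &displs[0]);
    MPI_Get_address(&sample.val, &displs[1]);
    displs[0] -= base;   /* make displacements relative to the struct */
    displs[1] -= base;

    MPI_Type_create_struct(2, block_len, displs, types, &pair_type);
    MPI_Type_commit(&pair_type);

    return pair_type;
}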
@@ -3693,7 +3691,6 @@ pin_entry(H5F_t * file_ptr,
} /* pin_entry() */
-#ifdef H5_METADATA_TRACE_FILE
/*****************************************************************************
* Function: pin_protected_entry()
@@ -3762,7 +3759,6 @@ pin_protected_entry(int32_t idx,
return;
} /* pin_protected_entry() */
-#endif /* H5_METADATA_TRACE_FILE */
/*****************************************************************************
@@ -6809,8 +6805,6 @@ smoke_check_5(int metadata_write_strategy)
* - H5AC_expunge_entry()
* - H5AC_resize_entry()
*
- * This test is skipped if H5_METADATA_TRACE_FILE is undefined.
- *
* Return: Success: TRUE
*
* Failure: FALSE
@@ -6823,63 +6817,63 @@ trace_file_check(int metadata_write_strategy)
{
hbool_t success = TRUE;
-#ifdef H5_METADATA_TRACE_FILE
-
const char *((* expected_output)[]) = NULL;
const char * expected_output_0[] =
{
"### HDF5 metadata cache trace file version 1 ###\n",
- "H5AC_set_cache_auto_resize_config 1 0 1 0 \"t_cache_trace.txt\" 1 0 2097152 0.300000 33554432 1048576 50000 1 0.900000 2.000000 1 1.000000 0.250000 1 4194304 3 0.999000 0.900000 1 1048576 3 1 0.100000 262144 0 0\n",
- "H5AC_insert_entry 0x400 27 0x0 2 0\n",
- "H5AC_insert_entry 0x402 27 0x0 2 0\n",
- "H5AC_insert_entry 0x404 27 0x0 4 0\n",
- "H5AC_insert_entry 0x408 27 0x0 6 0\n",
- "H5AC_protect 0x400 27 0x0 2 1\n",
- "H5AC_mark_entry_dirty 0x400 0\n",
- "H5AC_unprotect 0x400 27 0x0 0\n",
- "H5AC_protect 0x402 27 0x0 2 1\n",
- "H5AC_pin_protected_entry 0x402 0\n",
- "H5AC_unprotect 0x402 27 0x0 0\n",
- "H5AC_unpin_entry 0x402 0\n",
- "H5AC_expunge_entry 0x402 27 0\n",
- "H5AC_protect 0x404 27 0x0 4 1\n",
- "H5AC_pin_protected_entry 0x404 0\n",
- "H5AC_unprotect 0x404 27 0x0 0\n",
- "H5AC_mark_entry_dirty 0x404 0\n",
- "H5AC_resize_entry 0x404 2 0\n",
- "H5AC_resize_entry 0x404 4 0\n",
- "H5AC_unpin_entry 0x404 0\n",
- "H5AC_move_entry 0x400 0x8e65 27 0\n",
- "H5AC_move_entry 0x8e65 0x400 27 0\n",
- "H5AC_flush 0\n",
+ "H5AC_set_cache_auto_resize_config",
+ "H5AC_insert_entry",
+ "H5AC_insert_entry",
+ "H5AC_insert_entry",
+ "H5AC_insert_entry",
+ "H5AC_protect",
+ "H5AC_mark_entry_dirty",
+ "H5AC_unprotect",
+ "H5AC_protect",
+ "H5AC_pin_protected_entry",
+ "H5AC_unprotect",
+ "H5AC_unpin_entry",
+ "H5AC_expunge_entry",
+ "H5AC_protect",
+ "H5AC_pin_protected_entry",
+ "H5AC_unprotect",
+ "H5AC_mark_entry_dirty",
+ "H5AC_resize_entry",
+ "H5AC_resize_entry",
+ "H5AC_unpin_entry",
+ "H5AC_move_entry",
+ "H5AC_move_entry",
+ "H5AC_flush",
+ "H5AC_flush",
NULL
};
const char * expected_output_1[] =
{
"### HDF5 metadata cache trace file version 1 ###\n",
- "H5AC_set_cache_auto_resize_config 1 0 1 0 \"t_cache_trace.txt\" 1 0 2097152 0.300000 33554432 1048576 50000 1 0.900000 2.000000 1 1.000000 0.250000 1 4194304 3 0.999000 0.900000 1 1048576 3 1 0.100000 262144 1 0\n",
- "H5AC_insert_entry 0x400 27 0x0 2 0\n",
- "H5AC_insert_entry 0x402 27 0x0 2 0\n",
- "H5AC_insert_entry 0x404 27 0x0 4 0\n",
- "H5AC_insert_entry 0x408 27 0x0 6 0\n",
- "H5AC_protect 0x400 27 0x0 2 1\n",
- "H5AC_mark_entry_dirty 0x400 0\n",
- "H5AC_unprotect 0x400 27 0x0 0\n",
- "H5AC_protect 0x402 27 0x0 2 1\n",
- "H5AC_pin_protected_entry 0x402 0\n",
- "H5AC_unprotect 0x402 27 0x0 0\n",
- "H5AC_unpin_entry 0x402 0\n",
- "H5AC_expunge_entry 0x402 27 0\n",
- "H5AC_protect 0x404 27 0x0 4 1\n",
- "H5AC_pin_protected_entry 0x404 0\n",
- "H5AC_unprotect 0x404 27 0x0 0\n",
- "H5AC_mark_entry_dirty 0x404 0\n",
- "H5AC_resize_entry 0x404 2 0\n",
- "H5AC_resize_entry 0x404 4 0\n",
- "H5AC_unpin_entry 0x404 0\n",
- "H5AC_move_entry 0x400 0x8e65 27 0\n",
- "H5AC_move_entry 0x8e65 0x400 27 0\n",
- "H5AC_flush 0\n",
+ "H5AC_set_cache_auto_resize_config",
+ "H5AC_insert_entry",
+ "H5AC_insert_entry",
+ "H5AC_insert_entry",
+ "H5AC_insert_entry",
+ "H5AC_protect",
+ "H5AC_mark_entry_dirty",
+ "H5AC_unprotect",
+ "H5AC_protect",
+ "H5AC_pin_protected_entry",
+ "H5AC_unprotect",
+ "H5AC_unpin_entry",
+ "H5AC_expunge_entry",
+ "H5AC_protect",
+ "H5AC_pin_protected_entry",
+ "H5AC_unprotect",
+ "H5AC_mark_entry_dirty",
+ "H5AC_resize_entry",
+ "H5AC_resize_entry",
+ "H5AC_unpin_entry",
+ "H5AC_move_entry",
+ "H5AC_move_entry",
+ "H5AC_flush",
+ "H5AC_flush",
NULL
};
char buffer[256];
@@ -6887,8 +6881,8 @@ trace_file_check(int metadata_write_strategy)
hbool_t done = FALSE;
int i;
int max_nerrors;
- int expected_line_len;
- int actual_line_len;
+ size_t expected_line_len;
+ size_t actual_line_len;
hid_t fid = -1;
H5F_t * file_ptr = NULL;
H5C_t * cache_ptr = NULL;
@@ -6896,188 +6890,151 @@ trace_file_check(int metadata_write_strategy)
H5AC_cache_config_t config;
struct mssg_t mssg;
-#endif /* H5_METADATA_TRACE_FILE */
- switch ( metadata_write_strategy ) {
+ switch(metadata_write_strategy) {
+
+ case H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY:
- case H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY:
-#ifdef H5_METADATA_TRACE_FILE
expected_output = &expected_output_0;
-#endif /* H5_METADATA_TRACE_FILE */
- if ( world_mpi_rank == 0 ) {
- TESTING(
- "trace file collection -- process 0 only md write strategy");
- }
- break;
- case H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED:
-#ifdef H5_METADATA_TRACE_FILE
+ if(world_mpi_rank == 0)
+ TESTING("trace file collection -- process 0 only md write strategy");
+ break;
+
+ case H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED:
+
expected_output = &expected_output_1;
-#endif /* H5_METADATA_TRACE_FILE */
- if ( world_mpi_rank == 0 ) {
- TESTING(
- "trace file collection -- distributed md write strategy");
- }
- break;
+
+ if(world_mpi_rank == 0)
+ TESTING("trace file collection -- distributed md write strategy");
+ break;
default:
-#ifdef H5_METADATA_TRACE_FILE
+
/* this will almost certainly cause a failure, but it keeps us
* from de-referencing a NULL pointer.
*/
expected_output = &expected_output_0;
-#endif /* H5_METADATA_TRACE_FILE */
- if ( world_mpi_rank == 0 ) {
- TESTING("trace file collection -- unknown md write strategy");
- }
- break;
- }
-#ifdef H5_METADATA_TRACE_FILE
+ if(world_mpi_rank == 0)
+ TESTING("trace file collection -- unknown md write strategy");
+ break;
+ } /* end switch */
+
nerrors = 0;
init_data();
reset_stats();
- if ( world_mpi_rank == world_server_mpi_rank ) {
+ if(world_mpi_rank == world_server_mpi_rank) {
- if ( ! server_main() ) {
+ if(!server_main()) {
/* some error occurred in the server -- report failure */
nerrors++;
- if ( verbose ) {
- HDfprintf(stdout, "%d:%s: server_main() failed.\n",
- world_mpi_rank, FUNC);
- }
+ if ( verbose )
+ HDfprintf(stdout, "%d:%s: server_main() failed.\n", world_mpi_rank, FUNC);
}
}
- else /* run the clients */
- {
+ else {
+ /* run the clients */
- if ( ! setup_cache_for_test(&fid, &file_ptr, &cache_ptr,
- metadata_write_strategy) ) {
+ if(!setup_cache_for_test(&fid, &file_ptr, &cache_ptr, metadata_write_strategy) ) {
nerrors++;
fid = -1;
cache_ptr = NULL;
- if ( verbose ) {
- HDfprintf(stdout, "%d:%s: setup_cache_for_test() failed.\n",
- world_mpi_rank, FUNC);
- }
+ if(verbose)
+ HDfprintf(stdout, "%d:%s: setup_cache_for_test() failed.\n", world_mpi_rank, FUNC);
}
- if ( nerrors == 0 ) {
+ if(nerrors == 0) {
config.version = H5AC__CURR_CACHE_CONFIG_VERSION;
- if ( H5AC_get_cache_auto_resize_config(cache_ptr, &config)
- != SUCCEED ) {
-
- nerrors++;
- HDfprintf(stdout,
- "%d:%s: H5AC_get_cache_auto_resize_config() failed.\n",
- world_mpi_rank, FUNC);
-
- } else {
-
+ if(H5AC_get_cache_auto_resize_config(cache_ptr, &config) != SUCCEED) {
+ nerrors++;
+ HDfprintf(stdout, "%d:%s: H5AC_get_cache_auto_resize_config() failed.\n", world_mpi_rank, FUNC);
+ }
+ else {
config.open_trace_file = TRUE;
- strcpy(config.trace_file_name, "t_cache_trace.txt");
-
- if ( H5AC_set_cache_auto_resize_config(cache_ptr, &config)
- != SUCCEED ) {
+ strcpy(config.trace_file_name, "t_cache_trace.txt");
- nerrors++;
- HDfprintf(stdout,
- "%d:%s: H5AC_set_cache_auto_resize_config() failed.\n",
- world_mpi_rank, FUNC);
+ if(H5AC_set_cache_auto_resize_config(cache_ptr, &config) != SUCCEED) {
+ nerrors++;
+ HDfprintf(stdout, "%d:%s: H5AC_set_cache_auto_resize_config() failed.\n", world_mpi_rank, FUNC);
}
}
- }
+ } /* end if */
- insert_entry(cache_ptr, file_ptr, 0, H5AC__NO_FLAGS_SET);
- insert_entry(cache_ptr, file_ptr, 1, H5AC__NO_FLAGS_SET);
- insert_entry(cache_ptr, file_ptr, 2, H5AC__NO_FLAGS_SET);
- insert_entry(cache_ptr, file_ptr, 3, H5AC__NO_FLAGS_SET);
+ insert_entry(cache_ptr, file_ptr, 0, H5AC__NO_FLAGS_SET);
+ insert_entry(cache_ptr, file_ptr, 1, H5AC__NO_FLAGS_SET);
+ insert_entry(cache_ptr, file_ptr, 2, H5AC__NO_FLAGS_SET);
+ insert_entry(cache_ptr, file_ptr, 3, H5AC__NO_FLAGS_SET);
- lock_entry(file_ptr, 0);
- mark_entry_dirty(0);
- unlock_entry(file_ptr, 0, H5AC__NO_FLAGS_SET);
+ lock_entry(file_ptr, 0);
+ mark_entry_dirty(0);
+ unlock_entry(file_ptr, 0, H5AC__NO_FLAGS_SET);
- lock_entry(file_ptr, 1);
+ lock_entry(file_ptr, 1);
pin_protected_entry(1, TRUE);
- unlock_entry(file_ptr, 1, H5AC__NO_FLAGS_SET);
+ unlock_entry(file_ptr, 1, H5AC__NO_FLAGS_SET);
unpin_entry(file_ptr, 1, TRUE, FALSE, FALSE);
expunge_entry(file_ptr, 1);
- lock_entry(file_ptr, 2);
+ lock_entry(file_ptr, 2);
pin_protected_entry(2, TRUE);
- unlock_entry(file_ptr, 2, H5AC__NO_FLAGS_SET);
- mark_entry_dirty(2);
+ unlock_entry(file_ptr, 2, H5AC__NO_FLAGS_SET);
+ mark_entry_dirty(2);
resize_entry(2, data[2].len / 2);
resize_entry(2, data[2].len);
unpin_entry(file_ptr, 2, TRUE, FALSE, FALSE);
- move_entry(file_ptr, 0, 20);
- move_entry(file_ptr, 0, 20);
+ move_entry(file_ptr, 0, 20);
+ move_entry(file_ptr, 0, 20);
- if ( H5Fflush(fid, H5F_SCOPE_GLOBAL) < 0 ) {
+ if(H5Fflush(fid, H5F_SCOPE_GLOBAL) < 0) {
nerrors++;
- if ( verbose ) {
- HDfprintf(stdout, "%d:%s: H5Fflush() failed.\n",
- world_mpi_rank, FUNC);
- }
+ if(verbose)
+ HDfprintf(stdout, "%d:%s: H5Fflush() failed.\n", world_mpi_rank, FUNC);
}
- if ( nerrors == 0 ) {
-
+ if(nerrors == 0) {
config.version = H5AC__CURR_CACHE_CONFIG_VERSION;
- if ( H5AC_get_cache_auto_resize_config(cache_ptr, &config)
- != SUCCEED ) {
-
- nerrors++;
- HDfprintf(stdout,
- "%d:%s: H5AC_get_cache_auto_resize_config() failed.\n",
- world_mpi_rank, FUNC);
-
- } else {
-
+ if(H5AC_get_cache_auto_resize_config(cache_ptr, &config) != SUCCEED) {
+ nerrors++;
+ HDfprintf(stdout, "%d:%s: H5AC_get_cache_auto_resize_config() failed.\n", world_mpi_rank, FUNC);
+ }
+ else {
config.open_trace_file = FALSE;
config.close_trace_file = TRUE;
- config.trace_file_name[0] = '\0';
-
- if ( H5AC_set_cache_auto_resize_config(cache_ptr, &config)
- != SUCCEED ) {
+ config.trace_file_name[0] = '\0';
- nerrors++;
- HDfprintf(stdout,
- "%d:%s: H5AC_set_cache_auto_resize_config() failed.\n",
- world_mpi_rank, FUNC);
+ if(H5AC_set_cache_auto_resize_config(cache_ptr, &config) != SUCCEED) {
+ nerrors++;
+ HDfprintf(stdout, "%d:%s: H5AC_set_cache_auto_resize_config() failed.\n", world_mpi_rank, FUNC);
}
}
- }
+ } /* end if */
- if ( fid >= 0 ) {
-
- if ( ! take_down_cache(fid, cache_ptr) ) {
+ if(fid >= 0) {
+ if(!take_down_cache(fid, cache_ptr)) {
nerrors++;
- if ( verbose ) {
- HDfprintf(stdout, "%d:%s: take_down_cache() failed.\n",
- world_mpi_rank, FUNC);
- }
+ if(verbose)
+ HDfprintf(stdout, "%d:%s: take_down_cache() failed.\n", world_mpi_rank, FUNC);
}
- }
+ } /* end if */
/* verify that all instances of datum are back where they started
* and are clean.
*/
- for ( i = 0; i < NUM_DATA_ENTRIES; i++ )
- {
- HDassert( data_index[i] == i );
- HDassert( ! (data[i].dirty) );
+ for(i = 0; i < NUM_DATA_ENTRIES; i++) {
+ HDassert(data_index[i] == i);
+ HDassert(!(data[i].dirty));
}
/* compose the done message */
@@ -7091,117 +7048,121 @@ trace_file_check(int metadata_write_strategy)
mssg.count = 0; /* not used */
mssg.magic = MSSG_MAGIC;
- if ( success ) {
-
+ if(success) {
success = send_mssg(&mssg, FALSE);
- if ( ! success ) {
-
+ if(!success) {
nerrors++;
- if ( verbose ) {
- HDfprintf(stdout, "%d:%s: send_mssg() failed on done.\n",
- world_mpi_rank, FUNC);
- }
+ if(verbose)
+ HDfprintf(stdout, "%d:%s: send_mssg() failed on done.\n", world_mpi_rank, FUNC);
}
- }
-
- if ( nerrors == 0 ) {
+ } /* end if */
- sprintf(trace_file_name, "t_cache_trace.txt.%d",
- (int)file_mpi_rank);
+ if(nerrors == 0) {
+ HDsprintf(trace_file_name, "t_cache_trace.txt.%d", (int)file_mpi_rank);
- if ( (trace_file_ptr = HDfopen(trace_file_name, "r")) == NULL ) {
+ if((trace_file_ptr = HDfopen(trace_file_name, "r")) == NULL ) {
nerrors++;
- if ( verbose ) {
- HDfprintf(stdout, "%d:%s: HDfopen failed.\n",
- world_mpi_rank, FUNC);
- }
+ if(verbose)
+ HDfprintf(stdout, "%d:%s: HDfopen failed.\n", world_mpi_rank, FUNC);
}
- }
-
- i = 0;
- while ( ( nerrors == 0 ) && ( ! done ) )
- {
- if ( (*expected_output)[i] == NULL ) {
-
- expected_line_len = 0;
-
- } else {
-
- expected_line_len = HDstrlen((*expected_output)[i]);
- }
-
- if ( HDfgets(buffer, 255, trace_file_ptr) != NULL ) {
-
- actual_line_len = HDstrlen(buffer);
+ } /* end if */
- } else {
- actual_line_len = 0;
- }
+ i = 0;
+ while((nerrors == 0) && (!done)) {
+ /* Get lines of actual and expected data */
+ if((*expected_output)[i] == NULL)
+ expected_line_len = (size_t)0;
+ else
+ expected_line_len = HDstrlen((*expected_output)[i]);
+
+ if(HDfgets(buffer, 255, trace_file_ptr) != NULL)
+ actual_line_len = HDstrlen(buffer);
+ else
+ actual_line_len = (size_t)0;
+
+ /* Compare the lines */
+ /* Handle running out of data */
+ if((actual_line_len == 0) || (expected_line_len == 0)) {
+ if((actual_line_len == 0) && (expected_line_len == 0)) {
+ /* Both ran out at the same time - we're done */
+ done = TRUE;
+ }
+ else {
+ /* One ran out before the other - BADNESS */
+ nerrors++;
+ if(verbose) {
+ HDfprintf(stdout, "%d:%s: Unexpected data in trace file line %d.\n", world_mpi_rank, FUNC, i);
+ if(expected_line_len == 0) {
+ HDfprintf(stdout, "%d:%s: expected = \"%s\" %d\n", world_mpi_rank, FUNC, "<EMPTY>", expected_line_len);
+ HDfprintf(stdout, "%d:%s: actual = \"%s\" %d\n", world_mpi_rank, FUNC, buffer, actual_line_len);
+ }
+ if(actual_line_len == 0) {
+ HDfprintf(stdout, "%d:%s: expected = \"%s\" %d\n", world_mpi_rank, FUNC, (*expected_output)[i], expected_line_len);
+ HDfprintf(stdout, "%d:%s: actual = \"%s\" %d\n", world_mpi_rank, FUNC, "<EMPTY>", actual_line_len);
+ }
+ }
+ HDfprintf(stdout, "BADNESS BADNESS BADNESS\n");
+ }
+ }
+ /* We directly compare the header line (line 0) */
+ else if(0 == i) {
+ if((actual_line_len != expected_line_len) || (HDstrcmp(buffer, (*expected_output)[i]) != 0 )) {
- if ( ( actual_line_len == 0 ) && ( expected_line_len == 0 ) ) {
+ nerrors++;
+ if(verbose) {
+ HDfprintf(stdout, "%d:%s: Unexpected data in trace file line %d.\n", world_mpi_rank, FUNC, i);
+ HDfprintf(stdout, "%d:%s: expected = \"%s\" %d\n", world_mpi_rank, FUNC, (*expected_output)[i], expected_line_len);
+ HDfprintf(stdout, "%d:%s: actual = \"%s\" %d\n", world_mpi_rank, FUNC, buffer, actual_line_len);
+ }
+ }
+ }
+ /* All other lines we tokenize and just compare the function name. This
+ * keeps the test from being too fragile.
+ */
+ else {
+ char *tok = NULL; /* token for actual line */
- done = TRUE;
+ tok = HDstrtok(buffer, " ");
- } else if ( ( actual_line_len != expected_line_len ) ||
- ( HDstrcmp(buffer, (*expected_output)[i]) != 0 ) ) {
+ if(HDstrcmp(tok, (*expected_output)[i]) != 0 ) {
- nerrors++;
- if ( verbose ) {
- HDfprintf(stdout,
- "%d:%s: Unexpected data in trace file line %d.\n",
- world_mpi_rank, FUNC, i);
- HDfprintf(stdout, "%d:%s: expected = \"%s\" %d\n",
- world_mpi_rank, FUNC, (*expected_output)[i],
- expected_line_len);
- HDfprintf(stdout, "%d:%s: actual = \"%s\" %d\n",
- world_mpi_rank, FUNC, buffer,
- actual_line_len);
+ nerrors++;
+ if(verbose) {
+ HDfprintf(stdout, "%d:%s: Unexpected data in trace file line %d.\n", world_mpi_rank, FUNC, i);
+ HDfprintf(stdout, "%d:%s: expected = \"%s\"\n", world_mpi_rank, FUNC, (*expected_output)[i]);
+ HDfprintf(stdout, "%d:%s: actual = \"%s\"\n", world_mpi_rank, FUNC, tok);
+ }
}
- } else {
- i++;
- }
- }
+ } /* end else */
- if ( trace_file_ptr != NULL ) {
+ i++;
+ } /* end while */
- HDfclose(trace_file_ptr);
- trace_file_ptr = NULL;
-#if 1
- HDremove(trace_file_name);
-#endif
+ /* Clean up the trace file */
+ if(trace_file_ptr != NULL) {
+ HDfclose(trace_file_ptr);
+ trace_file_ptr = NULL;
+ HDremove(trace_file_name);
}
- }
+ } /* end giant else that runs clients */
max_nerrors = get_max_nerrors();
- if ( world_mpi_rank == 0 ) {
-
- if ( max_nerrors == 0 ) {
-
- PASSED();
-
- } else {
+ if(world_mpi_rank == 0) {
+ if(max_nerrors == 0) {
+ PASSED();
+ }
+ else {
failures++;
H5_FAILED();
}
}
- success = ( ( success ) && ( max_nerrors == 0 ) );
-
-#else /* H5_METADATA_TRACE_FILE */
-
- if ( world_mpi_rank == 0 ) {
-
- SKIPPED();
-
- HDfprintf(stdout, " trace file support disabled.\n");
- }
-
-#endif /* H5_METADATA_TRACE_FILE */
+ success = ((success) && (max_nerrors == 0));
return(success);
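The rewritten comparison loop above no longer matches whole trace-file lines (whose addresses, lengths, and flags can legitimately vary); after the header line it tokenizes each line and compares only the leading H5AC function name. A standalone sketch of that relaxed check, using a hypothetical line_matches() helper; note that strtok() modifies its input, as HDstrtok() does in the test:

#include <stdio.h>
#include <string.h>

/* Compare only the first whitespace-delimited token of a trace line
 * (the H5AC function name), ignoring the arguments that follow. */
static int line_matches(char *actual, const char *expected_name)
{
    char *tok = strtok(actual, " ");

    return (tok != NULL) && (strcmp(tok, expected_name) == 0);
}

int main(void)
{
    char line[] = "H5AC_insert_entry 0x400 27 0x0 2 0\n";

    printf("%d\n", line_matches(line, "H5AC_insert_entry")); /* prints 1 */

    return 0;
}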
@@ -7264,7 +7225,7 @@ smoke_check_6(int metadata_write_strategy)
/* some error occurred in the server -- report failure */
nerrors++;
if ( verbose ) {
- HDfprintf(stdout, "%d:%s: server_main() failed.\n",
+ HDfprintf(stdout, "%d:%s: server_main() failed.\n",
world_mpi_rank, FUNC);
}
}
@@ -7280,7 +7241,7 @@ smoke_check_6(int metadata_write_strategy)
fid = -1;
cache_ptr = NULL;
if ( verbose ) {
- HDfprintf(stdout, "%d:%s: setup_cache_for_test() failed.\n",
+ HDfprintf(stdout, "%d:%s: setup_cache_for_test() failed.\n",
world_mpi_rank, FUNC);
}
}
@@ -7289,7 +7250,7 @@ smoke_check_6(int metadata_write_strategy)
virt_num_data_entries = NUM_DATA_ENTRIES;
/* insert the first half collectively */
- file_ptr->coll_md_read = H5P_USER_TRUE;
+ H5CX_set_coll_metadata_read(TRUE);
for ( i = 0; i < virt_num_data_entries/2; i++ )
{
struct datum * entry_ptr;
@@ -7310,7 +7271,7 @@ smoke_check_6(int metadata_write_strategy)
}
/* insert the other half independently */
- file_ptr->coll_md_read = H5P_USER_FALSE;
+ H5CX_set_coll_metadata_read(FALSE);
for ( i = virt_num_data_entries/2; i < virt_num_data_entries; i++ )
{
struct datum * entry_ptr;
@@ -7330,7 +7291,7 @@ smoke_check_6(int metadata_write_strategy)
HDassert(cache_ptr->max_cache_size*0.8 > cache_ptr->coll_list_size);
}
- /* flush the file */
+ /* flush the file */
if ( H5Fflush(fid, H5F_SCOPE_GLOBAL) < 0 ) {
nerrors++;
if ( verbose ) {
@@ -7340,7 +7301,7 @@ smoke_check_6(int metadata_write_strategy)
}
/* Protect the first half of the entries collectively */
- file_ptr->coll_md_read = H5P_USER_TRUE;
+ H5CX_set_coll_metadata_read(TRUE);
for ( i = 0; i < (virt_num_data_entries / 2); i++ )
{
struct datum * entry_ptr;
@@ -7361,13 +7322,13 @@ smoke_check_6(int metadata_write_strategy)
}
/* protect the other half independently */
- file_ptr->coll_md_read = H5P_USER_FALSE;
+ H5CX_set_coll_metadata_read(FALSE);
for ( i = virt_num_data_entries/2; i < virt_num_data_entries; i++ )
{
struct datum * entry_ptr;
entry_ptr = &(data[i]);
- lock_entry(file_ptr, i);
+ lock_entry(file_ptr, i);
if(FALSE != entry_ptr->header.coll_access) {
nerrors++;
diff --git a/testpar/t_coll_md_read.c b/testpar/t_coll_md_read.c
index f945d2b..912388c 100644
--- a/testpar/t_coll_md_read.c
+++ b/testpar/t_coll_md_read.c
@@ -32,6 +32,14 @@
#define PARTIAL_NO_SELECTION_Y_DIM_SCALE 5
#define PARTIAL_NO_SELECTION_X_DIM_SCALE 5
+#define MULTI_CHUNK_IO_ADDRMAP_ISSUE_DIMS 2
+
+#define LINK_CHUNK_IO_SORT_CHUNK_ISSUE_NO_SEL_PROCESS (mpi_rank == mpi_size - 1)
+#define LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DATASET_NAME "linked_chunk_io_sort_chunk_issue"
+#define LINK_CHUNK_IO_SORT_CHUNK_ISSUE_Y_DIM_SCALE 20000
+#define LINK_CHUNK_IO_SORT_CHUNK_ISSUE_CHUNK_SIZE 1
+#define LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DIMS 1
+
/*
* A test for issue HDFFV-10501. A parallel hang was reported which occurred
* in linked-chunk I/O when collective metadata reads are enabled and some ranks
@@ -57,13 +65,13 @@ void test_partial_no_selection_coll_md_read(void)
hsize_t stride[PARTIAL_NO_SELECTION_DATASET_NDIMS];
hsize_t count[PARTIAL_NO_SELECTION_DATASET_NDIMS];
hsize_t block[PARTIAL_NO_SELECTION_DATASET_NDIMS];
- hid_t file_id = -1;
- hid_t fapl_id = -1;
- hid_t dset_id = -1;
- hid_t dcpl_id = -1;
- hid_t dxpl_id = -1;
- hid_t fspace_id = -1;
- hid_t mspace_id = -1;
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t fapl_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t dcpl_id = H5I_INVALID_HID;
+ hid_t dxpl_id = H5I_INVALID_HID;
+ hid_t fspace_id = H5I_INVALID_HID;
+ hid_t mspace_id = H5I_INVALID_HID;
int mpi_rank, mpi_size;
void *data = NULL;
void *read_buf = NULL;
@@ -86,7 +94,7 @@ void test_partial_no_selection_coll_md_read(void)
file_id = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id);
VRFY((file_id >= 0), "H5Fcreate succeeded");
- dataset_dims = malloc(PARTIAL_NO_SELECTION_DATASET_NDIMS * sizeof(*dataset_dims));
+ dataset_dims = HDmalloc(PARTIAL_NO_SELECTION_DATASET_NDIMS * sizeof(*dataset_dims));
VRFY((dataset_dims != NULL), "malloc succeeded");
dataset_dims[0] = PARTIAL_NO_SELECTION_Y_DIM_SCALE * mpi_size;
@@ -129,7 +137,7 @@ void test_partial_no_selection_coll_md_read(void)
mspace_id = H5Screate_simple(1, sel_dims, NULL);
VRFY((mspace_id >= 0), "H5Screate_simple succeeded");
- data = calloc(1, count[1] * (PARTIAL_NO_SELECTION_Y_DIM_SCALE * PARTIAL_NO_SELECTION_X_DIM_SCALE) * sizeof(int));
+ data = HDcalloc(1, count[1] * (PARTIAL_NO_SELECTION_Y_DIM_SCALE * PARTIAL_NO_SELECTION_X_DIM_SCALE) * sizeof(int));
VRFY((data != NULL), "calloc succeeded");
dxpl_id = H5Pcreate(H5P_DATASET_XFER);
@@ -151,7 +159,7 @@ void test_partial_no_selection_coll_md_read(void)
*/
VRFY((H5Pset_dxpl_mpio_chunk_opt(dxpl_id, H5FD_MPIO_CHUNK_ONE_IO) >= 0), "H5Pset_dxpl_mpio_chunk_opt succeeded");
- read_buf = malloc(count[1] * (PARTIAL_NO_SELECTION_Y_DIM_SCALE * PARTIAL_NO_SELECTION_X_DIM_SCALE) * sizeof(int));
+ read_buf = HDmalloc(count[1] * (PARTIAL_NO_SELECTION_Y_DIM_SCALE * PARTIAL_NO_SELECTION_X_DIM_SCALE) * sizeof(int));
VRFY((read_buf != NULL), "malloc succeeded");
/*
@@ -171,21 +179,321 @@ void test_partial_no_selection_coll_md_read(void)
* Check data integrity just to be sure.
*/
if (!PARTIAL_NO_SELECTION_NO_SEL_PROCESS) {
- VRFY((!memcmp(data, read_buf, count[1] * (PARTIAL_NO_SELECTION_Y_DIM_SCALE * PARTIAL_NO_SELECTION_X_DIM_SCALE) * sizeof(int))), "memcmp succeeded");
+ VRFY((!HDmemcmp(data, read_buf, count[1] * (PARTIAL_NO_SELECTION_Y_DIM_SCALE * PARTIAL_NO_SELECTION_X_DIM_SCALE) * sizeof(int))), "memcmp succeeded");
+ }
+
+ if (dataset_dims) {
+ HDfree(dataset_dims);
+ dataset_dims = NULL;
}
+ if (data) {
+ HDfree(data);
+ data = NULL;
+ }
+
+ if (read_buf) {
+ HDfree(read_buf);
+ read_buf = NULL;
+ }
+
+ VRFY((H5Sclose(fspace_id) >= 0), "H5Sclose succeeded");
+ VRFY((H5Sclose(mspace_id) >= 0), "H5Sclose succeeded");
+ VRFY((H5Pclose(dcpl_id) >= 0), "H5Pclose succeeded");
+ VRFY((H5Pclose(dxpl_id) >= 0), "H5Pclose succeeded");
+ VRFY((H5Dclose(dset_id) >= 0), "H5Dclose succeeded");
+ VRFY((H5Pclose(fapl_id) >= 0), "H5Pclose succeeded");
+ VRFY((H5Fclose(file_id) >= 0), "H5Fclose succeeded");
+}
+
+/*
+ * A test for HDFFV-10562 which attempts to verify that using multi-chunk
+ * I/O with collective metadata reads enabled doesn't cause issues due to
+ * collective metadata reads being made only by process 0 in H5D__chunk_addrmap().
+ *
+ * Failure in this test may either cause a hang or, due to how the MPI calls
+ * pertaining to this issue might mistakenly match up, produce an MPI error
+ * message similar to:
+ *
+ * #008: H5Dmpio.c line 2546 in H5D__obtain_mpio_mode(): MPI_BCast failed
+ * major: Internal error (too specific to document in detail)
+ * minor: Some MPI function failed
+ * #009: H5Dmpio.c line 2546 in H5D__obtain_mpio_mode(): Message truncated, error stack:
+ *PMPI_Bcast(1600)..................: MPI_Bcast(buf=0x1df98e0, count=18, MPI_BYTE, root=0, comm=0x84000006) failed
+ *MPIR_Bcast_impl(1452).............:
+ *MPIR_Bcast(1476)..................:
+ *MPIR_Bcast_intra(1249)............:
+ *MPIR_SMP_Bcast(1088)..............:
+ *MPIR_Bcast_binomial(239)..........:
+ *MPIDI_CH3U_Receive_data_found(131): Message from rank 0 and tag 2 truncated; 2616 bytes received but buffer size is 18
+ * major: Internal error (too specific to document in detail)
+ * minor: MPI Error String
+ *
+ */
+void test_multi_chunk_io_addrmap_issue(void)
+{
+ const char *filename;
+ hsize_t start[MULTI_CHUNK_IO_ADDRMAP_ISSUE_DIMS];
+ hsize_t stride[MULTI_CHUNK_IO_ADDRMAP_ISSUE_DIMS];
+ hsize_t count[MULTI_CHUNK_IO_ADDRMAP_ISSUE_DIMS];
+ hsize_t block[MULTI_CHUNK_IO_ADDRMAP_ISSUE_DIMS];
+ hsize_t dims[MULTI_CHUNK_IO_ADDRMAP_ISSUE_DIMS] = {10, 5};
+ hsize_t chunk_dims[MULTI_CHUNK_IO_ADDRMAP_ISSUE_DIMS] = {5, 5};
+ hsize_t max_dims[MULTI_CHUNK_IO_ADDRMAP_ISSUE_DIMS] = {H5S_UNLIMITED, H5S_UNLIMITED};
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t fapl_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t dcpl_id = H5I_INVALID_HID;
+ hid_t dxpl_id = H5I_INVALID_HID;
+ hid_t space_id = H5I_INVALID_HID;
+ void *read_buf = NULL;
+ int mpi_rank;
+ int data[5][5] = { {0, 1, 2, 3, 4},
+ {0, 1, 2, 3, 4},
+ {0, 1, 2, 3, 4},
+ {0, 1, 2, 3, 4},
+ {0, 1, 2, 3, 4} };
+
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ filename = GetTestParameters();
+
+ fapl_id = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type);
+ VRFY((fapl_id >= 0), "create_faccess_plist succeeded");
+
+ /*
+ * Even though the testphdf5 framework currently sets collective metadata reads
+ * on the FAPL, we call it here just to be sure this is futureproof, since
+ * demonstrating this issue relies upon it.
+ */
+ VRFY((H5Pset_all_coll_metadata_ops(fapl_id, true) >= 0), "Set collective metadata reads succeeded");
+
+ file_id = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id);
+ VRFY((file_id >= 0), "H5Fcreate succeeded");
+
+ space_id = H5Screate_simple(MULTI_CHUNK_IO_ADDRMAP_ISSUE_DIMS, dims, max_dims);
+ VRFY((space_id >= 0), "H5Screate_simple succeeded");
+
+ dcpl_id = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY((dcpl_id >= 0), "H5Pcreate succeeded");
+
+ VRFY((H5Pset_chunk(dcpl_id, MULTI_CHUNK_IO_ADDRMAP_ISSUE_DIMS, chunk_dims) >= 0), "H5Pset_chunk succeeded");
+
+ dset_id = H5Dcreate2(file_id, "dset", H5T_NATIVE_INT, space_id, H5P_DEFAULT, dcpl_id, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "H5Dcreate2 succeeded");
+
+ dxpl_id = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((dxpl_id >= 0), "H5Pcreate succeeded");
+
+ VRFY((H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE) >= 0), "H5Pset_dxpl_mpio succeeded");
+ VRFY((H5Pset_dxpl_mpio_chunk_opt(dxpl_id, H5FD_MPIO_CHUNK_MULTI_IO) >= 0), "H5Pset_dxpl_mpio_chunk_opt succeeded");
+
+ start[1] = 0;
+ stride[0] = stride[1] = 1;
+ count[0] = count[1] = 5;
+ block[0] = block[1] = 1;
+
+ if (mpi_rank == 0)
+ start[0] = 0;
+ else
+ start[0] = 5;
+
+ VRFY((H5Sselect_hyperslab(space_id, H5S_SELECT_SET, start, stride, count, block) >= 0), "H5Sselect_hyperslab succeeded");
+ if (mpi_rank != 0)
+ VRFY((H5Sselect_none(space_id) >= 0), "H5Sselect_none succeeded");
+
+ VRFY((H5Dwrite(dset_id, H5T_NATIVE_INT, H5S_ALL, space_id, dxpl_id, data) >= 0), "H5Dwrite succeeded");
+
+ VRFY((H5Fflush(file_id, H5F_SCOPE_GLOBAL) >= 0), "H5Fflush succeeded");
+
+ read_buf = HDmalloc(50 * sizeof(int));
+ VRFY((read_buf != NULL), "malloc succeeded");
+
+ VRFY((H5Dread(dset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, dxpl_id, read_buf) >= 0), "H5Dread succeeded");
+
+ if (read_buf) {
+ HDfree(read_buf);
+ read_buf = NULL;
+ }
+
+ VRFY((H5Sclose(space_id) >= 0), "H5Sclose succeeded");
+ VRFY((H5Pclose(dcpl_id) >= 0), "H5Pclose succeeded");
+ VRFY((H5Pclose(dxpl_id) >= 0), "H5Pclose succeeded");
+ VRFY((H5Dclose(dset_id) >= 0), "H5Dclose succeeded");
+ VRFY((H5Pclose(fapl_id) >= 0), "H5Pclose succeeded");
+ VRFY((H5Fclose(file_id) >= 0), "H5Fclose succeeded");
+}
+
+/*
+ * A test for HDFFV-10562 which attempts to verify that using linked-chunk
+ * I/O with collective metadata reads enabled doesn't cause issues due to
+ * collective metadata reads being made only by process 0 in H5D__sort_chunk().
+ *
+ * NOTE: Due to the way that the threshold value which pertains to this test
+ * is currently calculated within HDF5, there are several conditions that this
+ * test must maintain. Refer to the function H5D__sort_chunk in H5Dmpio.c for
+ * a better idea of why.
+ *
+ * Condition 1: We need to make sure that the test always selects every single
+ * chunk in the dataset. It is fine if the selection is split up among multiple
+ * ranks, but their combined selection must cover the whole dataset.
+ *
+ * Condition 2: The number of chunks in the dataset divided by the number of MPI
+ * ranks must exceed or equal 10000. In other words, each MPI rank must be
+ * responsible for 10000 or more unique chunks.
+ *
+ * Condition 3: This test will currently only be reliably reproducible for 2 or 3
+ * MPI ranks. The threshold value calculated reduces to a constant 100 / mpi_size,
+ * and is compared against a default value of 30%.
+ *
+ * Failure in this test may either cause a hang or, due to how the MPI calls
+ * pertaining to this issue might mistakenly match up, produce an MPI error
+ * message similar to:
+ *
+ * #008: H5Dmpio.c line 2338 in H5D__sort_chunk(): MPI_BCast failed
+ * major: Internal error (too specific to document in detail)
+ * minor: Some MPI function failed
+ * #009: H5Dmpio.c line 2338 in H5D__sort_chunk(): Other MPI error, error stack:
+ *PMPI_Bcast(1600)........: MPI_Bcast(buf=0x7eae610, count=320000, MPI_BYTE, root=0, comm=0x84000006) failed
+ *MPIR_Bcast_impl(1452)...:
+ *MPIR_Bcast(1476)........:
+ *MPIR_Bcast_intra(1249)..:
+ *MPIR_SMP_Bcast(1088)....:
+ *MPIR_Bcast_binomial(250): message sizes do not match across processes in the collective routine: Received 2096 but expected 320000
+ * major: Internal error (too specific to document in detail)
+ * minor: MPI Error String
+ */
+void test_link_chunk_io_sort_chunk_issue(void)
+{
+ const char *filename;
+ hsize_t *dataset_dims = NULL;
+ hsize_t max_dataset_dims[LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DIMS];
+ hsize_t sel_dims[1];
+ hsize_t chunk_dims[LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DIMS] = { LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DIMS };
+ hsize_t start[LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DIMS];
+ hsize_t stride[LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DIMS];
+ hsize_t count[LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DIMS];
+ hsize_t block[LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DIMS];
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t fapl_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t dcpl_id = H5I_INVALID_HID;
+ hid_t dxpl_id = H5I_INVALID_HID;
+ hid_t fspace_id = H5I_INVALID_HID;
+ hid_t mspace_id = H5I_INVALID_HID;
+ int mpi_rank, mpi_size;
+ void *data = NULL;
+ void *read_buf = NULL;
+
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+
+ filename = GetTestParameters();
+
+ fapl_id = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type);
+ VRFY((fapl_id >= 0), "create_faccess_plist succeeded");
+
+ /*
+ * Even though the testphdf5 framework currently sets collective metadata reads
+ * on the FAPL, we call it here just to be sure this is futureproof, since
+ * demonstrating this issue relies upon it.
+ */
+ VRFY((H5Pset_all_coll_metadata_ops(fapl_id, true) >= 0), "Set collective metadata reads succeeded");
+
+ file_id = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id);
+ VRFY((file_id >= 0), "H5Fcreate succeeded");
+
+ dataset_dims = HDmalloc(LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DIMS * sizeof(*dataset_dims));
+ VRFY((dataset_dims != NULL), "malloc succeeded");
+
+ dataset_dims[0] = LINK_CHUNK_IO_SORT_CHUNK_ISSUE_CHUNK_SIZE * mpi_size * LINK_CHUNK_IO_SORT_CHUNK_ISSUE_Y_DIM_SCALE;
+ max_dataset_dims[0] = H5S_UNLIMITED;
+
+ fspace_id = H5Screate_simple(LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DIMS, dataset_dims, max_dataset_dims);
+ VRFY((fspace_id >= 0), "H5Screate_simple succeeded");
+
+ /*
+ * Set up chunking on the dataset in order to reproduce the problem.
+ */
+ dcpl_id = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY((dcpl_id >= 0), "H5Pcreate succeeded");
+
+ VRFY((H5Pset_chunk(dcpl_id, LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DIMS, chunk_dims) >= 0), "H5Pset_chunk succeeded");
+
+ dset_id = H5Dcreate2(file_id, LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DATASET_NAME, H5T_NATIVE_INT, fspace_id, H5P_DEFAULT, dcpl_id, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "H5Dcreate2 succeeded");
+
+ /*
+ * Setup hyperslab selection to split the dataset among the ranks.
+ *
+ * The ranks will write rows across the dataset.
+ */
+ stride[0] = LINK_CHUNK_IO_SORT_CHUNK_ISSUE_CHUNK_SIZE;
+ count[0] = (dataset_dims[0] / LINK_CHUNK_IO_SORT_CHUNK_ISSUE_CHUNK_SIZE) / mpi_size;
+ start[0] = count[0] * mpi_rank;
+ block[0] = LINK_CHUNK_IO_SORT_CHUNK_ISSUE_CHUNK_SIZE;
+
+ VRFY((H5Sselect_hyperslab(fspace_id, H5S_SELECT_SET, start, stride, count, block) >= 0), "H5Sselect_hyperslab succeeded");
+
+ sel_dims[0] = count[0] * (LINK_CHUNK_IO_SORT_CHUNK_ISSUE_CHUNK_SIZE);
+
+ mspace_id = H5Screate_simple(1, sel_dims, NULL);
+ VRFY((mspace_id >= 0), "H5Screate_simple succeeded");
+
+ data = HDcalloc(1, count[0] * (LINK_CHUNK_IO_SORT_CHUNK_ISSUE_CHUNK_SIZE) * sizeof(int));
+ VRFY((data != NULL), "calloc succeeded");
+
+ dxpl_id = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((dxpl_id >= 0), "H5Pcreate succeeded");
+
+ /*
+ * Enable collective access for the data transfer.
+ */
+ VRFY((H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE) >= 0), "H5Pset_dxpl_mpio succeeded");
+
+ VRFY((H5Dwrite(dset_id, H5T_NATIVE_INT, mspace_id, fspace_id, dxpl_id, data) >= 0), "H5Dwrite succeeded");
+
+ VRFY((H5Fflush(file_id, H5F_SCOPE_GLOBAL) >= 0), "H5Fflush succeeded");
+
+ /*
+ * Ensure that linked-chunk I/O is performed since this is
+ * the particular code path where the issue lies and we don't
+ * want the library doing multi-chunk I/O behind our backs.
+ */
+ VRFY((H5Pset_dxpl_mpio_chunk_opt(dxpl_id, H5FD_MPIO_CHUNK_ONE_IO) >= 0), "H5Pset_dxpl_mpio_chunk_opt succeeded");
+
+ read_buf = HDmalloc(count[0] * (LINK_CHUNK_IO_SORT_CHUNK_ISSUE_CHUNK_SIZE) * sizeof(int));
+ VRFY((read_buf != NULL), "malloc succeeded");
+
+ VRFY((H5Sselect_hyperslab(fspace_id, H5S_SELECT_SET, start, stride, count, block) >= 0), "H5Sselect_hyperslab succeeded");
+
+ sel_dims[0] = count[0] * (LINK_CHUNK_IO_SORT_CHUNK_ISSUE_CHUNK_SIZE);
+
+ VRFY((H5Sclose(mspace_id) >= 0), "H5Sclose succeeded");
+
+ mspace_id = H5Screate_simple(1, sel_dims, NULL);
+ VRFY((mspace_id >= 0), "H5Screate_simple succeeded");
+
+ read_buf = HDrealloc(read_buf, count[0] * (LINK_CHUNK_IO_SORT_CHUNK_ISSUE_CHUNK_SIZE) * sizeof(int));
+ VRFY((read_buf != NULL), "realloc succeeded");
+
+ /*
+ * Finally have each rank read their section of data back from the dataset.
+ */
+ VRFY((H5Dread(dset_id, H5T_NATIVE_INT, mspace_id, fspace_id, dxpl_id, read_buf) >= 0), "H5Dread succeeded");
+
if (dataset_dims) {
- free(dataset_dims);
+ HDfree(dataset_dims);
dataset_dims = NULL;
}
if (data) {
- free(data);
+ HDfree(data);
data = NULL;
}
if (read_buf) {
- free(read_buf);
+ HDfree(read_buf);
read_buf = NULL;
}
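The t_coll_md_read.c hunks also migrate bare libc calls (malloc, calloc, free, memcmp) to HDF5's HD-prefixed wrappers. Those wrappers live in the library's private headers and, on most platforms, simply expand to the underlying libc call so that individual platforms can substitute their own implementations. An illustrative sketch of the convention (not the library's exact header):

#include <stdlib.h>
#include <string.h>

/* Illustrative defaults only -- the real definitions are in HDF5's
 * private headers and may be overridden per platform. */
#ifndef HDmalloc
#define HDmalloc(Z)        malloc(Z)
#endif
#ifndef HDcalloc
#define HDcalloc(N, Z)     calloc(N, Z)
#endif
#ifndef HDfree
#define HDfree(M)          free(M)
#endif
#ifndef HDmemcmp
#define HDmemcmp(X, Y, Z)  memcmp(X, Y, Z)
#endif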
diff --git a/testpar/t_dset.c b/testpar/t_dset.c
index 281d027..d4e556d 100644
--- a/testpar/t_dset.c
+++ b/testpar/t_dset.c
@@ -2649,7 +2649,7 @@ compress_readAll(void)
/* Try reading the data */
ret = H5Dread(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, xfer_plist, data_read);
- VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
+ VRFY((ret >= 0), "H5Dread succeeded");
/* Verify data read */
for(u=0; u<dim; u++)
@@ -2659,8 +2659,10 @@ compress_readAll(void)
nerrors++;
}
+#if MPI_VERSION >= 3
ret = H5Dwrite(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, xfer_plist, data_read);
VRFY((ret >= 0), "H5Dwrite succeeded");
+#endif
ret = H5Pclose(xfer_plist);
VRFY((ret >= 0), "H5Pclose succeeded");
diff --git a/testpar/t_filters_parallel.c b/testpar/t_filters_parallel.c
index f436c8f..1f26e0d 100644
--- a/testpar/t_filters_parallel.c
+++ b/testpar/t_filters_parallel.c
@@ -32,9 +32,28 @@ char filenames[1][256];
int nerrors = 0;
size_t cur_filter_idx = 0;
+#define GZIP_INDEX 0
+#define FLETCHER32_INDEX 1
#define ARRAY_SIZE(a) sizeof(a) / sizeof(a[0])
+/*
+ * Used to check if a filter is available before running a test.
+ */
+#define CHECK_CUR_FILTER_AVAIL() \
+{ \
+ htri_t filter_is_avail; \
+ \
+ if (cur_filter_idx == GZIP_INDEX) { \
+ if ((filter_is_avail = H5Zfilter_avail(H5Z_FILTER_DEFLATE)) != TRUE) { \
+ if (MAINPROCESS) { \
+ HDputs(" - SKIPPED - Deflate filter not available"); \
+ } \
+ return; \
+ } \
+ } \
+}
+
static herr_t set_dcpl_filter(hid_t dcpl);
#if MPI_VERSION >= 3
@@ -144,9 +163,9 @@ static herr_t
set_dcpl_filter(hid_t dcpl)
{
switch (cur_filter_idx) {
- case 0:
+ case GZIP_INDEX:
return H5Pset_deflate(dcpl, DEFAULT_DEFLATE_LEVEL);
- case 1:
+ case FLETCHER32_INDEX:
return H5Pset_fletcher32(dcpl);
default:
return H5Pset_deflate(dcpl, DEFAULT_DEFLATE_LEVEL);
@@ -178,7 +197,9 @@ test_write_one_chunk_filtered_dataset(void)
hid_t file_id = -1, dset_id = -1, plist_id = -1;
hid_t filespace = -1, memspace = -1;
- if (MAINPROCESS) puts("Testing write to one-chunk filtered dataset");
+ if (MAINPROCESS) HDputs("Testing write to one-chunk filtered dataset");
+
+ CHECK_CUR_FILTER_AVAIL();
/* Set up file access property list with parallel I/O access */
plist_id = H5Pcreate(H5P_FILE_ACCESS);
@@ -238,9 +259,9 @@ test_write_one_chunk_filtered_dataset(void)
start[1] = 0;
if (VERBOSE_MED) {
- printf("Process %d is writing with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n",
+ HDprintf("Process %d is writing with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n",
mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
- fflush(stdout);
+ HDfflush(stdout);
}
/* Select hyperslab in the file */
@@ -292,7 +313,7 @@ test_write_one_chunk_filtered_dataset(void)
VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0),
"Dataset read succeeded");
- VRFY((0 == memcmp(read_buf, correct_buf, correct_buf_size)),
+ VRFY((0 == HDmemcmp(read_buf, correct_buf, correct_buf_size)),
"Data verification succeeded");
if (correct_buf) HDfree(correct_buf);
@@ -333,7 +354,9 @@ test_write_filtered_dataset_no_overlap(void)
hid_t file_id = -1, dset_id = -1, plist_id = -1;
hid_t filespace = -1, memspace = -1;
- if (MAINPROCESS) puts("Testing write to unshared filtered chunks");
+ if (MAINPROCESS) HDputs("Testing write to unshared filtered chunks");
+
+ CHECK_CUR_FILTER_AVAIL();
/* Set up file access property list with parallel I/O access */
plist_id = H5Pcreate(H5P_FILE_ACCESS);
@@ -394,9 +417,9 @@ test_write_filtered_dataset_no_overlap(void)
start[1] = 0;
if (VERBOSE_MED) {
- printf("Process %d is writing with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n",
+ HDprintf("Process %d is writing with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n",
mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
- fflush(stdout);
+ HDfflush(stdout);
}
/* Select hyperslab in the file */
@@ -450,7 +473,7 @@ test_write_filtered_dataset_no_overlap(void)
VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0),
"Dataset read succeeded");
- VRFY((0 == memcmp(read_buf, correct_buf, correct_buf_size)),
+ VRFY((0 == HDmemcmp(read_buf, correct_buf, correct_buf_size)),
"Data verification succeeded");
if (correct_buf) HDfree(correct_buf);
@@ -492,7 +515,9 @@ test_write_filtered_dataset_overlap(void)
hid_t file_id = -1, dset_id = -1, plist_id = -1;
hid_t filespace = -1, memspace = -1;
- if (MAINPROCESS) puts("Testing write to shared filtered chunks");
+ if (MAINPROCESS) HDputs("Testing write to shared filtered chunks");
+
+ CHECK_CUR_FILTER_AVAIL();
/* Set up file access property list with parallel I/O access */
plist_id = H5Pcreate(H5P_FILE_ACCESS);
@@ -553,9 +578,9 @@ test_write_filtered_dataset_overlap(void)
start[1] = 0;
if (VERBOSE_MED) {
- printf("Process %d is writing with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n",
+ HDprintf("Process %d is writing with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n",
mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
- fflush(stdout);
+ HDfflush(stdout);
}
/* Select hyperslab in the file */
@@ -609,7 +634,7 @@ test_write_filtered_dataset_overlap(void)
VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0),
"Dataset read succeeded");
- VRFY((0 == memcmp(read_buf, correct_buf, correct_buf_size)),
+ VRFY((0 == HDmemcmp(read_buf, correct_buf, correct_buf_size)),
"Data verification succeeded");
if (correct_buf) HDfree(correct_buf);
@@ -654,7 +679,9 @@ test_write_filtered_dataset_single_no_selection(void)
hid_t file_id = -1, dset_id = -1, plist_id = -1;
hid_t filespace = -1, memspace = -1;
- if (MAINPROCESS) puts("Testing write to filtered chunks with a single process having no selection");
+ if (MAINPROCESS) HDputs("Testing write to filtered chunks with a single process having no selection");
+
+ CHECK_CUR_FILTER_AVAIL();
/* Set up file access property list with parallel I/O access */
plist_id = H5Pcreate(H5P_FILE_ACCESS);
@@ -718,9 +745,9 @@ test_write_filtered_dataset_single_no_selection(void)
start[1] = 0;
if (VERBOSE_MED) {
- printf("Process %d is writing with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n",
+ HDprintf("Process %d is writing with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n",
mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
- fflush(stdout);
+ HDfflush(stdout);
}
/* Select hyperslab in the file */
@@ -782,7 +809,7 @@ test_write_filtered_dataset_single_no_selection(void)
VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0),
"Dataset read succeeded");
- VRFY((0 == memcmp(read_buf, correct_buf, correct_buf_size)),
+ VRFY((0 == HDmemcmp(read_buf, correct_buf, correct_buf_size)),
"Data verification succeeded");
if (correct_buf) HDfree(correct_buf);
@@ -823,7 +850,9 @@ test_write_filtered_dataset_all_no_selection(void)
hid_t file_id = -1, dset_id = -1, plist_id = -1;
hid_t filespace = -1, memspace = -1;
- if (MAINPROCESS) puts("Testing write to filtered chunks with all processes having no selection");
+ if (MAINPROCESS) HDputs("Testing write to filtered chunks with all processes having no selection");
+
+ CHECK_CUR_FILTER_AVAIL();
/* Set up file access property list with parallel I/O access */
plist_id = H5Pcreate(H5P_FILE_ACCESS);
@@ -912,7 +941,7 @@ test_write_filtered_dataset_all_no_selection(void)
VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0),
"Dataset read succeeded");
- VRFY((0 == memcmp(read_buf, correct_buf, correct_buf_size)),
+ VRFY((0 == HDmemcmp(read_buf, correct_buf, correct_buf_size)),
"Data verification succeeded");
if (correct_buf) HDfree(correct_buf);
@@ -949,7 +978,9 @@ test_write_filtered_dataset_point_selection(void)
hid_t file_id = -1, dset_id = -1, plist_id = -1;
hid_t filespace = -1, memspace = -1;
- if (MAINPROCESS) puts("Testing write to filtered chunks with point selection");
+ if (MAINPROCESS) HDputs("Testing write to filtered chunks with point selection");
+
+ CHECK_CUR_FILTER_AVAIL();
/* Set up file access property list with parallel I/O access */
plist_id = H5Pcreate(H5P_FILE_ACCESS);
@@ -1058,7 +1089,7 @@ test_write_filtered_dataset_point_selection(void)
VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0),
"Dataset read succeeded");
- VRFY((0 == memcmp(read_buf, correct_buf, correct_buf_size)),
+ VRFY((0 == HDmemcmp(read_buf, correct_buf, correct_buf_size)),
"Data verification succeeded");
if (coords) HDfree(coords);
@@ -1102,7 +1133,9 @@ test_write_filtered_dataset_interleaved_write(void)
hid_t file_id = -1, dset_id = -1, plist_id = -1;
hid_t filespace = -1, memspace = -1;
- if (MAINPROCESS) puts("Testing interleaved write to filtered chunks");
+ if (MAINPROCESS) HDputs("Testing interleaved write to filtered chunks");
+
+ CHECK_CUR_FILTER_AVAIL();
/* Set up file access property list with parallel I/O access */
plist_id = H5Pcreate(H5P_FILE_ACCESS);
@@ -1163,9 +1196,9 @@ test_write_filtered_dataset_interleaved_write(void)
start[1] = 0;
if (VERBOSE_MED) {
- printf("Process %d is writing with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n",
+ HDprintf("Process %d is writing with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n",
mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
- fflush(stdout);
+ HDfflush(stdout);
}
/* Select hyperslab in the file */
@@ -1225,7 +1258,7 @@ test_write_filtered_dataset_interleaved_write(void)
VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0),
"Dataset read succeeded");
- VRFY((0 == memcmp(read_buf, correct_buf, correct_buf_size)),
+ VRFY((0 == HDmemcmp(read_buf, correct_buf, correct_buf_size)),
"Data verification succeeded");
if (correct_buf) HDfree(correct_buf);
@@ -1265,7 +1298,9 @@ test_write_3d_filtered_dataset_no_overlap_separate_pages(void)
hid_t file_id = -1, dset_id = -1, plist_id = -1;
hid_t filespace = -1, memspace = -1;
- if (MAINPROCESS) puts("Testing write to unshared filtered chunks on separate pages in 3D dataset");
+ if (MAINPROCESS) HDputs("Testing write to unshared filtered chunks on separate pages in 3D dataset");
+
+ CHECK_CUR_FILTER_AVAIL();
/* Set up file access property list with parallel I/O access */
plist_id = H5Pcreate(H5P_FILE_ACCESS);
@@ -1333,9 +1368,9 @@ test_write_3d_filtered_dataset_no_overlap_separate_pages(void)
start[2] = (hsize_t) mpi_rank;
if (VERBOSE_MED) {
- printf("Process %d is writing with count[ %llu, %llu, %llu ], stride[ %llu, %llu, %llu ], start[ %llu, %llu, %llu ], block size[ %llu, %llu, %llu ]\n",
+ HDprintf("Process %d is writing with count[ %llu, %llu, %llu ], stride[ %llu, %llu, %llu ], start[ %llu, %llu, %llu ], block size[ %llu, %llu, %llu ]\n",
mpi_rank, count[0], count[1], count[2], stride[0], stride[1], stride[2], start[0], start[1], start[2], block[0], block[1], block[2]);
- fflush(stdout);
+ HDfflush(stdout);
}
/* Select hyperslab in the file */
@@ -1385,7 +1420,7 @@ test_write_3d_filtered_dataset_no_overlap_separate_pages(void)
VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0),
"Dataset read succeeded");
- VRFY((0 == memcmp(read_buf, correct_buf, correct_buf_size)),
+ VRFY((0 == HDmemcmp(read_buf, correct_buf, correct_buf_size)),
"Data verification succeeded");
if (correct_buf) HDfree(correct_buf);
@@ -1426,7 +1461,9 @@ test_write_3d_filtered_dataset_no_overlap_same_pages(void)
hid_t file_id, dset_id, plist_id;
hid_t filespace, memspace;
- if (MAINPROCESS) puts("Testing write to unshared filtered chunks on the same pages in 3D dataset");
+ if (MAINPROCESS) HDputs("Testing write to unshared filtered chunks on the same pages in 3D dataset");
+
+ CHECK_CUR_FILTER_AVAIL();
/* Set up file access property list with parallel I/O access */
plist_id = H5Pcreate(H5P_FILE_ACCESS);
@@ -1494,9 +1531,9 @@ test_write_3d_filtered_dataset_no_overlap_same_pages(void)
start[2] = 0;
if (VERBOSE_MED) {
- printf("Process %d is writing with count[ %llu, %llu, %llu ], stride[ %llu, %llu, %llu ], start[ %llu, %llu, %llu ], block size[ %llu, %llu, %llu ]\n",
+ HDprintf("Process %d is writing with count[ %llu, %llu, %llu ], stride[ %llu, %llu, %llu ], start[ %llu, %llu, %llu ], block size[ %llu, %llu, %llu ]\n",
mpi_rank, count[0], count[1], count[2], stride[0], stride[1], stride[2], start[0], start[1], start[2], block[0], block[1], block[2]);
- fflush(stdout);
+ HDfflush(stdout);
}
/* Select hyperslab in the file */
@@ -1549,7 +1586,7 @@ test_write_3d_filtered_dataset_no_overlap_same_pages(void)
VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0),
"Dataset read succeeded");
- VRFY((0 == memcmp(read_buf, correct_buf, correct_buf_size)),
+ VRFY((0 == HDmemcmp(read_buf, correct_buf, correct_buf_size)),
"Data verification succeeded");
if (correct_buf) HDfree(correct_buf);
@@ -1590,7 +1627,9 @@ test_write_3d_filtered_dataset_overlap(void)
hid_t file_id = -1, dset_id = -1, plist_id = -1;
hid_t filespace = -1, memspace = -1;
- if (MAINPROCESS) puts("Testing write to shared filtered chunks in 3D dataset");
+ if (MAINPROCESS) HDputs("Testing write to shared filtered chunks in 3D dataset");
+
+ CHECK_CUR_FILTER_AVAIL();
/* Set up file access property list with parallel I/O access */
plist_id = H5Pcreate(H5P_FILE_ACCESS);
@@ -1658,9 +1697,9 @@ test_write_3d_filtered_dataset_overlap(void)
start[2] = 0;
if (VERBOSE_MED) {
- printf("Process %d is writing with count[ %llu, %llu, %llu ], stride[ %llu, %llu, %llu ], start[ %llu, %llu, %llu ], block size[ %llu, %llu, %llu ]\n",
+ HDprintf("Process %d is writing with count[ %llu, %llu, %llu ], stride[ %llu, %llu, %llu ], start[ %llu, %llu, %llu ], block size[ %llu, %llu, %llu ]\n",
mpi_rank, count[0], count[1], count[2], stride[0], stride[1], stride[2], start[0], start[1], start[2], block[0], block[1], block[2]);
- fflush(stdout);
+ HDfflush(stdout);
}
/* Select hyperslab in the file */
@@ -1722,7 +1761,7 @@ test_write_3d_filtered_dataset_overlap(void)
VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0),
"Dataset read succeeded");
- VRFY((0 == memcmp(read_buf, correct_buf, correct_buf_size)),
+ VRFY((0 == HDmemcmp(read_buf, correct_buf, correct_buf_size)),
"Data verification succeeded");
if (correct_buf) HDfree(correct_buf);
@@ -1762,7 +1801,9 @@ test_write_cmpd_filtered_dataset_no_conversion_unshared(void)
hid_t file_id = -1, dset_id = -1, plist_id = -1, memtype = -1;
hid_t filespace = -1, memspace = -1;
- if (MAINPROCESS) puts("Testing write to unshared filtered chunks in Compound Datatype dataset without Datatype conversion");
+ if (MAINPROCESS) HDputs("Testing write to unshared filtered chunks in Compound Datatype dataset without Datatype conversion");
+
+ CHECK_CUR_FILTER_AVAIL();
/* Set up file access property list with parallel I/O access */
plist_id = H5Pcreate(H5P_FILE_ACCESS);
@@ -1834,9 +1875,9 @@ test_write_cmpd_filtered_dataset_no_conversion_unshared(void)
start[1] = ((hsize_t) mpi_rank * WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NCOLS);
if (VERBOSE_MED) {
- printf("Process %d is writing with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n",
+ HDprintf("Process %d is writing with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n",
mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
- fflush(stdout);
+ HDfflush(stdout);
}
/* Select hyperslab in the file */
@@ -1902,7 +1943,7 @@ test_write_cmpd_filtered_dataset_no_conversion_unshared(void)
VRFY((H5Dread(dset_id, memtype, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0),
"Dataset read succeeded");
- VRFY((0 == memcmp(read_buf, correct_buf, correct_buf_size)),
+ VRFY((0 == HDmemcmp(read_buf, correct_buf, correct_buf_size)),
"Data verification succeeded");
if (correct_buf) HDfree(correct_buf);
@@ -1943,7 +1984,9 @@ test_write_cmpd_filtered_dataset_no_conversion_shared(void)
hid_t file_id, dset_id, plist_id, memtype;
hid_t filespace, memspace;
- if (MAINPROCESS) puts("Testing write to shared filtered chunks in Compound Datatype dataset without Datatype conversion");
+ if (MAINPROCESS) HDputs("Testing write to shared filtered chunks in Compound Datatype dataset without Datatype conversion");
+
+ CHECK_CUR_FILTER_AVAIL();
/* Set up file access property list with parallel I/O access */
plist_id = H5Pcreate(H5P_FILE_ACCESS);
@@ -2015,9 +2058,9 @@ test_write_cmpd_filtered_dataset_no_conversion_shared(void)
start[1] = 0;
if (VERBOSE_MED) {
- printf("Process %d is writing with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n",
+ HDprintf("Process %d is writing with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n",
mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
- fflush(stdout);
+ HDfflush(stdout);
}
/* Select hyperslab in the file */
@@ -2086,7 +2129,7 @@ test_write_cmpd_filtered_dataset_no_conversion_shared(void)
VRFY((H5Dread(dset_id, memtype, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0),
"Dataset read succeeded");
- VRFY((0 == memcmp(read_buf, correct_buf, correct_buf_size)),
+ VRFY((0 == HDmemcmp(read_buf, correct_buf, correct_buf_size)),
"Data verification succeeded");
if (correct_buf) HDfree(correct_buf);
@@ -2132,7 +2175,9 @@ test_write_cmpd_filtered_dataset_type_conversion_unshared(void)
hid_t file_id = -1, dset_id = -1, plist_id = -1, filetype = -1, memtype = -1;
hid_t filespace = -1, memspace = -1;
- if (MAINPROCESS) puts("Testing write to unshared filtered chunks in Compound Datatype dataset with Datatype conversion");
+ if (MAINPROCESS) HDputs("Testing write to unshared filtered chunks in Compound Datatype dataset with Datatype conversion");
+
+ CHECK_CUR_FILTER_AVAIL();
/* Set up file access property list with parallel I/O access */
plist_id = H5Pcreate(H5P_FILE_ACCESS);
@@ -2215,9 +2260,9 @@ test_write_cmpd_filtered_dataset_type_conversion_unshared(void)
start[1] = ((hsize_t) mpi_rank * WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NCOLS);
if (VERBOSE_MED) {
- printf("Process %d is writing with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n",
+ HDprintf("Process %d is writing with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n",
mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
- fflush(stdout);
+ HDfflush(stdout);
}
/* Select hyperslab in the file */
@@ -2269,7 +2314,7 @@ test_write_cmpd_filtered_dataset_type_conversion_unshared(void)
VRFY((H5Dread(dset_id, memtype, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0),
"Dataset read succeeded");
- VRFY((0 == memcmp(read_buf, correct_buf, correct_buf_size)),
+ VRFY((0 == HDmemcmp(read_buf, correct_buf, correct_buf_size)),
"Data verification succeeded");
if (correct_buf) HDfree(correct_buf);
@@ -2316,7 +2361,9 @@ test_write_cmpd_filtered_dataset_type_conversion_shared(void)
hid_t file_id, dset_id, plist_id, filetype, memtype;
hid_t filespace, memspace;
- if (MAINPROCESS) puts("Testing write to shared filtered chunks in Compound Datatype dataset with Datatype conversion");
+ if (MAINPROCESS) HDputs("Testing write to shared filtered chunks in Compound Datatype dataset with Datatype conversion");
+
+ CHECK_CUR_FILTER_AVAIL();
/* Set up file access property list with parallel I/O access */
plist_id = H5Pcreate(H5P_FILE_ACCESS);
@@ -2399,9 +2446,9 @@ test_write_cmpd_filtered_dataset_type_conversion_shared(void)
start[1] = 0;
if (VERBOSE_MED) {
- printf("Process %d is writing with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n",
+ HDprintf("Process %d is writing with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n",
mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
- fflush(stdout);
+ HDfflush(stdout);
}
/* Select hyperslab in the file */
@@ -2453,7 +2500,7 @@ test_write_cmpd_filtered_dataset_type_conversion_shared(void)
VRFY((H5Dread(dset_id, memtype, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0),
"Dataset read succeeded");
- VRFY((0 == memcmp(read_buf, correct_buf, correct_buf_size)),
+ VRFY((0 == HDmemcmp(read_buf, correct_buf, correct_buf_size)),
"Data verification succeeded");
if (correct_buf) HDfree(correct_buf);
@@ -2503,6 +2550,10 @@ test_read_one_chunk_filtered_dataset(void)
int *recvcounts = NULL;
int *displs = NULL;
+ if (MAINPROCESS) HDputs("Testing read from one-chunk filtered dataset");
+
+ CHECK_CUR_FILTER_AVAIL();
+
dataset_dims[0] = (hsize_t) READ_ONE_CHUNK_FILTERED_DATASET_NROWS;
dataset_dims[1] = (hsize_t) READ_ONE_CHUNK_FILTERED_DATASET_NCOLS;
@@ -2517,8 +2568,6 @@ test_read_one_chunk_filtered_dataset(void)
+ ((C_DATATYPE) i / (READ_ONE_CHUNK_FILTERED_DATASET_CH_NROWS / mpi_size * READ_ONE_CHUNK_FILTERED_DATASET_CH_NCOLS));
if (MAINPROCESS) {
- puts("Testing read from one-chunk filtered dataset");
-
plist_id = H5Pcreate(H5P_FILE_ACCESS);
VRFY((plist_id >= 0), "FAPL creation succeeded");
@@ -2606,9 +2655,9 @@ test_read_one_chunk_filtered_dataset(void)
start[1] = 0;
if (VERBOSE_MED) {
- printf("Process %d is reading with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n",
+ HDprintf("Process %d is reading with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n",
mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
- fflush(stdout);
+ HDfflush(stdout);
}
VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
@@ -2648,7 +2697,7 @@ test_read_one_chunk_filtered_dataset(void)
VRFY((MPI_SUCCESS == MPI_Allgatherv(read_buf, (int) flat_dims[0], C_DATATYPE_MPI, global_buf, recvcounts, displs, C_DATATYPE_MPI, comm)),
"MPI_Allgatherv succeeded");
- VRFY((0 == memcmp(global_buf, correct_buf, correct_buf_size)),
+ VRFY((0 == HDmemcmp(global_buf, correct_buf, correct_buf_size)),
"Data verification succeeded");
if (displs) HDfree(displs);
@@ -2698,6 +2747,10 @@ test_read_filtered_dataset_no_overlap(void)
int *recvcounts = NULL;
int *displs = NULL;
+ if (MAINPROCESS) HDputs("Testing read from unshared filtered chunks");
+
+ CHECK_CUR_FILTER_AVAIL();
+
dataset_dims[0] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_NROWS;
dataset_dims[1] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_NCOLS;
@@ -2714,8 +2767,6 @@ test_read_filtered_dataset_no_overlap(void)
);
if (MAINPROCESS) {
- puts("Testing read from unshared filtered chunks");
-
plist_id = H5Pcreate(H5P_FILE_ACCESS);
VRFY((plist_id >= 0), "FAPL creation succeeded");
@@ -2803,9 +2854,9 @@ test_read_filtered_dataset_no_overlap(void)
start[1] = 0;
if (VERBOSE_MED) {
- printf("Process %d is reading with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n",
+ HDprintf("Process %d is reading with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n",
mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
- fflush(stdout);
+ HDfflush(stdout);
}
VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
@@ -2845,7 +2896,7 @@ test_read_filtered_dataset_no_overlap(void)
VRFY((MPI_SUCCESS == MPI_Allgatherv(read_buf, (int) flat_dims[0], C_DATATYPE_MPI, global_buf, recvcounts, displs, C_DATATYPE_MPI, comm)),
"MPI_Allgatherv succeeded");
- VRFY((0 == memcmp(global_buf, correct_buf, correct_buf_size)),
+ VRFY((0 == HDmemcmp(global_buf, correct_buf, correct_buf_size)),
"Data verification succeeded");
if (displs) HDfree(displs);
@@ -2896,6 +2947,10 @@ test_read_filtered_dataset_overlap(void)
int *recvcounts = NULL;
int *displs = NULL;
+ if (MAINPROCESS) HDputs("Testing read from shared filtered chunks");
+
+ CHECK_CUR_FILTER_AVAIL();
+
dataset_dims[0] = (hsize_t) READ_SHARED_FILTERED_CHUNKS_NROWS;
dataset_dims[1] = (hsize_t) READ_SHARED_FILTERED_CHUNKS_NCOLS;
@@ -2913,8 +2968,6 @@ test_read_filtered_dataset_overlap(void)
);
if (MAINPROCESS) {
- puts("Testing read from shared filtered chunks");
-
plist_id = H5Pcreate(H5P_FILE_ACCESS);
VRFY((plist_id >= 0), "FAPL creation succeeded");
@@ -3002,9 +3055,9 @@ test_read_filtered_dataset_overlap(void)
start[1] = 0;
if (VERBOSE_MED) {
- printf("Process %d is reading with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n",
+ HDprintf("Process %d is reading with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n",
mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
- fflush(stdout);
+ HDfflush(stdout);
}
VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
@@ -3059,7 +3112,7 @@ test_read_filtered_dataset_overlap(void)
}
}
- VRFY((0 == memcmp(global_buf, correct_buf, correct_buf_size)),
+ VRFY((0 == HDmemcmp(global_buf, correct_buf, correct_buf_size)),
"Data verification succeeded");
if (displs) HDfree(displs);
@@ -3111,6 +3164,10 @@ test_read_filtered_dataset_single_no_selection(void)
int *recvcounts = NULL;
int *displs = NULL;
+ if (MAINPROCESS) HDputs("Testing read from filtered chunks with a single process having no selection");
+
+ CHECK_CUR_FILTER_AVAIL();
+
dataset_dims[0] = (hsize_t) READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NROWS;
dataset_dims[1] = (hsize_t) READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NCOLS;
@@ -3133,8 +3190,6 @@ test_read_filtered_dataset_single_no_selection(void)
0, segment_length * sizeof(*correct_buf));
if (MAINPROCESS) {
- puts("Testing read from filtered chunks with a single process having no selection");
-
plist_id = H5Pcreate(H5P_FILE_ACCESS);
VRFY((plist_id >= 0), "FAPL creation succeeded");
@@ -3225,9 +3280,9 @@ test_read_filtered_dataset_single_no_selection(void)
start[1] = 0;
if (VERBOSE_MED) {
- printf("Process %d is reading with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n",
+ HDprintf("Process %d is reading with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n",
mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
- fflush(stdout);
+ HDfflush(stdout);
}
if (mpi_rank == READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NO_SELECT_PROC)
@@ -3275,7 +3330,7 @@ test_read_filtered_dataset_single_no_selection(void)
VRFY((MPI_SUCCESS == MPI_Allgatherv(read_buf, (int) flat_dims[0], C_DATATYPE_MPI, global_buf, recvcounts, displs, C_DATATYPE_MPI, comm)),
"MPI_Allgatherv succeeded");
- VRFY((0 == memcmp(global_buf, correct_buf, correct_buf_size)),
+ VRFY((0 == HDmemcmp(global_buf, correct_buf, correct_buf_size)),
"Data verification succeeded");
if (displs) HDfree(displs);
@@ -3319,6 +3374,10 @@ test_read_filtered_dataset_all_no_selection(void)
hid_t file_id = -1, dset_id = -1, plist_id = -1;
hid_t filespace = -1, memspace = -1;
+ if (MAINPROCESS) HDputs("Testing read from filtered chunks with all processes having no selection");
+
+ CHECK_CUR_FILTER_AVAIL();
+
dataset_dims[0] = (hsize_t) READ_ALL_NO_SELECTION_FILTERED_CHUNKS_NROWS;
dataset_dims[1] = (hsize_t) READ_ALL_NO_SELECTION_FILTERED_CHUNKS_NCOLS;
@@ -3329,8 +3388,6 @@ test_read_filtered_dataset_all_no_selection(void)
VRFY((NULL != correct_buf), "HDcalloc succeeded");
if (MAINPROCESS) {
- puts("Testing read from filtered chunks with all processes having no selection");
-
plist_id = H5Pcreate(H5P_FILE_ACCESS);
VRFY((plist_id >= 0), "FAPL creation succeeded");
@@ -3460,6 +3517,10 @@ test_read_filtered_dataset_point_selection(void)
int *recvcounts = NULL;
int *displs = NULL;
+ if (MAINPROCESS) HDputs("Testing read from filtered chunks with point selection");
+
+ CHECK_CUR_FILTER_AVAIL();
+
dataset_dims[0] = (hsize_t) READ_POINT_SELECTION_FILTERED_CHUNKS_NROWS;
dataset_dims[1] = (hsize_t) READ_POINT_SELECTION_FILTERED_CHUNKS_NCOLS;
@@ -3477,8 +3538,6 @@ test_read_filtered_dataset_point_selection(void)
);
if (MAINPROCESS) {
- puts("Testing read from filtered chunks with point selection");
-
plist_id = H5Pcreate(H5P_FILE_ACCESS);
VRFY((plist_id >= 0), "FAPL creation succeeded");
@@ -3615,7 +3674,7 @@ test_read_filtered_dataset_point_selection(void)
}
}
- VRFY((0 == memcmp(global_buf, correct_buf, correct_buf_size)),
+ VRFY((0 == HDmemcmp(global_buf, correct_buf, correct_buf_size)),
"Data verification succeeded");
if (displs) HDfree(displs);
@@ -3669,6 +3728,10 @@ test_read_filtered_dataset_interleaved_read(void)
int *recvcounts = NULL;
int *displs = NULL;
+ if (MAINPROCESS) HDputs("Testing interleaved read from filtered chunks");
+
+ CHECK_CUR_FILTER_AVAIL();
+
dataset_dims[0] = (hsize_t) INTERLEAVED_READ_FILTERED_DATASET_NROWS;
dataset_dims[1] = (hsize_t) INTERLEAVED_READ_FILTERED_DATASET_NCOLS;
@@ -3692,8 +3755,6 @@ test_read_filtered_dataset_interleaved_read(void)
);
if (MAINPROCESS) {
- puts("Testing interleaved read from filtered chunks");
-
plist_id = H5Pcreate(H5P_FILE_ACCESS);
VRFY((plist_id >= 0), "FAPL creation succeeded");
@@ -3781,9 +3842,9 @@ test_read_filtered_dataset_interleaved_read(void)
start[1] = 0;
if (VERBOSE_MED) {
- printf("Process %d is reading with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n",
+ HDprintf("Process %d is reading with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n",
mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
- fflush(stdout);
+ HDfflush(stdout);
}
VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
@@ -3838,7 +3899,7 @@ test_read_filtered_dataset_interleaved_read(void)
}
}
- VRFY((0 == memcmp(global_buf, correct_buf, correct_buf_size)),
+ VRFY((0 == HDmemcmp(global_buf, correct_buf, correct_buf_size)),
"Data verification succeeded");
if (displs) HDfree(displs);
@@ -3889,6 +3950,10 @@ test_read_3d_filtered_dataset_no_overlap_separate_pages(void)
hid_t file_id = -1, dset_id = -1, plist_id = -1;
hid_t filespace = -1, memspace = -1;
+ if (MAINPROCESS) HDputs("Testing read from unshared filtered chunks on separate pages in 3D dataset");
+
+ CHECK_CUR_FILTER_AVAIL();
+
dataset_dims[0] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NROWS;
dataset_dims[1] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NCOLS;
dataset_dims[2] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DEPTH;
@@ -3903,8 +3968,6 @@ test_read_3d_filtered_dataset_no_overlap_separate_pages(void)
correct_buf[i] = (C_DATATYPE) ((i % (hsize_t) mpi_size) + (i / (hsize_t) mpi_size));
if (MAINPROCESS) {
- puts("Testing read from unshared filtered chunks on separate pages in 3D dataset");
-
plist_id = H5Pcreate(H5P_FILE_ACCESS);
VRFY((plist_id >= 0), "FAPL creation succeeded");
@@ -3998,9 +4061,9 @@ test_read_3d_filtered_dataset_no_overlap_separate_pages(void)
start[2] = (hsize_t) mpi_rank;
if (VERBOSE_MED) {
- printf("Process %d is reading with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n",
+ HDprintf("Process %d is reading with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n",
mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
- fflush(stdout);
+ HDfflush(stdout);
}
VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
@@ -4043,7 +4106,7 @@ test_read_3d_filtered_dataset_no_overlap_separate_pages(void)
VRFY((MPI_SUCCESS == MPI_Allgather(read_buf, (int) flat_dims[0], C_DATATYPE_MPI, global_buf, 1, resized_vector_type, comm)),
"MPI_Allgather succeeded");
- VRFY((0 == memcmp(global_buf, correct_buf, correct_buf_size)),
+ VRFY((0 == HDmemcmp(global_buf, correct_buf, correct_buf_size)),
"Data verification succeeded");
VRFY((MPI_SUCCESS == MPI_Type_free(&vector_type)), "MPI_Type_free succeeded");
@@ -4096,6 +4159,10 @@ test_read_3d_filtered_dataset_no_overlap_same_pages(void)
int *recvcounts = NULL;
int *displs = NULL;
+ if (MAINPROCESS) HDputs("Testing read from unshared filtered chunks on the same pages in 3D dataset");
+
+ CHECK_CUR_FILTER_AVAIL();
+
dataset_dims[0] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_NROWS;
dataset_dims[1] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_NCOLS;
dataset_dims[2] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DEPTH;
@@ -4113,8 +4180,6 @@ test_read_3d_filtered_dataset_no_overlap_same_pages(void)
);
if (MAINPROCESS) {
- puts("Testing read from unshared filtered chunks on the same pages in 3D dataset");
-
plist_id = H5Pcreate(H5P_FILE_ACCESS);
VRFY((plist_id >= 0), "FAPL creation succeeded");
@@ -4208,9 +4273,9 @@ test_read_3d_filtered_dataset_no_overlap_same_pages(void)
start[2] = 0;
if (VERBOSE_MED) {
- printf("Process %d is reading with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n",
+ HDprintf("Process %d is reading with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n",
mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
- fflush(stdout);
+ HDfflush(stdout);
}
VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
@@ -4250,7 +4315,7 @@ test_read_3d_filtered_dataset_no_overlap_same_pages(void)
VRFY((MPI_SUCCESS == MPI_Allgatherv(read_buf, (int) flat_dims[0], C_DATATYPE_MPI, global_buf, recvcounts, displs, C_DATATYPE_MPI, comm)),
"MPI_Allgatherv succeeded");
- VRFY((0 == memcmp(global_buf, correct_buf, correct_buf_size)),
+ VRFY((0 == HDmemcmp(global_buf, correct_buf, correct_buf_size)),
"Data verification succeeded");
if (displs) HDfree(displs);
@@ -4302,6 +4367,10 @@ test_read_3d_filtered_dataset_overlap(void)
hid_t file_id = -1, dset_id = -1, plist_id = -1;
hid_t filespace = -1, memspace = -1;
+ if (MAINPROCESS) HDputs("Testing read from shared filtered chunks in 3D dataset");
+
+ CHECK_CUR_FILTER_AVAIL();
+
dataset_dims[0] = (hsize_t) READ_SHARED_FILTERED_CHUNKS_3D_NROWS;
dataset_dims[1] = (hsize_t) READ_SHARED_FILTERED_CHUNKS_3D_NCOLS;
dataset_dims[2] = (hsize_t) READ_SHARED_FILTERED_CHUNKS_3D_DEPTH;
@@ -4328,8 +4397,6 @@ test_read_3d_filtered_dataset_overlap(void)
);
if (MAINPROCESS) {
- puts("Testing read from shared filtered chunks in 3D dataset");
-
plist_id = H5Pcreate(H5P_FILE_ACCESS);
VRFY((plist_id >= 0), "FAPL creation succeeded");
@@ -4423,9 +4490,9 @@ test_read_3d_filtered_dataset_overlap(void)
start[2] = 0;
if (VERBOSE_MED) {
- printf("Process %d is reading with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n",
+ HDprintf("Process %d is reading with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n",
mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
- fflush(stdout);
+ HDfflush(stdout);
}
VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
@@ -4473,7 +4540,7 @@ test_read_3d_filtered_dataset_overlap(void)
VRFY((MPI_SUCCESS == MPI_Allgather(read_buf, (int) flat_dims[0], C_DATATYPE_MPI, global_buf, 1, resized_vector_type, comm)),
"MPI_Allgatherv succeeded");
- VRFY((0 == memcmp(global_buf, correct_buf, correct_buf_size)),
+ VRFY((0 == HDmemcmp(global_buf, correct_buf, correct_buf_size)),
"Data verification succeeded");
VRFY((MPI_SUCCESS == MPI_Type_free(&vector_type)), "MPI_Type_free succeeded");
@@ -4525,6 +4592,10 @@ test_read_cmpd_filtered_dataset_no_conversion_unshared(void)
int *recvcounts = NULL;
int *displs = NULL;
+ if (MAINPROCESS) HDputs("Testing read from unshared filtered chunks in Compound Datatype dataset without Datatype conversion");
+
+ CHECK_CUR_FILTER_AVAIL();
+
dataset_dims[0] = (hsize_t) READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_NROWS;
dataset_dims[1] = (hsize_t) READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_NCOLS;
@@ -4563,8 +4634,6 @@ test_read_cmpd_filtered_dataset_no_conversion_unshared(void)
"Datatype insertion succeeded");
if (MAINPROCESS) {
- puts("Testing read from unshared filtered chunks in Compound Datatype dataset without Datatype conversion");
-
plist_id = H5Pcreate(H5P_FILE_ACCESS);
VRFY((plist_id >= 0), "FAPL creation succeeded");
@@ -4652,9 +4721,9 @@ test_read_cmpd_filtered_dataset_no_conversion_unshared(void)
start[1] = ((hsize_t) mpi_rank * READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NCOLS);
if (VERBOSE_MED) {
- printf("Process %d is reading with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n",
+ HDprintf("Process %d is reading with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n",
mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
- fflush(stdout);
+ HDfflush(stdout);
}
VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
@@ -4694,7 +4763,7 @@ test_read_cmpd_filtered_dataset_no_conversion_unshared(void)
VRFY((MPI_SUCCESS == MPI_Allgatherv(read_buf, (int) (flat_dims[0] * sizeof(COMPOUND_C_DATATYPE)), MPI_BYTE, global_buf, recvcounts, displs, MPI_BYTE, comm)),
"MPI_Allgatherv succeeded");
- VRFY((0 == memcmp(global_buf, correct_buf, correct_buf_size)),
+ VRFY((0 == HDmemcmp(global_buf, correct_buf, correct_buf_size)),
"Data verification succeeded");
if (displs) HDfree(displs);
@@ -4746,6 +4815,10 @@ test_read_cmpd_filtered_dataset_no_conversion_shared(void)
int *recvcounts = NULL;
int *displs = NULL;
+ if (MAINPROCESS) HDputs("Testing read from shared filtered chunks in Compound Datatype dataset without Datatype conversion");
+
+ CHECK_CUR_FILTER_AVAIL();
+
dataset_dims[0] = (hsize_t) READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_NROWS;
dataset_dims[1] = (hsize_t) READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_NCOLS;
@@ -4787,8 +4860,6 @@ test_read_cmpd_filtered_dataset_no_conversion_shared(void)
"Datatype insertion succeeded");
if (MAINPROCESS) {
- puts("Testing read from shared filtered chunks in Compound Datatype dataset without Datatype conversion");
-
plist_id = H5Pcreate(H5P_FILE_ACCESS);
VRFY((plist_id >= 0), "FAPL creation succeeded");
@@ -4876,9 +4947,9 @@ test_read_cmpd_filtered_dataset_no_conversion_shared(void)
start[1] = 0;
if (VERBOSE_MED) {
- printf("Process %d is reading with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n",
+ HDprintf("Process %d is reading with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n",
mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
- fflush(stdout);
+ HDfflush(stdout);
}
VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
@@ -4918,7 +4989,7 @@ test_read_cmpd_filtered_dataset_no_conversion_shared(void)
VRFY((MPI_SUCCESS == MPI_Allgatherv(read_buf, (int) (flat_dims[0] * sizeof(COMPOUND_C_DATATYPE)), MPI_BYTE, global_buf, recvcounts, displs, MPI_BYTE, comm)),
"MPI_Allgatherv succeeded");
- VRFY((0 == memcmp(global_buf, correct_buf, correct_buf_size)),
+ VRFY((0 == HDmemcmp(global_buf, correct_buf, correct_buf_size)),
"Data verification succeeded");
if (displs) HDfree(displs);
@@ -4970,6 +5041,10 @@ test_read_cmpd_filtered_dataset_type_conversion_unshared(void)
int *recvcounts = NULL;
int *displs = NULL;
+ if (MAINPROCESS) HDputs("Testing read from unshared filtered chunks in Compound Datatype dataset with Datatype conversion");
+
+ CHECK_CUR_FILTER_AVAIL();
+
dataset_dims[0] = (hsize_t) READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_NROWS;
dataset_dims[1] = (hsize_t) READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_NCOLS;
@@ -5019,8 +5094,6 @@ test_read_cmpd_filtered_dataset_type_conversion_unshared(void)
"Datatype insertion succeeded");
if (MAINPROCESS) {
- puts("Testing read from unshared filtered chunks in Compound Datatype dataset with Datatype conversion");
-
plist_id = H5Pcreate(H5P_FILE_ACCESS);
VRFY((plist_id >= 0), "FAPL creation succeeded");
@@ -5108,9 +5181,9 @@ test_read_cmpd_filtered_dataset_type_conversion_unshared(void)
start[1] = ((hsize_t) mpi_rank * READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NCOLS);
if (VERBOSE_MED) {
- printf("Process %d is reading with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n",
+ HDprintf("Process %d is reading with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n",
mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
- fflush(stdout);
+ HDfflush(stdout);
}
VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
@@ -5150,7 +5223,7 @@ test_read_cmpd_filtered_dataset_type_conversion_unshared(void)
VRFY((MPI_SUCCESS == MPI_Allgatherv(read_buf, (int) (flat_dims[0] * sizeof(COMPOUND_C_DATATYPE)), MPI_BYTE, global_buf, recvcounts, displs, MPI_BYTE, comm)),
"MPI_Allgatherv succeeded");
- VRFY((0 == memcmp(global_buf, correct_buf, correct_buf_size)),
+ VRFY((0 == HDmemcmp(global_buf, correct_buf, correct_buf_size)),
"Data verification succeeded");
if (displs) HDfree(displs);
@@ -5203,6 +5276,10 @@ test_read_cmpd_filtered_dataset_type_conversion_shared(void)
int *recvcounts = NULL;
int *displs = NULL;
+ if (MAINPROCESS) HDputs("Testing read from shared filtered chunks in Compound Datatype dataset with Datatype conversion");
+
+ CHECK_CUR_FILTER_AVAIL();
+
dataset_dims[0] = (hsize_t) READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_NROWS;
dataset_dims[1] = (hsize_t) READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_NCOLS;
@@ -5255,8 +5332,6 @@ test_read_cmpd_filtered_dataset_type_conversion_shared(void)
"Datatype insertion succeeded");
if (MAINPROCESS) {
- puts("Testing read from shared filtered chunks in Compound Datatype dataset with Datatype conversion");
-
plist_id = H5Pcreate(H5P_FILE_ACCESS);
VRFY((plist_id >= 0), "FAPL creation succeeded");
@@ -5344,9 +5419,9 @@ test_read_cmpd_filtered_dataset_type_conversion_shared(void)
start[1] = 0;
if (VERBOSE_MED) {
- printf("Process %d is reading with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n",
+ HDprintf("Process %d is reading with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n",
mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
- fflush(stdout);
+ HDfflush(stdout);
}
VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
@@ -5386,7 +5461,7 @@ test_read_cmpd_filtered_dataset_type_conversion_shared(void)
VRFY((MPI_SUCCESS == MPI_Allgatherv(read_buf, (int) (flat_dims[0] * sizeof(COMPOUND_C_DATATYPE)), MPI_BYTE, global_buf, recvcounts, displs, MPI_BYTE, comm)),
"MPI_Allgatherv succeeded");
- VRFY((0 == memcmp(global_buf, correct_buf, correct_buf_size)),
+ VRFY((0 == HDmemcmp(global_buf, correct_buf, correct_buf_size)),
"Data verification succeeded");
if (displs) HDfree(displs);
@@ -5427,7 +5502,9 @@ test_write_serial_read_parallel(void)
hid_t file_id = -1, dset_id = -1, plist_id = -1;
hid_t filespace = -1;
- if (MAINPROCESS) puts("Testing write file serially; read file in parallel");
+ if (MAINPROCESS) HDputs("Testing write file serially; read file in parallel");
+
+ CHECK_CUR_FILTER_AVAIL();
dataset_dims[0] = (hsize_t) WRITE_SERIAL_READ_PARALLEL_NROWS;
dataset_dims[1] = (hsize_t) WRITE_SERIAL_READ_PARALLEL_NCOLS;
@@ -5527,7 +5604,7 @@ test_write_serial_read_parallel(void)
VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0),
"Dataset read succeeded");
- VRFY((0 == memcmp(read_buf, correct_buf, correct_buf_size)),
+ VRFY((0 == HDmemcmp(read_buf, correct_buf, correct_buf_size)),
"Data verification succeeded");
if (correct_buf) HDfree(correct_buf);
@@ -5568,7 +5645,9 @@ test_write_parallel_read_serial(void)
hid_t file_id = -1, dset_id = -1, plist_id = -1;
hid_t filespace = -1, memspace = -1;
- if (MAINPROCESS) puts("Testing write file in parallel; read serially");
+ if (MAINPROCESS) HDputs("Testing write file in parallel; read serially");
+
+ CHECK_CUR_FILTER_AVAIL();
/* Set up file access property list with parallel I/O access */
plist_id = H5Pcreate(H5P_FILE_ACCESS);
@@ -5636,9 +5715,9 @@ test_write_parallel_read_serial(void)
offset[2] = 0;
if (VERBOSE_MED) {
- printf("Process %d is writing with count[ %llu, %llu, %llu ], stride[ %llu, %llu, %llu ], offset[ %llu, %llu, %llu ], block size[ %llu, %llu, %llu ]\n",
+ HDprintf("Process %d is writing with count[ %llu, %llu, %llu ], stride[ %llu, %llu, %llu ], offset[ %llu, %llu, %llu ], block size[ %llu, %llu, %llu ]\n",
mpi_rank, count[0], count[1], count[2], stride[0], stride[1], stride[2], offset[0], offset[1], offset[2], block[0], block[1], block[2]);
- fflush(stdout);
+ HDfflush(stdout);
}
/* Select hyperslab in the file */
@@ -5707,7 +5786,7 @@ test_write_parallel_read_serial(void)
VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, H5P_DEFAULT, read_buf) >= 0),
"Dataset read succeeded");
- VRFY((0 == memcmp(read_buf, correct_buf, correct_buf_size)),
+ VRFY((0 == HDmemcmp(read_buf, correct_buf, correct_buf_size)),
"Data verification succeeded");
VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
@@ -5741,7 +5820,9 @@ test_shrinking_growing_chunks(void)
hid_t file_id = -1, dset_id = -1, plist_id = -1;
hid_t filespace = -1, memspace = -1;
- if (MAINPROCESS) puts("Testing continually shrinking/growing chunks");
+ if (MAINPROCESS) HDputs("Testing continually shrinking/growing chunks");
+
+ CHECK_CUR_FILTER_AVAIL();
/* Set up file access property list with parallel I/O access */
plist_id = H5Pcreate(H5P_FILE_ACCESS);
@@ -5803,9 +5884,9 @@ test_shrinking_growing_chunks(void)
start[1] = 0;
if (VERBOSE_MED) {
- printf("Process %d is writing with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n",
+ HDprintf("Process %d is writing with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n",
mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
- fflush(stdout);
+ HDfflush(stdout);
}
/* Select hyperslab in the file */
@@ -5868,8 +5949,8 @@ main(int argc, char** argv)
if (mpi_size <= 0) {
if (MAINPROCESS) {
- printf("The Parallel Filters tests require at least 1 rank.\n");
- printf("Quitting...\n");
+ HDprintf("The Parallel Filters tests require at least 1 rank.\n");
+ HDprintf("Quitting...\n");
}
MPI_Abort(MPI_COMM_WORLD, 1);
@@ -5877,16 +5958,16 @@ main(int argc, char** argv)
if (H5dont_atexit() < 0) {
if (MAINPROCESS) {
- printf("Failed to turn off atexit processing. Continue.\n");
+ HDprintf("Failed to turn off atexit processing. Continue.\n");
}
}
H5open();
if (MAINPROCESS) {
- printf("==========================\n");
- printf("Parallel Filters tests\n");
- printf("==========================\n\n");
+ HDprintf("==========================\n");
+ HDprintf("Parallel Filters tests\n");
+ HDprintf("==========================\n\n");
}
if (VERBOSE_MED) h5_show_hostname();
@@ -5942,9 +6023,9 @@ main(int argc, char** argv)
VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
if (MAINPROCESS) {
- printf("\n=================================================================\n");
- printf("Re-running Parallel Filters tests with Fletcher32 checksum filter\n");
- printf("=================================================================\n\n");
+ HDprintf("\n=================================================================\n");
+ HDprintf("Re-running Parallel Filters tests with Fletcher32 checksum filter\n");
+ HDprintf("=================================================================\n\n");
}
for (i = 0; i < ARRAY_SIZE(tests); i++) {
@@ -5959,12 +6040,12 @@ main(int argc, char** argv)
if (nerrors) goto exit;
- if (MAINPROCESS) puts("All Parallel Filters tests passed\n");
+ if (MAINPROCESS) HDputs("All Parallel Filters tests passed\n");
exit:
if (nerrors)
if (MAINPROCESS)
- printf("*** %d TEST ERROR%s OCCURRED ***\n", nerrors,
+ HDprintf("*** %d TEST ERROR%s OCCURRED ***\n", nerrors,
nerrors > 1 ? "S" : "");
ALARM_OFF;
diff --git a/testpar/t_mdset.c b/testpar/t_mdset.c
index 5d989bb..16eb13c 100644
--- a/testpar/t_mdset.c
+++ b/testpar/t_mdset.c
@@ -12,6 +12,7 @@
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include "testphdf5.h"
+#include "H5Dprivate.h"
#define DIM 2
#define SIZE 32
@@ -311,13 +312,27 @@ void compact_dataset(void)
VRFY((ret>= 0),"set independent IO collectively succeeded");
}
-
dataset = H5Dopen2(iof, dname, H5P_DEFAULT);
VRFY((dataset >= 0), "H5Dopen2 succeeded");
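+
+    /* In instrumented builds, insert the internal rank 0 Bcast property
+     * into the transfer property list so the H5Dread() call below can be
+     * checked for use of the rank 0 Bcast optimization.
+     */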
+#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
+ hbool_t prop_value;
+ prop_value = H5D_XFER_COLL_RANK0_BCAST_DEF;
+ ret = H5Pinsert2(dxpl, H5D_XFER_COLL_RANK0_BCAST_NAME, H5D_XFER_COLL_RANK0_BCAST_SIZE, &prop_value,
+ NULL, NULL, NULL, NULL, NULL, NULL);
+ VRFY((ret >= 0), "H5Pinsert2() succeeded");
+#endif /* H5_HAVE_INSTRUMENTED_LIBRARY */
+
ret = H5Dread(dataset, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, dxpl, inme);
VRFY((ret >= 0), "H5Dread succeeded");
+#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
+ prop_value = FALSE;
+ ret = H5Pget(dxpl, H5D_XFER_COLL_RANK0_BCAST_NAME, &prop_value);
+ VRFY((ret >= 0), "H5Pget succeeded");
+    VRFY((prop_value == FALSE && dxfer_coll_type == DXFER_COLLECTIVE_IO), "rank 0 Bcast optimization was not performed for a compact dataset");
+#endif /* H5_HAVE_INSTRUMENTED_LIBRARY */
+
/* Verify data value */
for(i = 0; i < size; i++)
for(j = 0; j < size; j++)
@@ -603,8 +618,8 @@ void dataset_fillvalue(void)
hsize_t req_count[4] = {1, 6, 7, 8};
hsize_t dset_size; /* Dataset size */
int *rdata, *wdata; /* Buffers for data to read and write */
- int *twdata, *trdata; /* Temporary pointer into buffer */
- int acc, i, j, k, l; /* Local index variables */
+    int *twdata, *trdata;                 /* Temporary pointers into buffers */
+ int acc, i, j, k, l, ii; /* Local index variables */
herr_t ret; /* Generic return value */
const char *filename;
@@ -645,27 +660,60 @@ void dataset_fillvalue(void)
/*
* Read dataset before any data is written.
*/
- /* set entire read buffer with the constant 2 */
- HDmemset(rdata,2,(size_t)(dset_size*sizeof(int)));
- /* Independently read the entire dataset back */
- ret = H5Dread(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, rdata);
- VRFY((ret >= 0), "H5Dread succeeded");
- /* Verify all data read are the fill value 0 */
- trdata = rdata;
- err_num = 0;
- for(i = 0; i < (int)dset_dims[0]; i++)
+ /* Create DXPL for I/O */
+ dxpl = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((dxpl >= 0), "H5Pcreate succeeded");
+
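+    /* In instrumented builds, add the internal rank 0 Bcast property so
+     * the reads below can be checked for use of the optimization.
+     */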
+#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
+ hbool_t prop_value;
+ prop_value = H5D_XFER_COLL_RANK0_BCAST_DEF;
+ ret = H5Pinsert2(dxpl, H5D_XFER_COLL_RANK0_BCAST_NAME, H5D_XFER_COLL_RANK0_BCAST_SIZE, &prop_value,
+ NULL, NULL, NULL, NULL, NULL, NULL);
+    VRFY((ret >= 0), "H5Pinsert2() succeeded");
+#endif /* H5_HAVE_INSTRUMENTED_LIBRARY */
+
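+    /* Read the dataset back twice, first with independent and then with
+     * collective I/O, verifying the fill value each time. In instrumented
+     * builds, also check that the rank 0 Bcast optimization is used only
+     * for the collective pass.
+     */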
+ for(ii = 0; ii < 2; ii++) {
+
+ if(ii == 0)
+ ret = H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_INDEPENDENT);
+ else
+ ret = H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);
+ VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
+
+ /* set entire read buffer with the constant 2 */
+ HDmemset(rdata,2,(size_t)(dset_size*sizeof(int)));
+
+ /* Read the entire dataset back */
+ ret = H5Dread(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, dxpl, rdata);
+ VRFY((ret >= 0), "H5Dread succeeded");
+
+#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
+ prop_value = FALSE;
+ ret = H5Pget(dxpl, H5D_XFER_COLL_RANK0_BCAST_NAME, &prop_value);
+        VRFY((ret >= 0), "H5Pget succeeded");
+        if(ii == 0)
+            VRFY((prop_value == FALSE), "rank 0 Bcast optimization was not used for independent I/O");
+        else
+            VRFY((prop_value == TRUE), "rank 0 Bcast optimization was used for collective I/O");
+#endif /* H5_HAVE_INSTRUMENTED_LIBRARY */
+
+ /* Verify all data read are the fill value 0 */
+ trdata = rdata;
+ err_num = 0;
+ for(i = 0; i < (int)dset_dims[0]; i++)
for(j = 0; j < (int)dset_dims[1]; j++)
- for(k = 0; k < (int)dset_dims[2]; k++)
- for(l = 0; l < (int)dset_dims[3]; l++, twdata++, trdata++)
- if(*trdata != 0)
- if(err_num++ < MAX_ERR_REPORT || VERBOSE_MED)
- printf("Dataset Verify failed at [%d][%d][%d][%d]: expect 0, got %d\n", i, j, k, l, *trdata);
- if(err_num > MAX_ERR_REPORT && !VERBOSE_MED)
+ for(k = 0; k < (int)dset_dims[2]; k++)
+ for(l = 0; l < (int)dset_dims[3]; l++, twdata++, trdata++)
+ if(*trdata != 0)
+ if(err_num++ < MAX_ERR_REPORT || VERBOSE_MED)
+ printf("Dataset Verify failed at [%d][%d][%d][%d]: expect 0, got %d\n", i, j, k, l, *trdata);
+ if(err_num > MAX_ERR_REPORT && !VERBOSE_MED)
printf("[more errors ...]\n");
- if(err_num){
+ if(err_num) {
printf("%d errors found in check_value\n", err_num);
- nerrors++;
+ nerrors++;
+ }
}
/* Barrier to ensure all processes have completed the above test. */
@@ -681,10 +729,6 @@ void dataset_fillvalue(void)
ret = H5Sselect_hyperslab(memspace, H5S_SELECT_SET, req_start, NULL, req_count, NULL);
VRFY((ret >= 0), "H5Sselect_hyperslab succeeded on memory dataspace");
- /* Create DXPL for collective I/O */
- dxpl = H5Pcreate(H5P_DATASET_XFER);
- VRFY((dxpl >= 0), "H5Pcreate succeeded");
-
ret = H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);
VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
@@ -711,37 +755,64 @@ void dataset_fillvalue(void)
/*
* Read dataset after partial write.
*/
- /* set entire read buffer with the constant 2 */
- HDmemset(rdata,2,(size_t)(dset_size*sizeof(int)));
- /* Independently read the entire dataset back */
- ret = H5Dread(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, rdata);
- VRFY((ret >= 0), "H5Dread succeeded");
- /* Verify correct data read */
- twdata=wdata;
- trdata=rdata;
- err_num=0;
- for(i=0; i<(int)dset_dims[0]; i++)
+#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
+ prop_value = H5D_XFER_COLL_RANK0_BCAST_DEF;
+ ret = H5Pset(dxpl, H5D_XFER_COLL_RANK0_BCAST_NAME, &prop_value);
+    VRFY((ret >= 0), "H5Pset succeeded");
+#endif /* H5_HAVE_INSTRUMENTED_LIBRARY */
+
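+    /* Repeat the independent/collective read passes now that part of the
+     * dataset has been written, checking both the data and the rank 0
+     * Bcast property value.
+     */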
+ for(ii = 0; ii < 2; ii++) {
+
+ if(ii == 0)
+ ret = H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_INDEPENDENT);
+ else
+ ret = H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);
+ VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
+
+ /* set entire read buffer with the constant 2 */
+ HDmemset(rdata,2,(size_t)(dset_size*sizeof(int)));
+
+ /* Read the entire dataset back */
+ ret = H5Dread(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, dxpl, rdata);
+ VRFY((ret >= 0), "H5Dread succeeded");
+
+#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
+ prop_value = FALSE;
+ ret = H5Pget(dxpl, H5D_XFER_COLL_RANK0_BCAST_NAME, &prop_value);
+        VRFY((ret >= 0), "H5Pget succeeded");
+        if(ii == 0)
+            VRFY((prop_value == FALSE), "rank 0 Bcast optimization was not used for independent I/O");
+        else
+            VRFY((prop_value == TRUE), "rank 0 Bcast optimization was used for collective I/O");
+#endif /* H5_HAVE_INSTRUMENTED_LIBRARY */
+
+ /* Verify correct data read */
+ twdata=wdata;
+ trdata=rdata;
+ err_num=0;
+ for(i=0; i<(int)dset_dims[0]; i++)
for(j=0; j<(int)dset_dims[1]; j++)
- for(k=0; k<(int)dset_dims[2]; k++)
- for(l=0; l<(int)dset_dims[3]; l++, twdata++, trdata++)
- if(i<mpi_size) {
- if(*twdata != *trdata )
- if(err_num++ < MAX_ERR_REPORT || VERBOSE_MED)
- printf("Dataset Verify failed at [%d][%d][%d][%d]: expect %d, got %d\n", i,j,k,l, *twdata, *trdata);
- } /* end if */
- else {
- if(*trdata != 0)
- if(err_num++ < MAX_ERR_REPORT || VERBOSE_MED)
- printf("Dataset Verify failed at [%d][%d][%d][%d]: expect 0, got %d\n", i,j,k,l, *trdata);
- } /* end else */
- if(err_num > MAX_ERR_REPORT && !VERBOSE_MED)
+ for(k=0; k<(int)dset_dims[2]; k++)
+ for(l=0; l<(int)dset_dims[3]; l++, twdata++, trdata++)
+ if(i<mpi_size) {
+ if(*twdata != *trdata )
+ if(err_num++ < MAX_ERR_REPORT || VERBOSE_MED)
+ printf("Dataset Verify failed at [%d][%d][%d][%d]: expect %d, got %d\n", i,j,k,l, *twdata, *trdata);
+ } /* end if */
+ else {
+ if(*trdata != 0)
+ if(err_num++ < MAX_ERR_REPORT || VERBOSE_MED)
+ printf("Dataset Verify failed at [%d][%d][%d][%d]: expect 0, got %d\n", i,j,k,l, *trdata);
+ } /* end else */
+ if(err_num > MAX_ERR_REPORT && !VERBOSE_MED)
printf("[more errors ...]\n");
- if(err_num){
+ if(err_num){
printf("%d errors found in check_value\n", err_num);
- nerrors++;
+ nerrors++;
+ }
}
-
+
/* Close all file objects */
ret = H5Dclose(dataset);
VRFY((ret >= 0), "H5Dclose succeeded");
@@ -856,7 +927,7 @@ void collective_group_write(void)
if(!((m+1) % 10)) {
printf("created %d groups\n", m+1);
MPI_Barrier(MPI_COMM_WORLD);
- }
+ }
#endif /* BARRIER_CHECKS */
}
diff --git a/testpar/t_pflush1.c b/testpar/t_pflush1.c
index 4677bfe..27b561b 100644
--- a/testpar/t_pflush1.c
+++ b/testpar/t_pflush1.c
@@ -15,11 +15,11 @@
* Programmer: Leon Arber <larber@uiuc.edu>
* Sept. 28, 2006.
*
- * Purpose: This is the first half of a two-part test that makes sure
- * that a file can be read after a parallel application crashes as long
- * as the file was flushed first. We simulate a crash by
- * calling _exit(0) since this doesn't flush HDF5 caches but
- * still exits with success.
+ * Purpose: This is the first half of a two-part test that makes sure
+ * that a file can be read after a parallel application crashes
+ * as long as the file was flushed first. We simulate a crash by
+ * calling _exit() since this doesn't flush HDF5 caches but
+ * still exits with success.
*/
#include "h5test.h"
@@ -29,171 +29,190 @@ const char *FILENAME[] = {
NULL
};
-static double the_data[100][100];
+static int data_g[100][100];
+#define N_GROUPS 100
+
+
/*-------------------------------------------------------------------------
- * Function: create_file
- *
- * Purpose: Creates file used in part 1 of the test
+ * Function: create_test_file
*
- * Return: Success: 0
+ * Purpose: Creates the file used in part 1 of the test
*
- * Failure: 1
+ * Return: Success: A valid file ID
+ * Failure: H5I_INVALID_HID
*
- * Programmer: Leon Arber
+ * Programmer: Leon Arber
* Sept. 26, 2006
*
- * Modifications:
- *
*-------------------------------------------------------------------------
*/
static hid_t
-create_file(char* name, hid_t fapl)
+create_test_file(char *name, hid_t fapl_id)
{
- hid_t file, dcpl, space, dset, groups, grp, plist;
- hsize_t ds_size[2] = {100, 100};
- hsize_t ch_size[2] = {5, 5};
- hsize_t i, j;
-
-
-
- if((file=H5Fcreate(name, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0) goto error;
+ hid_t fid = H5I_INVALID_HID;
+ hid_t dcpl_id = H5I_INVALID_HID;
+ hid_t sid = H5I_INVALID_HID;
+ hid_t did = H5I_INVALID_HID;
+ hid_t top_level_gid = H5I_INVALID_HID;
+ hid_t gid = H5I_INVALID_HID;
+ hid_t dxpl_id = H5I_INVALID_HID;
+ hsize_t dims[2] = {100, 100};
+ hsize_t chunk_dims[2] = {5, 5};
+ hsize_t i, j;
+
+ if((fid = H5Fcreate(name, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id)) < 0)
+ goto error;
/* Create a chunked dataset */
- if((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0) goto error;
- if(H5Pset_chunk(dcpl, 2, ch_size) < 0) goto error;
- if((space = H5Screate_simple(2, ds_size, NULL)) < 0) goto error;
- if((dset = H5Dcreate2(file, "dset", H5T_NATIVE_FLOAT, space, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0)
- goto error;
-
- plist = H5Pcreate(H5P_DATASET_XFER);
- H5Pset_dxpl_mpio(plist, H5FD_MPIO_COLLECTIVE);
-
+ if((dcpl_id = H5Pcreate(H5P_DATASET_CREATE)) < 0)
+ goto error;
+ if(H5Pset_chunk(dcpl_id, 2, chunk_dims) < 0)
+ goto error;
+ if((sid = H5Screate_simple(2, dims, NULL)) < 0)
+ goto error;
+ if((did = H5Dcreate2(fid, "dset", H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0)
+ goto error;
+
+ if((dxpl_id = H5Pcreate(H5P_DATASET_XFER)) < 0)
+ goto error;
+ if(H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE) < 0)
+ goto error;
/* Write some data */
- for(i = 0; i < ds_size[0]; i++) {
- /*
- * The extra cast in the following statement is a bug workaround
- * for the Win32 version 5.0 compiler.
- * 1998-11-06 ptl
- */
- for(j = 0; j < ds_size[1]; j++)
- the_data[i][j] = (double)(hssize_t)i/(hssize_t)(j+1);
- }
- if(H5Dwrite(dset, H5T_NATIVE_DOUBLE, space, space, plist, the_data) < 0) goto error;
+ for(i = 0; i < dims[0]; i++)
+ for(j = 0; j < dims[1]; j++)
+ data_g[i][j] = (int)(i + (i * j) + j);
+
+ if(H5Dwrite(did, H5T_NATIVE_INT, sid, sid, dxpl_id, data_g) < 0)
+ goto error;
/* Create some groups */
- if((groups = H5Gcreate2(file, "some_groups", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) goto error;
- for(i = 0; i < 100; i++) {
- sprintf(name, "grp%02u", (unsigned)i);
- if((grp = H5Gcreate2(groups, name, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) goto error;
- if(H5Gclose(grp) < 0) goto error;
+ if((top_level_gid = H5Gcreate2(fid, "some_groups", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0)
+ goto error;
+ for(i = 0; i < N_GROUPS; i++) {
+ HDsprintf(name, "grp%02u", (unsigned)i);
+ if((gid = H5Gcreate2(top_level_gid, name, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0)
+ goto error;
+ if(H5Gclose(gid) < 0)
+ goto error;
}
- return file;
+ return fid;
error:
- HD_exit(1);
-}
+ return H5I_INVALID_HID;
+} /* end create_test_file() */
+
/*-------------------------------------------------------------------------
* Function: main
*
- * Purpose: Part 1 of a two-part H5Fflush() test.
- *
- * Return: Success: 0
+ * Purpose: Part 1 of a two-part parallel H5Fflush() test.
*
- * Failure: 1
+ * Return: EXIT_FAILURE (always)
*
- * Programmer: Robb Matzke
+ * Programmer: Robb Matzke
* Friday, October 23, 1998
*
- * Modifications:
- * Leon Arber
- * Sept. 26, 2006, expand test to check for failure if H5Fflush is not called.
- *
- *
*-------------------------------------------------------------------------
*/
int
main(int argc, char* argv[])
{
- hid_t file1, file2, fapl;
- MPI_File *mpifh_p = NULL;
- char name[1024];
- const char *envval = NULL;
- int mpi_size, mpi_rank;
- MPI_Comm comm = MPI_COMM_WORLD;
- MPI_Info info = MPI_INFO_NULL;
+ hid_t fid1 = H5I_INVALID_HID;
+ hid_t fid2 = H5I_INVALID_HID;
+ hid_t fapl_id = H5I_INVALID_HID;
+ MPI_File *mpifh_p = NULL;
+ char name[1024];
+ const char *envval = NULL;
+ int mpi_size;
+ int mpi_rank;
+ MPI_Comm comm = MPI_COMM_WORLD;
+ MPI_Info info = MPI_INFO_NULL;
MPI_Init(&argc, &argv);
MPI_Comm_size(comm, &mpi_size);
MPI_Comm_rank(comm, &mpi_rank);
- fapl = H5Pcreate(H5P_FILE_ACCESS);
- H5Pset_fapl_mpio(fapl, comm, info);
-
if(mpi_rank == 0)
- TESTING("H5Fflush (part1)");
+ TESTING("H5Fflush (part1)");
+
+ /* Don't run using the split VFD */
envval = HDgetenv("HDF5_DRIVER");
if(envval == NULL)
envval = "nomatch";
- if(HDstrcmp(envval, "split")) {
+
+ if(!HDstrcmp(envval, "split")) {
+ if(mpi_rank == 0) {
+ SKIPPED();
+ HDputs(" Test not compatible with current Virtual File Driver");
+ }
+ MPI_Finalize();
+ HDexit(EXIT_FAILURE);
+ }
+
+ if((fapl_id = H5Pcreate(H5P_FILE_ACCESS)) < 0)
+ goto error;
+ if(H5Pset_fapl_mpio(fapl_id, comm, info) < 0)
+ goto error;
+
/* Create the file */
- h5_fixname(FILENAME[0], fapl, name, sizeof name);
- file1 = create_file(name, fapl);
+ h5_fixname(FILENAME[0], fapl_id, name, sizeof(name));
+ if((fid1 = create_test_file(name, fapl_id)) < 0)
+ goto error;
/* Flush and exit without closing the library */
- if(H5Fflush(file1, H5F_SCOPE_GLOBAL) < 0) goto error;
+ if(H5Fflush(fid1, H5F_SCOPE_GLOBAL) < 0)
+ goto error;
/* Create the other file which will not be flushed */
- h5_fixname(FILENAME[1], fapl, name, sizeof name);
- file2 = create_file(name, fapl);
-
+ h5_fixname(FILENAME[1], fapl_id, name, sizeof(name));
+ if((fid2 = create_test_file(name, fapl_id)) < 0)
+ goto error;
if(mpi_rank == 0)
PASSED();
- fflush(stdout);
- fflush(stderr);
- } /* end if */
- else {
- SKIPPED();
- puts(" Test not compatible with current Virtual File Driver");
- } /* end else */
-
- /*
- * Some systems like AIX do not like files not closed when MPI_Finalize
+
+ HDfflush(stdout);
+ HDfflush(stderr);
+
+ /* Some systems like AIX do not like files not being closed when MPI_Finalize
* is called. So, we need to get the MPI file handles, close them by hand.
* Then the _exit is still needed to stop at_exit from happening in some systems.
* Note that MPIO VFD returns the address of the file-handle in the VFD struct
* because MPI_File_close wants to modify the file-handle variable.
*/
- /* close file1 */
- if(H5Fget_vfd_handle(file1, fapl, (void **)&mpifh_p) < 0) {
- printf("H5Fget_vfd_handle for file1 failed\n");
- goto error;
- } /* end if */
- if(MPI_File_close(mpifh_p) != MPI_SUCCESS) {
- printf("MPI_File_close for file1 failed\n");
- goto error;
- } /* end if */
- /* close file2 */
- if(H5Fget_vfd_handle(file2, fapl, (void **)&mpifh_p) < 0) {
- printf("H5Fget_vfd_handle for file2 failed\n");
- goto error;
- } /* end if */
- if(MPI_File_close(mpifh_p) != MPI_SUCCESS) {
- printf("MPI_File_close for file2 failed\n");
- goto error;
- } /* end if */
-
- fflush(stdout);
- fflush(stderr);
- HD_exit(0);
+ /* Close file 1 */
+ if(H5Fget_vfd_handle(fid1, fapl_id, (void **)&mpifh_p) < 0)
+ goto error;
+ if(MPI_File_close(mpifh_p) != MPI_SUCCESS)
+ goto error;
+
+ /* Close file 2 */
+ if(H5Fget_vfd_handle(fid2, fapl_id, (void **)&mpifh_p) < 0)
+ goto error;
+ if(MPI_File_close(mpifh_p) != MPI_SUCCESS)
+ goto error;
+
+ HDfflush(stdout);
+ HDfflush(stderr);
+
+ /* Always exit with a failure code!
+ *
+ * In accordance with the standard, not having all processes
+ * call MPI_Finalize() can be considered an error, so mpiexec
+ * et al. may indicate failure on return. It's much easier to
+ * always ignore the failure condition than to handle some
+ * platforms returning success and others failure.
+ */
+ HD_exit(EXIT_FAILURE);
error:
- fflush(stdout);
- fflush(stderr);
- HD_exit(1);
-}
+ HDfflush(stdout);
+ HDfflush(stderr);
+ HDprintf("*** ERROR ***\n");
+ HDprintf("THERE WAS A REAL ERROR IN t_pflush1.\n");
+ HD_exit(EXIT_FAILURE);
+} /* end main() */
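Note the shutdown dance at the end of main(): because t_pflush1 must terminate without closing the library (that is the whole point of the flush test), it reaches under HDF5 with H5Fget_vfd_handle() and closes the raw MPI file handles itself before calling _exit(). Roughly, as a sketch (hypothetical helper; assumes <stdlib.h>, <unistd.h>, <mpi.h> and an MPIO-backed file):

    /* The MPIO VFD hands back the address of its MPI_File member
     * because MPI_File_close() needs to modify the handle it is given. */
    static void
    close_mpi_handle_and_abandon(hid_t fid, hid_t fapl_id)
    {
        MPI_File *mpifh_p = NULL;

        if (H5Fget_vfd_handle(fid, fapl_id, (void **)&mpifh_p) < 0)
            _exit(EXIT_FAILURE);
        if (MPI_File_close(mpifh_p) != MPI_SUCCESS)
            _exit(EXIT_FAILURE);
        /* Deliberately no H5Fclose(): the file must stay unclosed from
         * HDF5's point of view so part 2 can detect the missing flush. */
    }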
diff --git a/testpar/t_pflush2.c b/testpar/t_pflush2.c
index 2051f4e..f58e5a5 100644
--- a/testpar/t_pflush2.c
+++ b/testpar/t_pflush2.c
@@ -30,116 +30,124 @@ const char *FILENAME[] = {
NULL
};
-static double the_data[100][100];
+static int data_g[100][100];
+#define N_GROUPS 100
/*-------------------------------------------------------------------------
- * Function: check_file
+ * Function: check_test_file
*
- * Purpose: Part 2 of a two-part H5Fflush() test.
+ * Purpose: Part 2 of a two-part H5Fflush() test.
*
- * Return: Success: 0
+ * Return: SUCCEED/FAIL
*
- * Failure: 1
- *
- * Programmer: Leon Arber
+ * Programmer: Leon Arber
* Sept. 26, 2006.
*
*-------------------------------------------------------------------------
*/
-static int
-check_file(char* name, hid_t fapl)
+static herr_t
+check_test_file(char* name, hid_t fapl_id)
{
- hid_t file, space, dset, groups, grp, plist;
- hsize_t ds_size[2];
- double error;
- hsize_t i, j;
-
- plist = H5Pcreate(H5P_DATASET_XFER);
- H5Pset_dxpl_mpio(plist, H5FD_MPIO_COLLECTIVE);
- if((file = H5Fopen(name, H5F_ACC_RDONLY, fapl)) < 0) goto error;
+ hid_t fid = H5I_INVALID_HID;
+ hid_t sid = H5I_INVALID_HID;
+ hid_t did = H5I_INVALID_HID;
+ hid_t top_level_gid = H5I_INVALID_HID;
+ hid_t gid = H5I_INVALID_HID;
+ hid_t dxpl_id = H5I_INVALID_HID;
+ hsize_t dims[2];
+ int val;
+ hsize_t i, j;
+
+ if((dxpl_id = H5Pcreate(H5P_DATASET_XFER)) < 0)
+ goto error;
+ if(H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE) < 0)
+ goto error;
+ if((fid = H5Fopen(name, H5F_ACC_RDONLY, fapl_id)) < 0)
+ goto error;
/* Open the dataset */
- if((dset = H5Dopen2(file, "dset", H5P_DEFAULT)) < 0) goto error;
- if((space = H5Dget_space(dset)) < 0) goto error;
- if(H5Sget_simple_extent_dims(space, ds_size, NULL) < 0) goto error;
- assert(100==ds_size[0] && 100==ds_size[1]);
+ if((did = H5Dopen2(fid, "dset", H5P_DEFAULT)) < 0)
+ goto error;
+ if((sid = H5Dget_space(did)) < 0)
+ goto error;
+ if(H5Sget_simple_extent_dims(sid, dims, NULL) < 0)
+ goto error;
+ HDassert(100 == dims[0] && 100 == dims[1]);
/* Read some data */
- if (H5Dread(dset, H5T_NATIVE_DOUBLE, space, space, plist,
- the_data) < 0) goto error;
- for (i=0; i<ds_size[0]; i++) {
- for (j=0; j<ds_size[1]; j++) {
- /*
- * The extra cast in the following statement is a bug workaround
- * for the Win32 version 5.0 compiler.
- * 1998-11-06 ptl
- */
- error = fabs(the_data[i][j]-(double)(hssize_t)i/((hssize_t)j+1));
- if (error>0.0001) {
- H5_FAILED();
- printf(" dset[%lu][%lu] = %g\n",
- (unsigned long)i, (unsigned long)j, the_data[i][j]);
- printf(" should be %g\n",
- (double)(hssize_t)i/(hssize_t)(j+1));
- goto error;
- }
- }
+ if(H5Dread(did, H5T_NATIVE_INT, sid, sid, dxpl_id, data_g) < 0)
+ goto error;
+ for(i = 0; i < dims[0]; i++) {
+ for(j = 0; j < dims[1]; j++) {
+ val = (int)(i + (i * j) + j);
+ if(data_g[i][j] != val) {
+ H5_FAILED();
+ HDprintf(" data_g[%lu][%lu] = %d\n", (unsigned long)i, (unsigned long)j, data_g[i][j]);
+ HDprintf(" should be %d\n", val);
+ }
+ }
}
/* Open some groups */
- if((groups = H5Gopen2(file, "some_groups", H5P_DEFAULT)) < 0) goto error;
- for(i = 0; i < 100; i++) {
- sprintf(name, "grp%02u", (unsigned)i);
- if((grp = H5Gopen2(groups, name, H5P_DEFAULT)) < 0) goto error;
- if(H5Gclose(grp) < 0) goto error;
+ if((top_level_gid = H5Gopen2(fid, "some_groups", H5P_DEFAULT)) < 0)
+ goto error;
+ for(i = 0; i < N_GROUPS; i++) {
+ HDsprintf(name, "grp%02u", (unsigned)i);
+ if((gid = H5Gopen2(top_level_gid, name, H5P_DEFAULT)) < 0)
+ goto error;
+ if(H5Gclose(gid) < 0)
+ goto error;
}
- if(H5Gclose(groups) < 0) goto error;
- if(H5Dclose(dset) < 0) goto error;
- if(H5Fclose(file) < 0) goto error;
- if(H5Pclose(plist) < 0) goto error;
- if(H5Sclose(space) < 0) goto error;
+ if(H5Gclose(top_level_gid) < 0)
+ goto error;
+ if(H5Dclose(did) < 0)
+ goto error;
+ if(H5Fclose(fid) < 0)
+ goto error;
+ if(H5Pclose(dxpl_id) < 0)
+ goto error;
+ if(H5Sclose(sid) < 0)
+ goto error;
- return 0;
+ return SUCCEED;
error:
H5E_BEGIN_TRY {
- H5Pclose(plist);
- H5Gclose(groups);
- H5Dclose(dset);
- H5Fclose(file);
- H5Sclose(space);
+ H5Pclose(dxpl_id);
+ H5Gclose(top_level_gid);
+ H5Dclose(did);
+ H5Fclose(fid);
+ H5Sclose(sid);
+ H5Gclose(gid);
} H5E_END_TRY;
- return 1;
-}
+ return FAIL;
+} /* end check_test_file() */
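The error label above leans on HDF5's H5E_BEGIN_TRY/H5E_END_TRY macros, which suppress the automatic error-stack printout while the test closes IDs that may never have been opened. A minimal sketch of the idiom (hypothetical helper):

    /* Close an ID that might be invalid without spamming stderr. */
    static void
    close_quietly(hid_t gid)
    {
        H5E_BEGIN_TRY {
            H5Gclose(gid);  /* fails silently if gid is H5I_INVALID_HID */
        } H5E_END_TRY;
    }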
/*-------------------------------------------------------------------------
- * Function: main
+ * Function: main
*
- * Purpose: Part 2 of a two-part H5Fflush() test.
+ * Purpose: Part 2 of a two-part H5Fflush() test.
*
- * Return: Success: 0
+ * Return: EXIT_SUCCESS/EXIT_FAILURE
*
- * Failure: 1
- *
- * Programmer: Robb Matzke
+ * Programmer: Robb Matzke
* Friday, October 23, 1998
*
- * Modifications:
- * Leon Arber
- * Sept. 26, 2006, expand to check for case where the was file not flushed.
- *
*-------------------------------------------------------------------------
*/
int
-main(int argc, char* argv[])
+main(int argc, char *argv[])
{
+ hid_t fapl_id1 = H5I_INVALID_HID;
+ hid_t fapl_id2 = H5I_INVALID_HID;
H5E_auto2_t func;
- char name[1024];
+ char name[1024];
const char *envval = NULL;
- int mpi_size, mpi_rank;
+ int mpi_size;
+ int mpi_rank;
MPI_Comm comm = MPI_COMM_WORLD;
MPI_Info info = MPI_INFO_NULL;
@@ -148,69 +156,70 @@ main(int argc, char* argv[])
MPI_Comm_rank(comm, &mpi_rank);
if(mpi_rank == 0)
- TESTING("H5Fflush (part2 with flush)");
+ TESTING("H5Fflush (part2 with flush)");
- /* Don't run this test using the core or split file drivers */
+ /* Don't run using the split VFD */
envval = HDgetenv("HDF5_DRIVER");
- if (envval == NULL)
+ if(envval == NULL)
envval = "nomatch";
- if (HDstrcmp(envval, "core") && HDstrcmp(envval, "split")) {
- hid_t fapl1, fapl2;
-
- fapl1 = H5Pcreate(H5P_FILE_ACCESS);
- H5Pset_fapl_mpio(fapl1, comm, info);
-
- fapl2 = H5Pcreate(H5P_FILE_ACCESS);
- H5Pset_fapl_mpio(fapl2, comm, info);
-
- /* Check the case where the file was flushed */
- h5_fixname(FILENAME[0], fapl1, name, sizeof name);
- if(check_file(name, fapl1))
- {
- H5_FAILED()
- goto error;
- }
- else if(mpi_rank == 0)
- {
- PASSED()
- }
-
- /* Check the case where the file was not flushed. This should give an error
- * so we turn off the error stack temporarily */
- if(mpi_rank == 0)
- TESTING("H5Fflush (part2 without flush)");
- H5Eget_auto2(H5E_DEFAULT,&func,NULL);
- H5Eset_auto2(H5E_DEFAULT, NULL, NULL);
-
- h5_fixname(FILENAME[1], fapl2, name, sizeof name);
- if(check_file(name, fapl2))
- {
- if(mpi_rank == 0)
- {
- PASSED()
- }
- }
- else
- {
- H5_FAILED()
- goto error;
- }
- H5Eset_auto2(H5E_DEFAULT, func, NULL);
-
-
- h5_clean_files(&FILENAME[0], fapl1);
- h5_clean_files(&FILENAME[1], fapl2);
+
+ if(!HDstrcmp(envval, "split")) {
+ if(mpi_rank == 0) {
+ SKIPPED();
+ HDputs(" Test not compatible with current Virtual File Driver");
+ }
+ MPI_Finalize();
+ HDexit(EXIT_FAILURE);
+ }
+
+ if((fapl_id1 = H5Pcreate(H5P_FILE_ACCESS)) < 0)
+ goto error;
+ if(H5Pset_fapl_mpio(fapl_id1, comm, info) < 0)
+ goto error;
+
+ if((fapl_id2 = H5Pcreate(H5P_FILE_ACCESS)) < 0)
+ goto error;
+ if(H5Pset_fapl_mpio(fapl_id2, comm, info) < 0)
+ goto error;
+
+ /* Check the case where the file was flushed */
+ h5_fixname(FILENAME[0], fapl_id1, name, sizeof(name));
+ if(check_test_file(name, fapl_id1)) {
+ H5_FAILED()
+ goto error;
}
- else
- {
- SKIPPED();
- puts(" Test not compatible with current Virtual File Driver");
+ else if(mpi_rank == 0) {
+ PASSED()
}
+ /* Check the case where the file was not flushed. This should give an error
+ * so we turn off the error stack temporarily.
+ */
+ if(mpi_rank == 0)
+ TESTING("H5Fflush (part2 without flush)");
+ H5Eget_auto2(H5E_DEFAULT, &func, NULL);
+ H5Eset_auto2(H5E_DEFAULT, NULL, NULL);
+
+ h5_fixname(FILENAME[1], fapl_id2, name, sizeof(name));
+ if(check_test_file(name, fapl_id2)) {
+ if(mpi_rank == 0)
+ PASSED()
+ }
+ else {
+ H5_FAILED()
+ goto error;
+ }
+
+ H5Eset_auto2(H5E_DEFAULT, func, NULL);
+
+ h5_clean_files(&FILENAME[0], fapl_id1);
+ h5_clean_files(&FILENAME[1], fapl_id2);
+
MPI_Finalize();
- return 0;
- error:
- return 1;
-}
+ HDexit(EXIT_SUCCESS);
+
+error:
+ HDexit(EXIT_FAILURE);
+} /* end main() */
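The "part2 without flush" branch above is an expected-failure check: automatic error reporting is switched off with H5Eset_auto2(), the unflushed file is probed, and the old handler is restored. As a sketch, reusing the test's identifiers:

    H5E_auto2_t old_func;
    void       *old_client_data;

    /* Save and silence the default error handler */
    H5Eget_auto2(H5E_DEFAULT, &old_func, &old_client_data);
    H5Eset_auto2(H5E_DEFAULT, NULL, NULL);

    /* The unflushed file is *supposed* to be unreadable */
    if (check_test_file(name, fapl_id2) < 0)
        HDputs("expected failure observed: file was never flushed");

    /* Restore normal error reporting */
    H5Eset_auto2(H5E_DEFAULT, old_func, old_client_data);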
diff --git a/testpar/t_pread.c b/testpar/t_pread.c
index 19ccf56..ac33b88 100644
--- a/testpar/t_pread.c
+++ b/testpar/t_pread.c
@@ -17,6 +17,7 @@
*/
#include "testpar.h"
+#include "H5Dprivate.h"
/* The collection of files is included below to aid
* an external "cleanup" process if required.
@@ -34,6 +35,8 @@ const char *FILENAMES[NFILENAME + 1]={"reloc_t_pread_data_file",
#define COUNT 1000
+#define LIMIT_NPROC 6
+
hbool_t pass = true;
static const char *random_hdf5_text =
"Now is the time for all first-time-users of HDF5 to read their \
@@ -46,7 +49,7 @@ completely foolproof is to underestimate the ingenuity of complete\n\
fools.\n";
static int generate_test_file(MPI_Comm comm, int mpi_rank, int group);
-static int test_parallel_read(MPI_Comm comm, int mpi_rank, int group);
+static int test_parallel_read(MPI_Comm comm, int mpi_rank, int mpi_size, int group);
static char *test_argv0 = NULL;
@@ -108,6 +111,9 @@ generate_test_file( MPI_Comm comm, int mpi_rank, int group_id )
hid_t fapl_id = -1;
hid_t dxpl_id = -1;
hid_t dset_id = -1;
+ hid_t dset_id_ch = -1;
+ hid_t dcpl_id = H5P_DEFAULT;
+ hsize_t chunk[1];
float nextValue;
float *data_slice = NULL;
@@ -272,6 +278,55 @@ generate_test_file( MPI_Comm comm, int mpi_rank, int group_id )
}
}
+
+ /* create a chunked dataset */
+ chunk[0] = COUNT/8;
+
+ if ( pass ) {
+ if ( (dcpl_id = H5Pcreate (H5P_DATASET_CREATE)) < 0 ) {
+ pass = false;
+ failure_mssg = "H5Pcreate() failed.\n";
+ }
+ }
+
+ if ( pass ) {
+ if ( (H5Pset_chunk (dcpl_id, 1, chunk) ) < 0 ) {
+ pass = false;
+ failure_mssg = "H5Pset_chunk() failed.\n";
+ }
+ }
+
+ if ( pass ) {
+
+ if ( (dset_id_ch = H5Dcreate2(file_id, "dataset0_chunked", H5T_NATIVE_FLOAT,
+ filespace, H5P_DEFAULT, dcpl_id,
+ H5P_DEFAULT)) < 0 ) {
+ pass = false;
+ failure_mssg = "H5Dcreate2() failed.\n";
+ }
+ }
+
+ if ( pass ) {
+ if ( (H5Dwrite(dset_id_ch, H5T_NATIVE_FLOAT, memspace,
+ filespace, dxpl_id, data_slice)) < 0 ) {
+ pass = false;
+ failure_mssg = "H5Dwrite() failed.\n";
+ }
+ }
+ if ( pass || (dcpl_id != -1)) {
+ if ( H5Pclose(dcpl_id) < 0 ) {
+ pass = false;
+ failure_mssg = "H5Pclose(dcpl_id) failed.\n";
+ }
+ }
+
+ if ( pass || (dset_id_ch != -1)) {
+ if ( H5Dclose(dset_id_ch) < 0 ) {
+ pass = false;
+ failure_mssg = "H5Dclose(dset_id_ch) failed.\n";
+ }
+ }
+
/* close file, etc. */
if ( pass || (dset_id != -1)) {
if ( H5Dclose(dset_id) < 0 ) {
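The new "dataset0_chunked" dataset added above exists so the read test can later confirm that the rank-0 read-and-broadcast optimization is not applied to chunked layouts (see the H5_HAVE_INSTRUMENTED_LIBRARY checks further down). The chunking recipe itself, condensed into a sketch that reuses the hunk's identifiers:

    hsize_t chunk[1] = { COUNT / 8 };                 /* 125-element chunks */
    hid_t   dcpl_id  = H5Pcreate(H5P_DATASET_CREATE);

    H5Pset_chunk(dcpl_id, 1, chunk);                  /* rank-1 chunked layout */
    dset_id_ch = H5Dcreate2(file_id, "dataset0_chunked", H5T_NATIVE_FLOAT,
                            filespace, H5P_DEFAULT, dcpl_id, H5P_DEFAULT);
    H5Pclose(dcpl_id);                                /* layout is baked in */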
@@ -413,7 +468,7 @@ generate_test_file( MPI_Comm comm, int mpi_rank, int group_id )
* Function: test_parallel_read
*
* Purpose: This actually tests the superblock optimization
- * and covers the two primary cases we're interested in.
+ * and covers the three primary cases we're interested in.
* 1). That HDF5 files can be opened in parallel by
* the rank 0 process and that the superblock
* offset is correctly broadcast to the other
@@ -423,6 +478,10 @@ generate_test_file( MPI_Comm comm, int mpi_rank, int group_id )
* subgroups of MPI_COMM_WORLD and that each
* subgroup operates as described in (1) to
* collectively read the data.
+ * 3). Testing proc0-read-and-MPI_Bcast using
+ * sub-communicators, and reading into
+ * a memory space that is different from the
+ * file space, and chunked datasets.
*
* The global MPI rank is used for reading and
* writing data for process specific data in the
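The sub-communicators referenced in cases (2) and (3) come from splitting MPI_COMM_WORLD in main(), outside this hunk. The usual shape of that split, shown as a sketch consistent with the which_group variable used later in the diff:

    int      which_group = (mpi_rank < mpi_size / 2) ? 0 : 1;
    MPI_Comm group_comm  = MPI_COMM_NULL;

    /* group 0: ranks [0, mpi_size/2); group 1: the remaining ranks */
    MPI_Comm_split(MPI_COMM_WORLD, which_group, mpi_rank, &group_comm);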
@@ -444,7 +503,7 @@ generate_test_file( MPI_Comm comm, int mpi_rank, int group_id )
*-------------------------------------------------------------------------
*/
static int
-test_parallel_read(MPI_Comm comm, int mpi_rank, int group_id)
+test_parallel_read(MPI_Comm comm, int mpi_rank, int mpi_size, int group_id)
{
const char *failure_mssg;
const char *fcn_name = "test_parallel_read()";
@@ -457,8 +516,13 @@ test_parallel_read(MPI_Comm comm, int mpi_rank, int group_id)
hid_t fapl_id = -1;
hid_t file_id = -1;
hid_t dset_id = -1;
+ hid_t dset_id_ch = -1;
+ hid_t dxpl_id = H5P_DEFAULT;
hid_t memspace = -1;
hid_t filespace = -1;
+ hid_t filetype = -1;
+ size_t filetype_size;
+ hssize_t dset_size;
hsize_t i;
hsize_t offset;
hsize_t count = COUNT;
@@ -552,6 +616,14 @@ test_parallel_read(MPI_Comm comm, int mpi_rank, int group_id)
}
}
+ /* open the chunked data set */
+ if ( pass ) {
+ if ( (dset_id_ch = H5Dopen2(file_id, "dataset0_chunked", H5P_DEFAULT)) < 0 ) {
+ pass = FALSE;
+ failure_mssg = "H5Dopen2() failed\n";
+ }
+ }
+
/* setup memspace */
if ( pass ) {
dims[0] = count;
@@ -606,14 +678,6 @@ test_parallel_read(MPI_Comm comm, int mpi_rank, int group_id)
}
}
- /* close file, etc. */
- if ( pass || (dset_id != -1) ) {
- if ( H5Dclose(dset_id) < 0 ) {
- pass = false;
- failure_mssg = "H5Dclose(dset_id) failed.\n";
- }
- }
-
if ( pass || (memspace != -1) ) {
if ( H5Sclose(memspace) < 0 ) {
pass = false;
@@ -628,6 +692,330 @@ test_parallel_read(MPI_Comm comm, int mpi_rank, int group_id)
}
}
+ /* free data_slice if it has been allocated */
+ if ( data_slice != NULL ) {
+ HDfree(data_slice);
+ data_slice = NULL;
+ }
+
+ /*
+ * Test reading proc0-read-and-bcast with sub-communicators
+ */
+
+ /* Don't test with more than LIMIT_NPROC processes to avoid memory issues */
+
+ if( group_size <= LIMIT_NPROC ) {
+#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
+ hbool_t prop_value;
+#endif /* H5_HAVE_INSTRUMENTED_LIBRARY */
+
+ if ( (filespace = H5Dget_space(dset_id )) < 0 ) {
+ pass = FALSE;
+ failure_mssg = "H5Dget_space failed.\n";
+ }
+
+ if ( (dset_size = H5Sget_simple_extent_npoints(filespace)) < 0 ) {
+ pass = FALSE;
+ failure_mssg = "H5Sget_simple_extent_npoints failed.\n";
+ }
+
+ if ( (filetype = H5Dget_type(dset_id)) < 0 ) {
+ pass = FALSE;
+ failure_mssg = "H5Dget_type failed.\n";
+ }
+
+ if ( (filetype_size = H5Tget_size(filetype)) == 0 ) {
+ pass = FALSE;
+ failure_mssg = "H5Tget_size failed.\n";
+ }
+
+ if ( H5Tclose(filetype) < 0 ) {
+ pass = FALSE;
+ failure_mssg = "H5Tclose failed.\n";
+ }
+
+ if ( (data_slice = (float *)HDmalloc((size_t)dset_size*filetype_size)) == NULL ) {
+ pass = FALSE;
+ failure_mssg = "malloc of data_slice failed.\n";
+ }
+
+ if ( pass ) {
+ if ( (dxpl_id = H5Pcreate(H5P_DATASET_XFER)) < 0 ) {
+ pass = FALSE;
+ failure_mssg = "H5Pcreate(H5P_DATASET_XFER) failed.\n";
+ }
+ }
+
+ if ( pass ) {
+ if ( (H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE)) < 0 ) {
+ pass = FALSE;
+ failure_mssg = "H5Pset_dxpl_mpio() failed.\n";
+ }
+ }
+
+#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
+ if ( pass ) {
+ prop_value = H5D_XFER_COLL_RANK0_BCAST_DEF;
+ if(H5Pinsert2(dxpl_id, H5D_XFER_COLL_RANK0_BCAST_NAME, H5D_XFER_COLL_RANK0_BCAST_SIZE, &prop_value,
+ NULL, NULL, NULL, NULL, NULL, NULL) < 0) {
+ pass = FALSE;
+ failure_mssg = "H5Pinsert2() failed\n";
+ }
+ }
+#endif /* H5_HAVE_INSTRUMENTED_LIBRARY */
+
+ /* read H5S_ALL section */
+ if ( pass ) {
+ if ( (H5Dread(dset_id, H5T_NATIVE_FLOAT, H5S_ALL,
+ H5S_ALL, dxpl_id, data_slice)) < 0 ) {
+ pass = FALSE;
+ failure_mssg = "H5Dread() failed\n";
+ }
+ }
+
+#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
+ if ( pass ) {
+ prop_value = FALSE;
+ if(H5Pget(dxpl_id, H5D_XFER_COLL_RANK0_BCAST_NAME, &prop_value) < 0) {
+ pass = FALSE;
+ failure_mssg = "H5Pget() failed\n";
+ }
+ if (pass) {
+ if(prop_value != TRUE) {
+ pass = FALSE;
+ failure_mssg = "rank 0 Bcast optimization was mistakenly not performed\n";
+ }
+ }
+ }
+#endif /* H5_HAVE_INSTRUMENTED_LIBRARY */
+
+ /* verify the data */
+ if ( pass ) {
+
+ if ( comm == MPI_COMM_WORLD ) /* test 1 */
+ nextValue = 0;
+ else if ( group_id == 0 ) /* test 2 group 0 */
+ nextValue = 0;
+ else /* test 2 group 1 */
+ nextValue = (float)((hsize_t)( mpi_size / 2 )*count);
+
+ i = 0;
+ while ( ( pass ) && ( i < (hsize_t)dset_size ) ) {
+ /* what we really want is data_slice[i] != nextValue --
+ * the following is a circumlocution to shut up
+ * the compiler.
+ */
+ if ( ( data_slice[i] > nextValue ) ||
+ ( data_slice[i] < nextValue ) ) {
+ pass = FALSE;
+ failure_mssg = "Unexpected dset contents.\n";
+ }
+ nextValue += 1;
+ i++;
+ }
+ }
+
+ /* read H5S_ALL section for the chunked dataset */
+
+#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
+ if ( pass ) {
+ prop_value = H5D_XFER_COLL_RANK0_BCAST_DEF;
+ if(H5Pset(dxpl_id, H5D_XFER_COLL_RANK0_BCAST_NAME, &prop_value) < 0) {
+ pass = FALSE;
+ failure_mssg = "H5Pset() failed\n";
+ }
+ }
+#endif /* H5_HAVE_INSTRUMENTED_LIBRARY */
+
+ for ( i = 0; i < (hsize_t)dset_size; i++) {
+ data_slice[i] = 0;
+ }
+ if ( pass ) {
+ if ( (H5Dread(dset_id_ch, H5T_NATIVE_FLOAT, H5S_ALL,
+ H5S_ALL, dxpl_id, data_slice)) < 0 ) {
+ pass = FALSE;
+ failure_mssg = "H5Dread() failed\n";
+ }
+ }
+
+#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
+ if ( pass ) {
+ prop_value = FALSE;
+ if(H5Pget(dxpl_id, H5D_XFER_COLL_RANK0_BCAST_NAME, &prop_value) < 0) {
+ pass = FALSE;
+ failure_mssg = "H5Pget() failed\n";
+ }
+ if (pass) {
+ if(prop_value == TRUE) {
+ pass = FALSE;
+ failure_mssg = "rank 0 Bcast optimization was mistakenly performed for chunked dataset\n";
+ }
+ }
+ }
+#endif /* H5_HAVE_INSTRUMENTED_LIBRARY */
+
+ /* verify the data */
+ if ( pass ) {
+
+ if ( comm == MPI_COMM_WORLD ) /* test 1 */
+ nextValue = 0;
+ else if ( group_id == 0 ) /* test 2 group 0 */
+ nextValue = 0;
+ else /* test 2 group 1 */
+ nextValue = (float)((hsize_t)( mpi_size / 2 )*count);
+
+ i = 0;
+ while ( ( pass ) && ( i < (hsize_t)dset_size ) ) {
+ /* what we really want is data_slice[i] != nextValue --
+ * the following is a circumlocution to shut up
+ * the compiler.
+ */
+ if ( ( data_slice[i] > nextValue ) ||
+ ( data_slice[i] < nextValue ) ) {
+ pass = FALSE;
+ failure_mssg = "Unexpected chunked dset contents.\n";
+ }
+ nextValue += 1;
+ i++;
+ }
+ }
+
+ if ( pass || (filespace != -1) ) {
+ if ( H5Sclose(filespace) < 0 ) {
+ pass = false;
+ failure_mssg = "H5Sclose(filespace) failed.\n";
+ }
+ }
+
+ /* free data_slice if it has been allocated */
+ if ( data_slice != NULL ) {
+ HDfree(data_slice);
+ data_slice = NULL;
+ }
+
+ /*
+ * Read an H5S_ALL filespace into a hyperslab defined memory space
+ */
+
+ if ( (data_slice = (float *)HDmalloc((size_t)(dset_size*2)*filetype_size)) == NULL ) {
+ pass = FALSE;
+ failure_mssg = "malloc of data_slice failed.\n";
+ }
+
+ /* setup memspace */
+ if ( pass ) {
+ dims[0] = (hsize_t)dset_size*2;
+ if ( (memspace = H5Screate_simple(1, dims, NULL)) < 0 ) {
+ pass = FALSE;
+ failure_mssg = "H5Screate_simple(1, dims, NULL) failed\n";
+ }
+ }
+ if ( pass ) {
+ offset = (hsize_t)dset_size;
+ if ( (H5Sselect_hyperslab(memspace, H5S_SELECT_SET,
+ &offset, NULL, &offset, NULL)) < 0 ) {
+ pass = FALSE;
+ failure_mssg = "H5Sselect_hyperslab() failed\n";
+ }
+ }
+
+#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
+ if ( pass ) {
+ prop_value = H5D_XFER_COLL_RANK0_BCAST_DEF;
+ if(H5Pset(dxpl_id, H5D_XFER_COLL_RANK0_BCAST_NAME, &prop_value) < 0) {
+ pass = FALSE;
+ failure_mssg = "H5Pset() failed\n";
+ }
+ }
+#endif /* H5_HAVE_INSTRUMENTED_LIBRARY */
+
+ /* read this process's section of the data */
+ if ( pass ) {
+ if ( (H5Dread(dset_id, H5T_NATIVE_FLOAT, memspace,
+ H5S_ALL, dxpl_id, data_slice)) < 0 ) {
+ pass = FALSE;
+ failure_mssg = "H5Dread() failed\n";
+ }
+ }
+
+#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
+ if ( pass ) {
+ prop_value = FALSE;
+ if(H5Pget(dxpl_id, H5D_XFER_COLL_RANK0_BCAST_NAME, &prop_value) < 0) {
+ pass = FALSE;
+ failure_mssg = "H5Pget() failed\n";
+ }
+ if (pass) {
+ if(prop_value != TRUE) {
+ pass = FALSE;
+ failure_mssg = "rank 0 Bcast optimization was mistakenly not performed\n";
+ }
+ }
+ }
+#endif /* H5_HAVE_INSTRUMENTED_LIBRARY */
+
+ /* verify the data */
+ if ( pass ) {
+
+ if ( comm == MPI_COMM_WORLD ) /* test 1 */
+ nextValue = 0;
+ else if ( group_id == 0 ) /* test 2 group 0 */
+ nextValue = 0;
+ else /* test 2 group 1 */
+ nextValue = (float)((hsize_t)(mpi_size / 2)*count);
+
+ i = (hsize_t)dset_size;
+ while ( ( pass ) && ( i < (hsize_t)dset_size*2 ) ) {
+ /* what we really want is data_slice[i] != nextValue --
+ * the following is a circumlocution to shut up
+ * the compiler.
+ */
+ if ( ( data_slice[i] > nextValue ) ||
+ ( data_slice[i] < nextValue ) ) {
+ pass = FALSE;
+ failure_mssg = "Unexpected dset contents.\n";
+ }
+ nextValue += 1;
+ i++;
+ }
+ }
+
+ if ( pass || (memspace != -1) ) {
+ if ( H5Sclose(memspace) < 0 ) {
+ pass = false;
+ failure_mssg = "H5Sclose(memspace) failed.\n";
+ }
+ }
+
+ /* free data_slice if it has been allocated */
+ if ( data_slice != NULL ) {
+ HDfree(data_slice);
+ data_slice = NULL;
+ }
+
+ if ( pass || (dxpl_id != -1) ) {
+ if ( H5Pclose(dxpl_id) < 0 ) {
+ pass = false;
+ failure_mssg = "H5Pclose(dxpl_id) failed.\n";
+ }
+ }
+ }
+
+ /* close file, etc. */
+ if ( pass || (dset_id != -1) ) {
+ if ( H5Dclose(dset_id) < 0 ) {
+ pass = false;
+ failure_mssg = "H5Dclose(dset_id) failed.\n";
+ }
+ }
+
+ if ( pass || (dset_id_ch != -1) ) {
+ if ( H5Dclose(dset_id_ch) < 0 ) {
+ pass = false;
+ failure_mssg = "H5Dclose(dset_id_ch) failed.\n";
+ }
+ }
+
if ( pass || (file_id != -1) ) {
if ( H5Fclose(file_id) < 0 ) {
pass = false;
@@ -668,17 +1056,9 @@ test_parallel_read(MPI_Comm comm, int mpi_rank, int group_id)
HDfprintf(stdout, "%s: failure_mssg = \"%s\"\n",
fcn_name, failure_mssg);
}
-
HDremove(reloc_data_filename);
}
- /* free data_slice if it has been allocated */
- if ( data_slice != NULL ) {
- HDfree(data_slice);
- data_slice = NULL;
- }
-
-
return( ! pass );
} /* test_parallel_read() */
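A note on the H5_HAVE_INSTRUMENTED_LIBRARY blocks that recur in the new code: they plant a temporary boolean property on the transfer property list, let H5Dread() run, and then read the property back to learn whether the library actually took the rank-0 read-and-broadcast path. Condensed sketch of one probe, using the same property names as the diff:

    #ifdef H5_HAVE_INSTRUMENTED_LIBRARY
        hbool_t prop_value = H5D_XFER_COLL_RANK0_BCAST_DEF;

        /* Plant the probe property on the DXPL... */
        H5Pinsert2(dxpl_id, H5D_XFER_COLL_RANK0_BCAST_NAME,
                   H5D_XFER_COLL_RANK0_BCAST_SIZE, &prop_value,
                   NULL, NULL, NULL, NULL, NULL, NULL);

        /* ...perform the read under test... */
        H5Dread(dset_id, H5T_NATIVE_FLOAT, H5S_ALL, H5S_ALL, dxpl_id, data_slice);

        /* ...then ask whether the optimization fired. */
        H5Pget(dxpl_id, H5D_XFER_COLL_RANK0_BCAST_NAME, &prop_value);
        if (!prop_value)
            HDputs("rank-0 bcast path was not taken");
    #endif /* H5_HAVE_INSTRUMENTED_LIBRARY */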
@@ -810,7 +1190,7 @@ main( int argc, char **argv)
}
 /* Now read the generated test file (still using MPI_COMM_WORLD) */
- nerrs += test_parallel_read( MPI_COMM_WORLD, mpi_rank, which_group);
+ nerrs += test_parallel_read( MPI_COMM_WORLD, mpi_rank, mpi_size, which_group);
if ( nerrs > 0 ) {
if ( mpi_rank == 0 ) {
@@ -826,7 +1206,7 @@ main( int argc, char **argv)
}
/* run the 2nd set of tests */
- nerrs += test_parallel_read(group_comm, mpi_rank, which_group);
+ nerrs += test_parallel_read(group_comm, mpi_rank, mpi_size, which_group);
if ( nerrs > 0 ) {
if ( mpi_rank == 0 ) {
diff --git a/testpar/testpflush.sh.in b/testpar/testpflush.sh.in
new file mode 100644
index 0000000..02f0e26
--- /dev/null
+++ b/testpar/testpflush.sh.in
@@ -0,0 +1,64 @@
+#! /bin/sh
+#
+# Copyright by The HDF Group.
+# Copyright by the Board of Trustees of the University of Illinois.
+# All rights reserved.
+#
+# This file is part of HDF5. The full HDF5 copyright notice, including
+# terms governing use, modification, and redistribution, is contained in
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
+#
+#
+# Test script for the parallel flush test
+#
+# The parallel flush test uses two programs to test flush operations
+# in parallel HDF5. The first program purposely exits without calling
+# MPI_Finalize(), which is an error under the MPI standard and mpiexec
+# in some implementations will return an error code even though all
+# processes exit successfully. This script lets us swallow the error
+# from the first program.
+#
+# True errors in the first program will be detected as errors in the
+# second program, so watch out for that.
+#
+# Programmer: Dana Robinson
+# Fall 2018
+
+# The build (current) directory might be different than the source directory.
+if test -z "$srcdir"; then
+ srcdir=.
+fi
+
+# Turn the $$ we use to avoid Autotools munging into $
+#
+# Allowing $$ to substitute in both the RUNPARALLEL string and the
+# regexp is intentional. There doesn't seem to be a way around
+# this using quote shenanigans. The downside is that there is a remote
+# chance that the shell's pid will match a number in the RUNPARALLEL
+# variable, but that seems less likely to cause problems than expecting
+# library builders to specify two almost identical versions of the
+# RUNPARALLEL command, one for use in scripts and one via Makefiles.
+RUNPARALLELSCRIPT=`echo "@RUNPARALLEL@" | sed "s/$$/\$/g"`
+
+# ==========================================
+# Run the first parallel flush test program
+# (note that we ignore any errors here)
+# ==========================================
+echo "*** NOTE ***********************************************************"
+echo "You may see complaints from mpiexec et al. that not all processes"
+echo "called MPI_Finalize(). This is an intended characteristic of the"
+echo "test and should not be considered an error."
+echo "********************************************************************"
+eval ${RUNPARALLELSCRIPT} ./t_pflush1
+
+
+# ===========================================
+# Run the second parallel flush test program
+# The return code of this call is the return
+# code of the script.
+# ===========================================
+eval ${RUNPARALLELSCRIPT} ./t_pflush2
+
diff --git a/testpar/testphdf5.c b/testpar/testphdf5.c
index 69b66ae..999e17a 100644
--- a/testpar/testphdf5.c
+++ b/testpar/testphdf5.c
@@ -549,7 +549,10 @@ int main(int argc, char **argv)
AddTest("noselcollmdread", test_partial_no_selection_coll_md_read, NULL,
"Collective Metadata read with some ranks having no selection", PARATESTFILE);
-
+ AddTest("MC coll MD read", test_multi_chunk_io_addrmap_issue, NULL,
+ "Collective MD read with multi chunk I/O (H5D__chunk_addrmap)", PARATESTFILE);
+ AddTest("LC coll MD read", test_link_chunk_io_sort_chunk_issue, NULL,
+ "Collective MD read with link chunk I/O (H5D__sort_chunk)", PARATESTFILE);
/* Display testing information */
TestInfo(argv[0]);
diff --git a/testpar/testphdf5.h b/testpar/testphdf5.h
index 176574e..4409221 100644
--- a/testpar/testphdf5.h
+++ b/testpar/testphdf5.h
@@ -295,6 +295,8 @@ void compress_readAll(void);
#endif /* H5_HAVE_FILTER_DEFLATE */
void test_dense_attr(void);
void test_partial_no_selection_coll_md_read(void);
+void test_multi_chunk_io_addrmap_issue(void);
+void test_link_chunk_io_sort_chunk_issue(void);
/* commonly used prototypes */
hid_t create_faccess_plist(MPI_Comm comm, MPI_Info info, int l_facc_type);