Diffstat (limited to 'testpar')
-rw-r--r--  testpar/CMakeLists.txt          62
-rw-r--r--  testpar/CMakeTests.cmake       145
-rw-r--r--  testpar/CMakeVFDTests.cmake     76
-rw-r--r--  testpar/Makefile.am             24
-rw-r--r--  testpar/t_2Gio.c              4994
-rw-r--r--  testpar/t_bigio.c             1392
-rw-r--r--  testpar/t_cache.c             3165
-rw-r--r--  testpar/t_cache_image.c        779
-rw-r--r--  testpar/t_chunk_alloc.c        165
-rw-r--r--  testpar/t_coll_chunk.c          63
-rw-r--r--  testpar/t_coll_md_read.c       506
-rw-r--r--  testpar/t_dset.c              1310
-rw-r--r--  testpar/t_file.c               353
-rw-r--r--  testpar/t_file_image.c          62
-rw-r--r--  testpar/t_filter_read.c         72
-rw-r--r--  testpar/t_filters_parallel.c  6065
-rw-r--r--  testpar/t_filters_parallel.h   339
-rw-r--r--  testpar/t_init_term.c            4
-rw-r--r--  testpar/t_mdset.c             1264
-rw-r--r--  testpar/t_mpi.c               1591
-rw-r--r--  testpar/t_pflush1.c            264
-rw-r--r--  testpar/t_pflush2.c            265
-rw-r--r--  testpar/t_ph5basic.c           135
-rw-r--r--  testpar/t_pread.c             1251
-rw-r--r--  testpar/t_prestart.c            25
-rw-r--r--  testpar/t_prop.c                18
-rw-r--r--  testpar/t_pshutdown.c           16
-rw-r--r--  testpar/t_shapesame.c         1821
-rw-r--r--  testpar/t_span_tree.c          901
-rw-r--r--  testpar/testpar.h               36
-rw-r--r--  testpar/testpflush.sh.in        64
-rw-r--r--  testpar/testphdf5.c            478
-rw-r--r--  testpar/testphdf5.h             62
33 files changed, 20510 insertions, 7257 deletions
diff --git a/testpar/CMakeLists.txt b/testpar/CMakeLists.txt
index e994b65..a9f45d5 100644
--- a/testpar/CMakeLists.txt
+++ b/testpar/CMakeLists.txt
@@ -1,14 +1,7 @@
-cmake_minimum_required (VERSION 3.2.2)
-PROJECT (HDF5_TEST_PAR)
+cmake_minimum_required (VERSION 3.12)
+project (HDF5_TEST_PAR C)
#-----------------------------------------------------------------------------
-# Apply Definitions to compiler in this directory and below
-#-----------------------------------------------------------------------------
-add_definitions (${HDF_EXTRA_C_FLAGS})
-
-INCLUDE_DIRECTORIES (${HDF5_TEST_SRC_DIR})
-INCLUDE_DIRECTORIES (${HDF5_TOOLS_DIR}/lib )
-#-----------------------------------------------------------------------------
# Define Tests
#-----------------------------------------------------------------------------
@@ -24,20 +17,47 @@ set (testphdf5_SOURCES
${HDF5_TEST_PAR_SOURCE_DIR}/t_chunk_alloc.c
${HDF5_TEST_PAR_SOURCE_DIR}/t_filter_read.c
${HDF5_TEST_PAR_SOURCE_DIR}/t_prop.c
+ ${HDF5_TEST_PAR_SOURCE_DIR}/t_coll_md_read.c
)
#-- Adding test for testhdf5
add_executable (testphdf5 ${testphdf5_SOURCES})
-TARGET_NAMING (testphdf5 STATIC)
-TARGET_C_PROPERTIES (testphdf5 STATIC " " " ")
-target_link_libraries (testphdf5 ${HDF5_TEST_LIB_TARGET} ${HDF5_LIB_TARGET} ${LINK_LIBS})
+target_compile_options(testphdf5 PRIVATE "${HDF5_CMAKE_C_FLAGS}")
+target_include_directories (testphdf5
+ PRIVATE "${HDF5_SRC_DIR};${HDF5_BINARY_DIR};$<$<BOOL:${HDF5_ENABLE_PARALLEL}>:${MPI_C_INCLUDE_DIRS}>"
+)
+if (NOT BUILD_SHARED_LIBS)
+ TARGET_C_PROPERTIES (testphdf5 STATIC)
+ target_link_libraries (testphdf5
+ PRIVATE ${HDF5_TEST_LIB_TARGET} ${HDF5_LIB_TARGET} "$<$<BOOL:${HDF5_ENABLE_PARALLEL}>:${MPI_C_LIBRARIES}>"
+ )
+else ()
+ TARGET_C_PROPERTIES (testphdf5 SHARED)
+ target_link_libraries (testphdf5
+ PRIVATE ${HDF5_TEST_LIBSH_TARGET} ${HDF5_LIBSH_TARGET} "$<$<BOOL:${HDF5_ENABLE_PARALLEL}>:${MPI_C_LIBRARIES}>"
+ )
+endif ()
set_target_properties (testphdf5 PROPERTIES FOLDER test/par)
MACRO (ADD_H5P_EXE file)
add_executable (${file} ${HDF5_TEST_PAR_SOURCE_DIR}/${file}.c)
- TARGET_NAMING (${file} STATIC)
- TARGET_C_PROPERTIES (${file} STATIC " " " ")
- target_link_libraries (${file} ${HDF5_TEST_LIB_TARGET} ${HDF5_LIB_TARGET} ${LINK_LIBS})
+ target_compile_options(${file} PRIVATE "${HDF5_CMAKE_C_FLAGS}")
+ target_include_directories (${file}
+ PRIVATE "${HDF5_SRC_DIR};${HDF5_BINARY_DIR};$<$<BOOL:${HDF5_ENABLE_PARALLEL}>:${MPI_C_INCLUDE_DIRS}>"
+ )
+ if (NOT BUILD_SHARED_LIBS)
+ TARGET_C_PROPERTIES (${file} STATIC)
+ target_link_libraries (${file}
+ PRIVATE ${HDF5_TEST_LIB_TARGET} ${HDF5_LIB_TARGET} "$<$<BOOL:${HDF5_ENABLE_PARALLEL}>:${MPI_C_LIBRARIES}>"
+ $<$<OR:$<PLATFORM_ID:Windows>,$<PLATFORM_ID:MinGW>>:ws2_32.lib>
+ )
+ else ()
+ TARGET_C_PROPERTIES (${file} SHARED)
+ target_link_libraries (${file}
+ PRIVATE ${HDF5_TEST_LIBSH_TARGET} ${HDF5_LIBSH_TARGET} "$<$<BOOL:${HDF5_ENABLE_PARALLEL}>:${MPI_C_LIBRARIES}>"
+ $<$<OR:$<PLATFORM_ID:Windows>,$<PLATFORM_ID:MinGW>>:ws2_32.lib>
+ )
+ endif ()
set_target_properties (${file} PROPERTIES FOLDER test/par)
ENDMACRO (ADD_H5P_EXE file)
@@ -45,16 +65,22 @@ set (H5P_TESTS
t_mpi
t_bigio
t_cache
+ t_cache_image
t_pflush1
t_pflush2
+ t_pread
t_pshutdown
t_prestart
t_init_term
t_shapesame
+ t_filters_parallel
+ t_2Gio
)
-foreach (testp ${H5P_TESTS})
- ADD_H5P_EXE(${testp})
+foreach (h5_testp ${H5P_TESTS})
+ ADD_H5P_EXE(${h5_testp})
endforeach ()
-include (CMakeTests.cmake)
+if (HDF5_TEST_PARALLEL)
+ include (CMakeTests.cmake)
+endif ()
diff --git a/testpar/CMakeTests.cmake b/testpar/CMakeTests.cmake
index 6e2b05e..5848c60 100644
--- a/testpar/CMakeTests.cmake
+++ b/testpar/CMakeTests.cmake
@@ -15,59 +15,118 @@
### T E S T I N G ###
##############################################################################
##############################################################################
+# Remove any output file left over from previous test run
+add_test (
+ NAME MPI_TEST-clear-testphdf5-objects
+ COMMAND ${CMAKE_COMMAND} -E remove ParaTest.h5
+ WORKING_DIRECTORY ${HDF5_TEST_PAR_BINARY_DIR}
+)
+set_tests_properties (MPI_TEST-clear-testphdf5-objects PROPERTIES FIXTURES_SETUP par_clear_testphdf5)
-add_test (NAME TEST_PAR_testphdf5 COMMAND ${MPIEXEC} ${MPIEXEC_PREFLAGS} ${MPIEXEC_NUMPROC_FLAG} ${MPIEXEC_MAX_NUMPROCS} ${MPIEXEC_POSTFLAGS} $<TARGET_FILE:testphdf5>)
+set (SKIP_tests
+ cchunk1
+ cchunk2
+ cchunk3
+ cchunk4
+ ecdsetw
+ eidsetw2
+ selnone
+ cngrpw-ingrpr
+ cschunkw
+ ccchunkw
+ tldsc
+ actualio
+ MC_coll_MD_read
+)
+set (SKIP_testphdf5 "")
+foreach (skiptest ${SKIP_tests})
+ set (SKIP_testphdf5 "${SKIP_testphdf5};-x;${skiptest}")
+endforeach ()
+
+add_test (NAME MPI_TEST_testphdf5 COMMAND ${MPIEXEC_EXECUTABLE} ${MPIEXEC_NUMPROC_FLAG} ${MPIEXEC_MAX_NUMPROCS} ${MPIEXEC_PREFLAGS} $<TARGET_FILE:testphdf5> ${MPIEXEC_POSTFLAGS} ${SKIP_testphdf5})
+set_tests_properties (MPI_TEST_testphdf5 PROPERTIES
+ FIXTURES_REQUIRED par_clear_testphdf5
+ ENVIRONMENT "HDF5_ALARM_SECONDS=3600;srcdir=${HDF5_TEST_PAR_BINARY_DIR}"
+ WORKING_DIRECTORY ${HDF5_TEST_PAR_BINARY_DIR}
+)
+if (last_test)
+ set_tests_properties (MPI_TEST_testphdf5 PROPERTIES DEPENDS ${last_test})
+endif ()
+set (last_test "MPI_TEST_testphdf5")
-foreach (testp ${H5P_TESTS})
- add_test (NAME TEST_PAR_${testp} COMMAND ${MPIEXEC} ${MPIEXEC_PREFLAGS} ${MPIEXEC_NUMPROC_FLAG} ${MPIEXEC_MAX_NUMPROCS} ${MPIEXEC_POSTFLAGS} $<TARGET_FILE:${testp}>)
+# Execute each skipped test individually via the -o (only) flag
+foreach (skiptest ${SKIP_tests})
+ add_test (NAME MPI_TEST_testphdf5_${skiptest} COMMAND ${MPIEXEC_EXECUTABLE} ${MPIEXEC_NUMPROC_FLAG} ${MPIEXEC_MAX_NUMPROCS} ${MPIEXEC_PREFLAGS} $<TARGET_FILE:testphdf5> ${MPIEXEC_POSTFLAGS} -o ${skiptest})
+ set_tests_properties (MPI_TEST_testphdf5_${skiptest} PROPERTIES
+ FIXTURES_REQUIRED par_clear_testphdf5
+ ENVIRONMENT "HDF5_ALARM_SECONDS=3600;srcdir=${HDF5_TEST_PAR_BINARY_DIR}"
+ WORKING_DIRECTORY ${HDF5_TEST_PAR_BINARY_DIR}
+ )
+ if (last_test)
+ set_tests_properties (MPI_TEST_testphdf5_${skiptest} PROPERTIES DEPENDS ${last_test})
+ endif ()
+ set (last_test "MPI_TEST_testphdf5_${skiptest}")
endforeach ()
-# The following will only be correct on windows shared
-#set_tests_properties (TEST_PAR_t_pflush1 PROPERTIES WILL_FAIL "true")
-set_property (TEST TEST_PAR_t_pflush1 PROPERTY PASS_REGULAR_EXPRESSION "PASSED")
-set_tests_properties (TEST_PAR_t_pflush2 PROPERTIES DEPENDS TEST_PAR_t_pflush1)
+#if (HDF5_OPENMPI_VERSION_SKIP)
+# list (REMOVE_ITEM H5P_TESTS t_shapesame)
+#endif ()
-if (HDF5_TEST_VFD)
+# Do not test t_cache_image until a new version is added
+list (REMOVE_ITEM H5P_TESTS t_cache_image)
- set (VFD_LIST
- sec2
- stdio
- core
- split
- multi
- family
- )
+set (test_par_CLEANFILES
+ t_cache_image_00.h5
+ t_cache_image_01.h5
+ t_cache_image_02.h5
+ flush.h5
+ noflush.h5
+ reloc_t_pread_data_file.h5
+ reloc_t_pread_group_0_file.h5
+ reloc_t_pread_group_1_file.h5
+ shutdown.h5
+ after_mpi_fin.h5
+  # The following should have been removed by the test programs themselves
+ bigio_test.h5
+ CacheTestDummy.h5
+ t_filters_parallel.h5
+ MPItest.h5
+ ShapeSameTest.h5
+)
- set (H5P_VFD_TESTS
- t_pflush1
- t_pflush2
- )
+# Remove any output file left over from previous test run
+add_test (
+ NAME MPI_TEST-clear-objects
+ COMMAND ${CMAKE_COMMAND} -E remove ${test_par_CLEANFILES}
+ WORKING_DIRECTORY ${HDF5_TEST_PAR_BINARY_DIR}
+)
+set_tests_properties (MPI_TEST-clear-objects PROPERTIES FIXTURES_SETUP par_clear_objects)
- if (DIRECT_VFD)
- set (VFD_LIST ${VFD_LIST} direct)
+foreach (h5_testp ${H5P_TESTS})
+ add_test (NAME MPI_TEST_${h5_testp} COMMAND ${MPIEXEC_EXECUTABLE} ${MPIEXEC_NUMPROC_FLAG} ${MPIEXEC_MAX_NUMPROCS} ${MPIEXEC_PREFLAGS} $<TARGET_FILE:${h5_testp}> ${MPIEXEC_POSTFLAGS})
+ set_tests_properties (MPI_TEST_${h5_testp} PROPERTIES
+ FIXTURES_REQUIRED par_clear_objects
+ ENVIRONMENT "HDF5_ALARM_SECONDS=3600;srcdir=${HDF5_TEST_PAR_BINARY_DIR}"
+ WORKING_DIRECTORY ${HDF5_TEST_PAR_BINARY_DIR}
+ )
+ if (last_test)
+ set_tests_properties (MPI_TEST_${h5_testp} PROPERTIES DEPENDS ${last_test})
endif ()
+ set (last_test "MPI_TEST_${h5_testp}")
+endforeach ()
- macro (ADD_VFD_TEST vfdname resultcode)
- if (NOT HDF5_ENABLE_USING_MEMCHECKER)
- foreach (test ${H5P_VFD_TESTS})
- add_test (
- NAME TEST_PAR_VFD-${vfdname}-${test}
- COMMAND "${CMAKE_COMMAND}"
- -D "TEST_PROGRAM=$<TARGET_FILE:${test}>"
- -D "TEST_ARGS:STRING="
- -D "TEST_VFD:STRING=${vfdname}"
- -D "TEST_EXPECT=${resultcode}"
- -D "TEST_OUTPUT=${test}"
- -D "TEST_FOLDER=${PROJECT_BINARY_DIR}"
- -P "${HDF_RESOURCES_DIR}/vfdTest.cmake"
- )
- endforeach ()
- endif ()
- endmacro ()
+# The t_pflush1 test is hard-coded to fail.
+set_tests_properties (MPI_TEST_t_pflush1 PROPERTIES WILL_FAIL "true")
+#set_property (TEST MPI_TEST_t_pflush1 PROPERTY PASS_REGULAR_EXPRESSION "PASSED")
+set_tests_properties (MPI_TEST_t_pflush2 PROPERTIES DEPENDS MPI_TEST_t_pflush1)
+set_tests_properties (MPI_TEST_t_prestart PROPERTIES DEPENDS MPI_TEST_t_pshutdown)
- # Run test with different Virtual File Driver
- foreach (vfd ${VFD_LIST})
- ADD_VFD_TEST (${vfd} 0)
- endforeach ()
+##############################################################################
+##############################################################################
+### V F D T E S T S ###
+##############################################################################
+##############################################################################
+if (HDF5_TEST_VFD)
+ include (CMakeVFDTests.cmake)
endif ()
diff --git a/testpar/CMakeVFDTests.cmake b/testpar/CMakeVFDTests.cmake
new file mode 100644
index 0000000..4d6b18c
--- /dev/null
+++ b/testpar/CMakeVFDTests.cmake
@@ -0,0 +1,76 @@
+#
+# Copyright by The HDF Group.
+# All rights reserved.
+#
+# This file is part of HDF5. The full HDF5 copyright notice, including
+# terms governing use, modification, and redistribution, is contained in
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
+#
+
+##############################################################################
+##############################################################################
+### T E S T I N G ###
+##############################################################################
+##############################################################################
+set (VFD_LIST
+ sec2
+ stdio
+ core
+ core_paged
+ split
+ multi
+ family
+)
+
+set (H5P_VFD_TESTS
+ t_pflush1
+ t_pflush2
+)
+
+if (DIRECT_VFD)
+ set (VFD_LIST ${VFD_LIST} direct)
+endif ()
+
+foreach (vfdtest ${VFD_LIST})
+ file (MAKE_DIRECTORY "${PROJECT_BINARY_DIR}/${vfdtest}")
+endforeach ()
+
+macro (ADD_VFD_TEST vfdname resultcode)
+ if (NOT HDF5_ENABLE_USING_MEMCHECKER)
+ foreach (h5_test ${H5P_VFD_TESTS})
+ add_test (
+ NAME MPI_TEST_VFD-${vfdname}-${h5_test}
+ COMMAND "${CMAKE_COMMAND}"
+ -D "TEST_EMULATOR=${CMAKE_CROSSCOMPILING_EMULATOR}"
+ -D "TEST_PROGRAM=$<TARGET_FILE:${h5_test}>"
+ -D "TEST_ARGS:STRING="
+ -D "TEST_VFD:STRING=${vfdname}"
+ -D "TEST_EXPECT=${resultcode}"
+ -D "TEST_OUTPUT=${vfdname}-${h5_test}.out"
+ -D "TEST_FOLDER=${PROJECT_BINARY_DIR}/${vfdname}"
+ -P "${HDF_RESOURCES_DIR}/vfdTest.cmake"
+ )
+ set_tests_properties (MPI_TEST_VFD-${vfdname}-${h5_test} PROPERTIES
+ ENVIRONMENT "srcdir=${HDF5_TEST_PAR_BINARY_DIR}/${vfdname}"
+ WORKING_DIRECTORY ${HDF5_TEST_PAR_BINARY_DIR}/${vfdname}
+ )
+ endforeach ()
+    set_tests_properties (MPI_TEST_VFD-${vfdname}-t_pflush1 PROPERTIES WILL_FAIL "true")
+    #set_property (TEST MPI_TEST_VFD-${vfdname}-t_pflush1 PROPERTY PASS_REGULAR_EXPRESSION "PASSED")
+    set_tests_properties (MPI_TEST_VFD-${vfdname}-t_pflush2 PROPERTIES DEPENDS MPI_TEST_VFD-${vfdname}-t_pflush1)
+ endif ()
+endmacro ()
+
+##############################################################################
+##############################################################################
+### T H E T E S T S ###
+##############################################################################
+##############################################################################
+
+# Run test with different Virtual File Driver
+foreach (h5_vfd ${VFD_LIST})
+ ADD_VFD_TEST (${h5_vfd} 0)
+endforeach ()
diff --git a/testpar/Makefile.am b/testpar/Makefile.am
index 7029bd5..4509945 100644
--- a/testpar/Makefile.am
+++ b/testpar/Makefile.am
@@ -21,25 +21,41 @@ include $(top_srcdir)/config/commence.am
AM_CPPFLAGS+=-I$(top_srcdir)/src -I$(top_srcdir)/test
+# Test scripts--
+# testpflush.sh:
+TEST_SCRIPT_PARA = testpflush.sh
+SCRIPT_DEPEND = t_pflush1$(EXEEXT) t_pflush2$(EXEEXT)
+
+check_SCRIPTS = $(TEST_SCRIPT_PARA)
+
# Test programs. These are our main targets.
#
-TEST_PROG_PARA=t_mpi t_bigio testphdf5 t_cache t_cache_image t_pflush1 t_pflush2 t_pshutdown t_prestart t_init_term t_shapesame
+TEST_PROG_PARA=t_mpi t_bigio testphdf5 t_cache t_cache_image t_pread t_pshutdown t_prestart t_init_term t_shapesame t_filters_parallel t_2Gio
-check_PROGRAMS = $(TEST_PROG_PARA)
+# t_pflush1 and t_pflush2 are used by testpflush.sh
+check_PROGRAMS = $(TEST_PROG_PARA) t_pflush1 t_pflush2
testphdf5_SOURCES=testphdf5.c t_dset.c t_file.c t_file_image.c t_mdset.c \
t_ph5basic.c t_coll_chunk.c t_span_tree.c t_chunk_alloc.c t_filter_read.c \
- t_prop.c
+ t_prop.c t_coll_md_read.c
# The tests all depend on the hdf5 library and the test library
LDADD = $(LIBH5TEST) $(LIBHDF5)
+# Test with just the native connector, with a single pass-through connector
+# and with a doubly-stacked pass-through.
+VOL_LIST = native "pass_through under_vol=0;under_info={}" \
+ "pass_through under_vol=505;under_info={under_vol=0;under_info={}}"
+
# Temporary files
# MPItest.h5 is from t_mpi
# Para*.h5 are from testphdf5
+# bigio_test.h5 is from t_bigio
+# ShapeSameTest.h5 is from t_shapesame
# shutdown.h5 is from t_pshutdown
# after_mpi_fin.h5 is from t_init_term
# go is used for debugging. See testphdf5.c.
-CHECK_CLEANFILES+=MPItest.h5 Para*.h5 CacheTestDummy.h5 shutdown.h5 after_mpi_fin.h5 go
+CHECK_CLEANFILES+=MPItest.h5 Para*.h5 bigio_test.h5 CacheTestDummy.h5 \
+ ShapeSameTest.h5 shutdown.h5 after_mpi_fin.h5 go
include $(top_srcdir)/config/conclude.am
diff --git a/testpar/t_2Gio.c b/testpar/t_2Gio.c
new file mode 100644
index 0000000..54ea546
--- /dev/null
+++ b/testpar/t_2Gio.c
@@ -0,0 +1,4994 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * Copyright by the Board of Trustees of the University of Illinois. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/*
+ * Parallel tests for datasets
+ */
+
+/*
+ * Example of using the parallel HDF5 library to access datasets.
+ *
+ * This program contains three major parts. Part 1 tests fixed dimension
+ * datasets, for both independent and collective transfer modes.
+ * Part 2 tests extendible datasets, for independent transfer mode
+ * only.
+ * Part 3 tests extendible datasets, for collective transfer mode
+ * only.
+ */
+
+#include <stdio.h>
+#include "hdf5.h"
+#include "testphdf5.h"
+
+#include "mpi.h"
+
+
+/* For this test, we don't want to inherit the RANK definition
+ * from testphdf5.h. We'll define MAX_RANK to accommodate 3D arrays
+ * and use that definition rather than RANK.
+ */
+#ifndef MAX_RANK
+#define MAX_RANK 2
+#endif
+
+/* As with RANK vs MAX_RANK, we use BIG_X_FACTOR vs ROW_FACTOR
+ * and BIG_Y_FACTOR vs COL_FACTOR. We introduce BIG_Z_FACTOR
+ * for the 3rd dimension.
+ */
+
+#ifndef BIG_X_FACTOR
+#define BIG_X_FACTOR 1048576
+#endif
+#ifndef BIG_Y_FACTOR
+#define BIG_Y_FACTOR 32
+#endif
+#ifndef BIG_Z_FACTOR
+#define BIG_Z_FACTOR 2048
+#endif
+
+#ifndef PATH_MAX
+#define PATH_MAX 512
+#endif /* !PATH_MAX */
+
+/* global variables */
+int dim0;
+int dim1;
+int dim2;
+int chunkdim0;
+int chunkdim1;
+int nerrors = 0; /* errors count */
+int ndatasets = 300; /* number of datasets to create*/
+int ngroups = 512; /* number of groups to create in root
+ * group. */
+int facc_type = FACC_MPIO; /*Test file access type */
+int dxfer_coll_type = DXFER_COLLECTIVE_IO;
+
+H5E_auto2_t old_func; /* previous error handler */
+void *old_client_data; /* previous error handler arg.*/
+
+#define NFILENAME 3
+#define PARATESTFILE filenames[0]
+const char *FILENAME[NFILENAME]={
+ "ParaTest",
+ "Hugefile",
+ NULL};
+char filenames[NFILENAME][PATH_MAX];
+hid_t fapl; /* file access property list */
+MPI_Comm test_comm = MPI_COMM_WORLD;
+
+// static int enable_error_stack = 0; /* enable error stack; disable=0 enable=1 */
+// static const char *TestProgName = NULL;
+// static void (*TestPrivateUsage)(void) = NULL;
+// static int (*TestPrivateParser)(int ac, char *av[]) = NULL;
+
+/*
+ * The following are various utility routines used by the tests.
+ */
+
+
+/*
+ * Show command usage
+ */
+static void
+usage(void)
+{
+ HDprintf(" [-r] [-w] [-m<n_datasets>] [-n<n_groups>] "
+ "[-o] [-f <prefix>] [-d <dim0> <dim1>]\n");
+ HDprintf("\t-m<n_datasets>"
+ "\tset number of datasets for the multiple dataset test\n");
+ HDprintf("\t-n<n_groups>"
+ "\tset number of groups for the multiple group test\n");
+ HDprintf("\t-f <prefix>\tfilename prefix\n");
+ HDprintf("\t-2\t\tuse Split-file together with MPIO\n");
+ HDprintf("\t-d <factor0> <factor1>\tdataset dimensions factors. Defaults (%d,%d)\n",
+ BIG_X_FACTOR, BIG_Y_FACTOR);
+ HDprintf("\t-c <dim0> <dim1>\tdataset chunk dimensions. Defaults (dim0/10,dim1/10)\n");
+ HDprintf("\n");
+}
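+
+/* Example invocation (illustrative only; the numbers are arbitrary):
+ *
+ *     mpiexec -n 4 ./t_2Gio -d 4 8 -c 2 2
+ *
+ * Each -d factor is multiplied by the number of MPI processes, so a
+ * 4-rank run gets a 16 x 32 dataset, chunked in 2 x 2 blocks.
+ */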
+
+/*
+ * parse the command line options
+ */
+static int
+parse_options(int argc, char **argv)
+{
+ int mpi_size, mpi_rank; /* mpi variables */
+
+ MPI_Comm_size(test_comm, &mpi_size);
+ MPI_Comm_rank(test_comm, &mpi_rank);
+
+ /* setup default chunk-size. Make sure sizes are > 0 */
+
+ chunkdim0 = (dim0+9)/10;
+ chunkdim1 = (dim1+9)/10;
+
+ while (--argc){
+ if (**(++argv) != '-'){
+ break;
+ }else{
+ switch(*(*argv+1)){
+ case 'm': ndatasets = atoi((*argv+1)+1);
+ if (ndatasets < 0){
+ nerrors++;
+ return(1);
+ }
+ break;
+ case 'n': ngroups = atoi((*argv+1)+1);
+ if (ngroups < 0){
+ nerrors++;
+ return(1);
+ }
+ break;
+ case 'f': if (--argc < 1) {
+ nerrors++;
+ return(1);
+ }
+ if (**(++argv) == '-') {
+ nerrors++;
+ return(1);
+ }
+ paraprefix = *argv;
+ break;
+ case 'i': /* Collective MPI-IO access with independent IO */
+ dxfer_coll_type = DXFER_INDEPENDENT_IO;
+ break;
+ case '2': /* Use the split-file driver with MPIO access */
+ /* Can use $HDF5_METAPREFIX to define the */
+ /* meta-file-prefix. */
+ facc_type = FACC_MPIO | FACC_SPLIT;
+ break;
+            case 'd': /* dimension sizes */
+ if (--argc < 2){
+ nerrors++;
+ return(1);
+ }
+ dim0 = atoi(*(++argv))*mpi_size;
+ argc--;
+ dim1 = atoi(*(++argv))*mpi_size;
+ /* set default chunkdim sizes too */
+ chunkdim0 = (dim0+9)/10;
+ chunkdim1 = (dim1+9)/10;
+ break;
+ case 'c': /* chunk dimensions */
+ if (--argc < 2){
+ nerrors++;
+ return(1);
+ }
+ chunkdim0 = atoi(*(++argv));
+ argc--;
+ chunkdim1 = atoi(*(++argv));
+ break;
+ case 'h': /* print help message--return with nerrors set */
+ return(1);
+ default: HDprintf("Illegal option(%s)\n", *argv);
+ nerrors++;
+ return(1);
+ }
+ }
+ } /*while*/
+
+ /* check validity of dimension and chunk sizes */
+ if (dim0 <= 0 || dim1 <= 0){
+ HDprintf("Illegal dim sizes (%d, %d)\n", dim0, dim1);
+ nerrors++;
+ return(1);
+ }
+ if (chunkdim0 <= 0 || chunkdim1 <= 0){
+ HDprintf("Illegal chunkdim sizes (%d, %d)\n", chunkdim0, chunkdim1);
+ nerrors++;
+ return(1);
+ }
+
+ /* Make sure datasets can be divided into equal portions by the processes */
+ if ((dim0 % mpi_size) || (dim1 % mpi_size)){
+ if (MAINPROCESS)
+ HDprintf("dim0(%d) and dim1(%d) must be multiples of processes(%d)\n",
+ dim0, dim1, mpi_size);
+ nerrors++;
+ return(1);
+ }
+
+ /* compose the test filenames */
+ {
+ int i, n;
+
+ n = sizeof(FILENAME)/sizeof(FILENAME[0]) - 1; /* exclude the NULL */
+
+ for (i=0; i < n; i++)
+ if (h5_fixname(FILENAME[i],fapl,filenames[i],sizeof(filenames[i]))
+ == NULL){
+ HDprintf("h5_fixname failed\n");
+ nerrors++;
+ return(1);
+ }
+
+ if (MAINPROCESS) {
+ HDprintf("Test filenames are:\n");
+ for (i=0; i < n; i++)
+ HDprintf(" %s\n", filenames[i]);
+ }
+ }
+
+ return(0);
+}
+
+/*
+ * Create the appropriate File access property list
+ */
+hid_t
+create_faccess_plist(MPI_Comm comm, MPI_Info info, int l_facc_type)
+{
+ hid_t ret_pl = -1;
+ herr_t ret; /* generic return value */
+ int mpi_rank; /* mpi variables */
+
+ /* need the rank for error checking macros */
+ MPI_Comm_rank(test_comm, &mpi_rank);
+
+ ret_pl = H5Pcreate (H5P_FILE_ACCESS);
+ VRFY((ret_pl >= 0), "H5P_FILE_ACCESS");
+
+ if (l_facc_type == FACC_DEFAULT)
+ return (ret_pl);
+
+ if (l_facc_type == FACC_MPIO){
+ /* set Parallel access with communicator */
+ ret = H5Pset_fapl_mpio(ret_pl, comm, info);
+ VRFY((ret >= 0), "");
+ ret = H5Pset_all_coll_metadata_ops(ret_pl, TRUE);
+ VRFY((ret >= 0), "");
+ ret = H5Pset_coll_metadata_write(ret_pl, TRUE);
+ VRFY((ret >= 0), "");
+ return(ret_pl);
+ }
+
+ if (l_facc_type == (FACC_MPIO | FACC_SPLIT)){
+ hid_t mpio_pl;
+
+ mpio_pl = H5Pcreate (H5P_FILE_ACCESS);
+ VRFY((mpio_pl >= 0), "");
+ /* set Parallel access with communicator */
+ ret = H5Pset_fapl_mpio(mpio_pl, comm, info);
+ VRFY((ret >= 0), "");
+
+ /* setup file access template */
+ ret_pl = H5Pcreate (H5P_FILE_ACCESS);
+ VRFY((ret_pl >= 0), "");
+ /* set Parallel access with communicator */
+ ret = H5Pset_fapl_split(ret_pl, ".meta", mpio_pl, ".raw", mpio_pl);
+ VRFY((ret >= 0), "H5Pset_fapl_split succeeded");
+ H5Pclose(mpio_pl);
+ return(ret_pl);
+ }
+
+ /* unknown file access types */
+ return (ret_pl);
+}
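+
+/* A minimal usage sketch (illustrative only, not called by the tests;
+ * "example.h5" is a placeholder name and error checks are elided):
+ * build an MPI-IO access list, create a file with it, then release
+ * both handles.
+ */
+#if 0
+    hid_t ex_fapl = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, FACC_MPIO);
+    hid_t ex_fid  = H5Fcreate("example.h5", H5F_ACC_TRUNC, H5P_DEFAULT, ex_fapl);
+
+    /* ... collective I/O on ex_fid ... */
+
+    H5Pclose(ex_fapl);
+    H5Fclose(ex_fid);
+#endif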
+
+
+/*
+ * Setup the dimensions of the hyperslab.
+ * Two modes--by rows or by columns.
+ * Assume dimension rank is 2.
+ * BYROW divide into slabs of rows
+ * BYCOL divide into blocks of columns
+ * ZROW same as BYROW except process 0 gets 0 rows
+ * ZCOL same as BYCOL except process 0 gets 0 columns
+ */
+static void
+slab_set(int mpi_rank, int mpi_size, hsize_t start[], hsize_t count[],
+ hsize_t stride[], hsize_t block[], int mode)
+{
+ switch (mode) {
+ case BYROW:
+        /* Each process takes a slab of rows. */
+ block[0] = (hsize_t)dim0 / (hsize_t)mpi_size;
+ block[1] = (hsize_t)dim1;
+ stride[0] = block[0];
+ stride[1] = block[1];
+ count[0] = 1;
+ count[1] = 1;
+ start[0] = (hsize_t)mpi_rank * block[0];
+ start[1] = 0;
+ if (VERBOSE_MED)
+ HDprintf("slab_set BYROW\n");
+ break;
+ case BYCOL:
+ /* Each process takes a block of columns. */
+ block[0] = (hsize_t)dim0;
+ block[1] = (hsize_t)dim1 / (hsize_t)mpi_size;
+ stride[0] = block[0];
+ stride[1] = block[1];
+ count[0] = 1;
+ count[1] = 1;
+ start[0] = 0;
+ start[1] = (hsize_t)mpi_rank * block[1];
+ if (VERBOSE_MED)
+ HDprintf("slab_set BYCOL\n");
+ break;
+ case ZROW:
+ /* Similar to BYROW except process 0 gets 0 row */
+ block[0] = (hsize_t)(mpi_rank ? dim0 / mpi_size : 0);
+ block[1] = (hsize_t)dim1;
+ stride[0] = (mpi_rank ? block[0] : 1); /* avoid setting stride to 0 */
+ stride[1] = block[1];
+ count[0] = 1;
+ count[1] = 1;
+ start[0] = (hsize_t)(mpi_rank ? (hsize_t)mpi_rank * block[0] : 0);
+ start[1] = 0;
+ if (VERBOSE_MED)
+ HDprintf("slab_set ZROW\n");
+ break;
+ case ZCOL:
+ /* Similar to BYCOL except process 0 gets 0 column */
+ block[0] = (hsize_t)dim0;
+ block[1] = (hsize_t)(mpi_rank ? dim1 / mpi_size : 0);
+ stride[0] = block[0];
+ stride[1] = (mpi_rank ? block[1] : 1); /* avoid setting stride to 0 */
+ count[0] = 1;
+ count[1] = 1;
+ start[0] = 0;
+ start[1] = (hsize_t)(mpi_rank ? (hsize_t)mpi_rank * block[1] : 0);
+ if (VERBOSE_MED)
+ HDprintf("slab_set ZCOL\n");
+ break;
+ default:
+ /* Unknown mode. Set it to cover the whole dataset. */
+ HDprintf("unknown slab_set mode (%d)\n", mode);
+ block[0] = (hsize_t)dim0;
+ block[1] = (hsize_t)dim1;
+ stride[0] = block[0];
+ stride[1] = block[1];
+ count[0] = 1;
+ count[1] = 1;
+ start[0] = 0;
+ start[1] = 0;
+ if (VERBOSE_MED)
+ HDprintf("slab_set wholeset\n");
+ break;
+ }
+ if (VERBOSE_MED) {
+ HDprintf(
+ "start[]=(%lu,%lu), count[]=(%lu,%lu), stride[]=(%lu,%lu), block[]=(%lu,%lu), total datapoints=%lu\n",
+ (unsigned long) start[0], (unsigned long) start[1],
+ (unsigned long) count[0], (unsigned long) count[1],
+ (unsigned long) stride[0], (unsigned long) stride[1],
+ (unsigned long) block[0], (unsigned long) block[1],
+ (unsigned long) (block[0] * block[1] * count[0] * count[1]));
+ }
+}
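+
+/* A worked example (illustrative only): on a 4-rank run with dim0 = 8
+ * and dim1 = 4, BYROW yields block = {2, 4}, stride = {2, 4},
+ * count = {1, 1} and start = {2 * mpi_rank, 0}, so rank 1 selects
+ * rows 2..3 of the file dataspace.
+ */
+#if 0
+    hsize_t ex_start[MAX_RANK], ex_count[MAX_RANK];
+    hsize_t ex_stride[MAX_RANK], ex_block[MAX_RANK];
+    slab_set(mpi_rank, mpi_size, ex_start, ex_count, ex_stride, ex_block, BYROW);
+#endif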
+
+/*
+ * Setup the coordinates for point selection.
+ */
+void point_set(hsize_t start[],
+ hsize_t count[],
+ hsize_t stride[],
+ hsize_t block[],
+ size_t num_points,
+ hsize_t coords[],
+ int order)
+{
+    hsize_t i, j, k = 0, m, n, s1, s2;
+
+ // HDcompile_assert(MAX_RANK == 3);
+ HDcompile_assert(MAX_RANK == 2);
+
+ if(OUT_OF_ORDER == order)
+ k = (num_points * MAX_RANK) - 1;
+ else if(IN_ORDER == order)
+ k = 0;
+
+ s1 = start[0];
+ s2 = start[1];
+
+ for(i = 0 ; i < count[0]; i++)
+ for(j = 0 ; j < count[1]; j++)
+ for(m = 0 ; m < block[0]; m++)
+ for(n = 0 ; n < block[1]; n++)
+ if(OUT_OF_ORDER == order) {
+ coords[k--] = s2 + (stride[1] * j) + n;
+ coords[k--] = s1 + (stride[0] * i) + m;
+ }
+ else if(IN_ORDER == order) {
+ coords[k++] = s1 + stride[0] * i + m;
+ coords[k++] = s2 + stride[1] * j + n;
+ }
+
+ if(VERBOSE_MED) {
+ HDprintf("start[]=(%lu, %lu), count[]=(%lu, %lu), stride[]=(%lu, %lu), block[]=(%lu, %lu), total datapoints=%lu\n",
+ (unsigned long)start[0], (unsigned long)start[1], (unsigned long)count[0], (unsigned long)count[1],
+ (unsigned long)stride[0], (unsigned long)stride[1], (unsigned long)block[0], (unsigned long)block[1],
+ (unsigned long)(block[0] * block[1] * count[0] * count[1]));
+ k = 0;
+ for(i = 0; i < num_points ; i++) {
+ HDprintf("(%d, %d)\n", (int)coords[k], (int)coords[k + 1]);
+ k += 2;
+ }
+ }
+}
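+
+/* A minimal sketch (illustrative only; file_space_id is a hypothetical
+ * dataspace handle): coords is a flat array of interleaved (row, col)
+ * pairs, so with num_points = 4 and IN_ORDER it holds
+ * {r0, c0, r1, c1, r2, c2, r3, c3}, and H5Sselect_elements() consumes
+ * MAX_RANK coordinates per point.
+ */
+#if 0
+    hsize_t ex_coords[4 * MAX_RANK];
+    point_set(start, count, stride, block, 4, ex_coords, IN_ORDER);
+    H5Sselect_elements(file_space_id, H5S_SELECT_SET, 4, ex_coords);
+#endif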
+
+
+/*
+ * Fill the dataset with trivial data for testing.
+ * Assume dimension rank is 2 and data is stored contiguously.
+ */
+static void
+dataset_fill(hsize_t start[], hsize_t block[], DATATYPE * dataset)
+{
+ DATATYPE *dataptr = dataset;
+ hsize_t i, j;
+
+ /* put some trivial data in the data_array */
+ for (i=0; i < block[0]; i++){
+ for (j=0; j < block[1]; j++){
+ *dataptr = (DATATYPE)((i+start[0])*100 + (j+start[1]+1));
+ dataptr++;
+ }
+ }
+}
+
+
+/*
+ * Print the content of the dataset.
+ */
+static void
+dataset_print(hsize_t start[], hsize_t block[], DATATYPE * dataset)
+{
+ DATATYPE *dataptr = dataset;
+ hsize_t i, j;
+
+ /* print the column heading */
+ HDprintf("%-8s", "Cols:");
+ for (j=0; j < block[1]; j++){
+ HDprintf("%3lu ", (unsigned long)(start[1]+j));
+ }
+ HDprintf("\n");
+
+ /* print the slab data */
+ for (i=0; i < block[0]; i++){
+ HDprintf("Row %2lu: ", (unsigned long)(i+start[0]));
+ for (j=0; j < block[1]; j++){
+ HDprintf("%03d ", *dataptr++);
+ }
+ HDprintf("\n");
+ }
+}
+
+
+/*
+ * Verify the content of the dataset against the expected original data.
+ */
+int
+dataset_vrfy(hsize_t start[], hsize_t count[], hsize_t stride[], hsize_t block[], DATATYPE *dataset, DATATYPE *original)
+{
+ hsize_t i, j;
+ int vrfyerrs;
+
+ /* print it if VERBOSE_MED */
+ if(VERBOSE_MED) {
+ HDprintf("dataset_vrfy dumping:::\n");
+ HDprintf("start(%lu, %lu), count(%lu, %lu), stride(%lu, %lu), block(%lu, %lu)\n",
+ (unsigned long)start[0], (unsigned long)start[1], (unsigned long)count[0], (unsigned long)count[1],
+ (unsigned long)stride[0], (unsigned long)stride[1], (unsigned long)block[0], (unsigned long)block[1]);
+ HDprintf("original values:\n");
+ dataset_print(start, block, original);
+ HDprintf("compared values:\n");
+ dataset_print(start, block, dataset);
+ }
+
+ vrfyerrs = 0;
+    for (i=0; i < block[0]; i++){
+        for (j=0; j < block[1]; j++){
+            if(*dataset != *original){
+                if(vrfyerrs++ < MAX_ERR_REPORT || VERBOSE_MED){
+                    HDprintf("Dataset Verify failed at [%lu][%lu](row %lu, col %lu): expect %d, got %d\n",
+                        (unsigned long)i, (unsigned long)j,
+                        (unsigned long)(i+start[0]), (unsigned long)(j+start[1]),
+                        *(original), *(dataset));
+                }
+            }
+            /* Advance through both buffers for every element, not just on
+             * mismatches, so the comparison stays aligned. */
+            dataset++;
+            original++;
+        }
+    }
+ if(vrfyerrs > MAX_ERR_REPORT && !VERBOSE_MED)
+ HDprintf("[more errors ...]\n");
+ if(vrfyerrs)
+ HDprintf("%d errors found in dataset_vrfy\n", vrfyerrs);
+ return(vrfyerrs);
+}
+
+/* NOTE: This is a memory intensive test that is only run
+ * with 2 MPI ranks and when $HDF5TestExpress == 0, i.e. when
+ * an exhaustive test run is allowed. Otherwise the test is
+ * skipped.
+ *
+ * Thanks to l.ferraro@cineca.it for the following test:
+ *
+ * This is a simple test case to reproduce a problem
+ * occurring on a LUSTRE filesystem with the creation
+ * of a 4GB dataset using chunking with parallel HDF5.
+ * The test works correctly if chunking is disabled or
+ * when the number of bytes assigned to each process is
+ * less than 4GB; if it is equal or greater, the write
+ * either hangs or fails with a PMPI_Waitall error.
+ *
+ * $> mpirun -genv I_MPI_EXTRA_FILESYSTEM on
+ * -genv I_MPI_EXTRA_FILESYSTEM_LIST gpfs
+ * -n 1 ./h5_mpi_big_dataset.x 1024 1024 1024
+ */
+
+#define H5FILE_NAME "hugefile.h5"
+#define DATASETNAME "dataset"
+
+static int MpioTest2G( MPI_Comm comm )
+{
+ /*
+ * HDF5 APIs definitions
+ */
+ herr_t status;
+ hid_t file_id, dset_id; /* file and dataset identifiers */
+ hid_t plist_id; /* property list identifier */
+ hid_t filespace; /* file and memory dataspace identifiers */
+ int *data; /* pointer to data buffer to write */
+ size_t tot_size_bytes;
+ hid_t dcpl_id;
+ hid_t memorydataspace;
+ hid_t filedataspace;
+ size_t slice_per_process;
+ size_t data_size;
+ size_t data_size_bytes;
+
+ hsize_t chunk[3];
+ hsize_t h5_counts[3];
+ hsize_t h5_offsets[3];
+ hsize_t shape[3] = {1024, 1024, 1152};
+
+ /*
+ * MPI variables
+ */
+ int mpi_size, mpi_rank;
+ MPI_Info info = MPI_INFO_NULL;
+
+ MPI_Comm_size(comm, &mpi_size);
+ MPI_Comm_rank(comm, &mpi_rank);
+
+ if(mpi_rank == 0) {
+ HDprintf("Using %d process on dataset shape [%llu, %llu, %llu]\n",
+ mpi_size, shape[0], shape[1], shape[2]);
+ }
+
+ /*
+ * Set up file access property list with parallel I/O access
+ */
+ plist_id = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((plist_id >= 0), "H5Pcreate file_access succeeded");
+ status = H5Pset_fapl_mpio(plist_id, comm, info);
+ VRFY((status >= 0), "H5Pset_dxpl_mpio succeeded");
+
+ /*
+ * Create a new file collectively and release property list identifier.
+ */
+ file_id = H5Fcreate(H5FILE_NAME, H5F_ACC_TRUNC, H5P_DEFAULT, plist_id);
+ VRFY((file_id >= 0), "H5Fcreate succeeded");
+
+ H5Pclose(plist_id);
+
+ /*
+ * Create the dataspace for the dataset.
+ */
+ tot_size_bytes = sizeof(int);
+ for (int i = 0; i < 3; i++) {
+ tot_size_bytes *= shape[i];
+ }
+ if(mpi_rank == 0) {
+ HDprintf("Dataset of %llu bytes\n", tot_size_bytes);
+ }
+ filespace = H5Screate_simple(3, shape, NULL);
+ VRFY((filespace >= 0), "H5Screate_simple succeeded");
+
+ /*
+ * Select chunking
+ */
+ dcpl_id = H5Pcreate (H5P_DATASET_CREATE);
+ VRFY((dcpl_id >= 0), "H5P_DATASET_CREATE");
+ chunk[0] = 4;
+ chunk[1] = shape[1];
+ chunk[2] = shape[2];
+ status = H5Pset_chunk(dcpl_id, 3, chunk);
+ VRFY((status >= 0), "H5Pset_chunk succeeded");
+
+ /*
+ * Create the dataset with default properties and close filespace.
+ */
+ dset_id = H5Dcreate2(file_id, DATASETNAME,
+ H5T_NATIVE_INT, filespace,
+ H5P_DEFAULT, dcpl_id, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "H5Dcreate2 succeeded");
+ H5Sclose(filespace);
+
+ /*
+ * Create property list for collective dataset write.
+ */
+ plist_id = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((plist_id >= 0), "H5P_DATASET_XFER");
+ status = H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE);
+ VRFY((status >= 0), "");
+
+ H5_CHECKED_ASSIGN(slice_per_process, size_t, (shape[0] + (hsize_t)mpi_size - 1) / (hsize_t)mpi_size, hsize_t);
+ data_size = slice_per_process * shape[1] * shape[2];
+ data_size_bytes = sizeof(int) * data_size;
+ data = HDmalloc(data_size_bytes);
+ VRFY((data != NULL), "data HDmalloc succeeded");
+
+ for (size_t i = 0; i < data_size; i++) {
+ data[i] = mpi_rank;
+ }
+
+ h5_counts[0] = slice_per_process;
+ h5_counts[1] = shape[1];
+ h5_counts[2] = shape[2];
+ h5_offsets[0] = (size_t)mpi_rank * slice_per_process;
+ h5_offsets[1] = 0;
+ h5_offsets[2] = 0;
+ filedataspace = H5Screate_simple(3, shape, NULL);
+ VRFY((filedataspace >= 0), "H5Screate_simple succeeded");
+
+    // Handle the remainder along the first dimension: clamp the last
+    // rank's count so the selection stays within shape[0]
+ if ( h5_offsets[0] + h5_counts[0] > shape[0]) {
+ h5_counts[0] = shape[0] - h5_offsets[0];
+ }
+
+ status = H5Sselect_hyperslab(filedataspace, H5S_SELECT_SET,
+ h5_offsets, NULL, h5_counts, NULL);
+ VRFY((status >= 0), "H5Sselect_hyperslab succeeded");
+
+ memorydataspace = H5Screate_simple(3, h5_counts, NULL);
+ VRFY((memorydataspace >= 0), "H5Screate_simple succeeded");
+
+ status = H5Dwrite(dset_id, H5T_NATIVE_INT,
+ memorydataspace, filedataspace, plist_id, data);
+ VRFY((status >= 0), "H5Dwrite succeeded");
+ H5Pclose(plist_id);
+
+ /*
+ * Close/release resources.
+ */
+ H5Sclose(filedataspace);
+ H5Sclose(memorydataspace);
+ H5Dclose(dset_id);
+ H5Fclose(file_id);
+
+    HDfree(data);
+    HDprintf("Proc %d - MpioTest2G test succeeded (%zu bytes written per rank)\n", mpi_rank, data_size_bytes);
+
+ if (mpi_rank == 0)
+ HDremove(FILENAME[1]);
+ return 0;
+}
+
+
+/*
+ * Part 1.a--Independent read/write for fixed dimension datasets.
+ */
+
+/*
+ * Example of using the parallel HDF5 library to create two datasets
+ * in one HDF5 files with parallel MPIO access support.
+ * The Datasets are of sizes (number-of-mpi-processes x dim0) x dim1.
+ * Each process controls only a slab of size dim0 x dim1 within each
+ * dataset.
+ */
+
+void
+dataset_writeInd(void)
+{
+ hid_t fid; /* HDF5 file ID */
+ hid_t acc_tpl; /* File access templates */
+ hid_t sid; /* Dataspace ID */
+ hid_t file_dataspace; /* File dataspace ID */
+ hid_t mem_dataspace; /* memory dataspace ID */
+ hid_t dataset1, dataset2; /* Dataset ID */
+ hsize_t dims[MAX_RANK] = {1,}; /* dataset dim sizes */
+ hsize_t data_size;
+ DATATYPE *data_array1 = NULL; /* data buffer */
+ const char *filename;
+
+ hsize_t start[MAX_RANK]; /* for hyperslab setting */
+ hsize_t count[MAX_RANK];
+ hsize_t stride[MAX_RANK]; /* for hyperslab setting */
+ hsize_t block[MAX_RANK]; /* for hyperslab setting */
+
+ herr_t ret; /* Generic return value */
+ int mpi_size, mpi_rank;
+
+ MPI_Comm comm = test_comm;
+ MPI_Info info = MPI_INFO_NULL;
+
+ filename = GetTestParameters();
+ if(VERBOSE_MED)
+ HDprintf("Independent write test on file %s\n", filename);
+
+ /* set up MPI parameters */
+ MPI_Comm_size(test_comm,&mpi_size);
+ MPI_Comm_rank(test_comm,&mpi_rank);
+
+ /* allocate memory for data buffer */
+ data_size = sizeof(DATATYPE);
+ data_size *= (hsize_t)dim0 * (hsize_t)dim1;
+ data_array1 = (DATATYPE *)HDmalloc(data_size);
+ VRFY((data_array1 != NULL), "data_array1 HDmalloc succeeded");
+
+ /* ----------------------------------------
+ * CREATE AN HDF5 FILE WITH PARALLEL ACCESS
+ * ---------------------------------------*/
+ /* setup file access template */
+ acc_tpl = create_faccess_plist(comm, info, facc_type);
+ VRFY((acc_tpl >= 0), "");
+
+ /* create the file collectively */
+ fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, acc_tpl);
+ VRFY((fid >= 0), "H5Fcreate succeeded");
+
+ /* Release file-access template */
+ ret = H5Pclose(acc_tpl);
+ VRFY((ret >= 0), "");
+
+
+ /* ---------------------------------------------
+ * Define the dimensions of the overall datasets
+ * and the slabs local to the MPI process.
+ * ------------------------------------------- */
+ /* setup dimensionality object */
+ dims[0] = (hsize_t)dim0;
+ dims[1] = (hsize_t)dim1;
+ sid = H5Screate_simple (MAX_RANK, dims, NULL);
+ VRFY((sid >= 0), "H5Screate_simple succeeded");
+
+
+ /* create a dataset collectively */
+ dataset1 = H5Dcreate2(fid, DATASETNAME1, H5T_NATIVE_INT, sid,
+ H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ VRFY((dataset1 >= 0), "H5Dcreate2 succeeded");
+
+ /* create another dataset collectively */
+ dataset2 = H5Dcreate2(fid, DATASETNAME2, H5T_NATIVE_INT, sid,
+ H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ VRFY((dataset2 >= 0), "H5Dcreate2 succeeded");
+
+
+ /*
+ * To test the independent orders of writes between processes, all
+ * even number processes write to dataset1 first, then dataset2.
+ * All odd number processes write to dataset2 first, then dataset1.
+ */
+
+ /* set up dimensions of the slab this process accesses */
+ slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW);
+
+ /* put some trivial data in the data_array */
+ dataset_fill(start, block, data_array1);
+ MESG("data_array initialized");
+
+ /* create a file dataspace independently */
+ file_dataspace = H5Dget_space (dataset1);
+ VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
+ ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
+
+ /* create a memory dataspace independently */
+ mem_dataspace = H5Screate_simple (MAX_RANK, block, NULL);
+ VRFY((mem_dataspace >= 0), "");
+
+ /* write data independently */
+ ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
+ H5P_DEFAULT, data_array1);
+ VRFY((ret >= 0), "H5Dwrite dataset1 succeeded");
+ /* write data independently */
+ ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
+ H5P_DEFAULT, data_array1);
+ VRFY((ret >= 0), "H5Dwrite dataset2 succeeded");
+
+ /* setup dimensions again to write with zero rows for process 0 */
+ if(VERBOSE_MED)
+ HDprintf("writeInd by some with zero row\n");
+ slab_set(mpi_rank, mpi_size, start, count, stride, block, ZROW);
+ ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
+ /* need to make mem_dataspace to match for process 0 */
+ if(MAINPROCESS){
+ ret = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "H5Sset_hyperslab mem_dataspace succeeded");
+ }
+ MESG("writeInd by some with zero row");
+    if((mpi_rank/2)*2 != mpi_rank){
+        ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
+                H5P_DEFAULT, data_array1);
+        VRFY((ret >= 0), "H5Dwrite dataset1 by ZROW succeeded");
+    }
+#ifdef BARRIER_CHECKS
+    MPI_Barrier(test_comm);
+#endif /* BARRIER_CHECKS */
+
+ /* release dataspace ID */
+ H5Sclose(file_dataspace);
+
+ /* close dataset collectively */
+ ret = H5Dclose(dataset1);
+ VRFY((ret >= 0), "H5Dclose1 succeeded");
+ ret = H5Dclose(dataset2);
+ VRFY((ret >= 0), "H5Dclose2 succeeded");
+
+ /* release all IDs created */
+ H5Sclose(sid);
+
+ /* close the file collectively */
+ H5Fclose(fid);
+
+ /* release data buffers */
+ if(data_array1) HDfree(data_array1);
+}
+
+/* Example of using the parallel HDF5 library to read a dataset */
+void
+dataset_readInd(void)
+{
+ hid_t fid; /* HDF5 file ID */
+ hid_t acc_tpl; /* File access templates */
+ hid_t file_dataspace; /* File dataspace ID */
+ hid_t mem_dataspace; /* memory dataspace ID */
+ hid_t dataset1, dataset2; /* Dataset ID */
+ DATATYPE *data_array1 = NULL; /* data buffer */
+ DATATYPE *data_origin1 = NULL; /* expected data buffer */
+ const char *filename;
+
+ hsize_t start[MAX_RANK]; /* for hyperslab setting */
+ hsize_t count[MAX_RANK], stride[MAX_RANK]; /* for hyperslab setting */
+ hsize_t block[MAX_RANK]; /* for hyperslab setting */
+
+ herr_t ret; /* Generic return value */
+ int mpi_size, mpi_rank;
+
+ MPI_Comm comm = test_comm;
+ MPI_Info info = MPI_INFO_NULL;
+
+ filename = GetTestParameters();
+ if(VERBOSE_MED)
+ HDprintf("Independent read test on file %s\n", filename);
+
+ /* set up MPI parameters */
+ MPI_Comm_size(test_comm,&mpi_size);
+ MPI_Comm_rank(test_comm,&mpi_rank);
+
+ /* allocate memory for data buffer */
+ data_array1 = (DATATYPE *)HDmalloc((size_t)dim0*(size_t)dim1*sizeof(DATATYPE));
+ VRFY((data_array1 != NULL), "data_array1 HDmalloc succeeded");
+ data_origin1 = (DATATYPE *)HDmalloc((size_t)dim0*(size_t)dim1*sizeof(DATATYPE));
+ VRFY((data_origin1 != NULL), "data_origin1 HDmalloc succeeded");
+
+ /* setup file access template */
+ acc_tpl = create_faccess_plist(comm, info, facc_type);
+ VRFY((acc_tpl >= 0), "");
+
+ /* open the file collectively */
+ fid = H5Fopen(filename, H5F_ACC_RDONLY, acc_tpl);
+ VRFY((fid >= 0), "");
+
+ /* Release file-access template */
+ ret = H5Pclose(acc_tpl);
+ VRFY((ret >= 0), "");
+
+ /* open the dataset1 collectively */
+ dataset1 = H5Dopen2(fid, DATASETNAME1, H5P_DEFAULT);
+ VRFY((dataset1 >= 0), "");
+
+ /* open another dataset collectively */
+    dataset2 = H5Dopen2(fid, DATASETNAME2, H5P_DEFAULT);
+ VRFY((dataset2 >= 0), "");
+
+
+ /* set up dimensions of the slab this process accesses */
+ slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW);
+
+ /* create a file dataspace independently */
+ file_dataspace = H5Dget_space (dataset1);
+ VRFY((file_dataspace >= 0), "");
+ ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "");
+
+ /* create a memory dataspace independently */
+ mem_dataspace = H5Screate_simple (MAX_RANK, block, NULL);
+ VRFY((mem_dataspace >= 0), "");
+
+ /* fill dataset with test data */
+ dataset_fill(start, block, data_origin1);
+
+ /* read data independently */
+ ret = H5Dread(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
+ H5P_DEFAULT, data_array1);
+ VRFY((ret >= 0), "");
+
+ /* verify the read data with original expected data */
+ ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1);
+ if(ret) nerrors++;
+
+ /* read data independently */
+ ret = H5Dread(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
+ H5P_DEFAULT, data_array1);
+ VRFY((ret >= 0), "");
+
+ /* verify the read data with original expected data */
+ ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1);
+ if(ret) nerrors++;
+
+ /* close dataset collectively */
+ ret = H5Dclose(dataset1);
+ VRFY((ret >= 0), "");
+ ret = H5Dclose(dataset2);
+ VRFY((ret >= 0), "");
+
+ /* release all IDs created */
+ H5Sclose(file_dataspace);
+
+ /* close the file collectively */
+ H5Fclose(fid);
+
+ /* release data buffers */
+ if(data_array1) HDfree(data_array1);
+ if(data_origin1) HDfree(data_origin1);
+}
+
+
+/*
+ * Part 1.b--Collective read/write for fixed dimension datasets.
+ */
+
+/*
+ * Example of using the parallel HDF5 library to create two datasets
+ * in one HDF5 file with collective parallel access support.
+ * The Datasets are of sizes (number-of-mpi-processes x dim0) x dim1.
+ * Each process controls only a slab of size dim0 x dim1 within each
+ * dataset. [Note: not so yet. Datasets are of sizes dim0xdim1 and
+ * each process controls a hyperslab within.]
+ */
+
+void
+dataset_writeAll(void)
+{
+ hid_t fid; /* HDF5 file ID */
+ hid_t acc_tpl; /* File access templates */
+ hid_t xfer_plist; /* Dataset transfer properties list */
+ hid_t sid; /* Dataspace ID */
+ hid_t file_dataspace; /* File dataspace ID */
+ hid_t mem_dataspace; /* memory dataspace ID */
+ hid_t dataset1, dataset2, dataset3, dataset4; /* Dataset ID */
+ hid_t dataset5, dataset6, dataset7; /* Dataset ID */
+ hid_t datatype; /* Datatype ID */
+ hsize_t dims[MAX_RANK] = {1,}; /* dataset dim sizes */
+ DATATYPE *data_array1 = NULL; /* data buffer */
+ const char *filename;
+
+ hsize_t start[MAX_RANK]; /* for hyperslab setting */
+ hsize_t count[MAX_RANK];
+ hsize_t stride[MAX_RANK]; /* for hyperslab setting */
+ hsize_t block[MAX_RANK]; /* for hyperslab setting */
+
+ size_t num_points; /* for point selection */
+ hsize_t *coords = NULL; /* for point selection */
+ hsize_t current_dims; /* for point selection */
+
+ herr_t ret; /* Generic return value */
+ int mpi_size, mpi_rank;
+
+ MPI_Comm comm = test_comm;
+ MPI_Info info = MPI_INFO_NULL;
+
+ filename = GetTestParameters();
+ if(VERBOSE_MED)
+ HDprintf("Collective write test on file %s\n", filename);
+
+ /* set up MPI parameters */
+ MPI_Comm_size(test_comm,&mpi_size);
+ MPI_Comm_rank(test_comm,&mpi_rank);
+
+ /* set up the coords array selection */
+ num_points = (size_t)dim1;
+ coords = (hsize_t *)HDmalloc((size_t)dim1 * (size_t)MAX_RANK * sizeof(hsize_t));
+ VRFY((coords != NULL), "coords malloc succeeded");
+
+ /* allocate memory for data buffer */
+ data_array1 = (DATATYPE *)HDmalloc((size_t)dim0*(size_t)dim1*sizeof(DATATYPE));
+ VRFY((data_array1 != NULL), "data_array1 HDmalloc succeeded");
+
+ /* -------------------
+ * START AN HDF5 FILE
+ * -------------------*/
+ /* setup file access template */
+ acc_tpl = create_faccess_plist(comm, info, facc_type);
+ VRFY((acc_tpl >= 0), "");
+
+ /* create the file collectively */
+ fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, acc_tpl);
+ VRFY((fid >= 0), "H5Fcreate succeeded");
+
+ /* Release file-access template */
+ ret = H5Pclose(acc_tpl);
+ VRFY((ret >= 0), "");
+
+
+ /* --------------------------
+ * Define the dimensions of the overall datasets
+ * and create the dataset
+ * ------------------------- */
+ /* setup 2-D dimensionality object */
+ dims[0] = (hsize_t)dim0;
+ dims[1] = (hsize_t)dim1;
+ sid = H5Screate_simple (MAX_RANK, dims, NULL);
+ VRFY((sid >= 0), "H5Screate_simple succeeded");
+
+
+ /* create a dataset collectively */
+ dataset1 = H5Dcreate2(fid, DATASETNAME1, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ VRFY((dataset1 >= 0), "H5Dcreate2 succeeded");
+
+ /* create another dataset collectively */
+ datatype = H5Tcopy(H5T_NATIVE_INT);
+ ret = H5Tset_order(datatype, H5T_ORDER_LE);
+ VRFY((ret >= 0), "H5Tset_order succeeded");
+
+ dataset2 = H5Dcreate2(fid, DATASETNAME2, datatype, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ VRFY((dataset2 >= 0), "H5Dcreate2 2 succeeded");
+
+ /* create a third dataset collectively */
+ dataset3 = H5Dcreate2(fid, DATASETNAME3, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ VRFY((dataset3 >= 0), "H5Dcreate2 succeeded");
+
+ dataset5 = H5Dcreate2(fid, DATASETNAME7, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ VRFY((dataset5 >= 0), "H5Dcreate2 succeeded");
+ dataset6 = H5Dcreate2(fid, DATASETNAME8, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ VRFY((dataset6 >= 0), "H5Dcreate2 succeeded");
+ dataset7 = H5Dcreate2(fid, DATASETNAME9, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ VRFY((dataset7 >= 0), "H5Dcreate2 succeeded");
+
+ /* release 2-D space ID created */
+ H5Sclose(sid);
+
+ /* setup scalar dimensionality object */
+ sid = H5Screate(H5S_SCALAR);
+ VRFY((sid >= 0), "H5Screate succeeded");
+
+ /* create a fourth dataset collectively */
+ dataset4 = H5Dcreate2(fid, DATASETNAME4, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ VRFY((dataset4 >= 0), "H5Dcreate2 succeeded");
+
+ /* release scalar space ID created */
+ H5Sclose(sid);
+
+ /*
+ * Set up dimensions of the slab this process accesses.
+ */
+
+ /* Dataset1: each process takes a block of rows. */
+ slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW);
+
+ /* create a file dataspace independently */
+ file_dataspace = H5Dget_space (dataset1);
+ VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
+ ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
+
+ /* create a memory dataspace independently */
+ mem_dataspace = H5Screate_simple (MAX_RANK, block, NULL);
+ VRFY((mem_dataspace >= 0), "");
+
+ /* fill the local slab with some trivial data */
+ dataset_fill(start, block, data_array1);
+ MESG("data_array initialized");
+ if(VERBOSE_MED){
+ MESG("data_array created");
+ dataset_print(start, block, data_array1);
+ }
+
+ /* set up the collective transfer properties list */
+ xfer_plist = H5Pcreate (H5P_DATASET_XFER);
+ VRFY((xfer_plist >= 0), "H5Pcreate xfer succeeded");
+ ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
+ VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
+ if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret>= 0),"set independent IO collectively succeeded");
+ }
+
+
+ /* write data collectively */
+ MESG("writeAll by Row");
+ ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
+ xfer_plist, data_array1);
+ VRFY((ret >= 0), "H5Dwrite dataset1 succeeded");
+
+ /* setup dimensions again to writeAll with zero rows for process 0 */
+ if(VERBOSE_MED)
+ HDprintf("writeAll by some with zero row\n");
+ slab_set(mpi_rank, mpi_size, start, count, stride, block, ZROW);
+ ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
+ /* need to make mem_dataspace to match for process 0 */
+ if(MAINPROCESS){
+ ret = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "H5Sset_hyperslab mem_dataspace succeeded");
+ }
+ MESG("writeAll by some with zero row");
+ ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
+ xfer_plist, data_array1);
+ VRFY((ret >= 0), "H5Dwrite dataset1 by ZROW succeeded");
+
+ /* release all temporary handles. */
+ /* Could have used them for dataset2 but it is cleaner */
+ /* to create them again.*/
+ H5Sclose(file_dataspace);
+ H5Sclose(mem_dataspace);
+ H5Pclose(xfer_plist);
+
+ /* Dataset2: each process takes a block of columns. */
+ slab_set(mpi_rank, mpi_size, start, count, stride, block, BYCOL);
+
+ /* put some trivial data in the data_array */
+ dataset_fill(start, block, data_array1);
+ MESG("data_array initialized");
+ if(VERBOSE_MED){
+ MESG("data_array created");
+ dataset_print(start, block, data_array1);
+ }
+
+ /* create a file dataspace independently */
+ file_dataspace = H5Dget_space (dataset1);
+ VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
+ ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
+
+ /* create a memory dataspace independently */
+ mem_dataspace = H5Screate_simple (MAX_RANK, block, NULL);
+ VRFY((mem_dataspace >= 0), "");
+
+ /* fill the local slab with some trivial data */
+ dataset_fill(start, block, data_array1);
+ MESG("data_array initialized");
+ if(VERBOSE_MED){
+ MESG("data_array created");
+ dataset_print(start, block, data_array1);
+ }
+
+ /* set up the collective transfer properties list */
+ xfer_plist = H5Pcreate (H5P_DATASET_XFER);
+ VRFY((xfer_plist >= 0), "");
+ ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
+ VRFY((ret >= 0), "H5Pcreate xfer succeeded");
+ if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret>= 0),"set independent IO collectively succeeded");
+ }
+
+
+ /* write data independently */
+ ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
+ xfer_plist, data_array1);
+ VRFY((ret >= 0), "H5Dwrite dataset2 succeeded");
+
+ /* setup dimensions again to writeAll with zero columns for process 0 */
+ if(VERBOSE_MED)
+ HDprintf("writeAll by some with zero col\n");
+ slab_set(mpi_rank, mpi_size, start, count, stride, block, ZCOL);
+ ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
+ /* need to make mem_dataspace to match for process 0 */
+ if(MAINPROCESS){
+ ret = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "H5Sset_hyperslab mem_dataspace succeeded");
+ }
+ MESG("writeAll by some with zero col");
+ ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
+ xfer_plist, data_array1);
+ VRFY((ret >= 0), "H5Dwrite dataset1 by ZCOL succeeded");
+
+ /* release all temporary handles. */
+ /* Could have used them for dataset3 but it is cleaner */
+ /* to create them again.*/
+ H5Sclose(file_dataspace);
+ H5Sclose(mem_dataspace);
+ H5Pclose(xfer_plist);
+
+
+ /* Dataset3: each process takes a block of rows, except process zero uses "none" selection. */
+ slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW);
+
+ /* create a file dataspace independently */
+ file_dataspace = H5Dget_space (dataset3);
+ VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
+ if(MAINPROCESS) {
+ ret = H5Sselect_none(file_dataspace);
+ VRFY((ret >= 0), "H5Sselect_none file_dataspace succeeded");
+ } /* end if */
+ else {
+ ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "H5Sselect_hyperslab succeeded");
+ } /* end else */
+
+ /* create a memory dataspace independently */
+ mem_dataspace = H5Screate_simple (MAX_RANK, block, NULL);
+ VRFY((mem_dataspace >= 0), "");
+ if(MAINPROCESS) {
+ ret = H5Sselect_none(mem_dataspace);
+ VRFY((ret >= 0), "H5Sselect_none mem_dataspace succeeded");
+ } /* end if */
+
+ /* fill the local slab with some trivial data */
+ dataset_fill(start, block, data_array1);
+ MESG("data_array initialized");
+ if(VERBOSE_MED) {
+ MESG("data_array created");
+ dataset_print(start, block, data_array1);
+ } /* end if */
+
+ /* set up the collective transfer properties list */
+ xfer_plist = H5Pcreate (H5P_DATASET_XFER);
+ VRFY((xfer_plist >= 0), "");
+ ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
+ VRFY((ret >= 0), "H5Pcreate xfer succeeded");
+ if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret>= 0),"set independent IO collectively succeeded");
+ }
+
+
+ /* write data collectively */
+ MESG("writeAll with none");
+ ret = H5Dwrite(dataset3, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
+ xfer_plist, data_array1);
+ VRFY((ret >= 0), "H5Dwrite dataset3 succeeded");
+
+ /* write data collectively (with datatype conversion) */
+ MESG("writeAll with none");
+ ret = H5Dwrite(dataset3, H5T_NATIVE_UCHAR, mem_dataspace, file_dataspace,
+ xfer_plist, data_array1);
+ VRFY((ret >= 0), "H5Dwrite dataset3 succeeded");
+
+ /* release all temporary handles. */
+ /* Could have used them for dataset4 but it is cleaner */
+ /* to create them again.*/
+ H5Sclose(file_dataspace);
+ H5Sclose(mem_dataspace);
+ H5Pclose(xfer_plist);
+
+    /* Dataset4: process zero uses a "none" selection; all other processes use "all". */
+ /* Additionally, these are in a scalar dataspace */
+
+ /* create a file dataspace independently */
+ file_dataspace = H5Dget_space (dataset4);
+ VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
+    if(MAINPROCESS) {
+        ret = H5Sselect_none(file_dataspace);
+        VRFY((ret >= 0), "H5Sselect_none file_dataspace succeeded");
+    } /* end if */
+    else {
+        ret = H5Sselect_all(file_dataspace);
+        VRFY((ret >= 0), "H5Sselect_all file_dataspace succeeded");
+    } /* end else */
+
+ /* create a memory dataspace independently */
+ mem_dataspace = H5Screate(H5S_SCALAR);
+ VRFY((mem_dataspace >= 0), "");
+    if(MAINPROCESS) {
+        ret = H5Sselect_none(mem_dataspace);
+        VRFY((ret >= 0), "H5Sselect_none mem_dataspace succeeded");
+    } /* end if */
+    else {
+        ret = H5Sselect_all(mem_dataspace);
+        VRFY((ret >= 0), "H5Sselect_all mem_dataspace succeeded");
+    } /* end else */
+
+ /* fill the local slab with some trivial data */
+ dataset_fill(start, block, data_array1);
+ MESG("data_array initialized");
+ if(VERBOSE_MED) {
+ MESG("data_array created");
+ dataset_print(start, block, data_array1);
+ } /* end if */
+
+ /* set up the collective transfer properties list */
+ xfer_plist = H5Pcreate (H5P_DATASET_XFER);
+ VRFY((xfer_plist >= 0), "");
+ ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
+ VRFY((ret >= 0), "H5Pcreate xfer succeeded");
+ if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret>= 0),"set independent IO collectively succeeded");
+ }
+
+ /* write data collectively */
+ MESG("writeAll with scalar dataspace");
+ ret = H5Dwrite(dataset4, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
+ xfer_plist, data_array1);
+ VRFY((ret >= 0), "H5Dwrite dataset4 succeeded");
+
+ /* write data collectively (with datatype conversion) */
+ MESG("writeAll with scalar dataspace");
+ ret = H5Dwrite(dataset4, H5T_NATIVE_UCHAR, mem_dataspace, file_dataspace,
+ xfer_plist, data_array1);
+ VRFY((ret >= 0), "H5Dwrite dataset4 succeeded");
+
+ /* release all temporary handles. */
+ H5Sclose(file_dataspace);
+ H5Sclose(mem_dataspace);
+ H5Pclose(xfer_plist);
+
+
+    if(data_array1) HDfree(data_array1);
+ data_array1 = (DATATYPE *)HDmalloc((size_t)dim0*(size_t)dim1*sizeof(DATATYPE));
+ VRFY((data_array1 != NULL), "data_array1 malloc succeeded");
+
+ block[0] = 1;
+ block[1] = (hsize_t)dim1;
+ stride[0] = 1;
+ stride[1] = (hsize_t)dim1;
+ count[0] = 1;
+ count[1] = 1;
+ start[0] = (hsize_t)dim0/(hsize_t)mpi_size * (hsize_t)mpi_rank;
+ start[1] = 0;
+
+ dataset_fill(start, block, data_array1);
+ MESG("data_array initialized");
+ if(VERBOSE_MED){
+ MESG("data_array created");
+ dataset_print(start, block, data_array1);
+ }
+
+ /* Dataset5: point selection in File - Hyperslab selection in Memory*/
+ /* create a file dataspace independently */
+ point_set (start, count, stride, block, num_points, coords, OUT_OF_ORDER);
+ file_dataspace = H5Dget_space (dataset5);
+ VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
+ ret = H5Sselect_elements(file_dataspace, H5S_SELECT_SET, num_points, coords);
+ VRFY((ret >= 0), "H5Sselect_elements succeeded");
+
+ start[0] = 0;
+ start[1] = 0;
+ mem_dataspace = H5Dget_space (dataset5);
+ VRFY((mem_dataspace >= 0), "H5Dget_space succeeded");
+ ret = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
+
+ /* set up the collective transfer properties list */
+ xfer_plist = H5Pcreate (H5P_DATASET_XFER);
+ VRFY((xfer_plist >= 0), "");
+ ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
+ VRFY((ret >= 0), "H5Pcreate xfer succeeded");
+ if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret>= 0),"set independent IO collectively succeeded");
+ }
+
+ /* write data collectively */
+ ret = H5Dwrite(dataset5, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
+ xfer_plist, data_array1);
+ VRFY((ret >= 0), "H5Dwrite dataset5 succeeded");
+
+ /* release all temporary handles. */
+ H5Sclose(file_dataspace);
+ H5Sclose(mem_dataspace);
+ H5Pclose(xfer_plist);
+
+ /* Dataset6: point selection in File - Point selection in Memory*/
+ /* create a file dataspace independently */
+ start[0] = (hsize_t)dim0/(hsize_t)mpi_size * (hsize_t)mpi_rank;
+ start[1] = 0;
+ point_set (start, count, stride, block, num_points, coords, OUT_OF_ORDER);
+ file_dataspace = H5Dget_space (dataset6);
+ VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
+ ret = H5Sselect_elements(file_dataspace, H5S_SELECT_SET, num_points, coords);
+ VRFY((ret >= 0), "H5Sselect_elements succeeded");
+
+ start[0] = 0;
+ start[1] = 0;
+ point_set (start, count, stride, block, num_points, coords, IN_ORDER);
+ mem_dataspace = H5Dget_space (dataset6);
+ VRFY((mem_dataspace >= 0), "H5Dget_space succeeded");
+ ret = H5Sselect_elements(mem_dataspace, H5S_SELECT_SET, num_points, coords);
+ VRFY((ret >= 0), "H5Sselect_elements succeeded");
+
+ /* set up the collective transfer properties list */
+ xfer_plist = H5Pcreate (H5P_DATASET_XFER);
+ VRFY((xfer_plist >= 0), "");
+ ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
+ VRFY((ret >= 0), "H5Pcreate xfer succeeded");
+ if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret>= 0),"set independent IO collectively succeeded");
+ }
+
+ /* write data collectively */
+ ret = H5Dwrite(dataset6, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
+ xfer_plist, data_array1);
+ VRFY((ret >= 0), "H5Dwrite dataset6 succeeded");
+
+ /* release all temporary handles. */
+ H5Sclose(file_dataspace);
+ H5Sclose(mem_dataspace);
+ H5Pclose(xfer_plist);
+
+ /* Dataset7: point selection in File - All selection in Memory*/
+ /* create a file dataspace independently */
+ start[0] = (hsize_t)dim0/(hsize_t)mpi_size * (hsize_t)mpi_rank;
+ start[1] = 0;
+ point_set (start, count, stride, block, num_points, coords, IN_ORDER);
+ file_dataspace = H5Dget_space (dataset7);
+ VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
+ ret = H5Sselect_elements(file_dataspace, H5S_SELECT_SET, num_points, coords);
+ VRFY((ret >= 0), "H5Sselect_elements succeeded");
+
+ current_dims = num_points;
+ mem_dataspace = H5Screate_simple (1, &current_dims, NULL);
+ VRFY((mem_dataspace >= 0), "mem_dataspace create succeeded");
+
+ ret = H5Sselect_all(mem_dataspace);
+ VRFY((ret >= 0), "H5Sselect_all succeeded");
+
+ /* set up the collective transfer properties list */
+ xfer_plist = H5Pcreate (H5P_DATASET_XFER);
+ VRFY((xfer_plist >= 0), "");
+ ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
+ VRFY((ret >= 0), "H5Pcreate xfer succeeded");
+ if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret>= 0),"set independent IO collectively succeeded");
+ }
+
+ /* write data collectively */
+ ret = H5Dwrite(dataset7, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
+ xfer_plist, data_array1);
+ VRFY((ret >= 0), "H5Dwrite dataset7 succeeded");
+
+ /* release all temporary handles. */
+ H5Sclose(file_dataspace);
+ H5Sclose(mem_dataspace);
+ H5Pclose(xfer_plist);
+
+ /*
+ * All writes completed. Close datasets collectively
+ */
+ ret = H5Dclose(dataset1);
+ VRFY((ret >= 0), "H5Dclose1 succeeded");
+ ret = H5Dclose(dataset2);
+ VRFY((ret >= 0), "H5Dclose2 succeeded");
+ ret = H5Dclose(dataset3);
+ VRFY((ret >= 0), "H5Dclose3 succeeded");
+ ret = H5Dclose(dataset4);
+ VRFY((ret >= 0), "H5Dclose4 succeeded");
+ ret = H5Dclose(dataset5);
+ VRFY((ret >= 0), "H5Dclose5 succeeded");
+ ret = H5Dclose(dataset6);
+ VRFY((ret >= 0), "H5Dclose6 succeeded");
+ ret = H5Dclose(dataset7);
+ VRFY((ret >= 0), "H5Dclose7 succeeded");
+
+ /* close the file collectively */
+ H5Fclose(fid);
+
+ /* release data buffers */
+ if(coords) HDfree(coords);
+ if(data_array1) HDfree(data_array1);
+}
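+
+/*
+ * The collective-transfer setup above is repeated verbatim for every
+ * dataset.  A minimal sketch of how it could be factored into a helper;
+ * the name create_xfer_plist() is hypothetical and not part of this
+ * test suite.
+ */
+#if 0
+static hid_t
+create_xfer_plist(void)
+{
+    hid_t plist = H5Pcreate(H5P_DATASET_XFER);
+
+    if(plist < 0)
+        return -1;
+
+    /* request collective MPI-IO transfers */
+    if(H5Pset_dxpl_mpio(plist, H5FD_MPIO_COLLECTIVE) < 0) {
+        H5Pclose(plist);
+        return -1;
+    }
+
+    /* optionally fall back to independent I/O within the collective call */
+    if(dxfer_coll_type == DXFER_INDEPENDENT_IO)
+        if(H5Pset_dxpl_mpio_collective_opt(plist, H5FD_MPIO_INDIVIDUAL_IO) < 0) {
+            H5Pclose(plist);
+            return -1;
+        }
+
+    return plist;
+}
+#endif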
+
+/*
+ * Example of using the parallel HDF5 library to read several datasets
+ * in one HDF5 file with collective parallel access support.
+ * The datasets are of size dim0 x dim1, and each process controls a
+ * hyperslab within each dataset.
+ */
+
+void
+dataset_readAll(void)
+{
+ hid_t fid; /* HDF5 file ID */
+ hid_t acc_tpl; /* File access templates */
+ hid_t xfer_plist; /* Dataset transfer properties list */
+ hid_t file_dataspace; /* File dataspace ID */
+ hid_t mem_dataspace; /* memory dataspace ID */
+ hid_t dataset1, dataset2, dataset5, dataset6, dataset7; /* Dataset ID */
+ DATATYPE *data_array1 = NULL; /* data buffer */
+ DATATYPE *data_origin1 = NULL; /* expected data buffer */
+ const char *filename;
+
+ hsize_t start[MAX_RANK]; /* for hyperslab setting */
+ hsize_t count[MAX_RANK], stride[MAX_RANK]; /* for hyperslab setting */
+ hsize_t block[MAX_RANK]; /* for hyperslab setting */
+
+ size_t num_points; /* for point selection */
+ hsize_t *coords = NULL; /* for point selection */
+ int i,j,k;
+
+ herr_t ret; /* Generic return value */
+ int mpi_size, mpi_rank;
+
+ MPI_Comm comm = test_comm;
+ MPI_Info info = MPI_INFO_NULL;
+
+ filename = GetTestParameters();
+ if(VERBOSE_MED)
+ HDprintf("Collective read test on file %s\n", filename);
+
+ /* set up MPI parameters */
+ MPI_Comm_size(test_comm,&mpi_size);
+ MPI_Comm_rank(test_comm,&mpi_rank);
+
+ /* set up the coords array selection */
+ num_points = (size_t)dim1;
+ coords = (hsize_t *)HDmalloc((size_t)dim0 * (size_t)dim1 * MAX_RANK * sizeof(hsize_t));
+ VRFY((coords != NULL), "coords malloc succeeded");
+
+ /* allocate memory for data buffer */
+ data_array1 = (DATATYPE *)HDmalloc((size_t)dim0*(size_t)dim1*sizeof(DATATYPE));
+ VRFY((data_array1 != NULL), "data_array1 HDmalloc succeeded");
+ data_origin1 = (DATATYPE *)HDmalloc((size_t)dim0*(size_t)dim1*sizeof(DATATYPE));
+ VRFY((data_origin1 != NULL), "data_origin1 HDmalloc succeeded");
+
+ /* -------------------
+ * OPEN AN HDF5 FILE
+ * -------------------*/
+ /* setup file access template */
+ acc_tpl = create_faccess_plist(comm, info, facc_type);
+ VRFY((acc_tpl >= 0), "");
+
+ /* open the file collectively */
+ fid=H5Fopen(filename,H5F_ACC_RDONLY,acc_tpl);
+ VRFY((fid >= 0), "H5Fopen succeeded");
+
+ /* Release file-access template */
+ ret = H5Pclose(acc_tpl);
+ VRFY((ret >= 0), "");
+
+
+ /* --------------------------
+ * Open the datasets in it
+ * ------------------------- */
+ /* open the dataset1 collectively */
+ dataset1 = H5Dopen2(fid, DATASETNAME1, H5P_DEFAULT);
+ VRFY((dataset1 >= 0), "H5Dopen2 succeeded");
+
+ /* open another dataset collectively */
+ dataset2 = H5Dopen2(fid, DATASETNAME2, H5P_DEFAULT);
+ VRFY((dataset2 >= 0), "H5Dopen2 2 succeeded");
+
+ /* open another dataset collectively */
+ dataset5 = H5Dopen2(fid, DATASETNAME7, H5P_DEFAULT);
+ VRFY((dataset5 >= 0), "H5Dopen2 5 succeeded");
+ dataset6 = H5Dopen2(fid, DATASETNAME8, H5P_DEFAULT);
+ VRFY((dataset6 >= 0), "H5Dopen2 6 succeeded");
+ dataset7 = H5Dopen2(fid, DATASETNAME9, H5P_DEFAULT);
+ VRFY((dataset7 >= 0), "H5Dopen2 7 succeeded");
+
+ /*
+ * Set up dimensions of the slab this process accesses.
+ */
+
+ /* Dataset1: each process takes a block of columns. */
+ slab_set(mpi_rank, mpi_size, start, count, stride, block, BYCOL);
+
+ /* create a file dataspace independently */
+ file_dataspace = H5Dget_space (dataset1);
+ VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
+ ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
+
+ /* create a memory dataspace independently */
+ mem_dataspace = H5Screate_simple (MAX_RANK, block, NULL);
+ VRFY((mem_dataspace >= 0), "");
+
+ /* fill dataset with test data */
+ dataset_fill(start, block, data_origin1);
+ MESG("data_array initialized");
+ if(VERBOSE_MED){
+ MESG("data_array created");
+ dataset_print(start, block, data_origin1);
+ }
+
+ /* set up the collective transfer properties list */
+ xfer_plist = H5Pcreate (H5P_DATASET_XFER);
+ VRFY((xfer_plist >= 0), "");
+ ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
+ VRFY((ret >= 0), "H5Pcreate xfer succeeded");
+ if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret>= 0),"set independent IO collectively succeeded");
+ }
+
+
+ /* read data collectively */
+ ret = H5Dread(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
+ xfer_plist, data_array1);
+ VRFY((ret >= 0), "H5Dread dataset1 succeeded");
+
+ /* verify the read data with original expected data */
+ ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1);
+ if(ret) nerrors++;
+
+    /* set up dimensions again to readAll with zero columns for process 0 */
+ if(VERBOSE_MED)
+ HDprintf("readAll by some with zero col\n");
+ slab_set(mpi_rank, mpi_size, start, count, stride, block, ZCOL);
+ ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
+ /* need to make mem_dataspace to match for process 0 */
+ if(MAINPROCESS){
+ ret = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "H5Sset_hyperslab mem_dataspace succeeded");
+ }
+ MESG("readAll by some with zero col");
+ ret = H5Dread(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
+ xfer_plist, data_array1);
+ VRFY((ret >= 0), "H5Dread dataset1 by ZCOL succeeded");
+
+ /* verify the read data with original expected data */
+ ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1);
+ if(ret) nerrors++;
+
+ /* release all temporary handles. */
+ /* Could have used them for dataset2 but it is cleaner */
+ /* to create them again.*/
+ H5Sclose(file_dataspace);
+ H5Sclose(mem_dataspace);
+ H5Pclose(xfer_plist);
+
+ /* Dataset2: each process takes a block of rows. */
+ slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW);
+
+ /* create a file dataspace independently */
+ file_dataspace = H5Dget_space (dataset1);
+ VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
+ ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
+
+ /* create a memory dataspace independently */
+ mem_dataspace = H5Screate_simple (MAX_RANK, block, NULL);
+ VRFY((mem_dataspace >= 0), "");
+
+ /* fill dataset with test data */
+ dataset_fill(start, block, data_origin1);
+ MESG("data_array initialized");
+ if(VERBOSE_MED){
+ MESG("data_array created");
+ dataset_print(start, block, data_origin1);
+ }
+
+ /* set up the collective transfer properties list */
+ xfer_plist = H5Pcreate (H5P_DATASET_XFER);
+ VRFY((xfer_plist >= 0), "");
+ ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
+ VRFY((ret >= 0), "H5Pcreate xfer succeeded");
+ if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret>= 0),"set independent IO collectively succeeded");
+ }
+
+
+ /* read data collectively */
+ ret = H5Dread(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
+ xfer_plist, data_array1);
+ VRFY((ret >= 0), "H5Dread dataset2 succeeded");
+
+ /* verify the read data with original expected data */
+ ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1);
+ if(ret) nerrors++;
+
+    /* set up dimensions again to readAll with zero rows for process 0 */
+ if(VERBOSE_MED)
+ HDprintf("readAll by some with zero row\n");
+ slab_set(mpi_rank, mpi_size, start, count, stride, block, ZROW);
+ ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
+ /* need to make mem_dataspace to match for process 0 */
+ if(MAINPROCESS){
+ ret = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "H5Sset_hyperslab mem_dataspace succeeded");
+ }
+ MESG("readAll by some with zero row");
+ ret = H5Dread(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
+ xfer_plist, data_array1);
+ VRFY((ret >= 0), "H5Dread dataset1 by ZROW succeeded");
+
+ /* verify the read data with original expected data */
+ ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1);
+ if(ret) nerrors++;
+
+ /* release all temporary handles. */
+ H5Sclose(file_dataspace);
+ H5Sclose(mem_dataspace);
+ H5Pclose(xfer_plist);
+
+    if(data_array1) HDfree(data_array1);
+    if(data_origin1) HDfree(data_origin1);
+ data_array1 = (DATATYPE *)HDmalloc((size_t)dim0*(size_t)dim1*sizeof(DATATYPE));
+ VRFY((data_array1 != NULL), "data_array1 malloc succeeded");
+ data_origin1 = (DATATYPE *)HDmalloc((size_t)dim0*(size_t)dim1*sizeof(DATATYPE));
+ VRFY((data_origin1 != NULL), "data_origin1 malloc succeeded");
+
+ block[0] = 1;
+ block[1] = (hsize_t)dim1;
+ stride[0] = 1;
+ stride[1] = (hsize_t)dim1;
+ count[0] = 1;
+ count[1] = 1;
+ start[0] = (hsize_t)dim0/(hsize_t)mpi_size * (hsize_t)mpi_rank;
+ start[1] = 0;
+
+ dataset_fill(start, block, data_origin1);
+ MESG("data_array initialized");
+ if(VERBOSE_MED){
+ MESG("data_array created");
+ dataset_print(start, block, data_origin1);
+ }
+
+ /* Dataset5: point selection in memory - Hyperslab selection in file*/
+ /* create a file dataspace independently */
+ file_dataspace = H5Dget_space (dataset5);
+ VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
+ ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
+
+ start[0] = 0;
+ start[1] = 0;
+ point_set (start, count, stride, block, num_points, coords, OUT_OF_ORDER);
+ mem_dataspace = H5Dget_space (dataset5);
+ VRFY((mem_dataspace >= 0), "H5Dget_space succeeded");
+ ret = H5Sselect_elements(mem_dataspace, H5S_SELECT_SET, num_points, coords);
+ VRFY((ret >= 0), "H5Sselect_elements succeeded");
+
+ /* set up the collective transfer properties list */
+ xfer_plist = H5Pcreate (H5P_DATASET_XFER);
+ VRFY((xfer_plist >= 0), "");
+ ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
+ VRFY((ret >= 0), "H5Pcreate xfer succeeded");
+ if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret>= 0),"set independent IO collectively succeeded");
+ }
+
+ /* read data collectively */
+ ret = H5Dread(dataset5, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
+ xfer_plist, data_array1);
+ VRFY((ret >= 0), "H5Dread dataset5 succeeded");
+
+
+ ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1);
+ if(ret) nerrors++;
+
+ /* release all temporary handles. */
+ H5Sclose(file_dataspace);
+ H5Sclose(mem_dataspace);
+ H5Pclose(xfer_plist);
+
+    if(data_array1) HDfree(data_array1);
+ data_array1 = (DATATYPE *)HDmalloc((size_t)dim0*(size_t)dim1*sizeof(DATATYPE));
+ VRFY((data_array1 != NULL), "data_array1 malloc succeeded");
+
+ /* Dataset6: point selection in File - Point selection in Memory*/
+ /* create a file dataspace independently */
+ start[0] = (hsize_t)dim0/(hsize_t)mpi_size * (hsize_t)mpi_rank;
+ start[1] = 0;
+ point_set (start, count, stride, block, num_points, coords, IN_ORDER);
+ file_dataspace = H5Dget_space (dataset6);
+ VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
+ ret = H5Sselect_elements(file_dataspace, H5S_SELECT_SET, num_points, coords);
+ VRFY((ret >= 0), "H5Sselect_elements succeeded");
+
+ start[0] = 0;
+ start[1] = 0;
+ point_set (start, count, stride, block, num_points, coords, OUT_OF_ORDER);
+ mem_dataspace = H5Dget_space (dataset6);
+ VRFY((mem_dataspace >= 0), "H5Dget_space succeeded");
+ ret = H5Sselect_elements(mem_dataspace, H5S_SELECT_SET, num_points, coords);
+ VRFY((ret >= 0), "H5Sselect_elements succeeded");
+
+ /* set up the collective transfer properties list */
+ xfer_plist = H5Pcreate (H5P_DATASET_XFER);
+ VRFY((xfer_plist >= 0), "");
+ ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
+ VRFY((ret >= 0), "H5Pcreate xfer succeeded");
+ if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret>= 0),"set independent IO collectively succeeded");
+ }
+
+ /* read data collectively */
+ ret = H5Dread(dataset6, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
+ xfer_plist, data_array1);
+ VRFY((ret >= 0), "H5Dread dataset6 succeeded");
+
+ ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1);
+ if(ret) nerrors++;
+
+ /* release all temporary handles. */
+ H5Sclose(file_dataspace);
+ H5Sclose(mem_dataspace);
+ H5Pclose(xfer_plist);
+
+    if(data_array1) HDfree(data_array1);
+ data_array1 = (DATATYPE *)HDmalloc((size_t)dim0*(size_t)dim1*sizeof(DATATYPE));
+ VRFY((data_array1 != NULL), "data_array1 malloc succeeded");
+
+ /* Dataset7: point selection in memory - All selection in file*/
+ /* create a file dataspace independently */
+ file_dataspace = H5Dget_space (dataset7);
+ VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
+ ret = H5Sselect_all(file_dataspace);
+ VRFY((ret >= 0), "H5Sselect_all succeeded");
+
+ num_points = (size_t)dim0 * (size_t)dim1;
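+    /* Build coords[] as a flattened num_points x rank list of (row, col)
+     * pairs -- the serialized layout that H5Sselect_elements() expects. */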
+ k=0;
+ for (i=0 ; i<dim0; i++) {
+ for (j=0 ; j<dim1; j++) {
+ coords[k++] = (hsize_t)i;
+ coords[k++] = (hsize_t)j;
+ }
+ }
+ mem_dataspace = H5Dget_space (dataset7);
+ VRFY((mem_dataspace >= 0), "H5Dget_space succeeded");
+ ret = H5Sselect_elements(mem_dataspace, H5S_SELECT_SET, num_points, coords);
+ VRFY((ret >= 0), "H5Sselect_elements succeeded");
+
+ /* set up the collective transfer properties list */
+ xfer_plist = H5Pcreate (H5P_DATASET_XFER);
+ VRFY((xfer_plist >= 0), "");
+ ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
+ VRFY((ret >= 0), "H5Pcreate xfer succeeded");
+ if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret>= 0),"set independent IO collectively succeeded");
+ }
+
+ /* read data collectively */
+ ret = H5Dread(dataset7, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
+ xfer_plist, data_array1);
+ VRFY((ret >= 0), "H5Dread dataset7 succeeded");
+
+ start[0] = (hsize_t)dim0/(hsize_t)mpi_size * (hsize_t)mpi_rank;
+ start[1] = 0;
+ ret = dataset_vrfy(start, count, stride, block, data_array1+(dim0/mpi_size * dim1 * mpi_rank), data_origin1);
+ if(ret) nerrors++;
+
+ /* release all temporary handles. */
+ H5Sclose(file_dataspace);
+ H5Sclose(mem_dataspace);
+ H5Pclose(xfer_plist);
+
+ /*
+ * All reads completed. Close datasets collectively
+ */
+ ret = H5Dclose(dataset1);
+ VRFY((ret >= 0), "H5Dclose1 succeeded");
+ ret = H5Dclose(dataset2);
+ VRFY((ret >= 0), "H5Dclose2 succeeded");
+ ret = H5Dclose(dataset5);
+ VRFY((ret >= 0), "H5Dclose5 succeeded");
+ ret = H5Dclose(dataset6);
+ VRFY((ret >= 0), "H5Dclose6 succeeded");
+ ret = H5Dclose(dataset7);
+ VRFY((ret >= 0), "H5Dclose7 succeeded");
+
+ /* close the file collectively */
+ H5Fclose(fid);
+
+ /* release data buffers */
+ if(coords) HDfree(coords);
+ if(data_array1) HDfree(data_array1);
+ if(data_origin1) HDfree(data_origin1);
+}
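+
+/*
+ * For reference, the BYROW decomposition used by slab_set() in the tests
+ * above gives each rank one contiguous band of rows.  A sketch of the
+ * equivalent explicit hyperslab parameters, assuming dim0 divides evenly
+ * by mpi_size:
+ *
+ *     block[0]  = dim0 / mpi_size;      block[1]  = dim1;
+ *     stride[0] = block[0];             stride[1] = block[1];
+ *     count[0]  = 1;                    count[1]  = 1;
+ *     start[0]  = mpi_rank * block[0];  start[1]  = 0;
+ *
+ * BYCOL is the transpose: each rank takes a band of dim1/mpi_size columns.
+ */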
+
+
+/*
+ * Part 2--Independent read/write for extendible datasets.
+ */
+
+/*
+ * Example of using the parallel HDF5 library to create two extendible
+ * datasets in one HDF5 file with independent parallel MPIO access support.
+ * The datasets are of size dim0 x dim1, and each process controls a
+ * hyperslab within each dataset.
+ */
+
+void
+extend_writeInd(void)
+{
+ hid_t fid; /* HDF5 file ID */
+ hid_t acc_tpl; /* File access templates */
+ hid_t sid; /* Dataspace ID */
+ hid_t file_dataspace; /* File dataspace ID */
+ hid_t mem_dataspace; /* memory dataspace ID */
+ hid_t dataset1, dataset2; /* Dataset ID */
+ const char *filename;
+ hsize_t dims[MAX_RANK]; /* dataset dim sizes */
+ hsize_t max_dims[MAX_RANK] =
+ {H5S_UNLIMITED, H5S_UNLIMITED}; /* dataset maximum dim sizes */
+ DATATYPE *data_array1 = NULL; /* data buffer */
+ hsize_t chunk_dims[MAX_RANK]; /* chunk sizes */
+ hid_t dataset_pl; /* dataset create prop. list */
+
+ hsize_t start[MAX_RANK]; /* for hyperslab setting */
+ hsize_t count[MAX_RANK]; /* for hyperslab setting */
+ hsize_t stride[MAX_RANK]; /* for hyperslab setting */
+ hsize_t block[MAX_RANK]; /* for hyperslab setting */
+
+ herr_t ret; /* Generic return value */
+ int mpi_size, mpi_rank;
+
+ MPI_Comm comm = test_comm;
+ MPI_Info info = MPI_INFO_NULL;
+
+ filename = GetTestParameters();
+ if(VERBOSE_MED)
+ HDprintf("Extend independent write test on file %s\n", filename);
+
+ /* set up MPI parameters */
+ MPI_Comm_size(test_comm,&mpi_size);
+ MPI_Comm_rank(test_comm,&mpi_rank);
+
+ /* setup chunk-size. Make sure sizes are > 0 */
+ chunk_dims[0] = (hsize_t)chunkdim0;
+ chunk_dims[1] = (hsize_t)chunkdim1;
+
+ /* allocate memory for data buffer */
+ data_array1 = (DATATYPE *)HDmalloc((size_t)dim0*(size_t)dim1*sizeof(DATATYPE));
+ VRFY((data_array1 != NULL), "data_array1 HDmalloc succeeded");
+
+ /* -------------------
+ * START AN HDF5 FILE
+ * -------------------*/
+ /* setup file access template */
+ acc_tpl = create_faccess_plist(comm, info, facc_type);
+ VRFY((acc_tpl >= 0), "");
+
+/* Reduce the number of metadata cache slots, so that there are cache
+ * collisions during the raw data I/O on the chunked dataset. This stresses
+ * the metadata cache and tests for cache bugs. -QAK
+ */
+{
+ int mdc_nelmts;
+ size_t rdcc_nelmts;
+ size_t rdcc_nbytes;
+ double rdcc_w0;
+
+ ret = H5Pget_cache(acc_tpl,&mdc_nelmts,&rdcc_nelmts,&rdcc_nbytes,&rdcc_w0);
+ VRFY((ret >= 0), "H5Pget_cache succeeded");
+ mdc_nelmts=4;
+ ret = H5Pset_cache(acc_tpl,mdc_nelmts,rdcc_nelmts,rdcc_nbytes,rdcc_w0);
+ VRFY((ret >= 0), "H5Pset_cache succeeded");
+}
+
+ /* create the file collectively */
+ fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, acc_tpl);
+ VRFY((fid >= 0), "H5Fcreate succeeded");
+
+ /* Release file-access template */
+ ret = H5Pclose(acc_tpl);
+ VRFY((ret >= 0), "");
+
+
+ /* --------------------------------------------------------------
+ * Define the dimensions of the overall datasets and create them.
+ * ------------------------------------------------------------- */
+
+ /* set up dataset storage chunk sizes and creation property list */
+ if(VERBOSE_MED)
+ HDprintf("chunks[]=%lu,%lu\n", (unsigned long)chunk_dims[0], (unsigned long)chunk_dims[1]);
+ dataset_pl = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY((dataset_pl >= 0), "H5Pcreate succeeded");
+ ret = H5Pset_chunk(dataset_pl, MAX_RANK, chunk_dims);
+ VRFY((ret >= 0), "H5Pset_chunk succeeded");
+
+ /* setup dimensionality object */
+    /* start out with zero-sized dimensions; extend them later. */
+ dims[0] = dims[1] = 0;
+ sid = H5Screate_simple (MAX_RANK, dims, max_dims);
+ VRFY((sid >= 0), "H5Screate_simple succeeded");
+
+ /* create an extendible dataset collectively */
+ dataset1 = H5Dcreate2(fid, DATASETNAME1, H5T_NATIVE_INT, sid, H5P_DEFAULT, dataset_pl, H5P_DEFAULT);
+ VRFY((dataset1 >= 0), "H5Dcreate2 succeeded");
+
+ /* create another extendible dataset collectively */
+ dataset2 = H5Dcreate2(fid, DATASETNAME2, H5T_NATIVE_INT, sid, H5P_DEFAULT, dataset_pl, H5P_DEFAULT);
+ VRFY((dataset2 >= 0), "H5Dcreate2 succeeded");
+
+ /* release resource */
+ H5Sclose(sid);
+ H5Pclose(dataset_pl);
+
+
+
+ /* -------------------------
+ * Test writing to dataset1
+ * -------------------------*/
+ /* set up dimensions of the slab this process accesses */
+ slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW);
+
+ /* put some trivial data in the data_array */
+ dataset_fill(start, block, data_array1);
+ MESG("data_array initialized");
+ if(VERBOSE_MED) {
+ MESG("data_array created");
+ dataset_print(start, block, data_array1);
+ }
+
+ /* create a memory dataspace independently */
+ mem_dataspace = H5Screate_simple (MAX_RANK, block, NULL);
+ VRFY((mem_dataspace >= 0), "");
+
+ /* Extend its current dim sizes before writing */
+ dims[0] = (hsize_t)dim0;
+ dims[1] = (hsize_t)dim1;
+ ret = H5Dset_extent(dataset1, dims);
+ VRFY((ret >= 0), "H5Dset_extent succeeded");
+
+ /* create a file dataspace independently */
+ file_dataspace = H5Dget_space (dataset1);
+ VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
+ ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
+
+ /* write data independently */
+ ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
+ H5P_DEFAULT, data_array1);
+ VRFY((ret >= 0), "H5Dwrite succeeded");
+
+ /* release resource */
+ H5Sclose(file_dataspace);
+ H5Sclose(mem_dataspace);
+
+
+ /* -------------------------
+ * Test writing to dataset2
+ * -------------------------*/
+ /* set up dimensions of the slab this process accesses */
+ slab_set(mpi_rank, mpi_size, start, count, stride, block, BYCOL);
+
+ /* put some trivial data in the data_array */
+ dataset_fill(start, block, data_array1);
+ MESG("data_array initialized");
+ if(VERBOSE_MED){
+ MESG("data_array created");
+ dataset_print(start, block, data_array1);
+ }
+
+ /* create a memory dataspace independently */
+ mem_dataspace = H5Screate_simple (MAX_RANK, block, NULL);
+ VRFY((mem_dataspace >= 0), "");
+
+    /* Try writing to dataset2 beyond its current dim sizes. Should fail. */
+    /* Temporarily turn off auto error reporting */
+ H5Eget_auto2(H5E_DEFAULT, &old_func, &old_client_data);
+ H5Eset_auto2(H5E_DEFAULT, NULL, NULL);
+
+ /* create a file dataspace independently */
+ file_dataspace = H5Dget_space (dataset2);
+ VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
+ ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
+
+ /* write data independently. Should fail. */
+ ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
+ H5P_DEFAULT, data_array1);
+ VRFY((ret < 0), "H5Dwrite failed as expected");
+
+ /* restore auto error reporting */
+ H5Eset_auto2(H5E_DEFAULT, old_func, old_client_data);
+ H5Sclose(file_dataspace);
+
+ /* Extend dataset2 and try again. Should succeed. */
+ dims[0] = (hsize_t)dim0;
+ dims[1] = (hsize_t)dim1;
+ ret = H5Dset_extent(dataset2, dims);
+ VRFY((ret >= 0), "H5Dset_extent succeeded");
+
+ /* create a file dataspace independently */
+ file_dataspace = H5Dget_space (dataset2);
+ VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
+ ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
+
+ /* write data independently */
+ ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
+ H5P_DEFAULT, data_array1);
+ VRFY((ret >= 0), "H5Dwrite succeeded");
+
+ /* release resource */
+ ret = H5Sclose(file_dataspace);
+ VRFY((ret >= 0), "H5Sclose succeeded");
+ ret = H5Sclose(mem_dataspace);
+ VRFY((ret >= 0), "H5Sclose succeeded");
+
+
+ /* close dataset collectively */
+ ret = H5Dclose(dataset1);
+ VRFY((ret >= 0), "H5Dclose1 succeeded");
+ ret = H5Dclose(dataset2);
+ VRFY((ret >= 0), "H5Dclose2 succeeded");
+
+ /* close the file collectively */
+ H5Fclose(fid);
+
+ /* release data buffers */
+ if(data_array1) HDfree(data_array1);
+}
+
+/*
+ * Example of using the parallel HDF5 library to create an extendible dataset
+ * and perform I/O on it in a way that verifies that the chunk cache is
+ * bypassed for parallel I/O.
+ */
+
+void
+extend_writeInd2(void)
+{
+ const char *filename;
+ hid_t fid; /* HDF5 file ID */
+ hid_t fapl_id; /* File access templates */
+ hid_t fs; /* File dataspace ID */
+ hid_t ms; /* Memory dataspace ID */
+ hid_t dataset; /* Dataset ID */
+ hsize_t orig_size=10; /* Original dataset dim size */
+ hsize_t new_size=20; /* Extended dataset dim size */
+ hsize_t one=1;
+ hsize_t max_size = H5S_UNLIMITED; /* dataset maximum dim size */
+ hsize_t chunk_size = 16384; /* chunk size */
+ hid_t dcpl; /* dataset create prop. list */
+ int written[10], /* Data to write */
+ retrieved[10]; /* Data read in */
+ int mpi_size, mpi_rank; /* MPI settings */
+ int i; /* Local index variable */
+ herr_t ret; /* Generic return value */
+
+ filename = GetTestParameters();
+ if(VERBOSE_MED)
+ HDprintf("Extend independent write test #2 on file %s\n", filename);
+
+ /* set up MPI parameters */
+ MPI_Comm_size(test_comm,&mpi_size);
+ MPI_Comm_rank(test_comm,&mpi_rank);
+
+ /* -------------------
+ * START AN HDF5 FILE
+ * -------------------*/
+ /* setup file access template */
+ fapl_id = create_faccess_plist(test_comm, MPI_INFO_NULL, facc_type);
+ VRFY((fapl_id >= 0), "create_faccess_plist succeeded");
+
+ /* create the file collectively */
+ fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id);
+ VRFY((fid >= 0), "H5Fcreate succeeded");
+
+ /* Release file-access template */
+ ret = H5Pclose(fapl_id);
+ VRFY((ret >= 0), "H5Pclose succeeded");
+
+
+ /* --------------------------------------------------------------
+ * Define the dimensions of the overall datasets and create them.
+ * ------------------------------------------------------------- */
+
+ /* set up dataset storage chunk sizes and creation property list */
+ dcpl = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY((dcpl >= 0), "H5Pcreate succeeded");
+ ret = H5Pset_chunk(dcpl, 1, &chunk_size);
+ VRFY((ret >= 0), "H5Pset_chunk succeeded");
+
+ /* setup dimensionality object */
+ fs = H5Screate_simple (1, &orig_size, &max_size);
+ VRFY((fs >= 0), "H5Screate_simple succeeded");
+
+ /* create an extendible dataset collectively */
+ dataset = H5Dcreate2(fid, DATASETNAME1, H5T_NATIVE_INT, fs, H5P_DEFAULT, dcpl, H5P_DEFAULT);
+ VRFY((dataset >= 0), "H5Dcreat2e succeeded");
+
+ /* release resource */
+ ret = H5Pclose(dcpl);
+ VRFY((ret >= 0), "H5Pclose succeeded");
+
+
+ /* -------------------------
+ * Test writing to dataset
+ * -------------------------*/
+ /* create a memory dataspace independently */
+ ms = H5Screate_simple(1, &orig_size, &max_size);
+ VRFY((ms >= 0), "H5Screate_simple succeeded");
+
+ /* put some trivial data in the data_array */
+ for(i = 0; i < (int)orig_size; i++)
+ written[i] = i;
+ MESG("data array initialized");
+ if(VERBOSE_MED) {
+ MESG("writing at offset zero: ");
+ for(i = 0; i < (int)orig_size; i++)
+ HDprintf("%s%d", i?", ":"", written[i]);
+ HDprintf("\n");
+ }
+ ret = H5Dwrite(dataset, H5T_NATIVE_INT, ms, fs, H5P_DEFAULT, written);
+ VRFY((ret >= 0), "H5Dwrite succeeded");
+
+ /* -------------------------
+ * Read initial data from dataset.
+ * -------------------------*/
+ ret = H5Dread(dataset, H5T_NATIVE_INT, ms, fs, H5P_DEFAULT, retrieved);
+ VRFY((ret >= 0), "H5Dread succeeded");
+ for (i=0; i<(int)orig_size; i++)
+ if(written[i]!=retrieved[i]) {
+ HDprintf("Line #%d: written!=retrieved: written[%d]=%d, retrieved[%d]=%d\n",__LINE__,
+ i,written[i], i,retrieved[i]);
+ nerrors++;
+ }
+ if(VERBOSE_MED){
+ MESG("read at offset zero: ");
+ for (i=0; i<(int)orig_size; i++)
+ HDprintf("%s%d", i?", ":"", retrieved[i]);
+ HDprintf("\n");
+ }
+
+ /* -------------------------
+ * Extend the dataset & retrieve new dataspace
+ * -------------------------*/
+ ret = H5Dset_extent(dataset, &new_size);
+ VRFY((ret >= 0), "H5Dset_extent succeeded");
+ ret = H5Sclose(fs);
+ VRFY((ret >= 0), "H5Sclose succeeded");
+ fs = H5Dget_space(dataset);
+ VRFY((fs >= 0), "H5Dget_space succeeded");
+
+ /* -------------------------
+ * Write to the second half of the dataset
+ * -------------------------*/
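+    /* H5_CHECKED_ASSIGN stores the hsize_t value (orig_size + i) into the
+     * int element written[i], checking that the value fits the narrower
+     * destination type. */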
+ for (i=0; i<(int)orig_size; i++)
+ H5_CHECKED_ASSIGN(written[i], int, orig_size + (hsize_t)i, hsize_t);
+ MESG("data array re-initialized");
+ if(VERBOSE_MED) {
+ MESG("writing at offset 10: ");
+ for (i=0; i<(int)orig_size; i++)
+ HDprintf("%s%d", i?", ":"", written[i]);
+ HDprintf("\n");
+ }
+ ret = H5Sselect_hyperslab(fs, H5S_SELECT_SET, &orig_size, NULL, &one, &orig_size);
+ VRFY((ret >= 0), "H5Sselect_hyperslab succeeded");
+ ret = H5Dwrite(dataset, H5T_NATIVE_INT, ms, fs, H5P_DEFAULT, written);
+ VRFY((ret >= 0), "H5Dwrite succeeded");
+
+ /* -------------------------
+ * Read the new data
+ * -------------------------*/
+ ret = H5Dread(dataset, H5T_NATIVE_INT, ms, fs, H5P_DEFAULT, retrieved);
+ VRFY((ret >= 0), "H5Dread succeeded");
+ for (i=0; i<(int)orig_size; i++)
+ if(written[i]!=retrieved[i]) {
+ HDprintf("Line #%d: written!=retrieved: written[%d]=%d, retrieved[%d]=%d\n",__LINE__,
+ i,written[i], i,retrieved[i]);
+ nerrors++;
+ }
+ if(VERBOSE_MED){
+ MESG("read at offset 10: ");
+ for (i=0; i<(int)orig_size; i++)
+ HDprintf("%s%d", i?", ":"", retrieved[i]);
+ HDprintf("\n");
+ }
+
+
+ /* Close dataset collectively */
+ ret = H5Dclose(dataset);
+ VRFY((ret >= 0), "H5Dclose succeeded");
+
+ /* Close the file collectively */
+ ret = H5Fclose(fid);
+ VRFY((ret >= 0), "H5Fclose succeeded");
+}
+
+/* Example of using the parallel HDF5 library to read an extendible dataset */
+void
+extend_readInd(void)
+{
+ hid_t fid; /* HDF5 file ID */
+ hid_t acc_tpl; /* File access templates */
+ hid_t file_dataspace; /* File dataspace ID */
+ hid_t mem_dataspace; /* memory dataspace ID */
+ hid_t dataset1, dataset2; /* Dataset ID */
+ hsize_t dims[MAX_RANK]; /* dataset dim sizes */
+ DATATYPE *data_array1 = NULL; /* data buffer */
+ DATATYPE *data_array2 = NULL; /* data buffer */
+ DATATYPE *data_origin1 = NULL; /* expected data buffer */
+ const char *filename;
+
+ hsize_t start[MAX_RANK]; /* for hyperslab setting */
+ hsize_t count[MAX_RANK], stride[MAX_RANK]; /* for hyperslab setting */
+ hsize_t block[MAX_RANK]; /* for hyperslab setting */
+
+ herr_t ret; /* Generic return value */
+ int mpi_size, mpi_rank;
+
+ MPI_Comm comm = test_comm;
+ MPI_Info info = MPI_INFO_NULL;
+
+ filename = GetTestParameters();
+ if(VERBOSE_MED)
+ HDprintf("Extend independent read test on file %s\n", filename);
+
+ /* set up MPI parameters */
+ MPI_Comm_size(test_comm,&mpi_size);
+ MPI_Comm_rank(test_comm,&mpi_rank);
+
+ /* allocate memory for data buffer */
+ data_array1 = (DATATYPE *)HDmalloc((size_t)dim0*(size_t)dim1*sizeof(DATATYPE));
+ VRFY((data_array1 != NULL), "data_array1 HDmalloc succeeded");
+ data_array2 = (DATATYPE *)HDmalloc((size_t)dim0*(size_t)dim1*sizeof(DATATYPE));
+ VRFY((data_array2 != NULL), "data_array2 HDmalloc succeeded");
+ data_origin1 = (DATATYPE *)HDmalloc((size_t)dim0*(size_t)dim1*sizeof(DATATYPE));
+ VRFY((data_origin1 != NULL), "data_origin1 HDmalloc succeeded");
+
+ /* -------------------
+ * OPEN AN HDF5 FILE
+ * -------------------*/
+ /* setup file access template */
+ acc_tpl = create_faccess_plist(comm, info, facc_type);
+ VRFY((acc_tpl >= 0), "");
+
+ /* open the file collectively */
+ fid=H5Fopen(filename,H5F_ACC_RDONLY,acc_tpl);
+ VRFY((fid >= 0), "");
+
+ /* Release file-access template */
+ ret = H5Pclose(acc_tpl);
+ VRFY((ret >= 0), "");
+
+ /* open the dataset1 collectively */
+ dataset1 = H5Dopen2(fid, DATASETNAME1, H5P_DEFAULT);
+ VRFY((dataset1 >= 0), "");
+
+ /* open another dataset collectively */
+ dataset2 = H5Dopen2(fid, DATASETNAME1, H5P_DEFAULT);
+ VRFY((dataset2 >= 0), "");
+
+    /* Try to extend dataset1, which is open RDONLY. Should fail. */
+ /* first turn off auto error reporting */
+ H5Eget_auto2(H5E_DEFAULT, &old_func, &old_client_data);
+ H5Eset_auto2(H5E_DEFAULT, NULL, NULL);
+
+ file_dataspace = H5Dget_space (dataset1);
+ VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
+ ret = H5Sget_simple_extent_dims(file_dataspace, dims, NULL);
+ VRFY((ret > 0), "H5Sget_simple_extent_dims succeeded");
+ dims[0]++;
+ ret = H5Dset_extent(dataset1, dims);
+ VRFY((ret < 0), "H5Dset_extent failed as expected");
+
+ /* restore auto error reporting */
+ H5Eset_auto2(H5E_DEFAULT, old_func, old_client_data);
+ H5Sclose(file_dataspace);
+
+
+ /* Read dataset1 using BYROW pattern */
+ /* set up dimensions of the slab this process accesses */
+ slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW);
+
+ /* create a file dataspace independently */
+ file_dataspace = H5Dget_space (dataset1);
+ VRFY((file_dataspace >= 0), "");
+ ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "");
+
+ /* create a memory dataspace independently */
+ mem_dataspace = H5Screate_simple (MAX_RANK, block, NULL);
+ VRFY((mem_dataspace >= 0), "");
+
+ /* fill dataset with test data */
+ dataset_fill(start, block, data_origin1);
+ if(VERBOSE_MED){
+ MESG("data_array created");
+        dataset_print(start, block, data_origin1);
+ }
+
+ /* read data independently */
+ ret = H5Dread(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
+ H5P_DEFAULT, data_array1);
+ VRFY((ret >= 0), "H5Dread succeeded");
+
+ /* verify the read data with original expected data */
+ ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1);
+ VRFY((ret == 0), "dataset1 read verified correct");
+ if(ret) nerrors++;
+
+ H5Sclose(mem_dataspace);
+ H5Sclose(file_dataspace);
+
+
+ /* Read dataset2 using BYCOL pattern */
+ /* set up dimensions of the slab this process accesses */
+ slab_set(mpi_rank, mpi_size, start, count, stride, block, BYCOL);
+
+ /* create a file dataspace independently */
+ file_dataspace = H5Dget_space (dataset2);
+ VRFY((file_dataspace >= 0), "");
+ ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "");
+
+ /* create a memory dataspace independently */
+ mem_dataspace = H5Screate_simple (MAX_RANK, block, NULL);
+ VRFY((mem_dataspace >= 0), "");
+
+ /* fill dataset with test data */
+ dataset_fill(start, block, data_origin1);
+ if(VERBOSE_MED){
+ MESG("data_array created");
+        dataset_print(start, block, data_origin1);
+ }
+
+ /* read data independently */
+ ret = H5Dread(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
+ H5P_DEFAULT, data_array1);
+ VRFY((ret >= 0), "H5Dread succeeded");
+
+ /* verify the read data with original expected data */
+ ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1);
+ VRFY((ret == 0), "dataset2 read verified correct");
+ if(ret) nerrors++;
+
+ H5Sclose(mem_dataspace);
+ H5Sclose(file_dataspace);
+
+ /* close dataset collectively */
+ ret = H5Dclose(dataset1);
+ VRFY((ret >= 0), "");
+ ret = H5Dclose(dataset2);
+ VRFY((ret >= 0), "");
+
+
+ /* close the file collectively */
+ H5Fclose(fid);
+
+ /* release data buffers */
+ if(data_array1) HDfree(data_array1);
+ if(data_array2) HDfree(data_array2);
+ if(data_origin1) HDfree(data_origin1);
+}
+
+/*
+ * Part 3--Collective read/write for extendible datasets.
+ */
+
+/*
+ * Example of using the parallel HDF5 library to create two extendible
+ * datasets in one HDF5 file with collective parallel MPIO access support.
+ * The datasets are of size dim0 x dim1, and each process controls a
+ * hyperslab within each dataset.
+ */
+
+void
+extend_writeAll(void)
+{
+ hid_t fid; /* HDF5 file ID */
+ hid_t acc_tpl; /* File access templates */
+ hid_t xfer_plist; /* Dataset transfer properties list */
+ hid_t sid; /* Dataspace ID */
+ hid_t file_dataspace; /* File dataspace ID */
+ hid_t mem_dataspace; /* memory dataspace ID */
+ hid_t dataset1, dataset2; /* Dataset ID */
+ const char *filename;
+ hsize_t dims[MAX_RANK]; /* dataset dim sizes */
+ hsize_t max_dims[MAX_RANK] =
+ {H5S_UNLIMITED, H5S_UNLIMITED}; /* dataset maximum dim sizes */
+ DATATYPE *data_array1 = NULL; /* data buffer */
+ hsize_t chunk_dims[MAX_RANK]; /* chunk sizes */
+ hid_t dataset_pl; /* dataset create prop. list */
+
+ hsize_t start[MAX_RANK]; /* for hyperslab setting */
+ hsize_t count[MAX_RANK]; /* for hyperslab setting */
+ hsize_t stride[MAX_RANK]; /* for hyperslab setting */
+ hsize_t block[MAX_RANK]; /* for hyperslab setting */
+
+ herr_t ret; /* Generic return value */
+ int mpi_size, mpi_rank;
+
+ MPI_Comm comm = test_comm;
+ MPI_Info info = MPI_INFO_NULL;
+
+ filename = GetTestParameters();
+ if(VERBOSE_MED)
+ HDprintf("Extend independent write test on file %s\n", filename);
+
+ /* set up MPI parameters */
+ MPI_Comm_size(test_comm,&mpi_size);
+ MPI_Comm_rank(test_comm,&mpi_rank);
+
+ /* setup chunk-size. Make sure sizes are > 0 */
+ chunk_dims[0] = (hsize_t)chunkdim0;
+ chunk_dims[1] = (hsize_t)chunkdim1;
+
+ /* allocate memory for data buffer */
+ data_array1 = (DATATYPE *)HDmalloc((size_t)dim0*(size_t)dim1*sizeof(DATATYPE));
+ VRFY((data_array1 != NULL), "data_array1 HDmalloc succeeded");
+
+ /* -------------------
+ * START AN HDF5 FILE
+ * -------------------*/
+ /* setup file access template */
+ acc_tpl = create_faccess_plist(comm, info, facc_type);
+ VRFY((acc_tpl >= 0), "");
+
+/* Reduce the number of metadata cache slots, so that there are cache
+ * collisions during the raw data I/O on the chunked dataset. This stresses
+ * the metadata cache and tests for cache bugs. -QAK
+ */
+{
+ int mdc_nelmts;
+ size_t rdcc_nelmts;
+ size_t rdcc_nbytes;
+ double rdcc_w0;
+
+ ret = H5Pget_cache(acc_tpl,&mdc_nelmts,&rdcc_nelmts,&rdcc_nbytes,&rdcc_w0);
+ VRFY((ret >= 0), "H5Pget_cache succeeded");
+ mdc_nelmts=4;
+ ret = H5Pset_cache(acc_tpl,mdc_nelmts,rdcc_nelmts,rdcc_nbytes,rdcc_w0);
+ VRFY((ret >= 0), "H5Pset_cache succeeded");
+}
+
+ /* create the file collectively */
+ fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, acc_tpl);
+ VRFY((fid >= 0), "H5Fcreate succeeded");
+
+ /* Release file-access template */
+ ret = H5Pclose(acc_tpl);
+ VRFY((ret >= 0), "");
+
+
+ /* --------------------------------------------------------------
+ * Define the dimensions of the overall datasets and create them.
+ * ------------------------------------------------------------- */
+
+ /* set up dataset storage chunk sizes and creation property list */
+ if(VERBOSE_MED)
+ HDprintf("chunks[]=%lu,%lu\n", (unsigned long)chunk_dims[0], (unsigned long)chunk_dims[1]);
+ dataset_pl = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY((dataset_pl >= 0), "H5Pcreate succeeded");
+ ret = H5Pset_chunk(dataset_pl, MAX_RANK, chunk_dims);
+ VRFY((ret >= 0), "H5Pset_chunk succeeded");
+
+ /* setup dimensionality object */
+    /* start out with zero-sized dimensions; extend them later. */
+ dims[0] = dims[1] = 0;
+ sid = H5Screate_simple (MAX_RANK, dims, max_dims);
+ VRFY((sid >= 0), "H5Screate_simple succeeded");
+
+ /* create an extendible dataset collectively */
+ dataset1 = H5Dcreate2(fid, DATASETNAME1, H5T_NATIVE_INT, sid, H5P_DEFAULT, dataset_pl, H5P_DEFAULT);
+ VRFY((dataset1 >= 0), "H5Dcreate2 succeeded");
+
+ /* create another extendible dataset collectively */
+ dataset2 = H5Dcreate2(fid, DATASETNAME2, H5T_NATIVE_INT, sid, H5P_DEFAULT, dataset_pl, H5P_DEFAULT);
+ VRFY((dataset2 >= 0), "H5Dcreate2 succeeded");
+
+ /* release resource */
+ H5Sclose(sid);
+ H5Pclose(dataset_pl);
+
+
+
+ /* -------------------------
+ * Test writing to dataset1
+ * -------------------------*/
+ /* set up dimensions of the slab this process accesses */
+ slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW);
+
+ /* put some trivial data in the data_array */
+ dataset_fill(start, block, data_array1);
+ MESG("data_array initialized");
+ if(VERBOSE_MED) {
+ MESG("data_array created");
+ dataset_print(start, block, data_array1);
+ }
+
+ /* create a memory dataspace independently */
+ mem_dataspace = H5Screate_simple (MAX_RANK, block, NULL);
+ VRFY((mem_dataspace >= 0), "");
+
+ /* Extend its current dim sizes before writing */
+ dims[0] = (hsize_t)dim0;
+ dims[1] = (hsize_t)dim1;
+ ret = H5Dset_extent(dataset1, dims);
+ VRFY((ret >= 0), "H5Dset_extent succeeded");
+
+ /* create a file dataspace independently */
+ file_dataspace = H5Dget_space (dataset1);
+ VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
+ ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
+
+ /* set up the collective transfer properties list */
+ xfer_plist = H5Pcreate (H5P_DATASET_XFER);
+ VRFY((xfer_plist >= 0), "H5Pcreate xfer succeeded");
+ ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
+ VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
+ if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret>= 0),"set independent IO collectively succeeded");
+ }
+
+
+ /* write data collectively */
+ ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
+ xfer_plist, data_array1);
+ VRFY((ret >= 0), "H5Dwrite succeeded");
+
+ /* release resource */
+ H5Sclose(file_dataspace);
+ H5Sclose(mem_dataspace);
+ H5Pclose(xfer_plist);
+
+
+ /* -------------------------
+ * Test writing to dataset2
+ * -------------------------*/
+ /* set up dimensions of the slab this process accesses */
+ slab_set(mpi_rank, mpi_size, start, count, stride, block, BYCOL);
+
+ /* put some trivial data in the data_array */
+ dataset_fill(start, block, data_array1);
+ MESG("data_array initialized");
+ if(VERBOSE_MED){
+ MESG("data_array created");
+ dataset_print(start, block, data_array1);
+ }
+
+ /* create a memory dataspace independently */
+ mem_dataspace = H5Screate_simple (MAX_RANK, block, NULL);
+ VRFY((mem_dataspace >= 0), "");
+
+ /* set up the collective transfer properties list */
+ xfer_plist = H5Pcreate (H5P_DATASET_XFER);
+ VRFY((xfer_plist >= 0), "H5Pcreate xfer succeeded");
+ ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
+ VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
+ if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret>= 0),"set independent IO collectively succeeded");
+ }
+
+
+    /* Try writing to dataset2 beyond its current dim sizes. Should fail. */
+    /* Temporarily turn off auto error reporting */
+ H5Eget_auto2(H5E_DEFAULT, &old_func, &old_client_data);
+ H5Eset_auto2(H5E_DEFAULT, NULL, NULL);
+
+ /* create a file dataspace independently */
+ file_dataspace = H5Dget_space (dataset2);
+ VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
+ ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
+
+    /* write data collectively. Should fail. */
+ ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
+ xfer_plist, data_array1);
+ VRFY((ret < 0), "H5Dwrite failed as expected");
+
+ /* restore auto error reporting */
+ H5Eset_auto2(H5E_DEFAULT, old_func, old_client_data);
+ H5Sclose(file_dataspace);
+
+ /* Extend dataset2 and try again. Should succeed. */
+ dims[0] = (hsize_t)dim0;
+ dims[1] = (hsize_t)dim1;
+ ret = H5Dset_extent(dataset2, dims);
+ VRFY((ret >= 0), "H5Dset_extent succeeded");
+
+ /* create a file dataspace independently */
+ file_dataspace = H5Dget_space (dataset2);
+ VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
+ ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
+
+    /* write data collectively */
+ ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
+ xfer_plist, data_array1);
+ VRFY((ret >= 0), "H5Dwrite succeeded");
+
+ /* release resource */
+ ret = H5Sclose(file_dataspace);
+ VRFY((ret >= 0), "H5Sclose succeeded");
+ ret = H5Sclose(mem_dataspace);
+ VRFY((ret >= 0), "H5Sclose succeeded");
+ ret = H5Pclose(xfer_plist);
+ VRFY((ret >= 0), "H5Pclose succeeded");
+
+
+ /* close dataset collectively */
+ ret = H5Dclose(dataset1);
+ VRFY((ret >= 0), "H5Dclose1 succeeded");
+ ret = H5Dclose(dataset2);
+ VRFY((ret >= 0), "H5Dclose2 succeeded");
+
+ /* close the file collectively */
+ H5Fclose(fid);
+
+ /* release data buffers */
+ if(data_array1) HDfree(data_array1);
+}
+
+/* Example of using the parallel HDF5 library to read an extendible dataset */
+void
+extend_readAll(void)
+{
+ hid_t fid; /* HDF5 file ID */
+ hid_t acc_tpl; /* File access templates */
+ hid_t xfer_plist; /* Dataset transfer properties list */
+ hid_t file_dataspace; /* File dataspace ID */
+ hid_t mem_dataspace; /* memory dataspace ID */
+ hid_t dataset1, dataset2; /* Dataset ID */
+ const char *filename;
+ hsize_t dims[MAX_RANK]; /* dataset dim sizes */
+ DATATYPE *data_array1 = NULL; /* data buffer */
+ DATATYPE *data_array2 = NULL; /* data buffer */
+ DATATYPE *data_origin1 = NULL; /* expected data buffer */
+
+ hsize_t start[MAX_RANK]; /* for hyperslab setting */
+ hsize_t count[MAX_RANK], stride[MAX_RANK]; /* for hyperslab setting */
+ hsize_t block[MAX_RANK]; /* for hyperslab setting */
+
+ herr_t ret; /* Generic return value */
+ int mpi_size, mpi_rank;
+
+ MPI_Comm comm = test_comm;
+ MPI_Info info = MPI_INFO_NULL;
+
+ filename = GetTestParameters();
+ if(VERBOSE_MED)
+ HDprintf("Extend independent read test on file %s\n", filename);
+
+ /* set up MPI parameters */
+ MPI_Comm_size(test_comm,&mpi_size);
+ MPI_Comm_rank(test_comm,&mpi_rank);
+
+ /* allocate memory for data buffer */
+ data_array1 = (DATATYPE *)HDmalloc((size_t)dim0*(size_t)dim1*sizeof(DATATYPE));
+ VRFY((data_array1 != NULL), "data_array1 HDmalloc succeeded");
+ data_array2 = (DATATYPE *)HDmalloc((size_t)dim0*(size_t)dim1*sizeof(DATATYPE));
+ VRFY((data_array2 != NULL), "data_array2 HDmalloc succeeded");
+ data_origin1 = (DATATYPE *)HDmalloc((size_t)dim0*(size_t)dim1*sizeof(DATATYPE));
+ VRFY((data_origin1 != NULL), "data_origin1 HDmalloc succeeded");
+
+ /* -------------------
+ * OPEN AN HDF5 FILE
+ * -------------------*/
+ /* setup file access template */
+ acc_tpl = create_faccess_plist(comm, info, facc_type);
+ VRFY((acc_tpl >= 0), "");
+
+ /* open the file collectively */
+ fid=H5Fopen(filename,H5F_ACC_RDONLY,acc_tpl);
+ VRFY((fid >= 0), "");
+
+ /* Release file-access template */
+ ret = H5Pclose(acc_tpl);
+ VRFY((ret >= 0), "");
+
+ /* open the dataset1 collectively */
+ dataset1 = H5Dopen2(fid, DATASETNAME1, H5P_DEFAULT);
+ VRFY((dataset1 >= 0), "");
+
+ /* open another dataset collectively */
+ dataset2 = H5Dopen2(fid, DATASETNAME1, H5P_DEFAULT);
+ VRFY((dataset2 >= 0), "");
+
+    /* Try to extend dataset1, which is open RDONLY. Should fail. */
+ /* first turn off auto error reporting */
+ H5Eget_auto2(H5E_DEFAULT, &old_func, &old_client_data);
+ H5Eset_auto2(H5E_DEFAULT, NULL, NULL);
+
+ file_dataspace = H5Dget_space (dataset1);
+ VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
+ ret = H5Sget_simple_extent_dims(file_dataspace, dims, NULL);
+ VRFY((ret > 0), "H5Sget_simple_extent_dims succeeded");
+ dims[0]++;
+ ret = H5Dset_extent(dataset1, dims);
+ VRFY((ret < 0), "H5Dset_extent failed as expected");
+
+ /* restore auto error reporting */
+ H5Eset_auto2(H5E_DEFAULT, old_func, old_client_data);
+ H5Sclose(file_dataspace);
+
+
+ /* Read dataset1 using BYROW pattern */
+ /* set up dimensions of the slab this process accesses */
+ slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW);
+
+ /* create a file dataspace independently */
+ file_dataspace = H5Dget_space (dataset1);
+ VRFY((file_dataspace >= 0), "");
+ ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "");
+
+ /* create a memory dataspace independently */
+ mem_dataspace = H5Screate_simple (MAX_RANK, block, NULL);
+ VRFY((mem_dataspace >= 0), "");
+
+ /* fill dataset with test data */
+ dataset_fill(start, block, data_origin1);
+ if(VERBOSE_MED){
+ MESG("data_array created");
+        dataset_print(start, block, data_origin1);
+ }
+
+ /* set up the collective transfer properties list */
+ xfer_plist = H5Pcreate (H5P_DATASET_XFER);
+ VRFY((xfer_plist >= 0), "H5Pcreate xfer succeeded");
+ ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
+ VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
+ if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret>= 0),"set independent IO collectively succeeded");
+ }
+
+
+ /* read data collectively */
+ ret = H5Dread(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
+ xfer_plist, data_array1);
+ VRFY((ret >= 0), "H5Dread succeeded");
+
+ /* verify the read data with original expected data */
+ ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1);
+ VRFY((ret == 0), "dataset1 read verified correct");
+ if(ret) nerrors++;
+
+ H5Sclose(mem_dataspace);
+ H5Sclose(file_dataspace);
+ H5Pclose(xfer_plist);
+
+
+ /* Read dataset2 using BYCOL pattern */
+ /* set up dimensions of the slab this process accesses */
+ slab_set(mpi_rank, mpi_size, start, count, stride, block, BYCOL);
+
+ /* create a file dataspace independently */
+ file_dataspace = H5Dget_space (dataset2);
+ VRFY((file_dataspace >= 0), "");
+ ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "");
+
+ /* create a memory dataspace independently */
+ mem_dataspace = H5Screate_simple (MAX_RANK, block, NULL);
+ VRFY((mem_dataspace >= 0), "");
+
+ /* fill dataset with test data */
+ dataset_fill(start, block, data_origin1);
+ if(VERBOSE_MED){
+ MESG("data_array created");
+        dataset_print(start, block, data_origin1);
+ }
+
+ /* set up the collective transfer properties list */
+ xfer_plist = H5Pcreate (H5P_DATASET_XFER);
+ VRFY((xfer_plist >= 0), "H5Pcreate xfer succeeded");
+ ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
+ VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
+ if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret>= 0),"set independent IO collectively succeeded");
+ }
+
+
+ /* read data collectively */
+ ret = H5Dread(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
+ xfer_plist, data_array1);
+ VRFY((ret >= 0), "H5Dread succeeded");
+
+ /* verify the read data with original expected data */
+ ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1);
+ VRFY((ret == 0), "dataset2 read verified correct");
+ if(ret) nerrors++;
+
+ H5Sclose(mem_dataspace);
+ H5Sclose(file_dataspace);
+ H5Pclose(xfer_plist);
+
+ /* close dataset collectively */
+ ret = H5Dclose(dataset1);
+ VRFY((ret >= 0), "");
+ ret = H5Dclose(dataset2);
+ VRFY((ret >= 0), "");
+
+
+ /* close the file collectively */
+ H5Fclose(fid);
+
+ /* release data buffers */
+ if(data_array1) HDfree(data_array1);
+ if(data_array2) HDfree(data_array2);
+ if(data_origin1) HDfree(data_origin1);
+}
+
+/*
+ * Example of using the parallel HDF5 library to read a compressed
+ * dataset in an HDF5 file with collective parallel access support.
+ */
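+/*
+ * A minimal sketch (not part of the test) of the dcpl setup used below,
+ * assuming a 1-D dataspace of `nelmts` elements split into 8 chunks:
+ *
+ *     hid_t   dcpl      = H5Pcreate(H5P_DATASET_CREATE);
+ *     hsize_t chunk_dim = nelmts / 8;
+ *     H5Pset_layout(dcpl, H5D_CHUNKED);
+ *     H5Pset_chunk(dcpl, 1, &chunk_dim);
+ *     H5Pset_deflate(dcpl, 9);
+ *
+ * Rank 0 creates and writes the compressed dataset serially; all ranks
+ * then reopen the file and read it back collectively.
+ */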
+#ifdef H5_HAVE_FILTER_DEFLATE
+void
+compress_readAll(void)
+{
+ hid_t fid; /* HDF5 file ID */
+ hid_t acc_tpl; /* File access templates */
+ hid_t dcpl; /* Dataset creation property list */
+ hid_t xfer_plist; /* Dataset transfer properties list */
+ hid_t dataspace; /* Dataspace ID */
+ hid_t dataset; /* Dataset ID */
+ int rank=1; /* Dataspace rank */
+ hsize_t dim=(hsize_t)dim0; /* Dataspace dimensions */
+ unsigned u; /* Local index variable */
+ unsigned chunk_opts; /* Chunk options */
+ unsigned disable_partial_chunk_filters; /* Whether filters are disabled on partial chunks */
+ DATATYPE *data_read = NULL; /* data buffer */
+ DATATYPE *data_orig = NULL; /* expected data buffer */
+ const char *filename;
+ MPI_Comm comm = test_comm;
+ MPI_Info info = MPI_INFO_NULL;
+ int mpi_size, mpi_rank;
+ herr_t ret; /* Generic return value */
+
+ filename = GetTestParameters();
+ if(VERBOSE_MED)
+ HDprintf("Collective chunked dataset read test on file %s\n", filename);
+
+ /* Retrieve MPI parameters */
+ MPI_Comm_size(comm,&mpi_size);
+ MPI_Comm_rank(comm,&mpi_rank);
+
+ /* Allocate data buffer */
+    data_orig = (DATATYPE *)HDmalloc((size_t)dim*sizeof(DATATYPE));
+    VRFY((data_orig != NULL), "data_orig HDmalloc succeeded");
+    data_read = (DATATYPE *)HDmalloc((size_t)dim*sizeof(DATATYPE));
+    VRFY((data_read != NULL), "data_read HDmalloc succeeded");
+
+ /* Initialize data buffers */
+ for(u=0; u<dim;u++)
+ data_orig[u]=(DATATYPE)u;
+
+ /* Run test both with and without filters disabled on partial chunks */
+ for(disable_partial_chunk_filters = 0; disable_partial_chunk_filters <= 1;
+ disable_partial_chunk_filters++) {
+ /* Process zero creates the file with a compressed, chunked dataset */
+ if(mpi_rank==0) {
+ hsize_t chunk_dim; /* Chunk dimensions */
+
+ /* Create the file */
+ fid = H5Fcreate(h5_rmprefix(filename), H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ VRFY((fid > 0), "H5Fcreate succeeded");
+
+ /* Create property list for chunking and compression */
+ dcpl = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY((dcpl > 0), "H5Pcreate succeeded");
+
+ ret = H5Pset_layout(dcpl, H5D_CHUNKED);
+ VRFY((ret >= 0), "H5Pset_layout succeeded");
+
+ /* Use eight chunks */
+ chunk_dim = dim / 8;
+ ret = H5Pset_chunk(dcpl, rank, &chunk_dim);
+ VRFY((ret >= 0), "H5Pset_chunk succeeded");
+
+ /* Set chunk options appropriately */
+ if(disable_partial_chunk_filters) {
+ ret = H5Pget_chunk_opts(dcpl, &chunk_opts);
+ VRFY((ret>=0),"H5Pget_chunk_opts succeeded");
+
+ chunk_opts |= H5D_CHUNK_DONT_FILTER_PARTIAL_CHUNKS;
+
+ ret = H5Pset_chunk_opts(dcpl, chunk_opts);
+ VRFY((ret>=0),"H5Pset_chunk_opts succeeded");
+ } /* end if */
+
+ ret = H5Pset_deflate(dcpl, 9);
+ VRFY((ret >= 0), "H5Pset_deflate succeeded");
+
+ /* Create dataspace */
+ dataspace = H5Screate_simple(rank, &dim, NULL);
+ VRFY((dataspace > 0), "H5Screate_simple succeeded");
+
+ /* Create dataset */
+ dataset = H5Dcreate2(fid, "compressed_data", H5T_NATIVE_INT, dataspace, H5P_DEFAULT, dcpl, H5P_DEFAULT);
+ VRFY((dataset > 0), "H5Dcreate2 succeeded");
+
+ /* Write compressed data */
+ ret = H5Dwrite(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, data_orig);
+ VRFY((ret >= 0), "H5Dwrite succeeded");
+
+ /* Close objects */
+ ret = H5Pclose(dcpl);
+ VRFY((ret >= 0), "H5Pclose succeeded");
+ ret = H5Sclose(dataspace);
+ VRFY((ret >= 0), "H5Sclose succeeded");
+ ret = H5Dclose(dataset);
+ VRFY((ret >= 0), "H5Dclose succeeded");
+ ret = H5Fclose(fid);
+ VRFY((ret >= 0), "H5Fclose succeeded");
+ }
+
+ /* Wait for file to be created */
+ MPI_Barrier(comm);
+
+ /* -------------------
+ * OPEN AN HDF5 FILE
+ * -------------------*/
+
+ /* setup file access template */
+ acc_tpl = create_faccess_plist(comm, info, facc_type);
+ VRFY((acc_tpl >= 0), "");
+
+ /* open the file collectively */
+ fid=H5Fopen(filename,H5F_ACC_RDWR,acc_tpl);
+ VRFY((fid > 0), "H5Fopen succeeded");
+
+ /* Release file-access template */
+ ret = H5Pclose(acc_tpl);
+ VRFY((ret >= 0), "H5Pclose succeeded");
+
+
+ /* Open dataset with compressed chunks */
+ dataset = H5Dopen2(fid, "compressed_data", H5P_DEFAULT);
+ VRFY((dataset > 0), "H5Dopen2 succeeded");
+
+ /* Try reading & writing data */
+ if(dataset>0) {
+ /* Create dataset transfer property list */
+ xfer_plist = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((xfer_plist > 0), "H5Pcreate succeeded");
+
+ ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
+ VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
+ if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret>= 0),"set independent IO collectively succeeded");
+ }
+
+
+ /* Try reading the data */
+ ret = H5Dread(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, xfer_plist, data_read);
+ VRFY((ret >= 0), "H5Dread succeeded");
+
+ /* Verify data read */
+ for(u=0; u<dim; u++)
+ if(data_orig[u]!=data_read[u]) {
+ HDprintf("Line #%d: written!=retrieved: data_orig[%u]=%d, data_read[%u]=%d\n",__LINE__,
+ (unsigned)u,data_orig[u],(unsigned)u,data_read[u]);
+ nerrors++;
+ }
+
+#if MPI_VERSION >= 3
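+            /* Note: collective writes to filtered datasets rely on MPI-3
+             * features; that is (we assume) why this write is guarded by
+             * the MPI version check above */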
+ ret = H5Dwrite(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, xfer_plist, data_read);
+ VRFY((ret >= 0), "H5Dwrite succeeded");
+#endif
+
+ ret = H5Pclose(xfer_plist);
+ VRFY((ret >= 0), "H5Pclose succeeded");
+ ret = H5Dclose(dataset);
+ VRFY((ret >= 0), "H5Dclose succeeded");
+ } /* end if */
+
+ /* Close file */
+ ret = H5Fclose(fid);
+ VRFY((ret >= 0), "H5Fclose succeeded");
+ } /* end for */
+
+ /* release data buffers */
+ if(data_read) HDfree(data_read);
+ if(data_orig) HDfree(data_orig);
+}
+#endif /* H5_HAVE_FILTER_DEFLATE */
+
+/*
+ * Part 4--Non-selection for chunked dataset
+ */
+
+/*
+ * Example of using the parallel HDF5 library to create chunked
+ * datasets in one HDF5 file with collective and independent parallel
+ * MPIO access support. The datasets are of size dim0 x dim1.
+ * Each process controls only a slab of size dim0 x dim1 within each
+ * dataset, with the exception that one process selects no elements.
+ */
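+
+/*
+ * A minimal sketch (not part of the test) of how one rank opts out of a
+ * collective transfer, assuming valid mem_space/file_space IDs and a
+ * collective transfer plist:
+ *
+ *     if (mpi_rank == 0) {
+ *         H5Sselect_none(mem_space);
+ *         H5Sselect_none(file_space);
+ *     }
+ *     H5Dwrite(dset, H5T_NATIVE_INT, mem_space, file_space, xfer_plist, buf);
+ *
+ * The write is still collective - every rank must make the call - but
+ * rank 0 contributes zero elements.
+ */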
+
+void
+none_selection_chunk(void)
+{
+ hid_t fid; /* HDF5 file ID */
+ hid_t acc_tpl; /* File access templates */
+ hid_t xfer_plist; /* Dataset transfer properties list */
+ hid_t sid; /* Dataspace ID */
+ hid_t file_dataspace; /* File dataspace ID */
+ hid_t mem_dataspace; /* memory dataspace ID */
+ hid_t dataset1, dataset2; /* Dataset ID */
+ const char *filename;
+ hsize_t dims[MAX_RANK]; /* dataset dim sizes */
+ DATATYPE *data_origin = NULL; /* data buffer */
+ DATATYPE *data_array = NULL; /* data buffer */
+ hsize_t chunk_dims[MAX_RANK]; /* chunk sizes */
+ hid_t dataset_pl; /* dataset create prop. list */
+
+ hsize_t start[MAX_RANK]; /* for hyperslab setting */
+ hsize_t count[MAX_RANK]; /* for hyperslab setting */
+ hsize_t stride[MAX_RANK]; /* for hyperslab setting */
+ hsize_t block[MAX_RANK]; /* for hyperslab setting */
+ hsize_t mstart[MAX_RANK]; /* for data buffer in memory */
+
+ herr_t ret; /* Generic return value */
+ int mpi_size, mpi_rank;
+
+ MPI_Comm comm = test_comm;
+ MPI_Info info = MPI_INFO_NULL;
+
+ filename = GetTestParameters();
+ if(VERBOSE_MED)
+        HDprintf("Chunked dataset with none-selection test on file %s\n", filename);
+
+ /* set up MPI parameters */
+ MPI_Comm_size(test_comm,&mpi_size);
+ MPI_Comm_rank(test_comm,&mpi_rank);
+
+ /* setup chunk-size. Make sure sizes are > 0 */
+ chunk_dims[0] = (hsize_t)chunkdim0;
+ chunk_dims[1] = (hsize_t)chunkdim1;
+
+ /* -------------------
+ * START AN HDF5 FILE
+ * -------------------*/
+ /* setup file access template */
+ acc_tpl = create_faccess_plist(comm, info, facc_type);
+ VRFY((acc_tpl >= 0), "");
+
+ /* create the file collectively */
+ fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, acc_tpl);
+ VRFY((fid >= 0), "H5Fcreate succeeded");
+
+ /* Release file-access template */
+ ret = H5Pclose(acc_tpl);
+ VRFY((ret >= 0), "");
+
+ /* --------------------------------------------------------------
+ * Define the dimensions of the overall datasets and create them.
+ * ------------------------------------------------------------- */
+
+ /* set up dataset storage chunk sizes and creation property list */
+ if(VERBOSE_MED)
+ HDprintf("chunks[]=%lu,%lu\n", (unsigned long)chunk_dims[0], (unsigned long)chunk_dims[1]);
+ dataset_pl = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY((dataset_pl >= 0), "H5Pcreate succeeded");
+ ret = H5Pset_chunk(dataset_pl, MAX_RANK, chunk_dims);
+ VRFY((ret >= 0), "H5Pset_chunk succeeded");
+
+ /* setup dimensionality object */
+ dims[0] = (hsize_t)dim0;
+ dims[1] = (hsize_t)dim1;
+ sid = H5Screate_simple(MAX_RANK, dims, NULL);
+ VRFY((sid >= 0), "H5Screate_simple succeeded");
+
+ /* create an extendible dataset collectively */
+ dataset1 = H5Dcreate2(fid, DATASETNAME1, H5T_NATIVE_INT, sid, H5P_DEFAULT, dataset_pl, H5P_DEFAULT);
+ VRFY((dataset1 >= 0), "H5Dcreate2 succeeded");
+
+ /* create another extendible dataset collectively */
+ dataset2 = H5Dcreate2(fid, DATASETNAME2, H5T_NATIVE_INT, sid, H5P_DEFAULT, dataset_pl, H5P_DEFAULT);
+ VRFY((dataset2 >= 0), "H5Dcreate2 succeeded");
+
+ /* release resource */
+ H5Sclose(sid);
+ H5Pclose(dataset_pl);
+
+ /* -------------------------
+ * Test collective writing to dataset1
+ * -------------------------*/
+ /* set up dimensions of the slab this process accesses */
+ slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW);
+
+ /* allocate memory for data buffer. Only allocate enough buffer for
+ * each processor's data. */
+ if(mpi_rank) {
+ data_origin = (DATATYPE *)HDmalloc(block[0]*block[1]*sizeof(DATATYPE));
+ VRFY((data_origin != NULL), "data_origin HDmalloc succeeded");
+
+ data_array = (DATATYPE *)HDmalloc(block[0]*block[1]*sizeof(DATATYPE));
+ VRFY((data_array != NULL), "data_array HDmalloc succeeded");
+
+ /* put some trivial data in the data_array */
+ mstart[0] = mstart[1] = 0;
+ dataset_fill(mstart, block, data_origin);
+ MESG("data_array initialized");
+ if(VERBOSE_MED){
+ MESG("data_array created");
+ dataset_print(mstart, block, data_origin);
+ }
+ }
+
+ /* create a memory dataspace independently */
+ mem_dataspace = H5Screate_simple (MAX_RANK, block, NULL);
+ VRFY((mem_dataspace >= 0), "");
+
+ /* Process 0 has no selection */
+ if(!mpi_rank) {
+ ret = H5Sselect_none(mem_dataspace);
+ VRFY((ret >= 0), "H5Sselect_none succeeded");
+ }
+
+ /* create a file dataspace independently */
+ file_dataspace = H5Dget_space (dataset1);
+ VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
+ ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
+    VRFY((ret >= 0), "H5Sselect_hyperslab succeeded");
+
+ /* Process 0 has no selection */
+ if(!mpi_rank) {
+ ret = H5Sselect_none(file_dataspace);
+ VRFY((ret >= 0), "H5Sselect_none succeeded");
+ }
+
+ /* set up the collective transfer properties list */
+ xfer_plist = H5Pcreate (H5P_DATASET_XFER);
+ VRFY((xfer_plist >= 0), "H5Pcreate xfer succeeded");
+ ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
+ VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
+
+ /* write data collectively */
+ ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
+ xfer_plist, data_origin);
+ VRFY((ret >= 0), "H5Dwrite succeeded");
+
+ /* read data independently */
+ ret = H5Dread(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
+ H5P_DEFAULT, data_array);
+ VRFY((ret >= 0), "");
+
+ /* verify the read data with original expected data */
+ if(mpi_rank) {
+ ret = dataset_vrfy(mstart, count, stride, block, data_array, data_origin);
+ if(ret) nerrors++;
+ }
+
+ /* -------------------------
+ * Test independent writing to dataset2
+ * -------------------------*/
+ ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_INDEPENDENT);
+ VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
+
+    /* write data independently (dxpl is now set to H5FD_MPIO_INDEPENDENT) */
+ ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
+ xfer_plist, data_origin);
+ VRFY((ret >= 0), "H5Dwrite succeeded");
+
+ /* read data independently */
+ ret = H5Dread(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
+ H5P_DEFAULT, data_array);
+ VRFY((ret >= 0), "");
+
+ /* verify the read data with original expected data */
+ if(mpi_rank) {
+ ret = dataset_vrfy(mstart, count, stride, block, data_array, data_origin);
+ if(ret) nerrors++;
+ }
+
+ /* release resource */
+ ret = H5Sclose(file_dataspace);
+ VRFY((ret >= 0), "H5Sclose succeeded");
+ ret = H5Sclose(mem_dataspace);
+ VRFY((ret >= 0), "H5Sclose succeeded");
+ ret = H5Pclose(xfer_plist);
+ VRFY((ret >= 0), "H5Pclose succeeded");
+
+
+ /* close dataset collectively */
+ ret = H5Dclose(dataset1);
+ VRFY((ret >= 0), "H5Dclose1 succeeded");
+ ret = H5Dclose(dataset2);
+ VRFY((ret >= 0), "H5Dclose2 succeeded");
+
+ /* close the file collectively */
+ H5Fclose(fid);
+
+ /* release data buffers */
+ if(data_origin) HDfree(data_origin);
+ if(data_array) HDfree(data_array);
+}
+
+
+/* Function: test_actual_io_mode
+ *
+ * Purpose: tests one specific case of collective I/O and checks that the
+ *          actual_chunk_opt_mode and actual_io_mode properties in the
+ *          DXPL have the correct values.
+ *
+ * Input: selection_mode: changes the way processes select data from the space, as well
+ * as some dxpl flags to get collective I/O to break in different ways.
+ *
+ * The relevant I/O function and expected response for each mode:
+ * TEST_ACTUAL_IO_MULTI_CHUNK_IND:
+ * H5D_mpi_chunk_collective_io, each process reports independent I/O
+ *
+ * TEST_ACTUAL_IO_MULTI_CHUNK_COL:
+ * H5D_mpi_chunk_collective_io, each process reports collective I/O
+ *
+ * TEST_ACTUAL_IO_MULTI_CHUNK_MIX:
+ * H5D_mpi_chunk_collective_io, each process reports mixed I/O
+ *
+ * TEST_ACTUAL_IO_MULTI_CHUNK_MIX_DISAGREE:
+ * H5D_mpi_chunk_collective_io, processes disagree. The root reports
+ * collective, the rest report independent I/O
+ *
+ * TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_IND:
+ *           Same test as TEST_ACTUAL_IO_MULTI_CHUNK_IND, but goes directly
+ *           to multi-chunk I/O without the num-chunk threshold calculation.
+ *       TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_COL:
+ *           Same test as TEST_ACTUAL_IO_MULTI_CHUNK_COL, but goes directly
+ *           to multi-chunk I/O without the num-chunk threshold calculation.
+ *
+ * TEST_ACTUAL_IO_LINK_CHUNK:
+ * H5D_link_chunk_collective_io, processes report linked chunk I/O
+ *
+ * TEST_ACTUAL_IO_CONTIGUOUS:
+ * H5D__contig_collective_write or H5D__contig_collective_read
+ * each process reports contiguous collective I/O
+ *
+ * TEST_ACTUAL_IO_NO_COLLECTIVE:
+ * Simple independent I/O. This tests that the defaults are properly set.
+ *
+ * TEST_ACTUAL_IO_RESET:
+ *           Performs collective and then independent I/O with the same dxpl to
+ *           make sure the property is correctly reset to the default on each use.
+ *           Specifically, this test runs TEST_ACTUAL_IO_MULTI_CHUNK_MIX_DISAGREE
+ *           (the most complex case that works on all builds) and then performs
+ *           an independent read and write with the same dxpls.
+ *
+ * Note: DIRECT_MULTI_CHUNK_MIX and DIRECT_MULTI_CHUNK_MIX_DISAGREE
+ *       are not needed, as they are covered by the MULTI_CHUNK_MIX and
+ *       MULTI_CHUNK_MIX_DISAGREE cases. The _DIRECT_ cases only test the
+ *       pathway to multi-chunk I/O via H5FD_MPIO_CHUNK_MULTI_IO instead of
+ *       the num-chunk threshold.
+ *
+ * Modification:
+ *  - Refactored to remove the multi-chunk-without-optimization test and
+ *    updated for testing direct multi-chunk I/O
+ * Programmer: Jonathan Kim
+ * Date: 2012-10-10
+ *
+ *
+ * Programmer: Jacob Gruber
+ * Date: 2011-04-06
+ */
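+/*
+ * A minimal sketch (not part of the test) of the queries this function
+ * exercises, where dxpl is the transfer plist passed to H5Dwrite/H5Dread:
+ *
+ *     H5D_mpio_actual_io_mode_t        io_mode;
+ *     H5D_mpio_actual_chunk_opt_mode_t opt_mode;
+ *
+ *     H5Dwrite(dset, type, mspace, fspace, dxpl, buf);
+ *     H5Pget_mpio_actual_io_mode(dxpl, &io_mode);
+ *     H5Pget_mpio_actual_chunk_opt_mode(dxpl, &opt_mode);
+ *
+ * Both properties are set by the library during the transfer, so they are
+ * only meaningful after the H5Dread/H5Dwrite call returns.
+ */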
+static void
+test_actual_io_mode(int selection_mode) {
+ H5D_mpio_actual_chunk_opt_mode_t actual_chunk_opt_mode_write = -1;
+ H5D_mpio_actual_chunk_opt_mode_t actual_chunk_opt_mode_read = -1;
+ H5D_mpio_actual_chunk_opt_mode_t actual_chunk_opt_mode_expected = -1;
+ H5D_mpio_actual_io_mode_t actual_io_mode_write = -1;
+ H5D_mpio_actual_io_mode_t actual_io_mode_read = -1;
+ H5D_mpio_actual_io_mode_t actual_io_mode_expected = -1;
+ const char * filename;
+ const char * test_name;
+ hbool_t direct_multi_chunk_io;
+ hbool_t multi_chunk_io;
+ hbool_t is_chunked;
+ hbool_t is_collective;
+ int mpi_size = -1;
+ int mpi_rank = -1;
+ int length;
+ int * buffer;
+ int i;
+ MPI_Comm mpi_comm = MPI_COMM_NULL;
+ MPI_Info mpi_info = MPI_INFO_NULL;
+ hid_t fid = -1;
+ hid_t sid = -1;
+ hid_t dataset = -1;
+ hid_t data_type = H5T_NATIVE_INT;
+ hid_t fapl_id = -1;
+ hid_t mem_space = -1;
+ hid_t file_space = -1;
+ hid_t dcpl = -1;
+ hid_t dxpl_write = -1;
+ hid_t dxpl_read = -1;
+ hsize_t dims[MAX_RANK];
+ hsize_t chunk_dims[MAX_RANK];
+ hsize_t start[MAX_RANK];
+ hsize_t stride[MAX_RANK];
+ hsize_t count[MAX_RANK];
+ hsize_t block[MAX_RANK];
+ char message[256];
+ herr_t ret;
+
+ /* Set up some flags to make some future if statements slightly more readable */
+ direct_multi_chunk_io = (
+ selection_mode == TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_IND ||
+ selection_mode == TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_COL );
+
+ /* Note: RESET performs the same tests as MULTI_CHUNK_MIX_DISAGREE and then
+ * tests independent I/O
+ */
+ multi_chunk_io = (
+ selection_mode == TEST_ACTUAL_IO_MULTI_CHUNK_IND ||
+ selection_mode == TEST_ACTUAL_IO_MULTI_CHUNK_COL ||
+ selection_mode == TEST_ACTUAL_IO_MULTI_CHUNK_MIX ||
+ selection_mode == TEST_ACTUAL_IO_MULTI_CHUNK_MIX_DISAGREE ||
+ selection_mode == TEST_ACTUAL_IO_RESET );
+
+ is_chunked = (
+ selection_mode != TEST_ACTUAL_IO_CONTIGUOUS &&
+ selection_mode != TEST_ACTUAL_IO_NO_COLLECTIVE);
+
+ is_collective = selection_mode != TEST_ACTUAL_IO_NO_COLLECTIVE;
+
+ /* Set up MPI parameters */
+ MPI_Comm_size(test_comm, &mpi_size);
+ MPI_Comm_rank(test_comm, &mpi_rank);
+
+ MPI_Barrier(test_comm);
+
+ HDassert(mpi_size >= 1);
+
+ mpi_comm = test_comm;
+ mpi_info = MPI_INFO_NULL;
+
+ filename = (const char *)GetTestParameters();
+ HDassert(filename != NULL);
+
+ /* Setup the file access template */
+ fapl_id = create_faccess_plist(mpi_comm, mpi_info, facc_type);
+ VRFY((fapl_id >= 0), "create_faccess_plist() succeeded");
+
+ /* Create the file */
+ fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id);
+ VRFY((fid >= 0), "H5Fcreate succeeded");
+
+ /* Create the basic Space */
+ dims[0] = (hsize_t)dim0;
+ dims[1] = (hsize_t)dim1;
+ sid = H5Screate_simple (MAX_RANK, dims, NULL);
+ VRFY((sid >= 0), "H5Screate_simple succeeded");
+
+ /* Create the dataset creation plist */
+ dcpl = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY((dcpl >= 0), "dataset creation plist created successfully");
+
+ /* If we are not testing contiguous datasets */
+ if(is_chunked) {
+ /* Set up chunk information. */
+ chunk_dims[0] = dims[0]/(hsize_t)mpi_size;
+ chunk_dims[1] = dims[1];
+ ret = H5Pset_chunk(dcpl, 2, chunk_dims);
+ VRFY((ret >= 0),"chunk creation property list succeeded");
+ }
+
+ /* Create the dataset */
+ dataset = H5Dcreate2(fid, "actual_io", data_type, sid, H5P_DEFAULT,
+ dcpl, H5P_DEFAULT);
+ VRFY((dataset >= 0), "H5Dcreate2() dataset succeeded");
+
+ /* Create the file dataspace */
+ file_space = H5Dget_space(dataset);
+ VRFY((file_space >= 0), "H5Dget_space succeeded");
+
+ /* Choose a selection method based on the type of I/O we want to occur,
+     * and also set up some selection-dependent test info. */
+ switch(selection_mode) {
+
+ /* Independent I/O with optimization */
+ case TEST_ACTUAL_IO_MULTI_CHUNK_IND:
+ case TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_IND:
+ /* Since the dataset is chunked by row and each process selects a row,
+ * each process writes to a different chunk. This forces all I/O to be
+ * independent.
+ */
+ slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW);
+
+ test_name = "Multi Chunk - Independent";
+ actual_chunk_opt_mode_expected = H5D_MPIO_MULTI_CHUNK;
+ actual_io_mode_expected = H5D_MPIO_CHUNK_INDEPENDENT;
+ break;
+
+ /* Collective I/O with optimization */
+ case TEST_ACTUAL_IO_MULTI_CHUNK_COL:
+ case TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_COL:
+ /* The dataset is chunked by rows, so each process takes a column which
+ * spans all chunks. Since the processes write non-overlapping regular
+ * selections to each chunk, the operation is purely collective.
+ */
+ slab_set(mpi_rank, mpi_size, start, count, stride, block, BYCOL);
+
+ test_name = "Multi Chunk - Collective";
+ actual_chunk_opt_mode_expected = H5D_MPIO_MULTI_CHUNK;
+ if(mpi_size > 1)
+ actual_io_mode_expected = H5D_MPIO_CHUNK_COLLECTIVE;
+ else
+ actual_io_mode_expected = H5D_MPIO_CHUNK_INDEPENDENT;
+ break;
+
+ /* Mixed I/O with optimization */
+ case TEST_ACTUAL_IO_MULTI_CHUNK_MIX:
+ /* A chunk will be assigned collective I/O only if it is selected by each
+ * process. To get mixed I/O, have the root select all chunks and each
+ * subsequent process select the first and nth chunk. The first chunk,
+ * accessed by all, will be assigned collective I/O while each other chunk
+         * will be accessed only by the root and the nth process and will be
+ * assigned independent I/O. Each process will access one chunk collectively
+ * and at least one chunk independently, reporting mixed I/O.
+ */
+
+ if(mpi_rank == 0) {
+ /* Select the first column */
+ slab_set(mpi_rank, mpi_size, start, count, stride, block, BYCOL);
+ } else {
+ /* Select the first and the nth chunk in the nth column */
+ block[0] = (hsize_t)(dim0 / mpi_size);
+ block[1] = (hsize_t)(dim1 / mpi_size);
+ count[0] = 2;
+ count[1] = 1;
+ stride[0] = (hsize_t)mpi_rank * block[0];
+ stride[1] = 1;
+ start[0] = 0;
+ start[1] = (hsize_t)mpi_rank*block[1];
+ }
+
+ test_name = "Multi Chunk - Mixed";
+ actual_chunk_opt_mode_expected = H5D_MPIO_MULTI_CHUNK;
+ actual_io_mode_expected = H5D_MPIO_CHUNK_MIXED;
+ break;
+
+ /* RESET tests that the properties are properly reset to defaults each time I/O is
+         * performed. To achieve this, we have RESET perform collective I/O (which would change
+         * the values from the defaults) followed by independent I/O (which should report the
+         * default values). RESET doesn't need to have a unique selection, so we reuse
+         * MULTI_CHUNK_MIX_DISAGREE, which was chosen because it is a complex case that works
+ * on all builds. The independent section of RESET can be found at the end of this function.
+ */
+ case TEST_ACTUAL_IO_RESET:
+
+ /* Mixed I/O with optimization and internal disagreement */
+ case TEST_ACTUAL_IO_MULTI_CHUNK_MIX_DISAGREE:
+ /* A chunk will be assigned collective I/O only if it is selected by each
+ * process. To get mixed I/O with disagreement, assign process n to the
+ * first chunk and the nth chunk. The first chunk, selected by all, is
+         * assigned collective I/O, while each other process gets independent I/O.
+         * Since the root process will only access the first chunk, it will report
+         * collective I/O. The subsequent processes will access the first chunk
+         * collectively, and their other chunk independently, reporting mixed I/O.
+ */
+
+ if(mpi_rank == 0) {
+ /* Select the first chunk in the first column */
+ slab_set(mpi_rank, mpi_size, start, count, stride, block, BYCOL);
+ block[0] = block[0] / (hsize_t)mpi_size;
+ } else {
+ /* Select the first and the nth chunk in the nth column */
+ block[0] = (hsize_t)(dim0 / mpi_size);
+ block[1] = (hsize_t)(dim1 / mpi_size);
+ count[0] = 2;
+ count[1] = 1;
+ stride[0] = (hsize_t)mpi_rank * block[0];
+ stride[1] = 1;
+ start[0] = 0;
+ start[1] = (hsize_t)mpi_rank*block[1];
+ }
+
+        /* Set the test name, accounting for the shared RESET case */
+ if (selection_mode == TEST_ACTUAL_IO_RESET)
+ test_name = "RESET";
+ else
+ test_name = "Multi Chunk - Mixed (Disagreement)";
+
+ actual_chunk_opt_mode_expected = H5D_MPIO_MULTI_CHUNK;
+ if(mpi_size > 1) {
+ if(mpi_rank == 0)
+ actual_io_mode_expected = H5D_MPIO_CHUNK_COLLECTIVE;
+ else
+ actual_io_mode_expected = H5D_MPIO_CHUNK_MIXED;
+ }
+ else
+ actual_io_mode_expected = H5D_MPIO_CHUNK_INDEPENDENT;
+
+ break;
+
+ /* Linked Chunk I/O */
+ case TEST_ACTUAL_IO_LINK_CHUNK:
+ /* Nothing special; link chunk I/O is forced in the dxpl settings. */
+ slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW);
+
+ test_name = "Link Chunk";
+ actual_chunk_opt_mode_expected = H5D_MPIO_LINK_CHUNK;
+ actual_io_mode_expected = H5D_MPIO_CHUNK_COLLECTIVE;
+ break;
+
+ /* Contiguous Dataset */
+ case TEST_ACTUAL_IO_CONTIGUOUS:
+            /* A non-overlapping, regular selection in a contiguous dataset leads to
+ * collective I/O */
+ slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW);
+
+ test_name = "Contiguous";
+ actual_chunk_opt_mode_expected = H5D_MPIO_NO_CHUNK_OPTIMIZATION;
+ actual_io_mode_expected = H5D_MPIO_CONTIGUOUS_COLLECTIVE;
+ break;
+
+ case TEST_ACTUAL_IO_NO_COLLECTIVE:
+ slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW);
+
+ test_name = "Independent";
+ actual_chunk_opt_mode_expected = H5D_MPIO_NO_CHUNK_OPTIMIZATION;
+ actual_io_mode_expected = H5D_MPIO_NO_COLLECTIVE;
+ break;
+
+ default:
+ test_name = "Undefined Selection Mode";
+ actual_chunk_opt_mode_expected = -1;
+ actual_io_mode_expected = -1;
+ break;
+ }
+
+ ret = H5Sselect_hyperslab(file_space, H5S_SELECT_SET, start, stride, count, block);
+    VRFY((ret >= 0), "H5Sselect_hyperslab succeeded");
+
+ /* Create a memory dataspace mirroring the dataset and select the same hyperslab
+ * as in the file space.
+ */
+ mem_space = H5Screate_simple (MAX_RANK, dims, NULL);
+ VRFY((mem_space >= 0), "mem_space created");
+
+ ret = H5Sselect_hyperslab(mem_space, H5S_SELECT_SET, start, stride, count, block);
+    VRFY((ret >= 0), "H5Sselect_hyperslab succeeded");
+
+    /* Get the number of elements in the dataset (the buffer covers the
+     * whole dataset, not just this process's selection) */
+    length = dim0 * dim1;
+
+ /* Allocate and initialize the buffer */
+ buffer = (int *)HDmalloc(sizeof(int) * (size_t)length);
+ VRFY((buffer != NULL), "HDmalloc of buffer succeeded");
+ for(i = 0; i < length; i++)
+ buffer[i] = i;
+
+ /* Set up the dxpl for the write */
+ dxpl_write = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((dxpl_write >= 0), "H5Pcreate(H5P_DATASET_XFER) succeeded");
+
+ /* Set collective I/O properties in the dxpl. */
+ if(is_collective) {
+ /* Request collective I/O */
+ ret = H5Pset_dxpl_mpio(dxpl_write, H5FD_MPIO_COLLECTIVE);
+ VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
+
+ /* Set the threshold number of processes per chunk to twice mpi_size.
+ * This will prevent the threshold from ever being met, thus forcing
+         * multi-chunk I/O instead of link-chunk I/O. This exercises the
+         * default, threshold-based decision path.
+         */
+ if(multi_chunk_io) {
+ /* force multi-chunk-io by threshold */
+ ret = H5Pset_dxpl_mpio_chunk_opt_num(dxpl_write, (unsigned) mpi_size*2);
+ VRFY((ret >= 0), "H5Pset_dxpl_mpio_chunk_opt_num succeeded");
+
+            /* set this to manipulate the test scenario for allocating
+             * processes to chunks */
+ ret = H5Pset_dxpl_mpio_chunk_opt_ratio(dxpl_write, (unsigned) 99);
+ VRFY((ret >= 0), "H5Pset_dxpl_mpio_chunk_opt_ratio succeeded");
+ }
+
+ /* Set directly go to multi-chunk-io without threshold calc. */
+ if(direct_multi_chunk_io) {
+ /* set for multi chunk io by property*/
+ ret = H5Pset_dxpl_mpio_chunk_opt(dxpl_write, H5FD_MPIO_CHUNK_MULTI_IO);
+ VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
+ }
+ }
+
+ /* Make a copy of the dxpl to test the read operation */
+ dxpl_read = H5Pcopy(dxpl_write);
+ VRFY((dxpl_read >= 0), "H5Pcopy succeeded");
+
+ /* Write */
+ ret = H5Dwrite(dataset, data_type, mem_space, file_space, dxpl_write, buffer);
+ if(ret < 0) H5Eprint2(H5E_DEFAULT, stdout);
+ VRFY((ret >= 0), "H5Dwrite() dataset multichunk write succeeded");
+
+    /* Retrieve actual I/O values */
+    ret = H5Pget_mpio_actual_io_mode(dxpl_write, &actual_io_mode_write);
+    VRFY((ret >= 0), "retrieving actual io mode succeeded" );
+
+    ret = H5Pget_mpio_actual_chunk_opt_mode(dxpl_write, &actual_chunk_opt_mode_write);
+    VRFY((ret >= 0), "retrieving actual chunk opt mode succeeded" );
+
+ /* Read */
+ ret = H5Dread(dataset, data_type, mem_space, file_space, dxpl_read, buffer);
+ if(ret < 0) H5Eprint2(H5E_DEFAULT, stdout);
+ VRFY((ret >= 0), "H5Dread() dataset multichunk read succeeded");
+
+    /* Retrieve actual I/O values */
+    ret = H5Pget_mpio_actual_io_mode(dxpl_read, &actual_io_mode_read);
+    VRFY((ret >= 0), "retrieving actual io mode succeeded" );
+
+    ret = H5Pget_mpio_actual_chunk_opt_mode(dxpl_read, &actual_chunk_opt_mode_read);
+    VRFY((ret >= 0), "retrieving actual chunk opt mode succeeded" );
+
+ /* Check write vs read */
+ VRFY((actual_io_mode_read == actual_io_mode_write),
+ "reading and writing are the same for actual_io_mode");
+ VRFY((actual_chunk_opt_mode_read == actual_chunk_opt_mode_write),
+ "reading and writing are the same for actual_chunk_opt_mode");
+
+ /* Test values */
+ if(actual_chunk_opt_mode_expected != (H5D_mpio_actual_chunk_opt_mode_t) -1 && actual_io_mode_expected != (H5D_mpio_actual_io_mode_t) -1) {
+ HDsprintf(message, "Actual Chunk Opt Mode has the correct value for %s.\n",test_name);
+ VRFY((actual_chunk_opt_mode_write == actual_chunk_opt_mode_expected), message);
+ HDsprintf(message, "Actual IO Mode has the correct value for %s.\n",test_name);
+ VRFY((actual_io_mode_write == actual_io_mode_expected), message);
+ } else {
+ HDfprintf(stderr, "%s %d -> (%d,%d)\n", test_name, mpi_rank,
+ actual_chunk_opt_mode_write, actual_io_mode_write);
+ }
+
+    /* To test that the property is successfully reset to the default, we perform some
+ * independent I/O after the collective I/O
+ */
+ if (selection_mode == TEST_ACTUAL_IO_RESET) {
+ if (mpi_rank == 0) {
+ /* Switch to independent io */
+ ret = H5Pset_dxpl_mpio(dxpl_write, H5FD_MPIO_INDEPENDENT);
+ VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
+ ret = H5Pset_dxpl_mpio(dxpl_read, H5FD_MPIO_INDEPENDENT);
+ VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
+
+ /* Write */
+ ret = H5Dwrite(dataset, data_type, H5S_ALL, H5S_ALL, dxpl_write, buffer);
+ VRFY((ret >= 0), "H5Dwrite() dataset multichunk write succeeded");
+
+ /* Check Properties */
+ ret = H5Pget_mpio_actual_io_mode(dxpl_write, &actual_io_mode_write);
+            VRFY( (ret >= 0), "retrieving actual io mode succeeded" );
+            ret = H5Pget_mpio_actual_chunk_opt_mode(dxpl_write, &actual_chunk_opt_mode_write);
+            VRFY( (ret >= 0), "retrieving actual chunk opt mode succeeded" );
+
+ VRFY(actual_chunk_opt_mode_write == H5D_MPIO_NO_CHUNK_OPTIMIZATION,
+ "actual_chunk_opt_mode has correct value for reset write (independent)");
+ VRFY(actual_io_mode_write == H5D_MPIO_NO_COLLECTIVE,
+ "actual_io_mode has correct value for reset write (independent)");
+
+ /* Read */
+ ret = H5Dread(dataset, data_type, H5S_ALL, H5S_ALL, dxpl_read, buffer);
+            VRFY((ret >= 0), "H5Dread() dataset multichunk read succeeded");
+
+ /* Check Properties */
+ ret = H5Pget_mpio_actual_io_mode(dxpl_read, &actual_io_mode_read);
+            VRFY( (ret >= 0), "retrieving actual io mode succeeded" );
+            ret = H5Pget_mpio_actual_chunk_opt_mode(dxpl_read, &actual_chunk_opt_mode_read);
+            VRFY( (ret >= 0), "retrieving actual chunk opt mode succeeded" );
+
+ VRFY(actual_chunk_opt_mode_read == H5D_MPIO_NO_CHUNK_OPTIMIZATION,
+ "actual_chunk_opt_mode has correct value for reset read (independent)");
+ VRFY(actual_io_mode_read == H5D_MPIO_NO_COLLECTIVE,
+ "actual_io_mode has correct value for reset read (independent)");
+ }
+ }
+
+ /* Release some resources */
+ ret = H5Sclose(sid);
+ ret = H5Pclose(fapl_id);
+ ret = H5Pclose(dcpl);
+ ret = H5Pclose(dxpl_write);
+ ret = H5Pclose(dxpl_read);
+ ret = H5Dclose(dataset);
+ ret = H5Sclose(mem_space);
+ ret = H5Sclose(file_space);
+ ret = H5Fclose(fid);
+ HDfree(buffer);
+ return;
+}
+
+
+/* Function: actual_io_mode_tests
+ *
+ * Purpose: Tests all possible cases of the actual_io_mode property.
+ *
+ * Programmer: Jacob Gruber
+ * Date: 2011-04-06
+ */
+void
+actual_io_mode_tests(void) {
+ int mpi_size = -1;
+ int mpi_rank = -1;
+ MPI_Comm_size(test_comm, &mpi_size);
+    MPI_Comm_rank(test_comm, &mpi_rank);
+
+ test_actual_io_mode(TEST_ACTUAL_IO_NO_COLLECTIVE);
+
+ /*
+ * Test multi-chunk-io via proc_num threshold
+ */
+ test_actual_io_mode(TEST_ACTUAL_IO_MULTI_CHUNK_IND);
+ test_actual_io_mode(TEST_ACTUAL_IO_MULTI_CHUNK_COL);
+
+    /* The Multi Chunk Mixed test requires at least three processes. */
+ if (mpi_size > 2)
+ test_actual_io_mode(TEST_ACTUAL_IO_MULTI_CHUNK_MIX);
+ else
+        HDfprintf(stdout, "Multi Chunk Mixed test requires 3 processes minimum\n");
+
+ test_actual_io_mode(TEST_ACTUAL_IO_MULTI_CHUNK_MIX_DISAGREE);
+
+ /*
+ * Test multi-chunk-io via setting direct property
+ */
+ test_actual_io_mode(TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_IND);
+ test_actual_io_mode(TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_COL);
+
+ test_actual_io_mode(TEST_ACTUAL_IO_LINK_CHUNK);
+ test_actual_io_mode(TEST_ACTUAL_IO_CONTIGUOUS);
+
+ test_actual_io_mode(TEST_ACTUAL_IO_RESET);
+ return;
+}
+
+/*
+ * Function: test_no_collective_cause_mode
+ *
+ * Purpose:
+ * tests cases for broken collective I/O and checks that the
+ * H5Pget_mpio_no_collective_cause properties in the DXPL have the correct values.
+ *
+ * Input:
+ *     selection_mode: various modes to cause broken collective I/O
+ *       Note: Originally, each TEST case was meant to be used alone.
+ *           This was later updated to accept multiple TEST cases combined
+ *           with '|'. However, there is no error checking for combined test
+ *           cases, so the tester is responsible for supplying a valid
+ *           combination of TESTs.
+ *
+ *
+ * TEST_COLLECTIVE:
+ * Test for regular collective I/O without cause of breaking.
+ * Just to test normal behavior.
+ *
+ * TEST_SET_INDEPENDENT:
+ * Test for Independent I/O as the cause of breaking collective I/O.
+ *
+ * TEST_DATATYPE_CONVERSION:
+ * Test for Data Type Conversion as the cause of breaking collective I/O.
+ *
+ * TEST_DATA_TRANSFORMS:
+ *       Test for the Data Transform feature as the cause of breaking collective I/O.
+ *
+ * TEST_NOT_SIMPLE_OR_SCALAR_DATASPACES:
+ * Test for NULL dataspace as the cause of breaking collective I/O.
+ *
+ * TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_COMPACT:
+ * Test for Compact layout as the cause of breaking collective I/O.
+ *
+ * TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_EXTERNAL:
+ *       Test for external-file storage as the cause of breaking collective I/O.
+ *
+ * TEST_FILTERS:
+ * Test for using filter (checksum) as the cause of breaking collective I/O.
+ *       Note: TEST_FILTERS mode will not work until H5Dcreate and H5Dwrite
+ *             support the MPIO + filter feature. Use the
+ *             test_no_collective_cause_mode_filter() function instead.
+ *
+ *
+ * Programmer: Jonathan Kim
+ * Date: Aug, 2012
+ */
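+/*
+ * A minimal sketch (not part of the test) of the query this function
+ * exercises, where dxpl is the transfer plist used in the H5Dread/H5Dwrite:
+ *
+ *     uint32_t local_cause, global_cause;
+ *     H5Pget_mpio_no_collective_cause(dxpl, &local_cause, &global_cause);
+ *
+ * A value of H5D_MPIO_COLLECTIVE means collective I/O was performed as
+ * requested; otherwise the causes are bit flags (e.g.
+ * H5D_MPIO_DATATYPE_CONVERSION), so expected values for combined test
+ * cases are ORed together, as in the expected-cause setup below.
+ */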
+#define FILE_EXTERNAL "nocolcause_extern.data"
+static void
+test_no_collective_cause_mode(int selection_mode)
+{
+ uint32_t no_collective_cause_local_write = 0;
+ uint32_t no_collective_cause_local_read = 0;
+ uint32_t no_collective_cause_local_expected = 0;
+ uint32_t no_collective_cause_global_write = 0;
+ uint32_t no_collective_cause_global_read = 0;
+ uint32_t no_collective_cause_global_expected = 0;
+    /* hsize_t coord[NELM][MAX_RANK]; */
+
+ const char * filename;
+ const char * test_name;
+ hbool_t is_chunked=1;
+ hbool_t is_independent=0;
+ int mpi_size = -1;
+ int mpi_rank = -1;
+ int length;
+ int * buffer;
+ int i;
+ MPI_Comm mpi_comm;
+ MPI_Info mpi_info;
+ hid_t fid = -1;
+ hid_t sid = -1;
+ hid_t dataset = -1;
+ hid_t data_type = H5T_NATIVE_INT;
+ hid_t fapl_id = -1;
+ hid_t dcpl = -1;
+ hid_t dxpl_write = -1;
+ hid_t dxpl_read = -1;
+ hsize_t dims[MAX_RANK];
+ hid_t mem_space = -1;
+ hid_t file_space = -1;
+ hsize_t chunk_dims[MAX_RANK];
+ herr_t ret;
+#ifdef LATER /* fletcher32 */
+ H5Z_filter_t filter_info;
+#endif /* LATER */
+ /* set to global value as default */
+ int l_facc_type = facc_type;
+ char message[256];
+
+ /* Set up MPI parameters */
+ MPI_Comm_size(test_comm, &mpi_size);
+ MPI_Comm_rank(test_comm, &mpi_rank);
+
+ MPI_Barrier(test_comm);
+
+ HDassert(mpi_size >= 1);
+
+ mpi_comm = test_comm;
+ mpi_info = MPI_INFO_NULL;
+
+ /* Create the dataset creation plist */
+ dcpl = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY((dcpl >= 0), "dataset creation plist created successfully");
+
+ if (selection_mode & TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_COMPACT) {
+ ret = H5Pset_layout (dcpl, H5D_COMPACT);
+ VRFY((ret >= 0),"set COMPACT layout succeeded");
+ is_chunked = 0;
+ }
+
+ if (selection_mode & TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_EXTERNAL) {
+ ret = H5Pset_external (dcpl, FILE_EXTERNAL, (off_t) 0, H5F_UNLIMITED);
+ VRFY((ret >= 0),"set EXTERNAL file layout succeeded");
+ is_chunked = 0;
+ }
+
+#ifdef LATER /* fletcher32 */
+ if (selection_mode & TEST_FILTERS) {
+ ret = H5Zfilter_avail(H5Z_FILTER_FLETCHER32);
+ VRFY ((ret >=0 ), "Fletcher32 filter is available.\n");
+
+ ret = H5Zget_filter_info (H5Z_FILTER_FLETCHER32, &filter_info);
+ VRFY ( ( (filter_info & H5Z_FILTER_CONFIG_ENCODE_ENABLED) || (filter_info & H5Z_FILTER_CONFIG_DECODE_ENABLED) ) , "Fletcher32 filter encoding and decoding available.\n");
+
+ ret = H5Pset_fletcher32(dcpl);
+        VRFY((ret >= 0),"set filter (fletcher32) succeeded");
+ }
+#endif /* LATER */
+
+ if (selection_mode & TEST_NOT_SIMPLE_OR_SCALAR_DATASPACES) {
+ sid = H5Screate(H5S_NULL);
+        VRFY((sid >= 0), "H5Screate succeeded");
+ is_chunked = 0;
+ }
+ else {
+ /* Create the basic Space */
+ /* if this is a compact dataset, create a small dataspace that does not exceed 64K */
+ if (selection_mode & TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_COMPACT) {
+ dims[0] = BIG_X_FACTOR * 6;
+ dims[1] = BIG_Y_FACTOR * 6;
+ }
+ else {
+ dims[0] = (hsize_t)dim0;
+ dims[1] = (hsize_t)dim1;
+ }
+ sid = H5Screate_simple (MAX_RANK, dims, NULL);
+ VRFY((sid >= 0), "H5Screate_simple succeeded");
+ }
+
+
+ filename = (const char *)GetTestParameters();
+ HDassert(filename != NULL);
+
+ /* Setup the file access template */
+ fapl_id = create_faccess_plist(mpi_comm, mpi_info, l_facc_type);
+ VRFY((fapl_id >= 0), "create_faccess_plist() succeeded");
+
+ /* Create the file */
+ fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id);
+
+ VRFY((fid >= 0), "H5Fcreate succeeded");
+
+ /* If we are not testing contiguous datasets */
+ if(is_chunked) {
+ /* Set up chunk information. */
+ chunk_dims[0] = dims[0]/(hsize_t)mpi_size;
+ chunk_dims[1] = dims[1];
+ ret = H5Pset_chunk(dcpl, 2, chunk_dims);
+ VRFY((ret >= 0),"chunk creation property list succeeded");
+ }
+
+
+ /* Create the dataset */
+ dataset = H5Dcreate2(fid, "nocolcause", data_type, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT);
+ VRFY((dataset >= 0), "H5Dcreate2() dataset succeeded");
+
+
+ /*
+ * Set expected causes and some tweaks based on the type of test
+ */
+ if (selection_mode & TEST_DATATYPE_CONVERSION) {
+ test_name = "Broken Collective I/O - Datatype Conversion";
+ no_collective_cause_local_expected |= H5D_MPIO_DATATYPE_CONVERSION;
+ no_collective_cause_global_expected |= H5D_MPIO_DATATYPE_CONVERSION;
+ /* set different sign to trigger type conversion */
+ data_type = H5T_NATIVE_UINT;
+ }
+
+ if (selection_mode & TEST_DATA_TRANSFORMS) {
+        test_name = "Broken Collective I/O - Data Transforms";
+ no_collective_cause_local_expected |= H5D_MPIO_DATA_TRANSFORMS;
+ no_collective_cause_global_expected |= H5D_MPIO_DATA_TRANSFORMS;
+ }
+
+ if (selection_mode & TEST_NOT_SIMPLE_OR_SCALAR_DATASPACES) {
+ test_name = "Broken Collective I/O - No Simple or Scalar DataSpace";
+ no_collective_cause_local_expected |= H5D_MPIO_NOT_SIMPLE_OR_SCALAR_DATASPACES;
+ no_collective_cause_global_expected |= H5D_MPIO_NOT_SIMPLE_OR_SCALAR_DATASPACES;
+ }
+
+ if (selection_mode & TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_COMPACT ||
+ selection_mode & TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_EXTERNAL) {
+ test_name = "Broken Collective I/O - No CONTI or CHUNKED Dataset";
+ no_collective_cause_local_expected |= H5D_MPIO_NOT_CONTIGUOUS_OR_CHUNKED_DATASET;
+ no_collective_cause_global_expected |= H5D_MPIO_NOT_CONTIGUOUS_OR_CHUNKED_DATASET;
+ }
+
+#ifdef LATER /* fletcher32 */
+ if (selection_mode & TEST_FILTERS) {
+ test_name = "Broken Collective I/O - Filter is required";
+ no_collective_cause_local_expected |= H5D_MPIO_FILTERS;
+ no_collective_cause_global_expected |= H5D_MPIO_FILTERS;
+ }
+#endif /* LATER */
+
+ if (selection_mode & TEST_COLLECTIVE) {
+ test_name = "Broken Collective I/O - Not Broken";
+ no_collective_cause_local_expected = H5D_MPIO_COLLECTIVE;
+ no_collective_cause_global_expected = H5D_MPIO_COLLECTIVE;
+ }
+
+ if (selection_mode & TEST_SET_INDEPENDENT) {
+ test_name = "Broken Collective I/O - Independent";
+ no_collective_cause_local_expected = H5D_MPIO_SET_INDEPENDENT;
+ no_collective_cause_global_expected = H5D_MPIO_SET_INDEPENDENT;
+ /* switch to independent io */
+ is_independent = 1;
+ }
+
+ /* use all spaces for certain tests */
+ if (selection_mode & TEST_NOT_SIMPLE_OR_SCALAR_DATASPACES ||
+ selection_mode & TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_EXTERNAL) {
+ file_space = H5S_ALL;
+ mem_space = H5S_ALL;
+ }
+ else {
+ /* Get the file dataspace */
+ file_space = H5Dget_space(dataset);
+ VRFY((file_space >= 0), "H5Dget_space succeeded");
+
+ /* Create the memory dataspace */
+ mem_space = H5Screate_simple (MAX_RANK, dims, NULL);
+ VRFY((mem_space >= 0), "mem_space created");
+ }
+
+    /* Get the number of elements in the dataset */
+ H5_CHECKED_ASSIGN(length, int, dims[0] * dims[1], hsize_t);
+
+ /* Allocate and initialize the buffer */
+ buffer = (int *)HDmalloc(sizeof(int) * (size_t)length);
+ VRFY((buffer != NULL), "HDmalloc of buffer succeeded");
+ for(i = 0; i < length; i++)
+ buffer[i] = i;
+
+ /* Set up the dxpl for the write */
+ dxpl_write = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((dxpl_write >= 0), "H5Pcreate(H5P_DATASET_XFER) succeeded");
+
+ if(is_independent) {
+ /* Set Independent I/O */
+ ret = H5Pset_dxpl_mpio(dxpl_write, H5FD_MPIO_INDEPENDENT);
+ VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
+ }
+ else {
+ /* Set Collective I/O */
+ ret = H5Pset_dxpl_mpio(dxpl_write, H5FD_MPIO_COLLECTIVE);
+ VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
+
+ }
+
+ if (selection_mode & TEST_DATA_TRANSFORMS) {
+ ret = H5Pset_data_transform (dxpl_write, "x+1");
+ VRFY((ret >= 0), "H5Pset_data_transform succeeded");
+ }
+
+ /*---------------------
+ * Test Write access
+ *---------------------*/
+
+ /* Write */
+ ret = H5Dwrite(dataset, data_type, mem_space, file_space, dxpl_write, buffer);
+ if(ret < 0) H5Eprint2(H5E_DEFAULT, stdout);
+ VRFY((ret >= 0), "H5Dwrite() dataset multichunk write succeeded");
+
+
+ /* Get the cause of broken collective I/O */
+ ret = H5Pget_mpio_no_collective_cause (dxpl_write, &no_collective_cause_local_write, &no_collective_cause_global_write);
+    VRFY((ret >= 0), "retrieving no collective cause succeeded" );
+
+
+ /*---------------------
+ * Test Read access
+ *---------------------*/
+
+ /* Make a copy of the dxpl to test the read operation */
+ dxpl_read = H5Pcopy(dxpl_write);
+ VRFY((dxpl_read >= 0), "H5Pcopy succeeded");
+
+ /* Read */
+ ret = H5Dread(dataset, data_type, mem_space, file_space, dxpl_read, buffer);
+
+ if(ret < 0) H5Eprint2(H5E_DEFAULT, stdout);
+ VRFY((ret >= 0), "H5Dread() dataset multichunk read succeeded");
+
+ /* Get the cause of broken collective I/O */
+ ret = H5Pget_mpio_no_collective_cause (dxpl_read, &no_collective_cause_local_read, &no_collective_cause_global_read);
+    VRFY((ret >= 0), "retrieving no collective cause succeeded" );
+
+ /* Check write vs read */
+ VRFY((no_collective_cause_local_read == no_collective_cause_local_write),
+ "reading and writing are the same for local cause of Broken Collective I/O");
+ VRFY((no_collective_cause_global_read == no_collective_cause_global_write),
+ "reading and writing are the same for global cause of Broken Collective I/O");
+
+ /* Test values */
+ HDmemset (message, 0, sizeof (message));
+ HDsprintf(message, "Local cause of Broken Collective I/O has the correct value for %s.\n",test_name);
+ VRFY((no_collective_cause_local_write == no_collective_cause_local_expected), message);
+ HDmemset (message, 0, sizeof (message));
+ HDsprintf(message, "Global cause of Broken Collective I/O has the correct value for %s.\n",test_name);
+ VRFY((no_collective_cause_global_write == no_collective_cause_global_expected), message);
+
+ /* Release some resources */
+ if (sid)
+ H5Sclose(sid);
+ if (fapl_id)
+ H5Pclose(fapl_id);
+ if (dcpl)
+ H5Pclose(dcpl);
+ if (dxpl_write)
+ H5Pclose(dxpl_write);
+ if (dxpl_read)
+ H5Pclose(dxpl_read);
+ if (dataset)
+ H5Dclose(dataset);
+ if (mem_space)
+ H5Sclose(mem_space);
+ if (file_space)
+ H5Sclose(file_space);
+ if (fid)
+ H5Fclose(fid);
+ HDfree(buffer);
+
+ /* clean up external file */
+ if (selection_mode & TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_EXTERNAL)
+ HDremove(FILE_EXTERNAL);
+
+ return;
+}
+
+
+#if 0
+/*
+ * Function: test_no_collective_cause_mode_filter
+ *
+ * Purpose:
+ * Test specifically for using a filter as a cause of broken collective I/O and
+ * checks that the H5Pget_mpio_no_collective_cause properties in the DXPL
+ * have the correct values.
+ *
+ * NOTE:
+ * This is a temporary function.
+ * test_no_collective_cause_mode(TEST_FILTERS) will replace this when
+ * H5Dcreate and H5Dwrite support the MPIO + filter feature.
+ *
+ * Input:
+ * TEST_FILTERS_READ:
+ * Test for using filter (checksum) as the cause of breaking collective I/O.
+ *
+ * Programmer: Jonathan Kim
+ * Date: Aug, 2012
+ */
+static void
+test_no_collective_cause_mode_filter(int selection_mode)
+{
+ uint32_t no_collective_cause_local_read = 0;
+ uint32_t no_collective_cause_local_expected = 0;
+ uint32_t no_collective_cause_global_read = 0;
+ uint32_t no_collective_cause_global_expected = 0;
+
+ const char * filename;
+ const char * test_name;
+ hbool_t is_chunked=1;
+ int mpi_size = -1;
+ int mpi_rank = -1;
+ int length;
+ int * buffer;
+ int i;
+ MPI_Comm mpi_comm = MPI_COMM_NULL;
+ MPI_Info mpi_info = MPI_INFO_NULL;
+ hid_t fid = -1;
+ hid_t sid = -1;
+ hid_t dataset = -1;
+ hid_t data_type = H5T_NATIVE_INT;
+ hid_t fapl_write = -1;
+ hid_t fapl_read = -1;
+ hid_t dcpl = -1;
+ hid_t dxpl = -1;
+ hsize_t dims[MAX_RANK];
+ hid_t mem_space = -1;
+ hid_t file_space = -1;
+ hsize_t chunk_dims[MAX_RANK];
+ herr_t ret;
+#ifdef LATER /* fletcher32 */
+ H5Z_filter_t filter_info;
+#endif /* LATER */
+ char message[256];
+
+ /* Set up MPI parameters */
+ MPI_Comm_size(test_comm, &mpi_size);
+ MPI_Comm_rank(test_comm, &mpi_rank);
+
+ MPI_Barrier(test_comm);
+
+ HDassert(mpi_size >= 1);
+
+ mpi_comm = test_comm;
+ mpi_info = MPI_INFO_NULL;
+
+ /* Create the dataset creation plist */
+ dcpl = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY((dcpl >= 0), "dataset creation plist created successfully");
+
+ if (selection_mode == TEST_FILTERS_READ ) {
+#ifdef LATER /* fletcher32 */
+ ret = H5Zfilter_avail(H5Z_FILTER_FLETCHER32);
+ VRFY ((ret >=0 ), "Fletcher32 filter is available.\n");
+
+ ret = H5Zget_filter_info (H5Z_FILTER_FLETCHER32, (unsigned int *) &filter_info);
+ VRFY ( ( (filter_info & H5Z_FILTER_CONFIG_ENCODE_ENABLED) || (filter_info & H5Z_FILTER_CONFIG_DECODE_ENABLED) ) , "Fletcher32 filter encoding and decoding available.\n");
+
+ ret = H5Pset_fletcher32(dcpl);
+        VRFY((ret >= 0),"set filter (fletcher32) succeeded");
+#endif /* LATER */
+ }
+ else {
+ VRFY(0, "Unexpected mode, only test for TEST_FILTERS_READ.");
+ }
+
+ /* Create the basic Space */
+ dims[0] = dim0;
+ dims[1] = dim1;
+ sid = H5Screate_simple (MAX_RANK, dims, NULL);
+ VRFY((sid >= 0), "H5Screate_simple succeeded");
+
+
+ filename = (const char *)GetTestParameters();
+ HDassert(filename != NULL);
+
+ /* Setup the file access template */
+ fapl_write = create_faccess_plist(mpi_comm, mpi_info, FACC_DEFAULT);
+ VRFY((fapl_write >= 0), "create_faccess_plist() succeeded");
+
+ fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_write);
+ VRFY((fid >= 0), "H5Fcreate succeeded");
+
+ /* If we are not testing contiguous datasets */
+ if(is_chunked) {
+ /* Set up chunk information. */
+ chunk_dims[0] = dims[0]/mpi_size;
+ chunk_dims[1] = dims[1];
+ ret = H5Pset_chunk(dcpl, 2, chunk_dims);
+ VRFY((ret >= 0),"chunk creation property list succeeded");
+ }
+
+
+ /* Create the dataset */
+ dataset = H5Dcreate2(fid, DSET_NOCOLCAUSE, data_type, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT);
+ VRFY((dataset >= 0), "H5Dcreate2() dataset succeeded");
+
+#ifdef LATER /* fletcher32 */
+ /* Set expected cause */
+ test_name = "Broken Collective I/O - Filter is required";
+ no_collective_cause_local_expected = H5D_MPIO_FILTERS;
+ no_collective_cause_global_expected = H5D_MPIO_FILTERS;
+#endif /* LATER */
+
+ /* Get the file dataspace */
+ file_space = H5Dget_space(dataset);
+ VRFY((file_space >= 0), "H5Dget_space succeeded");
+
+ /* Create the memory dataspace */
+ mem_space = H5Screate_simple (MAX_RANK, dims, NULL);
+ VRFY((mem_space >= 0), "mem_space created");
+
+ /* Get the number of elements in the selection */
+ length = dim0 * dim1;
+
+ /* Allocate and initialize the buffer */
+ buffer = (int *)HDmalloc(sizeof(int) * length);
+ VRFY((buffer != NULL), "HDmalloc of buffer succeeded");
+ for(i = 0; i < length; i++)
+ buffer[i] = i;
+
+ /* Set up the dxpl for the write */
+ dxpl = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((dxpl >= 0), "H5Pcreate(H5P_DATASET_XFER) succeeded");
+
+ if (selection_mode == TEST_FILTERS_READ) {
+        /* To test read in collective I/O mode, write in independent mode
+         * because writes fail with MPIO + filter */
+ ret = H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_INDEPENDENT);
+ VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
+ }
+ else {
+ /* To test write in collective I/O mode. */
+ ret = H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);
+ VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
+ }
+
+
+ /* Write */
+ ret = H5Dwrite(dataset, data_type, mem_space, file_space, dxpl, buffer);
+
+ if(ret < 0) H5Eprint2(H5E_DEFAULT, stdout);
+ VRFY((ret >= 0), "H5Dwrite() dataset multichunk write succeeded");
+
+
+ /* Make a copy of the dxpl to test the read operation */
+ dxpl = H5Pcopy(dxpl);
+ VRFY((dxpl >= 0), "H5Pcopy succeeded");
+
+ if (dataset)
+ H5Dclose(dataset);
+ if (fapl_write)
+ H5Pclose(fapl_write);
+ if (fid)
+ H5Fclose(fid);
+
+
+ /*---------------------
+ * Test Read access
+ *---------------------*/
+
+ /* Setup the file access template */
+ fapl_read = create_faccess_plist(mpi_comm, mpi_info, facc_type);
+ VRFY((fapl_read >= 0), "create_faccess_plist() succeeded");
+
+ fid = H5Fopen (filename, H5F_ACC_RDONLY, fapl_read);
+ dataset = H5Dopen2 (fid, DSET_NOCOLCAUSE, H5P_DEFAULT);
+
+ /* Set collective I/O properties in the dxpl. */
+ ret = H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);
+ VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
+
+ /* Read */
+ ret = H5Dread(dataset, data_type, mem_space, file_space, dxpl, buffer);
+
+ if(ret < 0) H5Eprint2(H5E_DEFAULT, stdout);
+ VRFY((ret >= 0), "H5Dread() dataset multichunk read succeeded");
+
+ /* Get the cause of broken collective I/O */
+ ret = H5Pget_mpio_no_collective_cause (dxpl, &no_collective_cause_local_read, &no_collective_cause_global_read);
+    VRFY((ret >= 0), "retrieving no collective cause succeeded" );
+
+ /* Test values */
+ HDmemset (message, 0, sizeof (message));
+ HDsprintf(message, "Local cause of Broken Collective I/O has the correct value for %s.\n",test_name);
+ VRFY((no_collective_cause_local_read == (uint32_t)no_collective_cause_local_expected), message);
+ HDmemset (message, 0, sizeof (message));
+ HDsprintf(message, "Global cause of Broken Collective I/O has the correct value for %s.\n",test_name);
+ VRFY((no_collective_cause_global_read == (uint32_t)no_collective_cause_global_expected), message);
+
+ /* Release some resources */
+ if (sid)
+ H5Sclose(sid);
+ if (fapl_read)
+ H5Pclose(fapl_read);
+ if (dcpl)
+ H5Pclose(dcpl);
+ if (dxpl)
+ H5Pclose(dxpl);
+ if (dataset)
+ H5Dclose(dataset);
+ if (mem_space)
+ H5Sclose(mem_space);
+ if (file_space)
+ H5Sclose(file_space);
+ if (fid)
+ H5Fclose(fid);
+ HDfree(buffer);
+ return;
+}
+#endif
+
+/* Function: no_collective_cause_tests
+ *
+ * Purpose: Tests cases for broken collective IO.
+ *
+ * Programmer: Jonathan Kim
+ * Date: Aug, 2012
+ */
+void
+no_collective_cause_tests(void)
+{
+ /*
+ * Test individual cause
+ */
+ test_no_collective_cause_mode (TEST_COLLECTIVE);
+ test_no_collective_cause_mode (TEST_SET_INDEPENDENT);
+ test_no_collective_cause_mode (TEST_DATATYPE_CONVERSION);
+ test_no_collective_cause_mode (TEST_DATA_TRANSFORMS);
+ test_no_collective_cause_mode (TEST_NOT_SIMPLE_OR_SCALAR_DATASPACES);
+ test_no_collective_cause_mode (TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_COMPACT);
+ test_no_collective_cause_mode (TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_EXTERNAL);
+#ifdef LATER /* fletcher32 */
+ /* TODO: use this instead of below TEST_FILTERS_READ when H5Dcreate and
+ * H5Dwrite is ready for mpio + filter feature.
+ */
+ /* test_no_collective_cause_mode (TEST_FILTERS); */
+ test_no_collective_cause_mode_filter (TEST_FILTERS_READ);
+#endif /* LATER */
+
+ /*
+ * Test combined causes
+ */
+ test_no_collective_cause_mode (TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_EXTERNAL | TEST_DATATYPE_CONVERSION);
+ test_no_collective_cause_mode (TEST_DATATYPE_CONVERSION | TEST_DATA_TRANSFORMS);
+ test_no_collective_cause_mode (TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_EXTERNAL | TEST_DATATYPE_CONVERSION | TEST_DATA_TRANSFORMS);
+
+ return;
+}
+
+/*
+ * Test consistency semantics of atomic mode
+ */
+
+/*
+ * Example of using the parallel HDF5 library to create a dataset,
+ * where process 0 writes and the other processes read at the same
+ * time. If atomic mode is set correctly, the other processes should
+ * read the old values in the dataset or the new ones.
+ */
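+
+/*
+ * A minimal sketch (not part of the test) of the atomicity API used below,
+ * where fid is a file opened through the MPI-IO VFD:
+ *
+ *     hbool_t flag;
+ *     H5Fset_mpi_atomicity(fid, TRUE);
+ *     H5Fget_mpi_atomicity(fid, &flag);     now flag == TRUE
+ *
+ * With atomicity enabled, a reader whose access overlaps a concurrent
+ * writer sees either all-old or all-new data, never a mixture.
+ */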
+
+void
+dataset_atomicity(void)
+{
+ hid_t fid; /* HDF5 file ID */
+ hid_t acc_tpl; /* File access templates */
+ hid_t sid; /* Dataspace ID */
+ hid_t dataset1; /* Dataset IDs */
+ hsize_t dims[MAX_RANK]; /* dataset dim sizes */
+ int *write_buf = NULL; /* data buffer */
+ int *read_buf = NULL; /* data buffer */
+ int buf_size;
+ hid_t dataset2;
+ hid_t file_dataspace; /* File dataspace ID */
+ hid_t mem_dataspace; /* Memory dataspace ID */
+ hsize_t start[MAX_RANK];
+ hsize_t stride[MAX_RANK];
+ hsize_t count[MAX_RANK];
+ hsize_t block[MAX_RANK];
+ const char *filename;
+ herr_t ret; /* Generic return value */
+ int mpi_size, mpi_rank;
+ int i, j, k;
+ hbool_t atomicity = FALSE;
+ MPI_Comm comm = test_comm;
+ MPI_Info info = MPI_INFO_NULL;
+
+ dim0 = 64; dim1 = 32;
+ filename = GetTestParameters();
+ if (facc_type != FACC_MPIO) {
+ HDprintf("Atomicity tests will not work without the MPIO VFD\n");
+ return;
+ }
+ if(VERBOSE_MED)
+ HDprintf("atomic writes to file %s\n", filename);
+
+ /* set up MPI parameters */
+ MPI_Comm_size(test_comm,&mpi_size);
+ MPI_Comm_rank(test_comm,&mpi_rank);
+
+ buf_size = dim0 * dim1;
+ /* allocate memory for data buffer */
+ write_buf = (int *)HDcalloc((size_t)buf_size, sizeof(int));
+ VRFY((write_buf != NULL), "write_buf HDcalloc succeeded");
+ /* allocate memory for data buffer */
+ read_buf = (int *)HDcalloc((size_t)buf_size, sizeof(int));
+ VRFY((read_buf != NULL), "read_buf HDcalloc succeeded");
+
+ /* setup file access template */
+ acc_tpl = create_faccess_plist(comm, info, facc_type);
+ VRFY((acc_tpl >= 0), "");
+
+ /* create the file collectively */
+ fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, acc_tpl);
+ VRFY((fid >= 0), "H5Fcreate succeeded");
+
+ /* Release file-access template */
+ ret = H5Pclose(acc_tpl);
+ VRFY((ret >= 0), "H5Pclose succeeded");
+
+ /* setup dimensionality object */
+ dims[0] = (hsize_t)dim0;
+ dims[1] = (hsize_t)dim1;
+ sid = H5Screate_simple (MAX_RANK, dims, NULL);
+ VRFY((sid >= 0), "H5Screate_simple succeeded");
+
+ /* create datasets */
+ dataset1 = H5Dcreate2(fid, DATASETNAME5, H5T_NATIVE_INT, sid,
+ H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ VRFY((dataset1 >= 0), "H5Dcreate2 succeeded");
+
+ dataset2 = H5Dcreate2(fid, DATASETNAME6, H5T_NATIVE_INT, sid,
+ H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ VRFY((dataset2 >= 0), "H5Dcreate2 succeeded");
+
+ /* initialize datasets to 0s */
+ if (0 == mpi_rank) {
+ ret = H5Dwrite(dataset1, H5T_NATIVE_INT, H5S_ALL, H5S_ALL,
+ H5P_DEFAULT, write_buf);
+ VRFY((ret >= 0), "H5Dwrite dataset1 succeeded");
+
+ ret = H5Dwrite(dataset2, H5T_NATIVE_INT, H5S_ALL, H5S_ALL,
+ H5P_DEFAULT, write_buf);
+ VRFY((ret >= 0), "H5Dwrite dataset2 succeeded");
+ }
+
+ ret = H5Dclose(dataset1);
+ VRFY((ret >= 0), "H5Dclose succeeded");
+ ret = H5Dclose(dataset2);
+ VRFY((ret >= 0), "H5Dclose succeeded");
+ ret = H5Sclose(sid);
+ VRFY((ret >= 0), "H5Sclose succeeded");
+ ret = H5Fclose(fid);
+ VRFY((ret >= 0), "H5Fclose succeeded");
+
+ MPI_Barrier (comm);
+
+ /* make sure setting atomicity fails on a serial file ID */
+ /* file locking allows only one file open (serial) for writing */
+    if(MAINPROCESS){
+        fid=H5Fopen(filename,H5F_ACC_RDWR,H5P_DEFAULT);
+        VRFY((fid >= 0), "H5Fopen succeeded");
+
+        /* should fail: this file handle was opened serially, without the MPIO VFD */
+        ret = H5Fset_mpi_atomicity(fid , TRUE);
+        VRFY((ret == FAIL), "H5Fset_mpi_atomicity failed");
+    }
+
+ if(MAINPROCESS){
+ ret = H5Fclose(fid);
+ VRFY((ret >= 0), "H5Fclose succeeded");
+ }
+
+ MPI_Barrier (comm);
+
+ /* setup file access template */
+ acc_tpl = create_faccess_plist(comm, info, facc_type);
+ VRFY((acc_tpl >= 0), "");
+
+ /* open the file collectively */
+ fid=H5Fopen(filename,H5F_ACC_RDWR,acc_tpl);
+ VRFY((fid >= 0), "H5Fopen succeeded");
+
+ /* Release file-access template */
+ ret = H5Pclose(acc_tpl);
+ VRFY((ret >= 0), "H5Pclose succeeded");
+
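+    /* H5Fset_mpi_atomicity sets the MPI-I/O atomicity flag on the file
+     * (via MPI_File_set_atomicity), so a concurrent reader sees either
+     * all or none of another rank's write; it requires the MPIO VFD. */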
+ ret = H5Fset_mpi_atomicity(fid , TRUE);
+ VRFY((ret >= 0), "H5Fset_mpi_atomicity succeeded");
+
+ /* open dataset1 (contiguous case) */
+ dataset1 = H5Dopen2(fid, DATASETNAME5, H5P_DEFAULT);
+ VRFY((dataset1 >= 0), "H5Dopen2 succeeded");
+
+ if (0 == mpi_rank) {
+ for (i=0 ; i<buf_size ; i++) {
+ write_buf[i] = 5;
+ }
+ }
+ else {
+ for (i=0 ; i<buf_size ; i++) {
+ read_buf[i] = 8;
+ }
+ }
+
+ /* check that the atomicity flag is set */
+ ret = H5Fget_mpi_atomicity(fid , &atomicity);
+ VRFY((ret >= 0), "atomcity get failed");
+ VRFY((atomicity == TRUE), "atomcity set failed");
+
+ MPI_Barrier (comm);
+
+ /* Process 0 writes contiguously to the entire dataset */
+ if (0 == mpi_rank) {
+ ret = H5Dwrite(dataset1, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, write_buf);
+ VRFY((ret >= 0), "H5Dwrite dataset1 succeeded");
+ }
+ /* The other processes read the entire dataset */
+ else {
+ ret = H5Dread(dataset1, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, read_buf);
+ VRFY((ret >= 0), "H5Dwrite() dataset multichunk write succeeded");
+ }
+
+ if(VERBOSE_MED) {
+ i=0;j=0;k=0;
+ for (i=0 ; i<dim0 ; i++) {
+ HDprintf ("\n");
+ for (j=0 ; j<dim1 ; j++)
+ HDprintf ("%d ", read_buf[k++]);
+ }
+ }
+
+ /* The processes that read the dataset must either read all values
+ as 0 (read happened before process 0 wrote to dataset 1), or 5
+ (read happened after process 0 wrote to dataset 1) */
+ if (0 != mpi_rank) {
+ int compare = read_buf[0];
+
+        VRFY((compare == 0 || compare == 5),
+             "Atomicity Test Failed: value read should be 0 or 5");
+ for (i=1; i<buf_size; i++) {
+ if (read_buf[i] != compare) {
+ HDprintf("Atomicity Test Failed Process %d: read_buf[%d] is %d, should be %d\n", mpi_rank, i, read_buf[i], compare);
+ nerrors ++;
+ }
+ }
+ }
+
+ ret = H5Dclose(dataset1);
+ VRFY((ret >= 0), "H5D close succeeded");
+
+ /* release data buffers */
+ if(write_buf) HDfree(write_buf);
+ if(read_buf) HDfree(read_buf);
+
+ /* open dataset2 (non-contiguous case) */
+ dataset2 = H5Dopen2(fid, DATASETNAME6, H5P_DEFAULT);
+ VRFY((dataset2 >= 0), "H5Dopen2 succeeded");
+
+ /* allocate memory for data buffer */
+ write_buf = (int *)HDcalloc((size_t)buf_size, sizeof(int));
+ VRFY((write_buf != NULL), "write_buf HDcalloc succeeded");
+ /* allocate memory for data buffer */
+ read_buf = (int *)HDcalloc((size_t)buf_size, sizeof(int));
+ VRFY((read_buf != NULL), "read_buf HDcalloc succeeded");
+
+ for (i=0 ; i<buf_size ; i++) {
+ write_buf[i] = 5;
+ }
+ for (i=0 ; i<buf_size ; i++) {
+ read_buf[i] = 8;
+ }
+
+ atomicity = FALSE;
+ /* check that the atomicity flag is set */
+ ret = H5Fget_mpi_atomicity(fid , &atomicity);
+ VRFY((ret >= 0), "atomcity get failed");
+ VRFY((atomicity == TRUE), "atomcity set failed");
+
+
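+    /* Every rank selects the same pattern: mpi_size blocks per dimension,
+     * each one element narrower than its stride, which leaves one-element
+     * gaps and makes the file selection non-contiguous. */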
+ block[0] = (hsize_t)(dim0/mpi_size) - 1;
+ block[1] = (hsize_t)(dim1/mpi_size) - 1;
+ stride[0] = block[0] + 1;
+ stride[1] = block[1] + 1;
+ count[0] = (hsize_t)mpi_size;
+ count[1] = (hsize_t)mpi_size;
+ start[0] = 0;
+ start[1] = 0;
+
+ /* create a file dataspace */
+ file_dataspace = H5Dget_space (dataset2);
+ VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
+ ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
+
+ /* create a memory dataspace */
+ mem_dataspace = H5Screate_simple (MAX_RANK, dims, NULL);
+ VRFY((mem_dataspace >= 0), "");
+
+ ret = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
+
+ MPI_Barrier (comm);
+
+ /* Process 0 writes to the dataset */
+ if (0 == mpi_rank) {
+ ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
+ H5P_DEFAULT, write_buf);
+ VRFY((ret >= 0), "H5Dwrite dataset2 succeeded");
+ }
+ /* All processes wait for the write to finish. This works because
+ atomicity is set to true */
+ MPI_Barrier (comm);
+ /* The other processes read the entire dataset */
+ if (0 != mpi_rank) {
+ ret = H5Dread(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
+ H5P_DEFAULT, read_buf);
+ VRFY((ret >= 0), "H5Dread dataset2 succeeded");
+ }
+
+ if(VERBOSE_MED) {
+ if (mpi_rank == 1) {
+ i=0;j=0;k=0;
+ for (i=0 ; i<dim0 ; i++) {
+ HDprintf ("\n");
+ for (j=0 ; j<dim1 ; j++)
+ HDprintf ("%d ", read_buf[k++]);
+ }
+ HDprintf ("\n");
+ }
+ }
+
+    /* The processes that read the dataset must see all values as 5
+       (the barrier above guarantees that the read happens after
+       process 0's write to dataset2) */
+ if (0 != mpi_rank) {
+ int compare;
+ i=0;j=0;k=0;
+
+ compare = 5;
+
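+        /* Walk the read buffer, checking only the first mpi_rank blocks
+         * in each dimension and skipping the one-element gap rows and
+         * columns that separate the blocks of the hyperslab selection. */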
+ for (i=0 ; i<dim0 ; i++) {
+ if ((hsize_t)i >= (hsize_t)mpi_rank*(block[0]+1)) {
+ break;
+ }
+ if (((hsize_t)i+1)%(block[0]+1)==0) {
+ k += dim1;
+ continue;
+ }
+ for (j=0 ; j<dim1 ; j++) {
+ if ((hsize_t)j >= (hsize_t)mpi_rank*(block[1]+1)) {
+ H5_CHECKED_ASSIGN(k, int, (hsize_t)dim1 - (hsize_t)mpi_rank*(block[1]+1) + (hsize_t)k, hsize_t);
+ break;
+ }
+ if (((hsize_t)j+1)%(block[1]+1)==0) {
+ k++;
+ continue;
+ }
+ else if (compare != read_buf[k]) {
+ HDprintf("Atomicity Test Failed Process %d: read_buf[%d] is %d, should be %d\n", mpi_rank, k, read_buf[k], compare);
+ nerrors++;
+ }
+ k ++;
+ }
+ }
+ }
+
+ ret = H5Dclose(dataset2);
+ VRFY((ret >= 0), "H5Dclose succeeded");
+ ret = H5Sclose(file_dataspace);
+ VRFY((ret >= 0), "H5Sclose succeeded");
+ ret = H5Sclose(mem_dataspace);
+ VRFY((ret >= 0), "H5Sclose succeeded");
+
+ /* release data buffers */
+ if(write_buf) HDfree(write_buf);
+ if(read_buf) HDfree(read_buf);
+
+ ret = H5Fclose(fid);
+ VRFY((ret >= 0), "H5Fclose succeeded");
+
+}
+
+/* Function: test_dense_attr
+ *
+ * Purpose: Test cases for writing dense attributes in parallel
+ *
+ * Programmer: Quincey Koziol
+ * Date: April, 2013
+ */
+void
+test_dense_attr(void)
+{
+ int mpi_size, mpi_rank;
+ hid_t fpid, fid;
+ hid_t gid, gpid;
+ hid_t atFileSpace, atid;
+ hsize_t atDims[1] = {10000};
+ herr_t status;
+ const char *filename;
+
+ /* get filename */
+ filename = (const char *)GetTestParameters();
+ HDassert( filename != NULL );
+
+ /* set up MPI parameters */
+ MPI_Comm_size(test_comm,&mpi_size);
+ MPI_Comm_rank(test_comm,&mpi_rank);
+
+ fpid = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((fpid > 0), "H5Pcreate succeeded");
+ status = H5Pset_libver_bounds(fpid, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST);
+ VRFY((status >= 0), "H5Pset_libver_bounds succeeded");
+ status = H5Pset_fapl_mpio(fpid, test_comm, MPI_INFO_NULL);
+ VRFY((status >= 0), "H5Pset_fapl_mpio succeeded");
+ fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fpid);
+ VRFY((fid > 0), "H5Fcreate succeeded");
+ status = H5Pclose(fpid);
+ VRFY((status >= 0), "H5Pclose succeeded");
+
+ gpid = H5Pcreate(H5P_GROUP_CREATE);
+ VRFY((gpid > 0), "H5Pcreate succeeded");
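+    /* With max_compact = 0 and min_dense = 0, attributes are never stored
+     * compactly in the object header, so storage is dense from the first
+     * attribute on. */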
+ status = H5Pset_attr_phase_change(gpid, 0, 0);
+ VRFY((status >= 0), "H5Pset_attr_phase_change succeeded");
+ gid = H5Gcreate2(fid, "foo", H5P_DEFAULT, gpid, H5P_DEFAULT);
+ VRFY((gid > 0), "H5Gcreate2 succeeded");
+ status = H5Pclose(gpid);
+ VRFY((status >= 0), "H5Pclose succeeded");
+
+ atFileSpace = H5Screate_simple(1, atDims, NULL);
+ VRFY((atFileSpace > 0), "H5Screate_simple succeeded");
+ atid = H5Acreate2(gid, "bar", H5T_STD_U64LE, atFileSpace, H5P_DEFAULT, H5P_DEFAULT);
+ VRFY((atid > 0), "H5Acreate succeeded");
+ status = H5Sclose(atFileSpace);
+ VRFY((status >= 0), "H5Sclose succeeded");
+
+ status = H5Aclose(atid);
+ VRFY((status >= 0), "H5Aclose succeeded");
+
+ status = H5Gclose(gid);
+ VRFY((status >= 0), "H5Gclose succeeded");
+ status = H5Fclose(fid);
+ VRFY((status >= 0), "H5Fclose succeeded");
+
+ return;
+}
+
+
+int
+main(int argc, char **argv)
+{
+ int express_test;
+ int mpi_size, mpi_rank; /* mpi variables */
+ hsize_t oldsize, newsize = 1048576;
+
+#ifndef H5_HAVE_WIN32_API
+ /* Un-buffer the stdout and stderr */
+ HDsetbuf(stderr, NULL);
+ HDsetbuf(stdout, NULL);
+#endif
+
+
+ MPI_Init(&argc, &argv);
+ MPI_Comm_size(test_comm, &mpi_size);
+ MPI_Comm_rank(test_comm, &mpi_rank);
+
+ dim0 = BIG_X_FACTOR;
+ dim1 = BIG_Y_FACTOR;
+ dim2 = BIG_Z_FACTOR;
+
+ if (MAINPROCESS){
+ HDprintf("===================================\n");
+ HDprintf("2 GByte IO TESTS START\n");
+ HDprintf("2 MPI ranks will run the tests...\n");
+ HDprintf("===================================\n");
+ h5_show_hostname();
+ }
+
+    if (H5dont_atexit() < 0){
+        HDprintf("Failed to turn off atexit processing. Continue.\n");
+    }
+ H5open();
+ /* Set the internal transition size to allow use of derived datatypes
+ * without having to actually read or write large datasets (>2GB).
+ */
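+    /* (newsize is 1 MiB, far below the default threshold, so even these
+     * modest datasets exercise the derived-datatype code paths) */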
+ oldsize = H5_mpi_set_bigio_count(newsize);
+
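+    /* Only the first two ranks run the tests; any extra ranks are split
+     * off into a separate communicator so that test_comm holds ranks 0-1. */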
+ if (mpi_size > 2) {
+ int rank_color = 0;
+ if (mpi_rank >= 2) rank_color = 1;
+ if (MPI_Comm_split(test_comm, rank_color, mpi_rank, &test_comm) != MPI_SUCCESS) {
+ HDprintf("MPI returned an error. Exiting\n");
+ }
+ }
+
+ /* Initialize testing framework */
+ if (mpi_rank < 2) {
+ TestInit(argv[0], usage, parse_options);
+
+ /* Parse command line arguments */
+ TestParseCmdLine(argc, argv);
+
+ AddTest("idsetw", dataset_writeInd, NULL,
+ "dataset independent write", PARATESTFILE);
+
+ AddTest("idsetr", dataset_readInd, NULL,
+ "dataset independent read", PARATESTFILE);
+
+ AddTest("cdsetw", dataset_writeAll, NULL,
+ "dataset collective write", PARATESTFILE);
+
+ AddTest("cdsetr", dataset_readAll, NULL,
+ "dataset collective read", PARATESTFILE);
+
+ AddTest("eidsetw2", extend_writeInd2, NULL,
+ "extendible dataset independent write #2", PARATESTFILE);
+
+ AddTest("selnone", none_selection_chunk, NULL,
+ "chunked dataset with none-selection", PARATESTFILE);
+
+#ifdef H5_HAVE_FILTER_DEFLATE
+ AddTest("cmpdsetr", compress_readAll, NULL,
+ "compressed dataset collective read", PARATESTFILE);
+#endif /* H5_HAVE_FILTER_DEFLATE */
+
+ /* Display testing information */
+ if (MAINPROCESS)
+ TestInfo(argv[0]);
+
+ /* setup file access property list */
+ fapl = H5Pcreate (H5P_FILE_ACCESS);
+ H5Pset_fapl_mpio(fapl, test_comm, MPI_INFO_NULL);
+
+ /* Perform requested testing */
+ PerformTests();
+ }
+
+ MPI_Barrier(MPI_COMM_WORLD);
+
+ /* Restore the default bigio setting */
+ H5_mpi_set_bigio_count(oldsize);
+
+ express_test = GetTestExpress();
+ if ((express_test == 0) && (mpi_rank < 2)) {
+ MpioTest2G(test_comm);
+ }
+
+ MPI_Barrier(MPI_COMM_WORLD);
+
+ if (mpi_rank == 0)
+ HDremove(FILENAME[0]);
+
+ H5close();
+ if (test_comm != MPI_COMM_WORLD) {
+ MPI_Comm_free(&test_comm);
+ }
+ MPI_Finalize();
+ return 0;
+}
diff --git a/testpar/t_bigio.c b/testpar/t_bigio.c
index 611ff1a..f86852a 100644
--- a/testpar/t_bigio.c
+++ b/testpar/t_bigio.c
@@ -1,9 +1,13 @@
#include "hdf5.h"
#include "testphdf5.h"
-#include "H5Dprivate.h" /* For Chunk tests */
+#include "H5Dprivate.h" /* For Chunk tests */
-// int TestVerbosity = VERBO_LO; /* Default Verbosity is Low */
+/* FILENAME and filenames must have the same number of names */
+const char *FILENAME[3]={ "bigio_test.h5",
+ "single_rank_independent_io.h5",
+ NULL
+ };
/* Constants definitions */
#define MAX_ERR_REPORT 10 /* Maximum number of errors reported */
@@ -11,10 +15,10 @@
/* Define some handy debugging shorthands, routines, ... */
/* debugging tools */
-#define MAINPROCESS (!mpi_rank) /* define process 0 as main process */
+#define MAIN_PROCESS (mpi_rank_g == 0) /* define process 0 as main process */
/* Constants definitions */
-#define RANK 2
+#define RANK 2
#define IN_ORDER 1
#define OUT_OF_ORDER 2
@@ -23,41 +27,39 @@
#define DATASET2 "DSET2"
#define DATASET3 "DSET3"
#define DATASET4 "DSET4"
-#define DATASET5 "DSET5"
#define DXFER_COLLECTIVE_IO 0x1 /* Collective IO*/
#define DXFER_INDEPENDENT_IO 0x2 /* Independent IO collectively */
-#define DXFER_BIGCOUNT 536870916
+#define DXFER_BIGCOUNT (1 << 29)
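+/* 1.5 * 2^30 ints (~6 GiB), more than a single 2 GiB MPI I/O call can move;
+   used by single_rank_independent_io below */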
+#define LARGE_DIM 1610612736
#define HYPER 1
#define POINT 2
-#define ALL 3
+#define ALL 3
 /* Dataset data type. Ints can be easily octal dumped. */
typedef hsize_t B_DATATYPE;
-int facc_type = FACC_MPIO; /*Test file access type */
+int facc_type = FACC_MPIO; /*Test file access type */
int dxfer_coll_type = DXFER_COLLECTIVE_IO;
-size_t bigcount = DXFER_BIGCOUNT;
-char filename[20] = "bigio_test.h5";
+size_t bigcount = (size_t)DXFER_BIGCOUNT;
int nerrors = 0;
-int mpi_size, mpi_rank;
+static int mpi_size_g, mpi_rank_g;
hsize_t space_dim1 = SPACE_DIM1 * 256; // 4096
hsize_t space_dim2 = SPACE_DIM2;
static void coll_chunktest(const char* filename, int chunk_factor, int select_factor,
int api_option, int file_selection, int mem_selection, int mode);
-hid_t create_faccess_plist(MPI_Comm comm, MPI_Info info, int l_facc_type);
/*
* Setup the coordinates for point selection.
*/
-static void
+static void
set_coords(hsize_t start[],
hsize_t count[],
hsize_t stride[],
hsize_t block[],
- size_t num_points,
+ size_t num_points,
hsize_t coords[],
int order)
{
@@ -97,10 +99,10 @@ fill_datasets(hsize_t start[], hsize_t block[], B_DATATYPE * dataset)
/* put some trivial data in the data_array */
for (i=0; i < block[0]; i++){
- for (j=0; j < block[1]; j++){
- *dataptr = (B_DATATYPE)((i+start[0])*100 + (j+start[1]+1));
- dataptr++;
- }
+ for (j=0; j < block[1]; j++){
+ *dataptr = (B_DATATYPE)((i+start[0])*100 + (j+start[1]+1));
+ dataptr++;
+ }
}
}
@@ -111,7 +113,7 @@ void point_set(hsize_t start[],
hsize_t count[],
hsize_t stride[],
hsize_t block[],
- size_t num_points,
+ size_t num_points,
hsize_t coords[],
int order)
{
@@ -141,13 +143,13 @@ void point_set(hsize_t start[],
}
if(VERBOSE_MED) {
- printf("start[]=(%lu, %lu), count[]=(%lu, %lu), stride[]=(%lu, %lu), block[]=(%lu, %lu), total datapoints=%lu\n",
+ HDprintf("start[]=(%lu, %lu), count[]=(%lu, %lu), stride[]=(%lu, %lu), block[]=(%lu, %lu), total datapoints=%lu\n",
(unsigned long)start[0], (unsigned long)start[1], (unsigned long)count[0], (unsigned long)count[1],
(unsigned long)stride[0], (unsigned long)stride[1], (unsigned long)block[0], (unsigned long)block[1],
(unsigned long)(block[0] * block[1] * count[0] * count[1]));
k = 0;
for(i = 0; i < num_points ; i++) {
- printf("(%d, %d)\n", (int)coords[k], (int)coords[k + 1]);
+ HDprintf("(%d, %d)\n", (int)coords[k], (int)coords[k + 1]);
k += 2;
}
}
@@ -163,19 +165,19 @@ dataset_print(hsize_t start[], hsize_t block[], B_DATATYPE * dataset)
hsize_t i, j;
/* print the column heading */
- printf("%-8s", "Cols:");
+ HDprintf("%-8s", "Cols:");
for (j=0; j < block[1]; j++){
- printf("%3lu ", (unsigned long)(start[1]+j));
+ HDprintf("%3lu ", (unsigned long)(start[1]+j));
}
- printf("\n");
+ HDprintf("\n");
/* print the slab data */
for (i=0; i < block[0]; i++){
- printf("Row %2lu: ", (unsigned long)(i+start[0]));
- for (j=0; j < block[1]; j++){
- printf("%llu ", *dataptr++);
- }
- printf("\n");
+ HDprintf("Row %2lu: ", (unsigned long)(i+start[0]));
+ for (j=0; j < block[1]; j++){
+ HDprintf("%llu ", *dataptr++);
+ }
+ HDprintf("\n");
}
}
@@ -191,90 +193,90 @@ verify_data(hsize_t start[], hsize_t count[], hsize_t stride[], hsize_t block[],
/* print it if VERBOSE_MED */
if(VERBOSE_MED) {
- printf("verify_data dumping:::\n");
- printf("start(%lu, %lu), count(%lu, %lu), stride(%lu, %lu), block(%lu, %lu)\n",
- (unsigned long)start[0], (unsigned long)start[1], (unsigned long)count[0], (unsigned long)count[1],
- (unsigned long)stride[0], (unsigned long)stride[1], (unsigned long)block[0], (unsigned long)block[1]);
- printf("original values:\n");
- dataset_print(start, block, original);
- printf("compared values:\n");
- dataset_print(start, block, dataset);
+ HDprintf("verify_data dumping:::\n");
+ HDprintf("start(%lu, %lu), count(%lu, %lu), stride(%lu, %lu), block(%lu, %lu)\n",
+ (unsigned long)start[0], (unsigned long)start[1], (unsigned long)count[0], (unsigned long)count[1],
+ (unsigned long)stride[0], (unsigned long)stride[1], (unsigned long)block[0], (unsigned long)block[1]);
+ HDprintf("original values:\n");
+ dataset_print(start, block, original);
+ HDprintf("compared values:\n");
+ dataset_print(start, block, dataset);
}
vrfyerrs = 0;
for (i=0; i < block[0]; i++){
- for (j=0; j < block[1]; j++){
- if(*dataset != *original){
- if(vrfyerrs++ < MAX_ERR_REPORT || VERBOSE_MED){
- printf("Dataset Verify failed at [%lu][%lu](row %lu, col %lu): expect %llu, got %llu\n",
+ for (j=0; j < block[1]; j++){
+ if(*dataset != *original){
+ if(vrfyerrs++ < MAX_ERR_REPORT || VERBOSE_MED){
+ HDprintf("Dataset Verify failed at [%lu][%lu](row %lu, col %lu): expect %llu, got %llu\n",
(unsigned long)i, (unsigned long)j,
(unsigned long)(i+start[0]), (unsigned long)(j+start[1]),
*(original), *(dataset));
- }
- dataset++;
- original++;
- }
- }
+ }
+ dataset++;
+ original++;
+ }
+ }
}
if(vrfyerrs > MAX_ERR_REPORT && !VERBOSE_MED)
- printf("[more errors ...]\n");
+ HDprintf("[more errors ...]\n");
if(vrfyerrs)
- printf("%d errors found in verify_data\n", vrfyerrs);
+ HDprintf("%d errors found in verify_data\n", vrfyerrs);
return(vrfyerrs);
}
/* Set up the selection */
static void
ccslab_set(int mpi_rank,
- int mpi_size,
- hsize_t start[],
- hsize_t count[],
- hsize_t stride[],
- hsize_t block[],
- int mode)
+ int mpi_size,
+ hsize_t start[],
+ hsize_t count[],
+ hsize_t stride[],
+ hsize_t block[],
+ int mode)
{
switch (mode){
case BYROW_CONT:
- /* Each process takes a slabs of rows. */
- block[0] = 1;
- block[1] = 1;
- stride[0] = 1;
- stride[1] = 1;
- count[0] = space_dim1;
- count[1] = space_dim2;
- start[0] = mpi_rank*count[0];
- start[1] = 0;
-
- break;
+        /* Each process takes a slab of rows. */
+ block[0] = 1;
+ block[1] = 1;
+ stride[0] = 1;
+ stride[1] = 1;
+ count[0] = space_dim1;
+ count[1] = space_dim2;
+ start[0] = (hsize_t)mpi_rank*count[0];
+ start[1] = 0;
+
+ break;
case BYROW_DISCONT:
- /* Each process takes several disjoint blocks. */
- block[0] = 1;
- block[1] = 1;
- stride[0] = 3;
- stride[1] = 3;
- count[0] = space_dim1/(stride[0]*block[0]);
- count[1] = (space_dim2)/(stride[1]*block[1]);
- start[0] = space_dim1*mpi_rank;
- start[1] = 0;
-
- break;
+ /* Each process takes several disjoint blocks. */
+ block[0] = 1;
+ block[1] = 1;
+ stride[0] = 3;
+ stride[1] = 3;
+ count[0] = space_dim1/(stride[0]*block[0]);
+ count[1] = (space_dim2)/(stride[1]*block[1]);
+ start[0] = space_dim1*(hsize_t)mpi_rank;
+ start[1] = 0;
+
+ break;
case BYROW_SELECTNONE:
- /* Each process takes a slabs of rows, there are
+        /* Each process takes a slab of rows; there are
no selections for the last process. */
- block[0] = 1;
- block[1] = 1;
- stride[0] = 1;
- stride[1] = 1;
- count[0] = ((mpi_rank >= MAX(1,(mpi_size-2)))?0:space_dim1);
- count[1] = space_dim2;
- start[0] = mpi_rank*count[0];
- start[1] = 0;
+ block[0] = 1;
+ block[1] = 1;
+ stride[0] = 1;
+ stride[1] = 1;
+ count[0] = ((mpi_rank >= MAX(1,(mpi_size-2)))?0:space_dim1);
+ count[1] = space_dim2;
+ start[0] = (hsize_t)mpi_rank*count[0];
+ start[1] = 0;
- break;
+ break;
case BYROW_SELECTUNBALANCE:
/* The first one-third of the number of processes only
@@ -282,14 +284,14 @@ ccslab_set(int mpi_rank,
half of the domain. */
block[0] = 1;
- count[0] = 2;
- stride[0] = space_dim1*mpi_size/4+1;
+ count[0] = 2;
+ stride[0] = (hsize_t)(space_dim1*(hsize_t)mpi_size/4+1);
block[1] = space_dim2;
count[1] = 1;
start[1] = 0;
stride[1] = 1;
- if((mpi_rank *3)<(mpi_size*2)) start[0] = mpi_rank;
- else start[0] = 1 + space_dim1*mpi_size/2 + (mpi_rank-2*mpi_size/3);
+ if((mpi_rank *3)<(mpi_size*2)) start[0] = (hsize_t)mpi_rank;
+ else start[0] = 1 + space_dim1*(hsize_t)mpi_size/2 + (hsize_t)(mpi_rank-2*mpi_size/3);
break;
case BYROW_SELECTINCHUNK:
@@ -297,33 +299,33 @@ ccslab_set(int mpi_rank,
block[0] = 1;
count[0] = 1;
- start[0] = mpi_rank*space_dim1;
+ start[0] = (hsize_t)mpi_rank*space_dim1;
stride[0]= 1;
- block[1] = space_dim2;
- count[1] = 1;
- stride[1]= 1;
- start[1] = 0;
+ block[1] = space_dim2;
+ count[1] = 1;
+ stride[1]= 1;
+ start[1] = 0;
break;
default:
- /* Unknown mode. Set it to cover the whole dataset. */
- block[0] = space_dim1*mpi_size;
- block[1] = space_dim2;
- stride[0] = block[0];
- stride[1] = block[1];
- count[0] = 1;
- count[1] = 1;
- start[0] = 0;
- start[1] = 0;
-
- break;
+ /* Unknown mode. Set it to cover the whole dataset. */
+ block[0] = space_dim1*(hsize_t)mpi_size;
+ block[1] = space_dim2;
+ stride[0] = block[0];
+ stride[1] = block[1];
+ count[0] = 1;
+ count[1] = 1;
+ start[0] = 0;
+ start[1] = 0;
+
+ break;
}
if (VERBOSE_MED){
- printf("start[]=(%lu,%lu), count[]=(%lu,%lu), stride[]=(%lu,%lu), block[]=(%lu,%lu), total datapoints=%lu\n",
- (unsigned long)start[0], (unsigned long)start[1], (unsigned long)count[0], (unsigned long)count[1],
- (unsigned long)stride[0], (unsigned long)stride[1], (unsigned long)block[0], (unsigned long)block[1],
- (unsigned long)(block[0]*block[1]*count[0]*count[1]));
+ HDprintf("start[]=(%lu,%lu), count[]=(%lu,%lu), stride[]=(%lu,%lu), block[]=(%lu,%lu), total datapoints=%lu\n",
+ (unsigned long)start[0], (unsigned long)start[1], (unsigned long)count[0], (unsigned long)count[1],
+ (unsigned long)stride[0], (unsigned long)stride[1], (unsigned long)block[0], (unsigned long)block[1],
+ (unsigned long)(block[0]*block[1]*count[0]*count[1]));
}
}
@@ -334,10 +336,10 @@ ccslab_set(int mpi_rank,
*/
static void
ccdataset_fill(hsize_t start[],
- hsize_t stride[],
- hsize_t count[],
- hsize_t block[],
- DATATYPE * dataset,
+ hsize_t stride[],
+ hsize_t count[],
+ hsize_t block[],
+ DATATYPE * dataset,
int mem_selection)
{
DATATYPE *dataptr = dataset;
@@ -375,28 +377,28 @@ ccdataset_fill(hsize_t start[],
*/
static void
ccdataset_print(hsize_t start[],
- hsize_t block[],
- DATATYPE * dataset)
+ hsize_t block[],
+ DATATYPE * dataset)
{
DATATYPE *dataptr = dataset;
hsize_t i, j;
/* print the column heading */
- printf("Print only the first block of the dataset\n");
- printf("%-8s", "Cols:");
+ HDprintf("Print only the first block of the dataset\n");
+ HDprintf("%-8s", "Cols:");
for (j=0; j < block[1]; j++){
- printf("%3lu ", (unsigned long)(start[1]+j));
+ HDprintf("%3lu ", (unsigned long)(start[1]+j));
}
- printf("\n");
+ HDprintf("\n");
/* print the slab data */
for (i=0; i < block[0]; i++){
- printf("Row %2lu: ", (unsigned long)(i+start[0]));
- for (j=0; j < block[1]; j++){
- printf("%03d ", *dataptr++);
- }
- printf("\n");
+ HDprintf("Row %2lu: ", (unsigned long)(i+start[0]));
+ for (j=0; j < block[1]; j++){
+ HDprintf("%03d ", *dataptr++);
+ }
+ HDprintf("\n");
}
}
@@ -405,11 +407,11 @@ ccdataset_print(hsize_t start[],
*/
static int
ccdataset_vrfy(hsize_t start[],
- hsize_t count[],
- hsize_t stride[],
- hsize_t block[],
- DATATYPE *dataset,
- DATATYPE *original,
+ hsize_t count[],
+ hsize_t stride[],
+ hsize_t block[],
+ DATATYPE *dataset,
+ DATATYPE *original,
int mem_selection)
{
hsize_t i, j,k1,k2,k=0;
@@ -418,14 +420,14 @@ ccdataset_vrfy(hsize_t start[],
/* print it if VERBOSE_MED */
if (VERBOSE_MED) {
- printf("dataset_vrfy dumping:::\n");
- printf("start(%lu, %lu), count(%lu, %lu), stride(%lu, %lu), block(%lu, %lu)\n",
- (unsigned long)start[0], (unsigned long)start[1], (unsigned long)count[0], (unsigned long)count[1],
- (unsigned long)stride[0], (unsigned long)stride[1], (unsigned long)block[0], (unsigned long)block[1]);
- printf("original values:\n");
- ccdataset_print(start, block, original);
- printf("compared values:\n");
- ccdataset_print(start, block, dataset);
+ HDprintf("dataset_vrfy dumping:::\n");
+ HDprintf("start(%lu, %lu), count(%lu, %lu), stride(%lu, %lu), block(%lu, %lu)\n",
+ (unsigned long)start[0], (unsigned long)start[1], (unsigned long)count[0], (unsigned long)count[1],
+ (unsigned long)stride[0], (unsigned long)stride[1], (unsigned long)block[0], (unsigned long)block[1]);
+ HDprintf("original values:\n");
+ ccdataset_print(start, block, original);
+ HDprintf("compared values:\n");
+ ccdataset_print(start, block, dataset);
}
vrfyerrs = 0;
@@ -447,7 +449,7 @@ ccdataset_vrfy(hsize_t start[],
}
if (*dataptr != *oriptr){
if (vrfyerrs++ < MAX_ERR_REPORT || VERBOSE_MED){
- printf("Dataset Verify failed at [%lu][%lu]: expect %d, got %d\n",
+ HDprintf("Dataset Verify failed at [%lu][%lu]: expect %d, got %d\n",
(unsigned long)i, (unsigned long)j,
*(oriptr), *(dataptr));
}
@@ -457,9 +459,9 @@ ccdataset_vrfy(hsize_t start[],
}
}
if (vrfyerrs > MAX_ERR_REPORT && !VERBOSE_MED)
- printf("[more errors ...]\n");
+ HDprintf("[more errors ...]\n");
if (vrfyerrs)
- printf("%d errors found in ccdataset_vrfy\n", vrfyerrs);
+ HDprintf("%d errors found in ccdataset_vrfy\n", vrfyerrs);
return(vrfyerrs);
}
@@ -476,111 +478,94 @@ static void
dataset_big_write(void)
{
- hid_t xfer_plist; /* Dataset transfer properties list */
- hid_t sid; /* Dataspace ID */
- hid_t file_dataspace; /* File dataspace ID */
- hid_t mem_dataspace; /* memory dataspace ID */
+ hid_t xfer_plist; /* Dataset transfer properties list */
+ hid_t sid; /* Dataspace ID */
+ hid_t file_dataspace; /* File dataspace ID */
+ hid_t mem_dataspace; /* memory dataspace ID */
hid_t dataset;
- hid_t datatype; /* Datatype ID */
- hsize_t dims[RANK]; /* dataset dim sizes */
- hsize_t start[RANK]; /* for hyperslab setting */
- hsize_t count[RANK], stride[RANK]; /* for hyperslab setting */
- hsize_t block[RANK]; /* for hyperslab setting */
+ hsize_t dims[RANK]; /* dataset dim sizes */
+ hsize_t start[RANK]; /* for hyperslab setting */
+ hsize_t count[RANK],stride[RANK]; /* for hyperslab setting */
+ hsize_t block[RANK]; /* for hyperslab setting */
hsize_t *coords = NULL;
- int i;
- herr_t ret; /* Generic return value */
- hid_t fid; /* HDF5 file ID */
- hid_t acc_tpl; /* File access templates */
- hsize_t h;
+ herr_t ret; /* Generic return value */
+ hid_t fid; /* HDF5 file ID */
+ hid_t acc_tpl; /* File access templates */
size_t num_points;
B_DATATYPE * wdata;
/* allocate memory for data buffer */
- wdata = (B_DATATYPE *)malloc(bigcount*sizeof(B_DATATYPE));
- VRFY((wdata != NULL), "wdata malloc succeeded");
+ wdata = (B_DATATYPE *)HDmalloc(bigcount*sizeof(B_DATATYPE));
+ VRFY_G((wdata != NULL), "wdata malloc succeeded");
/* setup file access template */
acc_tpl = H5Pcreate (H5P_FILE_ACCESS);
- VRFY((acc_tpl >= 0), "H5P_FILE_ACCESS");
+ VRFY_G((acc_tpl >= 0), "H5P_FILE_ACCESS");
H5Pset_fapl_mpio(acc_tpl, MPI_COMM_WORLD, MPI_INFO_NULL);
/* create the file collectively */
- fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, acc_tpl);
- VRFY((fid >= 0), "H5Fcreate succeeded");
+ fid = H5Fcreate(FILENAME[0], H5F_ACC_TRUNC, H5P_DEFAULT, acc_tpl);
+ VRFY_G((fid >= 0), "H5Fcreate succeeded");
/* Release file-access template */
ret = H5Pclose(acc_tpl);
- VRFY((ret >= 0), "");
+ VRFY_G((ret >= 0), "");
     /* Each process takes a slab of rows. */
- printf("\nTesting Dataset1 write by ROW\n");
+ if (mpi_rank_g == 0)
+ HDprintf("\nTesting Dataset1 write by ROW\n");
/* Create a large dataset */
dims[0] = bigcount;
- dims[1] = mpi_size;
+ dims[1] = (hsize_t)mpi_size_g;
sid = H5Screate_simple (RANK, dims, NULL);
- VRFY((sid >= 0), "H5Screate_simple succeeded");
+ VRFY_G((sid >= 0), "H5Screate_simple succeeded");
dataset = H5Dcreate2(fid, DATASET1, H5T_NATIVE_LLONG, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
- VRFY((dataset >= 0), "H5Dcreate2 succeeded");
+ VRFY_G((dataset >= 0), "H5Dcreate2 succeeded");
H5Sclose(sid);
- block[0] = dims[0]/mpi_size;
+ block[0] = dims[0]/(hsize_t)mpi_size_g;
block[1] = dims[1];
stride[0] = block[0];
stride[1] = block[1];
count[0] = 1;
count[1] = 1;
- start[0] = mpi_rank*block[0];
+ start[0] = (hsize_t)mpi_rank_g*block[0];
start[1] = 0;
/* create a file dataspace independently */
file_dataspace = H5Dget_space (dataset);
- VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
+ VRFY_G((file_dataspace >= 0), "H5Dget_space succeeded");
ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
- VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
+ VRFY_G((ret >= 0), "H5Sset_hyperslab succeeded");
/* create a memory dataspace independently */
mem_dataspace = H5Screate_simple (RANK, block, NULL);
- VRFY((mem_dataspace >= 0), "");
+ VRFY_G((mem_dataspace >= 0), "");
/* fill the local slab with some trivial data */
fill_datasets(start, block, wdata);
MESG("data_array initialized");
if(VERBOSE_MED){
- MESG("data_array created");
- dataset_print(start, block, wdata);
+ MESG("data_array created");
+ dataset_print(start, block, wdata);
}
/* set up the collective transfer properties list */
xfer_plist = H5Pcreate (H5P_DATASET_XFER);
- VRFY((xfer_plist >= 0), "H5Pcreate xfer succeeded");
+ VRFY_G((xfer_plist >= 0), "H5Pcreate xfer succeeded");
ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
- VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
+ VRFY_G((ret >= 0), "H5Pset_dxpl_mpio succeeded");
if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
- VRFY((ret>= 0),"set independent IO collectively succeeded");
- }
-
- /* write data collectively */
- MESG("writeAll by Row");
- {
- int j,k =0;
- for (i=0; i < block[0]; i++){
- for (j=0; j < block[1]; j++){
- if(k < 10) {
- printf("%lld ", wdata[k]);
- k++;
- }
- }
- }
- printf("\n");
+ VRFY_G((ret>= 0),"set independent IO collectively succeeded");
}
ret = H5Dwrite(dataset, H5T_NATIVE_LLONG, mem_dataspace, file_dataspace,
xfer_plist, wdata);
- VRFY((ret >= 0), "H5Dwrite dataset1 succeeded");
+ VRFY_G((ret >= 0), "H5Dwrite dataset1 succeeded");
/* release all temporary handles. */
H5Sclose(file_dataspace);
@@ -588,77 +573,62 @@ dataset_big_write(void)
H5Pclose(xfer_plist);
ret = H5Dclose(dataset);
- VRFY((ret >= 0), "H5Dclose1 succeeded");
-
+ VRFY_G((ret >= 0), "H5Dclose1 succeeded");
     /* Each process takes a slab of cols. */
- printf("\nTesting Dataset2 write by COL\n");
+ if (mpi_rank_g == 0)
+ HDprintf("\nTesting Dataset2 write by COL\n");
/* Create a large dataset */
dims[0] = bigcount;
- dims[1] = mpi_size;
+ dims[1] = (hsize_t)mpi_size_g;
sid = H5Screate_simple (RANK, dims, NULL);
- VRFY((sid >= 0), "H5Screate_simple succeeded");
+ VRFY_G((sid >= 0), "H5Screate_simple succeeded");
dataset = H5Dcreate2(fid, DATASET2, H5T_NATIVE_LLONG, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
- VRFY((dataset >= 0), "H5Dcreate2 succeeded");
+ VRFY_G((dataset >= 0), "H5Dcreate2 succeeded");
H5Sclose(sid);
block[0] = dims[0];
- block[1] = dims[1]/mpi_size;
+ block[1] = dims[1]/(hsize_t)mpi_size_g;
stride[0] = block[0];
stride[1] = block[1];
count[0] = 1;
count[1] = 1;
start[0] = 0;
- start[1] = mpi_rank*block[1];
+ start[1] = (hsize_t)mpi_rank_g*block[1];
/* create a file dataspace independently */
file_dataspace = H5Dget_space (dataset);
- VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
+ VRFY_G((file_dataspace >= 0), "H5Dget_space succeeded");
ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
- VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
+ VRFY_G((ret >= 0), "H5Sset_hyperslab succeeded");
/* create a memory dataspace independently */
mem_dataspace = H5Screate_simple (RANK, block, NULL);
- VRFY((mem_dataspace >= 0), "");
+ VRFY_G((mem_dataspace >= 0), "");
/* fill the local slab with some trivial data */
fill_datasets(start, block, wdata);
MESG("data_array initialized");
if(VERBOSE_MED){
- MESG("data_array created");
- dataset_print(start, block, wdata);
+ MESG("data_array created");
+ dataset_print(start, block, wdata);
}
/* set up the collective transfer properties list */
xfer_plist = H5Pcreate (H5P_DATASET_XFER);
- VRFY((xfer_plist >= 0), "H5Pcreate xfer succeeded");
+ VRFY_G((xfer_plist >= 0), "H5Pcreate xfer succeeded");
ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
- VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
+ VRFY_G((ret >= 0), "H5Pset_dxpl_mpio succeeded");
if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
- VRFY((ret>= 0),"set independent IO collectively succeeded");
- }
-
- /* write data collectively */
- MESG("writeAll by Col");
- {
- int j,k =0;
- for (i=0; i < block[0]; i++){
- for (j=0; j < block[1]; j++){
- if(k < 10) {
- printf("%lld ", wdata[k]);
- k++;
- }
- }
- }
- printf("\n");
+ VRFY_G((ret>= 0),"set independent IO collectively succeeded");
}
ret = H5Dwrite(dataset, H5T_NATIVE_LLONG, mem_dataspace, file_dataspace,
xfer_plist, wdata);
- VRFY((ret >= 0), "H5Dwrite dataset1 succeeded");
+ VRFY_G((ret >= 0), "H5Dwrite dataset1 succeeded");
/* release all temporary handles. */
H5Sclose(file_dataspace);
@@ -666,77 +636,63 @@ dataset_big_write(void)
H5Pclose(xfer_plist);
ret = H5Dclose(dataset);
- VRFY((ret >= 0), "H5Dclose1 succeeded");
+ VRFY_G((ret >= 0), "H5Dclose1 succeeded");
/* ALL selection */
- printf("\nTesting Dataset3 write select ALL proc 0, NONE others\n");
+ if (mpi_rank_g == 0)
+ HDprintf("\nTesting Dataset3 write select ALL proc 0, NONE others\n");
/* Create a large dataset */
dims[0] = bigcount;
dims[1] = 1;
sid = H5Screate_simple (RANK, dims, NULL);
- VRFY((sid >= 0), "H5Screate_simple succeeded");
+ VRFY_G((sid >= 0), "H5Screate_simple succeeded");
dataset = H5Dcreate2(fid, DATASET3, H5T_NATIVE_LLONG, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
- VRFY((dataset >= 0), "H5Dcreate2 succeeded");
+ VRFY_G((dataset >= 0), "H5Dcreate2 succeeded");
H5Sclose(sid);
/* create a file dataspace independently */
file_dataspace = H5Dget_space (dataset);
- VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
- if(MAINPROCESS) {
+ VRFY_G((file_dataspace >= 0), "H5Dget_space succeeded");
+ if(mpi_rank_g == 0) {
ret = H5Sselect_all(file_dataspace);
- VRFY((ret >= 0), "H5Sset_all succeeded");
+ VRFY_G((ret >= 0), "H5Sset_all succeeded");
}
else {
ret = H5Sselect_none(file_dataspace);
- VRFY((ret >= 0), "H5Sset_none succeeded");
+ VRFY_G((ret >= 0), "H5Sset_none succeeded");
}
/* create a memory dataspace independently */
mem_dataspace = H5Screate_simple (RANK, dims, NULL);
- VRFY((mem_dataspace >= 0), "");
- if(!MAINPROCESS) {
+ VRFY_G((mem_dataspace >= 0), "");
+ if(mpi_rank_g != 0) {
ret = H5Sselect_none(mem_dataspace);
- VRFY((ret >= 0), "H5Sset_none succeeded");
+ VRFY_G((ret >= 0), "H5Sset_none succeeded");
}
/* set up the collective transfer properties list */
xfer_plist = H5Pcreate (H5P_DATASET_XFER);
- VRFY((xfer_plist >= 0), "H5Pcreate xfer succeeded");
+ VRFY_G((xfer_plist >= 0), "H5Pcreate xfer succeeded");
ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
- VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
+ VRFY_G((ret >= 0), "H5Pset_dxpl_mpio succeeded");
if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
- VRFY((ret>= 0),"set independent IO collectively succeeded");
+ VRFY_G((ret>= 0),"set independent IO collectively succeeded");
}
/* fill the local slab with some trivial data */
fill_datasets(start, dims, wdata);
MESG("data_array initialized");
if(VERBOSE_MED){
- MESG("data_array created");
- }
-
- /* write data collectively */
- MESG("writeAll by process 0");
- {
- int j,k =0;
- for (i=0; i < block[0]; i++){
- for (j=0; j < block[1]; j++){
- if(k < 10) {
- printf("%lld ", wdata[k]);
- k++;
- }
- }
- }
- printf("\n");
+ MESG("data_array created");
}
ret = H5Dwrite(dataset, H5T_NATIVE_LLONG, mem_dataspace, file_dataspace,
xfer_plist, wdata);
- VRFY((ret >= 0), "H5Dwrite dataset1 succeeded");
+ VRFY_G((ret >= 0), "H5Dwrite dataset1 succeeded");
/* release all temporary handles. */
H5Sclose(file_dataspace);
@@ -744,18 +700,19 @@ dataset_big_write(void)
H5Pclose(xfer_plist);
ret = H5Dclose(dataset);
- VRFY((ret >= 0), "H5Dclose1 succeeded");
+ VRFY_G((ret >= 0), "H5Dclose1 succeeded");
/* Point selection */
- printf("\nTesting Dataset4 write point selection\n");
+ if (mpi_rank_g == 0)
+ HDprintf("\nTesting Dataset4 write point selection\n");
/* Create a large dataset */
dims[0] = bigcount;
- dims[1] = mpi_size * 4;
+ dims[1] = (hsize_t)(mpi_size_g * 4);
sid = H5Screate_simple (RANK, dims, NULL);
- VRFY((sid >= 0), "H5Screate_simple succeeded");
+ VRFY_G((sid >= 0), "H5Screate_simple succeeded");
dataset = H5Dcreate2(fid, DATASET4, H5T_NATIVE_LLONG, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
- VRFY((dataset >= 0), "H5Dcreate2 succeeded");
+ VRFY_G((dataset >= 0), "H5Dcreate2 succeeded");
H5Sclose(sid);
block[0] = dims[0]/2;
@@ -765,27 +722,27 @@ dataset_big_write(void)
count[0] = 1;
count[1] = 1;
start[0] = 0;
- start[1] = dims[1]/mpi_size * mpi_rank;
+ start[1] = dims[1]/(hsize_t)mpi_size_g * (hsize_t)mpi_rank_g;
num_points = bigcount;
- coords = (hsize_t *)malloc(num_points * RANK * sizeof(hsize_t));
- VRFY((coords != NULL), "coords malloc succeeded");
+ coords = (hsize_t *)HDmalloc(num_points * RANK * sizeof(hsize_t));
+ VRFY_G((coords != NULL), "coords malloc succeeded");
set_coords (start, count, stride, block, num_points, coords, IN_ORDER);
/* create a file dataspace */
file_dataspace = H5Dget_space (dataset);
- VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
+ VRFY_G((file_dataspace >= 0), "H5Dget_space succeeded");
ret = H5Sselect_elements(file_dataspace, H5S_SELECT_SET, num_points, coords);
- VRFY((ret >= 0), "H5Sselect_elements succeeded");
+ VRFY_G((ret >= 0), "H5Sselect_elements succeeded");
if(coords) free(coords);
fill_datasets(start, block, wdata);
MESG("data_array initialized");
if(VERBOSE_MED){
- MESG("data_array created");
- dataset_print(start, block, wdata);
+ MESG("data_array created");
+ dataset_print(start, block, wdata);
}
/* create a memory dataspace */
@@ -794,114 +751,21 @@ dataset_big_write(void)
* appears to cause problems with 32 bit compilers.
*/
mem_dataspace = H5Screate_simple (1, dims, NULL);
- VRFY((mem_dataspace >= 0), "");
-
- /* set up the collective transfer properties list */
- xfer_plist = H5Pcreate (H5P_DATASET_XFER);
- VRFY((xfer_plist >= 0), "H5Pcreate xfer succeeded");
- ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
- VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
- if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
- ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
- VRFY((ret>= 0),"set independent IO collectively succeeded");
- }
-
- ret = H5Dwrite(dataset, H5T_NATIVE_LLONG, mem_dataspace, file_dataspace,
- xfer_plist, wdata);
- VRFY((ret >= 0), "H5Dwrite dataset1 succeeded");
-
- /* release all temporary handles. */
- H5Sclose(file_dataspace);
- H5Sclose(mem_dataspace);
- H5Pclose(xfer_plist);
-
- ret = H5Dclose(dataset);
- VRFY((ret >= 0), "H5Dclose1 succeeded");
-
- /* Irregular selection */
- /* Need larger memory for data buffer */
- free(wdata);
-#if 0
- wdata = (B_DATATYPE *)malloc(bigcount*4*sizeof(B_DATATYPE));
- VRFY((wdata != NULL), "wdata malloc succeeded");
-
- printf("\nTesting Dataset5 write irregular selection\n");
- /* Create a large dataset */
- dims[0] = bigcount/6;
- dims[1] = mpi_size * 4;
-
- sid = H5Screate_simple (RANK, dims, NULL);
- VRFY((sid >= 0), "H5Screate_simple succeeded");
- dataset = H5Dcreate2(fid, DATASET5, H5T_NATIVE_LLONG, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
- VRFY((dataset >= 0), "H5Dcreate2 succeeded");
- H5Sclose(sid);
-
- /* first select 1 col in this procs splice */
- block[0] = dims[0];
- block[1] = 1;
- stride[0] = block[0];
- stride[1] = block[1];
- count[0] = 1;
- count[1] = 1;
- start[0] = 0;
- start[1] = mpi_rank * 4;
-
- /* create a file dataspace */
- file_dataspace = H5Dget_space (dataset);
- VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
-
- // dims[1] = 4;
- /* create a memory dataspace */
- mem_dataspace = H5Screate_simple (RANK, dims, NULL);
- VRFY((mem_dataspace >= 0), "");
-
- ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
- VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
-
- start[1] = 0;
- ret = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block);
- VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
-
- /* select every other row in the process splice and OR it with
- the col selection to create an irregular selection */
- for(h=0 ; h<dims[0] ; h+=2) {
- block[0] = 1;
- block[1] = 4;
- stride[0] = block[0];
- stride[1] = block[1];
- count[0] = 1;
- count[1] = 1;
- start[0] = h;
- start[1] = mpi_rank * 4;
+ VRFY_G((mem_dataspace >= 0), "");
- ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_OR, start, stride, count, block);
- VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
-
- start[1] = 0;
- ret = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_OR, start, stride, count, block);
- VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
- }
- printf("Setting up for collective transfer\n");
/* set up the collective transfer properties list */
xfer_plist = H5Pcreate (H5P_DATASET_XFER);
- VRFY((xfer_plist >= 0), "H5Pcreate xfer succeeded");
+ VRFY_G((xfer_plist >= 0), "H5Pcreate xfer succeeded");
ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
- VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
+ VRFY_G((ret >= 0), "H5Pset_dxpl_mpio succeeded");
if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
- VRFY((ret>= 0),"set independent IO collectively succeeded");
- }
-
- /* fill the local slab with some trivial data */
- fill_datasets(start, dims, wdata);
- MESG("data_array initialized");
- if(VERBOSE_MED){
- MESG("data_array created");
+ VRFY_G((ret>= 0),"set independent IO collectively succeeded");
}
ret = H5Dwrite(dataset, H5T_NATIVE_LLONG, mem_dataspace, file_dataspace,
xfer_plist, wdata);
- VRFY((ret >= 0), "H5Dwrite dataset1 succeeded");
+ VRFY_G((ret >= 0), "H5Dwrite dataset1 succeeded");
/* release all temporary handles. */
H5Sclose(file_dataspace);
@@ -909,10 +773,9 @@ dataset_big_write(void)
H5Pclose(xfer_plist);
ret = H5Dclose(dataset);
- VRFY((ret >= 0), "H5Dclose1 succeeded");
+ VRFY_G((ret >= 0), "H5Dclose1 succeeded");
- free(wdata);
-#endif
+ HDfree(wdata);
H5Fclose(fid);
}
@@ -929,256 +792,220 @@ static void
dataset_big_read(void)
{
hid_t fid; /* HDF5 file ID */
- hid_t acc_tpl; /* File access templates */
- hid_t xfer_plist; /* Dataset transfer properties list */
- hid_t file_dataspace; /* File dataspace ID */
- hid_t mem_dataspace; /* memory dataspace ID */
+ hid_t acc_tpl; /* File access templates */
+ hid_t xfer_plist; /* Dataset transfer properties list */
+ hid_t file_dataspace; /* File dataspace ID */
+ hid_t mem_dataspace; /* memory dataspace ID */
hid_t dataset;
- B_DATATYPE *rdata = NULL; /* data buffer */
- B_DATATYPE *wdata = NULL; /* expected data buffer */
- hsize_t dims[RANK]; /* dataset dim sizes */
- hsize_t start[RANK]; /* for hyperslab setting */
- hsize_t count[RANK], stride[RANK]; /* for hyperslab setting */
- hsize_t block[RANK]; /* for hyperslab setting */
- int i,j,k;
- hsize_t h;
+ B_DATATYPE *rdata = NULL; /* data buffer */
+ B_DATATYPE *wdata = NULL; /* expected data buffer */
+ hsize_t dims[RANK]; /* dataset dim sizes */
+ hsize_t start[RANK]; /* for hyperslab setting */
+ hsize_t count[RANK], stride[RANK]; /* for hyperslab setting */
+ hsize_t block[RANK]; /* for hyperslab setting */
size_t num_points;
hsize_t *coords = NULL;
- herr_t ret; /* Generic return value */
+ herr_t ret; /* Generic return value */
/* allocate memory for data buffer */
- rdata = (B_DATATYPE *)malloc(bigcount*sizeof(B_DATATYPE));
- VRFY((rdata != NULL), "rdata malloc succeeded");
- wdata = (B_DATATYPE *)malloc(bigcount*sizeof(B_DATATYPE));
- VRFY((wdata != NULL), "wdata malloc succeeded");
+ rdata = (B_DATATYPE *)HDmalloc(bigcount*sizeof(B_DATATYPE));
+ VRFY_G((rdata != NULL), "rdata malloc succeeded");
+ wdata = (B_DATATYPE *)HDmalloc(bigcount*sizeof(B_DATATYPE));
+ VRFY_G((wdata != NULL), "wdata malloc succeeded");
- memset(rdata, 0, bigcount*sizeof(B_DATATYPE));
+ HDmemset(rdata, 0, bigcount*sizeof(B_DATATYPE));
/* setup file access template */
acc_tpl = H5Pcreate (H5P_FILE_ACCESS);
- VRFY((acc_tpl >= 0), "H5P_FILE_ACCESS");
+ VRFY_G((acc_tpl >= 0), "H5P_FILE_ACCESS");
H5Pset_fapl_mpio(acc_tpl, MPI_COMM_WORLD, MPI_INFO_NULL);
/* open the file collectively */
- fid=H5Fopen(filename,H5F_ACC_RDONLY,acc_tpl);
- VRFY((fid >= 0), "H5Fopen succeeded");
+ fid=H5Fopen(FILENAME[0],H5F_ACC_RDONLY,acc_tpl);
+ VRFY_G((fid >= 0), "H5Fopen succeeded");
/* Release file-access template */
ret = H5Pclose(acc_tpl);
- VRFY((ret >= 0), "");
+ VRFY_G((ret >= 0), "");
+ if (mpi_rank_g == 0)
+ HDprintf("\nRead Testing Dataset1 by COL\n");
- printf("\nRead Testing Dataset1 by COL\n");
dataset = H5Dopen2(fid, DATASET1, H5P_DEFAULT);
- VRFY((dataset >= 0), "H5Dopen2 succeeded");
+ VRFY_G((dataset >= 0), "H5Dopen2 succeeded");
dims[0] = bigcount;
- dims[1] = mpi_size;
+ dims[1] = (hsize_t)mpi_size_g;
     /* Each process takes a slab of cols. */
block[0] = dims[0];
- block[1] = dims[1]/mpi_size;
+ block[1] = dims[1]/(hsize_t)mpi_size_g;
stride[0] = block[0];
stride[1] = block[1];
count[0] = 1;
count[1] = 1;
start[0] = 0;
- start[1] = mpi_rank*block[1];
+ start[1] = (hsize_t)mpi_rank_g*block[1];
/* create a file dataspace independently */
file_dataspace = H5Dget_space (dataset);
- VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
+ VRFY_G((file_dataspace >= 0), "H5Dget_space succeeded");
ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
- VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
+ VRFY_G((ret >= 0), "H5Sset_hyperslab succeeded");
/* create a memory dataspace independently */
mem_dataspace = H5Screate_simple (RANK, block, NULL);
- VRFY((mem_dataspace >= 0), "");
+ VRFY_G((mem_dataspace >= 0), "");
/* fill dataset with test data */
fill_datasets(start, block, wdata);
MESG("data_array initialized");
if(VERBOSE_MED){
- MESG("data_array created");
+ MESG("data_array created");
}
/* set up the collective transfer properties list */
xfer_plist = H5Pcreate (H5P_DATASET_XFER);
- VRFY((xfer_plist >= 0), "");
+ VRFY_G((xfer_plist >= 0), "");
ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
- VRFY((ret >= 0), "H5Pcreate xfer succeeded");
+ VRFY_G((ret >= 0), "H5Pcreate xfer succeeded");
if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
- VRFY((ret>= 0),"set independent IO collectively succeeded");
+ VRFY_G((ret>= 0),"set independent IO collectively succeeded");
}
/* read data collectively */
ret = H5Dread(dataset, H5T_NATIVE_LLONG, mem_dataspace, file_dataspace,
xfer_plist, rdata);
- VRFY((ret >= 0), "H5Dread dataset1 succeeded");
-
- {
- for (i=0; i < block[0]; i++){
- for (j=0; j < block[1]; j++){
- if(k < 10) {
- printf("%lld ", rdata[k]);
- k++;
- }
- }
- }
- printf("\n");
- }
+ VRFY_G((ret >= 0), "H5Dread dataset1 succeeded");
/* verify the read data with original expected data */
ret = verify_data(start, count, stride, block, rdata, wdata);
- if(ret) {fprintf(stderr, "verify failed\n"); exit(1);}
+ if(ret) {HDfprintf(stderr, "verify failed\n"); exit(1);}
/* release all temporary handles. */
H5Sclose(file_dataspace);
H5Sclose(mem_dataspace);
H5Pclose(xfer_plist);
ret = H5Dclose(dataset);
- VRFY((ret >= 0), "H5Dclose1 succeeded");
+ VRFY_G((ret >= 0), "H5Dclose1 succeeded");
- printf("\nRead Testing Dataset2 by ROW\n");
- memset(rdata, 0, bigcount*sizeof(B_DATATYPE));
+ if (mpi_rank_g == 0)
+ HDprintf("\nRead Testing Dataset2 by ROW\n");
+ HDmemset(rdata, 0, bigcount*sizeof(B_DATATYPE));
dataset = H5Dopen2(fid, DATASET2, H5P_DEFAULT);
- VRFY((dataset >= 0), "H5Dopen2 succeeded");
+ VRFY_G((dataset >= 0), "H5Dopen2 succeeded");
dims[0] = bigcount;
- dims[1] = mpi_size;
+ dims[1] = (hsize_t)mpi_size_g;
     /* Each process takes a slab of rows. */
- block[0] = dims[0]/mpi_size;
+ block[0] = dims[0]/(hsize_t)mpi_size_g;
block[1] = dims[1];
stride[0] = block[0];
stride[1] = block[1];
count[0] = 1;
count[1] = 1;
- start[0] = mpi_rank*block[0];
+ start[0] = (hsize_t)mpi_rank_g*block[0];
start[1] = 0;
/* create a file dataspace independently */
file_dataspace = H5Dget_space (dataset);
- VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
+ VRFY_G((file_dataspace >= 0), "H5Dget_space succeeded");
ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
- VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
+ VRFY_G((ret >= 0), "H5Sset_hyperslab succeeded");
/* create a memory dataspace independently */
mem_dataspace = H5Screate_simple (RANK, block, NULL);
- VRFY((mem_dataspace >= 0), "");
+ VRFY_G((mem_dataspace >= 0), "");
/* fill dataset with test data */
fill_datasets(start, block, wdata);
MESG("data_array initialized");
if(VERBOSE_MED){
- MESG("data_array created");
+ MESG("data_array created");
}
/* set up the collective transfer properties list */
xfer_plist = H5Pcreate (H5P_DATASET_XFER);
- VRFY((xfer_plist >= 0), "");
+ VRFY_G((xfer_plist >= 0), "");
ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
- VRFY((ret >= 0), "H5Pcreate xfer succeeded");
+ VRFY_G((ret >= 0), "H5Pcreate xfer succeeded");
if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
- VRFY((ret>= 0),"set independent IO collectively succeeded");
+ VRFY_G((ret>= 0),"set independent IO collectively succeeded");
}
/* read data collectively */
ret = H5Dread(dataset, H5T_NATIVE_LLONG, mem_dataspace, file_dataspace,
xfer_plist, rdata);
- VRFY((ret >= 0), "H5Dread dataset2 succeeded");
-
- {
- for (i=0; i < block[0]; i++){
- for (j=0; j < block[1]; j++){
- if(k < 10) {
- printf("%lld ", rdata[k]);
- k++;
- }
- }
- }
- printf("\n");
- }
+ VRFY_G((ret >= 0), "H5Dread dataset2 succeeded");
/* verify the read data with original expected data */
ret = verify_data(start, count, stride, block, rdata, wdata);
- if(ret) {fprintf(stderr, "verify failed\n"); exit(1);}
+ if(ret) {HDfprintf(stderr, "verify failed\n"); exit(1);}
/* release all temporary handles. */
H5Sclose(file_dataspace);
H5Sclose(mem_dataspace);
H5Pclose(xfer_plist);
ret = H5Dclose(dataset);
- VRFY((ret >= 0), "H5Dclose1 succeeded");
-
+ VRFY_G((ret >= 0), "H5Dclose1 succeeded");
- printf("\nRead Testing Dataset3 read select ALL proc 0, NONE others\n");
- memset(rdata, 0, bigcount*sizeof(B_DATATYPE));
+ if (mpi_rank_g == 0)
+ HDprintf("\nRead Testing Dataset3 read select ALL proc 0, NONE others\n");
+ HDmemset(rdata, 0, bigcount*sizeof(B_DATATYPE));
dataset = H5Dopen2(fid, DATASET3, H5P_DEFAULT);
- VRFY((dataset >= 0), "H5Dopen2 succeeded");
+ VRFY_G((dataset >= 0), "H5Dopen2 succeeded");
dims[0] = bigcount;
dims[1] = 1;
/* create a file dataspace independently */
file_dataspace = H5Dget_space (dataset);
- VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
- if(MAINPROCESS) {
+ VRFY_G((file_dataspace >= 0), "H5Dget_space succeeded");
+ if(mpi_rank_g == 0) {
ret = H5Sselect_all(file_dataspace);
- VRFY((ret >= 0), "H5Sset_all succeeded");
+ VRFY_G((ret >= 0), "H5Sset_all succeeded");
}
else {
ret = H5Sselect_none(file_dataspace);
- VRFY((ret >= 0), "H5Sset_none succeeded");
+ VRFY_G((ret >= 0), "H5Sset_none succeeded");
}
/* create a memory dataspace independently */
mem_dataspace = H5Screate_simple (RANK, dims, NULL);
- VRFY((mem_dataspace >= 0), "");
- if(!MAINPROCESS) {
+ VRFY_G((mem_dataspace >= 0), "");
+ if(mpi_rank_g != 0) {
ret = H5Sselect_none(mem_dataspace);
- VRFY((ret >= 0), "H5Sset_none succeeded");
+ VRFY_G((ret >= 0), "H5Sset_none succeeded");
}
/* fill dataset with test data */
fill_datasets(start, dims, wdata);
MESG("data_array initialized");
if(VERBOSE_MED){
- MESG("data_array created");
+ MESG("data_array created");
}
/* set up the collective transfer properties list */
xfer_plist = H5Pcreate (H5P_DATASET_XFER);
- VRFY((xfer_plist >= 0), "");
+ VRFY_G((xfer_plist >= 0), "");
ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
- VRFY((ret >= 0), "H5Pcreate xfer succeeded");
+ VRFY_G((ret >= 0), "H5Pcreate xfer succeeded");
if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
- VRFY((ret>= 0),"set independent IO collectively succeeded");
+ VRFY_G((ret>= 0),"set independent IO collectively succeeded");
}
/* read data collectively */
ret = H5Dread(dataset, H5T_NATIVE_LLONG, mem_dataspace, file_dataspace,
xfer_plist, rdata);
- VRFY((ret >= 0), "H5Dread dataset3 succeeded");
-
- {
- for (i=0; i < block[0]; i++){
- for (j=0; j < block[1]; j++){
- if(k < 10) {
- printf("%lld ", rdata[k]);
- k++;
- }
- }
- }
- printf("\n");
- }
+ VRFY_G((ret >= 0), "H5Dread dataset3 succeeded");
- if(MAINPROCESS) {
+ if(mpi_rank_g == 0) {
/* verify the read data with original expected data */
ret = verify_data(start, count, stride, block, rdata, wdata);
- if(ret) {fprintf(stderr, "verify failed\n"); exit(1);}
+ if(ret) {HDfprintf(stderr, "verify failed\n"); exit(1);}
}
/* release all temporary handles. */
@@ -1186,14 +1013,15 @@ dataset_big_read(void)
H5Sclose(mem_dataspace);
H5Pclose(xfer_plist);
ret = H5Dclose(dataset);
- VRFY((ret >= 0), "H5Dclose1 succeeded");
+ VRFY_G((ret >= 0), "H5Dclose1 succeeded");
- printf("\nRead Testing Dataset4 with Point selection\n");
+ if (mpi_rank_g == 0)
+ HDprintf("\nRead Testing Dataset4 with Point selection\n");
dataset = H5Dopen2(fid, DATASET4, H5P_DEFAULT);
- VRFY((dataset >= 0), "H5Dopen2 succeeded");
+ VRFY_G((dataset >= 0), "H5Dopen2 succeeded");
dims[0] = bigcount;
- dims[1] = mpi_size * 4;
+ dims[1] = (hsize_t)(mpi_size_g * 4);
block[0] = dims[0]/2;
block[1] = 2;
@@ -1202,28 +1030,28 @@ dataset_big_read(void)
count[0] = 1;
count[1] = 1;
start[0] = 0;
- start[1] = dims[1]/mpi_size * mpi_rank;
+ start[1] = dims[1]/(hsize_t)mpi_size_g * (hsize_t)mpi_rank_g;
fill_datasets(start, block, wdata);
MESG("data_array initialized");
if(VERBOSE_MED){
- MESG("data_array created");
- dataset_print(start, block, wdata);
+ MESG("data_array created");
+ dataset_print(start, block, wdata);
}
num_points = bigcount;
- coords = (hsize_t *)malloc(num_points * RANK * sizeof(hsize_t));
- VRFY((coords != NULL), "coords malloc succeeded");
+ coords = (hsize_t *)HDmalloc(num_points * RANK * sizeof(hsize_t));
+ VRFY_G((coords != NULL), "coords malloc succeeded");
set_coords (start, count, stride, block, num_points, coords, IN_ORDER);
/* create a file dataspace */
file_dataspace = H5Dget_space (dataset);
- VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
+ VRFY_G((file_dataspace >= 0), "H5Dget_space succeeded");
ret = H5Sselect_elements(file_dataspace, H5S_SELECT_SET, num_points, coords);
- VRFY((ret >= 0), "H5Sselect_elements succeeded");
+ VRFY_G((ret >= 0), "H5Sselect_elements succeeded");
- if(coords) free(coords);
+ if(coords) HDfree(coords);
/* create a memory dataspace */
/* Warning: H5Screate_simple requires an array of hsize_t elements
@@ -1231,160 +1059,119 @@ dataset_big_read(void)
* appears to cause problems with 32 bit compilers.
*/
mem_dataspace = H5Screate_simple (1, dims, NULL);
- VRFY((mem_dataspace >= 0), "");
+ VRFY_G((mem_dataspace >= 0), "");
/* set up the collective transfer properties list */
xfer_plist = H5Pcreate (H5P_DATASET_XFER);
- VRFY((xfer_plist >= 0), "");
+ VRFY_G((xfer_plist >= 0), "");
ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
- VRFY((ret >= 0), "H5Pcreate xfer succeeded");
+ VRFY_G((ret >= 0), "H5Pcreate xfer succeeded");
if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
- VRFY((ret>= 0),"set independent IO collectively succeeded");
+ VRFY_G((ret>= 0),"set independent IO collectively succeeded");
}
/* read data collectively */
ret = H5Dread(dataset, H5T_NATIVE_LLONG, mem_dataspace, file_dataspace,
xfer_plist, rdata);
- VRFY((ret >= 0), "H5Dread dataset1 succeeded");
+ VRFY_G((ret >= 0), "H5Dread dataset1 succeeded");
ret = verify_data(start, count, stride, block, rdata, wdata);
- if(ret) {fprintf(stderr, "verify failed\n"); exit(1);}
+ if(ret) {HDfprintf(stderr, "verify failed\n"); exit(1);}
/* release all temporary handles. */
H5Sclose(file_dataspace);
H5Sclose(mem_dataspace);
H5Pclose(xfer_plist);
ret = H5Dclose(dataset);
- VRFY((ret >= 0), "H5Dclose1 succeeded");
-
- printf("\nRead Testing Dataset5 with Irregular selection\n");
- /* Need larger memory for data buffer */
- free(wdata);
- free(rdata);
-#if 0
- wdata = (B_DATATYPE *)malloc(bigcount*4*sizeof(B_DATATYPE));
- VRFY((wdata != NULL), "wdata malloc succeeded");
- rdata = (B_DATATYPE *)malloc(bigcount*4*sizeof(B_DATATYPE));
- VRFY((rdata != NULL), "rdata malloc succeeded");
+ VRFY_G((ret >= 0), "H5Dclose1 succeeded");
- dataset = H5Dopen2(fid, DATASET5, H5P_DEFAULT);
- VRFY((dataset >= 0), "H5Dopen2 succeeded");
+ HDfree(wdata);
+ HDfree(rdata);
- dims[0] = bigcount;
- dims[1] = mpi_size * 4;
+ wdata = NULL;
+ rdata = NULL;
+ /* We never wrote Dataset5 in the write section, so we can't
+ * expect to read it...
+ */
+ file_dataspace = -1;
+ mem_dataspace = -1;
+ xfer_plist = -1;
+ dataset = -1;
- /* first select 1 col in this proc splice */
- block[0] = dims[0];
- block[1] = 1;
- stride[0] = block[0];
- stride[1] = block[1];
- count[0] = 1;
- count[1] = 1;
- start[0] = 0;
- start[1] = mpi_rank * 4;
+ /* release all temporary handles. */
+ if (file_dataspace != -1) H5Sclose(file_dataspace);
+ if (mem_dataspace != -1) H5Sclose(mem_dataspace);
+ if (xfer_plist != -1) H5Pclose(xfer_plist);
+ if (dataset != -1) {
+ ret = H5Dclose(dataset);
+ VRFY_G((ret >= 0), "H5Dclose1 succeeded");
+ }
+ H5Fclose(fid);
- /* get file dataspace */
- file_dataspace = H5Dget_space (dataset);
- VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
+ /* release data buffers */
+ if(rdata) HDfree(rdata);
+ if(wdata) HDfree(wdata);
- /* create a memory dataspace */
- mem_dataspace = H5Screate_simple (RANK, dims, NULL);
- VRFY((mem_dataspace >= 0), "");
+} /* dataset_big_read */
- ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
- VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
+static void
+single_rank_independent_io(void)
+{
+ if (mpi_rank_g == 0)
+ HDprintf("single_rank_independent_io\n");
- start[1] = 0;
- ret = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block);
- VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
+ if (MAIN_PROCESS) {
+ hsize_t dims[] = { LARGE_DIM };
+ hid_t file_id = -1;
+ hid_t fapl_id = -1;
+ hid_t dset_id = -1;
+ hid_t fspace_id = -1;
+ hid_t mspace_id = -1;
+ void *data = NULL;
- /* select every other row in the process splice and OR it with
- the col selection to create an irregular selection */
- for(h=0 ; h<dims[0] ; h+=2) {
- block[0] = 1;
- block[1] = 4;
- stride[0] = block[0];
- stride[1] = block[1];
- count[0] = 1;
- count[1] = 1;
- start[0] = h;
- start[1] = mpi_rank * 4;
+ fapl_id = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY_G((fapl_id >= 0), "H5P_FILE_ACCESS");
- ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_OR, start, stride, count, block);
- VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
+ H5Pset_fapl_mpio(fapl_id, MPI_COMM_SELF, MPI_INFO_NULL);
+ file_id = H5Fcreate(FILENAME[1], H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id);
+ VRFY_G((file_id >= 0), "H5Fcreate succeeded");
- start[1] = 0;
- ret = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_OR, start, stride, count, block);
- VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
+ fspace_id = H5Screate_simple(1, dims, NULL);
+ VRFY_G((fspace_id >= 0), "H5Screate_simple fspace_id succeeded");
- //fprintf(stderr, "%d: %d - %d\n", mpi_rank, (int)h, (int)H5Sget_select_npoints(mem_dataspace));
- }
+ /*
+ * Create and write to a >2GB dataset from a single rank.
+ */
+ dset_id = H5Dcreate2(file_id, "test_dset", H5T_NATIVE_INT, fspace_id,
+ H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
- /* set up the collective transfer properties list */
- xfer_plist = H5Pcreate (H5P_DATASET_XFER);
- VRFY((xfer_plist >= 0), "");
- ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
- VRFY((ret >= 0), "H5Pcreate xfer succeeded");
- if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
- ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
- VRFY((ret>= 0),"set independent IO collectively succeeded");
- }
+ VRFY_G((dset_id >= 0), "H5Dcreate2 succeeded");
- /* read data collectively */
- ret = H5Dread(dataset, H5T_NATIVE_LLONG, mem_dataspace, file_dataspace,
- xfer_plist, rdata);
- VRFY((ret >= 0), "H5Dread dataset1 succeeded");
+ data = HDmalloc(LARGE_DIM * sizeof(int));
+ VRFY_G((data != NULL), "data HDmalloc succeeded");
- /* fill dataset with test data */
- fill_datasets(start, dims, wdata);
- MESG("data_array initialized");
- if(VERBOSE_MED){
- MESG("data_array created");
- }
+ if (mpi_rank_g == 0)
+ H5Sselect_all(fspace_id);
+ else
+ H5Sselect_none(fspace_id);
+ dims[0] = LARGE_DIM;
+ mspace_id = H5Screate_simple(1, dims, NULL);
+ VRFY_G((mspace_id >= 0), "H5Screate_simple mspace_id succeeded");
+ H5Dwrite(dset_id, H5T_NATIVE_INT, mspace_id, fspace_id, H5P_DEFAULT, data);
+ HDfree(data);
+ H5Sclose(mspace_id);
+ H5Sclose(fspace_id);
+ H5Pclose(fapl_id);
+ H5Dclose(dset_id);
+ H5Fclose(file_id);
- /* verify the read data with original expected data */
- block[0] = dims[0];
- block[1] = 1;
- stride[0] = block[0];
- stride[1] = block[1];
- count[0] = 1;
- count[1] = 1;
- start[0] = 0;
- start[1] = 0;
- ret = verify_data(start, count, stride, block, rdata, wdata);
- if(ret) {fprintf(stderr, "verify failed\n"); exit(1);}
+ HDremove(FILENAME[1]);
- for(h=0 ; h<dims[0] ; h+=2) {
- block[0] = 1;
- block[1] = 4;
- stride[0] = block[0];
- stride[1] = block[1];
- count[0] = 1;
- count[1] = 1;
- start[0] = h;
- start[1] = 0;
- ret = verify_data(start, count, stride, block, rdata, wdata);
- if(ret) {fprintf(stderr, "verify failed\n"); exit(1);}
}
-
- /* release all temporary handles. */
- H5Sclose(file_dataspace);
- H5Sclose(mem_dataspace);
- H5Pclose(xfer_plist);
- ret = H5Dclose(dataset);
- VRFY((ret >= 0), "H5Dclose1 succeeded");
-
- H5Fclose(fid);
-
- /* release data buffers */
- if(rdata) free(rdata);
- if(wdata) free(wdata);
-#endif
-} /* dataset_large_readAll */
-
+ MPI_Barrier(MPI_COMM_WORLD);
+}
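/*
 * Sketch (not from this patch): the essential MPI_COMM_SELF pattern that
 * single_rank_independent_io() above exercises, reduced to a minimal form.
 * The file name "sketch.h5" and the 1024-element extent are hypothetical,
 * and error checks are elided for brevity.
 */
static void
self_comm_write_sketch(int rank)
{
    if (rank == 0) {
        hsize_t dims[1] = {1024};
        int     buf[1024];
        hid_t   fapl_id, file_id, space_id, dset_id;

        HDmemset(buf, 0, sizeof(buf));

        /* MPI_COMM_SELF keeps every file operation local to this rank */
        fapl_id = H5Pcreate(H5P_FILE_ACCESS);
        H5Pset_fapl_mpio(fapl_id, MPI_COMM_SELF, MPI_INFO_NULL);

        file_id  = H5Fcreate("sketch.h5", H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id);
        space_id = H5Screate_simple(1, dims, NULL);
        dset_id  = H5Dcreate2(file_id, "dset", H5T_NATIVE_INT, space_id,
                              H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);

        H5Dwrite(dset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf);

        H5Dclose(dset_id);
        H5Sclose(space_id);
        H5Fclose(file_id);
        H5Pclose(fapl_id);
    }

    /* resynchronize before collective work resumes, as the test does */
    MPI_Barrier(MPI_COMM_WORLD);
}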
/*
* Create the appropriate File access property list
@@ -1394,45 +1181,45 @@ create_faccess_plist(MPI_Comm comm, MPI_Info info, int l_facc_type)
{
hid_t ret_pl = -1;
herr_t ret; /* generic return value */
- int mpi_rank; /* mpi variables */
+ int mpi_rank; /* mpi variables */
/* need the rank for error checking macros */
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
ret_pl = H5Pcreate (H5P_FILE_ACCESS);
- VRFY((ret_pl >= 0), "H5P_FILE_ACCESS");
+ VRFY_G((ret_pl >= 0), "H5P_FILE_ACCESS");
if (l_facc_type == FACC_DEFAULT)
- return (ret_pl);
+ return (ret_pl);
if (l_facc_type == FACC_MPIO){
- /* set Parallel access with communicator */
- ret = H5Pset_fapl_mpio(ret_pl, comm, info);
- VRFY((ret >= 0), "");
+ /* set Parallel access with communicator */
+ ret = H5Pset_fapl_mpio(ret_pl, comm, info);
+ VRFY_G((ret >= 0), "");
ret = H5Pset_all_coll_metadata_ops(ret_pl, TRUE);
- VRFY((ret >= 0), "");
+ VRFY_G((ret >= 0), "");
ret = H5Pset_coll_metadata_write(ret_pl, TRUE);
- VRFY((ret >= 0), "");
- return(ret_pl);
+ VRFY_G((ret >= 0), "");
+ return(ret_pl);
}
if (l_facc_type == (FACC_MPIO | FACC_SPLIT)){
- hid_t mpio_pl;
-
- mpio_pl = H5Pcreate (H5P_FILE_ACCESS);
- VRFY((mpio_pl >= 0), "");
- /* set Parallel access with communicator */
- ret = H5Pset_fapl_mpio(mpio_pl, comm, info);
- VRFY((ret >= 0), "");
-
- /* setup file access template */
- ret_pl = H5Pcreate (H5P_FILE_ACCESS);
- VRFY((ret_pl >= 0), "");
- /* set Parallel access with communicator */
- ret = H5Pset_fapl_split(ret_pl, ".meta", mpio_pl, ".raw", mpio_pl);
- VRFY((ret >= 0), "H5Pset_fapl_split succeeded");
- H5Pclose(mpio_pl);
- return(ret_pl);
+ hid_t mpio_pl;
+
+ mpio_pl = H5Pcreate (H5P_FILE_ACCESS);
+ VRFY_G((mpio_pl >= 0), "");
+ /* set Parallel access with communicator */
+ ret = H5Pset_fapl_mpio(mpio_pl, comm, info);
+ VRFY_G((ret >= 0), "");
+
+ /* setup file access template */
+ ret_pl = H5Pcreate (H5P_FILE_ACCESS);
+ VRFY_G((ret_pl >= 0), "");
+ /* set Parallel access with communicator */
+ ret = H5Pset_fapl_split(ret_pl, ".meta", mpio_pl, ".raw", mpio_pl);
+ VRFY_G((ret >= 0), "H5Pset_fapl_split succeeded");
+ H5Pclose(mpio_pl);
+ return(ret_pl);
}
/* unknown file access types */
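/*
 * Sketch (not from this patch): typical use of create_faccess_plist() as
 * defined above. "sketch.h5" and the helper name are hypothetical; FACC_MPIO
 * is the access type the surrounding tests request.
 */
static void
open_with_mpio_fapl_sketch(void)
{
    hid_t acc_pl = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, FACC_MPIO);
    hid_t fid    = H5Fcreate("sketch.h5", H5F_ACC_TRUNC, H5P_DEFAULT, acc_pl);

    /* the property list may be released once the file holds its own copy */
    H5Pclose(acc_pl);

    /* ... collective I/O against fid ... */

    H5Fclose(fid);
}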
@@ -1441,17 +1228,17 @@ create_faccess_plist(MPI_Comm comm, MPI_Info info, int l_facc_type)
/*-------------------------------------------------------------------------
- * Function: coll_chunk1
+ * Function: coll_chunk1
*
- * Purpose: Wrapper to test the collective chunk IO for regular JOINT
+ * Purpose: Wrapper to test the collective chunk IO for regular JOINT
selection with a single chunk
*
- * Return: Success: 0
+ * Return: Success: 0
*
- * Failure: -1
+ * Failure: -1
*
- * Programmer: Unknown
- * July 12th, 2004
+ * Programmer: Unknown
+ * July 12th, 2004
*
* Modifications:
*
@@ -1478,8 +1265,9 @@ create_faccess_plist(MPI_Comm comm, MPI_Info info, int l_facc_type)
void
coll_chunk1(void)
{
- if (MAINPROCESS)
- printf("coll_chunk1\n");
+ const char *filename = FILENAME[0];
+ if (mpi_rank_g == 0)
+ HDprintf("coll_chunk1\n");
coll_chunktest(filename, 1, BYROW_CONT, API_NONE, HYPER, HYPER, OUT_OF_ORDER);
coll_chunktest(filename, 1, BYROW_CONT, API_NONE, HYPER, POINT, OUT_OF_ORDER);
@@ -1494,17 +1282,17 @@ coll_chunk1(void)
/*-------------------------------------------------------------------------
- * Function: coll_chunk2
+ * Function: coll_chunk2
*
- * Purpose: Wrapper to test the collective chunk IO for regular DISJOINT
+ * Purpose: Wrapper to test the collective chunk IO for regular DISJOINT
selection with a single chunk
*
- * Return: Success: 0
+ * Return: Success: 0
*
- * Failure: -1
+ * Failure: -1
*
- * Programmer: Unknown
- * July 12th, 2004
+ * Programmer: Unknown
+ * July 12th, 2004
*
* Modifications:
*
@@ -1531,8 +1319,9 @@ coll_chunk1(void)
void
coll_chunk2(void)
{
- if (MAINPROCESS)
- printf("coll_chunk2\n");
+ const char *filename = FILENAME[0];
+ if (mpi_rank_g == 0)
+ HDprintf("coll_chunk2\n");
coll_chunktest(filename, 1, BYROW_DISCONT, API_NONE, HYPER, HYPER, OUT_OF_ORDER);
coll_chunktest(filename, 1, BYROW_DISCONT, API_NONE, HYPER, POINT, OUT_OF_ORDER);
@@ -1547,17 +1336,17 @@ coll_chunk2(void)
/*-------------------------------------------------------------------------
- * Function: coll_chunk3
+ * Function: coll_chunk3
*
- * Purpose: Wrapper to test the collective chunk IO for regular JOINT
+ * Purpose: Wrapper to test the collective chunk IO for regular JOINT
selection with at least 2*mpi_size chunks
*
- * Return: Success: 0
+ * Return: Success: 0
*
- * Failure: -1
+ * Failure: -1
*
- * Programmer: Unknown
- * July 12th, 2004
+ * Programmer: Unknown
+ * July 12th, 2004
*
* Modifications:
*
@@ -1585,35 +1374,36 @@ coll_chunk2(void)
void
coll_chunk3(void)
{
- if (MAINPROCESS)
- printf("coll_chunk3\n");
-
- coll_chunktest(filename, mpi_size, BYROW_CONT, API_NONE, HYPER, HYPER, OUT_OF_ORDER);
- coll_chunktest(filename, mpi_size, BYROW_CONT, API_NONE, HYPER, POINT, OUT_OF_ORDER);
- coll_chunktest(filename, mpi_size, BYROW_CONT, API_NONE, POINT, ALL, OUT_OF_ORDER);
- coll_chunktest(filename, mpi_size, BYROW_CONT, API_NONE, POINT, POINT, OUT_OF_ORDER);
- coll_chunktest(filename, mpi_size, BYROW_CONT, API_NONE, POINT, HYPER, OUT_OF_ORDER);
-
- coll_chunktest(filename, mpi_size, BYROW_CONT, API_NONE, POINT, ALL, IN_ORDER);
- coll_chunktest(filename, mpi_size, BYROW_CONT, API_NONE, POINT, POINT, IN_ORDER);
- coll_chunktest(filename, mpi_size, BYROW_CONT, API_NONE, POINT, HYPER, IN_ORDER);
+ const char *filename = FILENAME[0];
+ if (mpi_rank_g == 0)
+ HDprintf("coll_chunk3\n");
+
+ coll_chunktest(filename, mpi_size_g, BYROW_CONT, API_NONE, HYPER, HYPER, OUT_OF_ORDER);
+ coll_chunktest(filename, mpi_size_g, BYROW_CONT, API_NONE, HYPER, POINT, OUT_OF_ORDER);
+ coll_chunktest(filename, mpi_size_g, BYROW_CONT, API_NONE, POINT, ALL, OUT_OF_ORDER);
+ coll_chunktest(filename, mpi_size_g, BYROW_CONT, API_NONE, POINT, POINT, OUT_OF_ORDER);
+ coll_chunktest(filename, mpi_size_g, BYROW_CONT, API_NONE, POINT, HYPER, OUT_OF_ORDER);
+
+ coll_chunktest(filename, mpi_size_g, BYROW_CONT, API_NONE, POINT, ALL, IN_ORDER);
+ coll_chunktest(filename, mpi_size_g, BYROW_CONT, API_NONE, POINT, POINT, IN_ORDER);
+ coll_chunktest(filename, mpi_size_g, BYROW_CONT, API_NONE, POINT, HYPER, IN_ORDER);
}
/*-------------------------------------------------------------------------
 * Borrowed/Modified (slightly) from t_coll_chunk.c
 *-------------------------------------------------------------------------*/
/*-------------------------------------------------------------------------
- * Function: coll_chunktest
+ * Function: coll_chunktest
*
* Purpose: The real testing routine for regular selections with collective
chunked storage, testing both write and read.
- If anything fails, it may be read or write. There is no
- separation test between read and write.
+ If anything fails, the failure may lie in either the read or the
+ write; there is no separate test for read vs. write.
*
- * Return: Success: 0
+ * Return: Success: 0
*
- * Failure: -1
+ * Failure: -1
*
* Modifications:
* Remove invalid temporary property checks for API_LINK_HARD and
@@ -1621,8 +1411,8 @@ coll_chunk3(void)
* Programmer: Jonathan Kim
* Date: 2012-10-10
*
- * Programmer: Unknown
- * July 12th, 2004
+ * Programmer: Unknown
+ * July 12th, 2004
*
* Modifications:
*
@@ -1631,14 +1421,14 @@ coll_chunk3(void)
static void
coll_chunktest(const char* filename,
- int chunk_factor,
- int select_factor,
+ int chunk_factor,
+ int select_factor,
int api_option,
int file_selection,
int mem_selection,
int mode)
{
- hid_t file, dataset, file_dataspace, mem_dataspace;
+ hid_t file, dataset, file_dataspace, mem_dataspace;
hid_t acc_plist,xfer_plist,crp_plist;
hsize_t dims[RANK], chunk_dims[RANK];
@@ -1657,34 +1447,33 @@ coll_chunktest(const char* filename,
size_t num_points; /* for point selection */
hsize_t *coords = NULL; /* for point selection */
- int i;
/* Create the data space */
acc_plist = create_faccess_plist(comm,info,facc_type);
- VRFY((acc_plist >= 0),"");
+ VRFY_G((acc_plist >= 0),"");
file = H5Fcreate(filename,H5F_ACC_TRUNC,H5P_DEFAULT,acc_plist);
- VRFY((file >= 0),"H5Fcreate succeeded");
+ VRFY_G((file >= 0),"H5Fcreate succeeded");
status = H5Pclose(acc_plist);
- VRFY((status >= 0),"");
+ VRFY_G((status >= 0),"");
/* setup dimensionality object */
- dims[0] = space_dim1*mpi_size;
+ dims[0] = space_dim1*(hsize_t)mpi_size_g;
dims[1] = space_dim2;
/* allocate memory for data buffer */
data_array1 = (int *)HDmalloc(dims[0] * dims[1] * sizeof(int));
- VRFY((data_array1 != NULL), "data_array1 malloc succeeded");
+ VRFY_G((data_array1 != NULL), "data_array1 malloc succeeded");
/* set up dimensions of the slab this process accesses */
- ccslab_set(mpi_rank, mpi_size, start, count, stride, block, select_factor);
+ ccslab_set(mpi_rank_g, mpi_size_g, start, count, stride, block, select_factor);
/* set up the coords array selection */
num_points = block[0] * block[1] * count[0] * count[1];
coords = (hsize_t *)HDmalloc(num_points * RANK * sizeof(hsize_t));
- VRFY((coords != NULL), "coords malloc succeeded");
+ VRFY_G((coords != NULL), "coords malloc succeeded");
point_set(start, count, stride, block, num_points, coords, mode);
/* Warning: H5Screate_simple requires an array of hsize_t elements
@@ -1692,36 +1481,36 @@ coll_chunktest(const char* filename,
* appears to cause problems with 32 bit compilers.
*/
file_dataspace = H5Screate_simple(2, dims, NULL);
- VRFY((file_dataspace >= 0), "file dataspace created succeeded");
+ VRFY_G((file_dataspace >= 0), "file dataspace created succeeded");
if(ALL != mem_selection) {
mem_dataspace = H5Screate_simple(2, dims, NULL);
- VRFY((mem_dataspace >= 0), "mem dataspace created succeeded");
+ VRFY_G((mem_dataspace >= 0), "mem dataspace created succeeded");
}
else {
/* Putting the warning about H5Screate_simple (above) into practice... */
hsize_t dsdims[1] = {num_points};
mem_dataspace = H5Screate_simple (1, dsdims, NULL);
- VRFY((mem_dataspace >= 0), "mem_dataspace create succeeded");
+ VRFY_G((mem_dataspace >= 0), "mem_dataspace create succeeded");
}
crp_plist = H5Pcreate(H5P_DATASET_CREATE);
- VRFY((crp_plist >= 0),"");
+ VRFY_G((crp_plist >= 0),"");
/* Set up chunk information. */
- chunk_dims[0] = dims[0]/chunk_factor;
+ chunk_dims[0] = dims[0]/(hsize_t)chunk_factor;
/* to decrease the testing time, maintain bigger chunk size */
(chunk_factor == 1) ? (chunk_dims[1] = space_dim2) : (chunk_dims[1] = space_dim2/2);
status = H5Pset_chunk(crp_plist, 2, chunk_dims);
- VRFY((status >= 0),"chunk creation property list succeeded");
+ VRFY_G((status >= 0),"chunk creation property list succeeded");
dataset = H5Dcreate2(file, DSET_COLLECTIVE_CHUNK_NAME, H5T_NATIVE_INT,
file_dataspace, H5P_DEFAULT, crp_plist, H5P_DEFAULT);
- VRFY((dataset >= 0),"dataset created succeeded");
+ VRFY_G((dataset >= 0),"dataset created succeeded");
status = H5Pclose(crp_plist);
- VRFY((status >= 0), "");
+ VRFY_G((status >= 0), "");
/* put some trivial data in the data array */
ccdataset_fill(start, stride, count,block, data_array1, mem_selection);
@@ -1731,96 +1520,96 @@ coll_chunktest(const char* filename,
switch (file_selection) {
case HYPER:
status = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
- VRFY((status >= 0),"hyperslab selection succeeded");
+ VRFY_G((status >= 0),"hyperslab selection succeeded");
break;
case POINT:
if (num_points) {
status = H5Sselect_elements(file_dataspace, H5S_SELECT_SET, num_points, coords);
- VRFY((status >= 0),"Element selection succeeded");
+ VRFY_G((status >= 0),"Element selection succeeded");
}
else {
status = H5Sselect_none(file_dataspace);
- VRFY((status >= 0),"none selection succeeded");
+ VRFY_G((status >= 0),"none selection succeeded");
}
break;
case ALL:
status = H5Sselect_all(file_dataspace);
- VRFY((status >= 0), "H5Sselect_all succeeded");
+ VRFY_G((status >= 0), "H5Sselect_all succeeded");
break;
}
switch (mem_selection) {
case HYPER:
status = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block);
- VRFY((status >= 0),"hyperslab selection succeeded");
+ VRFY_G((status >= 0),"hyperslab selection succeeded");
break;
case POINT:
if (num_points) {
status = H5Sselect_elements(mem_dataspace, H5S_SELECT_SET, num_points, coords);
- VRFY((status >= 0),"Element selection succeeded");
+ VRFY_G((status >= 0),"Element selection succeeded");
}
else {
status = H5Sselect_none(mem_dataspace);
- VRFY((status >= 0),"none selection succeeded");
+ VRFY_G((status >= 0),"none selection succeeded");
}
break;
case ALL:
status = H5Sselect_all(mem_dataspace);
- VRFY((status >= 0), "H5Sselect_all succeeded");
+ VRFY_G((status >= 0), "H5Sselect_all succeeded");
break;
}
/* set up the collective transfer property list */
xfer_plist = H5Pcreate(H5P_DATASET_XFER);
- VRFY((xfer_plist >= 0), "");
+ VRFY_G((xfer_plist >= 0), "");
status = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
- VRFY((status>= 0),"MPIO collective transfer property succeeded");
+ VRFY_G((status>= 0),"MPIO collective transfer property succeeded");
if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
status = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
- VRFY((status>= 0),"set independent IO collectively succeeded");
+ VRFY_G((status>= 0),"set independent IO collectively succeeded");
}
switch(api_option){
- case API_LINK_HARD:
- status = H5Pset_dxpl_mpio_chunk_opt(xfer_plist,H5FD_MPIO_CHUNK_ONE_IO);
- VRFY((status>= 0),"collective chunk optimization succeeded");
+ case API_LINK_HARD:
+ status = H5Pset_dxpl_mpio_chunk_opt(xfer_plist,H5FD_MPIO_CHUNK_ONE_IO);
+ VRFY_G((status>= 0),"collective chunk optimization succeeded");
break;
- case API_MULTI_HARD:
- status = H5Pset_dxpl_mpio_chunk_opt(xfer_plist,H5FD_MPIO_CHUNK_MULTI_IO);
- VRFY((status>= 0),"collective chunk optimization succeeded ");
+ case API_MULTI_HARD:
+ status = H5Pset_dxpl_mpio_chunk_opt(xfer_plist,H5FD_MPIO_CHUNK_MULTI_IO);
+ VRFY_G((status>= 0),"collective chunk optimization succeeded ");
break;
- case API_LINK_TRUE:
+ case API_LINK_TRUE:
status = H5Pset_dxpl_mpio_chunk_opt_num(xfer_plist,2);
- VRFY((status>= 0),"collective chunk optimization set chunk number succeeded");
+ VRFY_G((status>= 0),"collective chunk optimization set chunk number succeeded");
break;
- case API_LINK_FALSE:
+ case API_LINK_FALSE:
status = H5Pset_dxpl_mpio_chunk_opt_num(xfer_plist,6);
- VRFY((status>= 0),"collective chunk optimization set chunk number succeeded");
+ VRFY_G((status>= 0),"collective chunk optimization set chunk number succeeded");
break;
- case API_MULTI_COLL:
+ case API_MULTI_COLL:
status = H5Pset_dxpl_mpio_chunk_opt_num(xfer_plist,8);/* make sure it is using multi-chunk IO */
- VRFY((status>= 0),"collective chunk optimization set chunk number succeeded");
- status = H5Pset_dxpl_mpio_chunk_opt_ratio(xfer_plist,50);
- VRFY((status>= 0),"collective chunk optimization set chunk ratio succeeded");
+ VRFY_G((status>= 0),"collective chunk optimization set chunk number succeeded");
+ status = H5Pset_dxpl_mpio_chunk_opt_ratio(xfer_plist,50);
+ VRFY_G((status>= 0),"collective chunk optimization set chunk ratio succeeded");
break;
- case API_MULTI_IND:
+ case API_MULTI_IND:
status = H5Pset_dxpl_mpio_chunk_opt_num(xfer_plist,8);/* make sure it is using multi-chunk IO */
- VRFY((status>= 0),"collective chunk optimization set chunk number succeeded");
- status = H5Pset_dxpl_mpio_chunk_opt_ratio(xfer_plist,100);
- VRFY((status>= 0),"collective chunk optimization set chunk ratio succeeded");
+ VRFY_G((status>= 0),"collective chunk optimization set chunk number succeeded");
+ status = H5Pset_dxpl_mpio_chunk_opt_ratio(xfer_plist,100);
+ VRFY_G((status>= 0),"collective chunk optimization set chunk ratio succeeded");
break;
- default:
+ default:
;
}
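/*
 * Sketch (not from this patch): the chunk-optimization forcing that the
 * switch above applies, shown standalone. The thresholds (8 chunks, a 50
 * percent ratio) mirror the API_MULTI_COLL case; the helper name is
 * hypothetical.
 */
static hid_t
make_multi_chunk_dxpl_sketch(void)
{
    hid_t dxpl = H5Pcreate(H5P_DATASET_XFER);

    H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);

    /* raise the link-chunk threshold so multi-chunk IO is selected */
    H5Pset_dxpl_mpio_chunk_opt_num(dxpl, 8);

    /* percent of processes per chunk required to stay collective */
    H5Pset_dxpl_mpio_chunk_opt_ratio(dxpl, 50);

    return dxpl;
}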
@@ -1831,42 +1620,42 @@ coll_chunktest(const char* filename,
prop_value = H5D_XFER_COLL_CHUNK_DEF;
status = H5Pinsert2(xfer_plist, H5D_XFER_COLL_CHUNK_LINK_HARD_NAME, H5D_XFER_COLL_CHUNK_SIZE, &prop_value,
NULL, NULL, NULL, NULL, NULL, NULL);
- VRFY((status >= 0),"testing property list inserted succeeded");
+ VRFY_G((status >= 0),"testing property list inserted succeeded");
break;
case API_MULTI_HARD:
prop_value = H5D_XFER_COLL_CHUNK_DEF;
status = H5Pinsert2(xfer_plist, H5D_XFER_COLL_CHUNK_MULTI_HARD_NAME, H5D_XFER_COLL_CHUNK_SIZE, &prop_value,
NULL, NULL, NULL, NULL, NULL, NULL);
- VRFY((status >= 0),"testing property list inserted succeeded");
+ VRFY_G((status >= 0),"testing property list inserted succeeded");
break;
case API_LINK_TRUE:
prop_value = H5D_XFER_COLL_CHUNK_DEF;
status = H5Pinsert2(xfer_plist, H5D_XFER_COLL_CHUNK_LINK_NUM_TRUE_NAME, H5D_XFER_COLL_CHUNK_SIZE, &prop_value,
NULL, NULL, NULL, NULL, NULL, NULL);
- VRFY((status >= 0),"testing property list inserted succeeded");
+ VRFY_G((status >= 0),"testing property list inserted succeeded");
break;
case API_LINK_FALSE:
prop_value = H5D_XFER_COLL_CHUNK_DEF;
status = H5Pinsert2(xfer_plist, H5D_XFER_COLL_CHUNK_LINK_NUM_FALSE_NAME, H5D_XFER_COLL_CHUNK_SIZE, &prop_value,
NULL, NULL, NULL, NULL, NULL, NULL);
- VRFY((status >= 0),"testing property list inserted succeeded");
+ VRFY_G((status >= 0),"testing property list inserted succeeded");
break;
case API_MULTI_COLL:
prop_value = H5D_XFER_COLL_CHUNK_DEF;
status = H5Pinsert2(xfer_plist, H5D_XFER_COLL_CHUNK_MULTI_RATIO_COLL_NAME, H5D_XFER_COLL_CHUNK_SIZE, &prop_value,
NULL, NULL, NULL, NULL, NULL, NULL);
- VRFY((status >= 0),"testing property list inserted succeeded");
+ VRFY_G((status >= 0),"testing property list inserted succeeded");
break;
case API_MULTI_IND:
prop_value = H5D_XFER_COLL_CHUNK_DEF;
status = H5Pinsert2(xfer_plist, H5D_XFER_COLL_CHUNK_MULTI_RATIO_IND_NAME, H5D_XFER_COLL_CHUNK_SIZE, &prop_value,
NULL, NULL, NULL, NULL, NULL, NULL);
- VRFY((status >= 0),"testing property list inserted succeeded");
+ VRFY_G((status >= 0),"testing property list inserted succeeded");
break;
default:
@@ -1877,46 +1666,46 @@ coll_chunktest(const char* filename,
/* write data collectively */
status = H5Dwrite(dataset, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- xfer_plist, data_array1);
- VRFY((status >= 0),"dataset write succeeded");
+ xfer_plist, data_array1);
+ VRFY_G((status >= 0),"dataset write succeeded");
#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
if(facc_type == FACC_MPIO) {
switch(api_option){
case API_LINK_HARD:
status = H5Pget(xfer_plist,H5D_XFER_COLL_CHUNK_LINK_HARD_NAME,&prop_value);
- VRFY((status >= 0),"testing property list get succeeded");
- VRFY((prop_value == 0),"API to set LINK COLLECTIVE IO directly succeeded");
+ VRFY_G((status >= 0),"testing property list get succeeded");
+ VRFY_G((prop_value == 0),"API to set LINK COLLECTIVE IO directly succeeded");
break;
case API_MULTI_HARD:
status = H5Pget(xfer_plist,H5D_XFER_COLL_CHUNK_MULTI_HARD_NAME,&prop_value);
- VRFY((status >= 0),"testing property list get succeeded");
- VRFY((prop_value == 0),"API to set MULTI-CHUNK COLLECTIVE IO optimization succeeded");
+ VRFY_G((status >= 0),"testing property list get succeeded");
+ VRFY_G((prop_value == 0),"API to set MULTI-CHUNK COLLECTIVE IO optimization succeeded");
break;
case API_LINK_TRUE:
status = H5Pget(xfer_plist,H5D_XFER_COLL_CHUNK_LINK_NUM_TRUE_NAME,&prop_value);
- VRFY((status >= 0),"testing property list get succeeded");
- VRFY((prop_value == 0),"API to set LINK COLLECTIVE IO succeeded");
+ VRFY_G((status >= 0),"testing property list get succeeded");
+ VRFY_G((prop_value == 0),"API to set LINK COLLECTIVE IO succeeded");
break;
case API_LINK_FALSE:
status = H5Pget(xfer_plist,H5D_XFER_COLL_CHUNK_LINK_NUM_FALSE_NAME,&prop_value);
- VRFY((status >= 0),"testing property list get succeeded");
- VRFY((prop_value == 0),"API to set LINK IO transferring to multi-chunk IO succeeded");
+ VRFY_G((status >= 0),"testing property list get succeeded");
+ VRFY_G((prop_value == 0),"API to set LINK IO transferring to multi-chunk IO succeeded");
break;
case API_MULTI_COLL:
status = H5Pget(xfer_plist,H5D_XFER_COLL_CHUNK_MULTI_RATIO_COLL_NAME,&prop_value);
- VRFY((status >= 0),"testing property list get succeeded");
- VRFY((prop_value == 0),"API to set MULTI-CHUNK COLLECTIVE IO with optimization succeeded");
+ VRFY_G((status >= 0),"testing property list get succeeded");
+ VRFY_G((prop_value == 0),"API to set MULTI-CHUNK COLLECTIVE IO with optimization succeeded");
break;
case API_MULTI_IND:
status = H5Pget(xfer_plist,H5D_XFER_COLL_CHUNK_MULTI_RATIO_IND_NAME,&prop_value);
- VRFY((status >= 0),"testing property list get succeeded");
- VRFY((prop_value == 0),"API to set MULTI-CHUNK IO transferring to independent IO succeeded");
+ VRFY_G((status >= 0),"testing property list get succeeded");
+ VRFY_G((prop_value == 0),"API to set MULTI-CHUNK IO transferring to independent IO succeeded");
break;
default:
@@ -1926,20 +1715,20 @@ coll_chunktest(const char* filename,
#endif
status = H5Dclose(dataset);
- VRFY((status >= 0),"");
+ VRFY_G((status >= 0),"");
status = H5Pclose(xfer_plist);
- VRFY((status >= 0),"property list closed");
+ VRFY_G((status >= 0),"property list closed");
status = H5Sclose(file_dataspace);
- VRFY((status >= 0),"");
+ VRFY_G((status >= 0),"");
status = H5Sclose(mem_dataspace);
- VRFY((status >= 0),"");
+ VRFY_G((status >= 0),"");
status = H5Fclose(file);
- VRFY((status >= 0),"");
+ VRFY_G((status >= 0),"");
if (data_array1) HDfree(data_array1);
@@ -1947,35 +1736,35 @@ coll_chunktest(const char* filename,
/* allocate memory for data buffer */
data_array1 = (int *)HDmalloc(dims[0]*dims[1]*sizeof(int));
- VRFY((data_array1 != NULL), "data_array1 malloc succeeded");
+ VRFY_G((data_array1 != NULL), "data_array1 malloc succeeded");
/* allocate memory for data buffer */
data_origin1 = (int *)HDmalloc(dims[0]*dims[1]*sizeof(int));
- VRFY((data_origin1 != NULL), "data_origin1 malloc succeeded");
+ VRFY_G((data_origin1 != NULL), "data_origin1 malloc succeeded");
acc_plist = create_faccess_plist(comm, info, facc_type);
- VRFY((acc_plist >= 0),"MPIO creation property list succeeded");
+ VRFY_G((acc_plist >= 0),"MPIO creation property list succeeded");
- file = H5Fopen(filename,H5F_ACC_RDONLY,acc_plist);
- VRFY((file >= 0),"H5Fcreate succeeded");
+ file = H5Fopen(FILENAME[0],H5F_ACC_RDONLY,acc_plist);
+ VRFY_G((file >= 0),"H5Fopen succeeded");
status = H5Pclose(acc_plist);
- VRFY((status >= 0),"");
+ VRFY_G((status >= 0),"");
/* open the collective dataset*/
dataset = H5Dopen2(file, DSET_COLLECTIVE_CHUNK_NAME, H5P_DEFAULT);
- VRFY((dataset >= 0), "");
+ VRFY_G((dataset >= 0), "");
/* set up dimensions of the slab this process accesses */
- ccslab_set(mpi_rank, mpi_size, start, count, stride, block, select_factor);
+ ccslab_set(mpi_rank_g, mpi_size_g, start, count, stride, block, select_factor);
/* obtain the file and mem dataspace*/
file_dataspace = H5Dget_space (dataset);
- VRFY((file_dataspace >= 0), "");
+ VRFY_G((file_dataspace >= 0), "");
if (ALL != mem_selection) {
mem_dataspace = H5Dget_space (dataset);
- VRFY((mem_dataspace >= 0), "");
+ VRFY_G((mem_dataspace >= 0), "");
}
else {
/* Warning: H5Screate_simple requires an array of hsize_t elements
@@ -1984,92 +1773,92 @@ coll_chunktest(const char* filename,
*/
hsize_t dsdims[1] = {num_points};
mem_dataspace = H5Screate_simple (1, dsdims, NULL);
- VRFY((mem_dataspace >= 0), "mem_dataspace create succeeded");
+ VRFY_G((mem_dataspace >= 0), "mem_dataspace create succeeded");
}
switch (file_selection) {
case HYPER:
status = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
- VRFY((status >= 0),"hyperslab selection succeeded");
+ VRFY_G((status >= 0),"hyperslab selection succeeded");
break;
case POINT:
if (num_points) {
status = H5Sselect_elements(file_dataspace, H5S_SELECT_SET, num_points, coords);
- VRFY((status >= 0),"Element selection succeeded");
+ VRFY_G((status >= 0),"Element selection succeeded");
}
else {
status = H5Sselect_none(file_dataspace);
- VRFY((status >= 0),"none selection succeeded");
+ VRFY_G((status >= 0),"none selection succeeded");
}
break;
case ALL:
status = H5Sselect_all(file_dataspace);
- VRFY((status >= 0), "H5Sselect_all succeeded");
+ VRFY_G((status >= 0), "H5Sselect_all succeeded");
break;
}
switch (mem_selection) {
case HYPER:
status = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block);
- VRFY((status >= 0),"hyperslab selection succeeded");
+ VRFY_G((status >= 0),"hyperslab selection succeeded");
break;
case POINT:
if (num_points) {
status = H5Sselect_elements(mem_dataspace, H5S_SELECT_SET, num_points, coords);
- VRFY((status >= 0),"Element selection succeeded");
+ VRFY_G((status >= 0),"Element selection succeeded");
}
else {
status = H5Sselect_none(mem_dataspace);
- VRFY((status >= 0),"none selection succeeded");
+ VRFY_G((status >= 0),"none selection succeeded");
}
break;
case ALL:
status = H5Sselect_all(mem_dataspace);
- VRFY((status >= 0), "H5Sselect_all succeeded");
+ VRFY_G((status >= 0), "H5Sselect_all succeeded");
break;
}
/* fill dataset with test data */
ccdataset_fill(start, stride,count,block, data_origin1, mem_selection);
xfer_plist = H5Pcreate (H5P_DATASET_XFER);
- VRFY((xfer_plist >= 0),"");
+ VRFY_G((xfer_plist >= 0),"");
status = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
- VRFY((status>= 0),"MPIO collective transfer property succeeded");
+ VRFY_G((status>= 0),"MPIO collective transfer property succeeded");
if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
status = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
- VRFY((status>= 0),"set independent IO collectively succeeded");
+ VRFY_G((status>= 0),"set independent IO collectively succeeded");
}
status = H5Dread(dataset, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
xfer_plist, data_array1);
- VRFY((status >=0),"dataset read succeeded");
+ VRFY_G((status >=0),"dataset read succeeded");
/* verify the read data with original expected data */
status = ccdataset_vrfy(start, count, stride, block, data_array1, data_origin1, mem_selection);
if (status) nerrors++;
status = H5Pclose(xfer_plist);
- VRFY((status >= 0),"property list closed");
+ VRFY_G((status >= 0),"property list closed");
/* close dataset collectively */
status=H5Dclose(dataset);
- VRFY((status >= 0), "H5Dclose");
+ VRFY_G((status >= 0), "H5Dclose");
/* release all IDs created */
status = H5Sclose(file_dataspace);
- VRFY((status >= 0),"H5Sclose");
+ VRFY_G((status >= 0),"H5Sclose");
status = H5Sclose(mem_dataspace);
- VRFY((status >= 0),"H5Sclose");
+ VRFY_G((status >= 0),"H5Sclose");
/* close the file collectively */
status = H5Fclose(file);
- VRFY((status >= 0),"H5Fclose");
+ VRFY_G((status >= 0),"H5Fclose");
/* release data buffers */
if(coords) HDfree(coords);
@@ -2082,22 +1871,22 @@ coll_chunktest(const char* filename,
/*****************************************************************************
*
- * Function: do_express_test()
+ * Function: do_express_test()
*
- * Purpose: Do an MPI_Allreduce to obtain the maximum value returned
- * by GetTestExpress() across all processes. Return this
- * value.
+ * Purpose: Do an MPI_Allreduce to obtain the maximum value returned
+ * by GetTestExpress() across all processes. Return this
+ * value.
*
- * Envirmoment variables can be different across different
- * processes. This function ensures that all processes agree
- * on whether to do an express test.
+ * Environment variables can be different across different
+ * processes. This function ensures that all processes agree
+ * on whether to do an express test.
*
- * Return: Success: Maximum of the values returned by
- * GetTestExpress() across all processes.
+ * Return: Success: Maximum of the values returned by
+ * GetTestExpress() across all processes.
*
- * Failure: -1
+ * Failure: -1
*
- * Programmer: JRM -- 4/25/06
+ * Programmer: JRM -- 4/25/06
*
*****************************************************************************/
static int
@@ -2130,21 +1919,39 @@ do_express_test(int world_mpi_rank)
} /* do_express_test() */
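/*
 * Sketch (not from this patch): the MPI_Allreduce pattern described in the
 * do_express_test() header above, assuming the test library's
 * GetTestExpress() helper. Taking MPI_MAX ensures every rank agrees even
 * when environment variables differ between processes.
 */
static int
express_test_allreduce_sketch(void)
{
    int express_test     = GetTestExpress(); /* may differ per rank */
    int max_express_test = -1;

    if (MPI_Allreduce(&express_test, &max_express_test, 1, MPI_INT,
                      MPI_MAX, MPI_COMM_WORLD) != MPI_SUCCESS)
        max_express_test = -1; /* the documented failure value */

    return max_express_test;
}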
-int main(int argc, char **argv)
+int main(int argc, char **argv)
{
int ExpressMode = 0;
hsize_t newsize = 1048576;
- hsize_t oldsize = H5S_mpio_set_bigio_count(newsize);
+ /* Set the bigio processing limit to be 'newsize' bytes */
+ hsize_t oldsize = H5_mpi_set_bigio_count(newsize);
+ /* Having set the bigio handling to a size that is manageable,
+ * we'll set our 'bigcount' variable to be 2X that limit so
+ * that we try to ensure that our bigio handling is actually
+ * invoked and tested.
+ */
if (newsize != oldsize) {
- bigcount = newsize * 2;
+ bigcount = newsize * 2;
}
MPI_Init(&argc, &argv);
- MPI_Comm_size(MPI_COMM_WORLD,&mpi_size);
- MPI_Comm_rank(MPI_COMM_WORLD,&mpi_rank);
+ MPI_Comm_size(MPI_COMM_WORLD,&mpi_size_g);
+ MPI_Comm_rank(MPI_COMM_WORLD,&mpi_rank_g);
- ExpressMode = do_express_test(mpi_rank);
+ /* Attempt to turn off atexit post-processing so that, in case errors
+ * happen during the test and the process is aborted, it does not
+ * hang in the atexit post-processing, where it may try to make MPI
+ * calls. By then, MPI calls may not work.
+ */
+ if (H5dont_atexit() < 0){
+ HDprintf("Failed to turn off atexit processing. Continue.\n");
+ }
+
+ /* set alarm. */
+ ALARM_ON;
+
+ ExpressMode = do_express_test(mpi_rank_g);
dataset_big_write();
MPI_Barrier(MPI_COMM_WORLD);
@@ -2153,7 +1960,8 @@ int main(int argc, char **argv)
MPI_Barrier(MPI_COMM_WORLD);
if (ExpressMode > 0) {
- printf("***Express test mode on. Several tests are skipped\n");
+ if (mpi_rank_g == 0)
+ HDprintf("***Express test mode on. Several tests are skipped\n");
}
else {
coll_chunk1();
@@ -2161,8 +1969,16 @@ int main(int argc, char **argv)
coll_chunk2();
MPI_Barrier(MPI_COMM_WORLD);
coll_chunk3();
+ MPI_Barrier(MPI_COMM_WORLD);
+ single_rank_independent_io();
}
+ /* turn off alarm */
+ ALARM_OFF;
+
+ if (mpi_rank_g == 0)
+ HDremove(FILENAME[0]);
+
/* close HDF5 library */
H5close();
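/*
 * Sketch (not from this patch): VRFY_G, used throughout the t_bigio.c
 * hunks above, is the global-rank variant of the testpar VRFY check. Its
 * real definition lives in a header outside this diff; a plausible
 * minimal equivalent, stated only as an assumption, is:
 */
#define VRFY_G_SKETCH(val, mesg)                                              \
    do {                                                                      \
        if (!(val)) {                                                         \
            HDprintf("Proc %d: *** Assertion (%s) failed at line %d in %s\n", \
                     mpi_rank_g, mesg, (int)__LINE__, __FILE__);              \
            nerrors++;                                                        \
        }                                                                     \
    } while (0)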
diff --git a/testpar/t_cache.c b/testpar/t_cache.c
index 700e993..954071d 100644
--- a/testpar/t_cache.c
+++ b/testpar/t_cache.c
@@ -16,52 +16,51 @@
*
*/
-#include "h5test.h"
#include "testpar.h"
-#define H5AC_FRIEND /*suppress error about including H5ACpkg */
-#define H5C_FRIEND /*suppress error about including H5Cpkg */
-#define H5F_FRIEND /*suppress error about including H5Fpkg */
+#define H5AC_FRIEND /*suppress error about including H5ACpkg */
+#define H5C_FRIEND /*suppress error about including H5Cpkg */
+#define H5F_FRIEND /*suppress error about including H5Fpkg */
#include "H5ACpkg.h"
#include "H5Cpkg.h"
+#include "H5CXprivate.h"
#include "H5Fpkg.h"
#include "H5Iprivate.h"
#include "H5MFprivate.h"
-
+#include "H5private.h"
#define BASE_ADDR (haddr_t)1024
-int nerrors = 0;
-int failures = 0;
-hbool_t verbose = TRUE; /* used to control error messages */
+int nerrors = 0;
+int failures = 0;
+hbool_t verbose = TRUE; /* used to control error messages */
#define NFILENAME 2
-#define PARATESTFILE filenames[0]
const char *FILENAME[NFILENAME]={"CacheTestDummy", NULL};
#ifndef PATH_MAX
#define PATH_MAX 512
#endif /* !PATH_MAX */
char filenames[NFILENAME][PATH_MAX];
hid_t fapl; /* file access property list */
-haddr_t max_addr = 0; /* used to store the end of
- * the address space used by
- * the data array (see below).
- */
-hbool_t callbacks_verbose = FALSE; /* flag used to control whether
- * the callback functions are in
- * verbose mode.
- */
+haddr_t max_addr = 0; /* used to store the end of
+ * the address space used by
+ * the data array (see below).
+ */
+hbool_t callbacks_verbose = FALSE; /* flag used to control whether
+ * the callback functions are in
+ * verbose mode.
+ */
-int world_mpi_size = -1;
-int world_mpi_rank = -1;
-int world_server_mpi_rank = -1;
-MPI_Comm world_mpi_comm = MPI_COMM_NULL;
-int file_mpi_size = -1;
-int file_mpi_rank = -1;
-MPI_Comm file_mpi_comm = MPI_COMM_NULL;
+int world_mpi_size = -1;
+int world_mpi_rank = -1;
+int world_server_mpi_rank = -1;
+MPI_Comm world_mpi_comm = MPI_COMM_NULL;
+int file_mpi_size = -1;
+int file_mpi_rank = -1;
+MPI_Comm file_mpi_comm = MPI_COMM_NULL;
/* the following globals are used to maintain rudimentary statistics
@@ -74,67 +73,67 @@ long datum_destroys = 0;
long datum_flushes = 0;
long datum_pinned_flushes = 0;
long datum_loads = 0;
-long global_pins = 0;
-long global_dirty_pins = 0;
-long local_pins = 0;
+long global_pins = 0;
+long global_dirty_pins = 0;
+long local_pins = 0;
/* the following fields are used by the server process only */
-int total_reads = 0;
+int total_reads = 0;
int total_writes = 0;
/*****************************************************************************
* struct datum
*
- * Instances of struct datum are used to store information on entries
- * that may be loaded into the cache. The individual fields are
- * discussed below:
+ * Instances of struct datum are used to store information on entries
+ * that may be loaded into the cache. The individual fields are
+ * discussed below:
*
- * header: Instance of H5C_cache_entry_t used by the for its data.
- * This field is only used on the file processes, not on the
- * server process.
+ * header: Instance of H5C_cache_entry_t used by the cache for its data.
+ * This field is only used on the file processes, not on the
+ * server process.
*
- * This field MUST be the first entry in this structure.
+ * This field MUST be the first entry in this structure.
*
- * base_addr: Base address of the entry.
+ * base_addr: Base address of the entry.
*
- * len: Length of the entry.
+ * len: Length of the entry.
*
- * local_len: Length of the entry according to the cache. This
- * value must be positive, and may not be larger than len.
+ * local_len: Length of the entry according to the cache. This
+ * value must be positive, and may not be larger than len.
*
- * The field exists to allow us change the sizes of entries
- * in the cache without upsetting the server. This value
- * is only used locally, and is never sent to the server.
+ * The field exists to allow us to change the sizes of entries
+ * in the cache without upsetting the server. This value
+ * is only used locally, and is never sent to the server.
*
- * ver: Version number of the entry. This number is initialize
- * to zero, and incremented each time the entry is modified.
+ * ver: Version number of the entry. This number is initialized
+ * to zero, and incremented each time the entry is modified.
*
- * dirty: Boolean flag indicating whether the entry is dirty.
+ * dirty: Boolean flag indicating whether the entry is dirty.
*
- * For current purposes, an entry is clean until it is
- * modified, and dirty until written to the server (cache
- * on process 0) or until it is marked clean (all other
- * caches).
+ * For current purposes, an entry is clean until it is
+ * modified, and dirty until written to the server (cache
+ * on process 0) or until it is marked clean (all other
+ * caches).
*
- * valid: Boolean flag indicating whether the entry contains
- * valid data. Attempts to read an entry whose valid
- * flag is not set should trigger an error.
+ * valid: Boolean flag indicating whether the entry contains
+ * valid data. Attempts to read an entry whose valid
+ * flag is not set should trigger an error.
*
- * locked: Boolean flag that is set to true iff the entry is in
- * the cache and locked.
+ * locked: Boolean flag that is set to true iff the entry is in
+ * the cache and locked.
*
- * global_pinned: Boolean flag that is set to true iff the entry has
- * been pinned collectively in all caches. Since writes must
- * be collective across all processes, only entries pinned
- * in this fashion may be marked dirty.
+ * global_pinned: Boolean flag that is set to true iff the entry has
+ * been pinned collectively in all caches. Since writes must
+ * be collective across all processes, only entries pinned
+ * in this fashion may be marked dirty.
*
- * local_pinned: Boolean flag that is set to true iff the entry
- * has been pinned in the local cache, but probably not all
- * caches. Such pins will typically not be consistant across
- * processes, and thus cannot be marked as dirty unless they
- * happen to overlap some collective operation.
+ * local_pinned: Boolean flag that is set to true iff the entry
+ * has been pinned in the local cache, but probably not all
+ * caches. Such pins will typically not be consistent across
+ * processes, and thus cannot be marked as dirty unless they
+ * happen to overlap some collective operation.
*
* cleared: Boolean flag that is set to true whenever the entry is
* dirty, and is cleared via a call to datum_notify with the
@@ -143,61 +142,61 @@ int total_writes = 0;
* flushed: Boolean flag that is set to true whenever the entry is
* dirty, and is flushed by the metadata cache.
*
- * reads: Integer field used to maintain a count of the number of
- * times this entry has been read from the server since
- * the last time the read and write counts were reset.
+ * reads: Integer field used to maintain a count of the number of
+ * times this entry has been read from the server since
+ * the last time the read and write counts were reset.
*
- * writes: Integer field used to maintain a count of the number of
- * times this entry has been written to the server since
- * the last time the read and write counts were reset.
+ * writes: Integer field used to maintain a count of the number of
+ * times this entry has been written to the server since
+ * the last time the read and write counts were reset.
*
- * index: Index of this instance of datum in the data_index[] array
- * discussed below.
+ * index: Index of this instance of datum in the data_index[] array
+ * discussed below.
*
- * aux_ptr: Pointer to the instance of H5AC_aux_t associated with the
- * instance of the metadata cache within which this entry
- * resides. This field was added to allow us to pass this
- * value to the notify callback from the serialize callback.
- * It should be NULL when not in use.
+ * aux_ptr: Pointer to the instance of H5AC_aux_t associated with the
+ * instance of the metadata cache within which this entry
+ * resides. This field was added to allow us to pass this
+ * value to the notify callback from the serialize callback.
+ * It should be NULL when not in use.
*
*****************************************************************************/
struct datum
{
- H5C_cache_entry_t header;
- haddr_t base_addr;
- size_t len;
- size_t local_len;
- int ver;
- hbool_t dirty;
- hbool_t valid;
- hbool_t locked;
- hbool_t global_pinned;
- hbool_t local_pinned;
- hbool_t cleared;
+ H5C_cache_entry_t header;
+ haddr_t base_addr;
+ size_t len;
+ size_t local_len;
+ int ver;
+ hbool_t dirty;
+ hbool_t valid;
+ hbool_t locked;
+ hbool_t global_pinned;
+ hbool_t local_pinned;
+ hbool_t cleared;
hbool_t flushed;
- int reads;
- int writes;
- int index;
+ int reads;
+ int writes;
+ int index;
struct H5AC_aux_t * aux_ptr;
};
/*****************************************************************************
* data array
*
- * The data array is an array of instances of datum of size
- * NUM_DATA_ENTRIES that is used to track the particulars of all
- * the entries that may be loaded into the cache.
+ * The data array is an array of instances of datum of size
+ * NUM_DATA_ENTRIES that is used to track the particulars of all
+ * the entries that may be loaded into the cache.
*
- * It exists on all processes, although the master copy is maintained
- * by the server process. If the cache is performing correctly, all
- * versions should be effectively identical. By that I mean that
- * the data received from the server should always match that in
- * the local version of the data array.
+ * It exists on all processes, although the master copy is maintained
+ * by the server process. If the cache is performing correctly, all
+ * versions should be effectively identical. By that I mean that
+ * the data received from the server should always match that in
+ * the local version of the data array.
*
*****************************************************************************/
-#define NUM_DATA_ENTRIES 100000
+#define NUM_DATA_ENTRIES 100000
struct datum data[NUM_DATA_ENTRIES];
@@ -216,10 +215,12 @@ struct datum data[NUM_DATA_ENTRIES];
* Further, this value must be consistent across all processes.
*/
-#define STD_VIRT_NUM_DATA_ENTRIES NUM_DATA_ENTRIES
-#define EXPRESS_VIRT_NUM_DATA_ENTRIES (NUM_DATA_ENTRIES / 10)
+#define STD_VIRT_NUM_DATA_ENTRIES NUM_DATA_ENTRIES
+#define EXPRESS_VIRT_NUM_DATA_ENTRIES (NUM_DATA_ENTRIES / 10)
/* Use a smaller test size to avoid creating huge MPE logfiles. */
-#define MPE_VIRT_NUM_DATA_ENTIES (NUM_DATA_ENTRIES / 100)
+#ifdef H5_HAVE_MPE
+#define MPE_VIRT_NUM_DATA_ENTIES (NUM_DATA_ENTRIES / 100)
+#endif
int virt_num_data_entries = NUM_DATA_ENTRIES;
@@ -227,14 +228,14 @@ int virt_num_data_entries = NUM_DATA_ENTRIES;
/*****************************************************************************
* data_index array
*
- * The data_index array is an array of integer used to maintain a list
- * of instances of datum in the data array in increasing base_addr order.
+ * The data_index array is an array of integers used to maintain a list
+ * of instances of datum in the data array in increasing base_addr order.
*
- * This array is necessary, as move operations can swap the values
- * of the base_addr fields of two instances of datum. Without this
- * array, we would no longer be able to use a binary search on a sorted
- * list to find the indexes of instances of datum given the values of
- * their base_addr fields.
+ * This array is necessary, as move operations can swap the values
+ * of the base_addr fields of two instances of datum. Without this
+ * array, we would no longer be able to use a binary search on a sorted
+ * list to find the indexes of instances of datum given the values of
+ * their base_addr fields.
*
*****************************************************************************/
@@ -243,99 +244,99 @@ int data_index[NUM_DATA_ENTRIES];
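/*
 * Sketch (not from this patch): the binary search that the comment above
 * motivates, resolving a base_addr to a datum index through the sorted
 * data_index[] array. It parallels what addr_to_datum_index() (later in
 * this diff) is documented to do; -1 means the address was not found.
 */
static int
addr_to_index_sketch(haddr_t base_addr)
{
    int lo = 0;
    int hi = NUM_DATA_ENTRIES - 1;

    while (lo <= hi) {
        int mid = lo + (hi - lo) / 2;

        if (data[data_index[mid]].base_addr == base_addr)
            return data_index[mid];
        else if (data[data_index[mid]].base_addr < base_addr)
            lo = mid + 1;
        else
            hi = mid - 1;
    }

    return -1;
}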
/*****************************************************************************
* The following two #defines are used to control code that is in turn used
- * to force "POSIX" semantics on the server process used to simulate metadata
- * reads and writes. Without some such mechanism, the test code contains
+ * to force "POSIX" semantics on the server process used to simulate metadata
+ * reads and writes. Without some such mechanism, the test code contains
* race conditions that will frequently cause spurious failures.
*
* When set to TRUE, DO_WRITE_REQ_ACK forces the server to send an ack after
- * each write request, and the client to wait until the ack is received
+ * each write request, and the client to wait until the ack is received
* before proceeding. This was my first solution to the problem, and at
* first glance, it would seem to have a lot of unnecessary overhead.
*
* In an attempt to reduce the overhead, I implemented a second solution
- * in which no acks are sent after writes. Instead, the metadata cache is
- * provided with a callback function to call after each sequence of writes.
- * This callback simply causes the client to send the server process a
+ * in which no acks are sent after writes. Instead, the metadata cache is
+ * provided with a callback function to call after each sequence of writes.
+ * This callback simply causes the client to send the server process a
* "sync" message and and await an ack in reply.
*
- * Strangely, at least on Phoenix, the first solution runs faster by a
- * rather large margin. However, I can imagine this changing with
+ * Strangely, at least on Phoenix, the first solution runs faster by a
+ * rather large margin. However, I can imagine this changing with
* different OSes and MPI implementations.
*
- * Thus I have left code supporting the second solution in place.
+ * Thus I have left code supporting the second solution in place.
*
- * Note that while one of these two #defines must be set to TRUE, there
- * should never be any need to set both of them to TRUE (although the
+ * Note that while one of these two #defines must be set to TRUE, there
+ * should never be any need to set both of them to TRUE (although the
* tests will still function with this setting).
*****************************************************************************/
-#define DO_WRITE_REQ_ACK TRUE
-#define DO_SYNC_AFTER_WRITE FALSE
+#define DO_WRITE_REQ_ACK TRUE
+#define DO_SYNC_AFTER_WRITE FALSE
/*****************************************************************************
* struct mssg
*
- * The mssg structure is used as a generic container for messages to
- * and from the server. Not all fields are used in all cases.
+ * The mssg structure is used as a generic container for messages to
+ * and from the server. Not all fields are used in all cases.
*
- * req: Integer field containing the type of the message.
+ * req: Integer field containing the type of the message.
*
- * src: World communicator MPI rank of the sending process.
+ * src: World communicator MPI rank of the sending process.
*
- * dest: World communicator MPI rank of the destination process.
+ * dest: World communicator MPI rank of the destination process.
*
- * mssg_num: Serial number assigned to the message by the sender.
+ * mssg_num: Serial number assigned to the message by the sender.
*
- * base_addr: Base address of a datum. Not used in all mssgs.
+ * base_addr: Base address of a datum. Not used in all mssgs.
*
- * len: Length of a datum (in bytes). Not used in all mssgs.
+ * len: Length of a datum (in bytes). Not used in all mssgs.
*
- * ver: Version number of a datum. Not used in all mssgs.
+ * ver: Version number of a datum. Not used in all mssgs.
*
- * count: Reported number of total/entry reads/writes. Not used
- * in all mssgs.
+ * count: Reported number of total/entry reads/writes. Not used
+ * in all mssgs.
*
- * magic: Magic number for error detection. Must be set to
- * MSSG_MAGIC.
+ * magic: Magic number for error detection. Must be set to
+ * MSSG_MAGIC.
*
*****************************************************************************/
-#define WRITE_REQ_CODE 0
-#define WRITE_REQ_ACK_CODE 1
-#define READ_REQ_CODE 2
-#define READ_REQ_REPLY_CODE 3
-#define SYNC_REQ_CODE 4
-#define SYNC_ACK_CODE 5
-#define REQ_TTL_WRITES_CODE 6
-#define REQ_TTL_WRITES_RPLY_CODE 7
-#define REQ_TTL_READS_CODE 8
-#define REQ_TTL_READS_RPLY_CODE 9
-#define REQ_ENTRY_WRITES_CODE 10
-#define REQ_ENTRY_WRITES_RPLY_CODE 11
-#define REQ_ENTRY_READS_CODE 12
-#define REQ_ENTRY_READS_RPLY_CODE 13
-#define REQ_RW_COUNT_RESET_CODE 14
-#define REQ_RW_COUNT_RESET_RPLY_CODE 15
-#define DONE_REQ_CODE 16
-#define MAX_REQ_CODE 16
-
-#define MSSG_MAGIC 0x1248
+#define WRITE_REQ_CODE 0
+#define WRITE_REQ_ACK_CODE 1
+#define READ_REQ_CODE 2
+#define READ_REQ_REPLY_CODE 3
+#define SYNC_REQ_CODE 4
+#define SYNC_ACK_CODE 5
+#define REQ_TTL_WRITES_CODE 6
+#define REQ_TTL_WRITES_RPLY_CODE 7
+#define REQ_TTL_READS_CODE 8
+#define REQ_TTL_READS_RPLY_CODE 9
+#define REQ_ENTRY_WRITES_CODE 10
+#define REQ_ENTRY_WRITES_RPLY_CODE 11
+#define REQ_ENTRY_READS_CODE 12
+#define REQ_ENTRY_READS_RPLY_CODE 13
+#define REQ_RW_COUNT_RESET_CODE 14
+#define REQ_RW_COUNT_RESET_RPLY_CODE 15
+#define DONE_REQ_CODE 16
+#define MAX_REQ_CODE 16
+
+#define MSSG_MAGIC 0x1248
struct mssg_t
{
- int req;
- int src;
- int dest;
- long int mssg_num;
- haddr_t base_addr;
- unsigned len;
- int ver;
- unsigned count;
- unsigned magic;
+ int req;
+ int src;
+ int dest;
+ long int mssg_num;
+ haddr_t base_addr;
+ unsigned len;
+ int ver;
+ unsigned count;
+ unsigned magic;
};
-MPI_Datatype mpi_mssg_t; /* for MPI derived type created from mssg */
+MPI_Datatype mpi_mssg_t; /* for MPI derived type created from mssg */
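/*
 * Sketch (not from this patch): the DO_WRITE_REQ_ACK handshake described
 * earlier, expressed with the message codes and mpi_mssg_t declared above.
 * Reusing the request code as the MPI tag and leaving mssg_num zero are
 * simplifying assumptions.
 */
static hbool_t
send_write_req_sketch(haddr_t base_addr, unsigned len, int ver)
{
    struct mssg_t mssg;

    HDmemset(&mssg, 0, sizeof(mssg));
    mssg.req       = WRITE_REQ_CODE;
    mssg.src       = world_mpi_rank;
    mssg.dest      = world_server_mpi_rank;
    mssg.base_addr = base_addr;
    mssg.len       = len;
    mssg.ver       = ver;
    mssg.magic     = MSSG_MAGIC;

    if (MPI_Send(&mssg, 1, mpi_mssg_t, world_server_mpi_rank,
                 WRITE_REQ_CODE, world_mpi_comm) != MPI_SUCCESS)
        return FALSE;

    /* with DO_WRITE_REQ_ACK set, block until the server acknowledges */
    if (MPI_Recv(&mssg, 1, mpi_mssg_t, world_server_mpi_rank,
                 WRITE_REQ_ACK_CODE, world_mpi_comm,
                 MPI_STATUS_IGNORE) != MPI_SUCCESS)
        return FALSE;

    return (hbool_t)(mssg.req == WRITE_REQ_ACK_CODE && mssg.magic == MSSG_MAGIC);
}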
/*****************************************************************************/
@@ -409,24 +410,24 @@ static herr_t datum_notify(H5C_notify_action_t action, void *thing);
static herr_t datum_free_icr(void * thing);
/* Masquerade as object header entries to the cache */
-#define DATUM_ENTRY_TYPE H5AC_OHDR_ID
+#define DATUM_ENTRY_TYPE H5AC_OHDR_ID
-#define NUMBER_OF_ENTRY_TYPES 1
+#define NUMBER_OF_ENTRY_TYPES 1
/* Note the use of the H5AC__CLASS_SKIP_READS and H5AC__CLASS_SKIP_WRITES
* flags. As a result of these flags, the metadata cache does no file I/O
* on metadata of the datum type.
*
- * Instead, this test uses a server process to keep track of who has
+ * Instead, this test uses a server process to keep track of who has
* written and read what, and to verify that there are no messages from
* the past / future.
*
- * In the callbacks for the version 2 cache, this activity was hidden in
+ * In the callbacks for the version 2 cache, this activity was hidden in
* the load and flush callbacks. However, we now handle this functionality in
* notify callbacks for the after-load and after-flush events.
*
- * JRM -- 1/13/15
+ * JRM -- 1/13/15
*/
const H5C_class_t types[NUMBER_OF_ENTRY_TYPES] =
{
@@ -468,15 +469,13 @@ static void lock_and_unlock_random_entry(H5F_t * file_ptr,
static void lock_entry(H5F_t * file_ptr, int32_t idx);
static void mark_entry_dirty(int32_t idx);
static void pin_entry(H5F_t * file_ptr, int32_t idx, hbool_t global, hbool_t dirty);
-#ifdef H5_METADATA_TRACE_FILE
static void pin_protected_entry(int32_t idx, hbool_t global);
-#endif /* H5_METADATA_TRACE_FILE */
static void move_entry(H5F_t * file_ptr, int32_t old_idx, int32_t new_idx);
static hbool_t reset_server_counts(void);
static void resize_entry(int32_t idx, size_t new_size);
-static hbool_t setup_cache_for_test(hid_t * fid_ptr,
+static hbool_t setup_cache_for_test(hid_t * fid_ptr,
H5F_t ** file_ptr_ptr,
- H5C_t ** cache_ptr_ptr,
+ H5C_t ** cache_ptr_ptr,
int metadata_write_strategy);
static void setup_rand(void);
static hbool_t take_down_cache(hid_t fid, H5C_t * cache_ptr);
@@ -550,17 +549,17 @@ print_stats(void)
/*****************************************************************************
*
- * Function: reset_stats()
+ * Function: reset_stats()
*
- * Purpose: Reset the rudementary stats maintained by t_cache.
+ * Purpose: Reset the rudimentary stats maintained by t_cache.
*
- * Return: void
+ * Return: void
*
- * Programmer: JRM -- 4/17/06
+ * Programmer: JRM -- 4/17/06
*
* Modifications:
*
- * None.
+ * None.
*
*****************************************************************************/
@@ -573,9 +572,9 @@ reset_stats(void)
datum_flushes = 0;
datum_pinned_flushes = 0;
datum_loads = 0;
- global_pins = 0;
- global_dirty_pins = 0;
- local_pins = 0;
+ global_pins = 0;
+ global_dirty_pins = 0;
+ local_pins = 0;
return;
@@ -588,20 +587,20 @@ reset_stats(void)
/*****************************************************************************
*
- * Function: set_up_file_communicator()
+ * Function: set_up_file_communicator()
*
- * Purpose: Create the MPI communicator used to open a HDF5 file with.
- * In passing, also initialize the file_mpi... globals.
+ * Purpose: Create the MPI communicator used to open an HDF5 file.
+ * In passing, also initialize the file_mpi... globals.
*
- * Return: Success: TRUE
+ * Return: Success: TRUE
*
- * Failure: FALSE
+ * Failure: FALSE
*
- * Programmer: JRM -- 11/16/05
+ * Programmer: JRM -- 11/16/05
*
* Modifications:
*
- * None.
+ * None.
*
*****************************************************************************/
@@ -623,8 +622,8 @@ set_up_file_communicator(void)
nerrors++;
success = FALSE;
- if ( verbose ) {
- fprintf(stdout,
+ if ( verbose ) {
+ HDfprintf(stdout,
"%d:%s: MPI_Comm_group() failed with error %d.\n",
world_mpi_rank, FUNC, mpi_result);
}
@@ -643,7 +642,7 @@ set_up_file_communicator(void)
nerrors++;
success = FALSE;
if ( verbose ) {
- fprintf(stdout,
+ HDfprintf(stdout,
"%d:%s: MPI_Group_excl() failed with error %d.\n",
world_mpi_rank, FUNC, mpi_result);
}
@@ -660,7 +659,7 @@ set_up_file_communicator(void)
nerrors++;
success = FALSE;
if ( verbose ) {
- fprintf(stdout,
+ HDfprintf(stdout,
"%d:%s: MPI_Comm_create() failed with error %d.\n",
world_mpi_rank, FUNC, mpi_result);
}
@@ -674,7 +673,7 @@ set_up_file_communicator(void)
nerrors++;
success = FALSE;
if ( verbose ) {
- fprintf(stdout,
+ HDfprintf(stdout,
"%d:%s: file_mpi_comm == MPI_COMM_NULL.\n",
world_mpi_rank, FUNC);
}
@@ -688,7 +687,7 @@ set_up_file_communicator(void)
nerrors++;
success = FALSE;
if ( verbose ) {
- fprintf(stdout,
+ HDfprintf(stdout,
"%d:%s: file_mpi_comm != MPI_COMM_NULL.\n",
world_mpi_rank, FUNC);
}
@@ -706,7 +705,7 @@ set_up_file_communicator(void)
nerrors++;
success = FALSE;
if ( verbose ) {
- fprintf(stdout,
+ HDfprintf(stdout,
"%d:%s: MPI_Comm_size() failed with error %d.\n",
world_mpi_rank, FUNC, mpi_result);
}
@@ -722,7 +721,7 @@ set_up_file_communicator(void)
nerrors++;
success = FALSE;
if ( verbose ) {
- fprintf(stdout,
+ HDfprintf(stdout,
"%d:%s: MPI_Comm_rank() failed with error %d.\n",
world_mpi_rank, FUNC, mpi_result);
}
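
The hunks above follow the standard MPI recipe for deriving a communicator that excludes one rank (here, the server). A minimal standalone sketch of that recipe -- the helper name and layout are illustrative, not part of this patch:

    #include <mpi.h>

    /* Build a communicator containing every rank except the last one,
     * which plays the server role; error handling omitted for brevity. */
    static MPI_Comm
    make_client_comm(MPI_Comm world)
    {
        MPI_Group world_group, client_group;
        MPI_Comm  client_comm = MPI_COMM_NULL;
        int       size, excluded[1];

        MPI_Comm_size(world, &size);
        excluded[0] = size - 1;                      /* server rank */

        MPI_Comm_group(world, &world_group);
        MPI_Group_excl(world_group, 1, excluded, &client_group);
        MPI_Comm_create(world, client_group, &client_comm);

        MPI_Group_free(&client_group);
        MPI_Group_free(&world_group);

        return client_comm;    /* MPI_COMM_NULL on the excluded rank */
    }
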
@@ -740,16 +739,16 @@ set_up_file_communicator(void)
/*****************************************************************************
*
- * Function: addr_to_datum_index()
+ * Function: addr_to_datum_index()
*
- * Purpose: Given the base address of a datum, find and return its index
- * in the data array.
+ * Purpose: Given the base address of a datum, find and return its index
+ * in the data array.
*
- * Return: Success: index of target datum.
+ * Return: Success: index of target datum.
*
- * Failure: -1.
+ * Failure: -1.
*
- * Programmer: JRM -- 12/20/05
+ * Programmer: JRM -- 12/20/05
*
*****************************************************************************/
static int
@@ -787,16 +786,16 @@ addr_to_datum_index(haddr_t base_addr)
/*****************************************************************************
*
- * Function: init_data()
+ * Function: init_data()
*
- * Purpose: Initialize the data array, from which cache entries are
- * loaded.
+ * Purpose: Initialize the data array, from which cache entries are
+ * loaded.
*
- * Return: Success: TRUE
+ * Return: Success: TRUE
*
- * Failure: FALSE
+ * Failure: FALSE
*
- * Programmer: JRM -- 12/20/05
+ * Programmer: JRM -- 12/20/05
*
*****************************************************************************/
static void
@@ -831,14 +830,14 @@ init_data(void)
data[i].dirty = FALSE;
data[i].valid = FALSE;
data[i].locked = FALSE;
- data[i].global_pinned = FALSE;
- data[i].local_pinned = FALSE;
- data[i].cleared = FALSE;
+ data[i].global_pinned = FALSE;
+ data[i].local_pinned = FALSE;
+ data[i].cleared = FALSE;
data[i].flushed = FALSE;
data[i].reads = 0;
data[i].writes = 0;
- data[i].index = i;
- data[i].aux_ptr = NULL;
+ data[i].index = i;
+ data[i].aux_ptr = NULL;
data_index[i] = i;
@@ -862,22 +861,22 @@ init_data(void)
/*****************************************************************************
*
- * Function: do_express_test()
+ * Function: do_express_test()
*
- * Purpose: Do an MPI_Allreduce to obtain the maximum value returned
- * by GetTestExpress() across all processes. Return this
- * value.
+ * Purpose: Do an MPI_Allreduce to obtain the maximum value returned
+ * by GetTestExpress() across all processes. Return this
+ * value.
*
- * Envirmoment variables can be different across different
- * processes. This function ensures that all processes agree
- * on whether to do an express test.
+ * Environment variables can be different across different
+ * processes. This function ensures that all processes agree
+ * on whether to do an express test.
*
- * Return: Success: Maximum of the values returned by
- * GetTestExpress() across all processes.
+ * Return: Success: Maximum of the values returned by
+ * GetTestExpress() across all processes.
*
- * Failure: -1
+ * Failure: -1
*
- * Programmer: JRM -- 4/25/06
+ * Programmer: JRM -- 4/25/06
*
*****************************************************************************/
static int
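
The agreement described above is a single reduction: each rank contributes its local GetTestExpress() value and every rank receives the maximum. A hedged sketch of the core call (GetTestExpress() is the harness routine named in the comment):

    int local_express = GetTestExpress();
    int max_express   = -1;

    /* All ranks end up with the same, maximal express-test level,
     * so they agree on how much of the test to skip. */
    if (MPI_Allreduce(&local_express, &max_express, 1, MPI_INT,
                      MPI_MAX, MPI_COMM_WORLD) != MPI_SUCCESS)
        max_express = -1;      /* the function reports -1 on failure */
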
@@ -913,19 +912,19 @@ do_express_test(void)
/*****************************************************************************
*
- * Function: do_sync()
+ * Function: do_sync()
*
- * Purpose: Ensure that all messages sent by this process have been
- * processed before proceeding.
+ * Purpose: Ensure that all messages sent by this process have been
+ * processed before proceeding.
*
- * Do this by exchanging sync req / sync ack messages with
- * the server.
+ * Do this by exchanging sync req / sync ack messages with
+ * the server.
*
- * Do nothing if nerrors is greater than zero.
+ * Do nothing if nerrors is greater than zero.
*
- * Return: void
+ * Return: void
*
- * Programmer: JRM -- 5/10/06
+ * Programmer: JRM -- 5/10/06
*
*****************************************************************************/
static void
@@ -937,7 +936,7 @@ do_sync(void)
if ( nerrors <= 0 ) {
/* compose the message */
- mssg.req = SYNC_REQ_CODE;
+ mssg.req = SYNC_REQ_CODE;
mssg.src = world_mpi_rank;
mssg.dest = world_server_mpi_rank;
mssg.mssg_num = -1; /* set by send function */
@@ -947,10 +946,10 @@ do_sync(void)
mssg.count = 0;
mssg.magic = MSSG_MAGIC;
- if ( ! send_mssg(&mssg, FALSE) ) {
+ if ( ! send_mssg(&mssg, FALSE) ) {
- nerrors++;
- if ( verbose ) {
+ nerrors++;
+ if ( verbose ) {
HDfprintf(stdout, "%d:%s: send_mssg() failed.\n",
world_mpi_rank, FUNC);
}
@@ -959,24 +958,24 @@ do_sync(void)
if ( nerrors <= 0 ) {
- if ( ! recv_mssg(&mssg, SYNC_ACK_CODE) ) {
+ if ( ! recv_mssg(&mssg, SYNC_ACK_CODE) ) {
nerrors++;
if ( verbose ) {
HDfprintf(stdout, "%d:%s: recv_mssg() failed.\n",
world_mpi_rank, FUNC);
}
- } else if ( ( mssg.req != SYNC_ACK_CODE ) ||
+ } else if ( ( mssg.req != SYNC_ACK_CODE ) ||
( mssg.src != world_server_mpi_rank ) ||
( mssg.dest != world_mpi_rank ) ||
- ( mssg.magic != MSSG_MAGIC ) ) {
+ ( mssg.magic != MSSG_MAGIC ) ) {
nerrors++;
- if ( verbose ) {
+ if ( verbose ) {
HDfprintf(stdout, "%d:%s: Bad data in sync ack.\n",
world_mpi_rank, FUNC);
}
- }
+ }
}
return;
@@ -986,17 +985,17 @@ do_sync(void)
/*****************************************************************************
*
- * Function: get_max_nerrors()
+ * Function: get_max_nerrors()
*
- * Purpose: Do an MPI_Allreduce to obtain the maximum value of nerrors
- * across all processes. Return this value.
+ * Purpose: Do an MPI_Allreduce to obtain the maximum value of nerrors
+ * across all processes. Return this value.
*
- * Return: Success: Maximum of the nerrors global variables across
- * all processes.
+ * Return: Success: Maximum of the nerrors global variables across
+ * all processes.
*
- * Failure: -1
+ * Failure: -1
*
- * Programmer: JRM -- 1/3/06
+ * Programmer: JRM -- 1/3/06
*
*****************************************************************************/
static int
@@ -1033,29 +1032,29 @@ get_max_nerrors(void)
/*****************************************************************************
*
- * Function: recv_mssg()
+ * Function: recv_mssg()
*
- * Purpose: Receive a message from any process in the provided instance
- * of struct mssg.
+ * Purpose: Receive a message from any process into the provided
+ * instance of struct mssg_t.
*
- * Return: Success: TRUE
+ * Return: Success: TRUE
*
- * Failure: FALSE
+ * Failure: FALSE
*
- * Programmer: JRM -- 12/22/05
+ * Programmer: JRM -- 12/22/05
*
* Modifications:
*
- * JRM -- 5/10/06
- * Added mssg_tag_offset parameter and supporting code.
+ * JRM -- 5/10/06
+ * Added mssg_tag_offset parameter and supporting code.
*
*****************************************************************************/
-#define CACHE_TEST_TAG 99 /* different from any used by the library */
+#define CACHE_TEST_TAG 99 /* different from any used by the library */
static hbool_t
recv_mssg(struct mssg_t *mssg_ptr,
- int mssg_tag_offset)
+ int mssg_tag_offset)
{
hbool_t success = TRUE;
int mssg_tag = CACHE_TEST_TAG;
@@ -1117,28 +1116,28 @@ recv_mssg(struct mssg_t *mssg_ptr,
/*****************************************************************************
*
- * Function: send_mssg()
+ * Function: send_mssg()
*
- * Purpose: Send the provided instance of mssg to the indicated target.
+ * Purpose: Send the provided instance of mssg to the indicated target.
*
- * Note that all source and destination ranks are in the
- * global communicator.
+ * Note that all source and destination ranks are in the
+ * global communicator.
*
- * Return: Success: TRUE
+ * Return: Success: TRUE
*
- * Failure: FALSE
+ * Failure: FALSE
*
- * Programmer: JRM -- 12/22/05
+ * Programmer: JRM -- 12/22/05
*
* Modifications:
*
- * JRM -- 5/10/06
- * Added the add_req_to_tag parameter and supporting code.
+ * JRM -- 5/10/06
+ * Added the add_req_to_tag parameter and supporting code.
*
*****************************************************************************/
static hbool_t
send_mssg(struct mssg_t *mssg_ptr,
- hbool_t add_req_to_tag)
+ hbool_t add_req_to_tag)
{
hbool_t success = TRUE;
int mssg_tag = CACHE_TEST_TAG;
@@ -1166,10 +1165,10 @@ send_mssg(struct mssg_t *mssg_ptr,
mssg_ptr->mssg_num = mssg_num++;
- if ( add_req_to_tag ) {
+ if ( add_req_to_tag ) {
- mssg_tag += mssg_ptr->req;
- }
+ mssg_tag += mssg_ptr->req;
+ }
result = MPI_Send((void *)mssg_ptr, 1, mpi_mssg_t,
mssg_ptr->dest, mssg_tag, world_mpi_comm);
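
Together, add_req_to_tag on the send side and mssg_tag_offset on the receive side implement a simple matched-tag scheme: the sender folds the request code into the MPI tag, and a receiver passing the same code as its offset matches only that kind of message. A sketch of the two halves, using types and constants defined elsewhere in this file:

    /* Sender: fold the request code into the tag. */
    int tag = CACHE_TEST_TAG + mssg.req;
    MPI_Send((void *)&mssg, 1, mpi_mssg_t, mssg.dest, tag, world_mpi_comm);

    /* Receiver: accept that kind of message from any rank. */
    MPI_Status status;
    MPI_Recv((void *)&mssg, 1, mpi_mssg_t, MPI_ANY_SOURCE,
             CACHE_TEST_TAG + READ_REQ_REPLY_CODE, world_mpi_comm, &status);
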
@@ -1189,19 +1188,19 @@ send_mssg(struct mssg_t *mssg_ptr,
} /* send_mssg() */
-
+
/*****************************************************************************
*
- * Function: setup_derived_types()
+ * Function: setup_derived_types()
*
- * Purpose: Set up the derived types used by the test bed. At present,
- * only the mpi_mssg derived type is needed.
+ * Purpose: Set up the derived types used by the test bed. At present,
+ * only the mpi_mssg derived type is needed.
*
- * Return: Success: TRUE
+ * Return: Success: TRUE
*
- * Failure: FALSE
+ * Failure: FALSE
*
- * Programmer: JRM -- 12/22/05
+ * Programmer: JRM -- 12/22/05
*
*****************************************************************************/
static hbool_t
@@ -1218,20 +1217,20 @@ setup_derived_types(void)
struct mssg_t sample; /* used to compute displacements */
/* setup the displacements array */
- if ( ( MPI_SUCCESS != MPI_Address(&sample.req, &displs[0]) ) ||
- ( MPI_SUCCESS != MPI_Address(&sample.src, &displs[1]) ) ||
- ( MPI_SUCCESS != MPI_Address(&sample.dest, &displs[2]) ) ||
- ( MPI_SUCCESS != MPI_Address(&sample.mssg_num, &displs[3]) ) ||
- ( MPI_SUCCESS != MPI_Address(&sample.base_addr, &displs[4]) ) ||
- ( MPI_SUCCESS != MPI_Address(&sample.len, &displs[5]) ) ||
- ( MPI_SUCCESS != MPI_Address(&sample.ver, &displs[6]) ) ||
- ( MPI_SUCCESS != MPI_Address(&sample.count, &displs[7]) ) ||
- ( MPI_SUCCESS != MPI_Address(&sample.magic, &displs[8]) ) ) {
+ if ( ( MPI_SUCCESS != MPI_Get_address(&sample.req, &displs[0]) ) ||
+ ( MPI_SUCCESS != MPI_Get_address(&sample.src, &displs[1]) ) ||
+ ( MPI_SUCCESS != MPI_Get_address(&sample.dest, &displs[2]) ) ||
+ ( MPI_SUCCESS != MPI_Get_address(&sample.mssg_num, &displs[3]) ) ||
+ ( MPI_SUCCESS != MPI_Get_address(&sample.base_addr, &displs[4]) ) ||
+ ( MPI_SUCCESS != MPI_Get_address(&sample.len, &displs[5]) ) ||
+ ( MPI_SUCCESS != MPI_Get_address(&sample.ver, &displs[6]) ) ||
+ ( MPI_SUCCESS != MPI_Get_address(&sample.count, &displs[7]) ) ||
+ ( MPI_SUCCESS != MPI_Get_address(&sample.magic, &displs[8]) ) ) {
nerrors++;
success = FALSE;
if ( verbose ) {
- HDfprintf(stdout, "%d:%s: MPI_Address() call failed.\n",
+ HDfprintf(stdout, "%d:%s: MPI_Get_address() call failed.\n",
world_mpi_rank, FUNC);
}
@@ -1246,14 +1245,14 @@ setup_derived_types(void)
if ( success ) {
- result = MPI_Type_struct(9, block_len, displs, mpi_types, &mpi_mssg_t);
+ result = MPI_Type_create_struct(9, block_len, displs, mpi_types, &mpi_mssg_t);
if ( result != MPI_SUCCESS ) {
nerrors++;
success = FALSE;
if ( verbose ) {
- HDfprintf(stdout, "%d:%s: MPI_Type_struct() call failed.\n",
+ HDfprintf(stdout, "%d:%s: MPI_Type_create_struct() call failed.\n",
world_mpi_rank, FUNC);
}
}
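
MPI_Get_address() and MPI_Type_create_struct() are the MPI-2 replacements for MPI_Address() and MPI_Type_struct(), which were deprecated and later removed in MPI-3 -- hence this modernization. The full pattern, as a standalone sketch with a small illustrative struct: take each member's address, rebase the displacements against the first member, then build and commit the struct type.

    struct pair { int id; double value; };   /* illustrative, not from the patch */

    static MPI_Datatype
    build_pair_type(void)
    {
        struct pair  sample;
        MPI_Aint     displs[2];
        int          block_len[2]    = { 1, 1 };
        MPI_Datatype member_types[2] = { MPI_INT, MPI_DOUBLE };
        MPI_Datatype pair_type;

        MPI_Get_address(&sample.id,    &displs[0]);
        MPI_Get_address(&sample.value, &displs[1]);
        displs[1] -= displs[0];      /* make displacements relative to the start */
        displs[0]  = 0;

        MPI_Type_create_struct(2, block_len, displs, member_types, &pair_type);
        MPI_Type_commit(&pair_type);

        return pair_type;
    }
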
@@ -1278,19 +1277,19 @@ setup_derived_types(void)
} /* setup_derived_types */
-
+
/*****************************************************************************
*
- * Function: takedown_derived_types()
+ * Function: takedown_derived_types()
*
- * Purpose: take down the derived types used by the test bed. At present,
- * only the mpi_mssg derived type is needed.
+ * Purpose: Take down the derived types used by the test bed. At present,
+ * only the mpi_mssg derived type is needed.
*
- * Return: Success: TRUE
+ * Return: Success: TRUE
*
- * Failure: FALSE
+ * Failure: FALSE
*
- * Programmer: JRM -- 12/22/05
+ * Programmer: JRM -- 12/22/05
*
*****************************************************************************/
static hbool_t
@@ -1322,16 +1321,16 @@ takedown_derived_types(void)
/*****************************************************************************
*
- * Function: reset_server_counters()
+ * Function: reset_server_counters()
*
- * Purpose: Reset the counters maintained by the server, doing a
- * sanity check in passing.
+ * Purpose: Reset the counters maintained by the server, doing a
+ * sanity check in passing.
*
- * Return: Success: TRUE
+ * Return: Success: TRUE
*
- * Failure: FALSE
+ * Failure: FALSE
*
- * Programmer: JRM -- 5/5/10
+ * Programmer: JRM -- 5/5/10
*
*****************************************************************************/
static hbool_t
@@ -1363,7 +1362,7 @@ reset_server_counters(void)
nerrors++;
if ( verbose ) {
HDfprintf(stdout, "%d:%s: actual/total reads mismatch (%ld/%ld).\n",
- world_mpi_rank, FUNC,
+ world_mpi_rank, FUNC,
actual_total_reads, total_reads);
}
}
@@ -1374,7 +1373,7 @@ reset_server_counters(void)
nerrors++;
if ( verbose ) {
HDfprintf(stdout, "%d:%s: actual/total writes mismatch (%ld/%ld).\n",
- world_mpi_rank, FUNC,
+ world_mpi_rank, FUNC,
actual_total_writes, total_writes);
}
}
@@ -1389,25 +1388,25 @@ reset_server_counters(void)
/*****************************************************************************
*
- * Function: server_main()
+ * Function: server_main()
*
- * Purpose: Main function for the server process. This process exists
- * to provide an independant view of the data array.
+ * Purpose: Main function for the server process. This process exists
+ * to provide an independent view of the data array.
*
- * The function handles request from the other processes in
- * the test until the count of done messages received equals
- * the number of client processes.
+ * The function handles requests from the other processes in
+ * the test until the count of done messages received equals
+ * the number of client processes.
*
- * Return: Success: TRUE
+ * Return: Success: TRUE
*
- * Failure: FALSE
+ * Failure: FALSE
*
- * Programmer: JRM -- 12/22/05
+ * Programmer: JRM -- 12/22/05
*
* Modifications:
*
- * JRM -- 5/10/06
- * Updated for sync message.
+ * JRM -- 5/10/06
+ * Updated for sync message.
*
*****************************************************************************/
static hbool_t
@@ -1437,98 +1436,98 @@ server_main(void)
switch ( mssg.req )
{
- case WRITE_REQ_CODE:
- success = serve_write_request(&mssg);
- break;
+ case WRITE_REQ_CODE:
+ success = serve_write_request(&mssg);
+ break;
- case WRITE_REQ_ACK_CODE:
+ case WRITE_REQ_ACK_CODE:
success = FALSE;
if(verbose)
HDfprintf(stdout, "%s: Received write ack?!?.\n", FUNC);
- break;
+ break;
- case READ_REQ_CODE:
+ case READ_REQ_CODE:
success = serve_read_request(&mssg);
- break;
+ break;
- case READ_REQ_REPLY_CODE:
+ case READ_REQ_REPLY_CODE:
success = FALSE;
if(verbose)
HDfprintf(stdout, "%s: Received read req reply?!?.\n", FUNC);
- break;
+ break;
- case SYNC_REQ_CODE:
+ case SYNC_REQ_CODE:
success = serve_sync_request(&mssg);
- break;
+ break;
- case SYNC_ACK_CODE:
+ case SYNC_ACK_CODE:
success = FALSE;
if(verbose)
HDfprintf(stdout, "%s: Received sync ack?!?.\n", FUNC);
- break;
+ break;
- case REQ_TTL_WRITES_CODE:
- success = serve_total_writes_request(&mssg);
- break;
+ case REQ_TTL_WRITES_CODE:
+ success = serve_total_writes_request(&mssg);
+ break;
- case REQ_TTL_WRITES_RPLY_CODE:
+ case REQ_TTL_WRITES_RPLY_CODE:
success = FALSE;
if(verbose)
HDfprintf(stdout, "%s: Received total writes reply?!?.\n", FUNC);
- break;
+ break;
- case REQ_TTL_READS_CODE:
- success = serve_total_reads_request(&mssg);
- break;
+ case REQ_TTL_READS_CODE:
+ success = serve_total_reads_request(&mssg);
+ break;
- case REQ_TTL_READS_RPLY_CODE:
+ case REQ_TTL_READS_RPLY_CODE:
success = FALSE;
if(verbose)
HDfprintf(stdout, "%s: Received total reads reply?!?.\n", FUNC);
- break;
+ break;
- case REQ_ENTRY_WRITES_CODE:
- success = serve_entry_writes_request(&mssg);
- break;
+ case REQ_ENTRY_WRITES_CODE:
+ success = serve_entry_writes_request(&mssg);
+ break;
- case REQ_ENTRY_WRITES_RPLY_CODE:
+ case REQ_ENTRY_WRITES_RPLY_CODE:
success = FALSE;
if(verbose)
HDfprintf(stdout, "%s: Received entry writes reply?!?.\n", FUNC);
- break;
+ break;
- case REQ_ENTRY_READS_CODE:
- success = serve_entry_reads_request(&mssg);
- break;
+ case REQ_ENTRY_READS_CODE:
+ success = serve_entry_reads_request(&mssg);
+ break;
- case REQ_ENTRY_READS_RPLY_CODE:
+ case REQ_ENTRY_READS_RPLY_CODE:
success = FALSE;
if(verbose)
HDfprintf(stdout, "%s: Received entry reads reply?!?.\n", FUNC);
- break;
+ break;
- case REQ_RW_COUNT_RESET_CODE:
- success = serve_rw_count_reset_request(&mssg);
- break;
+ case REQ_RW_COUNT_RESET_CODE:
+ success = serve_rw_count_reset_request(&mssg);
+ break;
- case REQ_RW_COUNT_RESET_RPLY_CODE:
+ case REQ_RW_COUNT_RESET_RPLY_CODE:
success = FALSE;
if(verbose)
HDfprintf(stdout, "%s: Received RW count reset reply?!?.\n", FUNC);
- break;
+ break;
- case DONE_REQ_CODE:
- done_count++;
- if(done_count >= file_mpi_size)
- done = TRUE;
- break;
+ case DONE_REQ_CODE:
+ done_count++;
+ if(done_count >= file_mpi_size)
+ done = TRUE;
+ break;
- default:
+ default:
nerrors++;
success = FALSE;
if(verbose)
- HDfprintf(stdout, "%d:%s: Unknown request code.\n", world_mpi_rank, FUNC);
- break;
+ HDfprintf(stdout, "%d:%s: Unknown request code.\n", world_mpi_rank, FUNC);
+ break;
}
}
}
@@ -1537,23 +1536,23 @@ server_main(void)
} /* server_main() */
-
+
/*****************************************************************************
*
- * Function: serve_read_request()
+ * Function: serve_read_request()
*
- * Purpose: Serve a read request.
+ * Purpose: Serve a read request.
*
- * The function accepts a pointer to an instance of struct
- * mssg_t as input. If all sanity checks pass, it sends
- * a copy of the indicated datum from the data array to
- * the requesting process.
+ * The function accepts a pointer to an instance of struct
+ * mssg_t as input. If all sanity checks pass, it sends
+ * a copy of the indicated datum from the data array to
+ * the requesting process.
*
- * Return: Success: TRUE
+ * Return: Success: TRUE
*
- * Failure: FALSE
+ * Failure: FALSE
*
- * Programmer: JRM -- 12/22/05
+ * Programmer: JRM -- 12/22/05
*
*****************************************************************************/
static hbool_t
@@ -1609,8 +1608,8 @@ serve_read_request(struct mssg_t * mssg_ptr)
"%d:%s: proc %d read invalid entry. idx/base_addr = %d/%a.\n",
world_mpi_rank, FUNC,
mssg_ptr->src,
- target_index,
- data[target_index].base_addr);
+ target_index,
+ data[target_index].base_addr);
}
} else {
@@ -1620,13 +1619,13 @@ serve_read_request(struct mssg_t * mssg_ptr)
reply.dest = mssg_ptr->src;
reply.mssg_num = -1; /* set by send function */
reply.base_addr = data[target_index].base_addr;
- reply.len = data[target_index].len;
+ H5_CHECKED_ASSIGN(reply.len, unsigned, data[target_index].len, size_t);
reply.ver = data[target_index].ver;
- reply.count = 0;
+ reply.count = 0;
reply.magic = MSSG_MAGIC;
- /* and update the counters */
- total_reads++;
+ /* and update the counters */
+ total_reads++;
(data[target_index].reads)++;
}
}
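
reply.len is declared unsigned while the datum length is a size_t, so the new H5_CHECKED_ASSIGN call makes the narrowing conversion explicit instead of silent. Assuming the usual verify-then-cast semantics of that internal macro (the real definition lives in HDF5's private headers and is more elaborate), the pattern is roughly:

    /* Hypothetical sketch of a checked narrowing assignment. */
    #define CHECKED_ASSIGN(dst, dsttype, src, srctype)          \
        do {                                                    \
            HDassert((srctype)(dsttype)(src) == (src));         \
            (dst) = (dsttype)(src);                             \
        } while (0)

    CHECKED_ASSIGN(reply.len, unsigned, data[target_index].len, size_t);
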
@@ -1641,7 +1640,7 @@ serve_read_request(struct mssg_t * mssg_ptr)
if ( success ) {
HDfprintf(stdout, "%d read 0x%llx. len = %d. ver = %d.\n",
- (int)(mssg_ptr->src),
+ (int)(mssg_ptr->src),
(long long)(data[target_index].base_addr),
(int)(data[target_index].len),
(int)(data[target_index].ver));
@@ -1649,38 +1648,38 @@ serve_read_request(struct mssg_t * mssg_ptr)
} else {
HDfprintf(stdout, "%d read 0x%llx FAILED. len = %d. ver = %d.\n",
- (int)(mssg_ptr->src),
+ (int)(mssg_ptr->src),
(long long)(data[target_index].base_addr),
(int)(data[target_index].len),
(int)(data[target_index].ver));
}
- }
+ }
return(success);
} /* serve_read_request() */
-
+
/*****************************************************************************
*
- * Function: serve_sync_request()
+ * Function: serve_sync_request()
*
- * Purpose: Serve a sync request.
+ * Purpose: Serve a sync request.
*
- * The function accepts a pointer to an instance of struct
- * mssg_t as input. If all sanity checks pass, it sends a
- * sync ack to the requesting process.
+ * The function accepts a pointer to an instance of struct
+ * mssg_t as input. If all sanity checks pass, it sends a
+ * sync ack to the requesting process.
*
- * This service exist to allow the sending process to ensure
- * that all previous messages have been processed before
- * proceeding.
+ * This service exists to allow the sending process to ensure
+ * that all previous messages have been processed before
+ * proceeding.
*
- * Return: Success: TRUE
+ * Return: Success: TRUE
*
- * Failure: FALSE
+ * Failure: FALSE
*
- * Programmer: JRM -- 5/10/06
+ * Programmer: JRM -- 5/10/06
*
*****************************************************************************/
static hbool_t
@@ -1712,7 +1711,7 @@ serve_sync_request(struct mssg_t * mssg_ptr)
reply.base_addr = 0;
reply.len = 0;
reply.ver = 0;
- reply.count = 0;
+ reply.count = 0;
reply.magic = MSSG_MAGIC;
}
@@ -1732,29 +1731,29 @@ serve_sync_request(struct mssg_t * mssg_ptr)
HDfprintf(stdout, "%d sync FAILED.\n", (int)(mssg_ptr->src));
}
- }
+ }
return(success);
} /* serve_sync_request() */
-
+
/*****************************************************************************
*
- * Function: serve_write_request()
+ * Function: serve_write_request()
*
- * Purpose: Serve a write request.
+ * Purpose: Serve a write request.
*
- * The function accepts a pointer to an instance of struct
- * mssg_t as input. If all sanity checks pass, it updates
- * the version number of the target data array entry as
- * specified in the message.
+ * The function accepts a pointer to an instance of struct
+ * mssg_t as input. If all sanity checks pass, it updates
+ * the version number of the target data array entry as
+ * specified in the message.
*
- * Return: Success: TRUE
+ * Return: Success: TRUE
*
- * Failure: FALSE
+ * Failure: FALSE
*
- * Programmer: JRM -- 12/21/05
+ * Programmer: JRM -- 12/21/05
*
*****************************************************************************/
static hbool_t
@@ -1763,7 +1762,7 @@ serve_write_request(struct mssg_t * mssg_ptr)
hbool_t report_mssg = FALSE;
hbool_t success = TRUE;
int target_index;
- int new_ver_num;
+ int new_ver_num = 0;
haddr_t target_addr;
#if DO_WRITE_REQ_ACK
struct mssg_t reply;
@@ -1812,7 +1811,7 @@ serve_write_request(struct mssg_t * mssg_ptr)
new_ver_num = mssg_ptr->ver;
/* this check should catch duplicate writes */
- if ( new_ver_num <= data[target_index].ver ) {
+ if ( new_ver_num <= data[target_index].ver ) {
nerrors++;
success = FALSE;
@@ -1826,12 +1825,12 @@ serve_write_request(struct mssg_t * mssg_ptr)
if ( success ) {
- /* process the write */
+ /* process the write */
data[target_index].ver = new_ver_num;
data[target_index].valid = TRUE;
/* and update the counters */
- total_writes++;
+ total_writes++;
(data[target_index].writes)++;
#if DO_WRITE_REQ_ACK
@@ -1842,12 +1841,12 @@ serve_write_request(struct mssg_t * mssg_ptr)
reply.dest = mssg_ptr->src;
reply.mssg_num = -1; /* set by send function */
reply.base_addr = data[target_index].base_addr;
- reply.len = data[target_index].len;
+ H5_CHECKED_ASSIGN(reply.len, unsigned, data[target_index].len, size_t);
reply.ver = data[target_index].ver;
reply.count = 0;
reply.magic = MSSG_MAGIC;
- /* and send it */
+ /* and send it */
success = send_mssg(&reply, TRUE);
#endif /* DO_WRITE_REQ_ACK */
@@ -1859,7 +1858,7 @@ serve_write_request(struct mssg_t * mssg_ptr)
if ( success ) {
HDfprintf(stdout, "%d write 0x%llx. len = %d. ver = %d.\n",
- (int)(mssg_ptr->src),
+ (int)(mssg_ptr->src),
(long long)(data[target_index].base_addr),
(int)(data[target_index].len),
(int)(data[target_index].ver));
@@ -1867,36 +1866,36 @@ serve_write_request(struct mssg_t * mssg_ptr)
} else {
HDfprintf(stdout, "%d write 0x%llx FAILED. len = %d. ver = %d.\n",
- (int)(mssg_ptr->src),
+ (int)(mssg_ptr->src),
(long long)(data[target_index].base_addr),
(int)(data[target_index].len),
(int)(data[target_index].ver));
}
- }
+ }
return(success);
} /* serve_write_request() */
-
+
/*****************************************************************************
*
- * Function: serve_total_writes_request()
+ * Function: serve_total_writes_request()
*
- * Purpose: Serve a request for the total number of writes recorded since
- * the last reset.
+ * Purpose: Serve a request for the total number of writes recorded since
+ * the last reset.
*
- * The function accepts a pointer to an instance of struct
- * mssg_t as input. If all sanity checks pass, it sends
- * the current value of the total_writes global variable to
- * the requesting process.
+ * The function accepts a pointer to an instance of struct
+ * mssg_t as input. If all sanity checks pass, it sends
+ * the current value of the total_writes global variable to
+ * the requesting process.
*
- * Return: Success: TRUE
+ * Return: Success: TRUE
*
- * Failure: FALSE
+ * Failure: FALSE
*
- * Programmer: JRM -- 5/5/10
+ * Programmer: JRM -- 5/5/10
*
*****************************************************************************/
static hbool_t
@@ -1928,7 +1927,7 @@ serve_total_writes_request(struct mssg_t * mssg_ptr)
reply.base_addr = 0;
reply.len = 0;
reply.ver = 0;
- reply.count = total_writes;
+ reply.count = (unsigned)total_writes;
reply.magic = MSSG_MAGIC;
}
@@ -1942,40 +1941,40 @@ serve_total_writes_request(struct mssg_t * mssg_ptr)
if ( success ) {
HDfprintf(stdout, "%d request total writes %ld.\n",
- (int)(mssg_ptr->src),
+ (int)(mssg_ptr->src),
total_writes);
} else {
HDfprintf(stdout, "%d request total writes %ld -- FAILED.\n",
- (int)(mssg_ptr->src),
+ (int)(mssg_ptr->src),
total_writes);
}
- }
+ }
return(success);
} /* serve_total_writes_request() */
-
+
/*****************************************************************************
*
- * Function: serve_total_reads_request()
+ * Function: serve_total_reads_request()
*
- * Purpose: Serve a request for the total number of reads recorded since
- * the last reset.
+ * Purpose: Serve a request for the total number of reads recorded since
+ * the last reset.
*
- * The function accepts a pointer to an instance of struct
- * mssg_t as input. If all sanity checks pass, it sends
- * the current value of the total_reads global variable to
- * the requesting process.
+ * The function accepts a pointer to an instance of struct
+ * mssg_t as input. If all sanity checks pass, it sends
+ * the current value of the total_reads global variable to
+ * the requesting process.
*
- * Return: Success: TRUE
+ * Return: Success: TRUE
*
- * Failure: FALSE
+ * Failure: FALSE
*
- * Programmer: JRM -- 5/5/10
+ * Programmer: JRM -- 5/5/10
*
*****************************************************************************/
static hbool_t
@@ -2007,7 +2006,7 @@ serve_total_reads_request(struct mssg_t * mssg_ptr)
reply.base_addr = 0;
reply.len = 0;
reply.ver = 0;
- reply.count = total_reads;
+ reply.count = (unsigned)total_reads;
reply.magic = MSSG_MAGIC;
}
@@ -2021,40 +2020,40 @@ serve_total_reads_request(struct mssg_t * mssg_ptr)
if ( success ) {
HDfprintf(stdout, "%d request total reads %ld.\n",
- (int)(mssg_ptr->src),
+ (int)(mssg_ptr->src),
total_reads);
} else {
HDfprintf(stdout, "%d request total reads %ld -- FAILED.\n",
- (int)(mssg_ptr->src),
+ (int)(mssg_ptr->src),
total_reads);
}
- }
+ }
return(success);
} /* serve_total_reads_request() */
-
+
/*****************************************************************************
*
- * Function: serve_entry_writes_request()
+ * Function: serve_entry_writes_request()
*
- * Purpose: Serve an entry writes request.
+ * Purpose: Serve an entry writes request.
*
- * The function accepts a pointer to an instance of struct
- * mssg_t as input. If all sanity checks pass, it sends
- * the number of times that the indicated datum has been
- * written since the last counter reset to the requesting
- * process.
+ * The function accepts a pointer to an instance of struct
+ * mssg_t as input. If all sanity checks pass, it sends
+ * the number of times that the indicated datum has been
+ * written since the last counter reset to the requesting
+ * process.
*
- * Return: Success: TRUE
+ * Return: Success: TRUE
*
- * Failure: FALSE
+ * Failure: FALSE
*
- * Programmer: JRM -- 5/5/10
+ * Programmer: JRM -- 5/5/10
*
*****************************************************************************/
static hbool_t
@@ -2101,7 +2100,7 @@ serve_entry_writes_request(struct mssg_t * mssg_ptr)
reply.base_addr = target_addr;
reply.len = 0;
reply.ver = 0;
- reply.count = data[target_index].writes;
+ reply.count = (unsigned)data[target_index].writes;
reply.magic = MSSG_MAGIC;
}
}
@@ -2116,42 +2115,42 @@ serve_entry_writes_request(struct mssg_t * mssg_ptr)
if ( success ) {
HDfprintf(stdout, "%d request entry 0x%llx writes = %ld.\n",
- (int)(mssg_ptr->src),
+ (int)(mssg_ptr->src),
(long long)(data[target_index].base_addr),
(long)(data[target_index].writes));
} else {
HDfprintf(stdout, "%d request entry 0x%llx writes = %ld FAILED.\n",
- (int)(mssg_ptr->src),
+ (int)(mssg_ptr->src),
(long long)(data[target_index].base_addr),
(long)(data[target_index].writes));
}
- }
+ }
return(success);
} /* serve_entry_writes_request() */
-
+
/*****************************************************************************
*
- * Function: serve_entry_reads_request()
+ * Function: serve_entry_reads_request()
*
- * Purpose: Serve an entry reads request.
+ * Purpose: Serve an entry reads request.
*
- * The function accepts a pointer to an instance of struct
- * mssg_t as input. If all sanity checks pass, it sends
- * the number of times that the indicated datum has been
- * read since the last counter reset to the requesting
- * process.
+ * The function accepts a pointer to an instance of struct
+ * mssg_t as input. If all sanity checks pass, it sends
+ * the number of times that the indicated datum has been
+ * read since the last counter reset to the requesting
+ * process.
*
- * Return: Success: TRUE
+ * Return: Success: TRUE
*
- * Failure: FALSE
+ * Failure: FALSE
*
- * Programmer: JRM -- 5/5/10
+ * Programmer: JRM -- 5/5/10
*
*****************************************************************************/
static hbool_t
@@ -2198,7 +2197,7 @@ serve_entry_reads_request(struct mssg_t * mssg_ptr)
reply.base_addr = target_addr;
reply.len = 0;
reply.ver = 0;
- reply.count = (long)(data[target_index].reads);
+ reply.count = (unsigned)(data[target_index].reads);
reply.magic = MSSG_MAGIC;
}
}
@@ -2213,41 +2212,41 @@ serve_entry_reads_request(struct mssg_t * mssg_ptr)
if ( success ) {
HDfprintf(stdout, "%d request entry 0x%llx reads = %ld.\n",
- (int)(mssg_ptr->src),
+ (int)(mssg_ptr->src),
(long long)(data[target_index].base_addr),
(long)(data[target_index].reads));
} else {
HDfprintf(stdout, "%d request entry 0x%llx reads = %ld FAILED.\n",
- (int)(mssg_ptr->src),
+ (int)(mssg_ptr->src),
(long long)(data[target_index].base_addr),
(long)(data[target_index].reads));
}
- }
+ }
return(success);
} /* serve_entry_reads_request() */
-
+
/*****************************************************************************
*
- * Function: serve_rw_count_reset_request()
+ * Function: serve_rw_count_reset_request()
*
- * Purpose: Serve read/write count reset request.
+ * Purpose: Serve a read/write count reset request.
*
- * The function accepts a pointer to an instance of struct
- * mssg_t as input. If all sanity checks pass, it resets the
- * read/write counters, and sends a confirmation message to
- * the calling process.
+ * The function accepts a pointer to an instance of struct
+ * mssg_t as input. If all sanity checks pass, it resets the
+ * read/write counters, and sends a confirmation message to
+ * the calling process.
*
- * Return: Success: TRUE
+ * Return: Success: TRUE
*
- * Failure: FALSE
+ * Failure: FALSE
*
- * Programmer: JRM -- 5/5/10
+ * Programmer: JRM -- 5/5/10
*
*****************************************************************************/
static hbool_t
@@ -2272,7 +2271,7 @@ serve_rw_count_reset_request(struct mssg_t * mssg_ptr)
if ( success ) {
success = reset_server_counters();
- }
+ }
if ( success ) {
@@ -2306,7 +2305,7 @@ serve_rw_count_reset_request(struct mssg_t * mssg_ptr)
(int)(mssg_ptr->src));
}
- }
+ }
return(success);
@@ -2317,15 +2316,15 @@ serve_rw_count_reset_request(struct mssg_t * mssg_ptr)
/**************************** Call back functions ****************************/
/*****************************************************************************/
-
+
/*-------------------------------------------------------------------------
- * Function: datum_get_initial_load_size
+ * Function: datum_get_initial_load_size
*
- * Purpose: Query the image size for an entry before deserializing it
+ * Purpose: Query the image size for an entry before deserializing it
*
- * Return: SUCCEED
+ * Return: SUCCEED
*
- * Programmer: Quincey Koziol
+ * Programmer: Quincey Koziol
* 5/18/10
*
*-------------------------------------------------------------------------
@@ -2355,9 +2354,9 @@ datum_get_initial_load_size(void *udata_ptr, size_t *image_len_ptr)
if ( callbacks_verbose ) {
HDfprintf(stdout,
- "%d: get_initial_load_size() idx = %d, addr = %ld, len = %d.\n",
+ "%d: get_initial_load_size() idx = %d, addr = %ld, len = %d.\n",
world_mpi_rank, idx, (long)addr, (int)entry_ptr->local_len);
- fflush(stdout);
+ fflush(stdout);
}
/* Set image length size */
@@ -2366,21 +2365,21 @@ datum_get_initial_load_size(void *udata_ptr, size_t *image_len_ptr)
return(SUCCEED);
} /* get_initial_load_size() */
-
+
/*-------------------------------------------------------------------------
- * Function: datum_deserialize
+ * Function: datum_deserialize
*
- * Purpose: deserialize the entry.
+ * Purpose: Deserialize the entry.
*
- * Return: void * (pointer to the in core representation of the entry)
+ * Return: void * (pointer to the in core representation of the entry)
*
- * Programmer: John Mainzer
+ * Programmer: John Mainzer
* 9/20/07
*
*-------------------------------------------------------------------------
*/
static void *
-datum_deserialize(const void * image_ptr,
+datum_deserialize(const void H5_ATTR_NDEBUG_UNUSED *image_ptr,
H5_ATTR_UNUSED size_t len,
void * udata_ptr,
hbool_t * dirty_ptr)
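
image_ptr is referenced only inside HDassert() checks, so it becomes an unused parameter when asserts are compiled out with NDEBUG; the new attribute silences -Wunused-parameter in that configuration only. A plausible reconstruction of such a macro -- a sketch, not copied from the HDF5 headers:

    #ifdef NDEBUG
    #  define H5_ATTR_NDEBUG_UNUSED __attribute__((unused)) /* asserts removed    */
    #else
    #  define H5_ATTR_NDEBUG_UNUSED                         /* asserts use the arg */
    #endif
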
@@ -2409,10 +2408,10 @@ datum_deserialize(const void * image_ptr,
if ( callbacks_verbose ) {
HDfprintf(stdout,
- "%d: deserialize() idx = %d, addr = %ld, len = %d, is_dirty = %d.\n",
- world_mpi_rank, idx, (long)addr, (int)len,
- (int)(entry_ptr->header.is_dirty));
- fflush(stdout);
+ "%d: deserialize() idx = %d, addr = %ld, len = %d, is_dirty = %d.\n",
+ world_mpi_rank, idx, (long)addr, (int)len,
+ (int)(entry_ptr->header.is_dirty));
+ fflush(stdout);
}
*dirty_ptr = FALSE;
@@ -2427,18 +2426,18 @@ datum_deserialize(const void * image_ptr,
} /* deserialize() */
-
+
/*-------------------------------------------------------------------------
- * Function: datum_image_len
+ * Function: datum_image_len
*
- * Purpose: Return the real (and possibly reduced) length of the image.
- * The helper functions verify that the correct version of
- * deserialize is being called, and then call deserialize
- * proper.
+ * Purpose: Return the real (and possibly reduced) length of the image.
+ * The helper functions verify that the correct version of
+ * deserialize is being called, and then call deserialize
+ * proper.
*
- * Return: SUCCEED
+ * Return: SUCCEED
*
- * Programmer: John Mainzer
+ * Programmer: John Mainzer
* 9/19/07
*
*-------------------------------------------------------------------------
@@ -2465,10 +2464,10 @@ datum_image_len(const void *thing, size_t *image_len)
if(callbacks_verbose) {
HDfprintf(stdout,
- "%d: image_len() idx = %d, addr = %ld, len = %d.\n",
- world_mpi_rank, idx, (long)(entry_ptr->base_addr),
- (int)(entry_ptr->local_len));
- fflush(stdout);
+ "%d: image_len() idx = %d, addr = %ld, len = %d.\n",
+ world_mpi_rank, idx, (long)(entry_ptr->base_addr),
+ (int)(entry_ptr->local_len));
+ fflush(stdout);
}
HDassert( entry_ptr->header.addr == entry_ptr->base_addr );
@@ -2478,29 +2477,28 @@ datum_image_len(const void *thing, size_t *image_len)
return(SUCCEED);
} /* datum_image_len() */
-
+
/*-------------------------------------------------------------------------
- * Function: datum_serialize
+ * Function: datum_serialize
*
- * Purpose: Serialize the supplied entry.
+ * Purpose: Serialize the supplied entry.
*
- * Return: SUCCEED if successful, FAIL otherwise.
+ * Return: SUCCEED if successful, FAIL otherwise.
*
- * Programmer: John Mainzer
+ * Programmer: John Mainzer
* 10/30/07
*
*-------------------------------------------------------------------------
*/
static herr_t
datum_serialize(const H5F_t *f,
- void *image_ptr,
+ void H5_ATTR_NDEBUG_UNUSED *image_ptr,
size_t len,
void *thing_ptr)
{
herr_t ret_value = SUCCEED;
int idx;
struct datum * entry_ptr;
- H5C_t * cache_ptr;
struct H5AC_aux_t * aux_ptr;
HDassert( thing_ptr );
@@ -2511,11 +2509,8 @@ datum_serialize(const H5F_t *f,
HDassert( f );
HDassert( f->shared );
HDassert( f->shared->cache );
-
- cache_ptr = f->shared->cache;
-
- HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC );
- HDassert( cache_ptr->aux_ptr );
+ HDassert( f->shared->cache->magic == H5C__H5C_T_MAGIC );
+ HDassert( f->shared->cache->aux_ptr );
aux_ptr = (H5AC_aux_t *)(f->shared->cache->aux_ptr);
@@ -2534,9 +2529,9 @@ datum_serialize(const H5F_t *f,
if ( callbacks_verbose ) {
HDfprintf(stdout,
- "%d: serialize() idx = %d, addr = %ld, len = %d.\n",
- world_mpi_rank, idx, (long)entry_ptr->header.addr, (int)len);
- fflush(stdout);
+ "%d: serialize() idx = %d, addr = %ld, len = %d.\n",
+ world_mpi_rank, idx, (long)entry_ptr->header.addr, (int)len);
+ fflush(stdout);
}
HDassert( entry_ptr->header.addr == entry_ptr->base_addr );
@@ -2557,16 +2552,16 @@ datum_serialize(const H5F_t *f,
} /* datum_serialize() */
-
+
/*-------------------------------------------------------------------------
- * Function: datum_notify
+ * Function: datum_notify
*
- * Purpose: Do the communication with the server we used to do in the
- * flush and load callbacks in the version 2 cache.
+ * Purpose: Do the communication with the server that was formerly done
+ * in the flush and load callbacks in the version 2 cache.
*
- * Return: SUCCEED
+ * Return: SUCCEED
*
- * Programmer: John Mainzer
+ * Programmer: John Mainzer
* 1/12/15
*
*-------------------------------------------------------------------------
@@ -2596,7 +2591,7 @@ datum_notify(H5C_notify_action_t action, void *thing)
HDfprintf(stdout,
"%d: notify() action = %d, idx = %d, addr = %ld.\n",
- world_mpi_rank, (int) action, idx,
+ world_mpi_rank, (int) action, idx,
(long)entry_ptr->header.addr);
fflush(stdout);
}
@@ -2638,7 +2633,7 @@ datum_notify(H5C_notify_action_t action, void *thing)
mssg.dest = world_server_mpi_rank;
mssg.mssg_num = -1; /* set by send function */
mssg.base_addr = entry_ptr->base_addr;
- mssg.len = entry_ptr->len;
+ H5_CHECKED_ASSIGN(mssg.len, unsigned, entry_ptr->len, size_t);
mssg.ver = 0; /* bogus -- should be corrected by server */
mssg.count = 0; /* not used */
mssg.magic = MSSG_MAGIC;
@@ -2685,62 +2680,62 @@ datum_notify(H5C_notify_action_t action, void *thing)
}
#if 0 /* This has been useful debugging code -- keep it for now. */
- if ( mssg.req != READ_REQ_REPLY_CODE ) {
+ if ( mssg.req != READ_REQ_REPLY_CODE ) {
- HDfprintf(stdout,
+ HDfprintf(stdout,
"%d:%s: mssg.req != READ_REQ_REPLY_CODE.\n",
- world_mpi_rank, FUNC);
- HDfprintf(stdout, "%d:%s: mssg.req = %d.\n",
- world_mpi_rank, FUNC, (int)(mssg.req));
- }
+ world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: mssg.req = %d.\n",
+ world_mpi_rank, FUNC, (int)(mssg.req));
+ }
- if ( mssg.src != world_server_mpi_rank ) {
+ if ( mssg.src != world_server_mpi_rank ) {
- HDfprintf(stdout,
+ HDfprintf(stdout,
"%d:%s: mssg.src != world_server_mpi_rank.\n",
- world_mpi_rank, FUNC);
- }
+ world_mpi_rank, FUNC);
+ }
- if ( mssg.dest != world_mpi_rank ) {
+ if ( mssg.dest != world_mpi_rank ) {
- HDfprintf(stdout,
+ HDfprintf(stdout,
"%d:%s: mssg.dest != world_mpi_rank.\n",
- world_mpi_rank, FUNC);
+ world_mpi_rank, FUNC);
}
- if ( mssg.base_addr != entry_ptr->base_addr ) {
+ if ( mssg.base_addr != entry_ptr->base_addr ) {
- HDfprintf(stdout,
- "%d:%s: mssg.base_addr != entry_ptr->base_addr.\n",
- world_mpi_rank, FUNC);
- HDfprintf(stdout, "%d:%s: mssg.base_addr = %a.\n",
- world_mpi_rank, FUNC, mssg.base_addr);
- HDfprintf(stdout,
+ HDfprintf(stdout,
+ "%d:%s: mssg.base_addr != entry_ptr->base_addr.\n",
+ world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: mssg.base_addr = %a.\n",
+ world_mpi_rank, FUNC, mssg.base_addr);
+ HDfprintf(stdout,
"%d:%s: entry_ptr->base_addr = %a.\n",
- world_mpi_rank, FUNC,
+ world_mpi_rank, FUNC,
entry_ptr->base_addr);
}
- if ( mssg.len != entry_ptr->len ) {
+ if ( mssg.len != entry_ptr->len ) {
- HDfprintf(stdout,
+ HDfprintf(stdout,
"%d:%s: mssg.len != entry_ptr->len.\n",
- world_mpi_rank, FUNC);
- HDfprintf(stdout, "%d:%s: mssg.len = %a.\n",
- world_mpi_rank, FUNC, mssg.len);
+ world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: mssg.len = %a.\n",
+ world_mpi_rank, FUNC, mssg.len);
}
- if ( mssg.ver < entry_ptr->ver ) {
+ if ( mssg.ver < entry_ptr->ver ) {
- HDfprintf(stdout,
+ HDfprintf(stdout,
"%d:%s: mssg.ver < entry_ptr->ver.\n",
- world_mpi_rank, FUNC);
+ world_mpi_rank, FUNC);
}
- if ( mssg.magic != MSSG_MAGIC ) {
+ if ( mssg.magic != MSSG_MAGIC ) {
- HDfprintf(stdout, "%d:%s: mssg.magic != MSSG_MAGIC.\n",
- world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: mssg.magic != MSSG_MAGIC.\n",
+ world_mpi_rank, FUNC);
}
#endif /* JRM */
@@ -2753,7 +2748,7 @@ datum_notify(H5C_notify_action_t action, void *thing)
}
break;
- case H5C_NOTIFY_ACTION_AFTER_FLUSH:
+ case H5C_NOTIFY_ACTION_AFTER_FLUSH:
if ( callbacks_verbose ) {
HDfprintf(stdout,
@@ -2767,11 +2762,11 @@ datum_notify(H5C_notify_action_t action, void *thing)
aux_ptr = entry_ptr->aux_ptr;
entry_ptr->aux_ptr = NULL;
- HDassert(entry_ptr->header.is_dirty); /* JRM */
+ HDassert(entry_ptr->header.is_dirty); /* JRM */
- if ( ( file_mpi_rank != 0 ) &&
+ if ( ( file_mpi_rank != 0 ) &&
( entry_ptr->dirty ) &&
- ( aux_ptr->metadata_write_strategy ==
+ ( aux_ptr->metadata_write_strategy ==
H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY ) ) {
ret_value = FAIL;
@@ -2784,8 +2779,8 @@ datum_notify(H5C_notify_action_t action, void *thing)
if ( entry_ptr->header.is_dirty ) {
- was_dirty = TRUE; /* so we will receive the ack
- * if requested
+ was_dirty = TRUE; /* so we will receive the ack
+ * if requested
*/
/* compose the message */
@@ -2794,7 +2789,7 @@ datum_notify(H5C_notify_action_t action, void *thing)
mssg.dest = world_server_mpi_rank;
mssg.mssg_num = -1; /* set by send function */
mssg.base_addr = entry_ptr->base_addr;
- mssg.len = entry_ptr->len;
+ H5_CHECKED_ASSIGN(mssg.len, unsigned, entry_ptr->len, size_t);
mssg.ver = entry_ptr->ver;
mssg.count = 0;
mssg.magic = MSSG_MAGIC;
@@ -2811,7 +2806,7 @@ datum_notify(H5C_notify_action_t action, void *thing)
else
{
entry_ptr->dirty = FALSE;
- entry_ptr->flushed = TRUE;
+ entry_ptr->flushed = TRUE;
}
}
}
@@ -2839,7 +2834,7 @@ datum_notify(H5C_notify_action_t action, void *thing)
nerrors++;
ret_value = FAIL;
if ( verbose ) {
- HDfprintf(stdout,
+ HDfprintf(stdout,
"%d:%s: Bad data in write req ack.\n",
world_mpi_rank, FUNC);
}
@@ -2855,7 +2850,7 @@ datum_notify(H5C_notify_action_t action, void *thing)
datum_pinned_flushes++;
HDassert(entry_ptr->global_pinned || entry_ptr->local_pinned);
}
- break;
+ break;
case H5AC_NOTIFY_ACTION_BEFORE_EVICT:
if ( callbacks_verbose ) {
@@ -2950,33 +2945,33 @@ datum_notify(H5C_notify_action_t action, void *thing)
/* do nothing */
break;
- default:
+ default:
nerrors++;
ret_value = FAIL;
if ( verbose ) {
HDfprintf(stdout, "%d:%s: Unknown notify action.\n",
world_mpi_rank, FUNC);
}
- break;
+ break;
}
return(ret_value);
} /* datum_notify() */
-
+
/*-------------------------------------------------------------------------
- * Function: datum_free_icr
+ * Function: datum_free_icr
*
- * Purpose: Nominally, this callback is supposed to free the
- * in core representation of the entry.
+ * Purpose: Nominally, this callback is supposed to free the
+ * in core representation of the entry.
*
- * In the context of this test bed, we use it to do
- * do all the processing we used to do on a destroy.
+ * In the context of this test bed, we use it to do
+ * all the processing we used to do on a destroy.
*
- * Return: SUCCEED
+ * Return: SUCCEED
*
- * Programmer: John Mainzer
+ * Programmer: John Mainzer
* 9/19/07
*
*-------------------------------------------------------------------------
@@ -3001,9 +2996,9 @@ datum_free_icr(void * thing)
if ( callbacks_verbose ) {
HDfprintf(stdout,
- "%d: free_icr() idx = %d, dirty = %d.\n",
- world_mpi_rank, idx, (int)(entry_ptr->dirty));
- fflush(stdout);
+ "%d: free_icr() idx = %d, dirty = %d.\n",
+ world_mpi_rank, idx, (int)(entry_ptr->dirty));
+ fflush(stdout);
}
HDassert( entry_ptr->header.addr == entry_ptr->base_addr );
@@ -3020,7 +3015,7 @@ datum_free_icr(void * thing)
return(SUCCEED);
} /* datum_free_icr() */
-
+
/*****************************************************************************/
/************************** test utility functions ***************************/
/*****************************************************************************/
@@ -3029,9 +3024,9 @@ datum_free_icr(void * thing)
* Function: expunge_entry()
*
* Purpose: Expunge the entry indicated by the type and index, mark it
- * as clean, and don't increment its version number.
+ * as clean, and don't increment its version number.
*
- * Do nothing if nerrors is non-zero on entry.
+ * Do nothing if nerrors is non-zero on entry.
*
* Return: void
*
@@ -3061,37 +3056,36 @@ expunge_entry(H5F_t * file_ptr,
if ( nerrors == 0 ) {
- result = H5AC_expunge_entry(file_ptr, (hid_t)-1, &(types[0]),
- entry_ptr->header.addr, H5AC__NO_FLAGS_SET);
+ result = H5AC_expunge_entry(file_ptr, &(types[0]), entry_ptr->header.addr, H5AC__NO_FLAGS_SET);
if ( result < 0 ) {
nerrors++;
if ( verbose ) {
- HDfprintf(stdout, "%d:%s: Error in H5AC_expunge_entry().\n",
- world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: Error in H5AC_expunge_entry().\n",
+ world_mpi_rank, FUNC);
}
}
HDassert( ((entry_ptr->header).type)->id == DATUM_ENTRY_TYPE );
- HDassert( ! ((entry_ptr->header).is_dirty) );
+ HDassert( ! ((entry_ptr->header).is_dirty) );
- result = H5C_get_entry_status(file_ptr, entry_ptr->base_addr,
- NULL, &in_cache, NULL, NULL, NULL, NULL, NULL, NULL, NULL);
+ result = H5C_get_entry_status(file_ptr, entry_ptr->base_addr,
+ NULL, &in_cache, NULL, NULL, NULL, NULL, NULL, NULL, NULL);
- if ( result < 0 ) {
+ if ( result < 0 ) {
nerrors++;
if ( verbose ) {
- HDfprintf(stdout, "%d:%s: Error in H5C_get_entry_status().\n",
- world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: Error in H5C_get_entry_status().\n",
+ world_mpi_rank, FUNC);
}
} else if ( in_cache ) {
nerrors++;
if ( verbose ) {
- HDfprintf(stdout, "%d:%s: Expunged entry still in cache?!?\n",
- world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: Expunged entry still in cache?!?\n",
+ world_mpi_rank, FUNC);
}
}
}
@@ -3100,14 +3094,14 @@ expunge_entry(H5F_t * file_ptr,
} /* expunge_entry() */
-
+
/*****************************************************************************
* Function: insert_entry()
*
* Purpose: Insert the entry indicated by the type and index, mark it
- * as dirty, and increment its version number.
+ * as dirty, and increment its version number.
*
- * Do nothing if nerrors is non-zero on entry.
+ * Do nothing if nerrors is non-zero on entry.
*
* Return: void
*
@@ -3148,7 +3142,7 @@ insert_entry(H5C_t * cache_ptr,
(entry_ptr->ver)++;
entry_ptr->dirty = TRUE;
- result = H5AC_insert_entry(file_ptr, H5AC_ind_read_dxpl_id, &(types[0]),
+ result = H5AC_insert_entry(file_ptr, &(types[0]),
entry_ptr->base_addr, (void *)(&(entry_ptr->header)), flags);
if ( ( result < 0 ) ||
@@ -3158,33 +3152,33 @@ insert_entry(H5C_t * cache_ptr,
nerrors++;
if ( verbose ) {
- HDfprintf(stdout, "%d:%s: Error in H5AC_insert_entry().\n",
- world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: Error in H5AC_insert_entry().\n",
+ world_mpi_rank, FUNC);
}
}
if ( ! (entry_ptr->header.is_dirty) ) {
- /* it is possible that we just exceeded the dirty bytes
- * threshold, triggering a write of the newly inserted
- * entry. Test for this, and only flag an error if this
- * is not the case.
- */
+ /* it is possible that we just exceeded the dirty bytes
+ * threshold, triggering a write of the newly inserted
+ * entry. Test for this, and only flag an error if this
+ * is not the case.
+ */
- struct H5AC_aux_t * aux_ptr;
+ struct H5AC_aux_t * aux_ptr;
- aux_ptr = ((H5AC_aux_t *)(cache_ptr->aux_ptr));
+ aux_ptr = ((H5AC_aux_t *)(cache_ptr->aux_ptr));
- if ( ! ( ( aux_ptr != NULL ) &&
- ( aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC ) &&
- ( aux_ptr->dirty_bytes == 0 ) ) ) {
+ if ( ! ( ( aux_ptr != NULL ) &&
+ ( aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC ) &&
+ ( aux_ptr->dirty_bytes == 0 ) ) ) {
nerrors++;
if ( verbose ) {
- HDfprintf(stdout, "%d:%s: data[%d].header.is_dirty = %d.\n",
- world_mpi_rank, FUNC, idx,
+ HDfprintf(stdout, "%d:%s: data[%d].header.is_dirty = %d.\n",
+ world_mpi_rank, FUNC, idx,
(int)(data[idx].header.is_dirty));
- }
+ }
}
}
@@ -3192,7 +3186,7 @@ insert_entry(H5C_t * cache_ptr,
HDassert( entry_ptr->header.is_pinned );
entry_ptr->global_pinned = TRUE;
- global_pins++;
+ global_pins++;
} else {
@@ -3209,7 +3203,7 @@ insert_entry(H5C_t * cache_ptr,
} /* insert_entry() */
-
+
/*****************************************************************************
* Function: local_pin_and_unpin_random_entries()
*
@@ -3228,8 +3222,8 @@ static void
local_pin_and_unpin_random_entries(H5F_t * file_ptr,
int min_idx,
int max_idx,
- int min_count,
- int max_count)
+ int min_count,
+ int max_count)
{
if ( nerrors == 0 ) {
@@ -3244,40 +3238,40 @@ local_pin_and_unpin_random_entries(H5F_t * file_ptr,
HDassert( min_idx < max_idx );
HDassert( max_idx < NUM_DATA_ENTRIES );
HDassert( max_idx < virt_num_data_entries );
- HDassert( 0 <= min_count );
- HDassert( min_count < max_count );
+ HDassert( 0 <= min_count );
+ HDassert( min_count < max_count );
- count = (HDrand() % (max_count - min_count)) + min_count;
+ count = (HDrand() % (max_count - min_count)) + min_count;
- HDassert( min_count <= count );
- HDassert( count <= max_count );
+ HDassert( min_count <= count );
+ HDassert( count <= max_count );
- for ( i = 0; i < count; i++ )
- {
+ for ( i = 0; i < count; i++ )
+ {
local_pin_random_entry(file_ptr, min_idx, max_idx);
- }
+ }
- count = (HDrand() % (max_count - min_count)) + min_count;
+ count = (HDrand() % (max_count - min_count)) + min_count;
- HDassert( min_count <= count );
- HDassert( count <= max_count );
+ HDassert( min_count <= count );
+ HDassert( count <= max_count );
i = 0;
- idx = 0;
+ idx = 0;
- while ( ( i < count ) && ( idx >= 0 ) )
- {
- via_unprotect = ( (((unsigned)i) & 0x0001) == 0 );
- idx = local_unpin_next_pinned_entry(file_ptr, idx, via_unprotect);
- i++;
- }
+ while ( ( i < count ) && ( idx >= 0 ) )
+ {
+ via_unprotect = ( (((unsigned)i) & 0x0001) == 0 );
+ idx = local_unpin_next_pinned_entry(file_ptr, idx, via_unprotect);
+ i++;
+ }
}
return;
} /* local_pin_and_unpin_random_entries() */
-
+
/*****************************************************************************
* Function: local_pin_random_entry()
*
@@ -3309,13 +3303,13 @@ local_pin_random_entry(H5F_t * file_ptr,
HDassert( max_idx < NUM_DATA_ENTRIES );
HDassert( max_idx < virt_num_data_entries );
- do
- {
- idx = (HDrand() % (max_idx - min_idx)) + min_idx;
+ do
+ {
+ idx = (HDrand() % (max_idx - min_idx)) + min_idx;
HDassert( min_idx <= idx );
HDassert( idx <= max_idx );
- }
- while ( data[idx].global_pinned || data[idx].local_pinned );
+ }
+ while ( data[idx].global_pinned || data[idx].local_pinned );
pin_entry(file_ptr, idx, FALSE, FALSE);
}
@@ -3324,7 +3318,7 @@ local_pin_random_entry(H5F_t * file_ptr,
} /* local_pin_random_entry() */
-
+
/*****************************************************************************
* Function: local_unpin_all_entries()
*
@@ -3340,7 +3334,7 @@ local_pin_random_entry(H5F_t * file_ptr,
*****************************************************************************/
static void
local_unpin_all_entries(H5F_t * file_ptr,
- hbool_t via_unprotect)
+ hbool_t via_unprotect)
{
if ( nerrors == 0 ) {
@@ -3349,25 +3343,25 @@ local_unpin_all_entries(H5F_t * file_ptr,
HDassert( file_ptr );
- idx = 0;
+ idx = 0;
- while ( idx >= 0 )
- {
- idx = local_unpin_next_pinned_entry(file_ptr,
- idx, via_unprotect);
- }
+ while ( idx >= 0 )
+ {
+ idx = local_unpin_next_pinned_entry(file_ptr,
+ idx, via_unprotect);
+ }
}
return;
} /* local_unpin_all_entries() */
-
+
/*****************************************************************************
* Function: local_unpin_next_pinned_entry()
*
* Purpose: Find the next locally pinned entry after the specified
- * starting point, and unpin it.
+ * starting point, and unpin it.
*
* Do nothing if nerrors is non-zero on entry.
*
@@ -3382,7 +3376,7 @@ local_unpin_all_entries(H5F_t * file_ptr,
static int
local_unpin_next_pinned_entry(H5F_t * file_ptr,
int start_idx,
- hbool_t via_unprotect)
+ hbool_t via_unprotect)
{
int i = 0;
int idx = -1;
@@ -3394,39 +3388,39 @@ local_unpin_next_pinned_entry(H5F_t * file_ptr,
HDassert( start_idx < NUM_DATA_ENTRIES );
HDassert( start_idx < virt_num_data_entries );
- idx = start_idx;
+ idx = start_idx;
- while ( ( i < virt_num_data_entries ) &&
- ( ! ( data[idx].local_pinned ) ) )
- {
- i++;
- idx++;
- if ( idx >= virt_num_data_entries ) {
- idx = 0;
- }
- }
+ while ( ( i < virt_num_data_entries ) &&
+ ( ! ( data[idx].local_pinned ) ) )
+ {
+ i++;
+ idx++;
+ if ( idx >= virt_num_data_entries ) {
+ idx = 0;
+ }
+ }
- if ( data[idx].local_pinned ) {
+ if ( data[idx].local_pinned ) {
- unpin_entry(file_ptr, idx, FALSE, FALSE, via_unprotect);
+ unpin_entry(file_ptr, idx, FALSE, FALSE, via_unprotect);
- } else {
+ } else {
- idx = -1;
- }
+ idx = -1;
+ }
}
return(idx);
} /* local_unpin_next_pinned_entry() */
-
+
/*****************************************************************************
* Function: lock_and_unlock_random_entries()
*
* Purpose: Obtain a random number in the closed interval [min_count,
- * max_count]. Then protect and unprotect that number of
- * random entries.
+ * max_count]. Then protect and unprotect that number of
+ * random entries.
*
* Do nothing if nerrors is non-zero on entry.
*
@@ -3467,12 +3461,12 @@ lock_and_unlock_random_entries(H5F_t * file_ptr,
} /* lock_and_unlock_random_entries() */
-
+
/*****************************************************************************
* Function: lock_and_unlock_random_entry()
*
* Purpose: Protect and then unprotect a random entry with index in
- * the data[] array in the close interval [min_idx, max_idx].
+ * the data[] array in the closed interval [min_idx, max_idx].
*
* Do nothing if nerrors is non-zero on entry.
*
@@ -3502,15 +3496,15 @@ lock_and_unlock_random_entry(H5F_t * file_ptr,
HDassert( min_idx <= idx );
HDassert( idx <= max_idx );
- lock_entry(file_ptr, idx);
- unlock_entry(file_ptr, idx, H5AC__NO_FLAGS_SET);
+ lock_entry(file_ptr, idx);
+ unlock_entry(file_ptr, idx, H5AC__NO_FLAGS_SET);
}
return;
} /* lock_and_unlock_random_entry() */
-
+
/*****************************************************************************
* Function: lock_entry()
*
@@ -3525,9 +3519,9 @@ lock_and_unlock_random_entry(H5F_t * file_ptr,
*
* Modifications:
*
- * JRM -- 7/11/06
- * Modified asserts to handle the new local_len field in
- * datum.
+ * JRM -- 7/11/06
+ * Modified asserts to handle the new local_len field in
+ * datum.
*
*****************************************************************************/
static void
@@ -3544,30 +3538,29 @@ lock_entry(H5F_t * file_ptr,
entry_ptr = &(data[idx]);
- HDassert( ! (entry_ptr->locked) );
+ HDassert( ! (entry_ptr->locked) );
- cache_entry_ptr = (H5C_cache_entry_t *)H5AC_protect(file_ptr,
- H5AC_ind_read_dxpl_id,
+ cache_entry_ptr = (H5C_cache_entry_t *)H5AC_protect(file_ptr,
&(types[0]), entry_ptr->base_addr,
- &entry_ptr->base_addr,
+ &entry_ptr->base_addr,
H5AC__NO_FLAGS_SET);
if ( ( cache_entry_ptr != (void *)(&(entry_ptr->header)) ) ||
( entry_ptr->header.type != &(types[0]) ) ||
( ( entry_ptr->len != entry_ptr->header.size ) &&
- ( entry_ptr->local_len != entry_ptr->header.size ) ) ||
+ ( entry_ptr->local_len != entry_ptr->header.size ) ) ||
( entry_ptr->base_addr != entry_ptr->header.addr ) ) {
nerrors++;
if ( verbose ) {
- HDfprintf(stdout, "%d:%s: error in H5AC_protect().\n",
- world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: error in H5AC_protect().\n",
+ world_mpi_rank, FUNC);
}
} else {
- entry_ptr->locked = TRUE;
+ entry_ptr->locked = TRUE;
- }
+ }
HDassert( ((entry_ptr->header).type)->id == DATUM_ENTRY_TYPE );
}
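
lock_entry() and its counterpart unlock_entry() wrap H5AC_protect()/H5AC_unprotect(); with the dxpl argument gone, a typical client-side mutation in this test bed chains the helpers like this (a sketch using names defined in this file):

    lock_entry(file_ptr, idx);                        /* H5AC_protect()                */
    /* ... modify the entry in place ... */
    unlock_entry(file_ptr, idx, H5AC__DIRTIED_FLAG);  /* H5AC_unprotect(), marks dirty */
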
@@ -3604,33 +3597,33 @@ mark_entry_dirty(int32_t idx)
entry_ptr = &(data[idx]);
HDassert ( entry_ptr->locked || entry_ptr->global_pinned );
- HDassert ( ! (entry_ptr->local_pinned) );
+ HDassert ( ! (entry_ptr->local_pinned) );
(entry_ptr->ver)++;
entry_ptr->dirty = TRUE;
- result = H5AC_mark_entry_dirty( (void *)entry_ptr);
+ result = H5AC_mark_entry_dirty( (void *)entry_ptr);
if ( result < 0 ) {
nerrors++;
if ( verbose ) {
- HDfprintf(stdout,
+ HDfprintf(stdout,
"%d:%s: error in H5AC_mark_entry_dirty().\n",
world_mpi_rank, FUNC);
}
}
- else if ( ! ( entry_ptr->locked ) )
- {
- global_dirty_pins++;
- }
+ else if ( ! ( entry_ptr->locked ) )
+ {
+ global_dirty_pins++;
+ }
}
return;
} /* mark_entry_dirty() */
-
+
/*****************************************************************************
* Function: pin_entry()
*
@@ -3647,8 +3640,8 @@ mark_entry_dirty(int32_t idx)
static void
pin_entry(H5F_t * file_ptr,
int32_t idx,
- hbool_t global,
- hbool_t dirty)
+ hbool_t global,
+ hbool_t dirty)
{
unsigned int flags = H5AC__PIN_ENTRY_FLAG;
struct datum * entry_ptr;
@@ -3661,50 +3654,49 @@ pin_entry(H5F_t * file_ptr,
entry_ptr = &(data[idx]);
- HDassert ( ! (entry_ptr->global_pinned) );
- HDassert ( ! (entry_ptr->local_pinned) );
- HDassert ( ! ( dirty && ( ! global ) ) );
+ HDassert ( ! (entry_ptr->global_pinned) );
+ HDassert ( ! (entry_ptr->local_pinned) );
+ HDassert ( ! ( dirty && ( ! global ) ) );
- lock_entry(file_ptr, idx);
+ lock_entry(file_ptr, idx);
- if ( dirty ) {
+ if ( dirty ) {
- flags |= H5AC__DIRTIED_FLAG;
- }
+ flags |= H5AC__DIRTIED_FLAG;
+ }
- unlock_entry(file_ptr, idx, flags);
+ unlock_entry(file_ptr, idx, flags);
HDassert( (entry_ptr->header).is_pinned );
- HDassert( ( ! dirty ) || ( (entry_ptr->header).is_dirty ) );
+ HDassert( ( ! dirty ) || ( (entry_ptr->header).is_dirty ) );
- if ( global ) {
+ if ( global ) {
- entry_ptr->global_pinned = TRUE;
+ entry_ptr->global_pinned = TRUE;
- global_pins++;
+ global_pins++;
- } else {
+ } else {
- entry_ptr->local_pinned = TRUE;
+ entry_ptr->local_pinned = TRUE;
- local_pins++;
+ local_pins++;
- }
+ }
}
return;
} /* pin_entry() */
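pin_entry() pins through the protect/unprotect cycle rather than calling a pin routine directly: the pin is requested as an unprotect flag. A condensed sketch of that path, assuming the lock_entry()/unlock_entry() helpers defined earlier in this file:

/* Pin the entry at idx via the unprotect flags; dirty implies a
 * global pin in the caller, per the asserts above.
 */
static void
pin_entry_sketch(H5F_t * file_ptr, int32_t idx, hbool_t dirty)
{
    unsigned int flags = H5AC__PIN_ENTRY_FLAG;

    if ( dirty )
        flags |= H5AC__DIRTIED_FLAG;

    lock_entry(file_ptr, idx);            /* H5AC_protect() underneath */
    unlock_entry(file_ptr, idx, flags);   /* H5AC_unprotect() applies the pin */
}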
-#ifdef H5_METADATA_TRACE_FILE
-
+
/*****************************************************************************
* Function: pin_protected_entry()
*
 * Purpose:     Pin the protected entry indicated by idx, and mark it
- *        as dirty, and increment its version number.
+ *              as either globally or locally pinned per the global flag.
*
- * Do nothing if nerrors is non-zero on entry.
+ * Do nothing if nerrors is non-zero on entry.
*
* Return: void
*
@@ -3714,7 +3706,7 @@ pin_entry(H5F_t * file_ptr,
*****************************************************************************/
static void
pin_protected_entry(int32_t idx,
- hbool_t global)
+ hbool_t global)
{
herr_t result;
struct datum * entry_ptr;
@@ -3728,36 +3720,36 @@ pin_protected_entry(int32_t idx,
if ( nerrors == 0 ) {
- result = H5AC_pin_protected_entry((void *)entry_ptr);
+ result = H5AC_pin_protected_entry((void *)entry_ptr);
if ( ( result < 0 ) ||
( entry_ptr->header.type != &(types[0]) ) ||
( ( entry_ptr->len != entry_ptr->header.size ) &&
- ( entry_ptr->local_len != entry_ptr->header.size ) )||
+ ( entry_ptr->local_len != entry_ptr->header.size ) )||
( entry_ptr->base_addr != entry_ptr->header.addr ) ||
- ( ! ( (entry_ptr->header).is_pinned ) ) ) {
+ ( ! ( (entry_ptr->header).is_pinned ) ) ) {
nerrors++;
if ( verbose ) {
- HDfprintf(stdout,
- "%d:%s: Error in H5AC_pin_protected entry().\n",
- world_mpi_rank, FUNC);
+ HDfprintf(stdout,
+                          "%d:%s: Error in H5AC_pin_protected_entry().\n",
+ world_mpi_rank, FUNC);
}
}
if ( global ) {
- entry_ptr->global_pinned = TRUE;
+ entry_ptr->global_pinned = TRUE;
- global_pins++;
+ global_pins++;
- } else {
+ } else {
- entry_ptr->local_pinned = TRUE;
+ entry_ptr->local_pinned = TRUE;
- local_pins++;
+ local_pins++;
- }
+ }
HDassert( ((entry_ptr->header).type)->id == DATUM_ENTRY_TYPE );
}
@@ -3765,18 +3757,17 @@ pin_protected_entry(int32_t idx,
return;
} /* pin_protected_entry() */
-#endif /* H5_METADATA_TRACE_FILE */
-
+
/*****************************************************************************
* Function: move_entry()
*
 * Purpose:     Move the entry indicated by old_idx to the entry indicated
- *        by new_idex.  Touch up the data array so that flush will
- *        not choke.
+ *              by new_idx.  Touch up the data array so that flush will
+ *              not choke.
*
- * Do nothing if nerrors isn't zero, or if old_idx equals
- * new_idx.
+ * Do nothing if nerrors isn't zero, or if old_idx equals
+ * new_idx.
*
* Return: void
*
@@ -3790,8 +3781,8 @@ move_entry(H5F_t * file_ptr,
int32_t new_idx)
{
herr_t result;
- int tmp;
- size_t tmp_len;
+ int tmp;
+ size_t tmp_len;
haddr_t old_addr = HADDR_UNDEF;
haddr_t new_addr = HADDR_UNDEF;
struct datum * old_entry_ptr;
@@ -3819,12 +3810,12 @@ move_entry(H5F_t * file_ptr,
/* Moving will mark the entry dirty if it is not already */
old_entry_ptr->dirty = TRUE;
- /* touch up versions, base_addrs, and data_index. Do this
- * now as it is possible that the rename will trigger a
+ /* touch up versions, base_addrs, and data_index. Do this
+ * now as it is possible that the rename will trigger a
* sync point.
*/
if(old_entry_ptr->ver < new_entry_ptr->ver)
- old_entry_ptr->ver = new_entry_ptr->ver;
+ old_entry_ptr->ver = new_entry_ptr->ver;
else
(old_entry_ptr->ver)++;
@@ -3838,20 +3829,20 @@ move_entry(H5F_t * file_ptr,
old_entry_ptr->index = new_entry_ptr->index;
new_entry_ptr->index = tmp;
- if(old_entry_ptr->local_len != new_entry_ptr->local_len) {
- tmp_len = old_entry_ptr->local_len;
- old_entry_ptr->local_len = new_entry_ptr->local_len;
- new_entry_ptr->local_len = tmp_len;
- } /* end if */
+ if(old_entry_ptr->local_len != new_entry_ptr->local_len) {
+ tmp_len = old_entry_ptr->local_len;
+ old_entry_ptr->local_len = new_entry_ptr->local_len;
+ new_entry_ptr->local_len = tmp_len;
+ } /* end if */
- result = H5AC_move_entry(file_ptr, &(types[0]), old_addr, new_addr, H5AC_ind_read_dxpl_id);
+ result = H5AC_move_entry(file_ptr, &(types[0]), old_addr, new_addr);
if ( ( result < 0 ) || ( old_entry_ptr->header.addr != new_addr ) ) {
nerrors++;
if ( verbose ) {
- HDfprintf(stdout, "%d:%s: H5AC_move_entry() failed.\n",
- world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: H5AC_move_entry() failed.\n",
+ world_mpi_rank, FUNC);
}
} else {
@@ -3860,27 +3851,27 @@ move_entry(H5F_t * file_ptr,
if ( ! (old_entry_ptr->header.is_dirty) ) {
- /* it is possible that we just exceeded the dirty bytes
- * threshold, triggering a write of the newly inserted
- * entry. Test for this, and only flag an error if this
- * is not the case.
- */
+ /* it is possible that we just exceeded the dirty bytes
+ * threshold, triggering a write of the newly inserted
+ * entry. Test for this, and only flag an error if this
+ * is not the case.
+ */
- struct H5AC_aux_t * aux_ptr;
+ struct H5AC_aux_t * aux_ptr;
- aux_ptr = ((H5AC_aux_t *)(file_ptr->shared->cache->aux_ptr));
+ aux_ptr = ((H5AC_aux_t *)(file_ptr->shared->cache->aux_ptr));
- if ( ! ( ( aux_ptr != NULL ) &&
- ( aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC ) &&
- ( aux_ptr->dirty_bytes == 0 ) ) ) {
+ if ( ! ( ( aux_ptr != NULL ) &&
+ ( aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC ) &&
+ ( aux_ptr->dirty_bytes == 0 ) ) ) {
nerrors++;
if ( verbose ) {
- HDfprintf(stdout,
+ HDfprintf(stdout,
"%d:%s: data[%d].header.is_dirty = %d.\n",
- world_mpi_rank, FUNC, new_idx,
+ world_mpi_rank, FUNC, new_idx,
(int)(data[new_idx].header.is_dirty));
- }
+ }
}
} else {
@@ -3891,19 +3882,19 @@ move_entry(H5F_t * file_ptr,
} /* move_entry() */
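The one behavioral change in this function is the H5AC_move_entry() signature, which loses its DXPL argument now that the pushed API context carries that state. A minimal sketch of the updated call, assuming the file-scope types[] table and nerrors counter used throughout this test:

static void
move_entry_sketch(H5F_t * file_ptr, haddr_t old_addr, haddr_t new_addr)
{
    if ( H5AC_move_entry(file_ptr, &(types[0]), old_addr, new_addr) < 0 ) {
        nerrors++;   /* on success, the entry's header.addr becomes new_addr */
    }
}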
-
+
/*****************************************************************************
*
- * Function: reset_server_counts()
+ * Function: reset_server_counts()
*
- * Purpose: Send a message to the server process requesting it to reset
- * its counters. Await confirmation message.
+ * Purpose: Send a message to the server process requesting it to reset
+ * its counters. Await confirmation message.
*
- * Return: Success: TRUE
+ * Return: Success: TRUE
*
- * Failure: FALSE
+ * Failure: FALSE
*
- * Programmer: JRM -- 5/6/10
+ * Programmer: JRM -- 5/6/10
*
*****************************************************************************/
static hbool_t
@@ -3958,7 +3949,7 @@ reset_server_counts(void)
nerrors++;
success = FALSE;
if ( verbose ) {
- HDfprintf(stdout,
+ HDfprintf(stdout,
"%d:%s: Bad data in req r/w counter reset reply.\n",
world_mpi_rank, FUNC);
}
@@ -3969,15 +3960,15 @@ reset_server_counts(void)
} /* reset_server_counts() */
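reset_server_counts() uses the same request/reply exchange as every other piece of server traffic in this file. A sketch of the shape of that exchange, reusing the mssg_t fields visible in server_smoke_check() below; the two request-code names are assumptions made for illustration:

struct mssg_t mssg;

mssg.req       = REQ_RW_COUNT_RESET_CODE;        /* assumed request code */
mssg.src       = world_mpi_rank;
mssg.dest      = world_server_mpi_rank;
mssg.mssg_num  = -1;                             /* set by send function */
mssg.base_addr = 0;
mssg.len       = 0;
mssg.ver       = 0;
mssg.count     = 0;
mssg.magic     = MSSG_MAGIC;

if ( send_mssg(&mssg, FALSE) &&
     recv_mssg(&mssg, REQ_RW_COUNT_RESET_RPLY_CODE) ) {  /* assumed reply code */

    /* reply validated -- the server's counters are now zero */
}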
-
+
/*****************************************************************************
* Function: resize_entry()
*
* Purpose: Resize the pinned entry indicated by idx to the new_size.
- * Note that new_size must be greater than 0, and must be
- * less than or equal to the original size of the entry.
+ * Note that new_size must be greater than 0, and must be
+ * less than or equal to the original size of the entry.
*
- * Do nothing if nerrors isn't zero.
+ * Do nothing if nerrors isn't zero.
*
* Return: void
*
@@ -3987,7 +3978,7 @@ reset_server_counts(void)
*****************************************************************************/
static void
resize_entry(int32_t idx,
- size_t new_size)
+ size_t new_size)
{
herr_t result;
struct datum * entry_ptr;
@@ -4001,21 +3992,21 @@ resize_entry(int32_t idx,
HDassert( ((entry_ptr->header).type)->id == DATUM_ENTRY_TYPE );
HDassert( !(entry_ptr->locked) );
- HDassert( ( entry_ptr->global_pinned ) &&
- ( ! entry_ptr->local_pinned ) );
- HDassert( ( entry_ptr->header.size == entry_ptr->len ) ||
- ( entry_ptr->header.size == entry_ptr->local_len ) );
- HDassert( new_size > 0 );
- HDassert( new_size <= entry_ptr->len );
+ HDassert( ( entry_ptr->global_pinned ) &&
+ ( ! entry_ptr->local_pinned ) );
+ HDassert( ( entry_ptr->header.size == entry_ptr->len ) ||
+ ( entry_ptr->header.size == entry_ptr->local_len ) );
+ HDassert( new_size > 0 );
+ HDassert( new_size <= entry_ptr->len );
- result = H5AC_resize_entry((void *)entry_ptr, new_size);
+ result = H5AC_resize_entry((void *)entry_ptr, new_size);
if ( result < 0 ) {
nerrors++;
if ( verbose ) {
- HDfprintf(stdout, "%d:%s: H5AC_resize_entry() failed.\n",
- world_mpi_rank, FUNC);
+ HDfprintf(stdout, "%d:%s: H5AC_resize_entry() failed.\n",
+ world_mpi_rank, FUNC);
}
} else {
@@ -4025,7 +4016,7 @@ resize_entry(int32_t idx,
HDassert( entry_ptr->header.size == new_size );
entry_ptr->dirty = TRUE;
- entry_ptr->local_len = new_size;
+ entry_ptr->local_len = new_size;
/* touch up version. */
@@ -4037,24 +4028,24 @@ resize_entry(int32_t idx,
} /* resize_entry() */
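A condensed sketch of the resize path above: the entry must already be globally pinned, and new_size may only shrink it (never past zero, never beyond its original length):

static void
resize_entry_sketch(struct datum * entry_ptr, size_t new_size)
{
    HDassert( ( new_size > 0 ) && ( new_size <= entry_ptr->len ) );

    if ( H5AC_resize_entry((void *)entry_ptr, new_size) < 0 ) {
        nerrors++;
    } else {
        entry_ptr->dirty     = TRUE;       /* resizing dirties the entry */
        entry_ptr->local_len = new_size;   /* header.size now matches */
    }
}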
-
+
/*****************************************************************************
*
- * Function: setup_cache_for_test()
+ * Function: setup_cache_for_test()
*
- * Purpose: Setup the parallel cache for a test, and return the file id
- * and a pointer to the cache's internal data structures.
+ * Purpose: Setup the parallel cache for a test, and return the file id
+ * and a pointer to the cache's internal data structures.
*
- * To do this, we must create a file, flush it (so that we
- * don't have to worry about entries in the metadata cache),
- * look up the address of the metadata cache, and then instruct
- * the cache to omit sanity checks on dxpl IDs.
+ * To do this, we must create a file, flush it (so that we
+ * don't have to worry about entries in the metadata cache),
+ * look up the address of the metadata cache, and then instruct
+ * the cache to omit sanity checks on dxpl IDs.
*
- * Return: Success: TRUE
+ * Return: Success: TRUE
*
- * Failure: FALSE
+ * Failure: FALSE
*
- * Programmer: JRM -- 1/4/06
+ * Programmer: JRM -- 1/4/06
*
*****************************************************************************/
static hbool_t
@@ -4078,26 +4069,29 @@ setup_cache_for_test(hid_t * fid_ptr,
fid = H5Fcreate(filenames[0], H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
+ /* Push API context */
+ H5CX_push();
+
if ( fid < 0 ) {
nerrors++;
if ( verbose ) {
- HDfprintf(stdout, "%d:%s: H5Fcreate() failed.\n",
+ HDfprintf(stdout, "%d:%s: H5Fcreate() failed.\n",
world_mpi_rank, FUNC);
}
} else if ( H5Fflush(fid, H5F_SCOPE_GLOBAL) < 0 ) {
nerrors++;
if ( verbose ) {
- HDfprintf(stdout, "%d:%s: H5Fflush() failed.\n",
+ HDfprintf(stdout, "%d:%s: H5Fflush() failed.\n",
world_mpi_rank, FUNC);
}
} else {
- file_ptr = (H5F_t *)H5I_object_verify(fid, H5I_FILE);
+ file_ptr = (H5F_t *)H5VL_object_verify(fid, H5I_FILE);
}
if ( file_ptr == NULL ) {
nerrors++;
if ( verbose ) {
- HDfprintf(stdout, "%d:%s: Can't get file_ptr.\n",
+ HDfprintf(stdout, "%d:%s: Can't get file_ptr.\n",
world_mpi_rank, FUNC);
}
} else {
@@ -4107,13 +4101,13 @@ setup_cache_for_test(hid_t * fid_ptr,
if ( cache_ptr == NULL ) {
nerrors++;
if ( verbose ) {
- HDfprintf(stdout, "%d:%s: Can't get cache_ptr.\n",
+ HDfprintf(stdout, "%d:%s: Can't get cache_ptr.\n",
world_mpi_rank, FUNC);
}
} else if ( cache_ptr->magic != H5C__H5C_T_MAGIC ) {
nerrors++;
if ( verbose ) {
- HDfprintf(stdout, "%d:%s: Bad cache_ptr magic.\n",
+ HDfprintf(stdout, "%d:%s: Bad cache_ptr magic.\n",
world_mpi_rank, FUNC);
}
} else {
@@ -4132,7 +4126,7 @@ setup_cache_for_test(hid_t * fid_ptr,
if ( H5AC_get_cache_auto_resize_config(cache_ptr, &config)
!= SUCCEED ) {
- HDfprintf(stdout,
+ HDfprintf(stdout,
"%d:%s: H5AC_get_cache_auto_resize_config(1) failed.\n",
world_mpi_rank, FUNC);
@@ -4142,9 +4136,9 @@ setup_cache_for_test(hid_t * fid_ptr,
config.metadata_write_strategy = metadata_write_strategy;
if ( H5AC_set_cache_auto_resize_config(cache_ptr, &config)
- != SUCCEED ) {
+ != SUCCEED ) {
- HDfprintf(stdout,
+ HDfprintf(stdout,
"%d:%s: H5AC_set_cache_auto_resize_config() failed.\n",
world_mpi_rank, FUNC);
@@ -4167,15 +4161,15 @@ setup_cache_for_test(hid_t * fid_ptr,
nerrors++;
if ( verbose ) {
- HDfprintf(stdout, "%d:%s: cache_ptr->aux_ptr == NULL.\n",
+ HDfprintf(stdout, "%d:%s: cache_ptr->aux_ptr == NULL.\n",
world_mpi_rank, FUNC);
}
- } else if ( ((H5AC_aux_t *)(cache_ptr->aux_ptr))->magic !=
+ } else if ( ((H5AC_aux_t *)(cache_ptr->aux_ptr))->magic !=
H5AC__H5AC_AUX_T_MAGIC ) {
nerrors++;
if ( verbose ) {
- HDfprintf(stdout,
+ HDfprintf(stdout,
"%d:%s: cache_ptr->aux_ptr->magic != H5AC__H5AC_AUX_T_MAGIC.\n",
world_mpi_rank, FUNC);
}
@@ -4184,14 +4178,14 @@ setup_cache_for_test(hid_t * fid_ptr,
nerrors++;
if ( verbose ) {
- HDfprintf(stdout,
+ HDfprintf(stdout,
"%d:%s: bad cache_ptr->aux_ptr->metadata_write_strategy\n",
world_mpi_rank, FUNC);
}
}
}
- /* also verify that the expected metadata write strategy is reported
+ /* also verify that the expected metadata write strategy is reported
* when we get the current configuration.
*/
@@ -4202,41 +4196,41 @@ setup_cache_for_test(hid_t * fid_ptr,
if ( H5AC_get_cache_auto_resize_config(cache_ptr, &test_config)
!= SUCCEED ) {
- HDfprintf(stdout,
+ HDfprintf(stdout,
"%d:%s: H5AC_get_cache_auto_resize_config(2) failed.\n",
world_mpi_rank, FUNC);
- } else if ( test_config.metadata_write_strategy !=
+ } else if ( test_config.metadata_write_strategy !=
metadata_write_strategy ) {
nerrors++;
if ( verbose ) {
- HDfprintf(stdout,
+ HDfprintf(stdout,
"%d:%s: unexpected metadata_write_strategy.\n",
world_mpi_rank, FUNC);
}
}
}
- /* allocate space for test entries -- do this before we set the
+ /* allocate space for test entries -- do this before we set the
* sync point done callback as it will dirty the superblock, requiring
* another flush. If the sync point done callback is set, this will
* cause a spurious failure.
*/
if ( success ) { /* allocate space for test entries */
- actual_base_addr = H5MF_alloc(file_ptr, H5FD_MEM_DEFAULT, H5AC_ind_read_dxpl_id,
+ actual_base_addr = H5MF_alloc(file_ptr, H5FD_MEM_DEFAULT,
(hsize_t)(max_addr + BASE_ADDR));
if ( actual_base_addr == HADDR_UNDEF ) {
success = FALSE;
- nerrors++;
+ nerrors++;
if ( verbose ) {
- HDfprintf(stdout, "%d:%s: H5MF_alloc() failed.\n",
+ HDfprintf(stdout, "%d:%s: H5MF_alloc() failed.\n",
world_mpi_rank, FUNC);
}
@@ -4247,10 +4241,10 @@ setup_cache_for_test(hid_t * fid_ptr,
 * if the size of the superblock is increased.
*/
success = FALSE;
- nerrors++;
+ nerrors++;
if ( verbose ) {
- HDfprintf(stdout, "%d:%s: actual_base_addr > BASE_ADDR.\n",
+ HDfprintf(stdout, "%d:%s: actual_base_addr > BASE_ADDR.\n",
world_mpi_rank, FUNC);
}
}
@@ -4263,7 +4257,7 @@ setup_cache_for_test(hid_t * fid_ptr,
if ( H5Fflush(fid, H5F_SCOPE_GLOBAL) < 0 ) {
nerrors++;
if ( verbose ) {
- HDfprintf(stdout, "%d:%s: second H5Fflush() failed.\n",
+ HDfprintf(stdout, "%d:%s: second H5Fflush() failed.\n",
world_mpi_rank, FUNC);
}
}
@@ -4273,62 +4267,62 @@ setup_cache_for_test(hid_t * fid_ptr,
if ( success ) {
- if ( H5AC__set_write_done_callback(cache_ptr, do_sync) != SUCCEED ) {
+ if ( H5AC__set_write_done_callback(cache_ptr, do_sync) != SUCCEED ) {
nerrors++;
if ( verbose ) {
- HDfprintf(stdout,
- "%d:%s: H5C_set_write_done_callback failed.\n",
+ HDfprintf(stdout,
+ "%d:%s: H5C_set_write_done_callback failed.\n",
world_mpi_rank, FUNC);
}
- }
+ }
}
#endif /* DO_SYNC_AFTER_WRITE */
if ( success ) {
- if ( H5AC__set_sync_point_done_callback(cache_ptr, verify_writes) != SUCCEED ) {
+ if ( H5AC__set_sync_point_done_callback(cache_ptr, verify_writes) != SUCCEED ) {
nerrors++;
if ( verbose ) {
- HDfprintf(stdout,
- "%d:%s: H5AC__set_sync_point_done_callback failed.\n",
+ HDfprintf(stdout,
+ "%d:%s: H5AC__set_sync_point_done_callback failed.\n",
world_mpi_rank, FUNC);
}
- }
+ }
}
return(success);
} /* setup_cache_for_test() */
-
+
/*****************************************************************************
*
- * Function: verify_writes()
+ * Function: verify_writes()
*
- * Purpose: Verify that the indicated entries have been written exactly
- * once each, and that the indicated total number of writes
- * has been processed by the server process. Flag an error if
- * discrepency is noted. Finally reset the counters maintained
- * by the server process.
+ * Purpose: Verify that the indicated entries have been written exactly
+ * once each, and that the indicated total number of writes
+ *              has been processed by the server process.  Flag an error if a
+ *              discrepancy is noted.  Finally, reset the counters maintained
+ * by the server process.
*
- * This function should only be called by the metadata cache
- * as the "sync point done" function, as it must do some
- * synchronization to avoid false positives.
+ * This function should only be called by the metadata cache
+ * as the "sync point done" function, as it must do some
+ * synchronization to avoid false positives.
*
- * Note that at present, this function does not allow for the
- * case in which one or more of the indicated entries should
- * have been written more than once since the last time the
- * server process's counters were reset. That is fine for now,
- * as with the current metadata write strategies, no entry
- * should be written more than once per sync point. If this
- * changes this limitation will have to be revisited.
+ * Note that at present, this function does not allow for the
+ * case in which one or more of the indicated entries should
+ * have been written more than once since the last time the
+ * server process's counters were reset. That is fine for now,
+ * as with the current metadata write strategies, no entry
+ * should be written more than once per sync point. If this
+ *              changes, this limitation will have to be revisited.
*
- * Return: void.
+ * Return: void.
*
- * Programmer: JRM -- 5/9/10
+ * Programmer: JRM -- 5/9/10
*
*****************************************************************************/
static void
@@ -4404,7 +4398,7 @@ verify_writes(unsigned num_writes, haddr_t *written_entries_tbl)
}
/* final barrier to ensure that all processes think that the server
- * counters have been reset before we leave the sync point. This
+ * counters have been reset before we leave the sync point. This
 * barrier is probably not necessary at this point in time (5/9/10),
* but I can think of at least one likely change to the metadata write
* strategies that will require it -- hence its insertion now.
@@ -4426,24 +4420,24 @@ verify_writes(unsigned num_writes, haddr_t *written_entries_tbl)
} /* verify_writes() */
-
+
/*****************************************************************************
*
- * Function: setup_rand()
+ * Function: setup_rand()
*
- * Purpose: Use gettimeofday() to obtain a seed for rand(), print the
- * seed to stdout, and then pass it to srand().
+ * Purpose: Use gettimeofday() to obtain a seed for rand(), print the
+ * seed to stdout, and then pass it to srand().
*
- * Increment nerrors if any errors are detected.
+ * Increment nerrors if any errors are detected.
*
- * Return: void.
+ * Return: void.
*
- * Programmer: JRM -- 1/12/06
+ * Programmer: JRM -- 1/12/06
*
* Modifications:
*
- * JRM -- 5/9/06
- * Modified function to facilitate setting predefined seeds.
+ * JRM -- 5/9/06
+ * Modified function to facilitate setting predefined seeds.
*
*****************************************************************************/
static void
@@ -4458,13 +4452,13 @@ setup_rand(void)
if ( ( use_predefined_seeds ) &&
( world_mpi_size == num_predefined_seeds ) ) {
- HDassert( world_mpi_rank >= 0 );
- HDassert( world_mpi_rank < world_mpi_size );
+ HDassert( world_mpi_rank >= 0 );
+ HDassert( world_mpi_rank < world_mpi_size );
seed = predefined_seeds[world_mpi_rank];
- HDfprintf(stdout, "%d:%s: predefined_seed = %d.\n",
+ HDfprintf(stdout, "%d:%s: predefined_seed = %d.\n",
world_mpi_rank, FUNC, seed);
- fflush(stdout);
+ fflush(stdout);
HDsrand(seed);
} else {
@@ -4473,7 +4467,7 @@ setup_rand(void)
nerrors++;
if ( verbose ) {
- HDfprintf(stdout, "%d:%s: gettimeofday() failed.\n",
+ HDfprintf(stdout, "%d:%s: gettimeofday() failed.\n",
world_mpi_rank, FUNC);
}
} else {
@@ -4491,21 +4485,21 @@ setup_rand(void)
} /* setup_rand() */
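A standalone sketch of the seeding scheme: derive the seed from gettimeofday(), echo it so a failing run can be reproduced, then hand it to srand() (HDsrand()/HDrand() are thin wrappers over srand()/rand()):

#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>

int main(void)
{
    struct timeval tv;
    unsigned       seed;

    if ( gettimeofday(&tv, NULL) != 0 )
        return 1;                        /* the test bumps nerrors here */

    seed = (unsigned)tv.tv_usec;
    printf("seed = %u\n", seed);         /* print before use, as the test does */
    srand(seed);

    printf("first draw = %d\n", rand());
    return 0;
}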
-
+
/*****************************************************************************
*
- * Function: take_down_cache()
+ * Function: take_down_cache()
*
- * Purpose: Take down the parallel cache after a test.
+ * Purpose: Take down the parallel cache after a test.
*
- * To do this, we must close the file, and delete if if
- * possible.
+ *              To do this, we must close the file, and delete it if
+ * possible.
*
- * Return: Success: TRUE
+ * Return: Success: TRUE
*
- * Failure: FALSE
+ * Failure: FALSE
*
- * Programmer: JRM -- 1/4/06
+ * Programmer: JRM -- 1/4/06
*
*****************************************************************************/
static hbool_t
@@ -4513,7 +4507,7 @@ take_down_cache(hid_t fid, H5C_t * cache_ptr)
{
hbool_t success = TRUE; /* will set to FALSE if appropriate. */
- /* flush the file -- this should write out any remaining test
+ /* flush the file -- this should write out any remaining test
* entries in the cache.
*/
if ( ( success ) && ( H5Fflush(fid, H5F_SCOPE_GLOBAL) < 0 ) ) {
@@ -4526,7 +4520,7 @@ take_down_cache(hid_t fid, H5C_t * cache_ptr)
}
}
- /* Now reset the sync point done callback. Must do this as with
+ /* Now reset the sync point done callback. Must do this as with
* the SWMR mods, the cache will do additional I/O on file close
* un-related to the test entries, and thereby corrupt our counts
* of entry writes.
@@ -4557,7 +4551,10 @@ take_down_cache(hid_t fid, H5C_t * cache_ptr)
world_mpi_rank, FUNC);
}
- }
+ }
+
+ /* Pop API context */
+ H5CX_pop();
if ( success ) {
@@ -4574,11 +4571,11 @@ take_down_cache(hid_t fid, H5C_t * cache_ptr)
}
} else {
- /* verify that there have been no further writes of test
+ /* verify that there have been no further writes of test
* entries during the close
*/
success = verify_total_writes(0);
-
+
}
}
@@ -4586,19 +4583,19 @@ take_down_cache(hid_t fid, H5C_t * cache_ptr)
} /* take_down_cache() */
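Taken together with setup_cache_for_test(), this completes the API-context pairing the patch introduces: H5CX_push() immediately after H5Fcreate(), H5CX_pop() once the file is closed. The shape of the pairing, sketched with the argument-free internal calls used above:

H5CX_push();    /* after H5Fcreate(): context available to H5AC/H5C calls */

/* ... protects, unprotects, moves, flushes -- the body of the test ... */

H5CX_pop();     /* after H5Fclose(): discard the context at teardown */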
-
+
/*****************************************************************************
* Function: verify_entry_reads
*
- * Purpose: Query the server to determine the number of times the
- * indicated entry has been read since the last time the
- * server counters were reset.
+ * Purpose: Query the server to determine the number of times the
+ * indicated entry has been read since the last time the
+ * server counters were reset.
*
- * Return TRUE if successful, and if the supplied expected
- * number of reads matches the number of reads reported by
- * the server process.
+ * Return TRUE if successful, and if the supplied expected
+ * number of reads matches the number of reads reported by
+ * the server process.
*
- * Return FALSE and flag an error otherwise.
+ * Return FALSE and flag an error otherwise.
*
* Return: TRUE if successful, FALSE otherwise.
*
@@ -4612,7 +4609,7 @@ verify_entry_reads(haddr_t addr,
int expected_entry_reads)
{
hbool_t success = TRUE;
- int reported_entry_reads;
+ int reported_entry_reads = 0;
struct mssg_t mssg;
if ( success ) {
@@ -4670,42 +4667,42 @@ verify_entry_reads(haddr_t addr,
}
} else {
- reported_entry_reads = mssg.count;
+ H5_CHECKED_ASSIGN(reported_entry_reads, int, mssg.count, unsigned);
}
}
- if ( ! success ) {
+ if ( success ) {
if ( reported_entry_reads != expected_entry_reads ) {
nerrors++;
success = FALSE;
if ( verbose ) {
- HDfprintf(stdout,
+ HDfprintf(stdout,
"%d:%s: rep/exp entry 0x%llx reads mismatch (%ld/%ld).\n",
world_mpi_rank, FUNC, (long long)addr,
reported_entry_reads, expected_entry_reads);
}
- }
+ }
}
return(success);
} /* verify_entry_reads() */
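H5_CHECKED_ASSIGN(reported_entry_reads, int, mssg.count, unsigned) replaces the bare assignment so the unsigned-to-int narrowing is range-checked rather than silent. Roughly, the idiom behaves like the hand-rolled approximation below (not the macro's actual expansion):

#include <assert.h>
#include <limits.h>

/* Approximation of H5_CHECKED_ASSIGN(dst, int, src, unsigned):
 * verify the value fits before narrowing it.
 */
static int checked_unsigned_to_int(unsigned src)
{
    assert( src <= (unsigned)INT_MAX );   /* refuse a lossy narrowing */
    return (int)src;
}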
-
+
/*****************************************************************************
* Function: verify_entry_writes
*
- * Purpose: Query the server to determine the number of times the
- * indicated entry has been written since the last time the
- * server counters were reset.
+ * Purpose: Query the server to determine the number of times the
+ * indicated entry has been written since the last time the
+ * server counters were reset.
*
- * Return TRUE if successful, and if the supplied expected
- * number of reads matches the number of reads reported by
- * the server process.
+ * Return TRUE if successful, and if the supplied expected
+ *              number of writes matches the number of writes reported by
+ * the server process.
*
- * Return FALSE and flag an error otherwise.
+ * Return FALSE and flag an error otherwise.
*
* Return: TRUE if successful, FALSE otherwise.
*
@@ -4719,7 +4716,7 @@ verify_entry_writes(haddr_t addr,
int expected_entry_writes)
{
hbool_t success = TRUE;
- int reported_entry_writes;
+ int reported_entry_writes = 0;
struct mssg_t mssg;
if ( success ) {
@@ -4777,47 +4774,47 @@ verify_entry_writes(haddr_t addr,
}
} else {
- reported_entry_writes = mssg.count;
+ H5_CHECKED_ASSIGN(reported_entry_writes, int, mssg.count, unsigned);
}
}
- if ( ! success ) {
+ if ( success ) {
if ( reported_entry_writes != expected_entry_writes ) {
nerrors++;
success = FALSE;
if ( verbose ) {
- HDfprintf(stdout,
+ HDfprintf(stdout,
"%d:%s: rep/exp entry 0x%llx writes mismatch (%ld/%ld).\n",
world_mpi_rank, FUNC, (long long)addr,
reported_entry_writes, expected_entry_writes);
}
- }
+ }
}
return(success);
} /* verify_entry_writes() */
-
+
/*****************************************************************************
*
- * Function: verify_total_reads()
+ * Function: verify_total_reads()
*
- * Purpose: Query the server to obtain the total reads since the last
- * server counter reset, and compare this value with the supplied
- * expected value.
+ * Purpose: Query the server to obtain the total reads since the last
+ * server counter reset, and compare this value with the supplied
+ * expected value.
*
- * If the values match, return TRUE.
+ * If the values match, return TRUE.
*
- * If the values don't match, flag an error and return FALSE.
+ * If the values don't match, flag an error and return FALSE.
*
- * Return: Success: TRUE
+ * Return: Success: TRUE
*
- * Failure: FALSE
+ * Failure: FALSE
*
- * Programmer: JRM -- 5/6/10
+ * Programmer: JRM -- 5/6/10
*
*****************************************************************************/
static hbool_t
@@ -4888,9 +4885,9 @@ verify_total_reads(int expected_total_reads)
nerrors++;
success = FALSE;
if ( verbose ) {
- HDfprintf(stdout,
+ HDfprintf(stdout,
"%d:%s: reported/expected total reads mismatch (%ld/%ld).\n",
- world_mpi_rank, FUNC,
+ world_mpi_rank, FUNC,
reported_total_reads, expected_total_reads);
}
@@ -4901,24 +4898,24 @@ verify_total_reads(int expected_total_reads)
} /* verify_total_reads() */
-
+
/*****************************************************************************
*
- * Function: verify_total_writes()
+ * Function: verify_total_writes()
*
- * Purpose: Query the server to obtain the total writes since the last
- * server counter reset, and compare this value with the supplied
- * expected value.
+ * Purpose: Query the server to obtain the total writes since the last
+ * server counter reset, and compare this value with the supplied
+ * expected value.
*
- * If the values match, return TRUE.
+ * If the values match, return TRUE.
*
- * If the values don't match, flag an error and return FALSE.
+ * If the values don't match, flag an error and return FALSE.
*
- * Return: Success: TRUE
+ * Return: Success: TRUE
*
- * Failure: FALSE
+ * Failure: FALSE
*
- * Programmer: JRM -- 5/6/10
+ * Programmer: JRM -- 5/6/10
*
*****************************************************************************/
static hbool_t
@@ -4989,9 +4986,9 @@ verify_total_writes(unsigned expected_total_writes)
nerrors++;
success = FALSE;
if ( verbose ) {
- HDfprintf(stdout,
+ HDfprintf(stdout,
"%d:%s: reported/expected total writes mismatch (%u/%u).\n",
- world_mpi_rank, FUNC,
+ world_mpi_rank, FUNC,
reported_total_writes, expected_total_writes);
}
}
@@ -5001,7 +4998,7 @@ verify_total_writes(unsigned expected_total_writes)
} /* verify_total_writes() */
-
+
/*****************************************************************************
* Function: unlock_entry()
*
@@ -5016,8 +5013,8 @@ verify_total_writes(unsigned expected_total_writes)
*
* Modifications:
*
- * 7/11/06
- * Updated for the new local_len field in datum.
+ * 7/11/06
+ * Updated for the new local_len field in datum.
*
*****************************************************************************/
static void
@@ -5037,7 +5034,7 @@ unlock_entry(H5F_t * file_ptr,
entry_ptr = &(data[idx]);
- HDassert( entry_ptr->locked );
+ HDassert( entry_ptr->locked );
dirtied = ((flags & H5AC__DIRTIED_FLAG) == H5AC__DIRTIED_FLAG );
@@ -5047,13 +5044,13 @@ unlock_entry(H5F_t * file_ptr,
entry_ptr->dirty = TRUE;
}
- result = H5AC_unprotect(file_ptr, H5AC_ind_read_dxpl_id, &(types[0]),
+ result = H5AC_unprotect(file_ptr, &(types[0]),
entry_ptr->base_addr, (void *)(&(entry_ptr->header)), flags);
if ( ( result < 0 ) ||
( entry_ptr->header.type != &(types[0]) ) ||
( ( entry_ptr->len != entry_ptr->header.size ) &&
- ( entry_ptr->local_len != entry_ptr->header.size ) ) ||
+ ( entry_ptr->local_len != entry_ptr->header.size ) ) ||
( entry_ptr->base_addr != entry_ptr->header.addr ) ) {
nerrors++;
@@ -5065,7 +5062,7 @@ unlock_entry(H5F_t * file_ptr,
entry_ptr->locked = FALSE;
- }
+ }
HDassert( ((entry_ptr->header).type)->id == DATUM_ENTRY_TYPE );
@@ -5086,7 +5083,7 @@ unlock_entry(H5F_t * file_ptr,
} /* unlock_entry() */
-
+
/*****************************************************************************
* Function: unpin_entry()
*
@@ -5101,8 +5098,8 @@ unlock_entry(H5F_t * file_ptr,
*
* Modifications:
*
- * JRM -- 8/15/06
- * Added assertion that entry is pinned on entry.
+ * JRM -- 8/15/06
+ * Added assertion that entry is pinned on entry.
*
*****************************************************************************/
static void
@@ -5124,54 +5121,54 @@ unpin_entry(H5F_t * file_ptr,
entry_ptr = &(data[idx]);
- HDassert( (entry_ptr->header).is_pinned );
- HDassert ( ! ( entry_ptr->global_pinned && entry_ptr->local_pinned) );
- HDassert ( ( global && entry_ptr->global_pinned ) ||
- ( ! global && entry_ptr->local_pinned ) );
- HDassert ( ! ( dirty && ( ! global ) ) );
+ HDassert( (entry_ptr->header).is_pinned );
+ HDassert ( ! ( entry_ptr->global_pinned && entry_ptr->local_pinned) );
+ HDassert ( ( global && entry_ptr->global_pinned ) ||
+ ( ! global && entry_ptr->local_pinned ) );
+ HDassert ( ! ( dirty && ( ! global ) ) );
- if ( via_unprotect ) {
+ if ( via_unprotect ) {
- lock_entry(file_ptr, idx);
+ lock_entry(file_ptr, idx);
- if ( dirty ) {
+ if ( dirty ) {
- flags |= H5AC__DIRTIED_FLAG;
- }
+ flags |= H5AC__DIRTIED_FLAG;
+ }
- unlock_entry(file_ptr, idx, flags);
+ unlock_entry(file_ptr, idx, flags);
- } else {
+ } else {
- if ( dirty ) {
+ if ( dirty ) {
- mark_entry_dirty(idx);
+ mark_entry_dirty(idx);
- }
+ }
- result = H5AC_unpin_entry(entry_ptr);
+ result = H5AC_unpin_entry(entry_ptr);
- if ( result < 0 ) {
+ if ( result < 0 ) {
nerrors++;
if ( verbose ) {
HDfprintf(stdout, "%d:%s: error in H5AC_unpin_entry().\n",
- world_mpi_rank, FUNC);
+ world_mpi_rank, FUNC);
}
- }
- }
+ }
+ }
HDassert( ! ((entry_ptr->header).is_pinned) );
- if ( global ) {
+ if ( global ) {
- entry_ptr->global_pinned = FALSE;
+ entry_ptr->global_pinned = FALSE;
- } else {
+ } else {
- entry_ptr->local_pinned = FALSE;
+ entry_ptr->local_pinned = FALSE;
- }
+ }
}
return;
@@ -5183,18 +5180,18 @@ unpin_entry(H5F_t * file_ptr,
/****************************** test functions *******************************/
/*****************************************************************************/
-
+
/*****************************************************************************
*
- * Function: server_smoke_check()
+ * Function: server_smoke_check()
*
- * Purpose: Quick smoke check for the server process.
+ * Purpose: Quick smoke check for the server process.
*
- * Return: Success: TRUE
+ * Return: Success: TRUE
*
- * Failure: FALSE
+ * Failure: FALSE
*
- * Programmer: JRM -- 12/21/05
+ * Programmer: JRM -- 12/21/05
*
*****************************************************************************/
static hbool_t
@@ -5215,12 +5212,12 @@ server_smoke_check(void)
if ( world_mpi_rank == world_server_mpi_rank ) {
- if ( ! server_main() ) {
+ if ( ! server_main() ) {
            /* some error occurred in the server -- report failure */
nerrors++;
if ( verbose ) {
- HDfprintf(stdout, "%d:%s: server_main() failed.\n",
+ HDfprintf(stdout, "%d:%s: server_main() failed.\n",
world_mpi_rank, FUNC);
}
}
@@ -5233,7 +5230,7 @@ server_smoke_check(void)
mssg.dest = world_server_mpi_rank;
mssg.mssg_num = -1; /* set by send function */
mssg.base_addr = data[world_mpi_rank].base_addr;
- mssg.len = data[world_mpi_rank].len;
+ H5_CHECKED_ASSIGN(mssg.len, unsigned, data[world_mpi_rank].len, size_t);
mssg.ver = ++(data[world_mpi_rank].ver);
mssg.count = 0;
mssg.magic = MSSG_MAGIC;
@@ -5286,9 +5283,9 @@ server_smoke_check(void)
#endif /* DO_WRITE_REQ_ACK */
- do_sync();
+ do_sync();
- /* barrier to allow all writes to complete */
+ /* barrier to allow all writes to complete */
if ( MPI_SUCCESS != MPI_Barrier(file_mpi_comm) ) {
success = FALSE;
@@ -5303,12 +5300,12 @@ server_smoke_check(void)
if ( success ) {
success = verify_entry_writes(data[world_mpi_rank].base_addr, 1);
- }
+ }
if ( success ) {
success = verify_entry_reads(data[world_mpi_rank].base_addr, 0);
- }
+ }
if ( success ) {
@@ -5320,7 +5317,7 @@ server_smoke_check(void)
success = verify_total_reads(0);
}
- /* barrier to allow all writes to complete */
+ /* barrier to allow all writes to complete */
if ( MPI_SUCCESS != MPI_Barrier(file_mpi_comm) ) {
success = FALSE;
@@ -5338,7 +5335,7 @@ server_smoke_check(void)
mssg.dest = world_server_mpi_rank;
mssg.mssg_num = -1; /* set by send function */
mssg.base_addr = data[world_mpi_rank].base_addr;
- mssg.len = data[world_mpi_rank].len;
+ H5_CHECKED_ASSIGN(mssg.len, unsigned, data[world_mpi_rank].len, size_t);
mssg.ver = 0; /* bogus -- should be corrected by server */
mssg.count = 0;
mssg.magic = MSSG_MAGIC;
@@ -5392,7 +5389,7 @@ server_smoke_check(void)
}
}
- /* barrier to allow all writes to complete */
+ /* barrier to allow all writes to complete */
if ( MPI_SUCCESS != MPI_Barrier(file_mpi_comm) ) {
success = FALSE;
@@ -5407,12 +5404,12 @@ server_smoke_check(void)
if ( success ) {
success = verify_entry_writes(data[world_mpi_rank].base_addr, 1);
- }
+ }
if ( success ) {
success = verify_entry_reads(data[world_mpi_rank].base_addr, 1);
- }
+ }
if ( success ) {
@@ -5456,12 +5453,12 @@ server_smoke_check(void)
if ( success ) {
success = verify_entry_writes(data[world_mpi_rank].base_addr, 0);
- }
+ }
if ( success ) {
success = verify_entry_reads(data[world_mpi_rank].base_addr, 0);
- }
+ }
if ( success ) {
@@ -5514,9 +5511,9 @@ server_smoke_check(void)
if ( world_mpi_rank == 0 ) {
- if ( max_nerrors == 0 ) {
+ if ( max_nerrors == 0 ) {
- PASSED();
+ PASSED();
} else {
@@ -5531,18 +5528,18 @@ server_smoke_check(void)
} /* server_smoke_check() */
-
+
/*****************************************************************************
*
- * Function: smoke_check_1()
+ * Function: smoke_check_1()
*
- * Purpose: First smoke check for the parallel cache.
+ * Purpose: First smoke check for the parallel cache.
*
- * Return: Success: TRUE
+ * Return: Success: TRUE
*
- * Failure: FALSE
+ * Failure: FALSE
*
- * Programmer: JRM -- 1/4/06
+ * Programmer: JRM -- 1/4/06
*
*****************************************************************************/
static hbool_t
@@ -5558,23 +5555,23 @@ smoke_check_1(int metadata_write_strategy)
switch ( metadata_write_strategy ) {
- case H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY:
+ case H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY:
if ( world_mpi_rank == 0 ) {
- TESTING("smoke check #1 -- process 0 only md write strategy");
+ TESTING("smoke check #1 -- process 0 only md write strategy");
}
- break;
+ break;
- case H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED:
+ case H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED:
if ( world_mpi_rank == 0 ) {
- TESTING("smoke check #1 -- distributed md write strategy");
+ TESTING("smoke check #1 -- distributed md write strategy");
}
- break;
+ break;
default:
if ( world_mpi_rank == 0 ) {
- TESTING("smoke check #1 -- unknown md write strategy");
+ TESTING("smoke check #1 -- unknown md write strategy");
}
- break;
+ break;
}
nerrors = 0;
@@ -5583,12 +5580,12 @@ smoke_check_1(int metadata_write_strategy)
if ( world_mpi_rank == world_server_mpi_rank ) {
- if ( ! server_main() ) {
+ if ( ! server_main() ) {
            /* some error occurred in the server -- report failure */
nerrors++;
if ( verbose ) {
- HDfprintf(stdout, "%d:%s: server_main() failed.\n",
+ HDfprintf(stdout, "%d:%s: server_main() failed.\n",
world_mpi_rank, FUNC);
}
}
@@ -5602,7 +5599,7 @@ smoke_check_1(int metadata_write_strategy)
fid = -1;
cache_ptr = NULL;
if ( verbose ) {
- HDfprintf(stdout, "%d:%s: setup_cache_for_test() failed.\n",
+ HDfprintf(stdout, "%d:%s: setup_cache_for_test() failed.\n",
world_mpi_rank, FUNC);
}
}
@@ -5614,24 +5611,24 @@ smoke_check_1(int metadata_write_strategy)
for ( i = (virt_num_data_entries / 2) - 1; i >= 0; i-- )
{
- lock_entry(file_ptr, i);
- unlock_entry(file_ptr, i, H5AC__NO_FLAGS_SET);
+ lock_entry(file_ptr, i);
+ unlock_entry(file_ptr, i, H5AC__NO_FLAGS_SET);
}
/* Move the first half of the entries... */
for ( i = 0; i < (virt_num_data_entries / 2); i++ )
{
- lock_entry(file_ptr, i);
- unlock_entry(file_ptr, i, H5AC__NO_FLAGS_SET);
- move_entry(file_ptr, i, (i + (virt_num_data_entries / 2)));
+ lock_entry(file_ptr, i);
+ unlock_entry(file_ptr, i, H5AC__NO_FLAGS_SET);
+ move_entry(file_ptr, i, (i + (virt_num_data_entries / 2)));
}
/* ...and then move them back. */
for ( i = (virt_num_data_entries / 2) - 1; i >= 0; i-- )
{
- lock_entry(file_ptr, i);
- unlock_entry(file_ptr, i, H5AC__NO_FLAGS_SET);
- move_entry(file_ptr, i, (i + (virt_num_data_entries / 2)));
+ lock_entry(file_ptr, i);
+ unlock_entry(file_ptr, i, H5AC__NO_FLAGS_SET);
+ move_entry(file_ptr, i, (i + (virt_num_data_entries / 2)));
}
if ( fid >= 0 ) {
@@ -5640,7 +5637,7 @@ smoke_check_1(int metadata_write_strategy)
nerrors++;
if ( verbose ) {
- HDfprintf(stdout, "%d:%s: take_down_cache() failed.\n",
+ HDfprintf(stdout, "%d:%s: take_down_cache() failed.\n",
world_mpi_rank, FUNC);
}
}
@@ -5686,9 +5683,9 @@ smoke_check_1(int metadata_write_strategy)
if ( world_mpi_rank == 0 ) {
- if ( max_nerrors == 0 ) {
+ if ( max_nerrors == 0 ) {
- PASSED();
+ PASSED();
} else {
@@ -5703,21 +5700,21 @@ smoke_check_1(int metadata_write_strategy)
} /* smoke_check_1() */
-
+
/*****************************************************************************
*
- * Function: smoke_check_2()
+ * Function: smoke_check_2()
*
- * Purpose: Second smoke check for the parallel cache.
+ * Purpose: Second smoke check for the parallel cache.
*
- * Introduce random reads, but keep all processes with roughly
- * the same work load.
+ * Introduce random reads, but keep all processes with roughly
+ * the same work load.
*
- * Return: Success: TRUE
+ * Return: Success: TRUE
*
- * Failure: FALSE
+ * Failure: FALSE
*
- * Programmer: JRM -- 1/12/06
+ * Programmer: JRM -- 1/12/06
*
*****************************************************************************/
static hbool_t
@@ -5733,23 +5730,23 @@ smoke_check_2(int metadata_write_strategy)
switch ( metadata_write_strategy ) {
- case H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY:
+ case H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY:
if ( world_mpi_rank == 0 ) {
- TESTING("smoke check #2 -- process 0 only md write strategy");
+ TESTING("smoke check #2 -- process 0 only md write strategy");
}
- break;
+ break;
- case H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED:
+ case H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED:
if ( world_mpi_rank == 0 ) {
- TESTING("smoke check #2 -- distributed md write strategy");
+ TESTING("smoke check #2 -- distributed md write strategy");
}
- break;
+ break;
default:
if ( world_mpi_rank == 0 ) {
- TESTING("smoke check #2 -- unknown md write strategy");
+ TESTING("smoke check #2 -- unknown md write strategy");
}
- break;
+ break;
}
nerrors = 0;
@@ -5758,12 +5755,12 @@ smoke_check_2(int metadata_write_strategy)
if ( world_mpi_rank == world_server_mpi_rank ) {
- if ( ! server_main() ) {
+ if ( ! server_main() ) {
            /* some error occurred in the server -- report failure */
nerrors++;
if ( verbose ) {
- HDfprintf(stdout, "%d:%s: server_main() failed.\n",
+ HDfprintf(stdout, "%d:%s: server_main() failed.\n",
world_mpi_rank, FUNC);
}
}
@@ -5777,7 +5774,7 @@ smoke_check_2(int metadata_write_strategy)
fid = -1;
cache_ptr = NULL;
if ( verbose ) {
- HDfprintf(stdout, "%d:%s: setup_cache_for_test() failed.\n",
+ HDfprintf(stdout, "%d:%s: setup_cache_for_test() failed.\n",
world_mpi_rank, FUNC);
}
}
@@ -5788,73 +5785,73 @@ smoke_check_2(int metadata_write_strategy)
if ( i > 100 ) {
- lock_and_unlock_random_entries(file_ptr, (i - 100), i, 0, 10);
+ lock_and_unlock_random_entries(file_ptr, (i - 100), i, 0, 10);
}
}
- for ( i = 0; i < (virt_num_data_entries / 2); i+=61 )
- {
- /* Make sure we don't step on any locally pinned entries */
- if ( data[i].local_pinned ) {
- unpin_entry(file_ptr, i, FALSE, FALSE, FALSE);
- }
+ for ( i = 0; i < (virt_num_data_entries / 2); i+=61 )
+ {
+ /* Make sure we don't step on any locally pinned entries */
+ if ( data[i].local_pinned ) {
+ unpin_entry(file_ptr, i, FALSE, FALSE, FALSE);
+ }
- pin_entry(file_ptr, i, TRUE, FALSE);
- }
+ pin_entry(file_ptr, i, TRUE, FALSE);
+ }
for ( i = (virt_num_data_entries / 2) - 1; i >= 0; i-=2 )
{
- lock_entry(file_ptr, i);
- unlock_entry(file_ptr, i, H5AC__NO_FLAGS_SET);
- lock_and_unlock_random_entries(file_ptr, 0,
- (virt_num_data_entries / 20),
- 0, 100);
- local_pin_and_unpin_random_entries(file_ptr, 0,
- (virt_num_data_entries / 4),
- 0, 3);
+ lock_entry(file_ptr, i);
+ unlock_entry(file_ptr, i, H5AC__NO_FLAGS_SET);
+ lock_and_unlock_random_entries(file_ptr, 0,
+ (virt_num_data_entries / 20),
+ 0, 100);
+ local_pin_and_unpin_random_entries(file_ptr, 0,
+ (virt_num_data_entries / 4),
+ 0, 3);
}
for ( i = 0; i < (virt_num_data_entries / 2); i+=2 )
{
- lock_entry(file_ptr, i);
- unlock_entry(file_ptr, i, H5AC__DIRTIED_FLAG);
- lock_and_unlock_random_entries(file_ptr, 0,
- (virt_num_data_entries / 10),
- 0, 100);
+ lock_entry(file_ptr, i);
+ unlock_entry(file_ptr, i, H5AC__DIRTIED_FLAG);
+ lock_and_unlock_random_entries(file_ptr, 0,
+ (virt_num_data_entries / 10),
+ 0, 100);
}
- /* we can't move pinned entries, so release any local pins now. */
- local_unpin_all_entries(file_ptr, FALSE);
+ /* we can't move pinned entries, so release any local pins now. */
+ local_unpin_all_entries(file_ptr, FALSE);
/* Move the first half of the entries... */
for ( i = 0; i < (virt_num_data_entries / 2); i++ )
{
- lock_entry(file_ptr, i);
- unlock_entry(file_ptr, i, H5AC__NO_FLAGS_SET);
- move_entry(file_ptr, i, (i + (virt_num_data_entries / 2)));
- lock_and_unlock_random_entries(file_ptr, 0,
- ((virt_num_data_entries / 50) - 1),
+ lock_entry(file_ptr, i);
+ unlock_entry(file_ptr, i, H5AC__NO_FLAGS_SET);
+ move_entry(file_ptr, i, (i + (virt_num_data_entries / 2)));
+ lock_and_unlock_random_entries(file_ptr, 0,
+ ((virt_num_data_entries / 50) - 1),
0, 100);
}
/* ...and then move them back. */
for ( i = (virt_num_data_entries / 2) - 1; i >= 0; i-- )
{
- lock_entry(file_ptr, i);
- unlock_entry(file_ptr, i, H5AC__DIRTIED_FLAG);
- move_entry(file_ptr, i, (i + (virt_num_data_entries / 2)));
- lock_and_unlock_random_entries(file_ptr, 0,
- (virt_num_data_entries / 100),
- 0, 100);
+ lock_entry(file_ptr, i);
+ unlock_entry(file_ptr, i, H5AC__DIRTIED_FLAG);
+ move_entry(file_ptr, i, (i + (virt_num_data_entries / 2)));
+ lock_and_unlock_random_entries(file_ptr, 0,
+ (virt_num_data_entries / 100),
+ 0, 100);
}
- for ( i = 0; i < (virt_num_data_entries / 2); i+=61 )
- {
- hbool_t via_unprotect = ( (((unsigned)i) & 0x01) == 0 );
- hbool_t dirty = ( (((unsigned)i) & 0x02) == 0 );
+ for ( i = 0; i < (virt_num_data_entries / 2); i+=61 )
+ {
+ hbool_t via_unprotect = ( (((unsigned)i) & 0x01) == 0 );
+ hbool_t dirty = ( (((unsigned)i) & 0x02) == 0 );
- unpin_entry(file_ptr, i, TRUE, dirty, via_unprotect);
- }
+ unpin_entry(file_ptr, i, TRUE, dirty, via_unprotect);
+ }
if ( fid >= 0 ) {
@@ -5862,7 +5859,7 @@ smoke_check_2(int metadata_write_strategy)
nerrors++;
if ( verbose ) {
- HDfprintf(stdout, "%d:%s: take_down_cache() failed.\n",
+ HDfprintf(stdout, "%d:%s: take_down_cache() failed.\n",
world_mpi_rank, FUNC);
}
}
@@ -5908,9 +5905,9 @@ smoke_check_2(int metadata_write_strategy)
if ( world_mpi_rank == 0 ) {
- if ( max_nerrors == 0 ) {
+ if ( max_nerrors == 0 ) {
- PASSED();
+ PASSED();
} else {
@@ -5925,24 +5922,24 @@ smoke_check_2(int metadata_write_strategy)
} /* smoke_check_2() */
-
+
/*****************************************************************************
*
- * Function: smoke_check_3()
+ * Function: smoke_check_3()
*
- * Purpose: Third smoke check for the parallel cache.
+ * Purpose: Third smoke check for the parallel cache.
*
- * Use random reads to vary the loads on the diffferent
- * processors. Also force different cache size adjustments.
+ *              Use random reads to vary the loads on the different
+ * processors. Also force different cache size adjustments.
*
- * In this test, load process 0 heavily, and the other
- * processes lightly.
+ * In this test, load process 0 heavily, and the other
+ * processes lightly.
*
- * Return: Success: TRUE
+ * Return: Success: TRUE
*
- * Failure: FALSE
+ * Failure: FALSE
*
- * Programmer: JRM -- 1/13/06
+ * Programmer: JRM -- 1/13/06
*
*****************************************************************************/
static hbool_t
@@ -5962,23 +5959,23 @@ smoke_check_3(int metadata_write_strategy)
switch ( metadata_write_strategy ) {
- case H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY:
+ case H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY:
if ( world_mpi_rank == 0 ) {
- TESTING("smoke check #3 -- process 0 only md write strategy");
+ TESTING("smoke check #3 -- process 0 only md write strategy");
}
- break;
+ break;
- case H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED:
+ case H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED:
if ( world_mpi_rank == 0 ) {
- TESTING("smoke check #3 -- distributed md write strategy");
+ TESTING("smoke check #3 -- distributed md write strategy");
}
- break;
+ break;
default:
if ( world_mpi_rank == 0 ) {
- TESTING("smoke check #3 -- unknown md write strategy");
+ TESTING("smoke check #3 -- unknown md write strategy");
}
- break;
+ break;
}
nerrors = 0;
@@ -5987,12 +5984,12 @@ smoke_check_3(int metadata_write_strategy)
if ( world_mpi_rank == world_server_mpi_rank ) {
- if ( ! server_main() ) {
+ if ( ! server_main() ) {
            /* some error occurred in the server -- report failure */
nerrors++;
if ( verbose ) {
- HDfprintf(stdout, "%d:%s: server_main() failed.\n",
+ HDfprintf(stdout, "%d:%s: server_main() failed.\n",
world_mpi_rank, FUNC);
}
}
@@ -6006,7 +6003,7 @@ smoke_check_3(int metadata_write_strategy)
fid = -1;
cache_ptr = NULL;
if ( verbose ) {
- HDfprintf(stdout, "%d:%s: setup_cache_for_test() failed.\n",
+ HDfprintf(stdout, "%d:%s: setup_cache_for_test() failed.\n",
world_mpi_rank, FUNC);
}
}
@@ -6020,7 +6017,7 @@ smoke_check_3(int metadata_write_strategy)
if ( i > 100 ) {
- lock_and_unlock_random_entries(file_ptr, (i - 100), i,
+ lock_and_unlock_random_entries(file_ptr, (i - 100), i,
min_count, max_count);
}
}
@@ -6030,48 +6027,48 @@ smoke_check_3(int metadata_write_strategy)
max_count = min_count + 50;
for ( i = (virt_num_data_entries / 4);
- i < (virt_num_data_entries / 2);
- i++ )
+ i < (virt_num_data_entries / 2);
+ i++ )
{
insert_entry(cache_ptr, file_ptr, i, H5AC__NO_FLAGS_SET);
- if ( i % 59 == 0 ) {
+ if ( i % 59 == 0 ) {
- hbool_t dirty = ( (i % 2) == 0);
+ hbool_t dirty = ( (i % 2) == 0);
- if ( data[i].local_pinned ) {
- unpin_entry(file_ptr, i, FALSE, FALSE, FALSE);
- }
+ if ( data[i].local_pinned ) {
+ unpin_entry(file_ptr, i, FALSE, FALSE, FALSE);
+ }
- pin_entry(file_ptr, i, TRUE, dirty);
+ pin_entry(file_ptr, i, TRUE, dirty);
- HDassert( !dirty || data[i].header.is_dirty );
- HDassert( data[i].header.is_pinned );
- HDassert( data[i].global_pinned );
- HDassert( ! data[i].local_pinned );
- }
+ HDassert( !dirty || data[i].header.is_dirty );
+ HDassert( data[i].header.is_pinned );
+ HDassert( data[i].global_pinned );
+ HDassert( ! data[i].local_pinned );
+ }
if ( i > 100 ) {
- lock_and_unlock_random_entries(file_ptr, (i - 100), i,
+ lock_and_unlock_random_entries(file_ptr, (i - 100), i,
min_count, max_count);
}
- local_pin_and_unpin_random_entries(file_ptr, 0,
+ local_pin_and_unpin_random_entries(file_ptr, 0,
virt_num_data_entries / 4,
- 0, (file_mpi_rank + 2));
+ 0, (file_mpi_rank + 2));
- }
+ }
- /* flush the file to be sure that we have no problems flushing
- * pinned entries
- */
+ /* flush the file to be sure that we have no problems flushing
+ * pinned entries
+ */
if ( H5Fflush(fid, H5F_SCOPE_GLOBAL) < 0 ) {
nerrors++;
if ( verbose ) {
- HDfprintf(stdout, "%d:%s: H5Fflush() failed.\n",
+ HDfprintf(stdout, "%d:%s: H5Fflush() failed.\n",
world_mpi_rank, FUNC);
}
}
@@ -6087,27 +6084,27 @@ smoke_check_3(int metadata_write_strategy)
for ( i = (virt_num_data_entries / 2) - 1; i >= 0; i-- )
{
- if ( ( i >= (virt_num_data_entries / 4) ) && ( i % 59 == 0 ) ) {
+ if ( ( i >= (virt_num_data_entries / 4) ) && ( i % 59 == 0 ) ) {
hbool_t via_unprotect = ( (((unsigned)i) & 0x02) == 0 );
- hbool_t dirty = ( (((unsigned)i) & 0x04) == 0 );
-
- HDassert( data[i].global_pinned );
- HDassert( ! data[i].local_pinned );
-
- unpin_entry(file_ptr, i, TRUE, dirty,
- via_unprotect);
- }
- if ( i % 2 == 0 ) {
-
- lock_entry(file_ptr, i);
- unlock_entry(file_ptr, i, H5AC__NO_FLAGS_SET);
- local_pin_and_unpin_random_entries(file_ptr, 0,
- virt_num_data_entries / 2,
- 0, 2);
- lock_and_unlock_random_entries(file_ptr,
+ hbool_t dirty = ( (((unsigned)i) & 0x04) == 0 );
+
+ HDassert( data[i].global_pinned );
+ HDassert( ! data[i].local_pinned );
+
+ unpin_entry(file_ptr, i, TRUE, dirty,
+ via_unprotect);
+ }
+ if ( i % 2 == 0 ) {
+
+ lock_entry(file_ptr, i);
+ unlock_entry(file_ptr, i, H5AC__NO_FLAGS_SET);
+ local_pin_and_unpin_random_entries(file_ptr, 0,
+ virt_num_data_entries / 2,
+ 0, 2);
+ lock_and_unlock_random_entries(file_ptr,
min_idx, max_idx, 0, 100);
- }
+ }
}
min_idx = 0;
@@ -6120,9 +6117,9 @@ smoke_check_3(int metadata_write_strategy)
for ( i = 0; i < (virt_num_data_entries / 2); i+=2 )
{
- lock_entry(file_ptr, i);
- unlock_entry(file_ptr, i, H5AC__DIRTIED_FLAG);
- lock_and_unlock_random_entries(file_ptr,
+ lock_entry(file_ptr, i);
+ unlock_entry(file_ptr, i, H5AC__DIRTIED_FLAG);
+ lock_and_unlock_random_entries(file_ptr,
min_idx, max_idx, 0, 100);
}
@@ -6135,22 +6132,22 @@ smoke_check_3(int metadata_write_strategy)
/* move the first half of the entries... */
for ( i = 0; i < (virt_num_data_entries / 2); i++ )
{
- lock_entry(file_ptr, i);
- unlock_entry(file_ptr, i, H5AC__NO_FLAGS_SET);
- move_entry(file_ptr, i, (i + (virt_num_data_entries / 2)));
- lock_and_unlock_random_entries(file_ptr, 0,
- (virt_num_data_entries / 20),
+ lock_entry(file_ptr, i);
+ unlock_entry(file_ptr, i, H5AC__NO_FLAGS_SET);
+ move_entry(file_ptr, i, (i + (virt_num_data_entries / 2)));
+ lock_and_unlock_random_entries(file_ptr, 0,
+ (virt_num_data_entries / 20),
min_count, max_count);
}
/* ...and then move them back. */
for ( i = (virt_num_data_entries / 2) - 1; i >= 0; i-- )
{
- lock_entry(file_ptr, i);
- unlock_entry(file_ptr, i, H5AC__DIRTIED_FLAG);
- move_entry(file_ptr, i, (i + (virt_num_data_entries / 2)));
- lock_and_unlock_random_entries(file_ptr, 0,
- (virt_num_data_entries / 40),
+ lock_entry(file_ptr, i);
+ unlock_entry(file_ptr, i, H5AC__DIRTIED_FLAG);
+ move_entry(file_ptr, i, (i + (virt_num_data_entries / 2)));
+ lock_and_unlock_random_entries(file_ptr, 0,
+ (virt_num_data_entries / 40),
min_count, max_count);
}
@@ -6162,16 +6159,16 @@ smoke_check_3(int metadata_write_strategy)
for ( i = 0; i < (virt_num_data_entries / 2); i+=2 )
{
- local_pin_and_unpin_random_entries(file_ptr, 0,
- (virt_num_data_entries / 2),
- 0, 5);
+ local_pin_and_unpin_random_entries(file_ptr, 0,
+ (virt_num_data_entries / 2),
+ 0, 5);
- lock_entry(file_ptr, i);
- unlock_entry(file_ptr, i, H5AC__DIRTIED_FLAG);
+ lock_entry(file_ptr, i);
+ unlock_entry(file_ptr, i, H5AC__DIRTIED_FLAG);
if ( i > 100 ) {
- lock_and_unlock_random_entries(file_ptr, (i - 100), i,
+ lock_and_unlock_random_entries(file_ptr, (i - 100), i,
min_count, max_count);
}
}
@@ -6185,7 +6182,7 @@ smoke_check_3(int metadata_write_strategy)
nerrors++;
if ( verbose ) {
- HDfprintf(stdout, "%d:%s: take_down_cache() failed.\n",
+ HDfprintf(stdout, "%d:%s: take_down_cache() failed.\n",
world_mpi_rank, FUNC);
}
}
@@ -6232,9 +6229,9 @@ smoke_check_3(int metadata_write_strategy)
if ( world_mpi_rank == 0 ) {
- if ( max_nerrors == 0 ) {
+ if ( max_nerrors == 0 ) {
- PASSED();
+ PASSED();
} else {
@@ -6249,24 +6246,24 @@ smoke_check_3(int metadata_write_strategy)
} /* smoke_check_3() */
-
+
/*****************************************************************************
*
- * Function: smoke_check_4()
+ * Function: smoke_check_4()
*
- * Purpose: Fourth smoke check for the parallel cache.
+ * Purpose: Fourth smoke check for the parallel cache.
*
- * Use random reads to vary the loads on the diffferent
- * processors. Also force different cache size adjustments.
+ *              Use random reads to vary the loads on the different
+ * processors. Also force different cache size adjustments.
*
- * In this test, load process 0 lightly, and the other
- * processes heavily.
+ * In this test, load process 0 lightly, and the other
+ * processes heavily.
*
- * Return: Success: TRUE
+ * Return: Success: TRUE
*
- * Failure: FALSE
+ * Failure: FALSE
*
- * Programmer: JRM -- 1/13/06
+ * Programmer: JRM -- 1/13/06
*
*****************************************************************************/
static hbool_t
@@ -6286,23 +6283,23 @@ smoke_check_4(int metadata_write_strategy)
switch ( metadata_write_strategy ) {
- case H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY:
+ case H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY:
if ( world_mpi_rank == 0 ) {
- TESTING("smoke check #4 -- process 0 only md write strategy");
+ TESTING("smoke check #4 -- process 0 only md write strategy");
}
- break;
+ break;
- case H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED:
+ case H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED:
if ( world_mpi_rank == 0 ) {
- TESTING("smoke check #4 -- distributed md write strategy");
+ TESTING("smoke check #4 -- distributed md write strategy");
}
- break;
+ break;
default:
if ( world_mpi_rank == 0 ) {
- TESTING("smoke check #4 -- unknown md write strategy");
+ TESTING("smoke check #4 -- unknown md write strategy");
}
- break;
+ break;
}
nerrors = 0;
@@ -6311,12 +6308,12 @@ smoke_check_4(int metadata_write_strategy)
if ( world_mpi_rank == world_server_mpi_rank ) {
- if ( ! server_main() ) {
+ if ( ! server_main() ) {
            /* some error occurred in the server -- report failure */
nerrors++;
if ( verbose ) {
- HDfprintf(stdout, "%d:%s: server_main() failed.\n",
+ HDfprintf(stdout, "%d:%s: server_main() failed.\n",
world_mpi_rank, FUNC);
}
}
@@ -6330,7 +6327,7 @@ smoke_check_4(int metadata_write_strategy)
fid = -1;
cache_ptr = NULL;
if ( verbose ) {
- HDfprintf(stdout, "%d:%s: setup_cache_for_test() failed.\n",
+ HDfprintf(stdout, "%d:%s: setup_cache_for_test() failed.\n",
world_mpi_rank, FUNC);
}
}
@@ -6345,7 +6342,7 @@ smoke_check_4(int metadata_write_strategy)
if ( i > 100 ) {
- lock_and_unlock_random_entries(file_ptr, (i - 100), i,
+ lock_and_unlock_random_entries(file_ptr, (i - 100), i,
min_count, max_count);
}
}
@@ -6354,23 +6351,23 @@ smoke_check_4(int metadata_write_strategy)
max_count = min_count + 100;
for ( i = (virt_num_data_entries / 4);
- i < (virt_num_data_entries / 2);
- i++ )
+ i < (virt_num_data_entries / 2);
+ i++ )
{
- if ( i % 2 == 0 ) {
+ if ( i % 2 == 0 ) {
insert_entry(cache_ptr, file_ptr, i, H5AC__NO_FLAGS_SET);
- } else {
+ } else {
- /* Insert some entries pinned, and then unpin them
- * immediately. We have tested pinned entries elsewhere,
- * so it should be sufficient to verify that the
- * entries are in fact pinned (which unpin_entry() should do).
- */
+ /* Insert some entries pinned, and then unpin them
+ * immediately. We have tested pinned entries elsewhere,
+ * so it should be sufficient to verify that the
+ * entries are in fact pinned (which unpin_entry() should do).
+ */
insert_entry(cache_ptr, file_ptr, i, H5C__PIN_ENTRY_FLAG);
unpin_entry(file_ptr, i, TRUE, FALSE, FALSE);
- }
+ }
if ( i % 59 == 0 ) {
@@ -6390,19 +6387,19 @@ smoke_check_4(int metadata_write_strategy)
if ( i > 100 ) {
- lock_and_unlock_random_entries(file_ptr, (i - 100), i,
+ lock_and_unlock_random_entries(file_ptr, (i - 100), i,
min_count, max_count);
}
local_pin_and_unpin_random_entries(file_ptr, 0,
- (virt_num_data_entries / 4),
+ (virt_num_data_entries / 4),
0, (file_mpi_rank + 2));
}
/* flush the file to be sure that we have no problems flushing
- * pinned entries
- */
+ * pinned entries
+ */
if ( H5Fflush(fid, H5F_SCOPE_GLOBAL) < 0 ) {
nerrors++;
if ( verbose ) {
@@ -6429,13 +6426,13 @@ smoke_check_4(int metadata_write_strategy)
unpin_entry(file_ptr, i, TRUE, dirty, via_unprotect);
}
- if ( i % 2 == 0 ) {
+ if ( i % 2 == 0 ) {
- lock_entry(file_ptr, i);
- unlock_entry(file_ptr, i, H5AC__NO_FLAGS_SET);
- lock_and_unlock_random_entries(file_ptr,
+ lock_entry(file_ptr, i);
+ unlock_entry(file_ptr, i, H5AC__NO_FLAGS_SET);
+ lock_and_unlock_random_entries(file_ptr,
min_idx, max_idx, 0, 100);
- }
+ }
}
min_idx = 0;
@@ -6444,14 +6441,14 @@ smoke_check_4(int metadata_write_strategy)
for ( i = 0; i < (virt_num_data_entries / 2); i+=2 )
{
- lock_entry(file_ptr, i);
- unlock_entry(file_ptr, i, H5AC__DIRTIED_FLAG);
- lock_and_unlock_random_entries(file_ptr,
+ lock_entry(file_ptr, i);
+ unlock_entry(file_ptr, i, H5AC__DIRTIED_FLAG);
+ lock_and_unlock_random_entries(file_ptr,
min_idx, max_idx, 0, 100);
}
- /* we can't move pinned entries, so release any local pins now. */
- local_unpin_all_entries(file_ptr, FALSE);
+ /* we can't move pinned entries, so release any local pins now. */
+ local_unpin_all_entries(file_ptr, FALSE);
min_count = 10 * (file_mpi_rank % 4);
max_count = min_count + 100;
@@ -6459,22 +6456,22 @@ smoke_check_4(int metadata_write_strategy)
/* move the first half of the entries... */
for ( i = 0; i < (virt_num_data_entries / 2); i++ )
{
- lock_entry(file_ptr, i);
- unlock_entry(file_ptr, i, H5AC__NO_FLAGS_SET);
- move_entry(file_ptr, i, (i + (virt_num_data_entries / 2)));
- lock_and_unlock_random_entries(file_ptr, 0,
- (virt_num_data_entries / 20),
+ lock_entry(file_ptr, i);
+ unlock_entry(file_ptr, i, H5AC__NO_FLAGS_SET);
+ move_entry(file_ptr, i, (i + (virt_num_data_entries / 2)));
+ lock_and_unlock_random_entries(file_ptr, 0,
+ (virt_num_data_entries / 20),
min_count, max_count);
}
/* ...and then move them back. */
for ( i = (virt_num_data_entries / 2) - 1; i >= 0; i-- )
{
- lock_entry(file_ptr, i);
- unlock_entry(file_ptr, i, H5AC__DIRTIED_FLAG);
- move_entry(file_ptr, i, (i + (virt_num_data_entries / 2)));
- lock_and_unlock_random_entries(file_ptr, 0,
- (virt_num_data_entries / 40),
+ lock_entry(file_ptr, i);
+ unlock_entry(file_ptr, i, H5AC__DIRTIED_FLAG);
+ move_entry(file_ptr, i, (i + (virt_num_data_entries / 2)));
+ lock_and_unlock_random_entries(file_ptr, 0,
+ (virt_num_data_entries / 40),
min_count, max_count);
}
@@ -6486,12 +6483,12 @@ smoke_check_4(int metadata_write_strategy)
for ( i = 0; i < (virt_num_data_entries / 2); i+=2 )
{
- lock_entry(file_ptr, i);
- unlock_entry(file_ptr, i, H5AC__DIRTIED_FLAG);
+ lock_entry(file_ptr, i);
+ unlock_entry(file_ptr, i, H5AC__DIRTIED_FLAG);
if ( i > 100 ) {
- lock_and_unlock_random_entries(file_ptr, (i - 100), i,
+ lock_and_unlock_random_entries(file_ptr, (i - 100), i,
min_count, max_count);
}
}
@@ -6502,7 +6499,7 @@ smoke_check_4(int metadata_write_strategy)
nerrors++;
if ( verbose ) {
- HDfprintf(stdout, "%d:%s: take_down_cache() failed.\n",
+ HDfprintf(stdout, "%d:%s: take_down_cache() failed.\n",
world_mpi_rank, FUNC);
}
}
@@ -6549,9 +6546,9 @@ smoke_check_4(int metadata_write_strategy)
if ( world_mpi_rank == 0 ) {
- if ( max_nerrors == 0 ) {
+ if ( max_nerrors == 0 ) {
- PASSED();
+ PASSED();
} else {
@@ -6566,19 +6563,19 @@ smoke_check_4(int metadata_write_strategy)
} /* smoke_check_4() */
-
+
/*****************************************************************************
*
- * Function: smoke_check_5()
+ * Function: smoke_check_5()
*
- * Purpose: Similar to smoke check 1, but modified to verify that
- * H5AC_mark_entry_dirty() works in the parallel case.
+ * Purpose: Similar to smoke check 1, but modified to verify that
+ * H5AC_mark_entry_dirty() works in the parallel case.
*
- * Return: Success: TRUE
+ * Return: Success: TRUE
*
- * Failure: FALSE
+ * Failure: FALSE
*
- * Programmer: JRM -- 5/18/06
+ * Programmer: JRM -- 5/18/06
*
*****************************************************************************/
static hbool_t
@@ -6594,23 +6591,23 @@ smoke_check_5(int metadata_write_strategy)
switch ( metadata_write_strategy ) {
- case H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY:
+ case H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY:
if ( world_mpi_rank == 0 ) {
- TESTING("smoke check #5 -- process 0 only md write strategy");
+ TESTING("smoke check #5 -- process 0 only md write strategy");
}
- break;
+ break;
- case H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED:
+ case H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED:
if ( world_mpi_rank == 0 ) {
- TESTING("smoke check #5 -- distributed md write strategy");
+ TESTING("smoke check #5 -- distributed md write strategy");
}
- break;
+ break;
default:
if ( world_mpi_rank == 0 ) {
- TESTING("smoke check #5 -- unknown md write strategy");
+ TESTING("smoke check #5 -- unknown md write strategy");
}
- break;
+ break;
}
@@ -6620,12 +6617,12 @@ smoke_check_5(int metadata_write_strategy)
if ( world_mpi_rank == world_server_mpi_rank ) {
- if ( ! server_main() ) {
+ if ( ! server_main() ) {
/* some error occurred in the server -- report failure */
nerrors++;
if ( verbose ) {
- HDfprintf(stdout, "%d:%s: server_main() failed.\n",
+ HDfprintf(stdout, "%d:%s: server_main() failed.\n",
world_mpi_rank, FUNC);
}
}
@@ -6640,7 +6637,7 @@ smoke_check_5(int metadata_write_strategy)
fid = -1;
cache_ptr = NULL;
if ( verbose ) {
- HDfprintf(stdout, "%d:%s: setup_cache_for_test() failed.\n",
+ HDfprintf(stdout, "%d:%s: setup_cache_for_test() failed.\n",
world_mpi_rank, FUNC);
}
}
@@ -6650,60 +6647,60 @@ smoke_check_5(int metadata_write_strategy)
insert_entry(cache_ptr, file_ptr, i, H5AC__NO_FLAGS_SET);
}
- /* flush the file so we can lock known clean entries. */
+ /* flush the file so we can lock known clean entries. */
if ( H5Fflush(fid, H5F_SCOPE_GLOBAL) < 0 ) {
nerrors++;
if ( verbose ) {
- HDfprintf(stdout, "%d:%s: H5Fflush() failed.\n",
+ HDfprintf(stdout, "%d:%s: H5Fflush() failed.\n",
world_mpi_rank, FUNC);
}
}
for ( i = 0; i < (virt_num_data_entries / 4); i++ )
{
- lock_entry(file_ptr, i);
+ lock_entry(file_ptr, i);
- if ( i % 2 == 0 )
- {
- mark_entry_dirty(i);
- }
+ if ( i % 2 == 0 )
+ {
+ mark_entry_dirty(i);
+ }
- unlock_entry(file_ptr, i, H5AC__NO_FLAGS_SET);
+ unlock_entry(file_ptr, i, H5AC__NO_FLAGS_SET);
- if ( i % 2 == 1 )
- {
- if ( i % 4 == 1 ) {
+ if ( i % 2 == 1 )
+ {
+ if ( i % 4 == 1 ) {
- lock_entry(file_ptr, i);
- unlock_entry(file_ptr, i, H5AC__DIRTIED_FLAG);
- }
+ lock_entry(file_ptr, i);
+ unlock_entry(file_ptr, i, H5AC__DIRTIED_FLAG);
+ }
- expunge_entry(file_ptr, i);
- }
+ expunge_entry(file_ptr, i);
+ }
}
for ( i = (virt_num_data_entries / 2) - 1;
i >= (virt_num_data_entries / 4);
- i-- )
+ i-- )
{
- pin_entry(file_ptr, i, TRUE, FALSE);
+ pin_entry(file_ptr, i, TRUE, FALSE);
- if ( i % 2 == 0 )
- {
- if ( i % 8 <= 4 ) {
+ if ( i % 2 == 0 )
+ {
+ if ( i % 8 <= 4 ) {
- resize_entry(i, data[i].len / 2);
- }
+ resize_entry(i, data[i].len / 2);
+ }
mark_entry_dirty(i);
- if ( i % 8 <= 4 ) {
+ if ( i % 8 <= 4 ) {
- resize_entry(i, data[i].len);
- }
- }
+ resize_entry(i, data[i].len);
+ }
+ }
- unpin_entry(file_ptr, i, TRUE, FALSE, FALSE);
+ unpin_entry(file_ptr, i, TRUE, FALSE, FALSE);
}
if ( fid >= 0 ) {
@@ -6712,7 +6709,7 @@ smoke_check_5(int metadata_write_strategy)
nerrors++;
if ( verbose ) {
- HDfprintf(stdout, "%d:%s: take_down_cache() failed.\n",
+ HDfprintf(stdout, "%d:%s: take_down_cache() failed.\n",
world_mpi_rank, FUNC);
}
}
@@ -6758,9 +6755,9 @@ smoke_check_5(int metadata_write_strategy)
if ( world_mpi_rank == 0 ) {
- if ( max_nerrors == 0 ) {
+ if ( max_nerrors == 0 ) {
- PASSED();
+ PASSED();
} else {
@@ -6775,10 +6772,10 @@ smoke_check_5(int metadata_write_strategy)
} /* smoke_check_5() */
-
+
/*****************************************************************************
*
- * Function: trace_file_check()
+ * Function: trace_file_check()
*
* Purpose: A basic test of the trace file capability. In essence,
* we invoke all operations that generate trace file output,
@@ -6806,13 +6803,11 @@ smoke_check_5(int metadata_write_strategy)
* - H5AC_expunge_entry()
* - H5AC_resize_entry()
*
- * This test is skipped if H5_METADATA_TRACE_FILE is undefined.
- *
- * Return: Success: TRUE
+ * Return: Success: TRUE
*
- * Failure: FALSE
+ * Failure: FALSE
*
- * Programmer: JRM -- 6/13/06
+ * Programmer: JRM -- 6/13/06
*
*****************************************************************************/
static hbool_t
@@ -6820,63 +6815,63 @@ trace_file_check(int metadata_write_strategy)
{
hbool_t success = TRUE;
-#ifdef H5_METADATA_TRACE_FILE
-
const char *((* expected_output)[]) = NULL;
const char * expected_output_0[] =
{
"### HDF5 metadata cache trace file version 1 ###\n",
- "H5AC_set_cache_auto_resize_config 1 0 1 0 \"t_cache_trace.txt\" 1 0 2097152 0.300000 33554432 1048576 50000 1 0.900000 2.000000 1 1.000000 0.250000 1 4194304 3 0.999000 0.900000 1 1048576 3 1 0.100000 262144 0 0\n",
- "H5AC_insert_entry 0x400 27 0x0 2 0\n",
- "H5AC_insert_entry 0x402 27 0x0 2 0\n",
- "H5AC_insert_entry 0x404 27 0x0 4 0\n",
- "H5AC_insert_entry 0x408 27 0x0 6 0\n",
- "H5AC_protect 0x400 27 0x0 2 1\n",
- "H5AC_mark_entry_dirty 0x400 0\n",
- "H5AC_unprotect 0x400 27 0x0 0\n",
- "H5AC_protect 0x402 27 0x0 2 1\n",
- "H5AC_pin_protected_entry 0x402 0\n",
- "H5AC_unprotect 0x402 27 0x0 0\n",
- "H5AC_unpin_entry 0x402 0\n",
- "H5AC_expunge_entry 0x402 27 0\n",
- "H5AC_protect 0x404 27 0x0 4 1\n",
- "H5AC_pin_protected_entry 0x404 0\n",
- "H5AC_unprotect 0x404 27 0x0 0\n",
- "H5AC_mark_entry_dirty 0x404 0\n",
- "H5AC_resize_entry 0x404 2 0\n",
- "H5AC_resize_entry 0x404 4 0\n",
- "H5AC_unpin_entry 0x404 0\n",
- "H5AC_move_entry 0x400 0x8e65 27 0\n",
- "H5AC_move_entry 0x8e65 0x400 27 0\n",
- "H5AC_flush 0\n",
+ "H5AC_set_cache_auto_resize_config",
+ "H5AC_insert_entry",
+ "H5AC_insert_entry",
+ "H5AC_insert_entry",
+ "H5AC_insert_entry",
+ "H5AC_protect",
+ "H5AC_mark_entry_dirty",
+ "H5AC_unprotect",
+ "H5AC_protect",
+ "H5AC_pin_protected_entry",
+ "H5AC_unprotect",
+ "H5AC_unpin_entry",
+ "H5AC_expunge_entry",
+ "H5AC_protect",
+ "H5AC_pin_protected_entry",
+ "H5AC_unprotect",
+ "H5AC_mark_entry_dirty",
+ "H5AC_resize_entry",
+ "H5AC_resize_entry",
+ "H5AC_unpin_entry",
+ "H5AC_move_entry",
+ "H5AC_move_entry",
+ "H5AC_flush",
+ "H5AC_flush",
NULL
};
const char * expected_output_1[] =
{
"### HDF5 metadata cache trace file version 1 ###\n",
- "H5AC_set_cache_auto_resize_config 1 0 1 0 \"t_cache_trace.txt\" 1 0 2097152 0.300000 33554432 1048576 50000 1 0.900000 2.000000 1 1.000000 0.250000 1 4194304 3 0.999000 0.900000 1 1048576 3 1 0.100000 262144 1 0\n",
- "H5AC_insert_entry 0x400 27 0x0 2 0\n",
- "H5AC_insert_entry 0x402 27 0x0 2 0\n",
- "H5AC_insert_entry 0x404 27 0x0 4 0\n",
- "H5AC_insert_entry 0x408 27 0x0 6 0\n",
- "H5AC_protect 0x400 27 0x0 2 1\n",
- "H5AC_mark_entry_dirty 0x400 0\n",
- "H5AC_unprotect 0x400 27 0x0 0\n",
- "H5AC_protect 0x402 27 0x0 2 1\n",
- "H5AC_pin_protected_entry 0x402 0\n",
- "H5AC_unprotect 0x402 27 0x0 0\n",
- "H5AC_unpin_entry 0x402 0\n",
- "H5AC_expunge_entry 0x402 27 0\n",
- "H5AC_protect 0x404 27 0x0 4 1\n",
- "H5AC_pin_protected_entry 0x404 0\n",
- "H5AC_unprotect 0x404 27 0x0 0\n",
- "H5AC_mark_entry_dirty 0x404 0\n",
- "H5AC_resize_entry 0x404 2 0\n",
- "H5AC_resize_entry 0x404 4 0\n",
- "H5AC_unpin_entry 0x404 0\n",
- "H5AC_move_entry 0x400 0x8e65 27 0\n",
- "H5AC_move_entry 0x8e65 0x400 27 0\n",
- "H5AC_flush 0\n",
+ "H5AC_set_cache_auto_resize_config",
+ "H5AC_insert_entry",
+ "H5AC_insert_entry",
+ "H5AC_insert_entry",
+ "H5AC_insert_entry",
+ "H5AC_protect",
+ "H5AC_mark_entry_dirty",
+ "H5AC_unprotect",
+ "H5AC_protect",
+ "H5AC_pin_protected_entry",
+ "H5AC_unprotect",
+ "H5AC_unpin_entry",
+ "H5AC_expunge_entry",
+ "H5AC_protect",
+ "H5AC_pin_protected_entry",
+ "H5AC_unprotect",
+ "H5AC_mark_entry_dirty",
+ "H5AC_resize_entry",
+ "H5AC_resize_entry",
+ "H5AC_unpin_entry",
+ "H5AC_move_entry",
+ "H5AC_move_entry",
+ "H5AC_flush",
+ "H5AC_flush",
NULL
};
char buffer[256];
@@ -6884,8 +6879,8 @@ trace_file_check(int metadata_write_strategy)
hbool_t done = FALSE;
int i;
int max_nerrors;
- int expected_line_len;
- int actual_line_len;
+ size_t expected_line_len;
+ size_t actual_line_len;
hid_t fid = -1;
H5F_t * file_ptr = NULL;
H5C_t * cache_ptr = NULL;
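The int-to-size_t change above matters because the line lengths come from HDstrlen(), which returns size_t; storing the result in an int invites sign-conversion warnings and, in principle, truncation. A minimal sketch of the intent, using plain strlen() in place of the HDstrlen() wrapper:

    #include <string.h>

    /* Keep lengths from strlen()-family calls in size_t end to end; a
     * NULL line counts as length zero, matching the (size_t)0
     * assignments in the comparison loop below.
     */
    static size_t
    line_len(const char *line)
    {
        return (line != NULL) ? strlen(line) : (size_t)0;
    }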
@@ -6893,188 +6888,151 @@ trace_file_check(int metadata_write_strategy)
H5AC_cache_config_t config;
struct mssg_t mssg;
-#endif /* H5_METADATA_TRACE_FILE */
- switch ( metadata_write_strategy ) {
+ switch(metadata_write_strategy) {
+
+ case H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY:
- case H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY:
-#ifdef H5_METADATA_TRACE_FILE
expected_output = &expected_output_0;
-#endif /* H5_METADATA_TRACE_FILE */
- if ( world_mpi_rank == 0 ) {
- TESTING(
- "trace file collection -- process 0 only md write strategy");
- }
- break;
- case H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED:
-#ifdef H5_METADATA_TRACE_FILE
+ if(world_mpi_rank == 0)
+ TESTING("trace file collection -- process 0 only md write strategy");
+ break;
+
+ case H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED:
+
expected_output = &expected_output_1;
-#endif /* H5_METADATA_TRACE_FILE */
- if ( world_mpi_rank == 0 ) {
- TESTING(
- "trace file collection -- distributed md write strategy");
- }
- break;
+
+ if(world_mpi_rank == 0)
+ TESTING("trace file collection -- distributed md write strategy");
+ break;
default:
-#ifdef H5_METADATA_TRACE_FILE
+
/* this will almost certainly cause a failure, but it keeps us
* from de-referencing a NULL pointer.
*/
expected_output = &expected_output_0;
-#endif /* H5_METADATA_TRACE_FILE */
- if ( world_mpi_rank == 0 ) {
- TESTING("trace file collection -- unknown md write strategy");
- }
- break;
- }
-#ifdef H5_METADATA_TRACE_FILE
+ if(world_mpi_rank == 0)
+ TESTING("trace file collection -- unknown md write strategy");
+ break;
+ } /* end switch */
+
nerrors = 0;
init_data();
reset_stats();
- if ( world_mpi_rank == world_server_mpi_rank ) {
+ if(world_mpi_rank == world_server_mpi_rank) {
- if ( ! server_main() ) {
+ if(!server_main()) {
/* some error occurred in the server -- report failure */
nerrors++;
- if ( verbose ) {
- HDfprintf(stdout, "%d:%s: server_main() failed.\n",
- world_mpi_rank, FUNC);
- }
+ if ( verbose )
+ HDfprintf(stdout, "%d:%s: server_main() failed.\n", world_mpi_rank, FUNC);
}
}
- else /* run the clients */
- {
+ else {
+ /* run the clients */
- if ( ! setup_cache_for_test(&fid, &file_ptr, &cache_ptr,
- metadata_write_strategy) ) {
+ if(!setup_cache_for_test(&fid, &file_ptr, &cache_ptr, metadata_write_strategy) ) {
nerrors++;
fid = -1;
cache_ptr = NULL;
- if ( verbose ) {
- HDfprintf(stdout, "%d:%s: setup_cache_for_test() failed.\n",
- world_mpi_rank, FUNC);
- }
+ if(verbose)
+ HDfprintf(stdout, "%d:%s: setup_cache_for_test() failed.\n", world_mpi_rank, FUNC);
}
- if ( nerrors == 0 ) {
+ if(nerrors == 0) {
config.version = H5AC__CURR_CACHE_CONFIG_VERSION;
- if ( H5AC_get_cache_auto_resize_config(cache_ptr, &config)
- != SUCCEED ) {
-
- nerrors++;
- HDfprintf(stdout,
- "%d:%s: H5AC_get_cache_auto_resize_config() failed.\n",
- world_mpi_rank, FUNC);
-
- } else {
-
+ if(H5AC_get_cache_auto_resize_config(cache_ptr, &config) != SUCCEED) {
+ nerrors++;
+ HDfprintf(stdout, "%d:%s: H5AC_get_cache_auto_resize_config() failed.\n", world_mpi_rank, FUNC);
+ }
+ else {
config.open_trace_file = TRUE;
- strcpy(config.trace_file_name, "t_cache_trace.txt");
+ strcpy(config.trace_file_name, "t_cache_trace.txt");
- if ( H5AC_set_cache_auto_resize_config(cache_ptr, &config)
- != SUCCEED ) {
-
- nerrors++;
- HDfprintf(stdout,
- "%d:%s: H5AC_set_cache_auto_resize_config() failed.\n",
- world_mpi_rank, FUNC);
+ if(H5AC_set_cache_auto_resize_config(cache_ptr, &config) != SUCCEED) {
+ nerrors++;
+ HDfprintf(stdout, "%d:%s: H5AC_set_cache_auto_resize_config() failed.\n", world_mpi_rank, FUNC);
}
}
- }
+ } /* end if */
- insert_entry(cache_ptr, file_ptr, 0, H5AC__NO_FLAGS_SET);
- insert_entry(cache_ptr, file_ptr, 1, H5AC__NO_FLAGS_SET);
- insert_entry(cache_ptr, file_ptr, 2, H5AC__NO_FLAGS_SET);
- insert_entry(cache_ptr, file_ptr, 3, H5AC__NO_FLAGS_SET);
+ insert_entry(cache_ptr, file_ptr, 0, H5AC__NO_FLAGS_SET);
+ insert_entry(cache_ptr, file_ptr, 1, H5AC__NO_FLAGS_SET);
+ insert_entry(cache_ptr, file_ptr, 2, H5AC__NO_FLAGS_SET);
+ insert_entry(cache_ptr, file_ptr, 3, H5AC__NO_FLAGS_SET);
- lock_entry(file_ptr, 0);
- mark_entry_dirty(0);
- unlock_entry(file_ptr, 0, H5AC__NO_FLAGS_SET);
+ lock_entry(file_ptr, 0);
+ mark_entry_dirty(0);
+ unlock_entry(file_ptr, 0, H5AC__NO_FLAGS_SET);
- lock_entry(file_ptr, 1);
+ lock_entry(file_ptr, 1);
pin_protected_entry(1, TRUE);
- unlock_entry(file_ptr, 1, H5AC__NO_FLAGS_SET);
+ unlock_entry(file_ptr, 1, H5AC__NO_FLAGS_SET);
unpin_entry(file_ptr, 1, TRUE, FALSE, FALSE);
expunge_entry(file_ptr, 1);
- lock_entry(file_ptr, 2);
+ lock_entry(file_ptr, 2);
pin_protected_entry(2, TRUE);
- unlock_entry(file_ptr, 2, H5AC__NO_FLAGS_SET);
- mark_entry_dirty(2);
+ unlock_entry(file_ptr, 2, H5AC__NO_FLAGS_SET);
+ mark_entry_dirty(2);
resize_entry(2, data[2].len / 2);
resize_entry(2, data[2].len);
unpin_entry(file_ptr, 2, TRUE, FALSE, FALSE);
- move_entry(file_ptr, 0, 20);
- move_entry(file_ptr, 0, 20);
+ move_entry(file_ptr, 0, 20);
+ move_entry(file_ptr, 0, 20);
- if ( H5Fflush(fid, H5F_SCOPE_GLOBAL) < 0 ) {
+ if(H5Fflush(fid, H5F_SCOPE_GLOBAL) < 0) {
nerrors++;
- if ( verbose ) {
- HDfprintf(stdout, "%d:%s: H5Fflush() failed.\n",
- world_mpi_rank, FUNC);
- }
+ if(verbose)
+ HDfprintf(stdout, "%d:%s: H5Fflush() failed.\n", world_mpi_rank, FUNC);
}
- if ( nerrors == 0 ) {
-
+ if(nerrors == 0) {
config.version = H5AC__CURR_CACHE_CONFIG_VERSION;
- if ( H5AC_get_cache_auto_resize_config(cache_ptr, &config)
- != SUCCEED ) {
-
- nerrors++;
- HDfprintf(stdout,
- "%d:%s: H5AC_get_cache_auto_resize_config() failed.\n",
- world_mpi_rank, FUNC);
-
- } else {
-
+ if(H5AC_get_cache_auto_resize_config(cache_ptr, &config) != SUCCEED) {
+ nerrors++;
+ HDfprintf(stdout, "%d:%s: H5AC_get_cache_auto_resize_config() failed.\n", world_mpi_rank, FUNC);
+ }
+ else {
config.open_trace_file = FALSE;
config.close_trace_file = TRUE;
- config.trace_file_name[0] = '\0';
-
- if ( H5AC_set_cache_auto_resize_config(cache_ptr, &config)
- != SUCCEED ) {
+ config.trace_file_name[0] = '\0';
- nerrors++;
- HDfprintf(stdout,
- "%d:%s: H5AC_set_cache_auto_resize_config() failed.\n",
- world_mpi_rank, FUNC);
+ if(H5AC_set_cache_auto_resize_config(cache_ptr, &config) != SUCCEED) {
+ nerrors++;
+ HDfprintf(stdout, "%d:%s: H5AC_set_cache_auto_resize_config() failed.\n", world_mpi_rank, FUNC);
}
}
- }
+ } /* end if */
- if ( fid >= 0 ) {
-
- if ( ! take_down_cache(fid, cache_ptr) ) {
+ if(fid >= 0) {
+ if(!take_down_cache(fid, cache_ptr)) {
nerrors++;
- if ( verbose ) {
- HDfprintf(stdout, "%d:%s: take_down_cache() failed.\n",
- world_mpi_rank, FUNC);
- }
+ if(verbose)
+ HDfprintf(stdout, "%d:%s: take_down_cache() failed.\n", world_mpi_rank, FUNC);
}
- }
+ } /* end if */
/* verify that all instances of datum are back where they started
* and are clean.
*/
- for ( i = 0; i < NUM_DATA_ENTRIES; i++ )
- {
- HDassert( data_index[i] == i );
- HDassert( ! (data[i].dirty) );
+ for(i = 0; i < NUM_DATA_ENTRIES; i++) {
+ HDassert(data_index[i] == i);
+ HDassert(!(data[i].dirty));
}
/* compose the done message */
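The rewritten loop above compares only the leading function-name token of each trace line (after the header line, which is still compared verbatim), so the test no longer breaks when addresses, sizes, or flag values in the trace output change. A condensed sketch of that comparison, using plain libc calls in place of the HDstrtok()/HDstrcmp() wrappers; this is an illustration, not the test code itself:

    #include <string.h>

    /* Return nonzero if the actual trace line starts with the expected
     * function name.  strtok() writes a NUL into the buffer, which is
     * fine here because the line is not reused afterwards.
     */
    static int
    trace_line_matches(char *actual_line, const char *expected_name)
    {
        char *tok = strtok(actual_line, " ");

        return (tok != NULL) && (strcmp(tok, expected_name) == 0);
    }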
@@ -7088,134 +7046,138 @@ trace_file_check(int metadata_write_strategy)
mssg.count = 0; /* not used */
mssg.magic = MSSG_MAGIC;
- if ( success ) {
-
+ if(success) {
success = send_mssg(&mssg, FALSE);
- if ( ! success ) {
-
+ if(!success) {
nerrors++;
- if ( verbose ) {
- HDfprintf(stdout, "%d:%s: send_mssg() failed on done.\n",
- world_mpi_rank, FUNC);
- }
+ if(verbose)
+ HDfprintf(stdout, "%d:%s: send_mssg() failed on done.\n", world_mpi_rank, FUNC);
}
- }
-
- if ( nerrors == 0 ) {
+ } /* end if */
- sprintf(trace_file_name, "t_cache_trace.txt.%d",
- (int)file_mpi_rank);
+ if(nerrors == 0) {
+ HDsprintf(trace_file_name, "t_cache_trace.txt.%d", (int)file_mpi_rank);
- if ( (trace_file_ptr = HDfopen(trace_file_name, "r")) == NULL ) {
+ if((trace_file_ptr = HDfopen(trace_file_name, "r")) == NULL ) {
nerrors++;
- if ( verbose ) {
- HDfprintf(stdout, "%d:%s: HDfopen failed.\n",
- world_mpi_rank, FUNC);
- }
+ if(verbose)
+ HDfprintf(stdout, "%d:%s: HDfopen failed.\n", world_mpi_rank, FUNC);
}
- }
-
- i = 0;
- while ( ( nerrors == 0 ) && ( ! done ) )
- {
- if ( (*expected_output)[i] == NULL ) {
-
- expected_line_len = 0;
-
- } else {
-
- expected_line_len = HDstrlen((*expected_output)[i]);
- }
-
- if ( HDfgets(buffer, 255, trace_file_ptr) != NULL ) {
+ } /* end if */
- actual_line_len = strlen(buffer);
- } else {
-
- actual_line_len = 0;
- }
+ i = 0;
+ while((nerrors == 0) && (!done)) {
+ /* Get lines of actual and expected data */
+ if((*expected_output)[i] == NULL)
+ expected_line_len = (size_t)0;
+ else
+ expected_line_len = HDstrlen((*expected_output)[i]);
+
+ if(HDfgets(buffer, 255, trace_file_ptr) != NULL)
+ actual_line_len = HDstrlen(buffer);
+ else
+ actual_line_len = (size_t)0;
+
+ /* Compare the lines */
+ /* Handle running out of data */
+ if((actual_line_len == 0) || (expected_line_len == 0)) {
+ if((actual_line_len == 0) && (expected_line_len == 0)) {
+ /* Both ran out at the same time - we're done */
+ done = TRUE;
+ }
+ else {
+ /* One ran out before the other - BADNESS */
+ nerrors++;
+ if(verbose) {
+ HDfprintf(stdout, "%d:%s: Unexpected data in trace file line %d.\n", world_mpi_rank, FUNC, i);
+ if(expected_line_len == 0) {
+ HDfprintf(stdout, "%d:%s: expected = \"%s\" %d\n", world_mpi_rank, FUNC, "<EMPTY>", expected_line_len);
+ HDfprintf(stdout, "%d:%s: actual = \"%s\" %d\n", world_mpi_rank, FUNC, buffer, actual_line_len);
+ }
+ if(actual_line_len == 0) {
+ HDfprintf(stdout, "%d:%s: expected = \"%s\" %d\n", world_mpi_rank, FUNC, (*expected_output)[i], expected_line_len);
+ HDfprintf(stdout, "%d:%s: actual = \"%s\" %d\n", world_mpi_rank, FUNC, "<EMPTY>", actual_line_len);
+ }
+ }
+ HDfprintf(stdout, "BADNESS BADNESS BADNESS\n");
+ }
+ }
+ /* We directly compare the header line (line 0) */
+ else if(0 == i) {
+ if((actual_line_len != expected_line_len) || (HDstrcmp(buffer, (*expected_output)[i]) != 0 )) {
- if ( ( actual_line_len == 0 ) && ( expected_line_len == 0 ) ) {
+ nerrors++;
+ if(verbose) {
+ HDfprintf(stdout, "%d:%s: Unexpected data in trace file line %d.\n", world_mpi_rank, FUNC, i);
+ HDfprintf(stdout, "%d:%s: expected = \"%s\" %d\n", world_mpi_rank, FUNC, (*expected_output)[i], expected_line_len);
+ HDfprintf(stdout, "%d:%s: actual = \"%s\" %d\n", world_mpi_rank, FUNC, buffer, actual_line_len);
+ }
+ }
+ }
+ /* All other lines we tokenize and just compare the function name. This
+ * keeps the test from being too fragile.
+ */
+ else {
+ char *tok = NULL; /* token for actual line */
- done = TRUE;
+ tok = HDstrtok(buffer, " ");
- } else if ( ( actual_line_len != expected_line_len ) ||
- ( HDstrcmp(buffer, (*expected_output)[i]) != 0 ) ) {
+ if(HDstrcmp(tok, (*expected_output)[i]) != 0 ) {
- nerrors++;
- if ( verbose ) {
- HDfprintf(stdout,
- "%d:%s: Unexpected data in trace file line %d.\n",
- world_mpi_rank, FUNC, i);
- HDfprintf(stdout, "%d:%s: expected = \"%s\" %d\n",
- world_mpi_rank, FUNC, (*expected_output)[i],
- expected_line_len);
- HDfprintf(stdout, "%d:%s: actual = \"%s\" %d\n",
- world_mpi_rank, FUNC, buffer,
- actual_line_len);
+ nerrors++;
+ if(verbose) {
+ HDfprintf(stdout, "%d:%s: Unexpected data in trace file line %d.\n", world_mpi_rank, FUNC, i);
+ HDfprintf(stdout, "%d:%s: expected = \"%s\"\n", world_mpi_rank, FUNC, (*expected_output)[i]);
+ HDfprintf(stdout, "%d:%s: actual = \"%s\"\n", world_mpi_rank, FUNC, tok);
+ }
}
- } else {
- i++;
- }
- }
+ } /* end else */
- if ( trace_file_ptr != NULL ) {
+ i++;
+ } /* end while */
- HDfclose(trace_file_ptr);
- trace_file_ptr = NULL;
-#if 1
- HDremove(trace_file_name);
-#endif
+ /* Clean up the trace file */
+ if(trace_file_ptr != NULL) {
+ HDfclose(trace_file_ptr);
+ trace_file_ptr = NULL;
+ HDremove(trace_file_name);
}
- }
+ } /* end giant else that runs clients */
max_nerrors = get_max_nerrors();
- if ( world_mpi_rank == 0 ) {
-
- if ( max_nerrors == 0 ) {
-
- PASSED();
-
- } else {
+ if(world_mpi_rank == 0) {
+ if(max_nerrors == 0) {
+ PASSED();
+ }
+ else {
failures++;
H5_FAILED();
}
}
- success = ( ( success ) && ( max_nerrors == 0 ) );
-
-#else /* H5_METADATA_TRACE_FILE */
-
- if ( world_mpi_rank == 0 ) {
-
- SKIPPED();
-
- HDfprintf(stdout, " trace file support disabled.\n");
- }
-
-#endif /* H5_METADATA_TRACE_FILE */
+ success = ((success) && (max_nerrors == 0));
return(success);
} /* trace_file_check() */
-
+
/*****************************************************************************
*
- * Function: smoke_check_6()
+ * Function: smoke_check_6()
*
- * Purpose: Sixth smoke check for the parallel cache.
+ * Purpose: Sixth smoke check for the parallel cache.
*
- * Return: Success: TRUE
+ * Return: Success: TRUE
*
- * Failure: FALSE
+ * Failure: FALSE
*
- * Programmer: JRM -- 1/13/06
+ * Programmer: JRM -- 1/13/06
*
*****************************************************************************/
static hbool_t
@@ -7231,23 +7193,23 @@ smoke_check_6(int metadata_write_strategy)
switch ( metadata_write_strategy ) {
- case H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY:
+ case H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY:
if ( world_mpi_rank == 0 ) {
- TESTING("smoke check #6 -- process 0 only md write strategy");
+ TESTING("smoke check #6 -- process 0 only md write strategy");
}
- break;
+ break;
- case H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED:
+ case H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED:
if ( world_mpi_rank == 0 ) {
- TESTING("smoke check #6 -- distributed md write strategy");
+ TESTING("smoke check #6 -- distributed md write strategy");
}
- break;
+ break;
default:
if ( world_mpi_rank == 0 ) {
- TESTING("smoke check #6 -- unknown md write strategy");
+ TESTING("smoke check #6 -- unknown md write strategy");
}
- break;
+ break;
}
nerrors = 0;
@@ -7256,12 +7218,12 @@ smoke_check_6(int metadata_write_strategy)
if ( world_mpi_rank == world_server_mpi_rank ) {
- if ( ! server_main() ) {
+ if ( ! server_main() ) {
/* some error occurred in the server -- report failure */
nerrors++;
if ( verbose ) {
- HDfprintf(stdout, "%d:%s: server_main() failed.\n",
+ HDfprintf(stdout, "%d:%s: server_main() failed.\n",
world_mpi_rank, FUNC);
}
}
@@ -7277,7 +7239,7 @@ smoke_check_6(int metadata_write_strategy)
fid = -1;
cache_ptr = NULL;
if ( verbose ) {
- HDfprintf(stdout, "%d:%s: setup_cache_for_test() failed.\n",
+ HDfprintf(stdout, "%d:%s: setup_cache_for_test() failed.\n",
world_mpi_rank, FUNC);
}
}
@@ -7286,7 +7248,7 @@ smoke_check_6(int metadata_write_strategy)
virt_num_data_entries = NUM_DATA_ENTRIES;
/* insert the first half collectively */
- file_ptr->coll_md_read = H5P_USER_TRUE;
+ H5CX_set_coll_metadata_read(TRUE);
for ( i = 0; i < virt_num_data_entries/2; i++ )
{
struct datum * entry_ptr;
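The change above (and the matching ones later in this function) replaces direct pokes at file_ptr->coll_md_read with H5CX_set_coll_metadata_read(), the internal API-context call that now owns the collective-metadata-read setting. A condensed sketch of the resulting phase structure, using this file's insert_entry() helper (loop bodies abbreviated):

    /* collective phase: metadata reads issued by all ranks together */
    H5CX_set_coll_metadata_read(TRUE);
    for ( i = 0; i < virt_num_data_entries / 2; i++ )
        insert_entry(cache_ptr, file_ptr, i, H5AC__NO_FLAGS_SET);

    /* independent phase: each rank reads on its own */
    H5CX_set_coll_metadata_read(FALSE);
    for ( i = virt_num_data_entries / 2; i < virt_num_data_entries; i++ )
        insert_entry(cache_ptr, file_ptr, i, H5AC__NO_FLAGS_SET);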
@@ -7303,11 +7265,12 @@ smoke_check_6(int metadata_write_strategy)
}
/* Make sure coll entries do not cross the 80% threshold */
- HDassert(cache_ptr->max_cache_size*0.8 > cache_ptr->coll_list_size);
+ H5_CHECK_OVERFLOW(cache_ptr->max_cache_size, size_t, double);
+ HDassert((double)cache_ptr->max_cache_size*0.8 > cache_ptr->coll_list_size);
}
/* insert the other half independently */
- file_ptr->coll_md_read = H5P_USER_FALSE;
+ H5CX_set_coll_metadata_read(FALSE);
for ( i = virt_num_data_entries/2; i < virt_num_data_entries; i++ )
{
struct datum * entry_ptr;
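Casting max_cache_size to double before the 80% comparison makes the integer-to-float conversion explicit, and H5_CHECK_OVERFLOW (an HDF5-internal debug macro) asserts that the size_t value survives it. A self-contained sketch of the same check without the internal macro; the 2^53 bound marks where double stops representing integers exactly:

    #include <assert.h>
    #include <stddef.h>

    /* Return nonzero if the collective list stays under 80% of the
     * maximum cache size.  The assert stands in for H5_CHECK_OVERFLOW.
     */
    static int
    coll_list_under_threshold(size_t max_cache_size, size_t coll_list_size)
    {
        assert(max_cache_size <= ((size_t)1 << 53));
        return (double)max_cache_size * 0.8 > (double)coll_list_size;
    }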
@@ -7324,26 +7287,26 @@ smoke_check_6(int metadata_write_strategy)
}
/* Make sure coll entries do not cross the 80% threshold */
- HDassert(cache_ptr->max_cache_size*0.8 > cache_ptr->coll_list_size);
+ HDassert((double)cache_ptr->max_cache_size*0.8 > cache_ptr->coll_list_size);
}
- /* flush the file */
+ /* flush the file */
if ( H5Fflush(fid, H5F_SCOPE_GLOBAL) < 0 ) {
nerrors++;
if ( verbose ) {
- HDfprintf(stdout, "%d:%s: H5Fflush() failed.\n",
+ HDfprintf(stdout, "%d:%s: H5Fflush() failed.\n",
world_mpi_rank, FUNC);
}
}
/* Protect the first half of the entries collectively */
- file_ptr->coll_md_read = H5P_USER_TRUE;
+ H5CX_set_coll_metadata_read(TRUE);
for ( i = 0; i < (virt_num_data_entries / 2); i++ )
{
struct datum * entry_ptr;
entry_ptr = &(data[i]);
- lock_entry(file_ptr, i);
+ lock_entry(file_ptr, i);
if(TRUE != entry_ptr->header.coll_access) {
nerrors++;
@@ -7354,17 +7317,17 @@ smoke_check_6(int metadata_write_strategy)
}
/* Make sure coll entries do not cross the 80% threshold */
- HDassert(cache_ptr->max_cache_size*0.8 > cache_ptr->coll_list_size);
+ HDassert((double)cache_ptr->max_cache_size*0.8 > cache_ptr->coll_list_size);
}
/* protect the other half independently */
- file_ptr->coll_md_read = H5P_USER_FALSE;
+ H5CX_set_coll_metadata_read(FALSE);
for ( i = virt_num_data_entries/2; i < virt_num_data_entries; i++ )
{
struct datum * entry_ptr;
entry_ptr = &(data[i]);
- lock_entry(file_ptr, i);
+ lock_entry(file_ptr, i);
if(FALSE != entry_ptr->header.coll_access) {
nerrors++;
@@ -7375,7 +7338,7 @@ smoke_check_6(int metadata_write_strategy)
}
/* Make sure coll entries do not cross the 80% threshold */
- HDassert(cache_ptr->max_cache_size*0.8 > cache_ptr->coll_list_size);
+ HDassert((double)cache_ptr->max_cache_size*0.8 > cache_ptr->coll_list_size);
}
for ( i = 0; i < (virt_num_data_entries); i++ )
@@ -7389,7 +7352,7 @@ smoke_check_6(int metadata_write_strategy)
nerrors++;
if ( verbose ) {
- HDfprintf(stdout, "%d:%s: take_down_cache() failed.\n",
+ HDfprintf(stdout, "%d:%s: take_down_cache() failed.\n",
world_mpi_rank, FUNC);
}
}
@@ -7437,9 +7400,9 @@ smoke_check_6(int metadata_write_strategy)
if ( world_mpi_rank == 0 ) {
- if ( max_nerrors == 0 ) {
+ if ( max_nerrors == 0 ) {
- PASSED();
+ PASSED();
} else {
@@ -7454,18 +7417,18 @@ smoke_check_6(int metadata_write_strategy)
} /* smoke_check_6() */
-
+
/*****************************************************************************
*
- * Function: main()
+ * Function: main()
*
- * Purpose: Main function for the parallel cache test.
+ * Purpose: Main function for the parallel cache test.
*
- * Return: Success: 0
+ * Return: Success: 0
*
- * Failure: 1
+ * Failure: 1
*
- * Programmer: JRM -- 12/23/05
+ * Programmer: JRM -- 12/23/05
*
*****************************************************************************/
int
@@ -7492,7 +7455,7 @@ main(int argc, char **argv)
* calls. By then, MPI calls may not work.
*/
if (H5dont_atexit() < 0){
- printf("%d:Failed to turn off atexit processing. Continue.\n",
+ HDprintf("%d:Failed to turn off atexit processing. Continue.\n",
mpi_rank);
};
H5open();
@@ -7503,32 +7466,32 @@ main(int argc, char **argv)
#endif /* JRM */
if ( express_test ) {
- virt_num_data_entries = EXPRESS_VIRT_NUM_DATA_ENTRIES;
+ virt_num_data_entries = EXPRESS_VIRT_NUM_DATA_ENTRIES;
} else {
- virt_num_data_entries = STD_VIRT_NUM_DATA_ENTRIES;
+ virt_num_data_entries = STD_VIRT_NUM_DATA_ENTRIES;
}
#ifdef H5_HAVE_MPE
- if ( MAINPROCESS ) { printf(" Tests compiled for MPE.\n"); }
+ if ( MAINPROCESS ) { HDprintf(" Tests compiled for MPE.\n"); }
virt_num_data_entries = MPE_VIRT_NUM_DATA_ENTIES;
#endif /* H5_HAVE_MPE */
if (MAINPROCESS){
- printf("===================================\n");
- printf("Parallel metadata cache tests\n");
- printf(" mpi_size = %d\n", mpi_size);
- printf(" express_test = %d\n", express_test);
- printf("===================================\n");
+ HDprintf("===================================\n");
+ HDprintf("Parallel metadata cache tests\n");
+ HDprintf(" mpi_size = %d\n", mpi_size);
+ HDprintf(" express_test = %d\n", express_test);
+ HDprintf("===================================\n");
}
if ( mpi_size < 3 ) {
if ( MAINPROCESS ) {
- printf(" Need at least 3 processes. Exiting.\n");
+ HDprintf(" Need at least 3 processes. Exiting.\n");
}
goto finish;
}
@@ -7547,8 +7510,8 @@ main(int argc, char **argv)
/* setup file access property list with the world communicator */
if ( FAIL == (fapl = H5Pcreate(H5P_FILE_ACCESS)) ) {
nerrors++;
- if ( verbose ) {
- HDfprintf(stdout, "%d:%s: H5Pcreate() failed 1.\n",
+ if ( verbose ) {
+ HDfprintf(stdout, "%d:%s: H5Pcreate() failed 1.\n",
world_mpi_rank, FUNC);
}
}
@@ -7580,8 +7543,8 @@ main(int argc, char **argv)
/* close the fapl before we set it up again */
if ( H5Pclose(fapl) < 0 ) {
nerrors++;
- if ( verbose ) {
- HDfprintf(stdout, "%d:%s: H5Pclose() failed.\n",
+ if ( verbose ) {
+ HDfprintf(stdout, "%d:%s: H5Pclose() failed.\n",
world_mpi_rank, FUNC);
}
}
@@ -7591,9 +7554,9 @@ main(int argc, char **argv)
/* setup file access property list */
if ( FAIL == (fapl = H5Pcreate(H5P_FILE_ACCESS)) ) {
- nerrors++;
- if ( verbose ) {
- HDfprintf(stdout, "%d:%s: H5Pcreate() failed 2.\n",
+ nerrors++;
+ if ( verbose ) {
+ HDfprintf(stdout, "%d:%s: H5Pcreate() failed 2.\n",
world_mpi_rank, FUNC);
}
}
@@ -7601,8 +7564,8 @@ main(int argc, char **argv)
if ( H5Pset_fapl_mpio(fapl, file_mpi_comm, MPI_INFO_NULL) < 0 ) {
nerrors++;
- if ( verbose ) {
- HDfprintf(stdout, "%d:%s: H5Pset_fapl_mpio() failed 2.\n",
+ if ( verbose ) {
+ HDfprintf(stdout, "%d:%s: H5Pset_fapl_mpio() failed 2.\n",
world_mpi_rank, FUNC);
}
}
@@ -7620,7 +7583,7 @@ main(int argc, char **argv)
HDfprintf(stdout, "Errors in test initialization. Exiting.\n");
}
- goto finish;
+ goto finish;
}
/* run the tests */
@@ -7673,16 +7636,16 @@ finish:
* and exit.
*/
MPI_Barrier(MPI_COMM_WORLD);
- if (MAINPROCESS){ /* only process 0 reports */
- printf("===================================\n");
- if (failures){
- printf("***metadata cache tests detected %d failures***\n",
+ if (MAINPROCESS){ /* only process 0 reports */
+ HDprintf("===================================\n");
+ if (failures){
+ HDprintf("***metadata cache tests detected %d failures***\n",
failures);
- }
- else{
- printf("metadata cache tests finished with no failures\n");
- }
- printf("===================================\n");
+ }
+ else{
+ HDprintf("metadata cache tests finished with no failures\n");
+ }
+ HDprintf("===================================\n");
}
takedown_derived_types();
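Throughout this file, bare printf()/sprintf()/fprintf() calls become their HD-prefixed forms. These are HDF5's portability wrappers from H5private.h; on most platforms they map straight to the libc functions, though HDfprintf in particular may be an HDF5-supplied implementation with extended format support, so the exact mapping is platform- and version-dependent. A simplified illustration only -- the real definitions differ:

    /* Simplified sketch of the wrapper idea; see H5private.h for the
     * actual, platform-dependent definitions.
     */
    #ifndef HDprintf
    #define HDprintf(...)      printf(__VA_ARGS__)
    #endif
    #ifndef HDsprintf
    #define HDsprintf(S, ...)  sprintf(S, __VA_ARGS__)
    #endif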
diff --git a/testpar/t_cache_image.c b/testpar/t_cache_image.c
index 524a63f..14e3d10 100644
--- a/testpar/t_cache_image.c
+++ b/testpar/t_cache_image.c
@@ -14,12 +14,11 @@
/* Programmer: John Mainzer
* 7/13/15
*
- * This file contains tests specific to the cache image
- * feature implemented in H5C.c
+ * This file contains tests specific to the cache image
+ * feature implemented in H5C.c
*/
-#include "h5test.h"
#include "testphdf5.h"
-#include "testpar.h"
+
#include "cache_common.h"
#include "genall5.h"
@@ -28,7 +27,7 @@
#define DSET_SIZE (40 * CHUNK_SIZE)
#define MAX_NUM_DSETS 256
#define PAR_NUM_DSETS 32
-#define PAGE_SIZE (4 * 1024)
+#define PAGE_SIZE (4 * 1024)
#define PB_SIZE (64 * PAGE_SIZE)
/* global variable declarations: */
@@ -59,8 +58,8 @@ static void open_hdf5_file(const hbool_t create_file,
hid_t * file_id_ptr,
H5F_t ** file_ptr_ptr,
H5C_t ** cache_ptr_ptr,
- MPI_Comm comm,
- MPI_Info info,
+ MPI_Comm comm,
+ MPI_Info info,
int l_facc_type,
const hbool_t all_coll_metadata_ops,
const hbool_t coll_metadata_write,
@@ -70,11 +69,11 @@ static void verify_data_sets(hid_t file_id, int min_dset, int max_dset);
/* local test function declarations */
-static hbool_t parse_flags(int argc, char * argv[], hbool_t * setup_ptr,
+static hbool_t parse_flags(int argc, char * argv[], hbool_t * setup_ptr,
hbool_t * ici_ptr, int * file_idx_ptr, int * mpi_size_ptr, hbool_t display);
static void usage(void);
static unsigned construct_test_file(int test_file_index);
-static void par_create_dataset(int dset_num, hid_t file_id, int mpi_rank,
+static void par_create_dataset(int dset_num, hid_t file_id, int mpi_rank,
int mpi_size);
static void par_delete_dataset(int dset_num, hid_t file_id, int mpi_rank);
static void par_verify_dataset(int dset_num, hid_t file_id, int mpi_rank);
@@ -83,15 +82,15 @@ static hbool_t serial_insert_cache_image(int file_name_idx, int mpi_size);
static void serial_verify_dataset(int dset_num, hid_t file_id, int mpi_size);
/* top level test function declarations */
-static unsigned verify_cache_image_RO(int file_name_id,
+static unsigned verify_cache_image_RO(int file_name_id,
int md_write_strat, int mpi_rank);
-static unsigned verify_cache_image_RW(int file_name_id,
+static unsigned verify_cache_image_RW(int file_name_id,
int md_write_strat, int mpi_rank);
-static hbool_t smoke_check_1(MPI_Comm mpi_comm, MPI_Info mpi_info,
+static hbool_t smoke_check_1(MPI_Comm mpi_comm, MPI_Info mpi_info,
int mpi_rank, int mpi_size);
-
+
/****************************************************************************/
/***************************** Utility Functions ****************************/
/****************************************************************************/
@@ -99,52 +98,52 @@ static hbool_t smoke_check_1(MPI_Comm mpi_comm, MPI_Info mpi_info,
/*-------------------------------------------------------------------------
* Function: construct_test_file()
*
- * Purpose: This function attempts to mimic the typical "poor man's
- * parallel use case in which the file is passed between
- * processes, each of which open the file, write some data,
- * close the file, and then pass control on to the next
- * process.
+ * Purpose: This function attempts to mimic the typical "poor man's
+ * parallel" use case in which the file is passed between
+ * processes, each of which opens the file, writes some data,
+ * closes the file, and then passes control on to the next
+ * process.
*
- * In this case, we create one group for each process, and
- * populate it with a "zoo" of HDF5 objects selected to
- * (ideally) exercise all HDF5 on disk data structures.
+ * In this case, we create one group for each process, and
+ * populate it with a "zoo" of HDF5 objects selected to
+ * (ideally) exercise all HDF5 on disk data structures.
*
- * The end result is a test file used verify that PHDF5
- * can open a file with a cache image.
+ * The end result is a test file used to verify that PHDF5
+ * can open a file with a cache image.
*
- * Cycle of operation
+ * Cycle of operation
*
- * 1) Create a HDF5 file with the cache image FAPL entry.
+ * 1) Create a HDF5 file with the cache image FAPL entry.
*
- * Verify that the cache is informed of the cache image
- * FAPL entry.
+ * Verify that the cache is informed of the cache image
+ * FAPL entry.
*
- * Set all cache image flags, forcing full functionality.
+ * Set all cache image flags, forcing full functionality.
*
- * 2) Create a data set in the file.
+ * 2) Create a data set in the file.
*
- * 3) Close the file.
+ * 3) Close the file.
*
- * 4) Open the file.
+ * 4) Open the file.
*
- * Verify that the metadata cache is instructed to load
+ * Verify that the metadata cache is instructed to load
* the metadata cache image.
*
- * 5) Create a data set in the file.
+ * 5) Create a data set in the file.
*
- * 6) Close the file. If enough datasets have been created
+ * 6) Close the file. If enough datasets have been created
* goto 7. Otherwise return to 4.
*
- * 7) Open the file R/O.
+ * 7) Open the file R/O.
*
* Verify that the file contains a metadata cache image
* superblock extension message.
- *
- * 8) Verify all data sets.
*
- * Verify that the cache image has been loaded.
+ * 8) Verify all data sets.
+ *
+ * Verify that the cache image has been loaded.
*
- * 9) close the file.
+ * 9) close the file.
*
* Return: void
*
@@ -153,7 +152,7 @@ static hbool_t smoke_check_1(MPI_Comm mpi_comm, MPI_Info mpi_info,
*
* Modifications:
*
- * None.
+ * None.
*
*-------------------------------------------------------------------------
*/
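The numbered cycle of operation above condenses to the following skeleton (hypothetical; construct_test_file() below is the real implementation, and error checking is omitted):

    /* 1-3) create the file with the cache image FAPL entry, add a
     *      data set, close */
    hid_t fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id);
    /* ... create a data set ... */
    H5Fclose(fid);

    /* 4-6) reopen, add another data set, close; repeat until enough
     *      data sets exist */
    while ( num_dsets < max_dsets ) {
        fid = H5Fopen(filename, H5F_ACC_RDWR, fapl_id);
        /* ... create a data set; the cache image should be loaded ... */
        H5Fclose(fid);
        num_dsets++;
    }

    /* 7-9) open read-only, verify the superblock extension message and
     *      all data sets, close */
    fid = H5Fopen(filename, H5F_ACC_RDONLY, fapl_id);
    /* ... verify ... */
    H5Fclose(fid);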
@@ -175,7 +174,7 @@ construct_test_file(int test_file_index)
pass = TRUE;
- if ( show_progress )
+ if ( show_progress )
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
@@ -184,7 +183,7 @@ construct_test_file(int test_file_index)
HDassert(FILENAMES[test_file_index]);
- if ( h5_fixname(FILENAMES[test_file_index], H5P_DEFAULT,
+ if ( h5_fixname(FILENAMES[test_file_index], H5P_DEFAULT,
filename, sizeof(filename))
== NULL ) {
@@ -193,13 +192,13 @@ construct_test_file(int test_file_index)
}
}
- if ( show_progress )
+ if ( show_progress )
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
- /* 1) Create a HDF5 file with the cache image FAPL entry.
+ /* 1) Create a HDF5 file with the cache image FAPL entry.
*
- * Verify that the cache is informed of the cache image FAPL entry.
+ * Verify that the cache is informed of the cache image FAPL entry.
*
* Set flags forcing full function of the cache image feature.
*/
@@ -210,7 +209,7 @@ construct_test_file(int test_file_index)
/* mdci_sbem_expected */ FALSE,
/* read_only */ FALSE,
/* set_mdci_fapl */ TRUE,
- /* config_fsm */ TRUE,
+ /* config_fsm */ TRUE,
/* enable_page_buffer */ FALSE,
/* hdf_file_name */ filename,
/* cache_image_flags */ H5C_CI__ALL_FLAGS,
@@ -225,7 +224,7 @@ construct_test_file(int test_file_index)
/* md_write_strat */ 0);
}
- if ( show_progress )
+ if ( show_progress )
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
@@ -247,7 +246,7 @@ construct_test_file(int test_file_index)
}
#endif /* H5C_COLLECT_CACHE_STATS */
- if ( show_progress )
+ if ( show_progress )
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
@@ -263,7 +262,7 @@ construct_test_file(int test_file_index)
}
}
- if ( show_progress )
+ if ( show_progress )
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
@@ -282,7 +281,7 @@ construct_test_file(int test_file_index)
/* mdci_sbem_expected */ TRUE,
/* read_only */ FALSE,
/* set_mdci_fapl */ TRUE,
- /* config_fsm */ FALSE,
+ /* config_fsm */ FALSE,
/* enable_page_buffer */ FALSE,
/* hdf_file_name */ filename,
/* cache_image_flags */ H5C_CI__ALL_FLAGS,
@@ -356,7 +355,7 @@ construct_test_file(int test_file_index)
/* mdci_sbem_expected */ TRUE,
/* read_only */ TRUE,
/* set_mdci_fapl */ FALSE,
- /* config_fsm */ FALSE,
+ /* config_fsm */ FALSE,
/* enable_page_buffer */ FALSE,
/* hdf_file_name */ filename,
/* cache_image_flags */ H5C_CI__ALL_FLAGS,
@@ -375,7 +374,7 @@ construct_test_file(int test_file_index)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
- /* 8) Open and close all data sets.
+ /* 8) Open and close all data sets.
*
* Verify that the cache image has been loaded.
*/
@@ -414,15 +413,15 @@ construct_test_file(int test_file_index)
} /* construct_test_file() */
-
+
/*-------------------------------------------------------------------------
* Function: create_data_sets()
*
* Purpose: If pass is TRUE on entry, create the specified data sets
- * in the indicated file.
+ * in the indicated file.
*
- * Data sets and their contents must be well know, as we
- * will verify that they contain the expected data later.
+ * Data sets and their contents must be well known, as we
+ * will verify that they contain the expected data later.
*
* On failure, set pass to FALSE, and set failure_mssg
* to point to an appropriate failure message.
@@ -436,15 +435,15 @@ construct_test_file(int test_file_index)
*
* Modifications:
*
- * Added min_dset and max_dset parameters and supporting
- * code. This allows the caller to specify a range of
- * datasets to create.
- * JRM -- 8/20/15
+ * Added min_dset and max_dset parameters and supporting
+ * code. This allows the caller to specify a range of
+ * datasets to create.
+ * JRM -- 8/20/15
*
*-------------------------------------------------------------------------
*/
-static void
+static void
create_data_sets(hid_t file_id, int min_dset, int max_dset)
{
const char * fcn_name = "create_data_sets()";
@@ -520,7 +519,7 @@ create_data_sets(hid_t file_id, int min_dset, int max_dset)
/* create the dataset */
if ( pass ) {
- sprintf(dset_name, "/dset%03d", i);
+ HDsprintf(dset_name, "/dset%03d", i);
dataset_ids[i] = H5Dcreate2(file_id, dset_name, H5T_STD_I32BE,
dataspace_id, H5P_DEFAULT,
properties, H5P_DEFAULT);
@@ -666,8 +665,8 @@ create_data_sets(hid_t file_id, int min_dset, int max_dset)
/* read the chunk from file */
if ( pass ) {
- status = H5Dread(dataset_ids[m], H5T_NATIVE_INT,
- memspace_id, filespace_ids[m],
+ status = H5Dread(dataset_ids[m], H5T_NATIVE_INT,
+ memspace_id, filespace_ids[m],
H5P_DEFAULT, data_chunk);
if ( status < 0 ) {
@@ -692,7 +691,7 @@ create_data_sets(hid_t file_id, int min_dset, int max_dset)
valid_chunk = FALSE;
- if ( verbose ) {
+ if ( verbose ) {
HDfprintf(stdout,
"data_chunk[%0d][%0d] = %0d, expect %0d.\n",
@@ -702,7 +701,7 @@ create_data_sets(hid_t file_id, int min_dset, int max_dset)
HDfprintf(stdout,
"m = %d, i = %d, j = %d, k = %d, l = %d\n",
m, i, j, k, l);
- }
+ }
}
}
}
@@ -712,12 +711,12 @@ create_data_sets(hid_t file_id, int min_dset, int max_dset)
pass = FALSE;
failure_mssg = "slab validation failed.";
- if ( verbose ) {
+ if ( verbose ) {
- fprintf(stdout,
+ HDfprintf(stdout,
"Chunk (%0d, %0d) in /dset%03d is invalid.\n",
i, j, m);
- }
+ }
}
}
m++;
@@ -768,16 +767,16 @@ create_data_sets(hid_t file_id, int min_dset, int max_dset)
} /* create_data_sets() */
-
+
/*-------------------------------------------------------------------------
* Function: delete_data_sets()
*
- * Purpose: If pass is TRUE on entry, verify and then delete the
- * dataset(s) indicated by min_dset and max_dset in the
- * indicated file.
+ * Purpose: If pass is TRUE on entry, verify and then delete the
+ * dataset(s) indicated by min_dset and max_dset in the
+ * indicated file.
*
- * Data sets and their contents must be well know, as we
- * will verify that they contain the expected data later.
+ * Data sets and their contents must be well know, as we
+ * will verify that they contain the expected data later.
*
* On failure, set pass to FALSE, and set failure_mssg
* to point to an appropriate failure message.
@@ -792,17 +791,17 @@ create_data_sets(hid_t file_id, int min_dset, int max_dset)
* Modifications:
*
* None.
- * JRM -- 8/20/15
+ * JRM -- 8/20/15
*
*-------------------------------------------------------------------------
*/
-#if 0
+#if 0
/* this code will be needed to test full support of cache image
* in parallel -- keep it around against that day.
*
* -- JRM
*/
-static void
+static void
delete_data_sets(hid_t file_id, int min_dset, int max_dset)
{
const char * fcn_name = "delete_data_sets()";
@@ -831,13 +830,13 @@ delete_data_sets(hid_t file_id, int min_dset, int max_dset)
while ( ( pass ) && ( i <= max_dset ) )
{
- sprintf(dset_name, "/dset%03d", i);
+ HDsprintf(dset_name, "/dset%03d", i);
- if ( H5Ldelete(file_id, dset_name, H5P_DEFAULT) < 0) {
+ if ( H5Ldelete(file_id, dset_name, H5P_DEFAULT) < 0) {
pass = FALSE;
failure_mssg = "H5Ldelete() failed.";
- }
+ }
i++;
}
@@ -850,32 +849,32 @@ delete_data_sets(hid_t file_id, int min_dset, int max_dset)
} /* delete_data_sets() */
#endif
-
+
/*-------------------------------------------------------------------------
* Function: open_hdf5_file()
*
- * Purpose: If pass is true on entry, create or open the specified HDF5
- * and test to see if it has a metadata cache image superblock
- * extension message.
+ * Purpose: If pass is true on entry, create or open the specified HDF5 file
+ * and test to see if it has a metadata cache image superblock
+ * extension message.
*
- * Set pass to FALSE and issue a suitable failure
- * message if either the file contains a metadata cache image
- * superblock extension and mdci_sbem_expected is TRUE, or
- * vise versa.
+ * Set pass to FALSE and issue a suitable failure
+ * message if either the file contains a metadata cache image
+ * superblock extension and mdci_sbem_expected is TRUE, or
+ * vice versa.
*
- * If mdci_sbem_expected is TRUE, also verify that the metadata
- * cache has been advised of this.
+ * If mdci_sbem_expected is TRUE, also verify that the metadata
+ * cache has been advised of this.
*
- * If read_only is TRUE, open the file read only. Otherwise
- * open the file read/write.
+ * If read_only is TRUE, open the file read only. Otherwise
+ * open the file read/write.
*
- * If set_mdci_fapl is TRUE, set the metadata cache image
- * FAPL entry when opening the file, and verify that the
- * metadata cache is notified.
+ * If set_mdci_fapl is TRUE, set the metadata cache image
+ * FAPL entry when opening the file, and verify that the
+ * metadata cache is notified.
*
- * If config_fsm is TRUE, setup the persistant free space
- * manager. Note that this flag may only be set if
- * create_file is also TRUE.
+ * If config_fsm is TRUE, set up the persistent free space
+ * manager. Note that this flag may only be set if
+ * create_file is also TRUE.
*
* Return pointers to the cache data structure and file data
* structures.
@@ -893,10 +892,10 @@ delete_data_sets(hid_t file_id, int min_dset, int max_dset)
* Modifications:
*
* Modified function to handle parallel file creates / opens.
- *
+ *
* JRM -- 2/1/17
*
- * Modified function to handle
+ * Modified function to handle
*
*-------------------------------------------------------------------------
*/
@@ -904,17 +903,17 @@ delete_data_sets(hid_t file_id, int min_dset, int max_dset)
static void
open_hdf5_file(const hbool_t create_file,
const hbool_t mdci_sbem_expected,
- const hbool_t read_only,
- const hbool_t set_mdci_fapl,
- const hbool_t config_fsm,
+ const hbool_t read_only,
+ const hbool_t set_mdci_fapl,
+ const hbool_t config_fsm,
const hbool_t enable_page_buffer,
- const char * hdf_file_name,
+ const char * hdf_file_name,
const unsigned cache_image_flags,
hid_t * file_id_ptr,
H5F_t ** file_ptr_ptr,
H5C_t ** cache_ptr_ptr,
- MPI_Comm comm,
- MPI_Info info,
+ MPI_Comm comm,
+ MPI_Info info,
int l_facc_type,
const hbool_t all_coll_metadata_ops,
const hbool_t coll_metadata_write,
@@ -941,8 +940,8 @@ open_hdf5_file(const hbool_t create_file,
if ( pass )
{
- /* opening the file both read only and with a cache image
- * requested is a contradiction. We resolve it by ignoring
+ /* opening the file both read only and with a cache image
+ * requested is a contradiction. We resolve it by ignoring
* the cache image request silently.
*/
if ( ( create_file && mdci_sbem_expected ) ||
@@ -969,7 +968,7 @@ open_hdf5_file(const hbool_t create_file,
}
}
- if ( show_progress )
+ if ( show_progress )
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
/* create a file access property list. */
@@ -984,13 +983,13 @@ open_hdf5_file(const hbool_t create_file,
}
}
- if ( show_progress )
+ if ( show_progress )
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
/* call H5Pset_libver_bounds() on the fapl_id */
if ( pass ) {
- if ( H5Pset_libver_bounds(fapl_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST)
+ if ( H5Pset_libver_bounds(fapl_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST)
< 0 ) {
pass = FALSE;
@@ -998,7 +997,7 @@ open_hdf5_file(const hbool_t create_file,
}
}
- if ( show_progress )
+ if ( show_progress )
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
/* get metadata cache image config -- verify that it is the default */
@@ -1016,7 +1015,7 @@ open_hdf5_file(const hbool_t create_file,
H5AC__CURR_CACHE_IMAGE_CONFIG_VERSION ) ||
( cache_image_config.generate_image != FALSE ) ||
( cache_image_config.save_resize_status != FALSE ) ||
- ( cache_image_config.entry_ageout !=
+ ( cache_image_config.entry_ageout !=
H5AC__CACHE_IMAGE__ENTRY_AGEOUT__NONE ) ) {
pass = FALSE;
@@ -1024,7 +1023,7 @@ open_hdf5_file(const hbool_t create_file,
}
}
- if ( show_progress )
+ if ( show_progress )
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
/* set metadata cache image fapl entry if indicated */
@@ -1044,24 +1043,24 @@ open_hdf5_file(const hbool_t create_file,
}
}
- if ( show_progress )
+ if ( show_progress )
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
/* set up the persistent free space manager if indicated */
if ( ( pass ) && ( config_fsm ) ) {
- fcpl_id = H5Pcreate(H5P_FILE_CREATE);
+ fcpl_id = H5Pcreate(H5P_FILE_CREATE);
- if ( fcpl_id <= 0 ) {
+ if ( fcpl_id <= 0 ) {
- pass = FALSE;
- failure_mssg = "H5Pcreate(H5P_FILE_CREATE) failed.";
- }
+ pass = FALSE;
+ failure_mssg = "H5Pcreate(H5P_FILE_CREATE) failed.";
+ }
}
if ( ( pass ) && ( config_fsm ) ) {
- if ( H5Pset_file_space_strategy(fcpl_id, H5F_FSPACE_STRATEGY_PAGE,
+ if ( H5Pset_file_space_strategy(fcpl_id, H5F_FSPACE_STRATEGY_PAGE,
TRUE, (hsize_t)1) == FAIL ) {
pass = FALSE;
failure_mssg = "H5Pset_file_space_strategy() failed.\n";
@@ -1077,7 +1076,7 @@ open_hdf5_file(const hbool_t create_file,
}
}
- if ( show_progress )
+ if ( show_progress )
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
/* setup the page buffer if indicated */
@@ -1087,10 +1086,10 @@ open_hdf5_file(const hbool_t create_file,
pass = FALSE;
failure_mssg = "H5Pset_page_buffer_size() failed.\n";
- }
+ }
}
- if ( show_progress )
+ if ( show_progress )
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
@@ -1098,13 +1097,13 @@ open_hdf5_file(const hbool_t create_file,
/* set Parallel access with communicator */
if ( H5Pset_fapl_mpio(fapl_id, comm, info) < 0 ) {
-
+
pass = FALSE;
failure_mssg = "H5Pset_fapl_mpio() failed.\n";
}
}
- if ( show_progress )
+ if ( show_progress )
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
if ( ( pass ) && ( l_facc_type == FACC_MPIO ) ) {
@@ -1116,7 +1115,7 @@ open_hdf5_file(const hbool_t create_file,
}
}
- if ( show_progress )
+ if ( show_progress )
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
if ( ( pass ) && ( l_facc_type == FACC_MPIO ) ) {
@@ -1128,7 +1127,7 @@ open_hdf5_file(const hbool_t create_file,
}
}
- if ( show_progress )
+ if ( show_progress )
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
if ( ( pass ) && ( l_facc_type == FACC_MPIO ) ) {
@@ -1137,7 +1136,7 @@ open_hdf5_file(const hbool_t create_file,
H5AC_cache_config_t mdc_config;
mdc_config.version = H5C__CURR_AUTO_SIZE_CTL_VER;
-
+
if ( H5Pget_mdc_config(fapl_id, &mdc_config) < 0 ) {
pass = FALSE;
@@ -1153,7 +1152,7 @@ open_hdf5_file(const hbool_t create_file,
}
}
- if ( show_progress )
+ if ( show_progress )
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
/* open the file */
@@ -1161,13 +1160,13 @@ open_hdf5_file(const hbool_t create_file,
if ( create_file ) {
- if ( fcpl_id != -1 )
+ if ( fcpl_id != -1 )
- file_id = H5Fcreate(hdf_file_name, H5F_ACC_TRUNC,
- fcpl_id, fapl_id);
- else
+ file_id = H5Fcreate(hdf_file_name, H5F_ACC_TRUNC,
+ fcpl_id, fapl_id);
+ else
- file_id = H5Fcreate(hdf_file_name, H5F_ACC_TRUNC,
+ file_id = H5Fcreate(hdf_file_name, H5F_ACC_TRUNC,
H5P_DEFAULT, fapl_id);
} else {
@@ -1188,7 +1187,7 @@ open_hdf5_file(const hbool_t create_file,
} else {
- file_ptr = (struct H5F_t *)H5I_object_verify(file_id, H5I_FILE);
+ file_ptr = (struct H5F_t *)H5VL_object_verify(file_id, H5I_FILE);
if ( file_ptr == NULL ) {
@@ -1202,7 +1201,7 @@ open_hdf5_file(const hbool_t create_file,
}
}
- if ( show_progress )
+ if ( show_progress )
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
/* get a pointer to the file's internal data structure and then
@@ -1221,12 +1220,12 @@ open_hdf5_file(const hbool_t create_file,
}
}
- if ( show_progress )
+ if ( show_progress )
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
- /* verify expected page buffer status. At present, page buffering
- * must be disabled in parallel -- hopefully this will change in the
+ /* verify expected page buffer status. At present, page buffering
+ * must be disabled in parallel -- hopefully this will change in the
* future.
*/
if ( pass ) {
@@ -1245,7 +1244,7 @@ open_hdf5_file(const hbool_t create_file,
}
}
- if ( show_progress )
+ if ( show_progress )
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
@@ -1265,7 +1264,7 @@ open_hdf5_file(const hbool_t create_file,
}
}
- if ( show_progress )
+ if ( show_progress )
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
@@ -1273,26 +1272,26 @@ open_hdf5_file(const hbool_t create_file,
if ( set_mdci_fapl ) {
- if ( read_only ) {
+ if ( read_only ) {
- if ( ( image_ctl.version !=
+ if ( ( image_ctl.version !=
H5AC__CURR_CACHE_IMAGE_CONFIG_VERSION ) ||
( image_ctl.generate_image != FALSE ) ||
( image_ctl.save_resize_status != FALSE ) ||
- ( image_ctl.entry_ageout !=
+ ( image_ctl.entry_ageout !=
H5AC__CACHE_IMAGE__ENTRY_AGEOUT__NONE ) ||
( image_ctl.flags != H5C_CI__ALL_FLAGS ) ) {
pass = FALSE;
failure_mssg = "Unexpected image_ctl values(1).\n";
}
- } else {
+ } else {
- if ( ( image_ctl.version !=
+ if ( ( image_ctl.version !=
H5AC__CURR_CACHE_IMAGE_CONFIG_VERSION ) ||
- ( image_ctl.generate_image != TRUE ) ||
+ ( image_ctl.generate_image != TRUE ) ||
( image_ctl.save_resize_status != FALSE ) ||
- ( image_ctl.entry_ageout !=
+ ( image_ctl.entry_ageout !=
H5AC__CACHE_IMAGE__ENTRY_AGEOUT__NONE ) ||
( image_ctl.flags != H5C_CI__ALL_FLAGS ) ) {
@@ -1302,11 +1301,11 @@ open_hdf5_file(const hbool_t create_file,
}
} else {
- if ( ( image_ctl.version !=
+ if ( ( image_ctl.version !=
H5AC__CURR_CACHE_IMAGE_CONFIG_VERSION ) ||
( image_ctl.generate_image != FALSE ) ||
( image_ctl.save_resize_status != FALSE ) ||
- ( image_ctl.entry_ageout !=
+ ( image_ctl.entry_ageout !=
H5AC__CACHE_IMAGE__ENTRY_AGEOUT__NONE ) ||
( image_ctl.flags != H5C_CI__ALL_FLAGS ) ) {
@@ -1316,7 +1315,7 @@ open_hdf5_file(const hbool_t create_file,
}
}
- if ( show_progress )
+ if ( show_progress )
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
if ( ( pass ) && ( set_mdci_fapl ) ) {
@@ -1330,7 +1329,7 @@ open_hdf5_file(const hbool_t create_file,
}
}
- if ( show_progress )
+ if ( show_progress )
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
if ( pass ) {
@@ -1359,19 +1358,19 @@ open_hdf5_file(const hbool_t create_file,
pass = FALSE;
failure_mssg = "mdci sb extension message not present?\n";
}
- }
+ }
} else {
- if ( ( cache_ptr->load_image == TRUE ) ||
+ if ( ( cache_ptr->load_image == TRUE ) ||
( cache_ptr->delete_image == TRUE ) ) {
pass = FALSE;
failure_mssg = "mdci sb extension message present?\n";
- }
+ }
}
}
- if ( show_progress )
+ if ( show_progress )
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
if ( pass ) {
@@ -1382,11 +1381,11 @@ open_hdf5_file(const hbool_t create_file,
}
if ( show_progress ) {
- HDfprintf(stdout, "%s: cp = %d, pass = %d -- exiting.\n",
+ HDfprintf(stdout, "%s: cp = %d, pass = %d -- exiting.\n",
fcn_name, cp++, pass);
if ( ! pass )
- HDfprintf(stdout, "%s: failure_mssg = %s\n",
+ HDfprintf(stdout, "%s: failure_mssg = %s\n",
fcn_name, failure_mssg);
}
@@ -1394,11 +1393,11 @@ open_hdf5_file(const hbool_t create_file,
} /* open_hdf5_file() */
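For orientation, the FAPL plumbing that open_hdf5_file() performs can be
reduced to a few core calls. The following is a minimal sketch, not the
test's code; the file name and error handling are simplified:

    #include "hdf5.h"
    #include <mpi.h>

    /* Minimal sketch: open an existing HDF5 file for parallel access
     * through the MPI-IO file driver. Assumes MPI_Init() has already
     * been called; "example.h5" is a placeholder name.
     */
    static hid_t
    open_parallel_file(const char *name)
    {
        hid_t fapl_id, file_id;

        if ((fapl_id = H5Pcreate(H5P_FILE_ACCESS)) < 0)
            return -1;

        /* route all file I/O through MPI-IO on MPI_COMM_WORLD */
        if (H5Pset_fapl_mpio(fapl_id, MPI_COMM_WORLD, MPI_INFO_NULL) < 0) {
            H5Pclose(fapl_id);
            return -1;
        }

        file_id = H5Fopen(name, H5F_ACC_RDWR, fapl_id);
        H5Pclose(fapl_id); /* the open file keeps its own copy */
        return file_id;
    }

Note that H5Fopen() must be called collectively by every rank in the
communicator passed to H5Pset_fapl_mpio().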
-
+
/*-------------------------------------------------------------------------
* Function: par_create_dataset()
*
- * Purpose: Collectively create a chunked dataset, and fill it with
+ * Purpose: Collectively create a chunked dataset, and fill it with
* known values.
*
* On failure, set pass to FALSE, and set failure_mssg
@@ -1447,7 +1446,7 @@ par_create_dataset(int dset_num,
show_progress = (show_progress && (mpi_rank == 0));
verbose = (verbose && (mpi_rank == 0));
- sprintf(dset_name, "/dset%03d", dset_num);
+ HDsprintf(dset_name, "/dset%03d", dset_num);
if ( show_progress ) {
HDfprintf(stdout, "%s: dset name = \"%s\".\n", fcn_name, dset_name);
@@ -1469,7 +1468,7 @@ par_create_dataset(int dset_num,
}
}
- if ( show_progress )
+ if ( show_progress )
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
/* set the dataset creation plist to specify that the raw data is
@@ -1487,7 +1486,7 @@ par_create_dataset(int dset_num,
}
}
- if ( show_progress )
+ if ( show_progress )
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
if ( pass ) {
@@ -1503,7 +1502,7 @@ par_create_dataset(int dset_num,
}
}
- if ( show_progress )
+ if ( show_progress )
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
/* create the dataset */
@@ -1520,7 +1519,7 @@ par_create_dataset(int dset_num,
}
}
- if ( show_progress )
+ if ( show_progress )
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
/* get the file space ID */
@@ -1535,7 +1534,7 @@ par_create_dataset(int dset_num,
}
}
- if ( show_progress )
+ if ( show_progress )
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
/* create the mem space to be used to read and write chunks */
@@ -1553,7 +1552,7 @@ par_create_dataset(int dset_num,
}
}
- if ( show_progress )
+ if ( show_progress )
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
/* select in memory hyperslab */
@@ -1575,7 +1574,7 @@ par_create_dataset(int dset_num,
}
}
- if ( show_progress )
+ if ( show_progress )
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
/* setup the DXPL for collective I/O */
@@ -1590,7 +1589,7 @@ par_create_dataset(int dset_num,
}
}
- if ( show_progress )
+ if ( show_progress )
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
if ( pass ) {
@@ -1602,7 +1601,7 @@ par_create_dataset(int dset_num,
}
}
- if ( show_progress )
+ if ( show_progress )
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
/* initialize the dataset with collective writes */
@@ -1613,8 +1612,8 @@ par_create_dataset(int dset_num,
while ( ( pass ) && ( j < DSET_SIZE ) )
{
- if ( show_progress )
- HDfprintf(stdout, "%s: cp = %d.0, pass = %d.\n",
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d.0, pass = %d.\n",
fcn_name, cp, pass);
/* initialize the slab */
@@ -1628,8 +1627,8 @@ par_create_dataset(int dset_num,
}
}
- if ( show_progress )
- HDfprintf(stdout, "%s: cp = %d.1, pass = %d.\n",
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d.1, pass = %d.\n",
fcn_name, cp, pass);
/* select on disk hyperslab */
@@ -1637,7 +1636,7 @@ par_create_dataset(int dset_num,
offset[1] = (hsize_t)i;
offset[2] = (hsize_t)j;
a_size[0] = (hsize_t)1; /* size of hyperslab */
- a_size[1] = CHUNK_SIZE;
+ a_size[1] = CHUNK_SIZE;
a_size[2] = CHUNK_SIZE;
status = H5Sselect_hyperslab(filespace_id, H5S_SELECT_SET,
offset, NULL, a_size, NULL);
@@ -1648,8 +1647,8 @@ par_create_dataset(int dset_num,
failure_mssg = "disk H5Sselect_hyperslab() failed.";
}
- if ( show_progress )
- HDfprintf(stdout, "%s: cp = %d.2, pass = %d.\n",
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d.2, pass = %d.\n",
fcn_name, cp, pass);
/* write the chunk to file */
@@ -1662,8 +1661,8 @@ par_create_dataset(int dset_num,
failure_mssg = "H5Dwrite() failed.";
}
- if ( show_progress )
- HDfprintf(stdout, "%s: cp = %d.3, pass = %d.\n",
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d.3, pass = %d.\n",
fcn_name, cp, pass);
j += CHUNK_SIZE;
@@ -1673,7 +1672,7 @@ par_create_dataset(int dset_num,
}
cp++;
- if ( show_progress )
+ if ( show_progress )
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
/* read data from data sets and validate it */
@@ -1751,7 +1750,7 @@ par_create_dataset(int dset_num,
if ( verbose ) {
- fprintf(stdout,
+ HDfprintf(stdout,
"Chunk (%0d, %0d) in /dset%03d is invalid.\n",
i, j, dset_num);
}
@@ -1762,7 +1761,7 @@ par_create_dataset(int dset_num,
i += CHUNK_SIZE;
}
- if ( show_progress )
+ if ( show_progress )
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
/* close the data space */
@@ -1807,14 +1806,14 @@ par_create_dataset(int dset_num,
failure_mssg = "H5Pclose(dxpl) failed.";
}
- if ( show_progress )
+ if ( show_progress )
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
return;
} /* par_create_dataset() */
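The write loop above follows the standard PHDF5 collective-write pattern:
each rank selects its own hyperslab in the file dataspace, and all ranks
call H5Dwrite() with a collective transfer property list. A condensed
sketch, using a hypothetical one-row-per-rank layout rather than the
test's CHUNK_SIZE/DSET_SIZE geometry:

    /* Sketch: each rank writes one 100-element row of a 2-D dataset.
     * dset_id and mpi_rank are assumed to be set up by the caller.
     */
    static herr_t
    write_my_row(hid_t dset_id, int mpi_rank)
    {
        hsize_t offset[2] = { (hsize_t)mpi_rank, 0 }; /* this rank's row */
        hsize_t count[2]  = { 1, 100 };
        int     row[100];
        hid_t   filespace_id, memspace_id, dxpl_id;
        herr_t  ret;
        int     i;

        for (i = 0; i < 100; i++)
            row[i] = mpi_rank * 100 + i; /* arbitrary fill pattern */

        filespace_id = H5Dget_space(dset_id);
        memspace_id  = H5Screate_simple(2, count, NULL);
        dxpl_id      = H5Pcreate(H5P_DATASET_XFER);

        /* every rank must make this call and the H5Dwrite() below */
        H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE);
        H5Sselect_hyperslab(filespace_id, H5S_SELECT_SET, offset,
                            NULL, count, NULL);
        ret = H5Dwrite(dset_id, H5T_NATIVE_INT, memspace_id,
                       filespace_id, dxpl_id, row);

        H5Pclose(dxpl_id);
        H5Sclose(memspace_id);
        H5Sclose(filespace_id);
        return ret;
    }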
-
+
/*-------------------------------------------------------------------------
* Function: par_delete_dataset()
*
@@ -1849,7 +1848,7 @@ par_delete_dataset(int dset_num,
show_progress = (show_progress && (mpi_rank == 0));
- sprintf(dset_name, "/dset%03d", dset_num);
+ HDsprintf(dset_name, "/dset%03d", dset_num);
if ( show_progress ) {
HDfprintf(stdout, "%s: dset name = \"%s\".\n", fcn_name, dset_name);
@@ -1862,7 +1861,7 @@ par_delete_dataset(int dset_num,
par_verify_dataset(dset_num, file_id, mpi_rank);
}
- if ( show_progress )
+ if ( show_progress )
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
/* delete the target dataset */
@@ -1875,21 +1874,29 @@ par_delete_dataset(int dset_num,
}
}
- if ( show_progress )
+ if ( show_progress )
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
return;
} /* par_delete_dataset() */
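Deleting the dataset amounts to unlinking it from the group structure;
a one-call sketch (the dataset path is illustrative):

    /* In parallel HDF5, calls that modify file structure, including
     * H5Ldelete(), must be made collectively by all ranks.
     */
    if (H5Ldelete(file_id, "/dset000", H5P_DEFAULT) < 0)
        HDfprintf(stderr, "H5Ldelete() failed.\n");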
-
+
+/* This test uses many POSIX things that are not available on
+ * Windows. We're using a check for fork(2) here as a proxy for
+ * all POSIX/Unix/Linux things until this test can be made
+ * more platform-independent.
+ */
+#ifdef H5_HAVE_FORK
+
+
/*-------------------------------------------------------------------------
* Function: par_insert_cache_image()
*
* Purpose: Insert a cache image in the supplied file.
*
- * At present, cache image is not enabled in the parallel
- * so we have to insert the cache image with a serial
+ * At present, cache image is not enabled in the parallel
+ * so we have to insert the cache image with a serial
* process. Do this via a fork and an execv from process 0.
* All processes wait until the child process completes, and
* then return.
@@ -1925,8 +1932,8 @@ par_insert_cache_image(int file_name_idx, int mpi_rank, int mpi_size )
int child_status;
pid_t child_pid;
- sprintf(file_name_idx_str, "%d", file_name_idx);
- sprintf(mpi_size_str, "%d", mpi_size);
+ HDsprintf(file_name_idx_str, "%d", file_name_idx);
+ HDsprintf(mpi_size_str, "%d", mpi_size);
child_pid = fork();
@@ -1935,22 +1942,22 @@ par_insert_cache_image(int file_name_idx, int mpi_rank, int mpi_size )
 /* fun and games to shut up the compiler */
char param0[32] = "t_cache_image";
char param1[32] = "ici";
- char * child_argv[] = {param0,
- param1,
- file_name_idx_str,
- mpi_size_str,
+ char * child_argv[] = {param0,
+ param1,
+ file_name_idx_str,
+ mpi_size_str,
NULL};
/* we may need to play with the path here */
if ( execv("t_cache_image", child_argv) == -1 ) {
- HDfprintf(stdout,
+ HDfprintf(stdout,
"execl() of ici process failed. errno = %d(%s)\n",
errno, strerror(errno));
- exit(1);
+ HDexit(1);
}
- } else if ( child_pid != -1 ) {
+ } else if ( child_pid != -1 ) {
/* this is the parent process -- wait until child is done */
if ( -1 == waitpid(child_pid, &child_status, WUNTRACED)) {
@@ -1973,7 +1980,7 @@ par_insert_cache_image(int file_name_idx, int mpi_rank, int mpi_size )
}
} else { /* fork failed */
- HDfprintf(stdout,
+ HDfprintf(stdout,
"can't create process to insert cache image.\n");
pass = FALSE;
}
@@ -1982,8 +1989,8 @@ par_insert_cache_image(int file_name_idx, int mpi_rank, int mpi_size )
if ( pass ) {
- /* make sure insertion of the cache image is complete
- * before proceeding
+ /* make sure insertion of the cache image is complete
+ * before proceeding
*/
MPI_Barrier(MPI_COMM_WORLD);
}
@@ -1991,8 +1998,17 @@ par_insert_cache_image(int file_name_idx, int mpi_rank, int mpi_size )
return;
} /* par_insert_cache_image() */
+#else /* H5_HAVE_FORK */
+
+static void
+par_insert_cache_image(int file_name_idx, int mpi_rank, int mpi_size )
+{
+ return;
+} /* par_insert_cache_image() */
+
+#endif /* H5_HAVE_FORK */
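The fork/execv/waitpid pattern that par_insert_cache_image() relies on is
plain POSIX; a self-contained sketch with a hypothetical helper program
name:

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/types.h>
    #include <sys/wait.h>
    #include <unistd.h>

    /* Sketch: run "./helper" in a child process and wait for it to
     * finish. Returns 0 if the child exited with status 0.
     */
    static int
    run_serial_helper(void)
    {
        pid_t pid = fork();

        if (pid == 0) {
            /* child: replace this process image with the helper */
            char  prog[] = "./helper";
            char *argv[] = { prog, NULL };

            execv(prog, argv);
            /* reached only if execv() fails */
            fprintf(stderr, "execv() failed: %s\n", strerror(errno));
            _exit(1);
        }
        else if (pid > 0) {
            int status;

            if (waitpid(pid, &status, 0) == -1)
                return -1;
            return (WIFEXITED(status) && WEXITSTATUS(status) == 0) ? 0 : -1;
        }
        return -1; /* fork() failed */
    }

In the test, the fork is done from process 0, and an MPI_Barrier() keeps
the other ranks from touching the file until the serial child completes.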
+
-
/*-------------------------------------------------------------------------
* Function: par_verify_dataset()
*
@@ -2040,7 +2056,7 @@ par_verify_dataset(int dset_num,
show_progress = (show_progress && (mpi_rank == 0));
verbose = (verbose && (mpi_rank == 0));
- sprintf(dset_name, "/dset%03d", dset_num);
+ HDsprintf(dset_name, "/dset%03d", dset_num);
if ( show_progress ) {
HDfprintf(stdout, "%s: dset name = \"%s\".\n", fcn_name, dset_name);
@@ -2072,7 +2088,7 @@ par_verify_dataset(int dset_num,
}
}
- if ( show_progress )
+ if ( show_progress )
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
/* create the mem space to be used to read */
@@ -2090,7 +2106,7 @@ par_verify_dataset(int dset_num,
}
}
- if ( show_progress )
+ if ( show_progress )
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
/* select in memory hyperslab */
@@ -2112,7 +2128,7 @@ par_verify_dataset(int dset_num,
}
}
- if ( show_progress )
+ if ( show_progress )
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
/* setup the DXPL for collective I/O */
@@ -2127,7 +2143,7 @@ par_verify_dataset(int dset_num,
}
}
- if ( show_progress )
+ if ( show_progress )
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
if ( pass ) {
@@ -2139,7 +2155,7 @@ par_verify_dataset(int dset_num,
}
}
- if ( show_progress )
+ if ( show_progress )
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
/* read data from data sets and validate it */
@@ -2217,7 +2233,7 @@ par_verify_dataset(int dset_num,
if ( verbose ) {
- fprintf(stdout,
+ HDfprintf(stdout,
"Chunk (%0d, %0d) in /dset%03d is invalid.\n",
i, j, dset_num);
}
@@ -2228,7 +2244,7 @@ par_verify_dataset(int dset_num,
i += CHUNK_SIZE;
}
- if ( show_progress )
+ if ( show_progress )
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
/* close the file space */
@@ -2259,23 +2275,23 @@ par_verify_dataset(int dset_num,
failure_mssg = "H5Pclose(dxpl) failed.";
}
- if ( show_progress )
+ if ( show_progress )
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
return;
} /* par_verify_dataset() */
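Verification mirrors the write path: the same hyperslab selections and
the same collective DXPL, but with H5Dread() followed by a comparison
against the expected fill pattern. A fragment sketch, continuing the
hypothetical row layout from the write sketch above (memspace_id,
filespace_id, and dxpl_id are assumed set up the same way):

    /* Sketch: read this rank's row back collectively and validate it. */
    int    row[100], i, valid = 1;
    herr_t status;

    status = H5Dread(dset_id, H5T_NATIVE_INT, memspace_id,
                     filespace_id, dxpl_id, row);
    if (status < 0)
        valid = 0;

    for (i = 0; valid && i < 100; i++)
        if (row[i] != mpi_rank * 100 + i) /* hypothetical pattern */
            valid = 0;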
-
+
/*-------------------------------------------------------------------------
* Function: serial_insert_cache_image()
*
* Purpose: Insert a cache image in the supplied file.
*
- * To populate the cache image, validate the contents
- * of the file before closing.
+ * To populate the cache image, validate the contents
+ * of the file before closing.
*
- * On failure, print an appropriate error message and
+ * On failure, print an appropriate error message and
* return FALSE.
*
 * Return: TRUE if successful, FALSE otherwise.
@@ -2329,7 +2345,7 @@ serial_insert_cache_image(int file_name_idx, int mpi_size )
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
- /* 2) Open the PHDF5 file with the cache image FAPL entry.
+ /* 2) Open the PHDF5 file with the cache image FAPL entry.
*/
if ( pass ) {
@@ -2388,7 +2404,7 @@ serial_insert_cache_image(int file_name_idx, int mpi_size )
} /* serial_insert_cache_image() */
-
+
/*-------------------------------------------------------------------------
* Function: serial_verify_dataset()
*
@@ -2432,7 +2448,7 @@ serial_verify_dataset(int dset_num,
hid_t dset_id = -1;
hid_t filespace_id = -1;
- sprintf(dset_name, "/dset%03d", dset_num);
+ HDsprintf(dset_name, "/dset%03d", dset_num);
if ( show_progress ) {
HDfprintf(stdout, "%s: dset name = \"%s\".\n", fcn_name, dset_name);
@@ -2464,7 +2480,7 @@ serial_verify_dataset(int dset_num,
}
}
- if ( show_progress )
+ if ( show_progress )
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
/* create the mem space to be used to read */
@@ -2482,7 +2498,7 @@ serial_verify_dataset(int dset_num,
}
}
- if ( show_progress )
+ if ( show_progress )
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
/* select in memory hyperslab */
@@ -2504,7 +2520,7 @@ serial_verify_dataset(int dset_num,
}
}
- if ( show_progress )
+ if ( show_progress )
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
@@ -2587,7 +2603,7 @@ serial_verify_dataset(int dset_num,
if ( verbose ) {
- fprintf(stdout,
+ HDfprintf(stdout,
"Chunk (%0d, %0d) in /dset%03d is invalid.\n",
j, k, dset_num);
}
@@ -2601,7 +2617,7 @@ serial_verify_dataset(int dset_num,
}
- if ( show_progress )
+ if ( show_progress )
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
/* close the file space */
@@ -2625,18 +2641,18 @@ serial_verify_dataset(int dset_num,
failure_mssg = "H5Sclose(memspace_id) failed.";
}
- if ( show_progress )
+ if ( show_progress )
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
return;
} /* serial_verify_dataset() */
-
+
/*-------------------------------------------------------------------------
* Function: parse_flags
*
- * Purpose: Parse the flags passed to this program, and load the
+ * Purpose: Parse the flags passed to this program, and load the
* values into the supplied field.
*
* Return: Success: 1
@@ -2648,7 +2664,7 @@ serial_verify_dataset(int dset_num,
*-------------------------------------------------------------------------
*/
static hbool_t
-parse_flags(int argc, char * argv[], hbool_t * setup_ptr,
+parse_flags(int argc, char * argv[], hbool_t * setup_ptr,
hbool_t * ici_ptr, int * file_idx_ptr, int * mpi_size_ptr, hbool_t display)
{
const char * fcn_name = "parse_flags()";
@@ -2669,7 +2685,7 @@ parse_flags(int argc, char * argv[], hbool_t * setup_ptr,
}
- if ( ( success ) &&
+ if ( ( success ) &&
( ( argc != 1 ) && ( argc != 2 ) && ( argc != 4 ) ) ) {
success = FALSE;
@@ -2716,7 +2732,7 @@ parse_flags(int argc, char * argv[], hbool_t * setup_ptr,
else if ( *ici_ptr )
- HDfprintf(stdout, "t_cache_image ici %d %d\n",
+ HDfprintf(stdout, "t_cache_image ici %d %d\n",
*file_idx_ptr, *mpi_size_ptr);
else
@@ -2728,7 +2744,7 @@ parse_flags(int argc, char * argv[], hbool_t * setup_ptr,
} /* parse_flags() */
-
+
/*-------------------------------------------------------------------------
* Function: usage
*
@@ -2738,7 +2754,7 @@ parse_flags(int argc, char * argv[], hbool_t * setup_ptr,
* Return: void
*
* Programmer: John Mainzer
- * 4/28/11
+ * 4/28/11
*
* Modifications:
*
@@ -2785,24 +2801,24 @@ usage(void)
int i = 0;
while(s[i] != NULL) {
- fprintf(stdout, "%s", s[i]);
+ HDfprintf(stdout, "%s", s[i]);
i++;
}
return;
} /* usage() */
-
+
/*-------------------------------------------------------------------------
* Function: verify_data_sets()
*
- * Purpose: If pass is TRUE on entry, verify that the data sets in the
- * file exist and contain the expected data.
+ * Purpose: If pass is TRUE on entry, verify that the data sets in the
+ * file exist and contain the expected data.
*
- * Note that these data sets were created by
- * create_data_sets() above. Thus any changes in that
- * function must be reflected in this function, and
- * vise-versa.
+ * Note that these data sets were created by
+ * create_data_sets() above. Thus any changes in that
+ * function must be reflected in this function, and
+ * vice-versa.
*
* On failure, set pass to FALSE, and set failure_mssg
* to point to an appropriate failure message.
@@ -2816,15 +2832,15 @@ usage(void)
*
* Modifications:
*
- * Added min_dset and max_dset parameters and supporting
- * code. This allows the caller to specify a range of
- * datasets to verify.
- * JRM -- 8/20/15
+ * Added min_dset and max_dset parameters and supporting
+ * code. This allows the caller to specify a range of
+ * datasets to verify.
+ * JRM -- 8/20/15
*
*-------------------------------------------------------------------------
*/
-static void
+static void
verify_data_sets(hid_t file_id, int min_dset, int max_dset)
{
const char * fcn_name = "verify_data_sets()";
@@ -2860,7 +2876,7 @@ verify_data_sets(hid_t file_id, int min_dset, int max_dset)
/* open the dataset */
if ( pass ) {
- sprintf(dset_name, "/dset%03d", i);
+ HDsprintf(dset_name, "/dset%03d", i);
dataset_ids[i] = H5Dopen2(file_id, dset_name, H5P_DEFAULT);
if ( dataset_ids[i] < 0 ) {
@@ -2952,8 +2968,8 @@ verify_data_sets(hid_t file_id, int min_dset, int max_dset)
/* read the chunk from file */
if ( pass ) {
- status = H5Dread(dataset_ids[m], H5T_NATIVE_INT,
- memspace_id, filespace_ids[m],
+ status = H5Dread(dataset_ids[m], H5T_NATIVE_INT,
+ memspace_id, filespace_ids[m],
H5P_DEFAULT, data_chunk);
if ( status < 0 ) {
@@ -2978,8 +2994,8 @@ verify_data_sets(hid_t file_id, int min_dset, int max_dset)
valid_chunk = FALSE;
- if ( verbose ) {
-
+ if ( verbose ) {
+
HDfprintf(stdout,
"data_chunk[%0d][%0d] = %0d, expect %0d.\n",
k, l, data_chunk[k][l],
@@ -2988,7 +3004,7 @@ verify_data_sets(hid_t file_id, int min_dset, int max_dset)
HDfprintf(stdout,
"m = %d, i = %d, j = %d, k = %d, l = %d\n",
m, i, j, k, l);
- }
+ }
}
}
}
@@ -2998,12 +3014,12 @@ verify_data_sets(hid_t file_id, int min_dset, int max_dset)
pass = FALSE;
failure_mssg = "slab validation failed.";
- if ( verbose ) {
+ if ( verbose ) {
- fprintf(stdout,
+ HDfprintf(stdout,
"Chunk (%0d, %0d) in /dset%03d is invalid.\n",
i, j, m);
- }
+ }
}
}
m++;
@@ -3054,7 +3070,7 @@ verify_data_sets(hid_t file_id, int min_dset, int max_dset)
} /* verify_data_sets() */
-
+
/****************************************************************************/
/******************************* Test Functions *****************************/
/****************************************************************************/
@@ -3062,21 +3078,21 @@ verify_data_sets(hid_t file_id, int min_dset, int max_dset)
/*-------------------------------------------------------------------------
* Function: verify_cache_image_RO()
*
- * Purpose: Verify that a HDF5 file containing a cache image is
- * opened R/O and read correctly by PHDF5 with the specified
+ * Purpose: Verify that a HDF5 file containing a cache image is
+ * opened R/O and read correctly by PHDF5 with the specified
* metadata write strategy.
- *
+ *
* Basic cycle of operation is as follows:
*
- * 1) Open the test file created at the beginning of this
- * test read only.
+ * 1) Open the test file created at the beginning of this
+ * test read only.
*
- * Verify that the file contains a cache image.
+ * Verify that the file contains a cache image.
*
- * Verify that only process 0 reads the cache image.
+ * Verify that only process 0 reads the cache image.
*
- * Verify that all other processes receive the cache
- * image block from process 0.
+ * Verify that all other processes receive the cache
+ * image block from process 0.
*
* 2) Verify that the file contains the expected data.
*
@@ -3096,7 +3112,7 @@ verify_data_sets(hid_t file_id, int min_dset, int max_dset)
*
* Modifications:
*
- * None.
+ * None.
*
*-------------------------------------------------------------------------
*/
@@ -3135,14 +3151,14 @@ verify_cache_image_RO(int file_name_id, int md_write_strat, int mpi_rank)
show_progress = ( ( show_progress ) && ( mpi_rank == 0 ) );
- if ( show_progress )
+ if ( show_progress )
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
/* setup the file name */
if ( pass ) {
- if ( h5_fixname(FILENAMES[file_name_id], H5P_DEFAULT,
+ if ( h5_fixname(FILENAMES[file_name_id], H5P_DEFAULT,
filename, sizeof(filename)) == NULL ) {
pass = FALSE;
@@ -3150,7 +3166,7 @@ verify_cache_image_RO(int file_name_id, int md_write_strat, int mpi_rank)
}
}
- if ( show_progress )
+ if ( show_progress )
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
@@ -3165,7 +3181,7 @@ verify_cache_image_RO(int file_name_id, int md_write_strat, int mpi_rank)
/* mdci_sbem_expected */ TRUE,
/* read_only */ TRUE,
/* set_mdci_fapl */ FALSE,
- /* config_fsm */ FALSE,
+ /* config_fsm */ FALSE,
/* enable_page_buffer */ FALSE,
/* hdf_file_name */ filename,
/* cache_image_flags */ H5C_CI__ALL_FLAGS,
@@ -3180,15 +3196,15 @@ verify_cache_image_RO(int file_name_id, int md_write_strat, int mpi_rank)
/* md_write_strat */ md_write_strat);
}
- if ( show_progress )
+ if ( show_progress )
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
- /* 2) Verify that the file contains the expected data.
+ /* 2) Verify that the file contains the expected data.
*
* Verify that only process 0 reads the cache image.
*
- * Verify that all other processes receive the cache
+ * Verify that all other processes receive the cache
* image block from process 0.
*/
@@ -3210,14 +3226,14 @@ verify_cache_image_RO(int file_name_id, int md_write_strat, int mpi_rank)
}
#endif /* H5C_COLLECT_CACHE_STATS */
- if ( show_progress )
+ if ( show_progress )
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
- /* Verify that all other processes receive the cache image block
+ /* Verify that all other processes receive the cache image block
* from process 0.
- *
- * Since we have alread verified that only process 0 has read the
- * image, it is sufficient to verify that the image was loaded on
+ *
+ * Since we have already verified that only process 0 has read the
+ * image, it is sufficient to verify that the image was loaded on
* all processes.
*/
#if H5C_COLLECT_CACHE_STATS
@@ -3231,12 +3247,12 @@ verify_cache_image_RO(int file_name_id, int md_write_strat, int mpi_rank)
}
#endif /* H5C_COLLECT_CACHE_STATS */
- if ( show_progress )
+ if ( show_progress )
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
/* 3) Close the file. */
-
+
if ( pass ) {
if ( H5Fclose(file_id) < 0 ) {
@@ -3247,7 +3263,7 @@ verify_cache_image_RO(int file_name_id, int md_write_strat, int mpi_rank)
}
}
- if ( show_progress )
+ if ( show_progress )
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
@@ -3259,7 +3275,7 @@ verify_cache_image_RO(int file_name_id, int md_write_strat, int mpi_rank)
/* mdci_sbem_expected */ TRUE,
/* read_only */ TRUE,
/* set_mdci_fapl */ FALSE,
- /* config_fsm */ FALSE,
+ /* config_fsm */ FALSE,
/* enable_page_buffer */ FALSE,
/* hdf_file_name */ filename,
/* cache_image_flags */ H5C_CI__ALL_FLAGS,
@@ -3274,12 +3290,12 @@ verify_cache_image_RO(int file_name_id, int md_write_strat, int mpi_rank)
/* md_write_strat */ md_write_strat);
}
- if ( show_progress )
+ if ( show_progress )
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
/* 5) Verify that the file contains the expected data. */
-
+
if ( pass ) {
verify_data_sets(file_id, 0, MAX_NUM_DSETS - 1);
@@ -3298,7 +3314,7 @@ verify_cache_image_RO(int file_name_id, int md_write_strat, int mpi_rank)
/* 6) Close the file. */
-
+
if ( pass ) {
if ( H5Fclose(file_id) < 0 ) {
@@ -3309,7 +3325,7 @@ verify_cache_image_RO(int file_name_id, int md_write_strat, int mpi_rank)
}
}
- if ( show_progress )
+ if ( show_progress )
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
@@ -3324,7 +3340,7 @@ verify_cache_image_RO(int file_name_id, int md_write_strat, int mpi_rank)
H5_FAILED();
- if ( show_progress )
+ if ( show_progress )
HDfprintf(stdout, "%s: failure_mssg = \"%s\"\n", failure_mssg);
}
}
@@ -3334,32 +3350,32 @@ verify_cache_image_RO(int file_name_id, int md_write_strat, int mpi_rank)
} /* verify_cache_image_RO() */
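The cache-image request that these tests exercise is made through the
FAPL. A sketch of the configuration step (this is the request that, at
present, the library silently ignores in parallel, which is exactly what
the tests verify):

    /* Sketch: ask for a metadata cache image to be written on close. */
    H5AC_cache_image_config_t config;

    config.version            = H5AC__CURR_CACHE_IMAGE_CONFIG_VERSION;
    config.generate_image     = TRUE;
    config.save_resize_status = FALSE;
    config.entry_ageout       = H5AC__CACHE_IMAGE__ENTRY_AGEOUT__NONE;

    if (H5Pset_mdc_image_config(fapl_id, &config) < 0)
        HDfprintf(stderr, "H5Pset_mdc_image_config() failed.\n");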
-
+
/*-------------------------------------------------------------------------
* Function: verify_cache_image_RW()
*
- * Purpose: Verify that a HDF5 file containing a cache image is
- * opened and read correctly by PHDF5 with the specified
+ * Purpose: Verify that a HDF5 file containing a cache image is
+ * opened and read correctly by PHDF5 with the specified
* metadata write strategy.
- *
+ *
* Basic cycle of operation is as follows:
*
- * 1) Open the test file created at the beginning of this
- * test.
+ * 1) Open the test file created at the beginning of this
+ * test.
*
- * Verify that the file contains a cache image.
+ * Verify that the file contains a cache image.
*
* 2) Verify that the file contains the expected data.
*
- * Verify that only process 0 reads the cache image.
+ * Verify that only process 0 reads the cache image.
*
- * Verify that all other processes receive the cache
- * image block from process 0.
+ * Verify that all other processes receive the cache
+ * image block from process 0.
*
*
* 3) Close the file.
*
- * 4) Open the file, and verify that it doesn't contain
+ * 4) Open the file, and verify that it doesn't contain
* a cache image.
*
* 5) Verify that the file contains the expected data.
@@ -3375,7 +3391,7 @@ verify_cache_image_RO(int file_name_id, int md_write_strat, int mpi_rank)
*
* Modifications:
*
- * None.
+ * None.
*
*-------------------------------------------------------------------------
*/
@@ -3414,14 +3430,14 @@ verify_cache_image_RW(int file_name_id, int md_write_strat, int mpi_rank)
show_progress = ( ( show_progress ) && ( mpi_rank == 0 ) );
- if ( show_progress )
+ if ( show_progress )
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
/* setup the file name */
if ( pass ) {
- if ( h5_fixname(FILENAMES[file_name_id], H5P_DEFAULT,
+ if ( h5_fixname(FILENAMES[file_name_id], H5P_DEFAULT,
filename, sizeof(filename)) == NULL ) {
pass = FALSE;
@@ -3429,7 +3445,7 @@ verify_cache_image_RW(int file_name_id, int md_write_strat, int mpi_rank)
}
}
- if ( show_progress )
+ if ( show_progress )
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
@@ -3439,7 +3455,7 @@ verify_cache_image_RW(int file_name_id, int md_write_strat, int mpi_rank)
*
* Verify that only process 0 reads the cache image.
*
- * Verify that all other processes receive the cache
+ * Verify that all other processes receive the cache
* image block from process 0.
*/
@@ -3449,7 +3465,7 @@ verify_cache_image_RW(int file_name_id, int md_write_strat, int mpi_rank)
/* mdci_sbem_expected */ TRUE,
/* read_only */ FALSE,
/* set_mdci_fapl */ FALSE,
- /* config_fsm */ FALSE,
+ /* config_fsm */ FALSE,
/* enable_page_buffer */ FALSE,
/* hdf_file_name */ filename,
/* cache_image_flags */ H5C_CI__ALL_FLAGS,
@@ -3464,7 +3480,7 @@ verify_cache_image_RW(int file_name_id, int md_write_strat, int mpi_rank)
/* md_write_strat */ md_write_strat);
}
- if ( show_progress )
+ if ( show_progress )
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
@@ -3472,7 +3488,7 @@ verify_cache_image_RW(int file_name_id, int md_write_strat, int mpi_rank)
*
* Verify that only process 0 reads the cache image.
*
- * Verify that all other processes receive the cache
+ * Verify that all other processes receive the cache
* image block from process 0.
*/
if ( pass ) {
@@ -3493,14 +3509,14 @@ verify_cache_image_RW(int file_name_id, int md_write_strat, int mpi_rank)
}
#endif /* H5C_COLLECT_CACHE_STATS */
- if ( show_progress )
+ if ( show_progress )
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
- /* Verify that all other processes receive the cache image block
+ /* Verify that all other processes receive the cache image block
* from process 0.
- *
- * Since we have alread verified that only process 0 has read the
- * image, it is sufficient to verify that the image was loaded on
+ *
+ * Since we have already verified that only process 0 has read the
+ * image, it is sufficient to verify that the image was loaded on
* all processes.
*/
#if H5C_COLLECT_CACHE_STATS
@@ -3514,12 +3530,12 @@ verify_cache_image_RW(int file_name_id, int md_write_strat, int mpi_rank)
}
#endif /* H5C_COLLECT_CACHE_STATS */
- if ( show_progress )
+ if ( show_progress )
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
/* 3) Close the file. */
-
+
if ( pass ) {
if ( H5Fclose(file_id) < 0 ) {
@@ -3530,7 +3546,7 @@ verify_cache_image_RW(int file_name_id, int md_write_strat, int mpi_rank)
}
}
- if ( show_progress )
+ if ( show_progress )
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
@@ -3542,7 +3558,7 @@ verify_cache_image_RW(int file_name_id, int md_write_strat, int mpi_rank)
/* mdci_sbem_expected */ FALSE,
/* read_only */ FALSE,
/* set_mdci_fapl */ FALSE,
- /* config_fsm */ FALSE,
+ /* config_fsm */ FALSE,
/* enable_page_buffer */ FALSE,
/* hdf_file_name */ filename,
/* cache_image_flags */ H5C_CI__ALL_FLAGS,
@@ -3557,12 +3573,12 @@ verify_cache_image_RW(int file_name_id, int md_write_strat, int mpi_rank)
/* md_write_strat */ md_write_strat);
}
- if ( show_progress )
+ if ( show_progress )
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
/* 5) Verify that the file contains the expected data. */
-
+
if ( pass ) {
verify_data_sets(file_id, 0, MAX_NUM_DSETS - 1);
@@ -3581,7 +3597,7 @@ verify_cache_image_RW(int file_name_id, int md_write_strat, int mpi_rank)
/* 6) Close the file. */
-
+
if ( pass ) {
if ( H5Fclose(file_id) < 0 ) {
@@ -3592,7 +3608,7 @@ verify_cache_image_RW(int file_name_id, int md_write_strat, int mpi_rank)
}
}
- if ( show_progress )
+ if ( show_progress )
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
@@ -3622,7 +3638,7 @@ verify_cache_image_RW(int file_name_id, int md_write_strat, int mpi_rank)
H5_FAILED();
- if ( show_progress )
+ if ( show_progress )
HDfprintf(stdout, "%s: failure_mssg = \"%s\"\n", failure_mssg);
}
}
@@ -3632,20 +3648,20 @@ verify_cache_image_RW(int file_name_id, int md_write_strat, int mpi_rank)
} /* verify_cache_image_RW() */
-
+
/*****************************************************************************
*
* Function: smoke_check_1()
*
- * Purpose: Initial smoke check to verify correct behaviour of cache
+ * Purpose: Initial smoke check to verify correct behaviour of cache
* image in combination with parallel.
- *
+ *
* As cache image is currently disabled in the parallel case,
* we construct a test file in parallel, verify it in serial
* and generate a cache image in passing, and then verify
* it again in parallel.
*
- * In passing, also verify that page buffering is silently
+ * In passing, also verify that page buffering is silently
* disabled in the parallel case. Needless to say, this part
* of the test will have to be re-worked when and if page
* buffering is supported in parallel.
@@ -3700,7 +3716,7 @@ smoke_check_1(MPI_Comm mpi_comm, MPI_Info mpi_info, int mpi_rank, int mpi_size)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
- /* 1) Create a PHDF5 file without the cache image FAPL entry.
+ /* 1) Create a PHDF5 file without the cache image FAPL entry.
*
* Verify that a cache image is not requested
*/
@@ -3807,11 +3823,11 @@ smoke_check_1(MPI_Comm mpi_comm, MPI_Info mpi_info, int mpi_rank, int mpi_size)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
- /* 7) Verify the datasets in the file backwards
+ /* 7) Verify the datasets in the file backwards
*
* Verify that only process 0 reads the cache image.
*
- * Verify that all other processes receive the cache
+ * Verify that all other processes receive the cache
* image block from process 0.
*/
@@ -3841,11 +3857,11 @@ smoke_check_1(MPI_Comm mpi_comm, MPI_Info mpi_info, int mpi_rank, int mpi_size)
if ( ( mpi_rank == 0 ) && ( show_progress ) )
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
- /* Verify that all other processes receive the cache image block
+ /* Verify that all other processes receive the cache image block
* from process 0.
- *
- * Since we have alread verified that only process 0 has read the
- * image, it is sufficient to verify that the image was loaded on
+ *
+ * Since we have already verified that only process 0 has read the
+ * image, it is sufficient to verify that the image was loaded on
* all processes.
*/
#if H5C_COLLECT_CACHE_STATS
@@ -3906,11 +3922,11 @@ smoke_check_1(MPI_Comm mpi_comm, MPI_Info mpi_info, int mpi_rank, int mpi_size)
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
- /* 10) Verify the datasets in the file
+ /* 10) Verify the datasets in the file
*
* Verify that only process 0 reads the cache image.
*
- * Verify that all other processes receive the cache
+ * Verify that all other processes receive the cache
* image block from process 0.
*/
@@ -3940,11 +3956,11 @@ smoke_check_1(MPI_Comm mpi_comm, MPI_Info mpi_info, int mpi_rank, int mpi_size)
if ( ( mpi_rank == 0 ) && ( show_progress ) )
HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
- /* Verify that all other processes receive the cache image block
+ /* Verify that all other processes receive the cache image block
* from process 0.
- *
- * Since we have alread verified that only process 0 has read the
- * image, it is sufficient to verify that the image was loaded on
+ *
+ * Since we have already verified that only process 0 has read the
+ * image, it is sufficient to verify that the image was loaded on
* all processes.
*/
#if H5C_COLLECT_CACHE_STATS
@@ -4047,28 +4063,35 @@ smoke_check_1(MPI_Comm mpi_comm, MPI_Info mpi_info, int mpi_rank, int mpi_size)
H5_FAILED();
- HDfprintf(stdout, "%s: failure_mssg = \"%s\"\n",
+ HDfprintf(stdout, "%s: failure_mssg = \"%s\"\n",
fcn_name, failure_mssg);
}
}
return !pass;
-
+
} /* smoke_check_1() */
-
+
+/* This test uses many POSIX things that are not available on
+ * Windows. We're using a check for fork(2) here as a proxy for
+ * all POSIX/Unix/Linux things until this test can be made
+ * more platform-independent.
+ */
+#ifdef H5_HAVE_FORK
+
/*-------------------------------------------------------------------------
* Function: main
*
* Purpose: Run parallel tests on the cache image feature.
- *
- * At present, cache image is disabled in parallel, and
- * thus these tests are restructed to verifying that a
+ *
+ * At present, cache image is disabled in parallel, and
+ * thus these tests are restricted to verifying that a
* file with a cache image can be opened in the parallel
- * case, and verifying that instructions to create a
+ * case, and verifying that instructions to create a
* cache image are ignored in the parallel case.
*
- * WARNING: This test uses fork() and execve(), and
+ * WARNING: This test uses fork() and execv(), and
* therefore will not run on Windows.
*
* Return: Success: 0
@@ -4106,39 +4129,39 @@ main(int argc, char **argv)
HDfflush(stdout);
i = 0;
- while ( ( FILENAMES[i] != NULL ) && ( i < TEST_FILES_TO_CONSTRUCT ) ) {
+ while ( ( i < TEST_FILES_TO_CONSTRUCT ) && ( FILENAMES[i] != NULL ) ) {
HDfprintf(stdout, " writing %s ... ", FILENAMES[i]);
HDfflush(stdout);
construct_test_file(i);
if ( pass ) {
-
- printf("done.\n");
+
+ HDprintf("done.\n");
HDfflush(stdout);
} else {
- printf("failed.\n");
- exit(1);
+ HDprintf("failed.\n");
+ HDexit(1);
}
i++;
}
HDfprintf(stdout, "Test file construction complete.\n");
- exit(0);
+ HDexit(0);
} else if ( ici ) {
if ( serial_insert_cache_image(file_idx, mpi_size) ) {
- exit(0);
+ HDexit(0);
} else {
HDfprintf(stderr, "\n\nCache image insertion failed.\n");
HDfprintf(stderr, " failure mssg = \"%s\"\n", failure_mssg);
- exit(1);
+ HDexit(1);
}
}
@@ -4155,24 +4178,24 @@ main(int argc, char **argv)
* calls. By then, MPI calls may not work.
*/
if (H5dont_atexit() < 0){
- printf("%d:Failed to turn off atexit processing. Continue.\n",
+ HDprintf("%d:Failed to turn off atexit processing. Continue.\n",
mpi_rank);
};
H5open();
if ( mpi_rank == 0 ) {
- printf("===================================\n");
- printf("Parallel metadata cache image tests\n");
- printf(" mpi_size = %d\n", mpi_size);
- printf("===================================\n");
+ HDprintf("===================================\n");
+ HDprintf("Parallel metadata cache image tests\n");
+ HDprintf(" mpi_size = %d\n", mpi_size);
+ HDprintf("===================================\n");
}
if ( mpi_size < 2 ) {
if ( mpi_rank == 0 ) {
- printf(" Need at least 2 processes. Exiting.\n");
+ HDprintf(" Need at least 2 processes. Exiting.\n");
}
goto finish;
}
@@ -4194,13 +4217,13 @@ main(int argc, char **argv)
/* we may need to play with the path here */
if ( execv("t_cache_image", child_argv) == -1 ) {
- HDfprintf(stdout,
+ HDfprintf(stdout,
"execl() of setup process failed. errno = %d(%s)\n",
errno, strerror(errno));
- exit(1);
+ HDexit(1);
}
- } else if ( child_pid != -1 ) {
+ } else if ( child_pid != -1 ) {
/* this is the parent process -- wait until child is done */
if ( -1 == waitpid(child_pid, &child_status, WUNTRACED)) {
@@ -4216,7 +4239,7 @@ main(int argc, char **argv)
} else {
- HDfprintf(stdout,
+ HDfprintf(stdout,
"testfile construction complete -- proceeding with tests.\n");
}
} else { /* fork failed */
@@ -4229,14 +4252,14 @@ main(int argc, char **argv)
MPI_Barrier(MPI_COMM_WORLD);
- nerrs += verify_cache_image_RO(0,
+ nerrs += verify_cache_image_RO(0,
H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY, mpi_rank);
#if 1
- nerrs += verify_cache_image_RO(1,
+ nerrs += verify_cache_image_RO(1,
H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED, mpi_rank);
- nerrs += verify_cache_image_RW(0,
+ nerrs += verify_cache_image_RW(0,
H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY, mpi_rank);
- nerrs += verify_cache_image_RW(1,
+ nerrs += verify_cache_image_RW(1,
H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED, mpi_rank);
nerrs += smoke_check_1(comm, info, mpi_rank, mpi_size);
#endif
@@ -4248,16 +4271,16 @@ finish:
MPI_Barrier(MPI_COMM_WORLD);
if ( mpi_rank == 0 ) { /* only process 0 reports */
- sleep(10);
- printf("===================================\n");
+ HDsleep(10);
+ HDprintf("===================================\n");
if ( nerrs > 0 ) {
- printf("***metadata cache image tests detected %d failures***\n",
+ HDprintf("***metadata cache image tests detected %d failures***\n",
nerrs);
}
else {
- printf("metadata cache image tests finished with no failures\n");
+ HDprintf("metadata cache image tests finished with no failures\n");
}
- printf("===================================\n");
+ HDprintf("===================================\n");
}
/* takedown_derived_types(); */
@@ -4272,4 +4295,14 @@ finish:
return(nerrs > 0);
} /* main() */
+#else /* H5_HAVE_FORK */
+
+int
+main(void)
+{
+ HDfprintf(stderr, "Non-POSIX platform. Skipping.\n");
+ return EXIT_SUCCESS;
+} /* end main() */
+
+#endif /* H5_HAVE_FORK */
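A note on the mechanical substitutions above: printf/sprintf/exit/sleep
become HDprintf/HDsprintf/HDexit/HDsleep. These HD-prefixed names are
HDF5's portability wrappers; on most platforms they expand directly to
the libc call, giving the library a single interception point for
platforms that need substitutes. Illustrative definitions only (the real
ones live in H5private.h and vary by platform):

    #ifndef HDprintf
    #define HDprintf(...)  printf(__VA_ARGS__)
    #endif
    #ifndef HDexit
    #define HDexit(S)      exit(S)
    #endif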
diff --git a/testpar/t_chunk_alloc.c b/testpar/t_chunk_alloc.c
index 2340ae0..e716f41 100644
--- a/testpar/t_chunk_alloc.c
+++ b/testpar/t_chunk_alloc.c
@@ -20,23 +20,23 @@
*/
#include "testphdf5.h"
-static int mpi_size, mpi_rank;
+static int mpi_size, mpi_rank;
#define DSET_NAME "ExtendibleArray"
-#define CHUNK_SIZE 1000 /* #elements per chunk */
-#define CHUNK_FACTOR 200 /* default dataset size in terms of chunks */
+#define CHUNK_SIZE 1000 /* #elements per chunk */
+#define CHUNK_FACTOR 200 /* default dataset size in terms of chunks */
#define CLOSE 1
#define NO_CLOSE 0
static MPI_Offset
get_filesize(const char *filename)
{
- int mpierr;
- MPI_File fd;
- MPI_Offset filesize;
+ int mpierr;
+ MPI_File fd;
+ MPI_Offset filesize;
mpierr = MPI_File_open(MPI_COMM_SELF, filename, MPI_MODE_RDONLY,
- MPI_INFO_NULL, &fd);
+ MPI_INFO_NULL, &fd);
VRFY((mpierr == MPI_SUCCESS), "");
mpierr = MPI_File_get_size(fd, &filesize);
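get_filesize() measures the file through raw MPI-IO, independent of HDF5,
which is what makes it a trustworthy check on HDF5's allocation behavior.
The complete pattern, sketched:

    #include <mpi.h>

    /* Sketch: query a file's size via MPI-IO without going through
     * HDF5. Returns -1 if the file cannot be opened.
     */
    static MPI_Offset
    mpi_file_size(const char *name)
    {
        MPI_File   fd;
        MPI_Offset size = -1;

        if (MPI_File_open(MPI_COMM_SELF, name, MPI_MODE_RDONLY,
                          MPI_INFO_NULL, &fd) == MPI_SUCCESS) {
            MPI_File_get_size(fd, &size);
            MPI_File_close(&fd);
        }
        return size;
    }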
@@ -85,8 +85,8 @@ create_chunked_dataset(const char *filename, int chunk_factor, write_type write_
long nchunks;
herr_t hrc;
- MPI_Offset filesize, /* actual file size */
- est_filesize; /* estimated file size */
+ MPI_Offset filesize, /* actual file size */
+ est_filesize; /* estimated file size */
/* set up MPI parameters */
MPI_Comm_size(MPI_COMM_WORLD,&mpi_size);
@@ -95,40 +95,39 @@ create_chunked_dataset(const char *filename, int chunk_factor, write_type write_
/* Only MAINPROCESS should create the file. Others just wait. */
if (MAINPROCESS){
nchunks=chunk_factor*mpi_size;
- dims[0]=nchunks*CHUNK_SIZE;
- /* Create the data space with unlimited dimensions. */
- dataspace = H5Screate_simple (1, dims, maxdims);
- VRFY((dataspace >= 0), "");
+ dims[0]=(hsize_t)(nchunks*CHUNK_SIZE);
+ /* Create the data space with unlimited dimensions. */
+ dataspace = H5Screate_simple (1, dims, maxdims);
+ VRFY((dataspace >= 0), "");
- memspace = H5Screate_simple(1, chunk_dims, NULL);
- VRFY((memspace >= 0), "");
+ memspace = H5Screate_simple(1, chunk_dims, NULL);
+ VRFY((memspace >= 0), "");
- /* Create a new file. If file exists its contents will be overwritten. */
- file_id = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT,
- H5P_DEFAULT);
- VRFY((file_id >= 0), "H5Fcreate");
+ /* Create a new file. If file exists its contents will be overwritten. */
+ file_id = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ VRFY((file_id >= 0), "H5Fcreate");
- /* Modify dataset creation properties, i.e. enable chunking */
- cparms = H5Pcreate(H5P_DATASET_CREATE);
- VRFY((cparms >= 0), "");
+ /* Modify dataset creation properties, i.e. enable chunking */
+ cparms = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY((cparms >= 0), "");
- hrc = H5Pset_alloc_time(cparms, H5D_ALLOC_TIME_EARLY);
- VRFY((hrc >= 0), "");
+ hrc = H5Pset_alloc_time(cparms, H5D_ALLOC_TIME_EARLY);
+ VRFY((hrc >= 0), "");
- hrc = H5Pset_chunk(cparms, 1, chunk_dims);
- VRFY((hrc >= 0), "");
+ hrc = H5Pset_chunk(cparms, 1, chunk_dims);
+ VRFY((hrc >= 0), "");
- /* Create a new dataset within the file using cparms creation properties. */
- dataset = H5Dcreate2(file_id, DSET_NAME, H5T_NATIVE_UCHAR, dataspace, H5P_DEFAULT, cparms, H5P_DEFAULT);
- VRFY((dataset >= 0), "");
+ /* Create a new dataset within the file using cparms creation properties. */
+ dataset = H5Dcreate2(file_id, DSET_NAME, H5T_NATIVE_UCHAR, dataspace, H5P_DEFAULT, cparms, H5P_DEFAULT);
+ VRFY((dataset >= 0), "");
- if(write_pattern == sec_last) {
+ if(write_pattern == sec_last) {
HDmemset(buffer, 100, CHUNK_SIZE);
count[0] = 1;
stride[0] = 1;
block[0] = chunk_dims[0];
- offset[0] = (nchunks-2)*chunk_dims[0];
+ offset[0] = (hsize_t)(nchunks-2)*chunk_dims[0];
hrc = H5Sselect_hyperslab(dataspace, H5S_SELECT_SET, offset, stride, count, block);
VRFY((hrc >= 0), "");
@@ -138,28 +137,28 @@ create_chunked_dataset(const char *filename, int chunk_factor, write_type write_
VRFY((hrc >= 0), "H5Dwrite");
} /* end if */
- /* Close resources */
- hrc = H5Dclose (dataset);
- VRFY((hrc >= 0), "");
- dataset = -1;
+ /* Close resources */
+ hrc = H5Dclose (dataset);
+ VRFY((hrc >= 0), "");
+ dataset = -1;
- hrc = H5Sclose (dataspace);
- VRFY((hrc >= 0), "");
+ hrc = H5Sclose (dataspace);
+ VRFY((hrc >= 0), "");
- hrc = H5Sclose (memspace);
- VRFY((hrc >= 0), "");
+ hrc = H5Sclose (memspace);
+ VRFY((hrc >= 0), "");
- hrc = H5Pclose (cparms);
- VRFY((hrc >= 0), "");
+ hrc = H5Pclose (cparms);
+ VRFY((hrc >= 0), "");
- hrc = H5Fclose (file_id);
- VRFY((hrc >= 0), "");
- file_id = -1;
+ hrc = H5Fclose (file_id);
+ VRFY((hrc >= 0), "");
+ file_id = -1;
- /* verify file size */
- filesize = get_filesize(filename);
- est_filesize = nchunks * CHUNK_SIZE * sizeof(unsigned char);
- VRFY((filesize >= est_filesize), "file size check");
+ /* verify file size */
+ filesize = get_filesize(filename);
+ est_filesize = (MPI_Offset)nchunks * (MPI_Offset)CHUNK_SIZE * (MPI_Offset)sizeof(unsigned char);
+ VRFY((filesize >= est_filesize), "file size check");
}
@@ -200,8 +199,8 @@ parallel_access_dataset(const char *filename, int chunk_factor, access_type acti
int i;
long nchunks;
/* MPI Gubbins */
- MPI_Offset filesize, /* actual file size */
- est_filesize; /* estimated file size */
+ MPI_Offset filesize, /* actual file size */
+ est_filesize; /* estimated file size */
/* Initialize MPI */
MPI_Comm_size(MPI_COMM_WORLD,&mpi_size);
@@ -234,26 +233,26 @@ parallel_access_dataset(const char *filename, int chunk_factor, access_type acti
dataspace = H5Dget_space(*dataset);
VRFY((dataspace >= 0), "");
- size[0] = nchunks*CHUNK_SIZE;
+ size[0] = (hsize_t)nchunks*CHUNK_SIZE;
switch (action) {
/* all chunks are written by all the processes in an interleaved way*/
case write_all:
- memset(buffer, mpi_rank+1, CHUNK_SIZE);
- count[0] = 1;
- stride[0] = 1;
- block[0] = chunk_dims[0];
- for (i=0; i<nchunks/mpi_size; i++){
- offset[0] = (i*mpi_size+mpi_rank)*chunk_dims[0];
+ HDmemset(buffer, mpi_rank+1, CHUNK_SIZE);
+ count[0] = 1;
+ stride[0] = 1;
+ block[0] = chunk_dims[0];
+ for (i=0; i<nchunks/mpi_size; i++) {
+ offset[0] = (hsize_t)(i*mpi_size+mpi_rank)*chunk_dims[0];
- hrc = H5Sselect_hyperslab(dataspace, H5S_SELECT_SET, offset, stride, count, block);
- VRFY((hrc >= 0), "");
+ hrc = H5Sselect_hyperslab(dataspace, H5S_SELECT_SET, offset, stride, count, block);
+ VRFY((hrc >= 0), "");
- /* Write the buffer out */
- hrc = H5Dwrite(*dataset, H5T_NATIVE_UCHAR, memspace, dataspace, H5P_DEFAULT, buffer);
- VRFY((hrc >= 0), "H5Dwrite");
+ /* Write the buffer out */
+ hrc = H5Dwrite(*dataset, H5T_NATIVE_UCHAR, memspace, dataspace, H5P_DEFAULT, buffer);
+ VRFY((hrc >= 0), "H5Dwrite");
}
break;
@@ -295,7 +294,7 @@ parallel_access_dataset(const char *filename, int chunk_factor, access_type acti
/* verify file size */
filesize = get_filesize(filename);
- est_filesize = nchunks*CHUNK_SIZE*sizeof(unsigned char);
+ est_filesize = (MPI_Offset)nchunks*(MPI_Offset)CHUNK_SIZE*(MPI_Offset)sizeof(unsigned char);
VRFY((filesize >= est_filesize), "file size check");
/* Can close some plists */
@@ -318,7 +317,7 @@ parallel_access_dataset(const char *filename, int chunk_factor, access_type acti
* interleaved pattern.
*/
static void
-verify_data(const char *filename, int chunk_factor, write_type write_pattern, int vclose,
+verify_data(const char *filename, int chunk_factor, write_type write_pattern, int vclose,
hid_t *file_id, hid_t *dataset)
{
/* HDF5 gubbins */
@@ -372,10 +371,10 @@ verify_data(const char *filename, int chunk_factor, write_type write_pattern, in
stride[0] = 1;
block[0] = chunk_dims[0];
for (i=0; i<nchunks; i++){
- /* reset buffer values */
- memset(buffer, -1, CHUNK_SIZE);
+ /* reset buffer values */
+ HDmemset(buffer, -1, CHUNK_SIZE);
- offset[0] = i*chunk_dims[0];
+ offset[0] = (hsize_t)i*chunk_dims[0];
hrc = H5Sselect_hyperslab(dataspace, H5S_SELECT_SET, offset, stride, count, block);
VRFY((hrc >= 0), "");
@@ -385,22 +384,22 @@ verify_data(const char *filename, int chunk_factor, write_type write_pattern, in
VRFY((hrc >= 0), "H5Dread");
/* set expected value according the write pattern */
- switch (write_pattern) {
- case all:
- value = i%mpi_size + 1;
- break;
- case none:
- value = 0;
- break;
+ switch (write_pattern) {
+ case all:
+ value = i%mpi_size + 1;
+ break;
+ case none:
+ value = 0;
+ break;
case sec_last:
- if (i==nchunks-2)
- value = 100;
- else
- value = 0;
+ if (i==nchunks-2)
+ value = 100;
+ else
+ value = 0;
break;
default:
HDassert(0);
- }
+ }
/* verify content of the chunk */
for (index_l = 0; index_l < CHUNK_SIZE; index_l++)
@@ -408,10 +407,10 @@ verify_data(const char *filename, int chunk_factor, write_type write_pattern, in
}
hrc = H5Sclose (dataspace);
- VRFY((hrc >= 0), "");
+ VRFY((hrc >= 0), "");
- hrc = H5Sclose (memspace);
- VRFY((hrc >= 0), "");
+ hrc = H5Sclose (memspace);
+ VRFY((hrc >= 0), "");
/* Can close some plists */
hrc = H5Pclose(access_plist);
@@ -469,7 +468,7 @@ test_chunk_alloc(void)
filename = (const char*)GetTestParameters();
if (VERBOSE_MED)
- printf("Extend Chunked allocation test on file %s\n", filename);
+ HDprintf("Extend Chunked allocation test on file %s\n", filename);
/* Case 1 */
/* Create chunked dataset without writing anything.*/
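The behavior this test depends on is early space allocation: with
H5D_ALLOC_TIME_EARLY, every chunk is allocated at H5Dcreate2() time, so
the file reaches (at least) its estimated size before any data is
written. The relevant DCPL setup, sketched with the test's CHUNK_SIZE of
1000 elements:

    hsize_t chunk_dims[1] = { 1000 };
    hid_t   cparms = H5Pcreate(H5P_DATASET_CREATE);

    H5Pset_chunk(cparms, 1, chunk_dims);
    /* allocate all chunks at dataset creation, not on first write */
    H5Pset_alloc_time(cparms, H5D_ALLOC_TIME_EARLY);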
diff --git a/testpar/t_coll_chunk.c b/testpar/t_coll_chunk.c
index c6fa3d4..740f78e 100644
--- a/testpar/t_coll_chunk.c
+++ b/testpar/t_coll_chunk.c
@@ -16,7 +16,7 @@
#define HYPER 1
#define POINT 2
-#define ALL 3
+#define ALL 3
/* some commonly used routines for collective chunk IO tests*/
@@ -620,7 +620,6 @@ coll_chunktest(const char* filename,
size_t num_points; /* for point selection */
hsize_t *coords = NULL; /* for point selection */
hsize_t current_dims; /* for point selection */
- int i;
/* set up MPI parameters */
MPI_Comm_size(comm,&mpi_size);
@@ -638,7 +637,7 @@ coll_chunktest(const char* filename,
VRFY((status >= 0),"");
/* setup dimensionality object */
- dims[0] = SPACE_DIM1*mpi_size;
+ dims[0] = (hsize_t)(SPACE_DIM1*mpi_size);
dims[1] = SPACE_DIM2;
/* allocate memory for data buffer */
@@ -671,7 +670,7 @@ coll_chunktest(const char* filename,
VRFY((crp_plist >= 0),"");
/* Set up chunk information. */
- chunk_dims[0] = dims[0]/chunk_factor;
+ chunk_dims[0] = dims[0]/(hsize_t)chunk_factor;
/* to decrease the testing time, maintain bigger chunk size */
(chunk_factor == 1) ? (chunk_dims[1] = SPACE_DIM2) : (chunk_dims[1] = SPACE_DIM2/2);
@@ -1058,7 +1057,7 @@ ccslab_set(int mpi_rank,
stride[1] = 1;
count[0] = SPACE_DIM1;
count[1] = SPACE_DIM2;
- start[0] = mpi_rank*count[0];
+ start[0] = (hsize_t)mpi_rank*count[0];
start[1] = 0;
break;
@@ -1067,11 +1066,11 @@ ccslab_set(int mpi_rank,
/* Each process takes several disjoint blocks. */
block[0] = 1;
block[1] = 1;
- stride[0] = 3;
- stride[1] = 3;
- count[0] = SPACE_DIM1/(stride[0]*block[0]);
- count[1] = (SPACE_DIM2)/(stride[1]*block[1]);
- start[0] = SPACE_DIM1*mpi_rank;
+ stride[0] = 3;
+ stride[1] = 3;
+ count[0] = SPACE_DIM1/(stride[0]*block[0]);
+ count[1] = (SPACE_DIM2)/(stride[1]*block[1]);
+ start[0] = (hsize_t)SPACE_DIM1*(hsize_t)mpi_rank;
start[1] = 0;
break;
@@ -1085,7 +1084,7 @@ ccslab_set(int mpi_rank,
stride[1] = 1;
count[0] = ((mpi_rank >= MAX(1,(mpi_size-2)))?0:SPACE_DIM1);
count[1] = SPACE_DIM2;
- start[0] = mpi_rank*count[0];
+ start[0] = (hsize_t)mpi_rank*count[0];
start[1] = 0;
break;
@@ -1096,14 +1095,14 @@ ccslab_set(int mpi_rank,
half of the domain. */
block[0] = 1;
- count[0] = 2;
- stride[0] = SPACE_DIM1*mpi_size/4+1;
+ count[0] = 2;
+ stride[0] = (hsize_t)SPACE_DIM1*(hsize_t)mpi_size/4+1;
block[1] = SPACE_DIM2;
count[1] = 1;
start[1] = 0;
stride[1] = 1;
- if((mpi_rank *3)<(mpi_size*2)) start[0] = mpi_rank;
- else start[0] = 1 + SPACE_DIM1*mpi_size/2 + (mpi_rank-2*mpi_size/3);
+ if((mpi_rank *3)<(mpi_size*2)) start[0] = (hsize_t)mpi_rank;
+ else start[0] = (hsize_t)(1 + SPACE_DIM1*mpi_size/2 + (mpi_rank-2*mpi_size/3));
break;
case BYROW_SELECTINCHUNK:
@@ -1111,7 +1110,7 @@ ccslab_set(int mpi_rank,
block[0] = 1;
count[0] = 1;
- start[0] = mpi_rank*SPACE_DIM1;
+ start[0] = (hsize_t)(mpi_rank*SPACE_DIM1);
stride[0]= 1;
block[1] = SPACE_DIM2;
count[1] = 1;
@@ -1122,7 +1121,7 @@ ccslab_set(int mpi_rank,
default:
/* Unknown mode. Set it to cover the whole dataset. */
- block[0] = SPACE_DIM1*mpi_size;
+ block[0] = (hsize_t)SPACE_DIM1*(hsize_t)mpi_size;
block[1] = SPACE_DIM2;
stride[0] = block[0];
stride[1] = block[1];
@@ -1134,7 +1133,7 @@ ccslab_set(int mpi_rank,
break;
}
if (VERBOSE_MED){
- printf("start[]=(%lu,%lu), count[]=(%lu,%lu), stride[]=(%lu,%lu), block[]=(%lu,%lu), total datapoints=%lu\n",
+ HDprintf("start[]=(%lu,%lu), count[]=(%lu,%lu), stride[]=(%lu,%lu), block[]=(%lu,%lu), total datapoints=%lu\n",
(unsigned long)start[0], (unsigned long)start[1], (unsigned long)count[0], (unsigned long)count[1],
(unsigned long)stride[0], (unsigned long)stride[1], (unsigned long)block[0], (unsigned long)block[1],
(unsigned long)(block[0]*block[1]*count[0]*count[1]));
@@ -1197,20 +1196,20 @@ ccdataset_print(hsize_t start[],
hsize_t i, j;
/* print the column heading */
- printf("Print only the first block of the dataset\n");
- printf("%-8s", "Cols:");
+ HDprintf("Print only the first block of the dataset\n");
+ HDprintf("%-8s", "Cols:");
for (j=0; j < block[1]; j++){
- printf("%3lu ", (unsigned long)(start[1]+j));
+ HDprintf("%3lu ", (unsigned long)(start[1]+j));
}
- printf("\n");
+ HDprintf("\n");
/* print the slab data */
for (i=0; i < block[0]; i++){
- printf("Row %2lu: ", (unsigned long)(i+start[0]));
+ HDprintf("Row %2lu: ", (unsigned long)(i+start[0]));
for (j=0; j < block[1]; j++){
- printf("%03d ", *dataptr++);
+ HDprintf("%03d ", *dataptr++);
}
- printf("\n");
+ HDprintf("\n");
}
}
@@ -1233,13 +1232,13 @@ ccdataset_vrfy(hsize_t start[],
/* print it if VERBOSE_MED */
if (VERBOSE_MED) {
- printf("dataset_vrfy dumping:::\n");
- printf("start(%lu, %lu), count(%lu, %lu), stride(%lu, %lu), block(%lu, %lu)\n",
+ HDprintf("dataset_vrfy dumping:::\n");
+ HDprintf("start(%lu, %lu), count(%lu, %lu), stride(%lu, %lu), block(%lu, %lu)\n",
(unsigned long)start[0], (unsigned long)start[1], (unsigned long)count[0], (unsigned long)count[1],
(unsigned long)stride[0], (unsigned long)stride[1], (unsigned long)block[0], (unsigned long)block[1]);
- printf("original values:\n");
+ HDprintf("original values:\n");
ccdataset_print(start, block, original);
- printf("compared values:\n");
+ HDprintf("compared values:\n");
ccdataset_print(start, block, dataset);
}
@@ -1262,7 +1261,7 @@ ccdataset_vrfy(hsize_t start[],
}
if (*dataptr != *oriptr){
if (vrfyerrs++ < MAX_ERR_REPORT || VERBOSE_MED){
- printf("Dataset Verify failed at [%lu][%lu]: expect %d, got %d\n",
+ HDprintf("Dataset Verify failed at [%lu][%lu]: expect %d, got %d\n",
(unsigned long)i, (unsigned long)j,
*(oriptr), *(dataptr));
}
@@ -1272,8 +1271,8 @@ ccdataset_vrfy(hsize_t start[],
}
}
if (vrfyerrs > MAX_ERR_REPORT && !VERBOSE_MED)
- printf("[more errors ...]\n");
+ HDprintf("[more errors ...]\n");
if (vrfyerrs)
- printf("%d errors found in ccdataset_vrfy\n", vrfyerrs);
+ HDprintf("%d errors found in ccdataset_vrfy\n", vrfyerrs);
return(vrfyerrs);
}
diff --git a/testpar/t_coll_md_read.c b/testpar/t_coll_md_read.c
new file mode 100644
index 0000000..d4aaa2e
--- /dev/null
+++ b/testpar/t_coll_md_read.c
@@ -0,0 +1,506 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/*
+ * A suite of tests for HDF5's collective metadata read capabilities, as enabled
+ * by making a call to H5Pset_all_coll_metadata_ops().
+ */
+
+#include "testphdf5.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+/*
+ * Define the non-participating process as the "last"
+ * rank to avoid any weirdness potentially caused by
+ * an if (mpi_rank == 0) check.
+ */
+#define PARTIAL_NO_SELECTION_NO_SEL_PROCESS (mpi_rank == mpi_size - 1)
+#define PARTIAL_NO_SELECTION_DATASET_NAME "partial_no_selection_dset"
+#define PARTIAL_NO_SELECTION_DATASET_NDIMS 2
+#define PARTIAL_NO_SELECTION_Y_DIM_SCALE 5
+#define PARTIAL_NO_SELECTION_X_DIM_SCALE 5
+
+#define MULTI_CHUNK_IO_ADDRMAP_ISSUE_DIMS 2
+
+#define LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DATASET_NAME "linked_chunk_io_sort_chunk_issue"
+#define LINK_CHUNK_IO_SORT_CHUNK_ISSUE_Y_DIM_SCALE 20000
+#define LINK_CHUNK_IO_SORT_CHUNK_ISSUE_CHUNK_SIZE 1
+#define LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DIMS 1
+
+/*
+ * A test for issue HDFFV-10501. A parallel hang was reported which occurred
+ * in linked-chunk I/O when collective metadata reads are enabled and some ranks
+ * do not have any selection in a dataset's dataspace, while others do. The ranks
+ * which have no selection during the read/write operation called H5D__chunk_addrmap()
+ * to retrieve the lowest chunk address, since we require that the read/write be done
+ * in strictly non-decreasing order of chunk address. For version 1 and 2 B-trees,
+ * this caused the non-participating ranks to issue a collective MPI_Bcast() call
+ * which the other ranks did not issue, thus causing a hang.
+ *
+ * However, since these ranks are not actually reading/writing anything, this call
+ * can simply be removed and the address used for the read/write can be set to an
+ * arbitrary number (0 was chosen).
+ */
+void test_partial_no_selection_coll_md_read(void)
+{
+ const char *filename;
+ hsize_t *dataset_dims = NULL;
+ hsize_t max_dataset_dims[PARTIAL_NO_SELECTION_DATASET_NDIMS];
+ hsize_t sel_dims[1];
+ hsize_t chunk_dims[PARTIAL_NO_SELECTION_DATASET_NDIMS] = { PARTIAL_NO_SELECTION_Y_DIM_SCALE, PARTIAL_NO_SELECTION_X_DIM_SCALE };
+ hsize_t start[PARTIAL_NO_SELECTION_DATASET_NDIMS];
+ hsize_t stride[PARTIAL_NO_SELECTION_DATASET_NDIMS];
+ hsize_t count[PARTIAL_NO_SELECTION_DATASET_NDIMS];
+ hsize_t block[PARTIAL_NO_SELECTION_DATASET_NDIMS];
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t fapl_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t dcpl_id = H5I_INVALID_HID;
+ hid_t dxpl_id = H5I_INVALID_HID;
+ hid_t fspace_id = H5I_INVALID_HID;
+ hid_t mspace_id = H5I_INVALID_HID;
+ int mpi_rank, mpi_size;
+ void *data = NULL;
+ void *read_buf = NULL;
+
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+
+ filename = GetTestParameters();
+
+ fapl_id = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type);
+ VRFY((fapl_id >= 0), "create_faccess_plist succeeded");
+
+ /*
+ * Even though the testphdf5 framework currently sets collective metadata reads
+ * on the FAPL, we call it here just to be sure this test is future-proof, since
+ * demonstrating this issue relies upon it.
+ */
+ VRFY((H5Pset_all_coll_metadata_ops(fapl_id, true) >= 0), "Set collective metadata reads succeeded");
+
+ file_id = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id);
+ VRFY((file_id >= 0), "H5Fcreate succeeded");
+
+ dataset_dims = HDmalloc(PARTIAL_NO_SELECTION_DATASET_NDIMS * sizeof(*dataset_dims));
+ VRFY((dataset_dims != NULL), "malloc succeeded");
+
+ dataset_dims[0] = (hsize_t)PARTIAL_NO_SELECTION_Y_DIM_SCALE * (hsize_t)mpi_size;
+ dataset_dims[1] = (hsize_t)PARTIAL_NO_SELECTION_X_DIM_SCALE * (hsize_t)mpi_size;
+ max_dataset_dims[0] = H5S_UNLIMITED;
+ max_dataset_dims[1] = H5S_UNLIMITED;
+
+ fspace_id = H5Screate_simple(PARTIAL_NO_SELECTION_DATASET_NDIMS, dataset_dims, max_dataset_dims);
+ VRFY((fspace_id >= 0), "H5Screate_simple succeeded");
+
+ /*
+ * Set up chunking on the dataset in order to reproduce the problem.
+ */
+ dcpl_id = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY((dcpl_id >= 0), "H5Pcreate succeeded");
+
+ VRFY((H5Pset_chunk(dcpl_id, PARTIAL_NO_SELECTION_DATASET_NDIMS, chunk_dims) >= 0), "H5Pset_chunk succeeded");
+
+ dset_id = H5Dcreate2(file_id, PARTIAL_NO_SELECTION_DATASET_NAME, H5T_NATIVE_INT, fspace_id, H5P_DEFAULT, dcpl_id, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "H5Dcreate2 succeeded");
+
+ /*
+ * Set up the hyperslab selection to split the dataset among the ranks.
+ *
+ * The ranks will write rows across the dataset.
+ */
+ start[0] = (hsize_t)PARTIAL_NO_SELECTION_Y_DIM_SCALE * (hsize_t)mpi_rank;
+ start[1] = 0;
+ stride[0] = PARTIAL_NO_SELECTION_Y_DIM_SCALE;
+ stride[1] = PARTIAL_NO_SELECTION_X_DIM_SCALE;
+ count[0] = 1;
+ count[1] = (hsize_t)mpi_size;
+ block[0] = PARTIAL_NO_SELECTION_Y_DIM_SCALE;
+ block[1] = PARTIAL_NO_SELECTION_X_DIM_SCALE;
+
+ VRFY((H5Sselect_hyperslab(fspace_id, H5S_SELECT_SET, start, stride, count, block) >= 0), "H5Sselect_hyperslab succeeded");
+
+ sel_dims[0] = count[1] * (PARTIAL_NO_SELECTION_Y_DIM_SCALE * PARTIAL_NO_SELECTION_X_DIM_SCALE);
+
+ mspace_id = H5Screate_simple(1, sel_dims, NULL);
+ VRFY((mspace_id >= 0), "H5Screate_simple succeeded");
+
+ data = HDcalloc(1, count[1] * (PARTIAL_NO_SELECTION_Y_DIM_SCALE * PARTIAL_NO_SELECTION_X_DIM_SCALE) * sizeof(int));
+ VRFY((data != NULL), "calloc succeeded");
+
+ dxpl_id = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((dxpl_id >= 0), "H5Pcreate succeeded");
+
+ /*
+ * Enable collective access for the data transfer.
+ */
+ VRFY((H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE) >= 0), "H5Pset_dxpl_mpio succeeded");
+
+ VRFY((H5Dwrite(dset_id, H5T_NATIVE_INT, mspace_id, fspace_id, dxpl_id, data) >= 0), "H5Dwrite succeeded");
+
+ VRFY((H5Fflush(file_id, H5F_SCOPE_GLOBAL) >= 0), "H5Fflush succeeded");
+
+ /*
+ * Ensure that linked-chunk I/O is performed since this is
+ * the particular code path where the issue lies and we don't
+ * want the library doing multi-chunk I/O behind our backs.
+ */
+ VRFY((H5Pset_dxpl_mpio_chunk_opt(dxpl_id, H5FD_MPIO_CHUNK_ONE_IO) >= 0), "H5Pset_dxpl_mpio_chunk_opt succeeded");
+
+ read_buf = HDmalloc(count[1] * (PARTIAL_NO_SELECTION_Y_DIM_SCALE * PARTIAL_NO_SELECTION_X_DIM_SCALE) * sizeof(int));
+ VRFY((read_buf != NULL), "malloc succeeded");
+
+ /*
+ * Make sure to call H5Sselect_none() on the non-participating process.
+ */
+ if (PARTIAL_NO_SELECTION_NO_SEL_PROCESS) {
+ VRFY((H5Sselect_none(fspace_id) >= 0), "H5Sselect_none succeeded");
+ VRFY((H5Sselect_none(mspace_id) >= 0), "H5Sselect_none succeeded");
+ }
+
+ /*
+ * Finally have each rank read its section of data back from the dataset.
+ */
+ VRFY((H5Dread(dset_id, H5T_NATIVE_INT, mspace_id, fspace_id, dxpl_id, read_buf) >= 0), "H5Dread succeeded");
+
+ /*
+ * Check data integrity just to be sure.
+ */
+ if (!PARTIAL_NO_SELECTION_NO_SEL_PROCESS) {
+ VRFY((!HDmemcmp(data, read_buf, count[1] * (PARTIAL_NO_SELECTION_Y_DIM_SCALE * PARTIAL_NO_SELECTION_X_DIM_SCALE) * sizeof(int))), "memcmp succeeded");
+ }
+
+ if (dataset_dims) {
+ HDfree(dataset_dims);
+ dataset_dims = NULL;
+ }
+
+ if (data) {
+ HDfree(data);
+ data = NULL;
+ }
+
+ if (read_buf) {
+ HDfree(read_buf);
+ read_buf = NULL;
+ }
+
+ VRFY((H5Sclose(fspace_id) >= 0), "H5Sclose succeeded");
+ VRFY((H5Sclose(mspace_id) >= 0), "H5Sclose succeeded");
+ VRFY((H5Pclose(dcpl_id) >= 0), "H5Pclose succeeded");
+ VRFY((H5Pclose(dxpl_id) >= 0), "H5Pclose succeeded");
+ VRFY((H5Dclose(dset_id) >= 0), "H5Dclose succeeded");
+ VRFY((H5Pclose(fapl_id) >= 0), "H5Pclose succeeded");
+ VRFY((H5Fclose(file_id) >= 0), "H5Fclose succeeded");
+}
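
The hang described in HDFFV-10501 is the generic mismatched-collective pattern. The following minimal standalone MPI sketch (illustrative only, not part of this patch) reproduces it: one rank issues an MPI_Bcast that the other ranks never match, so the collective can never complete.

#include <mpi.h>

int main(int argc, char **argv)
{
    int rank, size, buf = 0;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    if (rank == size - 1)
        /* The rank with no selection issues a collective call... */
        MPI_Bcast(&buf, 1, MPI_INT, 0, MPI_COMM_WORLD);
    /* ...that the other ranks never match, so with 2+ ranks the
     * broadcast never completes and the program hangs, as in
     * HDFFV-10501. */

    MPI_Finalize();
    return 0;
}
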
+
+/*
+ * A test for HDFFV-10562 which attempts to verify that using multi-chunk
+ * I/O with collective metadata reads enabled doesn't cause issues due to
+ * collective metadata reads being made only by process 0 in H5D__chunk_addrmap().
+ *
+ * Failure in this test may cause a hang or, because the MPI calls
+ * pertaining to this issue can mistakenly match up, an MPI error
+ * message similar to:
+ *
+ * #008: H5Dmpio.c line 2546 in H5D__obtain_mpio_mode(): MPI_BCast failed
+ * major: Internal error (too specific to document in detail)
+ * minor: Some MPI function failed
+ * #009: H5Dmpio.c line 2546 in H5D__obtain_mpio_mode(): Message truncated, error stack:
+ *PMPI_Bcast(1600)..................: MPI_Bcast(buf=0x1df98e0, count=18, MPI_BYTE, root=0, comm=0x84000006) failed
+ *MPIR_Bcast_impl(1452).............:
+ *MPIR_Bcast(1476)..................:
+ *MPIR_Bcast_intra(1249)............:
+ *MPIR_SMP_Bcast(1088)..............:
+ *MPIR_Bcast_binomial(239)..........:
+ *MPIDI_CH3U_Receive_data_found(131): Message from rank 0 and tag 2 truncated; 2616 bytes received but buffer size is 18
+ * major: Internal error (too specific to document in detail)
+ * minor: MPI Error String
+ *
+ */
+void test_multi_chunk_io_addrmap_issue(void)
+{
+ const char *filename;
+ hsize_t start[MULTI_CHUNK_IO_ADDRMAP_ISSUE_DIMS];
+ hsize_t stride[MULTI_CHUNK_IO_ADDRMAP_ISSUE_DIMS];
+ hsize_t count[MULTI_CHUNK_IO_ADDRMAP_ISSUE_DIMS];
+ hsize_t block[MULTI_CHUNK_IO_ADDRMAP_ISSUE_DIMS];
+ hsize_t dims[MULTI_CHUNK_IO_ADDRMAP_ISSUE_DIMS] = {10, 5};
+ hsize_t chunk_dims[MULTI_CHUNK_IO_ADDRMAP_ISSUE_DIMS] = {5, 5};
+ hsize_t max_dims[MULTI_CHUNK_IO_ADDRMAP_ISSUE_DIMS] = {H5S_UNLIMITED, H5S_UNLIMITED};
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t fapl_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t dcpl_id = H5I_INVALID_HID;
+ hid_t dxpl_id = H5I_INVALID_HID;
+ hid_t space_id = H5I_INVALID_HID;
+ void *read_buf = NULL;
+ int mpi_rank;
+ int data[5][5] = { {0, 1, 2, 3, 4},
+ {0, 1, 2, 3, 4},
+ {0, 1, 2, 3, 4},
+ {0, 1, 2, 3, 4},
+ {0, 1, 2, 3, 4} };
+
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ filename = GetTestParameters();
+
+ fapl_id = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type);
+ VRFY((fapl_id >= 0), "create_faccess_plist succeeded");
+
+ /*
+ * Even though the testphdf5 framework currently sets collective metadata reads
+ * on the FAPL, we call it here just to be sure this test is future-proof, since
+ * demonstrating this issue relies upon it.
+ */
+ VRFY((H5Pset_all_coll_metadata_ops(fapl_id, true) >= 0), "Set collective metadata reads succeeded");
+
+ file_id = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id);
+ VRFY((file_id >= 0), "H5Fcreate succeeded");
+
+ space_id = H5Screate_simple(MULTI_CHUNK_IO_ADDRMAP_ISSUE_DIMS, dims, max_dims);
+ VRFY((space_id >= 0), "H5Screate_simple succeeded");
+
+ dcpl_id = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY((dcpl_id >= 0), "H5Pcreate succeeded");
+
+ VRFY((H5Pset_chunk(dcpl_id, MULTI_CHUNK_IO_ADDRMAP_ISSUE_DIMS, chunk_dims) >= 0), "H5Pset_chunk succeeded");
+
+ dset_id = H5Dcreate2(file_id, "dset", H5T_NATIVE_INT, space_id, H5P_DEFAULT, dcpl_id, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "H5Dcreate2 succeeded");
+
+ dxpl_id = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((dxpl_id >= 0), "H5Pcreate succeeded");
+
+ VRFY((H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE) >= 0), "H5Pset_dxpl_mpio succeeded");
+ VRFY((H5Pset_dxpl_mpio_chunk_opt(dxpl_id, H5FD_MPIO_CHUNK_MULTI_IO) >= 0), "H5Pset_dxpl_mpio_chunk_opt succeeded");
+
+ start[1] = 0;
+ stride[0] = stride[1] = 1;
+ count[0] = count[1] = 5;
+ block[0] = block[1] = 1;
+
+ if (mpi_rank == 0)
+ start[0] = 0;
+ else
+ start[0] = 5;
+
+ VRFY((H5Sselect_hyperslab(space_id, H5S_SELECT_SET, start, stride, count, block) >= 0), "H5Sselect_hyperslab succeeded");
+ if (mpi_rank != 0)
+ VRFY((H5Sselect_none(space_id) >= 0), "H5Sselect_none succeeded");
+
+ VRFY((H5Dwrite(dset_id, H5T_NATIVE_INT, H5S_ALL, space_id, dxpl_id, data) >= 0), "H5Dwrite succeeded");
+
+ VRFY((H5Fflush(file_id, H5F_SCOPE_GLOBAL) >= 0), "H5Fflush succeeded");
+
+ read_buf = HDmalloc(50 * sizeof(int));
+ VRFY((read_buf != NULL), "malloc succeeded");
+
+ VRFY((H5Dread(dset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, dxpl_id, read_buf) >= 0), "H5Dread succeeded");
+
+ if (read_buf) {
+ HDfree(read_buf);
+ read_buf = NULL;
+ }
+
+ VRFY((H5Sclose(space_id) >= 0), "H5Sclose succeeded");
+ VRFY((H5Pclose(dcpl_id) >= 0), "H5Pclose succeeded");
+ VRFY((H5Pclose(dxpl_id) >= 0), "H5Pclose succeeded");
+ VRFY((H5Dclose(dset_id) >= 0), "H5Dclose succeeded");
+ VRFY((H5Pclose(fapl_id) >= 0), "H5Pclose succeeded");
+ VRFY((H5Fclose(file_id) >= 0), "H5Fclose succeeded");
+}
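
The "Message truncated" stack quoted above comes from a count mismatch between the broadcast's root and its receivers. A minimal standalone MPI sketch (illustrative only, reusing the byte counts from the quoted error) of that mismatch; most MPI implementations abort it with a truncation error like the one above:

#include <mpi.h>

int main(int argc, char **argv)
{
    int  rank;
    char big[2616] = {0}, small[18];

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    if (rank == 0)
        MPI_Bcast(big, 2616, MPI_BYTE, 0, MPI_COMM_WORLD);  /* root sends 2616 bytes */
    else
        MPI_Bcast(small, 18, MPI_BYTE, 0, MPI_COMM_WORLD);  /* others expect only 18 */

    MPI_Finalize();
    return 0;
}
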
+
+/*
+ * A test for HDFFV-10562 which attempts to verify that using linked-chunk
+ * I/O with collective metadata reads enabled doesn't cause issues due to
+ * collective metadata reads being made only by process 0 in H5D__sort_chunk().
+ *
+ * NOTE: Due to the way that the threshold value which pertains to this test
+ * is currently calculated within HDF5, there are several conditions that this
+ * test must maintain. Refer to the function H5D__sort_chunk in H5Dmpio.c for
+ * a better idea of why.
+ *
+ * Condition 1: We need to make sure that the test always selects every single
+ * chunk in the dataset. It is fine if the selection is split up among multiple
+ * ranks, but their combined selection must cover the whole dataset.
+ *
+ * Condition 2: The number of chunks in the dataset divided by the number of MPI
+ * ranks must be at least 10000. In other words, each MPI rank must be
+ * responsible for 10000 or more unique chunks.
+ *
+ * Condition 3: This test will currently only be reliably reproducible for 2 or 3
+ * MPI ranks. The calculated threshold value reduces to a constant 100 / mpi_size,
+ * and is compared against a default value of 30%.
+ *
+ * Failure in this test may cause a hang or, because the MPI calls
+ * pertaining to this issue can mistakenly match up, an MPI error
+ * message similar to:
+ *
+ * #008: H5Dmpio.c line 2338 in H5D__sort_chunk(): MPI_BCast failed
+ * major: Internal error (too specific to document in detail)
+ * minor: Some MPI function failed
+ * #009: H5Dmpio.c line 2338 in H5D__sort_chunk(): Other MPI error, error stack:
+ *PMPI_Bcast(1600)........: MPI_Bcast(buf=0x7eae610, count=320000, MPI_BYTE, root=0, comm=0x84000006) failed
+ *MPIR_Bcast_impl(1452)...:
+ *MPIR_Bcast(1476)........:
+ *MPIR_Bcast_intra(1249)..:
+ *MPIR_SMP_Bcast(1088)....:
+ *MPIR_Bcast_binomial(250): message sizes do not match across processes in the collective routine: Received 2096 but expected 320000
+ * major: Internal error (too specific to document in detail)
+ * minor: MPI Error String
+ */
+void test_link_chunk_io_sort_chunk_issue(void)
+{
+ const char *filename;
+ hsize_t *dataset_dims = NULL;
+ hsize_t max_dataset_dims[LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DIMS];
+ hsize_t sel_dims[1];
+ hsize_t chunk_dims[LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DIMS] = { LINK_CHUNK_IO_SORT_CHUNK_ISSUE_CHUNK_SIZE };
+ hsize_t start[LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DIMS];
+ hsize_t stride[LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DIMS];
+ hsize_t count[LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DIMS];
+ hsize_t block[LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DIMS];
+ hid_t file_id = H5I_INVALID_HID;
+ hid_t fapl_id = H5I_INVALID_HID;
+ hid_t dset_id = H5I_INVALID_HID;
+ hid_t dcpl_id = H5I_INVALID_HID;
+ hid_t dxpl_id = H5I_INVALID_HID;
+ hid_t fspace_id = H5I_INVALID_HID;
+ hid_t mspace_id = H5I_INVALID_HID;
+ int mpi_rank, mpi_size;
+ void *data = NULL;
+ void *read_buf = NULL;
+
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+
+ filename = GetTestParameters();
+
+ fapl_id = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type);
+ VRFY((fapl_id >= 0), "create_faccess_plist succeeded");
+
+ /*
+ * Even though the testphdf5 framework currently sets collective metadata reads
+ * on the FAPL, we call it here just to be sure this test is future-proof, since
+ * demonstrating this issue relies upon it.
+ */
+ VRFY((H5Pset_all_coll_metadata_ops(fapl_id, true) >= 0), "Set collective metadata reads succeeded");
+
+ file_id = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id);
+ VRFY((file_id >= 0), "H5Fcreate succeeded");
+
+ dataset_dims = HDmalloc(LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DIMS * sizeof(*dataset_dims));
+ VRFY((dataset_dims != NULL), "malloc succeeded");
+
+ dataset_dims[0] = (hsize_t)LINK_CHUNK_IO_SORT_CHUNK_ISSUE_CHUNK_SIZE * (hsize_t)mpi_size * (hsize_t)LINK_CHUNK_IO_SORT_CHUNK_ISSUE_Y_DIM_SCALE;
+ max_dataset_dims[0] = H5S_UNLIMITED;
+
+ fspace_id = H5Screate_simple(LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DIMS, dataset_dims, max_dataset_dims);
+ VRFY((fspace_id >= 0), "H5Screate_simple succeeded");
+
+ /*
+ * Set up chunking on the dataset in order to reproduce the problem.
+ */
+ dcpl_id = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY((dcpl_id >= 0), "H5Pcreate succeeded");
+
+ VRFY((H5Pset_chunk(dcpl_id, LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DIMS, chunk_dims) >= 0), "H5Pset_chunk succeeded");
+
+ dset_id = H5Dcreate2(file_id, LINK_CHUNK_IO_SORT_CHUNK_ISSUE_DATASET_NAME, H5T_NATIVE_INT, fspace_id, H5P_DEFAULT, dcpl_id, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "H5Dcreate2 succeeded");
+
+ /*
+ * Set up the hyperslab selection to split the dataset among the ranks.
+ *
+ * The ranks will write rows across the dataset.
+ */
+ stride[0] = LINK_CHUNK_IO_SORT_CHUNK_ISSUE_CHUNK_SIZE;
+ count[0] = (dataset_dims[0] / LINK_CHUNK_IO_SORT_CHUNK_ISSUE_CHUNK_SIZE) / (hsize_t)mpi_size;
+ start[0] = count[0] * (hsize_t)mpi_rank;
+ block[0] = LINK_CHUNK_IO_SORT_CHUNK_ISSUE_CHUNK_SIZE;
+
+ VRFY((H5Sselect_hyperslab(fspace_id, H5S_SELECT_SET, start, stride, count, block) >= 0), "H5Sselect_hyperslab succeeded");
+
+ sel_dims[0] = count[0] * (LINK_CHUNK_IO_SORT_CHUNK_ISSUE_CHUNK_SIZE);
+
+ mspace_id = H5Screate_simple(1, sel_dims, NULL);
+ VRFY((mspace_id >= 0), "H5Screate_simple succeeded");
+
+ data = HDcalloc(1, count[0] * (LINK_CHUNK_IO_SORT_CHUNK_ISSUE_CHUNK_SIZE) * sizeof(int));
+ VRFY((data != NULL), "calloc succeeded");
+
+ dxpl_id = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((dxpl_id >= 0), "H5Pcreate succeeded");
+
+ /*
+ * Enable collective access for the data transfer.
+ */
+ VRFY((H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE) >= 0), "H5Pset_dxpl_mpio succeeded");
+
+ VRFY((H5Dwrite(dset_id, H5T_NATIVE_INT, mspace_id, fspace_id, dxpl_id, data) >= 0), "H5Dwrite succeeded");
+
+ VRFY((H5Fflush(file_id, H5F_SCOPE_GLOBAL) >= 0), "H5Fflush succeeded");
+
+ /*
+ * Ensure that linked-chunk I/O is performed since this is
+ * the particular code path where the issue lies and we don't
+ * want the library doing multi-chunk I/O behind our backs.
+ */
+ VRFY((H5Pset_dxpl_mpio_chunk_opt(dxpl_id, H5FD_MPIO_CHUNK_ONE_IO) >= 0), "H5Pset_dxpl_mpio_chunk_opt succeeded");
+
+ read_buf = HDmalloc(count[0] * (LINK_CHUNK_IO_SORT_CHUNK_ISSUE_CHUNK_SIZE) * sizeof(int));
+ VRFY((read_buf != NULL), "malloc succeeded");
+
+ VRFY((H5Sselect_hyperslab(fspace_id, H5S_SELECT_SET, start, stride, count, block) >= 0), "H5Sselect_hyperslab succeeded");
+
+ sel_dims[0] = count[0] * (LINK_CHUNK_IO_SORT_CHUNK_ISSUE_CHUNK_SIZE);
+
+ VRFY((H5Sclose(mspace_id) >= 0), "H5Sclose succeeded");
+
+ mspace_id = H5Screate_simple(1, sel_dims, NULL);
+ VRFY((mspace_id >= 0), "H5Screate_simple succeeded");
+
+ read_buf = HDrealloc(read_buf, count[0] * (LINK_CHUNK_IO_SORT_CHUNK_ISSUE_CHUNK_SIZE) * sizeof(int));
+ VRFY((read_buf != NULL), "realloc succeeded");
+
+ /*
+ * Finally have each rank read its section of data back from the dataset.
+ */
+ VRFY((H5Dread(dset_id, H5T_NATIVE_INT, mspace_id, fspace_id, dxpl_id, read_buf) >= 0), "H5Dread succeeded");
+
+ if (dataset_dims) {
+ HDfree(dataset_dims);
+ dataset_dims = NULL;
+ }
+
+ if (data) {
+ HDfree(data);
+ data = NULL;
+ }
+
+ if (read_buf) {
+ HDfree(read_buf);
+ read_buf = NULL;
+ }
+
+ VRFY((H5Sclose(fspace_id) >= 0), "H5Sclose succeeded");
+ VRFY((H5Sclose(mspace_id) >= 0), "H5Sclose succeeded");
+ VRFY((H5Pclose(dcpl_id) >= 0), "H5Pclose succeeded");
+ VRFY((H5Pclose(dxpl_id) >= 0), "H5Pclose succeeded");
+ VRFY((H5Dclose(dset_id) >= 0), "H5Dclose succeeded");
+ VRFY((H5Pclose(fapl_id) >= 0), "H5Pclose succeeded");
+ VRFY((H5Fclose(file_id) >= 0), "H5Fclose succeeded");
+}
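
For Condition 3 above, the threshold arithmetic can be checked directly. A small standalone sketch (illustrative only) of why only 2 or 3 ranks reliably trip the issue against the default 30% cutoff:

#include <stdio.h>

int main(void)
{
    int mpi_size;

    /* Per the comment above: the threshold reduces to 100 / mpi_size
     * percent and is compared against a default cutoff of 30%. */
    for (mpi_size = 2; mpi_size <= 5; mpi_size++) {
        int threshold = 100 / mpi_size;
        printf("mpi_size=%d: threshold=%d%% -> %s the 30%% cutoff\n",
               mpi_size, threshold, (threshold >= 30) ? "meets" : "misses");
    }
    return 0; /* only mpi_size 2 (50%) and 3 (33%) meet the cutoff */
}
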
diff --git a/testpar/t_dset.c b/testpar/t_dset.c
index b952bf3..13f9e89 100644
--- a/testpar/t_dset.c
+++ b/testpar/t_dset.c
@@ -36,83 +36,91 @@
* Setup the dimensions of the hyperslab.
* Two modes--by rows or by columns.
* Assume dimension rank is 2.
- * BYROW divide into slabs of rows
- * BYCOL divide into blocks of columns
- * ZROW same as BYROW except process 0 gets 0 rows
- * ZCOL same as BYCOL except process 0 gets 0 columns
+ * BYROW divide into slabs of rows
+ * BYCOL divide into blocks of columns
+ * ZROW same as BYROW except process 0 gets 0 rows
+ * ZCOL same as BYCOL except process 0 gets 0 columns
*/
static void
slab_set(int mpi_rank, int mpi_size, hsize_t start[], hsize_t count[],
- hsize_t stride[], hsize_t block[], int mode)
+ hsize_t stride[], hsize_t block[], int mode)
{
- switch (mode){
+ switch (mode) {
case BYROW:
- /* Each process takes a slabs of rows. */
- block[0] = dim0/mpi_size;
- block[1] = dim1;
- stride[0] = block[0];
- stride[1] = block[1];
- count[0] = 1;
- count[1] = 1;
- start[0] = mpi_rank*block[0];
- start[1] = 0;
-if(VERBOSE_MED) printf("slab_set BYROW\n");
- break;
+ /* Each process takes a slab of rows. */
+ block[0] = (hsize_t)(dim0 / mpi_size);
+ block[1] = (hsize_t)dim1;
+ stride[0] = block[0];
+ stride[1] = block[1];
+ count[0] = 1;
+ count[1] = 1;
+ start[0] = (hsize_t)mpi_rank * block[0];
+ start[1] = 0;
+ if (VERBOSE_MED)
+ HDprintf("slab_set BYROW\n");
+ break;
case BYCOL:
- /* Each process takes a block of columns. */
- block[0] = dim0;
- block[1] = dim1/mpi_size;
- stride[0] = block[0];
- stride[1] = block[1];
- count[0] = 1;
- count[1] = 1;
- start[0] = 0;
- start[1] = mpi_rank*block[1];
-if(VERBOSE_MED) printf("slab_set BYCOL\n");
- break;
+ /* Each process takes a block of columns. */
+ block[0] = (hsize_t)dim0;
+ block[1] = (hsize_t)(dim1 / mpi_size);
+ stride[0] = block[0];
+ stride[1] = block[1];
+ count[0] = 1;
+ count[1] = 1;
+ start[0] = 0;
+ start[1] = (hsize_t)mpi_rank * block[1];
+ if (VERBOSE_MED)
+ HDprintf("slab_set BYCOL\n");
+ break;
case ZROW:
- /* Similar to BYROW except process 0 gets 0 row */
- block[0] = (mpi_rank ? dim0/mpi_size : 0);
- block[1] = dim1;
- stride[0] = (mpi_rank ? block[0] : 1); /* avoid setting stride to 0 */
- stride[1] = block[1];
- count[0] = 1;
- count[1] = 1;
- start[0] = (mpi_rank? mpi_rank*block[0] : 0);
- start[1] = 0;
-if(VERBOSE_MED) printf("slab_set ZROW\n");
- break;
+ /* Similar to BYROW except process 0 gets 0 rows */
+ block[0] = (hsize_t)(mpi_rank ? dim0 / mpi_size : 0);
+ block[1] = (hsize_t)dim1;
+ stride[0] = (mpi_rank ? block[0] : 1); /* avoid setting stride to 0 */
+ stride[1] = block[1];
+ count[0] = 1;
+ count[1] = 1;
+ start[0] = (mpi_rank ? (hsize_t)mpi_rank * block[0] : 0);
+ start[1] = 0;
+ if (VERBOSE_MED)
+ HDprintf("slab_set ZROW\n");
+ break;
case ZCOL:
- /* Similar to BYCOL except process 0 gets 0 column */
- block[0] = dim0;
- block[1] = (mpi_rank ? dim1/mpi_size : 0);
- stride[0] = block[0];
- stride[1] = (mpi_rank ? block[1] : 1); /* avoid setting stride to 0 */
- count[0] = 1;
- count[1] = 1;
- start[0] = 0;
- start[1] = (mpi_rank? mpi_rank*block[1] : 0);
-if(VERBOSE_MED) printf("slab_set ZCOL\n");
- break;
+ /* Similar to BYCOL except process 0 gets 0 column */
+ block[0] = (hsize_t)dim0;
+ block[1] = (hsize_t)(mpi_rank ? dim1 / mpi_size : 0);
+ stride[0] = block[0];
+ stride[1] = (hsize_t)(mpi_rank ? block[1] : 1); /* avoid setting stride to 0 */
+ count[0] = 1;
+ count[1] = 1;
+ start[0] = 0;
+ start[1] = (mpi_rank ? (hsize_t)mpi_rank * block[1] : 0);
+ if (VERBOSE_MED)
+ HDprintf("slab_set ZCOL\n");
+ break;
default:
- /* Unknown mode. Set it to cover the whole dataset. */
- printf("unknown slab_set mode (%d)\n", mode);
- block[0] = dim0;
- block[1] = dim1;
- stride[0] = block[0];
- stride[1] = block[1];
- count[0] = 1;
- count[1] = 1;
- start[0] = 0;
- start[1] = 0;
-if(VERBOSE_MED) printf("slab_set wholeset\n");
- break;
+ /* Unknown mode. Set it to cover the whole dataset. */
+ HDprintf("unknown slab_set mode (%d)\n", mode);
+ block[0] = (hsize_t)dim0;
+ block[1] = (hsize_t)dim1;
+ stride[0] = block[0];
+ stride[1] = block[1];
+ count[0] = 1;
+ count[1] = 1;
+ start[0] = 0;
+ start[1] = 0;
+ if (VERBOSE_MED)
+ HDprintf("slab_set wholeset\n");
+ break;
}
-if(VERBOSE_MED){
- printf("start[]=(%lu,%lu), count[]=(%lu,%lu), stride[]=(%lu,%lu), block[]=(%lu,%lu), total datapoints=%lu\n",
- (unsigned long)start[0], (unsigned long)start[1], (unsigned long)count[0], (unsigned long)count[1],
- (unsigned long)stride[0], (unsigned long)stride[1], (unsigned long)block[0], (unsigned long)block[1],
- (unsigned long)(block[0]*block[1]*count[0]*count[1]));
+ if (VERBOSE_MED) {
+ HDprintf(
+ "start[]=(%lu,%lu), count[]=(%lu,%lu), stride[]=(%lu,%lu), block[]=(%lu,%lu), total datapoints=%lu\n",
+ (unsigned long) start[0], (unsigned long) start[1],
+ (unsigned long) count[0], (unsigned long) count[1],
+ (unsigned long) stride[0], (unsigned long) stride[1],
+ (unsigned long) block[0], (unsigned long) block[1],
+ (unsigned long) (block[0] * block[1] * count[0] * count[1]));
}
}
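
For reference, a minimal standalone sketch (with hypothetical dimensions, not the test's actual dim0/dim1) of the BYROW and BYCOL partitioning that slab_set() computes:

#include <stdio.h>

#define DIM0   8   /* hypothetical row count */
#define DIM1   8   /* hypothetical column count */
#define NRANKS 4   /* hypothetical MPI size */

int main(void)
{
    int rank;

    for (rank = 0; rank < NRANKS; rank++) {
        /* BYROW: a slab of DIM0/NRANKS full rows per rank */
        printf("rank %d BYROW: start=(%d,0), block=(%d,%d)\n",
               rank, rank * (DIM0 / NRANKS), DIM0 / NRANKS, DIM1);
        /* BYCOL: a block of DIM1/NRANKS full columns per rank */
        printf("rank %d BYCOL: start=(0,%d), block=(%d,%d)\n",
               rank, rank * (DIM1 / NRANKS), DIM0, DIM1 / NRANKS);
    }
    return 0;
}
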
@@ -123,7 +131,7 @@ void point_set(hsize_t start[],
hsize_t count[],
hsize_t stride[],
hsize_t block[],
- size_t num_points,
+ size_t num_points,
hsize_t coords[],
int order)
{
@@ -153,13 +161,13 @@ void point_set(hsize_t start[],
}
if(VERBOSE_MED) {
- printf("start[]=(%lu, %lu), count[]=(%lu, %lu), stride[]=(%lu, %lu), block[]=(%lu, %lu), total datapoints=%lu\n",
+ HDprintf("start[]=(%lu, %lu), count[]=(%lu, %lu), stride[]=(%lu, %lu), block[]=(%lu, %lu), total datapoints=%lu\n",
(unsigned long)start[0], (unsigned long)start[1], (unsigned long)count[0], (unsigned long)count[1],
(unsigned long)stride[0], (unsigned long)stride[1], (unsigned long)block[0], (unsigned long)block[1],
(unsigned long)(block[0] * block[1] * count[0] * count[1]));
k = 0;
for(i = 0; i < num_points ; i++) {
- printf("(%d, %d)\n", (int)coords[k], (int)coords[k + 1]);
+ HDprintf("(%d, %d)\n", (int)coords[k], (int)coords[k + 1]);
k += 2;
}
}
@@ -177,10 +185,10 @@ dataset_fill(hsize_t start[], hsize_t block[], DATATYPE * dataset)
/* put some trivial data in the data_array */
for (i=0; i < block[0]; i++){
- for (j=0; j < block[1]; j++){
- *dataptr = (DATATYPE)((i+start[0])*100 + (j+start[1]+1));
- dataptr++;
- }
+ for (j=0; j < block[1]; j++){
+ *dataptr = (DATATYPE)((i+start[0])*100 + (j+start[1]+1));
+ dataptr++;
+ }
}
}
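
The fill pattern above encodes the global coordinates into each value. A tiny standalone sketch (with hypothetical start/block values) of what dataset_fill() produces for a 2 x 3 block starting at row 4:

#include <stdio.h>

int main(void)
{
    unsigned long start[2] = {4, 0};  /* hypothetical slab origin */
    unsigned long block[2] = {2, 3};  /* hypothetical slab extent */
    unsigned long i, j;

    for (i = 0; i < block[0]; i++) {
        for (j = 0; j < block[1]; j++)
            printf("%03lu ", (i + start[0]) * 100 + (j + start[1] + 1));
        printf("\n");  /* prints: 401 402 403 / 501 502 503 */
    }
    return 0;
}
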
@@ -195,19 +203,19 @@ dataset_print(hsize_t start[], hsize_t block[], DATATYPE * dataset)
hsize_t i, j;
/* print the column heading */
- printf("%-8s", "Cols:");
+ HDprintf("%-8s", "Cols:");
for (j=0; j < block[1]; j++){
- printf("%3lu ", (unsigned long)(start[1]+j));
+ HDprintf("%3lu ", (unsigned long)(start[1]+j));
}
- printf("\n");
+ HDprintf("\n");
/* print the slab data */
for (i=0; i < block[0]; i++){
- printf("Row %2lu: ", (unsigned long)(i+start[0]));
- for (j=0; j < block[1]; j++){
- printf("%03d ", *dataptr++);
- }
- printf("\n");
+ HDprintf("Row %2lu: ", (unsigned long)(i+start[0]));
+ for (j=0; j < block[1]; j++){
+ HDprintf("%03d ", *dataptr++);
+ }
+ HDprintf("\n");
}
}
@@ -223,35 +231,35 @@ dataset_vrfy(hsize_t start[], hsize_t count[], hsize_t stride[], hsize_t block[]
/* print it if VERBOSE_MED */
if(VERBOSE_MED) {
- printf("dataset_vrfy dumping:::\n");
- printf("start(%lu, %lu), count(%lu, %lu), stride(%lu, %lu), block(%lu, %lu)\n",
- (unsigned long)start[0], (unsigned long)start[1], (unsigned long)count[0], (unsigned long)count[1],
- (unsigned long)stride[0], (unsigned long)stride[1], (unsigned long)block[0], (unsigned long)block[1]);
- printf("original values:\n");
- dataset_print(start, block, original);
- printf("compared values:\n");
- dataset_print(start, block, dataset);
+ HDprintf("dataset_vrfy dumping:::\n");
+ HDprintf("start(%lu, %lu), count(%lu, %lu), stride(%lu, %lu), block(%lu, %lu)\n",
+ (unsigned long)start[0], (unsigned long)start[1], (unsigned long)count[0], (unsigned long)count[1],
+ (unsigned long)stride[0], (unsigned long)stride[1], (unsigned long)block[0], (unsigned long)block[1]);
+ HDprintf("original values:\n");
+ dataset_print(start, block, original);
+ HDprintf("compared values:\n");
+ dataset_print(start, block, dataset);
}
vrfyerrs = 0;
for (i=0; i < block[0]; i++){
- for (j=0; j < block[1]; j++){
- if(*dataset != *original){
- if(vrfyerrs++ < MAX_ERR_REPORT || VERBOSE_MED){
- printf("Dataset Verify failed at [%lu][%lu](row %lu, col %lu): expect %d, got %d\n",
- (unsigned long)i, (unsigned long)j,
- (unsigned long)(i+start[0]), (unsigned long)(j+start[1]),
- *(original), *(dataset));
- }
- dataset++;
- original++;
- }
- }
+ for (j=0; j < block[1]; j++){
+ if(*dataset != *original){
+ if(vrfyerrs++ < MAX_ERR_REPORT || VERBOSE_MED){
+ HDprintf("Dataset Verify failed at [%lu][%lu](row %lu, col %lu): expect %d, got %d\n",
+ (unsigned long)i, (unsigned long)j,
+ (unsigned long)(i+start[0]), (unsigned long)(j+start[1]),
+ *(original), *(dataset));
+ }
+ dataset++;
+ original++;
+ }
+ }
}
if(vrfyerrs > MAX_ERR_REPORT && !VERBOSE_MED)
- printf("[more errors ...]\n");
+ HDprintf("[more errors ...]\n");
if(vrfyerrs)
- printf("%d errors found in dataset_vrfy\n", vrfyerrs);
+ HDprintf("%d errors found in dataset_vrfy\n", vrfyerrs);
return(vrfyerrs);
}
@@ -272,20 +280,20 @@ void
dataset_writeInd(void)
{
hid_t fid; /* HDF5 file ID */
- hid_t acc_tpl; /* File access templates */
- hid_t sid; /* Dataspace ID */
- hid_t file_dataspace; /* File dataspace ID */
- hid_t mem_dataspace; /* memory dataspace ID */
- hid_t dataset1, dataset2; /* Dataset ID */
- hsize_t dims[RANK]; /* dataset dim sizes */
- DATATYPE *data_array1 = NULL; /* data buffer */
+ hid_t acc_tpl; /* File access templates */
+ hid_t sid; /* Dataspace ID */
+ hid_t file_dataspace; /* File dataspace ID */
+ hid_t mem_dataspace; /* memory dataspace ID */
+ hid_t dataset1, dataset2; /* Dataset ID */
+ hsize_t dims[RANK]; /* dataset dim sizes */
+ DATATYPE *data_array1 = NULL; /* data buffer */
const char *filename;
- hsize_t start[RANK]; /* for hyperslab setting */
- hsize_t count[RANK], stride[RANK]; /* for hyperslab setting */
- hsize_t block[RANK]; /* for hyperslab setting */
+ hsize_t start[RANK]; /* for hyperslab setting */
+ hsize_t count[RANK], stride[RANK]; /* for hyperslab setting */
+ hsize_t block[RANK]; /* for hyperslab setting */
- herr_t ret; /* Generic return value */
+ herr_t ret; /* Generic return value */
int mpi_size, mpi_rank;
MPI_Comm comm = MPI_COMM_WORLD;
@@ -293,14 +301,14 @@ dataset_writeInd(void)
filename = GetTestParameters();
if(VERBOSE_MED)
- printf("Independent write test on file %s\n", filename);
+ HDprintf("Independent write test on file %s\n", filename);
/* set up MPI parameters */
MPI_Comm_size(MPI_COMM_WORLD,&mpi_size);
MPI_Comm_rank(MPI_COMM_WORLD,&mpi_rank);
/* allocate memory for data buffer */
- data_array1 = (DATATYPE *)HDmalloc(dim0*dim1*sizeof(DATATYPE));
+ data_array1 = (DATATYPE *)HDmalloc((size_t)dim0*(size_t)dim1*sizeof(DATATYPE));
VRFY((data_array1 != NULL), "data_array1 HDmalloc succeeded");
/* ----------------------------------------
@@ -324,20 +332,20 @@ dataset_writeInd(void)
* and the slabs local to the MPI process.
* ------------------------------------------- */
/* setup dimensionality object */
- dims[0] = dim0;
- dims[1] = dim1;
+ dims[0] = (hsize_t)dim0;
+ dims[1] = (hsize_t)dim1;
sid = H5Screate_simple (RANK, dims, NULL);
VRFY((sid >= 0), "H5Screate_simple succeeded");
/* create a dataset collectively */
dataset1 = H5Dcreate2(fid, DATASETNAME1, H5T_NATIVE_INT, sid,
- H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
VRFY((dataset1 >= 0), "H5Dcreate2 succeeded");
/* create another dataset collectively */
dataset2 = H5Dcreate2(fid, DATASETNAME2, H5T_NATIVE_INT, sid,
- H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
VRFY((dataset2 >= 0), "H5Dcreate2 succeeded");
@@ -366,28 +374,28 @@ dataset_writeInd(void)
/* write data independently */
ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- H5P_DEFAULT, data_array1);
+ H5P_DEFAULT, data_array1);
VRFY((ret >= 0), "H5Dwrite dataset1 succeeded");
/* write data independently */
ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- H5P_DEFAULT, data_array1);
+ H5P_DEFAULT, data_array1);
VRFY((ret >= 0), "H5Dwrite dataset2 succeeded");
/* setup dimensions again to write with zero rows for process 0 */
if(VERBOSE_MED)
- printf("writeInd by some with zero row\n");
+ HDprintf("writeInd by some with zero row\n");
slab_set(mpi_rank, mpi_size, start, count, stride, block, ZROW);
ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
/* need to make mem_dataspace to match for process 0 */
if(MAINPROCESS){
- ret = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block);
- VRFY((ret >= 0), "H5Sset_hyperslab mem_dataspace succeeded");
+ ret = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "H5Sset_hyperslab mem_dataspace succeeded");
}
MESG("writeInd by some with zero row");
if((mpi_rank/2)*2 != mpi_rank){
ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- H5P_DEFAULT, data_array1);
+ H5P_DEFAULT, data_array1);
VRFY((ret >= 0), "H5Dwrite dataset1 by ZROW succeeded");
}
#ifdef BARRIER_CHECKS
@@ -418,19 +426,19 @@ void
dataset_readInd(void)
{
hid_t fid; /* HDF5 file ID */
- hid_t acc_tpl; /* File access templates */
- hid_t file_dataspace; /* File dataspace ID */
- hid_t mem_dataspace; /* memory dataspace ID */
- hid_t dataset1, dataset2; /* Dataset ID */
- DATATYPE *data_array1 = NULL; /* data buffer */
- DATATYPE *data_origin1 = NULL; /* expected data buffer */
+ hid_t acc_tpl; /* File access templates */
+ hid_t file_dataspace; /* File dataspace ID */
+ hid_t mem_dataspace; /* memory dataspace ID */
+ hid_t dataset1, dataset2; /* Dataset ID */
+ DATATYPE *data_array1 = NULL; /* data buffer */
+ DATATYPE *data_origin1 = NULL; /* expected data buffer */
const char *filename;
- hsize_t start[RANK]; /* for hyperslab setting */
- hsize_t count[RANK], stride[RANK]; /* for hyperslab setting */
- hsize_t block[RANK]; /* for hyperslab setting */
+ hsize_t start[RANK]; /* for hyperslab setting */
+ hsize_t count[RANK], stride[RANK]; /* for hyperslab setting */
+ hsize_t block[RANK]; /* for hyperslab setting */
- herr_t ret; /* Generic return value */
+ herr_t ret; /* Generic return value */
int mpi_size, mpi_rank;
MPI_Comm comm = MPI_COMM_WORLD;
@@ -438,16 +446,16 @@ dataset_readInd(void)
filename = GetTestParameters();
if(VERBOSE_MED)
- printf("Independent read test on file %s\n", filename);
+ HDprintf("Independent read test on file %s\n", filename);
/* set up MPI parameters */
MPI_Comm_size(MPI_COMM_WORLD,&mpi_size);
MPI_Comm_rank(MPI_COMM_WORLD,&mpi_rank);
/* allocate memory for data buffer */
- data_array1 = (DATATYPE *)HDmalloc(dim0*dim1*sizeof(DATATYPE));
+ data_array1 = (DATATYPE *)HDmalloc((size_t)dim0*(size_t)dim1*sizeof(DATATYPE));
VRFY((data_array1 != NULL), "data_array1 HDmalloc succeeded");
- data_origin1 = (DATATYPE *)HDmalloc(dim0*dim1*sizeof(DATATYPE));
+ data_origin1 = (DATATYPE *)HDmalloc((size_t)dim0*(size_t)dim1*sizeof(DATATYPE));
VRFY((data_origin1 != NULL), "data_origin1 HDmalloc succeeded");
/* setup file access template */
@@ -489,7 +497,7 @@ dataset_readInd(void)
/* read data independently */
ret = H5Dread(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- H5P_DEFAULT, data_array1);
+ H5P_DEFAULT, data_array1);
VRFY((ret >= 0), "");
/* verify the read data with original expected data */
@@ -498,7 +506,7 @@ dataset_readInd(void)
/* read data independently */
ret = H5Dread(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- H5P_DEFAULT, data_array1);
+ H5P_DEFAULT, data_array1);
VRFY((ret >= 0), "");
/* verify the read data with original expected data */
@@ -540,28 +548,27 @@ void
dataset_writeAll(void)
{
hid_t fid; /* HDF5 file ID */
- hid_t acc_tpl; /* File access templates */
- hid_t xfer_plist; /* Dataset transfer properties list */
- hid_t sid; /* Dataspace ID */
- hid_t file_dataspace; /* File dataspace ID */
- hid_t mem_dataspace; /* memory dataspace ID */
+ hid_t acc_tpl; /* File access templates */
+ hid_t xfer_plist; /* Dataset transfer properties list */
+ hid_t sid; /* Dataspace ID */
+ hid_t file_dataspace; /* File dataspace ID */
+ hid_t mem_dataspace; /* memory dataspace ID */
hid_t dataset1, dataset2, dataset3, dataset4; /* Dataset ID */
hid_t dataset5, dataset6, dataset7; /* Dataset ID */
- hid_t datatype; /* Datatype ID */
- hsize_t dims[RANK]; /* dataset dim sizes */
- DATATYPE *data_array1 = NULL; /* data buffer */
+ hid_t datatype; /* Datatype ID */
+ hsize_t dims[RANK]; /* dataset dim sizes */
+ DATATYPE *data_array1 = NULL; /* data buffer */
const char *filename;
- hsize_t start[RANK]; /* for hyperslab setting */
- hsize_t count[RANK], stride[RANK]; /* for hyperslab setting */
- hsize_t block[RANK]; /* for hyperslab setting */
+ hsize_t start[RANK]; /* for hyperslab setting */
+ hsize_t count[RANK], stride[RANK]; /* for hyperslab setting */
+ hsize_t block[RANK]; /* for hyperslab setting */
size_t num_points; /* for point selection */
hsize_t *coords = NULL; /* for point selection */
hsize_t current_dims; /* for point selection */
- int i;
- herr_t ret; /* Generic return value */
+ herr_t ret; /* Generic return value */
int mpi_size, mpi_rank;
MPI_Comm comm = MPI_COMM_WORLD;
@@ -569,19 +576,19 @@ dataset_writeAll(void)
filename = GetTestParameters();
if(VERBOSE_MED)
- printf("Collective write test on file %s\n", filename);
+ HDprintf("Collective write test on file %s\n", filename);
/* set up MPI parameters */
MPI_Comm_size(MPI_COMM_WORLD,&mpi_size);
MPI_Comm_rank(MPI_COMM_WORLD,&mpi_rank);
/* set up the coords array selection */
- num_points = dim1;
- coords = (hsize_t *)HDmalloc(dim1 * RANK * sizeof(hsize_t));
+ num_points = (size_t)dim1;
+ coords = (hsize_t *)HDmalloc((size_t)dim1 * (size_t)RANK * sizeof(hsize_t));
VRFY((coords != NULL), "coords malloc succeeded");
/* allocate memory for data buffer */
- data_array1 = (DATATYPE *)HDmalloc(dim0*dim1*sizeof(DATATYPE));
+ data_array1 = (DATATYPE *)HDmalloc((size_t)dim0*(size_t)dim1*sizeof(DATATYPE));
VRFY((data_array1 != NULL), "data_array1 HDmalloc succeeded");
/* -------------------
@@ -605,8 +612,8 @@ dataset_writeAll(void)
* and create the dataset
* ------------------------- */
/* setup 2-D dimensionality object */
- dims[0] = dim0;
- dims[1] = dim1;
+ dims[0] = (hsize_t)dim0;
+ dims[1] = (hsize_t)dim1;
sid = H5Screate_simple (RANK, dims, NULL);
VRFY((sid >= 0), "H5Screate_simple succeeded");
@@ -669,8 +676,8 @@ dataset_writeAll(void)
dataset_fill(start, block, data_array1);
MESG("data_array initialized");
if(VERBOSE_MED){
- MESG("data_array created");
- dataset_print(start, block, data_array1);
+ MESG("data_array created");
+ dataset_print(start, block, data_array1);
}
/* set up the collective transfer properties list */
@@ -679,31 +686,31 @@ dataset_writeAll(void)
ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
- ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
- VRFY((ret>= 0),"set independent IO collectively succeeded");
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret>= 0),"set independent IO collectively succeeded");
}
/* write data collectively */
MESG("writeAll by Row");
ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- xfer_plist, data_array1);
+ xfer_plist, data_array1);
VRFY((ret >= 0), "H5Dwrite dataset1 succeeded");
/* setup dimensions again to writeAll with zero rows for process 0 */
if(VERBOSE_MED)
- printf("writeAll by some with zero row\n");
+ HDprintf("writeAll by some with zero row\n");
slab_set(mpi_rank, mpi_size, start, count, stride, block, ZROW);
ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
/* need to make mem_dataspace to match for process 0 */
if(MAINPROCESS){
- ret = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block);
- VRFY((ret >= 0), "H5Sset_hyperslab mem_dataspace succeeded");
+ ret = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "H5Sset_hyperslab mem_dataspace succeeded");
}
MESG("writeAll by some with zero row");
ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- xfer_plist, data_array1);
+ xfer_plist, data_array1);
VRFY((ret >= 0), "H5Dwrite dataset1 by ZROW succeeded");
/* release all temporary handles. */
@@ -720,8 +727,8 @@ dataset_writeAll(void)
dataset_fill(start, block, data_array1);
MESG("data_array initialized");
if(VERBOSE_MED){
- MESG("data_array created");
- dataset_print(start, block, data_array1);
+ MESG("data_array created");
+ dataset_print(start, block, data_array1);
}
/* create a file dataspace independently */
@@ -738,8 +745,8 @@ dataset_writeAll(void)
dataset_fill(start, block, data_array1);
MESG("data_array initialized");
if(VERBOSE_MED){
- MESG("data_array created");
- dataset_print(start, block, data_array1);
+ MESG("data_array created");
+ dataset_print(start, block, data_array1);
}
/* set up the collective transfer properties list */
@@ -748,30 +755,30 @@ dataset_writeAll(void)
ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
VRFY((ret >= 0), "H5Pcreate xfer succeeded");
if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
- ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
- VRFY((ret>= 0),"set independent IO collectively succeeded");
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret>= 0),"set independent IO collectively succeeded");
}
/* write data independently */
ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- xfer_plist, data_array1);
+ xfer_plist, data_array1);
VRFY((ret >= 0), "H5Dwrite dataset2 succeeded");
/* setup dimensions again to writeAll with zero columns for process 0 */
if(VERBOSE_MED)
- printf("writeAll by some with zero col\n");
+ HDprintf("writeAll by some with zero col\n");
slab_set(mpi_rank, mpi_size, start, count, stride, block, ZCOL);
ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
/* need to make mem_dataspace to match for process 0 */
if(MAINPROCESS){
- ret = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block);
- VRFY((ret >= 0), "H5Sset_hyperslab mem_dataspace succeeded");
+ ret = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "H5Sset_hyperslab mem_dataspace succeeded");
}
MESG("writeAll by some with zero col");
ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- xfer_plist, data_array1);
+ xfer_plist, data_array1);
VRFY((ret >= 0), "H5Dwrite dataset1 by ZCOL succeeded");
/* release all temporary handles. */
@@ -789,8 +796,8 @@ dataset_writeAll(void)
file_dataspace = H5Dget_space (dataset3);
VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
if(MAINPROCESS) {
- ret = H5Sselect_none(file_dataspace);
- VRFY((ret >= 0), "H5Sselect_none file_dataspace succeeded");
+ ret = H5Sselect_none(file_dataspace);
+ VRFY((ret >= 0), "H5Sselect_none file_dataspace succeeded");
} /* end if */
else {
ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
@@ -801,16 +808,16 @@ dataset_writeAll(void)
mem_dataspace = H5Screate_simple (RANK, block, NULL);
VRFY((mem_dataspace >= 0), "");
if(MAINPROCESS) {
- ret = H5Sselect_none(mem_dataspace);
- VRFY((ret >= 0), "H5Sselect_none mem_dataspace succeeded");
+ ret = H5Sselect_none(mem_dataspace);
+ VRFY((ret >= 0), "H5Sselect_none mem_dataspace succeeded");
} /* end if */
/* fill the local slab with some trivial data */
dataset_fill(start, block, data_array1);
MESG("data_array initialized");
if(VERBOSE_MED) {
- MESG("data_array created");
- dataset_print(start, block, data_array1);
+ MESG("data_array created");
+ dataset_print(start, block, data_array1);
} /* end if */
/* set up the collective transfer properties list */
@@ -819,21 +826,21 @@ dataset_writeAll(void)
ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
VRFY((ret >= 0), "H5Pcreate xfer succeeded");
if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
- ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
- VRFY((ret>= 0),"set independent IO collectively succeeded");
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret>= 0),"set independent IO collectively succeeded");
}
/* write data collectively */
MESG("writeAll with none");
ret = H5Dwrite(dataset3, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- xfer_plist, data_array1);
+ xfer_plist, data_array1);
VRFY((ret >= 0), "H5Dwrite dataset3 succeeded");
/* write data collectively (with datatype conversion) */
MESG("writeAll with none");
ret = H5Dwrite(dataset3, H5T_NATIVE_UCHAR, mem_dataspace, file_dataspace,
- xfer_plist, data_array1);
+ xfer_plist, data_array1);
VRFY((ret >= 0), "H5Dwrite dataset3 succeeded");
/* release all temporary handles. */
@@ -850,8 +857,8 @@ dataset_writeAll(void)
file_dataspace = H5Dget_space (dataset4);
VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
if(MAINPROCESS) {
- ret = H5Sselect_none(file_dataspace);
- VRFY((ret >= 0), "H5Sselect_all file_dataspace succeeded");
+ ret = H5Sselect_none(file_dataspace);
+ VRFY((ret >= 0), "H5Sselect_all file_dataspace succeeded");
} /* end if */
else {
ret = H5Sselect_all(file_dataspace);
@@ -862,8 +869,8 @@ dataset_writeAll(void)
mem_dataspace = H5Screate(H5S_SCALAR);
VRFY((mem_dataspace >= 0), "");
if(MAINPROCESS) {
- ret = H5Sselect_none(mem_dataspace);
- VRFY((ret >= 0), "H5Sselect_all mem_dataspace succeeded");
+ ret = H5Sselect_none(mem_dataspace);
+ VRFY((ret >= 0), "H5Sselect_all mem_dataspace succeeded");
} /* end if */
else {
ret = H5Sselect_all(mem_dataspace);
@@ -874,8 +881,8 @@ dataset_writeAll(void)
dataset_fill(start, block, data_array1);
MESG("data_array initialized");
if(VERBOSE_MED) {
- MESG("data_array created");
- dataset_print(start, block, data_array1);
+ MESG("data_array created");
+ dataset_print(start, block, data_array1);
} /* end if */
/* set up the collective transfer properties list */
@@ -884,20 +891,20 @@ dataset_writeAll(void)
ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
VRFY((ret >= 0), "H5Pcreate xfer succeeded");
if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
- ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
- VRFY((ret>= 0),"set independent IO collectively succeeded");
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret>= 0),"set independent IO collectively succeeded");
}
/* write data collectively */
MESG("writeAll with scalar dataspace");
ret = H5Dwrite(dataset4, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- xfer_plist, data_array1);
+ xfer_plist, data_array1);
VRFY((ret >= 0), "H5Dwrite dataset4 succeeded");
/* write data collectively (with datatype conversion) */
MESG("writeAll with scalar dataspace");
ret = H5Dwrite(dataset4, H5T_NATIVE_UCHAR, mem_dataspace, file_dataspace,
- xfer_plist, data_array1);
+ xfer_plist, data_array1);
VRFY((ret >= 0), "H5Dwrite dataset4 succeeded");
/* release all temporary handles. */
@@ -907,30 +914,30 @@ dataset_writeAll(void)
if(data_array1) free(data_array1);
- data_array1 = (DATATYPE *)malloc(dim0*dim1*sizeof(DATATYPE));
+ data_array1 = (DATATYPE *)HDmalloc((size_t)dim0*(size_t)dim1*sizeof(DATATYPE));
VRFY((data_array1 != NULL), "data_array1 malloc succeeded");
block[0] = 1;
- block[1] = dim1;
+ block[1] = (hsize_t)dim1;
stride[0] = 1;
- stride[1] = dim1;
+ stride[1] = (hsize_t)dim1;
count[0] = 1;
count[1] = 1;
- start[0] = dim0/mpi_size * mpi_rank;
+ start[0] = (hsize_t)(dim0/mpi_size * mpi_rank);
start[1] = 0;
dataset_fill(start, block, data_array1);
MESG("data_array initialized");
if(VERBOSE_MED){
- MESG("data_array created");
- dataset_print(start, block, data_array1);
+ MESG("data_array created");
+ dataset_print(start, block, data_array1);
}
/* Dataset5: point selection in File - Hyperslab selection in Memory*/
/* create a file dataspace independently */
point_set (start, count, stride, block, num_points, coords, OUT_OF_ORDER);
file_dataspace = H5Dget_space (dataset5);
- VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
+ VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
ret = H5Sselect_elements(file_dataspace, H5S_SELECT_SET, num_points, coords);
VRFY((ret >= 0), "H5Sselect_elements succeeded");
@@ -963,7 +970,7 @@ dataset_writeAll(void)
/* Dataset6: point selection in File - Point selection in Memory*/
/* create a file dataspace independently */
- start[0] = dim0/mpi_size * mpi_rank;
+ start[0] = (hsize_t)(dim0/mpi_size * mpi_rank);
start[1] = 0;
point_set (start, count, stride, block, num_points, coords, OUT_OF_ORDER);
file_dataspace = H5Dget_space (dataset6);
@@ -1001,11 +1008,11 @@ dataset_writeAll(void)
/* Dataset7: point selection in File - All selection in Memory*/
/* create a file dataspace independently */
- start[0] = dim0/mpi_size * mpi_rank;
+ start[0] = (hsize_t)(dim0/mpi_size * mpi_rank);
start[1] = 0;
point_set (start, count, stride, block, num_points, coords, IN_ORDER);
file_dataspace = H5Dget_space (dataset7);
- VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
+ VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
ret = H5Sselect_elements(file_dataspace, H5S_SELECT_SET, num_points, coords);
VRFY((ret >= 0), "H5Sselect_elements succeeded");
@@ -1075,25 +1082,24 @@ void
dataset_readAll(void)
{
hid_t fid; /* HDF5 file ID */
- hid_t acc_tpl; /* File access templates */
- hid_t xfer_plist; /* Dataset transfer properties list */
- hid_t file_dataspace; /* File dataspace ID */
- hid_t mem_dataspace; /* memory dataspace ID */
+ hid_t acc_tpl; /* File access templates */
+ hid_t xfer_plist; /* Dataset transfer properties list */
+ hid_t file_dataspace; /* File dataspace ID */
+ hid_t mem_dataspace; /* memory dataspace ID */
hid_t dataset1, dataset2, dataset5, dataset6, dataset7; /* Dataset ID */
- DATATYPE *data_array1 = NULL; /* data buffer */
- DATATYPE *data_origin1 = NULL; /* expected data buffer */
+ DATATYPE *data_array1 = NULL; /* data buffer */
+ DATATYPE *data_origin1 = NULL; /* expected data buffer */
const char *filename;
- hsize_t start[RANK]; /* for hyperslab setting */
- hsize_t count[RANK], stride[RANK]; /* for hyperslab setting */
- hsize_t block[RANK]; /* for hyperslab setting */
+ hsize_t start[RANK]; /* for hyperslab setting */
+ hsize_t count[RANK], stride[RANK]; /* for hyperslab setting */
+ hsize_t block[RANK]; /* for hyperslab setting */
size_t num_points; /* for point selection */
hsize_t *coords = NULL; /* for point selection */
- hsize_t current_dims; /* for point selection */
int i,j,k;
- herr_t ret; /* Generic return value */
+ herr_t ret; /* Generic return value */
int mpi_size, mpi_rank;
MPI_Comm comm = MPI_COMM_WORLD;
@@ -1101,21 +1107,21 @@ dataset_readAll(void)
filename = GetTestParameters();
if(VERBOSE_MED)
- printf("Collective read test on file %s\n", filename);
+ HDprintf("Collective read test on file %s\n", filename);
/* set up MPI parameters */
MPI_Comm_size(MPI_COMM_WORLD,&mpi_size);
MPI_Comm_rank(MPI_COMM_WORLD,&mpi_rank);
/* set up the coords array selection */
- num_points = dim1;
- coords = (hsize_t *)HDmalloc(dim0 * dim1 * RANK * sizeof(hsize_t));
+ num_points = (size_t)dim1;
+ coords = (hsize_t *)HDmalloc((size_t)dim0 * (size_t)dim1 * RANK * sizeof(hsize_t));
VRFY((coords != NULL), "coords malloc succeeded");
/* allocate memory for data buffer */
- data_array1 = (DATATYPE *)HDmalloc(dim0*dim1*sizeof(DATATYPE));
+ data_array1 = (DATATYPE *)HDmalloc((size_t)dim0*(size_t)dim1*sizeof(DATATYPE));
VRFY((data_array1 != NULL), "data_array1 HDmalloc succeeded");
- data_origin1 = (DATATYPE *)HDmalloc(dim0*dim1*sizeof(DATATYPE));
+ data_origin1 = (DATATYPE *)HDmalloc((size_t)dim0*(size_t)dim1*sizeof(DATATYPE));
VRFY((data_origin1 != NULL), "data_origin1 HDmalloc succeeded");
/* -------------------
@@ -1174,8 +1180,8 @@ dataset_readAll(void)
dataset_fill(start, block, data_origin1);
MESG("data_array initialized");
if(VERBOSE_MED){
- MESG("data_array created");
- dataset_print(start, block, data_origin1);
+ MESG("data_array created");
+ dataset_print(start, block, data_origin1);
}
/* set up the collective transfer properties list */
@@ -1184,14 +1190,14 @@ dataset_readAll(void)
ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
VRFY((ret >= 0), "H5Pcreate xfer succeeded");
if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
- ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
- VRFY((ret>= 0),"set independent IO collectively succeeded");
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret>= 0),"set independent IO collectively succeeded");
}
/* read data collectively */
ret = H5Dread(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- xfer_plist, data_array1);
+ xfer_plist, data_array1);
VRFY((ret >= 0), "H5Dread dataset1 succeeded");
/* verify the read data with original expected data */
@@ -1200,18 +1206,18 @@ dataset_readAll(void)
/* setup dimensions again to readAll with zero columns for process 0 */
if(VERBOSE_MED)
- printf("readAll by some with zero col\n");
+ HDprintf("readAll by some with zero col\n");
slab_set(mpi_rank, mpi_size, start, count, stride, block, ZCOL);
ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
/* need to make mem_dataspace to match for process 0 */
if(MAINPROCESS){
- ret = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block);
- VRFY((ret >= 0), "H5Sset_hyperslab mem_dataspace succeeded");
+ ret = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "H5Sset_hyperslab mem_dataspace succeeded");
}
MESG("readAll by some with zero col");
ret = H5Dread(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- xfer_plist, data_array1);
+ xfer_plist, data_array1);
VRFY((ret >= 0), "H5Dread dataset1 by ZCOL succeeded");
/* verify the read data with original expected data */
@@ -1242,8 +1248,8 @@ dataset_readAll(void)
dataset_fill(start, block, data_origin1);
MESG("data_array initialized");
if(VERBOSE_MED){
- MESG("data_array created");
- dataset_print(start, block, data_origin1);
+ MESG("data_array created");
+ dataset_print(start, block, data_origin1);
}
/* set up the collective transfer properties list */
@@ -1252,14 +1258,14 @@ dataset_readAll(void)
ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
VRFY((ret >= 0), "H5Pcreate xfer succeeded");
if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
- ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
- VRFY((ret>= 0),"set independent IO collectively succeeded");
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret>= 0),"set independent IO collectively succeeded");
}
/* read data collectively */
ret = H5Dread(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- xfer_plist, data_array1);
+ xfer_plist, data_array1);
VRFY((ret >= 0), "H5Dread dataset2 succeeded");
/* verify the read data with original expected data */
@@ -1268,18 +1274,18 @@ dataset_readAll(void)
/* setup dimensions again to readAll with zero rows for process 0 */
if(VERBOSE_MED)
- printf("readAll by some with zero row\n");
+ HDprintf("readAll by some with zero row\n");
slab_set(mpi_rank, mpi_size, start, count, stride, block, ZROW);
ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
/* need to make mem_dataspace to match for process 0 */
if(MAINPROCESS){
- ret = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block);
- VRFY((ret >= 0), "H5Sset_hyperslab mem_dataspace succeeded");
+ ret = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "H5Sset_hyperslab mem_dataspace succeeded");
}
MESG("readAll by some with zero row");
ret = H5Dread(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- xfer_plist, data_array1);
+ xfer_plist, data_array1);
VRFY((ret >= 0), "H5Dread dataset1 by ZROW succeeded");
/* verify the read data with original expected data */
@@ -1293,25 +1299,25 @@ dataset_readAll(void)
if(data_array1) free(data_array1);
if(data_origin1) free(data_origin1);
- data_array1 = (DATATYPE *)malloc(dim0*dim1*sizeof(DATATYPE));
+ data_array1 = (DATATYPE *)HDmalloc((size_t)dim0*(size_t)dim1*sizeof(DATATYPE));
VRFY((data_array1 != NULL), "data_array1 malloc succeeded");
- data_origin1 = (DATATYPE *)malloc(dim0*dim1*sizeof(DATATYPE));
+ data_origin1 = (DATATYPE *)HDmalloc((size_t)dim0*(size_t)dim1*sizeof(DATATYPE));
VRFY((data_origin1 != NULL), "data_origin1 malloc succeeded");
block[0] = 1;
- block[1] = dim1;
+ block[1] = (hsize_t)dim1;
stride[0] = 1;
- stride[1] = dim1;
+ stride[1] = (hsize_t)dim1;
count[0] = 1;
count[1] = 1;
- start[0] = dim0/mpi_size * mpi_rank;
+ start[0] = (hsize_t)(dim0/mpi_size * mpi_rank);
start[1] = 0;
dataset_fill(start, block, data_origin1);
MESG("data_array initialized");
if(VERBOSE_MED){
- MESG("data_array created");
- dataset_print(start, block, data_origin1);
+ MESG("data_array created");
+ dataset_print(start, block, data_origin1);
}
/* Dataset5: point selection in memory - Hyperslab selection in file*/
@@ -1335,8 +1341,8 @@ dataset_readAll(void)
ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
VRFY((ret >= 0), "H5Pcreate xfer succeeded");
if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
- ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
- VRFY((ret>= 0),"set independent IO collectively succeeded");
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret>= 0),"set independent IO collectively succeeded");
}
/* read data collectively */
@@ -1344,7 +1350,7 @@ dataset_readAll(void)
xfer_plist, data_array1);
VRFY((ret >= 0), "H5Dread dataset5 succeeded");
-
+
ret = dataset_vrfy(start, count, stride, block, data_array1, data_origin1);
if(ret) nerrors++;
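
The hunks above keep re-indenting the same transfer-property-list idiom: create an H5P_DATASET_XFER list, request collective MPI-IO, and, when the test is driven with DXFER_INDEPENDENT_IO, downgrade the low-level transfer to individual I/O while keeping the collective call semantics. A minimal sketch of that idiom outside the test harness (the helper name is illustrative):

    #include <hdf5.h>

    /* Sketch: build a transfer property list that requests collective
     * MPI-IO, optionally telling HDF5 to perform the low-level I/O
     * independently (H5FD_MPIO_INDIVIDUAL_IO) under the collective call. */
    static hid_t
    make_xfer_plist(int use_individual_io)
    {
        hid_t xfer_plist = H5Pcreate(H5P_DATASET_XFER);

        if (xfer_plist < 0)
            return -1;
        if (H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE) < 0)
            goto error;
        if (use_individual_io &&
            H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO) < 0)
            goto error;
        return xfer_plist;

    error:
        H5Pclose(xfer_plist);
        return -1;
    }
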
@@ -1355,12 +1361,12 @@ dataset_readAll(void)
if(data_array1) free(data_array1);
- data_array1 = (DATATYPE *)malloc(dim0*dim1*sizeof(DATATYPE));
+ data_array1 = (DATATYPE *)HDmalloc((size_t)dim0*(size_t)dim1*sizeof(DATATYPE));
VRFY((data_array1 != NULL), "data_array1 malloc succeeded");
/* Dataset6: point selection in File - Point selection in Memory*/
/* create a file dataspace independently */
- start[0] = dim0/mpi_size * mpi_rank;
+ start[0] = (hsize_t)(dim0/mpi_size * mpi_rank);
start[1] = 0;
point_set (start, count, stride, block, num_points, coords, IN_ORDER);
file_dataspace = H5Dget_space (dataset6);
@@ -1400,7 +1406,7 @@ dataset_readAll(void)
H5Pclose(xfer_plist);
if(data_array1) free(data_array1);
- data_array1 = (DATATYPE *)malloc(dim0*dim1*sizeof(DATATYPE));
+ data_array1 = (DATATYPE *)HDmalloc((size_t)dim0*(size_t)dim1*sizeof(DATATYPE));
VRFY((data_array1 != NULL), "data_array1 malloc succeeded");
/* Dataset7: point selection in memory - All selection in file*/
@@ -1410,12 +1416,12 @@ dataset_readAll(void)
ret = H5Sselect_all(file_dataspace);
VRFY((ret >= 0), "H5Sselect_all succeeded");
- num_points = dim0 * dim1;
+ num_points = (size_t)(dim0 * dim1);
k=0;
for (i=0 ; i<dim0; i++) {
for (j=0 ; j<dim1; j++) {
- coords[k++] = i;
- coords[k++] = j;
+ coords[k++] = (hsize_t)i;
+ coords[k++] = (hsize_t)j;
}
}
mem_dataspace = H5Dget_space (dataset7);
@@ -1438,7 +1444,7 @@ dataset_readAll(void)
xfer_plist, data_array1);
VRFY((ret >= 0), "H5Dread dataset7 succeeded");
- start[0] = dim0/mpi_size * mpi_rank;
+ start[0] = (hsize_t)(dim0/mpi_size * mpi_rank);
start[1] = 0;
ret = dataset_vrfy(start, count, stride, block, data_array1+(dim0/mpi_size * dim1 * mpi_rank), data_origin1);
if(ret) nerrors++;
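
Datasets 5-7 above exercise point selections, and the new casts to hsize_t matter because H5Sselect_elements takes a flat array of (row, col) coordinates of exactly that type. A reduced sketch of the coords loop, assuming the dataspace id and extents come from the caller:

    #include <stdlib.h>
    #include <hdf5.h>

    /* Sketch: select every element of an nrows x ncols dataspace as an
     * explicit point list, mirroring the coords loop in the hunk above. */
    static herr_t
    select_all_points(hid_t space_id, hsize_t nrows, hsize_t ncols)
    {
        size_t   num_points = (size_t)(nrows * ncols);
        hsize_t *coords     = malloc(num_points * 2 * sizeof(hsize_t));
        size_t   k          = 0;
        herr_t   ret;

        if (coords == NULL)
            return -1;
        for (hsize_t i = 0; i < nrows; i++)
            for (hsize_t j = 0; j < ncols; j++) {
                coords[k++] = i;   /* row    */
                coords[k++] = j;   /* column */
            }
        ret = H5Sselect_elements(space_id, H5S_SELECT_SET, num_points, coords);
        free(coords);
        return ret;
    }
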
@@ -1488,25 +1494,25 @@ void
extend_writeInd(void)
{
hid_t fid; /* HDF5 file ID */
- hid_t acc_tpl; /* File access templates */
- hid_t sid; /* Dataspace ID */
- hid_t file_dataspace; /* File dataspace ID */
- hid_t mem_dataspace; /* memory dataspace ID */
- hid_t dataset1, dataset2; /* Dataset ID */
+ hid_t acc_tpl; /* File access templates */
+ hid_t sid; /* Dataspace ID */
+ hid_t file_dataspace; /* File dataspace ID */
+ hid_t mem_dataspace; /* memory dataspace ID */
+ hid_t dataset1, dataset2; /* Dataset ID */
const char *filename;
- hsize_t dims[RANK]; /* dataset dim sizes */
+ hsize_t dims[RANK]; /* dataset dim sizes */
hsize_t max_dims[RANK] =
- {H5S_UNLIMITED, H5S_UNLIMITED}; /* dataset maximum dim sizes */
- DATATYPE *data_array1 = NULL; /* data buffer */
- hsize_t chunk_dims[RANK]; /* chunk sizes */
- hid_t dataset_pl; /* dataset create prop. list */
+ {H5S_UNLIMITED, H5S_UNLIMITED}; /* dataset maximum dim sizes */
+ DATATYPE *data_array1 = NULL; /* data buffer */
+ hsize_t chunk_dims[RANK]; /* chunk sizes */
+ hid_t dataset_pl; /* dataset create prop. list */
- hsize_t start[RANK]; /* for hyperslab setting */
- hsize_t count[RANK]; /* for hyperslab setting */
- hsize_t stride[RANK]; /* for hyperslab setting */
- hsize_t block[RANK]; /* for hyperslab setting */
+ hsize_t start[RANK]; /* for hyperslab setting */
+ hsize_t count[RANK]; /* for hyperslab setting */
+ hsize_t stride[RANK]; /* for hyperslab setting */
+ hsize_t block[RANK]; /* for hyperslab setting */
- herr_t ret; /* Generic return value */
+ herr_t ret; /* Generic return value */
int mpi_size, mpi_rank;
MPI_Comm comm = MPI_COMM_WORLD;
@@ -1514,18 +1520,18 @@ extend_writeInd(void)
filename = GetTestParameters();
if(VERBOSE_MED)
- printf("Extend independent write test on file %s\n", filename);
+ HDprintf("Extend independent write test on file %s\n", filename);
/* set up MPI parameters */
MPI_Comm_size(MPI_COMM_WORLD,&mpi_size);
MPI_Comm_rank(MPI_COMM_WORLD,&mpi_rank);
/* setup chunk-size. Make sure sizes are > 0 */
- chunk_dims[0] = chunkdim0;
- chunk_dims[1] = chunkdim1;
+ chunk_dims[0] = (hsize_t)chunkdim0;
+ chunk_dims[1] = (hsize_t)chunkdim1;
/* allocate memory for data buffer */
- data_array1 = (DATATYPE *)HDmalloc(dim0*dim1*sizeof(DATATYPE));
+ data_array1 = (DATATYPE *)HDmalloc((size_t)dim0*(size_t)dim1*sizeof(DATATYPE));
VRFY((data_array1 != NULL), "data_array1 HDmalloc succeeded");
/* -------------------
@@ -1567,7 +1573,7 @@ extend_writeInd(void)
/* set up dataset storage chunk sizes and creation property list */
if(VERBOSE_MED)
- printf("chunks[]=%lu,%lu\n", (unsigned long)chunk_dims[0], (unsigned long)chunk_dims[1]);
+ HDprintf("chunks[]=%lu,%lu\n", (unsigned long)chunk_dims[0], (unsigned long)chunk_dims[1]);
dataset_pl = H5Pcreate(H5P_DATASET_CREATE);
VRFY((dataset_pl >= 0), "H5Pcreate succeeded");
ret = H5Pset_chunk(dataset_pl, RANK, chunk_dims);
@@ -1603,8 +1609,8 @@ extend_writeInd(void)
dataset_fill(start, block, data_array1);
MESG("data_array initialized");
if(VERBOSE_MED) {
- MESG("data_array created");
- dataset_print(start, block, data_array1);
+ MESG("data_array created");
+ dataset_print(start, block, data_array1);
}
/* create a memory dataspace independently */
@@ -1612,8 +1618,8 @@ extend_writeInd(void)
VRFY((mem_dataspace >= 0), "");
/* Extend its current dim sizes before writing */
- dims[0] = dim0;
- dims[1] = dim1;
+ dims[0] = (hsize_t)dim0;
+ dims[1] = (hsize_t)dim1;
ret = H5Dset_extent(dataset1, dims);
VRFY((ret >= 0), "H5Dset_extent succeeded");
@@ -1625,7 +1631,7 @@ extend_writeInd(void)
/* write data independently */
ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- H5P_DEFAULT, data_array1);
+ H5P_DEFAULT, data_array1);
VRFY((ret >= 0), "H5Dwrite succeeded");
/* release resource */
@@ -1643,8 +1649,8 @@ extend_writeInd(void)
dataset_fill(start, block, data_array1);
MESG("data_array initialized");
if(VERBOSE_MED){
- MESG("data_array created");
- dataset_print(start, block, data_array1);
+ MESG("data_array created");
+ dataset_print(start, block, data_array1);
}
/* create a memory dataspace independently */
@@ -1664,7 +1670,7 @@ extend_writeInd(void)
/* write data independently. Should fail. */
ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- H5P_DEFAULT, data_array1);
+ H5P_DEFAULT, data_array1);
VRFY((ret < 0), "H5Dwrite failed as expected");
/* restore auto error reporting */
@@ -1672,8 +1678,8 @@ extend_writeInd(void)
H5Sclose(file_dataspace);
/* Extend dataset2 and try again. Should succeed. */
- dims[0] = dim0;
- dims[1] = dim1;
+ dims[0] = (hsize_t)dim0;
+ dims[1] = (hsize_t)dim1;
ret = H5Dset_extent(dataset2, dims);
VRFY((ret >= 0), "H5Dset_extent succeeded");
@@ -1685,7 +1691,7 @@ extend_writeInd(void)
/* write data independently */
ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- H5P_DEFAULT, data_array1);
+ H5P_DEFAULT, data_array1);
VRFY((ret >= 0), "H5Dwrite succeeded");
/* release resource */
@@ -1719,25 +1725,25 @@ extend_writeInd2(void)
{
const char *filename;
hid_t fid; /* HDF5 file ID */
- hid_t fapl; /* File access templates */
- hid_t fs; /* File dataspace ID */
- hid_t ms; /* Memory dataspace ID */
- hid_t dataset; /* Dataset ID */
- hsize_t orig_size=10; /* Original dataset dim size */
- hsize_t new_size=20; /* Extended dataset dim size */
+ hid_t fapl; /* File access templates */
+ hid_t fs; /* File dataspace ID */
+ hid_t ms; /* Memory dataspace ID */
+ hid_t dataset; /* Dataset ID */
+ hsize_t orig_size=10; /* Original dataset dim size */
+ hsize_t new_size=20; /* Extended dataset dim size */
hsize_t one=1;
- hsize_t max_size = H5S_UNLIMITED; /* dataset maximum dim size */
- hsize_t chunk_size = 16384; /* chunk size */
- hid_t dcpl; /* dataset create prop. list */
+ hsize_t max_size = H5S_UNLIMITED; /* dataset maximum dim size */
+ hsize_t chunk_size = 16384; /* chunk size */
+ hid_t dcpl; /* dataset create prop. list */
int written[10], /* Data to write */
retrieved[10]; /* Data read in */
int mpi_size, mpi_rank; /* MPI settings */
int i; /* Local index variable */
- herr_t ret; /* Generic return value */
+ herr_t ret; /* Generic return value */
filename = GetTestParameters();
if(VERBOSE_MED)
- printf("Extend independent write test #2 on file %s\n", filename);
+ HDprintf("Extend independent write test #2 on file %s\n", filename);
/* set up MPI parameters */
MPI_Comm_size(MPI_COMM_WORLD,&mpi_size);
@@ -1794,10 +1800,10 @@ extend_writeInd2(void)
written[i] = i;
MESG("data array initialized");
if(VERBOSE_MED) {
- MESG("writing at offset zero: ");
+ MESG("writing at offset zero: ");
for(i = 0; i < (int)orig_size; i++)
- printf("%s%d", i?", ":"", written[i]);
- printf("\n");
+ HDprintf("%s%d", i?", ":"", written[i]);
+ HDprintf("\n");
}
ret = H5Dwrite(dataset, H5T_NATIVE_INT, ms, fs, H5P_DEFAULT, written);
VRFY((ret >= 0), "H5Dwrite succeeded");
@@ -1809,15 +1815,15 @@ extend_writeInd2(void)
VRFY((ret >= 0), "H5Dread succeeded");
for (i=0; i<(int)orig_size; i++)
if(written[i]!=retrieved[i]) {
- printf("Line #%d: written!=retrieved: written[%d]=%d, retrieved[%d]=%d\n",__LINE__,
+ HDprintf("Line #%d: written!=retrieved: written[%d]=%d, retrieved[%d]=%d\n",__LINE__,
i,written[i], i,retrieved[i]);
nerrors++;
}
if(VERBOSE_MED){
- MESG("read at offset zero: ");
+ MESG("read at offset zero: ");
for (i=0; i<(int)orig_size; i++)
- printf("%s%d", i?", ":"", retrieved[i]);
- printf("\n");
+ HDprintf("%s%d", i?", ":"", retrieved[i]);
+ HDprintf("\n");
}
/* -------------------------
@@ -1833,14 +1839,15 @@ extend_writeInd2(void)
/* -------------------------
* Write to the second half of the dataset
* -------------------------*/
+ H5_CHECK_OVERFLOW(orig_size, hsize_t, int);
for (i=0; i<(int)orig_size; i++)
- written[i] = orig_size + i;
+ written[i] = (int)orig_size + i;
MESG("data array re-initialized");
if(VERBOSE_MED) {
- MESG("writing at offset 10: ");
+ MESG("writing at offset 10: ");
for (i=0; i<(int)orig_size; i++)
- printf("%s%d", i?", ":"", written[i]);
- printf("\n");
+ HDprintf("%s%d", i?", ":"", written[i]);
+ HDprintf("\n");
}
ret = H5Sselect_hyperslab(fs, H5S_SELECT_SET, &orig_size, NULL, &one, &orig_size);
VRFY((ret >= 0), "H5Sselect_hyperslab succeeded");
@@ -1854,15 +1861,15 @@ extend_writeInd2(void)
VRFY((ret >= 0), "H5Dread succeeded");
for (i=0; i<(int)orig_size; i++)
if(written[i]!=retrieved[i]) {
- printf("Line #%d: written!=retrieved: written[%d]=%d, retrieved[%d]=%d\n",__LINE__,
+ HDprintf("Line #%d: written!=retrieved: written[%d]=%d, retrieved[%d]=%d\n",__LINE__,
i,written[i], i,retrieved[i]);
nerrors++;
}
if(VERBOSE_MED){
- MESG("read at offset 10: ");
+ MESG("read at offset 10: ");
for (i=0; i<(int)orig_size; i++)
- printf("%s%d", i?", ":"", retrieved[i]);
- printf("\n");
+ HDprintf("%s%d", i?", ":"", retrieved[i]);
+ HDprintf("\n");
}
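
H5_CHECK_OVERFLOW above is a library-internal macro; the substance of the change is that orig_size is an hsize_t (unsigned 64-bit) being narrowed to int. In application code the same guard can be hand-rolled, for example:

    #include <assert.h>
    #include <limits.h>
    #include <hdf5.h>   /* for hsize_t */

    /* Sketch: checked narrowing of an hsize_t extent to int, the
     * hand-rolled equivalent of the internal H5_CHECK_OVERFLOW macro. */
    static int
    hsize_to_int(hsize_t value)
    {
        assert(value <= (hsize_t)INT_MAX);  /* fail loudly on truncation */
        return (int)value;
    }
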
@@ -1879,22 +1886,22 @@ extend_writeInd2(void)
void
extend_readInd(void)
{
- hid_t fid; /* HDF5 file ID */
- hid_t acc_tpl; /* File access templates */
- hid_t file_dataspace; /* File dataspace ID */
- hid_t mem_dataspace; /* memory dataspace ID */
- hid_t dataset1, dataset2; /* Dataset ID */
- hsize_t dims[RANK]; /* dataset dim sizes */
- DATATYPE *data_array1 = NULL; /* data buffer */
- DATATYPE *data_array2 = NULL; /* data buffer */
- DATATYPE *data_origin1 = NULL; /* expected data buffer */
+ hid_t fid; /* HDF5 file ID */
+ hid_t acc_tpl; /* File access templates */
+ hid_t file_dataspace; /* File dataspace ID */
+ hid_t mem_dataspace; /* memory dataspace ID */
+ hid_t dataset1, dataset2; /* Dataset ID */
+ hsize_t dims[RANK]; /* dataset dim sizes */
+ DATATYPE *data_array1 = NULL; /* data buffer */
+ DATATYPE *data_array2 = NULL; /* data buffer */
+ DATATYPE *data_origin1 = NULL; /* expected data buffer */
const char *filename;
- hsize_t start[RANK]; /* for hyperslab setting */
- hsize_t count[RANK], stride[RANK]; /* for hyperslab setting */
- hsize_t block[RANK]; /* for hyperslab setting */
+ hsize_t start[RANK]; /* for hyperslab setting */
+ hsize_t count[RANK], stride[RANK]; /* for hyperslab setting */
+ hsize_t block[RANK]; /* for hyperslab setting */
- herr_t ret; /* Generic return value */
+ herr_t ret; /* Generic return value */
int mpi_size, mpi_rank;
MPI_Comm comm = MPI_COMM_WORLD;
@@ -1902,18 +1909,18 @@ extend_readInd(void)
filename = GetTestParameters();
if(VERBOSE_MED)
- printf("Extend independent read test on file %s\n", filename);
+ HDprintf("Extend independent read test on file %s\n", filename);
/* set up MPI parameters */
MPI_Comm_size(MPI_COMM_WORLD,&mpi_size);
MPI_Comm_rank(MPI_COMM_WORLD,&mpi_rank);
/* allocate memory for data buffer */
- data_array1 = (DATATYPE *)HDmalloc(dim0*dim1*sizeof(DATATYPE));
+ data_array1 = (DATATYPE *)HDmalloc((size_t)dim0*(size_t)dim1*sizeof(DATATYPE));
VRFY((data_array1 != NULL), "data_array1 HDmalloc succeeded");
- data_array2 = (DATATYPE *)HDmalloc(dim0*dim1*sizeof(DATATYPE));
+ data_array2 = (DATATYPE *)HDmalloc((size_t)dim0*(size_t)dim1*sizeof(DATATYPE));
VRFY((data_array2 != NULL), "data_array2 HDmalloc succeeded");
- data_origin1 = (DATATYPE *)HDmalloc(dim0*dim1*sizeof(DATATYPE));
+ data_origin1 = (DATATYPE *)HDmalloc((size_t)dim0*(size_t)dim1*sizeof(DATATYPE));
VRFY((data_origin1 != NULL), "data_origin1 HDmalloc succeeded");
/* -------------------
@@ -1974,13 +1981,13 @@ extend_readInd(void)
/* fill dataset with test data */
dataset_fill(start, block, data_origin1);
if(VERBOSE_MED){
- MESG("data_array created");
- dataset_print(start, block, data_array1);
+ MESG("data_array created");
+ dataset_print(start, block, data_array1);
}
/* read data independently */
ret = H5Dread(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- H5P_DEFAULT, data_array1);
+ H5P_DEFAULT, data_array1);
VRFY((ret >= 0), "H5Dread succeeded");
/* verify the read data with original expected data */
@@ -2009,13 +2016,13 @@ extend_readInd(void)
/* fill dataset with test data */
dataset_fill(start, block, data_origin1);
if(VERBOSE_MED){
- MESG("data_array created");
- dataset_print(start, block, data_array1);
+ MESG("data_array created");
+ dataset_print(start, block, data_array1);
}
/* read data independently */
ret = H5Dread(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- H5P_DEFAULT, data_array1);
+ H5P_DEFAULT, data_array1);
VRFY((ret >= 0), "H5Dread succeeded");
/* verify the read data with original expected data */
@@ -2058,26 +2065,26 @@ void
extend_writeAll(void)
{
hid_t fid; /* HDF5 file ID */
- hid_t acc_tpl; /* File access templates */
- hid_t xfer_plist; /* Dataset transfer properties list */
- hid_t sid; /* Dataspace ID */
- hid_t file_dataspace; /* File dataspace ID */
- hid_t mem_dataspace; /* memory dataspace ID */
- hid_t dataset1, dataset2; /* Dataset ID */
+ hid_t acc_tpl; /* File access templates */
+ hid_t xfer_plist; /* Dataset transfer properties list */
+ hid_t sid; /* Dataspace ID */
+ hid_t file_dataspace; /* File dataspace ID */
+ hid_t mem_dataspace; /* memory dataspace ID */
+ hid_t dataset1, dataset2; /* Dataset ID */
const char *filename;
- hsize_t dims[RANK]; /* dataset dim sizes */
+ hsize_t dims[RANK]; /* dataset dim sizes */
hsize_t max_dims[RANK] =
- {H5S_UNLIMITED, H5S_UNLIMITED}; /* dataset maximum dim sizes */
- DATATYPE *data_array1 = NULL; /* data buffer */
- hsize_t chunk_dims[RANK]; /* chunk sizes */
- hid_t dataset_pl; /* dataset create prop. list */
+ {H5S_UNLIMITED, H5S_UNLIMITED}; /* dataset maximum dim sizes */
+ DATATYPE *data_array1 = NULL; /* data buffer */
+ hsize_t chunk_dims[RANK]; /* chunk sizes */
+ hid_t dataset_pl; /* dataset create prop. list */
- hsize_t start[RANK]; /* for hyperslab setting */
- hsize_t count[RANK]; /* for hyperslab setting */
- hsize_t stride[RANK]; /* for hyperslab setting */
- hsize_t block[RANK]; /* for hyperslab setting */
+ hsize_t start[RANK]; /* for hyperslab setting */
+ hsize_t count[RANK]; /* for hyperslab setting */
+ hsize_t stride[RANK]; /* for hyperslab setting */
+ hsize_t block[RANK]; /* for hyperslab setting */
- herr_t ret; /* Generic return value */
+ herr_t ret; /* Generic return value */
int mpi_size, mpi_rank;
MPI_Comm comm = MPI_COMM_WORLD;
@@ -2085,18 +2092,18 @@ extend_writeAll(void)
filename = GetTestParameters();
if(VERBOSE_MED)
- printf("Extend independent write test on file %s\n", filename);
+ HDprintf("Extend independent write test on file %s\n", filename);
/* set up MPI parameters */
MPI_Comm_size(MPI_COMM_WORLD,&mpi_size);
MPI_Comm_rank(MPI_COMM_WORLD,&mpi_rank);
/* setup chunk-size. Make sure sizes are > 0 */
- chunk_dims[0] = chunkdim0;
- chunk_dims[1] = chunkdim1;
+ chunk_dims[0] = (hsize_t)chunkdim0;
+ chunk_dims[1] = (hsize_t)chunkdim1;
/* allocate memory for data buffer */
- data_array1 = (DATATYPE *)HDmalloc(dim0*dim1*sizeof(DATATYPE));
+ data_array1 = (DATATYPE *)HDmalloc((size_t)dim0*(size_t)dim1*sizeof(DATATYPE));
VRFY((data_array1 != NULL), "data_array1 HDmalloc succeeded");
/* -------------------
@@ -2138,7 +2145,7 @@ extend_writeAll(void)
/* set up dataset storage chunk sizes and creation property list */
if(VERBOSE_MED)
- printf("chunks[]=%lu,%lu\n", (unsigned long)chunk_dims[0], (unsigned long)chunk_dims[1]);
+ HDprintf("chunks[]=%lu,%lu\n", (unsigned long)chunk_dims[0], (unsigned long)chunk_dims[1]);
dataset_pl = H5Pcreate(H5P_DATASET_CREATE);
VRFY((dataset_pl >= 0), "H5Pcreate succeeded");
ret = H5Pset_chunk(dataset_pl, RANK, chunk_dims);
@@ -2174,8 +2181,8 @@ extend_writeAll(void)
dataset_fill(start, block, data_array1);
MESG("data_array initialized");
if(VERBOSE_MED) {
- MESG("data_array created");
- dataset_print(start, block, data_array1);
+ MESG("data_array created");
+ dataset_print(start, block, data_array1);
}
/* create a memory dataspace independently */
@@ -2183,8 +2190,8 @@ extend_writeAll(void)
VRFY((mem_dataspace >= 0), "");
/* Extend its current dim sizes before writing */
- dims[0] = dim0;
- dims[1] = dim1;
+ dims[0] = (hsize_t)dim0;
+ dims[1] = (hsize_t)dim1;
ret = H5Dset_extent(dataset1, dims);
VRFY((ret >= 0), "H5Dset_extent succeeded");
@@ -2200,14 +2207,14 @@ extend_writeAll(void)
ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
- ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
- VRFY((ret>= 0),"set independent IO collectively succeeded");
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret>= 0),"set independent IO collectively succeeded");
}
/* write data collectively */
ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- xfer_plist, data_array1);
+ xfer_plist, data_array1);
VRFY((ret >= 0), "H5Dwrite succeeded");
/* release resource */
@@ -2226,8 +2233,8 @@ extend_writeAll(void)
dataset_fill(start, block, data_array1);
MESG("data_array initialized");
if(VERBOSE_MED){
- MESG("data_array created");
- dataset_print(start, block, data_array1);
+ MESG("data_array created");
+ dataset_print(start, block, data_array1);
}
/* create a memory dataspace independently */
@@ -2240,8 +2247,8 @@ extend_writeAll(void)
ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
- ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
- VRFY((ret>= 0),"set independent IO collectively succeeded");
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret>= 0),"set independent IO collectively succeeded");
}
@@ -2258,7 +2265,7 @@ extend_writeAll(void)
/* write data independently. Should fail. */
ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- xfer_plist, data_array1);
+ xfer_plist, data_array1);
VRFY((ret < 0), "H5Dwrite failed as expected");
/* restore auto error reporting */
@@ -2266,8 +2273,8 @@ extend_writeAll(void)
H5Sclose(file_dataspace);
/* Extend dataset2 and try again. Should succeed. */
- dims[0] = dim0;
- dims[1] = dim1;
+ dims[0] = (hsize_t)dim0;
+ dims[1] = (hsize_t)dim1;
ret = H5Dset_extent(dataset2, dims);
VRFY((ret >= 0), "H5Dset_extent succeeded");
@@ -2279,7 +2286,7 @@ extend_writeAll(void)
/* write data independently */
ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- xfer_plist, data_array1);
+ xfer_plist, data_array1);
VRFY((ret >= 0), "H5Dwrite succeeded");
/* release resource */
@@ -2308,23 +2315,23 @@ extend_writeAll(void)
void
extend_readAll(void)
{
- hid_t fid; /* HDF5 file ID */
- hid_t acc_tpl; /* File access templates */
- hid_t xfer_plist; /* Dataset transfer properties list */
- hid_t file_dataspace; /* File dataspace ID */
- hid_t mem_dataspace; /* memory dataspace ID */
- hid_t dataset1, dataset2; /* Dataset ID */
+ hid_t fid; /* HDF5 file ID */
+ hid_t acc_tpl; /* File access templates */
+ hid_t xfer_plist; /* Dataset transfer properties list */
+ hid_t file_dataspace; /* File dataspace ID */
+ hid_t mem_dataspace; /* memory dataspace ID */
+ hid_t dataset1, dataset2; /* Dataset ID */
const char *filename;
- hsize_t dims[RANK]; /* dataset dim sizes */
- DATATYPE *data_array1 = NULL; /* data buffer */
- DATATYPE *data_array2 = NULL; /* data buffer */
- DATATYPE *data_origin1 = NULL; /* expected data buffer */
+ hsize_t dims[RANK]; /* dataset dim sizes */
+ DATATYPE *data_array1 = NULL; /* data buffer */
+ DATATYPE *data_array2 = NULL; /* data buffer */
+ DATATYPE *data_origin1 = NULL; /* expected data buffer */
- hsize_t start[RANK]; /* for hyperslab setting */
- hsize_t count[RANK], stride[RANK]; /* for hyperslab setting */
- hsize_t block[RANK]; /* for hyperslab setting */
+ hsize_t start[RANK]; /* for hyperslab setting */
+ hsize_t count[RANK], stride[RANK]; /* for hyperslab setting */
+ hsize_t block[RANK]; /* for hyperslab setting */
- herr_t ret; /* Generic return value */
+ herr_t ret; /* Generic return value */
int mpi_size, mpi_rank;
MPI_Comm comm = MPI_COMM_WORLD;
@@ -2332,18 +2339,18 @@ extend_readAll(void)
filename = GetTestParameters();
if(VERBOSE_MED)
- printf("Extend independent read test on file %s\n", filename);
+ HDprintf("Extend independent read test on file %s\n", filename);
/* set up MPI parameters */
MPI_Comm_size(MPI_COMM_WORLD,&mpi_size);
MPI_Comm_rank(MPI_COMM_WORLD,&mpi_rank);
/* allocate memory for data buffer */
- data_array1 = (DATATYPE *)HDmalloc(dim0*dim1*sizeof(DATATYPE));
+ data_array1 = (DATATYPE *)HDmalloc((size_t)dim0*(size_t)dim1*sizeof(DATATYPE));
VRFY((data_array1 != NULL), "data_array1 HDmalloc succeeded");
- data_array2 = (DATATYPE *)HDmalloc(dim0*dim1*sizeof(DATATYPE));
+ data_array2 = (DATATYPE *)HDmalloc((size_t)dim0*(size_t)dim1*sizeof(DATATYPE));
VRFY((data_array2 != NULL), "data_array2 HDmalloc succeeded");
- data_origin1 = (DATATYPE *)HDmalloc(dim0*dim1*sizeof(DATATYPE));
+ data_origin1 = (DATATYPE *)HDmalloc((size_t)dim0*(size_t)dim1*sizeof(DATATYPE));
VRFY((data_origin1 != NULL), "data_origin1 HDmalloc succeeded");
/* -------------------
@@ -2404,8 +2411,8 @@ extend_readAll(void)
/* fill dataset with test data */
dataset_fill(start, block, data_origin1);
if(VERBOSE_MED){
- MESG("data_array created");
- dataset_print(start, block, data_array1);
+ MESG("data_array created");
+ dataset_print(start, block, data_array1);
}
/* set up the collective transfer properties list */
@@ -2414,14 +2421,14 @@ extend_readAll(void)
ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
- ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
- VRFY((ret>= 0),"set independent IO collectively succeeded");
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret>= 0),"set independent IO collectively succeeded");
}
/* read data collectively */
ret = H5Dread(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- xfer_plist, data_array1);
+ xfer_plist, data_array1);
VRFY((ret >= 0), "H5Dread succeeded");
/* verify the read data with original expected data */
@@ -2451,8 +2458,8 @@ extend_readAll(void)
/* fill dataset with test data */
dataset_fill(start, block, data_origin1);
if(VERBOSE_MED){
- MESG("data_array created");
- dataset_print(start, block, data_array1);
+ MESG("data_array created");
+ dataset_print(start, block, data_array1);
}
/* set up the collective transfer properties list */
@@ -2461,14 +2468,14 @@ extend_readAll(void)
ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
- ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
- VRFY((ret>= 0),"set independent IO collectively succeeded");
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret>= 0),"set independent IO collectively succeeded");
}
/* read data collectively */
ret = H5Dread(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- xfer_plist, data_array1);
+ xfer_plist, data_array1);
VRFY((ret >= 0), "H5Dread succeeded");
/* verify the read data with original expected data */
@@ -2505,27 +2512,27 @@ void
compress_readAll(void)
{
hid_t fid; /* HDF5 file ID */
- hid_t acc_tpl; /* File access templates */
+ hid_t acc_tpl; /* File access templates */
hid_t dcpl; /* Dataset creation property list */
- hid_t xfer_plist; /* Dataset transfer properties list */
- hid_t dataspace; /* Dataspace ID */
- hid_t dataset; /* Dataset ID */
+ hid_t xfer_plist; /* Dataset transfer properties list */
+ hid_t dataspace; /* Dataspace ID */
+ hid_t dataset; /* Dataset ID */
int rank=1; /* Dataspace rank */
- hsize_t dim=dim0; /* Dataspace dimensions */
+ hsize_t dim=(hsize_t)dim0; /* Dataspace dimensions */
unsigned u; /* Local index variable */
unsigned chunk_opts; /* Chunk options */
unsigned disable_partial_chunk_filters; /* Whether filters are disabled on partial chunks */
- DATATYPE *data_read = NULL; /* data buffer */
+ DATATYPE *data_read = NULL; /* data buffer */
DATATYPE *data_orig = NULL; /* expected data buffer */
const char *filename;
MPI_Comm comm = MPI_COMM_WORLD;
MPI_Info info = MPI_INFO_NULL;
int mpi_size, mpi_rank;
- herr_t ret; /* Generic return value */
+ herr_t ret; /* Generic return value */
filename = GetTestParameters();
if(VERBOSE_MED)
- printf("Collective chunked dataset read test on file %s\n", filename);
+ HDprintf("Collective chunked dataset read test on file %s\n", filename);
/* Retrieve MPI parameters */
MPI_Comm_size(comm,&mpi_size);
@@ -2539,7 +2546,7 @@ compress_readAll(void)
/* Initialize data buffers */
for(u=0; u<dim;u++)
- data_orig[u]=u;
+ data_orig[u]=(DATATYPE)u;
/* Run test both with and without filters disabled on partial chunks */
for(disable_partial_chunk_filters = 0; disable_partial_chunk_filters <= 1;
@@ -2634,28 +2641,27 @@ compress_readAll(void)
ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
- ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
- VRFY((ret>= 0),"set independent IO collectively succeeded");
+ ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret>= 0),"set independent IO collectively succeeded");
}
/* Try reading the data */
ret = H5Dread(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, xfer_plist, data_read);
- VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
+ VRFY((ret >= 0), "H5Dread succeeded");
/* Verify data read */
for(u=0; u<dim; u++)
if(data_orig[u]!=data_read[u]) {
- printf("Line #%d: written!=retrieved: data_orig[%u]=%d, data_read[%u]=%d\n",__LINE__,
+ HDprintf("Line #%d: written!=retrieved: data_orig[%u]=%d, data_read[%u]=%d\n",__LINE__,
(unsigned)u,data_orig[u],(unsigned)u,data_read[u]);
nerrors++;
}
- /* Writing to the compressed, chunked dataset in parallel should fail */
- H5E_BEGIN_TRY {
- ret = H5Dwrite(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, xfer_plist, data_read);
- } H5E_END_TRY;
- VRFY((ret < 0), "H5Dwrite failed");
+#if MPI_VERSION >= 3
+ ret = H5Dwrite(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, xfer_plist, data_read);
+ VRFY((ret >= 0), "H5Dwrite succeeded");
+#endif
ret = H5Pclose(xfer_plist);
VRFY((ret >= 0), "H5Pclose succeeded");
@@ -2690,26 +2696,26 @@ void
none_selection_chunk(void)
{
hid_t fid; /* HDF5 file ID */
- hid_t acc_tpl; /* File access templates */
- hid_t xfer_plist; /* Dataset transfer properties list */
- hid_t sid; /* Dataspace ID */
- hid_t file_dataspace; /* File dataspace ID */
- hid_t mem_dataspace; /* memory dataspace ID */
- hid_t dataset1, dataset2; /* Dataset ID */
+ hid_t acc_tpl; /* File access templates */
+ hid_t xfer_plist; /* Dataset transfer properties list */
+ hid_t sid; /* Dataspace ID */
+ hid_t file_dataspace; /* File dataspace ID */
+ hid_t mem_dataspace; /* memory dataspace ID */
+ hid_t dataset1, dataset2; /* Dataset ID */
const char *filename;
- hsize_t dims[RANK]; /* dataset dim sizes */
- DATATYPE *data_origin = NULL; /* data buffer */
- DATATYPE *data_array = NULL; /* data buffer */
- hsize_t chunk_dims[RANK]; /* chunk sizes */
- hid_t dataset_pl; /* dataset create prop. list */
-
- hsize_t start[RANK]; /* for hyperslab setting */
- hsize_t count[RANK]; /* for hyperslab setting */
- hsize_t stride[RANK]; /* for hyperslab setting */
- hsize_t block[RANK]; /* for hyperslab setting */
- hsize_t mstart[RANK]; /* for data buffer in memory */
-
- herr_t ret; /* Generic return value */
+ hsize_t dims[RANK]; /* dataset dim sizes */
+ DATATYPE *data_origin = NULL; /* data buffer */
+ DATATYPE *data_array = NULL; /* data buffer */
+ hsize_t chunk_dims[RANK]; /* chunk sizes */
+ hid_t dataset_pl; /* dataset create prop. list */
+
+ hsize_t start[RANK]; /* for hyperslab setting */
+ hsize_t count[RANK]; /* for hyperslab setting */
+ hsize_t stride[RANK]; /* for hyperslab setting */
+ hsize_t block[RANK]; /* for hyperslab setting */
+ hsize_t mstart[RANK]; /* for data buffer in memory */
+
+ herr_t ret; /* Generic return value */
int mpi_size, mpi_rank;
MPI_Comm comm = MPI_COMM_WORLD;
@@ -2717,15 +2723,15 @@ none_selection_chunk(void)
filename = GetTestParameters();
if(VERBOSE_MED)
- printf("Extend independent write test on file %s\n", filename);
+ HDprintf("Extend independent write test on file %s\n", filename);
/* set up MPI parameters */
MPI_Comm_size(MPI_COMM_WORLD,&mpi_size);
MPI_Comm_rank(MPI_COMM_WORLD,&mpi_rank);
/* setup chunk-size. Make sure sizes are > 0 */
- chunk_dims[0] = chunkdim0;
- chunk_dims[1] = chunkdim1;
+ chunk_dims[0] = (hsize_t)chunkdim0;
+ chunk_dims[1] = (hsize_t)chunkdim1;
/* -------------------
* START AN HDF5 FILE
@@ -2748,15 +2754,15 @@ none_selection_chunk(void)
/* set up dataset storage chunk sizes and creation property list */
if(VERBOSE_MED)
- printf("chunks[]=%lu,%lu\n", (unsigned long)chunk_dims[0], (unsigned long)chunk_dims[1]);
+ HDprintf("chunks[]=%lu,%lu\n", (unsigned long)chunk_dims[0], (unsigned long)chunk_dims[1]);
dataset_pl = H5Pcreate(H5P_DATASET_CREATE);
VRFY((dataset_pl >= 0), "H5Pcreate succeeded");
ret = H5Pset_chunk(dataset_pl, RANK, chunk_dims);
VRFY((ret >= 0), "H5Pset_chunk succeeded");
/* setup dimensionality object */
- dims[0] = dim0;
- dims[1] = dim1;
+ dims[0] = (hsize_t)dim0;
+ dims[1] = (hsize_t)dim1;
sid = H5Screate_simple(RANK, dims, NULL);
VRFY((sid >= 0), "H5Screate_simple succeeded");
@@ -2792,8 +2798,8 @@ none_selection_chunk(void)
dataset_fill(mstart, block, data_origin);
MESG("data_array initialized");
if(VERBOSE_MED){
- MESG("data_array created");
- dataset_print(mstart, block, data_origin);
+ MESG("data_array created");
+ dataset_print(mstart, block, data_origin);
}
}
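
none_selection_chunk, like the ZROW/ZCOL cases earlier, covers ranks that contribute nothing to a collective transfer: a rank with no data must still make the H5Dwrite/H5Dread call, just with an empty selection. Roughly (hyperslab setup for the other ranks is assumed done by the caller):

    #include <hdf5.h>
    #include <mpi.h>

    /* Sketch: rank 0 participates in the collective write with an empty
     * selection; the other ranks write their hyperslab. Every rank must
     * still make the H5Dwrite call for the collective to complete. */
    static herr_t
    write_with_empty_rank0(hid_t dset, hid_t mspace, hid_t fspace,
                           hid_t dxpl, const int *buf, int mpi_rank)
    {
        if (mpi_rank == 0) {
            if (H5Sselect_none(fspace) < 0 || H5Sselect_none(mspace) < 0)
                return -1;
        }
        return H5Dwrite(dset, H5T_NATIVE_INT, mspace, fspace, dxpl, buf);
    }
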
@@ -2827,12 +2833,12 @@ none_selection_chunk(void)
/* write data collectively */
ret = H5Dwrite(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- xfer_plist, data_origin);
+ xfer_plist, data_origin);
VRFY((ret >= 0), "H5Dwrite succeeded");
/* read data independently */
ret = H5Dread(dataset1, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- H5P_DEFAULT, data_array);
+ H5P_DEFAULT, data_array);
VRFY((ret >= 0), "");
/* verify the read data with original expected data */
@@ -2849,12 +2855,12 @@ none_selection_chunk(void)
/* write data collectively */
ret = H5Dwrite(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- xfer_plist, data_origin);
+ xfer_plist, data_origin);
VRFY((ret >= 0), "H5Dwrite succeeded");
/* read data independently */
ret = H5Dread(dataset2, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
- H5P_DEFAULT, data_array);
+ H5P_DEFAULT, data_array);
VRFY((ret >= 0), "");
/* verify the read data with original expected data */
@@ -2886,20 +2892,20 @@ none_selection_chunk(void)
if(data_array) HDfree(data_array);
}
-
+
/* Function: test_actual_io_mode
*
- * Purpose: tests one specific case of collective I/O and checks that the
+ * Purpose: tests one specific case of collective I/O and checks that the
* actual_chunk_opt_mode property and the actual_io_mode
* properties in the DXPL have the correct values.
*
* Input: selection_mode: changes the way processes select data from the space, as well
* as some dxpl flags to get collective I/O to break in different ways.
- *
+ *
* The relevant I/O function and expected response for each mode:
* TEST_ACTUAL_IO_MULTI_CHUNK_IND:
* H5D_mpi_chunk_collective_io, each process reports independent I/O
- *
+ *
* TEST_ACTUAL_IO_MULTI_CHUNK_COL:
* H5D_mpi_chunk_collective_io, each process reports collective I/O
*
@@ -2911,7 +2917,7 @@ none_selection_chunk(void)
* collective, the rest report independent I/O
*
* TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_IND:
- * Same test TEST_ACTUAL_IO_MULTI_CHUNK_IND.
+ * Same test as TEST_ACTUAL_IO_MULTI_CHUNK_IND.
 * Set to go directly to multi-chunk-io without the num-threshold calc.
* TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_COL:
 * Same test as TEST_ACTUAL_IO_MULTI_CHUNK_COL.
@@ -2936,20 +2942,20 @@ none_selection_chunk(void)
*
* Note: DIRECT_MULTI_CHUNK_MIX and DIRECT_MULTI_CHUNK_MIX_DISAGREE
 * are not needed as they are covered by DIRECT_CHUNK_MIX and
- * MULTI_CHUNK_MIX_DISAGREE cases. _DIRECT_ cases are only for testing
+ * MULTI_CHUNK_MIX_DISAGREE cases. _DIRECT_ cases are only for testing
 * the path to multi-chunk-io via H5FD_MPIO_CHUNK_MULTI_IO instead of the num-threshold.
*
* Modification:
- * - Refctore to remove multi-chunk-without-opimization test and update for
- * testing direct to multi-chunk-io
+ * - Refactor to remove the multi-chunk-without-optimization test and update for
+ * testing direct to multi-chunk-io
* Programmer: Jonathan Kim
* Date: 2012-10-10
*
- *
+ *
* Programmer: Jacob Gruber
* Date: 2011-04-06
*/
-static void
+static void
test_actual_io_mode(int selection_mode) {
H5D_mpio_actual_chunk_opt_mode_t actual_chunk_opt_mode_write = -1;
H5D_mpio_actual_chunk_opt_mode_t actual_chunk_opt_mode_read = -1;
@@ -2963,7 +2969,7 @@ test_actual_io_mode(int selection_mode) {
hbool_t multi_chunk_io;
hbool_t is_chunked;
hbool_t is_collective;
- int mpi_size = -1;
+ int mpi_size = -1;
int mpi_rank = -1;
int length;
int * buffer;
@@ -2988,12 +2994,12 @@ test_actual_io_mode(int selection_mode) {
hsize_t block[RANK];
char message[256];
herr_t ret;
-
+
/* Set up some flags to make some future if statements slightly more readable */
direct_multi_chunk_io = (
selection_mode == TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_IND ||
selection_mode == TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_COL );
-
+
/* Note: RESET performs the same tests as MULTI_CHUNK_MIX_DISAGREE and then
* tests independent I/O
*/
@@ -3003,11 +3009,11 @@ test_actual_io_mode(int selection_mode) {
selection_mode == TEST_ACTUAL_IO_MULTI_CHUNK_MIX ||
selection_mode == TEST_ACTUAL_IO_MULTI_CHUNK_MIX_DISAGREE ||
selection_mode == TEST_ACTUAL_IO_RESET );
-
+
is_chunked = (
selection_mode != TEST_ACTUAL_IO_CONTIGUOUS &&
selection_mode != TEST_ACTUAL_IO_NO_COLLECTIVE);
-
+
is_collective = selection_mode != TEST_ACTUAL_IO_NO_COLLECTIVE;
/* Set up MPI parameters */
@@ -3015,7 +3021,7 @@ test_actual_io_mode(int selection_mode) {
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
MPI_Barrier(MPI_COMM_WORLD);
-
+
HDassert(mpi_size >= 1);
mpi_comm = MPI_COMM_WORLD;
@@ -3032,9 +3038,9 @@ test_actual_io_mode(int selection_mode) {
fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
VRFY((fid >= 0), "H5Fcreate succeeded");
- /* Create the basic Space */
- dims[0] = dim0;
- dims[1] = dim1;
+ /* Create the basic Space */
+ dims[0] = (hsize_t)dim0;
+ dims[1] = (hsize_t)dim1;
sid = H5Screate_simple (RANK, dims, NULL);
VRFY((sid >= 0), "H5Screate_simple succeeded");
@@ -3045,7 +3051,7 @@ test_actual_io_mode(int selection_mode) {
/* If we are not testing contiguous datasets */
if(is_chunked) {
/* Set up chunk information. */
- chunk_dims[0] = dims[0]/mpi_size;
+ chunk_dims[0] = dims[0]/(hsize_t)mpi_size;
chunk_dims[1] = dims[1];
ret = H5Pset_chunk(dcpl, 2, chunk_dims);
VRFY((ret >= 0),"chunk creation property list succeeded");
@@ -3060,10 +3066,10 @@ test_actual_io_mode(int selection_mode) {
file_space = H5Dget_space(dataset);
VRFY((file_space >= 0), "H5Dget_space succeeded");
- /* Choose a selection method based on the type of I/O we want to occur,
+ /* Choose a selection method based on the type of I/O we want to occur,
 * and also set up some selection-dependent test info. */
switch(selection_mode) {
-
+
/* Independent I/O with optimization */
case TEST_ACTUAL_IO_MULTI_CHUNK_IND:
case TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_IND:
@@ -3072,7 +3078,7 @@ test_actual_io_mode(int selection_mode) {
* independent.
*/
slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW);
-
+
test_name = "Multi Chunk - Independent";
actual_chunk_opt_mode_expected = H5D_MPIO_MULTI_CHUNK;
actual_io_mode_expected = H5D_MPIO_CHUNK_INDEPENDENT;
@@ -3086,7 +3092,7 @@ test_actual_io_mode(int selection_mode) {
* selections to each chunk, the operation is purely collective.
*/
slab_set(mpi_rank, mpi_size, start, count, stride, block, BYCOL);
-
+
test_name = "Multi Chunk - Collective";
actual_chunk_opt_mode_expected = H5D_MPIO_MULTI_CHUNK;
if(mpi_size > 1)
@@ -3094,7 +3100,7 @@ test_actual_io_mode(int selection_mode) {
else
actual_io_mode_expected = H5D_MPIO_CHUNK_INDEPENDENT;
break;
-
+
/* Mixed I/O with optimization */
case TEST_ACTUAL_IO_MULTI_CHUNK_MIX:
/* A chunk will be assigned collective I/O only if it is selected by each
@@ -3105,22 +3111,22 @@ test_actual_io_mode(int selection_mode) {
* assigned independent I/O. Each process will access one chunk collectively
* and at least one chunk independently, reporting mixed I/O.
*/
-
+
if(mpi_rank == 0) {
/* Select the first column */
slab_set(mpi_rank, mpi_size, start, count, stride, block, BYCOL);
} else {
/* Select the first and the nth chunk in the nth column */
- block[0] = dim0 / mpi_size;
- block[1] = dim1 / mpi_size;
+ block[0] = (hsize_t)(dim0 / mpi_size);
+ block[1] = (hsize_t)(dim1 / mpi_size);
count[0] = 2;
count[1] = 1;
- stride[0] = mpi_rank * block[0];
+ stride[0] = (hsize_t)mpi_rank * block[0];
stride[1] = 1;
start[0] = 0;
- start[1] = mpi_rank*block[1];
+ start[1] = (hsize_t)mpi_rank*block[1];
}
-
+
test_name = "Multi Chunk - Mixed";
actual_chunk_opt_mode_expected = H5D_MPIO_MULTI_CHUNK;
actual_io_mode_expected = H5D_MPIO_CHUNK_MIXED;
@@ -3130,7 +3136,7 @@ test_actual_io_mode(int selection_mode) {
 * performed. To achieve this, we have RESET perform collective I/O (which would change
* the values from the defaults) followed by independent I/O (which should report the
* default values). RESET doesn't need to have a unique selection, so we reuse
- * MULTI_CHUMK_MIX_DISAGREE, which was chosen because it is a complex case that works
+ * MULTI_CHUNK_MIX_DISAGREE, which was chosen because it is a complex case that works
* on all builds. The independent section of RESET can be found at the end of this function.
*/
case TEST_ACTUAL_IO_RESET:
@@ -3139,7 +3145,7 @@ test_actual_io_mode(int selection_mode) {
case TEST_ACTUAL_IO_MULTI_CHUNK_MIX_DISAGREE:
/* A chunk will be assigned collective I/O only if it is selected by each
* process. To get mixed I/O with disagreement, assign process n to the
- * first chunk and the nth chunk. The first chunk, selected by all, is
+ * first chunk and the nth chunk. The first chunk, selected by all, is
 * assigned collective I/O, while each other process gets independent I/O.
 * Since the root process will only access the first chunk, it will report
* collective I/O. The subsequent processes will access the first chunk
@@ -3149,25 +3155,25 @@ test_actual_io_mode(int selection_mode) {
if(mpi_rank == 0) {
/* Select the first chunk in the first column */
slab_set(mpi_rank, mpi_size, start, count, stride, block, BYCOL);
- block[0] = block[0] / mpi_size;
+ block[0] = block[0] / (hsize_t)mpi_size;
} else {
/* Select the first and the nth chunk in the nth column */
- block[0] = dim0 / mpi_size;
- block[1] = dim1 / mpi_size;
+ block[0] = (hsize_t)(dim0 / mpi_size);
+ block[1] = (hsize_t)(dim1 / mpi_size);
count[0] = 2;
count[1] = 1;
- stride[0] = mpi_rank * block[0];
+ stride[0] = (hsize_t)mpi_rank * block[0];
stride[1] = 1;
start[0] = 0;
- start[1] = mpi_rank*block[1];
+ start[1] = (hsize_t)mpi_rank*block[1];
}
-
+
/* If the testname was not already set by the RESET case */
if (selection_mode == TEST_ACTUAL_IO_RESET)
test_name = "RESET";
else
test_name = "Multi Chunk - Mixed (Disagreement)";
-
+
actual_chunk_opt_mode_expected = H5D_MPIO_MULTI_CHUNK;
if(mpi_size > 1) {
if(mpi_rank == 0)
@@ -3177,14 +3183,14 @@ test_actual_io_mode(int selection_mode) {
}
else
actual_io_mode_expected = H5D_MPIO_CHUNK_INDEPENDENT;
-
- break;
+
+ break;
/* Linked Chunk I/O */
- case TEST_ACTUAL_IO_LINK_CHUNK:
+ case TEST_ACTUAL_IO_LINK_CHUNK:
/* Nothing special; link chunk I/O is forced in the dxpl settings. */
slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW);
-
+
test_name = "Link Chunk";
actual_chunk_opt_mode_expected = H5D_MPIO_LINK_CHUNK;
actual_io_mode_expected = H5D_MPIO_CHUNK_COLLECTIVE;
@@ -3195,7 +3201,7 @@ test_actual_io_mode(int selection_mode) {
/* A non overlapping, regular selection in a contiguous dataset leads to
* collective I/O */
slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW);
-
+
test_name = "Contiguous";
actual_chunk_opt_mode_expected = H5D_MPIO_NO_CHUNK_OPTIMIZATION;
actual_io_mode_expected = H5D_MPIO_CONTIGUOUS_COLLECTIVE;
@@ -3203,7 +3209,7 @@ test_actual_io_mode(int selection_mode) {
case TEST_ACTUAL_IO_NO_COLLECTIVE:
slab_set(mpi_rank, mpi_size, start, count, stride, block, BYROW);
-
+
test_name = "Independent";
actual_chunk_opt_mode_expected = H5D_MPIO_NO_CHUNK_OPTIMIZATION;
actual_io_mode_expected = H5D_MPIO_NO_COLLECTIVE;
@@ -3218,13 +3224,13 @@ test_actual_io_mode(int selection_mode) {
ret = H5Sselect_hyperslab(file_space, H5S_SELECT_SET, start, stride, count, block);
VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
-
+
/* Create a memory dataspace mirroring the dataset and select the same hyperslab
- * as in the file space.
+ * as in the file space.
*/
mem_space = H5Screate_simple (RANK, dims, NULL);
VRFY((mem_space >= 0), "mem_space created");
-
+
ret = H5Sselect_hyperslab(mem_space, H5S_SELECT_SET, start, stride, count, block);
VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
@@ -3232,23 +3238,23 @@ test_actual_io_mode(int selection_mode) {
length = dim0 * dim1;
/* Allocate and initialize the buffer */
- buffer = (int *)HDmalloc(sizeof(int) * length);
- VRFY((buffer != NULL), "HDmalloc of buffer succeeded");
- for(i = 0; i < length; i++)
+ buffer = (int *)HDmalloc(sizeof(int) * (size_t)length);
+ VRFY((buffer != NULL), "HDmalloc of buffer succeeded");
+ for(i = 0; i < length; i++)
buffer[i] = i;
/* Set up the dxpl for the write */
dxpl_write = H5Pcreate(H5P_DATASET_XFER);
VRFY((dxpl_write >= 0), "H5Pcreate(H5P_DATASET_XFER) succeeded");
-
+
/* Set collective I/O properties in the dxpl. */
if(is_collective) {
/* Request collective I/O */
ret = H5Pset_dxpl_mpio(dxpl_write, H5FD_MPIO_COLLECTIVE);
VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
-
- /* Set the threshold number of processes per chunk to twice mpi_size.
- * This will prevent the threshold from ever being met, thus forcing
+
+ /* Set the threshold number of processes per chunk to twice mpi_size.
+ * This will prevent the threshold from ever being met, thus forcing
* multi chunk io instead of link chunk io.
 * This is via default.
*/
@@ -3286,12 +3292,12 @@ test_actual_io_mode(int selection_mode) {
ret = H5Pget_mpio_actual_chunk_opt_mode(dxpl_write, &actual_chunk_opt_mode_write);
VRFY((ret >= 0), "retriving actual chunk opt mode succeeded" );
-
+
/* Read */
ret = H5Dread(dataset, data_type, mem_space, file_space, dxpl_read, buffer);
if(ret < 0) H5Eprint2(H5E_DEFAULT, stdout);
VRFY((ret >= 0), "H5Dread() dataset multichunk read succeeded");
-
+
    /* Retrieve actual io values */
    ret = H5Pget_mpio_actual_io_mode(dxpl_read, &actual_io_mode_read);
    VRFY((ret >= 0), "retrieving actual io mode succeeded" );
@@ -3307,9 +3313,9 @@ test_actual_io_mode(int selection_mode) {
/* Test values */
if(actual_chunk_opt_mode_expected != (H5D_mpio_actual_chunk_opt_mode_t) -1 && actual_io_mode_expected != (H5D_mpio_actual_io_mode_t) -1) {
- sprintf(message, "Actual Chunk Opt Mode has the correct value for %s.\n",test_name);
+ HDsprintf(message, "Actual Chunk Opt Mode has the correct value for %s.\n",test_name);
VRFY((actual_chunk_opt_mode_write == actual_chunk_opt_mode_expected), message);
- sprintf(message, "Actual IO Mode has the correct value for %s.\n",test_name);
+ HDsprintf(message, "Actual IO Mode has the correct value for %s.\n",test_name);
VRFY((actual_io_mode_write == actual_io_mode_expected), message);
} else {
HDfprintf(stderr, "%s %d -> (%d,%d)\n", test_name, mpi_rank,
@@ -3341,7 +3347,7 @@ test_actual_io_mode(int selection_mode) {
"actual_chunk_opt_mode has correct value for reset write (independent)");
VRFY(actual_io_mode_write == H5D_MPIO_NO_COLLECTIVE,
"actual_io_mode has correct value for reset write (independent)");
-
+
/* Read */
ret = H5Dread(dataset, data_type, H5S_ALL, H5S_ALL, dxpl_read, buffer);
VRFY((ret >= 0), "H5Dwrite() dataset multichunk write succeeded");
@@ -3351,7 +3357,7 @@ test_actual_io_mode(int selection_mode) {
VRFY( (ret >= 0), "retriving actual io mode succeeded" );
ret = H5Pget_mpio_actual_chunk_opt_mode(dxpl_read, &actual_chunk_opt_mode_read);
VRFY( (ret >= 0), "retriving actual chunk opt mode succeeded" );
-
+
VRFY(actual_chunk_opt_mode_read == H5D_MPIO_NO_CHUNK_OPTIMIZATION,
"actual_chunk_opt_mode has correct value for reset read (independent)");
VRFY(actual_io_mode_read == H5D_MPIO_NO_COLLECTIVE,
@@ -3373,10 +3379,10 @@ test_actual_io_mode(int selection_mode) {
return;
}
-
+
/* Function: actual_io_mode_tests
*
- * Purpose: Tests all possible cases of the actual_io_mode property.
+ * Purpose: Tests all possible cases of the actual_io_mode property.
*
* Programmer: Jacob Gruber
* Date: 2011-04-06
@@ -3387,10 +3393,10 @@ actual_io_mode_tests(void) {
int mpi_rank = -1;
MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
    MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
-
+
test_actual_io_mode(TEST_ACTUAL_IO_NO_COLLECTIVE);
-
- /*
+
+ /*
* Test multi-chunk-io via proc_num threshold
*/
test_actual_io_mode(TEST_ACTUAL_IO_MULTI_CHUNK_IND);
@@ -3401,10 +3407,10 @@ actual_io_mode_tests(void) {
test_actual_io_mode(TEST_ACTUAL_IO_MULTI_CHUNK_MIX);
else
HDfprintf(stdout, "Multi Chunk Mixed test requires 3 proceses minimum\n");
-
+
test_actual_io_mode(TEST_ACTUAL_IO_MULTI_CHUNK_MIX_DISAGREE);
- /*
+ /*
* Test multi-chunk-io via setting direct property
*/
test_actual_io_mode(TEST_ACTUAL_IO_DIRECT_MULTI_CHUNK_IND);
@@ -3412,31 +3418,31 @@ actual_io_mode_tests(void) {
test_actual_io_mode(TEST_ACTUAL_IO_LINK_CHUNK);
test_actual_io_mode(TEST_ACTUAL_IO_CONTIGUOUS);
-
+
test_actual_io_mode(TEST_ACTUAL_IO_RESET);
return;
}
-/*
+/*
* Function: test_no_collective_cause_mode
*
- * Purpose:
- * tests cases for broken collective I/O and checks that the
+ * Purpose:
+ * tests cases of broken collective I/O and checks that the
* H5Pget_mpio_no_collective_cause properties in the DXPL have the correct values.
*
- * Input:
+ * Input:
 * selection_mode: various modes to cause broken collective I/O
* Note: Originally, each TEST case is supposed to be used alone.
* After some discussion, this is updated to take multiple TEST cases
- * with '|'. However there is no error check for any of combined
+ * with '|'. However, there is no error check for any of the combined
 * test cases, so a tester is responsible for understanding and feeding a
 * proper combination of TESTs if needed.
*
- *
+ *
* TEST_COLLECTIVE:
* Test for regular collective I/O without cause of breaking.
* Just to test normal behavior.
- *
+ *
* TEST_SET_INDEPENDENT:
* Test for Independent I/O as the cause of breaking collective I/O.
*
@@ -3448,7 +3454,7 @@ actual_io_mode_tests(void) {
*
* TEST_NOT_SIMPLE_OR_SCALAR_DATASPACES:
* Test for NULL dataspace as the cause of breaking collective I/O.
- *
+ *
* TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_COMPACT:
* Test for Compact layout as the cause of breaking collective I/O.
*
@@ -3457,17 +3463,18 @@ actual_io_mode_tests(void) {
*
* TEST_FILTERS:
* Test for using filter (checksum) as the cause of breaking collective I/O.
- * Note: TEST_FILTERS mode will not work until H5Dcreate and H5write is supported for mpio and filter feature. Use test_no_collective_cause_mode_filter() function instead.
+ * Note: TEST_FILTERS mode will not work until H5Dcreate and H5Dwrite are supported for the mpio and filter features. Use the test_no_collective_cause_mode_filter() function instead.
+ *
*
- *
* Programmer: Jonathan Kim
* Date: Aug, 2012
*/
+#ifdef LATER
#define DSET_NOCOLCAUSE "nocolcause"
-#define NELM 2
+#endif
#define FILE_EXTERNAL "nocolcause_extern.data"
-static void
-test_no_collective_cause_mode(int selection_mode)
+static void
+test_no_collective_cause_mode(int selection_mode)
{
uint32_t no_collective_cause_local_write = 0;
uint32_t no_collective_cause_local_read = 0;
@@ -3475,13 +3482,12 @@ test_no_collective_cause_mode(int selection_mode)
uint32_t no_collective_cause_global_write = 0;
uint32_t no_collective_cause_global_read = 0;
uint32_t no_collective_cause_global_expected = 0;
- hsize_t coord[NELM][RANK];
const char * filename;
const char * test_name;
hbool_t is_chunked=1;
hbool_t is_independent=0;
- int mpi_size = -1;
+ int mpi_size = -1;
int mpi_rank = -1;
int length;
int * buffer;
@@ -3505,7 +3511,7 @@ test_no_collective_cause_mode(int selection_mode)
H5Z_filter_t filter_info;
#endif /* LATER */
/* set to global value as default */
- int l_facc_type = facc_type;
+ int l_facc_type = facc_type;
char message[256];
/* Set up MPI parameters */
@@ -3561,13 +3567,13 @@ test_no_collective_cause_mode(int selection_mode)
dims[1] = COL_FACTOR * 6;
}
else {
- dims[0] = dim0;
- dims[1] = dim1;
+ dims[0] = (hsize_t)dim0;
+ dims[1] = (hsize_t)dim1;
}
sid = H5Screate_simple (RANK, dims, NULL);
VRFY((sid >= 0), "H5Screate_simple succeeded");
}
-
+
filename = (const char *)GetTestParameters();
HDassert(filename != NULL);
@@ -3584,7 +3590,7 @@ test_no_collective_cause_mode(int selection_mode)
/* If we are not testing contiguous datasets */
if(is_chunked) {
/* Set up chunk information. */
- chunk_dims[0] = dims[0]/mpi_size;
+ chunk_dims[0] = dims[0]/(hsize_t)mpi_size;
chunk_dims[1] = dims[1];
ret = H5Pset_chunk(dcpl, 2, chunk_dims);
VRFY((ret >= 0),"chunk creation property list succeeded");
@@ -3592,20 +3598,19 @@ test_no_collective_cause_mode(int selection_mode)
/* Create the dataset */
- dataset = H5Dcreate2(fid, "nocolcause", data_type, sid, H5P_DEFAULT,
- dcpl, H5P_DEFAULT);
+ dataset = H5Dcreate2(fid, "nocolcause", data_type, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT);
VRFY((dataset >= 0), "H5Dcreate2() dataset succeeded");
- /*
- * Set expected causes and some tweaks based on the type of test
+ /*
+ * Set expected causes and some tweaks based on the type of test
*/
if (selection_mode & TEST_DATATYPE_CONVERSION) {
test_name = "Broken Collective I/O - Datatype Conversion";
no_collective_cause_local_expected |= H5D_MPIO_DATATYPE_CONVERSION;
no_collective_cause_global_expected |= H5D_MPIO_DATATYPE_CONVERSION;
/* set different sign to trigger type conversion */
- data_type = H5T_NATIVE_UINT;
+ data_type = H5T_NATIVE_UINT;
}
if (selection_mode & TEST_DATA_TRANSFORMS) {
@@ -3666,18 +3671,18 @@ test_no_collective_cause_mode(int selection_mode)
}
/* Get the number of elements in the selection */
- length = dims[0] * dims[1];
+ H5_CHECKED_ASSIGN(length, int, dims[0] * dims[1], uint64_t);
/* Allocate and initialize the buffer */
- buffer = (int *)HDmalloc(sizeof(int) * length);
- VRFY((buffer != NULL), "HDmalloc of buffer succeeded");
- for(i = 0; i < length; i++)
+ buffer = (int *)HDmalloc(sizeof(int) * (size_t)length);
+ VRFY((buffer != NULL), "HDmalloc of buffer succeeded");
+ for(i = 0; i < length; i++)
buffer[i] = i;
/* Set up the dxpl for the write */
dxpl_write = H5Pcreate(H5P_DATASET_XFER);
VRFY((dxpl_write >= 0), "H5Pcreate(H5P_DATASET_XFER) succeeded");
-
+
if(is_independent) {
/* Set Independent I/O */
ret = H5Pset_dxpl_mpio(dxpl_write, H5FD_MPIO_INDEPENDENT);
@@ -3687,17 +3692,17 @@ test_no_collective_cause_mode(int selection_mode)
/* Set Collective I/O */
ret = H5Pset_dxpl_mpio(dxpl_write, H5FD_MPIO_COLLECTIVE);
VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
-
+
}
if (selection_mode & TEST_DATA_TRANSFORMS) {
- ret = H5Pset_data_transform (dxpl_write, "x+1");
+ ret = H5Pset_data_transform (dxpl_write, "x+1");
VRFY((ret >= 0), "H5Pset_data_transform succeeded");
}
/*---------------------
* Test Write access
- *---------------------*/
+ *---------------------*/
/* Write */
ret = H5Dwrite(dataset, data_type, mem_space, file_space, dxpl_write, buffer);
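
The TEST_DATA_TRANSFORMS branch above installs the expression "x+1" on the write dxpl; a data transform is one of the causes HDF5 reports for breaking collective I/O, which the test then reads back via H5Pget_mpio_no_collective_cause. In isolation the setup is just this sketch:

    #include <hdf5.h>

    /* Sketch: request collective I/O but also install a data transform;
     * the dxpl should then report that collective I/O was broken,
     * with the data transform as the cause. */
    static hid_t
    make_transform_dxpl(void)
    {
        hid_t dxpl = H5Pcreate(H5P_DATASET_XFER);

        if (dxpl < 0)
            return -1;
        if (H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE) < 0 ||
            H5Pset_data_transform(dxpl, "x+1") < 0) {
            H5Pclose(dxpl);
            return -1;
        }
        return dxpl;
    }
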
@@ -3712,7 +3717,7 @@ test_no_collective_cause_mode(int selection_mode)
/*---------------------
* Test Read access
- *---------------------*/
+ *---------------------*/
/* Make a copy of the dxpl to test the read operation */
dxpl_read = H5Pcopy(dxpl_write);
@@ -3723,7 +3728,7 @@ test_no_collective_cause_mode(int selection_mode)
if(ret < 0) H5Eprint2(H5E_DEFAULT, stdout);
VRFY((ret >= 0), "H5Dread() dataset multichunk read succeeded");
-
+
/* Get the cause of broken collective I/O */
ret = H5Pget_mpio_no_collective_cause (dxpl_read, &no_collective_cause_local_read, &no_collective_cause_global_read);
VRFY((ret >= 0), "retriving no collective cause succeeded" );
@@ -3733,13 +3738,13 @@ test_no_collective_cause_mode(int selection_mode)
"reading and writing are the same for local cause of Broken Collective I/O");
VRFY((no_collective_cause_global_read == no_collective_cause_global_write),
"reading and writing are the same for global cause of Broken Collective I/O");
-
+
/* Test values */
- memset (message, 0, sizeof (message));
- sprintf(message, "Local cause of Broken Collective I/O has the correct value for %s.\n",test_name);
+ HDmemset (message, 0, sizeof (message));
+ HDsprintf(message, "Local cause of Broken Collective I/O has the correct value for %s.\n",test_name);
VRFY((no_collective_cause_local_write == no_collective_cause_local_expected), message);
- memset (message, 0, sizeof (message));
- sprintf(message, "Global cause of Broken Collective I/O has the correct value for %s.\n",test_name);
+ HDmemset (message, 0, sizeof (message));
+ HDsprintf(message, "Global cause of Broken Collective I/O has the correct value for %s.\n",test_name);
VRFY((no_collective_cause_global_write == no_collective_cause_global_expected), message);
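For reference, the public pattern these checks exercise is the no-collective-cause query on a transfer property list after an I/O call. A minimal sketch; the dataset, dataspaces, and buffer are assumed to exist and all names are illustrative:

    uint32_t local_cause = 0, global_cause = 0;
    hid_t    dxpl = H5Pcreate(H5P_DATASET_XFER);

    H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE); /* request collective I/O */
    H5Dwrite(dset, H5T_NATIVE_INT, mem_space, file_space, dxpl, buf);

    /* Ask the library whether (and why) it fell back to independent I/O */
    H5Pget_mpio_no_collective_cause(dxpl, &local_cause, &global_cause);
    if (global_cause != H5D_MPIO_COLLECTIVE)
        HDprintf("collective I/O broken, cause mask 0x%x\n", (unsigned)global_cause);
    H5Pclose(dxpl);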
/* Release some resources */
@@ -3771,28 +3776,29 @@ test_no_collective_cause_mode(int selection_mode)
}
-/*
+/*
* Function: test_no_collective_cause_mode_filter
*
- * Purpose:
- * Tests using a filter as a cause of broken collective I/O and
+ * Purpose:
+ * Tests using a filter as a cause of broken collective I/O and
 * checks that the H5Pget_mpio_no_collective_cause properties in the DXPL
 * have the correct values.
 *
- * NOTE:
- * This is a temporary function.
+ * NOTE:
+ * This is a temporary function.
 * test_no_collective_cause_mode(TEST_FILTERS) will replace this once
 * H5Dcreate and H5Dwrite support the MPIO + filter combination.
*
- * Input:
+ * Input:
* TEST_FILTERS_READ:
* Test for using filter (checksum) as the cause of breaking collective I/O.
- *
+ *
* Programmer: Jonathan Kim
* Date: Aug, 2012
*/
-static void
-test_no_collective_cause_mode_filter(int selection_mode)
+#ifdef LATER
+static void
+test_no_collective_cause_mode_filter(int selection_mode)
{
uint32_t no_collective_cause_local_read = 0;
uint32_t no_collective_cause_local_expected = 0;
@@ -3800,9 +3806,9 @@ test_no_collective_cause_mode_filter(int selection_mode)
uint32_t no_collective_cause_global_expected = 0;
const char * filename;
- const char * test_name;
+ const char * test_name = "I/O";
hbool_t is_chunked=1;
- int mpi_size = -1;
+ int mpi_size = -1;
int mpi_rank = -1;
int length;
int * buffer;
@@ -3832,7 +3838,7 @@ test_no_collective_cause_mode_filter(int selection_mode)
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
MPI_Barrier(MPI_COMM_WORLD);
-
+
HDassert(mpi_size >= 1);
mpi_comm = MPI_COMM_WORLD;
@@ -3858,12 +3864,12 @@ test_no_collective_cause_mode_filter(int selection_mode)
VRFY(0, "Unexpected mode, only test for TEST_FILTERS_READ.");
}
- /* Create the basic Space */
- dims[0] = dim0;
- dims[1] = dim1;
+ /* Create the basic Space */
+ dims[0] = (hsize_t)dim0;
+ dims[1] = (hsize_t)dim1;
sid = H5Screate_simple (RANK, dims, NULL);
VRFY((sid >= 0), "H5Screate_simple succeeded");
-
+
filename = (const char *)GetTestParameters();
HDassert(filename != NULL);
@@ -3878,7 +3884,7 @@ test_no_collective_cause_mode_filter(int selection_mode)
/* If we are not testing contiguous datasets */
if(is_chunked) {
/* Set up chunk information. */
- chunk_dims[0] = dims[0]/mpi_size;
+ chunk_dims[0] = dims[0]/(hsize_t)mpi_size;
chunk_dims[1] = dims[1];
ret = H5Pset_chunk(dcpl, 2, chunk_dims);
VRFY((ret >= 0),"chunk creation property list succeeded");
@@ -3886,8 +3892,7 @@ test_no_collective_cause_mode_filter(int selection_mode)
/* Create the dataset */
- dataset = H5Dcreate2(fid, DSET_NOCOLCAUSE, data_type, sid, H5P_DEFAULT,
- dcpl, H5P_DEFAULT);
+ dataset = H5Dcreate2(fid, DSET_NOCOLCAUSE, data_type, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT);
VRFY((dataset >= 0), "H5Dcreate2() dataset succeeded");
#ifdef LATER /* fletcher32 */
@@ -3910,16 +3915,16 @@ test_no_collective_cause_mode_filter(int selection_mode)
/* Allocate and initialize the buffer */
buffer = (int *)HDmalloc(sizeof(int) * length);
- VRFY((buffer != NULL), "HDmalloc of buffer succeeded");
- for(i = 0; i < length; i++)
+ VRFY((buffer != NULL), "HDmalloc of buffer succeeded");
+ for(i = 0; i < length; i++)
buffer[i] = i;
/* Set up the dxpl for the write */
dxpl = H5Pcreate(H5P_DATASET_XFER);
VRFY((dxpl >= 0), "H5Pcreate(H5P_DATASET_XFER) succeeded");
-
+
if (selection_mode == TEST_FILTERS_READ) {
- /* To test read in collective I/O mode , write in independent mode
+ /* To test read in collective I/O mode, write in independent mode
* because write fails with mpio + filter */
ret = H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_INDEPENDENT);
VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
@@ -3929,7 +3934,7 @@ test_no_collective_cause_mode_filter(int selection_mode)
ret = H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);
VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
}
-
+
/* Write */
ret = H5Dwrite(dataset, data_type, mem_space, file_space, dxpl, buffer);
@@ -3970,17 +3975,17 @@ test_no_collective_cause_mode_filter(int selection_mode)
if(ret < 0) H5Eprint2(H5E_DEFAULT, stdout);
VRFY((ret >= 0), "H5Dread() dataset multichunk read succeeded");
-
+
/* Get the cause of broken collective I/O */
ret = H5Pget_mpio_no_collective_cause (dxpl, &no_collective_cause_local_read, &no_collective_cause_global_read);
VRFY((ret >= 0), "retriving no collective cause succeeded" );
/* Test values */
- memset (message, 0, sizeof (message));
- sprintf(message, "Local cause of Broken Collective I/O has the correct value for %s.\n",test_name);
+ HDmemset (message, 0, sizeof (message));
+ HDsprintf(message, "Local cause of Broken Collective I/O has the correct value for %s.\n",test_name);
VRFY((no_collective_cause_local_read == (uint32_t)no_collective_cause_local_expected), message);
- memset (message, 0, sizeof (message));
- sprintf(message, "Global cause of Broken Collective I/O has the correct value for %s.\n",test_name);
+ HDmemset (message, 0, sizeof (message));
+ HDsprintf(message, "Global cause of Broken Collective I/O has the correct value for %s.\n",test_name);
VRFY((no_collective_cause_global_read == (uint32_t)no_collective_cause_global_expected), message);
/* Release some resources */
@@ -4003,19 +4008,20 @@ test_no_collective_cause_mode_filter(int selection_mode)
HDfree(buffer);
return;
}
+#endif
/* Function: no_collective_cause_tests
*
- * Purpose: Test cases for broken collective I/O.
+ * Purpose: Test cases for broken collective I/O.
*
* Programmer: Jonathan Kim
* Date: Aug, 2012
*/
-void
-no_collective_cause_tests(void)
+void
+no_collective_cause_tests(void)
{
- /*
- * Test individual cause
+ /*
+ * Test individual cause
*/
test_no_collective_cause_mode (TEST_COLLECTIVE);
test_no_collective_cause_mode (TEST_SET_INDEPENDENT);
@@ -4025,15 +4031,15 @@ no_collective_cause_tests(void)
test_no_collective_cause_mode (TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_COMPACT);
test_no_collective_cause_mode (TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_EXTERNAL);
#ifdef LATER /* fletcher32 */
- /* TODO: use this instead of TEST_FILTERS_READ below once H5Dcreate and
+ /* TODO: use this instead of TEST_FILTERS_READ below once H5Dcreate and
 * H5Dwrite are ready for the MPIO + filter feature.
*/
/* test_no_collective_cause_mode (TEST_FILTERS); */
test_no_collective_cause_mode_filter (TEST_FILTERS_READ);
-#endif /* LATER */
+#endif /* LATER */
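Once that support lands, the dcpl side of a TEST_FILTERS case would look roughly like the following sketch; the chunk sizes are illustrative, and the expected cause mask would be H5D_MPIO_FILTERS:

    hid_t   dcpl     = H5Pcreate(H5P_DATASET_CREATE);
    hsize_t chunk[2] = {16, 16};

    H5Pset_chunk(dcpl, 2, chunk); /* filters require a chunked layout */
    H5Pset_fletcher32(dcpl);      /* checksum filter breaks collective I/O */
    /* create the dataset with this dcpl, write through a collective dxpl,
     * then expect H5D_MPIO_FILTERS in the no-collective-cause mask */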
- /*
- * Test combined causes
+ /*
+ * Test combined causes
*/
test_no_collective_cause_mode (TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_EXTERNAL | TEST_DATATYPE_CONVERSION);
test_no_collective_cause_mode (TEST_DATATYPE_CONVERSION | TEST_DATA_TRANSFORMS);
@@ -4057,22 +4063,22 @@ void
dataset_atomicity(void)
{
hid_t fid; /* HDF5 file ID */
- hid_t acc_tpl; /* File access templates */
- hid_t sid; /* Dataspace ID */
- hid_t dataset1; /* Dataset IDs */
- hsize_t dims[RANK]; /* dataset dim sizes */
- int *write_buf = NULL; /* data buffer */
- int *read_buf = NULL; /* data buffer */
+ hid_t acc_tpl; /* File access templates */
+ hid_t sid; /* Dataspace ID */
+ hid_t dataset1; /* Dataset IDs */
+ hsize_t dims[RANK]; /* dataset dim sizes */
+ int *write_buf = NULL; /* data buffer */
+ int *read_buf = NULL; /* data buffer */
int buf_size;
hid_t dataset2;
- hid_t file_dataspace; /* File dataspace ID */
- hid_t mem_dataspace; /* Memory dataspace ID */
+ hid_t file_dataspace; /* File dataspace ID */
+ hid_t mem_dataspace; /* Memory dataspace ID */
hsize_t start[RANK];
hsize_t stride[RANK];
hsize_t count[RANK];
hsize_t block[RANK];
const char *filename;
- herr_t ret; /* Generic return value */
+ herr_t ret; /* Generic return value */
int mpi_size, mpi_rank;
int i, j, k;
hbool_t atomicity = FALSE;
@@ -4082,11 +4088,11 @@ dataset_atomicity(void)
dim0 = 64; dim1 = 32;
filename = GetTestParameters();
if (facc_type != FACC_MPIO) {
- printf("Atomicity tests will not work without the MPIO VFD\n");
+ HDprintf("Atomicity tests will not work without the MPIO VFD\n");
return;
}
if(VERBOSE_MED)
- printf("atomic writes to file %s\n", filename);
+ HDprintf("atomic writes to file %s\n", filename);
/* set up MPI parameters */
MPI_Comm_size(MPI_COMM_WORLD,&mpi_size);
@@ -4094,10 +4100,10 @@ dataset_atomicity(void)
buf_size = dim0 * dim1;
/* allocate memory for data buffer */
- write_buf = (int *)HDcalloc(buf_size, sizeof(int));
+ write_buf = (int *)HDcalloc((size_t)buf_size, sizeof(int));
VRFY((write_buf != NULL), "write_buf HDcalloc succeeded");
/* allocate memory for data buffer */
- read_buf = (int *)HDcalloc(buf_size, sizeof(int));
+ read_buf = (int *)HDcalloc((size_t)buf_size, sizeof(int));
VRFY((read_buf != NULL), "read_buf HDcalloc succeeded");
/* setup file access template */
@@ -4113,8 +4119,8 @@ dataset_atomicity(void)
VRFY((ret >= 0), "H5Pclose succeeded");
/* setup dimensionality object */
- dims[0] = dim0;
- dims[1] = dim1;
+ dims[0] = (hsize_t)dim0;
+ dims[1] = (hsize_t)dim1;
sid = H5Screate_simple (RANK, dims, NULL);
VRFY((sid >= 0), "H5Screate_simple succeeded");
@@ -4137,7 +4143,7 @@ dataset_atomicity(void)
H5P_DEFAULT, write_buf);
VRFY((ret >= 0), "H5Dwrite dataset2 succeeded");
}
-
+
ret = H5Dclose(dataset1);
VRFY((ret >= 0), "H5Dclose succeeded");
ret = H5Dclose(dataset2);
@@ -4152,17 +4158,17 @@ dataset_atomicity(void)
/* make sure setting atomicity fails on a serial file ID */
/* file locking allows only one file open (serial) for writing */
if(MAINPROCESS){
- fid=H5Fopen(filename,H5F_ACC_RDWR,H5P_DEFAULT);
- VRFY((fid >= 0), "H5Fopen succeeded");
+ fid=H5Fopen(filename,H5F_ACC_RDWR,H5P_DEFAULT);
+ VRFY((fid >= 0), "H5Fopen succeeded");
}
/* should fail */
- ret = H5Fset_mpi_atomicity (fid , TRUE);
+ ret = H5Fset_mpi_atomicity(fid, TRUE);
VRFY((ret == FAIL), "H5Fset_mpi_atomicity failed");
if(MAINPROCESS){
- ret = H5Fclose(fid);
- VRFY((ret >= 0), "H5Fclose succeeded");
+ ret = H5Fclose(fid);
+ VRFY((ret >= 0), "H5Fclose succeeded");
}
MPI_Barrier (comm);
@@ -4179,7 +4185,7 @@ dataset_atomicity(void)
ret = H5Pclose(acc_tpl);
VRFY((ret >= 0), "H5Pclose succeeded");
- ret = H5Fset_mpi_atomicity (fid , TRUE);
+ ret = H5Fset_mpi_atomicity(fid, TRUE);
VRFY((ret >= 0), "H5Fset_mpi_atomicity succeeded");
/* open dataset1 (contiguous case) */
@@ -4198,7 +4204,7 @@ dataset_atomicity(void)
}
/* check that the atomicity flag is set */
- ret = H5Fget_mpi_atomicity (fid , &atomicity);
+ ret = H5Fget_mpi_atomicity(fid, &atomicity);
 VRFY((ret >= 0), "atomicity get failed");
 VRFY((atomicity == TRUE), "atomicity set failed");
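Taken in isolation, the atomicity API pair verified here maps directly onto MPI file atomicity; a minimal sketch with a hypothetical file name:

    hid_t   fapl = H5Pcreate(H5P_FILE_ACCESS);
    hid_t   file;
    hbool_t flag = FALSE;

    H5Pset_fapl_mpio(fapl, MPI_COMM_WORLD, MPI_INFO_NULL);
    file = H5Fopen("example.h5", H5F_ACC_RDWR, fapl);

    H5Fset_mpi_atomicity(file, TRUE);  /* wraps MPI_File_set_atomicity() */
    H5Fget_mpi_atomicity(file, &flag); /* flag is now TRUE */

    H5Fclose(file);
    H5Pclose(fapl);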
@@ -4218,23 +4224,23 @@ dataset_atomicity(void)
if(VERBOSE_MED) {
i=0;j=0;k=0;
for (i=0 ; i<dim0 ; i++) {
- printf ("\n");
+ HDprintf ("\n");
for (j=0 ; j<dim1 ; j++)
- printf ("%d ", read_buf[k++]);
+ HDprintf ("%d ", read_buf[k++]);
}
}
/* The processes that read the dataset must either read all values
as 0 (read happened before process 0 wrote to dataset 1), or 5
(read happened after process 0 wrote to dataset 1) */
- if (0 != mpi_rank) {
+ if (0 != mpi_rank) {
int compare = read_buf[0];
- VRFY((compare == 0 || compare == 5),
+ VRFY((compare == 0 || compare == 5),
"Atomicity Test Failed Process %d: Value read should be 0 or 5\n");
for (i=1; i<buf_size; i++) {
if (read_buf[i] != compare) {
- printf("Atomicity Test Failed Process %d: read_buf[%d] is %d, should be %d\n", mpi_rank, i, read_buf[i], compare);
+ HDprintf("Atomicity Test Failed Process %d: read_buf[%d] is %d, should be %d\n", mpi_rank, i, read_buf[i], compare);
nerrors ++;
}
}
@@ -4252,10 +4258,10 @@ dataset_atomicity(void)
VRFY((dataset2 >= 0), "H5Dopen2 succeeded");
/* allocate memory for data buffer */
- write_buf = (int *)HDcalloc(buf_size, sizeof(int));
+ write_buf = (int *)HDcalloc((size_t)buf_size, sizeof(int));
VRFY((write_buf != NULL), "write_buf HDcalloc succeeded");
/* allocate memory for data buffer */
- read_buf = (int *)HDcalloc(buf_size, sizeof(int));
+ read_buf = (int *)HDcalloc((size_t)buf_size, sizeof(int));
VRFY((read_buf != NULL), "read_buf HDcalloc succeeded");
for (i=0 ; i<buf_size ; i++) {
@@ -4267,17 +4273,17 @@ dataset_atomicity(void)
atomicity = FALSE;
/* check that the atomicity flag is set */
- ret = H5Fget_mpi_atomicity (fid , &atomicity);
+ ret = H5Fget_mpi_atomicity(fid, &atomicity);
 VRFY((ret >= 0), "atomicity get failed");
 VRFY((atomicity == TRUE), "atomicity set failed");
- block[0] = dim0/mpi_size - 1;
- block[1] = dim1/mpi_size - 1;
+ block[0] = (hsize_t)(dim0/mpi_size - 1);
+ block[1] = (hsize_t)(dim1/mpi_size - 1);
stride[0] = block[0] + 1;
stride[1] = block[1] + 1;
- count[0] = mpi_size;
- count[1] = mpi_size;
+ count[0] = (hsize_t)mpi_size;
+ count[1] = (hsize_t)mpi_size;
start[0] = 0;
start[1] = 0;
@@ -4316,41 +4322,43 @@ dataset_atomicity(void)
if (mpi_rank == 1) {
i=0;j=0;k=0;
for (i=0 ; i<dim0 ; i++) {
- printf ("\n");
+ HDprintf ("\n");
for (j=0 ; j<dim1 ; j++)
- printf ("%d ", read_buf[k++]);
+ HDprintf ("%d ", read_buf[k++]);
}
- printf ("\n");
+ HDprintf ("\n");
}
}
 /* The processes that read the dataset must read all values
 as 5 (the read happened after process 0 wrote to dataset 1) */
- if (0 != mpi_rank) {
+ if (0 != mpi_rank) {
int compare;
i=0;j=0;k=0;
compare = 5;
+ H5_CHECK_OVERFLOW(block[0], hsize_t, int);
+ H5_CHECK_OVERFLOW(block[1], hsize_t, int);
for (i=0 ; i<dim0 ; i++) {
- if (i >= mpi_rank*(block[0]+1)) {
+ if (i >= mpi_rank*((int)block[0]+1)) {
break;
}
- if ((i+1)%(block[0]+1)==0) {
+ if ((i+1)%((int)block[0]+1)==0) {
k += dim1;
continue;
}
for (j=0 ; j<dim1 ; j++) {
- if (j >= mpi_rank*(block[1]+1)) {
- k += dim1 - mpi_rank*(block[1]+1);
+ if (j >= mpi_rank*((int)block[1]+1)) {
+ k += dim1 - mpi_rank*((int)block[1]+1);
break;
}
- if ((j+1)%(block[1]+1)==0) {
+ if ((j+1)%((int)block[1]+1)==0) {
k++;
continue;
}
else if (compare != read_buf[k]) {
- printf("Atomicity Test Failed Process %d: read_buf[%d] is %d, should be %d\n", mpi_rank, k, read_buf[k], compare);
+ HDprintf("Atomicity Test Failed Process %d: read_buf[%d] is %d, should be %d\n", mpi_rank, k, read_buf[k], compare);
nerrors++;
}
k ++;
@@ -4381,8 +4389,8 @@ dataset_atomicity(void)
* Programmer: Quincey Koziol
* Date: April, 2013
*/
-void
-test_dense_attr(void)
+void
+test_dense_attr(void)
{
int mpi_size, mpi_rank;
hid_t fpid, fid;
@@ -4420,7 +4428,7 @@ test_dense_attr(void)
status = H5Pclose(gpid);
VRFY((status >= 0), "H5Pclose succeeded");
- atFileSpace = H5Screate_simple(1, atDims, NULL);
+ atFileSpace = H5Screate_simple(1, atDims, NULL);
VRFY((atFileSpace > 0), "H5Screate_simple succeeded");
atid = H5Acreate2(gid, "bar", H5T_STD_U64LE, atFileSpace, H5P_DEFAULT, H5P_DEFAULT);
VRFY((atid > 0), "H5Acreate succeeded");
diff --git a/testpar/t_file.c b/testpar/t_file.c
index 1b6978f..19a75c8 100644
--- a/testpar/t_file.c
+++ b/testpar/t_file.c
@@ -17,28 +17,29 @@
#include "testphdf5.h"
-#include "H5PBprivate.h"
+#include "H5CXprivate.h" /* API Contexts */
#include "H5Iprivate.h"
+#include "H5PBprivate.h"
/*
* This file needs to access private information from the H5F package.
*/
-#define H5C_FRIEND /*suppress error about including H5Cpkg */
-#include "H5Cpkg.h"
-#define H5AC_FRIEND /*suppress error about including H5ACpkg */
+#define H5AC_FRIEND /*suppress error about including H5ACpkg */
#include "H5ACpkg.h"
-#define H5MF_FRIEND /*suppress error about including H5MFpkg */
-#include "H5MFpkg.h"
-#define H5F_FRIEND /*suppress error about including H5Fpkg */
+#define H5C_FRIEND /*suppress error about including H5Cpkg */
+#include "H5Cpkg.h"
+#define H5F_FRIEND /*suppress error about including H5Fpkg */
#define H5F_TESTING
#include "H5Fpkg.h"
+#define H5MF_FRIEND /*suppress error about including H5MFpkg */
+#include "H5MFpkg.h"
#define NUM_DSETS 5
int mpi_size, mpi_rank;
static int create_file(const char *filename, hid_t fcpl, hid_t fapl, int metadata_write_strategy);
-static int open_file(const char *filename, hid_t fapl, int metadata_write_strategy,
+static int open_file(const char *filename, hid_t fapl, int metadata_write_strategy,
hsize_t page_size, size_t page_buffer_size);
/*
@@ -59,15 +60,15 @@ test_split_comm_access(void)
MPI_Info info = MPI_INFO_NULL;
int is_old, mrc;
int newrank, newprocs;
- hid_t fid; /* file IDs */
- hid_t acc_tpl; /* File access properties */
- herr_t ret; /* generic return value */
+ hid_t fid; /* file IDs */
+ hid_t acc_tpl; /* File access properties */
+ herr_t ret; /* generic return value */
const char *filename;
filename = (const char *)GetTestParameters();
if (VERBOSE_MED)
- printf("Split Communicator access test on file %s\n",
- filename);
+ HDprintf("Split Communicator access test on file %s\n",
+ filename);
/* set up MPI parameters */
MPI_Comm_size(MPI_COMM_WORLD,&mpi_size);
@@ -79,35 +80,35 @@ test_split_comm_access(void)
MPI_Comm_rank(comm,&newrank);
if (is_old){
- /* odd-rank processes */
- mrc = MPI_Barrier(comm);
- VRFY((mrc==MPI_SUCCESS), "");
+ /* odd-rank processes */
+ mrc = MPI_Barrier(comm);
+ VRFY((mrc==MPI_SUCCESS), "");
}else{
- /* even-rank processes */
- int sub_mpi_rank; /* rank in the sub-comm */
- MPI_Comm_rank(comm,&sub_mpi_rank);
-
- /* setup file access template */
- acc_tpl = create_faccess_plist(comm, info, facc_type);
- VRFY((acc_tpl >= 0), "");
-
- /* create the file collectively */
- fid=H5Fcreate(filename,H5F_ACC_TRUNC,H5P_DEFAULT,acc_tpl);
- VRFY((fid >= 0), "H5Fcreate succeeded");
-
- /* Release file-access template */
- ret=H5Pclose(acc_tpl);
- VRFY((ret >= 0), "");
-
- /* close the file */
- ret=H5Fclose(fid);
- VRFY((ret >= 0), "");
-
- /* delete the test file */
- if (sub_mpi_rank == 0){
- mrc = MPI_File_delete((char *)filename, info);
- /*VRFY((mrc==MPI_SUCCESS), ""); */
- }
+ /* even-rank processes */
+ int sub_mpi_rank; /* rank in the sub-comm */
+ MPI_Comm_rank(comm,&sub_mpi_rank);
+
+ /* setup file access template */
+ acc_tpl = create_faccess_plist(comm, info, facc_type);
+ VRFY((acc_tpl >= 0), "");
+
+ /* create the file collectively */
+ fid=H5Fcreate(filename,H5F_ACC_TRUNC,H5P_DEFAULT,acc_tpl);
+ VRFY((fid >= 0), "H5Fcreate succeeded");
+
+ /* Release file-access template */
+ ret=H5Pclose(acc_tpl);
+ VRFY((ret >= 0), "");
+
+ /* close the file */
+ ret=H5Fclose(fid);
+ VRFY((ret >= 0), "");
+
+ /* delete the test file */
+ if (sub_mpi_rank == 0){
+ mrc = MPI_File_delete((char *)filename, info);
+ /*VRFY((mrc==MPI_SUCCESS), ""); */
+ }
}
mrc = MPI_Comm_free(&comm);
VRFY((mrc==MPI_SUCCESS), "MPI_Comm_free succeeded");
@@ -119,15 +120,15 @@ void
test_page_buffer_access(void)
{
hid_t file_id = -1; /* File ID */
- hid_t fcpl, fapl, fapl_self;
- hid_t dxpl_id = H5P_DATASET_XFER_DEFAULT;
+ hid_t fcpl, fapl;
size_t page_count = 0;
int i, num_elements = 200;
haddr_t raw_addr, meta_addr;
int *data;
H5F_t *f = NULL;
- herr_t ret; /* generic return value */
+ herr_t ret; /* generic return value */
const char *filename;
+ hbool_t api_ctx_pushed = FALSE; /* Whether API context pushed */
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
@@ -135,7 +136,7 @@ test_page_buffer_access(void)
filename = (const char *)GetTestParameters();
if (VERBOSE_MED)
- printf("Page Buffer Usage in Parallel %s\n", filename);
+ HDprintf("Page Buffer Usage in Parallel %s\n", filename);
fapl = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type);
VRFY((fapl >= 0), "create_faccess_plist succeeded");
@@ -144,7 +145,7 @@ test_page_buffer_access(void)
ret = H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_PAGE, 1, (hsize_t)0);
VRFY((ret == 0), "");
- ret = H5Pset_file_space_page_size(fcpl, sizeof(int)*100);
+ ret = H5Pset_file_space_page_size(fcpl, sizeof(int)*128);
VRFY((ret == 0), "");
ret = H5Pset_page_buffer_size(fapl, sizeof(int)*100000, 0, 0);
VRFY((ret == 0), "");
@@ -177,12 +178,8 @@ test_page_buffer_access(void)
 /* initialize all the elements to have a value of -1 */
for(i=0 ; i<num_elements ; i++)
data[i] = -1;
-
- /* MSC - why this stopped working ? */
-#if 0
if(MAINPROCESS) {
- hid_t fapl_self;
-
+ hid_t fapl_self = H5I_INVALID_HID;
fapl_self = create_faccess_plist(MPI_COMM_SELF, MPI_INFO_NULL, facc_type);
ret = H5Pset_page_buffer_size(fapl_self, sizeof(int)*1000, 0, 0);
@@ -194,25 +191,30 @@ test_page_buffer_access(void)
file_id = H5Fcreate(filename, H5F_ACC_TRUNC, fcpl, fapl_self);
VRFY((file_id >= 0), "");
+ /* Push API context */
+ ret = H5CX_push();
+ VRFY((ret == 0), "H5CX_push()");
+ api_ctx_pushed = TRUE;
+
/* Get a pointer to the internal file object */
f = (H5F_t *)H5I_object(file_id);
VRFY((f->shared->page_buf != NULL), "Page Buffer created with 1 process");
/* allocate space for 200 raw elements */
- raw_addr = H5MF_alloc(f, H5FD_MEM_DRAW, H5AC_ind_read_dxpl_id, sizeof(int)*(size_t)num_elements);
+ raw_addr = H5MF_alloc(f, H5FD_MEM_DRAW, sizeof(int)*(size_t)num_elements);
VRFY((raw_addr != HADDR_UNDEF), "");
/* allocate space for 200 metadata elements */
- meta_addr = H5MF_alloc(f, H5FD_MEM_SUPER, H5AC_ind_read_dxpl_id, sizeof(int)*(size_t)num_elements);
+ meta_addr = H5MF_alloc(f, H5FD_MEM_SUPER, sizeof(int)*(size_t)num_elements);
VRFY((meta_addr != HADDR_UNDEF), "");
page_count = 0;
- ret = H5F_block_write(f, H5FD_MEM_SUPER, meta_addr, sizeof(int)*(size_t)num_elements, H5AC_ind_read_dxpl_id, data);
+ ret = H5F_block_write(f, H5FD_MEM_SUPER, meta_addr, sizeof(int)*(size_t)num_elements, data);
VRFY((ret == 0), "");
- ret = H5F_block_write(f, H5FD_MEM_SUPER, meta_addr, sizeof(int)*(size_t)num_elements, H5AC_ind_read_dxpl_id, data);
- ret = H5F_block_write(f, H5FD_MEM_DRAW, raw_addr, sizeof(int)*(size_t)num_elements, H5AC_rawdata_dxpl_id, data);
+ ret = H5F_block_write(f, H5FD_MEM_SUPER, meta_addr, sizeof(int)*(size_t)num_elements, data);
+ ret = H5F_block_write(f, H5FD_MEM_DRAW, raw_addr, sizeof(int)*(size_t)num_elements, data);
VRFY((ret == 0), "");
VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB");
@@ -220,10 +222,10 @@ test_page_buffer_access(void)
/* update the first 50 elements */
for(i=0 ; i<50 ; i++)
data[i] = i;
- ret = H5F_block_write(f, H5FD_MEM_DRAW, raw_addr, sizeof(int)*50, H5AC_rawdata_dxpl_id, data);
+ ret = H5F_block_write(f, H5FD_MEM_DRAW, raw_addr, sizeof(int)*50, data);
H5Eprint2(H5E_DEFAULT, stderr);
VRFY((ret == 0), "");
- ret = H5F_block_write(f, H5FD_MEM_SUPER, meta_addr, sizeof(int)*50, H5AC_ind_read_dxpl_id, data);
+ ret = H5F_block_write(f, H5FD_MEM_SUPER, meta_addr, sizeof(int)*50, data);
VRFY((ret == 0), "");
page_count += 2;
VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB");
@@ -231,43 +233,43 @@ test_page_buffer_access(void)
/* update the second 50 elements */
for(i=0 ; i<50 ; i++)
data[i] = i+50;
- ret = H5F_block_write(f, H5FD_MEM_DRAW, raw_addr+(sizeof(int)*50), sizeof(int)*50, H5AC_rawdata_dxpl_id, data);
+ ret = H5F_block_write(f, H5FD_MEM_DRAW, raw_addr+(sizeof(int)*50), sizeof(int)*50, data);
VRFY((ret == 0), "");
- ret = H5F_block_write(f, H5FD_MEM_SUPER, meta_addr+(sizeof(int)*50), sizeof(int)*50, H5AC_ind_read_dxpl_id, data);
+ ret = H5F_block_write(f, H5FD_MEM_SUPER, meta_addr+(sizeof(int)*50), sizeof(int)*50, data);
VRFY((ret == 0), "");
VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB");
/* update 100 - 200 */
for(i=0 ; i<100 ; i++)
data[i] = i+100;
- ret = H5F_block_write(f, H5FD_MEM_DRAW, raw_addr+(sizeof(int)*100), sizeof(int)*100, H5AC_rawdata_dxpl_id, data);
+ ret = H5F_block_write(f, H5FD_MEM_DRAW, raw_addr+(sizeof(int)*100), sizeof(int)*100, data);
VRFY((ret == 0), "");
- ret = H5F_block_write(f, H5FD_MEM_SUPER, meta_addr+(sizeof(int)*100), sizeof(int)*100, H5AC_ind_read_dxpl_id, data);
+ ret = H5F_block_write(f, H5FD_MEM_SUPER, meta_addr+(sizeof(int)*100), sizeof(int)*100, data);
VRFY((ret == 0), "");
VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB");
- ret = H5PB_flush(f, dxpl_id, FALSE);
+ ret = H5PB_flush(f->shared);
VRFY((ret == 0), "");
/* read elements 0 - 200 */
- ret = H5F_block_read(f, H5FD_MEM_DRAW, raw_addr, sizeof(int)*200, H5AC_rawdata_dxpl_id, data);
+ ret = H5F_block_read(f, H5FD_MEM_DRAW, raw_addr, sizeof(int)*200, data);
VRFY((ret == 0), "");
VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB");
for (i=0; i < 200; i++)
VRFY((data[i] == i), "Read different values than written");
- ret = H5F_block_read(f, H5FD_MEM_SUPER, meta_addr, sizeof(int)*200, H5AC_ind_read_dxpl_id, data);
+ ret = H5F_block_read(f, H5FD_MEM_SUPER, meta_addr, sizeof(int)*200, data);
VRFY((ret == 0), "");
VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB");
for (i=0; i < 200; i++)
VRFY((data[i] == i), "Read different values than written");
/* read elements 0 - 50 */
- ret = H5F_block_read(f, H5FD_MEM_DRAW, raw_addr, sizeof(int)*50, H5AC_rawdata_dxpl_id, data);
+ ret = H5F_block_read(f, H5FD_MEM_DRAW, raw_addr, sizeof(int)*50, data);
VRFY((ret == 0), "");
VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB");
for (i=0; i < 50; i++)
VRFY((data[i] == i), "Read different values than written");
- ret = H5F_block_read(f, H5FD_MEM_SUPER, meta_addr, sizeof(int)*50, H5AC_ind_read_dxpl_id, data);
+ ret = H5F_block_read(f, H5FD_MEM_SUPER, meta_addr, sizeof(int)*50, data);
VRFY((ret == 0), "");
VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB");
for (i=0; i < 50; i++)
@@ -278,8 +280,10 @@ test_page_buffer_access(void)
VRFY((ret >= 0), "H5Fclose succeeded");
ret = H5Pclose(fapl_self);
VRFY((ret>=0), "H5Pclose succeeded");
+
+ /* Pop API context */
+ if(api_ctx_pushed) { ret = H5CX_pop(); VRFY((ret == 0), "H5CX_pop()"); api_ctx_pushed = FALSE; }
}
-#endif
MPI_Barrier(MPI_COMM_WORLD);
@@ -293,23 +297,28 @@ test_page_buffer_access(void)
file_id = H5Fcreate(filename, H5F_ACC_TRUNC, fcpl, fapl);
VRFY((file_id >= 0), "");
+ /* Push API context */
+ ret = H5CX_push();
+ VRFY((ret == 0), "H5CX_push()");
+ api_ctx_pushed = TRUE;
+
/* Get a pointer to the internal file object */
f = (H5F_t *)H5I_object(file_id);
VRFY((f->shared->page_buf != NULL), "Page Buffer created with 1 process");
/* allocate space for 200 raw elements */
- raw_addr = H5MF_alloc(f, H5FD_MEM_DRAW, H5AC_ind_read_dxpl_id, sizeof(int)*(size_t)num_elements);
+ raw_addr = H5MF_alloc(f, H5FD_MEM_DRAW, sizeof(int)*(size_t)num_elements);
VRFY((raw_addr != HADDR_UNDEF), "");
/* allocate space for 200 metadata elements */
- meta_addr = H5MF_alloc(f, H5FD_MEM_SUPER, H5AC_ind_read_dxpl_id, sizeof(int)*(size_t)num_elements);
+ meta_addr = H5MF_alloc(f, H5FD_MEM_SUPER, sizeof(int)*(size_t)num_elements);
VRFY((meta_addr != HADDR_UNDEF), "");
page_count = 0;
- ret = H5F_block_write(f, H5FD_MEM_SUPER, meta_addr, sizeof(int)*(size_t)num_elements, H5AC_ind_read_dxpl_id, data);
+ ret = H5F_block_write(f, H5FD_MEM_SUPER, meta_addr, sizeof(int)*(size_t)num_elements, data);
VRFY((ret == 0), "");
- ret = H5F_block_write(f, H5FD_MEM_DRAW, raw_addr, sizeof(int)*(size_t)num_elements, H5AC_rawdata_dxpl_id, data);
+ ret = H5F_block_write(f, H5FD_MEM_DRAW, raw_addr, sizeof(int)*(size_t)num_elements, data);
VRFY((ret == 0), "");
VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB");
@@ -317,27 +326,27 @@ test_page_buffer_access(void)
/* update the first 50 elements */
for(i=0 ; i<50 ; i++)
data[i] = i;
- ret = H5F_block_write(f, H5FD_MEM_DRAW, raw_addr, sizeof(int)*50, H5AC_rawdata_dxpl_id, data);
+ ret = H5F_block_write(f, H5FD_MEM_DRAW, raw_addr, sizeof(int)*50, data);
VRFY((ret == 0), "");
- ret = H5F_block_write(f, H5FD_MEM_SUPER, meta_addr, sizeof(int)*50, H5AC_ind_read_dxpl_id, data);
+ ret = H5F_block_write(f, H5FD_MEM_SUPER, meta_addr, sizeof(int)*50, data);
VRFY((ret == 0), "");
VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB");
/* update the second 50 elements */
for(i=0 ; i<50 ; i++)
data[i] = i+50;
- ret = H5F_block_write(f, H5FD_MEM_DRAW, raw_addr+(sizeof(int)*50), sizeof(int)*50, H5AC_rawdata_dxpl_id, data);
+ ret = H5F_block_write(f, H5FD_MEM_DRAW, raw_addr+(sizeof(int)*50), sizeof(int)*50, data);
VRFY((ret == 0), "");
- ret = H5F_block_write(f, H5FD_MEM_SUPER, meta_addr+(sizeof(int)*50), sizeof(int)*50, H5AC_ind_read_dxpl_id, data);
+ ret = H5F_block_write(f, H5FD_MEM_SUPER, meta_addr+(sizeof(int)*50), sizeof(int)*50, data);
VRFY((ret == 0), "");
VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB");
/* update 100 - 200 */
for(i=0 ; i<100 ; i++)
data[i] = i+100;
- ret = H5F_block_write(f, H5FD_MEM_DRAW, raw_addr+(sizeof(int)*100), sizeof(int)*100, H5AC_rawdata_dxpl_id, data);
+ ret = H5F_block_write(f, H5FD_MEM_DRAW, raw_addr+(sizeof(int)*100), sizeof(int)*100, data);
VRFY((ret == 0), "");
- ret = H5F_block_write(f, H5FD_MEM_SUPER, meta_addr+(sizeof(int)*100), sizeof(int)*100, H5AC_ind_read_dxpl_id, data);
+ ret = H5F_block_write(f, H5FD_MEM_SUPER, meta_addr+(sizeof(int)*100), sizeof(int)*100, data);
VRFY((ret == 0), "");
VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB");
@@ -345,24 +354,24 @@ test_page_buffer_access(void)
VRFY((ret == 0), "");
/* read elements 0 - 200 */
- ret = H5F_block_read(f, H5FD_MEM_DRAW, raw_addr, sizeof(int)*200, H5AC_rawdata_dxpl_id, data);
+ ret = H5F_block_read(f, H5FD_MEM_DRAW, raw_addr, sizeof(int)*200, data);
VRFY((ret == 0), "");
VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB");
for (i=0; i < 200; i++)
VRFY((data[i] == i), "Read different values than written");
- ret = H5F_block_read(f, H5FD_MEM_SUPER, meta_addr, sizeof(int)*200, H5AC_ind_read_dxpl_id, data);
+ ret = H5F_block_read(f, H5FD_MEM_SUPER, meta_addr, sizeof(int)*200, data);
VRFY((ret == 0), "");
VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB");
for (i=0; i < 200; i++)
VRFY((data[i] == i), "Read different values than written");
/* read elements 0 - 50 */
- ret = H5F_block_read(f, H5FD_MEM_DRAW, raw_addr, sizeof(int)*50, H5AC_rawdata_dxpl_id, data);
+ ret = H5F_block_read(f, H5FD_MEM_DRAW, raw_addr, sizeof(int)*50, data);
VRFY((ret == 0), "");
VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB");
for (i=0; i < 50; i++)
VRFY((data[i] == i), "Read different values than written");
- ret = H5F_block_read(f, H5FD_MEM_SUPER, meta_addr, sizeof(int)*50, H5AC_ind_read_dxpl_id, data);
+ ret = H5F_block_read(f, H5FD_MEM_SUPER, meta_addr, sizeof(int)*50, data);
VRFY((ret == 0), "");
page_count += 1;
VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB");
@@ -373,23 +382,23 @@ test_page_buffer_access(void)
/* reset the first 50 elements to -1*/
for(i=0 ; i<50 ; i++)
data[i] = -1;
- ret = H5F_block_write(f, H5FD_MEM_DRAW, raw_addr, sizeof(int)*50, H5AC_rawdata_dxpl_id, data);
+ ret = H5F_block_write(f, H5FD_MEM_DRAW, raw_addr, sizeof(int)*50, data);
VRFY((ret == 0), "");
VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB");
- ret = H5F_block_write(f, H5FD_MEM_SUPER, meta_addr, sizeof(int)*50, H5AC_ind_read_dxpl_id, data);
+ ret = H5F_block_write(f, H5FD_MEM_SUPER, meta_addr, sizeof(int)*50, data);
VRFY((ret == 0), "");
VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB");
/* read elements 0 - 50 */
- ret = H5F_block_read(f, H5FD_MEM_DRAW, raw_addr, sizeof(int)*50, H5AC_rawdata_dxpl_id, data);
+ ret = H5F_block_read(f, H5FD_MEM_DRAW, raw_addr, sizeof(int)*50, data);
VRFY((ret == 0), "");
VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB");
for (i=0; i < 50; i++)
VRFY((data[i] == -1), "Read different values than written");
- ret = H5F_block_read(f, H5FD_MEM_SUPER, meta_addr, sizeof(int)*50, H5AC_ind_read_dxpl_id, data);
+ ret = H5F_block_read(f, H5FD_MEM_SUPER, meta_addr, sizeof(int)*50, data);
VRFY((ret == 0), "");
VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB");
- for (i=0; i < 50; i++)
+ for (i=0; i < 50; i++)
VRFY((data[i] == -1), "Read different values than written");
/* close the file */
@@ -402,6 +411,9 @@ test_page_buffer_access(void)
ret = H5Pclose(fcpl);
VRFY((ret>=0), "H5Pclose succeeded");
+ /* Pop API context */
+ if(api_ctx_pushed) { ret = H5CX_pop(); VRFY((ret == 0), "H5CX_pop()"); api_ctx_pushed = FALSE; }
+
HDfree(data);
data = NULL;
MPI_Barrier(MPI_COMM_WORLD);
@@ -420,10 +432,11 @@ create_file(const char *filename, hid_t fcpl, hid_t fapl, int metadata_write_str
hsize_t dims[RANK], i;
hsize_t num_elements;
int k;
- char dset_name[10];
+ char dset_name[20];
H5F_t *f = NULL;
H5C_t *cache_ptr = NULL;
H5AC_cache_config_t config;
+ hbool_t api_ctx_pushed = FALSE; /* Whether API context pushed */
herr_t ret;
file_id = H5Fcreate(filename, H5F_ACC_TRUNC, fcpl, fapl);
@@ -432,6 +445,11 @@ create_file(const char *filename, hid_t fcpl, hid_t fapl, int metadata_write_str
ret = H5Fflush(file_id, H5F_SCOPE_GLOBAL);
VRFY((ret == 0), "");
+ /* Push API context */
+ ret = H5CX_push();
+ VRFY((ret == 0), "H5CX_push()");
+ api_ctx_pushed = TRUE;
+
f = (H5F_t *)H5I_object(file_id);
VRFY((f != NULL), "");
@@ -453,19 +471,19 @@ create_file(const char *filename, hid_t fcpl, hid_t fapl, int metadata_write_str
grp_id = H5Gcreate2(file_id, "GROUP", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
VRFY((grp_id >= 0), "");
- dims[0] = ROW_FACTOR*mpi_size;
- dims[1] = COL_FACTOR*mpi_size;
+ dims[0] = (hsize_t)(ROW_FACTOR*mpi_size);
+ dims[1] = (hsize_t)(COL_FACTOR*mpi_size);
sid = H5Screate_simple (RANK, dims, NULL);
VRFY((sid >= 0), "H5Screate_simple succeeded");
 /* Each process takes a slab of rows. */
- block[0] = dims[0]/mpi_size;
+ block[0] = dims[0]/(hsize_t)mpi_size;
block[1] = dims[1];
stride[0] = block[0];
stride[1] = block[1];
count[0] = 1;
count[1] = 1;
- start[0] = mpi_rank*block[0];
+ start[0] = (hsize_t)mpi_rank*block[0];
start[1] = 0;
num_elements = block[0] * block[1];
@@ -484,28 +502,28 @@ create_file(const char *filename, hid_t fcpl, hid_t fapl, int metadata_write_str
VRFY((mem_dataspace >= 0), "");
for(k=0 ; k<NUM_DSETS; k++) {
- sprintf(dset_name, "D1dset%d", k);
+ HDsprintf(dset_name, "D1dset%d", k);
dset_id = H5Dcreate2(grp_id, dset_name, H5T_NATIVE_INT, sid,
H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
VRFY((dset_id >= 0), "");
ret = H5Dclose(dset_id);
VRFY((ret == 0), "");
- sprintf(dset_name, "D2dset%d", k);
+ HDsprintf(dset_name, "D2dset%d", k);
dset_id = H5Dcreate2(grp_id, dset_name, H5T_NATIVE_INT, sid,
H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
VRFY((dset_id >= 0), "");
ret = H5Dclose(dset_id);
VRFY((ret == 0), "");
- sprintf(dset_name, "D3dset%d", k);
+ HDsprintf(dset_name, "D3dset%d", k);
dset_id = H5Dcreate2(grp_id, dset_name, H5T_NATIVE_INT, sid,
H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
VRFY((dset_id >= 0), "");
ret = H5Dclose(dset_id);
VRFY((ret == 0), "");
- sprintf(dset_name, "dset%d", k);
+ HDsprintf(dset_name, "dset%d", k);
dset_id = H5Dcreate2(grp_id, dset_name, H5T_NATIVE_INT, sid,
H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
VRFY((dset_id >= 0), "");
@@ -529,13 +547,13 @@ create_file(const char *filename, hid_t fcpl, hid_t fapl, int metadata_write_str
for (i=0; i < num_elements; i++)
VRFY((data_array[i] == mpi_rank+1), "Dataset Verify failed");
- sprintf(dset_name, "D1dset%d", k);
+ HDsprintf(dset_name, "D1dset%d", k);
ret = H5Ldelete(grp_id, dset_name, H5P_DEFAULT);
VRFY((ret == 0), "");
- sprintf(dset_name, "D2dset%d", k);
+ HDsprintf(dset_name, "D2dset%d", k);
ret = H5Ldelete(grp_id, dset_name, H5P_DEFAULT);
VRFY((ret == 0), "");
- sprintf(dset_name, "D3dset%d", k);
+ HDsprintf(dset_name, "D3dset%d", k);
ret = H5Ldelete(grp_id, dset_name, H5P_DEFAULT);
VRFY((ret == 0), "");
}
@@ -549,6 +567,9 @@ create_file(const char *filename, hid_t fcpl, hid_t fapl, int metadata_write_str
ret = H5Sclose(mem_dataspace);
VRFY((ret == 0), "");
+ /* Pop API context */
+ if(api_ctx_pushed) { ret = H5CX_pop(); VRFY((ret == 0), "H5CX_pop()"); api_ctx_pushed = FALSE; }
+
MPI_Barrier(MPI_COMM_WORLD);
HDfree(data_array);
return 0;
@@ -568,10 +589,11 @@ open_file(const char *filename, hid_t fapl, int metadata_write_strategy,
hsize_t block[RANK];
int i, k, ndims;
hsize_t num_elements;
- char dset_name[10];
+ char dset_name[20];
H5F_t *f = NULL;
H5C_t *cache_ptr = NULL;
H5AC_cache_config_t config;
+ hbool_t api_ctx_pushed = FALSE; /* Whether API context pushed */
herr_t ret;
config.version = H5AC__CURR_CACHE_CONFIG_VERSION;
@@ -587,6 +609,11 @@ open_file(const char *filename, hid_t fapl, int metadata_write_strategy,
H5Eprint2(H5E_DEFAULT, stderr);
VRFY((file_id >= 0), "");
+ /* Push API context */
+ ret = H5CX_push();
+ VRFY((ret == 0), "H5CX_push()");
+ api_ctx_pushed = TRUE;
+
ret = H5Fflush(file_id, H5F_SCOPE_GLOBAL);
VRFY((ret == 0), "");
@@ -605,17 +632,17 @@ open_file(const char *filename, hid_t fapl, int metadata_write_strategy,
grp_id = H5Gopen2(file_id, "GROUP", H5P_DEFAULT);
VRFY((grp_id >= 0), "");
- dims[0] = ROW_FACTOR*mpi_size;
- dims[1] = COL_FACTOR*mpi_size;
+ dims[0] = (hsize_t)(ROW_FACTOR*mpi_size);
+ dims[1] = (hsize_t)(COL_FACTOR*mpi_size);
 /* Each process takes a slab of rows. */
- block[0] = dims[0]/mpi_size;
+ block[0] = dims[0]/(hsize_t)mpi_size;
block[1] = dims[1];
stride[0] = block[0];
stride[1] = block[1];
count[0] = 1;
count[1] = 1;
- start[0] = mpi_rank*block[0];
+ start[0] = (hsize_t)mpi_rank*block[0];
start[1] = 0;
num_elements = block[0] * block[1];
@@ -628,7 +655,7 @@ open_file(const char *filename, hid_t fapl, int metadata_write_strategy,
VRFY((mem_dataspace >= 0), "");
for(k=0 ; k<NUM_DSETS; k++) {
- sprintf(dset_name, "dset%d", k);
+ HDsprintf(dset_name, "dset%d", k);
dset_id = H5Dopen2(grp_id, dset_name, H5P_DEFAULT);
VRFY((dset_id >= 0), "");
@@ -637,8 +664,8 @@ open_file(const char *filename, hid_t fapl, int metadata_write_strategy,
ndims = H5Sget_simple_extent_dims(sid, dims, NULL);
VRFY((ndims == 2), "H5Sget_simple_extent_dims succeeded");
- VRFY(dims[0] == ROW_FACTOR*mpi_size, "Wrong dataset dimensions");
- VRFY(dims[1] == COL_FACTOR*mpi_size, "Wrong dataset dimensions");
+ VRFY(dims[0] == (hsize_t)(ROW_FACTOR*mpi_size), "Wrong dataset dimensions");
+ VRFY(dims[1] == (hsize_t)(COL_FACTOR*mpi_size), "Wrong dataset dimensions");
ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, start, stride, count, block);
VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
@@ -651,7 +678,7 @@ open_file(const char *filename, hid_t fapl, int metadata_write_strategy,
ret = H5Sclose(sid);
VRFY((ret == 0), "");
- for (i=0; i < num_elements; i++)
+ for (i=0; i < (int)num_elements; i++)
VRFY((data_array[i] == mpi_rank+1), "Dataset Verify failed");
}
@@ -677,7 +704,7 @@ open_file(const char *filename, hid_t fapl, int metadata_write_strategy,
HDassert(entry_ptr->is_dirty == FALSE);
if(!entry_ptr->is_pinned && !entry_ptr->is_protected) {
- ret = H5AC_expunge_entry(f, H5AC_ind_read_dxpl_id, entry_ptr->type, entry_ptr->addr, 0);
+ ret = H5AC_expunge_entry(f, entry_ptr->type, entry_ptr->addr, 0);
VRFY((ret == 0), "");
}
@@ -699,37 +726,116 @@ open_file(const char *filename, hid_t fapl, int metadata_write_strategy,
VRFY((ret == 0), "");
ret = H5Sclose(mem_dataspace);
VRFY((ret == 0), "");
+
+ /* Pop API context */
+ if(api_ctx_pushed) { ret = H5CX_pop(); VRFY((ret == 0), "H5CX_pop()"); api_ctx_pushed = FALSE; }
+
HDfree(data_array);
return nerrors;
}
+/*
+ * NOTE: See HDFFV-10894 and add tests later to verify MPI-specific properties in the
+ * incoming fapl that could conflict with the existing values in H5F_shared_t on
+ * multiple opens of the same file.
+ */
void
test_file_properties(void)
{
- hid_t fid; /* HDF5 file ID */
- hid_t fapl_id; /* File access plist */
+ hid_t fid = H5I_INVALID_HID; /* HDF5 file ID */
+ hid_t fapl_id = H5I_INVALID_HID; /* File access plist */
+ hid_t fapl_copy_id = H5I_INVALID_HID; /* File access plist */
hbool_t is_coll;
+ htri_t are_equal;
const char *filename;
MPI_Comm comm = MPI_COMM_WORLD;
MPI_Info info = MPI_INFO_NULL;
+ MPI_Comm comm_out = MPI_COMM_NULL;
+ MPI_Info info_out = MPI_INFO_NULL;
herr_t ret; /* Generic return value */
+ int mpi_ret; /* MPI return value */
+ int cmp; /* Compare value */
filename = (const char *)GetTestParameters();
/* set up MPI parameters */
- MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
- MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+ mpi_ret = MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+ VRFY((mpi_ret >= 0), "MPI_Comm_size succeeded");
+ mpi_ret = MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+ VRFY((mpi_ret >= 0), "MPI_Comm_rank succeeded");
+ mpi_ret = MPI_Info_create(&info);
+ VRFY((mpi_ret >= 0), "MPI_Info_create succeeded");
+ mpi_ret = MPI_Info_set(info, "hdf_info_prop1", "xyz");
+ VRFY((mpi_ret == MPI_SUCCESS), "MPI_Info_set");
/* setup file access plist */
fapl_id = H5Pcreate(H5P_FILE_ACCESS);
- VRFY((fapl_id >= 0), "H5Pcreate");
+ VRFY((fapl_id != H5I_INVALID_HID), "H5Pcreate");
ret = H5Pset_fapl_mpio(fapl_id, comm, info);
VRFY((ret >= 0), "H5Pset_fapl_mpio");
+ /* Check getting and setting MPI properties
+ * (for use in VOL connectors, not the MPI-I/O VFD)
+ */
+ ret = H5Pset_mpi_params(fapl_id, comm, info);
+ VRFY((ret >= 0), "H5Pset_mpi_params succeeded");
+ ret = H5Pget_mpi_params(fapl_id, &comm_out, &info_out);
+ VRFY((ret >= 0), "H5Pget_mpi_params succeeded");
+
+ /* Check the communicator */
+ VRFY((comm != comm_out), "Communicators should not be bitwise identical");
+ cmp = MPI_UNEQUAL;
+ mpi_ret = MPI_Comm_compare(comm, comm_out, &cmp);
+ VRFY((ret >= 0), "MPI_Comm_compare succeeded");
+ VRFY((cmp == MPI_CONGRUENT), "Communicators should be congruent via MPI_Comm_compare");
+
+ /* Check the info object */
+ VRFY((info != info_out), "Info objects should not be bitwise identical");
+
+ /* Free the obtained comm and info object */
+ mpi_ret = MPI_Comm_free(&comm_out);
+ VRFY((mpi_ret >= 0), "MPI_Comm_free succeeded");
+ mpi_ret = MPI_Info_free(&info_out);
+ VRFY((mpi_ret >= 0), "MPI_Info_free succeeded");
+
+ /* Copy the fapl and ensure it's equal to the original */
+ fapl_copy_id = H5Pcopy(fapl_id);
+ VRFY((fapl_copy_id != H5I_INVALID_HID), "H5Pcopy");
+ are_equal = H5Pequal(fapl_id, fapl_copy_id);
+ VRFY((TRUE == are_equal), "H5Pequal");
+
+ /* Add a property to the copy and ensure it's different now */
+ mpi_ret = MPI_Info_set(info, "hdf_info_prop2", "abc");
+ VRFY((mpi_ret == MPI_SUCCESS), "MPI_Info_set");
+ ret = H5Pset_mpi_params(fapl_copy_id, comm, info);
+ VRFY((ret >= 0), "H5Pset_mpi_params succeeded");
+ are_equal = H5Pequal(fapl_id, fapl_copy_id);
+ VRFY((FALSE == are_equal), "H5Pequal");
+
+ /* Add a property with the same key but a different value to the original
+ * and ensure they are still different.
+ */
+ mpi_ret = MPI_Info_set(info, "hdf_info_prop2", "ijk");
+ VRFY((mpi_ret == MPI_SUCCESS), "MPI_Info_set");
+ ret = H5Pset_mpi_params(fapl_id, comm, info);
+ VRFY((ret >= 0), "H5Pset_mpi_params succeeded");
+ are_equal = H5Pequal(fapl_id, fapl_copy_id);
+ VRFY((FALSE == are_equal), "H5Pequal");
+
+ /* Set the second property in the original to the same
+ * value as the copy and ensure they are the same now.
+ */
+ mpi_ret = MPI_Info_set(info, "hdf_info_prop2", "abc");
+ VRFY((mpi_ret == MPI_SUCCESS), "MPI_Info_set");
+ ret = H5Pset_mpi_params(fapl_id, comm, info);
+ VRFY((ret >= 0), "H5Pset_mpi_params succeeded");
+ are_equal = H5Pequal(fapl_id, fapl_copy_id);
+ VRFY((TRUE == are_equal), "H5Pequal");
+
/* create the file */
fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id);
- VRFY((fid >= 0), "H5Fcreate succeeded");
+ VRFY((fid != H5I_INVALID_HID), "H5Fcreate succeeded");
/* verify settings for file access properties */
@@ -750,7 +856,7 @@ test_file_properties(void)
ret = H5Pset_fapl_mpio(fapl_id, comm, info);
VRFY((ret >= 0), "H5Pset_fapl_mpio failed");
fid = H5Fopen(filename, H5F_ACC_RDWR, fapl_id);
- VRFY((fid >= 0), "H5Fcreate succeeded");
+ VRFY((fid != H5I_INVALID_HID), "H5Fcreate succeeded");
/* verify settings for file access properties */
@@ -767,7 +873,7 @@ test_file_properties(void)
ret = H5Fclose(fid);
VRFY((ret >= 0), "H5Fclose succeeded");
- /* Open the file with the MPI-IO driver w collective settings */
+ /* Open the file with the MPI-IO driver w/ collective settings */
ret = H5Pset_fapl_mpio(fapl_id, comm, info);
VRFY((ret >= 0), "H5Pset_fapl_mpio failed");
/* Collective metadata writes */
@@ -777,7 +883,7 @@ test_file_properties(void)
ret = H5Pset_all_coll_metadata_ops(fapl_id, TRUE);
VRFY((ret >= 0), "H5Pget_all_coll_metadata_ops succeeded");
fid = H5Fopen(filename, H5F_ACC_RDWR, fapl_id);
- VRFY((fid >= 0), "H5Fcreate succeeded");
+ VRFY((fid != H5I_INVALID_HID), "H5Fcreate succeeded");
/* verify settings for file access properties */
@@ -794,10 +900,10 @@ test_file_properties(void)
/* close fapl and retrieve it from file */
ret = H5Pclose(fapl_id);
VRFY((ret >= 0), "H5Pclose succeeded");
- fapl_id = -1;
+ fapl_id = H5I_INVALID_HID;
fapl_id = H5Fget_access_plist(fid);
- VRFY((fapl_id >= 0), "H5P_FILE_ACCESS");
+ VRFY((fapl_id != H5I_INVALID_HID), "H5P_FILE_ACCESS");
/* verify settings for file access properties */
@@ -818,5 +924,12 @@ test_file_properties(void)
/* Release file-access plist */
ret = H5Pclose(fapl_id);
VRFY((ret >= 0), "H5Pclose succeeded");
+ ret = H5Pclose(fapl_copy_id);
+ VRFY((ret >= 0), "H5Pclose succeeded");
+
+ /* Free the MPI info object */
+ mpi_ret = MPI_Info_free(&info);
+ VRFY((mpi_ret >= 0), "MPI_Info_free succeeded");
+
} /* end test_file_properties() */
diff --git a/testpar/t_file_image.c b/testpar/t_file_image.c
index 62db11a..81bb7c2 100644
--- a/testpar/t_file_image.c
+++ b/testpar/t_file_image.c
@@ -21,11 +21,11 @@
*
* Process zero:
*
- * 1) Creates a core file with an integer vector data set of
- * length n (= mpi_size),
+ * 1) Creates a core file with an integer vector data set of
+ * length n (= mpi_size),
*
- * 2) Initializes the vector to zero in location 0, and to -1
- * everywhere else.
+ * 2) Initializes the vector to zero in location 0, and to -1
+ * everywhere else.
*
* 3) Flushes the core file, and gets an image of it. Closes
* the core file.
@@ -35,7 +35,7 @@
* 5) Awaits receipt on a file image from process n-1.
*
* 6) opens the image received from process n-1, verifies that
- * it contains a vector of length equal to mpi_size, and
+ * it contains a vector of length equal to mpi_size, and
* that the vector contains (0, 1, 2, ... n-1)
*
* 7) closes the core file and exits.
@@ -45,7 +45,7 @@
* 1) Await receipt of file image from process (i - 1).
*
 * 2) Open the image with the core file driver, verify that it
- * contains a vector v of length n, and that v[j] = j for
+ * contains a vector v of length n, and that v[j] = j for
 * 0 <= j < i, and that v[j] == -1 for i <= j < n
*
* 3) Set v[i] = i in the core file.
@@ -87,13 +87,13 @@ file_image_daisy_chain_test(void)
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
/* setup file name */
- HDsnprintf(file_name, 1024, "file_image_daisy_chain_test_%05d.h5",
+ HDsnprintf(file_name, 1024, "file_image_daisy_chain_test_%05d.h5",
(int)mpi_rank);
if(mpi_rank == 0) {
-
- /* 1) Creates a core file with an integer vector data set
- * of length mpi_size,
+
+ /* 1) Creates a core file with an integer vector data set
+ * of length mpi_size,
*/
fapl_id = H5Pcreate(H5P_FILE_ACCESS);
VRFY((fapl_id >= 0), "creating fapl");
@@ -111,10 +111,10 @@ file_image_daisy_chain_test(void)
dset_id = H5Dcreate2(file_id, "v", H5T_NATIVE_INT, space_id,
H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
VRFY((dset_id >= 0), "created data set");
-
- /* 2) Initialize the vector to zero in location 0, and
- * to -1 everywhere else.
+
+ /* 2) Initialize the vector to zero in location 0, and
+ * to -1 everywhere else.
*/
vector_ptr = (int *)HDmalloc((size_t)(mpi_size) * sizeof(int));
@@ -131,7 +131,7 @@ file_image_daisy_chain_test(void)
HDfree(vector_ptr);
vector_ptr = NULL;
-
+
/* 3) Flush the core file, and get an image of it. Close
* the core file.
*/
@@ -159,14 +159,14 @@ file_image_daisy_chain_test(void)
err = H5Pclose(fapl_id);
VRFY((err >= 0), "closed fapl(1).");
-
+
/* 4) Send the image to process 1. */
- mpi_result = MPI_Ssend((void *)(&image_len), (int)sizeof(ssize_t),
+ mpi_result = MPI_Ssend((void *)(&image_len), (int)sizeof(ssize_t),
MPI_BYTE, 1, 0, MPI_COMM_WORLD);
VRFY((mpi_result == MPI_SUCCESS), "sent image size to process 1");
- mpi_result = MPI_Ssend((void *)image_ptr, (int)image_len,
+ mpi_result = MPI_Ssend((void *)image_ptr, (int)image_len,
MPI_BYTE, 1, 0, MPI_COMM_WORLD);
VRFY((mpi_result == MPI_SUCCESS), "sent image to process 1");
@@ -190,9 +190,9 @@ file_image_daisy_chain_test(void)
&rcvstat);
VRFY((mpi_result == MPI_SUCCESS), \
"received file image from process n-1");
-
+
/* 6) open the image received from process n-1, verify that
- * it contains a vector of length equal to mpi_size, and
+ * it contains a vector of length equal to mpi_size, and
* that the vector contains (0, 1, 2, ... n-1).
*/
fapl_id = H5Pcreate(H5P_FILE_ACCESS);
@@ -229,7 +229,7 @@ file_image_daisy_chain_test(void)
vector_ptr = (int *)HDmalloc((size_t)(mpi_size) * sizeof(int));
VRFY((vector_ptr != NULL), "allocated in memory rep of vector");
- err = H5Dread(dset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL,
+ err = H5Dread(dset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL,
H5P_DEFAULT, (void *)vector_ptr);
VRFY((err >= 0), "read received vector.");
@@ -238,7 +238,7 @@ file_image_daisy_chain_test(void)
if(vector_ptr[i] != i)
vector_ok = FALSE;
VRFY((vector_ok), "verified received vector.");
-
+
HDfree(vector_ptr);
vector_ptr = NULL;
@@ -276,9 +276,9 @@ file_image_daisy_chain_test(void)
&rcvstat);
VRFY((mpi_result == MPI_SUCCESS), \
"received file image from process mpi_rank-1");
-
+
/* 2) Open the image with the core file driver, verify that it
- * contains a vector v of length n, and that v[j] = j for
+ * contains a vector v of length n, and that v[j] = j for
* 0 <= j < i, and that v[j] == -1 for i <= j < n
*/
fapl_id = H5Pcreate(H5P_FILE_ACCESS);
@@ -316,7 +316,7 @@ file_image_daisy_chain_test(void)
vector_ptr = (int *)HDmalloc((size_t)(mpi_size) * sizeof(int));
VRFY((vector_ptr != NULL), "allocated in memory rep of vector");
- err = H5Dread(dset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL,
+ err = H5Dread(dset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL,
H5P_DEFAULT, (void *)vector_ptr);
VRFY((err >= 0), "read received vector.");
@@ -331,7 +331,7 @@ file_image_daisy_chain_test(void)
}
}
VRFY((vector_ok), "verified received vector.");
-
+
/* 3) Set v[i] = i in the core file. */
@@ -344,7 +344,7 @@ file_image_daisy_chain_test(void)
HDfree(vector_ptr);
vector_ptr = NULL;
-
+
/* 4) Flush the core file and send it to process (mpi_rank + 1) % n. */
err = H5Fflush(file_id, H5F_SCOPE_GLOBAL);
@@ -359,14 +359,14 @@ file_image_daisy_chain_test(void)
bytes_read = H5Fget_file_image(file_id, image_ptr, (size_t)image_len);
VRFY(bytes_read == image_len, "wrote file into image buffer");
- mpi_result = MPI_Ssend((void *)(&image_len), (int)sizeof(ssize_t),
- MPI_BYTE, (mpi_rank + 1) % mpi_size, 0,
+ mpi_result = MPI_Ssend((void *)(&image_len), (int)sizeof(ssize_t),
+ MPI_BYTE, (mpi_rank + 1) % mpi_size, 0,
MPI_COMM_WORLD);
VRFY((mpi_result == MPI_SUCCESS), \
"sent image size to process (mpi_rank + 1) % mpi_size");
- mpi_result = MPI_Ssend((void *)image_ptr, (int)image_len,
- MPI_BYTE, (mpi_rank + 1) % mpi_size, 0,
+ mpi_result = MPI_Ssend((void *)image_ptr, (int)image_len,
+ MPI_BYTE, (mpi_rank + 1) % mpi_size, 0,
MPI_COMM_WORLD);
VRFY((mpi_result == MPI_SUCCESS), \
"sent image to process (mpi_rank + 1) % mpi_size");
@@ -374,7 +374,7 @@ file_image_daisy_chain_test(void)
HDfree(image_ptr);
image_ptr = NULL;
image_len = 0;
-
+
/* 5) close the core file and exit. */
err = H5Sclose(space_id);
diff --git a/testpar/t_filter_read.c b/testpar/t_filter_read.c
index 4556b01..7b0e677 100644
--- a/testpar/t_filter_read.c
+++ b/testpar/t_filter_read.c
@@ -37,9 +37,9 @@ static int mpi_size, mpi_rank;
#define HS_DIM1 200
#define HS_DIM2 100
-
+
/*-------------------------------------------------------------------------
- * Function: filter_read_internal
+ * Function: filter_read_internal
*
* Purpose: Tests parallel reading of a 2D dataset written serially using
* filters. During the parallel reading phase, the dataset is
@@ -54,13 +54,13 @@ static void
filter_read_internal(const char *filename, hid_t dcpl,
hsize_t *dset_size)
{
- hid_t file, dataset; /* HDF5 IDs */
- hid_t access_plist; /* Access property list ID */
- hid_t sid, memspace; /* Dataspace IDs */
- hsize_t size[2]; /* Dataspace dimensions */
- hsize_t hs_offset[2]; /* Hyperslab offset */
- hsize_t hs_size[2]; /* Hyperslab size */
- size_t i, j; /* Local index variables */
+ hid_t file, dataset; /* HDF5 IDs */
+ hid_t access_plist; /* Access property list ID */
+ hid_t sid, memspace; /* Dataspace IDs */
+ hsize_t size[2]; /* Dataspace dimensions */
+ hsize_t hs_offset[2]; /* Hyperslab offset */
+ hsize_t hs_size[2]; /* Hyperslab size */
+ size_t i, j; /* Local index variables */
char name[32] = "dataset";
herr_t hrc; /* Error status */
int *points = NULL; /* Writing buffer for entire dataset */
@@ -74,10 +74,10 @@ filter_read_internal(const char *filename, hid_t dcpl,
hs_size[0] = size[0] = HS_DIM1;
hs_size[1] = HS_DIM2;
- size[1] = hs_size[1] * mpi_size;
+ size[1] = hs_size[1] * (hsize_t)mpi_size;
hs_offset[0] = 0;
- hs_offset[1] = hs_size[1] * mpi_rank;
+ hs_offset[1] = hs_size[1] * (hsize_t)mpi_rank;
/* Create the data space */
sid = H5Screate_simple(2, size, NULL);
@@ -151,17 +151,17 @@ filter_read_internal(const char *filename, hid_t dcpl,
for (j=0; j<hs_size[1]; j++) {
if(points[i*size[1]+(size_t)hs_offset[1]+j] !=
check[i*hs_size[1]+j]) {
- fprintf(stderr," Read different values than written.\n");
- fprintf(stderr," At index %lu,%lu\n",
- (unsigned long)(i),
- (unsigned long)(hs_offset[1]+j));
- fprintf(stderr," At original: %d\n",
- (int)points[i*size[1]+(size_t)hs_offset[1]+j]);
- fprintf(stderr," At returned: %d\n",
- (int)check[i*hs_size[1]+j]);
+ HDfprintf(stderr," Read different values than written.\n");
+ HDfprintf(stderr," At index %lu,%lu\n",
+ (unsigned long)(i),
+ (unsigned long)(hs_offset[1]+j));
+ HDfprintf(stderr," At original: %d\n",
+ (int)points[i*size[1]+(size_t)hs_offset[1]+j]);
+ HDfprintf(stderr," At returned: %d\n",
+ (int)check[i*hs_size[1]+j]);
VRFY(FALSE, "");
- }
- }
+ }
+ }
}
/* Get the storage size of the dataset */
@@ -194,10 +194,10 @@ filter_read_internal(const char *filename, hid_t dcpl,
/*-------------------------------------------------------------------------
* Function: test_filter_read
*
- * Purpose: Tests parallel reading of datasets written serially using
+ * Purpose: Tests parallel reading of datasets written serially using
* several (combinations of) filters.
*
- * Programmer: Christian Chilan
+ * Programmer: Christian Chilan
* Tuesday, May 15, 2007
*
* Modifications:
@@ -208,14 +208,16 @@ filter_read_internal(const char *filename, hid_t dcpl,
void
test_filter_read(void)
{
- hid_t dc; /* HDF5 IDs */
+ hid_t dc; /* HDF5 IDs */
const hsize_t chunk_size[2] = {CHUNK_DIM1, CHUNK_DIM2}; /* Chunk dimensions */
hsize_t null_size; /* Size of dataset without filters */
unsigned chunk_opts; /* Chunk options */
unsigned disable_partial_chunk_filters; /* Whether filters are disabled on partial chunks */
herr_t hrc;
const char *filename;
+#ifdef H5_HAVE_FILTER_FLETCHER32
hsize_t fletcher32_size; /* Size of dataset with Fletcher32 checksum */
+#endif
#ifdef H5_HAVE_FILTER_DEFLATE
hsize_t deflate_size; /* Size of dataset with deflate filter */
@@ -236,7 +238,7 @@ test_filter_read(void)
filename = GetTestParameters();
if(VERBOSE_MED)
- printf("Parallel reading of dataset written with filters %s\n", filename);
+ HDprintf("Parallel reading of dataset written with filters %s\n", filename);
/*----------------------------------------------------------
* STEP 0: Test without filters.
@@ -448,10 +450,10 @@ test_filter_read(void)
/* Make sure encoding is enabled */
if(h5_szip_can_encode() == 1) {
- hrc = H5Pset_szip(dc, szip_options_mask, szip_pixels_per_block);
+ hrc = H5Pset_szip(dc, szip_options_mask, szip_pixels_per_block);
VRFY(hrc>=0, "H5Pset_szip");
- filter_read_internal(filename,dc,&combo_size);
+ filter_read_internal(filename,dc,&combo_size);
}
/* Clean up objects used for this test */
@@ -461,25 +463,25 @@ test_filter_read(void)
/* Testing shuffle+szip(with encoder)+checksum filters(checksum last) */
/* Make sure encoding is enabled */
if(h5_szip_can_encode() == 1) {
- dc = H5Pcreate(H5P_DATASET_CREATE);
+ dc = H5Pcreate(H5P_DATASET_CREATE);
VRFY(dc>=0, "H5Pcreate");
- hrc = H5Pset_chunk (dc, 2, chunk_size);
+ hrc = H5Pset_chunk (dc, 2, chunk_size);
VRFY(hrc>=0, "H5Pset_chunk");
- hrc = H5Pset_shuffle (dc);
+ hrc = H5Pset_shuffle (dc);
VRFY(hrc>=0, "H5Pset_shuffle");
- hrc = H5Pset_szip(dc, szip_options_mask, szip_pixels_per_block);
+ hrc = H5Pset_szip(dc, szip_options_mask, szip_pixels_per_block);
VRFY(hrc>=0, "H5Pset_szip");
- hrc = H5Pset_fletcher32 (dc);
+ hrc = H5Pset_fletcher32 (dc);
VRFY(hrc>=0, "H5Pset_fletcher32");
- filter_read_internal(filename,dc,&combo_size);
+ filter_read_internal(filename,dc,&combo_size);
- /* Clean up objects used for this test */
- hrc = H5Pclose (dc);
+ /* Clean up objects used for this test */
+ hrc = H5Pclose (dc);
VRFY(hrc>=0, "H5Pclose");
}
diff --git a/testpar/t_filters_parallel.c b/testpar/t_filters_parallel.c
new file mode 100644
index 0000000..76f9276
--- /dev/null
+++ b/testpar/t_filters_parallel.c
@@ -0,0 +1,6065 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * Copyright by the Board of Trustees of the University of Illinois. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the files COPYING and Copyright.html. COPYING can be found at the root *
+ * of the source code distribution tree; Copyright.html can be found at the *
+ * root level of an installed copy of the electronic HDF5 document set and *
+ * is linked from the top-level documents page. It can also be found at *
+ * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
+ * access to either file, you may request a copy from help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/*
+ * Programmer: Jordan Henderson
+ * 01/31/2017
+ *
+ * This file contains tests for writing to and reading from
+ * datasets in parallel with filters applied to the data.
+ */
+
+#include "t_filters_parallel.h"
+
+const char *FILENAME[] = {
+ "t_filters_parallel",
+ NULL
+};
+char filenames[1][256];
+
+int nerrors = 0;
+
+size_t cur_filter_idx = 0;
+#define GZIP_INDEX 0
+#define FLETCHER32_INDEX 1
+
+#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
+
+/*
+ * Used to check if a filter is available before running a test.
+ */
+#define CHECK_CUR_FILTER_AVAIL() \
+{ \
+ htri_t filter_is_avail; \
+ \
+ if (cur_filter_idx == GZIP_INDEX) { \
+ if ((filter_is_avail = H5Zfilter_avail(H5Z_FILTER_DEFLATE)) != TRUE) { \
+ if (MAINPROCESS) { \
+ HDputs(" - SKIPPED - Deflate filter not available"); \
+ } \
+ return; \
+ } \
+ } \
+}
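+
+/*
+ * Note that the macro above only needs to probe the deflate filter;
+ * the Fletcher32 checksum filter is built into the library, so the
+ * FLETCHER32_INDEX pass is assumed to always be available.
+ */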
+
+static herr_t set_dcpl_filter(hid_t dcpl);
+
+#if MPI_VERSION >= 3
+/* Tests for writing data in parallel */
+static void test_write_one_chunk_filtered_dataset(void);
+static void test_write_filtered_dataset_no_overlap(void);
+static void test_write_filtered_dataset_overlap(void);
+static void test_write_filtered_dataset_single_no_selection(void);
+static void test_write_filtered_dataset_all_no_selection(void);
+static void test_write_filtered_dataset_point_selection(void);
+static void test_write_filtered_dataset_interleaved_write(void);
+static void test_write_3d_filtered_dataset_no_overlap_separate_pages(void);
+static void test_write_3d_filtered_dataset_no_overlap_same_pages(void);
+static void test_write_3d_filtered_dataset_overlap(void);
+static void test_write_cmpd_filtered_dataset_no_conversion_unshared(void);
+static void test_write_cmpd_filtered_dataset_no_conversion_shared(void);
+static void test_write_cmpd_filtered_dataset_type_conversion_unshared(void);
+static void test_write_cmpd_filtered_dataset_type_conversion_shared(void);
+#endif
+
+/* Tests for reading data in parallel */
+static void test_read_one_chunk_filtered_dataset(void);
+static void test_read_filtered_dataset_no_overlap(void);
+static void test_read_filtered_dataset_overlap(void);
+static void test_read_filtered_dataset_single_no_selection(void);
+static void test_read_filtered_dataset_all_no_selection(void);
+static void test_read_filtered_dataset_point_selection(void);
+static void test_read_filtered_dataset_interleaved_read(void);
+static void test_read_3d_filtered_dataset_no_overlap_separate_pages(void);
+static void test_read_3d_filtered_dataset_no_overlap_same_pages(void);
+static void test_read_3d_filtered_dataset_overlap(void);
+static void test_read_cmpd_filtered_dataset_no_conversion_unshared(void);
+static void test_read_cmpd_filtered_dataset_no_conversion_shared(void);
+static void test_read_cmpd_filtered_dataset_type_conversion_unshared(void);
+static void test_read_cmpd_filtered_dataset_type_conversion_shared(void);
+
+#if MPI_VERSION >= 3
+/* Other miscellaneous tests */
+static void test_shrinking_growing_chunks(void);
+#endif
+
+/*
+ * Tests for attempting to round-trip the data going from
+ *
+ * written serially -> read in parallel
+ *
+ * and
+ *
+ * written in parallel -> read serially
+ */
+static void test_write_serial_read_parallel(void);
+#if MPI_VERSION >= 3
+static void test_write_parallel_read_serial(void);
+#endif
+
+static MPI_Comm comm = MPI_COMM_WORLD;
+static MPI_Info info = MPI_INFO_NULL;
+static int mpi_rank;
+static int mpi_size;
+
+static void (*tests[])(void) = {
+#if MPI_VERSION >= 3
+ test_write_one_chunk_filtered_dataset,
+ test_write_filtered_dataset_no_overlap,
+ test_write_filtered_dataset_overlap,
+ test_write_filtered_dataset_single_no_selection,
+ test_write_filtered_dataset_all_no_selection,
+ test_write_filtered_dataset_point_selection,
+ test_write_filtered_dataset_interleaved_write,
+ test_write_3d_filtered_dataset_no_overlap_separate_pages,
+ test_write_3d_filtered_dataset_no_overlap_same_pages,
+ test_write_3d_filtered_dataset_overlap,
+ test_write_cmpd_filtered_dataset_no_conversion_unshared,
+ test_write_cmpd_filtered_dataset_no_conversion_shared,
+ test_write_cmpd_filtered_dataset_type_conversion_unshared,
+ test_write_cmpd_filtered_dataset_type_conversion_shared,
+#endif
+ test_read_one_chunk_filtered_dataset,
+ test_read_filtered_dataset_no_overlap,
+ test_read_filtered_dataset_overlap,
+ test_read_filtered_dataset_single_no_selection,
+ test_read_filtered_dataset_all_no_selection,
+ test_read_filtered_dataset_point_selection,
+ test_read_filtered_dataset_interleaved_read,
+ test_read_3d_filtered_dataset_no_overlap_separate_pages,
+ test_read_3d_filtered_dataset_no_overlap_same_pages,
+ test_read_3d_filtered_dataset_overlap,
+ test_read_cmpd_filtered_dataset_no_conversion_unshared,
+ test_read_cmpd_filtered_dataset_no_conversion_shared,
+ test_read_cmpd_filtered_dataset_type_conversion_unshared,
+ test_read_cmpd_filtered_dataset_type_conversion_shared,
+ test_write_serial_read_parallel,
+#if MPI_VERSION >= 3
+ test_write_parallel_read_serial,
+ test_shrinking_growing_chunks,
+#endif
+};
+
+/*
+ * Function to call the appropriate HDF5 filter-setting function
+ * depending on the currently set index. Used to re-run the tests
+ * with different filters to check that the data still comes back
+ * correctly under a variety of circumstances, such as the
+ * Fletcher32 checksum filter increasing the size of the chunk.
+ */
+static herr_t
+set_dcpl_filter(hid_t dcpl)
+{
+ switch (cur_filter_idx) {
+ case GZIP_INDEX:
+ return H5Pset_deflate(dcpl, DEFAULT_DEFLATE_LEVEL);
+ case FLETCHER32_INDEX:
+ return H5Pset_fletcher32(dcpl);
+ default:
+ return H5Pset_deflate(dcpl, DEFAULT_DEFLATE_LEVEL);
+ }
+}
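+
+/*
+ * Illustrative sketch (excluded from the build with #if 0): one plausible
+ * way a driver could re-run the whole suite once per filter by advancing
+ * cur_filter_idx, as described above. The file's actual main() is not
+ * shown in this excerpt, so the loop below is an assumption rather than
+ * the real driver.
+ */
+#if 0
+    size_t filter_idx, test_idx;
+
+    for (filter_idx = GZIP_INDEX; filter_idx <= FLETCHER32_INDEX; filter_idx++) {
+        cur_filter_idx = filter_idx;          /* select deflate, then fletcher32 */
+
+        for (test_idx = 0; test_idx < ARRAY_SIZE(tests); test_idx++) {
+            (*tests[test_idx])();             /* each test checks filter availability itself */
+            MPI_Barrier(comm);                /* keep ranks in lockstep between tests */
+        }
+    }
+#endif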
+
+#if MPI_VERSION >= 3
+/*
+ * Tests parallel write of filtered data in the special
+ * case where a dataset is composed of a single chunk.
+ *
+ * Programmer: Jordan Henderson
+ * 02/01/2017
+ */
+static void
+test_write_one_chunk_filtered_dataset(void)
+{
+ C_DATATYPE *data = NULL;
+ C_DATATYPE *read_buf = NULL;
+ C_DATATYPE *correct_buf = NULL;
+ hsize_t dataset_dims[WRITE_ONE_CHUNK_FILTERED_DATASET_DIMS];
+ hsize_t chunk_dims[WRITE_ONE_CHUNK_FILTERED_DATASET_DIMS];
+ hsize_t sel_dims[WRITE_ONE_CHUNK_FILTERED_DATASET_DIMS];
+ hsize_t start[WRITE_ONE_CHUNK_FILTERED_DATASET_DIMS];
+ hsize_t stride[WRITE_ONE_CHUNK_FILTERED_DATASET_DIMS];
+ hsize_t count[WRITE_ONE_CHUNK_FILTERED_DATASET_DIMS];
+ hsize_t block[WRITE_ONE_CHUNK_FILTERED_DATASET_DIMS];
+ size_t i, data_size, correct_buf_size;
+ hid_t file_id = -1, dset_id = -1, plist_id = -1;
+ hid_t filespace = -1, memspace = -1;
+
+ if (MAINPROCESS) HDputs("Testing write to one-chunk filtered dataset");
+
+ CHECK_CUR_FILTER_AVAIL();
+
+ /* Set up file access property list with parallel I/O access */
+ plist_id = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((plist_id >= 0), "FAPL creation succeeded");
+
+ VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0),
+ "Set FAPL MPIO succeeded");
+ VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
+ "Set libver bounds succeeded");
+
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id);
+ VRFY((file_id >= 0), "Test file open succeeded");
+
+ VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+
+ /* Create the dataspace for the dataset */
+ dataset_dims[0] = (hsize_t) WRITE_ONE_CHUNK_FILTERED_DATASET_NROWS;
+ dataset_dims[1] = (hsize_t) WRITE_ONE_CHUNK_FILTERED_DATASET_NCOLS;
+ chunk_dims[0] = (hsize_t) WRITE_ONE_CHUNK_FILTERED_DATASET_CH_NROWS;
+ chunk_dims[1] = (hsize_t) WRITE_ONE_CHUNK_FILTERED_DATASET_CH_NCOLS;
+ sel_dims[0] = (hsize_t) WRITE_ONE_CHUNK_FILTERED_DATASET_NROWS / (hsize_t) mpi_size;
+ sel_dims[1] = (hsize_t) WRITE_ONE_CHUNK_FILTERED_DATASET_NCOLS;
+
+ filespace = H5Screate_simple(WRITE_ONE_CHUNK_FILTERED_DATASET_DIMS, dataset_dims, NULL);
+ VRFY((filespace >= 0), "File dataspace creation succeeded");
+
+ memspace = H5Screate_simple(WRITE_ONE_CHUNK_FILTERED_DATASET_DIMS, sel_dims, NULL);
+ VRFY((memspace >= 0), "Memory dataspace creation succeeded");
+
+ /* Create chunked dataset */
+ plist_id = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY((plist_id >= 0), "DCPL creation succeeded");
+
+ VRFY((H5Pset_chunk(plist_id, WRITE_ONE_CHUNK_FILTERED_DATASET_DIMS, chunk_dims) >= 0),
+ "Chunk size set");
+
+ /* Add test filter to the pipeline */
+ VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
+
+ dset_id = H5Dcreate2(file_id, WRITE_ONE_CHUNK_FILTERED_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
+ H5P_DEFAULT, plist_id, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset creation succeeded");
+
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+
+ /* Each process defines the dataset selection in memory and writes
+ * it to the hyperslab in the file
+ */
+ count[0] = 1;
+ count[1] = 1;
+ stride[0] = (hsize_t) WRITE_ONE_CHUNK_FILTERED_DATASET_CH_NROWS;
+ stride[1] = (hsize_t) WRITE_ONE_CHUNK_FILTERED_DATASET_CH_NCOLS;
+ block[0] = sel_dims[0];
+ block[1] = sel_dims[1];
+ start[0] = ((hsize_t) mpi_rank * sel_dims[0]);
+ start[1] = 0;
+
+ if (VERBOSE_MED) {
+ HDprintf("Process %d is writing with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n",
+ mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
+ HDfflush(stdout);
+ }
+
+ /* Select hyperslab in the file */
+ filespace = H5Dget_space(dset_id);
+ VRFY((filespace >= 0), "File dataspace retrieval succeeded");
+
+ VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
+ "Hyperslab selection succeeded");
+
+ /* Fill data buffer */
+ data_size = (hsize_t) WRITE_ONE_CHUNK_FILTERED_DATASET_CH_NROWS
+ * (hsize_t) WRITE_ONE_CHUNK_FILTERED_DATASET_NCOLS * sizeof(*data);
+ correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*correct_buf);
+
+ data = (C_DATATYPE *) HDcalloc(1, data_size);
+ VRFY((NULL != data), "HDcalloc succeeded");
+
+ correct_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ VRFY((NULL != correct_buf), "HDcalloc succeeded");
+
+ for (i = 0; i < data_size / sizeof(*data); i++)
+ data[i] = (C_DATATYPE) GEN_DATA(i);
+
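+    /*
+     * Expected values: each rank writes a contiguous
+     * (CH_NROWS / mpi_size) x CH_NCOLS slab of generated data, so for a
+     * flattened index i the formula below takes (i % slab_elements) as
+     * the position inside a rank's slab and adds (i / slab_elements) as
+     * that slab's starting value.
+     */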
+ for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++)
+ correct_buf[i] = ((C_DATATYPE) i % (WRITE_ONE_CHUNK_FILTERED_DATASET_CH_NROWS / mpi_size * WRITE_ONE_CHUNK_FILTERED_DATASET_CH_NCOLS))
+ + ((C_DATATYPE) i / (WRITE_ONE_CHUNK_FILTERED_DATASET_CH_NROWS / mpi_size * WRITE_ONE_CHUNK_FILTERED_DATASET_CH_NCOLS));
+
+ /* Create property list for collective dataset write */
+ plist_id = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((plist_id >= 0), "DXPL creation succeeded");
+
+ VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0),
+ "Set DXPL MPIO succeeded");
+
+ VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, data) >= 0),
+ "Dataset write succeeded");
+
+ if (data) HDfree(data);
+
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+
+ /* Verify the correct data was written */
+ read_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ VRFY((NULL != read_buf), "HDcalloc succeeded");
+
+ dset_id = H5Dopen2(file_id, "/" WRITE_ONE_CHUNK_FILTERED_DATASET_NAME, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset open succeeded");
+
+ VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0),
+ "Dataset read succeeded");
+
+ VRFY((0 == HDmemcmp(read_buf, correct_buf, correct_buf_size)),
+ "Data verification succeeded");
+
+ if (correct_buf) HDfree(correct_buf);
+ if (read_buf) HDfree(read_buf);
+
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+ VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
+ VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded");
+ VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
+
+ return;
+}
+
+/*
+ * Tests parallel write of filtered data in the case where only
+ * one process is writing to a particular chunk in the operation.
+ * In this case, the write operation can be optimized because
+ * chunks do not have to be redistributed to new owners.
+ *
+ * Programmer: Jordan Henderson
+ * 02/01/2017
+ */
+static void
+test_write_filtered_dataset_no_overlap(void)
+{
+ C_DATATYPE *data = NULL;
+ C_DATATYPE *read_buf = NULL;
+ C_DATATYPE *correct_buf = NULL;
+ hsize_t dataset_dims[WRITE_UNSHARED_FILTERED_CHUNKS_DATASET_DIMS];
+ hsize_t chunk_dims[WRITE_UNSHARED_FILTERED_CHUNKS_DATASET_DIMS];
+ hsize_t sel_dims[WRITE_UNSHARED_FILTERED_CHUNKS_DATASET_DIMS];
+ hsize_t start[WRITE_UNSHARED_FILTERED_CHUNKS_DATASET_DIMS];
+ hsize_t stride[WRITE_UNSHARED_FILTERED_CHUNKS_DATASET_DIMS];
+ hsize_t count[WRITE_UNSHARED_FILTERED_CHUNKS_DATASET_DIMS];
+ hsize_t block[WRITE_UNSHARED_FILTERED_CHUNKS_DATASET_DIMS];
+ size_t i, data_size, correct_buf_size;
+ hid_t file_id = -1, dset_id = -1, plist_id = -1;
+ hid_t filespace = -1, memspace = -1;
+
+ if (MAINPROCESS) HDputs("Testing write to unshared filtered chunks");
+
+ CHECK_CUR_FILTER_AVAIL();
+
+ /* Set up file access property list with parallel I/O access */
+ plist_id = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((plist_id >= 0), "FAPL creation succeeded");
+
+ VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0),
+ "Set FAPL MPIO succeeded");
+
+ VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
+ "Set libver bounds succeeded");
+
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id);
+ VRFY((file_id >= 0), "Test file open succeeded");
+
+ VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+
+ /* Create the dataspace for the dataset */
+ dataset_dims[0] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_NROWS;
+ dataset_dims[1] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_NCOLS;
+ chunk_dims[0] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_CH_NROWS;
+ chunk_dims[1] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_CH_NCOLS;
+ sel_dims[0] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_CH_NROWS;
+ sel_dims[1] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_NCOLS;
+
+ filespace = H5Screate_simple(WRITE_UNSHARED_FILTERED_CHUNKS_DATASET_DIMS, dataset_dims, NULL);
+ VRFY((filespace >= 0), "File dataspace creation succeeded");
+
+ memspace = H5Screate_simple(WRITE_UNSHARED_FILTERED_CHUNKS_DATASET_DIMS, sel_dims, NULL);
+ VRFY((memspace >= 0), "Memory dataspace creation succeeded");
+
+ /* Create chunked dataset */
+ plist_id = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY((plist_id >= 0), "DCPL creation succeeded");
+
+ VRFY((H5Pset_chunk(plist_id, WRITE_UNSHARED_FILTERED_CHUNKS_DATASET_DIMS, chunk_dims) >= 0),
+ "Chunk size set");
+
+ /* Add test filter to the pipeline */
+ VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
+
+ dset_id = H5Dcreate2(file_id, WRITE_UNSHARED_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
+ H5P_DEFAULT, plist_id, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset creation succeeded");
+
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+
+ /* Each process defines the dataset selection in memory and writes
+ * it to the hyperslab in the file
+ */
+ count[0] = 1;
+ count[1] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_NCOLS / (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_CH_NCOLS;
+ stride[0] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_CH_NROWS;
+ stride[1] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_CH_NCOLS;
+ block[0] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_CH_NROWS;
+ block[1] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_CH_NCOLS;
+ start[0] = ((hsize_t) mpi_rank * (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_CH_NROWS * count[0]);
+ start[1] = 0;
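+
+    /*
+     * Selection geometry: each rank selects a single row of whole chunks
+     * (block matches the chunk size and count[1] spans every chunk
+     * column), with start[0] stepping by one chunk row per rank, so no
+     * two ranks ever select the same chunk.
+     */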
+
+ if (VERBOSE_MED) {
+ HDprintf("Process %d is writing with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n",
+ mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
+ HDfflush(stdout);
+ }
+
+ /* Select hyperslab in the file */
+ filespace = H5Dget_space(dset_id);
+    VRFY((filespace >= 0), "File dataspace retrieval succeeded");
+
+ VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
+ "Hyperslab selection succeeded");
+
+ /* Fill data buffer */
+ data_size = sel_dims[0] * sel_dims[1] * sizeof(*data);
+ correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*correct_buf);
+
+ data = (C_DATATYPE *) HDcalloc(1, data_size);
+ VRFY((NULL != data), "HDcalloc succeeded");
+
+ correct_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ VRFY((NULL != correct_buf), "HDcalloc succeeded");
+
+ for (i = 0; i < data_size / sizeof(*data); i++)
+ data[i] = (C_DATATYPE) GEN_DATA(i);
+
+ for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++)
+ correct_buf[i] =
+ (C_DATATYPE) (
+ (i % (dataset_dims[0] / (hsize_t) mpi_size * dataset_dims[1]))
+ + (i / (dataset_dims[0] / (hsize_t) mpi_size * dataset_dims[1]))
+ );
+
+ /* Create property list for collective dataset write */
+ plist_id = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((plist_id >= 0), "DXPL creation succeeded");
+
+ VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0),
+ "Set DXPL MPIO succeeded");
+
+ VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, data) >= 0),
+ "Dataset write succeeded");
+
+ if (data) HDfree(data);
+
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+
+ /* Verify the correct data was written */
+ read_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ VRFY((NULL != read_buf), "HDcalloc succeeded");
+
+ dset_id = H5Dopen2(file_id, "/" WRITE_UNSHARED_FILTERED_CHUNKS_DATASET_NAME, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset open succeeded");
+
+ VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0),
+ "Dataset read succeeded");
+
+ VRFY((0 == HDmemcmp(read_buf, correct_buf, correct_buf_size)),
+ "Data verification succeeded");
+
+ if (correct_buf) HDfree(correct_buf);
+ if (read_buf) HDfree(read_buf);
+
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+ VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
+ VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded");
+ VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
+
+ return;
+}
+
+/*
+ * Tests parallel write of filtered data in the case where
+ * more than one process is writing to a particular chunk
+ * in the operation. In this case, the chunks have to be
+ * redistributed before the operation so that only one process
+ * writes to a particular chunk.
+ *
+ * Programmer: Jordan Henderson
+ * 02/01/2017
+ */
+static void
+test_write_filtered_dataset_overlap(void)
+{
+ C_DATATYPE *data = NULL;
+ C_DATATYPE *read_buf = NULL;
+ C_DATATYPE *correct_buf = NULL;
+ hsize_t dataset_dims[WRITE_SHARED_FILTERED_CHUNKS_DATASET_DIMS];
+ hsize_t chunk_dims[WRITE_SHARED_FILTERED_CHUNKS_DATASET_DIMS];
+ hsize_t sel_dims[WRITE_SHARED_FILTERED_CHUNKS_DATASET_DIMS];
+ hsize_t start[WRITE_SHARED_FILTERED_CHUNKS_DATASET_DIMS];
+ hsize_t stride[WRITE_SHARED_FILTERED_CHUNKS_DATASET_DIMS];
+ hsize_t count[WRITE_SHARED_FILTERED_CHUNKS_DATASET_DIMS];
+ hsize_t block[WRITE_SHARED_FILTERED_CHUNKS_DATASET_DIMS];
+ size_t i, data_size, correct_buf_size;
+ hid_t file_id = -1, dset_id = -1, plist_id = -1;
+ hid_t filespace = -1, memspace = -1;
+
+ if (MAINPROCESS) HDputs("Testing write to shared filtered chunks");
+
+ CHECK_CUR_FILTER_AVAIL();
+
+ /* Set up file access property list with parallel I/O access */
+ plist_id = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((plist_id >= 0), "FAPL creation succeeded");
+
+ VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0),
+ "Set FAPL MPIO succeeded");
+
+ VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
+ "Set libver bounds succeeded");
+
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id);
+ VRFY((file_id >= 0), "Test file open succeeded");
+
+ VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+
+ /* Create the dataspace for the dataset */
+ dataset_dims[0] = (hsize_t) WRITE_SHARED_FILTERED_CHUNKS_NROWS;
+ dataset_dims[1] = (hsize_t) WRITE_SHARED_FILTERED_CHUNKS_NCOLS;
+ chunk_dims[0] = (hsize_t) WRITE_SHARED_FILTERED_CHUNKS_CH_NROWS;
+ chunk_dims[1] = (hsize_t) WRITE_SHARED_FILTERED_CHUNKS_CH_NCOLS;
+ sel_dims[0] = (hsize_t) DIM0_SCALE_FACTOR;
+ sel_dims[1] = (hsize_t) WRITE_SHARED_FILTERED_CHUNKS_CH_NCOLS * (hsize_t) DIM1_SCALE_FACTOR;
+
+ filespace = H5Screate_simple(WRITE_SHARED_FILTERED_CHUNKS_DATASET_DIMS, dataset_dims, NULL);
+ VRFY((filespace >= 0), "File dataspace creation succeeded");
+
+ memspace = H5Screate_simple(WRITE_SHARED_FILTERED_CHUNKS_DATASET_DIMS, sel_dims, NULL);
+ VRFY((memspace >= 0), "Memory dataspace creation succeeded");
+
+ /* Create chunked dataset */
+ plist_id = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY((plist_id >= 0), "DCPL creation succeeded");
+
+ VRFY((H5Pset_chunk(plist_id, WRITE_SHARED_FILTERED_CHUNKS_DATASET_DIMS, chunk_dims) >= 0),
+ "Chunk size set");
+
+ /* Add test filter to the pipeline */
+ VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
+
+ dset_id = H5Dcreate2(file_id, WRITE_SHARED_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
+ H5P_DEFAULT, plist_id, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset creation succeeded");
+
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+
+ /* Each process defines the dataset selection in memory and writes
+ * it to the hyperslab in the file
+ */
+ count[0] = (hsize_t) WRITE_SHARED_FILTERED_CHUNKS_NROWS / (hsize_t) WRITE_SHARED_FILTERED_CHUNKS_CH_NROWS;
+ count[1] = (hsize_t) WRITE_SHARED_FILTERED_CHUNKS_NCOLS / (hsize_t) WRITE_SHARED_FILTERED_CHUNKS_CH_NCOLS;
+ stride[0] = (hsize_t) WRITE_SHARED_FILTERED_CHUNKS_CH_NROWS;
+ stride[1] = (hsize_t) WRITE_SHARED_FILTERED_CHUNKS_CH_NCOLS;
+ block[0] = (hsize_t) WRITE_SHARED_FILTERED_CHUNKS_CH_NROWS / (hsize_t) mpi_size;
+ block[1] = (hsize_t) WRITE_SHARED_FILTERED_CHUNKS_CH_NCOLS;
+ start[0] = (hsize_t) mpi_rank * block[0];
+ start[1] = 0;
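+
+    /*
+     * Selection geometry: count[] covers every chunk in the dataset while
+     * block[0] is only CH_NROWS / mpi_size rows, so every rank writes a
+     * slice of every chunk and the library must reassign each shared
+     * chunk to a single owner before filtering.
+     */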
+
+ if (VERBOSE_MED) {
+ HDprintf("Process %d is writing with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n",
+ mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
+ HDfflush(stdout);
+ }
+
+ /* Select hyperslab in the file */
+ filespace = H5Dget_space(dset_id);
+ VRFY((filespace >= 0), "File dataspace retrieval succeeded");
+
+ VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
+ "Hyperslab selection succeeded");
+
+ /* Fill data buffer */
+ data_size = sel_dims[0] * sel_dims[1] * sizeof(*data);
+ correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*correct_buf);
+
+ data = (C_DATATYPE *) HDcalloc(1, data_size);
+ VRFY((NULL != data), "HDcalloc succeeded");
+
+ correct_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ VRFY((NULL != correct_buf), "HDcalloc succeeded");
+
+ for (i = 0; i < data_size / sizeof(*data); i++)
+ data[i] = (C_DATATYPE) GEN_DATA(i);
+
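+    /*
+     * Expected values, reading the formula below term by term: the first
+     * term advances by a full row (dataset_dims[1]) for every band of
+     * mpi_size interleaved rows, the second is the column index, and the
+     * third is the index of the rank that wrote the row within its band.
+     */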
+ for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++)
+ correct_buf[i] = (C_DATATYPE) (
+ (dataset_dims[1] * (i / ((hsize_t) mpi_size * dataset_dims[1])))
+ + (i % dataset_dims[1])
+ + (((i % ((hsize_t) mpi_size * dataset_dims[1])) / dataset_dims[1]) % dataset_dims[1])
+ );
+
+ /* Create property list for collective dataset write */
+ plist_id = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((plist_id >= 0), "DXPL creation succeeded");
+
+ VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0),
+ "Set DXPL MPIO succeeded");
+
+ VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, data) >= 0),
+ "Dataset write succeeded");
+
+ if (data) HDfree(data);
+
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+
+ /* Verify correct data was written */
+ read_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ VRFY((NULL != read_buf), "HDcalloc succeeded");
+
+ dset_id = H5Dopen2(file_id, "/" WRITE_SHARED_FILTERED_CHUNKS_DATASET_NAME, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset open succeeded");
+
+ VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0),
+ "Dataset read succeeded");
+
+ VRFY((0 == HDmemcmp(read_buf, correct_buf, correct_buf_size)),
+ "Data verification succeeded");
+
+ if (correct_buf) HDfree(correct_buf);
+ if (read_buf) HDfree(read_buf);
+
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+ VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
+ VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded");
+ VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
+
+ return;
+}
+
+/*
+ * Tests parallel write of filtered data in the case where
+ * a single process in the write operation has no selection
+ * in the dataset's dataspace. In this case, the process with
+ * no selection still has to participate in the collective
+ * space re-allocation for the filtered chunks and also must
+ * participate in the re-insertion of the filtered chunks
+ * into the chunk index.
+ *
+ * Programmer: Jordan Henderson
+ * 02/01/2017
+ */
+static void
+test_write_filtered_dataset_single_no_selection(void)
+{
+ C_DATATYPE *data = NULL;
+ C_DATATYPE *read_buf = NULL;
+ C_DATATYPE *correct_buf = NULL;
+ hsize_t dataset_dims[WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS];
+ hsize_t chunk_dims[WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS];
+ hsize_t sel_dims[WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS];
+ hsize_t start[WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS];
+ hsize_t stride[WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS];
+ hsize_t count[WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS];
+ hsize_t block[WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS];
+ size_t i, data_size, correct_buf_size;
+ size_t segment_length;
+ hid_t file_id = -1, dset_id = -1, plist_id = -1;
+ hid_t filespace = -1, memspace = -1;
+
+ if (MAINPROCESS) HDputs("Testing write to filtered chunks with a single process having no selection");
+
+ CHECK_CUR_FILTER_AVAIL();
+
+ /* Set up file access property list with parallel I/O access */
+ plist_id = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((plist_id >= 0), "FAPL creation succeeded");
+
+ VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0),
+ "Set FAPL MPIO succeeded");
+
+ VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
+ "Set libver bounds succeeded");
+
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id);
+ VRFY((file_id >= 0), "Test file open succeeded");
+
+ VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+
+ /* Create the dataspace for the dataset */
+ dataset_dims[0] = (hsize_t) WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NROWS;
+ dataset_dims[1] = (hsize_t) WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NCOLS;
+ chunk_dims[0] = (hsize_t) WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS;
+ chunk_dims[1] = (hsize_t) WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NCOLS;
+ sel_dims[0] = (hsize_t) WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS;
+ sel_dims[1] = (hsize_t) WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NCOLS;
+
+ if (mpi_rank == WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NO_SELECT_PROC)
+ sel_dims[0] = sel_dims[1] = 0;
+
+ filespace = H5Screate_simple(WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, dataset_dims, NULL);
+ VRFY((filespace >= 0), "File dataspace creation succeeded");
+
+ memspace = H5Screate_simple(WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, sel_dims, NULL);
+ VRFY((memspace >= 0), "Memory dataspace creation succeeded");
+
+ /* Create chunked dataset */
+ plist_id = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY((plist_id >= 0), "DCPL creation succeeded");
+
+ VRFY((H5Pset_chunk(plist_id, WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, chunk_dims) >= 0),
+ "Chunk size set");
+
+ /* Add test filter to the pipeline */
+ VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
+
+ dset_id = H5Dcreate2(file_id, WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
+ H5P_DEFAULT, plist_id, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset creation succeeded");
+
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+
+ /* Each process defines the dataset selection in memory and writes
+ * it to the hyperslab in the file
+ */
+ count[0] = 1;
+ count[1] = (hsize_t) WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NCOLS / (hsize_t) WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NCOLS;
+ stride[0] = (hsize_t) WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS;
+ stride[1] = (hsize_t) WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NCOLS;
+ block[0] = (hsize_t) WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS;
+ block[1] = (hsize_t) WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NCOLS;
+ start[0] = (hsize_t) mpi_rank * (hsize_t) WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS * count[0];
+ start[1] = 0;
+
+ if (VERBOSE_MED) {
+ HDprintf("Process %d is writing with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n",
+ mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
+ HDfflush(stdout);
+ }
+
+ /* Select hyperslab in the file */
+ filespace = H5Dget_space(dset_id);
+ VRFY((filespace >= 0), "File dataspace retrieval succeeded");
+
+ if (mpi_rank == WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NO_SELECT_PROC)
+ VRFY((H5Sselect_none(filespace) >= 0), "Select none succeeded");
+ else
+ VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
+ "Hyperslab selection succeeded");
+
+ /* Fill data buffer */
+ data_size = sel_dims[0] * sel_dims[1] * sizeof(*data);
+ correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*correct_buf);
+
+ data = (C_DATATYPE *) HDcalloc(1, data_size);
+ VRFY((NULL != data), "HDcalloc succeeded");
+
+ correct_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ VRFY((NULL != correct_buf), "HDcalloc succeeded");
+
+ for (i = 0; i < data_size / sizeof(*data); i++)
+ data[i] = (C_DATATYPE) GEN_DATA(i);
+
+ for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++)
+ correct_buf[i] =
+ (C_DATATYPE) (
+ (i % (dataset_dims[0] / (hsize_t) mpi_size * dataset_dims[1]))
+ + (i / (dataset_dims[0] / (hsize_t) mpi_size * dataset_dims[1]))
+ );
+
+ /* Compute the correct offset into the buffer for the process having no selection and clear it */
+ segment_length = dataset_dims[0] * dataset_dims[1] / (hsize_t) mpi_size;
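+
+    /*
+     * segment_length is the per-rank element count, so the HDmemset below
+     * zeroes exactly the no-select rank's share of correct_buf; those
+     * chunks are never written and should read back as the default
+     * (zero) fill value.
+     */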
+    HDmemset(correct_buf + ((size_t) WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NO_SELECT_PROC * segment_length),
+            0, segment_length * sizeof(*correct_buf));
+
+ /* Create property list for collective dataset write */
+ plist_id = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((plist_id >= 0), "DXPL creation succeeded");
+
+ VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0),
+ "Set DXPL MPIO succeeded");
+
+ VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, data) >= 0),
+ "Dataset write succeeded");
+
+ if (data) HDfree(data);
+
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+
+ /* Verify the correct data was written */
+ read_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ VRFY((NULL != read_buf), "HDcalloc succeeded");
+
+ dset_id = H5Dopen2(file_id, "/" WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset open succeeded");
+
+ VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0),
+ "Dataset read succeeded");
+
+ VRFY((0 == HDmemcmp(read_buf, correct_buf, correct_buf_size)),
+ "Data verification succeeded");
+
+ if (correct_buf) HDfree(correct_buf);
+ if (read_buf) HDfree(read_buf);
+
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+ VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
+ VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded");
+ VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
+
+ return;
+}
+
+/*
+ * Tests parallel write of filtered data in the case
+ * where no process in the write operation has a
+ * selection in the dataset's dataspace. This test is
+ * to ensure that there are no assertion failures or
+ * similar issues due to size 0 allocations and the
+ * like. In this case, the file and dataset are created
+ * but the dataset is populated with the default fill
+ * value.
+ *
+ * Programmer: Jordan Henderson
+ * 02/02/2017
+ */
+static void
+test_write_filtered_dataset_all_no_selection(void)
+{
+ C_DATATYPE *data = NULL;
+ C_DATATYPE *read_buf = NULL;
+ C_DATATYPE *correct_buf = NULL;
+ hsize_t dataset_dims[WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS];
+ hsize_t chunk_dims[WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS];
+ hsize_t sel_dims[WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS];
+ size_t i, data_size, correct_buf_size;
+ hid_t file_id = -1, dset_id = -1, plist_id = -1;
+ hid_t filespace = -1, memspace = -1;
+
+ if (MAINPROCESS) HDputs("Testing write to filtered chunks with all processes having no selection");
+
+ CHECK_CUR_FILTER_AVAIL();
+
+ /* Set up file access property list with parallel I/O access */
+ plist_id = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((plist_id >= 0), "FAPL creation succeeded");
+
+ VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0),
+ "Set FAPL MPIO succeeded");
+
+ VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
+ "Set libver bounds succeeded");
+
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id);
+ VRFY((file_id >= 0), "Test file open succeeded");
+
+ VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+
+ /* Create the dataspace for the dataset */
+ dataset_dims[0] = (hsize_t) WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_NROWS;
+ dataset_dims[1] = (hsize_t) WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_NCOLS;
+ chunk_dims[0] = (hsize_t) WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS;
+ chunk_dims[1] = (hsize_t) WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_CH_NCOLS;
+ sel_dims[0] = sel_dims[1] = 0;
+
+ filespace = H5Screate_simple(WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, dataset_dims, NULL);
+ VRFY((filespace >= 0), "File dataspace creation succeeded");
+
+ memspace = H5Screate_simple(WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, sel_dims, NULL);
+ VRFY((memspace >= 0), "Memory dataspace creation succeeded");
+
+ /* Create chunked dataset */
+ plist_id = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY((plist_id >= 0), "DCPL creation succeeded");
+
+ VRFY((H5Pset_chunk(plist_id, WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, chunk_dims) >= 0),
+ "Chunk size set");
+
+ /* Add test filter to the pipeline */
+ VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
+
+ dset_id = H5Dcreate2(file_id, WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
+ H5P_DEFAULT, plist_id, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset creation succeeded");
+
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+
+ filespace = H5Dget_space(dset_id);
+ VRFY((filespace >= 0), "File dataspace retrieval succeeded");
+
+ VRFY((H5Sselect_none(filespace) >= 0), "Select none succeeded");
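+
+    /*
+     * With every rank selecting nothing, the H5Dwrite below transfers no
+     * data: the dataset stays at its default fill value of 0, which is
+     * why the calloc'd (all-zero) correct_buf needs no further
+     * initialization before the comparison.
+     */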
+
+ /* Fill data buffer */
+ data_size = sel_dims[0] * sel_dims[1] * sizeof(*data);
+ correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*correct_buf);
+
+ data = (C_DATATYPE *) HDcalloc(1, data_size);
+ VRFY((NULL != data), "HDcalloc succeeded");
+
+ correct_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ VRFY((NULL != correct_buf), "HDcalloc succeeded");
+
+ for (i = 0; i < data_size / sizeof(*data); i++)
+ data[i] = (C_DATATYPE) GEN_DATA(i);
+
+ /* Create property list for collective dataset write */
+ plist_id = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((plist_id >= 0), "DXPL creation succeeded");
+
+ VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0),
+ "Set DXPL MPIO succeeded");
+
+ VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, data) >= 0),
+ "Dataset write succeeded");
+
+ if (data) HDfree(data);
+
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+
+ /* Verify the correct data was written */
+ read_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ VRFY((NULL != read_buf), "HDcalloc succeeded");
+
+ dset_id = H5Dopen2(file_id, "/" WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset open succeeded");
+
+ VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0),
+ "Dataset read succeeded");
+
+ VRFY((0 == HDmemcmp(read_buf, correct_buf, correct_buf_size)),
+ "Data verification succeeded");
+
+ if (correct_buf) HDfree(correct_buf);
+ if (read_buf) HDfree(read_buf);
+
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+ VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
+ VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded");
+ VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
+
+ return;
+}
+
+/*
+ * Tests parallel write of filtered data by using
+ * point selections instead of hyperslab selections.
+ *
+ * Programmer: Jordan Henderson
+ * 02/02/2017
+ */
+static void
+test_write_filtered_dataset_point_selection(void)
+{
+ C_DATATYPE *data = NULL;
+ C_DATATYPE *correct_buf = NULL;
+ C_DATATYPE *read_buf = NULL;
+ hsize_t *coords = NULL;
+ hsize_t dataset_dims[WRITE_POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS];
+ hsize_t chunk_dims[WRITE_POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS];
+ hsize_t sel_dims[WRITE_POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS];
+ size_t i, j, data_size, correct_buf_size;
+ size_t num_points;
+ hid_t file_id = -1, dset_id = -1, plist_id = -1;
+ hid_t filespace = -1, memspace = -1;
+
+ if (MAINPROCESS) HDputs("Testing write to filtered chunks with point selection");
+
+ CHECK_CUR_FILTER_AVAIL();
+
+ /* Set up file access property list with parallel I/O access */
+ plist_id = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((plist_id >= 0), "FAPL creation succeeded");
+
+ VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0),
+ "Set FAPL MPIO succeeded");
+
+ VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
+ "Set libver bounds succeeded");
+
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id);
+ VRFY((file_id >= 0), "Test file open succeeded");
+
+ VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+
+ /* Create the dataspace for the dataset */
+ dataset_dims[0] = (hsize_t) WRITE_POINT_SELECTION_FILTERED_CHUNKS_NROWS;
+ dataset_dims[1] = (hsize_t) WRITE_POINT_SELECTION_FILTERED_CHUNKS_NCOLS;
+ chunk_dims[0] = (hsize_t) WRITE_POINT_SELECTION_FILTERED_CHUNKS_CH_NROWS;
+ chunk_dims[1] = (hsize_t) WRITE_POINT_SELECTION_FILTERED_CHUNKS_CH_NCOLS;
+ sel_dims[0] = (hsize_t) WRITE_POINT_SELECTION_FILTERED_CHUNKS_NROWS / (hsize_t) mpi_size;
+ sel_dims[1] = (hsize_t) WRITE_POINT_SELECTION_FILTERED_CHUNKS_NCOLS;
+
+    filespace = H5Screate_simple(WRITE_POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, dataset_dims, NULL);
+ VRFY((filespace >= 0), "File dataspace creation succeeded");
+
+ memspace = H5Screate_simple(WRITE_POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, sel_dims, NULL);
+ VRFY((memspace >= 0), "Memory dataspace creation succeeded");
+
+ /* Create chunked dataset */
+ plist_id = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY((plist_id >= 0), "DCPL creation succeeded");
+
+ VRFY((H5Pset_chunk(plist_id, WRITE_POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, chunk_dims) >= 0),
+ "Chunk size set");
+
+ /* Add test filter to the pipeline */
+ VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
+
+ dset_id = H5Dcreate2(file_id, WRITE_POINT_SELECTION_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
+ H5P_DEFAULT, plist_id, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset creation succeeded");
+
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+
+ /* Set up point selection */
+ filespace = H5Dget_space(dset_id);
+ VRFY((filespace >= 0), "File dataspace retrieval succeeded");
+
+    num_points = (size_t) ((hsize_t) WRITE_POINT_SELECTION_FILTERED_CHUNKS_NROWS * (hsize_t) WRITE_POINT_SELECTION_FILTERED_CHUNKS_NCOLS / (hsize_t) mpi_size);
+ coords = (hsize_t *) HDcalloc(1, 2 * num_points * sizeof(*coords));
+ VRFY((NULL != coords), "Coords HDcalloc succeeded");
+
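+    /*
+     * The loop below lays the points out row by row: the column
+     * coordinate (j > 0) cycles through every column, while the row
+     * coordinate starts at this rank's index and advances by mpi_size
+     * after each full row of points, giving a round-robin split of the
+     * rows across ranks.
+     */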
+ for (i = 0; i < num_points; i++)
+ for (j = 0; j < WRITE_POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS; j++)
+ coords[(i * WRITE_POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS) + j] =
+ (j > 0) ? (i % (hsize_t) WRITE_POINT_SELECTION_FILTERED_CHUNKS_NCOLS)
+ : ((hsize_t) mpi_rank + ((hsize_t) mpi_size * (i / (hsize_t) WRITE_POINT_SELECTION_FILTERED_CHUNKS_NCOLS)));
+
+    VRFY((H5Sselect_elements(filespace, H5S_SELECT_SET, (hsize_t) num_points, (const hsize_t *) coords) >= 0),
+ "Point selection succeeded");
+
+ /* Fill data buffer */
+ data_size = sel_dims[0] * sel_dims[1] * sizeof(*data);
+ correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*correct_buf);
+
+ data = (C_DATATYPE *) HDcalloc(1, data_size);
+ VRFY((NULL != data), "HDcalloc succeeded");
+
+ correct_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ VRFY((NULL != correct_buf), "HDcalloc succeeded");
+
+ for (i = 0; i < data_size / sizeof(*data); i++)
+ data[i] = (C_DATATYPE) GEN_DATA(i);
+
+ for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++)
+ correct_buf[i] = (C_DATATYPE) (
+ (dataset_dims[1] * (i / ((hsize_t) mpi_size * dataset_dims[1])))
+ + (i % dataset_dims[1])
+ + (((i % ((hsize_t) mpi_size * dataset_dims[1])) / dataset_dims[1]) % dataset_dims[1])
+ );
+
+ /* Create property list for collective dataset write */
+ plist_id = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((plist_id >= 0), "DXPL creation succeeded");
+
+ VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0),
+ "Set DXPL MPIO succeeded");
+
+ VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, data) >= 0),
+ "Dataset write succeeded");
+
+ if (data) HDfree(data);
+
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+
+ /* Verify the correct data was written */
+ read_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ VRFY((NULL != read_buf), "HDcalloc succeeded");
+
+ dset_id = H5Dopen2(file_id, "/" WRITE_POINT_SELECTION_FILTERED_CHUNKS_DATASET_NAME, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset open succeeded");
+
+ VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0),
+ "Dataset read succeeded");
+
+ VRFY((0 == HDmemcmp(read_buf, correct_buf, correct_buf_size)),
+ "Data verification succeeded");
+
+ if (coords) HDfree(coords);
+ if (correct_buf) HDfree(correct_buf);
+ if (read_buf) HDfree(read_buf);
+
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+ VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
+ VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded");
+ VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
+
+ return;
+}
+
+/*
+ * Tests parallel write of filtered data in the case where
+ * each process writes an equal amount of data to each chunk
+ * in the dataset. Each chunk is distributed among the
+ * processes in round-robin fashion by blocks of size 1 until
+ * the whole chunk is selected, leading to an interleaved
+ * write pattern.
+ *
+ * Programmer: Jordan Henderson
+ * 02/02/2017
+ */
+static void
+test_write_filtered_dataset_interleaved_write(void)
+{
+ C_DATATYPE *data = NULL;
+ C_DATATYPE *read_buf = NULL;
+ C_DATATYPE *correct_buf = NULL;
+ hsize_t dataset_dims[INTERLEAVED_WRITE_FILTERED_DATASET_DIMS];
+ hsize_t chunk_dims[INTERLEAVED_WRITE_FILTERED_DATASET_DIMS];
+ hsize_t sel_dims[INTERLEAVED_WRITE_FILTERED_DATASET_DIMS];
+ hsize_t start[INTERLEAVED_WRITE_FILTERED_DATASET_DIMS];
+ hsize_t stride[INTERLEAVED_WRITE_FILTERED_DATASET_DIMS];
+ hsize_t count[INTERLEAVED_WRITE_FILTERED_DATASET_DIMS];
+ hsize_t block[INTERLEAVED_WRITE_FILTERED_DATASET_DIMS];
+ size_t i, data_size, correct_buf_size;
+ hid_t file_id = -1, dset_id = -1, plist_id = -1;
+ hid_t filespace = -1, memspace = -1;
+
+ if (MAINPROCESS) HDputs("Testing interleaved write to filtered chunks");
+
+ CHECK_CUR_FILTER_AVAIL();
+
+ /* Set up file access property list with parallel I/O access */
+ plist_id = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((plist_id >= 0), "FAPL creation succeeded");
+
+ VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0),
+ "Set FAPL MPIO succeeded");
+
+ VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
+ "Set libver bounds succeeded");
+
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id);
+ VRFY((file_id >= 0), "Test file open succeeded");
+
+ VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+
+ /* Create the dataspace for the dataset */
+ dataset_dims[0] = (hsize_t) INTERLEAVED_WRITE_FILTERED_DATASET_NROWS;
+ dataset_dims[1] = (hsize_t) INTERLEAVED_WRITE_FILTERED_DATASET_NCOLS;
+ chunk_dims[0] = (hsize_t) INTERLEAVED_WRITE_FILTERED_DATASET_CH_NROWS;
+ chunk_dims[1] = (hsize_t) INTERLEAVED_WRITE_FILTERED_DATASET_CH_NCOLS;
+ sel_dims[0] = (hsize_t) (INTERLEAVED_WRITE_FILTERED_DATASET_NROWS / mpi_size);
+ sel_dims[1] = (hsize_t) INTERLEAVED_WRITE_FILTERED_DATASET_NCOLS;
+
+ filespace = H5Screate_simple(INTERLEAVED_WRITE_FILTERED_DATASET_DIMS, dataset_dims, NULL);
+ VRFY((filespace >= 0), "File dataspace creation succeeded");
+
+ memspace = H5Screate_simple(INTERLEAVED_WRITE_FILTERED_DATASET_DIMS, sel_dims, NULL);
+ VRFY((memspace >= 0), "Memory dataspace creation succeeded");
+
+ /* Create chunked dataset */
+ plist_id = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY((plist_id >= 0), "DCPL creation succeeded");
+
+ VRFY((H5Pset_chunk(plist_id, INTERLEAVED_WRITE_FILTERED_DATASET_DIMS, chunk_dims) >= 0),
+ "Chunk size set");
+
+ /* Add test filter to the pipeline */
+ VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
+
+ dset_id = H5Dcreate2(file_id, INTERLEAVED_WRITE_FILTERED_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
+ H5P_DEFAULT, plist_id, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset creation succeeded");
+
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+
+ /* Each process defines the dataset selection in memory and writes
+ * it to the hyperslab in the file
+ */
+ count[0] = (hsize_t) (INTERLEAVED_WRITE_FILTERED_DATASET_NROWS / INTERLEAVED_WRITE_FILTERED_DATASET_CH_NROWS);
+ count[1] = (hsize_t) (INTERLEAVED_WRITE_FILTERED_DATASET_NCOLS / INTERLEAVED_WRITE_FILTERED_DATASET_CH_NCOLS);
+ stride[0] = (hsize_t) INTERLEAVED_WRITE_FILTERED_DATASET_CH_NROWS;
+ stride[1] = (hsize_t) INTERLEAVED_WRITE_FILTERED_DATASET_CH_NCOLS;
+ block[0] = 1;
+ block[1] = (hsize_t) INTERLEAVED_WRITE_FILTERED_DATASET_CH_NCOLS;
+ start[0] = (hsize_t) mpi_rank;
+ start[1] = 0;
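+
+    /*
+     * Selection geometry: block[0] == 1 with stride[0] equal to the chunk
+     * height means rank r selects rows r, r + CH_NROWS, r + 2*CH_NROWS,
+     * and so on, so the rows of each chunk are split one per rank in an
+     * interleaved pattern.
+     */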
+
+ if (VERBOSE_MED) {
+ HDprintf("Process %d is writing with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n",
+ mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
+ HDfflush(stdout);
+ }
+
+ /* Select hyperslab in the file */
+ filespace = H5Dget_space(dset_id);
+ VRFY((filespace >= 0), "File dataspace retrieval succeeded");
+
+ VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
+ "Hyperslab selection succeeded");
+
+ /* Fill data buffer */
+ data_size = sel_dims[0] * sel_dims[1] * sizeof(*data);
+ correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*correct_buf);
+
+ data = (C_DATATYPE *) HDcalloc(1, data_size);
+ VRFY((NULL != data), "HDcalloc succeeded");
+
+ correct_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ VRFY((NULL != correct_buf), "HDcalloc succeeded");
+
+ for (i = 0; i < data_size / sizeof(*data); i++)
+ data[i] = (C_DATATYPE) GEN_DATA(i);
+
+ for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++)
+ /* Add Column Index */
+ correct_buf[i] =
+ (C_DATATYPE) (
+ (i % (hsize_t) INTERLEAVED_WRITE_FILTERED_DATASET_NCOLS)
+
+ /* Add the Row Index */
+ + ((i % (hsize_t) (mpi_size * INTERLEAVED_WRITE_FILTERED_DATASET_NCOLS)) / (hsize_t) INTERLEAVED_WRITE_FILTERED_DATASET_NCOLS)
+
+ /* Add the amount that gets added when a rank moves down to its next section vertically in the dataset */
+ + ((hsize_t) INTERLEAVED_WRITE_FILTERED_DATASET_NCOLS * (i / (hsize_t) (mpi_size * INTERLEAVED_WRITE_FILTERED_DATASET_NCOLS)))
+ );
+
+ /* Create property list for collective dataset write */
+ plist_id = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((plist_id >= 0), "DXPL creation succeeded");
+
+ VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0),
+ "Set DXPL MPIO succeeded");
+
+ VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, data) >= 0),
+ "Dataset write succeeded");
+
+ if (data) HDfree(data);
+
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+
+ /* Verify the correct data was written */
+ read_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ VRFY((NULL != read_buf), "HDcalloc succeeded");
+
+ dset_id = H5Dopen2(file_id, "/" INTERLEAVED_WRITE_FILTERED_DATASET_NAME, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset open succeeded");
+
+ VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0),
+ "Dataset read succeeded");
+
+ VRFY((0 == HDmemcmp(read_buf, correct_buf, correct_buf_size)),
+ "Data verification succeeded");
+
+ if (correct_buf) HDfree(correct_buf);
+ if (read_buf) HDfree(read_buf);
+
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+ VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
+ VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded");
+ VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
+
+ return;
+}
+
+/*
+ * Tests parallel write of filtered data in the case where
+ * the dataset has 3 dimensions and each process writes
+ * to its own "page" in the 3rd dimension.
+ *
+ * Programmer: Jordan Henderson
+ * 02/06/2017
+ */
+static void
+test_write_3d_filtered_dataset_no_overlap_separate_pages(void)
+{
+ C_DATATYPE *data = NULL;
+ C_DATATYPE *read_buf = NULL;
+ C_DATATYPE *correct_buf = NULL;
+ hsize_t dataset_dims[WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS];
+ hsize_t chunk_dims[WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS];
+ hsize_t sel_dims[WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS];
+ hsize_t start[WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS];
+ hsize_t stride[WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS];
+ hsize_t count[WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS];
+ hsize_t block[WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS];
+ size_t i, data_size, correct_buf_size;
+ hid_t file_id = -1, dset_id = -1, plist_id = -1;
+ hid_t filespace = -1, memspace = -1;
+
+ if (MAINPROCESS) HDputs("Testing write to unshared filtered chunks on separate pages in 3D dataset");
+
+ CHECK_CUR_FILTER_AVAIL();
+
+ /* Set up file access property list with parallel I/O access */
+ plist_id = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((plist_id >= 0), "FAPL creation succeeded");
+
+ VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0),
+ "Set FAPL MPIO succeeded");
+
+ VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
+ "Set libver bounds succeeded");
+
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id);
+ VRFY((file_id >= 0), "Test file open succeeded");
+
+ VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+
+ /* Create the dataspace for the dataset */
+ dataset_dims[0] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NROWS;
+ dataset_dims[1] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NCOLS;
+ dataset_dims[2] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DEPTH;
+ chunk_dims[0] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NROWS;
+ chunk_dims[1] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NCOLS;
+ chunk_dims[2] = 1;
+ sel_dims[0] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NROWS;
+ sel_dims[1] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NCOLS;
+ sel_dims[2] = 1;
+
+    filespace = H5Screate_simple(WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS, dataset_dims, NULL);
+    VRFY((filespace >= 0), "File dataspace creation succeeded");
+
+    memspace = H5Screate_simple(WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS, sel_dims, NULL);
+ VRFY((memspace >= 0), "Memory dataspace creation succeeded");
+
+ /* Create chunked dataset */
+ plist_id = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY((plist_id >= 0), "DCPL creation succeeded");
+
+ VRFY((H5Pset_chunk(plist_id, WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS, chunk_dims) >= 0),
+ "Chunk size set");
+
+ /* Add test filter to the pipeline */
+ VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
+
+ dset_id = H5Dcreate2(file_id, WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
+ H5P_DEFAULT, plist_id, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset creation succeeded");
+
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+
+ /* Each process defines the dataset selection in memory and writes
+ * it to the hyperslab in the file
+ */
+ count[0] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NROWS / (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NROWS;
+ count[1] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NCOLS / (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NCOLS;
+ count[2] = 1;
+ stride[0] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NROWS;
+ stride[1] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NCOLS;
+ stride[2] = 1;
+ block[0] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NROWS;
+ block[1] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NCOLS;
+ block[2] = 1;
+ start[0] = 0;
+ start[1] = 0;
+ start[2] = (hsize_t) mpi_rank;
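+
+    /*
+     * start[2] == mpi_rank pins each rank to its own depth "page", and
+     * since chunk_dims[2] == 1 a chunk never spans pages, so despite the
+     * full row/column coverage no chunk is shared between ranks.
+     */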
+
+ if (VERBOSE_MED) {
+ HDprintf("Process %d is writing with count[ %llu, %llu, %llu ], stride[ %llu, %llu, %llu ], start[ %llu, %llu, %llu ], block size[ %llu, %llu, %llu ]\n",
+ mpi_rank, count[0], count[1], count[2], stride[0], stride[1], stride[2], start[0], start[1], start[2], block[0], block[1], block[2]);
+ HDfflush(stdout);
+ }
+
+ /* Select hyperslab in the file */
+ filespace = H5Dget_space(dset_id);
+ VRFY((filespace >= 0), "File dataspace retrieval succeeded");
+
+ VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
+ "Hyperslab selection succeeded");
+
+ /* Fill data buffer */
+ data_size = sel_dims[0] * sel_dims[1] * sel_dims[2] * sizeof(*data);
+ correct_buf_size = dataset_dims[0] * dataset_dims[1] * dataset_dims[2] * sizeof(*correct_buf);
+
+ data = (C_DATATYPE *) HDcalloc(1, data_size);
+ VRFY((NULL != data), "HDcalloc succeeded");
+
+ correct_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ VRFY((NULL != correct_buf), "HDcalloc succeeded");
+
+ for (i = 0; i < data_size / sizeof(*data); i++)
+ data[i] = (C_DATATYPE) GEN_DATA(i);
+
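+    /*
+     * Expected values: with depth varying fastest and one page per rank
+     * (the separate-pages setup implies DEPTH matches mpi_size),
+     * consecutive flattened indices cycle through the ranks, giving the
+     * (i % mpi_size) page term plus the (i / mpi_size) per-page offset.
+     */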
+ for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++)
+ correct_buf[i] = (C_DATATYPE) ((i % (hsize_t) mpi_size) + (i / (hsize_t) mpi_size));
+
+ /* Create property list for collective dataset write */
+ plist_id = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((plist_id >= 0), "DXPL creation succeeded");
+
+ VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0),
+ "Set DXPL MPIO succeeded");
+
+ VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, data) >= 0),
+ "Dataset write succeeded");
+
+ if (data) HDfree(data);
+
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+
+ /* Verify the correct data was written */
+ read_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ VRFY((NULL != read_buf), "HDcalloc succeeded");
+
+ dset_id = H5Dopen2(file_id, "/" WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_NAME, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset open succeeded");
+
+ VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0),
+ "Dataset read succeeded");
+
+ VRFY((0 == HDmemcmp(read_buf, correct_buf, correct_buf_size)),
+ "Data verification succeeded");
+
+ if (correct_buf) HDfree(correct_buf);
+ if (read_buf) HDfree(read_buf);
+
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+ VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
+ VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded");
+ VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
+
+ return;
+}
+
+/*
+ * Tests parallel write of filtered data in the case where
+ * the dataset has 3 dimensions and each process writes
+ * to each "page" in the 3rd dimension. However, no chunk
+ * on a given "page" is written to by more than one process.
+ *
+ * Programmer: Jordan Henderson
+ * 02/06/2017
+ */
+static void
+test_write_3d_filtered_dataset_no_overlap_same_pages(void)
+{
+ C_DATATYPE *data = NULL;
+ C_DATATYPE *read_buf = NULL;
+ C_DATATYPE *correct_buf = NULL;
+ hsize_t dataset_dims[WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS];
+ hsize_t chunk_dims[WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS];
+ hsize_t sel_dims[WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS];
+ hsize_t start[WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS];
+ hsize_t stride[WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS];
+ hsize_t count[WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS];
+ hsize_t block[WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS];
+ size_t i, data_size, correct_buf_size;
+ hid_t file_id, dset_id, plist_id;
+ hid_t filespace, memspace;
+
+ if (MAINPROCESS) HDputs("Testing write to unshared filtered chunks on the same pages in 3D dataset");
+
+ CHECK_CUR_FILTER_AVAIL();
+
+ /* Set up file access property list with parallel I/O access */
+ plist_id = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((plist_id >= 0), "FAPL creation succeeded");
+
+ VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0),
+ "Set FAPL MPIO succeeded");
+
+ VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
+ "Set libver bounds succeeded");
+
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id);
+ VRFY((file_id >= 0), "Test file open succeeded");
+
+ VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+
+ /* Create the dataspace for the dataset */
+ dataset_dims[0] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_NROWS;
+ dataset_dims[1] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_NCOLS;
+ dataset_dims[2] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DEPTH;
+ chunk_dims[0] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NROWS;
+ chunk_dims[1] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NCOLS;
+ chunk_dims[2] = 1;
+ sel_dims[0] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NROWS;
+ sel_dims[1] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_NCOLS;
+ sel_dims[2] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DEPTH;
+
+ filespace = H5Screate_simple(WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS, dataset_dims, NULL);
+ VRFY((filespace >= 0), "File dataspace creation succeeded");
+
+ memspace = H5Screate_simple(WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS, sel_dims, NULL);
+ VRFY((memspace >= 0), "Memory dataspace creation succeeded");
+
+ /* Create chunked dataset */
+ plist_id = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY((plist_id >= 0), "DCPL creation succeeded");
+
+ VRFY((H5Pset_chunk(plist_id, WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS, chunk_dims) >= 0),
+ "Chunk size set");
+
+ /* Add test filter to the pipeline */
+ VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
+
+ dset_id = H5Dcreate2(file_id, WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
+ H5P_DEFAULT, plist_id, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset creation succeeded");
+
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+
+ /* Each process defines the dataset selection in memory and writes
+ * it to the hyperslab in the file
+ */
+ count[0] = 1;
+ count[1] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_NCOLS / (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NCOLS;
+ count[2] = (hsize_t) mpi_size;
+ stride[0] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NROWS;
+ stride[1] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NCOLS;
+ stride[2] = 1;
+ block[0] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NROWS;
+ block[1] = (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NCOLS;
+ block[2] = 1;
+ start[0] = ((hsize_t) mpi_rank * (hsize_t) WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NROWS * count[0]);
+ start[1] = 0;
+ start[2] = 0;
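+
+    /* Each rank selects a single row of chunks (count[0] = 1, with start[0]
+     * offset by the rank) and extends that selection across mpi_size "pages"
+     * (count[2] = mpi_size), so the ranks share pages but not chunks.
+     */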
+
+ if (VERBOSE_MED) {
+ HDprintf("Process %d is writing with count[ %llu, %llu, %llu ], stride[ %llu, %llu, %llu ], start[ %llu, %llu, %llu ], block size[ %llu, %llu, %llu ]\n",
+ mpi_rank, count[0], count[1], count[2], stride[0], stride[1], stride[2], start[0], start[1], start[2], block[0], block[1], block[2]);
+ HDfflush(stdout);
+ }
+
+ /* Select hyperslab in the file */
+ filespace = H5Dget_space(dset_id);
+ VRFY((filespace >= 0), "File dataspace retrieval succeeded");
+
+ VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
+ "Hyperslab selection succeeded");
+
+ /* Fill data buffer */
+ data_size = sel_dims[0] * sel_dims[1] * sel_dims[2] * sizeof(*data);
+ correct_buf_size = dataset_dims[0] * dataset_dims[1] * dataset_dims[2] * sizeof(*correct_buf);
+
+ data = (C_DATATYPE *) HDcalloc(1, data_size);
+ VRFY((NULL != data), "HDcalloc succeeded");
+
+ correct_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ VRFY((NULL != correct_buf), "HDcalloc succeeded");
+
+ for (i = 0; i < data_size / sizeof(*data); i++)
+ data[i] = (C_DATATYPE) GEN_DATA(i);
+
+ for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++)
+ correct_buf[i] = (C_DATATYPE) (
+ (i % (dataset_dims[0] * dataset_dims[1]))
+ + (i / (dataset_dims[0] * dataset_dims[1]))
+ );
+
+ /* Create property list for collective dataset write */
+ plist_id = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((plist_id >= 0), "DXPL creation succeeded");
+
+ VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0),
+ "Set DXPL MPIO succeeded");
+
+ VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, data) >= 0),
+ "Dataset write succeeded");
+
+ if (data) HDfree(data);
+
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+
+ /* Verify the correct data was written */
+ read_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ VRFY((NULL != read_buf), "HDcalloc succeeded");
+
+ dset_id = H5Dopen2(file_id, "/" WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_NAME, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset open succeeded");
+
+ VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0),
+ "Dataset read succeeded");
+
+ VRFY((0 == HDmemcmp(read_buf, correct_buf, correct_buf_size)),
+ "Data verification succeeded");
+
+ if (correct_buf) HDfree(correct_buf);
+ if (read_buf) HDfree(read_buf);
+
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+ VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
+ VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded");
+ VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
+
+ return;
+}
+
+/*
+ * Tests parallel write of filtered data in the case where
+ * the dataset has 3 dimensions and each process writes
+ * to each "page" in the 3rd dimension. Further, each chunk
+ * in each "page" is written to equally by all processes.
+ *
+ * Programmer: Jordan Henderson
+ * 02/06/2017
+ */
+static void
+test_write_3d_filtered_dataset_overlap(void)
+{
+ C_DATATYPE *data = NULL;
+ C_DATATYPE *read_buf = NULL;
+ C_DATATYPE *correct_buf = NULL;
+ hsize_t dataset_dims[WRITE_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS];
+ hsize_t chunk_dims[WRITE_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS];
+ hsize_t sel_dims[WRITE_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS];
+ hsize_t start[WRITE_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS];
+ hsize_t stride[WRITE_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS];
+ hsize_t count[WRITE_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS];
+ hsize_t block[WRITE_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS];
+ size_t i, data_size, correct_buf_size;
+ hid_t file_id = -1, dset_id = -1, plist_id = -1;
+ hid_t filespace = -1, memspace = -1;
+
+ if (MAINPROCESS) HDputs("Testing write to shared filtered chunks in 3D dataset");
+
+ CHECK_CUR_FILTER_AVAIL();
+
+ /* Set up file access property list with parallel I/O access */
+ plist_id = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((plist_id >= 0), "FAPL creation succeeded");
+
+ VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0),
+ "Set FAPL MPIO succeeded");
+
+ VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
+ "Set libver bounds succeeded");
+
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id);
+ VRFY((file_id >= 0), "Test file open succeeded");
+
+ VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+
+ /* Create the dataspace for the dataset */
+ dataset_dims[0] = (hsize_t) WRITE_SHARED_FILTERED_CHUNKS_3D_NROWS;
+ dataset_dims[1] = (hsize_t) WRITE_SHARED_FILTERED_CHUNKS_3D_NCOLS;
+ dataset_dims[2] = (hsize_t) WRITE_SHARED_FILTERED_CHUNKS_3D_DEPTH;
+ chunk_dims[0] = (hsize_t) WRITE_SHARED_FILTERED_CHUNKS_3D_CH_NROWS;
+ chunk_dims[1] = (hsize_t) WRITE_SHARED_FILTERED_CHUNKS_3D_CH_NCOLS;
+ chunk_dims[2] = 1;
+ sel_dims[0] = (hsize_t) (WRITE_SHARED_FILTERED_CHUNKS_3D_NROWS / mpi_size);
+ sel_dims[1] = (hsize_t) WRITE_SHARED_FILTERED_CHUNKS_3D_NCOLS;
+ sel_dims[2] = (hsize_t) WRITE_SHARED_FILTERED_CHUNKS_3D_DEPTH;
+
+ filespace = H5Screate_simple(WRITE_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS, dataset_dims, NULL);
+ VRFY((filespace >= 0), "File dataspace creation succeeded");
+
+ memspace = H5Screate_simple(WRITE_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS, sel_dims, NULL);
+ VRFY((memspace >= 0), "Memory dataspace creation succeeded");
+
+ /* Create chunked dataset */
+ plist_id = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY((plist_id >= 0), "DCPL creation succeeded");
+
+ VRFY((H5Pset_chunk(plist_id, WRITE_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS, chunk_dims) >= 0),
+ "Chunk size set");
+
+ /* Add test filter to the pipeline */
+ VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
+
+ dset_id = H5Dcreate2(file_id, WRITE_SHARED_FILTERED_CHUNKS_3D_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
+ H5P_DEFAULT, plist_id, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset creation succeeded");
+
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+
+ /* Each process defines the dataset selection in memory and writes
+ * it to the hyperslab in the file
+ */
+ count[0] = (hsize_t) (WRITE_SHARED_FILTERED_CHUNKS_3D_NROWS / WRITE_SHARED_FILTERED_CHUNKS_3D_CH_NROWS);
+ count[1] = (hsize_t) (WRITE_SHARED_FILTERED_CHUNKS_3D_NCOLS / WRITE_SHARED_FILTERED_CHUNKS_3D_CH_NCOLS);
+ count[2] = (hsize_t) WRITE_SHARED_FILTERED_CHUNKS_3D_DEPTH;
+ stride[0] = (hsize_t) WRITE_SHARED_FILTERED_CHUNKS_3D_CH_NROWS;
+ stride[1] = (hsize_t) WRITE_SHARED_FILTERED_CHUNKS_3D_CH_NCOLS;
+ stride[2] = 1;
+ block[0] = 1;
+ block[1] = (hsize_t) WRITE_SHARED_FILTERED_CHUNKS_3D_CH_NCOLS;
+ block[2] = 1;
+ start[0] = (hsize_t) mpi_rank;
+ start[1] = 0;
+ start[2] = 0;
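+
+    /* With block[0] = 1, stride[0] = CH_NROWS and start[0] = mpi_rank, rank r
+     * contributes row r of every chunk along dimension 0, so each chunk on
+     * every "page" is written to by multiple ranks.
+     */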
+
+ if (VERBOSE_MED) {
+ HDprintf("Process %d is writing with count[ %llu, %llu, %llu ], stride[ %llu, %llu, %llu ], start[ %llu, %llu, %llu ], block size[ %llu, %llu, %llu ]\n",
+ mpi_rank, count[0], count[1], count[2], stride[0], stride[1], stride[2], start[0], start[1], start[2], block[0], block[1], block[2]);
+ HDfflush(stdout);
+ }
+
+ /* Select hyperslab in the file */
+ filespace = H5Dget_space(dset_id);
+ VRFY((filespace >= 0), "File dataspace retrieval succeeded");
+
+ VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride,
+ count, block) >= 0), "Hyperslab selection succeeded");
+
+ /* Fill data buffer */
+ data_size = sel_dims[0] * sel_dims[1] * sel_dims[2] * sizeof(*data);
+ correct_buf_size = dataset_dims[0] * dataset_dims[1] * dataset_dims[2] * sizeof(*correct_buf);
+
+ data = (C_DATATYPE *) HDcalloc(1, data_size);
+ VRFY((NULL != data), "HDcalloc succeeded");
+
+ correct_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ VRFY((NULL != correct_buf), "HDcalloc succeeded");
+
+ for (i = 0; i < data_size / sizeof(*data); i++)
+ data[i] = (C_DATATYPE) GEN_DATA(i);
+
+ for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++)
+        /* Add the flattened (column, page) index within the row */
+ correct_buf[i] =
+ (C_DATATYPE) (
+ (i % (hsize_t) (WRITE_SHARED_FILTERED_CHUNKS_3D_DEPTH * WRITE_SHARED_FILTERED_CHUNKS_3D_NCOLS))
+
+ /* Add the Row Index */
+ + ((i % (hsize_t) (mpi_size * WRITE_SHARED_FILTERED_CHUNKS_3D_DEPTH * WRITE_SHARED_FILTERED_CHUNKS_3D_NCOLS))
+ / (hsize_t) (WRITE_SHARED_FILTERED_CHUNKS_3D_DEPTH * WRITE_SHARED_FILTERED_CHUNKS_3D_NCOLS))
+
+ /* Add the amount that gets added when a rank moves down to its next section vertically in the dataset */
+ + ((hsize_t) (WRITE_SHARED_FILTERED_CHUNKS_3D_DEPTH * WRITE_SHARED_FILTERED_CHUNKS_3D_NCOLS)
+ * (i / (hsize_t) (mpi_size * WRITE_SHARED_FILTERED_CHUNKS_3D_DEPTH * WRITE_SHARED_FILTERED_CHUNKS_3D_NCOLS)))
+ );
+
+ /* Create property list for collective dataset write */
+ plist_id = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((plist_id >= 0), "DXPL creation succeeded");
+
+ VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0),
+ "Set DXPL MPIO succeeded");
+
+ VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, data) >= 0),
+ "Dataset write succeeded");
+
+ if (data) HDfree(data);
+
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+
+ /* Verify the correct data was written */
+ read_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ VRFY((NULL != read_buf), "HDcalloc succeeded");
+
+ dset_id = H5Dopen2(file_id, "/" WRITE_SHARED_FILTERED_CHUNKS_3D_DATASET_NAME, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset open succeeded");
+
+ VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0),
+ "Dataset read succeeded");
+
+ VRFY((0 == HDmemcmp(read_buf, correct_buf, correct_buf_size)),
+ "Data verification succeeded");
+
+ if (correct_buf) HDfree(correct_buf);
+ if (read_buf) HDfree(read_buf);
+
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+ VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
+ VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded");
+ VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
+
+ return;
+}
+
+/*
+ * Tests parallel write of filtered data to unshared
+ * chunks using a compound datatype which doesn't
+ * require a datatype conversion.
+ *
+ * Programmer: Jordan Henderson
+ * 02/10/2017
+ */
+static void
+test_write_cmpd_filtered_dataset_no_conversion_unshared(void)
+{
+ COMPOUND_C_DATATYPE *data = NULL;
+ COMPOUND_C_DATATYPE *read_buf = NULL;
+ COMPOUND_C_DATATYPE *correct_buf = NULL;
+ hsize_t dataset_dims[WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS];
+ hsize_t chunk_dims[WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS];
+ hsize_t sel_dims[WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS];
+ hsize_t start[WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS];
+ hsize_t stride[WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS];
+ hsize_t count[WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS];
+ hsize_t block[WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS];
+ size_t i, correct_buf_size;
+ hid_t file_id = -1, dset_id = -1, plist_id = -1, memtype = -1;
+ hid_t filespace = -1, memspace = -1;
+
+ if (MAINPROCESS) HDputs("Testing write to unshared filtered chunks in Compound Datatype dataset without Datatype conversion");
+
+ CHECK_CUR_FILTER_AVAIL();
+
+ /* Set up file access property list with parallel I/O access */
+ plist_id = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((plist_id >= 0), "FAPL creation succeeded");
+
+ VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0),
+ "Set FAPL MPIO succeeded");
+
+ VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
+ "Set libver bounds succeeded");
+
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id);
+ VRFY((file_id >= 0), "Test file open succeeded");
+
+ VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+
+ /* Create the dataspace for the dataset */
+ dataset_dims[0] = WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_NROWS;
+ dataset_dims[1] = (hsize_t) WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_NCOLS;
+ chunk_dims[0] = WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NROWS;
+ chunk_dims[1] = WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NCOLS;
+ sel_dims[0] = WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NROWS;
+ sel_dims[1] = (hsize_t) WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_ENTRIES_PER_PROC;
+
+ filespace = H5Screate_simple(WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS, dataset_dims, NULL);
+ VRFY((filespace >= 0), "File dataspace creation succeeded");
+
+ memspace = H5Screate_simple(WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS, sel_dims, NULL);
+ VRFY((memspace >= 0), "Memory dataspace creation succeeded");
+
+ /* Create chunked dataset */
+ plist_id = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY((plist_id >= 0), "DCPL creation succeeded");
+
+ VRFY((H5Pset_chunk(plist_id, WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS, chunk_dims) >= 0),
+ "Chunk size set");
+
+ /* Add test filter to the pipeline */
+ VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
+
+ /* Create the compound type for memory. */
+ memtype = H5Tcreate(H5T_COMPOUND, sizeof(COMPOUND_C_DATATYPE));
+ VRFY((memtype >= 0), "Datatype creation succeeded");
+
+ VRFY((H5Tinsert(memtype, "ShortData", HOFFSET(COMPOUND_C_DATATYPE, field1), H5T_NATIVE_SHORT) >= 0),
+ "Datatype insertion succeeded");
+ VRFY((H5Tinsert(memtype, "IntData", HOFFSET(COMPOUND_C_DATATYPE, field2), H5T_NATIVE_INT) >= 0),
+ "Datatype insertion succeeded");
+ VRFY((H5Tinsert(memtype, "LongData", HOFFSET(COMPOUND_C_DATATYPE, field3), H5T_NATIVE_LONG) >= 0),
+ "Datatype insertion succeeded");
+
+ dset_id = H5Dcreate2(file_id, WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_NAME, memtype, filespace,
+ H5P_DEFAULT, plist_id, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset creation succeeded");
+
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+
+ /* Each process defines the dataset selection in memory and writes
+ * it to the hyperslab in the file
+ */
+ count[0] = 1;
+ count[1] = (hsize_t) WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_ENTRIES_PER_PROC;
+ stride[0] = WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NROWS;
+ stride[1] = WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NCOLS;
+ block[0] = WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NROWS;
+ block[1] = WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NCOLS;
+ start[0] = 0;
+ start[1] = ((hsize_t) mpi_rank * WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NCOLS);
+
+ if (VERBOSE_MED) {
+ HDprintf("Process %d is writing with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n",
+ mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
+ HDfflush(stdout);
+ }
+
+ /* Select hyperslab in the file */
+ filespace = H5Dget_space(dset_id);
+ VRFY((filespace >= 0), "File dataspace retrieval succeeded");
+
+ VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
+ "Hyperslab selection succeeded");
+
+ data = (COMPOUND_C_DATATYPE *) HDcalloc(1, (hsize_t) WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_ENTRIES_PER_PROC * sizeof(*data));
+ VRFY((NULL != data), "HDcalloc succeeded");
+
+ correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(COMPOUND_C_DATATYPE);
+
+ correct_buf = (COMPOUND_C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ VRFY((NULL != correct_buf), "HDcalloc succeeded");
+
+ /* Fill data buffer */
+ for (i = 0; i < (hsize_t) WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_ENTRIES_PER_PROC; i++) {
+ data[i].field1 = (short) GEN_DATA(i);
+ data[i].field2 = (int) GEN_DATA(i);
+ data[i].field3 = (long) GEN_DATA(i);
+ }
+
+ for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++) {
+ correct_buf[i].field1 = (short) (
+ (i % dataset_dims[1])
+ + (i / dataset_dims[1])
+ );
+
+ correct_buf[i].field2 = (int) (
+ (i % dataset_dims[1])
+ + (i / dataset_dims[1])
+ );
+
+ correct_buf[i].field3 = (long) (
+ (i % dataset_dims[1])
+ + (i / dataset_dims[1])
+ );
+ }
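+
+    /* Since i % dataset_dims[1] is the column index and i / dataset_dims[1]
+     * is the row index, every field of the element at (row, col) is expected
+     * to hold the value row + col.
+     */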
+
+ /* Create property list for collective dataset write */
+ plist_id = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((plist_id >= 0), "DXPL creation succeeded");
+
+ VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0),
+ "Set DXPL MPIO succeeded");
+
+ VRFY((H5Dwrite(dset_id, memtype, memspace, filespace, plist_id, data) >= 0),
+ "Dataset write succeeded");
+
+ if (data) HDfree(data);
+
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+
+ /* Verify the correct data was written */
+ read_buf = (COMPOUND_C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ VRFY((NULL != read_buf), "HDcalloc succeeded");
+
+ dset_id = H5Dopen2(file_id, "/" WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_NAME, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset open succeeded");
+
+ VRFY((H5Dread(dset_id, memtype, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0),
+ "Dataset read succeeded");
+
+ VRFY((0 == HDmemcmp(read_buf, correct_buf, correct_buf_size)),
+ "Data verification succeeded");
+
+ if (correct_buf) HDfree(correct_buf);
+ if (read_buf) HDfree(read_buf);
+
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+ VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
+ VRFY((H5Tclose(memtype) >= 0), "Datatype close succeeded");
+ VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded");
+ VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
+
+ return;
+}
+
+/*
+ * Tests parallel write of filtered data to shared
+ * chunks using a compound datatype which doesn't
+ * require a datatype conversion.
+ *
+ * Programmer: Jordan Henderson
+ * 02/10/2017
+ */
+static void
+test_write_cmpd_filtered_dataset_no_conversion_shared(void)
+{
+ COMPOUND_C_DATATYPE *data = NULL;
+ COMPOUND_C_DATATYPE *read_buf = NULL;
+ COMPOUND_C_DATATYPE *correct_buf = NULL;
+ hsize_t dataset_dims[WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS];
+ hsize_t chunk_dims[WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS];
+ hsize_t sel_dims[WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS];
+ hsize_t start[WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS];
+ hsize_t stride[WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS];
+ hsize_t count[WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS];
+ hsize_t block[WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS];
+ size_t i, correct_buf_size;
+ hid_t file_id, dset_id, plist_id, memtype;
+ hid_t filespace, memspace;
+
+ if (MAINPROCESS) HDputs("Testing write to shared filtered chunks in Compound Datatype dataset without Datatype conversion");
+
+ CHECK_CUR_FILTER_AVAIL();
+
+ /* Set up file access property list with parallel I/O access */
+ plist_id = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((plist_id >= 0), "FAPL creation succeeded");
+
+ VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0),
+ "Set FAPL MPIO succeeded");
+
+ VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
+ "Set libver bounds succeeded");
+
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id);
+ VRFY((file_id >= 0), "Test file open succeeded");
+
+ VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+
+ /* Create the dataspace for the dataset */
+ dataset_dims[0] = (hsize_t) WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_NROWS;
+ dataset_dims[1] = (hsize_t) WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_NCOLS;
+ chunk_dims[0] = (hsize_t) WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NROWS;
+ chunk_dims[1] = WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NCOLS;
+ sel_dims[0] = (hsize_t) WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NROWS / (hsize_t) mpi_size;
+ sel_dims[1] = (hsize_t) WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_ENTRIES_PER_PROC;
+
+ filespace = H5Screate_simple(WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS, dataset_dims, NULL);
+ VRFY((filespace >= 0), "File dataspace creation succeeded");
+
+ memspace = H5Screate_simple(WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS, sel_dims, NULL);
+ VRFY((memspace >= 0), "Memory dataspace creation succeeded");
+
+ /* Create chunked dataset */
+ plist_id = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY((plist_id >= 0), "DCPL creation succeeded");
+
+ VRFY((H5Pset_chunk(plist_id, WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS, chunk_dims) >= 0),
+ "Chunk size set");
+
+ /* Add test filter to the pipeline */
+ VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
+
+ /* Create the compound type for memory. */
+ memtype = H5Tcreate(H5T_COMPOUND, sizeof(COMPOUND_C_DATATYPE));
+ VRFY((memtype >= 0), "Datatype creation succeeded");
+
+ VRFY((H5Tinsert(memtype, "ShortData", HOFFSET(COMPOUND_C_DATATYPE, field1), H5T_NATIVE_SHORT) >= 0),
+ "Datatype insertion succeeded");
+ VRFY((H5Tinsert(memtype, "IntData", HOFFSET(COMPOUND_C_DATATYPE, field2), H5T_NATIVE_INT) >= 0),
+ "Datatype insertion succeeded");
+ VRFY((H5Tinsert(memtype, "LongData", HOFFSET(COMPOUND_C_DATATYPE, field3), H5T_NATIVE_LONG) >= 0),
+ "Datatype insertion succeeded");
+
+ dset_id = H5Dcreate2(file_id, WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_NAME, memtype, filespace,
+ H5P_DEFAULT, plist_id, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset creation succeeded");
+
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+
+ /* Each process defines the dataset selection in memory and writes
+ * it to the hyperslab in the file
+ */
+ count[0] = 1;
+ count[1] = (hsize_t) WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_ENTRIES_PER_PROC;
+ stride[0] = (hsize_t) WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NROWS;
+ stride[1] = WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NCOLS;
+ block[0] = (hsize_t) WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NROWS / (hsize_t) mpi_size;
+ block[1] = WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NCOLS;
+ start[0] = (hsize_t) mpi_rank;
+ start[1] = 0;
+
+ if (VERBOSE_MED) {
+ HDprintf("Process %d is writing with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n",
+ mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
+ HDfflush(stdout);
+ }
+
+ /* Select hyperslab in the file */
+ filespace = H5Dget_space(dset_id);
+ VRFY((filespace >= 0), "File dataspace retrieval succeeded");
+
+ VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
+ "Hyperslab selection succeeded");
+
+ data = (COMPOUND_C_DATATYPE *) HDcalloc(1, (hsize_t) WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_ENTRIES_PER_PROC * sizeof(*data));
+ VRFY((NULL != data), "HDcalloc succeeded");
+
+ correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(COMPOUND_C_DATATYPE);
+
+ correct_buf = (COMPOUND_C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ VRFY((NULL != correct_buf), "HDcalloc succeeded");
+
+ /* Fill data buffer */
+ for (i = 0; i < (hsize_t) WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_ENTRIES_PER_PROC; i++) {
+ data[i].field1 = (short) GEN_DATA(i);
+ data[i].field2 = (int) GEN_DATA(i);
+ data[i].field3 = (long) GEN_DATA(i);
+ }
+
+ for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++) {
+ correct_buf[i].field1 = (short) (
+ (dataset_dims[1] * (i / ((hsize_t) mpi_size * dataset_dims[1])))
+ + (i % dataset_dims[1])
+ + (((i % ((hsize_t) mpi_size * dataset_dims[1])) / dataset_dims[1]) % dataset_dims[1])
+ );
+
+ correct_buf[i].field2 = (int) (
+ (dataset_dims[1] * (i / ((hsize_t) mpi_size * dataset_dims[1])))
+ + (i % dataset_dims[1])
+ + (((i % ((hsize_t) mpi_size * dataset_dims[1])) / dataset_dims[1]) % dataset_dims[1])
+ );
+
+ correct_buf[i].field3 = (long) (
+ (dataset_dims[1] * (i / ((hsize_t) mpi_size * dataset_dims[1])))
+ + (i % dataset_dims[1])
+ + (((i % ((hsize_t) mpi_size * dataset_dims[1])) / dataset_dims[1]) % dataset_dims[1])
+ );
+ }
+
+ /* Create property list for collective dataset write */
+ plist_id = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((plist_id >= 0), "DXPL creation succeeded");
+
+ VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0),
+ "Set DXPL MPIO succeeded");
+
+ VRFY((H5Dwrite(dset_id, memtype, memspace, filespace, plist_id, data) >= 0),
+ "Dataset write succeeded");
+
+ if (data) HDfree(data);
+
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+
+ /* Verify the correct data was written */
+ read_buf = (COMPOUND_C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ VRFY((NULL != read_buf), "HDcalloc succeeded");
+
+ dset_id = H5Dopen2(file_id, "/" WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_NAME, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset open succeeded");
+
+ VRFY((H5Dread(dset_id, memtype, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0),
+ "Dataset read succeeded");
+
+ VRFY((0 == HDmemcmp(read_buf, correct_buf, correct_buf_size)),
+ "Data verification succeeded");
+
+ if (correct_buf) HDfree(correct_buf);
+ if (read_buf) HDfree(read_buf);
+
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+ VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
+ VRFY((H5Tclose(memtype) >= 0), "Datatype close succeeded");
+ VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded");
+ VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
+
+ return;
+}
+
+/*
+ * Tests parallel write of filtered data to unshared
+ * chunks using a compound datatype which requires a
+ * datatype conversion.
+ *
+ * NOTE: This test should currently fail because the
+ * datatype conversion causes the parallel library to
+ * break collective I/O down to independent I/O, which
+ * isn't allowed when there are filters in the pipeline.
+ *
+ * Programmer: Jordan Henderson
+ * 02/07/2017
+ */
+static void
+test_write_cmpd_filtered_dataset_type_conversion_unshared(void)
+{
+ COMPOUND_C_DATATYPE *data = NULL;
+ COMPOUND_C_DATATYPE *read_buf = NULL;
+ COMPOUND_C_DATATYPE *correct_buf = NULL;
+ hsize_t dataset_dims[WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS];
+ hsize_t chunk_dims[WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS];
+ hsize_t sel_dims[WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS];
+ hsize_t start[WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS];
+ hsize_t stride[WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS];
+ hsize_t count[WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS];
+ hsize_t block[WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS];
+ size_t i, correct_buf_size;
+ hid_t file_id = -1, dset_id = -1, plist_id = -1, filetype = -1, memtype = -1;
+ hid_t filespace = -1, memspace = -1;
+
+ if (MAINPROCESS) HDputs("Testing write to unshared filtered chunks in Compound Datatype dataset with Datatype conversion");
+
+ CHECK_CUR_FILTER_AVAIL();
+
+ /* Set up file access property list with parallel I/O access */
+ plist_id = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((plist_id >= 0), "FAPL creation succeeded");
+
+ VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0),
+ "Set FAPL MPIO succeeded");
+
+ VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
+ "Set libver bounds succeeded");
+
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id);
+ VRFY((file_id >= 0), "Test file open succeeded");
+
+ VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+
+ /* Create the dataspace for the dataset */
+ dataset_dims[0] = WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_NROWS;
+ dataset_dims[1] = (hsize_t) WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_NCOLS;
+ chunk_dims[0] = WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NROWS;
+ chunk_dims[1] = WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NCOLS;
+ sel_dims[0] = WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NROWS;
+ sel_dims[1] = (hsize_t) WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_ENTRIES_PER_PROC;
+
+ filespace = H5Screate_simple(WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS, dataset_dims, NULL);
+ VRFY((filespace >= 0), "File dataspace creation succeeded");
+
+ memspace = H5Screate_simple(WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS, sel_dims, NULL);
+ VRFY((memspace >= 0), "Memory dataspace creation succeeded");
+
+ /* Create chunked dataset */
+ plist_id = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY((plist_id >= 0), "DCPL creation succeeded");
+
+ VRFY((H5Pset_chunk(plist_id, WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS, chunk_dims) >= 0),
+ "Chunk size set");
+
+ /* Add test filter to the pipeline */
+ VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
+
+ /* Create the compound type for memory. */
+ memtype = H5Tcreate(H5T_COMPOUND, sizeof(COMPOUND_C_DATATYPE));
+ VRFY((memtype >= 0), "Datatype creation succeeded");
+
+ VRFY((H5Tinsert(memtype, "ShortData", HOFFSET(COMPOUND_C_DATATYPE, field1), H5T_NATIVE_SHORT) >= 0),
+ "Datatype insertion succeeded");
+ VRFY((H5Tinsert(memtype, "IntData", HOFFSET(COMPOUND_C_DATATYPE, field2), H5T_NATIVE_INT) >= 0),
+ "Datatype insertion succeeded");
+ VRFY((H5Tinsert(memtype, "LongData", HOFFSET(COMPOUND_C_DATATYPE, field3), H5T_NATIVE_LONG) >= 0),
+ "Datatype insertion succeeded");
+
+ /* Create the compound type for file. */
+ filetype = H5Tcreate(H5T_COMPOUND, 32);
+ VRFY((filetype >= 0), "Datatype creation succeeded");
+
+ VRFY((H5Tinsert(filetype, "ShortData", 0, H5T_STD_I64BE) >= 0),
+ "Datatype insertion succeeded");
+ VRFY((H5Tinsert(filetype, "IntData", 8, H5T_STD_I64BE) >= 0),
+ "Datatype insertion succeeded");
+ VRFY((H5Tinsert(filetype, "LongData", 16, H5T_STD_I64BE) >= 0),
+ "Datatype insertion succeeded");
+
+ dset_id = H5Dcreate2(file_id, WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_NAME, filetype, filespace,
+ H5P_DEFAULT, plist_id, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset creation succeeded");
+
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+
+ /* Each process defines the dataset selection in memory and writes
+ * it to the hyperslab in the file
+ */
+ count[0] = 1;
+ count[1] = (hsize_t) WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_ENTRIES_PER_PROC;
+ stride[0] = WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NROWS;
+ stride[1] = WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NCOLS;
+ block[0] = WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NROWS;
+ block[1] = WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NCOLS;
+ start[0] = 0;
+ start[1] = ((hsize_t) mpi_rank * WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NCOLS);
+
+ if (VERBOSE_MED) {
+ HDprintf("Process %d is writing with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n",
+ mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
+ HDfflush(stdout);
+ }
+
+ /* Select hyperslab in the file */
+ filespace = H5Dget_space(dset_id);
+ VRFY((filespace >= 0), "File dataspace retrieval succeeded");
+
+ VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
+ "Hyperslab selection succeeded");
+
+ data = (COMPOUND_C_DATATYPE *) HDcalloc(1, (hsize_t) WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_ENTRIES_PER_PROC * sizeof(*data));
+ VRFY((NULL != data), "HDcalloc succeeded");
+
+ correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(COMPOUND_C_DATATYPE);
+
+ correct_buf = (COMPOUND_C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ VRFY((NULL != correct_buf), "HDcalloc succeeded");
+
+ /* Fill data buffer */
+ for (i = 0; i < (hsize_t) WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_ENTRIES_PER_PROC; i++) {
+ data[i].field1 = (short) GEN_DATA(i);
+ data[i].field2 = (int) GEN_DATA(i);
+ data[i].field3 = (long) GEN_DATA(i);
+ }
+
+ /* Create property list for collective dataset write */
+ plist_id = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((plist_id >= 0), "DXPL creation succeeded");
+
+ VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0),
+ "Set DXPL MPIO succeeded");
+
+ /* Ensure that this test currently fails since type conversions break collective mode */
+ H5E_BEGIN_TRY {
+ VRFY((H5Dwrite(dset_id, memtype, memspace, filespace, plist_id, data) < 0),
+ "Dataset write succeeded");
+ } H5E_END_TRY;
+
+ if (data) HDfree(data);
+
+    VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+
+    /* Verify that no data was written */
+ read_buf = (COMPOUND_C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ VRFY((NULL != read_buf), "HDcalloc succeeded");
+
+ dset_id = H5Dopen2(file_id, "/" WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_NAME, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset open succeeded");
+
+ VRFY((H5Dread(dset_id, memtype, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0),
+ "Dataset read succeeded");
+
+ VRFY((0 == HDmemcmp(read_buf, correct_buf, correct_buf_size)),
+ "Data verification succeeded");
+
+ if (correct_buf) HDfree(correct_buf);
+ if (read_buf) HDfree(read_buf);
+
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+ VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
+ VRFY((H5Tclose(filetype) >= 0), "File datatype close succeeded");
+ VRFY((H5Tclose(memtype) >= 0), "Memory datatype close succeeded");
+ VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded");
+ VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
+
+ return;
+}
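+
+/*
+ * Editor's sketch (not part of the test suite): a hedged illustration of
+ * how an application could confirm that a datatype conversion is what
+ * broke collective I/O, by querying the transfer property list with
+ * H5Pget_mpio_no_collective_cause() after the H5Dwrite() call. It is
+ * kept under "#if 0" because it is illustrative only; the helper name
+ * below is hypothetical.
+ */
+#if 0
+static void
+example_report_no_collective_cause(hid_t dxpl_id)
+{
+    uint32_t local_cause = 0, global_cause = 0;
+
+    /* Retrieve the local and global reasons why collective I/O was broken */
+    if (H5Pget_mpio_no_collective_cause(dxpl_id, &local_cause, &global_cause) >= 0)
+        if (global_cause & H5D_MPIO_DATATYPE_CONVERSION)
+            HDprintf("Collective I/O was broken by a datatype conversion\n");
+}
+#endif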
+
+/*
+ * Tests parallel write of filtered data to shared
+ * chunks using a compound datatype which requires
+ * a datatype conversion.
+ *
+ * NOTE: This test should currently fail because the
+ * datatype conversion causes the parallel library to
+ * break collective I/O down to independent I/O, which
+ * isn't allowed when there are filters in the pipeline.
+ *
+ * Programmer: Jordan Henderson
+ * 02/10/2017
+ */
+static void
+test_write_cmpd_filtered_dataset_type_conversion_shared(void)
+{
+ COMPOUND_C_DATATYPE *data = NULL;
+ COMPOUND_C_DATATYPE *read_buf = NULL;
+ COMPOUND_C_DATATYPE *correct_buf = NULL;
+ hsize_t dataset_dims[WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS];
+ hsize_t chunk_dims[WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS];
+ hsize_t sel_dims[WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS];
+ hsize_t start[WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS];
+ hsize_t stride[WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS];
+ hsize_t count[WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS];
+ hsize_t block[WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS];
+ size_t i, correct_buf_size;
+ hid_t file_id, dset_id, plist_id, filetype, memtype;
+ hid_t filespace, memspace;
+
+ if (MAINPROCESS) HDputs("Testing write to shared filtered chunks in Compound Datatype dataset with Datatype conversion");
+
+ CHECK_CUR_FILTER_AVAIL();
+
+ /* Set up file access property list with parallel I/O access */
+ plist_id = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((plist_id >= 0), "FAPL creation succeeded");
+
+ VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0),
+ "Set FAPL MPIO succeeded");
+
+ VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
+ "Set libver bounds succeeded");
+
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id);
+ VRFY((file_id >= 0), "Test file open succeeded");
+
+ VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+
+ /* Create the dataspace for the dataset */
+ dataset_dims[0] = (hsize_t) WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_NROWS;
+ dataset_dims[1] = (hsize_t) WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_NCOLS;
+ chunk_dims[0] = (hsize_t) WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NROWS;
+ chunk_dims[1] = WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NCOLS;
+ sel_dims[0] = (hsize_t) WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NROWS / (hsize_t) mpi_size;
+ sel_dims[1] = (hsize_t) WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_ENTRIES_PER_PROC;
+
+ filespace = H5Screate_simple(WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS, dataset_dims, NULL);
+ VRFY((filespace >= 0), "File dataspace creation succeeded");
+
+ memspace = H5Screate_simple(WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS, sel_dims, NULL);
+ VRFY((memspace >= 0), "Memory dataspace creation succeeded");
+
+ /* Create chunked dataset */
+ plist_id = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY((plist_id >= 0), "DCPL creation succeeded");
+
+ VRFY((H5Pset_chunk(plist_id, WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS, chunk_dims) >= 0),
+ "Chunk size set");
+
+ /* Add test filter to the pipeline */
+ VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
+
+ /* Create the compound type for memory. */
+ memtype = H5Tcreate(H5T_COMPOUND, sizeof(COMPOUND_C_DATATYPE));
+ VRFY((memtype >= 0), "Datatype creation succeeded");
+
+ VRFY((H5Tinsert(memtype, "ShortData", HOFFSET(COMPOUND_C_DATATYPE, field1), H5T_NATIVE_SHORT) >= 0),
+ "Datatype insertion succeeded");
+ VRFY((H5Tinsert(memtype, "IntData", HOFFSET(COMPOUND_C_DATATYPE, field2), H5T_NATIVE_INT) >= 0),
+ "Datatype insertion succeeded");
+ VRFY((H5Tinsert(memtype, "LongData", HOFFSET(COMPOUND_C_DATATYPE, field3), H5T_NATIVE_LONG) >= 0),
+ "Datatype insertion succeeded");
+
+ /* Create the compound type for file. */
+ filetype = H5Tcreate(H5T_COMPOUND, 32);
+ VRFY((filetype >= 0), "Datatype creation succeeded");
+
+ VRFY((H5Tinsert(filetype, "ShortData", 0, H5T_STD_I64BE) >= 0),
+ "Datatype insertion succeeded");
+ VRFY((H5Tinsert(filetype, "IntData", 8, H5T_STD_I64BE) >= 0),
+ "Datatype insertion succeeded");
+ VRFY((H5Tinsert(filetype, "LongData", 16, H5T_STD_I64BE) >= 0),
+ "Datatype insertion succeeded");
+
+ dset_id = H5Dcreate2(file_id, WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_NAME, filetype, filespace,
+ H5P_DEFAULT, plist_id, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset creation succeeded");
+
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+
+ /* Each process defines the dataset selection in memory and writes
+ * it to the hyperslab in the file
+ */
+ count[0] = 1;
+ count[1] = (hsize_t) WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_ENTRIES_PER_PROC;
+ stride[0] = (hsize_t) WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NROWS;
+ stride[1] = WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NCOLS;
+ block[0] = (hsize_t) WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NROWS / (hsize_t) mpi_size;
+ block[1] = WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NCOLS;
+ start[0] = (hsize_t) mpi_rank;
+ start[1] = 0;
+
+ if (VERBOSE_MED) {
+ HDprintf("Process %d is writing with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n",
+ mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
+ HDfflush(stdout);
+ }
+
+ /* Select hyperslab in the file */
+ filespace = H5Dget_space(dset_id);
+ VRFY((filespace >= 0), "File dataspace retrieval succeeded");
+
+ VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
+ "Hyperslab selection succeeded");
+
+ data = (COMPOUND_C_DATATYPE *) HDcalloc(1, (hsize_t) WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_ENTRIES_PER_PROC * sizeof(*data));
+ VRFY((NULL != data), "HDcalloc succeeded");
+
+ correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(COMPOUND_C_DATATYPE);
+
+ correct_buf = (COMPOUND_C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ VRFY((NULL != correct_buf), "HDcalloc succeeded");
+
+ /* Fill data buffer */
+ for (i = 0; i < (hsize_t) WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_ENTRIES_PER_PROC; i++) {
+ data[i].field1 = (short) GEN_DATA(i);
+ data[i].field2 = (int) GEN_DATA(i);
+ data[i].field3 = (long) GEN_DATA(i);
+ }
+
+ /* Create property list for collective dataset write */
+ plist_id = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((plist_id >= 0), "DXPL creation succeeded");
+
+ VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0),
+ "Set DXPL MPIO succeeded");
+
+ /* Ensure that this test currently fails since type conversions break collective mode */
+ H5E_BEGIN_TRY {
+ VRFY((H5Dwrite(dset_id, memtype, memspace, filespace, plist_id, data) < 0),
+ "Dataset write succeeded");
+ } H5E_END_TRY;
+
+ if (data) HDfree(data);
+
+    VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+
+    /* Verify that no data was written */
+ read_buf = (COMPOUND_C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ VRFY((NULL != read_buf), "HDcalloc succeeded");
+
+ dset_id = H5Dopen2(file_id, "/" WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_NAME, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset open succeeded");
+
+ VRFY((H5Dread(dset_id, memtype, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0),
+ "Dataset read succeeded");
+
+ VRFY((0 == HDmemcmp(read_buf, correct_buf, correct_buf_size)),
+ "Data verification succeeded");
+
+ if (correct_buf) HDfree(correct_buf);
+ if (read_buf) HDfree(read_buf);
+
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+ VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
+ VRFY((H5Tclose(filetype) >= 0), "File datatype close succeeded");
+ VRFY((H5Tclose(memtype) >= 0), "Memory datatype close succeeded");
+ VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded");
+ VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
+
+ return;
+}
+#endif
+
+/*
+ * Tests parallel read of filtered data in the special
+ * case where a dataset is composed of a single chunk.
+ *
+ * The MAINPROCESS rank will first write out all of the
+ * data to the dataset. Then, each rank reads a part of
+ * the singular chunk and contributes its piece to a
+ * global buffer that is checked for consistency.
+ *
+ * Programmer: Jordan Henderson
+ * 05/14/2018
+ */
+static void
+test_read_one_chunk_filtered_dataset(void)
+{
+ C_DATATYPE *read_buf = NULL;
+ C_DATATYPE *correct_buf = NULL;
+ C_DATATYPE *global_buf = NULL;
+ hsize_t dataset_dims[READ_ONE_CHUNK_FILTERED_DATASET_DIMS];
+ hsize_t chunk_dims[READ_ONE_CHUNK_FILTERED_DATASET_DIMS];
+ hsize_t sel_dims[READ_ONE_CHUNK_FILTERED_DATASET_DIMS];
+ hsize_t start[READ_ONE_CHUNK_FILTERED_DATASET_DIMS];
+ hsize_t stride[READ_ONE_CHUNK_FILTERED_DATASET_DIMS];
+ hsize_t count[READ_ONE_CHUNK_FILTERED_DATASET_DIMS];
+ hsize_t block[READ_ONE_CHUNK_FILTERED_DATASET_DIMS];
+ hsize_t flat_dims[1];
+ size_t i, read_buf_size, correct_buf_size;
+ hid_t file_id = -1, dset_id = -1, plist_id = -1;
+ hid_t filespace = -1, memspace = -1;
+ int *recvcounts = NULL;
+ int *displs = NULL;
+
+ if (MAINPROCESS) HDputs("Testing read from one-chunk filtered dataset");
+
+ CHECK_CUR_FILTER_AVAIL();
+
+ dataset_dims[0] = (hsize_t) READ_ONE_CHUNK_FILTERED_DATASET_NROWS;
+ dataset_dims[1] = (hsize_t) READ_ONE_CHUNK_FILTERED_DATASET_NCOLS;
+
+ /* Setup the buffer for writing and for comparison */
+ correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*correct_buf);
+
+ correct_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ VRFY((NULL != correct_buf), "HDcalloc succeeded");
+
+ for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++)
+ correct_buf[i] = ((C_DATATYPE) i % (READ_ONE_CHUNK_FILTERED_DATASET_CH_NROWS / mpi_size * READ_ONE_CHUNK_FILTERED_DATASET_CH_NCOLS))
+ + ((C_DATATYPE) i / (READ_ONE_CHUNK_FILTERED_DATASET_CH_NROWS / mpi_size * READ_ONE_CHUNK_FILTERED_DATASET_CH_NCOLS));
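+
+    /* The expected values cycle over blocks of (CH_NROWS / mpi_size) *
+     * CH_NCOLS elements, i.e. over the slice of the single chunk that each
+     * rank reads back below.
+     */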
+
+ if (MAINPROCESS) {
+ plist_id = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((plist_id >= 0), "FAPL creation succeeded");
+
+ VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
+ "Set libver bounds succeeded");
+
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id);
+ VRFY((file_id >= 0), "Test file open succeeded");
+
+ VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+
+ /* Create the dataspace for the dataset */
+ filespace = H5Screate_simple(READ_ONE_CHUNK_FILTERED_DATASET_DIMS, dataset_dims, NULL);
+ VRFY((filespace >= 0), "File dataspace creation succeeded");
+
+ /* Create chunked dataset */
+ chunk_dims[0] = (hsize_t) READ_ONE_CHUNK_FILTERED_DATASET_CH_NROWS;
+ chunk_dims[1] = (hsize_t) READ_ONE_CHUNK_FILTERED_DATASET_CH_NCOLS;
+
+ plist_id = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY((plist_id >= 0), "DCPL creation succeeded");
+
+ VRFY((H5Pset_chunk(plist_id, READ_ONE_CHUNK_FILTERED_DATASET_DIMS, chunk_dims) >= 0),
+ "Chunk size set");
+
+ /* Add test filter to the pipeline */
+ VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
+
+ dset_id = H5Dcreate2(file_id, READ_ONE_CHUNK_FILTERED_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
+ H5P_DEFAULT, plist_id, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset creation succeeded");
+
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+
+ VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, H5P_DEFAULT, correct_buf) >= 0),
+ "Dataset write succeeded");
+
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
+ }
+
+ /* Set up file access property list with parallel I/O access */
+ plist_id = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((plist_id >= 0), "FAPL creation succeeded");
+
+ VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0),
+ "Set FAPL MPIO succeeded");
+
+ VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
+ "Set libver bounds succeeded");
+
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, plist_id);
+ VRFY((file_id >= 0), "Test file open succeeded");
+
+ VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+
+ dset_id = H5Dopen2(file_id, "/" READ_ONE_CHUNK_FILTERED_DATASET_NAME, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset open succeeded");
+
+ sel_dims[0] = (hsize_t) READ_ONE_CHUNK_FILTERED_DATASET_NROWS / (hsize_t) mpi_size;
+ sel_dims[1] = (hsize_t) READ_ONE_CHUNK_FILTERED_DATASET_NCOLS;
+
+ /* Setup one-dimensional memory dataspace for reading the dataset data into a contiguous buffer */
+ flat_dims[0] = sel_dims[0] * sel_dims[1];
+
+ memspace = H5Screate_simple(1, flat_dims, NULL);
+ VRFY((memspace >= 0), "Memory dataspace creation succeeded");
+
+ /* Select hyperslab in the file */
+ filespace = H5Dget_space(dset_id);
+ VRFY((filespace >= 0), "File dataspace retrieval succeeded");
+
+ /*
+ * Each process defines the dataset selection in the file and
+ * reads it to the selection in memory
+ */
+ count[0] = 1;
+ count[1] = 1;
+ stride[0] = (hsize_t) READ_ONE_CHUNK_FILTERED_DATASET_CH_NROWS;
+ stride[1] = (hsize_t) READ_ONE_CHUNK_FILTERED_DATASET_CH_NCOLS;
+ block[0] = sel_dims[0];
+ block[1] = sel_dims[1];
+ start[0] = ((hsize_t) mpi_rank * sel_dims[0]);
+ start[1] = 0;
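+
+    /* count[] is 1 in both dimensions, so each rank reads a single
+     * sel_dims[0] x sel_dims[1] block: the contiguous band of rows that
+     * starts at row mpi_rank * sel_dims[0].
+     */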
+
+ if (VERBOSE_MED) {
+ HDprintf("Process %d is reading with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n",
+ mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
+ HDfflush(stdout);
+ }
+
+ VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
+ "Hyperslab selection succeeded");
+
+ /* Create property list for collective dataset read */
+ plist_id = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((plist_id >= 0), "DXPL creation succeeded");
+
+ VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0),
+ "Set DXPL MPIO succeeded");
+
+ read_buf_size = flat_dims[0] * sizeof(*read_buf);
+
+ read_buf = (C_DATATYPE *) HDcalloc(1, read_buf_size);
+ VRFY((NULL != read_buf), "HDcalloc succeeded");
+
+ VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, read_buf) >= 0),
+ "Dataset read succeeded");
+
+ global_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ VRFY((NULL != global_buf), "HDcalloc succeeded");
+
+ /* Collect each piece of data from all ranks into a global buffer on all ranks */
+ recvcounts = (int *) HDcalloc(1, (size_t) mpi_size * sizeof(*recvcounts));
+ VRFY((NULL != recvcounts), "HDcalloc succeeded");
+
+ for (i = 0; i < (size_t) mpi_size; i++)
+ recvcounts[i] = (int) flat_dims[0];
+
+ displs = (int *) HDcalloc(1, (size_t) mpi_size * sizeof(*displs));
+ VRFY((NULL != displs), "HDcalloc succeeded");
+
+ for (i = 0; i < (size_t) mpi_size; i++)
+ displs[i] = (int) (i * flat_dims[0]);
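+
+    /* Every rank contributes the same number of elements, so the receive
+     * counts are uniform and rank i's piece lands at displacement
+     * i * flat_dims[0] in the gathered buffer.
+     */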
+
+ VRFY((MPI_SUCCESS == MPI_Allgatherv(read_buf, (int) flat_dims[0], C_DATATYPE_MPI, global_buf, recvcounts, displs, C_DATATYPE_MPI, comm)),
+ "MPI_Allgatherv succeeded");
+
+ VRFY((0 == HDmemcmp(global_buf, correct_buf, correct_buf_size)),
+ "Data verification succeeded");
+
+ if (displs) HDfree(displs);
+ if (recvcounts) HDfree(recvcounts);
+ if (global_buf) HDfree(global_buf);
+ if (read_buf) HDfree(read_buf);
+ if (correct_buf) HDfree(correct_buf);
+
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+ VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
+ VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded");
+ VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
+
+ return;
+}
+
+/*
+ * Tests parallel read of filtered data in the case where only
+ * one process is reading from a particular chunk in the operation.
+ *
+ * The MAINPROCESS rank will first write out all of the
+ * data to the dataset. Then, each rank reads a part of
+ * the dataset and contributes its piece to a global buffer
+ * that is checked for consistency.
+ *
+ * Programmer: Jordan Henderson
+ * 05/15/2018
+ */
+static void
+test_read_filtered_dataset_no_overlap(void)
+{
+ C_DATATYPE *read_buf = NULL;
+ C_DATATYPE *correct_buf = NULL;
+ C_DATATYPE *global_buf = NULL;
+ hsize_t dataset_dims[READ_UNSHARED_FILTERED_CHUNKS_DATASET_DIMS];
+ hsize_t chunk_dims[READ_UNSHARED_FILTERED_CHUNKS_DATASET_DIMS];
+ hsize_t sel_dims[READ_UNSHARED_FILTERED_CHUNKS_DATASET_DIMS];
+ hsize_t start[READ_UNSHARED_FILTERED_CHUNKS_DATASET_DIMS];
+ hsize_t stride[READ_UNSHARED_FILTERED_CHUNKS_DATASET_DIMS];
+ hsize_t count[READ_UNSHARED_FILTERED_CHUNKS_DATASET_DIMS];
+ hsize_t block[READ_UNSHARED_FILTERED_CHUNKS_DATASET_DIMS];
+ hsize_t flat_dims[1];
+ size_t i, read_buf_size, correct_buf_size;
+ hid_t file_id = -1, dset_id = -1, plist_id = -1;
+ hid_t filespace = -1, memspace = -1;
+ int *recvcounts = NULL;
+ int *displs = NULL;
+
+ if (MAINPROCESS) HDputs("Testing read from unshared filtered chunks");
+
+ CHECK_CUR_FILTER_AVAIL();
+
+ dataset_dims[0] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_NROWS;
+ dataset_dims[1] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_NCOLS;
+
+ /* Setup the buffer for writing and for comparison */
+ correct_buf_size = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_NROWS * (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_NCOLS * sizeof(*correct_buf);
+
+ correct_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ VRFY((NULL != correct_buf), "HDcalloc succeeded");
+
+ for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++)
+ correct_buf[i] = (C_DATATYPE) (
+ (i % (dataset_dims[0] / (hsize_t) mpi_size * dataset_dims[1]))
+ + (i / (dataset_dims[0] / (hsize_t) mpi_size * dataset_dims[1]))
+ );
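+
+ /*
+ * Each rank's segment of (dataset_dims[0] / mpi_size) rows thus holds
+ * the values 0 .. (segment length - 1), shifted up by the segment
+ * index, which makes per-rank verification straightforward.
+ */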
+
+ if (MAINPROCESS) {
+ plist_id = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((plist_id >= 0), "FAPL creation succeeded");
+
+ VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
+ "Set libver bounds succeeded");
+
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id);
+ VRFY((file_id >= 0), "Test file open succeeded");
+
+ VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+
+ /* Create the dataspace for the dataset */
+ filespace = H5Screate_simple(READ_UNSHARED_FILTERED_CHUNKS_DATASET_DIMS, dataset_dims, NULL);
+ VRFY((filespace >= 0), "File dataspace creation succeeded");
+
+ /* Create chunked dataset */
+ chunk_dims[0] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_CH_NROWS;
+ chunk_dims[1] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_CH_NCOLS;
+
+ plist_id = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY((plist_id >= 0), "DCPL creation succeeded");
+
+ VRFY((H5Pset_chunk(plist_id, READ_UNSHARED_FILTERED_CHUNKS_DATASET_DIMS, chunk_dims) >= 0),
+ "Chunk size set");
+
+ /* Add test filter to the pipeline */
+ VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
+
+ dset_id = H5Dcreate2(file_id, READ_UNSHARED_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
+ H5P_DEFAULT, plist_id, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset creation succeeded");
+
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+
+ VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, H5P_DEFAULT, correct_buf) >= 0),
+ "Dataset write succeeded");
+
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
+ }
+
+ /* Set up file access property list with parallel I/O access */
+ plist_id = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((plist_id >= 0), "FAPL creation succeeded");
+
+ VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0),
+ "Set FAPL MPIO succeeded");
+
+ VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
+ "Set libver bounds succeeded");
+
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, plist_id);
+ VRFY((file_id >= 0), "Test file open succeeded");
+
+ VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+
+ dset_id = H5Dopen2(file_id, "/" READ_UNSHARED_FILTERED_CHUNKS_DATASET_NAME, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset open succeeded");
+
+ sel_dims[0] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_CH_NROWS;
+ sel_dims[1] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_NCOLS;
+
+ /* Setup one-dimensional memory dataspace for reading the dataset data into a contiguous buffer */
+ flat_dims[0] = sel_dims[0] * sel_dims[1];
+
+ memspace = H5Screate_simple(1, flat_dims, NULL);
+ VRFY((memspace >= 0), "Memory dataspace creation succeeded");
+
+ /* Select hyperslab in the file */
+ filespace = H5Dget_space(dset_id);
+ VRFY((filespace >= 0), "File dataspace retrieval succeeded");
+
+ /*
+ * Each process defines the dataset selection in the file and reads
+ * it into the selection in memory
+ */
+ count[0] = 1;
+ count[1] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_NCOLS / (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_CH_NCOLS;
+ stride[0] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_CH_NROWS;
+ stride[1] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_CH_NCOLS;
+ block[0] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_CH_NROWS;
+ block[1] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_CH_NCOLS;
+ start[0] = ((hsize_t) mpi_rank * (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_CH_NROWS * count[0]);
+ start[1] = 0;
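+
+ /*
+ * With this geometry each rank's selection covers exactly one row of
+ * chunks (count[0] == 1, block[0] == CH_NROWS), so no chunk is read by
+ * more than one rank.
+ */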
+
+ if (VERBOSE_MED) {
+ HDprintf("Process %d is reading with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n",
+ mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
+ HDfflush(stdout);
+ }
+
+ VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
+ "Hyperslab selection succeeded");
+
+ /* Create property list for collective dataset read */
+ plist_id = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((plist_id >= 0), "DXPL creation succeeded");
+
+ VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0),
+ "Set DXPL MPIO succeeded");
+
+ read_buf_size = flat_dims[0] * sizeof(*read_buf);
+
+ read_buf = (C_DATATYPE *) HDcalloc(1, read_buf_size);
+ VRFY((NULL != read_buf), "HDcalloc succeeded");
+
+ VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, read_buf) >= 0),
+ "Dataset read succeeded");
+
+ global_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ VRFY((NULL != global_buf), "HDcalloc succeeded");
+
+ /* Collect each piece of data from all ranks into a global buffer on all ranks */
+ recvcounts = (int *) HDcalloc(1, (size_t) mpi_size * sizeof(*recvcounts));
+ VRFY((NULL != recvcounts), "HDcalloc succeeded");
+
+ for (i = 0; i < (size_t) mpi_size; i++)
+ recvcounts[i] = (int) flat_dims[0];
+
+ displs = (int *) HDcalloc(1, (size_t) mpi_size * sizeof(*displs));
+ VRFY((NULL != displs), "HDcalloc succeeded");
+
+ for (i = 0; i < (size_t) mpi_size; i++)
+ displs[i] = (int) (i * flat_dims[0]);
+
+ VRFY((MPI_SUCCESS == MPI_Allgatherv(read_buf, (int) flat_dims[0], C_DATATYPE_MPI, global_buf, recvcounts, displs, C_DATATYPE_MPI, comm)),
+ "MPI_Allgatherv succeeded");
+
+ VRFY((0 == HDmemcmp(global_buf, correct_buf, correct_buf_size)),
+ "Data verification succeeded");
+
+ if (displs) HDfree(displs);
+ if (recvcounts) HDfree(recvcounts);
+ if (global_buf) HDfree(global_buf);
+ if (read_buf) HDfree(read_buf);
+ if (correct_buf) HDfree(correct_buf);
+
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+ VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
+ VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded");
+ VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
+
+ return;
+}
+
+/*
+ * Tests parallel read of filtered data in the case where
+ * more than one process is reading from a particular chunk
+ * in the operation.
+ *
+ * The MAINPROCESS rank will first write out all of the
+ * data to the dataset. Then, each rank reads a part of
+ * each chunk of the dataset and contributes its pieces
+ * to a global buffer that is checked for consistency.
+ *
+ * Programmer: Jordan Henderson
+ * 05/15/2018
+ */
+static void
+test_read_filtered_dataset_overlap(void)
+{
+ C_DATATYPE *read_buf = NULL;
+ C_DATATYPE *correct_buf = NULL;
+ C_DATATYPE *global_buf = NULL;
+ hsize_t dataset_dims[READ_SHARED_FILTERED_CHUNKS_DATASET_DIMS];
+ hsize_t chunk_dims[READ_SHARED_FILTERED_CHUNKS_DATASET_DIMS];
+ hsize_t sel_dims[READ_SHARED_FILTERED_CHUNKS_DATASET_DIMS];
+ hsize_t start[READ_SHARED_FILTERED_CHUNKS_DATASET_DIMS];
+ hsize_t stride[READ_SHARED_FILTERED_CHUNKS_DATASET_DIMS];
+ hsize_t count[READ_SHARED_FILTERED_CHUNKS_DATASET_DIMS];
+ hsize_t block[READ_SHARED_FILTERED_CHUNKS_DATASET_DIMS];
+ hsize_t flat_dims[1];
+ size_t i, read_buf_size, correct_buf_size;
+ hid_t file_id = -1, dset_id = -1, plist_id = -1;
+ hid_t filespace = -1, memspace = -1;
+ int *recvcounts = NULL;
+ int *displs = NULL;
+
+ if (MAINPROCESS) HDputs("Testing read from shared filtered chunks");
+
+ CHECK_CUR_FILTER_AVAIL();
+
+ dataset_dims[0] = (hsize_t) READ_SHARED_FILTERED_CHUNKS_NROWS;
+ dataset_dims[1] = (hsize_t) READ_SHARED_FILTERED_CHUNKS_NCOLS;
+
+ /* Setup the buffer for writing and for comparison */
+ correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*correct_buf);
+
+ correct_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ VRFY((NULL != correct_buf), "HDcalloc succeeded");
+
+ for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++)
+ correct_buf[i] = (C_DATATYPE) (
+ (dataset_dims[1] * (i / ((hsize_t) mpi_size * dataset_dims[1])))
+ + (i % dataset_dims[1])
+ + (((i % ((hsize_t) mpi_size * dataset_dims[1])) / dataset_dims[1]) % dataset_dims[1])
+ );
+
+ if (MAINPROCESS) {
+ plist_id = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((plist_id >= 0), "FAPL creation succeeded");
+
+ VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
+ "Set libver bounds succeeded");
+
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id);
+ VRFY((file_id >= 0), "Test file open succeeded");
+
+ VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+
+ /* Create the dataspace for the dataset */
+ filespace = H5Screate_simple(READ_SHARED_FILTERED_CHUNKS_DATASET_DIMS, dataset_dims, NULL);
+ VRFY((filespace >= 0), "File dataspace creation succeeded");
+
+ /* Create chunked dataset */
+ chunk_dims[0] = (hsize_t) READ_SHARED_FILTERED_CHUNKS_CH_NROWS;
+ chunk_dims[1] = (hsize_t) READ_SHARED_FILTERED_CHUNKS_CH_NCOLS;
+
+ plist_id = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY((plist_id >= 0), "DCPL creation succeeded");
+
+ VRFY((H5Pset_chunk(plist_id, READ_SHARED_FILTERED_CHUNKS_DATASET_DIMS, chunk_dims) >= 0),
+ "Chunk size set");
+
+ /* Add test filter to the pipeline */
+ VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
+
+ dset_id = H5Dcreate2(file_id, READ_SHARED_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
+ H5P_DEFAULT, plist_id, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset creation succeeded");
+
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+
+ VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, H5P_DEFAULT, correct_buf) >= 0),
+ "Dataset write succeeded");
+
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
+ }
+
+ /* Set up file access property list with parallel I/O access */
+ plist_id = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((plist_id >= 0), "FAPL creation succeeded");
+
+ VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0),
+ "Set FAPL MPIO succeeded");
+
+ VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
+ "Set libver bounds succeeded");
+
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, plist_id);
+ VRFY((file_id >= 0), "Test file open succeeded");
+
+ VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+
+ dset_id = H5Dopen2(file_id, "/" READ_SHARED_FILTERED_CHUNKS_DATASET_NAME, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset open succeeded");
+
+ sel_dims[0] = (hsize_t) DIM0_SCALE_FACTOR;
+ sel_dims[1] = (hsize_t) READ_SHARED_FILTERED_CHUNKS_CH_NCOLS * (hsize_t) DIM1_SCALE_FACTOR;
+
+ /* Setup one-dimensional memory dataspace for reading the dataset data into a contiguous buffer */
+ flat_dims[0] = sel_dims[0] * sel_dims[1];
+
+ memspace = H5Screate_simple(1, flat_dims, NULL);
+ VRFY((memspace >= 0), "Memory dataspace creation succeeded");
+
+ /* Select hyperslab in the file */
+ filespace = H5Dget_space(dset_id);
+ VRFY((filespace >= 0), "File dataspace retrieval succeeded");
+
+ /*
+ * Each process defines the dataset selection in the file and
+ * reads it into the selection in memory
+ */
+ count[0] = (hsize_t) READ_SHARED_FILTERED_CHUNKS_NROWS / (hsize_t) READ_SHARED_FILTERED_CHUNKS_CH_NROWS;
+ count[1] = (hsize_t) READ_SHARED_FILTERED_CHUNKS_NCOLS / (hsize_t) READ_SHARED_FILTERED_CHUNKS_CH_NCOLS;
+ stride[0] = (hsize_t) READ_SHARED_FILTERED_CHUNKS_CH_NROWS;
+ stride[1] = (hsize_t) READ_SHARED_FILTERED_CHUNKS_CH_NCOLS;
+ block[0] = (hsize_t) READ_SHARED_FILTERED_CHUNKS_CH_NROWS / (hsize_t) mpi_size;
+ block[1] = (hsize_t) READ_SHARED_FILTERED_CHUNKS_CH_NCOLS;
+ start[0] = (hsize_t) mpi_rank * block[0];
+ start[1] = 0;
+
+ if (VERBOSE_MED) {
+ HDprintf("Process %d is reading with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n",
+ mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
+ HDfflush(stdout);
+ }
+
+ VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
+ "Hyperslab selection succeeded");
+
+ /* Create property list for collective dataset read */
+ plist_id = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((plist_id >= 0), "DXPL creation succeeded");
+
+ VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0),
+ "Set DXPL MPIO succeeded");
+
+ read_buf_size = flat_dims[0] * sizeof(*read_buf);
+
+ read_buf = (C_DATATYPE *) HDcalloc(1, read_buf_size);
+ VRFY((NULL != read_buf), "HDcalloc succeeded");
+
+ VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, read_buf) >= 0),
+ "Dataset read succeeded");
+
+ global_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ VRFY((NULL != global_buf), "HDcalloc succeeded");
+
+ /*
+ * Since these chunks are shared, run multiple rounds of MPI_Allgatherv
+ * to collect all of the pieces into their appropriate locations. The
+ * number of times MPI_Allgatherv is run should be equal to the number
+ * of chunks in the first dimension of the dataset.
+ */
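+ /*
+ * In each round, every rank contributes dataset_dims[1] elements (one
+ * full dataset row's worth), and the displacements below place the
+ * contributions consecutively in rank order within global_buf.
+ */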
+ {
+ size_t loop_count = count[0];
+ size_t total_recvcounts = 0;
+
+ recvcounts = (int *) HDcalloc(1, (size_t) mpi_size * sizeof(*recvcounts));
+ VRFY((NULL != recvcounts), "HDcalloc succeeded");
+
+ displs = (int *) HDcalloc(1, (size_t) mpi_size * sizeof(*displs));
+ VRFY((NULL != displs), "HDcalloc succeeded");
+
+ for (i = 0; i < (size_t) mpi_size; i++) {
+ recvcounts[i] = (int) dataset_dims[1];
+ total_recvcounts += (size_t) recvcounts[i];
+ }
+
+ for (i = 0; i < (size_t) mpi_size; i++)
+ displs[i] = (int) (i * dataset_dims[1]);
+
+ for (; loop_count; loop_count--) {
+ VRFY((MPI_SUCCESS == MPI_Allgatherv(&read_buf[(count[0] - loop_count) * dataset_dims[1]], recvcounts[mpi_rank], C_DATATYPE_MPI,
+ &global_buf[(count[0] - loop_count) * total_recvcounts], recvcounts, displs, C_DATATYPE_MPI, comm)),
+ "MPI_Allgatherv succeeded");
+ }
+ }
+
+ VRFY((0 == HDmemcmp(global_buf, correct_buf, correct_buf_size)),
+ "Data verification succeeded");
+
+ if (displs) HDfree(displs);
+ if (recvcounts) HDfree(recvcounts);
+ if (global_buf) HDfree(global_buf);
+ if (read_buf) HDfree(read_buf);
+ if (correct_buf) HDfree(correct_buf);
+
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+ VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
+ VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded");
+ VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
+
+ return;
+}
+
+/*
+ * Tests parallel read of filtered data in the case where
+ * a single process in the read operation has no selection
+ * in the dataset's dataspace.
+ *
+ * The MAINPROCESS rank will first write out all of the
+ * data to the dataset. Then, each rank (except for one)
+ * reads a part of the dataset and contributes its piece
+ * to a global buffer that is checked for consistency.
+ *
+ * Programmer: Jordan Henderson
+ * 05/15/2018
+ */
+static void
+test_read_filtered_dataset_single_no_selection(void)
+{
+ C_DATATYPE *read_buf = NULL;
+ C_DATATYPE *correct_buf = NULL;
+ C_DATATYPE *global_buf = NULL;
+ hsize_t dataset_dims[READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS];
+ hsize_t chunk_dims[READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS];
+ hsize_t sel_dims[READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS];
+ hsize_t start[READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS];
+ hsize_t stride[READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS];
+ hsize_t count[READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS];
+ hsize_t block[READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS];
+ hsize_t flat_dims[1];
+ size_t i, read_buf_size, correct_buf_size;
+ size_t segment_length;
+ hid_t file_id = -1, dset_id = -1, plist_id = -1;
+ hid_t filespace = -1, memspace = -1;
+ int *recvcounts = NULL;
+ int *displs = NULL;
+
+ if (MAINPROCESS) HDputs("Testing read from filtered chunks with a single process having no selection");
+
+ CHECK_CUR_FILTER_AVAIL();
+
+ dataset_dims[0] = (hsize_t) READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NROWS;
+ dataset_dims[1] = (hsize_t) READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NCOLS;
+
+ /* Setup the buffer for writing and for comparison */
+ correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*correct_buf);
+
+ correct_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ VRFY((NULL != correct_buf), "HDcalloc succeeded");
+
+ for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++)
+ correct_buf[i] =
+ (C_DATATYPE) (
+ (i % (dataset_dims[0] / (hsize_t) mpi_size * dataset_dims[1]))
+ + (i / (dataset_dims[0] / (hsize_t) mpi_size * dataset_dims[1]))
+ );
+
+ /* Compute the correct offset into the buffer for the process having no selection and clear it */
+ segment_length = dataset_dims[0] * dataset_dims[1] / (hsize_t) mpi_size;
+ HDmemset(correct_buf + ((size_t) READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NO_SELECT_PROC * segment_length),
+ 0, segment_length * sizeof(*correct_buf));
+
+ if (MAINPROCESS) {
+ plist_id = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((plist_id >= 0), "FAPL creation succeeded");
+
+ VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
+ "Set libver bounds succeeded");
+
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id);
+ VRFY((file_id >= 0), "Test file open succeeded");
+
+ VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+
+ /* Create the dataspace for the dataset */
+ filespace = H5Screate_simple(READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, dataset_dims, NULL);
+ VRFY((filespace >= 0), "File dataspace creation succeeded");
+
+ /* Create chunked dataset */
+ chunk_dims[0] = (hsize_t) READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS;
+ chunk_dims[1] = (hsize_t) READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NCOLS;
+
+ plist_id = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY((plist_id >= 0), "DCPL creation succeeded");
+
+ VRFY((H5Pset_chunk(plist_id, READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, chunk_dims) >= 0),
+ "Chunk size set");
+
+ /* Add test filter to the pipeline */
+ VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
+
+ dset_id = H5Dcreate2(file_id, READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
+ H5P_DEFAULT, plist_id, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset creation succeeded");
+
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+
+ VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, H5P_DEFAULT, correct_buf) >= 0),
+ "Dataset write succeeded");
+
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
+ }
+
+ /* Set up file access property list with parallel I/O access */
+ plist_id = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((plist_id >= 0), "FAPL creation succeeded");
+
+ VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0),
+ "Set FAPL MPIO succeeded");
+
+ VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
+ "Set libver bounds succeeded");
+
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, plist_id);
+ VRFY((file_id >= 0), "Test file open succeeded");
+
+ VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+
+ dset_id = H5Dopen2(file_id, "/" READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset open succeeded");
+
+ sel_dims[0] = (hsize_t) READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS;
+ sel_dims[1] = (hsize_t) READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NCOLS;
+
+ if (mpi_rank == READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NO_SELECT_PROC)
+ sel_dims[0] = sel_dims[1] = 0;
+
+ /* Setup one-dimensional memory dataspace for reading the dataset data into a contiguous buffer */
+ flat_dims[0] = sel_dims[0] * sel_dims[1];
+
+ memspace = H5Screate_simple(1, flat_dims, NULL);
+ VRFY((memspace >= 0), "Memory dataspace creation succeeded");
+
+ /* Select hyperslab in the file */
+ filespace = H5Dget_space(dset_id);
+ VRFY((filespace >= 0), "File dataspace retrieval succeeded");
+
+ /*
+ * Each process defines the dataset selection in the file and
+ * reads it into the selection in memory
+ */
+ count[0] = 1;
+ count[1] = (hsize_t) READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NCOLS / (hsize_t) READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NCOLS;
+ stride[0] = (hsize_t) READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS;
+ stride[1] = (hsize_t) READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NCOLS;
+ block[0] = (hsize_t) READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS;
+ block[1] = (hsize_t) READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NCOLS;
+ start[0] = (hsize_t) mpi_rank * (hsize_t) READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS * count[0];
+ start[1] = 0;
+
+ if (VERBOSE_MED) {
+ HDprintf("Process %d is reading with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n",
+ mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
+ HDfflush(stdout);
+ }
+
+ if (mpi_rank == READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NO_SELECT_PROC)
+ VRFY((H5Sselect_none(filespace) >= 0), "Select none succeeded");
+ else
+ VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
+ "Hyperslab selection succeeded");
+
+ /* Create property list for collective dataset read */
+ plist_id = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((plist_id >= 0), "DXPL creation succeeded");
+
+ VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0),
+ "Set DXPL MPIO succeeded");
+
+ read_buf_size = flat_dims[0] * sizeof(*read_buf);
+
+ read_buf = (C_DATATYPE *) HDcalloc(1, read_buf_size);
+ VRFY((NULL != read_buf), "HDcalloc succeeded");
+
+ VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, read_buf) >= 0),
+ "Dataset read succeeded");
+
+ global_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ VRFY((NULL != global_buf), "HDcalloc succeeded");
+
+ /* Collect each piece of data from all ranks into a global buffer on all ranks */
+ recvcounts = (int *) HDcalloc(1, (size_t) mpi_size * sizeof(*recvcounts));
+ VRFY((NULL != recvcounts), "HDcalloc succeeded");
+
+ for (i = 0; i < (size_t) mpi_size; i++)
+ recvcounts[i] = (int) (READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS * READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NCOLS);
+
+ recvcounts[READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NO_SELECT_PROC] = 0;
+
+ displs = (int *) HDcalloc(1, (size_t) mpi_size * sizeof(*displs));
+ VRFY((NULL != displs), "HDcalloc succeeded");
+
+ for (i = 0; i < (size_t) mpi_size; i++)
+ displs[i] = (int) (i * (size_t) (READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS * READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NCOLS));
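+
+ /*
+ * Note that the displacements are left uniform, so the no-select rank's
+ * segment of global_buf simply stays zeroed from HDcalloc, matching the
+ * HDmemset of the corresponding segment of correct_buf above.
+ */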
+
+ if (mpi_rank == READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NO_SELECT_PROC)
+ VRFY((MPI_SUCCESS == MPI_Allgatherv(read_buf, 0, C_DATATYPE_MPI, global_buf, recvcounts, displs, C_DATATYPE_MPI, comm)),
+ "MPI_Allgatherv succeeded");
+ else
+ VRFY((MPI_SUCCESS == MPI_Allgatherv(read_buf, (int) flat_dims[0], C_DATATYPE_MPI, global_buf, recvcounts, displs, C_DATATYPE_MPI, comm)),
+ "MPI_Allgatherv succeeded");
+
+ VRFY((0 == HDmemcmp(global_buf, correct_buf, correct_buf_size)),
+ "Data verification succeeded");
+
+ if (displs) HDfree(displs);
+ if (recvcounts) HDfree(recvcounts);
+ if (global_buf) HDfree(global_buf);
+ if (read_buf) HDfree(read_buf);
+ if (correct_buf) HDfree(correct_buf);
+
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+ VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
+ VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded");
+ VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
+
+ return;
+}
+
+/*
+ * Tests parallel read of filtered data in the case where
+ * no process in the read operation has a selection in the
+ * dataset's dataspace. This test ensures that there are
+ * no assertion failures or similar issues due to
+ * zero-size allocations and the like.
+ *
+ * The MAINPROCESS rank will first write out all of the
+ * data to the dataset. Then, each rank will simply issue
+ * a no-op read.
+ *
+ * Programmer: Jordan Henderson
+ * 05/15/2018
+ */
+static void
+test_read_filtered_dataset_all_no_selection(void)
+{
+ C_DATATYPE *read_buf = NULL;
+ C_DATATYPE *correct_buf = NULL;
+ hsize_t dataset_dims[READ_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS];
+ hsize_t chunk_dims[READ_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS];
+ hsize_t sel_dims[READ_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS];
+ size_t read_buf_size, correct_buf_size;
+ hid_t file_id = -1, dset_id = -1, plist_id = -1;
+ hid_t filespace = -1, memspace = -1;
+
+ if (MAINPROCESS) HDputs("Testing read from filtered chunks with all processes having no selection");
+
+ CHECK_CUR_FILTER_AVAIL();
+
+ dataset_dims[0] = (hsize_t) READ_ALL_NO_SELECTION_FILTERED_CHUNKS_NROWS;
+ dataset_dims[1] = (hsize_t) READ_ALL_NO_SELECTION_FILTERED_CHUNKS_NCOLS;
+
+ /* Setup the buffer for writing and for comparison */
+ correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*correct_buf);
+
+ correct_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ VRFY((NULL != correct_buf), "HDcalloc succeeded");
+
+ if (MAINPROCESS) {
+ plist_id = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((plist_id >= 0), "FAPL creation succeeded");
+
+ VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
+ "Set libver bounds succeeded");
+
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id);
+ VRFY((file_id >= 0), "Test file open succeeded");
+
+ VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+
+ /* Create the dataspace for the dataset */
+ filespace = H5Screate_simple(READ_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, dataset_dims, NULL);
+ VRFY((filespace >= 0), "File dataspace creation succeeded");
+
+ /* Create chunked dataset */
+ chunk_dims[0] = (hsize_t) READ_ALL_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS;
+ chunk_dims[1] = (hsize_t) READ_ALL_NO_SELECTION_FILTERED_CHUNKS_CH_NCOLS;
+
+ plist_id = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY((plist_id >= 0), "DCPL creation succeeded");
+
+ VRFY((H5Pset_chunk(plist_id, READ_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, chunk_dims) >= 0),
+ "Chunk size set");
+
+ /* Add test filter to the pipeline */
+ VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
+
+ dset_id = H5Dcreate2(file_id, READ_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
+ H5P_DEFAULT, plist_id, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset creation succeeded");
+
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+
+ VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, H5P_DEFAULT, correct_buf) >= 0),
+ "Dataset write succeeded");
+
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
+ }
+
+ /* Set up file access property list with parallel I/O access */
+ plist_id = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((plist_id >= 0), "FAPL creation succeeded");
+
+ VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0),
+ "Set FAPL MPIO succeeded");
+
+ VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
+ "Set libver bounds succeeded");
+
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, plist_id);
+ VRFY((file_id >= 0), "Test file open succeeded");
+
+ VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+
+ dset_id = H5Dopen2(file_id, "/" READ_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset open succeeded");
+
+ sel_dims[0] = sel_dims[1] = 0;
+
+ memspace = H5Screate_simple(READ_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, sel_dims, NULL);
+ VRFY((memspace >= 0), "Memory dataspace creation succeeded");
+
+ /* Select hyperslab in the file */
+ filespace = H5Dget_space(dset_id);
+ VRFY((filespace >= 0), "File dataspace retrieval succeeded");
+
+ VRFY((H5Sselect_none(filespace) >= 0), "Select none succeeded");
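+
+ /*
+ * With an empty selection on every rank, the collective read below is
+ * effectively a no-op; the point of this test is only that it completes
+ * without errors or assertion failures.
+ */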
+
+ /* Create property list for collective dataset read */
+ plist_id = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((plist_id >= 0), "DXPL creation succeeded");
+
+ VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0),
+ "Set DXPL MPIO succeeded");
+
+ read_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*read_buf);
+
+ read_buf = (C_DATATYPE *) HDcalloc(1, read_buf_size);
+ VRFY((NULL != read_buf), "HDcalloc succeeded");
+
+ VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, read_buf) >= 0),
+ "Dataset read succeeded");
+
+ if (read_buf) HDfree(read_buf);
+ if (correct_buf) HDfree(correct_buf);
+
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+ VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
+ VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded");
+ VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
+
+ return;
+}
+
+/*
+ * Tests parallel read of filtered data by using point
+ * selections instead of hyperslab selections.
+ *
+ * The MAINPROCESS rank will first write out all of the
+ * data to the dataset. Then, each rank will read part
+ * of the dataset using a point selection and will
+ * contribute its piece to a global buffer that is
+ * checked for consistency.
+ *
+ * Programmer: Jordan Henderson
+ * 05/15/2018
+ */
+static void
+test_read_filtered_dataset_point_selection(void)
+{
+ C_DATATYPE *correct_buf = NULL;
+ C_DATATYPE *read_buf = NULL;
+ C_DATATYPE *global_buf = NULL;
+ hsize_t *coords = NULL;
+ hsize_t dataset_dims[READ_POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS];
+ hsize_t chunk_dims[READ_POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS];
+ hsize_t sel_dims[READ_POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS];
+ hsize_t flat_dims[1];
+ size_t i, j, read_buf_size, correct_buf_size;
+ size_t num_points;
+ hid_t file_id = -1, dset_id = -1, plist_id = -1;
+ hid_t filespace = -1, memspace = -1;
+ int *recvcounts = NULL;
+ int *displs = NULL;
+
+ if (MAINPROCESS) HDputs("Testing read from filtered chunks with point selection");
+
+ CHECK_CUR_FILTER_AVAIL();
+
+ dataset_dims[0] = (hsize_t) READ_POINT_SELECTION_FILTERED_CHUNKS_NROWS;
+ dataset_dims[1] = (hsize_t) READ_POINT_SELECTION_FILTERED_CHUNKS_NCOLS;
+
+ /* Setup the buffer for writing and for comparison */
+ correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*correct_buf);
+
+ correct_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ VRFY((NULL != correct_buf), "HDcalloc succeeded");
+
+ for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++)
+ correct_buf[i] = (C_DATATYPE) (
+ (dataset_dims[1] * (i / ((hsize_t) mpi_size * dataset_dims[1])))
+ + (i % dataset_dims[1])
+ + (((i % ((hsize_t) mpi_size * dataset_dims[1])) / dataset_dims[1]) % dataset_dims[1])
+ );
+
+ if (MAINPROCESS) {
+ plist_id = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((plist_id >= 0), "FAPL creation succeeded");
+
+ VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
+ "Set libver bounds succeeded");
+
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id);
+ VRFY((file_id >= 0), "Test file open succeeded");
+
+ VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+
+ /* Create the dataspace for the dataset */
+ filespace = H5Screate_simple(READ_POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, dataset_dims, NULL);
+ VRFY((filespace >= 0), "File dataspace creation succeeded");
+
+ /* Create chunked dataset */
+ chunk_dims[0] = (hsize_t) READ_POINT_SELECTION_FILTERED_CHUNKS_CH_NROWS;
+ chunk_dims[1] = (hsize_t) READ_POINT_SELECTION_FILTERED_CHUNKS_CH_NCOLS;
+
+ plist_id = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY((plist_id >= 0), "DCPL creation succeeded");
+
+ VRFY((H5Pset_chunk(plist_id, READ_POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS, chunk_dims) >= 0),
+ "Chunk size set");
+
+ /* Add test filter to the pipeline */
+ VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
+
+ dset_id = H5Dcreate2(file_id, READ_POINT_SELECTION_FILTERED_CHUNKS_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
+ H5P_DEFAULT, plist_id, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset creation succeeded");
+
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+
+ VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, H5P_DEFAULT, correct_buf) >= 0),
+ "Dataset write succeeded");
+
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
+ }
+
+ /* Set up file access property list with parallel I/O access */
+ plist_id = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((plist_id >= 0), "FAPL creation succeeded");
+
+ VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0),
+ "Set FAPL MPIO succeeded");
+
+ VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
+ "Set libver bounds succeeded");
+
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, plist_id);
+ VRFY((file_id >= 0), "Test file open succeeded");
+
+ VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+
+ dset_id = H5Dopen2(file_id, "/" READ_POINT_SELECTION_FILTERED_CHUNKS_DATASET_NAME, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset open succeeded");
+
+ sel_dims[0] = (hsize_t) READ_POINT_SELECTION_FILTERED_CHUNKS_NROWS / (hsize_t) mpi_size;
+ sel_dims[1] = (hsize_t) READ_POINT_SELECTION_FILTERED_CHUNKS_NCOLS;
+
+ /* Setup one-dimensional memory dataspace for reading the dataset data into a contiguous buffer */
+ flat_dims[0] = sel_dims[0] * sel_dims[1];
+
+ memspace = H5Screate_simple(1, flat_dims, NULL);
+ VRFY((memspace >= 0), "Memory dataspace creation succeeded");
+
+ /* Set up point selection */
+ filespace = H5Dget_space(dset_id);
+ VRFY((filespace >= 0), "File dataspace retrieval succeeded");
+
+ num_points = (hsize_t) READ_POINT_SELECTION_FILTERED_CHUNKS_NROWS * (hsize_t) READ_POINT_SELECTION_FILTERED_CHUNKS_NCOLS / (hsize_t) mpi_size;
+ coords = (hsize_t *) HDcalloc(1, 2 * num_points * sizeof(*coords));
+ VRFY((NULL != coords), "Coords HDcalloc succeeded");
+
+ for (i = 0; i < num_points; i++)
+ for (j = 0; j < READ_POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS; j++)
+ coords[(i * READ_POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS) + j] =
+ (j > 0) ? (i % (hsize_t) READ_POINT_SELECTION_FILTERED_CHUNKS_NCOLS)
+ : ((hsize_t) mpi_rank + ((hsize_t) mpi_size * (i / (hsize_t) READ_POINT_SELECTION_FILTERED_CHUNKS_NCOLS)));
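+
+ /*
+ * Point i for this rank is thus
+ * ( mpi_rank + mpi_size * (i / NCOLS), i % NCOLS ):
+ * the ranks' point selections walk the dataset rows round-robin, each
+ * rank taking one full row out of every mpi_size rows.
+ */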
+
+ VRFY((H5Sselect_elements(filespace, H5S_SELECT_SET, (hsize_t) num_points, (const hsize_t *) coords) >= 0),
+ "Point selection succeeded");
+
+ /* Create property list for collective dataset read */
+ plist_id = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((plist_id >= 0), "DXPL creation succeeded");
+
+ VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0),
+ "Set DXPL MPIO succeeded");
+
+ read_buf_size = flat_dims[0] * sizeof(*read_buf);
+
+ read_buf = (C_DATATYPE *) HDcalloc(1, read_buf_size);
+ VRFY((NULL != read_buf), "HDcalloc succeeded");
+
+ VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, read_buf) >= 0),
+ "Dataset read succeeded");
+
+ global_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ VRFY((NULL != global_buf), "HDcalloc succeeded");
+
+ /*
+ * Since these chunks are shared, run multiple rounds of MPI_Allgatherv
+ * to collect all of the pieces into their appropriate locations. The
+ * number of times MPI_Allgatherv is run should be equal to the number
+ * of chunks in the first dimension of the dataset.
+ */
+ {
+ size_t original_loop_count = dataset_dims[0] / (hsize_t) mpi_size;
+ size_t cur_loop_count = original_loop_count;
+ size_t total_recvcounts = 0;
+
+ recvcounts = (int *) HDcalloc(1, (size_t) mpi_size * sizeof(*recvcounts));
+ VRFY((NULL != recvcounts), "HDcalloc succeeded");
+
+ displs = (int *) HDcalloc(1, (size_t) mpi_size * sizeof(*displs));
+ VRFY((NULL != displs), "HDcalloc succeeded");
+
+ for (i = 0; i < (size_t) mpi_size; i++) {
+ recvcounts[i] = (int) dataset_dims[1];
+ total_recvcounts += (size_t) recvcounts[i];
+ }
+
+ for (i = 0; i < (size_t) mpi_size; i++)
+ displs[i] = (int) (i * dataset_dims[1]);
+
+ for (; cur_loop_count; cur_loop_count--) {
+ VRFY((MPI_SUCCESS == MPI_Allgatherv(&read_buf[(original_loop_count - cur_loop_count) * dataset_dims[1]], recvcounts[mpi_rank], C_DATATYPE_MPI,
+ &global_buf[(original_loop_count - cur_loop_count) * total_recvcounts], recvcounts, displs, C_DATATYPE_MPI, comm)),
+ "MPI_Allgatherv succeeded");
+ }
+ }
+
+ VRFY((0 == HDmemcmp(global_buf, correct_buf, correct_buf_size)),
+ "Data verification succeeded");
+
+ if (displs) HDfree(displs);
+ if (recvcounts) HDfree(recvcounts);
+ if (global_buf) HDfree(global_buf);
+ if (read_buf) HDfree(read_buf);
+ if (correct_buf) HDfree(correct_buf);
+
+ HDfree(coords);
+
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+ VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
+ VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded");
+ VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
+
+ return;
+}
+
+/*
+ * Tests parallel read of filtered data in the case where
+ * each process reads an equal amount of data from each
+ * chunk in the dataset. Each chunk is distributed among the
+ * processes in round-robin fashion by blocks of size 1 until
+ * the whole chunk is selected, leading to an interleaved
+ * read pattern.
+ *
+ * The MAINPROCESS rank will first write out all of the
+ * data to the dataset. Then, each rank will read part
+ * of each chunk of the dataset and will contribute its
+ * pieces to a global buffer that is checked for consistency.
+ *
+ * Programmer: Jordan Henderson
+ * 05/15/2018
+ */
+static void
+test_read_filtered_dataset_interleaved_read(void)
+{
+ C_DATATYPE *read_buf = NULL;
+ C_DATATYPE *correct_buf = NULL;
+ C_DATATYPE *global_buf = NULL;
+ hsize_t dataset_dims[INTERLEAVED_READ_FILTERED_DATASET_DIMS];
+ hsize_t chunk_dims[INTERLEAVED_READ_FILTERED_DATASET_DIMS];
+ hsize_t sel_dims[INTERLEAVED_READ_FILTERED_DATASET_DIMS];
+ hsize_t start[INTERLEAVED_READ_FILTERED_DATASET_DIMS];
+ hsize_t stride[INTERLEAVED_READ_FILTERED_DATASET_DIMS];
+ hsize_t count[INTERLEAVED_READ_FILTERED_DATASET_DIMS];
+ hsize_t block[INTERLEAVED_READ_FILTERED_DATASET_DIMS];
+ hsize_t flat_dims[1];
+ size_t i, read_buf_size, correct_buf_size;
+ hid_t file_id = -1, dset_id = -1, plist_id = -1;
+ hid_t filespace = -1, memspace = -1;
+ int *recvcounts = NULL;
+ int *displs = NULL;
+
+ if (MAINPROCESS) HDputs("Testing interleaved read from filtered chunks");
+
+ CHECK_CUR_FILTER_AVAIL();
+
+ dataset_dims[0] = (hsize_t) INTERLEAVED_READ_FILTERED_DATASET_NROWS;
+ dataset_dims[1] = (hsize_t) INTERLEAVED_READ_FILTERED_DATASET_NCOLS;
+
+ /* Setup the buffer for writing and for comparison */
+ correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*correct_buf);
+
+ correct_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ VRFY((NULL != correct_buf), "HDcalloc succeeded");
+
+ for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++)
+ /* Add Column Index */
+ correct_buf[i] =
+ (C_DATATYPE) (
+ (i % (hsize_t) INTERLEAVED_READ_FILTERED_DATASET_NCOLS)
+
+ /* Add the Row Index */
+ + ((i % (hsize_t) (mpi_size * INTERLEAVED_READ_FILTERED_DATASET_NCOLS)) / (hsize_t) INTERLEAVED_READ_FILTERED_DATASET_NCOLS)
+
+ /* Add the amount that gets added when a rank moves down to its next section vertically in the dataset */
+ + ((hsize_t) INTERLEAVED_READ_FILTERED_DATASET_NCOLS * (i / (hsize_t) (mpi_size * INTERLEAVED_READ_FILTERED_DATASET_NCOLS)))
+ );
+
+ if (MAINPROCESS) {
+ plist_id = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((plist_id >= 0), "FAPL creation succeeded");
+
+ VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
+ "Set libver bounds succeeded");
+
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id);
+ VRFY((file_id >= 0), "Test file open succeeded");
+
+ VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+
+ /* Create the dataspace for the dataset */
+ filespace = H5Screate_simple(INTERLEAVED_READ_FILTERED_DATASET_DIMS, dataset_dims, NULL);
+ VRFY((filespace >= 0), "File dataspace creation succeeded");
+
+ /* Create chunked dataset */
+ chunk_dims[0] = (hsize_t) INTERLEAVED_READ_FILTERED_DATASET_CH_NROWS;
+ chunk_dims[1] = (hsize_t) INTERLEAVED_READ_FILTERED_DATASET_CH_NCOLS;
+
+ plist_id = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY((plist_id >= 0), "DCPL creation succeeded");
+
+ VRFY((H5Pset_chunk(plist_id, INTERLEAVED_READ_FILTERED_DATASET_DIMS, chunk_dims) >= 0),
+ "Chunk size set");
+
+ /* Add test filter to the pipeline */
+ VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
+
+ dset_id = H5Dcreate2(file_id, INTERLEAVED_READ_FILTERED_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
+ H5P_DEFAULT, plist_id, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset creation succeeded");
+
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+
+ VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, H5P_DEFAULT, correct_buf) >= 0),
+ "Dataset write succeeded");
+
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
+ }
+
+ /* Set up file access property list with parallel I/O access */
+ plist_id = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((plist_id >= 0), "FAPL creation succeeded");
+
+ VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0),
+ "Set FAPL MPIO succeeded");
+
+ VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
+ "Set libver bounds succeeded");
+
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, plist_id);
+ VRFY((file_id >= 0), "Test file open succeeded");
+
+ VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+
+ dset_id = H5Dopen2(file_id, "/" INTERLEAVED_READ_FILTERED_DATASET_NAME, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset open succeeded");
+
+ sel_dims[0] = (hsize_t) (INTERLEAVED_READ_FILTERED_DATASET_NROWS / mpi_size);
+ sel_dims[1] = (hsize_t) INTERLEAVED_READ_FILTERED_DATASET_NCOLS;
+
+ /* Setup one-dimensional memory dataspace for reading the dataset data into a contiguous buffer */
+ flat_dims[0] = sel_dims[0] * sel_dims[1];
+
+ memspace = H5Screate_simple(1, flat_dims, NULL);
+ VRFY((memspace >= 0), "Memory dataspace creation succeeded");
+
+ /* Select hyperslab in the file */
+ filespace = H5Dget_space(dset_id);
+ VRFY((filespace >= 0), "File dataspace retrieval succeeded");
+
+ /*
+ * Each process defines the dataset selection in the file and
+ * reads it into the selection in memory
+ */
+ count[0] = (hsize_t) (INTERLEAVED_READ_FILTERED_DATASET_NROWS / INTERLEAVED_READ_FILTERED_DATASET_CH_NROWS);
+ count[1] = (hsize_t) (INTERLEAVED_READ_FILTERED_DATASET_NCOLS / INTERLEAVED_READ_FILTERED_DATASET_CH_NCOLS);
+ stride[0] = (hsize_t) INTERLEAVED_READ_FILTERED_DATASET_CH_NROWS;
+ stride[1] = (hsize_t) INTERLEAVED_READ_FILTERED_DATASET_CH_NCOLS;
+ block[0] = 1;
+ block[1] = (hsize_t) INTERLEAVED_READ_FILTERED_DATASET_CH_NCOLS;
+ start[0] = (hsize_t) mpi_rank;
+ start[1] = 0;
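+
+ /*
+ * With a block size of 1 row and a stride of one chunk in dimension 0,
+ * rank r selects the single row at offset r within each chunk-row of
+ * the dataset, so chunks are shared by the participating ranks and the
+ * read pattern is interleaved.
+ */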
+
+ if (VERBOSE_MED) {
+ HDprintf("Process %d is reading with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n",
+ mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
+ HDfflush(stdout);
+ }
+
+ VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
+ "Hyperslab selection succeeded");
+
+ /* Create property list for collective dataset read */
+ plist_id = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((plist_id >= 0), "DXPL creation succeeded");
+
+ VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0),
+ "Set DXPL MPIO succeeded");
+
+ read_buf_size = flat_dims[0] * sizeof(*read_buf);
+
+ read_buf = (C_DATATYPE *) HDcalloc(1, read_buf_size);
+ VRFY((NULL != read_buf), "HDcalloc succeeded");
+
+ VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, read_buf) >= 0),
+ "Dataset read succeeded");
+
+ global_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ VRFY((NULL != global_buf), "HDcalloc succeeded");
+
+ /*
+ * Since these chunks are shared, run multiple rounds of MPI_Allgatherv
+ * to collect all of the pieces into their appropriate locations. The
+ * number of times MPI_Allgatherv is run should be equal to the number
+ * of chunks in the first dimension of the dataset.
+ */
+ {
+ size_t loop_count = count[0];
+ size_t total_recvcounts = 0;
+
+ recvcounts = (int *) HDcalloc(1, (size_t) mpi_size * sizeof(*recvcounts));
+ VRFY((NULL != recvcounts), "HDcalloc succeeded");
+
+ displs = (int *) HDcalloc(1, (size_t) mpi_size * sizeof(*displs));
+ VRFY((NULL != displs), "HDcalloc succeeded");
+
+ for (i = 0; i < (size_t) mpi_size; i++) {
+ recvcounts[i] = (int) dataset_dims[1];
+ total_recvcounts += (size_t) recvcounts[i];
+ }
+
+ for (i = 0; i < (size_t) mpi_size; i++)
+ displs[i] = (int) (i * dataset_dims[1]);
+
+ for (; loop_count; loop_count--) {
+ VRFY((MPI_SUCCESS == MPI_Allgatherv(&read_buf[(count[0] - loop_count) * dataset_dims[1]], recvcounts[mpi_rank], C_DATATYPE_MPI,
+ &global_buf[(count[0] - loop_count) * total_recvcounts], recvcounts, displs, C_DATATYPE_MPI, comm)),
+ "MPI_Allgatherv succeeded");
+ }
+ }
+
+ VRFY((0 == HDmemcmp(global_buf, correct_buf, correct_buf_size)),
+ "Data verification succeeded");
+
+ if (displs) HDfree(displs);
+ if (recvcounts) HDfree(recvcounts);
+ if (global_buf) HDfree(global_buf);
+ if (read_buf) HDfree(read_buf);
+ if (correct_buf) HDfree(correct_buf);
+
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+ VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
+ VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded");
+ VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
+
+ return;
+}
+
+/*
+ * Tests parallel read of filtered data in the case where
+ * the dataset has 3 dimensions and each process reads from
+ * its own "page" in the 3rd dimension.
+ *
+ * The MAINPROCESS rank will first write out all of the
+ * data to the dataset. Then, each rank reads its own "page"
+ * of the dataset and contributes its piece to a global buffer
+ * that is checked for consistency.
+ *
+ * Programmer: Jordan Henderson
+ * 05/16/2018
+ */
+static void
+test_read_3d_filtered_dataset_no_overlap_separate_pages(void)
+{
+ MPI_Datatype vector_type;
+ MPI_Datatype resized_vector_type;
+ C_DATATYPE *read_buf = NULL;
+ C_DATATYPE *correct_buf = NULL;
+ C_DATATYPE *global_buf = NULL;
+ hsize_t dataset_dims[READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS];
+ hsize_t chunk_dims[READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS];
+ hsize_t sel_dims[READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS];
+ hsize_t start[READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS];
+ hsize_t stride[READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS];
+ hsize_t count[READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS];
+ hsize_t block[READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS];
+ hsize_t flat_dims[1];
+ size_t i, read_buf_size, correct_buf_size;
+ hid_t file_id = -1, dset_id = -1, plist_id = -1;
+ hid_t filespace = -1, memspace = -1;
+
+ if (MAINPROCESS) HDputs("Testing read from unshared filtered chunks on separate pages in 3D dataset");
+
+ CHECK_CUR_FILTER_AVAIL();
+
+ dataset_dims[0] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NROWS;
+ dataset_dims[1] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NCOLS;
+ dataset_dims[2] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DEPTH;
+
+ /* Setup the buffer for writing and for comparison */
+ correct_buf_size = dataset_dims[0] * dataset_dims[1] * dataset_dims[2] * sizeof(*correct_buf);
+
+ correct_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ VRFY((NULL != correct_buf), "HDcalloc succeeded");
+
+ for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++)
+ correct_buf[i] = (C_DATATYPE) ((i % (hsize_t) mpi_size) + (i / (hsize_t) mpi_size));
+
+ if (MAINPROCESS) {
+ plist_id = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((plist_id >= 0), "FAPL creation succeeded");
+
+ VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
+ "Set libver bounds succeeded");
+
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id);
+ VRFY((file_id >= 0), "Test file open succeeded");
+
+ VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+
+ /* Create the dataspace for the dataset */
+ filespace = H5Screate_simple(READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS, dataset_dims, NULL);
+ VRFY((filespace >= 0), "File dataspace creation succeeded");
+
+ /* Create chunked dataset */
+ chunk_dims[0] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NROWS;
+ chunk_dims[1] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NCOLS;
+ chunk_dims[2] = 1;
+
+ plist_id = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY((plist_id >= 0), "DCPL creation succeeded");
+
+ VRFY((H5Pset_chunk(plist_id, READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS, chunk_dims) >= 0),
+ "Chunk size set");
+
+ /* Add test filter to the pipeline */
+ VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
+
+ dset_id = H5Dcreate2(file_id, READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
+ H5P_DEFAULT, plist_id, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset creation succeeded");
+
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+
+ VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, H5P_DEFAULT, correct_buf) >= 0),
+ "Dataset write succeeded");
+
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
+ }
+
+ /* Set up file access property list with parallel I/O access */
+ plist_id = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((plist_id >= 0), "FAPL creation succeeded");
+
+ VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0),
+ "Set FAPL MPIO succeeded");
+
+ VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
+ "Set libver bounds succeeded");
+
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, plist_id);
+ VRFY((file_id >= 0), "Test file open succeeded");
+
+ VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+
+ dset_id = H5Dopen2(file_id, "/" READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_NAME, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset open succeeded");
+
+ sel_dims[0] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NROWS;
+ sel_dims[1] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NCOLS;
+ sel_dims[2] = 1;
+
+ /* Setup one-dimensional memory dataspace for reading the dataset data into a contiguous buffer */
+ flat_dims[0] = sel_dims[0] * sel_dims[1] * sel_dims[2];
+
+ memspace = H5Screate_simple(1, flat_dims, NULL);
+ VRFY((memspace >= 0), "Memory dataspace creation succeeded");
+
+ /* Select hyperslab in the file */
+ filespace = H5Dget_space(dset_id);
+ VRFY((filespace >= 0), "File dataspace retrieval succeeded");
+
+ /*
+ * Each process defines the dataset selection in the file and
+ * reads it into the selection in memory
+ */
+ count[0] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NROWS / (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NROWS;
+ count[1] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NCOLS / (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NCOLS;
+ count[2] = 1;
+ stride[0] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NROWS;
+ stride[1] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NCOLS;
+ stride[2] = 1;
+ block[0] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NROWS;
+ block[1] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NCOLS;
+ block[2] = 1;
+ start[0] = 0;
+ start[1] = 0;
+ start[2] = (hsize_t) mpi_rank;
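+
+ /*
+ * Selecting a block of depth 1 at start[2] == mpi_rank restricts each
+ * rank to its own "page" of the 3D dataset, so no chunk is shared
+ * between ranks.
+ */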
+
+ if (VERBOSE_MED) {
+ HDprintf("Process %d is reading with count[ %llu, %llu, %llu ], stride[ %llu, %llu, %llu ], start[ %llu, %llu, %llu ], block size[ %llu, %llu, %llu ]\n",
+ mpi_rank, count[0], count[1], count[2], stride[0], stride[1], stride[2], start[0], start[1], start[2], block[0], block[1], block[2]);
+ HDfflush(stdout);
+ }
+
+ VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
+ "Hyperslab selection succeeded");
+
+ /* Create property list for collective dataset read */
+ plist_id = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((plist_id >= 0), "DXPL creation succeeded");
+
+ VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0),
+ "Set DXPL MPIO succeeded");
+
+ read_buf_size = flat_dims[0] * sizeof(*read_buf);
+
+ read_buf = (C_DATATYPE *) HDcalloc(1, read_buf_size);
+ VRFY((NULL != read_buf), "HDcalloc succeeded");
+
+ VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, read_buf) >= 0),
+ "Dataset read succeeded");
+
+ global_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ VRFY((NULL != global_buf), "HDcalloc succeeded");
+
+ /*
+ * Due to the nature of 3-dimensional reading, create an MPI vector type that scatters each
+ * rank's data to every mpi_size-th position of the global data buffer, starting at
+ * position n, where n is the rank number.
+ */
+ VRFY((MPI_SUCCESS == MPI_Type_vector((int) flat_dims[0], 1, mpi_size, C_DATATYPE_MPI, &vector_type)),
+ "MPI_Type_vector succeeded");
+ VRFY((MPI_SUCCESS == MPI_Type_commit(&vector_type)), "MPI_Type_commit succeeded");
+
+ /*
+ * Resize the type so that its extent is only one MPI_LONG wide,
+ * allowing the contributions from each rank to interleave
+ */
+ VRFY((MPI_SUCCESS == MPI_Type_create_resized(vector_type, 0, sizeof(long), &resized_vector_type)),
+ "MPI_Type_create_resized succeeded");
+ VRFY((MPI_SUCCESS == MPI_Type_commit(&resized_vector_type)), "MPI_Type_commit succeeded");
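+
+ /*
+ * A sketch of the resulting layout, e.g. for 3 ranks (r0, r1, r2): the
+ * resized receive type has an extent of one element, so rank n's
+ * contiguous read_buf lands in every mpi_size-th slot of global_buf,
+ * starting at offset n:
+ *
+ * global_buf: [ r0 r1 r2 r0 r1 r2 r0 r1 r2 ... ]
+ */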
+
+ VRFY((MPI_SUCCESS == MPI_Allgather(read_buf, (int) flat_dims[0], C_DATATYPE_MPI, global_buf, 1, resized_vector_type, comm)),
+ "MPI_Allgather succeeded");
+
+ VRFY((0 == HDmemcmp(global_buf, correct_buf, correct_buf_size)),
+ "Data verification succeeded");
+
+ VRFY((MPI_SUCCESS == MPI_Type_free(&vector_type)), "MPI_Type_free succeeded");
+ VRFY((MPI_SUCCESS == MPI_Type_free(&resized_vector_type)), "MPI_Type_free succeeded");
+
+ if (global_buf) HDfree(global_buf);
+ if (read_buf) HDfree(read_buf);
+ if (correct_buf) HDfree(correct_buf);
+
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+ VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
+ VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded");
+ VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
+
+ return;
+}
+
+/*
+ * Tests parallel read of filtered data in the case where
+ * the dataset has 3 dimensions and each process reads from
+ * each "page" in the 3rd dimension. However, no chunk on a
+ * given "page" is read from by more than one process.
+ *
+ * The MAINPROCESS rank will first write out all of the
+ * data to the dataset. Then, each rank reads a part of
+ * each "page" of the dataset and contributes its piece to a
+ * global buffer that is checked for consistency.
+ *
+ * Programmer: Jordan Henderson
+ * 05/16/2018
+ */
+static void
+test_read_3d_filtered_dataset_no_overlap_same_pages(void)
+{
+ C_DATATYPE *read_buf = NULL;
+ C_DATATYPE *correct_buf = NULL;
+ C_DATATYPE *global_buf = NULL;
+ hsize_t dataset_dims[READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS];
+ hsize_t chunk_dims[READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS];
+ hsize_t sel_dims[READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS];
+ hsize_t start[READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS];
+ hsize_t stride[READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS];
+ hsize_t count[READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS];
+ hsize_t block[READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS];
+ hsize_t flat_dims[1];
+ size_t i, read_buf_size, correct_buf_size;
+ hid_t file_id = -1, dset_id = -1, plist_id = -1;
+ hid_t filespace = -1, memspace = -1;
+ int *recvcounts = NULL;
+ int *displs = NULL;
+
+ if (MAINPROCESS) HDputs("Testing read from unshared filtered chunks on the same pages in 3D dataset");
+
+ CHECK_CUR_FILTER_AVAIL();
+
+ dataset_dims[0] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_NROWS;
+ dataset_dims[1] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_NCOLS;
+ dataset_dims[2] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DEPTH;
+
+ /* Setup the buffer for writing and for comparison */
+ correct_buf_size = dataset_dims[0] * dataset_dims[1] * dataset_dims[2] * sizeof(*correct_buf);
+
+ correct_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ VRFY((NULL != correct_buf), "HDcalloc succeeded");
+
+ for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++)
+ correct_buf[i] = (C_DATATYPE) (
+ (i % (dataset_dims[0] * dataset_dims[1]))
+ + (i / (dataset_dims[0] * dataset_dims[1]))
+ );
+
+ if (MAINPROCESS) {
+ plist_id = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((plist_id >= 0), "FAPL creation succeeded");
+
+ VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
+ "Set libver bounds succeeded");
+
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id);
+ VRFY((file_id >= 0), "Test file open succeeded");
+
+ VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+
+ /* Create the dataspace for the dataset */
+ filespace = H5Screate_simple(READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS, dataset_dims, NULL);
+ VRFY((filespace >= 0), "File dataspace creation succeeded");
+
+ /* Create chunked dataset */
+ chunk_dims[0] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NROWS;
+ chunk_dims[1] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NCOLS;
+ chunk_dims[2] = 1;
+
+ plist_id = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY((plist_id >= 0), "DCPL creation succeeded");
+
+ VRFY((H5Pset_chunk(plist_id, READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS, chunk_dims) >= 0),
+ "Chunk size set");
+
+ /* Add test filter to the pipeline */
+ VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
+
+ dset_id = H5Dcreate2(file_id, READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
+ H5P_DEFAULT, plist_id, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset creation succeeded");
+
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+
+ VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, H5P_DEFAULT, correct_buf) >= 0),
+ "Dataset write succeeded");
+
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
+ }
+
+ /* Set up file access property list with parallel I/O access */
+ plist_id = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((plist_id >= 0), "FAPL creation succeeded");
+
+ VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0),
+ "Set FAPL MPIO succeeded");
+
+ VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
+ "Set libver bounds succeeded");
+
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, plist_id);
+ VRFY((file_id >= 0), "Test file open succeeded");
+
+ VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+
+ dset_id = H5Dopen2(file_id, "/" READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_NAME, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset open succeeded");
+
+ sel_dims[0] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NROWS;
+ sel_dims[1] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_NCOLS;
+ sel_dims[2] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DEPTH;
+
+ /* Setup one-dimensional memory dataspace for reading the dataset data into a contiguous buffer */
+ flat_dims[0] = sel_dims[0] * sel_dims[1] * sel_dims[2];
+
+ memspace = H5Screate_simple(1, flat_dims, NULL);
+ VRFY((memspace >= 0), "Memory dataspace creation succeeded");
+
+ /* Select hyperslab in the file */
+ filespace = H5Dget_space(dset_id);
+ VRFY((filespace >= 0), "File dataspace retrieval succeeded");
+
+ /*
+ * Each process defines the dataset selection in the file and
+ * reads it to the selection in memory
+ */
+ count[0] = 1;
+ count[1] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_NCOLS / (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NCOLS;
+ count[2] = (hsize_t) mpi_size;
+ stride[0] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NROWS;
+ stride[1] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NCOLS;
+ stride[2] = 1;
+ block[0] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NROWS;
+ block[1] = (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NCOLS;
+ block[2] = 1;
+ start[0] = ((hsize_t) mpi_rank * (hsize_t) READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NROWS * count[0]);
+ start[1] = 0;
+ start[2] = 0;
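+
+    /*
+     * This selection gives each rank one CH_NROWS-row band of whole chunks
+     * (count[0] = 1), spanning every chunk column and the first mpi_size
+     * positions along the 3rd dimension; the bands are offset vertically by
+     * rank, so no chunk is read by more than one rank.
+     */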
+
+ if (VERBOSE_MED) {
+ HDprintf("Process %d is reading with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n",
+ mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
+ HDfflush(stdout);
+ }
+
+ VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
+ "Hyperslab selection succeeded");
+
+ /* Create property list for collective dataset read */
+ plist_id = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((plist_id >= 0), "DXPL creation succeeded");
+
+ VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0),
+ "Set DXPL MPIO succeeded");
+
+ read_buf_size = flat_dims[0] * sizeof(*read_buf);
+
+ read_buf = (C_DATATYPE *) HDcalloc(1, read_buf_size);
+ VRFY((NULL != read_buf), "HDcalloc succeeded");
+
+ VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, read_buf) >= 0),
+ "Dataset read succeeded");
+
+ global_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ VRFY((NULL != global_buf), "HDcalloc succeeded");
+
+ /* Collect each piece of data from all ranks into a global buffer on all ranks */
+ recvcounts = (int *) HDcalloc(1, (size_t) mpi_size * sizeof(*recvcounts));
+ VRFY((NULL != recvcounts), "HDcalloc succeeded");
+
+ for (i = 0; i < (size_t) mpi_size; i++)
+ recvcounts[i] = (int) flat_dims[0];
+
+ displs = (int *) HDcalloc(1, (size_t) mpi_size * sizeof(*displs));
+ VRFY((NULL != displs), "HDcalloc succeeded");
+
+ for (i = 0; i < (size_t) mpi_size; i++)
+ displs[i] = (int) (i * flat_dims[0]);
+
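+    /*
+     * Every rank contributes the same number of elements, so recvcounts and
+     * displs describe mpi_size equal-sized, contiguous pieces that
+     * MPI_Allgatherv concatenates in rank order.
+     */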
+ VRFY((MPI_SUCCESS == MPI_Allgatherv(read_buf, (int) flat_dims[0], C_DATATYPE_MPI, global_buf, recvcounts, displs, C_DATATYPE_MPI, comm)),
+ "MPI_Allgatherv succeeded");
+
+ VRFY((0 == HDmemcmp(global_buf, correct_buf, correct_buf_size)),
+ "Data verification succeeded");
+
+ if (displs) HDfree(displs);
+ if (recvcounts) HDfree(recvcounts);
+ if (global_buf) HDfree(global_buf);
+ if (read_buf) HDfree(read_buf);
+ if (correct_buf) HDfree(correct_buf);
+
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+ VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
+ VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded");
+ VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
+
+ return;
+}
+
+/*
+ * Tests parallel read of filtered data in the case where
+ * the dataset has 3 dimensions and each process reads from
+ * each "page" in the 3rd dimension. Further, each chunk in
+ * each "page" is read from equally by all processes.
+ *
+ * The MAINPROCESS rank will first write out all of the
+ * data to the dataset. Then, each rank reads part of each
+ * chunk of each "page" and contributes its pieces to a
+ * global buffer that is checked for consistency.
+ *
+ * Programmer: Jordan Henderson
+ * 05/16/2018
+ */
+static void
+test_read_3d_filtered_dataset_overlap(void)
+{
+ MPI_Datatype vector_type;
+ MPI_Datatype resized_vector_type;
+ C_DATATYPE *read_buf = NULL;
+ C_DATATYPE *correct_buf = NULL;
+ C_DATATYPE *global_buf = NULL;
+ hsize_t dataset_dims[READ_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS];
+ hsize_t chunk_dims[READ_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS];
+ hsize_t sel_dims[READ_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS];
+ hsize_t start[READ_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS];
+ hsize_t stride[READ_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS];
+ hsize_t count[READ_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS];
+ hsize_t block[READ_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS];
+ hsize_t flat_dims[1];
+ size_t i, read_buf_size, correct_buf_size;
+ hid_t file_id = -1, dset_id = -1, plist_id = -1;
+ hid_t filespace = -1, memspace = -1;
+
+ if (MAINPROCESS) HDputs("Testing read from shared filtered chunks in 3D dataset");
+
+ CHECK_CUR_FILTER_AVAIL();
+
+ dataset_dims[0] = (hsize_t) READ_SHARED_FILTERED_CHUNKS_3D_NROWS;
+ dataset_dims[1] = (hsize_t) READ_SHARED_FILTERED_CHUNKS_3D_NCOLS;
+ dataset_dims[2] = (hsize_t) READ_SHARED_FILTERED_CHUNKS_3D_DEPTH;
+
+ /* Setup the buffer for writing and for comparison */
+ correct_buf_size = dataset_dims[0] * dataset_dims[1] * dataset_dims[2] * sizeof(*correct_buf);
+
+ correct_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ VRFY((NULL != correct_buf), "HDcalloc succeeded");
+
+ for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++)
+ /* Add the Column Index */
+ correct_buf[i] =
+ (C_DATATYPE) (
+ (i % (hsize_t) (READ_SHARED_FILTERED_CHUNKS_3D_DEPTH * READ_SHARED_FILTERED_CHUNKS_3D_NCOLS))
+
+ /* Add the Row Index */
+ + ((i % (hsize_t) (mpi_size * READ_SHARED_FILTERED_CHUNKS_3D_DEPTH * READ_SHARED_FILTERED_CHUNKS_3D_NCOLS))
+ / (hsize_t) (READ_SHARED_FILTERED_CHUNKS_3D_DEPTH * READ_SHARED_FILTERED_CHUNKS_3D_NCOLS))
+
+ /* Add the amount that gets added when a rank moves down to its next section vertically in the dataset */
+ + ((hsize_t) (READ_SHARED_FILTERED_CHUNKS_3D_DEPTH * READ_SHARED_FILTERED_CHUNKS_3D_NCOLS)
+ * (i / (hsize_t) (mpi_size * READ_SHARED_FILTERED_CHUNKS_3D_DEPTH * READ_SHARED_FILTERED_CHUNKS_3D_NCOLS)))
+ );
+
+ if (MAINPROCESS) {
+ plist_id = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((plist_id >= 0), "FAPL creation succeeded");
+
+ VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
+ "Set libver bounds succeeded");
+
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id);
+ VRFY((file_id >= 0), "Test file open succeeded");
+
+ VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+
+ /* Create the dataspace for the dataset */
+ filespace = H5Screate_simple(READ_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS, dataset_dims, NULL);
+ VRFY((filespace >= 0), "File dataspace creation succeeded");
+
+ /* Create chunked dataset */
+ chunk_dims[0] = (hsize_t) READ_SHARED_FILTERED_CHUNKS_3D_CH_NROWS;
+ chunk_dims[1] = (hsize_t) READ_SHARED_FILTERED_CHUNKS_3D_CH_NCOLS;
+ chunk_dims[2] = 1;
+
+ plist_id = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY((plist_id >= 0), "DCPL creation succeeded");
+
+ VRFY((H5Pset_chunk(plist_id, READ_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS, chunk_dims) >= 0),
+ "Chunk size set");
+
+ /* Add test filter to the pipeline */
+ VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
+
+ dset_id = H5Dcreate2(file_id, READ_SHARED_FILTERED_CHUNKS_3D_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
+ H5P_DEFAULT, plist_id, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset creation succeeded");
+
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+
+ VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, H5P_DEFAULT, correct_buf) >= 0),
+ "Dataset write succeeded");
+
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
+ }
+
+ /* Set up file access property list with parallel I/O access */
+ plist_id = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((plist_id >= 0), "FAPL creation succeeded");
+
+ VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0),
+ "Set FAPL MPIO succeeded");
+
+ VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
+ "Set libver bounds succeeded");
+
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, plist_id);
+ VRFY((file_id >= 0), "Test file open succeeded");
+
+ VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+
+ dset_id = H5Dopen2(file_id, "/" READ_SHARED_FILTERED_CHUNKS_3D_DATASET_NAME, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset open succeeded");
+
+ sel_dims[0] = (hsize_t) (READ_SHARED_FILTERED_CHUNKS_3D_NROWS / mpi_size);
+ sel_dims[1] = (hsize_t) READ_SHARED_FILTERED_CHUNKS_3D_NCOLS;
+ sel_dims[2] = (hsize_t) READ_SHARED_FILTERED_CHUNKS_3D_DEPTH;
+
+ /* Setup one-dimensional memory dataspace for reading the dataset data into a contiguous buffer */
+ flat_dims[0] = sel_dims[0] * sel_dims[1] * sel_dims[2];
+
+ memspace = H5Screate_simple(1, flat_dims, NULL);
+ VRFY((memspace >= 0), "Memory dataspace creation succeeded");
+
+ /* Select hyperslab in the file */
+ filespace = H5Dget_space(dset_id);
+ VRFY((filespace >= 0), "File dataspace retrieval succeeded");
+
+ /*
+ * Each process defines the dataset selection in the file and
+ * reads it to the selection in memory
+ */
+ count[0] = (hsize_t) (READ_SHARED_FILTERED_CHUNKS_3D_NROWS / READ_SHARED_FILTERED_CHUNKS_3D_CH_NROWS);
+ count[1] = (hsize_t) (READ_SHARED_FILTERED_CHUNKS_3D_NCOLS / READ_SHARED_FILTERED_CHUNKS_3D_CH_NCOLS);
+ count[2] = (hsize_t) READ_SHARED_FILTERED_CHUNKS_3D_DEPTH;
+ stride[0] = (hsize_t) READ_SHARED_FILTERED_CHUNKS_3D_CH_NROWS;
+ stride[1] = (hsize_t) READ_SHARED_FILTERED_CHUNKS_3D_CH_NCOLS;
+ stride[2] = 1;
+ block[0] = 1;
+ block[1] = (hsize_t) READ_SHARED_FILTERED_CHUNKS_3D_CH_NCOLS;
+ block[2] = 1;
+ start[0] = (hsize_t) mpi_rank;
+ start[1] = 0;
+ start[2] = 0;
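+
+    /*
+     * With block[0] = 1, stride[0] = CH_NROWS and start[0] = mpi_rank, each
+     * rank picks a single row out of every CH_NROWS-row chunk, so every
+     * chunk on every "page" is read by all of the ranks (shared chunks).
+     */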
+
+ if (VERBOSE_MED) {
+ HDprintf("Process %d is reading with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n",
+ mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
+ HDfflush(stdout);
+ }
+
+ VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
+ "Hyperslab selection succeeded");
+
+ /* Create property list for collective dataset read */
+ plist_id = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((plist_id >= 0), "DXPL creation succeeded");
+
+ VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0),
+ "Set DXPL MPIO succeeded");
+
+ read_buf_size = flat_dims[0] * sizeof(*read_buf);
+
+ read_buf = (C_DATATYPE *) HDcalloc(1, read_buf_size);
+ VRFY((NULL != read_buf), "HDcalloc succeeded");
+
+ VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, read_buf) >= 0),
+ "Dataset read succeeded");
+
+ global_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ VRFY((NULL != global_buf), "HDcalloc succeeded");
+
+ {
+ size_t run_length = (size_t) (READ_SHARED_FILTERED_CHUNKS_3D_NCOLS * READ_SHARED_FILTERED_CHUNKS_3D_DEPTH);
+ size_t num_blocks = (size_t) (READ_SHARED_FILTERED_CHUNKS_3D_NROWS / mpi_size);
+
+ /*
+ * Due to the nature of 3-dimensional reading, create an MPI vector type that allows each
+ * rank to write to the nth position of the global data buffer, where n is the rank number.
+ */
+ VRFY((MPI_SUCCESS == MPI_Type_vector((int) num_blocks, (int) run_length, (int) (mpi_size * (int) run_length), C_DATATYPE_MPI, &vector_type)),
+ "MPI_Type_vector succeeded");
+ VRFY((MPI_SUCCESS == MPI_Type_commit(&vector_type)), "MPI_Type_commit succeeded");
+
+ /*
+ * Resize the type to allow interleaving,
+ * so make it "run_length" MPI_LONGs wide
+ */
+ VRFY((MPI_SUCCESS == MPI_Type_create_resized(vector_type, 0, (MPI_Aint) (run_length * sizeof(long)), &resized_vector_type)),
+                "MPI_Type_create_resized succeeded");
+ VRFY((MPI_SUCCESS == MPI_Type_commit(&resized_vector_type)), "MPI_Type_commit succeeded");
+ }
+
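+    /*
+     * As in the single-element case above, shrinking the vector type's
+     * extent to run_length elements places rank n's gathered data at element
+     * offset n * run_length, with its num_blocks runs spaced
+     * mpi_size * run_length elements apart -- the row-interleaved layout the
+     * ranks read from the file.
+     */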
+ VRFY((MPI_SUCCESS == MPI_Allgather(read_buf, (int) flat_dims[0], C_DATATYPE_MPI, global_buf, 1, resized_vector_type, comm)),
+            "MPI_Allgather succeeded");
+
+ VRFY((0 == HDmemcmp(global_buf, correct_buf, correct_buf_size)),
+ "Data verification succeeded");
+
+ VRFY((MPI_SUCCESS == MPI_Type_free(&vector_type)), "MPI_Type_free succeeded");
+ VRFY((MPI_SUCCESS == MPI_Type_free(&resized_vector_type)), "MPI_Type_free succeeded");
+
+ if (global_buf) HDfree(global_buf);
+ if (read_buf) HDfree(read_buf);
+ if (correct_buf) HDfree(correct_buf);
+
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+ VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
+ VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded");
+ VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
+
+ return;
+}
+
+/*
+ * Tests parallel read of filtered data from unshared
+ * chunks using a compound datatype which doesn't
+ * require a datatype conversion.
+ *
+ * The MAINPROCESS rank will first write out all of the
+ * data to the dataset. Then, each rank reads a part of
+ * the dataset and contributes its piece to a global
+ * buffer that is checked for consistency.
+ *
+ * Programmer: Jordan Henderson
+ * 05/17/2018
+ */
+static void
+test_read_cmpd_filtered_dataset_no_conversion_unshared(void)
+{
+ COMPOUND_C_DATATYPE *read_buf = NULL;
+ COMPOUND_C_DATATYPE *correct_buf = NULL;
+ COMPOUND_C_DATATYPE *global_buf = NULL;
+ hsize_t dataset_dims[READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS];
+ hsize_t chunk_dims[READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS];
+ hsize_t sel_dims[READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS];
+ hsize_t start[READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS];
+ hsize_t stride[READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS];
+ hsize_t count[READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS];
+ hsize_t block[READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS];
+ hsize_t flat_dims[1];
+ size_t i, read_buf_size, correct_buf_size;
+ hid_t file_id = -1, dset_id = -1, plist_id = -1, memtype = -1;
+ hid_t filespace = -1, memspace = -1;
+ int *recvcounts = NULL;
+ int *displs = NULL;
+
+ if (MAINPROCESS) HDputs("Testing read from unshared filtered chunks in Compound Datatype dataset without Datatype conversion");
+
+ CHECK_CUR_FILTER_AVAIL();
+
+ dataset_dims[0] = (hsize_t) READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_NROWS;
+ dataset_dims[1] = (hsize_t) READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_NCOLS;
+
+ /* Setup the buffer for writing and for comparison */
+ correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*correct_buf);
+
+ correct_buf = (COMPOUND_C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ VRFY((NULL != correct_buf), "HDcalloc succeeded");
+
+ for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++) {
+ correct_buf[i].field1 = (short) (
+ (i % dataset_dims[1])
+ + (i / dataset_dims[1])
+ );
+
+ correct_buf[i].field2 = (int) (
+ (i % dataset_dims[1])
+ + (i / dataset_dims[1])
+ );
+
+ correct_buf[i].field3 = (long) (
+ (i % dataset_dims[1])
+ + (i / dataset_dims[1])
+ );
+ }
+
+ /* Create the compound type for memory. */
+ memtype = H5Tcreate(H5T_COMPOUND, sizeof(COMPOUND_C_DATATYPE));
+ VRFY((memtype >= 0), "Datatype creation succeeded");
+
+ VRFY((H5Tinsert(memtype, "ShortData", HOFFSET(COMPOUND_C_DATATYPE, field1), H5T_NATIVE_SHORT) >= 0),
+ "Datatype insertion succeeded");
+ VRFY((H5Tinsert(memtype, "IntData", HOFFSET(COMPOUND_C_DATATYPE, field2), H5T_NATIVE_INT) >= 0),
+ "Datatype insertion succeeded");
+ VRFY((H5Tinsert(memtype, "LongData", HOFFSET(COMPOUND_C_DATATYPE, field3), H5T_NATIVE_LONG) >= 0),
+ "Datatype insertion succeeded");
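+
+    /*
+     * The same compound type is used below as both the file and memory
+     * type; because the member offsets come from HOFFSET on the native
+     * struct, H5Dwrite and H5Dread move the data without any datatype
+     * conversion.
+     */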
+
+ if (MAINPROCESS) {
+ plist_id = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((plist_id >= 0), "FAPL creation succeeded");
+
+ VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
+ "Set libver bounds succeeded");
+
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id);
+ VRFY((file_id >= 0), "Test file open succeeded");
+
+ VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+
+ /* Create the dataspace for the dataset */
+ filespace = H5Screate_simple(READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS, dataset_dims, NULL);
+ VRFY((filespace >= 0), "File dataspace creation succeeded");
+
+ /* Create chunked dataset */
+ chunk_dims[0] = READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NROWS;
+ chunk_dims[1] = READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NCOLS;
+
+ plist_id = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY((plist_id >= 0), "DCPL creation succeeded");
+
+ VRFY((H5Pset_chunk(plist_id, READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS, chunk_dims) >= 0),
+ "Chunk size set");
+
+ /* Add test filter to the pipeline */
+ VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
+
+ dset_id = H5Dcreate2(file_id, READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_NAME, memtype, filespace,
+ H5P_DEFAULT, plist_id, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset creation succeeded");
+
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+
+ VRFY((H5Dwrite(dset_id, memtype, H5S_ALL, H5S_ALL, H5P_DEFAULT, correct_buf) >= 0),
+ "Dataset write succeeded");
+
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
+ }
+
+ /* Set up file access property list with parallel I/O access */
+ plist_id = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((plist_id >= 0), "FAPL creation succeeded");
+
+ VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0),
+ "Set FAPL MPIO succeeded");
+
+ VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
+ "Set libver bounds succeeded");
+
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, plist_id);
+ VRFY((file_id >= 0), "Test file open succeeded");
+
+ VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+
+ dset_id = H5Dopen2(file_id, "/" READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_NAME, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset open succeeded");
+
+ sel_dims[0] = (hsize_t) READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NROWS;
+ sel_dims[1] = (hsize_t) READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_ENTRIES_PER_PROC;
+
+ /* Setup one-dimensional memory dataspace for reading the dataset data into a contiguous buffer */
+ flat_dims[0] = sel_dims[0] * sel_dims[1];
+
+ memspace = H5Screate_simple(1, flat_dims, NULL);
+ VRFY((memspace >= 0), "Memory dataspace creation succeeded");
+
+ /* Select hyperslab in the file */
+ filespace = H5Dget_space(dset_id);
+ VRFY((filespace >= 0), "File dataspace retrieval succeeded");
+
+ /*
+ * Each process defines the dataset selection in the file and
+ * reads it to the selection in memory
+ */
+ count[0] = 1;
+ count[1] = (hsize_t) READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_ENTRIES_PER_PROC;
+ stride[0] = READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NROWS;
+ stride[1] = READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NCOLS;
+ block[0] = READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NROWS;
+ block[1] = READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NCOLS;
+ start[0] = 0;
+ start[1] = ((hsize_t) mpi_rank * READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NCOLS);
+
+ if (VERBOSE_MED) {
+ HDprintf("Process %d is reading with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n",
+ mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
+ HDfflush(stdout);
+ }
+
+ VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
+ "Hyperslab selection succeeded");
+
+ /* Create property list for collective dataset read */
+ plist_id = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((plist_id >= 0), "DXPL creation succeeded");
+
+ VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0),
+ "Set DXPL MPIO succeeded");
+
+ read_buf_size = flat_dims[0] * sizeof(*read_buf);
+
+ read_buf = (COMPOUND_C_DATATYPE *) HDcalloc(1, read_buf_size);
+ VRFY((NULL != read_buf), "HDcalloc succeeded");
+
+ VRFY((H5Dread(dset_id, memtype, memspace, filespace, plist_id, read_buf) >= 0),
+ "Dataset read succeeded");
+
+ global_buf = (COMPOUND_C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ VRFY((NULL != global_buf), "HDcalloc succeeded");
+
+ /* Collect each piece of data from all ranks into a global buffer on all ranks */
+ recvcounts = (int *) HDcalloc(1, (size_t) mpi_size * sizeof(*recvcounts));
+ VRFY((NULL != recvcounts), "HDcalloc succeeded");
+
+ for (i = 0; i < (size_t) mpi_size; i++)
+ recvcounts[i] = (int) (flat_dims[0] * sizeof(*read_buf));
+
+ displs = (int *) HDcalloc(1, (size_t) mpi_size * sizeof(*displs));
+ VRFY((NULL != displs), "HDcalloc succeeded");
+
+ for (i = 0; i < (size_t) mpi_size; i++)
+ displs[i] = (int) (i * flat_dims[0] * sizeof(*read_buf));
+
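+    /*
+     * There is no predefined MPI datatype matching the compound struct, so
+     * the gather works in raw bytes: the counts and displacements above are
+     * expressed in bytes and MPI_BYTE is used on both sides.
+     */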
+ VRFY((MPI_SUCCESS == MPI_Allgatherv(read_buf, (int) (flat_dims[0] * sizeof(COMPOUND_C_DATATYPE)), MPI_BYTE, global_buf, recvcounts, displs, MPI_BYTE, comm)),
+ "MPI_Allgatherv succeeded");
+
+ VRFY((0 == HDmemcmp(global_buf, correct_buf, correct_buf_size)),
+ "Data verification succeeded");
+
+ if (displs) HDfree(displs);
+ if (recvcounts) HDfree(recvcounts);
+ if (global_buf) HDfree(global_buf);
+ if (read_buf) HDfree(read_buf);
+ if (correct_buf) HDfree(correct_buf);
+
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+ VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
+ VRFY((H5Tclose(memtype) >= 0), "Memory datatype close succeeded");
+ VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded");
+ VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
+
+ return;
+}
+
+/*
+ * Tests parallel read of filtered data from shared
+ * chunks using a compound datatype which doesn't
+ * require a datatype conversion.
+ *
+ * The MAINPROCESS rank will first write out all of the
+ * data to the dataset. Then, each rank reads a part of
+ * each chunk of the dataset and contributes its piece
+ * to a global buffer that is checked for consistency.
+ *
+ * Programmer: Jordan Henderson
+ * 05/17/2018
+ */
+static void
+test_read_cmpd_filtered_dataset_no_conversion_shared(void)
+{
+ COMPOUND_C_DATATYPE *read_buf = NULL;
+ COMPOUND_C_DATATYPE *correct_buf = NULL;
+ COMPOUND_C_DATATYPE *global_buf = NULL;
+ hsize_t dataset_dims[READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS];
+ hsize_t chunk_dims[READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS];
+ hsize_t sel_dims[READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS];
+ hsize_t start[READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS];
+ hsize_t stride[READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS];
+ hsize_t count[READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS];
+ hsize_t block[READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS];
+ hsize_t flat_dims[1];
+ size_t i, read_buf_size, correct_buf_size;
+    hid_t       file_id = -1, dset_id = -1, plist_id = -1, memtype = -1;
+    hid_t       filespace = -1, memspace = -1;
+ int *recvcounts = NULL;
+ int *displs = NULL;
+
+ if (MAINPROCESS) HDputs("Testing read from shared filtered chunks in Compound Datatype dataset without Datatype conversion");
+
+ CHECK_CUR_FILTER_AVAIL();
+
+ dataset_dims[0] = (hsize_t) READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_NROWS;
+ dataset_dims[1] = (hsize_t) READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_NCOLS;
+
+ /* Setup the buffer for writing and for comparison */
+ correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*correct_buf);
+
+ correct_buf = (COMPOUND_C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ VRFY((NULL != correct_buf), "HDcalloc succeeded");
+
+ for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++) {
+ correct_buf[i].field1 = (short) (
+ (dataset_dims[1] * (i / ((hsize_t) mpi_size * dataset_dims[1])))
+ + (i % dataset_dims[1])
+ + (((i % ((hsize_t) mpi_size * dataset_dims[1])) / dataset_dims[1]) % dataset_dims[1])
+ );
+
+ correct_buf[i].field2 = (int) (
+ (dataset_dims[1] * (i / ((hsize_t) mpi_size * dataset_dims[1])))
+ + (i % dataset_dims[1])
+ + (((i % ((hsize_t) mpi_size * dataset_dims[1])) / dataset_dims[1]) % dataset_dims[1])
+ );
+
+ correct_buf[i].field3 = (long) (
+ (dataset_dims[1] * (i / ((hsize_t) mpi_size * dataset_dims[1])))
+ + (i % dataset_dims[1])
+ + (((i % ((hsize_t) mpi_size * dataset_dims[1])) / dataset_dims[1]) % dataset_dims[1])
+ );
+ }
+
+ /* Create the compound type for memory. */
+ memtype = H5Tcreate(H5T_COMPOUND, sizeof(COMPOUND_C_DATATYPE));
+ VRFY((memtype >= 0), "Datatype creation succeeded");
+
+ VRFY((H5Tinsert(memtype, "ShortData", HOFFSET(COMPOUND_C_DATATYPE, field1), H5T_NATIVE_SHORT) >= 0),
+ "Datatype insertion succeeded");
+ VRFY((H5Tinsert(memtype, "IntData", HOFFSET(COMPOUND_C_DATATYPE, field2), H5T_NATIVE_INT) >= 0),
+ "Datatype insertion succeeded");
+ VRFY((H5Tinsert(memtype, "LongData", HOFFSET(COMPOUND_C_DATATYPE, field3), H5T_NATIVE_LONG) >= 0),
+ "Datatype insertion succeeded");
+
+ if (MAINPROCESS) {
+ plist_id = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((plist_id >= 0), "FAPL creation succeeded");
+
+ VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
+ "Set libver bounds succeeded");
+
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id);
+ VRFY((file_id >= 0), "Test file open succeeded");
+
+ VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+
+ /* Create the dataspace for the dataset */
+ filespace = H5Screate_simple(READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS, dataset_dims, NULL);
+ VRFY((filespace >= 0), "File dataspace creation succeeded");
+
+ /* Create chunked dataset */
+ chunk_dims[0] = (hsize_t) READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NROWS;
+ chunk_dims[1] = (hsize_t) READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NCOLS;
+
+ plist_id = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY((plist_id >= 0), "DCPL creation succeeded");
+
+ VRFY((H5Pset_chunk(plist_id, READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS, chunk_dims) >= 0),
+ "Chunk size set");
+
+ /* Add test filter to the pipeline */
+ VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
+
+ dset_id = H5Dcreate2(file_id, READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_NAME, memtype, filespace,
+ H5P_DEFAULT, plist_id, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset creation succeeded");
+
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+
+ VRFY((H5Dwrite(dset_id, memtype, H5S_ALL, H5S_ALL, H5P_DEFAULT, correct_buf) >= 0),
+ "Dataset write succeeded");
+
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
+ }
+
+ /* Set up file access property list with parallel I/O access */
+ plist_id = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((plist_id >= 0), "FAPL creation succeeded");
+
+ VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0),
+ "Set FAPL MPIO succeeded");
+
+ VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
+ "Set libver bounds succeeded");
+
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, plist_id);
+ VRFY((file_id >= 0), "Test file open succeeded");
+
+ VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+
+ dset_id = H5Dopen2(file_id, "/" READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_NAME, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset open succeeded");
+
+ sel_dims[0] = (hsize_t) READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NROWS / (hsize_t) mpi_size;
+ sel_dims[1] = (hsize_t) READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_ENTRIES_PER_PROC;
+
+ /* Setup one-dimensional memory dataspace for reading the dataset data into a contiguous buffer */
+ flat_dims[0] = sel_dims[0] * sel_dims[1];
+
+ memspace = H5Screate_simple(1, flat_dims, NULL);
+ VRFY((memspace >= 0), "Memory dataspace creation succeeded");
+
+ /* Select hyperslab in the file */
+ filespace = H5Dget_space(dset_id);
+ VRFY((filespace >= 0), "File dataspace retrieval succeeded");
+
+ /*
+ * Each process defines the dataset selection in the file and
+ * reads it to the selection in memory
+ */
+ count[0] = 1;
+ count[1] = (hsize_t) READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_ENTRIES_PER_PROC;
+ stride[0] = (hsize_t) READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NROWS;
+ stride[1] = READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NCOLS;
+ block[0] = (hsize_t) READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NROWS / (hsize_t) mpi_size;
+ block[1] = READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NCOLS;
+ start[0] = (hsize_t) mpi_rank;
+ start[1] = 0;
+
+ if (VERBOSE_MED) {
+ HDprintf("Process %d is reading with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n",
+ mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
+ HDfflush(stdout);
+ }
+
+ VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
+ "Hyperslab selection succeeded");
+
+ /* Create property list for collective dataset read */
+ plist_id = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((plist_id >= 0), "DXPL creation succeeded");
+
+ VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0),
+ "Set DXPL MPIO succeeded");
+
+ read_buf_size = flat_dims[0] * sizeof(*read_buf);
+
+ read_buf = (COMPOUND_C_DATATYPE *) HDcalloc(1, read_buf_size);
+ VRFY((NULL != read_buf), "HDcalloc succeeded");
+
+ VRFY((H5Dread(dset_id, memtype, memspace, filespace, plist_id, read_buf) >= 0),
+ "Dataset read succeeded");
+
+ global_buf = (COMPOUND_C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ VRFY((NULL != global_buf), "HDcalloc succeeded");
+
+ /* Collect each piece of data from all ranks into a global buffer on all ranks */
+ recvcounts = (int *) HDcalloc(1, (size_t) mpi_size * sizeof(*recvcounts));
+ VRFY((NULL != recvcounts), "HDcalloc succeeded");
+
+ for (i = 0; i < (size_t) mpi_size; i++)
+ recvcounts[i] = (int) (flat_dims[0] * sizeof(*read_buf));
+
+ displs = (int *) HDcalloc(1, (size_t) mpi_size * sizeof(*displs));
+ VRFY((NULL != displs), "HDcalloc succeeded");
+
+ for (i = 0; i < (size_t) mpi_size; i++)
+ displs[i] = (int) (i * flat_dims[0] * sizeof(*read_buf));
+
+ VRFY((MPI_SUCCESS == MPI_Allgatherv(read_buf, (int) (flat_dims[0] * sizeof(COMPOUND_C_DATATYPE)), MPI_BYTE, global_buf, recvcounts, displs, MPI_BYTE, comm)),
+ "MPI_Allgatherv succeeded");
+
+ VRFY((0 == HDmemcmp(global_buf, correct_buf, correct_buf_size)),
+ "Data verification succeeded");
+
+ if (displs) HDfree(displs);
+ if (recvcounts) HDfree(recvcounts);
+ if (global_buf) HDfree(global_buf);
+ if (read_buf) HDfree(read_buf);
+ if (correct_buf) HDfree(correct_buf);
+
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+ VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
+ VRFY((H5Tclose(memtype) >= 0), "Memory datatype close succeeded");
+ VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded");
+ VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
+
+ return;
+}
+
+/*
+ * Tests parallel read of filtered data from unshared
+ * chunks using a compound datatype which requires a
+ * datatype conversion.
+ *
+ * The MAINPROCESS rank will first write out all of the
+ * data to the dataset. Then, each rank reads a part of
+ * the dataset and contributes its piece to a global
+ * buffer that is checked for consistency.
+ *
+ * Programmer: Jordan Henderson
+ * 05/17/2018
+ */
+static void
+test_read_cmpd_filtered_dataset_type_conversion_unshared(void)
+{
+ COMPOUND_C_DATATYPE *read_buf = NULL;
+ COMPOUND_C_DATATYPE *correct_buf = NULL;
+ COMPOUND_C_DATATYPE *global_buf = NULL;
+ hsize_t dataset_dims[READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS];
+ hsize_t chunk_dims[READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS];
+ hsize_t sel_dims[READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS];
+ hsize_t start[READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS];
+ hsize_t stride[READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS];
+ hsize_t count[READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS];
+ hsize_t block[READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS];
+ hsize_t flat_dims[1];
+ size_t i, read_buf_size, correct_buf_size;
+ hid_t file_id = -1, dset_id = -1, plist_id = -1, filetype = -1, memtype = -1;
+ hid_t filespace = -1, memspace = -1;
+ int *recvcounts = NULL;
+ int *displs = NULL;
+
+ if (MAINPROCESS) HDputs("Testing read from unshared filtered chunks in Compound Datatype dataset with Datatype conversion");
+
+ CHECK_CUR_FILTER_AVAIL();
+
+ dataset_dims[0] = (hsize_t) READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_NROWS;
+ dataset_dims[1] = (hsize_t) READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_NCOLS;
+
+ /* Setup the buffer for writing and for comparison */
+ correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*correct_buf);
+
+ correct_buf = (COMPOUND_C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ VRFY((NULL != correct_buf), "HDcalloc succeeded");
+
+ for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++) {
+ correct_buf[i].field1 = (short) (
+ (i % dataset_dims[1])
+ + (i / dataset_dims[1])
+ );
+
+ correct_buf[i].field2 = (int) (
+ (i % dataset_dims[1])
+ + (i / dataset_dims[1])
+ );
+
+ correct_buf[i].field3 = (long) (
+ (i % dataset_dims[1])
+ + (i / dataset_dims[1])
+ );
+ }
+
+ /* Create the compound type for memory. */
+ memtype = H5Tcreate(H5T_COMPOUND, sizeof(COMPOUND_C_DATATYPE));
+ VRFY((memtype >= 0), "Datatype creation succeeded");
+
+ VRFY((H5Tinsert(memtype, "ShortData", HOFFSET(COMPOUND_C_DATATYPE, field1), H5T_NATIVE_SHORT) >= 0),
+ "Datatype insertion succeeded");
+ VRFY((H5Tinsert(memtype, "IntData", HOFFSET(COMPOUND_C_DATATYPE, field2), H5T_NATIVE_INT) >= 0),
+ "Datatype insertion succeeded");
+ VRFY((H5Tinsert(memtype, "LongData", HOFFSET(COMPOUND_C_DATATYPE, field3), H5T_NATIVE_LONG) >= 0),
+ "Datatype insertion succeeded");
+
+ /* Create the compound type for file. */
+ filetype = H5Tcreate(H5T_COMPOUND, 32);
+ VRFY((filetype >= 0), "Datatype creation succeeded");
+
+ VRFY((H5Tinsert(filetype, "ShortData", 0, H5T_STD_I64BE) >= 0),
+ "Datatype insertion succeeded");
+ VRFY((H5Tinsert(filetype, "IntData", 8, H5T_STD_I64BE) >= 0),
+ "Datatype insertion succeeded");
+ VRFY((H5Tinsert(filetype, "LongData", 16, H5T_STD_I64BE) >= 0),
+ "Datatype insertion succeeded");
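+
+    /*
+     * The file type is a 32-byte compound (with trailing padding) holding
+     * three big-endian 64-bit integers at offsets 0, 8 and 16, while the
+     * memory type uses native short/int/long members, so every field is
+     * converted on read.
+     */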
+
+ if (MAINPROCESS) {
+ plist_id = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((plist_id >= 0), "FAPL creation succeeded");
+
+ VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
+ "Set libver bounds succeeded");
+
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id);
+ VRFY((file_id >= 0), "Test file open succeeded");
+
+ VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+
+ /* Create the dataspace for the dataset */
+ filespace = H5Screate_simple(READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS, dataset_dims, NULL);
+ VRFY((filespace >= 0), "File dataspace creation succeeded");
+
+ /* Create chunked dataset */
+ chunk_dims[0] = READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NROWS;
+ chunk_dims[1] = READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NCOLS;
+
+ plist_id = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY((plist_id >= 0), "DCPL creation succeeded");
+
+ VRFY((H5Pset_chunk(plist_id, READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS, chunk_dims) >= 0),
+ "Chunk size set");
+
+ /* Add test filter to the pipeline */
+ VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
+
+ dset_id = H5Dcreate2(file_id, READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_NAME, filetype, filespace,
+ H5P_DEFAULT, plist_id, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset creation succeeded");
+
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+
+ VRFY((H5Dwrite(dset_id, memtype, H5S_ALL, H5S_ALL, H5P_DEFAULT, correct_buf) >= 0),
+ "Dataset write succeeded");
+
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
+ }
+
+ /* Set up file access property list with parallel I/O access */
+ plist_id = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((plist_id >= 0), "FAPL creation succeeded");
+
+ VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0),
+ "Set FAPL MPIO succeeded");
+
+ VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
+ "Set libver bounds succeeded");
+
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, plist_id);
+ VRFY((file_id >= 0), "Test file open succeeded");
+
+ VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+
+ dset_id = H5Dopen2(file_id, "/" READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_NAME, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset open succeeded");
+
+ sel_dims[0] = (hsize_t) READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NROWS;
+ sel_dims[1] = (hsize_t) READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_ENTRIES_PER_PROC;
+
+ /* Setup one-dimensional memory dataspace for reading the dataset data into a contiguous buffer */
+ flat_dims[0] = sel_dims[0] * sel_dims[1];
+
+ memspace = H5Screate_simple(1, flat_dims, NULL);
+ VRFY((memspace >= 0), "Memory dataspace creation succeeded");
+
+ /* Select hyperslab in the file */
+ filespace = H5Dget_space(dset_id);
+ VRFY((filespace >= 0), "File dataspace retrieval succeeded");
+
+ /*
+ * Each process defines the dataset selection in the file and
+ * reads it to the selection in memory
+ */
+ count[0] = 1;
+ count[1] = (hsize_t) READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_ENTRIES_PER_PROC;
+ stride[0] = READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NROWS;
+ stride[1] = READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NCOLS;
+ block[0] = READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NROWS;
+ block[1] = READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NCOLS;
+ start[0] = 0;
+ start[1] = ((hsize_t) mpi_rank * READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NCOLS);
+
+ if (VERBOSE_MED) {
+ HDprintf("Process %d is reading with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n",
+ mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
+ HDfflush(stdout);
+ }
+
+ VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
+ "Hyperslab selection succeeded");
+
+ /* Create property list for collective dataset read */
+ plist_id = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((plist_id >= 0), "DXPL creation succeeded");
+
+ VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0),
+ "Set DXPL MPIO succeeded");
+
+ read_buf_size = flat_dims[0] * sizeof(*read_buf);
+
+ read_buf = (COMPOUND_C_DATATYPE *) HDcalloc(1, read_buf_size);
+ VRFY((NULL != read_buf), "HDcalloc succeeded");
+
+ VRFY((H5Dread(dset_id, memtype, memspace, filespace, plist_id, read_buf) >= 0),
+ "Dataset read succeeded");
+
+ global_buf = (COMPOUND_C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ VRFY((NULL != global_buf), "HDcalloc succeeded");
+
+ /* Collect each piece of data from all ranks into a global buffer on all ranks */
+ recvcounts = (int *) HDcalloc(1, (size_t) mpi_size * sizeof(*recvcounts));
+ VRFY((NULL != recvcounts), "HDcalloc succeeded");
+
+ for (i = 0; i < (size_t) mpi_size; i++)
+ recvcounts[i] = (int) (flat_dims[0] * sizeof(*read_buf));
+
+ displs = (int *) HDcalloc(1, (size_t) mpi_size * sizeof(*displs));
+ VRFY((NULL != displs), "HDcalloc succeeded");
+
+ for (i = 0; i < (size_t) mpi_size; i++)
+ displs[i] = (int) (i * flat_dims[0] * sizeof(*read_buf));
+
+ VRFY((MPI_SUCCESS == MPI_Allgatherv(read_buf, (int) (flat_dims[0] * sizeof(COMPOUND_C_DATATYPE)), MPI_BYTE, global_buf, recvcounts, displs, MPI_BYTE, comm)),
+ "MPI_Allgatherv succeeded");
+
+ VRFY((0 == HDmemcmp(global_buf, correct_buf, correct_buf_size)),
+ "Data verification succeeded");
+
+ if (displs) HDfree(displs);
+ if (recvcounts) HDfree(recvcounts);
+ if (global_buf) HDfree(global_buf);
+ if (read_buf) HDfree(read_buf);
+ if (correct_buf) HDfree(correct_buf);
+
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+ VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
+ VRFY((H5Tclose(filetype) >= 0), "File datatype close succeeded");
+ VRFY((H5Tclose(memtype) >= 0), "Memory datatype close succeeded");
+ VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded");
+ VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
+
+ return;
+}
+
+/*
+ * Tests parallel read of filtered data from shared
+ * chunks using a compound datatype which requires
+ * a datatype conversion.
+ *
+ * The MAINPROCESS rank will first write out all of the
+ * data to the dataset. Then, each rank reads a part of
+ * each chunk of the dataset and contributes its pieces
+ * to a global buffer that is checked for consistency.
+ *
+ * Programmer: Jordan Henderson
+ * 05/17/2018
+ */
+static void
+test_read_cmpd_filtered_dataset_type_conversion_shared(void)
+{
+ COMPOUND_C_DATATYPE *read_buf = NULL;
+ COMPOUND_C_DATATYPE *correct_buf = NULL;
+ COMPOUND_C_DATATYPE *global_buf = NULL;
+ hsize_t dataset_dims[READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS];
+ hsize_t chunk_dims[READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS];
+ hsize_t sel_dims[READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS];
+ hsize_t start[READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS];
+ hsize_t stride[READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS];
+ hsize_t count[READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS];
+ hsize_t block[READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS];
+ hsize_t flat_dims[1];
+ size_t i, read_buf_size, correct_buf_size;
+    hid_t       file_id = -1, dset_id = -1, plist_id = -1, filetype = -1, memtype = -1;
+    hid_t       filespace = -1, memspace = -1;
+ int *recvcounts = NULL;
+ int *displs = NULL;
+
+ if (MAINPROCESS) HDputs("Testing read from shared filtered chunks in Compound Datatype dataset with Datatype conversion");
+
+ CHECK_CUR_FILTER_AVAIL();
+
+ dataset_dims[0] = (hsize_t) READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_NROWS;
+ dataset_dims[1] = (hsize_t) READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_NCOLS;
+
+ /* Setup the buffer for writing and for comparison */
+ correct_buf_size = dataset_dims[0] * dataset_dims[1] * sizeof(*correct_buf);
+
+ correct_buf = (COMPOUND_C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ VRFY((NULL != correct_buf), "HDcalloc succeeded");
+
+ for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++) {
+ correct_buf[i].field1 = (short) (
+ (dataset_dims[1] * (i / ((hsize_t) mpi_size * dataset_dims[1])))
+ + (i % dataset_dims[1])
+ + (((i % ((hsize_t) mpi_size * dataset_dims[1])) / dataset_dims[1]) % dataset_dims[1])
+ );
+
+ correct_buf[i].field2 = (int) (
+ (dataset_dims[1] * (i / ((hsize_t) mpi_size * dataset_dims[1])))
+ + (i % dataset_dims[1])
+ + (((i % ((hsize_t) mpi_size * dataset_dims[1])) / dataset_dims[1]) % dataset_dims[1])
+ );
+
+ correct_buf[i].field3 = (long) (
+ (dataset_dims[1] * (i / ((hsize_t) mpi_size * dataset_dims[1])))
+ + (i % dataset_dims[1])
+ + (((i % ((hsize_t) mpi_size * dataset_dims[1])) / dataset_dims[1]) % dataset_dims[1])
+ );
+ }
+
+ /* Create the compound type for memory. */
+ memtype = H5Tcreate(H5T_COMPOUND, sizeof(COMPOUND_C_DATATYPE));
+ VRFY((memtype >= 0), "Datatype creation succeeded");
+
+ VRFY((H5Tinsert(memtype, "ShortData", HOFFSET(COMPOUND_C_DATATYPE, field1), H5T_NATIVE_SHORT) >= 0),
+ "Datatype insertion succeeded");
+ VRFY((H5Tinsert(memtype, "IntData", HOFFSET(COMPOUND_C_DATATYPE, field2), H5T_NATIVE_INT) >= 0),
+ "Datatype insertion succeeded");
+ VRFY((H5Tinsert(memtype, "LongData", HOFFSET(COMPOUND_C_DATATYPE, field3), H5T_NATIVE_LONG) >= 0),
+ "Datatype insertion succeeded");
+
+ /* Create the compound type for file. */
+ filetype = H5Tcreate(H5T_COMPOUND, 32);
+ VRFY((filetype >= 0), "Datatype creation succeeded");
+
+ VRFY((H5Tinsert(filetype, "ShortData", 0, H5T_STD_I64BE) >= 0),
+ "Datatype insertion succeeded");
+ VRFY((H5Tinsert(filetype, "IntData", 8, H5T_STD_I64BE) >= 0),
+ "Datatype insertion succeeded");
+ VRFY((H5Tinsert(filetype, "LongData", 16, H5T_STD_I64BE) >= 0),
+ "Datatype insertion succeeded");
+
+ if (MAINPROCESS) {
+ plist_id = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((plist_id >= 0), "FAPL creation succeeded");
+
+ VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
+ "Set libver bounds succeeded");
+
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id);
+ VRFY((file_id >= 0), "Test file open succeeded");
+
+ VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+
+ /* Create the dataspace for the dataset */
+ filespace = H5Screate_simple(READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS, dataset_dims, NULL);
+ VRFY((filespace >= 0), "File dataspace creation succeeded");
+
+ /* Create chunked dataset */
+ chunk_dims[0] = (hsize_t) READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NROWS;
+ chunk_dims[1] = (hsize_t) READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NCOLS;
+
+ plist_id = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY((plist_id >= 0), "DCPL creation succeeded");
+
+ VRFY((H5Pset_chunk(plist_id, READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS, chunk_dims) >= 0),
+ "Chunk size set");
+
+ /* Add test filter to the pipeline */
+ VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
+
+ dset_id = H5Dcreate2(file_id, READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_NAME, filetype, filespace,
+ H5P_DEFAULT, plist_id, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset creation succeeded");
+
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+
+ VRFY((H5Dwrite(dset_id, memtype, H5S_ALL, H5S_ALL, H5P_DEFAULT, correct_buf) >= 0),
+ "Dataset write succeeded");
+
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
+ }
+
+ /* Set up file access property list with parallel I/O access */
+ plist_id = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((plist_id >= 0), "FAPL creation succeeded");
+
+ VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0),
+ "Set FAPL MPIO succeeded");
+
+ VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
+ "Set libver bounds succeeded");
+
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDONLY, plist_id);
+ VRFY((file_id >= 0), "Test file open succeeded");
+
+ VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+
+ dset_id = H5Dopen2(file_id, "/" READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_NAME, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset open succeeded");
+
+ sel_dims[0] = (hsize_t) READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NROWS / (hsize_t) mpi_size;
+ sel_dims[1] = (hsize_t) READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_ENTRIES_PER_PROC;
+
+ /* Setup one-dimensional memory dataspace for reading the dataset data into a contiguous buffer */
+ flat_dims[0] = sel_dims[0] * sel_dims[1];
+
+ memspace = H5Screate_simple(1, flat_dims, NULL);
+ VRFY((memspace >= 0), "Memory dataspace creation succeeded");
+
+ /* Select hyperslab in the file */
+ filespace = H5Dget_space(dset_id);
+ VRFY((filespace >= 0), "File dataspace retrieval succeeded");
+
+ /*
+ * Each process defines the dataset selection in the file and
+ * reads it to the selection in memory
+ */
+ count[0] = 1;
+ count[1] = (hsize_t) READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_ENTRIES_PER_PROC;
+ stride[0] = (hsize_t) READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NROWS;
+ stride[1] = READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NCOLS;
+ block[0] = (hsize_t) READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NROWS / (hsize_t) mpi_size;
+ block[1] = READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NCOLS;
+ start[0] = (hsize_t) mpi_rank;
+ start[1] = 0;
+
+ if (VERBOSE_MED) {
+ HDprintf("Process %d is reading with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n",
+ mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
+ HDfflush(stdout);
+ }
+
+ VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
+ "Hyperslab selection succeeded");
+
+ /* Create property list for collective dataset read */
+ plist_id = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((plist_id >= 0), "DXPL creation succeeded");
+
+ VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0),
+ "Set DXPL MPIO succeeded");
+
+ read_buf_size = flat_dims[0] * sizeof(*read_buf);
+
+ read_buf = (COMPOUND_C_DATATYPE *) HDcalloc(1, read_buf_size);
+ VRFY((NULL != read_buf), "HDcalloc succeeded");
+
+ VRFY((H5Dread(dset_id, memtype, memspace, filespace, plist_id, read_buf) >= 0),
+ "Dataset read succeeded");
+
+ global_buf = (COMPOUND_C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ VRFY((NULL != global_buf), "HDcalloc succeeded");
+
+ /* Collect each piece of data from all ranks into a global buffer on all ranks */
+ recvcounts = (int *) HDcalloc(1, (size_t) mpi_size * sizeof(*recvcounts));
+ VRFY((NULL != recvcounts), "HDcalloc succeeded");
+
+ for (i = 0; i < (size_t) mpi_size; i++)
+ recvcounts[i] = (int) (flat_dims[0] * sizeof(*read_buf));
+
+ displs = (int *) HDcalloc(1, (size_t) mpi_size * sizeof(*displs));
+ VRFY((NULL != displs), "HDcalloc succeeded");
+
+ for (i = 0; i < (size_t) mpi_size; i++)
+ displs[i] = (int) (i * flat_dims[0] * sizeof(*read_buf));
+
+ VRFY((MPI_SUCCESS == MPI_Allgatherv(read_buf, (int) (flat_dims[0] * sizeof(COMPOUND_C_DATATYPE)), MPI_BYTE, global_buf, recvcounts, displs, MPI_BYTE, comm)),
+ "MPI_Allgatherv succeeded");
+
+ VRFY((0 == HDmemcmp(global_buf, correct_buf, correct_buf_size)),
+ "Data verification succeeded");
+
+ if (displs) HDfree(displs);
+ if (recvcounts) HDfree(recvcounts);
+ if (global_buf) HDfree(global_buf);
+ if (read_buf) HDfree(read_buf);
+ if (correct_buf) HDfree(correct_buf);
+
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+ VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
+    VRFY((H5Tclose(filetype) >= 0), "File datatype close succeeded");
+    VRFY((H5Tclose(memtype) >= 0), "Memory datatype close succeeded");
+ VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded");
+ VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
+
+ return;
+}
+
+/*
+ * Tests write of filtered data to a dataset
+ * by a single process. After the write has
+ * succeeded, the dataset is closed and then
+ * re-opened in parallel and read by all
+ * processes to ensure data correctness.
+ *
+ * Programmer: Jordan Henderson
+ * 08/03/2017
+ */
+static void
+test_write_serial_read_parallel(void)
+{
+ C_DATATYPE *data = NULL;
+ C_DATATYPE *read_buf = NULL;
+ C_DATATYPE *correct_buf = NULL;
+ hsize_t dataset_dims[WRITE_SERIAL_READ_PARALLEL_DATASET_DIMS];
+ hsize_t chunk_dims[WRITE_SERIAL_READ_PARALLEL_DATASET_DIMS];
+ size_t i, data_size, correct_buf_size;
+ hid_t file_id = -1, dset_id = -1, plist_id = -1;
+ hid_t filespace = -1;
+
+ if (MAINPROCESS) HDputs("Testing write file serially; read file in parallel");
+
+ CHECK_CUR_FILTER_AVAIL();
+
+ dataset_dims[0] = (hsize_t) WRITE_SERIAL_READ_PARALLEL_NROWS;
+ dataset_dims[1] = (hsize_t) WRITE_SERIAL_READ_PARALLEL_NCOLS;
+ dataset_dims[2] = (hsize_t) WRITE_SERIAL_READ_PARALLEL_DEPTH;
+
+ /* Write the file on the MAINPROCESS rank */
+ if (MAINPROCESS) {
+ /* Set up file access property list */
+ plist_id = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((plist_id >= 0), "FAPL creation succeeded");
+
+ VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
+ "Set libver bounds succeeded");
+
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id);
+ VRFY((file_id >= 0), "Test file open succeeded");
+
+ VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+
+ /* Create the dataspace for the dataset */
+ chunk_dims[0] = (hsize_t) WRITE_SERIAL_READ_PARALLEL_CH_NROWS;
+ chunk_dims[1] = (hsize_t) WRITE_SERIAL_READ_PARALLEL_CH_NCOLS;
+ chunk_dims[2] = 1;
+
+ filespace = H5Screate_simple(WRITE_SERIAL_READ_PARALLEL_DATASET_DIMS, dataset_dims, NULL);
+ VRFY((filespace >= 0), "File dataspace creation succeeded");
+
+ /* Create chunked dataset */
+ plist_id = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY((plist_id >= 0), "DCPL creation succeeded");
+
+ VRFY((H5Pset_chunk(plist_id, WRITE_SERIAL_READ_PARALLEL_DATASET_DIMS, chunk_dims) >= 0),
+ "Chunk size set");
+
+ /* Add test filter to the pipeline */
+ VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
+
+ dset_id = H5Dcreate2(file_id, WRITE_SERIAL_READ_PARALLEL_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
+ H5P_DEFAULT, plist_id, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset creation succeeded");
+
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+
+ data_size = dataset_dims[0] * dataset_dims[1] * dataset_dims[2] * sizeof(*data);
+
+ data = (C_DATATYPE *) HDcalloc(1, data_size);
+ VRFY((NULL != data), "HDcalloc succeeded");
+
+ for (i = 0; i < data_size / sizeof(*data); i++)
+ data[i] = (C_DATATYPE) GEN_DATA(i);
+
+ VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, H5P_DEFAULT, data) >= 0),
+ "Dataset write succeeded");
+
+ if (data) HDfree(data);
+
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
+ }
+
+ correct_buf_size = dataset_dims[0] * dataset_dims[1] * dataset_dims[2] * sizeof(*correct_buf);
+
+ correct_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ VRFY((NULL != correct_buf), "HDcalloc succeeded");
+
+ read_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ VRFY((NULL != read_buf), "HDcalloc succeeded");
+
+ for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++)
+ correct_buf[i] = (long) i;
+
+ /* All ranks open the file and verify their "portion" of the dataset is correct */
+ plist_id = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((plist_id >= 0), "FAPL creation succeeded");
+
+ VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0),
+ "Set FAPL MPIO succeeded");
+
+ VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
+ "Set libver bounds succeeded");
+
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id);
+ VRFY((file_id >= 0), "Test file open succeeded");
+
+ VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+
+ dset_id = H5Dopen2(file_id, "/" WRITE_SERIAL_READ_PARALLEL_DATASET_NAME, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset open succeeded");
+
+ plist_id = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((plist_id >= 0), "DXPL creation succeeded");
+
+ VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0),
+ "Set DXPL MPIO succeeded");
+
+ VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, plist_id, read_buf) >= 0),
+ "Dataset read succeeded");
+
+ VRFY((0 == HDmemcmp(read_buf, correct_buf, correct_buf_size)),
+ "Data verification succeeded");
+
+ if (correct_buf) HDfree(correct_buf);
+ if (read_buf) HDfree(read_buf);
+
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded");
+ VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
+
+ return;
+}
+
+#if MPI_VERSION >= 3
+/*
+ * Tests parallel write of filtered data
+ * to a dataset. After the write has
+ * succeeded, the dataset is closed and
+ * then re-opened and read by a single
+ * process to ensure data correctness.
+ *
+ * Programmer: Jordan Henderson
+ * 08/03/2017
+ */
+static void
+test_write_parallel_read_serial(void)
+{
+ C_DATATYPE *data = NULL;
+ C_DATATYPE *read_buf = NULL;
+ C_DATATYPE *correct_buf = NULL;
+ hsize_t dataset_dims[WRITE_PARALLEL_READ_SERIAL_DATASET_DIMS];
+ hsize_t chunk_dims[WRITE_PARALLEL_READ_SERIAL_DATASET_DIMS];
+ hsize_t sel_dims[WRITE_PARALLEL_READ_SERIAL_DATASET_DIMS];
+ hsize_t count[WRITE_PARALLEL_READ_SERIAL_DATASET_DIMS];
+ hsize_t stride[WRITE_PARALLEL_READ_SERIAL_DATASET_DIMS];
+ hsize_t block[WRITE_PARALLEL_READ_SERIAL_DATASET_DIMS];
+ hsize_t offset[WRITE_PARALLEL_READ_SERIAL_DATASET_DIMS];
+ size_t i, data_size, correct_buf_size;
+ hid_t file_id = -1, dset_id = -1, plist_id = -1;
+ hid_t filespace = -1, memspace = -1;
+
+ if (MAINPROCESS) HDputs("Testing write file in parallel; read serially");
+
+ CHECK_CUR_FILTER_AVAIL();
+
+ /* Set up file access property list with parallel I/O access */
+ plist_id = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((plist_id >= 0), "FAPL creation succeeded");
+
+ VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0),
+ "Set FAPL MPIO succeeded");
+
+ VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
+ "Set libver bounds succeeded");
+
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id);
+ VRFY((file_id >= 0), "Test file open succeeded");
+
+ VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+
+ /* Create the dataspace for the dataset */
+ dataset_dims[0] = (hsize_t) WRITE_PARALLEL_READ_SERIAL_NROWS;
+ dataset_dims[1] = (hsize_t) WRITE_PARALLEL_READ_SERIAL_NCOLS;
+ dataset_dims[2] = (hsize_t) WRITE_PARALLEL_READ_SERIAL_DEPTH;
+ chunk_dims[0] = (hsize_t) WRITE_PARALLEL_READ_SERIAL_CH_NROWS;
+ chunk_dims[1] = (hsize_t) WRITE_PARALLEL_READ_SERIAL_CH_NCOLS;
+ chunk_dims[2] = 1;
+ sel_dims[0] = (hsize_t) WRITE_PARALLEL_READ_SERIAL_CH_NROWS;
+ sel_dims[1] = (hsize_t) WRITE_PARALLEL_READ_SERIAL_NCOLS;
+ sel_dims[2] = (hsize_t) WRITE_PARALLEL_READ_SERIAL_DEPTH;
+
+ filespace = H5Screate_simple(WRITE_PARALLEL_READ_SERIAL_DATASET_DIMS, dataset_dims, NULL);
+ VRFY((filespace >= 0), "File dataspace creation succeeded");
+
+ memspace = H5Screate_simple(WRITE_PARALLEL_READ_SERIAL_DATASET_DIMS, sel_dims, NULL);
+ VRFY((memspace >= 0), "Memory dataspace creation succeeded");
+
+ /* Create chunked dataset */
+ plist_id = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY((plist_id >= 0), "DCPL creation succeeded");
+
+ VRFY((H5Pset_chunk(plist_id, WRITE_PARALLEL_READ_SERIAL_DATASET_DIMS, chunk_dims) >= 0),
+ "Chunk size set");
+
+ /* Add test filter to the pipeline */
+ VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
+
+ dset_id = H5Dcreate2(file_id, WRITE_PARALLEL_READ_SERIAL_DATASET_NAME, HDF5_DATATYPE_NAME, filespace,
+ H5P_DEFAULT, plist_id, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset creation succeeded");
+
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+
+ /* Each process defines the dataset selection in memory and writes
+ * it to the hyperslab in the file
+ */
+ count[0] = 1;
+ count[1] = (hsize_t) WRITE_PARALLEL_READ_SERIAL_NCOLS / (hsize_t) WRITE_PARALLEL_READ_SERIAL_CH_NCOLS;
+ count[2] = (hsize_t) mpi_size;
+ stride[0] = (hsize_t) WRITE_PARALLEL_READ_SERIAL_CH_NROWS;
+ stride[1] = (hsize_t) WRITE_PARALLEL_READ_SERIAL_CH_NCOLS;
+ stride[2] = 1;
+ block[0] = (hsize_t) WRITE_PARALLEL_READ_SERIAL_CH_NROWS;
+ block[1] = (hsize_t) WRITE_PARALLEL_READ_SERIAL_CH_NCOLS;
+ block[2] = 1;
+ offset[0] = ((hsize_t) mpi_rank * (hsize_t) WRITE_PARALLEL_READ_SERIAL_CH_NROWS * count[0]);
+ offset[1] = 0;
+ offset[2] = 0;
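+
+    /* For illustration, with 2 ranks and the default scale factors the
+     * dataset is 8 x 4 x 2 with 4 x 2 x 1 chunks, and the selection above
+     * gives each rank one band of whole chunks: rank 0 covers file rows
+     * 0-3 and rank 1 rows 4-7, across all columns and depth pages
+     */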
+
+ if (VERBOSE_MED) {
+ HDprintf("Process %d is writing with count[ %llu, %llu, %llu ], stride[ %llu, %llu, %llu ], offset[ %llu, %llu, %llu ], block size[ %llu, %llu, %llu ]\n",
+ mpi_rank, count[0], count[1], count[2], stride[0], stride[1], stride[2], offset[0], offset[1], offset[2], block[0], block[1], block[2]);
+ HDfflush(stdout);
+ }
+
+ /* Select hyperslab in the file */
+ filespace = H5Dget_space(dset_id);
+ VRFY((filespace >= 0), "File dataspace retrieval succeeded");
+
+ VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, offset, stride,
+ count, block) >= 0), "Hyperslab selection succeeded");
+
+ /* Fill data buffer */
+ data_size = sel_dims[0] * sel_dims[1] * sel_dims[2] * sizeof(*data);
+
+ data = (C_DATATYPE *) HDcalloc(1, data_size);
+ VRFY((NULL != data), "HDcalloc succeeded");
+
+ for (i = 0; i < data_size / sizeof(*data); i++)
+ data[i] = (C_DATATYPE) GEN_DATA(i);
+
+ /* Create property list for collective dataset write */
+ plist_id = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((plist_id >= 0), "DXPL creation succeeded");
+
+ VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0),
+ "Set DXPL MPIO succeeded");
+
+ VRFY((H5Dwrite(dset_id, HDF5_DATATYPE_NAME, memspace, filespace, plist_id, data) >= 0),
+ "Dataset write succeeded");
+
+ if (data) HDfree(data);
+
+ VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded");
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+ VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
+
+ if (MAINPROCESS) {
+ plist_id = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((plist_id >= 0), "FAPL creation succeeded");
+
+ VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
+ "Set libver bounds succeeded");
+
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id);
+ VRFY((file_id >= 0), "Test file open succeeded");
+
+ VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+
+ dset_id = H5Dopen2(file_id, "/" WRITE_PARALLEL_READ_SERIAL_DATASET_NAME, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset open succeeded");
+
+ correct_buf_size = dataset_dims[0] * dataset_dims[1] * dataset_dims[2] * sizeof(*correct_buf);
+
+ correct_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ VRFY((NULL != correct_buf), "HDcalloc succeeded");
+
+ read_buf = (C_DATATYPE *) HDcalloc(1, correct_buf_size);
+ VRFY((NULL != read_buf), "HDcalloc succeeded");
+
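+        /* Expected value of element i: its offset within a "page" of
+         * dataset_dims[0] * dataset_dims[1] elements, plus the index of
+         * that page
+         */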
+ for (i = 0; i < correct_buf_size / sizeof(*correct_buf); i++)
+ correct_buf[i] = (C_DATATYPE) (
+ (i % (dataset_dims[0] * dataset_dims[1]))
+ + (i / (dataset_dims[0] * dataset_dims[1]))
+ );
+
+ VRFY((H5Dread(dset_id, HDF5_DATATYPE_NAME, H5S_ALL, H5S_ALL, H5P_DEFAULT, read_buf) >= 0),
+ "Dataset read succeeded");
+
+ VRFY((0 == HDmemcmp(read_buf, correct_buf, correct_buf_size)),
+ "Data verification succeeded");
+
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
+
+ HDfree(correct_buf);
+ HDfree(read_buf);
+ }
+
+ return;
+}
+
+/*
+ * Tests that causing chunks to continually grow and shrink
+ * by writing random data followed by zeroed-out data (and
+ * thus controlling the compression ratio) does not cause
+ * problems.
+ *
+ * Programmer: Jordan Henderson
+ * 06/04/2018
+ */
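+/*
+ * Sketch of the mechanism (an illustration, not taken from the test itself):
+ * under a compression filter such as deflate, a zeroed buffer compresses to
+ * almost nothing while random doubles are essentially incompressible, so
+ * alternating the two forces each chunk's on-disk size to shrink and grow
+ * on every other write.
+ */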
+static void
+test_shrinking_growing_chunks(void)
+{
+ double *data = NULL;
+ hsize_t dataset_dims[SHRINKING_GROWING_CHUNKS_DATASET_DIMS];
+ hsize_t chunk_dims[SHRINKING_GROWING_CHUNKS_DATASET_DIMS];
+ hsize_t sel_dims[SHRINKING_GROWING_CHUNKS_DATASET_DIMS];
+ hsize_t start[SHRINKING_GROWING_CHUNKS_DATASET_DIMS];
+ hsize_t stride[SHRINKING_GROWING_CHUNKS_DATASET_DIMS];
+ hsize_t count[SHRINKING_GROWING_CHUNKS_DATASET_DIMS];
+ hsize_t block[SHRINKING_GROWING_CHUNKS_DATASET_DIMS];
+ size_t i, data_size;
+ hid_t file_id = -1, dset_id = -1, plist_id = -1;
+ hid_t filespace = -1, memspace = -1;
+
+ if (MAINPROCESS) HDputs("Testing continually shrinking/growing chunks");
+
+ CHECK_CUR_FILTER_AVAIL();
+
+ /* Set up file access property list with parallel I/O access */
+ plist_id = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((plist_id >= 0), "FAPL creation succeeded");
+
+ VRFY((H5Pset_fapl_mpio(plist_id, comm, info) >= 0),
+ "Set FAPL MPIO succeeded");
+
+ VRFY((H5Pset_libver_bounds(plist_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
+ "Set libver bounds succeeded");
+
+ file_id = H5Fopen(filenames[0], H5F_ACC_RDWR, plist_id);
+ VRFY((file_id >= 0), "Test file open succeeded");
+
+ VRFY((H5Pclose(plist_id) >= 0), "FAPL close succeeded");
+
+ /* Create the dataspace for the dataset */
+ dataset_dims[0] = (hsize_t) SHRINKING_GROWING_CHUNKS_NROWS;
+ dataset_dims[1] = (hsize_t) SHRINKING_GROWING_CHUNKS_NCOLS;
+ chunk_dims[0] = (hsize_t) SHRINKING_GROWING_CHUNKS_CH_NROWS;
+ chunk_dims[1] = (hsize_t) SHRINKING_GROWING_CHUNKS_CH_NCOLS;
+ sel_dims[0] = (hsize_t) SHRINKING_GROWING_CHUNKS_CH_NROWS;
+ sel_dims[1] = (hsize_t) SHRINKING_GROWING_CHUNKS_NCOLS;
+
+ filespace = H5Screate_simple(SHRINKING_GROWING_CHUNKS_DATASET_DIMS, dataset_dims, NULL);
+ VRFY((filespace >= 0), "File dataspace creation succeeded");
+
+ memspace = H5Screate_simple(SHRINKING_GROWING_CHUNKS_DATASET_DIMS, sel_dims, NULL);
+ VRFY((memspace >= 0), "Memory dataspace creation succeeded");
+
+ /* Create chunked dataset */
+ plist_id = H5Pcreate(H5P_DATASET_CREATE);
+ VRFY((plist_id >= 0), "DCPL creation succeeded");
+
+ VRFY((H5Pset_chunk(plist_id, SHRINKING_GROWING_CHUNKS_DATASET_DIMS, chunk_dims) >= 0),
+ "Chunk size set");
+
+ /* Add test filter to the pipeline */
+ VRFY((set_dcpl_filter(plist_id) >= 0), "Filter set");
+
+ dset_id = H5Dcreate2(file_id, SHRINKING_GROWING_CHUNKS_DATASET_NAME, H5T_NATIVE_DOUBLE, filespace,
+ H5P_DEFAULT, plist_id, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "Dataset creation succeeded");
+
+ VRFY((H5Pclose(plist_id) >= 0), "DCPL close succeeded");
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+
+ /*
+ * Each process defines the dataset selection in memory and writes
+ * it to the hyperslab in the file
+ */
+ count[0] = 1;
+ count[1] = (hsize_t) SHRINKING_GROWING_CHUNKS_NCOLS / (hsize_t) SHRINKING_GROWING_CHUNKS_CH_NCOLS;
+ stride[0] = (hsize_t) SHRINKING_GROWING_CHUNKS_CH_NROWS;
+ stride[1] = (hsize_t) SHRINKING_GROWING_CHUNKS_CH_NCOLS;
+ block[0] = (hsize_t) SHRINKING_GROWING_CHUNKS_CH_NROWS;
+ block[1] = (hsize_t) SHRINKING_GROWING_CHUNKS_CH_NCOLS;
+ start[0] = ((hsize_t) mpi_rank * (hsize_t) SHRINKING_GROWING_CHUNKS_CH_NROWS * count[0]);
+ start[1] = 0;
+
+ if (VERBOSE_MED) {
+ HDprintf("Process %d is writing with count[ %llu, %llu ], stride[ %llu, %llu ], start[ %llu, %llu ], block size[ %llu, %llu ]\n",
+ mpi_rank, count[0], count[1], stride[0], stride[1], start[0], start[1], block[0], block[1]);
+ HDfflush(stdout);
+ }
+
+ /* Select hyperslab in the file */
+ filespace = H5Dget_space(dset_id);
+    VRFY((filespace >= 0), "File dataspace retrieval succeeded");
+
+ VRFY((H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block) >= 0),
+ "Hyperslab selection succeeded");
+
+ /* Create property list for collective dataset write */
+ plist_id = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((plist_id >= 0), "DXPL creation succeeded");
+
+ VRFY((H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE) >= 0),
+ "Set DXPL MPIO succeeded");
+
+ data_size = sel_dims[0] * sel_dims[1] * sizeof(double);
+
+ data = (double *) HDcalloc(1, data_size);
+ VRFY((NULL != data), "HDcalloc succeeded");
+
+ for (i = 0; i < SHRINKING_GROWING_CHUNKS_NLOOPS; i++) {
+        /* Alternate between essentially incompressible random data and
+         * highly compressible zeroed-out data on successive writes
+         */
+        if ((i % 2))
+            HDmemset(data, 0, data_size);
+        else {
+            size_t j;
+            for (j = 0; j < data_size / sizeof(*data); j++) {
+                /* Random double in the interval [0, 1] */
+                data[j] = (double) rand() / (double) RAND_MAX;
+            }
+        }
+
+ VRFY((H5Dwrite(dset_id, H5T_NATIVE_DOUBLE, memspace, filespace, plist_id, data) >= 0),
+ "Dataset write succeeded");
+ }
+
+ if (data) HDfree(data);
+
+ VRFY((H5Dclose(dset_id) >= 0), "Dataset close succeeded");
+ VRFY((H5Sclose(filespace) >= 0), "File dataspace close succeeded");
+ VRFY((H5Sclose(memspace) >= 0), "Memory dataspace close succeeded");
+ VRFY((H5Pclose(plist_id) >= 0), "DXPL close succeeded");
+ VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
+
+ return;
+}
+#endif
+
+int
+main(int argc, char** argv)
+{
+ size_t i;
+ hid_t file_id = -1, fapl = -1;
+ int mpi_code;
+
+ /* Initialize MPI */
+ MPI_Init(&argc, &argv);
+ MPI_Comm_size(comm, &mpi_size);
+ MPI_Comm_rank(comm, &mpi_rank);
+
+ if (mpi_size <= 0) {
+ if (MAINPROCESS) {
+ HDprintf("The Parallel Filters tests require at least 1 rank.\n");
+ HDprintf("Quitting...\n");
+ }
+
+ MPI_Abort(MPI_COMM_WORLD, 1);
+ }
+
+ if (H5dont_atexit() < 0) {
+ if (MAINPROCESS) {
+            HDprintf("Failed to turn off atexit processing. Continuing.\n");
+ }
+ }
+
+ H5open();
+
+ if (MAINPROCESS) {
+ HDprintf("==========================\n");
+ HDprintf("Parallel Filters tests\n");
+ HDprintf("==========================\n\n");
+ }
+
+ if (VERBOSE_MED) h5_show_hostname();
+
+ ALARM_ON;
+
+ /* Create test file */
+ fapl = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((fapl >= 0), "FAPL creation succeeded");
+
+ VRFY((H5Pset_fapl_mpio(fapl, comm, info) >= 0), "Set FAPL MPIO succeeded");
+
+ VRFY((H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
+ "Set libver bounds succeeded");
+
+ VRFY((h5_fixname(FILENAME[0], fapl, filenames[0], sizeof(filenames[0])) != NULL),
+ "Test file name created");
+
+ file_id = H5Fcreate(filenames[0], H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
+ VRFY((file_id >= 0), "Test file creation succeeded");
+
+ VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
+
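+    /* Run each test behind a barrier so that all ranks enter it together;
+     * a failed MPI_Barrier is tallied as an error rather than aborting the run
+     */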
+ for (i = 0; i < ARRAY_SIZE(tests); i++) {
+ if (MPI_SUCCESS == (mpi_code = MPI_Barrier(comm))) {
+ (*tests[i])();
+ }
+ else {
+ if (MAINPROCESS) MESG("MPI_Barrier failed");
+ nerrors++;
+ }
+ }
+
+ /*
+ * Increment the filter index to switch to the checksum filter
+ * and re-run the tests.
+ */
+ cur_filter_idx++;
+
+ h5_clean_files(FILENAME, fapl);
+
+ fapl = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((fapl >= 0), "FAPL creation succeeded");
+
+ VRFY((H5Pset_fapl_mpio(fapl, comm, info) >= 0), "Set FAPL MPIO succeeded");
+
+ VRFY((H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) >= 0),
+ "Set libver bounds succeeded");
+
+ file_id = H5Fcreate(filenames[0], H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
+ VRFY((file_id >= 0), "Test file creation succeeded");
+
+ VRFY((H5Fclose(file_id) >= 0), "File close succeeded");
+
+ if (MAINPROCESS) {
+ HDprintf("\n=================================================================\n");
+ HDprintf("Re-running Parallel Filters tests with Fletcher32 checksum filter\n");
+ HDprintf("=================================================================\n\n");
+ }
+
+ for (i = 0; i < ARRAY_SIZE(tests); i++) {
+ if (MPI_SUCCESS == (mpi_code = MPI_Barrier(comm))) {
+ (*tests[i])();
+ }
+ else {
+ if (MAINPROCESS) MESG("MPI_Barrier failed");
+ nerrors++;
+ }
+ }
+
+ if (nerrors) goto exit;
+
+ if (MAINPROCESS) HDputs("All Parallel Filters tests passed\n");
+
+exit:
+ if (nerrors)
+ if (MAINPROCESS)
+ HDprintf("*** %d TEST ERROR%s OCCURRED ***\n", nerrors,
+ nerrors > 1 ? "S" : "");
+
+ ALARM_OFF;
+
+ h5_clean_files(FILENAME, fapl);
+
+ H5close();
+
+ MPI_Finalize();
+
+ exit((nerrors ? EXIT_FAILURE : EXIT_SUCCESS));
+}
diff --git a/testpar/t_filters_parallel.h b/testpar/t_filters_parallel.h
new file mode 100644
index 0000000..9543508
--- /dev/null
+++ b/testpar/t_filters_parallel.h
@@ -0,0 +1,339 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * Copyright by the Board of Trustees of the University of Illinois. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the files COPYING and Copyright.html. COPYING can be found at the root *
+ * of the source code distribution tree; Copyright.html can be found at the *
+ * root level of an installed copy of the electronic HDF5 document set and *
+ * is linked from the top-level documents page. It can also be found at *
+ * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
+ * access to either file, you may request a copy from help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/*
+ * Programmer: Jordan Henderson
+ * 01/31/2017
+ *
+ * This file contains #defines and shared globals for the
+ * tests of the use of filters in parallel HDF5, which is
+ * implemented in H5Dmpio.c
+ */
+
+#ifndef TEST_PARALLEL_FILTERS_H_
+#define TEST_PARALLEL_FILTERS_H_
+
+#include <string.h>
+
+#include <stdlib.h>
+#include "testpar.h"
+
+/* Used to load filters other than GZIP; uncomment and define the
+ * fields below to use a dynamically loaded filter
+ */
+/* #define DYNAMIC_FILTER */
+#define FILTER_NUM_CDVALUES 1
+const unsigned int cd_values[FILTER_NUM_CDVALUES] = { 0 };
+H5Z_filter_t filter_id;
+unsigned int flags = 0;
+size_t cd_nelmts = FILTER_NUM_CDVALUES;
+
+/* Utility Macros */
+#define STRINGIFY(type) #type
+
+/* Common defines for all tests */
+#define C_DATATYPE long
+#define C_DATATYPE_MPI MPI_LONG
+#define COMPOUND_C_DATATYPE cmpd_filtered_t
+#define C_DATATYPE_STR(type) STRINGIFY(type)
+#define HDF5_DATATYPE_NAME H5T_NATIVE_LONG
+
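+/* Note: the indirection through C_DATATYPE_STR() lets its argument be
+ * macro-expanded before stringification, so C_DATATYPE_STR(C_DATATYPE)
+ * yields "long" rather than "C_DATATYPE"
+ */
+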
+/* Macro used to generate data for datasets for later verification;
+ * given an index value i, it produces test data based upon the selected mode below
+ */
+#define GEN_DATA(i) INCREMENTAL_DATA(i)
+
+/* For experimental purposes only; will cause tests to fail the data verification phase - JTH */
+/* #define GEN_DATA(i) RANK_DATA(i) */
+
+#define INCREMENTAL_DATA(i) ((size_t) mpi_rank + i) /* Generates incremental test data */
+#define RANK_DATA(i) (mpi_rank) /* Generates test data to visibly show which rank wrote to which parts of the dataset */
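+
+/* For example, with GEN_DATA mapped to INCREMENTAL_DATA, rank 2 writes the
+ * values 2, 3, 4, ... for i = 0, 1, 2, ...; with RANK_DATA, every value a
+ * rank writes is simply its rank number
+ */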
+
+#define DEFAULT_DEFLATE_LEVEL 6
+
+#define DIM0_SCALE_FACTOR 4
+#define DIM1_SCALE_FACTOR 2
+
+/* Struct type for the compound datatype filtered dataset tests */
+typedef struct {
+ short field1;
+ int field2;
+ long field3;
+} COMPOUND_C_DATATYPE;
+
+/* Defines for the one-chunk filtered dataset write test */
+#define WRITE_ONE_CHUNK_FILTERED_DATASET_NAME "one_chunk_filtered_dataset_write"
+#define WRITE_ONE_CHUNK_FILTERED_DATASET_DIMS 2
+#define WRITE_ONE_CHUNK_FILTERED_DATASET_NROWS (mpi_size * DIM0_SCALE_FACTOR) /* Must be an exact multiple of the number of ranks to avoid issues */
+#define WRITE_ONE_CHUNK_FILTERED_DATASET_NCOLS (mpi_size * DIM1_SCALE_FACTOR) /* Must be an exact multiple of the number of ranks to avoid issues */
+#define WRITE_ONE_CHUNK_FILTERED_DATASET_CH_NROWS WRITE_ONE_CHUNK_FILTERED_DATASET_NROWS
+#define WRITE_ONE_CHUNK_FILTERED_DATASET_CH_NCOLS WRITE_ONE_CHUNK_FILTERED_DATASET_NCOLS
+
+/* Defines for the unshared filtered chunks write test */
+#define WRITE_UNSHARED_FILTERED_CHUNKS_DATASET_NAME "unshared_filtered_chunks_write"
+#define WRITE_UNSHARED_FILTERED_CHUNKS_DATASET_DIMS 2
+#define WRITE_UNSHARED_FILTERED_CHUNKS_NROWS (mpi_size * DIM0_SCALE_FACTOR)
+#define WRITE_UNSHARED_FILTERED_CHUNKS_NCOLS (mpi_size * DIM1_SCALE_FACTOR)
+#define WRITE_UNSHARED_FILTERED_CHUNKS_CH_NROWS (WRITE_UNSHARED_FILTERED_CHUNKS_NROWS / mpi_size)
+#define WRITE_UNSHARED_FILTERED_CHUNKS_CH_NCOLS (WRITE_UNSHARED_FILTERED_CHUNKS_NCOLS / mpi_size)
+
+/* Defines for the shared filtered chunks write test */
+#define WRITE_SHARED_FILTERED_CHUNKS_DATASET_NAME "shared_filtered_chunks_write"
+#define WRITE_SHARED_FILTERED_CHUNKS_DATASET_DIMS 2
+#define WRITE_SHARED_FILTERED_CHUNKS_CH_NROWS (mpi_size)
+#define WRITE_SHARED_FILTERED_CHUNKS_CH_NCOLS (mpi_size)
+#define WRITE_SHARED_FILTERED_CHUNKS_NROWS (WRITE_SHARED_FILTERED_CHUNKS_CH_NROWS * DIM0_SCALE_FACTOR)
+#define WRITE_SHARED_FILTERED_CHUNKS_NCOLS (WRITE_SHARED_FILTERED_CHUNKS_CH_NCOLS * DIM1_SCALE_FACTOR)
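+
+/* Geometry note, derived from the defines above: in the "unshared" tests the
+ * chunk dimensions divide the dataset so each rank's selection can consist of
+ * whole chunks (with 4 ranks and the default scale factors, a 16x8 dataset of
+ * rank-private 4x2 chunks), while in the "shared" tests each chunk spans
+ * mpi_size rows and columns (4x4 here), so several ranks must cooperate to
+ * update every chunk
+ */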
+
+/* Defines for the filtered chunks write test where a process has no selection */
+#define WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME "single_no_selection_filtered_chunks_write"
+#define WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS 2
+#define WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS (DIM0_SCALE_FACTOR)
+#define WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NCOLS (DIM1_SCALE_FACTOR)
+#define WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NROWS (WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS * mpi_size)
+#define WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NCOLS (WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NCOLS * mpi_size)
+#define WRITE_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NO_SELECT_PROC (mpi_size - 1)
+
+/* Defines for the filtered chunks write test where no process has a selection */
+#define WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME "all_no_selection_filtered_chunks_write"
+#define WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS 2
+#define WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS (DIM0_SCALE_FACTOR)
+#define WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_CH_NCOLS (DIM1_SCALE_FACTOR)
+#define WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_NROWS (WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS * mpi_size)
+#define WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_NCOLS (WRITE_ALL_NO_SELECTION_FILTERED_CHUNKS_CH_NCOLS * mpi_size)
+
+/* Defines for the filtered chunks write test with a point selection */
+#define WRITE_POINT_SELECTION_FILTERED_CHUNKS_DATASET_NAME "point_selection_filtered_chunks_write"
+#define WRITE_POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS 2
+#define WRITE_POINT_SELECTION_FILTERED_CHUNKS_CH_NROWS (DIM0_SCALE_FACTOR)
+#define WRITE_POINT_SELECTION_FILTERED_CHUNKS_CH_NCOLS (DIM1_SCALE_FACTOR)
+#define WRITE_POINT_SELECTION_FILTERED_CHUNKS_NROWS (WRITE_POINT_SELECTION_FILTERED_CHUNKS_CH_NROWS * mpi_size)
+#define WRITE_POINT_SELECTION_FILTERED_CHUNKS_NCOLS (WRITE_POINT_SELECTION_FILTERED_CHUNKS_CH_NCOLS * mpi_size)
+
+/* Defines for the filtered dataset interleaved write test */
+#define INTERLEAVED_WRITE_FILTERED_DATASET_NAME "filtered_dataset_interleaved_write"
+#define INTERLEAVED_WRITE_FILTERED_DATASET_DIMS 2
+#define INTERLEAVED_WRITE_FILTERED_DATASET_CH_NROWS (mpi_size)
+#define INTERLEAVED_WRITE_FILTERED_DATASET_CH_NCOLS (DIM1_SCALE_FACTOR)
+#define INTERLEAVED_WRITE_FILTERED_DATASET_NROWS (INTERLEAVED_WRITE_FILTERED_DATASET_CH_NROWS * DIM0_SCALE_FACTOR)
+#define INTERLEAVED_WRITE_FILTERED_DATASET_NCOLS (INTERLEAVED_WRITE_FILTERED_DATASET_CH_NCOLS * DIM1_SCALE_FACTOR)
+
+/* Defines for the 3D unshared filtered dataset separate page write test */
+#define WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_NAME "3D_unshared_filtered_chunks_separate_pages_write"
+#define WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS 3
+#define WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NROWS (mpi_size * DIM0_SCALE_FACTOR)
+#define WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NCOLS (mpi_size * DIM1_SCALE_FACTOR)
+#define WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DEPTH (mpi_size)
+#define WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NROWS (WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NROWS / mpi_size)
+#define WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NCOLS (WRITE_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NCOLS / mpi_size)
+
+/* Defines for the 3D unshared filtered dataset same page write test */
+#define WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_NAME "3D_unshared_filtered_chunks_same_pages_write"
+#define WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS 3
+#define WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_NROWS (mpi_size * DIM0_SCALE_FACTOR)
+#define WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_NCOLS (mpi_size * DIM1_SCALE_FACTOR)
+#define WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DEPTH (mpi_size)
+#define WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NROWS (WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_NROWS / mpi_size)
+#define WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NCOLS (WRITE_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_NCOLS / mpi_size)
+
+/* Defines for the 3d shared filtered dataset write test */
+#define WRITE_SHARED_FILTERED_CHUNKS_3D_DATASET_NAME "3D_shared_filtered_chunks_write"
+#define WRITE_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS 3
+#define WRITE_SHARED_FILTERED_CHUNKS_3D_CH_NROWS (mpi_size)
+#define WRITE_SHARED_FILTERED_CHUNKS_3D_CH_NCOLS (DIM1_SCALE_FACTOR)
+#define WRITE_SHARED_FILTERED_CHUNKS_3D_NROWS (WRITE_SHARED_FILTERED_CHUNKS_3D_CH_NROWS * DIM0_SCALE_FACTOR)
+#define WRITE_SHARED_FILTERED_CHUNKS_3D_NCOLS (WRITE_SHARED_FILTERED_CHUNKS_3D_CH_NCOLS * DIM1_SCALE_FACTOR)
+#define WRITE_SHARED_FILTERED_CHUNKS_3D_DEPTH (mpi_size)
+
+/* Defines for the compound datatype filtered dataset no conversion write test with unshared chunks */
+#define WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_NAME "compound_unshared_filtered_chunks_no_conversion_write"
+#define WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS 2
+#define WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_NROWS 1
+#define WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_NCOLS mpi_size
+#define WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NROWS 1
+#define WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NCOLS 1
+#define WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_ENTRIES_PER_PROC (WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_NCOLS / mpi_size)
+
+/* Defines for the compound datatype filtered dataset no conversion write test with shared chunks */
+#define WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_NAME "compound_shared_filtered_chunks_no_conversion_write"
+#define WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS 2
+#define WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_NROWS mpi_size
+#define WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_NCOLS mpi_size
+#define WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NROWS mpi_size
+#define WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NCOLS 1
+#define WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_ENTRIES_PER_PROC WRITE_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_NCOLS
+
+/* Defines for the compound datatype filtered dataset type conversion write test with unshared chunks */
+#define WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_NAME "compound_unshared_filtered_chunks_type_conversion_write"
+#define WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS 2
+#define WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_NROWS 1
+#define WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_NCOLS mpi_size
+#define WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NROWS 1
+#define WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NCOLS 1
+#define WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_ENTRIES_PER_PROC (WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_NCOLS / mpi_size)
+
+/* Defines for the compound datatype filtered dataset type conversion write test with shared chunks */
+#define WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_NAME "compound_shared_filtered_chunks_type_conversion_write"
+#define WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS 2
+#define WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_NROWS mpi_size
+#define WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_NCOLS mpi_size
+#define WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NROWS mpi_size
+#define WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NCOLS 1
+#define WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_ENTRIES_PER_PROC WRITE_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_NCOLS
+
+/* Defines for the one-chunk filtered dataset read test */
+#define READ_ONE_CHUNK_FILTERED_DATASET_NAME "one_chunk_filtered_dataset_read"
+#define READ_ONE_CHUNK_FILTERED_DATASET_DIMS 2
+#define READ_ONE_CHUNK_FILTERED_DATASET_NROWS (mpi_size * DIM0_SCALE_FACTOR) /* Must be an exact multiple of the number of ranks to avoid issues */
+#define READ_ONE_CHUNK_FILTERED_DATASET_NCOLS (mpi_size * DIM1_SCALE_FACTOR) /* Must be an exact multiple of the number of ranks to avoid issues */
+#define READ_ONE_CHUNK_FILTERED_DATASET_CH_NROWS READ_ONE_CHUNK_FILTERED_DATASET_NROWS
+#define READ_ONE_CHUNK_FILTERED_DATASET_CH_NCOLS READ_ONE_CHUNK_FILTERED_DATASET_NCOLS
+
+/* Defines for the unshared filtered chunks read test */
+#define READ_UNSHARED_FILTERED_CHUNKS_DATASET_NAME "unshared_filtered_chunks_read"
+#define READ_UNSHARED_FILTERED_CHUNKS_DATASET_DIMS 2
+#define READ_UNSHARED_FILTERED_CHUNKS_NROWS (mpi_size * DIM0_SCALE_FACTOR)
+#define READ_UNSHARED_FILTERED_CHUNKS_NCOLS (mpi_size * DIM1_SCALE_FACTOR)
+#define READ_UNSHARED_FILTERED_CHUNKS_CH_NROWS (READ_UNSHARED_FILTERED_CHUNKS_NROWS / mpi_size)
+#define READ_UNSHARED_FILTERED_CHUNKS_CH_NCOLS (READ_UNSHARED_FILTERED_CHUNKS_NCOLS / mpi_size)
+
+/* Defines for the shared filtered chunks read test */
+#define READ_SHARED_FILTERED_CHUNKS_DATASET_NAME "shared_filtered_chunks_read"
+#define READ_SHARED_FILTERED_CHUNKS_DATASET_DIMS 2
+#define READ_SHARED_FILTERED_CHUNKS_CH_NROWS (mpi_size)
+#define READ_SHARED_FILTERED_CHUNKS_CH_NCOLS (mpi_size)
+#define READ_SHARED_FILTERED_CHUNKS_NROWS (READ_SHARED_FILTERED_CHUNKS_CH_NROWS * DIM0_SCALE_FACTOR)
+#define READ_SHARED_FILTERED_CHUNKS_NCOLS (READ_SHARED_FILTERED_CHUNKS_CH_NCOLS * DIM1_SCALE_FACTOR)
+
+/* Defines for the filtered chunks read test where a process has no selection */
+#define READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME "single_no_selection_filtered_chunks_read"
+#define READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS 2
+#define READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS (DIM0_SCALE_FACTOR)
+#define READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NCOLS (DIM1_SCALE_FACTOR)
+#define READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NROWS (READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS * mpi_size)
+#define READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NCOLS (READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_CH_NCOLS * mpi_size)
+#define READ_SINGLE_NO_SELECTION_FILTERED_CHUNKS_NO_SELECT_PROC (mpi_size - 1)
+
+/* Defines for the filtered chunks read test where no process has a selection */
+#define READ_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_NAME "all_no_selection_filtered_chunks_read"
+#define READ_ALL_NO_SELECTION_FILTERED_CHUNKS_DATASET_DIMS 2
+#define READ_ALL_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS (DIM0_SCALE_FACTOR)
+#define READ_ALL_NO_SELECTION_FILTERED_CHUNKS_CH_NCOLS (DIM1_SCALE_FACTOR)
+#define READ_ALL_NO_SELECTION_FILTERED_CHUNKS_NROWS (READ_ALL_NO_SELECTION_FILTERED_CHUNKS_CH_NROWS * mpi_size)
+#define READ_ALL_NO_SELECTION_FILTERED_CHUNKS_NCOLS (READ_ALL_NO_SELECTION_FILTERED_CHUNKS_CH_NCOLS * mpi_size)
+
+/* Defines for the filtered chunks read test with a point selection */
+#define READ_POINT_SELECTION_FILTERED_CHUNKS_DATASET_NAME "point_selection_filtered_chunks_read"
+#define READ_POINT_SELECTION_FILTERED_CHUNKS_DATASET_DIMS 2
+#define READ_POINT_SELECTION_FILTERED_CHUNKS_CH_NROWS (DIM0_SCALE_FACTOR)
+#define READ_POINT_SELECTION_FILTERED_CHUNKS_CH_NCOLS (DIM1_SCALE_FACTOR)
+#define READ_POINT_SELECTION_FILTERED_CHUNKS_NROWS (READ_POINT_SELECTION_FILTERED_CHUNKS_CH_NROWS * mpi_size)
+#define READ_POINT_SELECTION_FILTERED_CHUNKS_NCOLS (READ_POINT_SELECTION_FILTERED_CHUNKS_CH_NCOLS * mpi_size)
+
+/* Defines for the filtered dataset interleaved read test */
+#define INTERLEAVED_READ_FILTERED_DATASET_NAME "filtered_dataset_interleaved_read"
+#define INTERLEAVED_READ_FILTERED_DATASET_DIMS 2
+#define INTERLEAVED_READ_FILTERED_DATASET_CH_NROWS (mpi_size)
+#define INTERLEAVED_READ_FILTERED_DATASET_CH_NCOLS (DIM1_SCALE_FACTOR)
+#define INTERLEAVED_READ_FILTERED_DATASET_NROWS (INTERLEAVED_READ_FILTERED_DATASET_CH_NROWS * DIM0_SCALE_FACTOR)
+#define INTERLEAVED_READ_FILTERED_DATASET_NCOLS (INTERLEAVED_READ_FILTERED_DATASET_CH_NCOLS * DIM1_SCALE_FACTOR)
+
+/* Defines for the 3D unshared filtered dataset separate page read test */
+#define READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_NAME "3D_unshared_filtered_chunks_separate_pages_read"
+#define READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DATASET_DIMS 3
+#define READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NROWS (mpi_size * DIM0_SCALE_FACTOR)
+#define READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NCOLS (mpi_size * DIM1_SCALE_FACTOR)
+#define READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_DEPTH (mpi_size)
+#define READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NROWS (READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NROWS / mpi_size)
+#define READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_CH_NCOLS (READ_UNSHARED_FILTERED_CHUNKS_3D_SEP_PAGE_NCOLS / mpi_size)
+
+/* Defines for the 3D unshared filtered dataset same page read test */
+#define READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_NAME "3D_unshared_filtered_chunks_same_pages_read"
+#define READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DATASET_DIMS 3
+#define READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_NROWS (mpi_size * DIM0_SCALE_FACTOR)
+#define READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_NCOLS (mpi_size * DIM1_SCALE_FACTOR)
+#define READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_DEPTH (mpi_size)
+#define READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NROWS (READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_NROWS / mpi_size)
+#define READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_CH_NCOLS (READ_UNSHARED_FILTERED_CHUNKS_3D_SAME_PAGE_NCOLS / mpi_size)
+
+/* Defines for the 3d shared filtered dataset read test */
+#define READ_SHARED_FILTERED_CHUNKS_3D_DATASET_NAME "3D_shared_filtered_chunks_read"
+#define READ_SHARED_FILTERED_CHUNKS_3D_DATASET_DIMS 3
+#define READ_SHARED_FILTERED_CHUNKS_3D_CH_NROWS (mpi_size)
+#define READ_SHARED_FILTERED_CHUNKS_3D_CH_NCOLS (DIM1_SCALE_FACTOR)
+#define READ_SHARED_FILTERED_CHUNKS_3D_NROWS (READ_SHARED_FILTERED_CHUNKS_3D_CH_NROWS * DIM0_SCALE_FACTOR)
+#define READ_SHARED_FILTERED_CHUNKS_3D_NCOLS (READ_SHARED_FILTERED_CHUNKS_3D_CH_NCOLS * DIM1_SCALE_FACTOR)
+#define READ_SHARED_FILTERED_CHUNKS_3D_DEPTH (mpi_size)
+
+/* Defines for the compound datatype filtered dataset no conversion read test with unshared chunks */
+#define READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_NAME "compound_unshared_filtered_chunks_no_conversion_read"
+#define READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_DATASET_DIMS 2
+#define READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_NROWS 1
+#define READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_NCOLS mpi_size
+#define READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NROWS 1
+#define READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_CH_NCOLS 1
+#define READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_ENTRIES_PER_PROC (READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_UNSHARED_NCOLS / mpi_size)
+
+/* Defines for the compound datatype filtered dataset no conversion read test with shared chunks */
+#define READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_NAME "compound_shared_filtered_chunks_no_conversion_read"
+#define READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_DATASET_DIMS 2
+#define READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_NROWS mpi_size
+#define READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_NCOLS mpi_size
+#define READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NROWS mpi_size
+#define READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_CH_NCOLS 1
+#define READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_ENTRIES_PER_PROC READ_COMPOUND_FILTERED_CHUNKS_NO_CONVERSION_SHARED_NCOLS
+
+/* Defines for the compound datatype filtered dataset type conversion read test with unshared chunks */
+#define READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_NAME "compound_unshared_filtered_chunks_type_conversion_read"
+#define READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_DATASET_DIMS 2
+#define READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_NROWS 1
+#define READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_NCOLS mpi_size
+#define READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NROWS 1
+#define READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_CH_NCOLS 1
+#define READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_ENTRIES_PER_PROC (READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_UNSHARED_NCOLS / mpi_size)
+
+/* Defines for the compound datatype filtered dataset type conversion read test with shared chunks */
+#define READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_NAME "compound_shared_filtered_chunks_type_conversion_read"
+#define READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_DATASET_DIMS 2
+#define READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_NROWS mpi_size
+#define READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_NCOLS mpi_size
+#define READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NROWS mpi_size
+#define READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_CH_NCOLS 1
+#define READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_ENTRIES_PER_PROC READ_COMPOUND_FILTERED_CHUNKS_TYPE_CONVERSION_SHARED_NCOLS
+
+/* Defines for the write file serially/read in parallel test */
+#define WRITE_SERIAL_READ_PARALLEL_DATASET_NAME "write_serial_read_parallel"
+#define WRITE_SERIAL_READ_PARALLEL_DATASET_DIMS 3
+#define WRITE_SERIAL_READ_PARALLEL_NROWS (mpi_size * DIM0_SCALE_FACTOR)
+#define WRITE_SERIAL_READ_PARALLEL_NCOLS (mpi_size * DIM1_SCALE_FACTOR)
+#define WRITE_SERIAL_READ_PARALLEL_DEPTH (mpi_size)
+#define WRITE_SERIAL_READ_PARALLEL_CH_NROWS (WRITE_SERIAL_READ_PARALLEL_NROWS / mpi_size)
+#define WRITE_SERIAL_READ_PARALLEL_CH_NCOLS (WRITE_SERIAL_READ_PARALLEL_NCOLS / mpi_size)
+
+/* Defines for the write file in parallel/read serially test */
+#define WRITE_PARALLEL_READ_SERIAL_DATASET_NAME "write_parallel_read_serial"
+#define WRITE_PARALLEL_READ_SERIAL_DATASET_DIMS 3
+#define WRITE_PARALLEL_READ_SERIAL_NROWS (mpi_size * DIM0_SCALE_FACTOR)
+#define WRITE_PARALLEL_READ_SERIAL_NCOLS (mpi_size * DIM1_SCALE_FACTOR)
+#define WRITE_PARALLEL_READ_SERIAL_DEPTH (mpi_size)
+#define WRITE_PARALLEL_READ_SERIAL_CH_NROWS (WRITE_PARALLEL_READ_SERIAL_NROWS / mpi_size)
+#define WRITE_PARALLEL_READ_SERIAL_CH_NCOLS (WRITE_PARALLEL_READ_SERIAL_NCOLS / mpi_size)
+
+/* Defines for the shrinking/growing chunks test */
+#define SHRINKING_GROWING_CHUNKS_DATASET_NAME "shrink_grow_chunks_test"
+#define SHRINKING_GROWING_CHUNKS_DATASET_DIMS 2
+#define SHRINKING_GROWING_CHUNKS_NROWS (mpi_size * DIM0_SCALE_FACTOR)
+#define SHRINKING_GROWING_CHUNKS_NCOLS (mpi_size * DIM1_SCALE_FACTOR)
+#define SHRINKING_GROWING_CHUNKS_CH_NROWS (SHRINKING_GROWING_CHUNKS_NROWS / mpi_size)
+#define SHRINKING_GROWING_CHUNKS_CH_NCOLS (SHRINKING_GROWING_CHUNKS_NCOLS / mpi_size)
+#define SHRINKING_GROWING_CHUNKS_NLOOPS 20
+
+#endif /* TEST_PARALLEL_FILTERS_H_ */
diff --git a/testpar/t_init_term.c b/testpar/t_init_term.c
index 933fbd2..0e40fe4 100644
--- a/testpar/t_init_term.c
+++ b/testpar/t_init_term.c
@@ -37,7 +37,7 @@ main (int argc, char **argv)
/* Initialize and finalize MPI */
MPI_Init(&argc, &argv);
MPI_Comm_size(comm, &mpi_size);
- MPI_Comm_rank(comm, &mpi_rank);
+ MPI_Comm_rank(comm, &mpi_rank);
if(MAINPROCESS)
TESTING("Usage of Serial HDF5 after MPI_Finalize() is called");
@@ -65,7 +65,7 @@ main (int argc, char **argv)
if(MAINPROCESS) {
if(0 == nerrors)
- PASSED()
+ PASSED();
else
H5_FAILED()
}
diff --git a/testpar/t_mdset.c b/testpar/t_mdset.c
index 5d989bb..db0d059 100644
--- a/testpar/t_mdset.c
+++ b/testpar/t_mdset.c
@@ -12,6 +12,8 @@
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include "testphdf5.h"
+#include "H5Dprivate.h"
+#include "H5private.h"
#define DIM 2
#define SIZE 32
@@ -20,7 +22,7 @@
enum obj_type { is_group, is_dset };
-static int get_size(void);
+static int get_size(void);
static void write_dataset(hid_t, hid_t, hid_t);
static int read_dataset(hid_t, hid_t, hid_t);
static void create_group_recursive(hid_t, hid_t, hid_t, int);
@@ -53,13 +55,9 @@ get_size(void)
MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
if(mpi_size > size ) {
-
if((mpi_size % 2) == 0 ) {
-
size = mpi_size;
-
} else {
-
size = mpi_size + 1;
}
}
@@ -78,7 +76,7 @@ get_size(void)
void zero_dim_dset(void)
{
int mpi_size, mpi_rank;
- const char *filename;
+ const char *filename;
hid_t fid, plist, dcpl, dsid, sid;
hsize_t dim, chunk_dim;
herr_t ret;
@@ -132,38 +130,39 @@ void zero_dim_dset(void)
* Example of using PHDF5 to create ndatasets datasets. Each process write
* a slab of array to the file.
*
- * Changes: Updated function to use a dynamically calculated size,
- * instead of the old SIZE #define. This should allow it
- * to function with an arbitrary number of processors.
+ * Changes: Updated function to use a dynamically calculated size,
+ * instead of the old SIZE #define. This should allow it
+ * to function with an arbitrary number of processors.
*
- * JRM - 8/11/04
+ * JRM - 8/11/04
*/
void multiple_dset_write(void)
{
- int i, j, n, mpi_size, mpi_rank, size;
+ int i, j, n, mpi_size, mpi_rank, size;
hid_t iof, plist, dataset, memspace, filespace;
hid_t dcpl; /* Dataset creation property list */
hsize_t chunk_origin [DIM];
hsize_t chunk_dims [DIM], file_dims [DIM];
hsize_t count[DIM]={1,1};
- double * outme = NULL;
+ double *outme = NULL;
double fill=1.0; /* Fill value */
- char dname [100];
+ char dname [100];
herr_t ret;
- const H5Ptest_param_t *pt;
- char *filename;
- int ndatasets;
+ const H5Ptest_param_t *pt;
+ char *filename;
+ int ndatasets;
pt = GetTestParameters();
filename = pt->name;
ndatasets = pt->count;
size = get_size();
+ H5_CHECK_OVERFLOW(size, int, size_t);
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
- outme = HDmalloc((size_t)(size * size * sizeof(double)));
+ outme = HDmalloc((size_t)size * (size_t)size * sizeof(double));
VRFY((outme != NULL), "HDmalloc succeeded for outme");
plist = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type);
@@ -189,23 +188,23 @@ void multiple_dset_write(void)
VRFY((ret>=0), "set fill-value succeeded");
for(n = 0; n < ndatasets; n++) {
- sprintf(dname, "dataset %d", n);
- dataset = H5Dcreate2(iof, dname, H5T_NATIVE_DOUBLE, filespace, H5P_DEFAULT, dcpl, H5P_DEFAULT);
- VRFY((dataset > 0), dname);
+ HDsprintf(dname, "dataset %d", n);
+ dataset = H5Dcreate2(iof, dname, H5T_NATIVE_DOUBLE, filespace, H5P_DEFAULT, dcpl, H5P_DEFAULT);
+ VRFY((dataset > 0), dname);
- /* calculate data to write */
- for(i = 0; i < size; i++)
- for(j = 0; j < size; j++)
- outme [(i * size) + j] = n*1000 + mpi_rank;
+ /* calculate data to write */
+ for(i = 0; i < size; i++)
+ for(j = 0; j < size; j++)
+ outme [(i * size) + j] = n*1000 + mpi_rank;
- H5Dwrite(dataset, H5T_NATIVE_DOUBLE, memspace, filespace, H5P_DEFAULT, outme);
+ H5Dwrite(dataset, H5T_NATIVE_DOUBLE, memspace, filespace, H5P_DEFAULT, outme);
- H5Dclose(dataset);
+ H5Dclose(dataset);
#ifdef BARRIER_CHECKS
- if(!((n+1) % 10)) {
- printf("created %d datasets\n", n+1);
- MPI_Barrier(MPI_COMM_WORLD);
- }
+ if(!((n+1) % 10)) {
+ HDprintf("created %d datasets\n", n+1);
+ MPI_Barrier(MPI_COMM_WORLD);
+ }
#endif /* BARRIER_CHECKS */
}
@@ -220,35 +219,38 @@ void multiple_dset_write(void)
/* Example of using PHDF5 to create, write, and read compact dataset.
*
- * Changes: Updated function to use a dynamically calculated size,
- * instead of the old SIZE #define. This should allow it
- * to function with an arbitrary number of processors.
+ * Changes: Updated function to use a dynamically calculated size,
+ * instead of the old SIZE #define. This should allow it
+ * to function with an arbitrary number of processors.
*
- * JRM - 8/11/04
+ * JRM - 8/11/04
*/
void compact_dataset(void)
{
- int i, j, mpi_size, mpi_rank, size, err_num=0;
- hid_t iof, plist, dcpl, dxpl, dataset, filespace;
+ int i, j, mpi_size, mpi_rank, size, err_num=0;
+ hid_t iof, plist, dcpl, dxpl, dataset, filespace;
hsize_t file_dims [DIM];
- double * outme;
- double * inme;
- char dname[]="dataset";
- herr_t ret;
+ double *outme;
+ double *inme;
+ char dname[]="dataset";
+ herr_t ret;
const char *filename;
+#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
+ hbool_t prop_value;
+#endif
size = get_size();
for(i = 0; i < DIM; i++ )
- file_dims[i] = size;
+ file_dims[i] = (hsize_t)size;
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
- outme = HDmalloc((size_t)(size * size * sizeof(double)));
+ outme = HDmalloc((size_t)((size_t)size * (size_t)size * sizeof(double)));
VRFY((outme != NULL), "HDmalloc succeeded for outme");
- inme = HDmalloc((size_t)(size * size * sizeof(double)));
+ inme = HDmalloc((size_t)size * (size_t)size * sizeof(double));
VRFY((outme != NULL), "HDmalloc succeeded for inme");
filename = GetTestParameters();
@@ -277,15 +279,15 @@ void compact_dataset(void)
ret = H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);
VRFY((ret >= 0), "H5Pcreate xfer succeeded");
if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
- ret = H5Pset_dxpl_mpio_collective_opt(dxpl, H5FD_MPIO_INDIVIDUAL_IO);
- VRFY((ret>= 0),"set independent IO collectively succeeded");
+ ret = H5Pset_dxpl_mpio_collective_opt(dxpl, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret>= 0),"set independent IO collectively succeeded");
}
/* Recalculate data to write. Each process writes the same data. */
for(i = 0; i < size; i++)
for(j = 0; j < size; j++)
- outme[(i * size) + j] =(i + j) * 1000;
+ outme[(i * size) + j] =(i + j) * 1000;
ret = H5Dwrite(dataset, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, dxpl, outme);
VRFY((ret >= 0), "H5Dwrite succeeded");
@@ -307,23 +309,36 @@ void compact_dataset(void)
ret = H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);
VRFY((ret >= 0), "H5Pcreate xfer succeeded");
if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
- ret = H5Pset_dxpl_mpio_collective_opt(dxpl,H5FD_MPIO_INDIVIDUAL_IO);
- VRFY((ret>= 0),"set independent IO collectively succeeded");
+ ret = H5Pset_dxpl_mpio_collective_opt(dxpl,H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret>= 0),"set independent IO collectively succeeded");
}
-
dataset = H5Dopen2(iof, dname, H5P_DEFAULT);
VRFY((dataset >= 0), "H5Dopen2 succeeded");
+#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
+ prop_value = H5D_XFER_COLL_RANK0_BCAST_DEF;
+ ret = H5Pinsert2(dxpl, H5D_XFER_COLL_RANK0_BCAST_NAME, H5D_XFER_COLL_RANK0_BCAST_SIZE, &prop_value,
+ NULL, NULL, NULL, NULL, NULL, NULL);
+ VRFY((ret >= 0), "H5Pinsert2() succeeded");
+#endif /* H5_HAVE_INSTRUMENTED_LIBRARY */
+
ret = H5Dread(dataset, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, dxpl, inme);
VRFY((ret >= 0), "H5Dread succeeded");
+#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
+ prop_value = FALSE;
+ ret = H5Pget(dxpl, H5D_XFER_COLL_RANK0_BCAST_NAME, &prop_value);
+ VRFY((ret >= 0), "H5Pget succeeded");
+ VRFY((prop_value == FALSE && dxfer_coll_type == DXFER_COLLECTIVE_IO),"rank 0 Bcast optimization was performed for a compact dataset");
+#endif /* H5_HAVE_INSTRUMENTED_LIBRARY */
+
/* Verify data value */
for(i = 0; i < size; i++)
for(j = 0; j < size; j++)
- if(inme[(i * size) + j] != outme[(i * size) + j])
+ if(!H5_DBL_ABS_EQUAL(inme[(i * size) + j], outme[(i * size) + j]))
if(err_num++ < MAX_ERR_REPORT || VERBOSE_MED)
- printf("Dataset Verify failed at [%d][%d]: expect %f, got %f\n", i, j, outme[(i * size) + j], inme[(i * size) + j]);
+ HDprintf("Dataset Verify failed at [%d][%d]: expect %f, got %f\n", i, j, outme[(i * size) + j], inme[(i * size) + j]);
H5Pclose(plist);
H5Pclose(dxpl);
@@ -337,24 +352,24 @@ void compact_dataset(void)
* Example of using PHDF5 to create, write, and read dataset and attribute
* of Null dataspace.
*
- * Changes: Removed the assert that mpi_size <= the SIZE #define.
- * As best I can tell, this assert isn't needed here,
- * and in any case, the SIZE #define is being removed
- * in an update of the functions in this file to run
- * with an arbitrary number of processes.
+ * Changes: Removed the assert that mpi_size <= the SIZE #define.
+ * As best I can tell, this assert isn't needed here,
+ * and in any case, the SIZE #define is being removed
+ * in an update of the functions in this file to run
+ * with an arbitrary number of processes.
*
* JRM - 8/24/04
*/
void null_dataset(void)
{
- int mpi_size, mpi_rank;
- hid_t iof, plist, dxpl, dataset, attr, sid;
+ int mpi_size, mpi_rank;
+ hid_t iof, plist, dxpl, dataset, attr, sid;
unsigned uval=2; /* Buffer for writing to dataset */
- int val=1; /* Buffer for writing to attribute */
- int nelem;
- char dname[]="dataset";
- char attr_name[]="attribute";
- herr_t ret;
+ int val=1; /* Buffer for writing to attribute */
+ hssize_t nelem;
+ char dname[]="dataset";
+ char attr_name[]="attribute";
+ herr_t ret;
const char *filename;
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
@@ -382,8 +397,8 @@ void null_dataset(void)
ret = H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);
VRFY((ret >= 0), "H5Pcreate xfer succeeded");
if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
- ret = H5Pset_dxpl_mpio_collective_opt(dxpl, H5FD_MPIO_INDIVIDUAL_IO);
- VRFY((ret>= 0),"set independent IO collectively succeeded");
+ ret = H5Pset_dxpl_mpio_collective_opt(dxpl, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret>= 0),"set independent IO collectively succeeded");
}
@@ -416,8 +431,8 @@ void null_dataset(void)
ret = H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);
VRFY((ret >= 0), "H5Pcreate xfer succeeded");
if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
- ret = H5Pset_dxpl_mpio_collective_opt(dxpl,H5FD_MPIO_INDIVIDUAL_IO);
- VRFY((ret>= 0),"set independent IO collectively succeeded");
+ ret = H5Pset_dxpl_mpio_collective_opt(dxpl,H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((ret>= 0),"set independent IO collectively succeeded");
}
@@ -449,11 +464,11 @@ void null_dataset(void)
* sizes(2GB, 4GB, etc.), but the metadata for the file pushes the file over
* the boundary of interest.
*
- * Changes: Removed the assert that mpi_size <= the SIZE #define.
- * As best I can tell, this assert isn't needed here,
- * and in any case, the SIZE #define is being removed
- * in an update of the functions in this file to run
- * with an arbitrary number of processes.
+ * Changes: Removed the assert that mpi_size <= the SIZE #define.
+ * As best I can tell, this assert isn't needed here,
+ * and in any case, the SIZE #define is being removed
+ * in an update of the functions in this file to run
+ * with an arbitrary number of processes.
*
* JRM - 8/11/04
*/
@@ -577,13 +592,13 @@ void big_dataset(void)
* not have actual data written to the entire raw data area and relies on the
* default fill value of zeros to work correctly.
*
- * Changes: Removed the assert that mpi_size <= the SIZE #define.
- * As best I can tell, this assert isn't needed here,
- * and in any case, the SIZE #define is being removed
- * in an update of the functions in this file to run
- * with an arbitrary number of processes.
+ * Changes: Removed the assert that mpi_size <= the SIZE #define.
+ * As best I can tell, this assert isn't needed here,
+ * and in any case, the SIZE #define is being removed
+ * in an update of the functions in this file to run
+ * with an arbitrary number of processes.
*
- * Also added code to free dynamically allocated buffers.
+ * Also added code to free dynamically allocated buffers.
*
* JRM - 8/11/04
*/
@@ -603,10 +618,13 @@ void dataset_fillvalue(void)
hsize_t req_count[4] = {1, 6, 7, 8};
hsize_t dset_size; /* Dataset size */
int *rdata, *wdata; /* Buffers for data to read and write */
- int *twdata, *trdata; /* Temporary pointer into buffer */
- int acc, i, j, k, l; /* Local index variables */
+ int *twdata, *trdata; /* Temporary pointer into buffer */
+ int acc, i, ii, j, k, l; /* Local index variables */
herr_t ret; /* Generic return value */
const char *filename;
+#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
+ hbool_t prop_value;
+#endif
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
@@ -615,7 +633,7 @@ void dataset_fillvalue(void)
/* Set the dataset dimension to be one row more than number of processes */
/* and calculate the actual dataset size. */
- dset_dims[0]=mpi_size+1;
+ dset_dims[0]=(hsize_t)(mpi_size+1);
dset_size=dset_dims[0]*dset_dims[1]*dset_dims[2]*dset_dims[3];
/* Allocate space for the buffers */
@@ -645,27 +663,59 @@ void dataset_fillvalue(void)
/*
* Read dataset before any data is written.
*/
- /* set entire read buffer with the constant 2 */
- HDmemset(rdata,2,(size_t)(dset_size*sizeof(int)));
- /* Independently read the entire dataset back */
- ret = H5Dread(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, rdata);
- VRFY((ret >= 0), "H5Dread succeeded");
- /* Verify all data read are the fill value 0 */
- trdata = rdata;
- err_num = 0;
- for(i = 0; i < (int)dset_dims[0]; i++)
+ /* Create DXPL for I/O */
+ dxpl = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((dxpl >= 0), "H5Pcreate succeeded");
+
+#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
+ prop_value = H5D_XFER_COLL_RANK0_BCAST_DEF;
+ ret = H5Pinsert2(dxpl, H5D_XFER_COLL_RANK0_BCAST_NAME, H5D_XFER_COLL_RANK0_BCAST_SIZE, &prop_value,
+ NULL, NULL, NULL, NULL, NULL, NULL);
+ VRFY((ret >= 0),"testing property list inserted succeeded");
+#endif /* H5_HAVE_INSTRUMENTED_LIBRARY */
+
+ for(ii = 0; ii < 2; ii++) {
+
+ if(ii == 0)
+ ret = H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_INDEPENDENT);
+ else
+ ret = H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);
+ VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
+
+ /* set entire read buffer with the constant 2 */
+ HDmemset(rdata,2,(size_t)(dset_size*sizeof(int)));
+
+ /* Read the entire dataset back */
+ ret = H5Dread(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, dxpl, rdata);
+ VRFY((ret >= 0), "H5Dread succeeded");
+
+#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
+ prop_value = FALSE;
+ ret = H5Pget(dxpl, H5D_XFER_COLL_RANK0_BCAST_NAME, &prop_value);
+ VRFY((ret >= 0), "testing property list get succeeded");
+ if(ii == 0)
+ VRFY((prop_value == FALSE), "correctly handled rank 0 Bcast");
+ else
+ VRFY((prop_value == TRUE), "correctly handled rank 0 Bcast");
+#endif /* H5_HAVE_INSTRUMENTED_LIBRARY */
+
+ /* Verify all data read are the fill value 0 */
+ trdata = rdata;
+ err_num = 0;
+ for(i = 0; i < (int)dset_dims[0]; i++)
for(j = 0; j < (int)dset_dims[1]; j++)
- for(k = 0; k < (int)dset_dims[2]; k++)
- for(l = 0; l < (int)dset_dims[3]; l++, twdata++, trdata++)
- if(*trdata != 0)
- if(err_num++ < MAX_ERR_REPORT || VERBOSE_MED)
- printf("Dataset Verify failed at [%d][%d][%d][%d]: expect 0, got %d\n", i, j, k, l, *trdata);
- if(err_num > MAX_ERR_REPORT && !VERBOSE_MED)
- printf("[more errors ...]\n");
- if(err_num){
- printf("%d errors found in check_value\n", err_num);
- nerrors++;
+ for(k = 0; k < (int)dset_dims[2]; k++)
+ for(l = 0; l < (int)dset_dims[3]; l++, twdata++, trdata++)
+ if(*trdata != 0)
+ if(err_num++ < MAX_ERR_REPORT || VERBOSE_MED)
+ HDprintf("Dataset Verify failed at [%d][%d][%d][%d]: expect 0, got %d\n", i, j, k, l, *trdata);
+ if(err_num > MAX_ERR_REPORT && !VERBOSE_MED)
+ HDprintf("[more errors ...]\n");
+ if(err_num) {
+ HDprintf("%d errors found in check_value\n", err_num);
+ nerrors++;
+ }
}
/* Barrier to ensure all processes have completed the above test. */
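[Note on the DXPL toggling exercised by the loop above: H5Pset_dxpl_mpio() switches a single transfer property list between independent and collective MPI-IO. A minimal sketch, assuming a parallel HDF5 build and an already-open dataset; the helper name read_both_modes is hypothetical:]

    #include <hdf5.h>

    /* Sketch: read the same dataset once independently, once collectively.
     * `dset` and `buf` are assumed to come from the caller. */
    static void read_both_modes(hid_t dset, int *buf)
    {
        hid_t dxpl = H5Pcreate(H5P_DATASET_XFER);
        int   pass;

        for (pass = 0; pass < 2; pass++) {
            /* pass 0: each rank reads on its own; pass 1: ranks read together */
            H5Pset_dxpl_mpio(dxpl, pass == 0 ? H5FD_MPIO_INDEPENDENT
                                             : H5FD_MPIO_COLLECTIVE);
            H5Dread(dset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, dxpl, buf);
        }
        H5Pclose(dxpl);
    }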
@@ -675,16 +725,12 @@ void dataset_fillvalue(void)
* Each process writes 1 row of data. Thus last row is not written.
*/
/* Create hyperslabs in memory and file dataspaces */
- req_start[0]=mpi_rank;
+ req_start[0]=(hsize_t)mpi_rank;
ret = H5Sselect_hyperslab(filespace, H5S_SELECT_SET, req_start, NULL, req_count, NULL);
VRFY((ret >= 0), "H5Sselect_hyperslab succeeded on memory dataspace");
ret = H5Sselect_hyperslab(memspace, H5S_SELECT_SET, req_start, NULL, req_count, NULL);
VRFY((ret >= 0), "H5Sselect_hyperslab succeeded on memory dataspace");
- /* Create DXPL for collective I/O */
- dxpl = H5Pcreate(H5P_DATASET_XFER);
- VRFY((dxpl >= 0), "H5Pcreate succeeded");
-
ret = H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);
VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
@@ -711,35 +757,62 @@ void dataset_fillvalue(void)
/*
* Read dataset after partial write.
*/
- /* set entire read buffer with the constant 2 */
- HDmemset(rdata,2,(size_t)(dset_size*sizeof(int)));
- /* Independently read the entire dataset back */
- ret = H5Dread(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, rdata);
- VRFY((ret >= 0), "H5Dread succeeded");
- /* Verify correct data read */
- twdata=wdata;
- trdata=rdata;
- err_num=0;
- for(i=0; i<(int)dset_dims[0]; i++)
+#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
+ prop_value = H5D_XFER_COLL_RANK0_BCAST_DEF;
+ ret = H5Pset(dxpl, H5D_XFER_COLL_RANK0_BCAST_NAME, &prop_value);
+ VRFY((ret >= 0), " H5Pset succeeded");
+#endif /* H5_HAVE_INSTRUMENTED_LIBRARY */
+
+ for(ii = 0; ii < 2; ii++) {
+
+ if(ii == 0)
+ ret = H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_INDEPENDENT);
+ else
+ ret = H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);
+ VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
+
+ /* set entire read buffer with the constant 2 */
+ HDmemset(rdata,2,(size_t)(dset_size*sizeof(int)));
+
+ /* Read the entire dataset back */
+ ret = H5Dread(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, dxpl, rdata);
+ VRFY((ret >= 0), "H5Dread succeeded");
+
+#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
+ prop_value = FALSE;
+ ret = H5Pget(dxpl, H5D_XFER_COLL_RANK0_BCAST_NAME, &prop_value);
+ VRFY((ret >= 0), "testing property list get succeeded");
+ if(ii == 0)
+ VRFY((prop_value == FALSE), "correctly handled rank 0 Bcast");
+ else
+ VRFY((prop_value == TRUE), "correctly handled rank 0 Bcast");
+#endif /* H5_HAVE_INSTRUMENTED_LIBRARY */
+
+ /* Verify correct data read */
+ twdata=wdata;
+ trdata=rdata;
+ err_num=0;
+ for(i=0; i<(int)dset_dims[0]; i++)
for(j=0; j<(int)dset_dims[1]; j++)
- for(k=0; k<(int)dset_dims[2]; k++)
- for(l=0; l<(int)dset_dims[3]; l++, twdata++, trdata++)
- if(i<mpi_size) {
- if(*twdata != *trdata )
- if(err_num++ < MAX_ERR_REPORT || VERBOSE_MED)
- printf("Dataset Verify failed at [%d][%d][%d][%d]: expect %d, got %d\n", i,j,k,l, *twdata, *trdata);
- } /* end if */
- else {
- if(*trdata != 0)
- if(err_num++ < MAX_ERR_REPORT || VERBOSE_MED)
- printf("Dataset Verify failed at [%d][%d][%d][%d]: expect 0, got %d\n", i,j,k,l, *trdata);
- } /* end else */
- if(err_num > MAX_ERR_REPORT && !VERBOSE_MED)
- printf("[more errors ...]\n");
- if(err_num){
- printf("%d errors found in check_value\n", err_num);
- nerrors++;
+ for(k=0; k<(int)dset_dims[2]; k++)
+ for(l=0; l<(int)dset_dims[3]; l++, twdata++, trdata++)
+ if(i<mpi_size) {
+ if(*twdata != *trdata )
+ if(err_num++ < MAX_ERR_REPORT || VERBOSE_MED)
+ HDprintf("Dataset Verify failed at [%d][%d][%d][%d]: expect %d, got %d\n", i,j,k,l, *twdata, *trdata);
+ } /* end if */
+ else {
+ if(*trdata != 0)
+ if(err_num++ < MAX_ERR_REPORT || VERBOSE_MED)
+ HDprintf("Dataset Verify failed at [%d][%d][%d][%d]: expect 0, got %d\n", i,j,k,l, *trdata);
+ } /* end else */
+ if(err_num > MAX_ERR_REPORT && !VERBOSE_MED)
+ HDprintf("[more errors ...]\n");
+ if(err_num){
+ HDprintf("%d errors found in check_value\n", err_num);
+ nerrors++;
+ }
}
/* Close all file objects */
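[Background for the two verification passes above: elements that no process has written return the dataset's fill value, which defaults to 0 for integer datasets. A minimal serial sketch of that behavior; the file name is hypothetical and no MPI is involved:]

    #include <hdf5.h>
    #include <assert.h>

    int main(void)
    {
        hsize_t dims[1] = {8};
        int     buf[8];
        int     i;
        hid_t   file  = H5Fcreate("fill_demo.h5", H5F_ACC_TRUNC,
                                  H5P_DEFAULT, H5P_DEFAULT);
        hid_t   space = H5Screate_simple(1, dims, NULL);
        hid_t   dset  = H5Dcreate2(file, "d", H5T_NATIVE_INT, space,
                                   H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);

        /* No H5Dwrite call: the read returns the default fill value, 0. */
        H5Dread(dset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf);
        for (i = 0; i < 8; i++)
            assert(buf[i] == 0);

        H5Dclose(dset); H5Sclose(space); H5Fclose(file);
        return 0;
    }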
@@ -767,6 +840,13 @@ void dataset_fillvalue(void)
HDfree(wdata);
}
+/* Combined the cngrpw and ingrpr tests because ingrpr reads the file created by cngrpw. */
+void collective_group_write_independent_group_read(void)
+{
+ collective_group_write();
+ independent_group_read();
+}
+
/* Write multiple groups with a chunked dataset in each group collectively.
* These groups and datasets are for testing independent read later.
*
@@ -778,18 +858,18 @@ void dataset_fillvalue(void)
*/
void collective_group_write(void)
{
- int mpi_rank, mpi_size, size;
- int i, j, m;
- char gname[64], dname[32];
+ int mpi_rank, mpi_size, size;
+ int i, j, m;
+ char gname[64], dname[32];
hid_t fid, gid, did, plist, dcpl, memspace, filespace;
- DATATYPE * outme = NULL;
- hsize_t chunk_origin[DIM];
- hsize_t chunk_dims[DIM], file_dims[DIM], count[DIM];
- hsize_t chunk_size[2]; /* Chunk dimensions - computed shortly */
- herr_t ret1, ret2;
+ DATATYPE *outme = NULL;
+ hsize_t chunk_origin[DIM];
+ hsize_t chunk_dims[DIM], file_dims[DIM], count[DIM];
+ hsize_t chunk_size[2]; /* Chunk dimensions - computed shortly */
+ herr_t ret1, ret2;
const H5Ptest_param_t *pt;
- char *filename;
- int ngroups;
+ char *filename;
+ int ngroups;
pt = GetTestParameters();
filename = pt->name;
@@ -803,7 +883,7 @@ void collective_group_write(void)
chunk_size[0] =(hsize_t)(size / 2);
chunk_size[1] =(hsize_t)(size / 2);
- outme = HDmalloc((size_t)(size * size * sizeof(DATATYPE)));
+ outme = HDmalloc((size_t)size * (size_t)size * sizeof(DATATYPE));
VRFY((outme != NULL), "HDmalloc succeeded for outme");
plist = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type);
@@ -834,11 +914,11 @@ void collective_group_write(void)
/* creates ngroups groups under the root group, writes chunked
* datasets in parallel. */
for(m = 0; m < ngroups; m++) {
- sprintf(gname, "group%d", m);
+ HDsprintf(gname, "group%d", m);
gid = H5Gcreate2(fid, gname, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
VRFY((gid > 0), gname);
- sprintf(dname, "dataset%d", m);
+ HDsprintf(dname, "dataset%d", m);
did = H5Dcreate2(gid, dname, H5T_NATIVE_INT, filespace, H5P_DEFAULT, dcpl, H5P_DEFAULT);
VRFY((did > 0), dname);
@@ -846,17 +926,16 @@ void collective_group_write(void)
for(j = 0; j < size; j++)
outme[(i * size) + j] =(i + j) * 1000 + mpi_rank;
- H5Dwrite(did, H5T_NATIVE_INT, memspace, filespace, H5P_DEFAULT,
- outme);
+ H5Dwrite(did, H5T_NATIVE_INT, memspace, filespace, H5P_DEFAULT, outme);
H5Dclose(did);
H5Gclose(gid);
#ifdef BARRIER_CHECKS
if(!((m+1) % 10)) {
- printf("created %d groups\n", m+1);
+ HDprintf("created %d groups\n", m+1);
MPI_Barrier(MPI_COMM_WORLD);
- }
+ }
#endif /* BARRIER_CHECKS */
}
@@ -876,8 +955,8 @@ void independent_group_read(void)
int mpi_rank, m;
hid_t plist, fid;
const H5Ptest_param_t *pt;
- char *filename;
- int ngroups;
+ char *filename;
+ int ngroups;
pt = GetTestParameters();
filename = pt->name;
@@ -911,9 +990,9 @@ void independent_group_read(void)
* instead of the old SIZE #define. This should allow it
* to function with an arbitrary number of processors.
*
- * Also added code to verify the results of dynamic memory
- * allocations, and to free dynamically allocated memeory
- * when we are done with it.
+ * Also added code to verify the results of dynamic memory
+ * allocations, and to free dynamically allocated memory
+ * when we are done with it.
*
* JRM - 8/16/04
*/
@@ -928,19 +1007,19 @@ group_dataset_read(hid_t fid, int mpi_rank, int m)
size = get_size();
- indata =(DATATYPE*)HDmalloc((size_t)(size * size * sizeof(DATATYPE)));
+ indata =(DATATYPE*)HDmalloc((size_t)size * (size_t)size * sizeof(DATATYPE));
VRFY((indata != NULL), "HDmalloc succeeded for indata");
- outdata =(DATATYPE*)HDmalloc((size_t)(size * size * sizeof(DATATYPE)));
+ outdata =(DATATYPE*)HDmalloc((size_t)size * (size_t)size * sizeof(DATATYPE));
VRFY((outdata != NULL), "HDmalloc succeeded for outdata");
/* open every group under root group. */
- sprintf(gname, "group%d", m);
+ HDsprintf(gname, "group%d", m);
gid = H5Gopen2(fid, gname, H5P_DEFAULT);
VRFY((gid > 0), gname);
/* check the data. */
- sprintf(dname, "dataset%d", m);
+ HDsprintf(dname, "dataset%d", m);
did = H5Dopen2(gid, dname, H5P_DEFAULT);
VRFY((did>0), dname);
@@ -997,16 +1076,16 @@ group_dataset_read(hid_t fid, int mpi_rank, int m)
*/
void multiple_group_write(void)
{
- int mpi_rank, mpi_size, size;
- int m;
- char gname[64];
- hid_t fid, gid, plist, memspace, filespace;
+ int mpi_rank, mpi_size, size;
+ int m;
+ char gname[64];
+ hid_t fid, gid, plist, memspace, filespace;
hsize_t chunk_origin[DIM];
hsize_t chunk_dims[DIM], file_dims[DIM], count[DIM];
- herr_t ret;
+ herr_t ret;
const H5Ptest_param_t *pt;
- char *filename;
- int ngroups;
+ char *filename;
+ int ngroups;
pt = GetTestParameters();
filename = pt->name;
@@ -1041,23 +1120,23 @@ void multiple_group_write(void)
/* creates ngroups groups under the root group, writes datasets in
* parallel. */
for(m = 0; m < ngroups; m++) {
- sprintf(gname, "group%d", m);
+ HDsprintf(gname, "group%d", m);
gid = H5Gcreate2(fid, gname, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
VRFY((gid > 0), gname);
/* create attribute for these groups. */
- write_attribute(gid, is_group, m);
+ write_attribute(gid, is_group, m);
if(m != 0)
- write_dataset(memspace, filespace, gid);
+ write_dataset(memspace, filespace, gid);
H5Gclose(gid);
#ifdef BARRIER_CHECKS
if(!((m+1) % 10)) {
- printf("created %d groups\n", m+1);
+ HDprintf("created %d groups\n", m+1);
MPI_Barrier(MPI_COMM_WORLD);
- }
+ }
#endif /* BARRIER_CHECKS */
}
@@ -1088,28 +1167,28 @@ void multiple_group_write(void)
static void
write_dataset(hid_t memspace, hid_t filespace, hid_t gid)
{
- int i, j, n, size;
- int mpi_rank, mpi_size;
- char dname[32];
- DATATYPE * outme = NULL;
- hid_t did;
+ int i, j, n, size;
+ int mpi_rank, mpi_size;
+ char dname[32];
+ DATATYPE *outme = NULL;
+ hid_t did;
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
size = get_size();
- outme = HDmalloc((size_t)(size * size * sizeof(double)));
+ outme = HDmalloc((size_t)size * (size_t)size * sizeof(double));
VRFY((outme != NULL), "HDmalloc succeeded for outme");
for(n = 0; n < NDATASET; n++) {
- sprintf(dname, "dataset%d", n);
+ HDsprintf(dname, "dataset%d", n);
did = H5Dcreate2(gid, dname, H5T_NATIVE_INT, filespace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
VRFY((did > 0), dname);
for(i = 0; i < size; i++)
for(j = 0; j < size; j++)
- outme[(i * size) + j] = n * 1000 + mpi_rank;
+ outme[(i * size) + j] = n * 1000 + mpi_rank;
H5Dwrite(did, H5T_NATIVE_INT, memspace, filespace, H5P_DEFAULT, outme);
@@ -1136,12 +1215,12 @@ create_group_recursive(hid_t memspace, hid_t filespace, hid_t gid, int counter)
#ifdef BARRIER_CHECKS
if(!((counter+1) % 10)) {
- printf("created %dth child groups\n", counter+1);
+ HDprintf("created %dth child groups\n", counter+1);
MPI_Barrier(MPI_COMM_WORLD);
}
#endif /* BARRIER_CHECKS */
- sprintf(gname, "%dth_child_group", counter+1);
+ HDsprintf(gname, "%dth_child_group", counter+1);
child_gid = H5Gcreate2(gid, gname, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
VRFY((child_gid > 0), gname);
@@ -1173,8 +1252,8 @@ void multiple_group_read(void)
hsize_t chunk_origin[DIM];
hsize_t chunk_dims[DIM], file_dims[DIM], count[DIM];
const H5Ptest_param_t *pt;
- char *filename;
- int ngroups;
+ char *filename;
+ int ngroups;
pt = GetTestParameters();
filename = pt->name;
@@ -1202,19 +1281,19 @@ void multiple_group_read(void)
/* open every group under root group. */
for(m=0; m<ngroups; m++) {
- sprintf(gname, "group%d", m);
+ HDsprintf(gname, "group%d", m);
gid = H5Gopen2(fid, gname, H5P_DEFAULT);
VRFY((gid > 0), gname);
/* check the data. */
if(m != 0)
if((error_num = read_dataset(memspace, filespace, gid))>0)
- nerrors += error_num;
+ nerrors += error_num;
/* check attribute.*/
error_num = 0;
if((error_num = read_attribute(gid, is_group, m))>0 )
- nerrors += error_num;
+ nerrors += error_num;
H5Gclose(gid);
@@ -1249,36 +1328,35 @@ void multiple_group_read(void)
static int
read_dataset(hid_t memspace, hid_t filespace, hid_t gid)
{
- int i, j, n, mpi_rank, mpi_size, size, attr_errors=0, vrfy_errors=0;
- char dname[32];
+ int i, j, n, mpi_rank, mpi_size, size, attr_errors=0, vrfy_errors=0;
+ char dname[32];
DATATYPE *outdata = NULL, *indata = NULL;
- hid_t did;
+ hid_t did;
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
size = get_size();
- indata =(DATATYPE*)HDmalloc((size_t)(size * size * sizeof(DATATYPE)));
+ indata =(DATATYPE*)HDmalloc((size_t)size * (size_t)size * sizeof(DATATYPE));
VRFY((indata != NULL), "HDmalloc succeeded for indata");
- outdata =(DATATYPE*)HDmalloc((size_t)(size * size * sizeof(DATATYPE)));
+ outdata =(DATATYPE*)HDmalloc((size_t)size * (size_t)size * sizeof(DATATYPE));
VRFY((outdata != NULL), "HDmalloc succeeded for outdata");
for(n=0; n<NDATASET; n++) {
- sprintf(dname, "dataset%d", n);
+ HDsprintf(dname, "dataset%d", n);
did = H5Dopen2(gid, dname, H5P_DEFAULT);
VRFY((did>0), dname);
- H5Dread(did, H5T_NATIVE_INT, memspace, filespace, H5P_DEFAULT,
- indata);
+ H5Dread(did, H5T_NATIVE_INT, memspace, filespace, H5P_DEFAULT, indata);
/* this is the original value */
for(i=0; i<size; i++)
- for(j=0; j<size; j++) {
- *outdata = n*1000 + mpi_rank;
+ for(j=0; j<size; j++) {
+ *outdata = n*1000 + mpi_rank;
outdata++;
- }
+ }
outdata -= size * size;
/* compare the original value(outdata) to the value in file(indata).*/
@@ -1318,7 +1396,7 @@ recursive_read_group(hid_t memspace, hid_t filespace, hid_t gid, int counter)
nerrors += err_num;
if(counter < GROUP_DEPTH ) {
- sprintf(gname, "%dth_child_group", counter+1);
+ HDsprintf(gname, "%dth_child_group", counter+1);
child_gid = H5Gopen2(gid, gname, H5P_DEFAULT);
VRFY((child_gid>0), gname);
recursive_read_group(memspace, filespace, child_gid, counter+1);
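[The call above recurses down the group chain created by create_group_recursive(). A minimal sketch of the traversal it performs, assuming the same "%dth_child_group" naming and a depth limit like GROUP_DEPTH; the helper name walk_children is hypothetical:]

    /* Sketch: walk the "%dth_child_group" chain down to `depth` levels. */
    static void walk_children(hid_t gid, int counter, int depth)
    {
        char  gname[64];
        hid_t child;

        if (counter >= depth)
            return;
        HDsprintf(gname, "%dth_child_group", counter + 1);
        child = H5Gopen2(gid, gname, H5P_DEFAULT);
        /* ... verify the datasets/attributes under `child` here ... */
        walk_children(child, counter + 1, depth);
        H5Gclose(child);
    }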
@@ -1340,7 +1418,7 @@ write_attribute(hid_t obj_id, int this_type, int num)
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
if(this_type == is_group) {
- sprintf(attr_name, "Group Attribute %d", num);
+ HDsprintf(attr_name, "Group Attribute %d", num);
sid = H5Screate(H5S_SCALAR);
aid = H5Acreate2(obj_id, attr_name, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT);
H5Awrite(aid, H5T_NATIVE_INT, &num);
@@ -1348,7 +1426,7 @@ write_attribute(hid_t obj_id, int this_type, int num)
H5Sclose(sid);
} /* end if */
else if(this_type == is_dset) {
- sprintf(attr_name, "Dataset Attribute %d", num);
+ HDsprintf(attr_name, "Dataset Attribute %d", num);
for(i=0; i<8; i++)
attr_data[i] = i;
sid = H5Screate_simple(dspace_rank, dspace_dims, NULL);
@@ -1372,23 +1450,23 @@ read_attribute(hid_t obj_id, int this_type, int num)
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
if(this_type == is_group) {
- sprintf(attr_name, "Group Attribute %d", num);
+ HDsprintf(attr_name, "Group Attribute %d", num);
aid = H5Aopen(obj_id, attr_name, H5P_DEFAULT);
if(MAINPROCESS) {
H5Aread(aid, H5T_NATIVE_INT, &in_num);
vrfy_errors = dataset_vrfy(NULL, NULL, NULL, group_block, &in_num, &num);
- }
+ }
H5Aclose(aid);
}
else if(this_type == is_dset) {
- sprintf(attr_name, "Dataset Attribute %d", num);
+ HDsprintf(attr_name, "Dataset Attribute %d", num);
for(i=0; i<8; i++)
out_data[i] = i;
aid = H5Aopen(obj_id, attr_name, H5P_DEFAULT);
if(MAINPROCESS) {
H5Aread(aid, H5T_NATIVE_INT, in_data);
vrfy_errors = dataset_vrfy(NULL, NULL, NULL, dset_block, in_data, out_data);
- }
+ }
H5Aclose(aid);
}
@@ -1398,18 +1476,18 @@ read_attribute(hid_t obj_id, int this_type, int num)
 /* This function compares the original data with the read-in data for its
* hyperslab part only by process ID.
*
- * Changes: Modified function to use a passed in size parameter
- * instead of the old SIZE #define. This should let us
- * run with an arbitrary number of processes.
+ * Changes: Modified function to use a passed in size parameter
+ * instead of the old SIZE #define. This should let us
+ * run with an arbitrary number of processes.
*
- * JRM - 8/16/04
+ * JRM - 8/16/04
*/
static int
check_value(DATATYPE *indata, DATATYPE *outdata, int size)
{
- int mpi_rank, mpi_size, err_num=0;
- hsize_t i, j;
- hsize_t chunk_origin[DIM];
+ int mpi_rank, mpi_size, err_num=0;
+ hsize_t i, j;
+ hsize_t chunk_origin[DIM];
hsize_t chunk_dims[DIM], count[DIM];
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
@@ -1417,28 +1495,28 @@ check_value(DATATYPE *indata, DATATYPE *outdata, int size)
get_slab(chunk_origin, chunk_dims, count, NULL, size);
- indata += chunk_origin[0]*size;
- outdata += chunk_origin[0]*size;
+ indata += chunk_origin[0]*(hsize_t)size;
+ outdata += chunk_origin[0]*(hsize_t)size;
for(i=chunk_origin[0]; i<(chunk_origin[0]+chunk_dims[0]); i++)
- for(j=chunk_origin[1]; j<(chunk_origin[1]+chunk_dims[1]); j++) {
- if(*indata != *outdata )
- if(err_num++ < MAX_ERR_REPORT || VERBOSE_MED)
- printf("Dataset Verify failed at [%lu][%lu](row %lu, col%lu): expect %d, got %d\n",(unsigned long)i,(unsigned long)j,(unsigned long)i,(unsigned long)j, *outdata, *indata);
- }
+ for(j=chunk_origin[1]; j<(chunk_origin[1]+chunk_dims[1]); j++) {
+ if(*indata != *outdata )
+ if(err_num++ < MAX_ERR_REPORT || VERBOSE_MED)
+ HDprintf("Dataset Verify failed at [%lu][%lu](row %lu, col%lu): expect %d, got %d\n",(unsigned long)i,(unsigned long)j,(unsigned long)i,(unsigned long)j, *outdata, *indata);
+ }
if(err_num > MAX_ERR_REPORT && !VERBOSE_MED)
- printf("[more errors ...]\n");
+ HDprintf("[more errors ...]\n");
if(err_num)
- printf("%d errors found in check_value\n", err_num);
+ HDprintf("%d errors found in check_value\n", err_num);
return err_num;
}
 /* Decide the portion of the data chunk in the dataset by process ID.
*
- * Changes: Modified function to use a passed in size parameter
- * instead of the old SIZE #define. This should let us
- * run with an arbitrary number of processes.
+ * Changes: Modified function to use a passed in size parameter
+ * instead of the old SIZE #define. This should let us
+ * run with an arbitrary number of processes.
*
- * JRM - 8/11/04
+ * JRM - 8/11/04
*/
static void
@@ -1451,15 +1529,15 @@ get_slab(hsize_t chunk_origin[], hsize_t chunk_dims[], hsize_t count[],
MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
if(chunk_origin != NULL) {
- chunk_origin[0] = mpi_rank *(size/mpi_size);
+ chunk_origin[0] = (hsize_t)mpi_rank * (hsize_t)(size/mpi_size);
chunk_origin[1] = 0;
}
if(chunk_dims != NULL) {
- chunk_dims[0] = size/mpi_size;
- chunk_dims[1] = size;
+ chunk_dims[0] = (hsize_t)(size/mpi_size);
+ chunk_dims[1] = (hsize_t)size;
}
if(file_dims != NULL)
- file_dims[0] = file_dims[1] = size;
+ file_dims[0] = file_dims[1] = (hsize_t)size;
if(count != NULL)
count[0] = count[1] = 1;
}
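[To make the decomposition concrete: get_slab() gives each rank a band of size/mpi_size rows starting at rank*(size/mpi_size), spanning all columns. A standalone sketch of the same arithmetic, with example values assumed and no MPI needed:]

    #include <stdio.h>

    /* Mirrors get_slab()'s row decomposition of a size x size dataset. */
    int main(void)
    {
        int size = 8, mpi_size = 4, rank;

        for (rank = 0; rank < mpi_size; rank++) {
            int origin = rank * (size / mpi_size);
            int rows   = size / mpi_size;
            printf("rank %d: rows [%d, %d), all %d columns\n",
                   rank, origin, origin + rows, size);
        }
        return 0;
    }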
@@ -1482,7 +1560,7 @@ get_slab(hsize_t chunk_origin[], hsize_t chunk_dims[], hsize_t count[],
* on failure.
* JRM - 9/13/04
*
- * Changes: None.
+ * Changes: None.
*/
#define N 4
@@ -1517,10 +1595,10 @@ void io_mode_confusion(void)
* test bed related variables
*/
- const char * fcn_name = "io_mode_confusion";
- const hbool_t verbose = FALSE;
- const H5Ptest_param_t * pt;
- char * filename;
+ const char * fcn_name = "io_mode_confusion";
+ const hbool_t verbose = FALSE;
+ const H5Ptest_param_t * pt;
+ char * filename;
pt = GetTestParameters();
@@ -1666,8 +1744,8 @@ void io_mode_confusion(void)
status = H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE);
VRFY((status >= 0 ), "H5Pset_dxpl_mpio() failed");
if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
- status = H5Pset_dxpl_mpio_collective_opt(plist_id, H5FD_MPIO_INDIVIDUAL_IO);
- VRFY((status>= 0),"set independent IO collectively succeeded");
+ status = H5Pset_dxpl_mpio_collective_opt(plist_id, H5FD_MPIO_INDIVIDUAL_IO);
+ VRFY((status>= 0),"set independent IO collectively succeeded");
}
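[The branch above keeps the collective call semantics but lets each rank perform its own low-level I/O. A minimal sketch of that property-list setup; the surrounding transfer call is assumed:]

    /* Sketch: collective-mode DXPL that opts for per-process (individual) I/O. */
    hid_t dxpl = H5Pcreate(H5P_DATASET_XFER);

    H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);
    /* All ranks still call H5Dwrite together, but each does its own I/O. */
    H5Pset_dxpl_mpio_collective_opt(dxpl, H5FD_MPIO_INDIVIDUAL_IO);
    /* ... H5Dwrite(dset, type, mspace, fspace, dxpl, buf) ... */
    H5Pclose(dxpl);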
@@ -1721,13 +1799,13 @@ void io_mode_confusion(void)
/*
* At present, the object header code maintains an image of its on disk
 * representation, which is updated as necessary instead of generated on
- * request.
+ * request.
*
 * Prior to the fix that this test is designed to verify, the image of the
* on disk representation was only updated on flush -- not when the object
* header was marked clean.
*
- * This worked perfectly well as long as all writes of a given object
+ * This worked perfectly well as long as all writes of a given object
* header were written from a single process. However, with the implementation
* of round robin metadata data writes in parallel HDF5, this is no longer
* the case -- it is possible for a given object header to be flushed from
@@ -1735,14 +1813,14 @@ void io_mode_confusion(void)
* clean in all other processes on each flush. This resulted in NULL or
 * out of date object header information being written to disk.
*
- * To repair this, I modified the object header code to update its
- * on disk image both on flush on when marked clean.
+ * To repair this, I modified the object header code to update its
+ * on disk image both on flush and when marked clean.
*
* This test is directed at verifying that the fix performs as expected.
*
* The test functions by creating a HDF5 file with several small datasets,
- * and then flushing the file. This should result of at least one of
- * the associated object headers being flushed by a process other than
+ * and then flushing the file. This should result in at least one of
+ * the associated object headers being flushed by a process other than
* process 0.
*
* Then for each data set, add an attribute and flush the file again.
@@ -1752,26 +1830,26 @@ void io_mode_confusion(void)
 * Open each of the data sets in turn. If all opens are successful,
* the test passes. Otherwise the test fails.
*
- * Note that this test will probably become irrelevent shortly, when we
+ * Note that this test will probably become irrelevant shortly, when we
* land the journaling modifications on the trunk -- at which point all
* cache clients will have to construct on disk images on demand.
*
- * JRM -- 10/13/10
+ * JRM -- 10/13/10
*
* Changes:
- * Break it into two parts, a writer to write the file and a reader
- * the correctness of the writer. AKC -- 2010/10/27
+ * Break it into two parts, a writer to write the file and a reader to verify
+ * the correctness of the writer. AKC -- 2010/10/27
*/
-#define NUM_DATA_SETS 4
-#define LOCAL_DATA_SIZE 4
-#define LARGE_ATTR_SIZE 256
+#define NUM_DATA_SETS 4
+#define LOCAL_DATA_SIZE 4
+#define LARGE_ATTR_SIZE 256
 /* Since the even- and odd-ranked processes are split into the writer and
 * reader comms respectively, processes 0 and 1 in COMM_WORLD become the
 * root processes of the writer and reader comms.
*/
-#define Writer_Root 0
-#define Reader_Root 1
+#define Writer_Root 0
+#define Reader_Root 1
#define Reader_wait(mpi_err, xsteps) \
mpi_err = MPI_Bcast(&xsteps, 1, MPI_INT, Writer_Root, MPI_COMM_WORLD)
#define Reader_result(mpi_err, xsteps_done) \
@@ -1783,26 +1861,26 @@ void io_mode_confusion(void)
/* object names used by both rr_obj_hdr_flush_confusion and
* rr_obj_hdr_flush_confusion_reader.
*/
-const char * dataset_name[NUM_DATA_SETS] =
- {
- "dataset_0",
- "dataset_1",
- "dataset_2",
- "dataset_3"
+const char * dataset_name[NUM_DATA_SETS] =
+ {
+ "dataset_0",
+ "dataset_1",
+ "dataset_2",
+ "dataset_3"
};
-const char * att_name[NUM_DATA_SETS] =
- {
- "attribute_0",
- "attribute_1",
- "attribute_2",
- "attribute_3"
+const char * att_name[NUM_DATA_SETS] =
+ {
+ "attribute_0",
+ "attribute_1",
+ "attribute_2",
+ "attribute_3"
};
-const char * lg_att_name[NUM_DATA_SETS] =
- {
- "large_attribute_0",
- "large_attribute_1",
- "large_attribute_2",
- "large_attribute_3"
+const char * lg_att_name[NUM_DATA_SETS] =
+ {
+ "large_attribute_0",
+ "large_attribute_1",
+ "large_attribute_2",
+ "large_attribute_3"
};
void rr_obj_hdr_flush_confusion(void)
@@ -1811,14 +1889,14 @@ void rr_obj_hdr_flush_confusion(void)
/* private communicator size and rank */
int mpi_size;
int mpi_rank;
- int mrc; /* mpi error code */
- int is_reader; /* 1 for reader process; 0 for writer process. */
+ int mrc; /* mpi error code */
+ int is_reader; /* 1 for reader process; 0 for writer process. */
MPI_Comm comm;
/* test bed related variables */
- const char * fcn_name = "rr_obj_hdr_flush_confusion";
- const hbool_t verbose = FALSE;
+ const char * fcn_name = "rr_obj_hdr_flush_confusion";
+ const hbool_t verbose = FALSE;
/* Create two new private communicators from MPI_COMM_WORLD.
* Even and odd ranked processes go to comm_writers and comm_readers
@@ -1841,9 +1919,9 @@ void rr_obj_hdr_flush_confusion(void)
* step. When all steps are done, they inform readers to end.
*/
if (is_reader)
- rr_obj_hdr_flush_confusion_reader(comm);
+ rr_obj_hdr_flush_confusion_reader(comm);
else
- rr_obj_hdr_flush_confusion_writer(comm);
+ rr_obj_hdr_flush_confusion_writer(comm);
MPI_Comm_free(&comm);
if(verbose )
@@ -1887,16 +1965,16 @@ void rr_obj_hdr_flush_confusion_writer(MPI_Comm comm)
/* private communicator size and rank */
int mpi_size;
int mpi_rank;
- int mrc; /* mpi error code */
+ int mrc; /* mpi error code */
/* steps to verify and have been verified */
int steps = 0;
int steps_done = 0;
/* test bed related variables */
- const char * fcn_name = "rr_obj_hdr_flush_confusion_writer";
- const hbool_t verbose = FALSE;
- const H5Ptest_param_t * pt;
- char * filename;
+ const char *fcn_name = "rr_obj_hdr_flush_confusion_writer";
+ const hbool_t verbose = FALSE;
+ const H5Ptest_param_t *pt;
+ char *filename;
/*
* setup test bed related variables:
@@ -1930,7 +2008,7 @@ void rr_obj_hdr_flush_confusion_writer(MPI_Comm comm)
*/
if(verbose )
- HDfprintf(stdout, "%0d:%s: Creating new file \"%s\".\n",
+ HDfprintf(stdout, "%0d:%s: Creating new file \"%s\".\n",
mpi_rank, fcn_name, filename);
file_id = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id);
@@ -1945,7 +2023,7 @@ void rr_obj_hdr_flush_confusion_writer(MPI_Comm comm)
*/
if(verbose )
- HDfprintf(stdout, "%0d:%s: Creating the datasets.\n",
+ HDfprintf(stdout, "%0d:%s: Creating the datasets.\n",
mpi_rank, fcn_name);
disk_size[0] = (hsize_t)(LOCAL_DATA_SIZE * mpi_size);
@@ -1954,15 +2032,15 @@ void rr_obj_hdr_flush_confusion_writer(MPI_Comm comm)
for ( i = 0; i < NUM_DATA_SETS; i++ ) {
disk_space[i] = H5Screate_simple(1, disk_size, NULL);
- VRFY((disk_space[i] >= 0), "H5Screate_simple(1) failed.\n");
+ VRFY((disk_space[i] >= 0), "H5Screate_simple(1) failed.\n");
- dataset[i] = H5Dcreate2(file_id, dataset_name[i], H5T_NATIVE_DOUBLE,
+ dataset[i] = H5Dcreate2(file_id, dataset_name[i], H5T_NATIVE_DOUBLE,
disk_space[i], H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
VRFY((dataset[i] >= 0), "H5Dcreate(1) failed.\n");
}
- /*
+ /*
* setup data transfer property list
*/
@@ -1973,11 +2051,11 @@ void rr_obj_hdr_flush_confusion_writer(MPI_Comm comm)
VRFY((dxpl_id != -1), "H5Pcreate(H5P_DATASET_XFER) failed.\n");
err = H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE);
- VRFY((err >= 0),
+ VRFY((err >= 0),
"H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE) failed.\n");
- /*
- * write data to the data sets
+ /*
+ * write data to the data sets
*/
if(verbose )
@@ -1993,22 +2071,22 @@ void rr_obj_hdr_flush_confusion_writer(MPI_Comm comm)
}
for ( i = 0; i < NUM_DATA_SETS; i++ ) {
- err = H5Sselect_hyperslab(disk_space[i], H5S_SELECT_SET, disk_start,
- NULL, disk_count, NULL);
+ err = H5Sselect_hyperslab(disk_space[i], H5S_SELECT_SET, disk_start,
+ NULL, disk_count, NULL);
VRFY((err >= 0), "H5Sselect_hyperslab(1) failed.\n");
mem_space[i] = H5Screate_simple(1, mem_size, NULL);
- VRFY((mem_space[i] >= 0), "H5Screate_simple(2) failed.\n");
- err = H5Sselect_hyperslab(mem_space[i], H5S_SELECT_SET,
- mem_start, NULL, mem_count, NULL);
+ VRFY((mem_space[i] >= 0), "H5Screate_simple(2) failed.\n");
+ err = H5Sselect_hyperslab(mem_space[i], H5S_SELECT_SET,
+ mem_start, NULL, mem_count, NULL);
VRFY((err >= 0), "H5Sselect_hyperslab(2) failed.\n");
- err = H5Dwrite(dataset[i], H5T_NATIVE_DOUBLE, mem_space[i],
- disk_space[i], dxpl_id, data);
+ err = H5Dwrite(dataset[i], H5T_NATIVE_DOUBLE, mem_space[i],
+ disk_space[i], dxpl_id, data);
VRFY((err >= 0), "H5Dwrite(1) failed.\n");
for ( j = 0; j < LOCAL_DATA_SIZE; j++ )
- data[j] *= 10.0;
+ data[j] *= 10.0;
}
- /*
+ /*
* close the data spaces
*/
@@ -2024,12 +2102,12 @@ void rr_obj_hdr_flush_confusion_writer(MPI_Comm comm)
/* End of Step 1: create the data sets and write data. */
- /*
+ /*
* flush the metadata cache
*/
if(verbose )
- HDfprintf(stdout, "%0d:%s: flushing metadata cache.\n",
+ HDfprintf(stdout, "%0d:%s: flushing metadata cache.\n",
mpi_rank, fcn_name);
err = H5Fflush(file_id, H5F_SCOPE_GLOBAL);
VRFY((err >= 0), "H5Fflush(1) failed.\n");
@@ -2053,7 +2131,7 @@ void rr_obj_hdr_flush_confusion_writer(MPI_Comm comm)
for ( i = 0; i < NUM_DATA_SETS; i++ ) {
att_space[i] = H5Screate_simple(1, att_size, NULL);
VRFY((att_space[i] >= 0), "H5Screate_simple(3) failed.\n");
- att_id[i] = H5Acreate2(dataset[i], att_name[i], H5T_NATIVE_DOUBLE,
+ att_id[i] = H5Acreate2(dataset[i], att_name[i], H5T_NATIVE_DOUBLE,
att_space[i], H5P_DEFAULT, H5P_DEFAULT);
VRFY((att_id[i] >= 0), "H5Acreate(1) failed.\n");
err = H5Awrite(att_id[i], H5T_NATIVE_DOUBLE, att);
@@ -2064,15 +2142,14 @@ void rr_obj_hdr_flush_confusion_writer(MPI_Comm comm)
}
/*
- * close attribute IDs and spaces
+ * close attribute IDs and spaces
*/
if(verbose )
- HDfprintf(stdout, "%0d:%s: closing attr ids and spaces .\n",
+ HDfprintf(stdout, "%0d:%s: closing attr ids and spaces .\n",
mpi_rank, fcn_name);
for ( i = 0; i < NUM_DATA_SETS; i++ ) {
-
err = H5Sclose(att_space[i]);
VRFY((err >= 0), "H5Sclose(att_space[i]) failed.\n");
err = H5Aclose(att_id[i]);
@@ -2081,12 +2158,12 @@ void rr_obj_hdr_flush_confusion_writer(MPI_Comm comm)
/* End of Step 2: write attributes to each dataset */
- /*
+ /*
* flush the metadata cache again
*/
if(verbose )
- HDfprintf(stdout, "%0d:%s: flushing metadata cache.\n",
+ HDfprintf(stdout, "%0d:%s: flushing metadata cache.\n",
mpi_rank, fcn_name);
err = H5Fflush(file_id, H5F_SCOPE_GLOBAL);
VRFY((err >= 0), "H5Fflush(2) failed.\n");
@@ -2100,7 +2177,7 @@ void rr_obj_hdr_flush_confusion_writer(MPI_Comm comm)
*/
if(verbose )
- HDfprintf(stdout, "%0d:%s: writing large attributes.\n",
+ HDfprintf(stdout, "%0d:%s: writing large attributes.\n",
mpi_rank, fcn_name);
lg_att_size[0] = (hsize_t)(LARGE_ATTR_SIZE);
@@ -2112,7 +2189,7 @@ void rr_obj_hdr_flush_confusion_writer(MPI_Comm comm)
for ( i = 0; i < NUM_DATA_SETS; i++ ) {
lg_att_space[i] = H5Screate_simple(1, lg_att_size, NULL);
VRFY((lg_att_space[i] >= 0), "H5Screate_simple(4) failed.\n");
- lg_att_id[i] = H5Acreate2(dataset[i], lg_att_name[i], H5T_NATIVE_DOUBLE,
+ lg_att_id[i] = H5Acreate2(dataset[i], lg_att_name[i], H5T_NATIVE_DOUBLE,
lg_att_space[i], H5P_DEFAULT, H5P_DEFAULT);
VRFY((lg_att_id[i] >= 0), "H5Acreate(2) failed.\n");
err = H5Awrite(lg_att_id[i], H5T_NATIVE_DOUBLE, lg_att);
@@ -2121,21 +2198,21 @@ void rr_obj_hdr_flush_confusion_writer(MPI_Comm comm)
lg_att[j] /= 10.0;
}
}
-
+
/* Step 3: write large attributes to each dataset */
- /*
+ /*
* flush the metadata cache yet again to clean the object headers.
*
 * This is an attempt to create a situation where we have dirty
 * object header continuation chunks, but clean object headers
* to verify a speculative bug fix -- it doesn't seem to work,
- * but I will leave the code in anyway, as the object header
+ * but I will leave the code in anyway, as the object header
* code is going to change a lot in the near future.
*/
if(verbose )
- HDfprintf(stdout, "%0d:%s: flushing metadata cache.\n",
+ HDfprintf(stdout, "%0d:%s: flushing metadata cache.\n",
mpi_rank, fcn_name);
err = H5Fflush(file_id, H5F_SCOPE_GLOBAL);
VRFY((err >= 0), "H5Fflush(3) failed.\n");
@@ -2149,7 +2226,7 @@ void rr_obj_hdr_flush_confusion_writer(MPI_Comm comm)
*/
if(verbose )
- HDfprintf(stdout, "%0d:%s: writing different large attributes.\n",
+ HDfprintf(stdout, "%0d:%s: writing different large attributes.\n",
mpi_rank, fcn_name);
for ( j = 0; j < LARGE_ATTR_SIZE; j++ ) {
@@ -2166,11 +2243,11 @@ void rr_obj_hdr_flush_confusion_writer(MPI_Comm comm)
/* End of Step 4: write different large attributes to each dataset */
- /*
+ /*
* flush the metadata cache again
*/
if(verbose )
- HDfprintf(stdout, "%0d:%s: flushing metadata cache.\n",
+ HDfprintf(stdout, "%0d:%s: flushing metadata cache.\n",
mpi_rank, fcn_name);
err = H5Fflush(file_id, H5F_SCOPE_GLOBAL);
VRFY((err >= 0), "H5Fflush(3) failed.\n");
@@ -2182,11 +2259,11 @@ void rr_obj_hdr_flush_confusion_writer(MPI_Comm comm)
/* Step 5: Close all objects and the file */
/*
- * close large attribute IDs and spaces
+ * close large attribute IDs and spaces
*/
if(verbose )
- HDfprintf(stdout, "%0d:%s: closing large attr ids and spaces .\n",
+ HDfprintf(stdout, "%0d:%s: closing large attr ids and spaces .\n",
mpi_rank, fcn_name);
for ( i = 0; i < NUM_DATA_SETS; i++ ) {
@@ -2198,7 +2275,7 @@ void rr_obj_hdr_flush_confusion_writer(MPI_Comm comm)
}
- /*
+ /*
* close the data sets
*/
@@ -2230,7 +2307,7 @@ void rr_obj_hdr_flush_confusion_writer(MPI_Comm comm)
err = H5Fclose(file_id);
VRFY((err >= 0 ), "H5Fclose(1) failed");
-
+
/* End of Step 5: Close all objects and the file */
/* Tell the reader to check the file up to steps. */
steps++;
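[The steps++ handshake above rides on MPI_Bcast over MPI_COMM_WORLD: the writer root announces how many steps the readers may verify, and the reader root reports back how many were actually verified. A hedged sketch of one round; variable names are assumed and the exact Reader_result body is not shown in this hunk:]

    /* Sketch of one writer/reader handshake round over MPI_COMM_WORLD. */
    int steps = 3;        /* writer root: how far the file can be verified */
    int steps_done;       /* reader root: how far verification actually got */

    MPI_Bcast(&steps, 1, MPI_INT, Writer_Root, MPI_COMM_WORLD);
    /* ... readers verify the file up to `steps` and set steps_done ... */
    steps_done = steps;   /* assume all steps verified in this sketch */
    MPI_Bcast(&steps_done, 1, MPI_INT, Reader_Root, MPI_COMM_WORLD);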
@@ -2276,20 +2353,20 @@ void rr_obj_hdr_flush_confusion_reader(MPI_Comm comm)
/* MPI variables */
/* world communication size and rank */
- int mpi_world_size;
- int mpi_world_rank;
+ int mpi_world_size;
+ int mpi_world_rank;
/* private communicator size and rank */
- int mpi_size;
- int mpi_rank;
- int mrc; /* mpi error code */
- int steps = -1; /* How far (steps) to verify the file */
- int steps_done = -1; /* How far (steps) have been verified */
+ int mpi_size;
+ int mpi_rank;
+ int mrc; /* mpi error code */
+ int steps = -1; /* How far (steps) to verify the file */
+ int steps_done = -1; /* How far (steps) have been verified */
/* test bed related variables */
- const char * fcn_name = "rr_obj_hdr_flush_confusion_reader";
- const hbool_t verbose = FALSE;
- const H5Ptest_param_t * pt;
- char * filename;
+ const char *fcn_name = "rr_obj_hdr_flush_confusion_reader";
+ const hbool_t verbose = FALSE;
+ const H5Ptest_param_t *pt;
+ char *filename;
/*
* setup test bed related variables:
@@ -2306,291 +2383,290 @@ void rr_obj_hdr_flush_confusion_reader(MPI_Comm comm)
/* Repeatedly re-open the file and verify its contents until it is */
/* told to end (when steps=0). */
while (steps_done != 0){
- Reader_wait(mrc, steps);
- VRFY((mrc >= 0), "Reader_wait failed");
- steps_done = 0;
+ Reader_wait(mrc, steps);
+ VRFY((mrc >= 0), "Reader_wait failed");
+ steps_done = 0;
- if (steps > 0 ){
- /*
- * Set up file access property list with parallel I/O access
- */
+ if (steps > 0 ){
+ /*
+ * Set up file access property list with parallel I/O access
+ */
- if(verbose )
- HDfprintf(stdout, "%0d:%s: Setting up property list.\n",
- mpi_rank, fcn_name);
+ if(verbose )
+ HDfprintf(stdout, "%0d:%s: Setting up property list.\n",
+ mpi_rank, fcn_name);
- fapl_id = H5Pcreate(H5P_FILE_ACCESS);
- VRFY((fapl_id != -1), "H5Pcreate(H5P_FILE_ACCESS) failed");
- err = H5Pset_fapl_mpio(fapl_id, comm, MPI_INFO_NULL);
- VRFY((err >= 0 ), "H5Pset_fapl_mpio() failed");
+ fapl_id = H5Pcreate(H5P_FILE_ACCESS);
+ VRFY((fapl_id != -1), "H5Pcreate(H5P_FILE_ACCESS) failed");
+ err = H5Pset_fapl_mpio(fapl_id, comm, MPI_INFO_NULL);
+ VRFY((err >= 0 ), "H5Pset_fapl_mpio() failed");
- /*
- * Create a new file collectively and release property list identifier.
- */
+ /*
+ * Create a new file collectively and release property list identifier.
+ */
- if(verbose )
- HDfprintf(stdout, "%0d:%s: Re-open file \"%s\".\n",
- mpi_rank, fcn_name, filename);
+ if(verbose )
+ HDfprintf(stdout, "%0d:%s: Re-open file \"%s\".\n",
+ mpi_rank, fcn_name, filename);
- file_id = H5Fopen(filename, H5F_ACC_RDONLY, fapl_id);
- VRFY((file_id >= 0 ), "H5Fopen() failed");
- err = H5Pclose(fapl_id);
- VRFY((err >= 0 ), "H5Pclose(fapl_id) failed");
+ file_id = H5Fopen(filename, H5F_ACC_RDONLY, fapl_id);
+ VRFY((file_id >= 0 ), "H5Fopen() failed");
+ err = H5Pclose(fapl_id);
+ VRFY((err >= 0 ), "H5Pclose(fapl_id) failed");
#if 1
- if (steps >= 1){
- /*=====================================================*
- * Step 1: open the data sets and read data.
- *=====================================================*/
-
- if(verbose )
- HDfprintf(stdout, "%0d:%s: opening the datasets.\n",
- mpi_rank, fcn_name);
-
- for ( i = 0; i < NUM_DATA_SETS; i++ ) {
- dataset[i] = -1;
- }
-
- for ( i = 0; i < NUM_DATA_SETS; i++ ) {
- dataset[i] = H5Dopen2(file_id, dataset_name[i], H5P_DEFAULT);
- VRFY((dataset[i] >= 0), "H5Dopen(1) failed.\n");
- disk_space[i] = H5Dget_space(dataset[i]);
- VRFY((disk_space[i] >= 0), "H5Dget_space failed.\n");
- }
-
- /*
- * setup data transfer property list
- */
-
- if(verbose )
- HDfprintf(stdout, "%0d:%s: Setting up dxpl.\n", mpi_rank, fcn_name);
-
- dxpl_id = H5Pcreate(H5P_DATASET_XFER);
- VRFY((dxpl_id != -1), "H5Pcreate(H5P_DATASET_XFER) failed.\n");
- err = H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE);
- VRFY((err >= 0),
- "H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE) failed.\n");
-
- /*
- * read data from the data sets
- */
-
- if(verbose )
- HDfprintf(stdout, "%0d:%s: Reading datasets.\n", mpi_rank, fcn_name);
-
- disk_count[0] = (hsize_t)(LOCAL_DATA_SIZE);
- disk_start[0] = (hsize_t)(LOCAL_DATA_SIZE * mpi_rank);
-
- mem_size[0] = (hsize_t)(LOCAL_DATA_SIZE);
-
- mem_count[0] = (hsize_t)(LOCAL_DATA_SIZE);
- mem_start[0] = (hsize_t)(0);
-
- /* set up expected data for verification */
- for ( j = 0; j < LOCAL_DATA_SIZE; j++ ) {
- data[j] = (double)(mpi_rank + 1);
- }
-
- for ( i = 0; i < NUM_DATA_SETS; i++ ) {
- err = H5Sselect_hyperslab(disk_space[i], H5S_SELECT_SET, disk_start,
- NULL, disk_count, NULL);
- VRFY((err >= 0), "H5Sselect_hyperslab(1) failed.\n");
- mem_space[i] = H5Screate_simple(1, mem_size, NULL);
- VRFY((mem_space[i] >= 0), "H5Screate_simple(2) failed.\n");
- err = H5Sselect_hyperslab(mem_space[i], H5S_SELECT_SET,
- mem_start, NULL, mem_count, NULL);
- VRFY((err >= 0), "H5Sselect_hyperslab(2) failed.\n");
- err = H5Dread(dataset[i], H5T_NATIVE_DOUBLE, mem_space[i],
- disk_space[i], dxpl_id, data_read);
- VRFY((err >= 0), "H5Dread(1) failed.\n");
-
- /* compare read data with expected data */
- for ( j = 0; j < LOCAL_DATA_SIZE; j++ )
- if (data_read[j] != data[j]){
- HDfprintf(stdout,
- "%0d:%s: Reading datasets value failed in "
- "Dataset %d, at position %d: expect %f, got %f.\n",
- mpi_rank, fcn_name, i, j, data[j], data_read[j]);
- nerrors++;
- }
- for ( j = 0; j < LOCAL_DATA_SIZE; j++ )
- data[j] *= 10.0;
- }
-
- /*
- * close the data spaces
- */
-
- if(verbose )
- HDfprintf(stdout, "%0d:%s: closing dataspaces.\n", mpi_rank, fcn_name);
-
- for ( i = 0; i < NUM_DATA_SETS; i++ ) {
- err = H5Sclose(disk_space[i]);
- VRFY((err >= 0), "H5Sclose(disk_space[i]) failed.\n");
- err = H5Sclose(mem_space[i]);
- VRFY((err >= 0), "H5Sclose(mem_space[i]) failed.\n");
- }
- steps_done++;
- }
- /* End of Step 1: open the data sets and read data. */
+ if (steps >= 1){
+ /*=====================================================*
+ * Step 1: open the data sets and read data.
+ *=====================================================*/
+
+ if(verbose )
+ HDfprintf(stdout, "%0d:%s: opening the datasets.\n",
+ mpi_rank, fcn_name);
+
+ for ( i = 0; i < NUM_DATA_SETS; i++ ) {
+ dataset[i] = -1;
+ }
+
+ for ( i = 0; i < NUM_DATA_SETS; i++ ) {
+ dataset[i] = H5Dopen2(file_id, dataset_name[i], H5P_DEFAULT);
+ VRFY((dataset[i] >= 0), "H5Dopen(1) failed.\n");
+ disk_space[i] = H5Dget_space(dataset[i]);
+ VRFY((disk_space[i] >= 0), "H5Dget_space failed.\n");
+ }
+
+ /*
+ * setup data transfer property list
+ */
+
+ if(verbose )
+ HDfprintf(stdout, "%0d:%s: Setting up dxpl.\n", mpi_rank, fcn_name);
+
+ dxpl_id = H5Pcreate(H5P_DATASET_XFER);
+ VRFY((dxpl_id != -1), "H5Pcreate(H5P_DATASET_XFER) failed.\n");
+ err = H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE);
+ VRFY((err >= 0),
+ "H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE) failed.\n");
+
+ /*
+ * read data from the data sets
+ */
+
+ if(verbose )
+ HDfprintf(stdout, "%0d:%s: Reading datasets.\n", mpi_rank, fcn_name);
+
+ disk_count[0] = (hsize_t)(LOCAL_DATA_SIZE);
+ disk_start[0] = (hsize_t)(LOCAL_DATA_SIZE * mpi_rank);
+
+ mem_size[0] = (hsize_t)(LOCAL_DATA_SIZE);
+
+ mem_count[0] = (hsize_t)(LOCAL_DATA_SIZE);
+ mem_start[0] = (hsize_t)(0);
+
+ /* set up expected data for verification */
+ for ( j = 0; j < LOCAL_DATA_SIZE; j++ ) {
+ data[j] = (double)(mpi_rank + 1);
+ }
+
+ for ( i = 0; i < NUM_DATA_SETS; i++ ) {
+ err = H5Sselect_hyperslab(disk_space[i], H5S_SELECT_SET, disk_start,
+ NULL, disk_count, NULL);
+ VRFY((err >= 0), "H5Sselect_hyperslab(1) failed.\n");
+ mem_space[i] = H5Screate_simple(1, mem_size, NULL);
+ VRFY((mem_space[i] >= 0), "H5Screate_simple(2) failed.\n");
+ err = H5Sselect_hyperslab(mem_space[i], H5S_SELECT_SET,
+ mem_start, NULL, mem_count, NULL);
+ VRFY((err >= 0), "H5Sselect_hyperslab(2) failed.\n");
+ err = H5Dread(dataset[i], H5T_NATIVE_DOUBLE, mem_space[i],
+ disk_space[i], dxpl_id, data_read);
+ VRFY((err >= 0), "H5Dread(1) failed.\n");
+
+ /* compare read data with expected data */
+ for ( j = 0; j < LOCAL_DATA_SIZE; j++ )
+ if (!H5_DBL_ABS_EQUAL(data_read[j], data[j])){
+ HDfprintf(stdout,
+ "%0d:%s: Reading datasets value failed in "
+ "Dataset %d, at position %d: expect %f, got %f.\n",
+ mpi_rank, fcn_name, i, j, data[j], data_read[j]);
+ nerrors++;
+ }
+ for ( j = 0; j < LOCAL_DATA_SIZE; j++ )
+ data[j] *= 10.0;
+ }
+
+ /*
+ * close the data spaces
+ */
+
+ if(verbose )
+ HDfprintf(stdout, "%0d:%s: closing dataspaces.\n", mpi_rank, fcn_name);
+
+ for ( i = 0; i < NUM_DATA_SETS; i++ ) {
+ err = H5Sclose(disk_space[i]);
+ VRFY((err >= 0), "H5Sclose(disk_space[i]) failed.\n");
+ err = H5Sclose(mem_space[i]);
+ VRFY((err >= 0), "H5Sclose(mem_space[i]) failed.\n");
+ }
+ steps_done++;
+ }
+ /* End of Step 1: open the data sets and read data. */
#endif
#if 1
- /*=====================================================*
- * Step 2: reading attributes from each dataset
- *=====================================================*/
-
- if (steps >= 2){
- if(verbose )
- HDfprintf(stdout, "%0d:%s: reading attributes.\n", mpi_rank, fcn_name);
-
- for ( j = 0; j < LOCAL_DATA_SIZE; j++ ) {
-
- att[j] = (double)(j + 1);
- }
-
- for ( i = 0; i < NUM_DATA_SETS; i++ ) {
- hid_t att_id, att_type;
-
- att_id = H5Aopen(dataset[i], att_name[i], H5P_DEFAULT);
- VRFY((att_id >= 0), "H5Aopen failed.\n");
- att_type = H5Aget_type(att_id);
- VRFY((att_type >= 0), "H5Aget_type failed.\n");
- tri_err = H5Tequal(att_type, H5T_NATIVE_DOUBLE);
- VRFY((tri_err >= 0), "H5Tequal failed.\n");
- if (tri_err==0){
- HDfprintf(stdout,
- "%0d:%s: Mismatched Attribute type of Dataset %d.\n",
- mpi_rank, fcn_name, i);
- nerrors++;
- }else{
- /* should verify attribute size before H5Aread */
- err = H5Aread(att_id, H5T_NATIVE_DOUBLE, att_read);
- VRFY((err >= 0), "H5Aread failed.\n");
- /* compare read attribute data with expected data */
- for ( j = 0; j < LOCAL_DATA_SIZE; j++ )
- if (att_read[j] != att[j]){
- HDfprintf(stdout,
- "%0d:%s: Mismatched attribute data read in Dataset %d, at position %d: expect %f, got %f.\n",
- mpi_rank, fcn_name, i, j, att[j], att_read[j]);
- nerrors++;
- }
- for ( j = 0; j < LOCAL_DATA_SIZE; j++ ) {
-
- att[j] /= 10.0;
- }
- }
- err = H5Aclose(att_id);
- VRFY((err >= 0), "H5Aclose failed.\n");
- }
- steps_done++;
- }
- /* End of Step 2: reading attributes from each dataset */
+ /*=====================================================*
+ * Step 2: reading attributes from each dataset
+ *=====================================================*/
+
+ if (steps >= 2){
+ if(verbose )
+ HDfprintf(stdout, "%0d:%s: reading attributes.\n", mpi_rank, fcn_name);
+
+ for ( j = 0; j < LOCAL_DATA_SIZE; j++ ) {
+ att[j] = (double)(j + 1);
+ }
+
+ for ( i = 0; i < NUM_DATA_SETS; i++ ) {
+ hid_t att_id, att_type;
+
+ att_id = H5Aopen(dataset[i], att_name[i], H5P_DEFAULT);
+ VRFY((att_id >= 0), "H5Aopen failed.\n");
+ att_type = H5Aget_type(att_id);
+ VRFY((att_type >= 0), "H5Aget_type failed.\n");
+ tri_err = H5Tequal(att_type, H5T_NATIVE_DOUBLE);
+ VRFY((tri_err >= 0), "H5Tequal failed.\n");
+ if (tri_err==0){
+ HDfprintf(stdout,
+ "%0d:%s: Mismatched Attribute type of Dataset %d.\n",
+ mpi_rank, fcn_name, i);
+ nerrors++;
+ }
+ else {
+ /* should verify attribute size before H5Aread */
+ err = H5Aread(att_id, H5T_NATIVE_DOUBLE, att_read);
+ VRFY((err >= 0), "H5Aread failed.\n");
+ /* compare read attribute data with expected data */
+ for ( j = 0; j < LOCAL_DATA_SIZE; j++ )
+ if (!H5_DBL_ABS_EQUAL(att_read[j], att[j])){
+ HDfprintf(stdout,
+ "%0d:%s: Mismatched attribute data read in Dataset %d, at position %d: expect %f, got %f.\n",
+ mpi_rank, fcn_name, i, j, att[j], att_read[j]);
+ nerrors++;
+ }
+ for ( j = 0; j < LOCAL_DATA_SIZE; j++ ) {
+ att[j] /= 10.0;
+ }
+ }
+ err = H5Aclose(att_id);
+ VRFY((err >= 0), "H5Aclose failed.\n");
+ }
+ steps_done++;
+ }
+ /* End of Step 2: reading attributes from each dataset */
#endif
#if 1
- /*=====================================================*
- * Step 3 or 4: read large attributes from each dataset.
- * Step 4 has different attribute value from step 3.
- *=====================================================*/
-
- if (steps >= 3){
- if(verbose )
- HDfprintf(stdout, "%0d:%s: reading large attributes.\n", mpi_rank, fcn_name);
-
- for ( j = 0; j < LARGE_ATTR_SIZE; j++ ) {
-
- lg_att[j] = (steps==3) ? (double)(j + 1) : (double)(j+2);
- }
-
- for ( i = 0; i < NUM_DATA_SETS; i++ ) {
- lg_att_id[i] = H5Aopen(dataset[i], lg_att_name[i], H5P_DEFAULT);
- VRFY((lg_att_id[i] >= 0), "H5Aopen(2) failed.\n");
- lg_att_type[i] = H5Aget_type(lg_att_id[i]);
- VRFY((err >= 0), "H5Aget_type failed.\n");
- tri_err = H5Tequal(lg_att_type[i], H5T_NATIVE_DOUBLE);
- VRFY((tri_err >= 0), "H5Tequal failed.\n");
- if (tri_err==0){
- HDfprintf(stdout,
- "%0d:%s: Mismatched Large attribute type of Dataset %d.\n",
- mpi_rank, fcn_name, i);
- nerrors++;
- }else{
- /* should verify large attribute size before H5Aread */
- err = H5Aread(lg_att_id[i], H5T_NATIVE_DOUBLE, lg_att_read);
- VRFY((err >= 0), "H5Aread failed.\n");
- /* compare read attribute data with expected data */
- for ( j = 0; j < LARGE_ATTR_SIZE; j++ )
- if (lg_att_read[j] != lg_att[j]){
- HDfprintf(stdout,
- "%0d:%s: Mismatched large attribute data read in Dataset %d, at position %d: expect %f, got %f.\n",
- mpi_rank, fcn_name, i, j, lg_att[j], lg_att_read[j]);
- nerrors++;
- }
- for ( j = 0; j < LARGE_ATTR_SIZE; j++ ) {
-
- lg_att[j] /= 10.0;
- }
- }
- err = H5Tclose(lg_att_type[i]);
- VRFY((err >= 0), "H5Tclose failed.\n");
- err = H5Aclose(lg_att_id[i]);
- VRFY((err >= 0), "H5Aclose failed.\n");
- }
- /* Both step 3 and 4 use this same read checking code. */
- steps_done = (steps==3) ? 3 : 4;
- }
-
- /* End of Step 3 or 4: read large attributes from each dataset */
+ /*=====================================================*
+ * Step 3 or 4: read large attributes from each dataset.
+ * Step 4 has different attribute value from step 3.
+ *=====================================================*/
+
+ if (steps >= 3){
+ if(verbose )
+ HDfprintf(stdout, "%0d:%s: reading large attributes.\n", mpi_rank, fcn_name);
+
+ for ( j = 0; j < LARGE_ATTR_SIZE; j++ ) {
+ lg_att[j] = (steps==3) ? (double)(j + 1) : (double)(j+2);
+ }
+
+ for ( i = 0; i < NUM_DATA_SETS; i++ ) {
+ lg_att_id[i] = H5Aopen(dataset[i], lg_att_name[i], H5P_DEFAULT);
+ VRFY((lg_att_id[i] >= 0), "H5Aopen(2) failed.\n");
+ lg_att_type[i] = H5Aget_type(lg_att_id[i]);
+ VRFY((lg_att_type[i] >= 0), "H5Aget_type failed.\n");
+ tri_err = H5Tequal(lg_att_type[i], H5T_NATIVE_DOUBLE);
+ VRFY((tri_err >= 0), "H5Tequal failed.\n");
+ if (tri_err==0){
+ HDfprintf(stdout,
+ "%0d:%s: Mismatched Large attribute type of Dataset %d.\n",
+ mpi_rank, fcn_name, i);
+ nerrors++;
+ }
+ else {
+ /* should verify large attribute size before H5Aread */
+ err = H5Aread(lg_att_id[i], H5T_NATIVE_DOUBLE, lg_att_read);
+ VRFY((err >= 0), "H5Aread failed.\n");
+ /* compare read attribute data with expected data */
+ for ( j = 0; j < LARGE_ATTR_SIZE; j++ )
+ if (!H5_DBL_ABS_EQUAL(lg_att_read[j], lg_att[j])){
+ HDfprintf(stdout,
+ "%0d:%s: Mismatched large attribute data read in Dataset %d, at position %d: expect %f, got %f.\n",
+ mpi_rank, fcn_name, i, j, lg_att[j], lg_att_read[j]);
+ nerrors++;
+ }
+ for ( j = 0; j < LARGE_ATTR_SIZE; j++ ) {
+
+ lg_att[j] /= 10.0;
+ }
+ }
+ err = H5Tclose(lg_att_type[i]);
+ VRFY((err >= 0), "H5Tclose failed.\n");
+ err = H5Aclose(lg_att_id[i]);
+ VRFY((err >= 0), "H5Aclose failed.\n");
+ }
+ /* Both step 3 and 4 use this same read checking code. */
+ steps_done = (steps==3) ? 3 : 4;
+ }
+
+ /* End of Step 3 or 4: read large attributes from each dataset */
#endif
- /*=====================================================*
- * Step 5: read all objects from the file
- *=====================================================*/
- if (steps>=5){
- /* nothing extra to verify. The file is closed normally. */
- /* Just increment steps_done */
- steps_done++;
- }
-
- /*
- * Close the data sets
- */
-
- if(verbose )
- HDfprintf(stdout, "%0d:%s: closing datasets again.\n",
- mpi_rank, fcn_name);
-
- for ( i = 0; i < NUM_DATA_SETS; i++ ) {
- if ( dataset[i] >= 0 ) {
- err = H5Dclose(dataset[i]);
- VRFY((err >= 0), "H5Dclose(dataset[i])1 failed.\n");
- }
- }
-
- /*
- * close the data transfer property list.
- */
-
- if(verbose )
- HDfprintf(stdout, "%0d:%s: closing dxpl .\n", mpi_rank, fcn_name);
-
- err = H5Pclose(dxpl_id);
- VRFY((err >= 0), "H5Pclose(dxpl_id) failed.\n");
-
- /*
- * Close the file
- */
- if(verbose)
- HDfprintf(stdout, "%0d:%s: closing file again.\n",
- mpi_rank, fcn_name);
- err = H5Fclose(file_id);
- VRFY((err >= 0 ), "H5Fclose(1) failed");
-
- } /* else if (steps_done==0) */
- Reader_result(mrc, steps_done);
+ /*=====================================================*
+ * Step 5: read all objects from the file
+ *=====================================================*/
+ if (steps>=5){
+ /* nothing extra to verify. The file is closed normally. */
+ /* Just increment steps_done */
+ steps_done++;
+ }
+
+ /*
+ * Close the data sets
+ */
+
+ if(verbose )
+ HDfprintf(stdout, "%0d:%s: closing datasets again.\n",
+ mpi_rank, fcn_name);
+
+ for ( i = 0; i < NUM_DATA_SETS; i++ ) {
+ if ( dataset[i] >= 0 ) {
+ err = H5Dclose(dataset[i]);
+ VRFY((err >= 0), "H5Dclose(dataset[i])1 failed.\n");
+ }
+ }
+
+ /*
+ * close the data transfer property list.
+ */
+
+ if(verbose )
+ HDfprintf(stdout, "%0d:%s: closing dxpl .\n", mpi_rank, fcn_name);
+
+ err = H5Pclose(dxpl_id);
+ VRFY((err >= 0), "H5Pclose(dxpl_id) failed.\n");
+
+ /*
+ * Close the file
+ */
+ if(verbose)
+ HDfprintf(stdout, "%0d:%s: closing file again.\n",
+ mpi_rank, fcn_name);
+ err = H5Fclose(file_id);
+ VRFY((err >= 0 ), "H5Fclose(1) failed");
+
+ } /* else if (steps_done==0) */
+ Reader_result(mrc, steps_done);
} /* end while(1) */
if(verbose )
diff --git a/testpar/t_mpi.c b/testpar/t_mpi.c
index 3d501c9..0719ca6 100644
--- a/testpar/t_mpi.c
+++ b/testpar/t_mpi.c
@@ -28,149 +28,143 @@
#include "testpar.h"
/* FILENAME and filenames must have the same number of names */
-const char *FILENAME[2]={
- "MPItest",
- NULL};
-char filenames[2][200];
-int nerrors = 0;
-hid_t fapl; /* file access property list */
+const char *FILENAME[2] = { "MPItest", NULL };
+char filenames[2][200];
+int nerrors = 0;
+hid_t fapl; /* file access property list */
/* protocols */
static int errors_sum(int nerrs);
#define MPIO_TEST_WRITE_SIZE 1024*1024 /* 1 MB */
-static int
-test_mpio_overlap_writes(char *filename)
-{
+static int test_mpio_overlap_writes(char *filename) {
int mpi_size, mpi_rank;
MPI_Comm comm;
MPI_Info info = MPI_INFO_NULL;
int color, mrc;
- MPI_File fh;
+ MPI_File fh;
int i;
int vrfyerrs, nerrs;
- unsigned char buf[4093]; /* use some prime number for size */
+ unsigned char buf[4093]; /* use some prime number for size */
int bufsize = sizeof(buf);
- MPI_Offset stride;
- MPI_Offset mpi_off;
- MPI_Status mpi_stat;
-
+ MPI_Offset stride;
+ MPI_Offset mpi_off;
+ MPI_Status mpi_stat;
if (VERBOSE_MED)
- printf("MPIO independent overlapping writes test on file %s\n",
- filename);
+ HDprintf("MPIO independent overlapping writes test on file %s\n",
+ filename);
nerrs = 0;
/* set up MPI parameters */
- MPI_Comm_size(MPI_COMM_WORLD,&mpi_size);
- MPI_Comm_rank(MPI_COMM_WORLD,&mpi_rank);
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
/* Need at least 2 processes */
if (mpi_size < 2) {
- if (MAINPROCESS)
- printf("Need at least 2 processes to run MPIO test.\n");
- printf(" -SKIP- \n");
- return 0;
+ if (MAINPROCESS)
+ HDprintf("Need at least 2 processes to run MPIO test.\n");
+ HDprintf(" -SKIP- \n");
+ return 0;
}
/* splits processes 0 to n-2 into one comm. and the last one into another */
color = ((mpi_rank < (mpi_size - 1)) ? 0 : 1);
- mrc = MPI_Comm_split (MPI_COMM_WORLD, color, mpi_rank, &comm);
- VRFY((mrc==MPI_SUCCESS), "Comm_split succeeded");
-
- if (color==0){
- /* First n-1 processes (color==0) open a file and write it */
- mrc = MPI_File_open(comm, filename, MPI_MODE_CREATE|MPI_MODE_RDWR,
- info, &fh);
- VRFY((mrc==MPI_SUCCESS), "");
-
- stride = 1;
- mpi_off = mpi_rank*stride;
- while (mpi_off < MPIO_TEST_WRITE_SIZE){
- /* make sure the write does not exceed the TEST_WRITE_SIZE */
- if (mpi_off+stride > MPIO_TEST_WRITE_SIZE)
- stride = MPIO_TEST_WRITE_SIZE - mpi_off;
-
- /* set data to some trivial pattern for easy verification */
- for (i=0; i<stride; i++)
- buf[i] = (unsigned char)(mpi_off+i);
- mrc = MPI_File_write_at(fh, mpi_off, buf, (int)stride, MPI_BYTE,
- &mpi_stat);
- VRFY((mrc==MPI_SUCCESS), "");
-
- /* move the offset pointer to last byte written by all processes */
- mpi_off += (mpi_size - 1 - mpi_rank) * stride;
-
- /* Increase chunk size without exceeding buffer size. */
- /* Then move the starting offset for next write. */
- stride *= 2;
- if (stride > bufsize)
- stride = bufsize;
- mpi_off += mpi_rank*stride;
- }
-
- /* close file and free the communicator */
- mrc = MPI_File_close(&fh);
- VRFY((mrc==MPI_SUCCESS), "MPI_FILE_CLOSE");
- mrc = MPI_Comm_free(&comm);
- VRFY((mrc==MPI_SUCCESS), "MPI_Comm_free");
-
- /* sync with the other waiting processes */
- mrc = MPI_Barrier(MPI_COMM_WORLD);
- VRFY((mrc==MPI_SUCCESS), "Sync after writes");
- }else{
- /* last process waits till writes are done,
- * then opens file to verify data.
- */
- mrc = MPI_Barrier(MPI_COMM_WORLD);
- VRFY((mrc==MPI_SUCCESS), "Sync after writes");
-
- mrc = MPI_File_open(comm, filename, MPI_MODE_RDONLY,
- info, &fh);
- VRFY((mrc==MPI_SUCCESS), "");
-
- stride = bufsize;
- for (mpi_off=0; mpi_off < MPIO_TEST_WRITE_SIZE; mpi_off += bufsize){
- /* make sure it does not read beyond end of data */
- if (mpi_off+stride > MPIO_TEST_WRITE_SIZE)
- stride = MPIO_TEST_WRITE_SIZE - mpi_off;
- mrc = MPI_File_read_at(fh, mpi_off, buf, (int)stride, MPI_BYTE,
- &mpi_stat);
- VRFY((mrc==MPI_SUCCESS), "");
- vrfyerrs=0;
- for (i=0; i<stride; i++){
- unsigned char expected;
- expected = (unsigned char)(mpi_off+i);
- if ((expected != buf[i]) &&
- (vrfyerrs++ < MAX_ERR_REPORT || VERBOSE_MED)) {
- printf("proc %d: found data error at [%ld], expect %u, got %u\n",
- mpi_rank, (long)(mpi_off+i), expected, buf[i]);
- }
- }
- if (vrfyerrs > MAX_ERR_REPORT && !VERBOSE_MED)
- printf("proc %d: [more errors ...]\n", mpi_rank);
-
- nerrs += vrfyerrs;
- }
-
- /* close file and free the communicator */
- mrc = MPI_File_close(&fh);
- VRFY((mrc==MPI_SUCCESS), "MPI_FILE_CLOSE");
- mrc = MPI_Comm_free(&comm);
- VRFY((mrc==MPI_SUCCESS), "MPI_Comm_free");
+ mrc = MPI_Comm_split(MPI_COMM_WORLD, color, mpi_rank, &comm);
+ VRFY((mrc == MPI_SUCCESS), "Comm_split succeeded");
+
+ if (color == 0) {
+ /* First n-1 processes (color==0) open a file and write it */
+ mrc = MPI_File_open(comm, filename, MPI_MODE_CREATE | MPI_MODE_RDWR,
+ info, &fh);
+ VRFY((mrc == MPI_SUCCESS), "");
+
+ stride = 1;
+ mpi_off = mpi_rank * stride;
+ while (mpi_off < MPIO_TEST_WRITE_SIZE) {
+ /* make sure the write does not exceed the TEST_WRITE_SIZE */
+ if (mpi_off + stride > MPIO_TEST_WRITE_SIZE)
+ stride = MPIO_TEST_WRITE_SIZE - mpi_off;
+
+ /* set data to some trivial pattern for easy verification */
+ for (i = 0; i < stride; i++)
+ buf[i] = (unsigned char) (mpi_off + i);
+ mrc = MPI_File_write_at(fh, mpi_off, buf, (int) stride, MPI_BYTE,
+ &mpi_stat);
+ VRFY((mrc == MPI_SUCCESS), "");
+
+ /* move the offset pointer to last byte written by all processes */
+ mpi_off += (mpi_size - 1 - mpi_rank) * stride;
+
+ /* Increase chunk size without exceeding buffer size. */
+ /* Then move the starting offset for next write. */
+ stride *= 2;
+ if (stride > bufsize)
+ stride = bufsize;
+ mpi_off += mpi_rank * stride;
+ }
+
+ /* close file and free the communicator */
+ mrc = MPI_File_close(&fh);
+ VRFY((mrc == MPI_SUCCESS), "MPI_FILE_CLOSE");
+ mrc = MPI_Comm_free(&comm);
+ VRFY((mrc == MPI_SUCCESS), "MPI_Comm_free");
+
+ /* sync with the other waiting processes */
+ mrc = MPI_Barrier(MPI_COMM_WORLD);
+ VRFY((mrc == MPI_SUCCESS), "Sync after writes");
+ } else {
+ /* last process waits until the writes are done,
+ * then opens file to verify data.
+ */
+ mrc = MPI_Barrier(MPI_COMM_WORLD);
+ VRFY((mrc == MPI_SUCCESS), "Sync after writes");
+
+ mrc = MPI_File_open(comm, filename, MPI_MODE_RDONLY, info, &fh);
+ VRFY((mrc == MPI_SUCCESS), "");
+
+ stride = bufsize;
+ for (mpi_off = 0; mpi_off < MPIO_TEST_WRITE_SIZE; mpi_off += bufsize) {
+ /* make sure it does not read beyond end of data */
+ if (mpi_off + stride > MPIO_TEST_WRITE_SIZE)
+ stride = MPIO_TEST_WRITE_SIZE - mpi_off;
+ mrc = MPI_File_read_at(fh, mpi_off, buf, (int) stride, MPI_BYTE,
+ &mpi_stat);
+ VRFY((mrc == MPI_SUCCESS), "");
+ vrfyerrs = 0;
+ for (i = 0; i < stride; i++) {
+ unsigned char expected;
+ expected = (unsigned char) (mpi_off + i);
+ if ((expected != buf[i])
+ && (vrfyerrs++ < MAX_ERR_REPORT || VERBOSE_MED)) {
+ HDprintf(
+ "proc %d: found data error at [%ld], expect %u, got %u\n",
+ mpi_rank, (long) (mpi_off + i), expected, buf[i]);
+ }
+ }
+ if (vrfyerrs > MAX_ERR_REPORT && !VERBOSE_MED)
+ HDprintf("proc %d: [more errors ...]\n", mpi_rank);
+
+ nerrs += vrfyerrs;
+ }
+
+ /* close file and free the communicator */
+ mrc = MPI_File_close(&fh);
+ VRFY((mrc == MPI_SUCCESS), "MPI_FILE_CLOSE");
+ mrc = MPI_Comm_free(&comm);
+ VRFY((mrc == MPI_SUCCESS), "MPI_Comm_free");
}
/*
- * one more sync to ensure all processes have done reading
- * before ending this test.
- */
+ * one more sync to ensure all processes have done reading
+ * before ending this test.
+ */
mrc = MPI_Barrier(MPI_COMM_WORLD);
- VRFY((mrc==MPI_SUCCESS), "Sync before leaving test");
+ VRFY((mrc == MPI_SUCCESS), "Sync before leaving test");
return (nerrs);
}
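The test's correctness hinges on a self-describing data pattern: every
writer stamps each byte with its own absolute file offset, so overlapping
writes from different ranks deposit identical data. A minimal sketch of the
invariant the verifying rank checks (variable names as in the code above):

    /* byte at absolute offset (mpi_off + i) must equal that offset mod 256 */
    unsigned char expected = (unsigned char)(mpi_off + i);
    VRFY((buf[i] == expected), "byte matches its own file offset");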
-
#define MB 1048576 /* 1024*1024 == 2**20 */
#define GB 1073741824 /* 1024**3 == 2**30 */
#define TWO_GB_LESS1 2147483647 /* 2**31 - 1 */
@@ -186,227 +180,236 @@ test_mpio_overlap_writes(char *filename)
* Then reads the file back in by reverse order, that is process 0
* reads the data of process n-1 and vice versa.
*/
-static int
-test_mpio_gb_file(char *filename)
-{
+static int test_mpio_gb_file(char *filename) {
int mpi_size, mpi_rank;
MPI_Info info = MPI_INFO_NULL;
int mrc;
- MPI_File fh;
+ MPI_File fh;
int i, j, n;
int vrfyerrs;
- int writerrs; /* write errors */
+ int writerrs; /* write errors */
int nerrs;
- int ntimes; /* how many times */
- char *buf = NULL;
- char expected;
- MPI_Offset size;
- MPI_Offset mpi_off;
- MPI_Offset mpi_off_old;
- MPI_Status mpi_stat;
+ int ntimes; /* how many times */
+ char *buf = NULL;
+ char expected;
+ MPI_Offset size;
+ MPI_Offset mpi_off;
+ MPI_Offset mpi_off_old;
+ MPI_Status mpi_stat;
int is_signed, sizeof_mpi_offset;
nerrs = 0;
/* set up MPI parameters */
- MPI_Comm_size(MPI_COMM_WORLD,&mpi_size);
- MPI_Comm_rank(MPI_COMM_WORLD,&mpi_rank);
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
if (VERBOSE_MED)
- printf("MPI_Offset range test\n");
+ HDprintf("MPI_Offset range test\n");
 /* figure out the signedness and size of MPI_Offset */
mpi_off = 0;
is_signed = ((MPI_Offset)(mpi_off - 1)) < 0;
- sizeof_mpi_offset = (int)(sizeof(MPI_Offset));
+ sizeof_mpi_offset = (int) (sizeof(MPI_Offset));
/*
- * Verify the sizeof MPI_Offset and correctness of handling multiple GB
- * sizes.
- */
- if (MAINPROCESS){ /* only process 0 needs to check it*/
- printf("MPI_Offset is %s %d bytes integeral type\n",
- is_signed ? "signed" : "unsigned", (int)sizeof(MPI_Offset));
- if (sizeof_mpi_offset <= 4 && is_signed){
- printf("Skipped 2GB range test "
- "because MPI_Offset cannot support it\n");
- }else {
- /* verify correctness of assigning 2GB sizes */
- mpi_off = 2 * 1024 * (MPI_Offset)MB;
- INFO((mpi_off>0), "2GB OFFSET assignment no overflow");
- INFO((mpi_off-1)==TWO_GB_LESS1, "2GB OFFSET assignment succeed");
-
- /* verify correctness of increasing from below 2 GB to above 2GB */
- mpi_off = TWO_GB_LESS1;
- for (i=0; i < 3; i++){
- mpi_off_old = mpi_off;
- mpi_off = mpi_off + 1;
- /* no overflow */
- INFO((mpi_off>0), "2GB OFFSET increment no overflow");
- /* correct inc. */
- INFO((mpi_off-1)==mpi_off_old, "2GB OFFSET increment succeed");
- }
- }
-
- if (sizeof_mpi_offset <= 4){
- printf("Skipped 4GB range test "
- "because MPI_Offset cannot support it\n");
- }else {
- /* verify correctness of assigning 4GB sizes */
- mpi_off = 4 * 1024 * (MPI_Offset)MB;
- INFO((mpi_off>0), "4GB OFFSET assignment no overflow");
- INFO((mpi_off-1)==FOUR_GB_LESS1, "4GB OFFSET assignment succeed");
-
- /* verify correctness of increasing from below 4 GB to above 4 GB */
- mpi_off = FOUR_GB_LESS1;
- for (i=0; i < 3; i++){
- mpi_off_old = mpi_off;
- mpi_off = mpi_off + 1;
- /* no overflow */
- INFO((mpi_off>0), "4GB OFFSET increment no overflow");
- /* correct inc. */
- INFO((mpi_off-1)==mpi_off_old, "4GB OFFSET increment succeed");
- }
- }
+ * Verify the size of MPI_Offset and the correctness of handling multi-GB
+ * sizes.
+ */
+ if (MAINPROCESS) { /* only process 0 needs to check it */
+ HDprintf("MPI_Offset is a %s %d-byte integral type\n",
+ is_signed ? "signed" : "unsigned", (int) sizeof(MPI_Offset));
+ if (sizeof_mpi_offset <= 4 && is_signed) {
+ HDprintf("Skipped 2GB range test "
+ "because MPI_Offset cannot support it\n");
+ } else {
+ /* verify correctness of assigning 2GB sizes */
+ mpi_off = 2 * 1024 * (MPI_Offset) MB;
+ INFO((mpi_off > 0), "2GB OFFSET assignment no overflow");
+ INFO((mpi_off - 1) == TWO_GB_LESS1, "2GB OFFSET assignment succeeded");
+
+ /* verify correctness of increasing from below 2 GB to above 2GB */
+ mpi_off = TWO_GB_LESS1;
+ for (i = 0; i < 3; i++) {
+ mpi_off_old = mpi_off;
+ mpi_off = mpi_off + 1;
+ /* no overflow */
+ INFO((mpi_off > 0), "2GB OFFSET increment no overflow");
+ /* correct inc. */
+ INFO((mpi_off - 1) == mpi_off_old,
+ "2GB OFFSET increment succeed");
+ }
+ }
+
+ if (sizeof_mpi_offset <= 4) {
+ HDprintf("Skipped 4GB range test "
+ "because MPI_Offset cannot support it\n");
+ } else {
+ /* verify correctness of assigning 4GB sizes */
+ mpi_off = 4 * 1024 * (MPI_Offset) MB;
+ INFO((mpi_off > 0), "4GB OFFSET assignment no overflow");
+ INFO((mpi_off - 1) == FOUR_GB_LESS1, "4GB OFFSET assignment succeeded");
+
+ /* verify correctness of increasing from below 4 GB to above 4 GB */
+ mpi_off = FOUR_GB_LESS1;
+ for (i = 0; i < 3; i++) {
+ mpi_off_old = mpi_off;
+ mpi_off = mpi_off + 1;
+ /* no overflow */
+ INFO((mpi_off > 0), "4GB OFFSET increment no overflow");
+ /* correct inc. */
+ INFO((mpi_off - 1) == mpi_off_old,
+ "4GB OFFSET increment succeed");
+ }
+ }
}
/*
- * Verify if we can write to a file of multiple GB sizes.
- */
+ * Verify that we can write to a file several GB in size.
+ */
if (VERBOSE_MED)
- printf("MPIO GB file test %s\n", filename);
-
- if (sizeof_mpi_offset <= 4){
- printf("Skipped GB file range test "
- "because MPI_Offset cannot support it\n");
- }else{
- buf = (char *)HDmalloc(MB);
- VRFY((buf!=NULL), "malloc succeed");
-
- /* open a new file. Remove it first in case it exists. */
- /* Must delete because MPI_File_open does not have a Truncate mode. */
- /* Don't care if it has error. */
- MPI_File_delete(filename, MPI_INFO_NULL);
- MPI_Barrier(MPI_COMM_WORLD); /* prevent racing condition */
-
- mrc = MPI_File_open(MPI_COMM_WORLD, filename, MPI_MODE_CREATE|MPI_MODE_RDWR,
- info, &fh);
- VRFY((mrc==MPI_SUCCESS), "MPI_FILE_OPEN");
-
- printf("MPIO GB file write test %s\n", filename);
-
- /* instead of writing every bytes of the file, we will just write
- * some data around the 2 and 4 GB boundaries. That should cover
- * potential integer overflow and filesystem size limits.
- */
- writerrs = 0;
- for (n=2; n <= 4; n+=2){
- ntimes = GB/MB*n/mpi_size + 1;
- for (i=ntimes-2; i <= ntimes; i++){
- mpi_off = (i*mpi_size + mpi_rank)*(MPI_Offset)MB;
- if (VERBOSE_MED)
- HDfprintf(stdout,"proc %d: write to mpi_off=%016llx, %lld\n",
- mpi_rank, mpi_off, mpi_off);
- /* set data to some trivial pattern for easy verification */
- for (j=0; j<MB; j++)
- *(buf+j) = i*mpi_size + mpi_rank;
- if (VERBOSE_MED)
- HDfprintf(stdout,"proc %d: writing %d bytes at offset %lld\n",
- mpi_rank, MB, mpi_off);
- mrc = MPI_File_write_at(fh, mpi_off, buf, MB, MPI_BYTE, &mpi_stat);
- INFO((mrc==MPI_SUCCESS), "GB size file write");
- if (mrc!=MPI_SUCCESS)
- writerrs++;
- }
- }
-
- /* close file and free the communicator */
- mrc = MPI_File_close(&fh);
- VRFY((mrc==MPI_SUCCESS), "MPI_FILE_CLOSE");
-
- mrc = MPI_Barrier(MPI_COMM_WORLD);
- VRFY((mrc==MPI_SUCCESS), "Sync after writes");
-
- /*
- * Verify if we can read the multiple GB file just created.
- */
- /* open it again to verify the data written */
- /* but only if there was no write errors */
- printf("MPIO GB file read test %s\n", filename);
- if (errors_sum(writerrs)>0){
- printf("proc %d: Skip read test due to previous write errors\n",
- mpi_rank);
- goto finish;
- }
- mrc = MPI_File_open(MPI_COMM_WORLD, filename, MPI_MODE_RDONLY, info, &fh);
- VRFY((mrc==MPI_SUCCESS), "");
-
- /* Only read back parts of the file that have been written. */
- for (n=2; n <= 4; n+=2){
- ntimes = GB/MB*n/mpi_size + 1;
- for (i=ntimes-2; i <= ntimes; i++){
- mpi_off = (i*mpi_size + (mpi_size - mpi_rank - 1))*(MPI_Offset)MB;
- if (VERBOSE_MED)
- HDfprintf(stdout,"proc %d: read from mpi_off=%016llx, %lld\n",
- mpi_rank, mpi_off, mpi_off);
- mrc = MPI_File_read_at(fh, mpi_off, buf, MB, MPI_BYTE, &mpi_stat);
- INFO((mrc==MPI_SUCCESS), "GB size file read");
- expected = i*mpi_size + (mpi_size - mpi_rank - 1);
- vrfyerrs=0;
- for (j=0; j<MB; j++){
- if ((*(buf+j) != expected) &&
- (vrfyerrs++ < MAX_ERR_REPORT || VERBOSE_MED)){
- printf("proc %d: found data error at [%ld+%d], expect %d, got %d\n",
- mpi_rank, (long)mpi_off, j, expected, *(buf+j));
- }
- }
- if (vrfyerrs > MAX_ERR_REPORT && !VERBOSE_MED)
- printf("proc %d: [more errors ...]\n", mpi_rank);
-
- nerrs += vrfyerrs;
- }
- }
-
- /* close file and free the communicator */
- mrc = MPI_File_close(&fh);
- VRFY((mrc==MPI_SUCCESS), "MPI_FILE_CLOSE");
-
- /*
- * one more sync to ensure all processes have done reading
- * before ending this test.
- */
- mrc = MPI_Barrier(MPI_COMM_WORLD);
- VRFY((mrc==MPI_SUCCESS), "Sync before leaving test");
-
- printf("Test if MPI_File_get_size works correctly with %s\n", filename);
-
- mrc = MPI_File_open(MPI_COMM_WORLD, filename, MPI_MODE_RDONLY, info, &fh);
- VRFY((mrc==MPI_SUCCESS), "");
-
- if (MAINPROCESS){ /* only process 0 needs to check it*/
+ HDprintf("MPIO GB file test %s\n", filename);
+
+ if (sizeof_mpi_offset <= 4) {
+ HDprintf("Skipped GB file range test "
+ "because MPI_Offset cannot support it\n");
+ } else {
+ buf = (char *) HDmalloc(MB);
+ VRFY((buf != NULL), "malloc succeed");
+
+ /* open a new file. Remove it first in case it exists. */
+ /* Must delete because MPI_File_open does not have a Truncate mode. */
+ /* Don't care if it fails. */
+ MPI_File_delete(filename, MPI_INFO_NULL);
+ MPI_Barrier(MPI_COMM_WORLD); /* prevent race condition */
+
+ mrc = MPI_File_open(MPI_COMM_WORLD, filename,
+ MPI_MODE_CREATE | MPI_MODE_RDWR, info, &fh);
+ VRFY((mrc == MPI_SUCCESS), "MPI_FILE_OPEN");
+
+ HDprintf("MPIO GB file write test %s\n", filename);
+
+ /* instead of writing every byte of the file, we will just write
+ * some data around the 2 and 4 GB boundaries. That should cover
+ * potential integer overflow and filesystem size limits.
+ */
+ writerrs = 0;
+ for (n = 2; n <= 4; n += 2) {
+ ntimes = GB / MB * n / mpi_size + 1;
+ for (i = ntimes - 2; i <= ntimes; i++) {
+ mpi_off = (i * mpi_size + mpi_rank) * (MPI_Offset) MB;
+ if (VERBOSE_MED)
+ HDfprintf(stdout,
+ "proc %d: write to mpi_off=%016llx, %lld\n",
+ mpi_rank, mpi_off, mpi_off);
+ /* set data to some trivial pattern for easy verification */
+ for (j = 0; j < MB; j++)
+ *(buf + j) = (int8_t)(i * mpi_size + mpi_rank);
+ if (VERBOSE_MED)
+ HDfprintf(stdout,
+ "proc %d: writing %d bytes at offset %lld\n",
+ mpi_rank, MB, mpi_off);
+ mrc = MPI_File_write_at(fh, mpi_off, buf, MB, MPI_BYTE,
+ &mpi_stat);
+ INFO((mrc == MPI_SUCCESS), "GB size file write");
+ if (mrc != MPI_SUCCESS)
+ writerrs++;
+ }
+ }
+
+ /* close file and free the communicator */
+ mrc = MPI_File_close(&fh);
+ VRFY((mrc == MPI_SUCCESS), "MPI_FILE_CLOSE");
+
+ mrc = MPI_Barrier(MPI_COMM_WORLD);
+ VRFY((mrc == MPI_SUCCESS), "Sync after writes");
+
+ /*
+ * Verify that we can read the multi-GB file just created.
+ */
+ /* open it again to verify the data written */
+ /* but only if there was no write errors */
+ HDprintf("MPIO GB file read test %s\n", filename);
+ if (errors_sum(writerrs) > 0) {
+ HDprintf("proc %d: Skip read test due to previous write errors\n",
+ mpi_rank);
+ goto finish;
+ }
+ mrc = MPI_File_open(MPI_COMM_WORLD, filename, MPI_MODE_RDONLY, info,
+ &fh);
+ VRFY((mrc == MPI_SUCCESS), "");
+
+ /* Only read back parts of the file that have been written. */
+ for (n = 2; n <= 4; n += 2) {
+ ntimes = GB / MB * n / mpi_size + 1;
+ for (i = ntimes - 2; i <= ntimes; i++) {
+ mpi_off = (i * mpi_size + (mpi_size - mpi_rank - 1))
+ * (MPI_Offset) MB;
+ if (VERBOSE_MED)
+ HDfprintf(stdout,
+ "proc %d: read from mpi_off=%016llx, %lld\n",
+ mpi_rank, mpi_off, mpi_off);
+ mrc = MPI_File_read_at(fh, mpi_off, buf, MB, MPI_BYTE,
+ &mpi_stat);
+ INFO((mrc == MPI_SUCCESS), "GB size file read");
+ expected = (int8_t)(i * mpi_size + (mpi_size - mpi_rank - 1));
+ vrfyerrs = 0;
+ for (j = 0; j < MB; j++) {
+ if ((*(buf + j) != expected)
+ && (vrfyerrs++ < MAX_ERR_REPORT || VERBOSE_MED)) {
+ HDprintf(
+ "proc %d: found data error at [%ld+%d], expect %d, got %d\n",
+ mpi_rank, (long) mpi_off, j, expected,
+ *(buf + j));
+ }
+ }
+ if (vrfyerrs > MAX_ERR_REPORT && !VERBOSE_MED)
+ HDprintf("proc %d: [more errors ...]\n", mpi_rank);
+
+ nerrs += vrfyerrs;
+ }
+ }
+
+ /* close file and free the communicator */
+ mrc = MPI_File_close(&fh);
+ VRFY((mrc == MPI_SUCCESS), "MPI_FILE_CLOSE");
+
+ /*
+ * one more sync to ensure all processes have done reading
+ * before ending this test.
+ */
+ mrc = MPI_Barrier(MPI_COMM_WORLD);
+ VRFY((mrc == MPI_SUCCESS), "Sync before leaving test");
+
+ HDprintf("Test if MPI_File_get_size works correctly with %s\n", filename);
+
+ mrc = MPI_File_open(MPI_COMM_WORLD, filename, MPI_MODE_RDONLY, info,
+ &fh);
+ VRFY((mrc == MPI_SUCCESS), "");
+
+ if (MAINPROCESS) { /* only process 0 needs to check it */
mrc = MPI_File_get_size(fh, &size);
- VRFY((mrc==MPI_SUCCESS), "");
- VRFY((size == mpi_off+MB), "MPI_File_get_size doesn't return correct file size.");
+ VRFY((mrc == MPI_SUCCESS), "");
+ VRFY((size == mpi_off+MB),
+ "MPI_File_get_size doesn't return correct file size.");
}
- /* close file and free the communicator */
- mrc = MPI_File_close(&fh);
- VRFY((mrc==MPI_SUCCESS), "MPI_FILE_CLOSE");
+ /* close file and free the communicator */
+ mrc = MPI_File_close(&fh);
+ VRFY((mrc == MPI_SUCCESS), "MPI_FILE_CLOSE");
- /*
- * one more sync to ensure all processes have done reading
- * before ending this test.
- */
- mrc = MPI_Barrier(MPI_COMM_WORLD);
- VRFY((mrc==MPI_SUCCESS), "Sync before leaving test");
+ /*
+ * one more sync to ensure all processes have done reading
+ * before ending this test.
+ */
+ mrc = MPI_Barrier(MPI_COMM_WORLD);
+ VRFY((mrc == MPI_SUCCESS), "Sync before leaving test");
}
-finish:
- if (buf)
- HDfree(buf);
+ finish:
+ if (buf)
+ HDfree(buf);
return (nerrs);
}
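The signedness/width probe at the top of this function is self-contained; a
minimal standalone sketch of the same logic (hypothetical program, not part
of the test):

    #include <mpi.h>
    #include <stdio.h>

    int main(int argc, char **argv)
    {
        MPI_Offset off = 0;
        /* (off - 1) only goes negative if MPI_Offset is a signed type */
        int is_signed = ((MPI_Offset)(off - 1)) < 0;

        MPI_Init(&argc, &argv);
        printf("MPI_Offset: %s, %d bytes\n",
               is_signed ? "signed" : "unsigned", (int)sizeof(MPI_Offset));
        MPI_Finalize();
        return 0;
    }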
-
/*
* MPI-IO Test: One writes, Many reads.
* Verify if only one process writes some data and then all other
@@ -424,191 +427,190 @@ finish:
* Each process writes something, then reads all data back.
*/
-#define DIMSIZE 32 /* Dimension size. */
-#define PRINTID printf("Proc %d: ", mpi_rank)
+#define DIMSIZE 32 /* Dimension size. */
+#define PRINTID HDprintf("Proc %d: ", mpi_rank)
#define USENONE 0
-#define USEATOM 1 /* request atomic I/O */
-#define USEFSYNC 2 /* request file_sync */
-
+#define USEATOM 1 /* request atomic I/O */
+#define USEFSYNC 2 /* request file_sync */
-static int
-test_mpio_1wMr(char *filename, int special_request)
-{
+static int test_mpio_1wMr(char *filename, int special_request) {
char hostname[128];
- int mpi_size, mpi_rank;
+ int mpi_size, mpi_rank;
MPI_File fh;
char mpi_err_str[MPI_MAX_ERROR_STRING];
- int mpi_err_strlen;
- int mpi_err;
+ int mpi_err_strlen;
+ int mpi_err;
unsigned char writedata[DIMSIZE], readdata[DIMSIZE];
unsigned char expect_val;
- int i, irank;
- int nerrs = 0; /* number of errors */
- int atomicity;
- MPI_Offset mpi_off;
- MPI_Status mpi_stat;
+ int i, irank;
+ int nerrs = 0; /* number of errors */
+ int atomicity;
+ MPI_Offset mpi_off;
+ MPI_Status mpi_stat;
MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
- if (MAINPROCESS && VERBOSE_MED){
- printf("Testing one process writes, all processes read.\n");
- printf("Using %d processes accessing file %s\n", mpi_size, filename);
- printf(" (Filename can be specified via program argument)\n");
+ if (MAINPROCESS && VERBOSE_MED) {
+ HDprintf("Testing one process writes, all processes read.\n");
+ HDprintf("Using %d processes accessing file %s\n", mpi_size, filename);
+ HDprintf(" (Filename can be specified via program argument)\n");
}
/* show the hostname so that we can tell where the processes are running */
- if (VERBOSE_DEF){
- if (gethostname(hostname, 128) < 0){
- PRINTID;
- printf("gethostname failed\n");
- return 1;
- }
- PRINTID;
- printf("hostname=%s\n", hostname);
+ if (VERBOSE_DEF) {
+#ifdef H5_HAVE_GETHOSTNAME
+ if (HDgethostname(hostname, sizeof(hostname)) < 0) {
+ HDprintf("gethostname failed\n");
+ hostname[0] = '\0';
+ }
+#else
+ HDprintf("gethostname unavailable\n");
+ hostname[0] = '\0';
+#endif
+ PRINTID;
+ HDprintf("hostname=%s\n", hostname);
}
/* Delete any old file in order to start anew. */
/* Must delete because MPI_File_open does not have a Truncate mode. */
 /* Don't care if it fails. */
MPI_File_delete(filename, MPI_INFO_NULL);
- MPI_Barrier(MPI_COMM_WORLD); /* prevent racing condition */
+ MPI_Barrier(MPI_COMM_WORLD); /* prevent racing condition */
if ((mpi_err = MPI_File_open(MPI_COMM_WORLD, filename,
- MPI_MODE_RDWR | MPI_MODE_CREATE ,
- MPI_INFO_NULL, &fh))
- != MPI_SUCCESS){
- MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
- PRINTID;
- printf("MPI_File_open failed (%s)\n", mpi_err_str);
- return 1;
+ MPI_MODE_RDWR | MPI_MODE_CREATE, MPI_INFO_NULL, &fh))
+ != MPI_SUCCESS) {
+ MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
+ PRINTID;
+ HDprintf("MPI_File_open failed (%s)\n", mpi_err_str);
+ return 1;
}
-if (special_request & USEATOM){
- /* ==================================================
- * Set atomcity to true (1). A POSIX compliant filesystem
- * should not need this.
- * ==================================================*/
- if ((mpi_err = MPI_File_get_atomicity(fh, &atomicity)) != MPI_SUCCESS){
- MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
- PRINTID;
- printf("MPI_File_get_atomicity failed (%s)\n", mpi_err_str);
- }
- if (VERBOSE_HI)
- printf("Initial atomicity = %d\n", atomicity);
- if ((mpi_err = MPI_File_set_atomicity(fh, 1)) != MPI_SUCCESS){
- MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
- PRINTID;
- printf("MPI_File_set_atomicity failed (%s)\n", mpi_err_str);
- }
- if ((mpi_err = MPI_File_get_atomicity(fh, &atomicity)) != MPI_SUCCESS){
- MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
- PRINTID;
- printf("MPI_File_get_atomicity failed (%s)\n", mpi_err_str);
+ if (special_request & USEATOM) {
+ /* ==================================================
+ * Set atomicity to true (1). A POSIX-compliant filesystem
+ * should not need this.
+ * ==================================================*/
+ if ((mpi_err = MPI_File_get_atomicity(fh, &atomicity)) != MPI_SUCCESS) {
+ MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
+ PRINTID;
+ HDprintf("MPI_File_get_atomicity failed (%s)\n", mpi_err_str);
+ }
+ if (VERBOSE_HI)
+ HDprintf("Initial atomicity = %d\n", atomicity);
+ if ((mpi_err = MPI_File_set_atomicity(fh, 1)) != MPI_SUCCESS) {
+ MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
+ PRINTID;
+ HDprintf("MPI_File_set_atomicity failed (%s)\n", mpi_err_str);
+ }
+ if ((mpi_err = MPI_File_get_atomicity(fh, &atomicity)) != MPI_SUCCESS) {
+ MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
+ PRINTID;
+ HDprintf("MPI_File_get_atomicity failed (%s)\n", mpi_err_str);
+ }
+ if (VERBOSE_HI)
+ HDprintf("After set_atomicity atomicity = %d\n", atomicity);
}
- if (VERBOSE_HI)
- printf("After set_atomicity atomicity = %d\n", atomicity);
-}
/* This barrier is not necessary but do it anyway. */
MPI_Barrier(MPI_COMM_WORLD);
- if (VERBOSE_HI){
- PRINTID;
- printf("between MPI_Barrier and MPI_File_write_at\n");
+ if (VERBOSE_HI) {
+ PRINTID;
+ HDprintf("between MPI_Barrier and MPI_File_write_at\n");
}
/* ==================================================
- * Each process calculates what to write but
- * only process irank(0) writes.
- * ==================================================*/
- irank=0;
- for (i=0; i < DIMSIZE; i++)
- writedata[i] = irank*DIMSIZE + i;
- mpi_off = irank*DIMSIZE;
+ * Each process calculates what to write but
+ * only process irank (rank 0) writes.
+ * ==================================================*/
+ irank = 0;
+ for (i = 0; i < DIMSIZE; i++)
+ writedata[i] = (uint8_t)(irank * DIMSIZE + i);
+ mpi_off = irank * DIMSIZE;
/* Only one process writes */
- if (mpi_rank==irank){
- if (VERBOSE_HI){
- PRINTID; printf("wrote %d bytes at %ld\n", DIMSIZE, (long)mpi_off);
- }
- if ((mpi_err = MPI_File_write_at(fh, mpi_off, writedata, DIMSIZE,
- MPI_BYTE, &mpi_stat))
- != MPI_SUCCESS){
- MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
- PRINTID;
- printf("MPI_File_write_at offset(%ld), bytes (%d), failed (%s)\n",
- (long) mpi_off, DIMSIZE, mpi_err_str);
- return 1;
- };
+ if (mpi_rank == irank) {
+ if (VERBOSE_HI) {
+ PRINTID;
+ HDprintf("wrote %d bytes at %ld\n", DIMSIZE, (long) mpi_off);
+ }
+ if ((mpi_err = MPI_File_write_at(fh, mpi_off, writedata, DIMSIZE,
+ MPI_BYTE, &mpi_stat)) != MPI_SUCCESS) {
+ MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
+ PRINTID;
+ HDprintf("MPI_File_write_at offset(%ld), bytes (%d), failed (%s)\n",
+ (long) mpi_off, DIMSIZE, mpi_err_str);
+ return 1;
+ }
};
/* Bcast the return code and */
 /* make sure all writes are done before reading. */
MPI_Bcast(&mpi_err, 1, MPI_INT, irank, MPI_COMM_WORLD);
- if (VERBOSE_HI){
- PRINTID;
- printf("MPI_Bcast: mpi_err = %d\n", mpi_err);
+ if (VERBOSE_HI) {
+ PRINTID;
+ HDprintf("MPI_Bcast: mpi_err = %d\n", mpi_err);
}
-if (special_request & USEFSYNC){
- /* ==================================================
- * Do a file sync. A POSIX compliant filesystem
- * should not need this.
- * ==================================================*/
- if (VERBOSE_HI)
- printf("Apply MPI_File_sync\n");
- /* call file_sync to force the write out */
- if ((mpi_err = MPI_File_sync(fh)) != MPI_SUCCESS){
- MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
- PRINTID;
- printf("MPI_File_sync failed (%s)\n", mpi_err_str);
- }
- MPI_Barrier(MPI_COMM_WORLD);
- /* call file_sync to force the write out */
- if ((mpi_err = MPI_File_sync(fh)) != MPI_SUCCESS){
- MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
- PRINTID;
- printf("MPI_File_sync failed (%s)\n", mpi_err_str);
+ if (special_request & USEFSYNC) {
+ /* ==================================================
+ * Do a file sync. A POSIX compliant filesystem
+ * should not need this.
+ * ==================================================*/
+ if (VERBOSE_HI)
+ HDprintf("Apply MPI_File_sync\n");
+ /* call file_sync to force the write out */
+ if ((mpi_err = MPI_File_sync(fh)) != MPI_SUCCESS) {
+ MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
+ PRINTID;
+ HDprintf("MPI_File_sync failed (%s)\n", mpi_err_str);
+ }
+ MPI_Barrier(MPI_COMM_WORLD);
+ /* call file_sync to force the write out */
+ if ((mpi_err = MPI_File_sync(fh)) != MPI_SUCCESS) {
+ MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
+ PRINTID;
+ HDprintf("MPI_File_sync failed (%s)\n", mpi_err_str);
+ }
}
-}
 /* This barrier is not necessary because the Bcast or File_sync above */
/* should take care of it. Do it anyway. */
MPI_Barrier(MPI_COMM_WORLD);
- if (VERBOSE_HI){
- PRINTID;
- printf("after MPI_Barrier\n");
+ if (VERBOSE_HI) {
+ PRINTID;
+ HDprintf("after MPI_Barrier\n");
}
/* ==================================================
- * Each process reads what process 0 wrote and verify.
- * ==================================================*/
- irank=0;
- mpi_off = irank*DIMSIZE;
+ * Each process reads what process 0 wrote and verifies it.
+ * ==================================================*/
+ irank = 0;
+ mpi_off = irank * DIMSIZE;
if ((mpi_err = MPI_File_read_at(fh, mpi_off, readdata, DIMSIZE, MPI_BYTE,
- &mpi_stat))
- != MPI_SUCCESS){
- MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
- PRINTID;
- printf("MPI_File_read_at offset(%ld), bytes (%d), failed (%s)\n",
- (long) mpi_off, DIMSIZE, mpi_err_str);
- return 1;
+ &mpi_stat)) != MPI_SUCCESS) {
+ MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
+ PRINTID;
+ HDprintf("MPI_File_read_at offset(%ld), bytes (%d), failed (%s)\n",
+ (long) mpi_off, DIMSIZE, mpi_err_str);
+ return 1;
};
- for (i=0; i < DIMSIZE; i++){
- expect_val = irank*DIMSIZE + i;
- if (readdata[i] != expect_val){
- PRINTID;
- printf("read data[%d:%d] got %02x, expect %02x\n", irank, i,
- readdata[i], expect_val);
- nerrs++;
- }
+ for (i = 0; i < DIMSIZE; i++) {
+ expect_val = (uint8_t)(irank * DIMSIZE + i);
+ if (readdata[i] != expect_val) {
+ PRINTID;
+ HDprintf("read data[%d:%d] got %02x, expect %02x\n", irank, i,
+ readdata[i], expect_val);
+ nerrs++;
+ }
}
MPI_File_close(&fh);
- if (VERBOSE_HI){
- PRINTID;
- printf("%d data errors detected\n", nerrs);
+ if (VERBOSE_HI) {
+ PRINTID;
+ HDprintf("%d data errors detected\n", nerrs);
}
mpi_err = MPI_Barrier(MPI_COMM_WORLD);
@@ -617,272 +619,268 @@ if (special_request & USEFSYNC){
/*
-Function: test_mpio_derived_dtype
-
-Test Whether the Displacement of MPI derived datatype
-(+ File_set_view + MPI_write)works or not on this MPI-IO package
-and this platform.
-
-1. Details for the test:
-1) Create two derived datatypes with MPI_Type_create_hindexed:
- datatype1:
- count = 1, blocklens = 1, offsets = 0,
- base type = MPI_BYTE(essentially a char)
- datatype2:
- count = 1, blocklens = 1, offsets = 1(byte),
- base type = MPI_BYTE
-
-2) Using these two derived datatypes,
- Build another derived datatype with MPI_Type_create_struct:
- advtype: derived from datatype1 and datatype2
- advtype:
- count = 2, blocklens[0] = 1, blocklens[1]=1,
- offsets[0] = 0, offsets[1] = 1(byte),
- bas_type[0]=datatype1,
- bas_type[1] = datatype2;
-
-3) Setting MPI file view with advtype
-4) Writing 2 bytes 1 to 2 using MPI_File_write to a file
-5) File content:
-Suppose the fill value of the file is 0(most machines indeed do so)
-and Fill value is embraced with "() in the following output:
-Expected output should be:
-1,0,2
-
-
-
-However, at some platforms, for example, IBM AIX(at March 23rd, 2005):
-the following values were obtained:
-1,2,0
-
-The problem is that the displacement of the second derived datatype(datatype2) which formed the final derived datatype(advtype)
+ Function: test_mpio_derived_dtype
+
+ Test whether the displacement of an MPI derived datatype
+ (+ File_set_view + MPI_write) works on this MPI-IO package
+ and this platform.
+
+ 1. Details for the test:
+ 1) Create two derived datatypes with MPI_Type_create_hindexed:
+ datatype1:
+ count = 1, blocklens = 1, offsets = 0,
+ base type = MPI_BYTE(essentially a char)
+ datatype2:
+ count = 1, blocklens = 1, offsets = 1(byte),
+ base type = MPI_BYTE
+
+ 2) Using these two derived datatypes,
+ Build another derived datatype with MPI_Type_create_struct:
+ advtype: derived from datatype1 and datatype2
+ advtype:
+ count = 2, blocklens[0] = 1, blocklens[1]=1,
+ offsets[0] = 0, offsets[1] = 1(byte),
+ bas_type[0]=datatype1,
+ bas_type[1] = datatype2;
+
+ 3) Setting MPI file view with advtype
+ 4) Writing 2 bytes, with values 1 and 2, using MPI_File_write to a file
+ 5) File content:
+ Suppose the fill value of the file is 0 (most machines indeed use 0);
+ the untouched fill byte shows up in the middle of the following output.
+ Expected output should be:
+ 1,0,2
+
+ However, on some platforms, for example IBM AIX (as of March 23rd, 2005),
+ the following values were obtained:
+ 1,2,0
+
+ The problem is that the displacement of the second derived datatype (datatype2), which forms the final derived datatype (advtype),
 has been put after the basic datatype (MPI_BYTE) of datatype2. This is a bug.
-2. This test will verify whether the complicated derived datatype is working on
-the current platform.
+ 2. This test will verify whether the complicated derived datatype is working on
+ the current platform.
-If this bug has been fixed in the previous not-working package, this test will issue a printf message to tell the developer to change
-the configuration specific file of HDF5 so that we can change our configurationsetting to support collective IO for irregular selections.
+ If this bug has been fixed in a previously non-working package, this test will issue an HDprintf message telling the developer to change
+ the configuration-specific file of HDF5 so that we can change our configuration setting to support collective IO for irregular selections.
-If it turns out that the previous working MPI-IO package no longer works, this test will also issue a message to inform the corresponding failure so that
-we can turn off collective IO support for irregular selections.
-*/
+ If it turns out that a previously working MPI-IO package no longer works, this test will also issue a message reporting the failure so that
+ we can turn off collective IO support for irregular selections.
+ */
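For reference, a hedged sketch of the datatype construction described above
(names mirror the comment; error checking omitted):

    MPI_Datatype t1, t2, advtype, bases[2];
    int          blk[1]   = {1};
    int          ablk[2]  = {1, 1};
    MPI_Aint     disp0[1] = {0}, disp1[1] = {1}, adisp[2] = {0, 1};

    MPI_Type_create_hindexed(1, blk, disp0, MPI_BYTE, &t1); /* datatype1 */
    MPI_Type_create_hindexed(1, blk, disp1, MPI_BYTE, &t2); /* datatype2 */
    bases[0] = t1;
    bases[1] = t2;
    MPI_Type_create_struct(2, ablk, adisp, bases, &advtype);
    MPI_Type_commit(&advtype);
    /* advtype selects file bytes 0 and 1 + 1 = 2, so writing the values
     * {1, 2} through a view built on it should leave the file as 1,0,2. */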
static int test_mpio_derived_dtype(char *filename) {
MPI_File fh;
char mpi_err_str[MPI_MAX_ERROR_STRING];
- int mpi_err_strlen;
- int mpi_err;
- int i;
- MPI_Datatype etype,filetype;
- MPI_Datatype adv_filetype,bas_filetype[2];
- MPI_Datatype filetypenew;
- MPI_Offset disp;
- MPI_Status Status;
- MPI_Aint adv_disp[2];
- MPI_Aint offsets[1];
- int blocklens[1],adv_blocklens[2];
- int count,outcount;
- int retcode;
-
- int mpi_rank,mpi_size;
-
- char buf[3],outbuf[3] = {0};
+ int mpi_err_strlen;
+ int mpi_err;
+ int i;
+ MPI_Datatype etype, filetype;
+ MPI_Datatype adv_filetype, bas_filetype[2];
+ MPI_Datatype filetypenew;
+ MPI_Offset disp;
+ MPI_Status Status;
+ MPI_Aint adv_disp[2];
+ MPI_Aint offsets[1];
+ int blocklens[1], adv_blocklens[2];
+ int count, outcount;
+ int retcode;
+
+ int mpi_rank, mpi_size;
+
+ char buf[3], outbuf[3] = { 0 };
MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
retcode = 0;
- for(i=0;i<3;i++)
- buf[i] = i+1;
-
+ for (i = 0; i < 3; i++)
+ buf[i] = (char)(i + 1);
if ((mpi_err = MPI_File_open(MPI_COMM_WORLD, filename,
- MPI_MODE_RDWR | MPI_MODE_CREATE,
- MPI_INFO_NULL, &fh))
- != MPI_SUCCESS){
- MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
- printf("MPI_File_open failed (%s)\n", mpi_err_str);
- return 1;
+ MPI_MODE_RDWR | MPI_MODE_CREATE, MPI_INFO_NULL, &fh))
+ != MPI_SUCCESS) {
+ MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
+ HDprintf("MPI_File_open failed (%s)\n", mpi_err_str);
+ return 1;
}
- disp = 0;
+ disp = 0;
etype = MPI_BYTE;
count = 1;
blocklens[0] = 1;
- offsets[0] = 0;
+ offsets[0] = 0;
- if((mpi_err= MPI_Type_create_hindexed(count,blocklens,offsets,MPI_BYTE,&filetype))
- != MPI_SUCCESS){
- MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
- printf("MPI_Type_contiguous failed (%s)\n", mpi_err_str);
- return 1;
+ if ((mpi_err = MPI_Type_create_hindexed(count, blocklens, offsets, MPI_BYTE,
+ &filetype)) != MPI_SUCCESS) {
+ MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
+ HDprintf("MPI_Type_contiguous failed (%s)\n", mpi_err_str);
+ return 1;
}
- if((mpi_err=MPI_Type_commit(&filetype))!=MPI_SUCCESS){
+ if ((mpi_err = MPI_Type_commit(&filetype)) != MPI_SUCCESS) {
MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
- printf("MPI_Type_commit failed (%s)\n", mpi_err_str);
- return 1;
+ HDprintf("MPI_Type_commit failed (%s)\n", mpi_err_str);
+ return 1;
}
count = 1;
- blocklens[0]=1;
+ blocklens[0] = 1;
offsets[0] = 1;
- if((mpi_err= MPI_Type_create_hindexed(count,blocklens,offsets,MPI_BYTE,&filetypenew))
- != MPI_SUCCESS){
- MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
- printf("MPI_Type_contiguous failed (%s)\n", mpi_err_str);
- return 1;
+ if ((mpi_err = MPI_Type_create_hindexed(count, blocklens, offsets, MPI_BYTE,
+ &filetypenew)) != MPI_SUCCESS) {
+ MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
+ HDprintf("MPI_Type_contiguous failed (%s)\n", mpi_err_str);
+ return 1;
}
- if((mpi_err=MPI_Type_commit(&filetypenew))!=MPI_SUCCESS){
+ if ((mpi_err = MPI_Type_commit(&filetypenew)) != MPI_SUCCESS) {
MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
- printf("MPI_Type_commit failed (%s)\n", mpi_err_str);
- return 1;
+ HDprintf("MPI_Type_commit failed (%s)\n", mpi_err_str);
+ return 1;
}
- outcount = 2;
+ outcount = 2;
adv_blocklens[0] = 1;
adv_blocklens[1] = 1;
- adv_disp[0] = 0;
- adv_disp[1] = 1;
- bas_filetype[0] = filetype;
- bas_filetype[1] = filetypenew;
-
- if((mpi_err= MPI_Type_create_struct(outcount,adv_blocklens,adv_disp,bas_filetype,&adv_filetype))
- != MPI_SUCCESS){
- MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
- printf("MPI_Type_create_struct failed (%s)\n", mpi_err_str);
- return 1;
+ adv_disp[0] = 0;
+ adv_disp[1] = 1;
+ bas_filetype[0] = filetype;
+ bas_filetype[1] = filetypenew;
+
+ if ((mpi_err = MPI_Type_create_struct(outcount, adv_blocklens, adv_disp,
+ bas_filetype, &adv_filetype)) != MPI_SUCCESS) {
+ MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
+ HDprintf("MPI_Type_create_struct failed (%s)\n", mpi_err_str);
+ return 1;
}
- if((mpi_err=MPI_Type_commit(&adv_filetype))!=MPI_SUCCESS){
+ if ((mpi_err = MPI_Type_commit(&adv_filetype)) != MPI_SUCCESS) {
MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
- printf("MPI_Type_commit failed (%s)\n", mpi_err_str);
- return 1;
+ HDprintf("MPI_Type_commit failed (%s)\n", mpi_err_str);
+ return 1;
}
-
- if((mpi_err = MPI_File_set_view(fh,disp,etype,adv_filetype,"native",MPI_INFO_NULL))!= MPI_SUCCESS){
- MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
- printf("MPI_File_set_view failed (%s)\n", mpi_err_str);
- return 1;
+ if ((mpi_err = MPI_File_set_view(fh, disp, etype, adv_filetype, "native",
+ MPI_INFO_NULL)) != MPI_SUCCESS) {
+ MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
+ HDprintf("MPI_File_set_view failed (%s)\n", mpi_err_str);
+ return 1;
}
- if((mpi_err = MPI_File_write(fh,buf,3,MPI_BYTE,&Status))!= MPI_SUCCESS){
+ if ((mpi_err = MPI_File_write(fh, buf, 3, MPI_BYTE, &Status))
+ != MPI_SUCCESS) {
MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
- printf("MPI_File_write failed (%s)\n", mpi_err_str);
- return 1;
- ;
+ HDprintf("MPI_File_write failed (%s)\n", mpi_err_str);
+ return 1;
}
-
- if((mpi_err = MPI_File_close(&fh)) != MPI_SUCCESS){
- MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
- printf("MPI_File_close failed (%s)\n", mpi_err_str);
- return 1;
+ if ((mpi_err = MPI_File_close(&fh)) != MPI_SUCCESS) {
+ MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
+ HDprintf("MPI_File_close failed (%s)\n", mpi_err_str);
+ return 1;
}
-
- if((mpi_err = MPI_File_open(MPI_COMM_WORLD,filename,MPI_MODE_RDONLY,MPI_INFO_NULL,&fh)) != MPI_SUCCESS){
- MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
- printf("MPI_File_open failed (%s)\n", mpi_err_str);
- return 1;
+ if ((mpi_err = MPI_File_open(MPI_COMM_WORLD, filename, MPI_MODE_RDONLY,
+ MPI_INFO_NULL, &fh)) != MPI_SUCCESS) {
+ MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
+ HDprintf("MPI_File_open failed (%s)\n", mpi_err_str);
+ return 1;
}
- if((mpi_err = MPI_File_set_view(fh,0,MPI_BYTE,MPI_BYTE,"native",MPI_INFO_NULL))!= MPI_SUCCESS){
+ if ((mpi_err = MPI_File_set_view(fh, 0, MPI_BYTE, MPI_BYTE, "native",
+ MPI_INFO_NULL)) != MPI_SUCCESS) {
MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
- printf("MPI_File_set_view failed (%s)\n", mpi_err_str);
- return 1;
+ HDprintf("MPI_File_set_view failed (%s)\n", mpi_err_str);
+ return 1;
}
- if((mpi_err = MPI_File_read(fh,outbuf,3,MPI_BYTE,&Status))!=MPI_SUCCESS){
- MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
- printf("MPI_File_read failed (%s)\n", mpi_err_str);
- return 1;
+ if ((mpi_err = MPI_File_read(fh, outbuf, 3, MPI_BYTE, &Status))
+ != MPI_SUCCESS) {
+ MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
+ HDprintf("MPI_File_read failed (%s)\n", mpi_err_str);
+ return 1;
}
- if(outbuf[2]==2) {
- retcode = 0;
- }
- else {
-/* if(mpi_rank == 0) {
- printf("complicated derived datatype is NOT working at this platform\n");
- printf("go back to hdf5/config and find the corresponding\n");
- printf("configure-specific file and change ?????\n");
- }
-*/
- retcode = -1;
- }
-
- if((mpi_err = MPI_File_close(&fh)) != MPI_SUCCESS){
- MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
- printf("MPI_File_close failed (%s)\n", mpi_err_str);
- return 1;
+ if (outbuf[2] == 2) {
+ retcode = 0;
+ } else {
+ /* if(mpi_rank == 0) {
+ HDprintf("complicated derived datatype is NOT working at this platform\n");
+ HDprintf("go back to hdf5/config and find the corresponding\n");
+ HDprintf("configure-specific file and change ?????\n");
+ }
+ */
+ retcode = -1;
}
+ if ((mpi_err = MPI_File_close(&fh)) != MPI_SUCCESS) {
+ MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
+ HDprintf("MPI_File_close failed (%s)\n", mpi_err_str);
+ return 1;
+ }
mpi_err = MPI_Barrier(MPI_COMM_WORLD);
- if(retcode == -1) {
- if(mpi_rank == 0) {
- printf("Complicated derived datatype is NOT working at this platform\n");
- printf(" Please report to help@hdfgroup.org about this problem.\n");
- }
- retcode = 1;
+ if (retcode == -1) {
+ if (mpi_rank == 0) {
+ HDprintf(
+ "Complicated derived datatype is NOT working at this platform\n");
+ HDprintf(" Please report to help@hdfgroup.org about this problem.\n");
+ }
+ retcode = 1;
}
return retcode;
}
/*
-Function: test_mpio_special_collective
+ Function: test_mpio_special_collective
-Test Whether collective IO is still working when more than one process
-has no contribution to IO. To properly test this case, at least FOUR
-processes are needed.
+ Test whether collective IO is still working when more than one process
+ has no contribution to IO. To properly test this case, at least FOUR
+ processes are needed.
-1. Details for the test:
-1) Create one derived datatype with MPI_Type_create_hindexed:
+ 1. Details for the test:
+ 1) Create one derived datatype with MPI_Type_create_hindexed:
-2) Choosing at least two processes to contribute none for IO with
- the buf size inside MPI_Write_at_all to 0.
-3) Choosing at least two processes to have real contributions for IO.
-4) Do collective IO.
+ 2) Choose at least two processes that contribute nothing to the IO by
+ setting their buf size inside MPI_File_write_at_all to 0.
+ 3) Choose at least two processes that make real contributions to the IO.
+ 4) Do collective IO.
-2. This test will fail with the MPI-IO package that doesn't support this. For example,
-mpich 1.2.6.
+ 2. This test will fail with an MPI-IO package that doesn't support this, for example
+ mpich 1.2.6.
-If this bug has been fixed in the previous not-working package, this test will issue a printf message to tell the developer to change
-the configuration specific file of HDF5 so that we can change our configurationsetting to support special collective IO; currently only special collective IO.
+ If this bug has been fixed in a previously non-working package, this test will issue an HDprintf message telling the developer to change
+ the configuration-specific file of HDF5 so that we can change our configuration setting to support special collective IO; currently only special collective IO.
-If it turns out that the previous working MPI-IO package no longer works, this test will also issue a message to inform the corresponding failure so that
-we can turn off the support for special collective IO; currently only special collective IO.
-*/
+ If it turns out that a previously working MPI-IO package no longer works, this test will also issue a message reporting the failure so that
+ we can turn off the support for special collective IO; currently only special collective IO.
+ */
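The crux is that MPI_File_write_at_all is collective: every rank must make
the call, including ranks that contribute zero bytes. A simplified, hedged
sketch of that shape (fh, writedata, DIMSIZE and mpi_rank as in the test
below; the real test routes the zero contribution through a derived type):

    MPI_Status st;
    int        count = (mpi_rank < 2) ? DIMSIZE : 0;  /* ranks >= 2: none */
    MPI_File_write_at_all(fh, (MPI_Offset)(mpi_rank * DIMSIZE),
                          writedata, count, MPI_BYTE, &st);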
-static int
-test_mpio_special_collective(char *filename)
-{
- int mpi_size, mpi_rank;
+static int test_mpio_special_collective(char *filename) {
+ int mpi_size, mpi_rank;
MPI_File fh;
- MPI_Datatype etype,buftype,filetype;
+ MPI_Datatype etype, buftype, filetype;
char mpi_err_str[MPI_MAX_ERROR_STRING];
- int mpi_err_strlen;
- int mpi_err;
- char writedata[2*DIMSIZE];
+ int mpi_err_strlen;
+ int mpi_err;
+ char writedata[2 * DIMSIZE];
char filerep[7] = "native";
- int i;
- int count,bufcount;
+ int i;
+ int count, bufcount;
int blocklens[2];
MPI_Aint offsets[2];
- MPI_Offset mpi_off = 0;
- MPI_Status mpi_stat;
- int retcode = 0;
+ MPI_Offset mpi_off = 0;
+ MPI_Status mpi_stat;
+ int retcode = 0;
MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
/* create MPI data type */
etype = MPI_BYTE;
- if(mpi_rank == 0 || mpi_rank == 1) {
+ if (mpi_rank == 0 || mpi_rank == 1) {
count = DIMSIZE;
bufcount = 1;
} /* end if */
@@ -892,101 +890,85 @@ test_mpio_special_collective(char *filename)
} /* end else */
blocklens[0] = count;
- offsets[0] = mpi_rank*count;
+ offsets[0] = mpi_rank * count;
blocklens[1] = count;
- offsets[1] = (mpi_size+mpi_rank)*count;
-
- if(count !=0) {
- if((mpi_err = MPI_Type_create_hindexed(2,
- blocklens,
- offsets,
- etype,
- &filetype)) != MPI_SUCCESS) {
+ offsets[1] = (mpi_size + mpi_rank) * count;
+
+ if (count != 0) {
+ if ((mpi_err = MPI_Type_create_hindexed(2, blocklens, offsets, etype,
+ &filetype)) != MPI_SUCCESS) {
MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
- printf("MPI_Type_contiguous failed (%s)\n", mpi_err_str);
+ HDprintf("MPI_Type_contiguous failed (%s)\n", mpi_err_str);
return 1;
} /* end if */
- if((mpi_err = MPI_Type_commit(&filetype)) != MPI_SUCCESS) {
+ if ((mpi_err = MPI_Type_commit(&filetype)) != MPI_SUCCESS) {
MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
- printf("MPI_Type_commit failed (%s)\n", mpi_err_str);
+ HDprintf("MPI_Type_commit failed (%s)\n", mpi_err_str);
return 1;
} /* end if */
- if((mpi_err = MPI_Type_create_hindexed(2,
- blocklens,
- offsets,
- etype,
- &buftype)) != MPI_SUCCESS) {
+ if ((mpi_err = MPI_Type_create_hindexed(2, blocklens, offsets, etype,
+ &buftype)) != MPI_SUCCESS) {
MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
- printf("MPI_Type_contiguous failed (%s)\n", mpi_err_str);
+ HDprintf("MPI_Type_contiguous failed (%s)\n", mpi_err_str);
return 1;
} /* end if */
- if((mpi_err = MPI_Type_commit(&buftype)) != MPI_SUCCESS) {
+ if ((mpi_err = MPI_Type_commit(&buftype)) != MPI_SUCCESS) {
MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
- printf("MPI_Type_commit failed (%s)\n", mpi_err_str);
+ HDprintf("MPI_Type_commit failed (%s)\n", mpi_err_str);
return 1;
} /* end if */
} /* end if */
else {
filetype = MPI_BYTE;
- buftype = MPI_BYTE;
+ buftype = MPI_BYTE;
} /* end else */
/* Open a file */
- if ((mpi_err = MPI_File_open(MPI_COMM_WORLD,
- filename,
- MPI_MODE_RDWR | MPI_MODE_CREATE,
- MPI_INFO_NULL,
- &fh)) != MPI_SUCCESS) {
+ if ((mpi_err = MPI_File_open(MPI_COMM_WORLD, filename,
+ MPI_MODE_RDWR | MPI_MODE_CREATE, MPI_INFO_NULL, &fh))
+ != MPI_SUCCESS) {
MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
- printf("MPI_File_open failed (%s)\n", mpi_err_str);
+ HDprintf("MPI_File_open failed (%s)\n", mpi_err_str);
return 1;
} /* end if */
/* each process writes some data */
- for (i=0; i < 2*DIMSIZE; i++)
- writedata[i] = (char)(mpi_rank*DIMSIZE + i);
+ for (i = 0; i < 2 * DIMSIZE; i++)
+ writedata[i] = (char) (mpi_rank * DIMSIZE + i);
/* Set the file view */
- if((mpi_err = MPI_File_set_view(fh,
- mpi_off,
- MPI_BYTE,
- filetype,
- filerep,
- MPI_INFO_NULL)) != MPI_SUCCESS) {
+ if ((mpi_err = MPI_File_set_view(fh, mpi_off, MPI_BYTE, filetype, filerep,
+ MPI_INFO_NULL)) != MPI_SUCCESS) {
MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
- printf("MPI_File_set_view failed (%s)\n", mpi_err_str);
+ HDprintf("MPI_File_set_view failed (%s)\n", mpi_err_str);
return 1;
} /* end if */
/* Collectively write into the file */
- if ((mpi_err = MPI_File_write_at_all(fh,
- mpi_off,
- writedata,
- bufcount,
- buftype,
- &mpi_stat)) != MPI_SUCCESS) {
+ if ((mpi_err = MPI_File_write_at_all(fh, mpi_off, writedata, bufcount,
+ buftype, &mpi_stat)) != MPI_SUCCESS) {
MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
- printf("MPI_File_write_at offset(%ld), bytes (%d), failed (%s)\n",
- (long) mpi_off, bufcount, mpi_err_str);
+ HDprintf("MPI_File_write_at offset(%ld), bytes (%d), failed (%s)\n",
+ (long) mpi_off, bufcount, mpi_err_str);
return 1;
} /* end if */
/* Close the file */
if ((mpi_err = MPI_File_close(&fh)) != MPI_SUCCESS) {
MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
- printf("MPI_File_close failed. \n");
+ HDprintf("MPI_File_close failed. \n");
return 1;
} /* end if */
/* Perform a barrier */
mpi_err = MPI_Barrier(MPI_COMM_WORLD);
- if(retcode != 0) {
- if(mpi_rank == 0) {
- printf("special collective IO is NOT working at this platform\n");
- printf(" Please report to help@hdfgroup.org about this problem.\n");
+ if (retcode != 0) {
+ if (mpi_rank == 0) {
+ HDprintf("special collective IO is NOT working at this platform\n");
+ HDprintf(" Please report to help@hdfgroup.org about this problem.\n");
} /* end if */
retcode = 1;
} /* end if */
@@ -998,93 +980,86 @@ test_mpio_special_collective(char *filename)
/*
* parse the command line options
*/
-static int
-parse_options(int argc, char **argv)
-{
- while (--argc){
- if (**(++argv) != '-'){
- break;
- }else{
- switch(*(*argv+1)){
- case 'v': if (*((*argv+1)+1))
- ParseTestVerbosity((*argv+1)+1);
- else
- SetTestVerbosity(VERBO_MED);
- break;
- case 'f': if (--argc < 1) {
- nerrors++;
- return(1);
- }
- if (**(++argv) == '-') {
- nerrors++;
- return(1);
- }
- paraprefix = *argv;
- break;
- case 'h': /* print help message--return with nerrors set */
- return(1);
- default: nerrors++;
- return(1);
- }
- }
+static int parse_options(int argc, char **argv) {
+ while (--argc) {
+ if (**(++argv) != '-') {
+ break;
+ } else {
+ switch (*(*argv + 1)) {
+ case 'v':
+ if (*((*argv + 1) + 1))
+ ParseTestVerbosity((*argv + 1) + 1);
+ else
+ SetTestVerbosity(VERBO_MED);
+ break;
+ case 'f':
+ if (--argc < 1) {
+ nerrors++;
+ return (1);
+ }
+ if (**(++argv) == '-') {
+ nerrors++;
+ return (1);
+ }
+ paraprefix = *argv;
+ break;
+ case 'h': /* print help message--return with nerrors set */
+ return (1);
+ default:
+ nerrors++;
+ return (1);
+ }
+ }
} /*while*/
/* compose the test filenames */
{
- int i, n;
- hid_t plist;
-
- plist = H5Pcreate (H5P_FILE_ACCESS);
- H5Pset_fapl_mpio(plist, MPI_COMM_WORLD, MPI_INFO_NULL);
- n = sizeof(FILENAME)/sizeof(FILENAME[0]) - 1; /* exclude the NULL */
-
- for (i=0; i < n; i++)
- if (h5_fixname(FILENAME[i],plist,filenames[i],sizeof(filenames[i]))
- == NULL){
- printf("h5_fixname failed\n");
- nerrors++;
- return(1);
- }
- H5Pclose(plist);
- if (VERBOSE_MED){
- printf("Test filenames are:\n");
- for (i=0; i < n; i++)
- printf(" %s\n", filenames[i]);
- }
+ int i, n;
+ hid_t plist;
+
+ plist = H5Pcreate(H5P_FILE_ACCESS);
+ H5Pset_fapl_mpio(plist, MPI_COMM_WORLD, MPI_INFO_NULL);
+ n = sizeof(FILENAME) / sizeof(FILENAME[0]) - 1; /* exclude the NULL */
+
+ for (i = 0; i < n; i++)
+ if (h5_fixname(FILENAME[i], plist, filenames[i],
+ sizeof(filenames[i])) == NULL) {
+ HDprintf("h5_fixname failed\n");
+ nerrors++;
+ return (1);
+ }
+ H5Pclose(plist);
+ if (VERBOSE_MED) {
+ HDprintf("Test filenames are:\n");
+ for (i = 0; i < n; i++)
+ HDprintf(" %s\n", filenames[i]);
+ }
}
- return(0);
+ return (0);
}
-
/*
* Show command usage
*/
-static void
-usage(void)
-{
- printf("Usage: t_mpi [-v<verbosity>] [-f <prefix>]\n");
- printf("\t-v<verbosity>\tset verbose level (0-9,l,m,h)\n");
- printf("\t-f <prefix>\tfilename prefix\n");
- printf("\n");
+static void usage(void) {
+ HDprintf("Usage: t_mpi [-v<verbosity>] [-f <prefix>]\n");
+ HDprintf("\t-v<verbosity>\tset verbose level (0-9,l,m,h)\n");
+ HDprintf("\t-f <prefix>\tfilename prefix\n");
+ HDprintf("\n");
}
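For example, a hypothetical four-process run at medium verbosity with a
scratch-directory prefix:

    mpiexec -n 4 ./t_mpi -v m -f /scratch/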
/*
* return the sum of all errors.
*/
-static int
-errors_sum(int nerrs)
-{
+static int errors_sum(int nerrs) {
int temp;
MPI_Allreduce(&nerrs, &temp, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
- return(temp);
+ return (temp);
}
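Typical use, as in main() below: every rank passes its local tally, the
MPI_Allreduce hands the same global sum back to all ranks, and so any rank
can decide pass/fail consistently:

    ret_code = errors_sum(ret_code);
    if (mpi_rank == 0 && ret_code > 0)
        HDprintf("***FAILED with %d total errors\n", ret_code);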
-
-int
-main(int argc, char **argv)
-{
- int mpi_size, mpi_rank; /* mpi variables */
+int main(int argc, char **argv) {
+ int mpi_size, mpi_rank; /* mpi variables */
int ret_code;
MPI_Init(&argc, &argv);
@@ -1092,113 +1067,111 @@ main(int argc, char **argv)
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
/* Attempt to turn off atexit post processing so that in case errors
- * happen during the test and the process is aborted, it will not get
- * hang in the atexit post processing in which it may try to make MPI
- * calls. By then, MPI calls may not work.
- */
- if (H5dont_atexit() < 0){
- printf("Failed to turn off atexit processing. Continue.\n");
+ * happen during the test and the process is aborted, it will not
+ * hang in the atexit post processing, in which it may try to make MPI
+ * calls. By then, MPI calls may not work.
+ */
+ if (H5dont_atexit() < 0) {
+ HDprintf("Failed to turn off atexit processing. Continue.\n");
};
H5open();
- if (parse_options(argc, argv) != 0){
- if (MAINPROCESS)
- usage();
- goto finish;
+ if (parse_options(argc, argv) != 0) {
+ if (MAINPROCESS)
+ usage();
+ goto finish;
}
- if (MAINPROCESS){
- printf("===================================\n");
- printf("MPI functionality tests\n");
- printf("===================================\n");
+ if (MAINPROCESS) {
+ HDprintf("===================================\n");
+ HDprintf("MPI functionality tests\n");
+ HDprintf("===================================\n");
}
if (VERBOSE_MED)
- h5_show_hostname();
+ h5_show_hostname();
- fapl = H5Pcreate (H5P_FILE_ACCESS);
+ fapl = H5Pcreate(H5P_FILE_ACCESS);
H5Pset_fapl_mpio(fapl, MPI_COMM_WORLD, MPI_INFO_NULL);
/* set alarm. */
ALARM_ON;
-
/*=======================================
- * MPIO 1 write Many read test
- *=======================================*/
+ * MPIO 1 write Many read test
+ *=======================================*/
MPI_BANNER("MPIO 1 write Many read test...");
ret_code = test_mpio_1wMr(filenames[0], USENONE);
ret_code = errors_sum(ret_code);
- if (mpi_rank==0 && ret_code > 0){
- printf("***FAILED with %d total errors\n", ret_code);
- nerrors += ret_code;
+ if (mpi_rank == 0 && ret_code > 0) {
+ HDprintf("***FAILED with %d total errors\n", ret_code);
+ nerrors += ret_code;
}
/* test atomicity and file sync in high verbose mode only */
/* since they often hang when broken and PHDF5 does not use them. */
- if (VERBOSE_HI){
- MPI_BANNER("MPIO 1 write Many read test with atomicity...");
- ret_code = test_mpio_1wMr(filenames[0], USEATOM);
- ret_code = errors_sum(ret_code);
- if (mpi_rank==0 && ret_code > 0){
- printf("***FAILED with %d total errors\n", ret_code);
- nerrors += ret_code;
- }
-
- MPI_BANNER("MPIO 1 write Many read test with file sync...");
- ret_code = test_mpio_1wMr(filenames[0], USEFSYNC);
- ret_code = errors_sum(ret_code);
- if (mpi_rank==0 && ret_code > 0){
- printf("***FAILED with %d total errors\n", ret_code);
- nerrors += ret_code;
- }
- }
+ if (VERBOSE_HI) {
+ MPI_BANNER("MPIO 1 write Many read test with atomicity...");
+ ret_code = test_mpio_1wMr(filenames[0], USEATOM);
+ ret_code = errors_sum(ret_code);
+ if (mpi_rank == 0 && ret_code > 0) {
+ HDprintf("***FAILED with %d total errors\n", ret_code);
+ nerrors += ret_code;
+ }
+ MPI_BANNER("MPIO 1 write Many read test with file sync...");
+ ret_code = test_mpio_1wMr(filenames[0], USEFSYNC);
+ ret_code = errors_sum(ret_code);
+ if (mpi_rank == 0 && ret_code > 0) {
+ HDprintf("***FAILED with %d total errors\n", ret_code);
+ nerrors += ret_code;
+ }
+ }
/*=======================================
- * MPIO MPIO File size range test
- *=======================================*/
+ * MPIO File size range test
+ *=======================================*/
MPI_BANNER("MPIO File size range test...");
#ifndef H5_HAVE_WIN32_API
ret_code = test_mpio_gb_file(filenames[0]);
ret_code = errors_sum(ret_code);
- if (mpi_rank==0 && ret_code > 0){
- printf("***FAILED with %d total errors\n", ret_code);
- nerrors += ret_code;
+ if (mpi_rank == 0 && ret_code > 0) {
+ HDprintf("***FAILED with %d total errors\n", ret_code);
+ nerrors += ret_code;
}
#else
if (mpi_rank==0)
- printf(" will be skipped on Windows (JIRA HDDFV-8064)\n");
+ HDprintf(" will be skipped on Windows (JIRA HDDFV-8064)\n");
#endif
/*=======================================
- * MPIO independent overlapping writes
- *=======================================*/
+ * MPIO independent overlapping writes
+ *=======================================*/
MPI_BANNER("MPIO independent overlapping writes...");
ret_code = test_mpio_overlap_writes(filenames[0]);
ret_code = errors_sum(ret_code);
- if (mpi_rank==0 && ret_code > 0){
- printf("***FAILED with %d total errors\n", ret_code);
- nerrors += ret_code;
+ if (mpi_rank == 0 && ret_code > 0) {
+ HDprintf("***FAILED with %d total errors\n", ret_code);
+ nerrors += ret_code;
}
/*=======================================
- * MPIO complicated derived datatype test
- *=======================================*/
+ * MPIO complicated derived datatype test
+ *=======================================*/
MPI_BANNER("MPIO complicated derived datatype test...");
ret_code = test_mpio_derived_dtype(filenames[0]);
ret_code = errors_sum(ret_code);
- if (mpi_rank==0 && ret_code > 0){
- printf("***FAILED with %d total errors\n", ret_code);
- nerrors += ret_code;
+ if (mpi_rank == 0 && ret_code > 0) {
+ HDprintf("***FAILED with %d total errors\n", ret_code);
+ nerrors += ret_code;
}
/*=======================================
- * MPIO special collective IO test
- *=======================================*/
+ * MPIO special collective IO test
+ *=======================================*/
if (mpi_size < 4) {
MPI_BANNER("MPIO special collective io test SKIPPED.");
if (mpi_rank == 0)
- printf("This test needs at least four processes to run.\n");
+ HDprintf("This test needs at least four processes to run.\n");
ret_code = 0;
goto sc_finish;
} /* end if */
@@ -1206,28 +1179,26 @@ main(int argc, char **argv)
MPI_BANNER("MPIO special collective io test...");
ret_code = test_mpio_special_collective(filenames[0]);
-sc_finish:
- ret_code = errors_sum(ret_code);
- if (mpi_rank==0 && ret_code > 0){
- printf("***FAILED with %d total errors\n", ret_code);
- nerrors += ret_code;
+ sc_finish:
+ ret_code = errors_sum(ret_code);
+ if (mpi_rank == 0 && ret_code > 0) {
+ HDprintf("***FAILED with %d total errors\n", ret_code);
+ nerrors += ret_code;
}
-
-finish:
+ finish:
/* make sure all processes are finished before final report, cleanup
- * and exit.
- */
+ * and exit.
+ */
MPI_Barrier(MPI_COMM_WORLD);
- if (MAINPROCESS){ /* only process 0 reports */
- printf("===================================\n");
- if (nerrors){
- printf("***MPI tests detected %d errors***\n", nerrors);
- }
- else{
- printf("MPI tests finished with no errors\n");
- }
- printf("===================================\n");
+ if (MAINPROCESS) { /* only process 0 reports */
+ HDprintf("===================================\n");
+ if (nerrors) {
+ HDprintf("***MPI tests detected %d errors***\n", nerrors);
+ } else {
+ HDprintf("MPI tests finished with no errors\n");
+ }
+ HDprintf("===================================\n");
}
/* turn off alarm */
@@ -1240,6 +1211,6 @@ finish:
MPI_Finalize();
/* cannot just return (nerrors) because exit code is limited to 1byte */
- return(nerrors!=0);
+ return (nerrors != 0);
}
diff --git a/testpar/t_pflush1.c b/testpar/t_pflush1.c
index 0782f3d..27b561b 100644
--- a/testpar/t_pflush1.c
+++ b/testpar/t_pflush1.c
@@ -15,13 +15,12 @@
* Programmer: Leon Arber <larber@uiuc.edu>
* Sept. 28, 2006.
*
- * Purpose: This is the first half of a two-part test that makes sure
- * that a file can be read after a parallel application crashes as long
- * as the file was flushed first. We simulate a crash by
- * calling _exit(0) since this doesn't flush HDF5 caches but
- * still exits with success.
+ * Purpose: This is the first half of a two-part test that makes sure
+ * that a file can be read after a parallel application crashes
+ * as long as the file was flushed first. We simulate a crash by
+ *              calling _exit(), which terminates the process immediately
+ *              without flushing the HDF5 caches.
*/
-#include <mpi.h>
#include "h5test.h"
const char *FILENAME[] = {
@@ -30,171 +29,190 @@ const char *FILENAME[] = {
NULL
};
-static double the_data[100][100];
+static int data_g[100][100];
+#define N_GROUPS 100
+
+
/*-------------------------------------------------------------------------
- * Function: create_file
- *
- * Purpose: Creates file used in part 1 of the test
+ * Function: create_test_file
*
- * Return: Success: 0
+ * Purpose: Creates the file used in part 1 of the test
*
- * Failure: 1
+ * Return: Success: A valid file ID
+ * Failure: H5I_INVALID_HID
*
- * Programmer: Leon Arber
+ * Programmer: Leon Arber
* Sept. 26, 2006
*
- * Modifications:
- *
*-------------------------------------------------------------------------
*/
static hid_t
-create_file(char* name, hid_t fapl)
+create_test_file(char *name, hid_t fapl_id)
{
- hid_t file, dcpl, space, dset, groups, grp, plist;
- hsize_t ds_size[2] = {100, 100};
- hsize_t ch_size[2] = {5, 5};
- hsize_t i, j;
-
-
-
- if((file=H5Fcreate(name, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0) goto error;
+ hid_t fid = H5I_INVALID_HID;
+ hid_t dcpl_id = H5I_INVALID_HID;
+ hid_t sid = H5I_INVALID_HID;
+ hid_t did = H5I_INVALID_HID;
+ hid_t top_level_gid = H5I_INVALID_HID;
+ hid_t gid = H5I_INVALID_HID;
+ hid_t dxpl_id = H5I_INVALID_HID;
+ hsize_t dims[2] = {100, 100};
+ hsize_t chunk_dims[2] = {5, 5};
+ hsize_t i, j;
+
+ if((fid = H5Fcreate(name, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id)) < 0)
+ goto error;
/* Create a chunked dataset */
- if((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0) goto error;
- if(H5Pset_chunk(dcpl, 2, ch_size) < 0) goto error;
- if((space = H5Screate_simple(2, ds_size, NULL)) < 0) goto error;
- if((dset = H5Dcreate2(file, "dset", H5T_NATIVE_FLOAT, space, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0)
- goto error;
-
- plist = H5Pcreate(H5P_DATASET_XFER);
- H5Pset_dxpl_mpio(plist, H5FD_MPIO_COLLECTIVE);
-
+ if((dcpl_id = H5Pcreate(H5P_DATASET_CREATE)) < 0)
+ goto error;
+ if(H5Pset_chunk(dcpl_id, 2, chunk_dims) < 0)
+ goto error;
+ if((sid = H5Screate_simple(2, dims, NULL)) < 0)
+ goto error;
+ if((did = H5Dcreate2(fid, "dset", H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0)
+ goto error;
+
+ if((dxpl_id = H5Pcreate(H5P_DATASET_XFER)) < 0)
+ goto error;
+ if(H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE) < 0)
+ goto error;
/* Write some data */
- for(i = 0; i < ds_size[0]; i++) {
- /*
- * The extra cast in the following statement is a bug workaround
- * for the Win32 version 5.0 compiler.
- * 1998-11-06 ptl
- */
- for(j = 0; j < ds_size[1]; j++)
- the_data[i][j] = (double)(hssize_t)i/(hssize_t)(j+1);
- }
- if(H5Dwrite(dset, H5T_NATIVE_DOUBLE, space, space, plist, the_data) < 0) goto error;
+ for(i = 0; i < dims[0]; i++)
+ for(j = 0; j < dims[1]; j++)
+ data_g[i][j] = (int)(i + (i * j) + j);
+
+ if(H5Dwrite(did, H5T_NATIVE_INT, sid, sid, dxpl_id, data_g) < 0)
+ goto error;
/* Create some groups */
- if((groups = H5Gcreate2(file, "some_groups", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) goto error;
- for(i = 0; i < 100; i++) {
- sprintf(name, "grp%02u", (unsigned)i);
- if((grp = H5Gcreate2(groups, name, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) goto error;
- if(H5Gclose(grp) < 0) goto error;
+ if((top_level_gid = H5Gcreate2(fid, "some_groups", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0)
+ goto error;
+ for(i = 0; i < N_GROUPS; i++) {
+ HDsprintf(name, "grp%02u", (unsigned)i);
+ if((gid = H5Gcreate2(top_level_gid, name, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0)
+ goto error;
+ if(H5Gclose(gid) < 0)
+ goto error;
}
- return file;
+ return fid;
error:
- HD_exit(1);
-}
+ return H5I_INVALID_HID;
+} /* end create_test_file() */
+
/*-------------------------------------------------------------------------
- * Function: main
+ * Function: main
*
- * Purpose: Part 1 of a two-part H5Fflush() test.
+ * Purpose: Part 1 of a two-part parallel H5Fflush() test.
*
- * Return: Success: 0
+ * Return: EXIT_FAILURE (always)
*
- * Failure: 1
- *
- * Programmer: Robb Matzke
+ * Programmer: Robb Matzke
* Friday, October 23, 1998
*
- * Modifications:
- * Leon Arber
- * Sept. 26, 2006, expand test to check for failure if H5Fflush is not called.
- *
- *
*-------------------------------------------------------------------------
*/
int
main(int argc, char* argv[])
{
- hid_t file1, file2, fapl;
- MPI_File *mpifh_p = NULL;
- char name[1024];
- const char *envval = NULL;
- int mpi_size, mpi_rank;
- MPI_Comm comm = MPI_COMM_WORLD;
- MPI_Info info = MPI_INFO_NULL;
+ hid_t fid1 = H5I_INVALID_HID;
+ hid_t fid2 = H5I_INVALID_HID;
+ hid_t fapl_id = H5I_INVALID_HID;
+ MPI_File *mpifh_p = NULL;
+ char name[1024];
+ const char *envval = NULL;
+ int mpi_size;
+ int mpi_rank;
+ MPI_Comm comm = MPI_COMM_WORLD;
+ MPI_Info info = MPI_INFO_NULL;
MPI_Init(&argc, &argv);
MPI_Comm_size(comm, &mpi_size);
MPI_Comm_rank(comm, &mpi_rank);
- fapl = H5Pcreate(H5P_FILE_ACCESS);
- H5Pset_fapl_mpio(fapl, comm, info);
-
if(mpi_rank == 0)
- TESTING("H5Fflush (part1)");
+ TESTING("H5Fflush (part1)");
+
+ /* Don't run using the split VFD */
envval = HDgetenv("HDF5_DRIVER");
if(envval == NULL)
envval = "nomatch";
- if(HDstrcmp(envval, "split")) {
- /* Create the file */
- h5_fixname(FILENAME[0], fapl, name, sizeof name);
- file1 = create_file(name, fapl);
- /* Flush and exit without closing the library */
- if(H5Fflush(file1, H5F_SCOPE_GLOBAL) < 0) goto error;
-
- /* Create the other file which will not be flushed */
- h5_fixname(FILENAME[1], fapl, name, sizeof name);
- file2 = create_file(name, fapl);
-
-
- if(mpi_rank == 0)
- PASSED();
- fflush(stdout);
- fflush(stderr);
- } /* end if */
- else {
- SKIPPED();
- puts(" Test not compatible with current Virtual File Driver");
- } /* end else */
-
- /*
- * Some systems like AIX do not like files not closed when MPI_Finalize
+
+ if(!HDstrcmp(envval, "split")) {
+ if(mpi_rank == 0) {
+ SKIPPED();
+ HDputs(" Test not compatible with current Virtual File Driver");
+ }
+ MPI_Finalize();
+ HDexit(EXIT_FAILURE);
+ }
+
+ if((fapl_id = H5Pcreate(H5P_FILE_ACCESS)) < 0)
+ goto error;
+ if(H5Pset_fapl_mpio(fapl_id, comm, info) < 0)
+ goto error;
+
+ /* Create the file */
+ h5_fixname(FILENAME[0], fapl_id, name, sizeof(name));
+ if((fid1 = create_test_file(name, fapl_id)) < 0)
+ goto error;
+ /* Flush and exit without closing the library */
+ if(H5Fflush(fid1, H5F_SCOPE_GLOBAL) < 0)
+ goto error;
+
+ /* Create the other file which will not be flushed */
+ h5_fixname(FILENAME[1], fapl_id, name, sizeof(name));
+ if((fid2 = create_test_file(name, fapl_id)) < 0)
+ goto error;
+
+ if(mpi_rank == 0)
+ PASSED();
+
+ HDfflush(stdout);
+ HDfflush(stderr);
+
+ /* Some systems like AIX do not like files not being closed when MPI_Finalize
 * is called.  So, we need to get the MPI file handles and close them by hand.
 * Then the _exit() call is still needed to stop atexit() processing from happening on some systems.
* Note that MPIO VFD returns the address of the file-handle in the VFD struct
* because MPI_File_close wants to modify the file-handle variable.
*/
- /* close file1 */
- if(H5Fget_vfd_handle(file1, fapl, (void **)&mpifh_p) < 0) {
- printf("H5Fget_vfd_handle for file1 failed\n");
- goto error;
- } /* end if */
- if(MPI_File_close(mpifh_p) != MPI_SUCCESS) {
- printf("MPI_File_close for file1 failed\n");
- goto error;
- } /* end if */
- /* close file2 */
- if(H5Fget_vfd_handle(file2, fapl, (void **)&mpifh_p) < 0) {
- printf("H5Fget_vfd_handle for file2 failed\n");
- goto error;
- } /* end if */
- if(MPI_File_close(mpifh_p) != MPI_SUCCESS) {
- printf("MPI_File_close for file2 failed\n");
- goto error;
- } /* end if */
-
- fflush(stdout);
- fflush(stderr);
- HD_exit(0);
+ /* Close file 1 */
+ if(H5Fget_vfd_handle(fid1, fapl_id, (void **)&mpifh_p) < 0)
+ goto error;
+ if(MPI_File_close(mpifh_p) != MPI_SUCCESS)
+ goto error;
+
+ /* Close file 2 */
+ if(H5Fget_vfd_handle(fid2, fapl_id, (void **)&mpifh_p) < 0)
+ goto error;
+ if(MPI_File_close(mpifh_p) != MPI_SUCCESS)
+ goto error;
+
+ HDfflush(stdout);
+ HDfflush(stderr);
+
+ /* Always exit with a failure code!
+ *
+ * In accordance with the standard, not having all processes
+ * call MPI_Finalize() can be considered an error, so mpiexec
+ * et al. may indicate failure on return. It's much easier to
+ * always ignore the failure condition than to handle some
+ * platforms returning success and others failure.
+ */
+ HD_exit(EXIT_FAILURE);
error:
- fflush(stdout);
- fflush(stderr);
- HD_exit(1);
-}
+ HDfflush(stdout);
+ HDfflush(stderr);
+ HDprintf("*** ERROR ***\n");
+ HDprintf("THERE WAS A REAL ERROR IN t_pflush1.\n");
+ HD_exit(EXIT_FAILURE);
+} /* end main() */
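Stripped of its error handling, the flow t_pflush1.c implements is: create two
files, flush only the first, then leave via _exit() so that no HDF5 or atexit()
shutdown code runs. A condensed sketch of that pattern (illustrative only; the
real test also closes the raw MPI file handles first, as shown above, and the
file names here are placeholders):

    hid_t fid_flushed   = create_test_file("file_to_flush",   fapl_id);
    hid_t fid_unflushed = create_test_file("file_left_dirty", fapl_id);

    /* Push all cached metadata and raw data for the first file to disk */
    H5Fflush(fid_flushed, H5F_SCOPE_GLOBAL);

    /* Simulate a crash: _exit() skips atexit() handlers and HDF5's normal
     * shutdown flush, so only the explicitly flushed file should be
     * readable by part 2 of the test */
    HD_exit(EXIT_FAILURE);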
diff --git a/testpar/t_pflush2.c b/testpar/t_pflush2.c
index 2051f4e..f4589c8 100644
--- a/testpar/t_pflush2.c
+++ b/testpar/t_pflush2.c
@@ -30,116 +30,124 @@ const char *FILENAME[] = {
NULL
};
-static double the_data[100][100];
+static int data_g[100][100];
+#define N_GROUPS 100
/*-------------------------------------------------------------------------
- * Function: check_file
+ * Function: check_test_file
*
- * Purpose: Part 2 of a two-part H5Fflush() test.
+ * Purpose: Part 2 of a two-part H5Fflush() test.
*
- * Return: Success: 0
+ * Return: SUCCEED/FAIL
*
- * Failure: 1
- *
- * Programmer: Leon Arber
+ * Programmer: Leon Arber
* Sept. 26, 2006.
*
*-------------------------------------------------------------------------
*/
-static int
-check_file(char* name, hid_t fapl)
+static herr_t
+check_test_file(char* name, hid_t fapl_id)
{
- hid_t file, space, dset, groups, grp, plist;
- hsize_t ds_size[2];
- double error;
- hsize_t i, j;
-
- plist = H5Pcreate(H5P_DATASET_XFER);
- H5Pset_dxpl_mpio(plist, H5FD_MPIO_COLLECTIVE);
- if((file = H5Fopen(name, H5F_ACC_RDONLY, fapl)) < 0) goto error;
+ hid_t fid = H5I_INVALID_HID;
+ hid_t sid = H5I_INVALID_HID;
+ hid_t did = H5I_INVALID_HID;
+ hid_t top_level_gid = H5I_INVALID_HID;
+ hid_t gid = H5I_INVALID_HID;
+ hid_t dxpl_id = H5I_INVALID_HID;
+ hsize_t dims[2];
+ int val;
+ hsize_t i, j;
+
+ if((dxpl_id = H5Pcreate(H5P_DATASET_XFER)) < 0)
+ goto error;
+ if(H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE) < 0)
+ goto error;
+ if((fid = H5Fopen(name, H5F_ACC_RDONLY, fapl_id)) < 0)
+ goto error;
/* Open the dataset */
- if((dset = H5Dopen2(file, "dset", H5P_DEFAULT)) < 0) goto error;
- if((space = H5Dget_space(dset)) < 0) goto error;
- if(H5Sget_simple_extent_dims(space, ds_size, NULL) < 0) goto error;
- assert(100==ds_size[0] && 100==ds_size[1]);
+ if((did = H5Dopen2(fid, "dset", H5P_DEFAULT)) < 0)
+ goto error;
+ if((sid = H5Dget_space(did)) < 0)
+ goto error;
+ if(H5Sget_simple_extent_dims(sid, dims, NULL) < 0)
+ goto error;
+ HDassert(100 == dims[0] && 100 == dims[1]);
/* Read some data */
- if (H5Dread(dset, H5T_NATIVE_DOUBLE, space, space, plist,
- the_data) < 0) goto error;
- for (i=0; i<ds_size[0]; i++) {
- for (j=0; j<ds_size[1]; j++) {
- /*
- * The extra cast in the following statement is a bug workaround
- * for the Win32 version 5.0 compiler.
- * 1998-11-06 ptl
- */
- error = fabs(the_data[i][j]-(double)(hssize_t)i/((hssize_t)j+1));
- if (error>0.0001) {
- H5_FAILED();
- printf(" dset[%lu][%lu] = %g\n",
- (unsigned long)i, (unsigned long)j, the_data[i][j]);
- printf(" should be %g\n",
- (double)(hssize_t)i/(hssize_t)(j+1));
- goto error;
- }
- }
+ if(H5Dread(did, H5T_NATIVE_INT, sid, sid, dxpl_id, data_g) < 0)
+ goto error;
+ for(i = 0; i < dims[0]; i++) {
+ for(j = 0; j < dims[1]; j++) {
+ val = (int)(i + (i * j) + j);
+ if(data_g[i][j] != val) {
+ H5_FAILED();
+ HDprintf(" data_g[%lu][%lu] = %d\n", (unsigned long)i, (unsigned long)j, data_g[i][j]);
+                HDprintf("    should be %d\n", val);
+                goto error;
+            }
+ }
}
/* Open some groups */
- if((groups = H5Gopen2(file, "some_groups", H5P_DEFAULT)) < 0) goto error;
- for(i = 0; i < 100; i++) {
- sprintf(name, "grp%02u", (unsigned)i);
- if((grp = H5Gopen2(groups, name, H5P_DEFAULT)) < 0) goto error;
- if(H5Gclose(grp) < 0) goto error;
+ if((top_level_gid = H5Gopen2(fid, "some_groups", H5P_DEFAULT)) < 0)
+ goto error;
+ for(i = 0; i < N_GROUPS; i++) {
+ HDsprintf(name, "grp%02u", (unsigned)i);
+ if((gid = H5Gopen2(top_level_gid, name, H5P_DEFAULT)) < 0)
+ goto error;
+ if(H5Gclose(gid) < 0)
+ goto error;
}
- if(H5Gclose(groups) < 0) goto error;
- if(H5Dclose(dset) < 0) goto error;
- if(H5Fclose(file) < 0) goto error;
- if(H5Pclose(plist) < 0) goto error;
- if(H5Sclose(space) < 0) goto error;
+ if(H5Gclose(top_level_gid) < 0)
+ goto error;
+ if(H5Dclose(did) < 0)
+ goto error;
+ if(H5Fclose(fid) < 0)
+ goto error;
+ if(H5Pclose(dxpl_id) < 0)
+ goto error;
+ if(H5Sclose(sid) < 0)
+ goto error;
- return 0;
+ return SUCCEED;
error:
H5E_BEGIN_TRY {
- H5Pclose(plist);
- H5Gclose(groups);
- H5Dclose(dset);
- H5Fclose(file);
- H5Sclose(space);
+ H5Pclose(dxpl_id);
+ H5Gclose(top_level_gid);
+ H5Dclose(did);
+ H5Fclose(fid);
+ H5Sclose(sid);
+ H5Gclose(gid);
} H5E_END_TRY;
- return 1;
-}
+ return FAIL;
+} /* end check_test_file() */
/*-------------------------------------------------------------------------
- * Function: main
+ * Function: main
*
- * Purpose: Part 2 of a two-part H5Fflush() test.
+ * Purpose: Part 2 of a two-part H5Fflush() test.
*
- * Return: Success: 0
+ * Return:      EXIT_SUCCESS/EXIT_FAILURE
*
- * Failure: 1
- *
- * Programmer: Robb Matzke
+ * Programmer: Robb Matzke
* Friday, October 23, 1998
*
- * Modifications:
- * Leon Arber
- * Sept. 26, 2006, expand to check for case where the was file not flushed.
- *
*-------------------------------------------------------------------------
*/
int
-main(int argc, char* argv[])
+main(int argc, char *argv[])
{
+ hid_t fapl_id1 = H5I_INVALID_HID;
+ hid_t fapl_id2 = H5I_INVALID_HID;
H5E_auto2_t func;
- char name[1024];
+ char name[1024];
const char *envval = NULL;
- int mpi_size, mpi_rank;
+ int mpi_size;
+ int mpi_rank;
MPI_Comm comm = MPI_COMM_WORLD;
MPI_Info info = MPI_INFO_NULL;
@@ -148,69 +156,70 @@ main(int argc, char* argv[])
MPI_Comm_rank(comm, &mpi_rank);
if(mpi_rank == 0)
- TESTING("H5Fflush (part2 with flush)");
+ TESTING("H5Fflush (part2 with flush)");
- /* Don't run this test using the core or split file drivers */
+ /* Don't run using the split VFD */
envval = HDgetenv("HDF5_DRIVER");
- if (envval == NULL)
+ if(envval == NULL)
envval = "nomatch";
- if (HDstrcmp(envval, "core") && HDstrcmp(envval, "split")) {
- hid_t fapl1, fapl2;
-
- fapl1 = H5Pcreate(H5P_FILE_ACCESS);
- H5Pset_fapl_mpio(fapl1, comm, info);
-
- fapl2 = H5Pcreate(H5P_FILE_ACCESS);
- H5Pset_fapl_mpio(fapl2, comm, info);
-
- /* Check the case where the file was flushed */
- h5_fixname(FILENAME[0], fapl1, name, sizeof name);
- if(check_file(name, fapl1))
- {
- H5_FAILED()
- goto error;
- }
- else if(mpi_rank == 0)
- {
- PASSED()
- }
-
- /* Check the case where the file was not flushed. This should give an error
- * so we turn off the error stack temporarily */
- if(mpi_rank == 0)
- TESTING("H5Fflush (part2 without flush)");
- H5Eget_auto2(H5E_DEFAULT,&func,NULL);
- H5Eset_auto2(H5E_DEFAULT, NULL, NULL);
-
- h5_fixname(FILENAME[1], fapl2, name, sizeof name);
- if(check_file(name, fapl2))
- {
- if(mpi_rank == 0)
- {
- PASSED()
- }
- }
- else
- {
- H5_FAILED()
- goto error;
- }
- H5Eset_auto2(H5E_DEFAULT, func, NULL);
-
-
- h5_clean_files(&FILENAME[0], fapl1);
- h5_clean_files(&FILENAME[1], fapl2);
+
+ if(!HDstrcmp(envval, "split")) {
+ if(mpi_rank == 0) {
+ SKIPPED();
+ HDputs(" Test not compatible with current Virtual File Driver");
+ }
+ MPI_Finalize();
+ HDexit(EXIT_FAILURE);
+ }
+
+ if((fapl_id1 = H5Pcreate(H5P_FILE_ACCESS)) < 0)
+ goto error;
+ if(H5Pset_fapl_mpio(fapl_id1, comm, info) < 0)
+ goto error;
+
+ if((fapl_id2 = H5Pcreate(H5P_FILE_ACCESS)) < 0)
+ goto error;
+ if(H5Pset_fapl_mpio(fapl_id2, comm, info) < 0)
+ goto error;
+
+ /* Check the case where the file was flushed */
+ h5_fixname(FILENAME[0], fapl_id1, name, sizeof(name));
+ if(check_test_file(name, fapl_id1)) {
+ H5_FAILED()
+ goto error;
}
- else
- {
- SKIPPED();
- puts(" Test not compatible with current Virtual File Driver");
+ else if(mpi_rank == 0) {
+ PASSED();
}
+ /* Check the case where the file was not flushed. This should give an error
+ * so we turn off the error stack temporarily.
+ */
+ if(mpi_rank == 0)
+ TESTING("H5Fflush (part2 without flush)");
+    H5Eget_auto2(H5E_DEFAULT, &func, NULL);
+ H5Eset_auto2(H5E_DEFAULT, NULL, NULL);
+
+ h5_fixname(FILENAME[1], fapl_id2, name, sizeof(name));
+ if(check_test_file(name, fapl_id2)) {
+ if(mpi_rank == 0)
+ PASSED();
+ }
+ else {
+ H5_FAILED()
+ goto error;
+ }
+
+ H5Eset_auto2(H5E_DEFAULT, func, NULL);
+
+ h5_clean_files(&FILENAME[0], fapl_id1);
+ h5_clean_files(&FILENAME[1], fapl_id2);
+
MPI_Finalize();
- return 0;
- error:
- return 1;
-}
+ HDexit(EXIT_SUCCESS);
+
+error:
+ HDexit(EXIT_FAILURE);
+} /* end main() */
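The "part2 without flush" check above depends on temporarily silencing HDF5's
automatic error reporting, since the expected failure on the unflushed file
would otherwise print a distracting error stack. That idiom in isolation, as a
sketch (t_pflush2.c passes NULL for the client-data pointer it does not need):

    H5E_auto2_t  saved_func;
    void        *saved_data;

    /* Save the current automatic error handler, then disable it */
    H5Eget_auto2(H5E_DEFAULT, &saved_func, &saved_data);
    H5Eset_auto2(H5E_DEFAULT, NULL, NULL);

    /* ... attempt the operation that is expected to fail ... */

    /* Restore the saved handler so later errors are reported again */
    H5Eset_auto2(H5E_DEFAULT, saved_func, saved_data);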
diff --git a/testpar/t_ph5basic.c b/testpar/t_ph5basic.c
index 574591c..73d262e 100644
--- a/testpar/t_ph5basic.c
+++ b/testpar/t_ph5basic.c
@@ -22,17 +22,15 @@
* Function: test_fapl_mpio_dup
*
* Purpose: Test if fapl_mpio property list keeps a duplicate of the
- * communicator and INFO objects given when set; and returns
- * duplicates of its components when H5Pget_fapl_mpio is called.
+ * communicator and INFO objects given when set; and returns
+ * duplicates of its components when H5Pget_fapl_mpio is called.
*
- * Return: Success: None
- *
- * Failure: Abort
+ * Return: Success: None
+ * Failure: Abort
*
* Programmer: Albert Cheng
* January 9, 2003
*
- * Modifications:
*-------------------------------------------------------------------------
*/
void
@@ -44,43 +42,43 @@ test_fapl_mpio_dup(void)
int mpi_size_tmp, mpi_rank_tmp;
MPI_Info info = MPI_INFO_NULL;
MPI_Info info_tmp = MPI_INFO_NULL;
- int mrc; /* MPI return value */
- hid_t acc_pl; /* File access properties */
- herr_t ret; /* hdf5 return value */
+ int mrc; /* MPI return value */
+ hid_t acc_pl; /* File access properties */
+ herr_t ret; /* HDF5 return value */
int nkeys, nkeys_tmp;
if (VERBOSE_MED)
- printf("Verify fapl_mpio duplicates communicator and INFO objects\n");
+ HDprintf("Verify fapl_mpio duplicates communicator and INFO objects\n");
/* set up MPI parameters */
MPI_Comm_size(MPI_COMM_WORLD,&mpi_size);
MPI_Comm_rank(MPI_COMM_WORLD,&mpi_rank);
if (VERBOSE_MED)
- printf("rank/size of MPI_COMM_WORLD are %d/%d\n", mpi_rank, mpi_size);
+ HDprintf("rank/size of MPI_COMM_WORLD are %d/%d\n", mpi_rank, mpi_size);
/* Create a new communicator that has the same processes as MPI_COMM_WORLD.
 * Use MPI_Comm_split because it is simpler than MPI_Comm_create
*/
mrc = MPI_Comm_split(MPI_COMM_WORLD, 0, 0, &comm);
- VRFY((mrc==MPI_SUCCESS), "MPI_Comm_split");
- MPI_Comm_size(comm,&mpi_size_old);
- MPI_Comm_rank(comm,&mpi_rank_old);
+ VRFY((mrc == MPI_SUCCESS), "MPI_Comm_split");
+ MPI_Comm_size(comm, &mpi_size_old);
+ MPI_Comm_rank(comm, &mpi_rank_old);
if (VERBOSE_MED)
- printf("rank/size of comm are %d/%d\n", mpi_rank_old, mpi_size_old);
+ HDprintf("rank/size of comm are %d/%d\n", mpi_rank_old, mpi_size_old);
/* create a new INFO object with some trivial information. */
mrc = MPI_Info_create(&info);
- VRFY((mrc==MPI_SUCCESS), "MPI_Info_create");
+ VRFY((mrc == MPI_SUCCESS), "MPI_Info_create");
mrc = MPI_Info_set(info, "hdf_info_name", "XYZ");
- VRFY((mrc==MPI_SUCCESS), "MPI_Info_set");
- if (MPI_INFO_NULL != info){
- mrc=MPI_Info_get_nkeys(info, &nkeys);
- VRFY((mrc==MPI_SUCCESS), "MPI_Info_get_nkeys");
+ VRFY((mrc == MPI_SUCCESS), "MPI_Info_set");
+ if (MPI_INFO_NULL != info) {
+ mrc = MPI_Info_get_nkeys(info, &nkeys);
+ VRFY((mrc == MPI_SUCCESS), "MPI_Info_get_nkeys");
}
if (VERBOSE_MED)
- h5_dump_info_object(info);
+ h5_dump_info_object(info);
- acc_pl = H5Pcreate (H5P_FILE_ACCESS);
+ acc_pl = H5Pcreate(H5P_FILE_ACCESS);
VRFY((acc_pl >= 0), "H5P_FILE_ACCESS");
ret = H5Pset_fapl_mpio(acc_pl, comm, info);
@@ -92,28 +90,27 @@ test_fapl_mpio_dup(void)
* valid communicator and INFO object.
*/
mrc = MPI_Comm_free(&comm);
- VRFY((mrc==MPI_SUCCESS), "MPI_Comm_free");
- if (MPI_INFO_NULL!=info){
- mrc = MPI_Info_free(&info);
- VRFY((mrc==MPI_SUCCESS), "MPI_Info_free");
+ VRFY((mrc == MPI_SUCCESS), "MPI_Comm_free");
+ if (MPI_INFO_NULL != info) {
+ mrc = MPI_Info_free(&info);
+ VRFY((mrc == MPI_SUCCESS), "MPI_Info_free");
}
ret = H5Pget_fapl_mpio(acc_pl, &comm_tmp, &info_tmp);
VRFY((ret >= 0), "H5Pget_fapl_mpio");
- MPI_Comm_size(comm_tmp,&mpi_size_tmp);
- MPI_Comm_rank(comm_tmp,&mpi_rank_tmp);
+ MPI_Comm_size(comm_tmp, &mpi_size_tmp);
+ MPI_Comm_rank(comm_tmp, &mpi_rank_tmp);
if (VERBOSE_MED)
- printf("After H5Pget_fapl_mpio: rank/size of comm are %d/%d\n",
- mpi_rank_tmp, mpi_size_tmp);
- VRFY((mpi_size_tmp==mpi_size), "MPI_Comm_size");
- VRFY((mpi_rank_tmp==mpi_rank), "MPI_Comm_rank");
- if (MPI_INFO_NULL != info_tmp){
- mrc=MPI_Info_get_nkeys(info_tmp, &nkeys_tmp);
- VRFY((mrc==MPI_SUCCESS), "MPI_Info_get_nkeys");
- VRFY((nkeys_tmp==nkeys), "new and old nkeys equal");
+ HDprintf("After H5Pget_fapl_mpio: rank/size of comm are %d/%d\n", mpi_rank_tmp, mpi_size_tmp);
+ VRFY((mpi_size_tmp == mpi_size), "MPI_Comm_size");
+ VRFY((mpi_rank_tmp == mpi_rank), "MPI_Comm_rank");
+ if (MPI_INFO_NULL != info_tmp) {
+ mrc = MPI_Info_get_nkeys(info_tmp, &nkeys_tmp);
+ VRFY((mrc == MPI_SUCCESS), "MPI_Info_get_nkeys");
+ VRFY((nkeys_tmp == nkeys), "new and old nkeys equal");
}
if (VERBOSE_MED)
- h5_dump_info_object(info_tmp);
+ h5_dump_info_object(info_tmp);
/* Case 2:
* Free the retrieved communicator and INFO object.
@@ -122,23 +119,23 @@ test_fapl_mpio_dup(void)
* Also verify the NULL argument option.
*/
mrc = MPI_Comm_free(&comm_tmp);
- VRFY((mrc==MPI_SUCCESS), "MPI_Comm_free");
- if (MPI_INFO_NULL!=info_tmp){
- mrc = MPI_Info_free(&info_tmp);
- VRFY((mrc==MPI_SUCCESS), "MPI_Info_free");
+ VRFY((mrc == MPI_SUCCESS), "MPI_Comm_free");
+ if (MPI_INFO_NULL != info_tmp) {
+ mrc = MPI_Info_free(&info_tmp);
+ VRFY((mrc == MPI_SUCCESS), "MPI_Info_free");
}
/* check NULL argument options. */
ret = H5Pget_fapl_mpio(acc_pl, &comm_tmp, NULL);
VRFY((ret >= 0), "H5Pget_fapl_mpio Comm only");
mrc = MPI_Comm_free(&comm_tmp);
- VRFY((mrc==MPI_SUCCESS), "MPI_Comm_free");
+ VRFY((mrc == MPI_SUCCESS), "MPI_Comm_free");
ret = H5Pget_fapl_mpio(acc_pl, NULL, &info_tmp);
VRFY((ret >= 0), "H5Pget_fapl_mpio Info only");
- if (MPI_INFO_NULL!=info_tmp){
- mrc = MPI_Info_free(&info_tmp);
- VRFY((mrc==MPI_SUCCESS), "MPI_Info_free");
+ if (MPI_INFO_NULL != info_tmp) {
+ mrc = MPI_Info_free(&info_tmp);
+ VRFY((mrc == MPI_SUCCESS), "MPI_Info_free");
}
ret = H5Pget_fapl_mpio(acc_pl, NULL, NULL);
@@ -148,44 +145,44 @@ test_fapl_mpio_dup(void)
    /* Do not free the returned objects, which are used in the next case. */
ret = H5Pget_fapl_mpio(acc_pl, &comm_tmp, &info_tmp);
VRFY((ret >= 0), "H5Pget_fapl_mpio");
- MPI_Comm_size(comm_tmp,&mpi_size_tmp);
- MPI_Comm_rank(comm_tmp,&mpi_rank_tmp);
+ MPI_Comm_size(comm_tmp, &mpi_size_tmp);
+ MPI_Comm_rank(comm_tmp, &mpi_rank_tmp);
if (VERBOSE_MED)
- printf("After second H5Pget_fapl_mpio: rank/size of comm are %d/%d\n",
- mpi_rank_tmp, mpi_size_tmp);
- VRFY((mpi_size_tmp==mpi_size), "MPI_Comm_size");
- VRFY((mpi_rank_tmp==mpi_rank), "MPI_Comm_rank");
- if (MPI_INFO_NULL != info_tmp){
- mrc=MPI_Info_get_nkeys(info_tmp, &nkeys_tmp);
- VRFY((mrc==MPI_SUCCESS), "MPI_Info_get_nkeys");
- VRFY((nkeys_tmp==nkeys), "new and old nkeys equal");
+ HDprintf("After second H5Pget_fapl_mpio: rank/size of comm are %d/%d\n",
+ mpi_rank_tmp, mpi_size_tmp);
+ VRFY((mpi_size_tmp == mpi_size), "MPI_Comm_size");
+ VRFY((mpi_rank_tmp == mpi_rank), "MPI_Comm_rank");
+ if (MPI_INFO_NULL != info_tmp) {
+ mrc = MPI_Info_get_nkeys(info_tmp, &nkeys_tmp);
+ VRFY((mrc == MPI_SUCCESS), "MPI_Info_get_nkeys");
+ VRFY((nkeys_tmp == nkeys), "new and old nkeys equal");
}
if (VERBOSE_MED)
- h5_dump_info_object(info_tmp);
+ h5_dump_info_object(info_tmp);
/* Case 3:
* Close the property list and verify the retrieved communicator and INFO
* object are still valid.
*/
H5Pclose(acc_pl);
- MPI_Comm_size(comm_tmp,&mpi_size_tmp);
- MPI_Comm_rank(comm_tmp,&mpi_rank_tmp);
+ MPI_Comm_size(comm_tmp, &mpi_size_tmp);
+ MPI_Comm_rank(comm_tmp, &mpi_rank_tmp);
if (VERBOSE_MED)
- printf("After Property list closed: rank/size of comm are %d/%d\n",
- mpi_rank_tmp, mpi_size_tmp);
- if (MPI_INFO_NULL != info_tmp){
- mrc=MPI_Info_get_nkeys(info_tmp, &nkeys_tmp);
- VRFY((mrc==MPI_SUCCESS), "MPI_Info_get_nkeys");
+ HDprintf("After Property list closed: rank/size of comm are %d/%d\n",
+ mpi_rank_tmp, mpi_size_tmp);
+ if (MPI_INFO_NULL != info_tmp) {
+ mrc = MPI_Info_get_nkeys(info_tmp, &nkeys_tmp);
+ VRFY((mrc == MPI_SUCCESS), "MPI_Info_get_nkeys");
}
if (VERBOSE_MED)
- h5_dump_info_object(info_tmp);
+ h5_dump_info_object(info_tmp);
/* clean up */
mrc = MPI_Comm_free(&comm_tmp);
- VRFY((mrc==MPI_SUCCESS), "MPI_Comm_free");
- if (MPI_INFO_NULL!=info_tmp){
- mrc = MPI_Info_free(&info_tmp);
- VRFY((mrc==MPI_SUCCESS), "MPI_Info_free");
+ VRFY((mrc == MPI_SUCCESS), "MPI_Comm_free");
+ if (MPI_INFO_NULL != info_tmp) {
+ mrc = MPI_Info_free(&info_tmp);
+ VRFY((mrc == MPI_SUCCESS), "MPI_Info_free");
}
-}
+} /* end test_fapl_mpio_dup() */
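The property verified above is that H5Pset_fapl_mpio() stores duplicates of the
communicator and INFO object, and that H5Pget_fapl_mpio() returns fresh
duplicates that the caller owns. The contract in sketch form, with error checks
omitted:

    MPI_Comm comm_out;
    MPI_Info info_out;

    H5Pset_fapl_mpio(fapl_id, comm, info);

    /* The fapl keeps its own duplicates, so the originals may be freed */
    MPI_Comm_free(&comm);
    if (MPI_INFO_NULL != info)
        MPI_Info_free(&info);

    /* The getter hands back duplicates that the caller must free in turn */
    H5Pget_fapl_mpio(fapl_id, &comm_out, &info_out);
    MPI_Comm_free(&comm_out);
    if (MPI_INFO_NULL != info_out)
        MPI_Info_free(&info_out);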
diff --git a/testpar/t_pread.c b/testpar/t_pread.c
new file mode 100644
index 0000000..ba4165e
--- /dev/null
+++ b/testpar/t_pread.c
@@ -0,0 +1,1251 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * Copyright by the Board of Trustees of the University of Illinois. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/*
+ * Collective file open optimization tests
+ *
+ */
+
+#include "testpar.h"
+#include "H5Dprivate.h"
+
+/* The collection of files is included below to aid
+ * an external "cleanup" process if required.
+ *
+ * Note that the code below relies on the ordering of this array
+ * since each set of three is used by the tests either to construct
+ * or to read and validate.
+ */
+#define NFILENAME 3
+const char *FILENAMES[NFILENAME + 1]={"reloc_t_pread_data_file",
+ "reloc_t_pread_group_0_file",
+ "reloc_t_pread_group_1_file",
+ NULL};
+#define FILENAME_BUF_SIZE 1024
+
+#define COUNT 1000
+
+#define LIMIT_NPROC 6
+
+hbool_t pass = true;
+static const char *random_hdf5_text =
+"Now is the time for all first-time-users of HDF5 to read their \
+manual or go thru the tutorials!\n\
+While you\'re at it, now is also the time to read up on MPI-IO.";
+
+static const char *hitchhiker_quote =
+"A common mistake that people make when trying to design something\n\
+completely foolproof is to underestimate the ingenuity of complete\n\
+fools.\n";
+
+static int generate_test_file(MPI_Comm comm, int mpi_rank, int group);
+static int test_parallel_read(MPI_Comm comm, int mpi_rank, int mpi_size, int group);
+
+static char *test_argv0 = NULL;
+
+
+/*-------------------------------------------------------------------------
+ * Function: generate_test_file
+ *
+ * Purpose: This function is called to produce an HDF5 data file
+ * whose superblock is relocated to a power-of-2 boundary.
+ *
+ * Since data will be read back and validated, we generate
+ * data in a predictable manner rather than randomly.
+ * For now, we simply use the global mpi_rank of the writing
+ * process as a starting component for the data generation.
+ * Subsequent writes are increments from the initial start
+ * value.
+ *
+ * In the overall scheme of running the test, we'll call
+ * this function twice: first as a collection of all MPI
+ * processes and then a second time with the processes split
+ *              more or less in half. Each subgroup will operate
+ *              collectively on its assigned file.  This split into
+ *              subgroups validates that parallel groups can successfully
+ *              open and read data independently from the other parallel
+ * operations taking place.
+ *
+ * Return: Success: 0
+ *
+ * Failure: 1
+ *
+ * Programmer: Richard Warren
+ * 10/1/17
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+static int
+generate_test_file( MPI_Comm comm, int mpi_rank, int group_id )
+{
+ int header = -1;
+ const char *fcn_name = "generate_test_file()";
+ const char *failure_mssg = NULL;
+ const char *group_filename = NULL;
+ char data_filename[FILENAME_BUF_SIZE];
+ int file_index = 0;
+ int group_size;
+ int group_rank;
+ int local_failure = 0;
+ int global_failures = 0;
+ hsize_t count = COUNT;
+ hsize_t i;
+ hsize_t offset;
+ hsize_t dims[1] = {0};
+ hid_t file_id = -1;
+ hid_t memspace = -1;
+ hid_t filespace = -1;
+ hid_t fctmpl = -1;
+ hid_t fapl_id = -1;
+ hid_t dxpl_id = -1;
+ hid_t dset_id = -1;
+ hid_t dset_id_ch = -1;
+ hid_t dcpl_id = H5P_DEFAULT;
+ hsize_t chunk[1];
+ float nextValue;
+ float *data_slice = NULL;
+
+ pass = true;
+
+ HDassert(comm != MPI_COMM_NULL);
+
+ if ( (MPI_Comm_rank(comm, &group_rank)) != MPI_SUCCESS) {
+ pass = FALSE;
+ failure_mssg = "generate_test_file: MPI_Comm_rank failed.\n";
+ }
+
+ if ( (MPI_Comm_size(comm, &group_size)) != MPI_SUCCESS) {
+ pass = FALSE;
+ failure_mssg = "generate_test_file: MPI_Comm_size failed.\n";
+ }
+
+ if ( mpi_rank == 0 ) {
+
+ HDfprintf(stdout, "Constructing test files...");
+ }
+
+ /* Setup the file names
+     * The test-specific filenames are stored as consecutive
+ * array entries in the global 'FILENAMES' array above.
+ * Here, we simply decide on the starting index for
+ * file construction. The reading portion of the test
+ * will have a similar setup process...
+ */
+ if ( pass ) {
+ if ( comm == MPI_COMM_WORLD ) { /* Test 1 */
+ file_index = 0;
+ }
+ else if ( group_id == 0 ) { /* Test 2 group 0 */
+ file_index = 1;
+ }
+ else { /* Test 2 group 1 */
+ file_index = 2;
+ }
+
+ /* The 'group_filename' is just a temp variable and
+ * is used to call into the h5_fixname function. No
+ * need to worry that we reassign it for each file!
+ */
+ group_filename = FILENAMES[file_index];
+ HDassert( group_filename );
+
+ /* Assign the 'data_filename' */
+ if ( h5_fixname(group_filename, H5P_DEFAULT, data_filename,
+ sizeof(data_filename)) == NULL ) {
+ pass = FALSE;
+ failure_mssg = "h5_fixname(0) failed.\n";
+ }
+ }
+
+ /* setup data to write */
+ if ( pass ) {
+ if ( (data_slice = (float *)HDmalloc(COUNT * sizeof(float))) == NULL ) {
+ pass = FALSE;
+ failure_mssg = "malloc of data_slice failed.\n";
+ }
+ }
+
+ if ( pass ) {
+ nextValue = (float)(mpi_rank * COUNT);
+
+ for(i=0; i<COUNT; i++) {
+ data_slice[i] = nextValue;
+ nextValue += 1;
+ }
+ }
+
+ /* Initialize a file creation template */
+ if (pass) {
+ if ((fctmpl = H5Pcreate(H5P_FILE_CREATE)) < 0) {
+ pass = FALSE;
+ failure_mssg = "H5Pcreate(H5P_FILE_CREATE) failed.\n";
+ }
+ else if (H5Pset_userblock(fctmpl, 512) != SUCCEED) {
+ pass = FALSE;
+ failure_mssg = "H5Pset_userblock(,size) failed.\n";
+ }
+ }
+ /* setup FAPL */
+ if ( pass ) {
+ if ( (fapl_id = H5Pcreate(H5P_FILE_ACCESS)) < 0 ) {
+ pass = FALSE;
+ failure_mssg = "H5Pcreate(H5P_FILE_ACCESS) failed.\n";
+ }
+ }
+
+ if ( pass ) {
+ if ( (H5Pset_fapl_mpio(fapl_id, comm, MPI_INFO_NULL)) < 0 ) {
+ pass = FALSE;
+ failure_mssg = "H5Pset_fapl_mpio() failed\n";
+ }
+ }
+
+ /* create the data file */
+ if ( pass ) {
+ if ( (file_id = H5Fcreate(data_filename, H5F_ACC_TRUNC,
+ fctmpl, fapl_id)) < 0 ) {
+ pass = FALSE;
+ failure_mssg = "H5Fcreate() failed.\n";
+ }
+ }
+
+ /* create and write the dataset */
+ if ( pass ) {
+ if ( (dxpl_id = H5Pcreate(H5P_DATASET_XFER)) < 0 ) {
+ pass = FALSE;
+ failure_mssg = "H5Pcreate(H5P_DATASET_XFER) failed.\n";
+ }
+ }
+
+ if ( pass ) {
+ if ( (H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE)) < 0 ) {
+ pass = FALSE;
+ failure_mssg = "H5Pset_dxpl_mpio() failed.\n";
+ }
+ }
+
+ if ( pass ) {
+ dims[0] = COUNT;
+ if ( (memspace = H5Screate_simple(1, dims, NULL)) < 0 ) {
+ pass = FALSE;
+ failure_mssg = "H5Screate_simple(1, dims, NULL) failed (1).\n";
+ }
+ }
+
+ if ( pass ) {
+ dims[0] *= (hsize_t)group_size;
+ if ( (filespace = H5Screate_simple(1, dims, NULL)) < 0 ) {
+ pass = FALSE;
+ failure_mssg = "H5Screate_simple(1, dims, NULL) failed (2).\n";
+ }
+ }
+
+ if ( pass ) {
+ offset = (hsize_t)group_rank * (hsize_t)COUNT;
+ if ( (H5Sselect_hyperslab(filespace, H5S_SELECT_SET, &offset,
+ NULL, &count, NULL)) < 0 ) {
+ pass = FALSE;
+ failure_mssg = "H5Sselect_hyperslab() failed.\n";
+ }
+ }
+
+ if ( pass ) {
+ if ( (dset_id = H5Dcreate2(file_id, "dataset0", H5T_NATIVE_FLOAT,
+ filespace, H5P_DEFAULT, H5P_DEFAULT,
+ H5P_DEFAULT)) < 0 ) {
+ pass = false;
+ failure_mssg = "H5Dcreate2() failed.\n";
+ }
+ }
+
+ if ( pass ) {
+ if ( (H5Dwrite(dset_id, H5T_NATIVE_FLOAT, memspace,
+ filespace, dxpl_id, data_slice)) < 0 ) {
+ pass = false;
+ failure_mssg = "H5Dwrite() failed.\n";
+ }
+ }
+
+
+ /* create a chunked dataset */
+ chunk[0] = COUNT/8;
+
+ if ( pass ) {
+ if ( (dcpl_id = H5Pcreate (H5P_DATASET_CREATE)) < 0 ) {
+ pass = false;
+ failure_mssg = "H5Pcreate() failed.\n";
+ }
+ }
+
+ if ( pass ) {
+ if ( (H5Pset_chunk (dcpl_id, 1, chunk) ) < 0 ) {
+ pass = false;
+ failure_mssg = "H5Pset_chunk() failed.\n";
+ }
+ }
+
+ if ( pass ) {
+
+ if ( (dset_id_ch = H5Dcreate2(file_id, "dataset0_chunked", H5T_NATIVE_FLOAT,
+ filespace, H5P_DEFAULT, dcpl_id,
+ H5P_DEFAULT)) < 0 ) {
+ pass = false;
+ failure_mssg = "H5Dcreate2() failed.\n";
+ }
+ }
+
+ if ( pass ) {
+ if ( (H5Dwrite(dset_id_ch, H5T_NATIVE_FLOAT, memspace,
+ filespace, dxpl_id, data_slice)) < 0 ) {
+ pass = false;
+ failure_mssg = "H5Dwrite() failed.\n";
+ }
+ }
+ if ( pass || (dcpl_id != -1)) {
+ if ( H5Pclose(dcpl_id) < 0 ) {
+ pass = false;
+ failure_mssg = "H5Pclose(dcpl_id) failed.\n";
+ }
+ }
+
+ if ( pass || (dset_id_ch != -1)) {
+ if ( H5Dclose(dset_id_ch) < 0 ) {
+ pass = false;
+ failure_mssg = "H5Dclose(dset_id_ch) failed.\n";
+ }
+ }
+
+ /* close file, etc. */
+ if ( pass || (dset_id != -1)) {
+ if ( H5Dclose(dset_id) < 0 ) {
+ pass = false;
+ failure_mssg = "H5Dclose(dset_id) failed.\n";
+ }
+ }
+
+ if ( pass || (memspace != -1) ) {
+ if ( H5Sclose(memspace) < 0 ) {
+ pass = false;
+ failure_mssg = "H5Sclose(memspace) failed.\n";
+ }
+ }
+
+ if ( pass || (filespace != -1) ) {
+ if ( H5Sclose(filespace) < 0 ) {
+ pass = false;
+ failure_mssg = "H5Sclose(filespace) failed.\n";
+ }
+ }
+
+ if ( pass || (file_id != -1) ) {
+ if ( H5Fclose(file_id) < 0 ) {
+ pass = false;
+ failure_mssg = "H5Fclose(file_id) failed.\n";
+ }
+ }
+
+ if ( pass || (dxpl_id != -1) ) {
+ if ( H5Pclose(dxpl_id) < 0 ) {
+ pass = false;
+ failure_mssg = "H5Pclose(dxpl_id) failed.\n";
+ }
+ }
+
+ if ( pass || (fapl_id != -1) ) {
+ if ( H5Pclose(fapl_id) < 0 ) {
+ pass = false;
+ failure_mssg = "H5Pclose(fapl_id) failed.\n";
+ }
+ }
+
+ if (pass || (fctmpl != -1)) {
+ if (H5Pclose(fctmpl) < 0) {
+ pass = false;
+ failure_mssg = "H5Pclose(fctmpl) failed.\n";
+ }
+ }
+
+ /* Add a userblock to the head of the datafile.
+     * We will use this for a functional test of the
+     * file open optimization.  This superblock
+     * relocation is done by the rank 0 process associated
+ * with the communicator being used. For test 1, we
+ * utilize MPI_COMM_WORLD, so group_rank 0 is the
+ * same as mpi_rank 0. For test 2 which utilizes
+ * two groups resulting from an MPI_Comm_split, we
+ * will have parallel groups and hence two
+ * group_rank(0) processes. Each parallel group
+ * will create a unique file with different text
+ * headers and different data.
+ */
+ if (group_rank == 0) {
+ const char *text_to_write;
+ size_t bytes_to_write;
+
+ if (group_id == 0)
+ text_to_write = random_hdf5_text;
+ else
+ text_to_write = hitchhiker_quote;
+
+ bytes_to_write = HDstrlen(text_to_write);
+
+ if (pass) {
+ if ((header = HDopen(data_filename, O_WRONLY)) < 0) {
+ pass = FALSE;
+ failure_mssg = "HDopen(data_filename, O_WRONLY) failed.\n";
+ }
+ }
+
+ if (pass) {
+ HDlseek(header, 0, SEEK_SET);
+ if (HDwrite(header, text_to_write, bytes_to_write) < 0) {
+ pass = FALSE;
+ failure_mssg = "Unable to write user text into file.\n";
+ }
+ }
+
+ if (pass || (header > 0)) {
+ if (HDclose(header) < 0) {
+ pass = FALSE;
+ failure_mssg = "HDclose() failed.\n";
+ }
+ }
+ }
+
+ /* collect results from other processes.
+ * Only overwrite the failure message if no previous error
+ * has been detected
+ */
+ local_failure = ( pass ? 0 : 1 );
+
+ /* This is a global all reduce (NOT group specific) */
+ if ( MPI_Allreduce(&local_failure, &global_failures, 1,
+ MPI_INT, MPI_SUM, MPI_COMM_WORLD) != MPI_SUCCESS ) {
+ if ( pass ) {
+ pass = FALSE;
+ failure_mssg = "MPI_Allreduce() failed.\n";
+ }
+ } else if ( ( pass ) && ( global_failures > 0 ) ) {
+ pass = FALSE;
+ failure_mssg = "One or more processes report failure.\n";
+ }
+
+ /* report results */
+ if ( mpi_rank == 0 ) {
+ if ( pass ) {
+ HDfprintf(stdout, "Done.\n");
+ } else {
+ HDfprintf(stdout, "FAILED.\n");
+ HDfprintf(stdout, "%s: failure_mssg = \"%s\"\n",
+ fcn_name, failure_mssg);
+ }
+ }
+
+ /* free data_slice if it has been allocated */
+ if ( data_slice != NULL ) {
+ HDfree(data_slice);
+ data_slice = NULL;
+ }
+
+ return(! pass);
+
+} /* generate_test_file() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: test_parallel_read
+ *
+ * Purpose: This actually tests the superblock optimization
+ * and covers the three primary cases we're interested in.
+ * 1). That HDF5 files can be opened in parallel by
+ * the rank 0 process and that the superblock
+ * offset is correctly broadcast to the other
+ * parallel file readers.
+ * 2). That a parallel application can correctly
+ * handle reading multiple files by using
+ * subgroups of MPI_COMM_WORLD and that each
+ * subgroup operates as described in (1) to
+ * collectively read the data.
+ * 3). Testing proc0-read-and-MPI_Bcast using
+ * sub-communicators, and reading into
+ * a memory space that is different from the
+ * file space, and chunked datasets.
+ *
+ * The global MPI rank is used for reading and
+ *              writing process-specific data in the
+ *              dataset.  We do this rather simplistically, i.e.
+ *               rank 0: writes/reads 0-999
+ * rank 1: writes/reads 1000-1999
+ * rank 2: writes/reads 2000-2999
+ * ...
+ *
+ * Return: Success: 0
+ *
+ * Failure: 1
+ *
+ * Programmer: Richard Warren
+ * 10/1/17
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+static int
+test_parallel_read(MPI_Comm comm, int mpi_rank, int mpi_size, int group_id)
+{
+ const char *failure_mssg;
+ const char *fcn_name = "test_parallel_read()";
+ const char *group_filename = NULL;
+ char reloc_data_filename[FILENAME_BUF_SIZE];
+ int local_failure = 0;
+ int global_failures = 0;
+ int group_size;
+ int group_rank;
+ hid_t fapl_id = -1;
+ hid_t file_id = -1;
+ hid_t dset_id = -1;
+ hid_t dset_id_ch = -1;
+ hid_t dxpl_id = H5P_DEFAULT;
+ hid_t memspace = -1;
+ hid_t filespace = -1;
+ hid_t filetype = -1;
+ size_t filetype_size;
+ hssize_t dset_size;
+ hsize_t i;
+ hsize_t offset;
+ hsize_t count = COUNT;
+ hsize_t dims[1] = {0};
+ float nextValue;
+ float *data_slice = NULL;
+
+ pass = TRUE;
+
+ HDassert(comm != MPI_COMM_NULL);
+
+ if ( (MPI_Comm_rank(comm, &group_rank)) != MPI_SUCCESS) {
+ pass = FALSE;
+ failure_mssg = "test_parallel_read: MPI_Comm_rank failed.\n";
+ }
+
+ if ( (MPI_Comm_size(comm, &group_size)) != MPI_SUCCESS) {
+ pass = FALSE;
+ failure_mssg = "test_parallel_read: MPI_Comm_size failed.\n";
+ }
+
+ if ( mpi_rank == 0 ) {
+ if ( comm == MPI_COMM_WORLD ) {
+ TESTING("parallel file open test 1");
+ }
+ else {
+ TESTING("parallel file open test 2");
+ }
+ }
+
+ /* allocate space for the data_slice array */
+ if ( pass ) {
+ if ( (data_slice = (float *)HDmalloc(COUNT * sizeof(float))) == NULL ) {
+ pass = FALSE;
+ failure_mssg = "malloc of data_slice failed.\n";
+ }
+ }
+
+
+    /* Select the file name to read
+ * Please see the comments in the 'generate_test_file' function
+ * for more details...
+ */
+ if ( pass ) {
+
+ if ( comm == MPI_COMM_WORLD ) /* test 1 */
+ group_filename = FILENAMES[0];
+ else if ( group_id == 0 ) /* test 2 group 0 */
+ group_filename = FILENAMES[1];
+ else /* test 2 group 1 */
+ group_filename = FILENAMES[2];
+
+ HDassert(group_filename);
+ if ( h5_fixname(group_filename, H5P_DEFAULT, reloc_data_filename,
+ sizeof(reloc_data_filename)) == NULL ) {
+
+ pass = FALSE;
+ failure_mssg = "h5_fixname(1) failed.\n";
+ }
+ }
+
+ /* setup FAPL */
+ if ( pass ) {
+ if ( (fapl_id = H5Pcreate(H5P_FILE_ACCESS)) < 0 ) {
+ pass = FALSE;
+ failure_mssg = "H5Pcreate(H5P_FILE_ACCESS) failed.\n";
+ }
+ }
+
+ if ( pass ) {
+ if ( (H5Pset_fapl_mpio(fapl_id, comm, MPI_INFO_NULL)) < 0 ) {
+ pass = FALSE;
+ failure_mssg = "H5Pset_fapl_mpio() failed\n";
+ }
+ }
+
+ /* open the file -- should have user block, exercising the optimization */
+ if ( pass ) {
+ if ( (file_id = H5Fopen(reloc_data_filename,
+ H5F_ACC_RDONLY, fapl_id)) < 0 ) {
+ pass = FALSE;
+ failure_mssg = "H5Fopen() failed\n";
+ }
+ }
+
+ /* open the data set */
+ if ( pass ) {
+ if ( (dset_id = H5Dopen2(file_id, "dataset0", H5P_DEFAULT)) < 0 ) {
+ pass = FALSE;
+ failure_mssg = "H5Dopen2() failed\n";
+ }
+ }
+
+ /* open the chunked data set */
+ if ( pass ) {
+ if ( (dset_id_ch = H5Dopen2(file_id, "dataset0_chunked", H5P_DEFAULT)) < 0 ) {
+ pass = FALSE;
+ failure_mssg = "H5Dopen2() failed\n";
+ }
+ }
+
+ /* setup memspace */
+ if ( pass ) {
+ dims[0] = count;
+ if ( (memspace = H5Screate_simple(1, dims, NULL)) < 0 ) {
+ pass = FALSE;
+ failure_mssg = "H5Screate_simple(1, dims, NULL) failed\n";
+ }
+ }
+
+ /* setup filespace */
+ if ( pass ) {
+ if ( (filespace = H5Dget_space(dset_id)) < 0 ) {
+ pass = FALSE;
+ failure_mssg = "H5Dget_space(dataset) failed\n";
+ }
+ }
+
+ if ( pass ) {
+ offset = (hsize_t)group_rank * count;
+ if ( (H5Sselect_hyperslab(filespace, H5S_SELECT_SET,
+ &offset, NULL, &count, NULL)) < 0 ) {
+ pass = FALSE;
+ failure_mssg = "H5Sselect_hyperslab() failed\n";
+ }
+ }
+
+    /* read this process's section of the data */
+ if ( pass ) {
+ if ( (H5Dread(dset_id, H5T_NATIVE_FLOAT, memspace,
+ filespace, H5P_DEFAULT, data_slice)) < 0 ) {
+ pass = FALSE;
+ failure_mssg = "H5Dread() failed\n";
+ }
+ }
+
+ /* verify the data */
+ if ( pass ) {
+ nextValue = (float)((hsize_t)mpi_rank * count);
+ i = 0;
+ while ( ( pass ) && ( i < count ) ) {
+ /* what we really want is data_slice[i] != nextValue --
+             * the following is a circumlocution to shut up
+             * the compiler.
+ */
+ if ( ( data_slice[i] > nextValue ) ||
+ ( data_slice[i] < nextValue ) ) {
+ pass = FALSE;
+ failure_mssg = "Unexpected dset contents.\n";
+ }
+ nextValue += 1;
+ i++;
+ }
+ }
+
+ if ( pass || (memspace != -1) ) {
+ if ( H5Sclose(memspace) < 0 ) {
+ pass = false;
+ failure_mssg = "H5Sclose(memspace) failed.\n";
+ }
+ }
+
+ if ( pass || (filespace != -1) ) {
+ if ( H5Sclose(filespace) < 0 ) {
+ pass = false;
+ failure_mssg = "H5Sclose(filespace) failed.\n";
+ }
+ }
+
+ /* free data_slice if it has been allocated */
+ if ( data_slice != NULL ) {
+ HDfree(data_slice);
+ data_slice = NULL;
+ }
+
+ /*
+ * Test reading proc0-read-and-bcast with sub-communicators
+ */
+
+ /* Don't test with more than LIMIT_NPROC processes to avoid memory issues */
+
+ if( group_size <= LIMIT_NPROC ) {
+#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
+ hbool_t prop_value;
+#endif /* H5_HAVE_INSTRUMENTED_LIBRARY */
+
+ if ( (filespace = H5Dget_space(dset_id )) < 0 ) {
+ pass = FALSE;
+ failure_mssg = "H5Dget_space failed.\n";
+ }
+
+ if ( (dset_size = H5Sget_simple_extent_npoints(filespace)) < 0 ) {
+ pass = FALSE;
+ failure_mssg = "H5Sget_simple_extent_npoints failed.\n";
+ }
+
+ if ( (filetype = H5Dget_type(dset_id)) < 0 ) {
+ pass = FALSE;
+ failure_mssg = "H5Dget_type failed.\n";
+ }
+
+ if ( (filetype_size = H5Tget_size(filetype)) == 0 ) {
+ pass = FALSE;
+ failure_mssg = "H5Tget_size failed.\n";
+ }
+
+ if ( H5Tclose(filetype) < 0 ) {
+ pass = FALSE;
+ failure_mssg = "H5Tclose failed.\n";
+        }
+
+ if ( (data_slice = (float *)HDmalloc((size_t)dset_size*filetype_size)) == NULL ) {
+ pass = FALSE;
+ failure_mssg = "malloc of data_slice failed.\n";
+ }
+
+ if ( pass ) {
+ if ( (dxpl_id = H5Pcreate(H5P_DATASET_XFER)) < 0 ) {
+ pass = FALSE;
+ failure_mssg = "H5Pcreate(H5P_DATASET_XFER) failed.\n";
+ }
+ }
+
+ if ( pass ) {
+ if ( (H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE)) < 0 ) {
+ pass = FALSE;
+ failure_mssg = "H5Pset_dxpl_mpio() failed.\n";
+ }
+ }
+
+#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
+ if ( pass ) {
+ prop_value = H5D_XFER_COLL_RANK0_BCAST_DEF;
+ if(H5Pinsert2(dxpl_id, H5D_XFER_COLL_RANK0_BCAST_NAME, H5D_XFER_COLL_RANK0_BCAST_SIZE, &prop_value,
+ NULL, NULL, NULL, NULL, NULL, NULL) < 0) {
+ pass = FALSE;
+ failure_mssg = "H5Pinsert2() failed\n";
+ }
+ }
+#endif /* H5_HAVE_INSTRUMENTED_LIBRARY */
+
+ /* read H5S_ALL section */
+ if ( pass ) {
+ if ( (H5Dread(dset_id, H5T_NATIVE_FLOAT, H5S_ALL,
+ H5S_ALL, dxpl_id, data_slice)) < 0 ) {
+ pass = FALSE;
+ failure_mssg = "H5Dread() failed\n";
+ }
+ }
+
+#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
+ if ( pass ) {
+ prop_value = FALSE;
+ if(H5Pget(dxpl_id, H5D_XFER_COLL_RANK0_BCAST_NAME, &prop_value) < 0) {
+ pass = FALSE;
+ failure_mssg = "H5Pget() failed\n";
+ }
+ if (pass) {
+ if(prop_value != TRUE) {
+ pass = FALSE;
+ failure_mssg = "rank 0 Bcast optimization was mistakenly not performed\n";
+ }
+ }
+ }
+#endif /* H5_HAVE_INSTRUMENTED_LIBRARY */
+
+ /* verify the data */
+ if ( pass ) {
+
+ if ( comm == MPI_COMM_WORLD ) /* test 1 */
+ nextValue = 0;
+ else if ( group_id == 0 ) /* test 2 group 0 */
+ nextValue = 0;
+ else /* test 2 group 1 */
+ nextValue = (float)((hsize_t)( mpi_size / 2 )*count);
+
+ i = 0;
+ while ( ( pass ) && ( i < (hsize_t)dset_size ) ) {
+ /* what we really want is data_slice[i] != nextValue --
+             * the following is a circumlocution to shut up
+             * the compiler.
+ */
+ if ( ( data_slice[i] > nextValue ) ||
+ ( data_slice[i] < nextValue ) ) {
+ pass = FALSE;
+ failure_mssg = "Unexpected dset contents.\n";
+ }
+ nextValue += 1;
+ i++;
+ }
+ }
+
+ /* read H5S_ALL section for the chunked dataset */
+
+#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
+ if ( pass ) {
+ prop_value = H5D_XFER_COLL_RANK0_BCAST_DEF;
+ if(H5Pset(dxpl_id, H5D_XFER_COLL_RANK0_BCAST_NAME, &prop_value) < 0) {
+ pass = FALSE;
+ failure_mssg = "H5Pset() failed\n";
+ }
+ }
+#endif /* H5_HAVE_INSTRUMENTED_LIBRARY */
+
+ for ( i = 0; i < (hsize_t)dset_size; i++) {
+ data_slice[i] = 0;
+ }
+ if ( pass ) {
+ if ( (H5Dread(dset_id_ch, H5T_NATIVE_FLOAT, H5S_ALL,
+ H5S_ALL, dxpl_id, data_slice)) < 0 ) {
+ pass = FALSE;
+ failure_mssg = "H5Dread() failed\n";
+ }
+ }
+
+#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
+ if ( pass ) {
+ prop_value = FALSE;
+ if(H5Pget(dxpl_id, H5D_XFER_COLL_RANK0_BCAST_NAME, &prop_value) < 0) {
+ pass = FALSE;
+ failure_mssg = "H5Pget() failed\n";
+ }
+ if (pass) {
+ if(prop_value == TRUE) {
+ pass = FALSE;
+ failure_mssg = "rank 0 Bcast optimization was mistakenly performed for chunked dataset\n";
+ }
+ }
+ }
+#endif /* H5_HAVE_INSTRUMENTED_LIBRARY */
+
+ /* verify the data */
+ if ( pass ) {
+
+ if ( comm == MPI_COMM_WORLD ) /* test 1 */
+ nextValue = 0;
+ else if ( group_id == 0 ) /* test 2 group 0 */
+ nextValue = 0;
+ else /* test 2 group 1 */
+ nextValue = (float)((hsize_t)( mpi_size / 2 )*count);
+
+ i = 0;
+ while ( ( pass ) && ( i < (hsize_t)dset_size ) ) {
+ /* what we really want is data_slice[i] != nextValue --
+             * the following is a circumlocution to shut up
+             * the compiler.
+ */
+ if ( ( data_slice[i] > nextValue ) ||
+ ( data_slice[i] < nextValue ) ) {
+ pass = FALSE;
+ failure_mssg = "Unexpected chunked dset contents.\n";
+ }
+ nextValue += 1;
+ i++;
+ }
+ }
+
+ if ( pass || (filespace != -1) ) {
+ if ( H5Sclose(filespace) < 0 ) {
+ pass = false;
+ failure_mssg = "H5Sclose(filespace) failed.\n";
+ }
+ }
+
+ /* free data_slice if it has been allocated */
+ if ( data_slice != NULL ) {
+ HDfree(data_slice);
+ data_slice = NULL;
+ }
+
+ /*
+ * Read an H5S_ALL filespace into a hyperslab defined memory space
+ */
+
+ if ( (data_slice = (float *)HDmalloc((size_t)(dset_size*2)*filetype_size)) == NULL ) {
+ pass = FALSE;
+ failure_mssg = "malloc of data_slice failed.\n";
+ }
+
+ /* setup memspace */
+ if ( pass ) {
+ dims[0] = (hsize_t)dset_size*2;
+ if ( (memspace = H5Screate_simple(1, dims, NULL)) < 0 ) {
+ pass = FALSE;
+ failure_mssg = "H5Screate_simple(1, dims, NULL) failed\n";
+ }
+ }
+ if ( pass ) {
+ offset = (hsize_t)dset_size;
+ if ( (H5Sselect_hyperslab(memspace, H5S_SELECT_SET,
+ &offset, NULL, &offset, NULL)) < 0 ) {
+ pass = FALSE;
+ failure_mssg = "H5Sselect_hyperslab() failed\n";
+ }
+ }
+
+#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
+ if ( pass ) {
+ prop_value = H5D_XFER_COLL_RANK0_BCAST_DEF;
+ if(H5Pset(dxpl_id, H5D_XFER_COLL_RANK0_BCAST_NAME, &prop_value) < 0) {
+ pass = FALSE;
+ failure_mssg = "H5Pset() failed\n";
+ }
+ }
+#endif /* H5_HAVE_INSTRUMENTED_LIBRARY */
+
+    /* read this process's section of the data */
+ if ( pass ) {
+ if ( (H5Dread(dset_id, H5T_NATIVE_FLOAT, memspace,
+ H5S_ALL, dxpl_id, data_slice)) < 0 ) {
+ pass = FALSE;
+ failure_mssg = "H5Dread() failed\n";
+ }
+ }
+
+#ifdef H5_HAVE_INSTRUMENTED_LIBRARY
+ if ( pass ) {
+ prop_value = FALSE;
+ if(H5Pget(dxpl_id, H5D_XFER_COLL_RANK0_BCAST_NAME, &prop_value) < 0) {
+ pass = FALSE;
+ failure_mssg = "H5Pget() failed\n";
+ }
+ if (pass) {
+ if(prop_value != TRUE) {
+ pass = FALSE;
+ failure_mssg = "rank 0 Bcast optimization was mistakenly not performed\n";
+ }
+ }
+ }
+#endif /* H5_HAVE_INSTRUMENTED_LIBRARY */
+
+ /* verify the data */
+ if ( pass ) {
+
+ if ( comm == MPI_COMM_WORLD ) /* test 1 */
+ nextValue = 0;
+ else if ( group_id == 0 ) /* test 2 group 0 */
+ nextValue = 0;
+ else /* test 2 group 1 */
+ nextValue = (float)((hsize_t)(mpi_size / 2)*count);
+
+ i = (hsize_t)dset_size;
+        while ( ( pass ) && ( i < (hsize_t)dset_size * 2 ) ) {
+ /* what we really want is data_slice[i] != nextValue --
+             * the following is a circumlocution to shut up
+             * the compiler.
+ */
+ if ( ( data_slice[i] > nextValue ) ||
+ ( data_slice[i] < nextValue ) ) {
+ pass = FALSE;
+ failure_mssg = "Unexpected dset contents.\n";
+ }
+ nextValue += 1;
+ i++;
+ }
+ }
+
+ if ( pass || (memspace != -1) ) {
+ if ( H5Sclose(memspace) < 0 ) {
+ pass = false;
+ failure_mssg = "H5Sclose(memspace) failed.\n";
+ }
+ }
+
+ /* free data_slice if it has been allocated */
+ if ( data_slice != NULL ) {
+ HDfree(data_slice);
+ data_slice = NULL;
+ }
+
+ if ( pass || (dxpl_id != -1) ) {
+ if ( H5Pclose(dxpl_id) < 0 ) {
+ pass = false;
+ failure_mssg = "H5Pclose(dxpl_id) failed.\n";
+ }
+ }
+ }
+
+ /* close file, etc. */
+ if ( pass || (dset_id != -1) ) {
+ if ( H5Dclose(dset_id) < 0 ) {
+ pass = false;
+ failure_mssg = "H5Dclose(dset_id) failed.\n";
+ }
+ }
+
+ if ( pass || (dset_id_ch != -1) ) {
+ if ( H5Dclose(dset_id_ch) < 0 ) {
+ pass = false;
+ failure_mssg = "H5Dclose(dset_id_ch) failed.\n";
+ }
+ }
+
+ if ( pass || (file_id != -1) ) {
+ if ( H5Fclose(file_id) < 0 ) {
+ pass = false;
+ failure_mssg = "H5Fclose(file_id) failed.\n";
+ }
+ }
+
+ if ( pass || (fapl_id != -1) ) {
+ if ( H5Pclose(fapl_id) < 0 ) {
+ pass = false;
+ failure_mssg = "H5Pclose(fapl_id) failed.\n";
+ }
+ }
+
+ /* collect results from other processes.
+ * Only overwrite the failure message if no previous error
+ * has been detected
+ */
+ local_failure = ( pass ? 0 : 1 );
+
+ if ( MPI_Allreduce( &local_failure, &global_failures, 1,
+ MPI_INT, MPI_SUM, MPI_COMM_WORLD) != MPI_SUCCESS ) {
+ if ( pass ) {
+ pass = FALSE;
+ failure_mssg = "MPI_Allreduce() failed.\n";
+ }
+ } else if ( ( pass ) && ( global_failures > 0 ) ) {
+ pass = FALSE;
+ failure_mssg = "One or more processes report failure.\n";
+ }
+
+ /* report results and finish cleanup */
+ if ( group_rank == 0 ) {
+ if ( pass ) {
+ PASSED();
+ } else {
+ H5_FAILED();
+ HDfprintf(stdout, "%s: failure_mssg = \"%s\"\n",
+ fcn_name, failure_mssg);
+ }
+ HDremove(reloc_data_filename);
+ }
+
+ return( ! pass );
+
+} /* test_parallel_read() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: main
+ *
+ * Purpose: To implement a parallel test which validates whether the
+ * new superblock lookup functionality is working correctly.
+ *
+ *              The test consists of creating two separate HDF5 files
+ *              in which random text is inserted at the start of each
+ *              file using the 'h5jam' application.  This forces the
+ *              HDF5 file superblock to a non-zero offset.
+ *              Having created the two independent files, we create two
+ * non-overlapping MPI groups, each of which is then tasked
+ * with the opening and validation of the data contained
+ * therein.
+ *
+ * Return: Success: 0
+ * Failure: 1
+ *
+ * Programmer: Richard Warren
+ * 10/1/17
+ *-------------------------------------------------------------------------
+ */
+
+int
+main( int argc, char **argv)
+{
+ int nerrs = 0;
+ int which_group = 0;
+ int mpi_rank;
+ int mpi_size;
+ int split_size;
+ MPI_Comm group_comm = MPI_COMM_NULL;
+
+ /* I don't believe that argv[0] can ever be NULL.
+ * It should thus be safe to dup and save as a check
+     * for CMake testing. Note that in our CMake builds,
+     * all executables are located in the same directory.
+     * We assume (but we'll check) that the h5jam utility
+     * is in the same directory as this executable.  If that
+ * isn't true, then we can use a relative path that
+ * should be valid for the autotools environment.
+ */
+ test_argv0 = HDstrdup(argv[0]);
+
+ if ( (MPI_Init(&argc, &argv)) != MPI_SUCCESS) {
+ HDfprintf(stderr, "FATAL: Unable to initialize MPI\n");
+ HDexit(EXIT_FAILURE);
+ }
+
+ if ( (MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank)) != MPI_SUCCESS) {
+ HDfprintf(stderr, "FATAL: MPI_Comm_rank returned an error\n");
+ HDexit(EXIT_FAILURE);
+ }
+
+ if ( (MPI_Comm_size(MPI_COMM_WORLD, &mpi_size)) != MPI_SUCCESS) {
+ HDfprintf(stderr, "FATAL: MPI_Comm_size returned an error\n");
+ HDexit(EXIT_FAILURE);
+ }
+
+ H5open();
+
+ if ( mpi_rank == 0 ) {
+ HDfprintf(stdout, "========================================\n");
+ HDfprintf(stdout, "Collective file open optimization tests\n");
+ HDfprintf(stdout, " mpi_size = %d\n", mpi_size);
+ HDfprintf(stdout, "========================================\n");
+ }
+
+ if ( mpi_size < 3 ) {
+
+ if ( mpi_rank == 0 ) {
+
+ HDprintf(" Need at least 3 processes. Exiting.\n");
+ }
+ goto finish;
+ }
+
+ /* ------ Create two (2) MPI groups ------
+ *
+ * We split MPI_COMM_WORLD into 2 more or less equal sized
+ * groups. The resulting communicators will be used to generate
+ * two HDF files which in turn will be opened in parallel and the
+ * contents verified in the second read test below.
+ */
+ split_size = mpi_size / 2;
+ which_group = (mpi_rank < split_size ? 0 : 1);
+
+ if ( (MPI_Comm_split(MPI_COMM_WORLD,
+ which_group,
+ 0,
+ &group_comm)) != MPI_SUCCESS) {
+
+ HDfprintf(stderr, "FATAL: MPI_Comm_split returned an error\n");
+ HDexit(EXIT_FAILURE);
+ }
+
+ /* ------ Generate all files ------ */
+
+ /* We generate the file used for test 1 */
+ nerrs += generate_test_file( MPI_COMM_WORLD, mpi_rank, which_group );
+
+ if ( nerrs > 0 ) {
+ if ( mpi_rank == 0 ) {
+ HDprintf(" Test(1) file construction failed -- skipping tests.\n");
+ }
+ goto finish;
+ }
+
+ /* We generate the file used for test 2 */
+ nerrs += generate_test_file( group_comm, mpi_rank, which_group );
+
+ if ( nerrs > 0 ) {
+ if ( mpi_rank == 0 ) {
+ HDprintf(" Test(2) file construction failed -- skipping tests.\n");
+ }
+ goto finish;
+ }
+
+    /* Now read the generated test file (still using MPI_COMM_WORLD) */
+ nerrs += test_parallel_read( MPI_COMM_WORLD, mpi_rank, mpi_size, which_group);
+
+ if ( nerrs > 0 ) {
+ if ( mpi_rank == 0 ) {
+ HDprintf(" Parallel read test(1) failed -- skipping tests.\n");
+ }
+ goto finish;
+ }
+
+ /* Update the user on our progress so far. */
+ if ( mpi_rank == 0 ) {
+ HDprintf(" Test 1 of 2 succeeded\n");
+ HDprintf(" -- Starting multi-group parallel read test.\n");
+ }
+
+ /* run the 2nd set of tests */
+ nerrs += test_parallel_read(group_comm, mpi_rank, mpi_size, which_group);
+
+ if ( nerrs > 0 ) {
+ if ( mpi_rank == 0 ) {
+ HDprintf(" Multi-group read test(2) failed\n");
+ }
+ goto finish;
+ }
+
+ if ( mpi_rank == 0 ) {
+ HDprintf(" Test 2 of 2 succeeded\n");
+ }
+
+finish:
+
+ if ((group_comm != MPI_COMM_NULL) &&
+ (MPI_Comm_free(&group_comm)) != MPI_SUCCESS) {
+ HDfprintf(stderr, "MPI_Comm_free failed!\n");
+ }
+
+ /* make sure all processes are finished before final report, cleanup
+ * and exit.
+ */
+ MPI_Barrier(MPI_COMM_WORLD);
+
+ if ( mpi_rank == 0 ) { /* only process 0 reports */
+ const char *header = "Collective file open optimization tests";
+
+ HDfprintf(stdout, "===================================\n");
+ if ( nerrs > 0 ) {
+ HDfprintf(stdout, "***%s detected %d failures***\n", header, nerrs);
+ }
+ else {
+ HDfprintf(stdout, "%s finished with no failures\n", header);
+ }
+ HDfprintf(stdout, "===================================\n");
+ }
+
+ /* close HDF5 library */
+ if (H5close() != SUCCEED) {
+ HDfprintf(stdout, "H5close() failed. (Ignoring)\n");
+ }
+
+ /* MPI_Finalize must be called AFTER H5close which may use MPI calls */
+ MPI_Finalize();
+
+ /* cannot just return (nerrs) because the exit code is limited to 1 byte */
+ return((nerrs > 0) ? EXIT_FAILURE : EXIT_SUCCESS );
+
+} /* main() */
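For reference, a minimal standalone sketch of the comm-split pattern used by main() above (illustrative only; not part of this patch):

    #include <mpi.h>
    #include <stdio.h>
    #include <stdlib.h>

    int main(int argc, char **argv)
    {
        int      mpi_rank, mpi_size, which_group;
        MPI_Comm group_comm = MPI_COMM_NULL;

        MPI_Init(&argc, &argv);
        MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
        MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);

        /* ranks below the midpoint get color 0, the rest color 1 */
        which_group = (mpi_rank < mpi_size / 2) ? 0 : 1;

        /* key 0 preserves the original rank ordering within each group */
        if (MPI_Comm_split(MPI_COMM_WORLD, which_group, 0, &group_comm) != MPI_SUCCESS) {
            fprintf(stderr, "MPI_Comm_split failed\n");
            MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE);
        }

        /* ... generate one file per group over group_comm ... */

        MPI_Comm_free(&group_comm);
        MPI_Finalize();
        return EXIT_SUCCESS;
    }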
diff --git a/testpar/t_prestart.c b/testpar/t_prestart.c
index 719d150..da6bbe0 100644
--- a/testpar/t_prestart.c
+++ b/testpar/t_prestart.c
@@ -34,8 +34,8 @@ main (int argc, char **argv)
hid_t file_id, dset_id, grp_id;
hid_t fapl, sid, mem_dataspace;
herr_t ret;
- char filename[1024];
- int mpi_size, mpi_rank, ndims, i, j;
+ char filename[1024];
+ int mpi_size, mpi_rank, ndims;
MPI_Comm comm = MPI_COMM_WORLD;
MPI_Info info = MPI_INFO_NULL;
hsize_t dims[RANK];
@@ -43,15 +43,16 @@ main (int argc, char **argv)
hsize_t count[RANK];
hsize_t stride[RANK];
hsize_t block[RANK];
+ hsize_t i, j;
DATATYPE *data_array = NULL, *dataptr; /* data buffer */
MPI_Init(&argc, &argv);
MPI_Comm_size(comm, &mpi_size);
- MPI_Comm_rank(comm, &mpi_rank);
+ MPI_Comm_rank(comm, &mpi_rank);
if(MAINPROCESS)
TESTING("proper shutdown of HDF5 library");
-
+
/* Set up file access property list with parallel I/O access */
fapl = H5Pcreate(H5P_FILE_ACCESS);
VRFY((fapl >= 0), "H5Pcreate succeeded");
@@ -73,21 +74,21 @@ main (int argc, char **argv)
ndims = H5Sget_simple_extent_dims(sid, dims, NULL);
VRFY((ndims == 2), "H5Sget_simple_extent_dims succeeded");
- VRFY(dims[0] == ROW_FACTOR*mpi_size, "Wrong dataset dimensions");
- VRFY(dims[1] == COL_FACTOR*mpi_size, "Wrong dataset dimensions");
+ VRFY(dims[0] == (hsize_t)(ROW_FACTOR*mpi_size), "Wrong dataset dimensions");
+ VRFY(dims[1] == (hsize_t)(COL_FACTOR*mpi_size), "Wrong dataset dimensions");
/* allocate memory for data buffer */
data_array = (DATATYPE *)HDmalloc(dims[0]*dims[1]*sizeof(DATATYPE));
VRFY((data_array != NULL), "data_array HDmalloc succeeded");
/* Each process takes a slabs of rows. */
- block[0] = dims[0]/mpi_size;
+ block[0] = dims[0]/(hsize_t)mpi_size;
block[1] = dims[1];
stride[0] = block[0];
stride[1] = block[1];
count[0] = 1;
count[1] = 1;
- start[0] = mpi_rank*block[0];
+ start[0] = (hsize_t)mpi_rank*block[0];
start[1] = 0;
ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, start, stride, count, block);
@@ -107,9 +108,9 @@ main (int argc, char **argv)
for (i=0; i < block[0]; i++){
for (j=0; j < block[1]; j++){
if(*dataptr != mpi_rank+1) {
- printf("Dataset Verify failed at [%lu][%lu](row %lu, col %lu): expect %d, got %d\n",
+ HDprintf("Dataset Verify failed at [%lu][%lu](row %lu, col %lu): expect %d, got %d\n",
(unsigned long)i, (unsigned long)j,
- (unsigned long)(i+start[0]), (unsigned long)(j+start[1]),
+ (unsigned long)((hsize_t)i+start[0]), (unsigned long)((hsize_t)j+start[1]),
mpi_rank+1, *(dataptr));
nerrors ++;
}
@@ -120,14 +121,14 @@ main (int argc, char **argv)
HDremove(filename);
/* release data buffers */
- if(data_array)
+ if(data_array)
HDfree(data_array);
nerrors += GetTestNumErrs();
if(MAINPROCESS) {
if(0 == nerrors)
- PASSED()
+ PASSED();
else
H5_FAILED()
}
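The t_prestart.c casts above all serve the usual per-rank row-slab decomposition; a minimal sketch of that idiom (the helper name select_rank_rows is illustrative, assuming a 2-D file dataspace):

    #include "hdf5.h"

    /* Select this rank's contiguous band of rows in a 2-D file dataspace.
     * hsize_t is unsigned, so the int-valued rank/size are cast explicitly
     * before any arithmetic, exactly as in the hunks above.
     */
    static herr_t
    select_rank_rows(hid_t sid, const hsize_t dims[2], int mpi_rank, int mpi_size)
    {
        hsize_t start[2], stride[2], count[2], block[2];

        block[0]  = dims[0] / (hsize_t)mpi_size;  /* rows per rank */
        block[1]  = dims[1];                      /* all columns   */
        stride[0] = block[0];
        stride[1] = block[1];
        count[0]  = 1;                            /* one block per dimension */
        count[1]  = 1;
        start[0]  = (hsize_t)mpi_rank * block[0]; /* first row owned here */
        start[1]  = 0;

        return H5Sselect_hyperslab(sid, H5S_SELECT_SET, start, stride, count, block);
    }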
diff --git a/testpar/t_prop.c b/testpar/t_prop.c
index d5efa94..dde322d 100644
--- a/testpar/t_prop.c
+++ b/testpar/t_prop.c
@@ -33,12 +33,12 @@ test_encode_decode(hid_t orig_pl, int mpi_rank, int recv_proc)
int send_size = 0;
/* first call to encode returns only the size of the buffer needed */
- ret = H5Pencode(orig_pl, NULL, &buf_size);
+ ret = H5Pencode2(orig_pl, NULL, &buf_size, H5P_DEFAULT);
VRFY((ret >= 0), "H5Pencode succeeded");
sbuf = (uint8_t *)HDmalloc(buf_size);
- ret = H5Pencode(orig_pl, sbuf, &buf_size);
+ ret = H5Pencode2(orig_pl, sbuf, &buf_size, H5P_DEFAULT);
VRFY((ret >= 0), "H5Pencode succeeded");
/* this is a temp fix to send this size_t */
@@ -53,7 +53,7 @@ test_encode_decode(hid_t orig_pl, int mpi_rank, int recv_proc)
void *rbuf;
MPI_Recv(&recv_size, 1, MPI_INT, 0, 123, MPI_COMM_WORLD, &status);
- buf_size = recv_size;
+ buf_size = (size_t)recv_size;
rbuf = (uint8_t *)HDmalloc(buf_size);
MPI_Recv(rbuf, recv_size, MPI_BYTE, 0, 124, MPI_COMM_WORLD, &status);
@@ -97,7 +97,7 @@ test_plist_ed(void)
int mpi_size, mpi_rank, recv_proc;
- hsize_t chunk_size = 16384; /* chunk size */
+ hsize_t chunk_size = 16384; /* chunk size */
double fill = 2.7f; /* Fill value */
size_t nslots = 521*2;
size_t nbytes = 1048576 * 10;
@@ -141,7 +141,7 @@ test_plist_ed(void)
herr_t ret; /* Generic return value */
if(VERBOSE_MED)
- printf("Encode/Decode DCPLs\n");
+ HDprintf("Encode/Decode DCPLs\n");
/* set up MPI parameters */
MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
@@ -165,16 +165,16 @@ test_plist_ed(void)
VRFY((ret>=0), "set fill-value succeeded");
max_size[0] = 100;
- ret = H5Pset_external(dcpl, "ext1.data", (off_t)0,
+ ret = H5Pset_external(dcpl, "ext1.data", (off_t)0,
(hsize_t)(max_size[0] * sizeof(int)/4));
VRFY((ret>=0), "set external succeeded");
- ret = H5Pset_external(dcpl, "ext2.data", (off_t)0,
+ ret = H5Pset_external(dcpl, "ext2.data", (off_t)0,
(hsize_t)(max_size[0] * sizeof(int)/4));
VRFY((ret>=0), "set external succeeded");
- ret = H5Pset_external(dcpl, "ext3.data", (off_t)0,
+ ret = H5Pset_external(dcpl, "ext3.data", (off_t)0,
(hsize_t)(max_size[0] * sizeof(int)/4));
VRFY((ret>=0), "set external succeeded");
- ret = H5Pset_external(dcpl, "ext4.data", (off_t)0,
+ ret = H5Pset_external(dcpl, "ext4.data", (off_t)0,
(hsize_t)(max_size[0] * sizeof(int)/4));
VRFY((ret>=0), "set external succeeded");
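The t_prop.c hunks above migrate to the H5Pencode2() API; a minimal sketch of its two-call size-query idiom (the helper name encode_plist is illustrative):

    #include "hdf5.h"
    #include <stdlib.h>

    /* encode a property list into a freshly malloc'd buffer; caller frees */
    static void *
    encode_plist(hid_t plist, size_t *size_out)
    {
        void  *buf    = NULL;
        size_t nalloc = 0;

        /* first call with a NULL buffer only reports the required size */
        if (H5Pencode2(plist, NULL, &nalloc, H5P_DEFAULT) < 0)
            return NULL;

        if (NULL == (buf = malloc(nalloc)))
            return NULL;

        /* second call fills the buffer */
        if (H5Pencode2(plist, buf, &nalloc, H5P_DEFAULT) < 0) {
            free(buf);
            return NULL;
        }

        *size_out = nalloc;
        return buf;
    }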
diff --git a/testpar/t_pshutdown.c b/testpar/t_pshutdown.c
index def7071..6a35fb2 100644
--- a/testpar/t_pshutdown.c
+++ b/testpar/t_pshutdown.c
@@ -51,11 +51,11 @@ main (int argc, char **argv)
MPI_Init(&argc, &argv);
MPI_Comm_size(comm, &mpi_size);
- MPI_Comm_rank(comm, &mpi_rank);
+ MPI_Comm_rank(comm, &mpi_rank);
if(MAINPROCESS)
TESTING("proper shutdown of HDF5 library");
-
+
/* Set up file access property list with parallel I/O access */
fapl = H5Pcreate(H5P_FILE_ACCESS);
VRFY((fapl >= 0), "H5Pcreate succeeded");
@@ -68,8 +68,8 @@ main (int argc, char **argv)
grp_id = H5Gcreate2(file_id, "Group", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
VRFY((grp_id >= 0), "H5Gcreate succeeded");
- dims[0] = ROW_FACTOR*mpi_size;
- dims[1] = COL_FACTOR*mpi_size;
+ dims[0] = (hsize_t)ROW_FACTOR*(hsize_t)mpi_size;
+ dims[1] = (hsize_t)COL_FACTOR*(hsize_t)mpi_size;
sid = H5Screate_simple (RANK, dims, NULL);
VRFY((sid >= 0), "H5Screate_simple succeeded");
@@ -81,13 +81,13 @@ main (int argc, char **argv)
VRFY((data_array != NULL), "data_array HDmalloc succeeded");
/* Each process takes a slabs of rows. */
- block[0] = dims[0]/mpi_size;
+ block[0] = dims[0]/(hsize_t)mpi_size;
block[1] = dims[1];
stride[0] = block[0];
stride[1] = block[1];
count[0] = 1;
count[1] = 1;
- start[0] = mpi_rank*block[0];
+ start[0] = (hsize_t)mpi_rank*block[0];
start[1] = 0;
/* put some trivial data in the data_array */
@@ -107,7 +107,7 @@ main (int argc, char **argv)
VRFY((ret >= 0), "H5Dwrite succeeded");
/* release data buffers */
- if(data_array)
+ if(data_array)
HDfree(data_array);
MPI_Finalize();
@@ -116,7 +116,7 @@ main (int argc, char **argv)
if(MAINPROCESS) {
if(0 == nerrors)
- PASSED()
+ PASSED();
else
H5_FAILED()
}
diff --git a/testpar/t_shapesame.c b/testpar/t_shapesame.c
index d81d2be..34fcc72 100644
--- a/testpar/t_shapesame.c
+++ b/testpar/t_shapesame.c
@@ -12,7 +12,7 @@
/*
This program will test independant and collective reads and writes between
- selections of different rank that non-the-less are deemed as having the
+ selections of different rank that nonetheless are deemed as having the
same shape by H5Sselect_shape_same().
*/
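A minimal sketch of what "same shape" means here (illustrative helper; not part of this patch): a full 1-D selection and a 1 x n slab of a 2-D dataspace differ in rank, yet compare as shape-same because selection dimensions of extent 1 do not count:

    #include "hdf5.h"

    /* TRUE iff an n-element 1-D selection and a 1 x n row of a 2-D
     * dataspace are deemed the same shape by H5Sselect_shape_same().
     */
    static htri_t
    slice_matches_line(hsize_t n)
    {
        hsize_t dims1[1] = {n};
        hsize_t dims2[2] = {4, n};
        hsize_t start[2] = {2, 0};
        hsize_t count[2] = {1, n};
        hid_t   sid1 = H5Screate_simple(1, dims1, NULL);
        hid_t   sid2 = H5Screate_simple(2, dims2, NULL);
        htri_t  same;

        /* whole 1-D space vs. one row of the 2-D space
         * (NULL stride/block default to 1) */
        H5Sselect_all(sid1);
        H5Sselect_hyperslab(sid2, H5S_SELECT_SET, start, NULL, count, NULL);

        same = H5Sselect_shape_same(sid1, sid2);

        H5Sclose(sid1);
        H5Sclose(sid2);
        return same;
    }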
@@ -22,33 +22,31 @@
#define H5S_TESTING
-#include "hdf5.h"
-#include "H5private.h"
-#include "testphdf5.h"
#include "H5Spkg.h" /* Dataspaces */
+#include "testphdf5.h"
/* On Lustre (and perhaps other parallel file systems?), we have severe
* slow downs if two or more processes attempt to access the same file system
* block. To minimize this problem, we set alignment in the shape same tests
- * to the default Lustre block size -- which greatly reduces contention in
+ * to the default Lustre block size -- which greatly reduces contention in
* the chunked dataset case.
*/
-#define SHAPE_SAME_TEST_ALIGNMENT ((hsize_t)(4 * 1024 * 1024))
+#define SHAPE_SAME_TEST_ALIGNMENT ((hsize_t)(4 * 1024 * 1024))
-#define PAR_SS_DR_MAX_RANK 5 /* must update code if this changes */
+#define PAR_SS_DR_MAX_RANK 5 /* must update code if this changes */
struct hs_dr_pio_test_vars_t
{
- int mpi_size;
+ int mpi_size;
int mpi_rank;
MPI_Comm mpi_comm;
- MPI_Info mpi_info;
+ MPI_Info mpi_info;
int test_num;
int edge_size;
- int checker_edge_size;
+ int checker_edge_size;
int chunk_edge_size;
int small_rank;
int large_rank;
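Apropos of the Lustre note above, a minimal sketch (illustrative helper; parallel builds only) of putting that alignment on a file access property list:

    #include "hdf5.h"
    #include <mpi.h>

    /* fapl with the MPI-IO driver and 4 MiB alignment, per the note above */
    static hid_t
    make_aligned_fapl(MPI_Comm comm, MPI_Info info)
    {
        hid_t fapl = H5Pcreate(H5P_FILE_ACCESS);

        if (fapl < 0)
            return -1;
        /* align every object of 1 byte or more on 4 MiB boundaries */
        if (H5Pset_fapl_mpio(fapl, comm, info) < 0 ||
            H5Pset_alignment(fapl, (hsize_t)1, SHAPE_SAME_TEST_ALIGNMENT) < 0) {
            H5Pclose(fapl);
            return -1;
        }
        return fapl;
    }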
@@ -64,13 +62,13 @@ struct hs_dr_pio_test_vars_t
int small_ds_offset;
int large_ds_offset;
hid_t fid; /* HDF5 file ID */
- hid_t xfer_plist;
+ hid_t xfer_plist;
hid_t full_mem_small_ds_sid;
hid_t full_file_small_ds_sid;
hid_t mem_small_ds_sid;
hid_t file_small_ds_sid_0;
hid_t file_small_ds_sid_1;
- hid_t small_ds_slice_sid;
+ hid_t small_ds_slice_sid;
hid_t full_mem_large_ds_sid;
hid_t full_file_large_ds_sid;
hid_t mem_large_ds_sid;
@@ -78,7 +76,7 @@ struct hs_dr_pio_test_vars_t
hid_t file_large_ds_sid_1;
hid_t file_large_ds_process_slice_sid;
hid_t mem_large_ds_process_slice_sid;
- hid_t large_ds_slice_sid;
+ hid_t large_ds_slice_sid;
hid_t small_dataset; /* Dataset ID */
hid_t large_dataset; /* Dataset ID */
size_t small_ds_size;
@@ -96,25 +94,21 @@ struct hs_dr_pio_test_vars_t
hsize_t * count_ptr;
hsize_t * block_ptr;
int skips;
- int max_skips;
- int64_t total_tests;
- int64_t tests_run;
- int64_t tests_skipped;
+ int max_skips;
+ int64_t total_tests;
+ int64_t tests_run;
+ int64_t tests_skipped;
};
/*-------------------------------------------------------------------------
- * Function: hs_dr_pio_test__setup()
- *
- * Purpose: Do setup for tests of I/O to/from hyperslab selections of
- * different rank in the parallel case.
- *
- * Return: void
+ * Function: hs_dr_pio_test__setup()
*
- * Programmer: JRM -- 8/9/11
+ * Purpose: Do setup for tests of I/O to/from hyperslab selections of
+ * different rank in the parallel case.
*
- * Modifications:
+ * Return: void
*
- * None.
+ * Programmer: JRM -- 8/9/11
*
*-------------------------------------------------------------------------
*/
@@ -133,21 +127,21 @@ hs_dr_pio_test__setup(const int test_num,
const int express_test,
struct hs_dr_pio_test_vars_t * tv_ptr)
{
-#if CONTIG_HS_DR_PIO_TEST__SETUP__DEBUG
+#if CONTIG_HS_DR_PIO_TEST__SETUP__DEBUG
const char *fcnName = "hs_dr_pio_test__setup()";
#endif /* CONTIG_HS_DR_PIO_TEST__SETUP__DEBUG */
const char *filename;
- hbool_t mis_match = FALSE;
- int i;
+ hbool_t mis_match = FALSE;
+ int i;
int mrc;
- int mpi_rank; /* needed by the VRFY macro */
- uint32_t expected_value;
+ int mpi_rank; /* needed by the VRFY macro */
+ uint32_t expected_value;
uint32_t * ptr_0;
uint32_t * ptr_1;
- hid_t acc_tpl; /* File access templates */
+ hid_t acc_tpl; /* File access templates */
hid_t small_ds_dcpl_id = H5P_DEFAULT;
hid_t large_ds_dcpl_id = H5P_DEFAULT;
- herr_t ret; /* Generic return value */
+ herr_t ret; /* Generic return value */
HDassert( edge_size >= 6 );
HDassert( edge_size >= chunk_edge_size );
@@ -219,7 +213,7 @@ hs_dr_pio_test__setup(const int test_num,
tv_ptr->small_ds_buf_2 = (uint32_t *)HDmalloc(sizeof(uint32_t) * tv_ptr->small_ds_size);
VRFY((tv_ptr->small_ds_buf_2 != NULL), "malloc of small_ds_buf_2 succeeded");
- tv_ptr->small_ds_slice_buf =
+ tv_ptr->small_ds_slice_buf =
(uint32_t *)HDmalloc(sizeof(uint32_t) * tv_ptr->small_ds_slice_size);
VRFY((tv_ptr->small_ds_slice_buf != NULL), "malloc of small_ds_slice_buf succeeded");
@@ -232,7 +226,7 @@ hs_dr_pio_test__setup(const int test_num,
tv_ptr->large_ds_buf_2 = (uint32_t *)HDmalloc(sizeof(uint32_t) * tv_ptr->large_ds_size);
VRFY((tv_ptr->large_ds_buf_2 != NULL), "malloc of large_ds_buf_2 succeeded");
- tv_ptr->large_ds_slice_buf =
+ tv_ptr->large_ds_slice_buf =
(uint32_t *)HDmalloc(sizeof(uint32_t) * tv_ptr->large_ds_slice_size);
VRFY((tv_ptr->large_ds_slice_buf != NULL), "malloc of large_ds_slice_buf succeeded");
@@ -256,21 +250,21 @@ hs_dr_pio_test__setup(const int test_num,
filename = (const char *)GetTestParameters();
HDassert( filename != NULL );
-#if CONTIG_HS_DR_PIO_TEST__SETUP__DEBUG
+#if CONTIG_HS_DR_PIO_TEST__SETUP__DEBUG
if ( MAINPROCESS ) {
HDfprintf(stdout, "%d: test num = %d.\n", tv_ptr->mpi_rank, tv_ptr->test_num);
HDfprintf(stdout, "%d: mpi_size = %d.\n", tv_ptr->mpi_rank, tv_ptr->mpi_size);
- HDfprintf(stdout,
+ HDfprintf(stdout,
"%d: small/large rank = %d/%d, use_collective_io = %d.\n",
- tv_ptr->mpi_rank, tv_ptr->small_rank, tv_ptr->large_rank,
+ tv_ptr->mpi_rank, tv_ptr->small_rank, tv_ptr->large_rank,
(int)use_collective_io);
HDfprintf(stdout, "%d: edge_size = %d, chunk_edge_size = %d.\n",
tv_ptr->mpi_rank, tv_ptr->edge_size, tv_ptr->chunk_edge_size);
HDfprintf(stdout, "%d: checker_edge_size = %d.\n",
tv_ptr->mpi_rank, tv_ptr->checker_edge_size);
HDfprintf(stdout, "%d: small_ds_size = %d, large_ds_size = %d.\n",
- tv_ptr->mpi_rank, (int)(tv_ptr->small_ds_size),
+ tv_ptr->mpi_rank, (int)(tv_ptr->small_ds_size),
(int)(tv_ptr->large_ds_size));
HDfprintf(stdout, "%d: filename = %s.\n", tv_ptr->mpi_rank, filename);
}
@@ -305,78 +299,78 @@ hs_dr_pio_test__setup(const int test_num,
/* setup dims: */
tv_ptr->dims[0] = (hsize_t)(tv_ptr->mpi_size + 1);
- tv_ptr->dims[1] = tv_ptr->dims[2] =
+ tv_ptr->dims[1] = tv_ptr->dims[2] =
tv_ptr->dims[3] = tv_ptr->dims[4] = (hsize_t)(tv_ptr->edge_size);
/* Create small ds dataspaces */
- tv_ptr->full_mem_small_ds_sid =
+ tv_ptr->full_mem_small_ds_sid =
H5Screate_simple(tv_ptr->small_rank, tv_ptr->dims, NULL);
- VRFY((tv_ptr->full_mem_small_ds_sid != 0),
+ VRFY((tv_ptr->full_mem_small_ds_sid != 0),
"H5Screate_simple() full_mem_small_ds_sid succeeded");
- tv_ptr->full_file_small_ds_sid =
+ tv_ptr->full_file_small_ds_sid =
H5Screate_simple(tv_ptr->small_rank, tv_ptr->dims, NULL);
- VRFY((tv_ptr->full_file_small_ds_sid != 0),
+ VRFY((tv_ptr->full_file_small_ds_sid != 0),
"H5Screate_simple() full_file_small_ds_sid succeeded");
tv_ptr->mem_small_ds_sid = H5Screate_simple(tv_ptr->small_rank, tv_ptr->dims, NULL);
- VRFY((tv_ptr->mem_small_ds_sid != 0),
+ VRFY((tv_ptr->mem_small_ds_sid != 0),
"H5Screate_simple() mem_small_ds_sid succeeded");
tv_ptr->file_small_ds_sid_0 = H5Screate_simple(tv_ptr->small_rank, tv_ptr->dims, NULL);
- VRFY((tv_ptr->file_small_ds_sid_0 != 0),
+ VRFY((tv_ptr->file_small_ds_sid_0 != 0),
"H5Screate_simple() file_small_ds_sid_0 succeeded");
/* used by checker board tests only */
tv_ptr->file_small_ds_sid_1 = H5Screate_simple(tv_ptr->small_rank, tv_ptr->dims, NULL);
- VRFY((tv_ptr->file_small_ds_sid_1 != 0),
+ VRFY((tv_ptr->file_small_ds_sid_1 != 0),
"H5Screate_simple() file_small_ds_sid_1 succeeded");
- tv_ptr->small_ds_slice_sid =
+ tv_ptr->small_ds_slice_sid =
H5Screate_simple(tv_ptr->small_rank - 1, &(tv_ptr->dims[1]), NULL);
- VRFY((tv_ptr->small_ds_slice_sid != 0),
+ VRFY((tv_ptr->small_ds_slice_sid != 0),
"H5Screate_simple() small_ds_slice_sid succeeded");
/* Create large ds dataspaces */
- tv_ptr->full_mem_large_ds_sid =
+ tv_ptr->full_mem_large_ds_sid =
H5Screate_simple(tv_ptr->large_rank, tv_ptr->dims, NULL);
- VRFY((tv_ptr->full_mem_large_ds_sid != 0),
+ VRFY((tv_ptr->full_mem_large_ds_sid != 0),
"H5Screate_simple() full_mem_large_ds_sid succeeded");
- tv_ptr->full_file_large_ds_sid =
+ tv_ptr->full_file_large_ds_sid =
H5Screate_simple(tv_ptr->large_rank, tv_ptr->dims, NULL);
- VRFY((tv_ptr->full_file_large_ds_sid != FAIL),
+ VRFY((tv_ptr->full_file_large_ds_sid != FAIL),
"H5Screate_simple() full_file_large_ds_sid succeeded");
tv_ptr->mem_large_ds_sid = H5Screate_simple(tv_ptr->large_rank, tv_ptr->dims, NULL);
- VRFY((tv_ptr->mem_large_ds_sid != FAIL),
+ VRFY((tv_ptr->mem_large_ds_sid != FAIL),
"H5Screate_simple() mem_large_ds_sid succeeded");
tv_ptr->file_large_ds_sid_0 = H5Screate_simple(tv_ptr->large_rank, tv_ptr->dims, NULL);
- VRFY((tv_ptr->file_large_ds_sid_0 != FAIL),
+ VRFY((tv_ptr->file_large_ds_sid_0 != FAIL),
"H5Screate_simple() file_large_ds_sid_0 succeeded");
/* used by checker board tests only */
tv_ptr->file_large_ds_sid_1 = H5Screate_simple(tv_ptr->large_rank, tv_ptr->dims, NULL);
- VRFY((tv_ptr->file_large_ds_sid_1 != FAIL),
+ VRFY((tv_ptr->file_large_ds_sid_1 != FAIL),
"H5Screate_simple() file_large_ds_sid_1 succeeded");
- tv_ptr->mem_large_ds_process_slice_sid =
+ tv_ptr->mem_large_ds_process_slice_sid =
H5Screate_simple(tv_ptr->large_rank, tv_ptr->dims, NULL);
- VRFY((tv_ptr->mem_large_ds_process_slice_sid != FAIL),
+ VRFY((tv_ptr->mem_large_ds_process_slice_sid != FAIL),
"H5Screate_simple() mem_large_ds_process_slice_sid succeeded");
- tv_ptr->file_large_ds_process_slice_sid =
+ tv_ptr->file_large_ds_process_slice_sid =
H5Screate_simple(tv_ptr->large_rank, tv_ptr->dims, NULL);
- VRFY((tv_ptr->file_large_ds_process_slice_sid != FAIL),
+ VRFY((tv_ptr->file_large_ds_process_slice_sid != FAIL),
"H5Screate_simple() file_large_ds_process_slice_sid succeeded");
- tv_ptr->large_ds_slice_sid =
+ tv_ptr->large_ds_slice_sid =
H5Screate_simple(tv_ptr->large_rank - 1, &(tv_ptr->dims[1]), NULL);
- VRFY((tv_ptr->large_ds_slice_sid != 0),
+ VRFY((tv_ptr->large_ds_slice_sid != 0),
"H5Screate_simple() large_ds_slice_sid succeeded");
@@ -386,33 +380,28 @@ hs_dr_pio_test__setup(const int test_num,
*/
if ( tv_ptr->chunk_edge_size > 0 ) {
- /* Under Lustre (and perhaps other parallel file systems?) we get
- * locking delays when two or more processes attempt to access the
+ /* Under Lustre (and perhaps other parallel file systems?) we get
+ * locking delays when two or more processes attempt to access the
* same file system block.
*
- * To minimize this problem, I have changed chunk_dims[0]
+ * To minimize this problem, I have changed chunk_dims[0]
* from (mpi_size + 1) to just when any sort of express test is
- * selected. Given the structure of the test, and assuming we
- * set the alignment large enough, this avoids the contention
- * issue by seeing to it that each chunk is only accessed by one
+ * selected. Given the structure of the test, and assuming we
+ * set the alignment large enough, this avoids the contention
+ * issue by seeing to it that each chunk is only accessed by one
* process.
*
- * One can argue as to whether this is a good thing to do in our
+ * One can argue as to whether this is a good thing to do in our
* tests, but for now it is necessary if we want the test to complete
* in a reasonable amount of time.
*
* JRM -- 9/16/10
*/
- if ( express_test == 0 ) {
- tv_ptr->chunk_dims[0] = 1;
+ tv_ptr->chunk_dims[0] = 1;
- } else {
-
- tv_ptr->chunk_dims[0] = 1;
- }
- tv_ptr->chunk_dims[1] = tv_ptr->chunk_dims[2] =
- tv_ptr->chunk_dims[3] =
+ tv_ptr->chunk_dims[1] = tv_ptr->chunk_dims[2] =
+ tv_ptr->chunk_dims[3] =
tv_ptr->chunk_dims[4] = (hsize_t)(tv_ptr->chunk_edge_size);
small_ds_dcpl_id = H5Pcreate(H5P_DATASET_CREATE);
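A minimal sketch (illustrative helper) of the DCPL setup the comment above describes: with chunk_dims[0] == 1 and the slowest-changing index pinned to mpi_rank, no chunk is shared between processes:

    #include "hdf5.h"

    #define MAX_RANK 5  /* mirrors PAR_SS_DR_MAX_RANK above */

    static hid_t
    make_unshared_chunk_dcpl(int ds_rank, hsize_t chunk_edge)
    {
        hsize_t chunk_dims[MAX_RANK] =
            {1, chunk_edge, chunk_edge, chunk_edge, chunk_edge};
        hid_t   dcpl = H5Pcreate(H5P_DATASET_CREATE);

        /* chunk_dims[0] == 1: each process touches only its own chunks */
        if (dcpl < 0 || H5Pset_chunk(dcpl, ds_rank, chunk_dims) < 0) {
            if (dcpl >= 0)
                H5Pclose(dcpl);
            return -1;
        }
        return dcpl;
    }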
@@ -511,7 +500,7 @@ hs_dr_pio_test__setup(const int test_num,
/* write the initial value of the small data set to file */
- ret = H5Dwrite(tv_ptr->small_dataset, tv_ptr->dset_type, tv_ptr->mem_small_ds_sid,
+ ret = H5Dwrite(tv_ptr->small_dataset, tv_ptr->dset_type, tv_ptr->mem_small_ds_sid,
tv_ptr->file_small_ds_sid_0, tv_ptr->xfer_plist, tv_ptr->small_ds_buf_0);
VRFY((ret >= 0), "H5Dwrite() small_dataset initial write succeeded");
@@ -524,8 +513,8 @@ hs_dr_pio_test__setup(const int test_num,
VRFY((mrc==MPI_SUCCESS), "Sync after small dataset writes");
}
- /* read the small data set back to verify that it contains the
- * expected data. Note that each process reads in the entire
+ /* read the small data set back to verify that it contains the
+ * expected data. Note that each process reads in the entire
* data set and verifies it.
*/
ret = H5Dread(tv_ptr->small_dataset,
@@ -574,8 +563,8 @@ hs_dr_pio_test__setup(const int test_num,
tv_ptr->count,
tv_ptr->block);
VRFY((ret >= 0), "H5Sselect_hyperslab(file_large_ds_sid_0, set) suceeded");
-
- /* In passing, setup the process slice data spaces as well */
+
+ /* In passing, set up the process slice dataspaces as well */
ret = H5Sselect_hyperslab(tv_ptr->mem_large_ds_process_slice_sid,
H5S_SELECT_SET,
@@ -583,7 +572,7 @@ hs_dr_pio_test__setup(const int test_num,
tv_ptr->stride,
tv_ptr->count,
tv_ptr->block);
- VRFY((ret >= 0),
+ VRFY((ret >= 0),
"H5Sselect_hyperslab(mem_large_ds_process_slice_sid, set) suceeded");
ret = H5Sselect_hyperslab(tv_ptr->file_large_ds_process_slice_sid,
@@ -592,7 +581,7 @@ hs_dr_pio_test__setup(const int test_num,
tv_ptr->stride,
tv_ptr->count,
tv_ptr->block);
- VRFY((ret >= 0),
+ VRFY((ret >= 0),
"H5Sselect_hyperslab(file_large_ds_process_slice_sid, set) suceeded");
if ( MAINPROCESS ) { /* add an additional slice to the selections */
@@ -618,8 +607,8 @@ hs_dr_pio_test__setup(const int test_num,
/* write the initial value of the large data set to file */
- ret = H5Dwrite(tv_ptr->large_dataset, tv_ptr->dset_type,
- tv_ptr->mem_large_ds_sid, tv_ptr->file_large_ds_sid_0,
+ ret = H5Dwrite(tv_ptr->large_dataset, tv_ptr->dset_type,
+ tv_ptr->mem_large_ds_sid, tv_ptr->file_large_ds_sid_0,
tv_ptr->xfer_plist, tv_ptr->large_ds_buf_0);
if ( ret < 0 ) H5Eprint2(H5E_DEFAULT, stderr);
VRFY((ret >= 0), "H5Dwrite() large_dataset initial write succeeded");
@@ -633,8 +622,8 @@ hs_dr_pio_test__setup(const int test_num,
}
- /* read the large data set back to verify that it contains the
- * expected data. Note that each process reads in the entire
+ /* read the large data set back to verify that it contains the
+ * expected data. Note that each process reads in the entire
* data set.
*/
ret = H5Dread(tv_ptr->large_dataset,
@@ -678,18 +667,14 @@ hs_dr_pio_test__setup(const int test_num,
/*-------------------------------------------------------------------------
- * Function: hs_dr_pio_test__takedown()
+ * Function: hs_dr_pio_test__takedown()
*
- * Purpose: Do takedown after tests of I/O to/from hyperslab selections
- * of different rank in the parallel case.
+ * Purpose: Do takedown after tests of I/O to/from hyperslab selections
+ * of different rank in the parallel case.
*
- * Return: void
+ * Return: void
*
- * Programmer: JRM -- 9/18/09
- *
- * Modifications:
- *
- * None.
+ * Programmer: JRM -- 9/18/09
*
*-------------------------------------------------------------------------
*/
@@ -699,11 +684,11 @@ hs_dr_pio_test__setup(const int test_num,
static void
hs_dr_pio_test__takedown( struct hs_dr_pio_test_vars_t * tv_ptr)
{
-#if HS_DR_PIO_TEST__TAKEDOWN__DEBUG
+#if HS_DR_PIO_TEST__TAKEDOWN__DEBUG
const char *fcnName = "hs_dr_pio_test__takedown()";
#endif /* HS_DR_PIO_TEST__TAKEDOWN__DEBUG */
- int mpi_rank; /* needed by the VRFY macro */
- herr_t ret; /* Generic return value */
+ int mpi_rank; /* needed by the VRFY macro */
+ herr_t ret; /* Generic return value */
/* initialize the local copy of mpi_rank */
mpi_rank = tv_ptr->mpi_rank;
@@ -787,27 +772,23 @@ hs_dr_pio_test__takedown( struct hs_dr_pio_test_vars_t * tv_ptr)
/*-------------------------------------------------------------------------
- * Function: contig_hs_dr_pio_test__d2m_l2s()
- *
- * Purpose: Part one of a series of tests of I/O to/from hyperslab
- * selections of different rank in the parallel.
- *
- * Verify that we can read from disk correctly using
- * selections of different rank that H5S_select_shape_same()
- * views as being of the same shape.
+ * Function: contig_hs_dr_pio_test__d2m_l2s()
*
- * In this function, we test this by reading small_rank - 1
- * slices from the on disk large cube, and verifying that the
- * data read is correct. Verify that H5S_select_shape_same()
- * returns true on the memory and file selections.
+ * Purpose: Part one of a series of tests of I/O to/from hyperslab
+ * selections of different rank in the parallel case.
*
- * Return: void
+ * Verify that we can read from disk correctly using
+ * selections of different rank that H5Sselect_shape_same()
+ * views as being of the same shape.
*
- * Programmer: JRM -- 9/10/11
+ * In this function, we test this by reading small_rank - 1
+ * slices from the on disk large cube, and verifying that the
+ * data read is correct. Verify that H5Sselect_shape_same()
+ * returns true on the memory and file selections.
*
- * Modifications:
+ * Return: void
*
- * None.
+ * Programmer: JRM -- 9/10/11
*
*-------------------------------------------------------------------------
*/
@@ -817,24 +798,24 @@ hs_dr_pio_test__takedown( struct hs_dr_pio_test_vars_t * tv_ptr)
static void
contig_hs_dr_pio_test__d2m_l2s(struct hs_dr_pio_test_vars_t * tv_ptr)
{
-#if CONTIG_HS_DR_PIO_TEST__D2M_L2S__DEBUG
+#if CONTIG_HS_DR_PIO_TEST__D2M_L2S__DEBUG
const char *fcnName = "contig_hs_dr_pio_test__run_test()";
#endif /* CONTIG_HS_DR_PIO_TEST__D2M_L2S__DEBUG */
- hbool_t mis_match = FALSE;
- int i, j, k, l;
- size_t n;
- int mpi_rank; /* needed by the VRFY macro */
- uint32_t expected_value;
+ hbool_t mis_match = FALSE;
+ int i, j, k, l;
+ size_t n;
+ int mpi_rank; /* needed by the VRFY macro */
+ uint32_t expected_value;
uint32_t * ptr_1;
htri_t check; /* Shape comparison return value */
- herr_t ret; /* Generic return value */
+ herr_t ret; /* Generic return value */
/* initialize the local copy of mpi_rank */
mpi_rank = tv_ptr->mpi_rank;
- /* We have already done a H5Sselect_all() on the data space
- * small_ds_slice_sid in the initialization phase, so no need to
+ /* We have already done an H5Sselect_all() on the dataspace
+ * small_ds_slice_sid in the initialization phase, so no need to
* call H5Sselect_all() again.
*/
@@ -859,16 +840,16 @@ contig_hs_dr_pio_test__d2m_l2s(struct hs_dr_pio_test_vars_t * tv_ptr)
/* zero out the buffer we will be reading into */
HDmemset(tv_ptr->small_ds_slice_buf, 0, sizeof(uint32_t) * tv_ptr->small_ds_slice_size);
-#if CONTIG_HS_DR_PIO_TEST__D2M_L2S__DEBUG
- HDfprintf(stdout,
+#if CONTIG_HS_DR_PIO_TEST__D2M_L2S__DEBUG
+ HDfprintf(stdout,
"%s reading slices from big cube on disk into small cube slice.\n",
fcnName);
#endif /* CONTIG_HS_DR_PIO_TEST__D2M_L2S__DEBUG */
/* in serial versions of this test, we loop through all the dimensions
- * of the large data set. However, in the parallel version, each
+ * of the large data set. However, in the parallel version, each
* process only works with that slice of the large cube indicated
- * by its rank -- hence we set the most slowly changing index to
+ * by its rank -- hence we set the most slowly changing index to
* mpi_rank, and don't itterate over it.
*/
@@ -881,9 +862,9 @@ contig_hs_dr_pio_test__d2m_l2s(struct hs_dr_pio_test_vars_t * tv_ptr)
i = 0;
}
- /* since large_rank is at most PAR_SS_DR_MAX_RANK, no need to
+ /* since large_rank is at most PAR_SS_DR_MAX_RANK, no need to
* loop over it -- either we are setting i to mpi_rank, or
- * we are setting it to zero. It will not change during the
+ * we are setting it to zero. It will not change during the
* test.
*/
@@ -907,7 +888,7 @@ contig_hs_dr_pio_test__d2m_l2s(struct hs_dr_pio_test_vars_t * tv_ptr)
}
do {
- /* since small rank >= 2 and large_rank > small_rank, we
+ /* since small rank >= 2 and large_rank > small_rank, we
* have large_rank >= 3. Since PAR_SS_DR_MAX_RANK == 5
* (baring major re-orgaization), this gives us:
*
@@ -921,14 +902,14 @@ contig_hs_dr_pio_test__d2m_l2s(struct hs_dr_pio_test_vars_t * tv_ptr)
do {
if ( (tv_ptr->skips)++ < tv_ptr->max_skips ) { /* skip the test */
- (tv_ptr->tests_skipped)++;
+ (tv_ptr->tests_skipped)++;
} else { /* run the test */
tv_ptr->skips = 0; /* reset the skips counter */
- /* we know that small_rank - 1 >= 1 and that
- * large_rank > small_rank by the assertions at the head
+ /* we know that small_rank - 1 >= 1 and that
+ * large_rank > small_rank by the assertions at the head
* of this function. Thus no need for another inner loop.
*/
tv_ptr->start[0] = (hsize_t)i;
@@ -943,24 +924,23 @@ contig_hs_dr_pio_test__d2m_l2s(struct hs_dr_pio_test_vars_t * tv_ptr)
tv_ptr->stride_ptr,
tv_ptr->count_ptr,
tv_ptr->block_ptr);
- VRFY((ret != FAIL),
+ VRFY((ret != FAIL),
"H5Sselect_hyperslab(file_large_cube_sid) succeeded");
- /* verify that H5S_select_shape_same() reports the two
+ /* verify that H5Sselect_shape_same() reports the two
* selections as having the same shape.
*/
- check = H5S_select_shape_same_test(tv_ptr->small_ds_slice_sid,
- tv_ptr->file_large_ds_sid_0);
- VRFY((check == TRUE), "H5S_select_shape_same_test passed");
+ check = H5Sselect_shape_same(tv_ptr->small_ds_slice_sid, tv_ptr->file_large_ds_sid_0);
+ VRFY((check == TRUE), "H5Sselect_shape_same passed");
/* Read selection from disk */
-#if CONTIG_HS_DR_PIO_TEST__D2M_L2S__DEBUG
- HDfprintf(stdout, "%s:%d: start = %d %d %d %d %d.\n",
- fcnName, (int)(tv_ptr->mpi_rank),
- (int)(tv_ptr->start[0]), (int)(tv_ptr->start[1]),
- (int)(tv_ptr->start[2]), (int)(tv_ptr->start[3]),
+#if CONTIG_HS_DR_PIO_TEST__D2M_L2S__DEBUG
+ HDfprintf(stdout, "%s:%d: start = %d %d %d %d %d.\n",
+ fcnName, (int)(tv_ptr->mpi_rank),
+ (int)(tv_ptr->start[0]), (int)(tv_ptr->start[1]),
+ (int)(tv_ptr->start[2]), (int)(tv_ptr->start[3]),
(int)(tv_ptr->start[4]));
HDfprintf(stdout, "%s slice/file extent dims = %d/%d.\n",
fcnName,
@@ -981,7 +961,7 @@ contig_hs_dr_pio_test__d2m_l2s(struct hs_dr_pio_test_vars_t * tv_ptr)
mis_match = FALSE;
ptr_1 = tv_ptr->small_ds_slice_buf;
expected_value = (uint32_t)(
- (i * tv_ptr->edge_size * tv_ptr->edge_size *
+ (i * tv_ptr->edge_size * tv_ptr->edge_size *
tv_ptr->edge_size * tv_ptr->edge_size) +
(j * tv_ptr->edge_size * tv_ptr->edge_size * tv_ptr->edge_size) +
(k * tv_ptr->edge_size * tv_ptr->edge_size) +
@@ -1000,10 +980,10 @@ contig_hs_dr_pio_test__d2m_l2s(struct hs_dr_pio_test_vars_t * tv_ptr)
expected_value++;
}
- VRFY((mis_match == FALSE),
+ VRFY((mis_match == FALSE),
"small slice read from large ds data good.");
- (tv_ptr->tests_run)++;
+ (tv_ptr->tests_run)++;
}
l++;
@@ -1028,27 +1008,23 @@ contig_hs_dr_pio_test__d2m_l2s(struct hs_dr_pio_test_vars_t * tv_ptr)
/*-------------------------------------------------------------------------
- * Function: contig_hs_dr_pio_test__d2m_s2l()
- *
- * Purpose: Part two of a series of tests of I/O to/from hyperslab
- * selections of different rank in the parallel.
- *
- * Verify that we can read from disk correctly using
- * selections of different rank that H5S_select_shape_same()
- * views as being of the same shape.
+ * Function: contig_hs_dr_pio_test__d2m_s2l()
*
- * In this function, we test this by reading slices of the
- * on disk small data set into slices through the in memory
- * large data set, and verify that the correct data (and
- * only the correct data) is read.
+ * Purpose: Part two of a series of tests of I/O to/from hyperslab
+ * selections of different rank in the parallel case.
*
- * Return: void
+ * Verify that we can read from disk correctly using
+ * selections of different rank that H5Sselect_shape_same()
+ * views as being of the same shape.
*
- * Programmer: JRM -- 8/10/11
+ * In this function, we test this by reading slices of the
+ * on disk small data set into slices through the in memory
+ * large data set, and verify that the correct data (and
+ * only the correct data) is read.
*
- * Modifications:
+ * Return: void
*
- * None.
+ * Programmer: JRM -- 8/10/11
*
*-------------------------------------------------------------------------
*/
@@ -1058,25 +1034,25 @@ contig_hs_dr_pio_test__d2m_l2s(struct hs_dr_pio_test_vars_t * tv_ptr)
static void
contig_hs_dr_pio_test__d2m_s2l(struct hs_dr_pio_test_vars_t * tv_ptr)
{
-#if CONTIG_HS_DR_PIO_TEST__D2M_S2L__DEBUG
+#if CONTIG_HS_DR_PIO_TEST__D2M_S2L__DEBUG
const char *fcnName = "contig_hs_dr_pio_test__d2m_s2l()";
#endif /* CONTIG_HS_DR_PIO_TEST__D2M_S2L__DEBUG */
- hbool_t mis_match = FALSE;
- int i, j, k, l;
- size_t n;
- int mpi_rank; /* needed by the VRFY macro */
+ hbool_t mis_match = FALSE;
+ int i, j, k, l;
+ size_t n;
+ int mpi_rank; /* needed by the VRFY macro */
size_t start_index;
size_t stop_index;
- uint32_t expected_value;
+ uint32_t expected_value;
uint32_t * ptr_1;
htri_t check; /* Shape comparison return value */
- herr_t ret; /* Generic return value */
+ herr_t ret; /* Generic return value */
/* initialize the local copy of mpi_rank */
mpi_rank = tv_ptr->mpi_rank;
- /* Read slices of the on disk small data set into slices
- * through the in memory large data set, and verify that the correct
+ /* Read slices of the on disk small data set into slices
+ * through the in memory large data set, and verify that the correct
* data (and only the correct data) is read.
*/
@@ -1102,8 +1078,8 @@ contig_hs_dr_pio_test__d2m_s2l(struct hs_dr_pio_test_vars_t * tv_ptr)
VRFY((ret >= 0), "H5Sselect_hyperslab(file_small_ds_sid_0, set) suceeded");
-#if CONTIG_HS_DR_PIO_TEST__D2M_S2L__DEBUG
- HDfprintf(stdout,
+#if CONTIG_HS_DR_PIO_TEST__D2M_S2L__DEBUG
+ HDfprintf(stdout,
"%s reading slices of on disk small data set into slices of big data set.\n",
fcnName);
#endif /* CONTIG_HS_DR_PIO_TEST__D2M_S2L__DEBUG */
@@ -1131,11 +1107,11 @@ contig_hs_dr_pio_test__d2m_s2l(struct hs_dr_pio_test_vars_t * tv_ptr)
/* in serial versions of this test, we loop through all the dimensions
- * of the large data set that don't appear in the small data set.
+ * of the large data set that don't appear in the small data set.
*
- * However, in the parallel version, each process only works with that
- * slice of the large (and small) data set indicated by its rank -- hence
- * we set the most slowly changing index to mpi_rank, and don't itterate
+ * However, in the parallel version, each process only works with that
+ * slice of the large (and small) data set indicated by its rank -- hence
+ * we set the most slowly changing index to mpi_rank, and don't iterate
* over it.
*/
@@ -1149,9 +1125,9 @@ contig_hs_dr_pio_test__d2m_s2l(struct hs_dr_pio_test_vars_t * tv_ptr)
i = 0;
}
- /* since large_rank is at most PAR_SS_DR_MAX_RANK, no need to
+ /* since large_rank is at most PAR_SS_DR_MAX_RANK, no need to
* loop over it -- either we are setting i to mpi_rank, or
- * we are setting it to zero. It will not change during the
+ * we are setting it to zero. It will not change during the
* test.
*/
@@ -1175,7 +1151,7 @@ contig_hs_dr_pio_test__d2m_s2l(struct hs_dr_pio_test_vars_t * tv_ptr)
}
do {
- /* since small rank >= 2 and large_rank > small_rank, we
+ /* since small rank >= 2 and large_rank > small_rank, we
* have large_rank >= 3. Since PAR_SS_DR_MAX_RANK == 5
* (baring major re-orgaization), this gives us:
*
@@ -1211,24 +1187,23 @@ contig_hs_dr_pio_test__d2m_s2l(struct hs_dr_pio_test_vars_t * tv_ptr)
tv_ptr->stride_ptr,
tv_ptr->count_ptr,
tv_ptr->block_ptr);
- VRFY((ret != FAIL),
+ VRFY((ret != FAIL),
"H5Sselect_hyperslab(mem_large_ds_sid) succeeded");
- /* verify that H5S_select_shape_same() reports the two
+ /* verify that H5Sselect_shape_same() reports the two
* selections as having the same shape.
*/
- check = H5S_select_shape_same_test(tv_ptr->file_small_ds_sid_0,
- tv_ptr->mem_large_ds_sid);
- VRFY((check == TRUE), "H5S_select_shape_same_test passed");
+ check = H5Sselect_shape_same(tv_ptr->file_small_ds_sid_0, tv_ptr->mem_large_ds_sid);
+ VRFY((check == TRUE), "H5Sselect_shape_same passed");
/* Read selection from disk */
-#if CONTIG_HS_DR_PIO_TEST__D2M_S2L__DEBUG
- HDfprintf(stdout, "%s:%d: start = %d %d %d %d %d.\n",
- fcnName, (int)(tv_ptr->mpi_rank),
- (int)(tv_ptr->start[0]), (int)(tv_ptr->start[1]),
- (int)(tv_ptr->start[2]), (int)(tv_ptr->start[3]),
+#if CONTIG_HS_DR_PIO_TEST__D2M_S2L__DEBUG
+ HDfprintf(stdout, "%s:%d: start = %d %d %d %d %d.\n",
+ fcnName, (int)(tv_ptr->mpi_rank),
+ (int)(tv_ptr->start[0]), (int)(tv_ptr->start[1]),
+ (int)(tv_ptr->start[2]), (int)(tv_ptr->start[3]),
(int)(tv_ptr->start[4]));
HDfprintf(stdout, "%s:%d: mem/file extent dims = %d/%d.\n",
fcnName, tv_ptr->mpi_rank,
@@ -1250,7 +1225,7 @@ contig_hs_dr_pio_test__d2m_s2l(struct hs_dr_pio_test_vars_t * tv_ptr)
expected_value = (uint32_t)
((size_t)(tv_ptr->mpi_rank) * tv_ptr->small_ds_slice_size);
start_index = (size_t)(
- (i * tv_ptr->edge_size * tv_ptr->edge_size *
+ (i * tv_ptr->edge_size * tv_ptr->edge_size *
tv_ptr->edge_size * tv_ptr->edge_size) +
(j * tv_ptr->edge_size * tv_ptr->edge_size * tv_ptr->edge_size) +
(k * tv_ptr->edge_size * tv_ptr->edge_size) +
@@ -1283,7 +1258,7 @@ contig_hs_dr_pio_test__d2m_s2l(struct hs_dr_pio_test_vars_t * tv_ptr)
ptr_1++;
}
- VRFY((mis_match == FALSE),
+ VRFY((mis_match == FALSE),
"small slice read from large ds data good.");
(tv_ptr->tests_run)++;
@@ -1311,29 +1286,25 @@ contig_hs_dr_pio_test__d2m_s2l(struct hs_dr_pio_test_vars_t * tv_ptr)
/*-------------------------------------------------------------------------
- * Function: contig_hs_dr_pio_test__m2d_l2s()
- *
- * Purpose: Part three of a series of tests of I/O to/from hyperslab
- * selections of different rank in the parallel.
+ * Function: contig_hs_dr_pio_test__m2d_l2s()
*
- * Verify that we can write from memory to file using
- * selections of different rank that H5S_select_shape_same()
- * views as being of the same shape.
+ * Purpose: Part three of a series of tests of I/O to/from hyperslab
+ * selections of different rank in the parallel case.
*
- * Do this by writing small_rank - 1 dimensional slices from
- * the in memory large data set to the on disk small cube
- * dataset. After each write, read the slice of the small
- * dataset back from disk, and verify that it contains
- * the expected data. Verify that H5S_select_shape_same()
- * returns true on the memory and file selections.
+ * Verify that we can write from memory to file using
+ * selections of different rank that H5Sselect_shape_same()
+ * views as being of the same shape.
*
- * Return: void
+ * Do this by writing small_rank - 1 dimensional slices from
+ * the in memory large data set to the on disk small cube
+ * dataset. After each write, read the slice of the small
+ * dataset back from disk, and verify that it contains
+ * the expected data. Verify that H5Sselect_shape_same()
+ * returns true on the memory and file selections.
*
- * Programmer: JRM -- 8/10/11
+ * Return: void
*
- * Modifications:
- *
- * None.
+ * Programmer: JRM -- 8/10/11
*
*-------------------------------------------------------------------------
*/
@@ -1343,19 +1314,19 @@ contig_hs_dr_pio_test__d2m_s2l(struct hs_dr_pio_test_vars_t * tv_ptr)
static void
contig_hs_dr_pio_test__m2d_l2s(struct hs_dr_pio_test_vars_t * tv_ptr)
{
-#if CONTIG_HS_DR_PIO_TEST__M2D_L2S__DEBUG
+#if CONTIG_HS_DR_PIO_TEST__M2D_L2S__DEBUG
const char *fcnName = "contig_hs_dr_pio_test__m2d_l2s()";
#endif /* CONTIG_HS_DR_PIO_TEST__M2D_L2S__DEBUG */
- hbool_t mis_match = FALSE;
- int i, j, k, l;
- size_t n;
- int mpi_rank; /* needed by the VRFY macro */
+ hbool_t mis_match = FALSE;
+ int i, j, k, l;
+ size_t n;
+ int mpi_rank; /* needed by the VRFY macro */
size_t start_index;
size_t stop_index;
- uint32_t expected_value;
+ uint32_t expected_value;
uint32_t * ptr_1;
htri_t check; /* Shape comparison return value */
- herr_t ret; /* Generic return value */
+ herr_t ret; /* Generic return value */
/* initialize the local copy of mpi_rank */
mpi_rank = tv_ptr->mpi_rank;
@@ -1363,12 +1334,12 @@ contig_hs_dr_pio_test__m2d_l2s(struct hs_dr_pio_test_vars_t * tv_ptr)
/* now we go in the opposite direction, verifying that we can write
* from memory to file using selections of different rank that
- * H5S_select_shape_same() views as being of the same shape.
+ * H5Sselect_shape_same() views as being of the same shape.
*
- * Start by writing small_rank - 1 dimensional slices from the in memory large
- * data set to the on disk small cube dataset. After each write, read the
- * slice of the small dataset back from disk, and verify that it contains
- * the expected data. Verify that H5S_select_shape_same() returns true on
+ * Start by writing small_rank - 1 dimensional slices from the in memory large
+ * data set to the on disk small cube dataset. After each write, read the
+ * slice of the small dataset back from disk, and verify that it contains
+ * the expected data. Verify that H5Sselect_shape_same() returns true on
* the memory and file selections.
*/
@@ -1424,18 +1395,18 @@ contig_hs_dr_pio_test__m2d_l2s(struct hs_dr_pio_test_vars_t * tv_ptr)
HDmemset(tv_ptr->small_ds_buf_1, 0, sizeof(uint32_t) * tv_ptr->small_ds_size);
-#if CONTIG_HS_DR_PIO_TEST__M2D_L2S__DEBUG
- HDfprintf(stdout,
+#if CONTIG_HS_DR_PIO_TEST__M2D_L2S__DEBUG
+ HDfprintf(stdout,
"%s writing slices from big ds to slices of small ds on disk.\n",
fcnName);
#endif /* CONTIG_HS_DR_PIO_TEST__M2D_L2S__DEBUG */
/* in serial versions of this test, we loop through all the dimensions
- * of the large data set that don't appear in the small data set.
+ * of the large data set that don't appear in the small data set.
*
- * However, in the parallel version, each process only works with that
- * slice of the large (and small) data set indicated by its rank -- hence
- * we set the most slowly changing index to mpi_rank, and don't itterate
+ * However, in the parallel version, each process only works with that
+ * slice of the large (and small) data set indicated by its rank -- hence
+ * we set the most slowly changing index to mpi_rank, and don't iterate
* over it.
*/
@@ -1449,9 +1420,9 @@ contig_hs_dr_pio_test__m2d_l2s(struct hs_dr_pio_test_vars_t * tv_ptr)
i = 0;
}
- /* since large_rank is at most PAR_SS_DR_MAX_RANK, no need to
+ /* since large_rank is at most PAR_SS_DR_MAX_RANK, no need to
* loop over it -- either we are setting i to mpi_rank, or
- * we are setting it to zero. It will not change during the
+ * we are setting it to zero. It will not change during the
* test.
*/
@@ -1476,7 +1447,7 @@ contig_hs_dr_pio_test__m2d_l2s(struct hs_dr_pio_test_vars_t * tv_ptr)
}
do {
- /* since small rank >= 2 and large_rank > small_rank, we
+ /* since small rank >= 2 and large_rank > small_rank, we
* have large_rank >= 3. Since PAR_SS_DR_MAX_RANK == 5
* (baring major re-orgaization), this gives us:
*
@@ -1525,26 +1496,25 @@ contig_hs_dr_pio_test__m2d_l2s(struct hs_dr_pio_test_vars_t * tv_ptr)
tv_ptr->stride_ptr,
tv_ptr->count_ptr,
tv_ptr->block_ptr);
- VRFY((ret >= 0),
+ VRFY((ret >= 0),
"H5Sselect_hyperslab() mem_large_ds_sid succeeded.");
- /* verify that H5S_select_shape_same() reports the in
+ /* verify that H5Sselect_shape_same() reports the in
* memory slice through the cube selection and the
* on disk full square selections as having the same shape.
*/
- check = H5S_select_shape_same_test(tv_ptr->file_small_ds_sid_0,
- tv_ptr->mem_large_ds_sid);
- VRFY((check == TRUE), "H5S_select_shape_same_test passed.");
+ check = H5Sselect_shape_same(tv_ptr->file_small_ds_sid_0, tv_ptr->mem_large_ds_sid);
+ VRFY((check == TRUE), "H5Sselect_shape_same passed.");
- /* write the slice from the in memory large data set to the
+ /* write the slice from the in memory large data set to the
* slice of the on disk small dataset. */
-#if CONTIG_HS_DR_PIO_TEST__M2D_L2S__DEBUG
- HDfprintf(stdout, "%s:%d: start = %d %d %d %d %d.\n",
+#if CONTIG_HS_DR_PIO_TEST__M2D_L2S__DEBUG
+ HDfprintf(stdout, "%s:%d: start = %d %d %d %d %d.\n",
fcnName, (int)(tv_ptr->mpi_rank),
- (int)(tv_ptr->start[0]), (int)(tv_ptr->start[1]),
- (int)(tv_ptr->start[2]), (int)(tv_ptr->start[3]),
+ (int)(tv_ptr->start[0]), (int)(tv_ptr->start[1]),
+ (int)(tv_ptr->start[2]), (int)(tv_ptr->start[3]),
(int)(tv_ptr->start[4]));
HDfprintf(stdout, "%s:%d: mem/file extent dims = %d/%d.\n",
fcnName, tv_ptr->mpi_rank,
@@ -1576,7 +1546,7 @@ contig_hs_dr_pio_test__m2d_l2s(struct hs_dr_pio_test_vars_t * tv_ptr)
ptr_1 = tv_ptr->small_ds_buf_1;
expected_value = (uint32_t)(
- (i * tv_ptr->edge_size * tv_ptr->edge_size *
+ (i * tv_ptr->edge_size * tv_ptr->edge_size *
tv_ptr->edge_size * tv_ptr->edge_size) +
(j * tv_ptr->edge_size * tv_ptr->edge_size * tv_ptr->edge_size) +
(k * tv_ptr->edge_size * tv_ptr->edge_size) +
@@ -1611,7 +1581,7 @@ contig_hs_dr_pio_test__m2d_l2s(struct hs_dr_pio_test_vars_t * tv_ptr)
ptr_1++;
}
- VRFY((mis_match == FALSE),
+ VRFY((mis_match == FALSE),
"small slice write from large ds data good.");
(tv_ptr->tests_run)++;
@@ -1639,31 +1609,27 @@ contig_hs_dr_pio_test__m2d_l2s(struct hs_dr_pio_test_vars_t * tv_ptr)
/*-------------------------------------------------------------------------
- * Function: contig_hs_dr_pio_test__m2d_s2l()
- *
- * Purpose: Part four of a series of tests of I/O to/from hyperslab
- * selections of different rank in the parallel.
+ * Function: contig_hs_dr_pio_test__m2d_s2l()
*
- * Verify that we can write from memory to file using
- * selections of different rank that H5S_select_shape_same()
- * views as being of the same shape.
+ * Purpose: Part four of a series of tests of I/O to/from hyperslab
+ * selections of different rank in the parallel case.
*
- * Do this by writing the contents of the process's slice of
- * the in memory small data set to slices of the on disk
- * large data set. After each write, read the process's
- * slice of the large data set back into memory, and verify
- * that it contains the expected data.
+ * Verify that we can write from memory to file using
+ * selections of different rank that H5Sselect_shape_same()
+ * views as being of the same shape.
*
- * Verify that H5S_select_shape_same() returns true on the
- * memory and file selections.
+ * Do this by writing the contents of the process's slice of
+ * the in memory small data set to slices of the on disk
+ * large data set. After each write, read the process's
+ * slice of the large data set back into memory, and verify
+ * that it contains the expected data.
*
- * Return: void
+ * Verify that H5Sselect_shape_same() returns true on the
+ * memory and file selections.
*
- * Programmer: JRM -- 8/10/11
+ * Return: void
*
- * Modifications:
- *
- * None
+ * Programmer: JRM -- 8/10/11
*
*-------------------------------------------------------------------------
*/
@@ -1673,32 +1639,32 @@ contig_hs_dr_pio_test__m2d_l2s(struct hs_dr_pio_test_vars_t * tv_ptr)
static void
contig_hs_dr_pio_test__m2d_s2l(struct hs_dr_pio_test_vars_t * tv_ptr)
{
-#if CONTIG_HS_DR_PIO_TEST__M2D_S2L__DEBUG
+#if CONTIG_HS_DR_PIO_TEST__M2D_S2L__DEBUG
const char *fcnName = "contig_hs_dr_pio_test__m2d_s2l()";
#endif /* CONTIG_HS_DR_PIO_TEST__M2D_S2L__DEBUG */
- hbool_t mis_match = FALSE;
- int i, j, k, l;
- size_t n;
- int mpi_rank; /* needed by the VRFY macro */
+ hbool_t mis_match = FALSE;
+ int i, j, k, l;
+ size_t n;
+ int mpi_rank; /* needed by the VRFY macro */
size_t start_index;
size_t stop_index;
- uint32_t expected_value;
+ uint32_t expected_value;
uint32_t * ptr_1;
htri_t check; /* Shape comparison return value */
- herr_t ret; /* Generic return value */
+ herr_t ret; /* Generic return value */
/* initialize the local copy of mpi_rank */
mpi_rank = tv_ptr->mpi_rank;
- /* Now write the contents of the process's slice of the in memory
- * small data set to slices of the on disk large data set. After
+ /* Now write the contents of the process's slice of the in memory
+ * small data set to slices of the on disk large data set. After
* each write, read the process's slice of the large data set back
- * into memory, and verify that it contains the expected data.
- * Verify that H5S_select_shape_same() returns true on the memory
+ * into memory, and verify that it contains the expected data.
+ * Verify that H5Sselect_shape_same() returns true on the memory
* and file selections.
*/
- /* select the slice of the in memory small data set associated with
+ /* select the slice of the in memory small data set associated with
* the process's mpi rank.
*/
tv_ptr->start[0] = (hsize_t)(tv_ptr->mpi_rank);
@@ -1745,8 +1711,8 @@ contig_hs_dr_pio_test__m2d_s2l(struct hs_dr_pio_test_vars_t * tv_ptr)
/* zero out the in memory large ds */
HDmemset(tv_ptr->large_ds_buf_1, 0, sizeof(uint32_t) * tv_ptr->large_ds_size);
-#if CONTIG_HS_DR_PIO_TEST__M2D_S2L__DEBUG
- HDfprintf(stdout,
+#if CONTIG_HS_DR_PIO_TEST__M2D_S2L__DEBUG
+ HDfprintf(stdout,
"%s writing process slices of small ds to slices of large ds on disk.\n",
fcnName);
#endif /* CONTIG_HS_DR_PIO_TEST__M2D_S2L__DEBUG */
@@ -1760,9 +1726,9 @@ contig_hs_dr_pio_test__m2d_s2l(struct hs_dr_pio_test_vars_t * tv_ptr)
i = 0;
}
- /* since large_rank is at most PAR_SS_DR_MAX_RANK, no need to
+ /* since large_rank is at most PAR_SS_DR_MAX_RANK, no need to
* loop over it -- either we are setting i to mpi_rank, or
- * we are setting it to zero. It will not change during the
+ * we are setting it to zero. It will not change during the
* test.
*/
@@ -1786,7 +1752,7 @@ contig_hs_dr_pio_test__m2d_s2l(struct hs_dr_pio_test_vars_t * tv_ptr)
}
do {
- /* since small rank >= 2 and large_rank > small_rank, we
+ /* since small rank >= 2 and large_rank > small_rank, we
* have large_rank >= 3. Since PAR_SS_DR_MAX_RANK == 5
* (baring major re-orgaization), this gives us:
*
@@ -1802,18 +1768,18 @@ contig_hs_dr_pio_test__m2d_s2l(struct hs_dr_pio_test_vars_t * tv_ptr)
(tv_ptr->tests_skipped)++;
-#if CONTIG_HS_DR_PIO_TEST__M2D_S2L__DEBUG
+#if CONTIG_HS_DR_PIO_TEST__M2D_S2L__DEBUG
tv_ptr->start[0] = (hsize_t)i;
tv_ptr->start[1] = (hsize_t)j;
tv_ptr->start[2] = (hsize_t)k;
tv_ptr->start[3] = (hsize_t)l;
tv_ptr->start[4] = 0;
- HDfprintf(stdout,
- "%s:%d: skipping test with start = %d %d %d %d %d.\n",
+ HDfprintf(stdout,
+ "%s:%d: skipping test with start = %d %d %d %d %d.\n",
fcnName, (int)(tv_ptr->mpi_rank),
- (int)(tv_ptr->start[0]), (int)(tv_ptr->start[1]),
- (int)(tv_ptr->start[2]), (int)(tv_ptr->start[3]),
+ (int)(tv_ptr->start[0]), (int)(tv_ptr->start[1]),
+ (int)(tv_ptr->start[2]), (int)(tv_ptr->start[3]),
(int)(tv_ptr->start[4]));
HDfprintf(stdout, "%s:%d: mem/file extent dims = %d/%d.\n",
fcnName, tv_ptr->mpi_rank,
@@ -1857,28 +1823,27 @@ contig_hs_dr_pio_test__m2d_s2l(struct hs_dr_pio_test_vars_t * tv_ptr)
tv_ptr->stride_ptr,
tv_ptr->count_ptr,
tv_ptr->block_ptr);
- VRFY((ret != FAIL),
+ VRFY((ret != FAIL),
"H5Sselect_hyperslab() target large ds slice succeeded");
- /* verify that H5S_select_shape_same() reports the in
+ /* verify that H5Sselect_shape_same() reports the in
* memory small data set slice selection and the
* on disk slice through the large data set selection
* as having the same shape.
*/
- check = H5S_select_shape_same_test(tv_ptr->mem_small_ds_sid,
- tv_ptr->file_large_ds_sid_0);
- VRFY((check == TRUE), "H5S_select_shape_same_test passed");
+ check = H5Sselect_shape_same(tv_ptr->mem_small_ds_sid, tv_ptr->file_large_ds_sid_0);
+ VRFY((check == TRUE), "H5Sselect_shape_same passed");
- /* write the small data set slice from memory to the
- * target slice of the disk data set
+ /* write the small data set slice from memory to the
+ * target slice of the disk data set
*/
-#if CONTIG_HS_DR_PIO_TEST__M2D_S2L__DEBUG
- HDfprintf(stdout, "%s:%d: start = %d %d %d %d %d.\n",
+#if CONTIG_HS_DR_PIO_TEST__M2D_S2L__DEBUG
+ HDfprintf(stdout, "%s:%d: start = %d %d %d %d %d.\n",
fcnName, (int)(tv_ptr->mpi_rank),
- (int)(tv_ptr->start[0]), (int)(tv_ptr->start[1]),
- (int)(tv_ptr->start[2]), (int)(tv_ptr->start[3]),
+ (int)(tv_ptr->start[0]), (int)(tv_ptr->start[1]),
+ (int)(tv_ptr->start[2]), (int)(tv_ptr->start[3]),
(int)(tv_ptr->start[4]));
HDfprintf(stdout, "%s:%d: mem/file extent dims = %d/%d.\n",
fcnName, tv_ptr->mpi_rank,
@@ -1891,11 +1856,11 @@ contig_hs_dr_pio_test__m2d_s2l(struct hs_dr_pio_test_vars_t * tv_ptr)
tv_ptr->file_large_ds_sid_0,
tv_ptr->xfer_plist,
tv_ptr->small_ds_buf_0);
- VRFY((ret != FAIL),
+ VRFY((ret != FAIL),
"H5Dwrite of small ds slice to large ds succeeded");
- /* read this processes slice on the on disk large
+ /* read this process's slice of the on disk large
* data set into memory.
*/
@@ -1905,7 +1870,7 @@ contig_hs_dr_pio_test__m2d_s2l(struct hs_dr_pio_test_vars_t * tv_ptr)
tv_ptr->file_large_ds_process_slice_sid,
tv_ptr->xfer_plist,
tv_ptr->large_ds_buf_1);
- VRFY((ret != FAIL),
+ VRFY((ret != FAIL),
"H5Dread() of process slice of large ds succeeded");
@@ -1914,12 +1879,12 @@ contig_hs_dr_pio_test__m2d_s2l(struct hs_dr_pio_test_vars_t * tv_ptr)
*/
ptr_1 = tv_ptr->large_ds_buf_1;
expected_value = (uint32_t)
- ((size_t)(tv_ptr->mpi_rank) * tv_ptr->small_ds_slice_size);
+ ((size_t)(tv_ptr->mpi_rank) * tv_ptr->small_ds_slice_size);
start_index = (size_t)
- ((i * tv_ptr->edge_size * tv_ptr->edge_size *
+ ((i * tv_ptr->edge_size * tv_ptr->edge_size *
tv_ptr->edge_size * tv_ptr->edge_size) +
- (j * tv_ptr->edge_size * tv_ptr->edge_size *
+ (j * tv_ptr->edge_size * tv_ptr->edge_size *
tv_ptr->edge_size) +
(k * tv_ptr->edge_size * tv_ptr->edge_size) +
(l * tv_ptr->edge_size));
@@ -1951,7 +1916,7 @@ contig_hs_dr_pio_test__m2d_s2l(struct hs_dr_pio_test_vars_t * tv_ptr)
ptr_1++;
}
- VRFY((mis_match == FALSE),
+ VRFY((mis_match == FALSE),
"small ds slice write to large ds slice data good.");
(tv_ptr->tests_run)++;
@@ -1979,29 +1944,14 @@ contig_hs_dr_pio_test__m2d_s2l(struct hs_dr_pio_test_vars_t * tv_ptr)
/*-------------------------------------------------------------------------
- * Function: contig_hs_dr_pio_test__run_test()
- *
- * Purpose: Test I/O to/from hyperslab selections of different rank in
- * the parallel.
+ * Function: contig_hs_dr_pio_test__run_test()
*
- * Return: void
+ * Purpose: Test I/O to/from hyperslab selections of different rank in
+ * the parallel case.
*
- * Programmer: JRM -- 9/18/09
+ * Return: void
*
- * Modifications:
- *
- * JRM -- 9/16/10
- * Added express_test parameter. Use it to control whether
- * we set up the chunks so that no chunk is shared between
- * processes, and also whether we set an alignment when we
- * create the test file.
- *
- * JRM -- 8/11/11
- * Refactored function heavily & broke it into six functions.
- * Added the skips_ptr, max_skips, total_tests_ptr,
- * tests_run_ptr, and tests_skiped_ptr parameters to support
- * skipping portions of the test according to the express
- * test value.
+ * Programmer: JRM -- 9/18/09
*
*-------------------------------------------------------------------------
*/
@@ -2023,13 +1973,12 @@ contig_hs_dr_pio_test__run_test(const int test_num,
int64_t * tests_run_ptr,
int64_t * tests_skipped_ptr)
{
-#if CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG
+#if CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG
const char *fcnName = "contig_hs_dr_pio_test__run_test()";
#endif /* CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG */
- int mpi_rank;
- struct hs_dr_pio_test_vars_t test_vars =
+ struct hs_dr_pio_test_vars_t test_vars =
{
- /* int mpi_size = */ -1,
+ /* int mpi_size = */ -1,
/* int mpi_rank = */ -1,
/* MPI_Comm mpi_comm = */ MPI_COMM_NULL,
/* MPI_Inf mpi_info = */ MPI_INFO_NULL,
@@ -2045,7 +1994,7 @@ contig_hs_dr_pio_test__run_test(const int test_num,
/* uint32_t * small_ds_buf_2 = */ NULL,
/* uint32_t * small_ds_slice_buf = */ NULL,
/* uint32_t * large_ds_buf_0 = */ NULL,
- /* uint32_t * large_ds_buf_1 = */ NULL,
+ /* uint32_t * large_ds_buf_1 = */ NULL,
/* uint32_t * large_ds_buf_2 = */ NULL,
/* uint32_t * large_ds_slice_buf = */ NULL,
/* int small_ds_offset = */ -1,
@@ -2082,8 +2031,8 @@ contig_hs_dr_pio_test__run_test(const int test_num,
/* hsize_t * stride_ptr = */ NULL,
/* hsize_t * count_ptr = */ NULL,
/* hsize_t * block_ptr = */ NULL,
- /* int skips = */ 0,
- /* int max_skips = */ 0,
+ /* int skips = */ 0,
+ /* int max_skips = */ 0,
/* int64_t total_tests = */ 0,
/* int64_t tests_run = */ 0,
/* int64_t tests_skipped = */ 0
@@ -2094,14 +2043,11 @@ contig_hs_dr_pio_test__run_test(const int test_num,
small_rank, large_rank, use_collective_io,
dset_type, express_test, tv_ptr);
- /* initialize the local copy of mpi_rank */
- mpi_rank = tv_ptr->mpi_rank;
-
/* initialize skips & max_skips */
tv_ptr->skips = *skips_ptr;
tv_ptr->max_skips = max_skips;
-#if CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG
+#if CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG
if ( MAINPROCESS ) {
HDfprintf(stdout, "test %d: small rank = %d, large rank = %d.\n",
test_num, small_rank, large_rank);
@@ -2110,15 +2056,15 @@ contig_hs_dr_pio_test__run_test(const int test_num,
#endif /* CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG */
/* first, verify that we can read from disk correctly using selections
- * of different rank that H5S_select_shape_same() views as being of the
+ * of different rank that H5Sselect_shape_same() views as being of the
* same shape.
*
- * Start by reading small_rank - 1 dimensional slice from the on disk
- * large cube, and verifying that the data read is correct. Verify that
- * H5S_select_shape_same() returns true on the memory and file selections.
+ * Start by reading small_rank - 1 dimensional slice from the on disk
+ * large cube, and verifying that the data read is correct. Verify that
+ * H5Sselect_shape_same() returns true on the memory and file selections.
*/
-#if CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG
+#if CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG
if ( MAINPROCESS ) {
HDfprintf(stdout, "test %d: running contig_hs_dr_pio_test__d2m_l2s.\n", test_num);
}
@@ -2126,12 +2072,12 @@ contig_hs_dr_pio_test__run_test(const int test_num,
contig_hs_dr_pio_test__d2m_l2s(tv_ptr);
- /* Second, read slices of the on disk small data set into slices
- * through the in memory large data set, and verify that the correct
+ /* Second, read slices of the on disk small data set into slices
+ * through the in memory large data set, and verify that the correct
* data (and only the correct data) is read.
*/
-#if CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG
+#if CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG
if ( MAINPROCESS ) {
HDfprintf(stdout, "test %d: running contig_hs_dr_pio_test__d2m_s2l.\n", test_num);
}
@@ -2141,16 +2087,16 @@ contig_hs_dr_pio_test__run_test(const int test_num,
/* now we go in the opposite direction, verifying that we can write
* from memory to file using selections of different rank that
- * H5S_select_shape_same() views as being of the same shape.
+ * H5Sselect_shape_same() views as being of the same shape.
*
* Start by writing small_rank - 1 D slices from the in memory large data
- * set to the on disk small cube dataset. After each write, read the
- * slice of the small dataset back from disk, and verify that it contains
- * the expected data. Verify that H5S_select_shape_same() returns true on
+ * set to the on disk small cube dataset. After each write, read the
+ * slice of the small dataset back from disk, and verify that it contains
+ * the expected data. Verify that H5Sselect_shape_same() returns true on
* the memory and file selections.
*/
-#if CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG
+#if CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG
if ( MAINPROCESS ) {
HDfprintf(stdout, "test %d: running contig_hs_dr_pio_test__m2d_l2s.\n", test_num);
}
@@ -2158,25 +2104,25 @@ contig_hs_dr_pio_test__run_test(const int test_num,
contig_hs_dr_pio_test__m2d_l2s(tv_ptr);
- /* Now write the contents of the process's slice of the in memory
- * small data set to slices of the on disk large data set. After
+ /* Now write the contents of the process's slice of the in memory
+ * small data set to slices of the on disk large data set. After
* each write, read the process's slice of the large data set back
- * into memory, and verify that it contains the expected data.
- * Verify that H5S_select_shape_same() returns true on the memory
+ * into memory, and verify that it contains the expected data.
+ * Verify that H5Sselect_shape_same() returns true on the memory
* and file selections.
*/
-#if CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG
+#if CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG
if ( MAINPROCESS ) {
HDfprintf(stdout, "test %d: running contig_hs_dr_pio_test__m2d_s2l.\n", test_num);
}
#endif /* CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG */
contig_hs_dr_pio_test__m2d_s2l(tv_ptr);
-#if CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG
+#if CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG
if ( MAINPROCESS ) {
- HDfprintf(stdout,
- "test %d: Subtests complete -- tests run/skipped/total = %lld/%lld/%lld.\n",
+ HDfprintf(stdout,
+ "test %d: Subtests complete -- tests run/skipped/total = %lld/%lld/%lld.\n",
test_num, (long long)(tv_ptr->tests_run), (long long)(tv_ptr->tests_skipped),
(long long)(tv_ptr->total_tests));
}
@@ -2184,7 +2130,7 @@ contig_hs_dr_pio_test__run_test(const int test_num,
hs_dr_pio_test__takedown(tv_ptr);
-#if CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG
+#if CONTIG_HS_DR_PIO_TEST__RUN_TEST__DEBUG
if ( MAINPROCESS ) {
HDfprintf(stdout, "test %d: Takedown complete.\n", test_num);
}
@@ -2201,58 +2147,44 @@ contig_hs_dr_pio_test__run_test(const int test_num,
/*-------------------------------------------------------------------------
- * Function: contig_hs_dr_pio_test(ShapeSameTestMethods sstest_type)
- *
- * Purpose: Test I/O to/from hyperslab selections of different rank in
- * the parallel case.
- *
- * Return: void
+ * Function: contig_hs_dr_pio_test(ShapeSameTestMethods sstest_type)
*
- * Programmer: JRM -- 9/18/09
+ * Purpose: Test I/O to/from hyperslab selections of different rank in
+ * the parallel case.
*
- * Modifications:
+ * Return: void
*
- * Modified function to take a sample of the run times
- * of the different tests, and skip some of them if
- * run times are too long.
- *
- * We need to do this because Lustre runns very slowly
- * if two or more processes are banging on the same
- * block of memory.
- * JRM -- 9/10/10
- * Break this one big test into 4 smaller tests according
- * to {independent,collective}x{contigous,chunked} datasets.
- * AKC -- 2010/01/14
+ * Programmer: JRM -- 9/18/09
*
*-------------------------------------------------------------------------
*/
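The four sub-cases dispatched below differ only in chunk_edge_size (contiguous vs. chunked layout) and in the use_collective_io flag passed down to the sub-tests. A minimal sketch of how such a flag typically maps onto a transfer property list, assuming the file was opened through the MPI-IO driver; the helper name is illustrative, not code from this test:

static hid_t
make_xfer_plist(hbool_t use_collective_io)
{
    hid_t xfer_plist = H5Pcreate(H5P_DATASET_XFER);

    /* request collective or independent MPI-IO for raw data transfers */
    H5Pset_dxpl_mpio(xfer_plist,
                     use_collective_io ? H5FD_MPIO_COLLECTIVE
                                       : H5FD_MPIO_INDEPENDENT);

    return xfer_plist; /* caller must H5Pclose() it */
}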
#define CONTIG_HS_DR_PIO_TEST__DEBUG 0
-void
+static void
contig_hs_dr_pio_test(ShapeSameTestMethods sstest_type)
{
int express_test;
int local_express_test;
int mpi_rank = -1;
int mpi_size;
- int test_num = 0;
- int edge_size;
- int chunk_edge_size = 0;
- int small_rank;
- int large_rank;
- int mpi_result;
- int skips = 0;
- int max_skips = 0;
- /* The following table list the number of sub-tests skipped between
- * each test that is actually executed as a function of the express
+ int test_num = 0;
+ int edge_size;
+ int chunk_edge_size = 0;
+ int small_rank;
+ int large_rank;
+ int mpi_result;
+ int skips = 0;
+ int max_skips = 0;
+    /* The following table lists the number of sub-tests skipped between
+ * each test that is actually executed as a function of the express
* test level. Note that any value in excess of 4880 will cause all
* sub tests to be skipped.
*/
int max_skips_tbl[4] = {0, 4, 64, 1024};
- hid_t dset_type = H5T_NATIVE_UINT;
- int64_t total_tests = 0;
- int64_t tests_run = 0;
+ hid_t dset_type = H5T_NATIVE_UINT;
+ int64_t total_tests = 0;
+ int64_t tests_run = 0;
int64_t tests_skipped = 0;
HDcompile_assert(sizeof(uint32_t) == sizeof(unsigned));
@@ -2295,7 +2227,7 @@ contig_hs_dr_pio_test(ShapeSameTestMethods sstest_type)
chunk_edge_size,
small_rank,
large_rank,
- FALSE,
+ FALSE,
dset_type,
express_test,
&skips,
@@ -2316,7 +2248,7 @@ contig_hs_dr_pio_test(ShapeSameTestMethods sstest_type)
chunk_edge_size,
small_rank,
large_rank,
- TRUE,
+ TRUE,
dset_type,
express_test,
&skips,
@@ -2337,7 +2269,7 @@ contig_hs_dr_pio_test(ShapeSameTestMethods sstest_type)
chunk_edge_size,
small_rank,
large_rank,
- FALSE,
+ FALSE,
dset_type,
express_test,
&skips,
@@ -2358,7 +2290,7 @@ contig_hs_dr_pio_test(ShapeSameTestMethods sstest_type)
chunk_edge_size,
small_rank,
large_rank,
- TRUE,
+ TRUE,
dset_type,
express_test,
&skips,
@@ -2377,7 +2309,7 @@ contig_hs_dr_pio_test(ShapeSameTestMethods sstest_type)
} /* end of switch(sstest_type) */
#if CONTIG_HS_DR_PIO_TEST__DEBUG
if ( ( MAINPROCESS ) && ( tests_skipped > 0 ) ) {
- HDfprintf(stdout, " run/skipped/total = %lld/%lld/%lld.\n",
+ HDfprintf(stdout, " run/skipped/total = %lld/%lld/%lld.\n",
tests_run, tests_skipped, total_tests);
}
#endif /* CONTIG_HS_DR_PIO_TEST__DEBUG */
@@ -2385,7 +2317,7 @@ contig_hs_dr_pio_test(ShapeSameTestMethods sstest_type)
}
if ( ( MAINPROCESS ) && ( tests_skipped > 0 ) ) {
- HDfprintf(stdout, " %lld of %lld subtests skipped to expedite testing.\n",
+ HDfprintf(stdout, " %lld of %lld subtests skipped to expedite testing.\n",
tests_skipped, total_tests);
}
@@ -2396,24 +2328,24 @@ contig_hs_dr_pio_test(ShapeSameTestMethods sstest_type)
/****************************************************************
**
-** ckrbrd_hs_dr_pio_test__slct_ckrbrd():
-** Given a data space of tgt_rank, and dimensions:
+** ckrbrd_hs_dr_pio_test__slct_ckrbrd():
+** Given a dataspace of tgt_rank, and dimensions:
**
-** (mpi_size + 1), edge_size, ... , edge_size
+** (mpi_size + 1), edge_size, ... , edge_size
**
-** edge_size, and a checker_edge_size, select a checker
-** board selection of a sel_rank (sel_rank < tgt_rank)
-** dimensional slice through the data space parallel to the
+** edge_size, and a checker_edge_size, select a checker
+** board selection of a sel_rank (sel_rank < tgt_rank)
+** dimensional slice through the dataspace parallel to the
 ** sel_rank fastest changing indices, with origin (in the
-** higher indicies) as indicated by the start array.
+**  higher indices) as indicated by the start array.
**
-** Note that this function, like all its relatives, is
-** hard coded to presume a maximum data space rank of 5.
-** While this maximum is declared as a constant, increasing
-** it will require extensive coding in addition to changing
+** Note that this function, like all its relatives, is
+** hard coded to presume a maximum dataspace rank of 5.
+** While this maximum is declared as a constant, increasing
+** it will require extensive coding in addition to changing
** the value of the constant.
**
-** JRM -- 10/8/09
+** JRM -- 10/8/09
**
****************************************************************/
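A condensed 2-D rendering of the selection algorithm described above, with an invented helper name and assuming edge >= 2 * c: the checkerboard is the union of two regular block patterns offset by (0,0) and (c,c), clipped back to the extent afterwards -- the same SET/OR/AND sequence the rank-5-capable code below performs.

static void
select_checkerboard_2d(hid_t sid, hsize_t edge, hsize_t c)
{
    hsize_t start[2], stride[2], count[2], block[2];
    hsize_t n = (edge + (2 * c) - 1) / (2 * c); /* checker pairs per dim, rounded up */

    stride[0] = stride[1] = 2 * c;
    count[0]  = count[1]  = n;
    block[0]  = block[1]  = c;

    /* checkers whose block coordinates are (even, even) */
    start[0] = start[1] = 0;
    H5Sselect_hyperslab(sid, H5S_SELECT_SET, start, stride, count, block);

    /* checkers whose block coordinates are (odd, odd) */
    start[0] = start[1] = c;
    H5Sselect_hyperslab(sid, H5S_SELECT_OR, start, stride, count, block);

    /* clip the rounded-up selection back to the dataspace proper */
    start[0]  = start[1]  = 0;
    stride[0] = stride[1] = edge;
    count[0]  = count[1]  = 1;
    block[0]  = block[1]  = edge;
    H5Sselect_hyperslab(sid, H5S_SELECT_AND, start, stride, count, block);
}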
@@ -2428,22 +2360,22 @@ ckrbrd_hs_dr_pio_test__slct_ckrbrd(const int mpi_rank,
const int sel_rank,
hsize_t sel_start[])
{
-#if CKRBRD_HS_DR_PIO_TEST__SELECT_CHECKER_BOARD__DEBUG
- const char * fcnName = "ckrbrd_hs_dr_pio_test__slct_ckrbrd():";
-#endif
- hbool_t first_selection = TRUE;
+#if CKRBRD_HS_DR_PIO_TEST__SELECT_CHECKER_BOARD__DEBUG
+ const char * fcnName = "ckrbrd_hs_dr_pio_test__slct_ckrbrd():";
+#endif
+ hbool_t first_selection = TRUE;
int i, j, k, l, m;
- int n_cube_offset;
- int sel_offset;
- const int test_max_rank = PAR_SS_DR_MAX_RANK; /* must update code if */
+ int n_cube_offset;
+ int sel_offset;
+ const int test_max_rank = PAR_SS_DR_MAX_RANK; /* must update code if */
/* this changes */
- hsize_t base_count;
+ hsize_t base_count;
hsize_t offset_count;
- hsize_t start[PAR_SS_DR_MAX_RANK];
- hsize_t stride[PAR_SS_DR_MAX_RANK];
- hsize_t count[PAR_SS_DR_MAX_RANK];
- hsize_t block[PAR_SS_DR_MAX_RANK];
- herr_t ret; /* Generic return value */
+ hsize_t start[PAR_SS_DR_MAX_RANK];
+ hsize_t stride[PAR_SS_DR_MAX_RANK];
+ hsize_t count[PAR_SS_DR_MAX_RANK];
+ hsize_t block[PAR_SS_DR_MAX_RANK];
+ herr_t ret; /* Generic return value */
HDassert( edge_size >= 6 );
HDassert( 0 < checker_edge_size );
@@ -2460,14 +2392,14 @@ ckrbrd_hs_dr_pio_test__slct_ckrbrd(const int mpi_rank,
HDassert( n_cube_offset >= 0 );
HDassert( n_cube_offset <= sel_offset );
-#if CKRBRD_HS_DR_PIO_TEST__SELECT_CHECKER_BOARD__DEBUG
+#if CKRBRD_HS_DR_PIO_TEST__SELECT_CHECKER_BOARD__DEBUG
HDfprintf(stdout, "%s:%d: edge_size/checker_edge_size = %d/%d\n",
fcnName, mpi_rank, edge_size, checker_edge_size);
- HDfprintf(stdout, "%s:%d: sel_rank/sel_offset = %d/%d.\n",
+ HDfprintf(stdout, "%s:%d: sel_rank/sel_offset = %d/%d.\n",
fcnName, mpi_rank, sel_rank, sel_offset);
- HDfprintf(stdout, "%s:%d: tgt_rank/n_cube_offset = %d/%d.\n",
+ HDfprintf(stdout, "%s:%d: tgt_rank/n_cube_offset = %d/%d.\n",
fcnName, mpi_rank, tgt_rank, n_cube_offset);
-#endif /* CKRBRD_HS_DR_PIO_TEST__SELECT_CHECKER_BOARD__DEBUG */
+#endif /* CKRBRD_HS_DR_PIO_TEST__SELECT_CHECKER_BOARD__DEBUG */
/* First, compute the base count (which assumes start == 0
* for the associated offset) and offset_count (which
@@ -2497,7 +2429,7 @@ ckrbrd_hs_dr_pio_test__slct_ckrbrd(const int mpi_rank,
}
/* Now set up the stride and block arrays, and portions of the start
- * and count arrays that will not be altered during the selection of
+ * and count arrays that will not be altered during the selection of
* the checker board.
*/
i = 0;
@@ -2529,7 +2461,7 @@ ckrbrd_hs_dr_pio_test__slct_ckrbrd(const int mpi_rank,
i++;
}
-
+
i = 0;
do {
if ( 0 >= sel_offset ) {
@@ -2548,7 +2480,7 @@ ckrbrd_hs_dr_pio_test__slct_ckrbrd(const int mpi_rank,
}
j = 0;
- do {
+ do {
if ( 1 >= sel_offset ) {
if ( j == 0 ) {
@@ -2617,62 +2549,62 @@ ckrbrd_hs_dr_pio_test__slct_ckrbrd(const int mpi_rank,
if ( ((i + j + k + l + m) % 2) == 0 ) {
-#if CKRBRD_HS_DR_PIO_TEST__SELECT_CHECKER_BOARD__DEBUG
- HDfprintf(stdout, "%s%d: *** first_selection = %d ***\n",
+#if CKRBRD_HS_DR_PIO_TEST__SELECT_CHECKER_BOARD__DEBUG
+ HDfprintf(stdout, "%s%d: *** first_selection = %d ***\n",
fcnName, mpi_rank, (int)first_selection);
HDfprintf(stdout, "%s:%d: i/j/k/l/m = %d/%d/%d/%d/%d\n",
fcnName, mpi_rank, i, j, k, l, m);
- HDfprintf(stdout,
- "%s:%d: start = %d %d %d %d %d.\n",
- fcnName, mpi_rank, (int)start[0], (int)start[1],
+ HDfprintf(stdout,
+ "%s:%d: start = %d %d %d %d %d.\n",
+ fcnName, mpi_rank, (int)start[0], (int)start[1],
(int)start[2], (int)start[3], (int)start[4]);
- HDfprintf(stdout,
- "%s:%d: stride = %d %d %d %d %d.\n",
- fcnName, mpi_rank, (int)stride[0], (int)stride[1],
+ HDfprintf(stdout,
+ "%s:%d: stride = %d %d %d %d %d.\n",
+ fcnName, mpi_rank, (int)stride[0], (int)stride[1],
(int)stride[2], (int)stride[3], (int)stride[4]);
- HDfprintf(stdout,
- "%s:%d: count = %d %d %d %d %d.\n",
- fcnName, mpi_rank, (int)count[0], (int)count[1],
+ HDfprintf(stdout,
+ "%s:%d: count = %d %d %d %d %d.\n",
+ fcnName, mpi_rank, (int)count[0], (int)count[1],
(int)count[2], (int)count[3], (int)count[4]);
- HDfprintf(stdout,
- "%s:%d: block = %d %d %d %d %d.\n",
- fcnName, mpi_rank, (int)block[0], (int)block[1],
+ HDfprintf(stdout,
+ "%s:%d: block = %d %d %d %d %d.\n",
+ fcnName, mpi_rank, (int)block[0], (int)block[1],
(int)block[2], (int)block[3], (int)block[4]);
- HDfprintf(stdout, "%s:%d: n-cube extent dims = %d.\n",
+ HDfprintf(stdout, "%s:%d: n-cube extent dims = %d.\n",
fcnName, mpi_rank,
H5Sget_simple_extent_ndims(tgt_sid));
- HDfprintf(stdout, "%s:%d: selection rank = %d.\n",
+ HDfprintf(stdout, "%s:%d: selection rank = %d.\n",
fcnName, mpi_rank, sel_rank);
#endif
if ( first_selection ) {
- first_selection = FALSE;
+ first_selection = FALSE;
ret = H5Sselect_hyperslab
(
- tgt_sid,
+ tgt_sid,
H5S_SELECT_SET,
- &(start[n_cube_offset]),
- &(stride[n_cube_offset]),
- &(count[n_cube_offset]),
+ &(start[n_cube_offset]),
+ &(stride[n_cube_offset]),
+ &(count[n_cube_offset]),
&(block[n_cube_offset])
);
-
+
VRFY((ret != FAIL), "H5Sselect_hyperslab(SET) succeeded");
} else {
ret = H5Sselect_hyperslab
(
- tgt_sid,
+ tgt_sid,
H5S_SELECT_OR,
- &(start[n_cube_offset]),
- &(stride[n_cube_offset]),
- &(count[n_cube_offset]),
+ &(start[n_cube_offset]),
+ &(stride[n_cube_offset]),
+ &(count[n_cube_offset]),
&(block[n_cube_offset])
);
-
+
VRFY((ret != FAIL), "H5Sselect_hyperslab(OR) succeeded");
}
@@ -2704,12 +2636,12 @@ ckrbrd_hs_dr_pio_test__slct_ckrbrd(const int mpi_rank,
} while ( ( i <= 1 ) &&
( 0 >= sel_offset ) );
-#if CKRBRD_HS_DR_PIO_TEST__SELECT_CHECKER_BOARD__DEBUG
+#if CKRBRD_HS_DR_PIO_TEST__SELECT_CHECKER_BOARD__DEBUG
HDfprintf(stdout, "%s%d: H5Sget_select_npoints(tgt_sid) = %d.\n",
fcnName, mpi_rank, (int)H5Sget_select_npoints(tgt_sid));
#endif /* CKRBRD_HS_DR_PIO_TEST__SELECT_CHECKER_BOARD__DEBUG */
- /* Clip the selection back to the data space proper. */
+ /* Clip the selection back to the dataspace proper. */
for ( i = 0; i < test_max_rank; i++ ) {
@@ -2724,7 +2656,7 @@ ckrbrd_hs_dr_pio_test__slct_ckrbrd(const int mpi_rank,
VRFY((ret != FAIL), "H5Sselect_hyperslab(AND) succeeded");
-#if CKRBRD_HS_DR_PIO_TEST__SELECT_CHECKER_BOARD__DEBUG
+#if CKRBRD_HS_DR_PIO_TEST__SELECT_CHECKER_BOARD__DEBUG
HDfprintf(stdout, "%s%d: H5Sget_select_npoints(tgt_sid) = %d.\n",
fcnName, mpi_rank, (int)H5Sget_select_npoints(tgt_sid));
HDfprintf(stdout, "%s%d: done.\n", fcnName, mpi_rank);
@@ -2737,57 +2669,57 @@ ckrbrd_hs_dr_pio_test__slct_ckrbrd(const int mpi_rank,
/****************************************************************
**
-** ckrbrd_hs_dr_pio_test__verify_data():
+** ckrbrd_hs_dr_pio_test__verify_data():
**
-** Examine the supplied buffer to see if it contains the
-** expected data. Return TRUE if it does, and FALSE
+** Examine the supplied buffer to see if it contains the
+** expected data. Return TRUE if it does, and FALSE
** otherwise.
**
-** The supplied buffer is presumed to this process's slice
-** of the target data set. Each such slice will be an
-** n-cube of rank (rank -1) and the supplied edge_size with
-** origin (mpi_rank, 0, ... , 0) in the target data set.
+**  The supplied buffer is presumed to be this process's slice
+** of the target data set. Each such slice will be an
+** n-cube of rank (rank -1) and the supplied edge_size with
+** origin (mpi_rank, 0, ... , 0) in the target data set.
**
-** Further, the buffer is presumed to be the result of reading
-** or writing a checker board selection of an m (1 <= m <
+** Further, the buffer is presumed to be the result of reading
+** or writing a checker board selection of an m (1 <= m <
 ** rank) dimensional slice through this process's slice
-** of the target data set. Also, this slice must be parallel
-** to the fastest changing indicies.
+** of the target data set. Also, this slice must be parallel
+**  to the fastest changing indices.
**
-** It is further presumed that the buffer was zeroed before
-** the read/write, and that the full target data set (i.e.
-** the buffer/data set for all processes) was initialized
-** with the natural numbers listed in order from the origin
-** along the fastest changing axis.
+** It is further presumed that the buffer was zeroed before
+** the read/write, and that the full target data set (i.e.
+** the buffer/data set for all processes) was initialized
+** with the natural numbers listed in order from the origin
+** along the fastest changing axis.
**
** Thus for a 20x10x10 dataset, the value stored in location
-** (x, y, z) (assuming that z is the fastest changing index
-** and x the slowest) is assumed to be:
+** (x, y, z) (assuming that z is the fastest changing index
+** and x the slowest) is assumed to be:
**
-** (10 * 10 * x) + (10 * y) + z
+** (10 * 10 * x) + (10 * y) + z
**
-** Further, supposing that this is process 10, this process's
-** slice of the dataset would be a 10 x 10 2-cube with origin
-** (10, 0, 0) in the data set, and would be initialize (prior
-** to the checkerboard selection) as follows:
+** Further, supposing that this is process 10, this process's
+** slice of the dataset would be a 10 x 10 2-cube with origin
+**  (10, 0, 0) in the data set, and would be initialized (prior
+** to the checkerboard selection) as follows:
**
-** 1000, 1001, 1002, ... 1008, 1009
-** 1010, 1011, 1012, ... 1018, 1019
-** . . . . .
-** . . . . .
-** . . . . .
-** 1090, 1091, 1092, ... 1098, 1099
+** 1000, 1001, 1002, ... 1008, 1009
+** 1010, 1011, 1012, ... 1018, 1019
+** . . . . .
+** . . . . .
+** . . . . .
+** 1090, 1091, 1092, ... 1098, 1099
**
-** In the case of a read from the processors slice of another
-** data set of different rank, the values expected will have
-** to be adjusted accordingly. This is done via the
-** first_expected_val parameter.
+**  In the case of a read from the process's slice of another
+** data set of different rank, the values expected will have
+** to be adjusted accordingly. This is done via the
+** first_expected_val parameter.
**
-** Finally, the function presumes that the first element
-** of the buffer resides either at the origin of either
-** a selected or an unselected checker. (Translation:
-** if partial checkers appear in the buffer, they will
-** intersect the edges of the n-cube oposite the origin.)
+** Finally, the function presumes that the first element
+**  of the buffer resides at the origin of either
+** a selected or an unselected checker. (Translation:
+** if partial checkers appear in the buffer, they will
+**  intersect the edges of the n-cube opposite the origin.)
**
****************************************************************/
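For the 20 x 10 x 10 example above, the fill pattern reduces to a one-liner (sketch only; the helper name is invented):

static uint32_t
expected_value_3d(int x, int y, int z)
{
    /* row-major "natural numbers" fill: z is the fastest changing index,
     * so process 10's slice with origin (10, 0, 0) starts at value 1000
     */
    return (uint32_t)((10 * 10 * x) + (10 * y) + z);
}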
@@ -2802,7 +2734,7 @@ ckrbrd_hs_dr_pio_test__verify_data(uint32_t * buf_ptr,
hbool_t buf_starts_in_checker)
{
#if CKRBRD_HS_DR_PIO_TEST__VERIFY_DATA__DEBUG
- const char * fcnName = "ckrbrd_hs_dr_pio_test__verify_data():";
+ const char * fcnName = "ckrbrd_hs_dr_pio_test__verify_data():";
#endif
hbool_t good_data = TRUE;
hbool_t in_checker;
@@ -2821,9 +2753,9 @@ ckrbrd_hs_dr_pio_test__verify_data(uint32_t * buf_ptr,
HDassert( checker_edge_size <= edge_size );
HDassert( test_max_rank <= PAR_SS_DR_MAX_RANK );
-#if CKRBRD_HS_DR_PIO_TEST__VERIFY_DATA__DEBUG
+#if CKRBRD_HS_DR_PIO_TEST__VERIFY_DATA__DEBUG
- int mpi_rank;
+ int mpi_rank;
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
HDfprintf(stdout, "%s mpi_rank = %d.\n", fcnName, mpi_rank);
@@ -2875,7 +2807,7 @@ ckrbrd_hs_dr_pio_test__verify_data(uint32_t * buf_ptr,
y = 0;
start_in_checker[3] = start_in_checker[2];
do
- {
+ {
if ( y >= checker_edge_size ) {
start_in_checker[3] = ! start_in_checker[3];
@@ -2884,13 +2816,13 @@ ckrbrd_hs_dr_pio_test__verify_data(uint32_t * buf_ptr,
m = 0;
z = 0;
-#if CKRBRD_HS_DR_PIO_TEST__VERIFY_DATA__DEBUG
+#if CKRBRD_HS_DR_PIO_TEST__VERIFY_DATA__DEBUG
HDfprintf(stdout, "%d, %d, %d, %d, %d:", i, j, k, l, m);
#endif
in_checker = start_in_checker[3];
do
{
-#if CKRBRD_HS_DR_PIO_TEST__VERIFY_DATA__DEBUG
+#if CKRBRD_HS_DR_PIO_TEST__VERIFY_DATA__DEBUG
HDfprintf(stdout, " %d", (int)(*val_ptr));
#endif
if ( z >= checker_edge_size ) {
@@ -2898,21 +2830,21 @@ ckrbrd_hs_dr_pio_test__verify_data(uint32_t * buf_ptr,
in_checker = ! in_checker;
z = 0;
}
-
+
if ( in_checker ) {
-
+
if ( *val_ptr != expected_value ) {
good_data = FALSE;
}
-
+
/* zero out buffer for re-use */
*val_ptr = 0;
} else if ( *val_ptr != 0 ) {
good_data = FALSE;
-
+
/* zero out buffer for re-use */
*val_ptr = 0;
@@ -2922,10 +2854,10 @@ ckrbrd_hs_dr_pio_test__verify_data(uint32_t * buf_ptr,
expected_value++;
m++;
z++;
-
+
} while ( ( rank >= (test_max_rank - 4) ) &&
( m < edge_size ) );
-#if CKRBRD_HS_DR_PIO_TEST__VERIFY_DATA__DEBUG
+#if CKRBRD_HS_DR_PIO_TEST__VERIFY_DATA__DEBUG
HDfprintf(stdout, "\n");
#endif
l++;
@@ -2951,28 +2883,24 @@ ckrbrd_hs_dr_pio_test__verify_data(uint32_t * buf_ptr,
/*-------------------------------------------------------------------------
- * Function: ckrbrd_hs_dr_pio_test__d2m_l2s()
+ * Function: ckrbrd_hs_dr_pio_test__d2m_l2s()
*
- * Purpose: Part one of a series of tests of I/O to/from hyperslab
- * selections of different rank in the parallel.
+ * Purpose: Part one of a series of tests of I/O to/from hyperslab
+ *        selections of different rank in the parallel case.
*
- * Verify that we can read from disk correctly using checker
- * board selections of different rank that
- * H5S_select_shape_same() views as being of the same shape.
+ * Verify that we can read from disk correctly using checker
+ * board selections of different rank that
+ * H5Sselect_shape_same() views as being of the same shape.
*
- * In this function, we test this by reading small_rank - 1
- * checker board slices from the on disk large cube, and
- * verifying that the data read is correct. Verify that
- * H5S_select_shape_same() returns true on the memory and
- * file selections.
+ * In this function, we test this by reading small_rank - 1
+ * checker board slices from the on disk large cube, and
+ * verifying that the data read is correct. Verify that
+ * H5Sselect_shape_same() returns true on the memory and
+ * file selections.
*
- * Return: void
+ * Return: void
*
- * Programmer: JRM -- 9/15/11
- *
- * Modifications:
- *
- * None.
+ * Programmer: JRM -- 9/15/11
*
*-------------------------------------------------------------------------
*/
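Stripped of the skip bookkeeping and the loop over slice origins, the read step this function performs looks roughly like the following (names are illustrative, error handling elided):

static void
read_shape_same_slice(hid_t large_dset, hid_t mem_sid, hid_t file_sid,
                      hid_t xfer_plist, uint32_t *slice_buf)
{
    /* the memory selection has rank small_rank - 1, the file selection
     * rank large_rank; the transfer is legal because the two selections
     * have the same shape
     */
    if (H5Sselect_shape_same(mem_sid, file_sid) == TRUE)
        H5Dread(large_dset, H5T_NATIVE_UINT32, mem_sid, file_sid,
                xfer_plist, slice_buf);
}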
@@ -2982,29 +2910,29 @@ ckrbrd_hs_dr_pio_test__verify_data(uint32_t * buf_ptr,
static void
ckrbrd_hs_dr_pio_test__d2m_l2s(struct hs_dr_pio_test_vars_t * tv_ptr)
{
-#if CHECKER_BOARD_HS_DR_PIO_TEST__D2M_L2S__DEBUG
+#if CHECKER_BOARD_HS_DR_PIO_TEST__D2M_L2S__DEBUG
const char *fcnName = "ckrbrd_hs_dr_pio_test__d2m_l2s()";
uint32_t * ptr_0;
#endif /* CHECKER_BOARD_HS_DR_PIO_TEST__D2M_L2S__DEBUG */
- hbool_t data_ok = FALSE;
- int i, j, k, l;
- uint32_t expected_value;
- int mpi_rank; /* needed by VRFY */
+ hbool_t data_ok = FALSE;
+ int i, j, k, l;
+ uint32_t expected_value;
+ int mpi_rank; /* needed by VRFY */
hsize_t sel_start[PAR_SS_DR_MAX_RANK];
htri_t check; /* Shape comparison return value */
- herr_t ret; /* Generic return value */
+ herr_t ret; /* Generic return value */
/* initialize the local copy of mpi_rank */
mpi_rank = tv_ptr->mpi_rank;
/* first, verify that we can read from disk correctly using selections
- * of different rank that H5S_select_shape_same() views as being of the
+ * of different rank that H5Sselect_shape_same() views as being of the
* same shape.
*
- * Start by reading a (small_rank - 1)-D checker board slice from this
- * processes slice of the on disk large data set, and verifying that the
- * data read is correct. Verify that H5S_select_shape_same() returns
+ * Start by reading a (small_rank - 1)-D checker board slice from this
+     * process's slice of the on disk large data set, and verifying that the
+ * data read is correct. Verify that H5Sselect_shape_same() returns
* true on the memory and file selections.
*
* The first step is to set up the needed checker board selection in the
@@ -3025,7 +2953,7 @@ ckrbrd_hs_dr_pio_test__d2m_l2s(struct hs_dr_pio_test_vars_t * tv_ptr)
/* zero out the buffer we will be reading into */
HDmemset(tv_ptr->small_ds_slice_buf, 0, sizeof(uint32_t) * tv_ptr->small_ds_slice_size);
-#if CHECKER_BOARD_HS_DR_PIO_TEST__D2M_L2S__DEBUG
+#if CHECKER_BOARD_HS_DR_PIO_TEST__D2M_L2S__DEBUG
HDfprintf(stdout, "%s:%d: initial small_ds_slice_buf = ",
fcnName, tv_ptr->mpi_rank);
ptr_0 = tv_ptr->small_ds_slice_buf;
@@ -3034,7 +2962,7 @@ ckrbrd_hs_dr_pio_test__d2m_l2s(struct hs_dr_pio_test_vars_t * tv_ptr)
ptr_0++;
}
HDfprintf(stdout, "\n");
-#endif /* CHECKER_BOARD_HS_DR_PIO_TEST__D2M_L2S__DEBUG */
+#endif /* CHECKER_BOARD_HS_DR_PIO_TEST__D2M_L2S__DEBUG */
/* set up start, stride, count, and block -- note that we will
* change start[] so as to read slices of the large cube.
@@ -3054,15 +2982,15 @@ ckrbrd_hs_dr_pio_test__d2m_l2s(struct hs_dr_pio_test_vars_t * tv_ptr)
}
}
-#if CHECKER_BOARD_HS_DR_PIO_TEST__D2M_L2S__DEBUG
- HDfprintf(stdout,
+#if CHECKER_BOARD_HS_DR_PIO_TEST__D2M_L2S__DEBUG
+ HDfprintf(stdout,
"%s:%d: reading slice from big ds on disk into small ds slice.\n",
fcnName, tv_ptr->mpi_rank);
-#endif /* CHECKER_BOARD_HS_DR_PIO_TEST__D2M_L2S__DEBUG */
+#endif /* CHECKER_BOARD_HS_DR_PIO_TEST__D2M_L2S__DEBUG */
/* in serial versions of this test, we loop through all the dimensions
- * of the large data set. However, in the parallel version, each
+ * of the large data set. However, in the parallel version, each
* process only works with that slice of the large cube indicated
- * by its rank -- hence we set the most slowly changing index to
+ * by its rank -- hence we set the most slowly changing index to
      * mpi_rank, and don't iterate over it.
*/
@@ -3075,9 +3003,9 @@ ckrbrd_hs_dr_pio_test__d2m_l2s(struct hs_dr_pio_test_vars_t * tv_ptr)
i = 0;
}
- /* since large_rank is at most PAR_SS_DR_MAX_RANK, no need to
+ /* since large_rank is at most PAR_SS_DR_MAX_RANK, no need to
* loop over it -- either we are setting i to mpi_rank, or
- * we are setting it to zero. It will not change during the
+ * we are setting it to zero. It will not change during the
* test.
*/
@@ -3101,7 +3029,7 @@ ckrbrd_hs_dr_pio_test__d2m_l2s(struct hs_dr_pio_test_vars_t * tv_ptr)
}
do {
- /* since small rank >= 2 and large_rank > small_rank, we
+ /* since small rank >= 2 and large_rank > small_rank, we
* have large_rank >= 3. Since PAR_SS_DR_MAX_RANK == 5
                 * (barring major re-organization), this gives us:
*
@@ -3121,8 +3049,8 @@ ckrbrd_hs_dr_pio_test__d2m_l2s(struct hs_dr_pio_test_vars_t * tv_ptr)
tv_ptr->skips = 0; /* reset the skips counter */
- /* we know that small_rank - 1 >= 1 and that
- * large_rank > small_rank by the assertions at the head
+ /* we know that small_rank - 1 >= 1 and that
+ * large_rank > small_rank by the assertions at the head
* of this function. Thus no need for another inner loop.
*/
tv_ptr->start[0] = (hsize_t)i;
@@ -3148,24 +3076,23 @@ ckrbrd_hs_dr_pio_test__d2m_l2s(struct hs_dr_pio_test_vars_t * tv_ptr)
tv_ptr->start
);
- /* verify that H5S_select_shape_same() reports the two
+ /* verify that H5Sselect_shape_same() reports the two
* selections as having the same shape.
*/
- check = H5S_select_shape_same_test(tv_ptr->small_ds_slice_sid,
- tv_ptr->file_large_ds_sid_0);
- VRFY((check == TRUE), "H5S_select_shape_same_test passed");
+ check = H5Sselect_shape_same(tv_ptr->small_ds_slice_sid, tv_ptr->file_large_ds_sid_0);
+ VRFY((check == TRUE), "H5Sselect_shape_same passed");
/* Read selection from disk */
-#if CHECKER_BOARD_HS_DR_PIO_TEST__D2M_L2S__DEBUG
+#if CHECKER_BOARD_HS_DR_PIO_TEST__D2M_L2S__DEBUG
HDfprintf(stdout, "%s:%d: start = %d %d %d %d %d.\n", fcnName,
- tv_ptr->mpi_rank, tv_ptr->start[0], tv_ptr->start[1],
+ tv_ptr->mpi_rank, tv_ptr->start[0], tv_ptr->start[1],
tv_ptr->start[2], tv_ptr->start[3], tv_ptr->start[4]);
HDfprintf(stdout, "%s slice/file extent dims = %d/%d.\n",
fcnName,
H5Sget_simple_extent_ndims(tv_ptr->small_ds_slice_sid),
H5Sget_simple_extent_ndims(tv_ptr->file_large_ds_sid_0));
-#endif /* CHECKER_BOARD_HS_DR_PIO_TEST__D2M_L2S__DEBUG */
+#endif /* CHECKER_BOARD_HS_DR_PIO_TEST__D2M_L2S__DEBUG */
ret = H5Dread(tv_ptr->large_dataset,
H5T_NATIVE_UINT32,
@@ -3175,15 +3102,15 @@ ckrbrd_hs_dr_pio_test__d2m_l2s(struct hs_dr_pio_test_vars_t * tv_ptr)
tv_ptr->small_ds_slice_buf);
VRFY((ret >= 0), "H5Dread() slice from large ds succeeded.");
-#if CHECKER_BOARD_HS_DR_PIO_TEST__D2M_L2S__DEBUG
- HDfprintf(stdout, "%s:%d: H5Dread() returns.\n",
+#if CHECKER_BOARD_HS_DR_PIO_TEST__D2M_L2S__DEBUG
+ HDfprintf(stdout, "%s:%d: H5Dread() returns.\n",
fcnName, tv_ptr->mpi_rank);
#endif /* CHECKER_BOARD_HS_DR_PIO_TEST__D2M_L2S__DEBUG */
/* verify that expected data is retrieved */
expected_value = (uint32_t)
- ((i * tv_ptr->edge_size * tv_ptr->edge_size *
+ ((i * tv_ptr->edge_size * tv_ptr->edge_size *
tv_ptr->edge_size * tv_ptr->edge_size) +
(j * tv_ptr->edge_size * tv_ptr->edge_size * tv_ptr->edge_size) +
(k * tv_ptr->edge_size * tv_ptr->edge_size) +
@@ -3199,7 +3126,7 @@ ckrbrd_hs_dr_pio_test__d2m_l2s(struct hs_dr_pio_test_vars_t * tv_ptr)
(hbool_t)TRUE
);
- VRFY((data_ok == TRUE),
+ VRFY((data_ok == TRUE),
"small slice read from large ds data good.");
(tv_ptr->tests_run)++;
@@ -3227,27 +3154,23 @@ ckrbrd_hs_dr_pio_test__d2m_l2s(struct hs_dr_pio_test_vars_t * tv_ptr)
/*-------------------------------------------------------------------------
- * Function: ckrbrd_hs_dr_pio_test__d2m_s2l()
- *
- * Purpose: Part two of a series of tests of I/O to/from hyperslab
- * selections of different rank in the parallel.
- *
- * Verify that we can read from disk correctly using
- * selections of different rank that H5S_select_shape_same()
- * views as being of the same shape.
+ * Function: ckrbrd_hs_dr_pio_test__d2m_s2l()
*
- * In this function, we test this by reading checker board
- * slices of the on disk small data set into slices through
- * the in memory large data set, and verify that the correct
- * data (and only the correct data) is read.
+ * Purpose: Part two of a series of tests of I/O to/from hyperslab
+ *        selections of different rank in the parallel case.
*
- * Return: void
+ * Verify that we can read from disk correctly using
+ * selections of different rank that H5Sselect_shape_same()
+ * views as being of the same shape.
*
- * Programmer: JRM -- 8/15/11
+ * In this function, we test this by reading checker board
+ * slices of the on disk small data set into slices through
+ * the in memory large data set, and verify that the correct
+ * data (and only the correct data) is read.
*
- * Modifications:
+ * Return: void
*
- * None.
+ * Programmer: JRM -- 8/15/11
*
*-------------------------------------------------------------------------
*/
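The "correct data (and only the correct data)" requirement amounts to the window check sketched below (illustrative names; the contiguous form -- the checkerboard variant additionally skips unselected checkers via ckrbrd_hs_dr_pio_test__verify_data()): the buffer is zeroed before the read, so every element outside [start_index, stop_index] must still be zero afterwards, while the window itself must hold consecutive expected values.

static hbool_t
verify_read_window(const uint32_t *buf, size_t buf_size,
                   size_t start_index, size_t stop_index,
                   uint32_t first_expected_val)
{
    uint32_t expected_value = first_expected_val;
    size_t   u;

    for (u = 0; u < buf_size; u++) {
        if ((u >= start_index) && (u <= stop_index)) {
            if (buf[u] != expected_value++)
                return FALSE;   /* wrong data inside the window */
        }
        else if (buf[u] != 0) {
            return FALSE;       /* data leaked outside the window */
        }
    }

    return TRUE;
}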
@@ -3257,27 +3180,27 @@ ckrbrd_hs_dr_pio_test__d2m_l2s(struct hs_dr_pio_test_vars_t * tv_ptr)
static void
ckrbrd_hs_dr_pio_test__d2m_s2l(struct hs_dr_pio_test_vars_t * tv_ptr)
{
-#if CHECKER_BOARD_HS_DR_PIO_TEST__D2M_S2L__DEBUG
+#if CHECKER_BOARD_HS_DR_PIO_TEST__D2M_S2L__DEBUG
const char *fcnName = "ckrbrd_hs_dr_pio_test__d2m_s2l()";
#endif /* CHECKER_BOARD_HS_DR_PIO_TEST__D2M_S2L__DEBUG */
- hbool_t data_ok = FALSE;
- int i, j, k, l;
+ hbool_t data_ok = FALSE;
+ int i, j, k, l;
size_t u;
size_t start_index;
size_t stop_index;
- uint32_t expected_value;
+ uint32_t expected_value;
uint32_t * ptr_1;
- int mpi_rank; /* needed by VRFY */
+ int mpi_rank; /* needed by VRFY */
hsize_t sel_start[PAR_SS_DR_MAX_RANK];
htri_t check; /* Shape comparison return value */
- herr_t ret; /* Generic return value */
+ herr_t ret; /* Generic return value */
/* initialize the local copy of mpi_rank */
mpi_rank = tv_ptr->mpi_rank;
- /* similarly, read slices of the on disk small data set into slices
- * through the in memory large data set, and verify that the correct
+ /* similarly, read slices of the on disk small data set into slices
+ * through the in memory large data set, and verify that the correct
* data (and only the correct data) is read.
*/
@@ -3292,8 +3215,8 @@ ckrbrd_hs_dr_pio_test__d2m_s2l(struct hs_dr_pio_test_vars_t * tv_ptr)
tv_ptr->small_rank - 1,
sel_start);
-#if CHECKER_BOARD_HS_DR_PIO_TEST__D2M_S2L__DEBUG
- HDfprintf(stdout,
+#if CHECKER_BOARD_HS_DR_PIO_TEST__D2M_S2L__DEBUG
+ HDfprintf(stdout,
"%s reading slices of on disk small data set into slices of big data set.\n",
fcnName);
#endif /* CHECKER_BOARD_HS_DR_PIO_TEST__D2M_S2L__DEBUG */
@@ -3303,7 +3226,7 @@ ckrbrd_hs_dr_pio_test__d2m_s2l(struct hs_dr_pio_test_vars_t * tv_ptr)
/* set up start, stride, count, and block -- note that we will
* change start[] so as to read the slice of the small data set
- * into different slices of the process slice of the large data
+ * into different slices of the process slice of the large data
* set.
*/
for ( i = 0; i < PAR_SS_DR_MAX_RANK; i++ ) {
@@ -3322,11 +3245,11 @@ ckrbrd_hs_dr_pio_test__d2m_s2l(struct hs_dr_pio_test_vars_t * tv_ptr)
}
/* in serial versions of this test, we loop through all the dimensions
- * of the large data set that don't appear in the small data set.
+ * of the large data set that don't appear in the small data set.
*
- * However, in the parallel version, each process only works with that
- * slice of the large (and small) data set indicated by its rank -- hence
- * we set the most slowly changing index to mpi_rank, and don't itterate
+ * However, in the parallel version, each process only works with that
+ * slice of the large (and small) data set indicated by its rank -- hence
+     * we set the most slowly changing index to mpi_rank, and don't iterate
* over it.
*/
@@ -3340,9 +3263,9 @@ ckrbrd_hs_dr_pio_test__d2m_s2l(struct hs_dr_pio_test_vars_t * tv_ptr)
i = 0;
}
- /* since large_rank is at most PAR_SS_DR_MAX_RANK, no need to
+ /* since large_rank is at most PAR_SS_DR_MAX_RANK, no need to
* loop over it -- either we are setting i to mpi_rank, or
- * we are setting it to zero. It will not change during the
+ * we are setting it to zero. It will not change during the
* test.
*/
@@ -3366,7 +3289,7 @@ ckrbrd_hs_dr_pio_test__d2m_s2l(struct hs_dr_pio_test_vars_t * tv_ptr)
}
do {
- /* since small rank >= 2 and large_rank > small_rank, we
+ /* since small rank >= 2 and large_rank > small_rank, we
* have large_rank >= 3. Since PAR_SS_DR_MAX_RANK == 5
                 * (barring major re-organization), this gives us:
*
@@ -3414,20 +3337,19 @@ ckrbrd_hs_dr_pio_test__d2m_s2l(struct hs_dr_pio_test_vars_t * tv_ptr)
);
- /* verify that H5S_select_shape_same() reports the two
+ /* verify that H5Sselect_shape_same() reports the two
* selections as having the same shape.
*/
- check = H5S_select_shape_same_test(tv_ptr->file_small_ds_sid_0,
- tv_ptr->mem_large_ds_sid);
- VRFY((check == TRUE), "H5S_select_shape_same_test passed");
+ check = H5Sselect_shape_same(tv_ptr->file_small_ds_sid_0, tv_ptr->mem_large_ds_sid);
+ VRFY((check == TRUE), "H5Sselect_shape_same passed");
/* Read selection from disk */
-#if CHECKER_BOARD_HS_DR_PIO_TEST__D2M_S2L__DEBUG
- HDfprintf(stdout, "%s:%d: start = %d %d %d %d %d.\n",
- fcnName, tv_ptr->mpi_rank,
- tv_ptr->start[0], tv_ptr->start[1], tv_ptr->start[2],
- tv_ptr->start[3], tv_ptr->start[4]);
+#if CHECKER_BOARD_HS_DR_PIO_TEST__D2M_S2L__DEBUG
+ HDfprintf(stdout, "%s:%d: start = %d %d %d %d %d.\n",
+ fcnName, tv_ptr->mpi_rank,
+ tv_ptr->start[0], tv_ptr->start[1], tv_ptr->start[2],
+ tv_ptr->start[3], tv_ptr->start[4]);
HDfprintf(stdout, "%s:%d: mem/file extent dims = %d/%d.\n",
fcnName, tv_ptr->mpi_rank,
H5Sget_simple_extent_ndims(tv_ptr->large_ds_slice_sid),
@@ -3446,21 +3368,21 @@ ckrbrd_hs_dr_pio_test__d2m_s2l(struct hs_dr_pio_test_vars_t * tv_ptr)
*/
data_ok = TRUE;
ptr_1 = tv_ptr->large_ds_buf_1;
- expected_value =
+ expected_value =
(uint32_t)((size_t)(tv_ptr->mpi_rank) * tv_ptr->small_ds_slice_size);
start_index = (size_t)(
- (i * tv_ptr->edge_size * tv_ptr->edge_size *
+ (i * tv_ptr->edge_size * tv_ptr->edge_size *
tv_ptr->edge_size * tv_ptr->edge_size) +
(j * tv_ptr->edge_size * tv_ptr->edge_size * tv_ptr->edge_size) +
(k * tv_ptr->edge_size * tv_ptr->edge_size) +
(l * tv_ptr->edge_size));
stop_index = start_index + tv_ptr->small_ds_slice_size - 1;
-#if CHECKER_BOARD_HS_DR_PIO_TEST__D2M_S2L__DEBUG
+#if CHECKER_BOARD_HS_DR_PIO_TEST__D2M_S2L__DEBUG
{
int m, n;
- HDfprintf(stdout, "%s:%d: expected_value = %d.\n",
+ HDfprintf(stdout, "%s:%d: expected_value = %d.\n",
fcnName, tv_ptr->mpi_rank, expected_value);
HDfprintf(stdout, "%s:%d: start/stop index = %d/%d.\n",
fcnName, tv_ptr->mpi_rank, start_index, stop_index);
@@ -3495,7 +3417,7 @@ ckrbrd_hs_dr_pio_test__d2m_s2l(struct hs_dr_pio_test_vars_t * tv_ptr)
ptr_1++;
}
- VRFY((data_ok == TRUE),
+ VRFY((data_ok == TRUE),
"slice read from small to large ds data good(1).");
data_ok = ckrbrd_hs_dr_pio_test__verify_data
@@ -3508,7 +3430,7 @@ ckrbrd_hs_dr_pio_test__d2m_s2l(struct hs_dr_pio_test_vars_t * tv_ptr)
(hbool_t)TRUE
);
- VRFY((data_ok == TRUE),
+ VRFY((data_ok == TRUE),
"slice read from small to large ds data good(2).");
@@ -3527,7 +3449,7 @@ ckrbrd_hs_dr_pio_test__d2m_s2l(struct hs_dr_pio_test_vars_t * tv_ptr)
ptr_1++;
}
- VRFY((data_ok == TRUE),
+ VRFY((data_ok == TRUE),
"slice read from small to large ds data good(3).");
(tv_ptr->tests_run)++;
@@ -3555,31 +3477,27 @@ ckrbrd_hs_dr_pio_test__d2m_s2l(struct hs_dr_pio_test_vars_t * tv_ptr)
/*-------------------------------------------------------------------------
- * Function: ckrbrd_hs_dr_pio_test__m2d_l2s()
- *
- * Purpose: Part three of a series of tests of I/O to/from checker
- * board hyperslab selections of different rank in the
- * parallel.
+ * Function: ckrbrd_hs_dr_pio_test__m2d_l2s()
*
- * Verify that we can write from memory to file using checker
- * board selections of different rank that
- * H5S_select_shape_same() views as being of the same shape.
+ * Purpose: Part three of a series of tests of I/O to/from checker
+ * board hyperslab selections of different rank in the
+ *        parallel case.
*
- * Do this by writing small_rank - 1 dimensional checker
- * board slices from the in memory large data set to the on
- * disk small cube dataset. After each write, read the
- * slice of the small dataset back from disk, and verify
- * that it contains the expected data. Verify that
- * H5S_select_shape_same() returns true on the memory and
- * file selections.
+ * Verify that we can write from memory to file using checker
+ * board selections of different rank that
+ * H5Sselect_shape_same() views as being of the same shape.
*
- * Return: void
+ * Do this by writing small_rank - 1 dimensional checker
+ * board slices from the in memory large data set to the on
+ * disk small cube dataset. After each write, read the
+ * slice of the small dataset back from disk, and verify
+ * that it contains the expected data. Verify that
+ * H5Sselect_shape_same() returns true on the memory and
+ * file selections.
*
- * Programmer: JRM -- 8/15/11
+ * Return: void
*
- * Modifications:
- *
- * None.
+ * Programmer: JRM -- 8/15/11
*
*-------------------------------------------------------------------------
*/
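The write/read-back cycle this function repeats per slice can be sketched as three calls (illustrative names, error checks elided): zero the target slice on disk, write the checkerboard from the in-memory large data set, then read the slice back for verification.

static void
write_then_read_back(hid_t small_dset, hid_t mem_small_sid, hid_t file_small_sid,
                     hid_t mem_large_sid, hid_t xfer_plist,
                     const uint32_t *zero_buf, const uint32_t *large_buf,
                     uint32_t *check_buf)
{
    /* 1: overwrite this rank's slice of the on-disk small data set with zeros */
    H5Dwrite(small_dset, H5T_NATIVE_UINT32, mem_small_sid, file_small_sid,
             xfer_plist, zero_buf);

    /* 2: write the checkerboard selection from the in-memory large data set */
    H5Dwrite(small_dset, H5T_NATIVE_UINT32, mem_large_sid, file_small_sid,
             xfer_plist, large_buf);

    /* 3: read the slice back so the caller can verify its contents */
    H5Dread(small_dset, H5T_NATIVE_UINT32, mem_small_sid, file_small_sid,
            xfer_plist, check_buf);
}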
@@ -3589,21 +3507,20 @@ ckrbrd_hs_dr_pio_test__d2m_s2l(struct hs_dr_pio_test_vars_t * tv_ptr)
static void
ckrbrd_hs_dr_pio_test__m2d_l2s(struct hs_dr_pio_test_vars_t * tv_ptr)
{
-#if CHECKER_BOARD_HS_DR_PIO_TEST__M2D_L2S__DEBUG
+#if CHECKER_BOARD_HS_DR_PIO_TEST__M2D_L2S__DEBUG
const char *fcnName = "ckrbrd_hs_dr_pio_test__m2d_l2s()";
#endif /* CHECKER_BOARD_HS_DR_PIO_TEST__M2D_L2S__DEBUG */
- hbool_t data_ok = FALSE;
- hbool_t mis_match = FALSE;
- int i, j, k, l;
+ hbool_t data_ok = FALSE;
+ int i, j, k, l;
size_t u;
size_t start_index;
size_t stop_index;
- uint32_t expected_value;
+ uint32_t expected_value;
uint32_t * ptr_1;
- int mpi_rank; /* needed by VRFY */
+ int mpi_rank; /* needed by VRFY */
hsize_t sel_start[PAR_SS_DR_MAX_RANK];
htri_t check; /* Shape comparison return value */
- herr_t ret; /* Generic return value */
+ herr_t ret; /* Generic return value */
/* initialize the local copy of mpi_rank */
mpi_rank = tv_ptr->mpi_rank;
@@ -3611,12 +3528,12 @@ ckrbrd_hs_dr_pio_test__m2d_l2s(struct hs_dr_pio_test_vars_t * tv_ptr)
/* now we go in the opposite direction, verifying that we can write
* from memory to file using selections of different rank that
- * H5S_select_shape_same() views as being of the same shape.
+ * H5Sselect_shape_same() views as being of the same shape.
*
* Start by writing small_rank - 1 D slices from the in memory large data
- * set to the on disk small dataset. After each write, read the slice of
- * the small dataset back from disk, and verify that it contains the
- * expected data. Verify that H5S_select_shape_same() returns true on
+ * set to the on disk small dataset. After each write, read the slice of
+ * the small dataset back from disk, and verify that it contains the
+ * expected data. Verify that H5Sselect_shape_same() returns true on
* the memory and file selections.
*/
@@ -3684,18 +3601,18 @@ ckrbrd_hs_dr_pio_test__m2d_l2s(struct hs_dr_pio_test_vars_t * tv_ptr)
HDmemset(tv_ptr->small_ds_buf_1, 0, sizeof(uint32_t) * tv_ptr->small_ds_size);
-#if CHECKER_BOARD_HS_DR_PIO_TEST__M2D_L2S__DEBUG
- HDfprintf(stdout,
+#if CHECKER_BOARD_HS_DR_PIO_TEST__M2D_L2S__DEBUG
+ HDfprintf(stdout,
"%s writing checker boards selections of slices from big ds to slices of small ds on disk.\n",
fcnName);
#endif /* CHECKER_BOARD_HS_DR_PIO_TEST__M2D_L2S__DEBUG */
/* in serial versions of this test, we loop through all the dimensions
- * of the large data set that don't appear in the small data set.
+ * of the large data set that don't appear in the small data set.
*
- * However, in the parallel version, each process only works with that
- * slice of the large (and small) data set indicated by its rank -- hence
- * we set the most slowly changing index to mpi_rank, and don't itterate
+ * However, in the parallel version, each process only works with that
+ * slice of the large (and small) data set indicated by its rank -- hence
+     * we set the most slowly changing index to mpi_rank, and don't iterate
* over it.
*/
@@ -3709,9 +3626,9 @@ ckrbrd_hs_dr_pio_test__m2d_l2s(struct hs_dr_pio_test_vars_t * tv_ptr)
i = 0;
}
- /* since large_rank is at most PAR_SS_DR_MAX_RANK, no need to
+ /* since large_rank is at most PAR_SS_DR_MAX_RANK, no need to
* loop over it -- either we are setting i to mpi_rank, or
- * we are setting it to zero. It will not change during the
+ * we are setting it to zero. It will not change during the
* test.
*/
@@ -3736,7 +3653,7 @@ ckrbrd_hs_dr_pio_test__m2d_l2s(struct hs_dr_pio_test_vars_t * tv_ptr)
}
do {
- /* since small rank >= 2 and large_rank > small_rank, we
+ /* since small rank >= 2 and large_rank > small_rank, we
* have large_rank >= 3. Since PAR_SS_DR_MAX_RANK == 5
                 * (barring major re-organization), this gives us:
*
@@ -3760,7 +3677,7 @@ ckrbrd_hs_dr_pio_test__m2d_l2s(struct hs_dr_pio_test_vars_t * tv_ptr)
* by the assertions at the head of this function. Thus no
* need for another inner loop.
*/
-
+
/* zero out this rank's slice of the on disk small data set */
ret = H5Dwrite(tv_ptr->small_dataset,
H5T_NATIVE_UINT32,
@@ -3769,7 +3686,7 @@ ckrbrd_hs_dr_pio_test__m2d_l2s(struct hs_dr_pio_test_vars_t * tv_ptr)
tv_ptr->xfer_plist,
tv_ptr->small_ds_buf_2);
VRFY((ret >= 0), "H5Dwrite() zero slice to small ds succeeded.");
-
+
/* select the portion of the in memory large cube from which we
* are going to write data.
*/
@@ -3778,13 +3695,13 @@ ckrbrd_hs_dr_pio_test__m2d_l2s(struct hs_dr_pio_test_vars_t * tv_ptr)
tv_ptr->start[2] = (hsize_t)k;
tv_ptr->start[3] = (hsize_t)l;
tv_ptr->start[4] = 0;
-
+
HDassert((tv_ptr->start[0] == 0)||(0 < tv_ptr->small_ds_offset + 1));
HDassert((tv_ptr->start[1] == 0)||(1 < tv_ptr->small_ds_offset + 1));
HDassert((tv_ptr->start[2] == 0)||(2 < tv_ptr->small_ds_offset + 1));
HDassert((tv_ptr->start[3] == 0)||(3 < tv_ptr->small_ds_offset + 1));
HDassert((tv_ptr->start[4] == 0)||(4 < tv_ptr->small_ds_offset + 1));
-
+
ckrbrd_hs_dr_pio_test__slct_ckrbrd
(
tv_ptr->mpi_rank,
@@ -3795,26 +3712,25 @@ ckrbrd_hs_dr_pio_test__m2d_l2s(struct hs_dr_pio_test_vars_t * tv_ptr)
tv_ptr->small_rank - 1,
tv_ptr->start
);
-
-
- /* verify that H5S_select_shape_same() reports the in
- * memory checkerboard selection of the slice through the
+
+
+ /* verify that H5Sselect_shape_same() reports the in
+ * memory checkerboard selection of the slice through the
* large dataset and the checkerboard selection of the process
* slice of the small data set as having the same shape.
*/
- check = H5S_select_shape_same_test(tv_ptr->file_small_ds_sid_1,
- tv_ptr->mem_large_ds_sid);
- VRFY((check == TRUE), "H5S_select_shape_same_test passed.");
-
-
- /* write the checker board selection of the slice from the in
- * memory large data set to the slice of the on disk small
- * dataset.
+ check = H5Sselect_shape_same(tv_ptr->file_small_ds_sid_1, tv_ptr->mem_large_ds_sid);
+ VRFY((check == TRUE), "H5Sselect_shape_same passed.");
+
+
+ /* write the checker board selection of the slice from the in
+ * memory large data set to the slice of the on disk small
+ * dataset.
*/
-#if CHECKER_BOARD_HS_DR_PIO_TEST__M2D_L2S__DEBUG
- HDfprintf(stdout, "%s:%d: start = %d %d %d %d %d.\n",
+#if CHECKER_BOARD_HS_DR_PIO_TEST__M2D_L2S__DEBUG
+ HDfprintf(stdout, "%s:%d: start = %d %d %d %d %d.\n",
fcnName, tv_ptr->mpi_rank,
- tv_ptr->start[0], tv_ptr->start[1], tv_ptr->start[2],
+ tv_ptr->start[0], tv_ptr->start[1], tv_ptr->start[2],
tv_ptr->start[3], tv_ptr->start[4]);
HDfprintf(stdout, "%s:%d: mem/file extent dims = %d/%d.\n",
fcnName, tv_ptr->mpi_rank,
@@ -3828,8 +3744,8 @@ ckrbrd_hs_dr_pio_test__m2d_l2s(struct hs_dr_pio_test_vars_t * tv_ptr)
tv_ptr->xfer_plist,
tv_ptr->large_ds_buf_0);
VRFY((ret >= 0), "H5Dwrite() slice to large ds succeeded.");
-
-
+
+
/* read the on disk process slice of the small dataset into memory */
ret = H5Dread(tv_ptr->small_dataset,
H5T_NATIVE_UINT32,
@@ -3838,30 +3754,28 @@ ckrbrd_hs_dr_pio_test__m2d_l2s(struct hs_dr_pio_test_vars_t * tv_ptr)
tv_ptr->xfer_plist,
tv_ptr->small_ds_buf_1);
VRFY((ret >= 0), "H5Dread() slice from small ds succeeded.");
-
-
+
+
/* verify that expected data is retrieved */
-
- mis_match = FALSE;
-
+
expected_value = (uint32_t)(
- (i * tv_ptr->edge_size * tv_ptr->edge_size *
+ (i * tv_ptr->edge_size * tv_ptr->edge_size *
tv_ptr->edge_size * tv_ptr->edge_size) +
(j * tv_ptr->edge_size * tv_ptr->edge_size * tv_ptr->edge_size) +
(k * tv_ptr->edge_size * tv_ptr->edge_size) +
(l * tv_ptr->edge_size));
-
+
start_index = (size_t)(tv_ptr->mpi_rank) * tv_ptr->small_ds_slice_size;
stop_index = start_index + tv_ptr->small_ds_slice_size - 1;
-
+
HDassert( start_index < stop_index );
HDassert( stop_index <= tv_ptr->small_ds_size );
-
+
data_ok = TRUE;
-
+
ptr_1 = tv_ptr->small_ds_buf_1;
for ( u = 0; u < start_index; u++, ptr_1++ ) {
-
+
if ( *ptr_1 != 0 ) {
data_ok = FALSE;
@@ -3890,7 +3804,7 @@ ckrbrd_hs_dr_pio_test__m2d_l2s(struct hs_dr_pio_test_vars_t * tv_ptr)
}
}
- VRFY((data_ok == TRUE),
+ VRFY((data_ok == TRUE),
"large slice write slice to small slice data good.");
(tv_ptr->tests_run)++;
@@ -3918,31 +3832,27 @@ ckrbrd_hs_dr_pio_test__m2d_l2s(struct hs_dr_pio_test_vars_t * tv_ptr)
/*-------------------------------------------------------------------------
- * Function: ckrbrd_hs_dr_pio_test__m2d_s2l()
- *
- * Purpose: Part four of a series of tests of I/O to/from checker
- * board hyperslab selections of different rank in the parallel.
- *
- * Verify that we can write from memory to file using
- * selections of different rank that H5S_select_shape_same()
- * views as being of the same shape.
+ * Function: ckrbrd_hs_dr_pio_test__m2d_s2l()
*
- * Do this by writing checker board selections of the contents
- * of the process's slice of the in memory small data set to
- * slices of the on disk large data set. After each write,
- * read the process's slice of the large data set back into
- * memory, and verify that it contains the expected data.
+ * Purpose: Part four of a series of tests of I/O to/from checker
+ *        board hyperslab selections of different rank in the parallel case.
*
- * Verify that H5S_select_shape_same() returns true on the
- * memory and file selections.
+ * Verify that we can write from memory to file using
+ * selections of different rank that H5Sselect_shape_same()
+ * views as being of the same shape.
*
- * Return: void
+ * Do this by writing checker board selections of the contents
+ * of the process's slice of the in memory small data set to
+ * slices of the on disk large data set. After each write,
+ * read the process's slice of the large data set back into
+ * memory, and verify that it contains the expected data.
*
- * Programmer: JRM -- 8/15/11
+ * Verify that H5Sselect_shape_same() returns true on the
+ * memory and file selections.
*
- * Modifications:
+ * Return: void
*
- * None
+ * Programmer: JRM -- 8/15/11
*
*-------------------------------------------------------------------------
*/
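As in the other three sub-tests, each process works only on the slice selected by its MPI rank: the slowest-changing start index is pinned to mpi_rank rather than iterated over. A minimal sketch of that addressing convention (helper name invented):

static void
pin_slice_origin_to_rank(hsize_t start[], int space_rank)
{
    int mpi_rank;
    int i;

    MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);

    start[0] = (hsize_t)mpi_rank; /* this process's slice */
    for (i = 1; i < space_rank; i++)
        start[i] = 0;
}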
@@ -3952,31 +3862,30 @@ ckrbrd_hs_dr_pio_test__m2d_l2s(struct hs_dr_pio_test_vars_t * tv_ptr)
static void
ckrbrd_hs_dr_pio_test__m2d_s2l(struct hs_dr_pio_test_vars_t * tv_ptr)
{
-#if CHECKER_BOARD_HS_DR_PIO_TEST__M2D_S2L__DEBUG
+#if CHECKER_BOARD_HS_DR_PIO_TEST__M2D_S2L__DEBUG
const char *fcnName = "ckrbrd_hs_dr_pio_test__m2d_s2l()";
#endif /* CONTIG_HS_DR_PIO_TEST__M2D_S2L__DEBUG */
- hbool_t data_ok = FALSE;
- hbool_t mis_match = FALSE;
- int i, j, k, l;
+ hbool_t data_ok = FALSE;
+ int i, j, k, l;
size_t u;
size_t start_index;
size_t stop_index;
- uint32_t expected_value;
+ uint32_t expected_value;
uint32_t * ptr_1;
- int mpi_rank; /* needed by VRFY */
+ int mpi_rank; /* needed by VRFY */
hsize_t sel_start[PAR_SS_DR_MAX_RANK];
htri_t check; /* Shape comparison return value */
- herr_t ret; /* Generic return value */
+ herr_t ret; /* Generic return value */
/* initialize the local copy of mpi_rank */
mpi_rank = tv_ptr->mpi_rank;
- /* Now write the contents of the process's slice of the in memory
- * small data set to slices of the on disk large data set. After
+ /* Now write the contents of the process's slice of the in memory
+ * small data set to slices of the on disk large data set. After
* each write, read the process's slice of the large data set back
- * into memory, and verify that it contains the expected data.
- * Verify that H5S_select_shape_same() returns true on the memory
+ * into memory, and verify that it contains the expected data.
+ * Verify that H5Sselect_shape_same() returns true on the memory
* and file selections.
*/
@@ -4009,7 +3918,7 @@ ckrbrd_hs_dr_pio_test__m2d_s2l(struct hs_dr_pio_test_vars_t * tv_ptr)
tv_ptr->block);
     VRFY((ret >= 0), "H5Sselect_hyperslab(tv_ptr->mem_large_ds_sid, set) succeeded");
- /* setup a checkerboard selection of the slice of the in memory small
+ /* setup a checkerboard selection of the slice of the in memory small
* data set associated with the process's mpi rank.
*/
@@ -4025,7 +3934,7 @@ ckrbrd_hs_dr_pio_test__m2d_s2l(struct hs_dr_pio_test_vars_t * tv_ptr)
sel_start);
/* set up start, stride, count, and block -- note that we will
- * change start[] so as to write checkerboard selections of slices
+ * change start[] so as to write checkerboard selections of slices
* of the small data set to slices of the large data set.
*/
for ( i = 0; i < PAR_SS_DR_MAX_RANK; i++ ) {
@@ -4047,7 +3956,7 @@ ckrbrd_hs_dr_pio_test__m2d_s2l(struct hs_dr_pio_test_vars_t * tv_ptr)
HDmemset(tv_ptr->large_ds_buf_1, 0, sizeof(uint32_t) * tv_ptr->large_ds_size);
#if CHECKER_BOARD_HS_DR_PIO_TEST__M2D_S2L__DEBUG
- HDfprintf(stdout,
+ HDfprintf(stdout,
"%s writing process checkerboard selections of slices of small ds to process slices of large ds on disk.\n",
fcnName);
#endif /* CHECKER_BOARD_HS_DR_PIO_TEST__M2D_S2L__DEBUG */
@@ -4061,9 +3970,9 @@ ckrbrd_hs_dr_pio_test__m2d_s2l(struct hs_dr_pio_test_vars_t * tv_ptr)
i = 0;
}
- /* since large_rank is at most PAR_SS_DR_MAX_RANK, no need to
+ /* since large_rank is at most PAR_SS_DR_MAX_RANK, no need to
* loop over it -- either we are setting i to mpi_rank, or
- * we are setting it to zero. It will not change during the
+ * we are setting it to zero. It will not change during the
* test.
*/
@@ -4087,7 +3996,7 @@ ckrbrd_hs_dr_pio_test__m2d_s2l(struct hs_dr_pio_test_vars_t * tv_ptr)
}
do {
- /* since small rank >= 2 and large_rank > small_rank, we
+ /* since small rank >= 2 and large_rank > small_rank, we
* have large_rank >= 3. Since PAR_SS_DR_MAX_RANK == 5
                 * (barring major re-organization), this gives us:
*
@@ -4152,23 +4061,22 @@ ckrbrd_hs_dr_pio_test__m2d_s2l(struct hs_dr_pio_test_vars_t * tv_ptr)
);
- /* verify that H5S_select_shape_same() reports the in
+ /* verify that H5Sselect_shape_same() reports the in
* memory small data set slice selection and the
* on disk slice through the large data set selection
* as having the same shape.
*/
- check = H5S_select_shape_same_test(tv_ptr->mem_small_ds_sid,
- tv_ptr->file_large_ds_sid_1);
- VRFY((check == TRUE), "H5S_select_shape_same_test passed");
+ check = H5Sselect_shape_same(tv_ptr->mem_small_ds_sid, tv_ptr->file_large_ds_sid_1);
+ VRFY((check == TRUE), "H5Sselect_shape_same passed");
- /* write the small data set slice from memory to the
- * target slice of the disk data set
+ /* write the small data set slice from memory to the
+ * target slice of the disk data set
*/
-#if CHECKER_BOARD_HS_DR_PIO_TEST__M2D_S2L__DEBUG
- HDfprintf(stdout, "%s:%d: start = %d %d %d %d %d.\n",
+#if CHECKER_BOARD_HS_DR_PIO_TEST__M2D_S2L__DEBUG
+ HDfprintf(stdout, "%s:%d: start = %d %d %d %d %d.\n",
fcnName, tv_ptr->mpi_rank,
- tv_ptr->start[0], tv_ptr->start[1], tv_ptr->start[2],
+ tv_ptr->start[0], tv_ptr->start[1], tv_ptr->start[2],
tv_ptr->start[3], tv_ptr->start[4]);
HDfprintf(stdout, "%s:%d: mem/file extent dims = %d/%d.\n",
fcnName, tv_ptr->mpi_rank,
@@ -4181,11 +4089,11 @@ ckrbrd_hs_dr_pio_test__m2d_s2l(struct hs_dr_pio_test_vars_t * tv_ptr)
tv_ptr->file_large_ds_sid_1,
tv_ptr->xfer_plist,
tv_ptr->small_ds_buf_0);
- VRFY((ret != FAIL),
+ VRFY((ret != FAIL),
"H5Dwrite of small ds slice to large ds succeeded");
- /* read this processes slice on the on disk large
+ /* read this processes slice on the on disk large
* data set into memory.
*/
@@ -4195,18 +4103,18 @@ ckrbrd_hs_dr_pio_test__m2d_s2l(struct hs_dr_pio_test_vars_t * tv_ptr)
tv_ptr->file_large_ds_sid_0,
tv_ptr->xfer_plist,
tv_ptr->large_ds_buf_1);
- VRFY((ret != FAIL),
+ VRFY((ret != FAIL),
"H5Dread() of process slice of large ds succeeded");
/* verify that the expected data and only the
* expected data was read.
*/
- expected_value =
+ expected_value =
(uint32_t)((size_t)(tv_ptr->mpi_rank) * tv_ptr->small_ds_slice_size);
start_index = (size_t)
- ((i * tv_ptr->edge_size * tv_ptr->edge_size *
+ ((i * tv_ptr->edge_size * tv_ptr->edge_size *
tv_ptr->edge_size * tv_ptr->edge_size) +
(j * tv_ptr->edge_size * tv_ptr->edge_size * tv_ptr->edge_size) +
(k * tv_ptr->edge_size * tv_ptr->edge_size) +
@@ -4217,8 +4125,6 @@ ckrbrd_hs_dr_pio_test__m2d_s2l(struct hs_dr_pio_test_vars_t * tv_ptr)
HDassert( stop_index < tv_ptr->large_ds_size );
- mis_match = FALSE;
-
data_ok = TRUE;
ptr_1 = tv_ptr->large_ds_buf_1;
@@ -4252,7 +4158,7 @@ ckrbrd_hs_dr_pio_test__m2d_s2l(struct hs_dr_pio_test_vars_t * tv_ptr)
}
}
- VRFY((data_ok == TRUE),
+ VRFY((data_ok == TRUE),
"small ds cb slice write to large ds slice data good.");
(tv_ptr->tests_run)++;
@@ -4280,22 +4186,14 @@ ckrbrd_hs_dr_pio_test__m2d_s2l(struct hs_dr_pio_test_vars_t * tv_ptr)
/*-------------------------------------------------------------------------
- * Function: ckrbrd_hs_dr_pio_test__run_test()
+ * Function: ckrbrd_hs_dr_pio_test__run_test()
*
- * Purpose: Test I/O to/from checkerboard selections of hyperslabs of
- * different rank in the parallel.
+ * Purpose: Test I/O to/from checkerboard selections of hyperslabs of
+ *        different rank in the parallel case.
*
- * Return: void
+ * Return: void
*
- * Programmer: JRM -- 10/10/09
- *
- * Modifications:
- *
- * JRM -- 9/16/10
- * Added the express_test parameter. Use it to control
- * whether we set an alignment, and whether we allocate
- * chunks such that no two processes will normally touch
- * the same chunk.
+ * Programmer: JRM -- 10/10/09
*
*-------------------------------------------------------------------------
*/
@@ -4322,10 +4220,9 @@ ckrbrd_hs_dr_pio_test__run_test(const int test_num,
#if CKRBRD_HS_DR_PIO_TEST__RUN_TEST__DEBUG
const char *fcnName = "ckrbrd_hs_dr_pio_test__run_test()";
#endif /* CKRBRD_HS_DR_PIO_TEST__RUN_TEST__DEBUG */
- int mpi_rank; /* needed by VRFY */
- struct hs_dr_pio_test_vars_t test_vars =
+ struct hs_dr_pio_test_vars_t test_vars =
{
- /* int mpi_size = */ -1,
+ /* int mpi_size = */ -1,
/* int mpi_rank = */ -1,
/* MPI_Comm mpi_comm = */ MPI_COMM_NULL,
/* MPI_Info mpi_info = */ MPI_INFO_NULL,
@@ -4341,7 +4238,7 @@ ckrbrd_hs_dr_pio_test__run_test(const int test_num,
/* uint32_t * small_ds_buf_2 = */ NULL,
/* uint32_t * small_ds_slice_buf = */ NULL,
/* uint32_t * large_ds_buf_0 = */ NULL,
- /* uint32_t * large_ds_buf_1 = */ NULL,
+ /* uint32_t * large_ds_buf_1 = */ NULL,
/* uint32_t * large_ds_buf_2 = */ NULL,
/* uint32_t * large_ds_slice_buf = */ NULL,
/* int small_ds_offset = */ -1,
@@ -4378,24 +4275,20 @@ ckrbrd_hs_dr_pio_test__run_test(const int test_num,
/* hsize_t * stride_ptr = */ NULL,
/* hsize_t * count_ptr = */ NULL,
/* hsize_t * block_ptr = */ NULL,
- /* int skips = */ 0,
- /* int max_skips = */ 0,
+ /* int skips = */ 0,
+ /* int max_skips = */ 0,
/* int64_t total_tests = */ 0,
/* int64_t tests_run = */ 0,
/* int64_t tests_skipped = */ 0
};
struct hs_dr_pio_test_vars_t * tv_ptr = &test_vars;
- hs_dr_pio_test__setup(test_num, edge_size, checker_edge_size,
- chunk_edge_size, small_rank, large_rank,
- use_collective_io, dset_type, express_test,
+ hs_dr_pio_test__setup(test_num, edge_size, checker_edge_size,
+ chunk_edge_size, small_rank, large_rank,
+ use_collective_io, dset_type, express_test,
tv_ptr);
- /* initialize the local copy of mpi_rank */
- mpi_rank = tv_ptr->mpi_rank;
-
-
/* initialize skips & max_skips */
tv_ptr->skips = *skips_ptr;
tv_ptr->max_skips = max_skips;
@@ -4411,12 +4304,12 @@ ckrbrd_hs_dr_pio_test__run_test(const int test_num,
/* first, verify that we can read from disk correctly using selections
- * of different rank that H5S_select_shape_same() views as being of the
+ * of different rank that H5Sselect_shape_same() views as being of the
* same shape.
*
- * Start by reading a (small_rank - 1)-D slice from this processes slice
- * of the on disk large data set, and verifying that the data read is
- * correct. Verify that H5S_select_shape_same() returns true on the
+ * Start by reading a (small_rank - 1)-D slice from this process's slice
+ * of the on disk large data set, and verifying that the data read is
+ * correct. Verify that H5Sselect_shape_same() returns true on the
* memory and file selections.
*
* The first step is to set up the needed checker board selection in the
@@ -4426,8 +4319,8 @@ ckrbrd_hs_dr_pio_test__run_test(const int test_num,
ckrbrd_hs_dr_pio_test__d2m_l2s(tv_ptr);
- /* similarly, read slices of the on disk small data set into slices
- * through the in memory large data set, and verify that the correct
+ /* similarly, read slices of the on disk small data set into slices
+ * through the in memory large data set, and verify that the correct
* data (and only the correct data) is read.
*/
@@ -4436,23 +4329,23 @@ ckrbrd_hs_dr_pio_test__run_test(const int test_num,
/* now we go in the opposite direction, verifying that we can write
* from memory to file using selections of different rank that
- * H5S_select_shape_same() views as being of the same shape.
+ * H5Sselect_shape_same() views as being of the same shape.
*
* Start by writing small_rank - 1 D slices from the in memory large data
- * set to the on disk small dataset. After each write, read the slice of
- * the small dataset back from disk, and verify that it contains the
- * expected data. Verify that H5S_select_shape_same() returns true on
+ * set to the on disk small dataset. After each write, read the slice of
+ * the small dataset back from disk, and verify that it contains the
+ * expected data. Verify that H5Sselect_shape_same() returns true on
* the memory and file selections.
*/
ckrbrd_hs_dr_pio_test__m2d_l2s(tv_ptr);
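The memory-to-disk passes rely on H5Dwrite() accepting a memory selection whose dataspace rank differs from the file selection's, provided the selected shapes match. A minimal sketch under that assumption (all identifiers hypothetical):

    /* mem_sid selects a (small_rank - 1)-D slice in the memory dataspace;
     * file_sid selects a shape-same slice in the higher-rank file
     * dataspace. */
    if (H5Sselect_shape_same(mem_sid, file_sid) == TRUE) {
        ret = H5Dwrite(dset_id, H5T_NATIVE_UINT, mem_sid, file_sid,
                       xfer_plist, slice_buf);
        VRFY((ret != FAIL), "cross-rank H5Dwrite succeeded");
    }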
- /* Now write the contents of the process's slice of the in memory
- * small data set to slices of the on disk large data set. After
+ /* Now write the contents of the process's slice of the in memory
+ * small data set to slices of the on disk large data set. After
* each write, read the process's slice of the large data set back
- * into memory, and verify that it contains the expected data.
- * Verify that H5S_select_shape_same() returns true on the memory
+ * into memory, and verify that it contains the expected data.
+ * Verify that H5Sselect_shape_same() returns true on the memory
* and file selections.
*/
@@ -4461,8 +4354,8 @@ ckrbrd_hs_dr_pio_test__run_test(const int test_num,
#if CKRBRD_HS_DR_PIO_TEST__RUN_TEST__DEBUG
if ( MAINPROCESS ) {
- HDfprintf(stdout,
- "test %d: Subtests complete -- tests run/skipped/total = %lld/%lld/%lld.\n",
+ HDfprintf(stdout,
+ "test %d: Subtests complete -- tests run/skipped/total = %lld/%lld/%lld.\n",
test_num, (long long)(tv_ptr->tests_run), (long long)(tv_ptr->tests_skipped),
(long long)(tv_ptr->total_tests));
}
@@ -4487,28 +4380,14 @@ ckrbrd_hs_dr_pio_test__run_test(const int test_num,
/*-------------------------------------------------------------------------
- * Function: ckrbrd_hs_dr_pio_test()
- *
- * Purpose: Test I/O to/from hyperslab selections of different rank in
- * the parallel case.
+ * Function: ckrbrd_hs_dr_pio_test()
*
- * Return: void
+ * Purpose: Test I/O to/from hyperslab selections of different rank in
+ * the parallel case.
*
- * Programmer: JRM -- 9/18/09
+ * Return: void
*
- * Modifications:
- *
- * Modified function to take a sample of the run times
- * of the different tests, and skip some of them if
- * run times are too long.
- *
- * We need to do this because Lustre runns very slowly
- * if two or more processes are banging on the same
- * block of memory.
- * JRM -- 9/10/10
- * Break this one big test into 4 smaller tests according
- * to {independent,collective}x{contigous,chunked} datasets.
- * AKC -- 2010/01/17
+ * Programmer: JRM -- 9/18/09
*
*-------------------------------------------------------------------------
*/
@@ -4518,16 +4397,16 @@ ckrbrd_hs_dr_pio_test(ShapeSameTestMethods sstest_type)
{
int express_test;
int local_express_test;
- int mpi_size = -1;
+ int mpi_size = -1;
int mpi_rank = -1;
- int test_num = 0;
- int edge_size;
+ int test_num = 0;
+ int edge_size;
int checker_edge_size = 3;
- int chunk_edge_size = 0;
- int small_rank = 3;
- int large_rank = 4;
- int mpi_result;
- hid_t dset_type = H5T_NATIVE_UINT;
+ int chunk_edge_size = 0;
+ int small_rank = 3;
+ int large_rank = 4;
+ int mpi_result;
+ hid_t dset_type = H5T_NATIVE_UINT;
int skips = 0;
int max_skips = 0;
/* The following table lists the number of sub-tests skipped between
@@ -4566,13 +4445,13 @@ ckrbrd_hs_dr_pio_test(ShapeSameTestMethods sstest_type)
max_skips = max_skips_tbl[local_express_test];
}
-#if 0
+#if 0
{
int DebugWait = 1;
-
+
while (DebugWait) ;
}
-#endif
+#endif
for ( large_rank = 3; large_rank <= PAR_SS_DR_MAX_RANK; large_rank++ ) {
@@ -4702,15 +4581,15 @@ int dim0;
int dim1;
int chunkdim0;
int chunkdim1;
-int nerrors = 0; /* errors count */
-int ndatasets = 300; /* number of datasets to create*/
+int nerrors = 0; /* errors count */
+int ndatasets = 300; /* number of datasets to create*/
int ngroups = 512; /* number of groups to create in root
* group. */
-int facc_type = FACC_MPIO; /*Test file access type */
+int facc_type = FACC_MPIO; /*Test file access type */
int dxfer_coll_type = DXFER_COLLECTIVE_IO;
-H5E_auto2_t old_func; /* previous error handler */
-void *old_client_data; /* previous error handler arg.*/
+H5E_auto2_t old_func; /* previous error handler */
+void *old_client_data; /* previous error handler arg.*/
/* other option flags */
@@ -4722,10 +4601,10 @@ void *old_client_data; /* previous error handler arg.*/
#define NFILENAME 2
#define PARATESTFILE filenames[0]
const char *FILENAME[NFILENAME]={
- "ShapeSameTest",
- NULL};
-char filenames[NFILENAME][PATH_MAX];
-hid_t fapl; /* file access property list */
+ "ShapeSameTest",
+ NULL};
+char filenames[NFILENAME][PATH_MAX];
+hid_t fapl; /* file access property list */
#ifdef USE_PAUSE
/* pause the process for a moment to allow debugger to attach if desired. */
@@ -4738,7 +4617,7 @@ void pause_proc(void)
{
int pid;
- h5_stat_t statbuf;
+ h5_stat_t statbuf;
char greenlight[] = "go";
int maxloop = 10;
int loops = 0;
@@ -4755,15 +4634,15 @@ void pause_proc(void)
MPI_Get_processor_name(mpi_name, &mpi_namelen);
if (MAINPROCESS)
- while ((HDstat(greenlight, &statbuf) == -1) && loops < maxloop){
- if (!loops++){
- printf("Proc %d (%*s, %d): to debug, attach %d\n",
- mpi_rank, mpi_namelen, mpi_name, pid, pid);
- }
- printf("waiting(%ds) for file %s ...\n", time_int, greenlight);
- fflush(stdout);
+ while ((HDstat(greenlight, &statbuf) == -1) && loops < maxloop){
+ if (!loops++){
+ HDprintf("Proc %d (%*s, %d): to debug, attach %d\n",
+ mpi_rank, mpi_namelen, mpi_name, pid, pid);
+ }
+ HDprintf("waiting(%ds) for file %s ...\n", time_int, greenlight);
+ fflush(stdout);
HDsleep(time_int);
- }
+ }
MPI_Barrier(MPI_COMM_WORLD);
}
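In practice the main rank spins until a file named "go" appears in the working directory (created by hand from another shell, for instance), and the barrier above then releases all ranks together.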
@@ -4775,7 +4654,7 @@ int MPI_Init(int *argc, char ***argv)
pause_proc();
return (ret_code);
}
-#endif /* USE_PAUSE */
+#endif /* USE_PAUSE */
/*
@@ -4784,18 +4663,18 @@ int MPI_Init(int *argc, char ***argv)
static void
usage(void)
{
- printf(" [-r] [-w] [-m<n_datasets>] [-n<n_groups>] "
- "[-o] [-f <prefix>] [-d <dim0> <dim1>]\n");
- printf("\t-m<n_datasets>"
- "\tset number of datasets for the multiple dataset test\n");
- printf("\t-n<n_groups>"
+ HDprintf(" [-r] [-w] [-m<n_datasets>] [-n<n_groups>] "
+ "[-o] [-f <prefix>] [-d <dim0> <dim1>]\n");
+ HDprintf("\t-m<n_datasets>"
+ "\tset number of datasets for the multiple dataset test\n");
+ HDprintf("\t-n<n_groups>"
"\tset number of groups for the multiple group test\n");
- printf("\t-f <prefix>\tfilename prefix\n");
- printf("\t-2\t\tuse Split-file together with MPIO\n");
- printf("\t-d <factor0> <factor1>\tdataset dimensions factors. Defaults (%d,%d)\n",
- ROW_FACTOR, COL_FACTOR);
- printf("\t-c <dim0> <dim1>\tdataset chunk dimensions. Defaults (dim0/10,dim1/10)\n");
- printf("\n");
+ HDprintf("\t-f <prefix>\tfilename prefix\n");
+ HDprintf("\t-2\t\tuse Split-file together with MPIO\n");
+ HDprintf("\t-d <factor0> <factor1>\tdataset dimensions factors. Defaults (%d,%d)\n",
+ ROW_FACTOR, COL_FACTOR);
+ HDprintf("\t-c <dim0> <dim1>\tdataset chunk dimensions. Defaults (dim0/10,dim1/10)\n");
+ HDprintf("\n");
}
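As an illustrative invocation only (binary name and MPI launcher vary by build), a four-process run overriding the dimension factors and chunk dimensions might look like: mpiexec -np 4 ./t_shapesame -d 2 3 -c 4 4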
@@ -4805,7 +4684,7 @@ usage(void)
static int
parse_options(int argc, char **argv)
{
- int mpi_size, mpi_rank; /* mpi variables */
+ int mpi_size, mpi_rank; /* mpi variables */
MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
@@ -4816,107 +4695,107 @@ parse_options(int argc, char **argv)
chunkdim1 = (dim1+9)/10;
while (--argc){
- if (**(++argv) != '-'){
- break;
- }else{
- switch(*(*argv+1)){
- case 'm': ndatasets = atoi((*argv+1)+1);
- if (ndatasets < 0){
- nerrors++;
- return(1);
- }
- break;
- case 'n': ngroups = atoi((*argv+1)+1);
- if (ngroups < 0){
+ if (**(++argv) != '-'){
+ break;
+ }else{
+ switch(*(*argv+1)){
+ case 'm': ndatasets = atoi((*argv+1)+1);
+ if (ndatasets < 0){
+ nerrors++;
+ return(1);
+ }
+ break;
+ case 'n': ngroups = atoi((*argv+1)+1);
+ if (ngroups < 0){
nerrors++;
return(1);
- }
+ }
break;
- case 'f': if (--argc < 1) {
- nerrors++;
- return(1);
- }
- if (**(++argv) == '-') {
- nerrors++;
- return(1);
- }
- paraprefix = *argv;
- break;
- case 'i': /* Collective MPI-IO access with independent IO */
- dxfer_coll_type = DXFER_INDEPENDENT_IO;
- break;
- case '2': /* Use the split-file driver with MPIO access */
- /* Can use $HDF5_METAPREFIX to define the */
- /* meta-file-prefix. */
- facc_type = FACC_MPIO | FACC_SPLIT;
- break;
- case 'd': /* dimensizes */
- if (--argc < 2){
- nerrors++;
- return(1);
- }
- dim0 = atoi(*(++argv))*mpi_size;
- argc--;
- dim1 = atoi(*(++argv))*mpi_size;
- /* set default chunkdim sizes too */
- chunkdim0 = (dim0+9)/10;
- chunkdim1 = (dim1+9)/10;
- break;
- case 'c': /* chunk dimensions */
- if (--argc < 2){
- nerrors++;
- return(1);
- }
- chunkdim0 = atoi(*(++argv));
- argc--;
- chunkdim1 = atoi(*(++argv));
- break;
- case 'h': /* print help message--return with nerrors set */
- return(1);
- default: printf("Illegal option(%s)\n", *argv);
- nerrors++;
- return(1);
- }
- }
+ case 'f': if (--argc < 1) {
+ nerrors++;
+ return(1);
+ }
+ if (**(++argv) == '-') {
+ nerrors++;
+ return(1);
+ }
+ paraprefix = *argv;
+ break;
+ case 'i': /* Collective MPI-IO access with independent IO */
+ dxfer_coll_type = DXFER_INDEPENDENT_IO;
+ break;
+ case '2': /* Use the split-file driver with MPIO access */
+ /* Can use $HDF5_METAPREFIX to define the */
+ /* meta-file-prefix. */
+ facc_type = FACC_MPIO | FACC_SPLIT;
+ break;
+ case 'd': /* dimension sizes */
+ if (--argc < 2){
+ nerrors++;
+ return(1);
+ }
+ dim0 = atoi(*(++argv))*mpi_size;
+ argc--;
+ dim1 = atoi(*(++argv))*mpi_size;
+ /* set default chunkdim sizes too */
+ chunkdim0 = (dim0+9)/10;
+ chunkdim1 = (dim1+9)/10;
+ break;
+ case 'c': /* chunk dimensions */
+ if (--argc < 2){
+ nerrors++;
+ return(1);
+ }
+ chunkdim0 = atoi(*(++argv));
+ argc--;
+ chunkdim1 = atoi(*(++argv));
+ break;
+ case 'h': /* print help message--return with nerrors set */
+ return(1);
+ default: HDprintf("Illegal option(%s)\n", *argv);
+ nerrors++;
+ return(1);
+ }
+ }
} /*while*/
/* check validity of dimension and chunk sizes */
if (dim0 <= 0 || dim1 <= 0){
- printf("Illegal dim sizes (%d, %d)\n", dim0, dim1);
- nerrors++;
- return(1);
+ HDprintf("Illegal dim sizes (%d, %d)\n", dim0, dim1);
+ nerrors++;
+ return(1);
}
if (chunkdim0 <= 0 || chunkdim1 <= 0){
- printf("Illegal chunkdim sizes (%d, %d)\n", chunkdim0, chunkdim1);
- nerrors++;
- return(1);
+ HDprintf("Illegal chunkdim sizes (%d, %d)\n", chunkdim0, chunkdim1);
+ nerrors++;
+ return(1);
}
/* Make sure datasets can be divided into equal portions by the processes */
if ((dim0 % mpi_size) || (dim1 % mpi_size)){
- if (MAINPROCESS)
- printf("dim0(%d) and dim1(%d) must be multiples of processes(%d)\n",
- dim0, dim1, mpi_size);
- nerrors++;
- return(1);
+ if (MAINPROCESS)
+ HDprintf("dim0(%d) and dim1(%d) must be multiples of processes(%d)\n",
+ dim0, dim1, mpi_size);
+ nerrors++;
+ return(1);
}
/* compose the test filenames */
{
- int i, n;
-
- n = sizeof(FILENAME)/sizeof(FILENAME[0]) - 1; /* exclude the NULL */
-
- for (i=0; i < n; i++)
- if (h5_fixname(FILENAME[i],fapl,filenames[i],sizeof(filenames[i]))
- == NULL){
- printf("h5_fixname failed\n");
- nerrors++;
- return(1);
- }
- printf("Test filenames are:\n");
- for (i=0; i < n; i++)
- printf(" %s\n", filenames[i]);
+ int i, n;
+
+ n = sizeof(FILENAME)/sizeof(FILENAME[0]) - 1; /* exclude the NULL */
+
+ for (i=0; i < n; i++)
+ if (h5_fixname(FILENAME[i],fapl,filenames[i],sizeof(filenames[i]))
+ == NULL){
+ HDprintf("h5_fixname failed\n");
+ nerrors++;
+ return(1);
+ }
+ HDprintf("Test filenames are:\n");
+ for (i=0; i < n; i++)
+ HDprintf(" %s\n", filenames[i]);
}
return(0);
@@ -4931,7 +4810,7 @@ create_faccess_plist(MPI_Comm comm, MPI_Info info, int l_facc_type)
{
hid_t ret_pl = -1;
herr_t ret; /* generic return value */
- int mpi_rank; /* mpi variables */
+ int mpi_rank; /* mpi variables */
/* need the rank for error checking macros */
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
@@ -4940,36 +4819,36 @@ create_faccess_plist(MPI_Comm comm, MPI_Info info, int l_facc_type)
VRFY((ret_pl >= 0), "H5P_FILE_ACCESS");
if (l_facc_type == FACC_DEFAULT)
- return (ret_pl);
+ return (ret_pl);
if (l_facc_type == FACC_MPIO){
- /* set Parallel access with communicator */
- ret = H5Pset_fapl_mpio(ret_pl, comm, info);
- VRFY((ret >= 0), "");
+ /* set Parallel access with communicator */
+ ret = H5Pset_fapl_mpio(ret_pl, comm, info);
+ VRFY((ret >= 0), "");
ret = H5Pset_all_coll_metadata_ops(ret_pl, TRUE);
- VRFY((ret >= 0), "");
+ VRFY((ret >= 0), "");
ret = H5Pset_coll_metadata_write(ret_pl, TRUE);
- VRFY((ret >= 0), "");
- return(ret_pl);
+ VRFY((ret >= 0), "");
+ return(ret_pl);
}
if (l_facc_type == (FACC_MPIO | FACC_SPLIT)){
- hid_t mpio_pl;
-
- mpio_pl = H5Pcreate (H5P_FILE_ACCESS);
- VRFY((mpio_pl >= 0), "");
- /* set Parallel access with communicator */
- ret = H5Pset_fapl_mpio(mpio_pl, comm, info);
- VRFY((ret >= 0), "");
-
- /* setup file access template */
- ret_pl = H5Pcreate (H5P_FILE_ACCESS);
- VRFY((ret_pl >= 0), "");
- /* set Parallel access with communicator */
- ret = H5Pset_fapl_split(ret_pl, ".meta", mpio_pl, ".raw", mpio_pl);
- VRFY((ret >= 0), "H5Pset_fapl_split succeeded");
- H5Pclose(mpio_pl);
- return(ret_pl);
+ hid_t mpio_pl;
+
+ mpio_pl = H5Pcreate (H5P_FILE_ACCESS);
+ VRFY((mpio_pl >= 0), "");
+ /* set Parallel access with communicator */
+ ret = H5Pset_fapl_mpio(mpio_pl, comm, info);
+ VRFY((ret >= 0), "");
+
+ /* setup file access template */
+ ret_pl = H5Pcreate (H5P_FILE_ACCESS);
+ VRFY((ret_pl >= 0), "");
+ /* set Parallel access with communicator */
+ ret = H5Pset_fapl_split(ret_pl, ".meta", mpio_pl, ".raw", mpio_pl);
+ VRFY((ret >= 0), "H5Pset_fapl_split succeeded");
+ H5Pclose(mpio_pl);
+ return(ret_pl);
}
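A typical caller pairs this helper with H5Fcreate()/H5Fopen() and releases the list when done; a minimal usage sketch (filename hypothetical, error paths trimmed):

    hid_t acc_pl = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL,
                                        facc_type);
    hid_t fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, acc_pl);
    VRFY((fid >= 0), "H5Fcreate succeeded");
    /* ... parallel I/O against fid ... */
    H5Fclose(fid);
    H5Pclose(acc_pl);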
/* unknown file access types */
@@ -5037,7 +4916,7 @@ sschecker4(void)
int main(int argc, char **argv)
{
- int mpi_size, mpi_rank; /* mpi variables */
+ int mpi_size, mpi_rank; /* mpi variables */
#ifndef H5_HAVE_WIN32_API
/* Un-buffer the stdout and stderr */
@@ -5053,10 +4932,10 @@ int main(int argc, char **argv)
dim1 = COL_FACTOR*mpi_size;
if (MAINPROCESS){
- printf("===================================\n");
- printf("Shape Same Tests Start\n");
- printf(" express_test = %d.\n", GetTestExpress());
- printf("===================================\n");
+ HDprintf("===================================\n");
+ HDprintf("Shape Same Tests Start\n");
+ HDprintf(" express_test = %d.\n", GetTestExpress());
+ HDprintf("===================================\n");
}
/* Attempt to turn off atexit post processing so that in case errors
@@ -5065,7 +4944,7 @@ int main(int argc, char **argv)
* calls. By then, MPI calls may not work.
*/
if (H5dont_atexit() < 0){
- printf("%d: Failed to turn off atexit processing. Continue.\n", mpi_rank);
+ HDprintf("%d: Failed to turn off atexit processing. Continue.\n", mpi_rank);
};
H5open();
h5_show_hostname();
@@ -5074,26 +4953,24 @@ int main(int argc, char **argv)
TestInit(argv[0], usage, parse_options);
/* Shape Same tests using contiguous hyperslab */
-#if 1
AddTest("sscontig1", sscontig1, NULL,
- "Cntg hslab, ind IO, cntg dsets", PARATESTFILE);
+ "Cntg hslab, ind IO, cntg dsets", PARATESTFILE);
AddTest("sscontig2", sscontig2, NULL,
- "Cntg hslab, col IO, cntg dsets", PARATESTFILE);
+ "Cntg hslab, col IO, cntg dsets", PARATESTFILE);
AddTest("sscontig3", sscontig3, NULL,
- "Cntg hslab, ind IO, chnk dsets", PARATESTFILE);
+ "Cntg hslab, ind IO, chnk dsets", PARATESTFILE);
AddTest("sscontig4", sscontig4, NULL,
- "Cntg hslab, col IO, chnk dsets", PARATESTFILE);
-#endif
+ "Cntg hslab, col IO, chnk dsets", PARATESTFILE);
/* Shape Same tests using checker board hyperslab */
AddTest("sschecker1", sschecker1, NULL,
- "Check hslab, ind IO, cntg dsets", PARATESTFILE);
+ "Check hslab, ind IO, cntg dsets", PARATESTFILE);
AddTest("sschecker2", sschecker2, NULL,
- "Check hslab, col IO, cntg dsets", PARATESTFILE);
+ "Check hslab, col IO, cntg dsets", PARATESTFILE);
AddTest("sschecker3", sschecker3, NULL,
- "Check hslab, ind IO, chnk dsets", PARATESTFILE);
+ "Check hslab, ind IO, chnk dsets", PARATESTFILE);
AddTest("sschecker4", sschecker4, NULL,
- "Check hslab, col IO, chnk dsets", PARATESTFILE);
+ "Check hslab, col IO, chnk dsets", PARATESTFILE);
/* Display testing information */
TestInfo(argv[0]);
@@ -5106,9 +4983,9 @@ int main(int argc, char **argv)
TestParseCmdLine(argc, argv);
if (dxfer_coll_type == DXFER_INDEPENDENT_IO && MAINPROCESS){
- printf("===================================\n"
- " Using Independent I/O with file set view to replace collective I/O \n"
- "===================================\n");
+ HDprintf("===================================\n"
+ " Using Independent I/O with file set view to replace collective I/O \n"
+ "===================================\n");
}
@@ -5133,16 +5010,16 @@ int main(int argc, char **argv)
{
int temp;
MPI_Allreduce(&nerrors, &temp, 1, MPI_INT, MPI_MAX, MPI_COMM_WORLD);
- nerrors=temp;
+ nerrors=temp;
}
- if (MAINPROCESS){ /* only process 0 reports */
- printf("===================================\n");
- if (nerrors)
- printf("***Shape Same tests detected %d errors***\n", nerrors);
- else
- printf("Shape Same tests finished with no errors\n");
- printf("===================================\n");
+ if (MAINPROCESS){ /* only process 0 reports */
+ HDprintf("===================================\n");
+ if (nerrors)
+ HDprintf("***Shape Same tests detected %d errors***\n", nerrors);
+ else
+ HDprintf("Shape Same tests finished with no errors\n");
+ HDprintf("===================================\n");
}
/* close HDF5 library */
diff --git a/testpar/t_span_tree.c b/testpar/t_span_tree.c
index a42df95..8d2b61c 100644
--- a/testpar/t_span_tree.c
+++ b/testpar/t_span_tree.c
@@ -23,38 +23,35 @@
2) We will read two datasets with the same hyperslab selection settings,
1. independent read to read independent output,
independent read to read collective output,
- Compare the result,
- If the result is the same, then collective write succeeds.
+ Compare the result,
+ If the result is the same, then collective write succeeds.
2. collective read to read independent output,
independent read to read independent output,
- Compare the result,
- If the result is the same, then collective read succeeds.
+ Compare the result,
+ If the result is the same, then collective read succeeds.
*/
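The comparison described above reduces to reading one selection twice, once collectively and once independently, and checking that the buffers agree. A sketch of that pattern (dataset/dataspace handles and NELEMS are assumed, not taken from this file):

    hid_t dxpl = H5Pcreate(H5P_DATASET_XFER);
    VRFY((H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE) >= 0), "");
    ret = H5Dread(dset, H5T_NATIVE_INT, mspace, fspace, dxpl, buf_coll);
    VRFY((ret >= 0), "collective read succeeded");
    ret = H5Dread(dset, H5T_NATIVE_INT, mspace, fspace, H5P_DEFAULT, buf_ind);
    VRFY((ret >= 0), "independent read succeeded");
    /* the two buffers must agree element for element */
    VRFY((HDmemcmp(buf_coll, buf_ind, NELEMS * sizeof(int)) == 0), "");
    H5Pclose(dxpl);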
-#include "hdf5.h"
#include "H5private.h"
#include "testphdf5.h"
static void coll_write_test(int chunk_factor);
-static void coll_read_test(int chunk_factor);
+static void coll_read_test(void);
/*-------------------------------------------------------------------------
- * Function: coll_irregular_cont_write
+ * Function: coll_irregular_cont_write
*
- * Purpose: Wrapper to test the collectively irregular hyperslab write in
- contiguous storage
+ * Purpose: Wrapper to test the collectively irregular hyperslab write in
+ * contiguous storage
*
- * Return: Success: 0
+ * Return: Success: 0
*
- * Failure: -1
+ * Failure: -1
*
- * Programmer: Unknown
- * Dec 2nd, 2004
- *
- * Modifications:
+ * Programmer: Unknown
+ * Dec 2nd, 2004
*
*-------------------------------------------------------------------------
*/
@@ -69,19 +66,17 @@ coll_irregular_cont_write(void)
/*-------------------------------------------------------------------------
- * Function: coll_irregular_cont_read
- *
- * Purpose: Wrapper to test the collectively irregular hyperslab read in
- contiguous storage
+ * Function: coll_irregular_cont_read
*
- * Return: Success: 0
+ * Purpose: Wrapper to test the collectively irregular hyperslab read in
+ * contiguous storage
*
- * Failure: -1
+ * Return: Success: 0
*
- * Programmer: Unknown
- * Dec 2nd, 2004
+ * Failure: -1
*
- * Modifications:
+ * Programmer: Unknown
+ * Dec 2nd, 2004
*
*-------------------------------------------------------------------------
*/
@@ -89,25 +84,23 @@ void
coll_irregular_cont_read(void)
{
- coll_read_test(0);
+ coll_read_test();
}
/*-------------------------------------------------------------------------
- * Function: coll_irregular_simple_chunk_write
+ * Function: coll_irregular_simple_chunk_write
*
- * Purpose: Wrapper to test the collectively irregular hyperslab write in
- chunk storage(1 chunk)
+ * Purpose: Wrapper to test the collectively irregular hyperslab write in
+ * chunk storage(1 chunk)
*
- * Return: Success: 0
+ * Return: Success: 0
*
- * Failure: -1
+ * Failure: -1
*
- * Programmer: Unknown
- * Dec 2nd, 2004
- *
- * Modifications:
+ * Programmer: Unknown
+ * Dec 2nd, 2004
*
*-------------------------------------------------------------------------
*/
@@ -122,19 +115,17 @@ coll_irregular_simple_chunk_write(void)
/*-------------------------------------------------------------------------
- * Function: coll_irregular_simple_chunk_read
- *
- * Purpose: Wrapper to test the collectively irregular hyperslab read in chunk
- storage(1 chunk)
+ * Function: coll_irregular_simple_chunk_read
*
- * Return: Success: 0
+ * Purpose: Wrapper to test the collectively irregular hyperslab read in chunk
+ * storage(1 chunk)
*
- * Failure: -1
+ * Return: Success: 0
*
- * Programmer: Unknown
- * Dec 2nd, 2004
+ * Failure: -1
*
- * Modifications:
+ * Programmer: Unknown
+ * Dec 2nd, 2004
*
*-------------------------------------------------------------------------
*/
@@ -142,24 +133,22 @@ void
coll_irregular_simple_chunk_read(void)
{
- coll_read_test(1);
+ coll_read_test();
}
/*-------------------------------------------------------------------------
- * Function: coll_irregular_complex_chunk_write
+ * Function: coll_irregular_complex_chunk_write
*
- * Purpose: Wrapper to test the collectively irregular hyperslab write in chunk
- storage(4 chunks)
+ * Purpose: Wrapper to test the collectively irregular hyperslab write in chunk
+ * storage(4 chunks)
*
- * Return: Success: 0
+ * Return: Success: 0
*
- * Failure: -1
+ * Failure: -1
*
- * Programmer: Unknown
- * Dec 2nd, 2004
- *
- * Modifications:
+ * Programmer: Unknown
+ * Dec 2nd, 2004
*
*-------------------------------------------------------------------------
*/
@@ -174,19 +163,17 @@ coll_irregular_complex_chunk_write(void)
/*-------------------------------------------------------------------------
- * Function: coll_irregular_complex_chunk_read
- *
- * Purpose: Wrapper to test the collectively irregular hyperslab read in chunk
- storage(1 chunk)
+ * Function: coll_irregular_complex_chunk_read
*
- * Return: Success: 0
+ * Purpose: Wrapper to test the collectively irregular hyperslab read in chunk
+ * storage(1 chunk)
*
- * Failure: -1
+ * Return: Success: 0
*
- * Programmer: Unknown
- * Dec 2nd, 2004
+ * Failure: -1
*
- * Modifications:
+ * Programmer: Unknown
+ * Dec 2nd, 2004
*
*-------------------------------------------------------------------------
*/
@@ -194,26 +181,24 @@ void
coll_irregular_complex_chunk_read(void)
{
- coll_read_test(4);
+ coll_read_test();
}
/*-------------------------------------------------------------------------
- * Function: coll_write_test
+ * Function: coll_write_test
*
- * Purpose: To test the collectively irregular hyperslab write in chunk
- storage
+ * Purpose: To test the collectively irregular hyperslab write in chunk
+ * storage
* Input: number of chunks on each dimension
- if number is equal to 0, contiguous storage
- * Return: Success: 0
+ * if number is equal to 0, contiguous storage
+ * Return: Success: 0
*
- * Failure: -1
+ * Failure: -1
*
- * Programmer: Unknown
- * Dec 2nd, 2004
- *
- * Modifications: Oct 18th, 2005
+ * Programmer: Unknown
+ * Dec 2nd, 2004
*
*-------------------------------------------------------------------------
*/
@@ -225,19 +210,11 @@ void coll_write_test(int chunk_factor)
hid_t file, datasetc,dataseti; /* File and dataset identifiers */
hid_t mspaceid1, mspaceid, fspaceid,fspaceid1; /* Dataspace identifiers */
- hsize_t mdim1[1],fsdim[2],mdim[2];
-
-#if 0
- hsize_t mdim1[] = {MSPACE1_DIM}; /* Dimension size of the first dataset
- (in memory) */
- hsize_t fsdim[] = {FSPACE_DIM1, FSPACE_DIM2}; /* Dimension sizes of the dataset
- (on disk) */
-
- hsize_t mdim[] = {MSPACE_DIM1, MSPACE_DIM2}; /* Dimension sizes of the
- dataset in memory when we
- read selection from the
- dataset on the disk */
-#endif
+ hsize_t mdim1[1]; /* Dimension size of the first dataset (in memory) */
+ hsize_t fsdim[2]; /* Dimension sizes of the dataset (on disk) */
+ hsize_t mdim[2]; /* Dimension sizes of the dataset in memory when we
+ * read selection from the dataset on the disk
+ */
hsize_t start[2]; /* Start of hyperslab */
hsize_t stride[2]; /* Stride of hyperslab */
@@ -246,18 +223,12 @@ void coll_write_test(int chunk_factor)
hsize_t chunk_dims[2];
herr_t ret;
- unsigned i;
+ int i;
int fillvalue = 0; /* Fill value for the dataset */
-#if 0
- int matrix_out[MSPACE_DIM1][MSPACE_DIM2];
- int matrix_out1[MSPACE_DIM1][MSPACE_DIM2]; /* Buffer to read from the
- dataset */
- int vector[MSPACE1_DIM];
-#endif
-
-
- int *matrix_out, *matrix_out1, *vector;
+ int *matrix_out = NULL;
+ int *matrix_out1 = NULL; /* Buffer to read from the dataset */
+ int *vector = NULL;
int mpi_size,mpi_rank;
@@ -275,19 +246,19 @@ void coll_write_test(int chunk_factor)
* Buffers' initialization.
*/
- mdim1[0] = MSPACE1_DIM *mpi_size;
+ mdim1[0] = (hsize_t)(MSPACE1_DIM*mpi_size);
mdim[0] = MSPACE_DIM1;
- mdim[1] = MSPACE_DIM2*mpi_size;
+ mdim[1] = (hsize_t)(MSPACE_DIM2*mpi_size);
fsdim[0] = FSPACE_DIM1;
- fsdim[1] = FSPACE_DIM2*mpi_size;
+ fsdim[1] = (hsize_t)(FSPACE_DIM2*mpi_size);
- vector = (int*)HDmalloc(sizeof(int)*mdim1[0]*mpi_size);
- matrix_out = (int*)HDmalloc(sizeof(int)*mdim[0]*mdim[1]*mpi_size);
- matrix_out1 = (int*)HDmalloc(sizeof(int)*mdim[0]*mdim[1]*mpi_size);
+ vector = (int*)HDmalloc(sizeof(int)*(size_t)mdim1[0]*(size_t)mpi_size);
+ matrix_out = (int*)HDmalloc(sizeof(int)*(size_t)mdim[0]*(size_t)mdim[1]*(size_t)mpi_size);
+ matrix_out1 = (int*)HDmalloc(sizeof(int)*(size_t)mdim[0]*(size_t)mdim[1]*(size_t)mpi_size);
- HDmemset(vector,0,sizeof(int)*mdim1[0]*mpi_size);
+ HDmemset(vector,0,sizeof(int)*(size_t)mdim1[0]*(size_t)mpi_size);
vector[0] = vector[MSPACE1_DIM*mpi_size - 1] = -1;
- for (i = 1; i < MSPACE1_DIM*mpi_size - 1; i++) vector[i] = i;
+ for (i = 1; i < MSPACE1_DIM*mpi_size - 1; i++) H5_CHECKED_ASSIGN(vector[i], int, i, unsigned);
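H5_CHECKED_ASSIGN() (from H5private.h) performs the assignment through a cast that is overflow-checked in debug builds; its arguments are (dst, dst_type, src, src_type), so the loop above quiets the signedness-conversion warning a bare assignment would raise.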
/* Grab file access property list */
facc_plist = create_faccess_plist(comm, info, facc_type);
@@ -309,8 +280,8 @@ void coll_write_test(int chunk_factor)
VRFY((ret >= 0),"Fill value creation property list succeeded");
if(chunk_factor != 0) {
- chunk_dims[0] = fsdim[0] / chunk_factor;
- chunk_dims[1] = fsdim[1] / chunk_factor;
+ chunk_dims[0] = fsdim[0] / (hsize_t)chunk_factor;
+ chunk_dims[1] = fsdim[1] / (hsize_t)chunk_factor;
ret = H5Pset_chunk(dcrt_plist, 2, chunk_dims);
VRFY((ret >= 0),"chunk creation property list succeeded");
}
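Since chunk_factor counts chunks per dimension, each chunk dimension is just the file dimension divided by chunk_factor. A standalone sketch of the same DCPL setup (dimension values illustrative only):

    hsize_t fs[2] = {8, 16};             /* hypothetical file dims */
    int factor = 4;                      /* chunks per dimension   */
    hsize_t cdims[2] = {fs[0] / (hsize_t)factor,
                        fs[1] / (hsize_t)factor};  /* {2, 4} */
    hid_t dcpl = H5Pcreate(H5P_DATASET_CREATE);
    VRFY((dcpl >= 0), "H5Pcreate succeeded");
    VRFY((H5Pset_chunk(dcpl, 2, cdims) >= 0), "H5Pset_chunk succeeded");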
@@ -346,7 +317,7 @@ void coll_write_test(int chunk_factor)
*/
start[0] = FHSTART0;
- start[1] = FHSTART1 + mpi_rank * FHSTRIDE1 * FHCOUNT1;
+ start[1] = (hsize_t)(FHSTART1 + mpi_rank * FHSTRIDE1 * FHCOUNT1);
stride[0] = FHSTRIDE0;
stride[1] = FHSTRIDE1;
count[0] = FHCOUNT0;
@@ -367,7 +338,7 @@ void coll_write_test(int chunk_factor)
*/
start[0] = SHSTART0;
- start[1] = SHSTART1+SHCOUNT1*SHBLOCK1*mpi_rank;
+ start[1] = (hsize_t)(SHSTART1+SHCOUNT1*SHBLOCK1*mpi_rank);
stride[0] = SHSTRIDE0;
stride[1] = SHSTRIDE1;
count[0] = SHCOUNT0;
@@ -505,7 +476,7 @@ void coll_write_test(int chunk_factor)
*
*/
start[0] = RFFHSTART0;
- start[1] = RFFHSTART1+mpi_rank*RFFHCOUNT1;
+ start[1] = (hsize_t)(RFFHSTART1+mpi_rank*RFFHCOUNT1);
block[0] = RFFHBLOCK0;
block[1] = RFFHBLOCK1;
stride[0] = RFFHSTRIDE0;
@@ -532,7 +503,7 @@ void coll_write_test(int chunk_factor)
*/
start[0] = RFSHSTART0;
- start[1] = RFSHSTART1+RFSHCOUNT1*mpi_rank;
+ start[1] = (hsize_t)(RFSHSTART1+RFSHCOUNT1*mpi_rank);
block[0] = RFSHBLOCK0;
block[1] = RFSHBLOCK1;
stride[0] = RFSHSTRIDE0;
@@ -571,7 +542,7 @@ void coll_write_test(int chunk_factor)
start[0] = RMFHSTART0;
- start[1] = RMFHSTART1+mpi_rank*RMFHCOUNT1;
+ start[1] = (hsize_t)(RMFHSTART1+mpi_rank*RMFHCOUNT1);
block[0] = RMFHBLOCK0;
block[1] = RMFHBLOCK1;
stride[0] = RMFHSTRIDE0;
@@ -594,7 +565,7 @@ void coll_write_test(int chunk_factor)
*
*/
start[0] = RMSHSTART0;
- start[1] = RMSHSTART1+mpi_rank*RMSHCOUNT1;
+ start[1] = (hsize_t)(RMSHSTART1+mpi_rank*RMSHCOUNT1);
block[0] = RMSHBLOCK0;
block[1] = RMSHBLOCK1;
stride[0] = RMSHSTRIDE0;
@@ -609,19 +580,19 @@ void coll_write_test(int chunk_factor)
* Initialize data buffer.
*/
- HDmemset(matrix_out,0,sizeof(int)*MSPACE_DIM1*MSPACE_DIM2*mpi_size);
- HDmemset(matrix_out1,0,sizeof(int)*MSPACE_DIM1*MSPACE_DIM2*mpi_size);
+ HDmemset(matrix_out,0,sizeof(int)*(size_t)MSPACE_DIM1*(size_t)MSPACE_DIM2*(size_t)mpi_size);
+ HDmemset(matrix_out1,0,sizeof(int)*(size_t)MSPACE_DIM1*(size_t)MSPACE_DIM2*(size_t)mpi_size);
/*
* Read data back to the buffer matrix_out.
*/
ret = H5Dread(datasetc, H5T_NATIVE_INT, mspaceid, fspaceid,
- H5P_DEFAULT, matrix_out);
+ H5P_DEFAULT, matrix_out);
VRFY((ret >= 0),"H5D independent read succeed");
ret = H5Dread(dataseti, H5T_NATIVE_INT, mspaceid, fspaceid,
- H5P_DEFAULT, matrix_out1);
+ H5P_DEFAULT, matrix_out1);
VRFY((ret >= 0),"H5D independent read succeed");
ret = 0;
@@ -664,30 +635,34 @@ void coll_write_test(int chunk_factor)
ret = H5Fclose(file);
VRFY((ret >= 0),"");
+ if (vector)
+ HDfree(vector);
+ if (matrix_out)
+ HDfree(matrix_out);
+ if (matrix_out1)
+ HDfree(matrix_out1);
+
return ;
}
/*-------------------------------------------------------------------------
- * Function: coll_read_test
+ * Function: coll_read_test
*
- * Purpose: To test the collectively irregular hyperslab read in chunk
- storage
+ * Purpose: To test the collectively irregular hyperslab read in chunk
+ * storage
* Input: number of chunks on each dimension
- if number is equal to 0, contiguous storage
- * Return: Success: 0
+ * if number is equal to 0, contiguous storage
+ * Return: Success: 0
*
- * Failure: -1
+ * Failure: -1
*
- * Programmer: Unknown
- * Dec 2nd, 2004
+ * Programmer: Unknown
+ * Dec 2nd, 2004
*
- * Modifications: Oct 18th, 2005
- * Note: This test must be used with the correpsonding
- coll_write_test.
*-------------------------------------------------------------------------
*/
static void
-coll_read_test(int chunk_factor)
+coll_read_test(void)
{
const char *filename;
@@ -697,30 +672,21 @@ coll_read_test(int chunk_factor)
/* Dimension sizes of the dataset (on disk) */
-#if 0
- hsize_t mdim[] = {MSPACE_DIM1, MSPACE_DIM2}; /* Dimension sizes of the
- dataset in memory when we
- read selection from the
- dataset on the disk */
+ hsize_t mdim[2]; /* Dimension sizes of the dataset in memory when we
+ * read selection from the dataset on the disk
+ */
-#endif
- hsize_t mdim[2];
hsize_t start[2]; /* Start of hyperslab */
hsize_t stride[2]; /* Stride of hyperslab */
hsize_t count[2]; /* Block count */
hsize_t block[2]; /* Block sizes */
herr_t ret;
- unsigned i;
+ int i;
int *matrix_out;
- int *matrix_out1;
-#if 0
- int matrix_out[MSPACE_DIM1][MSPACE_DIM2];
- int matrix_out1[MSPACE_DIM1][MSPACE_DIM2]; /* Buffer to read from the
- dataset */
+ int *matrix_out1; /* Buffer to read from the dataset */
-#endif
int mpi_size,mpi_rank;
MPI_Comm comm = MPI_COMM_WORLD;
@@ -738,9 +704,9 @@ coll_read_test(int chunk_factor)
/* Initialize the buffer */
mdim[0] = MSPACE_DIM1;
- mdim[1] = MSPACE_DIM2*mpi_size;
- matrix_out =(int*)HDmalloc(sizeof(int)*MSPACE_DIM1*MSPACE_DIM2*mpi_size);
- matrix_out1=(int*)HDmalloc(sizeof(int)*MSPACE_DIM1*MSPACE_DIM2*mpi_size);
+ mdim[1] = (hsize_t)(MSPACE_DIM2*mpi_size);
+ matrix_out =(int*)HDmalloc(sizeof(int)*(size_t)MSPACE_DIM1*(size_t)MSPACE_DIM2*(size_t)mpi_size);
+ matrix_out1=(int*)HDmalloc(sizeof(int)*(size_t)MSPACE_DIM1*(size_t)MSPACE_DIM2*(size_t)mpi_size);
/*** For testing collective hyperslab selection read ***/
@@ -775,7 +741,7 @@ coll_read_test(int chunk_factor)
*
*/
start[0] = RFFHSTART0;
- start[1] = RFFHSTART1+mpi_rank*RFFHCOUNT1;
+ start[1] = (hsize_t)(RFFHSTART1+mpi_rank*RFFHCOUNT1);
block[0] = RFFHBLOCK0;
block[1] = RFFHBLOCK1;
stride[0] = RFFHSTRIDE0;
@@ -795,7 +761,7 @@ coll_read_test(int chunk_factor)
*
*/
start[0] = RFSHSTART0;
- start[1] = RFSHSTART1+RFSHCOUNT1*mpi_rank;
+ start[1] = (hsize_t)(RFSHSTART1+RFSHCOUNT1*mpi_rank);
block[0] = RFSHBLOCK0;
block[1] = RFSHBLOCK1;
stride[0] = RFSHSTRIDE0;
@@ -825,7 +791,7 @@ coll_read_test(int chunk_factor)
*/
start[0] = RMFHSTART0;
- start[1] = RMFHSTART1+mpi_rank*RMFHCOUNT1;
+ start[1] = (hsize_t)(RMFHSTART1+mpi_rank*RMFHCOUNT1);
block[0] = RMFHBLOCK0;
block[1] = RMFHBLOCK1;
stride[0] = RMFHSTRIDE0;
@@ -847,7 +813,7 @@ coll_read_test(int chunk_factor)
*
*/
start[0] = RMSHSTART0;
- start[1] = RMSHSTART1+mpi_rank*RMSHCOUNT1;
+ start[1] = (hsize_t)(RMSHSTART1+mpi_rank*RMSHCOUNT1);
block[0] = RMSHBLOCK0;
block[1] = RMSHBLOCK1;
stride[0] = RMSHSTRIDE0;
@@ -862,8 +828,8 @@ coll_read_test(int chunk_factor)
* Initialize data buffer.
*/
- HDmemset(matrix_out,0,sizeof(int)*MSPACE_DIM1*MSPACE_DIM2*mpi_size);
- HDmemset(matrix_out1,0,sizeof(int)*MSPACE_DIM1*MSPACE_DIM2*mpi_size);
+ HDmemset(matrix_out,0,sizeof(int)*(size_t)MSPACE_DIM1*(size_t)MSPACE_DIM2*(size_t)mpi_size);
+ HDmemset(matrix_out1,0,sizeof(int)*(size_t)MSPACE_DIM1*(size_t)MSPACE_DIM2*(size_t)mpi_size);
/*
* Read data back to the buffer matrix_out.
@@ -882,7 +848,7 @@ coll_read_test(int chunk_factor)
/* Collective read */
ret = H5Dread(dataseti, H5T_NATIVE_INT, mspaceid, fspaceid1,
- dxfer_plist, matrix_out);
+ dxfer_plist, matrix_out);
VRFY((ret >= 0),"H5D collecive read succeed");
ret = H5Pclose(dxfer_plist);
@@ -890,7 +856,7 @@ coll_read_test(int chunk_factor)
/* Independent read */
ret = H5Dread(dataseti, H5T_NATIVE_INT, mspaceid, fspaceid1,
- H5P_DEFAULT, matrix_out1);
+ H5P_DEFAULT, matrix_out1);
VRFY((ret >= 0),"H5D independent read succeed");
ret = 0;
@@ -901,6 +867,12 @@ coll_read_test(int chunk_factor)
VRFY((ret >= 0),"H5D contiguous irregular collective read succeed");
/*
+ * Free read buffers.
+ */
+ HDfree(matrix_out);
+ HDfree(matrix_out1);
+
+ /*
* Close memory file and memory dataspaces.
*/
ret = H5Sclose(mspaceid);
@@ -927,37 +899,39 @@ coll_read_test(int chunk_factor)
ret = H5Fclose(file);
VRFY((ret >= 0),"");
- return ;
+ return;
}
/****************************************************************
**
-** lower_dim_size_comp_test__select_checker_board():
+** lower_dim_size_comp_test__select_checker_board():
**
-** Given a data space of tgt_rank, and dimensions:
+** Given a dataspace of tgt_rank, and dimensions:
**
-** (mpi_size + 1), edge_size, ... , edge_size
+** (mpi_size + 1), edge_size, ... , edge_size
**
-** edge_size, and a checker_edge_size, select a checker
-** board selection of a sel_rank (sel_rank < tgt_rank)
-** dimensional slice through the data space parallel to the
+** edge_size, and a checker_edge_size, select a checker
+** board selection of a sel_rank (sel_rank < tgt_rank)
+** dimensional slice through the dataspace parallel to the
** sel_rank fastest changing indices, with origin (in the
-** higher indicies) as indicated by the start array.
+** higher indices) as indicated by the start array.
**
-** Note that this function, is hard coded to presume a
-** maximum data space rank of 5.
+** Note that this function is hard coded to presume a
+** maximum dataspace rank of 5.
**
-** While this maximum is declared as a constant, increasing
-** it will require extensive coding in addition to changing
+** While this maximum is declared as a constant, increasing
+** it will require extensive coding in addition to changing
** the value of the constant.
**
-** JRM -- 11/11/09
+** JRM -- 11/11/09
**
****************************************************************/
-#define LDSCT_DS_RANK 5
+#define LDSCT_DS_RANK 5
+#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
#define LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK 0
+#endif
#define LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG 0
@@ -971,32 +945,32 @@ lower_dim_size_comp_test__select_checker_board(
const int sel_rank,
hsize_t sel_start[])
{
-#if LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG
- const char * fcnName =
- "lower_dim_size_comp_test__select_checker_board():";
-#endif
- hbool_t first_selection = TRUE;
+#if LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG
+ const char * fcnName =
+ "lower_dim_size_comp_test__select_checker_board():";
+#endif
+ hbool_t first_selection = TRUE;
int i, j, k, l, m;
- int ds_offset;
- int sel_offset;
- const int test_max_rank = LDSCT_DS_RANK; /* must update code if */
+ int ds_offset;
+ int sel_offset;
+ const int test_max_rank = LDSCT_DS_RANK; /* must update code if */
/* this changes */
- hsize_t base_count;
+ hsize_t base_count;
hsize_t offset_count;
- hsize_t start[LDSCT_DS_RANK];
- hsize_t stride[LDSCT_DS_RANK];
- hsize_t count[LDSCT_DS_RANK];
- hsize_t block[LDSCT_DS_RANK];
- herr_t ret; /* Generic return value */
+ hsize_t start[LDSCT_DS_RANK];
+ hsize_t stride[LDSCT_DS_RANK];
+ hsize_t count[LDSCT_DS_RANK];
+ hsize_t block[LDSCT_DS_RANK];
+ herr_t ret; /* Generic return value */
-#if LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG
+#if LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG
if ( mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK ) {
- HDfprintf(stdout,
+ HDfprintf(stdout,
"%s:%d: dims/checker_edge_size = %d %d %d %d %d / %d\n",
fcnName, mpi_rank, (int)dims[0], (int)dims[1], (int)dims[2],
(int)dims[3], (int)dims[4], checker_edge_size);
}
-#endif /* LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG */
+#endif /* LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG */
HDassert( 0 < checker_edge_size );
HDassert( 0 < sel_rank );
@@ -1014,14 +988,14 @@ lower_dim_size_comp_test__select_checker_board(
HDassert( (hsize_t)checker_edge_size <= dims[sel_offset] );
HDassert( dims[sel_offset] == 10 );
-#if LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG
+#if LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG
if ( mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK ) {
- HDfprintf(stdout, "%s:%d: sel_rank/sel_offset = %d/%d.\n",
+ HDfprintf(stdout, "%s:%d: sel_rank/sel_offset = %d/%d.\n",
fcnName, mpi_rank, sel_rank, sel_offset);
- HDfprintf(stdout, "%s:%d: tgt_rank/ds_offset = %d/%d.\n",
+ HDfprintf(stdout, "%s:%d: tgt_rank/ds_offset = %d/%d.\n",
fcnName, mpi_rank, tgt_rank, ds_offset);
}
-#endif /* LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG */
+#endif /* LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG */
/* First, compute the base count (which assumes start == 0
* for the associated offset) and offset_count (which
@@ -1036,32 +1010,32 @@ lower_dim_size_comp_test__select_checker_board(
* pre-C99 compilers again.
*/
- base_count = dims[sel_offset] / (checker_edge_size * 2);
+ base_count = dims[sel_offset] / (hsize_t)(checker_edge_size * 2);
- if ( (dims[sel_rank] % (checker_edge_size * 2)) > 0 ) {
+ if ( (dims[sel_rank] % (hsize_t)(checker_edge_size * 2)) > 0 ) {
base_count++;
}
- offset_count =
- (hsize_t)((dims[sel_offset] - (hsize_t)checker_edge_size) /
+ offset_count =
+ (hsize_t)((dims[sel_offset] - (hsize_t)checker_edge_size) /
((hsize_t)(checker_edge_size * 2)));
- if ( ((dims[sel_rank] - (hsize_t)checker_edge_size) %
+ if ( ((dims[sel_rank] - (hsize_t)checker_edge_size) %
((hsize_t)(checker_edge_size * 2))) > 0 ) {
offset_count++;
}
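Working the two ceiling divisions above for the value this function asserts (dims[sel_offset] == 10) with, say, checker_edge_size = 3:

    /* base_count:   10 / 6 = 1, remainder 4 > 0        -> base_count   = 2
     * offset_count: (10 - 3) / 6 = 1, remainder 1 > 0  -> offset_count = 2
     * i.e. two (possibly partial) checkers start from each phase of the
     * checker board along the selected axis. */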
-#if LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG
+#if LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG
if ( mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK ) {
- HDfprintf(stdout, "%s:%d: base_count/offset_count = %d/%d.\n",
+ HDfprintf(stdout, "%s:%d: base_count/offset_count = %d/%d.\n",
fcnName, mpi_rank, base_count, offset_count);
}
-#endif /* LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG */
+#endif /* LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG */
/* Now set up the stride and block arrays, and portions of the start
- * and count arrays that will not be altered during the selection of
+ * and count arrays that will not be altered during the selection of
* the checker board.
*/
i = 0;
@@ -1093,7 +1067,7 @@ lower_dim_size_comp_test__select_checker_board(
i++;
}
-
+
i = 0;
do {
if ( 0 >= sel_offset ) {
@@ -1112,7 +1086,7 @@ lower_dim_size_comp_test__select_checker_board(
}
j = 0;
- do {
+ do {
if ( 1 >= sel_offset ) {
if ( j == 0 ) {
@@ -1181,78 +1155,78 @@ lower_dim_size_comp_test__select_checker_board(
if ( ((i + j + k + l + m) % 2) == 0 ) {
-#if LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG
- if ( mpi_rank ==
+#if LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG
+ if ( mpi_rank ==
LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK ) {
- HDfprintf(stdout,
- "%s%d: *** first_selection = %d ***\n",
+ HDfprintf(stdout,
+ "%s%d: *** first_selection = %d ***\n",
fcnName, mpi_rank, (int)first_selection);
- HDfprintf(stdout,
+ HDfprintf(stdout,
"%s:%d: i/j/k/l/m = %d/%d/%d/%d/%d\n",
fcnName, mpi_rank, i, j, k, l, m);
- HDfprintf(stdout,
- "%s:%d: start = %d %d %d %d %d.\n",
- fcnName, mpi_rank,
- (int)start[0], (int)start[1],
- (int)start[2], (int)start[3],
+ HDfprintf(stdout,
+ "%s:%d: start = %d %d %d %d %d.\n",
+ fcnName, mpi_rank,
+ (int)start[0], (int)start[1],
+ (int)start[2], (int)start[3],
(int)start[4]);
- HDfprintf(stdout,
- "%s:%d: stride = %d %d %d %d %d.\n",
- fcnName, mpi_rank,
- (int)stride[0], (int)stride[1],
- (int)stride[2], (int)stride[3],
+ HDfprintf(stdout,
+ "%s:%d: stride = %d %d %d %d %d.\n",
+ fcnName, mpi_rank,
+ (int)stride[0], (int)stride[1],
+ (int)stride[2], (int)stride[3],
(int)stride[4]);
- HDfprintf(stdout,
- "%s:%d: count = %d %d %d %d %d.\n",
- fcnName, mpi_rank,
- (int)count[0], (int)count[1],
- (int)count[2], (int)count[3],
+ HDfprintf(stdout,
+ "%s:%d: count = %d %d %d %d %d.\n",
+ fcnName, mpi_rank,
+ (int)count[0], (int)count[1],
+ (int)count[2], (int)count[3],
(int)count[4]);
- HDfprintf(stdout,
- "%s:%d: block = %d %d %d %d %d.\n",
- fcnName, mpi_rank,
- (int)block[0], (int)block[1],
- (int)block[2], (int)block[3],
+ HDfprintf(stdout,
+ "%s:%d: block = %d %d %d %d %d.\n",
+ fcnName, mpi_rank,
+ (int)block[0], (int)block[1],
+ (int)block[2], (int)block[3],
(int)block[4]);
- HDfprintf(stdout,
- "%s:%d: n-cube extent dims = %d.\n",
+ HDfprintf(stdout,
+ "%s:%d: n-cube extent dims = %d.\n",
fcnName, mpi_rank,
H5Sget_simple_extent_ndims(tgt_sid));
- HDfprintf(stdout,
- "%s:%d: selection rank = %d.\n",
+ HDfprintf(stdout,
+ "%s:%d: selection rank = %d.\n",
fcnName, mpi_rank, sel_rank);
}
#endif
if ( first_selection ) {
- first_selection = FALSE;
+ first_selection = FALSE;
ret = H5Sselect_hyperslab
(
- tgt_sid,
+ tgt_sid,
H5S_SELECT_SET,
- &(start[ds_offset]),
- &(stride[ds_offset]),
- &(count[ds_offset]),
+ &(start[ds_offset]),
+ &(stride[ds_offset]),
+ &(count[ds_offset]),
&(block[ds_offset])
);
-
+
VRFY((ret != FAIL), "H5Sselect_hyperslab(SET) succeeded");
} else {
ret = H5Sselect_hyperslab
(
- tgt_sid,
+ tgt_sid,
H5S_SELECT_OR,
- &(start[ds_offset]),
- &(stride[ds_offset]),
- &(count[ds_offset]),
+ &(start[ds_offset]),
+ &(stride[ds_offset]),
+ &(count[ds_offset]),
&(block[ds_offset])
);
-
+
VRFY((ret != FAIL), "H5Sselect_hyperslab(OR) succeeded");
}
@@ -1284,14 +1258,14 @@ lower_dim_size_comp_test__select_checker_board(
} while ( ( i <= 1 ) &&
( 0 >= sel_offset ) );
-#if LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG
+#if LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG
if ( mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK ) {
HDfprintf(stdout, "%s%d: H5Sget_select_npoints(tgt_sid) = %d.\n",
fcnName, mpi_rank, (int)H5Sget_select_npoints(tgt_sid));
}
#endif /* LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG */
- /* Clip the selection back to the data space proper. */
+ /* Clip the selection back to the dataspace proper. */
for ( i = 0; i < test_max_rank; i++ ) {
@@ -1306,7 +1280,7 @@ lower_dim_size_comp_test__select_checker_board(
VRFY((ret != FAIL), "H5Sselect_hyperslab(AND) succeeded");
-#if LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG
+#if LOWER_DIM_SIZE_COMP_TEST__SELECT_CHECKER_BOARD__DEBUG
if ( mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK ) {
HDfprintf(stdout, "%s%d: H5Sget_select_npoints(tgt_sid) = %d.\n",
fcnName, mpi_rank, (int)H5Sget_select_npoints(tgt_sid));
@@ -1321,57 +1295,57 @@ lower_dim_size_comp_test__select_checker_board(
/****************************************************************
**
-** lower_dim_size_comp_test__verify_data():
+** lower_dim_size_comp_test__verify_data():
**
-** Examine the supplied buffer to see if it contains the
-** expected data. Return TRUE if it does, and FALSE
+** Examine the supplied buffer to see if it contains the
+** expected data. Return TRUE if it does, and FALSE
** otherwise.
**
-** The supplied buffer is presumed to this process's slice
-** of the target data set. Each such slice will be an
-** n-cube of rank (rank -1) and the supplied edge_size with
-** origin (mpi_rank, 0, ... , 0) in the target data set.
+** The supplied buffer is presumed to be this process's slice
+** of the target data set. Each such slice will be an
+** n-cube of rank (rank -1) and the supplied edge_size with
+** origin (mpi_rank, 0, ... , 0) in the target data set.
**
-** Further, the buffer is presumed to be the result of reading
-** or writing a checker board selection of an m (1 <= m <
+** Further, the buffer is presumed to be the result of reading
+** or writing a checker board selection of an m (1 <= m <
** rank) dimensional slice through this process's slice
-** of the target data set. Also, this slice must be parallel
-** to the fastest changing indicies.
+** of the target data set. Also, this slice must be parallel
+** to the fastest changing indices.
**
-** It is further presumed that the buffer was zeroed before
-** the read/write, and that the full target data set (i.e.
-** the buffer/data set for all processes) was initialized
-** with the natural numbers listed in order from the origin
-** along the fastest changing axis.
+** It is further presumed that the buffer was zeroed before
+** the read/write, and that the full target data set (i.e.
+** the buffer/data set for all processes) was initialized
+** with the natural numbers listed in order from the origin
+** along the fastest changing axis.
**
** Thus for a 20x10x10 dataset, the value stored in location
-** (x, y, z) (assuming that z is the fastest changing index
-** and x the slowest) is assumed to be:
+** (x, y, z) (assuming that z is the fastest changing index
+** and x the slowest) is assumed to be:
**
-** (10 * 10 * x) + (10 * y) + z
+** (10 * 10 * x) + (10 * y) + z
**
-** Further, supposing that this is process 10, this process's
-** slice of the dataset would be a 10 x 10 2-cube with origin
-** (10, 0, 0) in the data set, and would be initialize (prior
-** to the checkerboard selection) as follows:
+** Further, supposing that this is process 10, this process's
+** slice of the dataset would be a 10 x 10 2-cube with origin
+** (10, 0, 0) in the data set, and would be initialized (prior
+** to the checkerboard selection) as follows:
**
-** 1000, 1001, 1002, ... 1008, 1009
-** 1010, 1011, 1012, ... 1018, 1019
-** . . . . .
-** . . . . .
-** . . . . .
-** 1090, 1091, 1092, ... 1098, 1099
+** 1000, 1001, 1002, ... 1008, 1009
+** 1010, 1011, 1012, ... 1018, 1019
+** . . . . .
+** . . . . .
+** . . . . .
+** 1090, 1091, 1092, ... 1098, 1099
**
-** In the case of a read from the processors slice of another
-** data set of different rank, the values expected will have
-** to be adjusted accordingly. This is done via the
-** first_expected_val parameter.
+** In the case of a read from the process's slice of another
+** data set of different rank, the values expected will have
+** to be adjusted accordingly. This is done via the
+** first_expected_val parameter.
**
-** Finally, the function presumes that the first element
-** of the buffer resides either at the origin of either
-** a selected or an unselected checker. (Translation:
-** if partial checkers appear in the buffer, they will
-** intersect the edges of the n-cube oposite the origin.)
+** Finally, the function presumes that the first element
+** of the buffer resides either at the origin of either
+** a selected or an unselected checker. (Translation:
+** if partial checkers appear in the buffer, they will
+** intersect the edges of the n-cube opposite the origin.)
**
****************************************************************/
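The value rule above collapses to a one-line computation; a hypothetical helper (not part of the test) that pins down the 20x10x10 example:

    static uint32_t
    expected_val(uint32_t x, uint32_t y, uint32_t z)
    {
        /* z fastest, x slowest; e.g. (10, 0, 0) -> 1000 */
        return (10 * 10 * x) + (10 * y) + z;
    }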
@@ -1379,7 +1353,7 @@ lower_dim_size_comp_test__select_checker_board(
static hbool_t
lower_dim_size_comp_test__verify_data(uint32_t * buf_ptr,
-#if LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG
+#if LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG
const int mpi_rank,
#endif /* LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG */
const int rank,
@@ -1389,8 +1363,8 @@ lower_dim_size_comp_test__verify_data(uint32_t * buf_ptr,
hbool_t buf_starts_in_checker)
{
#if LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG
- const char * fcnName =
- "lower_dim_size_comp_test__verify_data():";
+ const char * fcnName =
+ "lower_dim_size_comp_test__verify_data():";
#endif
hbool_t good_data = TRUE;
hbool_t in_checker;
@@ -1409,16 +1383,16 @@ lower_dim_size_comp_test__verify_data(uint32_t * buf_ptr,
HDassert( checker_edge_size <= edge_size );
HDassert( test_max_rank <= LDSCT_DS_RANK );
-#if LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG
+#if LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG
if ( mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK ) {
HDfprintf(stdout, "%s mpi_rank = %d.\n", fcnName, mpi_rank);
HDfprintf(stdout, "%s rank = %d.\n", fcnName, rank);
HDfprintf(stdout, "%s edge_size = %d.\n", fcnName, edge_size);
- HDfprintf(stdout, "%s checker_edge_size = %d.\n",
+ HDfprintf(stdout, "%s checker_edge_size = %d.\n",
fcnName, checker_edge_size);
- HDfprintf(stdout, "%s first_expected_val = %d.\n",
+ HDfprintf(stdout, "%s first_expected_val = %d.\n",
fcnName, (int)first_expected_val);
- HDfprintf(stdout, "%s starts_in_checker = %d.\n",
+ HDfprintf(stdout, "%s starts_in_checker = %d.\n",
fcnName, (int)buf_starts_in_checker);
}
#endif
@@ -1463,7 +1437,7 @@ lower_dim_size_comp_test__verify_data(uint32_t * buf_ptr,
y = 0;
start_in_checker[3] = start_in_checker[2];
do
- {
+ {
if ( y >= checker_edge_size ) {
start_in_checker[3] = ! start_in_checker[3];
@@ -1472,8 +1446,8 @@ lower_dim_size_comp_test__verify_data(uint32_t * buf_ptr,
m = 0;
z = 0;
-#if LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG
- if ( mpi_rank ==
+#if LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG
+ if ( mpi_rank ==
LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK ) {
HDfprintf(stdout, "%d, %d, %d, %d, %d:", i, j, k, l, m);
}
@@ -1481,8 +1455,8 @@ lower_dim_size_comp_test__verify_data(uint32_t * buf_ptr,
in_checker = start_in_checker[3];
do
{
-#if LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG
- if ( mpi_rank ==
+#if LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG
+ if ( mpi_rank ==
LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK ) {
HDfprintf(stdout, " %d", (int)(*val_ptr));
}
@@ -1492,21 +1466,21 @@ lower_dim_size_comp_test__verify_data(uint32_t * buf_ptr,
in_checker = ! in_checker;
z = 0;
}
-
+
if ( in_checker ) {
-
+
if ( *val_ptr != expected_value ) {
good_data = FALSE;
}
-
+
/* zero out buffer for re-use */
*val_ptr = 0;
} else if ( *val_ptr != 0 ) {
good_data = FALSE;
-
+
/* zero out buffer for re-use */
*val_ptr = 0;
@@ -1516,11 +1490,11 @@ lower_dim_size_comp_test__verify_data(uint32_t * buf_ptr,
expected_value++;
m++;
z++;
-
+
} while ( ( rank >= (test_max_rank - 4) ) &&
( m < edge_size ) );
-#if LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG
- if ( mpi_rank ==
+#if LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG
+ if ( mpi_rank ==
LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK ) {
HDfprintf(stdout, "\n");
}
@@ -1548,22 +1522,20 @@ lower_dim_size_comp_test__verify_data(uint32_t * buf_ptr,
/*-------------------------------------------------------------------------
- * Function: lower_dim_size_comp_test__run_test()
- *
- * Purpose: Verify that a bug in the computation of the size of the
- * lower dimensions of a data space in H5S_obtain_datatype()
- * has been corrected.
+ * Function: lower_dim_size_comp_test__run_test()
*
- * Return: void
+ * Purpose: Verify that a bug in the computation of the size of the
+ * lower dimensions of a dataspace in H5S_obtain_datatype()
+ * has been corrected.
*
- * Programmer: JRM -- 11/11/09
+ * Return: void
*
- * Modifications:
+ * Programmer: JRM -- 11/11/09
*
*-------------------------------------------------------------------------
*/
-#define LDSCT_DS_RANK 5
+#define LDSCT_DS_RANK 5
#define LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG 0
static void
@@ -1571,21 +1543,21 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size,
const hbool_t use_collective_io,
const hid_t dset_type)
{
-#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
+#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
const char *fcnName = "lower_dim_size_comp_test__run_test()";
- int rank;
- hsize_t dims[32];
- hsize_t max_dims[32];
+ int rank;
+ hsize_t dims[32];
+ hsize_t max_dims[32];
#endif /* LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG */
const char *filename;
- hbool_t data_ok = FALSE;
- hbool_t mis_match = FALSE;
+ hbool_t data_ok = FALSE;
+ hbool_t mis_match = FALSE;
int i;
int start_index;
int stop_index;
- int mrc;
- int mpi_rank;
- int mpi_size;
+ int mrc;
+ int mpi_rank;
+ int mpi_size;
MPI_Comm mpi_comm = MPI_COMM_NULL;
MPI_Info mpi_info = MPI_INFO_NULL;
hid_t fid; /* HDF5 file ID */
@@ -1594,7 +1566,9 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size,
size_t small_ds_size;
size_t small_ds_slice_size;
size_t large_ds_size;
+#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
size_t large_ds_slice_size;
+#endif
uint32_t expected_value;
uint32_t * small_ds_buf_0 = NULL;
uint32_t * small_ds_buf_1 = NULL;
@@ -1635,7 +1609,7 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size,
mpi_comm = MPI_COMM_WORLD;
mpi_info = MPI_INFO_NULL;
-#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
+#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
if ( mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK ) {
HDfprintf(stdout, "%s:%d: chunk_edge_size = %d.\n",
fcnName, mpi_rank, (int)chunk_edge_size);
@@ -1648,15 +1622,16 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size,
small_ds_size = (size_t)((mpi_size + 1) * 1 * 1 * 10 * 10);
small_ds_slice_size = (size_t) ( 1 * 1 * 10 * 10);
large_ds_size = (size_t)((mpi_size + 1) * 10 * 10 * 10 * 10);
+
+#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
large_ds_slice_size = (size_t) (10 * 10 * 10 * 10);
-#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
if ( mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK ) {
HDfprintf(stdout, "%s:%d: small ds size / slice size = %d / %d.\n",
- fcnName, mpi_rank,
+ fcnName, mpi_rank,
(int)small_ds_size, (int)small_ds_slice_size);
HDfprintf(stdout, "%s:%d: large ds size / slice size = %d / %d.\n",
- fcnName, mpi_rank,
+ fcnName, mpi_rank,
(int)large_ds_size, (int)large_ds_slice_size);
}
#endif /* LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG */
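As a worked example of these sizes: with mpi_size = 4, small_ds_size = (4 + 1) * 1 * 1 * 10 * 10 = 500 elements and large_ds_size = (4 + 1) * 10 * 10 * 10 * 10 = 50,000 elements, with per-slice sizes of 100 and 10,000 elements respectively. The extra slice beyond one per process is consistent with the per-process selections later in the test, which start at mpi_rank + 1.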
@@ -1739,7 +1714,7 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size,
large_dims[3] = 10;
large_dims[4] = 10;
-#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
+#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
if ( mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK ) {
HDfprintf(stdout, "%s:%d: small_dims[] = %d %d %d %d %d\n",
fcnName, mpi_rank, (int)small_dims[0], (int)small_dims[1],
@@ -1748,41 +1723,41 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size,
fcnName, mpi_rank, (int)large_dims[0], (int)large_dims[1],
(int)large_dims[2], (int)large_dims[3], (int)large_dims[4]);
}
-#endif
+#endif
- /* create data spaces */
+ /* create dataspaces */
full_mem_small_ds_sid = H5Screate_simple(5, small_dims, NULL);
- VRFY((full_mem_small_ds_sid != 0),
+ VRFY((full_mem_small_ds_sid != 0),
"H5Screate_simple() full_mem_small_ds_sid succeeded");
full_file_small_ds_sid = H5Screate_simple(5, small_dims, NULL);
- VRFY((full_file_small_ds_sid != 0),
+ VRFY((full_file_small_ds_sid != 0),
"H5Screate_simple() full_file_small_ds_sid succeeded");
mem_small_ds_sid = H5Screate_simple(5, small_dims, NULL);
- VRFY((mem_small_ds_sid != 0),
+ VRFY((mem_small_ds_sid != 0),
"H5Screate_simple() mem_small_ds_sid succeeded");
file_small_ds_sid = H5Screate_simple(5, small_dims, NULL);
- VRFY((file_small_ds_sid != 0),
+ VRFY((file_small_ds_sid != 0),
"H5Screate_simple() file_small_ds_sid succeeded");
full_mem_large_ds_sid = H5Screate_simple(5, large_dims, NULL);
- VRFY((full_mem_large_ds_sid != 0),
+ VRFY((full_mem_large_ds_sid != 0),
"H5Screate_simple() full_mem_large_ds_sid succeeded");
full_file_large_ds_sid = H5Screate_simple(5, large_dims, NULL);
- VRFY((full_file_large_ds_sid != 0),
+ VRFY((full_file_large_ds_sid != 0),
"H5Screate_simple() full_file_large_ds_sid succeeded");
mem_large_ds_sid = H5Screate_simple(5, large_dims, NULL);
- VRFY((mem_large_ds_sid != 0),
+ VRFY((mem_large_ds_sid != 0),
"H5Screate_simple() mem_large_ds_sid succeeded");
file_large_ds_sid = H5Screate_simple(5, large_dims, NULL);
- VRFY((file_large_ds_sid != 0),
+ VRFY((file_large_ds_sid != 0),
"H5Screate_simple() file_large_ds_sid succeeded");
@@ -1812,14 +1787,14 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size,
small_chunk_dims[1] = small_chunk_dims[2] = (hsize_t)1;
small_chunk_dims[3] = small_chunk_dims[4] = (hsize_t)chunk_edge_size;
-#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
+#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
if ( mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK ) {
HDfprintf(stdout, "%s:%d: small chunk dims[] = %d %d %d %d %d\n",
- fcnName, mpi_rank, (int)small_chunk_dims[0],
- (int)small_chunk_dims[1], (int)small_chunk_dims[2],
+ fcnName, mpi_rank, (int)small_chunk_dims[0],
+ (int)small_chunk_dims[1], (int)small_chunk_dims[2],
(int)small_chunk_dims[3], (int)small_chunk_dims[4]);
}
-#endif
+#endif
small_ds_dcpl_id = H5Pcreate(H5P_DATASET_CREATE);
VRFY((ret != FAIL), "H5Pcreate() small_ds_dcpl_id succeeded");
@@ -1831,18 +1806,18 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size,
VRFY((ret != FAIL), "H5Pset_chunk() small_ds_dcpl_id succeeded");
large_chunk_dims[0] = (hsize_t)(1);
- large_chunk_dims[1] = large_chunk_dims[2] =
- large_chunk_dims[3] = large_chunk_dims[4] = (hsize_t)chunk_edge_size;
+ large_chunk_dims[1] = large_chunk_dims[2] =
+ large_chunk_dims[3] = large_chunk_dims[4] = (hsize_t)chunk_edge_size;
-#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
+#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
if ( mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK ) {
HDfprintf(stdout, "%s:%d: large chunk dims[] = %d %d %d %d %d\n",
- fcnName, mpi_rank, (int)large_chunk_dims[0],
- (int)large_chunk_dims[1], (int)large_chunk_dims[2],
+ fcnName, mpi_rank, (int)large_chunk_dims[0],
+ (int)large_chunk_dims[1], (int)large_chunk_dims[2],
(int)large_chunk_dims[3], (int)large_chunk_dims[4]);
}
-#endif
+#endif
large_ds_dcpl_id = H5Pcreate(H5P_DATASET_CREATE);
VRFY((ret != FAIL), "H5Pcreate() large_ds_dcpl_id succeeded");
@@ -1868,11 +1843,11 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size,
large_ds_dcpl_id, H5P_DEFAULT);
VRFY((ret >= 0), "H5Dcreate2() large_dataset succeeded");
-#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
+#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
if ( mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK ) {
- HDfprintf(stdout,
- "%s:%d: small/large ds id = %d / %d.\n",
- fcnName, mpi_rank, (int)small_dataset,
+ HDfprintf(stdout,
+ "%s:%d: small/large ds id = %d / %d.\n",
+ fcnName, mpi_rank, (int)small_dataset,
(int)large_dataset);
}
#endif /* LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG */
@@ -1906,10 +1881,10 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size,
block[0] = block[1] = block[2] = 1;
block[3] = block[4] = 10;
-#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
+#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
if ( mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK ) {
- HDfprintf(stdout,
- "%s:%d: settings for small data set initialization.\n",
+ HDfprintf(stdout,
+ "%s:%d: settings for small data set initialization.\n",
fcnName, mpi_rank);
HDfprintf(stdout, "%s:%d: start[] = %d %d %d %d %d\n",
fcnName, mpi_rank, (int)start[0], (int)start[1],
@@ -1947,10 +1922,10 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size,
start[0] = 0;
-#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
+#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
if ( mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK ) {
- HDfprintf(stdout,
- "%s:%d: added settings for main process.\n",
+ HDfprintf(stdout,
+ "%s:%d: added settings for main process.\n",
fcnName, mpi_rank);
HDfprintf(stdout, "%s:%d: start[] = %d %d %d %d %d\n",
fcnName, mpi_rank, (int)start[0], (int)start[1],
@@ -1992,23 +1967,23 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size,
/* write the initial value of the small data set to file */
-#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
+#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
if ( mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK ) {
HDfprintf(stdout, "%s:%d: writing init value of small ds to file.\n",
fcnName, mpi_rank);
}
#endif /* LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG */
- ret = H5Dwrite(small_dataset,
- dset_type,
- mem_small_ds_sid,
+ ret = H5Dwrite(small_dataset,
+ dset_type,
+ mem_small_ds_sid,
file_small_ds_sid,
- xfer_plist,
+ xfer_plist,
small_ds_buf_0);
VRFY((ret >= 0), "H5Dwrite() small_dataset initial write succeeded");
- /* read the small data set back to verify that it contains the
- * expected data. Note that each process reads in the entire
+ /* read the small data set back to verify that it contains the
+ * expected data. Note that each process reads in the entire
* data set and verifies it.
*/
ret = H5Dread(small_dataset,
@@ -2061,10 +2036,10 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size,
block[0] = (hsize_t)1;
block[1] = block[2] = block[3] = block[4] = (hsize_t)10;
-#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
+#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
if ( mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK ) {
- HDfprintf(stdout,
- "%s:%d: settings for large data set initialization.\n",
+ HDfprintf(stdout,
+ "%s:%d: settings for large data set initialization.\n",
fcnName, mpi_rank);
HDfprintf(stdout, "%s:%d: start[] = %d %d %d %d %d\n",
fcnName, mpi_rank, (int)start[0], (int)start[1],
@@ -2097,15 +2072,15 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size,
block);
VRFY((ret >= 0), "H5Sselect_hyperslab(file_large_ds_sid, set) suceeded");
-#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
+#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
if ( mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK ) {
- HDfprintf(stdout,
+ HDfprintf(stdout,
"%s%d: H5Sget_select_npoints(mem_large_ds_sid) = %d.\n",
- fcnName, mpi_rank,
+ fcnName, mpi_rank,
(int)H5Sget_select_npoints(mem_large_ds_sid));
- HDfprintf(stdout,
+ HDfprintf(stdout,
"%s%d: H5Sget_select_npoints(file_large_ds_sid) = %d.\n",
- fcnName, mpi_rank,
+ fcnName, mpi_rank,
(int)H5Sget_select_npoints(file_large_ds_sid));
}
#endif /* LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG */
@@ -2114,10 +2089,10 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size,
start[0] = (hsize_t)0;
-#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
+#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
if ( mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK ) {
- HDfprintf(stdout,
- "%s:%d: added settings for main process.\n",
+ HDfprintf(stdout,
+ "%s:%d: added settings for main process.\n",
fcnName, mpi_rank);
HDfprintf(stdout, "%s:%d: start[] = %d %d %d %d %d\n",
fcnName, mpi_rank, (int)start[0], (int)start[1],
@@ -2150,21 +2125,21 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size,
block);
 VRFY((ret>= 0), "H5Sselect_hyperslab(file_large_ds_sid, or) succeeded");
-#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
+#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
if ( mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK ) {
- HDfprintf(stdout,
+ HDfprintf(stdout,
"%s%d: H5Sget_select_npoints(mem_large_ds_sid) = %d.\n",
- fcnName, mpi_rank,
+ fcnName, mpi_rank,
(int)H5Sget_select_npoints(mem_large_ds_sid));
- HDfprintf(stdout,
+ HDfprintf(stdout,
"%s%d: H5Sget_select_npoints(file_large_ds_sid) = %d.\n",
- fcnName, mpi_rank,
+ fcnName, mpi_rank,
(int)H5Sget_select_npoints(file_large_ds_sid));
}
#endif /* LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG */
}
- /* try clipping the selection back to the large data space proper */
+ /* try clipping the selection back to the large dataspace proper */
start[0] = start[1] = start[2] = start[3] = start[4] = (hsize_t)0;
stride[0] = (hsize_t)(2 * (mpi_size + 1));
@@ -2183,19 +2158,19 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size,
start, stride, count, block);
VRFY((ret != FAIL),"H5Sselect_hyperslab(file_large_ds_sid, and) succeeded");
-#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
- if ( mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK ) {
+#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
+ if ( mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK ) {
rank = H5Sget_simple_extent_dims(mem_large_ds_sid, dims, max_dims);
- HDfprintf(stdout,
+ HDfprintf(stdout,
"%s:%d: mem_large_ds_sid dims[%d] = %d %d %d %d %d\n",
- fcnName, mpi_rank, rank, (int)dims[0], (int)dims[1],
+ fcnName, mpi_rank, rank, (int)dims[0], (int)dims[1],
(int)dims[2], (int)dims[3], (int)dims[4]);
rank = H5Sget_simple_extent_dims(file_large_ds_sid, dims, max_dims);
- HDfprintf(stdout,
+ HDfprintf(stdout,
"%s:%d: file_large_ds_sid dims[%d] = %d %d %d %d %d\n",
- fcnName, mpi_rank, rank, (int)dims[0], (int)dims[1],
+ fcnName, mpi_rank, rank, (int)dims[0], (int)dims[1],
(int)dims[2], (int)dims[3], (int)dims[4]);
}
#endif /* LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG */
@@ -2208,26 +2183,26 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size,
/* write the initial value of the large data set to file */
-#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
- if ( mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK ) {
+#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
+ if ( mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK ) {
HDfprintf(stdout, "%s:%d: writing init value of large ds to file.\n",
fcnName, mpi_rank);
- HDfprintf(stdout,
+ HDfprintf(stdout,
"%s:%d: large_dataset = %d.\n",
- fcnName, mpi_rank,
+ fcnName, mpi_rank,
(int)large_dataset);
- HDfprintf(stdout,
+ HDfprintf(stdout,
"%s:%d: mem_large_ds_sid = %d, file_large_ds_sid = %d.\n",
- fcnName, mpi_rank,
+ fcnName, mpi_rank,
(int)mem_large_ds_sid, (int)file_large_ds_sid);
}
#endif /* LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG */
- ret = H5Dwrite(large_dataset,
- dset_type,
- mem_large_ds_sid,
+ ret = H5Dwrite(large_dataset,
+ dset_type,
+ mem_large_ds_sid,
file_large_ds_sid,
- xfer_plist,
+ xfer_plist,
large_ds_buf_0);
if ( ret < 0 ) H5Eprint2(H5E_DEFAULT, stderr);
@@ -2238,8 +2213,8 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size,
mrc = MPI_Barrier(MPI_COMM_WORLD);
VRFY((mrc==MPI_SUCCESS), "Sync after large dataset writes");
- /* read the large data set back to verify that it contains the
- * expected data. Note that each process reads in the entire
+ /* read the large data set back to verify that it contains the
+ * expected data. Note that each process reads in the entire
* data set.
*/
ret = H5Dread(large_dataset,
@@ -2278,13 +2253,13 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size,
/***********************************/
- /* read a checkerboard selection of the process slice of the
- * small on disk data set into the process slice of the large
+ /* read a checkerboard selection of the process slice of the
+ * small on disk data set into the process slice of the large
* in memory data set, and verify the data read.
*/
small_sel_start[0] = (hsize_t)(mpi_rank + 1);
- small_sel_start[1] = small_sel_start[2] =
+ small_sel_start[1] = small_sel_start[2] =
small_sel_start[3] = small_sel_start[4] = 0;
lower_dim_size_comp_test__select_checker_board(mpi_rank,
@@ -2296,9 +2271,9 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size,
small_sel_start);
expected_value = (uint32_t)
- ((small_sel_start[0] * small_dims[1] * small_dims[2] *
+ ((small_sel_start[0] * small_dims[1] * small_dims[2] *
small_dims[3] * small_dims[4]) +
- (small_sel_start[1] * small_dims[2] * small_dims[3] *
+ (small_sel_start[1] * small_dims[2] * small_dims[3] *
small_dims[4]) +
(small_sel_start[2] * small_dims[3] * small_dims[4]) +
(small_sel_start[3] * small_dims[4]) +
@@ -2318,12 +2293,11 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size,
large_sel_start);
- /* verify that H5S_select_shape_same() reports the two
+ /* verify that H5Sselect_shape_same() reports the two
* selections as having the same shape.
*/
- check = H5S_select_shape_same_test(mem_large_ds_sid,
- file_small_ds_sid);
- VRFY((check == TRUE), "H5S_select_shape_same_test passed (1)");
+ check = H5Sselect_shape_same(mem_large_ds_sid, file_small_ds_sid);
+ VRFY((check == TRUE), "H5Sselect_shape_same passed (1)");
ret = H5Dread(small_dataset,
@@ -2335,7 +2309,7 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size,
VRFY((ret >= 0), "H5Sread() slice from small ds succeeded.");
-#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
+#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
if ( mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK ) {
HDfprintf(stdout, "%s:%d: H5Dread() returns.\n", fcnName, mpi_rank);
}
@@ -2345,9 +2319,9 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size,
data_ok = TRUE;
- start_index = (int)((large_sel_start[0] * large_dims[1] * large_dims[2] *
+ start_index = (int)((large_sel_start[0] * large_dims[1] * large_dims[2] *
large_dims[3] * large_dims[4]) +
- (large_sel_start[1] * large_dims[2] * large_dims[3] *
+ (large_sel_start[1] * large_dims[2] * large_dims[3] *
large_dims[4]) +
(large_sel_start[2] * large_dims[3] * large_dims[4]) +
(large_sel_start[3] * large_dims[4]) +
@@ -2409,13 +2383,13 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size,
- /* read a checkerboard selection of a slice of the process slice of
- * the large on disk data set into the process slice of the small
+ /* read a checkerboard selection of a slice of the process slice of
+ * the large on disk data set into the process slice of the small
* in memory data set, and verify the data read.
*/
small_sel_start[0] = (hsize_t)(mpi_rank + 1);
- small_sel_start[1] = small_sel_start[2] =
+ small_sel_start[1] = small_sel_start[2] =
small_sel_start[3] = small_sel_start[4] = 0;
lower_dim_size_comp_test__select_checker_board(mpi_rank,
@@ -2439,12 +2413,11 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size,
large_sel_start);
- /* verify that H5S_select_shape_same() reports the two
+ /* verify that H5Sselect_shape_same() reports the two
* selections as having the same shape.
*/
- check = H5S_select_shape_same_test(mem_small_ds_sid,
- file_large_ds_sid);
- VRFY((check == TRUE), "H5S_select_shape_same_test passed (2)");
+ check = H5Sselect_shape_same(mem_small_ds_sid, file_large_ds_sid);
+ VRFY((check == TRUE), "H5Sselect_shape_same passed (2)");
ret = H5Dread(large_dataset,
@@ -2456,7 +2429,7 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size,
VRFY((ret >= 0), "H5Sread() slice from large ds succeeded.");
-#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
+#if LOWER_DIM_SIZE_COMP_TEST__RUN_TEST__DEBUG
if ( mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK ) {
HDfprintf(stdout, "%s:%d: H5Dread() returns.\n", fcnName, mpi_rank);
}
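
Both hunks above migrate this check from the internal H5S_select_shape_same_test() helper to the public H5Sselect_shape_same() API. A minimal sketch of the new pattern (mem_space_id and file_space_id are placeholder dataspace IDs):

    htri_t check;

    check = H5Sselect_shape_same(mem_space_id, file_space_id);
    VRFY((check == TRUE), "selections report the same shape");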
@@ -2467,9 +2440,9 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size,
data_ok = TRUE;
expected_value = (uint32_t)
- ((large_sel_start[0] * large_dims[1] * large_dims[2] *
+ ((large_sel_start[0] * large_dims[1] * large_dims[2] *
large_dims[3] * large_dims[4]) +
- (large_sel_start[1] * large_dims[2] * large_dims[3] *
+ (large_sel_start[1] * large_dims[2] * large_dims[3] *
large_dims[4]) +
(large_sel_start[2] * large_dims[3] * large_dims[4]) +
(large_sel_start[3] * large_dims[4]) +
@@ -2522,7 +2495,7 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size,
#if LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG
if ( mpi_rank == LOWER_DIM_SIZE_COMP_TEST_DEBUG_TARGET_RANK ) {
- HDfprintf(stdout, "%s:%d: unexpected value at index %d: %d.\n",
+ HDfprintf(stdout, "%s:%d: unexpected value at index %d: %d.\n",
fcnName, mpi_rank, (int)i, (int)(*ptr_1));
}
#endif /* LOWER_DIM_SIZE_COMP_TEST__VERIFY_DATA__DEBUG */
@@ -2590,17 +2563,15 @@ lower_dim_size_comp_test__run_test(const int chunk_edge_size,
/*-------------------------------------------------------------------------
- * Function: lower_dim_size_comp_test()
+ * Function: lower_dim_size_comp_test()
*
- * Purpose: Test to see if an error in the computation of the size
- * of the lower dimensions in H5S_obtain_datatype() has
- * been corrected.
+ * Purpose: Test to see if an error in the computation of the size
+ * of the lower dimensions in H5S_obtain_datatype() has
+ * been corrected.
*
- * Return: void
+ * Return: void
*
- * Programmer: JRM -- 11/11/09
- *
- * Modifications:
+ * Programmer: JRM -- 11/11/09
*
*-------------------------------------------------------------------------
*/
@@ -2609,66 +2580,56 @@ void
lower_dim_size_comp_test(void)
{
/* const char *fcnName = "lower_dim_size_comp_test()"; */
- int chunk_edge_size = 0;
- int use_collective_io = 1;
- hid_t dset_type = H5T_NATIVE_UINT;
-#if 0
- HDsleep(60);
-#endif
- HDcompile_assert(sizeof(uint32_t) == sizeof(unsigned));
- for ( use_collective_io = (hbool_t)0;
- (int)use_collective_io <= 1;
- (hbool_t)(use_collective_io++) ) {
+ int chunk_edge_size = 0;
+ int use_collective_io;
+ HDcompile_assert(sizeof(uint32_t) == sizeof(unsigned));
+ for(use_collective_io = 0; use_collective_io <= 1; use_collective_io++) {
chunk_edge_size = 0;
lower_dim_size_comp_test__run_test(chunk_edge_size,
(hbool_t)use_collective_io,
- dset_type);
-
+ H5T_NATIVE_UINT);
chunk_edge_size = 5;
lower_dim_size_comp_test__run_test(chunk_edge_size,
(hbool_t)use_collective_io,
- dset_type);
- }
+ H5T_NATIVE_UINT);
+ } /* end for */
return;
-
} /* lower_dim_size_comp_test() */
/*-------------------------------------------------------------------------
- * Function: link_chunk_collective_io_test()
- *
- * Purpose: Test to verify that an error in MPI type management in
- * H5D_link_chunk_collective_io() has been corrected.
- * In this bug, we used to free MPI types regardless of
- * whether they were basic or derived.
+ * Function: link_chunk_collective_io_test()
*
- * This test is based on a bug report kindly provided by
- * Rob Latham of the MPICH team and ANL.
+ * Purpose: Test to verify that an error in MPI type management in
+ * H5D_link_chunk_collective_io() has been corrected.
+ * In this bug, we used to free MPI types regardless of
+ * whether they were basic or derived.
*
- * The basic thrust of the test is to cause a process
- * to participate in a collective I/O in which it:
+ * This test is based on a bug report kindly provided by
+ * Rob Latham of the MPICH team and ANL.
*
- * 1) Reads or writes exactly one chunk,
+ * The basic thrust of the test is to cause a process
+ * to participate in a collective I/O in which it:
*
- * 2) Has no in memory buffer for any other chunk.
+ * 1) Reads or writes exactly one chunk,
*
- * The test differers from Rob Latham's bug report in
- * that is runs with an arbitrary number of proceeses,
- * and uses a 1 dimensional dataset.
+ * 2) Has no in memory buffer for any other chunk.
*
- * Return: void
+ * The test differs from Rob Latham's bug report in
+ * that it runs with an arbitrary number of processes,
+ * and uses a 1-dimensional dataset.
*
- * Programmer: JRM -- 12/16/09
+ * Return: void
*
- * Modifications:
+ * Programmer: JRM -- 12/16/09
*
*-------------------------------------------------------------------------
*/
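
To make the one-chunk-per-process setup concrete, a hedged sketch of the layout this test builds; dims[0] matches the computation in the hunks below, while the per-rank hyperslab start/count are an assumption consistent with the description above:

    /* 1-D dataset: one chunk of LINK_CHUNK_COLLECTIVE_IO_TEST_CHUNK_SIZE
     * elements per MPI rank */
    hsize_t chunk_dims[1] = { LINK_CHUNK_COLLECTIVE_IO_TEST_CHUNK_SIZE };
    hsize_t dims[1]       = { (hsize_t)mpi_size * chunk_dims[0] };
    /* assumed per-rank selection: exactly this rank's chunk */
    hsize_t start[1]      = { (hsize_t)mpi_rank * chunk_dims[0] };
    hsize_t count[1]      = { chunk_dims[0] };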
-#define LINK_CHUNK_COLLECTIVE_IO_TEST_CHUNK_SIZE 16
+#define LINK_CHUNK_COLLECTIVE_IO_TEST_CHUNK_SIZE 16
void
link_chunk_collective_io_test(void)
@@ -2676,8 +2637,8 @@ link_chunk_collective_io_test(void)
/* const char *fcnName = "link_chunk_collective_io_test()"; */
const char *filename;
hbool_t mis_match = FALSE;
- int i;
- int mrc;
+ int i;
+ int mrc;
int mpi_rank;
int mpi_size;
MPI_Comm mpi_comm = MPI_COMM_WORLD;
@@ -2728,7 +2689,7 @@ link_chunk_collective_io_test(void)
/* setup dims */
dims[0] = ((hsize_t)mpi_size) * ((hsize_t)(LINK_CHUNK_COLLECTIVE_IO_TEST_CHUNK_SIZE));
- /* setup mem and file data spaces */
+ /* setup mem and file dataspaces */
write_mem_ds_sid = H5Screate_simple(1, chunk_dims, NULL);
VRFY((write_mem_ds_sid != 0),
"H5Screate_simple() write_mem_ds_sid succeeded");
@@ -2767,8 +2728,8 @@ link_chunk_collective_io_test(void)
for ( i = 0; i < LINK_CHUNK_COLLECTIVE_IO_TEST_CHUNK_SIZE; i++ ) {
local_data_written[i] = expected_value;
- local_data_read[i] = 0.0;
- expected_value += 1.0;
+ local_data_read[i] = 0.0;
+ expected_value += 1.0;
}
/* select the file and mem spaces */
@@ -2794,15 +2755,15 @@ link_chunk_collective_io_test(void)
VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
/* write the data set */
- ret = H5Dwrite(dset_id,
- H5T_NATIVE_DOUBLE,
- write_mem_ds_sid,
+ ret = H5Dwrite(dset_id,
+ H5T_NATIVE_DOUBLE,
+ write_mem_ds_sid,
file_ds_sid,
- xfer_plist,
+ xfer_plist,
local_data_written);
VRFY((ret >= 0), "H5Dwrite() dataset initial write succeeded");
-
+
/* sync with the other processes before checking data */
mrc = MPI_Barrier(MPI_COMM_WORLD);
VRFY((mrc==MPI_SUCCESS), "Sync after dataset write");
@@ -2830,7 +2791,7 @@ link_chunk_collective_io_test(void)
if ( diff >= 0.001 ) {
mis_match = TRUE;
- }
+ }
}
VRFY( (mis_match == FALSE), "dataset data good.");
diff --git a/testpar/testpar.h b/testpar/testpar.h
index 84c073f..f76de51 100644
--- a/testpar/testpar.h
+++ b/testpar/testpar.h
@@ -32,9 +32,9 @@
*/
#define MESG(mesg) \
if (VERBOSE_MED && *mesg != '\0') \
- printf("%s\n", mesg)
+ HDprintf("%s\n", mesg)
-/*
+/*
* VRFY: Verify if the condition val is true.
* If it is true, then call MESG to print mesg, depending on the verbose
* level.
@@ -44,23 +44,27 @@
* This will allow program to continue and can be used for debugging.
* (The "do {...} while(0)" is to group all the statements as one unit.)
*/
-#define VRFY(val, mesg) do { \
+#define VRFY_IMPL(val, mesg, rankvar) do { \
if (val) { \
- MESG(mesg); \
- } else { \
- printf("Proc %d: ", mpi_rank); \
- printf("*** Parallel ERROR ***\n"); \
- printf(" VRFY (%s) failed at line %4d in %s\n", \
+ MESG(mesg); \
+ } \
+ else { \
+ HDprintf("Proc %d: ", rankvar); \
+ HDprintf("*** Parallel ERROR ***\n"); \
+ HDprintf(" VRFY (%s) failed at line %4d in %s\n", \
mesg, (int)__LINE__, __FILE__); \
++nerrors; \
fflush(stdout); \
if (!VERBOSE_MED) { \
- printf("aborting MPI processes\n"); \
+ HDprintf("aborting MPI processes\n"); \
MPI_Abort(MPI_COMM_WORLD, 1); \
} \
} \
} while(0)
+#define VRFY_G(val, mesg) VRFY_IMPL(val, mesg, mpi_rank_g)
+#define VRFY(val, mesg) VRFY_IMPL(val, mesg, mpi_rank)
+
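+
The refactoring shares one macro body between two reporting macros: VRFY keeps the historical behavior for tests with a local mpi_rank variable, while VRFY_G reports through a global named mpi_rank_g. A minimal usage sketch, assuming fapl_id, mpi_rank, and nerrors are in scope:

    herr_t ret = H5Pset_fapl_mpio(fapl_id, MPI_COMM_WORLD, MPI_INFO_NULL);
    VRFY((ret >= 0), "H5Pset_fapl_mpio succeeded");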
/*
* Checking for information purpose.
* If val is false, print mesg; else nothing.
@@ -70,9 +74,9 @@
if (val) { \
MESG(mesg); \
} else { \
- printf("Proc %d: ", mpi_rank); \
- printf("*** PHDF5 REMARK (not an error) ***\n"); \
- printf(" Condition (%s) failed at line %4d in %s\n", \
+ HDprintf("Proc %d: ", mpi_rank); \
+ HDprintf("*** PHDF5 REMARK (not an error) ***\n"); \
+ HDprintf(" Condition (%s) failed at line %4d in %s\n", \
mesg, (int)__LINE__, __FILE__); \
fflush(stdout); \
} \
@@ -80,10 +84,10 @@
#define MPI_BANNER(mesg) do { \
if (VERBOSE_MED || MAINPROCESS){ \
- printf("--------------------------------\n"); \
- printf("Proc %d: ", mpi_rank); \
- printf("*** %s\n", mesg); \
- printf("--------------------------------\n"); \
+ HDprintf("--------------------------------\n"); \
+ HDprintf("Proc %d: ", mpi_rank); \
+ HDprintf("*** %s\n", mesg); \
+ HDprintf("--------------------------------\n"); \
} \
} while(0)
diff --git a/testpar/testpflush.sh.in b/testpar/testpflush.sh.in
new file mode 100644
index 0000000..02f0e26
--- /dev/null
+++ b/testpar/testpflush.sh.in
@@ -0,0 +1,64 @@
+#! /bin/sh
+#
+# Copyright by The HDF Group.
+# Copyright by the Board of Trustees of the University of Illinois.
+# All rights reserved.
+#
+# This file is part of HDF5. The full HDF5 copyright notice, including
+# terms governing use, modification, and redistribution, is contained in
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
+#
+#
+# Test script for the parallel flush test
+#
+# The parallel flush test uses two programs to test flush operations
+# in parallel HDF5. The first program purposely exits without calling
+# MPI_Finalize(), which is an error under the MPI standard, so mpiexec
+# in some implementations will return an error code even though all
+# processes exit successfully. This script lets us swallow the error
+# from the first program.
+#
+# True errors in the first program will be detected as errors in the
+# second program, so watch out for that.
+#
+# Programmer: Dana Robinson
+# Fall 2018
+
+# The build (current) directory might be different than the source directory.
+if test -z "$srcdir"; then
+ srcdir=.
+fi
+
+# Turn the $$ we use to avoid Autotools munging into $
+#
+# Allowing $$ to substitute in both the RUNPARALLEL string and the
+# regexp is intentional. There doesn't seem to be a way around
+# this using quote shenanigans. The downside is that there is a remote
+# chance that the shell's pid will match a number in the RUNPARALLEL
+# variable, but that seems less likely to cause problems than expecting
+# library builders to specify two almost identical versions of the
+# RUNPARALLEL command, one for use in scripts and one via Makefiles.
+RUNPARALLELSCRIPT=`echo "@RUNPARALLEL@" | sed "s/$$/\$/g"`
+
+# ==========================================
+# Run the first parallel flush test program
+# (note that we ignore any errors here)
+# ==========================================
+echo "*** NOTE ***********************************************************"
+echo "You may see complaints from mpiexec et al. that not all processes"
+echo "called MPI_Finalize(). This is an intended characteristic of the"
+echo "test and should not be considered an error."
+echo "********************************************************************"
+eval ${RUNPARALLELSCRIPT} ./t_pflush1
+
+
+# ===========================================
+# Run the second parallel flush test program
+# The return code of this call is the return
+# code of the script.
+# ===========================================
+eval ${RUNPARALLELSCRIPT} ./t_pflush2
+
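
As a worked example of the substitution (the RUNPARALLEL value itself is hypothetical), a configured string of

    mpiexec -n $${NPROCS:=6}

comes out of the sed pass as

    mpiexec -n ${NPROCS:=6}

which the shell can then expand normally when the eval lines above run.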
diff --git a/testpar/testphdf5.c b/testpar/testphdf5.c
index 87d9056..b89c790 100644
--- a/testpar/testphdf5.c
+++ b/testpar/testphdf5.c
@@ -26,15 +26,15 @@ int dim0;
int dim1;
int chunkdim0;
int chunkdim1;
-int nerrors = 0; /* errors count */
-int ndatasets = 300; /* number of datasets to create*/
+int nerrors = 0; /* errors count */
+int ndatasets = 300; /* number of datasets to create*/
int ngroups = 512; /* number of groups to create in root
- * group. */
-int facc_type = FACC_MPIO; /*Test file access type */
+ * group. */
+int facc_type = FACC_MPIO; /*Test file access type */
int dxfer_coll_type = DXFER_COLLECTIVE_IO;
-H5E_auto2_t old_func; /* previous error handler */
-void *old_client_data; /* previous error handler arg.*/
+H5E_auto2_t old_func; /* previous error handler */
+void *old_client_data; /* previous error handler arg.*/
/* other option flags */
@@ -46,10 +46,10 @@ void *old_client_data; /* previous error handler arg.*/
#define NFILENAME 2
#define PARATESTFILE filenames[0]
const char *FILENAME[NFILENAME]={
- "ParaTest",
- NULL};
-char filenames[NFILENAME][PATH_MAX];
-hid_t fapl; /* file access property list */
+ "ParaTest",
+ NULL};
+char filenames[NFILENAME][PATH_MAX];
+hid_t fapl; /* file access property list */
#ifdef USE_PAUSE
/* pause the process for a moment to allow debugger to attach if desired. */
@@ -62,7 +62,7 @@ void pause_proc(void)
{
int pid;
- h5_stat_t statbuf;
+ h5_stat_t statbuf;
char greenlight[] = "go";
int maxloop = 10;
int loops = 0;
@@ -79,15 +79,15 @@ void pause_proc(void)
MPI_Get_processor_name(mpi_name, &mpi_namelen);
if (MAINPROCESS)
- while ((HDstat(greenlight, &statbuf) == -1) && loops < maxloop){
- if (!loops++){
- printf("Proc %d (%*s, %d): to debug, attach %d\n",
- mpi_rank, mpi_namelen, mpi_name, pid, pid);
- }
- printf("waiting(%ds) for file %s ...\n", time_int, greenlight);
- fflush(stdout);
+ while ((HDstat(greenlight, &statbuf) == -1) && loops < maxloop){
+ if (!loops++){
+ HDprintf("Proc %d (%*s, %d): to debug, attach %d\n",
+ mpi_rank, mpi_namelen, mpi_name, pid, pid);
+ }
+ HDprintf("waiting(%ds) for file %s ...\n", time_int, greenlight);
+ HDfflush(stdout);
HDsleep(time_int);
- }
+ }
MPI_Barrier(MPI_COMM_WORLD);
}
@@ -99,7 +99,7 @@ int MPI_Init(int *argc, char ***argv)
pause_proc();
return (ret_code);
}
-#endif /* USE_PAUSE */
+#endif /* USE_PAUSE */
/*
@@ -108,18 +108,18 @@ int MPI_Init(int *argc, char ***argv)
static void
usage(void)
{
- printf(" [-r] [-w] [-m<n_datasets>] [-n<n_groups>] "
- "[-o] [-f <prefix>] [-d <dim0> <dim1>]\n");
- printf("\t-m<n_datasets>"
- "\tset number of datasets for the multiple dataset test\n");
- printf("\t-n<n_groups>"
- "\tset number of groups for the multiple group test\n");
- printf("\t-f <prefix>\tfilename prefix\n");
- printf("\t-2\t\tuse Split-file together with MPIO\n");
- printf("\t-d <factor0> <factor1>\tdataset dimensions factors. Defaults (%d,%d)\n",
- ROW_FACTOR, COL_FACTOR);
- printf("\t-c <dim0> <dim1>\tdataset chunk dimensions. Defaults (dim0/10,dim1/10)\n");
- printf("\n");
+ HDprintf(" [-r] [-w] [-m<n_datasets>] [-n<n_groups>] "
+ "[-o] [-f <prefix>] [-d <dim0> <dim1>]\n");
+ HDprintf("\t-m<n_datasets>"
+ "\tset number of datasets for the multiple dataset test\n");
+ HDprintf("\t-n<n_groups>"
+ "\tset number of groups for the multiple group test\n");
+ HDprintf("\t-f <prefix>\tfilename prefix\n");
+ HDprintf("\t-2\t\tuse Split-file together with MPIO\n");
+ HDprintf("\t-d <factor0> <factor1>\tdataset dimensions factors. Defaults (%d,%d)\n",
+ ROW_FACTOR, COL_FACTOR);
+ HDprintf("\t-c <dim0> <dim1>\tdataset chunk dimensions. Defaults (dim0/10,dim1/10)\n");
+ HDprintf("\n");
}
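
A hypothetical invocation matching these options (launcher name assumed; parse_options() below multiplies the -d factors by the number of MPI processes, keeping the dims divisible by mpi_size):

    mpiexec -n 4 ./testphdf5 -d 8 16 -c 2 4

With 4 processes this yields dim0 = 32, dim1 = 64, and 2 x 4 chunks.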
@@ -129,7 +129,7 @@ usage(void)
static int
parse_options(int argc, char **argv)
{
- int mpi_size, mpi_rank; /* mpi variables */
+ int mpi_size, mpi_rank; /* mpi variables */
MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
@@ -140,107 +140,107 @@ parse_options(int argc, char **argv)
chunkdim1 = (dim1+9)/10;
while (--argc){
- if (**(++argv) != '-'){
- break;
- }else{
- switch(*(*argv+1)){
- case 'm': ndatasets = atoi((*argv+1)+1);
- if (ndatasets < 0){
- nerrors++;
- return(1);
- }
- break;
- case 'n': ngroups = atoi((*argv+1)+1);
- if (ngroups < 0){
- nerrors++;
- return(1);
- }
- break;
- case 'f': if (--argc < 1) {
- nerrors++;
- return(1);
- }
- if (**(++argv) == '-') {
- nerrors++;
- return(1);
- }
- paraprefix = *argv;
- break;
- case 'i': /* Collective MPI-IO access with independent IO */
- dxfer_coll_type = DXFER_INDEPENDENT_IO;
- break;
- case '2': /* Use the split-file driver with MPIO access */
- /* Can use $HDF5_METAPREFIX to define the */
- /* meta-file-prefix. */
- facc_type = FACC_MPIO | FACC_SPLIT;
- break;
- case 'd': /* dimensizes */
- if (--argc < 2){
- nerrors++;
- return(1);
- }
- dim0 = atoi(*(++argv))*mpi_size;
- argc--;
- dim1 = atoi(*(++argv))*mpi_size;
- /* set default chunkdim sizes too */
- chunkdim0 = (dim0+9)/10;
- chunkdim1 = (dim1+9)/10;
- break;
- case 'c': /* chunk dimensions */
- if (--argc < 2){
- nerrors++;
- return(1);
- }
- chunkdim0 = atoi(*(++argv));
- argc--;
- chunkdim1 = atoi(*(++argv));
- break;
- case 'h': /* print help message--return with nerrors set */
- return(1);
- default: printf("Illegal option(%s)\n", *argv);
- nerrors++;
- return(1);
- }
- }
+ if (**(++argv) != '-'){
+ break;
+ }else{
+ switch(*(*argv+1)){
+ case 'm': ndatasets = atoi((*argv+1)+1);
+ if (ndatasets < 0){
+ nerrors++;
+ return(1);
+ }
+ break;
+ case 'n': ngroups = atoi((*argv+1)+1);
+ if (ngroups < 0){
+ nerrors++;
+ return(1);
+ }
+ break;
+ case 'f': if (--argc < 1) {
+ nerrors++;
+ return(1);
+ }
+ if (**(++argv) == '-') {
+ nerrors++;
+ return(1);
+ }
+ paraprefix = *argv;
+ break;
+ case 'i': /* Collective MPI-IO access with independent IO */
+ dxfer_coll_type = DXFER_INDEPENDENT_IO;
+ break;
+ case '2': /* Use the split-file driver with MPIO access */
+ /* Can use $HDF5_METAPREFIX to define the */
+ /* meta-file-prefix. */
+ facc_type = FACC_MPIO | FACC_SPLIT;
+ break;
+ case 'd': /* dimension sizes */
+ if (--argc < 2){
+ nerrors++;
+ return(1);
+ }
+ dim0 = atoi(*(++argv))*mpi_size;
+ argc--;
+ dim1 = atoi(*(++argv))*mpi_size;
+ /* set default chunkdim sizes too */
+ chunkdim0 = (dim0+9)/10;
+ chunkdim1 = (dim1+9)/10;
+ break;
+ case 'c': /* chunk dimensions */
+ if (--argc < 2){
+ nerrors++;
+ return(1);
+ }
+ chunkdim0 = atoi(*(++argv));
+ argc--;
+ chunkdim1 = atoi(*(++argv));
+ break;
+ case 'h': /* print help message--return with nerrors set */
+ return(1);
+ default: HDprintf("Illegal option(%s)\n", *argv);
+ nerrors++;
+ return(1);
+ }
+ }
} /*while*/
/* check validity of dimension and chunk sizes */
if (dim0 <= 0 || dim1 <= 0){
- printf("Illegal dim sizes (%d, %d)\n", dim0, dim1);
- nerrors++;
- return(1);
+ HDprintf("Illegal dim sizes (%d, %d)\n", dim0, dim1);
+ nerrors++;
+ return(1);
}
if (chunkdim0 <= 0 || chunkdim1 <= 0){
- printf("Illegal chunkdim sizes (%d, %d)\n", chunkdim0, chunkdim1);
- nerrors++;
- return(1);
+ HDprintf("Illegal chunkdim sizes (%d, %d)\n", chunkdim0, chunkdim1);
+ nerrors++;
+ return(1);
}
/* Make sure datasets can be divided into equal portions by the processes */
if ((dim0 % mpi_size) || (dim1 % mpi_size)){
- if (MAINPROCESS)
- printf("dim0(%d) and dim1(%d) must be multiples of processes(%d)\n",
- dim0, dim1, mpi_size);
- nerrors++;
- return(1);
+ if (MAINPROCESS)
+ HDprintf("dim0(%d) and dim1(%d) must be multiples of processes(%d)\n",
+ dim0, dim1, mpi_size);
+ nerrors++;
+ return(1);
}
/* compose the test filenames */
{
- int i, n;
-
- n = sizeof(FILENAME)/sizeof(FILENAME[0]) - 1; /* exclude the NULL */
-
- for (i=0; i < n; i++)
- if (h5_fixname(FILENAME[i],fapl,filenames[i],sizeof(filenames[i]))
- == NULL){
- printf("h5_fixname failed\n");
- nerrors++;
- return(1);
- }
- printf("Test filenames are:\n");
- for (i=0; i < n; i++)
- printf(" %s\n", filenames[i]);
+ int i, n;
+
+ n = sizeof(FILENAME)/sizeof(FILENAME[0]) - 1; /* exclude the NULL */
+
+ for (i=0; i < n; i++)
+ if (h5_fixname(FILENAME[i],fapl,filenames[i],sizeof(filenames[i]))
+ == NULL){
+ HDprintf("h5_fixname failed\n");
+ nerrors++;
+ return(1);
+ }
+ HDprintf("Test filenames are:\n");
+ for (i=0; i < n; i++)
+ HDprintf(" %s\n", filenames[i]);
}
return(0);
@@ -255,7 +255,7 @@ create_faccess_plist(MPI_Comm comm, MPI_Info info, int l_facc_type)
{
hid_t ret_pl = -1;
herr_t ret; /* generic return value */
- int mpi_rank; /* mpi variables */
+ int mpi_rank; /* mpi variables */
/* need the rank for error checking macros */
MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
@@ -264,36 +264,36 @@ create_faccess_plist(MPI_Comm comm, MPI_Info info, int l_facc_type)
VRFY((ret_pl >= 0), "H5P_FILE_ACCESS");
if (l_facc_type == FACC_DEFAULT)
- return (ret_pl);
+ return (ret_pl);
if (l_facc_type == FACC_MPIO){
- /* set Parallel access with communicator */
- ret = H5Pset_fapl_mpio(ret_pl, comm, info);
- VRFY((ret >= 0), "");
+ /* set Parallel access with communicator */
+ ret = H5Pset_fapl_mpio(ret_pl, comm, info);
+ VRFY((ret >= 0), "");
ret = H5Pset_all_coll_metadata_ops(ret_pl, TRUE);
- VRFY((ret >= 0), "");
+ VRFY((ret >= 0), "");
ret = H5Pset_coll_metadata_write(ret_pl, TRUE);
- VRFY((ret >= 0), "");
- return(ret_pl);
+ VRFY((ret >= 0), "");
+ return(ret_pl);
}
if (l_facc_type == (FACC_MPIO | FACC_SPLIT)){
- hid_t mpio_pl;
-
- mpio_pl = H5Pcreate (H5P_FILE_ACCESS);
- VRFY((mpio_pl >= 0), "");
- /* set Parallel access with communicator */
- ret = H5Pset_fapl_mpio(mpio_pl, comm, info);
- VRFY((ret >= 0), "");
-
- /* setup file access template */
- ret_pl = H5Pcreate (H5P_FILE_ACCESS);
- VRFY((ret_pl >= 0), "");
- /* set Parallel access with communicator */
- ret = H5Pset_fapl_split(ret_pl, ".meta", mpio_pl, ".raw", mpio_pl);
- VRFY((ret >= 0), "H5Pset_fapl_split succeeded");
- H5Pclose(mpio_pl);
- return(ret_pl);
+ hid_t mpio_pl;
+
+ mpio_pl = H5Pcreate (H5P_FILE_ACCESS);
+ VRFY((mpio_pl >= 0), "");
+ /* set Parallel access with communicator */
+ ret = H5Pset_fapl_mpio(mpio_pl, comm, info);
+ VRFY((ret >= 0), "");
+
+ /* setup file access template */
+ ret_pl = H5Pcreate (H5P_FILE_ACCESS);
+ VRFY((ret_pl >= 0), "");
+ /* set Parallel access with communicator */
+ ret = H5Pset_fapl_split(ret_pl, ".meta", mpio_pl, ".raw", mpio_pl);
+ VRFY((ret >= 0), "H5Pset_fapl_split succeeded");
+ H5Pclose(mpio_pl);
+ return(ret_pl);
}
/* unknown file access types */
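
For context, a minimal sketch of how the tests consume this helper (not part of the patch; filenames and FACC_MPIO are declared elsewhere in this file and in testphdf5.h):

    hid_t acc_pl = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, FACC_MPIO);
    hid_t fid    = H5Fcreate(filenames[0], H5F_ACC_TRUNC, H5P_DEFAULT, acc_pl);
    VRFY((fid >= 0), "H5Fcreate succeeded");
    VRFY((H5Pclose(acc_pl) >= 0), "H5Pclose succeeded");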
@@ -303,7 +303,7 @@ create_faccess_plist(MPI_Comm comm, MPI_Info info, int l_facc_type)
int main(int argc, char **argv)
{
- int mpi_size, mpi_rank; /* mpi variables */
+ int mpi_size, mpi_rank; /* mpi variables */
H5Ptest_param_t ndsets_params, ngroups_params;
H5Ptest_param_t collngroups_params;
H5Ptest_param_t io_mode_confusion_params;
@@ -323,18 +323,18 @@ int main(int argc, char **argv)
dim1 = COL_FACTOR*mpi_size;
if (MAINPROCESS){
- printf("===================================\n");
- printf("PHDF5 TESTS START\n");
- printf("===================================\n");
+ HDprintf("===================================\n");
+ HDprintf("PHDF5 TESTS START\n");
+ HDprintf("===================================\n");
}
/* Attempt to turn off atexit post processing so that in case errors
- * happen during the test and the process is aborted, it will not get
- * hang in the atexit post processing in which it may try to make MPI
- * calls. By then, MPI calls may not work.
- */
+ * happen during the test and the process is aborted, it will not
+ * hang in the atexit post-processing, in which it may try to make MPI
+ * calls. By then, MPI calls may not work.
+ */
if (H5dont_atexit() < 0){
- printf("Failed to turn off atexit processing. Continue.\n");
+ HDprintf("Failed to turn off atexit processing. Continue.\n");
};
H5open();
h5_show_hostname();
@@ -344,10 +344,10 @@ int main(int argc, char **argv)
/* Tests are generally arranged from least to most complexity... */
AddTest("mpiodup", test_fapl_mpio_dup, NULL,
- "fapl_mpio duplicate", NULL);
+ "fapl_mpio duplicate", NULL);
AddTest("split", test_split_comm_access, NULL,
- "dataset using split communicators", PARATESTFILE);
+ "dataset using split communicators", PARATESTFILE);
#ifdef PB_OUT /* temporary: disable page buffering when parallel */
AddTest("page_buffer", test_page_buffer_access, NULL,
@@ -355,141 +355,141 @@ int main(int argc, char **argv)
#endif
AddTest("props", test_file_properties, NULL,
- "Coll Metadata file property settings", PARATESTFILE);
+ "Coll Metadata file property settings", PARATESTFILE);
AddTest("idsetw", dataset_writeInd, NULL,
- "dataset independent write", PARATESTFILE);
+ "dataset independent write", PARATESTFILE);
AddTest("idsetr", dataset_readInd, NULL,
- "dataset independent read", PARATESTFILE);
+ "dataset independent read", PARATESTFILE);
AddTest("cdsetw", dataset_writeAll, NULL,
- "dataset collective write", PARATESTFILE);
+ "dataset collective write", PARATESTFILE);
AddTest("cdsetr", dataset_readAll, NULL,
- "dataset collective read", PARATESTFILE);
+ "dataset collective read", PARATESTFILE);
AddTest("eidsetw", extend_writeInd, NULL,
- "extendible dataset independent write", PARATESTFILE);
+ "extendible dataset independent write", PARATESTFILE);
AddTest("eidsetr", extend_readInd, NULL,
- "extendible dataset independent read", PARATESTFILE);
+ "extendible dataset independent read", PARATESTFILE);
AddTest("ecdsetw", extend_writeAll, NULL,
- "extendible dataset collective write", PARATESTFILE);
+ "extendible dataset collective write", PARATESTFILE);
AddTest("ecdsetr", extend_readAll, NULL,
- "extendible dataset collective read", PARATESTFILE);
+ "extendible dataset collective read", PARATESTFILE);
AddTest("eidsetw2", extend_writeInd2, NULL,
- "extendible dataset independent write #2", PARATESTFILE);
+ "extendible dataset independent write #2", PARATESTFILE);
AddTest("selnone", none_selection_chunk, NULL,
"chunked dataset with none-selection", PARATESTFILE);
AddTest("calloc", test_chunk_alloc, NULL,
"parallel extend Chunked allocation on serial file", PARATESTFILE);
AddTest("fltread", test_filter_read, NULL,
- "parallel read of dataset written serially with filters", PARATESTFILE);
+ "parallel read of dataset written serially with filters", PARATESTFILE);
#ifdef H5_HAVE_FILTER_DEFLATE
AddTest("cmpdsetr", compress_readAll, NULL,
- "compressed dataset collective read", PARATESTFILE);
+ "compressed dataset collective read", PARATESTFILE);
#endif /* H5_HAVE_FILTER_DEFLATE */
AddTest("zerodsetr", zero_dim_dset, NULL,
- "zero dim dset", PARATESTFILE);
+ "zero dim dset", PARATESTFILE);
ndsets_params.name = PARATESTFILE;
ndsets_params.count = ndatasets;
AddTest("ndsetw", multiple_dset_write, NULL,
- "multiple datasets write", &ndsets_params);
+ "multiple datasets write", &ndsets_params);
ngroups_params.name = PARATESTFILE;
ngroups_params.count = ngroups;
AddTest("ngrpw", multiple_group_write, NULL,
- "multiple groups write", &ngroups_params);
+ "multiple groups write", &ngroups_params);
AddTest("ngrpr", multiple_group_read, NULL,
- "multiple groups read", &ngroups_params);
+ "multiple groups read", &ngroups_params);
AddTest("compact", compact_dataset, NULL,
- "compact dataset test", PARATESTFILE);
+ "compact dataset test", PARATESTFILE);
collngroups_params.name = PARATESTFILE;
collngroups_params.count = ngroups;
- AddTest("cngrpw", collective_group_write, NULL,
- "collective group and dataset write", &collngroups_params);
- AddTest("ingrpr", independent_group_read, NULL,
- "independent group and dataset read", &collngroups_params);
+ /* Combined the cngrpw and ingrpr tests because ingrpr reads the file created by cngrpw. */
+ AddTest("cngrpw-ingrpr", collective_group_write_independent_group_read, NULL,
+ "collective grp/dset write - independent grp/dset read",
+ &collngroups_params);
#ifndef H5_HAVE_WIN32_API
AddTest("bigdset", big_dataset, NULL,
"big dataset test", PARATESTFILE);
#else
- printf("big dataset test will be skipped on Windows (JIRA HDDFV-8064)\n");
+ HDprintf("big dataset test will be skipped on Windows (JIRA HDDFV-8064)\n");
#endif
AddTest("fill", dataset_fillvalue, NULL,
- "dataset fill value", PARATESTFILE);
+ "dataset fill value", PARATESTFILE);
AddTest("cchunk1",
- coll_chunk1,NULL, "simple collective chunk io",PARATESTFILE);
+ coll_chunk1,NULL, "simple collective chunk io",PARATESTFILE);
AddTest("cchunk2",
- coll_chunk2,NULL, "noncontiguous collective chunk io",PARATESTFILE);
+ coll_chunk2,NULL, "noncontiguous collective chunk io",PARATESTFILE);
AddTest("cchunk3",
- coll_chunk3,NULL, "multi-chunk collective chunk io",PARATESTFILE);
+ coll_chunk3,NULL, "multi-chunk collective chunk io",PARATESTFILE);
AddTest("cchunk4",
- coll_chunk4,NULL, "collective chunk io with partial non-selection ",PARATESTFILE);
+ coll_chunk4,NULL, "collective chunk io with partial non-selection ",PARATESTFILE);
if((mpi_size < 3)&& MAINPROCESS ) {
- printf("Collective chunk IO optimization APIs ");
- printf("needs at least 3 processes to participate\n");
- printf("Collective chunk IO API tests will be skipped \n");
+ HDprintf("Collective chunk IO optimization APIs ");
+ HDprintf("needs at least 3 processes to participate\n");
+ HDprintf("Collective chunk IO API tests will be skipped \n");
}
AddTest((mpi_size <3)? "-cchunk5":"cchunk5" ,
- coll_chunk5,NULL,
- "linked chunk collective IO without optimization",PARATESTFILE);
+ coll_chunk5,NULL,
+ "linked chunk collective IO without optimization",PARATESTFILE);
AddTest((mpi_size < 3)? "-cchunk6" : "cchunk6",
- coll_chunk6,NULL,
- "multi-chunk collective IO with direct request",PARATESTFILE);
+ coll_chunk6,NULL,
+ "multi-chunk collective IO with direct request",PARATESTFILE);
AddTest((mpi_size < 3)? "-cchunk7" : "cchunk7",
- coll_chunk7,NULL,
- "linked chunk collective IO with optimization",PARATESTFILE);
+ coll_chunk7,NULL,
+ "linked chunk collective IO with optimization",PARATESTFILE);
AddTest((mpi_size < 3)? "-cchunk8" : "cchunk8",
- coll_chunk8,NULL,
- "linked chunk collective IO transferring to multi-chunk",PARATESTFILE);
+ coll_chunk8,NULL,
+ "linked chunk collective IO transferring to multi-chunk",PARATESTFILE);
AddTest((mpi_size < 3)? "-cchunk9" : "cchunk9",
- coll_chunk9,NULL,
- "multiple chunk collective IO with optimization",PARATESTFILE);
+ coll_chunk9,NULL,
+ "multiple chunk collective IO with optimization",PARATESTFILE);
AddTest((mpi_size < 3)? "-cchunk10" : "cchunk10",
- coll_chunk10,NULL,
- "multiple chunk collective IO transferring to independent IO",PARATESTFILE);
+ coll_chunk10,NULL,
+ "multiple chunk collective IO transferring to independent IO",PARATESTFILE);
-/* irregular collective IO tests*/
+ /* irregular collective IO tests*/
AddTest("ccontw",
- coll_irregular_cont_write,NULL,
- "collective irregular contiguous write",PARATESTFILE);
+ coll_irregular_cont_write,NULL,
+ "collective irregular contiguous write",PARATESTFILE);
AddTest("ccontr",
- coll_irregular_cont_read,NULL,
- "collective irregular contiguous read",PARATESTFILE);
+ coll_irregular_cont_read,NULL,
+ "collective irregular contiguous read",PARATESTFILE);
AddTest("cschunkw",
- coll_irregular_simple_chunk_write,NULL,
- "collective irregular simple chunk write",PARATESTFILE);
+ coll_irregular_simple_chunk_write,NULL,
+ "collective irregular simple chunk write",PARATESTFILE);
AddTest("cschunkr",
- coll_irregular_simple_chunk_read,NULL,
- "collective irregular simple chunk read",PARATESTFILE);
+ coll_irregular_simple_chunk_read,NULL,
+ "collective irregular simple chunk read",PARATESTFILE);
AddTest("ccchunkw",
- coll_irregular_complex_chunk_write,NULL,
- "collective irregular complex chunk write",PARATESTFILE);
+ coll_irregular_complex_chunk_write,NULL,
+ "collective irregular complex chunk write",PARATESTFILE);
AddTest("ccchunkr",
- coll_irregular_complex_chunk_read,NULL,
- "collective irregular complex chunk read",PARATESTFILE);
+ coll_irregular_complex_chunk_read,NULL,
+ "collective irregular complex chunk read",PARATESTFILE);
AddTest("null", null_dataset, NULL,
- "null dataset test", PARATESTFILE);
+ "null dataset test", PARATESTFILE);
io_mode_confusion_params.name = PARATESTFILE;
io_mode_confusion_params.count = 0; /* value not used */
AddTest("I/Omodeconf", io_mode_confusion, NULL,
- "I/O mode confusion test -- hangs quickly on failure",
+ "I/O mode confusion test -- hangs quickly on failure",
&io_mode_confusion_params);
if((mpi_size < 3) && MAINPROCESS) {
- printf("rr_obj_hdr_flush_confusion test needs at least 3 processes.\n");
- printf("rr_obj_hdr_flush_confusion test will be skipped \n");
+ HDprintf("rr_obj_hdr_flush_confusion test needs at least 3 processes.\n");
+ HDprintf("rr_obj_hdr_flush_confusion test will be skipped \n");
}
if(mpi_size > 2) {
rr_obj_flush_confusion_params.name = PARATESTFILE;
@@ -506,12 +506,12 @@ int main(int argc, char **argv)
AddTest("tldsc",
lower_dim_size_comp_test, NULL,
- "test lower dim size comp in span tree to mpi derived type",
+ "test lower dim size comp in span tree to mpi derived type",
PARATESTFILE);
AddTest("lccio",
link_chunk_collective_io_test, NULL,
- "test mpi derived type management",
+ "test mpi derived type management",
PARATESTFILE);
AddTest("actualio", actual_io_mode_tests, NULL,
@@ -523,21 +523,21 @@ int main(int argc, char **argv)
PARATESTFILE);
AddTest("edpl", test_plist_ed, NULL,
- "encode/decode Property Lists", NULL);
+ "encode/decode Property Lists", NULL);
if((mpi_size < 2) && MAINPROCESS) {
- printf("File Image Ops daisy chain test needs at least 2 processes.\n");
- printf("File Image Ops daisy chain test will be skipped \n");
+ HDprintf("File Image Ops daisy chain test needs at least 2 processes.\n");
+ HDprintf("File Image Ops daisy chain test will be skipped \n");
}
AddTest((mpi_size < 2)? "-fiodc" : "fiodc", file_image_daisy_chain_test, NULL,
"file image ops daisy chain", NULL);
if((mpi_size < 2)&& MAINPROCESS ) {
- printf("Atomicity tests need at least 2 processes to participate\n");
- printf("8 is more recommended.. Atomicity tests will be skipped \n");
+ HDprintf("Atomicity tests need at least 2 processes to participate\n");
+ HDprintf("8 is more recommended.. Atomicity tests will be skipped \n");
}
else if (facc_type != FACC_MPIO && MAINPROCESS) {
- printf("Atomicity tests will not work with a non MPIO VFD\n");
+ HDprintf("Atomicity tests will not work with a non MPIO VFD\n");
}
else if(mpi_size >= 2 && facc_type == FACC_MPIO){
AddTest("atomicity", dataset_atomicity, NULL,
@@ -545,8 +545,14 @@ int main(int argc, char **argv)
}
AddTest("denseattr", test_dense_attr, NULL,
- "Store Dense Attributes", PARATESTFILE);
+ "Store Dense Attributes", PARATESTFILE);
+ AddTest("noselcollmdread", test_partial_no_selection_coll_md_read, NULL,
+ "Collective Metadata read with some ranks having no selection", PARATESTFILE);
+ AddTest("MC_coll_MD_read", test_multi_chunk_io_addrmap_issue, NULL,
+ "Collective MD read with multi chunk I/O (H5D__chunk_addrmap)", PARATESTFILE);
+ AddTest("LC_coll_MD_read", test_link_chunk_io_sort_chunk_issue, NULL,
+ "Collective MD read with link chunk I/O (H5D__sort_chunk)", PARATESTFILE);
/* Display testing information */
TestInfo(argv[0]);
@@ -559,9 +565,9 @@ int main(int argc, char **argv)
TestParseCmdLine(argc, argv);
if (dxfer_coll_type == DXFER_INDEPENDENT_IO && MAINPROCESS){
- printf("===================================\n"
- " Using Independent I/O with file set view to replace collective I/O \n"
- "===================================\n");
+ HDprintf("===================================\n"
+ " Using Independent I/O with file set view to replace collective I/O \n"
+ "===================================\n");
}
@@ -569,8 +575,8 @@ int main(int argc, char **argv)
PerformTests();
/* make sure all processes are finished before final report, cleanup
- * and exit.
- */
+ * and exit.
+ */
MPI_Barrier(MPI_COMM_WORLD);
/* Display test summary, if requested */
@@ -586,16 +592,16 @@ int main(int argc, char **argv)
{
int temp;
MPI_Allreduce(&nerrors, &temp, 1, MPI_INT, MPI_MAX, MPI_COMM_WORLD);
- nerrors=temp;
+ nerrors=temp;
}
- if (MAINPROCESS){ /* only process 0 reports */
- printf("===================================\n");
- if (nerrors)
- printf("***PHDF5 tests detected %d errors***\n", nerrors);
- else
- printf("PHDF5 tests finished with no errors\n");
- printf("===================================\n");
+ if (MAINPROCESS){ /* only process 0 reports */
+ HDprintf("===================================\n");
+ if (nerrors)
+ HDprintf("***PHDF5 tests detected %d errors***\n", nerrors);
+ else
+ HDprintf("PHDF5 tests finished with no errors\n");
+ HDprintf("===================================\n");
}
/* close HDF5 library */
diff --git a/testpar/testphdf5.h b/testpar/testphdf5.h
index 322cb9b..cf611b7 100644
--- a/testpar/testphdf5.h
+++ b/testpar/testphdf5.h
@@ -19,7 +19,7 @@
#include "testpar.h"
enum H5TEST_COLL_CHUNK_API {API_NONE=0,API_LINK_HARD,
- API_MULTI_HARD,API_LINK_TRUE,API_LINK_FALSE,
+ API_MULTI_HARD,API_LINK_TRUE,API_LINK_FALSE,
API_MULTI_COLL,API_MULTI_IND};
#ifndef FALSE
@@ -32,20 +32,20 @@ enum H5TEST_COLL_CHUNK_API {API_NONE=0,API_LINK_HARD,
/* Constants definitions */
-#define DIM0 600 /* Default dataset sizes. */
-#define DIM1 1200 /* Values are from a monitor pixel sizes */
-#define ROW_FACTOR 8 /* Nominal row factor for dataset size */
-#define COL_FACTOR 16 /* Nominal column factor for dataset size */
-#define RANK 2
-#define DATASETNAME1 "Data1"
-#define DATASETNAME2 "Data2"
-#define DATASETNAME3 "Data3"
-#define DATASETNAME4 "Data4"
-#define DATASETNAME5 "Data5"
-#define DATASETNAME6 "Data6"
-#define DATASETNAME7 "Data7"
-#define DATASETNAME8 "Data8"
-#define DATASETNAME9 "Data9"
+#define DIM0 600 /* Default dataset sizes. */
+#define DIM1 1200 /* Values are from monitor pixel sizes */
+#define ROW_FACTOR 8 /* Nominal row factor for dataset size */
+#define COL_FACTOR 16 /* Nominal column factor for dataset size */
+#define RANK 2
+#define DATASETNAME1 "Data1"
+#define DATASETNAME2 "Data2"
+#define DATASETNAME3 "Data3"
+#define DATASETNAME4 "Data4"
+#define DATASETNAME5 "Data5"
+#define DATASETNAME6 "Data6"
+#define DATASETNAME7 "Data7"
+#define DATASETNAME8 "Data8"
+#define DATASETNAME9 "Data9"
/* point selection order */
#define IN_ORDER 1
@@ -179,14 +179,14 @@ enum H5TEST_COLL_CHUNK_API {API_NONE=0,API_LINK_HARD,
/* Definitions of the selection mode for the no_collective_cause_tests function. */
#define TEST_COLLECTIVE 0x001
-#define TEST_SET_INDEPENDENT 0x002
+#define TEST_SET_INDEPENDENT 0x002
#define TEST_DATATYPE_CONVERSION 0x004
#define TEST_DATA_TRANSFORMS 0x008
#define TEST_NOT_SIMPLE_OR_SCALAR_DATASPACES 0x010
#define TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_COMPACT 0x020
#define TEST_NOT_CONTIGUOUS_OR_CHUNKED_DATASET_EXTERNAL 0x040
#define TEST_FILTERS 0x080
-/* TEST_FILTERS will take place of this after supporting mpio + filter for
+/* TEST_FILTERS will take the place of this after supporting mpio + filter for
* H5Dcreate and H5Dwrite */
#define TEST_FILTERS_READ 0x100
@@ -209,8 +209,8 @@ enum H5TEST_COLL_CHUNK_API {API_NONE=0,API_LINK_HARD,
/* type definitions */
typedef struct H5Ptest_param_t /* holds extra test parameters */
{
- char *name;
- int count;
+ char *name;
+ int count;
} H5Ptest_param_t;
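
The struct is the per-test parameter slot of the test framework; a sketch drawn from main() in testphdf5.c above:

    H5Ptest_param_t ndsets_params;

    ndsets_params.name  = PARATESTFILE;
    ndsets_params.count = ndatasets;
    AddTest("ndsetw", multiple_dset_write, NULL,
            "multiple datasets write", &ndsets_params);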
/* Dataset data type. Int's can be easily octo dumped. */
@@ -218,19 +218,19 @@ typedef int DATATYPE;
/* Shape Same Tests Definitions */
typedef enum {
- IND_CONTIG, /* Independent IO on contigous datasets */
- COL_CONTIG, /* Collective IO on contigous datasets */
- IND_CHUNKED, /* Independent IO on chunked datasets */
- COL_CHUNKED /* Collective IO on chunked datasets */
+ IND_CONTIG, /* Independent IO on contiguous datasets */
+ COL_CONTIG, /* Collective IO on contiguous datasets */
+ IND_CHUNKED, /* Independent IO on chunked datasets */
+ COL_CHUNKED /* Collective IO on chunked datasets */
} ShapeSameTestMethods;
/* Shared global variables */
-extern int dim0, dim1; /*Dataset dimensions */
-extern int chunkdim0, chunkdim1; /*Chunk dimensions */
-extern int nerrors; /*errors count */
-extern H5E_auto2_t old_func; /* previous error handler */
-extern void *old_client_data; /*previous error handler arg.*/
-extern int facc_type; /*Test file access type */
+extern int dim0, dim1; /*Dataset dimensions */
+extern int chunkdim0, chunkdim1; /*Chunk dimensions */
+extern int nerrors; /*errors count */
+extern H5E_auto2_t old_func; /* previous error handler */
+extern void *old_client_data; /*previous error handler arg.*/
+extern int facc_type; /*Test file access type */
extern int dxfer_coll_type;
/* Test program prototypes */
@@ -240,6 +240,7 @@ void test_file_properties(void);
void multiple_dset_write(void);
void multiple_group_write(void);
void multiple_group_read(void);
+void collective_group_write_independent_group_read(void);
void collective_group_write(void);
void independent_group_read(void);
void test_fapl_mpio_dup(void);
@@ -294,6 +295,9 @@ void file_image_daisy_chain_test(void);
void compress_readAll(void);
#endif /* H5_HAVE_FILTER_DEFLATE */
void test_dense_attr(void);
+void test_partial_no_selection_coll_md_read(void);
+void test_multi_chunk_io_addrmap_issue(void);
+void test_link_chunk_io_sort_chunk_issue(void);
/* commonly used prototypes */
hid_t create_faccess_plist(MPI_Comm comm, MPI_Info info, int l_facc_type);